Diffstat (limited to 'drivers/infiniband/hw/ocrdma')
-rw-r--r--  drivers/infiniband/hw/ocrdma/Kconfig         |    2
-rw-r--r--  drivers/infiniband/hw/ocrdma/Makefile        |    2
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma.h        |  234
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_abi.h    |   35
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_ah.c     |   23
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_hw.c     |  836
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_hw.h     |   20
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_main.c   |  243
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_sli.h    |  506
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_stats.c  |  616
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_stats.h  |   54
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_verbs.c  | 1192
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_verbs.h  |    6
13 files changed, 2752 insertions, 1017 deletions
diff --git a/drivers/infiniband/hw/ocrdma/Kconfig b/drivers/infiniband/hw/ocrdma/Kconfig
index b5b6056c851..c0cddc0192d 100644
--- a/drivers/infiniband/hw/ocrdma/Kconfig
+++ b/drivers/infiniband/hw/ocrdma/Kconfig
@@ -1,6 +1,6 @@
config INFINIBAND_OCRDMA
tristate "Emulex One Connect HCA support"
- depends on ETHERNET && NETDEVICES && PCI && (IPV6 || IPV6=n)
+ depends on ETHERNET && NETDEVICES && PCI && INET && (IPV6 || IPV6=n)
select NET_VENDOR_EMULEX
select BE2NET
---help---
diff --git a/drivers/infiniband/hw/ocrdma/Makefile b/drivers/infiniband/hw/ocrdma/Makefile
index 06a5bed12e4..d1bfd4f4cdd 100644
--- a/drivers/infiniband/hw/ocrdma/Makefile
+++ b/drivers/infiniband/hw/ocrdma/Makefile
@@ -2,4 +2,4 @@ ccflags-y := -Idrivers/net/ethernet/emulex/benet
obj-$(CONFIG_INFINIBAND_OCRDMA) += ocrdma.o
-ocrdma-y := ocrdma_main.o ocrdma_verbs.o ocrdma_hw.o ocrdma_ah.o
+ocrdma-y := ocrdma_main.o ocrdma_verbs.o ocrdma_hw.o ocrdma_ah.o ocrdma_stats.o
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma.h b/drivers/infiniband/hw/ocrdma/ocrdma.h
index 48970af2367..19011dbb930 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma.h
@@ -35,19 +35,27 @@
#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
+#include <rdma/ib_addr.h>
#include <be_roce.h>
#include "ocrdma_sli.h"
-#define OCRDMA_ROCE_DEV_VERSION "1.0.0"
+#define OCRDMA_ROCE_DRV_VERSION "10.2.145.0u"
+
+#define OCRDMA_ROCE_DRV_DESC "Emulex OneConnect RoCE Driver"
#define OCRDMA_NODE_DESC "Emulex OneConnect RoCE HCA"
-#define ocrdma_err(format, arg...) printk(KERN_ERR format, ##arg)
+#define OC_NAME_SH OCRDMA_NODE_DESC "(Skyhawk)"
+#define OC_NAME_UNKNOWN OCRDMA_NODE_DESC "(Unknown)"
+#define OC_SKH_DEVICE_PF 0x720
+#define OC_SKH_DEVICE_VF 0x728
#define OCRDMA_MAX_AH 512
#define OCRDMA_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME)
+#define convert_to_64bit(lo, hi) ((u64)hi << 32 | (u64)lo)
+
struct ocrdma_dev_attr {
u8 fw_ver[32];
u32 vendor_id;
@@ -58,13 +66,16 @@ struct ocrdma_dev_attr {
u16 max_qp;
u16 max_wqe;
u16 max_rqe;
+ u16 max_srq;
u32 max_inline_data;
int max_send_sge;
int max_recv_sge;
int max_srq_sge;
+ int max_rdma_sge;
int max_mr;
u64 max_mr_size;
u32 max_num_mr_pbl;
+ int max_mw;
int max_fmr;
int max_map_per_fmr;
int max_pages_per_frmr;
@@ -83,6 +94,12 @@ struct ocrdma_dev_attr {
u8 num_ird_pages;
};
+struct ocrdma_dma_mem {
+ void *va;
+ dma_addr_t pa;
+ u32 size;
+};
+
struct ocrdma_pbl {
void *va;
dma_addr_t pa;
@@ -97,7 +114,6 @@ struct ocrdma_queue_info {
u16 id; /* qid, where to ring the doorbell. */
u16 head, tail;
bool created;
- atomic_t used; /* Number of valid elements in the queue */
};
struct ocrdma_eq {
@@ -123,6 +139,52 @@ struct mqe_ctx {
bool cmd_done;
};
+struct ocrdma_hw_mr {
+ u32 lkey;
+ u8 fr_mr;
+ u8 remote_atomic;
+ u8 remote_rd;
+ u8 remote_wr;
+ u8 local_rd;
+ u8 local_wr;
+ u8 mw_bind;
+ u8 rsvd;
+ u64 len;
+ struct ocrdma_pbl *pbl_table;
+ u32 num_pbls;
+ u32 num_pbes;
+ u32 pbl_size;
+ u32 pbe_size;
+ u64 fbo;
+ u64 va;
+};
+
+struct ocrdma_mr {
+ struct ib_mr ibmr;
+ struct ib_umem *umem;
+ struct ocrdma_hw_mr hwmr;
+};
+
+struct ocrdma_stats {
+ u8 type;
+ struct ocrdma_dev *dev;
+};
+
+struct stats_mem {
+ struct ocrdma_mqe mqe;
+ void *va;
+ dma_addr_t pa;
+ u32 size;
+ char *debugfs_mem;
+};
+
+struct phy_info {
+ u16 auto_speeds_supported;
+ u16 fixed_speeds_supported;
+ u16 phy_type;
+ u16 interface_type;
+};
+
struct ocrdma_dev {
struct ib_device ibdev;
struct ocrdma_dev_attr attr;
@@ -133,8 +195,7 @@ struct ocrdma_dev {
struct ocrdma_cq **cq_tbl;
struct ocrdma_qp **qp_tbl;
- struct ocrdma_eq meq;
- struct ocrdma_eq *qp_eq_tbl;
+ struct ocrdma_eq *eq_tbl;
int eq_cnt;
u16 base_eqid;
u16 max_eq;
@@ -167,15 +228,34 @@ struct ocrdma_dev {
struct mqe_ctx mqe_ctx;
struct be_dev_info nic_info;
+ struct phy_info phy;
+ char model_number[32];
+ u32 hba_port_num;
struct list_head entry;
struct rcu_head rcu;
int id;
+ u64 stag_arr[OCRDMA_MAX_STAG];
+ u16 pvid;
+ u32 asic_id;
+
+ ulong last_stats_time;
+ struct mutex stats_lock; /* provide synch for debugfs operations */
+ struct stats_mem stats_mem;
+ struct ocrdma_stats rsrc_stats;
+ struct ocrdma_stats rx_stats;
+ struct ocrdma_stats wqe_stats;
+ struct ocrdma_stats tx_stats;
+ struct ocrdma_stats db_err_stats;
+ struct ocrdma_stats tx_qp_err_stats;
+ struct ocrdma_stats rx_qp_err_stats;
+ struct ocrdma_stats tx_dbg_stats;
+ struct ocrdma_stats rx_dbg_stats;
+ struct dentry *dir;
};
struct ocrdma_cq {
struct ib_cq ibcq;
- struct ocrdma_dev *dev;
struct ocrdma_cqe *va;
u32 phase;
u32 getp; /* pointer to pending wrs to
@@ -184,8 +264,8 @@ struct ocrdma_cq {
*/
u32 max_hw_cqe;
bool phase_change;
- bool armed, solicited;
- bool arm_needed;
+ bool deferred_arm, deferred_sol;
+ bool first_arm;
spinlock_t cq_lock ____cacheline_aligned; /* provide synchronization
* to cq polling
@@ -198,7 +278,7 @@ struct ocrdma_cq {
struct ocrdma_ucontext *ucontext;
dma_addr_t pa;
u32 len;
- atomic_t use_cnt;
+ u32 cqe_cnt;
/* head of all qp's sq and rq for which cqes need to be flushed
* by the software.
@@ -208,9 +288,7 @@ struct ocrdma_cq {
struct ocrdma_pd {
struct ib_pd ibpd;
- struct ocrdma_dev *dev;
struct ocrdma_ucontext *uctx;
- atomic_t use_cnt;
u32 id;
int num_dpp_qp;
u32 dpp_page;
@@ -219,7 +297,6 @@ struct ocrdma_pd {
struct ocrdma_ah {
struct ib_ah ibah;
- struct ocrdma_dev *dev;
struct ocrdma_av *av;
u16 sgid_index;
u32 id;
@@ -239,18 +316,17 @@ struct ocrdma_qp_hwq_info {
struct ocrdma_srq {
struct ib_srq ibsrq;
- struct ocrdma_dev *dev;
u8 __iomem *db;
+ struct ocrdma_qp_hwq_info rq;
+ u64 *rqe_wr_id_tbl;
+ u32 *idx_bit_fields;
+ u32 bit_fields_len;
+
/* provide synchronization to multiple context(s) posting rqe */
spinlock_t q_lock ____cacheline_aligned;
- struct ocrdma_qp_hwq_info rq;
struct ocrdma_pd *pd;
- atomic_t use_cnt;
u32 id;
- u64 *rqe_wr_id_tbl;
- u32 *idx_bit_fields;
- u32 bit_fields_len;
};
struct ocrdma_qp {
@@ -258,8 +334,6 @@ struct ocrdma_qp {
struct ocrdma_dev *dev;
u8 __iomem *sq_db;
- /* provide synchronization to multiple context(s) posting wqe, rqe */
- spinlock_t q_lock ____cacheline_aligned;
struct ocrdma_qp_hwq_info sq;
struct {
uint64_t wrid;
@@ -269,6 +343,9 @@ struct ocrdma_qp {
uint8_t rsvd[3];
} *wqe_wr_id_tbl;
u32 max_inline_data;
+
+ /* provide synchronization to multiple context(s) posting wqe, rqe */
+ spinlock_t q_lock ____cacheline_aligned;
struct ocrdma_cq *sq_cq;
/* list maintained per CQ to flush SQ errors */
struct list_head sq_entry;
@@ -294,46 +371,17 @@ struct ocrdma_qp {
u32 qkey;
bool dpp_enabled;
u8 *ird_q_va;
-};
-
-#define OCRDMA_GET_NUM_POSTED_SHIFT_VAL(qp) \
- (((qp->dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) && \
- (qp->id < 64)) ? 24 : 16)
-
-struct ocrdma_hw_mr {
- struct ocrdma_dev *dev;
- u32 lkey;
- u8 fr_mr;
- u8 remote_atomic;
- u8 remote_rd;
- u8 remote_wr;
- u8 local_rd;
- u8 local_wr;
- u8 mw_bind;
- u8 rsvd;
- u64 len;
- struct ocrdma_pbl *pbl_table;
- u32 num_pbls;
- u32 num_pbes;
- u32 pbl_size;
- u32 pbe_size;
- u64 fbo;
- u64 va;
-};
-
-struct ocrdma_mr {
- struct ib_mr ibmr;
- struct ib_umem *umem;
- struct ocrdma_hw_mr hwmr;
- struct ocrdma_pd *pd;
+ bool signaled;
};
struct ocrdma_ucontext {
struct ib_ucontext ibucontext;
- struct ocrdma_dev *dev;
struct list_head mm_head;
struct mutex mm_list_lock; /* protects list entries of mm type */
+ struct ocrdma_pd *cntxt_pd;
+ int pd_in_use;
+
struct {
u32 *va;
dma_addr_t pa;
@@ -390,4 +438,84 @@ static inline struct ocrdma_srq *get_ocrdma_srq(struct ib_srq *ibsrq)
return container_of(ibsrq, struct ocrdma_srq, ibsrq);
}
+static inline int is_cqe_valid(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe)
+{
+ int cqe_valid;
+ cqe_valid = le32_to_cpu(cqe->flags_status_srcqpn) & OCRDMA_CQE_VALID;
+ return (cqe_valid == cq->phase);
+}
+
+static inline int is_cqe_for_sq(struct ocrdma_cqe *cqe)
+{
+ return (le32_to_cpu(cqe->flags_status_srcqpn) &
+ OCRDMA_CQE_QTYPE) ? 0 : 1;
+}
+
+static inline int is_cqe_invalidated(struct ocrdma_cqe *cqe)
+{
+ return (le32_to_cpu(cqe->flags_status_srcqpn) &
+ OCRDMA_CQE_INVALIDATE) ? 1 : 0;
+}
+
+static inline int is_cqe_imm(struct ocrdma_cqe *cqe)
+{
+ return (le32_to_cpu(cqe->flags_status_srcqpn) &
+ OCRDMA_CQE_IMM) ? 1 : 0;
+}
+
+static inline int is_cqe_wr_imm(struct ocrdma_cqe *cqe)
+{
+ return (le32_to_cpu(cqe->flags_status_srcqpn) &
+ OCRDMA_CQE_WRITE_IMM) ? 1 : 0;
+}
+
+static inline int ocrdma_resolve_dmac(struct ocrdma_dev *dev,
+ struct ib_ah_attr *ah_attr, u8 *mac_addr)
+{
+ struct in6_addr in6;
+
+ memcpy(&in6, ah_attr->grh.dgid.raw, sizeof(in6));
+ if (rdma_is_multicast_addr(&in6))
+ rdma_get_mcast_mac(&in6, mac_addr);
+ else
+ memcpy(mac_addr, ah_attr->dmac, ETH_ALEN);
+ return 0;
+}
+
+static inline char *hca_name(struct ocrdma_dev *dev)
+{
+ switch (dev->nic_info.pdev->device) {
+ case OC_SKH_DEVICE_PF:
+ case OC_SKH_DEVICE_VF:
+ return OC_NAME_SH;
+ default:
+ return OC_NAME_UNKNOWN;
+ }
+}
+
+static inline int ocrdma_get_eq_table_index(struct ocrdma_dev *dev,
+ int eqid)
+{
+ int indx;
+
+ for (indx = 0; indx < dev->eq_cnt; indx++) {
+ if (dev->eq_tbl[indx].q.id == eqid)
+ return indx;
+ }
+
+ return -EINVAL;
+}
+
+static inline u8 ocrdma_get_asic_type(struct ocrdma_dev *dev)
+{
+ if (dev->nic_info.dev_family == 0xF && !dev->asic_id) {
+ pci_read_config_dword(
+ dev->nic_info.pdev,
+ OCRDMA_SLI_ASIC_ID_OFFSET, &dev->asic_id);
+ }
+
+ return (dev->asic_id & OCRDMA_SLI_ASIC_GEN_NUM_MASK) >>
+ OCRDMA_SLI_ASIC_GEN_NUM_SHIFT;
+}
+
#endif
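
The block of inline helpers added above centralizes CQE flag decoding (valid/phase, SQ vs RQ, invalidate, immediate) so the verbs poll path does not open-code `flags_status_srcqpn` masking. Note also that `ocrdma_get_asic_type()` caches the ASIC generation from PCI config space, replacing the `dev_family == OCRDMA_GEN2_FAMILY` tests seen later in this diff. A minimal sketch of how a poll step might consume the helpers; `ocrdma_poll_one()` and its dispatch bodies are hypothetical, only the helper calls are from this patch:

/* Hypothetical poll step: cq->va is the CQE ring and cq->getp the
 * consumer index (both fields exist above); dispatch bodies elided.
 */
static int ocrdma_poll_one(struct ocrdma_cq *cq)
{
	struct ocrdma_cqe *cqe = &cq->va[cq->getp];

	if (!is_cqe_valid(cq, cqe))
		return 0;		/* nothing new in this phase */
	if (is_cqe_for_sq(cqe)) {
		/* send completion */
	} else if (is_cqe_wr_imm(cqe) || is_cqe_imm(cqe)) {
		/* receive carrying (write-)immediate data */
	} else if (is_cqe_invalidated(cqe)) {
		/* receive that invalidated an rkey */
	}
	return 1;
}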
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_abi.h b/drivers/infiniband/hw/ocrdma/ocrdma_abi.h
index 517ab20b727..1554cca5712 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_abi.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_abi.h
@@ -28,6 +28,10 @@
#ifndef __OCRDMA_ABI_H__
#define __OCRDMA_ABI_H__
+#define OCRDMA_ABI_VERSION 2
+#define OCRDMA_BE_ROCE_ABI_VERSION 1
+/* user kernel communication data structures. */
+
struct ocrdma_alloc_ucontext_resp {
u32 dev_id;
u32 wqe_size;
@@ -35,16 +39,16 @@ struct ocrdma_alloc_ucontext_resp {
u32 dpp_wqe_size;
u64 ah_tbl_page;
u32 ah_tbl_len;
- u32 rsvd;
- u8 fw_ver[32];
u32 rqe_size;
+ u8 fw_ver[32];
+ /* for future use/new features in progress */
u64 rsvd1;
-} __packed;
+ u64 rsvd2;
+};
-/* user kernel communication data structures. */
struct ocrdma_alloc_pd_ureq {
u64 rsvd1;
-} __packed;
+};
struct ocrdma_alloc_pd_uresp {
u32 id;
@@ -52,12 +56,12 @@ struct ocrdma_alloc_pd_uresp {
u32 dpp_page_addr_hi;
u32 dpp_page_addr_lo;
u64 rsvd1;
-} __packed;
+};
struct ocrdma_create_cq_ureq {
u32 dpp_cq;
- u32 rsvd;
-} __packed;
+ u32 rsvd; /* pad */
+};
#define MAX_CQ_PAGES 8
struct ocrdma_create_cq_uresp {
@@ -69,9 +73,10 @@ struct ocrdma_create_cq_uresp {
u64 db_page_addr;
u32 db_page_size;
u32 phase_change;
+ /* for future use/new features in progress */
u64 rsvd1;
u64 rsvd2;
-} __packed;
+};
#define MAX_QP_PAGES 8
#define MAX_UD_AV_PAGES 8
@@ -80,14 +85,14 @@ struct ocrdma_create_qp_ureq {
u8 enable_dpp_cq;
u8 rsvd;
u16 dpp_cq_id;
- u32 rsvd1;
+ u32 rsvd1; /* pad */
};
struct ocrdma_create_qp_uresp {
u16 qp_id;
u16 sq_dbid;
u16 rq_dbid;
- u16 resv0;
+ u16 resv0; /* pad */
u32 sq_page_size;
u32 rq_page_size;
u32 num_sq_pages;
@@ -98,19 +103,17 @@ struct ocrdma_create_qp_uresp {
u32 db_page_size;
u32 dpp_credit;
u32 dpp_offset;
- u32 rsvd1;
u32 num_wqe_allocated;
u32 num_rqe_allocated;
u32 db_sq_offset;
u32 db_rq_offset;
u32 db_shift;
- u64 rsvd2;
- u64 rsvd3;
+ u64 rsvd[11];
} __packed;
struct ocrdma_create_srq_uresp {
u16 rq_dbid;
- u16 resv0;
+ u16 resv0; /* pad */
u32 resv1;
u32 rq_page_size;
@@ -126,6 +129,6 @@ struct ocrdma_create_srq_uresp {
u64 rsvd2;
u64 rsvd3;
-} __packed;
+};
#endif /* __OCRDMA_ABI_H__ */
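
With every field naturally aligned and the holes filled by named pad/reserved members, the `__packed` attribute on these user/kernel ABI structures becomes unnecessary and is dropped (it only pessimized code generation). A hedged sketch of how such a layout can be pinned down at compile time; the struct and the size value are illustrative, not taken from this patch:

#include <linux/bug.h>

struct example_uresp {
	u32 id;
	u32 dpp_enabled;
	u64 rsvd;	/* explicit pad: identical layout on 32- and 64-bit */
};

static inline void example_uresp_layout_check(void)
{
	BUILD_BUG_ON(sizeof(struct example_uresp) != 16);
}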
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
index a877a8ed790..d4cc01f10c0 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
@@ -29,19 +29,17 @@
#include <net/netevent.h>
#include <rdma/ib_addr.h>
-#include <rdma/ib_cache.h>
#include "ocrdma.h"
#include "ocrdma_verbs.h"
#include "ocrdma_ah.h"
#include "ocrdma_hw.h"
-static inline int set_av_attr(struct ocrdma_ah *ah,
+static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
struct ib_ah_attr *attr, int pdid)
{
int status = 0;
u16 vlan_tag; bool vlan_enabled = false;
- struct ocrdma_dev *dev = ah->dev;
struct ocrdma_eth_vlan eth;
struct ocrdma_grh grh;
int eth_sz;
@@ -51,7 +49,9 @@ static inline int set_av_attr(struct ocrdma_ah *ah,
ah->sgid_index = attr->grh.sgid_index;
- vlan_tag = rdma_get_vlan_id(&attr->grh.dgid);
+ vlan_tag = attr->vlan_id;
+ if (!vlan_tag || (vlan_tag > 0xFFF))
+ vlan_tag = dev->pvid;
if (vlan_tag && (vlan_tag < 0x1000)) {
eth.eth_type = cpu_to_be16(0x8100);
eth.roce_eth_type = cpu_to_be16(OCRDMA_ROCE_ETH_TYPE);
@@ -64,7 +64,8 @@ static inline int set_av_attr(struct ocrdma_ah *ah,
eth_sz = sizeof(struct ocrdma_eth_basic);
}
memcpy(&eth.smac[0], &dev->nic_info.mac_addr[0], ETH_ALEN);
- status = ocrdma_resolve_dgid(dev, &attr->grh.dgid, &eth.dmac[0]);
+ memcpy(&eth.dmac[0], attr->dmac, ETH_ALEN);
+ status = ocrdma_resolve_dmac(dev, attr, &eth.dmac[0]);
if (status)
return status;
status = ocrdma_query_gid(&dev->ibdev, 1, attr->grh.sgid_index,
@@ -84,6 +85,7 @@ static inline int set_av_attr(struct ocrdma_ah *ah,
memcpy((u8 *)ah->av + eth_sz, &grh, sizeof(struct ocrdma_grh));
if (vlan_enabled)
ah->av->valid |= OCRDMA_AV_VLAN_VALID;
+ ah->av->valid = cpu_to_le32(ah->av->valid);
return status;
}
@@ -93,20 +95,19 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
int status;
struct ocrdma_ah *ah;
struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
- struct ocrdma_dev *dev = pd->dev;
+ struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
if (!(attr->ah_flags & IB_AH_GRH))
return ERR_PTR(-EINVAL);
- ah = kzalloc(sizeof *ah, GFP_ATOMIC);
+ ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
if (!ah)
return ERR_PTR(-ENOMEM);
- ah->dev = pd->dev;
status = ocrdma_alloc_av(dev, ah);
if (status)
goto av_err;
- status = set_av_attr(ah, attr, pd->id);
+ status = set_av_attr(dev, ah, attr, pd->id);
if (status)
goto av_conf_err;
@@ -127,7 +128,9 @@ av_err:
int ocrdma_destroy_ah(struct ib_ah *ibah)
{
struct ocrdma_ah *ah = get_ocrdma_ah(ibah);
- ocrdma_free_av(ah->dev, ah);
+ struct ocrdma_dev *dev = get_ocrdma_dev(ibah->device);
+
+ ocrdma_free_av(dev, ah);
kfree(ah);
return 0;
}
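
Two behavioral changes in the AH path are worth calling out: the VLAN tag now comes from `attr->vlan_id` (resolved by the IB core) with the port-default `dev->pvid` as fallback, and the destination MAC is taken from `attr->dmac` unless the DGID is multicast. The tag-selection rule, distilled into a standalone sketch (the helper name is ours, not the driver's):

/* 0 or >0xFFF means "no usable tag"; fall back to the port PVID.
 * The caller still treats a final tag of 0 as untagged.
 */
static u16 example_pick_vlan_tag(u16 attr_vlan_id, u16 port_pvid)
{
	u16 tag = attr_vlan_id;

	if (!tag || tag > 0xFFF)
		tag = port_pvid;
	return tag;
}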
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
index 71942af4fce..3bbf2010a82 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -32,7 +32,6 @@
#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
-#include <rdma/ib_addr.h>
#include "ocrdma.h"
#include "ocrdma_hw.h"
@@ -94,7 +93,7 @@ enum cqe_status {
static inline void *ocrdma_get_eqe(struct ocrdma_eq *eq)
{
- return (u8 *)eq->q.va + (eq->q.tail * sizeof(struct ocrdma_eqe));
+ return eq->q.va + (eq->q.tail * sizeof(struct ocrdma_eqe));
}
static inline void ocrdma_eq_inc_tail(struct ocrdma_eq *eq)
@@ -105,8 +104,7 @@ static inline void ocrdma_eq_inc_tail(struct ocrdma_eq *eq)
static inline void *ocrdma_get_mcqe(struct ocrdma_dev *dev)
{
struct ocrdma_mcqe *cqe = (struct ocrdma_mcqe *)
- ((u8 *) dev->mq.cq.va +
- (dev->mq.cq.tail * sizeof(struct ocrdma_mcqe)));
+ (dev->mq.cq.va + (dev->mq.cq.tail * sizeof(struct ocrdma_mcqe)));
if (!(le32_to_cpu(cqe->valid_ae_cmpl_cons) & OCRDMA_MCQE_VALID_MASK))
return NULL;
@@ -120,21 +118,17 @@ static inline void ocrdma_mcq_inc_tail(struct ocrdma_dev *dev)
static inline struct ocrdma_mqe *ocrdma_get_mqe(struct ocrdma_dev *dev)
{
- return (struct ocrdma_mqe *)((u8 *) dev->mq.sq.va +
- (dev->mq.sq.head *
- sizeof(struct ocrdma_mqe)));
+ return dev->mq.sq.va + (dev->mq.sq.head * sizeof(struct ocrdma_mqe));
}
static inline void ocrdma_mq_inc_head(struct ocrdma_dev *dev)
{
dev->mq.sq.head = (dev->mq.sq.head + 1) & (OCRDMA_MQ_LEN - 1);
- atomic_inc(&dev->mq.sq.used);
}
static inline void *ocrdma_get_mqe_rsp(struct ocrdma_dev *dev)
{
- return (void *)((u8 *) dev->mq.sq.va +
- (dev->mqe_ctx.tag * sizeof(struct ocrdma_mqe)));
+ return dev->mq.sq.va + (dev->mqe_ctx.tag * sizeof(struct ocrdma_mqe));
}
enum ib_qp_state get_ibqp_state(enum ocrdma_qp_state qps)
@@ -155,7 +149,7 @@ enum ib_qp_state get_ibqp_state(enum ocrdma_qp_state qps)
return IB_QPS_SQE;
case OCRDMA_QPS_ERR:
return IB_QPS_ERR;
- };
+ }
return IB_QPS_ERR;
}
@@ -176,13 +170,13 @@ static enum ocrdma_qp_state get_ocrdma_qp_state(enum ib_qp_state qps)
return OCRDMA_QPS_SQE;
case IB_QPS_ERR:
return OCRDMA_QPS_ERR;
- };
+ }
return OCRDMA_QPS_ERR;
}
static int ocrdma_get_mbx_errno(u32 status)
{
- int err_num = -EFAULT;
+ int err_num;
u8 mbox_status = (status & OCRDMA_MBX_RSP_STATUS_MASK) >>
OCRDMA_MBX_RSP_STATUS_SHIFT;
u8 add_status = (status & OCRDMA_MBX_RSP_ASTATUS_MASK) >>
@@ -248,6 +242,23 @@ static int ocrdma_get_mbx_errno(u32 status)
return err_num;
}
+char *port_speed_string(struct ocrdma_dev *dev)
+{
+ char *str = "";
+ u16 speeds_supported;
+
+ speeds_supported = dev->phy.fixed_speeds_supported |
+ dev->phy.auto_speeds_supported;
+ if (speeds_supported & OCRDMA_PHY_SPEED_40GBPS)
+ str = "40Gbps ";
+ else if (speeds_supported & OCRDMA_PHY_SPEED_10GBPS)
+ str = "10Gbps ";
+ else if (speeds_supported & OCRDMA_PHY_SPEED_1GBPS)
+ str = "1Gbps ";
+
+ return str;
+}
+
static int ocrdma_get_mbx_cqe_errno(u16 cqe_status)
{
int err_num = -EINVAL;
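
`port_speed_string()` collapses the fixed and auto-negotiated capability masks into one label, preferring the highest speed. A plausible call site would be a probe-time banner; this is hypothetical, as the consumer is not shown in this section:

/* Hypothetical consumer; only port_speed_string() is from the patch. */
pr_info("%s: %s%s RoCE port\n", dev->ibdev.name,
	port_speed_string(dev), dev->model_number);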
@@ -261,10 +272,11 @@ static int ocrdma_get_mbx_cqe_errno(u16 cqe_status)
break;
case OCRDMA_MBX_CQE_STATUS_INSUFFICIENT_RESOURCES:
case OCRDMA_MBX_CQE_STATUS_QUEUE_FLUSHING:
- err_num = -EAGAIN;
+ err_num = -EINVAL;
break;
case OCRDMA_MBX_CQE_STATUS_DMA_FAILED:
- err_num = -EIO;
+ default:
+ err_num = -EINVAL;
break;
}
return err_num;
@@ -336,6 +348,11 @@ static void *ocrdma_init_emb_mqe(u8 opcode, u32 cmd_len)
return mqe;
}
+static void *ocrdma_alloc_mqe(void)
+{
+ return kzalloc(sizeof(struct ocrdma_mqe), GFP_KERNEL);
+}
+
static void ocrdma_free_q(struct ocrdma_dev *dev, struct ocrdma_queue_info *q)
{
dma_free_coherent(&dev->nic_info.pdev->dev, q->size, q->va, q->dma);
@@ -368,24 +385,8 @@ static void ocrdma_build_q_pages(struct ocrdma_pa *q_pa, int cnt,
}
}
-static void ocrdma_assign_eq_vect_gen2(struct ocrdma_dev *dev,
- struct ocrdma_eq *eq)
-{
- /* assign vector and update vector id for next EQ */
- eq->vector = dev->nic_info.msix.start_vector;
- dev->nic_info.msix.start_vector += 1;
-}
-
-static void ocrdma_free_eq_vect_gen2(struct ocrdma_dev *dev)
-{
- /* this assumes that EQs are freed in exactly reverse order
- * as its allocation.
- */
- dev->nic_info.msix.start_vector -= 1;
-}
-
-static int ocrdma_mbx_delete_q(struct ocrdma_dev *dev, struct ocrdma_queue_info *q,
- int queue_type)
+static int ocrdma_mbx_delete_q(struct ocrdma_dev *dev,
+ struct ocrdma_queue_info *q, int queue_type)
{
u8 opcode = 0;
int status;
@@ -424,11 +425,8 @@ static int ocrdma_mbx_create_eq(struct ocrdma_dev *dev, struct ocrdma_eq *eq)
memset(cmd, 0, sizeof(*cmd));
ocrdma_init_mch(&cmd->req, OCRDMA_CMD_CREATE_EQ, OCRDMA_SUBSYS_COMMON,
sizeof(*cmd));
- if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY)
- cmd->req.rsvd_version = 0;
- else
- cmd->req.rsvd_version = 2;
+ cmd->req.rsvd_version = 2;
cmd->num_pages = 4;
cmd->valid = OCRDMA_CREATE_EQ_VALID;
cmd->cnt = 4 << OCRDMA_CREATE_EQ_CNT_SHIFT;
@@ -439,12 +437,7 @@ static int ocrdma_mbx_create_eq(struct ocrdma_dev *dev, struct ocrdma_eq *eq)
NULL);
if (!status) {
eq->q.id = rsp->vector_eqid & 0xffff;
- if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY)
- ocrdma_assign_eq_vect_gen2(dev, eq);
- else {
- eq->vector = (rsp->vector_eqid >> 16) & 0xffff;
- dev->nic_info.msix.start_vector += 1;
- }
+ eq->vector = (rsp->vector_eqid >> 16) & 0xffff;
eq->q.created = true;
}
return status;
@@ -472,7 +465,7 @@ mbx_err:
return status;
}
-static int ocrdma_get_irq(struct ocrdma_dev *dev, struct ocrdma_eq *eq)
+int ocrdma_get_irq(struct ocrdma_dev *dev, struct ocrdma_eq *eq)
{
int irq;
@@ -487,8 +480,6 @@ static void _ocrdma_destroy_eq(struct ocrdma_dev *dev, struct ocrdma_eq *eq)
{
if (eq->q.created) {
ocrdma_mbx_delete_q(dev, &eq->q, QTYPE_EQ);
- if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY)
- ocrdma_free_eq_vect_gen2(dev);
ocrdma_free_q(dev, &eq->q);
}
}
@@ -507,13 +498,12 @@ static void ocrdma_destroy_eq(struct ocrdma_dev *dev, struct ocrdma_eq *eq)
_ocrdma_destroy_eq(dev, eq);
}
-static void ocrdma_destroy_qp_eqs(struct ocrdma_dev *dev)
+static void ocrdma_destroy_eqs(struct ocrdma_dev *dev)
{
int i;
- /* deallocate the data path eqs */
for (i = 0; i < dev->eq_cnt; i++)
- ocrdma_destroy_eq(dev, &dev->qp_eq_tbl[i]);
+ ocrdma_destroy_eq(dev, &dev->eq_tbl[i]);
}
static int ocrdma_mbx_mq_cq_create(struct ocrdma_dev *dev,
@@ -528,16 +518,21 @@ static int ocrdma_mbx_mq_cq_create(struct ocrdma_dev *dev,
ocrdma_init_mch(&cmd->req, OCRDMA_CMD_CREATE_CQ,
OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
- cmd->pgsz_pgcnt = PAGES_4K_SPANNED(cq->va, cq->size);
+ cmd->req.rsvd_version = OCRDMA_CREATE_CQ_VER2;
+ cmd->pgsz_pgcnt = (cq->size / OCRDMA_MIN_Q_PAGE_SIZE) <<
+ OCRDMA_CREATE_CQ_PAGE_SIZE_SHIFT;
+ cmd->pgsz_pgcnt |= PAGES_4K_SPANNED(cq->va, cq->size);
+
cmd->ev_cnt_flags = OCRDMA_CREATE_CQ_DEF_FLAGS;
- cmd->eqn = (eq->id << OCRDMA_CREATE_CQ_EQID_SHIFT);
+ cmd->eqn = eq->id;
+ cmd->cqe_count = cq->size / sizeof(struct ocrdma_mcqe);
- ocrdma_build_q_pages(&cmd->pa[0], cmd->pgsz_pgcnt,
+ ocrdma_build_q_pages(&cmd->pa[0], cq->size / OCRDMA_MIN_Q_PAGE_SIZE,
cq->dma, PAGE_SIZE_4K);
status = be_roce_mcc_cmd(dev->nic_info.netdev,
cmd, sizeof(*cmd), NULL, NULL);
if (!status) {
- cq->id = (rsp->cq_id & OCRDMA_CREATE_CQ_RSP_CQ_ID_MASK);
+ cq->id = (u16) (rsp->cq_id & OCRDMA_CREATE_CQ_RSP_CQ_ID_MASK);
cq->created = true;
}
return status;
@@ -564,32 +559,22 @@ static int ocrdma_mbx_create_mq(struct ocrdma_dev *dev,
memset(cmd, 0, sizeof(*cmd));
num_pages = PAGES_4K_SPANNED(mq->va, mq->size);
- if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
- ocrdma_init_mch(&cmd->req, OCRDMA_CMD_CREATE_MQ,
- OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
- cmd->v0.pages = num_pages;
- cmd->v0.async_cqid_valid = OCRDMA_CREATE_MQ_ASYNC_CQ_VALID;
- cmd->v0.async_cqid_valid = (cq->id << 1);
- cmd->v0.cqid_ringsize |= (ocrdma_encoded_q_len(mq->len) <<
- OCRDMA_CREATE_MQ_RING_SIZE_SHIFT);
- cmd->v0.cqid_ringsize |=
- (cq->id << OCRDMA_CREATE_MQ_V0_CQ_ID_SHIFT);
- cmd->v0.valid = OCRDMA_CREATE_MQ_VALID;
- pa = &cmd->v0.pa[0];
- } else {
- ocrdma_init_mch(&cmd->req, OCRDMA_CMD_CREATE_MQ_EXT,
- OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
- cmd->req.rsvd_version = 1;
- cmd->v1.cqid_pages = num_pages;
- cmd->v1.cqid_pages |= (cq->id << OCRDMA_CREATE_MQ_CQ_ID_SHIFT);
- cmd->v1.async_cqid_valid = OCRDMA_CREATE_MQ_ASYNC_CQ_VALID;
- cmd->v1.async_event_bitmap = Bit(20);
- cmd->v1.async_cqid_ringsize = cq->id;
- cmd->v1.async_cqid_ringsize |= (ocrdma_encoded_q_len(mq->len) <<
- OCRDMA_CREATE_MQ_RING_SIZE_SHIFT);
- cmd->v1.valid = OCRDMA_CREATE_MQ_VALID;
- pa = &cmd->v1.pa[0];
- }
+ ocrdma_init_mch(&cmd->req, OCRDMA_CMD_CREATE_MQ_EXT,
+ OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
+ cmd->req.rsvd_version = 1;
+ cmd->cqid_pages = num_pages;
+ cmd->cqid_pages |= (cq->id << OCRDMA_CREATE_MQ_CQ_ID_SHIFT);
+ cmd->async_cqid_valid = OCRDMA_CREATE_MQ_ASYNC_CQ_VALID;
+
+ cmd->async_event_bitmap = Bit(OCRDMA_ASYNC_GRP5_EVE_CODE);
+ cmd->async_event_bitmap |= Bit(OCRDMA_ASYNC_RDMA_EVE_CODE);
+
+ cmd->async_cqid_ringsize = cq->id;
+ cmd->async_cqid_ringsize |= (ocrdma_encoded_q_len(mq->len) <<
+ OCRDMA_CREATE_MQ_RING_SIZE_SHIFT);
+ cmd->valid = OCRDMA_CREATE_MQ_VALID;
+ pa = &cmd->pa[0];
+
ocrdma_build_q_pages(pa, num_pages, mq->dma, PAGE_SIZE_4K);
status = be_roce_mcc_cmd(dev->nic_info.netdev,
cmd, sizeof(*cmd), NULL, NULL);
@@ -610,7 +595,8 @@ static int ocrdma_create_mq(struct ocrdma_dev *dev)
if (status)
goto alloc_err;
- status = ocrdma_mbx_mq_cq_create(dev, &dev->mq.cq, &dev->meq.q);
+ dev->eq_tbl[0].cq_cnt++;
+ status = ocrdma_mbx_mq_cq_create(dev, &dev->mq.cq, &dev->eq_tbl[0].q);
if (status)
goto mbx_cq_free;
@@ -667,7 +653,7 @@ static void ocrdma_process_qpcat_error(struct ocrdma_dev *dev,
if (qp == NULL)
BUG();
- ocrdma_qp_state_machine(qp, new_ib_qps, &old_ib_qps);
+ ocrdma_qp_state_change(qp, new_ib_qps, &old_ib_qps);
}
static void ocrdma_dispatch_ibevent(struct ocrdma_dev *dev,
@@ -675,7 +661,7 @@ static void ocrdma_dispatch_ibevent(struct ocrdma_dev *dev,
{
struct ocrdma_qp *qp = NULL;
struct ocrdma_cq *cq = NULL;
- struct ib_event ib_evt;
+ struct ib_event ib_evt = { 0 };
int cq_event = 0;
int qp_event = 1;
int srq_event = 0;
@@ -700,6 +686,8 @@ static void ocrdma_dispatch_ibevent(struct ocrdma_dev *dev,
case OCRDMA_CQ_OVERRUN_ERROR:
ib_evt.element.cq = &cq->ibcq;
ib_evt.event = IB_EVENT_CQ_ERR;
+ cq_event = 1;
+ qp_event = 0;
break;
case OCRDMA_CQ_QPCAT_ERROR:
ib_evt.element.qp = &qp->ibqp;
@@ -745,7 +733,7 @@ static void ocrdma_dispatch_ibevent(struct ocrdma_dev *dev,
qp_event = 0;
srq_event = 0;
dev_event = 0;
- ocrdma_err("%s() unknown type=0x%x\n", __func__, type);
+ pr_err("%s() unknown type=0x%x\n", __func__, type);
break;
}
@@ -760,11 +748,35 @@ static void ocrdma_dispatch_ibevent(struct ocrdma_dev *dev,
qp->srq->ibsrq.event_handler(&ib_evt,
qp->srq->ibsrq.
srq_context);
- } else if (dev_event)
+ } else if (dev_event) {
+ pr_err("%s: Fatal event received\n", dev->ibdev.name);
ib_dispatch_event(&ib_evt);
+ }
}
+static void ocrdma_process_grp5_aync(struct ocrdma_dev *dev,
+ struct ocrdma_ae_mcqe *cqe)
+{
+ struct ocrdma_ae_pvid_mcqe *evt;
+ int type = (cqe->valid_ae_event & OCRDMA_AE_MCQE_EVENT_TYPE_MASK) >>
+ OCRDMA_AE_MCQE_EVENT_TYPE_SHIFT;
+
+ switch (type) {
+ case OCRDMA_ASYNC_EVENT_PVID_STATE:
+ evt = (struct ocrdma_ae_pvid_mcqe *)cqe;
+ if ((evt->tag_enabled & OCRDMA_AE_PVID_MCQE_ENABLED_MASK) >>
+ OCRDMA_AE_PVID_MCQE_ENABLED_SHIFT)
+ dev->pvid = ((evt->tag_enabled &
+ OCRDMA_AE_PVID_MCQE_TAG_MASK) >>
+ OCRDMA_AE_PVID_MCQE_TAG_SHIFT);
+ break;
+ default:
+ /* Not interested in these events. */
+ break;
+ }
+}
+
static void ocrdma_process_acqe(struct ocrdma_dev *dev, void *ae_cqe)
{
/* async CQE processing */
@@ -772,11 +784,13 @@ static void ocrdma_process_acqe(struct ocrdma_dev *dev, void *ae_cqe)
u32 evt_code = (cqe->valid_ae_event & OCRDMA_AE_MCQE_EVENT_CODE_MASK) >>
OCRDMA_AE_MCQE_EVENT_CODE_SHIFT;
- if (evt_code == OCRDMA_ASYNC_EVE_CODE)
+ if (evt_code == OCRDMA_ASYNC_RDMA_EVE_CODE)
ocrdma_dispatch_ibevent(dev, cqe);
+ else if (evt_code == OCRDMA_ASYNC_GRP5_EVE_CODE)
+ ocrdma_process_grp5_aync(dev, cqe);
else
- ocrdma_err("%s(%d) invalid evt code=0x%x\n",
- __func__, dev->id, evt_code);
+ pr_err("%s(%d) invalid evt code=0x%x\n", __func__,
+ dev->id, evt_code);
}
static void ocrdma_process_mcqe(struct ocrdma_dev *dev, struct ocrdma_mcqe *cqe)
@@ -790,8 +804,8 @@ static void ocrdma_process_mcqe(struct ocrdma_dev *dev, struct ocrdma_mcqe *cqe)
dev->mqe_ctx.cmd_done = true;
wake_up(&dev->mqe_ctx.cmd_wait);
} else
- ocrdma_err("%s() cqe for invalid tag0x%x.expected=0x%x\n",
- __func__, cqe->tag_lo, dev->mqe_ctx.tag);
+ pr_err("%s() cqe for invalid tag0x%x.expected=0x%x\n",
+ __func__, cqe->tag_lo, dev->mqe_ctx.tag);
}
static int ocrdma_mq_cq_handler(struct ocrdma_dev *dev, u16 cq_id)
@@ -809,8 +823,6 @@ static int ocrdma_mq_cq_handler(struct ocrdma_dev *dev, u16 cq_id)
ocrdma_process_acqe(dev, cqe);
else if (cqe->valid_ae_cmpl_cons & OCRDMA_MCQE_CMPL_MASK)
ocrdma_process_mcqe(dev, cqe);
- else
- ocrdma_err("%s() cqe->compl is not set.\n", __func__);
memset(cqe, 0, sizeof(struct ocrdma_mcqe));
ocrdma_mcq_inc_tail(dev);
}
@@ -868,16 +880,8 @@ static void ocrdma_qp_cq_handler(struct ocrdma_dev *dev, u16 cq_idx)
BUG();
cq = dev->cq_tbl[cq_idx];
- if (cq == NULL) {
- ocrdma_err("%s%d invalid id=0x%x\n", __func__, dev->id, cq_idx);
+ if (cq == NULL)
return;
- }
- spin_lock_irqsave(&cq->cq_lock, flags);
- cq->armed = false;
- cq->solicited = false;
- spin_unlock_irqrestore(&cq->cq_lock, flags);
-
- ocrdma_ring_cq_db(dev, cq->id, false, false, 0);
if (cq->ibcq.comp_handler) {
spin_lock_irqsave(&cq->comp_handler_lock, flags);
@@ -902,27 +906,35 @@ static irqreturn_t ocrdma_irq_handler(int irq, void *handle)
struct ocrdma_dev *dev = eq->dev;
struct ocrdma_eqe eqe;
struct ocrdma_eqe *ptr;
- u16 eqe_popped = 0;
u16 cq_id;
- while (1) {
+ int budget = eq->cq_cnt;
+
+ do {
ptr = ocrdma_get_eqe(eq);
eqe = *ptr;
ocrdma_le32_to_cpu(&eqe, sizeof(eqe));
if ((eqe.id_valid & OCRDMA_EQE_VALID_MASK) == 0)
break;
- eqe_popped += 1;
+
ptr->id_valid = 0;
+ /* ring eq doorbell as soon as its consumed. */
+ ocrdma_ring_eq_db(dev, eq->q.id, false, true, 1);
/* check whether its CQE or not. */
if ((eqe.id_valid & OCRDMA_EQE_FOR_CQE_MASK) == 0) {
cq_id = eqe.id_valid >> OCRDMA_EQE_RESOURCE_ID_SHIFT;
ocrdma_cq_handler(dev, cq_id);
}
ocrdma_eq_inc_tail(eq);
- }
- ocrdma_ring_eq_db(dev, eq->q.id, true, true, eqe_popped);
- /* Ring EQ doorbell with num_popped to 0 to enable interrupts again. */
- if (dev->nic_info.intr_mode == BE_INTERRUPT_MODE_INTX)
- ocrdma_ring_eq_db(dev, eq->q.id, true, true, 0);
+
+ /* There can be a stale EQE after the last bound CQ is
+ * destroyed. EQE valid and budget == 0 implies this.
+ */
+ if (budget)
+ budget--;
+
+ } while (budget);
+
+ ocrdma_ring_eq_db(dev, eq->q.id, true, true, 0);
return IRQ_HANDLED;
}
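
The interrupt handler rework above does three things: it rings the EQ doorbell once per consumed EQE instead of batching, it bounds the loop by `eq->cq_cnt` so a stale EQE left behind by the last destroyed CQ cannot spin the loop forever, and the final ring with `num_popped == 0` re-arms the EQ regardless of interrupt mode. The bounded-consumption idea in isolation (a sketch; the helpers are hypothetical):

static void example_eq_poll(struct ocrdma_eq *eq)
{
	int budget = eq->cq_cnt;	/* at most one EQE per bound CQ */

	while (budget-- > 0 && eqe_is_valid(eq))
		consume_one_eqe(eq);	/* also rings the "consumed" doorbell */
	rearm_eq_interrupt(eq);		/* ring with num_popped == 0 */
}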
@@ -959,7 +971,8 @@ static int ocrdma_mbx_cmd(struct ocrdma_dev *dev, struct ocrdma_mqe *mqe)
{
int status = 0;
u16 cqe_status, ext_status;
- struct ocrdma_mqe *rsp;
+ struct ocrdma_mqe *rsp_mqe;
+ struct ocrdma_mbx_rsp *rsp = NULL;
mutex_lock(&dev->mqe_ctx.lock);
ocrdma_post_mqe(dev, mqe);
@@ -968,24 +981,61 @@ static int ocrdma_mbx_cmd(struct ocrdma_dev *dev, struct ocrdma_mqe *mqe)
goto mbx_err;
cqe_status = dev->mqe_ctx.cqe_status;
ext_status = dev->mqe_ctx.ext_status;
- rsp = ocrdma_get_mqe_rsp(dev);
- ocrdma_copy_le32_to_cpu(mqe, rsp, (sizeof(*mqe)));
+ rsp_mqe = ocrdma_get_mqe_rsp(dev);
+ ocrdma_copy_le32_to_cpu(mqe, rsp_mqe, (sizeof(*mqe)));
+ if ((mqe->hdr.spcl_sge_cnt_emb & OCRDMA_MQE_HDR_EMB_MASK) >>
+ OCRDMA_MQE_HDR_EMB_SHIFT)
+ rsp = &mqe->u.rsp;
+
if (cqe_status || ext_status) {
- ocrdma_err
- ("%s() opcode=0x%x, cqe_status=0x%x, ext_status=0x%x\n",
- __func__,
- (rsp->u.rsp.subsys_op & OCRDMA_MBX_RSP_OPCODE_MASK) >>
- OCRDMA_MBX_RSP_OPCODE_SHIFT, cqe_status, ext_status);
+ pr_err("%s() cqe_status=0x%x, ext_status=0x%x,",
+ __func__, cqe_status, ext_status);
+ if (rsp) {
+ /* This is for embedded cmds. */
+ pr_err("opcode=0x%x, subsystem=0x%x\n",
+ (rsp->subsys_op & OCRDMA_MBX_RSP_OPCODE_MASK) >>
+ OCRDMA_MBX_RSP_OPCODE_SHIFT,
+ (rsp->subsys_op & OCRDMA_MBX_RSP_SUBSYS_MASK) >>
+ OCRDMA_MBX_RSP_SUBSYS_SHIFT);
+ }
status = ocrdma_get_mbx_cqe_errno(cqe_status);
goto mbx_err;
}
- if (mqe->u.rsp.status & OCRDMA_MBX_RSP_STATUS_MASK)
+ /* For non embedded, rsp errors are handled in ocrdma_nonemb_mbx_cmd */
+ if (rsp && (mqe->u.rsp.status & OCRDMA_MBX_RSP_STATUS_MASK))
status = ocrdma_get_mbx_errno(mqe->u.rsp.status);
mbx_err:
mutex_unlock(&dev->mqe_ctx.lock);
return status;
}
+static int ocrdma_nonemb_mbx_cmd(struct ocrdma_dev *dev, struct ocrdma_mqe *mqe,
+ void *payload_va)
+{
+ int status = 0;
+ struct ocrdma_mbx_rsp *rsp = payload_va;
+
+ if ((mqe->hdr.spcl_sge_cnt_emb & OCRDMA_MQE_HDR_EMB_MASK) >>
+ OCRDMA_MQE_HDR_EMB_SHIFT)
+ BUG();
+
+ status = ocrdma_mbx_cmd(dev, mqe);
+ if (!status)
+ /* For non embedded, only CQE failures are handled in
+ * ocrdma_mbx_cmd. We need to check for RSP errors.
+ */
+ if (rsp->status & OCRDMA_MBX_RSP_STATUS_MASK)
+ status = ocrdma_get_mbx_errno(rsp->status);
+
+ if (status)
+ pr_err("opcode=0x%x, subsystem=0x%x\n",
+ (rsp->subsys_op & OCRDMA_MBX_RSP_OPCODE_MASK) >>
+ OCRDMA_MBX_RSP_OPCODE_SHIFT,
+ (rsp->subsys_op & OCRDMA_MBX_RSP_SUBSYS_MASK) >>
+ OCRDMA_MBX_RSP_SUBSYS_SHIFT);
+ return status;
+}
+
static void ocrdma_get_attr(struct ocrdma_dev *dev,
struct ocrdma_dev_attr *attr,
struct ocrdma_mbx_query_config *rsp)
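
Two mailbox flavors now coexist: embedded commands carry their payload inside the MQE itself, while non-embedded ones pass a single SGE pointing at a separately DMA-mapped buffer, and `ocrdma_nonemb_mbx_cmd()` additionally checks the response status that lands in that buffer (the embedded path reads it from the MQE). The SGE setup a non-embedded caller performs, extracted as a sketch; `size` and `pa` stand for the caller's buffer length and DMA address:

mqe->hdr.pyld_len = size;
mqe->hdr.spcl_sge_cnt_emb |= (1 << OCRDMA_MQE_HDR_SGE_CNT_SHIFT) &
			     OCRDMA_MQE_HDR_SGE_CNT_MASK;
mqe->u.nonemb_req.sge[0].pa_lo = (u32) (pa & 0xffffffff);
mqe->u.nonemb_req.sge[0].pa_hi = (u32) upper_32_bits(pa);
mqe->u.nonemb_req.sge[0].len = size;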
@@ -996,6 +1046,9 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev,
attr->max_qp =
(rsp->qp_srq_cq_ird_ord & OCRDMA_MBX_QUERY_CFG_MAX_QP_MASK) >>
OCRDMA_MBX_QUERY_CFG_MAX_QP_SHIFT;
+ attr->max_srq =
+ (rsp->max_srq_rpir_qps & OCRDMA_MBX_QUERY_CFG_MAX_SRQ_MASK) >>
+ OCRDMA_MBX_QUERY_CFG_MAX_SRQ_OFFSET;
attr->max_send_sge = ((rsp->max_write_send_sge &
OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK) >>
OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT);
@@ -1005,6 +1058,9 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev,
attr->max_srq_sge = (rsp->max_srq_rqe_sge &
OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_MASK) >>
OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_OFFSET;
+ attr->max_rdma_sge = (rsp->max_write_send_sge &
+ OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_MASK) >>
+ OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_SHIFT;
attr->max_ord_per_qp = (rsp->max_ird_ord_per_qp &
OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK) >>
OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT;
@@ -1020,6 +1076,7 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev,
attr->local_ca_ack_delay = (rsp->max_pd_ca_ack_delay &
OCRDMA_MBX_QUERY_CFG_CA_ACK_DELAY_MASK) >>
OCRDMA_MBX_QUERY_CFG_CA_ACK_DELAY_SHIFT;
+ attr->max_mw = rsp->max_mw;
attr->max_mr = rsp->max_mr;
attr->max_mr_size = ~0ull;
attr->max_fmr = 0;
@@ -1027,6 +1084,9 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev,
attr->max_num_mr_pbl = rsp->max_num_mr_pbl;
attr->max_cqe = rsp->max_cq_cqes_per_cq &
OCRDMA_MBX_QUERY_CFG_MAX_CQES_PER_CQ_MASK;
+ attr->max_cq = (rsp->max_cq_cqes_per_cq &
+ OCRDMA_MBX_QUERY_CFG_MAX_CQ_MASK) >>
+ OCRDMA_MBX_QUERY_CFG_MAX_CQ_OFFSET;
attr->wqe_size = ((rsp->wqe_rqe_stride_max_dpp_cqs &
OCRDMA_MBX_QUERY_CFG_MAX_WQE_SIZE_MASK) >>
OCRDMA_MBX_QUERY_CFG_MAX_WQE_SIZE_OFFSET) *
@@ -1038,7 +1098,7 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev,
attr->max_inline_data =
attr->wqe_size - (sizeof(struct ocrdma_hdr_wqe) +
sizeof(struct ocrdma_sge));
- if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
+ if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) {
attr->ird = 1;
attr->ird_page_size = OCRDMA_MIN_Q_PAGE_SIZE;
attr->num_ird_pages = MAX_OCRDMA_IRD_PAGES;
@@ -1059,7 +1119,6 @@ static int ocrdma_check_fw_config(struct ocrdma_dev *dev,
return -EINVAL;
dev->base_eqid = conf->base_eqid;
dev->max_eq = conf->max_eq;
- dev->attr.max_cq = OCRDMA_MAX_CQ - 1;
return 0;
}
@@ -1113,6 +1172,96 @@ mbx_err:
return status;
}
+int ocrdma_mbx_rdma_stats(struct ocrdma_dev *dev, bool reset)
+{
+ struct ocrdma_rdma_stats_req *req = dev->stats_mem.va;
+ struct ocrdma_mqe *mqe = &dev->stats_mem.mqe;
+ struct ocrdma_rdma_stats_resp *old_stats = NULL;
+ int status;
+
+ old_stats = kzalloc(sizeof(*old_stats), GFP_KERNEL);
+ if (old_stats == NULL)
+ return -ENOMEM;
+
+ memset(mqe, 0, sizeof(*mqe));
+ mqe->hdr.pyld_len = dev->stats_mem.size;
+ mqe->hdr.spcl_sge_cnt_emb |=
+ (1 << OCRDMA_MQE_HDR_SGE_CNT_SHIFT) &
+ OCRDMA_MQE_HDR_SGE_CNT_MASK;
+ mqe->u.nonemb_req.sge[0].pa_lo = (u32) (dev->stats_mem.pa & 0xffffffff);
+ mqe->u.nonemb_req.sge[0].pa_hi = (u32) upper_32_bits(dev->stats_mem.pa);
+ mqe->u.nonemb_req.sge[0].len = dev->stats_mem.size;
+
+ /* Cache the old stats */
+ memcpy(old_stats, req, sizeof(struct ocrdma_rdma_stats_resp));
+ memset(req, 0, dev->stats_mem.size);
+
+ ocrdma_init_mch((struct ocrdma_mbx_hdr *)req,
+ OCRDMA_CMD_GET_RDMA_STATS,
+ OCRDMA_SUBSYS_ROCE,
+ dev->stats_mem.size);
+ if (reset)
+ req->reset_stats = reset;
+
+ status = ocrdma_nonemb_mbx_cmd(dev, mqe, dev->stats_mem.va);
+ if (status)
+ /* Copy from cache, if mbox fails */
+ memcpy(req, old_stats, sizeof(struct ocrdma_rdma_stats_resp));
+ else
+ ocrdma_le32_to_cpu(req, dev->stats_mem.size);
+
+ kfree(old_stats);
+ return status;
+}
+
+static int ocrdma_mbx_get_ctrl_attribs(struct ocrdma_dev *dev)
+{
+ int status = -ENOMEM;
+ struct ocrdma_dma_mem dma;
+ struct ocrdma_mqe *mqe;
+ struct ocrdma_get_ctrl_attribs_rsp *ctrl_attr_rsp;
+ struct mgmt_hba_attribs *hba_attribs;
+
+ mqe = ocrdma_alloc_mqe();
+ if (!mqe)
+ return status;
+ memset(mqe, 0, sizeof(*mqe));
+
+ dma.size = sizeof(struct ocrdma_get_ctrl_attribs_rsp);
+ dma.va = dma_alloc_coherent(&dev->nic_info.pdev->dev,
+ dma.size, &dma.pa, GFP_KERNEL);
+ if (!dma.va)
+ goto free_mqe;
+
+ mqe->hdr.pyld_len = dma.size;
+ mqe->hdr.spcl_sge_cnt_emb |=
+ (1 << OCRDMA_MQE_HDR_SGE_CNT_SHIFT) &
+ OCRDMA_MQE_HDR_SGE_CNT_MASK;
+ mqe->u.nonemb_req.sge[0].pa_lo = (u32) (dma.pa & 0xffffffff);
+ mqe->u.nonemb_req.sge[0].pa_hi = (u32) upper_32_bits(dma.pa);
+ mqe->u.nonemb_req.sge[0].len = dma.size;
+
+ memset(dma.va, 0, dma.size);
+ ocrdma_init_mch((struct ocrdma_mbx_hdr *)dma.va,
+ OCRDMA_CMD_GET_CTRL_ATTRIBUTES,
+ OCRDMA_SUBSYS_COMMON,
+ dma.size);
+
+ status = ocrdma_nonemb_mbx_cmd(dev, mqe, dma.va);
+ if (!status) {
+ ctrl_attr_rsp = (struct ocrdma_get_ctrl_attribs_rsp *)dma.va;
+ hba_attribs = &ctrl_attr_rsp->ctrl_attribs.hba_attribs;
+
+ dev->hba_port_num = hba_attribs->phy_port;
+ strncpy(dev->model_number,
+ hba_attribs->controller_model_number, 31);
+ }
+ dma_free_coherent(&dev->nic_info.pdev->dev, dma.size, dma.va, dma.pa);
+free_mqe:
+ kfree(mqe);
+ return status;
+}
+
static int ocrdma_mbx_query_dev(struct ocrdma_dev *dev)
{
int status = -ENOMEM;
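
`ocrdma_mbx_rdma_stats()` is the first user of that non-embedded path: the request and response share the pre-allocated `stats_mem` buffer, and the previous snapshot is cached so a failed mailbox call leaves the last good counters visible. A hedged sketch of how the debugfs side (added in `ocrdma_stats.c`, not shown in this section) might rate-limit the refresh; the half-second threshold is our assumption:

/* Hypothetical debugfs refresh; only ocrdma_mbx_rdma_stats() is real. */
static int example_refresh_stats(struct ocrdma_dev *dev)
{
	int status = 0;

	mutex_lock(&dev->stats_lock);
	if (time_after(jiffies, dev->last_stats_time + HZ / 2)) {
		status = ocrdma_mbx_rdma_stats(dev, false);
		if (!status)
			dev->last_stats_time = jiffies;
	}
	mutex_unlock(&dev->stats_lock);
	return status;
}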
@@ -1132,6 +1281,63 @@ mbx_err:
return status;
}
+int ocrdma_mbx_get_link_speed(struct ocrdma_dev *dev, u8 *lnk_speed)
+{
+ int status = -ENOMEM;
+ struct ocrdma_get_link_speed_rsp *rsp;
+ struct ocrdma_mqe *cmd;
+
+ cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_QUERY_NTWK_LINK_CONFIG_V1,
+ sizeof(*cmd));
+ if (!cmd)
+ return status;
+ ocrdma_init_mch((struct ocrdma_mbx_hdr *)&cmd->u.cmd[0],
+ OCRDMA_CMD_QUERY_NTWK_LINK_CONFIG_V1,
+ OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
+
+ ((struct ocrdma_mbx_hdr *)cmd->u.cmd)->rsvd_version = 0x1;
+
+ status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+ if (status)
+ goto mbx_err;
+
+ rsp = (struct ocrdma_get_link_speed_rsp *)cmd;
+ *lnk_speed = rsp->phys_port_speed;
+
+mbx_err:
+ kfree(cmd);
+ return status;
+}
+
+static int ocrdma_mbx_get_phy_info(struct ocrdma_dev *dev)
+{
+ int status = -ENOMEM;
+ struct ocrdma_mqe *cmd;
+ struct ocrdma_get_phy_info_rsp *rsp;
+
+ cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_PHY_DETAILS, sizeof(*cmd));
+ if (!cmd)
+ return status;
+
+ ocrdma_init_mch((struct ocrdma_mbx_hdr *)&cmd->u.cmd[0],
+ OCRDMA_CMD_PHY_DETAILS, OCRDMA_SUBSYS_COMMON,
+ sizeof(*cmd));
+
+ status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+ if (status)
+ goto mbx_err;
+
+ rsp = (struct ocrdma_get_phy_info_rsp *)cmd;
+ dev->phy.phy_type = le16_to_cpu(rsp->phy_type);
+ dev->phy.auto_speeds_supported =
+ le16_to_cpu(rsp->auto_speeds_supported);
+ dev->phy.fixed_speeds_supported =
+ le16_to_cpu(rsp->fixed_speeds_supported);
+mbx_err:
+ kfree(cmd);
+ return status;
+}
+
int ocrdma_mbx_alloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
{
int status = -ENOMEM;
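
`ocrdma_mbx_get_link_speed()` issues a version-1 QUERY_NTWK_LINK_CONFIG command and returns the raw physical-port speed code; the natural consumer is the port-query path elsewhere in the driver (not shown here), roughly:

/* Hypothetical wiring; the mailbox call is from this patch. */
u8 lnk_speed = 0;

if (!ocrdma_mbx_get_link_speed(dev, &lnk_speed))
	pr_debug("%s: phys port speed code 0x%x\n",
		 dev->ibdev.name, lnk_speed);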
@@ -1201,7 +1407,7 @@ static int ocrdma_build_q_conf(u32 *num_entries, int entry_size,
static int ocrdma_mbx_create_ah_tbl(struct ocrdma_dev *dev)
{
- int i ;
+ int i;
int status = 0;
int max_ah;
struct ocrdma_create_ah_tbl *cmd;
@@ -1310,19 +1516,19 @@ static u16 ocrdma_bind_eq(struct ocrdma_dev *dev)
u16 eq_id;
mutex_lock(&dev->dev_lock);
- cq_cnt = dev->qp_eq_tbl[0].cq_cnt;
- eq_id = dev->qp_eq_tbl[0].q.id;
+ cq_cnt = dev->eq_tbl[0].cq_cnt;
+ eq_id = dev->eq_tbl[0].q.id;
/* find the EQ which has the least number of
* CQs associated with it.
*/
for (i = 0; i < dev->eq_cnt; i++) {
- if (dev->qp_eq_tbl[i].cq_cnt < cq_cnt) {
- cq_cnt = dev->qp_eq_tbl[i].cq_cnt;
- eq_id = dev->qp_eq_tbl[i].q.id;
+ if (dev->eq_tbl[i].cq_cnt < cq_cnt) {
+ cq_cnt = dev->eq_tbl[i].cq_cnt;
+ eq_id = dev->eq_tbl[i].q.id;
selected_eq = i;
}
}
- dev->qp_eq_tbl[selected_eq].cq_cnt += 1;
+ dev->eq_tbl[selected_eq].cq_cnt += 1;
mutex_unlock(&dev->dev_lock);
return eq_id;
}
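
With the control-path and data-path EQ tables unified into `eq_tbl`, binding a CQ is a plain least-loaded scan: with per-EQ `cq_cnt` of {3, 1, 2}, for example, index 1 wins and its count rises to 2. The scan in isolation, over a bare array (sketch):

/* Mirrors the loop above: the index of the smallest counter wins. */
static int example_pick_least_loaded(const u32 *cq_cnt, int n)
{
	int i, best = 0;

	for (i = 1; i < n; i++)
		if (cq_cnt[i] < cq_cnt[best])
			best = i;
	return best;
}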
@@ -1332,17 +1538,15 @@ static void ocrdma_unbind_eq(struct ocrdma_dev *dev, u16 eq_id)
int i;
mutex_lock(&dev->dev_lock);
- for (i = 0; i < dev->eq_cnt; i++) {
- if (dev->qp_eq_tbl[i].q.id != eq_id)
- continue;
- dev->qp_eq_tbl[i].cq_cnt -= 1;
- break;
- }
+ i = ocrdma_get_eq_table_index(dev, eq_id);
+ if (i == -EINVAL)
+ BUG();
+ dev->eq_tbl[i].cq_cnt -= 1;
mutex_unlock(&dev->dev_lock);
}
int ocrdma_mbx_create_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
- int entries, int dpp_cq)
+ int entries, int dpp_cq, u16 pd_id)
{
int status = -ENOMEM; int max_hw_cqe;
struct pci_dev *pdev = dev->nic_info.pdev;
@@ -1350,14 +1554,12 @@ int ocrdma_mbx_create_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
struct ocrdma_create_cq_rsp *rsp;
u32 hw_pages, cqe_size, page_size, cqe_count;
- if (dpp_cq)
- return -EINVAL;
if (entries > dev->attr.max_cqe) {
- ocrdma_err("%s(%d) max_cqe=0x%x, requester_cqe=0x%x\n",
- __func__, dev->id, dev->attr.max_cqe, entries);
+ pr_err("%s(%d) max_cqe=0x%x, requester_cqe=0x%x\n",
+ __func__, dev->id, dev->attr.max_cqe, entries);
return -EINVAL;
}
- if (dpp_cq && (dev->nic_info.dev_family != OCRDMA_GEN2_FAMILY))
+ if (dpp_cq && (ocrdma_get_asic_type(dev) != OCRDMA_ASIC_GEN_SKH_R))
return -EINVAL;
if (dpp_cq) {
@@ -1391,15 +1593,14 @@ int ocrdma_mbx_create_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
cmd->cmd.pgsz_pgcnt |= hw_pages;
cmd->cmd.ev_cnt_flags = OCRDMA_CREATE_CQ_DEF_FLAGS;
- if (dev->eq_cnt < 0)
- goto eq_err;
cq->eqn = ocrdma_bind_eq(dev);
- cmd->cmd.req.rsvd_version = OCRDMA_CREATE_CQ_VER2;
+ cmd->cmd.req.rsvd_version = OCRDMA_CREATE_CQ_VER3;
cqe_count = cq->len / cqe_size;
- if (cqe_count > 1024)
+ cq->cqe_cnt = cqe_count;
+ if (cqe_count > 1024) {
/* Set cnt to 3 to indicate more than 1024 cq entries */
cmd->cmd.ev_cnt_flags |= (0x3 << OCRDMA_CREATE_CQ_CNT_SHIFT);
- else {
+ } else {
u8 count = 0;
switch (cqe_count) {
case 256:
@@ -1418,7 +1619,7 @@ int ocrdma_mbx_create_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
}
/* shared eq between all the consumer cqs. */
cmd->cmd.eqn = cq->eqn;
- if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
+ if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) {
if (dpp_cq)
cmd->cmd.pgsz_pgcnt |= OCRDMA_CREATE_CQ_DPP <<
OCRDMA_CREATE_CQ_TYPE_SHIFT;
@@ -1430,6 +1631,7 @@ int ocrdma_mbx_create_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
cq->phase_change = true;
}
+ cmd->cmd.pd_id = pd_id; /* valid only for v3 */
ocrdma_build_q_pages(&cmd->cmd.pa[0], hw_pages, cq->pa, page_size);
status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
if (status)
@@ -1441,7 +1643,6 @@ int ocrdma_mbx_create_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
return 0;
mbx_err:
ocrdma_unbind_eq(dev, cq->eqn);
-eq_err:
dma_free_coherent(&pdev->dev, cq->len, cq->va, cq->pa);
mem_err:
kfree(cmd);
@@ -1463,12 +1664,9 @@ int ocrdma_mbx_destroy_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq)
(cq->id << OCRDMA_DESTROY_CQ_QID_SHIFT) &
OCRDMA_DESTROY_CQ_QID_MASK;
- ocrdma_unbind_eq(dev, cq->eqn);
status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
- if (status)
- goto mbx_err;
+ ocrdma_unbind_eq(dev, cq->eqn);
dma_free_coherent(&dev->nic_info.pdev->dev, cq->len, cq->va, cq->pa);
-mbx_err:
kfree(cmd);
return status;
}
@@ -1538,6 +1736,7 @@ static int ocrdma_mbx_reg_mr(struct ocrdma_dev *dev, struct ocrdma_hw_mr *hwmr,
return -ENOMEM;
cmd->num_pbl_pdid =
pdid | (hwmr->num_pbls << OCRDMA_REG_NSMR_NUM_PBL_SHIFT);
+ cmd->fr_mr = hwmr->fr_mr;
cmd->flags_hpage_pbe_sz |= (hwmr->remote_wr <<
OCRDMA_REG_NSMR_REMOTE_WR_SHIFT);
@@ -1621,7 +1820,7 @@ int ocrdma_reg_mr(struct ocrdma_dev *dev,
status = ocrdma_mbx_reg_mr(dev, hwmr, pdid,
cur_pbl_cnt, hwmr->pbe_size, last);
if (status) {
- ocrdma_err("%s() status=%d\n", __func__, status);
+ pr_err("%s() status=%d\n", __func__, status);
return status;
}
/* if there is no more pbls to register then exit. */
@@ -1644,7 +1843,7 @@ int ocrdma_reg_mr(struct ocrdma_dev *dev,
break;
}
if (status)
- ocrdma_err("%s() err. status=%d\n", __func__, status);
+ pr_err("%s() err. status=%d\n", __func__, status);
return status;
}
@@ -1692,8 +1891,16 @@ void ocrdma_flush_qp(struct ocrdma_qp *qp)
spin_unlock_irqrestore(&qp->dev->flush_q_lock, flags);
}
-int ocrdma_qp_state_machine(struct ocrdma_qp *qp, enum ib_qp_state new_ib_state,
- enum ib_qp_state *old_ib_state)
+static void ocrdma_init_hwq_ptr(struct ocrdma_qp *qp)
+{
+ qp->sq.head = 0;
+ qp->sq.tail = 0;
+ qp->rq.head = 0;
+ qp->rq.tail = 0;
+}
+
+int ocrdma_qp_state_change(struct ocrdma_qp *qp, enum ib_qp_state new_ib_state,
+ enum ib_qp_state *old_ib_state)
{
unsigned long flags;
int status = 0;
@@ -1710,96 +1917,15 @@ int ocrdma_qp_state_machine(struct ocrdma_qp *qp, enum ib_qp_state new_ib_state,
return 1;
}
- switch (qp->state) {
- case OCRDMA_QPS_RST:
- switch (new_state) {
- case OCRDMA_QPS_RST:
- case OCRDMA_QPS_INIT:
- break;
- default:
- status = -EINVAL;
- break;
- };
- break;
- case OCRDMA_QPS_INIT:
- /* qps: INIT->XXX */
- switch (new_state) {
- case OCRDMA_QPS_INIT:
- case OCRDMA_QPS_RTR:
- break;
- case OCRDMA_QPS_ERR:
- ocrdma_flush_qp(qp);
- break;
- default:
- status = -EINVAL;
- break;
- };
- break;
- case OCRDMA_QPS_RTR:
- /* qps: RTS->XXX */
- switch (new_state) {
- case OCRDMA_QPS_RTS:
- break;
- case OCRDMA_QPS_ERR:
- ocrdma_flush_qp(qp);
- break;
- default:
- status = -EINVAL;
- break;
- };
- break;
- case OCRDMA_QPS_RTS:
- /* qps: RTS->XXX */
- switch (new_state) {
- case OCRDMA_QPS_SQD:
- case OCRDMA_QPS_SQE:
- break;
- case OCRDMA_QPS_ERR:
- ocrdma_flush_qp(qp);
- break;
- default:
- status = -EINVAL;
- break;
- };
- break;
- case OCRDMA_QPS_SQD:
- /* qps: SQD->XXX */
- switch (new_state) {
- case OCRDMA_QPS_RTS:
- case OCRDMA_QPS_SQE:
- case OCRDMA_QPS_ERR:
- break;
- default:
- status = -EINVAL;
- break;
- };
- break;
- case OCRDMA_QPS_SQE:
- switch (new_state) {
- case OCRDMA_QPS_RTS:
- case OCRDMA_QPS_ERR:
- break;
- default:
- status = -EINVAL;
- break;
- };
- break;
- case OCRDMA_QPS_ERR:
- /* qps: ERR->XXX */
- switch (new_state) {
- case OCRDMA_QPS_RST:
- break;
- default:
- status = -EINVAL;
- break;
- };
- break;
- default:
- status = -EINVAL;
- break;
- };
- if (!status)
- qp->state = new_state;
+
+ if (new_state == OCRDMA_QPS_INIT) {
+ ocrdma_init_hwq_ptr(qp);
+ ocrdma_del_flush_qp(qp);
+ } else if (new_state == OCRDMA_QPS_ERR) {
+ ocrdma_flush_qp(qp);
+ }
+
+ qp->state = new_state;
spin_unlock_irqrestore(&qp->q_lock, flags);
return status;
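
The hand-rolled transition matrix is deleted on the assumption that transition legality is already enforced by the IB core before the driver sees the request, so the renamed `ocrdma_qp_state_change()` keeps only the side effects: reset the hardware queue pointers and drop any pending flush on a move to INIT, flush on a move to ERR. Caller-side gating would look roughly like this (hypothetical caller; `ib_modify_qp_is_ok()` is the core validator, with the argument list of kernels of this era):

if (!ib_modify_qp_is_ok(old_qps, new_qps, ibqp->qp_type, attr_mask,
			IB_LINK_LAYER_ETHERNET))
	return -EINVAL;
ocrdma_qp_state_change(qp, new_qps, &old_qps);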
@@ -1833,16 +1959,15 @@ static int ocrdma_set_create_qp_sq_cmd(struct ocrdma_create_qp_req *cmd,
u32 max_wqe_allocated;
u32 max_sges = attrs->cap.max_send_sge;
- max_wqe_allocated = attrs->cap.max_send_wr;
- /* need to allocate one extra to for GEN1 family */
- if (dev->nic_info.dev_family != OCRDMA_GEN2_FAMILY)
- max_wqe_allocated += 1;
+ /* QP1 may exceed 127 */
+ max_wqe_allocated = min_t(u32, attrs->cap.max_send_wr + 1,
+ dev->attr.max_wqe);
status = ocrdma_build_q_conf(&max_wqe_allocated,
dev->attr.wqe_size, &hw_pages, &hw_page_size);
if (status) {
- ocrdma_err("%s() req. max_send_wr=0x%x\n", __func__,
- max_wqe_allocated);
+ pr_err("%s() req. max_send_wr=0x%x\n", __func__,
+ max_wqe_allocated);
return -EINVAL;
}
qp->sq.max_cnt = max_wqe_allocated;
@@ -1891,8 +2016,8 @@ static int ocrdma_set_create_qp_rq_cmd(struct ocrdma_create_qp_req *cmd,
status = ocrdma_build_q_conf(&max_rqe_allocated, dev->attr.rqe_size,
&hw_pages, &hw_page_size);
if (status) {
- ocrdma_err("%s() req. max_recv_wr=0x%x\n", __func__,
- attrs->cap.max_recv_wr + 1);
+ pr_err("%s() req. max_recv_wr=0x%x\n", __func__,
+ attrs->cap.max_recv_wr + 1);
return status;
}
qp->rq.max_cnt = max_rqe_allocated;
@@ -1900,7 +2025,7 @@ static int ocrdma_set_create_qp_rq_cmd(struct ocrdma_create_qp_req *cmd,
qp->rq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
if (!qp->rq.va)
- return status;
+ return -ENOMEM;
memset(qp->rq.va, 0, len);
qp->rq.pa = pa;
qp->rq.len = len;
@@ -1948,6 +2073,8 @@ static int ocrdma_set_create_qp_ird_cmd(struct ocrdma_create_qp_req *cmd,
dma_addr_t pa = 0;
int ird_page_size = dev->attr.ird_page_size;
int ird_q_len = dev->attr.num_ird_pages * ird_page_size;
+ struct ocrdma_hdr_wqe *rqe;
+ int i = 0;
if (dev->attr.ird == 0)
return 0;
@@ -1959,6 +2086,15 @@ static int ocrdma_set_create_qp_ird_cmd(struct ocrdma_create_qp_req *cmd,
memset(qp->ird_q_va, 0, ird_q_len);
ocrdma_build_q_pages(&cmd->ird_addr[0], dev->attr.num_ird_pages,
pa, ird_page_size);
+ for (; i < ird_q_len / dev->attr.rqe_size; i++) {
+ rqe = (struct ocrdma_hdr_wqe *)(qp->ird_q_va +
+ (i * dev->attr.rqe_size));
+ rqe->cw = 0;
+ rqe->cw |= 2;
+ rqe->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
+ rqe->cw |= (8 << OCRDMA_WQE_SIZE_SHIFT);
+ rqe->cw |= (8 << OCRDMA_WQE_NXT_WQE_SIZE_SHIFT);
+ }
return 0;
}
@@ -2023,7 +2159,7 @@ int ocrdma_mbx_create_qp(struct ocrdma_qp *qp, struct ib_qp_init_attr *attrs,
break;
default:
return -EINVAL;
- };
+ }
cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_QP, sizeof(*cmd));
if (!cmd)
@@ -2070,10 +2206,10 @@ int ocrdma_mbx_create_qp(struct ocrdma_qp *qp, struct ib_qp_init_attr *attrs,
OCRDMA_CREATE_QP_REQ_RQ_CQID_MASK;
qp->rq_cq = cq;
- if (pd->dpp_enabled && attrs->cap.max_inline_data && pd->num_dpp_qp &&
- (attrs->cap.max_inline_data <= dev->attr.max_inline_data))
+ if (pd->dpp_enabled && pd->num_dpp_qp) {
ocrdma_set_create_qp_dpp_cmd(cmd, pd, qp, enable_dpp_cq,
dpp_cq_id);
+ }
status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
if (status)
@@ -2087,10 +2223,10 @@ mbx_err:
if (qp->rq.va)
dma_free_coherent(&pdev->dev, qp->rq.len, qp->rq.va, qp->rq.pa);
rq_err:
- ocrdma_err("%s(%d) rq_err\n", __func__, dev->id);
+ pr_err("%s(%d) rq_err\n", __func__, dev->id);
dma_free_coherent(&pdev->dev, qp->sq.len, qp->sq.va, qp->sq.pa);
sq_err:
- ocrdma_err("%s(%d) sq_err\n", __func__, dev->id);
+ pr_err("%s(%d) sq_err\n", __func__, dev->id);
kfree(cmd);
return status;
}
@@ -2116,69 +2252,60 @@ mbx_err:
return status;
}
-int ocrdma_resolve_dgid(struct ocrdma_dev *dev, union ib_gid *dgid,
- u8 *mac_addr)
-{
- struct in6_addr in6;
-
- memcpy(&in6, dgid, sizeof in6);
- if (rdma_is_multicast_addr(&in6))
- rdma_get_mcast_mac(&in6, mac_addr);
- else if (rdma_link_local_addr(&in6))
- rdma_get_ll_mac(&in6, mac_addr);
- else {
- ocrdma_err("%s() fail to resolve mac_addr.\n", __func__);
- return -EINVAL;
- }
- return 0;
-}
-
-static void ocrdma_set_av_params(struct ocrdma_qp *qp,
+static int ocrdma_set_av_params(struct ocrdma_qp *qp,
struct ocrdma_modify_qp *cmd,
struct ib_qp_attr *attrs)
{
+ int status;
struct ib_ah_attr *ah_attr = &attrs->ah_attr;
- union ib_gid sgid;
+ union ib_gid sgid, zgid;
u32 vlan_id;
u8 mac_addr[6];
+
if ((ah_attr->ah_flags & IB_AH_GRH) == 0)
- return;
+ return -EINVAL;
cmd->params.tclass_sq_psn |=
(ah_attr->grh.traffic_class << OCRDMA_QP_PARAMS_TCLASS_SHIFT);
cmd->params.rnt_rc_sl_fl |=
(ah_attr->grh.flow_label & OCRDMA_QP_PARAMS_FLOW_LABEL_MASK);
+ cmd->params.rnt_rc_sl_fl |= (ah_attr->sl << OCRDMA_QP_PARAMS_SL_SHIFT);
cmd->params.hop_lmt_rq_psn |=
(ah_attr->grh.hop_limit << OCRDMA_QP_PARAMS_HOP_LMT_SHIFT);
cmd->flags |= OCRDMA_QP_PARA_FLOW_LBL_VALID;
memcpy(&cmd->params.dgid[0], &ah_attr->grh.dgid.raw[0],
sizeof(cmd->params.dgid));
- ocrdma_query_gid(&qp->dev->ibdev, 1,
- ah_attr->grh.sgid_index, &sgid);
+ status = ocrdma_query_gid(&qp->dev->ibdev, 1,
+ ah_attr->grh.sgid_index, &sgid);
+ if (status)
+ return status;
+
+ memset(&zgid, 0, sizeof(zgid));
+ if (!memcmp(&sgid, &zgid, sizeof(zgid)))
+ return -EINVAL;
+
qp->sgid_idx = ah_attr->grh.sgid_index;
memcpy(&cmd->params.sgid[0], &sgid.raw[0], sizeof(cmd->params.sgid));
- ocrdma_resolve_dgid(qp->dev, &ah_attr->grh.dgid, &mac_addr[0]);
+ ocrdma_resolve_dmac(qp->dev, ah_attr, &mac_addr[0]);
cmd->params.dmac_b0_to_b3 = mac_addr[0] | (mac_addr[1] << 8) |
(mac_addr[2] << 16) | (mac_addr[3] << 24);
/* convert them to LE format. */
ocrdma_cpu_to_le32(&cmd->params.dgid[0], sizeof(cmd->params.dgid));
ocrdma_cpu_to_le32(&cmd->params.sgid[0], sizeof(cmd->params.sgid));
cmd->params.vlan_dmac_b4_to_b5 = mac_addr[4] | (mac_addr[5] << 8);
- vlan_id = rdma_get_vlan_id(&sgid);
+ vlan_id = ah_attr->vlan_id;
if (vlan_id && (vlan_id < 0x1000)) {
cmd->params.vlan_dmac_b4_to_b5 |=
vlan_id << OCRDMA_QP_PARAMS_VLAN_SHIFT;
cmd->flags |= OCRDMA_QP_PARA_VLAN_EN_VALID;
}
+ return 0;
}
static int ocrdma_set_qp_params(struct ocrdma_qp *qp,
struct ocrdma_modify_qp *cmd,
- struct ib_qp_attr *attrs, int attr_mask,
- enum ib_qp_state old_qps)
+ struct ib_qp_attr *attrs, int attr_mask)
{
int status = 0;
- struct net_device *netdev = qp->dev->nic_info.netdev;
- int eth_mtu = iboe_get_mtu(netdev->mtu);
if (attr_mask & IB_QP_PKEY_INDEX) {
cmd->params.path_mtu_pkey_indx |= (attrs->pkey_index &
@@ -2190,9 +2317,11 @@ static int ocrdma_set_qp_params(struct ocrdma_qp *qp,
cmd->params.qkey = attrs->qkey;
cmd->flags |= OCRDMA_QP_PARA_QKEY_VALID;
}
- if (attr_mask & IB_QP_AV)
- ocrdma_set_av_params(qp, cmd, attrs);
- else if (qp->qp_type == IB_QPT_GSI || qp->qp_type == IB_QPT_UD) {
+ if (attr_mask & IB_QP_AV) {
+ status = ocrdma_set_av_params(qp, cmd, attrs);
+ if (status)
+ return status;
+ } else if (qp->qp_type == IB_QPT_GSI || qp->qp_type == IB_QPT_UD) {
/* set the default mac address for UD, GSI QPs */
cmd->params.dmac_b0_to_b3 = qp->dev->nic_info.mac_addr[0] |
(qp->dev->nic_info.mac_addr[1] << 8) |
@@ -2213,8 +2342,8 @@ static int ocrdma_set_qp_params(struct ocrdma_qp *qp,
cmd->flags |= OCRDMA_QP_PARA_DST_QPN_VALID;
}
if (attr_mask & IB_QP_PATH_MTU) {
- if (ib_mtu_enum_to_int(eth_mtu) <
- ib_mtu_enum_to_int(attrs->path_mtu)) {
+ if (attrs->path_mtu < IB_MTU_256 ||
+ attrs->path_mtu > IB_MTU_4096) {
status = -EINVAL;
goto pmtu_err;
}
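
The path-MTU check no longer compares against the paired netdev's MTU via `iboe_get_mtu()`; any enum inside the valid IB range is accepted and programmed as-is. For reference, the enum-to-bytes mapping the old check relied on (a one-line illustration, not patch code):

/* IB_MTU_256..IB_MTU_4096 map to 256..4096 bytes; invalid enums yield -1. */
int mtu_bytes = ib_mtu_enum_to_int(attrs->path_mtu);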
@@ -2279,8 +2408,7 @@ pmtu_err:
}
int ocrdma_mbx_modify_qp(struct ocrdma_dev *dev, struct ocrdma_qp *qp,
- struct ib_qp_attr *attrs, int attr_mask,
- enum ib_qp_state old_qps)
+ struct ib_qp_attr *attrs, int attr_mask)
{
int status = -ENOMEM;
struct ocrdma_modify_qp *cmd;
@@ -2297,11 +2425,13 @@ int ocrdma_mbx_modify_qp(struct ocrdma_dev *dev, struct ocrdma_qp *qp,
OCRDMA_QP_PARAMS_STATE_SHIFT) &
OCRDMA_QP_PARAMS_STATE_MASK;
cmd->flags |= OCRDMA_QP_PARA_QPS_VALID;
- } else
+ } else {
cmd->params.max_sge_recv_flags |=
(qp->state << OCRDMA_QP_PARAMS_STATE_SHIFT) &
OCRDMA_QP_PARAMS_STATE_MASK;
- status = ocrdma_set_qp_params(qp, cmd, attrs, attr_mask, old_qps);
+ }
+
+ status = ocrdma_set_qp_params(qp, cmd, attrs, attr_mask);
if (status)
goto mbx_err;
status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
@@ -2338,7 +2468,7 @@ mbx_err:
return status;
}
-int ocrdma_mbx_create_srq(struct ocrdma_srq *srq,
+int ocrdma_mbx_create_srq(struct ocrdma_dev *dev, struct ocrdma_srq *srq,
struct ib_srq_init_attr *srq_attr,
struct ocrdma_pd *pd)
{
@@ -2348,7 +2478,6 @@ int ocrdma_mbx_create_srq(struct ocrdma_srq *srq,
struct ocrdma_create_srq_rsp *rsp;
struct ocrdma_create_srq *cmd;
dma_addr_t pa;
- struct ocrdma_dev *dev = srq->dev;
struct pci_dev *pdev = dev->nic_info.pdev;
u32 max_rqe_allocated;
@@ -2362,8 +2491,8 @@ int ocrdma_mbx_create_srq(struct ocrdma_srq *srq,
dev->attr.rqe_size,
&hw_pages, &hw_page_size);
if (status) {
- ocrdma_err("%s() req. max_wr=0x%x\n", __func__,
- srq_attr->attr.max_wr);
+ pr_err("%s() req. max_wr=0x%x\n", __func__,
+ srq_attr->attr.max_wr);
status = -EINVAL;
goto ret;
}
@@ -2418,13 +2547,16 @@ int ocrdma_mbx_modify_srq(struct ocrdma_srq *srq, struct ib_srq_attr *srq_attr)
{
int status = -ENOMEM;
struct ocrdma_modify_srq *cmd;
- cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_SRQ, sizeof(*cmd));
+ struct ocrdma_pd *pd = srq->pd;
+ struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
+
+ cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_MODIFY_SRQ, sizeof(*cmd));
if (!cmd)
return status;
cmd->id = srq->id;
cmd->limit_max_rqe |= srq_attr->srq_limit <<
OCRDMA_MODIFY_SRQ_LIMIT_SHIFT;
- status = ocrdma_mbx_cmd(srq->dev, (struct ocrdma_mqe *)cmd);
+ status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
kfree(cmd);
return status;
}
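
Note the quiet bug fix in this and the following hunk: `ocrdma_mbx_modify_srq()` and `ocrdma_mbx_query_srq()` previously built their MQE with the CREATE_SRQ opcode; they now use the correct opcodes and reach the device through the PD or ibsrq instead of the removed `srq->dev` back-pointer.

/* Corrected opcodes (the old code reused OCRDMA_CMD_CREATE_SRQ): */
cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_MODIFY_SRQ, sizeof(*cmd)); /* modify */
cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_QUERY_SRQ,  sizeof(*cmd)); /* query  */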
@@ -2433,11 +2565,13 @@ int ocrdma_mbx_query_srq(struct ocrdma_srq *srq, struct ib_srq_attr *srq_attr)
{
int status = -ENOMEM;
struct ocrdma_query_srq *cmd;
- cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_SRQ, sizeof(*cmd));
+ struct ocrdma_dev *dev = get_ocrdma_dev(srq->ibsrq.device);
+
+ cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_QUERY_SRQ, sizeof(*cmd));
if (!cmd)
return status;
cmd->id = srq->rq.dbid;
- status = ocrdma_mbx_cmd(srq->dev, (struct ocrdma_mqe *)cmd);
+ status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
if (status == 0) {
struct ocrdma_query_srq_rsp *rsp =
(struct ocrdma_query_srq_rsp *)cmd;
@@ -2462,7 +2596,7 @@ int ocrdma_mbx_destroy_srq(struct ocrdma_dev *dev, struct ocrdma_srq *srq)
if (!cmd)
return status;
cmd->id = srq->id;
- status = ocrdma_mbx_cmd(srq->dev, (struct ocrdma_mqe *)cmd);
+ status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
if (srq->rq.va)
dma_free_coherent(&pdev->dev, srq->rq.len,
srq->rq.va, srq->rq.pa);
@@ -2504,38 +2638,7 @@ int ocrdma_free_av(struct ocrdma_dev *dev, struct ocrdma_ah *ah)
return 0;
}
-static int ocrdma_create_mq_eq(struct ocrdma_dev *dev)
-{
- int status;
- int irq;
- unsigned long flags = 0;
- int num_eq = 0;
-
- if (dev->nic_info.intr_mode == BE_INTERRUPT_MODE_INTX)
- flags = IRQF_SHARED;
- else {
- num_eq = dev->nic_info.msix.num_vectors -
- dev->nic_info.msix.start_vector;
- /* minimum two vectors/eq are required for rdma to work.
- * one for control path and one for data path.
- */
- if (num_eq < 2)
- return -EBUSY;
- }
-
- status = ocrdma_create_eq(dev, &dev->meq, OCRDMA_EQ_LEN);
- if (status)
- return status;
- sprintf(dev->meq.irq_name, "ocrdma_mq%d", dev->id);
- irq = ocrdma_get_irq(dev, &dev->meq);
- status = request_irq(irq, ocrdma_irq_handler, flags, dev->meq.irq_name,
- &dev->meq);
- if (status)
- _ocrdma_destroy_eq(dev, &dev->meq);
- return status;
-}
-
-static int ocrdma_create_qp_eqs(struct ocrdma_dev *dev)
+static int ocrdma_create_eqs(struct ocrdma_dev *dev)
{
int num_eq, i, status = 0;
int irq;
@@ -2546,49 +2649,47 @@ static int ocrdma_create_qp_eqs(struct ocrdma_dev *dev)
if (dev->nic_info.intr_mode == BE_INTERRUPT_MODE_INTX) {
num_eq = 1;
flags = IRQF_SHARED;
- } else
+ } else {
num_eq = min_t(u32, num_eq, num_online_cpus());
- dev->qp_eq_tbl = kzalloc(sizeof(struct ocrdma_eq) * num_eq, GFP_KERNEL);
- if (!dev->qp_eq_tbl)
+ }
+
+ if (!num_eq)
+ return -EINVAL;
+
+ dev->eq_tbl = kzalloc(sizeof(struct ocrdma_eq) * num_eq, GFP_KERNEL);
+ if (!dev->eq_tbl)
return -ENOMEM;
for (i = 0; i < num_eq; i++) {
- status = ocrdma_create_eq(dev, &dev->qp_eq_tbl[i],
- OCRDMA_EQ_LEN);
+ status = ocrdma_create_eq(dev, &dev->eq_tbl[i],
+ OCRDMA_EQ_LEN);
if (status) {
status = -EINVAL;
break;
}
- sprintf(dev->qp_eq_tbl[i].irq_name, "ocrdma_qp%d-%d",
+ sprintf(dev->eq_tbl[i].irq_name, "ocrdma%d-%d",
dev->id, i);
- irq = ocrdma_get_irq(dev, &dev->qp_eq_tbl[i]);
+ irq = ocrdma_get_irq(dev, &dev->eq_tbl[i]);
status = request_irq(irq, ocrdma_irq_handler, flags,
- dev->qp_eq_tbl[i].irq_name,
- &dev->qp_eq_tbl[i]);
- if (status) {
- _ocrdma_destroy_eq(dev, &dev->qp_eq_tbl[i]);
- status = -EINVAL;
- break;
- }
+ dev->eq_tbl[i].irq_name,
+ &dev->eq_tbl[i]);
+ if (status)
+ goto done;
dev->eq_cnt += 1;
}
/* one eq is sufficient for data path to work */
- if (dev->eq_cnt >= 1)
- return 0;
- if (status)
- ocrdma_destroy_qp_eqs(dev);
+ return 0;
+done:
+ ocrdma_destroy_eqs(dev);
return status;
}
int ocrdma_init_hw(struct ocrdma_dev *dev)
{
int status;
- /* set up control path eq */
- status = ocrdma_create_mq_eq(dev);
- if (status)
- return status;
- /* set up data path eq */
- status = ocrdma_create_qp_eqs(dev);
+
+ /* create the eqs */
+ status = ocrdma_create_eqs(dev);
if (status)
goto qpeq_err;
status = ocrdma_create_mq(dev);
@@ -2606,15 +2707,21 @@ int ocrdma_init_hw(struct ocrdma_dev *dev)
status = ocrdma_mbx_create_ah_tbl(dev);
if (status)
goto conf_err;
+ status = ocrdma_mbx_get_phy_info(dev);
+ if (status)
+ goto conf_err;
+ status = ocrdma_mbx_get_ctrl_attribs(dev);
+ if (status)
+ goto conf_err;
+
return 0;
conf_err:
ocrdma_destroy_mq(dev);
mq_err:
- ocrdma_destroy_qp_eqs(dev);
+ ocrdma_destroy_eqs(dev);
qpeq_err:
- ocrdma_destroy_eq(dev, &dev->meq);
- ocrdma_err("%s() status=%d\n", __func__, status);
+ pr_err("%s() status=%d\n", __func__, status);
return status;
}
@@ -2622,10 +2729,9 @@ void ocrdma_cleanup_hw(struct ocrdma_dev *dev)
{
ocrdma_mbx_delete_ah_tbl(dev);
- /* cleanup the data path eqs */
- ocrdma_destroy_qp_eqs(dev);
+ /* cleanup the eqs */
+ ocrdma_destroy_eqs(dev);
/* cleanup the control path */
ocrdma_destroy_mq(dev);
- ocrdma_destroy_eq(dev, &dev->meq);
}
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.h b/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
index be5db77404d..e513f729314 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
@@ -78,6 +78,11 @@ static inline void ocrdma_copy_le32_to_cpu(void *dst, void *src, u32 len)
#endif
}
+static inline u64 ocrdma_get_db_addr(struct ocrdma_dev *dev, u32 pdid)
+{
+ return dev->nic_info.unmapped_db + (pdid * dev->nic_info.db_page_size);
+}
+
int ocrdma_init_hw(struct ocrdma_dev *);
void ocrdma_cleanup_hw(struct ocrdma_dev *);
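The new ocrdma_get_db_addr() inline encodes the fixed per-PD doorbell stride in BAR space; a worked example of the arithmetic with illustrative (not real) values:

	u64 unmapped_db  = 0xd0000000ULL;	/* doorbell BAR base (example) */
	u32 db_page_size = 4096;		/* per-PD doorbell page (example) */
	u32 pdid = 5;
	u64 db_addr = unmapped_db + ((u64)pdid * db_page_size);	/* 0xd0005000 */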
@@ -86,9 +91,9 @@ void ocrdma_ring_cq_db(struct ocrdma_dev *, u16 cq_id, bool armed,
bool solicited, u16 cqe_popped);
/* verbs specific mailbox commands */
+int ocrdma_mbx_get_link_speed(struct ocrdma_dev *dev, u8 *lnk_speed);
int ocrdma_query_config(struct ocrdma_dev *,
struct ocrdma_mbx_query_config *config);
-int ocrdma_resolve_dgid(struct ocrdma_dev *, union ib_gid *dgid, u8 *mac_addr);
int ocrdma_mbx_alloc_pd(struct ocrdma_dev *, struct ocrdma_pd *);
int ocrdma_mbx_dealloc_pd(struct ocrdma_dev *, struct ocrdma_pd *);
@@ -100,20 +105,18 @@ int ocrdma_mbx_dealloc_lkey(struct ocrdma_dev *, int fmr, u32 lkey);
int ocrdma_reg_mr(struct ocrdma_dev *, struct ocrdma_hw_mr *hwmr,
u32 pd_id, int acc);
int ocrdma_mbx_create_cq(struct ocrdma_dev *, struct ocrdma_cq *,
- int entries, int dpp_cq);
+ int entries, int dpp_cq, u16 pd_id);
int ocrdma_mbx_destroy_cq(struct ocrdma_dev *, struct ocrdma_cq *);
int ocrdma_mbx_create_qp(struct ocrdma_qp *, struct ib_qp_init_attr *attrs,
u8 enable_dpp_cq, u16 dpp_cq_id, u16 *dpp_offset,
u16 *dpp_credit_lmt);
int ocrdma_mbx_modify_qp(struct ocrdma_dev *, struct ocrdma_qp *,
- struct ib_qp_attr *attrs, int attr_mask,
- enum ib_qp_state old_qps);
+ struct ib_qp_attr *attrs, int attr_mask);
int ocrdma_mbx_query_qp(struct ocrdma_dev *, struct ocrdma_qp *,
struct ocrdma_qp_params *param);
int ocrdma_mbx_destroy_qp(struct ocrdma_dev *, struct ocrdma_qp *);
-
-int ocrdma_mbx_create_srq(struct ocrdma_srq *,
+int ocrdma_mbx_create_srq(struct ocrdma_dev *, struct ocrdma_srq *,
struct ib_srq_init_attr *,
struct ocrdma_pd *);
int ocrdma_mbx_modify_srq(struct ocrdma_srq *, struct ib_srq_attr *);
@@ -123,10 +126,13 @@ int ocrdma_mbx_destroy_srq(struct ocrdma_dev *, struct ocrdma_srq *);
int ocrdma_alloc_av(struct ocrdma_dev *, struct ocrdma_ah *);
int ocrdma_free_av(struct ocrdma_dev *, struct ocrdma_ah *);
-int ocrdma_qp_state_machine(struct ocrdma_qp *, enum ib_qp_state new_state,
+int ocrdma_qp_state_change(struct ocrdma_qp *, enum ib_qp_state new_state,
enum ib_qp_state *old_ib_state);
bool ocrdma_is_qp_in_sq_flushlist(struct ocrdma_cq *, struct ocrdma_qp *);
bool ocrdma_is_qp_in_rq_flushlist(struct ocrdma_cq *, struct ocrdma_qp *);
void ocrdma_flush_qp(struct ocrdma_qp *);
+int ocrdma_get_irq(struct ocrdma_dev *dev, struct ocrdma_eq *eq);
+int ocrdma_mbx_rdma_stats(struct ocrdma_dev *, bool reset);
+char *port_speed_string(struct ocrdma_dev *dev);
#endif /* __OCRDMA_HW_H__ */
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index 48928c8e777..7c504e07974 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -39,9 +39,11 @@
#include "ocrdma_ah.h"
#include "be_roce.h"
#include "ocrdma_hw.h"
+#include "ocrdma_stats.h"
+#include "ocrdma_abi.h"
-MODULE_VERSION(OCRDMA_ROCE_DEV_VERSION);
-MODULE_DESCRIPTION("Emulex RoCE HCA Driver");
+MODULE_VERSION(OCRDMA_ROCE_DRV_VERSION);
+MODULE_DESCRIPTION(OCRDMA_ROCE_DRV_DESC " " OCRDMA_ROCE_DRV_VERSION);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");
@@ -66,46 +68,24 @@ void ocrdma_get_guid(struct ocrdma_dev *dev, u8 *guid)
guid[7] = mac_addr[5];
}
-static void ocrdma_build_sgid_mac(union ib_gid *sgid, unsigned char *mac_addr,
- bool is_vlan, u16 vlan_id)
-{
- sgid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
- sgid->raw[8] = mac_addr[0] ^ 2;
- sgid->raw[9] = mac_addr[1];
- sgid->raw[10] = mac_addr[2];
- if (is_vlan) {
- sgid->raw[11] = vlan_id >> 8;
- sgid->raw[12] = vlan_id & 0xff;
- } else {
- sgid->raw[11] = 0xff;
- sgid->raw[12] = 0xfe;
- }
- sgid->raw[13] = mac_addr[3];
- sgid->raw[14] = mac_addr[4];
- sgid->raw[15] = mac_addr[5];
-}
-
-static bool ocrdma_add_sgid(struct ocrdma_dev *dev, unsigned char *mac_addr,
- bool is_vlan, u16 vlan_id)
+static bool ocrdma_add_sgid(struct ocrdma_dev *dev, union ib_gid *new_sgid)
{
int i;
- union ib_gid new_sgid;
unsigned long flags;
memset(&ocrdma_zero_sgid, 0, sizeof(union ib_gid));
- ocrdma_build_sgid_mac(&new_sgid, mac_addr, is_vlan, vlan_id);
spin_lock_irqsave(&dev->sgid_lock, flags);
for (i = 0; i < OCRDMA_MAX_SGID; i++) {
if (!memcmp(&dev->sgid_tbl[i], &ocrdma_zero_sgid,
sizeof(union ib_gid))) {
/* found free entry */
- memcpy(&dev->sgid_tbl[i], &new_sgid,
+ memcpy(&dev->sgid_tbl[i], new_sgid,
sizeof(union ib_gid));
spin_unlock_irqrestore(&dev->sgid_lock, flags);
return true;
- } else if (!memcmp(&dev->sgid_tbl[i], &new_sgid,
+ } else if (!memcmp(&dev->sgid_tbl[i], new_sgid,
sizeof(union ib_gid))) {
/* entry already present, no addition is required. */
spin_unlock_irqrestore(&dev->sgid_lock, flags);
@@ -116,20 +96,17 @@ static bool ocrdma_add_sgid(struct ocrdma_dev *dev, unsigned char *mac_addr,
return false;
}
-static bool ocrdma_del_sgid(struct ocrdma_dev *dev, unsigned char *mac_addr,
- bool is_vlan, u16 vlan_id)
+static bool ocrdma_del_sgid(struct ocrdma_dev *dev, union ib_gid *sgid)
{
int found = false;
int i;
- union ib_gid sgid;
unsigned long flags;
- ocrdma_build_sgid_mac(&sgid, mac_addr, is_vlan, vlan_id);
spin_lock_irqsave(&dev->sgid_lock, flags);
/* first is default sgid, which cannot be deleted. */
for (i = 1; i < OCRDMA_MAX_SGID; i++) {
- if (!memcmp(&dev->sgid_tbl[i], &sgid, sizeof(union ib_gid))) {
+ if (!memcmp(&dev->sgid_tbl[i], sgid, sizeof(union ib_gid))) {
/* found matching entry */
memset(&dev->sgid_tbl[i], 0, sizeof(union ib_gid));
found = true;
@@ -140,75 +117,18 @@ static bool ocrdma_del_sgid(struct ocrdma_dev *dev, unsigned char *mac_addr,
return found;
}
-static void ocrdma_add_default_sgid(struct ocrdma_dev *dev)
-{
- /* GID Index 0 - Invariant manufacturer-assigned EUI-64 */
- union ib_gid *sgid = &dev->sgid_tbl[0];
-
- sgid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
- ocrdma_get_guid(dev, &sgid->raw[8]);
-}
-
-#if IS_ENABLED(CONFIG_VLAN_8021Q)
-static void ocrdma_add_vlan_sgids(struct ocrdma_dev *dev)
+static int ocrdma_addr_event(unsigned long event, struct net_device *netdev,
+ union ib_gid *gid)
{
- struct net_device *netdev, *tmp;
- u16 vlan_id;
- bool is_vlan;
-
- netdev = dev->nic_info.netdev;
-
- rcu_read_lock();
- for_each_netdev_rcu(&init_net, tmp) {
- if (netdev == tmp || vlan_dev_real_dev(tmp) == netdev) {
- if (!netif_running(tmp) || !netif_oper_up(tmp))
- continue;
- if (netdev != tmp) {
- vlan_id = vlan_dev_vlan_id(tmp);
- is_vlan = true;
- } else {
- is_vlan = false;
- vlan_id = 0;
- tmp = netdev;
- }
- ocrdma_add_sgid(dev, tmp->dev_addr, is_vlan, vlan_id);
- }
- }
- rcu_read_unlock();
-}
-#else
-static void ocrdma_add_vlan_sgids(struct ocrdma_dev *dev)
-{
-
-}
-#endif /* VLAN */
-
-static int ocrdma_build_sgid_tbl(struct ocrdma_dev *dev)
-{
- ocrdma_add_default_sgid(dev);
- ocrdma_add_vlan_sgids(dev);
- return 0;
-}
-
-#if IS_ENABLED(CONFIG_IPV6)
-
-static int ocrdma_inet6addr_event(struct notifier_block *notifier,
- unsigned long event, void *ptr)
-{
- struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
- struct net_device *netdev = ifa->idev->dev;
struct ib_event gid_event;
struct ocrdma_dev *dev;
bool found = false;
bool updated = false;
bool is_vlan = false;
- u16 vid = 0;
is_vlan = netdev->priv_flags & IFF_802_1Q_VLAN;
- if (is_vlan) {
- vid = vlan_dev_vlan_id(netdev);
- netdev = vlan_dev_real_dev(netdev);
- }
+ if (is_vlan)
+ netdev = rdma_vlan_dev_real_dev(netdev);
rcu_read_lock();
list_for_each_entry_rcu(dev, &ocrdma_dev_list, entry) {
@@ -221,16 +141,14 @@ static int ocrdma_inet6addr_event(struct notifier_block *notifier,
if (!found)
return NOTIFY_DONE;
- if (!rdma_link_local_addr((struct in6_addr *)&ifa->addr))
- return NOTIFY_DONE;
mutex_lock(&dev->dev_lock);
switch (event) {
case NETDEV_UP:
- updated = ocrdma_add_sgid(dev, netdev->dev_addr, is_vlan, vid);
+ updated = ocrdma_add_sgid(dev, gid);
break;
case NETDEV_DOWN:
- updated = ocrdma_del_sgid(dev, netdev->dev_addr, is_vlan, vid);
+ updated = ocrdma_del_sgid(dev, gid);
break;
default:
break;
@@ -246,6 +164,32 @@ static int ocrdma_inet6addr_event(struct notifier_block *notifier,
return NOTIFY_OK;
}
+static int ocrdma_inetaddr_event(struct notifier_block *notifier,
+ unsigned long event, void *ptr)
+{
+ struct in_ifaddr *ifa = ptr;
+ union ib_gid gid;
+ struct net_device *netdev = ifa->ifa_dev->dev;
+
+ ipv6_addr_set_v4mapped(ifa->ifa_address, (struct in6_addr *)&gid);
+ return ocrdma_addr_event(event, netdev, &gid);
+}
+
+static struct notifier_block ocrdma_inetaddr_notifier = {
+ .notifier_call = ocrdma_inetaddr_event
+};
+
+#if IS_ENABLED(CONFIG_IPV6)
+
+static int ocrdma_inet6addr_event(struct notifier_block *notifier,
+ unsigned long event, void *ptr)
+{
+ struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
+ union ib_gid *gid = (union ib_gid *)&ifa->addr;
+ struct net_device *netdev = ifa->idev->dev;
+ return ocrdma_addr_event(event, netdev, gid);
+}
+
static struct notifier_block ocrdma_inet6addr_notifier = {
.notifier_call = ocrdma_inet6addr_event
};
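IPv4 addresses enter the GID table as v4-mapped IPv6 addresses (RFC 4291, ::ffff:a.b.c.d), which is what lets both notifiers funnel into the common ocrdma_addr_event(). A minimal sketch of the mapping:

	union ib_gid gid;

	/* 192.0.2.1 becomes 0000:0000:0000:0000:0000:ffff:c000:0201 */
	ipv6_addr_set_v4mapped(htonl(0xc0000201), (struct in6_addr *)&gid);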
@@ -265,6 +209,7 @@ static int ocrdma_register_device(struct ocrdma_dev *dev)
memcpy(dev->ibdev.node_desc, OCRDMA_NODE_DESC,
sizeof(OCRDMA_NODE_DESC));
dev->ibdev.owner = THIS_MODULE;
+ dev->ibdev.uverbs_abi_ver = OCRDMA_ABI_VERSION;
dev->ibdev.uverbs_cmd_mask =
OCRDMA_UVERBS(GET_CONTEXT) |
OCRDMA_UVERBS(QUERY_DEVICE) |
@@ -326,9 +271,14 @@ static int ocrdma_register_device(struct ocrdma_dev *dev)
dev->ibdev.req_notify_cq = ocrdma_arm_cq;
dev->ibdev.get_dma_mr = ocrdma_get_dma_mr;
+ dev->ibdev.reg_phys_mr = ocrdma_reg_kernel_mr;
dev->ibdev.dereg_mr = ocrdma_dereg_mr;
dev->ibdev.reg_user_mr = ocrdma_reg_user_mr;
+ dev->ibdev.alloc_fast_reg_mr = ocrdma_alloc_frmr;
+ dev->ibdev.alloc_fast_reg_page_list = ocrdma_alloc_frmr_page_list;
+ dev->ibdev.free_fast_reg_page_list = ocrdma_free_frmr_page_list;
+
/* mandatory to support user space verbs consumer. */
dev->ibdev.alloc_ucontext = ocrdma_alloc_ucontext;
dev->ibdev.dealloc_ucontext = ocrdma_dealloc_ucontext;
@@ -337,7 +287,7 @@ static int ocrdma_register_device(struct ocrdma_dev *dev)
dev->ibdev.process_mad = ocrdma_process_mad;
- if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
+ if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) {
dev->ibdev.uverbs_cmd_mask |=
OCRDMA_UVERBS(CREATE_SRQ) |
OCRDMA_UVERBS(MODIFY_SRQ) |
@@ -378,7 +328,7 @@ static int ocrdma_alloc_resources(struct ocrdma_dev *dev)
spin_lock_init(&dev->flush_q_lock);
return 0;
alloc_err:
- ocrdma_err("%s(%d) error.\n", __func__, dev->id);
+ pr_err("%s(%d) error.\n", __func__, dev->id);
return -ENOMEM;
}
@@ -389,14 +339,47 @@ static void ocrdma_free_resources(struct ocrdma_dev *dev)
kfree(dev->sgid_tbl);
}
+/* OCRDMA sysfs interface */
+static ssize_t show_rev(struct device *device, struct device_attribute *attr,
+ char *buf)
+{
+ struct ocrdma_dev *dev = dev_get_drvdata(device);
+
+ return scnprintf(buf, PAGE_SIZE, "0x%x\n", dev->nic_info.pdev->vendor);
+}
+
+static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
+ char *buf)
+{
+ struct ocrdma_dev *dev = dev_get_drvdata(device);
+
+ return scnprintf(buf, PAGE_SIZE, "%s", &dev->attr.fw_ver[0]);
+}
+
+static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
+static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
+
+static struct device_attribute *ocrdma_attributes[] = {
+ &dev_attr_hw_rev,
+ &dev_attr_fw_ver
+};
+
+static void ocrdma_remove_sysfiles(struct ocrdma_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ocrdma_attributes); i++)
+ device_remove_file(&dev->ibdev.dev, ocrdma_attributes[i]);
+}
+
static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info)
{
- int status = 0;
+ int status = 0, i;
struct ocrdma_dev *dev;
dev = (struct ocrdma_dev *)ib_alloc_device(sizeof(struct ocrdma_dev));
if (!dev) {
- ocrdma_err("Unable to allocate ib device\n");
+ pr_err("Unable to allocate ib device\n");
return NULL;
}
dev->mbx_cmd = kzalloc(sizeof(struct ocrdma_mqe_emb_cmd), GFP_KERNEL);
@@ -416,19 +399,29 @@ static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info)
if (status)
goto alloc_err;
- status = ocrdma_build_sgid_tbl(dev);
- if (status)
- goto alloc_err;
-
status = ocrdma_register_device(dev);
if (status)
goto alloc_err;
+ for (i = 0; i < ARRAY_SIZE(ocrdma_attributes); i++)
+ if (device_create_file(&dev->ibdev.dev, ocrdma_attributes[i]))
+ goto sysfs_err;
spin_lock(&ocrdma_devlist_lock);
list_add_tail_rcu(&dev->entry, &ocrdma_dev_list);
spin_unlock(&ocrdma_devlist_lock);
+ /* Init stats */
+ ocrdma_add_port_stats(dev);
+
+ pr_info("%s %s: %s \"%s\" port %d\n",
+ dev_name(&dev->nic_info.pdev->dev), hca_name(dev),
+ port_speed_string(dev), dev->model_number,
+ dev->hba_port_num);
+ pr_info("%s ocrdma%d driver loaded successfully\n",
+ dev_name(&dev->nic_info.pdev->dev), dev->id);
return dev;
+sysfs_err:
+ ocrdma_remove_sysfiles(dev);
alloc_err:
ocrdma_free_resources(dev);
ocrdma_cleanup_hw(dev);
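DEVICE_ATTR() hides the attribute boilerplate; the hw_rev declaration above expands to roughly this (a sketch of the standard macro expansion, not driver code):

	static struct device_attribute dev_attr_hw_rev = {
		.attr  = { .name = "hw_rev", .mode = S_IRUGO },
		.show  = show_rev,
		.store = NULL,
	};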
@@ -437,7 +430,7 @@ init_err:
idr_err:
kfree(dev->mbx_cmd);
ib_dealloc_device(&dev->ibdev);
- ocrdma_err("%s() leaving. ret=%d\n", __func__, status);
+ pr_err("%s() leaving. ret=%d\n", __func__, status);
return NULL;
}
@@ -445,9 +438,6 @@ static void ocrdma_remove_free(struct rcu_head *rcu)
{
struct ocrdma_dev *dev = container_of(rcu, struct ocrdma_dev, rcu);
- ocrdma_free_resources(dev);
- ocrdma_cleanup_hw(dev);
-
idr_remove(&ocrdma_dev_id, dev->id);
kfree(dev->mbx_cmd);
ib_dealloc_device(&dev->ibdev);
@@ -458,11 +448,18 @@ static void ocrdma_remove(struct ocrdma_dev *dev)
/* first unregister with stack to stop all the active traffic
* of the registered clients.
*/
+ ocrdma_rem_port_stats(dev);
+ ocrdma_remove_sysfiles(dev);
+
ib_unregister_device(&dev->ibdev);
spin_lock(&ocrdma_devlist_lock);
list_del_rcu(&dev->entry);
spin_unlock(&ocrdma_devlist_lock);
+
+ ocrdma_free_resources(dev);
+ ocrdma_cleanup_hw(dev);
+
call_rcu(&dev->rcu, ocrdma_remove_free);
}
@@ -491,7 +488,7 @@ static int ocrdma_close(struct ocrdma_dev *dev)
cur_qp = dev->qp_tbl;
for (i = 0; i < OCRDMA_MAX_QP; i++) {
qp = cur_qp[i];
- if (qp) {
+ if (qp && qp->ibqp.qp_type != IB_QPT_GSI) {
/* change the QP state to ERROR */
_ocrdma_modify_qp(&qp->ibqp, &attrs, attr_mask);
@@ -524,7 +521,7 @@ static void ocrdma_event_handler(struct ocrdma_dev *dev, u32 event)
case BE_DEV_DOWN:
ocrdma_close(dev);
break;
- };
+ }
}
static struct ocrdma_driver ocrdma_drv = {
@@ -532,6 +529,7 @@ static struct ocrdma_driver ocrdma_drv = {
.add = ocrdma_add,
.remove = ocrdma_remove,
.state_change_handler = ocrdma_event_handler,
+ .be_abi_version = OCRDMA_BE_ROCE_ABI_VERSION,
};
static void ocrdma_unregister_inet6addr_notifier(void)
@@ -541,20 +539,37 @@ static void ocrdma_unregister_inet6addr_notifier(void)
#endif
}
+static void ocrdma_unregister_inetaddr_notifier(void)
+{
+ unregister_inetaddr_notifier(&ocrdma_inetaddr_notifier);
+}
+
static int __init ocrdma_init_module(void)
{
int status;
+ ocrdma_init_debugfs();
+
+ status = register_inetaddr_notifier(&ocrdma_inetaddr_notifier);
+ if (status)
+ return status;
+
#if IS_ENABLED(CONFIG_IPV6)
status = register_inet6addr_notifier(&ocrdma_inet6addr_notifier);
if (status)
- return status;
+ goto err_notifier6;
#endif
status = be_roce_register_driver(&ocrdma_drv);
if (status)
- ocrdma_unregister_inet6addr_notifier();
+ goto err_be_reg;
+ return 0;
+
+err_be_reg:
+ ocrdma_unregister_inet6addr_notifier();
+err_notifier6:
+ ocrdma_unregister_inetaddr_notifier();
return status;
}
@@ -562,6 +577,8 @@ static void __exit ocrdma_exit_module(void)
{
be_roce_unregister_driver(&ocrdma_drv);
ocrdma_unregister_inet6addr_notifier();
+ ocrdma_unregister_inetaddr_notifier();
+ ocrdma_rem_debugfs();
}
module_init(ocrdma_init_module);
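The reworked init path unwinds in reverse registration order, the standard kernel pattern; a self-contained sketch (register_a/register_b/unregister_a are placeholder names, not driver functions):

	static int register_a(void)    { return 0; }	/* stubs for illustration */
	static int register_b(void)    { return 0; }
	static void unregister_a(void) { }

	static int example_init(void)
	{
		int err;

		err = register_a();
		if (err)
			return err;	/* nothing to undo yet */
		err = register_b();
		if (err)
			goto err_b;	/* undo only what already succeeded */
		return 0;
	err_b:
		unregister_a();
		return err;
	}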
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
index c75cbdfa87e..96c9ee602ba 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
@@ -30,8 +30,16 @@
#define Bit(_b) (1 << (_b))
-#define OCRDMA_GEN1_FAMILY 0xB
-#define OCRDMA_GEN2_FAMILY 0x2
+enum {
+ OCRDMA_ASIC_GEN_SKH_R = 0x04,
+ OCRDMA_ASIC_GEN_LANCER = 0x0B
+};
+
+enum {
+ OCRDMA_ASIC_REV_A0 = 0x00,
+ OCRDMA_ASIC_REV_B0 = 0x10,
+ OCRDMA_ASIC_REV_C0 = 0x20
+};
#define OCRDMA_SUBSYS_ROCE 10
enum {
@@ -64,21 +72,25 @@ enum {
OCRDMA_CMD_ATTACH_MCAST,
OCRDMA_CMD_DETACH_MCAST,
+ OCRDMA_CMD_GET_RDMA_STATS,
OCRDMA_CMD_MAX
};
#define OCRDMA_SUBSYS_COMMON 1
enum {
+ OCRDMA_CMD_QUERY_NTWK_LINK_CONFIG_V1 = 5,
OCRDMA_CMD_CREATE_CQ = 12,
OCRDMA_CMD_CREATE_EQ = 13,
OCRDMA_CMD_CREATE_MQ = 21,
+ OCRDMA_CMD_GET_CTRL_ATTRIBUTES = 32,
OCRDMA_CMD_GET_FW_VER = 35,
OCRDMA_CMD_DELETE_MQ = 53,
OCRDMA_CMD_DELETE_CQ = 54,
OCRDMA_CMD_DELETE_EQ = 55,
OCRDMA_CMD_GET_FW_CONFIG = 58,
- OCRDMA_CMD_CREATE_MQ_EXT = 90
+ OCRDMA_CMD_CREATE_MQ_EXT = 90,
+ OCRDMA_CMD_PHY_DETAILS = 102
};
enum {
@@ -91,18 +103,21 @@ enum {
#define OCRDMA_MAX_QP 2048
#define OCRDMA_MAX_CQ 2048
+#define OCRDMA_MAX_STAG 8192
enum {
OCRDMA_DB_RQ_OFFSET = 0xE0,
- OCRDMA_DB_GEN2_RQ1_OFFSET = 0x100,
- OCRDMA_DB_GEN2_RQ2_OFFSET = 0xC0,
+ OCRDMA_DB_GEN2_RQ_OFFSET = 0x100,
OCRDMA_DB_SQ_OFFSET = 0x60,
OCRDMA_DB_GEN2_SQ_OFFSET = 0x1C0,
OCRDMA_DB_SRQ_OFFSET = OCRDMA_DB_RQ_OFFSET,
- OCRDMA_DB_GEN2_SRQ_OFFSET = OCRDMA_DB_GEN2_RQ1_OFFSET,
+ OCRDMA_DB_GEN2_SRQ_OFFSET = OCRDMA_DB_GEN2_RQ_OFFSET,
OCRDMA_DB_CQ_OFFSET = 0x120,
OCRDMA_DB_EQ_OFFSET = OCRDMA_DB_CQ_OFFSET,
- OCRDMA_DB_MQ_OFFSET = 0x140
+ OCRDMA_DB_MQ_OFFSET = 0x140,
+
+ OCRDMA_DB_SQ_SHIFT = 16,
+ OCRDMA_DB_RQ_SHIFT = 24
};
#define OCRDMA_DB_CQ_RING_ID_MASK 0x3FF /* bits 0 - 9 */
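The new shifts name where the "entries posted" count sits in a doorbell word. A hedged sketch of a GEN2 SQ doorbell write built from these constants (assuming the usual dbid-plus-count layout; db and sq_dbid are assumed set up elsewhere):

	void __iomem *db;	/* ioremapped doorbell page (assumed) */
	u16 sq_dbid;		/* SQ doorbell id (assumed) */
	u32 val;

	val = sq_dbid | (1 << OCRDMA_DB_SQ_SHIFT);	/* one WQE posted */
	iowrite32(val, db + OCRDMA_DB_GEN2_SQ_OFFSET);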
@@ -137,14 +152,21 @@ enum {
#define OCRDMA_MIN_Q_PAGE_SIZE (4096)
#define OCRDMA_MAX_Q_PAGES (8)
+#define OCRDMA_SLI_ASIC_ID_OFFSET 0x9C
+#define OCRDMA_SLI_ASIC_REV_MASK 0x000000FF
+#define OCRDMA_SLI_ASIC_GEN_NUM_MASK 0x0000FF00
+#define OCRDMA_SLI_ASIC_GEN_NUM_SHIFT 0x08
/*
# 0: 4K Bytes
# 1: 8K Bytes
# 2: 16K Bytes
# 3: 32K Bytes
# 4: 64K Bytes
+# 5: 128K Bytes
+# 6: 256K Bytes
+# 7: 512K Bytes
*/
-#define OCRDMA_MAX_Q_PAGE_SIZE_CNT (5)
+#define OCRDMA_MAX_Q_PAGE_SIZE_CNT (8)
#define OCRDMA_Q_PAGE_BASE_SIZE (OCRDMA_MIN_Q_PAGE_SIZE * OCRDMA_MAX_Q_PAGES)
#define MAX_OCRDMA_QP_PAGES (8)
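The new masks carve the SLI ASIC-ID register at offset 0x9C into revision and generation fields; a worked example with an illustrative register value:

	u32 id = 0x00000410;	/* example readl() result */
	u8 rev = id & OCRDMA_SLI_ASIC_REV_MASK;		/* 0x10 -> OCRDMA_ASIC_REV_B0 */
	u8 gen = (id & OCRDMA_SLI_ASIC_GEN_NUM_MASK) >>
		  OCRDMA_SLI_ASIC_GEN_NUM_SHIFT;	/* 0x04 -> OCRDMA_ASIC_GEN_SKH_R */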
@@ -177,7 +199,7 @@ struct ocrdma_mbx_hdr {
u32 timeout; /* in seconds */
u32 cmd_len;
u32 rsvd_version;
-} __packed;
+};
enum {
OCRDMA_MBX_RSP_OPCODE_SHIFT = 0,
@@ -197,7 +219,7 @@ struct ocrdma_mbx_rsp {
u32 status;
u32 rsp_len;
u32 add_rsp_len;
-} __packed;
+};
enum {
OCRDMA_MQE_EMBEDDED = 1,
@@ -208,7 +230,7 @@ struct ocrdma_mqe_sge {
u32 pa_lo;
u32 pa_hi;
u32 len;
-} __packed;
+};
enum {
OCRDMA_MQE_HDR_EMB_SHIFT = 0,
@@ -225,12 +247,12 @@ struct ocrdma_mqe_hdr {
u32 tag_lo;
u32 tag_hi;
u32 rsvd3;
-} __packed;
+};
struct ocrdma_mqe_emb_cmd {
struct ocrdma_mbx_hdr mch;
u8 pyld[220];
-} __packed;
+};
struct ocrdma_mqe {
struct ocrdma_mqe_hdr hdr;
@@ -242,7 +264,7 @@ struct ocrdma_mqe {
u8 cmd[236];
struct ocrdma_mbx_rsp rsp;
} u;
-} __packed;
+};
#define OCRDMA_EQ_LEN 4096
#define OCRDMA_MQ_CQ_LEN 256
@@ -259,12 +281,12 @@ struct ocrdma_mqe {
struct ocrdma_delete_q_req {
struct ocrdma_mbx_hdr req;
u32 id;
-} __packed;
+};
struct ocrdma_pa {
u32 lo;
u32 hi;
-} __packed;
+};
#define MAX_OCRDMA_EQ_PAGES (8)
struct ocrdma_create_eq_req {
@@ -275,7 +297,7 @@ struct ocrdma_create_eq_req {
u32 delay;
u32 rsvd;
struct ocrdma_pa pa[MAX_OCRDMA_EQ_PAGES];
-} __packed;
+};
enum {
OCRDMA_CREATE_EQ_VALID = Bit(29),
@@ -310,7 +332,7 @@ struct ocrdma_mcqe {
u32 tag_lo;
u32 tag_hi;
u32 valid_ae_cmpl_cons;
-} __packed;
+};
enum {
OCRDMA_AE_MCQE_QPVALID = Bit(31),
@@ -332,7 +354,21 @@ struct ocrdma_ae_mcqe {
u32 cqvalid_cqid;
u32 evt_tag;
u32 valid_ae_event;
-} __packed;
+};
+
+enum {
+ OCRDMA_AE_PVID_MCQE_ENABLED_SHIFT = 0,
+ OCRDMA_AE_PVID_MCQE_ENABLED_MASK = 0xFF,
+ OCRDMA_AE_PVID_MCQE_TAG_SHIFT = 16,
+ OCRDMA_AE_PVID_MCQE_TAG_MASK = 0xFFFF << OCRDMA_AE_PVID_MCQE_TAG_SHIFT
+};
+
+struct ocrdma_ae_pvid_mcqe {
+ u32 tag_enabled;
+ u32 event_tag;
+ u32 rsvd1;
+ u32 rsvd2;
+};
enum {
OCRDMA_AE_MPA_MCQE_REQ_ID_SHIFT = 16,
@@ -356,7 +392,7 @@ struct ocrdma_ae_mpa_mcqe {
u32 w1;
u32 w2;
u32 valid_ae_event;
-} __packed;
+};
enum {
OCRDMA_AE_QP_MCQE_NEW_QP_STATE_SHIFT = 0,
@@ -382,9 +418,11 @@ struct ocrdma_ae_qp_mcqe {
u32 w1;
u32 w2;
u32 valid_ae_event;
-} __packed;
+};
-#define OCRDMA_ASYNC_EVE_CODE 0x14
+#define OCRDMA_ASYNC_RDMA_EVE_CODE 0x14
+#define OCRDMA_ASYNC_GRP5_EVE_CODE 0x5
+#define OCRDMA_ASYNC_EVENT_PVID_STATE 0x3
enum OCRDMA_ASYNC_EVENT_TYPE {
OCRDMA_CQ_ERROR = 0x00,
@@ -487,7 +525,8 @@ struct ocrdma_mbx_query_config {
u32 max_ird_ord_per_qp;
u32 max_shared_ird_ord;
u32 max_mr;
- u64 max_mr_size;
+ u32 max_mr_size_lo;
+ u32 max_mr_size_hi;
u32 max_num_mr_pbl;
u32 max_mw;
u32 max_fmr;
@@ -502,14 +541,14 @@ struct ocrdma_mbx_query_config {
u32 max_wqes_rqes_per_q;
u32 max_cq_cqes_per_cq;
u32 max_srq_rqe_sge;
-} __packed;
+};
struct ocrdma_fw_ver_rsp {
struct ocrdma_mqe_hdr hdr;
struct ocrdma_mbx_rsp rsp;
u8 running_ver[32];
-} __packed;
+};
struct ocrdma_fw_conf_rsp {
struct ocrdma_mqe_hdr hdr;
@@ -535,14 +574,65 @@ struct ocrdma_fw_conf_rsp {
u32 base_eqid;
u32 max_eq;
-} __packed;
+};
enum {
OCRDMA_FN_MODE_RDMA = 0x4
};
+struct ocrdma_get_phy_info_rsp {
+ struct ocrdma_mqe_hdr hdr;
+ struct ocrdma_mbx_rsp rsp;
+
+ u16 phy_type;
+ u16 interface_type;
+ u32 misc_params;
+ u16 ext_phy_details;
+ u16 rsvd;
+ u16 auto_speeds_supported;
+ u16 fixed_speeds_supported;
+ u32 future_use[2];
+};
+
+enum {
+ OCRDMA_PHY_SPEED_ZERO = 0x0,
+ OCRDMA_PHY_SPEED_10MBPS = 0x1,
+ OCRDMA_PHY_SPEED_100MBPS = 0x2,
+ OCRDMA_PHY_SPEED_1GBPS = 0x4,
+ OCRDMA_PHY_SPEED_10GBPS = 0x8,
+ OCRDMA_PHY_SPEED_40GBPS = 0x20
+};
+
+
+struct ocrdma_get_link_speed_rsp {
+ struct ocrdma_mqe_hdr hdr;
+ struct ocrdma_mbx_rsp rsp;
+
+ u8 pt_port_num;
+ u8 link_duplex;
+ u8 phys_port_speed;
+ u8 phys_port_fault;
+ u16 rsvd1;
+ u16 qos_lnk_speed;
+ u8 logical_lnk_status;
+ u8 rsvd2[3];
+};
+
+enum {
+ OCRDMA_PHYS_LINK_SPEED_ZERO = 0x0,
+ OCRDMA_PHYS_LINK_SPEED_10MBPS = 0x1,
+ OCRDMA_PHYS_LINK_SPEED_100MBPS = 0x2,
+ OCRDMA_PHYS_LINK_SPEED_1GBPS = 0x3,
+ OCRDMA_PHYS_LINK_SPEED_10GBPS = 0x4,
+ OCRDMA_PHYS_LINK_SPEED_20GBPS = 0x5,
+ OCRDMA_PHYS_LINK_SPEED_25GBPS = 0x6,
+ OCRDMA_PHYS_LINK_SPEED_40GBPS = 0x7,
+ OCRDMA_PHYS_LINK_SPEED_100GBPS = 0x8
+};
+
enum {
OCRDMA_CREATE_CQ_VER2 = 2,
+ OCRDMA_CREATE_CQ_VER3 = 3,
OCRDMA_CREATE_CQ_PAGE_CNT_MASK = 0xFFFF,
OCRDMA_CREATE_CQ_PAGE_SIZE_SHIFT = 16,
@@ -576,7 +666,8 @@ struct ocrdma_create_cq_cmd {
u32 pgsz_pgcnt;
u32 ev_cnt_flags;
u32 eqn;
- u32 cqe_count;
+ u16 cqe_count;
+ u16 pd_id;
u32 rsvd6;
struct ocrdma_pa pa[OCRDMA_CREATE_CQ_MAX_PAGES];
};
@@ -584,7 +675,7 @@ struct ocrdma_create_cq_cmd {
struct ocrdma_create_cq {
struct ocrdma_mqe_hdr hdr;
struct ocrdma_create_cq_cmd cmd;
-} __packed;
+};
enum {
OCRDMA_CREATE_CQ_RSP_CQ_ID_MASK = 0xFFFF
@@ -593,12 +684,12 @@ enum {
struct ocrdma_create_cq_cmd_rsp {
struct ocrdma_mbx_rsp rsp;
u32 cq_id;
-} __packed;
+};
struct ocrdma_create_cq_rsp {
struct ocrdma_mqe_hdr hdr;
struct ocrdma_create_cq_cmd_rsp rsp;
-} __packed;
+};
enum {
OCRDMA_CREATE_MQ_V0_CQ_ID_SHIFT = 22,
@@ -608,16 +699,8 @@ enum {
OCRDMA_CREATE_MQ_ASYNC_CQ_VALID = Bit(0)
};
-struct ocrdma_create_mq_v0 {
- u32 pages;
- u32 cqid_ringsize;
- u32 valid;
- u32 async_cqid_valid;
- u32 rsvd;
- struct ocrdma_pa pa[8];
-} __packed;
-
-struct ocrdma_create_mq_v1 {
+struct ocrdma_create_mq_req {
+ struct ocrdma_mbx_hdr req;
u32 cqid_pages;
u32 async_event_bitmap;
u32 async_cqid_ringsize;
@@ -625,20 +708,12 @@ struct ocrdma_create_mq_v1 {
u32 async_cqid_valid;
u32 rsvd;
struct ocrdma_pa pa[8];
-} __packed;
-
-struct ocrdma_create_mq_req {
- struct ocrdma_mbx_hdr req;
- union {
- struct ocrdma_create_mq_v0 v0;
- struct ocrdma_create_mq_v1 v1;
- };
-} __packed;
+};
struct ocrdma_create_mq_rsp {
struct ocrdma_mbx_rsp rsp;
u32 id;
-} __packed;
+};
enum {
OCRDMA_DESTROY_CQ_QID_SHIFT = 0,
@@ -653,12 +728,12 @@ struct ocrdma_destroy_cq {
struct ocrdma_mbx_hdr req;
u32 bypass_flush_qid;
-} __packed;
+};
struct ocrdma_destroy_cq_rsp {
struct ocrdma_mqe_hdr hdr;
struct ocrdma_mbx_rsp rsp;
-} __packed;
+};
enum {
OCRDMA_QPT_GSI = 1,
@@ -782,7 +857,7 @@ struct ocrdma_create_qp_req {
u32 dpp_credits_cqid;
u32 rpir_lkey;
struct ocrdma_pa ird_addr[MAX_OCRDMA_IRD_PAGES];
-} __packed;
+};
enum {
OCRDMA_CREATE_QP_RSP_QP_ID_SHIFT = 0,
@@ -836,18 +911,18 @@ struct ocrdma_create_qp_rsp {
u32 max_ord_ird;
u32 sq_rq_id;
u32 dpp_response;
-} __packed;
+};
struct ocrdma_destroy_qp {
struct ocrdma_mqe_hdr hdr;
struct ocrdma_mbx_hdr req;
u32 qp_id;
-} __packed;
+};
struct ocrdma_destroy_qp_rsp {
struct ocrdma_mqe_hdr hdr;
struct ocrdma_mbx_rsp rsp;
-} __packed;
+};
enum {
OCRDMA_MODIFY_QP_ID_SHIFT = 0,
@@ -991,7 +1066,7 @@ struct ocrdma_qp_params {
u32 dmac_b0_to_b3;
u32 vlan_dmac_b4_to_b5;
u32 qkey;
-} __packed;
+};
struct ocrdma_modify_qp {
@@ -1002,7 +1077,7 @@ struct ocrdma_modify_qp {
u32 flags;
u32 rdma_flags;
u32 num_outstanding_atomic_rd;
-} __packed;
+};
enum {
OCRDMA_MODIFY_QP_RSP_MAX_RQE_SHIFT = 0,
@@ -1017,28 +1092,29 @@ enum {
OCRDMA_MODIFY_QP_RSP_MAX_ORD_MASK = 0xFFFF <<
OCRDMA_MODIFY_QP_RSP_MAX_ORD_SHIFT
};
+
struct ocrdma_modify_qp_rsp {
struct ocrdma_mqe_hdr hdr;
struct ocrdma_mbx_rsp rsp;
u32 max_wqe_rqe;
u32 max_ord_ird;
-} __packed;
+};
struct ocrdma_query_qp {
struct ocrdma_mqe_hdr hdr;
struct ocrdma_mbx_hdr req;
-#define OCRDMA_QUERY_UP_QP_ID_SHIFT 0
-#define OCRDMA_QUERY_UP_QP_ID_MASK 0xFFFFFF
+#define OCRDMA_QUERY_UP_QP_ID_SHIFT 0
+#define OCRDMA_QUERY_UP_QP_ID_MASK 0xFFFFFF
u32 qp_id;
-} __packed;
+};
struct ocrdma_query_qp_rsp {
struct ocrdma_mqe_hdr hdr;
struct ocrdma_mbx_rsp rsp;
struct ocrdma_qp_params params;
-} __packed;
+};
enum {
OCRDMA_CREATE_SRQ_PD_ID_SHIFT = 0,
@@ -1067,7 +1143,7 @@ struct ocrdma_create_srq {
u32 max_sge_rqe;
u32 pages_rqe_sz;
struct ocrdma_pa rq_addr[MAX_OCRDMA_SRQ_PAGES];
-} __packed;
+};
enum {
OCRDMA_CREATE_SRQ_RSP_SRQ_ID_SHIFT = 0,
@@ -1086,7 +1162,7 @@ struct ocrdma_create_srq_rsp {
u32 id;
u32 max_sge_rqe_allocated;
-} __packed;
+};
enum {
OCRDMA_MODIFY_SRQ_ID_SHIFT = 0,
@@ -1105,7 +1181,7 @@ struct ocrdma_modify_srq {
u32 id;
u32 limit_max_rqe;
-} __packed;
+};
enum {
OCRDMA_QUERY_SRQ_ID_SHIFT = 0,
@@ -1117,7 +1193,7 @@ struct ocrdma_query_srq {
struct ocrdma_mbx_rsp req;
u32 id;
-} __packed;
+};
enum {
OCRDMA_QUERY_SRQ_RSP_PD_ID_SHIFT = 0,
@@ -1139,7 +1215,7 @@ struct ocrdma_query_srq_rsp {
u32 max_rqe_pdid;
u32 srq_lmt_max_sge;
-} __packed;
+};
enum {
OCRDMA_DESTROY_SRQ_ID_SHIFT = 0,
@@ -1151,7 +1227,7 @@ struct ocrdma_destroy_srq {
struct ocrdma_mbx_rsp req;
u32 id;
-} __packed;
+};
enum {
OCRDMA_ALLOC_PD_ENABLE_DPP = BIT(16),
@@ -1163,7 +1239,7 @@ struct ocrdma_alloc_pd {
struct ocrdma_mqe_hdr hdr;
struct ocrdma_mbx_hdr req;
u32 enable_dpp_rsvd;
-} __packed;
+};
enum {
OCRDMA_ALLOC_PD_RSP_DPP = Bit(16),
@@ -1175,18 +1251,18 @@ struct ocrdma_alloc_pd_rsp {
struct ocrdma_mqe_hdr hdr;
struct ocrdma_mbx_rsp rsp;
u32 dpp_page_pdid;
-} __packed;
+};
struct ocrdma_dealloc_pd {
struct ocrdma_mqe_hdr hdr;
struct ocrdma_mbx_hdr req;
u32 id;
-} __packed;
+};
struct ocrdma_dealloc_pd_rsp {
struct ocrdma_mqe_hdr hdr;
struct ocrdma_mbx_rsp rsp;
-} __packed;
+};
enum {
OCRDMA_ADDR_CHECK_ENABLE = 1,
@@ -1222,7 +1298,7 @@ struct ocrdma_alloc_lkey {
u32 pdid;
u32 pbl_sz_flags;
-} __packed;
+};
struct ocrdma_alloc_lkey_rsp {
struct ocrdma_mqe_hdr hdr;
@@ -1230,7 +1306,7 @@ struct ocrdma_alloc_lkey_rsp {
u32 lrkey;
u32 num_pbl_rsvd;
-} __packed;
+};
struct ocrdma_dealloc_lkey {
struct ocrdma_mqe_hdr hdr;
@@ -1238,12 +1314,12 @@ struct ocrdma_dealloc_lkey {
u32 lkey;
u32 rsvd_frmr;
-} __packed;
+};
struct ocrdma_dealloc_lkey_rsp {
struct ocrdma_mqe_hdr hdr;
struct ocrdma_mbx_rsp rsp;
-} __packed;
+};
#define MAX_OCRDMA_NSMR_PBL (u32)22
#define MAX_OCRDMA_PBL_SIZE 65536
@@ -1289,7 +1365,7 @@ struct ocrdma_reg_nsmr {
struct ocrdma_mqe_hdr hdr;
struct ocrdma_mbx_hdr cmd;
- u32 lrkey_key_index;
+ u32 fr_mr;
u32 num_pbl_pdid;
u32 flags_hpage_pbe_sz;
u32 totlen_low;
@@ -1299,7 +1375,7 @@ struct ocrdma_reg_nsmr {
u32 va_loaddr;
u32 va_hiaddr;
struct ocrdma_pa pbl[MAX_OCRDMA_NSMR_PBL];
-} __packed;
+};
enum {
OCRDMA_REG_NSMR_CONT_PBL_SHIFT = 0,
@@ -1321,12 +1397,12 @@ struct ocrdma_reg_nsmr_cont {
u32 last;
struct ocrdma_pa pbl[MAX_OCRDMA_NSMR_PBL];
-} __packed;
+};
struct ocrdma_pbe {
u32 pa_hi;
u32 pa_lo;
-} __packed;
+};
enum {
OCRDMA_REG_NSMR_RSP_NUM_PBL_SHIFT = 16,
@@ -1338,7 +1414,7 @@ struct ocrdma_reg_nsmr_rsp {
u32 lrkey;
u32 num_pbl;
-} __packed;
+};
enum {
OCRDMA_REG_NSMR_CONT_RSP_LRKEY_INDEX_SHIFT = 0,
@@ -1358,7 +1434,7 @@ struct ocrdma_reg_nsmr_cont_rsp {
u32 lrkey_key_index;
u32 num_pbl;
-} __packed;
+};
enum {
OCRDMA_ALLOC_MW_PD_ID_SHIFT = 0,
@@ -1370,7 +1446,7 @@ struct ocrdma_alloc_mw {
struct ocrdma_mbx_hdr req;
u32 pdid;
-} __packed;
+};
enum {
OCRDMA_ALLOC_MW_RSP_LRKEY_INDEX_SHIFT = 0,
@@ -1382,7 +1458,7 @@ struct ocrdma_alloc_mw_rsp {
struct ocrdma_mbx_rsp rsp;
u32 lrkey_index;
-} __packed;
+};
struct ocrdma_attach_mcast {
struct ocrdma_mqe_hdr hdr;
@@ -1391,12 +1467,12 @@ struct ocrdma_attach_mcast {
u8 mgid[16];
u32 mac_b0_to_b3;
u32 vlan_mac_b4_to_b5;
-} __packed;
+};
struct ocrdma_attach_mcast_rsp {
struct ocrdma_mqe_hdr hdr;
struct ocrdma_mbx_rsp rsp;
-} __packed;
+};
struct ocrdma_detach_mcast {
struct ocrdma_mqe_hdr hdr;
@@ -1405,12 +1481,12 @@ struct ocrdma_detach_mcast {
u8 mgid[16];
u32 mac_b0_to_b3;
u32 vlan_mac_b4_to_b5;
-} __packed;
+};
struct ocrdma_detach_mcast_rsp {
struct ocrdma_mqe_hdr hdr;
struct ocrdma_mbx_rsp rsp;
-} __packed;
+};
enum {
OCRDMA_CREATE_AH_NUM_PAGES_SHIFT = 19,
@@ -1434,24 +1510,24 @@ struct ocrdma_create_ah_tbl {
u32 ah_conf;
struct ocrdma_pa tbl_addr[8];
-} __packed;
+};
struct ocrdma_create_ah_tbl_rsp {
struct ocrdma_mqe_hdr hdr;
struct ocrdma_mbx_rsp rsp;
u32 ahid;
-} __packed;
+};
struct ocrdma_delete_ah_tbl {
struct ocrdma_mqe_hdr hdr;
struct ocrdma_mbx_hdr req;
u32 ahid;
-} __packed;
+};
struct ocrdma_delete_ah_tbl_rsp {
struct ocrdma_mqe_hdr hdr;
struct ocrdma_mbx_rsp rsp;
-} __packed;
+};
enum {
OCRDMA_EQE_VALID_SHIFT = 0,
@@ -1464,7 +1540,7 @@ enum {
struct ocrdma_eqe {
u32 id_valid;
-} __packed;
+};
enum OCRDMA_CQE_STATUS {
OCRDMA_CQE_SUCCESS = 0,
@@ -1548,29 +1624,14 @@ struct ocrdma_cqe {
} cmn;
};
u32 flags_status_srcqpn; /* w3 */
-} __packed;
-
-#define is_cqe_valid(cq, cqe) \
- (((le32_to_cpu(cqe->flags_status_srcqpn) & OCRDMA_CQE_VALID)\
- == cq->phase) ? 1 : 0)
-#define is_cqe_for_sq(cqe) \
- ((le32_to_cpu(cqe->flags_status_srcqpn) & OCRDMA_CQE_QTYPE) ? 0 : 1)
-#define is_cqe_for_rq(cqe) \
- ((le32_to_cpu(cqe->flags_status_srcqpn) & OCRDMA_CQE_QTYPE) ? 1 : 0)
-#define is_cqe_invalidated(cqe) \
- ((le32_to_cpu(cqe->flags_status_srcqpn) & OCRDMA_CQE_INVALIDATE) ? \
- 1 : 0)
-#define is_cqe_imm(cqe) \
- ((le32_to_cpu(cqe->flags_status_srcqpn) & OCRDMA_CQE_IMM) ? 1 : 0)
-#define is_cqe_wr_imm(cqe) \
- ((le32_to_cpu(cqe->flags_status_srcqpn) & OCRDMA_CQE_WRITE_IMM) ? 1 : 0)
+};
struct ocrdma_sge {
u32 addr_hi;
u32 addr_lo;
u32 lrkey;
u32 len;
-} __packed;
+};
enum {
OCRDMA_FLAG_SIG = 0x1,
@@ -1594,6 +1655,7 @@ enum OCRDMA_WQE_OPCODE {
OCRDMA_SEND = 0x00,
OCRDMA_CMP_SWP = 0x14,
OCRDMA_BIND_MW = 0x10,
+ OCRDMA_FR_MR = 0x11,
OCRDMA_RESV1 = 0x0A,
OCRDMA_LKEY_INV = 0x15,
OCRDMA_FETCH_ADD = 0x13,
@@ -1631,14 +1693,26 @@ struct ocrdma_hdr_wqe {
u32 lkey;
};
u32 total_len;
-} __packed;
+};
struct ocrdma_ewqe_ud_hdr {
u32 rsvd_dest_qpn;
u32 qkey;
u32 rsvd_ahid;
u32 rsvd;
-} __packed;
+};
+
+/* extended wqe that follows the hdr_wqe for a fast-register (FR_MR) request */
+struct ocrdma_ewqe_fr {
+ u32 va_hi;
+ u32 va_lo;
+ u32 fbo_hi;
+ u32 fbo_lo;
+ u32 size_sge;
+ u32 num_sges;
+ u32 rsvd;
+ u32 rsvd2;
+};
struct ocrdma_eth_basic {
u8 dmac[6];
@@ -1663,7 +1737,7 @@ struct ocrdma_grh {
u16 rsvd;
} __packed;
-#define OCRDMA_AV_VALID Bit(0)
+#define OCRDMA_AV_VALID Bit(7)
#define OCRDMA_AV_VLAN_VALID Bit(1)
struct ocrdma_av {
@@ -1672,4 +1746,208 @@ struct ocrdma_av {
u32 valid;
} __packed;
+struct ocrdma_rsrc_stats {
+ u32 dpp_pds;
+ u32 non_dpp_pds;
+ u32 rc_dpp_qps;
+ u32 uc_dpp_qps;
+ u32 ud_dpp_qps;
+ u32 rc_non_dpp_qps;
+ u32 rsvd;
+ u32 uc_non_dpp_qps;
+ u32 ud_non_dpp_qps;
+ u32 rsvd1;
+ u32 srqs;
+ u32 rbqs;
+ u32 r64K_nsmr;
+ u32 r64K_to_2M_nsmr;
+ u32 r2M_to_44M_nsmr;
+ u32 r44M_to_1G_nsmr;
+ u32 r1G_to_4G_nsmr;
+ u32 nsmr_count_4G_to_32G;
+ u32 r32G_to_64G_nsmr;
+ u32 r64G_to_128G_nsmr;
+ u32 r128G_to_higher_nsmr;
+ u32 embedded_nsmr;
+ u32 frmr;
+ u32 prefetch_qps;
+ u32 ondemand_qps;
+ u32 phy_mr;
+ u32 mw;
+ u32 rsvd2[7];
+};
+
+struct ocrdma_db_err_stats {
+ u32 sq_doorbell_errors;
+ u32 cq_doorbell_errors;
+ u32 rq_srq_doorbell_errors;
+ u32 cq_overflow_errors;
+ u32 rsvd[4];
+};
+
+struct ocrdma_wqe_stats {
+ u32 large_send_rc_wqes_lo;
+ u32 large_send_rc_wqes_hi;
+ u32 large_write_rc_wqes_lo;
+ u32 large_write_rc_wqes_hi;
+ u32 rsvd[4];
+ u32 read_wqes_lo;
+ u32 read_wqes_hi;
+ u32 frmr_wqes_lo;
+ u32 frmr_wqes_hi;
+ u32 mw_bind_wqes_lo;
+ u32 mw_bind_wqes_hi;
+ u32 invalidate_wqes_lo;
+ u32 invalidate_wqes_hi;
+ u32 rsvd1[2];
+ u32 dpp_wqe_drops;
+ u32 rsvd2[5];
+};
+
+struct ocrdma_tx_stats {
+ u32 send_pkts_lo;
+ u32 send_pkts_hi;
+ u32 write_pkts_lo;
+ u32 write_pkts_hi;
+ u32 read_pkts_lo;
+ u32 read_pkts_hi;
+ u32 read_rsp_pkts_lo;
+ u32 read_rsp_pkts_hi;
+ u32 ack_pkts_lo;
+ u32 ack_pkts_hi;
+ u32 send_bytes_lo;
+ u32 send_bytes_hi;
+ u32 write_bytes_lo;
+ u32 write_bytes_hi;
+ u32 read_req_bytes_lo;
+ u32 read_req_bytes_hi;
+ u32 read_rsp_bytes_lo;
+ u32 read_rsp_bytes_hi;
+ u32 ack_timeouts;
+ u32 rsvd[5];
+};
+
+
+struct ocrdma_tx_qp_err_stats {
+ u32 local_length_errors;
+ u32 local_protection_errors;
+ u32 local_qp_operation_errors;
+ u32 retry_count_exceeded_errors;
+ u32 rnr_retry_count_exceeded_errors;
+ u32 rsvd[3];
+};
+
+struct ocrdma_rx_stats {
+ u32 roce_frame_bytes_lo;
+ u32 roce_frame_bytes_hi;
+ u32 roce_frame_icrc_drops;
+ u32 roce_frame_payload_len_drops;
+ u32 ud_drops;
+ u32 qp1_drops;
+ u32 psn_error_request_packets;
+ u32 psn_error_resp_packets;
+ u32 rnr_nak_timeouts;
+ u32 rnr_nak_receives;
+ u32 roce_frame_rxmt_drops;
+ u32 nak_count_psn_sequence_errors;
+ u32 rc_drop_count_lookup_errors;
+ u32 rq_rnr_naks;
+ u32 srq_rnr_naks;
+ u32 roce_frames_lo;
+ u32 roce_frames_hi;
+ u32 rsvd;
+};
+
+struct ocrdma_rx_qp_err_stats {
+ u32 nak_invalid_requst_errors;
+ u32 nak_remote_operation_errors;
+ u32 nak_count_remote_access_errors;
+ u32 local_length_errors;
+ u32 local_protection_errors;
+ u32 local_qp_operation_errors;
+ u32 rsvd[2];
+};
+
+struct ocrdma_tx_dbg_stats {
+ u32 data[100];
+};
+
+struct ocrdma_rx_dbg_stats {
+ u32 data[200];
+};
+
+struct ocrdma_rdma_stats_req {
+ struct ocrdma_mbx_hdr hdr;
+ u8 reset_stats;
+ u8 rsvd[3];
+} __packed;
+
+struct ocrdma_rdma_stats_resp {
+ struct ocrdma_mbx_hdr hdr;
+ struct ocrdma_rsrc_stats act_rsrc_stats;
+ struct ocrdma_rsrc_stats th_rsrc_stats;
+ struct ocrdma_db_err_stats db_err_stats;
+ struct ocrdma_wqe_stats wqe_stats;
+ struct ocrdma_tx_stats tx_stats;
+ struct ocrdma_tx_qp_err_stats tx_qp_err_stats;
+ struct ocrdma_rx_stats rx_stats;
+ struct ocrdma_rx_qp_err_stats rx_qp_err_stats;
+ struct ocrdma_tx_dbg_stats tx_dbg_stats;
+ struct ocrdma_rx_dbg_stats rx_dbg_stats;
+} __packed;
+
+
+struct mgmt_hba_attribs {
+ u8 flashrom_version_string[32];
+ u8 manufacturer_name[32];
+ u32 supported_modes;
+ u32 rsvd0[3];
+ u8 ncsi_ver_string[12];
+ u32 default_extended_timeout;
+ u8 controller_model_number[32];
+ u8 controller_description[64];
+ u8 controller_serial_number[32];
+ u8 ip_version_string[32];
+ u8 firmware_version_string[32];
+ u8 bios_version_string[32];
+ u8 redboot_version_string[32];
+ u8 driver_version_string[32];
+ u8 fw_on_flash_version_string[32];
+ u32 functionalities_supported;
+ u16 max_cdblength;
+ u8 asic_revision;
+ u8 generational_guid[16];
+ u8 hba_port_count;
+ u16 default_link_down_timeout;
+ u8 iscsi_ver_min_max;
+ u8 multifunction_device;
+ u8 cache_valid;
+ u8 hba_status;
+ u8 max_domains_supported;
+ u8 phy_port;
+ u32 firmware_post_status;
+ u32 hba_mtu[8];
+ u32 rsvd1[4];
+};
+
+struct mgmt_controller_attrib {
+ struct mgmt_hba_attribs hba_attribs;
+ u16 pci_vendor_id;
+ u16 pci_device_id;
+ u16 pci_sub_vendor_id;
+ u16 pci_sub_system_id;
+ u8 pci_bus_number;
+ u8 pci_device_number;
+ u8 pci_function_number;
+ u8 interface_type;
+ u64 unique_identifier;
+ u32 rsvd0[5];
+};
+
+struct ocrdma_get_ctrl_attribs_rsp {
+ struct ocrdma_mbx_hdr hdr;
+ struct mgmt_controller_attrib ctrl_attribs;
+};
+
+
#endif /* __OCRDMA_SLI_H__ */
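Most 64-bit counters in the stats structures above arrive from firmware as _lo/_hi 32-bit pairs; the convert_to_64bit() helper added to ocrdma.h splices them back together, e.g.:

	u32 lo = 0x89abcdef, hi = 0x01234567;
	u64 frames = convert_to_64bit(lo, hi);	/* 0x0123456789abcdef */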
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
new file mode 100644
index 00000000000..41a9aec9998
--- /dev/null
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
@@ -0,0 +1,616 @@
+/*******************************************************************
+ * This file is part of the Emulex RoCE Device Driver for *
+ * RoCE (RDMA over Converged Ethernet) adapters. *
+ * Copyright (C) 2008-2014 Emulex. All rights reserved. *
+ * EMULEX and SLI are trademarks of Emulex. *
+ * www.emulex.com *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of version 2 of the GNU General *
+ * Public License as published by the Free Software Foundation. *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID. See the GNU General Public License for *
+ * more details, a copy of which can be found in the file COPYING *
+ * included with this package. *
+ *
+ * Contact Information:
+ * linux-drivers@emulex.com
+ *
+ * Emulex
+ * 3333 Susan Street
+ * Costa Mesa, CA 92626
+ *******************************************************************/
+
+#include <rdma/ib_addr.h>
+#include "ocrdma_stats.h"
+
+static struct dentry *ocrdma_dbgfs_dir;
+
+static int ocrdma_add_stat(char *start, char *pcur,
+ char *name, u64 count)
+{
+ char buff[128] = {0};
+ int cpy_len = 0;
+
+ snprintf(buff, 128, "%s: %llu\n", name, count);
+ cpy_len = strlen(buff);
+
+ if (pcur + cpy_len > start + OCRDMA_MAX_DBGFS_MEM) {
+ pr_err("%s: No space in stats buff\n", __func__);
+ return 0;
+ }
+
+ memcpy(pcur, buff, cpy_len);
+ return cpy_len;
+}
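ocrdma_add_stat() returns the byte count it appended (0 when the buffer would overflow), so the formatters below just advance a cursor; a minimal usage sketch:

	char *pcur = stats;	/* stats = the OCRDMA_MAX_DBGFS_MEM buffer */
	pcur += ocrdma_add_stat(stats, pcur, "example_counter", 42ULL);
	/* on exhaustion the call logs an error and appends nothing */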
+
+static bool ocrdma_alloc_stats_mem(struct ocrdma_dev *dev)
+{
+ struct stats_mem *mem = &dev->stats_mem;
+
+	/* Alloc mbox command mem */
+ mem->size = max_t(u32, sizeof(struct ocrdma_rdma_stats_req),
+ sizeof(struct ocrdma_rdma_stats_resp));
+
+ mem->va = dma_alloc_coherent(&dev->nic_info.pdev->dev, mem->size,
+ &mem->pa, GFP_KERNEL);
+ if (!mem->va) {
+ pr_err("%s: stats mbox allocation failed\n", __func__);
+ return false;
+ }
+
+ memset(mem->va, 0, mem->size);
+
+ /* Alloc debugfs mem */
+ mem->debugfs_mem = kzalloc(OCRDMA_MAX_DBGFS_MEM, GFP_KERNEL);
+ if (!mem->debugfs_mem) {
+ pr_err("%s: stats debugfs mem allocation failed\n", __func__);
+ return false;
+ }
+
+ return true;
+}
+
+static void ocrdma_release_stats_mem(struct ocrdma_dev *dev)
+{
+ struct stats_mem *mem = &dev->stats_mem;
+
+ if (mem->va)
+ dma_free_coherent(&dev->nic_info.pdev->dev, mem->size,
+ mem->va, mem->pa);
+ kfree(mem->debugfs_mem);
+}
+
+static char *ocrdma_resource_stats(struct ocrdma_dev *dev)
+{
+ char *stats = dev->stats_mem.debugfs_mem, *pcur;
+ struct ocrdma_rdma_stats_resp *rdma_stats =
+ (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
+ struct ocrdma_rsrc_stats *rsrc_stats = &rdma_stats->act_rsrc_stats;
+
+ memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM));
+
+ pcur = stats;
+ pcur += ocrdma_add_stat(stats, pcur, "active_dpp_pds",
+ (u64)rsrc_stats->dpp_pds);
+ pcur += ocrdma_add_stat(stats, pcur, "active_non_dpp_pds",
+ (u64)rsrc_stats->non_dpp_pds);
+ pcur += ocrdma_add_stat(stats, pcur, "active_rc_dpp_qps",
+ (u64)rsrc_stats->rc_dpp_qps);
+ pcur += ocrdma_add_stat(stats, pcur, "active_uc_dpp_qps",
+ (u64)rsrc_stats->uc_dpp_qps);
+ pcur += ocrdma_add_stat(stats, pcur, "active_ud_dpp_qps",
+ (u64)rsrc_stats->ud_dpp_qps);
+ pcur += ocrdma_add_stat(stats, pcur, "active_rc_non_dpp_qps",
+ (u64)rsrc_stats->rc_non_dpp_qps);
+ pcur += ocrdma_add_stat(stats, pcur, "active_uc_non_dpp_qps",
+ (u64)rsrc_stats->uc_non_dpp_qps);
+ pcur += ocrdma_add_stat(stats, pcur, "active_ud_non_dpp_qps",
+ (u64)rsrc_stats->ud_non_dpp_qps);
+ pcur += ocrdma_add_stat(stats, pcur, "active_srqs",
+ (u64)rsrc_stats->srqs);
+ pcur += ocrdma_add_stat(stats, pcur, "active_rbqs",
+ (u64)rsrc_stats->rbqs);
+ pcur += ocrdma_add_stat(stats, pcur, "active_64K_nsmr",
+ (u64)rsrc_stats->r64K_nsmr);
+ pcur += ocrdma_add_stat(stats, pcur, "active_64K_to_2M_nsmr",
+ (u64)rsrc_stats->r64K_to_2M_nsmr);
+ pcur += ocrdma_add_stat(stats, pcur, "active_2M_to_44M_nsmr",
+ (u64)rsrc_stats->r2M_to_44M_nsmr);
+ pcur += ocrdma_add_stat(stats, pcur, "active_44M_to_1G_nsmr",
+ (u64)rsrc_stats->r44M_to_1G_nsmr);
+ pcur += ocrdma_add_stat(stats, pcur, "active_1G_to_4G_nsmr",
+ (u64)rsrc_stats->r1G_to_4G_nsmr);
+ pcur += ocrdma_add_stat(stats, pcur, "active_nsmr_count_4G_to_32G",
+ (u64)rsrc_stats->nsmr_count_4G_to_32G);
+ pcur += ocrdma_add_stat(stats, pcur, "active_32G_to_64G_nsmr",
+ (u64)rsrc_stats->r32G_to_64G_nsmr);
+ pcur += ocrdma_add_stat(stats, pcur, "active_64G_to_128G_nsmr",
+ (u64)rsrc_stats->r64G_to_128G_nsmr);
+ pcur += ocrdma_add_stat(stats, pcur, "active_128G_to_higher_nsmr",
+ (u64)rsrc_stats->r128G_to_higher_nsmr);
+ pcur += ocrdma_add_stat(stats, pcur, "active_embedded_nsmr",
+ (u64)rsrc_stats->embedded_nsmr);
+ pcur += ocrdma_add_stat(stats, pcur, "active_frmr",
+ (u64)rsrc_stats->frmr);
+ pcur += ocrdma_add_stat(stats, pcur, "active_prefetch_qps",
+ (u64)rsrc_stats->prefetch_qps);
+ pcur += ocrdma_add_stat(stats, pcur, "active_ondemand_qps",
+ (u64)rsrc_stats->ondemand_qps);
+ pcur += ocrdma_add_stat(stats, pcur, "active_phy_mr",
+ (u64)rsrc_stats->phy_mr);
+ pcur += ocrdma_add_stat(stats, pcur, "active_mw",
+ (u64)rsrc_stats->mw);
+
+ /* Print the threshold stats */
+ rsrc_stats = &rdma_stats->th_rsrc_stats;
+
+ pcur += ocrdma_add_stat(stats, pcur, "threshold_dpp_pds",
+ (u64)rsrc_stats->dpp_pds);
+ pcur += ocrdma_add_stat(stats, pcur, "threshold_non_dpp_pds",
+ (u64)rsrc_stats->non_dpp_pds);
+ pcur += ocrdma_add_stat(stats, pcur, "threshold_rc_dpp_qps",
+ (u64)rsrc_stats->rc_dpp_qps);
+ pcur += ocrdma_add_stat(stats, pcur, "threshold_uc_dpp_qps",
+ (u64)rsrc_stats->uc_dpp_qps);
+ pcur += ocrdma_add_stat(stats, pcur, "threshold_ud_dpp_qps",
+ (u64)rsrc_stats->ud_dpp_qps);
+ pcur += ocrdma_add_stat(stats, pcur, "threshold_rc_non_dpp_qps",
+ (u64)rsrc_stats->rc_non_dpp_qps);
+ pcur += ocrdma_add_stat(stats, pcur, "threshold_uc_non_dpp_qps",
+ (u64)rsrc_stats->uc_non_dpp_qps);
+ pcur += ocrdma_add_stat(stats, pcur, "threshold_ud_non_dpp_qps",
+ (u64)rsrc_stats->ud_non_dpp_qps);
+ pcur += ocrdma_add_stat(stats, pcur, "threshold_srqs",
+ (u64)rsrc_stats->srqs);
+ pcur += ocrdma_add_stat(stats, pcur, "threshold_rbqs",
+ (u64)rsrc_stats->rbqs);
+ pcur += ocrdma_add_stat(stats, pcur, "threshold_64K_nsmr",
+ (u64)rsrc_stats->r64K_nsmr);
+ pcur += ocrdma_add_stat(stats, pcur, "threshold_64K_to_2M_nsmr",
+ (u64)rsrc_stats->r64K_to_2M_nsmr);
+ pcur += ocrdma_add_stat(stats, pcur, "threshold_2M_to_44M_nsmr",
+ (u64)rsrc_stats->r2M_to_44M_nsmr);
+ pcur += ocrdma_add_stat(stats, pcur, "threshold_44M_to_1G_nsmr",
+ (u64)rsrc_stats->r44M_to_1G_nsmr);
+ pcur += ocrdma_add_stat(stats, pcur, "threshold_1G_to_4G_nsmr",
+ (u64)rsrc_stats->r1G_to_4G_nsmr);
+ pcur += ocrdma_add_stat(stats, pcur, "threshold_nsmr_count_4G_to_32G",
+ (u64)rsrc_stats->nsmr_count_4G_to_32G);
+ pcur += ocrdma_add_stat(stats, pcur, "threshold_32G_to_64G_nsmr",
+ (u64)rsrc_stats->r32G_to_64G_nsmr);
+ pcur += ocrdma_add_stat(stats, pcur, "threshold_64G_to_128G_nsmr",
+ (u64)rsrc_stats->r64G_to_128G_nsmr);
+ pcur += ocrdma_add_stat(stats, pcur, "threshold_128G_to_higher_nsmr",
+ (u64)rsrc_stats->r128G_to_higher_nsmr);
+ pcur += ocrdma_add_stat(stats, pcur, "threshold_embedded_nsmr",
+ (u64)rsrc_stats->embedded_nsmr);
+ pcur += ocrdma_add_stat(stats, pcur, "threshold_frmr",
+ (u64)rsrc_stats->frmr);
+ pcur += ocrdma_add_stat(stats, pcur, "threshold_prefetch_qps",
+ (u64)rsrc_stats->prefetch_qps);
+ pcur += ocrdma_add_stat(stats, pcur, "threshold_ondemand_qps",
+ (u64)rsrc_stats->ondemand_qps);
+ pcur += ocrdma_add_stat(stats, pcur, "threshold_phy_mr",
+ (u64)rsrc_stats->phy_mr);
+ pcur += ocrdma_add_stat(stats, pcur, "threshold_mw",
+ (u64)rsrc_stats->mw);
+ return stats;
+}
+
+static char *ocrdma_rx_stats(struct ocrdma_dev *dev)
+{
+ char *stats = dev->stats_mem.debugfs_mem, *pcur;
+ struct ocrdma_rdma_stats_resp *rdma_stats =
+ (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
+ struct ocrdma_rx_stats *rx_stats = &rdma_stats->rx_stats;
+
+ memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM));
+
+ pcur = stats;
+ pcur += ocrdma_add_stat
+ (stats, pcur, "roce_frame_bytes",
+ convert_to_64bit(rx_stats->roce_frame_bytes_lo,
+ rx_stats->roce_frame_bytes_hi));
+ pcur += ocrdma_add_stat(stats, pcur, "roce_frame_icrc_drops",
+ (u64)rx_stats->roce_frame_icrc_drops);
+ pcur += ocrdma_add_stat(stats, pcur, "roce_frame_payload_len_drops",
+ (u64)rx_stats->roce_frame_payload_len_drops);
+ pcur += ocrdma_add_stat(stats, pcur, "ud_drops",
+ (u64)rx_stats->ud_drops);
+ pcur += ocrdma_add_stat(stats, pcur, "qp1_drops",
+ (u64)rx_stats->qp1_drops);
+ pcur += ocrdma_add_stat(stats, pcur, "psn_error_request_packets",
+ (u64)rx_stats->psn_error_request_packets);
+ pcur += ocrdma_add_stat(stats, pcur, "psn_error_resp_packets",
+ (u64)rx_stats->psn_error_resp_packets);
+ pcur += ocrdma_add_stat(stats, pcur, "rnr_nak_timeouts",
+ (u64)rx_stats->rnr_nak_timeouts);
+ pcur += ocrdma_add_stat(stats, pcur, "rnr_nak_receives",
+ (u64)rx_stats->rnr_nak_receives);
+ pcur += ocrdma_add_stat(stats, pcur, "roce_frame_rxmt_drops",
+ (u64)rx_stats->roce_frame_rxmt_drops);
+ pcur += ocrdma_add_stat(stats, pcur, "nak_count_psn_sequence_errors",
+ (u64)rx_stats->nak_count_psn_sequence_errors);
+ pcur += ocrdma_add_stat(stats, pcur, "rc_drop_count_lookup_errors",
+ (u64)rx_stats->rc_drop_count_lookup_errors);
+ pcur += ocrdma_add_stat(stats, pcur, "rq_rnr_naks",
+ (u64)rx_stats->rq_rnr_naks);
+ pcur += ocrdma_add_stat(stats, pcur, "srq_rnr_naks",
+ (u64)rx_stats->srq_rnr_naks);
+ pcur += ocrdma_add_stat(stats, pcur, "roce_frames",
+ convert_to_64bit(rx_stats->roce_frames_lo,
+ rx_stats->roce_frames_hi));
+
+ return stats;
+}
+
+static char *ocrdma_tx_stats(struct ocrdma_dev *dev)
+{
+ char *stats = dev->stats_mem.debugfs_mem, *pcur;
+ struct ocrdma_rdma_stats_resp *rdma_stats =
+ (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
+ struct ocrdma_tx_stats *tx_stats = &rdma_stats->tx_stats;
+
+ memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM));
+
+ pcur = stats;
+ pcur += ocrdma_add_stat(stats, pcur, "send_pkts",
+ convert_to_64bit(tx_stats->send_pkts_lo,
+ tx_stats->send_pkts_hi));
+ pcur += ocrdma_add_stat(stats, pcur, "write_pkts",
+ convert_to_64bit(tx_stats->write_pkts_lo,
+ tx_stats->write_pkts_hi));
+ pcur += ocrdma_add_stat(stats, pcur, "read_pkts",
+ convert_to_64bit(tx_stats->read_pkts_lo,
+ tx_stats->read_pkts_hi));
+ pcur += ocrdma_add_stat(stats, pcur, "read_rsp_pkts",
+ convert_to_64bit(tx_stats->read_rsp_pkts_lo,
+ tx_stats->read_rsp_pkts_hi));
+ pcur += ocrdma_add_stat(stats, pcur, "ack_pkts",
+ convert_to_64bit(tx_stats->ack_pkts_lo,
+ tx_stats->ack_pkts_hi));
+ pcur += ocrdma_add_stat(stats, pcur, "send_bytes",
+ convert_to_64bit(tx_stats->send_bytes_lo,
+ tx_stats->send_bytes_hi));
+ pcur += ocrdma_add_stat(stats, pcur, "write_bytes",
+ convert_to_64bit(tx_stats->write_bytes_lo,
+ tx_stats->write_bytes_hi));
+ pcur += ocrdma_add_stat(stats, pcur, "read_req_bytes",
+ convert_to_64bit(tx_stats->read_req_bytes_lo,
+ tx_stats->read_req_bytes_hi));
+ pcur += ocrdma_add_stat(stats, pcur, "read_rsp_bytes",
+ convert_to_64bit(tx_stats->read_rsp_bytes_lo,
+ tx_stats->read_rsp_bytes_hi));
+ pcur += ocrdma_add_stat(stats, pcur, "ack_timeouts",
+ (u64)tx_stats->ack_timeouts);
+
+ return stats;
+}
+
+static char *ocrdma_wqe_stats(struct ocrdma_dev *dev)
+{
+ char *stats = dev->stats_mem.debugfs_mem, *pcur;
+ struct ocrdma_rdma_stats_resp *rdma_stats =
+ (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
+ struct ocrdma_wqe_stats *wqe_stats = &rdma_stats->wqe_stats;
+
+ memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM));
+
+ pcur = stats;
+ pcur += ocrdma_add_stat(stats, pcur, "large_send_rc_wqes",
+ convert_to_64bit(wqe_stats->large_send_rc_wqes_lo,
+ wqe_stats->large_send_rc_wqes_hi));
+ pcur += ocrdma_add_stat(stats, pcur, "large_write_rc_wqes",
+ convert_to_64bit(wqe_stats->large_write_rc_wqes_lo,
+ wqe_stats->large_write_rc_wqes_hi));
+ pcur += ocrdma_add_stat(stats, pcur, "read_wqes",
+ convert_to_64bit(wqe_stats->read_wqes_lo,
+ wqe_stats->read_wqes_hi));
+ pcur += ocrdma_add_stat(stats, pcur, "frmr_wqes",
+ convert_to_64bit(wqe_stats->frmr_wqes_lo,
+ wqe_stats->frmr_wqes_hi));
+ pcur += ocrdma_add_stat(stats, pcur, "mw_bind_wqes",
+ convert_to_64bit(wqe_stats->mw_bind_wqes_lo,
+ wqe_stats->mw_bind_wqes_hi));
+ pcur += ocrdma_add_stat(stats, pcur, "invalidate_wqes",
+ convert_to_64bit(wqe_stats->invalidate_wqes_lo,
+ wqe_stats->invalidate_wqes_hi));
+ pcur += ocrdma_add_stat(stats, pcur, "dpp_wqe_drops",
+ (u64)wqe_stats->dpp_wqe_drops);
+ return stats;
+}
+
+static char *ocrdma_db_errstats(struct ocrdma_dev *dev)
+{
+ char *stats = dev->stats_mem.debugfs_mem, *pcur;
+ struct ocrdma_rdma_stats_resp *rdma_stats =
+ (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
+ struct ocrdma_db_err_stats *db_err_stats = &rdma_stats->db_err_stats;
+
+ memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM));
+
+ pcur = stats;
+ pcur += ocrdma_add_stat(stats, pcur, "sq_doorbell_errors",
+ (u64)db_err_stats->sq_doorbell_errors);
+ pcur += ocrdma_add_stat(stats, pcur, "cq_doorbell_errors",
+ (u64)db_err_stats->cq_doorbell_errors);
+ pcur += ocrdma_add_stat(stats, pcur, "rq_srq_doorbell_errors",
+ (u64)db_err_stats->rq_srq_doorbell_errors);
+ pcur += ocrdma_add_stat(stats, pcur, "cq_overflow_errors",
+ (u64)db_err_stats->cq_overflow_errors);
+ return stats;
+}
+
+static char *ocrdma_rxqp_errstats(struct ocrdma_dev *dev)
+{
+ char *stats = dev->stats_mem.debugfs_mem, *pcur;
+ struct ocrdma_rdma_stats_resp *rdma_stats =
+ (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
+ struct ocrdma_rx_qp_err_stats *rx_qp_err_stats =
+ &rdma_stats->rx_qp_err_stats;
+
+ memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM));
+
+ pcur = stats;
+ pcur += ocrdma_add_stat(stats, pcur, "nak_invalid_requst_errors",
+ (u64)rx_qp_err_stats->nak_invalid_requst_errors);
+ pcur += ocrdma_add_stat(stats, pcur, "nak_remote_operation_errors",
+ (u64)rx_qp_err_stats->nak_remote_operation_errors);
+ pcur += ocrdma_add_stat(stats, pcur, "nak_count_remote_access_errors",
+ (u64)rx_qp_err_stats->nak_count_remote_access_errors);
+ pcur += ocrdma_add_stat(stats, pcur, "local_length_errors",
+ (u64)rx_qp_err_stats->local_length_errors);
+ pcur += ocrdma_add_stat(stats, pcur, "local_protection_errors",
+ (u64)rx_qp_err_stats->local_protection_errors);
+ pcur += ocrdma_add_stat(stats, pcur, "local_qp_operation_errors",
+ (u64)rx_qp_err_stats->local_qp_operation_errors);
+ return stats;
+}
+
+static char *ocrdma_txqp_errstats(struct ocrdma_dev *dev)
+{
+ char *stats = dev->stats_mem.debugfs_mem, *pcur;
+ struct ocrdma_rdma_stats_resp *rdma_stats =
+ (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
+ struct ocrdma_tx_qp_err_stats *tx_qp_err_stats =
+ &rdma_stats->tx_qp_err_stats;
+
+ memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM));
+
+ pcur = stats;
+ pcur += ocrdma_add_stat(stats, pcur, "local_length_errors",
+ (u64)tx_qp_err_stats->local_length_errors);
+ pcur += ocrdma_add_stat(stats, pcur, "local_protection_errors",
+ (u64)tx_qp_err_stats->local_protection_errors);
+ pcur += ocrdma_add_stat(stats, pcur, "local_qp_operation_errors",
+ (u64)tx_qp_err_stats->local_qp_operation_errors);
+ pcur += ocrdma_add_stat(stats, pcur, "retry_count_exceeded_errors",
+ (u64)tx_qp_err_stats->retry_count_exceeded_errors);
+ pcur += ocrdma_add_stat(stats, pcur, "rnr_retry_count_exceeded_errors",
+ (u64)tx_qp_err_stats->rnr_retry_count_exceeded_errors);
+ return stats;
+}
+
+static char *ocrdma_tx_dbg_stats(struct ocrdma_dev *dev)
+{
+ int i;
+ char *pstats = dev->stats_mem.debugfs_mem;
+ struct ocrdma_rdma_stats_resp *rdma_stats =
+ (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
+ struct ocrdma_tx_dbg_stats *tx_dbg_stats =
+ &rdma_stats->tx_dbg_stats;
+
+ memset(pstats, 0, (OCRDMA_MAX_DBGFS_MEM));
+
+ for (i = 0; i < 100; i++)
+ pstats += snprintf(pstats, 80, "DW[%d] = 0x%x\n", i,
+ tx_dbg_stats->data[i]);
+
+ return dev->stats_mem.debugfs_mem;
+}
+
+static char *ocrdma_rx_dbg_stats(struct ocrdma_dev *dev)
+{
+ int i;
+ char *pstats = dev->stats_mem.debugfs_mem;
+ struct ocrdma_rdma_stats_resp *rdma_stats =
+ (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
+ struct ocrdma_rx_dbg_stats *rx_dbg_stats =
+ &rdma_stats->rx_dbg_stats;
+
+ memset(pstats, 0, (OCRDMA_MAX_DBGFS_MEM));
+
+ for (i = 0; i < 200; i++)
+ pstats += snprintf(pstats, 80, "DW[%d] = 0x%x\n", i,
+ rx_dbg_stats->data[i]);
+
+ return dev->stats_mem.debugfs_mem;
+}
+
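+/* Refresh the counters from firmware at most once per second;
+ * faster debugfs reads are served from the cached snapshot.
+ */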
+static void ocrdma_update_stats(struct ocrdma_dev *dev)
+{
+ ulong now = jiffies, secs;
+ int status = 0;
+
+ secs = jiffies_to_msecs(now - dev->last_stats_time) / 1000U;
+ if (secs) {
+ /* update */
+ status = ocrdma_mbx_rdma_stats(dev, false);
+ if (status)
+ pr_err("%s: stats mbox failed with status = %d\n",
+ __func__, status);
+ dev->last_stats_time = jiffies;
+ }
+}
+
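+/* debugfs read handler shared by all stat files: regenerate the
+ * text for the file's stat group and copy it out in one shot, so a
+ * nonzero *ppos simply reports EOF.
+ */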
+static ssize_t ocrdma_dbgfs_ops_read(struct file *filp, char __user *buffer,
+ size_t usr_buf_len, loff_t *ppos)
+{
+ struct ocrdma_stats *pstats = filp->private_data;
+ struct ocrdma_dev *dev = pstats->dev;
+ ssize_t status = 0;
+ char *data = NULL;
+
+ /* No partial reads */
+ if (*ppos != 0)
+ return 0;
+
+ mutex_lock(&dev->stats_lock);
+
+ ocrdma_update_stats(dev);
+
+ switch (pstats->type) {
+ case OCRDMA_RSRC_STATS:
+ data = ocrdma_resource_stats(dev);
+ break;
+ case OCRDMA_RXSTATS:
+ data = ocrdma_rx_stats(dev);
+ break;
+ case OCRDMA_WQESTATS:
+ data = ocrdma_wqe_stats(dev);
+ break;
+ case OCRDMA_TXSTATS:
+ data = ocrdma_tx_stats(dev);
+ break;
+ case OCRDMA_DB_ERRSTATS:
+ data = ocrdma_db_errstats(dev);
+ break;
+ case OCRDMA_RXQP_ERRSTATS:
+ data = ocrdma_rxqp_errstats(dev);
+ break;
+ case OCRDMA_TXQP_ERRSTATS:
+ data = ocrdma_txqp_errstats(dev);
+ break;
+ case OCRDMA_TX_DBG_STATS:
+ data = ocrdma_tx_dbg_stats(dev);
+ break;
+ case OCRDMA_RX_DBG_STATS:
+ data = ocrdma_rx_dbg_stats(dev);
+ break;
+
+ default:
+ status = -EFAULT;
+ goto exit;
+ }
+
+ if (usr_buf_len < strlen(data)) {
+ status = -ENOSPC;
+ goto exit;
+ }
+
+ status = simple_read_from_buffer(buffer, usr_buf_len, ppos, data,
+ strlen(data));
+exit:
+ mutex_unlock(&dev->stats_lock);
+ return status;
+}
+
+static const struct file_operations ocrdma_dbg_ops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = ocrdma_dbgfs_ops_read,
+};
+
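+/* Create a per-device directory under the ocrdma debugfs root with
+ * one read-only file per stat group; on any failure the partially
+ * built tree is removed and the port simply exposes no stats.
+ */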
+void ocrdma_add_port_stats(struct ocrdma_dev *dev)
+{
+ if (!ocrdma_dbgfs_dir)
+ return;
+
+	/* Create per-port stats base dir */
+ dev->dir = debugfs_create_dir(dev->ibdev.name, ocrdma_dbgfs_dir);
+ if (!dev->dir)
+ goto err;
+
+ dev->rsrc_stats.type = OCRDMA_RSRC_STATS;
+ dev->rsrc_stats.dev = dev;
+ if (!debugfs_create_file("resource_stats", S_IRUSR, dev->dir,
+ &dev->rsrc_stats, &ocrdma_dbg_ops))
+ goto err;
+
+ dev->rx_stats.type = OCRDMA_RXSTATS;
+ dev->rx_stats.dev = dev;
+ if (!debugfs_create_file("rx_stats", S_IRUSR, dev->dir,
+ &dev->rx_stats, &ocrdma_dbg_ops))
+ goto err;
+
+ dev->wqe_stats.type = OCRDMA_WQESTATS;
+ dev->wqe_stats.dev = dev;
+ if (!debugfs_create_file("wqe_stats", S_IRUSR, dev->dir,
+ &dev->wqe_stats, &ocrdma_dbg_ops))
+ goto err;
+
+ dev->tx_stats.type = OCRDMA_TXSTATS;
+ dev->tx_stats.dev = dev;
+ if (!debugfs_create_file("tx_stats", S_IRUSR, dev->dir,
+ &dev->tx_stats, &ocrdma_dbg_ops))
+ goto err;
+
+ dev->db_err_stats.type = OCRDMA_DB_ERRSTATS;
+ dev->db_err_stats.dev = dev;
+ if (!debugfs_create_file("db_err_stats", S_IRUSR, dev->dir,
+ &dev->db_err_stats, &ocrdma_dbg_ops))
+ goto err;
+
+ dev->tx_qp_err_stats.type = OCRDMA_TXQP_ERRSTATS;
+ dev->tx_qp_err_stats.dev = dev;
+ if (!debugfs_create_file("tx_qp_err_stats", S_IRUSR, dev->dir,
+ &dev->tx_qp_err_stats, &ocrdma_dbg_ops))
+ goto err;
+
+ dev->rx_qp_err_stats.type = OCRDMA_RXQP_ERRSTATS;
+ dev->rx_qp_err_stats.dev = dev;
+ if (!debugfs_create_file("rx_qp_err_stats", S_IRUSR, dev->dir,
+ &dev->rx_qp_err_stats, &ocrdma_dbg_ops))
+ goto err;
+
+ dev->tx_dbg_stats.type = OCRDMA_TX_DBG_STATS;
+ dev->tx_dbg_stats.dev = dev;
+ if (!debugfs_create_file("tx_dbg_stats", S_IRUSR, dev->dir,
+ &dev->tx_dbg_stats, &ocrdma_dbg_ops))
+ goto err;
+
+ dev->rx_dbg_stats.type = OCRDMA_RX_DBG_STATS;
+ dev->rx_dbg_stats.dev = dev;
+ if (!debugfs_create_file("rx_dbg_stats", S_IRUSR, dev->dir,
+ &dev->rx_dbg_stats, &ocrdma_dbg_ops))
+ goto err;
+
+ /* Now create dma_mem for stats mbx command */
+ if (!ocrdma_alloc_stats_mem(dev))
+ goto err;
+
+ mutex_init(&dev->stats_lock);
+
+ return;
+err:
+ ocrdma_release_stats_mem(dev);
+ debugfs_remove_recursive(dev->dir);
+ dev->dir = NULL;
+}
+
+void ocrdma_rem_port_stats(struct ocrdma_dev *dev)
+{
+ if (!dev->dir)
+ return;
+ mutex_destroy(&dev->stats_lock);
+ ocrdma_release_stats_mem(dev);
+	debugfs_remove_recursive(dev->dir);
+}
+
+void ocrdma_init_debugfs(void)
+{
+ /* Create base dir in debugfs root dir */
+ ocrdma_dbgfs_dir = debugfs_create_dir("ocrdma", NULL);
+}
+
+void ocrdma_rem_debugfs(void)
+{
+ debugfs_remove_recursive(ocrdma_dbgfs_dir);
+}
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.h b/drivers/infiniband/hw/ocrdma/ocrdma_stats.h
new file mode 100644
index 00000000000..5f5e20c46d7
--- /dev/null
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.h
@@ -0,0 +1,54 @@
+/*******************************************************************
+ * This file is part of the Emulex RoCE Device Driver for *
+ * RoCE (RDMA over Converged Ethernet) adapters. *
+ * Copyright (C) 2008-2014 Emulex. All rights reserved. *
+ * EMULEX and SLI are trademarks of Emulex. *
+ * www.emulex.com *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of version 2 of the GNU General *
+ * Public License as published by the Free Software Foundation. *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID. See the GNU General Public License for *
+ * more details, a copy of which can be found in the file COPYING *
+ * included with this package. *
+ *
+ * Contact Information:
+ * linux-drivers@emulex.com
+ *
+ * Emulex
+ * 3333 Susan Street
+ * Costa Mesa, CA 92626
+ *******************************************************************/
+
+#ifndef __OCRDMA_STATS_H__
+#define __OCRDMA_STATS_H__
+
+#include <linux/debugfs.h>
+#include "ocrdma.h"
+#include "ocrdma_hw.h"
+
+#define OCRDMA_MAX_DBGFS_MEM 4096
+
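+/* One value per debugfs file created for each device by
+ * ocrdma_add_port_stats(); ocrdma_dbgfs_ops_read() dispatches on it.
+ */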
+enum OCRDMA_STATS_TYPE {
+ OCRDMA_RSRC_STATS,
+ OCRDMA_RXSTATS,
+ OCRDMA_WQESTATS,
+ OCRDMA_TXSTATS,
+ OCRDMA_DB_ERRSTATS,
+ OCRDMA_RXQP_ERRSTATS,
+ OCRDMA_TXQP_ERRSTATS,
+ OCRDMA_TX_DBG_STATS,
+ OCRDMA_RX_DBG_STATS
+};
+
+void ocrdma_rem_debugfs(void);
+void ocrdma_init_debugfs(void);
+void ocrdma_rem_port_stats(struct ocrdma_dev *dev);
+void ocrdma_add_port_stats(struct ocrdma_dev *dev);
+
+#endif /* __OCRDMA_STATS_H__ */
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index b29a4246ef4..edf6211d84b 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -53,7 +53,7 @@ int ocrdma_query_gid(struct ib_device *ibdev, u8 port,
dev = get_ocrdma_dev(ibdev);
memset(sgid, 0, sizeof(*sgid));
- if (index >= OCRDMA_MAX_SGID)
+	if (index >= OCRDMA_MAX_SGID)
return -EINVAL;
memcpy(sgid, &dev->sgid_tbl[index], sizeof(*sgid));
@@ -75,20 +75,21 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr)
attr->vendor_part_id = dev->nic_info.pdev->device;
attr->hw_ver = 0;
attr->max_qp = dev->attr.max_qp;
- attr->max_ah = dev->attr.max_qp;
+ attr->max_ah = OCRDMA_MAX_AH;
attr->max_qp_wr = dev->attr.max_wqe;
attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
IB_DEVICE_RC_RNR_NAK_GEN |
IB_DEVICE_SHUTDOWN_PORT |
IB_DEVICE_SYS_IMAGE_GUID |
- IB_DEVICE_LOCAL_DMA_LKEY;
+ IB_DEVICE_LOCAL_DMA_LKEY |
+ IB_DEVICE_MEM_MGT_EXTENSIONS;
attr->max_sge = min(dev->attr.max_send_sge, dev->attr.max_srq_sge);
attr->max_sge_rd = 0;
attr->max_cq = dev->attr.max_cq;
attr->max_cqe = dev->attr.max_cqe;
attr->max_mr = dev->attr.max_mr;
- attr->max_mw = 0;
+ attr->max_mw = dev->attr.max_mw;
attr->max_pd = dev->attr.max_pd;
attr->atomic_cap = 0;
attr->max_fmr = 0;
@@ -96,7 +97,7 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr)
attr->max_qp_rd_atom =
min(dev->attr.max_ord_per_qp, dev->attr.max_ird_per_qp);
attr->max_qp_init_rd_atom = dev->attr.max_ord_per_qp;
- attr->max_srq = (dev->attr.max_qp - 1);
+ attr->max_srq = dev->attr.max_srq;
attr->max_srq_sge = dev->attr.max_srq_sge;
attr->max_srq_wr = dev->attr.max_rqe;
attr->local_ca_ack_delay = dev->attr.local_ca_ack_delay;
@@ -105,6 +106,44 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr)
return 0;
}
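+/* Translate the PHY speed reported by firmware into the nearest
+ * IB speed/width pair (e.g. 40G is reported as QDR x4).
+ */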
+static inline void get_link_speed_and_width(struct ocrdma_dev *dev,
+ u8 *ib_speed, u8 *ib_width)
+{
+ int status;
+ u8 speed;
+
+ status = ocrdma_mbx_get_link_speed(dev, &speed);
+ if (status)
+ speed = OCRDMA_PHYS_LINK_SPEED_ZERO;
+
+ switch (speed) {
+ case OCRDMA_PHYS_LINK_SPEED_1GBPS:
+ *ib_speed = IB_SPEED_SDR;
+ *ib_width = IB_WIDTH_1X;
+ break;
+
+ case OCRDMA_PHYS_LINK_SPEED_10GBPS:
+ *ib_speed = IB_SPEED_QDR;
+ *ib_width = IB_WIDTH_1X;
+ break;
+
+ case OCRDMA_PHYS_LINK_SPEED_20GBPS:
+ *ib_speed = IB_SPEED_DDR;
+ *ib_width = IB_WIDTH_4X;
+ break;
+
+ case OCRDMA_PHYS_LINK_SPEED_40GBPS:
+ *ib_speed = IB_SPEED_QDR;
+ *ib_width = IB_WIDTH_4X;
+ break;
+
+ default:
+ /* Unsupported */
+ *ib_speed = IB_SPEED_SDR;
+ *ib_width = IB_WIDTH_1X;
+ }
+}
+
int ocrdma_query_port(struct ib_device *ibdev,
u8 port, struct ib_port_attr *props)
{
@@ -114,8 +153,8 @@ int ocrdma_query_port(struct ib_device *ibdev,
dev = get_ocrdma_dev(ibdev);
if (port > 1) {
- ocrdma_err("%s(%d) invalid_port=0x%x\n", __func__,
- dev->id, port);
+ pr_err("%s(%d) invalid_port=0x%x\n", __func__,
+ dev->id, port);
return -EINVAL;
}
netdev = dev->nic_info.netdev;
@@ -136,13 +175,13 @@ int ocrdma_query_port(struct ib_device *ibdev,
props->port_cap_flags =
IB_PORT_CM_SUP |
IB_PORT_REINIT_SUP |
- IB_PORT_DEVICE_MGMT_SUP | IB_PORT_VENDOR_CLASS_SUP;
+	    IB_PORT_DEVICE_MGMT_SUP | IB_PORT_VENDOR_CLASS_SUP |
+	    IB_PORT_IP_BASED_GIDS;
props->gid_tbl_len = OCRDMA_MAX_SGID;
props->pkey_tbl_len = 1;
props->bad_pkey_cntr = 0;
props->qkey_viol_cntr = 0;
- props->active_width = IB_WIDTH_1X;
- props->active_speed = 4;
+ get_link_speed_and_width(dev, &props->active_speed,
+ &props->active_width);
props->max_msg_sz = 0x80000000;
props->max_vl_num = 4;
return 0;
@@ -155,8 +194,7 @@ int ocrdma_modify_port(struct ib_device *ibdev, u8 port, int mask,
dev = get_ocrdma_dev(ibdev);
if (port > 1) {
- ocrdma_err("%s(%d) invalid_port=0x%x\n", __func__,
- dev->id, port);
+ pr_err("%s(%d) invalid_port=0x%x\n", __func__, dev->id, port);
return -EINVAL;
}
return 0;
@@ -187,7 +225,7 @@ static void ocrdma_del_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
mutex_lock(&uctx->mm_list_lock);
list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
- if (len != mm->key.len || phy_addr != mm->key.phy_addr)
+ if (len != mm->key.len && phy_addr != mm->key.phy_addr)
continue;
list_del(&mm->entry);
@@ -205,7 +243,7 @@ static bool ocrdma_search_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
mutex_lock(&uctx->mm_list_lock);
list_for_each_entry(mm, &uctx->mm_head, entry) {
- if (len != mm->key.len || phy_addr != mm->key.phy_addr)
+ if (len != mm->key.len && phy_addr != mm->key.phy_addr)
continue;
found = true;
@@ -215,6 +253,108 @@ static bool ocrdma_search_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
return found;
}
+static struct ocrdma_pd *_ocrdma_alloc_pd(struct ocrdma_dev *dev,
+ struct ocrdma_ucontext *uctx,
+ struct ib_udata *udata)
+{
+ struct ocrdma_pd *pd = NULL;
+ int status = 0;
+
+ pd = kzalloc(sizeof(*pd), GFP_KERNEL);
+ if (!pd)
+ return ERR_PTR(-ENOMEM);
+
+ if (udata && uctx) {
+ pd->dpp_enabled =
+ ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R;
+ pd->num_dpp_qp =
+ pd->dpp_enabled ? OCRDMA_PD_MAX_DPP_ENABLED_QP : 0;
+ }
+
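+	/* If the firmware cannot allocate a DPP-capable PD, retry
+	 * once without DPP rather than failing the allocation.
+	 */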
+retry:
+ status = ocrdma_mbx_alloc_pd(dev, pd);
+ if (status) {
+ if (pd->dpp_enabled) {
+ pd->dpp_enabled = false;
+ pd->num_dpp_qp = 0;
+ goto retry;
+ } else {
+ kfree(pd);
+ return ERR_PTR(status);
+ }
+ }
+
+ return pd;
+}
+
+static inline int is_ucontext_pd(struct ocrdma_ucontext *uctx,
+ struct ocrdma_pd *pd)
+{
+	return (uctx->cntxt_pd == pd);
+}
+
+static int _ocrdma_dealloc_pd(struct ocrdma_dev *dev,
+ struct ocrdma_pd *pd)
+{
+ int status = 0;
+
+ status = ocrdma_mbx_dealloc_pd(dev, pd);
+ kfree(pd);
+ return status;
+}
+
+static int ocrdma_alloc_ucontext_pd(struct ocrdma_dev *dev,
+ struct ocrdma_ucontext *uctx,
+ struct ib_udata *udata)
+{
+ int status = 0;
+
+ uctx->cntxt_pd = _ocrdma_alloc_pd(dev, uctx, udata);
+ if (IS_ERR(uctx->cntxt_pd)) {
+ status = PTR_ERR(uctx->cntxt_pd);
+ uctx->cntxt_pd = NULL;
+ goto err;
+ }
+
+ uctx->cntxt_pd->uctx = uctx;
+ uctx->cntxt_pd->ibpd.device = &dev->ibdev;
+err:
+ return status;
+}
+
+static int ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx)
+{
+ int status = 0;
+ struct ocrdma_pd *pd = uctx->cntxt_pd;
+ struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
+
+ BUG_ON(uctx->pd_in_use);
+ uctx->cntxt_pd = NULL;
+ status = _ocrdma_dealloc_pd(dev, pd);
+ return status;
+}
+
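+/* Hand out the per-ucontext PD to one caller at a time; pd_in_use
+ * is protected by mm_list_lock, reused here as a per-context mutex.
+ */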
+static struct ocrdma_pd *ocrdma_get_ucontext_pd(struct ocrdma_ucontext *uctx)
+{
+ struct ocrdma_pd *pd = NULL;
+
+ mutex_lock(&uctx->mm_list_lock);
+ if (!uctx->pd_in_use) {
+ uctx->pd_in_use = true;
+ pd = uctx->cntxt_pd;
+ }
+ mutex_unlock(&uctx->mm_list_lock);
+
+ return pd;
+}
+
+static void ocrdma_release_ucontext_pd(struct ocrdma_ucontext *uctx)
+{
+ mutex_lock(&uctx->mm_list_lock);
+ uctx->pd_in_use = false;
+ mutex_unlock(&uctx->mm_list_lock);
+}
+
struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *ibdev,
struct ib_udata *udata)
{
@@ -230,7 +370,6 @@ struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *ibdev,
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return ERR_PTR(-ENOMEM);
- ctx->dev = dev;
INIT_LIST_HEAD(&ctx->mm_head);
mutex_init(&ctx->mm_list_lock);
@@ -243,18 +382,23 @@ struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *ibdev,
memset(ctx->ah_tbl.va, 0, map_len);
ctx->ah_tbl.len = map_len;
+ memset(&resp, 0, sizeof(resp));
resp.ah_tbl_len = ctx->ah_tbl.len;
resp.ah_tbl_page = ctx->ah_tbl.pa;
status = ocrdma_add_mmap(ctx, resp.ah_tbl_page, resp.ah_tbl_len);
if (status)
goto map_err;
+
+ status = ocrdma_alloc_ucontext_pd(dev, ctx, udata);
+ if (status)
+ goto pd_err;
+
resp.dev_id = dev->id;
resp.max_inline_data = dev->attr.max_inline_data;
resp.wqe_size = dev->attr.wqe_size;
resp.rqe_size = dev->attr.rqe_size;
resp.dpp_wqe_size = dev->attr.wqe_size;
- resp.rsvd = 0;
memcpy(resp.fw_ver, dev->attr.fw_ver, sizeof(resp.fw_ver));
status = ib_copy_to_udata(udata, &resp, sizeof(resp));
@@ -263,6 +407,7 @@ struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *ibdev,
return &ctx->ibucontext;
cpy_err:
+pd_err:
ocrdma_del_mmap(ctx, ctx->ah_tbl.pa, ctx->ah_tbl.len);
map_err:
dma_free_coherent(&pdev->dev, ctx->ah_tbl.len, ctx->ah_tbl.va,
@@ -273,9 +418,13 @@ map_err:
int ocrdma_dealloc_ucontext(struct ib_ucontext *ibctx)
{
+ int status = 0;
struct ocrdma_mm *mm, *tmp;
struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ibctx);
- struct pci_dev *pdev = uctx->dev->nic_info.pdev;
+ struct ocrdma_dev *dev = get_ocrdma_dev(ibctx->device);
+ struct pci_dev *pdev = dev->nic_info.pdev;
+
+ status = ocrdma_dealloc_ucontext_pd(uctx);
ocrdma_del_mmap(uctx, uctx->ah_tbl.pa, uctx->ah_tbl.len);
dma_free_coherent(&pdev->dev, uctx->ah_tbl.len, uctx->ah_tbl.va,
@@ -286,13 +435,13 @@ int ocrdma_dealloc_ucontext(struct ib_ucontext *ibctx)
kfree(mm);
}
kfree(uctx);
- return 0;
+ return status;
}
int ocrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
struct ocrdma_ucontext *ucontext = get_ocrdma_ucontext(context);
- struct ocrdma_dev *dev = ucontext->dev;
+ struct ocrdma_dev *dev = get_ocrdma_dev(context->device);
unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT;
u64 unmapped_db = (u64) dev->nic_info.unmapped_db;
unsigned long len = (vma->vm_end - vma->vm_start);
@@ -308,7 +457,10 @@ int ocrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
if ((vm_page >= unmapped_db) && (vm_page <= (unmapped_db +
dev->nic_info.db_total_size)) &&
(len <= dev->nic_info.db_page_size)) {
- /* doorbell mapping */
+ if (vma->vm_flags & VM_READ)
+ return -EPERM;
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
status = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
len, vma->vm_page_prot);
} else if (dev->nic_info.dpp_unmapped_len &&
@@ -316,19 +468,20 @@ int ocrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
(vm_page <= (u64) (dev->nic_info.dpp_unmapped_addr +
dev->nic_info.dpp_unmapped_len)) &&
(len <= dev->nic_info.dpp_unmapped_len)) {
- /* dpp area mapping */
+ if (vma->vm_flags & VM_READ)
+ return -EPERM;
+
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
status = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
len, vma->vm_page_prot);
} else {
- /* queue memory mapping */
status = remap_pfn_range(vma, vma->vm_start,
vma->vm_pgoff, len, vma->vm_page_prot);
}
return status;
}
-static int ocrdma_copy_pd_uresp(struct ocrdma_pd *pd,
+static int ocrdma_copy_pd_uresp(struct ocrdma_dev *dev, struct ocrdma_pd *pd,
struct ib_ucontext *ib_ctx,
struct ib_udata *udata)
{
@@ -339,21 +492,21 @@ static int ocrdma_copy_pd_uresp(struct ocrdma_pd *pd,
struct ocrdma_alloc_pd_uresp rsp;
struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ib_ctx);
+ memset(&rsp, 0, sizeof(rsp));
rsp.id = pd->id;
rsp.dpp_enabled = pd->dpp_enabled;
- db_page_addr = pd->dev->nic_info.unmapped_db +
- (pd->id * pd->dev->nic_info.db_page_size);
- db_page_size = pd->dev->nic_info.db_page_size;
+ db_page_addr = ocrdma_get_db_addr(dev, pd->id);
+ db_page_size = dev->nic_info.db_page_size;
status = ocrdma_add_mmap(uctx, db_page_addr, db_page_size);
if (status)
return status;
if (pd->dpp_enabled) {
- dpp_page_addr = pd->dev->nic_info.dpp_unmapped_addr +
- (pd->id * OCRDMA_DPP_PAGE_SIZE);
+ dpp_page_addr = dev->nic_info.dpp_unmapped_addr +
+ (pd->id * PAGE_SIZE);
status = ocrdma_add_mmap(uctx, dpp_page_addr,
- OCRDMA_DPP_PAGE_SIZE);
+ PAGE_SIZE);
if (status)
goto dpp_map_err;
rsp.dpp_page_addr_hi = upper_32_bits(dpp_page_addr);
@@ -369,7 +522,7 @@ static int ocrdma_copy_pd_uresp(struct ocrdma_pd *pd,
ucopy_err:
if (pd->dpp_enabled)
- ocrdma_del_mmap(pd->uctx, dpp_page_addr, OCRDMA_DPP_PAGE_SIZE);
+ ocrdma_del_mmap(pd->uctx, dpp_page_addr, PAGE_SIZE);
dpp_map_err:
ocrdma_del_mmap(pd->uctx, db_page_addr, db_page_size);
return status;
@@ -381,84 +534,75 @@ struct ib_pd *ocrdma_alloc_pd(struct ib_device *ibdev,
{
struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
struct ocrdma_pd *pd;
+ struct ocrdma_ucontext *uctx = NULL;
int status;
+ u8 is_uctx_pd = false;
- pd = kzalloc(sizeof(*pd), GFP_KERNEL);
- if (!pd)
- return ERR_PTR(-ENOMEM);
- pd->dev = dev;
if (udata && context) {
- pd->dpp_enabled = (dev->nic_info.dev_family ==
- OCRDMA_GEN2_FAMILY) ? true : false;
- pd->num_dpp_qp =
- pd->dpp_enabled ? OCRDMA_PD_MAX_DPP_ENABLED_QP : 0;
+ uctx = get_ocrdma_ucontext(context);
+ pd = ocrdma_get_ucontext_pd(uctx);
+ if (pd) {
+ is_uctx_pd = true;
+ goto pd_mapping;
+ }
}
- status = ocrdma_mbx_alloc_pd(dev, pd);
- if (status) {
- kfree(pd);
- return ERR_PTR(status);
+
+ pd = _ocrdma_alloc_pd(dev, uctx, udata);
+ if (IS_ERR(pd)) {
+ status = PTR_ERR(pd);
+ goto exit;
}
- atomic_set(&pd->use_cnt, 0);
+pd_mapping:
if (udata && context) {
- status = ocrdma_copy_pd_uresp(pd, context, udata);
+ status = ocrdma_copy_pd_uresp(dev, pd, context, udata);
if (status)
goto err;
}
return &pd->ibpd;
err:
- ocrdma_dealloc_pd(&pd->ibpd);
+ if (is_uctx_pd) {
+ ocrdma_release_ucontext_pd(uctx);
+ } else {
+ status = ocrdma_mbx_dealloc_pd(dev, pd);
+ kfree(pd);
+ }
+exit:
return ERR_PTR(status);
}
int ocrdma_dealloc_pd(struct ib_pd *ibpd)
{
struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
- struct ocrdma_dev *dev = pd->dev;
- int status;
+ struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
+ struct ocrdma_ucontext *uctx = NULL;
+ int status = 0;
u64 usr_db;
- if (atomic_read(&pd->use_cnt)) {
- ocrdma_err("%s(%d) pd=0x%x is in use.\n",
- __func__, dev->id, pd->id);
- status = -EFAULT;
- goto dealloc_err;
- }
- status = ocrdma_mbx_dealloc_pd(dev, pd);
- if (pd->uctx) {
+ uctx = pd->uctx;
+ if (uctx) {
u64 dpp_db = dev->nic_info.dpp_unmapped_addr +
- (pd->id * OCRDMA_DPP_PAGE_SIZE);
+ (pd->id * PAGE_SIZE);
if (pd->dpp_enabled)
- ocrdma_del_mmap(pd->uctx, dpp_db, OCRDMA_DPP_PAGE_SIZE);
- usr_db = dev->nic_info.unmapped_db +
- (pd->id * dev->nic_info.db_page_size);
+ ocrdma_del_mmap(pd->uctx, dpp_db, PAGE_SIZE);
+ usr_db = ocrdma_get_db_addr(dev, pd->id);
ocrdma_del_mmap(pd->uctx, usr_db, dev->nic_info.db_page_size);
+
+ if (is_ucontext_pd(uctx, pd)) {
+ ocrdma_release_ucontext_pd(uctx);
+ return status;
+ }
}
- kfree(pd);
-dealloc_err:
+ status = _ocrdma_dealloc_pd(dev, pd);
return status;
}
-static struct ocrdma_mr *ocrdma_alloc_lkey(struct ib_pd *ibpd,
- int acc, u32 num_pbls,
- u32 addr_check)
+static int ocrdma_alloc_lkey(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
+ u32 pdid, int acc, u32 num_pbls, u32 addr_check)
{
int status;
- struct ocrdma_mr *mr;
- struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
- struct ocrdma_dev *dev = pd->dev;
-
- if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE)) {
- ocrdma_err("%s(%d) leaving err, invalid access rights\n",
- __func__, dev->id);
- return ERR_PTR(-EINVAL);
- }
- mr = kzalloc(sizeof(*mr), GFP_KERNEL);
- if (!mr)
- return ERR_PTR(-ENOMEM);
- mr->hwmr.dev = dev;
mr->hwmr.fr_mr = 0;
mr->hwmr.local_rd = 1;
mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
@@ -468,26 +612,38 @@ static struct ocrdma_mr *ocrdma_alloc_lkey(struct ib_pd *ibpd,
mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
mr->hwmr.num_pbls = num_pbls;
- status = ocrdma_mbx_alloc_lkey(dev, &mr->hwmr, pd->id, addr_check);
- if (status) {
- kfree(mr);
- return ERR_PTR(-ENOMEM);
- }
- mr->pd = pd;
- atomic_inc(&pd->use_cnt);
+ status = ocrdma_mbx_alloc_lkey(dev, &mr->hwmr, pdid, addr_check);
+ if (status)
+ return status;
+
mr->ibmr.lkey = mr->hwmr.lkey;
if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
mr->ibmr.rkey = mr->hwmr.lkey;
- return mr;
+ return 0;
}
struct ib_mr *ocrdma_get_dma_mr(struct ib_pd *ibpd, int acc)
{
+ int status;
struct ocrdma_mr *mr;
+ struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
+ struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
+
+ if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE)) {
+ pr_err("%s err, invalid access rights\n", __func__);
+ return ERR_PTR(-EINVAL);
+ }
- mr = ocrdma_alloc_lkey(ibpd, acc, 0, OCRDMA_ADDR_CHECK_DISABLE);
- if (IS_ERR(mr))
- return ERR_CAST(mr);
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ status = ocrdma_alloc_lkey(dev, mr, pd->id, acc, 0,
+ OCRDMA_ADDR_CHECK_DISABLE);
+ if (status) {
+ kfree(mr);
+ return ERR_PTR(status);
+ }
return &mr->ibmr;
}
@@ -511,7 +667,8 @@ static void ocrdma_free_mr_pbl_tbl(struct ocrdma_dev *dev,
}
}
-static int ocrdma_get_pbl_info(struct ocrdma_mr *mr, u32 num_pbes)
+static int ocrdma_get_pbl_info(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
+ u32 num_pbes)
{
u32 num_pbls = 0;
u32 idx = 0;
@@ -527,7 +684,7 @@ static int ocrdma_get_pbl_info(struct ocrdma_mr *mr, u32 num_pbes)
num_pbls = roundup(num_pbes, (pbl_size / sizeof(u64)));
num_pbls = num_pbls / (pbl_size / sizeof(u64));
idx++;
- } while (num_pbls >= mr->hwmr.dev->attr.max_num_mr_pbl);
+ } while (num_pbls >= dev->attr.max_num_mr_pbl);
mr->hwmr.num_pbes = num_pbes;
mr->hwmr.num_pbls = num_pbls;
@@ -568,10 +725,10 @@ static void build_user_pbes(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
u32 num_pbes)
{
struct ocrdma_pbe *pbe;
- struct ib_umem_chunk *chunk;
+ struct scatterlist *sg;
struct ocrdma_pbl *pbl_tbl = mr->hwmr.pbl_table;
struct ib_umem *umem = mr->umem;
- int i, shift, pg_cnt, pages, pbe_cnt, total_num_pbes = 0;
+ int shift, pg_cnt, pages, pbe_cnt, entry, total_num_pbes = 0;
if (!mr->hwmr.num_pbes)
return;
@@ -581,39 +738,37 @@ static void build_user_pbes(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
shift = ilog2(umem->page_size);
- list_for_each_entry(chunk, &umem->chunk_list, list) {
- /* get all the dma regions from the chunk. */
- for (i = 0; i < chunk->nmap; i++) {
- pages = sg_dma_len(&chunk->page_list[i]) >> shift;
- for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
- /* store the page address in pbe */
- pbe->pa_lo =
- cpu_to_le32(sg_dma_address
- (&chunk->page_list[i]) +
- (umem->page_size * pg_cnt));
- pbe->pa_hi =
- cpu_to_le32(upper_32_bits
- ((sg_dma_address
- (&chunk->page_list[i]) +
- umem->page_size * pg_cnt)));
- pbe_cnt += 1;
- total_num_pbes += 1;
- pbe++;
-
- /* if done building pbes, issue the mbx cmd. */
- if (total_num_pbes == num_pbes)
- return;
-
- /* if the given pbl is full storing the pbes,
- * move to next pbl.
- */
- if (pbe_cnt ==
- (mr->hwmr.pbl_size / sizeof(u64))) {
- pbl_tbl++;
- pbe = (struct ocrdma_pbe *)pbl_tbl->va;
- pbe_cnt = 0;
- }
+ for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
+ pages = sg_dma_len(sg) >> shift;
+ for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
+ /* store the page address in pbe */
+ pbe->pa_lo =
+ cpu_to_le32(sg_dma_address
+ (sg) +
+ (umem->page_size * pg_cnt));
+ pbe->pa_hi =
+ cpu_to_le32(upper_32_bits
+ ((sg_dma_address
+ (sg) +
+ umem->page_size * pg_cnt)));
+ pbe_cnt += 1;
+ total_num_pbes += 1;
+ pbe++;
+
+ /* if done building pbes, issue the mbx cmd. */
+ if (total_num_pbes == num_pbes)
+ return;
+
+ /* if the given pbl is full storing the pbes,
+ * move to next pbl.
+ */
+ if (pbe_cnt ==
+ (mr->hwmr.pbl_size / sizeof(u64))) {
+ pbl_tbl++;
+ pbe = (struct ocrdma_pbe *)pbl_tbl->va;
+ pbe_cnt = 0;
}
+
}
}
}
@@ -622,13 +777,12 @@ struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
u64 usr_addr, int acc, struct ib_udata *udata)
{
int status = -ENOMEM;
- struct ocrdma_dev *dev;
+ struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
struct ocrdma_mr *mr;
struct ocrdma_pd *pd;
u32 num_pbes;
pd = get_ocrdma_pd(ibpd);
- dev = pd->dev;
if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE))
return ERR_PTR(-EINVAL);
@@ -636,14 +790,13 @@ struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
mr = kzalloc(sizeof(*mr), GFP_KERNEL);
if (!mr)
return ERR_PTR(status);
- mr->hwmr.dev = dev;
mr->umem = ib_umem_get(ibpd->uobject->context, start, len, acc, 0);
if (IS_ERR(mr->umem)) {
status = -EFAULT;
goto umem_err;
}
num_pbes = ib_umem_page_count(mr->umem);
- status = ocrdma_get_pbl_info(mr, num_pbes);
+ status = ocrdma_get_pbl_info(dev, mr, num_pbes);
if (status)
goto umem_err;
@@ -663,8 +816,6 @@ struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, acc);
if (status)
goto mbx_err;
- mr->pd = pd;
- atomic_inc(&pd->use_cnt);
mr->ibmr.lkey = mr->hwmr.lkey;
if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
mr->ibmr.rkey = mr->hwmr.lkey;
@@ -681,15 +832,13 @@ umem_err:
int ocrdma_dereg_mr(struct ib_mr *ib_mr)
{
struct ocrdma_mr *mr = get_ocrdma_mr(ib_mr);
- struct ocrdma_dev *dev = mr->hwmr.dev;
+ struct ocrdma_dev *dev = get_ocrdma_dev(ib_mr->device);
int status;
status = ocrdma_mbx_dealloc_lkey(dev, mr->hwmr.fr_mr, mr->hwmr.lkey);
- if (mr->hwmr.fr_mr == 0)
- ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
+ ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
- atomic_dec(&mr->pd->use_cnt);
/* it could be user registered memory. */
if (mr->umem)
ib_umem_release(mr->umem);
@@ -697,28 +846,29 @@ int ocrdma_dereg_mr(struct ib_mr *ib_mr)
return status;
}
-static int ocrdma_copy_cq_uresp(struct ocrdma_cq *cq, struct ib_udata *udata,
+static int ocrdma_copy_cq_uresp(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
+ struct ib_udata *udata,
struct ib_ucontext *ib_ctx)
{
int status;
- struct ocrdma_ucontext *uctx;
+ struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ib_ctx);
struct ocrdma_create_cq_uresp uresp;
+ memset(&uresp, 0, sizeof(uresp));
uresp.cq_id = cq->id;
- uresp.page_size = cq->len;
+ uresp.page_size = PAGE_ALIGN(cq->len);
uresp.num_pages = 1;
uresp.max_hw_cqe = cq->max_hw_cqe;
uresp.page_addr[0] = cq->pa;
- uresp.db_page_addr = cq->dev->nic_info.unmapped_db;
- uresp.db_page_size = cq->dev->nic_info.db_page_size;
+ uresp.db_page_addr = ocrdma_get_db_addr(dev, uctx->cntxt_pd->id);
+ uresp.db_page_size = dev->nic_info.db_page_size;
uresp.phase_change = cq->phase_change ? 1 : 0;
status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
if (status) {
- ocrdma_err("%s(%d) copy error cqid=0x%x.\n",
- __func__, cq->dev->id, cq->id);
+ pr_err("%s(%d) copy error cqid=0x%x.\n",
+ __func__, dev->id, cq->id);
goto err;
}
- uctx = get_ocrdma_ucontext(ib_ctx);
status = ocrdma_add_mmap(uctx, uresp.db_page_addr, uresp.db_page_size);
if (status)
goto err;
@@ -738,6 +888,8 @@ struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev, int entries, int vector,
{
struct ocrdma_cq *cq;
struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
+ struct ocrdma_ucontext *uctx = NULL;
+ u16 pd_id = 0;
int status;
struct ocrdma_create_cq_ureq ureq;
@@ -752,25 +904,27 @@ struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev, int entries, int vector,
spin_lock_init(&cq->cq_lock);
spin_lock_init(&cq->comp_handler_lock);
- atomic_set(&cq->use_cnt, 0);
INIT_LIST_HEAD(&cq->sq_head);
INIT_LIST_HEAD(&cq->rq_head);
- cq->dev = dev;
+ cq->first_arm = true;
- status = ocrdma_mbx_create_cq(dev, cq, entries, ureq.dpp_cq);
+ if (ib_ctx) {
+ uctx = get_ocrdma_ucontext(ib_ctx);
+ pd_id = uctx->cntxt_pd->id;
+ }
+
+ status = ocrdma_mbx_create_cq(dev, cq, entries, ureq.dpp_cq, pd_id);
if (status) {
kfree(cq);
return ERR_PTR(status);
}
if (ib_ctx) {
- status = ocrdma_copy_cq_uresp(cq, udata, ib_ctx);
+ status = ocrdma_copy_cq_uresp(dev, cq, udata, ib_ctx);
if (status)
goto ctx_err;
}
cq->phase = OCRDMA_CQE_VALID;
- cq->arm_needed = true;
dev->cq_tbl[cq->id] = cq;
-
return &cq->ibcq;
ctx_err:
@@ -793,23 +947,60 @@ int ocrdma_resize_cq(struct ib_cq *ibcq, int new_cnt,
return status;
}
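+/* Count the still-valid CQEs and ring the doorbell for them without
+ * re-arming, so the hardware's CQE accounting is clean before the
+ * CQ is destroyed.
+ */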
+static void ocrdma_flush_cq(struct ocrdma_cq *cq)
+{
+ int cqe_cnt;
+ int valid_count = 0;
+ unsigned long flags;
+
+ struct ocrdma_dev *dev = get_ocrdma_dev(cq->ibcq.device);
+ struct ocrdma_cqe *cqe = NULL;
+
+ cqe = cq->va;
+ cqe_cnt = cq->cqe_cnt;
+
+	/* The last IRQ might have scheduled a polling thread;
+	 * sync up with it before hard-flushing.
+	 */
+ spin_lock_irqsave(&cq->cq_lock, flags);
+ while (cqe_cnt) {
+ if (is_cqe_valid(cq, cqe))
+ valid_count++;
+ cqe++;
+ cqe_cnt--;
+ }
+ ocrdma_ring_cq_db(dev, cq->id, false, false, valid_count);
+ spin_unlock_irqrestore(&cq->cq_lock, flags);
+}
+
int ocrdma_destroy_cq(struct ib_cq *ibcq)
{
int status;
struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
- struct ocrdma_dev *dev = cq->dev;
+ struct ocrdma_eq *eq = NULL;
+ struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
+ int pdid = 0;
+ u32 irq, indx;
- if (atomic_read(&cq->use_cnt))
- return -EINVAL;
+ dev->cq_tbl[cq->id] = NULL;
+ indx = ocrdma_get_eq_table_index(dev, cq->eqn);
+	BUG_ON(indx == -EINVAL);
- status = ocrdma_mbx_destroy_cq(dev, cq);
+ eq = &dev->eq_tbl[indx];
+ irq = ocrdma_get_irq(dev, eq);
+ synchronize_irq(irq);
+ ocrdma_flush_cq(cq);
+ status = ocrdma_mbx_destroy_cq(dev, cq);
if (cq->ucontext) {
- ocrdma_del_mmap(cq->ucontext, (u64) cq->pa, cq->len);
- ocrdma_del_mmap(cq->ucontext, dev->nic_info.unmapped_db,
+ pdid = cq->ucontext->cntxt_pd->id;
+ ocrdma_del_mmap(cq->ucontext, (u64) cq->pa,
+ PAGE_ALIGN(cq->len));
+ ocrdma_del_mmap(cq->ucontext,
+ ocrdma_get_db_addr(dev, pdid),
dev->nic_info.db_page_size);
}
- dev->cq_tbl[cq->id] = NULL;
kfree(cq);
return status;
@@ -834,70 +1025,70 @@ static void ocrdma_del_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
static int ocrdma_check_qp_params(struct ib_pd *ibpd, struct ocrdma_dev *dev,
struct ib_qp_init_attr *attrs)
{
- if (attrs->qp_type != IB_QPT_GSI &&
- attrs->qp_type != IB_QPT_RC &&
- attrs->qp_type != IB_QPT_UD) {
- ocrdma_err("%s(%d) unsupported qp type=0x%x requested\n",
- __func__, dev->id, attrs->qp_type);
+ if ((attrs->qp_type != IB_QPT_GSI) &&
+ (attrs->qp_type != IB_QPT_RC) &&
+ (attrs->qp_type != IB_QPT_UC) &&
+ (attrs->qp_type != IB_QPT_UD)) {
+ pr_err("%s(%d) unsupported qp type=0x%x requested\n",
+ __func__, dev->id, attrs->qp_type);
return -EINVAL;
}
- if (attrs->cap.max_send_wr > dev->attr.max_wqe) {
- ocrdma_err("%s(%d) unsupported send_wr=0x%x requested\n",
- __func__, dev->id, attrs->cap.max_send_wr);
- ocrdma_err("%s(%d) supported send_wr=0x%x\n",
- __func__, dev->id, dev->attr.max_wqe);
+ /* Skip the check for QP1 to support CM size of 128 */
+ if ((attrs->qp_type != IB_QPT_GSI) &&
+ (attrs->cap.max_send_wr > dev->attr.max_wqe)) {
+ pr_err("%s(%d) unsupported send_wr=0x%x requested\n",
+ __func__, dev->id, attrs->cap.max_send_wr);
+ pr_err("%s(%d) supported send_wr=0x%x\n",
+ __func__, dev->id, dev->attr.max_wqe);
return -EINVAL;
}
if (!attrs->srq && (attrs->cap.max_recv_wr > dev->attr.max_rqe)) {
- ocrdma_err("%s(%d) unsupported recv_wr=0x%x requested\n",
- __func__, dev->id, attrs->cap.max_recv_wr);
- ocrdma_err("%s(%d) supported recv_wr=0x%x\n",
- __func__, dev->id, dev->attr.max_rqe);
+ pr_err("%s(%d) unsupported recv_wr=0x%x requested\n",
+ __func__, dev->id, attrs->cap.max_recv_wr);
+ pr_err("%s(%d) supported recv_wr=0x%x\n",
+ __func__, dev->id, dev->attr.max_rqe);
return -EINVAL;
}
if (attrs->cap.max_inline_data > dev->attr.max_inline_data) {
- ocrdma_err("%s(%d) unsupported inline data size=0x%x"
- " requested\n", __func__, dev->id,
- attrs->cap.max_inline_data);
- ocrdma_err("%s(%d) supported inline data size=0x%x\n",
- __func__, dev->id, dev->attr.max_inline_data);
+ pr_err("%s(%d) unsupported inline data size=0x%x requested\n",
+ __func__, dev->id, attrs->cap.max_inline_data);
+ pr_err("%s(%d) supported inline data size=0x%x\n",
+ __func__, dev->id, dev->attr.max_inline_data);
return -EINVAL;
}
if (attrs->cap.max_send_sge > dev->attr.max_send_sge) {
- ocrdma_err("%s(%d) unsupported send_sge=0x%x requested\n",
- __func__, dev->id, attrs->cap.max_send_sge);
- ocrdma_err("%s(%d) supported send_sge=0x%x\n",
- __func__, dev->id, dev->attr.max_send_sge);
+ pr_err("%s(%d) unsupported send_sge=0x%x requested\n",
+ __func__, dev->id, attrs->cap.max_send_sge);
+ pr_err("%s(%d) supported send_sge=0x%x\n",
+ __func__, dev->id, dev->attr.max_send_sge);
return -EINVAL;
}
if (attrs->cap.max_recv_sge > dev->attr.max_recv_sge) {
- ocrdma_err("%s(%d) unsupported recv_sge=0x%x requested\n",
- __func__, dev->id, attrs->cap.max_recv_sge);
- ocrdma_err("%s(%d) supported recv_sge=0x%x\n",
- __func__, dev->id, dev->attr.max_recv_sge);
+ pr_err("%s(%d) unsupported recv_sge=0x%x requested\n",
+ __func__, dev->id, attrs->cap.max_recv_sge);
+ pr_err("%s(%d) supported recv_sge=0x%x\n",
+ __func__, dev->id, dev->attr.max_recv_sge);
return -EINVAL;
}
/* unprivileged user space cannot create special QP */
if (ibpd->uobject && attrs->qp_type == IB_QPT_GSI) {
- ocrdma_err
+ pr_err
("%s(%d) Userspace can't create special QPs of type=0x%x\n",
__func__, dev->id, attrs->qp_type);
return -EINVAL;
}
/* allow creating only one GSI type of QP */
if (attrs->qp_type == IB_QPT_GSI && dev->gsi_qp_created) {
- ocrdma_err("%s(%d) GSI special QPs already created.\n",
- __func__, dev->id);
+ pr_err("%s(%d) GSI special QPs already created.\n",
+ __func__, dev->id);
return -EINVAL;
}
/* verify consumer QPs are not trying to use GSI QP's CQ */
if ((attrs->qp_type != IB_QPT_GSI) && (dev->gsi_qp_created)) {
if ((dev->gsi_sqcq == get_ocrdma_cq(attrs->send_cq)) ||
- (dev->gsi_sqcq == get_ocrdma_cq(attrs->recv_cq)) ||
- (dev->gsi_rqcq == get_ocrdma_cq(attrs->send_cq)) ||
- (dev->gsi_rqcq == get_ocrdma_cq(attrs->recv_cq))) {
- ocrdma_err("%s(%d) Consumer QP cannot use GSI CQs.\n",
- __func__, dev->id);
+ (dev->gsi_rqcq == get_ocrdma_cq(attrs->recv_cq))) {
+ pr_err("%s(%d) Consumer QP cannot use GSI CQs.\n",
+ __func__, dev->id);
return -EINVAL;
}
}
@@ -920,28 +1111,21 @@ static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp,
uresp.qp_id = qp->id;
uresp.sq_dbid = qp->sq.dbid;
uresp.num_sq_pages = 1;
- uresp.sq_page_size = qp->sq.len;
+ uresp.sq_page_size = PAGE_ALIGN(qp->sq.len);
uresp.sq_page_addr[0] = qp->sq.pa;
uresp.num_wqe_allocated = qp->sq.max_cnt;
if (!srq) {
uresp.rq_dbid = qp->rq.dbid;
uresp.num_rq_pages = 1;
- uresp.rq_page_size = qp->rq.len;
+ uresp.rq_page_size = PAGE_ALIGN(qp->rq.len);
uresp.rq_page_addr[0] = qp->rq.pa;
uresp.num_rqe_allocated = qp->rq.max_cnt;
}
uresp.db_page_addr = usr_db;
uresp.db_page_size = dev->nic_info.db_page_size;
- if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
- uresp.db_sq_offset = OCRDMA_DB_GEN2_SQ_OFFSET;
- uresp.db_rq_offset = ((qp->id & 0xFFFF) < 128) ?
- OCRDMA_DB_GEN2_RQ1_OFFSET : OCRDMA_DB_GEN2_RQ2_OFFSET;
- uresp.db_shift = (qp->id < 128) ? 24 : 16;
- } else {
- uresp.db_sq_offset = OCRDMA_DB_SQ_OFFSET;
- uresp.db_rq_offset = OCRDMA_DB_RQ_OFFSET;
- uresp.db_shift = 16;
- }
+ uresp.db_sq_offset = OCRDMA_DB_GEN2_SQ_OFFSET;
+ uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ_OFFSET;
+ uresp.db_shift = OCRDMA_DB_RQ_SHIFT;
if (qp->dpp_enabled) {
uresp.dpp_credit = dpp_credit_lmt;
@@ -949,7 +1133,7 @@ static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp,
}
status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
if (status) {
- ocrdma_err("%s(%d) user copy error.\n", __func__, dev->id);
+ pr_err("%s(%d) user copy error.\n", __func__, dev->id);
goto err;
}
status = ocrdma_add_mmap(pd->uctx, uresp.sq_page_addr[0],
@@ -973,14 +1157,13 @@ err:
static void ocrdma_set_qp_db(struct ocrdma_dev *dev, struct ocrdma_qp *qp,
struct ocrdma_pd *pd)
{
- if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
+ if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) {
qp->sq_db = dev->nic_info.db +
(pd->id * dev->nic_info.db_page_size) +
OCRDMA_DB_GEN2_SQ_OFFSET;
qp->rq_db = dev->nic_info.db +
(pd->id * dev->nic_info.db_page_size) +
- ((qp->id < 128) ?
- OCRDMA_DB_GEN2_RQ1_OFFSET : OCRDMA_DB_GEN2_RQ2_OFFSET);
+ OCRDMA_DB_GEN2_RQ_OFFSET;
} else {
qp->sq_db = dev->nic_info.db +
(pd->id * dev->nic_info.db_page_size) +
@@ -1021,16 +1204,7 @@ static void ocrdma_set_qp_init_params(struct ocrdma_qp *qp,
qp->sq.max_sges = attrs->cap.max_send_sge;
qp->rq.max_sges = attrs->cap.max_recv_sge;
qp->state = OCRDMA_QPS_RST;
-}
-
-static void ocrdma_set_qp_use_cnt(struct ocrdma_qp *qp, struct ocrdma_pd *pd)
-{
- atomic_inc(&pd->use_cnt);
- atomic_inc(&qp->sq_cq->use_cnt);
- atomic_inc(&qp->rq_cq->use_cnt);
- if (qp->srq)
- atomic_inc(&qp->srq->use_cnt);
- qp->ibqp.qp_num = qp->id;
+ qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false;
}
static void ocrdma_store_gsi_qp_cq(struct ocrdma_dev *dev,
@@ -1050,7 +1224,7 @@ struct ib_qp *ocrdma_create_qp(struct ib_pd *ibpd,
int status;
struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
struct ocrdma_qp *qp;
- struct ocrdma_dev *dev = pd->dev;
+ struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
struct ocrdma_create_qp_ureq ureq;
u16 dpp_credit_lmt, dpp_offset;
@@ -1070,6 +1244,9 @@ struct ib_qp *ocrdma_create_qp(struct ib_pd *ibpd,
}
qp->dev = dev;
ocrdma_set_qp_init_params(qp, pd, attrs);
+ if (udata == NULL)
+ qp->cap_flags |= (OCRDMA_QP_MW_BIND | OCRDMA_QP_LKEY0 |
+ OCRDMA_QP_FAST_REG);
mutex_lock(&dev->dev_lock);
status = ocrdma_mbx_create_qp(qp, attrs, ureq.enable_dpp_cq,
@@ -1080,8 +1257,6 @@ struct ib_qp *ocrdma_create_qp(struct ib_pd *ibpd,
/* user space QP's wr_id table are managed in library */
if (udata == NULL) {
- qp->cap_flags |= (OCRDMA_QP_MW_BIND | OCRDMA_QP_LKEY0 |
- OCRDMA_QP_FAST_REG);
status = ocrdma_alloc_wr_id_tbl(qp);
if (status)
goto map_err;
@@ -1099,7 +1274,7 @@ struct ib_qp *ocrdma_create_qp(struct ib_pd *ibpd,
goto cpy_err;
}
ocrdma_store_gsi_qp_cq(dev, attrs);
- ocrdma_set_qp_use_cnt(qp, pd);
+ qp->ibqp.qp_num = qp->id;
mutex_unlock(&dev->dev_lock);
return &qp->ibqp;
@@ -1112,7 +1287,7 @@ mbx_err:
kfree(qp->wqe_wr_id_tbl);
kfree(qp->rqe_wr_id_tbl);
kfree(qp);
- ocrdma_err("%s(%d) error=%d\n", __func__, dev->id, status);
+ pr_err("%s(%d) error=%d\n", __func__, dev->id, status);
gen_err:
return ERR_PTR(status);
}
@@ -1128,13 +1303,14 @@ int _ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
qp = get_ocrdma_qp(ibqp);
dev = qp->dev;
if (attr_mask & IB_QP_STATE)
- status = ocrdma_qp_state_machine(qp, attr->qp_state, &old_qps);
+ status = ocrdma_qp_state_change(qp, attr->qp_state, &old_qps);
/* if new and previous states are same hw doesn't need to
* know about it.
*/
if (status < 0)
return status;
- status = ocrdma_mbx_modify_qp(dev, qp, attr, attr_mask, old_qps);
+ status = ocrdma_mbx_modify_qp(dev, qp, attr, attr_mask);
+
return status;
}
@@ -1161,11 +1337,12 @@ int ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
new_qps = old_qps;
spin_unlock_irqrestore(&qp->q_lock, flags);
- if (!ib_modify_qp_is_ok(old_qps, new_qps, ibqp->qp_type, attr_mask)) {
- ocrdma_err("%s(%d) invalid attribute mask=0x%x specified for "
- "qpn=0x%x of type=0x%x old_qps=0x%x, new_qps=0x%x\n",
- __func__, dev->id, attr_mask, qp->id, ibqp->qp_type,
- old_qps, new_qps);
+ if (!ib_modify_qp_is_ok(old_qps, new_qps, ibqp->qp_type, attr_mask,
+ IB_LINK_LAYER_ETHERNET)) {
+ pr_err("%s(%d) invalid attribute mask=0x%x specified for\n"
+ "qpn=0x%x of type=0x%x old_qps=0x%x, new_qps=0x%x\n",
+ __func__, dev->id, attr_mask, qp->id, ibqp->qp_type,
+ old_qps, new_qps);
goto param_err;
}
@@ -1239,7 +1416,7 @@ int ocrdma_query_qp(struct ib_qp *ibqp,
qp_attr->cap.max_recv_wr = qp->rq.max_cnt - 1;
qp_attr->cap.max_send_sge = qp->sq.max_sges;
qp_attr->cap.max_recv_sge = qp->rq.max_sges;
- qp_attr->cap.max_inline_data = dev->attr.max_inline_data;
+ qp_attr->cap.max_inline_data = qp->max_inline_data;
qp_init_attr->cap = qp_attr->cap;
memcpy(&qp_attr->ah_attr.grh.dgid, &params.dgid[0],
sizeof(params.dgid));
@@ -1250,7 +1427,7 @@ int ocrdma_query_qp(struct ib_qp *ibqp,
OCRDMA_QP_PARAMS_HOP_LMT_MASK) >>
OCRDMA_QP_PARAMS_HOP_LMT_SHIFT;
qp_attr->ah_attr.grh.traffic_class = (params.tclass_sq_psn &
- OCRDMA_QP_PARAMS_SQ_PSN_MASK) >>
+ OCRDMA_QP_PARAMS_TCLASS_MASK) >>
OCRDMA_QP_PARAMS_TCLASS_SHIFT;
qp_attr->ah_attr.ah_flags = IB_AH_GRH;
@@ -1302,23 +1479,17 @@ static void ocrdma_srq_toggle_bit(struct ocrdma_srq *srq, int idx)
static int ocrdma_hwq_free_cnt(struct ocrdma_qp_hwq_info *q)
{
- int free_cnt;
- if (q->head >= q->tail)
- free_cnt = (q->max_cnt - q->head) + q->tail;
- else
- free_cnt = q->tail - q->head;
- return free_cnt;
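+	/* Free slots in the ring; assuming max_wqe_idx == max_cnt - 1,
+	 * this keeps one slot reserved so head == tail means empty.
+	 */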
+ return ((q->max_wqe_idx - q->head) + q->tail) % q->max_cnt;
}
static int is_hw_sq_empty(struct ocrdma_qp *qp)
{
- return (qp->sq.tail == qp->sq.head &&
- ocrdma_hwq_free_cnt(&qp->sq) ? 1 : 0);
+ return (qp->sq.tail == qp->sq.head);
}
static int is_hw_rq_empty(struct ocrdma_qp *qp)
{
- return (qp->rq.tail == qp->rq.head) ? 1 : 0;
+ return (qp->rq.tail == qp->rq.head);
}
static void *ocrdma_hwq_head(struct ocrdma_qp_hwq_info *q)
@@ -1350,7 +1521,7 @@ static void ocrdma_discard_cqes(struct ocrdma_qp *qp, struct ocrdma_cq *cq)
int discard_cnt = 0;
u32 cur_getp, stop_getp;
struct ocrdma_cqe *cqe;
- u32 qpn = 0;
+ u32 qpn = 0, wqe_idx = 0;
spin_lock_irqsave(&cq->cq_lock, cq_flags);
@@ -1379,30 +1550,36 @@ static void ocrdma_discard_cqes(struct ocrdma_qp *qp, struct ocrdma_cq *cq)
if (qpn == 0 || qpn != qp->id)
goto skip_cqe;
- /* mark cqe discarded so that it is not picked up later
- * in the poll_cq().
- */
- discard_cnt += 1;
- cqe->cmn.qpn = 0;
- if (is_cqe_for_sq(cqe))
+ if (is_cqe_for_sq(cqe)) {
ocrdma_hwq_inc_tail(&qp->sq);
- else {
+ } else {
if (qp->srq) {
+ wqe_idx = (le32_to_cpu(cqe->rq.buftag_qpn) >>
+ OCRDMA_CQE_BUFTAG_SHIFT) &
+ qp->srq->rq.max_wqe_idx;
+ if (wqe_idx < 1)
+ BUG();
spin_lock_irqsave(&qp->srq->q_lock, flags);
ocrdma_hwq_inc_tail(&qp->srq->rq);
- ocrdma_srq_toggle_bit(qp->srq, cur_getp);
+ ocrdma_srq_toggle_bit(qp->srq, wqe_idx - 1);
spin_unlock_irqrestore(&qp->srq->q_lock, flags);
- } else
+ } else {
ocrdma_hwq_inc_tail(&qp->rq);
+ }
}
+ /* mark cqe discarded so that it is not picked up later
+ * in the poll_cq().
+ */
+ discard_cnt += 1;
+ cqe->cmn.qpn = 0;
skip_cqe:
cur_getp = (cur_getp + 1) % cq->max_hw_cqe;
} while (cur_getp != stop_getp);
spin_unlock_irqrestore(&cq->cq_lock, cq_flags);
}
-static void ocrdma_del_flush_qp(struct ocrdma_qp *qp)
+void ocrdma_del_flush_qp(struct ocrdma_qp *qp)
{
int found = false;
unsigned long flags;
@@ -1468,39 +1645,38 @@ int ocrdma_destroy_qp(struct ib_qp *ibqp)
mutex_unlock(&dev->dev_lock);
if (pd->uctx) {
- ocrdma_del_mmap(pd->uctx, (u64) qp->sq.pa, qp->sq.len);
+ ocrdma_del_mmap(pd->uctx, (u64) qp->sq.pa,
+ PAGE_ALIGN(qp->sq.len));
if (!qp->srq)
- ocrdma_del_mmap(pd->uctx, (u64) qp->rq.pa, qp->rq.len);
+ ocrdma_del_mmap(pd->uctx, (u64) qp->rq.pa,
+ PAGE_ALIGN(qp->rq.len));
}
ocrdma_del_flush_qp(qp);
- atomic_dec(&qp->pd->use_cnt);
- atomic_dec(&qp->sq_cq->use_cnt);
- atomic_dec(&qp->rq_cq->use_cnt);
- if (qp->srq)
- atomic_dec(&qp->srq->use_cnt);
kfree(qp->wqe_wr_id_tbl);
kfree(qp->rqe_wr_id_tbl);
kfree(qp);
return status;
}
-static int ocrdma_copy_srq_uresp(struct ocrdma_srq *srq, struct ib_udata *udata)
+static int ocrdma_copy_srq_uresp(struct ocrdma_dev *dev, struct ocrdma_srq *srq,
+ struct ib_udata *udata)
{
int status;
struct ocrdma_create_srq_uresp uresp;
+ memset(&uresp, 0, sizeof(uresp));
uresp.rq_dbid = srq->rq.dbid;
uresp.num_rq_pages = 1;
uresp.rq_page_addr[0] = srq->rq.pa;
uresp.rq_page_size = srq->rq.len;
- uresp.db_page_addr = srq->dev->nic_info.unmapped_db +
- (srq->pd->id * srq->dev->nic_info.db_page_size);
- uresp.db_page_size = srq->dev->nic_info.db_page_size;
+ uresp.db_page_addr = dev->nic_info.unmapped_db +
+ (srq->pd->id * dev->nic_info.db_page_size);
+ uresp.db_page_size = dev->nic_info.db_page_size;
uresp.num_rqe_allocated = srq->rq.max_cnt;
- if (srq->dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
- uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ1_OFFSET;
+ if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) {
+ uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ_OFFSET;
uresp.db_shift = 24;
} else {
uresp.db_rq_offset = OCRDMA_DB_RQ_OFFSET;
@@ -1523,7 +1699,7 @@ struct ib_srq *ocrdma_create_srq(struct ib_pd *ibpd,
{
int status = -ENOMEM;
struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
- struct ocrdma_dev *dev = pd->dev;
+ struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
struct ocrdma_srq *srq;
if (init_attr->attr.max_sge > dev->attr.max_recv_sge)
@@ -1536,10 +1712,9 @@ struct ib_srq *ocrdma_create_srq(struct ib_pd *ibpd,
return ERR_PTR(status);
spin_lock_init(&srq->q_lock);
- srq->dev = dev;
srq->pd = pd;
srq->db = dev->nic_info.db + (pd->id * dev->nic_info.db_page_size);
- status = ocrdma_mbx_create_srq(srq, init_attr, pd);
+ status = ocrdma_mbx_create_srq(dev, srq, init_attr, pd);
if (status)
goto err;
@@ -1565,14 +1740,12 @@ struct ib_srq *ocrdma_create_srq(struct ib_pd *ibpd,
goto arm_err;
}
- atomic_set(&srq->use_cnt, 0);
if (udata) {
- status = ocrdma_copy_srq_uresp(srq, udata);
+ status = ocrdma_copy_srq_uresp(dev, srq, udata);
if (status)
goto arm_err;
}
- atomic_inc(&pd->use_cnt);
return &srq->ibsrq;
arm_err:
@@ -1614,22 +1787,16 @@ int ocrdma_destroy_srq(struct ib_srq *ibsrq)
{
int status;
struct ocrdma_srq *srq;
- struct ocrdma_dev *dev;
+ struct ocrdma_dev *dev = get_ocrdma_dev(ibsrq->device);
srq = get_ocrdma_srq(ibsrq);
- dev = srq->dev;
- if (atomic_read(&srq->use_cnt)) {
- ocrdma_err("%s(%d) err, srq=0x%x in use\n",
- __func__, dev->id, srq->id);
- return -EAGAIN;
- }
status = ocrdma_mbx_destroy_srq(dev, srq);
if (srq->pd->uctx)
- ocrdma_del_mmap(srq->pd->uctx, (u64) srq->rq.pa, srq->rq.len);
+ ocrdma_del_mmap(srq->pd->uctx, (u64) srq->rq.pa,
+ PAGE_ALIGN(srq->rq.len));
- atomic_dec(&srq->pd->use_cnt);
kfree(srq->idx_bit_fields);
kfree(srq->rqe_wr_id_tbl);
kfree(srq);
@@ -1670,23 +1837,43 @@ static void ocrdma_build_sges(struct ocrdma_hdr_wqe *hdr,
memset(sge, 0, sizeof(*sge));
}
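+/* Total byte length of an SG list, used to size inline payloads. */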
+static inline uint32_t ocrdma_sglist_len(struct ib_sge *sg_list, int num_sge)
+{
+ uint32_t total_len = 0, i;
+
+ for (i = 0; i < num_sge; i++)
+ total_len += sg_list[i].length;
+ return total_len;
+}
+
static int ocrdma_build_inline_sges(struct ocrdma_qp *qp,
struct ocrdma_hdr_wqe *hdr,
struct ocrdma_sge *sge,
struct ib_send_wr *wr, u32 wqe_size)
{
- if (wr->send_flags & IB_SEND_INLINE) {
- if (wr->sg_list[0].length > qp->max_inline_data) {
- ocrdma_err("%s() supported_len=0x%x,"
- " unspported len req=0x%x\n", __func__,
- qp->max_inline_data, wr->sg_list[0].length);
+ int i;
+ char *dpp_addr;
+
+ if (wr->send_flags & IB_SEND_INLINE && qp->qp_type != IB_QPT_UD) {
+ hdr->total_len = ocrdma_sglist_len(wr->sg_list, wr->num_sge);
+ if (unlikely(hdr->total_len > qp->max_inline_data)) {
+			pr_err("%s() supported_len=0x%x, unsupported len req=0x%x\n",
+			       __func__, qp->max_inline_data, hdr->total_len);
return -EINVAL;
}
- memcpy(sge,
- (void *)(unsigned long)wr->sg_list[0].addr,
- wr->sg_list[0].length);
- hdr->total_len = wr->sg_list[0].length;
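+		/* Inline send: copy every SGE back-to-back into the
+		 * WQE right after the header.
+		 */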
+ dpp_addr = (char *)sge;
+ for (i = 0; i < wr->num_sge; i++) {
+ memcpy(dpp_addr,
+ (void *)(unsigned long)wr->sg_list[i].addr,
+ wr->sg_list[i].length);
+ dpp_addr += wr->sg_list[i].length;
+ }
+
wqe_size += roundup(hdr->total_len, OCRDMA_WQE_ALIGN_BYTES);
+		if (hdr->total_len == 0)
+ wqe_size += sizeof(struct ocrdma_sge);
hdr->cw |= (OCRDMA_TYPE_INLINE << OCRDMA_WQE_TYPE_SHIFT);
} else {
ocrdma_build_sges(hdr, sge, wr->num_sge, wr->sg_list);
@@ -1711,8 +1898,9 @@ static int ocrdma_build_send(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
ocrdma_build_ud_hdr(qp, hdr, wr);
sge = (struct ocrdma_sge *)(hdr + 2);
wqe_size += sizeof(struct ocrdma_ewqe_ud_hdr);
- } else
+ } else {
sge = (struct ocrdma_sge *)(hdr + 1);
+ }
status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
return status;
@@ -1755,9 +1943,97 @@ static void ocrdma_build_read(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
ext_rw->len = hdr->total_len;
}
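+/* Copy the fast-reg page list into the MR's PBL pages so the HW
+ * sees the mapping described by the FRMR work request.
+ */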
+static void build_frmr_pbes(struct ib_send_wr *wr, struct ocrdma_pbl *pbl_tbl,
+ struct ocrdma_hw_mr *hwmr)
+{
+ int i;
+ u64 buf_addr = 0;
+ int num_pbes;
+ struct ocrdma_pbe *pbe;
+
+ pbe = (struct ocrdma_pbe *)pbl_tbl->va;
+ num_pbes = 0;
+
+ /* go through the OS phy regions & fill hw pbe entries into pbls. */
+ for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
+ /* number of pbes can be more for one OS buf, when
+ * buffers are of different sizes.
+ * split the ib_buf to one or more pbes.
+ */
+ buf_addr = wr->wr.fast_reg.page_list->page_list[i];
+ pbe->pa_lo = cpu_to_le32((u32) (buf_addr & PAGE_MASK));
+ pbe->pa_hi = cpu_to_le32((u32) upper_32_bits(buf_addr));
+ num_pbes += 1;
+ pbe++;
+
+ /* if the pbl is full storing the pbes,
+ * move to next pbl.
+ */
+ if (num_pbes == (hwmr->pbl_size/sizeof(u64))) {
+ pbl_tbl++;
+ pbe = (struct ocrdma_pbe *)pbl_tbl->va;
+ }
+ }
+ return;
+}
+
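+/* Encode a page size as the exponent i such that
+ * pg_sz == 4096 << i (4K -> 0, 8K -> 1, ..., 256M -> 16).
+ */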
+static int get_encoded_page_size(int pg_sz)
+{
+	/* Max encoded size is 256M, i.e. 4096 << 16 */
+ int i = 0;
+ for (; i < 17; i++)
+ if (pg_sz == (4096 << i))
+ break;
+ return i;
+}
+
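+/* Build a fast-register WQE: access flags, rkey, length, IOVA and
+ * the first-byte offset (fbo) within the first page, plus the
+ * encoded page size and the number of pages to map.
+ */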
+static int ocrdma_build_fr(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
+ struct ib_send_wr *wr)
+{
+ u64 fbo;
+ struct ocrdma_ewqe_fr *fast_reg = (struct ocrdma_ewqe_fr *)(hdr + 1);
+ struct ocrdma_mr *mr;
+ u32 wqe_size = sizeof(*fast_reg) + sizeof(*hdr);
+
+ wqe_size = roundup(wqe_size, OCRDMA_WQE_ALIGN_BYTES);
+
+ if (wr->wr.fast_reg.page_list_len > qp->dev->attr.max_pages_per_frmr)
+ return -EINVAL;
+
+ hdr->cw |= (OCRDMA_FR_MR << OCRDMA_WQE_OPCODE_SHIFT);
+ hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
+
+ if (wr->wr.fast_reg.page_list_len == 0)
+ BUG();
+ if (wr->wr.fast_reg.access_flags & IB_ACCESS_LOCAL_WRITE)
+ hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_LOCAL_WR;
+ if (wr->wr.fast_reg.access_flags & IB_ACCESS_REMOTE_WRITE)
+ hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_REMOTE_WR;
+ if (wr->wr.fast_reg.access_flags & IB_ACCESS_REMOTE_READ)
+ hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_REMOTE_RD;
+ hdr->lkey = wr->wr.fast_reg.rkey;
+ hdr->total_len = wr->wr.fast_reg.length;
+
+ fbo = wr->wr.fast_reg.iova_start -
+ (wr->wr.fast_reg.page_list->page_list[0] & PAGE_MASK);
+
+ fast_reg->va_hi = upper_32_bits(wr->wr.fast_reg.iova_start);
+ fast_reg->va_lo = (u32) (wr->wr.fast_reg.iova_start & 0xffffffff);
+ fast_reg->fbo_hi = upper_32_bits(fbo);
+ fast_reg->fbo_lo = (u32) fbo & 0xffffffff;
+ fast_reg->num_sges = wr->wr.fast_reg.page_list_len;
+ fast_reg->size_sge =
+ get_encoded_page_size(1 << wr->wr.fast_reg.page_shift);
+ mr = (struct ocrdma_mr *) (unsigned long)
+ qp->dev->stag_arr[(hdr->lkey >> 8) & (OCRDMA_MAX_STAG - 1)];
+ build_frmr_pbes(wr, mr->hwmr.pbl_table, &mr->hwmr);
+ return 0;
+}
+
static void ocrdma_ring_sq_db(struct ocrdma_qp *qp)
{
- u32 val = qp->sq.dbid | (1 << 16);
+ u32 val = qp->sq.dbid | (1 << OCRDMA_DB_SQ_SHIFT);
iowrite32(val, qp->sq_db);
}
@@ -1773,18 +2049,20 @@ int ocrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
spin_lock_irqsave(&qp->q_lock, flags);
if (qp->state != OCRDMA_QPS_RTS && qp->state != OCRDMA_QPS_SQD) {
spin_unlock_irqrestore(&qp->q_lock, flags);
+ *bad_wr = wr;
return -EINVAL;
}
while (wr) {
if (ocrdma_hwq_free_cnt(&qp->sq) == 0 ||
wr->num_sge > qp->sq.max_sges) {
+ *bad_wr = wr;
status = -ENOMEM;
break;
}
hdr = ocrdma_hwq_head(&qp->sq);
hdr->cw = 0;
- if (wr->send_flags & IB_SEND_SIGNALED)
+ if (wr->send_flags & IB_SEND_SIGNALED || qp->signaled)
hdr->cw |= (OCRDMA_FLAG_SIG << OCRDMA_WQE_FLAGS_SHIFT);
if (wr->send_flags & IB_SEND_FENCE)
hdr->cw |=
@@ -1822,10 +2100,14 @@ int ocrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
case IB_WR_LOCAL_INV:
hdr->cw |=
(OCRDMA_LKEY_INV << OCRDMA_WQE_OPCODE_SHIFT);
- hdr->cw |= (sizeof(struct ocrdma_hdr_wqe) /
+ hdr->cw |= ((sizeof(struct ocrdma_hdr_wqe) +
+ sizeof(struct ocrdma_sge)) /
OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT;
hdr->lkey = wr->ex.invalidate_rkey;
break;
+ case IB_WR_FAST_REG_MR:
+ status = ocrdma_build_fr(qp, hdr, wr);
+ break;
default:
status = -EINVAL;
break;
@@ -1834,7 +2116,7 @@ int ocrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
*bad_wr = wr;
break;
}
- if (wr->send_flags & IB_SEND_SIGNALED)
+ if (wr->send_flags & IB_SEND_SIGNALED || qp->signaled)
qp->wqe_wr_id_tbl[qp->sq.head].signaled = 1;
else
qp->wqe_wr_id_tbl[qp->sq.head].signaled = 0;
@@ -1856,7 +2138,7 @@ int ocrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
static void ocrdma_ring_rq_db(struct ocrdma_qp *qp)
{
- u32 val = qp->rq.dbid | (1 << OCRDMA_GET_NUM_POSTED_SHIFT_VAL(qp));
+ u32 val = qp->rq.dbid | (1 << OCRDMA_DB_RQ_SHIFT);
iowrite32(val, qp->rq_db);
}
@@ -1944,7 +2226,7 @@ static int ocrdma_srq_get_idx(struct ocrdma_srq *srq)
if (row == srq->bit_fields_len)
BUG();
- return indx;
+	return indx + 1; /* Use from index 1; 0 is treated as invalid */
}
static void ocrdma_ring_srq_db(struct ocrdma_srq *srq)
@@ -1992,7 +2274,7 @@ int ocrdma_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
static enum ib_wc_status ocrdma_to_ibwc_err(u16 status)
{
- enum ib_wc_status ibwc_status = IB_WC_GENERAL_ERR;
+ enum ib_wc_status ibwc_status;
switch (status) {
case OCRDMA_CQE_GENERAL_ERR:
@@ -2061,7 +2343,7 @@ static enum ib_wc_status ocrdma_to_ibwc_err(u16 status)
default:
ibwc_status = IB_WC_GENERAL_ERR;
break;
- };
+ }
return ibwc_status;
}
@@ -2089,15 +2371,18 @@ static void ocrdma_update_wc(struct ocrdma_qp *qp, struct ib_wc *ibwc,
case OCRDMA_SEND:
ibwc->opcode = IB_WC_SEND;
break;
+ case OCRDMA_FR_MR:
+ ibwc->opcode = IB_WC_FAST_REG_MR;
+ break;
case OCRDMA_LKEY_INV:
ibwc->opcode = IB_WC_LOCAL_INV;
break;
default:
ibwc->status = IB_WC_GENERAL_ERR;
- ocrdma_err("%s() invalid opcode received = 0x%x\n",
- __func__, hdr->cw & OCRDMA_WQE_OPCODE_MASK);
+ pr_err("%s() invalid opcode received = 0x%x\n",
+ __func__, hdr->cw & OCRDMA_WQE_OPCODE_MASK);
break;
- };
+ }
}
static void ocrdma_set_cqe_status_flushed(struct ocrdma_qp *qp,
@@ -2142,7 +2427,7 @@ static bool ocrdma_update_err_cqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
ibwc->status = ocrdma_to_ibwc_err(status);
ocrdma_flush_qp(qp);
- ocrdma_qp_state_machine(qp, IB_QPS_ERR, NULL);
+ ocrdma_qp_state_change(qp, IB_QPS_ERR, NULL);
/* if wqe/rqe pending for which cqe needs to be returned,
* trigger inflating it.
@@ -2227,7 +2512,8 @@ static bool ocrdma_poll_success_scqe(struct ocrdma_qp *qp,
ocrdma_update_wc(qp, ibwc, tail);
*polled = true;
}
- wqe_idx = le32_to_cpu(cqe->wq.wqeidx) & OCRDMA_CQE_WQEIDX_MASK;
+ wqe_idx = (le32_to_cpu(cqe->wq.wqeidx) &
+ OCRDMA_CQE_WQEIDX_MASK) & qp->sq.max_wqe_idx;
if (tail != wqe_idx)
expand = true; /* Coalesced CQE can't be consumed yet */
@@ -2276,10 +2562,14 @@ static void ocrdma_update_free_srq_cqe(struct ib_wc *ibwc,
u32 wqe_idx;
srq = get_ocrdma_srq(qp->ibqp.srq);
- wqe_idx = le32_to_cpu(cqe->rq.buftag_qpn) >> OCRDMA_CQE_BUFTAG_SHIFT;
+ wqe_idx = (le32_to_cpu(cqe->rq.buftag_qpn) >>
+ OCRDMA_CQE_BUFTAG_SHIFT) & srq->rq.max_wqe_idx;
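+ /* index 0 is never handed out by ocrdma_srq_get_idx(), so a zero tag here indicates a bad CQE */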
+ if (wqe_idx < 1)
+ BUG();
+
ibwc->wr_id = srq->rqe_wr_id_tbl[wqe_idx];
spin_lock_irqsave(&srq->q_lock, flags);
- ocrdma_srq_toggle_bit(srq, wqe_idx);
+ ocrdma_srq_toggle_bit(srq, wqe_idx - 1);
spin_unlock_irqrestore(&srq->q_lock, flags);
ocrdma_hwq_inc_tail(&srq->rq);
}
@@ -2333,9 +2623,9 @@ static void ocrdma_poll_success_rcqe(struct ocrdma_qp *qp,
ibwc->ex.invalidate_rkey = le32_to_cpu(cqe->rq.lkey_immdt);
ibwc->wc_flags |= IB_WC_WITH_INVALIDATE;
}
- if (qp->ibqp.srq)
+ if (qp->ibqp.srq) {
ocrdma_update_free_srq_cqe(ibwc, cqe, qp);
- else {
+ } else {
ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
ocrdma_hwq_inc_tail(&qp->rq);
}
@@ -2348,13 +2638,14 @@ static bool ocrdma_poll_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
bool expand = false;
ibwc->wc_flags = 0;
- if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI)
+ if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
status = (le32_to_cpu(cqe->flags_status_srcqpn) &
OCRDMA_CQE_UD_STATUS_MASK) >>
OCRDMA_CQE_UD_STATUS_SHIFT;
- else
+ } else {
status = (le32_to_cpu(cqe->flags_status_srcqpn) &
OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
+ }
if (status == OCRDMA_CQE_SUCCESS) {
*polled = true;
@@ -2372,9 +2663,10 @@ static void ocrdma_change_cq_phase(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe,
if (cq->phase_change) {
if (cur_getp == 0)
cq->phase = (~cq->phase & OCRDMA_CQE_VALID);
- } else
+ } else {
/* clear valid bit */
cqe->flags_status_srcqpn = 0;
+ }
}
static int ocrdma_poll_hwcq(struct ocrdma_cq *cq, int num_entries,
@@ -2385,7 +2677,7 @@ static int ocrdma_poll_hwcq(struct ocrdma_cq *cq, int num_entries,
bool expand = false;
int polled_hw_cqes = 0;
struct ocrdma_qp *qp = NULL;
- struct ocrdma_dev *dev = cq->dev;
+ struct ocrdma_dev *dev = get_ocrdma_dev(cq->ibcq.device);
struct ocrdma_cqe *cqe;
u16 cur_getp; bool polled = false; bool stop = false;
@@ -2429,10 +2721,18 @@ expand_cqe:
}
stop_cqe:
cq->getp = cur_getp;
- if (polled_hw_cqes || expand || stop) {
- ocrdma_ring_cq_db(dev, cq->id, cq->armed, cq->solicited,
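+ /* if an arm was deferred from ocrdma_arm_cq(), honor it now: a single
+ * doorbell write both pops the polled CQEs and re-arms the CQ.
+ */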
+ if (cq->deferred_arm) {
+ ocrdma_ring_cq_db(dev, cq->id, true, cq->deferred_sol,
+ polled_hw_cqes);
+ cq->deferred_arm = false;
+ cq->deferred_sol = false;
+ } else {
+ /* We need to pop the CQE. No need to arm */
+ ocrdma_ring_cq_db(dev, cq->id, false, cq->deferred_sol,
polled_hw_cqes);
+ cq->deferred_sol = false;
}
+
return i;
}
@@ -2451,8 +2751,9 @@ static int ocrdma_add_err_cqe(struct ocrdma_cq *cq, int num_entries,
} else if (!is_hw_rq_empty(qp) && qp->rq_cq == cq) {
ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
ocrdma_hwq_inc_tail(&qp->rq);
- } else
+ } else {
return err_cqes;
+ }
ibwc->byte_len = 0;
ibwc->status = IB_WC_WR_FLUSH_ERR;
ibwc = ibwc + 1;
@@ -2465,14 +2766,11 @@ static int ocrdma_add_err_cqe(struct ocrdma_cq *cq, int num_entries,
int ocrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
int cqes_to_poll = num_entries;
- struct ocrdma_cq *cq = NULL;
- unsigned long flags;
- struct ocrdma_dev *dev;
+ struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
+ struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
int num_os_cqe = 0, err_cqes = 0;
struct ocrdma_qp *qp;
-
- cq = get_ocrdma_cq(ibcq);
- dev = cq->dev;
+ unsigned long flags;
/* poll cqes from adapter CQ */
spin_lock_irqsave(&cq->cq_lock, flags);
@@ -2503,34 +2801,254 @@ int ocrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
int ocrdma_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags cq_flags)
{
- struct ocrdma_cq *cq;
- unsigned long flags;
- struct ocrdma_dev *dev;
+ struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
+ struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
u16 cq_id;
- u16 cur_getp;
- struct ocrdma_cqe *cqe;
+ unsigned long flags;
+ bool arm_needed = false, sol_needed = false;
- cq = get_ocrdma_cq(ibcq);
cq_id = cq->id;
- dev = cq->dev;
spin_lock_irqsave(&cq->cq_lock, flags);
if (cq_flags & IB_CQ_NEXT_COMP || cq_flags & IB_CQ_SOLICITED)
- cq->armed = true;
+ arm_needed = true;
if (cq_flags & IB_CQ_SOLICITED)
- cq->solicited = true;
+ sol_needed = true;
- cur_getp = cq->getp;
- cqe = cq->va + cur_getp;
-
- /* check whether any valid cqe exist or not, if not then safe to
- * arm. If cqe is not yet consumed, then let it get consumed and then
- * we arm it to avoid false interrupts.
- */
- if (!is_cqe_valid(cq, cqe) || cq->arm_needed) {
- cq->arm_needed = false;
- ocrdma_ring_cq_db(dev, cq_id, cq->armed, cq->solicited, 0);
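+ /* ring the doorbell only for the very first arm; later arm requests are
+ * deferred so that ocrdma_poll_hwcq() re-arms while popping CQEs.
+ */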
+ if (cq->first_arm) {
+ ocrdma_ring_cq_db(dev, cq_id, arm_needed, sol_needed, 0);
+ cq->first_arm = false;
+ goto skip_defer;
}
+ cq->deferred_arm = true;
+
+skip_defer:
+ cq->deferred_sol = sol_needed;
spin_unlock_irqrestore(&cq->cq_lock, flags);
+
return 0;
}
+
+struct ib_mr *ocrdma_alloc_frmr(struct ib_pd *ibpd, int max_page_list_len)
+{
+ int status;
+ struct ocrdma_mr *mr;
+ struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
+ struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
+
+ if (max_page_list_len > dev->attr.max_pages_per_frmr)
+ return ERR_PTR(-EINVAL);
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ status = ocrdma_get_pbl_info(dev, mr, max_page_list_len);
+ if (status)
+ goto pbl_err;
+ mr->hwmr.fr_mr = 1;
+ mr->hwmr.remote_rd = 0;
+ mr->hwmr.remote_wr = 0;
+ mr->hwmr.local_rd = 0;
+ mr->hwmr.local_wr = 0;
+ mr->hwmr.mw_bind = 0;
+ status = ocrdma_build_pbl_tbl(dev, &mr->hwmr);
+ if (status)
+ goto pbl_err;
+ status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, 0);
+ if (status)
+ goto mbx_err;
+ mr->ibmr.rkey = mr->hwmr.lkey;
+ mr->ibmr.lkey = mr->hwmr.lkey;
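+ /* stash the MR so ocrdma_build_fr() can look it up by stag when an IB_WR_FAST_REG_MR is posted */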
+ dev->stag_arr[(mr->hwmr.lkey >> 8) & (OCRDMA_MAX_STAG - 1)] =
+ (unsigned long) mr;
+ return &mr->ibmr;
+mbx_err:
+ ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
+pbl_err:
+ kfree(mr);
+ return ERR_PTR(status);
+}
+
+struct ib_fast_reg_page_list *ocrdma_alloc_frmr_page_list(struct ib_device
+ *ibdev,
+ int page_list_len)
+{
+ struct ib_fast_reg_page_list *frmr_list;
+ int size;
+
+ size = sizeof(*frmr_list) + (page_list_len * sizeof(u64));
+ frmr_list = kzalloc(size, GFP_KERNEL);
+ if (!frmr_list)
+ return ERR_PTR(-ENOMEM);
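+ /* the u64 page array lives right after the struct, in the same allocation */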
+ frmr_list->page_list = (u64 *)(frmr_list + 1);
+ return frmr_list;
+}
+
+void ocrdma_free_frmr_page_list(struct ib_fast_reg_page_list *page_list)
+{
+ kfree(page_list);
+}
+
+#define MAX_KERNEL_PBE_SIZE 65536
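+/* choose a single PBE size that covers all the buffers and return how many
+ * PBEs are needed; a return of 0 means the buffer list cannot be mapped.
+ */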
+static inline int count_kernel_pbes(struct ib_phys_buf *buf_list,
+ int buf_cnt, u32 *pbe_size)
+{
+ u64 total_size = 0;
+ u64 buf_size = 0;
+ int i;
+ *pbe_size = roundup(buf_list[0].size, PAGE_SIZE);
+ *pbe_size = roundup_pow_of_two(*pbe_size);
+
+ /* find the smallest PBE size that we can have */
+ for (i = 0; i < buf_cnt; i++) {
+ /* the first addr may not be page aligned, so skip the check for it */
+ if ((i != 0) && ((buf_list[i].addr & ~PAGE_MASK) ||
+ (buf_list[i].size & ~PAGE_MASK))) {
+ return 0;
+ }
+
+ /* if the PBE size chosen so far is greater than this
+ * buffer's rounded size, reduce the PBE size.
+ */
+ buf_size = roundup(buf_list[i].size, PAGE_SIZE);
+ /* pbe_size has to be a power-of-two multiple of 4K: 1x, 2x, 4x, 8x, ... */
+ buf_size = roundup_pow_of_two(buf_size);
+ if (*pbe_size > buf_size)
+ *pbe_size = buf_size;
+
+ total_size += buf_size;
+ }
+ *pbe_size = *pbe_size > MAX_KERNEL_PBE_SIZE ?
+ (MAX_KERNEL_PBE_SIZE) : (*pbe_size);
+
+ /* num_pbes = total_size / (*pbe_size); this is implemented below. */
+
+ return total_size >> ilog2(*pbe_size);
+}
+
+static void build_kernel_pbes(struct ib_phys_buf *buf_list, int ib_buf_cnt,
+ u32 pbe_size, struct ocrdma_pbl *pbl_tbl,
+ struct ocrdma_hw_mr *hwmr)
+{
+ int i;
+ int idx;
+ int pbes_per_buf = 0;
+ u64 buf_addr = 0;
+ int num_pbes;
+ struct ocrdma_pbe *pbe;
+ int total_num_pbes = 0;
+
+ if (!hwmr->num_pbes)
+ return;
+
+ pbe = (struct ocrdma_pbe *)pbl_tbl->va;
+ num_pbes = 0;
+
+ /* go through the OS phy regions & fill hw pbe entries into pbls. */
+ for (i = 0; i < ib_buf_cnt; i++) {
+ buf_addr = buf_list[i].addr;
+ pbes_per_buf =
+ roundup_pow_of_two(roundup(buf_list[i].size, PAGE_SIZE)) /
+ pbe_size;
+ hwmr->len += buf_list[i].size;
+ /* one OS buffer may need more than one PBE when the
+ * buffers are of different sizes;
+ * split the ib_buf into one or more PBEs.
+ */
+ for (idx = 0; idx < pbes_per_buf; idx++) {
+ /* we always program page-aligned addresses; any
+ * misalignment in the first address is handled by the fbo.
+ */
+ if (i == 0) {
+ /* for a non-zero fbo, program the
+ * start of the page.
+ */
+ pbe->pa_lo =
+ cpu_to_le32((u32) (buf_addr & PAGE_MASK));
+ pbe->pa_hi =
+ cpu_to_le32((u32) upper_32_bits(buf_addr));
+ } else {
+ pbe->pa_lo =
+ cpu_to_le32((u32) (buf_addr & 0xffffffff));
+ pbe->pa_hi =
+ cpu_to_le32((u32) upper_32_bits(buf_addr));
+ }
+ buf_addr += pbe_size;
+ num_pbes += 1;
+ total_num_pbes += 1;
+ pbe++;
+
+ if (total_num_pbes == hwmr->num_pbes)
+ goto mr_tbl_done;
+ /* if the current pbl is full,
+ * move on to the next pbl.
+ */
+ if (num_pbes == (hwmr->pbl_size/sizeof(u64))) {
+ pbl_tbl++;
+ pbe = (struct ocrdma_pbe *)pbl_tbl->va;
+ num_pbes = 0;
+ }
+ }
+ }
+mr_tbl_done:
+ return;
+}
+
+struct ib_mr *ocrdma_reg_kernel_mr(struct ib_pd *ibpd,
+ struct ib_phys_buf *buf_list,
+ int buf_cnt, int acc, u64 *iova_start)
+{
+ int status = -ENOMEM;
+ struct ocrdma_mr *mr;
+ struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
+ struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
+ u32 num_pbes;
+ u32 pbe_size = 0;
+
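+ /* the IB spec requires local write access for remote-writable MRs */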
+ if ((acc & IB_ACCESS_REMOTE_WRITE) && !(acc & IB_ACCESS_LOCAL_WRITE))
+ return ERR_PTR(-EINVAL);
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(status);
+
+ num_pbes = count_kernel_pbes(buf_list, buf_cnt, &pbe_size);
+ if (num_pbes == 0) {
+ status = -EINVAL;
+ goto pbl_err;
+ }
+ status = ocrdma_get_pbl_info(dev, mr, num_pbes);
+ if (status)
+ goto pbl_err;
+
+ mr->hwmr.pbe_size = pbe_size;
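+ /* fbo: byte offset of the iova from the page-aligned start of the first buffer */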
+ mr->hwmr.fbo = *iova_start - (buf_list[0].addr & PAGE_MASK);
+ mr->hwmr.va = *iova_start;
+ mr->hwmr.local_rd = 1;
+ mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
+ mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
+ mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
+ mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
+ mr->hwmr.mw_bind = (acc & IB_ACCESS_MW_BIND) ? 1 : 0;
+
+ status = ocrdma_build_pbl_tbl(dev, &mr->hwmr);
+ if (status)
+ goto pbl_err;
+ build_kernel_pbes(buf_list, buf_cnt, pbe_size, mr->hwmr.pbl_table,
+ &mr->hwmr);
+ status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, acc);
+ if (status)
+ goto mbx_err;
+
+ mr->ibmr.lkey = mr->hwmr.lkey;
+ if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
+ mr->ibmr.rkey = mr->hwmr.lkey;
+ return &mr->ibmr;
+
+mbx_err:
+ ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
+pbl_err:
+ kfree(mr);
+ return ERR_PTR(status);
+}
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
index 633f03d8027..b8f7853fd36 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
@@ -72,6 +72,7 @@ int ocrdma_query_qp(struct ib_qp *,
struct ib_qp_attr *qp_attr,
int qp_attr_mask, struct ib_qp_init_attr *);
int ocrdma_destroy_qp(struct ib_qp *);
+void ocrdma_del_flush_qp(struct ocrdma_qp *qp);
struct ib_srq *ocrdma_create_srq(struct ib_pd *, struct ib_srq_init_attr *,
struct ib_udata *);
@@ -89,5 +90,10 @@ struct ib_mr *ocrdma_reg_kernel_mr(struct ib_pd *,
int num_phys_buf, int acc, u64 *iova_start);
struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *, u64 start, u64 length,
u64 virt, int acc, struct ib_udata *);
+struct ib_mr *ocrdma_alloc_frmr(struct ib_pd *pd, int max_page_list_len);
+struct ib_fast_reg_page_list *ocrdma_alloc_frmr_page_list(struct ib_device
+ *ibdev,
+ int page_list_len);
+void ocrdma_free_frmr_page_list(struct ib_fast_reg_page_list *page_list);
#endif /* __OCRDMA_VERBS_H__ */