author     Hoang-Nam Nguyen <hnguyen@de.ibm.com>    2007-07-12 17:53:47 +0200
committer  Roland Dreier <rolandd@cisco.com>        2007-07-17 18:37:40 -0700
commit     2b94397adc68c2f0f851539884cc426e03444a26
tree       f510f23f62efea31f1de472c6a384d55f593b6be
parent     187c72e31f92791ec70395b80aa9883f2edad97f
IB/ehca: Fix warnings issued by checkpatch.pl
Run the existing ehca code through checkpatch.pl and clean up the worst
of the coding style violations.

Signed-off-by: Joachim Fenkes <fenkes@de.ibm.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
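For reference, a minimal sketch of how such a cleanup pass is typically driven
from a kernel source tree (illustrative only: the exact invocation and patch
filename are not recorded in this commit, and option support varies between
checkpatch.pl versions):

    # check an in-tree source file directly (illustrative path)
    scripts/checkpatch.pl --file drivers/infiniband/hw/ehca/ehca_av.c

    # or check a prepared patch before submitting it (hypothetical filename)
    scripts/checkpatch.pl 0001-IB-ehca-Fix-warnings-issued-by-checkpatch.pl.patch

checkpatch.pl reports style violations such as missing spaces after commas,
assignments inside if conditions, and "foo* bar" pointer declarations, which
correspond to the hunks below.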
Diffstat (limited to 'drivers/infiniband/hw/ehca')
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_av.c                   2
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_classes.h              4
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_classes_pSeries.h    156
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_cq.c                   2
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_eq.c                   3
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_hca.c                 28
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_irq.c                 56
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_iverbs.h               7
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_main.c                21
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_mrmw.c                59
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_mrmw.h                 7
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_qes.h                 22
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_qp.c                  39
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_reqs.c                15
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_tools.h               28
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_uverbs.c              10
-rw-r--r--  drivers/infiniband/hw/ehca/hcp_if.c                    8
-rw-r--r--  drivers/infiniband/hw/ehca/hcp_phyp.c                  2
-rw-r--r--  drivers/infiniband/hw/ehca/hipz_fns_core.h             4
-rw-r--r--  drivers/infiniband/hw/ehca/hipz_hw.h                  24
-rw-r--r--  drivers/infiniband/hw/ehca/ipz_pt_fn.c                 2
-rw-r--r--  drivers/infiniband/hw/ehca/ipz_pt_fn.h                 4
22 files changed, 261 insertions(+), 242 deletions(-)
diff --git a/drivers/infiniband/hw/ehca/ehca_av.c b/drivers/infiniband/hw/ehca/ehca_av.c
index 3cd6bf3402d..e53a97af126 100644
--- a/drivers/infiniband/hw/ehca/ehca_av.c
+++ b/drivers/infiniband/hw/ehca/ehca_av.c
@@ -79,7 +79,7 @@ struct ib_ah *ehca_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
av->av.ipd = (ah_mult > 0) ?
((ehca_mult - 1) / ah_mult) : 0;
} else
- av->av.ipd = ehca_static_rate;
+ av->av.ipd = ehca_static_rate;
av->av.lnh = ah_attr->ah_flags;
av->av.grh.word_0 = EHCA_BMASK_SET(GRH_IPVERSION_MASK, 6);
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h
index 5e00202dc77..043e4fb23fb 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes.h
+++ b/drivers/infiniband/hw/ehca/ehca_classes.h
@@ -208,7 +208,7 @@ struct ehca_mr {
u32 num_hwpages; /* number of hw pages to form MR */
int acl; /* ACL (stored here for usage in reregister) */
u64 *start; /* virtual start address (stored here for */
- /* usage in reregister) */
+ /* usage in reregister) */
u64 size; /* size (stored here for usage in reregister) */
u32 fmr_page_size; /* page size for FMR */
u32 fmr_max_pages; /* max pages for FMR */
@@ -391,6 +391,6 @@ struct ehca_alloc_qp_parms {
int ehca_cq_assign_qp(struct ehca_cq *cq, struct ehca_qp *qp);
int ehca_cq_unassign_qp(struct ehca_cq *cq, unsigned int qp_num);
-struct ehca_qp* ehca_cq_get_qp(struct ehca_cq *cq, int qp_num);
+struct ehca_qp *ehca_cq_get_qp(struct ehca_cq *cq, int qp_num);
#endif
diff --git a/drivers/infiniband/hw/ehca/ehca_classes_pSeries.h b/drivers/infiniband/hw/ehca/ehca_classes_pSeries.h
index fb3df5c271e..1798e6466bd 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes_pSeries.h
+++ b/drivers/infiniband/hw/ehca/ehca_classes_pSeries.h
@@ -154,83 +154,83 @@ struct hcp_modify_qp_control_block {
u32 reserved_70_127[58]; /* 70 */
};
-#define MQPCB_MASK_QKEY EHCA_BMASK_IBM(0,0)
-#define MQPCB_MASK_SEND_PSN EHCA_BMASK_IBM(2,2)
-#define MQPCB_MASK_RECEIVE_PSN EHCA_BMASK_IBM(3,3)
-#define MQPCB_MASK_PRIM_PHYS_PORT EHCA_BMASK_IBM(4,4)
-#define MQPCB_PRIM_PHYS_PORT EHCA_BMASK_IBM(24,31)
-#define MQPCB_MASK_ALT_PHYS_PORT EHCA_BMASK_IBM(5,5)
-#define MQPCB_MASK_PRIM_P_KEY_IDX EHCA_BMASK_IBM(6,6)
-#define MQPCB_PRIM_P_KEY_IDX EHCA_BMASK_IBM(24,31)
-#define MQPCB_MASK_ALT_P_KEY_IDX EHCA_BMASK_IBM(7,7)
-#define MQPCB_MASK_RDMA_ATOMIC_CTRL EHCA_BMASK_IBM(8,8)
-#define MQPCB_MASK_QP_STATE EHCA_BMASK_IBM(9,9)
-#define MQPCB_QP_STATE EHCA_BMASK_IBM(24,31)
-#define MQPCB_MASK_RDMA_NR_ATOMIC_RESP_RES EHCA_BMASK_IBM(11,11)
-#define MQPCB_MASK_PATH_MIGRATION_STATE EHCA_BMASK_IBM(12,12)
-#define MQPCB_MASK_RDMA_ATOMIC_OUTST_DEST_QP EHCA_BMASK_IBM(13,13)
-#define MQPCB_MASK_DEST_QP_NR EHCA_BMASK_IBM(14,14)
-#define MQPCB_MASK_MIN_RNR_NAK_TIMER_FIELD EHCA_BMASK_IBM(15,15)
-#define MQPCB_MASK_SERVICE_LEVEL EHCA_BMASK_IBM(16,16)
-#define MQPCB_MASK_SEND_GRH_FLAG EHCA_BMASK_IBM(17,17)
-#define MQPCB_MASK_RETRY_COUNT EHCA_BMASK_IBM(18,18)
-#define MQPCB_MASK_TIMEOUT EHCA_BMASK_IBM(19,19)
-#define MQPCB_MASK_PATH_MTU EHCA_BMASK_IBM(20,20)
-#define MQPCB_PATH_MTU EHCA_BMASK_IBM(24,31)
-#define MQPCB_MASK_MAX_STATIC_RATE EHCA_BMASK_IBM(21,21)
-#define MQPCB_MAX_STATIC_RATE EHCA_BMASK_IBM(24,31)
-#define MQPCB_MASK_DLID EHCA_BMASK_IBM(22,22)
-#define MQPCB_DLID EHCA_BMASK_IBM(16,31)
-#define MQPCB_MASK_RNR_RETRY_COUNT EHCA_BMASK_IBM(23,23)
-#define MQPCB_RNR_RETRY_COUNT EHCA_BMASK_IBM(29,31)
-#define MQPCB_MASK_SOURCE_PATH_BITS EHCA_BMASK_IBM(24,24)
-#define MQPCB_SOURCE_PATH_BITS EHCA_BMASK_IBM(25,31)
-#define MQPCB_MASK_TRAFFIC_CLASS EHCA_BMASK_IBM(25,25)
-#define MQPCB_TRAFFIC_CLASS EHCA_BMASK_IBM(24,31)
-#define MQPCB_MASK_HOP_LIMIT EHCA_BMASK_IBM(26,26)
-#define MQPCB_HOP_LIMIT EHCA_BMASK_IBM(24,31)
-#define MQPCB_MASK_SOURCE_GID_IDX EHCA_BMASK_IBM(27,27)
-#define MQPCB_SOURCE_GID_IDX EHCA_BMASK_IBM(24,31)
-#define MQPCB_MASK_FLOW_LABEL EHCA_BMASK_IBM(28,28)
-#define MQPCB_FLOW_LABEL EHCA_BMASK_IBM(12,31)
-#define MQPCB_MASK_DEST_GID EHCA_BMASK_IBM(30,30)
-#define MQPCB_MASK_SERVICE_LEVEL_AL EHCA_BMASK_IBM(31,31)
-#define MQPCB_SERVICE_LEVEL_AL EHCA_BMASK_IBM(28,31)
-#define MQPCB_MASK_SEND_GRH_FLAG_AL EHCA_BMASK_IBM(32,32)
-#define MQPCB_SEND_GRH_FLAG_AL EHCA_BMASK_IBM(31,31)
-#define MQPCB_MASK_RETRY_COUNT_AL EHCA_BMASK_IBM(33,33)
-#define MQPCB_RETRY_COUNT_AL EHCA_BMASK_IBM(29,31)
-#define MQPCB_MASK_TIMEOUT_AL EHCA_BMASK_IBM(34,34)
-#define MQPCB_TIMEOUT_AL EHCA_BMASK_IBM(27,31)
-#define MQPCB_MASK_MAX_STATIC_RATE_AL EHCA_BMASK_IBM(35,35)
-#define MQPCB_MAX_STATIC_RATE_AL EHCA_BMASK_IBM(24,31)
-#define MQPCB_MASK_DLID_AL EHCA_BMASK_IBM(36,36)
-#define MQPCB_DLID_AL EHCA_BMASK_IBM(16,31)
-#define MQPCB_MASK_RNR_RETRY_COUNT_AL EHCA_BMASK_IBM(37,37)
-#define MQPCB_RNR_RETRY_COUNT_AL EHCA_BMASK_IBM(29,31)
-#define MQPCB_MASK_SOURCE_PATH_BITS_AL EHCA_BMASK_IBM(38,38)
-#define MQPCB_SOURCE_PATH_BITS_AL EHCA_BMASK_IBM(25,31)
-#define MQPCB_MASK_TRAFFIC_CLASS_AL EHCA_BMASK_IBM(39,39)
-#define MQPCB_TRAFFIC_CLASS_AL EHCA_BMASK_IBM(24,31)
-#define MQPCB_MASK_HOP_LIMIT_AL EHCA_BMASK_IBM(40,40)
-#define MQPCB_HOP_LIMIT_AL EHCA_BMASK_IBM(24,31)
-#define MQPCB_MASK_SOURCE_GID_IDX_AL EHCA_BMASK_IBM(41,41)
-#define MQPCB_SOURCE_GID_IDX_AL EHCA_BMASK_IBM(24,31)
-#define MQPCB_MASK_FLOW_LABEL_AL EHCA_BMASK_IBM(42,42)
-#define MQPCB_FLOW_LABEL_AL EHCA_BMASK_IBM(12,31)
-#define MQPCB_MASK_DEST_GID_AL EHCA_BMASK_IBM(44,44)
-#define MQPCB_MASK_MAX_NR_OUTST_SEND_WR EHCA_BMASK_IBM(45,45)
-#define MQPCB_MAX_NR_OUTST_SEND_WR EHCA_BMASK_IBM(16,31)
-#define MQPCB_MASK_MAX_NR_OUTST_RECV_WR EHCA_BMASK_IBM(46,46)
-#define MQPCB_MAX_NR_OUTST_RECV_WR EHCA_BMASK_IBM(16,31)
-#define MQPCB_MASK_DISABLE_ETE_CREDIT_CHECK EHCA_BMASK_IBM(47,47)
-#define MQPCB_DISABLE_ETE_CREDIT_CHECK EHCA_BMASK_IBM(31,31)
-#define MQPCB_QP_NUMBER EHCA_BMASK_IBM(8,31)
-#define MQPCB_MASK_QP_ENABLE EHCA_BMASK_IBM(48,48)
-#define MQPCB_QP_ENABLE EHCA_BMASK_IBM(31,31)
-#define MQPCB_MASK_CURR_SRQ_LIMIT EHCA_BMASK_IBM(49,49)
-#define MQPCB_CURR_SRQ_LIMIT EHCA_BMASK_IBM(16,31)
-#define MQPCB_MASK_QP_AFF_ASYN_EV_LOG_REG EHCA_BMASK_IBM(50,50)
-#define MQPCB_MASK_SHARED_RQ_HNDL EHCA_BMASK_IBM(51,51)
+#define MQPCB_MASK_QKEY EHCA_BMASK_IBM( 0, 0)
+#define MQPCB_MASK_SEND_PSN EHCA_BMASK_IBM( 2, 2)
+#define MQPCB_MASK_RECEIVE_PSN EHCA_BMASK_IBM( 3, 3)
+#define MQPCB_MASK_PRIM_PHYS_PORT EHCA_BMASK_IBM( 4, 4)
+#define MQPCB_PRIM_PHYS_PORT EHCA_BMASK_IBM(24, 31)
+#define MQPCB_MASK_ALT_PHYS_PORT EHCA_BMASK_IBM( 5, 5)
+#define MQPCB_MASK_PRIM_P_KEY_IDX EHCA_BMASK_IBM( 6, 6)
+#define MQPCB_PRIM_P_KEY_IDX EHCA_BMASK_IBM(24, 31)
+#define MQPCB_MASK_ALT_P_KEY_IDX EHCA_BMASK_IBM( 7, 7)
+#define MQPCB_MASK_RDMA_ATOMIC_CTRL EHCA_BMASK_IBM( 8, 8)
+#define MQPCB_MASK_QP_STATE EHCA_BMASK_IBM( 9, 9)
+#define MQPCB_QP_STATE EHCA_BMASK_IBM(24, 31)
+#define MQPCB_MASK_RDMA_NR_ATOMIC_RESP_RES EHCA_BMASK_IBM(11, 11)
+#define MQPCB_MASK_PATH_MIGRATION_STATE EHCA_BMASK_IBM(12, 12)
+#define MQPCB_MASK_RDMA_ATOMIC_OUTST_DEST_QP EHCA_BMASK_IBM(13, 13)
+#define MQPCB_MASK_DEST_QP_NR EHCA_BMASK_IBM(14, 14)
+#define MQPCB_MASK_MIN_RNR_NAK_TIMER_FIELD EHCA_BMASK_IBM(15, 15)
+#define MQPCB_MASK_SERVICE_LEVEL EHCA_BMASK_IBM(16, 16)
+#define MQPCB_MASK_SEND_GRH_FLAG EHCA_BMASK_IBM(17, 17)
+#define MQPCB_MASK_RETRY_COUNT EHCA_BMASK_IBM(18, 18)
+#define MQPCB_MASK_TIMEOUT EHCA_BMASK_IBM(19, 19)
+#define MQPCB_MASK_PATH_MTU EHCA_BMASK_IBM(20, 20)
+#define MQPCB_PATH_MTU EHCA_BMASK_IBM(24, 31)
+#define MQPCB_MASK_MAX_STATIC_RATE EHCA_BMASK_IBM(21, 21)
+#define MQPCB_MAX_STATIC_RATE EHCA_BMASK_IBM(24, 31)
+#define MQPCB_MASK_DLID EHCA_BMASK_IBM(22, 22)
+#define MQPCB_DLID EHCA_BMASK_IBM(16, 31)
+#define MQPCB_MASK_RNR_RETRY_COUNT EHCA_BMASK_IBM(23, 23)
+#define MQPCB_RNR_RETRY_COUNT EHCA_BMASK_IBM(29, 31)
+#define MQPCB_MASK_SOURCE_PATH_BITS EHCA_BMASK_IBM(24, 24)
+#define MQPCB_SOURCE_PATH_BITS EHCA_BMASK_IBM(25, 31)
+#define MQPCB_MASK_TRAFFIC_CLASS EHCA_BMASK_IBM(25, 25)
+#define MQPCB_TRAFFIC_CLASS EHCA_BMASK_IBM(24, 31)
+#define MQPCB_MASK_HOP_LIMIT EHCA_BMASK_IBM(26, 26)
+#define MQPCB_HOP_LIMIT EHCA_BMASK_IBM(24, 31)
+#define MQPCB_MASK_SOURCE_GID_IDX EHCA_BMASK_IBM(27, 27)
+#define MQPCB_SOURCE_GID_IDX EHCA_BMASK_IBM(24, 31)
+#define MQPCB_MASK_FLOW_LABEL EHCA_BMASK_IBM(28, 28)
+#define MQPCB_FLOW_LABEL EHCA_BMASK_IBM(12, 31)
+#define MQPCB_MASK_DEST_GID EHCA_BMASK_IBM(30, 30)
+#define MQPCB_MASK_SERVICE_LEVEL_AL EHCA_BMASK_IBM(31, 31)
+#define MQPCB_SERVICE_LEVEL_AL EHCA_BMASK_IBM(28, 31)
+#define MQPCB_MASK_SEND_GRH_FLAG_AL EHCA_BMASK_IBM(32, 32)
+#define MQPCB_SEND_GRH_FLAG_AL EHCA_BMASK_IBM(31, 31)
+#define MQPCB_MASK_RETRY_COUNT_AL EHCA_BMASK_IBM(33, 33)
+#define MQPCB_RETRY_COUNT_AL EHCA_BMASK_IBM(29, 31)
+#define MQPCB_MASK_TIMEOUT_AL EHCA_BMASK_IBM(34, 34)
+#define MQPCB_TIMEOUT_AL EHCA_BMASK_IBM(27, 31)
+#define MQPCB_MASK_MAX_STATIC_RATE_AL EHCA_BMASK_IBM(35, 35)
+#define MQPCB_MAX_STATIC_RATE_AL EHCA_BMASK_IBM(24, 31)
+#define MQPCB_MASK_DLID_AL EHCA_BMASK_IBM(36, 36)
+#define MQPCB_DLID_AL EHCA_BMASK_IBM(16, 31)
+#define MQPCB_MASK_RNR_RETRY_COUNT_AL EHCA_BMASK_IBM(37, 37)
+#define MQPCB_RNR_RETRY_COUNT_AL EHCA_BMASK_IBM(29, 31)
+#define MQPCB_MASK_SOURCE_PATH_BITS_AL EHCA_BMASK_IBM(38, 38)
+#define MQPCB_SOURCE_PATH_BITS_AL EHCA_BMASK_IBM(25, 31)
+#define MQPCB_MASK_TRAFFIC_CLASS_AL EHCA_BMASK_IBM(39, 39)
+#define MQPCB_TRAFFIC_CLASS_AL EHCA_BMASK_IBM(24, 31)
+#define MQPCB_MASK_HOP_LIMIT_AL EHCA_BMASK_IBM(40, 40)
+#define MQPCB_HOP_LIMIT_AL EHCA_BMASK_IBM(24, 31)
+#define MQPCB_MASK_SOURCE_GID_IDX_AL EHCA_BMASK_IBM(41, 41)
+#define MQPCB_SOURCE_GID_IDX_AL EHCA_BMASK_IBM(24, 31)
+#define MQPCB_MASK_FLOW_LABEL_AL EHCA_BMASK_IBM(42, 42)
+#define MQPCB_FLOW_LABEL_AL EHCA_BMASK_IBM(12, 31)
+#define MQPCB_MASK_DEST_GID_AL EHCA_BMASK_IBM(44, 44)
+#define MQPCB_MASK_MAX_NR_OUTST_SEND_WR EHCA_BMASK_IBM(45, 45)
+#define MQPCB_MAX_NR_OUTST_SEND_WR EHCA_BMASK_IBM(16, 31)
+#define MQPCB_MASK_MAX_NR_OUTST_RECV_WR EHCA_BMASK_IBM(46, 46)
+#define MQPCB_MAX_NR_OUTST_RECV_WR EHCA_BMASK_IBM(16, 31)
+#define MQPCB_MASK_DISABLE_ETE_CREDIT_CHECK EHCA_BMASK_IBM(47, 47)
+#define MQPCB_DISABLE_ETE_CREDIT_CHECK EHCA_BMASK_IBM(31, 31)
+#define MQPCB_QP_NUMBER EHCA_BMASK_IBM( 8, 31)
+#define MQPCB_MASK_QP_ENABLE EHCA_BMASK_IBM(48, 48)
+#define MQPCB_QP_ENABLE EHCA_BMASK_IBM(31, 31)
+#define MQPCB_MASK_CURR_SRQ_LIMIT EHCA_BMASK_IBM(49, 49)
+#define MQPCB_CURR_SRQ_LIMIT EHCA_BMASK_IBM(16, 31)
+#define MQPCB_MASK_QP_AFF_ASYN_EV_LOG_REG EHCA_BMASK_IBM(50, 50)
+#define MQPCB_MASK_SHARED_RQ_HNDL EHCA_BMASK_IBM(51, 51)
#endif /* __EHCA_CLASSES_PSERIES_H__ */
diff --git a/drivers/infiniband/hw/ehca/ehca_cq.c b/drivers/infiniband/hw/ehca/ehca_cq.c
index 01d4a148bd7..9e87883b561 100644
--- a/drivers/infiniband/hw/ehca/ehca_cq.c
+++ b/drivers/infiniband/hw/ehca/ehca_cq.c
@@ -97,7 +97,7 @@ int ehca_cq_unassign_qp(struct ehca_cq *cq, unsigned int real_qp_num)
return ret;
}
-struct ehca_qp* ehca_cq_get_qp(struct ehca_cq *cq, int real_qp_num)
+struct ehca_qp *ehca_cq_get_qp(struct ehca_cq *cq, int real_qp_num)
{
struct ehca_qp *ret = NULL;
unsigned int key = real_qp_num & (QP_HASHTAB_LEN-1);
diff --git a/drivers/infiniband/hw/ehca/ehca_eq.c b/drivers/infiniband/hw/ehca/ehca_eq.c
index 4961eb88827..4825975f88c 100644
--- a/drivers/infiniband/hw/ehca/ehca_eq.c
+++ b/drivers/infiniband/hw/ehca/ehca_eq.c
@@ -96,7 +96,8 @@ int ehca_create_eq(struct ehca_shca *shca,
for (i = 0; i < nr_pages; i++) {
u64 rpage;
- if (!(vpage = ipz_qpageit_get_inc(&eq->ipz_queue))) {
+ vpage = ipz_qpageit_get_inc(&eq->ipz_queue);
+ if (!vpage) {
ret = H_RESOURCE;
goto create_eq_exit2;
}
diff --git a/drivers/infiniband/hw/ehca/ehca_hca.c b/drivers/infiniband/hw/ehca/ehca_hca.c
index bbd3c6a5822..fc19ef9fd96 100644
--- a/drivers/infiniband/hw/ehca/ehca_hca.c
+++ b/drivers/infiniband/hw/ehca/ehca_hca.c
@@ -127,6 +127,7 @@ int ehca_query_port(struct ib_device *ibdev,
u8 port, struct ib_port_attr *props)
{
int ret = 0;
+ u64 h_ret;
struct ehca_shca *shca = container_of(ibdev, struct ehca_shca,
ib_device);
struct hipz_query_port *rblock;
@@ -137,7 +138,8 @@ int ehca_query_port(struct ib_device *ibdev,
return -ENOMEM;
}
- if (hipz_h_query_port(shca->ipz_hca_handle, port, rblock) != H_SUCCESS) {
+ h_ret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
+ if (h_ret != H_SUCCESS) {
ehca_err(&shca->ib_device, "Can't query port properties");
ret = -EINVAL;
goto query_port1;
@@ -197,6 +199,7 @@ int ehca_query_sma_attr(struct ehca_shca *shca,
u8 port, struct ehca_sma_attr *attr)
{
int ret = 0;
+ u64 h_ret;
struct hipz_query_port *rblock;
rblock = ehca_alloc_fw_ctrlblock(GFP_ATOMIC);
@@ -205,7 +208,8 @@ int ehca_query_sma_attr(struct ehca_shca *shca,
return -ENOMEM;
}
- if (hipz_h_query_port(shca->ipz_hca_handle, port, rblock) != H_SUCCESS) {
+ h_ret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
+ if (h_ret != H_SUCCESS) {
ehca_err(&shca->ib_device, "Can't query port properties");
ret = -EINVAL;
goto query_sma_attr1;
@@ -230,9 +234,11 @@ query_sma_attr1:
int ehca_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
{
int ret = 0;
- struct ehca_shca *shca = container_of(ibdev, struct ehca_shca, ib_device);
+ u64 h_ret;
+ struct ehca_shca *shca;
struct hipz_query_port *rblock;
+ shca = container_of(ibdev, struct ehca_shca, ib_device);
if (index > 16) {
ehca_err(&shca->ib_device, "Invalid index: %x.", index);
return -EINVAL;
@@ -244,7 +250,8 @@ int ehca_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
return -ENOMEM;
}
- if (hipz_h_query_port(shca->ipz_hca_handle, port, rblock) != H_SUCCESS) {
+ h_ret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
+ if (h_ret != H_SUCCESS) {
ehca_err(&shca->ib_device, "Can't query port properties");
ret = -EINVAL;
goto query_pkey1;
@@ -262,6 +269,7 @@ int ehca_query_gid(struct ib_device *ibdev, u8 port,
int index, union ib_gid *gid)
{
int ret = 0;
+ u64 h_ret;
struct ehca_shca *shca = container_of(ibdev, struct ehca_shca,
ib_device);
struct hipz_query_port *rblock;
@@ -277,7 +285,8 @@ int ehca_query_gid(struct ib_device *ibdev, u8 port,
return -ENOMEM;
}
- if (hipz_h_query_port(shca->ipz_hca_handle, port, rblock) != H_SUCCESS) {
+ h_ret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
+ if (h_ret != H_SUCCESS) {
ehca_err(&shca->ib_device, "Can't query port properties");
ret = -EINVAL;
goto query_gid1;
@@ -302,11 +311,12 @@ int ehca_modify_port(struct ib_device *ibdev,
struct ib_port_modify *props)
{
int ret = 0;
- struct ehca_shca *shca = container_of(ibdev, struct ehca_shca, ib_device);
+ struct ehca_shca *shca;
struct hipz_query_port *rblock;
u32 cap;
u64 hret;
+ shca = container_of(ibdev, struct ehca_shca, ib_device);
if ((props->set_port_cap_mask | props->clr_port_cap_mask)
& ~allowed_port_caps) {
ehca_err(&shca->ib_device, "Non-changeable bits set in masks "
@@ -325,7 +335,8 @@ int ehca_modify_port(struct ib_device *ibdev,
goto modify_port1;
}
- if (hipz_h_query_port(shca->ipz_hca_handle, port, rblock) != H_SUCCESS) {
+ hret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
+ if (hret != H_SUCCESS) {
ehca_err(&shca->ib_device, "Can't query port properties");
ret = -EINVAL;
goto modify_port2;
@@ -337,7 +348,8 @@ int ehca_modify_port(struct ib_device *ibdev,
hret = hipz_h_modify_port(shca->ipz_hca_handle, port,
cap, props->init_type, port_modify_mask);
if (hret != H_SUCCESS) {
- ehca_err(&shca->ib_device, "Modify port failed hret=%lx", hret);
+ ehca_err(&shca->ib_device, "Modify port failed hret=%lx",
+ hret);
ret = -EINVAL;
}
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index 96eba383075..4fb01fcb63a 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -49,26 +49,26 @@
#include "hipz_fns.h"
#include "ipz_pt_fn.h"
-#define EQE_COMPLETION_EVENT EHCA_BMASK_IBM(1,1)
-#define EQE_CQ_QP_NUMBER EHCA_BMASK_IBM(8,31)
-#define EQE_EE_IDENTIFIER EHCA_BMASK_IBM(2,7)
-#define EQE_CQ_NUMBER EHCA_BMASK_IBM(8,31)
-#define EQE_QP_NUMBER EHCA_BMASK_IBM(8,31)
-#define EQE_QP_TOKEN EHCA_BMASK_IBM(32,63)
-#define EQE_CQ_TOKEN EHCA_BMASK_IBM(32,63)
-
-#define NEQE_COMPLETION_EVENT EHCA_BMASK_IBM(1,1)
-#define NEQE_EVENT_CODE EHCA_BMASK_IBM(2,7)
-#define NEQE_PORT_NUMBER EHCA_BMASK_IBM(8,15)
-#define NEQE_PORT_AVAILABILITY EHCA_BMASK_IBM(16,16)
-#define NEQE_DISRUPTIVE EHCA_BMASK_IBM(16,16)
-
-#define ERROR_DATA_LENGTH EHCA_BMASK_IBM(52,63)
-#define ERROR_DATA_TYPE EHCA_BMASK_IBM(0,7)
+#define EQE_COMPLETION_EVENT EHCA_BMASK_IBM( 1, 1)
+#define EQE_CQ_QP_NUMBER EHCA_BMASK_IBM( 8, 31)
+#define EQE_EE_IDENTIFIER EHCA_BMASK_IBM( 2, 7)
+#define EQE_CQ_NUMBER EHCA_BMASK_IBM( 8, 31)
+#define EQE_QP_NUMBER EHCA_BMASK_IBM( 8, 31)
+#define EQE_QP_TOKEN EHCA_BMASK_IBM(32, 63)
+#define EQE_CQ_TOKEN EHCA_BMASK_IBM(32, 63)
+
+#define NEQE_COMPLETION_EVENT EHCA_BMASK_IBM( 1, 1)
+#define NEQE_EVENT_CODE EHCA_BMASK_IBM( 2, 7)
+#define NEQE_PORT_NUMBER EHCA_BMASK_IBM( 8, 15)
+#define NEQE_PORT_AVAILABILITY EHCA_BMASK_IBM(16, 16)
+#define NEQE_DISRUPTIVE EHCA_BMASK_IBM(16, 16)
+
+#define ERROR_DATA_LENGTH EHCA_BMASK_IBM(52, 63)
+#define ERROR_DATA_TYPE EHCA_BMASK_IBM( 0, 7)
static void queue_comp_task(struct ehca_cq *__cq);
-static struct ehca_comp_pool* pool;
+static struct ehca_comp_pool *pool;
#ifdef CONFIG_HOTPLUG_CPU
static struct notifier_block comp_pool_callback_nb;
#endif
@@ -85,8 +85,8 @@ static inline void comp_event_callback(struct ehca_cq *cq)
return;
}
-static void print_error_data(struct ehca_shca * shca, void* data,
- u64* rblock, int length)
+static void print_error_data(struct ehca_shca *shca, void *data,
+ u64 *rblock, int length)
{
u64 type = EHCA_BMASK_GET(ERROR_DATA_TYPE, rblock[2]);
u64 resource = rblock[1];
@@ -94,7 +94,7 @@ static void print_error_data(struct ehca_shca * shca, void* data,
switch (type) {
case 0x1: /* Queue Pair */
{
- struct ehca_qp *qp = (struct ehca_qp*)data;
+ struct ehca_qp *qp = (struct ehca_qp *)data;
/* only print error data if AER is set */
if (rblock[6] == 0)
@@ -107,7 +107,7 @@ static void print_error_data(struct ehca_shca * shca, void* data,
}
case 0x4: /* Completion Queue */
{
- struct ehca_cq *cq = (struct ehca_cq*)data;
+ struct ehca_cq *cq = (struct ehca_cq *)data;
ehca_err(&shca->ib_device,
"CQ 0x%x (resource=%lx) has errors.",
@@ -572,7 +572,7 @@ void ehca_tasklet_eq(unsigned long data)
ehca_process_eq((struct ehca_shca*)data, 1);
}
-static inline int find_next_online_cpu(struct ehca_comp_pool* pool)
+static inline int find_next_online_cpu(struct ehca_comp_pool *pool)
{
int cpu;
unsigned long flags;
@@ -636,7 +636,7 @@ static void queue_comp_task(struct ehca_cq *__cq)
__queue_comp_task(__cq, cct);
}
-static void run_comp_task(struct ehca_cpu_comp_task* cct)
+static void run_comp_task(struct ehca_cpu_comp_task *cct)
{
struct ehca_cq *cq;
unsigned long flags;
@@ -666,12 +666,12 @@ static void run_comp_task(struct ehca_cpu_comp_task* cct)
static int comp_task(void *__cct)
{
- struct ehca_cpu_comp_task* cct = __cct;
+ struct ehca_cpu_comp_task *cct = __cct;
int cql_empty;
DECLARE_WAITQUEUE(wait, current);
set_current_state(TASK_INTERRUPTIBLE);
- while(!kthread_should_stop()) {
+ while (!kthread_should_stop()) {
add_wait_queue(&cct->wait_queue, &wait);
spin_lock_irq(&cct->task_lock);
@@ -745,7 +745,7 @@ static void take_over_work(struct ehca_comp_pool *pool,
list_splice_init(&cct->cq_list, &list);
- while(!list_empty(&list)) {
+ while (!list_empty(&list)) {
cq = list_entry(cct->cq_list.next, struct ehca_cq, entry);
list_del(&cq->entry);
@@ -768,7 +768,7 @@ static int comp_pool_callback(struct notifier_block *nfb,
case CPU_UP_PREPARE:
case CPU_UP_PREPARE_FROZEN:
ehca_gen_dbg("CPU: %x (CPU_PREPARE)", cpu);
- if(!create_comp_task(pool, cpu)) {
+ if (!create_comp_task(pool, cpu)) {
ehca_gen_err("Can't create comp_task for cpu: %x", cpu);
return NOTIFY_BAD;
}
@@ -838,7 +838,7 @@ int ehca_create_comp_pool(void)
#ifdef CONFIG_HOTPLUG_CPU
comp_pool_callback_nb.notifier_call = comp_pool_callback;
- comp_pool_callback_nb.priority =0;
+ comp_pool_callback_nb.priority = 0;
register_cpu_notifier(&comp_pool_callback_nb);
#endif
diff --git a/drivers/infiniband/hw/ehca/ehca_iverbs.h b/drivers/infiniband/hw/ehca/ehca_iverbs.h
index 77aeca6a2c2..dce503bb7d6 100644
--- a/drivers/infiniband/hw/ehca/ehca_iverbs.h
+++ b/drivers/infiniband/hw/ehca/ehca_iverbs.h
@@ -81,8 +81,9 @@ struct ib_mr *ehca_reg_phys_mr(struct ib_pd *pd,
int num_phys_buf,
int mr_access_flags, u64 *iova_start);
-struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, u64 virt,
- int mr_access_flags, struct ib_udata *udata);
+struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+ u64 virt, int mr_access_flags,
+ struct ib_udata *udata);
int ehca_rereg_phys_mr(struct ib_mr *mr,
int mr_rereg_mask,
@@ -192,7 +193,7 @@ void ehca_poll_eqs(unsigned long data);
void *ehca_alloc_fw_ctrlblock(gfp_t flags);
void ehca_free_fw_ctrlblock(void *ptr);
#else
-#define ehca_alloc_fw_ctrlblock(flags) ((void *) get_zeroed_page(flags))
+#define ehca_alloc_fw_ctrlblock(flags) ((void *)get_zeroed_page(flags))
#define ehca_free_fw_ctrlblock(ptr) free_page((unsigned long)(ptr))
#endif
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index 203d01f87c3..36377c6db3d 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -107,7 +107,7 @@ static DEFINE_SPINLOCK(shca_list_lock);
static struct timer_list poll_eqs_timer;
#ifdef CONFIG_PPC_64K_PAGES
-static struct kmem_cache *ctblk_cache = NULL;
+static struct kmem_cache *ctblk_cache;
void *ehca_alloc_fw_ctrlblock(gfp_t flags)
{
@@ -200,8 +200,8 @@ static void ehca_destroy_slab_caches(void)
#endif
}
-#define EHCA_HCAAVER EHCA_BMASK_IBM(32,39)
-#define EHCA_REVID EHCA_BMASK_IBM(40,63)
+#define EHCA_HCAAVER EHCA_BMASK_IBM(32, 39)
+#define EHCA_REVID EHCA_BMASK_IBM(40, 63)
static struct cap_descr {
u64 mask;
@@ -295,7 +295,7 @@ int ehca_sense_attributes(struct ehca_shca *shca)
if (EHCA_BMASK_GET(hca_cap_descr[i].mask, shca->hca_cap))
ehca_gen_dbg(" %s", hca_cap_descr[i].descr);
- port = (struct hipz_query_port *) rblock;
+ port = (struct hipz_query_port *)rblock;
h_ret = hipz_h_query_port(shca->ipz_hca_handle, 1, port);
if (h_ret != H_SUCCESS) {
ehca_gen_err("Cannot query port properties. h_ret=%lx",
@@ -444,7 +444,7 @@ static int ehca_create_aqp1(struct ehca_shca *shca, u32 port)
return -EPERM;
}
- ibcq = ib_create_cq(&shca->ib_device, NULL, NULL, (void*)(-1), 10, 0);
+ ibcq = ib_create_cq(&shca->ib_device, NULL, NULL, (void *)(-1), 10, 0);
if (IS_ERR(ibcq)) {
ehca_err(&shca->ib_device, "Cannot create AQP1 CQ.");
return PTR_ERR(ibcq);
@@ -671,7 +671,7 @@ static int __devinit ehca_probe(struct ibmebus_dev *dev,
}
/* create internal protection domain */
- ibpd = ehca_alloc_pd(&shca->ib_device, (void*)(-1), NULL);
+ ibpd = ehca_alloc_pd(&shca->ib_device, (void *)(-1), NULL);
if (IS_ERR(ibpd)) {
ehca_err(&shca->ib_device, "Cannot create internal PD.");
ret = PTR_ERR(ibpd);
@@ -868,18 +868,21 @@ int __init ehca_module_init(void)
printk(KERN_INFO "eHCA Infiniband Device Driver "
"(Rel.: SVNEHCA_0023)\n");
- if ((ret = ehca_create_comp_pool())) {
+ ret = ehca_create_comp_pool();
+ if (ret) {
ehca_gen_err("Cannot create comp pool.");
return ret;
}
- if ((ret = ehca_create_slab_caches())) {
+ ret = ehca_create_slab_caches();
+ if (ret) {
ehca_gen_err("Cannot create SLAB caches");
ret = -ENOMEM;
goto module_init1;
}
- if ((ret = ibmebus_register_driver(&ehca_driver))) {
+ ret = ibmebus_register_driver(&ehca_driver);
+ if (ret) {
ehca_gen_err("Cannot register eHCA device driver");
ret = -EINVAL;
goto module_init2;
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.c b/drivers/infiniband/hw/ehca/ehca_mrmw.c
index 93c26ccfc3c..6262c5462d5 100644
--- a/drivers/infiniband/hw/ehca/ehca_mrmw.c
+++ b/drivers/infiniband/hw/ehca/ehca_mrmw.c
@@ -61,9 +61,9 @@ static struct ehca_mr *ehca_mr_new(void)
struct ehca_mr *me;
me = kmem_cache_zalloc(mr_cache, GFP_KERNEL);
- if (me) {
+ if (me)
spin_lock_init(&me->mrlock);
- } else
+ else
ehca_gen_err("alloc failed");
return me;
@@ -79,9 +79,9 @@ static struct ehca_mw *ehca_mw_new(void)
struct ehca_mw *me;
me = kmem_cache_zalloc(mw_cache, GFP_KERNEL);
- if (me) {
+ if (me)
spin_lock_init(&me->mwlock);
- } else
+ else
ehca_gen_err("alloc failed");
return me;
@@ -111,7 +111,7 @@ struct ib_mr *ehca_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
goto get_dma_mr_exit0;
}
- ret = ehca_reg_maxmr(shca, e_maxmr, (u64*)KERNELBASE,
+ ret = ehca_reg_maxmr(shca, e_maxmr, (u64 *)KERNELBASE,
mr_access_flags, e_pd,
&e_maxmr->ib.ib_mr.lkey,
&e_maxmr->ib.ib_mr.rkey);
@@ -246,8 +246,9 @@ reg_phys_mr_exit0:
/*----------------------------------------------------------------------*/
-struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, u64 virt,
- int mr_access_flags, struct ib_udata *udata)
+struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+ u64 virt, int mr_access_flags,
+ struct ib_udata *udata)
{
struct ib_mr *ib_mr;
struct ehca_mr *e_mr;
@@ -295,7 +296,7 @@ struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, u64 virt
e_mr->umem = ib_umem_get(pd->uobject->context, start, length,
mr_access_flags);
if (IS_ERR(e_mr->umem)) {
- ib_mr = (void *) e_mr->umem;
+ ib_mr = (void *)e_mr->umem;
goto reg_user_mr_exit1;
}
@@ -322,8 +323,9 @@ struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, u64 virt
(&e_mr->umem->chunk_list),
list);
- ret = ehca_reg_mr(shca, e_mr, (u64*) virt, length, mr_access_flags, e_pd,
- &pginfo, &e_mr->ib.ib_mr.lkey, &e_mr->ib.ib_mr.rkey);
+ ret = ehca_reg_mr(shca, e_mr, (u64 *)virt, length, mr_access_flags,
+ e_pd, &pginfo, &e_mr->ib.ib_mr.lkey,
+ &e_mr->ib.ib_mr.rkey);
if (ret) {
ib_mr = ERR_PTR(ret);
goto reg_user_mr_exit2;
@@ -420,7 +422,7 @@ int ehca_rereg_phys_mr(struct ib_mr *mr,
goto rereg_phys_mr_exit0;
}
if (!phys_buf_array || num_phys_buf <= 0) {
- ehca_err(mr->device, "bad input values: mr_rereg_mask=%x"
+ ehca_err(mr->device, "bad input values mr_rereg_mask=%x"
" phys_buf_array=%p num_phys_buf=%x",
mr_rereg_mask, phys_buf_array, num_phys_buf);
ret = -EINVAL;
@@ -444,10 +446,10 @@ int ehca_rereg_phys_mr(struct ib_mr *mr,
/* set requested values dependent on rereg request */
spin_lock_irqsave(&e_mr->mrlock, sl_flags);
- new_start = e_mr->start; /* new == old address */
- new_size = e_mr->size; /* new == old length */
- new_acl = e_mr->acl; /* new == old access control */
- new_pd = container_of(mr->pd,struct ehca_pd,ib_pd); /*new == old PD*/
+ new_start = e_mr->start;
+ new_size = e_mr->size;
+ new_acl = e_mr->acl;
+ new_pd = container_of(mr->pd, struct ehca_pd, ib_pd);
if (mr_rereg_mask & IB_MR_REREG_TRANS) {
new_start = iova_start; /* change address */
@@ -517,7 +519,7 @@ int ehca_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr)
struct ehca_pd *my_pd = container_of(mr->pd, struct ehca_pd, ib_pd);
u32 cur_pid = current->tgid;
unsigned long sl_flags;
- struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};
+ struct ehca_mr_hipzout_parms hipzout;
if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
(my_pd->ownpid != cur_pid)) {
@@ -629,7 +631,7 @@ struct ib_mw *ehca_alloc_mw(struct ib_pd *pd)
struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
struct ehca_shca *shca =
container_of(pd->device, struct ehca_shca, ib_device);
- struct ehca_mw_hipzout_parms hipzout = {{0},0};
+ struct ehca_mw_hipzout_parms hipzout;
e_mw = ehca_mw_new();
if (!e_mw) {
@@ -826,7 +828,7 @@ int ehca_map_phys_fmr(struct ib_fmr *fmr,
EHCA_PAGESIZE);
pginfo.u.fmr.fmr_pgsize = e_fmr->fmr_page_size;
- ret = ehca_rereg_mr(shca, e_fmr, (u64*)iova,
+ ret = ehca_rereg_mr(shca, e_fmr, (u64 *)iova,
list_len * e_fmr->fmr_page_size,
e_fmr->acl, e_pd, &pginfo, &tmp_lkey, &tmp_rkey);
if (ret)
@@ -841,8 +