Diffstat (limited to 'drivers/infiniband/hw/cxgb3')
-rw-r--r--  drivers/infiniband/hw/cxgb3/Makefile          |   2
-rw-r--r--  drivers/infiniband/hw/cxgb3/cxio_hal.c        |   6
-rw-r--r--  drivers/infiniband/hw/cxgb3/cxio_resource.c   |   4
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch.h            |  24
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_cm.c         | 132
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_ev.c         |   6
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_provider.c   |  40
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_provider.h   |   3
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_qp.c         |  62
9 files changed, 154 insertions, 125 deletions
diff --git a/drivers/infiniband/hw/cxgb3/Makefile b/drivers/infiniband/hw/cxgb3/Makefile
index 621619c794e..2761364185a 100644
--- a/drivers/infiniband/hw/cxgb3/Makefile
+++ b/drivers/infiniband/hw/cxgb3/Makefile
@@ -1,4 +1,4 @@
-ccflags-y := -Idrivers/net/cxgb3
+ccflags-y := -Idrivers/net/ethernet/chelsio/cxgb3
obj-$(CONFIG_INFINIBAND_CXGB3) += iw_cxgb3.o
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c
index c3f5aca4ef0..de1c61b417d 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c
@@ -735,14 +735,12 @@ static int __cxio_tpt_op(struct cxio_rdev *rdev_p, u32 reset_tpt_entry,
((perm & TPT_MW_BIND) ? F_TPT_MW_BIND_ENABLE : 0) |
V_TPT_ADDR_TYPE((zbva ? TPT_ZBTO : TPT_VATO)) |
V_TPT_PAGE_SIZE(page_size));
- tpt.rsvd_pbl_addr = reset_tpt_entry ? 0 :
- cpu_to_be32(V_TPT_PBL_ADDR(PBL_OFF(rdev_p, pbl_addr)>>3));
+ tpt.rsvd_pbl_addr = cpu_to_be32(V_TPT_PBL_ADDR(PBL_OFF(rdev_p, pbl_addr)>>3));
tpt.len = cpu_to_be32(len);
tpt.va_hi = cpu_to_be32((u32) (to >> 32));
tpt.va_low_or_fbo = cpu_to_be32((u32) (to & 0xFFFFFFFFULL));
tpt.rsvd_bind_cnt_or_pstag = 0;
- tpt.rsvd_pbl_size = reset_tpt_entry ? 0 :
- cpu_to_be32(V_TPT_PBL_SIZE(pbl_size >> 2));
+ tpt.rsvd_pbl_size = cpu_to_be32(V_TPT_PBL_SIZE(pbl_size >> 2));
}
err = cxio_hal_ctrl_qp_write_mem(rdev_p,
stag_idx +
diff --git a/drivers/infiniband/hw/cxgb3/cxio_resource.c b/drivers/infiniband/hw/cxgb3/cxio_resource.c
index 31f9201b298..c40088ecf9f 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_resource.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_resource.c
@@ -62,13 +62,13 @@ static int __cxio_init_resource_fifo(struct kfifo *fifo,
kfifo_in(fifo, (unsigned char *) &entry, sizeof(u32));
if (random) {
j = 0;
- random_bytes = random32();
+ random_bytes = prandom_u32();
for (i = 0; i < RANDOM_SIZE; i++)
rarray[i] = i + skip_low;
for (i = skip_low + RANDOM_SIZE; i < nr - skip_high; i++) {
if (j >= RANDOM_SIZE) {
j = 0;
- random_bytes = random32();
+ random_bytes = prandom_u32();
}
idx = (random_bytes >> (j * 2)) & 0xF;
kfifo_in(fifo,
diff --git a/drivers/infiniband/hw/cxgb3/iwch.h b/drivers/infiniband/hw/cxgb3/iwch.h
index a1c44578e03..837862287a2 100644
--- a/drivers/infiniband/hw/cxgb3/iwch.h
+++ b/drivers/infiniband/hw/cxgb3/iwch.h
@@ -153,19 +153,17 @@ static inline int insert_handle(struct iwch_dev *rhp, struct idr *idr,
void *handle, u32 id)
{
int ret;
- int newid;
-
- do {
- if (!idr_pre_get(idr, GFP_KERNEL)) {
- return -ENOMEM;
- }
- spin_lock_irq(&rhp->lock);
- ret = idr_get_new_above(idr, handle, id, &newid);
- BUG_ON(newid != id);
- spin_unlock_irq(&rhp->lock);
- } while (ret == -EAGAIN);
-
- return ret;
+
+ idr_preload(GFP_KERNEL);
+ spin_lock_irq(&rhp->lock);
+
+ ret = idr_alloc(idr, handle, id, id + 1, GFP_NOWAIT);
+
+ spin_unlock_irq(&rhp->lock);
+ idr_preload_end();
+
+ BUG_ON(ret == -ENOSPC);
+ return ret < 0 ? ret : 0;
}
static inline void remove_handle(struct iwch_dev *rhp, struct idr *idr, u32 id)
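
The insert_handle() hunk above is part of the tree-wide conversion from the
old idr_pre_get()/idr_get_new_above() retry loop to the idr_preload()/
idr_alloc() API. A minimal sketch of the new pattern, assuming a
caller-supplied spinlock (alloc_fixed_id() and its arguments are
illustrative, not from this patch):

#include <linux/idr.h>
#include <linux/spinlock.h>

/* Sketch: allocate exactly 'id' under a spinlock using idr_preload(). */
static int alloc_fixed_id(struct idr *idr, spinlock_t *lock,
			  void *handle, u32 id)
{
	int ret;

	idr_preload(GFP_KERNEL);	/* may sleep: preloads per-cpu layers */
	spin_lock_irq(lock);
	/* range [id, id + 1) requests that exact id; GFP_NOWAIT: no sleeping */
	ret = idr_alloc(idr, handle, id, id + 1, GFP_NOWAIT);
	spin_unlock_irq(lock);
	idr_preload_end();

	return ret < 0 ? ret : 0;	/* -ENOSPC means the id is in use */
}
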
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index d02dcc6e596..cb78b1e9bcd 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -128,9 +128,8 @@ static void stop_ep_timer(struct iwch_ep *ep)
{
PDBG("%s ep %p\n", __func__, ep);
if (!timer_pending(&ep->timer)) {
- printk(KERN_ERR "%s timer stopped when its not running! ep %p state %u\n",
+ WARN(1, "%s timer stopped when it's not running! ep %p state %u\n",
__func__, ep, ep->com.state);
- WARN_ON(1);
return;
}
del_timer_sync(&ep->timer);
@@ -287,7 +286,7 @@ void __free_ep(struct kref *kref)
if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
cxgb3_remove_tid(ep->com.tdev, (void *)ep, ep->hwtid);
dst_release(ep->dst);
- l2t_release(L2DATA(ep->com.tdev), ep->l2t);
+ l2t_release(ep->com.tdev, ep->l2t);
}
kfree(ep);
}
@@ -338,23 +337,12 @@ static struct rtable *find_route(struct t3cdev *dev, __be32 local_ip,
__be16 peer_port, u8 tos)
{
struct rtable *rt;
- struct flowi fl = {
- .oif = 0,
- .nl_u = {
- .ip4_u = {
- .daddr = peer_ip,
- .saddr = local_ip,
- .tos = tos}
- },
- .proto = IPPROTO_TCP,
- .uli_u = {
- .ports = {
- .sport = local_port,
- .dport = peer_port}
- }
- };
-
- if (ip_route_output_flow(&init_net, &rt, &fl, NULL, 0))
+ struct flowi4 fl4;
+
+ rt = ip_route_output_ports(&init_net, &fl4, NULL, peer_ip, local_ip,
+ peer_port, local_port, IPPROTO_TCP,
+ tos, 0);
+ if (IS_ERR(rt))
return NULL;
return rt;
}
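
The find_route() hunk above replaces a hand-built struct flowi with the
flowi4-based ip_route_output_ports() helper, which also changes the error
convention from an int return plus out-parameter to an ERR_PTR() result. A
minimal sketch of a lookup through the new helper (lookup_tcp_route() is an
illustrative wrapper, not a kernel function):

#include <linux/err.h>
#include <linux/in.h>
#include <net/net_namespace.h>
#include <net/route.h>

/* Sketch: IPv4 TCP route lookup; addresses and ports in network order. */
static struct rtable *lookup_tcp_route(__be32 saddr, __be32 daddr,
				       __be16 sport, __be16 dport, u8 tos)
{
	struct flowi4 fl4;
	struct rtable *rt;

	rt = ip_route_output_ports(&init_net, &fl4, NULL, daddr, saddr,
				   dport, sport, IPPROTO_TCP, tos, 0);
	if (IS_ERR(rt))			/* ERR_PTR() now signals failure */
		return NULL;
	return rt;
}
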
@@ -430,6 +418,7 @@ static int send_abort(struct iwch_ep *ep, struct sk_buff *skb, gfp_t gfp)
skb->priority = CPL_PRIORITY_DATA;
set_arp_failure_handler(skb, abort_arp_failure);
req = (struct cpl_abort_req *) skb_put(skb, sizeof(*req));
+ memset(req, 0, sizeof(*req));
req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_REQ));
req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid));
@@ -733,8 +722,10 @@ static void connect_reply_upcall(struct iwch_ep *ep, int status)
memset(&event, 0, sizeof(event));
event.event = IW_CM_EVENT_CONNECT_REPLY;
event.status = status;
- event.local_addr = ep->com.local_addr;
- event.remote_addr = ep->com.remote_addr;
+ memcpy(&event.local_addr, &ep->com.local_addr,
+ sizeof(ep->com.local_addr));
+ memcpy(&event.remote_addr, &ep->com.remote_addr,
+ sizeof(ep->com.remote_addr));
if ((status == 0) || (status == -ECONNREFUSED)) {
event.private_data_len = ep->plen;
@@ -759,11 +750,18 @@ static void connect_request_upcall(struct iwch_ep *ep)
PDBG("%s ep %p tid %d\n", __func__, ep, ep->hwtid);
memset(&event, 0, sizeof(event));
event.event = IW_CM_EVENT_CONNECT_REQUEST;
- event.local_addr = ep->com.local_addr;
- event.remote_addr = ep->com.remote_addr;
+ memcpy(&event.local_addr, &ep->com.local_addr,
+ sizeof(ep->com.local_addr));
+ memcpy(&event.remote_addr, &ep->com.remote_addr,
+ sizeof(ep->com.remote_addr));
event.private_data_len = ep->plen;
event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
event.provider_data = ep;
+ /*
+ * Until ird/ord negotiation via MPAv2 support is added, send max
+ * supported values
+ */
+ event.ird = event.ord = 8;
if (state_read(&ep->parent_ep->com) != DEAD) {
get_ep(&ep->com);
ep->parent_ep->com.cm_id->event_handler(
@@ -781,6 +779,11 @@ static void established_upcall(struct iwch_ep *ep)
PDBG("%s ep %p\n", __func__, ep);
memset(&event, 0, sizeof(event));
event.event = IW_CM_EVENT_ESTABLISHED;
+ /*
+ * Until ird/ord negotiation via MPAv2 support is added, send max
+ * supported values
+ */
+ event.ird = event.ord = 8;
if (ep->com.cm_id) {
PDBG("%s ep %p tid %d\n", __func__, ep, ep->hwtid);
ep->com.cm_id->event_handler(ep->com.cm_id, &event);
@@ -925,7 +928,7 @@ static void process_mpa_reply(struct iwch_ep *ep, struct sk_buff *skb)
goto err;
if (peer2peer && iwch_rqes_posted(ep->com.qp) == 0) {
- iwch_post_zb_read(ep->com.qp);
+ iwch_post_zb_read(ep);
}
goto out;
@@ -1089,6 +1092,8 @@ static int tx_ack(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
struct iwch_ep *ep = ctx;
struct cpl_wr_ack *hdr = cplhdr(skb);
unsigned int credits = ntohs(hdr->credits);
+ unsigned long flags;
+ int post_zb = 0;
PDBG("%s ep %p credits %u\n", __func__, ep, credits);
@@ -1098,28 +1103,34 @@ static int tx_ack(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
return CPL_RET_BUF_DONE;
}
+ spin_lock_irqsave(&ep->com.lock, flags);
BUG_ON(credits != 1);
dst_confirm(ep->dst);
if (!ep->mpa_skb) {
PDBG("%s rdma_init wr_ack ep %p state %u\n",
- __func__, ep, state_read(&ep->com));
+ __func__, ep, ep->com.state);
if (ep->mpa_attr.initiator) {
PDBG("%s initiator ep %p state %u\n",
- __func__, ep, state_read(&ep->com));
- if (peer2peer)
- iwch_post_zb_read(ep->com.qp);
+ __func__, ep, ep->com.state);
+ if (peer2peer && ep->com.state == FPDU_MODE)
+ post_zb = 1;
} else {
PDBG("%s responder ep %p state %u\n",
- __func__, ep, state_read(&ep->com));
- ep->com.rpl_done = 1;
- wake_up(&ep->com.waitq);
+ __func__, ep, ep->com.state);
+ if (ep->com.state == MPA_REQ_RCVD) {
+ ep->com.rpl_done = 1;
+ wake_up(&ep->com.waitq);
+ }
}
} else {
PDBG("%s lsm ack ep %p state %u freeing skb\n",
- __func__, ep, state_read(&ep->com));
+ __func__, ep, ep->com.state);
kfree_skb(ep->mpa_skb);
ep->mpa_skb = NULL;
}
+ spin_unlock_irqrestore(&ep->com.lock, flags);
+ if (post_zb)
+ iwch_post_zb_read(ep);
return CPL_RET_BUF_DONE;
}
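
The tx_ack() hunk above is primarily a locking fix: the endpoint state is
now examined under ep->com.lock, and the zero-byte read (which can re-enter
the connection machinery) is deferred until the lock is dropped. The shape
of that decide-then-act pattern, reduced to its essentials (struct my_ep,
MY_FPDU_MODE, and post_zero_byte_read() are hypothetical stand-ins):

#include <linux/spinlock.h>

/* Hypothetical endpoint: only the fields the pattern needs. */
struct my_ep {
	spinlock_t lock;
	int state;
};

#define MY_FPDU_MODE 1				/* stand-in driver state */

static void post_zero_byte_read(struct my_ep *ep) { }	/* hypothetical */

/* Sketch: decide under the lock, act only after dropping it. */
static void handle_ack(struct my_ep *ep)
{
	unsigned long flags;
	int post_zb = 0;

	spin_lock_irqsave(&ep->lock, flags);
	if (ep->state == MY_FPDU_MODE)	/* state is stable while locked */
		post_zb = 1;
	spin_unlock_irqrestore(&ep->lock, flags);

	if (post_zb)			/* may re-enter the CM: call unlocked */
		post_zero_byte_read(ep);
}
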
@@ -1181,7 +1192,7 @@ static int act_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
release_tid(ep->com.tdev, GET_TID(rpl), NULL);
cxgb3_free_atid(ep->com.tdev, ep->atid);
dst_release(ep->dst);
- l2t_release(L2DATA(ep->com.tdev), ep->l2t);
+ l2t_release(ep->com.tdev, ep->l2t);
put_ep(&ep->com);
return CPL_RET_BUF_DONE;
}
@@ -1367,7 +1378,7 @@ static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
goto reject;
}
dst = &rt->dst;
- l2t = t3_l2t_get(tdev, dst->neighbour, dst->neighbour->dev);
+ l2t = t3_l2t_get(tdev, dst, NULL, &req->peer_ip);
if (!l2t) {
printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
__func__);
@@ -1378,7 +1389,7 @@ static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
if (!child_ep) {
printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
__func__);
- l2t_release(L2DATA(tdev), l2t);
+ l2t_release(tdev, l2t);
dst_release(dst);
goto reject;
}
@@ -1673,7 +1684,7 @@ static int close_con_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
* T3A does 3 things when a TERM is received:
* 1) send up a CPL_RDMA_TERMINATE message with the TERM packet
* 2) generate an async event on the QP with the TERMINATE opcode
- * 3) post a TERMINATE opcde cqe into the associated CQ.
+ * 3) post a TERMINATE opcode cqe into the associated CQ.
*
* For (1), we save the message in the qp for later consumer consumption.
* For (2), we move the QP into TERMINATE, post a QP event and disconnect.
@@ -1749,9 +1760,8 @@ static void ep_timeout(unsigned long arg)
__state_set(&ep->com, ABORTING);
break;
default:
- printk(KERN_ERR "%s unexpected state ep %p state %u\n",
+ WARN(1, "%s unexpected state ep %p state %u\n",
__func__, ep, ep->com.state);
- WARN_ON(1);
abort = 0;
}
spin_unlock_irqrestore(&ep->com.lock, flags);
@@ -1867,8 +1877,9 @@ err:
static int is_loopback_dst(struct iw_cm_id *cm_id)
{
struct net_device *dev;
+ struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->remote_addr;
- dev = ip_dev_find(&init_net, cm_id->remote_addr.sin_addr.s_addr);
+ dev = ip_dev_find(&init_net, raddr->sin_addr.s_addr);
if (!dev)
return 0;
dev_put(dev);
@@ -1877,10 +1888,17 @@ static int is_loopback_dst(struct iw_cm_id *cm_id)
int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
- int err = 0;
struct iwch_dev *h = to_iwch_dev(cm_id->device);
struct iwch_ep *ep;
struct rtable *rt;
+ int err = 0;
+ struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->local_addr;
+ struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->remote_addr;
+
+ if (cm_id->remote_addr.ss_family != PF_INET) {
+ err = -ENOSYS;
+ goto out;
+ }
if (is_loopback_dst(cm_id)) {
err = -ENOSYS;
@@ -1924,21 +1942,17 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
}
/* find a route */
- rt = find_route(h->rdev.t3cdev_p,
- cm_id->local_addr.sin_addr.s_addr,
- cm_id->remote_addr.sin_addr.s_addr,
- cm_id->local_addr.sin_port,
- cm_id->remote_addr.sin_port, IPTOS_LOWDELAY);
+ rt = find_route(h->rdev.t3cdev_p, laddr->sin_addr.s_addr,
+ raddr->sin_addr.s_addr, laddr->sin_port,
+ raddr->sin_port, IPTOS_LOWDELAY);
if (!rt) {
printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
err = -EHOSTUNREACH;
goto fail3;
}
ep->dst = &rt->dst;
-
- /* get a l2t entry */
- ep->l2t = t3_l2t_get(ep->com.tdev, ep->dst->neighbour,
- ep->dst->neighbour->dev);
+ ep->l2t = t3_l2t_get(ep->com.tdev, ep->dst, NULL,
+ &raddr->sin_addr.s_addr);
if (!ep->l2t) {
printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
err = -ENOMEM;
@@ -1947,15 +1961,17 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
state_set(&ep->com, CONNECTING);
ep->tos = IPTOS_LOWDELAY;
- ep->com.local_addr = cm_id->local_addr;
- ep->com.remote_addr = cm_id->remote_addr;
+ memcpy(&ep->com.local_addr, &cm_id->local_addr,
+ sizeof(ep->com.local_addr));
+ memcpy(&ep->com.remote_addr, &cm_id->remote_addr,
+ sizeof(ep->com.remote_addr));
/* send connect request to rnic */
err = send_connect(ep);
if (!err)
goto out;
- l2t_release(L2DATA(h->rdev.t3cdev_p), ep->l2t);
+ l2t_release(h->rdev.t3cdev_p, ep->l2t);
fail4:
dst_release(ep->dst);
fail3:
@@ -1976,6 +1992,11 @@ int iwch_create_listen(struct iw_cm_id *cm_id, int backlog)
might_sleep();
+ if (cm_id->local_addr.ss_family != PF_INET) {
+ err = -ENOSYS;
+ goto fail1;
+ }
+
ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
if (!ep) {
printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
@@ -1987,7 +2008,8 @@ int iwch_create_listen(struct iw_cm_id *cm_id, int backlog)
cm_id->add_ref(cm_id);
ep->com.cm_id = cm_id;
ep->backlog = backlog;
- ep->com.local_addr = cm_id->local_addr;
+ memcpy(&ep->com.local_addr, &cm_id->local_addr,
+ sizeof(ep->com.local_addr));
/*
* Allocate a server TID.
@@ -2126,7 +2148,7 @@ int iwch_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
PDBG("%s ep %p redirect to dst %p l2t %p\n", __func__, ep, new,
l2t);
dst_hold(new);
- l2t_release(L2DATA(ep->com.tdev), ep->l2t);
+ l2t_release(ep->com.tdev, ep->l2t);
ep->l2t = l2t;
dst_release(old);
ep->dst = new;
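
The iwch_connect() and iwch_create_listen() hunks above track the iWARP
CM's switch from struct sockaddr_in members to struct sockaddr_storage:
callers now cast to sockaddr_in, memcpy() the address, and must reject
non-IPv4 families up front. The guard-and-cast idiom in isolation
(get_v4_addrs() is illustrative):

#include <linux/errno.h>
#include <linux/in.h>
#include <linux/socket.h>

/* Sketch: extract IPv4 views of a sockaddr_storage pair, or -ENOSYS. */
static int get_v4_addrs(struct sockaddr_storage *local,
			struct sockaddr_storage *remote,
			struct sockaddr_in **laddr,
			struct sockaddr_in **raddr)
{
	if (remote->ss_family != PF_INET)	/* PF_INET == AF_INET */
		return -ENOSYS;

	*laddr = (struct sockaddr_in *)local;
	*raddr = (struct sockaddr_in *)remote;
	return 0;
}
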
diff --git a/drivers/infiniband/hw/cxgb3/iwch_ev.c b/drivers/infiniband/hw/cxgb3/iwch_ev.c
index 71e0d845da3..abcc9e76962 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_ev.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_ev.c
@@ -46,6 +46,7 @@ static void post_qp_event(struct iwch_dev *rnicp, struct iwch_cq *chp,
struct ib_event event;
struct iwch_qp_attributes attrs;
struct iwch_qp *qhp;
+ unsigned long flag;
spin_lock(&rnicp->lock);
qhp = get_qhp(rnicp, CQE_QPID(rsp_msg->cqe));
@@ -94,7 +95,9 @@ static void post_qp_event(struct iwch_dev *rnicp, struct iwch_cq *chp,
if (qhp->ibqp.event_handler)
(*qhp->ibqp.event_handler)(&event, qhp->ibqp.qp_context);
+ spin_lock_irqsave(&chp->comp_handler_lock, flag);
(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
+ spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
if (atomic_dec_and_test(&qhp->refcnt))
wake_up(&qhp->wait);
@@ -107,6 +110,7 @@ void iwch_ev_dispatch(struct cxio_rdev *rdev_p, struct sk_buff *skb)
struct iwch_cq *chp;
struct iwch_qp *qhp;
u32 cqid = RSPQ_CQID(rsp_msg);
+ unsigned long flag;
rnicp = (struct iwch_dev *) rdev_p->ulp;
spin_lock(&rnicp->lock);
@@ -170,7 +174,9 @@ void iwch_ev_dispatch(struct cxio_rdev *rdev_p, struct sk_buff *skb)
*/
if (qhp->ep && SQ_TYPE(rsp_msg->cqe))
dst_confirm(qhp->ep->dst);
+ spin_lock_irqsave(&chp->comp_handler_lock, flag);
(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
+ spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
break;
case TPT_ERR_STAG:
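
Both event paths above now take the new chp->comp_handler_lock (declared in
iwch_provider.h below) around the CQ completion upcall, so the handler
cannot race with the flush paths in iwch_qp.c that take the same lock. The
invocation pattern on its own (a sketch; assumes the driver's headers and a
lock initialized when the CQ was created):

#include "iwch_provider.h"

/* Sketch: serialize the completion upcall behind comp_handler_lock. */
static void fire_comp_handler(struct iwch_cq *chp)
{
	unsigned long flag;

	spin_lock_irqsave(&chp->comp_handler_lock, flag);
	(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
	spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
}
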
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index 2e2741307af..811b24a539c 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -61,13 +61,6 @@
#include "iwch_user.h"
#include "common.h"
-static int iwch_modify_port(struct ib_device *ibdev,
- u8 port, int port_modify_mask,
- struct ib_port_modify *props)
-{
- return -ENOSYS;
-}
-
static struct ib_ah *iwch_ah_create(struct ib_pd *pd,
struct ib_ah_attr *ah_attr)
{
@@ -197,6 +190,7 @@ static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries, int ve
chp->rhp = rhp;
chp->ibcq.cqe = 1 << chp->cq.size_log2;
spin_lock_init(&chp->lock);
+ spin_lock_init(&chp->comp_handler_lock);
atomic_set(&chp->refcnt, 1);
init_waitqueue_head(&chp->wait);
if (insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid)) {
@@ -232,6 +226,7 @@ static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries, int ve
mm->len = PAGE_ALIGN(((1UL << uresp.size_log2) + 1) *
sizeof(struct t3_cqe));
uresp.memsize = mm->len;
+ uresp.reserved = 0;
resplen = sizeof uresp;
}
if (ib_copy_to_udata(udata, &uresp, resplen)) {
@@ -565,7 +560,7 @@ static int iwch_reregister_phys_mem(struct ib_mr *mr,
__be64 *page_list = NULL;
int shift = 0;
u64 total_size;
- int npages;
+ int npages = 0;
int ret;
PDBG("%s ib_mr %p ib_pd %p\n", __func__, mr, pd);
@@ -623,14 +618,13 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
{
__be64 *pages;
int shift, n, len;
- int i, j, k;
+ int i, k, entry;
int err = 0;
- struct ib_umem_chunk *chunk;
struct iwch_dev *rhp;
struct iwch_pd *php;
struct iwch_mr *mhp;
struct iwch_reg_user_mr_resp uresp;
-
+ struct scatterlist *sg;
PDBG("%s ib_pd %p\n", __func__, pd);
php = to_iwch_pd(pd);
@@ -650,9 +644,7 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
shift = ffs(mhp->umem->page_size) - 1;
- n = 0;
- list_for_each_entry(chunk, &mhp->umem->chunk_list, list)
- n += chunk->nents;
+ n = mhp->umem->nmap;
err = iwch_alloc_pbl(mhp, n);
if (err)
@@ -666,12 +658,10 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
i = n = 0;
- list_for_each_entry(chunk, &mhp->umem->chunk_list, list)
- for (j = 0; j < chunk->nmap; ++j) {
- len = sg_dma_len(&chunk->page_list[j]) >> shift;
+ for_each_sg(mhp->umem->sg_head.sgl, sg, mhp->umem->nmap, entry) {
+ len = sg_dma_len(sg) >> shift;
for (k = 0; k < len; ++k) {
- pages[i++] = cpu_to_be64(sg_dma_address(
- &chunk->page_list[j]) +
+ pages[i++] = cpu_to_be64(sg_dma_address(sg) +
mhp->umem->page_size * k);
if (i == PAGE_SIZE / sizeof *pages) {
err = iwch_write_pbl(mhp, pages, i, n);
@@ -681,7 +671,7 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
i = 0;
}
}
- }
+ }
if (i)
err = iwch_write_pbl(mhp, pages, i, n);
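
The iwch_reg_user_mr() hunk above follows the ib_umem conversion from a
list of page chunks to a single scatter/gather table: the page count comes
from umem->nmap and the walk collapses into one for_each_sg() loop. The
loop shape, extracted (walk_umem_pages() and emit_pbl_entry() are
illustrative stand-ins for the driver's PBL write):

#include <linux/scatterlist.h>
#include <linux/types.h>
#include <asm/byteorder.h>

/* Hypothetical sink for one page-buffer-list entry. */
static void emit_pbl_entry(__be64 entry) { }

/* Sketch: walk a DMA-mapped sg table, one entry per hardware page. */
static void walk_umem_pages(struct scatterlist *sgl, int nmap,
			    int shift, u64 page_size)
{
	struct scatterlist *sg;
	int i, k, len;

	for_each_sg(sgl, sg, nmap, i) {
		len = sg_dma_len(sg) >> shift;	/* pages in this segment */
		for (k = 0; k < len; ++k)
			emit_pbl_entry(cpu_to_be64(sg_dma_address(sg) +
						   page_size * k));
	}
}
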
@@ -744,7 +734,7 @@ static struct ib_mr *iwch_get_dma_mr(struct ib_pd *pd, int acc)
return ibmr;
}
-static struct ib_mw *iwch_alloc_mw(struct ib_pd *pd)
+static struct ib_mw *iwch_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)
{
struct iwch_dev *rhp;
struct iwch_pd *php;
@@ -753,6 +743,9 @@ static struct ib_mw *iwch_alloc_mw(struct ib_pd *pd)
u32 stag = 0;
int ret;
+ if (type != IB_MW_TYPE_1)
+ return ERR_PTR(-EINVAL);
+
php = to_iwch_pd(pd);
rhp = php->rhp;
mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
@@ -789,8 +782,8 @@ static int iwch_dealloc_mw(struct ib_mw *mw)
mmid = (mw->rkey) >> 8;
cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
remove_handle(rhp, &rhp->mmidr, mmid);
- kfree(mhp);
PDBG("%s ib_mw %p mmid 0x%x ptr %p\n", __func__, mw, mmid, mhp);
+ kfree(mhp);
return 0;
}
@@ -1233,7 +1226,7 @@ static int iwch_query_port(struct ib_device *ibdev,
props->gid_tbl_len = 1;
props->pkey_tbl_len = 1;
props->active_width = 2;
- props->active_speed = 2;
+ props->active_speed = IB_SPEED_DDR;
props->max_msg_sz = -1;
return 0;
@@ -1392,7 +1385,6 @@ int iwch_register_device(struct iwch_dev *dev)
dev->ibdev.dma_device = &(dev->rdev.rnic_info.pdev->dev);
dev->ibdev.query_device = iwch_query_device;
dev->ibdev.query_port = iwch_query_port;
- dev->ibdev.modify_port = iwch_modify_port;
dev->ibdev.query_pkey = iwch_query_pkey;
dev->ibdev.query_gid = iwch_query_gid;
dev->ibdev.alloc_ucontext = iwch_alloc_ucontext;
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.h b/drivers/infiniband/hw/cxgb3/iwch_provider.h
index c5406da3f4c..87c14b0c5ac 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.h
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.h
@@ -103,6 +103,7 @@ struct iwch_cq {
struct iwch_dev *rhp;
struct t3_cq cq;
spinlock_t lock;
+ spinlock_t comp_handler_lock;
atomic_t refcnt;
wait_queue_head_t wait;
u32 __user *user_rptr_addr;
@@ -332,7 +333,7 @@ int iwch_bind_mw(struct ib_qp *qp,
struct ib_mw_bind *mw_bind);
int iwch_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg);
-int iwch_post_zb_read(struct iwch_qp *qhp);
+int iwch_post_zb_read(struct iwch_ep *ep);
int iwch_register_device(struct iwch_dev *dev);
void iwch_unregister_device(struct iwch_dev *dev);
void stop_read_rep_timer(struct iwch_qp *qhp);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index 1b4cd09f74d..b57c0befd96 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -567,18 +567,19 @@ int iwch_bind_mw(struct ib_qp *qp,
if (mw_bind->send_flags & IB_SEND_SIGNALED)
t3_wr_flags = T3_COMPLETION_FLAG;
- sgl.addr = mw_bind->addr;
- sgl.lkey = mw_bind->mr->lkey;
- sgl.length = mw_bind->length;
+ sgl.addr = mw_bind->bind_info.addr;
+ sgl.lkey = mw_bind->bind_info.mr->lkey;
+ sgl.length = mw_bind->bind_info.length;
wqe->bind.reserved = 0;
wqe->bind.type = TPT_VATO;
/* TBD: check perms */
- wqe->bind.perms = iwch_ib_to_tpt_bind_access(mw_bind->mw_access_flags);
- wqe->bind.mr_stag = cpu_to_be32(mw_bind->mr->lkey);
+ wqe->bind.perms = iwch_ib_to_tpt_bind_access(
+ mw_bind->bind_info.mw_access_flags);
+ wqe->bind.mr_stag = cpu_to_be32(mw_bind->bind_info.mr->lkey);
wqe->bind.mw_stag = cpu_to_be32(mw->rkey);
- wqe->bind.mw_len = cpu_to_be32(mw_bind->length);
- wqe->bind.mw_va = cpu_to_be64(mw_bind->addr);
+ wqe->bind.mw_len = cpu_to_be32(mw_bind->bind_info.length);
+ wqe->bind.mw_va = cpu_to_be64(mw_bind->bind_info.addr);
err = iwch_sgl2pbl_map(rhp, &sgl, 1, &pbl_addr, &page_size);
if (err) {
spin_unlock_irqrestore(&qhp->lock, flag);
@@ -738,7 +739,7 @@ static inline void build_term_codes(struct respQ_msg_t *rsp_msg,
}
}
-int iwch_post_zb_read(struct iwch_qp *qhp)
+int iwch_post_zb_read(struct iwch_ep *ep)
{
union t3_wr *wqe;
struct sk_buff *skb;
@@ -761,10 +762,10 @@ int iwch_post_zb_read(struct iwch_qp *qhp)
wqe->read.local_len = cpu_to_be32(0);
wqe->read.local_to = cpu_to_be64(1);
wqe->send.wrh.op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(T3_WR_READ));
- wqe->send.wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(qhp->ep->hwtid)|
+ wqe->send.wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(ep->hwtid)|
V_FW_RIWR_LEN(flit_cnt));
skb->priority = CPL_PRIORITY_DATA;
- return iwch_cxgb3_ofld_send(qhp->rhp->rdev.t3cdev_p, skb);
+ return iwch_cxgb3_ofld_send(ep->com.qp->rhp->rdev.t3cdev_p, skb);
}
/*
@@ -803,7 +804,7 @@ int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg)
* Assumes qhp lock is held.
*/
static void __flush_qp(struct iwch_qp *qhp, struct iwch_cq *rchp,
- struct iwch_cq *schp, unsigned long *flag)
+ struct iwch_cq *schp)
{
int count;
int flushed;
@@ -812,38 +813,44 @@ static void __flush_qp(struct iwch_qp *qhp, struct iwch_cq *rchp,
PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);
/* take a ref on the qhp since we must release the lock */
atomic_inc(&qhp->refcnt);
- spin_unlock_irqrestore(&qhp->lock, *flag);
+ spin_unlock(&qhp->lock);
/* locking hierarchy: cq lock first, then qp lock. */
- spin_lock_irqsave(&rchp->lock, *flag);
+ spin_lock(&rchp->lock);
spin_lock(&qhp->lock);
cxio_flush_hw_cq(&rchp->cq);
cxio_count_rcqes(&rchp->cq, &qhp->wq, &count);
flushed = cxio_flush_rq(&qhp->wq, &rchp->cq, count);
spin_unlock(&qhp->lock);
- spin_unlock_irqrestore(&rchp->lock, *flag);
- if (flushed)
+ spin_unlock(&rchp->lock);
+ if (flushed) {
+ spin_lock(&rchp->comp_handler_lock);
(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
+ spin_unlock(&rchp->comp_handler_lock);
+ }
/* locking hierarchy: cq lock first, then qp lock. */
- spin_lock_irqsave(&schp->lock, *flag);
+ spin_lock(&schp->lock);
spin_lock(&qhp->lock);
cxio_flush_hw_cq(&schp->cq);
cxio_count_scqes(&schp->cq, &qhp->wq, &count);
flushed = cxio_flush_sq(&qhp->wq, &schp->cq, count);
spin_unlock(&qhp->lock);
- spin_unlock_irqrestore(&schp->lock, *flag);
- if (flushed)
+ spin_unlock(&schp->lock);
+ if (flushed) {
+ spin_lock(&schp->comp_handler_lock);
(*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);
+ spin_unlock(&schp->comp_handler_lock);
+ }
/* deref */
if (atomic_dec_and_test(&qhp->refcnt))
wake_up(&qhp->wait);
- spin_lock_irqsave(&qhp->lock, *flag);
+ spin_lock(&qhp->lock);
}
-static void flush_qp(struct iwch_qp *qhp, unsigned long *flag)
+static void flush_qp(struct iwch_qp *qhp)
{
struct iwch_cq *rchp, *schp;
@@ -853,15 +860,19 @@ static void flush_qp(struct iwch_qp *qhp, unsigned long *flag)
if (qhp->ibqp.uobject) {
cxio_set_wq_in_error(&qhp->wq);
cxio_set_cq_in_error(&rchp->cq);
+ spin_lock(&rchp->comp_handler_lock);
(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
+ spin_unlock(&rchp->comp_handler_lock);
if (schp != rchp) {
cxio_set_cq_in_error(&schp->cq);
+ spin_lock(&schp->comp_handler_lock);
(*schp->ibcq.comp_handler)(&schp->ibcq,
schp->ibcq.cq_context);
+ spin_unlock(&schp->comp_handler_lock);
}
return;
}
- __flush_qp(qhp, rchp, schp, flag);
+ __flush_qp(qhp, rchp, schp);
}
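
After this change __flush_qp() runs entirely with interrupts already
disabled by its caller (iwch_modify_qp() holds qhp->lock via
spin_lock_irqsave()), so the nested acquisitions drop their irqsave
variants while keeping the documented hierarchy: CQ lock first, then QP
lock. Reduced to the ordering alone (a sketch; assumes the driver's
iwch_cq/iwch_qp types and interrupts already off):

#include "iwch_provider.h"

/* Sketch: honor "cq lock first, then qp lock" with irqs already off. */
static void flush_one_cq(struct iwch_cq *chp, struct iwch_qp *qhp)
{
	spin_lock(&chp->lock);		/* outer: completion queue */
	spin_lock(&qhp->lock);		/* inner: queue pair */
	/* ... flush the hardware CQ and sweep in-flight WRs ... */
	spin_unlock(&qhp->lock);
	spin_unlock(&chp->lock);
}
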
@@ -872,7 +883,8 @@ u16 iwch_rqes_posted(struct iwch_qp *qhp)
{
union t3_wr *wqe = qhp->wq.queue;
u16 count = 0;
- while ((count+1) != 0 && fw_riwrh_opcode((struct fw_riwrh *)wqe) == T3_WR_RCV) {
+
+ while (count < USHRT_MAX && fw_riwrh_opcode((struct fw_riwrh *)wqe) == T3_WR_RCV) {
count++;
wqe++;
}
@@ -1020,7 +1032,7 @@ int iwch_modify_qp(struct iwch_dev *rhp, struct iwch_qp *qhp,
break;
case IWCH_QP_STATE_ERROR:
qhp->attr.state = IWCH_QP_STATE_ERROR;
- flush_qp(qhp, &flag);
+ flush_qp(qhp);
break;
default:
ret = -EINVAL;
@@ -1068,7 +1080,7 @@ int iwch_modify_qp(struct iwch_dev *rhp, struct iwch_qp *qhp,
}
switch (attrs->next_state) {
case IWCH_QP_STATE_IDLE:
- flush_qp(qhp, &flag);
+ flush_qp(qhp);
qhp->attr.state = IWCH_QP_STATE_IDLE;
qhp->attr.llp_stream_handle = NULL;
put_ep(&qhp->ep->com);
@@ -1122,7 +1134,7 @@ err:
free=1;
wake_up(&qhp->wait);
BUG_ON(!ep);
- flush_qp(qhp, &flag);
+ flush_qp(qhp);
out:
spin_unlock_irqrestore(&qhp->lock, flag);