Diffstat (limited to 'drivers/infiniband/hw')
-rw-r--r--  drivers/infiniband/hw/amso1100/c2.h            |    2
-rw-r--r--  drivers/infiniband/hw/amso1100/c2_alloc.c      |    4
-rw-r--r--  drivers/infiniband/hw/amso1100/c2_cq.c         |    4
-rw-r--r--  drivers/infiniband/hw/amso1100/c2_mq.h         |    2
-rw-r--r--  drivers/infiniband/hw/amso1100/c2_provider.h   |    2
-rw-r--r--  drivers/infiniband/hw/amso1100/c2_rnic.c       |   12
-rw-r--r--  drivers/infiniband/hw/cxgb3/cxio_hal.c         |   12
-rw-r--r--  drivers/infiniband/hw/cxgb3/cxio_hal.h         |    2
-rw-r--r--  drivers/infiniband/hw/cxgb3/cxio_wr.h          |    4
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch.c             |    2
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_cm.c          |  133
-rw-r--r--  drivers/infiniband/hw/cxgb4/Kconfig            |   18
-rw-r--r--  drivers/infiniband/hw/cxgb4/Makefile           |    5
-rw-r--r--  drivers/infiniband/hw/cxgb4/cm.c               | 2374
-rw-r--r--  drivers/infiniband/hw/cxgb4/cq.c               |  882
-rw-r--r--  drivers/infiniband/hw/cxgb4/device.c           |  520
-rw-r--r--  drivers/infiniband/hw/cxgb4/ev.c               |  193
-rw-r--r--  drivers/infiniband/hw/cxgb4/iw_cxgb4.h         |  745
-rw-r--r--  drivers/infiniband/hw/cxgb4/mem.c              |  811
-rw-r--r--  drivers/infiniband/hw/cxgb4/provider.c         |  518
-rw-r--r--  drivers/infiniband/hw/cxgb4/qp.c               | 1577
-rw-r--r--  drivers/infiniband/hw/cxgb4/resource.c         |  417
-rw-r--r--  drivers/infiniband/hw/cxgb4/t4.h               |  550
-rw-r--r--  drivers/infiniband/hw/cxgb4/t4fw_ri_api.h      |  829
-rw-r--r--  drivers/infiniband/hw/cxgb4/user.h             |   66
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_iba6110.c    |    2
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_iba6120.c    |    4
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_iba7220.c    |    2
-rw-r--r--  drivers/infiniband/hw/mlx4/cq.c                |    8
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c              |    1
-rw-r--r--  drivers/infiniband/hw/mlx4/qp.c                |   50
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_allocator.c  |    8
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_eq.c         |    6
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_provider.h   |    2
-rw-r--r--  drivers/infiniband/hw/nes/nes_hw.c             |   12
-rw-r--r--  drivers/infiniband/hw/nes/nes_nic.c            |   23
-rw-r--r--  drivers/infiniband/hw/nes/nes_utils.c          |   10
-rw-r--r--  drivers/infiniband/hw/nes/nes_verbs.c          |    2
38 files changed, 9674 insertions(+), 140 deletions(-)
diff --git a/drivers/infiniband/hw/amso1100/c2.h b/drivers/infiniband/hw/amso1100/c2.h
index f7ff66f9836..6ae698e6877 100644
--- a/drivers/infiniband/hw/amso1100/c2.h
+++ b/drivers/infiniband/hw/amso1100/c2.h
@@ -250,7 +250,7 @@ struct c2_array {
struct sp_chunk {
struct sp_chunk *next;
dma_addr_t dma_addr;
- DECLARE_PCI_UNMAP_ADDR(mapping);
+ DEFINE_DMA_UNMAP_ADDR(mapping);
u16 head;
u16 shared_ptr[0];
};
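
[Editor's note: the hunks above and below are a mechanical conversion from the PCI-specific unmap-address bookkeeping macros to their bus-agnostic DMA-API equivalents from <linux/dma-mapping.h>; the same substitution repeats across the amso1100 and cxgb3 files. For reference, a minimal sketch of the canonical streaming-DMA use of these macros, with hypothetical struct and function names not taken from this patch (the drivers here instead use dma_unmap_addr() to stash coherent-buffer bus addresses):

    #include <linux/dma-mapping.h>

    struct my_buf {
        void *cpu_addr;
        /* Both fields compile away on platforms that do not set
         * CONFIG_NEED_DMA_MAP_STATE. */
        DEFINE_DMA_UNMAP_ADDR(mapping);
        DEFINE_DMA_UNMAP_LEN(len);
    };

    static int my_buf_map(struct device *dev, struct my_buf *buf, size_t size)
    {
        dma_addr_t dma = dma_map_single(dev, buf->cpu_addr, size,
                                        DMA_TO_DEVICE);
        if (dma_mapping_error(dev, dma))
            return -ENOMEM;
        /* Remember the bus address and length for the later unmap. */
        dma_unmap_addr_set(buf, mapping, dma);
        dma_unmap_len_set(buf, len, size);
        return 0;
    }

    static void my_buf_unmap(struct device *dev, struct my_buf *buf)
    {
        dma_unmap_single(dev, dma_unmap_addr(buf, mapping),
                         dma_unmap_len(buf, len), DMA_TO_DEVICE);
    }
]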
diff --git a/drivers/infiniband/hw/amso1100/c2_alloc.c b/drivers/infiniband/hw/amso1100/c2_alloc.c
index d4f5f5d42e9..78d247ec696 100644
--- a/drivers/infiniband/hw/amso1100/c2_alloc.c
+++ b/drivers/infiniband/hw/amso1100/c2_alloc.c
@@ -49,7 +49,7 @@ static int c2_alloc_mqsp_chunk(struct c2_dev *c2dev, gfp_t gfp_mask,
return -ENOMEM;
new_head->dma_addr = dma_addr;
- pci_unmap_addr_set(new_head, mapping, new_head->dma_addr);
+ dma_unmap_addr_set(new_head, mapping, new_head->dma_addr);
new_head->next = NULL;
new_head->head = 0;
@@ -81,7 +81,7 @@ void c2_free_mqsp_pool(struct c2_dev *c2dev, struct sp_chunk *root)
while (root) {
next = root->next;
dma_free_coherent(&c2dev->pcidev->dev, PAGE_SIZE, root,
- pci_unmap_addr(root, mapping));
+ dma_unmap_addr(root, mapping));
root = next;
}
}
diff --git a/drivers/infiniband/hw/amso1100/c2_cq.c b/drivers/infiniband/hw/amso1100/c2_cq.c
index f7b0fc23f41..49e0e8533f7 100644
--- a/drivers/infiniband/hw/amso1100/c2_cq.c
+++ b/drivers/infiniband/hw/amso1100/c2_cq.c
@@ -257,7 +257,7 @@ int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
static void c2_free_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq)
{
dma_free_coherent(&c2dev->pcidev->dev, mq->q_size * mq->msg_size,
- mq->msg_pool.host, pci_unmap_addr(mq, mapping));
+ mq->msg_pool.host, dma_unmap_addr(mq, mapping));
}
static int c2_alloc_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq, int q_size,
@@ -278,7 +278,7 @@ static int c2_alloc_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq, int q_size,
NULL, /* peer (currently unknown) */
C2_MQ_HOST_TARGET);
- pci_unmap_addr_set(mq, mapping, mq->host_dma);
+ dma_unmap_addr_set(mq, mapping, mq->host_dma);
return 0;
}
diff --git a/drivers/infiniband/hw/amso1100/c2_mq.h b/drivers/infiniband/hw/amso1100/c2_mq.h
index acede007b94..fc1b9a7cec4 100644
--- a/drivers/infiniband/hw/amso1100/c2_mq.h
+++ b/drivers/infiniband/hw/amso1100/c2_mq.h
@@ -71,7 +71,7 @@ struct c2_mq {
u8 __iomem *adapter;
} msg_pool;
dma_addr_t host_dma;
- DECLARE_PCI_UNMAP_ADDR(mapping);
+ DEFINE_DMA_UNMAP_ADDR(mapping);
u16 hint_count;
u16 priv;
struct c2_mq_shared __iomem *peer;
diff --git a/drivers/infiniband/hw/amso1100/c2_provider.h b/drivers/infiniband/hw/amso1100/c2_provider.h
index 1076df2ee96..bf189987711 100644
--- a/drivers/infiniband/hw/amso1100/c2_provider.h
+++ b/drivers/infiniband/hw/amso1100/c2_provider.h
@@ -50,7 +50,7 @@
struct c2_buf_list {
void *buf;
- DECLARE_PCI_UNMAP_ADDR(mapping)
+ DEFINE_DMA_UNMAP_ADDR(mapping);
};
diff --git a/drivers/infiniband/hw/amso1100/c2_rnic.c b/drivers/infiniband/hw/amso1100/c2_rnic.c
index 78c4bcc6ef6..85cfae4cad7 100644
--- a/drivers/infiniband/hw/amso1100/c2_rnic.c
+++ b/drivers/infiniband/hw/amso1100/c2_rnic.c
@@ -524,7 +524,7 @@ int __devinit c2_rnic_init(struct c2_dev *c2dev)
err = -ENOMEM;
goto bail1;
}
- pci_unmap_addr_set(&c2dev->rep_vq, mapping, c2dev->rep_vq.host_dma);
+ dma_unmap_addr_set(&c2dev->rep_vq, mapping, c2dev->rep_vq.host_dma);
pr_debug("%s rep_vq va %p dma %llx\n", __func__, q1_pages,
(unsigned long long) c2dev->rep_vq.host_dma);
c2_mq_rep_init(&c2dev->rep_vq,
@@ -545,7 +545,7 @@ int __devinit c2_rnic_init(struct c2_dev *c2dev)
err = -ENOMEM;
goto bail2;
}
- pci_unmap_addr_set(&c2dev->aeq, mapping, c2dev->aeq.host_dma);
+ dma_unmap_addr_set(&c2dev->aeq, mapping, c2dev->aeq.host_dma);
pr_debug("%s aeq va %p dma %llx\n", __func__, q2_pages,
(unsigned long long) c2dev->aeq.host_dma);
c2_mq_rep_init(&c2dev->aeq,
@@ -596,11 +596,11 @@ int __devinit c2_rnic_init(struct c2_dev *c2dev)
bail3:
dma_free_coherent(&c2dev->pcidev->dev,
c2dev->aeq.q_size * c2dev->aeq.msg_size,
- q2_pages, pci_unmap_addr(&c2dev->aeq, mapping));
+ q2_pages, dma_unmap_addr(&c2dev->aeq, mapping));
bail2:
dma_free_coherent(&c2dev->pcidev->dev,
c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size,
- q1_pages, pci_unmap_addr(&c2dev->rep_vq, mapping));
+ q1_pages, dma_unmap_addr(&c2dev->rep_vq, mapping));
bail1:
c2_free_mqsp_pool(c2dev, c2dev->kern_mqsp_pool);
bail0:
@@ -637,13 +637,13 @@ void __devexit c2_rnic_term(struct c2_dev *c2dev)
dma_free_coherent(&c2dev->pcidev->dev,
c2dev->aeq.q_size * c2dev->aeq.msg_size,
c2dev->aeq.msg_pool.host,
- pci_unmap_addr(&c2dev->aeq, mapping));
+ dma_unmap_addr(&c2dev->aeq, mapping));
/* Free the verbs reply queue */
dma_free_coherent(&c2dev->pcidev->dev,
c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size,
c2dev->rep_vq.msg_pool.host,
- pci_unmap_addr(&c2dev->rep_vq, mapping));
+ dma_unmap_addr(&c2dev->rep_vq, mapping));
/* Free the MQ shared pointer pool */
c2_free_mqsp_pool(c2dev, c2dev->kern_mqsp_pool);
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c
index 35f286f1ad1..005b7b52bc1 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c
@@ -174,7 +174,7 @@ int cxio_create_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq, int kernel)
kfree(cq->sw_queue);
return -ENOMEM;
}
- pci_unmap_addr_set(cq, mapping, cq->dma_addr);
+ dma_unmap_addr_set(cq, mapping, cq->dma_addr);
memset(cq->queue, 0, size);
setup.id = cq->cqid;
setup.base_addr = (u64) (cq->dma_addr);
@@ -297,7 +297,7 @@ int cxio_create_qp(struct cxio_rdev *rdev_p, u32 kernel_domain,
goto err4;
memset(wq->queue, 0, depth * sizeof(union t3_wr));
- pci_unmap_addr_set(wq, mapping, wq->dma_addr);
+ dma_unmap_addr_set(wq, mapping, wq->dma_addr);
wq->doorbell = (void __iomem *)rdev_p->rnic_info.kdb_addr;
if (!kernel_domain)
wq->udb = (u64)rdev_p->rnic_info.udbell_physbase +
@@ -325,7 +325,7 @@ int cxio_destroy_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq)
dma_free_coherent(&(rdev_p->rnic_info.pdev->dev),
(1UL << (cq->size_log2))
* sizeof(struct t3_cqe), cq->queue,
- pci_unmap_addr(cq, mapping));
+ dma_unmap_addr(cq, mapping));
cxio_hal_put_cqid(rdev_p->rscp, cq->cqid);
return err;
}
@@ -336,7 +336,7 @@ int cxio_destroy_qp(struct cxio_rdev *rdev_p, struct t3_wq *wq,
dma_free_coherent(&(rdev_p->rnic_info.pdev->dev),
(1UL << (wq->size_log2))
* sizeof(union t3_wr), wq->queue,
- pci_unmap_addr(wq, mapping));
+ dma_unmap_addr(wq, mapping));
kfree(wq->sq);
cxio_hal_rqtpool_free(rdev_p, wq->rq_addr, (1UL << wq->rq_size_log2));
kfree(wq->rq);
@@ -537,7 +537,7 @@ static int cxio_hal_init_ctrl_qp(struct cxio_rdev *rdev_p)
err = -ENOMEM;
goto err;
}
- pci_unmap_addr_set(&rdev_p->ctrl_qp, mapping,
+ dma_unmap_addr_set(&rdev_p->ctrl_qp, mapping,
rdev_p->ctrl_qp.dma_addr);
rdev_p->ctrl_qp.doorbell = (void __iomem *)rdev_p->rnic_info.kdb_addr;
memset(rdev_p->ctrl_qp.workq, 0,
@@ -583,7 +583,7 @@ static int cxio_hal_destroy_ctrl_qp(struct cxio_rdev *rdev_p)
dma_free_coherent(&(rdev_p->rnic_info.pdev->dev),
(1UL << T3_CTRL_QP_SIZE_LOG2)
* sizeof(union t3_wr), rdev_p->ctrl_qp.workq,
- pci_unmap_addr(&rdev_p->ctrl_qp, mapping));
+ dma_unmap_addr(&rdev_p->ctrl_qp, mapping));
return cxio_hal_clear_qp_ctx(rdev_p, T3_CTRL_QP_ID);
}
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.h b/drivers/infiniband/hw/cxgb3/cxio_hal.h
index 073373c2c56..8f0caf7d448 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.h
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.h
@@ -71,7 +71,7 @@ struct cxio_hal_ctrl_qp {
wait_queue_head_t waitq;/* wait for RspQ/CQE msg */
union t3_wr *workq; /* the work request queue */
dma_addr_t dma_addr; /* pci bus address of the workq */
- DECLARE_PCI_UNMAP_ADDR(mapping)
+ DEFINE_DMA_UNMAP_ADDR(mapping);
void __iomem *doorbell;
};
diff --git a/drivers/infiniband/hw/cxgb3/cxio_wr.h b/drivers/infiniband/hw/cxgb3/cxio_wr.h
index 15073b2da1c..e5ddb63e7d2 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_wr.h
+++ b/drivers/infiniband/hw/cxgb3/cxio_wr.h
@@ -691,7 +691,7 @@ struct t3_swrq {
struct t3_wq {
union t3_wr *queue; /* DMA accessable memory */
dma_addr_t dma_addr; /* DMA address for HW */
- DECLARE_PCI_UNMAP_ADDR(mapping) /* unmap kruft */
+ DEFINE_DMA_UNMAP_ADDR(mapping); /* unmap kruft */
u32 error; /* 1 once we go to ERROR */
u32 qpid;
u32 wptr; /* idx to next available WR slot */
@@ -718,7 +718,7 @@ struct t3_cq {
u32 wptr;
u32 size_log2;
dma_addr_t dma_addr;
- DECLARE_PCI_UNMAP_ADDR(mapping)
+ DEFINE_DMA_UNMAP_ADDR(mapping);
struct t3_cqe *queue;
struct t3_cqe *sw_queue;
u32 sw_rptr;
diff --git a/drivers/infiniband/hw/cxgb3/iwch.c b/drivers/infiniband/hw/cxgb3/iwch.c
index 63f975f3e30..8e77dc543dd 100644
--- a/drivers/infiniband/hw/cxgb3/iwch.c
+++ b/drivers/infiniband/hw/cxgb3/iwch.c
@@ -47,8 +47,6 @@ MODULE_DESCRIPTION("Chelsio T3 RDMA Driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
-cxgb3_cpl_handler_func t3c_handlers[NUM_CPL_CMDS];
-
static void open_rnic_dev(struct t3cdev *);
static void close_rnic_dev(struct t3cdev *);
static void iwch_event_handler(struct t3cdev *, u32, u32);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index 4fef0329627..ebfb117ba68 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -102,12 +102,9 @@ static unsigned int cong_flavor = 1;
module_param(cong_flavor, uint, 0644);
MODULE_PARM_DESC(cong_flavor, "TCP Congestion control flavor (default=1)");
-static void process_work(struct work_struct *work);
static struct workqueue_struct *workq;
-static DECLARE_WORK(skb_work, process_work);
static struct sk_buff_head rxq;
-static cxgb3_cpl_handler_func work_handlers[NUM_CPL_CMDS];
static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
static void ep_timeout(unsigned long arg);
@@ -151,7 +148,7 @@ int iwch_l2t_send(struct t3cdev *tdev, struct sk_buff *skb, struct l2t_entry *l2
return -EIO;
}
error = l2t_send(tdev, skb, l2e);
- if (error)
+ if (error < 0)
kfree_skb(skb);
return error;
}
@@ -167,7 +164,7 @@ int iwch_cxgb3_ofld_send(struct t3cdev *tdev, struct sk_buff *skb)
return -EIO;
}
error = cxgb3_ofld_send(tdev, skb);
- if (error)
+ if (error < 0)
kfree_skb(skb);
return error;
}
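
[Editor's note: both send helpers now treat only negative return values as failures. The assumed rationale (stated here, not in the hunk itself): the lower-level driver can return positive NET_XMIT_* congestion indications for packets it has nevertheless queued, and freeing the skb on those would be a double free once the stack transmits it. The pattern, with that reasoning as comments:

    error = cxgb3_ofld_send(tdev, skb);
    if (error < 0)          /* -errno: the skb was not consumed, free it */
        kfree_skb(skb);
    /* error > 0 (e.g. NET_XMIT_CN): the skb was queued despite the
     * congestion indication, so it must not be freed here. */
    return error;
]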
@@ -302,27 +299,6 @@ static void release_ep_resources(struct iwch_ep *ep)
put_ep(&ep->com);
}
-static void process_work(struct work_struct *work)
-{
- struct sk_buff *skb = NULL;
- void *ep;
- struct t3cdev *tdev;
- int ret;
-
- while ((skb = skb_dequeue(&rxq))) {
- ep = *((void **) (skb->cb));
- tdev = *((struct t3cdev **) (skb->cb + sizeof(void *)));
- ret = work_handlers[G_OPCODE(ntohl((__force __be32)skb->csum))](tdev, skb, ep);
- if (ret & CPL_RET_BUF_DONE)
- kfree_skb(skb);
-
- /*
- * ep was referenced in sched(), and is freed here.
- */
- put_ep((struct iwch_ep_common *)ep);
- }
-}
-
static int status2errno(int status)
{
switch (status) {
@@ -2157,7 +2133,49 @@ int iwch_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
/*
* All the CM events are handled on a work queue to have a safe context.
+ * These are the real handlers that are called from the work queue.
*/
+static const cxgb3_cpl_handler_func work_handlers[NUM_CPL_CMDS] = {
+ [CPL_ACT_ESTABLISH] = act_establish,
+ [CPL_ACT_OPEN_RPL] = act_open_rpl,
+ [CPL_RX_DATA] = rx_data,
+ [CPL_TX_DMA_ACK] = tx_ack,
+ [CPL_ABORT_RPL_RSS] = abort_rpl,
+ [CPL_ABORT_RPL] = abort_rpl,
+ [CPL_PASS_OPEN_RPL] = pass_open_rpl,
+ [CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl,
+ [CPL_PASS_ACCEPT_REQ] = pass_accept_req,
+ [CPL_PASS_ESTABLISH] = pass_establish,
+ [CPL_PEER_CLOSE] = peer_close,
+ [CPL_ABORT_REQ_RSS] = peer_abort,
+ [CPL_CLOSE_CON_RPL] = close_con_rpl,
+ [CPL_RDMA_TERMINATE] = terminate,
+ [CPL_RDMA_EC_STATUS] = ec_status,
+};
+
+static void process_work(struct work_struct *work)
+{
+ struct sk_buff *skb = NULL;
+ void *ep;
+ struct t3cdev *tdev;
+ int ret;
+
+ while ((skb = skb_dequeue(&rxq))) {
+ ep = *((void **) (skb->cb));
+ tdev = *((struct t3cdev **) (skb->cb + sizeof(void *)));
+ ret = work_handlers[G_OPCODE(ntohl((__force __be32)skb->csum))](tdev, skb, ep);
+ if (ret & CPL_RET_BUF_DONE)
+ kfree_skb(skb);
+
+ /*
+ * ep was referenced in sched(), and is freed here.
+ */
+ put_ep((struct iwch_ep_common *)ep);
+ }
+}
+
+static DECLARE_WORK(skb_work, process_work);
+
static int sched(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
struct iwch_ep_common *epc = ctx;
@@ -2189,6 +2207,29 @@ static int set_tcb_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
return CPL_RET_BUF_DONE;
}
+/*
+ * All upcalls from the T3 Core go to sched() to schedule the
+ * processing on a work queue.
+ */
+cxgb3_cpl_handler_func t3c_handlers[NUM_CPL_CMDS] = {
+ [CPL_ACT_ESTABLISH] = sched,
+ [CPL_ACT_OPEN_RPL] = sched,
+ [CPL_RX_DATA] = sched,
+ [CPL_TX_DMA_ACK] = sched,
+ [CPL_ABORT_RPL_RSS] = sched,
+ [CPL_ABORT_RPL] = sched,
+ [CPL_PASS_OPEN_RPL] = sched,
+ [CPL_CLOSE_LISTSRV_RPL] = sched,
+ [CPL_PASS_ACCEPT_REQ] = sched,
+ [CPL_PASS_ESTABLISH] = sched,
+ [CPL_PEER_CLOSE] = sched,
+ [CPL_CLOSE_CON_RPL] = sched,
+ [CPL_ABORT_REQ_RSS] = sched,
+ [CPL_RDMA_TERMINATE] = sched,
+ [CPL_RDMA_EC_STATUS] = sched,
+ [CPL_SET_TCB_RPL] = set_tcb_rpl,
+};
+
int __init iwch_cm_init(void)
{
skb_queue_head_init(&rxq);
@@ -2197,46 +2238,6 @@ int __init iwch_cm_init(void)
if (!workq)
return -ENOMEM;
- /*
- * All upcalls from the T3 Core go to sched() to
- * schedule the processing on a work queue.
- */
- t3c_handlers[CPL_ACT_ESTABLISH] = sched;
- t3c_handlers[CPL_ACT_OPEN_RPL] = sched;
- t3c_handlers[CPL_RX_DATA] = sched;
- t3c_handlers[CPL_TX_DMA_ACK] = sched;
- t3c_handlers[CPL_ABORT_RPL_RSS] = sched;
- t3c_handlers[CPL_ABORT_RPL] = sched;
- t3c_handlers[CPL_PASS_OPEN_RPL] = sched;
- t3c_handlers[CPL_CLOSE_LISTSRV_RPL] = sched;
- t3c_handlers[CPL_PASS_ACCEPT_REQ] = sched;
- t3c_handlers[CPL_PASS_ESTABLISH] = sched;
- t3c_handlers[CPL_PEER_CLOSE] = sched;
- t3c_handlers[CPL_CLOSE_CON_RPL] = sched;
- t3c_handlers[CPL_ABORT_REQ_RSS] = sched;
- t3c_handlers[CPL_RDMA_TERMINATE] = sched;
- t3c_handlers[CPL_RDMA_EC_STATUS] = sched;
- t3c_handlers[CPL_SET_TCB_RPL] = set_tcb_rpl;
-
- /*
- * These are the real handlers that are called from a
- * work queue.
- */
- work_handlers[CPL_ACT_ESTABLISH] = act_establish;
- work_handlers[CPL_ACT_OPEN_RPL] = act_open_rpl;
- work_handlers[CPL_RX_DATA] = rx_data;
- work_handlers[CPL_TX_DMA_ACK] = tx_ack;
- work_handlers[CPL_ABORT_RPL_RSS] = abort_rpl;
- work_handlers[CPL_ABORT_RPL] = abort_rpl;
- work_handlers[CPL_PASS_OPEN_RPL] = pass_open_rpl;
- work_handlers[CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl;
- work_handlers[CPL_PASS_ACCEPT_REQ] = pass_accept_req;
- work_handlers[CPL_PASS_ESTABLISH] = pass_establish;
- work_handlers[CPL_PEER_CLOSE] = peer_close;
- work_handlers[CPL_ABORT_REQ_RSS] = peer_abort;
- work_handlers[CPL_CLOSE_CON_RPL] = close_con_rpl;
- work_handlers[CPL_RDMA_TERMINATE] = terminate;
- work_handlers[CPL_RDMA_EC_STATUS] = ec_status;
return 0;
}
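
[Editor's note: this rewrite moves the two dispatch tables out of iwch_cm_init() into static C99 designated-initializer arrays, so work_handlers can be const (read-only after load) and each table sits next to the code it dispatches to. The idiom in isolation, as a self-contained sketch with hypothetical opcodes and handlers:

    #include <stddef.h>

    typedef int (*handler_fn)(void *ctx);

    enum { OP_OPEN, OP_CLOSE, OP_DATA, NUM_OPS };

    static int do_open(void *ctx)  { return 1; }
    static int do_close(void *ctx) { return 2; }

    /* Designated initializers may appear in any order; slots that are
     * not listed (OP_DATA here) are implicitly NULL, so the dispatcher
     * must check before calling through the table. */
    static const handler_fn handlers[NUM_OPS] = {
        [OP_CLOSE] = do_close,
        [OP_OPEN]  = do_open,
    };

    static int dispatch(unsigned int op, void *ctx)
    {
        if (op >= NUM_OPS || !handlers[op])
            return -1;      /* unknown or unhandled opcode */
        return handlers[op](ctx);
    }
]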
diff --git a/drivers/infiniband/hw/cxgb4/Kconfig b/drivers/infiniband/hw/cxgb4/Kconfig
new file mode 100644
index 00000000000..ccb85eaaad7
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/Kconfig
@@ -0,0 +1,18 @@
+config INFINIBAND_CXGB4
+ tristate "Chelsio T4 RDMA Driver"
+ depends on CHELSIO_T4 && INET
+ select GENERIC_ALLOCATOR
+ ---help---
+ This is an iWARP/RDMA driver for the Chelsio T4 1GbE and
+ 10GbE adapters.
+
+ For general information about Chelsio and our products, visit
+ our website at <http://www.chelsio.com>.
+
+ For customer support, please visit our customer support page at
+ <http://www.chelsio.com/support.htm>.
+
+ Please send feedback to <linux-bugs@chelsio.com>.
+
+ To compile this driver as a module, choose M here: the module
+ will be called iw_cxgb4.
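
[Editor's note: "select GENERIC_ALLOCATOR" pulls in lib/genalloc. Judging from resource.c in the diffstat, the driver presumably uses a gen_pool to hand out ranges of adapter memory. A minimal sketch of that allocator's API, with hypothetical wrapper names and sizes:

    #include <linux/genalloc.h>

    static struct gen_pool *pbl_pool;       /* hypothetical pool */

    static int pool_init(unsigned long base, size_t size)
    {
        pbl_pool = gen_pool_create(12, -1); /* 4KB min order, any NUMA node */
        if (!pbl_pool)
            return -ENOMEM;
        return gen_pool_add(pbl_pool, base, size, -1);
    }

    static unsigned long pool_get(size_t size)
    {
        return gen_pool_alloc(pbl_pool, size);  /* returns 0 on failure */
    }

    static void pool_put(unsigned long addr, size_t size)
    {
        gen_pool_free(pbl_pool, addr, size);
    }
]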
diff --git a/drivers/infiniband/hw/cxgb4/Makefile b/drivers/infiniband/hw/cxgb4/Makefile
new file mode 100644
index 00000000000..e31a499f017
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/Makefile
@@ -0,0 +1,5 @@
+EXTRA_CFLAGS += -Idrivers/net/cxgb4
+
+obj-$(CONFIG_INFINIBAND_CXGB4) += iw_cxgb4.o
+
+iw_cxgb4-y := device.o cm.o provider.o mem.o cq.o qp.o resource.o ev.o
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
new file mode 100644
index 00000000000..30ce0a8eca0
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -0,0 +1,2374 @@
+/*
+ * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/workqueue.h>
+#include <linux/skbuff.h>
+#include <linux/timer.h>
+#include <linux/notifier.h>
+#include <linux/inetdevice.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+
+#include <net/neighbour.h>
+#include <net/netevent.h>
+#include <net/route.h>
+
+#include "iw_cxgb4.h"
+
+static char *states[] = {
+ "idle",
+ "listen",
+ "connecting",
+ "mpa_wait_req",
+ "mpa_req_sent",
+ "mpa_req_rcvd",
+ "mpa_rep_sent",
+ "fpdu_mode",
+ "aborting",
+ "closing",
+ "moribund",
+ "dead",
+ NULL,
+};
+
+int c4iw_max_read_depth = 8;
+module_param(c4iw_max_read_depth, int, 0644);
+MODULE_PARM_DESC(c4iw_max_read_depth, "Per-connection max ORD/IRD (default=8)");
+
+static int enable_tcp_timestamps;
+module_param(enable_tcp_timestamps, int, 0644);
+MODULE_PARM_DESC(enable_tcp_timestamps, "Enable tcp timestamps (default=0)");
+
+static int enable_tcp_sack;
+module_param(enable_tcp_sack, int, 0644);
+MODULE_PARM_DESC(enable_tcp_sack, "Enable tcp SACK (default=0)");
+
+static int enable_tcp_window_scaling = 1;
+module_param(enable_tcp_window_scaling, int, 0644);
+MODULE_PARM_DESC(enable_tcp_window_scaling,
+ "Enable tcp window scaling (default=1)");
+
+int c4iw_debug;
+module_param(c4iw_debug, int, 0644);
+MODULE_PARM_DESC(c4iw_debug, "Enable debug logging (default=0)");
+
+static int peer2peer;
+module_param(peer2peer, int, 0644);
+MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=0)");
+
+static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ;
+module_param(p2p_type, int, 0644);
+MODULE_PARM_DESC(p2p_type, "RDMAP opcode to use for the RTR message: "
+ "1=RDMA_READ 0=RDMA_WRITE (default 1)");
+
+static int ep_timeout_secs = 60;
+module_param(ep_timeout_secs, int, 0644);
+MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout "
+ "in seconds (default=60)");
+
+static int mpa_rev = 1;
+module_param(mpa_rev, int, 0644);
+MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, "
+ "1 is spec compliant. (default=1)");
+
+static int markers_enabled;
+module_param(markers_enabled, int, 0644);
+MODULE_PARM_DESC(markers_enabled, "Enable MPA MARKERS (default(0)=disabled)");
+
+static int crc_enabled = 1;
+module_param(crc_enabled, int, 0644);
+MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default(1)=enabled)");
+
+static int rcv_win = 256 * 1024;
+module_param(rcv_win, int, 0644);
+MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)");
+
+static int snd_win = 32 * 1024;
+module_param(snd_win, int, 0644);
+MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=32KB)");
+
+static struct workqueue_struct *workq;
+
+static struct sk_buff_head rxq;
+
+static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
+static void ep_timeout(unsigned long arg);
+static void connect_reply_upcall(struct c4iw_ep *ep, int status);
+
+static LIST_HEAD(timeout_list);
+static spinlock_t timeout_lock;
+
+static void start_ep_timer(struct c4iw_ep *ep)
+{
+ PDBG("%s ep %p\n", __func__, ep);
+ if (timer_pending(&ep->timer)) {
+ PDBG("%s stopped / restarted timer ep %p\n", __func__, ep);
+ del_timer_sync(&ep->timer);
+ } else
+ c4iw_get_ep(&ep->com);
+ ep->timer.expires = jiffies + ep_timeout_secs * HZ;
+ ep->timer.data = (unsigned long)ep;
+ ep->timer.function = ep_timeout;
+ add_timer(&ep->timer);
+}
+
+static void stop_ep_timer(struct c4iw_ep *ep)
+{
+ PDBG("%s ep %p\n", __func__, ep);
+ if (!timer_pending(&ep->timer)) {
+ printk(KERN_ERR "%s timer stopped when it's not running! "
+ "ep %p state %u\n", __func__, ep, ep->com.state);
+ WARN_ON(1);
+ return;
+ }
+ del_timer_sync(&ep->timer);
+ c4iw_put_ep(&ep->com);
+}
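
[Editor's note: the timer helpers above tie the endpoint's lifetime to the pending timer: start_ep_timer() takes a reference before arming, and stop_ep_timer() (or the expiry path) drops it, so the endpoint cannot be freed while ep_timeout() might still run. A condensed sketch of this idiom using plain kref, with hypothetical names; the driver wraps its refcounting in c4iw_get_ep()/c4iw_put_ep() and warns on a stop without a pending timer instead of handling it silently:

    #include <linux/kref.h>
    #include <linux/slab.h>
    #include <linux/timer.h>

    struct my_ep {
        struct kref kref;
        struct timer_list timer;
    };

    static void my_ep_release(struct kref *kref)
    {
        kfree(container_of(kref, struct my_ep, kref));
    }

    static void my_ep_arm_timer(struct my_ep *ep, unsigned long secs)
    {
        kref_get(&ep->kref);    /* reference held by the pending timer */
        mod_timer(&ep->timer, jiffies + secs * HZ);
    }

    static void my_ep_stop_timer(struct my_ep *ep)
    {
        /* del_timer_sync() returns nonzero only if it deactivated a
         * pending timer, i.e. the timer's reference is still held. */
        if (del_timer_sync(&ep->timer))
            kref_put(&ep->kref, my_ep_release);
    }
]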
+
+static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb,
+ struct l2t_entry *l2e)
+{
+ int error = 0;
+
+ if (c4iw_fatal_error(rdev)) {
+ kfree_skb(skb);
+ PDBG("%s - device in error state - dropping\n", __func__);
+ return -EIO;
+ }
+ error = cxgb4_l2t_send(rdev->lldi.ports[0], skb, l2e);
+ if (error < 0)
+ kfree_skb(skb);
+ return error;
+}
+
+int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb)
+{
+ int error = 0;
+
+ if (c4iw_fatal_error(rdev)) {
+ kfree_skb(skb);
+ PDBG("%s - device in error state - dropping\n", __func__);
+ return -EIO;
+ }
+ error = cxgb4_ofld_send(rdev->lldi.ports[0], skb);
+ if (error < 0)
+ kfree_skb(skb);
+ return error;
+}
+
+static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb)
+{
+ struct cpl_tid_release *req;
+
+ skb = get_skb(skb, sizeof *req, GFP_KERNEL);
+ if (!skb)
+ return;
+ req = (struct cpl_tid_release *) skb_put(skb, sizeof(*req));
+ INIT_TP_WR(req, hwtid);
+ OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid));
+ set_wr_txq(skb, CPL_PRIORITY_SETUP, 0);
+ c4iw_ofld_send(rdev, skb);
+ return;
+}
+
+static void set_emss(struct c4iw_ep *ep, u16 opt)
+{
+ ep->emss = ep->com.dev->rdev.lldi.mtus[GET_TCPOPT_MSS(opt)] - 40;
+ ep->mss = ep->emss;
+ if (GET_TCPOPT_TSTAMP(opt))
+ ep->emss -= 12;
+ if (ep->emss < 128)
+ ep->emss = 128;
+ PDBG("%s mss_idx %u mss %u emss=%u\n", __func__, GET_TCPOPT_MSS(opt),
+ ep->mss, ep->emss);
+}
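
[Editor's note: set_emss() turns the negotiated MTU-index option into an effective MSS: the MTU from the hardware table minus 40 bytes of fixed IPv4 + TCP headers, minus another 12 when TCP timestamps occupy option space, with a 128-byte floor. A worked example under the assumption of a standard 1500-byte Ethernet MTU entry:

    /* Illustrative arithmetic only (1500-byte MTU assumed):
     *   emss = 1500 - 40      = 1460   without TCP timestamps
     *   emss = 1500 - 40 - 12 = 1448   with TCP timestamps
     * The 128-byte floor only matters for pathologically small MTUs. */
]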
+
+static enum c4iw_ep_state state_read(struct c4iw_ep_common *epc)
+{
+ unsigned long flags;
+ enum c4iw_ep_state state;
+
+ spin_lock_irqsave(&epc->lock, flags);
+ state = epc->state;
+ spin_unlock_irqrestore(&epc->lock, flags);
+ return state;
+}
+
+static void __state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
+{
+ epc->state = new;
+}
+
+static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&epc->lock, flags);
+ PDBG("%s - %s -> %s\n", __func__, states[epc->state], states[new]);
+ __state_set(epc, new);
+ spin_unlock_irqrestore(&epc->lock, flags);
+ return;
+}
+
+static void *alloc_ep(int size, gfp_t gfp)
+{
+ struct c4iw_ep_common *epc;
+
+ epc = kzalloc(size, gfp);
+ if (epc) {
+ kref_init(&epc->kref);
+ spin_lock_init(&epc->lock);
+ init_waitqueue_head(&epc->waitq);
+ }
+ PDBG("%s alloc ep %p\n", __func__, epc);
+ return epc;
+}
+
+void _c4iw_free_ep(struct kref *kref)
+{
+ struct c4iw_ep *ep;
+
+ ep = container_of(kref, struct c4iw_ep, com.kref);
+ PDBG("%s ep %p state %s\n", __func__, ep, states[state_read(&ep->com)]);