Diffstat (limited to 'drivers/infiniband/core/iwcm.c')
-rw-r--r--  drivers/infiniband/core/iwcm.c  145
1 file changed, 84 insertions(+), 61 deletions(-)
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index c3fb304a4e8..3d2e489ab73 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -39,11 +39,13 @@
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
-#include <linux/pci.h>
#include <linux/rbtree.h>
+#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
+#include <linux/slab.h>
+#include <linux/module.h>
#include <rdma/iw_cm.h>
#include <rdma/ib_addr.h>
@@ -80,7 +82,7 @@ struct iwcm_work {
* 1) in the event upcall, cm_event_handler(), for a listening cm_id. If
* the backlog is exceeded, then no more connection request events will
* be processed. cm_event_handler() returns -ENOMEM in this case. Its up
- * to the provider to reject the connectino request.
+ * to the provider to reject the connection request.
* 2) in the connection request workqueue handler, cm_conn_req_handler().
* If work elements cannot be allocated for the new connect request cm_id,
* then IWCM will call the provider reject method. This is ok since
@@ -131,44 +133,42 @@ static int alloc_work_entries(struct iwcm_id_private *cm_id_priv, int count)
}
/*
- * Save private data from incoming connection requests in the
- * cm_id_priv so the low level driver doesn't have to. Adjust
+ * Save private data from incoming connection requests to
+ * iw_cm_event, so the low level driver doesn't have to. Adjust
* the event ptr to point to the local copy.
*/
-static int copy_private_data(struct iwcm_id_private *cm_id_priv,
- struct iw_cm_event *event)
+static int copy_private_data(struct iw_cm_event *event)
{
void *p;
- p = kmalloc(event->private_data_len, GFP_ATOMIC);
+ p = kmemdup(event->private_data, event->private_data_len, GFP_ATOMIC);
if (!p)
return -ENOMEM;
- memcpy(p, event->private_data, event->private_data_len);
event->private_data = p;
return 0;
}
+static void free_cm_id(struct iwcm_id_private *cm_id_priv)
+{
+ dealloc_work_entries(cm_id_priv);
+ kfree(cm_id_priv);
+}
+
/*
- * Release a reference on cm_id. If the last reference is being removed
- * and iw_destroy_cm_id is waiting, wake up the waiting thread.
+ * Release a reference on cm_id. If the last reference is being
+ * released, enable the waiting thread (in iw_destroy_cm_id) to
+ * get woken up, and return 1 if a thread is already waiting.
*/
static int iwcm_deref_id(struct iwcm_id_private *cm_id_priv)
{
- int ret = 0;
-
BUG_ON(atomic_read(&cm_id_priv->refcount)==0);
if (atomic_dec_and_test(&cm_id_priv->refcount)) {
BUG_ON(!list_empty(&cm_id_priv->work_list));
- if (waitqueue_active(&cm_id_priv->destroy_comp.wait)) {
- BUG_ON(cm_id_priv->state != IW_CM_STATE_DESTROYING);
- BUG_ON(test_bit(IWCM_F_CALLBACK_DESTROY,
- &cm_id_priv->flags));
- ret = 1;
- }
complete(&cm_id_priv->destroy_comp);
+ return 1;
}
- return ret;
+ return 0;
}
static void add_ref(struct iw_cm_id *cm_id)
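
The copy_private_data() change in the hunk above replaces an open-coded kmalloc()+memcpy() pair with kmemdup(). A minimal, hypothetical sketch of that pattern follows; the struct and function names are illustrative only and not taken from iwcm.c:

#include <linux/types.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/errno.h>

/* Illustrative event type; iwcm.c's real one lives in <rdma/iw_cm.h>. */
struct demo_event {
	const void *private_data;
	u8 private_data_len;
};

static int demo_copy_private_data(struct demo_event *event)
{
	void *p;

	/*
	 * kmemdup() is the one-call form of kmalloc() + memcpy().
	 * GFP_ATOMIC because event upcalls may run in atomic context.
	 */
	p = kmemdup(event->private_data, event->private_data_len, GFP_ATOMIC);
	if (!p)
		return -ENOMEM;
	event->private_data = p;
	return 0;
}
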
@@ -181,8 +181,19 @@ static void add_ref(struct iw_cm_id *cm_id)
static void rem_ref(struct iw_cm_id *cm_id)
{
struct iwcm_id_private *cm_id_priv;
+ int cb_destroy;
+
cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
- iwcm_deref_id(cm_id_priv);
+
+ /*
+ * Test bit before deref in case the cm_id gets freed on another
+ * thread.
+ */
+ cb_destroy = test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
+ if (iwcm_deref_id(cm_id_priv) && cb_destroy) {
+ BUG_ON(!list_empty(&cm_id_priv->work_list));
+ free_cm_id(cm_id_priv);
+ }
}
static int cm_event_handler(struct iw_cm_id *cm_id, struct iw_cm_event *event);
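
The rem_ref()/iwcm_deref_id() changes above sample IWCM_F_CALLBACK_DESTROY before dropping the reference, because once the count reaches zero another thread may free the cm_id and it must not be touched again. A hypothetical sketch of that ordering (names and the flag bit are illustrative):

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/slab.h>

#define DEMO_F_CALLBACK_DESTROY 0	/* illustrative flag bit */

struct demo_obj {
	atomic_t refcount;
	unsigned long flags;
	struct completion destroy_comp;
};

static void demo_put(struct demo_obj *obj)
{
	/* Read the flag first: after the last deref another thread may free obj. */
	int cb_destroy = test_bit(DEMO_F_CALLBACK_DESTROY, &obj->flags);

	if (atomic_dec_and_test(&obj->refcount)) {
		complete(&obj->destroy_comp);	/* unblock a waiter in the destroy path */
		if (cb_destroy)
			kfree(obj);		/* callback-initiated destroy: this path owns the free */
	}
}
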
@@ -243,7 +254,7 @@ static int iwcm_modify_qp_sqd(struct ib_qp *qp)
/*
* CM_ID <-- CLOSING
*
- * Block if a passive or active connection is currenlty being processed. Then
+ * Block if a passive or active connection is currently being processed. Then
* process the event as follows:
* - If we are ESTABLISHED, move to CLOSING and modify the QP state
* based on the abrupt flag
@@ -323,7 +334,6 @@ static void destroy_cm_id(struct iw_cm_id *cm_id)
{
struct iwcm_id_private *cm_id_priv;
unsigned long flags;
- int ret;
cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
/*
@@ -339,7 +349,7 @@ static void destroy_cm_id(struct iw_cm_id *cm_id)
cm_id_priv->state = IW_CM_STATE_DESTROYING;
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
/* destroy the listening endpoint */
- ret = cm_id->device->iwcm->destroy_listen(cm_id);
+ cm_id->device->iwcm->destroy_listen(cm_id);
spin_lock_irqsave(&cm_id_priv->lock, flags);
break;
case IW_CM_STATE_ESTABLISHED:
@@ -356,9 +366,14 @@ static void destroy_cm_id(struct iw_cm_id *cm_id)
case IW_CM_STATE_CONN_RECV:
/*
* App called destroy before/without calling accept after
- * receiving connection request event notification.
+ * receiving connection request event notification or
+ * returned non zero from the event callback function.
+ * In either case, must tell the provider to reject.
*/
cm_id_priv->state = IW_CM_STATE_DESTROYING;
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ cm_id->device->iwcm->reject(cm_id, NULL, 0);
+ spin_lock_irqsave(&cm_id_priv->lock, flags);
break;
case IW_CM_STATE_CONN_SENT:
case IW_CM_STATE_DESTROYING:
@@ -392,9 +407,7 @@ void iw_destroy_cm_id(struct iw_cm_id *cm_id)
wait_for_completion(&cm_id_priv->destroy_comp);
- dealloc_work_entries(cm_id_priv);
-
- kfree(cm_id_priv);
+ free_cm_id(cm_id_priv);
}
EXPORT_SYMBOL(iw_destroy_cm_id);
@@ -408,7 +421,7 @@ int iw_cm_listen(struct iw_cm_id *cm_id, int backlog)
{
struct iwcm_id_private *cm_id_priv;
unsigned long flags;
- int ret = 0;
+ int ret;
cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
@@ -500,6 +513,8 @@ int iw_cm_accept(struct iw_cm_id *cm_id,
qp = cm_id->device->iwcm->get_qp(cm_id->device, iw_param->qpn);
if (!qp) {
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
+ wake_up_all(&cm_id_priv->connect_wait);
return -EINVAL;
}
cm_id->device->iwcm->add_ref(qp);
@@ -535,7 +550,7 @@ EXPORT_SYMBOL(iw_cm_accept);
int iw_cm_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
{
struct iwcm_id_private *cm_id_priv;
- int ret = 0;
+ int ret;
unsigned long flags;
struct ib_qp *qp;
@@ -559,6 +574,8 @@ int iw_cm_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
qp = cm_id->device->iwcm->get_qp(cm_id->device, iw_param->qpn);
if (!qp) {
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
+ wake_up_all(&cm_id_priv->connect_wait);
return -EINVAL;
}
cm_id->device->iwcm->add_ref(qp);
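
The two error-path hunks above (in iw_cm_accept() and iw_cm_connect()) now clear IWCM_F_CONNECT_WAIT and wake connect_wait before returning -EINVAL, so a thread sleeping until the connect attempt resolves is not left stuck. A hypothetical sketch of that wait/wake pairing, with illustrative names:

#include <linux/types.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/errno.h>

#define DEMO_F_CONNECT_WAIT 0	/* illustrative flag bit */

struct demo_conn {
	unsigned long flags;
	wait_queue_head_t connect_wait;
};

static int demo_connect(struct demo_conn *c, bool have_qp)
{
	set_bit(DEMO_F_CONNECT_WAIT, &c->flags);

	if (!have_qp) {
		/* Every early exit must undo the wait bit and wake sleepers. */
		clear_bit(DEMO_F_CONNECT_WAIT, &c->flags);
		wake_up_all(&c->connect_wait);
		return -EINVAL;
	}
	/* ... hand the request to the provider here ... */
	return 0;
}

/* A disconnect/destroy path sleeps until the connect attempt is resolved: */
static void demo_wait_for_connect(struct demo_conn *c)
{
	wait_event(c->connect_wait,
		   !test_bit(DEMO_F_CONNECT_WAIT, &c->flags));
}
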
@@ -613,23 +630,12 @@ static void cm_conn_req_handler(struct iwcm_id_private *listen_id_priv,
*/
BUG_ON(iw_event->status);
- /*
- * We could be destroying the listening id. If so, ignore this
- * upcall.
- */
- spin_lock_irqsave(&listen_id_priv->lock, flags);
- if (listen_id_priv->state != IW_CM_STATE_LISTEN) {
- spin_unlock_irqrestore(&listen_id_priv->lock, flags);
- return;
- }
- spin_unlock_irqrestore(&listen_id_priv->lock, flags);
-
cm_id = iw_create_cm_id(listen_id_priv->id.device,
listen_id_priv->id.cm_handler,
listen_id_priv->id.context);
/* If the cm_id could not be created, ignore the request */
if (IS_ERR(cm_id))
- return;
+ goto out;
cm_id->provider_data = iw_event->provider_data;
cm_id->local_addr = iw_event->local_addr;
@@ -638,22 +644,37 @@ static void cm_conn_req_handler(struct iwcm_id_private *listen_id_priv,
cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
cm_id_priv->state = IW_CM_STATE_CONN_RECV;
+ /*
+ * We could be destroying the listening id. If so, ignore this
+ * upcall.
+ */
+ spin_lock_irqsave(&listen_id_priv->lock, flags);
+ if (listen_id_priv->state != IW_CM_STATE_LISTEN) {
+ spin_unlock_irqrestore(&listen_id_priv->lock, flags);
+ iw_cm_reject(cm_id, NULL, 0);
+ iw_destroy_cm_id(cm_id);
+ goto out;
+ }
+ spin_unlock_irqrestore(&listen_id_priv->lock, flags);
+
ret = alloc_work_entries(cm_id_priv, 3);
if (ret) {
iw_cm_reject(cm_id, NULL, 0);
iw_destroy_cm_id(cm_id);
- return;
+ goto out;
}
/* Call the client CM handler */
ret = cm_id->cm_handler(cm_id, iw_event);
if (ret) {
+ iw_cm_reject(cm_id, NULL, 0);
set_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
destroy_cm_id(cm_id);
if (atomic_read(&cm_id_priv->refcount)==0)
- kfree(cm_id);
+ free_cm_id(cm_id_priv);
}
+out:
if (iw_event->private_data_len)
kfree(iw_event->private_data);
}
@@ -674,7 +695,7 @@ static int cm_conn_est_handler(struct iwcm_id_private *cm_id_priv,
struct iw_cm_event *iw_event)
{
unsigned long flags;
- int ret = 0;
+ int ret;
spin_lock_irqsave(&cm_id_priv->lock, flags);
@@ -704,7 +725,7 @@ static int cm_conn_rep_handler(struct iwcm_id_private *cm_id_priv,
struct iw_cm_event *iw_event)
{
unsigned long flags;
- int ret = 0;
+ int ret;
spin_lock_irqsave(&cm_id_priv->lock, flags);
/*
@@ -713,7 +734,7 @@ static int cm_conn_rep_handler(struct iwcm_id_private *cm_id_priv,
*/
clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_SENT);
- if (iw_event->status == IW_CM_EVENT_STATUS_ACCEPTED) {
+ if (iw_event->status == 0) {
cm_id_priv->id.local_addr = iw_event->local_addr;
cm_id_priv->id.remote_addr = iw_event->remote_addr;
cm_id_priv->state = IW_CM_STATE_ESTABLISHED;
@@ -828,13 +849,15 @@ static int process_event(struct iwcm_id_private *cm_id_priv,
* thread asleep on the destroy_comp list vs. an object destroyed
* here synchronously when the last reference is removed.
*/
-static void cm_work_handler(void *arg)
+static void cm_work_handler(struct work_struct *_work)
{
- struct iwcm_work *work = arg, lwork;
+ struct iwcm_work *work = container_of(_work, struct iwcm_work, work);
+ struct iw_cm_event levent;
struct iwcm_id_private *cm_id_priv = work->cm_id;
unsigned long flags;
int empty;
int ret = 0;
+ int destroy_id;
spin_lock_irqsave(&cm_id_priv->lock, flags);
empty = list_empty(&cm_id_priv->work_list);
@@ -843,25 +866,26 @@ static void cm_work_handler(void *arg)
struct iwcm_work, list);
list_del_init(&work->list);
empty = list_empty(&cm_id_priv->work_list);
- lwork = *work;
+ levent = work->event;
put_work(work);
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
- ret = process_event(cm_id_priv, &work->event);
+ ret = process_event(cm_id_priv, &levent);
if (ret) {
set_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
destroy_cm_id(&cm_id_priv->id);
}
BUG_ON(atomic_read(&cm_id_priv->refcount)==0);
- if (iwcm_deref_id(cm_id_priv))
- return;
-
- if (atomic_read(&cm_id_priv->refcount)==0 &&
- test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags)) {
- dealloc_work_entries(cm_id_priv);
- kfree(cm_id_priv);
+ destroy_id = test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
+ if (iwcm_deref_id(cm_id_priv)) {
+ if (destroy_id) {
+ BUG_ON(!list_empty(&cm_id_priv->work_list));
+ free_cm_id(cm_id_priv);
+ }
return;
}
+ if (empty)
+ return;
spin_lock_irqsave(&cm_id_priv->lock, flags);
}
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
@@ -899,14 +923,14 @@ static int cm_event_handler(struct iw_cm_id *cm_id,
goto out;
}
- INIT_WORK(&work->work, cm_work_handler, work);
+ INIT_WORK(&work->work, cm_work_handler);
work->cm_id = cm_id_priv;
work->event = *iw_event;
if ((work->event.event == IW_CM_EVENT_CONNECT_REQUEST ||
work->event.event == IW_CM_EVENT_CONNECT_REPLY) &&
work->event.private_data_len) {
- ret = copy_private_data(cm_id_priv, &work->event);
+ ret = copy_private_data(&work->event);
if (ret) {
put_work(work);
goto out;
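
The cm_work_handler()/INIT_WORK() changes above are the standard conversion to the two-argument workqueue API (circa 2.6.20): the handler receives the embedded work_struct and recovers its containing object with container_of() instead of taking a void * argument. A hypothetical sketch of the pattern, with illustrative names:

#include <linux/workqueue.h>
#include <linux/kernel.h>	/* container_of() */
#include <linux/printk.h>

struct demo_work {
	struct work_struct work;	/* embedded, as in struct iwcm_work */
	int payload;
};

static void demo_handler(struct work_struct *_work)
{
	/* Recover the enclosing object from the embedded work_struct. */
	struct demo_work *w = container_of(_work, struct demo_work, work);

	pr_info("demo work: payload=%d\n", w->payload);
}

static void demo_queue(struct demo_work *w)
{
	INIT_WORK(&w->work, demo_handler);	/* two-argument form: no data pointer */
	schedule_work(&w->work);
}
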
@@ -938,8 +962,7 @@ static int iwcm_init_qp_init_attr(struct iwcm_id_private *cm_id_priv,
case IW_CM_STATE_CONN_RECV:
case IW_CM_STATE_ESTABLISHED:
*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
- qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE |
- IB_ACCESS_REMOTE_WRITE|
+ qp_attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE|
IB_ACCESS_REMOTE_READ;
ret = 0;
break;