Diffstat (limited to 'drivers/infiniband/core/mad.c')
-rw-r--r--   drivers/infiniband/core/mad.c   76
1 files changed, 63 insertions, 13 deletions
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 822cfdcd9f7..ab31f136d04 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -35,6 +35,7 @@
  */
 #include <linux/dma-mapping.h>
 #include <linux/slab.h>
+#include <linux/module.h>
 #include <rdma/ib_cache.h>
 
 #include "mad_priv.h"
@@ -276,6 +277,13 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
 		goto error1;
 	}
 
+	/* Verify the QP requested is supported.  For example, Ethernet devices
+	 * will not have QP0 */
+	if (!port_priv->qp_info[qpn].qp) {
+		ret = ERR_PTR(-EPROTONOSUPPORT);
+		goto error1;
+	}
+
 	/* Allocate structures */
 	mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
 	if (!mad_agent_priv) {
@@ -1014,12 +1022,21 @@ int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
 					mad_send_wr->send_buf.mad,
 					sge[0].length,
 					DMA_TO_DEVICE);
+	if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[0].addr)))
+		return -ENOMEM;
+
 	mad_send_wr->header_mapping = sge[0].addr;
 
 	sge[1].addr = ib_dma_map_single(mad_agent->device,
 					ib_get_payload(mad_send_wr),
 					sge[1].length,
 					DMA_TO_DEVICE);
+	if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[1].addr))) {
+		ib_dma_unmap_single(mad_agent->device,
+				    mad_send_wr->header_mapping,
+				    sge[0].length, DMA_TO_DEVICE);
+		return -ENOMEM;
+	}
 	mad_send_wr->payload_mapping = sge[1].addr;
 
 	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
@@ -1589,6 +1606,9 @@ find_mad_agent(struct ib_mad_port_private *port_priv,
 					mad->mad_hdr.class_version].class;
 			if (!class)
 				goto out;
+			if (convert_mgmt_class(mad->mad_hdr.mgmt_class) >=
+			    IB_MGMT_MAX_METHODS)
+				goto out;
 			method = class->method_table[convert_mgmt_class(
 							mad->mad_hdr.mgmt_class)];
 			if (method)
@@ -1831,6 +1851,26 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
 	}
 }
 
+static bool generate_unmatched_resp(struct ib_mad_private *recv,
+				    struct ib_mad_private *response)
+{
+	if (recv->mad.mad.mad_hdr.method == IB_MGMT_METHOD_GET ||
+	    recv->mad.mad.mad_hdr.method == IB_MGMT_METHOD_SET) {
+		memcpy(response, recv, sizeof *response);
+		response->header.recv_wc.wc = &response->header.wc;
+		response->header.recv_wc.recv_buf.mad = &response->mad.mad;
+		response->header.recv_wc.recv_buf.grh = &response->grh;
+		response->mad.mad.mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
+		response->mad.mad.mad_hdr.status =
+			cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB);
+		if (recv->mad.mad.mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
+			response->mad.mad.mad_hdr.status |= IB_SMP_DIRECTION;
+
+		return true;
+	} else {
+		return false;
+	}
+}
 static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
 				     struct ib_wc *wc)
 {
@@ -1840,6 +1880,7 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
 	struct ib_mad_list_head *mad_list;
 	struct ib_mad_agent_private *mad_agent;
 	int port_num;
+	int ret = IB_MAD_RESULT_SUCCESS;
 
 	mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
 	qp_info = mad_list->mad_queue->qp_info;
@@ -1923,8 +1964,6 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
 local:
 	/* Give driver "right of first refusal" on incoming MAD */
 	if (port_priv->device->process_mad) {
-		int ret;
-
 		ret = port_priv->device->process_mad(port_priv->device, 0,
 						     port_priv->port_num,
 						     wc, &recv->grh,
@@ -1952,6 +1991,10 @@ local:
 		 * or via recv_handler in ib_mad_complete_recv()
 		 */
 		recv = NULL;
+	} else if ((ret & IB_MAD_RESULT_SUCCESS) &&
+		   generate_unmatched_resp(recv, response)) {
+		agent_send_response(&response->mad.mad, &recv->grh, wc,
+				    port_priv->device, port_num, qp_info->qp->qp_num);
 	}
 
 out:
@@ -1970,7 +2013,7 @@ static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
 	unsigned long delay;
 
 	if (list_empty(&mad_agent_priv->wait_list)) {
-		__cancel_delayed_work(&mad_agent_priv->timed_work);
+		cancel_delayed_work(&mad_agent_priv->timed_work);
 	} else {
 		mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
 					 struct ib_mad_send_wr_private,
@@ -1979,13 +2022,11 @@ static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
 		if (time_after(mad_agent_priv->timeout,
 			       mad_send_wr->timeout)) {
 			mad_agent_priv->timeout = mad_send_wr->timeout;
-			__cancel_delayed_work(&mad_agent_priv->timed_work);
 			delay = mad_send_wr->timeout - jiffies;
 			if ((long)delay <= 0)
 				delay = 1;
-			queue_delayed_work(mad_agent_priv->qp_info->
-					   port_priv->wq,
-					   &mad_agent_priv->timed_work, delay);
+			mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
+					 &mad_agent_priv->timed_work, delay);
 		}
 	}
 }
@@ -2018,11 +2059,9 @@ static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
 	list_add(&mad_send_wr->agent_list, list_item);
 
 	/* Reschedule a work item if we have a shorter timeout */
-	if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list) {
-		__cancel_delayed_work(&mad_agent_priv->timed_work);
-		queue_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
-				   &mad_agent_priv->timed_work, delay);
-	}
+	if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list)
+		mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
+				 &mad_agent_priv->timed_work, delay);
 }
 
 void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
@@ -2560,6 +2599,11 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
 						 sizeof *mad_priv -
 						   sizeof mad_priv->header,
 						 DMA_FROM_DEVICE);
+		if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device,
+						  sg_list.addr))) {
+			ret = -ENOMEM;
+			break;
+		}
 		mad_priv->header.mapping = sg_list.addr;
 		recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list;
 		mad_priv->header.mad_list.mad_queue = recv_queue;
@@ -2633,6 +2677,7 @@ static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
 	int ret, i;
 	struct ib_qp_attr *attr;
 	struct ib_qp *qp;
+	u16 pkey_index;
 
 	attr = kmalloc(sizeof *attr, GFP_KERNEL);
 	if (!attr) {
@@ -2640,6 +2685,11 @@ static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
 		return -ENOMEM;
 	}
 
+	ret = ib_find_pkey(port_priv->device, port_priv->port_num,
+			   IB_DEFAULT_PKEY_FULL, &pkey_index);
+	if (ret)
+		pkey_index = 0;
+
 	for (i = 0; i < IB_MAD_QPS_CORE; i++) {
 		qp = port_priv->qp_info[i].qp;
 		if (!qp)
@@ -2650,7 +2700,7 @@ static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
 		 * one is needed for the Reset to Init transition
 		 */
 		attr->qp_state = IB_QPS_INIT;
-		attr->pkey_index = 0;
+		attr->pkey_index = pkey_index;
 		attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY;
 		ret = ib_modify_qp(qp, attr, IB_QP_STATE |
 					     IB_QP_PKEY_INDEX | IB_QP_QKEY);
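Note: the send- and receive-path hunks above apply the same DMA mapping error-check pattern. A minimal sketch of that pattern follows; the helper name example_map_send_buf and its arguments are hypothetical, and only the ib_dma_* calls and DMA_TO_DEVICE come from the kernel's RDMA/DMA API.

#include <linux/dma-mapping.h>
#include <rdma/ib_verbs.h>

/*
 * Hypothetical helper illustrating the pattern used by this patch:
 * every address returned by ib_dma_map_single() is validated with
 * ib_dma_mapping_error() before it is handed to the HCA in a work request.
 */
static int example_map_send_buf(struct ib_device *dev, void *buf, size_t len,
				u64 *mapping)
{
	u64 addr = ib_dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (unlikely(ib_dma_mapping_error(dev, addr)))
		return -ENOMEM;	/* mapping failed; never post this address */

	*mapping = addr;	/* saved so it can be unmapped on completion */
	return 0;
}

On the send side the patch additionally unmaps the already-mapped header buffer when the payload mapping fails, so a partial mapping is not leaked.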
