Diffstat (limited to 'drivers/infiniband/hw/cxgb4/cq.c')
 drivers/infiniband/hw/cxgb4/cq.c | 64 ++++++++++++++++++++++++++++++++++++++++------------------------
 1 file changed, 40 insertions(+), 24 deletions(-)
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index 88de3aa9c5b..c04292c950f 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -134,7 +134,8 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
 			V_FW_RI_RES_WR_IQANUS(0) |
 			V_FW_RI_RES_WR_IQANUD(1) |
 			F_FW_RI_RES_WR_IQANDST |
-			V_FW_RI_RES_WR_IQANDSTINDEX(*rdev->lldi.rxq_ids));
+			V_FW_RI_RES_WR_IQANDSTINDEX(
+				rdev->lldi.ciq_ids[cq->vector]));
 	res->u.cq.iqdroprss_to_iqesize = cpu_to_be16(
 			F_FW_RI_RES_WR_IQDROPRSS |
 			V_FW_RI_RES_WR_IQPCIECH(2) |
@@ -235,27 +236,21 @@ int c4iw_flush_sq(struct c4iw_qp *qhp)
 	struct t4_cq *cq = &chp->cq;
 	int idx;
 	struct t4_swsqe *swsqe;
-	int error = (qhp->attr.state != C4IW_QP_STATE_CLOSING &&
-			qhp->attr.state != C4IW_QP_STATE_IDLE);
 
 	if (wq->sq.flush_cidx == -1)
 		wq->sq.flush_cidx = wq->sq.cidx;
 	idx = wq->sq.flush_cidx;
 	BUG_ON(idx >= wq->sq.size);
 	while (idx != wq->sq.pidx) {
-		if (error) {
-			swsqe = &wq->sq.sw_sq[idx];
-			BUG_ON(swsqe->flushed);
-			swsqe->flushed = 1;
-			insert_sq_cqe(wq, cq, swsqe);
-			if (wq->sq.oldest_read == swsqe) {
-				BUG_ON(swsqe->opcode != FW_RI_READ_REQ);
-				advance_oldest_read(wq);
-			}
-			flushed++;
-		} else {
-			t4_sq_consume(wq);
+		swsqe = &wq->sq.sw_sq[idx];
+		BUG_ON(swsqe->flushed);
+		swsqe->flushed = 1;
+		insert_sq_cqe(wq, cq, swsqe);
+		if (wq->sq.oldest_read == swsqe) {
+			BUG_ON(swsqe->opcode != FW_RI_READ_REQ);
+			advance_oldest_read(wq);
 		}
+		flushed++;
 		if (++idx == wq->sq.size)
 			idx = 0;
 	}
@@ -365,8 +360,14 @@ void c4iw_flush_hw_cq(struct c4iw_cq *chp)
 
 		if (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP) {
 
-			/*
-			 * drop peer2peer RTR reads.
+			/* If we have reached here because of async
+			 * event or other error, and have egress error
+			 * then drop
+			 */
+			if (CQE_TYPE(hw_cqe) == 1)
+				goto next_cqe;
+
+			/* drop peer2peer RTR reads.
 			 */
 			if (CQE_WRID_STAG(hw_cqe) == 1)
 				goto next_cqe;
@@ -511,8 +512,18 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
 	 */
 	if (RQ_TYPE(hw_cqe) && (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP)) {
 
-		/*
-		 * If this is an unsolicited read response, then the read
+		/* If we have reached here because of async
+		 * event or other error, and have egress error
+		 * then drop
+		 */
+		if (CQE_TYPE(hw_cqe) == 1) {
+			if (CQE_STATUS(hw_cqe))
+				t4_set_wq_in_error(wq);
+			ret = -EAGAIN;
+			goto skip_cqe;
+		}
+
+		/* If this is an unsolicited read response, then the read
 		 * was generated by the kernel driver as part of peer-2-peer
 		 * connection setup.  So ignore the completion.
 		 */
@@ -603,7 +614,7 @@ proc_cqe:
 	 */
 	if (SQ_TYPE(hw_cqe)) {
 		int idx = CQE_WRID_SQ_IDX(hw_cqe);
-		BUG_ON(idx > wq->sq.size);
+		BUG_ON(idx >= wq->sq.size);
 
 		/*
 		* Account for any unsignaled completions completed by
@@ -617,7 +628,7 @@ proc_cqe:
 			wq->sq.in_use -= wq->sq.size + idx - wq->sq.cidx;
 		else
 			wq->sq.in_use -= idx - wq->sq.cidx;
-		BUG_ON(wq->sq.in_use < 0 && wq->sq.in_use < wq->sq.size);
+		BUG_ON(wq->sq.in_use <= 0 && wq->sq.in_use >= wq->sq.size);
 
 		wq->sq.cidx = (uint16_t)idx;
 		PDBG("%s completing sq idx %u\n", __func__, wq->sq.cidx);
@@ -662,7 +673,7 @@ skip_cqe:
 static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
 {
 	struct c4iw_qp *qhp = NULL;
-	struct t4_cqe cqe = {0, 0}, *rd_cqe;
+	struct t4_cqe uninitialized_var(cqe), *rd_cqe;
 	struct t4_wq *wq;
 	u32 credit = 0;
 	u8 cqe_flushed;
@@ -860,6 +871,9 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
 
 	rhp = to_c4iw_dev(ibdev);
 
+	if (vector >= rhp->rdev.lldi.nciq)
+		return ERR_PTR(-EINVAL);
+
 	chp = kzalloc(sizeof(*chp), GFP_KERNEL);
 	if (!chp)
 		return ERR_PTR(-ENOMEM);
@@ -881,7 +895,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
 	/*
 	 * Make actual HW queue 2x to avoid cdix_inc overflows.
 	 */
-	hwentries = entries * 2;
+	hwentries = min(entries * 2, T4_MAX_IQ_SIZE);
 
 	/*
 	 * Make HW queue at least 64 entries so GTS updates aren't too
@@ -905,6 +919,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
 	}
 	chp->cq.size = hwentries;
 	chp->cq.memsize = memsize;
+	chp->cq.vector = vector;
 
 	ret = create_cq(&rhp->rdev, &chp->cq,
 			ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
@@ -940,7 +955,8 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
 		uresp.gts_key = ucontext->key;
 		ucontext->key += PAGE_SIZE;
 		spin_unlock(&ucontext->mmap_lock);
-		ret = ib_copy_to_udata(udata, &uresp, sizeof uresp);
+		ret = ib_copy_to_udata(udata, &uresp,
+				       sizeof(uresp) - sizeof(uresp.reserved));
 		if (ret)
 			goto err5;
 
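The functional core of the diff is per-vector CQ steering: c4iw_create_cq() now rejects completion vectors that are not below lldi.nciq, records the vector in the CQ, and create_cq() indexes lldi.ciq_ids[] with it when programming the IQANDSTINDEX RSS destination. Below is a minimal standalone sketch of that validate-then-index pattern, not the driver code itself; pick_ciq() and out_id are hypothetical names, while ciq_ids and nciq mirror the lldi fields referenced in the diff.

#include <errno.h>
#include <stdint.h>

/*
 * Sketch only: mirrors the diff's "if (vector >= nciq) fail, else use
 * ciq_ids[vector]" pattern. pick_ciq() and out_id are made-up names;
 * ciq_ids/nciq correspond to the lldi fields the driver consults.
 */
int pick_ciq(const uint16_t *ciq_ids, unsigned int nciq,
	     unsigned int vector, uint16_t *out_id)
{
	if (vector >= nciq)		/* reject out-of-range completion vectors */
		return -EINVAL;
	*out_id = ciq_ids[vector];	/* per-vector concentrator IQ id */
	return 0;
}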
