Diffstat (limited to 'drivers/infiniband/hw/qib/qib_mad.c')
 -rw-r--r--  drivers/infiniband/hw/qib/qib_mad.c | 578
 1 file changed, 469 insertions(+), 109 deletions(-)
diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c
index 94b0d1f3a8f..22c720e5740 100644
--- a/drivers/infiniband/hw/qib/qib_mad.c
+++ b/drivers/infiniband/hw/qib/qib_mad.c
@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
- * All rights reserved.
+ * Copyright (c) 2012 Intel Corporation.  All rights reserved.
+ * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
  * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -49,6 +49,18 @@ static int reply(struct ib_smp *smp)
 	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
 }
 
+static int reply_failure(struct ib_smp *smp)
+{
+	/*
+	 * The verbs framework will handle the directed/LID route
+	 * packet changes.
+	 */
+	smp->method = IB_MGMT_METHOD_GET_RESP;
+	if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
+		smp->status |= IB_SMP_DIRECTION;
+	return IB_MAD_RESULT_FAILURE | IB_MAD_RESULT_REPLY;
+}
+
 static void qib_send_trap(struct qib_ibport *ibp, void *data, unsigned len)
 {
 	struct ib_mad_send_buf *send_buf;
@@ -90,14 +102,10 @@ static void qib_send_trap(struct qib_ibport *ibp, void *data, unsigned len)
 	if (!ibp->sm_ah) {
 		if (ibp->sm_lid != be16_to_cpu(IB_LID_PERMISSIVE)) {
 			struct ib_ah *ah;
-			struct ib_ah_attr attr;
 
-			memset(&attr, 0, sizeof attr);
-			attr.dlid = ibp->sm_lid;
-			attr.port_num = ppd_from_ibp(ibp)->port;
-			ah = ib_create_ah(ibp->qp0->ibqp.pd, &attr);
+			ah = qib_create_qp0_ah(ibp, ibp->sm_lid);
 			if (IS_ERR(ah))
-				ret = -EINVAL;
+				ret = PTR_ERR(ah);
 			else {
 				send_buf->ah = ah;
 				ibp->sm_ah = to_iah(ah);
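The AH-creation change above also stops collapsing failures into -EINVAL: the error pointer returned by qib_create_qp0_ah() is decoded with PTR_ERR() so the real errno propagates to the caller. A user-space model of that kernel idiom (the ERR_PTR helpers are simplified re-implementations, and make_ah()/get_ah() are hypothetical):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
	/* error pointers occupy the top MAX_ERRNO addresses */
	return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

static void *make_ah(int fail)
{
	return fail ? ERR_PTR(-ENOMEM) : (void *)0x1000; /* fake object */
}

static int get_ah(void **out)
{
	void *ah = make_ah(1);

	if (IS_ERR(ah))
		return PTR_ERR(ah);	/* propagate -ENOMEM, not -EINVAL */
	*out = ah;
	return 0;
}

int main(void)
{
	void *ah;

	printf("%d\n", get_ah(&ah));	/* prints -12 (ENOMEM) */
	return 0;
}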
@@ -396,6 +404,7 @@ static int get_linkdowndefaultstate(struct qib_pportdata *ppd)
 
 static int check_mkey(struct qib_ibport *ibp, struct ib_smp *smp, int mad_flags)
 {
+	int valid_mkey = 0;
 	int ret = 0;
 
 	/* Is the mkey in the process of expiring? */
@@ -406,23 +415,36 @@ static int check_mkey(struct qib_ibport *ibp, struct ib_smp *smp, int mad_flags)
 		ibp->mkeyprot = 0;
 	}
 
-	/* M_Key checking depends on Portinfo:M_Key_protect_bits */
-	if ((mad_flags & IB_MAD_IGNORE_MKEY) == 0 && ibp->mkey != 0 &&
-	    ibp->mkey != smp->mkey &&
-	    (smp->method == IB_MGMT_METHOD_SET ||
-	     smp->method == IB_MGMT_METHOD_TRAP_REPRESS ||
-	     (smp->method == IB_MGMT_METHOD_GET && ibp->mkeyprot >= 2))) {
-		if (ibp->mkey_violations != 0xFFFF)
-			++ibp->mkey_violations;
-		if (!ibp->mkey_lease_timeout && ibp->mkey_lease_period)
-			ibp->mkey_lease_timeout = jiffies +
-				ibp->mkey_lease_period * HZ;
-		/* Generate a trap notice. */
-		qib_bad_mkey(ibp, smp);
-		ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
-	} else if (ibp->mkey_lease_timeout)
+	if ((mad_flags & IB_MAD_IGNORE_MKEY) || ibp->mkey == 0 ||
+	    ibp->mkey == smp->mkey)
+		valid_mkey = 1;
+
+	/* Unset lease timeout on any valid Get/Set/TrapRepress */
+	if (valid_mkey && ibp->mkey_lease_timeout &&
+	    (smp->method == IB_MGMT_METHOD_GET ||
+	     smp->method == IB_MGMT_METHOD_SET ||
+	     smp->method == IB_MGMT_METHOD_TRAP_REPRESS))
 		ibp->mkey_lease_timeout = 0;
 
+	if (!valid_mkey) {
+		switch (smp->method) {
+		case IB_MGMT_METHOD_GET:
+			/* Bad mkey not a violation below level 2 */
+			if (ibp->mkeyprot < 2)
+				break;
+		case IB_MGMT_METHOD_SET:
+		case IB_MGMT_METHOD_TRAP_REPRESS:
+			if (ibp->mkey_violations != 0xFFFF)
+				++ibp->mkey_violations;
+			if (!ibp->mkey_lease_timeout && ibp->mkey_lease_period)
+				ibp->mkey_lease_timeout = jiffies +
+					ibp->mkey_lease_period * HZ;
+			/* Generate a trap notice. */
+			qib_bad_mkey(ibp, smp);
+			ret = 1;
+		}
+	}
+
 	return ret;
 }
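The reworked check_mkey() separates classification from enforcement: the M_Key is first judged valid or not, any valid Get/Set/TrapRepress clears the lease timer, and only then are violations counted, with Gets exempt below protection level 2. A standalone model of those rules (simplified types, not the driver code):

#include <stdint.h>
#include <stdio.h>

enum method { M_GET, M_SET, M_TRAP_REPRESS };

static int mkey_valid(int ignore, uint64_t port_mkey, uint64_t smp_mkey)
{
	/* "Ignore" flag, unset port M_Key, or exact match all pass. */
	return ignore || port_mkey == 0 || port_mkey == smp_mkey;
}

static int counts_as_violation(enum method m, unsigned mkeyprot)
{
	/* The Get case falls through to Set/TrapRepress only at
	 * protection level 2 and above, mirroring the switch above. */
	return m != M_GET || mkeyprot >= 2;
}

int main(void)
{
	printf("%d %d\n", mkey_valid(0, 0x1234, 0x1234),
	       counts_as_violation(M_GET, 1));	/* prints: 1 0 */
	return 0;
}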
@@ -433,7 +455,6 @@ static int subn_get_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
 	struct qib_pportdata *ppd;
 	struct qib_ibport *ibp;
 	struct ib_port_info *pip = (struct ib_port_info *)smp->data;
-	u16 lid;
 	u8 mtu;
 	int ret;
 	u32 state;
@@ -450,8 +471,10 @@ static int subn_get_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
 		if (port_num != port) {
 			ibp = to_iport(ibdev, port_num);
 			ret = check_mkey(ibp, smp, 0);
-			if (ret)
+			if (ret) {
+				ret = IB_MAD_RESULT_FAILURE;
 				goto bail;
+			}
 		}
 	}
 
@@ -464,12 +487,12 @@ static int subn_get_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
 	memset(smp->data, 0, sizeof(smp->data));
 
 	/* Only return the mkey if the protection field allows it. */
-	if (smp->method == IB_MGMT_METHOD_SET || ibp->mkey == smp->mkey ||
-	    ibp->mkeyprot == 0)
+	if (!(smp->method == IB_MGMT_METHOD_GET &&
+	      ibp->mkey != smp->mkey &&
+	      ibp->mkeyprot == 1))
 		pip->mkey = ibp->mkey;
 	pip->gid_prefix = ibp->gid_prefix;
-	lid = ppd->lid;
-	pip->lid = lid ? cpu_to_be16(lid) : IB_LID_PERMISSIVE;
+	pip->lid = cpu_to_be16(ppd->lid);
 	pip->sm_lid = cpu_to_be16(ibp->sm_lid);
 	pip->cap_mask = cpu_to_be32(ibp->port_cap_flags);
 	/* pip->diag_code; */
@@ -632,7 +655,7 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
 	struct qib_devdata *dd;
 	struct qib_pportdata *ppd;
 	struct qib_ibport *ibp;
-	char clientrereg = 0;
+	u8 clientrereg = (pip->clientrereg_resv_subnetto & 0x80);
 	unsigned long flags;
 	u16 lid, smlid;
 	u8 lwe;
@@ -668,8 +691,8 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
 	lid = be16_to_cpu(pip->lid);
 	/* Must be a valid unicast LID address. */
 	if (lid == 0 || lid >= QIB_MULTICAST_LID_BASE)
-		goto err;
-	if (ppd->lid != lid || ppd->lmc != (pip->mkeyprot_resv_lmc & 7)) {
+		smp->status |= IB_SMP_INVALID_FIELD;
+	else if (ppd->lid != lid || ppd->lmc != (pip->mkeyprot_resv_lmc & 7)) {
 		if (ppd->lid != lid)
 			qib_set_uevent_bits(ppd, _QIB_EVENT_LID_CHANGE_BIT);
 		if (ppd->lmc != (pip->mkeyprot_resv_lmc & 7))
@@ -683,8 +706,8 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
 	msl = pip->neighbormtu_mastersmsl & 0xF;
 	/* Must be a valid unicast LID address. */
 	if (smlid == 0 || smlid >= QIB_MULTICAST_LID_BASE)
-		goto err;
-	if (smlid != ibp->sm_lid || msl != ibp->sm_sl) {
+		smp->status |= IB_SMP_INVALID_FIELD;
+	else if (smlid != ibp->sm_lid || msl != ibp->sm_sl) {
 		spin_lock_irqsave(&ibp->lock, flags);
 		if (ibp->sm_ah) {
 			if (smlid != ibp->sm_lid)
@@ -705,10 +728,11 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
 	lwe = pip->link_width_enabled;
 	if (lwe) {
 		if (lwe == 0xFF)
-			lwe = ppd->link_width_supported;
+			set_link_width_enabled(ppd, ppd->link_width_supported);
 		else if (lwe >= 16 || (lwe & ~ppd->link_width_supported))
-			goto err;
-		set_link_width_enabled(ppd, lwe);
+			smp->status |= IB_SMP_INVALID_FIELD;
+		else if (lwe != ppd->link_width_enabled)
+			set_link_width_enabled(ppd, lwe);
 	}
 
 	lse = pip->linkspeedactive_enabled & 0xF;
@@ -719,10 +743,12 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
 		 * speeds.
 		 */
 		if (lse == 15)
-			lse = ppd->link_speed_supported;
+			set_link_speed_enabled(ppd,
+					       ppd->link_speed_supported);
 		else if (lse >= 8 || (lse & ~ppd->link_speed_supported))
-			goto err;
-		set_link_speed_enabled(ppd, lse);
+			smp->status |= IB_SMP_INVALID_FIELD;
+		else if (lse != ppd->link_speed_enabled)
+			set_link_speed_enabled(ppd, lse);
 	}
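The width/speed handling above now validates before it writes: 0xFF (or 15 for speed) means "enable everything supported", out-of-range values or unsupported bits only taint the status, and the setter is skipped when nothing changes. A small model of the width check (supported-mask value is illustrative):

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

static bool lwe_ok(uint8_t lwe, uint8_t supported)
{
	if (lwe == 0xFF)
		return true;	/* shorthand handled before range checks */
	return lwe < 16 && !(lwe & ~supported);
}

int main(void)
{
	uint8_t supported = 0x3;	/* e.g. 1x and 4x supported */

	printf("%d %d %d\n", lwe_ok(0xFF, supported),
	       lwe_ok(0x2, supported), lwe_ok(0x4, supported)); /* 1 1 0 */
	return 0;
}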
 	/* Set link down default state. */
@@ -738,7 +764,7 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
 					IB_LINKINITCMD_POLL);
 		break;
 	default:
-		goto err;
+		smp->status |= IB_SMP_INVALID_FIELD;
 	}
 
 	ibp->mkeyprot = pip->mkeyprot_resv_lmc >> 6;
@@ -748,15 +774,17 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
 
 	mtu = ib_mtu_enum_to_int((pip->neighbormtu_mastersmsl >> 4) & 0xF);
 	if (mtu == -1)
-		goto err;
-	qib_set_mtu(ppd, mtu);
+		smp->status |= IB_SMP_INVALID_FIELD;
+	else
+		qib_set_mtu(ppd, mtu);
 
 	/* Set operational VLs */
 	vls = (pip->operationalvl_pei_peo_fpi_fpo >> 4) & 0xF;
 	if (vls) {
 		if (vls > ppd->vls_supported)
-			goto err;
-		(void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_OP_VLS, vls);
+			smp->status |= IB_SMP_INVALID_FIELD;
+		else
+			(void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_OP_VLS, vls);
 	}
 
 	if (pip->mkey_violations == 0)
@@ -770,19 +798,13 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
 
 	ore = pip->localphyerrors_overrunerrors;
 	if (set_phyerrthreshold(ppd, (ore >> 4) & 0xF))
-		goto err;
+		smp->status |= IB_SMP_INVALID_FIELD;
 
 	if (set_overrunthreshold(ppd, (ore & 0xF)))
-		goto err;
+		smp->status |= IB_SMP_INVALID_FIELD;
 
 	ibp->subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F;
 
-	if (pip->clientrereg_resv_subnetto & 0x80) {
-		clientrereg = 1;
-		event.event = IB_EVENT_CLIENT_REREGISTER;
-		ib_dispatch_event(&event);
-	}
-
 	/*
 	 * Do the port state change now that the other link parameters
 	 * have been set.
@@ -792,7 +814,7 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
 	state = pip->linkspeed_portstate & 0xF;
 	lstate = (pip->portphysstate_linkdown >> 4) & 0xF;
 	if (lstate && !(state == IB_PORT_DOWN || state == IB_PORT_NOP))
-		goto err;
+		smp->status |= IB_SMP_INVALID_FIELD;
 
 	/*
 	 * Only state changes of DOWN, ARM, and ACTIVE are valid
@@ -812,8 +834,10 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
 			lstate = QIB_IB_LINKDOWN;
 		else if (lstate == 3)
 			lstate = QIB_IB_LINKDOWN_DISABLE;
-		else
-			goto err;
+		else {
+			smp->status |= IB_SMP_INVALID_FIELD;
+			break;
+		}
 		spin_lock_irqsave(&ppd->lflags_lock, flags);
 		ppd->lflags &= ~QIBL_LINKV;
 		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
@@ -835,16 +859,20 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
 		qib_set_linkstate(ppd, QIB_IB_LINKACTIVE);
 		break;
 	default:
-		/* XXX We have already partially updated our state! */
-		goto err;
+		smp->status |= IB_SMP_INVALID_FIELD;
+	}
+
+	if (clientrereg) {
+		event.event = IB_EVENT_CLIENT_REREGISTER;
+		ib_dispatch_event(&event);
 	}
 
 	ret = subn_get_portinfo(smp, ibdev, port);
 
-	if (clientrereg)
-		pip->clientrereg_resv_subnetto |= 0x80;
+	/* restore re-reg bit per o14-12.2.1 */
+	pip->clientrereg_resv_subnetto |= clientrereg;
 
-	goto done;
+	goto get_only;
 
 err:
 	smp->status |= IB_SMP_INVALID_FIELD;
@@ -1000,7 +1028,7 @@ static int set_pkeys(struct qib_devdata *dd, u8 port, u16 *pkeys)
 
 		event.event = IB_EVENT_PKEY_CHANGE;
 		event.device = &dd->verbs_dev.ibdev;
-		event.element.port_num = 1;
+		event.element.port_num = port;
 
 		ib_dispatch_event(&event);
 	}
 	return 0;
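Throughout Set(PortInfo), the old "goto err" exits are replaced by accumulating IB_SMP_INVALID_FIELD, so one bad field no longer aborts processing that has already partially applied state. A minimal sketch of the pattern (all helpers are hypothetical stand-ins for the per-field checks):

#include <stdbool.h>
#include <stdio.h>

#define INVALID_FIELD 0x001c	/* stand-in for IB_SMP_INVALID_FIELD */

static bool validate_lid(void) { return false; }	/* pretend LID is bad */
static void apply_lid(void) { }
static bool validate_mtu(void) { return true; }
static void apply_mtu(void) { }

static unsigned set_port_fields(void)
{
	unsigned status = 0;

	/* A bad field taints the reply status, but later fields are
	 * still validated and applied - no half-done early exit. */
	if (!validate_lid())
		status |= INVALID_FIELD;
	else
		apply_lid();

	if (!validate_mtu())
		status |= INVALID_FIELD;
	else
		apply_mtu();

	return status;
}

int main(void)
{
	printf("status=0x%x\n", set_port_fields());	/* MTU still applied */
	return 0;
}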
@@ -1118,22 +1146,22 @@ static int subn_trap_repress(struct ib_smp *smp, struct ib_device *ibdev,
 	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
 }
 
-static int pma_get_classportinfo(struct ib_perf *pmp,
+static int pma_get_classportinfo(struct ib_pma_mad *pmp,
 				 struct ib_device *ibdev)
 {
-	struct ib_pma_classportinfo *p =
-		(struct ib_pma_classportinfo *)pmp->data;
+	struct ib_class_port_info *p =
+		(struct ib_class_port_info *)pmp->data;
 	struct qib_devdata *dd = dd_from_ibdev(ibdev);
 
 	memset(pmp->data, 0, sizeof(pmp->data));
 
-	if (pmp->attr_mod != 0)
-		pmp->status |= IB_SMP_INVALID_FIELD;
+	if (pmp->mad_hdr.attr_mod != 0)
+		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
 
 	/* Note that AllPortSelect is not valid */
 	p->base_version = 1;
 	p->class_version = 1;
-	p->cap_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
+	p->capability_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
 	/*
 	 * Set the most significant bit of CM2 to indicate support for
 	 * congestion statistics
@@ -1147,7 +1175,7 @@ static int pma_get_classportinfo(struct ib_perf *pmp,
 	return reply((struct ib_smp *) pmp);
 }
 
-static int pma_get_portsamplescontrol(struct ib_perf *pmp,
+static int pma_get_portsamplescontrol(struct ib_pma_mad *pmp,
 				      struct ib_device *ibdev, u8 port)
 {
 	struct ib_pma_portsamplescontrol *p =
@@ -1162,8 +1190,8 @@ static int pma_get_portsamplescontrol(struct ib_perf *pmp,
 	memset(pmp->data, 0, sizeof(pmp->data));
 	p->port_select = port_select;
-	if (pmp->attr_mod != 0 || port_select != port) {
-		pmp->status |= IB_SMP_INVALID_FIELD;
+	if (pmp->mad_hdr.attr_mod != 0 || port_select != port) {
+		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
 		goto bail;
 	}
 	spin_lock_irqsave(&ibp->lock, flags);
@@ -1185,7 +1213,7 @@ bail:
 	return reply((struct ib_smp *) pmp);
 }
 
-static int pma_set_portsamplescontrol(struct ib_perf *pmp,
+static int pma_set_portsamplescontrol(struct ib_pma_mad *pmp,
 				      struct ib_device *ibdev, u8 port)
 {
 	struct ib_pma_portsamplescontrol *p =
@@ -1198,8 +1226,8 @@ static int pma_set_portsamplescontrol(struct ib_perf *pmp,
 	u8 status, xmit_flags;
 	int ret;
 
-	if (pmp->attr_mod != 0 || p->port_select != port) {
-		pmp->status |= IB_SMP_INVALID_FIELD;
+	if (pmp->mad_hdr.attr_mod != 0 || p->port_select != port) {
+		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
 		ret = reply((struct ib_smp *) pmp);
 		goto bail;
 	}
@@ -1314,7 +1342,7 @@ static u64 get_cache_hw_sample_counters(struct qib_pportdata *ppd,
 	return ret;
 }
 
-static int pma_get_portsamplesresult(struct ib_perf *pmp,
+static int pma_get_portsamplesresult(struct ib_pma_mad *pmp,
 				     struct ib_device *ibdev, u8 port)
 {
 	struct ib_pma_portsamplesresult *p =
@@ -1353,7 +1381,7 @@ static int pma_get_portsamplesresult(struct ib_perf *pmp,
 	return reply((struct ib_smp *) pmp);
 }
 
-static int pma_get_portsamplesresult_ext(struct ib_perf *pmp,
+static int pma_get_portsamplesresult_ext(struct ib_pma_mad *pmp,
 					 struct ib_device *ibdev, u8 port)
 {
 	struct ib_pma_portsamplesresult_ext *p =
@@ -1395,7 +1423,7 @@ static int pma_get_portsamplesresult_ext(struct ib_perf *pmp,
 	return reply((struct ib_smp *) pmp);
 }
 
-static int pma_get_portcounters(struct ib_perf *pmp,
+static int pma_get_portcounters(struct ib_pma_mad *pmp,
 				struct ib_device *ibdev, u8 port)
 {
 	struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
@@ -1429,8 +1457,8 @@ static int pma_get_portcounters(struct ib_perf *pmp,
 	memset(pmp->data, 0, sizeof(pmp->data));
 	p->port_select = port_select;
-	if (pmp->attr_mod != 0 || port_select != port)
-		pmp->status |= IB_SMP_INVALID_FIELD;
+	if (pmp->mad_hdr.attr_mod != 0 || port_select != port)
+		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
 
 	if (cntrs.symbol_error_counter > 0xFFFFUL)
 		p->symbol_error_counter = cpu_to_be16(0xFFFF);
@@ -1465,7 +1493,7 @@ static int pma_get_portcounters(struct ib_perf *pmp,
 		cntrs.local_link_integrity_errors = 0xFUL;
 	if (cntrs.excessive_buffer_overrun_errors > 0xFUL)
 		cntrs.excessive_buffer_overrun_errors = 0xFUL;
-	p->lli_ebor_errors = (cntrs.local_link_integrity_errors << 4) |
+	p->link_overrun_errors = (cntrs.local_link_integrity_errors << 4) |
 		cntrs.excessive_buffer_overrun_errors;
 	if (cntrs.vl15_dropped > 0xFFFFUL)
 		p->vl15_dropped = cpu_to_be16(0xFFFF);
@@ -1493,7 +1521,7 @@ static int pma_get_portcounters(struct ib_perf *pmp,
 	return reply((struct ib_smp *) pmp);
 }
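Get(PortCounters) above squeezes wide internal counters into narrow PMA fields: values saturate at the field maximum, and the two 4-bit error counters share the single link_overrun_errors byte. A compact model of that saturate-and-pack idiom:

#include <stdint.h>
#include <stdio.h>

static uint16_t clamp16(uint64_t v)
{
	return v > 0xFFFFUL ? 0xFFFF : (uint16_t)v;
}

static uint8_t pack_lli_ebor(uint64_t lli, uint64_t ebor)
{
	if (lli > 0xFUL)	/* local link integrity, high nibble */
		lli = 0xFUL;
	if (ebor > 0xFUL)	/* excessive buffer overrun, low nibble */
		ebor = 0xFUL;
	return (uint8_t)((lli << 4) | ebor);
}

int main(void)
{
	printf("0x%x 0x%02x\n", clamp16(1 << 20), pack_lli_ebor(3, 99));
	/* prints: 0xffff 0x3f */
	return 0;
}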
-static int pma_get_portcounters_cong(struct ib_perf *pmp,
+static int pma_get_portcounters_cong(struct ib_pma_mad *pmp,
 				     struct ib_device *ibdev, u8 port)
 {
 	/* Congestion PMA packets start at offset 24 not 64 */
@@ -1503,7 +1531,7 @@ static int pma_get_portcounters_cong(struct ib_perf *pmp,
 	struct qib_ibport *ibp = to_iport(ibdev, port);
 	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
 	struct qib_devdata *dd = dd_from_ppd(ppd);
-	u32 port_select = be32_to_cpu(pmp->attr_mod) & 0xFF;
+	u32 port_select = be32_to_cpu(pmp->mad_hdr.attr_mod) & 0xFF;
 	u64 xmit_wait_counter;
 	unsigned long flags;
 
@@ -1512,9 +1540,9 @@ static int pma_get_portcounters_cong(struct ib_perf *pmp,
 	 * SET method ends up calling this anyway.
 	 */
 	if (!dd->psxmitwait_supported)
-		pmp->status |= IB_SMP_UNSUP_METH_ATTR;
+		pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
 	if (port_select != port)
-		pmp->status |= IB_SMP_INVALID_FIELD;
+		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
 
 	qib_get_counters(ppd, &cntrs);
 	spin_lock_irqsave(&ppd->ibport_data.lock, flags);
@@ -1596,7 +1624,7 @@ static int pma_get_portcounters_cong(struct ib_perf *pmp,
 		cntrs.local_link_integrity_errors = 0xFUL;
 	if (cntrs.excessive_buffer_overrun_errors > 0xFUL)
 		cntrs.excessive_buffer_overrun_errors = 0xFUL;
-	p->lli_ebor_errors = (cntrs.local_link_integrity_errors << 4) |
+	p->link_overrun_errors = (cntrs.local_link_integrity_errors << 4) |
 		cntrs.excessive_buffer_overrun_errors;
 	if (cntrs.vl15_dropped > 0xFFFFUL)
 		p->vl15_dropped = cpu_to_be16(0xFFFF);
@@ -1606,7 +1634,24 @@ static int pma_get_portcounters_cong(struct ib_perf *pmp,
 	return reply((struct ib_smp *)pmp);
 }
 
-static int pma_get_portcounters_ext(struct ib_perf *pmp,
+static void qib_snapshot_pmacounters(
+	struct qib_ibport *ibp,
+	struct qib_pma_counters *pmacounters)
+{
+	struct qib_pma_counters *p;
+	int cpu;
+
+	memset(pmacounters, 0, sizeof(*pmacounters));
+	for_each_possible_cpu(cpu) {
+		p = per_cpu_ptr(ibp->pmastats, cpu);
+		pmacounters->n_unicast_xmit += p->n_unicast_xmit;
+		pmacounters->n_unicast_rcv += p->n_unicast_rcv;
+		pmacounters->n_multicast_xmit += p->n_multicast_xmit;
+		pmacounters->n_multicast_rcv += p->n_multicast_rcv;
+	}
+}
+
+static int pma_get_portcounters_ext(struct ib_pma_mad *pmp,
 				    struct ib_device *ibdev, u8 port)
 {
 	struct ib_pma_portcounters_ext *p =
@@ -1614,13 +1659,14 @@ static int pma_get_portcounters_ext(struct ib_perf *pmp,
 	struct qib_ibport *ibp = to_iport(ibdev, port);
 	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
 	u64 swords, rwords, spkts, rpkts, xwait;
+	struct qib_pma_counters pma;
 	u8 port_select = p->port_select;
 
 	memset(pmp->data, 0, sizeof(pmp->data));
 	p->port_select = port_select;
-	if (pmp->attr_mod != 0 || port_select != port) {
-		pmp->status |= IB_SMP_INVALID_FIELD;
+	if (pmp->mad_hdr.attr_mod != 0 || port_select != port) {
+		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
 		goto bail;
 	}
 
@@ -1636,16 +1682,23 @@ static int pma_get_portcounters_ext(struct ib_perf *pmp,
 	p->port_rcv_data = cpu_to_be64(rwords);
 	p->port_xmit_packets = cpu_to_be64(spkts);
 	p->port_rcv_packets = cpu_to_be64(rpkts);
-	p->port_unicast_xmit_packets = cpu_to_be64(ibp->n_unicast_xmit);
-	p->port_unicast_rcv_packets = cpu_to_be64(ibp->n_unicast_rcv);
-	p->port_multicast_xmit_packets = cpu_to_be64(ibp->n_multicast_xmit);
-	p->port_multicast_rcv_packets = cpu_to_be64(ibp->n_multicast_rcv);
+
+	qib_snapshot_pmacounters(ibp, &pma);
+
+	p->port_unicast_xmit_packets = cpu_to_be64(pma.n_unicast_xmit
+		- ibp->z_unicast_xmit);
+	p->port_unicast_rcv_packets = cpu_to_be64(pma.n_unicast_rcv
+		- ibp->z_unicast_rcv);
+	p->port_multicast_xmit_packets = cpu_to_be64(pma.n_multicast_xmit
+		- ibp->z_multicast_xmit);
+	p->port_multicast_rcv_packets = cpu_to_be64(pma.n_multicast_rcv
+		- ibp->z_multicast_rcv);
 
 bail:
 	return reply((struct ib_smp *) pmp);
 }
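qib_snapshot_pmacounters() sums per-CPU counters, and the matching Set handler further down implements "clear" by recording a baseline in the z_* fields rather than zeroing the per-CPU data, so hot-path writers stay lock-free. A self-contained model of that read/clear scheme (a fixed CPU count and plain arrays stand in for the kernel's per-CPU allocation):

#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 4	/* assumption: fixed CPU count for the sketch */

static uint64_t percpu_unicast_xmit[NR_CPUS];	/* each CPU bumps its slot */
static uint64_t z_unicast_xmit;			/* baseline set at "clear" */

static uint64_t snapshot(void)
{
	uint64_t sum = 0;

	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		sum += percpu_unicast_xmit[cpu];
	return sum;
}

static uint64_t pma_read(void)
{
	return snapshot() - z_unicast_xmit;	/* relative to last clear */
}

static void pma_clear(void)
{
	z_unicast_xmit = snapshot();	/* never touches writers' slots */
}

int main(void)
{
	percpu_unicast_xmit[0] = 5;
	percpu_unicast_xmit[2] = 7;
	pma_clear();
	percpu_unicast_xmit[1] = 3;
	printf("%llu\n", (unsigned long long)pma_read());	/* prints 3 */
	return 0;
}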
-static int pma_set_portcounters(struct ib_perf *pmp,
+static int pma_set_portcounters(struct ib_pma_mad *pmp,
 				struct ib_device *ibdev, u8 port)
 {
 	struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
@@ -1708,14 +1761,14 @@ static int pma_set_portcounters(struct ib_perf *pmp,
 	return pma_get_portcounters(pmp, ibdev, port);
 }
 
-static int pma_set_portcounters_cong(struct ib_perf *pmp,
+static int pma_set_portcounters_cong(struct ib_pma_mad *pmp,
 				     struct ib_device *ibdev, u8 port)
 {
 	struct qib_ibport *ibp = to_iport(ibdev, port);
 	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
 	struct qib_devdata *dd = dd_from_ppd(ppd);
 	struct qib_verbs_counters cntrs;
-	u32 counter_select = (be32_to_cpu(pmp->attr_mod) >> 24) & 0xFF;
+	u32 counter_select = (be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24) & 0xFF;
 	int ret = 0;
 	unsigned long flags;
 
@@ -1759,7 +1812,7 @@ static int pma_set_portcounters_cong(struct ib_perf *pmp,
 	return ret;
 }
 
-static int pma_set_portcounters_ext(struct ib_perf *pmp,
+static int pma_set_portcounters_ext(struct ib_pma_mad *pmp,
 				    struct ib_device *ibdev, u8 port)
 {
 	struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
@@ -1767,6 +1820,7 @@ static int pma_set_portcounters_ext(struct ib_perf *pmp,
 	struct qib_ibport *ibp = to_iport(ibdev, port);
 	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
 	u64 swords, rwords, spkts, rpkts, xwait;
+	struct qib_pma_counters pma;
 
 	qib_snapshot_counters(ppd, &swords, &rwords, &spkts, &rpkts, &xwait);
 
@@ -1782,17 +1836,19 @@ static int pma_set_portcounters_ext(struct ib_perf *pmp,
 	if (p->counter_select & IB_PMA_SELX_PORT_RCV_PACKETS)
 		ibp->z_port_rcv_packets = rpkts;
 
+	qib_snapshot_pmacounters(ibp, &pma);
+
 	if (p->counter_select & IB_PMA_SELX_PORT_UNI_XMIT_PACKETS)
-		ibp->n_unicast_xmit = 0;
+		ibp->z_unicast_xmit = pma.n_unicast_xmit;
 
 	if (p->counter_select & IB_PMA_SELX_PORT_UNI_RCV_PACKETS)
-		ibp->n_unicast_rcv = 0;
+		ibp->z_unicast_rcv = pma.n_unicast_rcv;
 
 	if (p->counter_select & IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS)
-		ibp->n_multicast_xmit = 0;
+		ibp->z_multicast_xmit = pma.n_multicast_xmit;
 
 	if (p->counter_select & IB_PMA_SELX_PORT_MULTI_RCV_PACKETS)
-		ibp->n_multicast_rcv = 0;
+		ibp->z_multicast_rcv = pma.n_multicast_rcv;
 
 	return pma_get_portcounters_ext(pmp, ibdev, port);
 }
@@ -1830,6 +1886,7 @@ static int process_subn(struct ib_device *ibdev, int mad_flags,
 		    port_num && port_num <= ibdev->phys_port_cnt &&
 		    port != port_num)
 			(void) check_mkey(to_iport(ibdev, port_num), smp, 0);
+		ret = IB_MAD_RESULT_FAILURE;
 		goto bail;
 	}
 
@@ -1952,19 +2009,19 @@ static int process_perf(struct ib_device *ibdev, u8 port,
 			struct ib_mad *in_mad,
 			struct ib_mad *out_mad)
 {
-	struct ib_perf *pmp = (struct ib_perf *)out_mad;
+	struct ib_pma_mad *pmp = (struct ib_pma_mad *)out_mad;
 	int ret;
 
 	*out_mad = *in_mad;
-	if (pmp->class_version != 1) {
-		pmp->status |= IB_SMP_UNSUP_VERSION;
+	if (pmp->mad_hdr.class_version != 1) {
+		pmp->mad_hdr.status |= IB_SMP_UNSUP_VERSION;
 		ret = reply((struct ib_smp *) pmp);
 		goto bail;
 	}
 
-	switch (pmp->method) {
+	switch (pmp->mad_hdr.method) {
 	case IB_MGMT_METHOD_GET:
-		switch (pmp->attr_id) {
+		switch (pmp->mad_hdr.attr_id) {
 		case IB_PMA_CLASS_PORT_INFO:
 			ret = pma_get_classportinfo(pmp, ibdev);
 			goto bail;
@@ -1987,13 +2044,13 @@ static int process_perf(struct ib_device *ibdev, u8 port,
 			ret = pma_get_portcounters_cong(pmp, ibdev, port);
 			goto bail;
 		default:
-			pmp->status |= IB_SMP_UNSUP_METH_ATTR;
+			pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
 			ret = reply((struct ib_smp *) pmp);
 			goto bail;
 		}
 
 	case IB_MGMT_METHOD_SET:
-		switch (pmp->attr_id) {
+		switch (pmp->mad_hdr.attr_id) {
 		case IB_PMA_PORT_SAMPLES_CONTROL:
 			ret = pma_set_portsamplescontrol(pmp, ibdev, port);
 			goto bail;
@@ -2007,7 +2064,7 @@ static int process_perf(struct ib_device *ibdev, u8 port,
 			ret = pma_set_portcounters_cong(pmp, ibdev, port);
 			goto bail;
 		default:
-			pmp->status |= IB_SMP_UNSUP_METH_ATTR;
+			pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
 			ret = reply((struct ib_smp *) pmp);
 			goto bail;
 		}
@@ -2023,7 +2080,7 @@ static int process_perf(struct ib_device *ibdev, u8 port,
 		goto bail;
 
 	default:
-		pmp->status |= IB_SMP_UNSUP_METHOD;
+		pmp->mad_hdr.status |= IB_SMP_UNSUP_METHOD;
 		ret = reply((struct ib_smp *) pmp);
 	}
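The congestion-control handlers added below address the CCT in IB_CCT_ENTRIES-sized blocks (64 entries), with attr_mod selecting the block and ccti_limit giving the absolute index of the last valid entry. The accounting is easiest to follow with numbers; a worked example for a hypothetical 131-entry table sent as three Set blocks:

#include <stdio.h>

#define IB_CCT_ENTRIES 64

int main(void)
{
	unsigned limits[] = { 63, 127, 130 };	/* one ccti_limit per block */
	unsigned total_cct_entry = 0;

	for (int i = 0; i < 3; i++) {
		unsigned limit = limits[i];
		unsigned cct_entry;

		if (limit < IB_CCT_ENTRIES)	/* first packet in sequence */
			total_cct_entry = 0;
		cct_entry = limit % IB_CCT_ENTRIES;	/* index within block */
		total_cct_entry += cct_entry + 1;
		printf("block %d: ccti_limit=%u adds %u, total=%u\n",
		       i, limit, cct_entry + 1, total_cct_entry);
	}
	/* totals: 64, 128, 131 - what ppd->total_cct_entry would hold */
	return 0;
}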
@@ -2031,6 +2088,298 @@ bail:
 	return ret;
 }
 
+static int cc_get_classportinfo(struct ib_cc_mad *ccp,
+				struct ib_device *ibdev)
+{
+	struct ib_cc_classportinfo_attr *p =
+		(struct ib_cc_classportinfo_attr *)ccp->mgmt_data;
+
+	memset(ccp->mgmt_data, 0, sizeof(ccp->mgmt_data));
+
+	p->base_version = 1;
+	p->class_version = 1;
+	p->cap_mask = 0;
+
+	/*
+	 * Expected response time is 4.096 usec. * 2^18 == 1.073741824 sec.
+	 */
+	p->resp_time_value = 18;
+
+	return reply((struct ib_smp *) ccp);
+}
+
+static int cc_get_congestion_info(struct ib_cc_mad *ccp,
+				struct ib_device *ibdev, u8 port)
+{
+	struct ib_cc_info_attr *p =
+		(struct ib_cc_info_attr *)ccp->mgmt_data;
+	struct qib_ibport *ibp = to_iport(ibdev, port);
+	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+
+	memset(ccp->mgmt_data, 0, sizeof(ccp->mgmt_data));
+
+	p->congestion_info = 0;
+	p->control_table_cap = ppd->cc_max_table_entries;
+
+	return reply((struct ib_smp *) ccp);
+}
+
+static int cc_get_congestion_setting(struct ib_cc_mad *ccp,
+				struct ib_device *ibdev, u8 port)
+{
+	int i;
+	struct ib_cc_congestion_setting_attr *p =
+		(struct ib_cc_congestion_setting_attr *)ccp->mgmt_data;
+	struct qib_ibport *ibp = to_iport(ibdev, port);
+	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+	struct ib_cc_congestion_entry_shadow *entries;
+
+	memset(ccp->mgmt_data, 0, sizeof(ccp->mgmt_data));
+
+	spin_lock(&ppd->cc_shadow_lock);
+
+	entries = ppd->congestion_entries_shadow->entries;
+	p->port_control = cpu_to_be16(
+		ppd->congestion_entries_shadow->port_control);
+	p->control_map = cpu_to_be16(
+		ppd->congestion_entries_shadow->control_map);
+	for (i = 0; i < IB_CC_CCS_ENTRIES; i++) {
+		p->entries[i].ccti_increase = entries[i].ccti_increase;
+		p->entries[i].ccti_timer = cpu_to_be16(entries[i].ccti_timer);
+		p->entries[i].trigger_threshold = entries[i].trigger_threshold;
+		p->entries[i].ccti_min = entries[i].ccti_min;
+	}
+
+	spin_unlock(&ppd->cc_shadow_lock);
+
+	return reply((struct ib_smp *) ccp);
+}
+
+static int cc_get_congestion_control_table(struct ib_cc_mad *ccp,
+				struct ib_device *ibdev, u8 port)
+{
+	struct ib_cc_table_attr *p =
+		(struct ib_cc_table_attr *)ccp->mgmt_data;
+	struct qib_ibport *ibp = to_iport(ibdev, port);
+	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+	u32 cct_block_index = be32_to_cpu(ccp->attr_mod);
+	u32 max_cct_block;
+	u32 cct_entry;
+	struct ib_cc_table_entry_shadow *entries;
+	int i;
+
+	/* Is the table index more than what is supported? */
+	if (cct_block_index > IB_CC_TABLE_CAP_DEFAULT - 1)
+		goto bail;
+
+	memset(ccp->mgmt_data, 0, sizeof(ccp->mgmt_data));
+
+	spin_lock(&ppd->cc_shadow_lock);
+
+	max_cct_block =
+		(ppd->ccti_entries_shadow->ccti_last_entry + 1)/IB_CCT_ENTRIES;
+	max_cct_block = max_cct_block ? max_cct_block - 1 : 0;
+
+	if (cct_block_index > max_cct_block) {
+		spin_unlock(&ppd->cc_shadow_lock);
+		goto bail;
+	}
+
+	ccp->attr_mod = cpu_to_be32(cct_block_index);
+
+	cct_entry = IB_CCT_ENTRIES * (cct_block_index + 1);
+
+	cct_entry--;
+
+	p->ccti_limit = cpu_to_be16(cct_entry);
+
+	entries = &ppd->ccti_entries_shadow->
+			entries[IB_CCT_ENTRIES * cct_block_index];
+	cct_entry %= IB_CCT_ENTRIES;
+
+	for (i = 0; i <= cct_entry; i++)
+		p->ccti_entries[i].entry = cpu_to_be16(entries[i].entry);
+
+	spin_unlock(&ppd->cc_shadow_lock);
+
+	return reply((struct ib_smp *) ccp);
+
+bail:
+	return reply_failure((struct ib_smp *) ccp);
+}
+
+static int cc_set_congestion_setting(struct ib_cc_mad *ccp,
+				struct ib_device *ibdev, u8 port)
+{
+	struct ib_cc_congestion_setting_attr *p =
+		(struct ib_cc_congestion_setting_attr *)ccp->mgmt_data;
+	struct qib_ibport *ibp = to_iport(ibdev, port);
+	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+	int i;
+
+	ppd->cc_sl_control_map = be16_to_cpu(p->control_map);
+
+	for (i = 0; i < IB_CC_CCS_ENTRIES; i++) {
+		ppd->congestion_entries[i].ccti_increase =
+			p->entries[i].ccti_increase;
+
+		ppd->congestion_entries[i].ccti_timer =
+			be16_to_cpu(p->entries[i].ccti_timer);
+
+		ppd->congestion_entries[i].trigger_threshold =
+			p->entries[i].trigger_threshold;
+
+		ppd->congestion_entries[i].ccti_min =
+			p->entries[i].ccti_min;
+	}
+
+	return reply((struct ib_smp *) ccp);
+}
+
+static int cc_set_congestion_control_table(struct ib_cc_mad *ccp,
+				struct ib_device *ibdev, u8 port)
+{
+	struct ib_cc_table_attr *p =
+		(struct ib_cc_table_attr *)ccp->mgmt_data;
+	struct qib_ibport *ibp = to_iport(ibdev, port);
+	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+	u32 cct_block_index = be32_to_cpu(ccp->attr_mod);
+	u32 cct_entry;
+	struct ib_cc_table_entry_shadow *entries;
+	int i;
+
+	/* Is the table index more than what is supported? */
+	if (cct_block_index > IB_CC_TABLE_CAP_DEFAULT - 1)
+		goto bail;
+
+	/* If this packet is the first in the sequence then
+	 * zero the total table entry count.
+	 */
+	if (be16_to_cpu(p->ccti_limit) < IB_CCT_ENTRIES)
+		ppd->total_cct_entry = 0;
+
+	cct_entry = (be16_to_cpu(p->ccti_limit))%IB_CCT_ENTRIES;
+
+	/* ccti_limit is 0 to 63 */
+	ppd->total_cct_entry += (cct_entry + 1);
+
+	if (ppd->total_cct_entry > ppd->cc_supported_table_entries)
+		goto bail;
+
+	ppd->ccti_limit = be16_to_cpu(p->ccti_limit);
+
+	entries = ppd->ccti_entries + (IB_CCT_ENTRIES * cct_block_index);
+
+	for (i = 0; i <= cct_entry; i++)
+		entries[i].entry = be16_to_cpu(p->ccti_entries[i].entry);
+
+	spin_lock(&ppd->cc_shadow_lock);
+
+	ppd->ccti_entries_shadow->ccti_last_entry = ppd->total_cct_entry - 1;
+	memcpy(ppd->ccti_entries_shadow->entries, ppd->ccti_entries,
+		(ppd->total_cct_entry * sizeof(struct ib_cc_table_entry)));
+
+	ppd->congestion_entries_shadow->port_control = IB_CC_CCS_PC_SL_BASED;
+	ppd->congestion_entries_shadow->control_map = ppd->cc_sl_control_map;
+	memcpy(ppd->congestion_entries_shadow->entries, ppd->congestion_entries,
+		IB_CC_CCS_ENTRIES * sizeof(struct ib_cc_congestion_entry));
+
+	spin_unlock(&ppd->cc_shadow_lock);
+
+	return reply((struct ib_smp *) ccp);
+
+bail:
+	return reply_failure((struct ib_smp *) ccp);
+}
+
+static int check_cc_key(struct qib_ibport *ibp,
+			struct ib_cc_mad *ccp, int mad_flags)
+{
+	return 0;
+}
+
+static int process_cc(struct ib_device *ibdev, int mad_flags,
+			u8 port, struct ib_mad *in_mad,
+			struct ib_mad *out_mad)
+{
+	struct ib_cc_mad *ccp = (struct ib_cc_mad *)out_mad;
+	struct qib_ibport *ibp = to_iport(ibdev, port);
+	int ret;
+
+	*out_mad = *in_mad;
+
+	if (ccp->class_version != 2) {
+		ccp->status |= IB_SMP_UNSUP_VERSION;
+		ret = reply((struct ib_smp *)ccp);
+		goto bail;
+	}
+
+	ret = check_cc_key(ibp, ccp, mad_flags);
+	if (ret)
+		goto bail;
+
+	switch (ccp->method) {
+	case IB_MGMT_METHOD_GET:
+		switch (ccp->attr_id) {
+		case IB_CC_ATTR_CLASSPORTINFO:
+			ret = cc_get_classportinfo(ccp, ibdev);
+			goto bail;
+
+		case IB_CC_ATTR_CONGESTION_INFO:
+			ret = cc_get_congestion_info(ccp, ibdev, port);
+			goto bail;
+
+		case IB_CC_ATTR_CA_CONGESTION_SETTING:
+			ret = cc_get_congestion_setting(ccp, ibdev, port);
+			goto bail;
+
+		case IB_CC_ATTR_CONGESTION_CONTROL_TABLE:
+			ret = cc_get_congestion_control_table(ccp, ibdev, port);
+			goto bail;
+
+			/* FALLTHROUGH */
+		default:
+			ccp->status |= IB_SMP_UNSUP_METH_ATTR;
+			ret = reply((struct ib_smp *) ccp);
+			goto bail;
+		}
+
+	case IB_MGMT_METHOD_SET:
+		switch (ccp->attr_id) {
+		case IB_CC_ATTR_CA_CONGESTION_SETTING:
+			ret = cc_set_congestion_setting(ccp, ibdev, port);
+			goto bail;
+
+		case IB_CC_ATTR_CONGESTION_CONTROL_TABLE:
+			ret = cc_set_congestion_control_table(ccp, ibdev, port);
+			goto bail;
+
+			/* FALLTHROUGH */
+		default:
+			ccp->status |= IB_SMP_UNSUP_METH_ATTR;
+			ret = reply((struct ib_smp *) ccp);
+			goto bail;
+		}
+
+	case IB_MGMT_METHOD_GET_RESP:
+		/*
+		 * The ib_mad module will call us to process responses
+		 * before checking for other consumers.
+		 * Just tell the caller to process it normally.
+		 */
+		ret = IB_MAD_RESULT_SUCCESS;
+		goto bail;
+
+	case IB_MGMT_METHOD_TRAP:
+	default:
+		ccp->status |= IB_SMP_UNSUP_METHOD;
+		ret = reply((struct ib_smp *) ccp);
+	}
+
+bail:
+	return ret;
+}
+
 /**
  * qib_process_mad - process an incoming MAD packet
  * @ibdev: the infiniband device this packet came in on
@@ -2055,6 +2404,8 @@ int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
 		    struct ib_mad *in_mad, struct ib_mad *out_mad)
 {
 	int ret;
+	struct qib_ibport *ibp = to_iport(ibdev, port);
+	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
 
 	switch (in_mad->mad_hdr.mgmt_class) {
 	case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
@@ -2066,6 +2417,15 @@ int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
 		ret = process_perf(ibdev, port, in_mad, out_mad);
 		goto bail;
 
+	case IB_MGMT_CLASS_CONG_MGMT:
+		if (!ppd->congestion_entries_shadow ||
+			 !qib_cc_table_size) {
+			ret = IB_MAD_RESULT_SUCCESS;
+			goto bail;
+		}
+		ret = process_cc(ibdev, mad_flags, port, in_mad, out_mad);
+		goto bail;
+
 	default:
 		ret = IB_MAD_RESULT_SUCCESS;
 	}
