Diffstat (limited to 'drivers/infiniband/hw/ipath/ipath_mad.c')
| -rw-r--r-- | drivers/infiniband/hw/ipath/ipath_mad.c | 553 |
1 files changed, 244 insertions, 309 deletions
diff --git a/drivers/infiniband/hw/ipath/ipath_mad.c b/drivers/infiniband/hw/ipath/ipath_mad.c
index 72d1db89db8..43f2d0424d4 100644
--- a/drivers/infiniband/hw/ipath/ipath_mad.c
+++ b/drivers/infiniband/hw/ipath/ipath_mad.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006 QLogic, Inc. All rights reserved.
+ * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
  * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -32,15 +32,16 @@
  */
 
 #include <rdma/ib_smi.h>
+#include <rdma/ib_pma.h>
 
 #include "ipath_kernel.h"
 #include "ipath_verbs.h"
 #include "ipath_common.h"
 
-#define IB_SMP_UNSUP_VERSION __constant_htons(0x0004)
-#define IB_SMP_UNSUP_METHOD __constant_htons(0x0008)
-#define IB_SMP_UNSUP_METH_ATTR __constant_htons(0x000C)
-#define IB_SMP_INVALID_FIELD __constant_htons(0x001C)
+#define IB_SMP_UNSUP_VERSION cpu_to_be16(0x0004)
+#define IB_SMP_UNSUP_METHOD cpu_to_be16(0x0008)
+#define IB_SMP_UNSUP_METH_ATTR cpu_to_be16(0x000C)
+#define IB_SMP_INVALID_FIELD cpu_to_be16(0x001C)
 
 static int reply(struct ib_smp *smp)
 {
@@ -60,7 +61,7 @@ static int recv_subn_get_nodedescription(struct ib_smp *smp,
 	if (smp->attr_mod)
 		smp->status |= IB_SMP_INVALID_FIELD;
 
-	strncpy(smp->data, ibdev->node_desc, sizeof(smp->data));
+	memcpy(smp->data, ibdev->node_desc, sizeof(smp->data));
 
 	return reply(smp);
 }
@@ -87,7 +88,8 @@ static int recv_subn_get_nodeinfo(struct ib_smp *smp,
 	struct ipath_devdata *dd = to_idev(ibdev)->dd;
 	u32 vendor, majrev, minrev;
 
-	if (smp->attr_mod)
+	/* GUID 0 is illegal */
+	if (smp->attr_mod || (dd->ipath_guid == 0))
 		smp->status |= IB_SMP_INVALID_FIELD;
 
 	nip->base_version = 1;
@@ -102,7 +104,7 @@ static int recv_subn_get_nodeinfo(struct ib_smp *smp,
 	/* This is already in network order */
 	nip->sys_guid = to_idev(ibdev)->sys_image_guid;
 	nip->node_guid = dd->ipath_guid;
-	nip->port_guid = nip->sys_guid;
+	nip->port_guid = dd->ipath_guid;
 	nip->partition_cap = cpu_to_be16(ipath_get_npkeys(dd));
 	nip->device_id = cpu_to_be16(dd->ipath_deviceid);
 	majrev = dd->ipath_majrev;
@@ -110,9 +112,9 @@ static int recv_subn_get_nodeinfo(struct ib_smp *smp,
 	nip->revision = cpu_to_be32((majrev << 16) | minrev);
 	nip->local_port_num = port;
 	vendor = dd->ipath_vendorid;
-	nip->vendor_id[0] = 0;
-	nip->vendor_id[1] = vendor >> 8;
-	nip->vendor_id[2] = vendor;
+	nip->vendor_id[0] = IPATH_SRC_OUI_1;
+	nip->vendor_id[1] = IPATH_SRC_OUI_2;
+	nip->vendor_id[2] = IPATH_SRC_OUI_3;
 
 	return reply(smp);
 }
@@ -131,15 +133,29 @@ static int recv_subn_get_guidinfo(struct ib_smp *smp,
 	 * We only support one GUID for now. If this changes, the
 	 * portinfo.guid_cap field needs to be updated too.
 	 */
-	if (startgx == 0)
-		/* The first is a copy of the read-only HW GUID. */
-		*p = to_idev(ibdev)->dd->ipath_guid;
-	else
+	if (startgx == 0) {
+		__be64 g = to_idev(ibdev)->dd->ipath_guid;
+		if (g == 0)
+			/* GUID 0 is illegal */
+			smp->status |= IB_SMP_INVALID_FIELD;
+		else
+			/* The first is a copy of the read-only HW GUID. */
+			*p = g;
+	} else
 		smp->status |= IB_SMP_INVALID_FIELD;
 
 	return reply(smp);
 }
 
+static void set_link_width_enabled(struct ipath_devdata *dd, u32 w)
+{
+	(void) dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_LWID_ENB, w);
+}
+
+static void set_link_speed_enabled(struct ipath_devdata *dd, u32 s)
+{
+	(void) dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_SPD_ENB, s);
+}
 
 static int get_overrunthreshold(struct ipath_devdata *dd)
 {
@@ -220,6 +236,7 @@ static int recv_subn_get_portinfo(struct ib_smp *smp,
 				  struct ib_device *ibdev, u8 port)
 {
 	struct ipath_ibdev *dev;
+	struct ipath_devdata *dd;
 	struct ib_port_info *pip = (struct ib_port_info *)smp->data;
 	u16 lid;
 	u8 ibcstat;
@@ -233,34 +250,38 @@ static int recv_subn_get_portinfo(struct ib_smp *smp,
 	}
 
 	dev = to_idev(ibdev);
+	dd = dev->dd;
 
 	/* Clear all fields. Only set the non-zero fields. */
 	memset(smp->data, 0, sizeof(smp->data));
 
 	/* Only return the mkey if the protection field allows it. */
 	if (smp->method == IB_MGMT_METHOD_SET || dev->mkey == smp->mkey ||
-	    (dev->mkeyprot_resv_lmc >> 6) == 0)
+	    dev->mkeyprot == 0)
 		pip->mkey = dev->mkey;
 	pip->gid_prefix = dev->gid_prefix;
-	lid = dev->dd->ipath_lid;
+	lid = dd->ipath_lid;
 	pip->lid = lid ? cpu_to_be16(lid) : IB_LID_PERMISSIVE;
 	pip->sm_lid = cpu_to_be16(dev->sm_lid);
 	pip->cap_mask = cpu_to_be32(dev->port_cap_flags);
 	/* pip->diag_code; */
 	pip->mkey_lease_period = cpu_to_be16(dev->mkey_lease_period);
 	pip->local_port_num = port;
-	pip->link_width_enabled = dev->link_width_enabled;
-	pip->link_width_supported = 3;	/* 1x or 4x */
-	pip->link_width_active = 2;	/* 4x */
-	pip->linkspeed_portstate = 0x10;	/* 2.5Gbps */
-	ibcstat = dev->dd->ipath_lastibcstat;
-	pip->linkspeed_portstate |= ((ibcstat >> 4) & 0x3) + 1;
+	pip->link_width_enabled = dd->ipath_link_width_enabled;
+	pip->link_width_supported = dd->ipath_link_width_supported;
+	pip->link_width_active = dd->ipath_link_width_active;
+	pip->linkspeed_portstate = dd->ipath_link_speed_supported << 4;
+	ibcstat = dd->ipath_lastibcstat;
+	/* map LinkState to IB portinfo values. */
+	pip->linkspeed_portstate |= ipath_ib_linkstate(dd, ibcstat) + 1;
+
 	pip->portphysstate_linkdown =
-		(ipath_cvt_physportstate[ibcstat & 0xf] << 4) |
-		(get_linkdowndefaultstate(dev->dd) ? 1 : 2);
-	pip->mkeyprot_resv_lmc = dev->mkeyprot_resv_lmc;
-	pip->linkspeedactive_enabled = 0x11;	/* 2.5Gbps, 2.5Gbps */
-	switch (dev->dd->ipath_ibmtu) {
+		(ipath_cvt_physportstate[ibcstat & dd->ibcs_lts_mask] << 4) |
+		(get_linkdowndefaultstate(dd) ? 1 : 2);
+	pip->mkeyprot_resv_lmc = (dev->mkeyprot << 6) | dd->ipath_lmc;
+	pip->linkspeedactive_enabled = (dd->ipath_link_speed_active << 4) |
+		dd->ipath_link_speed_enabled;
+	switch (dd->ipath_ibmtu) {
 	case 4096:
 		mtu = IB_MTU_4096;
 		break;
@@ -286,14 +307,15 @@ static int recv_subn_get_portinfo(struct ib_smp *smp,
 	/* pip->vl_arb_high_cap; // only one VL */
 	/* pip->vl_arb_low_cap; // only one VL */
 	/* InitTypeReply = 0 */
-	pip->inittypereply_mtucap = IB_MTU_4096;
-	// HCAs ignore VLStallCount and HOQLife
+	/* our mtu cap depends on whether 4K MTU enabled or not */
+	pip->inittypereply_mtucap = ipath_mtu4096 ? IB_MTU_4096 : IB_MTU_2048;
+	/* HCAs ignore VLStallCount and HOQLife */
 	/* pip->vlstallcnt_hoqlife; */
 	pip->operationalvl_pei_peo_fpi_fpo = 0x10;	/* OVLs = 1 */
 	pip->mkey_violations = cpu_to_be16(dev->mkey_violations);
 	/* P_KeyViolations are counted by hardware. */
 	pip->pkey_violations =
-		cpu_to_be16((ipath_get_cr_errpkey(dev->dd) -
+		cpu_to_be16((ipath_get_cr_errpkey(dd) -
 			     dev->z_pkey_violations) & 0xFFFF);
 	pip->qkey_violations = cpu_to_be16(dev->qkey_violations);
 	/* Only the hardware GUID is supported for now */
@@ -302,10 +324,17 @@ static int recv_subn_get_portinfo(struct ib_smp *smp,
 	/* 32.768 usec. response time (guessing) */
 	pip->resv_resptimevalue = 3;
 	pip->localphyerrors_overrunerrors =
-		(get_phyerrthreshold(dev->dd) << 4) |
-		get_overrunthreshold(dev->dd);
+		(get_phyerrthreshold(dd) << 4) |
+		get_overrunthreshold(dd);
 	/* pip->max_credit_hint; */
-	/* pip->link_roundtrip_latency[3]; */
+	if (dev->port_cap_flags & IB_PORT_LINK_LATENCY_SUP) {
+		u32 v;
+
+		v = dd->ipath_f_get_ib_cfg(dd, IPATH_IB_CFG_LINKLATENCY);
+		pip->link_roundtrip_latency[0] = v >> 16;
+		pip->link_roundtrip_latency[1] = v >> 8;
+		pip->link_roundtrip_latency[2] = v;
+	}
 
 	ret = reply(smp);
 
@@ -320,6 +349,7 @@ bail:
  */
 static int get_pkeys(struct ipath_devdata *dd, u16 * pkeys)
 {
+	/* always a kernel port, no locking needed */
 	struct ipath_portdata *pd = dd->ipath_pd[0];
 
 	memcpy(pkeys, pd->port_pkeys, sizeof(pd->port_pkeys));
@@ -390,7 +420,7 @@ static int recv_subn_set_portinfo(struct ib_smp *smp,
 	struct ib_port_info *pip = (struct ib_port_info *)smp->data;
 	struct ib_event event;
 	struct ipath_ibdev *dev;
-	u32 flags;
+	struct ipath_devdata *dd;
 	char clientrereg = 0;
 	u16 lid, smlid;
 	u8 lwe;
@@ -404,6 +434,7 @@ static int recv_subn_set_portinfo(struct ib_smp *smp,
 		goto err;
 
 	dev = to_idev(ibdev);
+	dd = dev->dd;
 	event.device = ibdev;
 	event.element.port_num = port;
 
@@ -412,11 +443,12 @@ static int recv_subn_set_portinfo(struct ib_smp *smp,
 	dev->mkey_lease_period = be16_to_cpu(pip->mkey_lease_period);
 
 	lid = be16_to_cpu(pip->lid);
-	if (lid != dev->dd->ipath_lid) {
+	if (dd->ipath_lid != lid ||
+	    dd->ipath_lmc != (pip->mkeyprot_resv_lmc & 7)) {
 		/* Must be a valid unicast LID address. */
 		if (lid == 0 || lid >= IPATH_MULTICAST_LID_BASE)
 			goto err;
-		ipath_set_lid(dev->dd, lid, pip->mkeyprot_resv_lmc & 7);
+		ipath_set_lid(dd, lid, pip->mkeyprot_resv_lmc & 7);
 		event.event = IB_EVENT_LID_CHANGE;
 		ib_dispatch_event(&event);
 	}
@@ -431,37 +463,43 @@ static int recv_subn_set_portinfo(struct ib_smp *smp,
 		ib_dispatch_event(&event);
 	}
 
-	/* Only 4x supported but allow 1x or 4x to be set (see 14.2.6.6). */
+	/* Allow 1x or 4x to be set (see 14.2.6.6). */
 	lwe = pip->link_width_enabled;
-	if ((lwe >= 4 && lwe <= 8) || (lwe >= 0xC && lwe <= 0xFE))
-		goto err;
-	if (lwe == 0xFF)
-		dev->link_width_enabled = 3;	/* 1x or 4x */
-	else if (lwe)
-		dev->link_width_enabled = lwe;
+	if (lwe) {
+		if (lwe == 0xFF)
+			lwe = dd->ipath_link_width_supported;
+		else if (lwe >= 16 || (lwe & ~dd->ipath_link_width_supported))
+			goto err;
+		set_link_width_enabled(dd, lwe);
+	}
 
-	/* Only 2.5 Gbs supported. */
+	/* Allow 2.5 or 5.0 Gbs. */
 	lse = pip->linkspeedactive_enabled & 0xF;
-	if (lse >= 2 && lse <= 0xE)
-		goto err;
+	if (lse) {
+		if (lse == 15)
+			lse = dd->ipath_link_speed_supported;
+		else if (lse >= 8 || (lse & ~dd->ipath_link_speed_supported))
+			goto err;
+		set_link_speed_enabled(dd, lse);
+	}
 
 	/* Set link down default state. */
 	switch (pip->portphysstate_linkdown & 0xF) {
 	case 0: /* NOP */
 		break;
 	case 1: /* SLEEP */
-		if (set_linkdowndefaultstate(dev->dd, 1))
+		if (set_linkdowndefaultstate(dd, 1))
 			goto err;
 		break;
 	case 2: /* POLL */
-		if (set_linkdowndefaultstate(dev->dd, 0))
+		if (set_linkdowndefaultstate(dd, 0))
 			goto err;
 		break;
 	default:
 		goto err;
 	}
 
-	dev->mkeyprot_resv_lmc = pip->mkeyprot_resv_lmc;
+	dev->mkeyprot = pip->mkeyprot_resv_lmc >> 6;
 	dev->vl_high_limit = pip->vl_high_limit;
 
 	switch ((pip->neighbormtu_mastersmsl >> 4) & 0xF) {
@@ -478,13 +516,15 @@ static int recv_subn_set_portinfo(struct ib_smp *smp,
 		mtu = 2048;
 		break;
 	case IB_MTU_4096:
+		if (!ipath_mtu4096)
+			goto err;
 		mtu = 4096;
 		break;
 	default:
 		/* XXX We have already partially updated our state! */
 		goto err;
 	}
-	ipath_set_mtu(dev->dd, mtu);
+	ipath_set_mtu(dd, mtu);
 
 	dev->sm_sl = pip->neighbormtu_mastersmsl & 0xF;
 
@@ -500,16 +540,16 @@ static int recv_subn_set_portinfo(struct ib_smp *smp,
 	 * later.
 	 */
 	if (pip->pkey_violations == 0)
-		dev->z_pkey_violations = ipath_get_cr_errpkey(dev->dd);
+		dev->z_pkey_violations = ipath_get_cr_errpkey(dd);
 
 	if (pip->qkey_violations == 0)
 		dev->qkey_violations = 0;
 
 	ore = pip->localphyerrors_overrunerrors;
-	if (set_phyerrthreshold(dev->dd, (ore >> 4) & 0xF))
+	if (set_phyerrthreshold(dd, (ore >> 4) & 0xF))
 		goto err;
 
-	if (set_overrunthreshold(dev->dd, (ore & 0xF)))
+	if (set_overrunthreshold(dd, (ore & 0xF)))
 		goto err;
 
 	dev->subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F;
@@ -527,7 +567,6 @@ static int recv_subn_set_portinfo(struct ib_smp *smp,
 	 * is down or is being set to down.
 	 */
 	state = pip->linkspeed_portstate & 0xF;
-	flags = dev->dd->ipath_flags;
 	lstate = (pip->portphysstate_linkdown >> 4) & 0xF;
 	if (lstate && !(state == IB_PORT_DOWN || state == IB_PORT_NOP))
 		goto err;
@@ -543,10 +582,7 @@ static int recv_subn_set_portinfo(struct ib_smp *smp,
 		/* FALLTHROUGH */
 	case IB_PORT_DOWN:
 		if (lstate == 0)
-			if (get_linkdowndefaultstate(dev->dd))
-				lstate = IPATH_IB_LINKDOWN_SLEEP;
-			else
-				lstate = IPATH_IB_LINKDOWN;
+			lstate = IPATH_IB_LINKDOWN_ONLY;
 		else if (lstate == 1)
 			lstate = IPATH_IB_LINKDOWN_SLEEP;
 		else if (lstate == 2)
@@ -555,27 +591,19 @@ static int recv_subn_set_portinfo(struct ib_smp *smp,
 			lstate = IPATH_IB_LINKDOWN_DISABLE;
 		else
 			goto err;
-		ipath_set_linkstate(dev->dd, lstate);
-		if (flags & IPATH_LINKACTIVE) {
-			event.event = IB_EVENT_PORT_ERR;
-			ib_dispatch_event(&event);
+		ipath_set_linkstate(dd, lstate);
+		if (lstate == IPATH_IB_LINKDOWN_DISABLE) {
+			ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
+			goto done;
 		}
+		ipath_wait_linkstate(dd, IPATH_LINKINIT | IPATH_LINKARMED |
+				IPATH_LINKACTIVE, 1000);
 		break;
 	case IB_PORT_ARMED:
-		if (!(flags & (IPATH_LINKINIT | IPATH_LINKACTIVE)))
-			break;
-		ipath_set_linkstate(dev->dd, IPATH_IB_LINKARM);
-		if (flags & IPATH_LINKACTIVE) {
-			event.event = IB_EVENT_PORT_ERR;
-			ib_dispatch_event(&event);
-		}
+		ipath_set_linkstate(dd, IPATH_IB_LINKARM);
 		break;
 	case IB_PORT_ACTIVE:
-		if (!(flags & IPATH_LINKARMED))
-			break;
-		ipath_set_linkstate(dev->dd, IPATH_IB_LINKACTIVE);
-		event.event = IB_EVENT_PORT_ACTIVE;
-		ib_dispatch_event(&event);
+		ipath_set_linkstate(dd, IPATH_IB_LINKACTIVE);
 		break;
 	default:
 		/* XXX We have already partially updated our state! */
@@ -704,6 +732,7 @@ static int set_pkeys(struct ipath_devdata *dd, u16 *pkeys)
 	int i;
 	int changed = 0;
 
+	/* always a kernel port, no locking needed */
 	pd = dd->ipath_pd[0];
 
 	for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) {
@@ -761,151 +790,18 @@ static int recv_subn_set_pkeytable(struct ib_smp *smp,
 	return recv_subn_get_pkeytable(smp, ibdev);
 }
 
-#define IB_PMA_CLASS_PORT_INFO __constant_htons(0x0001)
-#define IB_PMA_PORT_SAMPLES_CONTROL __constant_htons(0x0010)
-#define IB_PMA_PORT_SAMPLES_RESULT __constant_htons(0x0011)
-#define IB_PMA_PORT_COUNTERS __constant_htons(0x0012)
-#define IB_PMA_PORT_COUNTERS_EXT __constant_htons(0x001D)
-#define IB_PMA_PORT_SAMPLES_RESULT_EXT __constant_htons(0x001E)
-
-struct ib_perf {
-	u8 base_version;
-	u8 mgmt_class;
-	u8 class_version;
-	u8 method;
-	__be16 status;
-	__be16 unused;
-	__be64 tid;
-	__be16 attr_id;
-	__be16 resv;
-	__be32 attr_mod;
-	u8 reserved[40];
-	u8 data[192];
-} __attribute__ ((packed));
-
-struct ib_pma_classportinfo {
-	u8 base_version;
-	u8 class_version;
-	__be16 cap_mask;
-	u8 reserved[3];
-	u8 resp_time_value;	/* only lower 5 bits */
-	union ib_gid redirect_gid;
-	__be32 redirect_tc_sl_fl;	/* 8, 4, 20 bits respectively */
-	__be16 redirect_lid;
-	__be16 redirect_pkey;
-	__be32 redirect_qp;	/* only lower 24 bits */
-	__be32 redirect_qkey;
-	union ib_gid trap_gid;
-	__be32 trap_tc_sl_fl;	/* 8, 4, 20 bits respectively */
-	__be16 trap_lid;
-	__be16 trap_pkey;
-	__be32 trap_hl_qp;	/* 8, 24 bits respectively */
-	__be32 trap_qkey;
-} __attribute__ ((packed));
-
-struct ib_pma_portsamplescontrol {
-	u8 opcode;
-	u8 port_select;
-	u8 tick;
-	u8 counter_width;	/* only lower 3 bits */
-	__be32 counter_mask0_9;	/* 2, 10 * 3, bits */
-	__be16 counter_mask10_14;	/* 1, 5 * 3, bits */
-	u8 sample_mechanisms;
-	u8 sample_status;	/* only lower 2 bits */
-	__be64 option_mask;
-	__be64 vendor_mask;
-	__be32 sample_start;
-	__be32 sample_interval;
-	__be16 tag;
-	__be16 counter_select[15];
-} __attribute__ ((packed));
-
-struct ib_pma_portsamplesresult {
-	__be16 tag;
-	__be16 sample_status;	/* only lower 2 bits */
-	__be32 counter[15];
-} __attribute__ ((packed));
-
-struct ib_pma_portsamplesresult_ext {
-	__be16 tag;
-	__be16 sample_status;	/* only lower 2 bits */
-	__be32 extended_width;	/* only upper 2 bits */
-	__be64 counter[15];
-} __attribute__ ((packed));
-
-struct ib_pma_portcounters {
-	u8 reserved;
-	u8 port_select;
-	__be16 counter_select;
-	__be16 symbol_error_counter;
-	u8 link_error_recovery_counter;
-	u8 link_downed_counter;
-	__be16 port_rcv_errors;
-	__be16 port_rcv_remphys_errors;
-	__be16 port_rcv_switch_relay_errors;
-	__be16 port_xmit_discards;
-	u8 port_xmit_constraint_errors;
-	u8 port_rcv_constraint_errors;
-	u8 reserved1;
-	u8 lli_ebor_errors;	/* 4, 4, bits */
-	__be16 reserved2;
-	__be16 vl15_dropped;
-	__be32 port_xmit_data;
-	__be32 port_rcv_data;
-	__be32 port_xmit_packets;
-	__be32 port_rcv_packets;
-} __attribute__ ((packed));
-
-#define IB_PMA_SEL_SYMBOL_ERROR __constant_htons(0x0001)
-#define IB_PMA_SEL_LINK_ERROR_RECOVERY __constant_htons(0x0002)
-#define IB_PMA_SEL_LINK_DOWNED __constant_htons(0x0004)
-#define IB_PMA_SEL_PORT_RCV_ERRORS __constant_htons(0x0008)
-#define IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS __constant_htons(0x0010)
-#define IB_PMA_SEL_PORT_XMIT_DISCARDS __constant_htons(0x0040)
-#define IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS __constant_htons(0x0200)
-#define IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS __constant_htons(0x0400)
-#define IB_PMA_SEL_PORT_VL15_DROPPED __constant_htons(0x0800)
-#define IB_PMA_SEL_PORT_XMIT_DATA __constant_htons(0x1000)
-#define IB_PMA_SEL_PORT_RCV_DATA __constant_htons(0x2000)
-#define IB_PMA_SEL_PORT_XMIT_PACKETS __constant_htons(0x4000)
-#define IB_PMA_SEL_PORT_RCV_PACKETS __constant_htons(0x8000)
-
-struct ib_pma_portcounters_ext {
-	u8 reserved;
-	u8 port_select;
-	__be16 counter_select;
-	__be32 reserved1;
-	__be64 port_xmit_data;
-	__be64 port_rcv_data;
-	__be64 port_xmit_packets;
-	__be64 port_rcv_packets;
-	__be64 port_unicast_xmit_packets;
-	__be64 port_unicast_rcv_packets;
-	__be64 port_multicast_xmit_packets;
-	__be64 port_multicast_rcv_packets;
-} __attribute__ ((packed));
-
-#define IB_PMA_SELX_PORT_XMIT_DATA __constant_htons(0x0001)
-#define IB_PMA_SELX_PORT_RCV_DATA __constant_htons(0x0002)
-#define IB_PMA_SELX_PORT_XMIT_PACKETS __constant_htons(0x0004)
-#define IB_PMA_SELX_PORT_RCV_PACKETS __constant_htons(0x0008)
-#define IB_PMA_SELX_PORT_UNI_XMIT_PACKETS __constant_htons(0x0010)
-#define IB_PMA_SELX_PORT_UNI_RCV_PACKETS __constant_htons(0x0020)
-#define IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS __constant_htons(0x0040)
-#define IB_PMA_SELX_PORT_MULTI_RCV_PACKETS __constant_htons(0x0080)
-
-static int recv_pma_get_classportinfo(struct ib_perf *pmp)
+static int recv_pma_get_classportinfo(struct ib_pma_mad *pmp)
 {
-	struct ib_pma_classportinfo *p =
-		(struct ib_pma_classportinfo *)pmp->data;
+	struct ib_class_port_info *p =
+		(struct ib_class_port_info *)pmp->data;
 
 	memset(pmp->data, 0, sizeof(pmp->data));
 
-	if (pmp->attr_mod != 0)
-		pmp->status |= IB_SMP_INVALID_FIELD;
+	if (pmp->mad_hdr.attr_mod != 0)
+		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
 
 	/* Indicate AllPortSelect is valid (only one port anyway) */
-	p->cap_mask = __constant_cpu_to_be16(1 << 8);
+	p->capability_mask = cpu_to_be16(1 << 8);
 	p->base_version = 1;
 	p->class_version = 1;
 	/*
@@ -923,41 +819,48 @@ static int recv_pma_get_classportinfo(struct ib_perf *pmp)
  * We support 5 counters which only count the mandatory quantities.
  */
 #define COUNTER_MASK(q, n) (q << ((9 - n) * 3))
-#define COUNTER_MASK0_9 \
-	__constant_cpu_to_be32(COUNTER_MASK(1, 0) | \
-			       COUNTER_MASK(1, 1) | \
-			       COUNTER_MASK(1, 2) | \
-			       COUNTER_MASK(1, 3) | \
-			       COUNTER_MASK(1, 4))
-
-static int recv_pma_get_portsamplescontrol(struct ib_perf *pmp,
+#define COUNTER_MASK0_9 cpu_to_be32(COUNTER_MASK(1, 0) | \
+				    COUNTER_MASK(1, 1) | \
+				    COUNTER_MASK(1, 2) | \
+				    COUNTER_MASK(1, 3) | \
+				    COUNTER_MASK(1, 4))
+
+static int recv_pma_get_portsamplescontrol(struct ib_pma_mad *pmp,
 					   struct ib_device *ibdev, u8 port)
 {
 	struct ib_pma_portsamplescontrol *p =
 		(struct ib_pma_portsamplescontrol *)pmp->data;
 	struct ipath_ibdev *dev = to_idev(ibdev);
+	struct ipath_cregs const *crp = dev->dd->ipath_cregs;
 	unsigned long flags;
 	u8 port_select = p->port_select;
 
 	memset(pmp->data, 0, sizeof(pmp->data));
 
 	p->port_select = port_select;
-	if (pmp->attr_mod != 0 ||
+	if (pmp->mad_hdr.attr_mod != 0 ||
 	    (port_select != port && port_select != 0xFF))
-		pmp->status |= IB_SMP_INVALID_FIELD;
+		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
 	/*
 	 * Ticks are 10x the link transfer period which for 2.5Gbs is 4
 	 * nsec. 0 == 4 nsec., 1 == 8 nsec., ..., 255 == 1020 nsec. Sample
 	 * intervals are counted in ticks. Since we use Linux timers, that
 	 * count in jiffies, we can't sample for less than 1000 ticks if HZ
-	 * == 1000 (4000 ticks if HZ is 250).
+	 * == 1000 (4000 ticks if HZ is 250). link_speed_active returns 2 for
+	 * DDR, 1 for SDR, set the tick to 1 for DDR, 0 for SDR on chips that
+	 * have hardware support for delaying packets.
 	 */
-	/* XXX This is WRONG. */
-	p->tick = 250;		/* 1 usec. */
+	if (crp->cr_psstat)
+		p->tick = dev->dd->ipath_link_speed_active - 1;
+	else
+		p->tick = 250;	/* 1 usec. */
 	p->counter_width = 4;	/* 32 bit counters */
 	p->counter_mask0_9 = COUNTER_MASK0_9;
 	spin_lock_irqsave(&dev->pending_lock, flags);
-	p->sample_status = dev->pma_sample_status;
+	if (crp->cr_psstat)
+		p->sample_status = ipath_read_creg32(dev->dd, crp->cr_psstat);
+	else
+		p->sample_status = dev->pma_sample_status;
 	p->sample_start = cpu_to_be32(dev->pma_sample_start);
 	p->sample_interval = cpu_to_be32(dev->pma_sample_interval);
 	p->tag = cpu_to_be16(dev->pma_tag);
@@ -971,76 +874,85 @@ static int recv_pma_get_portsamplescontrol(struct ib_perf *pmp,
 	return reply((struct ib_smp *) pmp);
 }
 
-static int recv_pma_set_portsamplescontrol(struct ib_perf *pmp,
+static int recv_pma_set_portsamplescontrol(struct ib_pma_mad *pmp,
 					   struct ib_device *ibdev, u8 port)
 {
 	struct ib_pma_portsamplescontrol *p =
 		(struct ib_pma_portsamplescontrol *)pmp->data;
 	struct ipath_ibdev *dev = to_idev(ibdev);
+	struct ipath_cregs const *crp = dev->dd->ipath_cregs;
 	unsigned long flags;
-	u32 start;
+	u8 status;
 	int ret;
 
-	if (pmp->attr_mod != 0 ||
+	if (pmp->mad_hdr.attr_mod != 0 ||
 	    (p->port_select != port && p->port_select != 0xFF)) {
-		pmp->status |= IB_SMP_INVALID_FIELD;
+		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
 		ret = reply((struct ib_smp *) pmp);
 		goto bail;
 	}
 
-	start = be32_to_cpu(p->sample_start);
-	if (start != 0) {
-		spin_lock_irqsave(&dev->pending_lock, flags);
-		if (dev->pma_sample_status == IB_PMA_SAMPLE_STATUS_DONE) {
-			dev->pma_sample_status =
-				IB_PMA_SAMPLE_STATUS_STARTED;
-			dev->pma_sample_start = start;
-			dev->pma_sample_interval =
-				be32_to_cpu(p->sample_interval);
-			dev->pma_tag = be16_to_cpu(p->tag);
-			if (p->counter_select[0])
-				dev->pma_counter_select[0] =
-					p->counter_select[0];
-			if (p->counter_select[1])
-				dev->pma_counter_select[1] =
-					p->counter_select[1];
-			if (p->counter_select[2])
-				dev->pma_counter_select[2] =
-					p->counter_select[2];
-			if (p->counter_select[3])
-				dev->pma_counter_select[3] =
-					p->counter_select[3];
-			if (p->counter_select[4])
-				dev->pma_counter_select[4] =
-					p->counter_select[4];
-		}
-		spin_unlock_irqrestore(&dev->pending_lock, flags);
+	spin_lock_irqsave(&dev->pending_lock, flags);
+	if (crp->cr_psstat)
+		status = ipath_read_creg32(dev->dd, crp->cr_psstat);
+	else
+		status = dev->pma_sample_status;
+	if (status == IB_PMA_SAMPLE_STATUS_DONE) {
+		dev->pma_sample_start = be32_to_cpu(p->sample_start);
+		dev->pma_sample_interval = be32_to_cpu(p->sample_interval);
+		dev->pma_tag = be16_to_cpu(p->tag);
+		dev->pma_counter_select[0] = p->counter_select[0];
+		dev->pma_counter_select[1] = p->counter_select[1];
+		dev->pma_counter_select[2] = p->counter_select[2];
+		dev->pma_counter_select[3] = p->counter_select[3];
+		dev->pma_counter_select[4] = p->counter_select[4];
+		if (crp->cr_psstat) {
+			ipath_write_creg(dev->dd, crp->cr_psinterval,
+					 dev->pma_sample_interval);
+			ipath_write_creg(dev->dd, crp->cr_psstart,
+					 dev->pma_sample_start);
+		} else
+			dev->pma_sample_status = IB_PMA_SAMPLE_STATUS_STARTED;
 	}
+	spin_unlock_irqrestore(&dev->pending_lock, flags);
+
 	ret = recv_pma_get_portsamplescontrol(pmp, ibdev, port);
 
 bail:
 	return ret;
 }
 
-static u64 get_counter(struct ipath_ibdev *dev, __be16 sel)
+static u64 get_counter(struct ipath_ibdev *dev,
+		       struct ipath_cregs const *crp,
+		       __be16 sel)
 {
 	u64 ret;
 
 	switch (sel) {
 	case IB_PMA_PORT_XMIT_DATA:
-		ret = dev->ipath_sword;
+		ret = (crp->cr_psxmitdatacount) ?
+			ipath_read_creg32(dev->dd, crp->cr_psxmitdatacount) :
+			dev->ipath_sword;
 		break;
 	case IB_PMA_PORT_RCV_DATA:
-		ret = dev->ipath_rword;
+		ret = (crp->cr_psrcvdatacount) ?
+			ipath_read_creg32(dev->dd, crp->cr_psrcvdatacount) :
+			dev->ipath_rword;
 		break;
 	case IB_PMA_PORT_XMIT_PKTS:
-		ret = dev->ipath_spkts;
+		ret = (crp->cr_psxmitpktscount) ?
+			ipath_read_creg32(dev->dd, crp->cr_psxmitpktscount) :
+			dev->ipath_spkts;
 		break;
 	case IB_PMA_PORT_RCV_PKTS:
-		ret = dev->ipath_rpkts;
+		ret = (crp->cr_psrcvpktscount) ?
+			ipath_read_creg32(dev->dd, crp->cr_psrcvpktscount) :
+			dev->ipath_rpkts;
		break;
 	case IB_PMA_PORT_XMIT_WAIT:
-		ret = dev->ipath_xmit_wait;
+		ret = (crp->cr_psxmitwaitcount) ?
+			ipath_read_creg32(dev->dd, crp->cr_psxmitwaitcount) :
+			dev->ipath_xmit_wait;
 		break;
 	default:
 		ret = 0;
 	}
@@ -1049,45 +961,59 @@ static u64 get_counter(struct ipath_ibdev *dev, __be16 sel)
 	return ret;
 }
 
-static int recv_pma_get_portsamplesresult(struct ib_perf *pmp,
+static int recv_pma_get_portsamplesresult(struct ib_pma_mad *pmp,
 					  struct ib_device *ibdev)
 {
 	struct ib_pma_portsamplesresult *p =
 		(struct ib_pma_portsamplesresult *)pmp->data;
 	struct ipath_ibdev *dev = to_idev(ibdev);
+	struct ipath_cregs const *crp = dev->dd->ipath_cregs;
+	u8 status;
 	int i;
 
 	memset(pmp->data, 0, sizeof(pmp->data));
 	p->tag = cpu_to_be16(dev->pma_tag);
-	p->sample_status = cpu_to_be16(dev->pma_sample_status);
+	if (crp->cr_psstat)
+		status = ipath_read_creg32(dev->dd, crp->cr_psstat);
+	else
+		status = dev->pma_sample_status;
+	p->sample_status = cpu_to_be16(status);
 	for (i = 0; i < ARRAY_SIZE(dev->pma_counter_select); i++)
-		p->counter[i] = cpu_to_be32(
-			get_counter(dev, dev->pma_counter_select[i]));
+		p->counter[i] = (status != IB_PMA_SAMPLE_STATUS_DONE) ? 0 :
+			cpu_to_be32(
+			get_counter(dev, crp, dev->pma_counter_select[i]));
 
 	return reply((struct ib_smp *) pmp);
 }
 
-static int recv_pma_get_portsamplesresult_ext(struct ib_perf *pmp,
+static int recv_pma_get_portsamplesresult_ext(struct ib_pma_mad *pmp,
 					      struct ib_device *ibdev)
 {
 	struct ib_pma_portsamplesresult_ext *p =
 		(struct ib_pma_portsamplesresult_ext *)pmp->data;
 	struct ipath_ibdev *dev = to_idev(ibdev);
+	struct ipath_cregs const *crp = dev->dd->ipath_cregs;
+	u8 status;
 	int i;
 
 	memset(pmp->data, 0, sizeof(pmp->data));
 	p->tag = cpu_to_be16(dev->pma_tag);
-	p->sample_status = cpu_to_be16(dev->pma_sample_status);
+	if (crp->cr_psstat)
+		status = ipath_read_creg32(dev->dd, crp->cr_psstat);
+	else
+		status = dev->pma_sample_status;
+	p->sample_status = cpu_to_be16(status);
 	/* 64 bits */
-	p->extended_width = __constant_cpu_to_be32(0x80000000);
+	p->extended_width = cpu_to_be32(0x80000000);
 	for (i = 0; i < ARRAY_SIZE(dev->pma_counter_select); i++)
-		p->counter[i] = cpu_to_be64(
-			get_counter(dev, dev->pma_counter_select[i]));
+		p->counter[i] = (status != IB_PMA_SAMPLE_STATUS_DONE) ? 0 :
+			cpu_to_be64(
+			get_counter(dev, crp, dev->pma_counter_select[i]));
 
 	return reply((struct ib_smp *) pmp);
 }
 
-static int recv_pma_get_portcounters(struct ib_perf *pmp,
+static int recv_pma_get_portcounters(struct ib_pma_mad *pmp,
 				     struct ib_device *ibdev, u8 port)
 {
 	struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
@@ -1115,16 +1041,18 @@ static int recv_pma_get_portcounters(struct ib_perf *pmp,
 		dev->z_local_link_integrity_errors;
 	cntrs.excessive_buffer_overrun_errors -=
 		dev->z_excessive_buffer_overrun_errors;
+	cntrs.vl15_dropped -= dev->z_vl15_dropped;
+	cntrs.vl15_dropped += dev->n_vl15_dropped;
 
 	memset(pmp->data, 0, sizeof(pmp->data));
 
 	p->port_select = port_select;
-	if (pmp->attr_mod != 0 ||
+	if (pmp->mad_hdr.attr_mod != 0 ||
 	    (port_select != port && port_select != 0xFF))
-		pmp->status |= IB_SMP_INVALID_FIELD;
+		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
 
 	if (cntrs.symbol_error_counter > 0xFFFFUL)
-		p->symbol_error_counter = __constant_cpu_to_be16(0xFFFF);
+		p->symbol_error_counter = cpu_to_be16(0xFFFF);
 	else
 		p->symbol_error_counter =
 			cpu_to_be16((u16)cntrs.symbol_error_counter);
@@ -1138,17 +1066,17 @@ static int recv_pma_get_portcounters(struct ib_perf *pmp,
 	else
 		p->link_downed_counter = (u8)cntrs.link_downed_counter;
 	if (cntrs.port_rcv_errors > 0xFFFFUL)
-		p->port_rcv_errors = __constant_cpu_to_be16(0xFFFF);
+		p->port_rcv_errors = cpu_to_be16(0xFFFF);
 	else
 		p->port_rcv_errors =
 			cpu_to_be16((u16) cntrs.port_rcv_errors);
 	if (cntrs.port_rcv_remphys_errors > 0xFFFFUL)
-		p->port_rcv_remphys_errors = __constant_cpu_to_be16(0xFFFF);
+		p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF);
 	else
 		p->port_rcv_remphys_errors =
 			cpu_to_be16((u16)cntrs.port_rcv_remphys_errors);
 	if (cntrs.port_xmit_discards > 0xFFFFUL)
-		p->port_xmit_discards = __constant_cpu_to_be16(0xFFFF);
+		p->port_xmit_discards = cpu_to_be16(0xFFFF);
 	else
 		p->port_xmit_discards =
 			cpu_to_be16((u16)cntrs.port_xmit_discards);
@@ -1156,27 +1084,27 @@ static int recv_pma_get_portcounters(struct ib_perf *pmp,
 		cntrs.local_link_integrity_errors = 0xFUL;
 	if (cntrs.excessive_buffer_overrun_errors > 0xFUL)
 		cntrs.excessive_buffer_overrun_errors = 0xFUL;
-	p->lli_ebor_errors = (cntrs.local_link_integrity_errors << 4) |
+	p->link_overrun_errors = (cntrs.local_link_integrity_errors << 4) |
 		cntrs.excessive_buffer_overrun_errors;
-	if (dev->n_vl15_dropped > 0xFFFFUL)
-		p->vl15_dropped = __constant_cpu_to_be16(0xFFFF);
+	if (cntrs.vl15_dropped > 0xFFFFUL)
+		p->vl15_dropped = cpu_to_be16(0xFFFF);
 	else
-		p->vl15_dropped = cpu_to_be16((u16)dev->n_vl15_dropped);
+		p->vl15_dropped = cpu_to_be16((u16)cntrs.vl15_dropped);
 	if (cntrs.port_xmit_data > 0xFFFFFFFFUL)
-		p->port_xmit_data = __constant_cpu_to_be32(0xFFFFFFFF);
+		p->port_xmit_data = cpu_to_be32(0xFFFFFFFF);
 	else
 		p->port_xmit_data = cpu_to_be32((u32)cntrs.port_xmit_data);
 	if (cntrs.port_rcv_data > 0xFFFFFFFFUL)
-		p->port_rcv_data = __constant_cpu_to_be32(0xFFFFFFFF);
+		p->port_rcv_data = cpu_to_be32(0xFFFFFFFF);
 	else
 		p->port_rcv_data = cpu_to_be32((u32)cntrs.port_rcv_data);
 	if (cntrs.port_xmit_packets > 0xFFFFFFFFUL)
-		p->port_xmit_packets = __constant_cpu_to_be32(0xFFFFFFFF);
+		p->port_xmit_packets = cpu_to_be32(0xFFFFFFFF);
 	else
 		p->port_xmit_packets =
			cpu_to_be32((u32)cntrs.port_xmit_packets);
 	if (cntrs.port_rcv_packets > 0xFFFFFFFFUL)
-		p->port_rcv_packets = __constant_cpu_to_be32(0xFFFFFFFF);
+		p->port_rcv_packets = cpu_to_be32(0xFFFFFFFF);
 	else
 		p->port_rcv_packets =
 			cpu_to_be32((u32) cntrs.port_rcv_packets);
@@ -1184,7 +1112,7 @@ static int recv_pma_get_portcounters(struct ib_perf *pmp,
 	return reply((struct ib_smp *) pmp);
 }
 
-static int recv_pma_get_portcounters_ext(struct ib_perf *pmp,
+static int recv_pma_get_portcounters_ext(struct ib_pma_mad *pmp,
 					 struct ib_device *ibdev, u8 port)
 {
 	struct ib_pma_portcounters_ext *p =
@@ -1205,9 +1133,9 @@ static int recv_pma_get_portcounters_ext(struct ib_perf *pmp,
 	memset(pmp->data, 0, sizeof(pmp->data));
 
 	p->port_select = port_select;
-	if (pmp->attr_mod != 0 ||
+	if (pmp->mad_hdr.attr_mod != 0 ||
 	    (port_select != port && port_select != 0xFF))
-		pmp->status |= IB_SMP_INVALID_FIELD;
+		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
 
 	p->port_xmit_data = cpu_to_be64(swords);
 	p->port_rcv_data = cpu_to_be64(rwords);
@@ -1221,7 +1149,7 @@ static int recv_pma_get_portcounters_ext(struct ib_perf *pmp,
 	return reply((struct ib_smp *) pmp);
 }
 
-static int recv_pma_set_portcounters(struct ib_perf *pmp,
+static int recv_pma_set_portcounters(struct ib_pma_mad *pmp,
 				     struct ib_device *ibdev, u8 port)
 {
 	struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
@@ -1264,8 +1192,10 @@ static int recv_pma_set_portcounters(struct ib_perf *pmp,
 		dev->z_excessive_buffer_overrun_errors =
 			cntrs.excessive_buffer_overrun_errors;
 
-	if (p->counter_select & IB_PMA_SEL_PORT_VL15_DROPPED)
+	if (p->counter_select & IB_PMA_SEL_PORT_VL15_DROPPED) {
 		dev->n_vl15_dropped = 0;
+		dev->z_vl15_dropped = cntrs.vl15_dropped;
+	}
 
 	if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DATA)
 		dev->z_port_xmit_data = cntrs.port_xmit_data;
@@ -1282,7 +1212,7 @@ static int recv_pma_set_portcounters(struct ib_perf *pmp,
 	return recv_pma_get_portcounters(pmp, ibdev, port);
 }
 
-static int recv_pma_set_portcounters_ext(struct ib_perf *pmp,
+static int recv_pma_set_portcounters_ext(struct ib_pma_mad *pmp,
 					 struct ib_device *ibdev, u8 port)
 {
 	struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
@@ -1336,10 +1266,11 @@ static int process_subn(struct ib_device *ibdev, int mad_flags,
 	}
 
 	/* Is the mkey in the process of expiring? */
-	if (dev->mkey_lease_timeout && jiffies >= dev->mkey_lease_timeout) {
+	if (dev->mkey_lease_timeout &&
+	    time_after_eq(jiffies, dev->mkey_lease_timeout)) {
 		/* Clear timeout and mkey protection field. */
 		dev->mkey_lease_timeout = 0;
-		dev->mkeyprot_resv_lmc &= 0x3F;
+		dev->mkeyprot = 0;
 	}
 
 	/*
@@ -1350,7 +1281,7 @@ static int process_subn(struct ib_device *ibdev, int mad_flags,
 	    dev->mkey != smp->mkey &&
 	    (smp->method == IB_MGMT_METHOD_SET ||
 	     (smp->method == IB_MGMT_METHOD_GET &&
-	      (dev->mkeyprot_resv_lmc >> 7) != 0))) {
+	      dev->mkeyprot >= 2))) {
 		if (dev->mkey_violations != 0xFFFF)
 			++dev->mkey_violations;
 		if (dev->mkey_lease_timeout ||
@@ -1430,13 +1361,17 @@ static int process_subn(struct ib_device *ibdev, int mad_flags,
 		goto bail;
 	}
 
+	case IB_MGMT_METHOD_TRAP:
+	case IB_MGMT_METHOD_REPORT:
+	case IB_MGMT_METHOD_REPORT_RESP:
+	case IB_MGMT_METHOD_TRAP_REPRESS:
 	case IB_MGMT_METHOD_GET_RESP:
 		/*
 		 * The ib_mad module will call us to process responses
 		 * before checking for other consumers.
 		 * Just tell the caller to process it normally.
 		 */
-		ret = IB_MAD_RESULT_FAILURE;
+		ret = IB_MAD_RESULT_SUCCESS;
 		goto bail;
 	default:
 		smp->status |= IB_SMP_UNSUP_METHOD;
@@ -1451,19 +1386,19 @@ static int process_perf(struct ib_device *ibdev, u8 port_num,
 			struct ib_mad *in_mad,
 			struct ib_mad *out_mad)
 {
-	struct ib_perf *pmp = (struct ib_perf *)out_mad;
+	struct ib_pma_mad *pmp = (struct ib_pma_mad *)out_mad;
 	int ret;
 
 	*out_mad = *in_mad;
-	if (pmp->class_version != 1) {
-		pmp->status |= IB_SMP_UNSUP_VERSION;
+	if (pmp->mad_hdr.class_version != 1) {
+		pmp->mad_hdr.status |= IB_SMP_UNSUP_VERSION;
 		ret = reply((struct ib_smp *) pmp);
 		goto bail;
 	}
 
-	switch (pmp->method) {
+	switch (pmp->mad_hdr.method) {
 	case IB_MGMT_METHOD_GET:
-		switch (pmp->attr_id) {
+		switch (pmp->mad_hdr.attr_id) {
 		case IB_PMA_CLASS_PORT_INFO:
 			ret = recv_pma_get_classportinfo(pmp);
 			goto bail;
@@ -1487,13 +1422,13 @@ static int process_perf(struct ib_device *ibdev, u8 port_num,
 							      port_num);
 			goto bail;
 		default:
-			pmp->status |= IB_SMP_UNSUP_METH_ATTR;
+			pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
 			ret = reply((struct ib_smp *) pmp);
 			goto bail;
 		}
 
 	case IB_MGMT_METHOD_SET:
-		switch (pmp->attr_id) {
+		switch (pmp->mad_hdr.attr_id) {
 		case IB_PMA_PORT_SAMPLES_CONTROL:
 			ret = recv_pma_set_portsamplescontrol(pmp, ibdev,
 							      port_num);
@@ -1507,7 +1442,7 @@ static int process_perf(struct ib_device *ibdev, u8 port_num,
 							      port_num);
 			goto bail;
 		default:
-			pmp->status |= IB_SMP_UNSUP_METH_ATTR;
+			pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
 			ret = reply((struct ib_smp *) pmp);
 			goto bail;
 		}
@@ -1518,10 +1453,10 @@ static int process_perf(struct ib_device *ibdev, u8 port_num,
 		 * before checking for other consumers.
 		 * Just tell the caller to process it normally.
 		 */
-		ret = IB_MAD_RESULT_FAILURE;
+		ret = IB_MAD_RESULT_SUCCESS;
 		goto bail;
 	default:
-		pmp->status |= IB_SMP_UNSUP_METHOD;
+		pmp->mad_hdr.status |= IB_SMP_UNSUP_METHOD;
 		ret = reply((struct ib_smp *) pmp);
 	}
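Note: the PortCounters handlers in this patch repeatedly clamp 64-bit software counters to the 16- or 32-bit fields that the PMA MAD format provides before converting them to network byte order. The following is only a rough, self-contained sketch of that saturate-then-byte-swap pattern (the helper names saturate_be16/saturate_be32 are illustrative and are not part of the driver), assuming plain C with the POSIX byte-order functions:

	#include <stdint.h>
	#include <arpa/inet.h>	/* htons()/htonl() for big-endian conversion */

	/* Clamp a 64-bit running counter into a 16-bit big-endian MAD field. */
	static uint16_t saturate_be16(uint64_t val)
	{
		return htons(val > 0xFFFFULL ? 0xFFFF : (uint16_t)val);
	}

	/* Clamp a 64-bit running counter into a 32-bit big-endian MAD field. */
	static uint32_t saturate_be32(uint64_t val)
	{
		return htonl(val > 0xFFFFFFFFULL ? 0xFFFFFFFF : (uint32_t)val);
	}

In the kernel code itself the same effect is written out inline, one field at a time, with an explicit range check followed by cpu_to_be16()/cpu_to_be32().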
