Diffstat (limited to 'drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c')
| -rw-r--r-- | drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c | 1029 |
1 file changed, 669 insertions, 360 deletions
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c index 1adef266fcd..ca47665f94b 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c @@ -1,12 +1,12 @@ /* bnx2x_stats.c: Broadcom Everest network driver. * - * Copyright (c) 2007-2011 Broadcom Corporation + * Copyright (c) 2007-2013 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation. * - * Maintained by: Eilon Greenstein <eilong@broadcom.com> + * Maintained by: Ariel Elior <ariel.elior@qlogic.com> * Written by: Eliezer Tamir * Based on code from Michael Chan's bnx2 driver * UDP CSUM errata workaround by Arik Gendelman @@ -19,7 +19,7 @@ #include "bnx2x_stats.h" #include "bnx2x_cmn.h" - +#include "bnx2x_sriov.h" /* Statistics */ @@ -39,14 +39,39 @@ static inline long bnx2x_hilo(u32 *hiref) #endif } -static u16 bnx2x_get_port_stats_dma_len(struct bnx2x *bp) +static inline u16 bnx2x_get_port_stats_dma_len(struct bnx2x *bp) { - u16 res = sizeof(struct host_port_stats) >> 2; + u16 res = 0; + + /* 'newest' convention - shmem2 cotains the size of the port stats */ + if (SHMEM2_HAS(bp, sizeof_port_stats)) { + u32 size = SHMEM2_RD(bp, sizeof_port_stats); + if (size) + res = size; - /* if PFC stats are not supported by the MFW, don't DMA them */ - if (!(bp->flags & BC_SUPPORTS_PFC_STATS)) - res -= (sizeof(u32)*4) >> 2; + /* prevent newer BC from causing buffer overflow */ + if (res > sizeof(struct host_port_stats)) + res = sizeof(struct host_port_stats); + } + /* Older convention - all BCs support the port stats' fields up until + * the 'not_used' field + */ + if (!res) { + res = offsetof(struct host_port_stats, not_used) + 4; + + /* if PFC stats are supported by the MFW, DMA them as well */ + if (bp->flags & BC_SUPPORTS_PFC_STATS) { + res += offsetof(struct host_port_stats, + pfc_frames_rx_lo) - + offsetof(struct host_port_stats, + pfc_frames_tx_hi) + 4 ; + } + } + + res >>= 2; + + WARN_ON(res > 2 * DMAE_LEN32_RD_MAX); return res; } @@ -54,6 +79,42 @@ static u16 bnx2x_get_port_stats_dma_len(struct bnx2x *bp) * Init service functions */ +static void bnx2x_dp_stats(struct bnx2x *bp) +{ + int i; + + DP(BNX2X_MSG_STATS, "dumping stats:\n" + "fw_stats_req\n" + " hdr\n" + " cmd_num %d\n" + " reserved0 %d\n" + " drv_stats_counter %d\n" + " reserved1 %d\n" + " stats_counters_addrs %x %x\n", + bp->fw_stats_req->hdr.cmd_num, + bp->fw_stats_req->hdr.reserved0, + bp->fw_stats_req->hdr.drv_stats_counter, + bp->fw_stats_req->hdr.reserved1, + bp->fw_stats_req->hdr.stats_counters_addrs.hi, + bp->fw_stats_req->hdr.stats_counters_addrs.lo); + + for (i = 0; i < bp->fw_stats_req->hdr.cmd_num; i++) { + DP(BNX2X_MSG_STATS, + "query[%d]\n" + " kind %d\n" + " index %d\n" + " funcID %d\n" + " reserved %d\n" + " address %x %x\n", + i, bp->fw_stats_req->query[i].kind, + bp->fw_stats_req->query[i].index, + bp->fw_stats_req->query[i].funcID, + bp->fw_stats_req->query[i].reserved, + bp->fw_stats_req->query[i].address.hi, + bp->fw_stats_req->query[i].address.lo); + } +} + /* Post the next statistics ramrod. 
Protect it with the spin in * order to ensure the strict order between statistics ramrods * (each ramrod has a sequence number passed in a @@ -75,10 +136,12 @@ static void bnx2x_storm_stats_post(struct bnx2x *bp) bp->fw_stats_req->hdr.drv_stats_counter = cpu_to_le16(bp->stats_counter++); - DP(NETIF_MSG_TIMER, "Sending statistics ramrod %d\n", + DP(BNX2X_MSG_STATS, "Sending statistics ramrod %d\n", bp->fw_stats_req->hdr.drv_stats_counter); - + /* adjust the ramrod to include VF queues statistics */ + bnx2x_iov_adjust_stats_req(bp); + bnx2x_dp_stats(bp); /* send FW stats ramrod */ rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0, @@ -101,6 +164,11 @@ static void bnx2x_hw_stats_post(struct bnx2x *bp) if (CHIP_REV_IS_SLOW(bp)) return; + /* Update MCP's statistics if possible */ + if (bp->func_stx) + memcpy(bnx2x_sp(bp, func_stats), &bp->func_stats, + sizeof(bp->func_stats)); + /* loader */ if (bp->executer_idx) { int loader_idx = PMF_DMAE_C(bp); @@ -128,7 +196,7 @@ static void bnx2x_hw_stats_post(struct bnx2x *bp) } else if (bp->func_stx) { *stats_comp = 0; - bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp)); + bnx2x_issue_dmae_with_comp(bp, dmae, stats_comp); } } @@ -144,7 +212,7 @@ static int bnx2x_stats_comp(struct bnx2x *bp) break; } cnt--; - usleep_range(1000, 1000); + usleep_range(1000, 2000); } return 1; } @@ -153,7 +221,8 @@ static int bnx2x_stats_comp(struct bnx2x *bp) * Statistics service functions */ -static void bnx2x_stats_pmf_update(struct bnx2x *bp) +/* should be called under stats_sema */ +static void __bnx2x_stats_pmf_update(struct bnx2x *bp) { struct dmae_command *dmae; u32 opcode; @@ -161,7 +230,7 @@ static void bnx2x_stats_pmf_update(struct bnx2x *bp) u32 *stats_comp = bnx2x_sp(bp, stats_comp); /* sanity */ - if (!IS_MF(bp) || !bp->port.pmf || !bp->port.port_stx) { + if (!bp->port.pmf || !bp->port.port_stx) { BNX2X_ERR("BUG!\n"); return; } @@ -450,29 +519,61 @@ static void bnx2x_func_stats_init(struct bnx2x *bp) *stats_comp = 0; } -static void bnx2x_stats_start(struct bnx2x *bp) +/* should be called under stats_sema */ +static void __bnx2x_stats_start(struct bnx2x *bp) { - if (bp->port.pmf) - bnx2x_port_stats_init(bp); + if (IS_PF(bp)) { + if (bp->port.pmf) + bnx2x_port_stats_init(bp); - else if (bp->func_stx) - bnx2x_func_stats_init(bp); + else if (bp->func_stx) + bnx2x_func_stats_init(bp); - bnx2x_hw_stats_post(bp); - bnx2x_storm_stats_post(bp); + bnx2x_hw_stats_post(bp); + bnx2x_storm_stats_post(bp); + } + + bp->stats_started = true; +} + +static void bnx2x_stats_start(struct bnx2x *bp) +{ + if (down_timeout(&bp->stats_sema, HZ/10)) + BNX2X_ERR("Unable to acquire stats lock\n"); + __bnx2x_stats_start(bp); + up(&bp->stats_sema); } static void bnx2x_stats_pmf_start(struct bnx2x *bp) { + if (down_timeout(&bp->stats_sema, HZ/10)) + BNX2X_ERR("Unable to acquire stats lock\n"); bnx2x_stats_comp(bp); - bnx2x_stats_pmf_update(bp); - bnx2x_stats_start(bp); + __bnx2x_stats_pmf_update(bp); + __bnx2x_stats_start(bp); + up(&bp->stats_sema); +} + +static void bnx2x_stats_pmf_update(struct bnx2x *bp) +{ + if (down_timeout(&bp->stats_sema, HZ/10)) + BNX2X_ERR("Unable to acquire stats lock\n"); + __bnx2x_stats_pmf_update(bp); + up(&bp->stats_sema); } static void bnx2x_stats_restart(struct bnx2x *bp) { + /* vfs travel through here as part of the statistics FSM, but no action + * is required + */ + if (IS_VF(bp)) + return; + if (down_timeout(&bp->stats_sema, HZ/10)) + BNX2X_ERR("Unable to acquire stats lock\n"); bnx2x_stats_comp(bp); - bnx2x_stats_start(bp); + __bnx2x_stats_start(bp); + 
up(&bp->stats_sema); } static void bnx2x_bmac_stats_update(struct bnx2x *bp) @@ -554,23 +655,11 @@ static void bnx2x_bmac_stats_update(struct bnx2x *bp) UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl); /* collect PFC stats */ - DIFF_64(diff.hi, new->tx_stat_gtpp_hi, - pstats->pfc_frames_tx_hi, - diff.lo, new->tx_stat_gtpp_lo, - pstats->pfc_frames_tx_lo); pstats->pfc_frames_tx_hi = new->tx_stat_gtpp_hi; pstats->pfc_frames_tx_lo = new->tx_stat_gtpp_lo; - ADD_64(pstats->pfc_frames_tx_hi, diff.hi, - pstats->pfc_frames_tx_lo, diff.lo); - DIFF_64(diff.hi, new->rx_stat_grpp_hi, - pstats->pfc_frames_rx_hi, - diff.lo, new->rx_stat_grpp_lo, - pstats->pfc_frames_rx_lo); pstats->pfc_frames_rx_hi = new->rx_stat_grpp_hi; pstats->pfc_frames_rx_lo = new->rx_stat_grpp_lo; - ADD_64(pstats->pfc_frames_rx_hi, diff.hi, - pstats->pfc_frames_rx_lo, diff.lo); } estats->pause_frames_received_hi = @@ -638,31 +727,30 @@ static void bnx2x_mstat_stats_update(struct bnx2x *bp) tx_stat_dot3statsinternalmactransmiterrors); ADD_STAT64(stats_tx.tx_gtufl, tx_stat_mac_ufl); - ADD_64(estats->etherstatspkts1024octetsto1522octets_hi, - new->stats_tx.tx_gt1518_hi, - estats->etherstatspkts1024octetsto1522octets_lo, - new->stats_tx.tx_gt1518_lo); + estats->etherstatspkts1024octetsto1522octets_hi = + pstats->mac_stx[1].tx_stat_etherstatspkts1024octetsto1522octets_hi; + estats->etherstatspkts1024octetsto1522octets_lo = + pstats->mac_stx[1].tx_stat_etherstatspkts1024octetsto1522octets_lo; - ADD_64(estats->etherstatspktsover1522octets_hi, - new->stats_tx.tx_gt2047_hi, - estats->etherstatspktsover1522octets_lo, - new->stats_tx.tx_gt2047_lo); + estats->etherstatspktsover1522octets_hi = + pstats->mac_stx[1].tx_stat_mac_2047_hi; + estats->etherstatspktsover1522octets_lo = + pstats->mac_stx[1].tx_stat_mac_2047_lo; ADD_64(estats->etherstatspktsover1522octets_hi, - new->stats_tx.tx_gt4095_hi, + pstats->mac_stx[1].tx_stat_mac_4095_hi, estats->etherstatspktsover1522octets_lo, - new->stats_tx.tx_gt4095_lo); + pstats->mac_stx[1].tx_stat_mac_4095_lo); ADD_64(estats->etherstatspktsover1522octets_hi, - new->stats_tx.tx_gt9216_hi, + pstats->mac_stx[1].tx_stat_mac_9216_hi, estats->etherstatspktsover1522octets_lo, - new->stats_tx.tx_gt9216_lo); - + pstats->mac_stx[1].tx_stat_mac_9216_lo); ADD_64(estats->etherstatspktsover1522octets_hi, - new->stats_tx.tx_gt16383_hi, + pstats->mac_stx[1].tx_stat_mac_16383_hi, estats->etherstatspktsover1522octets_lo, - new->stats_tx.tx_gt16383_lo); + pstats->mac_stx[1].tx_stat_mac_16383_lo); estats->pause_frames_received_hi = pstats->mac_stx[1].rx_stat_mac_xpf_hi; @@ -796,6 +884,12 @@ static int bnx2x_hw_stats_update(struct bnx2x *bp) pstats->host_port_stats_counter++; + if (CHIP_IS_E3(bp)) { + u32 lpi_reg = BP_PORT(bp) ? 
MISC_REG_CPMU_LP_SM_ENT_CNT_P1 + : MISC_REG_CPMU_LP_SM_ENT_CNT_P0; + estats->eee_tx_lpi += REG_RD(bp, lpi_reg); + } + if (!BP_NOMCP(bp)) { u32 nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer); @@ -809,108 +903,100 @@ static int bnx2x_hw_stats_update(struct bnx2x *bp) return 0; } -static int bnx2x_storm_stats_update(struct bnx2x *bp) +static int bnx2x_storm_stats_validate_counters(struct bnx2x *bp) { - struct tstorm_per_port_stats *tport = - &bp->fw_stats_data->port.tstorm_port_statistics; - struct tstorm_per_pf_stats *tfunc = - &bp->fw_stats_data->pf.tstorm_pf_statistics; - struct host_func_stats *fstats = bnx2x_sp(bp, func_stats); - struct bnx2x_eth_stats *estats = &bp->eth_stats; struct stats_counter *counters = &bp->fw_stats_data->storm_counters; - int i; u16 cur_stats_counter; - /* Make sure we use the value of the counter * used for sending the last stats ramrod. */ - spin_lock_bh(&bp->stats_lock); cur_stats_counter = bp->stats_counter - 1; - spin_unlock_bh(&bp->stats_lock); /* are storm stats valid? */ if (le16_to_cpu(counters->xstats_counter) != cur_stats_counter) { - DP(BNX2X_MSG_STATS, "stats not updated by xstorm" - " xstorm counter (0x%x) != stats_counter (0x%x)\n", + DP(BNX2X_MSG_STATS, + "stats not updated by xstorm xstorm counter (0x%x) != stats_counter (0x%x)\n", le16_to_cpu(counters->xstats_counter), bp->stats_counter); return -EAGAIN; } if (le16_to_cpu(counters->ustats_counter) != cur_stats_counter) { - DP(BNX2X_MSG_STATS, "stats not updated by ustorm" - " ustorm counter (0x%x) != stats_counter (0x%x)\n", + DP(BNX2X_MSG_STATS, + "stats not updated by ustorm ustorm counter (0x%x) != stats_counter (0x%x)\n", le16_to_cpu(counters->ustats_counter), bp->stats_counter); return -EAGAIN; } if (le16_to_cpu(counters->cstats_counter) != cur_stats_counter) { - DP(BNX2X_MSG_STATS, "stats not updated by cstorm" - " cstorm counter (0x%x) != stats_counter (0x%x)\n", + DP(BNX2X_MSG_STATS, + "stats not updated by cstorm cstorm counter (0x%x) != stats_counter (0x%x)\n", le16_to_cpu(counters->cstats_counter), bp->stats_counter); return -EAGAIN; } if (le16_to_cpu(counters->tstats_counter) != cur_stats_counter) { - DP(BNX2X_MSG_STATS, "stats not updated by tstorm" - " tstorm counter (0x%x) != stats_counter (0x%x)\n", + DP(BNX2X_MSG_STATS, + "stats not updated by tstorm tstorm counter (0x%x) != stats_counter (0x%x)\n", le16_to_cpu(counters->tstats_counter), bp->stats_counter); return -EAGAIN; } + return 0; +} + +static int bnx2x_storm_stats_update(struct bnx2x *bp) +{ + struct tstorm_per_port_stats *tport = + &bp->fw_stats_data->port.tstorm_port_statistics; + struct tstorm_per_pf_stats *tfunc = + &bp->fw_stats_data->pf.tstorm_pf_statistics; + struct host_func_stats *fstats = &bp->func_stats; + struct bnx2x_eth_stats *estats = &bp->eth_stats; + struct bnx2x_eth_stats_old *estats_old = &bp->eth_stats_old; + int i; + + /* vfs stat counter is managed by pf */ + if (IS_PF(bp) && bnx2x_storm_stats_validate_counters(bp)) + return -EAGAIN; - memcpy(&(fstats->total_bytes_received_hi), - &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi), - sizeof(struct host_func_stats) - 2*sizeof(u32)); estats->error_bytes_received_hi = 0; estats->error_bytes_received_lo = 0; - estats->etherstatsoverrsizepkts_hi = 0; - estats->etherstatsoverrsizepkts_lo = 0; - estats->no_buff_discard_hi = 0; - estats->no_buff_discard_lo = 0; - estats->total_tpa_aggregations_hi = 0; - estats->total_tpa_aggregations_lo = 0; - estats->total_tpa_aggregated_frames_hi = 0; - estats->total_tpa_aggregated_frames_lo = 0; 
- estats->total_tpa_bytes_hi = 0; - estats->total_tpa_bytes_lo = 0; for_each_eth_queue(bp, i) { struct bnx2x_fastpath *fp = &bp->fp[i]; struct tstorm_per_queue_stats *tclient = &bp->fw_stats_data->queue_stats[i]. tstorm_queue_statistics; - struct tstorm_per_queue_stats *old_tclient = &fp->old_tclient; + struct tstorm_per_queue_stats *old_tclient = + &bnx2x_fp_stats(bp, fp)->old_tclient; struct ustorm_per_queue_stats *uclient = &bp->fw_stats_data->queue_stats[i]. ustorm_queue_statistics; - struct ustorm_per_queue_stats *old_uclient = &fp->old_uclient; + struct ustorm_per_queue_stats *old_uclient = + &bnx2x_fp_stats(bp, fp)->old_uclient; struct xstorm_per_queue_stats *xclient = &bp->fw_stats_data->queue_stats[i]. xstorm_queue_statistics; - struct xstorm_per_queue_stats *old_xclient = &fp->old_xclient; - struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats; + struct xstorm_per_queue_stats *old_xclient = + &bnx2x_fp_stats(bp, fp)->old_xclient; + struct bnx2x_eth_q_stats *qstats = + &bnx2x_fp_stats(bp, fp)->eth_q_stats; + struct bnx2x_eth_q_stats_old *qstats_old = + &bnx2x_fp_stats(bp, fp)->eth_q_stats_old; + u32 diff; - DP(BNX2X_MSG_STATS, "queue[%d]: ucast_sent 0x%x, " - "bcast_sent 0x%x mcast_sent 0x%x\n", + DP(BNX2X_MSG_STATS, "queue[%d]: ucast_sent 0x%x, bcast_sent 0x%x mcast_sent 0x%x\n", i, xclient->ucast_pkts_sent, xclient->bcast_pkts_sent, xclient->mcast_pkts_sent); DP(BNX2X_MSG_STATS, "---------------\n"); - qstats->total_broadcast_bytes_received_hi = - le32_to_cpu(tclient->rcv_bcast_bytes.hi); - qstats->total_broadcast_bytes_received_lo = - le32_to_cpu(tclient->rcv_bcast_bytes.lo); - - qstats->total_multicast_bytes_received_hi = - le32_to_cpu(tclient->rcv_mcast_bytes.hi); - qstats->total_multicast_bytes_received_lo = - le32_to_cpu(tclient->rcv_mcast_bytes.lo); - - qstats->total_unicast_bytes_received_hi = - le32_to_cpu(tclient->rcv_ucast_bytes.hi); - qstats->total_unicast_bytes_received_lo = - le32_to_cpu(tclient->rcv_ucast_bytes.lo); + UPDATE_QSTAT(tclient->rcv_bcast_bytes, + total_broadcast_bytes_received); + UPDATE_QSTAT(tclient->rcv_mcast_bytes, + total_multicast_bytes_received); + UPDATE_QSTAT(tclient->rcv_ucast_bytes, + total_unicast_bytes_received); /* * sum to total_bytes_received all @@ -936,16 +1022,15 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp) qstats->valid_bytes_received_lo = qstats->total_bytes_received_lo; - UPDATE_EXTEND_TSTAT(rcv_ucast_pkts, total_unicast_packets_received); UPDATE_EXTEND_TSTAT(rcv_mcast_pkts, total_multicast_packets_received); UPDATE_EXTEND_TSTAT(rcv_bcast_pkts, total_broadcast_packets_received); - UPDATE_EXTEND_TSTAT(pkts_too_big_discard, - etherstatsoverrsizepkts); - UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard); + UPDATE_EXTEND_E_TSTAT(pkts_too_big_discard, + etherstatsoverrsizepkts, 32); + UPDATE_EXTEND_E_TSTAT(no_buff_discard, no_buff_discard, 16); SUB_EXTEND_USTAT(ucast_no_buff_pkts, total_unicast_packets_received); @@ -953,24 +1038,17 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp) total_multicast_packets_received); SUB_EXTEND_USTAT(bcast_no_buff_pkts, total_broadcast_packets_received); - UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard); - UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard); - UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard); - - qstats->total_broadcast_bytes_transmitted_hi = - le32_to_cpu(xclient->bcast_bytes_sent.hi); - qstats->total_broadcast_bytes_transmitted_lo = - le32_to_cpu(xclient->bcast_bytes_sent.lo); - - qstats->total_multicast_bytes_transmitted_hi = - 
le32_to_cpu(xclient->mcast_bytes_sent.hi); - qstats->total_multicast_bytes_transmitted_lo = - le32_to_cpu(xclient->mcast_bytes_sent.lo); - - qstats->total_unicast_bytes_transmitted_hi = - le32_to_cpu(xclient->ucast_bytes_sent.hi); - qstats->total_unicast_bytes_transmitted_lo = - le32_to_cpu(xclient->ucast_bytes_sent.lo); + UPDATE_EXTEND_E_USTAT(ucast_no_buff_pkts, no_buff_discard); + UPDATE_EXTEND_E_USTAT(mcast_no_buff_pkts, no_buff_discard); + UPDATE_EXTEND_E_USTAT(bcast_no_buff_pkts, no_buff_discard); + + UPDATE_QSTAT(xclient->bcast_bytes_sent, + total_broadcast_bytes_transmitted); + UPDATE_QSTAT(xclient->mcast_bytes_sent, + total_multicast_bytes_transmitted); + UPDATE_QSTAT(xclient->ucast_bytes_sent, + total_unicast_bytes_transmitted); + /* * sum to total_bytes_transmitted all * unicast/multicast/broadcast @@ -1006,110 +1084,54 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp) total_transmitted_dropped_packets_error); /* TPA aggregations completed */ - UPDATE_EXTEND_USTAT(coalesced_events, total_tpa_aggregations); + UPDATE_EXTEND_E_USTAT(coalesced_events, total_tpa_aggregations); /* Number of network frames aggregated by TPA */ - UPDATE_EXTEND_USTAT(coalesced_pkts, - total_tpa_aggregated_frames); + UPDATE_EXTEND_E_USTAT(coalesced_pkts, + total_tpa_aggregated_frames); /* Total number of bytes in completed TPA aggregations */ - qstats->total_tpa_bytes_lo = - le32_to_cpu(uclient->coalesced_bytes.lo); - qstats->total_tpa_bytes_hi = - le32_to_cpu(uclient->coalesced_bytes.hi); - - /* TPA stats per-function */ - ADD_64(estats->total_tpa_aggregations_hi, - qstats->total_tpa_aggregations_hi, - estats->total_tpa_aggregations_lo, - qstats->total_tpa_aggregations_lo); - ADD_64(estats->total_tpa_aggregated_frames_hi, - qstats->total_tpa_aggregated_frames_hi, - estats->total_tpa_aggregated_frames_lo, - qstats->total_tpa_aggregated_frames_lo); - ADD_64(estats->total_tpa_bytes_hi, - qstats->total_tpa_bytes_hi, - estats->total_tpa_bytes_lo, - qstats->total_tpa_bytes_lo); - - ADD_64(fstats->total_bytes_received_hi, - qstats->total_bytes_received_hi, - fstats->total_bytes_received_lo, - qstats->total_bytes_received_lo); - ADD_64(fstats->total_bytes_transmitted_hi, - qstats->total_bytes_transmitted_hi, - fstats->total_bytes_transmitted_lo, - qstats->total_bytes_transmitted_lo); - ADD_64(fstats->total_unicast_packets_received_hi, - qstats->total_unicast_packets_received_hi, - fstats->total_unicast_packets_received_lo, - qstats->total_unicast_packets_received_lo); - ADD_64(fstats->total_multicast_packets_received_hi, - qstats->total_multicast_packets_received_hi, - fstats->total_multicast_packets_received_lo, - qstats->total_multicast_packets_received_lo); - ADD_64(fstats->total_broadcast_packets_received_hi, - qstats->total_broadcast_packets_received_hi, - fstats->total_broadcast_packets_received_lo, - qstats->total_broadcast_packets_received_lo); - ADD_64(fstats->total_unicast_packets_transmitted_hi, - qstats->total_unicast_packets_transmitted_hi, - fstats->total_unicast_packets_transmitted_lo, - qstats->total_unicast_packets_transmitted_lo); - ADD_64(fstats->total_multicast_packets_transmitted_hi, - qstats->total_multicast_packets_transmitted_hi, - fstats->total_multicast_packets_transmitted_lo, - qstats->total_multicast_packets_transmitted_lo); - ADD_64(fstats->total_broadcast_packets_transmitted_hi, - qstats->total_broadcast_packets_transmitted_hi, - fstats->total_broadcast_packets_transmitted_lo, - qstats->total_broadcast_packets_transmitted_lo); - ADD_64(fstats->valid_bytes_received_hi, - 
qstats->valid_bytes_received_hi, - fstats->valid_bytes_received_lo, - qstats->valid_bytes_received_lo); - - ADD_64(estats->etherstatsoverrsizepkts_hi, - qstats->etherstatsoverrsizepkts_hi, - estats->etherstatsoverrsizepkts_lo, - qstats->etherstatsoverrsizepkts_lo); - ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi, - estats->no_buff_discard_lo, qstats->no_buff_discard_lo); + UPDATE_QSTAT(uclient->coalesced_bytes, total_tpa_bytes); + + UPDATE_ESTAT_QSTAT_64(total_tpa_bytes); + + UPDATE_FSTAT_QSTAT(total_bytes_received); + UPDATE_FSTAT_QSTAT(total_bytes_transmitted); + UPDATE_FSTAT_QSTAT(total_unicast_packets_received); + UPDATE_FSTAT_QSTAT(total_multicast_packets_received); + UPDATE_FSTAT_QSTAT(total_broadcast_packets_received); + UPDATE_FSTAT_QSTAT(total_unicast_packets_transmitted); + UPDATE_FSTAT_QSTAT(total_multicast_packets_transmitted); + UPDATE_FSTAT_QSTAT(total_broadcast_packets_transmitted); + UPDATE_FSTAT_QSTAT(valid_bytes_received); } - ADD_64(fstats->total_bytes_received_hi, + ADD_64(estats->total_bytes_received_hi, estats->rx_stat_ifhcinbadoctets_hi, - fstats->total_bytes_received_lo, + estats->total_bytes_received_lo, estats->rx_stat_ifhcinbadoctets_lo); - ADD_64(fstats->total_bytes_received_hi, - le32_to_cpu(tfunc->rcv_error_bytes.hi), - fstats->total_bytes_received_lo, - le32_to_cpu(tfunc->rcv_error_bytes.lo)); + ADD_64_LE(estats->total_bytes_received_hi, + tfunc->rcv_error_bytes.hi, + estats->total_bytes_received_lo, + tfunc->rcv_error_bytes.lo); - memcpy(estats, &(fstats->total_bytes_received_hi), - sizeof(struct host_func_stats) - 2*sizeof(u32)); + ADD_64_LE(estats->error_bytes_received_hi, + tfunc->rcv_error_bytes.hi, + estats->error_bytes_received_lo, + tfunc->rcv_error_bytes.lo); - ADD_64(estats->error_bytes_received_hi, - le32_to_cpu(tfunc->rcv_error_bytes.hi), - estats->error_bytes_received_lo, - le32_to_cpu(tfunc->rcv_error_bytes.lo)); + UPDATE_ESTAT(etherstatsoverrsizepkts, rx_stat_dot3statsframestoolong); - ADD_64(estats->etherstatsoverrsizepkts_hi, - estats->rx_stat_dot3statsframestoolong_hi, - estats->etherstatsoverrsizepkts_lo, - estats->rx_stat_dot3statsframestoolong_lo); ADD_64(estats->error_bytes_received_hi, estats->rx_stat_ifhcinbadoctets_hi, estats->error_bytes_received_lo, estats->rx_stat_ifhcinbadoctets_lo); if (bp->port.pmf) { - estats->mac_filter_discard = - le32_to_cpu(tport->mac_filter_discard); - estats->mf_tag_discard = - le32_to_cpu(tport->mf_tag_discard); - estats->brb_truncate_discard = - le32_to_cpu(tport->brb_truncate_discard); - estats->mac_discard = le32_to_cpu(tport->mac_discard); + struct bnx2x_fw_port_stats_old *fwstats = &bp->fw_stats_old; + UPDATE_FW_STAT(mac_filter_discard); + UPDATE_FW_STAT(mf_tag_discard); + UPDATE_FW_STAT(brb_truncate_discard); + UPDATE_FW_STAT(mac_discard); } fstats->host_func_stats_start = ++fstats->host_func_stats_end; @@ -1141,9 +1163,12 @@ static void bnx2x_net_stats_update(struct bnx2x *bp) nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi); tmp = estats->mac_discard; - for_each_rx_queue(bp, i) - tmp += le32_to_cpu(bp->fp[i].old_tclient.checksum_discard); - nstats->rx_dropped = tmp; + for_each_rx_queue(bp, i) { + struct tstorm_per_queue_stats *old_tclient = + &bp->fp_stats[i].old_tclient; + tmp += le32_to_cpu(old_tclient->checksum_discard); + } + nstats->rx_dropped = tmp + bp->net_stats_old.rx_dropped; nstats->tx_dropped = 0; @@ -1191,17 +1216,16 @@ static void bnx2x_drv_stats_update(struct bnx2x *bp) struct bnx2x_eth_stats *estats = &bp->eth_stats; int i; - estats->driver_xoff = 
0; - estats->rx_err_discard_pkt = 0; - estats->rx_skb_alloc_failed = 0; - estats->hw_csum_err = 0; for_each_queue(bp, i) { - struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats; - - estats->driver_xoff += qstats->driver_xoff; - estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt; - estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed; - estats->hw_csum_err += qstats->hw_csum_err; + struct bnx2x_eth_q_stats *qstats = &bp->fp_stats[i].eth_q_stats; + struct bnx2x_eth_q_stats_old *qstats_old = + &bp->fp_stats[i].eth_q_stats_old; + + UPDATE_ESTAT_QSTAT(driver_xoff); + UPDATE_ESTAT_QSTAT(rx_err_discard_pkt); + UPDATE_ESTAT_QSTAT(rx_skb_alloc_failed); + UPDATE_ESTAT_QSTAT(hw_csum_err); + UPDATE_ESTAT_QSTAT(driver_filtered_tx_pkt); } } @@ -1223,75 +1247,55 @@ static void bnx2x_stats_update(struct bnx2x *bp) { u32 *stats_comp = bnx2x_sp(bp, stats_comp); - if (bnx2x_edebug_stats_stopped(bp)) + /* we run update from timer context, so give up + * if somebody is in the middle of transition + */ + if (down_trylock(&bp->stats_sema)) return; - if (*stats_comp != DMAE_COMP_VAL) - return; + if (bnx2x_edebug_stats_stopped(bp) || !bp->stats_started) + goto out; - if (bp->port.pmf) - bnx2x_hw_stats_update(bp); + if (IS_PF(bp)) { + if (*stats_comp != DMAE_COMP_VAL) + goto out; - if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) { - BNX2X_ERR("storm stats were not updated for 3 times\n"); - bnx2x_panic(); - return; + if (bp->port.pmf) + bnx2x_hw_stats_update(bp); + + if (bnx2x_storm_stats_update(bp)) { + if (bp->stats_pending++ == 3) { + BNX2X_ERR("storm stats were not updated for 3 times\n"); + bnx2x_panic(); + } + goto out; + } + } else { + /* vf doesn't collect HW statistics, and doesn't get completions + * perform only update + */ + bnx2x_storm_stats_update(bp); } bnx2x_net_stats_update(bp); bnx2x_drv_stats_update(bp); + /* vf is done */ + if (IS_VF(bp)) + goto out; + if (netif_msg_timer(bp)) { struct bnx2x_eth_stats *estats = &bp->eth_stats; - int i, cos; netdev_dbg(bp->dev, "brb drops %u brb truncate %u\n", estats->brb_drop_lo, estats->brb_truncate_lo); - - for_each_eth_queue(bp, i) { - struct bnx2x_fastpath *fp = &bp->fp[i]; - struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats; - - pr_debug("%s: rx usage(%4u) *rx_cons_sb(%u) rx pkt(%lu) rx calls(%lu %lu)\n", - fp->name, (le16_to_cpu(*fp->rx_cons_sb) - - fp->rx_comp_cons), - le16_to_cpu(*fp->rx_cons_sb), - bnx2x_hilo(&qstats-> - total_unicast_packets_received_hi), - fp->rx_calls, fp->rx_pkt); - } - - for_each_eth_queue(bp, i) { - struct bnx2x_fastpath *fp = &bp->fp[i]; - struct bnx2x_fp_txdata *txdata; - struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats; - struct netdev_queue *txq; - - pr_debug("%s: tx pkt(%lu) (Xoff events %u)", - fp->name, - bnx2x_hilo( - &qstats->total_unicast_packets_transmitted_hi), - qstats->driver_xoff); - - for_each_cos_in_tx_queue(fp, cos) { - txdata = &fp->txdata[cos]; - txq = netdev_get_tx_queue(bp->dev, - FP_COS_TO_TXQ(fp, cos)); - - pr_debug("%d: tx avail(%4u) *tx_cons_sb(%u) tx calls (%lu) %s\n", - cos, - bnx2x_tx_avail(bp, txdata), - le16_to_cpu(*txdata->tx_cons_sb), - txdata->tx_pkt, - (netif_tx_queue_stopped(txq) ? 
- "Xoff" : "Xon") - ); - } - } } bnx2x_hw_stats_post(bp); bnx2x_storm_stats_post(bp); + +out: + up(&bp->stats_sema); } static void bnx2x_port_stats_stop(struct bnx2x *bp) @@ -1357,6 +1361,11 @@ static void bnx2x_stats_stop(struct bnx2x *bp) { int update = 0; + if (down_timeout(&bp->stats_sema, HZ/10)) + BNX2X_ERR("Unable to acquire stats lock\n"); + + bp->stats_started = false; + bnx2x_stats_comp(bp); if (bp->port.pmf) @@ -1373,6 +1382,8 @@ static void bnx2x_stats_stop(struct bnx2x *bp) bnx2x_hw_stats_post(bp); bnx2x_stats_comp(bp); } + + up(&bp->stats_sema); } static void bnx2x_stats_do_nothing(struct bnx2x *bp) @@ -1401,15 +1412,17 @@ static const struct { void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event) { enum bnx2x_stats_state state; + void (*action)(struct bnx2x *bp); if (unlikely(bp->panic)) return; spin_lock_bh(&bp->stats_lock); state = bp->stats_state; bp->stats_state = bnx2x_stats_stm[state][event].next_state; + action = bnx2x_stats_stm[state][event].action; spin_unlock_bh(&bp->stats_lock); - bnx2x_stats_stm[state][event].action(bp); + action(bp); if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp)) DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n", @@ -1446,71 +1459,11 @@ static void bnx2x_port_stats_base_init(struct bnx2x *bp) bnx2x_stats_comp(bp); } -static void bnx2x_func_stats_base_init(struct bnx2x *bp) -{ - int vn, vn_max = IS_MF(bp) ? BP_MAX_VN_NUM(bp) : E1VN_MAX; - u32 func_stx; - - /* sanity */ - if (!bp->port.pmf || !bp->func_stx) { - BNX2X_ERR("BUG!\n"); - return; - } - - /* save our func_stx */ - func_stx = bp->func_stx; - - for (vn = VN_0; vn < vn_max; vn++) { - int mb_idx = BP_FW_MB_IDX_VN(bp, vn); - - bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param); - bnx2x_func_stats_init(bp); - bnx2x_hw_stats_post(bp); - bnx2x_stats_comp(bp); - } - - /* restore our func_stx */ - bp->func_stx = func_stx; -} - -static void bnx2x_func_stats_base_update(struct bnx2x *bp) -{ - struct dmae_command *dmae = &bp->stats_dmae; - u32 *stats_comp = bnx2x_sp(bp, stats_comp); - - /* sanity */ - if (!bp->func_stx) { - BNX2X_ERR("BUG!\n"); - return; - } - - bp->executer_idx = 0; - memset(dmae, 0, sizeof(struct dmae_command)); - - dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI, - true, DMAE_COMP_PCI); - dmae->src_addr_lo = bp->func_stx >> 2; - dmae->src_addr_hi = 0; - dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base)); - dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base)); - dmae->len = sizeof(struct host_func_stats) >> 2; - dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp)); - dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp)); - dmae->comp_val = DMAE_COMP_VAL; - - *stats_comp = 0; - bnx2x_hw_stats_post(bp); - bnx2x_stats_comp(bp); -} - -/** - * This function will prepare the statistics ramrod data the way +/* This function will prepare the statistics ramrod data the way * we will only have to increment the statistics counter and * send the ramrod each time we have to. 
- * - * @param bp */ -static inline void bnx2x_prep_fw_stats_req(struct bnx2x *bp) +static void bnx2x_prep_fw_stats_req(struct bnx2x *bp) { int i; int first_queue_query_index; @@ -1622,7 +1575,7 @@ static inline void bnx2x_prep_fw_stats_req(struct bnx2x *bp) query[first_queue_query_index + i]; cur_query_entry->kind = STATS_TYPE_QUEUE; - cur_query_entry->index = bnx2x_stats_id(&bp->fp[FCOE_IDX]); + cur_query_entry->index = bnx2x_stats_id(&bp->fp[FCOE_IDX(bp)]); cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp)); cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset)); @@ -1631,11 +1584,51 @@ static inline void bnx2x_prep_fw_stats_req(struct bnx2x *bp) } } +void bnx2x_memset_stats(struct bnx2x *bp) +{ + int i; + + /* function stats */ + for_each_queue(bp, i) { + struct bnx2x_fp_stats *fp_stats = &bp->fp_stats[i]; + + memset(&fp_stats->old_tclient, 0, + sizeof(fp_stats->old_tclient)); + memset(&fp_stats->old_uclient, 0, + sizeof(fp_stats->old_uclient)); + memset(&fp_stats->old_xclient, 0, + sizeof(fp_stats->old_xclient)); + if (bp->stats_init) { + memset(&fp_stats->eth_q_stats, 0, + sizeof(fp_stats->eth_q_stats)); + memset(&fp_stats->eth_q_stats_old, 0, + sizeof(fp_stats->eth_q_stats_old)); + } + } + + memset(&bp->dev->stats, 0, sizeof(bp->dev->stats)); + + if (bp->stats_init) { + memset(&bp->net_stats_old, 0, sizeof(bp->net_stats_old)); + memset(&bp->fw_stats_old, 0, sizeof(bp->fw_stats_old)); + memset(&bp->eth_stats_old, 0, sizeof(bp->eth_stats_old)); + memset(&bp->eth_stats, 0, sizeof(bp->eth_stats)); + memset(&bp->func_stats, 0, sizeof(bp->func_stats)); + } + + bp->stats_state = STATS_STATE_DISABLED; + + if (bp->port.pmf && bp->port.port_stx) + bnx2x_port_stats_base_init(bp); + + /* mark the end of statistics initializiation */ + bp->stats_init = false; +} + void bnx2x_stats_init(struct bnx2x *bp) { int /*abs*/port = BP_PORT(bp); int mb_idx = BP_FW_MB_IDX(bp); - int i; bp->stats_pending = 0; bp->executer_idx = 0; @@ -1653,6 +1646,10 @@ void bnx2x_stats_init(struct bnx2x *bp) DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n", bp->port.port_stx, bp->func_stx); + /* pmf should retrieve port statistics from SP on a non-init*/ + if (!bp->stats_init && bp->port.pmf && bp->port.port_stx) + bnx2x_stats_handle(bp, STATS_EVENT_PMF); + port = BP_PORT(bp); /* port stats */ memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats)); @@ -1667,31 +1664,343 @@ void bnx2x_stats_init(struct bnx2x *bp) &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2); } - /* function stats */ - for_each_queue(bp, i) { + /* Prepare statistics ramrod data */ + bnx2x_prep_fw_stats_req(bp); + + /* Clean SP from previous statistics */ + if (bp->stats_init) { + if (bp->func_stx) { + memset(bnx2x_sp(bp, func_stats), 0, + sizeof(struct host_func_stats)); + bnx2x_func_stats_init(bp); + bnx2x_hw_stats_post(bp); + bnx2x_stats_comp(bp); + } + } + + bnx2x_memset_stats(bp); +} + +void bnx2x_save_statistics(struct bnx2x *bp) +{ + int i; + struct net_device_stats *nstats = &bp->dev->stats; + + /* save queue statistics */ + for_each_eth_queue(bp, i) { struct bnx2x_fastpath *fp = &bp->fp[i]; + struct bnx2x_eth_q_stats *qstats = + &bnx2x_fp_stats(bp, fp)->eth_q_stats; + struct bnx2x_eth_q_stats_old *qstats_old = + &bnx2x_fp_stats(bp, fp)->eth_q_stats_old; + + UPDATE_QSTAT_OLD(total_unicast_bytes_received_hi); + UPDATE_QSTAT_OLD(total_unicast_bytes_received_lo); + UPDATE_QSTAT_OLD(total_broadcast_bytes_received_hi); + UPDATE_QSTAT_OLD(total_broadcast_bytes_received_lo); + UPDATE_QSTAT_OLD(total_multicast_bytes_received_hi); + 
UPDATE_QSTAT_OLD(total_multicast_bytes_received_lo); + UPDATE_QSTAT_OLD(total_unicast_bytes_transmitted_hi); + UPDATE_QSTAT_OLD(total_unicast_bytes_transmitted_lo); + UPDATE_QSTAT_OLD(total_broadcast_bytes_transmitted_hi); + UPDATE_QSTAT_OLD(total_broadcast_bytes_transmitted_lo); + UPDATE_QSTAT_OLD(total_multicast_bytes_transmitted_hi); + UPDATE_QSTAT_OLD(total_multicast_bytes_transmitted_lo); + UPDATE_QSTAT_OLD(total_tpa_bytes_hi); + UPDATE_QSTAT_OLD(total_tpa_bytes_lo); + } + + /* save net_device_stats statistics */ + bp->net_stats_old.rx_dropped = nstats->rx_dropped; - memset(&fp->old_tclient, 0, sizeof(fp->old_tclient)); - memset(&fp->old_uclient, 0, sizeof(fp->old_uclient)); - memset(&fp->old_xclient, 0, sizeof(fp->old_xclient)); - memset(&fp->eth_q_stats, 0, sizeof(fp->eth_q_stats)); + /* store port firmware statistics */ + if (bp->port.pmf && IS_MF(bp)) { + struct bnx2x_eth_stats *estats = &bp->eth_stats; + struct bnx2x_fw_port_stats_old *fwstats = &bp->fw_stats_old; + UPDATE_FW_STAT_OLD(mac_filter_discard); + UPDATE_FW_STAT_OLD(mf_tag_discard); + UPDATE_FW_STAT_OLD(brb_truncate_discard); + UPDATE_FW_STAT_OLD(mac_discard); } +} - /* Prepare statistics ramrod data */ - bnx2x_prep_fw_stats_req(bp); +void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats, + u32 stats_type) +{ + int i; + struct afex_stats *afex_stats = (struct afex_stats *)void_afex_stats; + struct bnx2x_eth_stats *estats = &bp->eth_stats; + struct per_queue_stats *fcoe_q_stats = + &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)]; - memset(&bp->dev->stats, 0, sizeof(bp->dev->stats)); - memset(&bp->eth_stats, 0, sizeof(bp->eth_stats)); + struct tstorm_per_queue_stats *fcoe_q_tstorm_stats = + &fcoe_q_stats->tstorm_queue_statistics; - bp->stats_state = STATS_STATE_DISABLED; + struct ustorm_per_queue_stats *fcoe_q_ustorm_stats = + &fcoe_q_stats->ustorm_queue_statistics; - if (bp->port.pmf) { - if (bp->port.port_stx) - bnx2x_port_stats_base_init(bp); + struct xstorm_per_queue_stats *fcoe_q_xstorm_stats = + &fcoe_q_stats->xstorm_queue_statistics; - if (bp->func_stx) - bnx2x_func_stats_base_init(bp); + struct fcoe_statistics_params *fw_fcoe_stat = + &bp->fw_stats_data->fcoe; + + memset(afex_stats, 0, sizeof(struct afex_stats)); + + for_each_eth_queue(bp, i) { + struct bnx2x_eth_q_stats *qstats = &bp->fp_stats[i].eth_q_stats; + + ADD_64(afex_stats->rx_unicast_bytes_hi, + qstats->total_unicast_bytes_received_hi, + afex_stats->rx_unicast_bytes_lo, + qstats->total_unicast_bytes_received_lo); + + ADD_64(afex_stats->rx_broadcast_bytes_hi, + qstats->total_broadcast_bytes_received_hi, + afex_stats->rx_broadcast_bytes_lo, + qstats->total_broadcast_bytes_received_lo); + + ADD_64(afex_stats->rx_multicast_bytes_hi, + qstats->total_multicast_bytes_received_hi, + afex_stats->rx_multicast_bytes_lo, + qstats->total_multicast_bytes_received_lo); + + ADD_64(afex_stats->rx_unicast_frames_hi, + qstats->total_unicast_packets_received_hi, + afex_stats->rx_unicast_frames_lo, + qstats->total_unicast_packets_received_lo); + + ADD_64(afex_stats->rx_broadcast_frames_hi, + qstats->total_broadcast_packets_received_hi, + afex_stats->rx_broadcast_frames_lo, + qstats->total_broadcast_packets_received_lo); + + ADD_64(afex_stats->rx_multicast_frames_hi, + qstats->total_multicast_packets_received_hi, + afex_stats->rx_multicast_frames_lo, + qstats->total_multicast_packets_received_lo); + + /* sum to rx_frames_discarded all discraded + * packets due to size, ttl0 and checksum + */ + ADD_64(afex_stats->rx_frames_discarded_hi, + 
qstats->total_packets_received_checksum_discarded_hi, + afex_stats->rx_frames_discarded_lo, + qstats->total_packets_received_checksum_discarded_lo); - } else if (bp->func_stx) - bnx2x_func_stats_base_update(bp); + ADD_64(afex_stats->rx_frames_discarded_hi, + qstats->total_packets_received_ttl0_discarded_hi, + afex_stats->rx_frames_discarded_lo, + qstats->total_packets_received_ttl0_discarded_lo); + + ADD_64(afex_stats->rx_frames_discarded_hi, + qstats->etherstatsoverrsizepkts_hi, + afex_stats->rx_frames_discarded_lo, + qstats->etherstatsoverrsizepkts_lo); + + ADD_64(afex_stats->rx_frames_dropped_hi, + qstats->no_buff_discard_hi, + afex_stats->rx_frames_dropped_lo, + qstats->no_buff_discard_lo); + + ADD_64(afex_stats->tx_unicast_bytes_hi, + qstats->total_unicast_bytes_transmitted_hi, + afex_stats->tx_unicast_bytes_lo, + qstats->total_unicast_bytes_transmitted_lo); + + ADD_64(afex_stats->tx_broadcast_bytes_hi, + qstats->total_broadcast_bytes_transmitted_hi, + afex_stats->tx_broadcast_bytes_lo, + qstats->total_broadcast_bytes_transmitted_lo); + + ADD_64(afex_stats->tx_multicast_bytes_hi, + qstats->total_multicast_bytes_transmitted_hi, + afex_stats->tx_multicast_bytes_lo, + qstats->total_multicast_bytes_transmitted_lo); + + ADD_64(afex_stats->tx_unicast_frames_hi, + qstats->total_unicast_packets_transmitted_hi, + afex_stats->tx_unicast_frames_lo, + qstats->total_unicast_packets_transmitted_lo); + + ADD_64(afex_stats->tx_broadcast_frames_hi, + qstats->total_broadcast_packets_transmitted_hi, + afex_stats->tx_broadcast_frames_lo, + qstats->total_broadcast_packets_transmitted_lo); + + ADD_64(afex_stats->tx_multicast_frames_hi, + qstats->total_multicast_packets_transmitted_hi, + afex_stats->tx_multicast_frames_lo, + qstats->total_multicast_packets_transmitted_lo); + + ADD_64(afex_stats->tx_frames_dropped_hi, + qstats->total_transmitted_dropped_packets_error_hi, + afex_stats->tx_frames_dropped_lo, + qstats->total_transmitted_dropped_packets_error_lo); + } + + /* now add FCoE statistics which are collected separately + * (both offloaded and non offloaded) + */ + if (!NO_FCOE(bp)) { + ADD_64_LE(afex_stats->rx_unicast_bytes_hi, + LE32_0, + afex_stats->rx_unicast_bytes_lo, + fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt); + + ADD_64_LE(afex_stats->rx_unicast_bytes_hi, + fcoe_q_tstorm_stats->rcv_ucast_bytes.hi, + afex_stats->rx_unicast_bytes_lo, + fcoe_q_tstorm_stats->rcv_ucast_bytes.lo); + + ADD_64_LE(afex_stats->rx_broadcast_bytes_hi, + fcoe_q_tstorm_stats->rcv_bcast_bytes.hi, + afex_stats->rx_broadcast_bytes_lo, + fcoe_q_tstorm_stats->rcv_bcast_bytes.lo); + + ADD_64_LE(afex_stats->rx_multicast_bytes_hi, + fcoe_q_tstorm_stats->rcv_mcast_bytes.hi, + afex_stats->rx_multicast_bytes_lo, + fcoe_q_tstorm_stats->rcv_mcast_bytes.lo); + + ADD_64_LE(afex_stats->rx_unicast_frames_hi, + LE32_0, + afex_stats->rx_unicast_frames_lo, + fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt); + + ADD_64_LE(afex_stats->rx_unicast_frames_hi, + LE32_0, + afex_stats->rx_unicast_frames_lo, + fcoe_q_tstorm_stats->rcv_ucast_pkts); + + ADD_64_LE(afex_stats->rx_broadcast_frames_hi, + LE32_0, + afex_stats->rx_broadcast_frames_lo, + fcoe_q_tstorm_stats->rcv_bcast_pkts); + + ADD_64_LE(afex_stats->rx_multicast_frames_hi, + LE32_0, + afex_stats->rx_multicast_frames_lo, + fcoe_q_tstorm_stats->rcv_ucast_pkts); + + ADD_64_LE(afex_stats->rx_frames_discarded_hi, + LE32_0, + afex_stats->rx_frames_discarded_lo, + fcoe_q_tstorm_stats->checksum_discard); + + ADD_64_LE(afex_stats->rx_frames_discarded_hi, + LE32_0, + afex_stats->rx_frames_discarded_lo, + 
fcoe_q_tstorm_stats->pkts_too_big_discard); + + ADD_64_LE(afex_stats->rx_frames_discarded_hi, + LE32_0, + afex_stats->rx_frames_discarded_lo, + fcoe_q_tstorm_stats->ttl0_discard); + + ADD_64_LE16(afex_stats->rx_frames_dropped_hi, + LE16_0, + afex_stats->rx_frames_dropped_lo, + fcoe_q_tstorm_stats->no_buff_discard); + + ADD_64_LE(afex_stats->rx_frames_dropped_hi, + LE32_0, + afex_stats->rx_frames_dropped_lo, + fcoe_q_ustorm_stats->ucast_no_buff_pkts); + + ADD_64_LE(afex_stats->rx_frames_dropped_hi, + LE32_0, + afex_stats->rx_frames_dropped_lo, + fcoe_q_ustorm_stats->mcast_no_buff_pkts); + + ADD_64_LE(afex_stats->rx_frames_dropped_hi, + LE32_0, + afex_stats->rx_frames_dropped_lo, + fcoe_q_ustorm_stats->bcast_no_buff_pkts); + + ADD_64_LE(afex_stats->rx_frames_dropped_hi, + LE32_0, + afex_stats->rx_frames_dropped_lo, + fw_fcoe_stat->rx_stat1.fcoe_rx_drop_pkt_cnt); + + ADD_64_LE(afex_stats->rx_frames_dropped_hi, + LE32_0, + afex_stats->rx_frames_dropped_lo, + fw_fcoe_stat->rx_stat2.fcoe_rx_drop_pkt_cnt); + + ADD_64_LE(afex_stats->tx_unicast_bytes_hi, + LE32_0, + afex_stats->tx_unicast_bytes_lo, + fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt); + + ADD_64_LE(afex_stats->tx_unicast_bytes_hi, + fcoe_q_xstorm_stats->ucast_bytes_sent.hi, + afex_stats->tx_unicast_bytes_lo, + fcoe_q_xstorm_stats->ucast_bytes_sent.lo); + + ADD_64_LE(afex_stats->tx_broadcast_bytes_hi, + fcoe_q_xstorm_stats->bcast_bytes_sent.hi, + afex_stats->tx_broadcast_bytes_lo, + fcoe_q_xstorm_stats->bcast_bytes_sent.lo); + + ADD_64_LE(afex_stats->tx_multicast_bytes_hi, + fcoe_q_xstorm_stats->mcast_bytes_sent.hi, + afex_stats->tx_multicast_bytes_lo, + fcoe_q_xstorm_stats->mcast_bytes_sent.lo); + + ADD_64_LE(afex_stats->tx_unicast_frames_hi, + LE32_0, + afex_stats->tx_unicast_frames_lo, + fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt); + + ADD_64_LE(afex_stats->tx_unicast_frames_hi, + LE32_0, + afex_stats->tx_unicast_frames_lo, + fcoe_q_xstorm_stats->ucast_pkts_sent); + + ADD_64_LE(afex_stats->tx_broadcast_frames_hi, + LE32_0, + afex_stats->tx_broadcast_frames_lo, + fcoe_q_xstorm_stats->bcast_pkts_sent); + + ADD_64_LE(afex_stats->tx_multicast_frames_hi, + LE32_0, + afex_stats->tx_multicast_frames_lo, + fcoe_q_xstorm_stats->mcast_pkts_sent); + + ADD_64_LE(afex_stats->tx_frames_dropped_hi, + LE32_0, + afex_stats->tx_frames_dropped_lo, + fcoe_q_xstorm_stats->error_drop_pkts); + } + + /* if port stats are requested, add them to the PMF + * stats, as anyway they will be accumulated by the + * MCP before sent to the switch + */ + if ((bp->port.pmf) && (stats_type == VICSTATST_UIF_INDEX)) { + ADD_64(afex_stats->rx_frames_dropped_hi, + 0, + afex_stats->rx_frames_dropped_lo, + estats->mac_filter_discard); + ADD_64(afex_stats->rx_frames_dropped_hi, + 0, + afex_stats->rx_frames_dropped_lo, + estats->brb_truncate_discard); + ADD_64(afex_stats->rx_frames_discarded_hi, + 0, + afex_stats->rx_frames_discarded_lo, + estats->mac_discard); + } +} + +void bnx2x_stats_safe_exec(struct bnx2x *bp, + void (func_to_exec)(void *cookie), + void *cookie){ + if (down_timeout(&bp->stats_sema, HZ/10)) + BNX2X_ERR("Unable to acquire stats lock\n"); + bnx2x_stats_comp(bp); + func_to_exec(cookie); + __bnx2x_stats_start(bp); + up(&bp->stats_sema); } |
