/* bnx2x_stats.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2013 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "bnx2x_stats.h"
#include "bnx2x_cmn.h"
#include "bnx2x_sriov.h"

/* Statistics */

/*
 * General service functions
 */

/* Fold an adjacent {hi, lo} pair of u32 counters into a single long.
 * On 64-bit kernels the full 64-bit value is returned; on 32-bit
 * kernels only the low 32 bits are returned (truncating the counter).
 */
static inline long bnx2x_hilo(u32 *hiref)
{
	u32 lo = *(hiref + 1);
#if (BITS_PER_LONG == 64)
	u32 hi = *hiref;

	return HILO_U64(hi, lo);
#else
	return lo;
#endif
}

/* Return the number of 32-bit words of struct host_port_stats that
 * should be DMAed to/from the MFW, depending on what the bootcode
 * advertises it supports.
 */
static inline u16 bnx2x_get_port_stats_dma_len(struct bnx2x *bp)
{
	u16 res = 0;

	/* 'newest' convention - shmem2 contains the size of the port stats */
	if (SHMEM2_HAS(bp, sizeof_port_stats)) {
		u32 size = SHMEM2_RD(bp, sizeof_port_stats);
		if (size)
			res = size;

		/* prevent newer BC from causing buffer overflow */
		if (res > sizeof(struct host_port_stats))
			res = sizeof(struct host_port_stats);
	}

	/* Older convention - all BCs support the port stats' fields up until
	 * the 'not_used' field
	 */
	if (!res) {
		res = offsetof(struct host_port_stats, not_used) + 4;

		/* if PFC stats are supported by the MFW, DMA them as well */
		if (bp->flags & BC_SUPPORTS_PFC_STATS) {
			res += offsetof(struct host_port_stats,
					pfc_frames_rx_lo) -
			       offsetof(struct host_port_stats,
					pfc_frames_tx_hi) + 4 ;
		}
	}

	/* bytes -> 32-bit words, the unit DMAE works in */
	res >>= 2;

	WARN_ON(res > 2 * DMAE_LEN32_RD_MAX);
	return res;
}

/*
 * Init service functions
 */
/* Debug-dump the firmware statistics ramrod request (header plus every
 * per-client query entry) to the message log.  No side effects.
 */
static void bnx2x_dp_stats(struct bnx2x *bp)
{
	int i;

	DP(BNX2X_MSG_STATS, "dumping stats:\n"
	   "fw_stats_req\n"
	   " hdr\n"
	   " cmd_num %d\n"
	   " reserved0 %d\n"
	   " drv_stats_counter %d\n"
	   " reserved1 %d\n"
	   " stats_counters_addrs %x %x\n",
	   bp->fw_stats_req->hdr.cmd_num,
	   bp->fw_stats_req->hdr.reserved0,
	   bp->fw_stats_req->hdr.drv_stats_counter,
	   bp->fw_stats_req->hdr.reserved1,
	   bp->fw_stats_req->hdr.stats_counters_addrs.hi,
	   bp->fw_stats_req->hdr.stats_counters_addrs.lo);

	for (i = 0; i < bp->fw_stats_req->hdr.cmd_num; i++) {
		DP(BNX2X_MSG_STATS,
		   "query[%d]\n"
		   " kind %d\n"
		   " index %d\n"
		   " funcID %d\n"
		   " reserved %d\n"
		   " address %x %x\n",
		   i, bp->fw_stats_req->query[i].kind,
		   bp->fw_stats_req->query[i].index,
		   bp->fw_stats_req->query[i].funcID,
		   bp->fw_stats_req->query[i].reserved,
		   bp->fw_stats_req->query[i].address.hi,
		   bp->fw_stats_req->query[i].address.lo);
	}
}

/* Post the next statistics ramrod. Protect it with the spin in
 * order to ensure the strict order between statistics ramrods
 * (each ramrod has a sequence number passed in a
 * bp->fw_stats_req->hdr.drv_stats_counter and ramrods must be
 * sent in order).
 */
static void bnx2x_storm_stats_post(struct bnx2x *bp)
{
	if (!bp->stats_pending) {
		int rc;

		spin_lock_bh(&bp->stats_lock);

		/* re-check under the lock: another context may have posted
		 * a ramrod between the lockless test above and here
		 */
		if (bp->stats_pending) {
			spin_unlock_bh(&bp->stats_lock);
			return;
		}

		/* stamp the request with the next sequence number; the
		 * storms echo it back so we can validate their answers
		 */
		bp->fw_stats_req->hdr.drv_stats_counter =
			cpu_to_le16(bp->stats_counter++);

		DP(BNX2X_MSG_STATS, "Sending statistics ramrod %d\n",
		   le16_to_cpu(bp->fw_stats_req->hdr.drv_stats_counter));

		/* adjust the ramrod to include VF queues statistics */
		bnx2x_iov_adjust_stats_req(bp);
		bnx2x_dp_stats(bp);

		/* send FW stats ramrod */
		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0,
				   U64_HI(bp->fw_stats_req_mapping),
				   U64_LO(bp->fw_stats_req_mapping),
				   NONE_CONNECTION_TYPE);
		if (rc == 0)
			bp->stats_pending = 1;

		spin_unlock_bh(&bp->stats_lock);
	}
}

/* Kick the HW statistics gathering: either trigger the DMAE "loader"
 * chain built in bp->slowpath->dmae[] (when executer_idx != 0), or -
 * with no chain - just DMA the function stats to the MFW directly.
 */
static void bnx2x_hw_stats_post(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	*stats_comp = DMAE_COMP_VAL;
	if (CHIP_REV_IS_SLOW(bp))
		return;

	/* Update MCP's statistics if possible */
	if (bp->func_stx)
		memcpy(bnx2x_sp(bp, func_stats), &bp->func_stats,
		       sizeof(bp->func_stats));

	/* loader */
	if (bp->executer_idx) {
		int loader_idx = PMF_DMAE_C(bp);
		u32 opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
					       true, DMAE_COMP_GRC);
		opcode = bnx2x_dmae_opcode_clr_src_reset(opcode);

		/* the loader command copies the prepared dmae[] chain into
		 * the DMAE command memory at slot loader_idx + 1 and lets
		 * the chain run itself via GRC completions
		 */
		memset(dmae, 0, sizeof(struct dmae_command));
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
				     sizeof(struct dmae_command) *
				     (loader_idx + 1)) >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct dmae_command) >> 2;
		if (CHIP_IS_E1(bp))
			dmae->len--;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, loader_idx);

	} else if (bp->func_stx) {
		*stats_comp = 0;
		bnx2x_issue_dmae_with_comp(bp, dmae, stats_comp);
	}
}

/* Poll (up to ~10 * 1-2ms) for the DMAE chain started by
 * bnx2x_hw_stats_post() to write DMAE_COMP_VAL into stats_comp.
 * Logs an error and gives up on timeout.
 */
static void bnx2x_stats_comp(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
	int cnt = 10;

	might_sleep();
	/* stats_comp is written by the device via DMA */
	while (*stats_comp != DMAE_COMP_VAL) {
		if (!cnt) {
			BNX2X_ERR("timeout waiting for stats finished\n");
			break;
		}
		cnt--;
		usleep_range(1000, 2000);
	}
}

/*
 * Statistics service functions
 */

/* Read the port statistics block from the MFW (port_stx) into the
 * driver's port_stats buffer using two chained DMAE reads (the block
 * is larger than a single DMAE read can carry).
 * should be called under stats_sema
 */
static void __bnx2x_stats_pmf_update(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI, false, 0);

	/* first half: GRC completion chains to the second command */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_GRC);
	dmae->src_addr_lo = bp->port.port_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->len = DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	/* second half: PCI completion writes DMAE_COMP_VAL to stats_comp */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
	dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->len =
		bnx2x_get_port_stats_dma_len(bp) - DMAE_LEN32_RD_MAX;

	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

/* Build the per-tick DMAE chain for a PMF: write port/function stats
 * to the MFW and read MAC (EMAC/BMAC/MSTAT) and NIG HW counters into
 * the driver's slowpath buffers.  Only builds the chain in
 * bp->slowpath->dmae[]; bnx2x_hw_stats_post() actually runs it.
 */
static void bnx2x_port_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	int port = BP_PORT(bp);
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 mac_addr;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->link_vars.link_up || !bp->port.pmf) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* MCP */
	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
				   true, DMAE_COMP_GRC);

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = bnx2x_get_port_stats_dma_len(bp);
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* MAC */
	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
				   true, DMAE_COMP_GRC);

	/* EMAC is special */
	if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
		mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);

		/* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_RX_STAT_AC_28 */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->len = 1;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_TX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	} else {
		u32 tx_src_addr_lo, rx_src_addr_lo;
		u16 rx_len, tx_len;

		/* configure the params according to MAC type */
		switch (bp->link_vars.mac_type) {
		case MAC_TYPE_BMAC:
			mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
					   NIG_REG_INGRESS_BMAC0_MEM);

			/* BIGMAC_REGISTER_TX_STAT_GTPKT ..
			   BIGMAC_REGISTER_TX_STAT_GTBYT */
			if (CHIP_IS_E1x(bp)) {
				tx_src_addr_lo = (mac_addr +
					BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
				tx_len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
					  BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
				rx_src_addr_lo = (mac_addr +
					BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
				rx_len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
					  BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
			} else {
				tx_src_addr_lo = (mac_addr +
					BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2;
				tx_len = (8 + BIGMAC2_REGISTER_TX_STAT_GTBYT -
					  BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2;
				rx_src_addr_lo = (mac_addr +
					BIGMAC2_REGISTER_RX_STAT_GR64) >> 2;
				rx_len = (8 + BIGMAC2_REGISTER_RX_STAT_GRIPJ -
					  BIGMAC2_REGISTER_RX_STAT_GR64) >> 2;
			}
			break;

		case MAC_TYPE_UMAC: /* handled by MSTAT */
		case MAC_TYPE_XMAC: /* handled by MSTAT */
		default:
			mac_addr = port ? GRCBASE_MSTAT1 : GRCBASE_MSTAT0;
			tx_src_addr_lo = (mac_addr +
					  MSTAT_REG_TX_STAT_GTXPOK_LO) >> 2;
			rx_src_addr_lo = (mac_addr +
					  MSTAT_REG_RX_STAT_GR64_LO) >> 2;
			tx_len = sizeof(bp->slowpath->
					mac_stats.mstat_stats.stats_tx) >> 2;
			rx_len = sizeof(bp->slowpath->
					mac_stats.mstat_stats.stats_rx) >> 2;
			break;
		}

		/* TX stats */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = tx_src_addr_lo;
		dmae->src_addr_hi = 0;
		dmae->len = tx_len;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* RX stats land right after the TX block in mac_stats */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_hi = 0;
		dmae->src_addr_lo = rx_src_addr_lo;
		dmae->dst_addr_lo =
			U64_LO(bnx2x_sp_mapping(bp, mac_stats) + (tx_len << 2));
		dmae->dst_addr_hi =
			U64_HI(bnx2x_sp_mapping(bp, mac_stats) + (tx_len << 2));
		dmae->len = rx_len;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* NIG */
	if (!CHIP_IS_E3(bp)) {
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
					    NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
				offsetof(struct nig_stats, egress_mac_pkt0_lo));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
				offsetof(struct nig_stats, egress_mac_pkt0_lo));
		dmae->len = (2*sizeof(u32)) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
					    NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
				offsetof(struct nig_stats, egress_mac_pkt1_lo));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
				offsetof(struct nig_stats, egress_mac_pkt1_lo));
		dmae->len = (2*sizeof(u32)) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* last command in the chain: completes to PCI (stats_comp) */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
					 true, DMAE_COMP_PCI);
	dmae->src_addr_lo = (port ?
			     NIG_REG_STAT1_BRB_DISCARD :
			     NIG_REG_STAT0_BRB_DISCARD) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
	dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;

	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

/* Prepare the single DMAE command that publishes function statistics
 * to the MFW (func_stx).  Used on non-PMF functions, where only the
 * function stats - not port stats - are reported.
 */
static void bnx2x_func_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
					 true, DMAE_COMP_PCI);
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
	dmae->dst_addr_lo = bp->func_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

/* (Re)build the DMAE chain appropriate for this function's role and
 * kick both the HW gathering and the FW statistics ramrod.
 * should be called under stats_sema
 */
static void __bnx2x_stats_start(struct bnx2x *bp)
{
	if (IS_PF(bp)) {
		if (bp->port.pmf)
			bnx2x_port_stats_init(bp);

		else if (bp->func_stx)
			bnx2x_func_stats_init(bp);

		bnx2x_hw_stats_post(bp);
		bnx2x_storm_stats_post(bp);
	}

	bp->stats_started = true;
}

/* FSM action: start statistics collection.
 * NOTE(review): when down_timeout() fails we log but still proceed and
 * later up() the semaphore, which inflates its count - looks fragile;
 * verify against the locking scheme used upstream (TODO confirm).
 */
static void bnx2x_stats_start(struct bnx2x *bp)
{
	if (down_timeout(&bp->stats_sema, HZ/10))
		BNX2X_ERR("Unable to acquire stats lock\n");
	__bnx2x_stats_start(bp);
	up(&bp->stats_sema);
}

/* FSM action: became PMF - pull the port stats left behind by the
 * previous PMF, then start regular collection.
 */
static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
	if (down_timeout(&bp->stats_sema, HZ/10))
		BNX2X_ERR("Unable to acquire stats lock\n");
	bnx2x_stats_comp(bp);
	__bnx2x_stats_pmf_update(bp);
	__bnx2x_stats_start(bp);
	up(&bp->stats_sema);
}

/* FSM action: refresh port stats from the MFW (PMF only). */
static void bnx2x_stats_pmf_update(struct bnx2x *bp)
{
	if (down_timeout(&bp->stats_sema, HZ/10))
		BNX2X_ERR("Unable to acquire stats lock\n");
	__bnx2x_stats_pmf_update(bp);
	up(&bp->stats_sema);
}

/* FSM action: wait for any in-flight DMAE completion, then restart. */
static void bnx2x_stats_restart(struct bnx2x *bp)
{
	/* vfs travel through here as part of the statistics FSM, but no action
	 * is required
	 */
	if (IS_VF(bp))
		return;
	if (down_timeout(&bp->stats_sema, HZ/10))
		BNX2X_ERR("Unable to acquire stats lock\n");
	bnx2x_stats_comp(bp);
	__bnx2x_stats_start(bp);
	up(&bp->stats_sema);
}

/* Fold the freshly-DMAed BMAC (BigMAC1 on E1x, BigMAC2 otherwise)
 * counters into the cumulative port stats (pstats->mac_stx[1]) and
 * mirror the pause/PFC totals into bp->eth_stats.  The UPDATE_STAT64
 * macros operate on the local 'new', 'pstats' and 'diff' variables.
 */
static void bnx2x_bmac_stats_update(struct bnx2x *bp)
{
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;

	if (CHIP_IS_E1x(bp)) {
		struct bmac1_stats *new = bnx2x_sp(bp, mac_stats.bmac1_stats);

		/* the macros below will use "bmac1_stats" type */
		UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
		UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
		UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
		UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
		UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
		UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
		UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
		UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
		UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);

		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
		UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
		UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
		UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
		UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
		UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
		UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
		UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047);
		UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095);
		UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216);
		UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383);
		UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
		UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);

	} else {
		struct bmac2_stats *new = bnx2x_sp(bp, mac_stats.bmac2_stats);

		/* the macros below will use "bmac2_stats" type */
		UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
		UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
		UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
		UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
		UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
		UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
		UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
		UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
		UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);
		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
		UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
		UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
		UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
		UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
		UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
		UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
		UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047);
		UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095);
		UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216);
		UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383);
		UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
		UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);

		/* collect PFC stats (BMAC2 only; BMAC1 has no PFC counters) */
		pstats->pfc_frames_tx_hi = new->tx_stat_gtpp_hi;
		pstats->pfc_frames_tx_lo = new->tx_stat_gtpp_lo;

		pstats->pfc_frames_rx_hi = new->rx_stat_grpp_hi;
		pstats->pfc_frames_rx_lo = new->rx_stat_grpp_lo;
	}

	/* mirror cumulative pause/PFC totals into the ethtool stats */
	estats->pause_frames_received_hi =
				pstats->mac_stx[1].rx_stat_mac_xpf_hi;
	estats->pause_frames_received_lo =
				pstats->mac_stx[1].rx_stat_mac_xpf_lo;

	estats->pause_frames_sent_hi =
				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
	estats->pause_frames_sent_lo =
				pstats->mac_stx[1].tx_stat_outxoffsent_lo;

	estats->pfc_frames_received_hi =
				pstats->pfc_frames_rx_hi;
	estats->pfc_frames_received_lo =
				pstats->pfc_frames_rx_lo;
	estats->pfc_frames_sent_hi =
				pstats->pfc_frames_tx_hi;
	estats->pfc_frames_sent_lo =
				pstats->pfc_frames_tx_lo;
}

/* Fold the freshly-DMAed MSTAT (UMAC/XMAC on newer chips) counters
 * into the cumulative port stats and mirror the derived totals into
 * bp->eth_stats.  ADD_STAT64 operates on the local 'new' and 'pstats'.
 */
static void bnx2x_mstat_stats_update(struct bnx2x *bp)
{
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	struct mstat_stats *new = bnx2x_sp(bp, mac_stats.mstat_stats);

	ADD_STAT64(stats_rx.rx_grerb, rx_stat_ifhcinbadoctets);
	ADD_STAT64(stats_rx.rx_grfcs, rx_stat_dot3statsfcserrors);
	ADD_STAT64(stats_rx.rx_grund, rx_stat_etherstatsundersizepkts);
	ADD_STAT64(stats_rx.rx_grovr, rx_stat_dot3statsframestoolong);
	ADD_STAT64(stats_rx.rx_grfrg, rx_stat_etherstatsfragments);
	ADD_STAT64(stats_rx.rx_grxcf, rx_stat_maccontrolframesreceived);
	ADD_STAT64(stats_rx.rx_grxpf, rx_stat_xoffstateentered);
	ADD_STAT64(stats_rx.rx_grxpf,
		   rx_stat_mac_xpf);
	ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_outxoffsent);
	ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_flowcontroldone);

	/* collect pfc stats */
	ADD_64(pstats->pfc_frames_tx_hi, new->stats_tx.tx_gtxpp_hi,
	       pstats->pfc_frames_tx_lo, new->stats_tx.tx_gtxpp_lo);
	ADD_64(pstats->pfc_frames_rx_hi, new->stats_rx.rx_grxpp_hi,
	       pstats->pfc_frames_rx_lo, new->stats_rx.rx_grxpp_lo);

	ADD_STAT64(stats_tx.tx_gt64, tx_stat_etherstatspkts64octets);
	ADD_STAT64(stats_tx.tx_gt127,
		   tx_stat_etherstatspkts65octetsto127octets);
	ADD_STAT64(stats_tx.tx_gt255,
		   tx_stat_etherstatspkts128octetsto255octets);
	ADD_STAT64(stats_tx.tx_gt511,
		   tx_stat_etherstatspkts256octetsto511octets);
	ADD_STAT64(stats_tx.tx_gt1023,
		   tx_stat_etherstatspkts512octetsto1023octets);
	ADD_STAT64(stats_tx.tx_gt1518,
		   tx_stat_etherstatspkts1024octetsto1522octets);
	ADD_STAT64(stats_tx.tx_gt2047, tx_stat_mac_2047);

	ADD_STAT64(stats_tx.tx_gt4095, tx_stat_mac_4095);
	ADD_STAT64(stats_tx.tx_gt9216, tx_stat_mac_9216);
	ADD_STAT64(stats_tx.tx_gt16383, tx_stat_mac_16383);

	ADD_STAT64(stats_tx.tx_gterr,
		   tx_stat_dot3statsinternalmactransmiterrors);
	ADD_STAT64(stats_tx.tx_gtufl, tx_stat_mac_ufl);

	estats->etherstatspkts1024octetsto1522octets_hi =
	    pstats->mac_stx[1].tx_stat_etherstatspkts1024octetsto1522octets_hi;
	estats->etherstatspkts1024octetsto1522octets_lo =
	    pstats->mac_stx[1].tx_stat_etherstatspkts1024octetsto1522octets_lo;

	/* "over 1522" is the sum of the 2047/4095/9216/16383 buckets */
	estats->etherstatspktsover1522octets_hi =
	    pstats->mac_stx[1].tx_stat_mac_2047_hi;
	estats->etherstatspktsover1522octets_lo =
	    pstats->mac_stx[1].tx_stat_mac_2047_lo;

	ADD_64(estats->etherstatspktsover1522octets_hi,
	       pstats->mac_stx[1].tx_stat_mac_4095_hi,
	       estats->etherstatspktsover1522octets_lo,
	       pstats->mac_stx[1].tx_stat_mac_4095_lo);

	ADD_64(estats->etherstatspktsover1522octets_hi,
	       pstats->mac_stx[1].tx_stat_mac_9216_hi,
	       estats->etherstatspktsover1522octets_lo,
	       pstats->mac_stx[1].tx_stat_mac_9216_lo);

	ADD_64(estats->etherstatspktsover1522octets_hi,
	       pstats->mac_stx[1].tx_stat_mac_16383_hi,
	       estats->etherstatspktsover1522octets_lo,
	       pstats->mac_stx[1].tx_stat_mac_16383_lo);

	estats->pause_frames_received_hi =
				pstats->mac_stx[1].rx_stat_mac_xpf_hi;
	estats->pause_frames_received_lo =
				pstats->mac_stx[1].rx_stat_mac_xpf_lo;

	estats->pause_frames_sent_hi =
				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
	estats->pause_frames_sent_lo =
				pstats->mac_stx[1].tx_stat_outxoffsent_lo;

	estats->pfc_frames_received_hi =
				pstats->pfc_frames_rx_hi;
	estats->pfc_frames_received_lo =
				pstats->pfc_frames_rx_lo;
	estats->pfc_frames_sent_hi =
				pstats->pfc_frames_tx_hi;
	estats->pfc_frames_sent_lo =
				pstats->pfc_frames_tx_lo;
}

/* Fold the freshly-DMAed EMAC counters into the cumulative port stats
 * and derive the pause totals (xon + xoff) for bp->eth_stats.
 * UPDATE_EXTEND_STAT operates on the local 'new' and 'pstats'.
 */
static void bnx2x_emac_stats_update(struct bnx2x *bp)
{
	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
	UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
	UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
	UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
	UPDATE_EXTEND_STAT(tx_stat_outxonsent);
	UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
	UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
	UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);

	/* pause received = xon received + xoff received */
	estats->pause_frames_received_hi =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
	estats->pause_frames_received_lo =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
	ADD_64(estats->pause_frames_received_hi,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
	       estats->pause_frames_received_lo,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);

	/* pause sent = xon sent + xoff sent */
	estats->pause_frames_sent_hi =
			pstats->mac_stx[1].tx_stat_outxonsent_hi;
	estats->pause_frames_sent_lo =
			pstats->mac_stx[1].tx_stat_outxonsent_lo;
	ADD_64(estats->pause_frames_sent_hi,
	       pstats->mac_stx[1].tx_stat_outxoffsent_hi,
	       estats->pause_frames_sent_lo,
	       pstats->mac_stx[1].tx_stat_outxoffsent_lo);
}

/* Process a completed HW-stats DMAE pass: dispatch to the per-MAC-type
 * update routine, then fold in the NIG counters and publish everything
 * into bp->eth_stats.  Returns 0 on success, -1 if no MAC is active.
 */
static int bnx2x_hw_stats_update(struct bnx2x *bp)
{
	struct nig_stats *new = bnx2x_sp(bp, nig_stats);
	struct nig_stats *old = &(bp->port.old_nig_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;

	switch (bp->link_vars.mac_type) {
	case MAC_TYPE_BMAC:
		bnx2x_bmac_stats_update(bp);
		break;

	case MAC_TYPE_EMAC:
		bnx2x_emac_stats_update(bp);
		break;

	case MAC_TYPE_UMAC:
	case MAC_TYPE_XMAC:
		bnx2x_mstat_stats_update(bp);
		break;

	case MAC_TYPE_NONE: /* unreached */
		DP(BNX2X_MSG_STATS,
		   "stats updated by DMAE but no MAC active\n");
		return -1;

	default: /* unreached */
		BNX2X_ERR("Unknown MAC type\n");
	}

	/* NIG counters are deltas vs. the snapshot taken last pass */
	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
		      new->brb_discard - old->brb_discard);
	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
		      new->brb_truncate - old->brb_truncate);

	if (!CHIP_IS_E3(bp)) {
		UPDATE_STAT64_NIG(egress_mac_pkt0,
					etherstatspkts1024octetsto1522octets);
		UPDATE_STAT64_NIG(egress_mac_pkt1,
					etherstatspktsover1522octets);
	}

	/* keep the snapshot for the next delta computation */
	memcpy(old, new, sizeof(struct nig_stats));

	/* publish the cumulative MAC block into the ethtool stats */
	memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
	       sizeof(struct mac_stx));
	estats->brb_drop_hi = pstats->brb_drop_hi;
	estats->brb_drop_lo = pstats->brb_drop_lo;

	pstats->host_port_stats_counter++;

	if (CHIP_IS_E3(bp)) {
		u32 lpi_reg = BP_PORT(bp) ? MISC_REG_CPMU_LP_SM_ENT_CNT_P1
					  : MISC_REG_CPMU_LP_SM_ENT_CNT_P0;
		estats->eee_tx_lpi += REG_RD(bp, lpi_reg);
	}

	if (!BP_NOMCP(bp)) {
		u32 nig_timer_max =
			SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
		if (nig_timer_max != estats->nig_timer_max) {
			estats->nig_timer_max = nig_timer_max;
			BNX2X_ERR("NIG timer max (%u)\n",
				  estats->nig_timer_max);
		}
	}

	return 0;
}

/* Check that all four storms have stamped the FW statistics buffer
 * with the sequence number of the last ramrod we sent.  Returns 0 when
 * the data is coherent, -EAGAIN when any storm has not caught up yet.
 */
static int bnx2x_storm_stats_validate_counters(struct bnx2x *bp)
{
	struct stats_counter *counters = &bp->fw_stats_data->storm_counters;
	u16 cur_stats_counter;
	/* Make sure we use the value of the counter
	 * used for sending the last stats ramrod.
	 */
	cur_stats_counter = bp->stats_counter - 1;

	/* are storm stats valid? */
	if (le16_to_cpu(counters->xstats_counter) != cur_stats_counter) {
		DP(BNX2X_MSG_STATS,
		   "stats not updated by xstorm xstorm counter (0x%x) != stats_counter (0x%x)\n",
		   le16_to_cpu(counters->xstats_counter), bp->stats_counter);
		return -EAGAIN;
	}

	if (le16_to_cpu(counters->ustats_counter) != cur_stats_counter) {
		DP(BNX2X_MSG_STATS,
		   "stats not updated by ustorm ustorm counter (0x%x) != stats_counter (0x%x)\n",
		   le16_to_cpu(counters->ustats_counter), bp->stats_counter);
		return -EAGAIN;
	}

	if (le16_to_cpu(counters->cstats_counter) != cur_stats_counter) {
		DP(BNX2X_MSG_STATS,
		   "stats not updated by cstorm cstorm counter (0x%x) != stats_counter (0x%x)\n",
		   le16_to_cpu(counters->cstats_counter), bp->stats_counter);
		return -EAGAIN;
	}

	if (le16_to_cpu(counters->tstats_counter) != cur_stats_counter) {
		DP(BNX2X_MSG_STATS,
		   "stats not updated by tstorm tstorm counter (0x%x) != stats_counter (0x%x)\n",
		   le16_to_cpu(counters->tstats_counter), bp->stats_counter);
		return -EAGAIN;
	}
	return 0;
}

/* Fold the firmware (storm) per-queue statistics into the per-queue
 * and global driver stats.  Returns -EAGAIN if the storm counters are
 * not yet coherent (PF only; VF counters are managed by the PF).
 */
static int bnx2x_storm_stats_update(struct bnx2x *bp)
{
	struct tstorm_per_port_stats *tport =
		&bp->fw_stats_data->port.tstorm_port_statistics;
	struct tstorm_per_pf_stats *tfunc =
		&bp->fw_stats_data->pf.tstorm_pf_statistics;
	struct host_func_stats *fstats = &bp->func_stats;
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct bnx2x_eth_stats_old *estats_old = &bp->eth_stats_old;
	int i;

	/* vfs stat counter is managed by pf */
	if (IS_PF(bp) && bnx2x_storm_stats_validate_counters(bp))
		return -EAGAIN;

	estats->error_bytes_received_hi = 0;
	estats->error_bytes_received_lo = 0;

	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct tstorm_per_queue_stats *tclient =
			&bp->fw_stats_data->queue_stats[i].
			tstorm_queue_statistics;
		struct tstorm_per_queue_stats *old_tclient =
			&bnx2x_fp_stats(bp, fp)->old_tclient;
		struct ustorm_per_queue_stats *uclient =
			&bp->fw_stats_data->queue_stats[i].
			ustorm_queue_statistics;
		struct ustorm_per_queue_stats *old_uclient =
			&bnx2x_fp_stats(bp, fp)->old_uclient;
		struct xstorm_per_queue_stats *xclient =
			&bp->fw_stats_data->queue_stats[i].
			xstorm_queue_statistics;
		struct xstorm_per_queue_stats *old_xclient =
			&bnx2x_fp_stats(bp, fp)->old_xclient;
		struct bnx2x_eth_q_stats *qstats =
			&bnx2x_fp_stats(bp, fp)->eth_q_stats;
		struct bnx2x_eth_q_stats_old *qstats_old =
			&bnx2x_fp_stats(bp, fp)->eth_q_stats_old;

		/* scratch used by the UPDATE_EXTEND_* macros */
		u32 diff;

		DP(BNX2X_MSG_STATS, "queue[%d]: ucast_sent 0x%x, bcast_sent 0x%x mcast_sent 0x%x\n",
		   i, xclient->ucast_pkts_sent,
		   xclient->bcast_pkts_sent, xclient->mcast_pkts_sent);

		DP(BNX2X_MSG_STATS, "---------------\n");

		/* RX byte counters per cast type */
		UPDATE_QSTAT(tclient->rcv_bcast_bytes,
			     total_broadcast_bytes_received);
		UPDATE_QSTAT(tclient->rcv_mcast_bytes,
			     total_multicast_bytes_received);
		UPDATE_QSTAT(tclient->rcv_ucast_bytes,
			     total_unicast_bytes_received);

		/*
		 * sum to total_bytes_received all
		 * unicast/multicast/broadcast
		 */
		qstats->total_bytes_received_hi =
			qstats->total_broadcast_bytes_received_hi;
		qstats->total_bytes_received_lo =
			qstats->total_broadcast_bytes_received_lo;

		ADD_64(qstats->total_bytes_received_hi,
		       qstats->total_multicast_bytes_received_hi,
		       qstats->total_bytes_received_lo,
		       qstats->total_multicast_bytes_received_lo);

		ADD_64(qstats->total_bytes_received_hi,
		       qstats->total_unicast_bytes_received_hi,
		       qstats->total_bytes_received_lo,
		       qstats->total_unicast_bytes_received_lo);

		qstats->valid_bytes_received_hi =
			qstats->total_bytes_received_hi;
		qstats->valid_bytes_received_lo =
			qstats->total_bytes_received_lo;

		/* RX packet counters per cast type */
		UPDATE_EXTEND_TSTAT(rcv_ucast_pkts,
				    total_unicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_mcast_pkts,
				    total_multicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_bcast_pkts,
				    total_broadcast_packets_received);
		UPDATE_EXTEND_E_TSTAT(pkts_too_big_discard,
				      etherstatsoverrsizepkts, 32);
		UPDATE_EXTEND_E_TSTAT(no_buff_discard, no_buff_discard, 16);

		/* no-buffer drops were counted as received above - back
		 * them out, then account them as no_buff_discard instead
		 */
		SUB_EXTEND_USTAT(ucast_no_buff_pkts,
				 total_unicast_packets_received);
		SUB_EXTEND_USTAT(mcast_no_buff_pkts,
				 total_multicast_packets_received);
		SUB_EXTEND_USTAT(bcast_no_buff_pkts,
				 total_broadcast_packets_received);
		UPDATE_EXTEND_E_USTAT(ucast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_E_USTAT(mcast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_E_USTAT(bcast_no_buff_pkts, no_buff_discard);

		/* TX byte counters per cast type */
		UPDATE_QSTAT(xclient->bcast_bytes_sent,
			     total_broadcast_bytes_transmitted);
		UPDATE_QSTAT(xclient->mcast_bytes_sent,
			     total_multicast_bytes_transmitted);
		UPDATE_QSTAT(xclient->ucast_bytes_sent,
			     total_unicast_bytes_transmitted);

		/*
		 * sum to total_bytes_transmitted all
		 * unicast/multicast/broadcast
		 */
		qstats->total_bytes_transmitted_hi =
			qstats->total_unicast_bytes_transmitted_hi;
		qstats->total_bytes_transmitted_lo =
			qstats->total_unicast_bytes_transmitted_lo;

		ADD_64(qstats->total_bytes_transmitted_hi,
		       qstats->total_broadcast_bytes_transmitted_hi,
		       qstats->total_bytes_transmitted_lo,
		       qstats->total_broadcast_bytes_transmitted_lo);

		ADD_64(qstats->total_bytes_transmitted_hi,
		       qstats->total_multicast_bytes_transmitted_hi,
		       qstats->total_bytes_transmitted_lo,
		       qstats->total_multicast_bytes_transmitted_lo);

		/* TX packet counters per cast type */
		UPDATE_EXTEND_XSTAT(ucast_pkts_sent,
				    total_unicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(mcast_pkts_sent,
				    total_multicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(bcast_pkts_sent,
				    total_broadcast_packets_transmitted);

		UPDATE_EXTEND_TSTAT(checksum_discard,
				    total_packets_received_checksum_discarded);
		UPDATE_EXTEND_TSTAT(ttl0_discard,
				    total_packets_received_ttl0_discarded);

		UPDATE_EXTEND_XSTAT(error_drop_pkts,
				    total_transmitted_dropped_packets_error);

		/* TPA aggregations completed */
		UPDATE_EXTEND_E_USTAT(coalesced_events, total_tpa_aggregations);
		/* Number of network frames aggregated by TPA */
		UPDATE_EXTEND_E_USTAT(coalesced_pkts,
				      total_tpa_aggregated_frames);
		/* Total number of bytes in completed TPA aggregations */
		UPDATE_QSTAT(uclient->coalesced_bytes, total_tpa_bytes);

		UPDATE_ESTAT_QSTAT_64(total_tpa_bytes);

		/* mirror the per-queue totals into the function stats */
		UPDATE_FSTAT_QSTAT(total_bytes_received);
		UPDATE_FSTAT_QSTAT(total_bytes_transmitted);
		UPDATE_FSTAT_QSTAT(total_unicast_packets_received);
		UPDATE_FSTAT_QSTAT(total_multicast_packets_received);
		UPDATE_FSTAT_QSTAT(total_broadcast_packets_received);
		UPDATE_FSTAT_QSTAT(total_unicast_packets_transmitted);
		UPDATE_FSTAT_QSTAT(total_multicast_packets_transmitted);
		UPDATE_FSTAT_QSTAT(total_broadcast_packets_transmitted);
		UPDATE_FSTAT_QSTAT(valid_bytes_received);
	}

	/* bad octets seen by the MAC still count as received bytes */
	ADD_64(estats->total_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->total_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	ADD_64_LE(estats->total_bytes_received_hi,
		  tfunc->rcv_error_bytes.hi,
		  estats->total_bytes_received_lo,
		  tfunc->rcv_error_bytes.lo);

	ADD_64_LE(estats->error_bytes_received_hi,
		  tfunc->rcv_error_bytes.hi,
		  estats->error_bytes_received_lo,
		  tfunc->rcv_error_bytes.lo);

	UPDATE_ESTAT(etherstatsoverrsizepkts, rx_stat_dot3statsframestoolong);

	ADD_64(estats->error_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->error_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	if (bp->port.pmf) {
		/* fwstats is used implicitly by UPDATE_FW_STAT */
		struct bnx2x_fw_port_stats_old *fwstats = &bp->fw_stats_old;
		UPDATE_FW_STAT(mac_filter_discard);
		UPDATE_FW_STAT(mf_tag_discard);
		UPDATE_FW_STAT(brb_truncate_discard);
		UPDATE_FW_STAT(mac_discard);
	}

	/* mark this host snapshot as consistent (start == end) */
	fstats->host_func_stats_start = ++fstats->host_func_stats_end;

	bp->stats_pending = 0;

	return 0;
}

/* Refresh the netdev (struct net_device_stats) counters from the
 * driver's accumulated 64-bit ethernet statistics.
 */
static void bnx2x_net_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct net_device_stats *nstats = &bp->dev->stats;
	unsigned long tmp;
	int i;

	nstats->rx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_received_hi);

	nstats->tx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);

	nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);

	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);

	/* rx_dropped = MAC filter drops + per-queue checksum discards +
	 * whatever was saved from before the last reload
	 */
	tmp = estats->mac_discard;
	for_each_rx_queue(bp, i) {
		struct tstorm_per_queue_stats *old_tclient =
			&bp->fp_stats[i].old_tclient;
		tmp += le32_to_cpu(old_tclient->checksum_discard);
	}
	nstats->rx_dropped = tmp + bp->net_stats_old.rx_dropped;

	nstats->tx_dropped = 0;

	nstats->multicast =
		bnx2x_hilo(&estats->total_multicast_packets_received_hi);

	nstats->collisions =
		bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);

	nstats->rx_length_errors =
		bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
		bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
	nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
				 bnx2x_hilo(&estats->brb_truncate_hi);
	nstats->rx_crc_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
	nstats->rx_frame_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
	nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
	nstats->rx_missed_errors = 0;

	nstats->rx_errors = nstats->rx_length_errors +
			    nstats->rx_over_errors +
			    nstats->rx_crc_errors +
			    nstats->rx_frame_errors +
			    nstats->rx_fifo_errors +
			    nstats->rx_missed_errors;

	nstats->tx_aborted_errors =
		bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
		bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
	nstats->tx_carrier_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
	nstats->tx_fifo_errors = 0;
	nstats->tx_heartbeat_errors = 0;
	nstats->tx_window_errors = 0;

	nstats->tx_errors = nstats->tx_aborted_errors +
			    nstats->tx_carrier_errors +
	    bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
}

/* Fold software (driver-maintained) per-queue counters into the global
 * ethernet stats.  UPDATE_ESTAT_QSTAT implicitly uses the qstats,
 * qstats_old and estats locals - do not rename them.
 */
static void bnx2x_drv_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_eth_q_stats *qstats = &bp->fp_stats[i].eth_q_stats;
		struct bnx2x_eth_q_stats_old *qstats_old =
			&bp->fp_stats[i].eth_q_stats_old;

		UPDATE_ESTAT_QSTAT(driver_xoff);
		UPDATE_ESTAT_QSTAT(rx_err_discard_pkt);
		UPDATE_ESTAT_QSTAT(rx_skb_alloc_failed);
		UPDATE_ESTAT_QSTAT(hw_csum_err);
		UPDATE_ESTAT_QSTAT(driver_filtered_tx_pkt);
	}
}

/* Return true when the management FW requested, via the shmem2 edebug
 * driver interface, that statistics collection be disabled.
 */
static bool bnx2x_edebug_stats_stopped(struct bnx2x *bp)
{
	u32 val;

	if (SHMEM2_HAS(bp, edebug_driver_if[1])) {
		val = SHMEM2_RD(bp, edebug_driver_if[1]);

		if (val == EDEBUG_DRIVER_IF_OP_CODE_DISABLE_STAT)
			return true;
	}

	return false;
}

/* Periodic (timer context) statistics update: harvest HW and storm FW
 * counters, refresh netdev/driver stats and post the next requests.
 */
static void bnx2x_stats_update(struct bnx2x
*bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* we run update from timer context, so give up
	 * if somebody is in the middle of transition
	 */
	if (down_trylock(&bp->stats_sema))
		return;

	if (bnx2x_edebug_stats_stopped(bp) || !bp->stats_started)
		goto out;

	if (IS_PF(bp)) {
		/* skip this round if the previous DMAE has not completed */
		if (*stats_comp != DMAE_COMP_VAL)
			goto out;

		if (bp->port.pmf)
			bnx2x_hw_stats_update(bp);

		if (bnx2x_storm_stats_update(bp)) {
			/* too many consecutive misses is treated as fatal */
			if (bp->stats_pending++ == 3) {
				BNX2X_ERR("storm stats were not updated for 3 times\n");
				bnx2x_panic();
			}
			goto out;
		}
	} else {
		/* vf doesn't collect HW statistics, and doesn't get completions
		 * perform only update
		 */
		bnx2x_storm_stats_update(bp);
	}

	bnx2x_net_stats_update(bp);
	bnx2x_drv_stats_update(bp);

	/* vf is done */
	if (IS_VF(bp))
		goto out;

	if (netif_msg_timer(bp)) {
		struct bnx2x_eth_stats *estats = &bp->eth_stats;

		netdev_dbg(bp->dev, "brb drops %u brb truncate %u\n",
			   estats->brb_drop_lo, estats->brb_truncate_lo);
	}

	/* kick off the next collection round */
	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);

out:
	up(&bp->stats_sema);
}

/* Build the DMAE command(s) that copy the final port and/or function
 * statistics from host memory out to the MCP areas (port_stx/func_stx)
 * when statistics collection is being stopped.  When both areas exist,
 * the port-stats DMAE completes to GRC so the loader chains it to the
 * function-stats DMAE; otherwise completion is to PCI (stats_comp).
 */
static void bnx2x_port_stats_stop(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	bp->executer_idx = 0;

	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC, false, 0);

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		if (bp->func_stx)
			dmae->opcode = bnx2x_dmae_opcode_add_comp(
						opcode, DMAE_COMP_GRC);
		else
			dmae->opcode = bnx2x_dmae_opcode_add_comp(
						opcode, DMAE_COMP_PCI);

		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp,
							    port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = bnx2x_get_port_stats_dma_len(bp);
		if (bp->func_stx) {
			/* chain to the next command via the DMAE loader */
			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
			dmae->comp_addr_hi = 0;
			dmae->comp_val = 1;
		} else {
			dmae->comp_addr_lo =
				U64_LO(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_addr_hi =
				U64_HI(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_val = DMAE_COMP_VAL;

			*stats_comp = 0;
		}
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode =
			bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_val = DMAE_COMP_VAL;

		*stats_comp = 0;
	}
}

/* Stop statistics collection: run one final update and, if any counters
 * were successfully refreshed, flush them out to the MCP (PMF only).
 *
 * NOTE(review): if down_timeout() fails we log but still fall through
 * and up() the semaphore at the end - confirm this is intentional.
 */
static void bnx2x_stats_stop(struct bnx2x *bp)
{
	int update = 0;

	if (down_timeout(&bp->stats_sema, HZ/10))
		BNX2X_ERR("Unable to acquire stats lock\n");

	bp->stats_started = false;

	bnx2x_stats_comp(bp);

	if (bp->port.pmf)
		update = (bnx2x_hw_stats_update(bp) == 0);

	update |= (bnx2x_storm_stats_update(bp) == 0);

	if (update) {
		bnx2x_net_stats_update(bp);

		if (bp->port.pmf)
			bnx2x_port_stats_stop(bp);

		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}

	up(&bp->stats_sema);
}

/* No-op action for state-machine entries that require no work */
static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}

/* Statistics state machine: indexed by [current state][event]; each
 * entry names the action to execute and the state to move to.
 */
static const struct {
	void (*action)(struct bnx2x *bp);
	enum bnx2x_stats_state next_state;
}
bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state	event	*/
{
/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*		LINK_UP	*/ {bnx2x_stats_start,      STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
/*		LINK_UP	*/ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_update,     STATS_STATE_ENABLED},
/*		STOP	*/ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
}
};

/* Dispatch a statistics event through the state machine.  The state
 * transition is performed under stats_lock; the action itself runs
 * outside the lock (it may sleep, e.g. on the stats semaphore).
 */
void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
	enum bnx2x_stats_state state;
	void (*action)(struct bnx2x *bp);
	if (unlikely(bp->panic))
		return;

	spin_lock_bh(&bp->stats_lock);
	state = bp->stats_state;
	bp->stats_state = bnx2x_stats_stm[state][event].next_state;
	action = bnx2x_stats_stm[state][event].action;
	spin_unlock_bh(&bp->stats_lock);

	action(bp);

	if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
		   state, event, bp->stats_state);
}

/* Synchronously DMA the current (base) port statistics to the MCP
 * port_stx area.  Must only run on the PMF with a valid port_stx.
 */
static void bnx2x_port_stats_base_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
					 true, DMAE_COMP_PCI);
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_lo = bp->port.port_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = bnx2x_get_port_stats_dma_len(bp);
	dmae->comp_addr_lo
		= U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

/* This function will prepare the statistics ramrod data the way
 * we will only have to increment the statistics counter and
 * send the ramrod each time we have to.
 *
 * It fills the header and one query entry per statistics client:
 * port, PF, optionally FCoE, and one per ethernet queue.
 */
static void bnx2x_prep_fw_stats_req(struct bnx2x *bp)
{
	int i;
	int first_queue_query_index;
	struct stats_query_header *stats_hdr = &bp->fw_stats_req->hdr;

	dma_addr_t cur_data_offset;
	struct stats_query_entry *cur_query_entry;

	stats_hdr->cmd_num = bp->fw_stats_num;
	stats_hdr->drv_stats_counter = 0;

	/* storm_counters struct contains the counters of completed
	 * statistics requests per storm which are incremented by FW
	 * each time it completes handling a statistics ramrod. We will
	 * check these counters in the timer handler and discard a
	 * (statistics) ramrod completion.
	 */
	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, storm_counters);

	stats_hdr->stats_counters_addrs.hi =
		cpu_to_le32(U64_HI(cur_data_offset));
	stats_hdr->stats_counters_addrs.lo =
		cpu_to_le32(U64_LO(cur_data_offset));

	/* prepare to the first stats ramrod (will be completed with
	 * the counters equal to zero) - init counters to something different.
	 */
	memset(&bp->fw_stats_data->storm_counters, 0xff,
	       sizeof(struct stats_counter));

	/**** Port FW statistics data ****/
	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, port);

	cur_query_entry = &bp->fw_stats_req->query[BNX2X_PORT_QUERY_IDX];

	cur_query_entry->kind = STATS_TYPE_PORT;
	/* For port query index is a DONT CARE */
	cur_query_entry->index = BP_PORT(bp);
	/* For port query funcID is a DONT CARE */
	cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
	cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset));
	cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset));

	/**** PF FW statistics data ****/
	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, pf);

	cur_query_entry = &bp->fw_stats_req->query[BNX2X_PF_QUERY_IDX];

	cur_query_entry->kind = STATS_TYPE_PF;
	/* For PF query index is a DONT CARE */
	cur_query_entry->index = BP_PORT(bp);
	cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
	cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset));
	cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset));

	/**** FCoE FW statistics data ****/
	if (!NO_FCOE(bp)) {
		cur_data_offset = bp->fw_stats_data_mapping +
			offsetof(struct bnx2x_fw_stats_data, fcoe);

		cur_query_entry =
			&bp->fw_stats_req->query[BNX2X_FCOE_QUERY_IDX];

		cur_query_entry->kind = STATS_TYPE_FCOE;
		/* For FCoE query index is a DONT CARE */
		cur_query_entry->index = BP_PORT(bp);
		cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
		cur_query_entry->address.hi =
			cpu_to_le32(U64_HI(cur_data_offset));
		cur_query_entry->address.lo =
			cpu_to_le32(U64_LO(cur_data_offset));
	}

	/**** Clients' queries ****/
	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, queue_stats);

	/* first queue query index depends whether FCoE offloaded request will
	 * be included in the ramrod
	 */
	if (!NO_FCOE(bp))
		first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX;
	else
		first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX - 1;

	for_each_eth_queue(bp, i) {
		cur_query_entry =
			&bp->fw_stats_req->
			query[first_queue_query_index + i];

		cur_query_entry->kind = STATS_TYPE_QUEUE;
		cur_query_entry->index = bnx2x_stats_id(&bp->fp[i]);
		cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
		cur_query_entry->address.hi =
			cpu_to_le32(U64_HI(cur_data_offset));
		cur_query_entry->address.lo =
			cpu_to_le32(U64_LO(cur_data_offset));

		cur_data_offset += sizeof(struct per_queue_stats);
	}

	/* add FCoE queue query if needed */
	if (!NO_FCOE(bp)) {
		cur_query_entry =
			&bp->fw_stats_req->
			query[first_queue_query_index + i];

		cur_query_entry->kind = STATS_TYPE_QUEUE;
		cur_query_entry->index = bnx2x_stats_id(&bp->fp[FCOE_IDX(bp)]);
		cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
		cur_query_entry->address.hi =
			cpu_to_le32(U64_HI(cur_data_offset));
		cur_query_entry->address.lo =
			cpu_to_le32(U64_LO(cur_data_offset));
	}
}

/* Clear driver statistics.  The "old" snapshot buffers are always
 * cleared; the accumulating buffers only on first-time init
 * (bp->stats_init), so counters survive across restarts otherwise.
 */
void bnx2x_memset_stats(struct bnx2x *bp)
{
	int i;

	/* function stats */
	for_each_queue(bp, i) {
		struct bnx2x_fp_stats *fp_stats = &bp->fp_stats[i];

		memset(&fp_stats->old_tclient, 0,
		       sizeof(fp_stats->old_tclient));
		memset(&fp_stats->old_uclient, 0,
		       sizeof(fp_stats->old_uclient));
		memset(&fp_stats->old_xclient, 0,
		       sizeof(fp_stats->old_xclient));
		if (bp->stats_init) {
			memset(&fp_stats->eth_q_stats, 0,
			       sizeof(fp_stats->eth_q_stats));
			memset(&fp_stats->eth_q_stats_old, 0,
			       sizeof(fp_stats->eth_q_stats_old));
		}
	}

	memset(&bp->dev->stats, 0,
	       sizeof(bp->dev->stats));

	if (bp->stats_init) {
		memset(&bp->net_stats_old, 0, sizeof(bp->net_stats_old));
		memset(&bp->fw_stats_old, 0, sizeof(bp->fw_stats_old));
		memset(&bp->eth_stats_old, 0, sizeof(bp->eth_stats_old));
		memset(&bp->eth_stats, 0, sizeof(bp->eth_stats));
		memset(&bp->func_stats, 0, sizeof(bp->func_stats));
	}

	bp->stats_state = STATS_STATE_DISABLED;

	if (bp->port.pmf && bp->port.port_stx)
		bnx2x_port_stats_base_init(bp);

	/* mark the end of statistics initialization */
	bp->stats_init = false;
}

/* Initialize the statistics machinery: read the MCP scratchpad
 * addresses, snapshot the NIG baseline counters, prepare the FW stats
 * ramrod request and reset the driver-side buffers.
 */
void bnx2x_stats_init(struct bnx2x *bp)
{
	int /*abs*/port = BP_PORT(bp);
	int mb_idx = BP_FW_MB_IDX(bp);

	/* VFs have no MCP/NIG access - only clear the software buffers */
	if (IS_VF(bp)) {
		bnx2x_memset_stats(bp);
		return;
	}

	bp->stats_pending = 0;
	bp->executer_idx = 0;
	bp->stats_counter = 0;

	/* port and func stats for management */
	if (!BP_NOMCP(bp)) {
		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
		bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param);

	} else {
		bp->port.port_stx = 0;
		bp->func_stx = 0;
	}
	DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n",
	   bp->port.port_stx, bp->func_stx);

	/* pmf should retrieve port statistics from SP on a non-init*/
	if (!bp->stats_init && bp->port.pmf && bp->port.port_stx)
		bnx2x_stats_handle(bp, STATS_EVENT_PMF);

	/* NOTE(review): redundant - port was already set to BP_PORT(bp)
	 * above and nothing in between changes it
	 */
	port = BP_PORT(bp);
	/* port stats */
	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
	bp->port.old_nig_stats.brb_discard =
			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
	bp->port.old_nig_stats.brb_truncate =
			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
	if (!CHIP_IS_E3(bp)) {
		REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
			    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
		REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
			    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
	}

	/* Prepare statistics ramrod data */
	bnx2x_prep_fw_stats_req(bp);

	/* Clean SP from previous statistics */
	if (bp->stats_init) {
		if (bp->func_stx) {
			memset(bnx2x_sp(bp, func_stats), 0,
			       sizeof(struct host_func_stats));
			bnx2x_func_stats_init(bp);
			bnx2x_hw_stats_post(bp);
			bnx2x_stats_comp(bp);
		}
	}

	bnx2x_memset_stats(bp);
}

/* Snapshot the current counters into the *_old buffers so that the
 * accumulated values survive a function unload/reload cycle.
 * UPDATE_QSTAT_OLD / UPDATE_FW_STAT_OLD implicitly use the qstats,
 * qstats_old, estats and fwstats locals.
 */
void bnx2x_save_statistics(struct bnx2x *bp)
{
	int i;
	struct net_device_stats *nstats = &bp->dev->stats;

	/* save queue statistics */
	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct bnx2x_eth_q_stats *qstats =
			&bnx2x_fp_stats(bp, fp)->eth_q_stats;
		struct bnx2x_eth_q_stats_old *qstats_old =
			&bnx2x_fp_stats(bp, fp)->eth_q_stats_old;

		UPDATE_QSTAT_OLD(total_unicast_bytes_received_hi);
		UPDATE_QSTAT_OLD(total_unicast_bytes_received_lo);
		UPDATE_QSTAT_OLD(total_broadcast_bytes_received_hi);
		UPDATE_QSTAT_OLD(total_broadcast_bytes_received_lo);
		UPDATE_QSTAT_OLD(total_multicast_bytes_received_hi);
		UPDATE_QSTAT_OLD(total_multicast_bytes_received_lo);
		UPDATE_QSTAT_OLD(total_unicast_bytes_transmitted_hi);
		UPDATE_QSTAT_OLD(total_unicast_bytes_transmitted_lo);
		UPDATE_QSTAT_OLD(total_broadcast_bytes_transmitted_hi);
		UPDATE_QSTAT_OLD(total_broadcast_bytes_transmitted_lo);
		UPDATE_QSTAT_OLD(total_multicast_bytes_transmitted_hi);
		UPDATE_QSTAT_OLD(total_multicast_bytes_transmitted_lo);
		UPDATE_QSTAT_OLD(total_tpa_bytes_hi);
		UPDATE_QSTAT_OLD(total_tpa_bytes_lo);
	}

	/* save net_device_stats statistics */
	bp->net_stats_old.rx_dropped = nstats->rx_dropped;

	/* store port firmware statistics */
	if (bp->port.pmf && IS_MF(bp)) {
		struct bnx2x_eth_stats *estats = &bp->eth_stats;
		struct bnx2x_fw_port_stats_old *fwstats =
			&bp->fw_stats_old;
		UPDATE_FW_STAT_OLD(mac_filter_discard);
		UPDATE_FW_STAT_OLD(mf_tag_discard);
		UPDATE_FW_STAT_OLD(brb_truncate_discard);
		UPDATE_FW_STAT_OLD(mac_discard);
	}
}

/* Aggregate per-queue (plus, when present, FCoE and PMF port) counters
 * into a struct afex_stats for the management FW.  void_afex_stats is
 * an opaque pointer cast to struct afex_stats *; stats_type selects
 * whether PMF port-level drops are folded in (VICSTATST_UIF_INDEX).
 */
void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats,
			      u32 stats_type)
{
	int i;
	struct afex_stats *afex_stats = (struct afex_stats *)void_afex_stats;
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct per_queue_stats *fcoe_q_stats =
		&bp->fw_stats_data->queue_stats[FCOE_IDX(bp)];

	struct tstorm_per_queue_stats *fcoe_q_tstorm_stats =
		&fcoe_q_stats->tstorm_queue_statistics;

	struct ustorm_per_queue_stats *fcoe_q_ustorm_stats =
		&fcoe_q_stats->ustorm_queue_statistics;

	struct xstorm_per_queue_stats *fcoe_q_xstorm_stats =
		&fcoe_q_stats->xstorm_queue_statistics;

	struct fcoe_statistics_params *fw_fcoe_stat =
		&bp->fw_stats_data->fcoe;

	memset(afex_stats, 0, sizeof(struct afex_stats));

	for_each_eth_queue(bp, i) {
		struct bnx2x_eth_q_stats *qstats = &bp->fp_stats[i].eth_q_stats;

		ADD_64(afex_stats->rx_unicast_bytes_hi,
		       qstats->total_unicast_bytes_received_hi,
		       afex_stats->rx_unicast_bytes_lo,
		       qstats->total_unicast_bytes_received_lo);

		ADD_64(afex_stats->rx_broadcast_bytes_hi,
		       qstats->total_broadcast_bytes_received_hi,
		       afex_stats->rx_broadcast_bytes_lo,
		       qstats->total_broadcast_bytes_received_lo);

		ADD_64(afex_stats->rx_multicast_bytes_hi,
		       qstats->total_multicast_bytes_received_hi,
		       afex_stats->rx_multicast_bytes_lo,
		       qstats->total_multicast_bytes_received_lo);

		ADD_64(afex_stats->rx_unicast_frames_hi,
		       qstats->total_unicast_packets_received_hi,
		       afex_stats->rx_unicast_frames_lo,
		       qstats->total_unicast_packets_received_lo);

		ADD_64(afex_stats->rx_broadcast_frames_hi,
		       qstats->total_broadcast_packets_received_hi,
		       afex_stats->rx_broadcast_frames_lo,
		       qstats->total_broadcast_packets_received_lo);

		ADD_64(afex_stats->rx_multicast_frames_hi,
		       qstats->total_multicast_packets_received_hi,
		       afex_stats->rx_multicast_frames_lo,
		       qstats->total_multicast_packets_received_lo);

		/* sum to rx_frames_discarded all discarded
		 * packets due to size, ttl0 and checksum
		 */
		ADD_64(afex_stats->rx_frames_discarded_hi,
		       qstats->total_packets_received_checksum_discarded_hi,
		       afex_stats->rx_frames_discarded_lo,
		       qstats->total_packets_received_checksum_discarded_lo);

		ADD_64(afex_stats->rx_frames_discarded_hi,
		       qstats->total_packets_received_ttl0_discarded_hi,
		       afex_stats->rx_frames_discarded_lo,
		       qstats->total_packets_received_ttl0_discarded_lo);

		ADD_64(afex_stats->rx_frames_discarded_hi,
		       qstats->etherstatsoverrsizepkts_hi,
		       afex_stats->rx_frames_discarded_lo,
		       qstats->etherstatsoverrsizepkts_lo);

		ADD_64(afex_stats->rx_frames_dropped_hi,
		       qstats->no_buff_discard_hi,
		       afex_stats->rx_frames_dropped_lo,
		       qstats->no_buff_discard_lo);

		ADD_64(afex_stats->tx_unicast_bytes_hi,
		       qstats->total_unicast_bytes_transmitted_hi,
		       afex_stats->tx_unicast_bytes_lo,
		       qstats->total_unicast_bytes_transmitted_lo);

		ADD_64(afex_stats->tx_broadcast_bytes_hi,
		       qstats->total_broadcast_bytes_transmitted_hi,
		       afex_stats->tx_broadcast_bytes_lo,
		       qstats->total_broadcast_bytes_transmitted_lo);

		ADD_64(afex_stats->tx_multicast_bytes_hi,
		       qstats->total_multicast_bytes_transmitted_hi,
		       afex_stats->tx_multicast_bytes_lo,
		       qstats->total_multicast_bytes_transmitted_lo);

		ADD_64(afex_stats->tx_unicast_frames_hi,
		       qstats->total_unicast_packets_transmitted_hi,
		       afex_stats->tx_unicast_frames_lo,
		       qstats->total_unicast_packets_transmitted_lo);

		ADD_64(afex_stats->tx_broadcast_frames_hi,
		       qstats->total_broadcast_packets_transmitted_hi,
		       afex_stats->tx_broadcast_frames_lo,
		       qstats->total_broadcast_packets_transmitted_lo);

		ADD_64(afex_stats->tx_multicast_frames_hi,
		       qstats->total_multicast_packets_transmitted_hi,
		       afex_stats->tx_multicast_frames_lo,
		       qstats->total_multicast_packets_transmitted_lo);

		ADD_64(afex_stats->tx_frames_dropped_hi,
		       qstats->total_transmitted_dropped_packets_error_hi,
		       afex_stats->tx_frames_dropped_lo,
		       qstats->total_transmitted_dropped_packets_error_lo);
	}

	/* now add FCoE statistics which are collected separately
	 * (both offloaded and non offloaded)
	 */
	if (!NO_FCOE(bp)) {
		ADD_64_LE(afex_stats->rx_unicast_bytes_hi,
			  LE32_0,
			  afex_stats->rx_unicast_bytes_lo,
			  fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt);

		ADD_64_LE(afex_stats->rx_unicast_bytes_hi,
			  fcoe_q_tstorm_stats->rcv_ucast_bytes.hi,
			  afex_stats->rx_unicast_bytes_lo,
			  fcoe_q_tstorm_stats->rcv_ucast_bytes.lo);

		ADD_64_LE(afex_stats->rx_broadcast_bytes_hi,
			  fcoe_q_tstorm_stats->rcv_bcast_bytes.hi,
			  afex_stats->rx_broadcast_bytes_lo,
			  fcoe_q_tstorm_stats->rcv_bcast_bytes.lo);

		ADD_64_LE(afex_stats->rx_multicast_bytes_hi,
			  fcoe_q_tstorm_stats->rcv_mcast_bytes.hi,
			  afex_stats->rx_multicast_bytes_lo,
			  fcoe_q_tstorm_stats->rcv_mcast_bytes.lo);

		ADD_64_LE(afex_stats->rx_unicast_frames_hi,
			  LE32_0,
			  afex_stats->rx_unicast_frames_lo,
			  fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt);

		ADD_64_LE(afex_stats->rx_unicast_frames_hi,
			  LE32_0,
			  afex_stats->rx_unicast_frames_lo,
			  fcoe_q_tstorm_stats->rcv_ucast_pkts);

		ADD_64_LE(afex_stats->rx_broadcast_frames_hi,
			  LE32_0,
			  afex_stats->rx_broadcast_frames_lo,
			  fcoe_q_tstorm_stats->rcv_bcast_pkts);

		/* NOTE(review): rcv_ucast_pkts is folded into
		 * rx_multicast_frames here - looks like a copy/paste
		 * slip; confirm whether rcv_mcast_pkts was intended.
		 */
		ADD_64_LE(afex_stats->rx_multicast_frames_hi,
			  LE32_0,
			  afex_stats->rx_multicast_frames_lo,
			  fcoe_q_tstorm_stats->rcv_ucast_pkts);

		ADD_64_LE(afex_stats->rx_frames_discarded_hi,
			  LE32_0,
			  afex_stats->rx_frames_discarded_lo,
			  fcoe_q_tstorm_stats->checksum_discard);

		ADD_64_LE(afex_stats->rx_frames_discarded_hi,
			  LE32_0,
			  afex_stats->rx_frames_discarded_lo,
			  fcoe_q_tstorm_stats->pkts_too_big_discard);

		ADD_64_LE(afex_stats->rx_frames_discarded_hi,
			  LE32_0,
			  afex_stats->rx_frames_discarded_lo,
			  fcoe_q_tstorm_stats->ttl0_discard);

		ADD_64_LE16(afex_stats->rx_frames_dropped_hi,
			    LE16_0,
			    afex_stats->rx_frames_dropped_lo,
			    fcoe_q_tstorm_stats->no_buff_discard);

		ADD_64_LE(afex_stats->rx_frames_dropped_hi,
			  LE32_0,
			  afex_stats->rx_frames_dropped_lo,
			  fcoe_q_ustorm_stats->ucast_no_buff_pkts);

		ADD_64_LE(afex_stats->rx_frames_dropped_hi,
			  LE32_0,
			  afex_stats->rx_frames_dropped_lo,
			  fcoe_q_ustorm_stats->mcast_no_buff_pkts);

		ADD_64_LE(afex_stats->rx_frames_dropped_hi,
			  LE32_0,
			  afex_stats->rx_frames_dropped_lo,
			  fcoe_q_ustorm_stats->bcast_no_buff_pkts);

		ADD_64_LE(afex_stats->rx_frames_dropped_hi,
			  LE32_0,
			  afex_stats->rx_frames_dropped_lo,
			  fw_fcoe_stat->rx_stat1.fcoe_rx_drop_pkt_cnt);

		ADD_64_LE(afex_stats->rx_frames_dropped_hi,
			  LE32_0,
			  afex_stats->rx_frames_dropped_lo,
			  fw_fcoe_stat->rx_stat2.fcoe_rx_drop_pkt_cnt);

		ADD_64_LE(afex_stats->tx_unicast_bytes_hi,
			  LE32_0,
			  afex_stats->tx_unicast_bytes_lo,
			  fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt);

		ADD_64_LE(afex_stats->tx_unicast_bytes_hi,
			  fcoe_q_xstorm_stats->ucast_bytes_sent.hi,
			  afex_stats->tx_unicast_bytes_lo,
			  fcoe_q_xstorm_stats->ucast_bytes_sent.lo);

		ADD_64_LE(afex_stats->tx_broadcast_bytes_hi,
			  fcoe_q_xstorm_stats->bcast_bytes_sent.hi,
			  afex_stats->tx_broadcast_bytes_lo,
			  fcoe_q_xstorm_stats->bcast_bytes_sent.lo);

		ADD_64_LE(afex_stats->tx_multicast_bytes_hi,
			  fcoe_q_xstorm_stats->mcast_bytes_sent.hi,
			  afex_stats->tx_multicast_bytes_lo,
			  fcoe_q_xstorm_stats->mcast_bytes_sent.lo);

		ADD_64_LE(afex_stats->tx_unicast_frames_hi,
			  LE32_0,
			  afex_stats->tx_unicast_frames_lo,
			  fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt);

		ADD_64_LE(afex_stats->tx_unicast_frames_hi,
			  LE32_0,
			  afex_stats->tx_unicast_frames_lo,
			  fcoe_q_xstorm_stats->ucast_pkts_sent);

		ADD_64_LE(afex_stats->tx_broadcast_frames_hi,
			  LE32_0,
			  afex_stats->tx_broadcast_frames_lo,
			  fcoe_q_xstorm_stats->bcast_pkts_sent);

		ADD_64_LE(afex_stats->tx_multicast_frames_hi,
			  LE32_0,
			  afex_stats->tx_multicast_frames_lo,
			  fcoe_q_xstorm_stats->mcast_pkts_sent);

		ADD_64_LE(afex_stats->tx_frames_dropped_hi,
			  LE32_0,
			  afex_stats->tx_frames_dropped_lo,
			  fcoe_q_xstorm_stats->error_drop_pkts);
	}

	/* if port stats are requested, add them to the PMF
	 * stats, as anyway they will be accumulated by the
	 * MCP before sent to the switch
	 */
	if ((bp->port.pmf) && (stats_type == VICSTATST_UIF_INDEX)) {
		ADD_64(afex_stats->rx_frames_dropped_hi,
		       0,
		       afex_stats->rx_frames_dropped_lo,
		       estats->mac_filter_discard);
		ADD_64(afex_stats->rx_frames_dropped_hi,
		       0,
		       afex_stats->rx_frames_dropped_lo,
		       estats->brb_truncate_discard);
		ADD_64(afex_stats->rx_frames_discarded_hi,
		       0,
		       afex_stats->rx_frames_discarded_lo,
		       estats->mac_discard);
	}
}

/* Run func_to_exec(cookie) with statistics collection quiesced: wait
 * for outstanding DMAE completion, execute, then restart statistics.
 *
 * NOTE(review): a down_timeout() failure is logged but execution still
 * proceeds and up() is called at the end - confirm this is intentional.
 */
void bnx2x_stats_safe_exec(struct bnx2x *bp,
			   void (func_to_exec)(void *cookie),
			   void *cookie){
	if (down_timeout(&bp->stats_sema, HZ/10))
		BNX2X_ERR("Unable to acquire stats lock\n");
	bnx2x_stats_comp(bp);
	func_to_exec(cookie);
	__bnx2x_stats_start(bp);
	up(&bp->stats_sema);
}