/* bnx2x_stats.c: QLogic Everest network driver.
 *
 * Copyright (c) 2007-2013 Broadcom Corporation
 * Copyright (c) 2014 QLogic Corporation
 * All rights reserved
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "bnx2x_stats.h"
#include "bnx2x_cmn.h"
#include "bnx2x_sriov.h"

extern const u32 dmae_reg_go_c[];

/* Statistics */

/*
 * General service functions
 */

static inline long bnx2x_hilo(u32 *hiref)
{
	u32 lo = *(hiref + 1);
#if (BITS_PER_LONG == 64)
	u32 hi = *hiref;

	return HILO_U64(hi, lo);
#else
	return lo;
#endif
}

static inline u16 bnx2x_get_port_stats_dma_len(struct bnx2x *bp)
{
	u16 res = 0;

	/* 'newest' convention - shmem2 contains the size of the port stats */
	if (SHMEM2_HAS(bp, sizeof_port_stats)) {
		u32 size = SHMEM2_RD(bp, sizeof_port_stats);
		if (size)
			res = size;

		/* prevent newer BC from causing buffer overflow */
		if (res > sizeof(struct host_port_stats))
			res = sizeof(struct host_port_stats);
	}

	/* Older convention - all BCs support the port stats' fields up until
	 * the 'not_used' field
	 */
	if (!res) {
		res = offsetof(struct host_port_stats, not_used) + 4;

		/* if PFC stats are supported by the MFW, DMA them as well */
		if (bp->flags & BC_SUPPORTS_PFC_STATS) {
			res += offsetof(struct host_port_stats,
					pfc_frames_rx_lo) -
			       offsetof(struct host_port_stats,
					pfc_frames_tx_hi) + 4;
		}
	}

	res >>= 2;

	WARN_ON(res > 2 * DMAE_LEN32_RD_MAX);
	return res;
}

/*
 * Init service functions
 */

static void bnx2x_dp_stats(struct bnx2x *bp)
{
	int i;

	DP(BNX2X_MSG_STATS, "dumping stats:\n"
	   "fw_stats_req\n"
	   "    hdr\n"
	   "        cmd_num %d\n"
	   "        reserved0 %d\n"
	   "        drv_stats_counter %d\n"
	   "        reserved1 %d\n"
	   "        stats_counters_addrs %x %x\n",
	   bp->fw_stats_req->hdr.cmd_num,
	   bp->fw_stats_req->hdr.reserved0,
	   bp->fw_stats_req->hdr.drv_stats_counter,
	   bp->fw_stats_req->hdr.reserved1,
	   bp->fw_stats_req->hdr.stats_counters_addrs.hi,
	   bp->fw_stats_req->hdr.stats_counters_addrs.lo);

	for (i = 0; i < bp->fw_stats_req->hdr.cmd_num; i++) {
		DP(BNX2X_MSG_STATS,
		   "query[%d]\n"
		   "    kind %d\n"
		   "    index %d\n"
		   "    funcID %d\n"
		   "    reserved %d\n"
		   "    address %x %x\n",
		   i, bp->fw_stats_req->query[i].kind,
		   bp->fw_stats_req->query[i].index,
		   bp->fw_stats_req->query[i].funcID,
		   bp->fw_stats_req->query[i].reserved,
		   bp->fw_stats_req->query[i].address.hi,
		   bp->fw_stats_req->query[i].address.lo);
	}
}
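/* Statistics are collected over two independent paths: MAC and NIG
 * counters are pulled from GRC registers by the DMAE block, while the
 * per-function/per-queue counters maintained by the firmware storms
 * are fetched with a STAT_QUERY ramrod. The helpers below drive both.
 */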
/* Post the next statistics ramrod. Protect it with the stats lock in
 * order to ensure the strict order between statistics ramrods
 * (each ramrod has a sequence number passed in a
 * bp->fw_stats_req->hdr.drv_stats_counter and ramrods must be
 * sent in order).
 */
static void bnx2x_storm_stats_post(struct bnx2x *bp)
{
	int rc;

	if (bp->stats_pending)
		return;

	bp->fw_stats_req->hdr.drv_stats_counter =
		cpu_to_le16(bp->stats_counter++);

	DP(BNX2X_MSG_STATS, "Sending statistics ramrod %d\n",
	   le16_to_cpu(bp->fw_stats_req->hdr.drv_stats_counter));

	/* adjust the ramrod to include VF queues statistics */
	bnx2x_iov_adjust_stats_req(bp);
	bnx2x_dp_stats(bp);

	/* send FW stats ramrod */
	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0,
			   U64_HI(bp->fw_stats_req_mapping),
			   U64_LO(bp->fw_stats_req_mapping),
			   NONE_CONNECTION_TYPE);
	if (rc == 0)
		bp->stats_pending = 1;
}
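/* The DMAE commands prepared in the slowpath buffer are executed as a
 * chain: a "loader" command is posted which copies a prepared command
 * into DMAE command memory (slot loader_idx + 1) and completes by
 * writing 1 to that slot's GO register (dmae_reg_go_c), starting it;
 * the prepared commands in turn complete back to the loader's GO
 * register, chaining execution until the final command, which
 * completes to the stats_comp word in host memory (DMAE_COMP_VAL).
 */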
static void bnx2x_hw_stats_post(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	*stats_comp = DMAE_COMP_VAL;
	if (CHIP_REV_IS_SLOW(bp))
		return;

	/* Update MCP's statistics if possible */
	if (bp->func_stx)
		memcpy(bnx2x_sp(bp, func_stats), &bp->func_stats,
		       sizeof(bp->func_stats));

	/* loader */
	if (bp->executer_idx) {
		int loader_idx = PMF_DMAE_C(bp);
		u32 opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
					       true, DMAE_COMP_GRC);
		opcode = bnx2x_dmae_opcode_clr_src_reset(opcode);

		memset(dmae, 0, sizeof(struct dmae_command));
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
				     sizeof(struct dmae_command) *
				     (loader_idx + 1)) >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct dmae_command) >> 2;
		if (CHIP_IS_E1(bp))
			dmae->len--;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, loader_idx);

	} else if (bp->func_stx) {
		*stats_comp = 0;
		bnx2x_issue_dmae_with_comp(bp, dmae, stats_comp);
	}
}

static void bnx2x_stats_comp(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
	int cnt = 10;

	might_sleep();
	while (*stats_comp != DMAE_COMP_VAL) {
		if (!cnt) {
			BNX2X_ERR("timeout waiting for stats finished\n");
			break;
		}
		cnt--;
		usleep_range(1000, 2000);
	}
}

/*
 * Statistics service functions
 */
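/* On a PMF change, read the port statistics accumulated so far back
 * from the MCP scratchpad (port_stx) so that the new PMF continues
 * from the totals the previous PMF left there. The read is split in
 * two because a single DMAE read is capped at DMAE_LEN32_RD_MAX
 * dwords.
 */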
/* should be called under stats_sema */
static void bnx2x_stats_pmf_update(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI, false, 0);

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_GRC);
	dmae->src_addr_lo = bp->port.port_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->len = DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
	dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->len = bnx2x_get_port_stats_dma_len(bp) - DMAE_LEN32_RD_MAX;

	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}
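/* Build the PMF's DMAE chain: write the host port and function stats
 * out to the MCP scratchpad, then read the MAC statistics (EMAC, BMAC
 * or MSTAT block, depending on bp->link_vars.mac_type) and the NIG
 * counters into the slowpath buffer. Every command completes to GRC
 * except the last one, which completes to stats_comp in host memory.
 */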
static void bnx2x_port_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	int port = BP_PORT(bp);
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 mac_addr;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->link_vars.link_up || !bp->port.pmf) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* MCP */
	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
				   true, DMAE_COMP_GRC);

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = bnx2x_get_port_stats_dma_len(bp);
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* MAC */
	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
				   true, DMAE_COMP_GRC);

	/* EMAC is special */
	if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
		mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);

		/* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_RX_STAT_AC_28 */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->len = 1;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_TX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	} else {
		u32 tx_src_addr_lo, rx_src_addr_lo;
		u16 rx_len, tx_len;

		/* configure the params according to MAC type */
		switch (bp->link_vars.mac_type) {
		case MAC_TYPE_BMAC:
			mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
					   NIG_REG_INGRESS_BMAC0_MEM);

			/* BIGMAC_REGISTER_TX_STAT_GTPKT ..
			   BIGMAC_REGISTER_TX_STAT_GTBYT */
			if (CHIP_IS_E1x(bp)) {
				tx_src_addr_lo = (mac_addr +
					BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
				tx_len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
					  BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
				rx_src_addr_lo = (mac_addr +
					BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
				rx_len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
					  BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
			} else {
				tx_src_addr_lo = (mac_addr +
					BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2;
				tx_len = (8 + BIGMAC2_REGISTER_TX_STAT_GTBYT -
					  BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2;
				rx_src_addr_lo = (mac_addr +
					BIGMAC2_REGISTER_RX_STAT_GR64) >> 2;
				rx_len = (8 + BIGMAC2_REGISTER_RX_STAT_GRIPJ -
					  BIGMAC2_REGISTER_RX_STAT_GR64) >> 2;
			}
			break;

		case MAC_TYPE_UMAC: /* handled by MSTAT */
		case MAC_TYPE_XMAC: /* handled by MSTAT */
		default:
			mac_addr = port ? GRCBASE_MSTAT1 : GRCBASE_MSTAT0;
			tx_src_addr_lo = (mac_addr +
					  MSTAT_REG_TX_STAT_GTXPOK_LO) >> 2;
			rx_src_addr_lo = (mac_addr +
					  MSTAT_REG_RX_STAT_GR64_LO) >> 2;
			tx_len = sizeof(bp->slowpath->
					mac_stats.mstat_stats.stats_tx) >> 2;
			rx_len = sizeof(bp->slowpath->
					mac_stats.mstat_stats.stats_rx) >> 2;
			break;
		}

		/* TX stats */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = tx_src_addr_lo;
		dmae->src_addr_hi = 0;
		dmae->len = tx_len;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* RX stats */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_hi = 0;
		dmae->src_addr_lo = rx_src_addr_lo;
		dmae->dst_addr_lo =
			U64_LO(bnx2x_sp_mapping(bp, mac_stats) + (tx_len << 2));
		dmae->dst_addr_hi =
			U64_HI(bnx2x_sp_mapping(bp, mac_stats) + (tx_len << 2));
		dmae->len = rx_len;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* NIG */
	if (!CHIP_IS_E3(bp)) {
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
					    NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			       offsetof(struct nig_stats, egress_mac_pkt0_lo));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			       offsetof(struct nig_stats, egress_mac_pkt0_lo));
		dmae->len = (2*sizeof(u32)) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
					    NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			       offsetof(struct nig_stats, egress_mac_pkt1_lo));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			       offsetof(struct nig_stats, egress_mac_pkt1_lo));
		dmae->len = (2*sizeof(u32)) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
					 true, DMAE_COMP_PCI);
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
				    NIG_REG_STAT0_BRB_DISCARD) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
	dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;

	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}
static void bnx2x_func_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
					 true, DMAE_COMP_PCI);
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
	dmae->dst_addr_lo = bp->func_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

/* should be called under stats_sema */
static void bnx2x_stats_start(struct bnx2x *bp)
{
	if (IS_PF(bp)) {
		if (bp->port.pmf)
			bnx2x_port_stats_init(bp);

		else if (bp->func_stx)
			bnx2x_func_stats_init(bp);

		bnx2x_hw_stats_post(bp);
		bnx2x_storm_stats_post(bp);
	}
}

static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_pmf_update(bp);
	bnx2x_stats_start(bp);
}

static void bnx2x_stats_restart(struct bnx2x *bp)
{
	/* vfs travel through here as part of the statistics FSM, but no action
	 * is required
	 */
	if (IS_VF(bp))
		return;

	bnx2x_stats_comp(bp);
	bnx2x_stats_start(bp);
}
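/* The UPDATE_STAT64()/ADD_STAT64()/UPDATE_EXTEND_STAT() macros used
 * below (defined in bnx2x_stats.h) extend the MAC's hardware counters
 * into the 64-bit hi/lo pairs of host_port_stats by accumulating the
 * delta since the previous readout, which absorbs wraparound of the
 * narrower hardware counters. They rely on a local variable "new"
 * pointing at the freshly DMAed MAC statistics block.
 */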
static void bnx2x_bmac_stats_update(struct bnx2x *bp)
{
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;

	if (CHIP_IS_E1x(bp)) {
		struct bmac1_stats *new = bnx2x_sp(bp, mac_stats.bmac1_stats);

		/* the macros below will use "bmac1_stats" type */
		UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
		UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
		UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
		UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
		UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
		UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
		UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
		UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
		UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);

		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
		UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
		UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
		UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
		UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
		UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
		UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
		UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047);
		UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095);
		UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216);
		UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383);
		UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
		UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);

	} else {
		struct bmac2_stats *new = bnx2x_sp(bp, mac_stats.bmac2_stats);

		/* the macros below will use "bmac2_stats" type */
		UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
		UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
		UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
		UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
		UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
		UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
		UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
		UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
		UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);
		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
		UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
		UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
		UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
		UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
		UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
		UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
		UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047);
		UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095);
		UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216);
		UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383);
		UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
		UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);

		/* collect PFC stats */
		pstats->pfc_frames_tx_hi = new->tx_stat_gtpp_hi;
		pstats->pfc_frames_tx_lo = new->tx_stat_gtpp_lo;

		pstats->pfc_frames_rx_hi = new->rx_stat_grpp_hi;
		pstats->pfc_frames_rx_lo = new->rx_stat_grpp_lo;
	}

	estats->pause_frames_received_hi =
				pstats->mac_stx[1].rx_stat_mac_xpf_hi;
	estats->pause_frames_received_lo =
				pstats->mac_stx[1].rx_stat_mac_xpf_lo;

	estats->pause_frames_sent_hi =
				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
	estats->pause_frames_sent_lo =
				pstats->mac_stx[1].tx_stat_outxoffsent_lo;

	estats->pfc_frames_received_hi =
				pstats->pfc_frames_rx_hi;
	estats->pfc_frames_received_lo =
				pstats->pfc_frames_rx_lo;
	estats->pfc_frames_sent_hi =
				pstats->pfc_frames_tx_hi;
	estats->pfc_frames_sent_lo =
				pstats->pfc_frames_tx_lo;
}
static void bnx2x_mstat_stats_update(struct bnx2x *bp)
{
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	struct mstat_stats *new = bnx2x_sp(bp, mac_stats.mstat_stats);

	ADD_STAT64(stats_rx.rx_grerb, rx_stat_ifhcinbadoctets);
	ADD_STAT64(stats_rx.rx_grfcs, rx_stat_dot3statsfcserrors);
	ADD_STAT64(stats_rx.rx_grund, rx_stat_etherstatsundersizepkts);
	ADD_STAT64(stats_rx.rx_grovr, rx_stat_dot3statsframestoolong);
	ADD_STAT64(stats_rx.rx_grfrg, rx_stat_etherstatsfragments);
	ADD_STAT64(stats_rx.rx_grxcf, rx_stat_maccontrolframesreceived);
	ADD_STAT64(stats_rx.rx_grxpf, rx_stat_xoffstateentered);
	ADD_STAT64(stats_rx.rx_grxpf, rx_stat_mac_xpf);
	ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_outxoffsent);
	ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_flowcontroldone);

	/* collect pfc stats */
	ADD_64(pstats->pfc_frames_tx_hi, new->stats_tx.tx_gtxpp_hi,
	       pstats->pfc_frames_tx_lo, new->stats_tx.tx_gtxpp_lo);
	ADD_64(pstats->pfc_frames_rx_hi, new->stats_rx.rx_grxpp_hi,
	       pstats->pfc_frames_rx_lo, new->stats_rx.rx_grxpp_lo);

	ADD_STAT64(stats_tx.tx_gt64, tx_stat_etherstatspkts64octets);
	ADD_STAT64(stats_tx.tx_gt127,
			tx_stat_etherstatspkts65octetsto127octets);
	ADD_STAT64(stats_tx.tx_gt255,
			tx_stat_etherstatspkts128octetsto255octets);
	ADD_STAT64(stats_tx.tx_gt511,
			tx_stat_etherstatspkts256octetsto511octets);
	ADD_STAT64(stats_tx.tx_gt1023,
			tx_stat_etherstatspkts512octetsto1023octets);
	ADD_STAT64(stats_tx.tx_gt1518,
			tx_stat_etherstatspkts1024octetsto1522octets);
	ADD_STAT64(stats_tx.tx_gt2047, tx_stat_mac_2047);

	ADD_STAT64(stats_tx.tx_gt4095, tx_stat_mac_4095);
	ADD_STAT64(stats_tx.tx_gt9216, tx_stat_mac_9216);
	ADD_STAT64(stats_tx.tx_gt16383, tx_stat_mac_16383);

	ADD_STAT64(stats_tx.tx_gterr,
			tx_stat_dot3statsinternalmactransmiterrors);
	ADD_STAT64(stats_tx.tx_gtufl, tx_stat_mac_ufl);

	estats->etherstatspkts1024octetsto1522octets_hi =
	    pstats->mac_stx[1].tx_stat_etherstatspkts1024octetsto1522octets_hi;
	estats->etherstatspkts1024octetsto1522octets_lo =
	    pstats->mac_stx[1].tx_stat_etherstatspkts1024octetsto1522octets_lo;

	estats->etherstatspktsover1522octets_hi =
	    pstats->mac_stx[1].tx_stat_mac_2047_hi;
	estats->etherstatspktsover1522octets_lo =
	    pstats->mac_stx[1].tx_stat_mac_2047_lo;

	ADD_64(estats->etherstatspktsover1522octets_hi,
	       pstats->mac_stx[1].tx_stat_mac_4095_hi,
	       estats->etherstatspktsover1522octets_lo,
	       pstats->mac_stx[1].tx_stat_mac_4095_lo);

	ADD_64(estats->etherstatspktsover1522octets_hi,
	       pstats->mac_stx[1].tx_stat_mac_9216_hi,
	       estats->etherstatspktsover1522octets_lo,
	       pstats->mac_stx[1].tx_stat_mac_9216_lo);

	ADD_64(estats->etherstatspktsover1522octets_hi,
	       pstats->mac_stx[1].tx_stat_mac_16383_hi,
	       estats->etherstatspktsover1522octets_lo,
	       pstats->mac_stx[1].tx_stat_mac_16383_lo);

	estats->pause_frames_received_hi =
				pstats->mac_stx[1].rx_stat_mac_xpf_hi;
	estats->pause_frames_received_lo =
				pstats->mac_stx[1].rx_stat_mac_xpf_lo;

	estats->pause_frames_sent_hi =
				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
	estats->pause_frames_sent_lo =
				pstats->mac_stx[1].tx_stat_outxoffsent_lo;

	estats->pfc_frames_received_hi =
				pstats->pfc_frames_rx_hi;
	estats->pfc_frames_received_lo =
				pstats->pfc_frames_rx_lo;
	estats->pfc_frames_sent_hi =
				pstats->pfc_frames_tx_hi;
	estats->pfc_frames_sent_lo =
				pstats->pfc_frames_tx_lo;
}
static void bnx2x_emac_stats_update(struct bnx2x *bp)
{
	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
	UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
	UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
	UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
	UPDATE_EXTEND_STAT(tx_stat_outxonsent);
	UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
	UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
	UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);

	estats->pause_frames_received_hi =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
	estats->pause_frames_received_lo =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
	ADD_64(estats->pause_frames_received_hi,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
	       estats->pause_frames_received_lo,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);

	estats->pause_frames_sent_hi =
			pstats->mac_stx[1].tx_stat_outxonsent_hi;
	estats->pause_frames_sent_lo =
			pstats->mac_stx[1].tx_stat_outxonsent_lo;
	ADD_64(estats->pause_frames_sent_hi,
	       pstats->mac_stx[1].tx_stat_outxoffsent_hi,
	       estats->pause_frames_sent_lo,
	       pstats->mac_stx[1].tx_stat_outxoffsent_lo);
}
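/* Fold a completed DMAE readout into the driver's statistics: dispatch
 * to the MAC-specific helper above, extend the NIG discard/truncate
 * counters, snapshot the accumulated mac_stx[1] block into
 * bp->eth_stats and bump host_port_stats_counter for the MCP.
 */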
static int bnx2x_hw_stats_update(struct bnx2x *bp)
{
	struct nig_stats *new = bnx2x_sp(bp, nig_stats);
	struct nig_stats *old = &(bp->port.old_nig_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;

	switch (bp->link_vars.mac_type) {
	case MAC_TYPE_BMAC:
		bnx2x_bmac_stats_update(bp);
		break;

	case MAC_TYPE_EMAC:
		bnx2x_emac_stats_update(bp);
		break;

	case MAC_TYPE_UMAC:
	case MAC_TYPE_XMAC:
		bnx2x_mstat_stats_update(bp);
		break;

	case MAC_TYPE_NONE: /* unreached */
		DP(BNX2X_MSG_STATS,
		   "stats updated by DMAE but no MAC active\n");
		return -1;

	default: /* unreached */
		BNX2X_ERR("Unknown MAC type\n");
	}

	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
		      new->brb_discard - old->brb_discard);
	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
		      new->brb_truncate - old->brb_truncate);

	if (!CHIP_IS_E3(bp)) {
		UPDATE_STAT64_NIG(egress_mac_pkt0,
					etherstatspkts1024octetsto1522octets);
		UPDATE_STAT64_NIG(egress_mac_pkt1,
					etherstatspktsover1522octets);
	}

	memcpy(old, new, sizeof(struct nig_stats));

	BUILD_BUG_ON(sizeof(estats->shared) != sizeof(pstats->mac_stx[1]));
	memcpy(&(estats->shared), &(pstats->mac_stx[1]),
	       sizeof(struct mac_stx));
	estats->brb_drop_hi = pstats->brb_drop_hi;
	estats->brb_drop_lo = pstats->brb_drop_lo;

	pstats->host_port_stats_counter++;

	if (CHIP_IS_E3(bp)) {
		u32 lpi_reg = BP_PORT(bp) ? MISC_REG_CPMU_LP_SM_ENT_CNT_P1
					  : MISC_REG_CPMU_LP_SM_ENT_CNT_P0;
		estats->eee_tx_lpi += REG_RD(bp, lpi_reg);
	}

	if (!BP_NOMCP(bp)) {
		u32 nig_timer_max =
			SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
		if (nig_timer_max != estats->nig_timer_max) {
			estats->nig_timer_max = nig_timer_max;
			BNX2X_ERR("NIG timer max (%u)\n",
				  estats->nig_timer_max);
		}
	}

	return 0;
}
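/* Each statistics ramrod carries a fresh drv_stats_counter; every
 * storm copies that counter into storm_counters once it has finished
 * DMAing its share of the data. The buffer is therefore consistent
 * only when all four storms echo the counter of the last ramrod sent,
 * i.e. bp->stats_counter - 1.
 */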
static int bnx2x_storm_stats_validate_counters(struct bnx2x *bp)
{
	struct stats_counter *counters = &bp->fw_stats_data->storm_counters;
	u16 cur_stats_counter;
	/* Make sure we use the value of the counter
	 * used for sending the last stats ramrod.
	 */
	cur_stats_counter = bp->stats_counter - 1;

	/* are storm stats valid? */
	if (le16_to_cpu(counters->xstats_counter) != cur_stats_counter) {
		DP(BNX2X_MSG_STATS,
		   "stats not updated by xstorm  xstorm counter (0x%x) != stats_counter (0x%x)\n",
		   le16_to_cpu(counters->xstats_counter), bp->stats_counter);
		return -EAGAIN;
	}

	if (le16_to_cpu(counters->ustats_counter) != cur_stats_counter) {
		DP(BNX2X_MSG_STATS,
		   "stats not updated by ustorm  ustorm counter (0x%x) != stats_counter (0x%x)\n",
		   le16_to_cpu(counters->ustats_counter), bp->stats_counter);
		return -EAGAIN;
	}

	if (le16_to_cpu(counters->cstats_counter) != cur_stats_counter) {
		DP(BNX2X_MSG_STATS,
		   "stats not updated by cstorm  cstorm counter (0x%x) != stats_counter (0x%x)\n",
		   le16_to_cpu(counters->cstats_counter), bp->stats_counter);
		return -EAGAIN;
	}

	if (le16_to_cpu(counters->tstats_counter) != cur_stats_counter) {
		DP(BNX2X_MSG_STATS,
		   "stats not updated by tstorm  tstorm counter (0x%x) != stats_counter (0x%x)\n",
		   le16_to_cpu(counters->tstats_counter), bp->stats_counter);
		return -EAGAIN;
	}
	return 0;
}
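/* Accumulate the per-queue tstorm/ustorm/xstorm statistics into the
 * driver's 64-bit queue counters. The old_*client snapshots hold the
 * previous firmware values, letting the UPDATE_EXTEND_* macros add
 * deltas of the firmware's narrower (16/32-bit) counters.
 */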
static int bnx2x_storm_stats_update(struct bnx2x *bp)
{
	struct tstorm_per_port_stats *tport =
		&bp->fw_stats_data->port.tstorm_port_statistics;
	struct tstorm_per_pf_stats *tfunc =
		&bp->fw_stats_data->pf.tstorm_pf_statistics;
	struct host_func_stats *fstats = &bp->func_stats;
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct bnx2x_eth_stats_old *estats_old = &bp->eth_stats_old;
	int i;

	/* vfs stat counter is managed by pf */
	if (IS_PF(bp) && bnx2x_storm_stats_validate_counters(bp))
		return -EAGAIN;

	estats->error_bytes_received_hi = 0;
	estats->error_bytes_received_lo = 0;

	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct tstorm_per_queue_stats *tclient =
			&bp->fw_stats_data->queue_stats[i].
			tstorm_queue_statistics;
		struct tstorm_per_queue_stats *old_tclient =
			&bnx2x_fp_stats(bp, fp)->old_tclient;
		struct ustorm_per_queue_stats *uclient =
			&bp->fw_stats_data->queue_stats[i].
			ustorm_queue_statistics;
		struct ustorm_per_queue_stats *old_uclient =
			&bnx2x_fp_stats(bp, fp)->old_uclient;
		struct xstorm_per_queue_stats *xclient =
			&bp->fw_stats_data->queue_stats[i].
			xstorm_queue_statistics;
		struct xstorm_per_queue_stats *old_xclient =
			&bnx2x_fp_stats(bp, fp)->old_xclient;
		struct bnx2x_eth_q_stats *qstats =
			&bnx2x_fp_stats(bp, fp)->eth_q_stats;
		struct bnx2x_eth_q_stats_old *qstats_old =
			&bnx2x_fp_stats(bp, fp)->eth_q_stats_old;

		u32 diff;

		DP(BNX2X_MSG_STATS, "queue[%d]: ucast_sent 0x%x, bcast_sent 0x%x mcast_sent 0x%x\n",
		   i, xclient->ucast_pkts_sent,
		   xclient->bcast_pkts_sent, xclient->mcast_pkts_sent);

		DP(BNX2X_MSG_STATS, "---------------\n");

		UPDATE_QSTAT(tclient->rcv_bcast_bytes,
			     total_broadcast_bytes_received);
		UPDATE_QSTAT(tclient->rcv_mcast_bytes,
			     total_multicast_bytes_received);
		UPDATE_QSTAT(tclient->rcv_ucast_bytes,
			     total_unicast_bytes_received);

		/*
		 * sum to total_bytes_received all
		 * unicast/multicast/broadcast
		 */
		qstats->total_bytes_received_hi =
			qstats->total_broadcast_bytes_received_hi;
		qstats->total_bytes_received_lo =
			qstats->total_broadcast_bytes_received_lo;

		ADD_64(qstats->total_bytes_received_hi,
		       qstats->total_multicast_bytes_received_hi,
		       qstats->total_bytes_received_lo,
		       qstats->total_multicast_bytes_received_lo);

		ADD_64(qstats->total_bytes_received_hi,
		       qstats->total_unicast_bytes_received_hi,
		       qstats->total_bytes_received_lo,
		       qstats->total_unicast_bytes_received_lo);

		qstats->valid_bytes_received_hi =
					qstats->total_bytes_received_hi;
		qstats->valid_bytes_received_lo =
					qstats->total_bytes_received_lo;

		UPDATE_EXTEND_TSTAT(rcv_ucast_pkts,
					total_unicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_mcast_pkts,
					total_multicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_bcast_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_E_TSTAT(pkts_too_big_discard,
				      etherstatsoverrsizepkts, 32);
		UPDATE_EXTEND_E_TSTAT(no_buff_discard, no_buff_discard, 16);

		SUB_EXTEND_USTAT(ucast_no_buff_pkts,
					total_unicast_packets_received);
		SUB_EXTEND_USTAT(mcast_no_buff_pkts,
					total_multicast_packets_received);
		SUB_EXTEND_USTAT(bcast_no_buff_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_E_USTAT(ucast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_E_USTAT(mcast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_E_USTAT(bcast_no_buff_pkts, no_buff_discard);

		UPDATE_QSTAT(xclient->bcast_bytes_sent,
			     total_broadcast_bytes_transmitted);
		UPDATE_QSTAT(xclient->mcast_bytes_sent,
			     total_multicast_bytes_transmitted);
		UPDATE_QSTAT(xclient->ucast_bytes_sent,
			     total_unicast_bytes_transmitted);

		/*
		 * sum to total_bytes_transmitted all
		 * unicast/multicast/broadcast
		 */
		qstats->total_bytes_transmitted_hi =
				qstats->total_unicast_bytes_transmitted_hi;
		qstats->total_bytes_transmitted_lo =
				qstats->total_unicast_bytes_transmitted_lo;

		ADD_64(qstats->total_bytes_transmitted_hi,
		       qstats->total_broadcast_bytes_transmitted_hi,
		       qstats->total_bytes_transmitted_lo,
		       qstats->total_broadcast_bytes_transmitted_lo);

		ADD_64(qstats->total_bytes_transmitted_hi,
		       qstats->total_multicast_bytes_transmitted_hi,
		       qstats->total_bytes_transmitted_lo,
		       qstats->total_multicast_bytes_transmitted_lo);

		UPDATE_EXTEND_XSTAT(ucast_pkts_sent,
					total_unicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(mcast_pkts_sent,
					total_multicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(bcast_pkts_sent,
					total_broadcast_packets_transmitted);

		UPDATE_EXTEND_TSTAT(checksum_discard,
				    total_packets_received_checksum_discarded);
		UPDATE_EXTEND_TSTAT(ttl0_discard,
				    total_packets_received_ttl0_discarded);

		UPDATE_EXTEND_XSTAT(error_drop_pkts,
				    total_transmitted_dropped_packets_error);

		/* TPA aggregations completed */
		UPDATE_EXTEND_E_USTAT(coalesced_events, total_tpa_aggregations);
		/* Number of network frames aggregated by TPA */
		UPDATE_EXTEND_E_USTAT(coalesced_pkts,
				      total_tpa_aggregated_frames);
		/* Total number of bytes in completed TPA aggregations */
		UPDATE_QSTAT(uclient->coalesced_bytes, total_tpa_bytes);

		UPDATE_ESTAT_QSTAT_64(total_tpa_bytes);

		UPDATE_FSTAT_QSTAT(total_bytes_received);
		UPDATE_FSTAT_QSTAT(total_bytes_transmitted);
		UPDATE_FSTAT_QSTAT(total_unicast_packets_received);
		UPDATE_FSTAT_QSTAT(total_multicast_packets_received);
		UPDATE_FSTAT_QSTAT(total_broadcast_packets_received);
		UPDATE_FSTAT_QSTAT(total_unicast_packets_transmitted);
		UPDATE_FSTAT_QSTAT(total_multicast_packets_transmitted);
		UPDATE_FSTAT_QSTAT(total_broadcast_packets_transmitted);
		UPDATE_FSTAT_QSTAT(valid_bytes_received);
	}

	ADD_64(estats->total_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->total_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	ADD_64_LE(estats->total_bytes_received_hi,
		  tfunc->rcv_error_bytes.hi,
		  estats->total_bytes_received_lo,
		  tfunc->rcv_error_bytes.lo);

	ADD_64_LE(estats->error_bytes_received_hi,
		  tfunc->rcv_error_bytes.hi,
		  estats->error_bytes_received_lo,
		  tfunc->rcv_error_bytes.lo);

	UPDATE_ESTAT(etherstatsoverrsizepkts, rx_stat_dot3statsframestoolong);

	ADD_64(estats->error_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->error_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	if (bp->port.pmf) {
		struct bnx2x_fw_port_stats_old *fwstats = &bp->fw_stats_old;
		UPDATE_FW_STAT(mac_filter_discard);
		UPDATE_FW_STAT(mf_tag_discard);
		UPDATE_FW_STAT(brb_truncate_discard);
		UPDATE_FW_STAT(mac_discard);
	}

	fstats->host_func_stats_start = ++fstats->host_func_stats_end;

	bp->stats_pending = 0;

	return 0;
}
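/* Translate the driver's hi/lo accumulators into the generic
 * struct net_device_stats counters reported to the stack.
 */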
static void bnx2x_net_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct net_device_stats *nstats = &bp->dev->stats;
	unsigned long tmp;
	int i;

	nstats->rx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_received_hi);

	nstats->tx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);

	nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);

	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);

	tmp = estats->mac_discard;
	for_each_rx_queue(bp, i) {
		struct tstorm_per_queue_stats *old_tclient =
			&bp->fp_stats[i].old_tclient;
		tmp += le32_to_cpu(old_tclient->checksum_discard);
	}
	nstats->rx_dropped = tmp + bp->net_stats_old.rx_dropped;

	nstats->tx_dropped = 0;

	nstats->multicast =
		bnx2x_hilo(&estats->total_multicast_packets_received_hi);

	nstats->collisions =
		bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);

	nstats->rx_length_errors =
		bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
		bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
	nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
				 bnx2x_hilo(&estats->brb_truncate_hi);
	nstats->rx_crc_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
	nstats->rx_frame_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
	nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
	nstats->rx_missed_errors = 0;

	nstats->rx_errors = nstats->rx_length_errors +
			    nstats->rx_over_errors +
			    nstats->rx_crc_errors +
			    nstats->rx_frame_errors +
			    nstats->rx_fifo_errors +
			    nstats->rx_missed_errors;

	nstats->tx_aborted_errors =
		bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
		bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
	nstats->tx_carrier_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
	nstats->tx_fifo_errors = 0;
	nstats->tx_heartbeat_errors = 0;
	nstats->tx_window_errors = 0;

	nstats->tx_errors = nstats->tx_aborted_errors +
			    nstats->tx_carrier_errors +
	    bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
}

static void bnx2x_drv_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_eth_q_stats *qstats = &bp->fp_stats[i].eth_q_stats;
		struct bnx2x_eth_q_stats_old *qstats_old =
			&bp->fp_stats[i].eth_q_stats_old;

		UPDATE_ESTAT_QSTAT(driver_xoff);
		UPDATE_ESTAT_QSTAT(rx_err_discard_pkt);
		UPDATE_ESTAT_QSTAT(rx_skb_alloc_failed);
		UPDATE_ESTAT_QSTAT(hw_csum_err);
		UPDATE_ESTAT_QSTAT(driver_filtered_tx_pkt);
	}
}

static bool bnx2x_edebug_stats_stopped(struct bnx2x *bp)
{
	u32 val;

	if (SHMEM2_HAS(bp, edebug_driver_if[1])) {
		val = SHMEM2_RD(bp, edebug_driver_if[1]);

		if (val == EDEBUG_DRIVER_IF_OP_CODE_DISABLE_STAT)
			return true;
	}

	return false;
}
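/* Periodic (timer context) update. A PF first makes sure the DMAE
 * chain completed, refreshes the HW statistics when it is the PMF and
 * treats repeated storm-update failures as fatal; a VF only consumes
 * the storm statistics, since it neither runs DMAE nor gets
 * completions.
 */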
static void bnx2x_stats_update(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	if (bnx2x_edebug_stats_stopped(bp))
		return;

	if (IS_PF(bp)) {
		if (*stats_comp != DMAE_COMP_VAL)
			return;

		if (bp->port.pmf)
			bnx2x_hw_stats_update(bp);

		if (bnx2x_storm_stats_update(bp)) {
			if (bp->stats_pending++ == 3) {
				BNX2X_ERR("storm stats were not updated for 3 times\n");
				bnx2x_panic();
			}
			return;
		}
	} else {
		/* vf doesn't collect HW statistics, and doesn't get
		 * completions; perform only the update
		 */
		bnx2x_storm_stats_update(bp);
	}

	bnx2x_net_stats_update(bp);
	bnx2x_drv_stats_update(bp);

	/* vf is done */
	if (IS_VF(bp))
		return;

	if (netif_msg_timer(bp)) {
		struct bnx2x_eth_stats *estats = &bp->eth_stats;

		netdev_dbg(bp->dev, "brb drops %u brb truncate %u\n",
			   estats->brb_drop_lo, estats->brb_truncate_lo);
	}

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_port_stats_stop(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	bp->executer_idx = 0;

	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC, false, 0);

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		if (bp->func_stx)
			dmae->opcode = bnx2x_dmae_opcode_add_comp(
						opcode, DMAE_COMP_GRC);
		else
			dmae->opcode = bnx2x_dmae_opcode_add_comp(
						opcode, DMAE_COMP_PCI);

		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = bnx2x_get_port_stats_dma_len(bp);
		if (bp->func_stx) {
			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
			dmae->comp_addr_hi = 0;
			dmae->comp_val = 1;
		} else {
			dmae->comp_addr_lo =
				U64_LO(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_addr_hi =
				U64_HI(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_val = DMAE_COMP_VAL;

			*stats_comp = 0;
		}
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode =
			bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_val = DMAE_COMP_VAL;

		*stats_comp = 0;
	}
}

static void bnx2x_stats_stop(struct bnx2x *bp)
{
	bool update = false;

	bnx2x_stats_comp(bp);

	if (bp->port.pmf)
		update = (bnx2x_hw_stats_update(bp) == 0);

	update |= (bnx2x_storm_stats_update(bp) == 0);

	if (update) {
		bnx2x_net_stats_update(bp);

		if (bp->port.pmf)
			bnx2x_port_stats_stop(bp);

		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}
}

static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}
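/* Statistics state machine: indexed by the current state and the
 * incoming event, each entry names the action to run and the state to
 * move to, e.g. STATS_EVENT_LINK_UP in STATS_STATE_DISABLED runs
 * bnx2x_stats_start() and moves to STATS_STATE_ENABLED.
 * bnx2x_stats_handle() below executes it under bp->stats_lock.
 */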
static const struct {
	void (*action)(struct bnx2x *bp);
	enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state	event	*/
{
/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*		LINK_UP	*/ {bnx2x_stats_start,      STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
/*		LINK_UP	*/ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_update,     STATS_STATE_ENABLED},
/*		STOP	*/ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
}
};

void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
	enum bnx2x_stats_state state = bp->stats_state;

	if (unlikely(bp->panic))
		return;

	/* Statistics updates run from timer context, and we don't want to
	 * stop that context in case someone is in the middle of a transition.
	 * For other events, wait a bit until the lock is taken.
	 */
	if (down_trylock(&bp->stats_lock)) {
		if (event == STATS_EVENT_UPDATE)
			return;

		DP(BNX2X_MSG_STATS,
		   "Unlikely stats' lock contention [event %d]\n", event);
		if (unlikely(down_timeout(&bp->stats_lock, HZ / 10))) {
			BNX2X_ERR("Failed to take stats lock [event %d]\n",
				  event);
			return;
		}
	}

	bnx2x_stats_stm[state][event].action(bp);
	bp->stats_state = bnx2x_stats_stm[state][event].next_state;

	up(&bp->stats_lock);

	if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
		   state, event, bp->stats_state);
}
static void bnx2x_port_stats_base_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
					 true, DMAE_COMP_PCI);
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_lo = bp->port.port_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = bnx2x_get_port_stats_dma_len(bp);
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}
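/* Resulting layout of the constant part of the ramrod data, assuming
 * FCoE is present:
 *
 *	query[BNX2X_PORT_QUERY_IDX]            -> fw_stats_data->port
 *	query[BNX2X_PF_QUERY_IDX]              -> fw_stats_data->pf
 *	query[BNX2X_FCOE_QUERY_IDX]            -> fw_stats_data->fcoe
 *	query[BNX2X_FIRST_QUEUE_QUERY_IDX + i] -> fw_stats_data->queue_stats[i]
 *
 * Without FCoE the queue queries simply start one slot earlier.
 */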
/* This function prepares the statistics ramrod data so that afterwards
 * we only have to increment the statistics counter and
 * send the ramrod each time we have to.
 */
static void bnx2x_prep_fw_stats_req(struct bnx2x *bp)
{
	int i;
	int first_queue_query_index;
	struct stats_query_header *stats_hdr = &bp->fw_stats_req->hdr;

	dma_addr_t cur_data_offset;
	struct stats_query_entry *cur_query_entry;

	stats_hdr->cmd_num = bp->fw_stats_num;
	stats_hdr->drv_stats_counter = 0;

	/* storm_counters struct contains the counters of completed
	 * statistics requests per storm which are incremented by FW
	 * each time it completes handling a statistics ramrod. We will
	 * check these counters in the timer handler and discard a
	 * stale (statistics) ramrod completion.
	 */
	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, storm_counters);

	stats_hdr->stats_counters_addrs.hi =
		cpu_to_le32(U64_HI(cur_data_offset));
	stats_hdr->stats_counters_addrs.lo =
		cpu_to_le32(U64_LO(cur_data_offset));

	/* prepare for the first stats ramrod (will be completed with
	 * the counters equal to zero) - init counters to something different.
	 */
	memset(&bp->fw_stats_data->storm_counters, 0xff,
	       sizeof(struct stats_counter));

	/**** Port FW statistics data ****/
	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, port);

	cur_query_entry = &bp->fw_stats_req->query[BNX2X_PORT_QUERY_IDX];

	cur_query_entry->kind = STATS_TYPE_PORT;
	/* For port query index is a DONT CARE */
	cur_query_entry->index = BP_PORT(bp);
	/* For port query funcID is a DONT CARE */
	cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
	cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset));
	cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset));

	/**** PF FW statistics data ****/
	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, pf);

	cur_query_entry = &bp->fw_stats_req->query[BNX2X_PF_QUERY_IDX];

	cur_query_entry->kind = STATS_TYPE_PF;
	/* For PF query index is a DONT CARE */
	cur_query_entry->index = BP_PORT(bp);
	cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
	cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset));
	cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset));

	/**** FCoE FW statistics data ****/
	if (!NO_FCOE(bp)) {
		cur_data_offset = bp->fw_stats_data_mapping +
			offsetof(struct bnx2x_fw_stats_data, fcoe);

		cur_query_entry =
			&bp->fw_stats_req->query[BNX2X_FCOE_QUERY_IDX];

		cur_query_entry->kind = STATS_TYPE_FCOE;
		/* For FCoE query index is a DONT CARE */
		cur_query_entry->index = BP_PORT(bp);
		cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
		cur_query_entry->address.hi =
			cpu_to_le32(U64_HI(cur_data_offset));
		cur_query_entry->address.lo =
			cpu_to_le32(U64_LO(cur_data_offset));
	}

	/**** Clients' queries ****/
	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, queue_stats);

	/* first queue query index depends whether FCoE offloaded request will
	 * be included in the ramrod
	 */
	if (!NO_FCOE(bp))
		first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX;
	else
		first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX - 1;

	for_each_eth_queue(bp, i) {
		cur_query_entry =
			&bp->fw_stats_req->
					query[first_queue_query_index + i];

		cur_query_entry->kind = STATS_TYPE_QUEUE;
		cur_query_entry->index = bnx2x_stats_id(&bp->fp[i]);
		cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
		cur_query_entry->address.hi =
			cpu_to_le32(U64_HI(cur_data_offset));
		cur_query_entry->address.lo =
			cpu_to_le32(U64_LO(cur_data_offset));

		cur_data_offset += sizeof(struct per_queue_stats);
	}

	/* add FCoE queue query if needed */
	if (!NO_FCOE(bp)) {
		cur_query_entry =
			&bp->fw_stats_req->
					query[first_queue_query_index + i];

		cur_query_entry->kind = STATS_TYPE_QUEUE;
		cur_query_entry->index = bnx2x_stats_id(&bp->fp[FCOE_IDX(bp)]);
		cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
		cur_query_entry->address.hi =
			cpu_to_le32(U64_HI(cur_data_offset));
		cur_query_entry->address.lo =
			cpu_to_le32(U64_LO(cur_data_offset));
	}
}
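/* Zero the driver's view of the statistics. The per-queue firmware
 * snapshots are always cleared; the "old" accumulators and the
 * externally visible counters are wiped only on the very first
 * initialization (bp->stats_init), so totals survive later resets.
 */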
void bnx2x_memset_stats(struct bnx2x *bp)
{
	int i;

	/* function stats */
	for_each_queue(bp, i) {
		struct bnx2x_fp_stats *fp_stats = &bp->fp_stats[i];

		memset(&fp_stats->old_tclient, 0,
		       sizeof(fp_stats->old_tclient));
		memset(&fp_stats->old_uclient, 0,
		       sizeof(fp_stats->old_uclient));
		memset(&fp_stats->old_xclient, 0,
		       sizeof(fp_stats->old_xclient));
		if (bp->stats_init) {
			memset(&fp_stats->eth_q_stats, 0,
			       sizeof(fp_stats->eth_q_stats));
			memset(&fp_stats->eth_q_stats_old, 0,
			       sizeof(fp_stats->eth_q_stats_old));
		}
	}

	memset(&bp->dev->stats, 0, sizeof(bp->dev->stats));

	if (bp->stats_init) {
		memset(&bp->net_stats_old, 0, sizeof(bp->net_stats_old));
		memset(&bp->fw_stats_old, 0, sizeof(bp->fw_stats_old));
		memset(&bp->eth_stats_old, 0, sizeof(bp->eth_stats_old));
		memset(&bp->eth_stats, 0, sizeof(bp->eth_stats));
		memset(&bp->func_stats, 0, sizeof(bp->func_stats));
	}

	bp->stats_state = STATS_STATE_DISABLED;

	if (bp->port.pmf && bp->port.port_stx)
		bnx2x_port_stats_base_init(bp);

	/* mark the end of statistics initialization */
	bp->stats_init = false;
}

void bnx2x_stats_init(struct bnx2x *bp)
{
	int /*abs*/port = BP_PORT(bp);
	int mb_idx = BP_FW_MB_IDX(bp);

	if (IS_VF(bp)) {
		bnx2x_memset_stats(bp);
		return;
	}

	bp->stats_pending = 0;
	bp->executer_idx = 0;
	bp->stats_counter = 0;

	/* port and func stats for management */
	if (!BP_NOMCP(bp)) {
		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
		bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param);

	} else {
		bp->port.port_stx = 0;
		bp->func_stx = 0;
	}
	DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n",
	   bp->port.port_stx, bp->func_stx);

	/* pmf should retrieve port statistics from SP on a non-init run */
	if (!bp->stats_init && bp->port.pmf && bp->port.port_stx)
		bnx2x_stats_handle(bp, STATS_EVENT_PMF);

	port = BP_PORT(bp);
	/* port stats */
	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
	bp->port.old_nig_stats.brb_discard =
			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
	bp->port.old_nig_stats.brb_truncate =
			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
	if (!CHIP_IS_E3(bp)) {
		REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
			    &(bp->port.old_nig_stats.egress_mac_pkt0), 2);
		REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
			    &(bp->port.old_nig_stats.egress_mac_pkt1), 2);
	}

	/* Prepare statistics ramrod data */
	bnx2x_prep_fw_stats_req(bp);

	/* Clean SP from previous statistics */
	if (bp->stats_init) {
		if (bp->func_stx) {
			memset(bnx2x_sp(bp, func_stats), 0,
			       sizeof(struct host_func_stats));
			bnx2x_func_stats_init(bp);
			bnx2x_hw_stats_post(bp);
			bnx2x_stats_comp(bp);
		}
	}

	bnx2x_memset_stats(bp);
}
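/* Snapshot the current totals into the *_old buffers (via
 * UPDATE_QSTAT_OLD/UPDATE_FW_STAT_OLD) so that the counters reported
 * to the stack remain monotonic across a function reset.
 */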

void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats,
			      u32 stats_type)
{
	int i;
	struct afex_stats *afex_stats = (struct afex_stats *)void_afex_stats;
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct per_queue_stats *fcoe_q_stats =
		&bp->fw_stats_data->queue_stats[FCOE_IDX(bp)];

	struct tstorm_per_queue_stats *fcoe_q_tstorm_stats =
		&fcoe_q_stats->tstorm_queue_statistics;

	struct ustorm_per_queue_stats *fcoe_q_ustorm_stats =
		&fcoe_q_stats->ustorm_queue_statistics;

	struct xstorm_per_queue_stats *fcoe_q_xstorm_stats =
		&fcoe_q_stats->xstorm_queue_statistics;

	struct fcoe_statistics_params *fw_fcoe_stat =
		&bp->fw_stats_data->fcoe;

	memset(afex_stats, 0, sizeof(struct afex_stats));

	/* accumulate the per-queue counters, each kept as a {hi, lo} pair
	 * (see the carry sketch after this function)
	 */
	for_each_eth_queue(bp, i) {
		struct bnx2x_eth_q_stats *qstats = &bp->fp_stats[i].eth_q_stats;

		ADD_64(afex_stats->rx_unicast_bytes_hi,
		       qstats->total_unicast_bytes_received_hi,
		       afex_stats->rx_unicast_bytes_lo,
		       qstats->total_unicast_bytes_received_lo);

		ADD_64(afex_stats->rx_broadcast_bytes_hi,
		       qstats->total_broadcast_bytes_received_hi,
		       afex_stats->rx_broadcast_bytes_lo,
		       qstats->total_broadcast_bytes_received_lo);

		ADD_64(afex_stats->rx_multicast_bytes_hi,
		       qstats->total_multicast_bytes_received_hi,
		       afex_stats->rx_multicast_bytes_lo,
		       qstats->total_multicast_bytes_received_lo);

		ADD_64(afex_stats->rx_unicast_frames_hi,
		       qstats->total_unicast_packets_received_hi,
		       afex_stats->rx_unicast_frames_lo,
		       qstats->total_unicast_packets_received_lo);

		ADD_64(afex_stats->rx_broadcast_frames_hi,
		       qstats->total_broadcast_packets_received_hi,
		       afex_stats->rx_broadcast_frames_lo,
		       qstats->total_broadcast_packets_received_lo);

		ADD_64(afex_stats->rx_multicast_frames_hi,
		       qstats->total_multicast_packets_received_hi,
		       afex_stats->rx_multicast_frames_lo,
		       qstats->total_multicast_packets_received_lo);

		/* sum into rx_frames_discarded all packets discarded
		 * due to size, ttl0 and checksum
		 */
		ADD_64(afex_stats->rx_frames_discarded_hi,
		       qstats->total_packets_received_checksum_discarded_hi,
		       afex_stats->rx_frames_discarded_lo,
		       qstats->total_packets_received_checksum_discarded_lo);

		ADD_64(afex_stats->rx_frames_discarded_hi,
		       qstats->total_packets_received_ttl0_discarded_hi,
		       afex_stats->rx_frames_discarded_lo,
		       qstats->total_packets_received_ttl0_discarded_lo);

		ADD_64(afex_stats->rx_frames_discarded_hi,
		       qstats->etherstatsoverrsizepkts_hi,
		       afex_stats->rx_frames_discarded_lo,
		       qstats->etherstatsoverrsizepkts_lo);

		ADD_64(afex_stats->rx_frames_dropped_hi,
		       qstats->no_buff_discard_hi,
		       afex_stats->rx_frames_dropped_lo,
		       qstats->no_buff_discard_lo);

		ADD_64(afex_stats->tx_unicast_bytes_hi,
		       qstats->total_unicast_bytes_transmitted_hi,
		       afex_stats->tx_unicast_bytes_lo,
		       qstats->total_unicast_bytes_transmitted_lo);

		ADD_64(afex_stats->tx_broadcast_bytes_hi,
		       qstats->total_broadcast_bytes_transmitted_hi,
		       afex_stats->tx_broadcast_bytes_lo,
		       qstats->total_broadcast_bytes_transmitted_lo);

		ADD_64(afex_stats->tx_multicast_bytes_hi,
		       qstats->total_multicast_bytes_transmitted_hi,
		       afex_stats->tx_multicast_bytes_lo,
		       qstats->total_multicast_bytes_transmitted_lo);

		ADD_64(afex_stats->tx_unicast_frames_hi,
		       qstats->total_unicast_packets_transmitted_hi,
		       afex_stats->tx_unicast_frames_lo,
		       qstats->total_unicast_packets_transmitted_lo);

		ADD_64(afex_stats->tx_broadcast_frames_hi,
		       qstats->total_broadcast_packets_transmitted_hi,
		       afex_stats->tx_broadcast_frames_lo,
		       qstats->total_broadcast_packets_transmitted_lo);

		ADD_64(afex_stats->tx_multicast_frames_hi,
		       qstats->total_multicast_packets_transmitted_hi,
		       afex_stats->tx_multicast_frames_lo,
		       qstats->total_multicast_packets_transmitted_lo);

		ADD_64(afex_stats->tx_frames_dropped_hi,
		       qstats->total_transmitted_dropped_packets_error_hi,
		       afex_stats->tx_frames_dropped_lo,
		       qstats->total_transmitted_dropped_packets_error_lo);
	}

	/* now add the FCoE statistics, which are collected separately
	 * (both offloaded and non-offloaded)
	 */
	if (!NO_FCOE(bp)) {
		ADD_64_LE(afex_stats->rx_unicast_bytes_hi,
			  LE32_0,
			  afex_stats->rx_unicast_bytes_lo,
			  fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt);

		ADD_64_LE(afex_stats->rx_unicast_bytes_hi,
			  fcoe_q_tstorm_stats->rcv_ucast_bytes.hi,
			  afex_stats->rx_unicast_bytes_lo,
			  fcoe_q_tstorm_stats->rcv_ucast_bytes.lo);

		ADD_64_LE(afex_stats->rx_broadcast_bytes_hi,
			  fcoe_q_tstorm_stats->rcv_bcast_bytes.hi,
			  afex_stats->rx_broadcast_bytes_lo,
			  fcoe_q_tstorm_stats->rcv_bcast_bytes.lo);

		ADD_64_LE(afex_stats->rx_multicast_bytes_hi,
			  fcoe_q_tstorm_stats->rcv_mcast_bytes.hi,
			  afex_stats->rx_multicast_bytes_lo,
			  fcoe_q_tstorm_stats->rcv_mcast_bytes.lo);

		ADD_64_LE(afex_stats->rx_unicast_frames_hi,
			  LE32_0,
			  afex_stats->rx_unicast_frames_lo,
			  fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt);

		ADD_64_LE(afex_stats->rx_unicast_frames_hi,
			  LE32_0,
			  afex_stats->rx_unicast_frames_lo,
			  fcoe_q_tstorm_stats->rcv_ucast_pkts);

		ADD_64_LE(afex_stats->rx_broadcast_frames_hi,
			  LE32_0,
			  afex_stats->rx_broadcast_frames_lo,
			  fcoe_q_tstorm_stats->rcv_bcast_pkts);

		ADD_64_LE(afex_stats->rx_multicast_frames_hi,
			  LE32_0,
			  afex_stats->rx_multicast_frames_lo,
			  fcoe_q_tstorm_stats->rcv_mcast_pkts);

		ADD_64_LE(afex_stats->rx_frames_discarded_hi,
			  LE32_0,
			  afex_stats->rx_frames_discarded_lo,
			  fcoe_q_tstorm_stats->checksum_discard);

		ADD_64_LE(afex_stats->rx_frames_discarded_hi,
			  LE32_0,
			  afex_stats->rx_frames_discarded_lo,
			  fcoe_q_tstorm_stats->pkts_too_big_discard);

		ADD_64_LE(afex_stats->rx_frames_discarded_hi,
			  LE32_0,
			  afex_stats->rx_frames_discarded_lo,
			  fcoe_q_tstorm_stats->ttl0_discard);

		ADD_64_LE16(afex_stats->rx_frames_dropped_hi,
			    LE16_0,
			    afex_stats->rx_frames_dropped_lo,
			    fcoe_q_tstorm_stats->no_buff_discard);

		ADD_64_LE(afex_stats->rx_frames_dropped_hi,
			  LE32_0,
			  afex_stats->rx_frames_dropped_lo,
			  fcoe_q_ustorm_stats->ucast_no_buff_pkts);

		ADD_64_LE(afex_stats->rx_frames_dropped_hi,
			  LE32_0,
			  afex_stats->rx_frames_dropped_lo,
			  fcoe_q_ustorm_stats->mcast_no_buff_pkts);

		ADD_64_LE(afex_stats->rx_frames_dropped_hi,
			  LE32_0,
			  afex_stats->rx_frames_dropped_lo,
			  fcoe_q_ustorm_stats->bcast_no_buff_pkts);

		ADD_64_LE(afex_stats->rx_frames_dropped_hi,
			  LE32_0,
			  afex_stats->rx_frames_dropped_lo,
			  fw_fcoe_stat->rx_stat1.fcoe_rx_drop_pkt_cnt);

		ADD_64_LE(afex_stats->rx_frames_dropped_hi,
			  LE32_0,
			  afex_stats->rx_frames_dropped_lo,
			  fw_fcoe_stat->rx_stat2.fcoe_rx_drop_pkt_cnt);

		ADD_64_LE(afex_stats->tx_unicast_bytes_hi,
			  LE32_0,
			  afex_stats->tx_unicast_bytes_lo,
			  fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt);

		ADD_64_LE(afex_stats->tx_unicast_bytes_hi,
			  fcoe_q_xstorm_stats->ucast_bytes_sent.hi,
			  afex_stats->tx_unicast_bytes_lo,
			  fcoe_q_xstorm_stats->ucast_bytes_sent.lo);

		ADD_64_LE(afex_stats->tx_broadcast_bytes_hi,
			  fcoe_q_xstorm_stats->bcast_bytes_sent.hi,
			  afex_stats->tx_broadcast_bytes_lo,
			  fcoe_q_xstorm_stats->bcast_bytes_sent.lo);

		ADD_64_LE(afex_stats->tx_multicast_bytes_hi,
			  fcoe_q_xstorm_stats->mcast_bytes_sent.hi,
			  afex_stats->tx_multicast_bytes_lo,
			  fcoe_q_xstorm_stats->mcast_bytes_sent.lo);

		ADD_64_LE(afex_stats->tx_unicast_frames_hi,
			  LE32_0,
			  afex_stats->tx_unicast_frames_lo,
			  fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt);

		ADD_64_LE(afex_stats->tx_unicast_frames_hi,
			  LE32_0,
			  afex_stats->tx_unicast_frames_lo,
			  fcoe_q_xstorm_stats->ucast_pkts_sent);

		ADD_64_LE(afex_stats->tx_broadcast_frames_hi,
			  LE32_0,
			  afex_stats->tx_broadcast_frames_lo,
			  fcoe_q_xstorm_stats->bcast_pkts_sent);

		ADD_64_LE(afex_stats->tx_multicast_frames_hi,
			  LE32_0,
			  afex_stats->tx_multicast_frames_lo,
			  fcoe_q_xstorm_stats->mcast_pkts_sent);

		ADD_64_LE(afex_stats->tx_frames_dropped_hi,
			  LE32_0,
			  afex_stats->tx_frames_dropped_lo,
			  fcoe_q_xstorm_stats->error_drop_pkts);
	}

	/* if port stats are requested, add them to the PMF stats, since
	 * they will be accumulated by the MCP anyway before being sent
	 * to the switch
	 */
	if ((bp->port.pmf) && (stats_type == VICSTATST_UIF_INDEX)) {
		ADD_64(afex_stats->rx_frames_dropped_hi,
		       0,
		       afex_stats->rx_frames_dropped_lo,
		       estats->mac_filter_discard);
		ADD_64(afex_stats->rx_frames_dropped_hi,
		       0,
		       afex_stats->rx_frames_dropped_lo,
		       estats->brb_truncate_discard);
		ADD_64(afex_stats->rx_frames_discarded_hi,
		       0,
		       afex_stats->rx_frames_discarded_lo,
		       estats->mac_discard);
	}
}
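
/* Illustrative sketch of the split 64-bit accumulate used throughout
 * bnx2x_afex_collect_stats() (hypothetical helper, not the driver's
 * ADD_64 macro): every counter is a {hi, lo} pair of u32s, so an add
 * must propagate the carry out of the low word by hand.  The
 * ADD_64_LE/ADD_64_LE16 variants are assumed to perform the same
 * operation after converting the little-endian firmware fields to CPU
 * order, with LE32_0/LE16_0 serving as zero operands of matching type.
 */
static inline void example_add_64(u32 *s_hi, u32 a_hi, u32 *s_lo, u32 a_lo)
{
	*s_lo += a_lo;
	/* the unsigned sum wrapped iff the result is below the addend */
	*s_hi += a_hi + ((*s_lo < a_lo) ? 1 : 0);
}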

int bnx2x_stats_safe_exec(struct bnx2x *bp,
			  void (func_to_exec)(void *cookie),
			  void *cookie)
{
	int cnt = 10, rc = 0;

	/* Wait for statistics to end [while blocking further requests],
	 * then run the supplied function 'safely'.
	 */
	rc = down_timeout(&bp->stats_lock, HZ / 10);
	if (unlikely(rc)) {
		BNX2X_ERR("Failed to take statistics lock for safe execution\n");
		goto out_no_lock;
	}

	bnx2x_stats_comp(bp);
	while (bp->stats_pending && cnt--)
		if (bnx2x_storm_stats_update(bp))
			usleep_range(1000, 2000);
	if (bp->stats_pending) {
		BNX2X_ERR("Failed to wait for stats pending to clear [possibly FW is stuck]\n");
		rc = -EBUSY;
		goto out;
	}

	func_to_exec(cookie);

out:
	/* No need to restart statistics - if they're enabled, the timer
	 * will restart the statistics.
	 */
	up(&bp->stats_lock);
out_no_lock:
	return rc;
}
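
/* Usage sketch (hypothetical caller, not driver code): the cookie
 * argument lets bnx2x_stats_safe_exec() run arbitrary work while the
 * statistics machinery is quiesced and stats_lock is held.
 */
struct example_stats_cookie {
	struct bnx2x *bp;
	u32 mac_discard;
};

static void example_read_quiesced_stats(void *cookie)
{
	struct example_stats_cookie *c = cookie;

	/* runs with no statistics DMA or ramrod in flight */
	c->mac_discard = c->bp->eth_stats.mac_discard;
}

static int __maybe_unused example_stats_safe_read(struct bnx2x *bp, u32 *val)
{
	struct example_stats_cookie c = { .bp = bp };
	int rc;

	rc = bnx2x_stats_safe_exec(bp, example_read_quiesced_stats, &c);
	if (rc)
		return rc;	/* lock timeout or FW stuck */

	*val = c.mac_discard;
	return 0;
}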