/* bnx2x_stats.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2012 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "bnx2x_stats.h"
#include "bnx2x_cmn.h"


/* Statistics */

/*
 * General service functions
 */

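/* Read a 64-bit statistic stored as a {hi, lo} pair of u32s.  The full
 * value is returned on 64-bit hosts; on 32-bit hosts only the low 32
 * bits fit in a long.
 */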
static inline long bnx2x_hilo(u32 *hiref)
{
	u32 lo = *(hiref + 1);
#if (BITS_PER_LONG == 64)
	u32 hi = *hiref;

	return HILO_U64(hi, lo);
#else
	return lo;
#endif
}

static u16 bnx2x_get_port_stats_dma_len(struct bnx2x *bp)
{
	u16 res = sizeof(struct host_port_stats) >> 2;

	/* if PFC stats are not supported by the MFW, don't DMA them */
	if (!(bp->flags & BC_SUPPORTS_PFC_STATS))
		res -= (sizeof(u32)*4) >> 2;

	return res;
}

/*
 * Init service functions
 */

/* Post the next statistics ramrod. Protect it with the stats spinlock
 * in order to ensure the strict order between statistics ramrods
 * (each ramrod has a sequence number passed in
 * bp->fw_stats_req->hdr.drv_stats_counter and ramrods must be
 * sent in order).
 */
static void bnx2x_storm_stats_post(struct bnx2x *bp)
{
	if (!bp->stats_pending) {
		int rc;

		spin_lock_bh(&bp->stats_lock);

		if (bp->stats_pending) {
			spin_unlock_bh(&bp->stats_lock);
			return;
		}

		bp->fw_stats_req->hdr.drv_stats_counter =
			cpu_to_le16(bp->stats_counter++);

		DP(BNX2X_MSG_STATS, "Sending statistics ramrod %d\n",
			bp->fw_stats_req->hdr.drv_stats_counter);

		/* send FW stats ramrod */
		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0,
				   U64_HI(bp->fw_stats_req_mapping),
				   U64_LO(bp->fw_stats_req_mapping),
				   NONE_CONNECTION_TYPE);
		if (rc == 0)
			bp->stats_pending = 1;

		spin_unlock_bh(&bp->stats_lock);
	}
}

static void bnx2x_hw_stats_post(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	*stats_comp = DMAE_COMP_VAL;
	if (CHIP_REV_IS_SLOW(bp))
		return;

	/* loader */
	if (bp->executer_idx) {
		int loader_idx = PMF_DMAE_C(bp);
		u32 opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
					       true, DMAE_COMP_GRC);
		opcode = bnx2x_dmae_opcode_clr_src_reset(opcode);

		memset(dmae, 0, sizeof(struct dmae_command));
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
				     sizeof(struct dmae_command) *
				     (loader_idx + 1)) >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct dmae_command) >> 2;
		if (CHIP_IS_E1(bp))
			dmae->len--;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, loader_idx);

	} else if (bp->func_stx) {
		*stats_comp = 0;
		memcpy(bnx2x_sp(bp, func_stats), &bp->func_stats,
		       sizeof(bp->func_stats));
		bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
	}
}

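/* Poll the slowpath stats_comp word until the DMAE completion value is
 * written, giving up after roughly 10ms.
 */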
static int bnx2x_stats_comp(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
	int cnt = 10;

	might_sleep();
	while (*stats_comp != DMAE_COMP_VAL) {
		if (!cnt) {
			BNX2X_ERR("timeout waiting for stats finished\n");
			break;
		}
		cnt--;
		usleep_range(1000, 1000);
	}
	return 1;
}

/*
 * Statistics service functions
 */

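/* On becoming PMF, read back the port statistics accumulated so far
 * (presumably by the previous PMF) so that counting continues from the
 * existing values rather than from zero.
 */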
static void bnx2x_stats_pmf_update(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI, false, 0);

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_GRC);
	dmae->src_addr_lo = bp->port.port_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->len = DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
	dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->len = bnx2x_get_port_stats_dma_len(bp) - DMAE_LEN32_RD_MAX;

	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

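/* Build the per-port DMAE chain: copy the host port/function statistics
 * out to the MCP, and read the MAC and NIG counters back into the
 * slowpath buffers.
 */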
static void bnx2x_port_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	int port = BP_PORT(bp);
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 mac_addr;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->link_vars.link_up || !bp->port.pmf) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* MCP */
	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
				   true, DMAE_COMP_GRC);

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = bnx2x_get_port_stats_dma_len(bp);
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* MAC */
	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
				   true, DMAE_COMP_GRC);

	/* EMAC is special */
	if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
		mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);

		/* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_RX_STAT_AC_28 */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->len = 1;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_TX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	} else {
		u32 tx_src_addr_lo, rx_src_addr_lo;
		u16 rx_len, tx_len;

		/* configure the params according to MAC type */
		switch (bp->link_vars.mac_type) {
		case MAC_TYPE_BMAC:
			mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
					   NIG_REG_INGRESS_BMAC0_MEM);

			/* BIGMAC_REGISTER_TX_STAT_GTPKT ..
			   BIGMAC_REGISTER_TX_STAT_GTBYT */
			if (CHIP_IS_E1x(bp)) {
				tx_src_addr_lo = (mac_addr +
					BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
				tx_len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
					  BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
				rx_src_addr_lo = (mac_addr +
					BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
				rx_len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
					  BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
			} else {
				tx_src_addr_lo = (mac_addr +
					BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2;
				tx_len = (8 + BIGMAC2_REGISTER_TX_STAT_GTBYT -
					  BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2;
				rx_src_addr_lo = (mac_addr +
					BIGMAC2_REGISTER_RX_STAT_GR64) >> 2;
				rx_len = (8 + BIGMAC2_REGISTER_RX_STAT_GRIPJ -
					  BIGMAC2_REGISTER_RX_STAT_GR64) >> 2;
			}
			break;

		case MAC_TYPE_UMAC: /* handled by MSTAT */
		case MAC_TYPE_XMAC: /* handled by MSTAT */
		default:
			mac_addr = port ? GRCBASE_MSTAT1 : GRCBASE_MSTAT0;
			tx_src_addr_lo = (mac_addr +
					  MSTAT_REG_TX_STAT_GTXPOK_LO) >> 2;
			rx_src_addr_lo = (mac_addr +
					  MSTAT_REG_RX_STAT_GR64_LO) >> 2;
			tx_len = sizeof(bp->slowpath->
					mac_stats.mstat_stats.stats_tx) >> 2;
			rx_len = sizeof(bp->slowpath->
					mac_stats.mstat_stats.stats_rx) >> 2;
			break;
		}

		/* TX stats */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = tx_src_addr_lo;
		dmae->src_addr_hi = 0;
		dmae->len = tx_len;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* RX stats */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_hi = 0;
		dmae->src_addr_lo = rx_src_addr_lo;
		dmae->dst_addr_lo =
			U64_LO(bnx2x_sp_mapping(bp, mac_stats) + (tx_len << 2));
		dmae->dst_addr_hi =
			U64_HI(bnx2x_sp_mapping(bp, mac_stats) + (tx_len << 2));
		dmae->len = rx_len;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* NIG */
	if (!CHIP_IS_E3(bp)) {
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
					    NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
				offsetof(struct nig_stats, egress_mac_pkt0_lo));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
				offsetof(struct nig_stats, egress_mac_pkt0_lo));
		dmae->len = (2*sizeof(u32)) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
					    NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
				offsetof(struct nig_stats, egress_mac_pkt1_lo));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
				offsetof(struct nig_stats, egress_mac_pkt1_lo));
		dmae->len = (2*sizeof(u32)) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
					 true, DMAE_COMP_PCI);
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
				    NIG_REG_STAT0_BRB_DISCARD) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
	dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;

	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

static void bnx2x_func_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
					 true, DMAE_COMP_PCI);
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
	dmae->dst_addr_lo = bp->func_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

static void bnx2x_stats_start(struct bnx2x *bp)
{
	if (bp->port.pmf)
		bnx2x_port_stats_init(bp);

	else if (bp->func_stx)
		bnx2x_func_stats_init(bp);

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_pmf_update(bp);
	bnx2x_stats_start(bp);
}

static void bnx2x_stats_restart(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_start(bp);
}

static void bnx2x_bmac_stats_update(struct bnx2x *bp)
{
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;

	if (CHIP_IS_E1x(bp)) {
		struct bmac1_stats *new = bnx2x_sp(bp, mac_stats.bmac1_stats);

		/* the macros below will use "bmac1_stats" type */
		UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
		UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
		UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
		UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
		UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
		UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
		UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
		UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
		UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);

		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
		UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
		UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
		UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
		UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
		UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
		UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
		UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047);
		UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095);
		UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216);
		UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383);
		UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
		UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);

	} else {
		struct bmac2_stats *new = bnx2x_sp(bp, mac_stats.bmac2_stats);

		/* the macros below will use "bmac2_stats" type */
		UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
		UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
		UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
		UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
		UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
		UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
		UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
		UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
		UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);
		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
		UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
		UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
		UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
		UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
		UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
		UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
		UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047);
		UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095);
		UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216);
		UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383);
		UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
		UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);

		/* collect PFC stats */
		pstats->pfc_frames_tx_hi = new->tx_stat_gtpp_hi;
		pstats->pfc_frames_tx_lo = new->tx_stat_gtpp_lo;

		pstats->pfc_frames_rx_hi = new->rx_stat_grpp_hi;
		pstats->pfc_frames_rx_lo = new->rx_stat_grpp_lo;
	}

	estats->pause_frames_received_hi =
				pstats->mac_stx[1].rx_stat_mac_xpf_hi;
	estats->pause_frames_received_lo =
				pstats->mac_stx[1].rx_stat_mac_xpf_lo;

	estats->pause_frames_sent_hi =
				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
	estats->pause_frames_sent_lo =
				pstats->mac_stx[1].tx_stat_outxoffsent_lo;

	estats->pfc_frames_received_hi =
				pstats->pfc_frames_rx_hi;
	estats->pfc_frames_received_lo =
				pstats->pfc_frames_rx_lo;
	estats->pfc_frames_sent_hi =
				pstats->pfc_frames_tx_hi;
	estats->pfc_frames_sent_lo =
				pstats->pfc_frames_tx_lo;
}

static void bnx2x_mstat_stats_update(struct bnx2x *bp)
{
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	struct mstat_stats *new = bnx2x_sp(bp, mac_stats.mstat_stats);

	ADD_STAT64(stats_rx.rx_grerb, rx_stat_ifhcinbadoctets);
	ADD_STAT64(stats_rx.rx_grfcs, rx_stat_dot3statsfcserrors);
	ADD_STAT64(stats_rx.rx_grund, rx_stat_etherstatsundersizepkts);
	ADD_STAT64(stats_rx.rx_grovr, rx_stat_dot3statsframestoolong);
	ADD_STAT64(stats_rx.rx_grfrg, rx_stat_etherstatsfragments);
	ADD_STAT64(stats_rx.rx_grxcf, rx_stat_maccontrolframesreceived);
	ADD_STAT64(stats_rx.rx_grxpf, rx_stat_xoffstateentered);
	ADD_STAT64(stats_rx.rx_grxpf, rx_stat_mac_xpf);
	ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_outxoffsent);
	ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_flowcontroldone);

	/* collect pfc stats */
	ADD_64(pstats->pfc_frames_tx_hi, new->stats_tx.tx_gtxpp_hi,
		pstats->pfc_frames_tx_lo, new->stats_tx.tx_gtxpp_lo);
	ADD_64(pstats->pfc_frames_rx_hi, new->stats_rx.rx_grxpp_hi,
		pstats->pfc_frames_rx_lo, new->stats_rx.rx_grxpp_lo);

	ADD_STAT64(stats_tx.tx_gt64, tx_stat_etherstatspkts64octets);
	ADD_STAT64(stats_tx.tx_gt127,
			tx_stat_etherstatspkts65octetsto127octets);
	ADD_STAT64(stats_tx.tx_gt255,
			tx_stat_etherstatspkts128octetsto255octets);
	ADD_STAT64(stats_tx.tx_gt511,
			tx_stat_etherstatspkts256octetsto511octets);
	ADD_STAT64(stats_tx.tx_gt1023,
			tx_stat_etherstatspkts512octetsto1023octets);
	ADD_STAT64(stats_tx.tx_gt1518,
			tx_stat_etherstatspkts1024octetsto1522octets);
	ADD_STAT64(stats_tx.tx_gt2047, tx_stat_mac_2047);

	ADD_STAT64(stats_tx.tx_gt4095, tx_stat_mac_4095);
	ADD_STAT64(stats_tx.tx_gt9216, tx_stat_mac_9216);
	ADD_STAT64(stats_tx.tx_gt16383, tx_stat_mac_16383);

	ADD_STAT64(stats_tx.tx_gterr,
			tx_stat_dot3statsinternalmactransmiterrors);
	ADD_STAT64(stats_tx.tx_gtufl, tx_stat_mac_ufl);

	estats->etherstatspkts1024octetsto1522octets_hi =
	    pstats->mac_stx[1].tx_stat_etherstatspkts1024octetsto1522octets_hi;
	estats->etherstatspkts1024octetsto1522octets_lo =
	    pstats->mac_stx[1].tx_stat_etherstatspkts1024octetsto1522octets_lo;

	estats->etherstatspktsover1522octets_hi =
	    pstats->mac_stx[1].tx_stat_mac_2047_hi;
	estats->etherstatspktsover1522octets_lo =
	    pstats->mac_stx[1].tx_stat_mac_2047_lo;

	ADD_64(estats->etherstatspktsover1522octets_hi,
	       pstats->mac_stx[1].tx_stat_mac_4095_hi,
	       estats->etherstatspktsover1522octets_lo,
	       pstats->mac_stx[1].tx_stat_mac_4095_lo);

	ADD_64(estats->etherstatspktsover1522octets_hi,
	       pstats->mac_stx[1].tx_stat_mac_9216_hi,
	       estats->etherstatspktsover1522octets_lo,
	       pstats->mac_stx[1].tx_stat_mac_9216_lo);

	ADD_64(estats->etherstatspktsover1522octets_hi,
	       pstats->mac_stx[1].tx_stat_mac_16383_hi,
	       estats->etherstatspktsover1522octets_lo,
	       pstats->mac_stx[1].tx_stat_mac_16383_lo);

	estats->pause_frames_received_hi =
				pstats->mac_stx[1].rx_stat_mac_xpf_hi;
	estats->pause_frames_received_lo =
				pstats->mac_stx[1].rx_stat_mac_xpf_lo;

	estats->pause_frames_sent_hi =
				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
	estats->pause_frames_sent_lo =
				pstats->mac_stx[1].tx_stat_outxoffsent_lo;

	estats->pfc_frames_received_hi =
				pstats->pfc_frames_rx_hi;
	estats->pfc_frames_received_lo =
				pstats->pfc_frames_rx_lo;
	estats->pfc_frames_sent_hi =
				pstats->pfc_frames_tx_hi;
	estats->pfc_frames_sent_lo =
				pstats->pfc_frames_tx_lo;
}

static void bnx2x_emac_stats_update(struct bnx2x *bp)
{
	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
	UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
	UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
	UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
	UPDATE_EXTEND_STAT(tx_stat_outxonsent);
	UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
	UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
	UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);

	estats->pause_frames_received_hi =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
	estats->pause_frames_received_lo =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
	ADD_64(estats->pause_frames_received_hi,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
	       estats->pause_frames_received_lo,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);

	estats->pause_frames_sent_hi =
			pstats->mac_stx[1].tx_stat_outxonsent_hi;
	estats->pause_frames_sent_lo =
			pstats->mac_stx[1].tx_stat_outxonsent_lo;
	ADD_64(estats->pause_frames_sent_hi,
	       pstats->mac_stx[1].tx_stat_outxoffsent_hi,
	       estats->pause_frames_sent_lo,
	       pstats->mac_stx[1].tx_stat_outxoffsent_lo);
}

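/* Fold the freshly DMAed MAC and NIG hardware counters into the port
 * and ethernet statistics.
 */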
static int bnx2x_hw_stats_update(struct bnx2x *bp)
{
	struct nig_stats *new = bnx2x_sp(bp, nig_stats);
	struct nig_stats *old = &(bp->port.old_nig_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;

	switch (bp->link_vars.mac_type) {
	case MAC_TYPE_BMAC:
		bnx2x_bmac_stats_update(bp);
		break;

	case MAC_TYPE_EMAC:
		bnx2x_emac_stats_update(bp);
		break;

	case MAC_TYPE_UMAC:
	case MAC_TYPE_XMAC:
		bnx2x_mstat_stats_update(bp);
		break;

	case MAC_TYPE_NONE: /* unreached */
		DP(BNX2X_MSG_STATS,
		   "stats updated by DMAE but no MAC active\n");
		return -1;

	default: /* unreached */
		BNX2X_ERR("Unknown MAC type\n");
	}

	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
		      new->brb_discard - old->brb_discard);
	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
		      new->brb_truncate - old->brb_truncate);

	if (!CHIP_IS_E3(bp)) {
		UPDATE_STAT64_NIG(egress_mac_pkt0,
					etherstatspkts1024octetsto1522octets);
		UPDATE_STAT64_NIG(egress_mac_pkt1,
					etherstatspktsover1522octets);
	}

	memcpy(old, new, sizeof(struct nig_stats));

	memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
	       sizeof(struct mac_stx));
	estats->brb_drop_hi = pstats->brb_drop_hi;
	estats->brb_drop_lo = pstats->brb_drop_lo;

	pstats->host_port_stats_counter++;

	if (CHIP_IS_E3(bp)) {
		u32 lpi_reg = BP_PORT(bp) ? MISC_REG_CPMU_LP_SM_ENT_CNT_P1
					  : MISC_REG_CPMU_LP_SM_ENT_CNT_P0;
		estats->eee_tx_lpi += REG_RD(bp, lpi_reg);
	}

	if (!BP_NOMCP(bp)) {
		u32 nig_timer_max =
			SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
		if (nig_timer_max != estats->nig_timer_max) {
			estats->nig_timer_max = nig_timer_max;
			BNX2X_ERR("NIG timer max (%u)\n",
				  estats->nig_timer_max);
		}
	}

	return 0;
}

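/* Update the driver statistics from the per-storm firmware counters.
 * Returns -EAGAIN if the firmware has not yet processed the last
 * statistics ramrod (i.e. the storm counters are stale).
 */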
static int bnx2x_storm_stats_update(struct bnx2x *bp)
{
	struct tstorm_per_port_stats *tport =
				&bp->fw_stats_data->port.tstorm_port_statistics;
	struct tstorm_per_pf_stats *tfunc =
				&bp->fw_stats_data->pf.tstorm_pf_statistics;
	struct host_func_stats *fstats = &bp->func_stats;
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct bnx2x_eth_stats_old *estats_old = &bp->eth_stats_old;
	struct stats_counter *counters = &bp->fw_stats_data->storm_counters;
	int i;
	u16 cur_stats_counter;

	/* Make sure we use the value of the counter
	 * used for sending the last stats ramrod.
	 */
	spin_lock_bh(&bp->stats_lock);
	cur_stats_counter = bp->stats_counter - 1;
	spin_unlock_bh(&bp->stats_lock);

	/* are storm stats valid? */
	if (le16_to_cpu(counters->xstats_counter) != cur_stats_counter) {
		DP(BNX2X_MSG_STATS,
		   "stats not updated by xstorm  xstorm counter (0x%x) != stats_counter (0x%x)\n",
		   le16_to_cpu(counters->xstats_counter), bp->stats_counter);
		return -EAGAIN;
	}

	if (le16_to_cpu(counters->ustats_counter) != cur_stats_counter) {
		DP(BNX2X_MSG_STATS,
		   "stats not updated by ustorm  ustorm counter (0x%x) != stats_counter (0x%x)\n",
		   le16_to_cpu(counters->ustats_counter), bp->stats_counter);
		return -EAGAIN;
	}

	if (le16_to_cpu(counters->cstats_counter) != cur_stats_counter) {
		DP(BNX2X_MSG_STATS,
		   "stats not updated by cstorm  cstorm counter (0x%x) != stats_counter (0x%x)\n",
		   le16_to_cpu(counters->cstats_counter), bp->stats_counter);
		return -EAGAIN;
	}

	if (le16_to_cpu(counters->tstats_counter) != cur_stats_counter) {
		DP(BNX2X_MSG_STATS,
		   "stats not updated by tstorm  tstorm counter (0x%x) != stats_counter (0x%x)\n",
		   le16_to_cpu(counters->tstats_counter), bp->stats_counter);
		return -EAGAIN;
	}

	estats->error_bytes_received_hi = 0;
	estats->error_bytes_received_lo = 0;

	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct tstorm_per_queue_stats *tclient =
			&bp->fw_stats_data->queue_stats[i].
			tstorm_queue_statistics;
		struct tstorm_per_queue_stats *old_tclient =
			&bnx2x_fp_stats(bp, fp)->old_tclient;
		struct ustorm_per_queue_stats *uclient =
			&bp->fw_stats_data->queue_stats[i].
			ustorm_queue_statistics;
		struct ustorm_per_queue_stats *old_uclient =
			&bnx2x_fp_stats(bp, fp)->old_uclient;
		struct xstorm_per_queue_stats *xclient =
			&bp->fw_stats_data->queue_stats[i].
			xstorm_queue_statistics;
		struct xstorm_per_queue_stats *old_xclient =
			&bnx2x_fp_stats(bp, fp)->old_xclient;
		struct bnx2x_eth_q_stats *qstats =
			&bnx2x_fp_stats(bp, fp)->eth_q_stats;
		struct bnx2x_eth_q_stats_old *qstats_old =
			&bnx2x_fp_stats(bp, fp)->eth_q_stats_old;

		u32 diff;

		DP(BNX2X_MSG_STATS, "queue[%d]: ucast_sent 0x%x, bcast_sent 0x%x mcast_sent 0x%x\n",
		   i, xclient->ucast_pkts_sent,
		   xclient->bcast_pkts_sent, xclient->mcast_pkts_sent);

		DP(BNX2X_MSG_STATS, "---------------\n");

		UPDATE_QSTAT(tclient->rcv_bcast_bytes,
			     total_broadcast_bytes_received);
		UPDATE_QSTAT(tclient->rcv_mcast_bytes,
			     total_multicast_bytes_received);
		UPDATE_QSTAT(tclient->rcv_ucast_bytes,
			     total_unicast_bytes_received);

		/*
		 * sum to total_bytes_received all
		 * unicast/multicast/broadcast
		 */
		qstats->total_bytes_received_hi =
			qstats->total_broadcast_bytes_received_hi;
		qstats->total_bytes_received_lo =
			qstats->total_broadcast_bytes_received_lo;

		ADD_64(qstats->total_bytes_received_hi,
		       qstats->total_multicast_bytes_received_hi,
		       qstats->total_bytes_received_lo,
		       qstats->total_multicast_bytes_received_lo);

		ADD_64(qstats->total_bytes_received_hi,
		       qstats->total_unicast_bytes_received_hi,
		       qstats->total_bytes_received_lo,
		       qstats->total_unicast_bytes_received_lo);

		qstats->valid_bytes_received_hi =
					qstats->total_bytes_received_hi;
		qstats->valid_bytes_received_lo =
					qstats->total_bytes_received_lo;

		UPDATE_EXTEND_TSTAT(rcv_ucast_pkts,
					total_unicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_mcast_pkts,
					total_multicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_bcast_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_E_TSTAT(pkts_too_big_discard,
				      etherstatsoverrsizepkts);
		UPDATE_EXTEND_E_TSTAT(no_buff_discard, no_buff_discard);

		SUB_EXTEND_USTAT(ucast_no_buff_pkts,
					total_unicast_packets_received);
		SUB_EXTEND_USTAT(mcast_no_buff_pkts,
					total_multicast_packets_received);
		SUB_EXTEND_USTAT(bcast_no_buff_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_E_USTAT(ucast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_E_USTAT(mcast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_E_USTAT(bcast_no_buff_pkts, no_buff_discard);

		UPDATE_QSTAT(xclient->bcast_bytes_sent,
			     total_broadcast_bytes_transmitted);
		UPDATE_QSTAT(xclient->mcast_bytes_sent,
			     total_multicast_bytes_transmitted);
		UPDATE_QSTAT(xclient->ucast_bytes_sent,
			     total_unicast_bytes_transmitted);

		/*
		 * sum to total_bytes_transmitted all
		 * unicast/multicast/broadcast
		 */
		qstats->total_bytes_transmitted_hi =
				qstats->total_unicast_bytes_transmitted_hi;
		qstats->total_bytes_transmitted_lo =
				qstats->total_unicast_bytes_transmitted_lo;

		ADD_64(qstats->total_bytes_transmitted_hi,
		       qstats->total_broadcast_bytes_transmitted_hi,
		       qstats->total_bytes_transmitted_lo,
		       qstats->total_broadcast_bytes_transmitted_lo);

		ADD_64(qstats->total_bytes_transmitted_hi,
		       qstats->total_multicast_bytes_transmitted_hi,
		       qstats->total_bytes_transmitted_lo,
		       qstats->total_multicast_bytes_transmitted_lo);

		UPDATE_EXTEND_XSTAT(ucast_pkts_sent,
					total_unicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(mcast_pkts_sent,
					total_multicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(bcast_pkts_sent,
					total_broadcast_packets_transmitted);

		UPDATE_EXTEND_TSTAT(checksum_discard,
				    total_packets_received_checksum_discarded);
		UPDATE_EXTEND_TSTAT(ttl0_discard,
				    total_packets_received_ttl0_discarded);

		UPDATE_EXTEND_XSTAT(error_drop_pkts,
				    total_transmitted_dropped_packets_error);

		/* TPA aggregations completed */
		UPDATE_EXTEND_E_USTAT(coalesced_events, total_tpa_aggregations);
		/* Number of network frames aggregated by TPA */
		UPDATE_EXTEND_E_USTAT(coalesced_pkts,
				      total_tpa_aggregated_frames);
		/* Total number of bytes in completed TPA aggregations */
		UPDATE_QSTAT(uclient->coalesced_bytes, total_tpa_bytes);

		UPDATE_ESTAT_QSTAT_64(total_tpa_bytes);

		UPDATE_FSTAT_QSTAT(total_bytes_received);
		UPDATE_FSTAT_QSTAT(total_bytes_transmitted);
		UPDATE_FSTAT_QSTAT(total_unicast_packets_received);
		UPDATE_FSTAT_QSTAT(total_multicast_packets_received);
		UPDATE_FSTAT_QSTAT(total_broadcast_packets_received);
		UPDATE_FSTAT_QSTAT(total_unicast_packets_transmitted);
		UPDATE_FSTAT_QSTAT(total_multicast_packets_transmitted);
		UPDATE_FSTAT_QSTAT(total_broadcast_packets_transmitted);
		UPDATE_FSTAT_QSTAT(valid_bytes_received);
	}

	ADD_64(estats->total_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->total_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	ADD_64(estats->total_bytes_received_hi,
	       le32_to_cpu(tfunc->rcv_error_bytes.hi),
	       estats->total_bytes_received_lo,
	       le32_to_cpu(tfunc->rcv_error_bytes.lo));

	ADD_64(estats->error_bytes_received_hi,
	       le32_to_cpu(tfunc->rcv_error_bytes.hi),
	       estats->error_bytes_received_lo,
	       le32_to_cpu(tfunc->rcv_error_bytes.lo));

	UPDATE_ESTAT(etherstatsoverrsizepkts, rx_stat_dot3statsframestoolong);

	ADD_64(estats->error_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->error_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	if (bp->port.pmf) {
		struct bnx2x_fw_port_stats_old *fwstats = &bp->fw_stats_old;
		UPDATE_FW_STAT(mac_filter_discard);
		UPDATE_FW_STAT(mf_tag_discard);
		UPDATE_FW_STAT(brb_truncate_discard);
		UPDATE_FW_STAT(mac_discard);
	}

	fstats->host_func_stats_start = ++fstats->host_func_stats_end;

	bp->stats_pending = 0;

	return 0;
}

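/* Fold the accumulated driver statistics into the netdev's
 * struct net_device_stats.
 */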
static void bnx2x_net_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct net_device_stats *nstats = &bp->dev->stats;
	unsigned long tmp;
	int i;

	nstats->rx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_received_hi);

	nstats->tx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);

	nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);

	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);

	tmp = estats->mac_discard;
	for_each_rx_queue(bp, i) {
		struct tstorm_per_queue_stats *old_tclient =
			&bp->fp_stats[i].old_tclient;
		tmp += le32_to_cpu(old_tclient->checksum_discard);
	}
	nstats->rx_dropped = tmp + bp->net_stats_old.rx_dropped;

	nstats->tx_dropped = 0;

	nstats->multicast =
		bnx2x_hilo(&estats->total_multicast_packets_received_hi);

	nstats->collisions =
		bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);

	nstats->rx_length_errors =
		bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
		bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
	nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
				 bnx2x_hilo(&estats->brb_truncate_hi);
	nstats->rx_crc_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
	nstats->rx_frame_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
	nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
	nstats->rx_missed_errors = 0;

	nstats->rx_errors = nstats->rx_length_errors +
			    nstats->rx_over_errors +
			    nstats->rx_crc_errors +
			    nstats->rx_frame_errors +
			    nstats->rx_fifo_errors +
			    nstats->rx_missed_errors;

	nstats->tx_aborted_errors =
		bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
		bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
	nstats->tx_carrier_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
	nstats->tx_fifo_errors = 0;
	nstats->tx_heartbeat_errors = 0;
	nstats->tx_window_errors = 0;

	nstats->tx_errors = nstats->tx_aborted_errors +
			    nstats->tx_carrier_errors +
	    bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
}

static void bnx2x_drv_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_eth_q_stats *qstats = &bp->fp_stats[i].eth_q_stats;
		struct bnx2x_eth_q_stats_old *qstats_old =
			&bp->fp_stats[i].eth_q_stats_old;

		UPDATE_ESTAT_QSTAT(driver_xoff);
		UPDATE_ESTAT_QSTAT(rx_err_discard_pkt);
		UPDATE_ESTAT_QSTAT(rx_skb_alloc_failed);
		UPDATE_ESTAT_QSTAT(hw_csum_err);
	}
}

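/* The management firmware can ask the driver to stop collecting
 * statistics through the edebug driver interface word in shmem2.
 */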
static bool bnx2x_edebug_stats_stopped(struct bnx2x *bp)
{
	u32 val;

	if (SHMEM2_HAS(bp, edebug_driver_if[1])) {
		val = SHMEM2_RD(bp, edebug_driver_if[1]);

		if (val == EDEBUG_DRIVER_IF_OP_CODE_DISABLE_STAT)
			return true;
	}

	return false;
}

static void bnx2x_stats_update(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	if (bnx2x_edebug_stats_stopped(bp))
		return;

	if (*stats_comp != DMAE_COMP_VAL)
		return;

	if (bp->port.pmf)
		bnx2x_hw_stats_update(bp);

	if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
		BNX2X_ERR("storm stats were not updated for 3 times\n");
		bnx2x_panic();
		return;
	}

	bnx2x_net_stats_update(bp);
	bnx2x_drv_stats_update(bp);

	if (netif_msg_timer(bp)) {
		struct bnx2x_eth_stats *estats = &bp->eth_stats;

		netdev_dbg(bp->dev, "brb drops %u  brb truncate %u\n",
		       estats->brb_drop_lo, estats->brb_truncate_lo);
	}

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_port_stats_stop(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	bp->executer_idx = 0;

	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC, false, 0);

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		if (bp->func_stx)
			dmae->opcode = bnx2x_dmae_opcode_add_comp(
						opcode, DMAE_COMP_GRC);
		else
			dmae->opcode = bnx2x_dmae_opcode_add_comp(
						opcode, DMAE_COMP_PCI);

		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = bnx2x_get_port_stats_dma_len(bp);
		if (bp->func_stx) {
			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
			dmae->comp_addr_hi = 0;
			dmae->comp_val = 1;
		} else {
			dmae->comp_addr_lo =
				U64_LO(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_addr_hi =
				U64_HI(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_val = DMAE_COMP_VAL;

			*stats_comp = 0;
		}
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode =
			bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_val = DMAE_COMP_VAL;

		*stats_comp = 0;
	}
}

static void bnx2x_stats_stop(struct bnx2x *bp)
{
	int update = 0;

	bnx2x_stats_comp(bp);

	if (bp->port.pmf)
		update = (bnx2x_hw_stats_update(bp) == 0);

	update |= (bnx2x_storm_stats_update(bp) == 0);

	if (update) {
		bnx2x_net_stats_update(bp);

		if (bp->port.pmf)
			bnx2x_port_stats_stop(bp);

		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}
}

static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}

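/* Statistics state machine: indexed by [current state][event], each
 * entry gives the action to perform and the state to move to.
 */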
static const struct {
	void (*action)(struct bnx2x *bp);
	enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state	event	*/
{
/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*		LINK_UP	*/ {bnx2x_stats_start,      STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
/*		LINK_UP	*/ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_update,     STATS_STATE_ENABLED},
/*		STOP	*/ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
}
};

void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
	enum bnx2x_stats_state state;
	if (unlikely(bp->panic))
		return;

	spin_lock_bh(&bp->stats_lock);
	state = bp->stats_state;
	bp->stats_state = bnx2x_stats_stm[state][event].next_state;
	spin_unlock_bh(&bp->stats_lock);

	bnx2x_stats_stm[state][event].action(bp);

	if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
		   state, event, bp->stats_state);
}

static void bnx2x_port_stats_base_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
					 true, DMAE_COMP_PCI);
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_lo = bp->port.port_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = bnx2x_get_port_stats_dma_len(bp);
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

/* This function prepares the statistics ramrod data once, so that later
 * we only have to increment the statistics counter and send the ramrod
 * each time we have to.
 */
static void bnx2x_prep_fw_stats_req(struct bnx2x *bp)
{
	int i;
	int first_queue_query_index;
	struct stats_query_header *stats_hdr = &bp->fw_stats_req->hdr;

	dma_addr_t cur_data_offset;
	struct stats_query_entry *cur_query_entry;

	stats_hdr->cmd_num = bp->fw_stats_num;
	stats_hdr->drv_stats_counter = 0;

	/* storm_counters struct contains the counters of completed
	 * statistics requests per storm which are incremented by FW
	 * each time it completes handling a statistics ramrod. We will
	 * check these counters in the timer handler and discard a
	 * stale (statistics) ramrod completion.
	 */
	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, storm_counters);

	stats_hdr->stats_counters_addrs.hi =
		cpu_to_le32(U64_HI(cur_data_offset));
	stats_hdr->stats_counters_addrs.lo =
		cpu_to_le32(U64_LO(cur_data_offset));

	/* prepare for the first stats ramrod (which will be completed with
	 * the counters equal to zero) - init the counters to something
	 * different.
	 */
	memset(&bp->fw_stats_data->storm_counters, 0xff,
	       sizeof(struct stats_counter));

	/**** Port FW statistics data ****/
	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, port);

	cur_query_entry = &bp->fw_stats_req->query[BNX2X_PORT_QUERY_IDX];

	cur_query_entry->kind = STATS_TYPE_PORT;
	/* For port query index is a DONT CARE */
	cur_query_entry->index = BP_PORT(bp);
	/* For port query funcID is a DONT CARE */
	cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
	cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset));
	cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset));

	/**** PF FW statistics data ****/
	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, pf);

	cur_query_entry = &bp->fw_stats_req->query[BNX2X_PF_QUERY_IDX];

	cur_query_entry->kind = STATS_TYPE_PF;
	/* For PF query index is a DONT CARE */
	cur_query_entry->index = BP_PORT(bp);
	cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
	cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset));
	cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset));

	/**** FCoE FW statistics data ****/
	if (!NO_FCOE(bp)) {
		cur_data_offset = bp->fw_stats_data_mapping +
			offsetof(struct bnx2x_fw_stats_data, fcoe);

		cur_query_entry =
			&bp->fw_stats_req->query[BNX2X_FCOE_QUERY_IDX];

		cur_query_entry->kind = STATS_TYPE_FCOE;
		/* For FCoE query index is a DONT CARE */
		cur_query_entry->index = BP_PORT(bp);
		cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
		cur_query_entry->address.hi =
			cpu_to_le32(U64_HI(cur_data_offset));
		cur_query_entry->address.lo =
			cpu_to_le32(U64_LO(cur_data_offset));
	}

	/**** Clients' queries ****/
	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, queue_stats);

	/* the first queue query index depends on whether the FCoE offloaded
	 * request will be included in the ramrod
	 */
	if (!NO_FCOE(bp))
		first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX;
	else
		first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX - 1;

	for_each_eth_queue(bp, i) {
		cur_query_entry =
			&bp->fw_stats_req->
					query[first_queue_query_index + i];

		cur_query_entry->kind = STATS_TYPE_QUEUE;
		cur_query_entry->index = bnx2x_stats_id(&bp->fp[i]);
		cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
		cur_query_entry->address.hi =
			cpu_to_le32(U64_HI(cur_data_offset));
		cur_query_entry->address.lo =
			cpu_to_le32(U64_LO(cur_data_offset));

		cur_data_offset += sizeof(struct per_queue_stats);
	}

	/* add FCoE queue query if needed */
	if (!NO_FCOE(bp)) {
		cur_query_entry =
			&bp->fw_stats_req->
					query[first_queue_query_index + i];

		cur_query_entry->kind = STATS_TYPE_QUEUE;
		cur_query_entry->index = bnx2x_stats_id(&bp->fp[FCOE_IDX(bp)]);
		cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
		cur_query_entry->address.hi =
			cpu_to_le32(U64_HI(cur_data_offset));
		cur_query_entry->address.lo =
			cpu_to_le32(U64_LO(cur_data_offset));
	}
}

void bnx2x_stats_init(struct bnx2x *bp)
{
	int /*abs*/port = BP_PORT(bp);
	int mb_idx = BP_FW_MB_IDX(bp);
	int i;

	bp->stats_pending = 0;
	bp->executer_idx = 0;
	bp->stats_counter = 0;

	/* port and func stats for management */
	if (!BP_NOMCP(bp)) {
		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
		bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param);

	} else {
		bp->port.port_stx = 0;
		bp->func_stx = 0;
	}
	DP(BNX2X_MSG_STATS, "port_stx 0x%x  func_stx 0x%x\n",
	   bp->port.port_stx, bp->func_stx);

	/* pmf should retrieve port statistics from SP on a non-init */
	if (!bp->stats_init && bp->port.pmf && bp->port.port_stx)
		bnx2x_stats_handle(bp, STATS_EVENT_PMF);

	port = BP_PORT(bp);
	/* port stats */
	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
	bp->port.old_nig_stats.brb_discard =
			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
	bp->port.old_nig_stats.brb_truncate =
			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
	if (!CHIP_IS_E3(bp)) {
		REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
			    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
		REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
			    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
	}

	/* function stats */
	for_each_queue(bp, i) {
		struct bnx2x_fp_stats *fp_stats = &bp->fp_stats[i];

		memset(&fp_stats->old_tclient, 0,
		       sizeof(fp_stats->old_tclient));
		memset(&fp_stats->old_uclient, 0,
		       sizeof(fp_stats->old_uclient));
		memset(&fp_stats->old_xclient, 0,
		       sizeof(fp_stats->old_xclient));
		if (bp->stats_init) {
			memset(&fp_stats->eth_q_stats, 0,
			       sizeof(fp_stats->eth_q_stats));
			memset(&fp_stats->eth_q_stats_old, 0,
			       sizeof(fp_stats->eth_q_stats_old));
		}
	}

	/* Prepare statistics ramrod data */
	bnx2x_prep_fw_stats_req(bp);

	memset(&bp->dev->stats, 0, sizeof(bp->dev->stats));
	if (bp->stats_init) {
		memset(&bp->net_stats_old, 0, sizeof(bp->net_stats_old));
		memset(&bp->fw_stats_old, 0, sizeof(bp->fw_stats_old));
		memset(&bp->eth_stats_old, 0, sizeof(bp->eth_stats_old));
		memset(&bp->eth_stats, 0, sizeof(bp->eth_stats));
		memset(&bp->func_stats, 0, sizeof(bp->func_stats));

		/* Clean SP from previous statistics */
		if (bp->func_stx) {
			memset(bnx2x_sp(bp, func_stats), 0,
			       sizeof(struct host_func_stats));
			bnx2x_func_stats_init(bp);
			bnx2x_hw_stats_post(bp);
			bnx2x_stats_comp(bp);
		}
	}

	bp->stats_state = STATS_STATE_DISABLED;

	if (bp->port.pmf && bp->port.port_stx)
		bnx2x_port_stats_base_init(bp);

	/* mark the end of statistics initialization */
	bp->stats_init = false;
}

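/* Save the current statistics (e.g. across an unload/reload) into the
 * *_old structures so they can be folded back into the new counters.
 */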
1539 void bnx2x_save_statistics(struct bnx2x *bp)
1540 {
1541 	int i;
1542 	struct net_device_stats *nstats = &bp->dev->stats;
1543 
1544 	/* save queue statistics */
1545 	for_each_eth_queue(bp, i) {
1546 		struct bnx2x_fastpath *fp = &bp->fp[i];
1547 		struct bnx2x_eth_q_stats *qstats =
1548 			&bnx2x_fp_stats(bp, fp)->eth_q_stats;
1549 		struct bnx2x_eth_q_stats_old *qstats_old =
1550 			&bnx2x_fp_stats(bp, fp)->eth_q_stats_old;
1551 
1552 		UPDATE_QSTAT_OLD(total_unicast_bytes_received_hi);
1553 		UPDATE_QSTAT_OLD(total_unicast_bytes_received_lo);
1554 		UPDATE_QSTAT_OLD(total_broadcast_bytes_received_hi);
1555 		UPDATE_QSTAT_OLD(total_broadcast_bytes_received_lo);
1556 		UPDATE_QSTAT_OLD(total_multicast_bytes_received_hi);
1557 		UPDATE_QSTAT_OLD(total_multicast_bytes_received_lo);
1558 		UPDATE_QSTAT_OLD(total_unicast_bytes_transmitted_hi);
1559 		UPDATE_QSTAT_OLD(total_unicast_bytes_transmitted_lo);
1560 		UPDATE_QSTAT_OLD(total_broadcast_bytes_transmitted_hi);
1561 		UPDATE_QSTAT_OLD(total_broadcast_bytes_transmitted_lo);
1562 		UPDATE_QSTAT_OLD(total_multicast_bytes_transmitted_hi);
1563 		UPDATE_QSTAT_OLD(total_multicast_bytes_transmitted_lo);
1564 		UPDATE_QSTAT_OLD(total_tpa_bytes_hi);
1565 		UPDATE_QSTAT_OLD(total_tpa_bytes_lo);
1566 	}
1567 
1568 	/* save net_device_stats statistics */
1569 	bp->net_stats_old.rx_dropped = nstats->rx_dropped;
1570 
1571 	/* store port firmware statistics */
1572 	if (bp->port.pmf && IS_MF(bp)) {
1573 		struct bnx2x_eth_stats *estats = &bp->eth_stats;
1574 		struct bnx2x_fw_port_stats_old *fwstats = &bp->fw_stats_old;
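
		/* UPDATE_FW_STAT_OLD(f) saves the current eth_stats value of f
		 * into the firmware port stats copy (effectively
		 * fwstats->f = estats->f).
		 */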
1575 		UPDATE_FW_STAT_OLD(mac_filter_discard);
1576 		UPDATE_FW_STAT_OLD(mf_tag_discard);
1577 		UPDATE_FW_STAT_OLD(brb_truncate_discard);
1578 		UPDATE_FW_STAT_OLD(mac_discard);
1579 	}
1580 }
1581 
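/* Aggregate the per-queue and FCoE statistics into the afex_stats buffer
 * handed to the management firmware. All 64-bit counters are kept as
 * hi/lo pairs of 32-bit words.
 */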
1582 void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats,
1583 			      u32 stats_type)
1584 {
1585 	int i;
1586 	struct afex_stats *afex_stats = (struct afex_stats *)void_afex_stats;
1587 	struct bnx2x_eth_stats *estats = &bp->eth_stats;
1588 	struct per_queue_stats *fcoe_q_stats =
1589 		&bp->fw_stats_data->queue_stats[FCOE_IDX(bp)];
1590 
1591 	struct tstorm_per_queue_stats *fcoe_q_tstorm_stats =
1592 		&fcoe_q_stats->tstorm_queue_statistics;
1593 
1594 	struct ustorm_per_queue_stats *fcoe_q_ustorm_stats =
1595 		&fcoe_q_stats->ustorm_queue_statistics;
1596 
1597 	struct xstorm_per_queue_stats *fcoe_q_xstorm_stats =
1598 		&fcoe_q_stats->xstorm_queue_statistics;
1599 
1600 	struct fcoe_statistics_params *fw_fcoe_stat =
1601 		&bp->fw_stats_data->fcoe;
1602 
1603 	memset(afex_stats, 0, sizeof(struct afex_stats));
1604 
1605 	for_each_eth_queue(bp, i) {
1606 		struct bnx2x_eth_q_stats *qstats = &bp->fp_stats[i].eth_q_stats;
1607 
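		/* ADD_64(s_hi, a_hi, s_lo, a_lo) adds the split 64-bit value
		 * a_hi:a_lo into the accumulator s_hi:s_lo, carrying any
		 * overflow of the low word into the high word.
		 */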
1608 		ADD_64(afex_stats->rx_unicast_bytes_hi,
1609 		       qstats->total_unicast_bytes_received_hi,
1610 		       afex_stats->rx_unicast_bytes_lo,
1611 		       qstats->total_unicast_bytes_received_lo);
1612 
1613 		ADD_64(afex_stats->rx_broadcast_bytes_hi,
1614 		       qstats->total_broadcast_bytes_received_hi,
1615 		       afex_stats->rx_broadcast_bytes_lo,
1616 		       qstats->total_broadcast_bytes_received_lo);
1617 
1618 		ADD_64(afex_stats->rx_multicast_bytes_hi,
1619 		       qstats->total_multicast_bytes_received_hi,
1620 		       afex_stats->rx_multicast_bytes_lo,
1621 		       qstats->total_multicast_bytes_received_lo);
1622 
1623 		ADD_64(afex_stats->rx_unicast_frames_hi,
1624 		       qstats->total_unicast_packets_received_hi,
1625 		       afex_stats->rx_unicast_frames_lo,
1626 		       qstats->total_unicast_packets_received_lo);
1627 
1628 		ADD_64(afex_stats->rx_broadcast_frames_hi,
1629 		       qstats->total_broadcast_packets_received_hi,
1630 		       afex_stats->rx_broadcast_frames_lo,
1631 		       qstats->total_broadcast_packets_received_lo);
1632 
1633 		ADD_64(afex_stats->rx_multicast_frames_hi,
1634 		       qstats->total_multicast_packets_received_hi,
1635 		       afex_stats->rx_multicast_frames_lo,
1636 		       qstats->total_multicast_packets_received_lo);
1637 
		/* sum into rx_frames_discarded all packets discarded
		 * due to size, ttl0 and checksum errors
		 */
1641 		ADD_64(afex_stats->rx_frames_discarded_hi,
1642 		       qstats->total_packets_received_checksum_discarded_hi,
1643 		       afex_stats->rx_frames_discarded_lo,
1644 		       qstats->total_packets_received_checksum_discarded_lo);
1645 
1646 		ADD_64(afex_stats->rx_frames_discarded_hi,
1647 		       qstats->total_packets_received_ttl0_discarded_hi,
1648 		       afex_stats->rx_frames_discarded_lo,
1649 		       qstats->total_packets_received_ttl0_discarded_lo);
1650 
1651 		ADD_64(afex_stats->rx_frames_discarded_hi,
1652 		       qstats->etherstatsoverrsizepkts_hi,
1653 		       afex_stats->rx_frames_discarded_lo,
1654 		       qstats->etherstatsoverrsizepkts_lo);
1655 
1656 		ADD_64(afex_stats->rx_frames_dropped_hi,
1657 		       qstats->no_buff_discard_hi,
1658 		       afex_stats->rx_frames_dropped_lo,
1659 		       qstats->no_buff_discard_lo);
1660 
1661 		ADD_64(afex_stats->tx_unicast_bytes_hi,
1662 		       qstats->total_unicast_bytes_transmitted_hi,
1663 		       afex_stats->tx_unicast_bytes_lo,
1664 		       qstats->total_unicast_bytes_transmitted_lo);
1665 
1666 		ADD_64(afex_stats->tx_broadcast_bytes_hi,
1667 		       qstats->total_broadcast_bytes_transmitted_hi,
1668 		       afex_stats->tx_broadcast_bytes_lo,
1669 		       qstats->total_broadcast_bytes_transmitted_lo);
1670 
1671 		ADD_64(afex_stats->tx_multicast_bytes_hi,
1672 		       qstats->total_multicast_bytes_transmitted_hi,
1673 		       afex_stats->tx_multicast_bytes_lo,
1674 		       qstats->total_multicast_bytes_transmitted_lo);
1675 
1676 		ADD_64(afex_stats->tx_unicast_frames_hi,
1677 		       qstats->total_unicast_packets_transmitted_hi,
1678 		       afex_stats->tx_unicast_frames_lo,
1679 		       qstats->total_unicast_packets_transmitted_lo);
1680 
1681 		ADD_64(afex_stats->tx_broadcast_frames_hi,
1682 		       qstats->total_broadcast_packets_transmitted_hi,
1683 		       afex_stats->tx_broadcast_frames_lo,
1684 		       qstats->total_broadcast_packets_transmitted_lo);
1685 
1686 		ADD_64(afex_stats->tx_multicast_frames_hi,
1687 		       qstats->total_multicast_packets_transmitted_hi,
1688 		       afex_stats->tx_multicast_frames_lo,
1689 		       qstats->total_multicast_packets_transmitted_lo);
1690 
1691 		ADD_64(afex_stats->tx_frames_dropped_hi,
1692 		       qstats->total_transmitted_dropped_packets_error_hi,
1693 		       afex_stats->tx_frames_dropped_lo,
1694 		       qstats->total_transmitted_dropped_packets_error_lo);
1695 	}
1696 
	/* now add the FCoE statistics, which are collected separately
	 * (both offloaded and non-offloaded traffic)
	 */
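	/* The firmware counters below are little-endian; ADD_64_LE converts
	 * them to CPU order before the 64-bit accumulation, and LE32_0 is a
	 * zero constant used where only a low 32-bit counter is available.
	 */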
1700 	if (!NO_FCOE(bp)) {
1701 		ADD_64_LE(afex_stats->rx_unicast_bytes_hi,
1702 			  LE32_0,
1703 			  afex_stats->rx_unicast_bytes_lo,
1704 			  fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt);
1705 
1706 		ADD_64_LE(afex_stats->rx_unicast_bytes_hi,
1707 			  fcoe_q_tstorm_stats->rcv_ucast_bytes.hi,
1708 			  afex_stats->rx_unicast_bytes_lo,
1709 			  fcoe_q_tstorm_stats->rcv_ucast_bytes.lo);
1710 
1711 		ADD_64_LE(afex_stats->rx_broadcast_bytes_hi,
1712 			  fcoe_q_tstorm_stats->rcv_bcast_bytes.hi,
1713 			  afex_stats->rx_broadcast_bytes_lo,
1714 			  fcoe_q_tstorm_stats->rcv_bcast_bytes.lo);
1715 
1716 		ADD_64_LE(afex_stats->rx_multicast_bytes_hi,
1717 			  fcoe_q_tstorm_stats->rcv_mcast_bytes.hi,
1718 			  afex_stats->rx_multicast_bytes_lo,
1719 			  fcoe_q_tstorm_stats->rcv_mcast_bytes.lo);
1720 
1721 		ADD_64_LE(afex_stats->rx_unicast_frames_hi,
1722 			  LE32_0,
1723 			  afex_stats->rx_unicast_frames_lo,
1724 			  fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt);
1725 
1726 		ADD_64_LE(afex_stats->rx_unicast_frames_hi,
1727 			  LE32_0,
1728 			  afex_stats->rx_unicast_frames_lo,
1729 			  fcoe_q_tstorm_stats->rcv_ucast_pkts);
1730 
1731 		ADD_64_LE(afex_stats->rx_broadcast_frames_hi,
1732 			  LE32_0,
1733 			  afex_stats->rx_broadcast_frames_lo,
1734 			  fcoe_q_tstorm_stats->rcv_bcast_pkts);
1735 
1736 		ADD_64_LE(afex_stats->rx_multicast_frames_hi,
1737 			  LE32_0,
1738 			  afex_stats->rx_multicast_frames_lo,
			  fcoe_q_tstorm_stats->rcv_mcast_pkts);
1740 
1741 		ADD_64_LE(afex_stats->rx_frames_discarded_hi,
1742 			  LE32_0,
1743 			  afex_stats->rx_frames_discarded_lo,
1744 			  fcoe_q_tstorm_stats->checksum_discard);
1745 
1746 		ADD_64_LE(afex_stats->rx_frames_discarded_hi,
1747 			  LE32_0,
1748 			  afex_stats->rx_frames_discarded_lo,
1749 			  fcoe_q_tstorm_stats->pkts_too_big_discard);
1750 
1751 		ADD_64_LE(afex_stats->rx_frames_discarded_hi,
1752 			  LE32_0,
1753 			  afex_stats->rx_frames_discarded_lo,
1754 			  fcoe_q_tstorm_stats->ttl0_discard);
1755 
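		/* no_buff_discard is only a 16-bit firmware counter, hence
		 * the ADD_64_LE16/LE16_0 variant
		 */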
1756 		ADD_64_LE16(afex_stats->rx_frames_dropped_hi,
1757 			    LE16_0,
1758 			    afex_stats->rx_frames_dropped_lo,
1759 			    fcoe_q_tstorm_stats->no_buff_discard);
1760 
1761 		ADD_64_LE(afex_stats->rx_frames_dropped_hi,
1762 			  LE32_0,
1763 			  afex_stats->rx_frames_dropped_lo,
1764 			  fcoe_q_ustorm_stats->ucast_no_buff_pkts);
1765 
1766 		ADD_64_LE(afex_stats->rx_frames_dropped_hi,
1767 			  LE32_0,
1768 			  afex_stats->rx_frames_dropped_lo,
1769 			  fcoe_q_ustorm_stats->mcast_no_buff_pkts);
1770 
1771 		ADD_64_LE(afex_stats->rx_frames_dropped_hi,
1772 			  LE32_0,
1773 			  afex_stats->rx_frames_dropped_lo,
1774 			  fcoe_q_ustorm_stats->bcast_no_buff_pkts);
1775 
1776 		ADD_64_LE(afex_stats->rx_frames_dropped_hi,
1777 			  LE32_0,
1778 			  afex_stats->rx_frames_dropped_lo,
1779 			  fw_fcoe_stat->rx_stat1.fcoe_rx_drop_pkt_cnt);
1780 
1781 		ADD_64_LE(afex_stats->rx_frames_dropped_hi,
1782 			  LE32_0,
1783 			  afex_stats->rx_frames_dropped_lo,
1784 			  fw_fcoe_stat->rx_stat2.fcoe_rx_drop_pkt_cnt);
1785 
1786 		ADD_64_LE(afex_stats->tx_unicast_bytes_hi,
1787 			  LE32_0,
1788 			  afex_stats->tx_unicast_bytes_lo,
1789 			  fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt);
1790 
1791 		ADD_64_LE(afex_stats->tx_unicast_bytes_hi,
1792 			  fcoe_q_xstorm_stats->ucast_bytes_sent.hi,
1793 			  afex_stats->tx_unicast_bytes_lo,
1794 			  fcoe_q_xstorm_stats->ucast_bytes_sent.lo);
1795 
1796 		ADD_64_LE(afex_stats->tx_broadcast_bytes_hi,
1797 			  fcoe_q_xstorm_stats->bcast_bytes_sent.hi,
1798 			  afex_stats->tx_broadcast_bytes_lo,
1799 			  fcoe_q_xstorm_stats->bcast_bytes_sent.lo);
1800 
1801 		ADD_64_LE(afex_stats->tx_multicast_bytes_hi,
1802 			  fcoe_q_xstorm_stats->mcast_bytes_sent.hi,
1803 			  afex_stats->tx_multicast_bytes_lo,
1804 			  fcoe_q_xstorm_stats->mcast_bytes_sent.lo);
1805 
1806 		ADD_64_LE(afex_stats->tx_unicast_frames_hi,
1807 			  LE32_0,
1808 			  afex_stats->tx_unicast_frames_lo,
1809 			  fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt);
1810 
1811 		ADD_64_LE(afex_stats->tx_unicast_frames_hi,
1812 			  LE32_0,
1813 			  afex_stats->tx_unicast_frames_lo,
1814 			  fcoe_q_xstorm_stats->ucast_pkts_sent);
1815 
1816 		ADD_64_LE(afex_stats->tx_broadcast_frames_hi,
1817 			  LE32_0,
1818 			  afex_stats->tx_broadcast_frames_lo,
1819 			  fcoe_q_xstorm_stats->bcast_pkts_sent);
1820 
1821 		ADD_64_LE(afex_stats->tx_multicast_frames_hi,
1822 			  LE32_0,
1823 			  afex_stats->tx_multicast_frames_lo,
1824 			  fcoe_q_xstorm_stats->mcast_pkts_sent);
1825 
1826 		ADD_64_LE(afex_stats->tx_frames_dropped_hi,
1827 			  LE32_0,
1828 			  afex_stats->tx_frames_dropped_lo,
1829 			  fcoe_q_xstorm_stats->error_drop_pkts);
1830 	}
1831 
	/* if port stats are requested, add them to the PMF
	 * stats, since the MCP accumulates them anyway before
	 * sending them to the switch
	 */
1836 	if ((bp->port.pmf) && (stats_type == VICSTATST_UIF_INDEX)) {
1837 		ADD_64(afex_stats->rx_frames_dropped_hi,
1838 		       0,
1839 		       afex_stats->rx_frames_dropped_lo,
1840 		       estats->mac_filter_discard);
1841 		ADD_64(afex_stats->rx_frames_dropped_hi,
1842 		       0,
1843 		       afex_stats->rx_frames_dropped_lo,
1844 		       estats->brb_truncate_discard);
1845 		ADD_64(afex_stats->rx_frames_discarded_hi,
1846 		       0,
1847 		       afex_stats->rx_frames_discarded_lo,
1848 		       estats->mac_discard);
1849 	}
1850 }
1851