xref: /openbmc/linux/drivers/staging/qlge/qlge_dbg.c (revision 068ac0db)
1 // SPDX-License-Identifier: GPL-2.0
2 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
3 
4 #include <linux/slab.h>
5 
6 #include "qlge.h"
7 
8 /* Read a NIC register from the alternate function. */
9 static u32 ql_read_other_func_reg(struct ql_adapter *qdev,
10 				  u32 reg)
11 {
12 	u32 register_to_read;
13 	u32 reg_val;
14 	unsigned int status = 0;
15 
16 	register_to_read = MPI_NIC_REG_BLOCK
17 				| MPI_NIC_READ
18 				| (qdev->alt_func << MPI_NIC_FUNCTION_SHIFT)
19 				| reg;
20 	status = ql_read_mpi_reg(qdev, register_to_read, &reg_val);
21 	if (status != 0)
22 		return 0xffffffff;
23 
24 	return reg_val;
25 }
26 
27 /* Write a NIC register from the alternate function. */
28 static int ql_write_other_func_reg(struct ql_adapter *qdev,
29 				   u32 reg, u32 reg_val)
30 {
31 	u32 register_to_read;
32 	int status = 0;
33 
34 	register_to_read = MPI_NIC_REG_BLOCK
35 				| MPI_NIC_READ
36 				| (qdev->alt_func << MPI_NIC_FUNCTION_SHIFT)
37 				| reg;
38 	status = ql_write_mpi_reg(qdev, register_to_read, reg_val);
39 
40 	return status;
41 }
42 
43 static int ql_wait_other_func_reg_rdy(struct ql_adapter *qdev, u32 reg,
44 				      u32 bit, u32 err_bit)
45 {
46 	u32 temp;
47 	int count = 10;
48 
49 	while (count) {
50 		temp = ql_read_other_func_reg(qdev, reg);
51 
52 		/* check for errors */
53 		if (temp & err_bit)
54 			return -1;
55 		else if (temp & bit)
56 			return 0;
57 		mdelay(10);
58 		count--;
59 	}
60 	return -1;
61 }
62 
63 static int ql_read_other_func_serdes_reg(struct ql_adapter *qdev, u32 reg,
64 					 u32 *data)
65 {
66 	int status;
67 
68 	/* wait for reg to come ready */
69 	status = ql_wait_other_func_reg_rdy(qdev, XG_SERDES_ADDR / 4,
70 					    XG_SERDES_ADDR_RDY, 0);
71 	if (status)
72 		goto exit;
73 
74 	/* set up for reg read */
75 	ql_write_other_func_reg(qdev, XG_SERDES_ADDR / 4, reg | PROC_ADDR_R);
76 
77 	/* wait for reg to come ready */
78 	status = ql_wait_other_func_reg_rdy(qdev, XG_SERDES_ADDR / 4,
79 					    XG_SERDES_ADDR_RDY, 0);
80 	if (status)
81 		goto exit;
82 
83 	/* get the data */
84 	*data = ql_read_other_func_reg(qdev, (XG_SERDES_DATA / 4));
85 exit:
86 	return status;
87 }
88 
89 /* Read out the SERDES registers */
90 static int ql_read_serdes_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
91 {
92 	int status;
93 
94 	/* wait for reg to come ready */
95 	status = ql_wait_reg_rdy(qdev, XG_SERDES_ADDR, XG_SERDES_ADDR_RDY, 0);
96 	if (status)
97 		goto exit;
98 
99 	/* set up for reg read */
100 	ql_write32(qdev, XG_SERDES_ADDR, reg | PROC_ADDR_R);
101 
102 	/* wait for reg to come ready */
103 	status = ql_wait_reg_rdy(qdev, XG_SERDES_ADDR, XG_SERDES_ADDR_RDY, 0);
104 	if (status)
105 		goto exit;
106 
107 	/* get the data */
108 	*data = ql_read32(qdev, XG_SERDES_DATA);
109 exit:
110 	return status;
111 }
112 
113 static void ql_get_both_serdes(struct ql_adapter *qdev, u32 addr,
114 			       u32 *direct_ptr, u32 *indirect_ptr,
115 			       bool direct_valid, bool indirect_valid)
116 {
117 	unsigned int status;
118 
119 	status = 1;
120 	if (direct_valid)
121 		status = ql_read_serdes_reg(qdev, addr, direct_ptr);
122 	/* Dead fill any failures or invalids. */
123 	if (status)
124 		*direct_ptr = 0xDEADBEEF;
125 
126 	status = 1;
127 	if (indirect_valid)
128 		status = ql_read_other_func_serdes_reg(
129 						qdev, addr, indirect_ptr);
130 	/* Dead fill any failures or invalids. */
131 	if (status)
132 		*indirect_ptr = 0xDEADBEEF;
133 }
134 
/* Capture every SERDES register block (XAUI and XFI families) for both
 * the local ("direct") and alternate ("indirect") NIC function into the
 * coredump.  Powered-down or unreadable blocks are dead-filled with
 * 0xDEADBEEF by ql_get_both_serdes(), so this always returns 0.
 */
static int ql_get_serdes_regs(struct ql_adapter *qdev,
			      struct ql_mpi_coredump *mpi_coredump)
{
	int status;
	bool xfi_direct_valid = false, xfi_indirect_valid = false;
	bool xaui_direct_valid = true, xaui_indirect_valid = true;
	unsigned int i;
	u32 *direct_ptr, temp;
	u32 *indirect_ptr;


	/* The XAUI needs to be read out per port */
	status = ql_read_other_func_serdes_reg(qdev,
			XG_SERDES_XAUI_HSS_PCS_START, &temp);
	if (status)
		temp = XG_SERDES_ADDR_XAUI_PWR_DOWN;

	/* A powered-down XAUI on the other function means there is
	 * nothing valid to dump on the indirect side.
	 */
	if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
				XG_SERDES_ADDR_XAUI_PWR_DOWN)
		xaui_indirect_valid = false;

	status = ql_read_serdes_reg(qdev, XG_SERDES_XAUI_HSS_PCS_START, &temp);

	if (status)
		temp = XG_SERDES_ADDR_XAUI_PWR_DOWN;

	/* Likewise for our own (direct) XAUI. */
	if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
				XG_SERDES_ADDR_XAUI_PWR_DOWN)
		xaui_direct_valid = false;

	/*
	 * XFI register is shared so only need to read one
	 * functions and then check the bits.
	 */
	status = ql_read_serdes_reg(qdev, XG_SERDES_ADDR_STS, &temp);
	if (status)
		temp = 0;

	if ((temp & XG_SERDES_ADDR_XFI1_PWR_UP) ==
					XG_SERDES_ADDR_XFI1_PWR_UP) {
		/* now see if i'm NIC 1 or NIC 2 */
		if (qdev->func & 1)
			/* I'm NIC 2, so the indirect (NIC1) xfi is up. */
			xfi_indirect_valid = true;
		else
			xfi_direct_valid = true;
	}
	if ((temp & XG_SERDES_ADDR_XFI2_PWR_UP) ==
					XG_SERDES_ADDR_XFI2_PWR_UP) {
		/* now see if i'm NIC 1 or NIC 2 */
		if (qdev->func & 1)
			/* I'm NIC 2, so the indirect (NIC1) xfi is up. */
			xfi_direct_valid = true;
		else
			xfi_indirect_valid = true;
	}

	/* Get XAUI_AN register block.  Odd function number means this
	 * function's registers land in the "serdes2_*" arrays and the
	 * other function's in "serdes_*"; even is the reverse.  The same
	 * pairing is repeated for every block below.
	 */
	if (qdev->func & 1) {
		/* Function 2 is direct	*/
		direct_ptr = mpi_coredump->serdes2_xaui_an;
		indirect_ptr = mpi_coredump->serdes_xaui_an;
	} else {
		/* Function 1 is direct	*/
		direct_ptr = mpi_coredump->serdes_xaui_an;
		indirect_ptr = mpi_coredump->serdes2_xaui_an;
	}

	for (i = 0; i <= 0x000000034; i += 4, direct_ptr++, indirect_ptr++)
		ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
				   xaui_direct_valid, xaui_indirect_valid);

	/* Get XAUI_HSS_PCS register block. */
	if (qdev->func & 1) {
		direct_ptr =
			mpi_coredump->serdes2_xaui_hss_pcs;
		indirect_ptr =
			mpi_coredump->serdes_xaui_hss_pcs;
	} else {
		direct_ptr =
			mpi_coredump->serdes_xaui_hss_pcs;
		indirect_ptr =
			mpi_coredump->serdes2_xaui_hss_pcs;
	}

	for (i = 0x800; i <= 0x880; i += 4, direct_ptr++, indirect_ptr++)
		ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
				   xaui_direct_valid, xaui_indirect_valid);

	/* Get XAUI_XFI_AN register block. */
	if (qdev->func & 1) {
		direct_ptr = mpi_coredump->serdes2_xfi_an;
		indirect_ptr = mpi_coredump->serdes_xfi_an;
	} else {
		direct_ptr = mpi_coredump->serdes_xfi_an;
		indirect_ptr = mpi_coredump->serdes2_xfi_an;
	}

	for (i = 0x1000; i <= 0x1034; i += 4, direct_ptr++, indirect_ptr++)
		ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
				   xfi_direct_valid, xfi_indirect_valid);

	/* Get XAUI_XFI_TRAIN register block. */
	if (qdev->func & 1) {
		direct_ptr = mpi_coredump->serdes2_xfi_train;
		indirect_ptr =
			mpi_coredump->serdes_xfi_train;
	} else {
		direct_ptr = mpi_coredump->serdes_xfi_train;
		indirect_ptr =
			mpi_coredump->serdes2_xfi_train;
	}

	for (i = 0x1050; i <= 0x107c; i += 4, direct_ptr++, indirect_ptr++)
		ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
				   xfi_direct_valid, xfi_indirect_valid);

	/* Get XAUI_XFI_HSS_PCS register block. */
	if (qdev->func & 1) {
		direct_ptr =
			mpi_coredump->serdes2_xfi_hss_pcs;
		indirect_ptr =
			mpi_coredump->serdes_xfi_hss_pcs;
	} else {
		direct_ptr =
			mpi_coredump->serdes_xfi_hss_pcs;
		indirect_ptr =
			mpi_coredump->serdes2_xfi_hss_pcs;
	}

	for (i = 0x1800; i <= 0x1838; i += 4, direct_ptr++, indirect_ptr++)
		ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
				   xfi_direct_valid, xfi_indirect_valid);

	/* Get XAUI_XFI_HSS_TX register block.
	 * NOTE(review): this loop and the two below step by 1 rather
	 * than the 4-byte stride used above — kept as-is; confirm the
	 * intended addressing against the chip documentation.
	 */
	if (qdev->func & 1) {
		direct_ptr =
			mpi_coredump->serdes2_xfi_hss_tx;
		indirect_ptr =
			mpi_coredump->serdes_xfi_hss_tx;
	} else {
		direct_ptr = mpi_coredump->serdes_xfi_hss_tx;
		indirect_ptr =
			mpi_coredump->serdes2_xfi_hss_tx;
	}
	for (i = 0x1c00; i <= 0x1c1f; i++, direct_ptr++, indirect_ptr++)
		ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
				   xfi_direct_valid, xfi_indirect_valid);

	/* Get XAUI_XFI_HSS_RX register block. */
	if (qdev->func & 1) {
		direct_ptr =
			mpi_coredump->serdes2_xfi_hss_rx;
		indirect_ptr =
			mpi_coredump->serdes_xfi_hss_rx;
	} else {
		direct_ptr = mpi_coredump->serdes_xfi_hss_rx;
		indirect_ptr =
			mpi_coredump->serdes2_xfi_hss_rx;
	}

	for (i = 0x1c40; i <= 0x1c5f; i++, direct_ptr++, indirect_ptr++)
		ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
				   xfi_direct_valid, xfi_indirect_valid);


	/* Get XAUI_XFI_HSS_PLL register block. */
	if (qdev->func & 1) {
		direct_ptr =
			mpi_coredump->serdes2_xfi_hss_pll;
		indirect_ptr =
			mpi_coredump->serdes_xfi_hss_pll;
	} else {
		direct_ptr =
			mpi_coredump->serdes_xfi_hss_pll;
		indirect_ptr =
			mpi_coredump->serdes2_xfi_hss_pll;
	}
	for (i = 0x1e00; i <= 0x1e1f; i++, direct_ptr++, indirect_ptr++)
		ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
				   xfi_direct_valid, xfi_indirect_valid);
	return 0;
}
318 
319 static int ql_read_other_func_xgmac_reg(struct ql_adapter *qdev, u32 reg,
320 					u32 *data)
321 {
322 	int status = 0;
323 
324 	/* wait for reg to come ready */
325 	status = ql_wait_other_func_reg_rdy(qdev, XGMAC_ADDR / 4,
326 					    XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
327 	if (status)
328 		goto exit;
329 
330 	/* set up for reg read */
331 	ql_write_other_func_reg(qdev, XGMAC_ADDR / 4, reg | XGMAC_ADDR_R);
332 
333 	/* wait for reg to come ready */
334 	status = ql_wait_other_func_reg_rdy(qdev, XGMAC_ADDR / 4,
335 					    XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
336 	if (status)
337 		goto exit;
338 
339 	/* get the data */
340 	*data = ql_read_other_func_reg(qdev, XGMAC_DATA / 4);
341 exit:
342 	return status;
343 }
344 
345 /* Read the 400 xgmac control/statistics registers
346  * skipping unused locations.
347  */
348 static int ql_get_xgmac_regs(struct ql_adapter *qdev, u32 *buf,
349 			     unsigned int other_function)
350 {
351 	int status = 0;
352 	int i;
353 
354 	for (i = PAUSE_SRC_LO; i < XGMAC_REGISTER_END; i += 4, buf++) {
355 		/* We're reading 400 xgmac registers, but we filter out
356 		 * several locations that are non-responsive to reads.
357 		 */
358 		if ((i == 0x00000114) ||
359 		    (i == 0x00000118) ||
360 			(i == 0x0000013c) ||
361 			(i == 0x00000140) ||
362 			(i > 0x00000150 && i < 0x000001fc) ||
363 			(i > 0x00000278 && i < 0x000002a0) ||
364 			(i > 0x000002c0 && i < 0x000002cf) ||
365 			(i > 0x000002dc && i < 0x000002f0) ||
366 			(i > 0x000003c8 && i < 0x00000400) ||
367 			(i > 0x00000400 && i < 0x00000410) ||
368 			(i > 0x00000410 && i < 0x00000420) ||
369 			(i > 0x00000420 && i < 0x00000430) ||
370 			(i > 0x00000430 && i < 0x00000440) ||
371 			(i > 0x00000440 && i < 0x00000450) ||
372 			(i > 0x00000450 && i < 0x00000500) ||
373 			(i > 0x0000054c && i < 0x00000568) ||
374 			(i > 0x000005c8 && i < 0x00000600)) {
375 			if (other_function)
376 				status =
377 				ql_read_other_func_xgmac_reg(qdev, i, buf);
378 			else
379 				status = ql_read_xgmac_reg(qdev, i, buf);
380 
381 			if (status)
382 				*buf = 0xdeadbeef;
383 			break;
384 		}
385 	}
386 	return status;
387 }
388 
389 static int ql_get_ets_regs(struct ql_adapter *qdev, u32 *buf)
390 {
391 	int i;
392 
393 	for (i = 0; i < 8; i++, buf++) {
394 		ql_write32(qdev, NIC_ETS, i << 29 | 0x08000000);
395 		*buf = ql_read32(qdev, NIC_ETS);
396 	}
397 
398 	for (i = 0; i < 2; i++, buf++) {
399 		ql_write32(qdev, CNA_ETS, i << 29 | 0x08000000);
400 		*buf = ql_read32(qdev, CNA_ETS);
401 	}
402 
403 	return 0;
404 }
405 
406 static void ql_get_intr_states(struct ql_adapter *qdev, u32 *buf)
407 {
408 	int i;
409 
410 	for (i = 0; i < qdev->rx_ring_count; i++, buf++) {
411 		ql_write32(qdev, INTR_EN,
412 			   qdev->intr_context[i].intr_read_mask);
413 		*buf = ql_read32(qdev, INTR_EN);
414 	}
415 }
416 
417 static int ql_get_cam_entries(struct ql_adapter *qdev, u32 *buf)
418 {
419 	int i, status;
420 	u32 value[3];
421 
422 	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
423 	if (status)
424 		return status;
425 
426 	for (i = 0; i < 16; i++) {
427 		status = ql_get_mac_addr_reg(qdev,
428 					     MAC_ADDR_TYPE_CAM_MAC, i, value);
429 		if (status) {
430 			netif_err(qdev, drv, qdev->ndev,
431 				  "Failed read of mac index register\n");
432 			goto err;
433 		}
434 		*buf++ = value[0];	/* lower MAC address */
435 		*buf++ = value[1];	/* upper MAC address */
436 		*buf++ = value[2];	/* output */
437 	}
438 	for (i = 0; i < 32; i++) {
439 		status = ql_get_mac_addr_reg(qdev,
440 					     MAC_ADDR_TYPE_MULTI_MAC, i, value);
441 		if (status) {
442 			netif_err(qdev, drv, qdev->ndev,
443 				  "Failed read of mac index register\n");
444 			goto err;
445 		}
446 		*buf++ = value[0];	/* lower Mcast address */
447 		*buf++ = value[1];	/* upper Mcast address */
448 	}
449 err:
450 	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
451 	return status;
452 }
453 
454 static int ql_get_routing_entries(struct ql_adapter *qdev, u32 *buf)
455 {
456 	int status;
457 	u32 value, i;
458 
459 	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
460 	if (status)
461 		return status;
462 
463 	for (i = 0; i < 16; i++) {
464 		status = ql_get_routing_reg(qdev, i, &value);
465 		if (status) {
466 			netif_err(qdev, drv, qdev->ndev,
467 				  "Failed read of routing index register\n");
468 			goto err;
469 		} else {
470 			*buf++ = value;
471 		}
472 	}
473 err:
474 	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
475 	return status;
476 }
477 
478 /* Read the MPI Processor shadow registers */
479 static int ql_get_mpi_shadow_regs(struct ql_adapter *qdev, u32 *buf)
480 {
481 	u32 i;
482 	int status;
483 
484 	for (i = 0; i < MPI_CORE_SH_REGS_CNT; i++, buf++) {
485 		status = ql_write_mpi_reg(qdev, RISC_124,
486 				(SHADOW_OFFSET | i << SHADOW_REG_SHIFT));
487 		if (status)
488 			goto end;
489 		status = ql_read_mpi_reg(qdev, RISC_127, buf);
490 		if (status)
491 			goto end;
492 	}
493 end:
494 	return status;
495 }
496 
497 /* Read the MPI Processor core registers */
498 static int ql_get_mpi_regs(struct ql_adapter *qdev, u32 *buf,
499 			   u32 offset, u32 count)
500 {
501 	int i, status = 0;
502 	for (i = 0; i < count; i++, buf++) {
503 		status = ql_read_mpi_reg(qdev, offset + i, buf);
504 		if (status)
505 			return status;
506 	}
507 	return status;
508 }
509 
510 /* Read the ASIC probe dump */
511 static unsigned int *ql_get_probe(struct ql_adapter *qdev, u32 clock,
512 				  u32 valid, u32 *buf)
513 {
514 	u32 module, mux_sel, probe, lo_val, hi_val;
515 
516 	for (module = 0; module < PRB_MX_ADDR_MAX_MODS; module++) {
517 		if (!((valid >> module) & 1))
518 			continue;
519 		for (mux_sel = 0; mux_sel < PRB_MX_ADDR_MAX_MUX; mux_sel++) {
520 			probe = clock
521 				| PRB_MX_ADDR_ARE
522 				| mux_sel
523 				| (module << PRB_MX_ADDR_MOD_SEL_SHIFT);
524 			ql_write32(qdev, PRB_MX_ADDR, probe);
525 			lo_val = ql_read32(qdev, PRB_MX_DATA);
526 			if (mux_sel == 0) {
527 				*buf = probe;
528 				buf++;
529 			}
530 			probe |= PRB_MX_ADDR_UP;
531 			ql_write32(qdev, PRB_MX_ADDR, probe);
532 			hi_val = ql_read32(qdev, PRB_MX_DATA);
533 			*buf = lo_val;
534 			buf++;
535 			*buf = hi_val;
536 			buf++;
537 		}
538 	}
539 	return buf;
540 }
541 
542 static int ql_get_probe_dump(struct ql_adapter *qdev, unsigned int *buf)
543 {
544 	/* First we have to enable the probe mux */
545 	ql_write_mpi_reg(qdev, MPI_TEST_FUNC_PRB_CTL, MPI_TEST_FUNC_PRB_EN);
546 	buf = ql_get_probe(qdev, PRB_MX_ADDR_SYS_CLOCK,
547 			   PRB_MX_ADDR_VALID_SYS_MOD, buf);
548 	buf = ql_get_probe(qdev, PRB_MX_ADDR_PCI_CLOCK,
549 			   PRB_MX_ADDR_VALID_PCI_MOD, buf);
550 	buf = ql_get_probe(qdev, PRB_MX_ADDR_XGM_CLOCK,
551 			   PRB_MX_ADDR_VALID_XGM_MOD, buf);
552 	buf = ql_get_probe(qdev, PRB_MX_ADDR_FC_CLOCK,
553 			   PRB_MX_ADDR_VALID_FC_MOD, buf);
554 	return 0;
555 
556 }
557 
558 /* Read out the routing index registers */
559 static int ql_get_routing_index_registers(struct ql_adapter *qdev, u32 *buf)
560 {
561 	int status;
562 	u32 type, index, index_max;
563 	u32 result_index;
564 	u32 result_data;
565 	u32 val;
566 
567 	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
568 	if (status)
569 		return status;
570 
571 	for (type = 0; type < 4; type++) {
572 		if (type < 2)
573 			index_max = 8;
574 		else
575 			index_max = 16;
576 		for (index = 0; index < index_max; index++) {
577 			val = RT_IDX_RS
578 				| (type << RT_IDX_TYPE_SHIFT)
579 				| (index << RT_IDX_IDX_SHIFT);
580 			ql_write32(qdev, RT_IDX, val);
581 			result_index = 0;
582 			while ((result_index & RT_IDX_MR) == 0)
583 				result_index = ql_read32(qdev, RT_IDX);
584 			result_data = ql_read32(qdev, RT_DATA);
585 			*buf = type;
586 			buf++;
587 			*buf = index;
588 			buf++;
589 			*buf = result_index;
590 			buf++;
591 			*buf = result_data;
592 			buf++;
593 		}
594 	}
595 	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
596 	return status;
597 }
598 
/* Read out the MAC protocol registers */
/* For every MAC-address table type, selects each (index, offset) pair
 * through MAC_ADDR_IDX, busy-waits for the read-complete flag, and
 * stores the MAC_ADDR_IDX readback plus the MAC_ADDR_DATA value into
 * `buf` (two words per offset).
 */
static void ql_get_mac_protocol_registers(struct ql_adapter *qdev, u32 *buf)
{
	u32 result_index, result_data;
	u32 type;
	u32 index;
	u32 offset;
	u32 val;
	u32 initial_val = MAC_ADDR_RS;
	u32 max_index;
	u32 max_offset;

	for (type = 0; type < MAC_ADDR_TYPE_COUNT; type++) {
		switch (type) {

		case 0: /* CAM */
			/* NOTE: this |= is sticky — since type 0 runs
			 * first, MAC_ADDR_ADR stays set in initial_val
			 * for every later type as well.
			 */
			initial_val |= MAC_ADDR_ADR;
			max_index = MAC_ADDR_MAX_CAM_ENTRIES;
			max_offset = MAC_ADDR_MAX_CAM_WCOUNT;
			break;
		case 1: /* Multicast MAC Address */
			/* NOTE(review): CAM_WCOUNT used for BOTH the
			 * entry count and the word count here — looks
			 * suspicious; confirm against the register map.
			 */
			max_index = MAC_ADDR_MAX_CAM_WCOUNT;
			max_offset = MAC_ADDR_MAX_CAM_WCOUNT;
			break;
		case 2: /* VLAN filter mask */
		case 3: /* MC filter mask */
			max_index = MAC_ADDR_MAX_CAM_WCOUNT;
			max_offset = MAC_ADDR_MAX_CAM_WCOUNT;
			break;
		case 4: /* FC MAC addresses */
			max_index = MAC_ADDR_MAX_FC_MAC_ENTRIES;
			max_offset = MAC_ADDR_MAX_FC_MAC_WCOUNT;
			break;
		case 5: /* Mgmt MAC addresses */
			max_index = MAC_ADDR_MAX_MGMT_MAC_ENTRIES;
			max_offset = MAC_ADDR_MAX_MGMT_MAC_WCOUNT;
			break;
		case 6: /* Mgmt VLAN addresses */
			max_index = MAC_ADDR_MAX_MGMT_VLAN_ENTRIES;
			max_offset = MAC_ADDR_MAX_MGMT_VLAN_WCOUNT;
			break;
		case 7: /* Mgmt IPv4 address */
			max_index = MAC_ADDR_MAX_MGMT_V4_ENTRIES;
			max_offset = MAC_ADDR_MAX_MGMT_V4_WCOUNT;
			break;
		case 8: /* Mgmt IPv6 address */
			max_index = MAC_ADDR_MAX_MGMT_V6_ENTRIES;
			max_offset = MAC_ADDR_MAX_MGMT_V6_WCOUNT;
			break;
		case 9: /* Mgmt TCP/UDP Dest port */
			max_index = MAC_ADDR_MAX_MGMT_TU_DP_ENTRIES;
			max_offset = MAC_ADDR_MAX_MGMT_TU_DP_WCOUNT;
			break;
		default:
			pr_err("Bad type!!! 0x%08x\n", type);
			max_index = 0;
			max_offset = 0;
			break;
		}
		for (index = 0; index < max_index; index++) {
			for (offset = 0; offset < max_offset; offset++) {
				val = initial_val
					| (type << MAC_ADDR_TYPE_SHIFT)
					| (index << MAC_ADDR_IDX_SHIFT)
					| (offset);
				ql_write32(qdev, MAC_ADDR_IDX, val);
				result_index = 0;
				/* Busy-wait for the read-complete flag;
				 * no timeout (as in the original).
				 */
				while ((result_index & MAC_ADDR_MR) == 0) {
					result_index = ql_read32(qdev,
								 MAC_ADDR_IDX);
				}
				result_data = ql_read32(qdev, MAC_ADDR_DATA);
				*buf = result_index;
				buf++;
				*buf = result_data;
				buf++;
			}
		}
	}
}
679 
680 static void ql_get_sem_registers(struct ql_adapter *qdev, u32 *buf)
681 {
682 	u32 func_num, reg, reg_val;
683 	int status;
684 
685 	for (func_num = 0; func_num < MAX_SEMAPHORE_FUNCTIONS ; func_num++) {
686 		reg = MPI_NIC_REG_BLOCK
687 			| (func_num << MPI_NIC_FUNCTION_SHIFT)
688 			| (SEM / 4);
689 		status = ql_read_mpi_reg(qdev, reg, &reg_val);
690 		*buf = reg_val;
691 		/* if the read failed then dead fill the element. */
692 		if (!status)
693 			*buf = 0xdeadbeef;
694 		buf++;
695 	}
696 }
697 
698 /* Create a coredump segment header */
699 static void ql_build_coredump_seg_header(
700 		struct mpi_coredump_segment_header *seg_hdr,
701 		u32 seg_number, u32 seg_size, u8 *desc)
702 {
703 	memset(seg_hdr, 0, sizeof(struct mpi_coredump_segment_header));
704 	seg_hdr->cookie = MPI_COREDUMP_COOKIE;
705 	seg_hdr->segNum = seg_number;
706 	seg_hdr->segSize = seg_size;
707 	strncpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1);
708 }
709 
710 /*
711  * This function should be called when a coredump / probedump
712  * is to be extracted from the HBA. It is assumed there is a
713  * qdev structure that contains the base address of the register
714  * space for this function as well as a coredump structure that
715  * will contain the dump.
716  */
717 int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
718 {
719 	int status;
720 	int i;
721 
722 	if (!mpi_coredump) {
723 		netif_err(qdev, drv, qdev->ndev, "No memory allocated\n");
724 		return -EINVAL;
725 	}
726 
727 	/* Try to get the spinlock, but dont worry if
728 	 * it isn't available.  If the firmware died it
729 	 * might be holding the sem.
730 	 */
731 	ql_sem_spinlock(qdev, SEM_PROC_REG_MASK);
732 
733 	status = ql_pause_mpi_risc(qdev);
734 	if (status) {
735 		netif_err(qdev, drv, qdev->ndev,
736 			  "Failed RISC pause. Status = 0x%.08x\n", status);
737 		goto err;
738 	}
739 
740 	/* Insert the global header */
741 	memset(&(mpi_coredump->mpi_global_header), 0,
742 	       sizeof(struct mpi_coredump_global_header));
743 	mpi_coredump->mpi_global_header.cookie = MPI_COREDUMP_COOKIE;
744 	mpi_coredump->mpi_global_header.headerSize =
745 		sizeof(struct mpi_coredump_global_header);
746 	mpi_coredump->mpi_global_header.imageSize =
747 		sizeof(struct ql_mpi_coredump);
748 	strncpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump",
749 		sizeof(mpi_coredump->mpi_global_header.idString));
750 
751 	/* Get generic NIC reg dump */
752 	ql_build_coredump_seg_header(&mpi_coredump->nic_regs_seg_hdr,
753 				     NIC1_CONTROL_SEG_NUM,
754 			sizeof(struct mpi_coredump_segment_header) +
755 			sizeof(mpi_coredump->nic_regs), "NIC1 Registers");
756 
757 	ql_build_coredump_seg_header(&mpi_coredump->nic2_regs_seg_hdr,
758 				     NIC2_CONTROL_SEG_NUM,
759 			sizeof(struct mpi_coredump_segment_header) +
760 			sizeof(mpi_coredump->nic2_regs), "NIC2 Registers");
761 
762 	/* Get XGMac registers. (Segment 18, Rev C. step 21) */
763 	ql_build_coredump_seg_header(&mpi_coredump->xgmac1_seg_hdr,
764 				     NIC1_XGMAC_SEG_NUM,
765 			sizeof(struct mpi_coredump_segment_header) +
766 			sizeof(mpi_coredump->xgmac1), "NIC1 XGMac Registers");
767 
768 	ql_build_coredump_seg_header(&mpi_coredump->xgmac2_seg_hdr,
769 				     NIC2_XGMAC_SEG_NUM,
770 			sizeof(struct mpi_coredump_segment_header) +
771 			sizeof(mpi_coredump->xgmac2), "NIC2 XGMac Registers");
772 
773 	if (qdev->func & 1) {
774 		/* Odd means our function is NIC 2 */
775 		for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
776 			mpi_coredump->nic2_regs[i] =
777 					 ql_read32(qdev, i * sizeof(u32));
778 
779 		for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
780 			mpi_coredump->nic_regs[i] =
781 			ql_read_other_func_reg(qdev, (i * sizeof(u32)) / 4);
782 
783 		ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac2[0], 0);
784 		ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac1[0], 1);
785 	} else {
786 		/* Even means our function is NIC 1 */
787 		for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
788 			mpi_coredump->nic_regs[i] =
789 					ql_read32(qdev, i * sizeof(u32));
790 		for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
791 			mpi_coredump->nic2_regs[i] =
792 			ql_read_other_func_reg(qdev, (i * sizeof(u32)) / 4);
793 
794 		ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac1[0], 0);
795 		ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac2[0], 1);
796 	}
797 
798 	/* Rev C. Step 20a */
799 	ql_build_coredump_seg_header(&mpi_coredump->xaui_an_hdr,
800 				     XAUI_AN_SEG_NUM,
801 			sizeof(struct mpi_coredump_segment_header) +
802 			sizeof(mpi_coredump->serdes_xaui_an),
803 			"XAUI AN Registers");
804 
805 	/* Rev C. Step 20b */
806 	ql_build_coredump_seg_header(&mpi_coredump->xaui_hss_pcs_hdr,
807 				     XAUI_HSS_PCS_SEG_NUM,
808 			sizeof(struct mpi_coredump_segment_header) +
809 			sizeof(mpi_coredump->serdes_xaui_hss_pcs),
810 			"XAUI HSS PCS Registers");
811 
812 	ql_build_coredump_seg_header(&mpi_coredump->xfi_an_hdr, XFI_AN_SEG_NUM,
813 				     sizeof(struct mpi_coredump_segment_header) +
814 			sizeof(mpi_coredump->serdes_xfi_an),
815 			"XFI AN Registers");
816 
817 	ql_build_coredump_seg_header(&mpi_coredump->xfi_train_hdr,
818 				     XFI_TRAIN_SEG_NUM,
819 			sizeof(struct mpi_coredump_segment_header) +
820 			sizeof(mpi_coredump->serdes_xfi_train),
821 			"XFI TRAIN Registers");
822 
823 	ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_pcs_hdr,
824 				     XFI_HSS_PCS_SEG_NUM,
825 			sizeof(struct mpi_coredump_segment_header) +
826 			sizeof(mpi_coredump->serdes_xfi_hss_pcs),
827 			"XFI HSS PCS Registers");
828 
829 	ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_tx_hdr,
830 				     XFI_HSS_TX_SEG_NUM,
831 			sizeof(struct mpi_coredump_segment_header) +
832 			sizeof(mpi_coredump->serdes_xfi_hss_tx),
833 			"XFI HSS TX Registers");
834 
835 	ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_rx_hdr,
836 				     XFI_HSS_RX_SEG_NUM,
837 			sizeof(struct mpi_coredump_segment_header) +
838 			sizeof(mpi_coredump->serdes_xfi_hss_rx),
839 			"XFI HSS RX Registers");
840 
841 	ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_pll_hdr,
842 				     XFI_HSS_PLL_SEG_NUM,
843 			sizeof(struct mpi_coredump_segment_header) +
844 			sizeof(mpi_coredump->serdes_xfi_hss_pll),
845 			"XFI HSS PLL Registers");
846 
847 	ql_build_coredump_seg_header(&mpi_coredump->xaui2_an_hdr,
848 				     XAUI2_AN_SEG_NUM,
849 			sizeof(struct mpi_coredump_segment_header) +
850 			sizeof(mpi_coredump->serdes2_xaui_an),
851 			"XAUI2 AN Registers");
852 
853 	ql_build_coredump_seg_header(&mpi_coredump->xaui2_hss_pcs_hdr,
854 				     XAUI2_HSS_PCS_SEG_NUM,
855 			sizeof(struct mpi_coredump_segment_header) +
856 			sizeof(mpi_coredump->serdes2_xaui_hss_pcs),
857 			"XAUI2 HSS PCS Registers");
858 
859 	ql_build_coredump_seg_header(&mpi_coredump->xfi2_an_hdr,
860 				     XFI2_AN_SEG_NUM,
861 			sizeof(struct mpi_coredump_segment_header) +
862 			sizeof(mpi_coredump->serdes2_xfi_an),
863 			"XFI2 AN Registers");
864 
865 	ql_build_coredump_seg_header(&mpi_coredump->xfi2_train_hdr,
866 				     XFI2_TRAIN_SEG_NUM,
867 			sizeof(struct mpi_coredump_segment_header) +
868 			sizeof(mpi_coredump->serdes2_xfi_train),
869 			"XFI2 TRAIN Registers");
870 
871 	ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_pcs_hdr,
872 				     XFI2_HSS_PCS_SEG_NUM,
873 			sizeof(struct mpi_coredump_segment_header) +
874 			sizeof(mpi_coredump->serdes2_xfi_hss_pcs),
875 			"XFI2 HSS PCS Registers");
876 
877 	ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_tx_hdr,
878 				     XFI2_HSS_TX_SEG_NUM,
879 			sizeof(struct mpi_coredump_segment_header) +
880 			sizeof(mpi_coredump->serdes2_xfi_hss_tx),
881 			"XFI2 HSS TX Registers");
882 
883 	ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_rx_hdr,
884 				     XFI2_HSS_RX_SEG_NUM,
885 			sizeof(struct mpi_coredump_segment_header) +
886 			sizeof(mpi_coredump->serdes2_xfi_hss_rx),
887 			"XFI2 HSS RX Registers");
888 
889 	ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_pll_hdr,
890 				     XFI2_HSS_PLL_SEG_NUM,
891 			sizeof(struct mpi_coredump_segment_header) +
892 			sizeof(mpi_coredump->serdes2_xfi_hss_pll),
893 			"XFI2 HSS PLL Registers");
894 
895 	status = ql_get_serdes_regs(qdev, mpi_coredump);
896 	if (status) {
897 		netif_err(qdev, drv, qdev->ndev,
898 			  "Failed Dump of Serdes Registers. Status = 0x%.08x\n",
899 			  status);
900 		goto err;
901 	}
902 
903 	ql_build_coredump_seg_header(&mpi_coredump->core_regs_seg_hdr,
904 				     CORE_SEG_NUM,
905 				sizeof(mpi_coredump->core_regs_seg_hdr) +
906 				sizeof(mpi_coredump->mpi_core_regs) +
907 				sizeof(mpi_coredump->mpi_core_sh_regs),
908 				"Core Registers");
909 
910 	/* Get the MPI Core Registers */
911 	status = ql_get_mpi_regs(qdev, &mpi_coredump->mpi_core_regs[0],
912 				 MPI_CORE_REGS_ADDR, MPI_CORE_REGS_CNT);
913 	if (status)
914 		goto err;
915 	/* Get the 16 MPI shadow registers */
916 	status = ql_get_mpi_shadow_regs(qdev,
917 					&mpi_coredump->mpi_core_sh_regs[0]);
918 	if (status)
919 		goto err;
920 
921 	/* Get the Test Logic Registers */
922 	ql_build_coredump_seg_header(&mpi_coredump->test_logic_regs_seg_hdr,
923 				     TEST_LOGIC_SEG_NUM,
924 				sizeof(struct mpi_coredump_segment_header)
925 				+ sizeof(mpi_coredump->test_logic_regs),
926 				"Test Logic Regs");
927 	status = ql_get_mpi_regs(qdev, &mpi_coredump->test_logic_regs[0],
928 				 TEST_REGS_ADDR, TEST_REGS_CNT);
929 	if (status)
930 		goto err;
931 
932 	/* Get the RMII Registers */
933 	ql_build_coredump_seg_header(&mpi_coredump->rmii_regs_seg_hdr,
934 				     RMII_SEG_NUM,
935 				sizeof(struct mpi_coredump_segment_header)
936 				+ sizeof(mpi_coredump->rmii_regs),
937 				"RMII Registers");
938 	status = ql_get_mpi_regs(qdev, &mpi_coredump->rmii_regs[0],
939 				 RMII_REGS_ADDR, RMII_REGS_CNT);
940 	if (status)
941 		goto err;
942 
943 	/* Get the FCMAC1 Registers */
944 	ql_build_coredump_seg_header(&mpi_coredump->fcmac1_regs_seg_hdr,
945 				     FCMAC1_SEG_NUM,
946 				sizeof(struct mpi_coredump_segment_header)
947 				+ sizeof(mpi_coredump->fcmac1_regs),
948 				"FCMAC1 Registers");
949 	status = ql_get_mpi_regs(qdev, &mpi_coredump->fcmac1_regs[0],
950 				 FCMAC1_REGS_ADDR, FCMAC_REGS_CNT);
951 	if (status)
952 		goto err;
953 
954 	/* Get the FCMAC2 Registers */
955 
956 	ql_build_coredump_seg_header(&mpi_coredump->fcmac2_regs_seg_hdr,
957 				     FCMAC2_SEG_NUM,
958 				sizeof(struct mpi_coredump_segment_header)
959 				+ sizeof(mpi_coredump->fcmac2_regs),
960 				"FCMAC2 Registers");
961 
962 	status = ql_get_mpi_regs(qdev, &mpi_coredump->fcmac2_regs[0],
963 				 FCMAC2_REGS_ADDR, FCMAC_REGS_CNT);
964 	if (status)
965 		goto err;
966 
967 	/* Get the FC1 MBX Registers */
968 	ql_build_coredump_seg_header(&mpi_coredump->fc1_mbx_regs_seg_hdr,
969 				     FC1_MBOX_SEG_NUM,
970 				sizeof(struct mpi_coredump_segment_header)
971 				+ sizeof(mpi_coredump->fc1_mbx_regs),
972 				"FC1 MBox Regs");
973 	status = ql_get_mpi_regs(qdev, &mpi_coredump->fc1_mbx_regs[0],
974 				 FC1_MBX_REGS_ADDR, FC_MBX_REGS_CNT);
975 	if (status)
976 		goto err;
977 
978 	/* Get the IDE Registers */
979 	ql_build_coredump_seg_header(&mpi_coredump->ide_regs_seg_hdr,
980 				     IDE_SEG_NUM,
981 				sizeof(struct mpi_coredump_segment_header)
982 				+ sizeof(mpi_coredump->ide_regs),
983 				"IDE Registers");
984 	status = ql_get_mpi_regs(qdev, &mpi_coredump->ide_regs[0],
985 				 IDE_REGS_ADDR, IDE_REGS_CNT);
986 	if (status)
987 		goto err;
988 
989 	/* Get the NIC1 MBX Registers */
990 	ql_build_coredump_seg_header(&mpi_coredump->nic1_mbx_regs_seg_hdr,
991 				     NIC1_MBOX_SEG_NUM,
992 				sizeof(struct mpi_coredump_segment_header)
993 				+ sizeof(mpi_coredump->nic1_mbx_regs),
994 				"NIC1 MBox Regs");
995 	status = ql_get_mpi_regs(qdev, &mpi_coredump->nic1_mbx_regs[0],
996 				 NIC1_MBX_REGS_ADDR, NIC_MBX_REGS_CNT);
997 	if (status)
998 		goto err;
999 
1000 	/* Get the SMBus Registers */
1001 	ql_build_coredump_seg_header(&mpi_coredump->smbus_regs_seg_hdr,
1002 				     SMBUS_SEG_NUM,
1003 				sizeof(struct mpi_coredump_segment_header)
1004 				+ sizeof(mpi_coredump->smbus_regs),
1005 				"SMBus Registers");
1006 	status = ql_get_mpi_regs(qdev, &mpi_coredump->smbus_regs[0],
1007 				 SMBUS_REGS_ADDR, SMBUS_REGS_CNT);
1008 	if (status)
1009 		goto err;
1010 
1011 	/* Get the FC2 MBX Registers */
1012 	ql_build_coredump_seg_header(&mpi_coredump->fc2_mbx_regs_seg_hdr,
1013 				     FC2_MBOX_SEG_NUM,
1014 				sizeof(struct mpi_coredump_segment_header)
1015 				+ sizeof(mpi_coredump->fc2_mbx_regs),
1016 				"FC2 MBox Regs");
1017 	status = ql_get_mpi_regs(qdev, &mpi_coredump->fc2_mbx_regs[0],
1018 				 FC2_MBX_REGS_ADDR, FC_MBX_REGS_CNT);
1019 	if (status)
1020 		goto err;
1021 
1022 	/* Get the NIC2 MBX Registers */
1023 	ql_build_coredump_seg_header(&mpi_coredump->nic2_mbx_regs_seg_hdr,
1024 				     NIC2_MBOX_SEG_NUM,
1025 				sizeof(struct mpi_coredump_segment_header)
1026 				+ sizeof(mpi_coredump->nic2_mbx_regs),
1027 				"NIC2 MBox Regs");
1028 	status = ql_get_mpi_regs(qdev, &mpi_coredump->nic2_mbx_regs[0],
1029 				 NIC2_MBX_REGS_ADDR, NIC_MBX_REGS_CNT);
1030 	if (status)
1031 		goto err;
1032 
1033 	/* Get the I2C Registers */
1034 	ql_build_coredump_seg_header(&mpi_coredump->i2c_regs_seg_hdr,
1035 				     I2C_SEG_NUM,
1036 				sizeof(struct mpi_coredump_segment_header)
1037 				+ sizeof(mpi_coredump->i2c_regs),
1038 				"I2C Registers");
1039 	status = ql_get_mpi_regs(qdev, &mpi_coredump->i2c_regs[0],
1040 				 I2C_REGS_ADDR, I2C_REGS_CNT);
1041 	if (status)
1042 		goto err;
1043 
1044 	/* Get the MEMC Registers */
1045 	ql_build_coredump_seg_header(&mpi_coredump->memc_regs_seg_hdr,
1046 				     MEMC_SEG_NUM,
1047 				sizeof(struct mpi_coredump_segment_header)
1048 				+ sizeof(mpi_coredump->memc_regs),
1049 				"MEMC Registers");
1050 	status = ql_get_mpi_regs(qdev, &mpi_coredump->memc_regs[0],
1051 				 MEMC_REGS_ADDR, MEMC_REGS_CNT);
1052 	if (status)
1053 		goto err;
1054 
1055 	/* Get the PBus Registers */
1056 	ql_build_coredump_seg_header(&mpi_coredump->pbus_regs_seg_hdr,
1057 				     PBUS_SEG_NUM,
1058 				sizeof(struct mpi_coredump_segment_header)
1059 				+ sizeof(mpi_coredump->pbus_regs),
1060 				"PBUS Registers");
1061 	status = ql_get_mpi_regs(qdev, &mpi_coredump->pbus_regs[0],
1062 				 PBUS_REGS_ADDR, PBUS_REGS_CNT);
1063 	if (status)
1064 		goto err;
1065 
1066 	/* Get the MDE Registers */
1067 	ql_build_coredump_seg_header(&mpi_coredump->mde_regs_seg_hdr,
1068 				     MDE_SEG_NUM,
1069 				sizeof(struct mpi_coredump_segment_header)
1070 				+ sizeof(mpi_coredump->mde_regs),
1071 				"MDE Registers");
1072 	status = ql_get_mpi_regs(qdev, &mpi_coredump->mde_regs[0],
1073 				 MDE_REGS_ADDR, MDE_REGS_CNT);
1074 	if (status)
1075 		goto err;
1076 
1077 	ql_build_coredump_seg_header(&mpi_coredump->misc_nic_seg_hdr,
1078 				     MISC_NIC_INFO_SEG_NUM,
1079 				sizeof(struct mpi_coredump_segment_header)
1080 				+ sizeof(mpi_coredump->misc_nic_info),
1081 				"MISC NIC INFO");
1082 	mpi_coredump->misc_nic_info.rx_ring_count = qdev->rx_ring_count;
1083 	mpi_coredump->misc_nic_info.tx_ring_count = qdev->tx_ring_count;
1084 	mpi_coredump->misc_nic_info.intr_count = qdev->intr_count;
1085 	mpi_coredump->misc_nic_info.function = qdev->func;
1086 
1087 	/* Segment 31 */
1088 	/* Get indexed register values. */
1089 	ql_build_coredump_seg_header(&mpi_coredump->intr_states_seg_hdr,
1090 				     INTR_STATES_SEG_NUM,
1091 				sizeof(struct mpi_coredump_segment_header)
1092 				+ sizeof(mpi_coredump->intr_states),
1093 				"INTR States");
1094 	ql_get_intr_states(qdev, &mpi_coredump->intr_states[0]);
1095 
1096 	ql_build_coredump_seg_header(&mpi_coredump->cam_entries_seg_hdr,
1097 				     CAM_ENTRIES_SEG_NUM,
1098 				sizeof(struct mpi_coredump_segment_header)
1099 				+ sizeof(mpi_coredump->cam_entries),
1100 				"CAM Entries");
1101 	status = ql_get_cam_entries(qdev, &mpi_coredump->cam_entries[0]);
1102 	if (status)
1103 		goto err;
1104 
1105 	ql_build_coredump_seg_header(&mpi_coredump->nic_routing_words_seg_hdr,
1106 				     ROUTING_WORDS_SEG_NUM,
1107 				sizeof(struct mpi_coredump_segment_header)
1108 				+ sizeof(mpi_coredump->nic_routing_words),
1109 				"Routing Words");
1110 	status = ql_get_routing_entries(qdev,
1111 			&mpi_coredump->nic_routing_words[0]);
1112 	if (status)
1113 		goto err;
1114 
1115 	/* Segment 34 (Rev C. step 23) */
1116 	ql_build_coredump_seg_header(&mpi_coredump->ets_seg_hdr,
1117 				     ETS_SEG_NUM,
1118 				sizeof(struct mpi_coredump_segment_header)
1119 				+ sizeof(mpi_coredump->ets),
1120 				"ETS Registers");
1121 	status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]);
1122 	if (status)
1123 		goto err;
1124 
1125 	ql_build_coredump_seg_header(&mpi_coredump->probe_dump_seg_hdr,
1126 				     PROBE_DUMP_SEG_NUM,
1127 				sizeof(struct mpi_coredump_segment_header)
1128 				+ sizeof(mpi_coredump->probe_dump),
1129 				"Probe Dump");
1130 	ql_get_probe_dump(qdev, &mpi_coredump->probe_dump[0]);
1131 
1132 	ql_build_coredump_seg_header(&mpi_coredump->routing_reg_seg_hdr,
1133 				     ROUTING_INDEX_SEG_NUM,
1134 				sizeof(struct mpi_coredump_segment_header)
1135 				+ sizeof(mpi_coredump->routing_regs),
1136 				"Routing Regs");
1137 	status = ql_get_routing_index_registers(qdev,
1138 						&mpi_coredump->routing_regs[0]);
1139 	if (status)
1140 		goto err;
1141 
1142 	ql_build_coredump_seg_header(&mpi_coredump->mac_prot_reg_seg_hdr,
1143 				     MAC_PROTOCOL_SEG_NUM,
1144 				sizeof(struct mpi_coredump_segment_header)
1145 				+ sizeof(mpi_coredump->mac_prot_regs),
1146 				"MAC Prot Regs");
1147 	ql_get_mac_protocol_registers(qdev, &mpi_coredump->mac_prot_regs[0]);
1148 
1149 	/* Get the semaphore registers for all 5 functions */
1150 	ql_build_coredump_seg_header(&mpi_coredump->sem_regs_seg_hdr,
1151 				     SEM_REGS_SEG_NUM,
1152 			sizeof(struct mpi_coredump_segment_header) +
1153 			sizeof(mpi_coredump->sem_regs),	"Sem Registers");
1154 
1155 	ql_get_sem_registers(qdev, &mpi_coredump->sem_regs[0]);
1156 
1157 	/* Prevent the mpi restarting while we dump the memory.*/
1158 	ql_write_mpi_reg(qdev, MPI_TEST_FUNC_RST_STS, MPI_TEST_FUNC_RST_FRC);
1159 
1160 	/* clear the pause */
1161 	status = ql_unpause_mpi_risc(qdev);
1162 	if (status) {
1163 		netif_err(qdev, drv, qdev->ndev,
1164 			  "Failed RISC unpause. Status = 0x%.08x\n", status);
1165 		goto err;
1166 	}
1167 
1168 	/* Reset the RISC so we can dump RAM */
1169 	status = ql_hard_reset_mpi_risc(qdev);
1170 	if (status) {
1171 		netif_err(qdev, drv, qdev->ndev,
1172 			  "Failed RISC reset. Status = 0x%.08x\n", status);
1173 		goto err;
1174 	}
1175 
1176 	ql_build_coredump_seg_header(&mpi_coredump->code_ram_seg_hdr,
1177 				     WCS_RAM_SEG_NUM,
1178 				sizeof(struct mpi_coredump_segment_header)
1179 				+ sizeof(mpi_coredump->code_ram),
1180 				"WCS RAM");
1181 	status = ql_dump_risc_ram_area(qdev, &mpi_coredump->code_ram[0],
1182 				       CODE_RAM_ADDR, CODE_RAM_CNT);
1183 	if (status) {
1184 		netif_err(qdev, drv, qdev->ndev,
1185 			  "Failed Dump of CODE RAM. Status = 0x%.08x\n",
1186 			  status);
1187 		goto err;
1188 	}
1189 
1190 	/* Insert the segment header */
1191 	ql_build_coredump_seg_header(&mpi_coredump->memc_ram_seg_hdr,
1192 				     MEMC_RAM_SEG_NUM,
1193 				sizeof(struct mpi_coredump_segment_header)
1194 				+ sizeof(mpi_coredump->memc_ram),
1195 				"MEMC RAM");
1196 	status = ql_dump_risc_ram_area(qdev, &mpi_coredump->memc_ram[0],
1197 				       MEMC_RAM_ADDR, MEMC_RAM_CNT);
1198 	if (status) {
1199 		netif_err(qdev, drv, qdev->ndev,
1200 			  "Failed Dump of MEMC RAM. Status = 0x%.08x\n",
1201 			  status);
1202 		goto err;
1203 	}
1204 err:
1205 	ql_sem_unlock(qdev, SEM_PROC_REG_MASK); /* does flush too */
1206 	return status;
1207 
1208 }
1209 
/* Request a firmware-driven coredump.  Refuses unless this function owns
 * the firmware and the interface is up (the dump is serviced by the
 * firmware-error path, which needs a running interface).
 */
static void ql_get_core_dump(struct ql_adapter *qdev)
{
	if (!ql_own_firmware(qdev)) {
		netif_err(qdev, drv, qdev->ndev, "Don't own firmware!\n");
		return;
	}

	if (!netif_running(qdev->ndev)) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Force Coredump can only be done from interface that is up\n");
		return;
	}
	/* Queue the firmware-error worker; presumably that worker performs
	 * the actual dump -- its definition is not visible here.
	 */
	ql_queue_fw_error(qdev);
}
1224 
/* Build a register-only dump (struct ql_reg_dump) into the caller's
 * buffer.  Only registers reachable through this NIC function's own
 * window are read; no MPI/firmware pause is required, unlike the full
 * coredump.  Segment order mirrors the full-coredump layout.
 */
static void ql_gen_reg_dump(struct ql_adapter *qdev,
			    struct ql_reg_dump *mpi_coredump)
{
	int i, status;


	/* Global header: identifies the image and records its total size. */
	memset(&(mpi_coredump->mpi_global_header), 0,
	       sizeof(struct mpi_coredump_global_header));
	mpi_coredump->mpi_global_header.cookie = MPI_COREDUMP_COOKIE;
	mpi_coredump->mpi_global_header.headerSize =
		sizeof(struct mpi_coredump_global_header);
	mpi_coredump->mpi_global_header.imageSize =
		sizeof(struct ql_reg_dump);
	/* strncpy zero-pads the rest of the fixed-size id field, which suits
	 * a binary header.  NOTE(review): assumes idString is longer than
	 * the literal so the field stays NUL-terminated -- verify in qlge.h.
	 */
	strncpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump",
		sizeof(mpi_coredump->mpi_global_header.idString));


	/* segment 16 */
	ql_build_coredump_seg_header(&mpi_coredump->misc_nic_seg_hdr,
				     MISC_NIC_INFO_SEG_NUM,
				sizeof(struct mpi_coredump_segment_header)
				+ sizeof(mpi_coredump->misc_nic_info),
				"MISC NIC INFO");
	mpi_coredump->misc_nic_info.rx_ring_count = qdev->rx_ring_count;
	mpi_coredump->misc_nic_info.tx_ring_count = qdev->tx_ring_count;
	mpi_coredump->misc_nic_info.intr_count = qdev->intr_count;
	mpi_coredump->misc_nic_info.function = qdev->func;

	/* Segment 16, Rev C. Step 18 */
	ql_build_coredump_seg_header(&mpi_coredump->nic_regs_seg_hdr,
				     NIC1_CONTROL_SEG_NUM,
				sizeof(struct mpi_coredump_segment_header)
				+ sizeof(mpi_coredump->nic_regs),
				"NIC Registers");
	/* Get generic reg dump */
	for (i = 0; i < 64; i++)
		mpi_coredump->nic_regs[i] = ql_read32(qdev, i * sizeof(u32));

	/* Segment 31 */
	/* Get indexed register values. */
	ql_build_coredump_seg_header(&mpi_coredump->intr_states_seg_hdr,
				     INTR_STATES_SEG_NUM,
				sizeof(struct mpi_coredump_segment_header)
				+ sizeof(mpi_coredump->intr_states),
				"INTR States");
	ql_get_intr_states(qdev, &mpi_coredump->intr_states[0]);

	ql_build_coredump_seg_header(&mpi_coredump->cam_entries_seg_hdr,
				     CAM_ENTRIES_SEG_NUM,
				sizeof(struct mpi_coredump_segment_header)
				+ sizeof(mpi_coredump->cam_entries),
				"CAM Entries");
	status = ql_get_cam_entries(qdev, &mpi_coredump->cam_entries[0]);
	if (status)
		return;

	ql_build_coredump_seg_header(&mpi_coredump->nic_routing_words_seg_hdr,
				     ROUTING_WORDS_SEG_NUM,
				sizeof(struct mpi_coredump_segment_header)
				+ sizeof(mpi_coredump->nic_routing_words),
				"Routing Words");
	status = ql_get_routing_entries(qdev,
					&mpi_coredump->nic_routing_words[0]);
	if (status)
		return;

	/* Segment 34 (Rev C. step 23) */
	ql_build_coredump_seg_header(&mpi_coredump->ets_seg_hdr,
				     ETS_SEG_NUM,
				sizeof(struct mpi_coredump_segment_header)
				+ sizeof(mpi_coredump->ets),
				"ETS Registers");
	status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]);
	if (status)
		return;
}
1301 
1302 void ql_get_dump(struct ql_adapter *qdev, void *buff)
1303 {
1304 	/*
1305 	 * If the dump has already been taken and is stored
1306 	 * in our internal buffer and if force dump is set then
1307 	 * just start the spool to dump it to the log file
1308 	 * and also, take a snapshot of the general regs to
1309 	 * to the user's buffer or else take complete dump
1310 	 * to the user's buffer if force is not set.
1311 	 */
1312 
1313 	if (!test_bit(QL_FRC_COREDUMP, &qdev->flags)) {
1314 		if (!ql_core_dump(qdev, buff))
1315 			ql_soft_reset_mpi_risc(qdev);
1316 		else
1317 			netif_err(qdev, drv, qdev->ndev, "coredump failed!\n");
1318 	} else {
1319 		ql_gen_reg_dump(qdev, buff);
1320 		ql_get_core_dump(qdev);
1321 	}
1322 }
1323 
1324 /* Coredump to messages log file using separate worker thread */
1325 void ql_mpi_core_to_log(struct work_struct *work)
1326 {
1327 	struct ql_adapter *qdev =
1328 		container_of(work, struct ql_adapter, mpi_core_to_log.work);
1329 	u32 *tmp, count;
1330 	int i;
1331 
1332 	count = sizeof(struct ql_mpi_coredump) / sizeof(u32);
1333 	tmp = (u32 *)qdev->mpi_coredump;
1334 	netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev,
1335 		     "Core is dumping to log file!\n");
1336 
1337 	for (i = 0; i < count; i += 8) {
1338 		pr_err("%.08x: %.08x %.08x %.08x %.08x %.08x "
1339 			"%.08x %.08x %.08x\n", i,
1340 			tmp[i + 0],
1341 			tmp[i + 1],
1342 			tmp[i + 2],
1343 			tmp[i + 3],
1344 			tmp[i + 4],
1345 			tmp[i + 5],
1346 			tmp[i + 6],
1347 			tmp[i + 7]);
1348 		msleep(5);
1349 	}
1350 }
1351 
1352 #ifdef QL_REG_DUMP
1353 static void ql_dump_intr_states(struct ql_adapter *qdev)
1354 {
1355 	int i;
1356 	u32 value;
1357 	for (i = 0; i < qdev->intr_count; i++) {
1358 		ql_write32(qdev, INTR_EN, qdev->intr_context[i].intr_read_mask);
1359 		value = ql_read32(qdev, INTR_EN);
1360 		pr_err("%s: Interrupt %d is %s\n",
1361 		       qdev->ndev->name, i,
1362 		       (value & INTR_EN_EN ? "enabled" : "disabled"));
1363 	}
1364 }
1365 
/* Read XGMAC register 'reg' and log it by name.  NOTE(review): the
 * return value of ql_read_xgmac_reg() is ignored, so 'data' may be
 * stale/uninitialized if the read fails -- acceptable for debug output.
 */
#define DUMP_XGMAC(qdev, reg)					\
do {								\
	u32 data;						\
	ql_read_xgmac_reg(qdev, reg, &data);			\
	pr_err("%s: %s = 0x%.08x\n", qdev->ndev->name, #reg, data); \
} while (0)
1372 
/* Dump the XGMAC control registers for this function's port.  The XGMAC
 * hardware semaphore is held for the duration of the reads; bail out if
 * it cannot be acquired.
 */
void ql_dump_xgmac_control_regs(struct ql_adapter *qdev)
{
	if (ql_sem_spinlock(qdev, qdev->xg_sem_mask)) {
		pr_err("%s: Couldn't get xgmac sem\n", __func__);
		return;
	}
	DUMP_XGMAC(qdev, PAUSE_SRC_LO);
	DUMP_XGMAC(qdev, PAUSE_SRC_HI);
	DUMP_XGMAC(qdev, GLOBAL_CFG);
	DUMP_XGMAC(qdev, TX_CFG);
	DUMP_XGMAC(qdev, RX_CFG);
	DUMP_XGMAC(qdev, FLOW_CTL);
	DUMP_XGMAC(qdev, PAUSE_OPCODE);
	DUMP_XGMAC(qdev, PAUSE_TIMER);
	DUMP_XGMAC(qdev, PAUSE_FRM_DEST_LO);
	DUMP_XGMAC(qdev, PAUSE_FRM_DEST_HI);
	DUMP_XGMAC(qdev, MAC_TX_PARAMS);
	DUMP_XGMAC(qdev, MAC_RX_PARAMS);
	DUMP_XGMAC(qdev, MAC_SYS_INT);
	DUMP_XGMAC(qdev, MAC_SYS_INT_MASK);
	DUMP_XGMAC(qdev, MAC_MGMT_INT);
	DUMP_XGMAC(qdev, MAC_MGMT_IN_MASK);
	DUMP_XGMAC(qdev, EXT_ARB_MODE);
	ql_sem_unlock(qdev, qdev->xg_sem_mask);
}
1398 
/* Placeholder: ETS register dump is not implemented. */
static void ql_dump_ets_regs(struct ql_adapter *qdev)
{
}
1402 
1403 static void ql_dump_cam_entries(struct ql_adapter *qdev)
1404 {
1405 	int i;
1406 	u32 value[3];
1407 
1408 	i = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
1409 	if (i)
1410 		return;
1411 	for (i = 0; i < 4; i++) {
1412 		if (ql_get_mac_addr_reg(qdev, MAC_ADDR_TYPE_CAM_MAC, i, value)) {
1413 			pr_err("%s: Failed read of mac index register\n",
1414 			       __func__);
1415 			return;
1416 		} else {
1417 			if (value[0])
1418 				pr_err("%s: CAM index %d CAM Lookup Lower = 0x%.08x:%.08x, Output = 0x%.08x\n",
1419 				       qdev->ndev->name, i, value[1], value[0],
1420 				       value[2]);
1421 		}
1422 	}
1423 	for (i = 0; i < 32; i++) {
1424 		if (ql_get_mac_addr_reg
1425 		    (qdev, MAC_ADDR_TYPE_MULTI_MAC, i, value)) {
1426 			pr_err("%s: Failed read of mac index register\n",
1427 			       __func__);
1428 			return;
1429 		} else {
1430 			if (value[0])
1431 				pr_err("%s: MCAST index %d CAM Lookup Lower = 0x%.08x:%.08x\n",
1432 				       qdev->ndev->name, i, value[1], value[0]);
1433 		}
1434 	}
1435 	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
1436 }
1437 
1438 void ql_dump_routing_entries(struct ql_adapter *qdev)
1439 {
1440 	int i;
1441 	u32 value;
1442 	i = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
1443 	if (i)
1444 		return;
1445 	for (i = 0; i < 16; i++) {
1446 		value = 0;
1447 		if (ql_get_routing_reg(qdev, i, &value)) {
1448 			pr_err("%s: Failed read of routing index register\n",
1449 			       __func__);
1450 			return;
1451 		} else {
1452 			if (value)
1453 				pr_err("%s: Routing Mask %d = 0x%.08x\n",
1454 				       qdev->ndev->name, i, value);
1455 		}
1456 	}
1457 	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
1458 }
1459 
/* Log the 32-bit NIC register 'reg' by name via ql_read32(). */
#define DUMP_REG(qdev, reg)			\
	pr_err("%-32s= 0x%x\n", #reg, ql_read32(qdev, reg))
1462 
/* Dump the NIC control/status register block by name, then the indexed
 * state (interrupt states, XGMAC, ETS, CAM, routing) via the helpers
 * above.
 */
void ql_dump_regs(struct ql_adapter *qdev)
{
	pr_err("reg dump for function #%d\n", qdev->func);
	DUMP_REG(qdev, SYS);
	DUMP_REG(qdev, RST_FO);
	DUMP_REG(qdev, FSC);
	DUMP_REG(qdev, CSR);
	DUMP_REG(qdev, ICB_RID);
	DUMP_REG(qdev, ICB_L);
	DUMP_REG(qdev, ICB_H);
	DUMP_REG(qdev, CFG);
	DUMP_REG(qdev, BIOS_ADDR);
	DUMP_REG(qdev, STS);
	DUMP_REG(qdev, INTR_EN);
	DUMP_REG(qdev, INTR_MASK);
	DUMP_REG(qdev, ISR1);
	DUMP_REG(qdev, ISR2);
	DUMP_REG(qdev, ISR3);
	DUMP_REG(qdev, ISR4);
	DUMP_REG(qdev, REV_ID);
	DUMP_REG(qdev, FRC_ECC_ERR);
	DUMP_REG(qdev, ERR_STS);
	DUMP_REG(qdev, RAM_DBG_ADDR);
	DUMP_REG(qdev, RAM_DBG_DATA);
	DUMP_REG(qdev, ECC_ERR_CNT);
	DUMP_REG(qdev, SEM);
	DUMP_REG(qdev, GPIO_1);
	DUMP_REG(qdev, GPIO_2);
	DUMP_REG(qdev, GPIO_3);
	DUMP_REG(qdev, XGMAC_ADDR);
	DUMP_REG(qdev, XGMAC_DATA);
	DUMP_REG(qdev, NIC_ETS);
	DUMP_REG(qdev, CNA_ETS);
	DUMP_REG(qdev, FLASH_ADDR);
	DUMP_REG(qdev, FLASH_DATA);
	DUMP_REG(qdev, CQ_STOP);
	DUMP_REG(qdev, PAGE_TBL_RID);
	DUMP_REG(qdev, WQ_PAGE_TBL_LO);
	DUMP_REG(qdev, WQ_PAGE_TBL_HI);
	DUMP_REG(qdev, CQ_PAGE_TBL_LO);
	DUMP_REG(qdev, CQ_PAGE_TBL_HI);
	DUMP_REG(qdev, COS_DFLT_CQ1);
	DUMP_REG(qdev, COS_DFLT_CQ2);
	DUMP_REG(qdev, SPLT_HDR);
	DUMP_REG(qdev, FC_PAUSE_THRES);
	DUMP_REG(qdev, NIC_PAUSE_THRES);
	DUMP_REG(qdev, FC_ETHERTYPE);
	DUMP_REG(qdev, FC_RCV_CFG);
	DUMP_REG(qdev, NIC_RCV_CFG);
	DUMP_REG(qdev, FC_COS_TAGS);
	DUMP_REG(qdev, NIC_COS_TAGS);
	DUMP_REG(qdev, MGMT_RCV_CFG);
	DUMP_REG(qdev, XG_SERDES_ADDR);
	DUMP_REG(qdev, XG_SERDES_DATA);
	DUMP_REG(qdev, PRB_MX_ADDR);
	DUMP_REG(qdev, PRB_MX_DATA);
	/* Indexed/derived state follows the flat register block. */
	ql_dump_intr_states(qdev);
	ql_dump_xgmac_control_regs(qdev);
	ql_dump_ets_regs(qdev);
	ql_dump_cam_entries(qdev);
	ql_dump_routing_entries(qdev);
}
1525 #endif
1526 
1527 #ifdef QL_STAT_DUMP
1528 
/* Log one nic_stats counter by name.  Fix: the argument is cast to
 * unsigned long, so the conversion must be %lu -- the original %ld
 * mismatched the type and would print large counters as negative.
 */
#define DUMP_STAT(qdev, stat)	\
	pr_err("%s = %lu\n", #stat, (unsigned long)qdev->nic_stats.stat)
1531 
1532 void ql_dump_stat(struct ql_adapter *qdev)
1533 {
1534 	pr_err("%s: Enter\n", __func__);
1535 	DUMP_STAT(qdev, tx_pkts);
1536 	DUMP_STAT(qdev, tx_bytes);
1537 	DUMP_STAT(qdev, tx_mcast_pkts);
1538 	DUMP_STAT(qdev, tx_bcast_pkts);
1539 	DUMP_STAT(qdev, tx_ucast_pkts);
1540 	DUMP_STAT(qdev, tx_ctl_pkts);
1541 	DUMP_STAT(qdev, tx_pause_pkts);
1542 	DUMP_STAT(qdev, tx_64_pkt);
1543 	DUMP_STAT(qdev, tx_65_to_127_pkt);
1544 	DUMP_STAT(qdev, tx_128_to_255_pkt);
1545 	DUMP_STAT(qdev, tx_256_511_pkt);
1546 	DUMP_STAT(qdev, tx_512_to_1023_pkt);
1547 	DUMP_STAT(qdev, tx_1024_to_1518_pkt);
1548 	DUMP_STAT(qdev, tx_1519_to_max_pkt);
1549 	DUMP_STAT(qdev, tx_undersize_pkt);
1550 	DUMP_STAT(qdev, tx_oversize_pkt);
1551 	DUMP_STAT(qdev, rx_bytes);
1552 	DUMP_STAT(qdev, rx_bytes_ok);
1553 	DUMP_STAT(qdev, rx_pkts);
1554 	DUMP_STAT(qdev, rx_pkts_ok);
1555 	DUMP_STAT(qdev, rx_bcast_pkts);
1556 	DUMP_STAT(qdev, rx_mcast_pkts);
1557 	DUMP_STAT(qdev, rx_ucast_pkts);
1558 	DUMP_STAT(qdev, rx_undersize_pkts);
1559 	DUMP_STAT(qdev, rx_oversize_pkts);
1560 	DUMP_STAT(qdev, rx_jabber_pkts);
1561 	DUMP_STAT(qdev, rx_undersize_fcerr_pkts);
1562 	DUMP_STAT(qdev, rx_drop_events);
1563 	DUMP_STAT(qdev, rx_fcerr_pkts);
1564 	DUMP_STAT(qdev, rx_align_err);
1565 	DUMP_STAT(qdev, rx_symbol_err);
1566 	DUMP_STAT(qdev, rx_mac_err);
1567 	DUMP_STAT(qdev, rx_ctl_pkts);
1568 	DUMP_STAT(qdev, rx_pause_pkts);
1569 	DUMP_STAT(qdev, rx_64_pkts);
1570 	DUMP_STAT(qdev, rx_65_to_127_pkts);
1571 	DUMP_STAT(qdev, rx_128_255_pkts);
1572 	DUMP_STAT(qdev, rx_256_511_pkts);
1573 	DUMP_STAT(qdev, rx_512_to_1023_pkts);
1574 	DUMP_STAT(qdev, rx_1024_to_1518_pkts);
1575 	DUMP_STAT(qdev, rx_1519_to_max_pkts);
1576 	DUMP_STAT(qdev, rx_len_err_pkts);
1577 };
1578 #endif
1579 
1580 #ifdef QL_DEV_DUMP
1581 
/* Log a scalar qdev field, e.g. DUMP_QDEV_FIELD(qdev, "%d", intr_count). */
#define DUMP_QDEV_FIELD(qdev, type, field)		\
	pr_err("qdev->%-24s = " type "\n", #field, qdev->field)
/* Log a DMA address field as a 64-bit hex value. */
#define DUMP_QDEV_DMA_FIELD(qdev, field)		\
	pr_err("qdev->%-24s = %llx\n", #field, (unsigned long long)qdev->field)
/* Log one member of an array-of-structs qdev field.  Fix: the expansion
 * no longer ends in a semicolon, so call sites supply exactly one and
 * the macro behaves as a single statement in braceless if/else bodies.
 */
#define DUMP_QDEV_ARRAY(qdev, type, array, index, field) \
	pr_err("%s[%d].%s = " type "\n",		 \
	       #array, index, #field, qdev->array[index].field)
/* Dump the adapter structure itself: mapped regions, DMA addresses,
 * MSI-X vectors, per-vector interrupt contexts and ring bookkeeping.
 */
void ql_dump_qdev(struct ql_adapter *qdev)
{
	int i;
	DUMP_QDEV_FIELD(qdev, "%lx", flags);
	DUMP_QDEV_FIELD(qdev, "%p", vlgrp);
	DUMP_QDEV_FIELD(qdev, "%p", pdev);
	DUMP_QDEV_FIELD(qdev, "%p", ndev);
	DUMP_QDEV_FIELD(qdev, "%d", chip_rev_id);
	DUMP_QDEV_FIELD(qdev, "%p", reg_base);
	DUMP_QDEV_FIELD(qdev, "%p", doorbell_area);
	DUMP_QDEV_FIELD(qdev, "%d", doorbell_area_size);
	DUMP_QDEV_FIELD(qdev, "%x", msg_enable);
	DUMP_QDEV_FIELD(qdev, "%p", rx_ring_shadow_reg_area);
	DUMP_QDEV_DMA_FIELD(qdev, rx_ring_shadow_reg_dma);
	DUMP_QDEV_FIELD(qdev, "%p", tx_ring_shadow_reg_area);
	DUMP_QDEV_DMA_FIELD(qdev, tx_ring_shadow_reg_dma);
	DUMP_QDEV_FIELD(qdev, "%d", intr_count);
	/* MSI-X table exists only when MSI-X was successfully enabled. */
	if (qdev->msi_x_entry)
		for (i = 0; i < qdev->intr_count; i++) {
			DUMP_QDEV_ARRAY(qdev, "%d", msi_x_entry, i, vector);
			DUMP_QDEV_ARRAY(qdev, "%d", msi_x_entry, i, entry);
		}
	for (i = 0; i < qdev->intr_count; i++) {
		DUMP_QDEV_ARRAY(qdev, "%p", intr_context, i, qdev);
		DUMP_QDEV_ARRAY(qdev, "%d", intr_context, i, intr);
		DUMP_QDEV_ARRAY(qdev, "%d", intr_context, i, hooked);
		DUMP_QDEV_ARRAY(qdev, "0x%08x", intr_context, i, intr_en_mask);
		DUMP_QDEV_ARRAY(qdev, "0x%08x", intr_context, i, intr_dis_mask);
		DUMP_QDEV_ARRAY(qdev, "0x%08x", intr_context, i, intr_read_mask);
	}
	DUMP_QDEV_FIELD(qdev, "%d", tx_ring_count);
	DUMP_QDEV_FIELD(qdev, "%d", rx_ring_count);
	DUMP_QDEV_FIELD(qdev, "%d", ring_mem_size);
	DUMP_QDEV_FIELD(qdev, "%p", ring_mem);
	DUMP_QDEV_FIELD(qdev, "%d", intr_count);
	DUMP_QDEV_FIELD(qdev, "%p", tx_ring);
	DUMP_QDEV_FIELD(qdev, "%d", rss_ring_count);
	DUMP_QDEV_FIELD(qdev, "%p", rx_ring);
	DUMP_QDEV_FIELD(qdev, "%d", default_rx_queue);
	DUMP_QDEV_FIELD(qdev, "0x%08x", xg_sem_mask);
	DUMP_QDEV_FIELD(qdev, "0x%08x", port_link_up);
	DUMP_QDEV_FIELD(qdev, "0x%08x", port_init);
	DUMP_QDEV_FIELD(qdev, "%u", lbq_buf_size);
}
1633 #endif
1634 
1635 #ifdef QL_CB_DUMP
/* Dump a work queue (TX ring) init control block.  Multi-byte fields
 * are stored little-endian, hence the le16/le64 conversions.
 */
void ql_dump_wqicb(struct wqicb *wqicb)
{
	pr_err("Dumping wqicb stuff...\n");
	pr_err("wqicb->len = 0x%x\n", le16_to_cpu(wqicb->len));
	pr_err("wqicb->flags = %x\n", le16_to_cpu(wqicb->flags));
	pr_err("wqicb->cq_id_rss = %d\n",
	       le16_to_cpu(wqicb->cq_id_rss));
	pr_err("wqicb->rid = 0x%x\n", le16_to_cpu(wqicb->rid));
	pr_err("wqicb->wq_addr = 0x%llx\n",
	       (unsigned long long) le64_to_cpu(wqicb->addr));
	pr_err("wqicb->wq_cnsmr_idx_addr = 0x%llx\n",
	       (unsigned long long) le64_to_cpu(wqicb->cnsmr_idx_addr));
}
1649 
/* Dump the driver-side state of one TX ring: queue memory, doorbell
 * registers, indices and outstanding-send count.  Safe to call with a
 * NULL ring.
 */
void ql_dump_tx_ring(struct tx_ring *tx_ring)
{
	if (!tx_ring)
		return;
	pr_err("===================== Dumping tx_ring %d ===============\n",
	       tx_ring->wq_id);
	pr_err("tx_ring->base = %p\n", tx_ring->wq_base);
	pr_err("tx_ring->base_dma = 0x%llx\n",
	       (unsigned long long) tx_ring->wq_base_dma);
	/* Shadow register may not be mapped yet; only read it if present. */
	pr_err("tx_ring->cnsmr_idx_sh_reg, addr = 0x%p, value = %d\n",
	       tx_ring->cnsmr_idx_sh_reg,
	       tx_ring->cnsmr_idx_sh_reg
			? ql_read_sh_reg(tx_ring->cnsmr_idx_sh_reg) : 0);
	pr_err("tx_ring->size = %d\n", tx_ring->wq_size);
	pr_err("tx_ring->len = %d\n", tx_ring->wq_len);
	pr_err("tx_ring->prod_idx_db_reg = %p\n", tx_ring->prod_idx_db_reg);
	pr_err("tx_ring->valid_db_reg = %p\n", tx_ring->valid_db_reg);
	pr_err("tx_ring->prod_idx = %d\n", tx_ring->prod_idx);
	pr_err("tx_ring->cq_id = %d\n", tx_ring->cq_id);
	pr_err("tx_ring->wq_id = %d\n", tx_ring->wq_id);
	pr_err("tx_ring->q = %p\n", tx_ring->q);
	pr_err("tx_ring->tx_count = %d\n", atomic_read(&tx_ring->tx_count));
}
1673 
/* Dump an RSS init control block: base CQ, flag bits, indirection mask,
 * hash CQ table and the IPv6/IPv4 hash keys.
 */
void ql_dump_ricb(struct ricb *ricb)
{
	int i;
	pr_err("===================== Dumping ricb ===============\n");
	pr_err("Dumping ricb stuff...\n");

	pr_err("ricb->base_cq = %d\n", ricb->base_cq & 0x1f);
	/* NOTE(review): RSS_L4K is tested against base_cq while all other
	 * bits are tested against flags; given base_cq is masked with 0x1f
	 * above this looks intentional (flag lives in base_cq's upper
	 * bits), but verify against the ricb layout in qlge.h.
	 */
	pr_err("ricb->flags = %s%s%s%s%s%s%s%s%s\n",
	       ricb->base_cq & RSS_L4K ? "RSS_L4K " : "",
	       ricb->flags & RSS_L6K ? "RSS_L6K " : "",
	       ricb->flags & RSS_LI ? "RSS_LI " : "",
	       ricb->flags & RSS_LB ? "RSS_LB " : "",
	       ricb->flags & RSS_LM ? "RSS_LM " : "",
	       ricb->flags & RSS_RI4 ? "RSS_RI4 " : "",
	       ricb->flags & RSS_RT4 ? "RSS_RT4 " : "",
	       ricb->flags & RSS_RI6 ? "RSS_RI6 " : "",
	       ricb->flags & RSS_RT6 ? "RSS_RT6 " : "");
	pr_err("ricb->mask = 0x%.04x\n", le16_to_cpu(ricb->mask));
	for (i = 0; i < 16; i++)
		pr_err("ricb->hash_cq_id[%d] = 0x%.08x\n", i,
		       le32_to_cpu(ricb->hash_cq_id[i]));
	for (i = 0; i < 10; i++)
		pr_err("ricb->ipv6_hash_key[%d] = 0x%.08x\n", i,
		       le32_to_cpu(ricb->ipv6_hash_key[i]));
	for (i = 0; i < 4; i++)
		pr_err("ricb->ipv4_hash_key[%d] = 0x%.08x\n", i,
		       le32_to_cpu(ricb->ipv4_hash_key[i]));
}
1702 
/* Dump a completion queue init control block, including the large and
 * small RX buffer queue (lbq/sbq) parameters.  Multi-byte fields are
 * little-endian.
 */
void ql_dump_cqicb(struct cqicb *cqicb)
{
	pr_err("Dumping cqicb stuff...\n");

	pr_err("cqicb->msix_vect = %d\n", cqicb->msix_vect);
	pr_err("cqicb->flags = %x\n", cqicb->flags);
	pr_err("cqicb->len = %d\n", le16_to_cpu(cqicb->len));
	pr_err("cqicb->addr = 0x%llx\n",
	       (unsigned long long) le64_to_cpu(cqicb->addr));
	pr_err("cqicb->prod_idx_addr = 0x%llx\n",
	       (unsigned long long) le64_to_cpu(cqicb->prod_idx_addr));
	pr_err("cqicb->pkt_delay = 0x%.04x\n",
	       le16_to_cpu(cqicb->pkt_delay));
	pr_err("cqicb->irq_delay = 0x%.04x\n",
	       le16_to_cpu(cqicb->irq_delay));
	pr_err("cqicb->lbq_addr = 0x%llx\n",
	       (unsigned long long) le64_to_cpu(cqicb->lbq_addr));
	pr_err("cqicb->lbq_buf_size = 0x%.04x\n",
	       le16_to_cpu(cqicb->lbq_buf_size));
	pr_err("cqicb->lbq_len = 0x%.04x\n",
	       le16_to_cpu(cqicb->lbq_len));
	pr_err("cqicb->sbq_addr = 0x%llx\n",
	       (unsigned long long) le64_to_cpu(cqicb->sbq_addr));
	pr_err("cqicb->sbq_buf_size = 0x%.04x\n",
	       le16_to_cpu(cqicb->sbq_buf_size));
	pr_err("cqicb->sbq_len = 0x%.04x\n",
	       le16_to_cpu(cqicb->sbq_len));
}
1731 
1732 static const char *qlge_rx_ring_type_name(struct rx_ring *rx_ring)
1733 {
1734 	struct ql_adapter *qdev = rx_ring->qdev;
1735 
1736 	if (rx_ring->cq_id < qdev->rss_ring_count)
1737 		return "RX COMPLETION";
1738 	else
1739 		return "TX COMPLETION";
1740 };
1741 
/* Dump the driver-side state of one RX (completion) ring, including its
 * large and small buffer queues.  Safe to call with a NULL ring.
 */
void ql_dump_rx_ring(struct rx_ring *rx_ring)
{
	if (!rx_ring)
		return;
	pr_err("===================== Dumping rx_ring %d ===============\n",
	       rx_ring->cq_id);
	pr_err("Dumping rx_ring %d, type = %s\n", rx_ring->cq_id,
	       qlge_rx_ring_type_name(rx_ring));
	pr_err("rx_ring->cqicb = %p\n", &rx_ring->cqicb);
	pr_err("rx_ring->cq_base = %p\n", rx_ring->cq_base);
	pr_err("rx_ring->cq_base_dma = %llx\n",
	       (unsigned long long) rx_ring->cq_base_dma);
	pr_err("rx_ring->cq_size = %d\n", rx_ring->cq_size);
	pr_err("rx_ring->cq_len = %d\n", rx_ring->cq_len);
	/* Shadow register may not be mapped yet; only read it if present. */
	pr_err("rx_ring->prod_idx_sh_reg, addr = 0x%p, value = %d\n",
	       rx_ring->prod_idx_sh_reg,
	       rx_ring->prod_idx_sh_reg
			? ql_read_sh_reg(rx_ring->prod_idx_sh_reg) : 0);
	pr_err("rx_ring->prod_idx_sh_reg_dma = %llx\n",
	       (unsigned long long) rx_ring->prod_idx_sh_reg_dma);
	pr_err("rx_ring->cnsmr_idx_db_reg = %p\n",
	       rx_ring->cnsmr_idx_db_reg);
	pr_err("rx_ring->cnsmr_idx = %d\n", rx_ring->cnsmr_idx);
	pr_err("rx_ring->curr_entry = %p\n", rx_ring->curr_entry);
	pr_err("rx_ring->valid_db_reg = %p\n", rx_ring->valid_db_reg);

	/* Large buffer queue. */
	pr_err("rx_ring->lbq.base = %p\n", rx_ring->lbq.base);
	pr_err("rx_ring->lbq.base_dma = %llx\n",
	       (unsigned long long)rx_ring->lbq.base_dma);
	pr_err("rx_ring->lbq.base_indirect = %p\n",
	       rx_ring->lbq.base_indirect);
	pr_err("rx_ring->lbq.base_indirect_dma = %llx\n",
	       (unsigned long long)rx_ring->lbq.base_indirect_dma);
	pr_err("rx_ring->lbq = %p\n", rx_ring->lbq.queue);
	pr_err("rx_ring->lbq.prod_idx_db_reg = %p\n",
	       rx_ring->lbq.prod_idx_db_reg);
	pr_err("rx_ring->lbq.next_to_use = %d\n", rx_ring->lbq.next_to_use);
	pr_err("rx_ring->lbq.next_to_clean = %d\n", rx_ring->lbq.next_to_clean);
	pr_err("rx_ring->lbq_clean_idx = %d\n", rx_ring->lbq_clean_idx);
	pr_err("rx_ring->lbq_free_cnt = %d\n", rx_ring->lbq_free_cnt);

	/* Small buffer queue. */
	pr_err("rx_ring->sbq.base = %p\n", rx_ring->sbq.base);
	pr_err("rx_ring->sbq.base_dma = %llx\n",
	       (unsigned long long)rx_ring->sbq.base_dma);
	pr_err("rx_ring->sbq.base_indirect = %p\n",
	       rx_ring->sbq.base_indirect);
	pr_err("rx_ring->sbq.base_indirect_dma = %llx\n",
	       (unsigned long long)rx_ring->sbq.base_indirect_dma);
	pr_err("rx_ring->sbq = %p\n", rx_ring->sbq.queue);
	pr_err("rx_ring->sbq.prod_idx_db_reg addr = %p\n",
	       rx_ring->sbq.prod_idx_db_reg);
	pr_err("rx_ring->sbq.next_to_use = %d\n", rx_ring->sbq.next_to_use);
	pr_err("rx_ring->sbq.next_to_clean = %d\n", rx_ring->sbq.next_to_clean);
	pr_err("rx_ring->cq_id = %d\n", rx_ring->cq_id);
	pr_err("rx_ring->irq = %d\n", rx_ring->irq);
	pr_err("rx_ring->cpu = %d\n", rx_ring->cpu);
	pr_err("rx_ring->qdev = %p\n", rx_ring->qdev);
}
1800 
/* Read back a hardware control block of 'size' bytes into a temporary
 * buffer via ql_write_cfg() (direction per its error message: the chip
 * "uploads" the block) and dump it with the formatter matching the
 * CFG_* bit.  NOTE(review): GFP_ATOMIC suggests this may run in atomic
 * context -- confirm at call sites.
 */
void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id)
{
	void *ptr;

	pr_err("%s: Enter\n", __func__);

	ptr = kmalloc(size, GFP_ATOMIC);
	if (!ptr)
		return;

	if (ql_write_cfg(qdev, ptr, size, bit, q_id)) {
		pr_err("%s: Failed to upload control block!\n", __func__);
		goto fail_it;
	}
	switch (bit) {
	case CFG_DRQ:
		ql_dump_wqicb((struct wqicb *)ptr);
		break;
	case CFG_DCQ:
		ql_dump_cqicb((struct cqicb *)ptr);
		break;
	case CFG_DR:
		ql_dump_ricb((struct ricb *)ptr);
		break;
	default:
		pr_err("%s: Invalid bit value = %x\n", __func__, bit);
		break;
	}
fail_it:
	kfree(ptr);
}
1832 #endif
1833 
1834 #ifdef QL_OB_DUMP
1835 void ql_dump_tx_desc(struct tx_buf_desc *tbd)
1836 {
1837 	pr_err("tbd->addr  = 0x%llx\n",
1838 	       le64_to_cpu((u64) tbd->addr));
1839 	pr_err("tbd->len   = %d\n",
1840 	       le32_to_cpu(tbd->len & TX_DESC_LEN_MASK));
1841 	pr_err("tbd->flags = %s %s\n",
1842 	       tbd->len & TX_DESC_C ? "C" : ".",
1843 	       tbd->len & TX_DESC_E ? "E" : ".");
1844 	tbd++;
1845 	pr_err("tbd->addr  = 0x%llx\n",
1846 	       le64_to_cpu((u64) tbd->addr));
1847 	pr_err("tbd->len   = %d\n",
1848 	       le32_to_cpu(tbd->len & TX_DESC_LEN_MASK));
1849 	pr_err("tbd->flags = %s %s\n",
1850 	       tbd->len & TX_DESC_C ? "C" : ".",
1851 	       tbd->len & TX_DESC_E ? "E" : ".");
1852 	tbd++;
1853 	pr_err("tbd->addr  = 0x%llx\n",
1854 	       le64_to_cpu((u64) tbd->addr));
1855 	pr_err("tbd->len   = %d\n",
1856 	       le32_to_cpu(tbd->len & TX_DESC_LEN_MASK));
1857 	pr_err("tbd->flags = %s %s\n",
1858 	       tbd->len & TX_DESC_C ? "C" : ".",
1859 	       tbd->len & TX_DESC_E ? "E" : ".");
1860 
1861 }
1862 
1863 void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb)
1864 {
1865 	struct ob_mac_tso_iocb_req *ob_mac_tso_iocb =
1866 	    (struct ob_mac_tso_iocb_req *)ob_mac_iocb;
1867 	struct tx_buf_desc *tbd;
1868 	u16 frame_len;
1869 
1870 	pr_err("%s\n", __func__);
1871 	pr_err("opcode         = %s\n",
1872 	       (ob_mac_iocb->opcode == OPCODE_OB_MAC_IOCB) ? "MAC" : "TSO");
1873 	pr_err("flags1          = %s %s %s %s %s\n",
1874 	       ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_OI ? "OI" : "",
1875 	       ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_I ? "I" : "",
1876 	       ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_D ? "D" : "",
1877 	       ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_IP4 ? "IP4" : "",
1878 	       ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_IP6 ? "IP6" : "");
1879 	pr_err("flags2          = %s %s %s\n",
1880 	       ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_LSO ? "LSO" : "",
1881 	       ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_UC ? "UC" : "",
1882 	       ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_TC ? "TC" : "");
1883 	pr_err("flags3          = %s %s %s\n",
1884 	       ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_IC ? "IC" : "",
1885 	       ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_DFP ? "DFP" : "",
1886 	       ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_V ? "V" : "");
1887 	pr_err("tid = %x\n", ob_mac_iocb->tid);
1888 	pr_err("txq_idx = %d\n", ob_mac_iocb->txq_idx);
1889 	pr_err("vlan_tci      = %x\n", ob_mac_tso_iocb->vlan_tci);
1890 	if (ob_mac_iocb->opcode == OPCODE_OB_MAC_TSO_IOCB) {
1891 		pr_err("frame_len      = %d\n",
1892 		       le32_to_cpu(ob_mac_tso_iocb->frame_len));
1893 		pr_err("mss      = %d\n",
1894 		       le16_to_cpu(ob_mac_tso_iocb->mss));
1895 		pr_err("prot_hdr_len   = %d\n",
1896 		       le16_to_cpu(ob_mac_tso_iocb->total_hdrs_len));
1897 		pr_err("hdr_offset     = 0x%.04x\n",
1898 		       le16_to_cpu(ob_mac_tso_iocb->net_trans_offset));
1899 		frame_len = le32_to_cpu(ob_mac_tso_iocb->frame_len);
1900 	} else {
1901 		pr_err("frame_len      = %d\n",
1902 		       le16_to_cpu(ob_mac_iocb->frame_len));
1903 		frame_len = le16_to_cpu(ob_mac_iocb->frame_len);
1904 	}
1905 	tbd = &ob_mac_iocb->tbd[0];
1906 	ql_dump_tx_desc(tbd);
1907 }
1908 
1909 void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp)
1910 {
1911 	pr_err("%s\n", __func__);
1912 	pr_err("opcode         = %d\n", ob_mac_rsp->opcode);
1913 	pr_err("flags          = %s %s %s %s %s %s %s\n",
1914 	       ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_OI ? "OI" : ".",
1915 	       ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_I ? "I" : ".",
1916 	       ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_E ? "E" : ".",
1917 	       ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_S ? "S" : ".",
1918 	       ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_L ? "L" : ".",
1919 	       ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_P ? "P" : ".",
1920 	       ob_mac_rsp->flags2 & OB_MAC_IOCB_RSP_B ? "B" : ".");
1921 	pr_err("tid = %x\n", ob_mac_rsp->tid);
1922 }
1923 #endif
1924 
1925 #ifdef QL_IB_DUMP
/* Dump an inbound MAC IOCB completion: the opcode, the four flags bytes
 * decoded into human-readable tokens, and the data/header buffer
 * addresses and lengths.  Multi-bit fields (multicast type, error type,
 * RSS hash type) are compared against their masks so exactly one token
 * prints per field.
 */
void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp)
{
	pr_err("%s\n", __func__);
	pr_err("opcode         = 0x%x\n", ib_mac_rsp->opcode);
	pr_err("flags1 = %s%s%s%s%s%s\n",
	       ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_OI ? "OI " : "",
	       ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_I ? "I " : "",
	       ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_TE ? "TE " : "",
	       ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_NU ? "NU " : "",
	       ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_IE ? "IE " : "",
	       ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_B ? "B " : "");

	/* Multicast type is a multi-bit field; only one label matches. */
	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK)
		pr_err("%s%s%s Multicast\n",
		       (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
		       IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "",
		       (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
		       IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
		       (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
		       IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");

	pr_err("flags2 = %s%s%s%s%s\n",
	       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) ? "P " : "",
	       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ? "V " : "",
	       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) ? "U " : "",
	       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) ? "T " : "",
	       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_FO) ? "FO " : "");

	/* Error type is a multi-bit field; only one label matches. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK)
		pr_err("%s%s%s%s%s error\n",
		       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
		       IB_MAC_IOCB_RSP_ERR_OVERSIZE ? "oversize" : "",
		       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
		       IB_MAC_IOCB_RSP_ERR_UNDERSIZE ? "undersize" : "",
		       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
		       IB_MAC_IOCB_RSP_ERR_PREAMBLE ? "preamble" : "",
		       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
		       IB_MAC_IOCB_RSP_ERR_FRAME_LEN ? "frame length" : "",
		       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
		       IB_MAC_IOCB_RSP_ERR_CRC ? "CRC" : "");

	pr_err("flags3 = %s%s\n",
	       ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS ? "DS " : "",
	       ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL ? "DL " : "");

	/* RSS hash type is a multi-bit field; only one label matches. */
	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK)
		pr_err("RSS flags = %s%s%s%s\n",
		       ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
			IB_MAC_IOCB_RSP_M_IPV4) ? "IPv4 RSS" : "",
		       ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
			IB_MAC_IOCB_RSP_M_IPV6) ? "IPv6 RSS " : "",
		       ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
			IB_MAC_IOCB_RSP_M_TCP_V4) ? "TCP/IPv4 RSS" : "",
		       ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
			IB_MAC_IOCB_RSP_M_TCP_V6) ? "TCP/IPv6 RSS" : "");

	pr_err("data_len	= %d\n",
	       le32_to_cpu(ib_mac_rsp->data_len));
	pr_err("data_addr    = 0x%llx\n",
	       (unsigned long long) le64_to_cpu(ib_mac_rsp->data_addr));
	/* RSS hash value and VLAN tag are only valid when their flag is set. */
	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK)
		pr_err("rss    = %x\n",
		       le32_to_cpu(ib_mac_rsp->rss));
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V)
		pr_err("vlan_id    = %x\n",
		       le16_to_cpu(ib_mac_rsp->vlan_id));

	pr_err("flags4 = %s%s%s\n",
	       ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV ? "HV " : "",
		ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS ? "HS " : "",
		ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HL ? "HL " : "");

	/* A split header (HV set) carries its own buffer address/length. */
	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
		pr_err("hdr length	= %d\n",
		       le32_to_cpu(ib_mac_rsp->hdr_len));
		pr_err("hdr addr    = 0x%llx\n",
		       (unsigned long long) le64_to_cpu(ib_mac_rsp->hdr_addr));
	}
}
2005 #endif
2006 
2007 #ifdef QL_ALL_DUMP
2008 void ql_dump_all(struct ql_adapter *qdev)
2009 {
2010 	int i;
2011 
2012 	QL_DUMP_REGS(qdev);
2013 	QL_DUMP_QDEV(qdev);
2014 	for (i = 0; i < qdev->tx_ring_count; i++) {
2015 		QL_DUMP_TX_RING(&qdev->tx_ring[i]);
2016 		QL_DUMP_WQICB((struct wqicb *)&qdev->tx_ring[i]);
2017 	}
2018 	for (i = 0; i < qdev->rx_ring_count; i++) {
2019 		QL_DUMP_RX_RING(&qdev->rx_ring[i]);
2020 		QL_DUMP_CQICB((struct cqicb *)&qdev->rx_ring[i]);
2021 	}
2022 }
2023 #endif
2024