1 /*
2  * QLogic Fibre Channel HBA Driver
3  * Copyright (c)  2003-2014 QLogic Corporation
4  *
5  * See LICENSE.qla2xxx for copyright and licensing details.
6  */
7 
8 /*
9  * Table showing the current message id in use for each level.
10  * Update this table when adding log/debug messages.
11  * ----------------------------------------------------------------------
12  * |             Level            |   Last Value Used  |     Holes      |
13  * ----------------------------------------------------------------------
14  * | Module Init and Probe        |       0x0193       | 0x0146         |
15  * |                              |                    | 0x015b-0x0160  |
16  * |                              |                    | 0x016e         |
17  * | Mailbox commands             |       0x1206       | 0x11a2-0x11ff  |
18  * | Device Discovery             |       0x2134       | 0x210e-0x2116  |
19  * |                              |                    | 0x211a         |
20  * |                              |                    | 0x211c-0x2128  |
21  * |                              |                    | 0x212a-0x2134  |
22  * | Queue Command and IO tracing |       0x3074       | 0x300b         |
23  * |                              |                    | 0x3027-0x3028  |
24  * |                              |                    | 0x303d-0x3041  |
25  * |                              |                    | 0x302d,0x3033  |
26  * |                              |                    | 0x3036,0x3038  |
27  * |                              |                    | 0x303a         |
28  * | DPC Thread                   |       0x4023       | 0x4002,0x4013  |
29  * | Async Events                 |       0x5090       | 0x502b-0x502f  |
30  * |                              |                    | 0x5047         |
31  * |                              |                    | 0x5084,0x5075  |
32  * |                              |                    | 0x503d,0x5044  |
33  * |                              |                    | 0x505f         |
34  * | Timer Routines               |       0x6012       |                |
35  * | User Space Interactions      |       0x70e3       | 0x7018,0x702e  |
36  * |                              |                    | 0x7020,0x7024  |
37  * |                              |                    | 0x7039,0x7045  |
38  * |                              |                    | 0x7073-0x7075  |
39  * |                              |                    | 0x70a5-0x70a6  |
40  * |                              |                    | 0x70a8,0x70ab  |
41  * |                              |                    | 0x70ad-0x70ae  |
42  * |                              |                    | 0x70d0-0x70d6  |
43  * |                              |                    | 0x70d7-0x70db  |
44  * | Task Management              |       0x8042       | 0x8000         |
45  * |                              |                    | 0x8019         |
46  * |                              |                    | 0x8025,0x8026  |
47  * |                              |                    | 0x8031,0x8032  |
48  * |                              |                    | 0x8039,0x803c  |
49  * | AER/EEH                      |       0x9011       |                |
50  * | Virtual Port                 |       0xa007       |                |
51  * | ISP82XX Specific             |       0xb157       | 0xb002,0xb024  |
52  * |                              |                    | 0xb09e,0xb0ae  |
53  * |                              |                    | 0xb0c3,0xb0c6  |
54  * |                              |                    | 0xb0e0-0xb0ef  |
55  * |                              |                    | 0xb085,0xb0dc  |
56  * |                              |                    | 0xb107,0xb108  |
57  * |                              |                    | 0xb111,0xb11e  |
58  * |                              |                    | 0xb12c,0xb12d  |
59  * |                              |                    | 0xb13a,0xb142  |
60  * |                              |                    | 0xb13c-0xb140  |
61  * |                              |                    | 0xb149         |
62  * | MultiQ                       |       0xc010       |                |
63  * | Misc                         |       0xd303       | 0xd031-0xd0ff  |
64  * |                              |                    | 0xd101-0xd1fe  |
65  * |                              |                    | 0xd214-0xd2fe  |
66  * | Target Mode                  |       0xe081       |                |
67  * | Target Mode Management       |       0xf09b       | 0xf002         |
68  * |                              |                    | 0xf046-0xf049  |
69  * | Target Mode Task Management  |       0x1000d      |                |
70  * ----------------------------------------------------------------------
71  */
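/*
 * Illustrative example only (not part of the driver): a new Device
 * Discovery debug message would take an unused id in the 0x2xxx range
 * from the table above, for example:
 *
 *	ql_dbg(ql_dbg_disc, vha, 0x2135, "example discovery message\n");
 */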
72 
73 #include "qla_def.h"
74 
75 #include <linux/delay.h>
76 #define CREATE_TRACE_POINTS
77 #include <trace/events/qla.h>
78 
79 static uint32_t ql_dbg_offset = 0x800;
80 
81 static inline void
82 qla2xxx_prep_dump(struct qla_hw_data *ha, struct qla2xxx_fw_dump *fw_dump)
83 {
84 	fw_dump->fw_major_version = htonl(ha->fw_major_version);
85 	fw_dump->fw_minor_version = htonl(ha->fw_minor_version);
86 	fw_dump->fw_subminor_version = htonl(ha->fw_subminor_version);
87 	fw_dump->fw_attributes = htonl(ha->fw_attributes);
88 
89 	fw_dump->vendor = htonl(ha->pdev->vendor);
90 	fw_dump->device = htonl(ha->pdev->device);
91 	fw_dump->subsystem_vendor = htonl(ha->pdev->subsystem_vendor);
92 	fw_dump->subsystem_device = htonl(ha->pdev->subsystem_device);
93 }
94 
95 static inline void *
96 qla2xxx_copy_queues(struct qla_hw_data *ha, void *ptr)
97 {
98 	struct req_que *req = ha->req_q_map[0];
99 	struct rsp_que *rsp = ha->rsp_q_map[0];
100 	/* Request queue. */
101 	memcpy(ptr, req->ring, req->length *
102 	    sizeof(request_t));
103 
104 	/* Response queue. */
105 	ptr += req->length * sizeof(request_t);
106 	memcpy(ptr, rsp->ring, rsp->length  *
107 	    sizeof(response_t));
108 
109 	return ptr + (rsp->length * sizeof(response_t));
110 }
111 
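/*
 * Dump MPI RAM with the MBC_LOAD_DUMP_MPI_RAM mailbox command, reusing the
 * GID list DMA buffer as a bounce buffer and polling the host status
 * register for mailbox completion.  *nxt tracks how far into @ram the dump
 * has progressed.
 */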
112 int
113 qla27xx_dump_mpi_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
114 	uint32_t ram_dwords, void **nxt)
115 {
116 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
117 	dma_addr_t dump_dma = ha->gid_list_dma;
118 	uint32_t *chunk = (void *)ha->gid_list;
119 	uint32_t dwords = qla2x00_gid_list_size(ha) / 4;
120 	uint32_t stat;
121 	ulong i, j, timer = 6000000;
122 	int rval = QLA_FUNCTION_FAILED;
123 
124 	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
125 	for (i = 0; i < ram_dwords; i += dwords, addr += dwords) {
126 		if (i + dwords > ram_dwords)
127 			dwords = ram_dwords - i;
128 
129 		WRT_REG_WORD(&reg->mailbox0, MBC_LOAD_DUMP_MPI_RAM);
130 		WRT_REG_WORD(&reg->mailbox1, LSW(addr));
131 		WRT_REG_WORD(&reg->mailbox8, MSW(addr));
132 
133 		WRT_REG_WORD(&reg->mailbox2, MSW(LSD(dump_dma)));
134 		WRT_REG_WORD(&reg->mailbox3, LSW(LSD(dump_dma)));
135 		WRT_REG_WORD(&reg->mailbox6, MSW(MSD(dump_dma)));
136 		WRT_REG_WORD(&reg->mailbox7, LSW(MSD(dump_dma)));
137 
138 		WRT_REG_WORD(&reg->mailbox4, MSW(dwords));
139 		WRT_REG_WORD(&reg->mailbox5, LSW(dwords));
140 
141 		WRT_REG_WORD(&reg->mailbox9, 0);
142 		WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
143 
144 		ha->flags.mbox_int = 0;
145 		while (timer--) {
146 			udelay(5);
147 
148 			stat = RD_REG_DWORD(&reg->host_status);
149 			/* Check for pending interrupts. */
150 			if (!(stat & HSRX_RISC_INT))
151 				continue;
152 
153 			stat &= 0xff;
154 			if (stat != 0x1 && stat != 0x2 &&
155 			    stat != 0x10 && stat != 0x11) {
156 
157 				/* Clear this intr; it wasn't a mailbox intr */
158 				WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
159 				RD_REG_DWORD(&reg->hccr);
160 				continue;
161 			}
162 
163 			set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
164 			rval = RD_REG_WORD(&reg->mailbox0) & MBS_MASK;
165 			WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
166 			RD_REG_DWORD(&reg->hccr);
167 			break;
168 		}
169 		ha->flags.mbox_int = 1;
170 		*nxt = ram + i;
171 
172 		if (!test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
173 			/* no interrupt, timed out */
174 			return rval;
175 		}
176 		if (rval) {
177 			/* error completion status */
178 			return rval;
179 		}
180 		for (j = 0; j < dwords; j++) {
181 			ram[i + j] =
182 			    (IS_QLA27XX(ha) || IS_QLA28XX(ha)) ?
183 			    chunk[j] : swab32(chunk[j]);
184 		}
185 	}
186 
187 	*nxt = ram + i;
188 	return QLA_SUCCESS;
189 }
190 
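/*
 * Like qla27xx_dump_mpi_ram(), but dumps RISC RAM via the
 * MBC_DUMP_RISC_RAM_EXTENDED mailbox command.
 */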
191 int
192 qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
193     uint32_t ram_dwords, void **nxt)
194 {
195 	int rval = QLA_FUNCTION_FAILED;
196 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
197 	dma_addr_t dump_dma = ha->gid_list_dma;
198 	uint32_t *chunk = (void *)ha->gid_list;
199 	uint32_t dwords = qla2x00_gid_list_size(ha) / 4;
200 	uint32_t stat;
201 	ulong i, j, timer = 6000000;
202 
203 	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
204 
205 	for (i = 0; i < ram_dwords; i += dwords, addr += dwords) {
206 		if (i + dwords > ram_dwords)
207 			dwords = ram_dwords - i;
208 
209 		WRT_REG_WORD(&reg->mailbox0, MBC_DUMP_RISC_RAM_EXTENDED);
210 		WRT_REG_WORD(&reg->mailbox1, LSW(addr));
211 		WRT_REG_WORD(&reg->mailbox8, MSW(addr));
212 
213 		WRT_REG_WORD(&reg->mailbox2, MSW(LSD(dump_dma)));
214 		WRT_REG_WORD(&reg->mailbox3, LSW(LSD(dump_dma)));
215 		WRT_REG_WORD(&reg->mailbox6, MSW(MSD(dump_dma)));
216 		WRT_REG_WORD(&reg->mailbox7, LSW(MSD(dump_dma)));
217 
218 		WRT_REG_WORD(&reg->mailbox4, MSW(dwords));
219 		WRT_REG_WORD(&reg->mailbox5, LSW(dwords));
220 		WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
221 
222 		ha->flags.mbox_int = 0;
223 		while (timer--) {
224 			udelay(5);
225 			stat = RD_REG_DWORD(&reg->host_status);
226 
227 			/* Check for pending interrupts. */
228 			if (!(stat & HSRX_RISC_INT))
229 				continue;
230 
231 			stat &= 0xff;
232 			if (stat != 0x1 && stat != 0x2 &&
233 			    stat != 0x10 && stat != 0x11) {
234 				WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
235 				RD_REG_DWORD(&reg->hccr);
236 				continue;
237 			}
238 
239 			set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
240 			rval = RD_REG_WORD(&reg->mailbox0) & MBS_MASK;
241 			WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
242 			RD_REG_DWORD(&reg->hccr);
243 			break;
244 		}
245 		ha->flags.mbox_int = 1;
246 		*nxt = ram + i;
247 
248 		if (!test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
249 			/* no interrupt, timed out */
250 			return rval;
251 		}
252 		if (rval) {
253 			/* error completion status */
254 			return rval;
255 		}
256 		for (j = 0; j < dwords; j++) {
257 			ram[i + j] =
258 			    (IS_QLA27XX(ha) || IS_QLA28XX(ha)) ?
259 			    chunk[j] : swab32(chunk[j]);
260 		}
261 	}
262 
263 	*nxt = ram + i;
264 	return QLA_SUCCESS;
265 }
266 
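/*
 * Dump code RAM (from 0x20000) followed by external memory (from 0x100000),
 * recording completion of each stage in the fw dump capability flags.
 */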
267 static int
268 qla24xx_dump_memory(struct qla_hw_data *ha, uint32_t *code_ram,
269     uint32_t cram_size, void **nxt)
270 {
271 	int rval;
272 
273 	/* Code RAM. */
274 	rval = qla24xx_dump_ram(ha, 0x20000, code_ram, cram_size / 4, nxt);
275 	if (rval != QLA_SUCCESS)
276 		return rval;
277 
278 	set_bit(RISC_SRAM_DUMP_CMPL, &ha->fw_dump_cap_flags);
279 
280 	/* External Memory. */
281 	rval = qla24xx_dump_ram(ha, 0x100000, *nxt,
282 	    ha->fw_memory_size - 0x100000 + 1, nxt);
283 	if (rval == QLA_SUCCESS)
284 		set_bit(RISC_EXT_MEM_DUMP_CMPL, &ha->fw_dump_cap_flags);
285 
286 	return rval;
287 }
288 
289 static uint32_t *
290 qla24xx_read_window(struct device_reg_24xx __iomem *reg, uint32_t iobase,
291     uint32_t count, uint32_t *buf)
292 {
293 	uint32_t __iomem *dmp_reg;
294 
295 	WRT_REG_DWORD(&reg->iobase_addr, iobase);
296 	dmp_reg = &reg->iobase_window;
297 	for ( ; count--; dmp_reg++)
298 		*buf++ = htonl(RD_REG_DWORD(dmp_reg));
299 
300 	return buf;
301 }
302 
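/*
 * Request a RISC pause and note in the fw dump capability flags whether the
 * RISC actually reports itself paused.
 */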
303 void
304 qla24xx_pause_risc(struct device_reg_24xx __iomem *reg, struct qla_hw_data *ha)
305 {
306 	WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_PAUSE);
307 
308 	/* A 100 usec delay is sufficient for the hardware to pause the RISC */
309 	udelay(100);
310 	if (RD_REG_DWORD(&reg->host_status) & HSRX_RISC_PAUSED)
311 		set_bit(RISC_PAUSE_CMPL, &ha->fw_dump_cap_flags);
312 }
313 
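/*
 * Shut down DMA, issue an ISP soft reset and wait for the RISC to become
 * ready again (mailbox0 returning to zero), flagging each completed stage
 * in the fw dump capability flags.
 */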
314 int
315 qla24xx_soft_reset(struct qla_hw_data *ha)
316 {
317 	int rval = QLA_SUCCESS;
318 	uint32_t cnt;
319 	uint16_t wd;
320 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
321 
322 	/*
323 	 * Reset RISC. The delay depends on the system architecture.
324 	 * The driver can proceed with the reset sequence after waiting
325 	 * for a timeout period.
326 	 */
327 	WRT_REG_DWORD(&reg->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
328 	for (cnt = 0; cnt < 30000; cnt++) {
329 		if ((RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE) == 0)
330 			break;
331 
332 		udelay(10);
333 	}
334 	if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE))
335 		set_bit(DMA_SHUTDOWN_CMPL, &ha->fw_dump_cap_flags);
336 
337 	WRT_REG_DWORD(&reg->ctrl_status,
338 	    CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
339 	pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);
340 
341 	udelay(100);
342 
343 	/* Wait for soft-reset to complete. */
344 	for (cnt = 0; cnt < 30000; cnt++) {
345 		if ((RD_REG_DWORD(&reg->ctrl_status) &
346 		    CSRX_ISP_SOFT_RESET) == 0)
347 			break;
348 
349 		udelay(10);
350 	}
351 	if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_ISP_SOFT_RESET))
352 		set_bit(ISP_RESET_CMPL, &ha->fw_dump_cap_flags);
353 
354 	WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET);
355 	RD_REG_DWORD(&reg->hccr);             /* PCI Posting. */
356 
357 	for (cnt = 10000; RD_REG_WORD(&reg->mailbox0) != 0 &&
358 	    rval == QLA_SUCCESS; cnt--) {
359 		if (cnt)
360 			udelay(10);
361 		else
362 			rval = QLA_FUNCTION_TIMEOUT;
363 	}
364 	if (rval == QLA_SUCCESS)
365 		set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags);
366 
367 	return rval;
368 }
369 
370 static int
371 qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint16_t *ram,
372     uint32_t ram_words, void **nxt)
373 {
374 	int rval;
375 	uint32_t cnt, stat, timer, words, idx;
376 	uint16_t mb0;
377 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
378 	dma_addr_t dump_dma = ha->gid_list_dma;
379 	uint16_t *dump = (uint16_t *)ha->gid_list;
380 
381 	rval = QLA_SUCCESS;
382 	mb0 = 0;
383 
384 	WRT_MAILBOX_REG(ha, reg, 0, MBC_DUMP_RISC_RAM_EXTENDED);
385 	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
386 
387 	words = qla2x00_gid_list_size(ha) / 2;
388 	for (cnt = 0; cnt < ram_words && rval == QLA_SUCCESS;
389 	    cnt += words, addr += words) {
390 		if (cnt + words > ram_words)
391 			words = ram_words - cnt;
392 
393 		WRT_MAILBOX_REG(ha, reg, 1, LSW(addr));
394 		WRT_MAILBOX_REG(ha, reg, 8, MSW(addr));
395 
396 		WRT_MAILBOX_REG(ha, reg, 2, MSW(dump_dma));
397 		WRT_MAILBOX_REG(ha, reg, 3, LSW(dump_dma));
398 		WRT_MAILBOX_REG(ha, reg, 6, MSW(MSD(dump_dma)));
399 		WRT_MAILBOX_REG(ha, reg, 7, LSW(MSD(dump_dma)));
400 
401 		WRT_MAILBOX_REG(ha, reg, 4, words);
402 		WRT_REG_WORD(&reg->hccr, HCCR_SET_HOST_INT);
403 
404 		for (timer = 6000000; timer; timer--) {
405 			/* Check for pending interrupts. */
406 			stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
407 			if (stat & HSR_RISC_INT) {
408 				stat &= 0xff;
409 
410 				if (stat == 0x1 || stat == 0x2) {
411 					set_bit(MBX_INTERRUPT,
412 					    &ha->mbx_cmd_flags);
413 
414 					mb0 = RD_MAILBOX_REG(ha, reg, 0);
415 
416 					/* Release mailbox registers. */
417 					WRT_REG_WORD(&reg->semaphore, 0);
418 					WRT_REG_WORD(&reg->hccr,
419 					    HCCR_CLR_RISC_INT);
420 					RD_REG_WORD(&reg->hccr);
421 					break;
422 				} else if (stat == 0x10 || stat == 0x11) {
423 					set_bit(MBX_INTERRUPT,
424 					    &ha->mbx_cmd_flags);
425 
426 					mb0 = RD_MAILBOX_REG(ha, reg, 0);
427 
428 					WRT_REG_WORD(&reg->hccr,
429 					    HCCR_CLR_RISC_INT);
430 					RD_REG_WORD(&reg->hccr);
431 					break;
432 				}
433 
434 				/* clear this intr; it wasn't a mailbox intr */
435 				WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
436 				RD_REG_WORD(&reg->hccr);
437 			}
438 			udelay(5);
439 		}
440 
441 		if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
442 			rval = mb0 & MBS_MASK;
443 			for (idx = 0; idx < words; idx++)
444 				ram[cnt + idx] = swab16(dump[idx]);
445 		} else {
446 			rval = QLA_FUNCTION_FAILED;
447 		}
448 	}
449 
450 	*nxt = rval == QLA_SUCCESS ? &ram[cnt] : NULL;
451 	return rval;
452 }
453 
454 static inline void
455 qla2xxx_read_window(struct device_reg_2xxx __iomem *reg, uint32_t count,
456     uint16_t *buf)
457 {
458 	uint16_t __iomem *dmp_reg = &reg->u.isp2300.fb_cmd;
459 
460 	for ( ; count--; dmp_reg++)
461 		*buf++ = htons(RD_REG_WORD(dmp_reg));
462 }
463 
464 static inline void *
465 qla24xx_copy_eft(struct qla_hw_data *ha, void *ptr)
466 {
467 	if (!ha->eft)
468 		return ptr;
469 
470 	memcpy(ptr, ha->eft, ntohl(ha->fw_dump->eft_size));
471 	return ptr + ntohl(ha->fw_dump->eft_size);
472 }
473 
474 static inline void *
475 qla25xx_copy_fce(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
476 {
477 	uint32_t cnt;
478 	uint32_t *iter_reg;
479 	struct qla2xxx_fce_chain *fcec = ptr;
480 
481 	if (!ha->fce)
482 		return ptr;
483 
484 	*last_chain = &fcec->type;
485 	fcec->type = htonl(DUMP_CHAIN_FCE);
486 	fcec->chain_size = htonl(sizeof(struct qla2xxx_fce_chain) +
487 	    fce_calc_size(ha->fce_bufs));
488 	fcec->size = htonl(fce_calc_size(ha->fce_bufs));
489 	fcec->addr_l = htonl(LSD(ha->fce_dma));
490 	fcec->addr_h = htonl(MSD(ha->fce_dma));
491 
492 	iter_reg = fcec->eregs;
493 	for (cnt = 0; cnt < 8; cnt++)
494 		*iter_reg++ = htonl(ha->fce_mb[cnt]);
495 
496 	memcpy(iter_reg, ha->fce, ntohl(fcec->size));
497 
498 	return (char *)iter_reg + ntohl(fcec->size);
499 }
500 
501 static inline void *
502 qla25xx_copy_exlogin(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
503 {
504 	struct qla2xxx_offld_chain *c = ptr;
505 
506 	if (!ha->exlogin_buf)
507 		return ptr;
508 
509 	*last_chain = &c->type;
510 
511 	c->type = cpu_to_be32(DUMP_CHAIN_EXLOGIN);
512 	c->chain_size = cpu_to_be32(sizeof(struct qla2xxx_offld_chain) +
513 	    ha->exlogin_size);
514 	c->size = cpu_to_be32(ha->exlogin_size);
515 	c->addr = cpu_to_be64(ha->exlogin_buf_dma);
516 
517 	ptr += sizeof(struct qla2xxx_offld_chain);
518 	memcpy(ptr, ha->exlogin_buf, ha->exlogin_size);
519 
520 	return (char *)ptr + cpu_to_be32(c->size);
521 }
522 
523 static inline void *
524 qla81xx_copy_exchoffld(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
525 {
526 	struct qla2xxx_offld_chain *c = ptr;
527 
528 	if (!ha->exchoffld_buf)
529 		return ptr;
530 
531 	*last_chain = &c->type;
532 
533 	c->type = cpu_to_be32(DUMP_CHAIN_EXCHG);
534 	c->chain_size = cpu_to_be32(sizeof(struct qla2xxx_offld_chain) +
535 	    ha->exchoffld_size);
536 	c->size = cpu_to_be32(ha->exchoffld_size);
537 	c->addr = cpu_to_be64(ha->exchoffld_buf_dma);
538 
539 	ptr += sizeof(struct qla2xxx_offld_chain);
540 	memcpy(ptr, ha->exchoffld_buf, ha->exchoffld_size);
541 
542 	return (char *)ptr + cpu_to_be32(c->size);
543 }
544 
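/*
 * Append the ATIO queue ring (when present) to the dump as a
 * DUMP_CHAIN_QUEUE chain entry.
 */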
545 static inline void *
546 qla2xxx_copy_atioqueues(struct qla_hw_data *ha, void *ptr,
547 	uint32_t **last_chain)
548 {
549 	struct qla2xxx_mqueue_chain *q;
550 	struct qla2xxx_mqueue_header *qh;
551 	uint32_t num_queues;
552 	int que;
553 	struct {
554 		int length;
555 		void *ring;
556 	} aq, *aqp;
557 
558 	if (!ha->tgt.atio_ring)
559 		return ptr;
560 
561 	num_queues = 1;
562 	aqp = &aq;
563 	aqp->length = ha->tgt.atio_q_length;
564 	aqp->ring = ha->tgt.atio_ring;
565 
566 	for (que = 0; que < num_queues; que++) {
567 		/* aqp = ha->atio_q_map[que]; */
568 		q = ptr;
569 		*last_chain = &q->type;
570 		q->type = htonl(DUMP_CHAIN_QUEUE);
571 		q->chain_size = htonl(
572 		    sizeof(struct qla2xxx_mqueue_chain) +
573 		    sizeof(struct qla2xxx_mqueue_header) +
574 		    (aqp->length * sizeof(request_t)));
575 		ptr += sizeof(struct qla2xxx_mqueue_chain);
576 
577 		/* Add header. */
578 		qh = ptr;
579 		qh->queue = htonl(TYPE_ATIO_QUEUE);
580 		qh->number = htonl(que);
581 		qh->size = htonl(aqp->length * sizeof(request_t));
582 		ptr += sizeof(struct qla2xxx_mqueue_header);
583 
584 		/* Add data. */
585 		memcpy(ptr, aqp->ring, aqp->length * sizeof(request_t));
586 
587 		ptr += aqp->length * sizeof(request_t);
588 	}
589 
590 	return ptr;
591 }
592 
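/*
 * Append the additional (multiqueue) request and response rings to the dump
 * as DUMP_CHAIN_QUEUE chain entries.
 */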
593 static inline void *
594 qla25xx_copy_mqueues(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
595 {
596 	struct qla2xxx_mqueue_chain *q;
597 	struct qla2xxx_mqueue_header *qh;
598 	struct req_que *req;
599 	struct rsp_que *rsp;
600 	int que;
601 
602 	if (!ha->mqenable)
603 		return ptr;
604 
605 	/* Request queues */
606 	for (que = 1; que < ha->max_req_queues; que++) {
607 		req = ha->req_q_map[que];
608 		if (!req)
609 			break;
610 
611 		/* Add chain. */
612 		q = ptr;
613 		*last_chain = &q->type;
614 		q->type = htonl(DUMP_CHAIN_QUEUE);
615 		q->chain_size = htonl(
616 		    sizeof(struct qla2xxx_mqueue_chain) +
617 		    sizeof(struct qla2xxx_mqueue_header) +
618 		    (req->length * sizeof(request_t)));
619 		ptr += sizeof(struct qla2xxx_mqueue_chain);
620 
621 		/* Add header. */
622 		qh = ptr;
623 		qh->queue = htonl(TYPE_REQUEST_QUEUE);
624 		qh->number = htonl(que);
625 		qh->size = htonl(req->length * sizeof(request_t));
626 		ptr += sizeof(struct qla2xxx_mqueue_header);
627 
628 		/* Add data. */
629 		memcpy(ptr, req->ring, req->length * sizeof(request_t));
630 		ptr += req->length * sizeof(request_t);
631 	}
632 
633 	/* Response queues */
634 	for (que = 1; que < ha->max_rsp_queues; que++) {
635 		rsp = ha->rsp_q_map[que];
636 		if (!rsp)
637 			break;
638 
639 		/* Add chain. */
640 		q = ptr;
641 		*last_chain = &q->type;
642 		q->type = htonl(DUMP_CHAIN_QUEUE);
643 		q->chain_size = htonl(
644 		    sizeof(struct qla2xxx_mqueue_chain) +
645 		    sizeof(struct qla2xxx_mqueue_header) +
646 		    (rsp->length * sizeof(response_t)));
647 		ptr += sizeof(struct qla2xxx_mqueue_chain);
648 
649 		/* Add header. */
650 		qh = ptr;
651 		qh->queue = htonl(TYPE_RESPONSE_QUEUE);
652 		qh->number = htonl(que);
653 		qh->size = htonl(rsp->length * sizeof(response_t));
654 		ptr += sizeof(struct qla2xxx_mqueue_header);
655 
656 		/* Add data. */
657 		memcpy(ptr, rsp->ring, rsp->length * sizeof(response_t));
658 		ptr += rsp->length * sizeof(response_t);
659 	}
660 
661 	return ptr;
662 }
663 
664 static inline void *
665 qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
666 {
667 	uint32_t cnt, que_idx;
668 	uint8_t que_cnt;
669 	struct qla2xxx_mq_chain *mq = ptr;
670 	device_reg_t *reg;
671 
672 	if (!ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
673 	    IS_QLA28XX(ha))
674 		return ptr;
675 
676 	mq = ptr;
677 	*last_chain = &mq->type;
678 	mq->type = htonl(DUMP_CHAIN_MQ);
679 	mq->chain_size = htonl(sizeof(struct qla2xxx_mq_chain));
680 
681 	que_cnt = ha->max_req_queues > ha->max_rsp_queues ?
682 		ha->max_req_queues : ha->max_rsp_queues;
683 	mq->count = htonl(que_cnt);
684 	for (cnt = 0; cnt < que_cnt; cnt++) {
685 		reg = ISP_QUE_REG(ha, cnt);
686 		que_idx = cnt * 4;
687 		mq->qregs[que_idx] =
688 		    htonl(RD_REG_DWORD(&reg->isp25mq.req_q_in));
689 		mq->qregs[que_idx+1] =
690 		    htonl(RD_REG_DWORD(&reg->isp25mq.req_q_out));
691 		mq->qregs[que_idx+2] =
692 		    htonl(RD_REG_DWORD(&reg->isp25mq.rsp_q_in));
693 		mq->qregs[que_idx+3] =
694 		    htonl(RD_REG_DWORD(&reg->isp25mq.rsp_q_out));
695 	}
696 
697 	return ptr + sizeof(struct qla2xxx_mq_chain);
698 }
699 
700 void
701 qla2xxx_dump_post_process(scsi_qla_host_t *vha, int rval)
702 {
703 	struct qla_hw_data *ha = vha->hw;
704 
705 	if (rval != QLA_SUCCESS) {
706 		ql_log(ql_log_warn, vha, 0xd000,
707 		    "Failed to dump firmware (%x), dump status flags (0x%lx).\n",
708 		    rval, ha->fw_dump_cap_flags);
709 		ha->fw_dumped = 0;
710 	} else {
711 		ql_log(ql_log_info, vha, 0xd001,
712 		    "Firmware dump saved to temp buffer (%ld/%p), dump status flags (0x%lx).\n",
713 		    vha->host_no, ha->fw_dump, ha->fw_dump_cap_flags);
714 		ha->fw_dumped = 1;
715 		qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
716 	}
717 }
718 
719 /**
720  * qla2300_fw_dump() - Dumps binary data from the 2300 firmware.
721  * @vha: HA context
722  * @hardware_locked: non-zero if called with the hardware_lock held
723  */
724 void
725 qla2300_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
726 {
727 	int		rval;
728 	uint32_t	cnt;
729 	struct qla_hw_data *ha = vha->hw;
730 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
731 	uint16_t __iomem *dmp_reg;
732 	unsigned long	flags;
733 	struct qla2300_fw_dump	*fw;
734 	void		*nxt;
735 	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
736 
737 	flags = 0;
738 
739 #ifndef __CHECKER__
740 	if (!hardware_locked)
741 		spin_lock_irqsave(&ha->hardware_lock, flags);
742 #endif
743 
744 	if (!ha->fw_dump) {
745 		ql_log(ql_log_warn, vha, 0xd002,
746 		    "No buffer available for dump.\n");
747 		goto qla2300_fw_dump_failed;
748 	}
749 
750 	if (ha->fw_dumped) {
751 		ql_log(ql_log_warn, vha, 0xd003,
752 		    "Firmware has been previously dumped (%p) "
753 		    "-- ignoring request.\n",
754 		    ha->fw_dump);
755 		goto qla2300_fw_dump_failed;
756 	}
757 	fw = &ha->fw_dump->isp.isp23;
758 	qla2xxx_prep_dump(ha, ha->fw_dump);
759 
760 	rval = QLA_SUCCESS;
761 	fw->hccr = htons(RD_REG_WORD(&reg->hccr));
762 
763 	/* Pause RISC. */
764 	WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
765 	if (IS_QLA2300(ha)) {
766 		for (cnt = 30000;
767 		    (RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
768 			rval == QLA_SUCCESS; cnt--) {
769 			if (cnt)
770 				udelay(100);
771 			else
772 				rval = QLA_FUNCTION_TIMEOUT;
773 		}
774 	} else {
775 		RD_REG_WORD(&reg->hccr);		/* PCI Posting. */
776 		udelay(10);
777 	}
778 
779 	if (rval == QLA_SUCCESS) {
780 		dmp_reg = &reg->flash_address;
781 		for (cnt = 0; cnt < sizeof(fw->pbiu_reg) / 2; cnt++, dmp_reg++)
782 			fw->pbiu_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
783 
784 		dmp_reg = &reg->u.isp2300.req_q_in;
785 		for (cnt = 0; cnt < sizeof(fw->risc_host_reg) / 2;
786 		    cnt++, dmp_reg++)
787 			fw->risc_host_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
788 
789 		dmp_reg = &reg->u.isp2300.mailbox0;
790 		for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2;
791 		    cnt++, dmp_reg++)
792 			fw->mailbox_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
793 
794 		WRT_REG_WORD(&reg->ctrl_status, 0x40);
795 		qla2xxx_read_window(reg, 32, fw->resp_dma_reg);
796 
797 		WRT_REG_WORD(&reg->ctrl_status, 0x50);
798 		qla2xxx_read_window(reg, 48, fw->dma_reg);
799 
800 		WRT_REG_WORD(&reg->ctrl_status, 0x00);
801 		dmp_reg = &reg->risc_hw;
802 		for (cnt = 0; cnt < sizeof(fw->risc_hdw_reg) / 2;
803 		    cnt++, dmp_reg++)
804 			fw->risc_hdw_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
805 
806 		WRT_REG_WORD(&reg->pcr, 0x2000);
807 		qla2xxx_read_window(reg, 16, fw->risc_gp0_reg);
808 
809 		WRT_REG_WORD(&reg->pcr, 0x2200);
810 		qla2xxx_read_window(reg, 16, fw->risc_gp1_reg);
811 
812 		WRT_REG_WORD(&reg->pcr, 0x2400);
813 		qla2xxx_read_window(reg, 16, fw->risc_gp2_reg);
814 
815 		WRT_REG_WORD(&reg->pcr, 0x2600);
816 		qla2xxx_read_window(reg, 16, fw->risc_gp3_reg);
817 
818 		WRT_REG_WORD(&reg->pcr, 0x2800);
819 		qla2xxx_read_window(reg, 16, fw->risc_gp4_reg);
820 
821 		WRT_REG_WORD(&reg->pcr, 0x2A00);
822 		qla2xxx_read_window(reg, 16, fw->risc_gp5_reg);
823 
824 		WRT_REG_WORD(&reg->pcr, 0x2C00);
825 		qla2xxx_read_window(reg, 16, fw->risc_gp6_reg);
826 
827 		WRT_REG_WORD(&reg->pcr, 0x2E00);
828 		qla2xxx_read_window(reg, 16, fw->risc_gp7_reg);
829 
830 		WRT_REG_WORD(&reg->ctrl_status, 0x10);
831 		qla2xxx_read_window(reg, 64, fw->frame_buf_hdw_reg);
832 
833 		WRT_REG_WORD(&reg->ctrl_status, 0x20);
834 		qla2xxx_read_window(reg, 64, fw->fpm_b0_reg);
835 
836 		WRT_REG_WORD(&reg->ctrl_status, 0x30);
837 		qla2xxx_read_window(reg, 64, fw->fpm_b1_reg);
838 
839 		/* Reset RISC. */
840 		WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
841 		for (cnt = 0; cnt < 30000; cnt++) {
842 			if ((RD_REG_WORD(&reg->ctrl_status) &
843 			    CSR_ISP_SOFT_RESET) == 0)
844 				break;
845 
846 			udelay(10);
847 		}
848 	}
849 
850 	if (!IS_QLA2300(ha)) {
851 		for (cnt = 30000; RD_MAILBOX_REG(ha, reg, 0) != 0 &&
852 		    rval == QLA_SUCCESS; cnt--) {
853 			if (cnt)
854 				udelay(100);
855 			else
856 				rval = QLA_FUNCTION_TIMEOUT;
857 		}
858 	}
859 
860 	/* Get RISC SRAM. */
861 	if (rval == QLA_SUCCESS)
862 		rval = qla2xxx_dump_ram(ha, 0x800, fw->risc_ram,
863 		    sizeof(fw->risc_ram) / 2, &nxt);
864 
865 	/* Get stack SRAM. */
866 	if (rval == QLA_SUCCESS)
867 		rval = qla2xxx_dump_ram(ha, 0x10000, fw->stack_ram,
868 		    sizeof(fw->stack_ram) / 2, &nxt);
869 
870 	/* Get data SRAM. */
871 	if (rval == QLA_SUCCESS)
872 		rval = qla2xxx_dump_ram(ha, 0x11000, fw->data_ram,
873 		    ha->fw_memory_size - 0x11000 + 1, &nxt);
874 
875 	if (rval == QLA_SUCCESS)
876 		qla2xxx_copy_queues(ha, nxt);
877 
878 	qla2xxx_dump_post_process(base_vha, rval);
879 
880 qla2300_fw_dump_failed:
881 #ifndef __CHECKER__
882 	if (!hardware_locked)
883 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
884 #else
885 	;
886 #endif
887 }
888 
889 /**
890  * qla2100_fw_dump() - Dumps binary data from the 2100/2200 firmware.
891  * @vha: HA context
892  * @hardware_locked: non-zero if called with the hardware_lock held
893  */
894 void
895 qla2100_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
896 {
897 	int		rval;
898 	uint32_t	cnt, timer;
899 	uint16_t	risc_address;
900 	uint16_t	mb0, mb2;
901 	struct qla_hw_data *ha = vha->hw;
902 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
903 	uint16_t __iomem *dmp_reg;
904 	unsigned long	flags;
905 	struct qla2100_fw_dump	*fw;
906 	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
907 
908 	risc_address = 0;
909 	mb0 = mb2 = 0;
910 	flags = 0;
911 
912 #ifndef __CHECKER__
913 	if (!hardware_locked)
914 		spin_lock_irqsave(&ha->hardware_lock, flags);
915 #endif
916 
917 	if (!ha->fw_dump) {
918 		ql_log(ql_log_warn, vha, 0xd004,
919 		    "No buffer available for dump.\n");
920 		goto qla2100_fw_dump_failed;
921 	}
922 
923 	if (ha->fw_dumped) {
924 		ql_log(ql_log_warn, vha, 0xd005,
925 		    "Firmware has been previously dumped (%p) "
926 		    "-- ignoring request.\n",
927 		    ha->fw_dump);
928 		goto qla2100_fw_dump_failed;
929 	}
930 	fw = &ha->fw_dump->isp.isp21;
931 	qla2xxx_prep_dump(ha, ha->fw_dump);
932 
933 	rval = QLA_SUCCESS;
934 	fw->hccr = htons(RD_REG_WORD(&reg->hccr));
935 
936 	/* Pause RISC. */
937 	WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
938 	for (cnt = 30000; (RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
939 	    rval == QLA_SUCCESS; cnt--) {
940 		if (cnt)
941 			udelay(100);
942 		else
943 			rval = QLA_FUNCTION_TIMEOUT;
944 	}
945 	if (rval == QLA_SUCCESS) {
946 		dmp_reg = &reg->flash_address;
947 		for (cnt = 0; cnt < sizeof(fw->pbiu_reg) / 2; cnt++, dmp_reg++)
948 			fw->pbiu_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
949 
950 		dmp_reg = &reg->u.isp2100.mailbox0;
951 		for (cnt = 0; cnt < ha->mbx_count; cnt++, dmp_reg++) {
952 			if (cnt == 8)
953 				dmp_reg = &reg->u_end.isp2200.mailbox8;
954 
955 			fw->mailbox_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
956 		}
957 
958 		dmp_reg = &reg->u.isp2100.unused_2[0];
959 		for (cnt = 0; cnt < sizeof(fw->dma_reg) / 2; cnt++, dmp_reg++)
960 			fw->dma_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
961 
962 		WRT_REG_WORD(&reg->ctrl_status, 0x00);
963 		dmp_reg = &reg->risc_hw;
964 		for (cnt = 0; cnt < sizeof(fw->risc_hdw_reg) / 2; cnt++, dmp_reg++)
965 			fw->risc_hdw_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
966 
967 		WRT_REG_WORD(&reg->pcr, 0x2000);
968 		qla2xxx_read_window(reg, 16, fw->risc_gp0_reg);
969 
970 		WRT_REG_WORD(&reg->pcr, 0x2100);
971 		qla2xxx_read_window(reg, 16, fw->risc_gp1_reg);
972 
973 		WRT_REG_WORD(&reg->pcr, 0x2200);
974 		qla2xxx_read_window(reg, 16, fw->risc_gp2_reg);
975 
976 		WRT_REG_WORD(&reg->pcr, 0x2300);
977 		qla2xxx_read_window(reg, 16, fw->risc_gp3_reg);
978 
979 		WRT_REG_WORD(&reg->pcr, 0x2400);
980 		qla2xxx_read_window(reg, 16, fw->risc_gp4_reg);
981 
982 		WRT_REG_WORD(&reg->pcr, 0x2500);
983 		qla2xxx_read_window(reg, 16, fw->risc_gp5_reg);
984 
985 		WRT_REG_WORD(&reg->pcr, 0x2600);
986 		qla2xxx_read_window(reg, 16, fw->risc_gp6_reg);
987 
988 		WRT_REG_WORD(&reg->pcr, 0x2700);
989 		qla2xxx_read_window(reg, 16, fw->risc_gp7_reg);
990 
991 		WRT_REG_WORD(&reg->ctrl_status, 0x10);
992 		qla2xxx_read_window(reg, 16, fw->frame_buf_hdw_reg);
993 
994 		WRT_REG_WORD(&reg->ctrl_status, 0x20);
995 		qla2xxx_read_window(reg, 64, fw->fpm_b0_reg);
996 
997 		WRT_REG_WORD(&reg->ctrl_status, 0x30);
998 		qla2xxx_read_window(reg, 64, fw->fpm_b1_reg);
999 
1000 		/* Reset the ISP. */
1001 		WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
1002 	}
1003 
1004 	for (cnt = 30000; RD_MAILBOX_REG(ha, reg, 0) != 0 &&
1005 	    rval == QLA_SUCCESS; cnt--) {
1006 		if (cnt)
1007 			udelay(100);
1008 		else
1009 			rval = QLA_FUNCTION_TIMEOUT;
1010 	}
1011 
1012 	/* Pause RISC. */
1013 	if (rval == QLA_SUCCESS && (IS_QLA2200(ha) || (IS_QLA2100(ha) &&
1014 	    (RD_REG_WORD(&reg->mctr) & (BIT_1 | BIT_0)) != 0))) {
1015 
1016 		WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
1017 		for (cnt = 30000;
1018 		    (RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
1019 		    rval == QLA_SUCCESS; cnt--) {
1020 			if (cnt)
1021 				udelay(100);
1022 			else
1023 				rval = QLA_FUNCTION_TIMEOUT;
1024 		}
1025 		if (rval == QLA_SUCCESS) {
1026 			/* Set memory configuration and timing. */
1027 			if (IS_QLA2100(ha))
1028 				WRT_REG_WORD(&reg->mctr, 0xf1);
1029 			else
1030 				WRT_REG_WORD(&reg->mctr, 0xf2);
1031 			RD_REG_WORD(&reg->mctr);	/* PCI Posting. */
1032 
1033 			/* Release RISC. */
1034 			WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
1035 		}
1036 	}
1037 
1038 	if (rval == QLA_SUCCESS) {
1039 		/* Get RISC SRAM. */
1040 		risc_address = 0x1000;
1041 		WRT_MAILBOX_REG(ha, reg, 0, MBC_READ_RAM_WORD);
1042 		clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
1043 	}
1044 	for (cnt = 0; cnt < sizeof(fw->risc_ram) / 2 && rval == QLA_SUCCESS;
1045 	    cnt++, risc_address++) {
1046 		WRT_MAILBOX_REG(ha, reg, 1, risc_address);
1047 		WRT_REG_WORD(&reg->hccr, HCCR_SET_HOST_INT);
1048 
1049 		for (timer = 6000000; timer != 0; timer--) {
1050 			/* Check for pending interrupts. */
1051 			if (RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) {
1052 				if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
1053 					set_bit(MBX_INTERRUPT,
1054 					    &ha->mbx_cmd_flags);
1055 
1056 					mb0 = RD_MAILBOX_REG(ha, reg, 0);
1057 					mb2 = RD_MAILBOX_REG(ha, reg, 2);
1058 
1059 					WRT_REG_WORD(&reg->semaphore, 0);
1060 					WRT_REG_WORD(&reg->hccr,
1061 					    HCCR_CLR_RISC_INT);
1062 					RD_REG_WORD(&reg->hccr);
1063 					break;
1064 				}
1065 				WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
1066 				RD_REG_WORD(&reg->hccr);
1067 			}
1068 			udelay(5);
1069 		}
1070 
1071 		if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
1072 			rval = mb0 & MBS_MASK;
1073 			fw->risc_ram[cnt] = htons(mb2);
1074 		} else {
1075 			rval = QLA_FUNCTION_FAILED;
1076 		}
1077 	}
1078 
1079 	if (rval == QLA_SUCCESS)
1080 		qla2xxx_copy_queues(ha, &fw->risc_ram[cnt]);
1081 
1082 	qla2xxx_dump_post_process(base_vha, rval);
1083 
1084 qla2100_fw_dump_failed:
1085 #ifndef __CHECKER__
1086 	if (!hardware_locked)
1087 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
1088 #else
1089 	;
1090 #endif
1091 }
1092 
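/**
 * qla24xx_fw_dump() - Dumps binary data from the ISP24xx firmware.
 * @vha: HA context
 * @hardware_locked: non-zero if called with the hardware_lock held
 */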
1093 void
1094 qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
1095 {
1096 	int		rval;
1097 	uint32_t	cnt;
1098 	struct qla_hw_data *ha = vha->hw;
1099 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1100 	uint32_t __iomem *dmp_reg;
1101 	uint32_t	*iter_reg;
1102 	uint16_t __iomem *mbx_reg;
1103 	unsigned long	flags;
1104 	struct qla24xx_fw_dump *fw;
1105 	void		*nxt;
1106 	void		*nxt_chain;
1107 	uint32_t	*last_chain = NULL;
1108 	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
1109 
1110 	if (IS_P3P_TYPE(ha))
1111 		return;
1112 
1113 	flags = 0;
1114 	ha->fw_dump_cap_flags = 0;
1115 
1116 #ifndef __CHECKER__
1117 	if (!hardware_locked)
1118 		spin_lock_irqsave(&ha->hardware_lock, flags);
1119 #endif
1120 
1121 	if (!ha->fw_dump) {
1122 		ql_log(ql_log_warn, vha, 0xd006,
1123 		    "No buffer available for dump.\n");
1124 		goto qla24xx_fw_dump_failed;
1125 	}
1126 
1127 	if (ha->fw_dumped) {
1128 		ql_log(ql_log_warn, vha, 0xd007,
1129 		    "Firmware has been previously dumped (%p) "
1130 		    "-- ignoring request.\n",
1131 		    ha->fw_dump);
1132 		goto qla24xx_fw_dump_failed;
1133 	}
1134 	QLA_FW_STOPPED(ha);
1135 	fw = &ha->fw_dump->isp.isp24;
1136 	qla2xxx_prep_dump(ha, ha->fw_dump);
1137 
1138 	fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
1139 
1140 	/*
1141 	 * Pause RISC. No need to track the timeout, as resetting the chip
1142 	 * is the right approach in case of a pause timeout.
1143 	 */
1144 	qla24xx_pause_risc(reg, ha);
1145 
1146 	/* Host interface registers. */
1147 	dmp_reg = &reg->flash_addr;
1148 	for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++, dmp_reg++)
1149 		fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg));
1150 
1151 	/* Disable interrupts. */
1152 	WRT_REG_DWORD(&reg->ictrl, 0);
1153 	RD_REG_DWORD(&reg->ictrl);
1154 
1155 	/* Shadow registers. */
1156 	WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
1157 	RD_REG_DWORD(&reg->iobase_addr);
1158 	WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
1159 	fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1160 
1161 	WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
1162 	fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1163 
1164 	WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
1165 	fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1166 
1167 	WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
1168 	fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1169 
1170 	WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
1171 	fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1172 
1173 	WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
1174 	fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1175 
1176 	WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
1177 	fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1178 
1179 	/* Mailbox registers. */
1180 	mbx_reg = &reg->mailbox0;
1181 	for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++)
1182 		fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg));
1183 
1184 	/* Transfer sequence registers. */
1185 	iter_reg = fw->xseq_gp_reg;
1186 	iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
1187 	iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
1188 	iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
1189 	iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
1190 	iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
1191 	iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
1192 	iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
1193 	qla24xx_read_window(reg, 0xBF70, 16, iter_reg);
1194 
1195 	qla24xx_read_window(reg, 0xBFE0, 16, fw->xseq_0_reg);
1196 	qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);
1197 
1198 	/* Receive sequence registers. */
1199 	iter_reg = fw->rseq_gp_reg;
1200 	iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
1201 	iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
1202 	iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
1203 	iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
1204 	iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
1205 	iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
1206 	iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
1207 	qla24xx_read_window(reg, 0xFF70, 16, iter_reg);
1208 
1209 	qla24xx_read_window(reg, 0xFFD0, 16, fw->rseq_0_reg);
1210 	qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
1211 	qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);
1212 
1213 	/* Command DMA registers. */
1214 	qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg);
1215 
1216 	/* Queues. */
1217 	iter_reg = fw->req0_dma_reg;
1218 	iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
1219 	dmp_reg = &reg->iobase_q;
1220 	for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1221 		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
1222 
1223 	iter_reg = fw->resp0_dma_reg;
1224 	iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
1225 	dmp_reg = &reg->iobase_q;
1226 	for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1227 		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
1228 
1229 	iter_reg = fw->req1_dma_reg;
1230 	iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
1231 	dmp_reg = &reg->iobase_q;
1232 	for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1233 		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
1234 
1235 	/* Transmit DMA registers. */
1236 	iter_reg = fw->xmt0_dma_reg;
1237 	iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
1238 	qla24xx_read_window(reg, 0x7610, 16, iter_reg);
1239 
1240 	iter_reg = fw->xmt1_dma_reg;
1241 	iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
1242 	qla24xx_read_window(reg, 0x7630, 16, iter_reg);
1243 
1244 	iter_reg = fw->xmt2_dma_reg;
1245 	iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
1246 	qla24xx_read_window(reg, 0x7650, 16, iter_reg);
1247 
1248 	iter_reg = fw->xmt3_dma_reg;
1249 	iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
1250 	qla24xx_read_window(reg, 0x7670, 16, iter_reg);
1251 
1252 	iter_reg = fw->xmt4_dma_reg;
1253 	iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
1254 	qla24xx_read_window(reg, 0x7690, 16, iter_reg);
1255 
1256 	qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);
1257 
1258 	/* Receive DMA registers. */
1259 	iter_reg = fw->rcvt0_data_dma_reg;
1260 	iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
1261 	qla24xx_read_window(reg, 0x7710, 16, iter_reg);
1262 
1263 	iter_reg = fw->rcvt1_data_dma_reg;
1264 	iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
1265 	qla24xx_read_window(reg, 0x7730, 16, iter_reg);
1266 
1267 	/* RISC registers. */
1268 	iter_reg = fw->risc_gp_reg;
1269 	iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
1270 	iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
1271 	iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
1272 	iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
1273 	iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
1274 	iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
1275 	iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
1276 	qla24xx_read_window(reg, 0x0F70, 16, iter_reg);
1277 
1278 	/* Local memory controller registers. */
1279 	iter_reg = fw->lmc_reg;
1280 	iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
1281 	iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
1282 	iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
1283 	iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
1284 	iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
1285 	iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
1286 	qla24xx_read_window(reg, 0x3060, 16, iter_reg);
1287 
1288 	/* Fibre Protocol Module registers. */
1289 	iter_reg = fw->fpm_hdw_reg;
1290 	iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
1291 	iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
1292 	iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
1293 	iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
1294 	iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
1295 	iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
1296 	iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
1297 	iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
1298 	iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
1299 	iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
1300 	iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
1301 	qla24xx_read_window(reg, 0x40B0, 16, iter_reg);
1302 
1303 	/* Frame Buffer registers. */
1304 	iter_reg = fw->fb_hdw_reg;
1305 	iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
1306 	iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
1307 	iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
1308 	iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
1309 	iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
1310 	iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
1311 	iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
1312 	iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
1313 	iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
1314 	iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
1315 	qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
1316 
1317 	rval = qla24xx_soft_reset(ha);
1318 	if (rval != QLA_SUCCESS)
1319 		goto qla24xx_fw_dump_failed_0;
1320 
1321 	rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
1322 	    &nxt);
1323 	if (rval != QLA_SUCCESS)
1324 		goto qla24xx_fw_dump_failed_0;
1325 
1326 	nxt = qla2xxx_copy_queues(ha, nxt);
1327 
1328 	qla24xx_copy_eft(ha, nxt);
1329 
1330 	nxt_chain = (void *)ha->fw_dump + ha->chain_offset;
1331 	nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
1332 	if (last_chain) {
1333 		ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT);
1334 		*last_chain |= htonl(DUMP_CHAIN_LAST);
1335 	}
1336 
1337 	/* Adjust valid length. */
1338 	ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);
1339 
1340 qla24xx_fw_dump_failed_0:
1341 	qla2xxx_dump_post_process(base_vha, rval);
1342 
1343 qla24xx_fw_dump_failed:
1344 #ifndef __CHECKER__
1345 	if (!hardware_locked)
1346 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
1347 #else
1348 	;
1349 #endif
1350 }
1351 
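/**
 * qla25xx_fw_dump() - Dumps binary data from the ISP25xx firmware.
 * @vha: HA context
 * @hardware_locked: non-zero if called with the hardware_lock held
 */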
1352 void
1353 qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
1354 {
1355 	int		rval;
1356 	uint32_t	cnt;
1357 	struct qla_hw_data *ha = vha->hw;
1358 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1359 	uint32_t __iomem *dmp_reg;
1360 	uint32_t	*iter_reg;
1361 	uint16_t __iomem *mbx_reg;
1362 	unsigned long	flags;
1363 	struct qla25xx_fw_dump *fw;
1364 	void		*nxt, *nxt_chain;
1365 	uint32_t	*last_chain = NULL;
1366 	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
1367 
1368 	flags = 0;
1369 	ha->fw_dump_cap_flags = 0;
1370 
1371 #ifndef __CHECKER__
1372 	if (!hardware_locked)
1373 		spin_lock_irqsave(&ha->hardware_lock, flags);
1374 #endif
1375 
1376 	if (!ha->fw_dump) {
1377 		ql_log(ql_log_warn, vha, 0xd008,
1378 		    "No buffer available for dump.\n");
1379 		goto qla25xx_fw_dump_failed;
1380 	}
1381 
1382 	if (ha->fw_dumped) {
1383 		ql_log(ql_log_warn, vha, 0xd009,
1384 		    "Firmware has been previously dumped (%p) "
1385 		    "-- ignoring request.\n",
1386 		    ha->fw_dump);
1387 		goto qla25xx_fw_dump_failed;
1388 	}
1389 	QLA_FW_STOPPED(ha);
1390 	fw = &ha->fw_dump->isp.isp25;
1391 	qla2xxx_prep_dump(ha, ha->fw_dump);
1392 	ha->fw_dump->version = htonl(2);
1393 
1394 	fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
1395 
1396 	/*
1397 	 * Pause RISC. No need to track the timeout, as resetting the chip
1398 	 * is the right approach in case of a pause timeout.
1399 	 */
1400 	qla24xx_pause_risc(reg, ha);
1401 
1402 	/* Host/Risc registers. */
1403 	iter_reg = fw->host_risc_reg;
1404 	iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg);
1405 	qla24xx_read_window(reg, 0x7010, 16, iter_reg);
1406 
1407 	/* PCIe registers. */
1408 	WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
1409 	RD_REG_DWORD(&reg->iobase_addr);
1410 	WRT_REG_DWORD(&reg->iobase_window, 0x01);
1411 	dmp_reg = &reg->iobase_c4;
1412 	fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg));
1413 	dmp_reg++;
1414 	fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg));
1415 	dmp_reg++;
1416 	fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg));
1417 	fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window));
1418 
1419 	WRT_REG_DWORD(&reg->iobase_window, 0x00);
1420 	RD_REG_DWORD(&reg->iobase_window);
1421 
1422 	/* Host interface registers. */
1423 	dmp_reg = &reg->flash_addr;
1424 	for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++, dmp_reg++)
1425 		fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg));
1426 
1427 	/* Disable interrupts. */
1428 	WRT_REG_DWORD(&reg->ictrl, 0);
1429 	RD_REG_DWORD(&reg->ictrl);
1430 
1431 	/* Shadow registers. */
1432 	WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
1433 	RD_REG_DWORD(&reg->iobase_addr);
1434 	WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
1435 	fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1436 
1437 	WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
1438 	fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1439 
1440 	WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
1441 	fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1442 
1443 	WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
1444 	fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1445 
1446 	WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
1447 	fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1448 
1449 	WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
1450 	fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1451 
1452 	WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
1453 	fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1454 
1455 	WRT_REG_DWORD(&reg->iobase_select, 0xB0700000);
1456 	fw->shadow_reg[7] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1457 
1458 	WRT_REG_DWORD(&reg->iobase_select, 0xB0800000);
1459 	fw->shadow_reg[8] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1460 
1461 	WRT_REG_DWORD(&reg->iobase_select, 0xB0900000);
1462 	fw->shadow_reg[9] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1463 
1464 	WRT_REG_DWORD(&reg->iobase_select, 0xB0A00000);
1465 	fw->shadow_reg[10] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1466 
1467 	/* RISC I/O register. */
1468 	WRT_REG_DWORD(&reg->iobase_addr, 0x0010);
1469 	fw->risc_io_reg = htonl(RD_REG_DWORD(&reg->iobase_window));
1470 
1471 	/* Mailbox registers. */
1472 	mbx_reg = &reg->mailbox0;
1473 	for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++)
1474 		fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg));
1475 
1476 	/* Transfer sequence registers. */
1477 	iter_reg = fw->xseq_gp_reg;
1478 	iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
1479 	iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
1480 	iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
1481 	iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
1482 	iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
1483 	iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
1484 	iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
1485 	qla24xx_read_window(reg, 0xBF70, 16, iter_reg);
1486 
1487 	iter_reg = fw->xseq_0_reg;
1488 	iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg);
1489 	iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg);
1490 	qla24xx_read_window(reg, 0xBFE0, 16, iter_reg);
1491 
1492 	qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);
1493 
1494 	/* Receive sequence registers. */
1495 	iter_reg = fw->rseq_gp_reg;
1496 	iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
1497 	iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
1498 	iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
1499 	iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
1500 	iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
1501 	iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
1502 	iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
1503 	qla24xx_read_window(reg, 0xFF70, 16, iter_reg);
1504 
1505 	iter_reg = fw->rseq_0_reg;
1506 	iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg);
1507 	qla24xx_read_window(reg, 0xFFD0, 16, iter_reg);
1508 
1509 	qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
1510 	qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);
1511 
1512 	/* Auxiliary sequence registers. */
1513 	iter_reg = fw->aseq_gp_reg;
1514 	iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg);
1515 	iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg);
1516 	iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg);
1517 	iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg);
1518 	iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg);
1519 	iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg);
1520 	iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg);
1521 	qla24xx_read_window(reg, 0xB070, 16, iter_reg);
1522 
1523 	iter_reg = fw->aseq_0_reg;
1524 	iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg);
1525 	qla24xx_read_window(reg, 0xB0D0, 16, iter_reg);
1526 
1527 	qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg);
1528 	qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg);
1529 
1530 	/* Command DMA registers. */
1531 	qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg);
1532 
1533 	/* Queues. */
1534 	iter_reg = fw->req0_dma_reg;
1535 	iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
1536 	dmp_reg = &reg->iobase_q;
1537 	for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1538 		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
1539 
1540 	iter_reg = fw->resp0_dma_reg;
1541 	iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
1542 	dmp_reg = &reg->iobase_q;
1543 	for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1544 		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
1545 
1546 	iter_reg = fw->req1_dma_reg;
1547 	iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
1548 	dmp_reg = &reg->iobase_q;
1549 	for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1550 		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
1551 
1552 	/* Transmit DMA registers. */
1553 	iter_reg = fw->xmt0_dma_reg;
1554 	iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
1555 	qla24xx_read_window(reg, 0x7610, 16, iter_reg);
1556 
1557 	iter_reg = fw->xmt1_dma_reg;
1558 	iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
1559 	qla24xx_read_window(reg, 0x7630, 16, iter_reg);
1560 
1561 	iter_reg = fw->xmt2_dma_reg;
1562 	iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
1563 	qla24xx_read_window(reg, 0x7650, 16, iter_reg);
1564 
1565 	iter_reg = fw->xmt3_dma_reg;
1566 	iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
1567 	qla24xx_read_window(reg, 0x7670, 16, iter_reg);
1568 
1569 	iter_reg = fw->xmt4_dma_reg;
1570 	iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
1571 	qla24xx_read_window(reg, 0x7690, 16, iter_reg);
1572 
1573 	qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);
1574 
1575 	/* Receive DMA registers. */
1576 	iter_reg = fw->rcvt0_data_dma_reg;
1577 	iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
1578 	qla24xx_read_window(reg, 0x7710, 16, iter_reg);
1579 
1580 	iter_reg = fw->rcvt1_data_dma_reg;
1581 	iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
1582 	qla24xx_read_window(reg, 0x7730, 16, iter_reg);
1583 
1584 	/* RISC registers. */
1585 	iter_reg = fw->risc_gp_reg;
1586 	iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
1587 	iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
1588 	iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
1589 	iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
1590 	iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
1591 	iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
1592 	iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
1593 	qla24xx_read_window(reg, 0x0F70, 16, iter_reg);
1594 
1595 	/* Local memory controller registers. */
1596 	iter_reg = fw->lmc_reg;
1597 	iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
1598 	iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
1599 	iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
1600 	iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
1601 	iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
1602 	iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
1603 	iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg);
1604 	qla24xx_read_window(reg, 0x3070, 16, iter_reg);
1605 
1606 	/* Fibre Protocol Module registers. */
1607 	iter_reg = fw->fpm_hdw_reg;
1608 	iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
1609 	iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
1610 	iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
1611 	iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
1612 	iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
1613 	iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
1614 	iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
1615 	iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
1616 	iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
1617 	iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
1618 	iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
1619 	qla24xx_read_window(reg, 0x40B0, 16, iter_reg);
1620 
1621 	/* Frame Buffer registers. */
1622 	iter_reg = fw->fb_hdw_reg;
1623 	iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
1624 	iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
1625 	iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
1626 	iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
1627 	iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
1628 	iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
1629 	iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
1630 	iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
1631 	iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
1632 	iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
1633 	iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
1634 	qla24xx_read_window(reg, 0x6F00, 16, iter_reg);
1635 
1636 	/* Multi queue registers */
1637 	nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset,
1638 	    &last_chain);
1639 
1640 	rval = qla24xx_soft_reset(ha);
1641 	if (rval != QLA_SUCCESS)
1642 		goto qla25xx_fw_dump_failed_0;
1643 
1644 	rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
1645 	    &nxt);
1646 	if (rval != QLA_SUCCESS)
1647 		goto qla25xx_fw_dump_failed_0;
1648 
1649 	nxt = qla2xxx_copy_queues(ha, nxt);
1650 
1651 	qla24xx_copy_eft(ha, nxt);
1652 
1653 	/* Chain entries -- started with MQ. */
1654 	nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
1655 	nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
1656 	nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
1657 	nxt_chain = qla25xx_copy_exlogin(ha, nxt_chain, &last_chain);
1658 	if (last_chain) {
1659 		ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT);
1660 		*last_chain |= htonl(DUMP_CHAIN_LAST);
1661 	}
1662 
1663 	/* Adjust valid length. */
1664 	ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);
1665 
1666 qla25xx_fw_dump_failed_0:
1667 	qla2xxx_dump_post_process(base_vha, rval);
1668 
1669 qla25xx_fw_dump_failed:
1670 #ifndef __CHECKER__
1671 	if (!hardware_locked)
1672 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
1673 #else
1674 	;
1675 #endif
1676 }
1677 
1678 void
1679 qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
1680 {
1681 	int		rval;
1682 	uint32_t	cnt;
1683 	struct qla_hw_data *ha = vha->hw;
1684 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1685 	uint32_t __iomem *dmp_reg;
1686 	uint32_t	*iter_reg;
1687 	uint16_t __iomem *mbx_reg;
1688 	unsigned long	flags;
1689 	struct qla81xx_fw_dump *fw;
1690 	void		*nxt, *nxt_chain;
1691 	uint32_t	*last_chain = NULL;
1692 	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
1693 
1694 	flags = 0;
1695 	ha->fw_dump_cap_flags = 0;
1696 
1697 #ifndef __CHECKER__
1698 	if (!hardware_locked)
1699 		spin_lock_irqsave(&ha->hardware_lock, flags);
1700 #endif
1701 
1702 	if (!ha->fw_dump) {
1703 		ql_log(ql_log_warn, vha, 0xd00a,
1704 		    "No buffer available for dump.\n");
1705 		goto qla81xx_fw_dump_failed;
1706 	}
1707 
1708 	if (ha->fw_dumped) {
1709 		ql_log(ql_log_warn, vha, 0xd00b,
1710 		    "Firmware has been previously dumped (%p) "
1711 		    "-- ignoring request.\n",
1712 		    ha->fw_dump);
1713 		goto qla81xx_fw_dump_failed;
1714 	}
1715 	fw = &ha->fw_dump->isp.isp81;
1716 	qla2xxx_prep_dump(ha, ha->fw_dump);
1717 
1718 	fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
1719 
1720 	/*
1721 	 * Pause RISC. No need to track timeout, as resetting the chip
1722 	 * is the right approach in case of a pause timeout
1723 	 */
1724 	qla24xx_pause_risc(reg, ha);
1725 
1726 	/* Host/RISC registers. */
1727 	iter_reg = fw->host_risc_reg;
1728 	iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg);
1729 	qla24xx_read_window(reg, 0x7010, 16, iter_reg);
1730 
1731 	/* PCIe registers. */
1732 	WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
1733 	RD_REG_DWORD(&reg->iobase_addr);
1734 	WRT_REG_DWORD(&reg->iobase_window, 0x01);
1735 	dmp_reg = &reg->iobase_c4;
1736 	fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg));
1737 	dmp_reg++;
1738 	fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg));
1739 	dmp_reg++;
1740 	fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg));
1741 	fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window));
1742 
1743 	WRT_REG_DWORD(&reg->iobase_window, 0x00);
1744 	RD_REG_DWORD(&reg->iobase_window);
1745 
1746 	/* Host interface registers. */
1747 	dmp_reg = &reg->flash_addr;
1748 	for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++, dmp_reg++)
1749 		fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg));
1750 
1751 	/* Disable interrupts. */
1752 	WRT_REG_DWORD(&reg->ictrl, 0);
1753 	RD_REG_DWORD(&reg->ictrl);
1754 
1755 	/* Shadow registers. */
1756 	WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
1757 	RD_REG_DWORD(&reg->iobase_addr);
1758 	WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
1759 	fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1760 
1761 	WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
1762 	fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1763 
1764 	WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
1765 	fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1766 
1767 	WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
1768 	fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1769 
1770 	WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
1771 	fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1772 
1773 	WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
1774 	fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1775 
1776 	WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
1777 	fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1778 
1779 	WRT_REG_DWORD(&reg->iobase_select, 0xB0700000);
1780 	fw->shadow_reg[7] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1781 
1782 	WRT_REG_DWORD(&reg->iobase_select, 0xB0800000);
1783 	fw->shadow_reg[8] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1784 
1785 	WRT_REG_DWORD(&reg->iobase_select, 0xB0900000);
1786 	fw->shadow_reg[9] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1787 
1788 	WRT_REG_DWORD(&reg->iobase_select, 0xB0A00000);
1789 	fw->shadow_reg[10] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1790 
1791 	/* RISC I/O register. */
1792 	WRT_REG_DWORD(&reg->iobase_addr, 0x0010);
1793 	fw->risc_io_reg = htonl(RD_REG_DWORD(&reg->iobase_window));
1794 
1795 	/* Mailbox registers. */
1796 	mbx_reg = &reg->mailbox0;
1797 	for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++)
1798 		fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg));
1799 
1800 	/* Transfer sequence registers. */
1801 	iter_reg = fw->xseq_gp_reg;
1802 	iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
1803 	iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
1804 	iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
1805 	iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
1806 	iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
1807 	iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
1808 	iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
1809 	qla24xx_read_window(reg, 0xBF70, 16, iter_reg);
1810 
1811 	iter_reg = fw->xseq_0_reg;
1812 	iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg);
1813 	iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg);
1814 	qla24xx_read_window(reg, 0xBFE0, 16, iter_reg);
1815 
1816 	qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);
1817 
1818 	/* Receive sequence registers. */
1819 	iter_reg = fw->rseq_gp_reg;
1820 	iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
1821 	iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
1822 	iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
1823 	iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
1824 	iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
1825 	iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
1826 	iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
1827 	qla24xx_read_window(reg, 0xFF70, 16, iter_reg);
1828 
1829 	iter_reg = fw->rseq_0_reg;
1830 	iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg);
1831 	qla24xx_read_window(reg, 0xFFD0, 16, iter_reg);
1832 
1833 	qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
1834 	qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);
1835 
1836 	/* Auxiliary sequence registers. */
1837 	iter_reg = fw->aseq_gp_reg;
1838 	iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg);
1839 	iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg);
1840 	iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg);
1841 	iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg);
1842 	iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg);
1843 	iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg);
1844 	iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg);
1845 	qla24xx_read_window(reg, 0xB070, 16, iter_reg);
1846 
1847 	iter_reg = fw->aseq_0_reg;
1848 	iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg);
1849 	qla24xx_read_window(reg, 0xB0D0, 16, iter_reg);
1850 
1851 	qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg);
1852 	qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg);
1853 
1854 	/* Command DMA registers. */
1855 	qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg);
1856 
1857 	/* Queues. */
1858 	iter_reg = fw->req0_dma_reg;
1859 	iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
1860 	dmp_reg = &reg->iobase_q;
1861 	for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1862 		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
1863 
1864 	iter_reg = fw->resp0_dma_reg;
1865 	iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
1866 	dmp_reg = &reg->iobase_q;
1867 	for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1868 		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
1869 
1870 	iter_reg = fw->req1_dma_reg;
1871 	iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
1872 	dmp_reg = &reg->iobase_q;
1873 	for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1874 		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
1875 
1876 	/* Transmit DMA registers. */
1877 	iter_reg = fw->xmt0_dma_reg;
1878 	iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
1879 	qla24xx_read_window(reg, 0x7610, 16, iter_reg);
1880 
1881 	iter_reg = fw->xmt1_dma_reg;
1882 	iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
1883 	qla24xx_read_window(reg, 0x7630, 16, iter_reg);
1884 
1885 	iter_reg = fw->xmt2_dma_reg;
1886 	iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
1887 	qla24xx_read_window(reg, 0x7650, 16, iter_reg);
1888 
1889 	iter_reg = fw->xmt3_dma_reg;
1890 	iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
1891 	qla24xx_read_window(reg, 0x7670, 16, iter_reg);
1892 
1893 	iter_reg = fw->xmt4_dma_reg;
1894 	iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
1895 	qla24xx_read_window(reg, 0x7690, 16, iter_reg);
1896 
1897 	qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);
1898 
1899 	/* Receive DMA registers. */
1900 	iter_reg = fw->rcvt0_data_dma_reg;
1901 	iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
1902 	qla24xx_read_window(reg, 0x7710, 16, iter_reg);
1903 
1904 	iter_reg = fw->rcvt1_data_dma_reg;
1905 	iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
1906 	qla24xx_read_window(reg, 0x7730, 16, iter_reg);
1907 
1908 	/* RISC registers. */
1909 	iter_reg = fw->risc_gp_reg;
1910 	iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
1911 	iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
1912 	iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
1913 	iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
1914 	iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
1915 	iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
1916 	iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
1917 	qla24xx_read_window(reg, 0x0F70, 16, iter_reg);
1918 
1919 	/* Local memory controller registers. */
1920 	iter_reg = fw->lmc_reg;
1921 	iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
1922 	iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
1923 	iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
1924 	iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
1925 	iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
1926 	iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
1927 	iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg);
1928 	qla24xx_read_window(reg, 0x3070, 16, iter_reg);
1929 
1930 	/* Fibre Protocol Module registers. */
1931 	iter_reg = fw->fpm_hdw_reg;
1932 	iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
1933 	iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
1934 	iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
1935 	iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
1936 	iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
1937 	iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
1938 	iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
1939 	iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
1940 	iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
1941 	iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
1942 	iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
1943 	iter_reg = qla24xx_read_window(reg, 0x40B0, 16, iter_reg);
1944 	iter_reg = qla24xx_read_window(reg, 0x40C0, 16, iter_reg);
1945 	qla24xx_read_window(reg, 0x40D0, 16, iter_reg);
1946 
1947 	/* Frame Buffer registers. */
1948 	iter_reg = fw->fb_hdw_reg;
1949 	iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
1950 	iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
1951 	iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
1952 	iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
1953 	iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
1954 	iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
1955 	iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
1956 	iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
1957 	iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
1958 	iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
1959 	iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
1960 	iter_reg = qla24xx_read_window(reg, 0x61C0, 16, iter_reg);
1961 	qla24xx_read_window(reg, 0x6F00, 16, iter_reg);
1962 
1963 	/* Multi queue registers */
1964 	nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset,
1965 	    &last_chain);
1966 
1967 	rval = qla24xx_soft_reset(ha);
1968 	if (rval != QLA_SUCCESS)
1969 		goto qla81xx_fw_dump_failed_0;
1970 
1971 	rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
1972 	    &nxt);
1973 	if (rval != QLA_SUCCESS)
1974 		goto qla81xx_fw_dump_failed_0;
1975 
1976 	nxt = qla2xxx_copy_queues(ha, nxt);
1977 
1978 	qla24xx_copy_eft(ha, nxt);
1979 
1980 	/* Chain entries -- started with MQ. */
1981 	nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
1982 	nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
1983 	nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
1984 	nxt_chain = qla25xx_copy_exlogin(ha, nxt_chain, &last_chain);
1985 	nxt_chain = qla81xx_copy_exchoffld(ha, nxt_chain, &last_chain);
1986 	if (last_chain) {
1987 		ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT);
1988 		*last_chain |= htonl(DUMP_CHAIN_LAST);
1989 	}
1990 
1991 	/* Adjust valid length. */
1992 	ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);
1993 
1994 qla81xx_fw_dump_failed_0:
1995 	qla2xxx_dump_post_process(base_vha, rval);
1996 
1997 qla81xx_fw_dump_failed:
1998 #ifndef __CHECKER__
1999 	if (!hardware_locked)
2000 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
2001 #else
2002 	;
2003 #endif
2004 }
2005 
2006 void
2007 qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
2008 {
2009 	int		rval;
2010 	uint32_t	cnt;
2011 	struct qla_hw_data *ha = vha->hw;
2012 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
2013 	uint32_t __iomem *dmp_reg;
2014 	uint32_t	*iter_reg;
2015 	uint16_t __iomem *mbx_reg;
2016 	unsigned long	flags;
2017 	struct qla83xx_fw_dump *fw;
2018 	void		*nxt, *nxt_chain;
2019 	uint32_t	*last_chain = NULL;
2020 	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
2021 
2022 	flags = 0;
2023 	ha->fw_dump_cap_flags = 0;
2024 
2025 #ifndef __CHECKER__
2026 	if (!hardware_locked)
2027 		spin_lock_irqsave(&ha->hardware_lock, flags);
2028 #endif
2029 
2030 	if (!ha->fw_dump) {
2031 		ql_log(ql_log_warn, vha, 0xd00c,
2032 		    "No buffer available for dump!!!\n");
2033 		goto qla83xx_fw_dump_failed;
2034 	}
2035 
2036 	if (ha->fw_dumped) {
2037 		ql_log(ql_log_warn, vha, 0xd00d,
2038 		    "Firmware has been previously dumped (%p) -- ignoring "
2039 		    "request...\n", ha->fw_dump);
2040 		goto qla83xx_fw_dump_failed;
2041 	}
2042 	QLA_FW_STOPPED(ha);
2043 	fw = &ha->fw_dump->isp.isp83;
2044 	qla2xxx_prep_dump(ha, ha->fw_dump);
2045 
2046 	fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
2047 
2048 	/*
2049 	 * Pause RISC. No need to track timeout, as resetting the chip
2050 	 * is the right approach in case of a pause timeout
2051 	 */
2052 	qla24xx_pause_risc(reg, ha);
2053 
2054 	WRT_REG_DWORD(&reg->iobase_addr, 0x6000);
2055 	dmp_reg = &reg->iobase_window;
2056 	RD_REG_DWORD(dmp_reg);
2057 	WRT_REG_DWORD(dmp_reg, 0);
2058 
2059 	dmp_reg = &reg->unused_4_1[0];
2060 	RD_REG_DWORD(dmp_reg);
2061 	WRT_REG_DWORD(dmp_reg, 0);
2062 
2063 	WRT_REG_DWORD(&reg->iobase_addr, 0x6010);
2064 	dmp_reg = &reg->unused_4_1[2];
2065 	RD_REG_DWORD(dmp_reg);
2066 	WRT_REG_DWORD(dmp_reg, 0);
2067 
2068 	/* select PCR and disable ecc checking and correction */
2069 	/* select PCR and disable ECC checking and correction */
2070 	RD_REG_DWORD(&reg->iobase_addr);
2071 	WRT_REG_DWORD(&reg->iobase_select, 0x60000000);	/* write to F0h = PCR */
2072 
2073 	/* Host/RISC registers. */
2074 	iter_reg = fw->host_risc_reg;
2075 	iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg);
2076 	iter_reg = qla24xx_read_window(reg, 0x7010, 16, iter_reg);
2077 	qla24xx_read_window(reg, 0x7040, 16, iter_reg);
2078 
2079 	/* PCIe registers. */
2080 	WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
2081 	RD_REG_DWORD(&reg->iobase_addr);
2082 	WRT_REG_DWORD(&reg->iobase_window, 0x01);
2083 	dmp_reg = &reg->iobase_c4;
2084 	fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg));
2085 	dmp_reg++;
2086 	fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg));
2087 	dmp_reg++;
2088 	fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg));
2089 	fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window));
2090 
2091 	WRT_REG_DWORD(&reg->iobase_window, 0x00);
2092 	RD_REG_DWORD(&reg->iobase_window);
2093 
2094 	/* Host interface registers. */
2095 	dmp_reg = &reg->flash_addr;
2096 	for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++, dmp_reg++)
2097 		fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg));
2098 
2099 	/* Disable interrupts. */
2100 	WRT_REG_DWORD(&reg->ictrl, 0);
2101 	RD_REG_DWORD(&reg->ictrl);
2102 
2103 	/* Shadow registers. */
2104 	WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
2105 	RD_REG_DWORD(&reg->iobase_addr);
2106 	WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
2107 	fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2108 
2109 	WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
2110 	fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2111 
2112 	WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
2113 	fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2114 
2115 	WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
2116 	fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2117 
2118 	WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
2119 	fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2120 
2121 	WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
2122 	fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2123 
2124 	WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
2125 	fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2126 
2127 	WRT_REG_DWORD(&reg->iobase_select, 0xB0700000);
2128 	fw->shadow_reg[7] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2129 
2130 	WRT_REG_DWORD(&reg->iobase_select, 0xB0800000);
2131 	fw->shadow_reg[8] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2132 
2133 	WRT_REG_DWORD(&reg->iobase_select, 0xB0900000);
2134 	fw->shadow_reg[9] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2135 
2136 	WRT_REG_DWORD(&reg->iobase_select, 0xB0A00000);
2137 	fw->shadow_reg[10] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2138 
2139 	/* RISC I/O register. */
2140 	WRT_REG_DWORD(&reg->iobase_addr, 0x0010);
2141 	fw->risc_io_reg = htonl(RD_REG_DWORD(&reg->iobase_window));
2142 
2143 	/* Mailbox registers. */
2144 	mbx_reg = &reg->mailbox0;
2145 	for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++)
2146 		fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg));
2147 
2148 	/* Transfer sequence registers. */
2149 	iter_reg = fw->xseq_gp_reg;
2150 	iter_reg = qla24xx_read_window(reg, 0xBE00, 16, iter_reg);
2151 	iter_reg = qla24xx_read_window(reg, 0xBE10, 16, iter_reg);
2152 	iter_reg = qla24xx_read_window(reg, 0xBE20, 16, iter_reg);
2153 	iter_reg = qla24xx_read_window(reg, 0xBE30, 16, iter_reg);
2154 	iter_reg = qla24xx_read_window(reg, 0xBE40, 16, iter_reg);
2155 	iter_reg = qla24xx_read_window(reg, 0xBE50, 16, iter_reg);
2156 	iter_reg = qla24xx_read_window(reg, 0xBE60, 16, iter_reg);
2157 	iter_reg = qla24xx_read_window(reg, 0xBE70, 16, iter_reg);
2158 	iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
2159 	iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
2160 	iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
2161 	iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
2162 	iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
2163 	iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
2164 	iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
2165 	qla24xx_read_window(reg, 0xBF70, 16, iter_reg);
2166 
2167 	iter_reg = fw->xseq_0_reg;
2168 	iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg);
2169 	iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg);
2170 	qla24xx_read_window(reg, 0xBFE0, 16, iter_reg);
2171 
2172 	qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);
2173 
2174 	qla24xx_read_window(reg, 0xBEF0, 16, fw->xseq_2_reg);
2175 
2176 	/* Receive sequence registers. */
2177 	iter_reg = fw->rseq_gp_reg;
2178 	iter_reg = qla24xx_read_window(reg, 0xFE00, 16, iter_reg);
2179 	iter_reg = qla24xx_read_window(reg, 0xFE10, 16, iter_reg);
2180 	iter_reg = qla24xx_read_window(reg, 0xFE20, 16, iter_reg);
2181 	iter_reg = qla24xx_read_window(reg, 0xFE30, 16, iter_reg);
2182 	iter_reg = qla24xx_read_window(reg, 0xFE40, 16, iter_reg);
2183 	iter_reg = qla24xx_read_window(reg, 0xFE50, 16, iter_reg);
2184 	iter_reg = qla24xx_read_window(reg, 0xFE60, 16, iter_reg);
2185 	iter_reg = qla24xx_read_window(reg, 0xFE70, 16, iter_reg);
2186 	iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
2187 	iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
2188 	iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
2189 	iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
2190 	iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
2191 	iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
2192 	iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
2193 	qla24xx_read_window(reg, 0xFF70, 16, iter_reg);
2194 
2195 	iter_reg = fw->rseq_0_reg;
2196 	iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg);
2197 	qla24xx_read_window(reg, 0xFFD0, 16, iter_reg);
2198 
2199 	qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
2200 	qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);
2201 	qla24xx_read_window(reg, 0xFEF0, 16, fw->rseq_3_reg);
2202 
2203 	/* Auxiliary sequence registers. */
2204 	iter_reg = fw->aseq_gp_reg;
2205 	iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg);
2206 	iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg);
2207 	iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg);
2208 	iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg);
2209 	iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg);
2210 	iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg);
2211 	iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg);
2212 	iter_reg = qla24xx_read_window(reg, 0xB070, 16, iter_reg);
2213 	iter_reg = qla24xx_read_window(reg, 0xB100, 16, iter_reg);
2214 	iter_reg = qla24xx_read_window(reg, 0xB110, 16, iter_reg);
2215 	iter_reg = qla24xx_read_window(reg, 0xB120, 16, iter_reg);
2216 	iter_reg = qla24xx_read_window(reg, 0xB130, 16, iter_reg);
2217 	iter_reg = qla24xx_read_window(reg, 0xB140, 16, iter_reg);
2218 	iter_reg = qla24xx_read_window(reg, 0xB150, 16, iter_reg);
2219 	iter_reg = qla24xx_read_window(reg, 0xB160, 16, iter_reg);
2220 	qla24xx_read_window(reg, 0xB170, 16, iter_reg);
2221 
2222 	iter_reg = fw->aseq_0_reg;
2223 	iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg);
2224 	qla24xx_read_window(reg, 0xB0D0, 16, iter_reg);
2225 
2226 	qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg);
2227 	qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg);
2228 	qla24xx_read_window(reg, 0xB1F0, 16, fw->aseq_3_reg);
2229 
2230 	/* Command DMA registers. */
2231 	iter_reg = fw->cmd_dma_reg;
2232 	iter_reg = qla24xx_read_window(reg, 0x7100, 16, iter_reg);
2233 	iter_reg = qla24xx_read_window(reg, 0x7120, 16, iter_reg);
2234 	iter_reg = qla24xx_read_window(reg, 0x7130, 16, iter_reg);
2235 	qla24xx_read_window(reg, 0x71F0, 16, iter_reg);
2236 
2237 	/* Queues. */
2238 	iter_reg = fw->req0_dma_reg;
2239 	iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
2240 	dmp_reg = &reg->iobase_q;
2241 	for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
2242 		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
2243 
2244 	iter_reg = fw->resp0_dma_reg;
2245 	iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
2246 	dmp_reg = &reg->iobase_q;
2247 	for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
2248 		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
2249 
2250 	iter_reg = fw->req1_dma_reg;
2251 	iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
2252 	dmp_reg = &reg->iobase_q;
2253 	for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
2254 		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
2255 
2256 	/* Transmit DMA registers. */
2257 	iter_reg = fw->xmt0_dma_reg;
2258 	iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
2259 	qla24xx_read_window(reg, 0x7610, 16, iter_reg);
2260 
2261 	iter_reg = fw->xmt1_dma_reg;
2262 	iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
2263 	qla24xx_read_window(reg, 0x7630, 16, iter_reg);
2264 
2265 	iter_reg = fw->xmt2_dma_reg;
2266 	iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
2267 	qla24xx_read_window(reg, 0x7650, 16, iter_reg);
2268 
2269 	iter_reg = fw->xmt3_dma_reg;
2270 	iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
2271 	qla24xx_read_window(reg, 0x7670, 16, iter_reg);
2272 
2273 	iter_reg = fw->xmt4_dma_reg;
2274 	iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
2275 	qla24xx_read_window(reg, 0x7690, 16, iter_reg);
2276 
2277 	qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);
2278 
2279 	/* Receive DMA registers. */
2280 	iter_reg = fw->rcvt0_data_dma_reg;
2281 	iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
2282 	qla24xx_read_window(reg, 0x7710, 16, iter_reg);
2283 
2284 	iter_reg = fw->rcvt1_data_dma_reg;
2285 	iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
2286 	qla24xx_read_window(reg, 0x7730, 16, iter_reg);
2287 
2288 	/* RISC registers. */
2289 	iter_reg = fw->risc_gp_reg;
2290 	iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
2291 	iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
2292 	iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
2293 	iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
2294 	iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
2295 	iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
2296 	iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
2297 	qla24xx_read_window(reg, 0x0F70, 16, iter_reg);
2298 
2299 	/* Local memory controller registers. */
2300 	iter_reg = fw->lmc_reg;
2301 	iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
2302 	iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
2303 	iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
2304 	iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
2305 	iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
2306 	iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
2307 	iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg);
2308 	qla24xx_read_window(reg, 0x3070, 16, iter_reg);
2309 
2310 	/* Fibre Protocol Module registers. */
2311 	iter_reg = fw->fpm_hdw_reg;
2312 	iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
2313 	iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
2314 	iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
2315 	iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
2316 	iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
2317 	iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
2318 	iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
2319 	iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
2320 	iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
2321 	iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
2322 	iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
2323 	iter_reg = qla24xx_read_window(reg, 0x40B0, 16, iter_reg);
2324 	iter_reg = qla24xx_read_window(reg, 0x40C0, 16, iter_reg);
2325 	iter_reg = qla24xx_read_window(reg, 0x40D0, 16, iter_reg);
2326 	iter_reg = qla24xx_read_window(reg, 0x40E0, 16, iter_reg);
2327 	qla24xx_read_window(reg, 0x40F0, 16, iter_reg);
2328 
2329 	/* RQ0 Array registers. */
2330 	iter_reg = fw->rq0_array_reg;
2331 	iter_reg = qla24xx_read_window(reg, 0x5C00, 16, iter_reg);
2332 	iter_reg = qla24xx_read_window(reg, 0x5C10, 16, iter_reg);
2333 	iter_reg = qla24xx_read_window(reg, 0x5C20, 16, iter_reg);
2334 	iter_reg = qla24xx_read_window(reg, 0x5C30, 16, iter_reg);
2335 	iter_reg = qla24xx_read_window(reg, 0x5C40, 16, iter_reg);
2336 	iter_reg = qla24xx_read_window(reg, 0x5C50, 16, iter_reg);
2337 	iter_reg = qla24xx_read_window(reg, 0x5C60, 16, iter_reg);
2338 	iter_reg = qla24xx_read_window(reg, 0x5C70, 16, iter_reg);
2339 	iter_reg = qla24xx_read_window(reg, 0x5C80, 16, iter_reg);
2340 	iter_reg = qla24xx_read_window(reg, 0x5C90, 16, iter_reg);
2341 	iter_reg = qla24xx_read_window(reg, 0x5CA0, 16, iter_reg);
2342 	iter_reg = qla24xx_read_window(reg, 0x5CB0, 16, iter_reg);
2343 	iter_reg = qla24xx_read_window(reg, 0x5CC0, 16, iter_reg);
2344 	iter_reg = qla24xx_read_window(reg, 0x5CD0, 16, iter_reg);
2345 	iter_reg = qla24xx_read_window(reg, 0x5CE0, 16, iter_reg);
2346 	qla24xx_read_window(reg, 0x5CF0, 16, iter_reg);
2347 
2348 	/* RQ1 Array registers. */
2349 	iter_reg = fw->rq1_array_reg;
2350 	iter_reg = qla24xx_read_window(reg, 0x5D00, 16, iter_reg);
2351 	iter_reg = qla24xx_read_window(reg, 0x5D10, 16, iter_reg);
2352 	iter_reg = qla24xx_read_window(reg, 0x5D20, 16, iter_reg);
2353 	iter_reg = qla24xx_read_window(reg, 0x5D30, 16, iter_reg);
2354 	iter_reg = qla24xx_read_window(reg, 0x5D40, 16, iter_reg);
2355 	iter_reg = qla24xx_read_window(reg, 0x5D50, 16, iter_reg);
2356 	iter_reg = qla24xx_read_window(reg, 0x5D60, 16, iter_reg);
2357 	iter_reg = qla24xx_read_window(reg, 0x5D70, 16, iter_reg);
2358 	iter_reg = qla24xx_read_window(reg, 0x5D80, 16, iter_reg);
2359 	iter_reg = qla24xx_read_window(reg, 0x5D90, 16, iter_reg);
2360 	iter_reg = qla24xx_read_window(reg, 0x5DA0, 16, iter_reg);
2361 	iter_reg = qla24xx_read_window(reg, 0x5DB0, 16, iter_reg);
2362 	iter_reg = qla24xx_read_window(reg, 0x5DC0, 16, iter_reg);
2363 	iter_reg = qla24xx_read_window(reg, 0x5DD0, 16, iter_reg);
2364 	iter_reg = qla24xx_read_window(reg, 0x5DE0, 16, iter_reg);
2365 	qla24xx_read_window(reg, 0x5DF0, 16, iter_reg);
2366 
2367 	/* RP0 Array registers. */
2368 	iter_reg = fw->rp0_array_reg;
2369 	iter_reg = qla24xx_read_window(reg, 0x5E00, 16, iter_reg);
2370 	iter_reg = qla24xx_read_window(reg, 0x5E10, 16, iter_reg);
2371 	iter_reg = qla24xx_read_window(reg, 0x5E20, 16, iter_reg);
2372 	iter_reg = qla24xx_read_window(reg, 0x5E30, 16, iter_reg);
2373 	iter_reg = qla24xx_read_window(reg, 0x5E40, 16, iter_reg);
2374 	iter_reg = qla24xx_read_window(reg, 0x5E50, 16, iter_reg);
2375 	iter_reg = qla24xx_read_window(reg, 0x5E60, 16, iter_reg);
2376 	iter_reg = qla24xx_read_window(reg, 0x5E70, 16, iter_reg);
2377 	iter_reg = qla24xx_read_window(reg, 0x5E80, 16, iter_reg);
2378 	iter_reg = qla24xx_read_window(reg, 0x5E90, 16, iter_reg);
2379 	iter_reg = qla24xx_read_window(reg, 0x5EA0, 16, iter_reg);
2380 	iter_reg = qla24xx_read_window(reg, 0x5EB0, 16, iter_reg);
2381 	iter_reg = qla24xx_read_window(reg, 0x5EC0, 16, iter_reg);
2382 	iter_reg = qla24xx_read_window(reg, 0x5ED0, 16, iter_reg);
2383 	iter_reg = qla24xx_read_window(reg, 0x5EE0, 16, iter_reg);
2384 	qla24xx_read_window(reg, 0x5EF0, 16, iter_reg);
2385 
2386 	/* RP1 Array registers. */
2387 	iter_reg = fw->rp1_array_reg;
2388 	iter_reg = qla24xx_read_window(reg, 0x5F00, 16, iter_reg);
2389 	iter_reg = qla24xx_read_window(reg, 0x5F10, 16, iter_reg);
2390 	iter_reg = qla24xx_read_window(reg, 0x5F20, 16, iter_reg);
2391 	iter_reg = qla24xx_read_window(reg, 0x5F30, 16, iter_reg);
2392 	iter_reg = qla24xx_read_window(reg, 0x5F40, 16, iter_reg);
2393 	iter_reg = qla24xx_read_window(reg, 0x5F50, 16, iter_reg);
2394 	iter_reg = qla24xx_read_window(reg, 0x5F60, 16, iter_reg);
2395 	iter_reg = qla24xx_read_window(reg, 0x5F70, 16, iter_reg);
2396 	iter_reg = qla24xx_read_window(reg, 0x5F80, 16, iter_reg);
2397 	iter_reg = qla24xx_read_window(reg, 0x5F90, 16, iter_reg);
2398 	iter_reg = qla24xx_read_window(reg, 0x5FA0, 16, iter_reg);
2399 	iter_reg = qla24xx_read_window(reg, 0x5FB0, 16, iter_reg);
2400 	iter_reg = qla24xx_read_window(reg, 0x5FC0, 16, iter_reg);
2401 	iter_reg = qla24xx_read_window(reg, 0x5FD0, 16, iter_reg);
2402 	iter_reg = qla24xx_read_window(reg, 0x5FE0, 16, iter_reg);
2403 	qla24xx_read_window(reg, 0x5FF0, 16, iter_reg);
2404 
2405 	iter_reg = fw->at0_array_reg;
2406 	iter_reg = qla24xx_read_window(reg, 0x7080, 16, iter_reg);
2407 	iter_reg = qla24xx_read_window(reg, 0x7090, 16, iter_reg);
2408 	iter_reg = qla24xx_read_window(reg, 0x70A0, 16, iter_reg);
2409 	iter_reg = qla24xx_read_window(reg, 0x70B0, 16, iter_reg);
2410 	iter_reg = qla24xx_read_window(reg, 0x70C0, 16, iter_reg);
2411 	iter_reg = qla24xx_read_window(reg, 0x70D0, 16, iter_reg);
2412 	iter_reg = qla24xx_read_window(reg, 0x70E0, 16, iter_reg);
2413 	qla24xx_read_window(reg, 0x70F0, 16, iter_reg);
2414 
2415 	/* I/O Queue Control registers. */
2416 	qla24xx_read_window(reg, 0x7800, 16, fw->queue_control_reg);
2417 
2418 	/* Frame Buffer registers. */
2419 	iter_reg = fw->fb_hdw_reg;
2420 	iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
2421 	iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
2422 	iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
2423 	iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
2424 	iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
2425 	iter_reg = qla24xx_read_window(reg, 0x6060, 16, iter_reg);
2426 	iter_reg = qla24xx_read_window(reg, 0x6070, 16, iter_reg);
2427 	iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
2428 	iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
2429 	iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
2430 	iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
2431 	iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
2432 	iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
2433 	iter_reg = qla24xx_read_window(reg, 0x61C0, 16, iter_reg);
2434 	iter_reg = qla24xx_read_window(reg, 0x6530, 16, iter_reg);
2435 	iter_reg = qla24xx_read_window(reg, 0x6540, 16, iter_reg);
2436 	iter_reg = qla24xx_read_window(reg, 0x6550, 16, iter_reg);
2437 	iter_reg = qla24xx_read_window(reg, 0x6560, 16, iter_reg);
2438 	iter_reg = qla24xx_read_window(reg, 0x6570, 16, iter_reg);
2439 	iter_reg = qla24xx_read_window(reg, 0x6580, 16, iter_reg);
2440 	iter_reg = qla24xx_read_window(reg, 0x6590, 16, iter_reg);
2441 	iter_reg = qla24xx_read_window(reg, 0x65A0, 16, iter_reg);
2442 	iter_reg = qla24xx_read_window(reg, 0x65B0, 16, iter_reg);
2443 	iter_reg = qla24xx_read_window(reg, 0x65C0, 16, iter_reg);
2444 	iter_reg = qla24xx_read_window(reg, 0x65D0, 16, iter_reg);
2445 	iter_reg = qla24xx_read_window(reg, 0x65E0, 16, iter_reg);
2446 	qla24xx_read_window(reg, 0x6F00, 16, iter_reg);
2447 
2448 	/* Multi queue registers */
2449 	nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset,
2450 	    &last_chain);
2451 
2452 	rval = qla24xx_soft_reset(ha);
2453 	if (rval != QLA_SUCCESS) {
2454 		ql_log(ql_log_warn, vha, 0xd00e,
2455 		    "SOFT RESET FAILED, forcing continuation of dump!!!\n");
2456 		rval = QLA_SUCCESS;
2457 
2458 		ql_log(ql_log_warn, vha, 0xd00f, "try a bigger hammer!!!\n");
2459 
2460 		WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
2461 		RD_REG_DWORD(&reg->hccr);
2462 
2463 		WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
2464 		RD_REG_DWORD(&reg->hccr);
2465 
2466 		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET);
2467 		RD_REG_DWORD(&reg->hccr);
2468 
2469 		for (cnt = 30000; cnt && (RD_REG_WORD(&reg->mailbox0)); cnt--)
2470 			udelay(5);
2471 
2472 		if (!cnt) {
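			/*
			 * RISC never came out of reset; skip over the
			 * firmware memory areas of the dump template so
			 * the queue data copied below still lands at its
			 * expected offset.
			 */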
2473 			nxt = fw->code_ram;
2474 			nxt += sizeof(fw->code_ram);
2475 			nxt += (ha->fw_memory_size - 0x100000 + 1);
2476 			goto copy_queue;
2477 		} else {
2478 			set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags);
2479 			ql_log(ql_log_warn, vha, 0xd010,
2480 			    "bigger hammer success?\n");
2481 		}
2482 	}
2483 
2484 	rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
2485 	    &nxt);
2486 	if (rval != QLA_SUCCESS)
2487 		goto qla83xx_fw_dump_failed_0;
2488 
2489 copy_queue:
2490 	nxt = qla2xxx_copy_queues(ha, nxt);
2491 
2492 	qla24xx_copy_eft(ha, nxt);
2493 
2494 	/* Chain entries -- started with MQ. */
2495 	nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
2496 	nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
2497 	nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
2498 	nxt_chain = qla25xx_copy_exlogin(ha, nxt_chain, &last_chain);
2499 	nxt_chain = qla81xx_copy_exchoffld(ha, nxt_chain, &last_chain);
2500 	if (last_chain) {
2501 		ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT);
2502 		*last_chain |= htonl(DUMP_CHAIN_LAST);
2503 	}
2504 
2505 	/* Adjust valid length. */
2506 	ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);
2507 
2508 qla83xx_fw_dump_failed_0:
2509 	qla2xxx_dump_post_process(base_vha, rval);
2510 
2511 qla83xx_fw_dump_failed:
2512 #ifndef __CHECKER__
2513 	if (!hardware_locked)
2514 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
2515 #else
2516 	;
2517 #endif
2518 }
2519 
2520 /****************************************************************************/
2521 /*                         Driver Debug Functions.                          */
2522 /****************************************************************************/
2523 
2524 /*
2525  * This function is for formatting and logging debug information.
2526  * It is to be used when vha is available. It formats the message
2527  * and logs it to the messages file.
2528  * parameters:
2529  * level: The level of the debug messages to be printed.
2530  *        If the corresponding bit is set in ql2xextended_error_logging,
2531  *        this message will appear in the messages file.
2532  * vha:   Pointer to the scsi_qla_host_t.
2533  * id:    This is a unique identifier for the level. It identifies the
2534  *        part of the code from where the message originated.
2535  * fmt:   printf() style format string for the message to be displayed.
2536  */
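/*
 * Example (illustrative only, not taken from the driver): a discovery-path
 * caller holding a valid vha might emit a debug message as follows. The
 * message id 0x2135, port_name and state are hypothetical; real ids are
 * allocated from the table at the top of this file.
 *
 *	ql_dbg(ql_dbg_disc, vha, 0x2135,
 *	    "Login retry for port %8phC, state %d.\n", port_name, state);
 *
 * The message is emitted only if the ql_dbg_disc bit is set in
 * ql2xextended_error_logging.
 */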
2537 void
2538 ql_dbg(uint level, scsi_qla_host_t *vha, uint id, const char *fmt, ...)
2539 {
2540 	va_list va;
2541 	struct va_format vaf;
2542 
2543 	va_start(va, fmt);
2544 
2545 	vaf.fmt = fmt;
2546 	vaf.va = &va;
2547 
2548 	if (!ql_mask_match(level)) {
2549 		char pbuf[64];
2550 
2551 		if (vha != NULL) {
2552 			const struct pci_dev *pdev = vha->hw->pdev;
2553 			/* <module-name> <pci-name> <msg-id>:<host> Message */
2554 			snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x:%ld: ",
2555 			    QL_MSGHDR, dev_name(&(pdev->dev)), id,
2556 			    vha->host_no);
2557 		} else {
2558 			snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x: : ",
2559 			    QL_MSGHDR, "0000:00:00.0", id);
2560 		}
2561 		pbuf[sizeof(pbuf) - 1] = 0;
2562 		trace_ql_dbg_log(pbuf, &vaf);
2563 		va_end(va);
2564 		return;
2565 	}
2566 
2567 	if (vha != NULL) {
2568 		const struct pci_dev *pdev = vha->hw->pdev;
2569 		/* <module-name> <pci-name> <msg-id>:<host> Message */
2570 		pr_warn("%s [%s]-%04x:%ld: %pV",
2571 			QL_MSGHDR, dev_name(&(pdev->dev)), id + ql_dbg_offset,
2572 			vha->host_no, &vaf);
2573 	} else {
2574 		pr_warn("%s [%s]-%04x: : %pV",
2575 			QL_MSGHDR, "0000:00:00.0", id + ql_dbg_offset, &vaf);
2576 	}
2577 
2578 	va_end(va);
2579 
2580 }
2581 
2582 /*
2583  * This function is for formatting and logging debug information.
2584  * It is to be used when vha is not available and pci is available,
2585  * i.e., before host allocation. It formats the message and logs it
2586  * to the messages file.
2587  * parameters:
2588  * level: The level of the debug messages to be printed.
2589  *        If the corresponding bit is set in ql2xextended_error_logging,
2590  *        this message will appear in the messages file.
2591  * pdev:  Pointer to the struct pci_dev.
2592  * id:    This is a unique id for the level. It identifies the part
2593  *        of the code from where the message originated.
2594  * fmt:   printf() style format string for the message to be displayed.
2595  */
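/*
 * Example (illustrative only): during early probe, before the scsi_qla_host
 * is allocated, a caller might log against the pci_dev instead. The id
 * 0x0194 is hypothetical.
 *
 *	ql_dbg_pci(ql_dbg_init, pdev, 0x0194,
 *	    "Probing ISP%04x.\n", pdev->device);
 *
 * As with ql_dbg(), nothing is printed unless the ql_dbg_init bit is set
 * in ql2xextended_error_logging.
 */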
2596 void
2597 ql_dbg_pci(uint level, struct pci_dev *pdev, uint id, const char *fmt, ...)
2598 {
2599 	va_list va;
2600 	struct va_format vaf;
2601 
2602 	if (pdev == NULL)
2603 		return;
2604 	if (!ql_mask_match(level))
2605 		return;
2606 
2607 	va_start(va, fmt);
2608 
2609 	vaf.fmt = fmt;
2610 	vaf.va = &va;
2611 
2612 	/* <module-name> <dev-name>:<msg-id> Message */
2613 	pr_warn("%s [%s]-%04x: : %pV",
2614 		QL_MSGHDR, dev_name(&(pdev->dev)), id + ql_dbg_offset, &vaf);
2615 
2616 	va_end(va);
2617 }
2618 
2619 /*
2620  * This function is for formatting and logging log messages.
2621  * It is to be used when vha is available. It formats the message
2622  * and logs it to the messages file. Messages at or below ql_errlev are
2623  * logged irrespective of the value of ql2xextended_error_logging.
2624  * parameters:
2625  * level: The level of the log messages to be printed in the
2626  *        messages file.
2627  * vha:   Pointer to the scsi_qla_host_t.
2628  * id:    This is a unique id for the level. It identifies the
2629  *        part of the code from where the message originated.
2630  * fmt:   printf() style format string for the message to be displayed.
2631  */
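/*
 * Example (illustrative only): unlike ql_dbg(), ql_log() does not consult
 * ql2xextended_error_logging, so it is used for events that should always
 * reach the log. The id 0x5091 is hypothetical.
 *
 *	ql_log(ql_log_warn, vha, 0x5091,
 *	    "Link down -- waiting for link up.\n");
 *
 * The level also selects the printk severity: ql_log_fatal maps to
 * pr_crit(), ql_log_warn to pr_err(), and ql_log_info to pr_warn().
 */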
2632 void
2633 ql_log(uint level, scsi_qla_host_t *vha, uint id, const char *fmt, ...)
2634 {
2635 	va_list va;
2636 	struct va_format vaf;
2637 	char pbuf[128];
2638 
2639 	if (level > ql_errlev)
2640 		return;
2641 
2642 	if (vha != NULL) {
2643 		const struct pci_dev *pdev = vha->hw->pdev;
2644 		/* <module-name> <pci-name> <msg-id>:<host> Message */
2645 		snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x:%ld: ",
2646 			QL_MSGHDR, dev_name(&(pdev->dev)), id, vha->host_no);
2647 	} else {
2648 		snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x: : ",
2649 			QL_MSGHDR, "0000:00:00.0", id);
2650 	}
2651 	pbuf[sizeof(pbuf) - 1] = 0;
2652 
2653 	va_start(va, fmt);
2654 
2655 	vaf.fmt = fmt;
2656 	vaf.va = &va;
2657 
2658 	switch (level) {
2659 	case ql_log_fatal: /* FATAL LOG */
2660 		pr_crit("%s%pV", pbuf, &vaf);
2661 		break;
2662 	case ql_log_warn:
2663 		pr_err("%s%pV", pbuf, &vaf);
2664 		break;
2665 	case ql_log_info:
2666 		pr_warn("%s%pV", pbuf, &vaf);
2667 		break;
2668 	default:
2669 		pr_info("%s%pV", pbuf, &vaf);
2670 		break;
2671 	}
2672 
2673 	va_end(va);
2674 }
2675 
2676 /*
2677  * This function is for formatting and logging log messages.
2678  * It is to be used when vha is not available and pci is available,
2679  * i.e., before host allocation. It formats the message and logs
2680  * it to the messages file. Messages at or below ql_errlev are logged
2681  * irrespective of the value of ql2xextended_error_logging.
2682  * parameters:
2683  * level: The level of the log messages to be printed in the
2684  *        messages file.
2685  * pdev:  Pointer to the struct pci_dev.
2686  * id:    This is a unique id for the level. It identifies the
2687  *        part of the code from where the message originated.
2688  * fmt:   printf() style format string for the message to be displayed.
2689  */
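/*
 * Example (illustrative only): a probe-time failure, before host allocation,
 * might be reported against the pci_dev. The id 0x0195 is hypothetical.
 *
 *	ql_log_pci(ql_log_fatal, pdev, 0x0195,
 *	    "Failed to map PCI BARs -- aborting probe.\n");
 */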
2690 void
2691 ql_log_pci(uint level, struct pci_dev *pdev, uint id, const char *fmt, ...)
2692 {
2693 	va_list va;
2694 	struct va_format vaf;
2695 	char pbuf[128];
2696 
2697 	if (pdev == NULL)
2698 		return;
2699 	if (level > ql_errlev)
2700 		return;
2701 
2702 	/* <module-name> <dev-name>:<msg-id> Message */
2703 	snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x: : ",
2704 		 QL_MSGHDR, dev_name(&(pdev->dev)), id);
2705 	pbuf[sizeof(pbuf) - 1] = 0;
2706 
2707 	va_start(va, fmt);
2708 
2709 	vaf.fmt = fmt;
2710 	vaf.va = &va;
2711 
2712 	switch (level) {
2713 	case ql_log_fatal: /* FATAL LOG */
2714 		pr_crit("%s%pV", pbuf, &vaf);
2715 		break;
2716 	case ql_log_warn:
2717 		pr_err("%s%pV", pbuf, &vaf);
2718 		break;
2719 	case ql_log_info:
2720 		pr_warn("%s%pV", pbuf, &vaf);
2721 		break;
2722 	default:
2723 		pr_info("%s%pV", pbuf, &vaf);
2724 		break;
2725 	}
2726 
2727 	va_end(va);
2728 }
2729 
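/*
 * ql_dump_regs() - dump the first few mailbox registers through ql_dbg().
 * The mailbox register block is selected according to the chip family, and
 * nothing is printed unless the given debug level is enabled in
 * ql2xextended_error_logging.
 *
 * Example (illustrative only, id hypothetical):
 *
 *	ql_dump_regs(ql_dbg_mbx, vha, 0x1207);
 */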
2730 void
2731 ql_dump_regs(uint level, scsi_qla_host_t *vha, uint id)
2732 {
2733 	int i;
2734 	struct qla_hw_data *ha = vha->hw;
2735 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
2736 	struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
2737 	struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
2738 	uint16_t __iomem *mbx_reg;
2739 
2740 	if (!ql_mask_match(level))
2741 		return;
2742 
2743 	if (IS_P3P_TYPE(ha))
2744 		mbx_reg = &reg82->mailbox_in[0];
2745 	else if (IS_FWI2_CAPABLE(ha))
2746 		mbx_reg = &reg24->mailbox0;
2747 	else
2748 		mbx_reg = MAILBOX_REG(ha, reg, 0);
2749 
2750 	ql_dbg(level, vha, id, "Mailbox registers:\n");
2751 	for (i = 0; i < 6; i++, mbx_reg++)
2752 		ql_dbg(level, vha, id,
2753 		    "mbox[%d] %#04x\n", i, RD_REG_WORD(mbx_reg));
2754 }
2755 
2756 
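/*
 * ql_dump_buffer() - hex dump a buffer through the debug log, 16 bytes per
 * line, when the given debug level is enabled in ql2xextended_error_logging.
 *
 * Example (illustrative only, id hypothetical):
 *
 *	ql_dump_buffer(ql_dbg_buffer, vha, 0x70e4, pkt, sizeof(*pkt));
 */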
2757 void
2758 ql_dump_buffer(uint level, scsi_qla_host_t *vha, uint id, const void *buf,
2759 	       uint size)
2760 {
2761 	uint cnt;
2762 
2763 	if (!ql_mask_match(level))
2764 		return;
2765 
2766 	ql_dbg(level, vha, id,
2767 	    "%-+5d  0  1  2  3  4  5  6  7  8  9  A  B  C  D  E  F\n", size);
2768 	ql_dbg(level, vha, id,
2769 	    "----- -----------------------------------------------\n");
2770 	for (cnt = 0; cnt < size; cnt += 16) {
2771 		ql_dbg(level, vha, id, "%04x: ", cnt);
2772 		print_hex_dump(KERN_CONT, "", DUMP_PREFIX_NONE, 16, 1,
2773 			       buf + cnt, min(16U, size - cnt), false);
2774 	}
2775 }
2776 
2777 /*
2778  * This function is for formatting and logging log messages.
2779  * It is to be used when a qpair is available. It formats the message
2780  * and logs it to the messages file. Messages at or below ql_errlev are
2781  * logged irrespective of the value of ql2xextended_error_logging.
2782  * parameters:
2783  * level: The level of the log messages to be printed in the
2784  *        messages file.
2785  * qpair: Pointer to the struct qla_qpair.
2786  * id:    This is a unique id for the level. It identifies the
2787  *        part of the code from where the message originated.
2788  * fmt:   printf() style format string for the message to be displayed.
2789  */
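/*
 * Example (illustrative only): an error observed on a particular queue pair
 * might be reported as follows. The id 0x3075 and comp_status are
 * hypothetical.
 *
 *	ql_log_qp(ql_log_warn, qpair, 0x3075,
 *	    "Unexpected completion status %#x.\n", comp_status);
 */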
2790 void
2791 ql_log_qp(uint32_t level, struct qla_qpair *qpair, int32_t id,
2792     const char *fmt, ...)
2793 {
2794 	va_list va;
2795 	struct va_format vaf;
2796 	char pbuf[128];
2797 
2798 	if (level > ql_errlev)
2799 		return;
2800 
2801 	if (qpair != NULL) {
2802 		const struct pci_dev *pdev = qpair->pdev;
2803 		/* <module-name> <pci-name> <msg-id> Message */
2804 		snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x: ",
2805 			QL_MSGHDR, dev_name(&(pdev->dev)), id);
2806 	} else {
2807 		snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x: : ",
2808 			QL_MSGHDR, "0000:00:00.0", id);
2809 	}
2810 	pbuf[sizeof(pbuf) - 1] = 0;
2811 
2812 	va_start(va, fmt);
2813 
2814 	vaf.fmt = fmt;
2815 	vaf.va = &va;
2816 
2817 	switch (level) {
2818 	case ql_log_fatal: /* FATAL LOG */
2819 		pr_crit("%s%pV", pbuf, &vaf);
2820 		break;
2821 	case ql_log_warn:
2822 		pr_err("%s%pV", pbuf, &vaf);
2823 		break;
2824 	case ql_log_info:
2825 		pr_warn("%s%pV", pbuf, &vaf);
2826 		break;
2827 	default:
2828 		pr_info("%s%pV", pbuf, &vaf);
2829 		break;
2830 	}
2831 
2832 	va_end(va);
2833 }
2834 
2835 /*
2836  * This function is for formatting and logging debug information.
2837  * It is to be used when a qpair is available. It formats the message
2838  * and logs it to the messages file.
2839  * parameters:
2840  * level: The level of the debug message to be printed.
2841  *        If the corresponding bit is set in ql2xextended_error_logging,
2842  *        this message will appear in the messages file.
2843  * qpair: Pointer to the struct qla_qpair.
2844  * id:    This is a unique identifier for the level. It identifies the
2845  *        part of the code from where the message originated.
2846  * fmt:   printf() style format string for the message to be displayed.
2847  */
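/*
 * Example (illustrative only): per-queue-pair debug tracing, printed only
 * when the ql_dbg_io bit is set in ql2xextended_error_logging. The id
 * 0x3076, sp and handle are hypothetical.
 *
 *	ql_dbg_qp(ql_dbg_io, qpair, 0x3076,
 *	    "Queued command sp=%p handle=%#x.\n", sp, handle);
 */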
2848 void
2849 ql_dbg_qp(uint32_t level, struct qla_qpair *qpair, int32_t id,
2850     const char *fmt, ...)
2851 {
2852 	va_list va;
2853 	struct va_format vaf;
2854 
2855 	if (!ql_mask_match(level))
2856 		return;
2857 
2858 	va_start(va, fmt);
2859 
2860 	vaf.fmt = fmt;
2861 	vaf.va = &va;
2862 
2863 	if (qpair != NULL) {
2864 		const struct pci_dev *pdev = qpair->pdev;
2865 		/* <module-name> <pci-name> <msg-id>:<host> Message */
2866 		pr_warn("%s [%s]-%04x: %pV",
2867 		    QL_MSGHDR, dev_name(&(pdev->dev)), id + ql_dbg_offset,
2868 		    &vaf);
2869 	} else {
2870 		pr_warn("%s [%s]-%04x: : %pV",
2871 			QL_MSGHDR, "0000:00:00.0", id + ql_dbg_offset, &vaf);
2872 	}
2873 
2874 	va_end(va);
2875 
2876 }
2877