xref: /openbmc/linux/drivers/scsi/qla2xxx/qla_nx2.c (revision bb0eb050)
1 /*
2  * QLogic Fibre Channel HBA Driver
3  * Copyright (c)  2003-2014 QLogic Corporation
4  *
5  * See LICENSE.qla2xxx for copyright and licensing details.
6  */
7 
8 #include <linux/vmalloc.h>
9 #include <linux/delay.h>
10 
11 #include "qla_def.h"
12 #include "qla_gbl.h"
13 
16 #define TIMEOUT_100_MS 100
17 
18 static const uint32_t qla8044_reg_tbl[] = {
19 	QLA8044_PEG_HALT_STATUS1,
20 	QLA8044_PEG_HALT_STATUS2,
21 	QLA8044_PEG_ALIVE_COUNTER,
22 	QLA8044_CRB_DRV_ACTIVE,
23 	QLA8044_CRB_DEV_STATE,
24 	QLA8044_CRB_DRV_STATE,
25 	QLA8044_CRB_DRV_SCRATCH,
26 	QLA8044_CRB_DEV_PART_INFO1,
27 	QLA8044_CRB_IDC_VER_MAJOR,
28 	QLA8044_FW_VER_MAJOR,
29 	QLA8044_FW_VER_MINOR,
30 	QLA8044_FW_VER_SUB,
31 	QLA8044_CMDPEG_STATE,
32 	QLA8044_ASIC_TEMP,
33 };
34 
35 /* 8044 Flash Read/Write functions */
36 uint32_t
37 qla8044_rd_reg(struct qla_hw_data *ha, ulong addr)
38 {
39 	return readl((void __iomem *) (ha->nx_pcibase + addr));
40 }
41 
42 void
43 qla8044_wr_reg(struct qla_hw_data *ha, ulong addr, uint32_t val)
44 {
45 	writel(val, (void __iomem *)((ha)->nx_pcibase + addr));
46 }
47 
48 int
49 qla8044_rd_direct(struct scsi_qla_host *vha,
50 	const uint32_t crb_reg)
51 {
52 	struct qla_hw_data *ha = vha->hw;
53 
54 	if (crb_reg < CRB_REG_INDEX_MAX)
55 		return qla8044_rd_reg(ha, qla8044_reg_tbl[crb_reg]);
56 	else
57 		return QLA_FUNCTION_FAILED;
58 }
59 
60 void
61 qla8044_wr_direct(struct scsi_qla_host *vha,
62 	const uint32_t crb_reg,
63 	const uint32_t value)
64 {
65 	struct qla_hw_data *ha = vha->hw;
66 
67 	if (crb_reg < CRB_REG_INDEX_MAX)
68 		qla8044_wr_reg(ha, qla8044_reg_tbl[crb_reg], value);
69 }
70 
71 static int
72 qla8044_set_win_base(scsi_qla_host_t *vha, uint32_t addr)
73 {
74 	uint32_t val;
75 	int ret_val = QLA_SUCCESS;
76 	struct qla_hw_data *ha = vha->hw;
77 
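	/* Select the indirect CRB access window for this PCI function and
	 * read it back to confirm the window actually moved. */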
78 	qla8044_wr_reg(ha, QLA8044_CRB_WIN_FUNC(ha->portnum), addr);
79 	val = qla8044_rd_reg(ha, QLA8044_CRB_WIN_FUNC(ha->portnum));
80 
81 	if (val != addr) {
82 		ql_log(ql_log_warn, vha, 0xb087,
83 		    "%s: Failed to set register window : "
84 		    "addr written 0x%x, read 0x%x!\n",
85 		    __func__, addr, val);
86 		ret_val = QLA_FUNCTION_FAILED;
87 	}
88 	return ret_val;
89 }
90 
91 static int
92 qla8044_rd_reg_indirect(scsi_qla_host_t *vha, uint32_t addr, uint32_t *data)
93 {
94 	int ret_val = QLA_SUCCESS;
95 	struct qla_hw_data *ha = vha->hw;
96 
97 	ret_val = qla8044_set_win_base(vha, addr);
98 	if (!ret_val)
99 		*data = qla8044_rd_reg(ha, QLA8044_WILDCARD);
100 	else
101 		ql_log(ql_log_warn, vha, 0xb088,
102 		    "%s: failed read of addr 0x%x!\n", __func__, addr);
103 	return ret_val;
104 }
105 
106 static int
107 qla8044_wr_reg_indirect(scsi_qla_host_t *vha, uint32_t addr, uint32_t data)
108 {
109 	int ret_val = QLA_SUCCESS;
110 	struct qla_hw_data *ha = vha->hw;
111 
112 	ret_val = qla8044_set_win_base(vha, addr);
113 	if (!ret_val)
114 		qla8044_wr_reg(ha, QLA8044_WILDCARD, data);
115 	else
116 		ql_log(ql_log_warn, vha, 0xb089,
117 		    "%s: failed wrt to addr 0x%x, data 0x%x\n",
118 		    __func__, addr, data);
119 	return ret_val;
120 }
121 
122 /*
123  * qla8044_read_write_crb_reg - Read from raddr and write value to waddr.
124  *
125  * @vha : Pointer to adapter structure
126  * @raddr : CRB address to read from
127  * @waddr : CRB address to write to
128  *
129  */
130 static void
131 qla8044_read_write_crb_reg(struct scsi_qla_host *vha,
132 	uint32_t raddr, uint32_t waddr)
133 {
134 	uint32_t value;
135 
136 	qla8044_rd_reg_indirect(vha, raddr, &value);
137 	qla8044_wr_reg_indirect(vha, waddr, value);
138 }
139 
140 static int
141 qla8044_poll_wait_for_ready(struct scsi_qla_host *vha, uint32_t addr1,
142 	uint32_t mask)
143 {
144 	unsigned long timeout;
145 	uint32_t temp;
146 
147 	/* jiffies after 100ms */
148 	timeout = jiffies + msecs_to_jiffies(TIMEOUT_100_MS);
149 	do {
150 		qla8044_rd_reg_indirect(vha, addr1, &temp);
151 		if ((temp & mask) != 0)
152 			break;
153 		if (time_after_eq(jiffies, timeout)) {
154 			ql_log(ql_log_warn, vha, 0xb151,
155 				"Error in processing rdmdio entry\n");
156 			return -1;
157 		}
158 	} while (1);
159 
160 	return 0;
161 }
162 
163 static uint32_t
164 qla8044_ipmdio_rd_reg(struct scsi_qla_host *vha,
165 	uint32_t addr1, uint32_t addr3, uint32_t mask, uint32_t addr)
166 {
167 	uint32_t temp, data;
168 	int ret = 0;
169 
170 	ret = qla8044_poll_wait_for_ready(vha, addr1, mask);
171 	if (ret == -1)
172 		return -1;
173 
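	/* Issue the MDIO read command for this register by writing
	 * (0x40000000 | addr) to the control register at addr1. */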
174 	temp = (0x40000000 | addr);
175 	qla8044_wr_reg_indirect(vha, addr1, temp);
176 
177 	ret = qla8044_poll_wait_for_ready(vha, addr1, mask);
178 	if (ret == -1)
179 		return 0;
180 
181 	qla8044_rd_reg_indirect(vha, addr3, &data);
182 
183 	return data;
184 }
185 
187 static int
188 qla8044_poll_wait_ipmdio_bus_idle(struct scsi_qla_host *vha,
189 	uint32_t addr1, uint32_t addr2, uint32_t addr3, uint32_t mask)
190 {
191 	unsigned long timeout;
192 	uint32_t temp;
193 
194 	/* jiffies after 100 msecs */
195 	timeout = jiffies + msecs_to_jiffies(TIMEOUT_100_MS);
196 	do {
197 		temp = qla8044_ipmdio_rd_reg(vha, addr1, addr3, mask, addr2);
198 		if ((temp & 0x1) != 1)
199 			break;
200 		if (time_after_eq(jiffies, timeout)) {
201 			ql_log(ql_log_warn, vha, 0xb152,
202 			    "Error in processing mdiobus idle\n");
203 			return -1;
204 		}
205 	} while (1);
206 
207 	return 0;
208 }
209 
210 static int
211 qla8044_ipmdio_wr_reg(struct scsi_qla_host *vha, uint32_t addr1,
212 	uint32_t addr3, uint32_t mask, uint32_t addr, uint32_t value)
213 {
214 	int ret = 0;
215 
216 	ret = qla8044_poll_wait_for_ready(vha, addr1, mask);
217 	if (ret == -1)
218 		return -1;
219 
220 	qla8044_wr_reg_indirect(vha, addr3, value);
221 	qla8044_wr_reg_indirect(vha, addr1, addr);
222 
223 	ret = qla8044_poll_wait_for_ready(vha, addr1, mask);
224 	if (ret == -1)
225 		return -1;
226 
227 	return 0;
228 }

229 /*
230  * qla8044_rmw_crb_reg - Read value from raddr, AND it with test_mask,
231  * shift left/right, OR/XOR with values from the RMW header, and write the result to waddr.
232  *
233  * @vha : Pointer to adapter structure
234  * @raddr : CRB address to read from
235  * @waddr : CRB address to write to
236  * @p_rmw_hdr : header with shift/or/xor values.
237  *
238  */
239 static void
240 qla8044_rmw_crb_reg(struct scsi_qla_host *vha,
241 	uint32_t raddr, uint32_t waddr,	struct qla8044_rmw *p_rmw_hdr)
242 {
243 	uint32_t value;
244 
245 	if (p_rmw_hdr->index_a)
246 		value = vha->reset_tmplt.array[p_rmw_hdr->index_a];
247 	else
248 		qla8044_rd_reg_indirect(vha, raddr, &value);
249 	value &= p_rmw_hdr->test_mask;
250 	value <<= p_rmw_hdr->shl;
251 	value >>= p_rmw_hdr->shr;
252 	value |= p_rmw_hdr->or_value;
253 	value ^= p_rmw_hdr->xor_value;
254 	qla8044_wr_reg_indirect(vha, waddr, value);
255 	return;
256 }
257 
258 static inline void
259 qla8044_set_qsnt_ready(struct scsi_qla_host *vha)
260 {
261 	uint32_t qsnt_state;
262 	struct qla_hw_data *ha = vha->hw;
263 
264 	qsnt_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX);
265 	qsnt_state |= (1 << ha->portnum);
266 	qla8044_wr_direct(vha, QLA8044_CRB_DRV_STATE_INDEX, qsnt_state);
267 	ql_log(ql_log_info, vha, 0xb08e, "%s(%ld): qsnt_state: 0x%08x\n",
268 	     __func__, vha->host_no, qsnt_state);
269 }
270 
271 void
272 qla8044_clear_qsnt_ready(struct scsi_qla_host *vha)
273 {
274 	uint32_t qsnt_state;
275 	struct qla_hw_data *ha = vha->hw;
276 
277 	qsnt_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX);
278 	qsnt_state &= ~(1 << ha->portnum);
279 	qla8044_wr_direct(vha, QLA8044_CRB_DRV_STATE_INDEX, qsnt_state);
280 	ql_log(ql_log_info, vha, 0xb08f, "%s(%ld): qsnt_state: 0x%08x\n",
281 	    __func__, vha->host_no, qsnt_state);
282 }
283 
284 /**
285  * qla8044_lock_recovery - Recovers the idc_lock.
286  * @vha : Pointer to adapter structure
288  *
289  * Lock Recovery Register
290  * 5-2	Lock recovery owner: Function ID of driver doing lock recovery,
291  *	valid if bits 1..0 are set by driver doing lock recovery.
292  * 1-0  1 - Driver intends to force unlock the IDC lock.
293  *	2 - Driver is moving forward to unlock the IDC lock. Driver clears
294  *	    this field after force unlocking the IDC lock.
295  *
296  * Lock Recovery process
297  * a. Read the IDC_LOCK_RECOVERY register. If the value in bits 1..0 is
298  *    greater than 0, then wait for the other driver to unlock otherwise
299  *    move to the next step.
300  * b. Indicate intent to force-unlock by writing 1h to the IDC_LOCK_RECOVERY
301  *    register bits 1..0 and also set the function# in bits 5..2.
302  * c. Read the IDC_LOCK_RECOVERY register again after a delay of 200ms.
303  *    Wait for the other driver to perform lock recovery if the function
304  *    number in bits 5..2 has changed, otherwise move to the next step.
305  * d. Write a value of 2h to the IDC_LOCK_RECOVERY register bits 1..0
306  *    leaving your function# in bits 5..2.
307  * e. Force unlock using the DRIVER_UNLOCK register and immediately clear
308  *    the IDC_LOCK_RECOVERY bits 5..0 by writing 0.
309  **/
310 static int
311 qla8044_lock_recovery(struct scsi_qla_host *vha)
312 {
313 	uint32_t lock = 0, lockid;
314 	struct qla_hw_data *ha = vha->hw;
315 
316 	lockid = qla8044_rd_reg(ha, QLA8044_DRV_LOCKRECOVERY);
317 
318 	/* Check for other Recovery in progress, go wait */
319 	if ((lockid & IDC_LOCK_RECOVERY_STATE_MASK) != 0)
320 		return QLA_FUNCTION_FAILED;
321 
322 	/* Intent to Recover */
323 	qla8044_wr_reg(ha, QLA8044_DRV_LOCKRECOVERY,
324 	    (ha->portnum <<
325 	     IDC_LOCK_RECOVERY_STATE_SHIFT_BITS) | INTENT_TO_RECOVER);
326 	msleep(200);
327 
328 	/* Check Intent to Recover is advertised */
329 	lockid = qla8044_rd_reg(ha, QLA8044_DRV_LOCKRECOVERY);
330 	if ((lockid & IDC_LOCK_RECOVERY_OWNER_MASK) != (ha->portnum <<
331 	    IDC_LOCK_RECOVERY_STATE_SHIFT_BITS))
332 		return QLA_FUNCTION_FAILED;
333 
334 	ql_dbg(ql_dbg_p3p, vha, 0xb08b,
335 	    "%s:%d: IDC Lock recovery initiated\n", __func__, ha->portnum);
336 
337 	/* Proceed to Recover */
338 	qla8044_wr_reg(ha, QLA8044_DRV_LOCKRECOVERY,
339 	    (ha->portnum << IDC_LOCK_RECOVERY_STATE_SHIFT_BITS) |
340 	    PROCEED_TO_RECOVER);
341 
342 	/* Force Unlock() */
343 	qla8044_wr_reg(ha, QLA8044_DRV_LOCK_ID, 0xFF);
344 	qla8044_rd_reg(ha, QLA8044_DRV_UNLOCK);
345 
346 	/* Clear bits 0-5 in IDC_RECOVERY register*/
347 	qla8044_wr_reg(ha, QLA8044_DRV_LOCKRECOVERY, 0);
348 
349 	/* Get lock() */
350 	lock = qla8044_rd_reg(ha, QLA8044_DRV_LOCK);
351 	if (lock) {
352 		lockid = qla8044_rd_reg(ha, QLA8044_DRV_LOCK_ID);
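		/* Bump the lock counter (bits 8-31) and record our function
		 * number in the owner field (bits 0-7). */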
353 		lockid = ((lockid + (1 << 8)) & ~0xFF) | ha->portnum;
354 		qla8044_wr_reg(ha, QLA8044_DRV_LOCK_ID, lockid);
355 		return QLA_SUCCESS;
356 	} else
357 		return QLA_FUNCTION_FAILED;
358 }
359 
360 int
361 qla8044_idc_lock(struct qla_hw_data *ha)
362 {
363 	uint32_t ret_val = QLA_SUCCESS, timeout = 0, status = 0;
364 	uint32_t lock_id, lock_cnt, func_num, tmo_owner = 0, first_owner = 0;
365 	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
366 
367 	while (status == 0) {
368 		/* acquire semaphore5 from PCI HW block */
369 		status = qla8044_rd_reg(ha, QLA8044_DRV_LOCK);
370 
371 		if (status) {
372 			/* Increment Counter (8-31) and update func_num (0-7) on
373 			 * getting a successful lock  */
374 			lock_id = qla8044_rd_reg(ha, QLA8044_DRV_LOCK_ID);
375 			lock_id = ((lock_id + (1 << 8)) & ~0xFF) | ha->portnum;
376 			qla8044_wr_reg(ha, QLA8044_DRV_LOCK_ID, lock_id);
377 			break;
378 		}
379 
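		/* Remember who held the lock when we started waiting so a
		 * later timeout can tell whether ownership ever changed. */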
380 		if (timeout == 0)
381 			first_owner = qla8044_rd_reg(ha, QLA8044_DRV_LOCK_ID);
382 
383 		if (++timeout >=
384 		    (QLA8044_DRV_LOCK_TIMEOUT / QLA8044_DRV_LOCK_MSLEEP)) {
385 			tmo_owner = qla8044_rd_reg(ha, QLA8044_DRV_LOCK_ID);
386 			func_num = tmo_owner & 0xFF;
387 			lock_cnt = tmo_owner >> 8;
388 			ql_log(ql_log_warn, vha, 0xb114,
389 			    "%s: Lock by func %d failed after 2s, lock held "
390 			    "by func %d, lock count %d, first_owner %d\n",
391 			    __func__, ha->portnum, func_num, lock_cnt,
392 			    (first_owner & 0xFF));
393 			if (first_owner != tmo_owner) {
394 				/* Some other driver got lock,
395 				 * OR same driver got lock again (counter
396 				 * value changed), when we were waiting for
397 				 * lock. Retry for another 2 sec */
398 				ql_dbg(ql_dbg_p3p, vha, 0xb115,
399 				    "%s: %d: IDC lock failed\n",
400 				    __func__, ha->portnum);
401 				timeout = 0;
402 			} else {
403 				/* Same driver holding lock > 2sec.
404 				 * Force Recovery */
405 				if (qla8044_lock_recovery(vha) == QLA_SUCCESS) {
406 					/* Recovered and got lock */
407 					ret_val = QLA_SUCCESS;
408 					ql_dbg(ql_dbg_p3p, vha, 0xb116,
409 					    "%s: IDC lock Recovery by %d "
410 					    "successful...\n", __func__,
411 					     ha->portnum);
412 				}
413 				/* Recovery Failed, some other function
414 				 * has the lock, wait for 2secs
415 				 * and retry
416 				 */
417 				ql_dbg(ql_dbg_p3p, vha, 0xb08a,
418 				       "%s: IDC lock Recovery by %d "
419 				       "failed, Retrying timeout\n", __func__,
420 				       ha->portnum);
421 				timeout = 0;
422 			}
423 		}
424 		msleep(QLA8044_DRV_LOCK_MSLEEP);
425 	}
426 	return ret_val;
427 }
428 
429 void
430 qla8044_idc_unlock(struct qla_hw_data *ha)
431 {
432 	int id;
433 	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
434 
435 	id = qla8044_rd_reg(ha, QLA8044_DRV_LOCK_ID);
436 
437 	if ((id & 0xFF) != ha->portnum) {
438 		ql_log(ql_log_warn, vha, 0xb118,
439 		    "%s: IDC Unlock by %d failed, lock owner is %d!\n",
440 		    __func__, ha->portnum, (id & 0xFF));
441 		return;
442 	}
443 
444 	/* Keep the lock counter value, set the owner function number (bits 0-7) to 0xFF */
445 	qla8044_wr_reg(ha, QLA8044_DRV_LOCK_ID, (id | 0xFF));
446 	qla8044_rd_reg(ha, QLA8044_DRV_UNLOCK);
447 }
448 
449 /* 8044 Flash Lock/Unlock functions */
450 static int
451 qla8044_flash_lock(scsi_qla_host_t *vha)
452 {
453 	int lock_owner;
454 	int timeout = 0;
455 	uint32_t lock_status = 0;
456 	int ret_val = QLA_SUCCESS;
457 	struct qla_hw_data *ha = vha->hw;
458 
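	/* Reading FLASH_LOCK attempts to take the flash hardware semaphore;
	 * a non-zero value means the lock was granted to this function. */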
459 	while (lock_status == 0) {
460 		lock_status = qla8044_rd_reg(ha, QLA8044_FLASH_LOCK);
461 		if (lock_status)
462 			break;
463 
464 		if (++timeout >= QLA8044_FLASH_LOCK_TIMEOUT / 20) {
465 			lock_owner = qla8044_rd_reg(ha,
466 			    QLA8044_FLASH_LOCK_ID);
467 			ql_log(ql_log_warn, vha, 0xb113,
468 			    "%s: Simultaneous flash access by following ports, active port = %d: accessing port = %d",
469 			    __func__, ha->portnum, lock_owner);
470 			ret_val = QLA_FUNCTION_FAILED;
471 			break;
472 		}
473 		msleep(20);
474 	}
475 	qla8044_wr_reg(ha, QLA8044_FLASH_LOCK_ID, ha->portnum);
476 	return ret_val;
477 }
478 
479 static void
480 qla8044_flash_unlock(scsi_qla_host_t *vha)
481 {
482 	struct qla_hw_data *ha = vha->hw;
483 
484 	/* Reading FLASH_UNLOCK register unlocks the Flash */
485 	qla8044_wr_reg(ha, QLA8044_FLASH_LOCK_ID, 0xFF);
486 	qla8044_rd_reg(ha, QLA8044_FLASH_UNLOCK);
487 }
488 
489 
490 static
491 void qla8044_flash_lock_recovery(struct scsi_qla_host *vha)
492 {
494 	if (qla8044_flash_lock(vha)) {
495 		/* Someone else is holding the lock. */
496 		ql_log(ql_log_warn, vha, 0xb120, "Resetting flash_lock\n");
497 	}
498 
499 	/*
500 	 * Either we got the lock, or someone
501 	 * else died while holding it.
502 	 * In either case, unlock.
503 	 */
504 	qla8044_flash_unlock(vha);
505 }
506 
507 /*
508  * flash_addr is a byte address; u32_word_count is a count of 32-bit words.
509  */
510 static int
511 qla8044_read_flash_data(scsi_qla_host_t *vha,  uint8_t *p_data,
512 	uint32_t flash_addr, int u32_word_count)
513 {
514 	int i, ret_val = QLA_SUCCESS;
515 	uint32_t u32_word;
516 
517 	if (qla8044_flash_lock(vha) != QLA_SUCCESS) {
518 		ret_val = QLA_FUNCTION_FAILED;
519 		goto exit_lock_error;
520 	}
521 
522 	if (flash_addr & 0x03) {
523 		ql_log(ql_log_warn, vha, 0xb117,
524 		    "%s: Illegal addr = 0x%x\n", __func__, flash_addr);
525 		ret_val = QLA_FUNCTION_FAILED;
526 		goto exit_flash_read;
527 	}
528 
529 	for (i = 0; i < u32_word_count; i++) {
530 		if (qla8044_wr_reg_indirect(vha, QLA8044_FLASH_DIRECT_WINDOW,
531 		    (flash_addr & 0xFFFF0000))) {
532 			ql_log(ql_log_warn, vha, 0xb119,
533 			    "%s: failed to write addr 0x%x to "
534 			    "FLASH_DIRECT_WINDOW!\n",
535 			    __func__, flash_addr);
536 			ret_val = QLA_FUNCTION_FAILED;
537 			goto exit_flash_read;
538 		}
539 
540 		ret_val = qla8044_rd_reg_indirect(vha,
541 		    QLA8044_FLASH_DIRECT_DATA(flash_addr),
542 		    &u32_word);
543 		if (ret_val != QLA_SUCCESS) {
544 			ql_log(ql_log_warn, vha, 0xb08c,
545 			    "%s: failed to read addr 0x%x!\n",
546 			    __func__, flash_addr);
547 			goto exit_flash_read;
548 		}
549 
550 		*(uint32_t *)p_data = u32_word;
551 		p_data = p_data + 4;
552 		flash_addr = flash_addr + 4;
553 	}
554 
555 exit_flash_read:
556 	qla8044_flash_unlock(vha);
557 
558 exit_lock_error:
559 	return ret_val;
560 }
561 
562 /*
563  * Address and length are in bytes.
564  */
565 uint8_t *
566 qla8044_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
567 	uint32_t offset, uint32_t length)
568 {
569 	scsi_block_requests(vha->host);
570 	if (qla8044_read_flash_data(vha, (uint8_t *)buf, offset, length / 4)
571 	    != QLA_SUCCESS) {
572 		ql_log(ql_log_warn, vha,  0xb08d,
573 		    "%s: Failed to read from flash\n",
574 		    __func__);
575 	}
576 	scsi_unblock_requests(vha->host);
577 	return buf;
578 }
579 
580 static inline int
581 qla8044_need_reset(struct scsi_qla_host *vha)
582 {
583 	uint32_t drv_state, drv_active;
584 	int rval;
585 	struct qla_hw_data *ha = vha->hw;
586 
587 	drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX);
588 	drv_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX);
589 
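	/* Reset is needed if this function's bit is already set in DRV_STATE,
	 * or if an EEH error is in progress while any driver is active. */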
590 	rval = drv_state & (1 << ha->portnum);
591 
592 	if (ha->flags.eeh_busy && drv_active)
593 		rval = 1;
594 	return rval;
595 }
596 
597 /*
598  * qla8044_write_list - Write the value (p_entry->arg2) to address specified
599  * by p_entry->arg1 for all entries in header with delay of p_hdr->delay between
600  * entries.
601  *
602  * @vha : Pointer to adapter structure
603  * @p_hdr : reset_entry header for WRITE_LIST opcode.
604  *
605  */
606 static void
607 qla8044_write_list(struct scsi_qla_host *vha,
608 	struct qla8044_reset_entry_hdr *p_hdr)
609 {
610 	struct qla8044_entry *p_entry;
611 	uint32_t i;
612 
613 	p_entry = (struct qla8044_entry *)((char *)p_hdr +
614 	    sizeof(struct qla8044_reset_entry_hdr));
615 
616 	for (i = 0; i < p_hdr->count; i++, p_entry++) {
617 		qla8044_wr_reg_indirect(vha, p_entry->arg1, p_entry->arg2);
618 		if (p_hdr->delay)
619 			udelay((uint32_t)(p_hdr->delay));
620 	}
621 }
622 
623 /*
624  * qla8044_read_write_list - Read from address specified by p_entry->arg1,
625  * write value read to address specified by p_entry->arg2, for all entries in
626  * header with delay of p_hdr->delay between entries.
627  *
628  * @vha : Pointer to adapter structure
629  * @p_hdr : reset_entry header for READ_WRITE_LIST opcode.
630  *
631  */
632 static void
633 qla8044_read_write_list(struct scsi_qla_host *vha,
634 	struct qla8044_reset_entry_hdr *p_hdr)
635 {
636 	struct qla8044_entry *p_entry;
637 	uint32_t i;
638 
639 	p_entry = (struct qla8044_entry *)((char *)p_hdr +
640 	    sizeof(struct qla8044_reset_entry_hdr));
641 
642 	for (i = 0; i < p_hdr->count; i++, p_entry++) {
643 		qla8044_read_write_crb_reg(vha, p_entry->arg1,
644 		    p_entry->arg2);
645 		if (p_hdr->delay)
646 			udelay((uint32_t)(p_hdr->delay));
647 	}
648 }
649 
650 /*
651  * qla8044_poll_reg - Poll the given CRB addr for duration msecs till
652  * value read ANDed with test_mask is equal to test_result.
653  *
654  * @vha : Pointer to adapter structure
655  * @addr : CRB register address
656  * @duration : Poll for a total of "duration" msecs
657  * @test_mask : Mask to AND with the value read
658  * @test_result : Compare (value & test_mask) with test_result.
659  *
660  * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED
661  */
662 static int
663 qla8044_poll_reg(struct scsi_qla_host *vha, uint32_t addr,
664 	int duration, uint32_t test_mask, uint32_t test_result)
665 {
666 	uint32_t value;
667 	int timeout_error;
668 	uint8_t retries;
669 	int ret_val = QLA_SUCCESS;
670 
671 	ret_val = qla8044_rd_reg_indirect(vha, addr, &value);
672 	if (ret_val == QLA_FUNCTION_FAILED) {
673 		timeout_error = 1;
674 		goto exit_poll_reg;
675 	}
676 
677 	/* poll every 1/10 of the total duration */
678 	retries = duration/10;
679 
680 	do {
681 		if ((value & test_mask) != test_result) {
682 			timeout_error = 1;
683 			msleep(duration/10);
684 			ret_val = qla8044_rd_reg_indirect(vha, addr, &value);
685 			if (ret_val == QLA_FUNCTION_FAILED) {
686 				timeout_error = 1;
687 				goto exit_poll_reg;
688 			}
689 		} else {
690 			timeout_error = 0;
691 			break;
692 		}
693 	} while (retries--);
694 
695 exit_poll_reg:
696 	if (timeout_error) {
697 		vha->reset_tmplt.seq_error++;
698 		ql_log(ql_log_fatal, vha, 0xb090,
699 		    "%s: Poll Failed: 0x%08x 0x%08x 0x%08x\n",
700 		    __func__, value, test_mask, test_result);
701 	}
702 
703 	return timeout_error;
704 }
705 
706 /*
707  * qla8044_poll_list - For all entries in the POLL_LIST header, poll read CRB
708  * register specified by p_entry->arg1 and compare (value AND test_mask) with
709  * test_result to validate it. Wait for p_hdr->delay between processing entries.
710  *
711  * @vha : Pointer to adapter structure
712  * @p_hdr : reset_entry header for POLL_LIST opcode.
713  *
714  */
715 static void
716 qla8044_poll_list(struct scsi_qla_host *vha,
717 	struct qla8044_reset_entry_hdr *p_hdr)
718 {
719 	long delay;
720 	struct qla8044_entry *p_entry;
721 	struct qla8044_poll *p_poll;
722 	uint32_t i;
723 	uint32_t value;
724 
725 	p_poll = (struct qla8044_poll *)
726 		((char *)p_hdr + sizeof(struct qla8044_reset_entry_hdr));
727 
728 	/* Entries start after the 8-byte qla8044_poll header, which contains
729 	 * the test_mask and test_value.
730 	 */
731 	p_entry = (struct qla8044_entry *)((char *)p_poll +
732 	    sizeof(struct qla8044_poll));
733 
734 	delay = (long)p_hdr->delay;
735 
736 	if (!delay) {
737 		for (i = 0; i < p_hdr->count; i++, p_entry++)
738 			qla8044_poll_reg(vha, p_entry->arg1,
739 			    delay, p_poll->test_mask, p_poll->test_value);
740 	} else {
741 		for (i = 0; i < p_hdr->count; i++, p_entry++) {
742 			if (delay) {
743 				if (qla8044_poll_reg(vha,
744 				    p_entry->arg1, delay,
745 				    p_poll->test_mask,
746 				    p_poll->test_value)) {
747 					/* If (data_read & test_mask) !=
748 					 * test_value, read TIMEOUT_ADDR
749 					 * (arg1) and ADDR (arg2) registers
750 					 */
752 					qla8044_rd_reg_indirect(vha,
753 					    p_entry->arg1, &value);
754 					qla8044_rd_reg_indirect(vha,
755 					    p_entry->arg2, &value);
756 				}
757 			}
758 		}
759 	}
760 }
761 
762 /*
763  * qla8044_poll_write_list - Write dr_value, ar_value to dr_addr/ar_addr,
764  * read ar_addr, if (value & test_mask) != test_value re-read till timeout
765  * expires.
766  *
767  * @vha : Pointer to adapter structure
768  * @p_hdr : reset entry header for POLL_WRITE_LIST opcode.
769  *
770  */
771 static void
772 qla8044_poll_write_list(struct scsi_qla_host *vha,
773 	struct qla8044_reset_entry_hdr *p_hdr)
774 {
775 	long delay;
776 	struct qla8044_quad_entry *p_entry;
777 	struct qla8044_poll *p_poll;
778 	uint32_t i;
779 
780 	p_poll = (struct qla8044_poll *)((char *)p_hdr +
781 	    sizeof(struct qla8044_reset_entry_hdr));
782 
783 	p_entry = (struct qla8044_quad_entry *)((char *)p_poll +
784 	    sizeof(struct qla8044_poll));
785 
786 	delay = (long)p_hdr->delay;
787 
788 	for (i = 0; i < p_hdr->count; i++, p_entry++) {
789 		qla8044_wr_reg_indirect(vha,
790 		    p_entry->dr_addr, p_entry->dr_value);
791 		qla8044_wr_reg_indirect(vha,
792 		    p_entry->ar_addr, p_entry->ar_value);
793 		if (delay) {
794 			if (qla8044_poll_reg(vha,
795 			    p_entry->ar_addr, delay,
796 			    p_poll->test_mask,
797 			    p_poll->test_value)) {
798 				ql_dbg(ql_dbg_p3p, vha, 0xb091,
799 				    "%s: Timeout Error: poll list, ",
800 				    __func__);
801 				ql_dbg(ql_dbg_p3p, vha, 0xb092,
802 				    "item_num %d, entry_num %d\n", i,
803 				    vha->reset_tmplt.seq_index);
804 			}
805 		}
806 	}
807 }
808 
809 /*
810  * qla8044_read_modify_write - Read value from p_entry->arg1, modify the
811  * value, write value to p_entry->arg2. Process entries with p_hdr->delay
812  * between entries.
813  *
814  * @vha : Pointer to adapter structure
815  * @p_hdr : header with shift/or/xor values.
816  *
817  */
818 static void
819 qla8044_read_modify_write(struct scsi_qla_host *vha,
820 	struct qla8044_reset_entry_hdr *p_hdr)
821 {
822 	struct qla8044_entry *p_entry;
823 	struct qla8044_rmw *p_rmw_hdr;
824 	uint32_t i;
825 
826 	p_rmw_hdr = (struct qla8044_rmw *)((char *)p_hdr +
827 	    sizeof(struct qla8044_reset_entry_hdr));
828 
829 	p_entry = (struct qla8044_entry *)((char *)p_rmw_hdr +
830 	    sizeof(struct qla8044_rmw));
831 
832 	for (i = 0; i < p_hdr->count; i++, p_entry++) {
833 		qla8044_rmw_crb_reg(vha, p_entry->arg1,
834 		    p_entry->arg2, p_rmw_hdr);
835 		if (p_hdr->delay)
836 			udelay((uint32_t)(p_hdr->delay));
837 	}
838 }
839 
840 /*
841  * qla8044_pause - Wait for p_hdr->delay msecs, called between processing
842  * two entries of a sequence.
843  *
844  * @vha : Pointer to adapter structure
845  * @p_hdr : Common reset entry header.
846  *
847  */
848 static
849 void qla8044_pause(struct scsi_qla_host *vha,
850 	struct qla8044_reset_entry_hdr *p_hdr)
851 {
852 	if (p_hdr->delay)
853 		mdelay((uint32_t)((long)p_hdr->delay));
854 }
855 
856 /*
857  * qla8044_template_end - Indicates end of reset sequence processing.
858  *
859  * @vha : Pointer to adapter structure
860  * @p_hdr : Common reset entry header.
861  *
862  */
863 static void
864 qla8044_template_end(struct scsi_qla_host *vha,
865 	struct qla8044_reset_entry_hdr *p_hdr)
866 {
867 	vha->reset_tmplt.template_end = 1;
868 
869 	if (vha->reset_tmplt.seq_error == 0) {
870 		ql_dbg(ql_dbg_p3p, vha, 0xb093,
871 		    "%s: Reset sequence completed SUCCESSFULLY.\n", __func__);
872 	} else {
873 		ql_log(ql_log_fatal, vha, 0xb094,
874 		    "%s: Reset sequence completed with some timeout "
875 		    "errors.\n", __func__);
876 	}
877 }
878 
879 /*
880  * qla8044_poll_read_list - Write ar_value to ar_addr register, read ar_addr,
881  * if (value & test_mask) != test_value re-read till the timeout expires,
882  * read dr_addr register and assign to reset_tmplt.array.
883  *
884  * @vha : Pointer to adapter structure
885  * @p_hdr : Common reset entry header.
886  *
887  */
888 static void
889 qla8044_poll_read_list(struct scsi_qla_host *vha,
890 	struct qla8044_reset_entry_hdr *p_hdr)
891 {
892 	long delay;
893 	int index;
894 	struct qla8044_quad_entry *p_entry;
895 	struct qla8044_poll *p_poll;
896 	uint32_t i;
897 	uint32_t value;
898 
899 	p_poll = (struct qla8044_poll *)
900 		((char *)p_hdr + sizeof(struct qla8044_reset_entry_hdr));
901 
902 	p_entry = (struct qla8044_quad_entry *)
903 		((char *)p_poll + sizeof(struct qla8044_poll));
904 
905 	delay = (long)p_hdr->delay;
906 
907 	for (i = 0; i < p_hdr->count; i++, p_entry++) {
908 		qla8044_wr_reg_indirect(vha, p_entry->ar_addr,
909 		    p_entry->ar_value);
910 		if (delay) {
911 			if (qla8044_poll_reg(vha, p_entry->ar_addr, delay,
912 			    p_poll->test_mask, p_poll->test_value)) {
913 				ql_dbg(ql_dbg_p3p, vha, 0xb095,
914 				    "%s: Timeout Error: poll "
915 				    "list, ", __func__);
916 				ql_dbg(ql_dbg_p3p, vha, 0xb096,
917 				    "Item_num %d, "
918 				    "entry_num %d\n", i,
919 				    vha->reset_tmplt.seq_index);
920 			} else {
921 				index = vha->reset_tmplt.array_index;
922 				qla8044_rd_reg_indirect(vha,
923 				    p_entry->dr_addr, &value);
924 				vha->reset_tmplt.array[index++] = value;
925 				if (index == QLA8044_MAX_RESET_SEQ_ENTRIES)
926 					vha->reset_tmplt.array_index = 1;
927 			}
928 		}
929 	}
930 }
931 
932 /*
933  * qla8044_process_reset_template - Process all entries in reset template
934  * till entry with SEQ_END opcode, which indicates end of the reset template
935  * processing. Each entry has a Reset Entry header, entry opcode/command, with
936  * size of the entry, number of entries in sub-sequence and delay in microsecs
937  * or timeout in millisecs.
938  *
939  * @vha : Pointer to adapter structure
940  * @p_buff : Pointer to the start of the reset sequence to process.
941  *
942  */
943 static void
944 qla8044_process_reset_template(struct scsi_qla_host *vha,
945 	char *p_buff)
946 {
947 	int index, entries;
948 	struct qla8044_reset_entry_hdr *p_hdr;
949 	char *p_entry = p_buff;
950 
951 	vha->reset_tmplt.seq_end = 0;
952 	vha->reset_tmplt.template_end = 0;
953 	entries = vha->reset_tmplt.hdr->entries;
954 	index = vha->reset_tmplt.seq_index;
955 
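	/* Walk the entries: each starts with a reset_entry_hdr whose size
	 * field gives the offset of the next entry; stop at SEQ_END or when
	 * the template entry count is exhausted. */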
956 	for (; (!vha->reset_tmplt.seq_end) && (index  < entries); index++) {
957 		p_hdr = (struct qla8044_reset_entry_hdr *)p_entry;
958 		switch (p_hdr->cmd) {
959 		case OPCODE_NOP:
960 			break;
961 		case OPCODE_WRITE_LIST:
962 			qla8044_write_list(vha, p_hdr);
963 			break;
964 		case OPCODE_READ_WRITE_LIST:
965 			qla8044_read_write_list(vha, p_hdr);
966 			break;
967 		case OPCODE_POLL_LIST:
968 			qla8044_poll_list(vha, p_hdr);
969 			break;
970 		case OPCODE_POLL_WRITE_LIST:
971 			qla8044_poll_write_list(vha, p_hdr);
972 			break;
973 		case OPCODE_READ_MODIFY_WRITE:
974 			qla8044_read_modify_write(vha, p_hdr);
975 			break;
976 		case OPCODE_SEQ_PAUSE:
977 			qla8044_pause(vha, p_hdr);
978 			break;
979 		case OPCODE_SEQ_END:
980 			vha->reset_tmplt.seq_end = 1;
981 			break;
982 		case OPCODE_TMPL_END:
983 			qla8044_template_end(vha, p_hdr);
984 			break;
985 		case OPCODE_POLL_READ_LIST:
986 			qla8044_poll_read_list(vha, p_hdr);
987 			break;
988 		default:
989 			ql_log(ql_log_fatal, vha, 0xb097,
990 			    "%s: Unknown command ==> 0x%04x on "
991 			    "entry = %d\n", __func__, p_hdr->cmd, index);
992 			break;
993 		}
994 		/* Set pointer to next entry in the sequence. */
997 		p_entry += p_hdr->size;
998 	}
999 	vha->reset_tmplt.seq_index = index;
1000 }
1001 
1002 static void
1003 qla8044_process_init_seq(struct scsi_qla_host *vha)
1004 {
1005 	qla8044_process_reset_template(vha,
1006 	    vha->reset_tmplt.init_offset);
1007 	if (vha->reset_tmplt.seq_end != 1)
1008 		ql_log(ql_log_fatal, vha, 0xb098,
1009 		    "%s: Abrupt INIT Sub-Sequence end.\n",
1010 		    __func__);
1011 }
1012 
1013 static void
1014 qla8044_process_stop_seq(struct scsi_qla_host *vha)
1015 {
1016 	vha->reset_tmplt.seq_index = 0;
1017 	qla8044_process_reset_template(vha, vha->reset_tmplt.stop_offset);
1018 	if (vha->reset_tmplt.seq_end != 1)
1019 		ql_log(ql_log_fatal, vha, 0xb099,
1020 		    "%s: Abrupt STOP Sub-Sequence end.\n", __func__);
1021 }
1022 
1023 static void
1024 qla8044_process_start_seq(struct scsi_qla_host *vha)
1025 {
1026 	qla8044_process_reset_template(vha, vha->reset_tmplt.start_offset);
1027 	if (vha->reset_tmplt.template_end != 1)
1028 		ql_log(ql_log_fatal, vha, 0xb09a,
1029 		    "%s: Abrupt START Sub-Sequence end.\n",
1030 		    __func__);
1031 }
1032 
1033 static int
1034 qla8044_lockless_flash_read_u32(struct scsi_qla_host *vha,
1035 	uint32_t flash_addr, uint8_t *p_data, int u32_word_count)
1036 {
1037 	uint32_t i;
1038 	uint32_t u32_word;
1039 	uint32_t flash_offset;
1040 	uint32_t addr = flash_addr;
1041 	int ret_val = QLA_SUCCESS;
1042 
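	/* Offset of the starting address within its flash sector, used to
	 * detect when the read crosses a sector boundary. */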
1043 	flash_offset = addr & (QLA8044_FLASH_SECTOR_SIZE - 1);
1044 
1045 	if (addr & 0x3) {
1046 		ql_log(ql_log_fatal, vha, 0xb09b, "%s: Illegal addr = 0x%x\n",
1047 		    __func__, addr);
1048 		ret_val = QLA_FUNCTION_FAILED;
1049 		goto exit_lockless_read;
1050 	}
1051 
1052 	ret_val = qla8044_wr_reg_indirect(vha,
1053 	    QLA8044_FLASH_DIRECT_WINDOW, (addr));
1054 
1055 	if (ret_val != QLA_SUCCESS) {
1056 		ql_log(ql_log_fatal, vha, 0xb09c,
1057 		    "%s: failed to write addr 0x%x to FLASH_DIRECT_WINDOW!\n",
1058 		    __func__, addr);
1059 		goto exit_lockless_read;
1060 	}
1061 
1062 	/* Check if data is spread across multiple sectors  */
1063 	if ((flash_offset + (u32_word_count * sizeof(uint32_t))) >
1064 	    (QLA8044_FLASH_SECTOR_SIZE - 1)) {
1065 		/* Multi sector read */
1066 		for (i = 0; i < u32_word_count; i++) {
1067 			ret_val = qla8044_rd_reg_indirect(vha,
1068 			    QLA8044_FLASH_DIRECT_DATA(addr), &u32_word);
1069 			if (ret_val != QLA_SUCCESS) {
1070 				ql_log(ql_log_fatal, vha, 0xb09d,
1071 				    "%s: failed to read addr 0x%x!\n",
1072 				    __func__, addr);
1073 				goto exit_lockless_read;
1074 			}
1075 			*(uint32_t *)p_data  = u32_word;
1076 			p_data = p_data + 4;
1077 			addr = addr + 4;
1078 			flash_offset = flash_offset + 4;
1079 			if (flash_offset > (QLA8044_FLASH_SECTOR_SIZE - 1)) {
1080 				/* This write is needed once for each sector */
1081 				ret_val = qla8044_wr_reg_indirect(vha,
1082 				    QLA8044_FLASH_DIRECT_WINDOW, (addr));
1083 				if (ret_val != QLA_SUCCESS) {
1084 					ql_log(ql_log_fatal, vha, 0xb09f,
1085 					    "%s: failed to write addr "
1086 					    "0x%x to FLASH_DIRECT_WINDOW!\n",
1087 					    __func__, addr);
1088 					goto exit_lockless_read;
1089 				}
1090 				flash_offset = 0;
1091 			}
1092 		}
1093 	} else {
1094 		/* Single sector read */
1095 		for (i = 0; i < u32_word_count; i++) {
1096 			ret_val = qla8044_rd_reg_indirect(vha,
1097 			    QLA8044_FLASH_DIRECT_DATA(addr), &u32_word);
1098 			if (ret_val != QLA_SUCCESS) {
1099 				ql_log(ql_log_fatal, vha, 0xb0a0,
1100 				    "%s: failed to read addr 0x%x!\n",
1101 				    __func__, addr);
1102 				goto exit_lockless_read;
1103 			}
1104 			*(uint32_t *)p_data = u32_word;
1105 			p_data = p_data + 4;
1106 			addr = addr + 4;
1107 		}
1108 	}
1109 
1110 exit_lockless_read:
1111 	return ret_val;
1112 }
1113 
1114 /*
1115  * qla8044_ms_mem_write_128b - Writes data to MS/off-chip memory
1116  *
1117  * @vha : Pointer to adapter structure
1118  * @addr : MS/off-chip memory address to write to
1119  * @data : Data to be written
1120  * @count : Number of 128-bit (16-byte) chunks to write
1121  *
1122  * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED
1123  */
1124 static int
1125 qla8044_ms_mem_write_128b(struct scsi_qla_host *vha,
1126 	uint64_t addr, uint32_t *data, uint32_t count)
1127 {
1128 	int i, j, ret_val = QLA_SUCCESS;
1129 	uint32_t agt_ctrl;
1130 	unsigned long flags;
1131 	struct qla_hw_data *ha = vha->hw;
1132 
1133 	/* Only 128-bit aligned access */
1134 	if (addr & 0xF) {
1135 		ret_val = QLA_FUNCTION_FAILED;
1136 		goto exit_ms_mem_write;
1137 	}
1138 	write_lock_irqsave(&ha->hw_lock, flags);
1139 
1140 	/* Write address */
1141 	ret_val = qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_ADDR_HI, 0);
1142 	if (ret_val == QLA_FUNCTION_FAILED) {
1143 		ql_log(ql_log_fatal, vha, 0xb0a1,
1144 		    "%s: write to AGT_ADDR_HI failed!\n", __func__);
1145 		goto exit_ms_mem_write_unlock;
1146 	}
1147 
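	/* Each iteration writes one 128-bit (16-byte) chunk: program the
	 * destination address, load four 32-bit data words, trigger the
	 * write and poll the agent control register until it goes idle. */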
1148 	for (i = 0; i < count; i++, addr += 16) {
1149 		if (!((addr_in_range(addr, QLA8044_ADDR_QDR_NET,
1150 		    QLA8044_ADDR_QDR_NET_MAX)) ||
1151 		    (addr_in_range(addr, QLA8044_ADDR_DDR_NET,
1152 			QLA8044_ADDR_DDR_NET_MAX)))) {
1153 			ret_val = QLA_FUNCTION_FAILED;
1154 			goto exit_ms_mem_write_unlock;
1155 		}
1156 
1157 		ret_val = qla8044_wr_reg_indirect(vha,
1158 		    MD_MIU_TEST_AGT_ADDR_LO, addr);
1159 
1160 		/* Write data */
1161 		ret_val += qla8044_wr_reg_indirect(vha,
1162 		    MD_MIU_TEST_AGT_WRDATA_LO, *data++);
1163 		ret_val += qla8044_wr_reg_indirect(vha,
1164 		    MD_MIU_TEST_AGT_WRDATA_HI, *data++);
1165 		ret_val += qla8044_wr_reg_indirect(vha,
1166 		    MD_MIU_TEST_AGT_WRDATA_ULO, *data++);
1167 		ret_val += qla8044_wr_reg_indirect(vha,
1168 		    MD_MIU_TEST_AGT_WRDATA_UHI, *data++);
1169 		if (ret_val == QLA_FUNCTION_FAILED) {
1170 			ql_log(ql_log_fatal, vha, 0xb0a2,
1171 			    "%s: write to AGT_WRDATA failed!\n",
1172 			    __func__);
1173 			goto exit_ms_mem_write_unlock;
1174 		}
1175 
1176 		/* Check write status */
1177 		ret_val = qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_CTRL,
1178 		    MIU_TA_CTL_WRITE_ENABLE);
1179 		ret_val += qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_CTRL,
1180 		    MIU_TA_CTL_WRITE_START);
1181 		if (ret_val == QLA_FUNCTION_FAILED) {
1182 			ql_log(ql_log_fatal, vha, 0xb0a3,
1183 			    "%s: write to AGT_CTRL failed!\n", __func__);
1184 			goto exit_ms_mem_write_unlock;
1185 		}
1186 
1187 		for (j = 0; j < MAX_CTL_CHECK; j++) {
1188 			ret_val = qla8044_rd_reg_indirect(vha,
1189 			    MD_MIU_TEST_AGT_CTRL, &agt_ctrl);
1190 			if (ret_val == QLA_FUNCTION_FAILED) {
1191 				ql_log(ql_log_fatal, vha, 0xb0a4,
1192 				    "%s: failed to read "
1193 				    "MD_MIU_TEST_AGT_CTRL!\n", __func__);
1194 				goto exit_ms_mem_write_unlock;
1195 			}
1196 			if ((agt_ctrl & MIU_TA_CTL_BUSY) == 0)
1197 				break;
1198 		}
1199 
1200 		/* Status check failed */
1201 		if (j >= MAX_CTL_CHECK) {
1202 			ql_log(ql_log_fatal, vha, 0xb0a5,
1203 			    "%s: MS memory write failed!\n",
1204 			   __func__);
1205 			ret_val = QLA_FUNCTION_FAILED;
1206 			goto exit_ms_mem_write_unlock;
1207 		}
1208 	}
1209 
1210 exit_ms_mem_write_unlock:
1211 	write_unlock_irqrestore(&ha->hw_lock, flags);
1212 
1213 exit_ms_mem_write:
1214 	return ret_val;
1215 }
1216 
1217 static int
1218 qla8044_copy_bootloader(struct scsi_qla_host *vha)
1219 {
1220 	uint8_t *p_cache;
1221 	uint32_t src, count, size;
1222 	uint64_t dest;
1223 	int ret_val = QLA_SUCCESS;
1224 	struct qla_hw_data *ha = vha->hw;
1225 
1226 	src = QLA8044_BOOTLOADER_FLASH_ADDR;
1227 	dest = qla8044_rd_reg(ha, QLA8044_BOOTLOADER_ADDR);
1228 	size = qla8044_rd_reg(ha, QLA8044_BOOTLOADER_SIZE);
1229 
1230 	/* Round size up to a 16-byte (128-bit) boundary */
1231 	if (size & 0xF)
1232 		size = (size + 16) & ~0xF;
1233 
1234 	/* Number of 16-byte (128-bit) chunks */
1235 	count = size/16;
1236 
1237 	p_cache = vmalloc(size);
1238 	if (p_cache == NULL) {
1239 		ql_log(ql_log_fatal, vha, 0xb0a6,
1240 		    "%s: Failed to allocate memory for "
1241 		    "boot loader cache\n", __func__);
1242 		ret_val = QLA_FUNCTION_FAILED;
1243 		goto exit_copy_bootloader;
1244 	}
1245 
1246 	ret_val = qla8044_lockless_flash_read_u32(vha, src,
1247 	    p_cache, size/sizeof(uint32_t));
1248 	if (ret_val == QLA_FUNCTION_FAILED) {
1249 		ql_log(ql_log_fatal, vha, 0xb0a7,
1250 		    "%s: Error reading F/W from flash!!!\n", __func__);
1251 		goto exit_copy_error;
1252 	}
1253 	ql_dbg(ql_dbg_p3p, vha, 0xb0a8, "%s: Read F/W from flash!\n",
1254 	    __func__);
1255 
1256 	/* 128 bit/16 byte write to MS memory */
1257 	ret_val = qla8044_ms_mem_write_128b(vha, dest,
1258 	    (uint32_t *)p_cache, count);
1259 	if (ret_val == QLA_FUNCTION_FAILED) {
1260 		ql_log(ql_log_fatal, vha, 0xb0a9,
1261 		    "%s: Error writing F/W to MS !!!\n", __func__);
1262 		goto exit_copy_error;
1263 	}
1264 	ql_dbg(ql_dbg_p3p, vha, 0xb0aa,
1265 	    "%s: Wrote F/W (size %d) to MS !!!\n",
1266 	    __func__, size);
1267 
1268 exit_copy_error:
1269 	vfree(p_cache);
1270 
1271 exit_copy_bootloader:
1272 	return ret_val;
1273 }
1274 
1275 static int
1276 qla8044_restart(struct scsi_qla_host *vha)
1277 {
1278 	int ret_val = QLA_SUCCESS;
1279 	struct qla_hw_data *ha = vha->hw;
1280 
1281 	qla8044_process_stop_seq(vha);
1282 
1283 	/* Collect minidump */
1284 	if (ql2xmdenable)
1285 		qla8044_get_minidump(vha);
1286 	else
1287 		ql_log(ql_log_fatal, vha, 0xb14c,
1288 		    "Minidump disabled.\n");
1289 
1290 	qla8044_process_init_seq(vha);
1291 
1292 	if (qla8044_copy_bootloader(vha)) {
1293 		ql_log(ql_log_fatal, vha, 0xb0ab,
1294 		    "%s: Copy bootloader, firmware restart failed!\n",
1295 		    __func__);
1296 		ret_val = QLA_FUNCTION_FAILED;
1297 		goto exit_restart;
1298 	}
1299 
1300 	/*
1301 	 *  Loads F/W from flash
1302 	 */
1303 	qla8044_wr_reg(ha, QLA8044_FW_IMAGE_VALID, QLA8044_BOOT_FROM_FLASH);
1304 
1305 	qla8044_process_start_seq(vha);
1306 
1307 exit_restart:
1308 	return ret_val;
1309 }
1310 
1311 /*
1312  * qla8044_check_cmd_peg_status - Check peg status to see if Peg is
1313  * initialized.
1314  *
1315  * @vha : Pointer to adapter structure
1316  *
1317  * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED
1318  */
1319 static int
1320 qla8044_check_cmd_peg_status(struct scsi_qla_host *vha)
1321 {
1322 	uint32_t val, ret_val = QLA_FUNCTION_FAILED;
1323 	int retries = CRB_CMDPEG_CHECK_RETRY_COUNT;
1324 	struct qla_hw_data *ha = vha->hw;
1325 
1326 	do {
1327 		val = qla8044_rd_reg(ha, QLA8044_CMDPEG_STATE);
1328 		if (val == PHAN_INITIALIZE_COMPLETE) {
1329 			ql_dbg(ql_dbg_p3p, vha, 0xb0ac,
1330 			    "%s: Command Peg initialization "
1331 			    "complete! state=0x%x\n", __func__, val);
1332 			ret_val = QLA_SUCCESS;
1333 			break;
1334 		}
1335 		msleep(CRB_CMDPEG_CHECK_DELAY);
1336 	} while (--retries);
1337 
1338 	return ret_val;
1339 }
1340 
1341 static int
1342 qla8044_start_firmware(struct scsi_qla_host *vha)
1343 {
1344 	int ret_val = QLA_SUCCESS;
1345 
1346 	if (qla8044_restart(vha)) {
1347 		ql_log(ql_log_fatal, vha, 0xb0ad,
1348 		    "%s: Restart Error!!!, Need Reset!!!\n",
1349 		    __func__);
1350 		ret_val = QLA_FUNCTION_FAILED;
1351 		goto exit_start_fw;
1352 	} else
1353 		ql_dbg(ql_dbg_p3p, vha, 0xb0af,
1354 		    "%s: Restart done!\n", __func__);
1355 
1356 	ret_val = qla8044_check_cmd_peg_status(vha);
1357 	if (ret_val) {
1358 		ql_log(ql_log_fatal, vha, 0xb0b0,
1359 		    "%s: Peg not initialized!\n", __func__);
1360 		ret_val = QLA_FUNCTION_FAILED;
1361 	}
1362 
1363 exit_start_fw:
1364 	return ret_val;
1365 }
1366 
1367 void
1368 qla8044_clear_drv_active(struct qla_hw_data *ha)
1369 {
1370 	uint32_t drv_active;
1371 	struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
1372 
1373 	drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX);
1374 	drv_active &= ~(1 << (ha->portnum));
1375 
1376 	ql_log(ql_log_info, vha, 0xb0b1,
1377 	    "%s(%ld): drv_active: 0x%08x\n",
1378 	    __func__, vha->host_no, drv_active);
1379 
1380 	qla8044_wr_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX, drv_active);
1381 }
1382 
1383 /*
1384  * qla8044_device_bootstrap - Initialize device, set DEV_READY, start fw
1385  * @vha: pointer to adapter structure
1386  *
1387  * Note: IDC lock must be held upon entry
1388  **/
1389 static int
1390 qla8044_device_bootstrap(struct scsi_qla_host *vha)
1391 {
1392 	int rval = QLA_FUNCTION_FAILED;
1393 	int i;
1394 	uint32_t old_count = 0, count = 0;
1395 	int need_reset = 0;
1396 	uint32_t idc_ctrl;
1397 	struct qla_hw_data *ha = vha->hw;
1398 
1399 	need_reset = qla8044_need_reset(vha);
1400 
1401 	if (!need_reset) {
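		/* Firmware is considered alive if the PEG alive counter
		 * changes within ~2 seconds (10 polls, 200 msec apart). */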
1402 		old_count = qla8044_rd_direct(vha,
1403 		    QLA8044_PEG_ALIVE_COUNTER_INDEX);
1404 
1405 		for (i = 0; i < 10; i++) {
1406 			msleep(200);
1407 
1408 			count = qla8044_rd_direct(vha,
1409 			    QLA8044_PEG_ALIVE_COUNTER_INDEX);
1410 			if (count != old_count) {
1411 				rval = QLA_SUCCESS;
1412 				goto dev_ready;
1413 			}
1414 		}
1415 		qla8044_flash_lock_recovery(vha);
1416 	} else {
1417 		/* We are trying to perform a recovery here. */
1418 		if (ha->flags.isp82xx_fw_hung)
1419 			qla8044_flash_lock_recovery(vha);
1420 	}
1421 
1422 	/* set to DEV_INITIALIZING */
1423 	ql_log(ql_log_info, vha, 0xb0b2,
1424 	    "%s: HW State: INITIALIZING\n", __func__);
1425 	qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX,
1426 	    QLA8XXX_DEV_INITIALIZING);
1427 
1428 	qla8044_idc_unlock(ha);
1429 	rval = qla8044_start_firmware(vha);
1430 	qla8044_idc_lock(ha);
1431 
1432 	if (rval != QLA_SUCCESS) {
1433 		ql_log(ql_log_info, vha, 0xb0b3,
1434 		     "%s: HW State: FAILED\n", __func__);
1435 		qla8044_clear_drv_active(ha);
1436 		qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX,
1437 		    QLA8XXX_DEV_FAILED);
1438 		return rval;
1439 	}
1440 
1441 	/* For ISP8044, if IDC_CTRL GRACEFUL_RESET_BIT1 is set, clear it after
1442 	 * the device goes to INIT state. */
1443 	idc_ctrl = qla8044_rd_reg(ha, QLA8044_IDC_DRV_CTRL);
1444 	if (idc_ctrl & GRACEFUL_RESET_BIT1) {
1445 		qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL,
1446 		    (idc_ctrl & ~GRACEFUL_RESET_BIT1));
1447 		ha->fw_dumped = 0;
1448 	}
1449 
1450 dev_ready:
1451 	ql_log(ql_log_info, vha, 0xb0b4,
1452 	    "%s: HW State: READY\n", __func__);
1453 	qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX, QLA8XXX_DEV_READY);
1454 
1455 	return rval;
1456 }
1457 
1458 /*-------------------------Reset Sequence Functions-----------------------*/
1459 static void
1460 qla8044_dump_reset_seq_hdr(struct scsi_qla_host *vha)
1461 {
1462 	u8 *phdr;
1463 
1464 	if (!vha->reset_tmplt.buff) {
1465 		ql_log(ql_log_fatal, vha, 0xb0b5,
1466 		    "%s: Error Invalid reset_seq_template\n", __func__);
1467 		return;
1468 	}
1469 
1470 	phdr = vha->reset_tmplt.buff;
1471 	ql_dbg(ql_dbg_p3p, vha, 0xb0b6,
1472 	    "Reset Template :\n\t0x%X 0x%X 0x%X 0x%X "
1473 	    "0x%X 0x%X 0x%X 0x%X 0x%X 0x%X\n"
1474 	    "\t0x%X 0x%X 0x%X 0x%X 0x%X 0x%X\n\n",
1475 	    *phdr, *(phdr+1), *(phdr+2), *(phdr+3), *(phdr+4),
1476 	    *(phdr+5), *(phdr+6), *(phdr+7), *(phdr + 8),
1477 	    *(phdr+9), *(phdr+10), *(phdr+11), *(phdr+12),
1478 	    *(phdr+13), *(phdr+14), *(phdr+15));
1479 }
1480 
1481 /*
1482  * qla8044_reset_seq_checksum_test - Validate Reset Sequence template.
1483  *
1484  * @vha : Pointer to adapter structure
1485  *
1486  * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED
1487  */
1488 static int
1489 qla8044_reset_seq_checksum_test(struct scsi_qla_host *vha)
1490 {
1491 	uint32_t sum =  0;
1492 	uint16_t *buff = (uint16_t *)vha->reset_tmplt.buff;
1493 	int u16_count =  vha->reset_tmplt.hdr->size / sizeof(uint16_t);
1494 
1495 	while (u16_count-- > 0)
1496 		sum += *buff++;
1497 
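	/* Fold any carries out of the low 16 bits back in, Internet-checksum
	 * style, so the final result fits in 16 bits. */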
1498 	while (sum >> 16)
1499 		sum = (sum & 0xFFFF) +  (sum >> 16);
1500 
1501 	/* checksum of 0 indicates a valid template */
1502 	if (~sum) {
1503 		return QLA_SUCCESS;
1504 	} else {
1505 		ql_log(ql_log_fatal, vha, 0xb0b7,
1506 		    "%s: Reset seq checksum failed\n", __func__);
1507 		return QLA_FUNCTION_FAILED;
1508 	}
1509 }
1510 
1511 /*
1512  * qla8044_read_reset_template - Read Reset Template from Flash, validate
1513  * the template and store the stop/start/init sequence offsets in vha->reset_tmplt.
1514  *
1515  * @vha : Pointer to adapter structure
1516  */
1517 void
1518 qla8044_read_reset_template(struct scsi_qla_host *vha)
1519 {
1520 	uint8_t *p_buff;
1521 	uint32_t addr, tmplt_hdr_def_size, tmplt_hdr_size;
1522 
1523 	vha->reset_tmplt.seq_error = 0;
1524 	vha->reset_tmplt.buff = vmalloc(QLA8044_RESTART_TEMPLATE_SIZE);
1525 	if (vha->reset_tmplt.buff == NULL) {
1526 		ql_log(ql_log_fatal, vha, 0xb0b8,
1527 		    "%s: Failed to allocate reset template resources\n",
1528 		    __func__);
1529 		goto exit_read_reset_template;
1530 	}
1531 
1532 	p_buff = vha->reset_tmplt.buff;
1533 	addr = QLA8044_RESET_TEMPLATE_ADDR;
1534 
1535 	tmplt_hdr_def_size =
1536 	    sizeof(struct qla8044_reset_template_hdr) / sizeof(uint32_t);
1537 
1538 	ql_dbg(ql_dbg_p3p, vha, 0xb0b9,
1539 	    "%s: Read template hdr size %d from Flash\n",
1540 	    __func__, tmplt_hdr_def_size);
1541 
1542 	/* Copy template header from flash */
1543 	if (qla8044_read_flash_data(vha, p_buff, addr, tmplt_hdr_def_size)) {
1544 		ql_log(ql_log_fatal, vha, 0xb0ba,
1545 		    "%s: Failed to read reset template\n", __func__);
1546 		goto exit_read_template_error;
1547 	}
1548 
1549 	vha->reset_tmplt.hdr =
1550 	 (struct qla8044_reset_template_hdr *) vha->reset_tmplt.buff;
1551 
1552 	/* Validate the template header size and signature */
1553 	tmplt_hdr_size = vha->reset_tmplt.hdr->hdr_size/sizeof(uint32_t);
1554 	if ((tmplt_hdr_size != tmplt_hdr_def_size) ||
1555 	    (vha->reset_tmplt.hdr->signature != RESET_TMPLT_HDR_SIGNATURE)) {
1556 		ql_log(ql_log_fatal, vha, 0xb0bb,
1557 		    "%s: Template Header size invalid %d "
1558 		    "tmplt_hdr_def_size %d!!!\n", __func__,
1559 		    tmplt_hdr_size, tmplt_hdr_def_size);
1560 		goto exit_read_template_error;
1561 	}
1562 
1563 	addr = QLA8044_RESET_TEMPLATE_ADDR + vha->reset_tmplt.hdr->hdr_size;
1564 	p_buff = vha->reset_tmplt.buff + vha->reset_tmplt.hdr->hdr_size;
1565 	tmplt_hdr_def_size = (vha->reset_tmplt.hdr->size -
1566 	    vha->reset_tmplt.hdr->hdr_size)/sizeof(uint32_t);
1567 
1568 	ql_dbg(ql_dbg_p3p, vha, 0xb0bc,
1569 	    "%s: Read rest of the template size %d\n",
1570 	    __func__, vha->reset_tmplt.hdr->size);
1571 
1572 	/* Copy rest of the template */
1573 	if (qla8044_read_flash_data(vha, p_buff, addr, tmplt_hdr_def_size)) {
1574 		ql_log(ql_log_fatal, vha, 0xb0bd,
1575 		    "%s: Failed to read reset template\n", __func__);
1576 		goto exit_read_template_error;
1577 	}
1578 
1579 	/* Integrity check */
1580 	if (qla8044_reset_seq_checksum_test(vha)) {
1581 		ql_log(ql_log_fatal, vha, 0xb0be,
1582 		    "%s: Reset Seq checksum failed!\n", __func__);
1583 		goto exit_read_template_error;
1584 	}
1585 
1586 	ql_dbg(ql_dbg_p3p, vha, 0xb0bf,
1587 	    "%s: Reset Seq checksum passed! Get stop, "
1588 	    "start and init seq offsets\n", __func__);
1589 
1590 	/* Get STOP, START, INIT sequence offsets */
1591 	vha->reset_tmplt.init_offset = vha->reset_tmplt.buff +
1592 	    vha->reset_tmplt.hdr->init_seq_offset;
1593 
1594 	vha->reset_tmplt.start_offset = vha->reset_tmplt.buff +
1595 	    vha->reset_tmplt.hdr->start_seq_offset;
1596 
1597 	vha->reset_tmplt.stop_offset = vha->reset_tmplt.buff +
1598 	    vha->reset_tmplt.hdr->hdr_size;
1599 
1600 	qla8044_dump_reset_seq_hdr(vha);
1601 
1602 	goto exit_read_reset_template;
1603 
1604 exit_read_template_error:
1605 	vfree(vha->reset_tmplt.buff);
1606 
1607 exit_read_reset_template:
1608 	return;
1609 }
1610 
1611 void
1612 qla8044_set_idc_dontreset(struct scsi_qla_host *vha)
1613 {
1614 	uint32_t idc_ctrl;
1615 	struct qla_hw_data *ha = vha->hw;
1616 
1617 	idc_ctrl = qla8044_rd_reg(ha, QLA8044_IDC_DRV_CTRL);
1618 	idc_ctrl |= DONTRESET_BIT0;
1619 	ql_dbg(ql_dbg_p3p, vha, 0xb0c0,
1620 	    "%s: idc_ctrl = %d\n", __func__, idc_ctrl);
1621 	qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL, idc_ctrl);
1622 }
1623 
1624 static inline void
1625 qla8044_set_rst_ready(struct scsi_qla_host *vha)
1626 {
1627 	uint32_t drv_state;
1628 	struct qla_hw_data *ha = vha->hw;
1629 
1630 	drv_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX);
1631 
1632 	/* For ISP8044, drv_state register has 1 bit per function,
1633 	 * shift 1 by func_num to set a bit for the function.*/
1634 	drv_state |= (1 << ha->portnum);
1635 
1636 	ql_log(ql_log_info, vha, 0xb0c1,
1637 	    "%s(%ld): drv_state: 0x%08x\n",
1638 	    __func__, vha->host_no, drv_state);
1639 	qla8044_wr_direct(vha, QLA8044_CRB_DRV_STATE_INDEX, drv_state);
1640 }
1641 
1642 /**
1643  * qla8044_need_reset_handler - Code to start reset sequence
1644  * @vha: pointer to adapter structure
1645  *
1646  * Note: IDC lock must be held upon entry
1647  **/
1648 static void
1649 qla8044_need_reset_handler(struct scsi_qla_host *vha)
1650 {
1651 	uint32_t dev_state = 0, drv_state, drv_active;
1652 	unsigned long reset_timeout;
1653 	struct qla_hw_data *ha = vha->hw;
1654 
1655 	ql_log(ql_log_fatal, vha, 0xb0c2,
1656 	    "%s: Performing ISP error recovery\n", __func__);
1657 
1658 	if (vha->flags.online) {
1659 		qla8044_idc_unlock(ha);
1660 		qla2x00_abort_isp_cleanup(vha);
1661 		ha->isp_ops->get_flash_version(vha, vha->req->ring);
1662 		ha->isp_ops->nvram_config(vha);
1663 		qla8044_idc_lock(ha);
1664 	}
1665 
1666 	dev_state = qla8044_rd_direct(vha,
1667 	    QLA8044_CRB_DEV_STATE_INDEX);
1668 	drv_state = qla8044_rd_direct(vha,
1669 	    QLA8044_CRB_DRV_STATE_INDEX);
1670 	drv_active = qla8044_rd_direct(vha,
1671 	    QLA8044_CRB_DRV_ACTIVE_INDEX);
1672 
1673 	ql_log(ql_log_info, vha, 0xb0c5,
1674 	    "%s(%ld): drv_state = 0x%x, drv_active = 0x%x dev_state = 0x%x\n",
1675 	    __func__, vha->host_no, drv_state, drv_active, dev_state);
1676 
1677 	qla8044_set_rst_ready(vha);
1678 
1679 	/* wait for 10 seconds for reset ack from all functions */
1680 	reset_timeout = jiffies + (ha->fcoe_reset_timeout * HZ);
1681 
1682 	do {
1683 		if (time_after_eq(jiffies, reset_timeout)) {
1684 			ql_log(ql_log_info, vha, 0xb0c4,
1685 			    "%s: Function %d: Reset Ack Timeout!, drv_state: 0x%08x, drv_active: 0x%08x\n",
1686 			    __func__, ha->portnum, drv_state, drv_active);
1687 			break;
1688 		}
1689 
1690 		qla8044_idc_unlock(ha);
1691 		msleep(1000);
1692 		qla8044_idc_lock(ha);
1693 
1694 		dev_state = qla8044_rd_direct(vha,
1695 		    QLA8044_CRB_DEV_STATE_INDEX);
1696 		drv_state = qla8044_rd_direct(vha,
1697 		    QLA8044_CRB_DRV_STATE_INDEX);
1698 		drv_active = qla8044_rd_direct(vha,
1699 		    QLA8044_CRB_DRV_ACTIVE_INDEX);
1700 	} while (((drv_state & drv_active) != drv_active) &&
1701 	    (dev_state == QLA8XXX_DEV_NEED_RESET));
1702 
1703 	/* Remove IDC participation of functions not acknowledging */
1704 	if (drv_state != drv_active) {
1705 		ql_log(ql_log_info, vha, 0xb0c7,
1706 		    "%s(%ld): Function %d turning off drv_active of non-acking function 0x%x\n",
1707 		    __func__, vha->host_no, ha->portnum,
1708 		    (drv_active ^ drv_state));
1709 		drv_active = drv_active & drv_state;
1710 		qla8044_wr_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX,
1711 		    drv_active);
1712 	} else {
1713 		/*
1714 		 * Reset owner should execute reset recovery,
1715 		 * if all functions acknowledged
1716 		 */
1717 		if ((ha->flags.nic_core_reset_owner) &&
1718 		    (dev_state == QLA8XXX_DEV_NEED_RESET)) {
1719 			ha->flags.nic_core_reset_owner = 0;
1720 			qla8044_device_bootstrap(vha);
1721 			return;
1722 		}
1723 	}
1724 
1725 	/* Exit if non active function */
1726 	if (!(drv_active & (1 << ha->portnum))) {
1727 		ha->flags.nic_core_reset_owner = 0;
1728 		return;
1729 	}
1730 
1731 	/*
1732 	 * Execute Reset Recovery if Reset Owner or Function 7
1733 	 * is the only active function
1734 	 */
1735 	if (ha->flags.nic_core_reset_owner ||
1736 	    ((drv_state & drv_active) == QLA8044_FUN7_ACTIVE_INDEX)) {
1737 		ha->flags.nic_core_reset_owner = 0;
1738 		qla8044_device_bootstrap(vha);
1739 	}
1740 }
1741 
1742 static void
1743 qla8044_set_drv_active(struct scsi_qla_host *vha)
1744 {
1745 	uint32_t drv_active;
1746 	struct qla_hw_data *ha = vha->hw;
1747 
1748 	drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX);
1749 
1750 	/* For ISP8044, drv_active register has 1 bit per function,
1751 	 * shift 1 by func_num to set a bit for the function.*/
1752 	drv_active |= (1 << ha->portnum);
1753 
1754 	ql_log(ql_log_info, vha, 0xb0c8,
1755 	    "%s(%ld): drv_active: 0x%08x\n",
1756 	    __func__, vha->host_no, drv_active);
1757 	qla8044_wr_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX, drv_active);
1758 }
1759 
1760 static int
1761 qla8044_check_drv_active(struct scsi_qla_host *vha)
1762 {
1763 	uint32_t drv_active;
1764 	struct qla_hw_data *ha = vha->hw;
1765 
1766 	drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX);
1767 	if (drv_active & (1 << ha->portnum))
1768 		return QLA_SUCCESS;
1769 	else
1770 		return QLA_TEST_FAILED;
1771 }
1772 
1773 static void
1774 qla8044_clear_idc_dontreset(struct scsi_qla_host *vha)
1775 {
1776 	uint32_t idc_ctrl;
1777 	struct qla_hw_data *ha = vha->hw;
1778 
1779 	idc_ctrl = qla8044_rd_reg(ha, QLA8044_IDC_DRV_CTRL);
1780 	idc_ctrl &= ~DONTRESET_BIT0;
1781 	ql_log(ql_log_info, vha, 0xb0c9,
1782 	    "%s: idc_ctrl = %d\n", __func__,
1783 	    idc_ctrl);
1784 	qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL, idc_ctrl);
1785 }
1786 
1787 static int
1788 qla8044_set_idc_ver(struct scsi_qla_host *vha)
1789 {
1790 	int idc_ver;
1791 	uint32_t drv_active;
1792 	int rval = QLA_SUCCESS;
1793 	struct qla_hw_data *ha = vha->hw;
1794 
1795 	drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX);
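	/* If this is the only active function, publish our major IDC version;
	 * otherwise verify it matches the version already advertised. */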
1796 	if (drv_active == (1 << ha->portnum)) {
1797 		idc_ver = qla8044_rd_direct(vha,
1798 		    QLA8044_CRB_DRV_IDC_VERSION_INDEX);
1799 		idc_ver &= (~0xFF);
1800 		idc_ver |= QLA8044_IDC_VER_MAJ_VALUE;
1801 		qla8044_wr_direct(vha, QLA8044_CRB_DRV_IDC_VERSION_INDEX,
1802 		    idc_ver);
1803 		ql_log(ql_log_info, vha, 0xb0ca,
1804 		    "%s: IDC version updated to %d\n",
1805 		    __func__, idc_ver);
1806 	} else {
1807 		idc_ver = qla8044_rd_direct(vha,
1808 		    QLA8044_CRB_DRV_IDC_VERSION_INDEX);
1809 		idc_ver &= 0xFF;
1810 		if (QLA8044_IDC_VER_MAJ_VALUE != idc_ver) {
1811 			ql_log(ql_log_info, vha, 0xb0cb,
1812 			    "%s: qla2xxx driver IDC version %d "
1813 			    "is not compatible with IDC version %d "
1814 			    "of other drivers!\n",
1815 			    __func__, QLA8044_IDC_VER_MAJ_VALUE,
1816 			    idc_ver);
1817 			rval = QLA_FUNCTION_FAILED;
1818 			goto exit_set_idc_ver;
1819 		}
1820 	}
1821 
1822 	/* Update IDC_MINOR_VERSION */
1823 	idc_ver = qla8044_rd_reg(ha, QLA8044_CRB_IDC_VER_MINOR);
1824 	idc_ver &= ~(0x03 << (ha->portnum * 2));
1825 	idc_ver |= (QLA8044_IDC_VER_MIN_VALUE << (ha->portnum * 2));
1826 	qla8044_wr_reg(ha, QLA8044_CRB_IDC_VER_MINOR, idc_ver);
1827 
1828 exit_set_idc_ver:
1829 	return rval;
1830 }
1831 
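/*
 * qla8044_update_idc_reg - One-time IDC register setup at start of day.
 *
 * Under the IDC lock: mark the driver active, clear IDC_CTRL DONTRESET
 * if this is the first driver to load and ql2xdontresethba is not set,
 * and negotiate the IDC version.
 * @vha : Pointer to adapter structure
 */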
1832 static int
1833 qla8044_update_idc_reg(struct scsi_qla_host *vha)
1834 {
1835 	uint32_t drv_active;
1836 	int rval = QLA_SUCCESS;
1837 	struct qla_hw_data *ha = vha->hw;
1838 
1839 	if (vha->flags.init_done)
1840 		goto exit_update_idc_reg;
1841 
1842 	qla8044_idc_lock(ha);
1843 	qla8044_set_drv_active(vha);
1844 
1845 	drv_active = qla8044_rd_direct(vha,
1846 	    QLA8044_CRB_DRV_ACTIVE_INDEX);
1847 
1848 	/* If we are the first driver to load and
1849 	 * ql2xdontresethba is not set, clear IDC_CTRL BIT0. */
1850 	if ((drv_active == (1 << ha->portnum)) && !ql2xdontresethba)
1851 		qla8044_clear_idc_dontreset(vha);
1852 
1853 	rval = qla8044_set_idc_ver(vha);
1854 	if (rval == QLA_FUNCTION_FAILED)
1855 		qla8044_clear_drv_active(ha);
1856 	qla8044_idc_unlock(ha);
1857 
1858 exit_update_idc_reg:
1859 	return rval;
1860 }
1861 
1862 /**
1863  * qla8044_need_qsnt_handler - Quiesce I/O and wait for all functions to ack quiescent mode
1864  * @vha: pointer to adapter structure
1865  **/
1866 static void
1867 qla8044_need_qsnt_handler(struct scsi_qla_host *vha)
1868 {
1869 	unsigned long qsnt_timeout;
1870 	uint32_t drv_state, drv_active, dev_state;
1871 	struct qla_hw_data *ha = vha->hw;
1872 
1873 	if (vha->flags.online)
1874 		qla2x00_quiesce_io(vha);
1875 	else
1876 		return;
1877 
1878 	qla8044_set_qsnt_ready(vha);
1879 
1880 	/* Wait for 30 secs for all functions to ack qsnt mode */
1881 	qsnt_timeout = jiffies + (QSNT_ACK_TOV * HZ);
1882 	drv_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX);
1883 	drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX);
1884 
1885 	/* Shift drv_active left by 1 to match drv_state, since the quiescent
1886 	   ready bit is at bit 1 while the drv active bit is at bit 0. */
1887 	drv_active = drv_active << 1;
1888 
1889 	while (drv_state != drv_active) {
1890 		if (time_after_eq(jiffies, qsnt_timeout)) {
1891 			/* Other functions did not ack, changing state to
1892 			 * DEV_READY
1893 			 */
1894 			clear_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
1895 			qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX,
1896 					    QLA8XXX_DEV_READY);
1897 			qla8044_clear_qsnt_ready(vha);
1898 			ql_log(ql_log_info, vha, 0xb0cc,
1899 			    "Timeout waiting for quiescent ack!!!\n");
1900 			return;
1901 		}
1902 		qla8044_idc_unlock(ha);
1903 		msleep(1000);
1904 		qla8044_idc_lock(ha);
1905 
1906 		drv_state = qla8044_rd_direct(vha,
1907 		    QLA8044_CRB_DRV_STATE_INDEX);
1908 		drv_active = qla8044_rd_direct(vha,
1909 		    QLA8044_CRB_DRV_ACTIVE_INDEX);
1910 		drv_active = drv_active << 1;
1911 	}
1912 
1913 	/* All functions have Acked. Set quiescent state */
1914 	dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX);
1915 
1916 	if (dev_state == QLA8XXX_DEV_NEED_QUIESCENT) {
1917 		qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX,
1918 		    QLA8XXX_DEV_QUIESCENT);
1919 		ql_log(ql_log_info, vha, 0xb0cd,
1920 		    "%s: HW State: QUIESCENT\n", __func__);
1921 	}
1922 }
1923 
1924 /*
1925  * qla8044_device_state_handler - Adapter state machine
1926  * @vha: pointer to host adapter structure.
1927  *
1928  * Note: IDC lock must be UNLOCKED upon entry
1929  **/
1930 int
1931 qla8044_device_state_handler(struct scsi_qla_host *vha)
1932 {
1933 	uint32_t dev_state;
1934 	int rval = QLA_SUCCESS;
1935 	unsigned long dev_init_timeout;
1936 	struct qla_hw_data *ha = vha->hw;
1937 
1938 	rval = qla8044_update_idc_reg(vha);
1939 	if (rval == QLA_FUNCTION_FAILED)
1940 		goto exit_error;
1941 
1942 	dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX);
1943 	ql_dbg(ql_dbg_p3p, vha, 0xb0ce,
1944 	    "Device state is 0x%x = %s\n",
1945 	    dev_state, dev_state < MAX_STATES ?
1946 	    qdev_state(dev_state) : "Unknown");
1947 
1948 	/* wait for 30 seconds for device to go ready */
1949 	dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout * HZ);
1950 
1951 	qla8044_idc_lock(ha);
1952 
1953 	while (1) {
1954 		if (time_after_eq(jiffies, dev_init_timeout)) {
1955 			if (qla8044_check_drv_active(vha) == QLA_SUCCESS) {
1956 				ql_log(ql_log_warn, vha, 0xb0cf,
1957 				    "%s: Device Init Failed 0x%x = %s\n",
1958 				    QLA2XXX_DRIVER_NAME, dev_state,
1959 				    dev_state < MAX_STATES ?
1960 				    qdev_state(dev_state) : "Unknown");
1961 				qla8044_wr_direct(vha,
1962 				    QLA8044_CRB_DEV_STATE_INDEX,
1963 				    QLA8XXX_DEV_FAILED);
1964 			}
1965 		}
1966 
1967 		dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX);
1968 		ql_log(ql_log_info, vha, 0xb0d0,
1969 		    "Device state is 0x%x = %s\n",
1970 		    dev_state, dev_state < MAX_STATES ?
1971 		    qdev_state(dev_state) : "Unknown");
1972 
1973 		/* NOTE: Make sure idc unlocked upon exit of switch statement */
1974 		switch (dev_state) {
1975 		case QLA8XXX_DEV_READY:
1976 			ha->flags.nic_core_reset_owner = 0;
1977 			goto exit;
1978 		case QLA8XXX_DEV_COLD:
1979 			rval = qla8044_device_bootstrap(vha);
1980 			break;
1981 		case QLA8XXX_DEV_INITIALIZING:
1982 			qla8044_idc_unlock(ha);
1983 			msleep(1000);
1984 			qla8044_idc_lock(ha);
1985 			break;
1986 		case QLA8XXX_DEV_NEED_RESET:
1987 			/* For ISP8044, if NEED_RESET is set by any driver,
1988 			 * it should be honored, irrespective of IDC_CTRL
1989 			 * DONTRESET_BIT0 */
1990 			qla8044_need_reset_handler(vha);
1991 			break;
1992 		case QLA8XXX_DEV_NEED_QUIESCENT:
1993 			/* idc locked/unlocked in handler */
1994 			qla8044_need_qsnt_handler(vha);
1995 
1996 			/* Reset the init timeout after qsnt handler */
1997 			dev_init_timeout = jiffies +
1998 			    (ha->fcoe_reset_timeout * HZ);
1999 			break;
2000 		case QLA8XXX_DEV_QUIESCENT:
2001 			ql_log(ql_log_info, vha, 0xb0d1,
2002 			    "HW State: QUIESCENT\n");
2003 
2004 			qla8044_idc_unlock(ha);
2005 			msleep(1000);
2006 			qla8044_idc_lock(ha);
2007 
2008 			/* Reset the init timeout after qsnt handler */
2009 			dev_init_timeout = jiffies +
2010 			    (ha->fcoe_reset_timeout * HZ);
2011 			break;
2012 		case QLA8XXX_DEV_FAILED:
2013 			ha->flags.nic_core_reset_owner = 0;
2014 			qla8044_idc_unlock(ha);
2015 			qla8xxx_dev_failed_handler(vha);
2016 			rval = QLA_FUNCTION_FAILED;
2017 			qla8044_idc_lock(ha);
2018 			goto exit;
2019 		default:
2020 			qla8044_idc_unlock(ha);
2021 			qla8xxx_dev_failed_handler(vha);
2022 			rval = QLA_FUNCTION_FAILED;
2023 			qla8044_idc_lock(ha);
2024 			goto exit;
2025 		}
2026 	}
2027 exit:
2028 	qla8044_idc_unlock(ha);
2029 
2030 exit_error:
2031 	return rval;
2032 }
2033 
2034 /**
2035  * qla8044_check_temp - Check the ISP8044 temperature.
2036  * @vha: adapter block pointer.
2037  *
2038  * Note: The caller should not hold the idc lock.
2039  **/
2040 static int
2041 qla8044_check_temp(struct scsi_qla_host *vha)
2042 {
2043 	uint32_t temp, temp_state, temp_val;
2044 	int status = QLA_SUCCESS;
2045 
2046 	temp = qla8044_rd_direct(vha, QLA8044_CRB_TEMP_STATE_INDEX);
2047 	temp_state = qla82xx_get_temp_state(temp);
2048 	temp_val = qla82xx_get_temp_val(temp);
2049 
2050 	if (temp_state == QLA82XX_TEMP_PANIC) {
2051 		ql_log(ql_log_warn, vha, 0xb0d2,
2052 		    "Device temperature %d degrees C"
2053 		    " exceeds maximum allowed. Hardware has been shut"
2054 		    " down\n", temp_val);
2055 		status = QLA_FUNCTION_FAILED;
2056 		return status;
2057 	} else if (temp_state == QLA82XX_TEMP_WARN) {
2058 		ql_log(ql_log_warn, vha, 0xb0d3,
2059 		    "Device temperature %d"
2060 		    " degrees C exceeds operating range."
2061 		    " Immediate action needed.\n", temp_val);
2062 	}
2063 	return 0;
2064 }
2065 
2066 int qla8044_read_temperature(scsi_qla_host_t *vha)
2067 {
2068 	uint32_t temp;
2069 
2070 	temp = qla8044_rd_direct(vha, QLA8044_CRB_TEMP_STATE_INDEX);
2071 	return qla82xx_get_temp_val(temp);
2072 }
2073 
2074 /**
2075  * qla8044_check_fw_alive  - Check firmware health
2076  * @vha: Pointer to host adapter structure.
2077  *
2078  * Context: Interrupt
2079  **/
2080 int
2081 qla8044_check_fw_alive(struct scsi_qla_host *vha)
2082 {
2083 	uint32_t fw_heartbeat_counter;
2084 	uint32_t halt_status1, halt_status2;
2085 	int status = QLA_SUCCESS;
2086 
2087 	fw_heartbeat_counter = qla8044_rd_direct(vha,
2088 	    QLA8044_PEG_ALIVE_COUNTER_INDEX);
2089 
2090 	/* If PEG_ALIVE_COUNTER is 0xffffffff, AER/EEH is in progress, ignore */
2091 	if (fw_heartbeat_counter == 0xffffffff) {
2092 		ql_dbg(ql_dbg_p3p, vha, 0xb0d4,
2093 		    "scsi%ld: %s: Device in frozen "
2094 		    "state, QLA8044_PEG_ALIVE_COUNTER is 0xffffffff\n",
2095 		    vha->host_no, __func__);
2096 		return status;
2097 	}
2098 
2099 	if (vha->fw_heartbeat_counter == fw_heartbeat_counter) {
2100 		vha->seconds_since_last_heartbeat++;
2101 		/* FW not alive after 2 seconds */
2102 		if (vha->seconds_since_last_heartbeat == 2) {
2103 			vha->seconds_since_last_heartbeat = 0;
2104 			halt_status1 = qla8044_rd_direct(vha,
2105 			    QLA8044_PEG_HALT_STATUS1_INDEX);
2106 			halt_status2 = qla8044_rd_direct(vha,
2107 			    QLA8044_PEG_HALT_STATUS2_INDEX);
2108 
2109 			ql_log(ql_log_info, vha, 0xb0d5,
2110 			    "scsi(%ld): %s, ISP8044 "
2111 			    "Dumping hw/fw registers:\n"
2112 			    " PEG_HALT_STATUS1: 0x%x, "
2113 			    "PEG_HALT_STATUS2: 0x%x,\n",
2114 			    vha->host_no, __func__, halt_status1,
2115 			    halt_status2);
2116 			status = QLA_FUNCTION_FAILED;
2117 		}
2118 	} else
2119 		vha->seconds_since_last_heartbeat = 0;
2120 
2121 	vha->fw_heartbeat_counter = fw_heartbeat_counter;
2122 	return status;
2123 }
2124 
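/*
 * qla8044_watchdog - Periodic health check run from the timer routine.
 *
 * Monitors the firmware heartbeat, device temperature and IDC device
 * state, and schedules the appropriate DPC work (ISP abort, quiesce or
 * unrecoverable handling) when a problem is detected.
 * @vha : Pointer to adapter structure
 */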
2125 void
2126 qla8044_watchdog(struct scsi_qla_host *vha)
2127 {
2128 	uint32_t dev_state, halt_status;
2129 	int halt_status_unrecoverable = 0;
2130 	struct qla_hw_data *ha = vha->hw;
2131 
2132 	/* don't poll if reset is going on or FW hang in quiescent state */
2133 	if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
2134 	    test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags))) {
2135 		dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX);
2136 
2137 		if (qla8044_check_fw_alive(vha)) {
2138 			ha->flags.isp82xx_fw_hung = 1;
2139 			ql_log(ql_log_warn, vha, 0xb10a,
2140 			    "Firmware hung.\n");
2141 			qla82xx_clear_pending_mbx(vha);
2142 		}
2143 
2144 		if (qla8044_check_temp(vha)) {
2145 			set_bit(ISP_UNRECOVERABLE, &vha->dpc_flags);
2146 			ha->flags.isp82xx_fw_hung = 1;
2147 			qla2xxx_wake_dpc(vha);
2148 		} else if (dev_state == QLA8XXX_DEV_NEED_RESET &&
2149 			   !test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) {
2150 			ql_log(ql_log_info, vha, 0xb0d6,
2151 			    "%s: HW State: NEED RESET!\n",
2152 			    __func__);
2153 			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2154 			qla2xxx_wake_dpc(vha);
2155 		} else if (dev_state == QLA8XXX_DEV_NEED_QUIESCENT &&
2156 		    !test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) {
2157 			ql_log(ql_log_info, vha, 0xb0d7,
2158 			    "%s: HW State: NEED QUIES detected!\n",
2159 			    __func__);
2160 			set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
2161 			qla2xxx_wake_dpc(vha);
2162 		} else {
2163 			/* Check firmware health */
2164 			if (ha->flags.isp82xx_fw_hung) {
2165 				halt_status = qla8044_rd_direct(vha,
2166 					QLA8044_PEG_HALT_STATUS1_INDEX);
2167 				if (halt_status &
2168 				    QLA8044_HALT_STATUS_FW_RESET) {
2169 					ql_log(ql_log_fatal, vha,
2170 					    0xb0d8, "%s: Firmware "
2171 					    "error detected device "
2172 					    "is being reset\n",
2173 					    __func__);
2174 				} else if (halt_status &
2175 					    QLA8044_HALT_STATUS_UNRECOVERABLE) {
2176 						halt_status_unrecoverable = 1;
2177 				}
2178 
2179 				/* Since we cannot change dev_state in interrupt
2180 				 * context, set appropriate DPC flag then wakeup
2181 				 *  DPC */
2182 				if (halt_status_unrecoverable) {
2183 					set_bit(ISP_UNRECOVERABLE,
2184 					    &vha->dpc_flags);
2185 				} else {
2186 					if (dev_state ==
2187 					    QLA8XXX_DEV_QUIESCENT) {
2188 						set_bit(FCOE_CTX_RESET_NEEDED,
2189 						    &vha->dpc_flags);
2190 						ql_log(ql_log_info, vha, 0xb0d9,
2191 						    "%s: FW CONTEXT Reset "
2192 						    "needed!\n", __func__);
2193 					} else {
2194 						ql_log(ql_log_info, vha,
2195 						    0xb0da, "%s: "
2196 						    "detect abort needed\n",
2197 						    __func__);
2198 						set_bit(ISP_ABORT_NEEDED,
2199 						    &vha->dpc_flags);
2200 					}
2201 				}
2202 				qla2xxx_wake_dpc(vha);
2203 			}
2204 		}
2205 
2206 	}
2207 }
2208 
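/*
 * qla8044_minidump_process_control - Execute a CNTRL minidump entry.
 *
 * For each operation in the entry, perform the CRB write, read-modify-
 * write, poll, or template saved-state load/store selected by the
 * opcode bits, advancing the CRB address by the entry's stride.
 */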
2209 static int
2210 qla8044_minidump_process_control(struct scsi_qla_host *vha,
2211 				 struct qla8044_minidump_entry_hdr *entry_hdr)
2212 {
2213 	struct qla8044_minidump_entry_crb *crb_entry;
2214 	uint32_t read_value, opcode, poll_time, addr, index;
2215 	uint32_t crb_addr, rval = QLA_SUCCESS;
2216 	unsigned long wtime;
2217 	struct qla8044_minidump_template_hdr *tmplt_hdr;
2218 	int i;
2219 	struct qla_hw_data *ha = vha->hw;
2220 
2221 	ql_dbg(ql_dbg_p3p, vha, 0xb0dd, "Entering fn: %s\n", __func__);
2222 	tmplt_hdr = (struct qla8044_minidump_template_hdr *)
2223 		ha->md_tmplt_hdr;
2224 	crb_entry = (struct qla8044_minidump_entry_crb *)entry_hdr;
2225 
2226 	crb_addr = crb_entry->addr;
2227 	for (i = 0; i < crb_entry->op_count; i++) {
2228 		opcode = crb_entry->crb_ctrl.opcode;
2229 
2230 		if (opcode & QLA82XX_DBG_OPCODE_WR) {
2231 			qla8044_wr_reg_indirect(vha, crb_addr,
2232 			    crb_entry->value_1);
2233 			opcode &= ~QLA82XX_DBG_OPCODE_WR;
2234 		}
2235 
2236 		if (opcode & QLA82XX_DBG_OPCODE_RW) {
2237 			qla8044_rd_reg_indirect(vha, crb_addr, &read_value);
2238 			qla8044_wr_reg_indirect(vha, crb_addr, read_value);
2239 			opcode &= ~QLA82XX_DBG_OPCODE_RW;
2240 		}
2241 
2242 		if (opcode & QLA82XX_DBG_OPCODE_AND) {
2243 			qla8044_rd_reg_indirect(vha, crb_addr, &read_value);
2244 			read_value &= crb_entry->value_2;
2245 			opcode &= ~QLA82XX_DBG_OPCODE_AND;
2246 			if (opcode & QLA82XX_DBG_OPCODE_OR) {
2247 				read_value |= crb_entry->value_3;
2248 				opcode &= ~QLA82XX_DBG_OPCODE_OR;
2249 			}
2250 			qla8044_wr_reg_indirect(vha, crb_addr, read_value);
2251 		}
2252 		if (opcode & QLA82XX_DBG_OPCODE_OR) {
2253 			qla8044_rd_reg_indirect(vha, crb_addr, &read_value);
2254 			read_value |= crb_entry->value_3;
2255 			qla8044_wr_reg_indirect(vha, crb_addr, read_value);
2256 			opcode &= ~QLA82XX_DBG_OPCODE_OR;
2257 		}
2258 		if (opcode & QLA82XX_DBG_OPCODE_POLL) {
2259 			poll_time = crb_entry->crb_strd.poll_timeout;
2260 			wtime = jiffies + poll_time;
2261 			qla8044_rd_reg_indirect(vha, crb_addr, &read_value);
2262 
2263 			do {
2264 				if ((read_value & crb_entry->value_2) ==
2265 				    crb_entry->value_1) {
2266 					break;
2267 				} else if (time_after_eq(jiffies, wtime)) {
2268 					/* capturing dump failed */
2269 					rval = QLA_FUNCTION_FAILED;
2270 					break;
2271 				} else {
2272 					qla8044_rd_reg_indirect(vha,
2273 					    crb_addr, &read_value);
2274 				}
2275 			} while (1);
2276 			opcode &= ~QLA82XX_DBG_OPCODE_POLL;
2277 		}
2278 
2279 		if (opcode & QLA82XX_DBG_OPCODE_RDSTATE) {
2280 			if (crb_entry->crb_strd.state_index_a) {
2281 				index = crb_entry->crb_strd.state_index_a;
2282 				addr = tmplt_hdr->saved_state_array[index];
2283 			} else {
2284 				addr = crb_addr;
2285 			}
2286 
2287 			qla8044_rd_reg_indirect(vha, addr, &read_value);
2288 			index = crb_entry->crb_ctrl.state_index_v;
2289 			tmplt_hdr->saved_state_array[index] = read_value;
2290 			opcode &= ~QLA82XX_DBG_OPCODE_RDSTATE;
2291 		}
2292 
2293 		if (opcode & QLA82XX_DBG_OPCODE_WRSTATE) {
2294 			if (crb_entry->crb_strd.state_index_a) {
2295 				index = crb_entry->crb_strd.state_index_a;
2296 				addr = tmplt_hdr->saved_state_array[index];
2297 			} else {
2298 				addr = crb_addr;
2299 			}
2300 
2301 			if (crb_entry->crb_ctrl.state_index_v) {
2302 				index = crb_entry->crb_ctrl.state_index_v;
2303 				read_value =
2304 				    tmplt_hdr->saved_state_array[index];
2305 			} else {
2306 				read_value = crb_entry->value_1;
2307 			}
2308 
2309 			qla8044_wr_reg_indirect(vha, addr, read_value);
2310 			opcode &= ~QLA82XX_DBG_OPCODE_WRSTATE;
2311 		}
2312 
2313 		if (opcode & QLA82XX_DBG_OPCODE_MDSTATE) {
2314 			index = crb_entry->crb_ctrl.state_index_v;
2315 			read_value = tmplt_hdr->saved_state_array[index];
2316 			read_value <<= crb_entry->crb_ctrl.shl;
2317 			read_value >>= crb_entry->crb_ctrl.shr;
2318 			if (crb_entry->value_2)
2319 				read_value &= crb_entry->value_2;
2320 			read_value |= crb_entry->value_3;
2321 			read_value += crb_entry->value_1;
2322 			tmplt_hdr->saved_state_array[index] = read_value;
2323 			opcode &= ~QLA82XX_DBG_OPCODE_MDSTATE;
2324 		}
2325 		crb_addr += crb_entry->crb_strd.addr_stride;
2326 	}
2327 	return rval;
2328 }
2329 
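/*
 * qla8044_minidump_process_rdcrb - Capture a RDCRB minidump entry:
 * read op_count CRB registers starting at the entry's address, using
 * the given stride, and store address/value pairs in the dump buffer.
 */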
2330 static void
2331 qla8044_minidump_process_rdcrb(struct scsi_qla_host *vha,
2332 	struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
2333 {
2334 	uint32_t r_addr, r_stride, loop_cnt, i, r_value;
2335 	struct qla8044_minidump_entry_crb *crb_hdr;
2336 	uint32_t *data_ptr = *d_ptr;
2337 
2338 	ql_dbg(ql_dbg_p3p, vha, 0xb0de, "Entering fn: %s\n", __func__);
2339 	crb_hdr = (struct qla8044_minidump_entry_crb *)entry_hdr;
2340 	r_addr = crb_hdr->addr;
2341 	r_stride = crb_hdr->crb_strd.addr_stride;
2342 	loop_cnt = crb_hdr->op_count;
2343 
2344 	for (i = 0; i < loop_cnt; i++) {
2345 		qla8044_rd_reg_indirect(vha, r_addr, &r_value);
2346 		*data_ptr++ = r_addr;
2347 		*data_ptr++ = r_value;
2348 		r_addr += r_stride;
2349 	}
2350 	*d_ptr = data_ptr;
2351 }
2352 
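/*
 * qla8044_minidump_process_rdmem - Capture a RDMEM minidump entry:
 * read MS memory 16 bytes at a time through the MIU test agent
 * (under hw_lock) and copy the data into the dump buffer.
 */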
2353 static int
2354 qla8044_minidump_process_rdmem(struct scsi_qla_host *vha,
2355 	struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
2356 {
2357 	uint32_t r_addr, r_value, r_data;
2358 	uint32_t i, j, loop_cnt;
2359 	struct qla8044_minidump_entry_rdmem *m_hdr;
2360 	unsigned long flags;
2361 	uint32_t *data_ptr = *d_ptr;
2362 	struct qla_hw_data *ha = vha->hw;
2363 
2364 	ql_dbg(ql_dbg_p3p, vha, 0xb0df, "Entering fn: %s\n", __func__);
2365 	m_hdr = (struct qla8044_minidump_entry_rdmem *)entry_hdr;
2366 	r_addr = m_hdr->read_addr;
2367 	loop_cnt = m_hdr->read_data_size/16;
2368 
2369 	ql_dbg(ql_dbg_p3p, vha, 0xb0f0,
2370 	    "[%s]: Read addr: 0x%x, read_data_size: 0x%x\n",
2371 	    __func__, r_addr, m_hdr->read_data_size);
2372 
2373 	if (r_addr & 0xf) {
2374 		ql_dbg(ql_dbg_p3p, vha, 0xb0f1,
2375 		    "[%s]: Read addr 0x%x not 16 bytes aligned\n",
2376 		    __func__, r_addr);
2377 		return QLA_FUNCTION_FAILED;
2378 	}
2379 
2380 	if (m_hdr->read_data_size % 16) {
2381 		ql_dbg(ql_dbg_p3p, vha, 0xb0f2,
2382 		    "[%s]: Read data[0x%x] not multiple of 16 bytes\n",
2383 		    __func__, m_hdr->read_data_size);
2384 		return QLA_FUNCTION_FAILED;
2385 	}
2386 
2387 	ql_dbg(ql_dbg_p3p, vha, 0xb0f3,
2388 	    "[%s]: rdmem_addr: 0x%x, read_data_size: 0x%x, loop_cnt: 0x%x\n",
2389 	    __func__, r_addr, m_hdr->read_data_size, loop_cnt);
2390 
2391 	write_lock_irqsave(&ha->hw_lock, flags);
2392 	for (i = 0; i < loop_cnt; i++) {
2393 		qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_ADDR_LO, r_addr);
2394 		r_value = 0;
2395 		qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_ADDR_HI, r_value);
2396 		r_value = MIU_TA_CTL_ENABLE;
2397 		qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_CTRL, r_value);
2398 		r_value = MIU_TA_CTL_START_ENABLE;
2399 		qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_CTRL, r_value);
2400 
2401 		for (j = 0; j < MAX_CTL_CHECK; j++) {
2402 			qla8044_rd_reg_indirect(vha, MD_MIU_TEST_AGT_CTRL,
2403 			    &r_value);
2404 			if ((r_value & MIU_TA_CTL_BUSY) == 0)
2405 				break;
2406 		}
2407 
2408 		if (j >= MAX_CTL_CHECK) {
2409 			write_unlock_irqrestore(&ha->hw_lock, flags);
2410 			return QLA_SUCCESS;
2411 		}
2412 
2413 		for (j = 0; j < 4; j++) {
2414 			qla8044_rd_reg_indirect(vha, MD_MIU_TEST_AGT_RDDATA[j],
2415 			    &r_data);
2416 			*data_ptr++ = r_data;
2417 		}
2418 
2419 		r_addr += 16;
2420 	}
2421 	write_unlock_irqrestore(&ha->hw_lock, flags);
2422 
2423 	ql_dbg(ql_dbg_p3p, vha, 0xb0f4,
2424 	    "Leaving fn: %s datacount: 0x%x\n",
2425 	     __func__, (loop_cnt * 16));
2426 
2427 	*d_ptr = data_ptr;
2428 	return QLA_SUCCESS;
2429 }
2430 
2431 /* ISP83xx flash read for _RDROM _BOARD */
2432 static uint32_t
2433 qla8044_minidump_process_rdrom(struct scsi_qla_host *vha,
2434 	struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
2435 {
2436 	uint32_t fl_addr, u32_count, rval;
2437 	struct qla8044_minidump_entry_rdrom *rom_hdr;
2438 	uint32_t *data_ptr = *d_ptr;
2439 
2440 	rom_hdr = (struct qla8044_minidump_entry_rdrom *)entry_hdr;
2441 	fl_addr = rom_hdr->read_addr;
2442 	u32_count = (rom_hdr->read_data_size)/sizeof(uint32_t);
2443 
2444 	ql_dbg(ql_dbg_p3p, vha, 0xb0f5, "[%s]: fl_addr: 0x%x, count: 0x%x\n",
2445 	    __func__, fl_addr, u32_count);
2446 
2447 	rval = qla8044_lockless_flash_read_u32(vha, fl_addr,
2448 	    (u8 *)(data_ptr), u32_count);
2449 
2450 	if (rval != QLA_SUCCESS) {
2451 		ql_log(ql_log_fatal, vha, 0xb0f6,
2452 		    "%s: Flash Read Error, Count=%d\n", __func__, u32_count);
2453 		return QLA_FUNCTION_FAILED;
2454 	} else {
2455 		data_ptr += u32_count;
2456 		*d_ptr = data_ptr;
2457 		return QLA_SUCCESS;
2458 	}
2459 }
2460 
2461 static void
2462 qla8044_mark_entry_skipped(struct scsi_qla_host *vha,
2463 	struct qla8044_minidump_entry_hdr *entry_hdr, int index)
2464 {
2465 	entry_hdr->d_ctrl.driver_flags |= QLA82XX_DBG_SKIPPED_FLAG;
2466 
2467 	ql_log(ql_log_info, vha, 0xb0f7,
2468 	    "scsi(%ld): Skipping entry[%d]: ETYPE[0x%x]-ELEVEL[0x%x]\n",
2469 	    vha->host_no, index, entry_hdr->entry_type,
2470 	    entry_hdr->d_ctrl.entry_capture_mask);
2471 }
2472 
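/*
 * qla8044_minidump_process_l2tag - Capture an L2 cache minidump entry:
 * for each tag value, program the tag and control registers, optionally
 * poll the control register for completion, then read out the cache data.
 */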
2473 static int
2474 qla8044_minidump_process_l2tag(struct scsi_qla_host *vha,
2475 	struct qla8044_minidump_entry_hdr *entry_hdr,
2476 				 uint32_t **d_ptr)
2477 {
2478 	uint32_t addr, r_addr, c_addr, t_r_addr;
2479 	uint32_t i, k, loop_count, t_value, r_cnt, r_value;
2480 	unsigned long p_wait, w_time, p_mask;
2481 	uint32_t c_value_w, c_value_r;
2482 	struct qla8044_minidump_entry_cache *cache_hdr;
2483 	int rval = QLA_FUNCTION_FAILED;
2484 	uint32_t *data_ptr = *d_ptr;
2485 
2486 	ql_dbg(ql_dbg_p3p, vha, 0xb0f8, "Entering fn: %s\n", __func__);
2487 	cache_hdr = (struct qla8044_minidump_entry_cache *)entry_hdr;
2488 
2489 	loop_count = cache_hdr->op_count;
2490 	r_addr = cache_hdr->read_addr;
2491 	c_addr = cache_hdr->control_addr;
2492 	c_value_w = cache_hdr->cache_ctrl.write_value;
2493 
2494 	t_r_addr = cache_hdr->tag_reg_addr;
2495 	t_value = cache_hdr->addr_ctrl.init_tag_value;
2496 	r_cnt = cache_hdr->read_ctrl.read_addr_cnt;
2497 	p_wait = cache_hdr->cache_ctrl.poll_wait;
2498 	p_mask = cache_hdr->cache_ctrl.poll_mask;
2499 
2500 	for (i = 0; i < loop_count; i++) {
2501 		qla8044_wr_reg_indirect(vha, t_r_addr, t_value);
2502 		if (c_value_w)
2503 			qla8044_wr_reg_indirect(vha, c_addr, c_value_w);
2504 
2505 		if (p_mask) {
2506 			w_time = jiffies + p_wait;
2507 			do {
2508 				qla8044_rd_reg_indirect(vha, c_addr,
2509 				    &c_value_r);
2510 				if ((c_value_r & p_mask) == 0) {
2511 					break;
2512 				} else if (time_after_eq(jiffies, w_time)) {
2513 					/* capturing dump failed */
2514 					return rval;
2515 				}
2516 			} while (1);
2517 		}
2518 
2519 		addr = r_addr;
2520 		for (k = 0; k < r_cnt; k++) {
2521 			qla8044_rd_reg_indirect(vha, addr, &r_value);
2522 			*data_ptr++ = r_value;
2523 			addr += cache_hdr->read_ctrl.read_addr_stride;
2524 		}
2525 		t_value += cache_hdr->addr_ctrl.tag_value_stride;
2526 	}
2527 	*d_ptr = data_ptr;
2528 	return QLA_SUCCESS;
2529 }
2530 
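/*
 * qla8044_minidump_process_l1cache - Capture an L1 cache minidump entry.
 * Same sequence as the L2 path, but without the control-register poll.
 */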
2531 static void
2532 qla8044_minidump_process_l1cache(struct scsi_qla_host *vha,
2533 	struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
2534 {
2535 	uint32_t addr, r_addr, c_addr, t_r_addr;
2536 	uint32_t i, k, loop_count, t_value, r_cnt, r_value;
2537 	uint32_t c_value_w;
2538 	struct qla8044_minidump_entry_cache *cache_hdr;
2539 	uint32_t *data_ptr = *d_ptr;
2540 
2541 	cache_hdr = (struct qla8044_minidump_entry_cache *)entry_hdr;
2542 	loop_count = cache_hdr->op_count;
2543 	r_addr = cache_hdr->read_addr;
2544 	c_addr = cache_hdr->control_addr;
2545 	c_value_w = cache_hdr->cache_ctrl.write_value;
2546 
2547 	t_r_addr = cache_hdr->tag_reg_addr;
2548 	t_value = cache_hdr->addr_ctrl.init_tag_value;
2549 	r_cnt = cache_hdr->read_ctrl.read_addr_cnt;
2550 
2551 	for (i = 0; i < loop_count; i++) {
2552 		qla8044_wr_reg_indirect(vha, t_r_addr, t_value);
2553 		qla8044_wr_reg_indirect(vha, c_addr, c_value_w);
2554 		addr = r_addr;
2555 		for (k = 0; k < r_cnt; k++) {
2556 			qla8044_rd_reg_indirect(vha, addr, &r_value);
2557 			*data_ptr++ = r_value;
2558 			addr += cache_hdr->read_ctrl.read_addr_stride;
2559 		}
2560 		t_value += cache_hdr->addr_ctrl.tag_value_stride;
2561 	}
2562 	*d_ptr = data_ptr;
2563 }
2564 
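/*
 * qla8044_minidump_process_rdocm - Capture a RDOCM minidump entry:
 * read on-chip memory directly through the PCI BAR (nx_pcibase) at the
 * entry's address stride.
 */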
2565 static void
2566 qla8044_minidump_process_rdocm(struct scsi_qla_host *vha,
2567 	struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
2568 {
2569 	uint32_t r_addr, r_stride, loop_cnt, i, r_value;
2570 	struct qla8044_minidump_entry_rdocm *ocm_hdr;
2571 	uint32_t *data_ptr = *d_ptr;
2572 	struct qla_hw_data *ha = vha->hw;
2573 
2574 	ql_dbg(ql_dbg_p3p, vha, 0xb0f9, "Entering fn: %s\n", __func__);
2575 
2576 	ocm_hdr = (struct qla8044_minidump_entry_rdocm *)entry_hdr;
2577 	r_addr = ocm_hdr->read_addr;
2578 	r_stride = ocm_hdr->read_addr_stride;
2579 	loop_cnt = ocm_hdr->op_count;
2580 
2581 	ql_dbg(ql_dbg_p3p, vha, 0xb0fa,
2582 	    "[%s]: r_addr: 0x%x, r_stride: 0x%x, loop_cnt: 0x%x\n",
2583 	    __func__, r_addr, r_stride, loop_cnt);
2584 
2585 	for (i = 0; i < loop_cnt; i++) {
2586 		r_value = readl((void __iomem *)(r_addr + ha->nx_pcibase));
2587 		*data_ptr++ = r_value;
2588 		r_addr += r_stride;
2589 	}
2590 	ql_dbg(ql_dbg_p3p, vha, 0xb0fb, "Leaving fn: %s datacount: 0x%lx\n",
2591 	    __func__, (long unsigned int) (loop_cnt * sizeof(uint32_t)));
2592 
2593 	*d_ptr = data_ptr;
2594 }
2595 
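/*
 * qla8044_minidump_process_rdmux - Capture a RDMUX minidump entry:
 * write successive select values to the mux select register and store
 * each select value together with the data read back.
 */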
2596 static void
2597 qla8044_minidump_process_rdmux(struct scsi_qla_host *vha,
2598 	struct qla8044_minidump_entry_hdr *entry_hdr,
2599 	uint32_t **d_ptr)
2600 {
2601 	uint32_t r_addr, s_stride, s_addr, s_value, loop_cnt, i, r_value;
2602 	struct qla8044_minidump_entry_mux *mux_hdr;
2603 	uint32_t *data_ptr = *d_ptr;
2604 
2605 	ql_dbg(ql_dbg_p3p, vha, 0xb0fc, "Entering fn: %s\n", __func__);
2606 
2607 	mux_hdr = (struct qla8044_minidump_entry_mux *)entry_hdr;
2608 	r_addr = mux_hdr->read_addr;
2609 	s_addr = mux_hdr->select_addr;
2610 	s_stride = mux_hdr->select_value_stride;
2611 	s_value = mux_hdr->select_value;
2612 	loop_cnt = mux_hdr->op_count;
2613 
2614 	for (i = 0; i < loop_cnt; i++) {
2615 		qla8044_wr_reg_indirect(vha, s_addr, s_value);
2616 		qla8044_rd_reg_indirect(vha, r_addr, &r_value);
2617 		*data_ptr++ = s_value;
2618 		*data_ptr++ = r_value;
2619 		s_value += s_stride;
2620 	}
2621 	*d_ptr = data_ptr;
2622 }
2623 
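/*
 * qla8044_minidump_process_queue - Capture a QUEUE minidump entry:
 * for each queue id, select the queue and read the configured number
 * of registers at the given stride into the dump buffer.
 */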
2624 static void
2625 qla8044_minidump_process_queue(struct scsi_qla_host *vha,
2626 	struct qla8044_minidump_entry_hdr *entry_hdr,
2627 	uint32_t **d_ptr)
2628 {
2629 	uint32_t s_addr, r_addr;
2630 	uint32_t r_stride, r_value, r_cnt, qid = 0;
2631 	uint32_t i, k, loop_cnt;
2632 	struct qla8044_minidump_entry_queue *q_hdr;
2633 	uint32_t *data_ptr = *d_ptr;
2634 
2635 	ql_dbg(ql_dbg_p3p, vha, 0xb0fd, "Entering fn: %s\n", __func__);
2636 	q_hdr = (struct qla8044_minidump_entry_queue *)entry_hdr;
2637 	s_addr = q_hdr->select_addr;
2638 	r_cnt = q_hdr->rd_strd.read_addr_cnt;
2639 	r_stride = q_hdr->rd_strd.read_addr_stride;
2640 	loop_cnt = q_hdr->op_count;
2641 
2642 	for (i = 0; i < loop_cnt; i++) {
2643 		qla8044_wr_reg_indirect(vha, s_addr, qid);
2644 		r_addr = q_hdr->read_addr;
2645 		for (k = 0; k < r_cnt; k++) {
2646 			qla8044_rd_reg_indirect(vha, r_addr, &r_value);
2647 			*data_ptr++ = r_value;
2648 			r_addr += r_stride;
2649 		}
2650 		qid += q_hdr->q_strd.queue_id_stride;
2651 	}
2652 	*d_ptr = data_ptr;
2653 }
2654 
2655 /* ISP83xx functions to process new minidump entries... */
2656 static uint32_t
2657 qla8044_minidump_process_pollrd(struct scsi_qla_host *vha,
2658 	struct qla8044_minidump_entry_hdr *entry_hdr,
2659 	uint32_t **d_ptr)
2660 {
2661 	uint32_t r_addr, s_addr, s_value, r_value, poll_wait, poll_mask;
2662 	uint16_t s_stride, i;
2663 	struct qla8044_minidump_entry_pollrd *pollrd_hdr;
2664 	uint32_t *data_ptr = *d_ptr;
2665 
2666 	pollrd_hdr = (struct qla8044_minidump_entry_pollrd *) entry_hdr;
2667 	s_addr = pollrd_hdr->select_addr;
2668 	r_addr = pollrd_hdr->read_addr;
2669 	s_value = pollrd_hdr->select_value;
2670 	s_stride = pollrd_hdr->select_value_stride;
2671 
2672 	poll_wait = pollrd_hdr->poll_wait;
2673 	poll_mask = pollrd_hdr->poll_mask;
2674 
2675 	for (i = 0; i < pollrd_hdr->op_count; i++) {
2676 		qla8044_wr_reg_indirect(vha, s_addr, s_value);
2677 		poll_wait = pollrd_hdr->poll_wait;
2678 		while (1) {
2679 			qla8044_rd_reg_indirect(vha, s_addr, &r_value);
2680 			if ((r_value & poll_mask) != 0) {
2681 				break;
2682 			} else {
2683 				usleep_range(1000, 1100);
2684 				if (--poll_wait == 0) {
2685 					ql_log(ql_log_fatal, vha, 0xb0fe,
2686 					    "%s: TIMEOUT\n", __func__);
2687 					goto error;
2688 				}
2689 			}
2690 		}
2691 		qla8044_rd_reg_indirect(vha, r_addr, &r_value);
2692 		*data_ptr++ = s_value;
2693 		*data_ptr++ = r_value;
2694 
2695 		s_value += s_stride;
2696 	}
2697 	*d_ptr = data_ptr;
2698 	return QLA_SUCCESS;
2699 
2700 error:
2701 	return QLA_FUNCTION_FAILED;
2702 }
2703 
2704 static void
2705 qla8044_minidump_process_rdmux2(struct scsi_qla_host *vha,
2706 	struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
2707 {
2708 	uint32_t sel_val1, sel_val2, t_sel_val, data, i;
2709 	uint32_t sel_addr1, sel_addr2, sel_val_mask, read_addr;
2710 	struct qla8044_minidump_entry_rdmux2 *rdmux2_hdr;
2711 	uint32_t *data_ptr = *d_ptr;
2712 
2713 	rdmux2_hdr = (struct qla8044_minidump_entry_rdmux2 *) entry_hdr;
2714 	sel_val1 = rdmux2_hdr->select_value_1;
2715 	sel_val2 = rdmux2_hdr->select_value_2;
2716 	sel_addr1 = rdmux2_hdr->select_addr_1;
2717 	sel_addr2 = rdmux2_hdr->select_addr_2;
2718 	sel_val_mask = rdmux2_hdr->select_value_mask;
2719 	read_addr = rdmux2_hdr->read_addr;
2720 
2721 	for (i = 0; i < rdmux2_hdr->op_count; i++) {
2722 		qla8044_wr_reg_indirect(vha, sel_addr1, sel_val1);
2723 		t_sel_val = sel_val1 & sel_val_mask;
2724 		*data_ptr++ = t_sel_val;
2725 
2726 		qla8044_wr_reg_indirect(vha, sel_addr2, t_sel_val);
2727 		qla8044_rd_reg_indirect(vha, read_addr, &data);
2728 
2729 		*data_ptr++ = data;
2730 
2731 		qla8044_wr_reg_indirect(vha, sel_addr1, sel_val2);
2732 		t_sel_val = sel_val2 & sel_val_mask;
2733 		*data_ptr++ = t_sel_val;
2734 
2735 		qla8044_wr_reg_indirect(vha, sel_addr2, t_sel_val);
2736 		qla8044_rd_reg_indirect(vha, read_addr, &data);
2737 
2738 		*data_ptr++ = data;
2739 
2740 		sel_val1 += rdmux2_hdr->select_value_stride;
2741 		sel_val2 += rdmux2_hdr->select_value_stride;
2742 	}
2743 
2744 	*d_ptr = data_ptr;
2745 }
2746 
2747 static uint32_t
2748 qla8044_minidump_process_pollrdmwr(struct scsi_qla_host *vha,
2749 	struct qla8044_minidump_entry_hdr *entry_hdr,
2750 	uint32_t **d_ptr)
2751 {
2752 	uint32_t poll_wait, poll_mask, r_value, data;
2753 	uint32_t addr_1, addr_2, value_1, value_2;
2754 	struct qla8044_minidump_entry_pollrdmwr *poll_hdr;
2755 	uint32_t *data_ptr = *d_ptr;
2756 
2757 	poll_hdr = (struct qla8044_minidump_entry_pollrdmwr *) entry_hdr;
2758 	addr_1 = poll_hdr->addr_1;
2759 	addr_2 = poll_hdr->addr_2;
2760 	value_1 = poll_hdr->value_1;
2761 	value_2 = poll_hdr->value_2;
2762 	poll_mask = poll_hdr->poll_mask;
2763 
2764 	qla8044_wr_reg_indirect(vha, addr_1, value_1);
2765 
2766 	poll_wait = poll_hdr->poll_wait;
2767 	while (1) {
2768 		qla8044_rd_reg_indirect(vha, addr_1, &r_value);
2769 
2770 		if ((r_value & poll_mask) != 0) {
2771 			break;
2772 		} else {
2773 			usleep_range(1000, 1100);
2774 			if (--poll_wait == 0) {
2775 				ql_log(ql_log_fatal, vha, 0xb0ff,
2776 				    "%s: TIMEOUT\n", __func__);
2777 				goto error;
2778 			}
2779 		}
2780 	}
2781 
2782 	qla8044_rd_reg_indirect(vha, addr_2, &data);
2783 	data &= poll_hdr->modify_mask;
2784 	qla8044_wr_reg_indirect(vha, addr_2, data);
2785 	qla8044_wr_reg_indirect(vha, addr_1, value_2);
2786 
2787 	poll_wait = poll_hdr->poll_wait;
2788 	while (1) {
2789 		qla8044_rd_reg_indirect(vha, addr_1, &r_value);
2790 
2791 		if ((r_value & poll_mask) != 0) {
2792 			break;
2793 		} else {
2794 			usleep_range(1000, 1100);
2795 			if (--poll_wait == 0) {
2796 				ql_log(ql_log_fatal, vha, 0xb100,
2797 				    "%s: TIMEOUT2\n", __func__);
2798 				goto error;
2799 			}
2800 		}
2801 	}
2802 
2803 	*data_ptr++ = addr_2;
2804 	*data_ptr++ = data;
2805 
2806 	*d_ptr = data_ptr;
2807 
2808 	return QLA_SUCCESS;
2809 
2810 error:
2811 	return QLA_FUNCTION_FAILED;
2812 }
2813 
2814 #define ISP8044_PEX_DMA_ENGINE_INDEX		8
2815 #define ISP8044_PEX_DMA_BASE_ADDRESS		0x77320000
2816 #define ISP8044_PEX_DMA_NUM_OFFSET		0x10000
2817 #define ISP8044_PEX_DMA_CMD_ADDR_LOW		0x0
2818 #define ISP8044_PEX_DMA_CMD_ADDR_HIGH		0x04
2819 #define ISP8044_PEX_DMA_CMD_STS_AND_CNTRL	0x08
2820 
2821 #define ISP8044_PEX_DMA_READ_SIZE	(16 * 1024)
2822 #define ISP8044_PEX_DMA_MAX_WAIT	(100 * 100) /* Max wait of 100 msecs */
2823 
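/*
 * qla8044_check_dma_engine_state - Check that the pex-dma engine named
 * in the template's saved-state array is available (BIT_31 set in its
 * command/status-and-control register).
 */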
2824 static int
2825 qla8044_check_dma_engine_state(struct scsi_qla_host *vha)
2826 {
2827 	struct qla_hw_data *ha = vha->hw;
2828 	int rval = QLA_SUCCESS;
2829 	uint32_t dma_eng_num = 0, cmd_sts_and_cntrl = 0;
2830 	uint64_t dma_base_addr = 0;
2831 	struct qla8044_minidump_template_hdr *tmplt_hdr = NULL;
2832 
2833 	tmplt_hdr = ha->md_tmplt_hdr;
2834 	dma_eng_num =
2835 	    tmplt_hdr->saved_state_array[ISP8044_PEX_DMA_ENGINE_INDEX];
2836 	dma_base_addr = ISP8044_PEX_DMA_BASE_ADDRESS +
2837 		(dma_eng_num * ISP8044_PEX_DMA_NUM_OFFSET);
2838 
2839 	/* Read the pex-dma's command-status-and-control register. */
2840 	rval = qla8044_rd_reg_indirect(vha,
2841 	    (dma_base_addr + ISP8044_PEX_DMA_CMD_STS_AND_CNTRL),
2842 	    &cmd_sts_and_cntrl);
2843 	if (rval)
2844 		return QLA_FUNCTION_FAILED;
2845 
2846 	/* Check if requested pex-dma engine is available. */
2847 	if (cmd_sts_and_cntrl & BIT_31)
2848 		return QLA_SUCCESS;
2849 
2850 	return QLA_FUNCTION_FAILED;
2851 }
2852 
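/*
 * qla8044_start_pex_dma - Program the pex-dma engine with the descriptor
 * address and start command, then poll (up to ~100 ms) for the DMA busy
 * bit to clear.
 */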
2853 static int
2854 qla8044_start_pex_dma(struct scsi_qla_host *vha,
2855 	struct qla8044_minidump_entry_rdmem_pex_dma *m_hdr)
2856 {
2857 	struct qla_hw_data *ha = vha->hw;
2858 	int rval = QLA_SUCCESS, wait = 0;
2859 	uint32_t dma_eng_num = 0, cmd_sts_and_cntrl = 0;
2860 	uint64_t dma_base_addr = 0;
2861 	struct qla8044_minidump_template_hdr *tmplt_hdr = NULL;
2862 
2863 	tmplt_hdr = ha->md_tmplt_hdr;
2864 	dma_eng_num =
2865 	    tmplt_hdr->saved_state_array[ISP8044_PEX_DMA_ENGINE_INDEX];
2866 	dma_base_addr = ISP8044_PEX_DMA_BASE_ADDRESS +
2867 		(dma_eng_num * ISP8044_PEX_DMA_NUM_OFFSET);
2868 
2869 	rval = qla8044_wr_reg_indirect(vha,
2870 	    dma_base_addr + ISP8044_PEX_DMA_CMD_ADDR_LOW,
2871 	    m_hdr->desc_card_addr);
2872 	if (rval)
2873 		goto error_exit;
2874 
2875 	rval = qla8044_wr_reg_indirect(vha,
2876 	    dma_base_addr + ISP8044_PEX_DMA_CMD_ADDR_HIGH, 0);
2877 	if (rval)
2878 		goto error_exit;
2879 
2880 	rval = qla8044_wr_reg_indirect(vha,
2881 	    dma_base_addr + ISP8044_PEX_DMA_CMD_STS_AND_CNTRL,
2882 	    m_hdr->start_dma_cmd);
2883 	if (rval)
2884 		goto error_exit;
2885 
2886 	/* Wait for dma operation to complete. */
2887 	for (wait = 0; wait < ISP8044_PEX_DMA_MAX_WAIT; wait++) {
2888 		rval = qla8044_rd_reg_indirect(vha,
2889 		    (dma_base_addr + ISP8044_PEX_DMA_CMD_STS_AND_CNTRL),
2890 		    &cmd_sts_and_cntrl);
2891 		if (rval)
2892 			goto error_exit;
2893 
2894 		if ((cmd_sts_and_cntrl & BIT_1) == 0)
2895 			break;
2896 
2897 		udelay(10);
2898 	}
2899 
2900 	/* Wait a max of 100 ms, otherwise fallback to rdmem entry read */
2901 	if (wait >= ISP8044_PEX_DMA_MAX_WAIT) {
2902 		rval = QLA_FUNCTION_FAILED;
2903 		goto error_exit;
2904 	}
2905 
2906 error_exit:
2907 	return rval;
2908 }
2909 
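/*
 * qla8044_minidump_pex_dma_read - Fast RDMEM capture path: read MS memory
 * in ISP8044_PEX_DMA_READ_SIZE chunks using the pex-dma engine and a
 * coherent bounce buffer. On failure the caller falls back to the slower
 * MIU-based rdmem read.
 */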
2910 static int
2911 qla8044_minidump_pex_dma_read(struct scsi_qla_host *vha,
2912 	struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
2913 {
2914 	struct qla_hw_data *ha = vha->hw;
2915 	int rval = QLA_SUCCESS;
2916 	struct qla8044_minidump_entry_rdmem_pex_dma *m_hdr = NULL;
2917 	uint32_t chunk_size, read_size;
2918 	uint8_t *data_ptr = (uint8_t *)*d_ptr;
2919 	void *rdmem_buffer = NULL;
2920 	dma_addr_t rdmem_dma;
2921 	struct qla8044_pex_dma_descriptor dma_desc;
2922 
2923 	rval = qla8044_check_dma_engine_state(vha);
2924 	if (rval != QLA_SUCCESS) {
2925 		ql_dbg(ql_dbg_p3p, vha, 0xb147,
2926 		    "DMA engine not available. Fallback to rdmem-read.\n");
2927 		return QLA_FUNCTION_FAILED;
2928 	}
2929 
2930 	m_hdr = (void *)entry_hdr;
2931 
2932 	rdmem_buffer = dma_alloc_coherent(&ha->pdev->dev,
2933 	    ISP8044_PEX_DMA_READ_SIZE, &rdmem_dma, GFP_KERNEL);
2934 	if (!rdmem_buffer) {
2935 		ql_dbg(ql_dbg_p3p, vha, 0xb148,
2936 		    "Unable to allocate rdmem dma buffer\n");
2937 		return QLA_FUNCTION_FAILED;
2938 	}
2939 
2940 	/* Prepare pex-dma descriptor to be written to MS memory. */
2941 	/* dma-desc-cmd layout:
2942 	 *		0-3: dma-desc-cmd 0-3
2943 	 *		4-7: pcid function number
2944 	 *		8-15: dma-desc-cmd 8-15
2945 	 * dma_bus_addr: dma buffer address
2946 	 * cmd.read_data_size: amount of data-chunk to be read.
2947 	 */
2948 	dma_desc.cmd.dma_desc_cmd = (m_hdr->dma_desc_cmd & 0xff0f);
2949 	dma_desc.cmd.dma_desc_cmd |=
2950 	    ((PCI_FUNC(ha->pdev->devfn) & 0xf) << 0x4);
2951 
2952 	dma_desc.dma_bus_addr = rdmem_dma;
2953 	dma_desc.cmd.read_data_size = chunk_size = ISP8044_PEX_DMA_READ_SIZE;
2954 	read_size = 0;
2955 
2956 	/*
2957 	 * Perform rdmem operation using pex-dma.
2958 	 * Prepare dma in chunks of ISP8044_PEX_DMA_READ_SIZE.
2959 	 */
2960 	while (read_size < m_hdr->read_data_size) {
2961 		if (m_hdr->read_data_size - read_size <
2962 		    ISP8044_PEX_DMA_READ_SIZE) {
2963 			chunk_size = (m_hdr->read_data_size - read_size);
2964 			dma_desc.cmd.read_data_size = chunk_size;
2965 		}
2966 
2967 		dma_desc.src_addr = m_hdr->read_addr + read_size;
2968 
2969 		/* Prepare: Write pex-dma descriptor to MS memory. */
2970 		rval = qla8044_ms_mem_write_128b(vha,
2971 		    m_hdr->desc_card_addr, (void *)&dma_desc,
2972 		    (sizeof(struct qla8044_pex_dma_descriptor)/16));
2973 		if (rval) {
2974 			ql_log(ql_log_warn, vha, 0xb14a,
2975 			    "%s: Error writing rdmem-dma-init to MS !!!\n",
2976 			    __func__);
2977 			goto error_exit;
2978 		}
2979 		ql_dbg(ql_dbg_p3p, vha, 0xb14b,
2980 		    "%s: Dma-descriptor: Instruct for rdmem dma "
2981 		    "(chunk_size 0x%x).\n", __func__, chunk_size);
2982 
2983 		/* Execute: Start pex-dma operation. */
2984 		rval = qla8044_start_pex_dma(vha, m_hdr);
2985 		if (rval)
2986 			goto error_exit;
2987 
2988 		memcpy(data_ptr, rdmem_buffer, chunk_size);
2989 		data_ptr += chunk_size;
2990 		read_size += chunk_size;
2991 	}
2992 
2993 	*d_ptr = (void *)data_ptr;
2994 
2995 error_exit:
2996 	if (rdmem_buffer)
2997 		dma_free_coherent(&ha->pdev->dev, ISP8044_PEX_DMA_READ_SIZE,
2998 		    rdmem_buffer, rdmem_dma);
2999 
3000 	return rval;
3001 }
3002 
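/*
 * qla8044_minidump_process_rddfe - Capture a RDDFE minidump entry:
 * for each of 'count' iterations, trigger an access through the
 * addr_1/addr_2 register pair, polling addr_1 for the mask bits after
 * each write, and store the written-value/read-data pairs.
 */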
3003 static uint32_t
3004 qla8044_minidump_process_rddfe(struct scsi_qla_host *vha,
3005 	struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
3006 {
3007 	int loop_cnt;
3008 	uint32_t addr1, addr2, value, data, temp, wrVal;
3009 	uint8_t stride, stride2;
3010 	uint16_t count;
3011 	uint32_t poll, mask, modify_mask;
3012 	uint32_t wait_count = 0;
3013 
3014 	uint32_t *data_ptr = *d_ptr;
3015 
3016 	struct qla8044_minidump_entry_rddfe *rddfe;
3017 	rddfe = (struct qla8044_minidump_entry_rddfe *) entry_hdr;
3018 
3019 	addr1 = rddfe->addr_1;
3020 	value = rddfe->value;
3021 	stride = rddfe->stride;
3022 	stride2 = rddfe->stride2;
3023 	count = rddfe->count;
3024 
3025 	poll = rddfe->poll;
3026 	mask = rddfe->mask;
3027 	modify_mask = rddfe->modify_mask;
3028 
3029 	addr2 = addr1 + stride;
3030 
3031 	for (loop_cnt = 0x0; loop_cnt < count; loop_cnt++) {
3032 		qla8044_wr_reg_indirect(vha, addr1, (0x40000000 | value));
3033 
3034 		wait_count = 0;
3035 		while (wait_count < poll) {
3036 			qla8044_rd_reg_indirect(vha, addr1, &temp);
3037 			if ((temp & mask) != 0)
3038 				break;
3039 			wait_count++;
3040 		}
3041 
3042 		if (wait_count == poll) {
3043 			ql_log(ql_log_warn, vha, 0xb153,
3044 			    "%s: TIMEOUT\n", __func__);
3045 			goto error;
3046 		} else {
3047 			qla8044_rd_reg_indirect(vha, addr2, &temp);
3048 			temp = temp & modify_mask;
3049 			temp = (temp | ((loop_cnt << 16) | loop_cnt));
3050 			wrVal = ((temp << 16) | temp);
3051 
3052 			qla8044_wr_reg_indirect(vha, addr2, wrVal);
3053 			qla8044_wr_reg_indirect(vha, addr1, value);
3054 
3055 			wait_count = 0;
3056 			while (wait_count < poll) {
3057 				qla8044_rd_reg_indirect(vha, addr1, &temp);
3058 				if ((temp & mask) != 0)
3059 					break;
3060 				wait_count++;
3061 			}
3062 			if (wait_count == poll) {
3063 				ql_log(ql_log_warn, vha, 0xb154,
3064 				    "%s: TIMEOUT\n", __func__);
3065 				goto error;
3066 			}
3067 
3068 			qla8044_wr_reg_indirect(vha, addr1,
3069 			    ((0x40000000 | value) + stride2));
3070 			wait_count = 0;
3071 			while (wait_count < poll) {
3072 				qla8044_rd_reg_indirect(vha, addr1, &temp);
3073 				if ((temp & mask) != 0)
3074 					break;
3075 				wait_count++;
3076 			}
3077 
3078 			if (wait_count == poll) {
3079 				ql_log(ql_log_warn, vha, 0xb155,
3080 				    "%s: TIMEOUT\n", __func__);
3081 				goto error;
3082 			}
3083 
3084 			qla8044_rd_reg_indirect(vha, addr2, &data);
3085 
3086 			*data_ptr++ = wrVal;
3087 			*data_ptr++ = data;
3088 		}
3089 
3090 	}
3091 
3092 	*d_ptr = data_ptr;
3093 	return QLA_SUCCESS;
3094 
3095 error:
3096 	return -1;
3097 
3098 }
3099 
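/*
 * qla8044_minidump_process_rdmdio - Capture a RDMDIO minidump entry:
 * step through 'count' MDIO reads using the ipmdio helper routines,
 * waiting for the MDIO bus to go idle between operations, and store the
 * select-value/data pairs in the dump buffer.
 */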
3100 static uint32_t
3101 qla8044_minidump_process_rdmdio(struct scsi_qla_host *vha,
3102 	struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
3103 {
3104 	int ret = 0;
3105 	uint32_t addr1, addr2, value1, value2, data, selVal;
3106 	uint8_t stride1, stride2;
3107 	uint32_t addr3, addr4, addr5, addr6, addr7;
3108 	uint16_t count, loop_cnt;
3109 	uint32_t mask;
3110 	uint32_t *data_ptr = *d_ptr;
3111 
3112 	struct qla8044_minidump_entry_rdmdio *rdmdio;
3113 
3114 	rdmdio = (struct qla8044_minidump_entry_rdmdio *) entry_hdr;
3115 
3116 	addr1 = rdmdio->addr_1;
3117 	addr2 = rdmdio->addr_2;
3118 	value1 = rdmdio->value_1;
3119 	stride1 = rdmdio->stride_1;
3120 	stride2 = rdmdio->stride_2;
3121 	count = rdmdio->count;
3122 
3123 	mask = rdmdio->mask;
3124 	value2 = rdmdio->value_2;
3125 
3126 	addr3 = addr1 + stride1;
3127 
3128 	for (loop_cnt = 0; loop_cnt < count; loop_cnt++) {
3129 		ret = qla8044_poll_wait_ipmdio_bus_idle(vha, addr1, addr2,
3130 		    addr3, mask);
3131 		if (ret == -1)
3132 			goto error;
3133 
3134 		addr4 = addr2 - stride1;
3135 		ret = qla8044_ipmdio_wr_reg(vha, addr1, addr3, mask, addr4,
3136 		    value2);
3137 		if (ret == -1)
3138 			goto error;
3139 
3140 		addr5 = addr2 - (2 * stride1);
3141 		ret = qla8044_ipmdio_wr_reg(vha, addr1, addr3, mask, addr5,
3142 		    value1);
3143 		if (ret == -1)
3144 			goto error;
3145 
3146 		addr6 = addr2 - (3 * stride1);
3147 		ret = qla8044_ipmdio_wr_reg(vha, addr1, addr3, mask,
3148 		    addr6, 0x2);
3149 		if (ret == -1)
3150 			goto error;
3151 
3152 		ret = qla8044_poll_wait_ipmdio_bus_idle(vha, addr1, addr2,
3153 		    addr3, mask);
3154 		if (ret == -1)
3155 			goto error;
3156 
3157 		addr7 = addr2 - (4 * stride1);
3158 		data = qla8044_ipmdio_rd_reg(vha, addr1, addr3, mask, addr7);
3159 		if (data == -1)
3160 			goto error;
3161 
3162 		selVal = (value2 << 18) | (value1 << 2) | 2;
3163 
3164 		stride2 = rdmdio->stride_2;
3165 		*data_ptr++ = selVal;
3166 		*data_ptr++ = data;
3167 
3168 		value1 = value1 + stride2;
3169 		*d_ptr = data_ptr;
3170 	}
3171 
3172 	return 0;
3173 
3174 error:
3175 	return -1;
3176 }
3177 
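/*
 * qla8044_minidump_process_pollwr - Process a POLLWR minidump entry:
 * poll addr_1 until the poll bits are set, write value_2/value_1 to
 * addr_2/addr_1, then poll addr_1 again for completion. Nothing is
 * written to the dump buffer for this entry type.
 */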
3178 static uint32_t qla8044_minidump_process_pollwr(struct scsi_qla_host *vha,
3179 		struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
3180 {
3181 	uint32_t addr1, addr2, value1, value2, poll, r_value;
3182 	uint32_t wait_count = 0;
3183 	struct qla8044_minidump_entry_pollwr *pollwr_hdr;
3184 
3185 	pollwr_hdr = (struct qla8044_minidump_entry_pollwr *)entry_hdr;
3186 	addr1 = pollwr_hdr->addr_1;
3187 	addr2 = pollwr_hdr->addr_2;
3188 	value1 = pollwr_hdr->value_1;
3189 	value2 = pollwr_hdr->value_2;
3190 
3191 	poll = pollwr_hdr->poll;
3192 
3193 	while (wait_count < poll) {
3194 		qla8044_rd_reg_indirect(vha, addr1, &r_value);
3195 
3196 		if ((r_value & poll) != 0)
3197 			break;
3198 		wait_count++;
3199 	}
3200 
3201 	if (wait_count == poll) {
3202 		ql_log(ql_log_warn, vha, 0xb156, "%s: TIMEOUT\n", __func__);
3203 		goto error;
3204 	}
3205 
3206 	qla8044_wr_reg_indirect(vha, addr2, value2);
3207 	qla8044_wr_reg_indirect(vha, addr1, value1);
3208 
3209 	wait_count = 0;
3210 	while (wait_count < poll) {
3211 		qla8044_rd_reg_indirect(vha, addr1, &r_value);
3212 
3213 		if ((r_value & poll) != 0)
3214 			break;
3215 		wait_count++;
3216 	}
3217 
3218 	return QLA_SUCCESS;
3219 
3220 error:
3221 	return -1;
3222 }
3223 
3224 /**
3226  * qla8044_collect_md_data - Retrieve firmware minidump data.
3227  * @vha: pointer to adapter structure
3228  **/
3229 int
3230 qla8044_collect_md_data(struct scsi_qla_host *vha)
3231 {
3232 	int num_entry_hdr = 0;
3233 	struct qla8044_minidump_entry_hdr *entry_hdr;
3234 	struct qla8044_minidump_template_hdr *tmplt_hdr;
3235 	uint32_t *data_ptr;
3236 	uint32_t data_collected = 0, f_capture_mask;
3237 	int i, rval = QLA_FUNCTION_FAILED;
3238 	uint64_t now;
3239 	uint32_t timestamp, idc_control;
3240 	struct qla_hw_data *ha = vha->hw;
3241 
3242 	if (!ha->md_dump) {
3243 		ql_log(ql_log_info, vha, 0xb101,
3244 		    "%s(%ld) No buffer to dump\n",
3245 		    __func__, vha->host_no);
3246 		return rval;
3247 	}
3248 
3249 	if (ha->fw_dumped) {
3250 		ql_log(ql_log_warn, vha, 0xb10d,
3251 		    "Firmware has been previously dumped (%p) "
3252 		    "-- ignoring request.\n", ha->fw_dump);
3253 		goto md_failed;
3254 	}
3255 
3256 	ha->fw_dumped = 0;
3257 
3258 	if (!ha->md_tmplt_hdr || !ha->md_dump) {
3259 		ql_log(ql_log_warn, vha, 0xb10e,
3260 		    "Memory not allocated for minidump capture\n");
3261 		goto md_failed;
3262 	}
3263 
3264 	qla8044_idc_lock(ha);
3265 	idc_control = qla8044_rd_reg(ha, QLA8044_IDC_DRV_CTRL);
3266 	if (idc_control & GRACEFUL_RESET_BIT1) {
3267 		ql_log(ql_log_warn, vha, 0xb112,
3268 		    "Forced reset from application, "
3269 		    "ignore minidump capture\n");
3270 		qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL,
3271 		    (idc_control & ~GRACEFUL_RESET_BIT1));
3272 		qla8044_idc_unlock(ha);
3273 
3274 		goto md_failed;
3275 	}
3276 	qla8044_idc_unlock(ha);
3277 
3278 	if (qla82xx_validate_template_chksum(vha)) {
3279 		ql_log(ql_log_info, vha, 0xb109,
3280 		    "Template checksum validation error\n");
3281 		goto md_failed;
3282 	}
3283 
3284 	tmplt_hdr = (struct qla8044_minidump_template_hdr *)
3285 		ha->md_tmplt_hdr;
3286 	data_ptr = (uint32_t *)((uint8_t *)ha->md_dump);
3287 	num_entry_hdr = tmplt_hdr->num_of_entries;
3288 
3289 	ql_dbg(ql_dbg_p3p, vha, 0xb11a,
3290 	    "Capture Mask obtained: 0x%x\n", tmplt_hdr->capture_debug_level);
3291 
3292 	f_capture_mask = tmplt_hdr->capture_debug_level & 0xFF;
3293 
3294 	/* Validate whether required debug level is set */
3295 	if ((f_capture_mask & 0x3) != 0x3) {
3296 		ql_log(ql_log_warn, vha, 0xb10f,
3297 		    "Minimum required capture mask[0x%x] level not set\n",
3298 		    f_capture_mask);
3300 	}
3301 	tmplt_hdr->driver_capture_mask = ql2xmdcapmask;
3302 	ql_log(ql_log_info, vha, 0xb102,
3303 	    "[%s]: starting data ptr: %p\n",
3304 	   __func__, data_ptr);
3305 	ql_log(ql_log_info, vha, 0xb10b,
3306 	   "[%s]: no of entry headers in Template: 0x%x\n",
3307 	   __func__, num_entry_hdr);
3308 	ql_log(ql_log_info, vha, 0xb10c,
3309 	    "[%s]: Total_data_size 0x%x, %d obtained\n",
3310 	   __func__, ha->md_dump_size, ha->md_dump_size);
3311 
3312 	/* Update current timestamp before taking dump */
3313 	now = get_jiffies_64();
3314 	timestamp = (u32)(jiffies_to_msecs(now) / 1000);
3315 	tmplt_hdr->driver_timestamp = timestamp;
3316 
3317 	entry_hdr = (struct qla8044_minidump_entry_hdr *)
3318 		(((uint8_t *)ha->md_tmplt_hdr) + tmplt_hdr->first_entry_offset);
3319 	tmplt_hdr->saved_state_array[QLA8044_SS_OCM_WNDREG_INDEX] =
3320 	    tmplt_hdr->ocm_window_reg[ha->portnum];
3321 
3322 	/* Walk through the entry headers - validate/perform required action */
3323 	for (i = 0; i < num_entry_hdr; i++) {
3324 		if (data_collected > ha->md_dump_size) {
3325 			ql_log(ql_log_info, vha, 0xb103,
3326 			    "Data collected: [0x%x], "
3327 			    "Total Dump size: [0x%x]\n",
3328 			    data_collected, ha->md_dump_size);
3329 			return rval;
3330 		}
3331 
3332 		if (!(entry_hdr->d_ctrl.entry_capture_mask &
3333 		      ql2xmdcapmask)) {
3334 			entry_hdr->d_ctrl.driver_flags |=
3335 			    QLA82XX_DBG_SKIPPED_FLAG;
3336 			goto skip_nxt_entry;
3337 		}
3338 
3339 		ql_dbg(ql_dbg_p3p, vha, 0xb104,
3340 		    "Data collected: [0x%x], Dump size left:[0x%x]\n",
3341 		    data_collected,
3342 		    (ha->md_dump_size - data_collected));
3343 
3344 		/* Decode the entry type and take required action to capture
3345 		 * debug data
3346 		 */
3347 		switch (entry_hdr->entry_type) {
3348 		case QLA82XX_RDEND:
3349 			qla8044_mark_entry_skipped(vha, entry_hdr, i);
3350 			break;
3351 		case QLA82XX_CNTRL:
3352 			rval = qla8044_minidump_process_control(vha,
3353 			    entry_hdr);
3354 			if (rval != QLA_SUCCESS) {
3355 				qla8044_mark_entry_skipped(vha, entry_hdr, i);
3356 				goto md_failed;
3357 			}
3358 			break;
3359 		case QLA82XX_RDCRB:
3360 			qla8044_minidump_process_rdcrb(vha,
3361 			    entry_hdr, &data_ptr);
3362 			break;
3363 		case QLA82XX_RDMEM:
3364 			rval = qla8044_minidump_pex_dma_read(vha,
3365 			    entry_hdr, &data_ptr);
3366 			if (rval != QLA_SUCCESS) {
3367 				rval = qla8044_minidump_process_rdmem(vha,
3368 				    entry_hdr, &data_ptr);
3369 				if (rval != QLA_SUCCESS) {
3370 					qla8044_mark_entry_skipped(vha,
3371 					    entry_hdr, i);
3372 					goto md_failed;
3373 				}
3374 			}
3375 			break;
3376 		case QLA82XX_BOARD:
3377 		case QLA82XX_RDROM:
3378 			rval = qla8044_minidump_process_rdrom(vha,
3379 			    entry_hdr, &data_ptr);
3380 			if (rval != QLA_SUCCESS) {
3381 				qla8044_mark_entry_skipped(vha,
3382 				    entry_hdr, i);
3383 			}
3384 			break;
3385 		case QLA82XX_L2DTG:
3386 		case QLA82XX_L2ITG:
3387 		case QLA82XX_L2DAT:
3388 		case QLA82XX_L2INS:
3389 			rval = qla8044_minidump_process_l2tag(vha,
3390 			    entry_hdr, &data_ptr);
3391 			if (rval != QLA_SUCCESS) {
3392 				qla8044_mark_entry_skipped(vha, entry_hdr, i);
3393 				goto md_failed;
3394 			}
3395 			break;
3396 		case QLA8044_L1DTG:
3397 		case QLA8044_L1ITG:
3398 		case QLA82XX_L1DAT:
3399 		case QLA82XX_L1INS:
3400 			qla8044_minidump_process_l1cache(vha,
3401 			    entry_hdr, &data_ptr);
3402 			break;
3403 		case QLA82XX_RDOCM:
3404 			qla8044_minidump_process_rdocm(vha,
3405 			    entry_hdr, &data_ptr);
3406 			break;
3407 		case QLA82XX_RDMUX:
3408 			qla8044_minidump_process_rdmux(vha,
3409 			    entry_hdr, &data_ptr);
3410 			break;
3411 		case QLA82XX_QUEUE:
3412 			qla8044_minidump_process_queue(vha,
3413 			    entry_hdr, &data_ptr);
3414 			break;
3415 		case QLA8044_POLLRD:
3416 			rval = qla8044_minidump_process_pollrd(vha,
3417 			    entry_hdr, &data_ptr);
3418 			if (rval != QLA_SUCCESS)
3419 				qla8044_mark_entry_skipped(vha, entry_hdr, i);
3420 			break;
3421 		case QLA8044_RDMUX2:
3422 			qla8044_minidump_process_rdmux2(vha,
3423 			    entry_hdr, &data_ptr);
3424 			break;
3425 		case QLA8044_POLLRDMWR:
3426 			rval = qla8044_minidump_process_pollrdmwr(vha,
3427 			    entry_hdr, &data_ptr);
3428 			if (rval != QLA_SUCCESS)
3429 				qla8044_mark_entry_skipped(vha, entry_hdr, i);
3430 			break;
3431 		case QLA8044_RDDFE:
3432 			rval = qla8044_minidump_process_rddfe(vha, entry_hdr,
3433 			    &data_ptr);
3434 			if (rval != QLA_SUCCESS)
3435 				qla8044_mark_entry_skipped(vha, entry_hdr, i);
3436 			break;
3437 		case QLA8044_RDMDIO:
3438 			rval = qla8044_minidump_process_rdmdio(vha, entry_hdr,
3439 			    &data_ptr);
3440 			if (rval != QLA_SUCCESS)
3441 				qla8044_mark_entry_skipped(vha, entry_hdr, i);
3442 			break;
3443 		case QLA8044_POLLWR:
3444 			rval = qla8044_minidump_process_pollwr(vha, entry_hdr,
3445 			    &data_ptr);
3446 			if (rval != QLA_SUCCESS)
3447 				qla8044_mark_entry_skipped(vha, entry_hdr, i);
3448 			break;
3449 		case QLA82XX_RDNOP:
3450 		default:
3451 			qla8044_mark_entry_skipped(vha, entry_hdr, i);
3452 			break;
3453 		}
3454 
3455 		data_collected = (uint8_t *)data_ptr -
3456 		    (uint8_t *)ha->md_dump;
3457 skip_nxt_entry:
3458 		/*
3459 		 * next entry in the template
3460 		 */
3461 		entry_hdr = (struct qla8044_minidump_entry_hdr *)
3462 		    (((uint8_t *)entry_hdr) + entry_hdr->entry_size);
3463 	}
3464 
3465 	if (data_collected != ha->md_dump_size) {
3466 		ql_log(ql_log_info, vha, 0xb105,
3467 		    "Dump data mismatch: Data collected: "
3468 		    "[0x%x], total_data_size:[0x%x]\n",
3469 		    data_collected, ha->md_dump_size);
3470 		rval = QLA_FUNCTION_FAILED;
3471 		goto md_failed;
3472 	}
3473 
3474 	ql_log(ql_log_info, vha, 0xb110,
3475 	    "Firmware dump saved to temp buffer (%ld/%p %ld/%p).\n",
3476 	    vha->host_no, ha->md_tmplt_hdr, vha->host_no, ha->md_dump);
3477 	ha->fw_dumped = 1;
3478 	qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
3479 
3481 	ql_log(ql_log_info, vha, 0xb106,
3482 	    "Leaving fn: %s Last entry: 0x%x\n",
3483 	    __func__, i);
3484 md_failed:
3485 	return rval;
3486 }
3487 
3488 void
3489 qla8044_get_minidump(struct scsi_qla_host *vha)
3490 {
3491 	struct qla_hw_data *ha = vha->hw;
3492 
3493 	if (!qla8044_collect_md_data(vha)) {
3494 		ha->fw_dumped = 1;
3495 		ha->prev_minidump_failed = 0;
3496 	} else {
3497 		ql_log(ql_log_fatal, vha, 0xb0db,
3498 		    "%s: Unable to collect minidump\n",
3499 		    __func__);
3500 		ha->prev_minidump_failed = 1;
3501 	}
3502 }
3503 
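/*
 * qla8044_poll_flash_status_reg - Poll the flash status register until
 * the ready bits are set or the retry count is exhausted.
 */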
3504 static int
3505 qla8044_poll_flash_status_reg(struct scsi_qla_host *vha)
3506 {
3507 	uint32_t flash_status;
3508 	int retries = QLA8044_FLASH_READ_RETRY_COUNT;
3509 	int ret_val = QLA_SUCCESS;
3510 
3511 	while (retries--) {
3512 		ret_val = qla8044_rd_reg_indirect(vha, QLA8044_FLASH_STATUS,
3513 		    &flash_status);
3514 		if (ret_val) {
3515 			ql_log(ql_log_warn, vha, 0xb13c,
3516 			    "%s: Failed to read FLASH_STATUS reg.\n",
3517 			    __func__);
3518 			break;
3519 		}
3520 		if ((flash_status & QLA8044_FLASH_STATUS_READY) ==
3521 		    QLA8044_FLASH_STATUS_READY)
3522 			break;
3523 		msleep(QLA8044_FLASH_STATUS_REG_POLL_DELAY);
3524 	}
3525 
3526 	if (!retries)
3527 		ret_val = QLA_FUNCTION_FAILED;
3528 
3529 	return ret_val;
3530 }
3531 
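/*
 * qla8044_write_flash_status_reg - Write 'data' to the flash part's
 * status register using the write-status command from the flash
 * descriptor table, then wait for the flash to become ready.
 */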
3532 static int
3533 qla8044_write_flash_status_reg(struct scsi_qla_host *vha,
3534 			       uint32_t data)
3535 {
3536 	int ret_val = QLA_SUCCESS;
3537 	uint32_t cmd;
3538 
3539 	cmd = vha->hw->fdt_wrt_sts_reg_cmd;
3540 
3541 	ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR,
3542 	    QLA8044_FLASH_STATUS_WRITE_DEF_SIG | cmd);
3543 	if (ret_val) {
3544 		ql_log(ql_log_warn, vha, 0xb125,
3545 		    "%s: Failed to write to FLASH_ADDR.\n", __func__);
3546 		goto exit_func;
3547 	}
3548 
3549 	ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_WRDATA, data);
3550 	if (ret_val) {
3551 		ql_log(ql_log_warn, vha, 0xb126,
3552 		    "%s: Failed to write to FLASH_WRDATA.\n", __func__);
3553 		goto exit_func;
3554 	}
3555 
3556 	ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL,
3557 	    QLA8044_FLASH_SECOND_ERASE_MS_VAL);
3558 	if (ret_val) {
3559 		ql_log(ql_log_warn, vha, 0xb127,
3560 		    "%s: Failed to write to FLASH_CONTROL.\n", __func__);
3561 		goto exit_func;
3562 	}
3563 
3564 	ret_val = qla8044_poll_flash_status_reg(vha);
3565 	if (ret_val)
3566 		ql_log(ql_log_warn, vha, 0xb128,
3567 		    "%s: Error polling flash status reg.\n", __func__);
3568 
3569 exit_func:
3570 	return ret_val;
3571 }
3572 
3573 /*
3574  * This function assumes that the flash lock is held.
3575  */
3576 static int
3577 qla8044_unprotect_flash(scsi_qla_host_t *vha)
3578 {
3579 	int ret_val;
3580 	struct qla_hw_data *ha = vha->hw;
3581 
3582 	ret_val = qla8044_write_flash_status_reg(vha, ha->fdt_wrt_enable);
3583 	if (ret_val)
3584 		ql_log(ql_log_warn, vha, 0xb139,
3585 		    "%s: Write flash status failed.\n", __func__);
3586 
3587 	return ret_val;
3588 }
3589 
3590 /*
3591  * This function assumes that the flash lock is held.
3592  */
3593 static int
3594 qla8044_protect_flash(scsi_qla_host_t *vha)
3595 {
3596 	int ret_val;
3597 	struct qla_hw_data *ha = vha->hw;
3598 
3599 	ret_val = qla8044_write_flash_status_reg(vha, ha->fdt_wrt_disable);
3600 	if (ret_val)
3601 		ql_log(ql_log_warn, vha, 0xb13b,
3602 		    "%s: Write flash status failed.\n", __func__);
3603 
3604 	return ret_val;
3605 }
3606 
3607 
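/*
 * qla8044_erase_flash_sector - Erase one flash sector.
 *
 * Waits for the flash part to go ready, writes the byte-swapped 24-bit
 * sector address to FLASH_WRDATA, issues the erase command (fdt_erase_cmd)
 * through FLASH_ADDR/FLASH_CONTROL and polls for completion.
 */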
3608 static int
3609 qla8044_erase_flash_sector(struct scsi_qla_host *vha,
3610 			   uint32_t sector_start_addr)
3611 {
3612 	uint32_t reversed_addr;
3613 	int ret_val = QLA_SUCCESS;
3614 
3615 	ret_val = qla8044_poll_flash_status_reg(vha);
3616 	if (ret_val) {
3617 		ql_log(ql_log_warn, vha, 0xb12e,
3618 		    "%s: Poll flash status after erase failed.\n", __func__);
3619 	}
3620 
3621 	reversed_addr = (((sector_start_addr & 0xFF) << 16) |
3622 	    (sector_start_addr & 0xFF00) |
3623 	    ((sector_start_addr & 0xFF0000) >> 16));
3624 
3625 	ret_val = qla8044_wr_reg_indirect(vha,
3626 	    QLA8044_FLASH_WRDATA, reversed_addr);
3627 	if (ret_val) {
3628 		ql_log(ql_log_warn, vha, 0xb12f,
3629 		    "%s: Failed to write to FLASH_WRDATA.\n", __func__);
3630 	}
3631 	ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR,
3632 	   QLA8044_FLASH_ERASE_SIG | vha->hw->fdt_erase_cmd);
3633 	if (ret_val) {
3634 		ql_log(ql_log_warn, vha, 0xb130,
3635 		    "%s: Failed to write to FLASH_ADDR.\n", __func__);
3636 	}
3637 	ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL,
3638 	    QLA8044_FLASH_LAST_ERASE_MS_VAL);
3639 	if (ret_val) {
3640 		ql_log(ql_log_warn, vha, 0xb131,
3641 		    "%s: Failed write to FLASH_CONTROL.\n", __func__);
3642 	}
3643 	ret_val = qla8044_poll_flash_status_reg(vha);
3644 	if (ret_val) {
3645 		ql_log(ql_log_warn, vha, 0xb132,
3646 		    "%s: Poll flash status failed.\n", __func__);
3647 	}
3648 
3649 
3650 	return ret_val;
3651 }
3652 
3653 /*
3654  * qla8044_flash_write_u32 - Write data to flash
3655  *
3656  * @vha : Pointer to host adapter structure
3657  * @addr : Flash address to write to
3658  * @p_data : Pointer to data to be written
3659  *
3660  * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED
3661  *
3662  * NOTE: Lock should be held on entry
3663  */
3664 static int
3665 qla8044_flash_write_u32(struct scsi_qla_host *vha, uint32_t addr,
3666 			uint32_t *p_data)
3667 {
3668 	int ret_val = QLA_SUCCESS;
3669 
3670 	ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR,
3671 	    0x00800000 | (addr >> 2));
3672 	if (ret_val) {
3673 		ql_log(ql_log_warn, vha, 0xb134,
3674 		    "%s: Failed write to FLASH_ADDR.\n", __func__);
3675 		goto exit_func;
3676 	}
3677 	ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_WRDATA, *p_data);
3678 	if (ret_val) {
3679 		ql_log(ql_log_warn, vha, 0xb135,
3680 		    "%s: Failed write to FLASH_WRDATA.\n", __func__);
3681 		goto exit_func;
3682 	}
3683 	ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL, 0x3D);
3684 	if (ret_val) {
3685 		ql_log(ql_log_warn, vha, 0xb136,
3686 		    "%s: Failed write to FLASH_CONTROL.\n", __func__);
3687 		goto exit_func;
3688 	}
3689 	ret_val = qla8044_poll_flash_status_reg(vha);
3690 	if (ret_val) {
3691 		ql_log(ql_log_warn, vha, 0xb137,
3692 		    "%s: Poll flash status failed.\n", __func__);
3693 	}
3694 
3695 exit_func:
3696 	return ret_val;
3697 }
3698 
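/*
 * qla8044_write_flash_buffer_mode - Fast (burst) flash write path.
 *
 * Programs the requested number of DWORDs (bounded by
 * QLA8044_MIN_OPTROM_BURST_DWORDS and QLA8044_MAX_OPTROM_BURST_DWORDS)
 * starting at flash address faddr using the SPI buffer-mode sequence:
 * first, intermediate and last DWORD control patterns, each followed by a
 * status poll. FLASH_SPI_STATUS is checked at the end and the error bit
 * is cleared if the operation failed.
 */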
3699 static int
3700 qla8044_write_flash_buffer_mode(scsi_qla_host_t *vha, uint32_t *dwptr,
3701 				uint32_t faddr, uint32_t dwords)
3702 {
3703 	int ret = QLA_FUNCTION_FAILED;
3704 	uint32_t spi_val;
3705 
3706 	if (dwords < QLA8044_MIN_OPTROM_BURST_DWORDS ||
3707 	    dwords > QLA8044_MAX_OPTROM_BURST_DWORDS) {
3708 		ql_dbg(ql_dbg_user, vha, 0xb123,
3709 		    "Got unsupported dwords = 0x%x.\n",
3710 		    dwords);
3711 		return QLA_FUNCTION_FAILED;
3712 	}
3713 
3714 	qla8044_rd_reg_indirect(vha, QLA8044_FLASH_SPI_CONTROL, &spi_val);
3715 	qla8044_wr_reg_indirect(vha, QLA8044_FLASH_SPI_CONTROL,
3716 	    spi_val | QLA8044_FLASH_SPI_CTL);
3717 	qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR,
3718 	    QLA8044_FLASH_FIRST_TEMP_VAL);
3719 
3720 	/* First DWORD write to FLASH_WRDATA */
3721 	ret = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_WRDATA,
3722 	    *dwptr++);
3723 	qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL,
3724 	    QLA8044_FLASH_FIRST_MS_PATTERN);
3725 
3726 	ret = qla8044_poll_flash_status_reg(vha);
3727 	if (ret) {
3728 		ql_log(ql_log_warn, vha, 0xb124,
3729 		    "%s: Failed.\n", __func__);
3730 		goto exit_func;
3731 	}
3732 
3733 	dwords--;
3734 
3735 	qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR,
3736 	    QLA8044_FLASH_SECOND_TEMP_VAL);
3737 
3738 
3739 	/* Write DWORDs 2 through N-1 */
3740 	while (dwords != 1) {
3741 		qla8044_wr_reg_indirect(vha, QLA8044_FLASH_WRDATA, *dwptr++);
3742 		qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL,
3743 		    QLA8044_FLASH_SECOND_MS_PATTERN);
3744 		ret = qla8044_poll_flash_status_reg(vha);
3745 		if (ret) {
3746 			ql_log(ql_log_warn, vha, 0xb129,
3747 			    "%s: Failed.\n", __func__);
3748 			goto exit_func;
3749 		}
3750 		dwords--;
3751 	}
3752 
3753 	qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR,
3754 	    QLA8044_FLASH_FIRST_TEMP_VAL | (faddr >> 2));
3755 
3756 	/* Last DWORD write */
3757 	qla8044_wr_reg_indirect(vha, QLA8044_FLASH_WRDATA, *dwptr++);
3758 	qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL,
3759 	    QLA8044_FLASH_LAST_MS_PATTERN);
3760 	ret = qla8044_poll_flash_status_reg(vha);
3761 	if (ret) {
3762 		ql_log(ql_log_warn, vha, 0xb12a,
3763 		    "%s: Failed.\n", __func__);
3764 		goto exit_func;
3765 	}
3766 	qla8044_rd_reg_indirect(vha, QLA8044_FLASH_SPI_STATUS, &spi_val);
3767 
3768 	if ((spi_val & QLA8044_FLASH_SPI_CTL) == QLA8044_FLASH_SPI_CTL) {
3769 		ql_log(ql_log_warn, vha, 0xb12b,
3770 		    "%s: Failed.\n", __func__);
3771 		spi_val = 0;
3772 		/* Operation failed, clear error bit. */
3773 		qla8044_rd_reg_indirect(vha, QLA8044_FLASH_SPI_CONTROL,
3774 		    &spi_val);
3775 		qla8044_wr_reg_indirect(vha, QLA8044_FLASH_SPI_CONTROL,
3776 		    spi_val | QLA8044_FLASH_SPI_CTL);
3777 	}
3778 exit_func:
3779 	return ret;
3780 }
3781 
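/*
 * qla8044_write_flash_dword_mode - Slow flash write path.
 *
 * Programs the requested DWORDs one at a time starting at flash address
 * faddr, using qla8044_flash_write_u32() for each word. Used as a fallback
 * when buffer-mode writes fail.
 */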
3782 static int
3783 qla8044_write_flash_dword_mode(scsi_qla_host_t *vha, uint32_t *dwptr,
3784 			       uint32_t faddr, uint32_t dwords)
3785 {
3786 	int ret = QLA_FUNCTION_FAILED;
3787 	uint32_t liter;
3788 
3789 	for (liter = 0; liter < dwords; liter++, faddr += 4, dwptr++) {
3790 		ret = qla8044_flash_write_u32(vha, faddr, dwptr);
3791 		if (ret) {
3792 			ql_dbg(ql_dbg_p3p, vha, 0xb141,
3793 			    "%s: flash address=%x data=%x.\n", __func__,
3794 			     faddr, *dwptr);
3795 			break;
3796 		}
3797 	}
3798 
3799 	return ret;
3800 }
3801 
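/*
 * qla8044_write_optrom_data - Write option-ROM/flash data.
 *
 * @vha : Pointer to host adapter structure
 * @buf : Source data buffer
 * @offset : Flash offset to write to (sector aligned)
 * @length : Number of bytes to write (sector aligned)
 *
 * Copies the caller's buffer, erases the affected sectors and writes the
 * data in bursts of QLA8044_MAX_OPTROM_BURST_DWORDS DWORDs, falling back
 * to single-DWORD writes if buffer mode fails. SCSI requests are blocked
 * and the flash lock is held for the duration of the update.
 */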
3802 int
3803 qla8044_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
3804 			  uint32_t offset, uint32_t length)
3805 {
3806 	int rval = QLA_FUNCTION_FAILED, i, burst_iter_count;
3807 	int dword_count, erase_sec_count;
3808 	uint32_t erase_offset;
3809 	uint8_t *p_cache, *p_src;
3810 
3811 	erase_offset = offset;
3812 
3813 	p_cache = kcalloc(length, sizeof(uint8_t), GFP_KERNEL);
3814 	if (!p_cache)
3815 		return QLA_FUNCTION_FAILED;
3816 
3817 	memcpy(p_cache, buf, length);
3818 	p_src = p_cache;
3819 	dword_count = length / sizeof(uint32_t);
3820 	/* Since the offset and length are sector aligned, dword_count is
3821 	 * always a multiple of QLA8044_MAX_OPTROM_BURST_DWORDS (64).
3822 	 */
3823 	burst_iter_count = dword_count / QLA8044_MAX_OPTROM_BURST_DWORDS;
3824 	erase_sec_count = length / QLA8044_SECTOR_SIZE;
3825 
3826 	/* Suspend HBA. */
3827 	scsi_block_requests(vha->host);
3828 	/* Lock and enable write for whole operation. */
3829 	qla8044_flash_lock(vha);
3830 	qla8044_unprotect_flash(vha);
3831 
3832 	/* Erasing the sectors */
3833 	for (i = 0; i < erase_sec_count; i++) {
3834 		rval = qla8044_erase_flash_sector(vha, erase_offset);
3835 		ql_dbg(ql_dbg_user, vha, 0xb138,
3836 		    "Done erase of sector=0x%x.\n",
3837 		    erase_offset);
3838 		if (rval) {
3839 			ql_log(ql_log_warn, vha, 0xb121,
3840 			    "Failed to erase sector at address 0x%x.\n",
3841 			    erase_offset);
3842 			goto out;
3843 		}
3844 		erase_offset += QLA8044_SECTOR_SIZE;
3845 	}
3846 	ql_dbg(ql_dbg_user, vha, 0xb13f,
3847 	    "Got write for addr = 0x%x length=0x%x.\n",
3848 	    offset, length);
3849 
3850 	for (i = 0; i < burst_iter_count; i++) {
3851 
3852 		/* Go with write. */
3853 		rval = qla8044_write_flash_buffer_mode(vha, (uint32_t *)p_src,
3854 		    offset, QLA8044_MAX_OPTROM_BURST_DWORDS);
3855 		if (rval) {
3856 			/* Buffer mode failed, fall back to dword mode */
3857 			ql_log(ql_log_warn, vha, 0xb122,
3858 			    "Failed to write flash in buffer mode, "
3859 			    "reverting to slow-write.\n");
3860 			rval = qla8044_write_flash_dword_mode(vha,
3861 			    (uint32_t *)p_src, offset,
3862 			    QLA8044_MAX_OPTROM_BURST_DWORDS);
3863 		}
3864 		p_src +=  sizeof(uint32_t) * QLA8044_MAX_OPTROM_BURST_DWORDS;
3865 		offset += sizeof(uint32_t) * QLA8044_MAX_OPTROM_BURST_DWORDS;
3866 	}
3867 	ql_dbg(ql_dbg_user, vha, 0xb133,
3868 	    "Done writing.\n");
3869 
3870 out:
3871 	qla8044_protect_flash(vha);
3872 	qla8044_flash_unlock(vha);
3873 	scsi_unblock_requests(vha->host);
3874 	kfree(p_cache);
3875 
3876 	return rval;
3877 }
3878 
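/*
 * Legacy Interrupt Pointer register layout (see qla8044_intr_handler):
 * bit 31 - interrupt valid, bit 30 - remains set while the interrupt is
 * being de-asserted, bits [19..16] - PCIe function ID of the interrupt.
 */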
3879 #define LEG_INT_PTR_B31		(1 << 31)
3880 #define LEG_INT_PTR_B30		(1 << 30)
3881 #define PF_BITS_MASK		(0xF << 16)
3882 /**
3883  * qla8044_intr_handler() - Process interrupts for the ISP8044
3884  * @irq: interrupt number
3885  * @dev_id: SCSI driver HA context
3886  *
3887  * Called by system whenever the host adapter generates an interrupt.
3888  *
3889  * Returns handled flag.
3890  */
3891 irqreturn_t
3892 qla8044_intr_handler(int irq, void *dev_id)
3893 {
3894 	scsi_qla_host_t	*vha;
3895 	struct qla_hw_data *ha;
3896 	struct rsp_que *rsp;
3897 	struct device_reg_82xx __iomem *reg;
3898 	int		status = 0;
3899 	unsigned long	flags;
3900 	unsigned long	iter;
3901 	uint32_t	stat;
3902 	uint16_t	mb[4];
3903 	uint32_t leg_int_ptr = 0, pf_bit;
3904 
3905 	rsp = (struct rsp_que *) dev_id;
3906 	if (!rsp) {
3907 		ql_log(ql_log_info, NULL, 0xb143,
3908 		    "%s(): NULL response queue pointer\n", __func__);
3909 		return IRQ_NONE;
3910 	}
3911 	ha = rsp->hw;
3912 	vha = pci_get_drvdata(ha->pdev);
3913 
3914 	if (unlikely(pci_channel_offline(ha->pdev)))
3915 		return IRQ_HANDLED;
3916 
3917 	leg_int_ptr = qla8044_rd_reg(ha, LEG_INTR_PTR_OFFSET);
3918 
3919 	/* Legacy interrupt is valid if bit31 of leg_int_ptr is set */
3920 	if (!(leg_int_ptr & (LEG_INT_PTR_B31))) {
3921 		ql_dbg(ql_dbg_p3p, vha, 0xb144,
3922 		    "%s: Legacy Interrupt Bit 31 not set, "
3923 		    "spurious interrupt!\n", __func__);
3924 		return IRQ_NONE;
3925 	}
3926 
3927 	pf_bit = ha->portnum << 16;
3928 	/* Validate the PCIE function ID set in leg_int_ptr bits [19..16] */
3929 	if ((leg_int_ptr & (PF_BITS_MASK)) != pf_bit) {
3930 		ql_dbg(ql_dbg_p3p, vha, 0xb145,
3931 		    "%s: Incorrect function ID 0x%x in "
3932 		    "legacy interrupt register, "
3933 		    "ha->pf_bit = 0x%x\n", __func__,
3934 		    (leg_int_ptr & (PF_BITS_MASK)), pf_bit);
3935 		return IRQ_NONE;
3936 	}
3937 
3938 	/* To de-assert legacy interrupt, write 0 to Legacy Interrupt Trigger
3939 	 * Control register and poll till Legacy Interrupt Pointer register
3940 	 * bit 30 is 0.
3941 	 */
3942 	qla8044_wr_reg(ha, LEG_INTR_TRIG_OFFSET, 0);
3943 	do {
3944 		leg_int_ptr = qla8044_rd_reg(ha, LEG_INTR_PTR_OFFSET);
3945 		if ((leg_int_ptr & (PF_BITS_MASK)) != pf_bit)
3946 			break;
3947 	} while (leg_int_ptr & (LEG_INT_PTR_B30));
3948 
3949 	reg = &ha->iobase->isp82;
3950 	spin_lock_irqsave(&ha->hardware_lock, flags);
3951 	for (iter = 1; iter--; ) {
3952 
3953 		if (RD_REG_DWORD(&reg->host_int)) {
3954 			stat = RD_REG_DWORD(&reg->host_status);
3955 			if ((stat & HSRX_RISC_INT) == 0)
3956 				break;
3957 
3958 			switch (stat & 0xff) {
3959 			case 0x1:
3960 			case 0x2:
3961 			case 0x10:
3962 			case 0x11:
3963 				qla82xx_mbx_completion(vha, MSW(stat));
3964 				status |= MBX_INTERRUPT;
3965 				break;
3966 			case 0x12:
3967 				mb[0] = MSW(stat);
3968 				mb[1] = RD_REG_WORD(&reg->mailbox_out[1]);
3969 				mb[2] = RD_REG_WORD(&reg->mailbox_out[2]);
3970 				mb[3] = RD_REG_WORD(&reg->mailbox_out[3]);
3971 				qla2x00_async_event(vha, rsp, mb);
3972 				break;
3973 			case 0x13:
3974 				qla24xx_process_response_queue(vha, rsp);
3975 				break;
3976 			default:
3977 				ql_dbg(ql_dbg_p3p, vha, 0xb146,
3978 				    "Unrecognized interrupt type "
3979 				    "(%d).\n", stat & 0xff);
3980 				break;
3981 			}
3982 		}
3983 		WRT_REG_DWORD(&reg->host_int, 0);
3984 	}
3985 
3986 	qla2x00_handle_mbx_completion(ha, status);
3987 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
3988 
3989 	return IRQ_HANDLED;
3990 }
3991 
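/*
 * qla8044_idc_dontreset - Return DONTRESET_BIT0 of the IDC driver control
 * register; non-zero means reset recovery has been disabled for this
 * adapter.
 */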
3992 static int
3993 qla8044_idc_dontreset(struct qla_hw_data *ha)
3994 {
3995 	uint32_t idc_ctrl;
3996 
3997 	idc_ctrl = qla8044_rd_reg(ha, QLA8044_IDC_DRV_CTRL);
3998 	return idc_ctrl & DONTRESET_BIT0;
3999 }
4000 
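/*
 * qla8044_clear_rst_ready - Clear this function's bit in the CRB_DRV_STATE
 * register to indicate that reset handling for this function is complete.
 */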
4001 static void
4002 qla8044_clear_rst_ready(scsi_qla_host_t *vha)
4003 {
4004 	uint32_t drv_state;
4005 
4006 	drv_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX);
4007 
4008 	/*
4009 	 * For ISP8044, the drv_state register has 1 bit per function;
4010 	 * clear the bit for this function (portnum).
4011 	 * For ISP82xx, drv_state has 4 bits per function.
4012 	 */
4013 	drv_state &= ~(1 << vha->hw->portnum);
4014 
4015 	ql_dbg(ql_dbg_p3p, vha, 0xb13d,
4016 	    "drv_state: 0x%08x\n", drv_state);
4017 	qla8044_wr_direct(vha, QLA8044_CRB_DRV_STATE_INDEX, drv_state);
4018 }
4019 
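/*
 * qla8044_abort_isp - Perform ISP8044 reset recovery.
 *
 * @vha : Pointer to host adapter structure
 *
 * Moves the device to NEED_RESET (unless reset recovery is disabled via
 * IDC_CTRL DONTRESET_BIT0 while the device is READY), resolves reset
 * ownership, runs the device state handler, clears this function's
 * reset-ready bit and restarts the firmware on success.
 */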
4020 int
4021 qla8044_abort_isp(scsi_qla_host_t *vha)
4022 {
4023 	int rval;
4024 	uint32_t dev_state;
4025 	struct qla_hw_data *ha = vha->hw;
4026 
4027 	qla8044_idc_lock(ha);
4028 	dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX);
4029 
4030 	if (ql2xdontresethba)
4031 		qla8044_set_idc_dontreset(vha);
4032 
4033 	/* If device_state is NEED_RESET, go ahead with
4034 	 * Reset, irrespective of ql2xdontresethba. This is to allow a
4035 	 * non-reset-owner to force a reset. Non-reset-owner sets
4036 	 * the IDC_CTRL BIT0 to prevent Reset-owner from doing a Reset
4037 	 * and then forces a Reset by setting device_state to
4038 	 * NEED_RESET. */
4039 	if (dev_state == QLA8XXX_DEV_READY) {
4040 		/* If IDC_CTRL DONTRESETHBA_BIT0 is set, don't do reset
4041 		 * recovery */
4042 		if (qla8044_idc_dontreset(ha) == DONTRESET_BIT0) {
4043 			ql_dbg(ql_dbg_p3p, vha, 0xb13e,
4044 			    "Reset recovery disabled\n");
4045 			rval = QLA_FUNCTION_FAILED;
4046 			goto exit_isp_reset;
4047 		}
4048 
4049 		ql_dbg(ql_dbg_p3p, vha, 0xb140,
4050 		    "HW State: NEED RESET\n");
4051 		qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX,
4052 		    QLA8XXX_DEV_NEED_RESET);
4053 	}
4054 
4055 	/* For ISP8044, Reset owner is NIC, iSCSI or FCOE based on priority
4056 	 * and which drivers are present. Unlike ISP82XX, the function setting
4057 	 * NEED_RESET may not be the Reset owner. */
4058 	qla83xx_reset_ownership(vha);
4059 
4060 	qla8044_idc_unlock(ha);
4061 	rval = qla8044_device_state_handler(vha);
4062 	qla8044_idc_lock(ha);
4063 	qla8044_clear_rst_ready(vha);
4064 
4065 exit_isp_reset:
4066 	qla8044_idc_unlock(ha);
4067 	if (rval == QLA_SUCCESS) {
4068 		ha->flags.isp82xx_fw_hung = 0;
4069 		ha->flags.nic_core_reset_hdlr_active = 0;
4070 		rval = qla82xx_restart_isp(vha);
4071 	}
4072 
4073 	return rval;
4074 }
4075 
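/*
 * qla8044_fw_dump - Force a firmware (minidump) capture by taking reset
 * ownership and waiting for the resulting chip reset. Does nothing unless
 * ha->allow_cna_fw_dump is set.
 */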
4076 void
4077 qla8044_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
4078 {
4079 	struct qla_hw_data *ha = vha->hw;
4080 
4081 	if (!ha->allow_cna_fw_dump)
4082 		return;
4083 
4084 	scsi_block_requests(vha->host);
4085 	ha->flags.isp82xx_no_md_cap = 1;
4086 	qla8044_idc_lock(ha);
4087 	qla82xx_set_reset_owner(vha);
4088 	qla8044_idc_unlock(ha);
4089 	qla2x00_wait_for_chip_reset(vha);
4090 	scsi_unblock_requests(vha->host);
4091 }
4092