xref: /openbmc/linux/drivers/scsi/qla2xxx/qla_mr.c (revision 84d517f3)
1 /*
2  * QLogic Fibre Channel HBA Driver
3  * Copyright (c) 2003-2013 QLogic Corporation
4  *
5  * See LICENSE.qla2xxx for copyright and licensing details.
6  */
7 #include "qla_def.h"
8 #include <linux/delay.h>
9 #include <linux/pci.h>
10 #include <linux/ratelimit.h>
11 #include <linux/vmalloc.h>
12 #include <scsi/scsi_tcq.h>
13 #include <linux/utsname.h>
14 
15 
16 /* QLAFX00 specific Mailbox implementation functions */
17 
18 /*
19  * qlafx00_mailbox_command
20  *	Issues a mailbox command and waits for completion.
21  *
22  * Input:
23  *	ha = adapter block pointer.
24  *	mcp = driver internal mbx struct pointer.
25  *
26  * Output:
27  *	mb[MAX_MAILBOX_REGISTER_COUNT] = returned mailbox data.
28  *
29  * Returns:
30  *	0 : QLA_SUCCESS = cmd performed successfully
31  *	1 : QLA_FUNCTION_FAILED   (error encountered)
32  *	6 : QLA_FUNCTION_TIMEOUT (timeout condition encountered)
33  *
34  * Context:
35  *	Kernel context.
36  */
37 static int
38 qlafx00_mailbox_command(scsi_qla_host_t *vha, struct mbx_cmd_32 *mcp)
39 
40 {
41 	int		rval;
42 	unsigned long    flags = 0;
43 	device_reg_t *reg;
44 	uint8_t		abort_active;
45 	uint8_t		io_lock_on;
46 	uint16_t	command = 0;
47 	uint32_t	*iptr;
48 	uint32_t __iomem *optr;
49 	uint32_t	cnt;
50 	uint32_t	mboxes;
51 	unsigned long	wait_time;
52 	struct qla_hw_data *ha = vha->hw;
53 	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
54 
55 	if (ha->pdev->error_state > pci_channel_io_frozen) {
56 		ql_log(ql_log_warn, vha, 0x115c,
57 		    "error_state is greater than pci_channel_io_frozen, "
58 		    "exiting.\n");
59 		return QLA_FUNCTION_TIMEOUT;
60 	}
61 
62 	if (vha->device_flags & DFLG_DEV_FAILED) {
63 		ql_log(ql_log_warn, vha, 0x115f,
64 		    "Device in failed state, exiting.\n");
65 		return QLA_FUNCTION_TIMEOUT;
66 	}
67 
68 	reg = ha->iobase;
69 	io_lock_on = base_vha->flags.init_done;
70 
71 	rval = QLA_SUCCESS;
72 	abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
73 
74 	if (ha->flags.pci_channel_io_perm_failure) {
75 		ql_log(ql_log_warn, vha, 0x1175,
76 		    "Perm failure on EEH timeout MBX, exiting.\n");
77 		return QLA_FUNCTION_TIMEOUT;
78 	}
79 
80 	if (ha->flags.isp82xx_fw_hung) {
81 		/* Setting Link-Down error */
82 		mcp->mb[0] = MBS_LINK_DOWN_ERROR;
83 		ql_log(ql_log_warn, vha, 0x1176,
84 		    "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
85 		rval = QLA_FUNCTION_FAILED;
86 		goto premature_exit;
87 	}
88 
89 	/*
90 	 * Wait for active mailbox commands to finish by waiting at most tov
91 	 * seconds. This is to serialize actual issuing of mailbox cmds during
92 	 * non ISP abort time.
93 	 */
94 	if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) {
95 		/* Timeout occurred. Return error. */
96 		ql_log(ql_log_warn, vha, 0x1177,
97 		    "Cmd access timeout, cmd=0x%x, Exiting.\n",
98 		    mcp->mb[0]);
99 		return QLA_FUNCTION_TIMEOUT;
100 	}
101 
102 	ha->flags.mbox_busy = 1;
103 	/* Save mailbox command for debug */
104 	ha->mcp32 = mcp;
105 
106 	ql_dbg(ql_dbg_mbx, vha, 0x1178,
107 	    "Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]);
108 
109 	spin_lock_irqsave(&ha->hardware_lock, flags);
110 
111 	/* Load mailbox registers. */
112 	optr = (uint32_t __iomem *)&reg->ispfx00.mailbox0;
113 
114 	iptr = mcp->mb;
115 	command = mcp->mb[0];
116 	mboxes = mcp->out_mb;
117 
118 	for (cnt = 0; cnt < ha->mbx_count; cnt++) {
119 		if (mboxes & BIT_0)
120 			WRT_REG_DWORD(optr, *iptr);
121 
122 		mboxes >>= 1;
123 		optr++;
124 		iptr++;
125 	}
126 
127 	/* Issue set host interrupt command to send cmd out. */
128 	ha->flags.mbox_int = 0;
129 	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
130 
131 	ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1172,
132 	    (uint8_t *)mcp->mb, 16);
133 	ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1173,
134 	    ((uint8_t *)mcp->mb + 0x10), 16);
135 	ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1174,
136 	    ((uint8_t *)mcp->mb + 0x20), 8);
137 
138 	/* Unlock mbx registers and wait for interrupt */
139 	ql_dbg(ql_dbg_mbx, vha, 0x1179,
140 	    "Going to unlock irq & waiting for interrupts. "
141 	    "jiffies=%lx.\n", jiffies);
142 
143 	/* Wait for mbx cmd completion until timeout */
144 	if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) {
145 		set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
146 
147 		QLAFX00_SET_HST_INTR(ha, ha->mbx_intr_code);
148 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
149 
150 		wait_for_completion_timeout(&ha->mbx_intr_comp, mcp->tov * HZ);
151 	} else {
152 		ql_dbg(ql_dbg_mbx, vha, 0x112c,
153 		    "Cmd=%x Polling Mode.\n", command);
154 
155 		QLAFX00_SET_HST_INTR(ha, ha->mbx_intr_code);
156 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
157 
158 		wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */
159 		while (!ha->flags.mbox_int) {
160 			if (time_after(jiffies, wait_time))
161 				break;
162 
163 			/* Check for pending interrupts. */
164 			qla2x00_poll(ha->rsp_q_map[0]);
165 
166 			if (!ha->flags.mbox_int &&
167 			    !(IS_QLA2200(ha) &&
168 			    command == MBC_LOAD_RISC_RAM_EXTENDED))
169 				usleep_range(10000, 11000);
170 		} /* while */
171 		ql_dbg(ql_dbg_mbx, vha, 0x112d,
172 		    "Waited %d sec.\n",
173 		    (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ));
174 	}
175 
176 	/* Check whether we timed out */
177 	if (ha->flags.mbox_int) {
178 		uint32_t *iptr2;
179 
180 		ql_dbg(ql_dbg_mbx, vha, 0x112e,
181 		    "Cmd=%x completed.\n", command);
182 
183 		/* Got interrupt. Clear the flag. */
184 		ha->flags.mbox_int = 0;
185 		clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
186 
187 		if (ha->mailbox_out32[0] != MBS_COMMAND_COMPLETE)
188 			rval = QLA_FUNCTION_FAILED;
189 
190 		/* Load return mailbox registers. */
191 		iptr2 = mcp->mb;
192 		iptr = (uint32_t *)&ha->mailbox_out32[0];
193 		mboxes = mcp->in_mb;
194 		for (cnt = 0; cnt < ha->mbx_count; cnt++) {
195 			if (mboxes & BIT_0)
196 				*iptr2 = *iptr;
197 
198 			mboxes >>= 1;
199 			iptr2++;
200 			iptr++;
201 		}
202 	} else {
203 
204 		rval = QLA_FUNCTION_TIMEOUT;
205 	}
206 
207 	ha->flags.mbox_busy = 0;
208 
209 	/* Clean up */
210 	ha->mcp32 = NULL;
211 
212 	if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) {
213 		ql_dbg(ql_dbg_mbx, vha, 0x113a,
214 		    "checking for additional resp interrupt.\n");
215 
216 		/* polling mode for non isp_abort commands. */
217 		qla2x00_poll(ha->rsp_q_map[0]);
218 	}
219 
220 	if (rval == QLA_FUNCTION_TIMEOUT &&
221 	    mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) {
222 		if (!io_lock_on || (mcp->flags & IOCTL_CMD) ||
223 		    ha->flags.eeh_busy) {
224 			/* not in dpc. schedule it for dpc to take over. */
225 			ql_dbg(ql_dbg_mbx, vha, 0x115d,
226 			    "Timeout, schedule isp_abort_needed.\n");
227 
228 			if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
229 			    !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
230 			    !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
231 
232 				ql_log(ql_log_info, base_vha, 0x115e,
233 				    "Mailbox cmd timeout occurred, cmd=0x%x, "
234 				    "mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP "
235 				    "abort.\n", command, mcp->mb[0],
236 				    ha->flags.eeh_busy);
237 				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
238 				qla2xxx_wake_dpc(vha);
239 			}
240 		} else if (!abort_active) {
241 			/* call abort directly since we are in the DPC thread */
242 			ql_dbg(ql_dbg_mbx, vha, 0x1160,
243 			    "Timeout, calling abort_isp.\n");
244 
245 			if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
246 			    !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
247 			    !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
248 
249 				ql_log(ql_log_info, base_vha, 0x1161,
250 				    "Mailbox cmd timeout occurred, cmd=0x%x, "
251 				    "mb[0]=0x%x. Scheduling ISP abort.\n",
252 				    command, mcp->mb[0]);
253 
254 				set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
255 				clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
256 				if (ha->isp_ops->abort_isp(vha)) {
257 					/* Failed. retry later. */
258 					set_bit(ISP_ABORT_NEEDED,
259 					    &vha->dpc_flags);
260 				}
261 				clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
262 				ql_dbg(ql_dbg_mbx, vha, 0x1162,
263 				    "Finished abort_isp.\n");
264 			}
265 		}
266 	}
267 
268 premature_exit:
269 	/* Allow next mbx cmd to come in. */
270 	complete(&ha->mbx_cmd_comp);
271 
272 	if (rval) {
273 		ql_log(ql_log_warn, base_vha, 0x1163,
274 		    "**** Failed mbx[0]=%x, mb[1]=%x, mb[2]=%x, "
275 		    "mb[3]=%x, cmd=%x ****.\n",
276 		    mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3], command);
277 	} else {
278 		ql_dbg(ql_dbg_mbx, base_vha, 0x1164, "Done %s.\n", __func__);
279 	}
280 
281 	return rval;
282 }
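
/*
 * Note on the out_mb/in_mb masks used above: bit N set in the mask
 * means mailbox register N takes part in the transfer (BIT_0 is
 * tested and the mask shifted right once per register).  A minimal
 * standalone sketch of that walk, with hypothetical names:
 *
 *	static void write_selected_mboxes(u32 __iomem *regs,
 *					  const u32 *vals,
 *					  u32 mask, u32 count)
 *	{
 *		u32 i;
 *
 *		for (i = 0; i < count; i++) {
 *			if (mask & 1)
 *				writel(vals[i], &regs[i]);
 *			mask >>= 1;
 *		}
 *	}
 */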
283 
284 /*
285  * qlafx00_driver_shutdown
286  *	Indicate a driver shutdown to firmware.
287  *
288  * Input:
289  *	ha = adapter block pointer.
290  *
291  * Returns:
292  *	local function return status code.
293  *
294  * Context:
295  *	Kernel context.
296  */
297 int
298 qlafx00_driver_shutdown(scsi_qla_host_t *vha, int tmo)
299 {
300 	int rval;
301 	struct mbx_cmd_32 mc;
302 	struct mbx_cmd_32 *mcp = &mc;
303 
304 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1166,
305 	    "Entered %s.\n", __func__);
306 
307 	mcp->mb[0] = MBC_MR_DRV_SHUTDOWN;
308 	mcp->out_mb = MBX_0;
309 	mcp->in_mb = MBX_0;
310 	if (tmo)
311 		mcp->tov = tmo;
312 	else
313 		mcp->tov = MBX_TOV_SECONDS;
314 	mcp->flags = 0;
315 	rval = qlafx00_mailbox_command(vha, mcp);
316 
317 	if (rval != QLA_SUCCESS) {
318 		ql_dbg(ql_dbg_mbx, vha, 0x1167,
319 		    "Failed=%x.\n", rval);
320 	} else {
321 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1168,
322 		    "Done %s.\n", __func__);
323 	}
324 
325 	return rval;
326 }
327 
328 /*
329  * qlafx00_get_firmware_state
330  *	Get adapter firmware state.
331  *
332  * Input:
333  *	ha = adapter block pointer.
334  *	TARGET_QUEUE_LOCK must be released.
335  *	ADAPTER_STATE_LOCK must be released.
336  *
337  * Returns:
338  *	qlafx00 local function return status code.
339  *
340  * Context:
341  *	Kernel context.
342  */
343 static int
344 qlafx00_get_firmware_state(scsi_qla_host_t *vha, uint32_t *states)
345 {
346 	int rval;
347 	struct mbx_cmd_32 mc;
348 	struct mbx_cmd_32 *mcp = &mc;
349 
350 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1169,
351 	    "Entered %s.\n", __func__);
352 
353 	mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
354 	mcp->out_mb = MBX_0;
355 	mcp->in_mb = MBX_1|MBX_0;
356 	mcp->tov = MBX_TOV_SECONDS;
357 	mcp->flags = 0;
358 	rval = qlafx00_mailbox_command(vha, mcp);
359 
360 	/* Return firmware states. */
361 	states[0] = mcp->mb[1];
362 
363 	if (rval != QLA_SUCCESS) {
364 		ql_dbg(ql_dbg_mbx, vha, 0x116a,
365 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
366 	} else {
367 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x116b,
368 		    "Done %s.\n", __func__);
369 	}
370 	return rval;
371 }
372 
373 /*
374  * qlafx00_init_firmware
375  *	Initialize adapter firmware.
376  *
377  * Input:
378  *	ha = adapter block pointer.
379  *	dptr = Initialization control block pointer.
380  *	size = size of initialization control block.
381  *	TARGET_QUEUE_LOCK must be released.
382  *	ADAPTER_STATE_LOCK must be released.
383  *
384  * Returns:
385  *	qlafx00 local function return status code.
386  *
387  * Context:
388  *	Kernel context.
389  */
390 int
391 qlafx00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
392 {
393 	int rval;
394 	struct mbx_cmd_32 mc;
395 	struct mbx_cmd_32 *mcp = &mc;
396 	struct qla_hw_data *ha = vha->hw;
397 
398 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x116c,
399 	    "Entered %s.\n", __func__);
400 
401 	mcp->mb[0] = MBC_INITIALIZE_FIRMWARE;
402 
403 	mcp->mb[1] = 0;
404 	mcp->mb[2] = MSD(ha->init_cb_dma);
405 	mcp->mb[3] = LSD(ha->init_cb_dma);
406 
407 	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
408 	mcp->in_mb = MBX_0;
409 	mcp->buf_size = size;
410 	mcp->flags = MBX_DMA_OUT;
411 	mcp->tov = MBX_TOV_SECONDS;
412 	rval = qlafx00_mailbox_command(vha, mcp);
413 
414 	if (rval != QLA_SUCCESS) {
415 		ql_dbg(ql_dbg_mbx, vha, 0x116d,
416 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
417 	} else {
418 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x116e,
419 		    "Done %s.\n", __func__);
420 	}
421 	return rval;
422 }
423 
424 /*
425  * qlafx00_mbx_reg_test
426  */
427 static int
428 qlafx00_mbx_reg_test(scsi_qla_host_t *vha)
429 {
430 	int rval;
431 	struct mbx_cmd_32 mc;
432 	struct mbx_cmd_32 *mcp = &mc;
433 
434 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x116f,
435 	    "Entered %s.\n", __func__);
436 
437 
438 	mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST;
439 	mcp->mb[1] = 0xAAAA;
440 	mcp->mb[2] = 0x5555;
441 	mcp->mb[3] = 0xAA55;
442 	mcp->mb[4] = 0x55AA;
443 	mcp->mb[5] = 0xA5A5;
444 	mcp->mb[6] = 0x5A5A;
445 	mcp->mb[7] = 0x2525;
446 	mcp->mb[8] = 0xBBBB;
447 	mcp->mb[9] = 0x6666;
448 	mcp->mb[10] = 0xBB66;
449 	mcp->mb[11] = 0x66BB;
450 	mcp->mb[12] = 0xB6B6;
451 	mcp->mb[13] = 0x6B6B;
452 	mcp->mb[14] = 0x3636;
453 	mcp->mb[15] = 0xCCCC;
454 
455 
456 	mcp->out_mb = MBX_15|MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|
457 			MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
458 	mcp->in_mb = MBX_15|MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|
459 			MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
460 	mcp->buf_size = 0;
461 	mcp->flags = MBX_DMA_OUT;
462 	mcp->tov = MBX_TOV_SECONDS;
463 	rval = qlafx00_mailbox_command(vha, mcp);
464 	if (rval == QLA_SUCCESS) {
465 		if (mcp->mb[17] != 0xAAAA || mcp->mb[18] != 0x5555 ||
466 		    mcp->mb[19] != 0xAA55 || mcp->mb[20] != 0x55AA)
467 			rval = QLA_FUNCTION_FAILED;
468 		if (mcp->mb[21] != 0xA5A5 || mcp->mb[22] != 0x5A5A ||
469 		    mcp->mb[23] != 0x2525 || mcp->mb[24] != 0xBBBB)
470 			rval = QLA_FUNCTION_FAILED;
471 		if (mcp->mb[25] != 0x6666 || mcp->mb[26] != 0xBB66 ||
472 		    mcp->mb[27] != 0x66BB || mcp->mb[28] != 0xB6B6)
473 			rval = QLA_FUNCTION_FAILED;
474 		if (mcp->mb[29] != 0x6B6B || mcp->mb[30] != 0x3636 ||
475 		    mcp->mb[31] != 0xCCCC)
476 			rval = QLA_FUNCTION_FAILED;
477 	}
478 
479 	if (rval != QLA_SUCCESS) {
480 		ql_dbg(ql_dbg_mbx, vha, 0x1170,
481 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
482 	} else {
483 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1171,
484 		    "Done %s.\n", __func__);
485 	}
486 	return rval;
487 }
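
/*
 * Note: in the register test above, the patterns written to
 * mb[1]..mb[15] are verified at mb[17]..mb[31]; the ISPFx00 firmware
 * echoes each pattern back 16 mailbox registers higher (mb[1]=0xAAAA
 * is checked at mb[17], ..., mb[15]=0xCCCC at mb[31]).
 */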
488 
489 /**
490  * qlafx00_pci_config() - Set up ISPFx00 PCI configuration registers.
491  * @vha: HA context
492  *
493  * Returns 0 on success.
494  */
495 int
496 qlafx00_pci_config(scsi_qla_host_t *vha)
497 {
498 	uint16_t w;
499 	struct qla_hw_data *ha = vha->hw;
500 
501 	pci_set_master(ha->pdev);
502 	pci_try_set_mwi(ha->pdev);
503 
504 	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
505 	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
506 	w &= ~PCI_COMMAND_INTX_DISABLE;
507 	pci_write_config_word(ha->pdev, PCI_COMMAND, w);
508 
509 	/* PCIe -- adjust Maximum Read Request Size (2048). */
510 	if (pci_is_pcie(ha->pdev))
511 		pcie_set_readrq(ha->pdev, 2048);
512 
513 	ha->chip_revision = ha->pdev->revision;
514 
515 	return QLA_SUCCESS;
516 }
517 
518 /**
519  * qlafx00_soc_cpu_reset() - Perform warm reset of iSA (CPUs on the SoC are reset).
520  * @vha: HA context
521  *
522  */
523 static inline void
524 qlafx00_soc_cpu_reset(scsi_qla_host_t *vha)
525 {
526 	unsigned long flags = 0;
527 	struct qla_hw_data *ha = vha->hw;
528 	int i, core;
529 	uint32_t cnt;
530 
531 	/* Set all 4 cores in reset */
532 	for (i = 0; i < 4; i++) {
533 		QLAFX00_SET_HBA_SOC_REG(ha,
534 		    (SOC_SW_RST_CONTROL_REG_CORE0 + 8*i), (0xF01));
535 	}
536 
537 	/* Set all 4 core Clock gating control */
538 	for (i = 0; i < 4; i++) {
539 		QLAFX00_SET_HBA_SOC_REG(ha,
540 		    (SOC_SW_RST_CONTROL_REG_CORE0 + 4 + 8*i), (0x01010101));
541 	}
542 
543 	/* Reset all units in Fabric */
544 	QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_RST_CONTROL_REG, (0x11F0101));
545 
546 	/* Reset all interrupt control registers */
547 	for (i = 0; i < 115; i++) {
548 		QLAFX00_SET_HBA_SOC_REG(ha,
549 		    (SOC_INTERRUPT_SOURCE_I_CONTROL_REG + 4*i), (0x0));
550 	}
551 
552 	/* Reset Timers control registers. per core */
553 	for (core = 0; core < 4; core++)
554 		for (i = 0; i < 8; i++)
555 			QLAFX00_SET_HBA_SOC_REG(ha,
556 			    (SOC_CORE_TIMER_REG + 0x100*core + 4*i), (0x0));
557 
558 	/* Reset per core IRQ ack register */
559 	for (core = 0; core < 4; core++)
560 		QLAFX00_SET_HBA_SOC_REG(ha,
561 		    (SOC_IRQ_ACK_REG + 0x100*core), (0x3FF));
562 
563 	/* Set Fabric control and config to defaults */
564 	QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_CONTROL_REG, (0x2));
565 	QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_CONFIG_REG, (0x3));
566 
567 	spin_lock_irqsave(&ha->hardware_lock, flags);
568 
569 	/* Kick in Fabric units */
570 	QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_RST_CONTROL_REG, (0x0));
571 
572 	/* Kick in Core0 to start boot process */
573 	QLAFX00_SET_HBA_SOC_REG(ha, SOC_SW_RST_CONTROL_REG_CORE0, (0xF00));
574 
575 	/* Wait 10secs for soft-reset to complete. */
576 	for (cnt = 10; cnt; cnt--) {
577 		msleep(1000);
578 		barrier();
579 	}
580 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
581 }
582 
583 /**
584  * qlafx00_soft_reset() - Soft Reset ISPFx00.
585  * @vha: HA context
586  */
589 void
590 qlafx00_soft_reset(scsi_qla_host_t *vha)
591 {
592 	struct qla_hw_data *ha = vha->hw;
593 
594 	if (unlikely(pci_channel_offline(ha->pdev) &&
595 	    ha->flags.pci_channel_io_perm_failure))
596 		return;
597 
598 	ha->isp_ops->disable_intrs(ha);
599 	qlafx00_soc_cpu_reset(vha);
600 	ha->isp_ops->enable_intrs(ha);
601 }
602 
603 /**
604  * qlafx00_chip_diag() - Test ISPFx00 for proper operation.
605  * @vha: HA context
606  *
607  * Returns 0 on success.
608  */
609 int
610 qlafx00_chip_diag(scsi_qla_host_t *vha)
611 {
612 	int rval = 0;
613 	struct qla_hw_data *ha = vha->hw;
614 	struct req_que *req = ha->req_q_map[0];
615 
616 	ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length;
617 
618 	rval = qlafx00_mbx_reg_test(vha);
619 	if (rval) {
620 		ql_log(ql_log_warn, vha, 0x1165,
621 		    "Failed mailbox register test\n");
622 	} else {
623 		/* Flag a successful rval */
624 		rval = QLA_SUCCESS;
625 	}
626 	return rval;
627 }
628 
629 void
630 qlafx00_config_rings(struct scsi_qla_host *vha)
631 {
632 	struct qla_hw_data *ha = vha->hw;
633 	struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
634 
635 	WRT_REG_DWORD(&reg->req_q_in, 0);
636 	WRT_REG_DWORD(&reg->req_q_out, 0);
637 
638 	WRT_REG_DWORD(&reg->rsp_q_in, 0);
639 	WRT_REG_DWORD(&reg->rsp_q_out, 0);
640 
641 	/* PCI posting */
642 	RD_REG_DWORD(&reg->rsp_q_out);
643 }
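
/*
 * Note: the trailing RD_REG_DWORD() above is the usual MMIO posting
 * flush -- PCI writes may be posted (buffered by bridges), and a
 * non-posted read of any register on the same device forces them to
 * complete.  The pattern in the abstract, with hypothetical register
 * names:
 *
 *	writel(0, base + REQ_Q_IN);
 *	readl(base + REQ_Q_IN);
 */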
644 
645 char *
646 qlafx00_pci_info_str(struct scsi_qla_host *vha, char *str)
647 {
648 	struct qla_hw_data *ha = vha->hw;
649 
650 	if (pci_is_pcie(ha->pdev)) {
651 		strcpy(str, "PCIe iSA");
652 		return str;
653 	}
654 	return str;
655 }
656 
657 char *
658 qlafx00_fw_version_str(struct scsi_qla_host *vha, char *str)
659 {
660 	struct qla_hw_data *ha = vha->hw;
661 
662 	sprintf(str, "%s", ha->mr.fw_version);
663 	return str;
664 }
665 
666 void
667 qlafx00_enable_intrs(struct qla_hw_data *ha)
668 {
669 	unsigned long flags = 0;
670 
671 	spin_lock_irqsave(&ha->hardware_lock, flags);
672 	ha->interrupts_on = 1;
673 	QLAFX00_ENABLE_ICNTRL_REG(ha);
674 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
675 }
676 
677 void
678 qlafx00_disable_intrs(struct qla_hw_data *ha)
679 {
680 	unsigned long flags = 0;
681 
682 	spin_lock_irqsave(&ha->hardware_lock, flags);
683 	ha->interrupts_on = 0;
684 	QLAFX00_DISABLE_ICNTRL_REG(ha);
685 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
686 }
687 
688 int
689 qlafx00_abort_target(fc_port_t *fcport, unsigned int l, int tag)
690 {
691 	return qla2x00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag);
692 }
693 
694 int
695 qlafx00_lun_reset(fc_port_t *fcport, unsigned int l, int tag)
696 {
697 	return qla2x00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag);
698 }
699 
700 int
701 qlafx00_loop_reset(scsi_qla_host_t *vha)
702 {
703 	int ret;
704 	struct fc_port *fcport;
705 	struct qla_hw_data *ha = vha->hw;
706 
707 	if (ql2xtargetreset) {
708 		list_for_each_entry(fcport, &vha->vp_fcports, list) {
709 			if (fcport->port_type != FCT_TARGET)
710 				continue;
711 
712 			ret = ha->isp_ops->target_reset(fcport, 0, 0);
713 			if (ret != QLA_SUCCESS) {
714 				ql_dbg(ql_dbg_taskm, vha, 0x803d,
715 				    "Bus Reset failed: Reset=%d "
716 				    "d_id=%x.\n", ret, fcport->d_id.b24);
717 			}
718 		}
719 	}
720 	return QLA_SUCCESS;
721 }
722 
723 int
724 qlafx00_iospace_config(struct qla_hw_data *ha)
725 {
726 	if (pci_request_selected_regions(ha->pdev, ha->bars,
727 	    QLA2XXX_DRIVER_NAME)) {
728 		ql_log_pci(ql_log_fatal, ha->pdev, 0x014e,
729 		    "Failed to reserve PIO/MMIO regions (%s), aborting.\n",
730 		    pci_name(ha->pdev));
731 		goto iospace_error_exit;
732 	}
733 
734 	/* Use MMIO operations for all accesses. */
735 	if (!(pci_resource_flags(ha->pdev, 0) & IORESOURCE_MEM)) {
736 		ql_log_pci(ql_log_warn, ha->pdev, 0x014f,
737 		    "Invalid pci I/O region size (%s).\n",
738 		    pci_name(ha->pdev));
739 		goto iospace_error_exit;
740 	}
741 	if (pci_resource_len(ha->pdev, 0) < BAR0_LEN_FX00) {
742 		ql_log_pci(ql_log_warn, ha->pdev, 0x0127,
743 		    "Invalid PCI mem BAR0 region size (%s), aborting\n",
744 			pci_name(ha->pdev));
745 		goto iospace_error_exit;
746 	}
747 
748 	ha->cregbase =
749 	    ioremap_nocache(pci_resource_start(ha->pdev, 0), BAR0_LEN_FX00);
750 	if (!ha->cregbase) {
751 		ql_log_pci(ql_log_fatal, ha->pdev, 0x0128,
752 		    "cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev));
753 		goto iospace_error_exit;
754 	}
755 
756 	if (!(pci_resource_flags(ha->pdev, 2) & IORESOURCE_MEM)) {
757 		ql_log_pci(ql_log_warn, ha->pdev, 0x0129,
758 		    "region #2 not an MMIO resource (%s), aborting\n",
759 		    pci_name(ha->pdev));
760 		goto iospace_error_exit;
761 	}
762 	if (pci_resource_len(ha->pdev, 2) < BAR2_LEN_FX00) {
763 		ql_log_pci(ql_log_warn, ha->pdev, 0x012a,
764 		    "Invalid PCI mem BAR2 region size (%s), aborting\n",
765 			pci_name(ha->pdev));
766 		goto iospace_error_exit;
767 	}
768 
769 	ha->iobase =
770 	    ioremap_nocache(pci_resource_start(ha->pdev, 2), BAR2_LEN_FX00);
771 	if (!ha->iobase) {
772 		ql_log_pci(ql_log_fatal, ha->pdev, 0x012b,
773 		    "cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev));
774 		goto iospace_error_exit;
775 	}
776 
777 	/* Determine queue resources */
778 	ha->max_req_queues = ha->max_rsp_queues = 1;
779 
780 	ql_log_pci(ql_log_info, ha->pdev, 0x012c,
781 	    "Bars 0x%x, iobase0 0x%p, iobase2 0x%p\n",
782 	    ha->bars, ha->cregbase, ha->iobase);
783 
784 	return 0;
785 
786 iospace_error_exit:
787 	return -ENOMEM;
788 }
789 
790 static void
791 qlafx00_save_queue_ptrs(struct scsi_qla_host *vha)
792 {
793 	struct qla_hw_data *ha = vha->hw;
794 	struct req_que *req = ha->req_q_map[0];
795 	struct rsp_que *rsp = ha->rsp_q_map[0];
796 
797 	req->length_fx00 = req->length;
798 	req->ring_fx00 = req->ring;
799 	req->dma_fx00 = req->dma;
800 
801 	rsp->length_fx00 = rsp->length;
802 	rsp->ring_fx00 = rsp->ring;
803 	rsp->dma_fx00 = rsp->dma;
804 
805 	ql_dbg(ql_dbg_init, vha, 0x012d,
806 	    "req: %p, ring_fx00: %p, length_fx00: 0x%x, "
807 	    "req->dma_fx00: 0x%llx\n", req, req->ring_fx00,
808 	    req->length_fx00, (u64)req->dma_fx00);
809 
810 	ql_dbg(ql_dbg_init, vha, 0x012e,
811 	    "rsp: %p, ring_fx00: %p, length_fx00: 0x%x, "
812 	    "rsp->dma_fx00: 0x%llx\n", rsp, rsp->ring_fx00,
813 	    rsp->length_fx00, (u64)rsp->dma_fx00);
814 }
815 
816 static int
817 qlafx00_config_queues(struct scsi_qla_host *vha)
818 {
819 	struct qla_hw_data *ha = vha->hw;
820 	struct req_que *req = ha->req_q_map[0];
821 	struct rsp_que *rsp = ha->rsp_q_map[0];
822 	dma_addr_t bar2_hdl = pci_resource_start(ha->pdev, 2);
823 
824 	req->length = ha->req_que_len;
825 	req->ring = (void *)ha->iobase + ha->req_que_off;
826 	req->dma = bar2_hdl + ha->req_que_off;
827 	if ((!req->ring) || (req->length == 0)) {
828 		ql_log_pci(ql_log_info, ha->pdev, 0x012f,
829 		    "Unable to allocate memory for req_ring\n");
830 		return QLA_FUNCTION_FAILED;
831 	}
832 
833 	ql_dbg(ql_dbg_init, vha, 0x0130,
834 	    "req: %p req_ring pointer %p req len 0x%x "
835 	    "req off 0x%x, req->dma: 0x%llx\n",
836 	    req, req->ring, req->length,
837 	    ha->req_que_off, (u64)req->dma);
838 
839 	rsp->length = ha->rsp_que_len;
840 	rsp->ring = (void *)ha->iobase + ha->rsp_que_off;
841 	rsp->dma = bar2_hdl + ha->rsp_que_off;
842 	if ((!rsp->ring) || (rsp->length == 0)) {
843 		ql_log_pci(ql_log_info, ha->pdev, 0x0131,
844 		    "Unable to allocate memory for rsp_ring\n");
845 		return QLA_FUNCTION_FAILED;
846 	}
847 
848 	ql_dbg(ql_dbg_init, vha, 0x0132,
849 	    "rsp: %p rsp_ring pointer %p rsp len 0x%x "
850 	    "rsp off 0x%x, rsp->dma: 0x%llx\n",
851 	    rsp, rsp->ring, rsp->length,
852 	    ha->rsp_que_off, (u64)rsp->dma);
853 
854 	return QLA_SUCCESS;
855 }
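
/*
 * Note: unlike the host-resident rings on other qla2xxx parts, the
 * ISPFx00 request/response rings live in device memory behind BAR2,
 * so both views are derived from the same firmware-reported offset:
 *
 *	ring (CPU view)  = ha->iobase + que_off;            (ioremapped BAR2)
 *	dma  (card view) = pci_resource_start(pdev, 2) + que_off;
 *
 * The offsets and lengths come from the AEN mailboxes (see
 * qlafx00_init_fw_ready() below).
 */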
856 
857 static int
858 qlafx00_init_fw_ready(scsi_qla_host_t *vha)
859 {
860 	int rval = 0;
861 	unsigned long wtime;
862 	uint16_t wait_time;	/* Wait time */
863 	struct qla_hw_data *ha = vha->hw;
864 	struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
865 	uint32_t aenmbx, aenmbx7 = 0;
866 	uint32_t pseudo_aen;
867 	uint32_t state[5];
868 	bool done = false;
869 
870 	/* 30 seconds wait - Adjust if required */
871 	wait_time = 30;
872 
873 	pseudo_aen = RD_REG_DWORD(&reg->pseudoaen);
874 	if (pseudo_aen == 1) {
875 		aenmbx7 = RD_REG_DWORD(&reg->initval7);
876 		ha->mbx_intr_code = MSW(aenmbx7);
877 		ha->rqstq_intr_code = LSW(aenmbx7);
878 		rval = qlafx00_driver_shutdown(vha, 10);
879 		if (rval != QLA_SUCCESS)
880 			qlafx00_soft_reset(vha);
881 	}
882 
883 	/* wait time before firmware ready */
884 	wtime = jiffies + (wait_time * HZ);
885 	do {
886 		aenmbx = RD_REG_DWORD(&reg->aenmailbox0);
887 		barrier();
888 		ql_dbg(ql_dbg_mbx, vha, 0x0133,
889 		    "aenmbx: 0x%x\n", aenmbx);
890 
891 		switch (aenmbx) {
892 		case MBA_FW_NOT_STARTED:
893 		case MBA_FW_STARTING:
894 			break;
895 
896 		case MBA_SYSTEM_ERR:
897 		case MBA_REQ_TRANSFER_ERR:
898 		case MBA_RSP_TRANSFER_ERR:
899 		case MBA_FW_INIT_FAILURE:
900 			qlafx00_soft_reset(vha);
901 			break;
902 
903 		case MBA_FW_RESTART_CMPLT:
904 			/* Set the mbx and rqstq intr code */
905 			aenmbx7 = RD_REG_DWORD(&reg->aenmailbox7);
906 			ha->mbx_intr_code = MSW(aenmbx7);
907 			ha->rqstq_intr_code = LSW(aenmbx7);
908 			ha->req_que_off = RD_REG_DWORD(&reg->aenmailbox1);
909 			ha->rsp_que_off = RD_REG_DWORD(&reg->aenmailbox3);
910 			ha->req_que_len = RD_REG_DWORD(&reg->aenmailbox5);
911 			ha->rsp_que_len = RD_REG_DWORD(&reg->aenmailbox6);
912 			WRT_REG_DWORD(&reg->aenmailbox0, 0);
913 			RD_REG_DWORD_RELAXED(&reg->aenmailbox0);
914 			ql_dbg(ql_dbg_init, vha, 0x0134,
915 			    "f/w returned mbx_intr_code: 0x%x, "
916 			    "rqstq_intr_code: 0x%x\n",
917 			    ha->mbx_intr_code, ha->rqstq_intr_code);
918 			QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);
919 			rval = QLA_SUCCESS;
920 			done = true;
921 			break;
922 
923 		default:
924 			if ((aenmbx & 0xFF00) == MBA_FW_INIT_INPROGRESS)
925 				break;
926 
927 			/* The fw is apparently not ready. In order to
928 			 * continue, we might need to issue a Mbox cmd, but
929 			 * the problem is that the DoorBell vector values that
930 			 * come with the 8060 AEN are most likely gone by now
931 			 * (and thus no bell would be rung on the fw side when
932 			 * the mbox cmd is issued). We therefore have to grab
933 			 * the 8060 AEN shadow regs (filled in by FW when the
934 			 * last 8060 AEN was being posted).
935 			 * Do the following to determine what is needed in
936 			 * order to get the FW ready:
937 			 * 1. reload the 8060 AEN values from the shadow regs
938 			 * 2. clear int status to get rid of possible pending
939 			 *    interrupts
940 			 * 3. issue Get FW State Mbox cmd to determine fw state
941 			 * Set the mbx and rqstq intr code from the shadow regs.
942 			 */
943 			aenmbx7 = RD_REG_DWORD(&reg->initval7);
944 			ha->mbx_intr_code = MSW(aenmbx7);
945 			ha->rqstq_intr_code = LSW(aenmbx7);
946 			ha->req_que_off = RD_REG_DWORD(&reg->initval1);
947 			ha->rsp_que_off = RD_REG_DWORD(&reg->initval3);
948 			ha->req_que_len = RD_REG_DWORD(&reg->initval5);
949 			ha->rsp_que_len = RD_REG_DWORD(&reg->initval6);
950 			ql_dbg(ql_dbg_init, vha, 0x0135,
951 			    "f/w returned mbx_intr_code: 0x%x, "
952 			    "rqstq_intr_code: 0x%x\n",
953 			    ha->mbx_intr_code, ha->rqstq_intr_code);
954 			QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);
955 
956 			/* Get the FW state */
957 			rval = qlafx00_get_firmware_state(vha, state);
958 			if (rval != QLA_SUCCESS) {
959 				/* Retry if timer has not expired */
960 				break;
961 			}
962 
963 			if (state[0] == FSTATE_FX00_CONFIG_WAIT) {
964 				/* Firmware is waiting to be
965 				 * initialized by driver
966 				 */
967 				rval = QLA_SUCCESS;
968 				done = true;
969 				break;
970 			}
971 
972 			/* Issue driver shutdown and wait until f/w recovers.
973 			 * Driver should continue to poll until 8060 AEN is
974 			 * received indicating firmware recovery.
975 			 */
976 			ql_dbg(ql_dbg_init, vha, 0x0136,
977 			    "Sending Driver shutdown fw_state 0x%x\n",
978 			    state[0]);
979 
980 			rval = qlafx00_driver_shutdown(vha, 10);
981 			if (rval != QLA_SUCCESS) {
982 				rval = QLA_FUNCTION_FAILED;
983 				break;
984 			}
985 			msleep(500);
986 
987 			wtime = jiffies + (wait_time * HZ);
988 			break;
989 		}
990 
991 		if (!done) {
992 			if (time_after_eq(jiffies, wtime)) {
993 				ql_dbg(ql_dbg_init, vha, 0x0137,
994 				    "Init f/w failed: aen[7]: 0x%x\n",
995 				    RD_REG_DWORD(&reg->aenmailbox7));
996 				rval = QLA_FUNCTION_FAILED;
997 				done = true;
998 				break;
999 			}
1000 			/* Delay for a while */
1001 			msleep(500);
1002 		}
1003 	} while (!done);
1004 
1005 	if (rval)
1006 		ql_dbg(ql_dbg_init, vha, 0x0138,
1007 		    "%s **** FAILED ****.\n", __func__);
1008 	else
1009 		ql_dbg(ql_dbg_init, vha, 0x0139,
1010 		    "%s **** SUCCESS ****.\n", __func__);
1011 
1012 	return rval;
1013 }
1014 
1015 /*
1016  * qlafx00_fw_ready() - Waits for the firmware to become ready.
1017  * @vha: HA context
1018  *
1019  * Returns 0 on success.
1020  */
1021 int
1022 qlafx00_fw_ready(scsi_qla_host_t *vha)
1023 {
1024 	int		rval;
1025 	unsigned long	wtime;
1026 	uint16_t	wait_time;	/* Wait time if loop is coming ready */
1027 	uint32_t	state[5];
1028 
1029 	rval = QLA_SUCCESS;
1030 
1031 	wait_time = 10;
1032 
1033 	/* wait time before firmware ready */
1034 	wtime = jiffies + (wait_time * HZ);
1035 
1036 	/* Wait for ISP to finish init */
1037 	if (!vha->flags.init_done)
1038 		ql_dbg(ql_dbg_init, vha, 0x013a,
1039 		    "Waiting for init to complete...\n");
1040 
1041 	do {
1042 		rval = qlafx00_get_firmware_state(vha, state);
1043 
1044 		if (rval == QLA_SUCCESS) {
1045 			if (state[0] == FSTATE_FX00_INITIALIZED) {
1046 				ql_dbg(ql_dbg_init, vha, 0x013b,
1047 				    "fw_state=%x\n", state[0]);
1048 				rval = QLA_SUCCESS;
1049 				break;
1050 			}
1051 		}
1052 		rval = QLA_FUNCTION_FAILED;
1053 
1054 		if (time_after_eq(jiffies, wtime))
1055 			break;
1056 
1057 		/* Delay for a while */
1058 		msleep(500);
1059 
1060 		ql_dbg(ql_dbg_init, vha, 0x013c,
1061 		    "fw_state=%x curr time=%lx.\n", state[0], jiffies);
1062 	} while (1);
1063 
1064 
1065 	if (rval)
1066 		ql_dbg(ql_dbg_init, vha, 0x013d,
1067 		    "Firmware ready **** FAILED ****.\n");
1068 	else
1069 		ql_dbg(ql_dbg_init, vha, 0x013e,
1070 		    "Firmware ready **** SUCCESS ****.\n");
1071 
1072 	return rval;
1073 }
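
/*
 * Note: qlafx00_init_fw_ready() and qlafx00_fw_ready() both use the
 * standard jiffies deadline-poll idiom.  A minimal standalone sketch,
 * assuming a hypothetical device_ready() predicate:
 *
 *	unsigned long deadline = jiffies + wait_time * HZ;
 *
 *	do {
 *		if (device_ready(dev))
 *			return QLA_SUCCESS;
 *		if (time_after_eq(jiffies, deadline))
 *			return QLA_FUNCTION_FAILED;
 *		msleep(500);
 *	} while (1);
 *
 * time_after_eq() does a wrap-safe comparison of jiffies against the
 * deadline, which is why it is preferred over a plain >=.
 */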
1074 
1075 static int
1076 qlafx00_find_all_targets(scsi_qla_host_t *vha,
1077 	struct list_head *new_fcports)
1078 {
1079 	int		rval;
1080 	uint16_t	tgt_id;
1081 	fc_port_t	*fcport, *new_fcport;
1082 	int		found;
1083 	struct qla_hw_data *ha = vha->hw;
1084 
1085 	rval = QLA_SUCCESS;
1086 
1087 	if (!test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))
1088 		return QLA_FUNCTION_FAILED;
1089 
1090 	if ((atomic_read(&vha->loop_down_timer) ||
1091 	     STATE_TRANSITION(vha))) {
1092 		atomic_set(&vha->loop_down_timer, 0);
1093 		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1094 		return QLA_FUNCTION_FAILED;
1095 	}
1096 
1097 	ql_dbg(ql_dbg_disc + ql_dbg_init, vha, 0x2088,
1098 	    "Listing Target bit map...\n");
1099 	ql_dump_buffer(ql_dbg_disc + ql_dbg_init, vha,
1100 	    0x2089, (uint8_t *)ha->gid_list, 32);
1101 
1102 	/* Allocate temporary rmtport for any new rmtports discovered. */
1103 	new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
1104 	if (new_fcport == NULL)
1105 		return QLA_MEMORY_ALLOC_FAILED;
1106 
1107 	for_each_set_bit(tgt_id, (void *)ha->gid_list,
1108 	    QLAFX00_TGT_NODE_LIST_SIZE) {
1109 
1110 		/* Send get target node info */
1111 		new_fcport->tgt_id = tgt_id;
1112 		rval = qlafx00_fx_disc(vha, new_fcport,
1113 		    FXDISC_GET_TGT_NODE_INFO);
1114 		if (rval != QLA_SUCCESS) {
1115 			ql_log(ql_log_warn, vha, 0x208a,
1116 			    "Target info scan failed -- assuming zero-entry "
1117 			    "result...\n");
1118 			continue;
1119 		}
1120 
1121 		/* Locate matching device in database. */
1122 		found = 0;
1123 		list_for_each_entry(fcport, &vha->vp_fcports, list) {
1124 			if (memcmp(new_fcport->port_name,
1125 			    fcport->port_name, WWN_SIZE))
1126 				continue;
1127 
1128 			found++;
1129 
1130 			/*
1131 			 * If tgt_id is same and state FCS_ONLINE, nothing
1132 			 * changed.
1133 			 */
1134 			if (fcport->tgt_id == new_fcport->tgt_id &&
1135 			    atomic_read(&fcport->state) == FCS_ONLINE)
1136 				break;
1137 
1138 			/*
1139 			 * Tgt ID changed or device was marked to be updated.
1140 			 */
1141 			ql_dbg(ql_dbg_disc + ql_dbg_init, vha, 0x208b,
1142 			    "TGT-ID Change(%s): Present tgt id: "
1143 			    "0x%x state: 0x%x "
1144 			    "wwnn = %llx wwpn = %llx.\n",
1145 			    __func__, fcport->tgt_id,
1146 			    atomic_read(&fcport->state),
1147 			    (unsigned long long)wwn_to_u64(fcport->node_name),
1148 			    (unsigned long long)wwn_to_u64(fcport->port_name));
1149 
1150 			ql_log(ql_log_info, vha, 0x208c,
1151 			    "TGT-ID Announce(%s): Discovered tgt "
1152 			    "id 0x%x wwnn = %llx "
1153 			    "wwpn = %llx.\n", __func__, new_fcport->tgt_id,
1154 			    (unsigned long long)
1155 			    wwn_to_u64(new_fcport->node_name),
1156 			    (unsigned long long)
1157 			    wwn_to_u64(new_fcport->port_name));
1158 
1159 			if (atomic_read(&fcport->state) != FCS_ONLINE) {
1160 				fcport->old_tgt_id = fcport->tgt_id;
1161 				fcport->tgt_id = new_fcport->tgt_id;
1162 				ql_log(ql_log_info, vha, 0x208d,
1163 				   "TGT-ID: New fcport Added: %p\n", fcport);
1164 				qla2x00_update_fcport(vha, fcport);
1165 			} else {
1166 				ql_log(ql_log_info, vha, 0x208e,
1167 				    "Existing TGT-ID %x did not get "
1168 				    "offline event from firmware.\n",
1169 				    fcport->old_tgt_id);
1170 				qla2x00_mark_device_lost(vha, fcport, 0, 0);
1171 				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1172 				kfree(new_fcport);
1173 				return rval;
1174 			}
1175 			break;
1176 		}
1177 
1178 		if (found)
1179 			continue;
1180 
1181 		/* If device was not in our fcports list, then add it. */
1182 		list_add_tail(&new_fcport->list, new_fcports);
1183 
1184 		/* Allocate a new replacement fcport. */
1185 		new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
1186 		if (new_fcport == NULL)
1187 			return QLA_MEMORY_ALLOC_FAILED;
1188 	}
1189 
1190 	kfree(new_fcport);
1191 	return rval;
1192 }
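
/*
 * Note: the discovery loop above uses an allocate-ahead pattern -- a
 * single spare fcport is allocated up front and reused for each
 * FXDISC_GET_TGT_NODE_INFO query; only when the query describes a
 * genuinely new device is the spare appended to new_fcports and a
 * fresh one allocated.  Whatever spare is left over at loop exit is
 * freed.
 */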
1193 
1194 /*
1195  * qlafx00_configure_all_targets
1196  *      Set up target devices with node IDs.
1197  *
1198  * Input:
1199  *      ha = adapter block pointer.
1200  *
1201  * Returns:
1202  *      0 = success.
1203  *      BIT_0 = error
1204  */
1205 static int
1206 qlafx00_configure_all_targets(scsi_qla_host_t *vha)
1207 {
1208 	int rval;
1209 	fc_port_t *fcport, *rmptemp;
1210 	LIST_HEAD(new_fcports);
1211 
1212 	rval = qlafx00_fx_disc(vha, &vha->hw->mr.fcport,
1213 	    FXDISC_GET_TGT_NODE_LIST);
1214 	if (rval != QLA_SUCCESS) {
1215 		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1216 		return rval;
1217 	}
1218 
1219 	rval = qlafx00_find_all_targets(vha, &new_fcports);
1220 	if (rval != QLA_SUCCESS) {
1221 		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1222 		return rval;
1223 	}
1224 
1225 	/*
1226 	 * Delete all previous devices marked lost.
1227 	 */
1228 	list_for_each_entry(fcport, &vha->vp_fcports, list) {
1229 		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
1230 			break;
1231 
1232 		if (atomic_read(&fcport->state) == FCS_DEVICE_LOST) {
1233 			if (fcport->port_type != FCT_INITIATOR)
1234 				qla2x00_mark_device_lost(vha, fcport, 0, 0);
1235 		}
1236 	}
1237 
1238 	/*
1239 	 * Add the new devices to our devices list.
1240 	 */
1241 	list_for_each_entry_safe(fcport, rmptemp, &new_fcports, list) {
1242 		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
1243 			break;
1244 
1245 		qla2x00_update_fcport(vha, fcport);
1246 		list_move_tail(&fcport->list, &vha->vp_fcports);
1247 		ql_log(ql_log_info, vha, 0x208f,
1248 		    "Attach new target id 0x%x wwnn = %llx "
1249 		    "wwpn = %llx.\n",
1250 		    fcport->tgt_id,
1251 		    (unsigned long long)wwn_to_u64(fcport->node_name),
1252 		    (unsigned long long)wwn_to_u64(fcport->port_name));
1253 	}
1254 
1255 	/* Free all new device structures not processed. */
1256 	list_for_each_entry_safe(fcport, rmptemp, &new_fcports, list) {
1257 		list_del(&fcport->list);
1258 		kfree(fcport);
1259 	}
1260 
1261 	return rval;
1262 }
1263 
1264 /*
1265  * qlafx00_configure_devices
1266  *      Updates Fibre Channel Device Database with what is actually on loop.
1267  *
1268  * Input:
1269  *      ha                = adapter block pointer.
1270  *
1271  * Returns:
1272  *      0 = success.
1273  *      1 = error.
1274  *      2 = database was full and device was not configured.
1275  */
1276 int
1277 qlafx00_configure_devices(scsi_qla_host_t *vha)
1278 {
1279 	int rval;
1280 	unsigned long flags, save_flags;
1281 
1282 	rval = QLA_SUCCESS;
1283 	save_flags = flags = vha->dpc_flags;
1284 
1285 	ql_dbg(ql_dbg_disc, vha, 0x2090,
1286 	    "Configure devices -- dpc flags =0x%lx\n", flags);
1287 
1288 	rval = qlafx00_configure_all_targets(vha);
1289 
1290 	if (rval == QLA_SUCCESS) {
1291 		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
1292 			rval = QLA_FUNCTION_FAILED;
1293 		} else {
1294 			atomic_set(&vha->loop_state, LOOP_READY);
1295 			ql_log(ql_log_info, vha, 0x2091,
1296 			    "Device Ready\n");
1297 		}
1298 	}
1299 
1300 	if (rval) {
1301 		ql_dbg(ql_dbg_disc, vha, 0x2092,
1302 		    "%s *** FAILED ***.\n", __func__);
1303 	} else {
1304 		ql_dbg(ql_dbg_disc, vha, 0x2093,
1305 		    "%s: exiting normally.\n", __func__);
1306 	}
1307 	return rval;
1308 }
1309 
1310 static void
1311 qlafx00_abort_isp_cleanup(scsi_qla_host_t *vha, bool critemp)
1312 {
1313 	struct qla_hw_data *ha = vha->hw;
1314 	fc_port_t *fcport;
1315 
1316 	vha->flags.online = 0;
1317 	ha->mr.fw_hbt_en = 0;
1318 
1319 	if (!critemp) {
1320 		ha->flags.chip_reset_done = 0;
1321 		clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1322 		vha->qla_stats.total_isp_aborts++;
1323 		ql_log(ql_log_info, vha, 0x013f,
1324 		    "Performing ISP error recovery - ha = %p.\n", ha);
1325 		ha->isp_ops->reset_chip(vha);
1326 	}
1327 
1328 	if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
1329 		atomic_set(&vha->loop_state, LOOP_DOWN);
1330 		atomic_set(&vha->loop_down_timer,
1331 		    QLAFX00_LOOP_DOWN_TIME);
1332 	} else {
1333 		if (!atomic_read(&vha->loop_down_timer))
1334 			atomic_set(&vha->loop_down_timer,
1335 			    QLAFX00_LOOP_DOWN_TIME);
1336 	}
1337 
1338 	/* Clear all async request states across all VPs. */
1339 	list_for_each_entry(fcport, &vha->vp_fcports, list) {
1340 		fcport->flags = 0;
1341 		if (atomic_read(&fcport->state) == FCS_ONLINE)
1342 			qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
1343 	}
1344 
1345 	if (!ha->flags.eeh_busy) {
1346 		if (critemp) {
1347 			qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
1348 		} else {
1349 			/* Requeue all commands in outstanding command list. */
1350 			qla2x00_abort_all_cmds(vha, DID_RESET << 16);
1351 		}
1352 	}
1353 
1354 	qla2x00_free_irqs(vha);
1355 	if (critemp)
1356 		set_bit(FX00_CRITEMP_RECOVERY, &vha->dpc_flags);
1357 	else
1358 		set_bit(FX00_RESET_RECOVERY, &vha->dpc_flags);
1359 
1360 	/* Clear the Interrupts */
1361 	QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);
1362 
1363 	ql_log(ql_log_info, vha, 0x0140,
1364 	    "%s Done - ha=%p.\n", __func__, ha);
1365 }
1366 
1367 /**
1368  * qlafx00_init_response_q_entries() - Initializes response queue entries.
1369  * @rsp: response queue
1370  *
1371  * Beginning of request ring has initialization control block already built
1372  * by nvram config routine.
1373  */
1376 void
1377 qlafx00_init_response_q_entries(struct rsp_que *rsp)
1378 {
1379 	uint16_t cnt;
1380 	response_t *pkt;
1381 
1382 	rsp->ring_ptr = rsp->ring;
1383 	rsp->ring_index    = 0;
1384 	rsp->status_srb = NULL;
1385 	pkt = rsp->ring_ptr;
1386 	for (cnt = 0; cnt < rsp->length; cnt++) {
1387 		pkt->signature = RESPONSE_PROCESSED;
1388 		WRT_REG_DWORD((void __iomem *)&pkt->signature,
1389 		    RESPONSE_PROCESSED);
1390 		pkt++;
1391 	}
1392 }
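
/*
 * Note: stamping every entry with RESPONSE_PROCESSED marks the whole
 * ring as empty; firmware overwrites the signature when it posts a
 * new entry, so the response handler treats a signature other than
 * RESPONSE_PROCESSED as a fresh entry and re-stamps it once consumed.
 * The extra WRT_REG_DWORD() matters because the ring may live in
 * device memory on ISPFx00 (see qlafx00_config_queues()).
 */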
1393 
1394 int
1395 qlafx00_rescan_isp(scsi_qla_host_t *vha)
1396 {
1397 	uint32_t status = QLA_FUNCTION_FAILED;
1398 	struct qla_hw_data *ha = vha->hw;
1399 	struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
1400 	uint32_t aenmbx7;
1401 
1402 	qla2x00_request_irqs(ha, ha->rsp_q_map[0]);
1403 
1404 	aenmbx7 = RD_REG_DWORD(&reg->aenmailbox7);
1405 	ha->mbx_intr_code = MSW(aenmbx7);
1406 	ha->rqstq_intr_code = LSW(aenmbx7);
1407 	ha->req_que_off = RD_REG_DWORD(&reg->aenmailbox1);
1408 	ha->rsp_que_off = RD_REG_DWORD(&reg->aenmailbox3);
1409 	ha->req_que_len = RD_REG_DWORD(&reg->aenmailbox5);
1410 	ha->rsp_que_len = RD_REG_DWORD(&reg->aenmailbox6);
1411 
1412 	ql_dbg(ql_dbg_disc, vha, 0x2094,
1413 	    "fw returned mbx_intr_code: 0x%x, rqstq_intr_code: 0x%x "
1414 	    "Req que offset 0x%x Rsp que offset 0x%x\n",
1415 	    ha->mbx_intr_code, ha->rqstq_intr_code,
1416 	    ha->req_que_off, ha->rsp_que_off);
1417 
1418 	/* Clear the Interrupts */
1419 	QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);
1420 
1421 	status = qla2x00_init_rings(vha);
1422 	if (!status) {
1423 		vha->flags.online = 1;
1424 
1425 		/* if no cable then assume it's good */
1426 		if ((vha->device_flags & DFLG_NO_CABLE))
1427 			status = 0;
1428 		/* Register system information */
1429 		if (qlafx00_fx_disc(vha,
1430 		    &vha->hw->mr.fcport, FXDISC_REG_HOST_INFO))
1431 			ql_dbg(ql_dbg_disc, vha, 0x2095,
1432 			    "failed to register host info\n");
1433 	}
1434 	scsi_unblock_requests(vha->host);
1435 	return status;
1436 }
1437 
1438 void
1439 qlafx00_timer_routine(scsi_qla_host_t *vha)
1440 {
1441 	struct qla_hw_data *ha = vha->hw;
1442 	uint32_t fw_heart_beat;
1443 	uint32_t aenmbx0;
1444 	struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
1445 	uint32_t tempc;
1446 
1447 	/* Check firmware health */
1448 	if (ha->mr.fw_hbt_cnt)
1449 		ha->mr.fw_hbt_cnt--;
1450 	else {
1451 		if ((!ha->flags.mr_reset_hdlr_active) &&
1452 		    (!test_bit(UNLOADING, &vha->dpc_flags)) &&
1453 		    (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) &&
1454 		    (ha->mr.fw_hbt_en)) {
1455 			fw_heart_beat = RD_REG_DWORD(&reg->fwheartbeat);
1456 			if (fw_heart_beat != ha->mr.old_fw_hbt_cnt) {
1457 				ha->mr.old_fw_hbt_cnt = fw_heart_beat;
1458 				ha->mr.fw_hbt_miss_cnt = 0;
1459 			} else {
1460 				ha->mr.fw_hbt_miss_cnt++;
1461 				if (ha->mr.fw_hbt_miss_cnt ==
1462 				    QLAFX00_HEARTBEAT_MISS_CNT) {
1463 					set_bit(ISP_ABORT_NEEDED,
1464 					    &vha->dpc_flags);
1465 					qla2xxx_wake_dpc(vha);
1466 					ha->mr.fw_hbt_miss_cnt = 0;
1467 				}
1468 			}
1469 		}
1470 		ha->mr.fw_hbt_cnt = QLAFX00_HEARTBEAT_INTERVAL;
1471 	}
1472 
1473 	if (test_bit(FX00_RESET_RECOVERY, &vha->dpc_flags)) {
1474 		/* Reset recovery to be performed in timer routine */
1475 		aenmbx0 = RD_REG_DWORD(&reg->aenmailbox0);
1476 		if (ha->mr.fw_reset_timer_exp) {
1477 			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1478 			qla2xxx_wake_dpc(vha);
1479 			ha->mr.fw_reset_timer_exp = 0;
1480 		} else if (aenmbx0 == MBA_FW_RESTART_CMPLT) {
1481 			/* Wake up DPC to rescan the targets */
1482 			set_bit(FX00_TARGET_SCAN, &vha->dpc_flags);
1483 			clear_bit(FX00_RESET_RECOVERY, &vha->dpc_flags);
1484 			qla2xxx_wake_dpc(vha);
1485 			ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL;
1486 		} else if ((aenmbx0 == MBA_FW_STARTING) &&
1487 		    (!ha->mr.fw_hbt_en)) {
1488 			ha->mr.fw_hbt_en = 1;
1489 		} else if (!ha->mr.fw_reset_timer_tick) {
1490 			if (aenmbx0 == ha->mr.old_aenmbx0_state)
1491 				ha->mr.fw_reset_timer_exp = 1;
1492 			ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL;
1493 		} else if (aenmbx0 == 0xFFFFFFFF) {
1494 			uint32_t data0, data1;
1495 
1496 			data0 = QLAFX00_RD_REG(ha,
1497 			    QLAFX00_BAR1_BASE_ADDR_REG);
1498 			data1 = QLAFX00_RD_REG(ha,
1499 			    QLAFX00_PEX0_WIN0_BASE_ADDR_REG);
1500 
1501 			data0 &= 0xffff0000;
1502 			data1 &= 0x0000ffff;
1503 
1504 			QLAFX00_WR_REG(ha,
1505 			    QLAFX00_PEX0_WIN0_BASE_ADDR_REG,
1506 			    (data0 | data1));
1507 		} else if ((aenmbx0 & 0xFF00) == MBA_FW_POLL_STATE) {
1508 			ha->mr.fw_reset_timer_tick =
1509 			    QLAFX00_MAX_RESET_INTERVAL;
1510 		} else if (aenmbx0 == MBA_FW_RESET_FCT) {
1511 			ha->mr.fw_reset_timer_tick =
1512 			    QLAFX00_MAX_RESET_INTERVAL;
1513 		}
1514 		ha->mr.old_aenmbx0_state = aenmbx0;
1515 		ha->mr.fw_reset_timer_tick--;
1516 	}
1517 	if (test_bit(FX00_CRITEMP_RECOVERY, &vha->dpc_flags)) {
1518 		/*
1519 		 * Critical temperature recovery to be
1520 		 * performed in timer routine
1521 		 */
1522 		if (ha->mr.fw_critemp_timer_tick == 0) {
1523 			tempc = QLAFX00_GET_TEMPERATURE(ha);
1524 			ql_dbg(ql_dbg_timer, vha, 0x6012,
1525 			    "ISPFx00(%s): Critical temp timer, "
1526 			    "current SOC temperature: %d\n",
1527 			    __func__, tempc);
1528 			if (tempc < ha->mr.critical_temperature) {
1529 				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1530 				clear_bit(FX00_CRITEMP_RECOVERY,
1531 				    &vha->dpc_flags);
1532 				qla2xxx_wake_dpc(vha);
1533 			}
1534 			ha->mr.fw_critemp_timer_tick =
1535 			    QLAFX00_CRITEMP_INTERVAL;
1536 		} else {
1537 			ha->mr.fw_critemp_timer_tick--;
1538 		}
1539 	}
1540 	if (ha->mr.host_info_resend) {
1541 		/*
1542 		 * Incomplete host info might be sent to firmware
1543 		 * during system boot - info should be resent
1544 		 */
1545 		if (ha->mr.hinfo_resend_timer_tick == 0) {
1546 			ha->mr.host_info_resend = false;
1547 			set_bit(FX00_HOST_INFO_RESEND, &vha->dpc_flags);
1548 			ha->mr.hinfo_resend_timer_tick =
1549 			    QLAFX00_HINFO_RESEND_INTERVAL;
1550 			qla2xxx_wake_dpc(vha);
1551 		} else {
1552 			ha->mr.hinfo_resend_timer_tick--;
1553 		}
1554 	}
1555 
1556 }
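
/*
 * Note: the firmware health check above is a sampled miss counter.  A
 * minimal sketch of the technique, with hypothetical names:
 *
 *	u32 hbt = read_fw_heartbeat(ha);	(incremented by running fw)
 *
 *	if (hbt != last_hbt) {
 *		last_hbt = hbt;
 *		misses = 0;
 *	} else if (++misses == MISS_LIMIT) {
 *		schedule_isp_abort(vha);	(fw presumed hung)
 *		misses = 0;
 *	}
 */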
1557 
1558 /*
1559  *  qlafx00_reset_initialize
1560  *      Re-initialize after an iSA device reset.
1561  *
1562  * Input:
1563  *      ha  = adapter block pointer.
1564  *
1565  * Returns:
1566  *      0 = success
1567  */
1568 int
1569 qlafx00_reset_initialize(scsi_qla_host_t *vha)
1570 {
1571 	struct qla_hw_data *ha = vha->hw;
1572 
1573 	if (vha->device_flags & DFLG_DEV_FAILED) {
1574 		ql_dbg(ql_dbg_init, vha, 0x0142,
1575 		    "Device in failed state\n");
1576 		return QLA_SUCCESS;
1577 	}
1578 
1579 	ha->flags.mr_reset_hdlr_active = 1;
1580 
1581 	if (vha->flags.online) {
1582 		scsi_block_requests(vha->host);
1583 		qlafx00_abort_isp_cleanup(vha, false);
1584 	}
1585 
1586 	ql_log(ql_log_info, vha, 0x0143,
1587 	    "(%s): succeeded.\n", __func__);
1588 	ha->flags.mr_reset_hdlr_active = 0;
1589 	return QLA_SUCCESS;
1590 }
1591 
1592 /*
1593  *  qlafx00_abort_isp
1594  *      Resets ISP and aborts all outstanding commands.
1595  *
1596  * Input:
1597  *      ha  = adapter block pointer.
1598  *
1599  * Returns:
1600  *      0 = success
1601  */
1602 int
1603 qlafx00_abort_isp(scsi_qla_host_t *vha)
1604 {
1605 	struct qla_hw_data *ha = vha->hw;
1606 
1607 	if (vha->flags.online) {
1608 		if (unlikely(pci_channel_offline(ha->pdev) &&
1609 		    ha->flags.pci_channel_io_perm_failure)) {
1610 			clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
1611 			return QLA_SUCCESS;
1612 		}
1613 
1614 		scsi_block_requests(vha->host);
1615 		qlafx00_abort_isp_cleanup(vha, false);
1616 	} else {
1617 		scsi_block_requests(vha->host);
1618 		clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1619 		vha->qla_stats.total_isp_aborts++;
1620 		ha->isp_ops->reset_chip(vha);
1621 		set_bit(FX00_RESET_RECOVERY, &vha->dpc_flags);
1622 		/* Clear the Interrupts */
1623 		QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);
1624 	}
1625 
1626 	ql_log(ql_log_info, vha, 0x0145,
1627 	    "(%s): succeeded.\n", __func__);
1628 
1629 	return QLA_SUCCESS;
1630 }
1631 
1632 static inline fc_port_t*
1633 qlafx00_get_fcport(struct scsi_qla_host *vha, int tgt_id)
1634 {
1635 	fc_port_t	*fcport;
1636 
1637 	/* Check for matching device in remote port list. */
1638 	fcport = NULL;
1639 	list_for_each_entry(fcport, &vha->vp_fcports, list) {
1640 		if (fcport->tgt_id == tgt_id) {
1641 			ql_dbg(ql_dbg_async, vha, 0x5072,
1642 			    "Matching fcport(%p) found with TGT-ID: 0x%x "
1643 			    "and Remote TGT_ID: 0x%x\n",
1644 			    fcport, fcport->tgt_id, tgt_id);
1645 			break;
1646 		}
1647 	}
1648 	return fcport;
1649 }
1650 
1651 static void
1652 qlafx00_tgt_detach(struct scsi_qla_host *vha, int tgt_id)
1653 {
1654 	fc_port_t	*fcport;
1655 
1656 	ql_log(ql_log_info, vha, 0x5073,
1657 	    "Detach TGT-ID: 0x%x\n", tgt_id);
1658 
1659 	fcport = qlafx00_get_fcport(vha, tgt_id);
1660 	if (!fcport)
1661 		return;
1662 
1663 	qla2x00_mark_device_lost(vha, fcport, 0, 0);
1666 }
1667 
1668 int
1669 qlafx00_process_aen(struct scsi_qla_host *vha, struct qla_work_evt *evt)
1670 {
1671 	int rval = 0;
1672 	uint32_t aen_code, aen_data;
1673 
1674 	aen_code = FCH_EVT_VENDOR_UNIQUE;
1675 	aen_data = evt->u.aenfx.evtcode;
1676 
1677 	switch (evt->u.aenfx.evtcode) {
1678 	case QLAFX00_MBA_PORT_UPDATE:		/* Port database update */
1679 		if (evt->u.aenfx.mbx[1] == 0) {
1680 			if (evt->u.aenfx.mbx[2] == 1) {
1681 				if (!vha->flags.fw_tgt_reported)
1682 					vha->flags.fw_tgt_reported = 1;
1683 				atomic_set(&vha->loop_down_timer, 0);
1684 				atomic_set(&vha->loop_state, LOOP_UP);
1685 				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1686 				qla2xxx_wake_dpc(vha);
1687 			} else if (evt->u.aenfx.mbx[2] == 2) {
1688 				qlafx00_tgt_detach(vha, evt->u.aenfx.mbx[3]);
1689 			}
1690 		} else if (evt->u.aenfx.mbx[1] == 0xffff) {
1691 			if (evt->u.aenfx.mbx[2] == 1) {
1692 				if (!vha->flags.fw_tgt_reported)
1693 					vha->flags.fw_tgt_reported = 1;
1694 				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1695 			} else if (evt->u.aenfx.mbx[2] == 2) {
1696 				vha->device_flags |= DFLG_NO_CABLE;
1697 				qla2x00_mark_all_devices_lost(vha, 1);
1698 			}
1699 		}
1700 		break;
1701 	case QLAFX00_MBA_LINK_UP:
1702 		aen_code = FCH_EVT_LINKUP;
1703 		aen_data = 0;
1704 		break;
1705 	case QLAFX00_MBA_LINK_DOWN:
1706 		aen_code = FCH_EVT_LINKDOWN;
1707 		aen_data = 0;
1708 		break;
1709 	case QLAFX00_MBA_TEMP_CRIT:	/* Critical temperature event */
1710 		ql_log(ql_log_info, vha, 0x5082,
1711 		    "Process critical temperature event "
1712 		    "aenmb[0]: %x\n",
1713 		    evt->u.aenfx.evtcode);
1714 		scsi_block_requests(vha->host);
1715 		qlafx00_abort_isp_cleanup(vha, true);
1716 		scsi_unblock_requests(vha->host);
1717 		break;
1718 	}
1719 
1720 	fc_host_post_event(vha->host, fc_get_event_number(),
1721 	    aen_code, aen_data);
1722 
1723 	return rval;
1724 }
1725 
1726 static void
1727 qlafx00_update_host_attr(scsi_qla_host_t *vha, struct port_info_data *pinfo)
1728 {
1729 	u64 port_name = 0, node_name = 0;
1730 
1731 	port_name = (unsigned long long)wwn_to_u64(pinfo->port_name);
1732 	node_name = (unsigned long long)wwn_to_u64(pinfo->node_name);
1733 
1734 	fc_host_node_name(vha->host) = node_name;
1735 	fc_host_port_name(vha->host) = port_name;
1736 	if (!pinfo->port_type)
1737 		vha->hw->current_topology = ISP_CFG_F;
1738 	if (pinfo->link_status == QLAFX00_LINK_STATUS_UP)
1739 		atomic_set(&vha->loop_state, LOOP_READY);
1740 	else if (pinfo->link_status == QLAFX00_LINK_STATUS_DOWN)
1741 		atomic_set(&vha->loop_state, LOOP_DOWN);
1742 	vha->hw->link_data_rate = (uint16_t)pinfo->link_config;
1743 }
1744 
1745 static void
1746 qla2x00_fxdisc_iocb_timeout(void *data)
1747 {
1748 	srb_t *sp = (srb_t *)data;
1749 	struct srb_iocb *lio = &sp->u.iocb_cmd;
1750 
1751 	complete(&lio->u.fxiocb.fxiocb_comp);
1752 }
1753 
1754 static void
1755 qla2x00_fxdisc_sp_done(void *data, void *ptr, int res)
1756 {
1757 	srb_t *sp = (srb_t *)ptr;
1758 	struct srb_iocb *lio = &sp->u.iocb_cmd;
1759 
1760 	complete(&lio->u.fxiocb.fxiocb_comp);
1761 }
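
/*
 * Note: the timeout handler and the normal done callback both complete
 * the same fxiocb_comp, so the wait_for_completion() in
 * qlafx00_fx_disc() below returns whether the IOCB finishes normally
 * or times out.
 */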
1762 
1763 int
1764 qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t fx_type)
1765 {
1766 	srb_t *sp;
1767 	struct srb_iocb *fdisc;
1768 	int rval = QLA_FUNCTION_FAILED;
1769 	struct qla_hw_data *ha = vha->hw;
1770 	struct host_system_info *phost_info;
1771 	struct register_host_info *preg_hsi;
1772 	struct new_utsname *p_sysid = NULL;
1773 	struct timeval tv;
1774 
1775 	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
1776 	if (!sp)
1777 		goto done;
1778 
1779 	fdisc = &sp->u.iocb_cmd;
1780 	switch (fx_type) {
1781 	case FXDISC_GET_CONFIG_INFO:
1782 		fdisc->u.fxiocb.flags =
1783 		    SRB_FXDISC_RESP_DMA_VALID;
1784 		fdisc->u.fxiocb.rsp_len = sizeof(struct config_info_data);
1785 		break;
1786 	case FXDISC_GET_PORT_INFO:
1787 		fdisc->u.fxiocb.flags =
1788 		    SRB_FXDISC_RESP_DMA_VALID | SRB_FXDISC_REQ_DWRD_VALID;
1789 		fdisc->u.fxiocb.rsp_len = QLAFX00_PORT_DATA_INFO;
1790 		fdisc->u.fxiocb.req_data = cpu_to_le32(fcport->port_id);
1791 		break;
1792 	case FXDISC_GET_TGT_NODE_INFO:
1793 		fdisc->u.fxiocb.flags =
1794 		    SRB_FXDISC_RESP_DMA_VALID | SRB_FXDISC_REQ_DWRD_VALID;
1795 		fdisc->u.fxiocb.rsp_len = QLAFX00_TGT_NODE_INFO;
1796 		fdisc->u.fxiocb.req_data = cpu_to_le32(fcport->tgt_id);
1797 		break;
1798 	case FXDISC_GET_TGT_NODE_LIST:
1799 		fdisc->u.fxiocb.flags =
1800 		    SRB_FXDISC_RESP_DMA_VALID | SRB_FXDISC_REQ_DWRD_VALID;
1801 		fdisc->u.fxiocb.rsp_len = QLAFX00_TGT_NODE_LIST_SIZE;
1802 		break;
1803 	case FXDISC_REG_HOST_INFO:
1804 		fdisc->u.fxiocb.flags = SRB_FXDISC_REQ_DMA_VALID;
1805 		fdisc->u.fxiocb.req_len = sizeof(struct register_host_info);
1806 		p_sysid = utsname();
1807 		if (!p_sysid) {
1808 			ql_log(ql_log_warn, vha, 0x303c,
1809 			    "Not able to get the system information\n");
1810 			goto done_free_sp;
1811 		}
1812 		break;
1813 	case FXDISC_ABORT_IOCTL:
1814 	default:
1815 		break;
1816 	}
1817 
1818 	if (fdisc->u.fxiocb.flags & SRB_FXDISC_REQ_DMA_VALID) {
1819 		fdisc->u.fxiocb.req_addr = dma_alloc_coherent(&ha->pdev->dev,
1820 		    fdisc->u.fxiocb.req_len,
1821 		    &fdisc->u.fxiocb.req_dma_handle, GFP_KERNEL);
1822 		if (!fdisc->u.fxiocb.req_addr)
1823 			goto done_free_sp;
1824 
1825 		if (fx_type == FXDISC_REG_HOST_INFO) {
1826 			preg_hsi = (struct register_host_info *)
1827 				fdisc->u.fxiocb.req_addr;
1828 			phost_info = &preg_hsi->hsi;
1829 			memset(preg_hsi, 0, sizeof(struct register_host_info));
1830 			phost_info->os_type = OS_TYPE_LINUX;
1831 			strncpy(phost_info->sysname,
1832 			    p_sysid->sysname, SYSNAME_LENGTH);
1833 			strncpy(phost_info->nodename,
1834 			    p_sysid->nodename, NODENAME_LENGTH);
1835 			if (!strcmp(phost_info->nodename, "(none)"))
1836 				ha->mr.host_info_resend = true;
1837 			strncpy(phost_info->release,
1838 			    p_sysid->release, RELEASE_LENGTH);
1839 			strncpy(phost_info->version,
1840 			    p_sysid->version, VERSION_LENGTH);
1841 			strncpy(phost_info->machine,
1842 			    p_sysid->machine, MACHINE_LENGTH);
1843 			strncpy(phost_info->domainname,
1844 			    p_sysid->domainname, DOMNAME_LENGTH);
1845 			strncpy(phost_info->hostdriver,
1846 			    QLA2XXX_VERSION, VERSION_LENGTH);
1847 			do_gettimeofday(&tv);
1848 			preg_hsi->utc = (uint64_t)tv.tv_sec;
1849 			ql_dbg(ql_dbg_init, vha, 0x0149,
1850 			    "ISP%04X: Host registration with firmware\n",
1851 			    ha->pdev->device);
1852 			ql_dbg(ql_dbg_init, vha, 0x014a,
1853 			    "os_type = '%d', sysname = '%s', nodename = '%s'\n",
1854 			    phost_info->os_type,
1855 			    phost_info->sysname,
1856 			    phost_info->nodename);
1857 			ql_dbg(ql_dbg_init, vha, 0x014b,
1858 			    "release = '%s', version = '%s'\n",
1859 			    phost_info->release,
1860 			    phost_info->version);
1861 			ql_dbg(ql_dbg_init, vha, 0x014c,
1862 			    "machine = '%s' "
1863 			    "domainname = '%s', hostdriver = '%s'\n",
1864 			    phost_info->machine,
1865 			    phost_info->domainname,
1866 			    phost_info->hostdriver);
1867 			ql_dump_buffer(ql_dbg_init + ql_dbg_disc, vha, 0x014d,
1868 			    (uint8_t *)phost_info,
1869 			    sizeof(struct host_system_info));
1870 		}
1871 	}
1872 
1873 	if (fdisc->u.fxiocb.flags & SRB_FXDISC_RESP_DMA_VALID) {
1874 		fdisc->u.fxiocb.rsp_addr = dma_alloc_coherent(&ha->pdev->dev,
1875 		    fdisc->u.fxiocb.rsp_len,
1876 		    &fdisc->u.fxiocb.rsp_dma_handle, GFP_KERNEL);
1877 		if (!fdisc->u.fxiocb.rsp_addr)
1878 			goto done_unmap_req;
1879 	}
1880 
1881 	sp->type = SRB_FXIOCB_DCMD;
1882 	sp->name = "fxdisc";
1883 	qla2x00_init_timer(sp, FXDISC_TIMEOUT);
1884 	fdisc->timeout = qla2x00_fxdisc_iocb_timeout;
1885 	fdisc->u.fxiocb.req_func_type = cpu_to_le16(fx_type);
1886 	sp->done = qla2x00_fxdisc_sp_done;
1887 
1888 	rval = qla2x00_start_sp(sp);
1889 	if (rval != QLA_SUCCESS)
1890 		goto done_unmap_dma;
1891 
1892 	wait_for_completion(&fdisc->u.fxiocb.fxiocb_comp);
1893 
1894 	if (fx_type == FXDISC_GET_CONFIG_INFO) {
1895 		struct config_info_data *pinfo =
1896 		    (struct config_info_data *) fdisc->u.fxiocb.rsp_addr;
1897 		strcpy(vha->hw->model_number, pinfo->model_num);
1898 		strcpy(vha->hw->model_desc, pinfo->model_description);
1899 		memcpy(&vha->hw->mr.symbolic_name, pinfo->symbolic_name,
1900 		    sizeof(vha->hw->mr.symbolic_name));
1901 		memcpy(&vha->hw->mr.serial_num, pinfo->serial_num,
1902 		    sizeof(vha->hw->mr.serial_num));
1903 		memcpy(&vha->hw->mr.hw_version, pinfo->hw_version,
1904 		    sizeof(vha->hw->mr.hw_version));
1905 		memcpy(&vha->hw->mr.fw_version, pinfo->fw_version,
1906 		    sizeof(vha->hw->mr.fw_version));
1907 		strim(vha->hw->mr.fw_version);
1908 		memcpy(&vha->hw->mr.uboot_version, pinfo->uboot_version,
1909 		    sizeof(vha->hw->mr.uboot_version));
1910 		memcpy(&vha->hw->mr.fru_serial_num, pinfo->fru_serial_num,
1911 		    sizeof(vha->hw->mr.fru_serial_num));
1912 		vha->hw->mr.critical_temperature =
1913 		    (pinfo->nominal_temp_value) ?
1914 		    pinfo->nominal_temp_value : QLAFX00_CRITEMP_THRSHLD;
1915 		ha->mr.extended_io_enabled = (pinfo->enabled_capabilities &
1916 		    QLAFX00_EXTENDED_IO_EN_MASK) != 0;
1917 	} else if (fx_type == FXDISC_GET_PORT_INFO) {
1918 		struct port_info_data *pinfo =
1919 		    (struct port_info_data *) fdisc->u.fxiocb.rsp_addr;
1920 		memcpy(vha->node_name, pinfo->node_name, WWN_SIZE);
1921 		memcpy(vha->port_name, pinfo->port_name, WWN_SIZE);
1922 		vha->d_id.b.domain = pinfo->port_id[0];
1923 		vha->d_id.b.area = pinfo->port_id[1];
1924 		vha->d_id.b.al_pa = pinfo->port_id[2];
1925 		qlafx00_update_host_attr(vha, pinfo);
1926 		ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0141,
1927 		    (uint8_t *)pinfo, 16);
1928 	} else if (fx_type == FXDISC_GET_TGT_NODE_INFO) {
1929 		struct qlafx00_tgt_node_info *pinfo =
1930 		    (struct qlafx00_tgt_node_info *) fdisc->u.fxiocb.rsp_addr;
1931 		memcpy(fcport->node_name, pinfo->tgt_node_wwnn, WWN_SIZE);
1932 		memcpy(fcport->port_name, pinfo->tgt_node_wwpn, WWN_SIZE);
1933 		fcport->port_type = FCT_TARGET;
1934 		ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0144,
1935 		    (uint8_t *)pinfo, 16);
1936 	} else if (fx_type == FXDISC_GET_TGT_NODE_LIST) {
1937 		struct qlafx00_tgt_node_info *pinfo =
1938 		    (struct qlafx00_tgt_node_info *) fdisc->u.fxiocb.rsp_addr;
1939 		ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0146,
1940 		    (uint8_t *)pinfo, 16);
1941 		memcpy(vha->hw->gid_list, pinfo, QLAFX00_TGT_NODE_LIST_SIZE);
1942 	} else if (fx_type == FXDISC_ABORT_IOCTL)
1943 		fdisc->u.fxiocb.result =
1944 		    (fdisc->u.fxiocb.result ==
1945 			cpu_to_le32(QLAFX00_IOCTL_ICOB_ABORT_SUCCESS)) ?
1946 		    cpu_to_le32(QLA_SUCCESS) : cpu_to_le32(QLA_FUNCTION_FAILED);
1947 
1948 	rval = le32_to_cpu(fdisc->u.fxiocb.result);
1949 
1950 done_unmap_dma:
1951 	if (fdisc->u.fxiocb.rsp_addr)
1952 		dma_free_coherent(&ha->pdev->dev, fdisc->u.fxiocb.rsp_len,
1953 		    fdisc->u.fxiocb.rsp_addr, fdisc->u.fxiocb.rsp_dma_handle);
1954 
1955 done_unmap_req:
1956 	if (fdisc->u.fxiocb.req_addr)
1957 		dma_free_coherent(&ha->pdev->dev, fdisc->u.fxiocb.req_len,
1958 		    fdisc->u.fxiocb.req_addr, fdisc->u.fxiocb.req_dma_handle);
1959 done_free_sp:
1960 	sp->free(vha, sp);
1961 done:
1962 	return rval;
1963 }
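
/*
 * Illustrative sketch (not part of the driver): the flag-driven buffer
 * handshake used by the FXDISC path above, modeled in plain C.  The
 * EX_* flags and the ex_fxdisc_model() helper are hypothetical
 * stand-ins; the point is the unwind order, which mirrors the
 * done_unmap_dma / done_unmap_req / done_free_sp labels -- resources
 * are released in the reverse order of their acquisition, on both the
 * success and the failure paths.
 */
#include <stdlib.h>

#define EX_REQ_DMA_VALID	0x01	/* hypothetical stand-in flags */
#define EX_RESP_DMA_VALID	0x02

static int ex_fxdisc_model(unsigned int flags, size_t req_len, size_t rsp_len)
{
	void *req = NULL, *rsp = NULL;
	int rval = -1;

	if (flags & EX_REQ_DMA_VALID) {
		req = malloc(req_len);	/* stands in for dma_alloc_coherent() */
		if (!req)
			goto done;
	}
	if (flags & EX_RESP_DMA_VALID) {
		rsp = malloc(rsp_len);
		if (!rsp)
			goto done_unmap_req;
	}

	rval = 0;	/* the real path issues the IOCB and waits here */

	free(rsp);	/* mirrors the done_unmap_dma label */
done_unmap_req:
	free(req);	/* mirrors the done_unmap_req label */
done:
	return rval;
}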
1964 
1965 /*
1966  * qlafx00_initialize_adapter
1967  *      Initialize board.
1968  *
1969  * Input:
1970  *      ha = adapter block pointer.
1971  *
1972  * Returns:
1973  *      0 = success
1974  */
1975 int
1976 qlafx00_initialize_adapter(scsi_qla_host_t *vha)
1977 {
1978 	int	rval;
1979 	struct qla_hw_data *ha = vha->hw;
1980 	uint32_t tempc;
1981 
1982 	/* Clear adapter flags. */
1983 	vha->flags.online = 0;
1984 	ha->flags.chip_reset_done = 0;
1985 	vha->flags.reset_active = 0;
1986 	ha->flags.pci_channel_io_perm_failure = 0;
1987 	ha->flags.eeh_busy = 0;
1988 	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
1989 	atomic_set(&vha->loop_state, LOOP_DOWN);
1990 	vha->device_flags = DFLG_NO_CABLE;
1991 	vha->dpc_flags = 0;
1992 	vha->flags.management_server_logged_in = 0;
1993 	ha->isp_abort_cnt = 0;
1994 	ha->beacon_blink_led = 0;
1995 
1996 	set_bit(0, ha->req_qid_map);
1997 	set_bit(0, ha->rsp_qid_map);
1998 
1999 	ql_dbg(ql_dbg_init, vha, 0x0147,
2000 	    "Configuring PCI space...\n");
2001 
2002 	rval = ha->isp_ops->pci_config(vha);
2003 	if (rval) {
2004 		ql_log(ql_log_warn, vha, 0x0148,
2005 		    "Unable to configure PCI space.\n");
2006 		return rval;
2007 	}
2008 
2009 	rval = qlafx00_init_fw_ready(vha);
2010 	if (rval != QLA_SUCCESS)
2011 		return rval;
2012 
2013 	qlafx00_save_queue_ptrs(vha);
2014 
2015 	rval = qlafx00_config_queues(vha);
2016 	if (rval != QLA_SUCCESS)
2017 		return rval;
2018 
2019 	/*
2020 	 * Allocate the array of outstanding commands
2021 	 * now that we know the firmware resources.
2022 	 */
2023 	rval = qla2x00_alloc_outstanding_cmds(ha, vha->req);
2024 	if (rval != QLA_SUCCESS)
2025 		return rval;
2026 
2027 	rval = qla2x00_init_rings(vha);
2028 	ha->flags.chip_reset_done = 1;
2029 
2030 	tempc = QLAFX00_GET_TEMPERATURE(ha);
2031 	ql_dbg(ql_dbg_init, vha, 0x0152,
2032 	    "ISPFx00(%s): Critical temp timer, current SOC temperature: 0x%x\n",
2033 	    __func__, tempc);
2034 
2035 	return rval;
2036 }
2037 
2038 uint32_t
2039 qlafx00_fw_state_show(struct device *dev, struct device_attribute *attr,
2040 		      char *buf)
2041 {
2042 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2043 	int rval = QLA_FUNCTION_FAILED;
2044 	uint32_t state[1];
2045 
2046 	if (qla2x00_reset_active(vha))
2047 		ql_log(ql_log_warn, vha, 0x70ce,
2048 		    "ISP reset active.\n");
2049 	else if (!vha->hw->flags.eeh_busy) {
2050 		rval = qlafx00_get_firmware_state(vha, state);
2051 	}
2052 	if (rval != QLA_SUCCESS)
2053 		memset(state, -1, sizeof(state));
2054 
2055 	return state[0];
2056 }
2057 
2058 void
2059 qlafx00_get_host_speed(struct Scsi_Host *shost)
2060 {
2061 	struct qla_hw_data *ha = ((struct scsi_qla_host *)
2062 					(shost_priv(shost)))->hw;
2063 	u32 speed = FC_PORTSPEED_UNKNOWN;
2064 
2065 	switch (ha->link_data_rate) {
2066 	case QLAFX00_PORT_SPEED_2G:
2067 		speed = FC_PORTSPEED_2GBIT;
2068 		break;
2069 	case QLAFX00_PORT_SPEED_4G:
2070 		speed = FC_PORTSPEED_4GBIT;
2071 		break;
2072 	case QLAFX00_PORT_SPEED_8G:
2073 		speed = FC_PORTSPEED_8GBIT;
2074 		break;
2075 	case QLAFX00_PORT_SPEED_10G:
2076 		speed = FC_PORTSPEED_10GBIT;
2077 		break;
2078 	}
2079 	fc_host_speed(shost) = speed;
2080 }
2081 
2082 /* QLAFX00 specific ISR implementation functions */
2083 
2084 static inline void
2085 qlafx00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
2086 		     uint32_t sense_len, struct rsp_que *rsp, int res)
2087 {
2088 	struct scsi_qla_host *vha = sp->fcport->vha;
2089 	struct scsi_cmnd *cp = GET_CMD_SP(sp);
2090 	uint32_t track_sense_len;
2091 
2092 	SET_FW_SENSE_LEN(sp, sense_len);
2093 
2094 	if (sense_len >= SCSI_SENSE_BUFFERSIZE)
2095 		sense_len = SCSI_SENSE_BUFFERSIZE;
2096 
2097 	SET_CMD_SENSE_LEN(sp, sense_len);
2098 	SET_CMD_SENSE_PTR(sp, cp->sense_buffer);
2099 	track_sense_len = sense_len;
2100 
2101 	if (sense_len > par_sense_len)
2102 		sense_len = par_sense_len;
2103 
2104 	memcpy(cp->sense_buffer, sense_data, sense_len);
2105 
2106 	SET_FW_SENSE_LEN(sp, GET_FW_SENSE_LEN(sp) - sense_len);
2107 
2108 	SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len);
2109 	track_sense_len -= sense_len;
2110 	SET_CMD_SENSE_LEN(sp, track_sense_len);
2111 
2112 	ql_dbg(ql_dbg_io, vha, 0x304d,
2113 	    "sense_len=0x%x par_sense_len=0x%x track_sense_len=0x%x.\n",
2114 	    sense_len, par_sense_len, track_sense_len);
2115 	if (GET_FW_SENSE_LEN(sp) > 0) {
2116 		rsp->status_srb = sp;
2117 		cp->result = res;
2118 	}
2119 
2120 	if (sense_len) {
2121 		ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3039,
2122 		    "Check condition Sense data, nexus%ld:%d:%d cmd=%p.\n",
2123 		    sp->fcport->vha->host_no, cp->device->id, cp->device->lun,
2124 		    cp);
2125 		ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x3049,
2126 		    cp->sense_buffer, sense_len);
2127 	}
2128 }
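
/*
 * Illustrative sketch (not part of the driver): the three sense-length
 * counters maintained above, modeled with plain integers.  The firmware
 * total (FW_SENSE_LEN) is recorded first, the midlayer buffer is capped
 * at the sense-buffer size (CMD_SENSE_LEN), and only par_len bytes can
 * travel in the status IOCB itself; whatever remains is delivered later
 * in Status Continuation entries.  The ex_* names are hypothetical.
 */
#define EX_SENSE_BUFFERSIZE 96	/* stands in for SCSI_SENSE_BUFFERSIZE */

struct ex_sense_state {
	unsigned int fw_len;	/* firmware bytes still outstanding */
	unsigned int cmd_len;	/* room left in the midlayer buffer */
};

static unsigned int ex_handle_sense(struct ex_sense_state *st,
				    unsigned int sense_len,
				    unsigned int par_len)
{
	unsigned int copied;

	st->fw_len = sense_len;			/* SET_FW_SENSE_LEN() */
	if (sense_len >= EX_SENSE_BUFFERSIZE)
		sense_len = EX_SENSE_BUFFERSIZE;
	st->cmd_len = sense_len;		/* SET_CMD_SENSE_LEN() */

	copied = (sense_len > par_len) ? par_len : sense_len;
	st->fw_len -= copied;	/* firmware still owes fw_len bytes */
	st->cmd_len -= copied;	/* buffer space left for continuations */
	return copied;
}

/*
 * e.g. ex_handle_sense(&st, 200, 32) copies 32 bytes now and leaves
 * st.fw_len == 168 pending -- the case where the srb is parked in
 * rsp->status_srb above to wait for continuation entries.
 */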
2129 
2130 static void
2131 qlafx00_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
2132 		      struct tsk_mgmt_entry_fx00 *pkt, srb_t *sp,
2133 		      __le16 sstatus, __le16 cpstatus)
2134 {
2135 	struct srb_iocb *tmf;
2136 
2137 	tmf = &sp->u.iocb_cmd;
2138 	if (cpstatus != cpu_to_le16((uint16_t)CS_COMPLETE) ||
2139 	    (sstatus & cpu_to_le16((uint16_t)SS_RESPONSE_INFO_LEN_VALID)))
2140 		cpstatus = cpu_to_le16((uint16_t)CS_INCOMPLETE);
2141 	tmf->u.tmf.comp_status = cpstatus;
2142 	sp->done(vha, sp, 0);
2143 }
2144 
2145 static void
2146 qlafx00_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
2147 			 struct abort_iocb_entry_fx00 *pkt)
2148 {
2149 	const char func[] = "ABT_IOCB";
2150 	srb_t *sp;
2151 	struct srb_iocb *abt;
2152 
2153 	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
2154 	if (!sp)
2155 		return;
2156 
2157 	abt = &sp->u.iocb_cmd;
2158 	abt->u.abt.comp_status = pkt->tgt_id_sts;
2159 	sp->done(vha, sp, 0);
2160 }
2161 
2162 static void
2163 qlafx00_ioctl_iosb_entry(scsi_qla_host_t *vha, struct req_que *req,
2164 			 struct ioctl_iocb_entry_fx00 *pkt)
2165 {
2166 	const char func[] = "IOSB_IOCB";
2167 	srb_t *sp;
2168 	struct fc_bsg_job *bsg_job;
2169 	struct srb_iocb *iocb_job;
2170 	int res;
2171 	struct qla_mt_iocb_rsp_fx00 fstatus;
2172 	uint8_t	*fw_sts_ptr;
2173 
2174 	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
2175 	if (!sp)
2176 		return;
2177 
2178 	if (sp->type == SRB_FXIOCB_DCMD) {
2179 		iocb_job = &sp->u.iocb_cmd;
2180 		iocb_job->u.fxiocb.seq_number = pkt->seq_no;
2181 		iocb_job->u.fxiocb.fw_flags = pkt->fw_iotcl_flags;
2182 		iocb_job->u.fxiocb.result = pkt->status;
2183 		if (iocb_job->u.fxiocb.flags & SRB_FXDISC_RSP_DWRD_VALID)
2184 			iocb_job->u.fxiocb.req_data =
2185 			    pkt->dataword_r;
2186 	} else {
2187 		bsg_job = sp->u.bsg_job;
2188 
2189 		memset(&fstatus, 0, sizeof(struct qla_mt_iocb_rsp_fx00));
2190 
2191 		fstatus.reserved_1 = pkt->reserved_0;
2192 		fstatus.func_type = pkt->comp_func_num;
2193 		fstatus.ioctl_flags = pkt->fw_iotcl_flags;
2194 		fstatus.ioctl_data = pkt->dataword_r;
2195 		fstatus.adapid = pkt->adapid;
2196 		fstatus.reserved_2 = pkt->dataword_r_extra;
2197 		fstatus.res_count = pkt->residuallen;
2198 		fstatus.status = pkt->status;
2199 		fstatus.seq_number = pkt->seq_no;
2200 		memcpy(fstatus.reserved_3,
2201 		    pkt->reserved_2, 20 * sizeof(uint8_t));
2202 
2203 		fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
2204 		    sizeof(struct fc_bsg_reply);
2205 
2206 		memcpy(fw_sts_ptr, (uint8_t *)&fstatus,
2207 		    sizeof(struct qla_mt_iocb_rsp_fx00));
2208 		bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
2209 			sizeof(struct qla_mt_iocb_rsp_fx00) + sizeof(uint8_t);
2210 
2211 		ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
2212 		    sp->fcport->vha, 0x5080,
2213 		    (uint8_t *)pkt, sizeof(struct ioctl_iocb_entry_fx00));
2214 
2215 		ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
2216 		    sp->fcport->vha, 0x5074,
2217 		    (uint8_t *)fw_sts_ptr, sizeof(struct qla_mt_iocb_rsp_fx00));
2218 
2219 		res = bsg_job->reply->result = DID_OK << 16;
2220 		bsg_job->reply->reply_payload_rcv_len =
2221 		    bsg_job->reply_payload.payload_len;
2222 	}
2223 	sp->done(vha, sp, res);
2224 }
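
/*
 * Illustrative sketch (not part of the driver): the bsg reply layout
 * built above.  The firmware status block is appended directly after
 * the struct fc_bsg_reply inside the job's sense buffer, and reply_len
 * covers both plus one pad byte (the trailing sizeof(uint8_t)).  The
 * ex_pack_reply() helper and its generic sizes are hypothetical.
 */
#include <string.h>
#include <stddef.h>

static size_t ex_pack_reply(char *sense, size_t reply_sz,
			    const void *fw_sts, size_t fw_sts_sz)
{
	memcpy(sense + reply_sz, fw_sts, fw_sts_sz);	/* after fc_bsg_reply */
	return reply_sz + fw_sts_sz + 1;		/* new reply_len */
}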
2225 
2226 /**
2227  * qlafx00_status_entry() - Process a Status IOCB entry.
2228  * @vha: SCSI driver HA context
2229  * @pkt: Entry pointer
2230  */
2231 static void
2232 qlafx00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
2233 {
2234 	srb_t		*sp;
2235 	fc_port_t	*fcport;
2236 	struct scsi_cmnd *cp;
2237 	struct sts_entry_fx00 *sts;
2238 	__le16		comp_status;
2239 	__le16		scsi_status;
2240 	uint16_t	ox_id;
2241 	__le16		lscsi_status;
2242 	int32_t		resid;
2243 	uint32_t	sense_len, par_sense_len, rsp_info_len, resid_len,
2244 	    fw_resid_len;
2245 	uint8_t		*rsp_info = NULL, *sense_data = NULL;
2246 	struct qla_hw_data *ha = vha->hw;
2247 	uint32_t hindex, handle;
2248 	uint16_t que;
2249 	struct req_que *req;
2250 	int logit = 1;
2251 	int res = 0;
2252 
2253 	sts = (struct sts_entry_fx00 *) pkt;
2254 
2255 	comp_status = sts->comp_status;
2256 	scsi_status = sts->scsi_status & cpu_to_le16((uint16_t)SS_MASK);
2257 	hindex = sts->handle;
2258 	handle = LSW(hindex);
2259 
2260 	que = MSW(hindex);
2261 	req = ha->req_q_map[que];
2262 
2263 	/* Validate handle. */
2264 	if (handle < req->num_outstanding_cmds)
2265 		sp = req->outstanding_cmds[handle];
2266 	else
2267 		sp = NULL;
2268 
2269 	if (sp == NULL) {
2270 		ql_dbg(ql_dbg_io, vha, 0x3034,
2271 		    "Invalid status handle (0x%x).\n", handle);
2272 
2273 		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2274 		qla2xxx_wake_dpc(vha);
2275 		return;
2276 	}
2277 
2278 	if (sp->type == SRB_TM_CMD) {
2279 		req->outstanding_cmds[handle] = NULL;
2280 		qlafx00_tm_iocb_entry(vha, req, pkt, sp,
2281 		    scsi_status, comp_status);
2282 		return;
2283 	}
2284 
2285 	/* Fast path completion. */
2286 	if (comp_status == CS_COMPLETE && scsi_status == 0) {
2287 		qla2x00_process_completed_request(vha, req, handle);
2288 		return;
2289 	}
2290 
2291 	req->outstanding_cmds[handle] = NULL;
2292 	cp = GET_CMD_SP(sp);
2293 	if (cp == NULL) {
2294 		ql_dbg(ql_dbg_io, vha, 0x3048,
2295 		    "Command already returned (0x%x/%p).\n",
2296 		    handle, sp);
2297 
2298 		return;
2299 	}
2300 
2301 	lscsi_status = scsi_status & cpu_to_le16((uint16_t)STATUS_MASK);
2302 
2303 	fcport = sp->fcport;
2304 
2305 	ox_id = 0;
2306 	sense_len = par_sense_len = rsp_info_len = resid_len =
2307 		fw_resid_len = 0;
2308 	if (scsi_status & cpu_to_le16((uint16_t)SS_SENSE_LEN_VALID))
2309 		sense_len = sts->sense_len;
2310 	if (scsi_status & cpu_to_le16(((uint16_t)SS_RESIDUAL_UNDER
2311 	    | (uint16_t)SS_RESIDUAL_OVER)))
2312 		resid_len = le32_to_cpu(sts->residual_len);
2313 	if (comp_status == cpu_to_le16((uint16_t)CS_DATA_UNDERRUN))
2314 		fw_resid_len = le32_to_cpu(sts->residual_len);
2315 	rsp_info = sense_data = sts->data;
2316 	par_sense_len = sizeof(sts->data);
2317 
2318 	/* Check for overrun. */
2319 	if (comp_status == CS_COMPLETE &&
2320 	    scsi_status & cpu_to_le16((uint16_t)SS_RESIDUAL_OVER))
2321 		comp_status = cpu_to_le16((uint16_t)CS_DATA_OVERRUN);
2322 
2323 	/*
2324 	 * Based on the host and SCSI status, generate a status code for Linux.
2325 	 */
2326 	switch (le16_to_cpu(comp_status)) {
2327 	case CS_COMPLETE:
2328 	case CS_QUEUE_FULL:
2329 		if (scsi_status == 0) {
2330 			res = DID_OK << 16;
2331 			break;
2332 		}
2333 		if (scsi_status & cpu_to_le16(((uint16_t)SS_RESIDUAL_UNDER
2334 		    | (uint16_t)SS_RESIDUAL_OVER))) {
2335 			resid = resid_len;
2336 			scsi_set_resid(cp, resid);
2337 
2338 			if (!lscsi_status &&
2339 			    ((unsigned)(scsi_bufflen(cp) - resid) <
2340 			     cp->underflow)) {
2341 				ql_dbg(ql_dbg_io, fcport->vha, 0x3050,
2342 				    "Mid-layer underflow "
2343 				    "detected (0x%x of 0x%x bytes).\n",
2344 				    resid, scsi_bufflen(cp));
2345 
2346 				res = DID_ERROR << 16;
2347 				break;
2348 			}
2349 		}
2350 		res = DID_OK << 16 | le16_to_cpu(lscsi_status);
2351 
2352 		if (lscsi_status ==
2353 		    cpu_to_le16((uint16_t)SAM_STAT_TASK_SET_FULL)) {
2354 			ql_dbg(ql_dbg_io, fcport->vha, 0x3051,
2355 			    "QUEUE FULL detected.\n");
2356 			break;
2357 		}
2358 		logit = 0;
2359 		if (lscsi_status != cpu_to_le16((uint16_t)SS_CHECK_CONDITION))
2360 			break;
2361 
2362 		memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
2363 		if (!(scsi_status & cpu_to_le16((uint16_t)SS_SENSE_LEN_VALID)))
2364 			break;
2365 
2366 		qlafx00_handle_sense(sp, sense_data, par_sense_len, sense_len,
2367 		    rsp, res);
2368 		break;
2369 
2370 	case CS_DATA_UNDERRUN:
2371 		/* Use F/W calculated residual length. */
2372 		if (IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha))
2373 			resid = fw_resid_len;
2374 		else
2375 			resid = resid_len;
2376 		scsi_set_resid(cp, resid);
2377 		if (scsi_status & cpu_to_le16((uint16_t)SS_RESIDUAL_UNDER)) {
2378 			if ((IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha))
2379 			    && fw_resid_len != resid_len) {
2380 				ql_dbg(ql_dbg_io, fcport->vha, 0x3052,
2381 				    "Dropped frame(s) detected "
2382 				    "(0x%x of 0x%x bytes).\n",
2383 				    resid, scsi_bufflen(cp));
2384 
2385 				res = DID_ERROR << 16 |
2386 				    le16_to_cpu(lscsi_status);
2387 				goto check_scsi_status;
2388 			}
2389 
2390 			if (!lscsi_status &&
2391 			    ((unsigned)(scsi_bufflen(cp) - resid) <
2392 			    cp->underflow)) {
2393 				ql_dbg(ql_dbg_io, fcport->vha, 0x3053,
2394 				    "Mid-layer underflow "
2395 				    "detected (0x%x of 0x%x bytes, "
2396 				    "cp->underflow: 0x%x).\n",
2397 				    resid, scsi_bufflen(cp), cp->underflow);
2398 
2399 				res = DID_ERROR << 16;
2400 				break;
2401 			}
2402 		} else if (lscsi_status !=
2403 		    cpu_to_le16((uint16_t)SAM_STAT_TASK_SET_FULL) &&
2404 		    lscsi_status != cpu_to_le16((uint16_t)SAM_STAT_BUSY)) {
2405 			/*
2406 			 * A SCSI status of TASK SET FULL or BUSY means the
2407 			 * task was not completed.
2408 			 */
2409 
2410 			ql_dbg(ql_dbg_io, fcport->vha, 0x3054,
2411 			    "Dropped frame(s) detected (0x%x "
2412 			    "of 0x%x bytes).\n", resid,
2413 			    scsi_bufflen(cp));
2414 
2415 			res = DID_ERROR << 16 | le16_to_cpu(lscsi_status);
2416 			goto check_scsi_status;
2417 		} else {
2418 			ql_dbg(ql_dbg_io, fcport->vha, 0x3055,
2419 			    "scsi_status: 0x%x, lscsi_status: 0x%x\n",
2420 			    scsi_status, lscsi_status);
2421 		}
2422 
2423 		res = DID_OK << 16 | le16_to_cpu(lscsi_status);
2424 		logit = 0;
2425 
2426 check_scsi_status:
2427 		/*
2428 		 * Check to see if SCSI Status is non zero. If so report SCSI
2429 		 * Status.
2430 		 */
2431 		if (lscsi_status != 0) {
2432 			if (lscsi_status ==
2433 			    cpu_to_le16((uint16_t)SAM_STAT_TASK_SET_FULL)) {
2434 				ql_dbg(ql_dbg_io, fcport->vha, 0x3056,
2435 				    "QUEUE FULL detected.\n");
2436 				logit = 1;
2437 				break;
2438 			}
2439 			if (lscsi_status !=
2440 			    cpu_to_le16((uint16_t)SS_CHECK_CONDITION))
2441 				break;
2442 
2443 			memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
2444 			if (!(scsi_status &
2445 			    cpu_to_le16((uint16_t)SS_SENSE_LEN_VALID)))
2446 				break;
2447 
2448 			qlafx00_handle_sense(sp, sense_data, par_sense_len,
2449 			    sense_len, rsp, res);
2450 		}
2451 		break;
2452 
2453 	case CS_PORT_LOGGED_OUT:
2454 	case CS_PORT_CONFIG_CHG:
2455 	case CS_PORT_BUSY:
2456 	case CS_INCOMPLETE:
2457 	case CS_PORT_UNAVAILABLE:
2458 	case CS_TIMEOUT:
2459 	case CS_RESET:
2460 
2461 		/*
2462 		 * We are going to have the fc class block the rport
2463 		 * while we try to recover so instruct the mid layer
2464 		 * to requeue until the class decides how to handle this.
2465 		 */
2466 		res = DID_TRANSPORT_DISRUPTED << 16;
2467 
2468 		ql_dbg(ql_dbg_io, fcport->vha, 0x3057,
2469 		    "Port down status: port-state=0x%x.\n",
2470 		    atomic_read(&fcport->state));
2471 
2472 		if (atomic_read(&fcport->state) == FCS_ONLINE)
2473 			qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
2474 		break;
2475 
2476 	case CS_ABORTED:
2477 		res = DID_RESET << 16;
2478 		break;
2479 
2480 	default:
2481 		res = DID_ERROR << 16;
2482 		break;
2483 	}
2484 
2485 	if (logit)
2486 		ql_dbg(ql_dbg_io, fcport->vha, 0x3058,
2487 		    "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%d "
2488 		    "tgt_id: 0x%x lscsi_status: 0x%x cdb=%10phN len=0x%x "
2489 		    "rsp_info=0x%x resid=0x%x fw_resid=0x%x sense_len=0x%x, "
2490 		    "par_sense_len=0x%x, rsp_info_len=0x%x\n",
2491 		    comp_status, scsi_status, res, vha->host_no,
2492 		    cp->device->id, cp->device->lun, fcport->tgt_id,
2493 		    lscsi_status, cp->cmnd, scsi_bufflen(cp),
2494 		    rsp_info_len, resid_len, fw_resid_len, sense_len,
2495 		    par_sense_len, rsp_info_len);
2496 
2497 	if (rsp->status_srb == NULL)
2498 		sp->done(ha, sp, res);
2499 }
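
/*
 * Illustrative sketch (not part of the driver): how a 32-bit completion
 * handle decomposes, as done at the top of qlafx00_status_entry() with
 * the LSW()/MSW() macros -- the low word indexes outstanding_cmds[] and
 * the high word selects the request queue.  ex_split_handle() is a
 * hypothetical helper.
 */
#include <stdint.h>

static void ex_split_handle(uint32_t hindex, uint16_t *handle, uint16_t *que)
{
	*handle = (uint16_t)(hindex & 0xffff);	/* LSW(): command slot */
	*que = (uint16_t)(hindex >> 16);	/* MSW(): request queue id */
}

/* e.g. a handle of 0x00020005 names slot 5 on request queue 2. */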
2500 
2501 /**
2502  * qlafx00_status_cont_entry() - Process a Status Continuation entry.
2503  * @rsp: response queue
2504  * @pkt: Entry pointer
2505  *
2506  * Extended sense data.
2507  */
2508 static void
2509 qlafx00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
2510 {
2511 	uint8_t	sense_sz = 0;
2512 	struct qla_hw_data *ha = rsp->hw;
2513 	struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
2514 	srb_t *sp = rsp->status_srb;
2515 	struct scsi_cmnd *cp;
2516 	uint32_t sense_len;
2517 	uint8_t *sense_ptr;
2518 
2519 	if (!sp) {
2520 		ql_dbg(ql_dbg_io, vha, 0x3037,
2521 		    "no SP, sp = %p\n", sp);
2522 		return;
2523 	}
2524 
2525 	if (!GET_FW_SENSE_LEN(sp)) {
2526 		ql_dbg(ql_dbg_io, vha, 0x304b,
2527 		    "no fw sense data, sp = %p\n", sp);
2528 		return;
2529 	}
2530 	cp = GET_CMD_SP(sp);
2531 	if (cp == NULL) {
2532 		ql_log(ql_log_warn, vha, 0x303b,
2533 		    "cmd is NULL: already returned to OS (sp=%p).\n", sp);
2534 
2535 		rsp->status_srb = NULL;
2536 		return;
2537 	}
2538 
2539 	if (!GET_CMD_SENSE_LEN(sp)) {
2540 		ql_dbg(ql_dbg_io, vha, 0x304c,
2541 		    "no sense data, sp = %p\n", sp);
2542 	} else {
2543 		sense_len = GET_CMD_SENSE_LEN(sp);
2544 		sense_ptr = GET_CMD_SENSE_PTR(sp);
2545 		ql_dbg(ql_dbg_io, vha, 0x304f,
2546 		    "sp=%p sense_len=0x%x sense_ptr=%p.\n",
2547 		    sp, sense_len, sense_ptr);
2548 
2549 		if (sense_len > sizeof(pkt->data))
2550 			sense_sz = sizeof(pkt->data);
2551 		else
2552 			sense_sz = sense_len;
2553 
2554 		/* Move sense data. */
2555 		ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x304e,
2556 		    (uint8_t *)pkt, sizeof(sts_cont_entry_t));
2557 		memcpy(sense_ptr, pkt->data, sense_sz);
2558 		ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x304a,
2559 		    sense_ptr, sense_sz);
2560 
2561 		sense_len -= sense_sz;
2562 		sense_ptr += sense_sz;
2563 
2564 		SET_CMD_SENSE_PTR(sp, sense_ptr);
2565 		SET_CMD_SENSE_LEN(sp, sense_len);
2566 	}
2567 	sense_len = GET_FW_SENSE_LEN(sp);
2568 	sense_len = (sense_len > sizeof(pkt->data)) ?
2569 	    (sense_len - sizeof(pkt->data)) : 0;
2570 	SET_FW_SENSE_LEN(sp, sense_len);
2571 
2572 	/* Place command on done queue. */
2573 	if (sense_len == 0) {
2574 		rsp->status_srb = NULL;
2575 		sp->done(ha, sp, cp->result);
2576 	}
2577 }
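
/*
 * Illustrative sketch (not part of the driver): how each Status
 * Continuation entry drains the two counters set up by
 * qlafx00_handle_sense().  Every entry retires up to one packet's worth
 * of firmware sense data (sizeof(pkt->data)), while only the part that
 * still fits in the midlayer buffer is actually copied; the command is
 * completed once the firmware total reaches zero.  ex_cont_entry() is a
 * hypothetical helper.
 */
static void ex_cont_entry(unsigned int *cmd_len, unsigned int *fw_len,
			  unsigned int pkt_data_sz)
{
	unsigned int copy = (*cmd_len > pkt_data_sz) ? pkt_data_sz : *cmd_len;

	*cmd_len -= copy;	/* bytes moved into the sense buffer */
	*fw_len = (*fw_len > pkt_data_sz) ? *fw_len - pkt_data_sz : 0;
	/* *fw_len == 0 corresponds to clearing rsp->status_srb above */
}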
2578 
2579 /**
2580  * qlafx00_multistatus_entry() - Process a multi-status response entry.
2581  * @vha: SCSI driver HA context
2582  */
2583 static void
2584 qlafx00_multistatus_entry(struct scsi_qla_host *vha,
2585 	struct rsp_que *rsp, void *pkt)
2586 {
2587 	srb_t		*sp;
2588 	struct multi_sts_entry_fx00 *stsmfx;
2589 	struct qla_hw_data *ha = vha->hw;
2590 	uint32_t handle, hindex, handle_count, i;
2591 	uint16_t que;
2592 	struct req_que *req;
2593 	__le32 *handle_ptr;
2594 
2595 	stsmfx = (struct multi_sts_entry_fx00 *) pkt;
2596 
2597 	handle_count = stsmfx->handle_count;
2598 
2599 	if (handle_count > MAX_HANDLE_COUNT) {
2600 		ql_dbg(ql_dbg_io, vha, 0x3035,
2601 		    "Invalid handle count (0x%x).\n", handle_count);
2602 		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2603 		qla2xxx_wake_dpc(vha);
2604 		return;
2605 	}
2606 
2607 	handle_ptr =  &stsmfx->handles[0];
2608 
2609 	for (i = 0; i < handle_count; i++) {
2610 		hindex = le32_to_cpu(*handle_ptr);
2611 		handle = LSW(hindex);
2612 		que = MSW(hindex);
2613 		req = ha->req_q_map[que];
2614 
2615 		/* Validate handle. */
2616 		if (handle < req->num_outstanding_cmds)
2617 			sp = req->outstanding_cmds[handle];
2618 		else
2619 			sp = NULL;
2620 
2621 		if (sp == NULL) {
2622 			ql_dbg(ql_dbg_io, vha, 0x3044,
2623 			    "Invalid status handle (0x%x).\n", handle);
2624 			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2625 			qla2xxx_wake_dpc(vha);
2626 			return;
2627 		}
2628 		qla2x00_process_completed_request(vha, req, handle);
2629 		handle_ptr++;
2630 	}
2631 }
2632 
2633 /**
2634  * qlafx00_error_entry() - Process an error entry.
2635  * @vha: SCSI driver HA context
2636  * @pkt: Entry pointer
2637  */
2638 static void
2639 qlafx00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp,
2640 		    struct sts_entry_fx00 *pkt, uint8_t estatus, uint8_t etype)
2641 {
2642 	srb_t *sp;
2643 	struct qla_hw_data *ha = vha->hw;
2644 	const char func[] = "ERROR-IOCB";
2645 	uint16_t que = 0;
2646 	struct req_que *req = NULL;
2647 	int res = DID_ERROR << 16;
2648 
2649 	ql_dbg(ql_dbg_async, vha, 0x507f,
2650 	    "type of error status in response: 0x%x\n", estatus);
2651 
2652 	req = ha->req_q_map[que];
2653 
2654 	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
2655 	if (sp) {
2656 		sp->done(ha, sp, res);
2657 		return;
2658 	}
2659 
2660 	set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2661 	qla2xxx_wake_dpc(vha);
2662 }
2663 
2664 /**
2665  * qlafx00_process_response_queue() - Process response queue entries.
2666  * @vha: SCSI driver HA context
2667  */
2668 static void
2669 qlafx00_process_response_queue(struct scsi_qla_host *vha,
2670 	struct rsp_que *rsp)
2671 {
2672 	struct sts_entry_fx00 *pkt;
2673 	response_t *lptr;
2674 	uint16_t lreq_q_in = 0;
2675 	uint16_t lreq_q_out = 0;
2676 
2677 	lreq_q_in = RD_REG_DWORD(rsp->rsp_q_in);
2678 	lreq_q_out = RD_REG_DWORD(rsp->rsp_q_out);
2679 
2680 	while (lreq_q_in != lreq_q_out) {
2681 		lptr = rsp->ring_ptr;
2682 		memcpy_fromio(rsp->rsp_pkt, (void __iomem *)lptr,
2683 		    sizeof(rsp->rsp_pkt));
2684 		pkt = (struct sts_entry_fx00 *)rsp->rsp_pkt;
2685 
2686 		rsp->ring_index++;
2687 		lreq_q_out++;
2688 		if (rsp->ring_index == rsp->length) {
2689 			lreq_q_out = 0;
2690 			rsp->ring_index = 0;
2691 			rsp->ring_ptr = rsp->ring;
2692 		} else {
2693 			rsp->ring_ptr++;
2694 		}
2695 
2696 		if (pkt->entry_status != 0 &&
2697 		    pkt->entry_type != IOCTL_IOSB_TYPE_FX00) {
2698 			qlafx00_error_entry(vha, rsp,
2699 			    (struct sts_entry_fx00 *)pkt, pkt->entry_status,
2700 			    pkt->entry_type);
2701 			continue;
2702 		}
2703 
2704 		switch (pkt->entry_type) {
2705 		case STATUS_TYPE_FX00:
2706 			qlafx00_status_entry(vha, rsp, pkt);
2707 			break;
2708 
2709 		case STATUS_CONT_TYPE_FX00:
2710 			qlafx00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
2711 			break;
2712 
2713 		case MULTI_STATUS_TYPE_FX00:
2714 			qlafx00_multistatus_entry(vha, rsp, pkt);
2715 			break;
2716 
2717 		case ABORT_IOCB_TYPE_FX00:
2718 			qlafx00_abort_iocb_entry(vha, rsp->req,
2719 			   (struct abort_iocb_entry_fx00 *)pkt);
2720 			break;
2721 
2722 		case IOCTL_IOSB_TYPE_FX00:
2723 			qlafx00_ioctl_iosb_entry(vha, rsp->req,
2724 			    (struct ioctl_iocb_entry_fx00 *)pkt);
2725 			break;
2726 		default:
2727 			/* Type Not Supported. */
2728 			ql_dbg(ql_dbg_async, vha, 0x5081,
2729 			    "Received unknown response pkt type %x "
2730 			    "entry status=%x.\n",
2731 			    pkt->entry_type, pkt->entry_status);
2732 			break;
2733 		}
2734 	}
2735 
2736 	/* Adjust ring index */
2737 	WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
2738 }
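
/*
 * Illustrative sketch (not part of the driver): the ring walk performed
 * above, reduced to its index arithmetic.  The consumer advances until
 * it catches up with the producer index, wrapping its local out counter
 * (and rsp->ring_index) at the ring length.  ex_ring_drain() is a
 * hypothetical helper.
 */
static unsigned int ex_ring_drain(unsigned int in, unsigned int out,
				  unsigned int length)
{
	unsigned int processed = 0;

	while (in != out) {
		out++;			/* consume one response entry */
		if (out == length)
			out = 0;	/* wrap, as rsp->ring_index does */
		processed++;
	}
	return processed;
}

/* e.g. in = 2, out = 30, length = 32 drains entries 30, 31, 0 and 1. */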
2739 
2740 /**
2741  * qlafx00_async_event() - Process asynchronous events.
2742  * @vha: SCSI driver HA context
2743  */
2744 static void
2745 qlafx00_async_event(scsi_qla_host_t *vha)
2746 {
2747 	struct qla_hw_data *ha = vha->hw;
2748 	struct device_reg_fx00 __iomem *reg;
2749 	int data_size = 1;
2750 
2751 	reg = &ha->iobase->ispfx00;
2752 	/* Setup to process RIO completion. */
2753 	switch (ha->aenmb[0]) {
2754 	case QLAFX00_MBA_SYSTEM_ERR:		/* System Error */
2755 		ql_log(ql_log_warn, vha, 0x5079,
2756 		    "ISP System Error - mbx1=%x\n", ha->aenmb[0]);
2757 		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2758 		break;
2759 
2760 	case QLAFX00_MBA_SHUTDOWN_RQSTD:	/* Shutdown requested */
2761 		ql_dbg(ql_dbg_async, vha, 0x5076,
2762 		    "Asynchronous FW shutdown requested.\n");
2763 		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2764 		qla2xxx_wake_dpc(vha);
2765 		break;
2766 
2767 	case QLAFX00_MBA_PORT_UPDATE:		/* Port database update */
2768 		ha->aenmb[1] = RD_REG_DWORD(&reg->aenmailbox1);
2769 		ha->aenmb[2] = RD_REG_DWORD(&reg->aenmailbox2);
2770 		ha->aenmb[3] = RD_REG_DWORD(&reg->aenmailbox3);
2771 		ql_dbg(ql_dbg_async, vha, 0x5077,
2772 		    "Asynchronous port Update received "
2773 		    "aenmb[0]: %x, aenmb[1]: %x, aenmb[2]: %x, aenmb[3]: %x\n",
2774 		    ha->aenmb[0], ha->aenmb[1], ha->aenmb[2], ha->aenmb[3]);
2775 		data_size = 4;
2776 		break;
2777 
2778 	case QLAFX00_MBA_TEMP_OVER:	/* Over temperature event */
2779 		ql_log(ql_log_info, vha, 0x5085,
2780 		    "Asynchronous over temperature event received "
2781 		    "aenmb[0]: %x\n",
2782 		    ha->aenmb[0]);
2783 		break;
2784 
2785 	case QLAFX00_MBA_TEMP_NORM:	/* Normal temperature event */
2786 		ql_log(ql_log_info, vha, 0x5086,
2787 		    "Asynchronous normal temperature event received "
2788 		    "aenmb[0]: %x\n",
2789 		    ha->aenmb[0]);
2790 		break;
2791 
2792 	case QLAFX00_MBA_TEMP_CRIT:	/* Critical temperature event */
2793 		ql_log(ql_log_info, vha, 0x5083,
2794 		    "Asynchronous critical temperature event received "
2795 		    "aenmb[0]: %x\n",
2796 		    ha->aenmb[0]);
2797 		break;
2798 
2799 	default:
2800 		ha->aenmb[1] = RD_REG_WORD(&reg->aenmailbox1);
2801 		ha->aenmb[2] = RD_REG_WORD(&reg->aenmailbox2);
2802 		ha->aenmb[3] = RD_REG_WORD(&reg->aenmailbox3);
2803 		ha->aenmb[4] = RD_REG_WORD(&reg->aenmailbox4);
2804 		ha->aenmb[5] = RD_REG_WORD(&reg->aenmailbox5);
2805 		ha->aenmb[6] = RD_REG_WORD(&reg->aenmailbox6);
2806 		ha->aenmb[7] = RD_REG_WORD(&reg->aenmailbox7);
2807 		ql_dbg(ql_dbg_async, vha, 0x5078,
2808 		    "AEN:%04x %04x %04x %04x :%04x %04x %04x %04x\n",
2809 		    ha->aenmb[0], ha->aenmb[1], ha->aenmb[2], ha->aenmb[3],
2810 		    ha->aenmb[4], ha->aenmb[5], ha->aenmb[6], ha->aenmb[7]);
2811 		break;
2812 	}
2813 	qlafx00_post_aenfx_work(vha, ha->aenmb[0],
2814 	    (uint32_t *)ha->aenmb, data_size);
2815 }
2816 
2817 /**
2818  * qlafx00_mbx_completion() - Process mailbox command completions.
2819  * @vha: SCSI driver HA context
2820  * @mb0: value of mailbox register 0, as read from the mailbox16
2821  *       register by the interrupt handler
2822  */
2823 static void
2824 qlafx00_mbx_completion(scsi_qla_host_t *vha, uint32_t mb0)
2825 {
2826 	uint16_t	cnt;
2827 	uint32_t __iomem *wptr;
2828 	struct qla_hw_data *ha = vha->hw;
2829 	struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
2830 
2831 	if (!ha->mcp32)
2832 		ql_dbg(ql_dbg_async, vha, 0x507e, "MBX pointer ERROR.\n");
2833 
2834 	/* Load return mailbox registers. */
2835 	ha->flags.mbox_int = 1;
2836 	ha->mailbox_out32[0] = mb0;
2837 	wptr = (uint32_t __iomem *)&reg->mailbox17;
2838 
2839 	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
2840 		ha->mailbox_out32[cnt] = RD_REG_DWORD(wptr);
2841 		wptr++;
2842 	}
2843 }
2844 
2845 /**
2846  * qlafx00_intr_handler() - Process interrupts for the ISPFX00.
2847  * @irq: interrupt number
2848  * @dev_id: pointer to the associated response queue
2849  *
2850  * Called by the system whenever the host adapter generates an interrupt.
2851  *
2852  * Returns handled flag.
2853  */
2854 irqreturn_t
2855 qlafx00_intr_handler(int irq, void *dev_id)
2856 {
2857 	scsi_qla_host_t	*vha;
2858 	struct qla_hw_data *ha;
2859 	struct device_reg_fx00 __iomem *reg;
2860 	int		status;
2861 	unsigned long	iter;
2862 	uint32_t	stat;
2863 	uint32_t	mb[8];
2864 	struct rsp_que *rsp;
2865 	unsigned long	flags;
2866 	uint32_t clr_intr = 0;
2867 	uint32_t intr_stat = 0;
2868 
2869 	rsp = (struct rsp_que *) dev_id;
2870 	if (!rsp) {
2871 		ql_log(ql_log_info, NULL, 0x507d,
2872 		    "%s: NULL response queue pointer.\n", __func__);
2873 		return IRQ_NONE;
2874 	}
2875 
2876 	ha = rsp->hw;
2877 	reg = &ha->iobase->ispfx00;
2878 	status = 0;
2879 
2880 	if (unlikely(pci_channel_offline(ha->pdev)))
2881 		return IRQ_HANDLED;
2882 
2883 	spin_lock_irqsave(&ha->hardware_lock, flags);
2884 	vha = pci_get_drvdata(ha->pdev);
2885 	for (iter = 50; iter--; clr_intr = 0) {
2886 		stat = QLAFX00_RD_INTR_REG(ha);
2887 		if (qla2x00_check_reg_for_disconnect(vha, stat))
2888 			break;
2889 		intr_stat = stat & QLAFX00_HST_INT_STS_BITS;
2890 		if (!intr_stat)
2891 			break;
2892 
2893 		if (stat & QLAFX00_INTR_MB_CMPLT) {
2894 			mb[0] = RD_REG_WORD(&reg->mailbox16);
2895 			qlafx00_mbx_completion(vha, mb[0]);
2896 			status |= MBX_INTERRUPT;
2897 			clr_intr |= QLAFX00_INTR_MB_CMPLT;
2898 		}
2899 		if (intr_stat & QLAFX00_INTR_ASYNC_CMPLT) {
2900 			ha->aenmb[0] = RD_REG_WORD(&reg->aenmailbox0);
2901 			qlafx00_async_event(vha);
2902 			clr_intr |= QLAFX00_INTR_ASYNC_CMPLT;
2903 		}
2904 		if (intr_stat & QLAFX00_INTR_RSP_CMPLT) {
2905 			qlafx00_process_response_queue(vha, rsp);
2906 			clr_intr |= QLAFX00_INTR_RSP_CMPLT;
2907 		}
2908 
2909 		QLAFX00_CLR_INTR_REG(ha, clr_intr);
2910 		QLAFX00_RD_INTR_REG(ha);
2911 	}
2912 
2913 	qla2x00_handle_mbx_completion(ha, status);
2914 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
2915 
2916 	return IRQ_HANDLED;
2917 }
2918 
2919 /* QLAFX00 specific IOCB implementation functions */
2920 
2921 static inline cont_a64_entry_t *
2922 qlafx00_prep_cont_type1_iocb(struct req_que *req,
2923 			     cont_a64_entry_t *lcont_pkt)
2924 {
2925 	cont_a64_entry_t *cont_pkt;
2926 
2927 	/* Adjust ring index. */
2928 	req->ring_index++;
2929 	if (req->ring_index == req->length) {
2930 		req->ring_index = 0;
2931 		req->ring_ptr = req->ring;
2932 	} else {
2933 		req->ring_ptr++;
2934 	}
2935 
2936 	cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
2937 
2938 	/* Load packet defaults. */
2939 	lcont_pkt->entry_type = CONTINUE_A64_TYPE_FX00;
2940 
2941 	return cont_pkt;
2942 }
2943 
2944 static inline void
2945 qlafx00_build_scsi_iocbs(srb_t *sp, struct cmd_type_7_fx00 *cmd_pkt,
2946 			 uint16_t tot_dsds, struct cmd_type_7_fx00 *lcmd_pkt)
2947 {
2948 	uint16_t	avail_dsds;
2949 	__le32 *cur_dsd;
2950 	scsi_qla_host_t	*vha;
2951 	struct scsi_cmnd *cmd;
2952 	struct scatterlist *sg;
2953 	int i, cont;
2954 	struct req_que *req;
2955 	cont_a64_entry_t lcont_pkt;
2956 	cont_a64_entry_t *cont_pkt;
2957 
2958 	vha = sp->fcport->vha;
2959 	req = vha->req;
2960 
2961 	cmd = GET_CMD_SP(sp);
2962 	cont = 0;
2963 	cont_pkt = NULL;
2964 
2965 	/* Update entry type to indicate Command Type 7 IOCB */
2966 	lcmd_pkt->entry_type = FX00_COMMAND_TYPE_7;
2967 
2968 	/* No data transfer */
2969 	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
2970 		lcmd_pkt->byte_count = __constant_cpu_to_le32(0);
2971 		return;
2972 	}
2973 
2974 	/* Set transfer direction */
2975 	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
2976 		lcmd_pkt->cntrl_flags = TMF_WRITE_DATA;
2977 		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
2978 	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
2979 		lcmd_pkt->cntrl_flags = TMF_READ_DATA;
2980 		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
2981 	}
2982 
2983 	/* One DSD is available in the Command Type 7 IOCB */
2984 	avail_dsds = 1;
2985 	cur_dsd = (__le32 *)&lcmd_pkt->dseg_0_address;
2986 
2987 	/* Load data segments */
2988 	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
2989 		dma_addr_t	sle_dma;
2990 
2991 		/* Allocate additional continuation packets? */
2992 		if (avail_dsds == 0) {
2993 			/*
2994 			 * Five DSDs are available in the Continuation
2995 			 * Type 1 IOCB.
2996 			 */
2997 			memset(&lcont_pkt, 0, REQUEST_ENTRY_SIZE);
2998 			cont_pkt =
2999 			    qlafx00_prep_cont_type1_iocb(req, &lcont_pkt);
3000 			cur_dsd = (__le32 *)lcont_pkt.dseg_0_address;
3001 			avail_dsds = 5;
3002 			cont = 1;
3003 		}
3004 
3005 		sle_dma = sg_dma_address(sg);
3006 		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
3007 		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
3008 		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
3009 		avail_dsds--;
3010 		if (avail_dsds == 0 && cont == 1) {
3011 			cont = 0;
3012 			memcpy_toio((void __iomem *)cont_pkt, &lcont_pkt,
3013 			    REQUEST_ENTRY_SIZE);
3014 		}
3015 
3016 	}
3017 	if (avail_dsds != 0 && cont == 1) {
3018 		memcpy_toio((void __iomem *)cont_pkt, &lcont_pkt,
3019 		    REQUEST_ENTRY_SIZE);
3020 	}
3021 }
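
/*
 * Illustrative sketch (not part of the driver): the IOCB entry
 * accounting implied by the loop above.  The command IOCB carries one
 * DSD and each Continuation Type 1 IOCB carries five more, so for
 * tot_dsds scatter/gather entries the entry count works out as below
 * (presumably the arithmetic behind the qla24xx_calc_iocbs() call in
 * qlafx00_start_scsi()).  ex_calc_iocbs() is a hypothetical helper.
 */
static unsigned int ex_calc_iocbs(unsigned int tot_dsds)
{
	unsigned int iocbs = 1;			/* the command IOCB itself */

	if (tot_dsds > 1)			/* first DSD rides in the command */
		iocbs += (tot_dsds - 1 + 4) / 5; /* ceil((n - 1) / 5) conts */
	return iocbs;
}

/* e.g. 1 DSD -> 1 entry, 6 DSDs -> 2 entries, 11 DSDs -> 3 entries. */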
3022 
3023 /**
3024  * qlafx00_start_scsi() - Send a SCSI command to the ISP
3025  * @sp: command to send to the ISP
3026  *
3027  * Returns non-zero if a failure occurred, else zero.
3028  */
3029 int
3030 qlafx00_start_scsi(srb_t *sp)
3031 {
3032 	int		ret, nseg;
3033 	unsigned long   flags;
3034 	uint32_t        index;
3035 	uint32_t	handle;
3036 	uint16_t	cnt;
3037 	uint16_t	req_cnt;
3038 	uint16_t	tot_dsds;
3039 	struct req_que *req = NULL;
3040 	struct rsp_que *rsp = NULL;
3041 	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
3042 	struct scsi_qla_host *vha = sp->fcport->vha;
3043 	struct qla_hw_data *ha = vha->hw;
3044 	struct cmd_type_7_fx00 *cmd_pkt;
3045 	struct cmd_type_7_fx00 lcmd_pkt;
3046 	struct scsi_lun llun;
3047 	char		tag[2];
3048 
3049 	/* Setup device pointers. */
3050 	ret = 0;
3051 
3052 	rsp = ha->rsp_q_map[0];
3053 	req = vha->req;
3054 
3055 	/* So we know we haven't pci_map'ed anything yet */
3056 	tot_dsds = 0;
3057 
3058 	/* Acquire ring specific lock */
3059 	spin_lock_irqsave(&ha->hardware_lock, flags);
3060 
3061 	/* Check for room in outstanding command list. */
3062 	handle = req->current_outstanding_cmd;
3063 	for (index = 1; index < req->num_outstanding_cmds; index++) {
3064 		handle++;
3065 		if (handle == req->num_outstanding_cmds)
3066 			handle = 1;
3067 		if (!req->outstanding_cmds[handle])
3068 			break;
3069 	}
3070 	if (index == req->num_outstanding_cmds)
3071 		goto queuing_error;
3072 
3073 	/* Map the sg table so we have an accurate count of sg entries needed */
3074 	if (scsi_sg_count(cmd)) {
3075 		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
3076 		    scsi_sg_count(cmd), cmd->sc_data_direction);
3077 		if (unlikely(!nseg))
3078 			goto queuing_error;
3079 	} else
3080 		nseg = 0;
3081 
3082 	tot_dsds = nseg;
3083 	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
3084 	if (req->cnt < (req_cnt + 2)) {
3085 		cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
3086 
3087 		if (req->ring_index < cnt)
3088 			req->cnt = cnt - req->ring_index;
3089 		else
3090 			req->cnt = req->length -
3091 				(req->ring_index - cnt);
3092 		if (req->cnt < (req_cnt + 2))
3093 			goto queuing_error;
3094 	}
3095 
3096 	/* Build command packet. */
3097 	req->current_outstanding_cmd = handle;
3098 	req->outstanding_cmds[handle] = sp;
3099 	sp->handle = handle;
3100 	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
3101 	req->cnt -= req_cnt;
3102 
3103 	cmd_pkt = (struct cmd_type_7_fx00 *)req->ring_ptr;
3104 
3105 	memset(&lcmd_pkt, 0, REQUEST_ENTRY_SIZE);
3106 
3107 	lcmd_pkt.handle = MAKE_HANDLE(req->id, sp->handle);
3108 	lcmd_pkt.reserved_0 = 0;
3109 	lcmd_pkt.port_path_ctrl = 0;
3110 	lcmd_pkt.reserved_1 = 0;
3111 	lcmd_pkt.dseg_count = cpu_to_le16(tot_dsds);
3112 	lcmd_pkt.tgt_idx = cpu_to_le16(sp->fcport->tgt_id);
3113 
3114 	int_to_scsilun(cmd->device->lun, &llun);
3115 	host_to_adap((uint8_t *)&llun, (uint8_t *)&lcmd_pkt.lun,
3116 	    sizeof(lcmd_pkt.lun));
3117 
3118 	/* Update tagged queuing modifier -- default is TSK_SIMPLE (0). */
3119 	if (scsi_populate_tag_msg(cmd, tag)) {
3120 		switch (tag[0]) {
3121 		case HEAD_OF_QUEUE_TAG:
3122 			lcmd_pkt.task = TSK_HEAD_OF_QUEUE;
3123 			break;
3124 		case ORDERED_QUEUE_TAG:
3125 			lcmd_pkt.task = TSK_ORDERED;
3126 			break;
3127 		}
3128 	}
3129 
3130 	/* Load SCSI command packet. */
3131 	host_to_adap(cmd->cmnd, lcmd_pkt.fcp_cdb, sizeof(lcmd_pkt.fcp_cdb));
3132 	lcmd_pkt.byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
3133 
3134 	/* Build IOCB segments */
3135 	qlafx00_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, &lcmd_pkt);
3136 
3137 	/* Set total data segment count. */
3138 	lcmd_pkt.entry_count = (uint8_t)req_cnt;
3139 
3140 	/* Specify response queue number where completion should happen */
3141 	lcmd_pkt.entry_status = (uint8_t) rsp->id;
3142 
3143 	ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302e,
3144 	    (uint8_t *)cmd->cmnd, cmd->cmd_len);
3145 	ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x3032,
3146 	    (uint8_t *)&lcmd_pkt, REQUEST_ENTRY_SIZE);
3147 
3148 	memcpy_toio((void __iomem *)cmd_pkt, &lcmd_pkt, REQUEST_ENTRY_SIZE);
3149 	wmb();
3150 
3151 	/* Adjust ring index. */
3152 	req->ring_index++;
3153 	if (req->ring_index == req->length) {
3154 		req->ring_index = 0;
3155 		req->ring_ptr = req->ring;
3156 	} else
3157 		req->ring_ptr++;
3158 
3159 	sp->flags |= SRB_DMA_VALID;
3160 
3161 	/* Set chip new ring index. */
3162 	WRT_REG_DWORD(req->req_q_in, req->ring_index);
3163 	QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
3164 
3165 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
3166 	return QLA_SUCCESS;
3167 
3168 queuing_error:
3169 	if (tot_dsds)
3170 		scsi_dma_unmap(cmd);
3171 
3172 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
3173 
3174 	return QLA_FUNCTION_FAILED;
3175 }
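
/*
 * Illustrative sketch (not part of the driver): the free-space check in
 * qlafx00_start_scsi() above.  The driver keeps a cached req->cnt and
 * re-reads the chip's out pointer only when the cache looks too small;
 * the recomputation is the usual circular-buffer distance.
 * ex_ring_room() is a hypothetical helper.
 */
static unsigned int ex_ring_room(unsigned int ring_index, unsigned int out,
				 unsigned int length)
{
	if (ring_index < out)
		return out - ring_index;
	return length - (ring_index - out);
}

/* A request needing req_cnt entries proceeds only if room >= req_cnt + 2. */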
3176 
3177 void
3178 qlafx00_tm_iocb(srb_t *sp, struct tsk_mgmt_entry_fx00 *ptm_iocb)
3179 {
3180 	struct srb_iocb *fxio = &sp->u.iocb_cmd;
3181 	scsi_qla_host_t *vha = sp->fcport->vha;
3182 	struct req_que *req = vha->req;
3183 	struct tsk_mgmt_entry_fx00 tm_iocb;
3184 	struct scsi_lun llun;
3185 
3186 	memset(&tm_iocb, 0, sizeof(struct tsk_mgmt_entry_fx00));
3187 	tm_iocb.entry_type = TSK_MGMT_IOCB_TYPE_FX00;
3188 	tm_iocb.entry_count = 1;
3189 	tm_iocb.handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
3190 	tm_iocb.reserved_0 = 0;
3191 	tm_iocb.tgt_id = cpu_to_le16(sp->fcport->tgt_id);
3192 	tm_iocb.control_flags = cpu_to_le32(fxio->u.tmf.flags);
3193 	if (tm_iocb.control_flags == cpu_to_le32((uint32_t)TCF_LUN_RESET)) {
3194 		int_to_scsilun(fxio->u.tmf.lun, &llun);
3195 		host_to_adap((uint8_t *)&llun, (uint8_t *)&tm_iocb.lun,
3196 		    sizeof(struct scsi_lun));
3197 	}
3198 
3199 	memcpy((void *)ptm_iocb, &tm_iocb,
3200 	    sizeof(struct tsk_mgmt_entry_fx00));
3201 	wmb();
3202 }
3203 
3204 void
3205 qlafx00_abort_iocb(srb_t *sp, struct abort_iocb_entry_fx00 *pabt_iocb)
3206 {
3207 	struct srb_iocb *fxio = &sp->u.iocb_cmd;
3208 	scsi_qla_host_t *vha = sp->fcport->vha;
3209 	struct req_que *req = vha->req;
3210 	struct abort_iocb_entry_fx00 abt_iocb;
3211 
3212 	memset(&abt_iocb, 0, sizeof(struct abort_iocb_entry_fx00));
3213 	abt_iocb.entry_type = ABORT_IOCB_TYPE_FX00;
3214 	abt_iocb.entry_count = 1;
3215 	abt_iocb.handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
3216 	abt_iocb.abort_handle =
3217 	    cpu_to_le32(MAKE_HANDLE(req->id, fxio->u.abt.cmd_hndl));
3218 	abt_iocb.tgt_id_sts = cpu_to_le16(sp->fcport->tgt_id);
3219 	abt_iocb.req_que_no = cpu_to_le16(req->id);
3220 
3221 	memcpy((void *)pabt_iocb, &abt_iocb,
3222 	    sizeof(struct abort_iocb_entry_fx00));
3223 	wmb();
3224 }
3225 
3226 void
3227 qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb)
3228 {
3229 	struct srb_iocb *fxio = &sp->u.iocb_cmd;
3230 	struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
3231 	struct fc_bsg_job *bsg_job;
3232 	struct fxdisc_entry_fx00 fx_iocb;
3233 	uint8_t entry_cnt = 1;
3234 
3235 	memset(&fx_iocb, 0, sizeof(struct fxdisc_entry_fx00));
3236 	fx_iocb.entry_type = FX00_IOCB_TYPE;
3237 	fx_iocb.handle = cpu_to_le32(sp->handle);
3238 	fx_iocb.entry_count = entry_cnt;
3239 
3240 	if (sp->type == SRB_FXIOCB_DCMD) {
3241 		fx_iocb.func_num =
3242 		    sp->u.iocb_cmd.u.fxiocb.req_func_type;
3243 		fx_iocb.adapid = fxio->u.fxiocb.adapter_id;
3244 		fx_iocb.adapid_hi = fxio->u.fxiocb.adapter_id_hi;
3245 		fx_iocb.reserved_0 = fxio->u.fxiocb.reserved_0;
3246 		fx_iocb.reserved_1 = fxio->u.fxiocb.reserved_1;
3247 		fx_iocb.dataword_extra = fxio->u.fxiocb.req_data_extra;
3248 
3249 		if (fxio->u.fxiocb.flags & SRB_FXDISC_REQ_DMA_VALID) {
3250 			fx_iocb.req_dsdcnt = cpu_to_le16(1);
3251 			fx_iocb.req_xfrcnt =
3252 			    cpu_to_le16(fxio->u.fxiocb.req_len);
3253 			fx_iocb.dseg_rq_address[0] =
3254 			    cpu_to_le32(LSD(fxio->u.fxiocb.req_dma_handle));
3255 			fx_iocb.dseg_rq_address[1] =
3256 			    cpu_to_le32(MSD(fxio->u.fxiocb.req_dma_handle));
3257 			fx_iocb.dseg_rq_len =
3258 			    cpu_to_le32(fxio->u.fxiocb.req_len);
3259 		}
3260 
3261 		if (fxio->u.fxiocb.flags & SRB_FXDISC_RESP_DMA_VALID) {
3262 			fx_iocb.rsp_dsdcnt = cpu_to_le16(1);
3263 			fx_iocb.rsp_xfrcnt =
3264 			    cpu_to_le16(fxio->u.fxiocb.rsp_len);
3265 			fx_iocb.dseg_rsp_address[0] =
3266 			    cpu_to_le32(LSD(fxio->u.fxiocb.rsp_dma_handle));
3267 			fx_iocb.dseg_rsp_address[1] =
3268 			    cpu_to_le32(MSD(fxio->u.fxiocb.rsp_dma_handle));
3269 			fx_iocb.dseg_rsp_len =
3270 			    cpu_to_le32(fxio->u.fxiocb.rsp_len);
3271 		}
3272 
3273 		if (fxio->u.fxiocb.flags & SRB_FXDISC_REQ_DWRD_VALID) {
3274 			fx_iocb.dataword = fxio->u.fxiocb.req_data;
3275 		}
3276 		fx_iocb.flags = fxio->u.fxiocb.flags;
3277 	} else {
3278 		struct scatterlist *sg;
3279 		bsg_job = sp->u.bsg_job;
3280 		piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
3281 			&bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
3282 
3283 		fx_iocb.func_num = piocb_rqst->func_type;
3284 		fx_iocb.adapid = piocb_rqst->adapid;
3285 		fx_iocb.adapid_hi = piocb_rqst->adapid_hi;
3286 		fx_iocb.reserved_0 = piocb_rqst->reserved_0;
3287 		fx_iocb.reserved_1 = piocb_rqst->reserved_1;
3288 		fx_iocb.dataword_extra = piocb_rqst->dataword_extra;
3289 		fx_iocb.dataword = piocb_rqst->dataword;
3290 		fx_iocb.req_xfrcnt = piocb_rqst->req_len;
3291 		fx_iocb.rsp_xfrcnt = piocb_rqst->rsp_len;
3292 
3293 		if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) {
3294 			int avail_dsds, tot_dsds;
3295 			cont_a64_entry_t lcont_pkt;
3296 			cont_a64_entry_t *cont_pkt = NULL;
3297 			__le32 *cur_dsd;
3298 			int index = 0, cont = 0;
3299 
3300 			fx_iocb.req_dsdcnt =
3301 			    cpu_to_le16(bsg_job->request_payload.sg_cnt);
3302 			tot_dsds =
3303 			    bsg_job->request_payload.sg_cnt;
3304 			cur_dsd = (__le32 *)&fx_iocb.dseg_rq_address[0];
3305 			avail_dsds = 1;
3306 			for_each_sg(bsg_job->request_payload.sg_list, sg,
3307 			    tot_dsds, index) {
3308 				dma_addr_t sle_dma;
3309 
3310 				/* Allocate additional continuation packets? */
3311 				if (avail_dsds == 0) {
3312 					/*
3313 					 * Five DSDs are available in the Cont.
3314 					 * Type 1 IOCB.
3315 					 */
3316 					memset(&lcont_pkt, 0,
3317 					    REQUEST_ENTRY_SIZE);
3318 					cont_pkt =
3319 					    qlafx00_prep_cont_type1_iocb(
3320 						sp->fcport->vha->req,
3321 						&lcont_pkt);
3322 					cur_dsd = (__le32 *)
3323 					    lcont_pkt.dseg_0_address;
3324 					avail_dsds = 5;
3325 					cont = 1;
3326 					entry_cnt++;
3327 				}
3328 
3329 				sle_dma = sg_dma_address(sg);
3330 				*cur_dsd++   = cpu_to_le32(LSD(sle_dma));
3331 				*cur_dsd++   = cpu_to_le32(MSD(sle_dma));
3332 				*cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
3333 				avail_dsds--;
3334 
3335 				if (avail_dsds == 0 && cont == 1) {
3336 					cont = 0;
3337 					memcpy_toio(
3338 					    (void __iomem *)cont_pkt,
3339 					    &lcont_pkt, REQUEST_ENTRY_SIZE);
3340 					ql_dump_buffer(
3341 					    ql_dbg_user + ql_dbg_verbose,
3342 					    sp->fcport->vha, 0x3042,
3343 					    (uint8_t *)&lcont_pkt,
3344 					     REQUEST_ENTRY_SIZE);
3345 				}
3346 			}
3347 			if (avail_dsds != 0 && cont == 1) {
3348 				memcpy_toio((void __iomem *)cont_pkt,
3349 				    &lcont_pkt, REQUEST_ENTRY_SIZE);
3350 				ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
3351 				    sp->fcport->vha, 0x3043,
3352 				    (uint8_t *)&lcont_pkt, REQUEST_ENTRY_SIZE);
3353 			}
3354 		}
3355 
3356 		if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) {
3357 			int avail_dsds, tot_dsds;
3358 			cont_a64_entry_t lcont_pkt;
3359 			cont_a64_entry_t *cont_pkt = NULL;
3360 			__le32 *cur_dsd;
3361 			int index = 0, cont = 0;
3362 
3363 			fx_iocb.rsp_dsdcnt =
3364 			    cpu_to_le16(bsg_job->reply_payload.sg_cnt);
3365 			tot_dsds = bsg_job->reply_payload.sg_cnt;
3366 			cur_dsd = (__le32 *)&fx_iocb.dseg_rsp_address[0];
3367 			avail_dsds = 1;
3368 
3369 			for_each_sg(bsg_job->reply_payload.sg_list, sg,
3370 			    tot_dsds, index) {
3371 				dma_addr_t sle_dma;
3372 
3373 				/* Allocate additional continuation packets? */
3374 				if (avail_dsds == 0) {
3375 					/*
3376 					 * Five DSDs are available in the Cont.
3377 					 * Type 1 IOCB.
3378 					 */
3379 					memset(&lcont_pkt, 0,
3380 					    REQUEST_ENTRY_SIZE);
3381 					cont_pkt =
3382 					    qlafx00_prep_cont_type1_iocb(
3383 						sp->fcport->vha->req,
3384 						&lcont_pkt);
3385 					cur_dsd = (__le32 *)
3386 					    lcont_pkt.dseg_0_address;
3387 					avail_dsds = 5;
3388 					cont = 1;
3389 					entry_cnt++;
3390 				}
3391 
3392 				sle_dma = sg_dma_address(sg);
3393 				*cur_dsd++   = cpu_to_le32(LSD(sle_dma));
3394 				*cur_dsd++   = cpu_to_le32(MSD(sle_dma));
3395 				*cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
3396 				avail_dsds--;
3397 
3398 				if (avail_dsds == 0 && cont == 1) {
3399 					cont = 0;
3400 					memcpy_toio((void __iomem *)cont_pkt,
3401 					    &lcont_pkt,
3402 					    REQUEST_ENTRY_SIZE);
3403 					ql_dump_buffer(
3404 					    ql_dbg_user + ql_dbg_verbose,
3405 					    sp->fcport->vha, 0x3045,
3406 					    (uint8_t *)&lcont_pkt,
3407 					    REQUEST_ENTRY_SIZE);
3408 				}
3409 			}
3410 			if (avail_dsds != 0 && cont == 1) {
3411 				memcpy_toio((void __iomem *)cont_pkt,
3412 				    &lcont_pkt, REQUEST_ENTRY_SIZE);
3413 				ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
3414 				    sp->fcport->vha, 0x3046,
3415 				    (uint8_t *)&lcont_pkt, REQUEST_ENTRY_SIZE);
3416 			}
3417 		}
3418 
3419 		if (piocb_rqst->flags & SRB_FXDISC_REQ_DWRD_VALID)
3420 			fx_iocb.dataword = piocb_rqst->dataword;
3421 		fx_iocb.flags = piocb_rqst->flags;
3422 		fx_iocb.entry_count = entry_cnt;
3423 	}
3424 
3425 	ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
3426 	    sp->fcport->vha, 0x3047,
3427 	    (uint8_t *)&fx_iocb, sizeof(struct fxdisc_entry_fx00));
3428 
3429 	memcpy((void *)pfxiocb, &fx_iocb,
3430 	    sizeof(struct fxdisc_entry_fx00));
3431 	wmb();
3432 }
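
/*
 * Illustrative sketch (not part of the driver): the three-word data
 * segment descriptor layout written by the DSD loops above -- a 64-bit
 * DMA address split into low/high 32-bit halves (the LSD()/MSD()
 * macros) followed by the segment length; the driver stores each word
 * with cpu_to_le32().  ex_fill_dsd() is a hypothetical helper.
 */
#include <stdint.h>

static void ex_fill_dsd(uint32_t dsd[3], uint64_t dma, uint32_t len)
{
	dsd[0] = (uint32_t)(dma & 0xffffffffu);	/* LSD(): low half */
	dsd[1] = (uint32_t)(dma >> 32);		/* MSD(): high half */
	dsd[2] = len;				/* byte count */
}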
3433