xref: /openbmc/linux/drivers/scsi/qla2xxx/qla_mbx.c (revision b830f94f)
1 /*
2  * QLogic Fibre Channel HBA Driver
3  * Copyright (c)  2003-2014 QLogic Corporation
4  *
5  * See LICENSE.qla2xxx for copyright and licensing details.
6  */
7 #include "qla_def.h"
8 #include "qla_target.h"
9 
10 #include <linux/delay.h>
11 #include <linux/gfp.h>
12 
13 static struct mb_cmd_name {
14 	uint16_t cmd;
15 	const char *str;
16 } mb_str[] = {
17 	{MBC_GET_PORT_DATABASE,		"GPDB"},
18 	{MBC_GET_ID_LIST,		"GIDList"},
19 	{MBC_GET_LINK_PRIV_STATS,	"Stats"},
20 	{MBC_GET_RESOURCE_COUNTS,	"ResCnt"},
21 };
22 
23 static const char *mb_to_str(uint16_t cmd)
24 {
25 	int i;
26 	struct mb_cmd_name *e;
27 
28 	for (i = 0; i < ARRAY_SIZE(mb_str); i++) {
29 		e = mb_str + i;
30 		if (cmd == e->cmd)
31 			return e->str;
32 	}
33 	return "unknown";
34 }
35 
36 static struct rom_cmd {
37 	uint16_t cmd;
38 } rom_cmds[] = {
39 	{ MBC_LOAD_RAM },
40 	{ MBC_EXECUTE_FIRMWARE },
41 	{ MBC_READ_RAM_WORD },
42 	{ MBC_MAILBOX_REGISTER_TEST },
43 	{ MBC_VERIFY_CHECKSUM },
44 	{ MBC_GET_FIRMWARE_VERSION },
45 	{ MBC_LOAD_RISC_RAM },
46 	{ MBC_DUMP_RISC_RAM },
47 	{ MBC_LOAD_RISC_RAM_EXTENDED },
48 	{ MBC_DUMP_RISC_RAM_EXTENDED },
49 	{ MBC_WRITE_RAM_WORD_EXTENDED },
50 	{ MBC_READ_RAM_EXTENDED },
51 	{ MBC_GET_RESOURCE_COUNTS },
52 	{ MBC_SET_FIRMWARE_OPTION },
53 	{ MBC_MID_INITIALIZE_FIRMWARE },
54 	{ MBC_GET_FIRMWARE_STATE },
55 	{ MBC_GET_MEM_OFFLOAD_CNTRL_STAT },
56 	{ MBC_GET_RETRY_COUNT },
57 	{ MBC_TRACE_CONTROL },
58 	{ MBC_INITIALIZE_MULTIQ },
59 	{ MBC_IOCB_COMMAND_A64 },
60 	{ MBC_GET_ADAPTER_LOOP_ID },
61 	{ MBC_READ_SFP },
62 	{ MBC_GET_RNID_PARAMS },
63 	{ MBC_GET_SET_ZIO_THRESHOLD },
64 };
65 
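/*
 * Mailbox commands listed in rom_cmds[] (firmware load/dump/query style
 * commands) are still allowed while an ISP abort is pending; see the
 * is_rom_cmd() check in qla2x00_mailbox_command().
 */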
66 static int is_rom_cmd(uint16_t cmd)
67 {
68 	int i;
69 	struct  rom_cmd *wc;
70 
71 	for (i = 0; i < ARRAY_SIZE(rom_cmds); i++) {
72 		wc = rom_cmds + i;
73 		if (wc->cmd == cmd)
74 			return 1;
75 	}
76 
77 	return 0;
78 }
79 
80 /*
81  * qla2x00_mailbox_command
82  *	Issues a mailbox command and waits for completion.
83  *
84  * Input:
85  *	ha = adapter block pointer.
86  *	mcp = driver internal mbx struct pointer.
87  *
88  * Output:
89  *	mb[MAX_MAILBOX_REGISTER_COUNT] = returned mailbox data.
90  *
91  * Returns:
92  *	0 : QLA_SUCCESS (cmd performed successfully)
93  *	1 : QLA_FUNCTION_FAILED   (error encountered)
94  *	6 : QLA_FUNCTION_TIMEOUT (timeout condition encountered)
95  *
96  * Context:
97  *	Kernel context.
98  */
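/*
 * Illustrative sketch only (not part of the driver): the wrappers in this
 * file typically fill an mbx_cmd_t on the stack and hand it to this routine,
 * e.g.
 *
 *	mbx_cmd_t mc = { };
 *
 *	mc.mb[0] = MBC_GET_FIRMWARE_STATE;
 *	mc.out_mb = MBX_0;
 *	mc.in_mb = MBX_1|MBX_0;
 *	mc.tov = MBX_TOV_SECONDS;
 *	mc.flags = 0;
 *	rval = qla2x00_mailbox_command(vha, &mc);
 *
 * out_mb/in_mb are bitmasks (MBX_n) selecting which mailbox registers are
 * written to and read back from the chip.
 */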
99 static int
100 qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
101 {
102 	int		rval, i;
103 	unsigned long    flags = 0;
104 	device_reg_t *reg;
105 	uint8_t		abort_active;
106 	uint8_t		io_lock_on;
107 	uint16_t	command = 0;
108 	uint16_t	*iptr;
109 	uint16_t __iomem *optr;
110 	uint32_t	cnt;
111 	uint32_t	mboxes;
112 	unsigned long	wait_time;
113 	struct qla_hw_data *ha = vha->hw;
114 	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
115 	u32 chip_reset;
116 
117 
118 	ql_dbg(ql_dbg_mbx, vha, 0x1000, "Entered %s.\n", __func__);
119 
120 	if (ha->pdev->error_state > pci_channel_io_frozen) {
121 		ql_log(ql_log_warn, vha, 0x1001,
122 		    "error_state is greater than pci_channel_io_frozen, "
123 		    "exiting.\n");
124 		return QLA_FUNCTION_TIMEOUT;
125 	}
126 
127 	if (vha->device_flags & DFLG_DEV_FAILED) {
128 		ql_log(ql_log_warn, vha, 0x1002,
129 		    "Device in failed state, exiting.\n");
130 		return QLA_FUNCTION_TIMEOUT;
131 	}
132 
133 	/* if PCI error, then avoid mbx processing.*/
134 	if (test_bit(PFLG_DISCONNECTED, &base_vha->dpc_flags) &&
135 	    test_bit(UNLOADING, &base_vha->dpc_flags)) {
136 		ql_log(ql_log_warn, vha, 0xd04e,
137 		    "PCI error, exiting.\n");
138 		return QLA_FUNCTION_TIMEOUT;
139 	}
140 
141 	reg = ha->iobase;
142 	io_lock_on = base_vha->flags.init_done;
143 
144 	rval = QLA_SUCCESS;
145 	abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
146 	chip_reset = ha->chip_reset;
147 
148 	if (ha->flags.pci_channel_io_perm_failure) {
149 		ql_log(ql_log_warn, vha, 0x1003,
150 		    "Perm failure on EEH timeout MBX, exiting.\n");
151 		return QLA_FUNCTION_TIMEOUT;
152 	}
153 
154 	if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
155 		/* Setting Link-Down error */
156 		mcp->mb[0] = MBS_LINK_DOWN_ERROR;
157 		ql_log(ql_log_warn, vha, 0x1004,
158 		    "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
159 		return QLA_FUNCTION_TIMEOUT;
160 	}
161 
162 	/* check if ISP abort is active and return cmd with timeout */
163 	if ((test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
164 	    test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
165 	    test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) &&
166 	    !is_rom_cmd(mcp->mb[0])) {
167 		ql_log(ql_log_info, vha, 0x1005,
168 		    "Cmd 0x%x aborted with timeout since ISP Abort is pending\n",
169 		    mcp->mb[0]);
170 		return QLA_FUNCTION_TIMEOUT;
171 	}
172 
173 	atomic_inc(&ha->num_pend_mbx_stage1);
174 	/*
175 	 * Wait for active mailbox commands to finish by waiting at most tov
176 	 * seconds. This is to serialize actual issuing of mailbox cmds during
177 	 * non-ISP-abort time.
178 	 */
179 	if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) {
180 		/* Timeout occurred. Return error. */
181 		ql_log(ql_log_warn, vha, 0xd035,
182 		    "Cmd access timeout, cmd=0x%x, Exiting.\n",
183 		    mcp->mb[0]);
184 		atomic_dec(&ha->num_pend_mbx_stage1);
185 		return QLA_FUNCTION_TIMEOUT;
186 	}
187 	atomic_dec(&ha->num_pend_mbx_stage1);
188 	if (ha->flags.purge_mbox || chip_reset != ha->chip_reset) {
189 		rval = QLA_ABORTED;
190 		goto premature_exit;
191 	}
192 
193 
194 	/* Save mailbox command for debug */
195 	ha->mcp = mcp;
196 
197 	ql_dbg(ql_dbg_mbx, vha, 0x1006,
198 	    "Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]);
199 
200 	spin_lock_irqsave(&ha->hardware_lock, flags);
201 
202 	if (ha->flags.purge_mbox || chip_reset != ha->chip_reset ||
203 	    ha->flags.mbox_busy) {
204 		rval = QLA_ABORTED;
205 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
206 		goto premature_exit;
207 	}
208 	ha->flags.mbox_busy = 1;
209 
210 	/* Load mailbox registers. */
211 	if (IS_P3P_TYPE(ha))
212 		optr = (uint16_t __iomem *)&reg->isp82.mailbox_in[0];
213 	else if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha)))
214 		optr = (uint16_t __iomem *)&reg->isp24.mailbox0;
215 	else
216 		optr = (uint16_t __iomem *)MAILBOX_REG(ha, &reg->isp, 0);
217 
218 	iptr = mcp->mb;
219 	command = mcp->mb[0];
220 	mboxes = mcp->out_mb;
221 
222 	ql_dbg(ql_dbg_mbx, vha, 0x1111,
223 	    "Mailbox registers (OUT):\n");
224 	for (cnt = 0; cnt < ha->mbx_count; cnt++) {
225 		if (IS_QLA2200(ha) && cnt == 8)
226 			optr =
227 			    (uint16_t __iomem *)MAILBOX_REG(ha, &reg->isp, 8);
228 		if (mboxes & BIT_0) {
229 			ql_dbg(ql_dbg_mbx, vha, 0x1112,
230 			    "mbox[%d]<-0x%04x\n", cnt, *iptr);
231 			WRT_REG_WORD(optr, *iptr);
232 		}
233 
234 		mboxes >>= 1;
235 		optr++;
236 		iptr++;
237 	}
238 
239 	ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1117,
240 	    "I/O Address = %p.\n", optr);
241 
242 	/* Issue set host interrupt command to send cmd out. */
243 	ha->flags.mbox_int = 0;
244 	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
245 
246 	/* Unlock mbx registers and wait for interrupt */
247 	ql_dbg(ql_dbg_mbx, vha, 0x100f,
248 	    "Going to unlock irq & waiting for interrupts. "
249 	    "jiffies=%lx.\n", jiffies);
250 
251 	/* Wait for mbx cmd completion until timeout */
252 	atomic_inc(&ha->num_pend_mbx_stage2);
253 	if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) {
254 		set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
255 
256 		if (IS_P3P_TYPE(ha)) {
257 			if (RD_REG_DWORD(&reg->isp82.hint) &
258 				HINT_MBX_INT_PENDING) {
259 				ha->flags.mbox_busy = 0;
260 				spin_unlock_irqrestore(&ha->hardware_lock,
261 					flags);
262 
263 				atomic_dec(&ha->num_pend_mbx_stage2);
264 				ql_dbg(ql_dbg_mbx, vha, 0x1010,
265 				    "Pending mailbox timeout, exiting.\n");
266 				rval = QLA_FUNCTION_TIMEOUT;
267 				goto premature_exit;
268 			}
269 			WRT_REG_DWORD(&reg->isp82.hint, HINT_MBX_INT_PENDING);
270 		} else if (IS_FWI2_CAPABLE(ha))
271 			WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
272 		else
273 			WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT);
274 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
275 
276 		wait_time = jiffies;
277 		atomic_inc(&ha->num_pend_mbx_stage3);
278 		if (!wait_for_completion_timeout(&ha->mbx_intr_comp,
279 		    mcp->tov * HZ)) {
280 			if (chip_reset != ha->chip_reset) {
281 				spin_lock_irqsave(&ha->hardware_lock, flags);
282 				ha->flags.mbox_busy = 0;
283 				spin_unlock_irqrestore(&ha->hardware_lock,
284 				    flags);
285 				atomic_dec(&ha->num_pend_mbx_stage2);
286 				atomic_dec(&ha->num_pend_mbx_stage3);
287 				rval = QLA_ABORTED;
288 				goto premature_exit;
289 			}
290 			ql_dbg(ql_dbg_mbx, vha, 0x117a,
291 			    "cmd=%x Timeout.\n", command);
292 			spin_lock_irqsave(&ha->hardware_lock, flags);
293 			clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
294 			spin_unlock_irqrestore(&ha->hardware_lock, flags);
295 
296 		} else if (ha->flags.purge_mbox ||
297 		    chip_reset != ha->chip_reset) {
298 			spin_lock_irqsave(&ha->hardware_lock, flags);
299 			ha->flags.mbox_busy = 0;
300 			spin_unlock_irqrestore(&ha->hardware_lock, flags);
301 			atomic_dec(&ha->num_pend_mbx_stage2);
302 			atomic_dec(&ha->num_pend_mbx_stage3);
303 			rval = QLA_ABORTED;
304 			goto premature_exit;
305 		}
306 		atomic_dec(&ha->num_pend_mbx_stage3);
307 
308 		if (time_after(jiffies, wait_time + 5 * HZ))
309 			ql_log(ql_log_warn, vha, 0x1015, "cmd=0x%x, waited %d msecs\n",
310 			    command, jiffies_to_msecs(jiffies - wait_time));
311 	} else {
312 		ql_dbg(ql_dbg_mbx, vha, 0x1011,
313 		    "Cmd=%x Polling Mode.\n", command);
314 
315 		if (IS_P3P_TYPE(ha)) {
316 			if (RD_REG_DWORD(&reg->isp82.hint) &
317 				HINT_MBX_INT_PENDING) {
318 				ha->flags.mbox_busy = 0;
319 				spin_unlock_irqrestore(&ha->hardware_lock,
320 					flags);
321 				atomic_dec(&ha->num_pend_mbx_stage2);
322 				ql_dbg(ql_dbg_mbx, vha, 0x1012,
323 				    "Pending mailbox timeout, exiting.\n");
324 				rval = QLA_FUNCTION_TIMEOUT;
325 				goto premature_exit;
326 			}
327 			WRT_REG_DWORD(&reg->isp82.hint, HINT_MBX_INT_PENDING);
328 		} else if (IS_FWI2_CAPABLE(ha))
329 			WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
330 		else
331 			WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT);
332 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
333 
334 		wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */
335 		while (!ha->flags.mbox_int) {
336 			if (ha->flags.purge_mbox ||
337 			    chip_reset != ha->chip_reset) {
338 				spin_lock_irqsave(&ha->hardware_lock, flags);
339 				ha->flags.mbox_busy = 0;
340 				spin_unlock_irqrestore(&ha->hardware_lock,
341 				    flags);
342 				atomic_dec(&ha->num_pend_mbx_stage2);
343 				rval = QLA_ABORTED;
344 				goto premature_exit;
345 			}
346 
347 			if (time_after(jiffies, wait_time))
348 				break;
349 
350 			/*
351 			 * Check if it's UNLOADING, because we cannot poll in
352 			 * this case, or else a NULL pointer dereference
353 			 * is triggered.
354 			 */
355 			if (unlikely(test_bit(UNLOADING, &base_vha->dpc_flags)))
356 				return QLA_FUNCTION_TIMEOUT;
357 
358 			/* Check for pending interrupts. */
359 			qla2x00_poll(ha->rsp_q_map[0]);
360 
361 			if (!ha->flags.mbox_int &&
362 			    !(IS_QLA2200(ha) &&
363 			    command == MBC_LOAD_RISC_RAM_EXTENDED))
364 				msleep(10);
365 		} /* while */
366 		ql_dbg(ql_dbg_mbx, vha, 0x1013,
367 		    "Waited %d sec.\n",
368 		    (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ));
369 	}
370 	atomic_dec(&ha->num_pend_mbx_stage2);
371 
372 	/* Check whether we timed out */
373 	if (ha->flags.mbox_int) {
374 		uint16_t *iptr2;
375 
376 		ql_dbg(ql_dbg_mbx, vha, 0x1014,
377 		    "Cmd=%x completed.\n", command);
378 
379 		/* Got interrupt. Clear the flag. */
380 		ha->flags.mbox_int = 0;
381 		clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
382 
383 		if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
384 			spin_lock_irqsave(&ha->hardware_lock, flags);
385 			ha->flags.mbox_busy = 0;
386 			spin_unlock_irqrestore(&ha->hardware_lock, flags);
387 
388 			/* Setting Link-Down error */
389 			mcp->mb[0] = MBS_LINK_DOWN_ERROR;
390 			ha->mcp = NULL;
391 			rval = QLA_FUNCTION_FAILED;
392 			ql_log(ql_log_warn, vha, 0xd048,
393 			    "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
394 			goto premature_exit;
395 		}
396 
397 		if (ha->mailbox_out[0] != MBS_COMMAND_COMPLETE)
398 			rval = QLA_FUNCTION_FAILED;
399 
400 		/* Load return mailbox registers. */
401 		iptr2 = mcp->mb;
402 		iptr = (uint16_t *)&ha->mailbox_out[0];
403 		mboxes = mcp->in_mb;
404 
405 		ql_dbg(ql_dbg_mbx, vha, 0x1113,
406 		    "Mailbox registers (IN):\n");
407 		for (cnt = 0; cnt < ha->mbx_count; cnt++) {
408 			if (mboxes & BIT_0) {
409 				*iptr2 = *iptr;
410 				ql_dbg(ql_dbg_mbx, vha, 0x1114,
411 				    "mbox[%d]->0x%04x\n", cnt, *iptr2);
412 			}
413 
414 			mboxes >>= 1;
415 			iptr2++;
416 			iptr++;
417 		}
418 	} else {
419 
420 		uint16_t mb[8];
421 		uint32_t ictrl, host_status, hccr;
422 		uint16_t        w;
423 
424 		if (IS_FWI2_CAPABLE(ha)) {
425 			mb[0] = RD_REG_WORD(&reg->isp24.mailbox0);
426 			mb[1] = RD_REG_WORD(&reg->isp24.mailbox1);
427 			mb[2] = RD_REG_WORD(&reg->isp24.mailbox2);
428 			mb[3] = RD_REG_WORD(&reg->isp24.mailbox3);
429 			mb[7] = RD_REG_WORD(&reg->isp24.mailbox7);
430 			ictrl = RD_REG_DWORD(&reg->isp24.ictrl);
431 			host_status = RD_REG_DWORD(&reg->isp24.host_status);
432 			hccr = RD_REG_DWORD(&reg->isp24.hccr);
433 
434 			ql_log(ql_log_warn, vha, 0xd04c,
435 			    "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
436 			    "mb[0-3]=[0x%x 0x%x 0x%x 0x%x] mb7 0x%x host_status 0x%x hccr 0x%x\n",
437 			    command, ictrl, jiffies, mb[0], mb[1], mb[2], mb[3],
438 			    mb[7], host_status, hccr);
439 
440 		} else {
441 			mb[0] = RD_MAILBOX_REG(ha, &reg->isp, 0);
442 			ictrl = RD_REG_WORD(&reg->isp.ictrl);
443 			ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119,
444 			    "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
445 			    "mb[0]=0x%x\n", command, ictrl, jiffies, mb[0]);
446 		}
447 		ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019);
448 
449 		/* Capture FW dump only, if PCI device active */
450 		if (!pci_channel_offline(vha->hw->pdev)) {
451 			pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
452 			if (w == 0xffff || ictrl == 0xffffffff ||
453 			    (chip_reset != ha->chip_reset)) {
454 				/* This is a special case: if the driver is
455 				 * being unloaded and the PCI device has gone
456 				 * into a bad state due to a PCI error, then
457 				 * only the PCI ERR flag will be set.  Do a
458 				 * premature exit for this case.
459 				 */
460 				spin_lock_irqsave(&ha->hardware_lock, flags);
461 				ha->flags.mbox_busy = 0;
462 				spin_unlock_irqrestore(&ha->hardware_lock,
463 				    flags);
464 				rval = QLA_FUNCTION_TIMEOUT;
465 				goto premature_exit;
466 			}
467 
468 			/* Attempt to capture a firmware dump for further
469 			 * analysis of the current firmware state.  We do not
470 			 * need to do this if we are intentionally generating
471 			 * a dump.
472 			 */
473 			if (mcp->mb[0] != MBC_GEN_SYSTEM_ERROR)
474 				ha->isp_ops->fw_dump(vha, 0);
475 			rval = QLA_FUNCTION_TIMEOUT;
476 		 }
477 	}
478 	spin_lock_irqsave(&ha->hardware_lock, flags);
479 	ha->flags.mbox_busy = 0;
480 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
481 
482 	/* Clean up */
483 	ha->mcp = NULL;
484 
485 	if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) {
486 		ql_dbg(ql_dbg_mbx, vha, 0x101a,
487 		    "Checking for additional resp interrupt.\n");
488 
489 		/* polling mode for non isp_abort commands. */
490 		qla2x00_poll(ha->rsp_q_map[0]);
491 	}
492 
493 	if (rval == QLA_FUNCTION_TIMEOUT &&
494 	    mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) {
495 		if (!io_lock_on || (mcp->flags & IOCTL_CMD) ||
496 		    ha->flags.eeh_busy) {
497 			/* not in dpc. schedule it for dpc to take over. */
498 			ql_dbg(ql_dbg_mbx, vha, 0x101b,
499 			    "Timeout, schedule isp_abort_needed.\n");
500 
501 			if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
502 			    !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
503 			    !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
504 				if (IS_QLA82XX(ha)) {
505 					ql_dbg(ql_dbg_mbx, vha, 0x112a,
506 					    "disabling pause transmit on port "
507 					    "0 & 1.\n");
508 					qla82xx_wr_32(ha,
509 					    QLA82XX_CRB_NIU + 0x98,
510 					    CRB_NIU_XG_PAUSE_CTL_P0|
511 					    CRB_NIU_XG_PAUSE_CTL_P1);
512 				}
513 				ql_log(ql_log_info, base_vha, 0x101c,
514 				    "Mailbox cmd timeout occurred, cmd=0x%x, "
515 				    "mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP "
516 				    "abort.\n", command, mcp->mb[0],
517 				    ha->flags.eeh_busy);
518 				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
519 				qla2xxx_wake_dpc(vha);
520 			}
521 		} else if (current == ha->dpc_thread) {
522 			/* call abort directly since we are in the DPC thread */
523 			ql_dbg(ql_dbg_mbx, vha, 0x101d,
524 			    "Timeout, calling abort_isp.\n");
525 
526 			if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
527 			    !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
528 			    !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
529 				if (IS_QLA82XX(ha)) {
530 					ql_dbg(ql_dbg_mbx, vha, 0x112b,
531 					    "disabling pause transmit on port "
532 					    "0 & 1.\n");
533 					qla82xx_wr_32(ha,
534 					    QLA82XX_CRB_NIU + 0x98,
535 					    CRB_NIU_XG_PAUSE_CTL_P0|
536 					    CRB_NIU_XG_PAUSE_CTL_P1);
537 				}
538 				ql_log(ql_log_info, base_vha, 0x101e,
539 				    "Mailbox cmd timeout occurred, cmd=0x%x, "
540 				    "mb[0]=0x%x. Scheduling ISP abort ",
541 				    command, mcp->mb[0]);
542 				set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
543 				clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
544 				/* Allow next mbx cmd to come in. */
545 				complete(&ha->mbx_cmd_comp);
546 				if (ha->isp_ops->abort_isp(vha)) {
547 					/* Failed. retry later. */
548 					set_bit(ISP_ABORT_NEEDED,
549 					    &vha->dpc_flags);
550 				}
551 				clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
552 				ql_dbg(ql_dbg_mbx, vha, 0x101f,
553 				    "Finished abort_isp.\n");
554 				goto mbx_done;
555 			}
556 		}
557 	}
558 
559 premature_exit:
560 	/* Allow next mbx cmd to come in. */
561 	complete(&ha->mbx_cmd_comp);
562 
563 mbx_done:
564 	if (rval == QLA_ABORTED) {
565 		ql_log(ql_log_info, vha, 0xd035,
566 		    "Chip Reset in progress. Purging Mbox cmd=0x%x.\n",
567 		    mcp->mb[0]);
568 	} else if (rval) {
569 		if (ql2xextended_error_logging & (ql_dbg_disc|ql_dbg_mbx)) {
570 			pr_warn("%s [%s]-%04x:%ld: **** Failed=%x", QL_MSGHDR,
571 			    dev_name(&ha->pdev->dev), 0x1020+0x800,
572 			    vha->host_no, rval);
573 			mboxes = mcp->in_mb;
574 			cnt = 4;
575 			for (i = 0; i < ha->mbx_count && cnt; i++, mboxes >>= 1)
576 				if (mboxes & BIT_0) {
577 					printk(" mb[%u]=%x", i, mcp->mb[i]);
578 					cnt--;
579 				}
580 			pr_warn(" cmd=%x ****\n", command);
581 		}
582 		if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha))) {
583 			ql_dbg(ql_dbg_mbx, vha, 0x1198,
584 			    "host_status=%#x intr_ctrl=%#x intr_status=%#x\n",
585 			    RD_REG_DWORD(&reg->isp24.host_status),
586 			    RD_REG_DWORD(&reg->isp24.ictrl),
587 			    RD_REG_DWORD(&reg->isp24.istatus));
588 		} else {
589 			ql_dbg(ql_dbg_mbx, vha, 0x1206,
590 			    "ctrl_status=%#x ictrl=%#x istatus=%#x\n",
591 			    RD_REG_WORD(&reg->isp.ctrl_status),
592 			    RD_REG_WORD(&reg->isp.ictrl),
593 			    RD_REG_WORD(&reg->isp.istatus));
594 		}
595 	} else {
596 		ql_dbg(ql_dbg_mbx, base_vha, 0x1021, "Done %s.\n", __func__);
597 	}
598 
599 	return rval;
600 }
601 
602 int
603 qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
604     uint32_t risc_code_size)
605 {
606 	int rval;
607 	struct qla_hw_data *ha = vha->hw;
608 	mbx_cmd_t mc;
609 	mbx_cmd_t *mcp = &mc;
610 
611 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1022,
612 	    "Entered %s.\n", __func__);
613 
614 	if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) {
615 		mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED;
616 		mcp->mb[8] = MSW(risc_addr);
617 		mcp->out_mb = MBX_8|MBX_0;
618 	} else {
619 		mcp->mb[0] = MBC_LOAD_RISC_RAM;
620 		mcp->out_mb = MBX_0;
621 	}
622 	mcp->mb[1] = LSW(risc_addr);
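	/*
	 * 64-bit DMA address: mb[2]/mb[3] carry the low dword and
	 * mb[6]/mb[7] the high dword, 16 bits per mailbox register.
	 */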
623 	mcp->mb[2] = MSW(req_dma);
624 	mcp->mb[3] = LSW(req_dma);
625 	mcp->mb[6] = MSW(MSD(req_dma));
626 	mcp->mb[7] = LSW(MSD(req_dma));
627 	mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
628 	if (IS_FWI2_CAPABLE(ha)) {
629 		mcp->mb[4] = MSW(risc_code_size);
630 		mcp->mb[5] = LSW(risc_code_size);
631 		mcp->out_mb |= MBX_5|MBX_4;
632 	} else {
633 		mcp->mb[4] = LSW(risc_code_size);
634 		mcp->out_mb |= MBX_4;
635 	}
636 
637 	mcp->in_mb = MBX_1|MBX_0;
638 	mcp->tov = MBX_TOV_SECONDS;
639 	mcp->flags = 0;
640 	rval = qla2x00_mailbox_command(vha, mcp);
641 
642 	if (rval != QLA_SUCCESS) {
643 		ql_dbg(ql_dbg_mbx, vha, 0x1023,
644 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
645 		    rval, mcp->mb[0], mcp->mb[1]);
646 	} else {
647 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1024,
648 		    "Done %s.\n", __func__);
649 	}
650 
651 	return rval;
652 }
653 
654 #define	EXTENDED_BB_CREDITS	BIT_0
655 #define	NVME_ENABLE_FLAG	BIT_3
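/*
 * Helpers for the EXECUTE FIRMWARE mb[4] long-range (LR) bits: BIT_0 enables
 * the LR setting and, on 83xx/27xx/28xx parts, the distance is encoded above
 * it (LR_DIST_FW_POS / LR_DIST_FW_FIELD), taken either from the detected SFP
 * or from NVRAM.
 */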
656 static inline uint16_t qla25xx_set_sfp_lr_dist(struct qla_hw_data *ha)
657 {
658 	uint16_t mb4 = BIT_0;
659 
660 	if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
661 		mb4 |= ha->long_range_distance << LR_DIST_FW_POS;
662 
663 	return mb4;
664 }
665 
666 static inline uint16_t qla25xx_set_nvr_lr_dist(struct qla_hw_data *ha)
667 {
668 	uint16_t mb4 = BIT_0;
669 
670 	if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
671 		struct nvram_81xx *nv = ha->nvram;
672 
673 		mb4 |= LR_DIST_FW_FIELD(nv->enhanced_features);
674 	}
675 
676 	return mb4;
677 }
678 
679 /*
680  * qla2x00_execute_fw
681  *     Start adapter firmware.
682  *
683  * Input:
684  *     ha = adapter block pointer.
685  *     TARGET_QUEUE_LOCK must be released.
686  *     ADAPTER_STATE_LOCK must be released.
687  *
688  * Returns:
689  *     qla2x00 local function return status code.
690  *
691  * Context:
692  *     Kernel context.
693  */
694 int
695 qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
696 {
697 	int rval;
698 	struct qla_hw_data *ha = vha->hw;
699 	mbx_cmd_t mc;
700 	mbx_cmd_t *mcp = &mc;
701 
702 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1025,
703 	    "Entered %s.\n", __func__);
704 
705 	mcp->mb[0] = MBC_EXECUTE_FIRMWARE;
706 	mcp->out_mb = MBX_0;
707 	mcp->in_mb = MBX_0;
708 	if (IS_FWI2_CAPABLE(ha)) {
709 		mcp->mb[1] = MSW(risc_addr);
710 		mcp->mb[2] = LSW(risc_addr);
711 		mcp->mb[3] = 0;
712 		mcp->mb[4] = 0;
713 		ha->flags.using_lr_setting = 0;
714 		if (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
715 		    IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
716 			if (ql2xautodetectsfp) {
717 				if (ha->flags.detected_lr_sfp) {
718 					mcp->mb[4] |=
719 					    qla25xx_set_sfp_lr_dist(ha);
720 					ha->flags.using_lr_setting = 1;
721 				}
722 			} else {
723 				struct nvram_81xx *nv = ha->nvram;
724 				/* set LR distance if specified in nvram */
725 				if (nv->enhanced_features &
726 				    NEF_LR_DIST_ENABLE) {
727 					mcp->mb[4] |=
728 					    qla25xx_set_nvr_lr_dist(ha);
729 					ha->flags.using_lr_setting = 1;
730 				}
731 			}
732 		}
733 
734 		if (ql2xnvmeenable && (IS_QLA27XX(ha) || IS_QLA28XX(ha)))
735 			mcp->mb[4] |= NVME_ENABLE_FLAG;
736 
737 		if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
738 			struct nvram_81xx *nv = ha->nvram;
739 			/* set minimum speed if specified in nvram */
740 			if (nv->min_supported_speed >= 2 &&
741 			    nv->min_supported_speed <= 5) {
742 				mcp->mb[4] |= BIT_4;
743 				mcp->mb[11] |= nv->min_supported_speed & 0xF;
744 				mcp->out_mb |= MBX_11;
745 				mcp->in_mb |= MBX_5;
746 				vha->min_supported_speed =
747 				    nv->min_supported_speed;
748 			}
749 		}
750 
751 		if (ha->flags.exlogins_enabled)
752 			mcp->mb[4] |= ENABLE_EXTENDED_LOGIN;
753 
754 		if (ha->flags.exchoffld_enabled)
755 			mcp->mb[4] |= ENABLE_EXCHANGE_OFFLD;
756 
757 		mcp->out_mb |= MBX_4|MBX_3|MBX_2|MBX_1;
758 		mcp->in_mb |= MBX_3 | MBX_2 | MBX_1;
759 	} else {
760 		mcp->mb[1] = LSW(risc_addr);
761 		mcp->out_mb |= MBX_1;
762 		if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
763 			mcp->mb[2] = 0;
764 			mcp->out_mb |= MBX_2;
765 		}
766 	}
767 
768 	mcp->tov = MBX_TOV_SECONDS;
769 	mcp->flags = 0;
770 	rval = qla2x00_mailbox_command(vha, mcp);
771 
772 	if (rval != QLA_SUCCESS) {
773 		ql_dbg(ql_dbg_mbx, vha, 0x1026,
774 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
775 		return rval;
776 	}
777 
778 	if (!IS_FWI2_CAPABLE(ha))
779 		goto done;
780 
781 	ha->fw_ability_mask = mcp->mb[3] << 16 | mcp->mb[2];
782 	ql_dbg(ql_dbg_mbx, vha, 0x119a,
783 	    "fw_ability_mask=%x.\n", ha->fw_ability_mask);
784 	ql_dbg(ql_dbg_mbx, vha, 0x1027, "exchanges=%x.\n", mcp->mb[1]);
785 	if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
786 		ha->max_supported_speed = mcp->mb[2] & (BIT_0|BIT_1);
787 		ql_dbg(ql_dbg_mbx, vha, 0x119b, "max_supported_speed=%s.\n",
788 		    ha->max_supported_speed == 0 ? "16Gps" :
789 		    ha->max_supported_speed == 1 ? "32Gps" :
790 		    ha->max_supported_speed == 2 ? "64Gps" : "unknown");
791 		if (vha->min_supported_speed) {
792 			ha->min_supported_speed = mcp->mb[5] &
793 			    (BIT_0 | BIT_1 | BIT_2);
794 			ql_dbg(ql_dbg_mbx, vha, 0x119c,
795 			    "min_supported_speed=%s.\n",
796 			    ha->min_supported_speed == 6 ? "64Gps" :
797 			    ha->min_supported_speed == 5 ? "32Gps" :
798 			    ha->min_supported_speed == 4 ? "16Gps" :
799 			    ha->min_supported_speed == 3 ? "8Gps" :
800 			    ha->min_supported_speed == 2 ? "4Gps" : "unknown");
801 		}
802 	}
803 
804 done:
805 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1028,
806 	    "Done %s.\n", __func__);
807 
808 	return rval;
809 }
810 
811 /*
812  * qla_get_exlogin_status
813  *	Get extended login status
814  *	uses the memory offload control/status Mailbox
815  *
816  * Input:
817  *	vha:		adapter state pointer.
818  *	buf_sz/ex_logins_cnt: pointers for returned buffer size and count.
819  *
820  * Returns:
821  *	qla2x00 local function status
822  *
823  * Context:
824  *	Kernel context.
825  */
826 #define	FETCH_XLOGINS_STAT	0x8
827 int
828 qla_get_exlogin_status(scsi_qla_host_t *vha, uint16_t *buf_sz,
829 	uint16_t *ex_logins_cnt)
830 {
831 	int rval;
832 	mbx_cmd_t	mc;
833 	mbx_cmd_t	*mcp = &mc;
834 
835 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118f,
836 	    "Entered %s\n", __func__);
837 
838 	memset(mcp->mb, 0 , sizeof(mcp->mb));
839 	mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
840 	mcp->mb[1] = FETCH_XLOGINS_STAT;
841 	mcp->out_mb = MBX_1|MBX_0;
842 	mcp->in_mb = MBX_10|MBX_4|MBX_0;
843 	mcp->tov = MBX_TOV_SECONDS;
844 	mcp->flags = 0;
845 
846 	rval = qla2x00_mailbox_command(vha, mcp);
847 	if (rval != QLA_SUCCESS) {
848 		ql_dbg(ql_dbg_mbx, vha, 0x1115, "Failed=%x.\n", rval);
849 	} else {
850 		*buf_sz = mcp->mb[4];
851 		*ex_logins_cnt = mcp->mb[10];
852 
853 		ql_log(ql_log_info, vha, 0x1190,
854 		    "buffer size 0x%x, exchange login count=%d\n",
855 		    mcp->mb[4], mcp->mb[10]);
856 
857 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1116,
858 		    "Done %s.\n", __func__);
859 	}
860 
861 	return rval;
862 }
863 
864 /*
865  * qla_set_exlogin_mem_cfg
866  *	set extended login memory configuration
867  *	Mbx needs to be issued before init_cb is set
868  *
869  * Input:
870  *	ha:		adapter state pointer.
871  *	buffer:		buffer pointer
872  *	phys_addr:	physical address of buffer
873  *	size:		size of buffer
874  *	TARGET_QUEUE_LOCK must be released
875  *	ADAPTER_STATE_LOCK must be released
876  *
877  * Returns:
878  *	qla2x00 local function status code.
879  *
880  * Context:
881  *	Kernel context.
882  */
883 #define CONFIG_XLOGINS_MEM	0x3
884 int
885 qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr)
886 {
887 	int		rval;
888 	mbx_cmd_t	mc;
889 	mbx_cmd_t	*mcp = &mc;
890 	struct qla_hw_data *ha = vha->hw;
891 
892 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111a,
893 	    "Entered %s.\n", __func__);
894 
895 	memset(mcp->mb, 0 , sizeof(mcp->mb));
896 	mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
897 	mcp->mb[1] = CONFIG_XLOGINS_MEM;
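	/*
	 * DMA address of the extended login buffer goes in mb[2,3,6,7];
	 * its size (ha->exlogin_size) goes in mb[8,9].
	 */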
898 	mcp->mb[2] = MSW(phys_addr);
899 	mcp->mb[3] = LSW(phys_addr);
900 	mcp->mb[6] = MSW(MSD(phys_addr));
901 	mcp->mb[7] = LSW(MSD(phys_addr));
902 	mcp->mb[8] = MSW(ha->exlogin_size);
903 	mcp->mb[9] = LSW(ha->exlogin_size);
904 	mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
905 	mcp->in_mb = MBX_11|MBX_0;
906 	mcp->tov = MBX_TOV_SECONDS;
907 	mcp->flags = 0;
908 	rval = qla2x00_mailbox_command(vha, mcp);
909 	if (rval != QLA_SUCCESS) {
910 		/*EMPTY*/
911 		ql_dbg(ql_dbg_mbx, vha, 0x111b, "Failed=%x.\n", rval);
912 	} else {
913 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c,
914 		    "Done %s.\n", __func__);
915 	}
916 
917 	return rval;
918 }
919 
920 /*
921  * qla_get_exchoffld_status
922  *	Get exchange offload status
923  *	uses the memory offload control/status Mailbox
924  *
925  * Input:
926  *	vha:		adapter state pointer.
927  *	buf_sz/ex_logins_cnt: pointers for returned buffer size and count.
928  *
929  * Returns:
930  *	qla2x00 local function status
931  *
932  * Context:
933  *	Kernel context.
934  */
935 #define	FETCH_XCHOFFLD_STAT	0x2
936 int
937 qla_get_exchoffld_status(scsi_qla_host_t *vha, uint16_t *buf_sz,
938 	uint16_t *ex_logins_cnt)
939 {
940 	int rval;
941 	mbx_cmd_t	mc;
942 	mbx_cmd_t	*mcp = &mc;
943 
944 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1019,
945 	    "Entered %s\n", __func__);
946 
947 	memset(mcp->mb, 0 , sizeof(mcp->mb));
948 	mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
949 	mcp->mb[1] = FETCH_XCHOFFLD_STAT;
950 	mcp->out_mb = MBX_1|MBX_0;
951 	mcp->in_mb = MBX_10|MBX_4|MBX_0;
952 	mcp->tov = MBX_TOV_SECONDS;
953 	mcp->flags = 0;
954 
955 	rval = qla2x00_mailbox_command(vha, mcp);
956 	if (rval != QLA_SUCCESS) {
957 		ql_dbg(ql_dbg_mbx, vha, 0x1155, "Failed=%x.\n", rval);
958 	} else {
959 		*buf_sz = mcp->mb[4];
960 		*ex_logins_cnt = mcp->mb[10];
961 
962 		ql_log(ql_log_info, vha, 0x118e,
963 		    "buffer size 0x%x, exchange offload count=%d\n",
964 		    mcp->mb[4], mcp->mb[10]);
965 
966 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1156,
967 		    "Done %s.\n", __func__);
968 	}
969 
970 	return rval;
971 }
972 
973 /*
974  * qla_set_exchoffld_mem_cfg
975  *	Set exchange offload memory configuration
976  *	Mbx needs to be issued before init_cb is set
977  *
978  * Input:
979  *	ha:		adapter state pointer.
980  *	buffer:		buffer pointer
981  *	phys_addr:	physical address of buffer
982  *	size:		size of buffer
983  *	TARGET_QUEUE_LOCK must be released
984  *	ADAPTER_STATE_LOCK must be released
985  *
986  * Returns:
987  *	qla2x00 local function status code.
988  *
989  * Context:
990  *	Kernel context.
991  */
992 #define CONFIG_XCHOFFLD_MEM	0x3
993 int
994 qla_set_exchoffld_mem_cfg(scsi_qla_host_t *vha)
995 {
996 	int		rval;
997 	mbx_cmd_t	mc;
998 	mbx_cmd_t	*mcp = &mc;
999 	struct qla_hw_data *ha = vha->hw;
1000 
1001 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1157,
1002 	    "Entered %s.\n", __func__);
1003 
1004 	memset(mcp->mb, 0 , sizeof(mcp->mb));
1005 	mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
1006 	mcp->mb[1] = CONFIG_XCHOFFLD_MEM;
1007 	mcp->mb[2] = MSW(ha->exchoffld_buf_dma);
1008 	mcp->mb[3] = LSW(ha->exchoffld_buf_dma);
1009 	mcp->mb[6] = MSW(MSD(ha->exchoffld_buf_dma));
1010 	mcp->mb[7] = LSW(MSD(ha->exchoffld_buf_dma));
1011 	mcp->mb[8] = MSW(ha->exchoffld_size);
1012 	mcp->mb[9] = LSW(ha->exchoffld_size);
1013 	mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1014 	mcp->in_mb = MBX_11|MBX_0;
1015 	mcp->tov = MBX_TOV_SECONDS;
1016 	mcp->flags = 0;
1017 	rval = qla2x00_mailbox_command(vha, mcp);
1018 	if (rval != QLA_SUCCESS) {
1019 		/*EMPTY*/
1020 		ql_dbg(ql_dbg_mbx, vha, 0x1158, "Failed=%x.\n", rval);
1021 	} else {
1022 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1192,
1023 		    "Done %s.\n", __func__);
1024 	}
1025 
1026 	return rval;
1027 }
1028 
1029 /*
1030  * qla2x00_get_fw_version
1031  *	Get firmware version.
1032  *
1033  * Input:
1034  *	ha:		adapter state pointer.
1035  *	major:		pointer for major number.
1036  *	minor:		pointer for minor number.
1037  *	subminor:	pointer for subminor number.
1038  *
1039  * Returns:
1040  *	qla2x00 local function return status code.
1041  *
1042  * Context:
1043  *	Kernel context.
1044  */
1045 int
1046 qla2x00_get_fw_version(scsi_qla_host_t *vha)
1047 {
1048 	int		rval;
1049 	mbx_cmd_t	mc;
1050 	mbx_cmd_t	*mcp = &mc;
1051 	struct qla_hw_data *ha = vha->hw;
1052 
1053 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1029,
1054 	    "Entered %s.\n", __func__);
1055 
1056 	mcp->mb[0] = MBC_GET_FIRMWARE_VERSION;
1057 	mcp->out_mb = MBX_0;
1058 	mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
1059 	if (IS_QLA81XX(vha->hw) || IS_QLA8031(ha) || IS_QLA8044(ha))
1060 		mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8;
1061 	if (IS_FWI2_CAPABLE(ha))
1062 		mcp->in_mb |= MBX_17|MBX_16|MBX_15;
1063 	if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
1064 		mcp->in_mb |=
1065 		    MBX_25|MBX_24|MBX_23|MBX_22|MBX_21|MBX_20|MBX_19|MBX_18|
1066 		    MBX_14|MBX_13|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7;
1067 
1068 	mcp->flags = 0;
1069 	mcp->tov = MBX_TOV_SECONDS;
1070 	rval = qla2x00_mailbox_command(vha, mcp);
1071 	if (rval != QLA_SUCCESS)
1072 		goto failed;
1073 
1074 	/* Return mailbox data. */
1075 	ha->fw_major_version = mcp->mb[1];
1076 	ha->fw_minor_version = mcp->mb[2];
1077 	ha->fw_subminor_version = mcp->mb[3];
1078 	ha->fw_attributes = mcp->mb[6];
1079 	if (IS_QLA2100(vha->hw) || IS_QLA2200(vha->hw))
1080 		ha->fw_memory_size = 0x1FFFF;		/* Defaults to 128KB. */
1081 	else
1082 		ha->fw_memory_size = (mcp->mb[5] << 16) | mcp->mb[4];
1083 
1084 	if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
1085 		ha->mpi_version[0] = mcp->mb[10] & 0xff;
1086 		ha->mpi_version[1] = mcp->mb[11] >> 8;
1087 		ha->mpi_version[2] = mcp->mb[11] & 0xff;
1088 		ha->mpi_capabilities = (mcp->mb[12] << 16) | mcp->mb[13];
1089 		ha->phy_version[0] = mcp->mb[8] & 0xff;
1090 		ha->phy_version[1] = mcp->mb[9] >> 8;
1091 		ha->phy_version[2] = mcp->mb[9] & 0xff;
1092 	}
1093 
1094 	if (IS_FWI2_CAPABLE(ha)) {
1095 		ha->fw_attributes_h = mcp->mb[15];
1096 		ha->fw_attributes_ext[0] = mcp->mb[16];
1097 		ha->fw_attributes_ext[1] = mcp->mb[17];
1098 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1139,
1099 		    "%s: FW_attributes Upper: 0x%x, Lower: 0x%x.\n",
1100 		    __func__, mcp->mb[15], mcp->mb[6]);
1101 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x112f,
1102 		    "%s: Ext_FwAttributes Upper: 0x%x, Lower: 0x%x.\n",
1103 		    __func__, mcp->mb[17], mcp->mb[16]);
1104 
1105 		if (ha->fw_attributes_h & 0x4)
1106 			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118d,
1107 			    "%s: Firmware supports Extended Login 0x%x\n",
1108 			    __func__, ha->fw_attributes_h);
1109 
1110 		if (ha->fw_attributes_h & 0x8)
1111 			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1191,
1112 			    "%s: Firmware supports Exchange Offload 0x%x\n",
1113 			    __func__, ha->fw_attributes_h);
1114 
1115 		/*
1116 		 * FW supports nvme and driver load parameter requested nvme.
1117 		 * BIT 26 of fw_attributes indicates NVMe support.
1118 		 */
1119 		if ((ha->fw_attributes_h &
1120 		    (FW_ATTR_H_NVME | FW_ATTR_H_NVME_UPDATED)) &&
1121 			ql2xnvmeenable) {
1122 			if (ha->fw_attributes_h & FW_ATTR_H_NVME_FBURST)
1123 				vha->flags.nvme_first_burst = 1;
1124 
1125 			vha->flags.nvme_enabled = 1;
1126 			ql_log(ql_log_info, vha, 0xd302,
1127 			    "%s: FC-NVMe is Enabled (0x%x)\n",
1128 			     __func__, ha->fw_attributes_h);
1129 		}
1130 	}
1131 
1132 	if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
1133 		ha->serdes_version[0] = mcp->mb[7] & 0xff;
1134 		ha->serdes_version[1] = mcp->mb[8] >> 8;
1135 		ha->serdes_version[2] = mcp->mb[8] & 0xff;
1136 		ha->mpi_version[0] = mcp->mb[10] & 0xff;
1137 		ha->mpi_version[1] = mcp->mb[11] >> 8;
1138 		ha->mpi_version[2] = mcp->mb[11] & 0xff;
1139 		ha->pep_version[0] = mcp->mb[13] & 0xff;
1140 		ha->pep_version[1] = mcp->mb[14] >> 8;
1141 		ha->pep_version[2] = mcp->mb[14] & 0xff;
1142 		ha->fw_shared_ram_start = (mcp->mb[19] << 16) | mcp->mb[18];
1143 		ha->fw_shared_ram_end = (mcp->mb[21] << 16) | mcp->mb[20];
1144 		ha->fw_ddr_ram_start = (mcp->mb[23] << 16) | mcp->mb[22];
1145 		ha->fw_ddr_ram_end = (mcp->mb[25] << 16) | mcp->mb[24];
1146 		if (IS_QLA28XX(ha)) {
1147 			if (mcp->mb[16] & BIT_10) {
1148 				ql_log(ql_log_info, vha, 0xffff,
1149 				    "FW supports secure flash updates\n");
1150 				ha->flags.secure_fw = 1;
1151 			}
1152 		}
1153 	}
1154 
1155 failed:
1156 	if (rval != QLA_SUCCESS) {
1157 		/*EMPTY*/
1158 		ql_dbg(ql_dbg_mbx, vha, 0x102a, "Failed=%x.\n", rval);
1159 	} else {
1160 		/*EMPTY*/
1161 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102b,
1162 		    "Done %s.\n", __func__);
1163 	}
1164 	return rval;
1165 }
1166 
1167 /*
1168  * qla2x00_get_fw_options
1169  *	Get firmware options.
1170  *
1171  * Input:
1172  *	ha = adapter block pointer.
1173  *	fwopt = pointer for firmware options.
1174  *
1175  * Returns:
1176  *	qla2x00 local function return status code.
1177  *
1178  * Context:
1179  *	Kernel context.
1180  */
1181 int
1182 qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
1183 {
1184 	int rval;
1185 	mbx_cmd_t mc;
1186 	mbx_cmd_t *mcp = &mc;
1187 
1188 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102c,
1189 	    "Entered %s.\n", __func__);
1190 
1191 	mcp->mb[0] = MBC_GET_FIRMWARE_OPTION;
1192 	mcp->out_mb = MBX_0;
1193 	mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1194 	mcp->tov = MBX_TOV_SECONDS;
1195 	mcp->flags = 0;
1196 	rval = qla2x00_mailbox_command(vha, mcp);
1197 
1198 	if (rval != QLA_SUCCESS) {
1199 		/*EMPTY*/
1200 		ql_dbg(ql_dbg_mbx, vha, 0x102d, "Failed=%x.\n", rval);
1201 	} else {
1202 		fwopts[0] = mcp->mb[0];
1203 		fwopts[1] = mcp->mb[1];
1204 		fwopts[2] = mcp->mb[2];
1205 		fwopts[3] = mcp->mb[3];
1206 
1207 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102e,
1208 		    "Done %s.\n", __func__);
1209 	}
1210 
1211 	return rval;
1212 }
1213 
1214 
1215 /*
1216  * qla2x00_set_fw_options
1217  *	Set firmware options.
1218  *
1219  * Input:
1220  *	ha = adapter block pointer.
1221  *	fwopt = pointer for firmware options.
1222  *
1223  * Returns:
1224  *	qla2x00 local function return status code.
1225  *
1226  * Context:
1227  *	Kernel context.
1228  */
1229 int
1230 qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
1231 {
1232 	int rval;
1233 	mbx_cmd_t mc;
1234 	mbx_cmd_t *mcp = &mc;
1235 
1236 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102f,
1237 	    "Entered %s.\n", __func__);
1238 
1239 	mcp->mb[0] = MBC_SET_FIRMWARE_OPTION;
1240 	mcp->mb[1] = fwopts[1];
1241 	mcp->mb[2] = fwopts[2];
1242 	mcp->mb[3] = fwopts[3];
1243 	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1244 	mcp->in_mb = MBX_0;
1245 	if (IS_FWI2_CAPABLE(vha->hw)) {
1246 		mcp->in_mb |= MBX_1;
1247 		mcp->mb[10] = fwopts[10];
1248 		mcp->out_mb |= MBX_10;
1249 	} else {
1250 		mcp->mb[10] = fwopts[10];
1251 		mcp->mb[11] = fwopts[11];
1252 		mcp->mb[12] = 0;	/* Undocumented, but used */
1253 		mcp->out_mb |= MBX_12|MBX_11|MBX_10;
1254 	}
1255 	mcp->tov = MBX_TOV_SECONDS;
1256 	mcp->flags = 0;
1257 	rval = qla2x00_mailbox_command(vha, mcp);
1258 
1259 	fwopts[0] = mcp->mb[0];
1260 
1261 	if (rval != QLA_SUCCESS) {
1262 		/*EMPTY*/
1263 		ql_dbg(ql_dbg_mbx, vha, 0x1030,
1264 		    "Failed=%x (%x/%x).\n", rval, mcp->mb[0], mcp->mb[1]);
1265 	} else {
1266 		/*EMPTY*/
1267 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1031,
1268 		    "Done %s.\n", __func__);
1269 	}
1270 
1271 	return rval;
1272 }
1273 
1274 /*
1275  * qla2x00_mbx_reg_test
1276  *	Mailbox register wrap test.
1277  *
1278  * Input:
1279  *	ha = adapter block pointer.
1280  *	TARGET_QUEUE_LOCK must be released.
1281  *	ADAPTER_STATE_LOCK must be released.
1282  *
1283  * Returns:
1284  *	qla2x00 local function return status code.
1285  *
1286  * Context:
1287  *	Kernel context.
1288  */
1289 int
1290 qla2x00_mbx_reg_test(scsi_qla_host_t *vha)
1291 {
1292 	int rval;
1293 	mbx_cmd_t mc;
1294 	mbx_cmd_t *mcp = &mc;
1295 
1296 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1032,
1297 	    "Entered %s.\n", __func__);
1298 
1299 	mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST;
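	/* Known test patterns; the firmware should echo them back in the same registers. */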
1300 	mcp->mb[1] = 0xAAAA;
1301 	mcp->mb[2] = 0x5555;
1302 	mcp->mb[3] = 0xAA55;
1303 	mcp->mb[4] = 0x55AA;
1304 	mcp->mb[5] = 0xA5A5;
1305 	mcp->mb[6] = 0x5A5A;
1306 	mcp->mb[7] = 0x2525;
1307 	mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
1308 	mcp->in_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
1309 	mcp->tov = MBX_TOV_SECONDS;
1310 	mcp->flags = 0;
1311 	rval = qla2x00_mailbox_command(vha, mcp);
1312 
1313 	if (rval == QLA_SUCCESS) {
1314 		if (mcp->mb[1] != 0xAAAA || mcp->mb[2] != 0x5555 ||
1315 		    mcp->mb[3] != 0xAA55 || mcp->mb[4] != 0x55AA)
1316 			rval = QLA_FUNCTION_FAILED;
1317 		if (mcp->mb[5] != 0xA5A5 || mcp->mb[6] != 0x5A5A ||
1318 		    mcp->mb[7] != 0x2525)
1319 			rval = QLA_FUNCTION_FAILED;
1320 	}
1321 
1322 	if (rval != QLA_SUCCESS) {
1323 		/*EMPTY*/
1324 		ql_dbg(ql_dbg_mbx, vha, 0x1033, "Failed=%x.\n", rval);
1325 	} else {
1326 		/*EMPTY*/
1327 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1034,
1328 		    "Done %s.\n", __func__);
1329 	}
1330 
1331 	return rval;
1332 }
1333 
1334 /*
1335  * qla2x00_verify_checksum
1336  *	Verify firmware checksum.
1337  *
1338  * Input:
1339  *	ha = adapter block pointer.
1340  *	TARGET_QUEUE_LOCK must be released.
1341  *	ADAPTER_STATE_LOCK must be released.
1342  *
1343  * Returns:
1344  *	qla2x00 local function return status code.
1345  *
1346  * Context:
1347  *	Kernel context.
1348  */
1349 int
1350 qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr)
1351 {
1352 	int rval;
1353 	mbx_cmd_t mc;
1354 	mbx_cmd_t *mcp = &mc;
1355 
1356 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1035,
1357 	    "Entered %s.\n", __func__);
1358 
1359 	mcp->mb[0] = MBC_VERIFY_CHECKSUM;
1360 	mcp->out_mb = MBX_0;
1361 	mcp->in_mb = MBX_0;
1362 	if (IS_FWI2_CAPABLE(vha->hw)) {
1363 		mcp->mb[1] = MSW(risc_addr);
1364 		mcp->mb[2] = LSW(risc_addr);
1365 		mcp->out_mb |= MBX_2|MBX_1;
1366 		mcp->in_mb |= MBX_2|MBX_1;
1367 	} else {
1368 		mcp->mb[1] = LSW(risc_addr);
1369 		mcp->out_mb |= MBX_1;
1370 		mcp->in_mb |= MBX_1;
1371 	}
1372 
1373 	mcp->tov = MBX_TOV_SECONDS;
1374 	mcp->flags = 0;
1375 	rval = qla2x00_mailbox_command(vha, mcp);
1376 
1377 	if (rval != QLA_SUCCESS) {
1378 		ql_dbg(ql_dbg_mbx, vha, 0x1036,
1379 		    "Failed=%x chk sum=%x.\n", rval, IS_FWI2_CAPABLE(vha->hw) ?
1380 		    (mcp->mb[2] << 16) | mcp->mb[1] : mcp->mb[1]);
1381 	} else {
1382 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1037,
1383 		    "Done %s.\n", __func__);
1384 	}
1385 
1386 	return rval;
1387 }
1388 
1389 /*
1390  * qla2x00_issue_iocb
1391  *	Issue IOCB using mailbox command
1392  *
1393  * Input:
1394  *	ha = adapter state pointer.
1395  *	buffer = buffer pointer.
1396  *	phys_addr = physical address of buffer.
1397  *	size = size of buffer.
1398  *	TARGET_QUEUE_LOCK must be released.
1399  *	ADAPTER_STATE_LOCK must be released.
1400  *
1401  * Returns:
1402  *	qla2x00 local function return status code.
1403  *
1404  * Context:
1405  *	Kernel context.
1406  */
1407 int
1408 qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
1409     dma_addr_t phys_addr, size_t size, uint32_t tov)
1410 {
1411 	int		rval;
1412 	mbx_cmd_t	mc;
1413 	mbx_cmd_t	*mcp = &mc;
1414 
1415 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1038,
1416 	    "Entered %s.\n", __func__);
1417 
1418 	mcp->mb[0] = MBC_IOCB_COMMAND_A64;
1419 	mcp->mb[1] = 0;
1420 	mcp->mb[2] = MSW(phys_addr);
1421 	mcp->mb[3] = LSW(phys_addr);
1422 	mcp->mb[6] = MSW(MSD(phys_addr));
1423 	mcp->mb[7] = LSW(MSD(phys_addr));
1424 	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1425 	mcp->in_mb = MBX_2|MBX_0;
1426 	mcp->tov = tov;
1427 	mcp->flags = 0;
1428 	rval = qla2x00_mailbox_command(vha, mcp);
1429 
1430 	if (rval != QLA_SUCCESS) {
1431 		/*EMPTY*/
1432 		ql_dbg(ql_dbg_mbx, vha, 0x1039, "Failed=%x.\n", rval);
1433 	} else {
1434 		sts_entry_t *sts_entry = (sts_entry_t *) buffer;
1435 
1436 		/* Mask reserved bits. */
1437 		sts_entry->entry_status &=
1438 		    IS_FWI2_CAPABLE(vha->hw) ? RF_MASK_24XX : RF_MASK;
1439 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103a,
1440 		    "Done %s.\n", __func__);
1441 	}
1442 
1443 	return rval;
1444 }
1445 
1446 int
1447 qla2x00_issue_iocb(scsi_qla_host_t *vha, void *buffer, dma_addr_t phys_addr,
1448     size_t size)
1449 {
1450 	return qla2x00_issue_iocb_timeout(vha, buffer, phys_addr, size,
1451 	    MBX_TOV_SECONDS);
1452 }
1453 
1454 /*
1455  * qla2x00_abort_command
1456  *	Abort command aborts a specified IOCB.
1457  *
1458  * Input:
1459  *	ha = adapter block pointer.
1460  *	sp = SRB structure pointer.
1461  *
1462  * Returns:
1463  *	qla2x00 local function return status code.
1464  *
1465  * Context:
1466  *	Kernel context.
1467  */
1468 int
1469 qla2x00_abort_command(srb_t *sp)
1470 {
1471 	unsigned long   flags = 0;
1472 	int		rval;
1473 	uint32_t	handle = 0;
1474 	mbx_cmd_t	mc;
1475 	mbx_cmd_t	*mcp = &mc;
1476 	fc_port_t	*fcport = sp->fcport;
1477 	scsi_qla_host_t *vha = fcport->vha;
1478 	struct qla_hw_data *ha = vha->hw;
1479 	struct req_que *req;
1480 	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1481 
1482 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103b,
1483 	    "Entered %s.\n", __func__);
1484 
1485 	if (vha->flags.qpairs_available && sp->qpair)
1486 		req = sp->qpair->req;
1487 	else
1488 		req = vha->req;
1489 
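	/* Find the driver handle under which this sp is outstanding on the request queue. */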
1490 	spin_lock_irqsave(&ha->hardware_lock, flags);
1491 	for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
1492 		if (req->outstanding_cmds[handle] == sp)
1493 			break;
1494 	}
1495 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
1496 
1497 	if (handle == req->num_outstanding_cmds) {
1498 		/* command not found */
1499 		return QLA_FUNCTION_FAILED;
1500 	}
1501 
1502 	mcp->mb[0] = MBC_ABORT_COMMAND;
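	/* Without extended IDs, the 8-bit loop ID is carried in the upper byte of mb[1]. */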
1503 	if (HAS_EXTENDED_IDS(ha))
1504 		mcp->mb[1] = fcport->loop_id;
1505 	else
1506 		mcp->mb[1] = fcport->loop_id << 8;
1507 	mcp->mb[2] = (uint16_t)handle;
1508 	mcp->mb[3] = (uint16_t)(handle >> 16);
1509 	mcp->mb[6] = (uint16_t)cmd->device->lun;
1510 	mcp->out_mb = MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1511 	mcp->in_mb = MBX_0;
1512 	mcp->tov = MBX_TOV_SECONDS;
1513 	mcp->flags = 0;
1514 	rval = qla2x00_mailbox_command(vha, mcp);
1515 
1516 	if (rval != QLA_SUCCESS) {
1517 		ql_dbg(ql_dbg_mbx, vha, 0x103c, "Failed=%x.\n", rval);
1518 	} else {
1519 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103d,
1520 		    "Done %s.\n", __func__);
1521 	}
1522 
1523 	return rval;
1524 }
1525 
1526 int
1527 qla2x00_abort_target(struct fc_port *fcport, uint64_t l, int tag)
1528 {
1529 	int rval, rval2;
1530 	mbx_cmd_t  mc;
1531 	mbx_cmd_t  *mcp = &mc;
1532 	scsi_qla_host_t *vha;
1533 
1534 	vha = fcport->vha;
1535 
1536 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103e,
1537 	    "Entered %s.\n", __func__);
1538 
1539 	mcp->mb[0] = MBC_ABORT_TARGET;
1540 	mcp->out_mb = MBX_9|MBX_2|MBX_1|MBX_0;
1541 	if (HAS_EXTENDED_IDS(vha->hw)) {
1542 		mcp->mb[1] = fcport->loop_id;
1543 		mcp->mb[10] = 0;
1544 		mcp->out_mb |= MBX_10;
1545 	} else {
1546 		mcp->mb[1] = fcport->loop_id << 8;
1547 	}
1548 	mcp->mb[2] = vha->hw->loop_reset_delay;
1549 	mcp->mb[9] = vha->vp_idx;
1550 
1551 	mcp->in_mb = MBX_0;
1552 	mcp->tov = MBX_TOV_SECONDS;
1553 	mcp->flags = 0;
1554 	rval = qla2x00_mailbox_command(vha, mcp);
1555 	if (rval != QLA_SUCCESS) {
1556 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103f,
1557 		    "Failed=%x.\n", rval);
1558 	}
1559 
1560 	/* Issue marker IOCB. */
1561 	rval2 = qla2x00_marker(vha, vha->hw->base_qpair, fcport->loop_id, 0,
1562 							MK_SYNC_ID);
1563 	if (rval2 != QLA_SUCCESS) {
1564 		ql_dbg(ql_dbg_mbx, vha, 0x1040,
1565 		    "Failed to issue marker IOCB (%x).\n", rval2);
1566 	} else {
1567 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1041,
1568 		    "Done %s.\n", __func__);
1569 	}
1570 
1571 	return rval;
1572 }
1573 
1574 int
1575 qla2x00_lun_reset(struct fc_port *fcport, uint64_t l, int tag)
1576 {
1577 	int rval, rval2;
1578 	mbx_cmd_t  mc;
1579 	mbx_cmd_t  *mcp = &mc;
1580 	scsi_qla_host_t *vha;
1581 
1582 	vha = fcport->vha;
1583 
1584 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1042,
1585 	    "Entered %s.\n", __func__);
1586 
1587 	mcp->mb[0] = MBC_LUN_RESET;
1588 	mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
1589 	if (HAS_EXTENDED_IDS(vha->hw))
1590 		mcp->mb[1] = fcport->loop_id;
1591 	else
1592 		mcp->mb[1] = fcport->loop_id << 8;
1593 	mcp->mb[2] = (u32)l;
1594 	mcp->mb[3] = 0;
1595 	mcp->mb[9] = vha->vp_idx;
1596 
1597 	mcp->in_mb = MBX_0;
1598 	mcp->tov = MBX_TOV_SECONDS;
1599 	mcp->flags = 0;
1600 	rval = qla2x00_mailbox_command(vha, mcp);
1601 	if (rval != QLA_SUCCESS) {
1602 		ql_dbg(ql_dbg_mbx, vha, 0x1043, "Failed=%x.\n", rval);
1603 	}
1604 
1605 	/* Issue marker IOCB. */
1606 	rval2 = qla2x00_marker(vha, vha->hw->base_qpair, fcport->loop_id, l,
1607 								MK_SYNC_ID_LUN);
1608 	if (rval2 != QLA_SUCCESS) {
1609 		ql_dbg(ql_dbg_mbx, vha, 0x1044,
1610 		    "Failed to issue marker IOCB (%x).\n", rval2);
1611 	} else {
1612 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1045,
1613 		    "Done %s.\n", __func__);
1614 	}
1615 
1616 	return rval;
1617 }
1618 
1619 /*
1620  * qla2x00_get_adapter_id
1621  *	Get adapter ID and topology.
1622  *
1623  * Input:
1624  *	ha = adapter block pointer.
1625  *	id = pointer for loop ID.
1626  *	al_pa = pointer for AL_PA.
1627  *	area = pointer for area.
1628  *	domain = pointer for domain.
1629  *	top = pointer for topology.
1630  *	TARGET_QUEUE_LOCK must be released.
1631  *	ADAPTER_STATE_LOCK must be released.
1632  *
1633  * Returns:
1634  *	qla2x00 local function return status code.
1635  *
1636  * Context:
1637  *	Kernel context.
1638  */
1639 int
1640 qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
1641     uint8_t *area, uint8_t *domain, uint16_t *top, uint16_t *sw_cap)
1642 {
1643 	int rval;
1644 	mbx_cmd_t mc;
1645 	mbx_cmd_t *mcp = &mc;
1646 
1647 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1046,
1648 	    "Entered %s.\n", __func__);
1649 
1650 	mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID;
1651 	mcp->mb[9] = vha->vp_idx;
1652 	mcp->out_mb = MBX_9|MBX_0;
1653 	mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1654 	if (IS_CNA_CAPABLE(vha->hw))
1655 		mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10;
1656 	if (IS_FWI2_CAPABLE(vha->hw))
1657 		mcp->in_mb |= MBX_19|MBX_18|MBX_17|MBX_16;
1658 	if (IS_QLA27XX(vha->hw) || IS_QLA28XX(vha->hw))
1659 		mcp->in_mb |= MBX_15;
1660 	mcp->tov = MBX_TOV_SECONDS;
1661 	mcp->flags = 0;
1662 	rval = qla2x00_mailbox_command(vha, mcp);
1663 	if (mcp->mb[0] == MBS_COMMAND_ERROR)
1664 		rval = QLA_COMMAND_ERROR;
1665 	else if (mcp->mb[0] == MBS_INVALID_COMMAND)
1666 		rval = QLA_INVALID_COMMAND;
1667 
1668 	/* Return data. */
1669 	*id = mcp->mb[1];
1670 	*al_pa = LSB(mcp->mb[2]);
1671 	*area = MSB(mcp->mb[2]);
1672 	*domain	= LSB(mcp->mb[3]);
1673 	*top = mcp->mb[6];
1674 	*sw_cap = mcp->mb[7];
1675 
1676 	if (rval != QLA_SUCCESS) {
1677 		/*EMPTY*/
1678 		ql_dbg(ql_dbg_mbx, vha, 0x1047, "Failed=%x.\n", rval);
1679 	} else {
1680 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1048,
1681 		    "Done %s.\n", __func__);
1682 
1683 		if (IS_CNA_CAPABLE(vha->hw)) {
1684 			vha->fcoe_vlan_id = mcp->mb[9] & 0xfff;
1685 			vha->fcoe_fcf_idx = mcp->mb[10];
1686 			vha->fcoe_vn_port_mac[5] = mcp->mb[11] >> 8;
1687 			vha->fcoe_vn_port_mac[4] = mcp->mb[11] & 0xff;
1688 			vha->fcoe_vn_port_mac[3] = mcp->mb[12] >> 8;
1689 			vha->fcoe_vn_port_mac[2] = mcp->mb[12] & 0xff;
1690 			vha->fcoe_vn_port_mac[1] = mcp->mb[13] >> 8;
1691 			vha->fcoe_vn_port_mac[0] = mcp->mb[13] & 0xff;
1692 		}
1693 		/* If FA-WWN supported */
1694 		if (IS_FAWWN_CAPABLE(vha->hw)) {
1695 			if (mcp->mb[7] & BIT_14) {
1696 				vha->port_name[0] = MSB(mcp->mb[16]);
1697 				vha->port_name[1] = LSB(mcp->mb[16]);
1698 				vha->port_name[2] = MSB(mcp->mb[17]);
1699 				vha->port_name[3] = LSB(mcp->mb[17]);
1700 				vha->port_name[4] = MSB(mcp->mb[18]);
1701 				vha->port_name[5] = LSB(mcp->mb[18]);
1702 				vha->port_name[6] = MSB(mcp->mb[19]);
1703 				vha->port_name[7] = LSB(mcp->mb[19]);
1704 				fc_host_port_name(vha->host) =
1705 				    wwn_to_u64(vha->port_name);
1706 				ql_dbg(ql_dbg_mbx, vha, 0x10ca,
1707 				    "FA-WWN acquired %016llx\n",
1708 				    wwn_to_u64(vha->port_name));
1709 			}
1710 		}
1711 
1712 		if (IS_QLA27XX(vha->hw) || IS_QLA28XX(vha->hw))
1713 			vha->bbcr = mcp->mb[15];
1714 	}
1715 
1716 	return rval;
1717 }
1718 
1719 /*
1720  * qla2x00_get_retry_cnt
1721  *	Get current firmware login retry count and delay.
1722  *
1723  * Input:
1724  *	ha = adapter block pointer.
1725  *	retry_cnt = pointer to login retry count.
1726  *	tov = pointer to login timeout value.
1727  *
1728  * Returns:
1729  *	qla2x00 local function return status code.
1730  *
1731  * Context:
1732  *	Kernel context.
1733  */
1734 int
1735 qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov,
1736     uint16_t *r_a_tov)
1737 {
1738 	int rval;
1739 	uint16_t ratov;
1740 	mbx_cmd_t mc;
1741 	mbx_cmd_t *mcp = &mc;
1742 
1743 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1049,
1744 	    "Entered %s.\n", __func__);
1745 
1746 	mcp->mb[0] = MBC_GET_RETRY_COUNT;
1747 	mcp->out_mb = MBX_0;
1748 	mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1749 	mcp->tov = MBX_TOV_SECONDS;
1750 	mcp->flags = 0;
1751 	rval = qla2x00_mailbox_command(vha, mcp);
1752 
1753 	if (rval != QLA_SUCCESS) {
1754 		/*EMPTY*/
1755 		ql_dbg(ql_dbg_mbx, vha, 0x104a,
1756 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
1757 	} else {
1758 		/* Convert returned data and check our values. */
1759 		*r_a_tov = mcp->mb[3] / 2;
1760 		ratov = (mcp->mb[3]/2) / 10;  /* mb[3] value is in 100ms */
1761 		if (mcp->mb[1] * ratov > (*retry_cnt) * (*tov)) {
1762 			/* Update to the larger values */
1763 			*retry_cnt = (uint8_t)mcp->mb[1];
1764 			*tov = ratov;
1765 		}
1766 
1767 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104b,
1768 		    "Done %s mb3=%d ratov=%d.\n", __func__, mcp->mb[3], ratov);
1769 	}
1770 
1771 	return rval;
1772 }
1773 
1774 /*
1775  * qla2x00_init_firmware
1776  *	Initialize adapter firmware.
1777  *
1778  * Input:
1779  *	ha = adapter block pointer.
1780  *	dptr = Initialization control block pointer.
1781  *	size = size of initialization control block.
1782  *	TARGET_QUEUE_LOCK must be released.
1783  *	ADAPTER_STATE_LOCK must be released.
1784  *
1785  * Returns:
1786  *	qla2x00 local function return status code.
1787  *
1788  * Context:
1789  *	Kernel context.
1790  */
1791 int
1792 qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
1793 {
1794 	int rval;
1795 	mbx_cmd_t mc;
1796 	mbx_cmd_t *mcp = &mc;
1797 	struct qla_hw_data *ha = vha->hw;
1798 
1799 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104c,
1800 	    "Entered %s.\n", __func__);
1801 
1802 	if (IS_P3P_TYPE(ha) && ql2xdbwr)
1803 		qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr,
1804 			(0x04 | (ha->portnum << 5) | (0 << 8) | (0 << 16)));
1805 
1806 	if (ha->flags.npiv_supported)
1807 		mcp->mb[0] = MBC_MID_INITIALIZE_FIRMWARE;
1808 	else
1809 		mcp->mb[0] = MBC_INITIALIZE_FIRMWARE;
1810 
1811 	mcp->mb[1] = 0;
1812 	mcp->mb[2] = MSW(ha->init_cb_dma);
1813 	mcp->mb[3] = LSW(ha->init_cb_dma);
1814 	mcp->mb[6] = MSW(MSD(ha->init_cb_dma));
1815 	mcp->mb[7] = LSW(MSD(ha->init_cb_dma));
1816 	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
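	/*
	 * If an extended init control block is in use, flag it via mb[1]
	 * BIT_0 and pass its DMA address and size in mb[10]-mb[14].
	 */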
1817 	if (ha->ex_init_cb && ha->ex_init_cb->ex_version) {
1818 		mcp->mb[1] = BIT_0;
1819 		mcp->mb[10] = MSW(ha->ex_init_cb_dma);
1820 		mcp->mb[11] = LSW(ha->ex_init_cb_dma);
1821 		mcp->mb[12] = MSW(MSD(ha->ex_init_cb_dma));
1822 		mcp->mb[13] = LSW(MSD(ha->ex_init_cb_dma));
1823 		mcp->mb[14] = sizeof(*ha->ex_init_cb);
1824 		mcp->out_mb |= MBX_14|MBX_13|MBX_12|MBX_11|MBX_10;
1825 	}
1826 	/* 1 and 2 should normally be captured. */
1827 	mcp->in_mb = MBX_2|MBX_1|MBX_0;
1828 	if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
1829 		/* mb3 is additional info about the installed SFP. */
1830 		mcp->in_mb  |= MBX_3;
1831 	mcp->buf_size = size;
1832 	mcp->flags = MBX_DMA_OUT;
1833 	mcp->tov = MBX_TOV_SECONDS;
1834 	rval = qla2x00_mailbox_command(vha, mcp);
1835 
1836 	if (rval != QLA_SUCCESS) {
1837 		/*EMPTY*/
1838 		ql_dbg(ql_dbg_mbx, vha, 0x104d,
1839 		    "Failed=%x mb[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x.\n",
1840 		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3]);
1841 		if (ha->init_cb) {
1842 			ql_dbg(ql_dbg_mbx, vha, 0x104d, "init_cb:\n");
1843 			ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha,
1844 			    0x0104d, ha->init_cb, sizeof(*ha->init_cb));
1845 		}
1846 		if (ha->ex_init_cb && ha->ex_init_cb->ex_version) {
1847 			ql_dbg(ql_dbg_mbx, vha, 0x104d, "ex_init_cb:\n");
1848 			ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha,
1849 			    0x0104d, ha->ex_init_cb, sizeof(*ha->ex_init_cb));
1850 		}
1851 	} else {
1852 		if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
1853 			if (mcp->mb[2] == 6 || mcp->mb[3] == 2)
1854 				ql_dbg(ql_dbg_mbx, vha, 0x119d,
1855 				    "Invalid SFP/Validation Failed\n");
1856 		}
1857 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104e,
1858 		    "Done %s.\n", __func__);
1859 	}
1860 
1861 	return rval;
1862 }
1863 
1864 
1865 /*
1866  * qla2x00_get_port_database
1867  *	Issue normal/enhanced get port database mailbox command
1868  *	and copy device name as necessary.
1869  *
1870  * Input:
1871  *	ha = adapter state pointer.
1872  *	fcport = FC port structure pointer.
1873  *	opt = enhanced cmd option byte.
1874  *
1875  * Returns:
1876  *	qla2x00 local function return status code.
1877  *
1878  * Context:
1879  *	Kernel context.
1880  */
1881 int
1882 qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
1883 {
1884 	int rval;
1885 	mbx_cmd_t mc;
1886 	mbx_cmd_t *mcp = &mc;
1887 	port_database_t *pd;
1888 	struct port_database_24xx *pd24;
1889 	dma_addr_t pd_dma;
1890 	struct qla_hw_data *ha = vha->hw;
1891 
1892 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104f,
1893 	    "Entered %s.\n", __func__);
1894 
1895 	pd24 = NULL;
1896 	pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
1897 	if (pd == NULL) {
1898 		ql_log(ql_log_warn, vha, 0x1050,
1899 		    "Failed to allocate port database structure.\n");
1900 		fcport->query = 0;
1901 		return QLA_MEMORY_ALLOC_FAILED;
1902 	}
1903 
1904 	mcp->mb[0] = MBC_GET_PORT_DATABASE;
1905 	if (opt != 0 && !IS_FWI2_CAPABLE(ha))
1906 		mcp->mb[0] = MBC_ENHANCED_GET_PORT_DATABASE;
1907 	mcp->mb[2] = MSW(pd_dma);
1908 	mcp->mb[3] = LSW(pd_dma);
1909 	mcp->mb[6] = MSW(MSD(pd_dma));
1910 	mcp->mb[7] = LSW(MSD(pd_dma));
1911 	mcp->mb[9] = vha->vp_idx;
1912 	mcp->out_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
1913 	mcp->in_mb = MBX_0;
1914 	if (IS_FWI2_CAPABLE(ha)) {
1915 		mcp->mb[1] = fcport->loop_id;
1916 		mcp->mb[10] = opt;
1917 		mcp->out_mb |= MBX_10|MBX_1;
1918 		mcp->in_mb |= MBX_1;
1919 	} else if (HAS_EXTENDED_IDS(ha)) {
1920 		mcp->mb[1] = fcport->loop_id;
1921 		mcp->mb[10] = opt;
1922 		mcp->out_mb |= MBX_10|MBX_1;
1923 	} else {
1924 		mcp->mb[1] = fcport->loop_id << 8 | opt;
1925 		mcp->out_mb |= MBX_1;
1926 	}
1927 	mcp->buf_size = IS_FWI2_CAPABLE(ha) ?
1928 	    PORT_DATABASE_24XX_SIZE : PORT_DATABASE_SIZE;
1929 	mcp->flags = MBX_DMA_IN;
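	/* Allow 2.5 x the login timeout for the command to complete. */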
1930 	mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
1931 	rval = qla2x00_mailbox_command(vha, mcp);
1932 	if (rval != QLA_SUCCESS)
1933 		goto gpd_error_out;
1934 
1935 	if (IS_FWI2_CAPABLE(ha)) {
1936 		uint64_t zero = 0;
1937 		u8 current_login_state, last_login_state;
1938 
1939 		pd24 = (struct port_database_24xx *) pd;
1940 
1941 		/* Pick the login state nibble for this port's FC-4 type. */
1942 		if (fcport->fc4f_nvme) {
1943 			current_login_state = pd24->current_login_state >> 4;
1944 			last_login_state = pd24->last_login_state >> 4;
1945 		} else {
1946 			current_login_state = pd24->current_login_state & 0xf;
1947 			last_login_state = pd24->last_login_state & 0xf;
1948 		}
1949 		fcport->current_login_state = pd24->current_login_state;
1950 		fcport->last_login_state = pd24->last_login_state;
1951 
1952 		/* Check for logged in state. */
1953 		if (current_login_state != PDS_PRLI_COMPLETE &&
1954 		    last_login_state != PDS_PRLI_COMPLETE) {
1955 			ql_dbg(ql_dbg_mbx, vha, 0x119a,
1956 			    "Unable to verify login-state (%x/%x) for loop_id %x.\n",
1957 			    current_login_state, last_login_state,
1958 			    fcport->loop_id);
1959 			rval = QLA_FUNCTION_FAILED;
1960 
1961 			if (!fcport->query)
1962 				goto gpd_error_out;
1963 		}
1964 
1965 		if (fcport->loop_id == FC_NO_LOOP_ID ||
1966 		    (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
1967 		     memcmp(fcport->port_name, pd24->port_name, 8))) {
1968 			/* We lost the device mid way. */
1969 			rval = QLA_NOT_LOGGED_IN;
1970 			goto gpd_error_out;
1971 		}
1972 
1973 		/* Names are little-endian. */
1974 		memcpy(fcport->node_name, pd24->node_name, WWN_SIZE);
1975 		memcpy(fcport->port_name, pd24->port_name, WWN_SIZE);
1976 
1977 		/* Get port_id of device. */
1978 		fcport->d_id.b.domain = pd24->port_id[0];
1979 		fcport->d_id.b.area = pd24->port_id[1];
1980 		fcport->d_id.b.al_pa = pd24->port_id[2];
1981 		fcport->d_id.b.rsvd_1 = 0;
1982 
1983 		/* If not target must be initiator or unknown type. */
1984 		if ((pd24->prli_svc_param_word_3[0] & BIT_4) == 0)
1985 			fcport->port_type = FCT_INITIATOR;
1986 		else
1987 			fcport->port_type = FCT_TARGET;
1988 
1989 		/* Passback COS information. */
1990 		fcport->supported_classes = (pd24->flags & PDF_CLASS_2) ?
1991 				FC_COS_CLASS2 : FC_COS_CLASS3;
1992 
1993 		if (pd24->prli_svc_param_word_3[0] & BIT_7)
1994 			fcport->flags |= FCF_CONF_COMP_SUPPORTED;
1995 	} else {
1996 		uint64_t zero = 0;
1997 
1998 		/* Check for logged in state. */
1999 		if (pd->master_state != PD_STATE_PORT_LOGGED_IN &&
2000 		    pd->slave_state != PD_STATE_PORT_LOGGED_IN) {
2001 			ql_dbg(ql_dbg_mbx, vha, 0x100a,
2002 			    "Unable to verify login-state (%x/%x) - "
2003 			    "portid=%02x%02x%02x.\n", pd->master_state,
2004 			    pd->slave_state, fcport->d_id.b.domain,
2005 			    fcport->d_id.b.area, fcport->d_id.b.al_pa);
2006 			rval = QLA_FUNCTION_FAILED;
2007 			goto gpd_error_out;
2008 		}
2009 
2010 		if (fcport->loop_id == FC_NO_LOOP_ID ||
2011 		    (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
2012 		     memcmp(fcport->port_name, pd->port_name, 8))) {
2013 			/* We lost the device mid way. */
2014 			rval = QLA_NOT_LOGGED_IN;
2015 			goto gpd_error_out;
2016 		}
2017 
2018 		/* Names are little-endian. */
2019 		memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
2020 		memcpy(fcport->port_name, pd->port_name, WWN_SIZE);
2021 
2022 		/* Get port_id of device. */
2023 		fcport->d_id.b.domain = pd->port_id[0];
2024 		fcport->d_id.b.area = pd->port_id[3];
2025 		fcport->d_id.b.al_pa = pd->port_id[2];
2026 		fcport->d_id.b.rsvd_1 = 0;
2027 
2028 		/* If not target must be initiator or unknown type. */
2029 		if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
2030 			fcport->port_type = FCT_INITIATOR;
2031 		else
2032 			fcport->port_type = FCT_TARGET;
2033 
2034 		/* Passback COS information. */
2035 		fcport->supported_classes = (pd->options & BIT_4) ?
2036 		    FC_COS_CLASS2 : FC_COS_CLASS3;
2037 	}
2038 
2039 gpd_error_out:
2040 	dma_pool_free(ha->s_dma_pool, pd, pd_dma);
2041 	fcport->query = 0;
2042 
2043 	if (rval != QLA_SUCCESS) {
2044 		ql_dbg(ql_dbg_mbx, vha, 0x1052,
2045 		    "Failed=%x mb[0]=%x mb[1]=%x.\n", rval,
2046 		    mcp->mb[0], mcp->mb[1]);
2047 	} else {
2048 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1053,
2049 		    "Done %s.\n", __func__);
2050 	}
2051 
2052 	return rval;
2053 }
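
/*
 * Usage sketch (hypothetical caller, not taken from this file): refresh a
 * remote port's database entry after login; opt 0 requests the standard
 * (non-enhanced) format. On failure the port is simply scheduled for
 * relogin.
 *
 *	if (qla2x00_get_port_database(vha, fcport, 0) != QLA_SUCCESS)
 *		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
 */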
2054 
2055 /*
2056  * qla2x00_get_firmware_state
2057  *	Get adapter firmware state.
2058  *
2059  * Input:
2060  *	ha = adapter block pointer.
2061  *	states = pointer for firmware state array.
2062  *	TARGET_QUEUE_LOCK must be released.
2063  *	ADAPTER_STATE_LOCK must be released.
2064  *
2065  * Returns:
2066  *	qla2x00 local function return status code.
2067  *
2068  * Context:
2069  *	Kernel context.
2070  */
2071 int
2072 qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
2073 {
2074 	int rval;
2075 	mbx_cmd_t mc;
2076 	mbx_cmd_t *mcp = &mc;
2077 	struct qla_hw_data *ha = vha->hw;
2078 
2079 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1054,
2080 	    "Entered %s.\n", __func__);
2081 
2082 	mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
2083 	mcp->out_mb = MBX_0;
2084 	if (IS_FWI2_CAPABLE(vha->hw))
2085 		mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
2086 	else
2087 		mcp->in_mb = MBX_1|MBX_0;
2088 	mcp->tov = MBX_TOV_SECONDS;
2089 	mcp->flags = 0;
2090 	rval = qla2x00_mailbox_command(vha, mcp);
2091 
2092 	/* Return firmware states. */
2093 	states[0] = mcp->mb[1];
2094 	if (IS_FWI2_CAPABLE(vha->hw)) {
2095 		states[1] = mcp->mb[2];
2096 		states[2] = mcp->mb[3];  /* SFP info */
2097 		states[3] = mcp->mb[4];
2098 		states[4] = mcp->mb[5];
2099 		states[5] = mcp->mb[6];  /* DPORT status */
2100 	}
2101 
2102 	if (rval != QLA_SUCCESS) {
2103 		/*EMPTY*/
2104 		ql_dbg(ql_dbg_mbx, vha, 0x1055, "Failed=%x.\n", rval);
2105 	} else {
2106 		if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
2107 			if (mcp->mb[2] == 6 || mcp->mb[3] == 2)
2108 				ql_dbg(ql_dbg_mbx, vha, 0x119e,
2109 				    "Invalid SFP/Validation Failed\n");
2110 		}
2111 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1056,
2112 		    "Done %s.\n", __func__);
2113 	}
2114 
2115 	return rval;
2116 }
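
/*
 * Usage sketch (hypothetical, modelled on the firmware-ready wait loop in
 * qla_init.c): poll until the firmware reports a ready state. FSTATE_READY
 * is assumed to be the ready-state value defined elsewhere in the driver.
 *
 *	uint16_t state[6];
 *
 *	if (qla2x00_get_firmware_state(vha, state) == QLA_SUCCESS &&
 *	    state[0] == FSTATE_READY)
 *		return QLA_SUCCESS;
 */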
2117 
2118 /*
2119  * qla2x00_get_port_name
2120  *	Issue get port name mailbox command.
2121  *	Returned name is in big endian format.
2122  *
2123  * Input:
2124  *	ha = adapter block pointer.
2125  *	loop_id = loop ID of device.
2126  *	name = pointer for name.
2127  *	TARGET_QUEUE_LOCK must be released.
2128  *	ADAPTER_STATE_LOCK must be released.
2129  *
2130  * Returns:
2131  *	qla2x00 local function return status code.
2132  *
2133  * Context:
2134  *	Kernel context.
2135  */
2136 int
2137 qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name,
2138     uint8_t opt)
2139 {
2140 	int rval;
2141 	mbx_cmd_t mc;
2142 	mbx_cmd_t *mcp = &mc;
2143 
2144 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1057,
2145 	    "Entered %s.\n", __func__);
2146 
2147 	mcp->mb[0] = MBC_GET_PORT_NAME;
2148 	mcp->mb[9] = vha->vp_idx;
2149 	mcp->out_mb = MBX_9|MBX_1|MBX_0;
2150 	if (HAS_EXTENDED_IDS(vha->hw)) {
2151 		mcp->mb[1] = loop_id;
2152 		mcp->mb[10] = opt;
2153 		mcp->out_mb |= MBX_10;
2154 	} else {
2155 		mcp->mb[1] = loop_id << 8 | opt;
2156 	}
2157 
2158 	mcp->in_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
2159 	mcp->tov = MBX_TOV_SECONDS;
2160 	mcp->flags = 0;
2161 	rval = qla2x00_mailbox_command(vha, mcp);
2162 
2163 	if (rval != QLA_SUCCESS) {
2164 		/*EMPTY*/
2165 		ql_dbg(ql_dbg_mbx, vha, 0x1058, "Failed=%x.\n", rval);
2166 	} else {
2167 		if (name != NULL) {
2168 			/* This function returns name in big endian. */
2169 			name[0] = MSB(mcp->mb[2]);
2170 			name[1] = LSB(mcp->mb[2]);
2171 			name[2] = MSB(mcp->mb[3]);
2172 			name[3] = LSB(mcp->mb[3]);
2173 			name[4] = MSB(mcp->mb[6]);
2174 			name[5] = LSB(mcp->mb[6]);
2175 			name[6] = MSB(mcp->mb[7]);
2176 			name[7] = LSB(mcp->mb[7]);
2177 		}
2178 
2179 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1059,
2180 		    "Done %s.\n", __func__);
2181 	}
2182 
2183 	return rval;
2184 }
2185 
2186 /*
2187  * qla24xx_link_initialize
2188  *	Issue link initialization mailbox command.
2189  *
2190  * Input:
2191  *	ha = adapter block pointer.
2192  *	TARGET_QUEUE_LOCK must be released.
2193  *	ADAPTER_STATE_LOCK must be released.
2194  *
2195  * Returns:
2196  *	qla2x00 local function return status code.
2197  *
2198  * Context:
2199  *	Kernel context.
2200  */
2201 int
2202 qla24xx_link_initialize(scsi_qla_host_t *vha)
2203 {
2204 	int rval;
2205 	mbx_cmd_t mc;
2206 	mbx_cmd_t *mcp = &mc;
2207 
2208 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1152,
2209 	    "Entered %s.\n", __func__);
2210 
2211 	if (!IS_FWI2_CAPABLE(vha->hw) || IS_CNA_CAPABLE(vha->hw))
2212 		return QLA_FUNCTION_FAILED;
2213 
2214 	mcp->mb[0] = MBC_LINK_INITIALIZATION;
2215 	mcp->mb[1] = BIT_4;
2216 	if (vha->hw->operating_mode == LOOP)
2217 		mcp->mb[1] |= BIT_6;
2218 	else
2219 		mcp->mb[1] |= BIT_5;
2220 	mcp->mb[2] = 0;
2221 	mcp->mb[3] = 0;
2222 	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2223 	mcp->in_mb = MBX_0;
2224 	mcp->tov = MBX_TOV_SECONDS;
2225 	mcp->flags = 0;
2226 	rval = qla2x00_mailbox_command(vha, mcp);
2227 
2228 	if (rval != QLA_SUCCESS) {
2229 		ql_dbg(ql_dbg_mbx, vha, 0x1153, "Failed=%x.\n", rval);
2230 	} else {
2231 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1154,
2232 		    "Done %s.\n", __func__);
2233 	}
2234 
2235 	return rval;
2236 }
2237 
2238 /*
2239  * qla2x00_lip_reset
2240  *	Issue LIP reset mailbox command.
2241  *
2242  * Input:
2243  *	ha = adapter block pointer.
2244  *	TARGET_QUEUE_LOCK must be released.
2245  *	ADAPTER_STATE_LOCK must be released.
2246  *
2247  * Returns:
2248  *	qla2x00 local function return status code.
2249  *
2250  * Context:
2251  *	Kernel context.
2252  */
2253 int
2254 qla2x00_lip_reset(scsi_qla_host_t *vha)
2255 {
2256 	int rval;
2257 	mbx_cmd_t mc;
2258 	mbx_cmd_t *mcp = &mc;
2259 
2260 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105a,
2261 	    "Entered %s.\n", __func__);
2262 
2263 	if (IS_CNA_CAPABLE(vha->hw)) {
2264 		/* Logout across all FCFs. */
2265 		mcp->mb[0] = MBC_LIP_FULL_LOGIN;
2266 		mcp->mb[1] = BIT_1;
2267 		mcp->mb[2] = 0;
2268 		mcp->out_mb = MBX_2|MBX_1|MBX_0;
2269 	} else if (IS_FWI2_CAPABLE(vha->hw)) {
2270 		mcp->mb[0] = MBC_LIP_FULL_LOGIN;
2271 		mcp->mb[1] = BIT_4;
2272 		mcp->mb[2] = 0;
2273 		mcp->mb[3] = vha->hw->loop_reset_delay;
2274 		mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2275 	} else {
2276 		mcp->mb[0] = MBC_LIP_RESET;
2277 		mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2278 		if (HAS_EXTENDED_IDS(vha->hw)) {
2279 			mcp->mb[1] = 0x00ff;
2280 			mcp->mb[10] = 0;
2281 			mcp->out_mb |= MBX_10;
2282 		} else {
2283 			mcp->mb[1] = 0xff00;
2284 		}
2285 		mcp->mb[2] = vha->hw->loop_reset_delay;
2286 		mcp->mb[3] = 0;
2287 	}
2288 	mcp->in_mb = MBX_0;
2289 	mcp->tov = MBX_TOV_SECONDS;
2290 	mcp->flags = 0;
2291 	rval = qla2x00_mailbox_command(vha, mcp);
2292 
2293 	if (rval != QLA_SUCCESS) {
2294 		/*EMPTY*/
2295 		ql_dbg(ql_dbg_mbx, vha, 0x105b, "Failed=%x.\n", rval);
2296 	} else {
2297 		/*EMPTY*/
2298 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105c,
2299 		    "Done %s.\n", __func__);
2300 	}
2301 
2302 	return rval;
2303 }
2304 
2305 /*
2306  * qla2x00_send_sns
2307  *	Send SNS command.
2308  *
2309  * Input:
2310  *	ha = adapter block pointer.
2311  *	sns = pointer for command.
2312  *	cmd_size = command size.
2313  *	buf_size = response/command size.
2314  *	TARGET_QUEUE_LOCK must be released.
2315  *	ADAPTER_STATE_LOCK must be released.
2316  *
2317  * Returns:
2318  *	qla2x00 local function return status code.
2319  *
2320  * Context:
2321  *	Kernel context.
2322  */
2323 int
2324 qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address,
2325     uint16_t cmd_size, size_t buf_size)
2326 {
2327 	int rval;
2328 	mbx_cmd_t mc;
2329 	mbx_cmd_t *mcp = &mc;
2330 
2331 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105d,
2332 	    "Entered %s.\n", __func__);
2333 
2334 	mcp->tov = (vha->hw->login_timeout * 2) + (vha->hw->login_timeout / 2);
2335 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105e,
2336 	    "Retry cnt=%d login tov=%d total tov=%d.\n",
2337 	    vha->hw->retry_count, vha->hw->login_timeout, mcp->tov);
2338 
2339 	mcp->mb[0] = MBC_SEND_SNS_COMMAND;
2340 	mcp->mb[1] = cmd_size;
2341 	mcp->mb[2] = MSW(sns_phys_address);
2342 	mcp->mb[3] = LSW(sns_phys_address);
2343 	mcp->mb[6] = MSW(MSD(sns_phys_address));
2344 	mcp->mb[7] = LSW(MSD(sns_phys_address));
2345 	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
2346 	mcp->in_mb = MBX_1|MBX_0;
2347 	mcp->buf_size = buf_size;
2348 	mcp->flags = MBX_DMA_OUT|MBX_DMA_IN;
2349 	rval = qla2x00_mailbox_command(vha, mcp);
2350 
2351 	if (rval != QLA_SUCCESS) {
2352 		/*EMPTY*/
2353 		ql_dbg(ql_dbg_mbx, vha, 0x105f,
2354 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
2355 		    rval, mcp->mb[0], mcp->mb[1]);
2356 	} else {
2357 		/*EMPTY*/
2358 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1060,
2359 		    "Done %s.\n", __func__);
2360 	}
2361 
2362 	return rval;
2363 }
2364 
2365 int
2366 qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2367     uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt)
2368 {
2369 	int		rval;
2370 
2371 	struct logio_entry_24xx *lg;
2372 	dma_addr_t	lg_dma;
2373 	uint32_t	iop[2];
2374 	struct qla_hw_data *ha = vha->hw;
2375 	struct req_que *req;
2376 
2377 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1061,
2378 	    "Entered %s.\n", __func__);
2379 
2380 	if (vha->vp_idx && vha->qpair)
2381 		req = vha->qpair->req;
2382 	else
2383 		req = ha->req_q_map[0];
2384 
2385 	lg = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
2386 	if (lg == NULL) {
2387 		ql_log(ql_log_warn, vha, 0x1062,
2388 		    "Failed to allocate login IOCB.\n");
2389 		return QLA_MEMORY_ALLOC_FAILED;
2390 	}
2391 
2392 	lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2393 	lg->entry_count = 1;
2394 	lg->handle = MAKE_HANDLE(req->id, lg->handle);
2395 	lg->nport_handle = cpu_to_le16(loop_id);
2396 	lg->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
2397 	if (opt & BIT_0)
2398 		lg->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
2399 	if (opt & BIT_1)
2400 		lg->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
2401 	lg->port_id[0] = al_pa;
2402 	lg->port_id[1] = area;
2403 	lg->port_id[2] = domain;
2404 	lg->vp_index = vha->vp_idx;
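	/*
	 * ha->r_a_tov is kept in 100 ms units, so this waits roughly twice
	 * R_A_TOV plus a couple of seconds of slack for the login IOCB.
	 */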
2405 	rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0,
2406 	    (ha->r_a_tov / 10 * 2) + 2);
2407 	if (rval != QLA_SUCCESS) {
2408 		ql_dbg(ql_dbg_mbx, vha, 0x1063,
2409 		    "Failed to issue login IOCB (%x).\n", rval);
2410 	} else if (lg->entry_status != 0) {
2411 		ql_dbg(ql_dbg_mbx, vha, 0x1064,
2412 		    "Failed to complete IOCB -- error status (%x).\n",
2413 		    lg->entry_status);
2414 		rval = QLA_FUNCTION_FAILED;
2415 	} else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) {
2416 		iop[0] = le32_to_cpu(lg->io_parameter[0]);
2417 		iop[1] = le32_to_cpu(lg->io_parameter[1]);
2418 
2419 		ql_dbg(ql_dbg_mbx, vha, 0x1065,
2420 		    "Failed to complete IOCB -- completion  status (%x) "
2421 		    "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
2422 		    iop[0], iop[1]);
2423 
2424 		switch (iop[0]) {
2425 		case LSC_SCODE_PORTID_USED:
2426 			mb[0] = MBS_PORT_ID_USED;
2427 			mb[1] = LSW(iop[1]);
2428 			break;
2429 		case LSC_SCODE_NPORT_USED:
2430 			mb[0] = MBS_LOOP_ID_USED;
2431 			break;
2432 		case LSC_SCODE_NOLINK:
2433 		case LSC_SCODE_NOIOCB:
2434 		case LSC_SCODE_NOXCB:
2435 		case LSC_SCODE_CMD_FAILED:
2436 		case LSC_SCODE_NOFABRIC:
2437 		case LSC_SCODE_FW_NOT_READY:
2438 		case LSC_SCODE_NOT_LOGGED_IN:
2439 		case LSC_SCODE_NOPCB:
2440 		case LSC_SCODE_ELS_REJECT:
2441 		case LSC_SCODE_CMD_PARAM_ERR:
2442 		case LSC_SCODE_NONPORT:
2443 		case LSC_SCODE_LOGGED_IN:
2444 		case LSC_SCODE_NOFLOGI_ACC:
2445 		default:
2446 			mb[0] = MBS_COMMAND_ERROR;
2447 			break;
2448 		}
2449 	} else {
2450 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1066,
2451 		    "Done %s.\n", __func__);
2452 
2453 		iop[0] = le32_to_cpu(lg->io_parameter[0]);
2454 
2455 		mb[0] = MBS_COMMAND_COMPLETE;
2456 		mb[1] = 0;
2457 		if (iop[0] & BIT_4) {
2458 			if (iop[0] & BIT_8)
2459 				mb[1] |= BIT_1;
2460 		} else
2461 			mb[1] = BIT_0;
2462 
2463 		/* Passback COS information. */
2464 		mb[10] = 0;
2465 		if (lg->io_parameter[7] || lg->io_parameter[8])
2466 			mb[10] |= BIT_0;	/* Class 2. */
2467 		if (lg->io_parameter[9] || lg->io_parameter[10])
2468 			mb[10] |= BIT_1;	/* Class 3. */
2469 		if (lg->io_parameter[0] & cpu_to_le32(BIT_7))
2470 			mb[10] |= BIT_7;	/* Confirmed Completion
2471 						 * Allowed
2472 						 */
2473 	}
2474 
2475 	dma_pool_free(ha->s_dma_pool, lg, lg_dma);
2476 
2477 	return rval;
2478 }
2479 
2480 /*
2481  * qla2x00_login_fabric
2482  *	Issue login fabric port mailbox command.
2483  *
2484  * Input:
2485  *	ha = adapter block pointer.
2486  *	loop_id = device loop ID.
2487  *	domain = device domain.
2488  *	area = device area.
2489  *	al_pa = device AL_PA.
2490  *	status = pointer for return status.
2491  *	opt = command options.
2492  *	TARGET_QUEUE_LOCK must be released.
2493  *	ADAPTER_STATE_LOCK must be released.
2494  *
2495  * Returns:
2496  *	qla2x00 local function return status code.
2497  *
2498  * Context:
2499  *	Kernel context.
2500  */
2501 int
2502 qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2503     uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt)
2504 {
2505 	int rval;
2506 	mbx_cmd_t mc;
2507 	mbx_cmd_t *mcp = &mc;
2508 	struct qla_hw_data *ha = vha->hw;
2509 
2510 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1067,
2511 	    "Entered %s.\n", __func__);
2512 
2513 	mcp->mb[0] = MBC_LOGIN_FABRIC_PORT;
2514 	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2515 	if (HAS_EXTENDED_IDS(ha)) {
2516 		mcp->mb[1] = loop_id;
2517 		mcp->mb[10] = opt;
2518 		mcp->out_mb |= MBX_10;
2519 	} else {
2520 		mcp->mb[1] = (loop_id << 8) | opt;
2521 	}
2522 	mcp->mb[2] = domain;
2523 	mcp->mb[3] = area << 8 | al_pa;
2524 
2525 	mcp->in_mb = MBX_7|MBX_6|MBX_2|MBX_1|MBX_0;
2526 	mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
2527 	mcp->flags = 0;
2528 	rval = qla2x00_mailbox_command(vha, mcp);
2529 
2530 	/* Return mailbox statuses. */
2531 	if (mb != NULL) {
2532 		mb[0] = mcp->mb[0];
2533 		mb[1] = mcp->mb[1];
2534 		mb[2] = mcp->mb[2];
2535 		mb[6] = mcp->mb[6];
2536 		mb[7] = mcp->mb[7];
2537 		/* COS retrieved from Get-Port-Database mailbox command. */
2538 		mb[10] = 0;
2539 	}
2540 
2541 	if (rval != QLA_SUCCESS) {
2542 		/* RLU tmp code: the main mailbox_command function needs to be
2543 		 * changed to return ok even when the mailbox completion value
2544 		 * is not SUCCESS. The caller is then responsible for
2545 		 * interpreting the return values of this mailbox command if
2546 		 * we're not to change too much of the existing code.
2547 		 */
2548 		if (mcp->mb[0] == 0x4001 || mcp->mb[0] == 0x4002 ||
2549 		    mcp->mb[0] == 0x4003 || mcp->mb[0] == 0x4005 ||
2550 		    mcp->mb[0] == 0x4006)
2551 			rval = QLA_SUCCESS;
2552 
2553 		/*EMPTY*/
2554 		ql_dbg(ql_dbg_mbx, vha, 0x1068,
2555 		    "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
2556 		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
2557 	} else {
2558 		/*EMPTY*/
2559 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1069,
2560 		    "Done %s.\n", __func__);
2561 	}
2562 
2563 	return rval;
2564 }
2565 
2566 /*
2567  * qla2x00_login_local_device
2568  *	Issue login loop port mailbox command.
2569  *
2570  * Input:
2571  *	ha = adapter block pointer.
2572  *	loop_id = device loop ID.
2573  *	opt = command options.
2574  *
2575  * Returns:
2576  *	Return status code.
2577  *
2578  * Context:
2579  *	Kernel context.
2580  *
2581  */
2582 int
2583 qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport,
2584     uint16_t *mb_ret, uint8_t opt)
2585 {
2586 	int rval;
2587 	mbx_cmd_t mc;
2588 	mbx_cmd_t *mcp = &mc;
2589 	struct qla_hw_data *ha = vha->hw;
2590 
2591 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106a,
2592 	    "Entered %s.\n", __func__);
2593 
2594 	if (IS_FWI2_CAPABLE(ha))
2595 		return qla24xx_login_fabric(vha, fcport->loop_id,
2596 		    fcport->d_id.b.domain, fcport->d_id.b.area,
2597 		    fcport->d_id.b.al_pa, mb_ret, opt);
2598 
2599 	mcp->mb[0] = MBC_LOGIN_LOOP_PORT;
2600 	if (HAS_EXTENDED_IDS(ha))
2601 		mcp->mb[1] = fcport->loop_id;
2602 	else
2603 		mcp->mb[1] = fcport->loop_id << 8;
2604 	mcp->mb[2] = opt;
2605 	mcp->out_mb = MBX_2|MBX_1|MBX_0;
2606 	mcp->in_mb = MBX_7|MBX_6|MBX_1|MBX_0;
2607 	mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
2608 	mcp->flags = 0;
2609 	rval = qla2x00_mailbox_command(vha, mcp);
2610 
2611 	/* Return mailbox statuses. */
2612 	if (mb_ret != NULL) {
2613 		mb_ret[0] = mcp->mb[0];
2614 		mb_ret[1] = mcp->mb[1];
2615 		mb_ret[6] = mcp->mb[6];
2616 		mb_ret[7] = mcp->mb[7];
2617 	}
2618 
2619 	if (rval != QLA_SUCCESS) {
2620 		/* AV tmp code: the main mailbox_command function needs to be
2621 		 * changed to return ok even when the mailbox completion value
2622 		 * is not SUCCESS. The caller is then responsible for
2623 		 * interpreting the return values of this mailbox command if
2624 		 * we're not to change too much of the existing code.
2625 		 */
2626 		if (mcp->mb[0] == 0x4005 || mcp->mb[0] == 0x4006)
2627 			rval = QLA_SUCCESS;
2628 
2629 		ql_dbg(ql_dbg_mbx, vha, 0x106b,
2630 		    "Failed=%x mb[0]=%x mb[1]=%x mb[6]=%x mb[7]=%x.\n",
2631 		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]);
2632 	} else {
2633 		/*EMPTY*/
2634 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106c,
2635 		    "Done %s.\n", __func__);
2636 	}
2637 
2638 	return (rval);
2639 }
2640 
2641 int
2642 qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2643     uint8_t area, uint8_t al_pa)
2644 {
2645 	int		rval;
2646 	struct logio_entry_24xx *lg;
2647 	dma_addr_t	lg_dma;
2648 	struct qla_hw_data *ha = vha->hw;
2649 	struct req_que *req;
2650 
2651 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106d,
2652 	    "Entered %s.\n", __func__);
2653 
2654 	lg = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
2655 	if (lg == NULL) {
2656 		ql_log(ql_log_warn, vha, 0x106e,
2657 		    "Failed to allocate logout IOCB.\n");
2658 		return QLA_MEMORY_ALLOC_FAILED;
2659 	}
2660 
2661 	req = vha->req;
2662 	lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2663 	lg->entry_count = 1;
2664 	lg->handle = MAKE_HANDLE(req->id, lg->handle);
2665 	lg->nport_handle = cpu_to_le16(loop_id);
2666 	lg->control_flags =
2667 	    cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO|
2668 		LCF_FREE_NPORT);
2669 	lg->port_id[0] = al_pa;
2670 	lg->port_id[1] = area;
2671 	lg->port_id[2] = domain;
2672 	lg->vp_index = vha->vp_idx;
2673 	rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0,
2674 	    (ha->r_a_tov / 10 * 2) + 2);
2675 	if (rval != QLA_SUCCESS) {
2676 		ql_dbg(ql_dbg_mbx, vha, 0x106f,
2677 		    "Failed to issue logout IOCB (%x).\n", rval);
2678 	} else if (lg->entry_status != 0) {
2679 		ql_dbg(ql_dbg_mbx, vha, 0x1070,
2680 		    "Failed to complete IOCB -- error status (%x).\n",
2681 		    lg->entry_status);
2682 		rval = QLA_FUNCTION_FAILED;
2683 	} else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) {
2684 		ql_dbg(ql_dbg_mbx, vha, 0x1071,
2685 		    "Failed to complete IOCB -- completion status (%x) "
2686 		    "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
2687 		    le32_to_cpu(lg->io_parameter[0]),
2688 		    le32_to_cpu(lg->io_parameter[1]));
2689 	} else {
2690 		/*EMPTY*/
2691 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1072,
2692 		    "Done %s.\n", __func__);
2693 	}
2694 
2695 	dma_pool_free(ha->s_dma_pool, lg, lg_dma);
2696 
2697 	return rval;
2698 }
2699 
2700 /*
2701  * qla2x00_fabric_logout
2702  *	Issue logout fabric port mailbox command.
2703  *
2704  * Input:
2705  *	ha = adapter block pointer.
2706  *	loop_id = device loop ID.
2707  *	TARGET_QUEUE_LOCK must be released.
2708  *	ADAPTER_STATE_LOCK must be released.
2709  *
2710  * Returns:
2711  *	qla2x00 local function return status code.
2712  *
2713  * Context:
2714  *	Kernel context.
2715  */
2716 int
2717 qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2718     uint8_t area, uint8_t al_pa)
2719 {
2720 	int rval;
2721 	mbx_cmd_t mc;
2722 	mbx_cmd_t *mcp = &mc;
2723 
2724 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1073,
2725 	    "Entered %s.\n", __func__);
2726 
2727 	mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT;
2728 	mcp->out_mb = MBX_1|MBX_0;
2729 	if (HAS_EXTENDED_IDS(vha->hw)) {
2730 		mcp->mb[1] = loop_id;
2731 		mcp->mb[10] = 0;
2732 		mcp->out_mb |= MBX_10;
2733 	} else {
2734 		mcp->mb[1] = loop_id << 8;
2735 	}
2736 
2737 	mcp->in_mb = MBX_1|MBX_0;
2738 	mcp->tov = MBX_TOV_SECONDS;
2739 	mcp->flags = 0;
2740 	rval = qla2x00_mailbox_command(vha, mcp);
2741 
2742 	if (rval != QLA_SUCCESS) {
2743 		/*EMPTY*/
2744 		ql_dbg(ql_dbg_mbx, vha, 0x1074,
2745 		    "Failed=%x mb[1]=%x.\n", rval, mcp->mb[1]);
2746 	} else {
2747 		/*EMPTY*/
2748 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1075,
2749 		    "Done %s.\n", __func__);
2750 	}
2751 
2752 	return rval;
2753 }
2754 
2755 /*
2756  * qla2x00_full_login_lip
2757  *	Issue full login LIP mailbox command.
2758  *
2759  * Input:
2760  *	ha = adapter block pointer.
2761  *	TARGET_QUEUE_LOCK must be released.
2762  *	ADAPTER_STATE_LOCK must be released.
2763  *
2764  * Returns:
2765  *	qla2x00 local function return status code.
2766  *
2767  * Context:
2768  *	Kernel context.
2769  */
2770 int
2771 qla2x00_full_login_lip(scsi_qla_host_t *vha)
2772 {
2773 	int rval;
2774 	mbx_cmd_t mc;
2775 	mbx_cmd_t *mcp = &mc;
2776 
2777 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1076,
2778 	    "Entered %s.\n", __func__);
2779 
2780 	mcp->mb[0] = MBC_LIP_FULL_LOGIN;
2781 	mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? BIT_4 : 0;
2782 	mcp->mb[2] = 0;
2783 	mcp->mb[3] = 0;
2784 	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2785 	mcp->in_mb = MBX_0;
2786 	mcp->tov = MBX_TOV_SECONDS;
2787 	mcp->flags = 0;
2788 	rval = qla2x00_mailbox_command(vha, mcp);
2789 
2790 	if (rval != QLA_SUCCESS) {
2791 		/*EMPTY*/
2792 		ql_dbg(ql_dbg_mbx, vha, 0x1077, "Failed=%x.\n", rval);
2793 	} else {
2794 		/*EMPTY*/
2795 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1078,
2796 		    "Done %s.\n", __func__);
2797 	}
2798 
2799 	return rval;
2800 }
2801 
2802 /*
2803  * qla2x00_get_id_list
2804  *
2805  * Input:
2806  *	ha = adapter block pointer.
2807  *
2808  * Returns:
2809  *	qla2x00 local function return status code.
2810  *
2811  * Context:
2812  *	Kernel context.
2813  */
2814 int
2815 qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma,
2816     uint16_t *entries)
2817 {
2818 	int rval;
2819 	mbx_cmd_t mc;
2820 	mbx_cmd_t *mcp = &mc;
2821 
2822 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1079,
2823 	    "Entered %s.\n", __func__);
2824 
2825 	if (id_list == NULL)
2826 		return QLA_FUNCTION_FAILED;
2827 
2828 	mcp->mb[0] = MBC_GET_ID_LIST;
2829 	mcp->out_mb = MBX_0;
2830 	if (IS_FWI2_CAPABLE(vha->hw)) {
2831 		mcp->mb[2] = MSW(id_list_dma);
2832 		mcp->mb[3] = LSW(id_list_dma);
2833 		mcp->mb[6] = MSW(MSD(id_list_dma));
2834 		mcp->mb[7] = LSW(MSD(id_list_dma));
2835 		mcp->mb[8] = 0;
2836 		mcp->mb[9] = vha->vp_idx;
2837 		mcp->out_mb |= MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2;
2838 	} else {
2839 		mcp->mb[1] = MSW(id_list_dma);
2840 		mcp->mb[2] = LSW(id_list_dma);
2841 		mcp->mb[3] = MSW(MSD(id_list_dma));
2842 		mcp->mb[6] = LSW(MSD(id_list_dma));
2843 		mcp->out_mb |= MBX_6|MBX_3|MBX_2|MBX_1;
2844 	}
2845 	mcp->in_mb = MBX_1|MBX_0;
2846 	mcp->tov = MBX_TOV_SECONDS;
2847 	mcp->flags = 0;
2848 	rval = qla2x00_mailbox_command(vha, mcp);
2849 
2850 	if (rval != QLA_SUCCESS) {
2851 		/*EMPTY*/
2852 		ql_dbg(ql_dbg_mbx, vha, 0x107a, "Failed=%x.\n", rval);
2853 	} else {
2854 		*entries = mcp->mb[1];
2855 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107b,
2856 		    "Done %s.\n", __func__);
2857 	}
2858 
2859 	return rval;
2860 }
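
/*
 * Usage sketch (hypothetical caller): the ID list is written into a
 * DMA-coherent buffer owned by the caller, typically ha->gid_list, and
 * *entries reports how many gid_list_info records came back.
 *
 *	uint16_t entries = 0;
 *	int rval;
 *
 *	rval = qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma,
 *	    &entries);
 *
 * On success, 'entries' records starting at ha->gid_list can be walked.
 */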
2861 
2862 /*
2863  * qla2x00_get_resource_cnts
2864  *	Get current firmware resource counts.
2865  *
2866  * Input:
2867  *	ha = adapter block pointer.
2868  *
2869  * Returns:
2870  *	qla2x00 local function return status code.
2871  *
2872  * Context:
2873  *	Kernel context.
2874  */
2875 int
2876 qla2x00_get_resource_cnts(scsi_qla_host_t *vha)
2877 {
2878 	struct qla_hw_data *ha = vha->hw;
2879 	int rval;
2880 	mbx_cmd_t mc;
2881 	mbx_cmd_t *mcp = &mc;
2882 
2883 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107c,
2884 	    "Entered %s.\n", __func__);
2885 
2886 	mcp->mb[0] = MBC_GET_RESOURCE_COUNTS;
2887 	mcp->out_mb = MBX_0;
2888 	mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
2889 	if (IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
2890 	    IS_QLA27XX(ha) || IS_QLA28XX(ha))
2891 		mcp->in_mb |= MBX_12;
2892 	mcp->tov = MBX_TOV_SECONDS;
2893 	mcp->flags = 0;
2894 	rval = qla2x00_mailbox_command(vha, mcp);
2895 
2896 	if (rval != QLA_SUCCESS) {
2897 		/*EMPTY*/
2898 		ql_dbg(ql_dbg_mbx, vha, 0x107d,
2899 		    "Failed mb[0]=%x.\n", mcp->mb[0]);
2900 	} else {
2901 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107e,
2902 		    "Done %s mb1=%x mb2=%x mb3=%x mb6=%x mb7=%x mb10=%x "
2903 		    "mb11=%x mb12=%x.\n", __func__, mcp->mb[1], mcp->mb[2],
2904 		    mcp->mb[3], mcp->mb[6], mcp->mb[7], mcp->mb[10],
2905 		    mcp->mb[11], mcp->mb[12]);
2906 
2907 		ha->orig_fw_tgt_xcb_count = mcp->mb[1];
2908 		ha->cur_fw_tgt_xcb_count = mcp->mb[2];
2909 		ha->cur_fw_xcb_count = mcp->mb[3];
2910 		ha->orig_fw_xcb_count = mcp->mb[6];
2911 		ha->cur_fw_iocb_count = mcp->mb[7];
2912 		ha->orig_fw_iocb_count = mcp->mb[10];
2913 		if (ha->flags.npiv_supported)
2914 			ha->max_npiv_vports = mcp->mb[11];
2915 		if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
2916 		    IS_QLA28XX(ha))
2917 			ha->fw_max_fcf_count = mcp->mb[12];
2918 	}
2919 
2920 	return (rval);
2921 }
2922 
2923 /*
2924  * qla2x00_get_fcal_position_map
2925  *	Get FCAL (LILP) position map using mailbox command
2926  *
2927  * Input:
2928  *	ha = adapter state pointer.
2929  *	pos_map = buffer pointer (can be NULL).
2930  *
2931  * Returns:
2932  *	qla2x00 local function return status code.
2933  *
2934  * Context:
2935  *	Kernel context.
2936  */
2937 int
2938 qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
2939 {
2940 	int rval;
2941 	mbx_cmd_t mc;
2942 	mbx_cmd_t *mcp = &mc;
2943 	char *pmap;
2944 	dma_addr_t pmap_dma;
2945 	struct qla_hw_data *ha = vha->hw;
2946 
2947 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107f,
2948 	    "Entered %s.\n", __func__);
2949 
2950 	pmap = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma);
2951 	if (pmap == NULL) {
2952 		ql_log(ql_log_warn, vha, 0x1080,
2953 		    "Memory alloc failed.\n");
2954 		return QLA_MEMORY_ALLOC_FAILED;
2955 	}
2956 
2957 	mcp->mb[0] = MBC_GET_FC_AL_POSITION_MAP;
2958 	mcp->mb[2] = MSW(pmap_dma);
2959 	mcp->mb[3] = LSW(pmap_dma);
2960 	mcp->mb[6] = MSW(MSD(pmap_dma));
2961 	mcp->mb[7] = LSW(MSD(pmap_dma));
2962 	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
2963 	mcp->in_mb = MBX_1|MBX_0;
2964 	mcp->buf_size = FCAL_MAP_SIZE;
2965 	mcp->flags = MBX_DMA_IN;
2966 	mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
2967 	rval = qla2x00_mailbox_command(vha, mcp);
2968 
2969 	if (rval == QLA_SUCCESS) {
2970 		ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1081,
2971 		    "mb0/mb1=%x/%X FC/AL position map size (%x).\n",
2972 		    mcp->mb[0], mcp->mb[1], (unsigned)pmap[0]);
2973 		ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111d,
2974 		    pmap, pmap[0] + 1);
2975 
2976 		if (pos_map)
2977 			memcpy(pos_map, pmap, FCAL_MAP_SIZE);
2978 	}
2979 	dma_pool_free(ha->s_dma_pool, pmap, pmap_dma);
2980 
2981 	if (rval != QLA_SUCCESS) {
2982 		ql_dbg(ql_dbg_mbx, vha, 0x1082, "Failed=%x.\n", rval);
2983 	} else {
2984 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1083,
2985 		    "Done %s.\n", __func__);
2986 	}
2987 
2988 	return rval;
2989 }
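
/*
 * Note: when pos_map is supplied it must have room for FCAL_MAP_SIZE
 * bytes; byte 0 of the returned map holds the number of AL_PA entries
 * that follow (hence the pmap[0] + 1 bytes dumped above).
 */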
2990 
2991 /*
2992  * qla2x00_get_link_status
2993  *
2994  * Input:
2995  *	ha = adapter block pointer.
2996  *	loop_id = device loop ID.
2997  *	stats = pointer to link status return buffer.
2998  *
2999  * Returns:
3000  *	0 = success.
3001  *	BIT_0 = mem alloc error.
3002  *	BIT_1 = mailbox error.
3003  */
3004 int
3005 qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
3006     struct link_statistics *stats, dma_addr_t stats_dma)
3007 {
3008 	int rval;
3009 	mbx_cmd_t mc;
3010 	mbx_cmd_t *mcp = &mc;
3011 	uint32_t *iter = (void *)stats;
3012 	ushort dwords = offsetof(typeof(*stats), link_up_cnt)/sizeof(*iter);
3013 	struct qla_hw_data *ha = vha->hw;
3014 
3015 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1084,
3016 	    "Entered %s.\n", __func__);
3017 
3018 	mcp->mb[0] = MBC_GET_LINK_STATUS;
3019 	mcp->mb[2] = MSW(LSD(stats_dma));
3020 	mcp->mb[3] = LSW(LSD(stats_dma));
3021 	mcp->mb[6] = MSW(MSD(stats_dma));
3022 	mcp->mb[7] = LSW(MSD(stats_dma));
3023 	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
3024 	mcp->in_mb = MBX_0;
3025 	if (IS_FWI2_CAPABLE(ha)) {
3026 		mcp->mb[1] = loop_id;
3027 		mcp->mb[4] = 0;
3028 		mcp->mb[10] = 0;
3029 		mcp->out_mb |= MBX_10|MBX_4|MBX_1;
3030 		mcp->in_mb |= MBX_1;
3031 	} else if (HAS_EXTENDED_IDS(ha)) {
3032 		mcp->mb[1] = loop_id;
3033 		mcp->mb[10] = 0;
3034 		mcp->out_mb |= MBX_10|MBX_1;
3035 	} else {
3036 		mcp->mb[1] = loop_id << 8;
3037 		mcp->out_mb |= MBX_1;
3038 	}
3039 	mcp->tov = MBX_TOV_SECONDS;
3040 	mcp->flags = IOCTL_CMD;
3041 	rval = qla2x00_mailbox_command(vha, mcp);
3042 
3043 	if (rval == QLA_SUCCESS) {
3044 		if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
3045 			ql_dbg(ql_dbg_mbx, vha, 0x1085,
3046 			    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3047 			rval = QLA_FUNCTION_FAILED;
3048 		} else {
3049 			/* Re-endianize - firmware data is le32. */
3050 			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1086,
3051 			    "Done %s.\n", __func__);
3052 			for ( ; dwords--; iter++)
3053 				le32_to_cpus(iter);
3054 		}
3055 	} else {
3056 		/* Failed. */
3057 		ql_dbg(ql_dbg_mbx, vha, 0x1087, "Failed=%x.\n", rval);
3058 	}
3059 
3060 	return rval;
3061 }
3062 
3063 int
3064 qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
3065     dma_addr_t stats_dma, uint16_t options)
3066 {
3067 	int rval;
3068 	mbx_cmd_t mc;
3069 	mbx_cmd_t *mcp = &mc;
3070 	uint32_t *iter, dwords;
3071 
3072 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1088,
3073 	    "Entered %s.\n", __func__);
3074 
3075 	memset(&mc, 0, sizeof(mc));
3076 	mc.mb[0] = MBC_GET_LINK_PRIV_STATS;
3077 	mc.mb[2] = MSW(stats_dma);
3078 	mc.mb[3] = LSW(stats_dma);
3079 	mc.mb[6] = MSW(MSD(stats_dma));
3080 	mc.mb[7] = LSW(MSD(stats_dma));
3081 	mc.mb[8] = sizeof(struct link_statistics) / 4;
3082 	mc.mb[9] = cpu_to_le16(vha->vp_idx);
3083 	mc.mb[10] = cpu_to_le16(options);
3084 
3085 	rval = qla24xx_send_mb_cmd(vha, &mc);
3086 
3087 	if (rval == QLA_SUCCESS) {
3088 		if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
3089 			ql_dbg(ql_dbg_mbx, vha, 0x1089,
3090 			    "Failed mb[0]=%x.\n", mcp->mb[0]);
3091 			rval = QLA_FUNCTION_FAILED;
3092 		} else {
3093 			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108a,
3094 			    "Done %s.\n", __func__);
3095 			/* Re-endianize - firmware data is le32. */
3096 			dwords = sizeof(struct link_statistics) / 4;
3097 			iter = &stats->link_fail_cnt;
3098 			for ( ; dwords--; iter++)
3099 				le32_to_cpus(iter);
3100 		}
3101 	} else {
3102 		/* Failed. */
3103 		ql_dbg(ql_dbg_mbx, vha, 0x108b, "Failed=%x.\n", rval);
3104 	}
3105 
3106 	return rval;
3107 }
3108 
3109 int
3110 qla24xx_abort_command(srb_t *sp)
3111 {
3112 	int		rval;
3113 	unsigned long   flags = 0;
3114 
3115 	struct abort_entry_24xx *abt;
3116 	dma_addr_t	abt_dma;
3117 	uint32_t	handle;
3118 	fc_port_t	*fcport = sp->fcport;
3119 	struct scsi_qla_host *vha = fcport->vha;
3120 	struct qla_hw_data *ha = vha->hw;
3121 	struct req_que *req = vha->req;
3122 	struct qla_qpair *qpair = sp->qpair;
3123 
3124 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c,
3125 	    "Entered %s.\n", __func__);
3126 
3127 	if (vha->flags.qpairs_available && sp->qpair)
3128 		req = sp->qpair->req;
3129 	else
3130 		return QLA_FUNCTION_FAILED;
3131 
3132 	if (ql2xasynctmfenable)
3133 		return qla24xx_async_abort_command(sp);
3134 
3135 	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
3136 	for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
3137 		if (req->outstanding_cmds[handle] == sp)
3138 			break;
3139 	}
3140 	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
3141 	if (handle == req->num_outstanding_cmds) {
3142 		/* Command not found. */
3143 		return QLA_FUNCTION_FAILED;
3144 	}
3145 
3146 	abt = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &abt_dma);
3147 	if (abt == NULL) {
3148 		ql_log(ql_log_warn, vha, 0x108d,
3149 		    "Failed to allocate abort IOCB.\n");
3150 		return QLA_MEMORY_ALLOC_FAILED;
3151 	}
3152 
3153 	abt->entry_type = ABORT_IOCB_TYPE;
3154 	abt->entry_count = 1;
3155 	abt->handle = MAKE_HANDLE(req->id, abt->handle);
3156 	abt->nport_handle = cpu_to_le16(fcport->loop_id);
3157 	abt->handle_to_abort = MAKE_HANDLE(req->id, handle);
3158 	abt->port_id[0] = fcport->d_id.b.al_pa;
3159 	abt->port_id[1] = fcport->d_id.b.area;
3160 	abt->port_id[2] = fcport->d_id.b.domain;
3161 	abt->vp_index = fcport->vha->vp_idx;
3162 
3163 	abt->req_que_no = cpu_to_le16(req->id);
3164 
3165 	rval = qla2x00_issue_iocb(vha, abt, abt_dma, 0);
3166 	if (rval != QLA_SUCCESS) {
3167 		ql_dbg(ql_dbg_mbx, vha, 0x108e,
3168 		    "Failed to issue IOCB (%x).\n", rval);
3169 	} else if (abt->entry_status != 0) {
3170 		ql_dbg(ql_dbg_mbx, vha, 0x108f,
3171 		    "Failed to complete IOCB -- error status (%x).\n",
3172 		    abt->entry_status);
3173 		rval = QLA_FUNCTION_FAILED;
3174 	} else if (abt->nport_handle != cpu_to_le16(0)) {
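		/*
		 * Assumption inferred from the checks below: on completion
		 * the firmware overwrites the nport_handle field with the
		 * abort status, so a non-zero value means the abort failed.
		 */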
3175 		ql_dbg(ql_dbg_mbx, vha, 0x1090,
3176 		    "Failed to complete IOCB -- completion status (%x).\n",
3177 		    le16_to_cpu(abt->nport_handle));
3178 		if (abt->nport_handle == CS_IOCB_ERROR)
3179 			rval = QLA_FUNCTION_PARAMETER_ERROR;
3180 		else
3181 			rval = QLA_FUNCTION_FAILED;
3182 	} else {
3183 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1091,
3184 		    "Done %s.\n", __func__);
3185 	}
3186 
3187 	dma_pool_free(ha->s_dma_pool, abt, abt_dma);
3188 
3189 	return rval;
3190 }
3191 
3192 struct tsk_mgmt_cmd {
3193 	union {
3194 		struct tsk_mgmt_entry tsk;
3195 		struct sts_entry_24xx sts;
3196 	} p;
3197 };
3198 
3199 static int
3200 __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
3201     uint64_t l, int tag)
3202 {
3203 	int		rval, rval2;
3204 	struct tsk_mgmt_cmd *tsk;
3205 	struct sts_entry_24xx *sts;
3206 	dma_addr_t	tsk_dma;
3207 	scsi_qla_host_t *vha;
3208 	struct qla_hw_data *ha;
3209 	struct req_que *req;
3210 	struct qla_qpair *qpair;
3211 
3212 	vha = fcport->vha;
3213 	ha = vha->hw;
3214 	req = vha->req;
3215 
3216 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1092,
3217 	    "Entered %s.\n", __func__);
3218 
3219 	if (vha->vp_idx && vha->qpair) {
3220 		/* NPIV port */
3221 		qpair = vha->qpair;
3222 		req = qpair->req;
3223 	}
3224 
3225 	tsk = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma);
3226 	if (tsk == NULL) {
3227 		ql_log(ql_log_warn, vha, 0x1093,
3228 		    "Failed to allocate task management IOCB.\n");
3229 		return QLA_MEMORY_ALLOC_FAILED;
3230 	}
3231 
3232 	tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE;
3233 	tsk->p.tsk.entry_count = 1;
3234 	tsk->p.tsk.handle = MAKE_HANDLE(req->id, tsk->p.tsk.handle);
3235 	tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id);
3236 	tsk->p.tsk.timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
3237 	tsk->p.tsk.control_flags = cpu_to_le32(type);
3238 	tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa;
3239 	tsk->p.tsk.port_id[1] = fcport->d_id.b.area;
3240 	tsk->p.tsk.port_id[2] = fcport->d_id.b.domain;
3241 	tsk->p.tsk.vp_index = fcport->vha->vp_idx;
3242 	if (type == TCF_LUN_RESET) {
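		/*
		 * Encode the LUN in SAM format, then byte-swap it into the
		 * layout the firmware expects.
		 */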
3243 		int_to_scsilun(l, &tsk->p.tsk.lun);
3244 		host_to_fcp_swap((uint8_t *)&tsk->p.tsk.lun,
3245 		    sizeof(tsk->p.tsk.lun));
3246 	}
3247 
3248 	sts = &tsk->p.sts;
3249 	rval = qla2x00_issue_iocb(vha, tsk, tsk_dma, 0);
3250 	if (rval != QLA_SUCCESS) {
3251 		ql_dbg(ql_dbg_mbx, vha, 0x1094,
3252 		    "Failed to issue %s reset IOCB (%x).\n", name, rval);
3253 	} else if (sts->entry_status != 0) {
3254 		ql_dbg(ql_dbg_mbx, vha, 0x1095,
3255 		    "Failed to complete IOCB -- error status (%x).\n",
3256 		    sts->entry_status);
3257 		rval = QLA_FUNCTION_FAILED;
3258 	} else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) {
3259 		ql_dbg(ql_dbg_mbx, vha, 0x1096,
3260 		    "Failed to complete IOCB -- completion status (%x).\n",
3261 		    le16_to_cpu(sts->comp_status));
3262 		rval = QLA_FUNCTION_FAILED;
3263 	} else if (le16_to_cpu(sts->scsi_status) &
3264 	    SS_RESPONSE_INFO_LEN_VALID) {
3265 		if (le32_to_cpu(sts->rsp_data_len) < 4) {
3266 			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1097,
3267 			    "Ignoring inconsistent data length -- not enough "
3268 			    "response info (%d).\n",
3269 			    le32_to_cpu(sts->rsp_data_len));
3270 		} else if (sts->data[3]) {
3271 			ql_dbg(ql_dbg_mbx, vha, 0x1098,
3272 			    "Failed to complete IOCB -- response (%x).\n",
3273 			    sts->data[3]);
3274 			rval = QLA_FUNCTION_FAILED;
3275 		}
3276 	}
3277 
3278 	/* Issue marker IOCB. */
3279 	rval2 = qla2x00_marker(vha, ha->base_qpair, fcport->loop_id, l,
3280 	    type == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
3281 	if (rval2 != QLA_SUCCESS) {
3282 		ql_dbg(ql_dbg_mbx, vha, 0x1099,
3283 		    "Failed to issue marker IOCB (%x).\n", rval2);
3284 	} else {
3285 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109a,
3286 		    "Done %s.\n", __func__);
3287 	}
3288 
3289 	dma_pool_free(ha->s_dma_pool, tsk, tsk_dma);
3290 
3291 	return rval;
3292 }
3293 
3294 int
3295 qla24xx_abort_target(struct fc_port *fcport, uint64_t l, int tag)
3296 {
3297 	struct qla_hw_data *ha = fcport->vha->hw;
3298 
3299 	if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha))
3300 		return qla2x00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag);
3301 
3302 	return __qla24xx_issue_tmf("Target", TCF_TARGET_RESET, fcport, l, tag);
3303 }
3304 
3305 int
3306 qla24xx_lun_reset(struct fc_port *fcport, uint64_t l, int tag)
3307 {
3308 	struct qla_hw_data *ha = fcport->vha->hw;
3309 
3310 	if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha))
3311 		return qla2x00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag);
3312 
3313 	return __qla24xx_issue_tmf("Lun", TCF_LUN_RESET, fcport, l, tag);
3314 }
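
/*
 * Note (hedged): qla24xx_abort_target() and qla24xx_lun_reset() above are
 * wired up as the isp_ops target/LUN reset methods used by the driver's
 * SCSI error-handler callbacks; the call sites live in qla_os.c, outside
 * this listing.
 */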
3315 
3316 int
3317 qla2x00_system_error(scsi_qla_host_t *vha)
3318 {
3319 	int rval;
3320 	mbx_cmd_t mc;
3321 	mbx_cmd_t *mcp = &mc;
3322 	struct qla_hw_data *ha = vha->hw;
3323 
3324 	if (!IS_QLA23XX(ha) && !IS_FWI2_CAPABLE(ha))
3325 		return QLA_FUNCTION_FAILED;
3326 
3327 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109b,
3328 	    "Entered %s.\n", __func__);
3329 
3330 	mcp->mb[0] = MBC_GEN_SYSTEM_ERROR;
3331 	mcp->out_mb = MBX_0;
3332 	mcp->in_mb = MBX_0;
3333 	mcp->tov = 5;
3334 	mcp->flags = 0;
3335 	rval = qla2x00_mailbox_command(vha, mcp);
3336 
3337 	if (rval != QLA_SUCCESS) {
3338 		ql_dbg(ql_dbg_mbx, vha, 0x109c, "Failed=%x.\n", rval);
3339 	} else {
3340 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109d,
3341 		    "Done %s.\n", __func__);
3342 	}
3343 
3344 	return rval;
3345 }
3346 
3347 int
3348 qla2x00_write_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t data)
3349 {
3350 	int rval;
3351 	mbx_cmd_t mc;
3352 	mbx_cmd_t *mcp = &mc;
3353 
3354 	if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) &&
3355 	    !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
3356 		return QLA_FUNCTION_FAILED;
3357 
3358 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1182,
3359 	    "Entered %s.\n", __func__);
3360 
3361 	mcp->mb[0] = MBC_WRITE_SERDES;
3362 	mcp->mb[1] = addr;
3363 	if (IS_QLA2031(vha->hw))
3364 		mcp->mb[2] = data & 0xff;
3365 	else
3366 		mcp->mb[2] = data;
3367 
3368 	mcp->mb[3] = 0;
3369 	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
3370 	mcp->in_mb = MBX_0;
3371 	mcp->tov = MBX_TOV_SECONDS;
3372 	mcp->flags = 0;
3373 	rval = qla2x00_mailbox_command(vha, mcp);
3374 
3375 	if (rval != QLA_SUCCESS) {
3376 		ql_dbg(ql_dbg_mbx, vha, 0x1183,
3377 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3378 	} else {
3379 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1184,
3380 		    "Done %s.\n", __func__);
3381 	}
3382 
3383 	return rval;
3384 }
3385 
3386 int
3387 qla2x00_read_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t *data)
3388 {
3389 	int rval;
3390 	mbx_cmd_t mc;
3391 	mbx_cmd_t *mcp = &mc;
3392 
3393 	if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) &&
3394 	    !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
3395 		return QLA_FUNCTION_FAILED;
3396 
3397 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1185,
3398 	    "Entered %s.\n", __func__);
3399 
3400 	mcp->mb[0] = MBC_READ_SERDES;
3401 	mcp->mb[1] = addr;
3402 	mcp->mb[3] = 0;
3403 	mcp->out_mb = MBX_3|MBX_1|MBX_0;
3404 	mcp->in_mb = MBX_1|MBX_0;
3405 	mcp->tov = MBX_TOV_SECONDS;
3406 	mcp->flags = 0;
3407 	rval = qla2x00_mailbox_command(vha, mcp);
3408 
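	/* mb[1] returns the serdes word; 2031 parts use only the low byte. */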
3409 	if (IS_QLA2031(vha->hw))
3410 		*data = mcp->mb[1] & 0xff;
3411 	else
3412 		*data = mcp->mb[1];
3413 
3414 	if (rval != QLA_SUCCESS) {
3415 		ql_dbg(ql_dbg_mbx, vha, 0x1186,
3416 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3417 	} else {
3418 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1187,
3419 		    "Done %s.\n", __func__);
3420 	}
3421 
3422 	return rval;
3423 }
3424 
3425 int
3426 qla8044_write_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t data)
3427 {
3428 	int rval;
3429 	mbx_cmd_t mc;
3430 	mbx_cmd_t *mcp = &mc;
3431 
3432 	if (!IS_QLA8044(vha->hw))
3433 		return QLA_FUNCTION_FAILED;
3434 
3435 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x11a0,
3436 	    "Entered %s.\n", __func__);
3437 
3438 	mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG;
3439 	mcp->mb[1] = HCS_WRITE_SERDES;
3440 	mcp->mb[3] = LSW(addr);
3441 	mcp->mb[4] = MSW(addr);
3442 	mcp->mb[5] = LSW(data);
3443 	mcp->mb[6] = MSW(data);
3444 	mcp->out_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_1|MBX_0;
3445 	mcp->in_mb = MBX_0;
3446 	mcp->tov = MBX_TOV_SECONDS;
3447 	mcp->flags = 0;
3448 	rval = qla2x00_mailbox_command(vha, mcp);
3449 
3450 	if (rval != QLA_SUCCESS) {
3451 		ql_dbg(ql_dbg_mbx, vha, 0x11a1,
3452 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3453 	} else {
3454 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1188,
3455 		    "Done %s.\n", __func__);
3456 	}
3457 
3458 	return rval;
3459 }
3460 
3461 int
3462 qla8044_read_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t *data)
3463 {
3464 	int rval;
3465 	mbx_cmd_t mc;
3466 	mbx_cmd_t *mcp = &mc;
3467 
3468 	if (!IS_QLA8044(vha->hw))
3469 		return QLA_FUNCTION_FAILED;
3470 
3471 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1189,
3472 	    "Entered %s.\n", __func__);
3473 
3474 	mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG;
3475 	mcp->mb[1] = HCS_READ_SERDES;
3476 	mcp->mb[3] = LSW(addr);
3477 	mcp->mb[4] = MSW(addr);
3478 	mcp->out_mb = MBX_4|MBX_3|MBX_1|MBX_0;
3479 	mcp->in_mb = MBX_2|MBX_1|MBX_0;
3480 	mcp->tov = MBX_TOV_SECONDS;
3481 	mcp->flags = 0;
3482 	rval = qla2x00_mailbox_command(vha, mcp);
3483 
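	/* The 32-bit value comes back split across mb[2] (high) and mb[1] (low). */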
3484 	*data = mcp->mb[2] << 16 | mcp->mb[1];
3485 
3486 	if (rval != QLA_SUCCESS) {
3487 		ql_dbg(ql_dbg_mbx, vha, 0x118a,
3488 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3489 	} else {
3490 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118b,
3491 		    "Done %s.\n", __func__);
3492 	}
3493 
3494 	return rval;
3495 }
3496 
3497 /**
3498  * qla2x00_set_serdes_params() - Set serial link (serdes) options.
3499  * @vha: HA context
3500  * @sw_em_1g: serial link options
3501  * @sw_em_2g: serial link options
3502  * @sw_em_4g: serial link options
3503  *
3504  * Returns: qla2x00 local function return status code.
3505  */
3506 int
3507 qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g,
3508     uint16_t sw_em_2g, uint16_t sw_em_4g)
3509 {
3510 	int rval;
3511 	mbx_cmd_t mc;
3512 	mbx_cmd_t *mcp = &mc;
3513 
3514 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109e,
3515 	    "Entered %s.\n", __func__);
3516 
3517 	mcp->mb[0] = MBC_SERDES_PARAMS;
3518 	mcp->mb[1] = BIT_0;
3519 	mcp->mb[2] = sw_em_1g | BIT_15;
3520 	mcp->mb[3] = sw_em_2g | BIT_15;
3521 	mcp->mb[4] = sw_em_4g | BIT_15;
3522 	mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3523 	mcp->in_mb = MBX_0;
3524 	mcp->tov = MBX_TOV_SECONDS;
3525 	mcp->flags = 0;
3526 	rval = qla2x00_mailbox_command(vha, mcp);
3527 
3528 	if (rval != QLA_SUCCESS) {
3529 		/*EMPTY*/
3530 		ql_dbg(ql_dbg_mbx, vha, 0x109f,
3531 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3532 	} else {
3533 		/*EMPTY*/
3534 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a0,
3535 		    "Done %s.\n", __func__);
3536 	}
3537 
3538 	return rval;
3539 }
3540 
3541 int
3542 qla2x00_stop_firmware(scsi_qla_host_t *vha)
3543 {
3544 	int rval;
3545 	mbx_cmd_t mc;
3546 	mbx_cmd_t *mcp = &mc;
3547 
3548 	if (!IS_FWI2_CAPABLE(vha->hw))
3549 		return QLA_FUNCTION_FAILED;
3550 
3551 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a1,
3552 	    "Entered %s.\n", __func__);
3553 
3554 	mcp->mb[0] = MBC_STOP_FIRMWARE;
3555 	mcp->mb[1] = 0;
3556 	mcp->out_mb = MBX_1|MBX_0;
3557 	mcp->in_mb = MBX_0;
3558 	mcp->tov = 5;
3559 	mcp->flags = 0;
3560 	rval = qla2x00_mailbox_command(vha, mcp);
3561 
3562 	if (rval != QLA_SUCCESS) {
3563 		ql_dbg(ql_dbg_mbx, vha, 0x10a2, "Failed=%x.\n", rval);
3564 		if (mcp->mb[0] == MBS_INVALID_COMMAND)
3565 			rval = QLA_INVALID_COMMAND;
3566 	} else {
3567 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a3,
3568 		    "Done %s.\n", __func__);
3569 	}
3570 
3571 	return rval;
3572 }
3573 
3574 int
3575 qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma,
3576     uint16_t buffers)
3577 {
3578 	int rval;
3579 	mbx_cmd_t mc;
3580 	mbx_cmd_t *mcp = &mc;
3581 
3582 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a4,
3583 	    "Entered %s.\n", __func__);
3584 
3585 	if (!IS_FWI2_CAPABLE(vha->hw))
3586 		return QLA_FUNCTION_FAILED;
3587 
3588 	if (unlikely(pci_channel_offline(vha->hw->pdev)))
3589 		return QLA_FUNCTION_FAILED;
3590 
3591 	mcp->mb[0] = MBC_TRACE_CONTROL;
3592 	mcp->mb[1] = TC_EFT_ENABLE;
3593 	mcp->mb[2] = LSW(eft_dma);
3594 	mcp->mb[3] = MSW(eft_dma);
3595 	mcp->mb[4] = LSW(MSD(eft_dma));
3596 	mcp->mb[5] = MSW(MSD(eft_dma));
3597 	mcp->mb[6] = buffers;
3598 	mcp->mb[7] = TC_AEN_DISABLE;
3599 	mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3600 	mcp->in_mb = MBX_1|MBX_0;
3601 	mcp->tov = MBX_TOV_SECONDS;
3602 	mcp->flags = 0;
3603 	rval = qla2x00_mailbox_command(vha, mcp);
3604 	if (rval != QLA_SUCCESS) {
3605 		ql_dbg(ql_dbg_mbx, vha, 0x10a5,
3606 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
3607 		    rval, mcp->mb[0], mcp->mb[1]);
3608 	} else {
3609 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a6,
3610 		    "Done %s.\n", __func__);
3611 	}
3612 
3613 	return rval;
3614 }
3615 
3616 int
3617 qla2x00_disable_eft_trace(scsi_qla_host_t *vha)
3618 {
3619 	int rval;
3620 	mbx_cmd_t mc;
3621 	mbx_cmd_t *mcp = &mc;
3622 
3623 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a7,
3624 	    "Entered %s.\n", __func__);
3625 
3626 	if (!IS_FWI2_CAPABLE(vha->hw))
3627 		return QLA_FUNCTION_FAILED;
3628 
3629 	if (unlikely(pci_channel_offline(vha->hw->pdev)))
3630 		return QLA_FUNCTION_FAILED;
3631 
3632 	mcp->mb[0] = MBC_TRACE_CONTROL;
3633 	mcp->mb[1] = TC_EFT_DISABLE;
3634 	mcp->out_mb = MBX_1|MBX_0;
3635 	mcp->in_mb = MBX_1|MBX_0;
3636 	mcp->tov = MBX_TOV_SECONDS;
3637 	mcp->flags = 0;
3638 	rval = qla2x00_mailbox_command(vha, mcp);
3639 	if (rval != QLA_SUCCESS) {
3640 		ql_dbg(ql_dbg_mbx, vha, 0x10a8,
3641 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
3642 		    rval, mcp->mb[0], mcp->mb[1]);
3643 	} else {
3644 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a9,
3645 		    "Done %s.\n", __func__);
3646 	}
3647 
3648 	return rval;
3649 }
3650 
3651 int
3652 qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
3653     uint16_t buffers, uint16_t *mb, uint32_t *dwords)
3654 {
3655 	int rval;
3656 	mbx_cmd_t mc;
3657 	mbx_cmd_t *mcp = &mc;
3658 
3659 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10aa,
3660 	    "Entered %s.\n", __func__);
3661 
3662 	if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw) &&
3663 	    !IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) &&
3664 	    !IS_QLA28XX(vha->hw))
3665 		return QLA_FUNCTION_FAILED;
3666 
3667 	if (unlikely(pci_channel_offline(vha->hw->pdev)))
3668 		return QLA_FUNCTION_FAILED;
3669 
3670 	mcp->mb[0] = MBC_TRACE_CONTROL;
3671 	mcp->mb[1] = TC_FCE_ENABLE;
3672 	mcp->mb[2] = LSW(fce_dma);
3673 	mcp->mb[3] = MSW(fce_dma);
3674 	mcp->mb[4] = LSW(MSD(fce_dma));
3675 	mcp->mb[5] = MSW(MSD(fce_dma));
3676 	mcp->mb[6] = buffers;
3677 	mcp->mb[7] = TC_AEN_DISABLE;
3678 	mcp->mb[8] = 0;
3679 	mcp->mb[9] = TC_FCE_DEFAULT_RX_SIZE;
3680 	mcp->mb[10] = TC_FCE_DEFAULT_TX_SIZE;
3681 	mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|
3682 	    MBX_1|MBX_0;
3683 	mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3684 	mcp->tov = MBX_TOV_SECONDS;
3685 	mcp->flags = 0;
3686 	rval = qla2x00_mailbox_command(vha, mcp);
3687 	if (rval != QLA_SUCCESS) {
3688 		ql_dbg(ql_dbg_mbx, vha, 0x10ab,
3689 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
3690 		    rval, mcp->mb[0], mcp->mb[1]);
3691 	} else {
3692 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ac,
3693 		    "Done %s.\n", __func__);
3694 
3695 		if (mb)
3696 			memcpy(mb, mcp->mb, 8 * sizeof(*mb));
3697 		if (dwords)
3698 			*dwords = buffers;
3699 	}
3700 
3701 	return rval;
3702 }
3703 
3704 int
3705 qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd)
3706 {
3707 	int rval;
3708 	mbx_cmd_t mc;
3709 	mbx_cmd_t *mcp = &mc;
3710 
3711 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ad,
3712 	    "Entered %s.\n", __func__);
3713 
3714 	if (!IS_FWI2_CAPABLE(vha->hw))
3715 		return QLA_FUNCTION_FAILED;
3716 
3717 	if (unlikely(pci_channel_offline(vha->hw->pdev)))
3718 		return QLA_FUNCTION_FAILED;
3719 
3720 	mcp->mb[0] = MBC_TRACE_CONTROL;
3721 	mcp->mb[1] = TC_FCE_DISABLE;
3722 	mcp->mb[2] = TC_FCE_DISABLE_TRACE;
3723 	mcp->out_mb = MBX_2|MBX_1|MBX_0;
3724 	mcp->in_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|
3725 	    MBX_1|MBX_0;
3726 	mcp->tov = MBX_TOV_SECONDS;
3727 	mcp->flags = 0;
3728 	rval = qla2x00_mailbox_command(vha, mcp);
3729 	if (rval != QLA_SUCCESS) {
3730 		ql_dbg(ql_dbg_mbx, vha, 0x10ae,
3731 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
3732 		    rval, mcp->mb[0], mcp->mb[1]);
3733 	} else {
3734 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10af,
3735 		    "Done %s.\n", __func__);
3736 
3737 		if (wr)
3738 			*wr = (uint64_t) mcp->mb[5] << 48 |
3739 			    (uint64_t) mcp->mb[4] << 32 |
3740 			    (uint64_t) mcp->mb[3] << 16 |
3741 			    (uint64_t) mcp->mb[2];
3742 		if (rd)
3743 			*rd = (uint64_t) mcp->mb[9] << 48 |
3744 			    (uint64_t) mcp->mb[8] << 32 |
3745 			    (uint64_t) mcp->mb[7] << 16 |
3746 			    (uint64_t) mcp->mb[6];
3747 	}
3748 
3749 	return rval;
3750 }
3751 
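/*
 * qla2x00_get_idma_speed
 *	Query the current iIDMA port speed for the given loop ID via
 *	MBC_PORT_PARAMS (mb[2] = mb[3] = 0 selects a read).  The speed is
 *	returned in *port_speed; mb[], if supplied, receives the raw
 *	mailbox status registers.
 */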
3752 int
3753 qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
3754 	uint16_t *port_speed, uint16_t *mb)
3755 {
3756 	int rval;
3757 	mbx_cmd_t mc;
3758 	mbx_cmd_t *mcp = &mc;
3759 
3760 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b0,
3761 	    "Entered %s.\n", __func__);
3762 
3763 	if (!IS_IIDMA_CAPABLE(vha->hw))
3764 		return QLA_FUNCTION_FAILED;
3765 
3766 	mcp->mb[0] = MBC_PORT_PARAMS;
3767 	mcp->mb[1] = loop_id;
3768 	mcp->mb[2] = mcp->mb[3] = 0;
3769 	mcp->mb[9] = vha->vp_idx;
3770 	mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
3771 	mcp->in_mb = MBX_3|MBX_1|MBX_0;
3772 	mcp->tov = MBX_TOV_SECONDS;
3773 	mcp->flags = 0;
3774 	rval = qla2x00_mailbox_command(vha, mcp);
3775 
3776 	/* Return mailbox statuses. */
3777 	if (mb) {
3778 		mb[0] = mcp->mb[0];
3779 		mb[1] = mcp->mb[1];
3780 		mb[3] = mcp->mb[3];
3781 	}
3782 
3783 	if (rval != QLA_SUCCESS) {
3784 		ql_dbg(ql_dbg_mbx, vha, 0x10b1, "Failed=%x.\n", rval);
3785 	} else {
3786 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b2,
3787 		    "Done %s.\n", __func__);
3788 		if (port_speed)
3789 			*port_speed = mcp->mb[3];
3790 	}
3791 
3792 	return rval;
3793 }
3794 
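/*
 * qla2x00_set_idma_speed
 *	Program the iIDMA port speed for the given loop ID via
 *	MBC_PORT_PARAMS (mb[2] = BIT_0 selects a write, mb[3] carries the
 *	speed).  mb[], if supplied, receives the raw mailbox status
 *	registers.
 */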
3795 int
3796 qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
3797     uint16_t port_speed, uint16_t *mb)
3798 {
3799 	int rval;
3800 	mbx_cmd_t mc;
3801 	mbx_cmd_t *mcp = &mc;
3802 
3803 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b3,
3804 	    "Entered %s.\n", __func__);
3805 
3806 	if (!IS_IIDMA_CAPABLE(vha->hw))
3807 		return QLA_FUNCTION_FAILED;
3808 
3809 	mcp->mb[0] = MBC_PORT_PARAMS;
3810 	mcp->mb[1] = loop_id;
3811 	mcp->mb[2] = BIT_0;
3812 	mcp->mb[3] = port_speed & 0x3F;
3813 	mcp->mb[9] = vha->vp_idx;
3814 	mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
3815 	mcp->in_mb = MBX_3|MBX_1|MBX_0;
3816 	mcp->tov = MBX_TOV_SECONDS;
3817 	mcp->flags = 0;
3818 	rval = qla2x00_mailbox_command(vha, mcp);
3819 
3820 	/* Return mailbox statuses. */
3821 	if (mb) {
3822 		mb[0] = mcp->mb[0];
3823 		mb[1] = mcp->mb[1];
3824 		mb[3] = mcp->mb[3];
3825 	}
3826 
3827 	if (rval != QLA_SUCCESS) {
3828 		ql_dbg(ql_dbg_mbx, vha, 0x10b4,
3829 		    "Failed=%x.\n", rval);
3830 	} else {
3831 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b5,
3832 		    "Done %s.\n", __func__);
3833 	}
3834 
3835 	return rval;
3836 }
3837 
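/*
 * qla24xx_report_id_acquisition
 *	Handle a Report ID Acquisition IOCB from the firmware.  Decodes
 *	format 0 (loop), format 1 (fabric / point-to-point) and format 2
 *	(N2N) entries, updates the host and vport ID mappings, and schedules
 *	the follow-up work in DPC context.
 */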
3838 void
3839 qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
3840 	struct vp_rpt_id_entry_24xx *rptid_entry)
3841 {
3842 	struct qla_hw_data *ha = vha->hw;
3843 	scsi_qla_host_t *vp = NULL;
3844 	unsigned long   flags;
3845 	int found;
3846 	port_id_t id;
3847 	struct fc_port *fcport;
3848 
3849 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6,
3850 	    "Entered %s.\n", __func__);
3851 
3852 	if (rptid_entry->entry_status != 0)
3853 		return;
3854 
3855 	id.b.domain = rptid_entry->port_id[2];
3856 	id.b.area   = rptid_entry->port_id[1];
3857 	id.b.al_pa  = rptid_entry->port_id[0];
3858 	id.b.rsvd_1 = 0;
3859 	ha->flags.n2n_ae = 0;
3860 
3861 	if (rptid_entry->format == 0) {
3862 		/* loop */
3863 		ql_dbg(ql_dbg_async, vha, 0x10b7,
3864 		    "Format 0 : Number of VPs setup %d, number of "
3865 		    "VPs acquired %d.\n", rptid_entry->vp_setup,
3866 		    rptid_entry->vp_acquired);
3867 		ql_dbg(ql_dbg_async, vha, 0x10b8,
3868 		    "Primary port id %02x%02x%02x.\n",
3869 		    rptid_entry->port_id[2], rptid_entry->port_id[1],
3870 		    rptid_entry->port_id[0]);
3871 		ha->current_topology = ISP_CFG_NL;
3872 		qlt_update_host_map(vha, id);
3873 
3874 	} else if (rptid_entry->format == 1) {
3875 		/* fabric */
3876 		ql_dbg(ql_dbg_async, vha, 0x10b9,
3877 		    "Format 1: VP[%d] enabled - status %d - with "
3878 		    "port id %02x%02x%02x.\n", rptid_entry->vp_idx,
3879 		    rptid_entry->vp_status,
3880 		    rptid_entry->port_id[2], rptid_entry->port_id[1],
3881 		    rptid_entry->port_id[0]);
3882 		ql_dbg(ql_dbg_async, vha, 0x5075,
3883 		   "Format 1: Remote WWPN %8phC.\n",
3884 		   rptid_entry->u.f1.port_name);
3885 
3886 		ql_dbg(ql_dbg_async, vha, 0x5075,
3887 		   "Format 1: WWPN %8phC.\n",
3888 		   vha->port_name);
3889 
3890 		switch (rptid_entry->u.f1.flags & TOPO_MASK) {
3891 		case TOPO_N2N:
3892 			ha->current_topology = ISP_CFG_N;
3893 			spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
3894 			fcport = qla2x00_find_fcport_by_wwpn(vha,
3895 			    rptid_entry->u.f1.port_name, 1);
3896 			spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
3897 
3898 			if (fcport) {
3899 				fcport->plogi_nack_done_deadline = jiffies + HZ;
3900 				fcport->dm_login_expire = jiffies + 3*HZ;
3901 				fcport->scan_state = QLA_FCPORT_FOUND;
3902 				switch (fcport->disc_state) {
3903 				case DSC_DELETED:
3904 					set_bit(RELOGIN_NEEDED,
3905 					    &vha->dpc_flags);
3906 					break;
3907 				case DSC_DELETE_PEND:
3908 					break;
3909 				default:
3910 					qlt_schedule_sess_for_deletion(fcport);
3911 					break;
3912 				}
3913 			} else {
3914 				id.b24 = 0;
3915 				if (wwn_to_u64(vha->port_name) >
3916 				    wwn_to_u64(rptid_entry->u.f1.port_name)) {
3917 					vha->d_id.b24 = 0;
3918 					vha->d_id.b.al_pa = 1;
3919 					ha->flags.n2n_bigger = 1;
3920 
3921 					id.b.al_pa = 2;
3922 					ql_dbg(ql_dbg_async, vha, 0x5075,
3923 					    "Format 1: assign local id %x remote id %x\n",
3924 					    vha->d_id.b24, id.b24);
3925 				} else {
3926 					ql_dbg(ql_dbg_async, vha, 0x5075,
3927 					    "Format 1: Remote login - Waiting for WWPN %8phC.\n",
3928 					    rptid_entry->u.f1.port_name);
3929 					ha->flags.n2n_bigger = 0;
3930 				}
3931 				qla24xx_post_newsess_work(vha, &id,
3932 				    rptid_entry->u.f1.port_name,
3933 				    rptid_entry->u.f1.node_name,
3934 				    NULL,
3935 				    FC4_TYPE_UNKNOWN);
3936 			}
3937 
3938 			/* if our portname is higher then initiate N2N login */
3939 			/* if our portname is higher, then initiate N2N login */
3940 			set_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags);
3941 			ha->flags.n2n_ae = 1;
3942 			return;
3943 			break;
3944 		case TOPO_FL:
3945 			ha->current_topology = ISP_CFG_FL;
3946 			break;
3947 		case TOPO_F:
3948 			ha->current_topology = ISP_CFG_F;
3949 			break;
3950 		default:
3951 			break;
3952 		}
3953 
3954 		ha->flags.gpsc_supported = 1;
3955 		ha->current_topology = ISP_CFG_F;
3956 		/* buffer to buffer credit flag */
3957 		vha->flags.bbcr_enable = (rptid_entry->u.f1.bbcr & 0xf) != 0;
3958 
3959 		if (rptid_entry->vp_idx == 0) {
3960 			if (rptid_entry->vp_status == VP_STAT_COMPL) {
3961 				/* FA-WWN is only for physical port */
3962 				if (qla_ini_mode_enabled(vha) &&
3963 				    ha->flags.fawwpn_enabled &&
3964 				    (rptid_entry->u.f1.flags &
3965 				     BIT_6)) {
3966 					memcpy(vha->port_name,
3967 					    rptid_entry->u.f1.port_name,
3968 					    WWN_SIZE);
3969 				}
3970 
3971 				qlt_update_host_map(vha, id);
3972 			}
3973 
3974 			set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
3975 			set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
3976 		} else {
3977 			if (rptid_entry->vp_status != VP_STAT_COMPL &&
3978 				rptid_entry->vp_status != VP_STAT_ID_CHG) {
3979 				ql_dbg(ql_dbg_mbx, vha, 0x10ba,
3980 				    "Could not acquire ID for VP[%d].\n",
3981 				    rptid_entry->vp_idx);
3982 				return;
3983 			}
3984 
3985 			found = 0;
3986 			spin_lock_irqsave(&ha->vport_slock, flags);
3987 			list_for_each_entry(vp, &ha->vp_list, list) {
3988 				if (rptid_entry->vp_idx == vp->vp_idx) {
3989 					found = 1;
3990 					break;
3991 				}
3992 			}
3993 			spin_unlock_irqrestore(&ha->vport_slock, flags);
3994 
3995 			if (!found)
3996 				return;
3997 
3998 			qlt_update_host_map(vp, id);
3999 
4000 			/*
4001 			 * Cannot configure here as we are still sitting on the
4002 			 * response queue. Handle it in dpc context.
4003 			 */
4004 			set_bit(VP_IDX_ACQUIRED, &vp->vp_flags);
4005 			set_bit(REGISTER_FC4_NEEDED, &vp->dpc_flags);
4006 			set_bit(REGISTER_FDMI_NEEDED, &vp->dpc_flags);
4007 		}
4008 		set_bit(VP_DPC_NEEDED, &vha->dpc_flags);
4009 		qla2xxx_wake_dpc(vha);
4010 	} else if (rptid_entry->format == 2) {
4011 		ql_dbg(ql_dbg_async, vha, 0x505f,
4012 		    "RIDA: format 2/N2N Primary port id %02x%02x%02x.\n",
4013 		    rptid_entry->port_id[2], rptid_entry->port_id[1],
4014 		    rptid_entry->port_id[0]);
4015 
4016 		ql_dbg(ql_dbg_async, vha, 0x5075,
4017 		    "N2N: Remote WWPN %8phC.\n",
4018 		    rptid_entry->u.f2.port_name);
4019 
4020 		/* N2N.  direct connect */
4021 		ha->current_topology = ISP_CFG_N;
4022 		ha->flags.rida_fmt2 = 1;
4023 		vha->d_id.b.domain = rptid_entry->port_id[2];
4024 		vha->d_id.b.area = rptid_entry->port_id[1];
4025 		vha->d_id.b.al_pa = rptid_entry->port_id[0];
4026 
4027 		ha->flags.n2n_ae = 1;
4028 		spin_lock_irqsave(&ha->vport_slock, flags);
4029 		qlt_update_vp_map(vha, SET_AL_PA);
4030 		spin_unlock_irqrestore(&ha->vport_slock, flags);
4031 
4032 		list_for_each_entry(fcport, &vha->vp_fcports, list) {
4033 			fcport->scan_state = QLA_FCPORT_SCAN;
4034 		}
4035 
4036 		fcport = qla2x00_find_fcport_by_wwpn(vha,
4037 		    rptid_entry->u.f2.port_name, 1);
4038 
4039 		if (fcport) {
4040 			fcport->login_retry = vha->hw->login_retry_count;
4041 			fcport->plogi_nack_done_deadline = jiffies + HZ;
4042 			fcport->scan_state = QLA_FCPORT_FOUND;
4043 		}
4044 	}
4045 }
4046 
4047 /*
4048  * qla24xx_modify_vp_config
4049  *	Change VP configuration for vha
4050  *
4051  * Input:
4052  *	vha = adapter block pointer.
4053  *
4054  * Returns:
4055  *	qla2xxx local function return status code.
4056  *
4057  * Context:
4058  *	Kernel context.
4059  */
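/*
 * A minimal usage sketch (an assumption for illustration, not code taken
 * from this driver): a vport bring-up path could issue the IOCB below and
 * fail the vport if it does not complete, e.g.
 *
 *	if (qla24xx_modify_vp_config(vha) != QLA_SUCCESS)
 *		fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
 */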
4060 int
4061 qla24xx_modify_vp_config(scsi_qla_host_t *vha)
4062 {
4063 	int		rval;
4064 	struct vp_config_entry_24xx *vpmod;
4065 	dma_addr_t	vpmod_dma;
4066 	struct qla_hw_data *ha = vha->hw;
4067 	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
4068 
4069 	/* This can be called by the parent */
4070 
4071 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10bb,
4072 	    "Entered %s.\n", __func__);
4073 
4074 	vpmod = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma);
4075 	if (!vpmod) {
4076 		ql_log(ql_log_warn, vha, 0x10bc,
4077 		    "Failed to allocate modify VP IOCB.\n");
4078 		return QLA_MEMORY_ALLOC_FAILED;
4079 	}
4080 
4081 	vpmod->entry_type = VP_CONFIG_IOCB_TYPE;
4082 	vpmod->entry_count = 1;
4083 	vpmod->command = VCT_COMMAND_MOD_ENABLE_VPS;
4084 	vpmod->vp_count = 1;
4085 	vpmod->vp_index1 = vha->vp_idx;
4086 	vpmod->options_idx1 = BIT_3|BIT_4|BIT_5;
4087 
4088 	qlt_modify_vp_config(vha, vpmod);
4089 
4090 	memcpy(vpmod->node_name_idx1, vha->node_name, WWN_SIZE);
4091 	memcpy(vpmod->port_name_idx1, vha->port_name, WWN_SIZE);
4092 	vpmod->entry_count = 1;
4093 
4094 	rval = qla2x00_issue_iocb(base_vha, vpmod, vpmod_dma, 0);
4095 	if (rval != QLA_SUCCESS) {
4096 		ql_dbg(ql_dbg_mbx, vha, 0x10bd,
4097 		    "Failed to issue VP config IOCB (%x).\n", rval);
4098 	} else if (vpmod->comp_status != 0) {
4099 		ql_dbg(ql_dbg_mbx, vha, 0x10be,
4100 		    "Failed to complete IOCB -- error status (%x).\n",
4101 		    vpmod->comp_status);
4102 		rval = QLA_FUNCTION_FAILED;
4103 	} else if (vpmod->comp_status != cpu_to_le16(CS_COMPLETE)) {
4104 		ql_dbg(ql_dbg_mbx, vha, 0x10bf,
4105 		    "Failed to complete IOCB -- completion status (%x).\n",
4106 		    le16_to_cpu(vpmod->comp_status));
4107 		rval = QLA_FUNCTION_FAILED;
4108 	} else {
4109 		/* EMPTY */
4110 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c0,
4111 		    "Done %s.\n", __func__);
4112 		fc_vport_set_state(vha->fc_vport, FC_VPORT_INITIALIZING);
4113 	}
4114 	dma_pool_free(ha->s_dma_pool, vpmod, vpmod_dma);
4115 
4116 	return rval;
4117 }
4118 
4119 /*
4120  * qla2x00_send_change_request
4121  *	Register to receive, or clear, RSCN notifications from the fabric controller
4122  *
4123  * Input:
4124  *	ha = adapter block pointer
4125  *	format = registration format:
4126  *		0 - Reserved
4127  *		1 - Fabric detected registration
4128  *		2 - N_port detected registration
4129  *		3 - Full registration
4130  *		FF - clear registration
4131  *	vp_idx = Virtual port index
4132  *
4133  * Returns:
4134  *	qla2x00 local function return status code.
4135  *
4136  * Context:
4137  *	Kernel Context
4138  */
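/*
 * A minimal usage sketch (assumed caller, not taken from this file):
 * requesting full RSCN registration for the base port would look like
 *
 *	if (qla2x00_send_change_request(vha, 0x3, 0) != QLA_SUCCESS)
 *		ql_log(ql_log_warn, vha, 0x0000,
 *		    "SCR registration failed.\n");
 *
 * where format 0x3 selects "Full registration" per the table above and the
 * debug code 0x0000 is a placeholder.
 */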
4139 
4140 int
4141 qla2x00_send_change_request(scsi_qla_host_t *vha, uint16_t format,
4142 			    uint16_t vp_idx)
4143 {
4144 	int rval;
4145 	mbx_cmd_t mc;
4146 	mbx_cmd_t *mcp = &mc;
4147 
4148 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c7,
4149 	    "Entered %s.\n", __func__);
4150 
4151 	mcp->mb[0] = MBC_SEND_CHANGE_REQUEST;
4152 	mcp->mb[1] = format;
4153 	mcp->mb[9] = vp_idx;
4154 	mcp->out_mb = MBX_9|MBX_1|MBX_0;
4155 	mcp->in_mb = MBX_0|MBX_1;
4156 	mcp->tov = MBX_TOV_SECONDS;
4157 	mcp->flags = 0;
4158 	rval = qla2x00_mailbox_command(vha, mcp);
4159 
4160 	if (rval == QLA_SUCCESS) {
4161 		if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
4162 			rval = BIT_1;
4163 		}
4164 	} else
4165 		rval = BIT_1;
4166 
4167 	return rval;
4168 }
4169 
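/*
 * qla2x00_dump_ram
 *	Copy RISC RAM starting at 'addr' into the host buffer at 'req_dma'.
 *	Uses MBC_DUMP_RISC_RAM_EXTENDED when the address needs more than
 *	16 bits or the ISP is FWI2-capable, otherwise the legacy
 *	MBC_DUMP_RISC_RAM form with a 16-bit transfer size.
 */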
4170 int
4171 qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
4172     uint32_t size)
4173 {
4174 	int rval;
4175 	mbx_cmd_t mc;
4176 	mbx_cmd_t *mcp = &mc;
4177 
4178 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1009,
4179 	    "Entered %s.\n", __func__);
4180 
4181 	if (MSW(addr) || IS_FWI2_CAPABLE(vha->hw)) {
4182 		mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
4183 		mcp->mb[8] = MSW(addr);
4184 		mcp->out_mb = MBX_8|MBX_0;
4185 	} else {
4186 		mcp->mb[0] = MBC_DUMP_RISC_RAM;
4187 		mcp->out_mb = MBX_0;
4188 	}
4189 	mcp->mb[1] = LSW(addr);
4190 	mcp->mb[2] = MSW(req_dma);
4191 	mcp->mb[3] = LSW(req_dma);
4192 	mcp->mb[6] = MSW(MSD(req_dma));
4193 	mcp->mb[7] = LSW(MSD(req_dma));
4194 	mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
4195 	if (IS_FWI2_CAPABLE(vha->hw)) {
4196 		mcp->mb[4] = MSW(size);
4197 		mcp->mb[5] = LSW(size);
4198 		mcp->out_mb |= MBX_5|MBX_4;
4199 	} else {
4200 		mcp->mb[4] = LSW(size);
4201 		mcp->out_mb |= MBX_4;
4202 	}
4203 
4204 	mcp->in_mb = MBX_0;
4205 	mcp->tov = MBX_TOV_SECONDS;
4206 	mcp->flags = 0;
4207 	rval = qla2x00_mailbox_command(vha, mcp);
4208 
4209 	if (rval != QLA_SUCCESS) {
4210 		ql_dbg(ql_dbg_mbx, vha, 0x1008,
4211 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4212 	} else {
4213 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1007,
4214 		    "Done %s.\n", __func__);
4215 	}
4216 
4217 	return rval;
4218 }
4219 /* 84XX Support **************************************************************/
4220 
4221 struct cs84xx_mgmt_cmd {
4222 	union {
4223 		struct verify_chip_entry_84xx req;
4224 		struct verify_chip_rsp_84xx rsp;
4225 	} p;
4226 };
4227 
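/*
 * qla84xx_verify_chip
 *	Issue a Verify Chip IOCB to the CS84xx.  If the verify/update fails,
 *	it is retried once with VCO_DONT_UPDATE_FW set; on success the
 *	operational firmware version reported by the chip is cached in
 *	ha->cs84xx->op_fw_version.
 */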
4228 int
4229 qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status)
4230 {
4231 	int rval, retry;
4232 	struct cs84xx_mgmt_cmd *mn;
4233 	dma_addr_t mn_dma;
4234 	uint16_t options;
4235 	unsigned long flags;
4236 	struct qla_hw_data *ha = vha->hw;
4237 
4238 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c8,
4239 	    "Entered %s.\n", __func__);
4240 
4241 	mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
4242 	if (mn == NULL) {
4243 		return QLA_MEMORY_ALLOC_FAILED;
4244 	}
4245 
4246 	/* Force Update? */
4247 	options = ha->cs84xx->fw_update ? VCO_FORCE_UPDATE : 0;
4248 	/* Diagnostic firmware? */
4249 	/* options |= MENLO_DIAG_FW; */
4250 	/* We update the firmware with only one data sequence. */
4251 	options |= VCO_END_OF_DATA;
4252 
4253 	do {
4254 		retry = 0;
4255 		memset(mn, 0, sizeof(*mn));
4256 		mn->p.req.entry_type = VERIFY_CHIP_IOCB_TYPE;
4257 		mn->p.req.entry_count = 1;
4258 		mn->p.req.options = cpu_to_le16(options);
4259 
4260 		ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111c,
4261 		    "Dump of Verify Request.\n");
4262 		ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111e,
4263 		    mn, sizeof(*mn));
4264 
4265 		rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
4266 		if (rval != QLA_SUCCESS) {
4267 			ql_dbg(ql_dbg_mbx, vha, 0x10cb,
4268 			    "Failed to issue verify IOCB (%x).\n", rval);
4269 			goto verify_done;
4270 		}
4271 
4272 		ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1110,
4273 		    "Dump of Verify Response.\n");
4274 		ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1118,
4275 		    mn, sizeof(*mn));
4276 
4277 		status[0] = le16_to_cpu(mn->p.rsp.comp_status);
4278 		status[1] = status[0] == CS_VCS_CHIP_FAILURE ?
4279 		    le16_to_cpu(mn->p.rsp.failure_code) : 0;
4280 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ce,
4281 		    "cs=%x fc=%x.\n", status[0], status[1]);
4282 
4283 		if (status[0] != CS_COMPLETE) {
4284 			rval = QLA_FUNCTION_FAILED;
4285 			if (!(options & VCO_DONT_UPDATE_FW)) {
4286 				ql_dbg(ql_dbg_mbx, vha, 0x10cf,
4287 				    "Firmware update failed. Retrying "
4288 				    "without firmware update.\n");
4289 				options |= VCO_DONT_UPDATE_FW;
4290 				options &= ~VCO_FORCE_UPDATE;
4291 				retry = 1;
4292 			}
4293 		} else {
4294 			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d0,
4295 			    "Firmware updated to %x.\n",
4296 			    le32_to_cpu(mn->p.rsp.fw_ver));
4297 
4298 			/* NOTE: we only update OP firmware. */
4299 			spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
4300 			ha->cs84xx->op_fw_version =
4301 			    le32_to_cpu(mn->p.rsp.fw_ver);
4302 			spin_unlock_irqrestore(&ha->cs84xx->access_lock,
4303 			    flags);
4304 		}
4305 	} while (retry);
4306 
4307 verify_done:
4308 	dma_pool_free(ha->s_dma_pool, mn, mn_dma);
4309 
4310 	if (rval != QLA_SUCCESS) {
4311 		ql_dbg(ql_dbg_mbx, vha, 0x10d1,
4312 		    "Failed=%x.\n", rval);
4313 	} else {
4314 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d2,
4315 		    "Done %s.\n", __func__);
4316 	}
4317 
4318 	return rval;
4319 }
4320 
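/*
 * qla25xx_init_req_que
 *	Initialize an additional request queue through MBC_INITIALIZE_MULTIQ:
 *	programs the queue DMA address, length, id, vp index, QoS and RID,
 *	and zeroes the hardware in/out pointers before issuing the command.
 */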
4321 int
4322 qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
4323 {
4324 	int rval;
4325 	unsigned long flags;
4326 	mbx_cmd_t mc;
4327 	mbx_cmd_t *mcp = &mc;
4328 	struct qla_hw_data *ha = vha->hw;
4329 
4330 	if (!ha->flags.fw_started)
4331 		return QLA_SUCCESS;
4332 
4333 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d3,
4334 	    "Entered %s.\n", __func__);
4335 
4336 	if (IS_SHADOW_REG_CAPABLE(ha))
4337 		req->options |= BIT_13;
4338 
4339 	mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
4340 	mcp->mb[1] = req->options;
4341 	mcp->mb[2] = MSW(LSD(req->dma));
4342 	mcp->mb[3] = LSW(LSD(req->dma));
4343 	mcp->mb[6] = MSW(MSD(req->dma));
4344 	mcp->mb[7] = LSW(MSD(req->dma));
4345 	mcp->mb[5] = req->length;
4346 	if (req->rsp)
4347 		mcp->mb[10] = req->rsp->id;
4348 	mcp->mb[12] = req->qos;
4349 	mcp->mb[11] = req->vp_idx;
4350 	mcp->mb[13] = req->rid;
4351 	if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
4352 		mcp->mb[15] = 0;
4353 
4354 	mcp->mb[4] = req->id;
4355 	/* que in ptr index */
4356 	mcp->mb[8] = 0;
4357 	/* que out ptr index */
4358 	mcp->mb[9] = *req->out_ptr = 0;
4359 	mcp->out_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7|
4360 			MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4361 	mcp->in_mb = MBX_0;
4362 	mcp->flags = MBX_DMA_OUT;
4363 	mcp->tov = MBX_TOV_SECONDS * 2;
4364 
4365 	if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
4366 	    IS_QLA28XX(ha))
4367 		mcp->in_mb |= MBX_1;
4368 	if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
4369 		mcp->out_mb |= MBX_15;
4370 		/* debug q create issue in SR-IOV */
4371 		mcp->in_mb |= MBX_9 | MBX_8 | MBX_7;
4372 	}
4373 
4374 	spin_lock_irqsave(&ha->hardware_lock, flags);
4375 	if (!(req->options & BIT_0)) {
4376 		WRT_REG_DWORD(req->req_q_in, 0);
4377 		if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
4378 			WRT_REG_DWORD(req->req_q_out, 0);
4379 	}
4380 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
4381 
4382 	rval = qla2x00_mailbox_command(vha, mcp);
4383 	if (rval != QLA_SUCCESS) {
4384 		ql_dbg(ql_dbg_mbx, vha, 0x10d4,
4385 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4386 	} else {
4387 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d5,
4388 		    "Done %s.\n", __func__);
4389 	}
4390 
4391 	return rval;
4392 }
4393 
4394 int
4395 qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
4396 {
4397 	int rval;
4398 	unsigned long flags;
4399 	mbx_cmd_t mc;
4400 	mbx_cmd_t *mcp = &mc;
4401 	struct qla_hw_data *ha = vha->hw;
4402 
4403 	if (!ha->flags.fw_started)
4404 		return QLA_SUCCESS;
4405 
4406 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d6,
4407 	    "Entered %s.\n", __func__);
4408 
4409 	if (IS_SHADOW_REG_CAPABLE(ha))
4410 		rsp->options |= BIT_13;
4411 
4412 	mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
4413 	mcp->mb[1] = rsp->options;
4414 	mcp->mb[2] = MSW(LSD(rsp->dma));
4415 	mcp->mb[3] = LSW(LSD(rsp->dma));
4416 	mcp->mb[6] = MSW(MSD(rsp->dma));
4417 	mcp->mb[7] = LSW(MSD(rsp->dma));
4418 	mcp->mb[5] = rsp->length;
4419 	mcp->mb[14] = rsp->msix->entry;
4420 	mcp->mb[13] = rsp->rid;
4421 	if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
4422 		mcp->mb[15] = 0;
4423 
4424 	mcp->mb[4] = rsp->id;
4425 	/* que in ptr index */
4426 	mcp->mb[8] = *rsp->in_ptr = 0;
4427 	/* que out ptr index */
4428 	mcp->mb[9] = 0;
4429 	mcp->out_mb = MBX_14|MBX_13|MBX_9|MBX_8|MBX_7
4430 			|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4431 	mcp->in_mb = MBX_0;
4432 	mcp->flags = MBX_DMA_OUT;
4433 	mcp->tov = MBX_TOV_SECONDS * 2;
4434 
4435 	if (IS_QLA81XX(ha)) {
4436 		mcp->out_mb |= MBX_12|MBX_11|MBX_10;
4437 		mcp->in_mb |= MBX_1;
4438 	} else if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
4439 		mcp->out_mb |= MBX_15|MBX_12|MBX_11|MBX_10;
4440 		mcp->in_mb |= MBX_1;
4441 		/* debug q create issue in SR-IOV */
4442 		mcp->in_mb |= MBX_9 | MBX_8 | MBX_7;
4443 	}
4444 
4445 	spin_lock_irqsave(&ha->hardware_lock, flags);
4446 	if (!(rsp->options & BIT_0)) {
4447 		WRT_REG_DWORD(rsp->rsp_q_out, 0);
4448 		if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
4449 			WRT_REG_DWORD(rsp->rsp_q_in, 0);
4450 	}
4451 
4452 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
4453 
4454 	rval = qla2x00_mailbox_command(vha, mcp);
4455 	if (rval != QLA_SUCCESS) {
4456 		ql_dbg(ql_dbg_mbx, vha, 0x10d7,
4457 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4458 	} else {
4459 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d8,
4460 		    "Done %s.\n", __func__);
4461 	}
4462 
4463 	return rval;
4464 }
4465 
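/*
 * qla81xx_idc_ack
 *	Acknowledge an Inter-Driver Communication (IDC) message by echoing
 *	the received IDC mailbox registers back to the firmware with
 *	MBC_IDC_ACK.
 */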
4466 int
4467 qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb)
4468 {
4469 	int rval;
4470 	mbx_cmd_t mc;
4471 	mbx_cmd_t *mcp = &mc;
4472 
4473 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d9,
4474 	    "Entered %s.\n", __func__);
4475 
4476 	mcp->mb[0] = MBC_IDC_ACK;
4477 	memcpy(&mcp->mb[1], mb, QLA_IDC_ACK_REGS * sizeof(uint16_t));
4478 	mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4479 	mcp->in_mb = MBX_0;
4480 	mcp->tov = MBX_TOV_SECONDS;
4481 	mcp->flags = 0;
4482 	rval = qla2x00_mailbox_command(vha, mcp);
4483 
4484 	if (rval != QLA_SUCCESS) {
4485 		ql_dbg(ql_dbg_mbx, vha, 0x10da,
4486 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4487 	} else {
4488 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10db,
4489 		    "Done %s.\n", __func__);
4490 	}
4491 
4492 	return rval;
4493 }
4494 
4495 int
4496 qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size)
4497 {
4498 	int rval;
4499 	mbx_cmd_t mc;
4500 	mbx_cmd_t *mcp = &mc;
4501 
4502 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10dc,
4503 	    "Entered %s.\n", __func__);
4504 
4505 	if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
4506 	    !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
4507 		return QLA_FUNCTION_FAILED;
4508 
4509 	mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
4510 	mcp->mb[1] = FAC_OPT_CMD_GET_SECTOR_SIZE;
4511 	mcp->out_mb = MBX_1|MBX_0;
4512 	mcp->in_mb = MBX_1|MBX_0;
4513 	mcp->tov = MBX_TOV_SECONDS;
4514 	mcp->flags = 0;
4515 	rval = qla2x00_mailbox_command(vha, mcp);
4516 
4517 	if (rval != QLA_SUCCESS) {
4518 		ql_dbg(ql_dbg_mbx, vha, 0x10dd,
4519 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
4520 		    rval, mcp->mb[0], mcp->mb[1]);
4521 	} else {
4522 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10de,
4523 		    "Done %s.\n", __func__);
4524 		*sector_size = mcp->mb[1];
4525 	}
4526 
4527 	return rval;
4528 }
4529 
4530 int
4531 qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable)
4532 {
4533 	int rval;
4534 	mbx_cmd_t mc;
4535 	mbx_cmd_t *mcp = &mc;
4536 
4537 	if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
4538 	    !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
4539 		return QLA_FUNCTION_FAILED;
4540 
4541 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10df,
4542 	    "Entered %s.\n", __func__);
4543 
4544 	mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
4545 	mcp->mb[1] = enable ? FAC_OPT_CMD_WRITE_ENABLE :
4546 	    FAC_OPT_CMD_WRITE_PROTECT;
4547 	mcp->out_mb = MBX_1|MBX_0;
4548 	mcp->in_mb = MBX_1|MBX_0;
4549 	mcp->tov = MBX_TOV_SECONDS;
4550 	mcp->flags = 0;
4551 	rval = qla2x00_mailbox_command(vha, mcp);
4552 
4553 	if (rval != QLA_SUCCESS) {
4554 		ql_dbg(ql_dbg_mbx, vha, 0x10e0,
4555 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
4556 		    rval, mcp->mb[0], mcp->mb[1]);
4557 	} else {
4558 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e1,
4559 		    "Done %s.\n", __func__);
4560 	}
4561 
4562 	return rval;
4563 }
4564 
4565 int
4566 qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish)
4567 {
4568 	int rval;
4569 	mbx_cmd_t mc;
4570 	mbx_cmd_t *mcp = &mc;
4571 
4572 	if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
4573 	    !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
4574 		return QLA_FUNCTION_FAILED;
4575 
4576 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2,
4577 	    "Entered %s.\n", __func__);
4578 
4579 	mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
4580 	mcp->mb[1] = FAC_OPT_CMD_ERASE_SECTOR;
4581 	mcp->mb[2] = LSW(start);
4582 	mcp->mb[3] = MSW(start);
4583 	mcp->mb[4] = LSW(finish);
4584 	mcp->mb[5] = MSW(finish);
4585 	mcp->out_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4586 	mcp->in_mb = MBX_2|MBX_1|MBX_0;
4587 	mcp->tov = MBX_TOV_SECONDS;
4588 	mcp->flags = 0;
4589 	rval = qla2x00_mailbox_command(vha, mcp);
4590 
4591 	if (rval != QLA_SUCCESS) {
4592 		ql_dbg(ql_dbg_mbx, vha, 0x10e3,
4593 		    "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
4594 		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
4595 	} else {
4596 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4,
4597 		    "Done %s.\n", __func__);
4598 	}
4599 
4600 	return rval;
4601 }
4602 
4603 int
4604 qla81xx_fac_semaphore_access(scsi_qla_host_t *vha, int lock)
4605 {
4606 	int rval = QLA_SUCCESS;
4607 	mbx_cmd_t mc;
4608 	mbx_cmd_t *mcp = &mc;
4609 	struct qla_hw_data *ha = vha->hw;
4610 
4611 	if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
4612 	    !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
4613 		return rval;
4614 
4615 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2,
4616 	    "Entered %s.\n", __func__);
4617 
4618 	mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
4619 	mcp->mb[1] = (lock ? FAC_OPT_CMD_LOCK_SEMAPHORE :
4620 	    FAC_OPT_CMD_UNLOCK_SEMAPHORE);
4621 	mcp->out_mb = MBX_1|MBX_0;
4622 	mcp->in_mb = MBX_1|MBX_0;
4623 	mcp->tov = MBX_TOV_SECONDS;
4624 	mcp->flags = 0;
4625 	rval = qla2x00_mailbox_command(vha, mcp);
4626 
4627 	if (rval != QLA_SUCCESS) {
4628 		ql_dbg(ql_dbg_mbx, vha, 0x10e3,
4629 		    "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
4630 		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
4631 	} else {
4632 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4,
4633 		    "Done %s.\n", __func__);
4634 	}
4635 
4636 	return rval;
4637 }
4638 
4639 int
4640 qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)
4641 {
4642 	int rval = 0;
4643 	mbx_cmd_t mc;
4644 	mbx_cmd_t *mcp = &mc;
4645 
4646 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e5,
4647 	    "Entered %s.\n", __func__);
4648 
4649 	mcp->mb[0] = MBC_RESTART_MPI_FW;
4650 	mcp->out_mb = MBX_0;
4651 	mcp->in_mb = MBX_0|MBX_1;
4652 	mcp->tov = MBX_TOV_SECONDS;
4653 	mcp->flags = 0;
4654 	rval = qla2x00_mailbox_command(vha, mcp);
4655 
4656 	if (rval != QLA_SUCCESS) {
4657 		ql_dbg(ql_dbg_mbx, vha, 0x10e6,
4658 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
4659 		    rval, mcp->mb[0], mcp->mb[1]);
4660 	} else {
4661 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e7,
4662 		    "Done %s.\n", __func__);
4663 	}
4664 
4665 	return rval;
4666 }
4667 
4668 int
4669 qla82xx_set_driver_version(scsi_qla_host_t *vha, char *version)
4670 {
4671 	int rval;
4672 	mbx_cmd_t mc;
4673 	mbx_cmd_t *mcp = &mc;
4674 	int i;
4675 	int len;
4676 	uint16_t *str;
4677 	struct qla_hw_data *ha = vha->hw;
4678 
4679 	if (!IS_P3P_TYPE(ha))
4680 		return QLA_FUNCTION_FAILED;
4681 
4682 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117b,
4683 	    "Entered %s.\n", __func__);
4684 
4685 	str = (void *)version;
4686 	len = strlen(version);
4687 
4688 	mcp->mb[0] = MBC_SET_RNID_PARAMS;
4689 	mcp->mb[1] = RNID_TYPE_SET_VERSION << 8;
4690 	mcp->out_mb = MBX_1|MBX_0;
4691 	for (i = 4; i < 16 && len; i++, str++, len -= 2) {
4692 		mcp->mb[i] = cpu_to_le16p(str);
4693 		mcp->out_mb |= 1<<i;
4694 	}
4695 	for (; i < 16; i++) {
4696 		mcp->mb[i] = 0;
4697 		mcp->out_mb |= 1<<i;
4698 	}
4699 	mcp->in_mb = MBX_1|MBX_0;
4700 	mcp->tov = MBX_TOV_SECONDS;
4701 	mcp->flags = 0;
4702 	rval = qla2x00_mailbox_command(vha, mcp);
4703 
4704 	if (rval != QLA_SUCCESS) {
4705 		ql_dbg(ql_dbg_mbx, vha, 0x117c,
4706 		    "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
4707 	} else {
4708 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117d,
4709 		    "Done %s.\n", __func__);
4710 	}
4711 
4712 	return rval;
4713 }
4714 
4715 int
4716 qla25xx_set_driver_version(scsi_qla_host_t *vha, char *version)
4717 {
4718 	int rval;
4719 	mbx_cmd_t mc;
4720 	mbx_cmd_t *mcp = &mc;
4721 	int len;
4722 	uint16_t dwlen;
4723 	uint8_t *str;
4724 	dma_addr_t str_dma;
4725 	struct qla_hw_data *ha = vha->hw;
4726 
4727 	if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha) ||
4728 	    IS_P3P_TYPE(ha))
4729 		return QLA_FUNCTION_FAILED;
4730 
4731 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117e,
4732 	    "Entered %s.\n", __func__);
4733 
4734 	str = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &str_dma);
4735 	if (!str) {
4736 		ql_log(ql_log_warn, vha, 0x117f,
4737 		    "Failed to allocate driver version param.\n");
4738 		return QLA_MEMORY_ALLOC_FAILED;
4739 	}
4740 
4741 	memcpy(str, "\x7\x3\x11\x0", 4);
4742 	dwlen = str[0];
4743 	len = dwlen * 4 - 4;
4744 	memset(str + 4, 0, len);
4745 	if (len > strlen(version))
4746 		len = strlen(version);
4747 	memcpy(str + 4, version, len);
4748 
4749 	mcp->mb[0] = MBC_SET_RNID_PARAMS;
4750 	mcp->mb[1] = RNID_TYPE_SET_VERSION << 8 | dwlen;
4751 	mcp->mb[2] = MSW(LSD(str_dma));
4752 	mcp->mb[3] = LSW(LSD(str_dma));
4753 	mcp->mb[6] = MSW(MSD(str_dma));
4754 	mcp->mb[7] = LSW(MSD(str_dma));
4755 	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
4756 	mcp->in_mb = MBX_1|MBX_0;
4757 	mcp->tov = MBX_TOV_SECONDS;
4758 	mcp->flags = 0;
4759 	rval = qla2x00_mailbox_command(vha, mcp);
4760 
4761 	if (rval != QLA_SUCCESS) {
4762 		ql_dbg(ql_dbg_mbx, vha, 0x1180,
4763 		    "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
4764 	} else {
4765 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1181,
4766 		    "Done %s.\n", __func__);
4767 	}
4768 
4769 	dma_pool_free(ha->s_dma_pool, str, str_dma);
4770 
4771 	return rval;
4772 }
4773 
4774 int
4775 qla24xx_get_port_login_templ(scsi_qla_host_t *vha, dma_addr_t buf_dma,
4776 			     void *buf, uint16_t bufsiz)
4777 {
4778 	int rval, i;
4779 	mbx_cmd_t mc;
4780 	mbx_cmd_t *mcp = &mc;
4781 	uint32_t	*bp;
4782 
4783 	if (!IS_FWI2_CAPABLE(vha->hw))
4784 		return QLA_FUNCTION_FAILED;
4785 
4786 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159,
4787 	    "Entered %s.\n", __func__);
4788 
4789 	mcp->mb[0] = MBC_GET_RNID_PARAMS;
4790 	mcp->mb[1] = RNID_TYPE_PORT_LOGIN << 8;
4791 	mcp->mb[2] = MSW(buf_dma);
4792 	mcp->mb[3] = LSW(buf_dma);
4793 	mcp->mb[6] = MSW(MSD(buf_dma));
4794 	mcp->mb[7] = LSW(MSD(buf_dma));
4795 	mcp->mb[8] = bufsiz/4;
4796 	mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4797 	mcp->in_mb = MBX_1|MBX_0;
4798 	mcp->tov = MBX_TOV_SECONDS;
4799 	mcp->flags = 0;
4800 	rval = qla2x00_mailbox_command(vha, mcp);
4801 
4802 	if (rval != QLA_SUCCESS) {
4803 		ql_dbg(ql_dbg_mbx, vha, 0x115a,
4804 		    "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
4805 	} else {
4806 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b,
4807 		    "Done %s.\n", __func__);
4808 		bp = (uint32_t *) buf;
4809 		for (i = 0; i < (bufsiz-4)/4; i++, bp++)
4810 			*bp = le32_to_cpu(*bp);
4811 	}
4812 
4813 	return rval;
4814 }
4815 
4816 static int
4817 qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp)
4818 {
4819 	int rval;
4820 	mbx_cmd_t mc;
4821 	mbx_cmd_t *mcp = &mc;
4822 
4823 	if (!IS_FWI2_CAPABLE(vha->hw))
4824 		return QLA_FUNCTION_FAILED;
4825 
4826 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159,
4827 	    "Entered %s.\n", __func__);
4828 
4829 	mcp->mb[0] = MBC_GET_RNID_PARAMS;
4830 	mcp->mb[1] = RNID_TYPE_ASIC_TEMP << 8;
4831 	mcp->out_mb = MBX_1|MBX_0;
4832 	mcp->in_mb = MBX_1|MBX_0;
4833 	mcp->tov = MBX_TOV_SECONDS;
4834 	mcp->flags = 0;
4835 	rval = qla2x00_mailbox_command(vha, mcp);
4836 	*temp = mcp->mb[1];
4837 
4838 	if (rval != QLA_SUCCESS) {
4839 		ql_dbg(ql_dbg_mbx, vha, 0x115a,
4840 		    "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
4841 	} else {
4842 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b,
4843 		    "Done %s.\n", __func__);
4844 	}
4845 
4846 	return rval;
4847 }
4848 
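/*
 * qla2x00_read_sfp
 *	Read 'len' bytes from the transceiver/SFP device 'dev' at offset
 *	'off' into the buffer at sfp_dma (MBC_READ_SFP).  A single-byte read
 *	(len == 1) is returned directly through *sfp; a command error with
 *	mb[1] == 0x22 is translated to QLA_INTERFACE_ERROR (SFP absent).
 *	qla2x00_get_thermal_temp() below uses this to read the SFP thermal
 *	byte on select ISP25xx boards.
 */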
4849 int
4850 qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
4851 	uint16_t dev, uint16_t off, uint16_t len, uint16_t opt)
4852 {
4853 	int rval;
4854 	mbx_cmd_t mc;
4855 	mbx_cmd_t *mcp = &mc;
4856 	struct qla_hw_data *ha = vha->hw;
4857 
4858 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
4859 	    "Entered %s.\n", __func__);
4860 
4861 	if (!IS_FWI2_CAPABLE(ha))
4862 		return QLA_FUNCTION_FAILED;
4863 
4864 	if (len == 1)
4865 		opt |= BIT_0;
4866 
4867 	mcp->mb[0] = MBC_READ_SFP;
4868 	mcp->mb[1] = dev;
4869 	mcp->mb[2] = MSW(sfp_dma);
4870 	mcp->mb[3] = LSW(sfp_dma);
4871 	mcp->mb[6] = MSW(MSD(sfp_dma));
4872 	mcp->mb[7] = LSW(MSD(sfp_dma));
4873 	mcp->mb[8] = len;
4874 	mcp->mb[9] = off;
4875 	mcp->mb[10] = opt;
4876 	mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
4877 	mcp->in_mb = MBX_1|MBX_0;
4878 	mcp->tov = MBX_TOV_SECONDS;
4879 	mcp->flags = 0;
4880 	rval = qla2x00_mailbox_command(vha, mcp);
4881 
4882 	if (opt & BIT_0)
4883 		*sfp = mcp->mb[1];
4884 
4885 	if (rval != QLA_SUCCESS) {
4886 		ql_dbg(ql_dbg_mbx, vha, 0x10e9,
4887 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4888 		if (mcp->mb[0] == MBS_COMMAND_ERROR && mcp->mb[1] == 0x22) {
4889 			/* sfp is not there */
4890 			rval = QLA_INTERFACE_ERROR;
4891 		}
4892 	} else {
4893 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
4894 		    "Done %s.\n", __func__);
4895 	}
4896 
4897 	return rval;
4898 }
4899 
4900 int
4901 qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
4902 	uint16_t dev, uint16_t off, uint16_t len, uint16_t opt)
4903 {
4904 	int rval;
4905 	mbx_cmd_t mc;
4906 	mbx_cmd_t *mcp = &mc;
4907 	struct qla_hw_data *ha = vha->hw;
4908 
4909 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10eb,
4910 	    "Entered %s.\n", __func__);
4911 
4912 	if (!IS_FWI2_CAPABLE(ha))
4913 		return QLA_FUNCTION_FAILED;
4914 
4915 	if (len == 1)
4916 		opt |= BIT_0;
4917 
4918 	if (opt & BIT_0)
4919 		len = *sfp;
4920 
4921 	mcp->mb[0] = MBC_WRITE_SFP;
4922 	mcp->mb[1] = dev;
4923 	mcp->mb[2] = MSW(sfp_dma);
4924 	mcp->mb[3] = LSW(sfp_dma);
4925 	mcp->mb[6] = MSW(MSD(sfp_dma));
4926 	mcp->mb[7] = LSW(MSD(sfp_dma));
4927 	mcp->mb[8] = len;
4928 	mcp->mb[9] = off;
4929 	mcp->mb[10] = opt;
4930 	mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
4931 	mcp->in_mb = MBX_1|MBX_0;
4932 	mcp->tov = MBX_TOV_SECONDS;
4933 	mcp->flags = 0;
4934 	rval = qla2x00_mailbox_command(vha, mcp);
4935 
4936 	if (rval != QLA_SUCCESS) {
4937 		ql_dbg(ql_dbg_mbx, vha, 0x10ec,
4938 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4939 	} else {
4940 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ed,
4941 		    "Done %s.\n", __func__);
4942 	}
4943 
4944 	return rval;
4945 }
4946 
4947 int
4948 qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma,
4949     uint16_t size_in_bytes, uint16_t *actual_size)
4950 {
4951 	int rval;
4952 	mbx_cmd_t mc;
4953 	mbx_cmd_t *mcp = &mc;
4954 
4955 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ee,
4956 	    "Entered %s.\n", __func__);
4957 
4958 	if (!IS_CNA_CAPABLE(vha->hw))
4959 		return QLA_FUNCTION_FAILED;
4960 
4961 	mcp->mb[0] = MBC_GET_XGMAC_STATS;
4962 	mcp->mb[2] = MSW(stats_dma);
4963 	mcp->mb[3] = LSW(stats_dma);
4964 	mcp->mb[6] = MSW(MSD(stats_dma));
4965 	mcp->mb[7] = LSW(MSD(stats_dma));
4966 	mcp->mb[8] = size_in_bytes >> 2;
4967 	mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
4968 	mcp->in_mb = MBX_2|MBX_1|MBX_0;
4969 	mcp->tov = MBX_TOV_SECONDS;
4970 	mcp->flags = 0;
4971 	rval = qla2x00_mailbox_command(vha, mcp);
4972 
4973 	if (rval != QLA_SUCCESS) {
4974 		ql_dbg(ql_dbg_mbx, vha, 0x10ef,
4975 		    "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
4976 		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
4977 	} else {
4978 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f0,
4979 		    "Done %s.\n", __func__);
4980 
4981 
4982 		*actual_size = mcp->mb[2] << 2;
4983 	}
4984 
4985 	return rval;
4986 }
4987 
4988 int
4989 qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma,
4990     uint16_t size)
4991 {
4992 	int rval;
4993 	mbx_cmd_t mc;
4994 	mbx_cmd_t *mcp = &mc;
4995 
4996 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f1,
4997 	    "Entered %s.\n", __func__);
4998 
4999 	if (!IS_CNA_CAPABLE(vha->hw))
5000 		return QLA_FUNCTION_FAILED;
5001 
5002 	mcp->mb[0] = MBC_GET_DCBX_PARAMS;
5003 	mcp->mb[1] = 0;
5004 	mcp->mb[2] = MSW(tlv_dma);
5005 	mcp->mb[3] = LSW(tlv_dma);
5006 	mcp->mb[6] = MSW(MSD(tlv_dma));
5007 	mcp->mb[7] = LSW(MSD(tlv_dma));
5008 	mcp->mb[8] = size;
5009 	mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
5010 	mcp->in_mb = MBX_2|MBX_1|MBX_0;
5011 	mcp->tov = MBX_TOV_SECONDS;
5012 	mcp->flags = 0;
5013 	rval = qla2x00_mailbox_command(vha, mcp);
5014 
5015 	if (rval != QLA_SUCCESS) {
5016 		ql_dbg(ql_dbg_mbx, vha, 0x10f2,
5017 		    "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
5018 		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
5019 	} else {
5020 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f3,
5021 		    "Done %s.\n", __func__);
5022 	}
5023 
5024 	return rval;
5025 }
5026 
5027 int
5028 qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data)
5029 {
5030 	int rval;
5031 	mbx_cmd_t mc;
5032 	mbx_cmd_t *mcp = &mc;
5033 
5034 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f4,
5035 	    "Entered %s.\n", __func__);
5036 
5037 	if (!IS_FWI2_CAPABLE(vha->hw))
5038 		return QLA_FUNCTION_FAILED;
5039 
5040 	mcp->mb[0] = MBC_READ_RAM_EXTENDED;
5041 	mcp->mb[1] = LSW(risc_addr);
5042 	mcp->mb[8] = MSW(risc_addr);
5043 	mcp->out_mb = MBX_8|MBX_1|MBX_0;
5044 	mcp->in_mb = MBX_3|MBX_2|MBX_0;
5045 	mcp->tov = 30;
5046 	mcp->flags = 0;
5047 	rval = qla2x00_mailbox_command(vha, mcp);
5048 	if (rval != QLA_SUCCESS) {
5049 		ql_dbg(ql_dbg_mbx, vha, 0x10f5,
5050 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5051 	} else {
5052 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f6,
5053 		    "Done %s.\n", __func__);
5054 		*data = mcp->mb[3] << 16 | mcp->mb[2];
5055 	}
5056 
5057 	return rval;
5058 }
5059 
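/*
 * qla2x00_loopback_test
 *	Run the MBC_DIAGNOSTIC_LOOP_BACK diagnostic using the send/receive
 *	DMA buffers, transfer size and iteration count described by *mreq.
 *	The resulting mailbox registers are copied back to *mresp.
 */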
5060 int
5061 qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
5062 	uint16_t *mresp)
5063 {
5064 	int rval;
5065 	mbx_cmd_t mc;
5066 	mbx_cmd_t *mcp = &mc;
5067 
5068 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f7,
5069 	    "Entered %s.\n", __func__);
5070 
5071 	memset(mcp->mb, 0, sizeof(mcp->mb));
5072 	mcp->mb[0] = MBC_DIAGNOSTIC_LOOP_BACK;
5073 	mcp->mb[1] = mreq->options | BIT_6;	/* BIT_6 specifies 64-bit addressing */
5074 
5075 	/* transfer count */
5076 	mcp->mb[10] = LSW(mreq->transfer_size);
5077 	mcp->mb[11] = MSW(mreq->transfer_size);
5078 
5079 	/* send data address */
5080 	mcp->mb[14] = LSW(mreq->send_dma);
5081 	mcp->mb[15] = MSW(mreq->send_dma);
5082 	mcp->mb[20] = LSW(MSD(mreq->send_dma));
5083 	mcp->mb[21] = MSW(MSD(mreq->send_dma));
5084 
5085 	/* receive data address */
5086 	mcp->mb[16] = LSW(mreq->rcv_dma);
5087 	mcp->mb[17] = MSW(mreq->rcv_dma);
5088 	mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
5089 	mcp->mb[7] = MSW(MSD(mreq->rcv_dma));
5090 
5091 	/* Iteration count */
5092 	mcp->mb[18] = LSW(mreq->iteration_count);
5093 	mcp->mb[19] = MSW(mreq->iteration_count);
5094 
5095 	mcp->out_mb = MBX_21|MBX_20|MBX_19|MBX_18|MBX_17|MBX_16|MBX_15|
5096 	    MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
5097 	if (IS_CNA_CAPABLE(vha->hw))
5098 		mcp->out_mb |= MBX_2;
5099 	mcp->in_mb = MBX_19|MBX_18|MBX_3|MBX_2|MBX_1|MBX_0;
5100 
5101 	mcp->buf_size = mreq->transfer_size;
5102 	mcp->tov = MBX_TOV_SECONDS;
5103 	mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5104 
5105 	rval = qla2x00_mailbox_command(vha, mcp);
5106 
5107 	if (rval != QLA_SUCCESS) {
5108 		ql_dbg(ql_dbg_mbx, vha, 0x10f8,
5109 		    "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[18]=%x "
5110 		    "mb[19]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2],
5111 		    mcp->mb[3], mcp->mb[18], mcp->mb[19]);
5112 	} else {
5113 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f9,
5114 		    "Done %s.\n", __func__);
5115 	}
5116 
5117 	/* Copy mailbox information */
5118 	memcpy(mresp, mcp->mb, 64);
5119 	return rval;
5120 }
5121 
5122 int
5123 qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
5124 	uint16_t *mresp)
5125 {
5126 	int rval;
5127 	mbx_cmd_t mc;
5128 	mbx_cmd_t *mcp = &mc;
5129 	struct qla_hw_data *ha = vha->hw;
5130 
5131 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fa,
5132 	    "Entered %s.\n", __func__);
5133 
5134 	memset(mcp->mb, 0, sizeof(mcp->mb));
5135 	mcp->mb[0] = MBC_DIAGNOSTIC_ECHO;
5136 	/* BIT_6 specifies 64-bit addressing */
5137 	mcp->mb[1] = mreq->options | BIT_15 | BIT_6;
5138 	if (IS_CNA_CAPABLE(ha)) {
5139 		mcp->mb[2] = vha->fcoe_fcf_idx;
5140 	}
5141 	mcp->mb[16] = LSW(mreq->rcv_dma);
5142 	mcp->mb[17] = MSW(mreq->rcv_dma);
5143 	mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
5144 	mcp->mb[7] = MSW(MSD(mreq->rcv_dma));
5145 
5146 	mcp->mb[10] = LSW(mreq->transfer_size);
5147 
5148 	mcp->mb[14] = LSW(mreq->send_dma);
5149 	mcp->mb[15] = MSW(mreq->send_dma);
5150 	mcp->mb[20] = LSW(MSD(mreq->send_dma));
5151 	mcp->mb[21] = MSW(MSD(mreq->send_dma));
5152 
5153 	mcp->out_mb = MBX_21|MBX_20|MBX_17|MBX_16|MBX_15|
5154 	    MBX_14|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
5155 	if (IS_CNA_CAPABLE(ha))
5156 		mcp->out_mb |= MBX_2;
5157 
5158 	mcp->in_mb = MBX_0;
5159 	if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
5160 	    IS_CNA_CAPABLE(ha) || IS_QLA2031(ha))
5161 		mcp->in_mb |= MBX_1;
5162 	if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha))
5163 		mcp->in_mb |= MBX_3;
5164 
5165 	mcp->tov = MBX_TOV_SECONDS;
5166 	mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5167 	mcp->buf_size = mreq->transfer_size;
5168 
5169 	rval = qla2x00_mailbox_command(vha, mcp);
5170 
5171 	if (rval != QLA_SUCCESS) {
5172 		ql_dbg(ql_dbg_mbx, vha, 0x10fb,
5173 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
5174 		    rval, mcp->mb[0], mcp->mb[1]);
5175 	} else {
5176 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fc,
5177 		    "Done %s.\n", __func__);
5178 	}
5179 
5180 	/* Copy mailbox information */
5181 	memcpy(mresp, mcp->mb, 64);
5182 	return rval;
5183 }
5184 
5185 int
5186 qla84xx_reset_chip(scsi_qla_host_t *vha, uint16_t enable_diagnostic)
5187 {
5188 	int rval;
5189 	mbx_cmd_t mc;
5190 	mbx_cmd_t *mcp = &mc;
5191 
5192 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fd,
5193 	    "Entered %s enable_diag=%d.\n", __func__, enable_diagnostic);
5194 
5195 	mcp->mb[0] = MBC_ISP84XX_RESET;
5196 	mcp->mb[1] = enable_diagnostic;
5197 	mcp->out_mb = MBX_1|MBX_0;
5198 	mcp->in_mb = MBX_1|MBX_0;
5199 	mcp->tov = MBX_TOV_SECONDS;
5200 	mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5201 	rval = qla2x00_mailbox_command(vha, mcp);
5202 
5203 	if (rval != QLA_SUCCESS)
5204 		ql_dbg(ql_dbg_mbx, vha, 0x10fe, "Failed=%x.\n", rval);
5205 	else
5206 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ff,
5207 		    "Done %s.\n", __func__);
5208 
5209 	return rval;
5210 }
5211 
5212 int
5213 qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
5214 {
5215 	int rval;
5216 	mbx_cmd_t mc;
5217 	mbx_cmd_t *mcp = &mc;
5218 
5219 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1100,
5220 	    "Entered %s.\n", __func__);
5221 
5222 	if (!IS_FWI2_CAPABLE(vha->hw))
5223 		return QLA_FUNCTION_FAILED;
5224 
5225 	mcp->mb[0] = MBC_WRITE_RAM_WORD_EXTENDED;
5226 	mcp->mb[1] = LSW(risc_addr);
5227 	mcp->mb[2] = LSW(data);
5228 	mcp->mb[3] = MSW(data);
5229 	mcp->mb[8] = MSW(risc_addr);
5230 	mcp->out_mb = MBX_8|MBX_3|MBX_2|MBX_1|MBX_0;
5231 	mcp->in_mb = MBX_1|MBX_0;
5232 	mcp->tov = 30;
5233 	mcp->flags = 0;
5234 	rval = qla2x00_mailbox_command(vha, mcp);
5235 	if (rval != QLA_SUCCESS) {
5236 		ql_dbg(ql_dbg_mbx, vha, 0x1101,
5237 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
5238 		    rval, mcp->mb[0], mcp->mb[1]);
5239 	} else {
5240 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1102,
5241 		    "Done %s.\n", __func__);
5242 	}
5243 
5244 	return rval;
5245 }
5246 
5247 int
5248 qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb)
5249 {
5250 	int rval;
5251 	uint32_t stat, timer;
5252 	uint16_t mb0 = 0;
5253 	struct qla_hw_data *ha = vha->hw;
5254 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
5255 
5256 	rval = QLA_SUCCESS;
5257 
5258 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1103,
5259 	    "Entered %s.\n", __func__);
5260 
5261 	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
5262 
5263 	/* Write the MBC data to the registers */
5264 	WRT_REG_WORD(&reg->mailbox0, MBC_WRITE_MPI_REGISTER);
5265 	WRT_REG_WORD(&reg->mailbox1, mb[0]);
5266 	WRT_REG_WORD(&reg->mailbox2, mb[1]);
5267 	WRT_REG_WORD(&reg->mailbox3, mb[2]);
5268 	WRT_REG_WORD(&reg->mailbox4, mb[3]);
5269 
5270 	WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
5271 
5272 	/* Poll for MBC interrupt */
5273 	for (timer = 6000000; timer; timer--) {
5274 		/* Check for pending interrupts. */
5275 		stat = RD_REG_DWORD(&reg->host_status);
5276 		if (stat & HSRX_RISC_INT) {
5277 			stat &= 0xff;
5278 
5279 			if (stat == 0x1 || stat == 0x2 ||
5280 			    stat == 0x10 || stat == 0x11) {
5281 				set_bit(MBX_INTERRUPT,
5282 				    &ha->mbx_cmd_flags);
5283 				mb0 = RD_REG_WORD(&reg->mailbox0);
5284 				WRT_REG_DWORD(&reg->hccr,
5285 				    HCCRX_CLR_RISC_INT);
5286 				RD_REG_DWORD(&reg->hccr);
5287 				break;
5288 			}
5289 		}
5290 		udelay(5);
5291 	}
5292 
5293 	if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags))
5294 		rval = mb0 & MBS_MASK;
5295 	else
5296 		rval = QLA_FUNCTION_FAILED;
5297 
5298 	if (rval != QLA_SUCCESS) {
5299 		ql_dbg(ql_dbg_mbx, vha, 0x1104,
5300 		    "Failed=%x mb[0]=%x.\n", rval, mb[0]);
5301 	} else {
5302 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1105,
5303 		    "Done %s.\n", __func__);
5304 	}
5305 
5306 	return rval;
5307 }
5308 
5309 /* Set the specified data rate */
5310 int
5311 qla2x00_set_data_rate(scsi_qla_host_t *vha, uint16_t mode)
5312 {
5313 	int rval;
5314 	mbx_cmd_t mc;
5315 	mbx_cmd_t *mcp = &mc;
5316 	struct qla_hw_data *ha = vha->hw;
5317 	uint16_t val;
5318 
5319 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106,
5320 	    "Entered %s speed:0x%x mode:0x%x.\n", __func__, ha->set_data_rate,
5321 	    mode);
5322 
5323 	if (!IS_FWI2_CAPABLE(ha))
5324 		return QLA_FUNCTION_FAILED;
5325 
5326 	memset(mcp, 0, sizeof(*mcp));
5327 	switch (ha->set_data_rate) {
5328 	case PORT_SPEED_AUTO:
5329 	case PORT_SPEED_4GB:
5330 	case PORT_SPEED_8GB:
5331 	case PORT_SPEED_16GB:
5332 	case PORT_SPEED_32GB:
5333 		val = ha->set_data_rate;
5334 		break;
5335 	default:
5336 		ql_log(ql_log_warn, vha, 0x1199,
5337 		    "Unrecognized speed setting:%d. Setting Autoneg\n",
5338 		    ha->set_data_rate);
5339 		val = ha->set_data_rate = PORT_SPEED_AUTO;
5340 		break;
5341 	}
5342 
5343 	mcp->mb[0] = MBC_DATA_RATE;
5344 	mcp->mb[1] = mode;
5345 	mcp->mb[2] = val;
5346 
5347 	mcp->out_mb = MBX_2|MBX_1|MBX_0;
5348 	mcp->in_mb = MBX_2|MBX_1|MBX_0;
5349 	if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
5350 		mcp->in_mb |= MBX_4|MBX_3;
5351 	mcp->tov = MBX_TOV_SECONDS;
5352 	mcp->flags = 0;
5353 	rval = qla2x00_mailbox_command(vha, mcp);
5354 	if (rval != QLA_SUCCESS) {
5355 		ql_dbg(ql_dbg_mbx, vha, 0x1107,
5356 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5357 	} else {
5358 		if (mcp->mb[1] != 0x7)
5359 			ql_dbg(ql_dbg_mbx, vha, 0x1179,
5360 				"Speed set:0x%x\n", mcp->mb[1]);
5361 
5362 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108,
5363 		    "Done %s.\n", __func__);
5364 	}
5365 
5366 	return rval;
5367 }
5368 
5369 int
5370 qla2x00_get_data_rate(scsi_qla_host_t *vha)
5371 {
5372 	int rval;
5373 	mbx_cmd_t mc;
5374 	mbx_cmd_t *mcp = &mc;
5375 	struct qla_hw_data *ha = vha->hw;
5376 
5377 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106,
5378 	    "Entered %s.\n", __func__);
5379 
5380 	if (!IS_FWI2_CAPABLE(ha))
5381 		return QLA_FUNCTION_FAILED;
5382 
5383 	mcp->mb[0] = MBC_DATA_RATE;
5384 	mcp->mb[1] = QLA_GET_DATA_RATE;
5385 	mcp->out_mb = MBX_1|MBX_0;
5386 	mcp->in_mb = MBX_2|MBX_1|MBX_0;
5387 	if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
5388 		mcp->in_mb |= MBX_3;
5389 	mcp->tov = MBX_TOV_SECONDS;
5390 	mcp->flags = 0;
5391 	rval = qla2x00_mailbox_command(vha, mcp);
5392 	if (rval != QLA_SUCCESS) {
5393 		ql_dbg(ql_dbg_mbx, vha, 0x1107,
5394 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5395 	} else {
5396 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108,
5397 		    "Done %s.\n", __func__);
5398 		if (mcp->mb[1] != 0x7)
5399 			ha->link_data_rate = mcp->mb[1];
5400 	}
5401 
5402 	return rval;
5403 }
5404 
5405 int
5406 qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb)
5407 {
5408 	int rval;
5409 	mbx_cmd_t mc;
5410 	mbx_cmd_t *mcp = &mc;
5411 	struct qla_hw_data *ha = vha->hw;
5412 
5413 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1109,
5414 	    "Entered %s.\n", __func__);
5415 
5416 	if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA8044(ha) &&
5417 	    !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
5418 		return QLA_FUNCTION_FAILED;
5419 	mcp->mb[0] = MBC_GET_PORT_CONFIG;
5420 	mcp->out_mb = MBX_0;
5421 	mcp->in_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5422 	mcp->tov = MBX_TOV_SECONDS;
5423 	mcp->flags = 0;
5424 
5425 	rval = qla2x00_mailbox_command(vha, mcp);
5426 
5427 	if (rval != QLA_SUCCESS) {
5428 		ql_dbg(ql_dbg_mbx, vha, 0x110a,
5429 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5430 	} else {
5431 		/* Copy all bits to preserve original value */
5432 		memcpy(mb, &mcp->mb[1], sizeof(uint16_t) * 4);
5433 
5434 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110b,
5435 		    "Done %s.\n", __func__);
5436 	}
5437 	return rval;
5438 }
5439 
5440 int
5441 qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb)
5442 {
5443 	int rval;
5444 	mbx_cmd_t mc;
5445 	mbx_cmd_t *mcp = &mc;
5446 
5447 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110c,
5448 	    "Entered %s.\n", __func__);
5449 
5450 	mcp->mb[0] = MBC_SET_PORT_CONFIG;
5451 	/* Copy all bits to preserve original setting */
5452 	memcpy(&mcp->mb[1], mb, sizeof(uint16_t) * 4);
5453 	mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5454 	mcp->in_mb = MBX_0;
5455 	mcp->tov = MBX_TOV_SECONDS;
5456 	mcp->flags = 0;
5457 	rval = qla2x00_mailbox_command(vha, mcp);
5458 
5459 	if (rval != QLA_SUCCESS) {
5460 		ql_dbg(ql_dbg_mbx, vha, 0x110d,
5461 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5462 	} else
5463 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110e,
5464 		    "Done %s.\n", __func__);
5465 
5466 	return rval;
5467 }
5468 
5469 
5470 int
5471 qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority,
5472 		uint16_t *mb)
5473 {
5474 	int rval;
5475 	mbx_cmd_t mc;
5476 	mbx_cmd_t *mcp = &mc;
5477 	struct qla_hw_data *ha = vha->hw;
5478 
5479 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110f,
5480 	    "Entered %s.\n", __func__);
5481 
5482 	if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
5483 		return QLA_FUNCTION_FAILED;
5484 
5485 	mcp->mb[0] = MBC_PORT_PARAMS;
5486 	mcp->mb[1] = loop_id;
5487 	if (ha->flags.fcp_prio_enabled)
5488 		mcp->mb[2] = BIT_1;
5489 	else
5490 		mcp->mb[2] = BIT_2;
5491 	mcp->mb[4] = priority & 0xf;
5492 	mcp->mb[9] = vha->vp_idx;
5493 	mcp->out_mb = MBX_9|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5494 	mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0;
5495 	mcp->tov = 30;
5496 	mcp->flags = 0;
5497 	rval = qla2x00_mailbox_command(vha, mcp);
5498 	if (mb != NULL) {
5499 		mb[0] = mcp->mb[0];
5500 		mb[1] = mcp->mb[1];
5501 		mb[3] = mcp->mb[3];
5502 		mb[4] = mcp->mb[4];
5503 	}
5504 
5505 	if (rval != QLA_SUCCESS) {
5506 		ql_dbg(ql_dbg_mbx, vha, 0x10cd, "Failed=%x.\n", rval);
5507 	} else {
5508 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10cc,
5509 		    "Done %s.\n", __func__);
5510 	}
5511 
5512 	return rval;
5513 }
5514 
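/*
 * qla2x00_get_thermal_temp
 *	Read the adapter temperature.  Select ISP25xx boards use the SFP
 *	thermal registers, 82xx/8044 use their dedicated readers, and all
 *	other supported FWI2-capable ISPs use the RNID ASIC-temperature
 *	mailbox command.
 */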
5515 int
5516 qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp)
5517 {
5518 	int rval = QLA_FUNCTION_FAILED;
5519 	struct qla_hw_data *ha = vha->hw;
5520 	uint8_t byte;
5521 
5522 	if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha)) {
5523 		ql_dbg(ql_dbg_mbx, vha, 0x1150,
5524 		    "Thermal not supported by this card.\n");
5525 		return rval;
5526 	}
5527 
5528 	if (IS_QLA25XX(ha)) {
5529 		if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
5530 		    ha->pdev->subsystem_device == 0x0175) {
5531 			rval = qla2x00_read_sfp(vha, 0, &byte,
5532 			    0x98, 0x1, 1, BIT_13|BIT_0);
5533 			*temp = byte;
5534 			return rval;
5535 		}
5536 		if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
5537 		    ha->pdev->subsystem_device == 0x338e) {
5538 			rval = qla2x00_read_sfp(vha, 0, &byte,
5539 			    0x98, 0x1, 1, BIT_15|BIT_14|BIT_0);
5540 			*temp = byte;
5541 			return rval;
5542 		}
5543 		ql_dbg(ql_dbg_mbx, vha, 0x10c9,
5544 		    "Thermal not supported by this card.\n");
5545 		return rval;
5546 	}
5547 
5548 	if (IS_QLA82XX(ha)) {
5549 		*temp = qla82xx_read_temperature(vha);
5550 		rval = QLA_SUCCESS;
5551 		return rval;
5552 	} else if (IS_QLA8044(ha)) {
5553 		*temp = qla8044_read_temperature(vha);
5554 		rval = QLA_SUCCESS;
5555 		return rval;
5556 	}
5557 
5558 	rval = qla2x00_read_asic_temperature(vha, temp);
5559 	return rval;
5560 }
5561 
5562 int
5563 qla82xx_mbx_intr_enable(scsi_qla_host_t *vha)
5564 {
5565 	int rval;
5566 	struct qla_hw_data *ha = vha->hw;
5567 	mbx_cmd_t mc;
5568 	mbx_cmd_t *mcp = &mc;
5569 
5570 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1017,
5571 	    "Entered %s.\n", __func__);
5572 
5573 	if (!IS_FWI2_CAPABLE(ha))
5574 		return QLA_FUNCTION_FAILED;
5575 
5576 	memset(mcp, 0, sizeof(mbx_cmd_t));
5577 	mcp->mb[0] = MBC_TOGGLE_INTERRUPT;
5578 	mcp->mb[1] = 1;
5579 
5580 	mcp->out_mb = MBX_1|MBX_0;
5581 	mcp->in_mb = MBX_0;
5582 	mcp->tov = 30;
5583 	mcp->flags = 0;
5584 
5585 	rval = qla2x00_mailbox_command(vha, mcp);
5586 	if (rval != QLA_SUCCESS) {
5587 		ql_dbg(ql_dbg_mbx, vha, 0x1016,
5588 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5589 	} else {
5590 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100e,
5591 		    "Done %s.\n", __func__);
5592 	}
5593 
5594 	return rval;
5595 }
5596 
5597 int
5598 qla82xx_mbx_intr_disable(scsi_qla_host_t *vha)
5599 {
5600 	int rval;
5601 	struct qla_hw_data *ha = vha->hw;
5602 	mbx_cmd_t mc;
5603 	mbx_cmd_t *mcp = &mc;
5604 
5605 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100d,
5606 	    "Entered %s.\n", __func__);
5607 
5608 	if (!IS_P3P_TYPE(ha))
5609 		return QLA_FUNCTION_FAILED;
5610 
5611 	memset(mcp, 0, sizeof(mbx_cmd_t));
5612 	mcp->mb[0] = MBC_TOGGLE_INTERRUPT;
5613 	mcp->mb[1] = 0;
5614 
5615 	mcp->out_mb = MBX_1|MBX_0;
5616 	mcp->in_mb = MBX_0;
5617 	mcp->tov = 30;
5618 	mcp->flags = 0;
5619 
5620 	rval = qla2x00_mailbox_command(vha, mcp);
5621 	if (rval != QLA_SUCCESS) {
5622 		ql_dbg(ql_dbg_mbx, vha, 0x100c,
5623 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5624 	} else {
5625 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100b,
5626 		    "Done %s.\n", __func__);
5627 	}
5628 
5629 	return rval;
5630 }
5631 
5632 int
5633 qla82xx_md_get_template_size(scsi_qla_host_t *vha)
5634 {
5635 	struct qla_hw_data *ha = vha->hw;
5636 	mbx_cmd_t mc;
5637 	mbx_cmd_t *mcp = &mc;
5638 	int rval = QLA_FUNCTION_FAILED;
5639 
5640 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111f,
5641 	    "Entered %s.\n", __func__);
5642 
5643 	memset(mcp->mb, 0, sizeof(mcp->mb));
5644 	mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5645 	mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5646 	mcp->mb[2] = LSW(RQST_TMPLT_SIZE);
5647 	mcp->mb[3] = MSW(RQST_TMPLT_SIZE);
5648 
5649 	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
5650 	mcp->in_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|
5651 	    MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5652 
5653 	mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5654 	mcp->tov = MBX_TOV_SECONDS;
5655 	rval = qla2x00_mailbox_command(vha, mcp);
5656 
5657 	/* Always copy back return mailbox values. */
5658 	if (rval != QLA_SUCCESS) {
5659 		ql_dbg(ql_dbg_mbx, vha, 0x1120,
5660 		    "mailbox command FAILED=0x%x, subcode=%x.\n",
5661 		    (mcp->mb[1] << 16) | mcp->mb[0],
5662 		    (mcp->mb[3] << 16) | mcp->mb[2]);
5663 	} else {
5664 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1121,
5665 		    "Done %s.\n", __func__);
5666 		ha->md_template_size = ((mcp->mb[3] << 16) | mcp->mb[2]);
5667 		if (!ha->md_template_size) {
5668 			ql_dbg(ql_dbg_mbx, vha, 0x1122,
5669 			    "Null template size obtained.\n");
5670 			rval = QLA_FUNCTION_FAILED;
5671 		}
5672 	}
5673 	return rval;
5674 }
5675 
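/*
 * qla82xx_md_get_template
 *	Allocate a coherent DMA buffer of ha->md_template_size bytes and
 *	retrieve the minidump template (RQST_TMPLT) into it with a single
 *	mailbox command.
 */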
5676 int
5677 qla82xx_md_get_template(scsi_qla_host_t *vha)
5678 {
5679 	struct qla_hw_data *ha = vha->hw;
5680 	mbx_cmd_t mc;
5681 	mbx_cmd_t *mcp = &mc;
5682 	int rval = QLA_FUNCTION_FAILED;
5683 
5684 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1123,
5685 	    "Entered %s.\n", __func__);
5686 
5687 	ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev,
5688 	   ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL);
5689 	if (!ha->md_tmplt_hdr) {
5690 		ql_log(ql_log_warn, vha, 0x1124,
5691 		    "Unable to allocate memory for Minidump template.\n");
5692 		return rval;
5693 	}
5694 
5695 	memset(mcp->mb, 0, sizeof(mcp->mb));
5696 	mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5697 	mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5698 	mcp->mb[2] = LSW(RQST_TMPLT);
5699 	mcp->mb[3] = MSW(RQST_TMPLT);
5700 	mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma));
5701 	mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma));
5702 	mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma));
5703 	mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma));
5704 	mcp->mb[8] = LSW(ha->md_template_size);
5705 	mcp->mb[9] = MSW(ha->md_template_size);
5706 
5707 	mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5708 	mcp->tov = MBX_TOV_SECONDS;
5709 	mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8|
5710 	    MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5711 	mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
5712 	rval = qla2x00_mailbox_command(vha, mcp);
5713 
5714 	if (rval != QLA_SUCCESS) {
5715 		ql_dbg(ql_dbg_mbx, vha, 0x1125,
5716 		    "mailbox command FAILED=0x%x, subcode=%x.\n",
5717 		    ((mcp->mb[1] << 16) | mcp->mb[0]),
5718 		    ((mcp->mb[3] << 16) | mcp->mb[2]));
5719 	} else
5720 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1126,
5721 		    "Done %s.\n", __func__);
5722 	return rval;
5723 }
5724 
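/*
 * qla8044_md_get_template
 *	ISP8044 variant of the minidump template fetch: the template is
 *	read into the DMA buffer in MINIDUMP_SIZE_36K chunks, with the
 *	current offset passed in mb[10]/mb[11].
 */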
5725 int
5726 qla8044_md_get_template(scsi_qla_host_t *vha)
5727 {
5728 	struct qla_hw_data *ha = vha->hw;
5729 	mbx_cmd_t mc;
5730 	mbx_cmd_t *mcp = &mc;
5731 	int rval = QLA_FUNCTION_FAILED;
5732 	int offset = 0, size = MINIDUMP_SIZE_36K;
5733 
5734 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11f,
5735 	    "Entered %s.\n", __func__);
5736 
5737 	ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev,
5738 	   ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL);
5739 	if (!ha->md_tmplt_hdr) {
5740 		ql_log(ql_log_warn, vha, 0xb11b,
5741 		    "Unable to allocate memory for Minidump template.\n");
5742 		return rval;
5743 	}
5744 
5745 	memset(mcp->mb, 0, sizeof(mcp->mb));
5746 	while (offset < ha->md_template_size) {
5747 		mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5748 		mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5749 		mcp->mb[2] = LSW(RQST_TMPLT);
5750 		mcp->mb[3] = MSW(RQST_TMPLT);
5751 		mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma + offset));
5752 		mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma + offset));
5753 		mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma + offset));
5754 		mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma + offset));
5755 		mcp->mb[8] = LSW(size);
5756 		mcp->mb[9] = MSW(size);
5757 		mcp->mb[10] = offset & 0x0000FFFF;
5758 		mcp->mb[11] = offset & 0xFFFF0000;
5759 		mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5760 		mcp->tov = MBX_TOV_SECONDS;
5761 		mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8|
5762 			MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5763 		mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
5764 		rval = qla2x00_mailbox_command(vha, mcp);
5765 
5766 		if (rval != QLA_SUCCESS) {
5767 			ql_dbg(ql_dbg_mbx, vha, 0xb11c,
5768 				"mailbox command FAILED=0x%x, subcode=%x.\n",
5769 				((mcp->mb[1] << 16) | mcp->mb[0]),
5770 				((mcp->mb[3] << 16) | mcp->mb[2]));
5771 			return rval;
5772 		} else
5773 			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11d,
5774 				"Done %s.\n", __func__);
5775 		offset = offset + size;
5776 	}
5777 	return rval;
5778 }
5779 
5780 int
5781 qla81xx_set_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
5782 {
5783 	int rval;
5784 	struct qla_hw_data *ha = vha->hw;
5785 	mbx_cmd_t mc;
5786 	mbx_cmd_t *mcp = &mc;
5787 
5788 	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
5789 		return QLA_FUNCTION_FAILED;
5790 
5791 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1133,
5792 	    "Entered %s.\n", __func__);
5793 
5794 	memset(mcp, 0, sizeof(mbx_cmd_t));
5795 	mcp->mb[0] = MBC_SET_LED_CONFIG;
5796 	mcp->mb[1] = led_cfg[0];
5797 	mcp->mb[2] = led_cfg[1];
5798 	if (IS_QLA8031(ha)) {
5799 		mcp->mb[3] = led_cfg[2];
5800 		mcp->mb[4] = led_cfg[3];
5801 		mcp->mb[5] = led_cfg[4];
5802 		mcp->mb[6] = led_cfg[5];
5803 	}
5804 
5805 	mcp->out_mb = MBX_2|MBX_1|MBX_0;
5806 	if (IS_QLA8031(ha))
5807 		mcp->out_mb |= MBX_6|MBX_5|MBX_4|MBX_3;
5808 	mcp->in_mb = MBX_0;
5809 	mcp->tov = 30;
5810 	mcp->flags = 0;
5811 
5812 	rval = qla2x00_mailbox_command(vha, mcp);
5813 	if (rval != QLA_SUCCESS) {
5814 		ql_dbg(ql_dbg_mbx, vha, 0x1134,
5815 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5816 	} else {
5817 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1135,
5818 		    "Done %s.\n", __func__);
5819 	}
5820 
5821 	return rval;
5822 }
5823 
5824 int
5825 qla81xx_get_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
5826 {
5827 	int rval;
5828 	struct qla_hw_data *ha = vha->hw;
5829 	mbx_cmd_t mc;
5830 	mbx_cmd_t *mcp = &mc;
5831 
5832 	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
5833 		return QLA_FUNCTION_FAILED;
5834 
5835 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1136,
5836 	    "Entered %s.\n", __func__);
5837 
5838 	memset(mcp, 0, sizeof(mbx_cmd_t));
5839 	mcp->mb[0] = MBC_GET_LED_CONFIG;
5840 
5841 	mcp->out_mb = MBX_0;
5842 	mcp->in_mb = MBX_2|MBX_1|MBX_0;
5843 	if (IS_QLA8031(ha))
5844 		mcp->in_mb |= MBX_6|MBX_5|MBX_4|MBX_3;
5845 	mcp->tov = 30;
5846 	mcp->flags = 0;
5847 
5848 	rval = qla2x00_mailbox_command(vha, mcp);
5849 	if (rval != QLA_SUCCESS) {
5850 		ql_dbg(ql_dbg_mbx, vha, 0x1137,
5851 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5852 	} else {
5853 		led_cfg[0] = mcp->mb[1];
5854 		led_cfg[1] = mcp->mb[2];
5855 		if (IS_QLA8031(ha)) {
5856 			led_cfg[2] = mcp->mb[3];
5857 			led_cfg[3] = mcp->mb[4];
5858 			led_cfg[4] = mcp->mb[5];
5859 			led_cfg[5] = mcp->mb[6];
5860 		}
5861 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1138,
5862 		    "Done %s.\n", __func__);
5863 	}
5864 
5865 	return rval;
5866 }
5867 
5868 int
5869 qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable)
5870 {
5871 	int rval;
5872 	struct qla_hw_data *ha = vha->hw;
5873 	mbx_cmd_t mc;
5874 	mbx_cmd_t *mcp = &mc;
5875 
5876 	if (!IS_P3P_TYPE(ha))
5877 		return QLA_FUNCTION_FAILED;
5878 
5879 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1127,
5880 		"Entered %s.\n", __func__);
5881 
5882 	memset(mcp, 0, sizeof(mbx_cmd_t));
5883 	mcp->mb[0] = MBC_SET_LED_CONFIG;
5884 	if (enable)
5885 		mcp->mb[7] = 0xE;
5886 	else
5887 		mcp->mb[7] = 0xD;
5888 
5889 	mcp->out_mb = MBX_7|MBX_0;
5890 	mcp->in_mb = MBX_0;
5891 	mcp->tov = MBX_TOV_SECONDS;
5892 	mcp->flags = 0;
5893 
5894 	rval = qla2x00_mailbox_command(vha, mcp);
5895 	if (rval != QLA_SUCCESS) {
5896 		ql_dbg(ql_dbg_mbx, vha, 0x1128,
5897 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5898 	} else {
5899 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1129,
5900 		    "Done %s.\n", __func__);
5901 	}
5902 
5903 	return rval;
5904 }
5905 
5906 int
5907 qla83xx_wr_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data)
5908 {
5909 	int rval;
5910 	struct qla_hw_data *ha = vha->hw;
5911 	mbx_cmd_t mc;
5912 	mbx_cmd_t *mcp = &mc;
5913 
5914 	if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
5915 		return QLA_FUNCTION_FAILED;
5916 
5917 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1130,
5918 	    "Entered %s.\n", __func__);
5919 
5920 	mcp->mb[0] = MBC_WRITE_REMOTE_REG;
5921 	mcp->mb[1] = LSW(reg);
5922 	mcp->mb[2] = MSW(reg);
5923 	mcp->mb[3] = LSW(data);
5924 	mcp->mb[4] = MSW(data);
5925 	mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5926 
5927 	mcp->in_mb = MBX_1|MBX_0;
5928 	mcp->tov = MBX_TOV_SECONDS;
5929 	mcp->flags = 0;
5930 	rval = qla2x00_mailbox_command(vha, mcp);
5931 
5932 	if (rval != QLA_SUCCESS) {
5933 		ql_dbg(ql_dbg_mbx, vha, 0x1131,
5934 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5935 	} else {
5936 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1132,
5937 		    "Done %s.\n", __func__);
5938 	}
5939 
5940 	return rval;
5941 }
5942 
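/*
 * qla2x00_port_logout
 *	Perform an implicit LOGO (MBC_PORT_LOGOUT with BIT_15 set in
 *	mb[10]) for the given fcport. Not supported on ISP2100/ISP2200.
 */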
5943 int
5944 qla2x00_port_logout(scsi_qla_host_t *vha, struct fc_port *fcport)
5945 {
5946 	int rval;
5947 	struct qla_hw_data *ha = vha->hw;
5948 	mbx_cmd_t mc;
5949 	mbx_cmd_t *mcp = &mc;
5950 
5951 	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
5952 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113b,
5953 		    "Implicit LOGO Unsupported.\n");
5954 		return QLA_FUNCTION_FAILED;
5955 	}
5956 
5957 
5958 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113c,
5959 	    "Entering %s.\n",  __func__);
5960 
5961 	/* Perform Implicit LOGO. */
5962 	mcp->mb[0] = MBC_PORT_LOGOUT;
5963 	mcp->mb[1] = fcport->loop_id;
5964 	mcp->mb[10] = BIT_15;
5965 	mcp->out_mb = MBX_10|MBX_1|MBX_0;
5966 	mcp->in_mb = MBX_0;
5967 	mcp->tov = MBX_TOV_SECONDS;
5968 	mcp->flags = 0;
5969 	rval = qla2x00_mailbox_command(vha, mcp);
5970 	if (rval != QLA_SUCCESS)
5971 		ql_dbg(ql_dbg_mbx, vha, 0x113d,
5972 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5973 	else
5974 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113e,
5975 		    "Done %s.\n", __func__);
5976 
5977 	return rval;
5978 }
5979 
5980 int
5981 qla83xx_rd_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t *data)
5982 {
5983 	int rval;
5984 	mbx_cmd_t mc;
5985 	mbx_cmd_t *mcp = &mc;
5986 	struct qla_hw_data *ha = vha->hw;
5987 	unsigned long retry_max_time = jiffies + (2 * HZ);
5988 
5989 	if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
5990 		return QLA_FUNCTION_FAILED;
5991 
5992 	ql_dbg(ql_dbg_mbx, vha, 0x114b, "Entered %s.\n", __func__);
5993 
5994 retry_rd_reg:
5995 	mcp->mb[0] = MBC_READ_REMOTE_REG;
5996 	mcp->mb[1] = LSW(reg);
5997 	mcp->mb[2] = MSW(reg);
5998 	mcp->out_mb = MBX_2|MBX_1|MBX_0;
5999 	mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0;
6000 	mcp->tov = MBX_TOV_SECONDS;
6001 	mcp->flags = 0;
6002 	rval = qla2x00_mailbox_command(vha, mcp);
6003 
6004 	if (rval != QLA_SUCCESS) {
6005 		ql_dbg(ql_dbg_mbx, vha, 0x114c,
6006 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
6007 		    rval, mcp->mb[0], mcp->mb[1]);
6008 	} else {
6009 		*data = (mcp->mb[3] | (mcp->mb[4] << 16));
6010 		if (*data == QLA8XXX_BAD_VALUE) {
6011 			/*
6012 			 * During soft-reset CAMRAM register reads might
6013 			 * return 0xbad0bad0. So retry for MAX of 2 sec
6014 			 * while reading camram registers.
6015 			 */
6016 			if (time_after(jiffies, retry_max_time)) {
6017 				ql_dbg(ql_dbg_mbx, vha, 0x1141,
6018 				    "Failure to read CAMRAM register. "
6019 				    "data=0x%x.\n", *data);
6020 				return QLA_FUNCTION_FAILED;
6021 			}
6022 			msleep(100);
6023 			goto retry_rd_reg;
6024 		}
6025 		ql_dbg(ql_dbg_mbx, vha, 0x1142, "Done %s.\n", __func__);
6026 	}
6027 
6028 	return rval;
6029 }
6030 
6031 int
6032 qla83xx_restart_nic_firmware(scsi_qla_host_t *vha)
6033 {
6034 	int rval;
6035 	mbx_cmd_t mc;
6036 	mbx_cmd_t *mcp = &mc;
6037 	struct qla_hw_data *ha = vha->hw;
6038 
6039 	if (!IS_QLA83XX(ha))
6040 		return QLA_FUNCTION_FAILED;
6041 
6042 	ql_dbg(ql_dbg_mbx, vha, 0x1143, "Entered %s.\n", __func__);
6043 
6044 	mcp->mb[0] = MBC_RESTART_NIC_FIRMWARE;
6045 	mcp->out_mb = MBX_0;
6046 	mcp->in_mb = MBX_1|MBX_0;
6047 	mcp->tov = MBX_TOV_SECONDS;
6048 	mcp->flags = 0;
6049 	rval = qla2x00_mailbox_command(vha, mcp);
6050 
6051 	if (rval != QLA_SUCCESS) {
6052 		ql_dbg(ql_dbg_mbx, vha, 0x1144,
6053 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
6054 		    rval, mcp->mb[0], mcp->mb[1]);
6055 		ha->isp_ops->fw_dump(vha, 0);
6056 	} else {
6057 		ql_dbg(ql_dbg_mbx, vha, 0x1145, "Done %s.\n", __func__);
6058 	}
6059 
6060 	return rval;
6061 }
6062 
6063 int
6064 qla83xx_access_control(scsi_qla_host_t *vha, uint16_t options,
6065 	uint32_t start_addr, uint32_t end_addr, uint16_t *sector_size)
6066 {
6067 	int rval;
6068 	mbx_cmd_t mc;
6069 	mbx_cmd_t *mcp = &mc;
6070 	uint8_t subcode = (uint8_t)options;
6071 	struct qla_hw_data *ha = vha->hw;
6072 
6073 	if (!IS_QLA8031(ha))
6074 		return QLA_FUNCTION_FAILED;
6075 
6076 	ql_dbg(ql_dbg_mbx, vha, 0x1146, "Entered %s.\n", __func__);
6077 
6078 	mcp->mb[0] = MBC_SET_ACCESS_CONTROL;
6079 	mcp->mb[1] = options;
6080 	mcp->out_mb = MBX_1|MBX_0;
6081 	if (subcode & BIT_2) {
6082 		mcp->mb[2] = LSW(start_addr);
6083 		mcp->mb[3] = MSW(start_addr);
6084 		mcp->mb[4] = LSW(end_addr);
6085 		mcp->mb[5] = MSW(end_addr);
6086 		mcp->out_mb |= MBX_5|MBX_4|MBX_3|MBX_2;
6087 	}
6088 	mcp->in_mb = MBX_2|MBX_1|MBX_0;
6089 	if (!(subcode & (BIT_2 | BIT_5)))
6090 		mcp->in_mb |= MBX_4|MBX_3;
6091 	mcp->tov = MBX_TOV_SECONDS;
6092 	mcp->flags = 0;
6093 	rval = qla2x00_mailbox_command(vha, mcp);
6094 
6095 	if (rval != QLA_SUCCESS) {
6096 		ql_dbg(ql_dbg_mbx, vha, 0x1147,
6097 		    "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[4]=%x.\n",
6098 		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3],
6099 		    mcp->mb[4]);
6100 		ha->isp_ops->fw_dump(vha, 0);
6101 	} else {
6102 		if (subcode & BIT_5)
6103 			*sector_size = mcp->mb[1];
6104 		else if (subcode & (BIT_6 | BIT_7)) {
6105 			ql_dbg(ql_dbg_mbx, vha, 0x1148,
6106 			    "Driver-lock id=%x%x", mcp->mb[4], mcp->mb[3]);
6107 		} else if (subcode & (BIT_3 | BIT_4)) {
6108 			ql_dbg(ql_dbg_mbx, vha, 0x1149,
6109 			    "Flash-lock id=%x%x", mcp->mb[4], mcp->mb[3]);
6110 		}
6111 		ql_dbg(ql_dbg_mbx, vha, 0x114a, "Done %s.\n", __func__);
6112 	}
6113 
6114 	return rval;
6115 }
6116 
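/*
 * qla2x00_dump_mctp_data
 *	Dump MCTP data of the given size from the given address into the
 *	supplied DMA buffer using MBC_DUMP_RISC_RAM_EXTENDED with the
 *	MCTP RAM ID (0x40) selected in mb[10].
 */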
6117 int
6118 qla2x00_dump_mctp_data(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
6119 	uint32_t size)
6120 {
6121 	int rval;
6122 	mbx_cmd_t mc;
6123 	mbx_cmd_t *mcp = &mc;
6124 
6125 	if (!IS_MCTP_CAPABLE(vha->hw))
6126 		return QLA_FUNCTION_FAILED;
6127 
6128 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114f,
6129 	    "Entered %s.\n", __func__);
6130 
6131 	mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
6132 	mcp->mb[1] = LSW(addr);
6133 	mcp->mb[2] = MSW(req_dma);
6134 	mcp->mb[3] = LSW(req_dma);
6135 	mcp->mb[4] = MSW(size);
6136 	mcp->mb[5] = LSW(size);
6137 	mcp->mb[6] = MSW(MSD(req_dma));
6138 	mcp->mb[7] = LSW(MSD(req_dma));
6139 	mcp->mb[8] = MSW(addr);
6140 	/* Set the RAM ID valid bit. */
6141 	mcp->mb[10] |= BIT_7;
6142 	/* For MCTP, the RAM ID is 0x40. */
6143 	mcp->mb[10] |= 0x40;
6144 
6145 	mcp->out_mb |= MBX_10|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|
6146 	    MBX_0;
6147 
6148 	mcp->in_mb = MBX_0;
6149 	mcp->tov = MBX_TOV_SECONDS;
6150 	mcp->flags = 0;
6151 	rval = qla2x00_mailbox_command(vha, mcp);
6152 
6153 	if (rval != QLA_SUCCESS) {
6154 		ql_dbg(ql_dbg_mbx, vha, 0x114e,
6155 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6156 	} else {
6157 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114d,
6158 		    "Done %s.\n", __func__);
6159 	}
6160 
6161 	return rval;
6162 }
6163 
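/*
 * qla26xx_dport_diagnostics
 *	Retrieve D-Port diagnostic results into dd_buf. The buffer is
 *	streaming-DMA mapped for the duration of the MBC_DPORT_DIAGNOSTICS
 *	command and unmapped before returning.
 */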
6164 int
6165 qla26xx_dport_diagnostics(scsi_qla_host_t *vha,
6166 	void *dd_buf, uint size, uint options)
6167 {
6168 	int rval;
6169 	mbx_cmd_t mc;
6170 	mbx_cmd_t *mcp = &mc;
6171 	dma_addr_t dd_dma;
6172 
6173 	if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) &&
6174 	    !IS_QLA28XX(vha->hw))
6175 		return QLA_FUNCTION_FAILED;
6176 
6177 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x119f,
6178 	    "Entered %s.\n", __func__);
6179 
6180 	dd_dma = dma_map_single(&vha->hw->pdev->dev,
6181 	    dd_buf, size, DMA_FROM_DEVICE);
6182 	if (dma_mapping_error(&vha->hw->pdev->dev, dd_dma)) {
6183 		ql_log(ql_log_warn, vha, 0x1194, "Failed to map dma buffer.\n");
6184 		return QLA_MEMORY_ALLOC_FAILED;
6185 	}
6186 
6187 	memset(dd_buf, 0, size);
6188 
6189 	mcp->mb[0] = MBC_DPORT_DIAGNOSTICS;
6190 	mcp->mb[1] = options;
6191 	mcp->mb[2] = MSW(LSD(dd_dma));
6192 	mcp->mb[3] = LSW(LSD(dd_dma));
6193 	mcp->mb[6] = MSW(MSD(dd_dma));
6194 	mcp->mb[7] = LSW(MSD(dd_dma));
6195 	mcp->mb[8] = size;
6196 	mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
6197 	mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
6198 	mcp->buf_size = size;
6199 	mcp->flags = MBX_DMA_IN;
6200 	mcp->tov = MBX_TOV_SECONDS * 4;
6201 	rval = qla2x00_mailbox_command(vha, mcp);
6202 
6203 	if (rval != QLA_SUCCESS) {
6204 		ql_dbg(ql_dbg_mbx, vha, 0x1195, "Failed=%x.\n", rval);
6205 	} else {
6206 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1196,
6207 		    "Done %s.\n", __func__);
6208 	}
6209 
6210 	dma_unmap_single(&vha->hw->pdev->dev, dd_dma,
6211 	    size, DMA_FROM_DEVICE);
6212 
6213 	return rval;
6214 }
6215 
6216 static void qla2x00_async_mb_sp_done(void *s, int res)
6217 {
6218 	struct srb *sp = s;
6219 
6220 	sp->u.iocb_cmd.u.mbx.rc = res;
6221 
6222 	complete(&sp->u.iocb_cmd.u.mbx.comp);
6223 	/* Don't free sp here; let the caller do the free. */
6224 }
6225 
6226 /*
6227  * This mailbox uses the iocb interface to send MB commands.
6228  * This allows non-critical (non chip-setup) commands to go
6229  * out in parallel.
6230  */
6231 int qla24xx_send_mb_cmd(struct scsi_qla_host *vha, mbx_cmd_t *mcp)
6232 {
6233 	int rval = QLA_FUNCTION_FAILED;
6234 	srb_t *sp;
6235 	struct srb_iocb *c;
6236 
6237 	if (!vha->hw->flags.fw_started)
6238 		goto done;
6239 
6240 	sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
6241 	if (!sp)
6242 		goto done;
6243 
6244 	sp->type = SRB_MB_IOCB;
6245 	sp->name = mb_to_str(mcp->mb[0]);
6246 
6247 	c = &sp->u.iocb_cmd;
6248 	c->timeout = qla2x00_async_iocb_timeout;
6249 	init_completion(&c->u.mbx.comp);
6250 
6251 	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
6252 
6253 	memcpy(sp->u.iocb_cmd.u.mbx.out_mb, mcp->mb, SIZEOF_IOCB_MB_REG);
6254 
6255 	sp->done = qla2x00_async_mb_sp_done;
6256 
6257 	rval = qla2x00_start_sp(sp);
6258 	if (rval != QLA_SUCCESS) {
6259 		ql_dbg(ql_dbg_mbx, vha, 0x1018,
6260 		    "%s: %s Failed submission. %x.\n",
6261 		    __func__, sp->name, rval);
6262 		goto done_free_sp;
6263 	}
6264 
6265 	ql_dbg(ql_dbg_mbx, vha, 0x113f, "MB:%s hndl %x submitted\n",
6266 	    sp->name, sp->handle);
6267 
6268 	wait_for_completion(&c->u.mbx.comp);
6269 	memcpy(mcp->mb, sp->u.iocb_cmd.u.mbx.in_mb, SIZEOF_IOCB_MB_REG);
6270 
6271 	rval = c->u.mbx.rc;
6272 	switch (rval) {
6273 	case QLA_FUNCTION_TIMEOUT:
6274 		ql_dbg(ql_dbg_mbx, vha, 0x1140, "%s: %s Timeout. %x.\n",
6275 		    __func__, sp->name, rval);
6276 		break;
6277 	case QLA_SUCCESS:
6278 		ql_dbg(ql_dbg_mbx, vha, 0x119d, "%s: %s done.\n",
6279 		    __func__, sp->name);
6280 		sp->free(sp);
6281 		break;
6282 	default:
6283 		ql_dbg(ql_dbg_mbx, vha, 0x119e, "%s: %s Failed. %x.\n",
6284 		    __func__, sp->name, rval);
6285 		sp->free(sp);
6286 		break;
6287 	}
6288 
6289 	return rval;
6290 
6291 done_free_sp:
6292 	sp->free(sp);
6293 done:
6294 	return rval;
6295 }
6296 
6297 /*
6298  * qla24xx_gpdb_wait
6299  * NOTE: Do not call this routine from DPC thread
6300  */
6301 int qla24xx_gpdb_wait(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
6302 {
6303 	int rval = QLA_FUNCTION_FAILED;
6304 	dma_addr_t pd_dma;
6305 	struct port_database_24xx *pd;
6306 	struct qla_hw_data *ha = vha->hw;
6307 	mbx_cmd_t mc;
6308 
6309 	if (!vha->hw->flags.fw_started)
6310 		goto done;
6311 
6312 	pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
6313 	if (pd == NULL) {
6314 		ql_log(ql_log_warn, vha, 0xd047,
6315 		    "Failed to allocate port database structure.\n");
6316 		goto done_free_sp;
6317 	}
6318 
6319 	memset(&mc, 0, sizeof(mc));
6320 	mc.mb[0] = MBC_GET_PORT_DATABASE;
6321 	mc.mb[1] = cpu_to_le16(fcport->loop_id);
6322 	mc.mb[2] = MSW(pd_dma);
6323 	mc.mb[3] = LSW(pd_dma);
6324 	mc.mb[6] = MSW(MSD(pd_dma));
6325 	mc.mb[7] = LSW(MSD(pd_dma));
6326 	mc.mb[9] = cpu_to_le16(vha->vp_idx);
6327 	mc.mb[10] = cpu_to_le16((uint16_t)opt);
6328 
6329 	rval = qla24xx_send_mb_cmd(vha, &mc);
6330 	if (rval != QLA_SUCCESS) {
6331 		ql_dbg(ql_dbg_mbx, vha, 0x1193,
6332 		    "%s: %8phC fail\n", __func__, fcport->port_name);
6333 		goto done_free_sp;
6334 	}
6335 
6336 	rval = __qla24xx_parse_gpdb(vha, fcport, pd);
6337 
6338 	ql_dbg(ql_dbg_mbx, vha, 0x1197, "%s: %8phC done\n",
6339 	    __func__, fcport->port_name);
6340 
6341 done_free_sp:
6342 	if (pd)
6343 		dma_pool_free(ha->s_dma_pool, pd, pd_dma);
6344 done:
6345 	return rval;
6346 }
6347 
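/*
 * __qla24xx_parse_gpdb
 *	Parse a 24xx port database entry: verify the PRLI-complete login
 *	state, copy the node/port names and port_id into the fcport, and
 *	derive the port type, class of service and confirmed-completion
 *	support from the PRLI service parameters.
 */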
6348 int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport,
6349     struct port_database_24xx *pd)
6350 {
6351 	int rval = QLA_SUCCESS;
6352 	uint64_t zero = 0;
6353 	u8 current_login_state, last_login_state;
6354 
6355 	if (fcport->fc4f_nvme) {
6356 		current_login_state = pd->current_login_state >> 4;
6357 		last_login_state = pd->last_login_state >> 4;
6358 	} else {
6359 		current_login_state = pd->current_login_state & 0xf;
6360 		last_login_state = pd->last_login_state & 0xf;
6361 	}
6362 
6363 	/* Check for logged in state. */
6364 	if (current_login_state != PDS_PRLI_COMPLETE) {
6365 		ql_dbg(ql_dbg_mbx, vha, 0x119a,
6366 		    "Unable to verify login-state (%x/%x) for loop_id %x.\n",
6367 		    current_login_state, last_login_state, fcport->loop_id);
6368 		rval = QLA_FUNCTION_FAILED;
6369 		goto gpd_error_out;
6370 	}
6371 
6372 	if (fcport->loop_id == FC_NO_LOOP_ID ||
6373 	    (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
6374 	     memcmp(fcport->port_name, pd->port_name, 8))) {
6375 		/* We lost the device mid way. */
6376 		rval = QLA_NOT_LOGGED_IN;
6377 		goto gpd_error_out;
6378 	}
6379 
6380 	/* Names are little-endian. */
6381 	memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
6382 	memcpy(fcport->port_name, pd->port_name, WWN_SIZE);
6383 
6384 	/* Get port_id of device. */
6385 	fcport->d_id.b.domain = pd->port_id[0];
6386 	fcport->d_id.b.area = pd->port_id[1];
6387 	fcport->d_id.b.al_pa = pd->port_id[2];
6388 	fcport->d_id.b.rsvd_1 = 0;
6389 
6390 	if (fcport->fc4f_nvme) {
6391 		fcport->port_type = 0;
6392 		if ((pd->prli_svc_param_word_3[0] & BIT_5) == 0)
6393 			fcport->port_type |= FCT_NVME_INITIATOR;
6394 		if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
6395 			fcport->port_type |= FCT_NVME_TARGET;
6396 		if ((pd->prli_svc_param_word_3[0] & BIT_3) == 0)
6397 			fcport->port_type |= FCT_NVME_DISCOVERY;
6398 	} else {
6399 		/* If not target must be initiator or unknown type. */
6400 		if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
6401 			fcport->port_type = FCT_INITIATOR;
6402 		else
6403 			fcport->port_type = FCT_TARGET;
6404 	}
6405 	/* Passback COS information. */
6406 	fcport->supported_classes = (pd->flags & PDF_CLASS_2) ?
6407 		FC_COS_CLASS2 : FC_COS_CLASS3;
6408 
6409 	if (pd->prli_svc_param_word_3[0] & BIT_7) {
6410 		fcport->flags |= FCF_CONF_COMP_SUPPORTED;
6411 		fcport->conf_compl_supported = 1;
6412 	}
6413 
6414 gpd_error_out:
6415 	return rval;
6416 }
6417 
6418 /*
6419  * qla24xx_gidlist_wait
6420  * NOTE: Do not call this routine from DPC thread.
6421  */
6422 int qla24xx_gidlist_wait(struct scsi_qla_host *vha,
6423 	void *id_list, dma_addr_t id_list_dma, uint16_t *entries)
6424 {
6425 	int rval = QLA_FUNCTION_FAILED;
6426 	mbx_cmd_t mc;
6427 
6428 	if (!vha->hw->flags.fw_started)
6429 		goto done;
6430 
6431 	memset(&mc, 0, sizeof(mc));
6432 	mc.mb[0] = MBC_GET_ID_LIST;
6433 	mc.mb[2] = MSW(id_list_dma);
6434 	mc.mb[3] = LSW(id_list_dma);
6435 	mc.mb[6] = MSW(MSD(id_list_dma));
6436 	mc.mb[7] = LSW(MSD(id_list_dma));
6437 	mc.mb[8] = 0;
6438 	mc.mb[9] = cpu_to_le16(vha->vp_idx);
6439 
6440 	rval = qla24xx_send_mb_cmd(vha, &mc);
6441 	if (rval != QLA_SUCCESS) {
6442 		ql_dbg(ql_dbg_mbx, vha, 0x119b,
6443 		    "%s:  fail\n", __func__);
6444 	} else {
6445 		*entries = mc.mb[1];
6446 		ql_dbg(ql_dbg_mbx, vha, 0x119c,
6447 		    "%s:  done\n", __func__);
6448 	}
6449 done:
6450 	return rval;
6451 }
6452 
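/*
 * qla27xx_set_zio_threshold / qla27xx_get_zio_threshold
 *	Set or query the ZIO threshold via MBC_GET_SET_ZIO_THRESHOLD;
 *	mb[1] selects the operation (1 = set, 0 = get) and mb[2] carries
 *	the threshold value.
 */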
6453 int qla27xx_set_zio_threshold(scsi_qla_host_t *vha, uint16_t value)
6454 {
6455 	int rval;
6456 	mbx_cmd_t	mc;
6457 	mbx_cmd_t	*mcp = &mc;
6458 
6459 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1200,
6460 	    "Entered %s\n", __func__);
6461 
6462 	memset(mcp->mb, 0, sizeof(mcp->mb));
6463 	mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
6464 	mcp->mb[1] = cpu_to_le16(1);
6465 	mcp->mb[2] = cpu_to_le16(value);
6466 	mcp->out_mb = MBX_2 | MBX_1 | MBX_0;
6467 	mcp->in_mb = MBX_2 | MBX_0;
6468 	mcp->tov = MBX_TOV_SECONDS;
6469 	mcp->flags = 0;
6470 
6471 	rval = qla2x00_mailbox_command(vha, mcp);
6472 
6473 	ql_dbg(ql_dbg_mbx, vha, 0x1201, "%s %x\n",
6474 	    (rval != QLA_SUCCESS) ? "Failed"  : "Done", rval);
6475 
6476 	return rval;
6477 }
6478 
6479 int qla27xx_get_zio_threshold(scsi_qla_host_t *vha, uint16_t *value)
6480 {
6481 	int rval;
6482 	mbx_cmd_t	mc;
6483 	mbx_cmd_t	*mcp = &mc;
6484 
6485 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1203,
6486 	    "Entered %s\n", __func__);
6487 
6488 	memset(mcp->mb, 0, sizeof(mcp->mb));
6489 	mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
6490 	mcp->mb[1] = cpu_to_le16(0);
6491 	mcp->out_mb = MBX_1 | MBX_0;
6492 	mcp->in_mb = MBX_2 | MBX_0;
6493 	mcp->tov = MBX_TOV_SECONDS;
6494 	mcp->flags = 0;
6495 
6496 	rval = qla2x00_mailbox_command(vha, mcp);
6497 	if (rval == QLA_SUCCESS)
6498 		*value = mc.mb[2];
6499 
6500 	ql_dbg(ql_dbg_mbx, vha, 0x1205, "%s %x\n",
6501 	    (rval != QLA_SUCCESS) ? "Failed" : "Done", rval);
6502 
6503 	return rval;
6504 }
6505 
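/*
 * qla2x00_read_sfp_dev
 *	Read the full SFP device contents (device address 0xa0 followed
 *	by 0xa2) into ha->sfp_data in SFP_BLOCK_SIZE chunks, copying up
 *	to 'count' bytes into 'buf' when a buffer is supplied.
 */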
6506 int
6507 qla2x00_read_sfp_dev(struct scsi_qla_host *vha, char *buf, int count)
6508 {
6509 	struct qla_hw_data *ha = vha->hw;
6510 	uint16_t iter, addr, offset;
6511 	dma_addr_t phys_addr;
6512 	int rval, c;
6513 	u8 *sfp_data;
6514 
6515 	memset(ha->sfp_data, 0, SFP_DEV_SIZE);
6516 	addr = 0xa0;
6517 	phys_addr = ha->sfp_data_dma;
6518 	sfp_data = ha->sfp_data;
6519 	offset = c = 0;
6520 
6521 	for (iter = 0; iter < SFP_DEV_SIZE / SFP_BLOCK_SIZE; iter++) {
6522 		if (iter == 4) {
6523 			/* Skip to next device address. */
6524 			addr = 0xa2;
6525 			offset = 0;
6526 		}
6527 
6528 		rval = qla2x00_read_sfp(vha, phys_addr, sfp_data,
6529 		    addr, offset, SFP_BLOCK_SIZE, BIT_1);
6530 		if (rval != QLA_SUCCESS) {
6531 			ql_log(ql_log_warn, vha, 0x706d,
6532 			    "Unable to read SFP data (%x/%x/%x).\n", rval,
6533 			    addr, offset);
6534 
6535 			return rval;
6536 		}
6537 
6538 		if (buf && (c < count)) {
6539 			u16 sz;
6540 
6541 			if ((count - c) >= SFP_BLOCK_SIZE)
6542 				sz = SFP_BLOCK_SIZE;
6543 			else
6544 				sz = count - c;
6545 
6546 			memcpy(buf, sfp_data, sz);
6547 			buf += SFP_BLOCK_SIZE;
6548 			c += sz;
6549 		}
6550 		phys_addr += SFP_BLOCK_SIZE;
6551 		sfp_data  += SFP_BLOCK_SIZE;
6552 		offset += SFP_BLOCK_SIZE;
6553 	}
6554 
6555 	return rval;
6556 }
6557 
6558 int qla24xx_res_count_wait(struct scsi_qla_host *vha,
6559     uint16_t *out_mb, int out_mb_sz)
6560 {
6561 	int rval = QLA_FUNCTION_FAILED;
6562 	mbx_cmd_t mc;
6563 
6564 	if (!vha->hw->flags.fw_started)
6565 		goto done;
6566 
6567 	memset(&mc, 0, sizeof(mc));
6568 	mc.mb[0] = MBC_GET_RESOURCE_COUNTS;
6569 
6570 	rval = qla24xx_send_mb_cmd(vha, &mc);
6571 	if (rval != QLA_SUCCESS) {
6572 		ql_dbg(ql_dbg_mbx, vha, 0xffff,
6573 			"%s:  fail\n", __func__);
6574 	} else {
6575 		if (out_mb_sz <= SIZEOF_IOCB_MB_REG)
6576 			memcpy(out_mb, mc.mb, out_mb_sz);
6577 		else
6578 			memcpy(out_mb, mc.mb, SIZEOF_IOCB_MB_REG);
6579 
6580 		ql_dbg(ql_dbg_mbx, vha, 0xffff,
6581 			"%s:  done\n", __func__);
6582 	}
6583 done:
6584 	return rval;
6585 }
6586 
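/*
 * qla28xx_secure_flash_update
 *	Issue MBC_SECURE_FLASH_UPDATE for the given flash region, passing
 *	the update length and the DMA address/length of the sfub buffer.
 */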
6587 int qla28xx_secure_flash_update(scsi_qla_host_t *vha, uint16_t opts,
6588     uint16_t region, uint32_t len, dma_addr_t sfub_dma_addr,
6589     uint32_t sfub_len)
6590 {
6591 	int		rval;
6592 	mbx_cmd_t mc;
6593 	mbx_cmd_t *mcp = &mc;
6594 
6595 	mcp->mb[0] = MBC_SECURE_FLASH_UPDATE;
6596 	mcp->mb[1] = opts;
6597 	mcp->mb[2] = region;
6598 	mcp->mb[3] = MSW(len);
6599 	mcp->mb[4] = LSW(len);
6600 	mcp->mb[5] = MSW(sfub_dma_addr);
6601 	mcp->mb[6] = LSW(sfub_dma_addr);
6602 	mcp->mb[7] = MSW(MSD(sfub_dma_addr));
6603 	mcp->mb[8] = LSW(MSD(sfub_dma_addr));
6604 	mcp->mb[9] = sfub_len;
6605 	mcp->out_mb =
6606 	    MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
6607 	mcp->in_mb = MBX_2|MBX_1|MBX_0;
6608 	mcp->tov = MBX_TOV_SECONDS;
6609 	mcp->flags = 0;
6610 	rval = qla2x00_mailbox_command(vha, mcp);
6611 
6612 	if (rval != QLA_SUCCESS) {
6613 		ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s(%ld): failed rval 0x%x, %x %x %x",
6614 			__func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1],
6615 			mcp->mb[2]);
6616 	}
6617 
6618 	return rval;
6619 }
6620 
6621 int qla2xxx_write_remote_register(scsi_qla_host_t *vha, uint32_t addr,
6622     uint32_t data)
6623 {
6624 	int rval;
6625 	mbx_cmd_t mc;
6626 	mbx_cmd_t *mcp = &mc;
6627 
6628 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
6629 	    "Entered %s.\n", __func__);
6630 
6631 	mcp->mb[0] = MBC_WRITE_REMOTE_REG;
6632 	mcp->mb[1] = LSW(addr);
6633 	mcp->mb[2] = MSW(addr);
6634 	mcp->mb[3] = LSW(data);
6635 	mcp->mb[4] = MSW(data);
6636 	mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
6637 	mcp->in_mb = MBX_1|MBX_0;
6638 	mcp->tov = MBX_TOV_SECONDS;
6639 	mcp->flags = 0;
6640 	rval = qla2x00_mailbox_command(vha, mcp);
6641 
6642 	if (rval != QLA_SUCCESS) {
6643 		ql_dbg(ql_dbg_mbx, vha, 0x10e9,
6644 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6645 	} else {
6646 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
6647 		    "Done %s.\n", __func__);
6648 	}
6649 
6650 	return rval;
6651 }
6652 
6653 int qla2xxx_read_remote_register(scsi_qla_host_t *vha, uint32_t addr,
6654     uint32_t *data)
6655 {
6656 	int rval;
6657 	mbx_cmd_t mc;
6658 	mbx_cmd_t *mcp = &mc;
6659 
6660 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
6661 	    "Entered %s.\n", __func__);
6662 
6663 	mcp->mb[0] = MBC_READ_REMOTE_REG;
6664 	mcp->mb[1] = LSW(addr);
6665 	mcp->mb[2] = MSW(addr);
6666 	mcp->out_mb = MBX_2|MBX_1|MBX_0;
6667 	mcp->in_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
6668 	mcp->tov = MBX_TOV_SECONDS;
6669 	mcp->flags = 0;
6670 	rval = qla2x00_mailbox_command(vha, mcp);
6671 
6672 	*data = (uint32_t)((((uint32_t)mcp->mb[4]) << 16) | mcp->mb[3]);
6673 
6674 	if (rval != QLA_SUCCESS) {
6675 		ql_dbg(ql_dbg_mbx, vha, 0x10e9,
6676 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6677 	} else {
6678 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
6679 		    "Done %s.\n", __func__);
6680 	}
6681 
6682 	return rval;
6683 }
6684