xref: /openbmc/linux/drivers/scsi/qla2xxx/qla_mbx.c (revision da2ef666)
1 /*
2  * QLogic Fibre Channel HBA Driver
3  * Copyright (c)  2003-2014 QLogic Corporation
4  *
5  * See LICENSE.qla2xxx for copyright and licensing details.
6  */
7 #include "qla_def.h"
8 #include "qla_target.h"
9 
10 #include <linux/delay.h>
11 #include <linux/gfp.h>
12 
13 static struct mb_cmd_name {
14 	uint16_t cmd;
15 	const char *str;
16 } mb_str[] = {
17 	{MBC_GET_PORT_DATABASE,		"GPDB"},
18 	{MBC_GET_ID_LIST,		"GIDList"},
19 	{MBC_GET_LINK_PRIV_STATS,	"Stats"},
20 	{MBC_GET_RESOURCE_COUNTS,	"ResCnt"},
21 };
22 
23 static const char *mb_to_str(uint16_t cmd)
24 {
25 	int i;
26 	struct mb_cmd_name *e;
27 
28 	for (i = 0; i < ARRAY_SIZE(mb_str); i++) {
29 		e = mb_str + i;
30 		if (cmd == e->cmd)
31 			return e->str;
32 	}
33 	return "unknown";
34 }
35 
36 static struct rom_cmd {
37 	uint16_t cmd;
38 } rom_cmds[] = {
39 	{ MBC_LOAD_RAM },
40 	{ MBC_EXECUTE_FIRMWARE },
41 	{ MBC_READ_RAM_WORD },
42 	{ MBC_MAILBOX_REGISTER_TEST },
43 	{ MBC_VERIFY_CHECKSUM },
44 	{ MBC_GET_FIRMWARE_VERSION },
45 	{ MBC_LOAD_RISC_RAM },
46 	{ MBC_DUMP_RISC_RAM },
47 	{ MBC_LOAD_RISC_RAM_EXTENDED },
48 	{ MBC_DUMP_RISC_RAM_EXTENDED },
49 	{ MBC_WRITE_RAM_WORD_EXTENDED },
50 	{ MBC_READ_RAM_EXTENDED },
51 	{ MBC_GET_RESOURCE_COUNTS },
52 	{ MBC_SET_FIRMWARE_OPTION },
53 	{ MBC_MID_INITIALIZE_FIRMWARE },
54 	{ MBC_GET_FIRMWARE_STATE },
55 	{ MBC_GET_MEM_OFFLOAD_CNTRL_STAT },
56 	{ MBC_GET_RETRY_COUNT },
57 	{ MBC_TRACE_CONTROL },
58 	{ MBC_INITIALIZE_MULTIQ },
59 	{ MBC_IOCB_COMMAND_A64 },
60 	{ MBC_GET_ADAPTER_LOOP_ID },
61 	{ MBC_READ_SFP },
62 	{ MBC_GET_RNID_PARAMS },
63 };
64 
65 static int is_rom_cmd(uint16_t cmd)
66 {
67 	int i;
68 	struct  rom_cmd *wc;
69 
70 	for (i = 0; i < ARRAY_SIZE(rom_cmds); i++) {
71 		wc = rom_cmds + i;
72 		if (wc->cmd == cmd)
73 			return 1;
74 	}
75 
76 	return 0;
77 }
78 
79 /*
80  * qla2x00_mailbox_command
81  *	Issue mailbox command and waits for completion.
82  *
83  * Input:
84  *	ha = adapter block pointer.
85  *	mcp = driver internal mbx struct pointer.
86  *
87  * Output:
88  *	mb[MAX_MAILBOX_REGISTER_COUNT] = returned mailbox data.
89  *
90  * Returns:
91  *	0 : QLA_SUCCESS = cmd performed success
92  *	1 : QLA_FUNCTION_FAILED   (error encountered)
93  *	6 : QLA_FUNCTION_TIMEOUT (timeout condition encountered)
94  *
95  * Context:
96  *	Kernel context.
97  */
98 static int
99 qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
100 {
101 	int		rval, i;
102 	unsigned long    flags = 0;
103 	device_reg_t *reg;
104 	uint8_t		abort_active;
105 	uint8_t		io_lock_on;
106 	uint16_t	command = 0;
107 	uint16_t	*iptr;
108 	uint16_t __iomem *optr;
109 	uint32_t	cnt;
110 	uint32_t	mboxes;
111 	unsigned long	wait_time;
112 	struct qla_hw_data *ha = vha->hw;
113 	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
114 	u32 chip_reset;
115 
116 
117 	ql_dbg(ql_dbg_mbx, vha, 0x1000, "Entered %s.\n", __func__);
118 
119 	if (ha->pdev->error_state > pci_channel_io_frozen) {
120 		ql_log(ql_log_warn, vha, 0x1001,
121 		    "error_state is greater than pci_channel_io_frozen, "
122 		    "exiting.\n");
123 		return QLA_FUNCTION_TIMEOUT;
124 	}
125 
126 	if (vha->device_flags & DFLG_DEV_FAILED) {
127 		ql_log(ql_log_warn, vha, 0x1002,
128 		    "Device in failed state, exiting.\n");
129 		return QLA_FUNCTION_TIMEOUT;
130 	}
131 
132 	/* if PCI error, then avoid mbx processing.*/
133 	if (test_bit(PFLG_DISCONNECTED, &base_vha->dpc_flags) &&
134 	    test_bit(UNLOADING, &base_vha->dpc_flags)) {
135 		ql_log(ql_log_warn, vha, 0xd04e,
136 		    "PCI error, exiting.\n");
137 		return QLA_FUNCTION_TIMEOUT;
138 	}
139 
140 	reg = ha->iobase;
141 	io_lock_on = base_vha->flags.init_done;
142 
143 	rval = QLA_SUCCESS;
144 	abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
145 	chip_reset = ha->chip_reset;
146 
147 	if (ha->flags.pci_channel_io_perm_failure) {
148 		ql_log(ql_log_warn, vha, 0x1003,
149 		    "Perm failure on EEH timeout MBX, exiting.\n");
150 		return QLA_FUNCTION_TIMEOUT;
151 	}
152 
153 	if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
154 		/* Setting Link-Down error */
155 		mcp->mb[0] = MBS_LINK_DOWN_ERROR;
156 		ql_log(ql_log_warn, vha, 0x1004,
157 		    "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
158 		return QLA_FUNCTION_TIMEOUT;
159 	}
160 
161 	/* check if ISP abort is active and return cmd with timeout */
162 	if ((test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
163 	    test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
164 	    test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) &&
165 	    !is_rom_cmd(mcp->mb[0])) {
166 		ql_log(ql_log_info, vha, 0x1005,
167 		    "Cmd 0x%x aborted with timeout since ISP Abort is pending\n",
168 		    mcp->mb[0]);
169 		return QLA_FUNCTION_TIMEOUT;
170 	}
171 
172 	atomic_inc(&ha->num_pend_mbx_stage1);
173 	/*
174 	 * Wait for active mailbox commands to finish by waiting at most tov
175 	 * seconds. This is to serialize actual issuing of mailbox cmds during
176 	 * non ISP abort time.
177 	 */
178 	if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) {
179 		/* Timeout occurred. Return error. */
180 		ql_log(ql_log_warn, vha, 0xd035,
181 		    "Cmd access timeout, cmd=0x%x, Exiting.\n",
182 		    mcp->mb[0]);
183 		atomic_dec(&ha->num_pend_mbx_stage1);
184 		return QLA_FUNCTION_TIMEOUT;
185 	}
186 	atomic_dec(&ha->num_pend_mbx_stage1);
187 	if (ha->flags.purge_mbox || chip_reset != ha->chip_reset) {
188 		rval = QLA_ABORTED;
189 		goto premature_exit;
190 	}
191 
192 	ha->flags.mbox_busy = 1;
193 	/* Save mailbox command for debug */
194 	ha->mcp = mcp;
195 
196 	ql_dbg(ql_dbg_mbx, vha, 0x1006,
197 	    "Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]);
198 
199 	spin_lock_irqsave(&ha->hardware_lock, flags);
200 
201 	if (ha->flags.purge_mbox || chip_reset != ha->chip_reset) {
202 		rval = QLA_ABORTED;
203 		ha->flags.mbox_busy = 0;
204 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
205 		goto premature_exit;
206 	}
207 
208 	/* Load mailbox registers. */
209 	if (IS_P3P_TYPE(ha))
210 		optr = (uint16_t __iomem *)&reg->isp82.mailbox_in[0];
211 	else if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha)))
212 		optr = (uint16_t __iomem *)&reg->isp24.mailbox0;
213 	else
214 		optr = (uint16_t __iomem *)MAILBOX_REG(ha, &reg->isp, 0);
215 
216 	iptr = mcp->mb;
217 	command = mcp->mb[0];
218 	mboxes = mcp->out_mb;
219 
220 	ql_dbg(ql_dbg_mbx, vha, 0x1111,
221 	    "Mailbox registers (OUT):\n");
222 	for (cnt = 0; cnt < ha->mbx_count; cnt++) {
223 		if (IS_QLA2200(ha) && cnt == 8)
224 			optr =
225 			    (uint16_t __iomem *)MAILBOX_REG(ha, &reg->isp, 8);
226 		if (mboxes & BIT_0) {
227 			ql_dbg(ql_dbg_mbx, vha, 0x1112,
228 			    "mbox[%d]<-0x%04x\n", cnt, *iptr);
229 			WRT_REG_WORD(optr, *iptr);
230 		}
231 
232 		mboxes >>= 1;
233 		optr++;
234 		iptr++;
235 	}
236 
237 	ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1117,
238 	    "I/O Address = %p.\n", optr);
239 
240 	/* Issue set host interrupt command to send cmd out. */
241 	ha->flags.mbox_int = 0;
242 	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
243 
244 	/* Unlock mbx registers and wait for interrupt */
245 	ql_dbg(ql_dbg_mbx, vha, 0x100f,
246 	    "Going to unlock irq & waiting for interrupts. "
247 	    "jiffies=%lx.\n", jiffies);
248 
249 	/* Wait for mbx cmd completion until timeout */
250 	atomic_inc(&ha->num_pend_mbx_stage2);
251 	if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) {
252 		set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
253 
254 		if (IS_P3P_TYPE(ha)) {
255 			if (RD_REG_DWORD(&reg->isp82.hint) &
256 				HINT_MBX_INT_PENDING) {
257 				spin_unlock_irqrestore(&ha->hardware_lock,
258 					flags);
259 				ha->flags.mbox_busy = 0;
260 				atomic_dec(&ha->num_pend_mbx_stage2);
261 				ql_dbg(ql_dbg_mbx, vha, 0x1010,
262 				    "Pending mailbox timeout, exiting.\n");
263 				rval = QLA_FUNCTION_TIMEOUT;
264 				goto premature_exit;
265 			}
266 			WRT_REG_DWORD(&reg->isp82.hint, HINT_MBX_INT_PENDING);
267 		} else if (IS_FWI2_CAPABLE(ha))
268 			WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
269 		else
270 			WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT);
271 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
272 
273 		wait_time = jiffies;
274 		atomic_inc(&ha->num_pend_mbx_stage3);
275 		if (!wait_for_completion_timeout(&ha->mbx_intr_comp,
276 		    mcp->tov * HZ)) {
277 			ql_dbg(ql_dbg_mbx, vha, 0x117a,
278 			    "cmd=%x Timeout.\n", command);
279 			spin_lock_irqsave(&ha->hardware_lock, flags);
280 			clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
281 			spin_unlock_irqrestore(&ha->hardware_lock, flags);
282 
283 		} else if (ha->flags.purge_mbox ||
284 		    chip_reset != ha->chip_reset) {
285 			ha->flags.mbox_busy = 0;
286 			atomic_dec(&ha->num_pend_mbx_stage2);
287 			atomic_dec(&ha->num_pend_mbx_stage3);
288 			rval = QLA_ABORTED;
289 			goto premature_exit;
290 		}
291 		atomic_dec(&ha->num_pend_mbx_stage3);
292 
293 		if (time_after(jiffies, wait_time + 5 * HZ))
294 			ql_log(ql_log_warn, vha, 0x1015, "cmd=0x%x, waited %d msecs\n",
295 			    command, jiffies_to_msecs(jiffies - wait_time));
296 	} else {
297 		ql_dbg(ql_dbg_mbx, vha, 0x1011,
298 		    "Cmd=%x Polling Mode.\n", command);
299 
300 		if (IS_P3P_TYPE(ha)) {
301 			if (RD_REG_DWORD(&reg->isp82.hint) &
302 				HINT_MBX_INT_PENDING) {
303 				spin_unlock_irqrestore(&ha->hardware_lock,
304 					flags);
305 				ha->flags.mbox_busy = 0;
306 				atomic_dec(&ha->num_pend_mbx_stage2);
307 				ql_dbg(ql_dbg_mbx, vha, 0x1012,
308 				    "Pending mailbox timeout, exiting.\n");
309 				rval = QLA_FUNCTION_TIMEOUT;
310 				goto premature_exit;
311 			}
312 			WRT_REG_DWORD(&reg->isp82.hint, HINT_MBX_INT_PENDING);
313 		} else if (IS_FWI2_CAPABLE(ha))
314 			WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
315 		else
316 			WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT);
317 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
318 
319 		wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */
320 		while (!ha->flags.mbox_int) {
321 			if (ha->flags.purge_mbox ||
322 			    chip_reset != ha->chip_reset) {
323 				ha->flags.mbox_busy = 0;
324 				atomic_dec(&ha->num_pend_mbx_stage2);
325 				rval = QLA_ABORTED;
326 				goto premature_exit;
327 			}
328 
329 			if (time_after(jiffies, wait_time))
330 				break;
331 
332 			/*
333 			 * Check if it's UNLOADING, cause we cannot poll in
334 			 * this case, or else a NULL pointer dereference
335 			 * is triggered.
336 			 */
337 			if (unlikely(test_bit(UNLOADING, &base_vha->dpc_flags)))
338 				return QLA_FUNCTION_TIMEOUT;
339 
340 			/* Check for pending interrupts. */
341 			qla2x00_poll(ha->rsp_q_map[0]);
342 
343 			if (!ha->flags.mbox_int &&
344 			    !(IS_QLA2200(ha) &&
345 			    command == MBC_LOAD_RISC_RAM_EXTENDED))
346 				msleep(10);
347 		} /* while */
348 		ql_dbg(ql_dbg_mbx, vha, 0x1013,
349 		    "Waited %d sec.\n",
350 		    (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ));
351 	}
352 	atomic_dec(&ha->num_pend_mbx_stage2);
353 
354 	/* Check whether we timed out */
355 	if (ha->flags.mbox_int) {
356 		uint16_t *iptr2;
357 
358 		ql_dbg(ql_dbg_mbx, vha, 0x1014,
359 		    "Cmd=%x completed.\n", command);
360 
361 		/* Got interrupt. Clear the flag. */
362 		ha->flags.mbox_int = 0;
363 		clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
364 
365 		if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
366 			ha->flags.mbox_busy = 0;
367 			/* Setting Link-Down error */
368 			mcp->mb[0] = MBS_LINK_DOWN_ERROR;
369 			ha->mcp = NULL;
370 			rval = QLA_FUNCTION_FAILED;
371 			ql_log(ql_log_warn, vha, 0xd048,
372 			    "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
373 			goto premature_exit;
374 		}
375 
376 		if (ha->mailbox_out[0] != MBS_COMMAND_COMPLETE)
377 			rval = QLA_FUNCTION_FAILED;
378 
379 		/* Load return mailbox registers. */
380 		iptr2 = mcp->mb;
381 		iptr = (uint16_t *)&ha->mailbox_out[0];
382 		mboxes = mcp->in_mb;
383 
384 		ql_dbg(ql_dbg_mbx, vha, 0x1113,
385 		    "Mailbox registers (IN):\n");
386 		for (cnt = 0; cnt < ha->mbx_count; cnt++) {
387 			if (mboxes & BIT_0) {
388 				*iptr2 = *iptr;
389 				ql_dbg(ql_dbg_mbx, vha, 0x1114,
390 				    "mbox[%d]->0x%04x\n", cnt, *iptr2);
391 			}
392 
393 			mboxes >>= 1;
394 			iptr2++;
395 			iptr++;
396 		}
397 	} else {
398 
399 		uint16_t mb[8];
400 		uint32_t ictrl, host_status, hccr;
401 		uint16_t        w;
402 
403 		if (IS_FWI2_CAPABLE(ha)) {
404 			mb[0] = RD_REG_WORD(&reg->isp24.mailbox0);
405 			mb[1] = RD_REG_WORD(&reg->isp24.mailbox1);
406 			mb[2] = RD_REG_WORD(&reg->isp24.mailbox2);
407 			mb[3] = RD_REG_WORD(&reg->isp24.mailbox3);
408 			mb[7] = RD_REG_WORD(&reg->isp24.mailbox7);
409 			ictrl = RD_REG_DWORD(&reg->isp24.ictrl);
410 			host_status = RD_REG_DWORD(&reg->isp24.host_status);
411 			hccr = RD_REG_DWORD(&reg->isp24.hccr);
412 
413 			ql_log(ql_log_warn, vha, 0xd04c,
414 			    "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
415 			    "mb[0-3]=[0x%x 0x%x 0x%x 0x%x] mb7 0x%x host_status 0x%x hccr 0x%x\n",
416 			    command, ictrl, jiffies, mb[0], mb[1], mb[2], mb[3],
417 			    mb[7], host_status, hccr);
418 
419 		} else {
420 			mb[0] = RD_MAILBOX_REG(ha, &reg->isp, 0);
421 			ictrl = RD_REG_WORD(&reg->isp.ictrl);
422 			ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119,
423 			    "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
424 			    "mb[0]=0x%x\n", command, ictrl, jiffies, mb[0]);
425 		}
426 		ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019);
427 
428 		/* Capture FW dump only, if PCI device active */
429 		if (!pci_channel_offline(vha->hw->pdev)) {
430 			pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
431 			if (w == 0xffff || ictrl == 0xffffffff ||
432 			    (chip_reset != ha->chip_reset)) {
433 				/* This is special case if there is unload
434 				 * of driver happening and if PCI device go
435 				 * into bad state due to PCI error condition
436 				 * then only PCI ERR flag would be set.
437 				 * we will do premature exit for above case.
438 				 */
439 				ha->flags.mbox_busy = 0;
440 				rval = QLA_FUNCTION_TIMEOUT;
441 				goto premature_exit;
442 			}
443 
444 			/* Attempt to capture firmware dump for further
445 			 * anallysis of the current formware state. we do not
446 			 * need to do this if we are intentionally generating
447 			 * a dump
448 			 */
449 			if (mcp->mb[0] != MBC_GEN_SYSTEM_ERROR)
450 				ha->isp_ops->fw_dump(vha, 0);
451 			rval = QLA_FUNCTION_TIMEOUT;
452 		 }
453 	}
454 
455 	ha->flags.mbox_busy = 0;
456 
457 	/* Clean up */
458 	ha->mcp = NULL;
459 
460 	if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) {
461 		ql_dbg(ql_dbg_mbx, vha, 0x101a,
462 		    "Checking for additional resp interrupt.\n");
463 
464 		/* polling mode for non isp_abort commands. */
465 		qla2x00_poll(ha->rsp_q_map[0]);
466 	}
467 
468 	if (rval == QLA_FUNCTION_TIMEOUT &&
469 	    mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) {
470 		if (!io_lock_on || (mcp->flags & IOCTL_CMD) ||
471 		    ha->flags.eeh_busy) {
472 			/* not in dpc. schedule it for dpc to take over. */
473 			ql_dbg(ql_dbg_mbx, vha, 0x101b,
474 			    "Timeout, schedule isp_abort_needed.\n");
475 
476 			if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
477 			    !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
478 			    !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
479 				if (IS_QLA82XX(ha)) {
480 					ql_dbg(ql_dbg_mbx, vha, 0x112a,
481 					    "disabling pause transmit on port "
482 					    "0 & 1.\n");
483 					qla82xx_wr_32(ha,
484 					    QLA82XX_CRB_NIU + 0x98,
485 					    CRB_NIU_XG_PAUSE_CTL_P0|
486 					    CRB_NIU_XG_PAUSE_CTL_P1);
487 				}
488 				ql_log(ql_log_info, base_vha, 0x101c,
489 				    "Mailbox cmd timeout occurred, cmd=0x%x, "
490 				    "mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP "
491 				    "abort.\n", command, mcp->mb[0],
492 				    ha->flags.eeh_busy);
493 				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
494 				qla2xxx_wake_dpc(vha);
495 			}
496 		} else if (!abort_active) {
497 			/* call abort directly since we are in the DPC thread */
498 			ql_dbg(ql_dbg_mbx, vha, 0x101d,
499 			    "Timeout, calling abort_isp.\n");
500 
501 			if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
502 			    !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
503 			    !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
504 				if (IS_QLA82XX(ha)) {
505 					ql_dbg(ql_dbg_mbx, vha, 0x112b,
506 					    "disabling pause transmit on port "
507 					    "0 & 1.\n");
508 					qla82xx_wr_32(ha,
509 					    QLA82XX_CRB_NIU + 0x98,
510 					    CRB_NIU_XG_PAUSE_CTL_P0|
511 					    CRB_NIU_XG_PAUSE_CTL_P1);
512 				}
513 				ql_log(ql_log_info, base_vha, 0x101e,
514 				    "Mailbox cmd timeout occurred, cmd=0x%x, "
515 				    "mb[0]=0x%x. Scheduling ISP abort ",
516 				    command, mcp->mb[0]);
517 				set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
518 				clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
519 				/* Allow next mbx cmd to come in. */
520 				complete(&ha->mbx_cmd_comp);
521 				if (ha->isp_ops->abort_isp(vha)) {
522 					/* Failed. retry later. */
523 					set_bit(ISP_ABORT_NEEDED,
524 					    &vha->dpc_flags);
525 				}
526 				clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
527 				ql_dbg(ql_dbg_mbx, vha, 0x101f,
528 				    "Finished abort_isp.\n");
529 				goto mbx_done;
530 			}
531 		}
532 	}
533 
534 premature_exit:
535 	/* Allow next mbx cmd to come in. */
536 	complete(&ha->mbx_cmd_comp);
537 
538 mbx_done:
539 	if (rval == QLA_ABORTED) {
540 		ql_log(ql_log_info, vha, 0xd035,
541 		    "Chip Reset in progress. Purging Mbox cmd=0x%x.\n",
542 		    mcp->mb[0]);
543 	} else if (rval) {
544 		if (ql2xextended_error_logging & (ql_dbg_disc|ql_dbg_mbx)) {
545 			pr_warn("%s [%s]-%04x:%ld: **** Failed", QL_MSGHDR,
546 			    dev_name(&ha->pdev->dev), 0x1020+0x800,
547 			    vha->host_no);
548 			mboxes = mcp->in_mb;
549 			cnt = 4;
550 			for (i = 0; i < ha->mbx_count && cnt; i++, mboxes >>= 1)
551 				if (mboxes & BIT_0) {
552 					printk(" mb[%u]=%x", i, mcp->mb[i]);
553 					cnt--;
554 				}
555 			pr_warn(" cmd=%x ****\n", command);
556 		}
557 		if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha))) {
558 			ql_dbg(ql_dbg_mbx, vha, 0x1198,
559 			    "host_status=%#x intr_ctrl=%#x intr_status=%#x\n",
560 			    RD_REG_DWORD(&reg->isp24.host_status),
561 			    RD_REG_DWORD(&reg->isp24.ictrl),
562 			    RD_REG_DWORD(&reg->isp24.istatus));
563 		} else {
564 			ql_dbg(ql_dbg_mbx, vha, 0x1206,
565 			    "ctrl_status=%#x ictrl=%#x istatus=%#x\n",
566 			    RD_REG_WORD(&reg->isp.ctrl_status),
567 			    RD_REG_WORD(&reg->isp.ictrl),
568 			    RD_REG_WORD(&reg->isp.istatus));
569 		}
570 	} else {
571 		ql_dbg(ql_dbg_mbx, base_vha, 0x1021, "Done %s.\n", __func__);
572 	}
573 
574 	return rval;
575 }
576 
577 int
578 qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
579     uint32_t risc_code_size)
580 {
581 	int rval;
582 	struct qla_hw_data *ha = vha->hw;
583 	mbx_cmd_t mc;
584 	mbx_cmd_t *mcp = &mc;
585 
586 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1022,
587 	    "Entered %s.\n", __func__);
588 
589 	if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) {
590 		mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED;
591 		mcp->mb[8] = MSW(risc_addr);
592 		mcp->out_mb = MBX_8|MBX_0;
593 	} else {
594 		mcp->mb[0] = MBC_LOAD_RISC_RAM;
595 		mcp->out_mb = MBX_0;
596 	}
597 	mcp->mb[1] = LSW(risc_addr);
598 	mcp->mb[2] = MSW(req_dma);
599 	mcp->mb[3] = LSW(req_dma);
600 	mcp->mb[6] = MSW(MSD(req_dma));
601 	mcp->mb[7] = LSW(MSD(req_dma));
602 	mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
603 	if (IS_FWI2_CAPABLE(ha)) {
604 		mcp->mb[4] = MSW(risc_code_size);
605 		mcp->mb[5] = LSW(risc_code_size);
606 		mcp->out_mb |= MBX_5|MBX_4;
607 	} else {
608 		mcp->mb[4] = LSW(risc_code_size);
609 		mcp->out_mb |= MBX_4;
610 	}
611 
612 	mcp->in_mb = MBX_0;
613 	mcp->tov = MBX_TOV_SECONDS;
614 	mcp->flags = 0;
615 	rval = qla2x00_mailbox_command(vha, mcp);
616 
617 	if (rval != QLA_SUCCESS) {
618 		ql_dbg(ql_dbg_mbx, vha, 0x1023,
619 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
620 	} else {
621 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1024,
622 		    "Done %s.\n", __func__);
623 	}
624 
625 	return rval;
626 }
627 
628 #define	EXTENDED_BB_CREDITS	BIT_0
629 #define	NVME_ENABLE_FLAG	BIT_3
630 static inline uint16_t qla25xx_set_sfp_lr_dist(struct qla_hw_data *ha)
631 {
632 	uint16_t mb4 = BIT_0;
633 
634 	if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
635 		mb4 |= ha->long_range_distance << LR_DIST_FW_POS;
636 
637 	return mb4;
638 }
639 
640 static inline uint16_t qla25xx_set_nvr_lr_dist(struct qla_hw_data *ha)
641 {
642 	uint16_t mb4 = BIT_0;
643 
644 	if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
645 		struct nvram_81xx *nv = ha->nvram;
646 
647 		mb4 |= LR_DIST_FW_FIELD(nv->enhanced_features);
648 	}
649 
650 	return mb4;
651 }
652 
653 /*
654  * qla2x00_execute_fw
655  *     Start adapter firmware.
656  *
657  * Input:
658  *     ha = adapter block pointer.
659  *     TARGET_QUEUE_LOCK must be released.
660  *     ADAPTER_STATE_LOCK must be released.
661  *
662  * Returns:
663  *     qla2x00 local function return status code.
664  *
665  * Context:
666  *     Kernel context.
667  */
668 int
669 qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
670 {
671 	int rval;
672 	struct qla_hw_data *ha = vha->hw;
673 	mbx_cmd_t mc;
674 	mbx_cmd_t *mcp = &mc;
675 
676 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1025,
677 	    "Entered %s.\n", __func__);
678 
679 	mcp->mb[0] = MBC_EXECUTE_FIRMWARE;
680 	mcp->out_mb = MBX_0;
681 	mcp->in_mb = MBX_0;
682 	if (IS_FWI2_CAPABLE(ha)) {
683 		mcp->mb[1] = MSW(risc_addr);
684 		mcp->mb[2] = LSW(risc_addr);
685 		mcp->mb[3] = 0;
686 		mcp->mb[4] = 0;
687 		ha->flags.using_lr_setting = 0;
688 		if (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
689 		    IS_QLA27XX(ha)) {
690 			if (ql2xautodetectsfp) {
691 				if (ha->flags.detected_lr_sfp) {
692 					mcp->mb[4] |=
693 					    qla25xx_set_sfp_lr_dist(ha);
694 					ha->flags.using_lr_setting = 1;
695 				}
696 			} else {
697 				struct nvram_81xx *nv = ha->nvram;
698 				/* set LR distance if specified in nvram */
699 				if (nv->enhanced_features &
700 				    NEF_LR_DIST_ENABLE) {
701 					mcp->mb[4] |=
702 					    qla25xx_set_nvr_lr_dist(ha);
703 					ha->flags.using_lr_setting = 1;
704 				}
705 			}
706 		}
707 
708 		if (ql2xnvmeenable && IS_QLA27XX(ha))
709 			mcp->mb[4] |= NVME_ENABLE_FLAG;
710 
711 		if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
712 			struct nvram_81xx *nv = ha->nvram;
713 			/* set minimum speed if specified in nvram */
714 			if (nv->min_link_speed >= 2 &&
715 			    nv->min_link_speed <= 5) {
716 				mcp->mb[4] |= BIT_4;
717 				mcp->mb[11] = nv->min_link_speed;
718 				mcp->out_mb |= MBX_11;
719 				mcp->in_mb |= BIT_5;
720 				vha->min_link_speed_feat = nv->min_link_speed;
721 			}
722 		}
723 
724 		if (ha->flags.exlogins_enabled)
725 			mcp->mb[4] |= ENABLE_EXTENDED_LOGIN;
726 
727 		if (ha->flags.exchoffld_enabled)
728 			mcp->mb[4] |= ENABLE_EXCHANGE_OFFLD;
729 
730 		mcp->out_mb |= MBX_4|MBX_3|MBX_2|MBX_1;
731 		mcp->in_mb |= MBX_3 | MBX_2 | MBX_1;
732 	} else {
733 		mcp->mb[1] = LSW(risc_addr);
734 		mcp->out_mb |= MBX_1;
735 		if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
736 			mcp->mb[2] = 0;
737 			mcp->out_mb |= MBX_2;
738 		}
739 	}
740 
741 	mcp->tov = MBX_TOV_SECONDS;
742 	mcp->flags = 0;
743 	rval = qla2x00_mailbox_command(vha, mcp);
744 
745 	if (rval != QLA_SUCCESS) {
746 		ql_dbg(ql_dbg_mbx, vha, 0x1026,
747 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
748 	} else {
749 		if (IS_FWI2_CAPABLE(ha)) {
750 			ha->fw_ability_mask = mcp->mb[3] << 16 | mcp->mb[2];
751 			ql_dbg(ql_dbg_mbx, vha, 0x119a,
752 			    "fw_ability_mask=%x.\n", ha->fw_ability_mask);
753 			ql_dbg(ql_dbg_mbx, vha, 0x1027,
754 			    "exchanges=%x.\n", mcp->mb[1]);
755 			if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
756 				ha->max_speed_sup = mcp->mb[2] & BIT_0;
757 				ql_dbg(ql_dbg_mbx, vha, 0x119b,
758 				    "Maximum speed supported=%s.\n",
759 				    ha->max_speed_sup ? "32Gps" : "16Gps");
760 				if (vha->min_link_speed_feat) {
761 					ha->min_link_speed = mcp->mb[5];
762 					ql_dbg(ql_dbg_mbx, vha, 0x119c,
763 					    "Minimum speed set=%s.\n",
764 					    mcp->mb[5] == 5 ? "32Gps" :
765 					    mcp->mb[5] == 4 ? "16Gps" :
766 					    mcp->mb[5] == 3 ? "8Gps" :
767 					    mcp->mb[5] == 2 ? "4Gps" :
768 						"unknown");
769 				}
770 			}
771 		}
772 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1028,
773 		    "Done.\n");
774 	}
775 
776 	return rval;
777 }
778 
779 /*
780  * qla_get_exlogin_status
781  *	Get extended login status
782  *	uses the memory offload control/status Mailbox
783  *
784  * Input:
785  *	ha:		adapter state pointer.
786  *	fwopt:		firmware options
787  *
788  * Returns:
789  *	qla2x00 local function status
790  *
791  * Context:
792  *	Kernel context.
793  */
794 #define	FETCH_XLOGINS_STAT	0x8
795 int
796 qla_get_exlogin_status(scsi_qla_host_t *vha, uint16_t *buf_sz,
797 	uint16_t *ex_logins_cnt)
798 {
799 	int rval;
800 	mbx_cmd_t	mc;
801 	mbx_cmd_t	*mcp = &mc;
802 
803 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118f,
804 	    "Entered %s\n", __func__);
805 
806 	memset(mcp->mb, 0 , sizeof(mcp->mb));
807 	mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
808 	mcp->mb[1] = FETCH_XLOGINS_STAT;
809 	mcp->out_mb = MBX_1|MBX_0;
810 	mcp->in_mb = MBX_10|MBX_4|MBX_0;
811 	mcp->tov = MBX_TOV_SECONDS;
812 	mcp->flags = 0;
813 
814 	rval = qla2x00_mailbox_command(vha, mcp);
815 	if (rval != QLA_SUCCESS) {
816 		ql_dbg(ql_dbg_mbx, vha, 0x1115, "Failed=%x.\n", rval);
817 	} else {
818 		*buf_sz = mcp->mb[4];
819 		*ex_logins_cnt = mcp->mb[10];
820 
821 		ql_log(ql_log_info, vha, 0x1190,
822 		    "buffer size 0x%x, exchange login count=%d\n",
823 		    mcp->mb[4], mcp->mb[10]);
824 
825 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1116,
826 		    "Done %s.\n", __func__);
827 	}
828 
829 	return rval;
830 }
831 
832 /*
833  * qla_set_exlogin_mem_cfg
834  *	set extended login memory configuration
835  *	Mbx needs to be issues before init_cb is set
836  *
837  * Input:
838  *	ha:		adapter state pointer.
839  *	buffer:		buffer pointer
840  *	phys_addr:	physical address of buffer
841  *	size:		size of buffer
842  *	TARGET_QUEUE_LOCK must be released
843  *	ADAPTER_STATE_LOCK must be release
844  *
845  * Returns:
846  *	qla2x00 local funxtion status code.
847  *
848  * Context:
849  *	Kernel context.
850  */
851 #define CONFIG_XLOGINS_MEM	0x3
852 int
853 qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr)
854 {
855 	int		rval;
856 	mbx_cmd_t	mc;
857 	mbx_cmd_t	*mcp = &mc;
858 	struct qla_hw_data *ha = vha->hw;
859 
860 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111a,
861 	    "Entered %s.\n", __func__);
862 
863 	memset(mcp->mb, 0 , sizeof(mcp->mb));
864 	mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
865 	mcp->mb[1] = CONFIG_XLOGINS_MEM;
866 	mcp->mb[2] = MSW(phys_addr);
867 	mcp->mb[3] = LSW(phys_addr);
868 	mcp->mb[6] = MSW(MSD(phys_addr));
869 	mcp->mb[7] = LSW(MSD(phys_addr));
870 	mcp->mb[8] = MSW(ha->exlogin_size);
871 	mcp->mb[9] = LSW(ha->exlogin_size);
872 	mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
873 	mcp->in_mb = MBX_11|MBX_0;
874 	mcp->tov = MBX_TOV_SECONDS;
875 	mcp->flags = 0;
876 	rval = qla2x00_mailbox_command(vha, mcp);
877 	if (rval != QLA_SUCCESS) {
878 		/*EMPTY*/
879 		ql_dbg(ql_dbg_mbx, vha, 0x111b, "Failed=%x.\n", rval);
880 	} else {
881 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c,
882 		    "Done %s.\n", __func__);
883 	}
884 
885 	return rval;
886 }
887 
888 /*
889  * qla_get_exchoffld_status
890  *	Get exchange offload status
891  *	uses the memory offload control/status Mailbox
892  *
893  * Input:
894  *	ha:		adapter state pointer.
895  *	fwopt:		firmware options
896  *
897  * Returns:
898  *	qla2x00 local function status
899  *
900  * Context:
901  *	Kernel context.
902  */
903 #define	FETCH_XCHOFFLD_STAT	0x2
904 int
905 qla_get_exchoffld_status(scsi_qla_host_t *vha, uint16_t *buf_sz,
906 	uint16_t *ex_logins_cnt)
907 {
908 	int rval;
909 	mbx_cmd_t	mc;
910 	mbx_cmd_t	*mcp = &mc;
911 
912 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1019,
913 	    "Entered %s\n", __func__);
914 
915 	memset(mcp->mb, 0 , sizeof(mcp->mb));
916 	mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
917 	mcp->mb[1] = FETCH_XCHOFFLD_STAT;
918 	mcp->out_mb = MBX_1|MBX_0;
919 	mcp->in_mb = MBX_10|MBX_4|MBX_0;
920 	mcp->tov = MBX_TOV_SECONDS;
921 	mcp->flags = 0;
922 
923 	rval = qla2x00_mailbox_command(vha, mcp);
924 	if (rval != QLA_SUCCESS) {
925 		ql_dbg(ql_dbg_mbx, vha, 0x1155, "Failed=%x.\n", rval);
926 	} else {
927 		*buf_sz = mcp->mb[4];
928 		*ex_logins_cnt = mcp->mb[10];
929 
930 		ql_log(ql_log_info, vha, 0x118e,
931 		    "buffer size 0x%x, exchange offload count=%d\n",
932 		    mcp->mb[4], mcp->mb[10]);
933 
934 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1156,
935 		    "Done %s.\n", __func__);
936 	}
937 
938 	return rval;
939 }
940 
941 /*
942  * qla_set_exchoffld_mem_cfg
943  *	Set exchange offload memory configuration
944  *	Mbx needs to be issues before init_cb is set
945  *
946  * Input:
947  *	ha:		adapter state pointer.
948  *	buffer:		buffer pointer
949  *	phys_addr:	physical address of buffer
950  *	size:		size of buffer
951  *	TARGET_QUEUE_LOCK must be released
952  *	ADAPTER_STATE_LOCK must be release
953  *
954  * Returns:
955  *	qla2x00 local funxtion status code.
956  *
957  * Context:
958  *	Kernel context.
959  */
960 #define CONFIG_XCHOFFLD_MEM	0x3
961 int
962 qla_set_exchoffld_mem_cfg(scsi_qla_host_t *vha)
963 {
964 	int		rval;
965 	mbx_cmd_t	mc;
966 	mbx_cmd_t	*mcp = &mc;
967 	struct qla_hw_data *ha = vha->hw;
968 
969 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1157,
970 	    "Entered %s.\n", __func__);
971 
972 	memset(mcp->mb, 0 , sizeof(mcp->mb));
973 	mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
974 	mcp->mb[1] = CONFIG_XCHOFFLD_MEM;
975 	mcp->mb[2] = MSW(ha->exchoffld_buf_dma);
976 	mcp->mb[3] = LSW(ha->exchoffld_buf_dma);
977 	mcp->mb[6] = MSW(MSD(ha->exchoffld_buf_dma));
978 	mcp->mb[7] = LSW(MSD(ha->exchoffld_buf_dma));
979 	mcp->mb[8] = MSW(ha->exchoffld_size);
980 	mcp->mb[9] = LSW(ha->exchoffld_size);
981 	mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
982 	mcp->in_mb = MBX_11|MBX_0;
983 	mcp->tov = MBX_TOV_SECONDS;
984 	mcp->flags = 0;
985 	rval = qla2x00_mailbox_command(vha, mcp);
986 	if (rval != QLA_SUCCESS) {
987 		/*EMPTY*/
988 		ql_dbg(ql_dbg_mbx, vha, 0x1158, "Failed=%x.\n", rval);
989 	} else {
990 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1192,
991 		    "Done %s.\n", __func__);
992 	}
993 
994 	return rval;
995 }
996 
997 /*
998  * qla2x00_get_fw_version
999  *	Get firmware version.
1000  *
1001  * Input:
1002  *	ha:		adapter state pointer.
1003  *	major:		pointer for major number.
1004  *	minor:		pointer for minor number.
1005  *	subminor:	pointer for subminor number.
1006  *
1007  * Returns:
1008  *	qla2x00 local function return status code.
1009  *
1010  * Context:
1011  *	Kernel context.
1012  */
1013 int
1014 qla2x00_get_fw_version(scsi_qla_host_t *vha)
1015 {
1016 	int		rval;
1017 	mbx_cmd_t	mc;
1018 	mbx_cmd_t	*mcp = &mc;
1019 	struct qla_hw_data *ha = vha->hw;
1020 
1021 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1029,
1022 	    "Entered %s.\n", __func__);
1023 
1024 	mcp->mb[0] = MBC_GET_FIRMWARE_VERSION;
1025 	mcp->out_mb = MBX_0;
1026 	mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
1027 	if (IS_QLA81XX(vha->hw) || IS_QLA8031(ha) || IS_QLA8044(ha))
1028 		mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8;
1029 	if (IS_FWI2_CAPABLE(ha))
1030 		mcp->in_mb |= MBX_17|MBX_16|MBX_15;
1031 	if (IS_QLA27XX(ha))
1032 		mcp->in_mb |=
1033 		    MBX_25|MBX_24|MBX_23|MBX_22|MBX_21|MBX_20|MBX_19|MBX_18|
1034 		    MBX_14|MBX_13|MBX_11|MBX_10|MBX_9|MBX_8;
1035 
1036 	mcp->flags = 0;
1037 	mcp->tov = MBX_TOV_SECONDS;
1038 	rval = qla2x00_mailbox_command(vha, mcp);
1039 	if (rval != QLA_SUCCESS)
1040 		goto failed;
1041 
1042 	/* Return mailbox data. */
1043 	ha->fw_major_version = mcp->mb[1];
1044 	ha->fw_minor_version = mcp->mb[2];
1045 	ha->fw_subminor_version = mcp->mb[3];
1046 	ha->fw_attributes = mcp->mb[6];
1047 	if (IS_QLA2100(vha->hw) || IS_QLA2200(vha->hw))
1048 		ha->fw_memory_size = 0x1FFFF;		/* Defaults to 128KB. */
1049 	else
1050 		ha->fw_memory_size = (mcp->mb[5] << 16) | mcp->mb[4];
1051 
1052 	if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
1053 		ha->mpi_version[0] = mcp->mb[10] & 0xff;
1054 		ha->mpi_version[1] = mcp->mb[11] >> 8;
1055 		ha->mpi_version[2] = mcp->mb[11] & 0xff;
1056 		ha->mpi_capabilities = (mcp->mb[12] << 16) | mcp->mb[13];
1057 		ha->phy_version[0] = mcp->mb[8] & 0xff;
1058 		ha->phy_version[1] = mcp->mb[9] >> 8;
1059 		ha->phy_version[2] = mcp->mb[9] & 0xff;
1060 	}
1061 
1062 	if (IS_FWI2_CAPABLE(ha)) {
1063 		ha->fw_attributes_h = mcp->mb[15];
1064 		ha->fw_attributes_ext[0] = mcp->mb[16];
1065 		ha->fw_attributes_ext[1] = mcp->mb[17];
1066 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1139,
1067 		    "%s: FW_attributes Upper: 0x%x, Lower: 0x%x.\n",
1068 		    __func__, mcp->mb[15], mcp->mb[6]);
1069 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x112f,
1070 		    "%s: Ext_FwAttributes Upper: 0x%x, Lower: 0x%x.\n",
1071 		    __func__, mcp->mb[17], mcp->mb[16]);
1072 
1073 		if (ha->fw_attributes_h & 0x4)
1074 			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118d,
1075 			    "%s: Firmware supports Extended Login 0x%x\n",
1076 			    __func__, ha->fw_attributes_h);
1077 
1078 		if (ha->fw_attributes_h & 0x8)
1079 			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1191,
1080 			    "%s: Firmware supports Exchange Offload 0x%x\n",
1081 			    __func__, ha->fw_attributes_h);
1082 
1083 		/*
1084 		 * FW supports nvme and driver load parameter requested nvme.
1085 		 * BIT 26 of fw_attributes indicates NVMe support.
1086 		 */
1087 		if ((ha->fw_attributes_h & 0x400) && ql2xnvmeenable) {
1088 			vha->flags.nvme_enabled = 1;
1089 			ql_log(ql_log_info, vha, 0xd302,
1090 			    "%s: FC-NVMe is Enabled (0x%x)\n",
1091 			     __func__, ha->fw_attributes_h);
1092 		}
1093 	}
1094 
1095 	if (IS_QLA27XX(ha)) {
1096 		ha->mpi_version[0] = mcp->mb[10] & 0xff;
1097 		ha->mpi_version[1] = mcp->mb[11] >> 8;
1098 		ha->mpi_version[2] = mcp->mb[11] & 0xff;
1099 		ha->pep_version[0] = mcp->mb[13] & 0xff;
1100 		ha->pep_version[1] = mcp->mb[14] >> 8;
1101 		ha->pep_version[2] = mcp->mb[14] & 0xff;
1102 		ha->fw_shared_ram_start = (mcp->mb[19] << 16) | mcp->mb[18];
1103 		ha->fw_shared_ram_end = (mcp->mb[21] << 16) | mcp->mb[20];
1104 		ha->fw_ddr_ram_start = (mcp->mb[23] << 16) | mcp->mb[22];
1105 		ha->fw_ddr_ram_end = (mcp->mb[25] << 16) | mcp->mb[24];
1106 	}
1107 
1108 failed:
1109 	if (rval != QLA_SUCCESS) {
1110 		/*EMPTY*/
1111 		ql_dbg(ql_dbg_mbx, vha, 0x102a, "Failed=%x.\n", rval);
1112 	} else {
1113 		/*EMPTY*/
1114 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102b,
1115 		    "Done %s.\n", __func__);
1116 	}
1117 	return rval;
1118 }
1119 
1120 /*
1121  * qla2x00_get_fw_options
1122  *	Set firmware options.
1123  *
1124  * Input:
1125  *	ha = adapter block pointer.
1126  *	fwopt = pointer for firmware options.
1127  *
1128  * Returns:
1129  *	qla2x00 local function return status code.
1130  *
1131  * Context:
1132  *	Kernel context.
1133  */
1134 int
1135 qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
1136 {
1137 	int rval;
1138 	mbx_cmd_t mc;
1139 	mbx_cmd_t *mcp = &mc;
1140 
1141 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102c,
1142 	    "Entered %s.\n", __func__);
1143 
1144 	mcp->mb[0] = MBC_GET_FIRMWARE_OPTION;
1145 	mcp->out_mb = MBX_0;
1146 	mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1147 	mcp->tov = MBX_TOV_SECONDS;
1148 	mcp->flags = 0;
1149 	rval = qla2x00_mailbox_command(vha, mcp);
1150 
1151 	if (rval != QLA_SUCCESS) {
1152 		/*EMPTY*/
1153 		ql_dbg(ql_dbg_mbx, vha, 0x102d, "Failed=%x.\n", rval);
1154 	} else {
1155 		fwopts[0] = mcp->mb[0];
1156 		fwopts[1] = mcp->mb[1];
1157 		fwopts[2] = mcp->mb[2];
1158 		fwopts[3] = mcp->mb[3];
1159 
1160 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102e,
1161 		    "Done %s.\n", __func__);
1162 	}
1163 
1164 	return rval;
1165 }
1166 
1167 
1168 /*
1169  * qla2x00_set_fw_options
1170  *	Set firmware options.
1171  *
1172  * Input:
1173  *	ha = adapter block pointer.
1174  *	fwopt = pointer for firmware options.
1175  *
1176  * Returns:
1177  *	qla2x00 local function return status code.
1178  *
1179  * Context:
1180  *	Kernel context.
1181  */
1182 int
1183 qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
1184 {
1185 	int rval;
1186 	mbx_cmd_t mc;
1187 	mbx_cmd_t *mcp = &mc;
1188 
1189 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102f,
1190 	    "Entered %s.\n", __func__);
1191 
1192 	mcp->mb[0] = MBC_SET_FIRMWARE_OPTION;
1193 	mcp->mb[1] = fwopts[1];
1194 	mcp->mb[2] = fwopts[2];
1195 	mcp->mb[3] = fwopts[3];
1196 	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1197 	mcp->in_mb = MBX_0;
1198 	if (IS_FWI2_CAPABLE(vha->hw)) {
1199 		mcp->in_mb |= MBX_1;
1200 		mcp->mb[10] = fwopts[10];
1201 		mcp->out_mb |= MBX_10;
1202 	} else {
1203 		mcp->mb[10] = fwopts[10];
1204 		mcp->mb[11] = fwopts[11];
1205 		mcp->mb[12] = 0;	/* Undocumented, but used */
1206 		mcp->out_mb |= MBX_12|MBX_11|MBX_10;
1207 	}
1208 	mcp->tov = MBX_TOV_SECONDS;
1209 	mcp->flags = 0;
1210 	rval = qla2x00_mailbox_command(vha, mcp);
1211 
1212 	fwopts[0] = mcp->mb[0];
1213 
1214 	if (rval != QLA_SUCCESS) {
1215 		/*EMPTY*/
1216 		ql_dbg(ql_dbg_mbx, vha, 0x1030,
1217 		    "Failed=%x (%x/%x).\n", rval, mcp->mb[0], mcp->mb[1]);
1218 	} else {
1219 		/*EMPTY*/
1220 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1031,
1221 		    "Done %s.\n", __func__);
1222 	}
1223 
1224 	return rval;
1225 }
1226 
1227 /*
1228  * qla2x00_mbx_reg_test
1229  *	Mailbox register wrap test.
1230  *
1231  * Input:
1232  *	ha = adapter block pointer.
1233  *	TARGET_QUEUE_LOCK must be released.
1234  *	ADAPTER_STATE_LOCK must be released.
1235  *
1236  * Returns:
1237  *	qla2x00 local function return status code.
1238  *
1239  * Context:
1240  *	Kernel context.
1241  */
1242 int
1243 qla2x00_mbx_reg_test(scsi_qla_host_t *vha)
1244 {
1245 	int rval;
1246 	mbx_cmd_t mc;
1247 	mbx_cmd_t *mcp = &mc;
1248 
1249 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1032,
1250 	    "Entered %s.\n", __func__);
1251 
1252 	mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST;
1253 	mcp->mb[1] = 0xAAAA;
1254 	mcp->mb[2] = 0x5555;
1255 	mcp->mb[3] = 0xAA55;
1256 	mcp->mb[4] = 0x55AA;
1257 	mcp->mb[5] = 0xA5A5;
1258 	mcp->mb[6] = 0x5A5A;
1259 	mcp->mb[7] = 0x2525;
1260 	mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
1261 	mcp->in_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
1262 	mcp->tov = MBX_TOV_SECONDS;
1263 	mcp->flags = 0;
1264 	rval = qla2x00_mailbox_command(vha, mcp);
1265 
1266 	if (rval == QLA_SUCCESS) {
1267 		if (mcp->mb[1] != 0xAAAA || mcp->mb[2] != 0x5555 ||
1268 		    mcp->mb[3] != 0xAA55 || mcp->mb[4] != 0x55AA)
1269 			rval = QLA_FUNCTION_FAILED;
1270 		if (mcp->mb[5] != 0xA5A5 || mcp->mb[6] != 0x5A5A ||
1271 		    mcp->mb[7] != 0x2525)
1272 			rval = QLA_FUNCTION_FAILED;
1273 	}
1274 
1275 	if (rval != QLA_SUCCESS) {
1276 		/*EMPTY*/
1277 		ql_dbg(ql_dbg_mbx, vha, 0x1033, "Failed=%x.\n", rval);
1278 	} else {
1279 		/*EMPTY*/
1280 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1034,
1281 		    "Done %s.\n", __func__);
1282 	}
1283 
1284 	return rval;
1285 }
1286 
1287 /*
1288  * qla2x00_verify_checksum
1289  *	Verify firmware checksum.
1290  *
1291  * Input:
1292  *	ha = adapter block pointer.
1293  *	TARGET_QUEUE_LOCK must be released.
1294  *	ADAPTER_STATE_LOCK must be released.
1295  *
1296  * Returns:
1297  *	qla2x00 local function return status code.
1298  *
1299  * Context:
1300  *	Kernel context.
1301  */
1302 int
1303 qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr)
1304 {
1305 	int rval;
1306 	mbx_cmd_t mc;
1307 	mbx_cmd_t *mcp = &mc;
1308 
1309 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1035,
1310 	    "Entered %s.\n", __func__);
1311 
1312 	mcp->mb[0] = MBC_VERIFY_CHECKSUM;
1313 	mcp->out_mb = MBX_0;
1314 	mcp->in_mb = MBX_0;
1315 	if (IS_FWI2_CAPABLE(vha->hw)) {
1316 		mcp->mb[1] = MSW(risc_addr);
1317 		mcp->mb[2] = LSW(risc_addr);
1318 		mcp->out_mb |= MBX_2|MBX_1;
1319 		mcp->in_mb |= MBX_2|MBX_1;
1320 	} else {
1321 		mcp->mb[1] = LSW(risc_addr);
1322 		mcp->out_mb |= MBX_1;
1323 		mcp->in_mb |= MBX_1;
1324 	}
1325 
1326 	mcp->tov = MBX_TOV_SECONDS;
1327 	mcp->flags = 0;
1328 	rval = qla2x00_mailbox_command(vha, mcp);
1329 
1330 	if (rval != QLA_SUCCESS) {
1331 		ql_dbg(ql_dbg_mbx, vha, 0x1036,
1332 		    "Failed=%x chm sum=%x.\n", rval, IS_FWI2_CAPABLE(vha->hw) ?
1333 		    (mcp->mb[2] << 16) | mcp->mb[1] : mcp->mb[1]);
1334 	} else {
1335 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1037,
1336 		    "Done %s.\n", __func__);
1337 	}
1338 
1339 	return rval;
1340 }
1341 
1342 /*
1343  * qla2x00_issue_iocb
1344  *	Issue IOCB using mailbox command
1345  *
1346  * Input:
1347  *	ha = adapter state pointer.
1348  *	buffer = buffer pointer.
1349  *	phys_addr = physical address of buffer.
1350  *	size = size of buffer.
1351  *	TARGET_QUEUE_LOCK must be released.
1352  *	ADAPTER_STATE_LOCK must be released.
1353  *
1354  * Returns:
1355  *	qla2x00 local function return status code.
1356  *
1357  * Context:
1358  *	Kernel context.
1359  */
1360 int
1361 qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
1362     dma_addr_t phys_addr, size_t size, uint32_t tov)
1363 {
1364 	int		rval;
1365 	mbx_cmd_t	mc;
1366 	mbx_cmd_t	*mcp = &mc;
1367 
1368 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1038,
1369 	    "Entered %s.\n", __func__);
1370 
1371 	mcp->mb[0] = MBC_IOCB_COMMAND_A64;
1372 	mcp->mb[1] = 0;
1373 	mcp->mb[2] = MSW(phys_addr);
1374 	mcp->mb[3] = LSW(phys_addr);
1375 	mcp->mb[6] = MSW(MSD(phys_addr));
1376 	mcp->mb[7] = LSW(MSD(phys_addr));
1377 	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1378 	mcp->in_mb = MBX_2|MBX_0;
1379 	mcp->tov = tov;
1380 	mcp->flags = 0;
1381 	rval = qla2x00_mailbox_command(vha, mcp);
1382 
1383 	if (rval != QLA_SUCCESS) {
1384 		/*EMPTY*/
1385 		ql_dbg(ql_dbg_mbx, vha, 0x1039, "Failed=%x.\n", rval);
1386 	} else {
1387 		sts_entry_t *sts_entry = (sts_entry_t *) buffer;
1388 
1389 		/* Mask reserved bits. */
1390 		sts_entry->entry_status &=
1391 		    IS_FWI2_CAPABLE(vha->hw) ? RF_MASK_24XX : RF_MASK;
1392 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103a,
1393 		    "Done %s.\n", __func__);
1394 	}
1395 
1396 	return rval;
1397 }
1398 
1399 int
1400 qla2x00_issue_iocb(scsi_qla_host_t *vha, void *buffer, dma_addr_t phys_addr,
1401     size_t size)
1402 {
1403 	return qla2x00_issue_iocb_timeout(vha, buffer, phys_addr, size,
1404 	    MBX_TOV_SECONDS);
1405 }
1406 
1407 /*
1408  * qla2x00_abort_command
1409  *	Abort command aborts a specified IOCB.
1410  *
1411  * Input:
1412  *	ha = adapter block pointer.
1413  *	sp = SB structure pointer.
1414  *
1415  * Returns:
1416  *	qla2x00 local function return status code.
1417  *
1418  * Context:
1419  *	Kernel context.
1420  */
1421 int
1422 qla2x00_abort_command(srb_t *sp)
1423 {
1424 	unsigned long   flags = 0;
1425 	int		rval;
1426 	uint32_t	handle = 0;
1427 	mbx_cmd_t	mc;
1428 	mbx_cmd_t	*mcp = &mc;
1429 	fc_port_t	*fcport = sp->fcport;
1430 	scsi_qla_host_t *vha = fcport->vha;
1431 	struct qla_hw_data *ha = vha->hw;
1432 	struct req_que *req;
1433 	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1434 
1435 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103b,
1436 	    "Entered %s.\n", __func__);
1437 
1438 	if (vha->flags.qpairs_available && sp->qpair)
1439 		req = sp->qpair->req;
1440 	else
1441 		req = vha->req;
1442 
1443 	spin_lock_irqsave(&ha->hardware_lock, flags);
1444 	for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
1445 		if (req->outstanding_cmds[handle] == sp)
1446 			break;
1447 	}
1448 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
1449 
1450 	if (handle == req->num_outstanding_cmds) {
1451 		/* command not found */
1452 		return QLA_FUNCTION_FAILED;
1453 	}
1454 
1455 	mcp->mb[0] = MBC_ABORT_COMMAND;
1456 	if (HAS_EXTENDED_IDS(ha))
1457 		mcp->mb[1] = fcport->loop_id;
1458 	else
1459 		mcp->mb[1] = fcport->loop_id << 8;
1460 	mcp->mb[2] = (uint16_t)handle;
1461 	mcp->mb[3] = (uint16_t)(handle >> 16);
1462 	mcp->mb[6] = (uint16_t)cmd->device->lun;
1463 	mcp->out_mb = MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1464 	mcp->in_mb = MBX_0;
1465 	mcp->tov = MBX_TOV_SECONDS;
1466 	mcp->flags = 0;
1467 	rval = qla2x00_mailbox_command(vha, mcp);
1468 
1469 	if (rval != QLA_SUCCESS) {
1470 		ql_dbg(ql_dbg_mbx, vha, 0x103c, "Failed=%x.\n", rval);
1471 	} else {
1472 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103d,
1473 		    "Done %s.\n", __func__);
1474 	}
1475 
1476 	return rval;
1477 }
1478 
1479 int
1480 qla2x00_abort_target(struct fc_port *fcport, uint64_t l, int tag)
1481 {
1482 	int rval, rval2;
1483 	mbx_cmd_t  mc;
1484 	mbx_cmd_t  *mcp = &mc;
1485 	scsi_qla_host_t *vha;
1486 	struct req_que *req;
1487 	struct rsp_que *rsp;
1488 
1489 	l = l;
1490 	vha = fcport->vha;
1491 
1492 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103e,
1493 	    "Entered %s.\n", __func__);
1494 
1495 	req = vha->hw->req_q_map[0];
1496 	rsp = req->rsp;
1497 	mcp->mb[0] = MBC_ABORT_TARGET;
1498 	mcp->out_mb = MBX_9|MBX_2|MBX_1|MBX_0;
1499 	if (HAS_EXTENDED_IDS(vha->hw)) {
1500 		mcp->mb[1] = fcport->loop_id;
1501 		mcp->mb[10] = 0;
1502 		mcp->out_mb |= MBX_10;
1503 	} else {
1504 		mcp->mb[1] = fcport->loop_id << 8;
1505 	}
1506 	mcp->mb[2] = vha->hw->loop_reset_delay;
1507 	mcp->mb[9] = vha->vp_idx;
1508 
1509 	mcp->in_mb = MBX_0;
1510 	mcp->tov = MBX_TOV_SECONDS;
1511 	mcp->flags = 0;
1512 	rval = qla2x00_mailbox_command(vha, mcp);
1513 	if (rval != QLA_SUCCESS) {
1514 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103f,
1515 		    "Failed=%x.\n", rval);
1516 	}
1517 
1518 	/* Issue marker IOCB. */
1519 	rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, 0,
1520 							MK_SYNC_ID);
1521 	if (rval2 != QLA_SUCCESS) {
1522 		ql_dbg(ql_dbg_mbx, vha, 0x1040,
1523 		    "Failed to issue marker IOCB (%x).\n", rval2);
1524 	} else {
1525 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1041,
1526 		    "Done %s.\n", __func__);
1527 	}
1528 
1529 	return rval;
1530 }
1531 
1532 int
1533 qla2x00_lun_reset(struct fc_port *fcport, uint64_t l, int tag)
1534 {
1535 	int rval, rval2;
1536 	mbx_cmd_t  mc;
1537 	mbx_cmd_t  *mcp = &mc;
1538 	scsi_qla_host_t *vha;
1539 	struct req_que *req;
1540 	struct rsp_que *rsp;
1541 
1542 	vha = fcport->vha;
1543 
1544 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1042,
1545 	    "Entered %s.\n", __func__);
1546 
1547 	req = vha->hw->req_q_map[0];
1548 	rsp = req->rsp;
1549 	mcp->mb[0] = MBC_LUN_RESET;
1550 	mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
1551 	if (HAS_EXTENDED_IDS(vha->hw))
1552 		mcp->mb[1] = fcport->loop_id;
1553 	else
1554 		mcp->mb[1] = fcport->loop_id << 8;
1555 	mcp->mb[2] = (u32)l;
1556 	mcp->mb[3] = 0;
1557 	mcp->mb[9] = vha->vp_idx;
1558 
1559 	mcp->in_mb = MBX_0;
1560 	mcp->tov = MBX_TOV_SECONDS;
1561 	mcp->flags = 0;
1562 	rval = qla2x00_mailbox_command(vha, mcp);
1563 	if (rval != QLA_SUCCESS) {
1564 		ql_dbg(ql_dbg_mbx, vha, 0x1043, "Failed=%x.\n", rval);
1565 	}
1566 
1567 	/* Issue marker IOCB. */
1568 	rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l,
1569 								MK_SYNC_ID_LUN);
1570 	if (rval2 != QLA_SUCCESS) {
1571 		ql_dbg(ql_dbg_mbx, vha, 0x1044,
1572 		    "Failed to issue marker IOCB (%x).\n", rval2);
1573 	} else {
1574 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1045,
1575 		    "Done %s.\n", __func__);
1576 	}
1577 
1578 	return rval;
1579 }
1580 
1581 /*
1582  * qla2x00_get_adapter_id
1583  *	Get adapter ID and topology.
1584  *
1585  * Input:
1586  *	ha = adapter block pointer.
1587  *	id = pointer for loop ID.
1588  *	al_pa = pointer for AL_PA.
1589  *	area = pointer for area.
1590  *	domain = pointer for domain.
1591  *	top = pointer for topology.
1592  *	TARGET_QUEUE_LOCK must be released.
1593  *	ADAPTER_STATE_LOCK must be released.
1594  *
1595  * Returns:
1596  *	qla2x00 local function return status code.
1597  *
1598  * Context:
1599  *	Kernel context.
1600  */
1601 int
1602 qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
1603     uint8_t *area, uint8_t *domain, uint16_t *top, uint16_t *sw_cap)
1604 {
1605 	int rval;
1606 	mbx_cmd_t mc;
1607 	mbx_cmd_t *mcp = &mc;
1608 
1609 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1046,
1610 	    "Entered %s.\n", __func__);
1611 
1612 	mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID;
1613 	mcp->mb[9] = vha->vp_idx;
1614 	mcp->out_mb = MBX_9|MBX_0;
1615 	mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1616 	if (IS_CNA_CAPABLE(vha->hw))
1617 		mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10;
1618 	if (IS_FWI2_CAPABLE(vha->hw))
1619 		mcp->in_mb |= MBX_19|MBX_18|MBX_17|MBX_16;
1620 	if (IS_QLA27XX(vha->hw))
1621 		mcp->in_mb |= MBX_15;
1622 	mcp->tov = MBX_TOV_SECONDS;
1623 	mcp->flags = 0;
1624 	rval = qla2x00_mailbox_command(vha, mcp);
1625 	if (mcp->mb[0] == MBS_COMMAND_ERROR)
1626 		rval = QLA_COMMAND_ERROR;
1627 	else if (mcp->mb[0] == MBS_INVALID_COMMAND)
1628 		rval = QLA_INVALID_COMMAND;
1629 
1630 	/* Return data. */
1631 	*id = mcp->mb[1];
1632 	*al_pa = LSB(mcp->mb[2]);
1633 	*area = MSB(mcp->mb[2]);
1634 	*domain	= LSB(mcp->mb[3]);
1635 	*top = mcp->mb[6];
1636 	*sw_cap = mcp->mb[7];
1637 
1638 	if (rval != QLA_SUCCESS) {
1639 		/*EMPTY*/
1640 		ql_dbg(ql_dbg_mbx, vha, 0x1047, "Failed=%x.\n", rval);
1641 	} else {
1642 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1048,
1643 		    "Done %s.\n", __func__);
1644 
1645 		if (IS_CNA_CAPABLE(vha->hw)) {
1646 			vha->fcoe_vlan_id = mcp->mb[9] & 0xfff;
1647 			vha->fcoe_fcf_idx = mcp->mb[10];
1648 			vha->fcoe_vn_port_mac[5] = mcp->mb[11] >> 8;
1649 			vha->fcoe_vn_port_mac[4] = mcp->mb[11] & 0xff;
1650 			vha->fcoe_vn_port_mac[3] = mcp->mb[12] >> 8;
1651 			vha->fcoe_vn_port_mac[2] = mcp->mb[12] & 0xff;
1652 			vha->fcoe_vn_port_mac[1] = mcp->mb[13] >> 8;
1653 			vha->fcoe_vn_port_mac[0] = mcp->mb[13] & 0xff;
1654 		}
1655 		/* If FA-WWN supported */
1656 		if (IS_FAWWN_CAPABLE(vha->hw)) {
1657 			if (mcp->mb[7] & BIT_14) {
1658 				vha->port_name[0] = MSB(mcp->mb[16]);
1659 				vha->port_name[1] = LSB(mcp->mb[16]);
1660 				vha->port_name[2] = MSB(mcp->mb[17]);
1661 				vha->port_name[3] = LSB(mcp->mb[17]);
1662 				vha->port_name[4] = MSB(mcp->mb[18]);
1663 				vha->port_name[5] = LSB(mcp->mb[18]);
1664 				vha->port_name[6] = MSB(mcp->mb[19]);
1665 				vha->port_name[7] = LSB(mcp->mb[19]);
1666 				fc_host_port_name(vha->host) =
1667 				    wwn_to_u64(vha->port_name);
1668 				ql_dbg(ql_dbg_mbx, vha, 0x10ca,
1669 				    "FA-WWN acquired %016llx\n",
1670 				    wwn_to_u64(vha->port_name));
1671 			}
1672 		}
1673 
1674 		if (IS_QLA27XX(vha->hw))
1675 			vha->bbcr = mcp->mb[15];
1676 	}
1677 
1678 	return rval;
1679 }
1680 
1681 /*
1682  * qla2x00_get_retry_cnt
1683  *	Get current firmware login retry count and delay.
1684  *
1685  * Input:
1686  *	ha = adapter block pointer.
1687  *	retry_cnt = pointer to login retry count.
1688  *	tov = pointer to login timeout value.
1689  *
1690  * Returns:
1691  *	qla2x00 local function return status code.
1692  *
1693  * Context:
1694  *	Kernel context.
1695  */
1696 int
1697 qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov,
1698     uint16_t *r_a_tov)
1699 {
1700 	int rval;
1701 	uint16_t ratov;
1702 	mbx_cmd_t mc;
1703 	mbx_cmd_t *mcp = &mc;
1704 
1705 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1049,
1706 	    "Entered %s.\n", __func__);
1707 
1708 	mcp->mb[0] = MBC_GET_RETRY_COUNT;
1709 	mcp->out_mb = MBX_0;
1710 	mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1711 	mcp->tov = MBX_TOV_SECONDS;
1712 	mcp->flags = 0;
1713 	rval = qla2x00_mailbox_command(vha, mcp);
1714 
1715 	if (rval != QLA_SUCCESS) {
1716 		/*EMPTY*/
1717 		ql_dbg(ql_dbg_mbx, vha, 0x104a,
1718 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
1719 	} else {
1720 		/* Convert returned data and check our values. */
1721 		*r_a_tov = mcp->mb[3] / 2;
1722 		ratov = (mcp->mb[3]/2) / 10;  /* mb[3] value is in 100ms */
1723 		if (mcp->mb[1] * ratov > (*retry_cnt) * (*tov)) {
1724 			/* Update to the larger values */
1725 			*retry_cnt = (uint8_t)mcp->mb[1];
1726 			*tov = ratov;
1727 		}
1728 
1729 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104b,
1730 		    "Done %s mb3=%d ratov=%d.\n", __func__, mcp->mb[3], ratov);
1731 	}
1732 
1733 	return rval;
1734 }
1735 
1736 /*
1737  * qla2x00_init_firmware
1738  *	Initialize adapter firmware.
1739  *
1740  * Input:
1741  *	ha = adapter block pointer.
1742  *	dptr = Initialization control block pointer.
1743  *	size = size of initialization control block.
1744  *	TARGET_QUEUE_LOCK must be released.
1745  *	ADAPTER_STATE_LOCK must be released.
1746  *
1747  * Returns:
1748  *	qla2x00 local function return status code.
1749  *
1750  * Context:
1751  *	Kernel context.
1752  */
1753 int
1754 qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
1755 {
1756 	int rval;
1757 	mbx_cmd_t mc;
1758 	mbx_cmd_t *mcp = &mc;
1759 	struct qla_hw_data *ha = vha->hw;
1760 
1761 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104c,
1762 	    "Entered %s.\n", __func__);
1763 
1764 	if (IS_P3P_TYPE(ha) && ql2xdbwr)
1765 		qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr,
1766 			(0x04 | (ha->portnum << 5) | (0 << 8) | (0 << 16)));
1767 
1768 	if (ha->flags.npiv_supported)
1769 		mcp->mb[0] = MBC_MID_INITIALIZE_FIRMWARE;
1770 	else
1771 		mcp->mb[0] = MBC_INITIALIZE_FIRMWARE;
1772 
1773 	mcp->mb[1] = 0;
1774 	mcp->mb[2] = MSW(ha->init_cb_dma);
1775 	mcp->mb[3] = LSW(ha->init_cb_dma);
1776 	mcp->mb[6] = MSW(MSD(ha->init_cb_dma));
1777 	mcp->mb[7] = LSW(MSD(ha->init_cb_dma));
1778 	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1779 	if (ha->ex_init_cb && ha->ex_init_cb->ex_version) {
1780 		mcp->mb[1] = BIT_0;
1781 		mcp->mb[10] = MSW(ha->ex_init_cb_dma);
1782 		mcp->mb[11] = LSW(ha->ex_init_cb_dma);
1783 		mcp->mb[12] = MSW(MSD(ha->ex_init_cb_dma));
1784 		mcp->mb[13] = LSW(MSD(ha->ex_init_cb_dma));
1785 		mcp->mb[14] = sizeof(*ha->ex_init_cb);
1786 		mcp->out_mb |= MBX_14|MBX_13|MBX_12|MBX_11|MBX_10;
1787 	}
1788 	/* 1 and 2 should normally be captured. */
1789 	mcp->in_mb = MBX_2|MBX_1|MBX_0;
1790 	if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
1791 		/* mb3 is additional info about the installed SFP. */
1792 		mcp->in_mb  |= MBX_3;
1793 	mcp->buf_size = size;
1794 	mcp->flags = MBX_DMA_OUT;
1795 	mcp->tov = MBX_TOV_SECONDS;
1796 	rval = qla2x00_mailbox_command(vha, mcp);
1797 
1798 	if (rval != QLA_SUCCESS) {
1799 		/*EMPTY*/
1800 		ql_dbg(ql_dbg_mbx, vha, 0x104d,
1801 		    "Failed=%x mb[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x,.\n",
1802 		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3]);
1803 	} else {
1804 		if (IS_QLA27XX(ha)) {
1805 			if (mcp->mb[2] == 6 || mcp->mb[3] == 2)
1806 				ql_dbg(ql_dbg_mbx, vha, 0x119d,
1807 				    "Invalid SFP/Validation Failed\n");
1808 		}
1809 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104e,
1810 		    "Done %s.\n", __func__);
1811 	}
1812 
1813 	return rval;
1814 }
1815 
1816 
1817 /*
1818  * qla2x00_get_port_database
1819  *	Issue normal/enhanced get port database mailbox command
1820  *	and copy device name as necessary.
1821  *
1822  * Input:
1823  *	ha = adapter state pointer.
1824  *	dev = structure pointer.
1825  *	opt = enhanced cmd option byte.
1826  *
1827  * Returns:
1828  *	qla2x00 local function return status code.
1829  *
1830  * Context:
1831  *	Kernel context.
1832  */
1833 int
1834 qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
1835 {
1836 	int rval;
1837 	mbx_cmd_t mc;
1838 	mbx_cmd_t *mcp = &mc;
1839 	port_database_t *pd;
1840 	struct port_database_24xx *pd24;
1841 	dma_addr_t pd_dma;
1842 	struct qla_hw_data *ha = vha->hw;
1843 
1844 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104f,
1845 	    "Entered %s.\n", __func__);
1846 
1847 	pd24 = NULL;
1848 	pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
1849 	if (pd  == NULL) {
1850 		ql_log(ql_log_warn, vha, 0x1050,
1851 		    "Failed to allocate port database structure.\n");
1852 		fcport->query = 0;
1853 		return QLA_MEMORY_ALLOC_FAILED;
1854 	}
1855 
1856 	mcp->mb[0] = MBC_GET_PORT_DATABASE;
1857 	if (opt != 0 && !IS_FWI2_CAPABLE(ha))
1858 		mcp->mb[0] = MBC_ENHANCED_GET_PORT_DATABASE;
1859 	mcp->mb[2] = MSW(pd_dma);
1860 	mcp->mb[3] = LSW(pd_dma);
1861 	mcp->mb[6] = MSW(MSD(pd_dma));
1862 	mcp->mb[7] = LSW(MSD(pd_dma));
1863 	mcp->mb[9] = vha->vp_idx;
1864 	mcp->out_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
1865 	mcp->in_mb = MBX_0;
1866 	if (IS_FWI2_CAPABLE(ha)) {
1867 		mcp->mb[1] = fcport->loop_id;
1868 		mcp->mb[10] = opt;
1869 		mcp->out_mb |= MBX_10|MBX_1;
1870 		mcp->in_mb |= MBX_1;
1871 	} else if (HAS_EXTENDED_IDS(ha)) {
1872 		mcp->mb[1] = fcport->loop_id;
1873 		mcp->mb[10] = opt;
1874 		mcp->out_mb |= MBX_10|MBX_1;
1875 	} else {
1876 		mcp->mb[1] = fcport->loop_id << 8 | opt;
1877 		mcp->out_mb |= MBX_1;
1878 	}
1879 	mcp->buf_size = IS_FWI2_CAPABLE(ha) ?
1880 	    PORT_DATABASE_24XX_SIZE : PORT_DATABASE_SIZE;
1881 	mcp->flags = MBX_DMA_IN;
1882 	mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
1883 	rval = qla2x00_mailbox_command(vha, mcp);
1884 	if (rval != QLA_SUCCESS)
1885 		goto gpd_error_out;
1886 
1887 	if (IS_FWI2_CAPABLE(ha)) {
1888 		uint64_t zero = 0;
1889 		u8 current_login_state, last_login_state;
1890 
1891 		pd24 = (struct port_database_24xx *) pd;
1892 
1893 		/* Check for logged in state. */
1894 		if (fcport->fc4f_nvme) {
1895 			current_login_state = pd24->current_login_state >> 4;
1896 			last_login_state = pd24->last_login_state >> 4;
1897 		} else {
1898 			current_login_state = pd24->current_login_state & 0xf;
1899 			last_login_state = pd24->last_login_state & 0xf;
1900 		}
1901 		fcport->current_login_state = pd24->current_login_state;
1902 		fcport->last_login_state = pd24->last_login_state;
1903 
1904 		/* Check for logged in state. */
1905 		if (current_login_state != PDS_PRLI_COMPLETE &&
1906 		    last_login_state != PDS_PRLI_COMPLETE) {
1907 			ql_dbg(ql_dbg_mbx, vha, 0x119a,
1908 			    "Unable to verify login-state (%x/%x) for loop_id %x.\n",
1909 			    current_login_state, last_login_state,
1910 			    fcport->loop_id);
1911 			rval = QLA_FUNCTION_FAILED;
1912 
1913 			if (!fcport->query)
1914 				goto gpd_error_out;
1915 		}
1916 
1917 		if (fcport->loop_id == FC_NO_LOOP_ID ||
1918 		    (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
1919 		     memcmp(fcport->port_name, pd24->port_name, 8))) {
1920 			/* We lost the device mid way. */
1921 			rval = QLA_NOT_LOGGED_IN;
1922 			goto gpd_error_out;
1923 		}
1924 
1925 		/* Names are little-endian. */
1926 		memcpy(fcport->node_name, pd24->node_name, WWN_SIZE);
1927 		memcpy(fcport->port_name, pd24->port_name, WWN_SIZE);
1928 
1929 		/* Get port_id of device. */
1930 		fcport->d_id.b.domain = pd24->port_id[0];
1931 		fcport->d_id.b.area = pd24->port_id[1];
1932 		fcport->d_id.b.al_pa = pd24->port_id[2];
1933 		fcport->d_id.b.rsvd_1 = 0;
1934 
1935 		/* If not target must be initiator or unknown type. */
1936 		if ((pd24->prli_svc_param_word_3[0] & BIT_4) == 0)
1937 			fcport->port_type = FCT_INITIATOR;
1938 		else
1939 			fcport->port_type = FCT_TARGET;
1940 
1941 		/* Passback COS information. */
1942 		fcport->supported_classes = (pd24->flags & PDF_CLASS_2) ?
1943 				FC_COS_CLASS2 : FC_COS_CLASS3;
1944 
1945 		if (pd24->prli_svc_param_word_3[0] & BIT_7)
1946 			fcport->flags |= FCF_CONF_COMP_SUPPORTED;
1947 	} else {
1948 		uint64_t zero = 0;
1949 
1950 		/* Check for logged in state. */
1951 		if (pd->master_state != PD_STATE_PORT_LOGGED_IN &&
1952 		    pd->slave_state != PD_STATE_PORT_LOGGED_IN) {
1953 			ql_dbg(ql_dbg_mbx, vha, 0x100a,
1954 			    "Unable to verify login-state (%x/%x) - "
1955 			    "portid=%02x%02x%02x.\n", pd->master_state,
1956 			    pd->slave_state, fcport->d_id.b.domain,
1957 			    fcport->d_id.b.area, fcport->d_id.b.al_pa);
1958 			rval = QLA_FUNCTION_FAILED;
1959 			goto gpd_error_out;
1960 		}
1961 
1962 		if (fcport->loop_id == FC_NO_LOOP_ID ||
1963 		    (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
1964 		     memcmp(fcport->port_name, pd->port_name, 8))) {
1965 			/* We lost the device mid way. */
1966 			rval = QLA_NOT_LOGGED_IN;
1967 			goto gpd_error_out;
1968 		}
1969 
1970 		/* Names are little-endian. */
1971 		memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
1972 		memcpy(fcport->port_name, pd->port_name, WWN_SIZE);
1973 
1974 		/* Get port_id of device. */
1975 		fcport->d_id.b.domain = pd->port_id[0];
1976 		fcport->d_id.b.area = pd->port_id[3];
1977 		fcport->d_id.b.al_pa = pd->port_id[2];
1978 		fcport->d_id.b.rsvd_1 = 0;
1979 
1980 		/* If not target must be initiator or unknown type. */
1981 		if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
1982 			fcport->port_type = FCT_INITIATOR;
1983 		else
1984 			fcport->port_type = FCT_TARGET;
1985 
1986 		/* Passback COS information. */
1987 		fcport->supported_classes = (pd->options & BIT_4) ?
1988 		    FC_COS_CLASS2: FC_COS_CLASS3;
1989 	}
1990 
1991 gpd_error_out:
1992 	dma_pool_free(ha->s_dma_pool, pd, pd_dma);
1993 	fcport->query = 0;
1994 
1995 	if (rval != QLA_SUCCESS) {
1996 		ql_dbg(ql_dbg_mbx, vha, 0x1052,
1997 		    "Failed=%x mb[0]=%x mb[1]=%x.\n", rval,
1998 		    mcp->mb[0], mcp->mb[1]);
1999 	} else {
2000 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1053,
2001 		    "Done %s.\n", __func__);
2002 	}
2003 
2004 	return rval;
2005 }
2006 
2007 /*
2008  * qla2x00_get_firmware_state
2009  *	Get adapter firmware state.
2010  *
2011  * Input:
2012  *	ha = adapter block pointer.
2013  *	dptr = pointer for firmware state.
2014  *	TARGET_QUEUE_LOCK must be released.
2015  *	ADAPTER_STATE_LOCK must be released.
2016  *
2017  * Returns:
2018  *	qla2x00 local function return status code.
2019  *
2020  * Context:
2021  *	Kernel context.
2022  */
2023 int
2024 qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
2025 {
2026 	int rval;
2027 	mbx_cmd_t mc;
2028 	mbx_cmd_t *mcp = &mc;
2029 	struct qla_hw_data *ha = vha->hw;
2030 
2031 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1054,
2032 	    "Entered %s.\n", __func__);
2033 
2034 	mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
2035 	mcp->out_mb = MBX_0;
2036 	if (IS_FWI2_CAPABLE(vha->hw))
2037 		mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
2038 	else
2039 		mcp->in_mb = MBX_1|MBX_0;
2040 	mcp->tov = MBX_TOV_SECONDS;
2041 	mcp->flags = 0;
2042 	rval = qla2x00_mailbox_command(vha, mcp);
2043 
2044 	/* Return firmware states. */
2045 	states[0] = mcp->mb[1];
2046 	if (IS_FWI2_CAPABLE(vha->hw)) {
2047 		states[1] = mcp->mb[2];
2048 		states[2] = mcp->mb[3];  /* SFP info */
2049 		states[3] = mcp->mb[4];
2050 		states[4] = mcp->mb[5];
2051 		states[5] = mcp->mb[6];  /* DPORT status */
2052 	}
2053 
2054 	if (rval != QLA_SUCCESS) {
2055 		/*EMPTY*/
2056 		ql_dbg(ql_dbg_mbx, vha, 0x1055, "Failed=%x.\n", rval);
2057 	} else {
2058 		if (IS_QLA27XX(ha)) {
2059 			if (mcp->mb[2] == 6 || mcp->mb[3] == 2)
2060 				ql_dbg(ql_dbg_mbx, vha, 0x119e,
2061 				    "Invalid SFP/Validation Failed\n");
2062 		}
2063 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1056,
2064 		    "Done %s.\n", __func__);
2065 	}
2066 
2067 	return rval;
2068 }
2069 
2070 /*
2071  * qla2x00_get_port_name
2072  *	Issue get port name mailbox command.
2073  *	Returned name is in big endian format.
2074  *
2075  * Input:
2076  *	ha = adapter block pointer.
2077  *	loop_id = loop ID of device.
2078  *	name = pointer for name.
2079  *	TARGET_QUEUE_LOCK must be released.
2080  *	ADAPTER_STATE_LOCK must be released.
2081  *
2082  * Returns:
2083  *	qla2x00 local function return status code.
2084  *
2085  * Context:
2086  *	Kernel context.
2087  */
2088 int
2089 qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name,
2090     uint8_t opt)
2091 {
2092 	int rval;
2093 	mbx_cmd_t mc;
2094 	mbx_cmd_t *mcp = &mc;
2095 
2096 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1057,
2097 	    "Entered %s.\n", __func__);
2098 
2099 	mcp->mb[0] = MBC_GET_PORT_NAME;
2100 	mcp->mb[9] = vha->vp_idx;
2101 	mcp->out_mb = MBX_9|MBX_1|MBX_0;
2102 	if (HAS_EXTENDED_IDS(vha->hw)) {
2103 		mcp->mb[1] = loop_id;
2104 		mcp->mb[10] = opt;
2105 		mcp->out_mb |= MBX_10;
2106 	} else {
2107 		mcp->mb[1] = loop_id << 8 | opt;
2108 	}
2109 
2110 	mcp->in_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
2111 	mcp->tov = MBX_TOV_SECONDS;
2112 	mcp->flags = 0;
2113 	rval = qla2x00_mailbox_command(vha, mcp);
2114 
2115 	if (rval != QLA_SUCCESS) {
2116 		/*EMPTY*/
2117 		ql_dbg(ql_dbg_mbx, vha, 0x1058, "Failed=%x.\n", rval);
2118 	} else {
2119 		if (name != NULL) {
2120 			/* This function returns name in big endian. */
2121 			name[0] = MSB(mcp->mb[2]);
2122 			name[1] = LSB(mcp->mb[2]);
2123 			name[2] = MSB(mcp->mb[3]);
2124 			name[3] = LSB(mcp->mb[3]);
2125 			name[4] = MSB(mcp->mb[6]);
2126 			name[5] = LSB(mcp->mb[6]);
2127 			name[6] = MSB(mcp->mb[7]);
2128 			name[7] = LSB(mcp->mb[7]);
2129 		}
2130 
2131 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1059,
2132 		    "Done %s.\n", __func__);
2133 	}
2134 
2135 	return rval;
2136 }
2137 
2138 /*
2139  * qla24xx_link_initialization
2140  *	Issue link initialization mailbox command.
2141  *
2142  * Input:
2143  *	ha = adapter block pointer.
2144  *	TARGET_QUEUE_LOCK must be released.
2145  *	ADAPTER_STATE_LOCK must be released.
2146  *
2147  * Returns:
2148  *	qla2x00 local function return status code.
2149  *
2150  * Context:
2151  *	Kernel context.
2152  */
2153 int
2154 qla24xx_link_initialize(scsi_qla_host_t *vha)
2155 {
2156 	int rval;
2157 	mbx_cmd_t mc;
2158 	mbx_cmd_t *mcp = &mc;
2159 
2160 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1152,
2161 	    "Entered %s.\n", __func__);
2162 
2163 	if (!IS_FWI2_CAPABLE(vha->hw) || IS_CNA_CAPABLE(vha->hw))
2164 		return QLA_FUNCTION_FAILED;
2165 
2166 	mcp->mb[0] = MBC_LINK_INITIALIZATION;
2167 	mcp->mb[1] = BIT_4;
2168 	if (vha->hw->operating_mode == LOOP)
2169 		mcp->mb[1] |= BIT_6;
2170 	else
2171 		mcp->mb[1] |= BIT_5;
2172 	mcp->mb[2] = 0;
2173 	mcp->mb[3] = 0;
2174 	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2175 	mcp->in_mb = MBX_0;
2176 	mcp->tov = MBX_TOV_SECONDS;
2177 	mcp->flags = 0;
2178 	rval = qla2x00_mailbox_command(vha, mcp);
2179 
2180 	if (rval != QLA_SUCCESS) {
2181 		ql_dbg(ql_dbg_mbx, vha, 0x1153, "Failed=%x.\n", rval);
2182 	} else {
2183 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1154,
2184 		    "Done %s.\n", __func__);
2185 	}
2186 
2187 	return rval;
2188 }
2189 
2190 /*
2191  * qla2x00_lip_reset
2192  *	Issue LIP reset mailbox command.
2193  *
2194  * Input:
2195  *	ha = adapter block pointer.
2196  *	TARGET_QUEUE_LOCK must be released.
2197  *	ADAPTER_STATE_LOCK must be released.
2198  *
2199  * Returns:
2200  *	qla2x00 local function return status code.
2201  *
2202  * Context:
2203  *	Kernel context.
2204  */
2205 int
2206 qla2x00_lip_reset(scsi_qla_host_t *vha)
2207 {
2208 	int rval;
2209 	mbx_cmd_t mc;
2210 	mbx_cmd_t *mcp = &mc;
2211 
2212 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105a,
2213 	    "Entered %s.\n", __func__);
2214 
2215 	if (IS_CNA_CAPABLE(vha->hw)) {
2216 		/* Logout across all FCFs. */
2217 		mcp->mb[0] = MBC_LIP_FULL_LOGIN;
2218 		mcp->mb[1] = BIT_1;
2219 		mcp->mb[2] = 0;
2220 		mcp->out_mb = MBX_2|MBX_1|MBX_0;
2221 	} else if (IS_FWI2_CAPABLE(vha->hw)) {
2222 		mcp->mb[0] = MBC_LIP_FULL_LOGIN;
2223 		if (N2N_TOPO(vha->hw))
2224 			mcp->mb[1] = BIT_4; /* re-init */
2225 		else
2226 			mcp->mb[1] = BIT_6; /* LIP */
2227 		mcp->mb[2] = 0;
2228 		mcp->mb[3] = vha->hw->loop_reset_delay;
2229 		mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2230 	} else {
2231 		mcp->mb[0] = MBC_LIP_RESET;
2232 		mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2233 		if (HAS_EXTENDED_IDS(vha->hw)) {
2234 			mcp->mb[1] = 0x00ff;
2235 			mcp->mb[10] = 0;
2236 			mcp->out_mb |= MBX_10;
2237 		} else {
2238 			mcp->mb[1] = 0xff00;
2239 		}
2240 		mcp->mb[2] = vha->hw->loop_reset_delay;
2241 		mcp->mb[3] = 0;
2242 	}
2243 	mcp->in_mb = MBX_0;
2244 	mcp->tov = MBX_TOV_SECONDS;
2245 	mcp->flags = 0;
2246 	rval = qla2x00_mailbox_command(vha, mcp);
2247 
2248 	if (rval != QLA_SUCCESS) {
2249 		/*EMPTY*/
2250 		ql_dbg(ql_dbg_mbx, vha, 0x105b, "Failed=%x.\n", rval);
2251 	} else {
2252 		/*EMPTY*/
2253 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105c,
2254 		    "Done %s.\n", __func__);
2255 	}
2256 
2257 	return rval;
2258 }
2259 
2260 /*
2261  * qla2x00_send_sns
2262  *	Send SNS command.
2263  *
2264  * Input:
2265  *	ha = adapter block pointer.
2266  *	sns = pointer for command.
2267  *	cmd_size = command size.
2268  *	buf_size = response/command size.
2269  *	TARGET_QUEUE_LOCK must be released.
2270  *	ADAPTER_STATE_LOCK must be released.
2271  *
2272  * Returns:
2273  *	qla2x00 local function return status code.
2274  *
2275  * Context:
2276  *	Kernel context.
2277  */
2278 int
2279 qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address,
2280     uint16_t cmd_size, size_t buf_size)
2281 {
2282 	int rval;
2283 	mbx_cmd_t mc;
2284 	mbx_cmd_t *mcp = &mc;
2285 
2286 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105d,
2287 	    "Entered %s.\n", __func__);
2288 
2289 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105e,
2290 	    "Retry cnt=%d ratov=%d total tov=%d.\n",
2291 	    vha->hw->retry_count, vha->hw->login_timeout, mcp->tov);
2292 
2293 	mcp->mb[0] = MBC_SEND_SNS_COMMAND;
2294 	mcp->mb[1] = cmd_size;
2295 	mcp->mb[2] = MSW(sns_phys_address);
2296 	mcp->mb[3] = LSW(sns_phys_address);
2297 	mcp->mb[6] = MSW(MSD(sns_phys_address));
2298 	mcp->mb[7] = LSW(MSD(sns_phys_address));
2299 	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
2300 	mcp->in_mb = MBX_0|MBX_1;
2301 	mcp->buf_size = buf_size;
2302 	mcp->flags = MBX_DMA_OUT|MBX_DMA_IN;
2303 	mcp->tov = (vha->hw->login_timeout * 2) + (vha->hw->login_timeout / 2);
2304 	rval = qla2x00_mailbox_command(vha, mcp);
2305 
2306 	if (rval != QLA_SUCCESS) {
2307 		/*EMPTY*/
2308 		ql_dbg(ql_dbg_mbx, vha, 0x105f,
2309 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
2310 		    rval, mcp->mb[0], mcp->mb[1]);
2311 	} else {
2312 		/*EMPTY*/
2313 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1060,
2314 		    "Done %s.\n", __func__);
2315 	}
2316 
2317 	return rval;
2318 }
2319 
2320 int
2321 qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2322     uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt)
2323 {
2324 	int		rval;
2325 
2326 	struct logio_entry_24xx *lg;
2327 	dma_addr_t	lg_dma;
2328 	uint32_t	iop[2];
2329 	struct qla_hw_data *ha = vha->hw;
2330 	struct req_que *req;
2331 
2332 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1061,
2333 	    "Entered %s.\n", __func__);
2334 
2335 	if (vha->vp_idx && vha->qpair)
2336 		req = vha->qpair->req;
2337 	else
2338 		req = ha->req_q_map[0];
2339 
2340 	lg = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
2341 	if (lg == NULL) {
2342 		ql_log(ql_log_warn, vha, 0x1062,
2343 		    "Failed to allocate login IOCB.\n");
2344 		return QLA_MEMORY_ALLOC_FAILED;
2345 	}
2346 
2347 	lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2348 	lg->entry_count = 1;
2349 	lg->handle = MAKE_HANDLE(req->id, lg->handle);
2350 	lg->nport_handle = cpu_to_le16(loop_id);
2351 	lg->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
2352 	if (opt & BIT_0)
2353 		lg->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
2354 	if (opt & BIT_1)
2355 		lg->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
2356 	lg->port_id[0] = al_pa;
2357 	lg->port_id[1] = area;
2358 	lg->port_id[2] = domain;
2359 	lg->vp_index = vha->vp_idx;
2360 	rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0,
2361 	    (ha->r_a_tov / 10 * 2) + 2);
2362 	if (rval != QLA_SUCCESS) {
2363 		ql_dbg(ql_dbg_mbx, vha, 0x1063,
2364 		    "Failed to issue login IOCB (%x).\n", rval);
2365 	} else if (lg->entry_status != 0) {
2366 		ql_dbg(ql_dbg_mbx, vha, 0x1064,
2367 		    "Failed to complete IOCB -- error status (%x).\n",
2368 		    lg->entry_status);
2369 		rval = QLA_FUNCTION_FAILED;
2370 	} else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) {
2371 		iop[0] = le32_to_cpu(lg->io_parameter[0]);
2372 		iop[1] = le32_to_cpu(lg->io_parameter[1]);
2373 
2374 		ql_dbg(ql_dbg_mbx, vha, 0x1065,
2375 		    "Failed to complete IOCB -- completion  status (%x) "
2376 		    "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
2377 		    iop[0], iop[1]);
2378 
2379 		switch (iop[0]) {
2380 		case LSC_SCODE_PORTID_USED:
2381 			mb[0] = MBS_PORT_ID_USED;
2382 			mb[1] = LSW(iop[1]);
2383 			break;
2384 		case LSC_SCODE_NPORT_USED:
2385 			mb[0] = MBS_LOOP_ID_USED;
2386 			break;
2387 		case LSC_SCODE_NOLINK:
2388 		case LSC_SCODE_NOIOCB:
2389 		case LSC_SCODE_NOXCB:
2390 		case LSC_SCODE_CMD_FAILED:
2391 		case LSC_SCODE_NOFABRIC:
2392 		case LSC_SCODE_FW_NOT_READY:
2393 		case LSC_SCODE_NOT_LOGGED_IN:
2394 		case LSC_SCODE_NOPCB:
2395 		case LSC_SCODE_ELS_REJECT:
2396 		case LSC_SCODE_CMD_PARAM_ERR:
2397 		case LSC_SCODE_NONPORT:
2398 		case LSC_SCODE_LOGGED_IN:
2399 		case LSC_SCODE_NOFLOGI_ACC:
2400 		default:
2401 			mb[0] = MBS_COMMAND_ERROR;
2402 			break;
2403 		}
2404 	} else {
2405 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1066,
2406 		    "Done %s.\n", __func__);
2407 
2408 		iop[0] = le32_to_cpu(lg->io_parameter[0]);
2409 
2410 		mb[0] = MBS_COMMAND_COMPLETE;
2411 		mb[1] = 0;
2412 		if (iop[0] & BIT_4) {
2413 			if (iop[0] & BIT_8)
2414 				mb[1] |= BIT_1;
2415 		} else
2416 			mb[1] = BIT_0;
2417 
2418 		/* Passback COS information. */
2419 		mb[10] = 0;
2420 		if (lg->io_parameter[7] || lg->io_parameter[8])
2421 			mb[10] |= BIT_0;	/* Class 2. */
2422 		if (lg->io_parameter[9] || lg->io_parameter[10])
2423 			mb[10] |= BIT_1;	/* Class 3. */
2424 		if (lg->io_parameter[0] & cpu_to_le32(BIT_7))
2425 			mb[10] |= BIT_7;	/* Confirmed Completion
2426 						 * Allowed
2427 						 */
2428 	}
2429 
2430 	dma_pool_free(ha->s_dma_pool, lg, lg_dma);
2431 
2432 	return rval;
2433 }
2434 
2435 /*
2436  * qla2x00_login_fabric
2437  *	Issue login fabric port mailbox command.
2438  *
2439  * Input:
2440  *	ha = adapter block pointer.
2441  *	loop_id = device loop ID.
2442  *	domain = device domain.
2443  *	area = device area.
2444  *	al_pa = device AL_PA.
2445  *	status = pointer for return status.
2446  *	opt = command options.
2447  *	TARGET_QUEUE_LOCK must be released.
2448  *	ADAPTER_STATE_LOCK must be released.
2449  *
2450  * Returns:
2451  *	qla2x00 local function return status code.
2452  *
2453  * Context:
2454  *	Kernel context.
2455  */
2456 int
2457 qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2458     uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt)
2459 {
2460 	int rval;
2461 	mbx_cmd_t mc;
2462 	mbx_cmd_t *mcp = &mc;
2463 	struct qla_hw_data *ha = vha->hw;
2464 
2465 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1067,
2466 	    "Entered %s.\n", __func__);
2467 
2468 	mcp->mb[0] = MBC_LOGIN_FABRIC_PORT;
2469 	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2470 	if (HAS_EXTENDED_IDS(ha)) {
2471 		mcp->mb[1] = loop_id;
2472 		mcp->mb[10] = opt;
2473 		mcp->out_mb |= MBX_10;
2474 	} else {
2475 		mcp->mb[1] = (loop_id << 8) | opt;
2476 	}
2477 	mcp->mb[2] = domain;
2478 	mcp->mb[3] = area << 8 | al_pa;
2479 
2480 	mcp->in_mb = MBX_7|MBX_6|MBX_2|MBX_1|MBX_0;
2481 	mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
2482 	mcp->flags = 0;
2483 	rval = qla2x00_mailbox_command(vha, mcp);
2484 
2485 	/* Return mailbox statuses. */
2486 	if (mb != NULL) {
2487 		mb[0] = mcp->mb[0];
2488 		mb[1] = mcp->mb[1];
2489 		mb[2] = mcp->mb[2];
2490 		mb[6] = mcp->mb[6];
2491 		mb[7] = mcp->mb[7];
2492 		/* COS retrieved from Get-Port-Database mailbox command. */
2493 		mb[10] = 0;
2494 	}
2495 
2496 	if (rval != QLA_SUCCESS) {
2497 		/* RLU tmp code: need to change main mailbox_command function to
2498 		 * return ok even when the mailbox completion value is not
2499 		 * SUCCESS. The caller needs to be responsible to interpret
2500 		 * the return values of this mailbox command if we're not
2501 		 * to change too much of the existing code.
2502 		 */
2503 		if (mcp->mb[0] == 0x4001 || mcp->mb[0] == 0x4002 ||
2504 		    mcp->mb[0] == 0x4003 || mcp->mb[0] == 0x4005 ||
2505 		    mcp->mb[0] == 0x4006)
2506 			rval = QLA_SUCCESS;
2507 
2508 		/*EMPTY*/
2509 		ql_dbg(ql_dbg_mbx, vha, 0x1068,
2510 		    "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
2511 		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
2512 	} else {
2513 		/*EMPTY*/
2514 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1069,
2515 		    "Done %s.\n", __func__);
2516 	}
2517 
2518 	return rval;
2519 }
2520 
2521 /*
2522  * qla2x00_login_local_device
2523  *           Issue login loop port mailbox command.
2524  *
2525  * Input:
2526  *           ha = adapter block pointer.
2527  *           loop_id = device loop ID.
2528  *           opt = command options.
2529  *
2530  * Returns:
2531  *            Return status code.
2532  *
2533  * Context:
2534  *            Kernel context.
2535  *
2536  */
2537 int
2538 qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport,
2539     uint16_t *mb_ret, uint8_t opt)
2540 {
2541 	int rval;
2542 	mbx_cmd_t mc;
2543 	mbx_cmd_t *mcp = &mc;
2544 	struct qla_hw_data *ha = vha->hw;
2545 
2546 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106a,
2547 	    "Entered %s.\n", __func__);
2548 
2549 	if (IS_FWI2_CAPABLE(ha))
2550 		return qla24xx_login_fabric(vha, fcport->loop_id,
2551 		    fcport->d_id.b.domain, fcport->d_id.b.area,
2552 		    fcport->d_id.b.al_pa, mb_ret, opt);
2553 
2554 	mcp->mb[0] = MBC_LOGIN_LOOP_PORT;
2555 	if (HAS_EXTENDED_IDS(ha))
2556 		mcp->mb[1] = fcport->loop_id;
2557 	else
2558 		mcp->mb[1] = fcport->loop_id << 8;
2559 	mcp->mb[2] = opt;
2560 	mcp->out_mb = MBX_2|MBX_1|MBX_0;
2561  	mcp->in_mb = MBX_7|MBX_6|MBX_1|MBX_0;
2562 	mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
2563 	mcp->flags = 0;
2564 	rval = qla2x00_mailbox_command(vha, mcp);
2565 
2566  	/* Return mailbox statuses. */
2567  	if (mb_ret != NULL) {
2568  		mb_ret[0] = mcp->mb[0];
2569  		mb_ret[1] = mcp->mb[1];
2570  		mb_ret[6] = mcp->mb[6];
2571  		mb_ret[7] = mcp->mb[7];
2572  	}
2573 
2574 	if (rval != QLA_SUCCESS) {
2575  		/* AV tmp code: need to change main mailbox_command function to
2576  		 * return ok even when the mailbox completion value is not
2577  		 * SUCCESS. The caller needs to be responsible to interpret
2578  		 * the return values of this mailbox command if we're not
2579  		 * to change too much of the existing code.
2580  		 */
2581  		if (mcp->mb[0] == 0x4005 || mcp->mb[0] == 0x4006)
2582  			rval = QLA_SUCCESS;
2583 
2584 		ql_dbg(ql_dbg_mbx, vha, 0x106b,
2585 		    "Failed=%x mb[0]=%x mb[1]=%x mb[6]=%x mb[7]=%x.\n",
2586 		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]);
2587 	} else {
2588 		/*EMPTY*/
2589 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106c,
2590 		    "Done %s.\n", __func__);
2591 	}
2592 
2593 	return (rval);
2594 }
2595 
2596 int
2597 qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2598     uint8_t area, uint8_t al_pa)
2599 {
2600 	int		rval;
2601 	struct logio_entry_24xx *lg;
2602 	dma_addr_t	lg_dma;
2603 	struct qla_hw_data *ha = vha->hw;
2604 	struct req_que *req;
2605 
2606 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106d,
2607 	    "Entered %s.\n", __func__);
2608 
2609 	lg = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
2610 	if (lg == NULL) {
2611 		ql_log(ql_log_warn, vha, 0x106e,
2612 		    "Failed to allocate logout IOCB.\n");
2613 		return QLA_MEMORY_ALLOC_FAILED;
2614 	}
2615 
2616 	req = vha->req;
2617 	lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2618 	lg->entry_count = 1;
2619 	lg->handle = MAKE_HANDLE(req->id, lg->handle);
2620 	lg->nport_handle = cpu_to_le16(loop_id);
2621 	lg->control_flags =
2622 	    cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO|
2623 		LCF_FREE_NPORT);
2624 	lg->port_id[0] = al_pa;
2625 	lg->port_id[1] = area;
2626 	lg->port_id[2] = domain;
2627 	lg->vp_index = vha->vp_idx;
2628 	rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0,
2629 	    (ha->r_a_tov / 10 * 2) + 2);
2630 	if (rval != QLA_SUCCESS) {
2631 		ql_dbg(ql_dbg_mbx, vha, 0x106f,
2632 		    "Failed to issue logout IOCB (%x).\n", rval);
2633 	} else if (lg->entry_status != 0) {
2634 		ql_dbg(ql_dbg_mbx, vha, 0x1070,
2635 		    "Failed to complete IOCB -- error status (%x).\n",
2636 		    lg->entry_status);
2637 		rval = QLA_FUNCTION_FAILED;
2638 	} else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) {
2639 		ql_dbg(ql_dbg_mbx, vha, 0x1071,
2640 		    "Failed to complete IOCB -- completion status (%x) "
2641 		    "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
2642 		    le32_to_cpu(lg->io_parameter[0]),
2643 		    le32_to_cpu(lg->io_parameter[1]));
2644 	} else {
2645 		/*EMPTY*/
2646 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1072,
2647 		    "Done %s.\n", __func__);
2648 	}
2649 
2650 	dma_pool_free(ha->s_dma_pool, lg, lg_dma);
2651 
2652 	return rval;
2653 }
2654 
2655 /*
2656  * qla2x00_fabric_logout
2657  *	Issue logout fabric port mailbox command.
2658  *
2659  * Input:
2660  *	ha = adapter block pointer.
2661  *	loop_id = device loop ID.
2662  *	TARGET_QUEUE_LOCK must be released.
2663  *	ADAPTER_STATE_LOCK must be released.
2664  *
2665  * Returns:
2666  *	qla2x00 local function return status code.
2667  *
2668  * Context:
2669  *	Kernel context.
2670  */
2671 int
2672 qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2673     uint8_t area, uint8_t al_pa)
2674 {
2675 	int rval;
2676 	mbx_cmd_t mc;
2677 	mbx_cmd_t *mcp = &mc;
2678 
2679 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1073,
2680 	    "Entered %s.\n", __func__);
2681 
2682 	mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT;
2683 	mcp->out_mb = MBX_1|MBX_0;
2684 	if (HAS_EXTENDED_IDS(vha->hw)) {
2685 		mcp->mb[1] = loop_id;
2686 		mcp->mb[10] = 0;
2687 		mcp->out_mb |= MBX_10;
2688 	} else {
2689 		mcp->mb[1] = loop_id << 8;
2690 	}
2691 
2692 	mcp->in_mb = MBX_1|MBX_0;
2693 	mcp->tov = MBX_TOV_SECONDS;
2694 	mcp->flags = 0;
2695 	rval = qla2x00_mailbox_command(vha, mcp);
2696 
2697 	if (rval != QLA_SUCCESS) {
2698 		/*EMPTY*/
2699 		ql_dbg(ql_dbg_mbx, vha, 0x1074,
2700 		    "Failed=%x mb[1]=%x.\n", rval, mcp->mb[1]);
2701 	} else {
2702 		/*EMPTY*/
2703 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1075,
2704 		    "Done %s.\n", __func__);
2705 	}
2706 
2707 	return rval;
2708 }
2709 
2710 /*
2711  * qla2x00_full_login_lip
2712  *	Issue full login LIP mailbox command.
2713  *
2714  * Input:
2715  *	ha = adapter block pointer.
2716  *	TARGET_QUEUE_LOCK must be released.
2717  *	ADAPTER_STATE_LOCK must be released.
2718  *
2719  * Returns:
2720  *	qla2x00 local function return status code.
2721  *
2722  * Context:
2723  *	Kernel context.
2724  */
2725 int
2726 qla2x00_full_login_lip(scsi_qla_host_t *vha)
2727 {
2728 	int rval;
2729 	mbx_cmd_t mc;
2730 	mbx_cmd_t *mcp = &mc;
2731 
2732 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1076,
2733 	    "Entered %s.\n", __func__);
2734 
2735 	mcp->mb[0] = MBC_LIP_FULL_LOGIN;
2736 	mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? BIT_3 : 0;
2737 	mcp->mb[2] = 0;
2738 	mcp->mb[3] = 0;
2739 	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2740 	mcp->in_mb = MBX_0;
2741 	mcp->tov = MBX_TOV_SECONDS;
2742 	mcp->flags = 0;
2743 	rval = qla2x00_mailbox_command(vha, mcp);
2744 
2745 	if (rval != QLA_SUCCESS) {
2746 		/*EMPTY*/
2747 		ql_dbg(ql_dbg_mbx, vha, 0x1077, "Failed=%x.\n", rval);
2748 	} else {
2749 		/*EMPTY*/
2750 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1078,
2751 		    "Done %s.\n", __func__);
2752 	}
2753 
2754 	return rval;
2755 }
2756 
2757 /*
2758  * qla2x00_get_id_list
2759  *
2760  * Input:
2761  *	ha = adapter block pointer.
2762  *
2763  * Returns:
2764  *	qla2x00 local function return status code.
2765  *
2766  * Context:
2767  *	Kernel context.
2768  */
2769 int
2770 qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma,
2771     uint16_t *entries)
2772 {
2773 	int rval;
2774 	mbx_cmd_t mc;
2775 	mbx_cmd_t *mcp = &mc;
2776 
2777 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1079,
2778 	    "Entered %s.\n", __func__);
2779 
2780 	if (id_list == NULL)
2781 		return QLA_FUNCTION_FAILED;
2782 
2783 	mcp->mb[0] = MBC_GET_ID_LIST;
2784 	mcp->out_mb = MBX_0;
2785 	if (IS_FWI2_CAPABLE(vha->hw)) {
2786 		mcp->mb[2] = MSW(id_list_dma);
2787 		mcp->mb[3] = LSW(id_list_dma);
2788 		mcp->mb[6] = MSW(MSD(id_list_dma));
2789 		mcp->mb[7] = LSW(MSD(id_list_dma));
2790 		mcp->mb[8] = 0;
2791 		mcp->mb[9] = vha->vp_idx;
2792 		mcp->out_mb |= MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2;
2793 	} else {
2794 		mcp->mb[1] = MSW(id_list_dma);
2795 		mcp->mb[2] = LSW(id_list_dma);
2796 		mcp->mb[3] = MSW(MSD(id_list_dma));
2797 		mcp->mb[6] = LSW(MSD(id_list_dma));
2798 		mcp->out_mb |= MBX_6|MBX_3|MBX_2|MBX_1;
2799 	}
2800 	mcp->in_mb = MBX_1|MBX_0;
2801 	mcp->tov = MBX_TOV_SECONDS;
2802 	mcp->flags = 0;
2803 	rval = qla2x00_mailbox_command(vha, mcp);
2804 
2805 	if (rval != QLA_SUCCESS) {
2806 		/*EMPTY*/
2807 		ql_dbg(ql_dbg_mbx, vha, 0x107a, "Failed=%x.\n", rval);
2808 	} else {
2809 		*entries = mcp->mb[1];
2810 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107b,
2811 		    "Done %s.\n", __func__);
2812 	}
2813 
2814 	return rval;
2815 }
2816 
2817 /*
2818  * qla2x00_get_resource_cnts
2819  *	Get current firmware resource counts.
2820  *
2821  * Input:
2822  *	ha = adapter block pointer.
2823  *
2824  * Returns:
2825  *	qla2x00 local function return status code.
2826  *
2827  * Context:
2828  *	Kernel context.
2829  */
2830 int
2831 qla2x00_get_resource_cnts(scsi_qla_host_t *vha)
2832 {
2833 	struct qla_hw_data *ha = vha->hw;
2834 	int rval;
2835 	mbx_cmd_t mc;
2836 	mbx_cmd_t *mcp = &mc;
2837 
2838 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107c,
2839 	    "Entered %s.\n", __func__);
2840 
2841 	mcp->mb[0] = MBC_GET_RESOURCE_COUNTS;
2842 	mcp->out_mb = MBX_0;
2843 	mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
2844 	if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw) || IS_QLA27XX(vha->hw))
2845 		mcp->in_mb |= MBX_12;
2846 	mcp->tov = MBX_TOV_SECONDS;
2847 	mcp->flags = 0;
2848 	rval = qla2x00_mailbox_command(vha, mcp);
2849 
2850 	if (rval != QLA_SUCCESS) {
2851 		/*EMPTY*/
2852 		ql_dbg(ql_dbg_mbx, vha, 0x107d,
2853 		    "Failed mb[0]=%x.\n", mcp->mb[0]);
2854 	} else {
2855 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107e,
2856 		    "Done %s mb1=%x mb2=%x mb3=%x mb6=%x mb7=%x mb10=%x "
2857 		    "mb11=%x mb12=%x.\n", __func__, mcp->mb[1], mcp->mb[2],
2858 		    mcp->mb[3], mcp->mb[6], mcp->mb[7], mcp->mb[10],
2859 		    mcp->mb[11], mcp->mb[12]);
2860 
2861 		ha->orig_fw_tgt_xcb_count =  mcp->mb[1];
2862 		ha->cur_fw_tgt_xcb_count = mcp->mb[2];
2863 		ha->cur_fw_xcb_count = mcp->mb[3];
2864 		ha->orig_fw_xcb_count = mcp->mb[6];
2865 		ha->cur_fw_iocb_count = mcp->mb[7];
2866 		ha->orig_fw_iocb_count = mcp->mb[10];
2867 		if (ha->flags.npiv_supported)
2868 			ha->max_npiv_vports = mcp->mb[11];
2869 		if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha))
2870 			ha->fw_max_fcf_count = mcp->mb[12];
2871 	}
2872 
2873 	return (rval);
2874 }
2875 
2876 /*
2877  * qla2x00_get_fcal_position_map
2878  *	Get FCAL (LILP) position map using mailbox command
2879  *
2880  * Input:
2881  *	ha = adapter state pointer.
2882  *	pos_map = buffer pointer (can be NULL).
2883  *
2884  * Returns:
2885  *	qla2x00 local function return status code.
2886  *
2887  * Context:
2888  *	Kernel context.
2889  */
2890 int
2891 qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
2892 {
2893 	int rval;
2894 	mbx_cmd_t mc;
2895 	mbx_cmd_t *mcp = &mc;
2896 	char *pmap;
2897 	dma_addr_t pmap_dma;
2898 	struct qla_hw_data *ha = vha->hw;
2899 
2900 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107f,
2901 	    "Entered %s.\n", __func__);
2902 
2903 	pmap = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma);
2904 	if (pmap  == NULL) {
2905 		ql_log(ql_log_warn, vha, 0x1080,
2906 		    "Memory alloc failed.\n");
2907 		return QLA_MEMORY_ALLOC_FAILED;
2908 	}
2909 
2910 	mcp->mb[0] = MBC_GET_FC_AL_POSITION_MAP;
2911 	mcp->mb[2] = MSW(pmap_dma);
2912 	mcp->mb[3] = LSW(pmap_dma);
2913 	mcp->mb[6] = MSW(MSD(pmap_dma));
2914 	mcp->mb[7] = LSW(MSD(pmap_dma));
2915 	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
2916 	mcp->in_mb = MBX_1|MBX_0;
2917 	mcp->buf_size = FCAL_MAP_SIZE;
2918 	mcp->flags = MBX_DMA_IN;
2919 	mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
2920 	rval = qla2x00_mailbox_command(vha, mcp);
2921 
2922 	if (rval == QLA_SUCCESS) {
2923 		ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1081,
2924 		    "mb0/mb1=%x/%X FC/AL position map size (%x).\n",
2925 		    mcp->mb[0], mcp->mb[1], (unsigned)pmap[0]);
2926 		ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111d,
2927 		    pmap, pmap[0] + 1);
2928 
2929 		if (pos_map)
2930 			memcpy(pos_map, pmap, FCAL_MAP_SIZE);
2931 	}
2932 	dma_pool_free(ha->s_dma_pool, pmap, pmap_dma);
2933 
2934 	if (rval != QLA_SUCCESS) {
2935 		ql_dbg(ql_dbg_mbx, vha, 0x1082, "Failed=%x.\n", rval);
2936 	} else {
2937 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1083,
2938 		    "Done %s.\n", __func__);
2939 	}
2940 
2941 	return rval;
2942 }
2943 
2944 /*
2945  * qla2x00_get_link_status
2946  *
2947  * Input:
2948  *	ha = adapter block pointer.
2949  *	loop_id = device loop ID.
2950  *	ret_buf = pointer to link status return buffer.
2951  *
2952  * Returns:
2953  *	0 = success.
2954  *	BIT_0 = mem alloc error.
2955  *	BIT_1 = mailbox error.
2956  */
2957 int
2958 qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
2959     struct link_statistics *stats, dma_addr_t stats_dma)
2960 {
2961 	int rval;
2962 	mbx_cmd_t mc;
2963 	mbx_cmd_t *mcp = &mc;
2964 	uint32_t *iter = (void *)stats;
2965 	ushort dwords = offsetof(typeof(*stats), link_up_cnt)/sizeof(*iter);
2966 	struct qla_hw_data *ha = vha->hw;
2967 
2968 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1084,
2969 	    "Entered %s.\n", __func__);
2970 
2971 	mcp->mb[0] = MBC_GET_LINK_STATUS;
2972 	mcp->mb[2] = MSW(LSD(stats_dma));
2973 	mcp->mb[3] = LSW(LSD(stats_dma));
2974 	mcp->mb[6] = MSW(MSD(stats_dma));
2975 	mcp->mb[7] = LSW(MSD(stats_dma));
2976 	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
2977 	mcp->in_mb = MBX_0;
2978 	if (IS_FWI2_CAPABLE(ha)) {
2979 		mcp->mb[1] = loop_id;
2980 		mcp->mb[4] = 0;
2981 		mcp->mb[10] = 0;
2982 		mcp->out_mb |= MBX_10|MBX_4|MBX_1;
2983 		mcp->in_mb |= MBX_1;
2984 	} else if (HAS_EXTENDED_IDS(ha)) {
2985 		mcp->mb[1] = loop_id;
2986 		mcp->mb[10] = 0;
2987 		mcp->out_mb |= MBX_10|MBX_1;
2988 	} else {
2989 		mcp->mb[1] = loop_id << 8;
2990 		mcp->out_mb |= MBX_1;
2991 	}
2992 	mcp->tov = MBX_TOV_SECONDS;
2993 	mcp->flags = IOCTL_CMD;
2994 	rval = qla2x00_mailbox_command(vha, mcp);
2995 
2996 	if (rval == QLA_SUCCESS) {
2997 		if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
2998 			ql_dbg(ql_dbg_mbx, vha, 0x1085,
2999 			    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3000 			rval = QLA_FUNCTION_FAILED;
3001 		} else {
3002 			/* Re-endianize - firmware data is le32. */
3003 			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1086,
3004 			    "Done %s.\n", __func__);
3005 			for ( ; dwords--; iter++)
3006 				le32_to_cpus(iter);
3007 		}
3008 	} else {
3009 		/* Failed. */
3010 		ql_dbg(ql_dbg_mbx, vha, 0x1087, "Failed=%x.\n", rval);
3011 	}
3012 
3013 	return rval;
3014 }
3015 
3016 int
3017 qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
3018     dma_addr_t stats_dma, uint16_t options)
3019 {
3020 	int rval;
3021 	mbx_cmd_t mc;
3022 	mbx_cmd_t *mcp = &mc;
3023 	uint32_t *iter, dwords;
3024 
3025 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1088,
3026 	    "Entered %s.\n", __func__);
3027 
3028 	memset(&mc, 0, sizeof(mc));
3029 	mc.mb[0] = MBC_GET_LINK_PRIV_STATS;
3030 	mc.mb[2] = MSW(stats_dma);
3031 	mc.mb[3] = LSW(stats_dma);
3032 	mc.mb[6] = MSW(MSD(stats_dma));
3033 	mc.mb[7] = LSW(MSD(stats_dma));
3034 	mc.mb[8] = sizeof(struct link_statistics) / 4;
3035 	mc.mb[9] = cpu_to_le16(vha->vp_idx);
3036 	mc.mb[10] = cpu_to_le16(options);
3037 
3038 	rval = qla24xx_send_mb_cmd(vha, &mc);
3039 
3040 	if (rval == QLA_SUCCESS) {
3041 		if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
3042 			ql_dbg(ql_dbg_mbx, vha, 0x1089,
3043 			    "Failed mb[0]=%x.\n", mcp->mb[0]);
3044 			rval = QLA_FUNCTION_FAILED;
3045 		} else {
3046 			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108a,
3047 			    "Done %s.\n", __func__);
3048 			/* Re-endianize - firmware data is le32. */
3049 			dwords = sizeof(struct link_statistics) / 4;
3050 			iter = &stats->link_fail_cnt;
3051 			for ( ; dwords--; iter++)
3052 				le32_to_cpus(iter);
3053 		}
3054 	} else {
3055 		/* Failed. */
3056 		ql_dbg(ql_dbg_mbx, vha, 0x108b, "Failed=%x.\n", rval);
3057 	}
3058 
3059 	return rval;
3060 }
3061 
3062 int
3063 qla24xx_abort_command(srb_t *sp)
3064 {
3065 	int		rval;
3066 	unsigned long   flags = 0;
3067 
3068 	struct abort_entry_24xx *abt;
3069 	dma_addr_t	abt_dma;
3070 	uint32_t	handle;
3071 	fc_port_t	*fcport = sp->fcport;
3072 	struct scsi_qla_host *vha = fcport->vha;
3073 	struct qla_hw_data *ha = vha->hw;
3074 	struct req_que *req = vha->req;
3075 
3076 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c,
3077 	    "Entered %s.\n", __func__);
3078 
3079 	if (vha->flags.qpairs_available && sp->qpair)
3080 		req = sp->qpair->req;
3081 
3082 	if (ql2xasynctmfenable)
3083 		return qla24xx_async_abort_command(sp);
3084 
3085 	spin_lock_irqsave(&ha->hardware_lock, flags);
3086 	for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
3087 		if (req->outstanding_cmds[handle] == sp)
3088 			break;
3089 	}
3090 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
3091 	if (handle == req->num_outstanding_cmds) {
3092 		/* Command not found. */
3093 		return QLA_FUNCTION_FAILED;
3094 	}
3095 
3096 	abt = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &abt_dma);
3097 	if (abt == NULL) {
3098 		ql_log(ql_log_warn, vha, 0x108d,
3099 		    "Failed to allocate abort IOCB.\n");
3100 		return QLA_MEMORY_ALLOC_FAILED;
3101 	}
3102 
3103 	abt->entry_type = ABORT_IOCB_TYPE;
3104 	abt->entry_count = 1;
3105 	abt->handle = MAKE_HANDLE(req->id, abt->handle);
3106 	abt->nport_handle = cpu_to_le16(fcport->loop_id);
3107 	abt->handle_to_abort = MAKE_HANDLE(req->id, handle);
3108 	abt->port_id[0] = fcport->d_id.b.al_pa;
3109 	abt->port_id[1] = fcport->d_id.b.area;
3110 	abt->port_id[2] = fcport->d_id.b.domain;
3111 	abt->vp_index = fcport->vha->vp_idx;
3112 
3113 	abt->req_que_no = cpu_to_le16(req->id);
3114 
3115 	rval = qla2x00_issue_iocb(vha, abt, abt_dma, 0);
3116 	if (rval != QLA_SUCCESS) {
3117 		ql_dbg(ql_dbg_mbx, vha, 0x108e,
3118 		    "Failed to issue IOCB (%x).\n", rval);
3119 	} else if (abt->entry_status != 0) {
3120 		ql_dbg(ql_dbg_mbx, vha, 0x108f,
3121 		    "Failed to complete IOCB -- error status (%x).\n",
3122 		    abt->entry_status);
3123 		rval = QLA_FUNCTION_FAILED;
3124 	} else if (abt->nport_handle != cpu_to_le16(0)) {
3125 		ql_dbg(ql_dbg_mbx, vha, 0x1090,
3126 		    "Failed to complete IOCB -- completion status (%x).\n",
3127 		    le16_to_cpu(abt->nport_handle));
3128 		if (abt->nport_handle == CS_IOCB_ERROR)
3129 			rval = QLA_FUNCTION_PARAMETER_ERROR;
3130 		else
3131 			rval = QLA_FUNCTION_FAILED;
3132 	} else {
3133 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1091,
3134 		    "Done %s.\n", __func__);
3135 	}
3136 
3137 	dma_pool_free(ha->s_dma_pool, abt, abt_dma);
3138 
3139 	return rval;
3140 }
3141 
3142 struct tsk_mgmt_cmd {
3143 	union {
3144 		struct tsk_mgmt_entry tsk;
3145 		struct sts_entry_24xx sts;
3146 	} p;
3147 };
3148 
3149 static int
3150 __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
3151     uint64_t l, int tag)
3152 {
3153 	int		rval, rval2;
3154 	struct tsk_mgmt_cmd *tsk;
3155 	struct sts_entry_24xx *sts;
3156 	dma_addr_t	tsk_dma;
3157 	scsi_qla_host_t *vha;
3158 	struct qla_hw_data *ha;
3159 	struct req_que *req;
3160 	struct rsp_que *rsp;
3161 	struct qla_qpair *qpair;
3162 
3163 	vha = fcport->vha;
3164 	ha = vha->hw;
3165 	req = vha->req;
3166 
3167 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1092,
3168 	    "Entered %s.\n", __func__);
3169 
3170 	if (vha->vp_idx && vha->qpair) {
3171 		/* NPIV port */
3172 		qpair = vha->qpair;
3173 		rsp = qpair->rsp;
3174 		req = qpair->req;
3175 	} else {
3176 		rsp = req->rsp;
3177 	}
3178 
3179 	tsk = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma);
3180 	if (tsk == NULL) {
3181 		ql_log(ql_log_warn, vha, 0x1093,
3182 		    "Failed to allocate task management IOCB.\n");
3183 		return QLA_MEMORY_ALLOC_FAILED;
3184 	}
3185 
3186 	tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE;
3187 	tsk->p.tsk.entry_count = 1;
3188 	tsk->p.tsk.handle = MAKE_HANDLE(req->id, tsk->p.tsk.handle);
3189 	tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id);
3190 	tsk->p.tsk.timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
3191 	tsk->p.tsk.control_flags = cpu_to_le32(type);
3192 	tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa;
3193 	tsk->p.tsk.port_id[1] = fcport->d_id.b.area;
3194 	tsk->p.tsk.port_id[2] = fcport->d_id.b.domain;
3195 	tsk->p.tsk.vp_index = fcport->vha->vp_idx;
3196 	if (type == TCF_LUN_RESET) {
3197 		int_to_scsilun(l, &tsk->p.tsk.lun);
3198 		host_to_fcp_swap((uint8_t *)&tsk->p.tsk.lun,
3199 		    sizeof(tsk->p.tsk.lun));
3200 	}
3201 
3202 	sts = &tsk->p.sts;
3203 	rval = qla2x00_issue_iocb(vha, tsk, tsk_dma, 0);
3204 	if (rval != QLA_SUCCESS) {
3205 		ql_dbg(ql_dbg_mbx, vha, 0x1094,
3206 		    "Failed to issue %s reset IOCB (%x).\n", name, rval);
3207 	} else if (sts->entry_status != 0) {
3208 		ql_dbg(ql_dbg_mbx, vha, 0x1095,
3209 		    "Failed to complete IOCB -- error status (%x).\n",
3210 		    sts->entry_status);
3211 		rval = QLA_FUNCTION_FAILED;
3212 	} else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) {
3213 		ql_dbg(ql_dbg_mbx, vha, 0x1096,
3214 		    "Failed to complete IOCB -- completion status (%x).\n",
3215 		    le16_to_cpu(sts->comp_status));
3216 		rval = QLA_FUNCTION_FAILED;
3217 	} else if (le16_to_cpu(sts->scsi_status) &
3218 	    SS_RESPONSE_INFO_LEN_VALID) {
3219 		if (le32_to_cpu(sts->rsp_data_len) < 4) {
3220 			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1097,
3221 			    "Ignoring inconsistent data length -- not enough "
3222 			    "response info (%d).\n",
3223 			    le32_to_cpu(sts->rsp_data_len));
3224 		} else if (sts->data[3]) {
3225 			ql_dbg(ql_dbg_mbx, vha, 0x1098,
3226 			    "Failed to complete IOCB -- response (%x).\n",
3227 			    sts->data[3]);
3228 			rval = QLA_FUNCTION_FAILED;
3229 		}
3230 	}
3231 
3232 	/* Issue marker IOCB. */
3233 	rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l,
3234 	    type == TCF_LUN_RESET ? MK_SYNC_ID_LUN: MK_SYNC_ID);
3235 	if (rval2 != QLA_SUCCESS) {
3236 		ql_dbg(ql_dbg_mbx, vha, 0x1099,
3237 		    "Failed to issue marker IOCB (%x).\n", rval2);
3238 	} else {
3239 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109a,
3240 		    "Done %s.\n", __func__);
3241 	}
3242 
3243 	dma_pool_free(ha->s_dma_pool, tsk, tsk_dma);
3244 
3245 	return rval;
3246 }
3247 
3248 int
3249 qla24xx_abort_target(struct fc_port *fcport, uint64_t l, int tag)
3250 {
3251 	struct qla_hw_data *ha = fcport->vha->hw;
3252 
3253 	if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha))
3254 		return qla2x00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag);
3255 
3256 	return __qla24xx_issue_tmf("Target", TCF_TARGET_RESET, fcport, l, tag);
3257 }
3258 
3259 int
3260 qla24xx_lun_reset(struct fc_port *fcport, uint64_t l, int tag)
3261 {
3262 	struct qla_hw_data *ha = fcport->vha->hw;
3263 
3264 	if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha))
3265 		return qla2x00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag);
3266 
3267 	return __qla24xx_issue_tmf("Lun", TCF_LUN_RESET, fcport, l, tag);
3268 }
3269 
3270 int
3271 qla2x00_system_error(scsi_qla_host_t *vha)
3272 {
3273 	int rval;
3274 	mbx_cmd_t mc;
3275 	mbx_cmd_t *mcp = &mc;
3276 	struct qla_hw_data *ha = vha->hw;
3277 
3278 	if (!IS_QLA23XX(ha) && !IS_FWI2_CAPABLE(ha))
3279 		return QLA_FUNCTION_FAILED;
3280 
3281 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109b,
3282 	    "Entered %s.\n", __func__);
3283 
3284 	mcp->mb[0] = MBC_GEN_SYSTEM_ERROR;
3285 	mcp->out_mb = MBX_0;
3286 	mcp->in_mb = MBX_0;
3287 	mcp->tov = 5;
3288 	mcp->flags = 0;
3289 	rval = qla2x00_mailbox_command(vha, mcp);
3290 
3291 	if (rval != QLA_SUCCESS) {
3292 		ql_dbg(ql_dbg_mbx, vha, 0x109c, "Failed=%x.\n", rval);
3293 	} else {
3294 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109d,
3295 		    "Done %s.\n", __func__);
3296 	}
3297 
3298 	return rval;
3299 }
3300 
3301 int
3302 qla2x00_write_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t data)
3303 {
3304 	int rval;
3305 	mbx_cmd_t mc;
3306 	mbx_cmd_t *mcp = &mc;
3307 
3308 	if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) &&
3309 	    !IS_QLA27XX(vha->hw))
3310 		return QLA_FUNCTION_FAILED;
3311 
3312 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1182,
3313 	    "Entered %s.\n", __func__);
3314 
3315 	mcp->mb[0] = MBC_WRITE_SERDES;
3316 	mcp->mb[1] = addr;
3317 	if (IS_QLA2031(vha->hw))
3318 		mcp->mb[2] = data & 0xff;
3319 	else
3320 		mcp->mb[2] = data;
3321 
3322 	mcp->mb[3] = 0;
3323 	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
3324 	mcp->in_mb = MBX_0;
3325 	mcp->tov = MBX_TOV_SECONDS;
3326 	mcp->flags = 0;
3327 	rval = qla2x00_mailbox_command(vha, mcp);
3328 
3329 	if (rval != QLA_SUCCESS) {
3330 		ql_dbg(ql_dbg_mbx, vha, 0x1183,
3331 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3332 	} else {
3333 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1184,
3334 		    "Done %s.\n", __func__);
3335 	}
3336 
3337 	return rval;
3338 }
3339 
3340 int
3341 qla2x00_read_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t *data)
3342 {
3343 	int rval;
3344 	mbx_cmd_t mc;
3345 	mbx_cmd_t *mcp = &mc;
3346 
3347 	if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) &&
3348 	    !IS_QLA27XX(vha->hw))
3349 		return QLA_FUNCTION_FAILED;
3350 
3351 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1185,
3352 	    "Entered %s.\n", __func__);
3353 
3354 	mcp->mb[0] = MBC_READ_SERDES;
3355 	mcp->mb[1] = addr;
3356 	mcp->mb[3] = 0;
3357 	mcp->out_mb = MBX_3|MBX_1|MBX_0;
3358 	mcp->in_mb = MBX_1|MBX_0;
3359 	mcp->tov = MBX_TOV_SECONDS;
3360 	mcp->flags = 0;
3361 	rval = qla2x00_mailbox_command(vha, mcp);
3362 
3363 	if (IS_QLA2031(vha->hw))
3364 		*data = mcp->mb[1] & 0xff;
3365 	else
3366 		*data = mcp->mb[1];
3367 
3368 	if (rval != QLA_SUCCESS) {
3369 		ql_dbg(ql_dbg_mbx, vha, 0x1186,
3370 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3371 	} else {
3372 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1187,
3373 		    "Done %s.\n", __func__);
3374 	}
3375 
3376 	return rval;
3377 }
3378 
3379 int
3380 qla8044_write_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t data)
3381 {
3382 	int rval;
3383 	mbx_cmd_t mc;
3384 	mbx_cmd_t *mcp = &mc;
3385 
3386 	if (!IS_QLA8044(vha->hw))
3387 		return QLA_FUNCTION_FAILED;
3388 
3389 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x11a0,
3390 	    "Entered %s.\n", __func__);
3391 
3392 	mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG;
3393 	mcp->mb[1] = HCS_WRITE_SERDES;
3394 	mcp->mb[3] = LSW(addr);
3395 	mcp->mb[4] = MSW(addr);
3396 	mcp->mb[5] = LSW(data);
3397 	mcp->mb[6] = MSW(data);
3398 	mcp->out_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_1|MBX_0;
3399 	mcp->in_mb = MBX_0;
3400 	mcp->tov = MBX_TOV_SECONDS;
3401 	mcp->flags = 0;
3402 	rval = qla2x00_mailbox_command(vha, mcp);
3403 
3404 	if (rval != QLA_SUCCESS) {
3405 		ql_dbg(ql_dbg_mbx, vha, 0x11a1,
3406 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3407 	} else {
3408 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1188,
3409 		    "Done %s.\n", __func__);
3410 	}
3411 
3412 	return rval;
3413 }
3414 
3415 int
3416 qla8044_read_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t *data)
3417 {
3418 	int rval;
3419 	mbx_cmd_t mc;
3420 	mbx_cmd_t *mcp = &mc;
3421 
3422 	if (!IS_QLA8044(vha->hw))
3423 		return QLA_FUNCTION_FAILED;
3424 
3425 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1189,
3426 	    "Entered %s.\n", __func__);
3427 
3428 	mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG;
3429 	mcp->mb[1] = HCS_READ_SERDES;
3430 	mcp->mb[3] = LSW(addr);
3431 	mcp->mb[4] = MSW(addr);
3432 	mcp->out_mb = MBX_4|MBX_3|MBX_1|MBX_0;
3433 	mcp->in_mb = MBX_2|MBX_1|MBX_0;
3434 	mcp->tov = MBX_TOV_SECONDS;
3435 	mcp->flags = 0;
3436 	rval = qla2x00_mailbox_command(vha, mcp);
3437 
3438 	*data = mcp->mb[2] << 16 | mcp->mb[1];
3439 
3440 	if (rval != QLA_SUCCESS) {
3441 		ql_dbg(ql_dbg_mbx, vha, 0x118a,
3442 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3443 	} else {
3444 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118b,
3445 		    "Done %s.\n", __func__);
3446 	}
3447 
3448 	return rval;
3449 }
3450 
3451 /**
3452  * qla2x00_set_serdes_params() -
3453  * @vha: HA context
3454  * @sw_em_1g:
3455  * @sw_em_2g:
3456  * @sw_em_4g:
3457  *
3458  * Returns
3459  */
3460 int
3461 qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g,
3462     uint16_t sw_em_2g, uint16_t sw_em_4g)
3463 {
3464 	int rval;
3465 	mbx_cmd_t mc;
3466 	mbx_cmd_t *mcp = &mc;
3467 
3468 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109e,
3469 	    "Entered %s.\n", __func__);
3470 
3471 	mcp->mb[0] = MBC_SERDES_PARAMS;
3472 	mcp->mb[1] = BIT_0;
3473 	mcp->mb[2] = sw_em_1g | BIT_15;
3474 	mcp->mb[3] = sw_em_2g | BIT_15;
3475 	mcp->mb[4] = sw_em_4g | BIT_15;
3476 	mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3477 	mcp->in_mb = MBX_0;
3478 	mcp->tov = MBX_TOV_SECONDS;
3479 	mcp->flags = 0;
3480 	rval = qla2x00_mailbox_command(vha, mcp);
3481 
3482 	if (rval != QLA_SUCCESS) {
3483 		/*EMPTY*/
3484 		ql_dbg(ql_dbg_mbx, vha, 0x109f,
3485 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3486 	} else {
3487 		/*EMPTY*/
3488 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a0,
3489 		    "Done %s.\n", __func__);
3490 	}
3491 
3492 	return rval;
3493 }
3494 
3495 int
3496 qla2x00_stop_firmware(scsi_qla_host_t *vha)
3497 {
3498 	int rval;
3499 	mbx_cmd_t mc;
3500 	mbx_cmd_t *mcp = &mc;
3501 
3502 	if (!IS_FWI2_CAPABLE(vha->hw))
3503 		return QLA_FUNCTION_FAILED;
3504 
3505 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a1,
3506 	    "Entered %s.\n", __func__);
3507 
3508 	mcp->mb[0] = MBC_STOP_FIRMWARE;
3509 	mcp->mb[1] = 0;
3510 	mcp->out_mb = MBX_1|MBX_0;
3511 	mcp->in_mb = MBX_0;
3512 	mcp->tov = 5;
3513 	mcp->flags = 0;
3514 	rval = qla2x00_mailbox_command(vha, mcp);
3515 
3516 	if (rval != QLA_SUCCESS) {
3517 		ql_dbg(ql_dbg_mbx, vha, 0x10a2, "Failed=%x.\n", rval);
3518 		if (mcp->mb[0] == MBS_INVALID_COMMAND)
3519 			rval = QLA_INVALID_COMMAND;
3520 	} else {
3521 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a3,
3522 		    "Done %s.\n", __func__);
3523 	}
3524 
3525 	return rval;
3526 }
3527 
3528 int
3529 qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma,
3530     uint16_t buffers)
3531 {
3532 	int rval;
3533 	mbx_cmd_t mc;
3534 	mbx_cmd_t *mcp = &mc;
3535 
3536 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a4,
3537 	    "Entered %s.\n", __func__);
3538 
3539 	if (!IS_FWI2_CAPABLE(vha->hw))
3540 		return QLA_FUNCTION_FAILED;
3541 
3542 	if (unlikely(pci_channel_offline(vha->hw->pdev)))
3543 		return QLA_FUNCTION_FAILED;
3544 
3545 	mcp->mb[0] = MBC_TRACE_CONTROL;
3546 	mcp->mb[1] = TC_EFT_ENABLE;
3547 	mcp->mb[2] = LSW(eft_dma);
3548 	mcp->mb[3] = MSW(eft_dma);
3549 	mcp->mb[4] = LSW(MSD(eft_dma));
3550 	mcp->mb[5] = MSW(MSD(eft_dma));
3551 	mcp->mb[6] = buffers;
3552 	mcp->mb[7] = TC_AEN_DISABLE;
3553 	mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3554 	mcp->in_mb = MBX_1|MBX_0;
3555 	mcp->tov = MBX_TOV_SECONDS;
3556 	mcp->flags = 0;
3557 	rval = qla2x00_mailbox_command(vha, mcp);
3558 	if (rval != QLA_SUCCESS) {
3559 		ql_dbg(ql_dbg_mbx, vha, 0x10a5,
3560 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
3561 		    rval, mcp->mb[0], mcp->mb[1]);
3562 	} else {
3563 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a6,
3564 		    "Done %s.\n", __func__);
3565 	}
3566 
3567 	return rval;
3568 }
3569 
3570 int
3571 qla2x00_disable_eft_trace(scsi_qla_host_t *vha)
3572 {
3573 	int rval;
3574 	mbx_cmd_t mc;
3575 	mbx_cmd_t *mcp = &mc;
3576 
3577 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a7,
3578 	    "Entered %s.\n", __func__);
3579 
3580 	if (!IS_FWI2_CAPABLE(vha->hw))
3581 		return QLA_FUNCTION_FAILED;
3582 
3583 	if (unlikely(pci_channel_offline(vha->hw->pdev)))
3584 		return QLA_FUNCTION_FAILED;
3585 
3586 	mcp->mb[0] = MBC_TRACE_CONTROL;
3587 	mcp->mb[1] = TC_EFT_DISABLE;
3588 	mcp->out_mb = MBX_1|MBX_0;
3589 	mcp->in_mb = MBX_1|MBX_0;
3590 	mcp->tov = MBX_TOV_SECONDS;
3591 	mcp->flags = 0;
3592 	rval = qla2x00_mailbox_command(vha, mcp);
3593 	if (rval != QLA_SUCCESS) {
3594 		ql_dbg(ql_dbg_mbx, vha, 0x10a8,
3595 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
3596 		    rval, mcp->mb[0], mcp->mb[1]);
3597 	} else {
3598 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a9,
3599 		    "Done %s.\n", __func__);
3600 	}
3601 
3602 	return rval;
3603 }
3604 
3605 int
3606 qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
3607     uint16_t buffers, uint16_t *mb, uint32_t *dwords)
3608 {
3609 	int rval;
3610 	mbx_cmd_t mc;
3611 	mbx_cmd_t *mcp = &mc;
3612 
3613 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10aa,
3614 	    "Entered %s.\n", __func__);
3615 
3616 	if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw) &&
3617 	    !IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw))
3618 		return QLA_FUNCTION_FAILED;
3619 
3620 	if (unlikely(pci_channel_offline(vha->hw->pdev)))
3621 		return QLA_FUNCTION_FAILED;
3622 
3623 	mcp->mb[0] = MBC_TRACE_CONTROL;
3624 	mcp->mb[1] = TC_FCE_ENABLE;
3625 	mcp->mb[2] = LSW(fce_dma);
3626 	mcp->mb[3] = MSW(fce_dma);
3627 	mcp->mb[4] = LSW(MSD(fce_dma));
3628 	mcp->mb[5] = MSW(MSD(fce_dma));
3629 	mcp->mb[6] = buffers;
3630 	mcp->mb[7] = TC_AEN_DISABLE;
3631 	mcp->mb[8] = 0;
3632 	mcp->mb[9] = TC_FCE_DEFAULT_RX_SIZE;
3633 	mcp->mb[10] = TC_FCE_DEFAULT_TX_SIZE;
3634 	mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|
3635 	    MBX_1|MBX_0;
3636 	mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3637 	mcp->tov = MBX_TOV_SECONDS;
3638 	mcp->flags = 0;
3639 	rval = qla2x00_mailbox_command(vha, mcp);
3640 	if (rval != QLA_SUCCESS) {
3641 		ql_dbg(ql_dbg_mbx, vha, 0x10ab,
3642 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
3643 		    rval, mcp->mb[0], mcp->mb[1]);
3644 	} else {
3645 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ac,
3646 		    "Done %s.\n", __func__);
3647 
3648 		if (mb)
3649 			memcpy(mb, mcp->mb, 8 * sizeof(*mb));
3650 		if (dwords)
3651 			*dwords = buffers;
3652 	}
3653 
3654 	return rval;
3655 }
3656 
3657 int
3658 qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd)
3659 {
3660 	int rval;
3661 	mbx_cmd_t mc;
3662 	mbx_cmd_t *mcp = &mc;
3663 
3664 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ad,
3665 	    "Entered %s.\n", __func__);
3666 
3667 	if (!IS_FWI2_CAPABLE(vha->hw))
3668 		return QLA_FUNCTION_FAILED;
3669 
3670 	if (unlikely(pci_channel_offline(vha->hw->pdev)))
3671 		return QLA_FUNCTION_FAILED;
3672 
3673 	mcp->mb[0] = MBC_TRACE_CONTROL;
3674 	mcp->mb[1] = TC_FCE_DISABLE;
3675 	mcp->mb[2] = TC_FCE_DISABLE_TRACE;
3676 	mcp->out_mb = MBX_2|MBX_1|MBX_0;
3677 	mcp->in_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|
3678 	    MBX_1|MBX_0;
3679 	mcp->tov = MBX_TOV_SECONDS;
3680 	mcp->flags = 0;
3681 	rval = qla2x00_mailbox_command(vha, mcp);
3682 	if (rval != QLA_SUCCESS) {
3683 		ql_dbg(ql_dbg_mbx, vha, 0x10ae,
3684 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
3685 		    rval, mcp->mb[0], mcp->mb[1]);
3686 	} else {
3687 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10af,
3688 		    "Done %s.\n", __func__);
3689 
3690 		if (wr)
3691 			*wr = (uint64_t) mcp->mb[5] << 48 |
3692 			    (uint64_t) mcp->mb[4] << 32 |
3693 			    (uint64_t) mcp->mb[3] << 16 |
3694 			    (uint64_t) mcp->mb[2];
3695 		if (rd)
3696 			*rd = (uint64_t) mcp->mb[9] << 48 |
3697 			    (uint64_t) mcp->mb[8] << 32 |
3698 			    (uint64_t) mcp->mb[7] << 16 |
3699 			    (uint64_t) mcp->mb[6];
3700 	}
3701 
3702 	return rval;
3703 }
3704 
3705 int
3706 qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
3707 	uint16_t *port_speed, uint16_t *mb)
3708 {
3709 	int rval;
3710 	mbx_cmd_t mc;
3711 	mbx_cmd_t *mcp = &mc;
3712 
3713 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b0,
3714 	    "Entered %s.\n", __func__);
3715 
3716 	if (!IS_IIDMA_CAPABLE(vha->hw))
3717 		return QLA_FUNCTION_FAILED;
3718 
3719 	mcp->mb[0] = MBC_PORT_PARAMS;
3720 	mcp->mb[1] = loop_id;
3721 	mcp->mb[2] = mcp->mb[3] = 0;
3722 	mcp->mb[9] = vha->vp_idx;
3723 	mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
3724 	mcp->in_mb = MBX_3|MBX_1|MBX_0;
3725 	mcp->tov = MBX_TOV_SECONDS;
3726 	mcp->flags = 0;
3727 	rval = qla2x00_mailbox_command(vha, mcp);
3728 
3729 	/* Return mailbox statuses. */
3730 	if (mb != NULL) {
3731 		mb[0] = mcp->mb[0];
3732 		mb[1] = mcp->mb[1];
3733 		mb[3] = mcp->mb[3];
3734 	}
3735 
3736 	if (rval != QLA_SUCCESS) {
3737 		ql_dbg(ql_dbg_mbx, vha, 0x10b1, "Failed=%x.\n", rval);
3738 	} else {
3739 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b2,
3740 		    "Done %s.\n", __func__);
3741 		if (port_speed)
3742 			*port_speed = mcp->mb[3];
3743 	}
3744 
3745 	return rval;
3746 }
3747 
3748 int
3749 qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
3750     uint16_t port_speed, uint16_t *mb)
3751 {
3752 	int rval;
3753 	mbx_cmd_t mc;
3754 	mbx_cmd_t *mcp = &mc;
3755 
3756 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b3,
3757 	    "Entered %s.\n", __func__);
3758 
3759 	if (!IS_IIDMA_CAPABLE(vha->hw))
3760 		return QLA_FUNCTION_FAILED;
3761 
3762 	mcp->mb[0] = MBC_PORT_PARAMS;
3763 	mcp->mb[1] = loop_id;
3764 	mcp->mb[2] = BIT_0;
3765 	if (IS_CNA_CAPABLE(vha->hw))
3766 		mcp->mb[3] = port_speed & (BIT_5|BIT_4|BIT_3|BIT_2|BIT_1|BIT_0);
3767 	else
3768 		mcp->mb[3] = port_speed & (BIT_2|BIT_1|BIT_0);
3769 	mcp->mb[9] = vha->vp_idx;
3770 	mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
3771 	mcp->in_mb = MBX_3|MBX_1|MBX_0;
3772 	mcp->tov = MBX_TOV_SECONDS;
3773 	mcp->flags = 0;
3774 	rval = qla2x00_mailbox_command(vha, mcp);
3775 
3776 	/* Return mailbox statuses. */
3777 	if (mb != NULL) {
3778 		mb[0] = mcp->mb[0];
3779 		mb[1] = mcp->mb[1];
3780 		mb[3] = mcp->mb[3];
3781 	}
3782 
3783 	if (rval != QLA_SUCCESS) {
3784 		ql_dbg(ql_dbg_mbx, vha, 0x10b4,
3785 		    "Failed=%x.\n", rval);
3786 	} else {
3787 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b5,
3788 		    "Done %s.\n", __func__);
3789 	}
3790 
3791 	return rval;
3792 }
3793 
3794 void
3795 qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
3796 	struct vp_rpt_id_entry_24xx *rptid_entry)
3797 {
3798 	struct qla_hw_data *ha = vha->hw;
3799 	scsi_qla_host_t *vp = NULL;
3800 	unsigned long   flags;
3801 	int found;
3802 	port_id_t id;
3803 	struct fc_port *fcport;
3804 
3805 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6,
3806 	    "Entered %s.\n", __func__);
3807 
3808 	if (rptid_entry->entry_status != 0)
3809 		return;
3810 
3811 	id.b.domain = rptid_entry->port_id[2];
3812 	id.b.area   = rptid_entry->port_id[1];
3813 	id.b.al_pa  = rptid_entry->port_id[0];
3814 	id.b.rsvd_1 = 0;
3815 	ha->flags.n2n_ae = 0;
3816 
3817 	if (rptid_entry->format == 0) {
3818 		/* loop */
3819 		ql_dbg(ql_dbg_async, vha, 0x10b7,
3820 		    "Format 0 : Number of VPs setup %d, number of "
3821 		    "VPs acquired %d.\n", rptid_entry->vp_setup,
3822 		    rptid_entry->vp_acquired);
3823 		ql_dbg(ql_dbg_async, vha, 0x10b8,
3824 		    "Primary port id %02x%02x%02x.\n",
3825 		    rptid_entry->port_id[2], rptid_entry->port_id[1],
3826 		    rptid_entry->port_id[0]);
3827 		ha->current_topology = ISP_CFG_NL;
3828 		qlt_update_host_map(vha, id);
3829 
3830 	} else if (rptid_entry->format == 1) {
3831 		/* fabric */
3832 		ql_dbg(ql_dbg_async, vha, 0x10b9,
3833 		    "Format 1: VP[%d] enabled - status %d - with "
3834 		    "port id %02x%02x%02x.\n", rptid_entry->vp_idx,
3835 			rptid_entry->vp_status,
3836 		    rptid_entry->port_id[2], rptid_entry->port_id[1],
3837 		    rptid_entry->port_id[0]);
3838 		ql_dbg(ql_dbg_async, vha, 0x5075,
3839 		   "Format 1: Remote WWPN %8phC.\n",
3840 		   rptid_entry->u.f1.port_name);
3841 
3842 		ql_dbg(ql_dbg_async, vha, 0x5075,
3843 		   "Format 1: WWPN %8phC.\n",
3844 		   vha->port_name);
3845 
3846 		switch (rptid_entry->u.f1.flags & TOPO_MASK) {
3847 		case TOPO_N2N:
3848 			ha->current_topology = ISP_CFG_N;
3849 			spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
3850 			fcport = qla2x00_find_fcport_by_wwpn(vha,
3851 			    rptid_entry->u.f1.port_name, 1);
3852 			spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
3853 
3854 			if (fcport) {
3855 				fcport->plogi_nack_done_deadline = jiffies + HZ;
3856 				fcport->dm_login_expire = jiffies + 3*HZ;
3857 				fcport->scan_state = QLA_FCPORT_FOUND;
3858 				switch (fcport->disc_state) {
3859 				case DSC_DELETED:
3860 					set_bit(RELOGIN_NEEDED,
3861 					    &vha->dpc_flags);
3862 					break;
3863 				case DSC_DELETE_PEND:
3864 					break;
3865 				default:
3866 					qlt_schedule_sess_for_deletion(fcport);
3867 					break;
3868 				}
3869 			} else {
3870 				id.b24 = 0;
3871 				if (wwn_to_u64(vha->port_name) >
3872 				    wwn_to_u64(rptid_entry->u.f1.port_name)) {
3873 					vha->d_id.b24 = 0;
3874 					vha->d_id.b.al_pa = 1;
3875 					ha->flags.n2n_bigger = 1;
3876 
3877 					id.b.al_pa = 2;
3878 					ql_dbg(ql_dbg_async, vha, 0x5075,
3879 					    "Format 1: assign local id %x remote id %x\n",
3880 					    vha->d_id.b24, id.b24);
3881 				} else {
3882 					ql_dbg(ql_dbg_async, vha, 0x5075,
3883 					    "Format 1: Remote login - Waiting for WWPN %8phC.\n",
3884 					    rptid_entry->u.f1.port_name);
3885 					ha->flags.n2n_bigger = 0;
3886 				}
3887 				qla24xx_post_newsess_work(vha, &id,
3888 				    rptid_entry->u.f1.port_name,
3889 				    rptid_entry->u.f1.node_name,
3890 				    NULL,
3891 				    FC4_TYPE_UNKNOWN);
3892 			}
3893 
3894 			/* if our portname is higher then initiate N2N login */
3895 
3896 			set_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags);
3897 			ha->flags.n2n_ae = 1;
3898 			return;
3899 			break;
3900 		case TOPO_FL:
3901 			ha->current_topology = ISP_CFG_FL;
3902 			break;
3903 		case TOPO_F:
3904 			ha->current_topology = ISP_CFG_F;
3905 			break;
3906 		default:
3907 			break;
3908 		}
3909 
3910 		ha->flags.gpsc_supported = 1;
3911 		ha->current_topology = ISP_CFG_F;
3912 		/* buffer to buffer credit flag */
3913 		vha->flags.bbcr_enable = (rptid_entry->u.f1.bbcr & 0xf) != 0;
3914 
3915 		if (rptid_entry->vp_idx == 0) {
3916 			if (rptid_entry->vp_status == VP_STAT_COMPL) {
3917 				/* FA-WWN is only for physical port */
3918 				if (qla_ini_mode_enabled(vha) &&
3919 				    ha->flags.fawwpn_enabled &&
3920 				    (rptid_entry->u.f1.flags &
3921 				     BIT_6)) {
3922 					memcpy(vha->port_name,
3923 					    rptid_entry->u.f1.port_name,
3924 					    WWN_SIZE);
3925 				}
3926 
3927 				qlt_update_host_map(vha, id);
3928 			}
3929 
3930 			set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
3931 			set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
3932 		} else {
3933 			if (rptid_entry->vp_status != VP_STAT_COMPL &&
3934 				rptid_entry->vp_status != VP_STAT_ID_CHG) {
3935 				ql_dbg(ql_dbg_mbx, vha, 0x10ba,
3936 				    "Could not acquire ID for VP[%d].\n",
3937 				    rptid_entry->vp_idx);
3938 				return;
3939 			}
3940 
3941 			found = 0;
3942 			spin_lock_irqsave(&ha->vport_slock, flags);
3943 			list_for_each_entry(vp, &ha->vp_list, list) {
3944 				if (rptid_entry->vp_idx == vp->vp_idx) {
3945 					found = 1;
3946 					break;
3947 				}
3948 			}
3949 			spin_unlock_irqrestore(&ha->vport_slock, flags);
3950 
3951 			if (!found)
3952 				return;
3953 
3954 			qlt_update_host_map(vp, id);
3955 
3956 			/*
3957 			 * Cannot configure here as we are still sitting on the
3958 			 * response queue. Handle it in dpc context.
3959 			 */
3960 			set_bit(VP_IDX_ACQUIRED, &vp->vp_flags);
3961 			set_bit(REGISTER_FC4_NEEDED, &vp->dpc_flags);
3962 			set_bit(REGISTER_FDMI_NEEDED, &vp->dpc_flags);
3963 		}
3964 		set_bit(VP_DPC_NEEDED, &vha->dpc_flags);
3965 		qla2xxx_wake_dpc(vha);
3966 	} else if (rptid_entry->format == 2) {
3967 		ql_dbg(ql_dbg_async, vha, 0x505f,
3968 		    "RIDA: format 2/N2N Primary port id %02x%02x%02x.\n",
3969 		    rptid_entry->port_id[2], rptid_entry->port_id[1],
3970 		    rptid_entry->port_id[0]);
3971 
3972 		ql_dbg(ql_dbg_async, vha, 0x5075,
3973 		    "N2N: Remote WWPN %8phC.\n",
3974 		    rptid_entry->u.f2.port_name);
3975 
3976 		/* N2N.  direct connect */
3977 		ha->current_topology = ISP_CFG_N;
3978 		ha->flags.rida_fmt2 = 1;
3979 		vha->d_id.b.domain = rptid_entry->port_id[2];
3980 		vha->d_id.b.area = rptid_entry->port_id[1];
3981 		vha->d_id.b.al_pa = rptid_entry->port_id[0];
3982 
3983 		ha->flags.n2n_ae = 1;
3984 		spin_lock_irqsave(&ha->vport_slock, flags);
3985 		qlt_update_vp_map(vha, SET_AL_PA);
3986 		spin_unlock_irqrestore(&ha->vport_slock, flags);
3987 
3988 		list_for_each_entry(fcport, &vha->vp_fcports, list) {
3989 			fcport->scan_state = QLA_FCPORT_SCAN;
3990 		}
3991 
3992 		fcport = qla2x00_find_fcport_by_wwpn(vha,
3993 		    rptid_entry->u.f2.port_name, 1);
3994 
3995 		if (fcport) {
3996 			fcport->login_retry = vha->hw->login_retry_count;
3997 			fcport->plogi_nack_done_deadline = jiffies + HZ;
3998 			fcport->scan_state = QLA_FCPORT_FOUND;
3999 		}
4000 	}
4001 }
4002 
4003 /*
4004  * qla24xx_modify_vp_config
4005  *	Change VP configuration for vha
4006  *
4007  * Input:
4008  *	vha = adapter block pointer.
4009  *
4010  * Returns:
4011  *	qla2xxx local function return status code.
4012  *
4013  * Context:
4014  *	Kernel context.
4015  */
4016 int
4017 qla24xx_modify_vp_config(scsi_qla_host_t *vha)
4018 {
4019 	int		rval;
4020 	struct vp_config_entry_24xx *vpmod;
4021 	dma_addr_t	vpmod_dma;
4022 	struct qla_hw_data *ha = vha->hw;
4023 	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
4024 
4025 	/* This can be called by the parent */
4026 
4027 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10bb,
4028 	    "Entered %s.\n", __func__);
4029 
4030 	vpmod = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma);
4031 	if (!vpmod) {
4032 		ql_log(ql_log_warn, vha, 0x10bc,
4033 		    "Failed to allocate modify VP IOCB.\n");
4034 		return QLA_MEMORY_ALLOC_FAILED;
4035 	}
4036 
4037 	vpmod->entry_type = VP_CONFIG_IOCB_TYPE;
4038 	vpmod->entry_count = 1;
4039 	vpmod->command = VCT_COMMAND_MOD_ENABLE_VPS;
4040 	vpmod->vp_count = 1;
4041 	vpmod->vp_index1 = vha->vp_idx;
4042 	vpmod->options_idx1 = BIT_3|BIT_4|BIT_5;
4043 
4044 	qlt_modify_vp_config(vha, vpmod);
4045 
4046 	memcpy(vpmod->node_name_idx1, vha->node_name, WWN_SIZE);
4047 	memcpy(vpmod->port_name_idx1, vha->port_name, WWN_SIZE);
4048 	vpmod->entry_count = 1;
4049 
4050 	rval = qla2x00_issue_iocb(base_vha, vpmod, vpmod_dma, 0);
4051 	if (rval != QLA_SUCCESS) {
4052 		ql_dbg(ql_dbg_mbx, vha, 0x10bd,
4053 		    "Failed to issue VP config IOCB (%x).\n", rval);
4054 	} else if (vpmod->comp_status != 0) {
4055 		ql_dbg(ql_dbg_mbx, vha, 0x10be,
4056 		    "Failed to complete IOCB -- error status (%x).\n",
4057 		    vpmod->comp_status);
4058 		rval = QLA_FUNCTION_FAILED;
4059 	} else if (vpmod->comp_status != cpu_to_le16(CS_COMPLETE)) {
4060 		ql_dbg(ql_dbg_mbx, vha, 0x10bf,
4061 		    "Failed to complete IOCB -- completion status (%x).\n",
4062 		    le16_to_cpu(vpmod->comp_status));
4063 		rval = QLA_FUNCTION_FAILED;
4064 	} else {
4065 		/* EMPTY */
4066 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c0,
4067 		    "Done %s.\n", __func__);
4068 		fc_vport_set_state(vha->fc_vport, FC_VPORT_INITIALIZING);
4069 	}
4070 	dma_pool_free(ha->s_dma_pool, vpmod, vpmod_dma);
4071 
4072 	return rval;
4073 }
4074 
4075 /*
4076  * qla2x00_send_change_request
4077  *	Receive or disable RSCN request from fabric controller
4078  *
4079  * Input:
4080  *	ha = adapter block pointer
4081  *	format = registration format:
4082  *		0 - Reserved
4083  *		1 - Fabric detected registration
4084  *		2 - N_port detected registration
4085  *		3 - Full registration
4086  *		FF - clear registration
4087  *	vp_idx = Virtual port index
4088  *
4089  * Returns:
4090  *	qla2x00 local function return status code.
4091  *
4092  * Context:
4093  *	Kernel Context
4094  */
4095 
4096 int
4097 qla2x00_send_change_request(scsi_qla_host_t *vha, uint16_t format,
4098 			    uint16_t vp_idx)
4099 {
4100 	int rval;
4101 	mbx_cmd_t mc;
4102 	mbx_cmd_t *mcp = &mc;
4103 
4104 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c7,
4105 	    "Entered %s.\n", __func__);
4106 
4107 	mcp->mb[0] = MBC_SEND_CHANGE_REQUEST;
4108 	mcp->mb[1] = format;
4109 	mcp->mb[9] = vp_idx;
4110 	mcp->out_mb = MBX_9|MBX_1|MBX_0;
4111 	mcp->in_mb = MBX_0|MBX_1;
4112 	mcp->tov = MBX_TOV_SECONDS;
4113 	mcp->flags = 0;
4114 	rval = qla2x00_mailbox_command(vha, mcp);
4115 
4116 	if (rval == QLA_SUCCESS) {
4117 		if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
4118 			rval = BIT_1;
4119 		}
4120 	} else
4121 		rval = BIT_1;
4122 
4123 	return rval;
4124 }
4125 
4126 int
4127 qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
4128     uint32_t size)
4129 {
4130 	int rval;
4131 	mbx_cmd_t mc;
4132 	mbx_cmd_t *mcp = &mc;
4133 
4134 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1009,
4135 	    "Entered %s.\n", __func__);
4136 
4137 	if (MSW(addr) || IS_FWI2_CAPABLE(vha->hw)) {
4138 		mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
4139 		mcp->mb[8] = MSW(addr);
4140 		mcp->out_mb = MBX_8|MBX_0;
4141 	} else {
4142 		mcp->mb[0] = MBC_DUMP_RISC_RAM;
4143 		mcp->out_mb = MBX_0;
4144 	}
4145 	mcp->mb[1] = LSW(addr);
4146 	mcp->mb[2] = MSW(req_dma);
4147 	mcp->mb[3] = LSW(req_dma);
4148 	mcp->mb[6] = MSW(MSD(req_dma));
4149 	mcp->mb[7] = LSW(MSD(req_dma));
4150 	mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
4151 	if (IS_FWI2_CAPABLE(vha->hw)) {
4152 		mcp->mb[4] = MSW(size);
4153 		mcp->mb[5] = LSW(size);
4154 		mcp->out_mb |= MBX_5|MBX_4;
4155 	} else {
4156 		mcp->mb[4] = LSW(size);
4157 		mcp->out_mb |= MBX_4;
4158 	}
4159 
4160 	mcp->in_mb = MBX_0;
4161 	mcp->tov = MBX_TOV_SECONDS;
4162 	mcp->flags = 0;
4163 	rval = qla2x00_mailbox_command(vha, mcp);
4164 
4165 	if (rval != QLA_SUCCESS) {
4166 		ql_dbg(ql_dbg_mbx, vha, 0x1008,
4167 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4168 	} else {
4169 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1007,
4170 		    "Done %s.\n", __func__);
4171 	}
4172 
4173 	return rval;
4174 }
4175 /* 84XX Support **************************************************************/
4176 
4177 struct cs84xx_mgmt_cmd {
4178 	union {
4179 		struct verify_chip_entry_84xx req;
4180 		struct verify_chip_rsp_84xx rsp;
4181 	} p;
4182 };
4183 
4184 int
4185 qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status)
4186 {
4187 	int rval, retry;
4188 	struct cs84xx_mgmt_cmd *mn;
4189 	dma_addr_t mn_dma;
4190 	uint16_t options;
4191 	unsigned long flags;
4192 	struct qla_hw_data *ha = vha->hw;
4193 
4194 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c8,
4195 	    "Entered %s.\n", __func__);
4196 
4197 	mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
4198 	if (mn == NULL) {
4199 		return QLA_MEMORY_ALLOC_FAILED;
4200 	}
4201 
4202 	/* Force Update? */
4203 	options = ha->cs84xx->fw_update ? VCO_FORCE_UPDATE : 0;
4204 	/* Diagnostic firmware? */
4205 	/* options |= MENLO_DIAG_FW; */
4206 	/* We update the firmware with only one data sequence. */
4207 	options |= VCO_END_OF_DATA;
4208 
4209 	do {
4210 		retry = 0;
4211 		memset(mn, 0, sizeof(*mn));
4212 		mn->p.req.entry_type = VERIFY_CHIP_IOCB_TYPE;
4213 		mn->p.req.entry_count = 1;
4214 		mn->p.req.options = cpu_to_le16(options);
4215 
4216 		ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111c,
4217 		    "Dump of Verify Request.\n");
4218 		ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111e,
4219 		    (uint8_t *)mn, sizeof(*mn));
4220 
4221 		rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
4222 		if (rval != QLA_SUCCESS) {
4223 			ql_dbg(ql_dbg_mbx, vha, 0x10cb,
4224 			    "Failed to issue verify IOCB (%x).\n", rval);
4225 			goto verify_done;
4226 		}
4227 
4228 		ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1110,
4229 		    "Dump of Verify Response.\n");
4230 		ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1118,
4231 		    (uint8_t *)mn, sizeof(*mn));
4232 
4233 		status[0] = le16_to_cpu(mn->p.rsp.comp_status);
4234 		status[1] = status[0] == CS_VCS_CHIP_FAILURE ?
4235 		    le16_to_cpu(mn->p.rsp.failure_code) : 0;
4236 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ce,
4237 		    "cs=%x fc=%x.\n", status[0], status[1]);
4238 
4239 		if (status[0] != CS_COMPLETE) {
4240 			rval = QLA_FUNCTION_FAILED;
4241 			if (!(options & VCO_DONT_UPDATE_FW)) {
4242 				ql_dbg(ql_dbg_mbx, vha, 0x10cf,
4243 				    "Firmware update failed. Retrying "
4244 				    "without update firmware.\n");
4245 				options |= VCO_DONT_UPDATE_FW;
4246 				options &= ~VCO_FORCE_UPDATE;
4247 				retry = 1;
4248 			}
4249 		} else {
4250 			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d0,
4251 			    "Firmware updated to %x.\n",
4252 			    le32_to_cpu(mn->p.rsp.fw_ver));
4253 
4254 			/* NOTE: we only update OP firmware. */
4255 			spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
4256 			ha->cs84xx->op_fw_version =
4257 			    le32_to_cpu(mn->p.rsp.fw_ver);
4258 			spin_unlock_irqrestore(&ha->cs84xx->access_lock,
4259 			    flags);
4260 		}
4261 	} while (retry);
4262 
4263 verify_done:
4264 	dma_pool_free(ha->s_dma_pool, mn, mn_dma);
4265 
4266 	if (rval != QLA_SUCCESS) {
4267 		ql_dbg(ql_dbg_mbx, vha, 0x10d1,
4268 		    "Failed=%x.\n", rval);
4269 	} else {
4270 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d2,
4271 		    "Done %s.\n", __func__);
4272 	}
4273 
4274 	return rval;
4275 }
4276 
4277 int
4278 qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
4279 {
4280 	int rval;
4281 	unsigned long flags;
4282 	mbx_cmd_t mc;
4283 	mbx_cmd_t *mcp = &mc;
4284 	struct qla_hw_data *ha = vha->hw;
4285 
4286 	if (!ha->flags.fw_started)
4287 		return QLA_SUCCESS;
4288 
4289 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d3,
4290 	    "Entered %s.\n", __func__);
4291 
4292 	if (IS_SHADOW_REG_CAPABLE(ha))
4293 		req->options |= BIT_13;
4294 
4295 	mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
4296 	mcp->mb[1] = req->options;
4297 	mcp->mb[2] = MSW(LSD(req->dma));
4298 	mcp->mb[3] = LSW(LSD(req->dma));
4299 	mcp->mb[6] = MSW(MSD(req->dma));
4300 	mcp->mb[7] = LSW(MSD(req->dma));
4301 	mcp->mb[5] = req->length;
4302 	if (req->rsp)
4303 		mcp->mb[10] = req->rsp->id;
4304 	mcp->mb[12] = req->qos;
4305 	mcp->mb[11] = req->vp_idx;
4306 	mcp->mb[13] = req->rid;
4307 	if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
4308 		mcp->mb[15] = 0;
4309 
4310 	mcp->mb[4] = req->id;
4311 	/* que in ptr index */
4312 	mcp->mb[8] = 0;
4313 	/* que out ptr index */
4314 	mcp->mb[9] = *req->out_ptr = 0;
4315 	mcp->out_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7|
4316 			MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4317 	mcp->in_mb = MBX_0;
4318 	mcp->flags = MBX_DMA_OUT;
4319 	mcp->tov = MBX_TOV_SECONDS * 2;
4320 
4321 	if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha))
4322 		mcp->in_mb |= MBX_1;
4323 	if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
4324 		mcp->out_mb |= MBX_15;
4325 		/* debug q create issue in SR-IOV */
4326 		mcp->in_mb |= MBX_9 | MBX_8 | MBX_7;
4327 	}
4328 
4329 	spin_lock_irqsave(&ha->hardware_lock, flags);
4330 	if (!(req->options & BIT_0)) {
4331 		WRT_REG_DWORD(req->req_q_in, 0);
4332 		if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
4333 			WRT_REG_DWORD(req->req_q_out, 0);
4334 	}
4335 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
4336 
4337 	rval = qla2x00_mailbox_command(vha, mcp);
4338 	if (rval != QLA_SUCCESS) {
4339 		ql_dbg(ql_dbg_mbx, vha, 0x10d4,
4340 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4341 	} else {
4342 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d5,
4343 		    "Done %s.\n", __func__);
4344 	}
4345 
4346 	return rval;
4347 }
4348 
4349 int
4350 qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
4351 {
4352 	int rval;
4353 	unsigned long flags;
4354 	mbx_cmd_t mc;
4355 	mbx_cmd_t *mcp = &mc;
4356 	struct qla_hw_data *ha = vha->hw;
4357 
4358 	if (!ha->flags.fw_started)
4359 		return QLA_SUCCESS;
4360 
4361 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d6,
4362 	    "Entered %s.\n", __func__);
4363 
4364 	if (IS_SHADOW_REG_CAPABLE(ha))
4365 		rsp->options |= BIT_13;
4366 
4367 	mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
4368 	mcp->mb[1] = rsp->options;
4369 	mcp->mb[2] = MSW(LSD(rsp->dma));
4370 	mcp->mb[3] = LSW(LSD(rsp->dma));
4371 	mcp->mb[6] = MSW(MSD(rsp->dma));
4372 	mcp->mb[7] = LSW(MSD(rsp->dma));
4373 	mcp->mb[5] = rsp->length;
4374 	mcp->mb[14] = rsp->msix->entry;
4375 	mcp->mb[13] = rsp->rid;
4376 	if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
4377 		mcp->mb[15] = 0;
4378 
4379 	mcp->mb[4] = rsp->id;
4380 	/* que in ptr index */
4381 	mcp->mb[8] = *rsp->in_ptr = 0;
4382 	/* que out ptr index */
4383 	mcp->mb[9] = 0;
4384 	mcp->out_mb = MBX_14|MBX_13|MBX_9|MBX_8|MBX_7
4385 			|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4386 	mcp->in_mb = MBX_0;
4387 	mcp->flags = MBX_DMA_OUT;
4388 	mcp->tov = MBX_TOV_SECONDS * 2;
4389 
4390 	if (IS_QLA81XX(ha)) {
4391 		mcp->out_mb |= MBX_12|MBX_11|MBX_10;
4392 		mcp->in_mb |= MBX_1;
4393 	} else if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
4394 		mcp->out_mb |= MBX_15|MBX_12|MBX_11|MBX_10;
4395 		mcp->in_mb |= MBX_1;
4396 		/* debug q create issue in SR-IOV */
4397 		mcp->in_mb |= MBX_9 | MBX_8 | MBX_7;
4398 	}
4399 
4400 	spin_lock_irqsave(&ha->hardware_lock, flags);
4401 	if (!(rsp->options & BIT_0)) {
4402 		WRT_REG_DWORD(rsp->rsp_q_out, 0);
4403 		if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
4404 			WRT_REG_DWORD(rsp->rsp_q_in, 0);
4405 	}
4406 
4407 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
4408 
4409 	rval = qla2x00_mailbox_command(vha, mcp);
4410 	if (rval != QLA_SUCCESS) {
4411 		ql_dbg(ql_dbg_mbx, vha, 0x10d7,
4412 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4413 	} else {
4414 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d8,
4415 		    "Done %s.\n", __func__);
4416 	}
4417 
4418 	return rval;
4419 }
4420 
4421 int
4422 qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb)
4423 {
4424 	int rval;
4425 	mbx_cmd_t mc;
4426 	mbx_cmd_t *mcp = &mc;
4427 
4428 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d9,
4429 	    "Entered %s.\n", __func__);
4430 
4431 	mcp->mb[0] = MBC_IDC_ACK;
4432 	memcpy(&mcp->mb[1], mb, QLA_IDC_ACK_REGS * sizeof(uint16_t));
4433 	mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4434 	mcp->in_mb = MBX_0;
4435 	mcp->tov = MBX_TOV_SECONDS;
4436 	mcp->flags = 0;
4437 	rval = qla2x00_mailbox_command(vha, mcp);
4438 
4439 	if (rval != QLA_SUCCESS) {
4440 		ql_dbg(ql_dbg_mbx, vha, 0x10da,
4441 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4442 	} else {
4443 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10db,
4444 		    "Done %s.\n", __func__);
4445 	}
4446 
4447 	return rval;
4448 }
4449 
4450 int
4451 qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size)
4452 {
4453 	int rval;
4454 	mbx_cmd_t mc;
4455 	mbx_cmd_t *mcp = &mc;
4456 
4457 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10dc,
4458 	    "Entered %s.\n", __func__);
4459 
4460 	if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
4461 	    !IS_QLA27XX(vha->hw))
4462 		return QLA_FUNCTION_FAILED;
4463 
4464 	mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
4465 	mcp->mb[1] = FAC_OPT_CMD_GET_SECTOR_SIZE;
4466 	mcp->out_mb = MBX_1|MBX_0;
4467 	mcp->in_mb = MBX_1|MBX_0;
4468 	mcp->tov = MBX_TOV_SECONDS;
4469 	mcp->flags = 0;
4470 	rval = qla2x00_mailbox_command(vha, mcp);
4471 
4472 	if (rval != QLA_SUCCESS) {
4473 		ql_dbg(ql_dbg_mbx, vha, 0x10dd,
4474 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
4475 		    rval, mcp->mb[0], mcp->mb[1]);
4476 	} else {
4477 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10de,
4478 		    "Done %s.\n", __func__);
4479 		*sector_size = mcp->mb[1];
4480 	}
4481 
4482 	return rval;
4483 }
4484 
4485 int
4486 qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable)
4487 {
4488 	int rval;
4489 	mbx_cmd_t mc;
4490 	mbx_cmd_t *mcp = &mc;
4491 
4492 	if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
4493 	    !IS_QLA27XX(vha->hw))
4494 		return QLA_FUNCTION_FAILED;
4495 
4496 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10df,
4497 	    "Entered %s.\n", __func__);
4498 
4499 	mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
4500 	mcp->mb[1] = enable ? FAC_OPT_CMD_WRITE_ENABLE :
4501 	    FAC_OPT_CMD_WRITE_PROTECT;
4502 	mcp->out_mb = MBX_1|MBX_0;
4503 	mcp->in_mb = MBX_1|MBX_0;
4504 	mcp->tov = MBX_TOV_SECONDS;
4505 	mcp->flags = 0;
4506 	rval = qla2x00_mailbox_command(vha, mcp);
4507 
4508 	if (rval != QLA_SUCCESS) {
4509 		ql_dbg(ql_dbg_mbx, vha, 0x10e0,
4510 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
4511 		    rval, mcp->mb[0], mcp->mb[1]);
4512 	} else {
4513 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e1,
4514 		    "Done %s.\n", __func__);
4515 	}
4516 
4517 	return rval;
4518 }
4519 
4520 int
4521 qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish)
4522 {
4523 	int rval;
4524 	mbx_cmd_t mc;
4525 	mbx_cmd_t *mcp = &mc;
4526 
4527 	if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
4528 	    !IS_QLA27XX(vha->hw))
4529 		return QLA_FUNCTION_FAILED;
4530 
4531 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2,
4532 	    "Entered %s.\n", __func__);
4533 
4534 	mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
4535 	mcp->mb[1] = FAC_OPT_CMD_ERASE_SECTOR;
4536 	mcp->mb[2] = LSW(start);
4537 	mcp->mb[3] = MSW(start);
4538 	mcp->mb[4] = LSW(finish);
4539 	mcp->mb[5] = MSW(finish);
4540 	mcp->out_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4541 	mcp->in_mb = MBX_2|MBX_1|MBX_0;
4542 	mcp->tov = MBX_TOV_SECONDS;
4543 	mcp->flags = 0;
4544 	rval = qla2x00_mailbox_command(vha, mcp);
4545 
4546 	if (rval != QLA_SUCCESS) {
4547 		ql_dbg(ql_dbg_mbx, vha, 0x10e3,
4548 		    "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
4549 		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
4550 	} else {
4551 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4,
4552 		    "Done %s.\n", __func__);
4553 	}
4554 
4555 	return rval;
4556 }
4557 
4558 int
4559 qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)
4560 {
4561 	int rval = 0;
4562 	mbx_cmd_t mc;
4563 	mbx_cmd_t *mcp = &mc;
4564 
4565 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e5,
4566 	    "Entered %s.\n", __func__);
4567 
4568 	mcp->mb[0] = MBC_RESTART_MPI_FW;
4569 	mcp->out_mb = MBX_0;
4570 	mcp->in_mb = MBX_0|MBX_1;
4571 	mcp->tov = MBX_TOV_SECONDS;
4572 	mcp->flags = 0;
4573 	rval = qla2x00_mailbox_command(vha, mcp);
4574 
4575 	if (rval != QLA_SUCCESS) {
4576 		ql_dbg(ql_dbg_mbx, vha, 0x10e6,
4577 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
4578 		    rval, mcp->mb[0], mcp->mb[1]);
4579 	} else {
4580 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e7,
4581 		    "Done %s.\n", __func__);
4582 	}
4583 
4584 	return rval;
4585 }
4586 
4587 int
4588 qla82xx_set_driver_version(scsi_qla_host_t *vha, char *version)
4589 {
4590 	int rval;
4591 	mbx_cmd_t mc;
4592 	mbx_cmd_t *mcp = &mc;
4593 	int i;
4594 	int len;
4595 	uint16_t *str;
4596 	struct qla_hw_data *ha = vha->hw;
4597 
4598 	if (!IS_P3P_TYPE(ha))
4599 		return QLA_FUNCTION_FAILED;
4600 
4601 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117b,
4602 	    "Entered %s.\n", __func__);
4603 
4604 	str = (void *)version;
4605 	len = strlen(version);
4606 
4607 	mcp->mb[0] = MBC_SET_RNID_PARAMS;
4608 	mcp->mb[1] = RNID_TYPE_SET_VERSION << 8;
4609 	mcp->out_mb = MBX_1|MBX_0;
4610 	for (i = 4; i < 16 && len; i++, str++, len -= 2) {
4611 		mcp->mb[i] = cpu_to_le16p(str);
4612 		mcp->out_mb |= 1<<i;
4613 	}
4614 	for (; i < 16; i++) {
4615 		mcp->mb[i] = 0;
4616 		mcp->out_mb |= 1<<i;
4617 	}
4618 	mcp->in_mb = MBX_1|MBX_0;
4619 	mcp->tov = MBX_TOV_SECONDS;
4620 	mcp->flags = 0;
4621 	rval = qla2x00_mailbox_command(vha, mcp);
4622 
4623 	if (rval != QLA_SUCCESS) {
4624 		ql_dbg(ql_dbg_mbx, vha, 0x117c,
4625 		    "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
4626 	} else {
4627 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117d,
4628 		    "Done %s.\n", __func__);
4629 	}
4630 
4631 	return rval;
4632 }
4633 
4634 int
4635 qla25xx_set_driver_version(scsi_qla_host_t *vha, char *version)
4636 {
4637 	int rval;
4638 	mbx_cmd_t mc;
4639 	mbx_cmd_t *mcp = &mc;
4640 	int len;
4641 	uint16_t dwlen;
4642 	uint8_t *str;
4643 	dma_addr_t str_dma;
4644 	struct qla_hw_data *ha = vha->hw;
4645 
4646 	if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha) ||
4647 	    IS_P3P_TYPE(ha))
4648 		return QLA_FUNCTION_FAILED;
4649 
4650 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117e,
4651 	    "Entered %s.\n", __func__);
4652 
4653 	str = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &str_dma);
4654 	if (!str) {
4655 		ql_log(ql_log_warn, vha, 0x117f,
4656 		    "Failed to allocate driver version param.\n");
4657 		return QLA_MEMORY_ALLOC_FAILED;
4658 	}
4659 
4660 	memcpy(str, "\x7\x3\x11\x0", 4);
4661 	dwlen = str[0];
4662 	len = dwlen * 4 - 4;
4663 	memset(str + 4, 0, len);
4664 	if (len > strlen(version))
4665 		len = strlen(version);
4666 	memcpy(str + 4, version, len);
4667 
4668 	mcp->mb[0] = MBC_SET_RNID_PARAMS;
4669 	mcp->mb[1] = RNID_TYPE_SET_VERSION << 8 | dwlen;
4670 	mcp->mb[2] = MSW(LSD(str_dma));
4671 	mcp->mb[3] = LSW(LSD(str_dma));
4672 	mcp->mb[6] = MSW(MSD(str_dma));
4673 	mcp->mb[7] = LSW(MSD(str_dma));
4674 	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
4675 	mcp->in_mb = MBX_1|MBX_0;
4676 	mcp->tov = MBX_TOV_SECONDS;
4677 	mcp->flags = 0;
4678 	rval = qla2x00_mailbox_command(vha, mcp);
4679 
4680 	if (rval != QLA_SUCCESS) {
4681 		ql_dbg(ql_dbg_mbx, vha, 0x1180,
4682 		    "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
4683 	} else {
4684 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1181,
4685 		    "Done %s.\n", __func__);
4686 	}
4687 
4688 	dma_pool_free(ha->s_dma_pool, str, str_dma);
4689 
4690 	return rval;
4691 }
4692 
4693 int
4694 qla24xx_get_port_login_templ(scsi_qla_host_t *vha, dma_addr_t buf_dma,
4695 			     void *buf, uint16_t bufsiz)
4696 {
4697 	int rval, i;
4698 	mbx_cmd_t mc;
4699 	mbx_cmd_t *mcp = &mc;
4700 	uint32_t	*bp;
4701 
4702 	if (!IS_FWI2_CAPABLE(vha->hw))
4703 		return QLA_FUNCTION_FAILED;
4704 
4705 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159,
4706 	    "Entered %s.\n", __func__);
4707 
4708 	mcp->mb[0] = MBC_GET_RNID_PARAMS;
4709 	mcp->mb[1] = RNID_TYPE_PORT_LOGIN << 8;
4710 	mcp->mb[2] = MSW(buf_dma);
4711 	mcp->mb[3] = LSW(buf_dma);
4712 	mcp->mb[6] = MSW(MSD(buf_dma));
4713 	mcp->mb[7] = LSW(MSD(buf_dma));
4714 	mcp->mb[8] = bufsiz/4;
4715 	mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4716 	mcp->in_mb = MBX_1|MBX_0;
4717 	mcp->tov = MBX_TOV_SECONDS;
4718 	mcp->flags = 0;
4719 	rval = qla2x00_mailbox_command(vha, mcp);
4720 
4721 	if (rval != QLA_SUCCESS) {
4722 		ql_dbg(ql_dbg_mbx, vha, 0x115a,
4723 		    "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
4724 	} else {
4725 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b,
4726 		    "Done %s.\n", __func__);
4727 		bp = (uint32_t *) buf;
4728 		for (i = 0; i < (bufsiz-4)/4; i++, bp++)
4729 			*bp = le32_to_cpu(*bp);
4730 	}
4731 
4732 	return rval;
4733 }
4734 
4735 static int
4736 qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp)
4737 {
4738 	int rval;
4739 	mbx_cmd_t mc;
4740 	mbx_cmd_t *mcp = &mc;
4741 
4742 	if (!IS_FWI2_CAPABLE(vha->hw))
4743 		return QLA_FUNCTION_FAILED;
4744 
4745 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159,
4746 	    "Entered %s.\n", __func__);
4747 
4748 	mcp->mb[0] = MBC_GET_RNID_PARAMS;
4749 	mcp->mb[1] = RNID_TYPE_ASIC_TEMP << 8;
4750 	mcp->out_mb = MBX_1|MBX_0;
4751 	mcp->in_mb = MBX_1|MBX_0;
4752 	mcp->tov = MBX_TOV_SECONDS;
4753 	mcp->flags = 0;
4754 	rval = qla2x00_mailbox_command(vha, mcp);
4755 	*temp = mcp->mb[1];
4756 
4757 	if (rval != QLA_SUCCESS) {
4758 		ql_dbg(ql_dbg_mbx, vha, 0x115a,
4759 		    "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
4760 	} else {
4761 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b,
4762 		    "Done %s.\n", __func__);
4763 	}
4764 
4765 	return rval;
4766 }
4767 
4768 int
4769 qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
4770 	uint16_t dev, uint16_t off, uint16_t len, uint16_t opt)
4771 {
4772 	int rval;
4773 	mbx_cmd_t mc;
4774 	mbx_cmd_t *mcp = &mc;
4775 	struct qla_hw_data *ha = vha->hw;
4776 
4777 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
4778 	    "Entered %s.\n", __func__);
4779 
4780 	if (!IS_FWI2_CAPABLE(ha))
4781 		return QLA_FUNCTION_FAILED;
4782 
4783 	if (len == 1)
4784 		opt |= BIT_0;
4785 
4786 	mcp->mb[0] = MBC_READ_SFP;
4787 	mcp->mb[1] = dev;
4788 	mcp->mb[2] = MSW(sfp_dma);
4789 	mcp->mb[3] = LSW(sfp_dma);
4790 	mcp->mb[6] = MSW(MSD(sfp_dma));
4791 	mcp->mb[7] = LSW(MSD(sfp_dma));
4792 	mcp->mb[8] = len;
4793 	mcp->mb[9] = off;
4794 	mcp->mb[10] = opt;
4795 	mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
4796 	mcp->in_mb = MBX_1|MBX_0;
4797 	mcp->tov = MBX_TOV_SECONDS;
4798 	mcp->flags = 0;
4799 	rval = qla2x00_mailbox_command(vha, mcp);
4800 
4801 	if (opt & BIT_0)
4802 		*sfp = mcp->mb[1];
4803 
4804 	if (rval != QLA_SUCCESS) {
4805 		ql_dbg(ql_dbg_mbx, vha, 0x10e9,
4806 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4807 		if (mcp->mb[0] == MBS_COMMAND_ERROR &&
4808 		    mcp->mb[1] == 0x22)
4809 			/* sfp is not there */
4810 			rval = QLA_INTERFACE_ERROR;
4811 	} else {
4812 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
4813 		    "Done %s.\n", __func__);
4814 	}
4815 
4816 	return rval;
4817 }
4818 
4819 int
4820 qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
4821 	uint16_t dev, uint16_t off, uint16_t len, uint16_t opt)
4822 {
4823 	int rval;
4824 	mbx_cmd_t mc;
4825 	mbx_cmd_t *mcp = &mc;
4826 	struct qla_hw_data *ha = vha->hw;
4827 
4828 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10eb,
4829 	    "Entered %s.\n", __func__);
4830 
4831 	if (!IS_FWI2_CAPABLE(ha))
4832 		return QLA_FUNCTION_FAILED;
4833 
4834 	if (len == 1)
4835 		opt |= BIT_0;
4836 
4837 	if (opt & BIT_0)
4838 		len = *sfp;
4839 
4840 	mcp->mb[0] = MBC_WRITE_SFP;
4841 	mcp->mb[1] = dev;
4842 	mcp->mb[2] = MSW(sfp_dma);
4843 	mcp->mb[3] = LSW(sfp_dma);
4844 	mcp->mb[6] = MSW(MSD(sfp_dma));
4845 	mcp->mb[7] = LSW(MSD(sfp_dma));
4846 	mcp->mb[8] = len;
4847 	mcp->mb[9] = off;
4848 	mcp->mb[10] = opt;
4849 	mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
4850 	mcp->in_mb = MBX_1|MBX_0;
4851 	mcp->tov = MBX_TOV_SECONDS;
4852 	mcp->flags = 0;
4853 	rval = qla2x00_mailbox_command(vha, mcp);
4854 
4855 	if (rval != QLA_SUCCESS) {
4856 		ql_dbg(ql_dbg_mbx, vha, 0x10ec,
4857 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4858 	} else {
4859 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ed,
4860 		    "Done %s.\n", __func__);
4861 	}
4862 
4863 	return rval;
4864 }
4865 
4866 int
4867 qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma,
4868     uint16_t size_in_bytes, uint16_t *actual_size)
4869 {
4870 	int rval;
4871 	mbx_cmd_t mc;
4872 	mbx_cmd_t *mcp = &mc;
4873 
4874 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ee,
4875 	    "Entered %s.\n", __func__);
4876 
4877 	if (!IS_CNA_CAPABLE(vha->hw))
4878 		return QLA_FUNCTION_FAILED;
4879 
4880 	mcp->mb[0] = MBC_GET_XGMAC_STATS;
4881 	mcp->mb[2] = MSW(stats_dma);
4882 	mcp->mb[3] = LSW(stats_dma);
4883 	mcp->mb[6] = MSW(MSD(stats_dma));
4884 	mcp->mb[7] = LSW(MSD(stats_dma));
4885 	mcp->mb[8] = size_in_bytes >> 2;
4886 	mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
4887 	mcp->in_mb = MBX_2|MBX_1|MBX_0;
4888 	mcp->tov = MBX_TOV_SECONDS;
4889 	mcp->flags = 0;
4890 	rval = qla2x00_mailbox_command(vha, mcp);
4891 
4892 	if (rval != QLA_SUCCESS) {
4893 		ql_dbg(ql_dbg_mbx, vha, 0x10ef,
4894 		    "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
4895 		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
4896 	} else {
4897 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f0,
4898 		    "Done %s.\n", __func__);
4899 
4900 
4901 		*actual_size = mcp->mb[2] << 2;
4902 	}
4903 
4904 	return rval;
4905 }
4906 
4907 int
4908 qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma,
4909     uint16_t size)
4910 {
4911 	int rval;
4912 	mbx_cmd_t mc;
4913 	mbx_cmd_t *mcp = &mc;
4914 
4915 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f1,
4916 	    "Entered %s.\n", __func__);
4917 
4918 	if (!IS_CNA_CAPABLE(vha->hw))
4919 		return QLA_FUNCTION_FAILED;
4920 
4921 	mcp->mb[0] = MBC_GET_DCBX_PARAMS;
4922 	mcp->mb[1] = 0;
4923 	mcp->mb[2] = MSW(tlv_dma);
4924 	mcp->mb[3] = LSW(tlv_dma);
4925 	mcp->mb[6] = MSW(MSD(tlv_dma));
4926 	mcp->mb[7] = LSW(MSD(tlv_dma));
4927 	mcp->mb[8] = size;
4928 	mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
4929 	mcp->in_mb = MBX_2|MBX_1|MBX_0;
4930 	mcp->tov = MBX_TOV_SECONDS;
4931 	mcp->flags = 0;
4932 	rval = qla2x00_mailbox_command(vha, mcp);
4933 
4934 	if (rval != QLA_SUCCESS) {
4935 		ql_dbg(ql_dbg_mbx, vha, 0x10f2,
4936 		    "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
4937 		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
4938 	} else {
4939 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f3,
4940 		    "Done %s.\n", __func__);
4941 	}
4942 
4943 	return rval;
4944 }
4945 
4946 int
4947 qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data)
4948 {
4949 	int rval;
4950 	mbx_cmd_t mc;
4951 	mbx_cmd_t *mcp = &mc;
4952 
4953 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f4,
4954 	    "Entered %s.\n", __func__);
4955 
4956 	if (!IS_FWI2_CAPABLE(vha->hw))
4957 		return QLA_FUNCTION_FAILED;
4958 
4959 	mcp->mb[0] = MBC_READ_RAM_EXTENDED;
4960 	mcp->mb[1] = LSW(risc_addr);
4961 	mcp->mb[8] = MSW(risc_addr);
4962 	mcp->out_mb = MBX_8|MBX_1|MBX_0;
4963 	mcp->in_mb = MBX_3|MBX_2|MBX_0;
4964 	mcp->tov = 30;
4965 	mcp->flags = 0;
4966 	rval = qla2x00_mailbox_command(vha, mcp);
4967 	if (rval != QLA_SUCCESS) {
4968 		ql_dbg(ql_dbg_mbx, vha, 0x10f5,
4969 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4970 	} else {
4971 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f6,
4972 		    "Done %s.\n", __func__);
4973 		*data = mcp->mb[3] << 16 | mcp->mb[2];
4974 	}
4975 
4976 	return rval;
4977 }
4978 
4979 int
4980 qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
4981 	uint16_t *mresp)
4982 {
4983 	int rval;
4984 	mbx_cmd_t mc;
4985 	mbx_cmd_t *mcp = &mc;
4986 
4987 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f7,
4988 	    "Entered %s.\n", __func__);
4989 
4990 	memset(mcp->mb, 0 , sizeof(mcp->mb));
4991 	mcp->mb[0] = MBC_DIAGNOSTIC_LOOP_BACK;
4992 	mcp->mb[1] = mreq->options | BIT_6;	// BIT_6 specifies 64 bit addressing
4993 
4994 	/* transfer count */
4995 	mcp->mb[10] = LSW(mreq->transfer_size);
4996 	mcp->mb[11] = MSW(mreq->transfer_size);
4997 
4998 	/* send data address */
4999 	mcp->mb[14] = LSW(mreq->send_dma);
5000 	mcp->mb[15] = MSW(mreq->send_dma);
5001 	mcp->mb[20] = LSW(MSD(mreq->send_dma));
5002 	mcp->mb[21] = MSW(MSD(mreq->send_dma));
5003 
5004 	/* receive data address */
5005 	mcp->mb[16] = LSW(mreq->rcv_dma);
5006 	mcp->mb[17] = MSW(mreq->rcv_dma);
5007 	mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
5008 	mcp->mb[7] = MSW(MSD(mreq->rcv_dma));
5009 
5010 	/* Iteration count */
5011 	mcp->mb[18] = LSW(mreq->iteration_count);
5012 	mcp->mb[19] = MSW(mreq->iteration_count);
5013 
5014 	mcp->out_mb = MBX_21|MBX_20|MBX_19|MBX_18|MBX_17|MBX_16|MBX_15|
5015 	    MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
5016 	if (IS_CNA_CAPABLE(vha->hw))
5017 		mcp->out_mb |= MBX_2;
5018 	mcp->in_mb = MBX_19|MBX_18|MBX_3|MBX_2|MBX_1|MBX_0;
5019 
5020 	mcp->buf_size = mreq->transfer_size;
5021 	mcp->tov = MBX_TOV_SECONDS;
5022 	mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5023 
5024 	rval = qla2x00_mailbox_command(vha, mcp);
5025 
5026 	if (rval != QLA_SUCCESS) {
5027 		ql_dbg(ql_dbg_mbx, vha, 0x10f8,
5028 		    "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[18]=%x "
5029 		    "mb[19]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2],
5030 		    mcp->mb[3], mcp->mb[18], mcp->mb[19]);
5031 	} else {
5032 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f9,
5033 		    "Done %s.\n", __func__);
5034 	}
5035 
5036 	/* Copy mailbox information */
5037 	memcpy( mresp, mcp->mb, 64);
5038 	return rval;
5039 }
5040 
5041 int
5042 qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
5043 	uint16_t *mresp)
5044 {
5045 	int rval;
5046 	mbx_cmd_t mc;
5047 	mbx_cmd_t *mcp = &mc;
5048 	struct qla_hw_data *ha = vha->hw;
5049 
5050 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fa,
5051 	    "Entered %s.\n", __func__);
5052 
5053 	memset(mcp->mb, 0 , sizeof(mcp->mb));
5054 	mcp->mb[0] = MBC_DIAGNOSTIC_ECHO;
5055 	/* BIT_6 specifies 64bit address */
5056 	mcp->mb[1] = mreq->options | BIT_15 | BIT_6;
5057 	if (IS_CNA_CAPABLE(ha)) {
5058 		mcp->mb[2] = vha->fcoe_fcf_idx;
5059 	}
5060 	mcp->mb[16] = LSW(mreq->rcv_dma);
5061 	mcp->mb[17] = MSW(mreq->rcv_dma);
5062 	mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
5063 	mcp->mb[7] = MSW(MSD(mreq->rcv_dma));
5064 
5065 	mcp->mb[10] = LSW(mreq->transfer_size);
5066 
5067 	mcp->mb[14] = LSW(mreq->send_dma);
5068 	mcp->mb[15] = MSW(mreq->send_dma);
5069 	mcp->mb[20] = LSW(MSD(mreq->send_dma));
5070 	mcp->mb[21] = MSW(MSD(mreq->send_dma));
5071 
5072 	mcp->out_mb = MBX_21|MBX_20|MBX_17|MBX_16|MBX_15|
5073 	    MBX_14|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
5074 	if (IS_CNA_CAPABLE(ha))
5075 		mcp->out_mb |= MBX_2;
5076 
5077 	mcp->in_mb = MBX_0;
5078 	if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
5079 	    IS_CNA_CAPABLE(ha) || IS_QLA2031(ha))
5080 		mcp->in_mb |= MBX_1;
5081 	if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha))
5082 		mcp->in_mb |= MBX_3;
5083 
5084 	mcp->tov = MBX_TOV_SECONDS;
5085 	mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5086 	mcp->buf_size = mreq->transfer_size;
5087 
5088 	rval = qla2x00_mailbox_command(vha, mcp);
5089 
5090 	if (rval != QLA_SUCCESS) {
5091 		ql_dbg(ql_dbg_mbx, vha, 0x10fb,
5092 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
5093 		    rval, mcp->mb[0], mcp->mb[1]);
5094 	} else {
5095 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fc,
5096 		    "Done %s.\n", __func__);
5097 	}
5098 
5099 	/* Copy mailbox information */
5100 	memcpy(mresp, mcp->mb, 64);
5101 	return rval;
5102 }
5103 
5104 int
5105 qla84xx_reset_chip(scsi_qla_host_t *vha, uint16_t enable_diagnostic)
5106 {
5107 	int rval;
5108 	mbx_cmd_t mc;
5109 	mbx_cmd_t *mcp = &mc;
5110 
5111 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fd,
5112 	    "Entered %s enable_diag=%d.\n", __func__, enable_diagnostic);
5113 
5114 	mcp->mb[0] = MBC_ISP84XX_RESET;
5115 	mcp->mb[1] = enable_diagnostic;
5116 	mcp->out_mb = MBX_1|MBX_0;
5117 	mcp->in_mb = MBX_1|MBX_0;
5118 	mcp->tov = MBX_TOV_SECONDS;
5119 	mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5120 	rval = qla2x00_mailbox_command(vha, mcp);
5121 
5122 	if (rval != QLA_SUCCESS)
5123 		ql_dbg(ql_dbg_mbx, vha, 0x10fe, "Failed=%x.\n", rval);
5124 	else
5125 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ff,
5126 		    "Done %s.\n", __func__);
5127 
5128 	return rval;
5129 }
5130 
5131 int
5132 qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
5133 {
5134 	int rval;
5135 	mbx_cmd_t mc;
5136 	mbx_cmd_t *mcp = &mc;
5137 
5138 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1100,
5139 	    "Entered %s.\n", __func__);
5140 
5141 	if (!IS_FWI2_CAPABLE(vha->hw))
5142 		return QLA_FUNCTION_FAILED;
5143 
5144 	mcp->mb[0] = MBC_WRITE_RAM_WORD_EXTENDED;
5145 	mcp->mb[1] = LSW(risc_addr);
5146 	mcp->mb[2] = LSW(data);
5147 	mcp->mb[3] = MSW(data);
5148 	mcp->mb[8] = MSW(risc_addr);
5149 	mcp->out_mb = MBX_8|MBX_3|MBX_2|MBX_1|MBX_0;
5150 	mcp->in_mb = MBX_0;
5151 	mcp->tov = 30;
5152 	mcp->flags = 0;
5153 	rval = qla2x00_mailbox_command(vha, mcp);
5154 	if (rval != QLA_SUCCESS) {
5155 		ql_dbg(ql_dbg_mbx, vha, 0x1101,
5156 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5157 	} else {
5158 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1102,
5159 		    "Done %s.\n", __func__);
5160 	}
5161 
5162 	return rval;
5163 }
5164 
5165 int
5166 qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb)
5167 {
5168 	int rval;
5169 	uint32_t stat, timer;
5170 	uint16_t mb0 = 0;
5171 	struct qla_hw_data *ha = vha->hw;
5172 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
5173 
5174 	rval = QLA_SUCCESS;
5175 
5176 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1103,
5177 	    "Entered %s.\n", __func__);
5178 
5179 	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
5180 
5181 	/* Write the MBC data to the registers */
5182 	WRT_REG_WORD(&reg->mailbox0, MBC_WRITE_MPI_REGISTER);
5183 	WRT_REG_WORD(&reg->mailbox1, mb[0]);
5184 	WRT_REG_WORD(&reg->mailbox2, mb[1]);
5185 	WRT_REG_WORD(&reg->mailbox3, mb[2]);
5186 	WRT_REG_WORD(&reg->mailbox4, mb[3]);
5187 
5188 	WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
5189 
5190 	/* Poll for MBC interrupt */
5191 	for (timer = 6000000; timer; timer--) {
5192 		/* Check for pending interrupts. */
5193 		stat = RD_REG_DWORD(&reg->host_status);
5194 		if (stat & HSRX_RISC_INT) {
5195 			stat &= 0xff;
5196 
5197 			if (stat == 0x1 || stat == 0x2 ||
5198 			    stat == 0x10 || stat == 0x11) {
5199 				set_bit(MBX_INTERRUPT,
5200 				    &ha->mbx_cmd_flags);
5201 				mb0 = RD_REG_WORD(&reg->mailbox0);
5202 				WRT_REG_DWORD(&reg->hccr,
5203 				    HCCRX_CLR_RISC_INT);
5204 				RD_REG_DWORD(&reg->hccr);
5205 				break;
5206 			}
5207 		}
5208 		udelay(5);
5209 	}
5210 
5211 	if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags))
5212 		rval = mb0 & MBS_MASK;
5213 	else
5214 		rval = QLA_FUNCTION_FAILED;
5215 
5216 	if (rval != QLA_SUCCESS) {
5217 		ql_dbg(ql_dbg_mbx, vha, 0x1104,
5218 		    "Failed=%x mb[0]=%x.\n", rval, mb[0]);
5219 	} else {
5220 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1105,
5221 		    "Done %s.\n", __func__);
5222 	}
5223 
5224 	return rval;
5225 }
5226 
5227 int
5228 qla2x00_get_data_rate(scsi_qla_host_t *vha)
5229 {
5230 	int rval;
5231 	mbx_cmd_t mc;
5232 	mbx_cmd_t *mcp = &mc;
5233 	struct qla_hw_data *ha = vha->hw;
5234 
5235 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106,
5236 	    "Entered %s.\n", __func__);
5237 
5238 	if (!IS_FWI2_CAPABLE(ha))
5239 		return QLA_FUNCTION_FAILED;
5240 
5241 	mcp->mb[0] = MBC_DATA_RATE;
5242 	mcp->mb[1] = 0;
5243 	mcp->out_mb = MBX_1|MBX_0;
5244 	mcp->in_mb = MBX_2|MBX_1|MBX_0;
5245 	if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
5246 		mcp->in_mb |= MBX_3;
5247 	mcp->tov = MBX_TOV_SECONDS;
5248 	mcp->flags = 0;
5249 	rval = qla2x00_mailbox_command(vha, mcp);
5250 	if (rval != QLA_SUCCESS) {
5251 		ql_dbg(ql_dbg_mbx, vha, 0x1107,
5252 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5253 	} else {
5254 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108,
5255 		    "Done %s.\n", __func__);
5256 		if (mcp->mb[1] != 0x7)
5257 			ha->link_data_rate = mcp->mb[1];
5258 	}
5259 
5260 	return rval;
5261 }
5262 
5263 int
5264 qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb)
5265 {
5266 	int rval;
5267 	mbx_cmd_t mc;
5268 	mbx_cmd_t *mcp = &mc;
5269 	struct qla_hw_data *ha = vha->hw;
5270 
5271 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1109,
5272 	    "Entered %s.\n", __func__);
5273 
5274 	if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA8044(ha) &&
5275 	    !IS_QLA27XX(ha))
5276 		return QLA_FUNCTION_FAILED;
5277 	mcp->mb[0] = MBC_GET_PORT_CONFIG;
5278 	mcp->out_mb = MBX_0;
5279 	mcp->in_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5280 	mcp->tov = MBX_TOV_SECONDS;
5281 	mcp->flags = 0;
5282 
5283 	rval = qla2x00_mailbox_command(vha, mcp);
5284 
5285 	if (rval != QLA_SUCCESS) {
5286 		ql_dbg(ql_dbg_mbx, vha, 0x110a,
5287 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5288 	} else {
5289 		/* Copy all bits to preserve original value */
5290 		memcpy(mb, &mcp->mb[1], sizeof(uint16_t) * 4);
5291 
5292 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110b,
5293 		    "Done %s.\n", __func__);
5294 	}
5295 	return rval;
5296 }
5297 
5298 int
5299 qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb)
5300 {
5301 	int rval;
5302 	mbx_cmd_t mc;
5303 	mbx_cmd_t *mcp = &mc;
5304 
5305 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110c,
5306 	    "Entered %s.\n", __func__);
5307 
5308 	mcp->mb[0] = MBC_SET_PORT_CONFIG;
5309 	/* Copy all bits to preserve original setting */
5310 	memcpy(&mcp->mb[1], mb, sizeof(uint16_t) * 4);
5311 	mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5312 	mcp->in_mb = MBX_0;
5313 	mcp->tov = MBX_TOV_SECONDS;
5314 	mcp->flags = 0;
5315 	rval = qla2x00_mailbox_command(vha, mcp);
5316 
5317 	if (rval != QLA_SUCCESS) {
5318 		ql_dbg(ql_dbg_mbx, vha, 0x110d,
5319 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5320 	} else
5321 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110e,
5322 		    "Done %s.\n", __func__);
5323 
5324 	return rval;
5325 }
5326 
5327 
5328 int
5329 qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority,
5330 		uint16_t *mb)
5331 {
5332 	int rval;
5333 	mbx_cmd_t mc;
5334 	mbx_cmd_t *mcp = &mc;
5335 	struct qla_hw_data *ha = vha->hw;
5336 
5337 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110f,
5338 	    "Entered %s.\n", __func__);
5339 
5340 	if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
5341 		return QLA_FUNCTION_FAILED;
5342 
5343 	mcp->mb[0] = MBC_PORT_PARAMS;
5344 	mcp->mb[1] = loop_id;
5345 	if (ha->flags.fcp_prio_enabled)
5346 		mcp->mb[2] = BIT_1;
5347 	else
5348 		mcp->mb[2] = BIT_2;
5349 	mcp->mb[4] = priority & 0xf;
5350 	mcp->mb[9] = vha->vp_idx;
5351 	mcp->out_mb = MBX_9|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5352 	mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0;
5353 	mcp->tov = 30;
5354 	mcp->flags = 0;
5355 	rval = qla2x00_mailbox_command(vha, mcp);
5356 	if (mb != NULL) {
5357 		mb[0] = mcp->mb[0];
5358 		mb[1] = mcp->mb[1];
5359 		mb[3] = mcp->mb[3];
5360 		mb[4] = mcp->mb[4];
5361 	}
5362 
5363 	if (rval != QLA_SUCCESS) {
5364 		ql_dbg(ql_dbg_mbx, vha, 0x10cd, "Failed=%x.\n", rval);
5365 	} else {
5366 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10cc,
5367 		    "Done %s.\n", __func__);
5368 	}
5369 
5370 	return rval;
5371 }
5372 
5373 int
5374 qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp)
5375 {
5376 	int rval = QLA_FUNCTION_FAILED;
5377 	struct qla_hw_data *ha = vha->hw;
5378 	uint8_t byte;
5379 
5380 	if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha)) {
5381 		ql_dbg(ql_dbg_mbx, vha, 0x1150,
5382 		    "Thermal not supported by this card.\n");
5383 		return rval;
5384 	}
5385 
5386 	if (IS_QLA25XX(ha)) {
5387 		if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
5388 		    ha->pdev->subsystem_device == 0x0175) {
5389 			rval = qla2x00_read_sfp(vha, 0, &byte,
5390 			    0x98, 0x1, 1, BIT_13|BIT_0);
5391 			*temp = byte;
5392 			return rval;
5393 		}
5394 		if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
5395 		    ha->pdev->subsystem_device == 0x338e) {
5396 			rval = qla2x00_read_sfp(vha, 0, &byte,
5397 			    0x98, 0x1, 1, BIT_15|BIT_14|BIT_0);
5398 			*temp = byte;
5399 			return rval;
5400 		}
5401 		ql_dbg(ql_dbg_mbx, vha, 0x10c9,
5402 		    "Thermal not supported by this card.\n");
5403 		return rval;
5404 	}
5405 
5406 	if (IS_QLA82XX(ha)) {
5407 		*temp = qla82xx_read_temperature(vha);
5408 		rval = QLA_SUCCESS;
5409 		return rval;
5410 	} else if (IS_QLA8044(ha)) {
5411 		*temp = qla8044_read_temperature(vha);
5412 		rval = QLA_SUCCESS;
5413 		return rval;
5414 	}
5415 
5416 	rval = qla2x00_read_asic_temperature(vha, temp);
5417 	return rval;
5418 }
5419 
5420 int
5421 qla82xx_mbx_intr_enable(scsi_qla_host_t *vha)
5422 {
5423 	int rval;
5424 	struct qla_hw_data *ha = vha->hw;
5425 	mbx_cmd_t mc;
5426 	mbx_cmd_t *mcp = &mc;
5427 
5428 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1017,
5429 	    "Entered %s.\n", __func__);
5430 
5431 	if (!IS_FWI2_CAPABLE(ha))
5432 		return QLA_FUNCTION_FAILED;
5433 
5434 	memset(mcp, 0, sizeof(mbx_cmd_t));
5435 	mcp->mb[0] = MBC_TOGGLE_INTERRUPT;
5436 	mcp->mb[1] = 1;
5437 
5438 	mcp->out_mb = MBX_1|MBX_0;
5439 	mcp->in_mb = MBX_0;
5440 	mcp->tov = 30;
5441 	mcp->flags = 0;
5442 
5443 	rval = qla2x00_mailbox_command(vha, mcp);
5444 	if (rval != QLA_SUCCESS) {
5445 		ql_dbg(ql_dbg_mbx, vha, 0x1016,
5446 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5447 	} else {
5448 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100e,
5449 		    "Done %s.\n", __func__);
5450 	}
5451 
5452 	return rval;
5453 }
5454 
5455 int
5456 qla82xx_mbx_intr_disable(scsi_qla_host_t *vha)
5457 {
5458 	int rval;
5459 	struct qla_hw_data *ha = vha->hw;
5460 	mbx_cmd_t mc;
5461 	mbx_cmd_t *mcp = &mc;
5462 
5463 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100d,
5464 	    "Entered %s.\n", __func__);
5465 
5466 	if (!IS_P3P_TYPE(ha))
5467 		return QLA_FUNCTION_FAILED;
5468 
5469 	memset(mcp, 0, sizeof(mbx_cmd_t));
5470 	mcp->mb[0] = MBC_TOGGLE_INTERRUPT;
5471 	mcp->mb[1] = 0;
5472 
5473 	mcp->out_mb = MBX_1|MBX_0;
5474 	mcp->in_mb = MBX_0;
5475 	mcp->tov = 30;
5476 	mcp->flags = 0;
5477 
5478 	rval = qla2x00_mailbox_command(vha, mcp);
5479 	if (rval != QLA_SUCCESS) {
5480 		ql_dbg(ql_dbg_mbx, vha, 0x100c,
5481 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5482 	} else {
5483 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100b,
5484 		    "Done %s.\n", __func__);
5485 	}
5486 
5487 	return rval;
5488 }
5489 
5490 int
5491 qla82xx_md_get_template_size(scsi_qla_host_t *vha)
5492 {
5493 	struct qla_hw_data *ha = vha->hw;
5494 	mbx_cmd_t mc;
5495 	mbx_cmd_t *mcp = &mc;
5496 	int rval = QLA_FUNCTION_FAILED;
5497 
5498 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111f,
5499 	    "Entered %s.\n", __func__);
5500 
5501 	memset(mcp->mb, 0 , sizeof(mcp->mb));
5502 	mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5503 	mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5504 	mcp->mb[2] = LSW(RQST_TMPLT_SIZE);
5505 	mcp->mb[3] = MSW(RQST_TMPLT_SIZE);
5506 
5507 	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
5508 	mcp->in_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|
5509 	    MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5510 
5511 	mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5512 	mcp->tov = MBX_TOV_SECONDS;
5513 	rval = qla2x00_mailbox_command(vha, mcp);
5514 
5515 	/* Always copy back return mailbox values. */
5516 	if (rval != QLA_SUCCESS) {
5517 		ql_dbg(ql_dbg_mbx, vha, 0x1120,
5518 		    "mailbox command FAILED=0x%x, subcode=%x.\n",
5519 		    (mcp->mb[1] << 16) | mcp->mb[0],
5520 		    (mcp->mb[3] << 16) | mcp->mb[2]);
5521 	} else {
5522 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1121,
5523 		    "Done %s.\n", __func__);
5524 		ha->md_template_size = ((mcp->mb[3] << 16) | mcp->mb[2]);
5525 		if (!ha->md_template_size) {
5526 			ql_dbg(ql_dbg_mbx, vha, 0x1122,
5527 			    "Null template size obtained.\n");
5528 			rval = QLA_FUNCTION_FAILED;
5529 		}
5530 	}
5531 	return rval;
5532 }
5533 
5534 int
5535 qla82xx_md_get_template(scsi_qla_host_t *vha)
5536 {
5537 	struct qla_hw_data *ha = vha->hw;
5538 	mbx_cmd_t mc;
5539 	mbx_cmd_t *mcp = &mc;
5540 	int rval = QLA_FUNCTION_FAILED;
5541 
5542 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1123,
5543 	    "Entered %s.\n", __func__);
5544 
5545 	ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev,
5546 	   ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL);
5547 	if (!ha->md_tmplt_hdr) {
5548 		ql_log(ql_log_warn, vha, 0x1124,
5549 		    "Unable to allocate memory for Minidump template.\n");
5550 		return rval;
5551 	}
5552 
5553 	memset(mcp->mb, 0 , sizeof(mcp->mb));
5554 	mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5555 	mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5556 	mcp->mb[2] = LSW(RQST_TMPLT);
5557 	mcp->mb[3] = MSW(RQST_TMPLT);
5558 	mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma));
5559 	mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma));
5560 	mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma));
5561 	mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma));
5562 	mcp->mb[8] = LSW(ha->md_template_size);
5563 	mcp->mb[9] = MSW(ha->md_template_size);
5564 
5565 	mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5566 	mcp->tov = MBX_TOV_SECONDS;
5567 	mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8|
5568 	    MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5569 	mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
5570 	rval = qla2x00_mailbox_command(vha, mcp);
5571 
5572 	if (rval != QLA_SUCCESS) {
5573 		ql_dbg(ql_dbg_mbx, vha, 0x1125,
5574 		    "mailbox command FAILED=0x%x, subcode=%x.\n",
5575 		    ((mcp->mb[1] << 16) | mcp->mb[0]),
5576 		    ((mcp->mb[3] << 16) | mcp->mb[2]));
5577 	} else
5578 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1126,
5579 		    "Done %s.\n", __func__);
5580 	return rval;
5581 }
5582 
5583 int
5584 qla8044_md_get_template(scsi_qla_host_t *vha)
5585 {
5586 	struct qla_hw_data *ha = vha->hw;
5587 	mbx_cmd_t mc;
5588 	mbx_cmd_t *mcp = &mc;
5589 	int rval = QLA_FUNCTION_FAILED;
5590 	int offset = 0, size = MINIDUMP_SIZE_36K;
5591 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11f,
5592 	    "Entered %s.\n", __func__);
5593 
5594 	ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev,
5595 	   ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL);
5596 	if (!ha->md_tmplt_hdr) {
5597 		ql_log(ql_log_warn, vha, 0xb11b,
5598 		    "Unable to allocate memory for Minidump template.\n");
5599 		return rval;
5600 	}
5601 
5602 	memset(mcp->mb, 0 , sizeof(mcp->mb));
5603 	while (offset < ha->md_template_size) {
5604 		mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5605 		mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5606 		mcp->mb[2] = LSW(RQST_TMPLT);
5607 		mcp->mb[3] = MSW(RQST_TMPLT);
5608 		mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma + offset));
5609 		mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma + offset));
5610 		mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma + offset));
5611 		mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma + offset));
5612 		mcp->mb[8] = LSW(size);
5613 		mcp->mb[9] = MSW(size);
5614 		mcp->mb[10] = offset & 0x0000FFFF;
5615 		mcp->mb[11] = offset & 0xFFFF0000;
5616 		mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5617 		mcp->tov = MBX_TOV_SECONDS;
5618 		mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8|
5619 			MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5620 		mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
5621 		rval = qla2x00_mailbox_command(vha, mcp);
5622 
5623 		if (rval != QLA_SUCCESS) {
5624 			ql_dbg(ql_dbg_mbx, vha, 0xb11c,
5625 				"mailbox command FAILED=0x%x, subcode=%x.\n",
5626 				((mcp->mb[1] << 16) | mcp->mb[0]),
5627 				((mcp->mb[3] << 16) | mcp->mb[2]));
5628 			return rval;
5629 		} else
5630 			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11d,
5631 				"Done %s.\n", __func__);
5632 		offset = offset + size;
5633 	}
5634 	return rval;
5635 }
5636 
5637 int
5638 qla81xx_set_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
5639 {
5640 	int rval;
5641 	struct qla_hw_data *ha = vha->hw;
5642 	mbx_cmd_t mc;
5643 	mbx_cmd_t *mcp = &mc;
5644 
5645 	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
5646 		return QLA_FUNCTION_FAILED;
5647 
5648 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1133,
5649 	    "Entered %s.\n", __func__);
5650 
5651 	memset(mcp, 0, sizeof(mbx_cmd_t));
5652 	mcp->mb[0] = MBC_SET_LED_CONFIG;
5653 	mcp->mb[1] = led_cfg[0];
5654 	mcp->mb[2] = led_cfg[1];
5655 	if (IS_QLA8031(ha)) {
5656 		mcp->mb[3] = led_cfg[2];
5657 		mcp->mb[4] = led_cfg[3];
5658 		mcp->mb[5] = led_cfg[4];
5659 		mcp->mb[6] = led_cfg[5];
5660 	}
5661 
5662 	mcp->out_mb = MBX_2|MBX_1|MBX_0;
5663 	if (IS_QLA8031(ha))
5664 		mcp->out_mb |= MBX_6|MBX_5|MBX_4|MBX_3;
5665 	mcp->in_mb = MBX_0;
5666 	mcp->tov = 30;
5667 	mcp->flags = 0;
5668 
5669 	rval = qla2x00_mailbox_command(vha, mcp);
5670 	if (rval != QLA_SUCCESS) {
5671 		ql_dbg(ql_dbg_mbx, vha, 0x1134,
5672 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5673 	} else {
5674 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1135,
5675 		    "Done %s.\n", __func__);
5676 	}
5677 
5678 	return rval;
5679 }
5680 
5681 int
5682 qla81xx_get_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
5683 {
5684 	int rval;
5685 	struct qla_hw_data *ha = vha->hw;
5686 	mbx_cmd_t mc;
5687 	mbx_cmd_t *mcp = &mc;
5688 
5689 	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
5690 		return QLA_FUNCTION_FAILED;
5691 
5692 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1136,
5693 	    "Entered %s.\n", __func__);
5694 
5695 	memset(mcp, 0, sizeof(mbx_cmd_t));
5696 	mcp->mb[0] = MBC_GET_LED_CONFIG;
5697 
5698 	mcp->out_mb = MBX_0;
5699 	mcp->in_mb = MBX_2|MBX_1|MBX_0;
5700 	if (IS_QLA8031(ha))
5701 		mcp->in_mb |= MBX_6|MBX_5|MBX_4|MBX_3;
5702 	mcp->tov = 30;
5703 	mcp->flags = 0;
5704 
5705 	rval = qla2x00_mailbox_command(vha, mcp);
5706 	if (rval != QLA_SUCCESS) {
5707 		ql_dbg(ql_dbg_mbx, vha, 0x1137,
5708 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5709 	} else {
5710 		led_cfg[0] = mcp->mb[1];
5711 		led_cfg[1] = mcp->mb[2];
5712 		if (IS_QLA8031(ha)) {
5713 			led_cfg[2] = mcp->mb[3];
5714 			led_cfg[3] = mcp->mb[4];
5715 			led_cfg[4] = mcp->mb[5];
5716 			led_cfg[5] = mcp->mb[6];
5717 		}
5718 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1138,
5719 		    "Done %s.\n", __func__);
5720 	}
5721 
5722 	return rval;
5723 }
5724 
5725 int
5726 qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable)
5727 {
5728 	int rval;
5729 	struct qla_hw_data *ha = vha->hw;
5730 	mbx_cmd_t mc;
5731 	mbx_cmd_t *mcp = &mc;
5732 
5733 	if (!IS_P3P_TYPE(ha))
5734 		return QLA_FUNCTION_FAILED;
5735 
5736 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1127,
5737 		"Entered %s.\n", __func__);
5738 
5739 	memset(mcp, 0, sizeof(mbx_cmd_t));
5740 	mcp->mb[0] = MBC_SET_LED_CONFIG;
5741 	if (enable)
5742 		mcp->mb[7] = 0xE;
5743 	else
5744 		mcp->mb[7] = 0xD;
5745 
5746 	mcp->out_mb = MBX_7|MBX_0;
5747 	mcp->in_mb = MBX_0;
5748 	mcp->tov = MBX_TOV_SECONDS;
5749 	mcp->flags = 0;
5750 
5751 	rval = qla2x00_mailbox_command(vha, mcp);
5752 	if (rval != QLA_SUCCESS) {
5753 		ql_dbg(ql_dbg_mbx, vha, 0x1128,
5754 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5755 	} else {
5756 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1129,
5757 		    "Done %s.\n", __func__);
5758 	}
5759 
5760 	return rval;
5761 }
5762 
5763 int
5764 qla83xx_wr_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data)
5765 {
5766 	int rval;
5767 	struct qla_hw_data *ha = vha->hw;
5768 	mbx_cmd_t mc;
5769 	mbx_cmd_t *mcp = &mc;
5770 
5771 	if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
5772 		return QLA_FUNCTION_FAILED;
5773 
5774 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1130,
5775 	    "Entered %s.\n", __func__);
5776 
5777 	mcp->mb[0] = MBC_WRITE_REMOTE_REG;
5778 	mcp->mb[1] = LSW(reg);
5779 	mcp->mb[2] = MSW(reg);
5780 	mcp->mb[3] = LSW(data);
5781 	mcp->mb[4] = MSW(data);
5782 	mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5783 
5784 	mcp->in_mb = MBX_1|MBX_0;
5785 	mcp->tov = MBX_TOV_SECONDS;
5786 	mcp->flags = 0;
5787 	rval = qla2x00_mailbox_command(vha, mcp);
5788 
5789 	if (rval != QLA_SUCCESS) {
5790 		ql_dbg(ql_dbg_mbx, vha, 0x1131,
5791 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5792 	} else {
5793 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1132,
5794 		    "Done %s.\n", __func__);
5795 	}
5796 
5797 	return rval;
5798 }
5799 
5800 int
5801 qla2x00_port_logout(scsi_qla_host_t *vha, struct fc_port *fcport)
5802 {
5803 	int rval;
5804 	struct qla_hw_data *ha = vha->hw;
5805 	mbx_cmd_t mc;
5806 	mbx_cmd_t *mcp = &mc;
5807 
5808 	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
5809 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113b,
5810 		    "Implicit LOGO Unsupported.\n");
5811 		return QLA_FUNCTION_FAILED;
5812 	}
5813 
5814 
5815 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113c,
5816 	    "Entering %s.\n",  __func__);
5817 
5818 	/* Perform Implicit LOGO. */
5819 	mcp->mb[0] = MBC_PORT_LOGOUT;
5820 	mcp->mb[1] = fcport->loop_id;
5821 	mcp->mb[10] = BIT_15;
5822 	mcp->out_mb = MBX_10|MBX_1|MBX_0;
5823 	mcp->in_mb = MBX_0;
5824 	mcp->tov = MBX_TOV_SECONDS;
5825 	mcp->flags = 0;
5826 	rval = qla2x00_mailbox_command(vha, mcp);
5827 	if (rval != QLA_SUCCESS)
5828 		ql_dbg(ql_dbg_mbx, vha, 0x113d,
5829 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5830 	else
5831 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113e,
5832 		    "Done %s.\n", __func__);
5833 
5834 	return rval;
5835 }
5836 
5837 int
5838 qla83xx_rd_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t *data)
5839 {
5840 	int rval;
5841 	mbx_cmd_t mc;
5842 	mbx_cmd_t *mcp = &mc;
5843 	struct qla_hw_data *ha = vha->hw;
5844 	unsigned long retry_max_time = jiffies + (2 * HZ);
5845 
5846 	if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
5847 		return QLA_FUNCTION_FAILED;
5848 
5849 	ql_dbg(ql_dbg_mbx, vha, 0x114b, "Entered %s.\n", __func__);
5850 
5851 retry_rd_reg:
5852 	mcp->mb[0] = MBC_READ_REMOTE_REG;
5853 	mcp->mb[1] = LSW(reg);
5854 	mcp->mb[2] = MSW(reg);
5855 	mcp->out_mb = MBX_2|MBX_1|MBX_0;
5856 	mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0;
5857 	mcp->tov = MBX_TOV_SECONDS;
5858 	mcp->flags = 0;
5859 	rval = qla2x00_mailbox_command(vha, mcp);
5860 
5861 	if (rval != QLA_SUCCESS) {
5862 		ql_dbg(ql_dbg_mbx, vha, 0x114c,
5863 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
5864 		    rval, mcp->mb[0], mcp->mb[1]);
5865 	} else {
5866 		*data = (mcp->mb[3] | (mcp->mb[4] << 16));
5867 		if (*data == QLA8XXX_BAD_VALUE) {
5868 			/*
5869 			 * During soft-reset CAMRAM register reads might
5870 			 * return 0xbad0bad0. So retry for MAX of 2 sec
5871 			 * while reading camram registers.
5872 			 */
5873 			if (time_after(jiffies, retry_max_time)) {
5874 				ql_dbg(ql_dbg_mbx, vha, 0x1141,
5875 				    "Failure to read CAMRAM register. "
5876 				    "data=0x%x.\n", *data);
5877 				return QLA_FUNCTION_FAILED;
5878 			}
5879 			msleep(100);
5880 			goto retry_rd_reg;
5881 		}
5882 		ql_dbg(ql_dbg_mbx, vha, 0x1142, "Done %s.\n", __func__);
5883 	}
5884 
5885 	return rval;
5886 }
5887 
5888 int
5889 qla83xx_restart_nic_firmware(scsi_qla_host_t *vha)
5890 {
5891 	int rval;
5892 	mbx_cmd_t mc;
5893 	mbx_cmd_t *mcp = &mc;
5894 	struct qla_hw_data *ha = vha->hw;
5895 
5896 	if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
5897 		return QLA_FUNCTION_FAILED;
5898 
5899 	ql_dbg(ql_dbg_mbx, vha, 0x1143, "Entered %s.\n", __func__);
5900 
5901 	mcp->mb[0] = MBC_RESTART_NIC_FIRMWARE;
5902 	mcp->out_mb = MBX_0;
5903 	mcp->in_mb = MBX_1|MBX_0;
5904 	mcp->tov = MBX_TOV_SECONDS;
5905 	mcp->flags = 0;
5906 	rval = qla2x00_mailbox_command(vha, mcp);
5907 
5908 	if (rval != QLA_SUCCESS) {
5909 		ql_dbg(ql_dbg_mbx, vha, 0x1144,
5910 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
5911 		    rval, mcp->mb[0], mcp->mb[1]);
5912 		ha->isp_ops->fw_dump(vha, 0);
5913 	} else {
5914 		ql_dbg(ql_dbg_mbx, vha, 0x1145, "Done %s.\n", __func__);
5915 	}
5916 
5917 	return rval;
5918 }
5919 
5920 int
5921 qla83xx_access_control(scsi_qla_host_t *vha, uint16_t options,
5922 	uint32_t start_addr, uint32_t end_addr, uint16_t *sector_size)
5923 {
5924 	int rval;
5925 	mbx_cmd_t mc;
5926 	mbx_cmd_t *mcp = &mc;
5927 	uint8_t subcode = (uint8_t)options;
5928 	struct qla_hw_data *ha = vha->hw;
5929 
5930 	if (!IS_QLA8031(ha))
5931 		return QLA_FUNCTION_FAILED;
5932 
5933 	ql_dbg(ql_dbg_mbx, vha, 0x1146, "Entered %s.\n", __func__);
5934 
5935 	mcp->mb[0] = MBC_SET_ACCESS_CONTROL;
5936 	mcp->mb[1] = options;
5937 	mcp->out_mb = MBX_1|MBX_0;
5938 	if (subcode & BIT_2) {
5939 		mcp->mb[2] = LSW(start_addr);
5940 		mcp->mb[3] = MSW(start_addr);
5941 		mcp->mb[4] = LSW(end_addr);
5942 		mcp->mb[5] = MSW(end_addr);
5943 		mcp->out_mb |= MBX_5|MBX_4|MBX_3|MBX_2;
5944 	}
5945 	mcp->in_mb = MBX_2|MBX_1|MBX_0;
5946 	if (!(subcode & (BIT_2 | BIT_5)))
5947 		mcp->in_mb |= MBX_4|MBX_3;
5948 	mcp->tov = MBX_TOV_SECONDS;
5949 	mcp->flags = 0;
5950 	rval = qla2x00_mailbox_command(vha, mcp);
5951 
5952 	if (rval != QLA_SUCCESS) {
5953 		ql_dbg(ql_dbg_mbx, vha, 0x1147,
5954 		    "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[4]=%x.\n",
5955 		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3],
5956 		    mcp->mb[4]);
5957 		ha->isp_ops->fw_dump(vha, 0);
5958 	} else {
5959 		if (subcode & BIT_5)
5960 			*sector_size = mcp->mb[1];
5961 		else if (subcode & (BIT_6 | BIT_7)) {
5962 			ql_dbg(ql_dbg_mbx, vha, 0x1148,
5963 			    "Driver-lock id=%x%x", mcp->mb[4], mcp->mb[3]);
5964 		} else if (subcode & (BIT_3 | BIT_4)) {
5965 			ql_dbg(ql_dbg_mbx, vha, 0x1149,
5966 			    "Flash-lock id=%x%x", mcp->mb[4], mcp->mb[3]);
5967 		}
5968 		ql_dbg(ql_dbg_mbx, vha, 0x114a, "Done %s.\n", __func__);
5969 	}
5970 
5971 	return rval;
5972 }
5973 
5974 int
5975 qla2x00_dump_mctp_data(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
5976 	uint32_t size)
5977 {
5978 	int rval;
5979 	mbx_cmd_t mc;
5980 	mbx_cmd_t *mcp = &mc;
5981 
5982 	if (!IS_MCTP_CAPABLE(vha->hw))
5983 		return QLA_FUNCTION_FAILED;
5984 
5985 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114f,
5986 	    "Entered %s.\n", __func__);
5987 
5988 	mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
5989 	mcp->mb[1] = LSW(addr);
5990 	mcp->mb[2] = MSW(req_dma);
5991 	mcp->mb[3] = LSW(req_dma);
5992 	mcp->mb[4] = MSW(size);
5993 	mcp->mb[5] = LSW(size);
5994 	mcp->mb[6] = MSW(MSD(req_dma));
5995 	mcp->mb[7] = LSW(MSD(req_dma));
5996 	mcp->mb[8] = MSW(addr);
5997 	/* Setting RAM ID to valid */
5998 	mcp->mb[10] |= BIT_7;
5999 	/* For MCTP RAM ID is 0x40 */
6000 	mcp->mb[10] |= 0x40;
6001 
6002 	mcp->out_mb |= MBX_10|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|
6003 	    MBX_0;
6004 
6005 	mcp->in_mb = MBX_0;
6006 	mcp->tov = MBX_TOV_SECONDS;
6007 	mcp->flags = 0;
6008 	rval = qla2x00_mailbox_command(vha, mcp);
6009 
6010 	if (rval != QLA_SUCCESS) {
6011 		ql_dbg(ql_dbg_mbx, vha, 0x114e,
6012 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6013 	} else {
6014 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114d,
6015 		    "Done %s.\n", __func__);
6016 	}
6017 
6018 	return rval;
6019 }
6020 
6021 int
6022 qla26xx_dport_diagnostics(scsi_qla_host_t *vha,
6023 	void *dd_buf, uint size, uint options)
6024 {
6025 	int rval;
6026 	mbx_cmd_t mc;
6027 	mbx_cmd_t *mcp = &mc;
6028 	dma_addr_t dd_dma;
6029 
6030 	if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw))
6031 		return QLA_FUNCTION_FAILED;
6032 
6033 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x119f,
6034 	    "Entered %s.\n", __func__);
6035 
6036 	dd_dma = dma_map_single(&vha->hw->pdev->dev,
6037 	    dd_buf, size, DMA_FROM_DEVICE);
6038 	if (dma_mapping_error(&vha->hw->pdev->dev, dd_dma)) {
6039 		ql_log(ql_log_warn, vha, 0x1194, "Failed to map dma buffer.\n");
6040 		return QLA_MEMORY_ALLOC_FAILED;
6041 	}
6042 
6043 	memset(dd_buf, 0, size);
6044 
6045 	mcp->mb[0] = MBC_DPORT_DIAGNOSTICS;
6046 	mcp->mb[1] = options;
6047 	mcp->mb[2] = MSW(LSD(dd_dma));
6048 	mcp->mb[3] = LSW(LSD(dd_dma));
6049 	mcp->mb[6] = MSW(MSD(dd_dma));
6050 	mcp->mb[7] = LSW(MSD(dd_dma));
6051 	mcp->mb[8] = size;
6052 	mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
6053 	mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
6054 	mcp->buf_size = size;
6055 	mcp->flags = MBX_DMA_IN;
6056 	mcp->tov = MBX_TOV_SECONDS * 4;
6057 	rval = qla2x00_mailbox_command(vha, mcp);
6058 
6059 	if (rval != QLA_SUCCESS) {
6060 		ql_dbg(ql_dbg_mbx, vha, 0x1195, "Failed=%x.\n", rval);
6061 	} else {
6062 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1196,
6063 		    "Done %s.\n", __func__);
6064 	}
6065 
6066 	dma_unmap_single(&vha->hw->pdev->dev, dd_dma,
6067 	    size, DMA_FROM_DEVICE);
6068 
6069 	return rval;
6070 }
6071 
6072 static void qla2x00_async_mb_sp_done(void *s, int res)
6073 {
6074 	struct srb *sp = s;
6075 
6076 	sp->u.iocb_cmd.u.mbx.rc = res;
6077 
6078 	complete(&sp->u.iocb_cmd.u.mbx.comp);
6079 	/* don't free sp here. Let the caller do the free */
6080 }
6081 
6082 /*
6083  * This mailbox uses the iocb interface to send MB command.
6084  * This allows non-critial (non chip setup) command to go
6085  * out in parrallel.
6086  */
6087 int qla24xx_send_mb_cmd(struct scsi_qla_host *vha, mbx_cmd_t *mcp)
6088 {
6089 	int rval = QLA_FUNCTION_FAILED;
6090 	srb_t *sp;
6091 	struct srb_iocb *c;
6092 
6093 	if (!vha->hw->flags.fw_started)
6094 		goto done;
6095 
6096 	sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
6097 	if (!sp)
6098 		goto done;
6099 
6100 	sp->type = SRB_MB_IOCB;
6101 	sp->name = mb_to_str(mcp->mb[0]);
6102 
6103 	c = &sp->u.iocb_cmd;
6104 	c->timeout = qla2x00_async_iocb_timeout;
6105 	init_completion(&c->u.mbx.comp);
6106 
6107 	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
6108 
6109 	memcpy(sp->u.iocb_cmd.u.mbx.out_mb, mcp->mb, SIZEOF_IOCB_MB_REG);
6110 
6111 	sp->done = qla2x00_async_mb_sp_done;
6112 
6113 	rval = qla2x00_start_sp(sp);
6114 	if (rval != QLA_SUCCESS) {
6115 		ql_dbg(ql_dbg_mbx, vha, 0x1018,
6116 		    "%s: %s Failed submission. %x.\n",
6117 		    __func__, sp->name, rval);
6118 		goto done_free_sp;
6119 	}
6120 
6121 	ql_dbg(ql_dbg_mbx, vha, 0x113f, "MB:%s hndl %x submitted\n",
6122 	    sp->name, sp->handle);
6123 
6124 	wait_for_completion(&c->u.mbx.comp);
6125 	memcpy(mcp->mb, sp->u.iocb_cmd.u.mbx.in_mb, SIZEOF_IOCB_MB_REG);
6126 
6127 	rval = c->u.mbx.rc;
6128 	switch (rval) {
6129 	case QLA_FUNCTION_TIMEOUT:
6130 		ql_dbg(ql_dbg_mbx, vha, 0x1140, "%s: %s Timeout. %x.\n",
6131 		    __func__, sp->name, rval);
6132 		break;
6133 	case  QLA_SUCCESS:
6134 		ql_dbg(ql_dbg_mbx, vha, 0x119d, "%s: %s done.\n",
6135 		    __func__, sp->name);
6136 		sp->free(sp);
6137 		break;
6138 	default:
6139 		ql_dbg(ql_dbg_mbx, vha, 0x119e, "%s: %s Failed. %x.\n",
6140 		    __func__, sp->name, rval);
6141 		sp->free(sp);
6142 		break;
6143 	}
6144 
6145 	return rval;
6146 
6147 done_free_sp:
6148 	sp->free(sp);
6149 done:
6150 	return rval;
6151 }
6152 
6153 /*
6154  * qla24xx_gpdb_wait
6155  * NOTE: Do not call this routine from DPC thread
6156  */
6157 int qla24xx_gpdb_wait(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
6158 {
6159 	int rval = QLA_FUNCTION_FAILED;
6160 	dma_addr_t pd_dma;
6161 	struct port_database_24xx *pd;
6162 	struct qla_hw_data *ha = vha->hw;
6163 	mbx_cmd_t mc;
6164 
6165 	if (!vha->hw->flags.fw_started)
6166 		goto done;
6167 
6168 	pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
6169 	if (pd  == NULL) {
6170 		ql_log(ql_log_warn, vha, 0xd047,
6171 		    "Failed to allocate port database structure.\n");
6172 		goto done_free_sp;
6173 	}
6174 
6175 	memset(&mc, 0, sizeof(mc));
6176 	mc.mb[0] = MBC_GET_PORT_DATABASE;
6177 	mc.mb[1] = cpu_to_le16(fcport->loop_id);
6178 	mc.mb[2] = MSW(pd_dma);
6179 	mc.mb[3] = LSW(pd_dma);
6180 	mc.mb[6] = MSW(MSD(pd_dma));
6181 	mc.mb[7] = LSW(MSD(pd_dma));
6182 	mc.mb[9] = cpu_to_le16(vha->vp_idx);
6183 	mc.mb[10] = cpu_to_le16((uint16_t)opt);
6184 
6185 	rval = qla24xx_send_mb_cmd(vha, &mc);
6186 	if (rval != QLA_SUCCESS) {
6187 		ql_dbg(ql_dbg_mbx, vha, 0x1193,
6188 		    "%s: %8phC fail\n", __func__, fcport->port_name);
6189 		goto done_free_sp;
6190 	}
6191 
6192 	rval = __qla24xx_parse_gpdb(vha, fcport, pd);
6193 
6194 	ql_dbg(ql_dbg_mbx, vha, 0x1197, "%s: %8phC done\n",
6195 	    __func__, fcport->port_name);
6196 
6197 done_free_sp:
6198 	if (pd)
6199 		dma_pool_free(ha->s_dma_pool, pd, pd_dma);
6200 done:
6201 	return rval;
6202 }
6203 
6204 int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport,
6205     struct port_database_24xx *pd)
6206 {
6207 	int rval = QLA_SUCCESS;
6208 	uint64_t zero = 0;
6209 	u8 current_login_state, last_login_state;
6210 
6211 	if (fcport->fc4f_nvme) {
6212 		current_login_state = pd->current_login_state >> 4;
6213 		last_login_state = pd->last_login_state >> 4;
6214 	} else {
6215 		current_login_state = pd->current_login_state & 0xf;
6216 		last_login_state = pd->last_login_state & 0xf;
6217 	}
6218 
6219 	/* Check for logged in state. */
6220 	if (current_login_state != PDS_PRLI_COMPLETE) {
6221 		ql_dbg(ql_dbg_mbx, vha, 0x119a,
6222 		    "Unable to verify login-state (%x/%x) for loop_id %x.\n",
6223 		    current_login_state, last_login_state, fcport->loop_id);
6224 		rval = QLA_FUNCTION_FAILED;
6225 		goto gpd_error_out;
6226 	}
6227 
6228 	if (fcport->loop_id == FC_NO_LOOP_ID ||
6229 	    (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
6230 	     memcmp(fcport->port_name, pd->port_name, 8))) {
6231 		/* We lost the device mid way. */
6232 		rval = QLA_NOT_LOGGED_IN;
6233 		goto gpd_error_out;
6234 	}
6235 
6236 	/* Names are little-endian. */
6237 	memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
6238 	memcpy(fcport->port_name, pd->port_name, WWN_SIZE);
6239 
6240 	/* Get port_id of device. */
6241 	fcport->d_id.b.domain = pd->port_id[0];
6242 	fcport->d_id.b.area = pd->port_id[1];
6243 	fcport->d_id.b.al_pa = pd->port_id[2];
6244 	fcport->d_id.b.rsvd_1 = 0;
6245 
6246 	if (fcport->fc4f_nvme) {
6247 		fcport->nvme_prli_service_param =
6248 		    pd->prli_nvme_svc_param_word_3;
6249 		fcport->port_type = FCT_NVME;
6250 	} else {
6251 		/* If not target must be initiator or unknown type. */
6252 		if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
6253 			fcport->port_type = FCT_INITIATOR;
6254 		else
6255 			fcport->port_type = FCT_TARGET;
6256 	}
6257 	/* Passback COS information. */
6258 	fcport->supported_classes = (pd->flags & PDF_CLASS_2) ?
6259 		FC_COS_CLASS2 : FC_COS_CLASS3;
6260 
6261 	if (pd->prli_svc_param_word_3[0] & BIT_7) {
6262 		fcport->flags |= FCF_CONF_COMP_SUPPORTED;
6263 		fcport->conf_compl_supported = 1;
6264 	}
6265 
6266 gpd_error_out:
6267 	return rval;
6268 }
6269 
6270 /*
6271  * qla24xx_gidlist__wait
6272  * NOTE: don't call this routine from DPC thread.
6273  */
6274 int qla24xx_gidlist_wait(struct scsi_qla_host *vha,
6275 	void *id_list, dma_addr_t id_list_dma, uint16_t *entries)
6276 {
6277 	int rval = QLA_FUNCTION_FAILED;
6278 	mbx_cmd_t mc;
6279 
6280 	if (!vha->hw->flags.fw_started)
6281 		goto done;
6282 
6283 	memset(&mc, 0, sizeof(mc));
6284 	mc.mb[0] = MBC_GET_ID_LIST;
6285 	mc.mb[2] = MSW(id_list_dma);
6286 	mc.mb[3] = LSW(id_list_dma);
6287 	mc.mb[6] = MSW(MSD(id_list_dma));
6288 	mc.mb[7] = LSW(MSD(id_list_dma));
6289 	mc.mb[8] = 0;
6290 	mc.mb[9] = cpu_to_le16(vha->vp_idx);
6291 
6292 	rval = qla24xx_send_mb_cmd(vha, &mc);
6293 	if (rval != QLA_SUCCESS) {
6294 		ql_dbg(ql_dbg_mbx, vha, 0x119b,
6295 		    "%s:  fail\n", __func__);
6296 	} else {
6297 		*entries = mc.mb[1];
6298 		ql_dbg(ql_dbg_mbx, vha, 0x119c,
6299 		    "%s:  done\n", __func__);
6300 	}
6301 done:
6302 	return rval;
6303 }
6304 
6305 int qla27xx_set_zio_threshold(scsi_qla_host_t *vha, uint16_t value)
6306 {
6307 	int rval;
6308 	mbx_cmd_t	mc;
6309 	mbx_cmd_t	*mcp = &mc;
6310 
6311 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1200,
6312 	    "Entered %s\n", __func__);
6313 
6314 	memset(mcp->mb, 0 , sizeof(mcp->mb));
6315 	mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
6316 	mcp->mb[1] = cpu_to_le16(1);
6317 	mcp->mb[2] = cpu_to_le16(value);
6318 	mcp->out_mb = MBX_2 | MBX_1 | MBX_0;
6319 	mcp->in_mb = MBX_2 | MBX_0;
6320 	mcp->tov = MBX_TOV_SECONDS;
6321 	mcp->flags = 0;
6322 
6323 	rval = qla2x00_mailbox_command(vha, mcp);
6324 
6325 	ql_dbg(ql_dbg_mbx, vha, 0x1201, "%s %x\n",
6326 	    (rval != QLA_SUCCESS) ? "Failed"  : "Done", rval);
6327 
6328 	return rval;
6329 }
6330 
6331 int qla27xx_get_zio_threshold(scsi_qla_host_t *vha, uint16_t *value)
6332 {
6333 	int rval;
6334 	mbx_cmd_t	mc;
6335 	mbx_cmd_t	*mcp = &mc;
6336 
6337 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1203,
6338 	    "Entered %s\n", __func__);
6339 
6340 	memset(mcp->mb, 0, sizeof(mcp->mb));
6341 	mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
6342 	mcp->mb[1] = cpu_to_le16(0);
6343 	mcp->out_mb = MBX_1 | MBX_0;
6344 	mcp->in_mb = MBX_2 | MBX_0;
6345 	mcp->tov = MBX_TOV_SECONDS;
6346 	mcp->flags = 0;
6347 
6348 	rval = qla2x00_mailbox_command(vha, mcp);
6349 	if (rval == QLA_SUCCESS)
6350 		*value = mc.mb[2];
6351 
6352 	ql_dbg(ql_dbg_mbx, vha, 0x1205, "%s %x\n",
6353 	    (rval != QLA_SUCCESS) ? "Failed" : "Done", rval);
6354 
6355 	return rval;
6356 }
6357 
6358 int
6359 qla2x00_read_sfp_dev(struct scsi_qla_host *vha, char *buf, int count)
6360 {
6361 	struct qla_hw_data *ha = vha->hw;
6362 	uint16_t iter, addr, offset;
6363 	dma_addr_t phys_addr;
6364 	int rval, c;
6365 	u8 *sfp_data;
6366 
6367 	memset(ha->sfp_data, 0, SFP_DEV_SIZE);
6368 	addr = 0xa0;
6369 	phys_addr = ha->sfp_data_dma;
6370 	sfp_data = ha->sfp_data;
6371 	offset = c = 0;
6372 
6373 	for (iter = 0; iter < SFP_DEV_SIZE / SFP_BLOCK_SIZE; iter++) {
6374 		if (iter == 4) {
6375 			/* Skip to next device address. */
6376 			addr = 0xa2;
6377 			offset = 0;
6378 		}
6379 
6380 		rval = qla2x00_read_sfp(vha, phys_addr, sfp_data,
6381 		    addr, offset, SFP_BLOCK_SIZE, BIT_1);
6382 		if (rval != QLA_SUCCESS) {
6383 			ql_log(ql_log_warn, vha, 0x706d,
6384 			    "Unable to read SFP data (%x/%x/%x).\n", rval,
6385 			    addr, offset);
6386 
6387 			return rval;
6388 		}
6389 
6390 		if (buf && (c < count)) {
6391 			u16 sz;
6392 
6393 			if ((count - c) >= SFP_BLOCK_SIZE)
6394 				sz = SFP_BLOCK_SIZE;
6395 			else
6396 				sz = count - c;
6397 
6398 			memcpy(buf, sfp_data, sz);
6399 			buf += SFP_BLOCK_SIZE;
6400 			c += sz;
6401 		}
6402 		phys_addr += SFP_BLOCK_SIZE;
6403 		sfp_data  += SFP_BLOCK_SIZE;
6404 		offset += SFP_BLOCK_SIZE;
6405 	}
6406 
6407 	return rval;
6408 }
6409 
6410 int qla24xx_res_count_wait(struct scsi_qla_host *vha,
6411     uint16_t *out_mb, int out_mb_sz)
6412 {
6413 	int rval = QLA_FUNCTION_FAILED;
6414 	mbx_cmd_t mc;
6415 
6416 	if (!vha->hw->flags.fw_started)
6417 		goto done;
6418 
6419 	memset(&mc, 0, sizeof(mc));
6420 	mc.mb[0] = MBC_GET_RESOURCE_COUNTS;
6421 
6422 	rval = qla24xx_send_mb_cmd(vha, &mc);
6423 	if (rval != QLA_SUCCESS) {
6424 		ql_dbg(ql_dbg_mbx, vha, 0xffff,
6425 			"%s:  fail\n", __func__);
6426 	} else {
6427 		if (out_mb_sz <= SIZEOF_IOCB_MB_REG)
6428 			memcpy(out_mb, mc.mb, out_mb_sz);
6429 		else
6430 			memcpy(out_mb, mc.mb, SIZEOF_IOCB_MB_REG);
6431 
6432 		ql_dbg(ql_dbg_mbx, vha, 0xffff,
6433 			"%s:  done\n", __func__);
6434 	}
6435 done:
6436 	return rval;
6437 }
6438