/* xref: /openbmc/linux/drivers/scsi/qla2xxx/qla_mbx.c (revision 2be6bc48df59c99d35aab16a51d4a814e9bb8c35) */
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * QLogic Fibre Channel HBA Driver
4  * Copyright (c)  2003-2014 QLogic Corporation
5  */
6 #include "qla_def.h"
7 #include "qla_target.h"
8 
9 #include <linux/delay.h>
10 #include <linux/gfp.h>
11 
12 #ifdef CONFIG_PPC
13 #define IS_PPCARCH      true
14 #else
15 #define IS_PPCARCH      false
16 #endif
17 
18 static struct mb_cmd_name {
19 	uint16_t cmd;
20 	const char *str;
21 } mb_str[] = {
22 	{MBC_GET_PORT_DATABASE,		"GPDB"},
23 	{MBC_GET_ID_LIST,		"GIDList"},
24 	{MBC_GET_LINK_PRIV_STATS,	"Stats"},
25 	{MBC_GET_RESOURCE_COUNTS,	"ResCnt"},
26 };
27 
28 static const char *mb_to_str(uint16_t cmd)
29 {
30 	int i;
31 	struct mb_cmd_name *e;
32 
33 	for (i = 0; i < ARRAY_SIZE(mb_str); i++) {
34 		e = mb_str + i;
35 		if (cmd == e->cmd)
36 			return e->str;
37 	}
38 	return "unknown";
39 }
40 
41 static struct rom_cmd {
42 	uint16_t cmd;
43 } rom_cmds[] = {
44 	{ MBC_LOAD_RAM },
45 	{ MBC_EXECUTE_FIRMWARE },
46 	{ MBC_READ_RAM_WORD },
47 	{ MBC_MAILBOX_REGISTER_TEST },
48 	{ MBC_VERIFY_CHECKSUM },
49 	{ MBC_GET_FIRMWARE_VERSION },
50 	{ MBC_LOAD_RISC_RAM },
51 	{ MBC_DUMP_RISC_RAM },
52 	{ MBC_LOAD_RISC_RAM_EXTENDED },
53 	{ MBC_DUMP_RISC_RAM_EXTENDED },
54 	{ MBC_WRITE_RAM_WORD_EXTENDED },
55 	{ MBC_READ_RAM_EXTENDED },
56 	{ MBC_GET_RESOURCE_COUNTS },
57 	{ MBC_SET_FIRMWARE_OPTION },
58 	{ MBC_MID_INITIALIZE_FIRMWARE },
59 	{ MBC_GET_FIRMWARE_STATE },
60 	{ MBC_GET_MEM_OFFLOAD_CNTRL_STAT },
61 	{ MBC_GET_RETRY_COUNT },
62 	{ MBC_TRACE_CONTROL },
63 	{ MBC_INITIALIZE_MULTIQ },
64 	{ MBC_IOCB_COMMAND_A64 },
65 	{ MBC_GET_ADAPTER_LOOP_ID },
66 	{ MBC_READ_SFP },
67 	{ MBC_SET_RNID_PARAMS },
68 	{ MBC_GET_RNID_PARAMS },
69 	{ MBC_GET_SET_ZIO_THRESHOLD },
70 };
71 
72 static int is_rom_cmd(uint16_t cmd)
73 {
74 	int i;
75 	struct  rom_cmd *wc;
76 
77 	for (i = 0; i < ARRAY_SIZE(rom_cmds); i++) {
78 		wc = rom_cmds + i;
79 		if (wc->cmd == cmd)
80 			return 1;
81 	}
82 
83 	return 0;
84 }
85 
86 /*
87  * qla2x00_mailbox_command
88  *	Issue mailbox command and wait for completion.
89  *
90  * Input:
91  *	ha = adapter block pointer.
92  *	mcp = driver internal mbx struct pointer.
93  *
94  * Output:
95  *	mb[MAX_MAILBOX_REGISTER_COUNT] = returned mailbox data.
96  *
97  * Returns:
98  *	0 : QLA_SUCCESS (command performed successfully)
99  *	1 : QLA_FUNCTION_FAILED   (error encountered)
100  *	6 : QLA_FUNCTION_TIMEOUT (timeout condition encountered)
101  *
102  * Context:
103  *	Kernel context.
104  */
105 static int
106 qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
107 {
108 	int		rval, i;
109 	unsigned long    flags = 0;
110 	device_reg_t *reg;
111 	uint8_t		abort_active, eeh_delay;
112 	uint8_t		io_lock_on;
113 	uint16_t	command = 0;
114 	uint16_t	*iptr;
115 	__le16 __iomem  *optr;
116 	uint32_t	cnt;
117 	uint32_t	mboxes;
118 	unsigned long	wait_time;
119 	struct qla_hw_data *ha = vha->hw;
120 	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
121 	u32 chip_reset;
122 
123 
124 	ql_dbg(ql_dbg_mbx, vha, 0x1000, "Entered %s.\n", __func__);
125 
126 	if (ha->pdev->error_state == pci_channel_io_perm_failure) {
127 		ql_log(ql_log_warn, vha, 0x1001,
128 		    "PCI channel failed permanently, exiting.\n");
129 		return QLA_FUNCTION_TIMEOUT;
130 	}
131 
132 	if (vha->device_flags & DFLG_DEV_FAILED) {
133 		ql_log(ql_log_warn, vha, 0x1002,
134 		    "Device in failed state, exiting.\n");
135 		return QLA_FUNCTION_TIMEOUT;
136 	}
137 
138 	/* If a PCI error occurred, avoid mbx processing. */
139 	if (test_bit(PFLG_DISCONNECTED, &base_vha->dpc_flags) &&
140 	    test_bit(UNLOADING, &base_vha->dpc_flags)) {
141 		ql_log(ql_log_warn, vha, 0xd04e,
142 		    "PCI error, exiting.\n");
143 		return QLA_FUNCTION_TIMEOUT;
144 	}
145 	eeh_delay = 0;
146 	reg = ha->iobase;
147 	io_lock_on = base_vha->flags.init_done;
148 
149 	rval = QLA_SUCCESS;
150 	abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
151 	chip_reset = ha->chip_reset;
152 
153 	if (ha->flags.pci_channel_io_perm_failure) {
154 		ql_log(ql_log_warn, vha, 0x1003,
155 		    "Perm failure on EEH timeout MBX, exiting.\n");
156 		return QLA_FUNCTION_TIMEOUT;
157 	}
158 
159 	if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
160 		/* Setting Link-Down error */
161 		mcp->mb[0] = MBS_LINK_DOWN_ERROR;
162 		ql_log(ql_log_warn, vha, 0x1004,
163 		    "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
164 		return QLA_FUNCTION_TIMEOUT;
165 	}
166 
167 	/* check if ISP abort is active and return cmd with timeout */
168 	if (((test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
169 	      test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
170 	      test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) &&
171 	      !is_rom_cmd(mcp->mb[0])) || ha->flags.eeh_busy) {
172 		ql_log(ql_log_info, vha, 0x1005,
173 		    "Cmd 0x%x aborted with timeout since ISP Abort is pending\n",
174 		    mcp->mb[0]);
175 		return QLA_FUNCTION_TIMEOUT;
176 	}
177 
178 	atomic_inc(&ha->num_pend_mbx_stage1);
179 	/*
180 	 * Wait for active mailbox commands to finish by waiting at most tov
181 	 * seconds. This is to serialize actual issuing of mailbox cmds during
182 	 * non ISP abort time.
183 	 */
184 	if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) {
185 		/* Timeout occurred. Return error. */
186 		ql_log(ql_log_warn, vha, 0xd035,
187 		    "Cmd access timeout, cmd=0x%x, Exiting.\n",
188 		    mcp->mb[0]);
189 		vha->hw_err_cnt++;
190 		atomic_dec(&ha->num_pend_mbx_stage1);
191 		return QLA_FUNCTION_TIMEOUT;
192 	}
193 	atomic_dec(&ha->num_pend_mbx_stage1);
194 	if (ha->flags.purge_mbox || chip_reset != ha->chip_reset ||
195 	    ha->flags.eeh_busy) {
196 		ql_log(ql_log_warn, vha, 0xd035,
197 		       "Error detected: purge[%d] eeh[%d] cmd=0x%x, Exiting.\n",
198 		       ha->flags.purge_mbox, ha->flags.eeh_busy, mcp->mb[0]);
199 		rval = QLA_ABORTED;
200 		goto premature_exit;
201 	}
202 
203 
204 	/* Save mailbox command for debug */
205 	ha->mcp = mcp;
206 
207 	ql_dbg(ql_dbg_mbx, vha, 0x1006,
208 	    "Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]);
209 
210 	spin_lock_irqsave(&ha->hardware_lock, flags);
211 
212 	if (ha->flags.purge_mbox || chip_reset != ha->chip_reset ||
213 	    ha->flags.mbox_busy) {
214 		rval = QLA_ABORTED;
215 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
216 		goto premature_exit;
217 	}
218 	ha->flags.mbox_busy = 1;
219 
220 	/* Load mailbox registers. */
221 	if (IS_P3P_TYPE(ha))
222 		optr = &reg->isp82.mailbox_in[0];
223 	else if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha)))
224 		optr = &reg->isp24.mailbox0;
225 	else
226 		optr = MAILBOX_REG(ha, &reg->isp, 0);
227 
228 	iptr = mcp->mb;
229 	command = mcp->mb[0];
230 	mboxes = mcp->out_mb;
231 
232 	ql_dbg(ql_dbg_mbx, vha, 0x1111,
233 	    "Mailbox registers (OUT):\n");
234 	for (cnt = 0; cnt < ha->mbx_count; cnt++) {
235 		if (IS_QLA2200(ha) && cnt == 8)
236 			optr = MAILBOX_REG(ha, &reg->isp, 8);
237 		if (mboxes & BIT_0) {
238 			ql_dbg(ql_dbg_mbx, vha, 0x1112,
239 			    "mbox[%d]<-0x%04x\n", cnt, *iptr);
240 			wrt_reg_word(optr, *iptr);
241 		} else {
242 			wrt_reg_word(optr, 0);
243 		}
244 
245 		mboxes >>= 1;
246 		optr++;
247 		iptr++;
248 	}
249 
250 	ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1117,
251 	    "I/O Address = %p.\n", optr);
252 
253 	/* Issue set host interrupt command to send cmd out. */
254 	ha->flags.mbox_int = 0;
255 	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
256 
257 	/* Unlock mbx registers and wait for interrupt */
258 	ql_dbg(ql_dbg_mbx, vha, 0x100f,
259 	    "Going to unlock irq & waiting for interrupts. "
260 	    "jiffies=%lx.\n", jiffies);
261 
262 	/* Wait for mbx cmd completion until timeout */
263 	atomic_inc(&ha->num_pend_mbx_stage2);
264 	if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) {
265 		set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
266 
267 		if (IS_P3P_TYPE(ha))
268 			wrt_reg_dword(&reg->isp82.hint, HINT_MBX_INT_PENDING);
269 		else if (IS_FWI2_CAPABLE(ha))
270 			wrt_reg_dword(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
271 		else
272 			wrt_reg_word(&reg->isp.hccr, HCCR_SET_HOST_INT);
273 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
274 
275 		wait_time = jiffies;
276 		atomic_inc(&ha->num_pend_mbx_stage3);
277 		if (!wait_for_completion_timeout(&ha->mbx_intr_comp,
278 		    mcp->tov * HZ)) {
279 			ql_dbg(ql_dbg_mbx, vha, 0x117a,
280 			    "cmd=%x Timeout.\n", command);
281 			spin_lock_irqsave(&ha->hardware_lock, flags);
282 			clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
283 			spin_unlock_irqrestore(&ha->hardware_lock, flags);
284 
285 			if (chip_reset != ha->chip_reset) {
286 				eeh_delay = ha->flags.eeh_busy ? 1 : 0;
287 
288 				spin_lock_irqsave(&ha->hardware_lock, flags);
289 				ha->flags.mbox_busy = 0;
290 				spin_unlock_irqrestore(&ha->hardware_lock,
291 				    flags);
292 				atomic_dec(&ha->num_pend_mbx_stage2);
293 				atomic_dec(&ha->num_pend_mbx_stage3);
294 				rval = QLA_ABORTED;
295 				goto premature_exit;
296 			}
297 		} else if (ha->flags.purge_mbox ||
298 		    chip_reset != ha->chip_reset) {
299 			eeh_delay = ha->flags.eeh_busy ? 1 : 0;
300 
301 			spin_lock_irqsave(&ha->hardware_lock, flags);
302 			ha->flags.mbox_busy = 0;
303 			spin_unlock_irqrestore(&ha->hardware_lock, flags);
304 			atomic_dec(&ha->num_pend_mbx_stage2);
305 			atomic_dec(&ha->num_pend_mbx_stage3);
306 			rval = QLA_ABORTED;
307 			goto premature_exit;
308 		}
309 		atomic_dec(&ha->num_pend_mbx_stage3);
310 
311 		if (time_after(jiffies, wait_time + 5 * HZ))
312 			ql_log(ql_log_warn, vha, 0x1015, "cmd=0x%x, waited %d msecs\n",
313 			    command, jiffies_to_msecs(jiffies - wait_time));
314 	} else {
315 		ql_dbg(ql_dbg_mbx, vha, 0x1011,
316 		    "Cmd=%x Polling Mode.\n", command);
317 
318 		if (IS_P3P_TYPE(ha)) {
319 			if (rd_reg_dword(&reg->isp82.hint) &
320 				HINT_MBX_INT_PENDING) {
321 				ha->flags.mbox_busy = 0;
322 				spin_unlock_irqrestore(&ha->hardware_lock,
323 					flags);
324 				atomic_dec(&ha->num_pend_mbx_stage2);
325 				ql_dbg(ql_dbg_mbx, vha, 0x1012,
326 				    "Pending mailbox timeout, exiting.\n");
327 				vha->hw_err_cnt++;
328 				rval = QLA_FUNCTION_TIMEOUT;
329 				goto premature_exit;
330 			}
331 			wrt_reg_dword(&reg->isp82.hint, HINT_MBX_INT_PENDING);
332 		} else if (IS_FWI2_CAPABLE(ha))
333 			wrt_reg_dword(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
334 		else
335 			wrt_reg_word(&reg->isp.hccr, HCCR_SET_HOST_INT);
336 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
337 
338 		wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */
339 		while (!ha->flags.mbox_int) {
340 			if (ha->flags.purge_mbox ||
341 			    chip_reset != ha->chip_reset) {
342 				eeh_delay = ha->flags.eeh_busy ? 1 : 0;
343 
344 				spin_lock_irqsave(&ha->hardware_lock, flags);
345 				ha->flags.mbox_busy = 0;
346 				spin_unlock_irqrestore(&ha->hardware_lock,
347 				    flags);
348 				atomic_dec(&ha->num_pend_mbx_stage2);
349 				rval = QLA_ABORTED;
350 				goto premature_exit;
351 			}
352 
353 			if (time_after(jiffies, wait_time))
354 				break;
355 
356 			/* Check for pending interrupts. */
357 			qla2x00_poll(ha->rsp_q_map[0]);
358 
359 			if (!ha->flags.mbox_int &&
360 			    !(IS_QLA2200(ha) &&
361 			    command == MBC_LOAD_RISC_RAM_EXTENDED))
362 				msleep(10);
363 		} /* while */
364 		ql_dbg(ql_dbg_mbx, vha, 0x1013,
365 		    "Waited %d sec.\n",
366 		    (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ));
367 	}
368 	atomic_dec(&ha->num_pend_mbx_stage2);
369 
370 	/* Check whether we timed out */
371 	if (ha->flags.mbox_int) {
372 		uint16_t *iptr2;
373 
374 		ql_dbg(ql_dbg_mbx, vha, 0x1014,
375 		    "Cmd=%x completed.\n", command);
376 
377 		/* Got interrupt. Clear the flag. */
378 		ha->flags.mbox_int = 0;
379 		clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
380 
381 		if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
382 			spin_lock_irqsave(&ha->hardware_lock, flags);
383 			ha->flags.mbox_busy = 0;
384 			spin_unlock_irqrestore(&ha->hardware_lock, flags);
385 
386 			/* Setting Link-Down error */
387 			mcp->mb[0] = MBS_LINK_DOWN_ERROR;
388 			ha->mcp = NULL;
389 			rval = QLA_FUNCTION_FAILED;
390 			ql_log(ql_log_warn, vha, 0xd048,
391 			    "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
392 			goto premature_exit;
393 		}
394 
395 		if (ha->mailbox_out[0] != MBS_COMMAND_COMPLETE) {
396 			ql_dbg(ql_dbg_mbx, vha, 0x11ff,
397 			       "mb_out[0] = %#x <> %#x\n", ha->mailbox_out[0],
398 			       MBS_COMMAND_COMPLETE);
399 			rval = QLA_FUNCTION_FAILED;
400 		}
401 
402 		/* Load return mailbox registers. */
403 		iptr2 = mcp->mb;
404 		iptr = (uint16_t *)&ha->mailbox_out[0];
405 		mboxes = mcp->in_mb;
406 
407 		ql_dbg(ql_dbg_mbx, vha, 0x1113,
408 		    "Mailbox registers (IN):\n");
409 		for (cnt = 0; cnt < ha->mbx_count; cnt++) {
410 			if (mboxes & BIT_0) {
411 				*iptr2 = *iptr;
412 				ql_dbg(ql_dbg_mbx, vha, 0x1114,
413 				    "mbox[%d]->0x%04x\n", cnt, *iptr2);
414 			}
415 
416 			mboxes >>= 1;
417 			iptr2++;
418 			iptr++;
419 		}
420 	} else {
421 
422 		uint16_t mb[8];
423 		uint32_t ictrl, host_status, hccr;
424 		uint16_t        w;
425 
426 		if (IS_FWI2_CAPABLE(ha)) {
427 			mb[0] = rd_reg_word(&reg->isp24.mailbox0);
428 			mb[1] = rd_reg_word(&reg->isp24.mailbox1);
429 			mb[2] = rd_reg_word(&reg->isp24.mailbox2);
430 			mb[3] = rd_reg_word(&reg->isp24.mailbox3);
431 			mb[7] = rd_reg_word(&reg->isp24.mailbox7);
432 			ictrl = rd_reg_dword(&reg->isp24.ictrl);
433 			host_status = rd_reg_dword(&reg->isp24.host_status);
434 			hccr = rd_reg_dword(&reg->isp24.hccr);
435 
436 			ql_log(ql_log_warn, vha, 0xd04c,
437 			    "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
438 			    "mb[0-3]=[0x%x 0x%x 0x%x 0x%x] mb7 0x%x host_status 0x%x hccr 0x%x\n",
439 			    command, ictrl, jiffies, mb[0], mb[1], mb[2], mb[3],
440 			    mb[7], host_status, hccr);
441 			vha->hw_err_cnt++;
442 
443 		} else {
444 			mb[0] = RD_MAILBOX_REG(ha, &reg->isp, 0);
445 			ictrl = rd_reg_word(&reg->isp.ictrl);
446 			ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119,
447 			    "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
448 			    "mb[0]=0x%x\n", command, ictrl, jiffies, mb[0]);
449 			vha->hw_err_cnt++;
450 		}
451 		ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019);
452 
453 		/* Capture FW dump only if the PCI device is active */
454 		if (!pci_channel_offline(vha->hw->pdev)) {
455 			pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
456 			if (w == 0xffff || ictrl == 0xffffffff ||
457 			    (chip_reset != ha->chip_reset)) {
458 				/* This is a special case: if a driver unload
459 				 * is in progress and the PCI device has gone
460 				 * into a bad state due to a PCI error, then
461 				 * only the PCI ERR flag will be set. Exit
462 				 * prematurely in that case.
463 				 */
464 				spin_lock_irqsave(&ha->hardware_lock, flags);
465 				ha->flags.mbox_busy = 0;
466 				spin_unlock_irqrestore(&ha->hardware_lock,
467 				    flags);
468 				rval = QLA_FUNCTION_TIMEOUT;
469 				goto premature_exit;
470 			}
471 
472 			/* Attempt to capture a firmware dump for further
473 			 * analysis of the current firmware state. We do not
474 			 * need to do this if we are intentionally generating
475 			 * a dump.
476 			 */
477 			if (mcp->mb[0] != MBC_GEN_SYSTEM_ERROR)
478 				qla2xxx_dump_fw(vha);
479 			rval = QLA_FUNCTION_TIMEOUT;
480 		 }
481 	}
482 	spin_lock_irqsave(&ha->hardware_lock, flags);
483 	ha->flags.mbox_busy = 0;
484 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
485 
486 	/* Clean up */
487 	ha->mcp = NULL;
488 
489 	if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) {
490 		ql_dbg(ql_dbg_mbx, vha, 0x101a,
491 		    "Checking for additional resp interrupt.\n");
492 
493 		/* polling mode for non isp_abort commands. */
494 		qla2x00_poll(ha->rsp_q_map[0]);
495 	}
496 
497 	if (rval == QLA_FUNCTION_TIMEOUT &&
498 	    mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) {
499 		if (!io_lock_on || (mcp->flags & IOCTL_CMD) ||
500 		    ha->flags.eeh_busy) {
501 			/* Not in DPC context; schedule it for the DPC thread to take over. */
502 			ql_dbg(ql_dbg_mbx, vha, 0x101b,
503 			    "Timeout, schedule isp_abort_needed.\n");
504 
505 			if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
506 			    !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
507 			    !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
508 				if (IS_QLA82XX(ha)) {
509 					ql_dbg(ql_dbg_mbx, vha, 0x112a,
510 					    "disabling pause transmit on port "
511 					    "0 & 1.\n");
512 					qla82xx_wr_32(ha,
513 					    QLA82XX_CRB_NIU + 0x98,
514 					    CRB_NIU_XG_PAUSE_CTL_P0|
515 					    CRB_NIU_XG_PAUSE_CTL_P1);
516 				}
517 				ql_log(ql_log_info, base_vha, 0x101c,
518 				    "Mailbox cmd timeout occurred, cmd=0x%x, "
519 				    "mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP "
520 				    "abort.\n", command, mcp->mb[0],
521 				    ha->flags.eeh_busy);
522 				vha->hw_err_cnt++;
523 				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
524 				qla2xxx_wake_dpc(vha);
525 			}
526 		} else if (current == ha->dpc_thread) {
527 			/* call abort directly since we are in the DPC thread */
528 			ql_dbg(ql_dbg_mbx, vha, 0x101d,
529 			    "Timeout, calling abort_isp.\n");
530 
531 			if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
532 			    !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
533 			    !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
534 				if (IS_QLA82XX(ha)) {
535 					ql_dbg(ql_dbg_mbx, vha, 0x112b,
536 					    "disabling pause transmit on port "
537 					    "0 & 1.\n");
538 					qla82xx_wr_32(ha,
539 					    QLA82XX_CRB_NIU + 0x98,
540 					    CRB_NIU_XG_PAUSE_CTL_P0|
541 					    CRB_NIU_XG_PAUSE_CTL_P1);
542 				}
543 				ql_log(ql_log_info, base_vha, 0x101e,
544 				    "Mailbox cmd timeout occurred, cmd=0x%x, "
545 				    "mb[0]=0x%x. Scheduling ISP abort ",
546 				    command, mcp->mb[0]);
547 				vha->hw_err_cnt++;
548 				set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
549 				clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
550 				/* Allow next mbx cmd to come in. */
551 				complete(&ha->mbx_cmd_comp);
552 				if (ha->isp_ops->abort_isp(vha) &&
553 				    !ha->flags.eeh_busy) {
554 					/* Failed. retry later. */
555 					set_bit(ISP_ABORT_NEEDED,
556 					    &vha->dpc_flags);
557 				}
558 				clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
559 				ql_dbg(ql_dbg_mbx, vha, 0x101f,
560 				    "Finished abort_isp.\n");
561 				goto mbx_done;
562 			}
563 		}
564 	}
565 
566 premature_exit:
567 	/* Allow next mbx cmd to come in. */
568 	complete(&ha->mbx_cmd_comp);
569 
570 mbx_done:
571 	if (rval == QLA_ABORTED) {
572 		ql_log(ql_log_info, vha, 0xd035,
573 		    "Chip Reset in progress. Purging Mbox cmd=0x%x.\n",
574 		    mcp->mb[0]);
575 	} else if (rval) {
576 		if (ql2xextended_error_logging & (ql_dbg_disc|ql_dbg_mbx)) {
577 			pr_warn("%s [%s]-%04x:%ld: **** Failed=%x", QL_MSGHDR,
578 			    dev_name(&ha->pdev->dev), 0x1020+0x800,
579 			    vha->host_no, rval);
580 			mboxes = mcp->in_mb;
581 			cnt = 4;
582 			for (i = 0; i < ha->mbx_count && cnt; i++, mboxes >>= 1)
583 				if (mboxes & BIT_0) {
584 					printk(" mb[%u]=%x", i, mcp->mb[i]);
585 					cnt--;
586 				}
587 			pr_warn(" cmd=%x ****\n", command);
588 		}
589 		if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha))) {
590 			ql_dbg(ql_dbg_mbx, vha, 0x1198,
591 			    "host_status=%#x intr_ctrl=%#x intr_status=%#x\n",
592 			    rd_reg_dword(&reg->isp24.host_status),
593 			    rd_reg_dword(&reg->isp24.ictrl),
594 			    rd_reg_dword(&reg->isp24.istatus));
595 		} else {
596 			ql_dbg(ql_dbg_mbx, vha, 0x1206,
597 			    "ctrl_status=%#x ictrl=%#x istatus=%#x\n",
598 			    rd_reg_word(&reg->isp.ctrl_status),
599 			    rd_reg_word(&reg->isp.ictrl),
600 			    rd_reg_word(&reg->isp.istatus));
601 		}
602 	} else {
603 		ql_dbg(ql_dbg_mbx, base_vha, 0x1021, "Done %s.\n", __func__);
604 	}
605 
606 	i = 500;
607 	while (i && eeh_delay && (ha->pci_error_state < QLA_PCI_SLOT_RESET)) {
608 		/*
609 		 * The caller of this mailbox encountered a PCI error.
610 		 * Hold the thread until the PCIe link reset completes to
611 		 * make sure the caller does not unmap DMA while recovery
612 		 * is in progress.
613 		 */
614 		msleep(1);
615 		i--;
616 	}
617 	return rval;
618 }
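
/*
 * Caller sketch (hypothetical, for illustration only; not part of the
 * driver): the wrappers below fill an on-stack mbx_cmd_t and pass it down.
 * The exact register usage shown for MBC_GET_FIRMWARE_STATE is an
 * assumption, not taken from the firmware spec:
 *
 *	mbx_cmd_t mc = {};
 *
 *	mc.mb[0] = MBC_GET_FIRMWARE_STATE;
 *	mc.out_mb = MBX_0;
 *	mc.in_mb = MBX_1|MBX_0;
 *	mc.tov = MBX_TOV_SECONDS;
 *	mc.flags = 0;
 *	rval = qla2x00_mailbox_command(vha, &mc);
 *	if (rval == QLA_SUCCESS)
 *		fw_state = mc.mb[1];
 */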
619 
620 int
621 qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
622     uint32_t risc_code_size)
623 {
624 	int rval;
625 	struct qla_hw_data *ha = vha->hw;
626 	mbx_cmd_t mc;
627 	mbx_cmd_t *mcp = &mc;
628 
629 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1022,
630 	    "Entered %s.\n", __func__);
631 
632 	if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) {
633 		mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED;
634 		mcp->mb[8] = MSW(risc_addr);
635 		mcp->out_mb = MBX_8|MBX_0;
636 	} else {
637 		mcp->mb[0] = MBC_LOAD_RISC_RAM;
638 		mcp->out_mb = MBX_0;
639 	}
640 	mcp->mb[1] = LSW(risc_addr);
641 	mcp->mb[2] = MSW(req_dma);
642 	mcp->mb[3] = LSW(req_dma);
643 	mcp->mb[6] = MSW(MSD(req_dma));
644 	mcp->mb[7] = LSW(MSD(req_dma));
645 	mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
646 	if (IS_FWI2_CAPABLE(ha)) {
647 		mcp->mb[4] = MSW(risc_code_size);
648 		mcp->mb[5] = LSW(risc_code_size);
649 		mcp->out_mb |= MBX_5|MBX_4;
650 	} else {
651 		mcp->mb[4] = LSW(risc_code_size);
652 		mcp->out_mb |= MBX_4;
653 	}
654 
655 	mcp->in_mb = MBX_1|MBX_0;
656 	mcp->tov = MBX_TOV_SECONDS;
657 	mcp->flags = 0;
658 	rval = qla2x00_mailbox_command(vha, mcp);
659 
660 	if (rval != QLA_SUCCESS) {
661 		ql_dbg(ql_dbg_mbx, vha, 0x1023,
662 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
663 		    rval, mcp->mb[0], mcp->mb[1]);
664 		vha->hw_err_cnt++;
665 	} else {
666 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1024,
667 		    "Done %s.\n", __func__);
668 	}
669 
670 	return rval;
671 }
672 
673 #define	NVME_ENABLE_FLAG	BIT_3
674 #define	EDIF_HW_SUPPORT		BIT_10
675 
676 /*
677  * qla2x00_execute_fw
678  *     Start adapter firmware.
679  *
680  * Input:
681  *     ha = adapter block pointer.
682  *     TARGET_QUEUE_LOCK must be released.
683  *     ADAPTER_STATE_LOCK must be released.
684  *
685  * Returns:
686  *     qla2x00 local function return status code.
687  *
688  * Context:
689  *     Kernel context.
690  */
691 int
692 qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
693 {
694 	int rval;
695 	struct qla_hw_data *ha = vha->hw;
696 	mbx_cmd_t mc;
697 	mbx_cmd_t *mcp = &mc;
698 	u8 semaphore = 0;
699 #define EXE_FW_FORCE_SEMAPHORE BIT_7
700 	u8 retry = 5;
701 
702 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1025,
703 	    "Entered %s.\n", __func__);
704 
705 again:
706 	mcp->mb[0] = MBC_EXECUTE_FIRMWARE;
707 	mcp->out_mb = MBX_0;
708 	mcp->in_mb = MBX_0;
709 	if (IS_FWI2_CAPABLE(ha)) {
710 		mcp->mb[1] = MSW(risc_addr);
711 		mcp->mb[2] = LSW(risc_addr);
712 		mcp->mb[3] = 0;
713 		mcp->mb[4] = 0;
714 		mcp->mb[11] = 0;
715 
716 		/* Enable BPM? */
717 		if (ha->flags.lr_detected) {
718 			mcp->mb[4] = BIT_0;
719 			if (IS_BPM_RANGE_CAPABLE(ha))
720 				mcp->mb[4] |=
721 				    ha->lr_distance << LR_DIST_FW_POS;
722 		}
723 
724 		if (ql2xnvmeenable && (IS_QLA27XX(ha) || IS_QLA28XX(ha)))
725 			mcp->mb[4] |= NVME_ENABLE_FLAG;
726 
727 		if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
728 			struct nvram_81xx *nv = ha->nvram;
729 			/* set minimum speed if specified in nvram */
730 			if (nv->min_supported_speed >= 2 &&
731 			    nv->min_supported_speed <= 5) {
732 				mcp->mb[4] |= BIT_4;
733 				mcp->mb[11] |= nv->min_supported_speed & 0xF;
734 				mcp->out_mb |= MBX_11;
735 				mcp->in_mb |= BIT_5;
736 				vha->min_supported_speed =
737 				    nv->min_supported_speed;
738 			}
739 
740 			if (IS_PPCARCH)
741 				mcp->mb[11] |= BIT_4;
742 		}
743 
744 		if (ha->flags.exlogins_enabled)
745 			mcp->mb[4] |= ENABLE_EXTENDED_LOGIN;
746 
747 		if (ha->flags.exchoffld_enabled)
748 			mcp->mb[4] |= ENABLE_EXCHANGE_OFFLD;
749 
750 		if (semaphore)
751 			mcp->mb[11] |= EXE_FW_FORCE_SEMAPHORE;
752 
753 		mcp->out_mb |= MBX_4 | MBX_3 | MBX_2 | MBX_1 | MBX_11;
754 		mcp->in_mb |= MBX_5 | MBX_3 | MBX_2 | MBX_1;
755 	} else {
756 		mcp->mb[1] = LSW(risc_addr);
757 		mcp->out_mb |= MBX_1;
758 		if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
759 			mcp->mb[2] = 0;
760 			mcp->out_mb |= MBX_2;
761 		}
762 	}
763 
764 	mcp->tov = MBX_TOV_SECONDS;
765 	mcp->flags = 0;
766 	rval = qla2x00_mailbox_command(vha, mcp);
767 
768 	if (rval != QLA_SUCCESS) {
769 		if (IS_QLA28XX(ha) && rval == QLA_COMMAND_ERROR &&
770 		    mcp->mb[1] == 0x27 && retry) {
771 			semaphore = 1;
772 			retry--;
773 			ql_dbg(ql_dbg_async, vha, 0x1026,
774 			    "Exe FW: force semaphore.\n");
775 			goto again;
776 		}
777 
778 		if (retry) {
779 			retry--;
780 			ql_dbg(ql_dbg_async, vha, 0x509d,
781 			    "Exe FW retry: mb[0]=%x retry[%d]\n", mcp->mb[0], retry);
782 			goto again;
783 		}
784 		ql_dbg(ql_dbg_mbx, vha, 0x1026,
785 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
786 		vha->hw_err_cnt++;
787 		return rval;
788 	}
789 
790 	if (!IS_FWI2_CAPABLE(ha))
791 		goto done;
792 
793 	ha->fw_ability_mask = mcp->mb[3] << 16 | mcp->mb[2];
794 	ql_dbg(ql_dbg_mbx, vha, 0x119a,
795 	    "fw_ability_mask=%x.\n", ha->fw_ability_mask);
796 	ql_dbg(ql_dbg_mbx, vha, 0x1027, "exchanges=%x.\n", mcp->mb[1]);
797 	if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
798 		ha->max_supported_speed = mcp->mb[2] & (BIT_0|BIT_1);
799 		ql_dbg(ql_dbg_mbx, vha, 0x119b, "max_supported_speed=%s.\n",
800 		    ha->max_supported_speed == 0 ? "16Gps" :
801 		    ha->max_supported_speed == 1 ? "32Gps" :
802 		    ha->max_supported_speed == 2 ? "64Gps" : "unknown");
803 		if (vha->min_supported_speed) {
804 			ha->min_supported_speed = mcp->mb[5] &
805 			    (BIT_0 | BIT_1 | BIT_2);
806 			ql_dbg(ql_dbg_mbx, vha, 0x119c,
807 			    "min_supported_speed=%s.\n",
808 			    ha->min_supported_speed == 6 ? "64Gps" :
809 			    ha->min_supported_speed == 5 ? "32Gps" :
810 			    ha->min_supported_speed == 4 ? "16Gps" :
811 			    ha->min_supported_speed == 3 ? "8Gps" :
812 			    ha->min_supported_speed == 2 ? "4Gps" : "unknown");
813 		}
814 	}
815 
816 	if (IS_QLA28XX(ha) && (mcp->mb[5] & EDIF_HW_SUPPORT)) {
817 		ha->flags.edif_hw = 1;
818 		ql_log(ql_log_info, vha, 0xffff,
819 		    "%s: edif HW\n", __func__);
820 	}
821 
822 done:
823 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1028,
824 	    "Done %s.\n", __func__);
825 
826 	return rval;
827 }
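
/*
 * Option-bit sketch (hypothetical values, for illustration only): on the
 * FWI2 path above, mb[4]/mb[11] are composed from independent features,
 * e.g. with NVMe requested and a valid minimum speed in NVRAM:
 *
 *	mb4 = 0; mb11 = 0;
 *	mb4 |= NVME_ENABLE_FLAG;			(BIT_3)
 *	mb4 |= BIT_4;					(minimum speed valid)
 *	mb11 |= nv->min_supported_speed & 0xF;		(encoded speed, low nibble)
 *	if (semaphore)
 *		mb11 |= EXE_FW_FORCE_SEMAPHORE;		(retry path only)
 */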
828 
829 /*
830  * qla_get_exlogin_status
831  *	Get extended login status
832  *	uses the memory offload control/status Mailbox
833  *
834  * Input:
835  *	ha:		adapter state pointer.
836  *	buf_sz/ex_logins_cnt: pointers for the returned buffer size and extended login count
837  *
838  * Returns:
839  *	qla2x00 local function status
840  *
841  * Context:
842  *	Kernel context.
843  */
844 #define	FETCH_XLOGINS_STAT	0x8
845 int
846 qla_get_exlogin_status(scsi_qla_host_t *vha, uint16_t *buf_sz,
847 	uint16_t *ex_logins_cnt)
848 {
849 	int rval;
850 	mbx_cmd_t	mc;
851 	mbx_cmd_t	*mcp = &mc;
852 
853 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118f,
854 	    "Entered %s\n", __func__);
855 
856 	memset(mcp->mb, 0, sizeof(mcp->mb));
857 	mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
858 	mcp->mb[1] = FETCH_XLOGINS_STAT;
859 	mcp->out_mb = MBX_1|MBX_0;
860 	mcp->in_mb = MBX_10|MBX_4|MBX_0;
861 	mcp->tov = MBX_TOV_SECONDS;
862 	mcp->flags = 0;
863 
864 	rval = qla2x00_mailbox_command(vha, mcp);
865 	if (rval != QLA_SUCCESS) {
866 		ql_dbg(ql_dbg_mbx, vha, 0x1115, "Failed=%x.\n", rval);
867 	} else {
868 		*buf_sz = mcp->mb[4];
869 		*ex_logins_cnt = mcp->mb[10];
870 
871 		ql_log(ql_log_info, vha, 0x1190,
872 		    "buffer size 0x%x, exchange login count=%d\n",
873 		    mcp->mb[4], mcp->mb[10]);
874 
875 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1116,
876 		    "Done %s.\n", __func__);
877 	}
878 
879 	return rval;
880 }
881 
882 /*
883  * qla_set_exlogin_mem_cfg
884  *	set extended login memory configuration
885  *	Mbx needs to be issued before init_cb is set
886  *
887  * Input:
888  *	ha:		adapter state pointer.
889  *	buffer:		buffer pointer
890  *	phys_addr:	physical address of buffer
891  *	size:		size of buffer
892  *	TARGET_QUEUE_LOCK must be released
893  *	ADAPTER_STATE_LOCK must be released
894  *
895  * Returns:
896  *	qla2x00 local function status code.
897  *
898  * Context:
899  *	Kernel context.
900  */
901 #define CONFIG_XLOGINS_MEM	0x9
902 int
903 qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr)
904 {
905 	int		rval;
906 	mbx_cmd_t	mc;
907 	mbx_cmd_t	*mcp = &mc;
908 	struct qla_hw_data *ha = vha->hw;
909 
910 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111a,
911 	    "Entered %s.\n", __func__);
912 
913 	memset(mcp->mb, 0, sizeof(mcp->mb));
914 	mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
915 	mcp->mb[1] = CONFIG_XLOGINS_MEM;
916 	mcp->mb[2] = MSW(phys_addr);
917 	mcp->mb[3] = LSW(phys_addr);
918 	mcp->mb[6] = MSW(MSD(phys_addr));
919 	mcp->mb[7] = LSW(MSD(phys_addr));
920 	mcp->mb[8] = MSW(ha->exlogin_size);
921 	mcp->mb[9] = LSW(ha->exlogin_size);
922 	mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
923 	mcp->in_mb = MBX_11|MBX_0;
924 	mcp->tov = MBX_TOV_SECONDS;
925 	mcp->flags = 0;
926 	rval = qla2x00_mailbox_command(vha, mcp);
927 	if (rval != QLA_SUCCESS) {
928 		ql_dbg(ql_dbg_mbx, vha, 0x111b,
929 		       "EXlogin Failed=%x. MB0=%x MB11=%x\n",
930 		       rval, mcp->mb[0], mcp->mb[11]);
931 	} else {
932 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c,
933 		    "Done %s.\n", __func__);
934 	}
935 
936 	return rval;
937 }
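
/*
 * Provisioning sketch (hypothetical; allocation and error handling
 * elided): extended-login offload is typically sized with
 * qla_get_exlogin_status() and the DMA buffer is then handed to the
 * firmware before the init control block is sent:
 *
 *	uint16_t sz, cnt;
 *
 *	if (qla_get_exlogin_status(vha, &sz, &cnt) == QLA_SUCCESS) {
 *		... size a DMA buffer from sz and cnt, record its length in
 *		    ha->exlogin_size and its bus address in exlogin_dma ...
 *		qla_set_exlogin_mem_cfg(vha, exlogin_dma);
 *	}
 */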
938 
939 /*
940  * qla_get_exchoffld_status
941  *	Get exchange offload status
942  *	uses the memory offload control/status Mailbox
943  *
944  * Input:
945  *	ha:		adapter state pointer.
946  *	buf_sz/ex_logins_cnt: pointers for the returned buffer size and exchange offload count
947  *
948  * Returns:
949  *	qla2x00 local function status
950  *
951  * Context:
952  *	Kernel context.
953  */
954 #define	FETCH_XCHOFFLD_STAT	0x2
955 int
956 qla_get_exchoffld_status(scsi_qla_host_t *vha, uint16_t *buf_sz,
957 	uint16_t *ex_logins_cnt)
958 {
959 	int rval;
960 	mbx_cmd_t	mc;
961 	mbx_cmd_t	*mcp = &mc;
962 
963 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1019,
964 	    "Entered %s\n", __func__);
965 
966 	memset(mcp->mb, 0, sizeof(mcp->mb));
967 	mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
968 	mcp->mb[1] = FETCH_XCHOFFLD_STAT;
969 	mcp->out_mb = MBX_1|MBX_0;
970 	mcp->in_mb = MBX_10|MBX_4|MBX_0;
971 	mcp->tov = MBX_TOV_SECONDS;
972 	mcp->flags = 0;
973 
974 	rval = qla2x00_mailbox_command(vha, mcp);
975 	if (rval != QLA_SUCCESS) {
976 		ql_dbg(ql_dbg_mbx, vha, 0x1155, "Failed=%x.\n", rval);
977 	} else {
978 		*buf_sz = mcp->mb[4];
979 		*ex_logins_cnt = mcp->mb[10];
980 
981 		ql_log(ql_log_info, vha, 0x118e,
982 		    "buffer size 0x%x, exchange offload count=%d\n",
983 		    mcp->mb[4], mcp->mb[10]);
984 
985 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1156,
986 		    "Done %s.\n", __func__);
987 	}
988 
989 	return rval;
990 }
991 
992 /*
993  * qla_set_exchoffld_mem_cfg
994  *	Set exchange offload memory configuration
995  *	Mbx needs to be issued before init_cb is set
996  *
997  * Input:
998  *	ha:		adapter state pointer.
999  *	buffer:		buffer pointer
1000  *	phys_addr:	physical address of buffer
1001  *	size:		size of buffer
1002  *	TARGET_QUEUE_LOCK must be released
1003  *	ADAPTER_STATE_LOCK must be released
1004  *
1005  * Returns:
1006  *	qla2x00 local function status code.
1007  *
1008  * Context:
1009  *	Kernel context.
1010  */
1011 #define CONFIG_XCHOFFLD_MEM	0x3
1012 int
1013 qla_set_exchoffld_mem_cfg(scsi_qla_host_t *vha)
1014 {
1015 	int		rval;
1016 	mbx_cmd_t	mc;
1017 	mbx_cmd_t	*mcp = &mc;
1018 	struct qla_hw_data *ha = vha->hw;
1019 
1020 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1157,
1021 	    "Entered %s.\n", __func__);
1022 
1023 	memset(mcp->mb, 0, sizeof(mcp->mb));
1024 	mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
1025 	mcp->mb[1] = CONFIG_XCHOFFLD_MEM;
1026 	mcp->mb[2] = MSW(ha->exchoffld_buf_dma);
1027 	mcp->mb[3] = LSW(ha->exchoffld_buf_dma);
1028 	mcp->mb[6] = MSW(MSD(ha->exchoffld_buf_dma));
1029 	mcp->mb[7] = LSW(MSD(ha->exchoffld_buf_dma));
1030 	mcp->mb[8] = MSW(ha->exchoffld_size);
1031 	mcp->mb[9] = LSW(ha->exchoffld_size);
1032 	mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1033 	mcp->in_mb = MBX_11|MBX_0;
1034 	mcp->tov = MBX_TOV_SECONDS;
1035 	mcp->flags = 0;
1036 	rval = qla2x00_mailbox_command(vha, mcp);
1037 	if (rval != QLA_SUCCESS) {
1038 		/*EMPTY*/
1039 		ql_dbg(ql_dbg_mbx, vha, 0x1158, "Failed=%x.\n", rval);
1040 	} else {
1041 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1192,
1042 		    "Done %s.\n", __func__);
1043 	}
1044 
1045 	return rval;
1046 }
1047 
1048 /*
1049  * qla2x00_get_fw_version
1050  *	Get firmware version.
1051  *
1052  * Input:
1053  *	ha:		adapter state pointer.
1054  *	(the major/minor/subminor numbers are returned in the
1055  *	 ha->fw_major_version/fw_minor_version/fw_subminor_version fields.)
1057  *
1058  * Returns:
1059  *	qla2x00 local function return status code.
1060  *
1061  * Context:
1062  *	Kernel context.
1063  */
1064 int
1065 qla2x00_get_fw_version(scsi_qla_host_t *vha)
1066 {
1067 	int		rval;
1068 	mbx_cmd_t	mc;
1069 	mbx_cmd_t	*mcp = &mc;
1070 	struct qla_hw_data *ha = vha->hw;
1071 
1072 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1029,
1073 	    "Entered %s.\n", __func__);
1074 
1075 	mcp->mb[0] = MBC_GET_FIRMWARE_VERSION;
1076 	mcp->out_mb = MBX_0;
1077 	mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
1078 	if (IS_QLA81XX(vha->hw) || IS_QLA8031(ha) || IS_QLA8044(ha))
1079 		mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8;
1080 	if (IS_FWI2_CAPABLE(ha))
1081 		mcp->in_mb |= MBX_17|MBX_16|MBX_15;
1082 	if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
1083 		mcp->in_mb |=
1084 		    MBX_25|MBX_24|MBX_23|MBX_22|MBX_21|MBX_20|MBX_19|MBX_18|
1085 		    MBX_14|MBX_13|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7;
1086 
1087 	mcp->flags = 0;
1088 	mcp->tov = MBX_TOV_SECONDS;
1089 	rval = qla2x00_mailbox_command(vha, mcp);
1090 	if (rval != QLA_SUCCESS)
1091 		goto failed;
1092 
1093 	/* Return mailbox data. */
1094 	ha->fw_major_version = mcp->mb[1];
1095 	ha->fw_minor_version = mcp->mb[2];
1096 	ha->fw_subminor_version = mcp->mb[3];
1097 	ha->fw_attributes = mcp->mb[6];
1098 	if (IS_QLA2100(vha->hw) || IS_QLA2200(vha->hw))
1099 		ha->fw_memory_size = 0x1FFFF;		/* Defaults to 128KB. */
1100 	else
1101 		ha->fw_memory_size = (mcp->mb[5] << 16) | mcp->mb[4];
1102 
1103 	if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
1104 		ha->mpi_version[0] = mcp->mb[10] & 0xff;
1105 		ha->mpi_version[1] = mcp->mb[11] >> 8;
1106 		ha->mpi_version[2] = mcp->mb[11] & 0xff;
1107 		ha->mpi_capabilities = (mcp->mb[12] << 16) | mcp->mb[13];
1108 		ha->phy_version[0] = mcp->mb[8] & 0xff;
1109 		ha->phy_version[1] = mcp->mb[9] >> 8;
1110 		ha->phy_version[2] = mcp->mb[9] & 0xff;
1111 	}
1112 
1113 	if (IS_FWI2_CAPABLE(ha)) {
1114 		ha->fw_attributes_h = mcp->mb[15];
1115 		ha->fw_attributes_ext[0] = mcp->mb[16];
1116 		ha->fw_attributes_ext[1] = mcp->mb[17];
1117 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1139,
1118 		    "%s: FW_attributes Upper: 0x%x, Lower: 0x%x.\n",
1119 		    __func__, mcp->mb[15], mcp->mb[6]);
1120 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x112f,
1121 		    "%s: Ext_FwAttributes Upper: 0x%x, Lower: 0x%x.\n",
1122 		    __func__, mcp->mb[17], mcp->mb[16]);
1123 
1124 		if (ha->fw_attributes_h & 0x4)
1125 			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118d,
1126 			    "%s: Firmware supports Extended Login 0x%x\n",
1127 			    __func__, ha->fw_attributes_h);
1128 
1129 		if (ha->fw_attributes_h & 0x8)
1130 			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1191,
1131 			    "%s: Firmware supports Exchange Offload 0x%x\n",
1132 			    __func__, ha->fw_attributes_h);
1133 
1134 		/*
1135 		 * FW supports nvme and driver load parameter requested nvme.
1136 		 * BIT 26 of fw_attributes indicates NVMe support.
1137 		 */
1138 		if ((ha->fw_attributes_h &
1139 		    (FW_ATTR_H_NVME | FW_ATTR_H_NVME_UPDATED)) &&
1140 			ql2xnvmeenable) {
1141 			if (ha->fw_attributes_h & FW_ATTR_H_NVME_FBURST)
1142 				vha->flags.nvme_first_burst = 1;
1143 
1144 			vha->flags.nvme_enabled = 1;
1145 			ql_log(ql_log_info, vha, 0xd302,
1146 			    "%s: FC-NVMe is Enabled (0x%x)\n",
1147 			     __func__, ha->fw_attributes_h);
1148 		}
1149 
1150 		/* BIT_13 of Extended FW Attributes informs about NVMe2 support */
1151 		if (ha->fw_attributes_ext[0] & FW_ATTR_EXT0_NVME2) {
1152 			ql_log(ql_log_info, vha, 0xd302,
1153 			       "Firmware supports NVMe2 0x%x\n",
1154 			       ha->fw_attributes_ext[0]);
1155 			vha->flags.nvme2_enabled = 1;
1156 		}
1157 
1158 		if (IS_QLA28XX(ha) && ha->flags.edif_hw && ql2xsecenable &&
1159 		    (ha->fw_attributes_ext[0] & FW_ATTR_EXT0_EDIF)) {
1160 			ha->flags.edif_enabled = 1;
1161 			ql_log(ql_log_info, vha, 0xffff,
1162 			       "%s: edif is enabled\n", __func__);
1163 		}
1164 	}
1165 
1166 	if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
1167 		ha->serdes_version[0] = mcp->mb[7] & 0xff;
1168 		ha->serdes_version[1] = mcp->mb[8] >> 8;
1169 		ha->serdes_version[2] = mcp->mb[8] & 0xff;
1170 		ha->mpi_version[0] = mcp->mb[10] & 0xff;
1171 		ha->mpi_version[1] = mcp->mb[11] >> 8;
1172 		ha->mpi_version[2] = mcp->mb[11] & 0xff;
1173 		ha->pep_version[0] = mcp->mb[13] & 0xff;
1174 		ha->pep_version[1] = mcp->mb[14] >> 8;
1175 		ha->pep_version[2] = mcp->mb[14] & 0xff;
1176 		ha->fw_shared_ram_start = (mcp->mb[19] << 16) | mcp->mb[18];
1177 		ha->fw_shared_ram_end = (mcp->mb[21] << 16) | mcp->mb[20];
1178 		ha->fw_ddr_ram_start = (mcp->mb[23] << 16) | mcp->mb[22];
1179 		ha->fw_ddr_ram_end = (mcp->mb[25] << 16) | mcp->mb[24];
1180 		if (IS_QLA28XX(ha)) {
1181 			if (mcp->mb[16] & BIT_10)
1182 				ha->flags.secure_fw = 1;
1183 
1184 			ql_log(ql_log_info, vha, 0xffff,
1185 			    "Secure Flash Update in FW: %s\n",
1186 			    (ha->flags.secure_fw) ? "Supported" :
1187 			    "Not Supported");
1188 		}
1189 
1190 		if (ha->flags.scm_supported_a &&
1191 		    (ha->fw_attributes_ext[0] & FW_ATTR_EXT0_SCM_SUPPORTED)) {
1192 			ha->flags.scm_supported_f = 1;
1193 			ha->sf_init_cb->flags |= cpu_to_le16(BIT_13);
1194 		}
1195 		ql_log(ql_log_info, vha, 0x11a3, "SCM in FW: %s\n",
1196 		       (ha->flags.scm_supported_f) ? "Supported" :
1197 		       "Not Supported");
1198 
1199 		if (vha->flags.nvme2_enabled) {
1200 			/* set BIT_15 of special feature control block for SLER */
1201 			ha->sf_init_cb->flags |= cpu_to_le16(BIT_15);
1202 			/* set BIT_14 of special feature control block for PI CTRL*/
1203 			ha->sf_init_cb->flags |= cpu_to_le16(BIT_14);
1204 		}
1205 	}
1206 
1207 failed:
1208 	if (rval != QLA_SUCCESS) {
1209 		/*EMPTY*/
1210 		ql_dbg(ql_dbg_mbx, vha, 0x102a, "Failed=%x.\n", rval);
1211 	} else {
1212 		/*EMPTY*/
1213 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102b,
1214 		    "Done %s.\n", __func__);
1215 	}
1216 	return rval;
1217 }
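
/*
 * Reporting sketch (hypothetical caller, for illustration only; assumes
 * ha = vha->hw): once this command succeeds, the version triplet is
 * available from qla_hw_data, e.g.:
 *
 *	if (qla2x00_get_fw_version(vha) == QLA_SUCCESS)
 *		ql_log(ql_log_info, vha, 0xffff, "fw %d.%02d.%02d attrs 0x%x\n",
 *		    ha->fw_major_version, ha->fw_minor_version,
 *		    ha->fw_subminor_version, ha->fw_attributes);
 */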
1218 
1219 /*
1220  * qla2x00_get_fw_options
1221  *	Get firmware options.
1222  *
1223  * Input:
1224  *	ha = adapter block pointer.
1225  *	fwopt = pointer for firmware options.
1226  *
1227  * Returns:
1228  *	qla2x00 local function return status code.
1229  *
1230  * Context:
1231  *	Kernel context.
1232  */
1233 int
1234 qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
1235 {
1236 	int rval;
1237 	mbx_cmd_t mc;
1238 	mbx_cmd_t *mcp = &mc;
1239 
1240 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102c,
1241 	    "Entered %s.\n", __func__);
1242 
1243 	mcp->mb[0] = MBC_GET_FIRMWARE_OPTION;
1244 	mcp->out_mb = MBX_0;
1245 	mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1246 	mcp->tov = MBX_TOV_SECONDS;
1247 	mcp->flags = 0;
1248 	rval = qla2x00_mailbox_command(vha, mcp);
1249 
1250 	if (rval != QLA_SUCCESS) {
1251 		/*EMPTY*/
1252 		ql_dbg(ql_dbg_mbx, vha, 0x102d, "Failed=%x.\n", rval);
1253 	} else {
1254 		fwopts[0] = mcp->mb[0];
1255 		fwopts[1] = mcp->mb[1];
1256 		fwopts[2] = mcp->mb[2];
1257 		fwopts[3] = mcp->mb[3];
1258 
1259 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102e,
1260 		    "Done %s.\n", __func__);
1261 	}
1262 
1263 	return rval;
1264 }
1265 
1266 
1267 /*
1268  * qla2x00_set_fw_options
1269  *	Set firmware options.
1270  *
1271  * Input:
1272  *	ha = adapter block pointer.
1273  *	fwopt = pointer for firmware options.
1274  *
1275  * Returns:
1276  *	qla2x00 local function return status code.
1277  *
1278  * Context:
1279  *	Kernel context.
1280  */
1281 int
1282 qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
1283 {
1284 	int rval;
1285 	mbx_cmd_t mc;
1286 	mbx_cmd_t *mcp = &mc;
1287 
1288 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102f,
1289 	    "Entered %s.\n", __func__);
1290 
1291 	mcp->mb[0] = MBC_SET_FIRMWARE_OPTION;
1292 	mcp->mb[1] = fwopts[1];
1293 	mcp->mb[2] = fwopts[2];
1294 	mcp->mb[3] = fwopts[3];
1295 	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1296 	mcp->in_mb = MBX_0;
1297 	if (IS_FWI2_CAPABLE(vha->hw)) {
1298 		mcp->in_mb |= MBX_1;
1299 		mcp->mb[10] = fwopts[10];
1300 		mcp->out_mb |= MBX_10;
1301 	} else {
1302 		mcp->mb[10] = fwopts[10];
1303 		mcp->mb[11] = fwopts[11];
1304 		mcp->mb[12] = 0;	/* Undocumented, but used */
1305 		mcp->out_mb |= MBX_12|MBX_11|MBX_10;
1306 	}
1307 	mcp->tov = MBX_TOV_SECONDS;
1308 	mcp->flags = 0;
1309 	rval = qla2x00_mailbox_command(vha, mcp);
1310 
1311 	fwopts[0] = mcp->mb[0];
1312 
1313 	if (rval != QLA_SUCCESS) {
1314 		/*EMPTY*/
1315 		ql_dbg(ql_dbg_mbx, vha, 0x1030,
1316 		    "Failed=%x (%x/%x).\n", rval, mcp->mb[0], mcp->mb[1]);
1317 	} else {
1318 		/*EMPTY*/
1319 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1031,
1320 		    "Done %s.\n", __func__);
1321 	}
1322 
1323 	return rval;
1324 }
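
/*
 * Read-modify-write sketch (hypothetical option bit, for illustration
 * only): the two helpers above are normally used as a pair so unrelated
 * firmware options are preserved:
 *
 *	uint16_t opts[16] = { 0 };
 *
 *	if (qla2x00_get_fw_options(vha, opts) == QLA_SUCCESS) {
 *		opts[1] |= BIT_5;	(whichever option bit is required)
 *		qla2x00_set_fw_options(vha, opts);
 *	}
 */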
1325 
1326 /*
1327  * qla2x00_mbx_reg_test
1328  *	Mailbox register wrap test.
1329  *
1330  * Input:
1331  *	ha = adapter block pointer.
1332  *	TARGET_QUEUE_LOCK must be released.
1333  *	ADAPTER_STATE_LOCK must be released.
1334  *
1335  * Returns:
1336  *	qla2x00 local function return status code.
1337  *
1338  * Context:
1339  *	Kernel context.
1340  */
1341 int
1342 qla2x00_mbx_reg_test(scsi_qla_host_t *vha)
1343 {
1344 	int rval;
1345 	mbx_cmd_t mc;
1346 	mbx_cmd_t *mcp = &mc;
1347 
1348 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1032,
1349 	    "Entered %s.\n", __func__);
1350 
1351 	mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST;
1352 	mcp->mb[1] = 0xAAAA;
1353 	mcp->mb[2] = 0x5555;
1354 	mcp->mb[3] = 0xAA55;
1355 	mcp->mb[4] = 0x55AA;
1356 	mcp->mb[5] = 0xA5A5;
1357 	mcp->mb[6] = 0x5A5A;
1358 	mcp->mb[7] = 0x2525;
1359 	mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
1360 	mcp->in_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
1361 	mcp->tov = MBX_TOV_SECONDS;
1362 	mcp->flags = 0;
1363 	rval = qla2x00_mailbox_command(vha, mcp);
1364 
1365 	if (rval == QLA_SUCCESS) {
1366 		if (mcp->mb[1] != 0xAAAA || mcp->mb[2] != 0x5555 ||
1367 		    mcp->mb[3] != 0xAA55 || mcp->mb[4] != 0x55AA)
1368 			rval = QLA_FUNCTION_FAILED;
1369 		if (mcp->mb[5] != 0xA5A5 || mcp->mb[6] != 0x5A5A ||
1370 		    mcp->mb[7] != 0x2525)
1371 			rval = QLA_FUNCTION_FAILED;
1372 	}
1373 
1374 	if (rval != QLA_SUCCESS) {
1375 		/*EMPTY*/
1376 		ql_dbg(ql_dbg_mbx, vha, 0x1033, "Failed=%x.\n", rval);
1377 		vha->hw_err_cnt++;
1378 	} else {
1379 		/*EMPTY*/
1380 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1034,
1381 		    "Done %s.\n", __func__);
1382 	}
1383 
1384 	return rval;
1385 }
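
/*
 * Diagnostic sketch (hypothetical caller): the wrap test is typically run
 * once during chip diagnostics, before firmware is loaded; any mismatch in
 * the echoed patterns is treated as a broken mailbox interface:
 *
 *	if (qla2x00_mbx_reg_test(vha) != QLA_SUCCESS)
 *		... fail the diagnostic and do not attempt to load firmware ...
 */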
1386 
1387 /*
1388  * qla2x00_verify_checksum
1389  *	Verify firmware checksum.
1390  *
1391  * Input:
1392  *	ha = adapter block pointer.
1393  *	TARGET_QUEUE_LOCK must be released.
1394  *	ADAPTER_STATE_LOCK must be released.
1395  *
1396  * Returns:
1397  *	qla2x00 local function return status code.
1398  *
1399  * Context:
1400  *	Kernel context.
1401  */
1402 int
1403 qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr)
1404 {
1405 	int rval;
1406 	mbx_cmd_t mc;
1407 	mbx_cmd_t *mcp = &mc;
1408 
1409 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1035,
1410 	    "Entered %s.\n", __func__);
1411 
1412 	mcp->mb[0] = MBC_VERIFY_CHECKSUM;
1413 	mcp->out_mb = MBX_0;
1414 	mcp->in_mb = MBX_0;
1415 	if (IS_FWI2_CAPABLE(vha->hw)) {
1416 		mcp->mb[1] = MSW(risc_addr);
1417 		mcp->mb[2] = LSW(risc_addr);
1418 		mcp->out_mb |= MBX_2|MBX_1;
1419 		mcp->in_mb |= MBX_2|MBX_1;
1420 	} else {
1421 		mcp->mb[1] = LSW(risc_addr);
1422 		mcp->out_mb |= MBX_1;
1423 		mcp->in_mb |= MBX_1;
1424 	}
1425 
1426 	mcp->tov = MBX_TOV_SECONDS;
1427 	mcp->flags = 0;
1428 	rval = qla2x00_mailbox_command(vha, mcp);
1429 
1430 	if (rval != QLA_SUCCESS) {
1431 		ql_dbg(ql_dbg_mbx, vha, 0x1036,
1432 		    "Failed=%x chm sum=%x.\n", rval, IS_FWI2_CAPABLE(vha->hw) ?
1433 		    (mcp->mb[2] << 16) | mcp->mb[1] : mcp->mb[1]);
1434 	} else {
1435 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1037,
1436 		    "Done %s.\n", __func__);
1437 	}
1438 
1439 	return rval;
1440 }
1441 
1442 /*
1443  * qla2x00_issue_iocb
1444  *	Issue IOCB using mailbox command
1445  *
1446  * Input:
1447  *	ha = adapter state pointer.
1448  *	buffer = buffer pointer.
1449  *	phys_addr = physical address of buffer.
1450  *	size = size of buffer.
1451  *	TARGET_QUEUE_LOCK must be released.
1452  *	ADAPTER_STATE_LOCK must be released.
1453  *
1454  * Returns:
1455  *	qla2x00 local function return status code.
1456  *
1457  * Context:
1458  *	Kernel context.
1459  */
1460 int
1461 qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
1462     dma_addr_t phys_addr, size_t size, uint32_t tov)
1463 {
1464 	int		rval;
1465 	mbx_cmd_t	mc;
1466 	mbx_cmd_t	*mcp = &mc;
1467 
1468 	if (!vha->hw->flags.fw_started)
1469 		return QLA_INVALID_COMMAND;
1470 
1471 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1038,
1472 	    "Entered %s.\n", __func__);
1473 
1474 	mcp->mb[0] = MBC_IOCB_COMMAND_A64;
1475 	mcp->mb[1] = 0;
1476 	mcp->mb[2] = MSW(LSD(phys_addr));
1477 	mcp->mb[3] = LSW(LSD(phys_addr));
1478 	mcp->mb[6] = MSW(MSD(phys_addr));
1479 	mcp->mb[7] = LSW(MSD(phys_addr));
1480 	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1481 	mcp->in_mb = MBX_1|MBX_0;
1482 	mcp->tov = tov;
1483 	mcp->flags = 0;
1484 	rval = qla2x00_mailbox_command(vha, mcp);
1485 
1486 	if (rval != QLA_SUCCESS) {
1487 		/*EMPTY*/
1488 		ql_dbg(ql_dbg_mbx, vha, 0x1039, "Failed=%x.\n", rval);
1489 	} else {
1490 		sts_entry_t *sts_entry = buffer;
1491 
1492 		/* Mask reserved bits. */
1493 		sts_entry->entry_status &=
1494 		    IS_FWI2_CAPABLE(vha->hw) ? RF_MASK_24XX : RF_MASK;
1495 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103a,
1496 		    "Done %s (status=%x).\n", __func__,
1497 		    sts_entry->entry_status);
1498 	}
1499 
1500 	return rval;
1501 }
1502 
1503 int
1504 qla2x00_issue_iocb(scsi_qla_host_t *vha, void *buffer, dma_addr_t phys_addr,
1505     size_t size)
1506 {
1507 	return qla2x00_issue_iocb_timeout(vha, buffer, phys_addr, size,
1508 	    MBX_TOV_SECONDS);
1509 }
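
/*
 * Caller sketch (hypothetical; buffer sizing and error handling elided):
 * the IOCB is built in DMA-coherent memory and passed to the firmware by
 * bus address; on success the masked entry_status indicates whether the
 * firmware accepted it:
 *
 *	pkt = dma_alloc_coherent(&ha->pdev->dev, sizeof(*pkt), &pkt_dma,
 *	    GFP_KERNEL);
 *	... build the IOCB at pkt ...
 *	rval = qla2x00_issue_iocb(vha, pkt, pkt_dma, sizeof(*pkt));
 *	if (rval == QLA_SUCCESS && !pkt->entry_status)
 *		... IOCB accepted ...
 */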
1510 
1511 /*
1512  * qla2x00_abort_command
1513  *	Abort a previously issued command (IOCB).
1514  *
1515  * Input:
1516  *	ha = adapter block pointer.
1517  *	sp = SRB structure pointer.
1518  *
1519  * Returns:
1520  *	qla2x00 local function return status code.
1521  *
1522  * Context:
1523  *	Kernel context.
1524  */
1525 int
1526 qla2x00_abort_command(srb_t *sp)
1527 {
1528 	unsigned long   flags = 0;
1529 	int		rval;
1530 	uint32_t	handle = 0;
1531 	mbx_cmd_t	mc;
1532 	mbx_cmd_t	*mcp = &mc;
1533 	fc_port_t	*fcport = sp->fcport;
1534 	scsi_qla_host_t *vha = fcport->vha;
1535 	struct qla_hw_data *ha = vha->hw;
1536 	struct req_que *req;
1537 	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1538 
1539 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103b,
1540 	    "Entered %s.\n", __func__);
1541 
1542 	if (sp->qpair)
1543 		req = sp->qpair->req;
1544 	else
1545 		req = vha->req;
1546 
1547 	spin_lock_irqsave(&ha->hardware_lock, flags);
1548 	for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
1549 		if (req->outstanding_cmds[handle] == sp)
1550 			break;
1551 	}
1552 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
1553 
1554 	if (handle == req->num_outstanding_cmds) {
1555 		/* command not found */
1556 		return QLA_FUNCTION_FAILED;
1557 	}
1558 
1559 	mcp->mb[0] = MBC_ABORT_COMMAND;
1560 	if (HAS_EXTENDED_IDS(ha))
1561 		mcp->mb[1] = fcport->loop_id;
1562 	else
1563 		mcp->mb[1] = fcport->loop_id << 8;
1564 	mcp->mb[2] = (uint16_t)handle;
1565 	mcp->mb[3] = (uint16_t)(handle >> 16);
1566 	mcp->mb[6] = (uint16_t)cmd->device->lun;
1567 	mcp->out_mb = MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1568 	mcp->in_mb = MBX_0;
1569 	mcp->tov = MBX_TOV_SECONDS;
1570 	mcp->flags = 0;
1571 	rval = qla2x00_mailbox_command(vha, mcp);
1572 
1573 	if (rval != QLA_SUCCESS) {
1574 		ql_dbg(ql_dbg_mbx, vha, 0x103c, "Failed=%x.\n", rval);
1575 	} else {
1576 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103d,
1577 		    "Done %s.\n", __func__);
1578 	}
1579 
1580 	return rval;
1581 }
1582 
1583 int
1584 qla2x00_abort_target(struct fc_port *fcport, uint64_t l, int tag)
1585 {
1586 	int rval, rval2;
1587 	mbx_cmd_t  mc;
1588 	mbx_cmd_t  *mcp = &mc;
1589 	scsi_qla_host_t *vha;
1590 
1591 	vha = fcport->vha;
1592 
1593 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103e,
1594 	    "Entered %s.\n", __func__);
1595 
1596 	mcp->mb[0] = MBC_ABORT_TARGET;
1597 	mcp->out_mb = MBX_9|MBX_2|MBX_1|MBX_0;
1598 	if (HAS_EXTENDED_IDS(vha->hw)) {
1599 		mcp->mb[1] = fcport->loop_id;
1600 		mcp->mb[10] = 0;
1601 		mcp->out_mb |= MBX_10;
1602 	} else {
1603 		mcp->mb[1] = fcport->loop_id << 8;
1604 	}
1605 	mcp->mb[2] = vha->hw->loop_reset_delay;
1606 	mcp->mb[9] = vha->vp_idx;
1607 
1608 	mcp->in_mb = MBX_0;
1609 	mcp->tov = MBX_TOV_SECONDS;
1610 	mcp->flags = 0;
1611 	rval = qla2x00_mailbox_command(vha, mcp);
1612 	if (rval != QLA_SUCCESS) {
1613 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103f,
1614 		    "Failed=%x.\n", rval);
1615 	}
1616 
1617 	/* Issue marker IOCB. */
1618 	rval2 = qla2x00_marker(vha, vha->hw->base_qpair, fcport->loop_id, 0,
1619 							MK_SYNC_ID);
1620 	if (rval2 != QLA_SUCCESS) {
1621 		ql_dbg(ql_dbg_mbx, vha, 0x1040,
1622 		    "Failed to issue marker IOCB (%x).\n", rval2);
1623 	} else {
1624 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1041,
1625 		    "Done %s.\n", __func__);
1626 	}
1627 
1628 	return rval;
1629 }
1630 
1631 int
1632 qla2x00_lun_reset(struct fc_port *fcport, uint64_t l, int tag)
1633 {
1634 	int rval, rval2;
1635 	mbx_cmd_t  mc;
1636 	mbx_cmd_t  *mcp = &mc;
1637 	scsi_qla_host_t *vha;
1638 
1639 	vha = fcport->vha;
1640 
1641 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1042,
1642 	    "Entered %s.\n", __func__);
1643 
1644 	mcp->mb[0] = MBC_LUN_RESET;
1645 	mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
1646 	if (HAS_EXTENDED_IDS(vha->hw))
1647 		mcp->mb[1] = fcport->loop_id;
1648 	else
1649 		mcp->mb[1] = fcport->loop_id << 8;
1650 	mcp->mb[2] = (u32)l;
1651 	mcp->mb[3] = 0;
1652 	mcp->mb[9] = vha->vp_idx;
1653 
1654 	mcp->in_mb = MBX_0;
1655 	mcp->tov = MBX_TOV_SECONDS;
1656 	mcp->flags = 0;
1657 	rval = qla2x00_mailbox_command(vha, mcp);
1658 	if (rval != QLA_SUCCESS) {
1659 		ql_dbg(ql_dbg_mbx, vha, 0x1043, "Failed=%x.\n", rval);
1660 	}
1661 
1662 	/* Issue marker IOCB. */
1663 	rval2 = qla2x00_marker(vha, vha->hw->base_qpair, fcport->loop_id, l,
1664 								MK_SYNC_ID_LUN);
1665 	if (rval2 != QLA_SUCCESS) {
1666 		ql_dbg(ql_dbg_mbx, vha, 0x1044,
1667 		    "Failed to issue marker IOCB (%x).\n", rval2);
1668 	} else {
1669 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1045,
1670 		    "Done %s.\n", __func__);
1671 	}
1672 
1673 	return rval;
1674 }
1675 
1676 /*
1677  * qla2x00_get_adapter_id
1678  *	Get adapter ID and topology.
1679  *
1680  * Input:
1681  *	ha = adapter block pointer.
1682  *	id = pointer for loop ID.
1683  *	al_pa = pointer for AL_PA.
1684  *	area = pointer for area.
1685  *	domain = pointer for domain.
1686  *	top = pointer for topology.
1687  *	TARGET_QUEUE_LOCK must be released.
1688  *	ADAPTER_STATE_LOCK must be released.
1689  *
1690  * Returns:
1691  *	qla2x00 local function return status code.
1692  *
1693  * Context:
1694  *	Kernel context.
1695  */
1696 int
1697 qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
1698     uint8_t *area, uint8_t *domain, uint16_t *top, uint16_t *sw_cap)
1699 {
1700 	int rval;
1701 	mbx_cmd_t mc;
1702 	mbx_cmd_t *mcp = &mc;
1703 
1704 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1046,
1705 	    "Entered %s.\n", __func__);
1706 
1707 	mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID;
1708 	mcp->mb[9] = vha->vp_idx;
1709 	mcp->out_mb = MBX_9|MBX_0;
1710 	mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1711 	if (IS_CNA_CAPABLE(vha->hw))
1712 		mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10;
1713 	if (IS_FWI2_CAPABLE(vha->hw))
1714 		mcp->in_mb |= MBX_19|MBX_18|MBX_17|MBX_16;
1715 	if (IS_QLA27XX(vha->hw) || IS_QLA28XX(vha->hw))
1716 		mcp->in_mb |= MBX_15|MBX_21|MBX_22|MBX_23;
1717 
1718 	mcp->tov = MBX_TOV_SECONDS;
1719 	mcp->flags = 0;
1720 	rval = qla2x00_mailbox_command(vha, mcp);
1721 	if (mcp->mb[0] == MBS_COMMAND_ERROR)
1722 		rval = QLA_COMMAND_ERROR;
1723 	else if (mcp->mb[0] == MBS_INVALID_COMMAND)
1724 		rval = QLA_INVALID_COMMAND;
1725 
1726 	/* Return data. */
1727 	*id = mcp->mb[1];
1728 	*al_pa = LSB(mcp->mb[2]);
1729 	*area = MSB(mcp->mb[2]);
1730 	*domain	= LSB(mcp->mb[3]);
1731 	*top = mcp->mb[6];
1732 	*sw_cap = mcp->mb[7];
1733 
1734 	if (rval != QLA_SUCCESS) {
1735 		/*EMPTY*/
1736 		ql_dbg(ql_dbg_mbx, vha, 0x1047, "Failed=%x.\n", rval);
1737 	} else {
1738 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1048,
1739 		    "Done %s.\n", __func__);
1740 
1741 		if (IS_CNA_CAPABLE(vha->hw)) {
1742 			vha->fcoe_vlan_id = mcp->mb[9] & 0xfff;
1743 			vha->fcoe_fcf_idx = mcp->mb[10];
1744 			vha->fcoe_vn_port_mac[5] = mcp->mb[11] >> 8;
1745 			vha->fcoe_vn_port_mac[4] = mcp->mb[11] & 0xff;
1746 			vha->fcoe_vn_port_mac[3] = mcp->mb[12] >> 8;
1747 			vha->fcoe_vn_port_mac[2] = mcp->mb[12] & 0xff;
1748 			vha->fcoe_vn_port_mac[1] = mcp->mb[13] >> 8;
1749 			vha->fcoe_vn_port_mac[0] = mcp->mb[13] & 0xff;
1750 		}
1751 		/* If FA-WWN supported */
1752 		if (IS_FAWWN_CAPABLE(vha->hw)) {
1753 			if (mcp->mb[7] & BIT_14) {
1754 				vha->port_name[0] = MSB(mcp->mb[16]);
1755 				vha->port_name[1] = LSB(mcp->mb[16]);
1756 				vha->port_name[2] = MSB(mcp->mb[17]);
1757 				vha->port_name[3] = LSB(mcp->mb[17]);
1758 				vha->port_name[4] = MSB(mcp->mb[18]);
1759 				vha->port_name[5] = LSB(mcp->mb[18]);
1760 				vha->port_name[6] = MSB(mcp->mb[19]);
1761 				vha->port_name[7] = LSB(mcp->mb[19]);
1762 				fc_host_port_name(vha->host) =
1763 				    wwn_to_u64(vha->port_name);
1764 				ql_dbg(ql_dbg_mbx, vha, 0x10ca,
1765 				    "FA-WWN acquired %016llx\n",
1766 				    wwn_to_u64(vha->port_name));
1767 			}
1768 		}
1769 
1770 		if (IS_QLA27XX(vha->hw) || IS_QLA28XX(vha->hw)) {
1771 			vha->bbcr = mcp->mb[15];
1772 			if (mcp->mb[7] & SCM_EDC_ACC_RECEIVED) {
1773 				ql_log(ql_log_info, vha, 0x11a4,
1774 				       "SCM: EDC ELS completed, flags 0x%x\n",
1775 				       mcp->mb[21]);
1776 			}
1777 			if (mcp->mb[7] & SCM_RDF_ACC_RECEIVED) {
1778 				vha->hw->flags.scm_enabled = 1;
1779 				vha->scm_fabric_connection_flags |=
1780 				    SCM_FLAG_RDF_COMPLETED;
1781 				ql_log(ql_log_info, vha, 0x11a5,
1782 				       "SCM: RDF ELS completed, flags 0x%x\n",
1783 				       mcp->mb[23]);
1784 			}
1785 		}
1786 	}
1787 
1788 	return rval;
1789 }
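
/*
 * Decode sketch (hypothetical caller): the returned pieces combine into
 * the 24-bit FC port ID in the usual domain/area/AL_PA order:
 *
 *	uint16_t loop_id, topo, sw_cap;
 *	uint8_t al_pa, area, domain;
 *
 *	if (qla2x00_get_adapter_id(vha, &loop_id, &al_pa, &area, &domain,
 *	    &topo, &sw_cap) == QLA_SUCCESS)
 *		port_id = (domain << 16) | (area << 8) | al_pa;
 */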
1790 
1791 /*
1792  * qla2x00_get_retry_cnt
1793  *	Get current firmware login retry count and delay.
1794  *
1795  * Input:
1796  *	ha = adapter block pointer.
1797  *	retry_cnt = pointer to login retry count.
1798  *	tov = pointer to login timeout value.
1799  *
1800  * Returns:
1801  *	qla2x00 local function return status code.
1802  *
1803  * Context:
1804  *	Kernel context.
1805  */
1806 int
1807 qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov,
1808     uint16_t *r_a_tov)
1809 {
1810 	int rval;
1811 	uint16_t ratov;
1812 	mbx_cmd_t mc;
1813 	mbx_cmd_t *mcp = &mc;
1814 
1815 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1049,
1816 	    "Entered %s.\n", __func__);
1817 
1818 	mcp->mb[0] = MBC_GET_RETRY_COUNT;
1819 	mcp->out_mb = MBX_0;
1820 	mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1821 	mcp->tov = MBX_TOV_SECONDS;
1822 	mcp->flags = 0;
1823 	rval = qla2x00_mailbox_command(vha, mcp);
1824 
1825 	if (rval != QLA_SUCCESS) {
1826 		/*EMPTY*/
1827 		ql_dbg(ql_dbg_mbx, vha, 0x104a,
1828 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
1829 	} else {
1830 		/* Convert returned data and check our values. */
1831 		*r_a_tov = mcp->mb[3] / 2;
1832 		ratov = (mcp->mb[3]/2) / 10;  /* mb[3] value is in 100ms */
1833 		if (mcp->mb[1] * ratov > (*retry_cnt) * (*tov)) {
1834 			/* Update to the larger values */
1835 			*retry_cnt = (uint8_t)mcp->mb[1];
1836 			*tov = ratov;
1837 		}
1838 
1839 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104b,
1840 		    "Done %s mb3=%d ratov=%d.\n", __func__, mcp->mb[3], ratov);
1841 	}
1842 
1843 	return rval;
1844 }
1845 
1846 /*
1847  * qla2x00_init_firmware
1848  *	Initialize adapter firmware.
1849  *
1850  * Input:
1851  *	ha = adapter block pointer.
1852  *	size = size of the initialization control block (the block itself
1853  *	       is referenced via ha->init_cb_dma).
1854  *	TARGET_QUEUE_LOCK must be released.
1855  *	ADAPTER_STATE_LOCK must be released.
1856  *
1857  * Returns:
1858  *	qla2x00 local function return status code.
1859  *
1860  * Context:
1861  *	Kernel context.
1862  */
1863 int
1864 qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
1865 {
1866 	int rval;
1867 	mbx_cmd_t mc;
1868 	mbx_cmd_t *mcp = &mc;
1869 	struct qla_hw_data *ha = vha->hw;
1870 
1871 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104c,
1872 	    "Entered %s.\n", __func__);
1873 
1874 	if (IS_P3P_TYPE(ha) && ql2xdbwr)
1875 		qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr,
1876 			(0x04 | (ha->portnum << 5) | (0 << 8) | (0 << 16)));
1877 
1878 	if (ha->flags.npiv_supported)
1879 		mcp->mb[0] = MBC_MID_INITIALIZE_FIRMWARE;
1880 	else
1881 		mcp->mb[0] = MBC_INITIALIZE_FIRMWARE;
1882 
1883 	mcp->mb[1] = 0;
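	/*
	 * The 64-bit init control block DMA address is split across the
	 * mailbox registers: mb[2]/mb[3] carry the low 32 bits and
	 * mb[6]/mb[7] the high 32 bits.
	 */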
1884 	mcp->mb[2] = MSW(ha->init_cb_dma);
1885 	mcp->mb[3] = LSW(ha->init_cb_dma);
1886 	mcp->mb[6] = MSW(MSD(ha->init_cb_dma));
1887 	mcp->mb[7] = LSW(MSD(ha->init_cb_dma));
1888 	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
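	/*
	 * BIT_0 of mb[1] tells the firmware that an extended init control
	 * block is supplied in mb[10]-mb[13]; BIT_1 likewise points it at
	 * the secondary (SCM/NVMe-2) control block in mb[16]-mb[19].
	 */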
1889 	if (ha->ex_init_cb && ha->ex_init_cb->ex_version) {
1890 		mcp->mb[1] = BIT_0;
1891 		mcp->mb[10] = MSW(ha->ex_init_cb_dma);
1892 		mcp->mb[11] = LSW(ha->ex_init_cb_dma);
1893 		mcp->mb[12] = MSW(MSD(ha->ex_init_cb_dma));
1894 		mcp->mb[13] = LSW(MSD(ha->ex_init_cb_dma));
1895 		mcp->mb[14] = sizeof(*ha->ex_init_cb);
1896 		mcp->out_mb |= MBX_14|MBX_13|MBX_12|MBX_11|MBX_10;
1897 	}
1898 
1899 	if (ha->flags.scm_supported_f || vha->flags.nvme2_enabled) {
1900 		mcp->mb[1] |= BIT_1;
1901 		mcp->mb[16] = MSW(ha->sf_init_cb_dma);
1902 		mcp->mb[17] = LSW(ha->sf_init_cb_dma);
1903 		mcp->mb[18] = MSW(MSD(ha->sf_init_cb_dma));
1904 		mcp->mb[19] = LSW(MSD(ha->sf_init_cb_dma));
1905 		mcp->mb[15] = sizeof(*ha->sf_init_cb);
1906 		mcp->out_mb |= MBX_19|MBX_18|MBX_17|MBX_16|MBX_15;
1907 	}
1908 
1909 	/* Mailbox registers 1 and 2 should normally be captured. */
1910 	mcp->in_mb = MBX_2|MBX_1|MBX_0;
1911 	if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
1912 		/* mb3 is additional info about the installed SFP. */
1913 		mcp->in_mb  |= MBX_3;
1914 	mcp->buf_size = size;
1915 	mcp->flags = MBX_DMA_OUT;
1916 	mcp->tov = MBX_TOV_SECONDS;
1917 	rval = qla2x00_mailbox_command(vha, mcp);
1918 
1919 	if (rval != QLA_SUCCESS) {
1920 		/*EMPTY*/
1921 		ql_dbg(ql_dbg_mbx, vha, 0x104d,
1922 		    "Failed=%x mb[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x.\n",
1923 		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3]);
1924 		if (ha->init_cb) {
1925 			ql_dbg(ql_dbg_mbx, vha, 0x104d, "init_cb:\n");
1926 			ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha,
1927 			    0x0104d, ha->init_cb, sizeof(*ha->init_cb));
1928 		}
1929 		if (ha->ex_init_cb && ha->ex_init_cb->ex_version) {
1930 			ql_dbg(ql_dbg_mbx, vha, 0x104d, "ex_init_cb:\n");
1931 			ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha,
1932 			    0x0104d, ha->ex_init_cb, sizeof(*ha->ex_init_cb));
1933 		}
1934 	} else {
1935 		if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
1936 			if (mcp->mb[2] == 6 || mcp->mb[3] == 2)
1937 				ql_dbg(ql_dbg_mbx, vha, 0x119d,
1938 				    "Invalid SFP/Validation Failed\n");
1939 		}
1940 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104e,
1941 		    "Done %s.\n", __func__);
1942 	}
1943 
1944 	return rval;
1945 }
1946 
1947 
1948 /*
1949  * qla2x00_get_port_database
1950  *	Issue normal/enhanced get port database mailbox command
1951  *	and copy device name as necessary.
1952  *
1953  * Input:
1954  *	ha = adapter state pointer.
1955  *	fcport = FC port structure pointer.
1956  *	opt = enhanced cmd option byte.
1957  *
1958  * Returns:
1959  *	qla2x00 local function return status code.
1960  *
1961  * Context:
1962  *	Kernel context.
1963  */
1964 int
1965 qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
1966 {
1967 	int rval;
1968 	mbx_cmd_t mc;
1969 	mbx_cmd_t *mcp = &mc;
1970 	port_database_t *pd;
1971 	struct port_database_24xx *pd24;
1972 	dma_addr_t pd_dma;
1973 	struct qla_hw_data *ha = vha->hw;
1974 
1975 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104f,
1976 	    "Entered %s.\n", __func__);
1977 
1978 	pd24 = NULL;
1979 	pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
1980 	if (pd == NULL) {
1981 		ql_log(ql_log_warn, vha, 0x1050,
1982 		    "Failed to allocate port database structure.\n");
1983 		fcport->query = 0;
1984 		return QLA_MEMORY_ALLOC_FAILED;
1985 	}
1986 
1987 	mcp->mb[0] = MBC_GET_PORT_DATABASE;
1988 	if (opt != 0 && !IS_FWI2_CAPABLE(ha))
1989 		mcp->mb[0] = MBC_ENHANCED_GET_PORT_DATABASE;
1990 	mcp->mb[2] = MSW(pd_dma);
1991 	mcp->mb[3] = LSW(pd_dma);
1992 	mcp->mb[6] = MSW(MSD(pd_dma));
1993 	mcp->mb[7] = LSW(MSD(pd_dma));
1994 	mcp->mb[9] = vha->vp_idx;
1995 	mcp->out_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
1996 	mcp->in_mb = MBX_0;
1997 	if (IS_FWI2_CAPABLE(ha)) {
1998 		mcp->mb[1] = fcport->loop_id;
1999 		mcp->mb[10] = opt;
2000 		mcp->out_mb |= MBX_10|MBX_1;
2001 		mcp->in_mb |= MBX_1;
2002 	} else if (HAS_EXTENDED_IDS(ha)) {
2003 		mcp->mb[1] = fcport->loop_id;
2004 		mcp->mb[10] = opt;
2005 		mcp->out_mb |= MBX_10|MBX_1;
2006 	} else {
2007 		mcp->mb[1] = fcport->loop_id << 8 | opt;
2008 		mcp->out_mb |= MBX_1;
2009 	}
2010 	mcp->buf_size = IS_FWI2_CAPABLE(ha) ?
2011 	    PORT_DATABASE_24XX_SIZE : PORT_DATABASE_SIZE;
2012 	mcp->flags = MBX_DMA_IN;
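	/* Allow 2.5 times the configured login timeout for the command. */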
2013 	mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
2014 	rval = qla2x00_mailbox_command(vha, mcp);
2015 	if (rval != QLA_SUCCESS)
2016 		goto gpd_error_out;
2017 
2018 	if (IS_FWI2_CAPABLE(ha)) {
2019 		uint64_t zero = 0;
2020 		u8 current_login_state, last_login_state;
2021 
2022 		pd24 = (struct port_database_24xx *) pd;
2023 
2024 		/* Check for logged in state. */
2025 		if (NVME_TARGET(ha, fcport)) {
2026 			current_login_state = pd24->current_login_state >> 4;
2027 			last_login_state = pd24->last_login_state >> 4;
2028 		} else {
2029 			current_login_state = pd24->current_login_state & 0xf;
2030 			last_login_state = pd24->last_login_state & 0xf;
2031 		}
2032 		fcport->current_login_state = pd24->current_login_state;
2033 		fcport->last_login_state = pd24->last_login_state;
2034 
2035 		/* Check for logged in state. */
2036 		if (current_login_state != PDS_PRLI_COMPLETE &&
2037 		    last_login_state != PDS_PRLI_COMPLETE) {
2038 			ql_dbg(ql_dbg_mbx, vha, 0x119a,
2039 			    "Unable to verify login-state (%x/%x) for loop_id %x.\n",
2040 			    current_login_state, last_login_state,
2041 			    fcport->loop_id);
2042 			rval = QLA_FUNCTION_FAILED;
2043 
2044 			if (!fcport->query)
2045 				goto gpd_error_out;
2046 		}
2047 
2048 		if (fcport->loop_id == FC_NO_LOOP_ID ||
2049 		    (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
2050 		     memcmp(fcport->port_name, pd24->port_name, 8))) {
2051 			/* We lost the device mid way. */
2052 			rval = QLA_NOT_LOGGED_IN;
2053 			goto gpd_error_out;
2054 		}
2055 
2056 		/* Names are little-endian. */
2057 		memcpy(fcport->node_name, pd24->node_name, WWN_SIZE);
2058 		memcpy(fcport->port_name, pd24->port_name, WWN_SIZE);
2059 
2060 		/* Get port_id of device. */
2061 		fcport->d_id.b.domain = pd24->port_id[0];
2062 		fcport->d_id.b.area = pd24->port_id[1];
2063 		fcport->d_id.b.al_pa = pd24->port_id[2];
2064 		fcport->d_id.b.rsvd_1 = 0;
2065 
2066 		/* If not target must be initiator or unknown type. */
2067 		if ((pd24->prli_svc_param_word_3[0] & BIT_4) == 0)
2068 			fcport->port_type = FCT_INITIATOR;
2069 		else
2070 			fcport->port_type = FCT_TARGET;
2071 
2072 		/* Passback COS information. */
2073 		fcport->supported_classes = (pd24->flags & PDF_CLASS_2) ?
2074 				FC_COS_CLASS2 : FC_COS_CLASS3;
2075 
2076 		if (pd24->prli_svc_param_word_3[0] & BIT_7)
2077 			fcport->flags |= FCF_CONF_COMP_SUPPORTED;
2078 	} else {
2079 		uint64_t zero = 0;
2080 
2081 		/* Check for logged in state. */
2082 		if (pd->master_state != PD_STATE_PORT_LOGGED_IN &&
2083 		    pd->slave_state != PD_STATE_PORT_LOGGED_IN) {
2084 			ql_dbg(ql_dbg_mbx, vha, 0x100a,
2085 			    "Unable to verify login-state (%x/%x) - "
2086 			    "portid=%02x%02x%02x.\n", pd->master_state,
2087 			    pd->slave_state, fcport->d_id.b.domain,
2088 			    fcport->d_id.b.area, fcport->d_id.b.al_pa);
2089 			rval = QLA_FUNCTION_FAILED;
2090 			goto gpd_error_out;
2091 		}
2092 
2093 		if (fcport->loop_id == FC_NO_LOOP_ID ||
2094 		    (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
2095 		     memcmp(fcport->port_name, pd->port_name, 8))) {
2096 			/* We lost the device mid way. */
2097 			rval = QLA_NOT_LOGGED_IN;
2098 			goto gpd_error_out;
2099 		}
2100 
2101 		/* Names are little-endian. */
2102 		memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
2103 		memcpy(fcport->port_name, pd->port_name, WWN_SIZE);
2104 
2105 		/* Get port_id of device. */
2106 		fcport->d_id.b.domain = pd->port_id[0];
2107 		fcport->d_id.b.area = pd->port_id[3];
2108 		fcport->d_id.b.al_pa = pd->port_id[2];
2109 		fcport->d_id.b.rsvd_1 = 0;
2110 
2111 		/* If not target must be initiator or unknown type. */
2112 		if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
2113 			fcport->port_type = FCT_INITIATOR;
2114 		else
2115 			fcport->port_type = FCT_TARGET;
2116 
2117 		/* Passback COS information. */
2118 		fcport->supported_classes = (pd->options & BIT_4) ?
2119 		    FC_COS_CLASS2 : FC_COS_CLASS3;
2120 	}
2121 
2122 gpd_error_out:
2123 	dma_pool_free(ha->s_dma_pool, pd, pd_dma);
2124 	fcport->query = 0;
2125 
2126 	if (rval != QLA_SUCCESS) {
2127 		ql_dbg(ql_dbg_mbx, vha, 0x1052,
2128 		    "Failed=%x mb[0]=%x mb[1]=%x.\n", rval,
2129 		    mcp->mb[0], mcp->mb[1]);
2130 	} else {
2131 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1053,
2132 		    "Done %s.\n", __func__);
2133 	}
2134 
2135 	return rval;
2136 }
2137 
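/*
 * Illustrative usage sketch (hypothetical caller, not taken from this file):
 * the port database buffer is DMA-mapped below, so it must come from
 * kmalloc-style memory rather than the stack, e.g.:
 *
 *	struct port_database_24xx *pdb;
 *
 *	pdb = kzalloc(sizeof(*pdb), GFP_KERNEL);
 *	if (pdb && qla24xx_get_port_database(vha, fcport->loop_id,
 *	    pdb) == QLA_SUCCESS)
 *		... inspect pdb->current_login_state, pdb->port_name ...
 *	kfree(pdb);
 */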
2138 int
2139 qla24xx_get_port_database(scsi_qla_host_t *vha, u16 nport_handle,
2140 	struct port_database_24xx *pdb)
2141 {
2142 	mbx_cmd_t mc;
2143 	mbx_cmd_t *mcp = &mc;
2144 	dma_addr_t pdb_dma;
2145 	int rval;
2146 
2147 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1115,
2148 	    "Entered %s.\n", __func__);
2149 
2150 	memset(pdb, 0, sizeof(*pdb));
2151 
2152 	pdb_dma = dma_map_single(&vha->hw->pdev->dev, pdb,
2153 	    sizeof(*pdb), DMA_FROM_DEVICE);
2154 	if (dma_mapping_error(&vha->hw->pdev->dev, pdb_dma)) {
2155 		ql_log(ql_log_warn, vha, 0x1116, "Failed to map dma buffer.\n");
2156 		return QLA_MEMORY_ALLOC_FAILED;
2157 	}
2158 
2159 	mcp->mb[0] = MBC_GET_PORT_DATABASE;
2160 	mcp->mb[1] = nport_handle;
2161 	mcp->mb[2] = MSW(LSD(pdb_dma));
2162 	mcp->mb[3] = LSW(LSD(pdb_dma));
2163 	mcp->mb[6] = MSW(MSD(pdb_dma));
2164 	mcp->mb[7] = LSW(MSD(pdb_dma));
2165 	mcp->mb[9] = 0;
2166 	mcp->mb[10] = 0;
2167 	mcp->out_mb = MBX_10|MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
2168 	mcp->in_mb = MBX_1|MBX_0;
2169 	mcp->buf_size = sizeof(*pdb);
2170 	mcp->flags = MBX_DMA_IN;
2171 	mcp->tov = vha->hw->login_timeout * 2;
2172 	rval = qla2x00_mailbox_command(vha, mcp);
2173 
2174 	if (rval != QLA_SUCCESS) {
2175 		ql_dbg(ql_dbg_mbx, vha, 0x111a,
2176 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
2177 		    rval, mcp->mb[0], mcp->mb[1]);
2178 	} else {
2179 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111b,
2180 		    "Done %s.\n", __func__);
2181 	}
2182 
2183 	dma_unmap_single(&vha->hw->pdev->dev, pdb_dma,
2184 	    sizeof(*pdb), DMA_FROM_DEVICE);
2185 
2186 	return rval;
2187 }
2188 
2189 /*
2190  * qla2x00_get_firmware_state
2191  *	Get adapter firmware state.
2192  *
2193  * Input:
2194  *	ha = adapter block pointer.
2195  *	states = pointer to array for the returned firmware state words.
2196  *	TARGET_QUEUE_LOCK must be released.
2197  *	ADAPTER_STATE_LOCK must be released.
2198  *
2199  * Returns:
2200  *	qla2x00 local function return status code.
2201  *
2202  * Context:
2203  *	Kernel context.
2204  */
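/*
 * Illustrative sketch of a hypothetical caller (not taken from this file):
 * the states array must have room for the six words filled in on
 * ISP24xx-and-later parts, e.g.:
 *
 *	uint16_t state[6] = { 0 };
 *
 *	if (qla2x00_get_firmware_state(vha, state) == QLA_SUCCESS)
 *		... state[0] holds the firmware state, state[2] the SFP info ...
 */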
2205 int
2206 qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
2207 {
2208 	int rval;
2209 	mbx_cmd_t mc;
2210 	mbx_cmd_t *mcp = &mc;
2211 	struct qla_hw_data *ha = vha->hw;
2212 
2213 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1054,
2214 	    "Entered %s.\n", __func__);
2215 
2216 	if (!ha->flags.fw_started)
2217 		return QLA_FUNCTION_FAILED;
2218 
2219 	mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
2220 	mcp->out_mb = MBX_0;
2221 	if (IS_FWI2_CAPABLE(vha->hw))
2222 		mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
2223 	else
2224 		mcp->in_mb = MBX_1|MBX_0;
2225 	mcp->tov = MBX_TOV_SECONDS;
2226 	mcp->flags = 0;
2227 	rval = qla2x00_mailbox_command(vha, mcp);
2228 
2229 	/* Return firmware states. */
2230 	states[0] = mcp->mb[1];
2231 	if (IS_FWI2_CAPABLE(vha->hw)) {
2232 		states[1] = mcp->mb[2];
2233 		states[2] = mcp->mb[3];  /* SFP info */
2234 		states[3] = mcp->mb[4];
2235 		states[4] = mcp->mb[5];
2236 		states[5] = mcp->mb[6];  /* DPORT status */
2237 	}
2238 
2239 	if (rval != QLA_SUCCESS) {
2240 		/*EMPTY*/
2241 		ql_dbg(ql_dbg_mbx, vha, 0x1055, "Failed=%x.\n", rval);
2242 	} else {
2243 		if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
2244 			if (mcp->mb[2] == 6 || mcp->mb[3] == 2)
2245 				ql_dbg(ql_dbg_mbx, vha, 0x119e,
2246 				    "Invalid SFP/Validation Failed\n");
2247 		}
2248 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1056,
2249 		    "Done %s.\n", __func__);
2250 	}
2251 
2252 	return rval;
2253 }
2254 
2255 /*
2256  * qla2x00_get_port_name
2257  *	Issue get port name mailbox command.
2258  *	Returned name is in big endian format.
2259  *
2260  * Input:
2261  *	ha = adapter block pointer.
2262  *	loop_id = loop ID of device.
2263  *	name = pointer for name.
2264  *	TARGET_QUEUE_LOCK must be released.
2265  *	ADAPTER_STATE_LOCK must be released.
2266  *
2267  * Returns:
2268  *	qla2x00 local function return status code.
2269  *
2270  * Context:
2271  *	Kernel context.
2272  */
2273 int
2274 qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name,
2275     uint8_t opt)
2276 {
2277 	int rval;
2278 	mbx_cmd_t mc;
2279 	mbx_cmd_t *mcp = &mc;
2280 
2281 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1057,
2282 	    "Entered %s.\n", __func__);
2283 
2284 	mcp->mb[0] = MBC_GET_PORT_NAME;
2285 	mcp->mb[9] = vha->vp_idx;
2286 	mcp->out_mb = MBX_9|MBX_1|MBX_0;
2287 	if (HAS_EXTENDED_IDS(vha->hw)) {
2288 		mcp->mb[1] = loop_id;
2289 		mcp->mb[10] = opt;
2290 		mcp->out_mb |= MBX_10;
2291 	} else {
2292 		mcp->mb[1] = loop_id << 8 | opt;
2293 	}
2294 
2295 	mcp->in_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
2296 	mcp->tov = MBX_TOV_SECONDS;
2297 	mcp->flags = 0;
2298 	rval = qla2x00_mailbox_command(vha, mcp);
2299 
2300 	if (rval != QLA_SUCCESS) {
2301 		/*EMPTY*/
2302 		ql_dbg(ql_dbg_mbx, vha, 0x1058, "Failed=%x.\n", rval);
2303 	} else {
2304 		if (name != NULL) {
2305 			/* This function returns name in big endian. */
2306 			name[0] = MSB(mcp->mb[2]);
2307 			name[1] = LSB(mcp->mb[2]);
2308 			name[2] = MSB(mcp->mb[3]);
2309 			name[3] = LSB(mcp->mb[3]);
2310 			name[4] = MSB(mcp->mb[6]);
2311 			name[5] = LSB(mcp->mb[6]);
2312 			name[6] = MSB(mcp->mb[7]);
2313 			name[7] = LSB(mcp->mb[7]);
2314 		}
2315 
2316 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1059,
2317 		    "Done %s.\n", __func__);
2318 	}
2319 
2320 	return rval;
2321 }
2322 
2323 /*
2324  * qla24xx_link_initialize
2325  *	Issue link initialization mailbox command.
2326  *
2327  * Input:
2328  *	ha = adapter block pointer.
2329  *	TARGET_QUEUE_LOCK must be released.
2330  *	ADAPTER_STATE_LOCK must be released.
2331  *
2332  * Returns:
2333  *	qla2x00 local function return status code.
2334  *
2335  * Context:
2336  *	Kernel context.
2337  */
2338 int
2339 qla24xx_link_initialize(scsi_qla_host_t *vha)
2340 {
2341 	int rval;
2342 	mbx_cmd_t mc;
2343 	mbx_cmd_t *mcp = &mc;
2344 
2345 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1152,
2346 	    "Entered %s.\n", __func__);
2347 
2348 	if (!IS_FWI2_CAPABLE(vha->hw) || IS_CNA_CAPABLE(vha->hw))
2349 		return QLA_FUNCTION_FAILED;
2350 
2351 	mcp->mb[0] = MBC_LINK_INITIALIZATION;
2352 	mcp->mb[1] = BIT_4;
2353 	if (vha->hw->operating_mode == LOOP)
2354 		mcp->mb[1] |= BIT_6;
2355 	else
2356 		mcp->mb[1] |= BIT_5;
2357 	mcp->mb[2] = 0;
2358 	mcp->mb[3] = 0;
2359 	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2360 	mcp->in_mb = MBX_0;
2361 	mcp->tov = MBX_TOV_SECONDS;
2362 	mcp->flags = 0;
2363 	rval = qla2x00_mailbox_command(vha, mcp);
2364 
2365 	if (rval != QLA_SUCCESS) {
2366 		ql_dbg(ql_dbg_mbx, vha, 0x1153, "Failed=%x.\n", rval);
2367 	} else {
2368 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1154,
2369 		    "Done %s.\n", __func__);
2370 	}
2371 
2372 	return rval;
2373 }
2374 
2375 /*
2376  * qla2x00_lip_reset
2377  *	Issue LIP reset mailbox command.
2378  *
2379  * Input:
2380  *	ha = adapter block pointer.
2381  *	TARGET_QUEUE_LOCK must be released.
2382  *	ADAPTER_STATE_LOCK must be released.
2383  *
2384  * Returns:
2385  *	qla2x00 local function return status code.
2386  *
2387  * Context:
2388  *	Kernel context.
2389  */
2390 int
2391 qla2x00_lip_reset(scsi_qla_host_t *vha)
2392 {
2393 	int rval;
2394 	mbx_cmd_t mc;
2395 	mbx_cmd_t *mcp = &mc;
2396 
2397 	ql_dbg(ql_dbg_disc, vha, 0x105a,
2398 	    "Entered %s.\n", __func__);
2399 
2400 	if (IS_CNA_CAPABLE(vha->hw)) {
2401 		/* Logout across all FCFs. */
2402 		mcp->mb[0] = MBC_LIP_FULL_LOGIN;
2403 		mcp->mb[1] = BIT_1;
2404 		mcp->mb[2] = 0;
2405 		mcp->out_mb = MBX_2|MBX_1|MBX_0;
2406 	} else if (IS_FWI2_CAPABLE(vha->hw)) {
2407 		mcp->mb[0] = MBC_LIP_FULL_LOGIN;
2408 		mcp->mb[1] = BIT_4;
2409 		mcp->mb[2] = 0;
2410 		mcp->mb[3] = vha->hw->loop_reset_delay;
2411 		mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2412 	} else {
2413 		mcp->mb[0] = MBC_LIP_RESET;
2414 		mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2415 		if (HAS_EXTENDED_IDS(vha->hw)) {
2416 			mcp->mb[1] = 0x00ff;
2417 			mcp->mb[10] = 0;
2418 			mcp->out_mb |= MBX_10;
2419 		} else {
2420 			mcp->mb[1] = 0xff00;
2421 		}
2422 		mcp->mb[2] = vha->hw->loop_reset_delay;
2423 		mcp->mb[3] = 0;
2424 	}
2425 	mcp->in_mb = MBX_0;
2426 	mcp->tov = MBX_TOV_SECONDS;
2427 	mcp->flags = 0;
2428 	rval = qla2x00_mailbox_command(vha, mcp);
2429 
2430 	if (rval != QLA_SUCCESS) {
2431 		/*EMPTY*/
2432 		ql_dbg(ql_dbg_mbx, vha, 0x105b, "Failed=%x.\n", rval);
2433 	} else {
2434 		/*EMPTY*/
2435 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105c,
2436 		    "Done %s.\n", __func__);
2437 	}
2438 
2439 	return rval;
2440 }
2441 
2442 /*
2443  * qla2x00_send_sns
2444  *	Send SNS command.
2445  *
2446  * Input:
2447  *	ha = adapter block pointer.
2448  *	sns_phys_address = DMA address of the SNS command/response buffer.
2449  *	cmd_size = command size.
2450  *	buf_size = response/command size.
2451  *	TARGET_QUEUE_LOCK must be released.
2452  *	ADAPTER_STATE_LOCK must be released.
2453  *
2454  * Returns:
2455  *	qla2x00 local function return status code.
2456  *
2457  * Context:
2458  *	Kernel context.
2459  */
2460 int
2461 qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address,
2462     uint16_t cmd_size, size_t buf_size)
2463 {
2464 	int rval;
2465 	mbx_cmd_t mc;
2466 	mbx_cmd_t *mcp = &mc;
2467 
2468 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105d,
2469 	    "Entered %s.\n", __func__);
2470 
	/* Set the timeout before it is logged below. */
	mcp->tov = (vha->hw->login_timeout * 2) + (vha->hw->login_timeout / 2);
2471 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105e,
2472 	    "Retry cnt=%d ratov=%d total tov=%d.\n",
2473 	    vha->hw->retry_count, vha->hw->login_timeout, mcp->tov);
2474 
2475 	mcp->mb[0] = MBC_SEND_SNS_COMMAND;
2476 	mcp->mb[1] = cmd_size;
2477 	mcp->mb[2] = MSW(sns_phys_address);
2478 	mcp->mb[3] = LSW(sns_phys_address);
2479 	mcp->mb[6] = MSW(MSD(sns_phys_address));
2480 	mcp->mb[7] = LSW(MSD(sns_phys_address));
2481 	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
2482 	mcp->in_mb = MBX_1|MBX_0;
2483 	mcp->buf_size = buf_size;
2484 	mcp->flags = MBX_DMA_OUT|MBX_DMA_IN;
2486 	rval = qla2x00_mailbox_command(vha, mcp);
2487 
2488 	if (rval != QLA_SUCCESS) {
2489 		/*EMPTY*/
2490 		ql_dbg(ql_dbg_mbx, vha, 0x105f,
2491 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
2492 		    rval, mcp->mb[0], mcp->mb[1]);
2493 	} else {
2494 		/*EMPTY*/
2495 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1060,
2496 		    "Done %s.\n", __func__);
2497 	}
2498 
2499 	return rval;
2500 }
2501 
2502 int
2503 qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2504     uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt)
2505 {
2506 	int		rval;
2507 
2508 	struct logio_entry_24xx *lg;
2509 	dma_addr_t	lg_dma;
2510 	uint32_t	iop[2];
2511 	struct qla_hw_data *ha = vha->hw;
2512 	struct req_que *req;
2513 
2514 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1061,
2515 	    "Entered %s.\n", __func__);
2516 
2517 	if (vha->vp_idx && vha->qpair)
2518 		req = vha->qpair->req;
2519 	else
2520 		req = ha->req_q_map[0];
2521 
2522 	lg = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
2523 	if (lg == NULL) {
2524 		ql_log(ql_log_warn, vha, 0x1062,
2525 		    "Failed to allocate login IOCB.\n");
2526 		return QLA_MEMORY_ALLOC_FAILED;
2527 	}
2528 
2529 	lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2530 	lg->entry_count = 1;
2531 	lg->handle = make_handle(req->id, lg->handle);
2532 	lg->nport_handle = cpu_to_le16(loop_id);
2533 	lg->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
2534 	if (opt & BIT_0)
2535 		lg->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
2536 	if (opt & BIT_1)
2537 		lg->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
2538 	lg->port_id[0] = al_pa;
2539 	lg->port_id[1] = area;
2540 	lg->port_id[2] = domain;
2541 	lg->vp_index = vha->vp_idx;
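	/*
	 * ha->r_a_tov appears to be kept in 100 ms units, so the login
	 * IOCB is allowed roughly twice R_A_TOV (in seconds) plus two
	 * seconds of slack to complete.
	 */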
2542 	rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0,
2543 	    (ha->r_a_tov / 10 * 2) + 2);
2544 	if (rval != QLA_SUCCESS) {
2545 		ql_dbg(ql_dbg_mbx, vha, 0x1063,
2546 		    "Failed to issue login IOCB (%x).\n", rval);
2547 	} else if (lg->entry_status != 0) {
2548 		ql_dbg(ql_dbg_mbx, vha, 0x1064,
2549 		    "Failed to complete IOCB -- error status (%x).\n",
2550 		    lg->entry_status);
2551 		rval = QLA_FUNCTION_FAILED;
2552 	} else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) {
2553 		iop[0] = le32_to_cpu(lg->io_parameter[0]);
2554 		iop[1] = le32_to_cpu(lg->io_parameter[1]);
2555 
2556 		ql_dbg(ql_dbg_mbx, vha, 0x1065,
2557 		    "Failed to complete IOCB -- completion status (%x) "
2558 		    "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
2559 		    iop[0], iop[1]);
2560 
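		/*
		 * Map the login IOCB status sub-code onto the legacy
		 * mailbox status values that callers of this interface
		 * expect.
		 */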
2561 		switch (iop[0]) {
2562 		case LSC_SCODE_PORTID_USED:
2563 			mb[0] = MBS_PORT_ID_USED;
2564 			mb[1] = LSW(iop[1]);
2565 			break;
2566 		case LSC_SCODE_NPORT_USED:
2567 			mb[0] = MBS_LOOP_ID_USED;
2568 			break;
2569 		case LSC_SCODE_NOLINK:
2570 		case LSC_SCODE_NOIOCB:
2571 		case LSC_SCODE_NOXCB:
2572 		case LSC_SCODE_CMD_FAILED:
2573 		case LSC_SCODE_NOFABRIC:
2574 		case LSC_SCODE_FW_NOT_READY:
2575 		case LSC_SCODE_NOT_LOGGED_IN:
2576 		case LSC_SCODE_NOPCB:
2577 		case LSC_SCODE_ELS_REJECT:
2578 		case LSC_SCODE_CMD_PARAM_ERR:
2579 		case LSC_SCODE_NONPORT:
2580 		case LSC_SCODE_LOGGED_IN:
2581 		case LSC_SCODE_NOFLOGI_ACC:
2582 		default:
2583 			mb[0] = MBS_COMMAND_ERROR;
2584 			break;
2585 		}
2586 	} else {
2587 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1066,
2588 		    "Done %s.\n", __func__);
2589 
2590 		iop[0] = le32_to_cpu(lg->io_parameter[0]);
2591 
2592 		mb[0] = MBS_COMMAND_COMPLETE;
2593 		mb[1] = 0;
2594 		if (iop[0] & BIT_4) {
2595 			if (iop[0] & BIT_8)
2596 				mb[1] |= BIT_1;
2597 		} else
2598 			mb[1] = BIT_0;
2599 
2600 		/* Passback COS information. */
2601 		mb[10] = 0;
2602 		if (lg->io_parameter[7] || lg->io_parameter[8])
2603 			mb[10] |= BIT_0;	/* Class 2. */
2604 		if (lg->io_parameter[9] || lg->io_parameter[10])
2605 			mb[10] |= BIT_1;	/* Class 3. */
2606 		if (lg->io_parameter[0] & cpu_to_le32(BIT_7))
2607 			mb[10] |= BIT_7;	/* Confirmed Completion
2608 						 * Allowed
2609 						 */
2610 	}
2611 
2612 	dma_pool_free(ha->s_dma_pool, lg, lg_dma);
2613 
2614 	return rval;
2615 }
2616 
2617 /*
2618  * qla2x00_login_fabric
2619  *	Issue login fabric port mailbox command.
2620  *
2621  * Input:
2622  *	ha = adapter block pointer.
2623  *	loop_id = device loop ID.
2624  *	domain = device domain.
2625  *	area = device area.
2626  *	al_pa = device AL_PA.
2627  *	status = pointer for return status.
2628  *	opt = command options.
2629  *	TARGET_QUEUE_LOCK must be released.
2630  *	ADAPTER_STATE_LOCK must be released.
2631  *
2632  * Returns:
2633  *	qla2x00 local function return status code.
2634  *
2635  * Context:
2636  *	Kernel context.
2637  */
2638 int
2639 qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2640     uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt)
2641 {
2642 	int rval;
2643 	mbx_cmd_t mc;
2644 	mbx_cmd_t *mcp = &mc;
2645 	struct qla_hw_data *ha = vha->hw;
2646 
2647 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1067,
2648 	    "Entered %s.\n", __func__);
2649 
2650 	mcp->mb[0] = MBC_LOGIN_FABRIC_PORT;
2651 	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2652 	if (HAS_EXTENDED_IDS(ha)) {
2653 		mcp->mb[1] = loop_id;
2654 		mcp->mb[10] = opt;
2655 		mcp->out_mb |= MBX_10;
2656 	} else {
2657 		mcp->mb[1] = (loop_id << 8) | opt;
2658 	}
2659 	mcp->mb[2] = domain;
2660 	mcp->mb[3] = area << 8 | al_pa;
2661 
2662 	mcp->in_mb = MBX_7|MBX_6|MBX_2|MBX_1|MBX_0;
2663 	mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
2664 	mcp->flags = 0;
2665 	rval = qla2x00_mailbox_command(vha, mcp);
2666 
2667 	/* Return mailbox statuses. */
2668 	if (mb != NULL) {
2669 		mb[0] = mcp->mb[0];
2670 		mb[1] = mcp->mb[1];
2671 		mb[2] = mcp->mb[2];
2672 		mb[6] = mcp->mb[6];
2673 		mb[7] = mcp->mb[7];
2674 		/* COS retrieved from Get-Port-Database mailbox command. */
2675 		mb[10] = 0;
2676 	}
2677 
2678 	if (rval != QLA_SUCCESS) {
2679 		/* RLU tmp code: need to change the main mailbox_command
2680 		 * function to return ok even when the mailbox completion
2681 		 * value is not SUCCESS. The caller is responsible for
2682 		 * interpreting the return values of this mailbox command
2683 		 * if we are not to change too much of the existing code.
2684 		 */
2685 		if (mcp->mb[0] == 0x4001 || mcp->mb[0] == 0x4002 ||
2686 		    mcp->mb[0] == 0x4003 || mcp->mb[0] == 0x4005 ||
2687 		    mcp->mb[0] == 0x4006)
2688 			rval = QLA_SUCCESS;
2689 
2690 		/*EMPTY*/
2691 		ql_dbg(ql_dbg_mbx, vha, 0x1068,
2692 		    "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
2693 		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
2694 	} else {
2695 		/*EMPTY*/
2696 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1069,
2697 		    "Done %s.\n", __func__);
2698 	}
2699 
2700 	return rval;
2701 }
2702 
2703 /*
2704  * qla2x00_login_local_device
2705  *	Issue login loop port mailbox command.
2706  *
2707  * Input:
2708  *	ha = adapter block pointer.
2709  *	loop_id = device loop ID.
2710  *	opt = command options.
2711  *
2712  * Returns:
2713  *	Return status code.
2714  *
2715  * Context:
2716  *	Kernel context.
2717  *
2718  */
2719 int
2720 qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport,
2721     uint16_t *mb_ret, uint8_t opt)
2722 {
2723 	int rval;
2724 	mbx_cmd_t mc;
2725 	mbx_cmd_t *mcp = &mc;
2726 	struct qla_hw_data *ha = vha->hw;
2727 
2728 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106a,
2729 	    "Entered %s.\n", __func__);
2730 
2731 	if (IS_FWI2_CAPABLE(ha))
2732 		return qla24xx_login_fabric(vha, fcport->loop_id,
2733 		    fcport->d_id.b.domain, fcport->d_id.b.area,
2734 		    fcport->d_id.b.al_pa, mb_ret, opt);
2735 
2736 	mcp->mb[0] = MBC_LOGIN_LOOP_PORT;
2737 	if (HAS_EXTENDED_IDS(ha))
2738 		mcp->mb[1] = fcport->loop_id;
2739 	else
2740 		mcp->mb[1] = fcport->loop_id << 8;
2741 	mcp->mb[2] = opt;
2742 	mcp->out_mb = MBX_2|MBX_1|MBX_0;
2743 	mcp->in_mb = MBX_7|MBX_6|MBX_1|MBX_0;
2744 	mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
2745 	mcp->flags = 0;
2746 	rval = qla2x00_mailbox_command(vha, mcp);
2747 
2748 	/* Return mailbox statuses. */
2749 	if (mb_ret != NULL) {
2750 		mb_ret[0] = mcp->mb[0];
2751 		mb_ret[1] = mcp->mb[1];
2752 		mb_ret[6] = mcp->mb[6];
2753 		mb_ret[7] = mcp->mb[7];
2754 	}
2755 
2756 	if (rval != QLA_SUCCESS) {
2757 		/* AV tmp code: need to change the main mailbox_command
2758 		 * function to return ok even when the mailbox completion
2759 		 * value is not SUCCESS. The caller is responsible for
2760 		 * interpreting the return values of this mailbox command
2761 		 * if we are not to change too much of the existing code.
2762 		 */
2763 		if (mcp->mb[0] == 0x4005 || mcp->mb[0] == 0x4006)
2764 			rval = QLA_SUCCESS;
2765 
2766 		ql_dbg(ql_dbg_mbx, vha, 0x106b,
2767 		    "Failed=%x mb[0]=%x mb[1]=%x mb[6]=%x mb[7]=%x.\n",
2768 		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]);
2769 	} else {
2770 		/*EMPTY*/
2771 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106c,
2772 		    "Done %s.\n", __func__);
2773 	}
2774 
2775 	return rval;
2776 }
2777 
2778 int
2779 qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2780     uint8_t area, uint8_t al_pa)
2781 {
2782 	int		rval;
2783 	struct logio_entry_24xx *lg;
2784 	dma_addr_t	lg_dma;
2785 	struct qla_hw_data *ha = vha->hw;
2786 	struct req_que *req;
2787 
2788 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106d,
2789 	    "Entered %s.\n", __func__);
2790 
2791 	lg = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
2792 	if (lg == NULL) {
2793 		ql_log(ql_log_warn, vha, 0x106e,
2794 		    "Failed to allocate logout IOCB.\n");
2795 		return QLA_MEMORY_ALLOC_FAILED;
2796 	}
2797 
2798 	req = vha->req;
2799 	lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2800 	lg->entry_count = 1;
2801 	lg->handle = make_handle(req->id, lg->handle);
2802 	lg->nport_handle = cpu_to_le16(loop_id);
2803 	lg->control_flags =
2804 	    cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO|
2805 		LCF_FREE_NPORT);
2806 	lg->port_id[0] = al_pa;
2807 	lg->port_id[1] = area;
2808 	lg->port_id[2] = domain;
2809 	lg->vp_index = vha->vp_idx;
2810 	rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0,
2811 	    (ha->r_a_tov / 10 * 2) + 2);
2812 	if (rval != QLA_SUCCESS) {
2813 		ql_dbg(ql_dbg_mbx, vha, 0x106f,
2814 		    "Failed to issue logout IOCB (%x).\n", rval);
2815 	} else if (lg->entry_status != 0) {
2816 		ql_dbg(ql_dbg_mbx, vha, 0x1070,
2817 		    "Failed to complete IOCB -- error status (%x).\n",
2818 		    lg->entry_status);
2819 		rval = QLA_FUNCTION_FAILED;
2820 	} else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) {
2821 		ql_dbg(ql_dbg_mbx, vha, 0x1071,
2822 		    "Failed to complete IOCB -- completion status (%x) "
2823 		    "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
2824 		    le32_to_cpu(lg->io_parameter[0]),
2825 		    le32_to_cpu(lg->io_parameter[1]));
2826 	} else {
2827 		/*EMPTY*/
2828 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1072,
2829 		    "Done %s.\n", __func__);
2830 	}
2831 
2832 	dma_pool_free(ha->s_dma_pool, lg, lg_dma);
2833 
2834 	return rval;
2835 }
2836 
2837 /*
2838  * qla2x00_fabric_logout
2839  *	Issue logout fabric port mailbox command.
2840  *
2841  * Input:
2842  *	ha = adapter block pointer.
2843  *	loop_id = device loop ID.
2844  *	TARGET_QUEUE_LOCK must be released.
2845  *	ADAPTER_STATE_LOCK must be released.
2846  *
2847  * Returns:
2848  *	qla2x00 local function return status code.
2849  *
2850  * Context:
2851  *	Kernel context.
2852  */
2853 int
2854 qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2855     uint8_t area, uint8_t al_pa)
2856 {
2857 	int rval;
2858 	mbx_cmd_t mc;
2859 	mbx_cmd_t *mcp = &mc;
2860 
2861 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1073,
2862 	    "Entered %s.\n", __func__);
2863 
2864 	mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT;
2865 	mcp->out_mb = MBX_1|MBX_0;
2866 	if (HAS_EXTENDED_IDS(vha->hw)) {
2867 		mcp->mb[1] = loop_id;
2868 		mcp->mb[10] = 0;
2869 		mcp->out_mb |= MBX_10;
2870 	} else {
2871 		mcp->mb[1] = loop_id << 8;
2872 	}
2873 
2874 	mcp->in_mb = MBX_1|MBX_0;
2875 	mcp->tov = MBX_TOV_SECONDS;
2876 	mcp->flags = 0;
2877 	rval = qla2x00_mailbox_command(vha, mcp);
2878 
2879 	if (rval != QLA_SUCCESS) {
2880 		/*EMPTY*/
2881 		ql_dbg(ql_dbg_mbx, vha, 0x1074,
2882 		    "Failed=%x mb[1]=%x.\n", rval, mcp->mb[1]);
2883 	} else {
2884 		/*EMPTY*/
2885 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1075,
2886 		    "Done %s.\n", __func__);
2887 	}
2888 
2889 	return rval;
2890 }
2891 
2892 /*
2893  * qla2x00_full_login_lip
2894  *	Issue full login LIP mailbox command.
2895  *
2896  * Input:
2897  *	ha = adapter block pointer.
2898  *	TARGET_QUEUE_LOCK must be released.
2899  *	ADAPTER_STATE_LOCK must be released.
2900  *
2901  * Returns:
2902  *	qla2x00 local function return status code.
2903  *
2904  * Context:
2905  *	Kernel context.
2906  */
2907 int
2908 qla2x00_full_login_lip(scsi_qla_host_t *vha)
2909 {
2910 	int rval;
2911 	mbx_cmd_t mc;
2912 	mbx_cmd_t *mcp = &mc;
2913 
2914 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1076,
2915 	    "Entered %s.\n", __func__);
2916 
2917 	mcp->mb[0] = MBC_LIP_FULL_LOGIN;
2918 	mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? BIT_4 : 0;
2919 	mcp->mb[2] = 0;
2920 	mcp->mb[3] = 0;
2921 	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2922 	mcp->in_mb = MBX_0;
2923 	mcp->tov = MBX_TOV_SECONDS;
2924 	mcp->flags = 0;
2925 	rval = qla2x00_mailbox_command(vha, mcp);
2926 
2927 	if (rval != QLA_SUCCESS) {
2928 		/*EMPTY*/
2929 		ql_dbg(ql_dbg_mbx, vha, 0x1077, "Failed=%x.\n", rval);
2930 	} else {
2931 		/*EMPTY*/
2932 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1078,
2933 		    "Done %s.\n", __func__);
2934 	}
2935 
2936 	return rval;
2937 }
2938 
2939 /*
2940  * qla2x00_get_id_list
2941  *	Issue get ID list mailbox command.
 *
2942  * Input:
2943  *	ha = adapter block pointer.
2944  *
2945  * Returns:
2946  *	qla2x00 local function return status code.
2947  *
2948  * Context:
2949  *	Kernel context.
2950  */
2951 int
2952 qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma,
2953     uint16_t *entries)
2954 {
2955 	int rval;
2956 	mbx_cmd_t mc;
2957 	mbx_cmd_t *mcp = &mc;
2958 
2959 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1079,
2960 	    "Entered %s.\n", __func__);
2961 
2962 	if (id_list == NULL)
2963 		return QLA_FUNCTION_FAILED;
2964 
2965 	mcp->mb[0] = MBC_GET_ID_LIST;
2966 	mcp->out_mb = MBX_0;
2967 	if (IS_FWI2_CAPABLE(vha->hw)) {
2968 		mcp->mb[2] = MSW(id_list_dma);
2969 		mcp->mb[3] = LSW(id_list_dma);
2970 		mcp->mb[6] = MSW(MSD(id_list_dma));
2971 		mcp->mb[7] = LSW(MSD(id_list_dma));
2972 		mcp->mb[8] = 0;
2973 		mcp->mb[9] = vha->vp_idx;
2974 		mcp->out_mb |= MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2;
2975 	} else {
2976 		mcp->mb[1] = MSW(id_list_dma);
2977 		mcp->mb[2] = LSW(id_list_dma);
2978 		mcp->mb[3] = MSW(MSD(id_list_dma));
2979 		mcp->mb[6] = LSW(MSD(id_list_dma));
2980 		mcp->out_mb |= MBX_6|MBX_3|MBX_2|MBX_1;
2981 	}
2982 	mcp->in_mb = MBX_1|MBX_0;
2983 	mcp->tov = MBX_TOV_SECONDS;
2984 	mcp->flags = 0;
2985 	rval = qla2x00_mailbox_command(vha, mcp);
2986 
2987 	if (rval != QLA_SUCCESS) {
2988 		/*EMPTY*/
2989 		ql_dbg(ql_dbg_mbx, vha, 0x107a, "Failed=%x.\n", rval);
2990 	} else {
2991 		*entries = mcp->mb[1];
2992 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107b,
2993 		    "Done %s.\n", __func__);
2994 	}
2995 
2996 	return rval;
2997 }
2998 
2999 /*
3000  * qla2x00_get_resource_cnts
3001  *	Get current firmware resource counts.
3002  *
3003  * Input:
3004  *	ha = adapter block pointer.
3005  *
3006  * Returns:
3007  *	qla2x00 local function return status code.
3008  *
3009  * Context:
3010  *	Kernel context.
3011  */
3012 int
3013 qla2x00_get_resource_cnts(scsi_qla_host_t *vha)
3014 {
3015 	struct qla_hw_data *ha = vha->hw;
3016 	int rval;
3017 	mbx_cmd_t mc;
3018 	mbx_cmd_t *mcp = &mc;
3019 
3020 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107c,
3021 	    "Entered %s.\n", __func__);
3022 
3023 	mcp->mb[0] = MBC_GET_RESOURCE_COUNTS;
3024 	mcp->out_mb = MBX_0;
3025 	mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
3026 	if (IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
3027 	    IS_QLA27XX(ha) || IS_QLA28XX(ha))
3028 		mcp->in_mb |= MBX_12;
3029 	mcp->tov = MBX_TOV_SECONDS;
3030 	mcp->flags = 0;
3031 	rval = qla2x00_mailbox_command(vha, mcp);
3032 
3033 	if (rval != QLA_SUCCESS) {
3034 		/*EMPTY*/
3035 		ql_dbg(ql_dbg_mbx, vha, 0x107d,
3036 		    "Failed mb[0]=%x.\n", mcp->mb[0]);
3037 	} else {
3038 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107e,
3039 		    "Done %s mb1=%x mb2=%x mb3=%x mb6=%x mb7=%x mb10=%x "
3040 		    "mb11=%x mb12=%x.\n", __func__, mcp->mb[1], mcp->mb[2],
3041 		    mcp->mb[3], mcp->mb[6], mcp->mb[7], mcp->mb[10],
3042 		    mcp->mb[11], mcp->mb[12]);
3043 
3044 		ha->orig_fw_tgt_xcb_count = mcp->mb[1];
3045 		ha->cur_fw_tgt_xcb_count = mcp->mb[2];
3046 		ha->cur_fw_xcb_count = mcp->mb[3];
3047 		ha->orig_fw_xcb_count = mcp->mb[6];
3048 		ha->cur_fw_iocb_count = mcp->mb[7];
3049 		ha->orig_fw_iocb_count = mcp->mb[10];
3050 		if (ha->flags.npiv_supported)
3051 			ha->max_npiv_vports = mcp->mb[11];
3052 		if (IS_QLA81XX(ha) || IS_QLA83XX(ha))
3053 			ha->fw_max_fcf_count = mcp->mb[12];
3054 	}
3055 
3056 	return rval;
3057 }
3058 
3059 /*
3060  * qla2x00_get_fcal_position_map
3061  *	Get FCAL (LILP) position map using mailbox command
3062  *
3063  * Input:
3064  *	ha = adapter state pointer.
3065  *	pos_map = buffer pointer (can be NULL).
3066  *
3067  * Returns:
3068  *	qla2x00 local function return status code.
3069  *
3070  * Context:
3071  *	Kernel context.
3072  */
3073 int
3074 qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map,
3075 		u8 *num_entries)
3076 {
3077 	int rval;
3078 	mbx_cmd_t mc;
3079 	mbx_cmd_t *mcp = &mc;
3080 	char *pmap;
3081 	dma_addr_t pmap_dma;
3082 	struct qla_hw_data *ha = vha->hw;
3083 
3084 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107f,
3085 	    "Entered %s.\n", __func__);
3086 
3087 	pmap = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma);
3088 	if (pmap == NULL) {
3089 		ql_log(ql_log_warn, vha, 0x1080,
3090 		    "Memory alloc failed.\n");
3091 		return QLA_MEMORY_ALLOC_FAILED;
3092 	}
3093 
3094 	mcp->mb[0] = MBC_GET_FC_AL_POSITION_MAP;
3095 	mcp->mb[2] = MSW(pmap_dma);
3096 	mcp->mb[3] = LSW(pmap_dma);
3097 	mcp->mb[6] = MSW(MSD(pmap_dma));
3098 	mcp->mb[7] = LSW(MSD(pmap_dma));
3099 	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
3100 	mcp->in_mb = MBX_1|MBX_0;
3101 	mcp->buf_size = FCAL_MAP_SIZE;
3102 	mcp->flags = MBX_DMA_IN;
3103 	mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
3104 	rval = qla2x00_mailbox_command(vha, mcp);
3105 
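	/*
	 * The returned LILP map begins with a one-byte entry count
	 * followed by that many AL_PA values, hence pmap[0] + 1 bytes
	 * are dumped and pmap[0] is reported back as num_entries.
	 */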
3106 	if (rval == QLA_SUCCESS) {
3107 		ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1081,
3108 		    "mb0/mb1=%x/%X FC/AL position map size (%x).\n",
3109 		    mcp->mb[0], mcp->mb[1], (unsigned)pmap[0]);
3110 		ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111d,
3111 		    pmap, pmap[0] + 1);
3112 
3113 		if (pos_map)
3114 			memcpy(pos_map, pmap, FCAL_MAP_SIZE);
3115 		if (num_entries)
3116 			*num_entries = pmap[0];
3117 	}
3118 	dma_pool_free(ha->s_dma_pool, pmap, pmap_dma);
3119 
3120 	if (rval != QLA_SUCCESS) {
3121 		ql_dbg(ql_dbg_mbx, vha, 0x1082, "Failed=%x.\n", rval);
3122 	} else {
3123 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1083,
3124 		    "Done %s.\n", __func__);
3125 	}
3126 
3127 	return rval;
3128 }
3129 
3130 /*
3131  * qla2x00_get_link_status
3132  *
3133  * Input:
3134  *	ha = adapter block pointer.
3135  *	loop_id = device loop ID.
3136  *	stats = pointer to link statistics return buffer.
 *	stats_dma = DMA address of the statistics buffer.
3137  *
3138  * Returns:
3139  *	qla2x00 local function return status code.
3142  */
3143 int
3144 qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
3145     struct link_statistics *stats, dma_addr_t stats_dma)
3146 {
3147 	int rval;
3148 	mbx_cmd_t mc;
3149 	mbx_cmd_t *mcp = &mc;
3150 	uint32_t *iter = (uint32_t *)stats;
3151 	ushort dwords = offsetof(typeof(*stats), link_up_cnt)/sizeof(*iter);
3152 	struct qla_hw_data *ha = vha->hw;
3153 
3154 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1084,
3155 	    "Entered %s.\n", __func__);
3156 
3157 	mcp->mb[0] = MBC_GET_LINK_STATUS;
3158 	mcp->mb[2] = MSW(LSD(stats_dma));
3159 	mcp->mb[3] = LSW(LSD(stats_dma));
3160 	mcp->mb[6] = MSW(MSD(stats_dma));
3161 	mcp->mb[7] = LSW(MSD(stats_dma));
3162 	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
3163 	mcp->in_mb = MBX_0;
3164 	if (IS_FWI2_CAPABLE(ha)) {
3165 		mcp->mb[1] = loop_id;
3166 		mcp->mb[4] = 0;
3167 		mcp->mb[10] = 0;
3168 		mcp->out_mb |= MBX_10|MBX_4|MBX_1;
3169 		mcp->in_mb |= MBX_1;
3170 	} else if (HAS_EXTENDED_IDS(ha)) {
3171 		mcp->mb[1] = loop_id;
3172 		mcp->mb[10] = 0;
3173 		mcp->out_mb |= MBX_10|MBX_1;
3174 	} else {
3175 		mcp->mb[1] = loop_id << 8;
3176 		mcp->out_mb |= MBX_1;
3177 	}
3178 	mcp->tov = MBX_TOV_SECONDS;
3179 	mcp->flags = IOCTL_CMD;
3180 	rval = qla2x00_mailbox_command(vha, mcp);
3181 
3182 	if (rval == QLA_SUCCESS) {
3183 		if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
3184 			ql_dbg(ql_dbg_mbx, vha, 0x1085,
3185 			    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3186 			rval = QLA_FUNCTION_FAILED;
3187 		} else {
3188 			/* Re-endianize - firmware data is le32. */
3189 			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1086,
3190 			    "Done %s.\n", __func__);
3191 			for ( ; dwords--; iter++)
3192 				le32_to_cpus(iter);
3193 		}
3194 	} else {
3195 		/* Failed. */
3196 		ql_dbg(ql_dbg_mbx, vha, 0x1087, "Failed=%x.\n", rval);
3197 	}
3198 
3199 	return rval;
3200 }
3201 
3202 int
3203 qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
3204     dma_addr_t stats_dma, uint16_t options)
3205 {
3206 	int rval;
3207 	mbx_cmd_t mc;
3208 	mbx_cmd_t *mcp = &mc;
3209 	uint32_t *iter = (uint32_t *)stats;
3210 	ushort dwords = sizeof(*stats)/sizeof(*iter);
3211 
3212 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1088,
3213 	    "Entered %s.\n", __func__);
3214 
3215 	memset(&mc, 0, sizeof(mc));
3216 	mc.mb[0] = MBC_GET_LINK_PRIV_STATS;
3217 	mc.mb[2] = MSW(LSD(stats_dma));
3218 	mc.mb[3] = LSW(LSD(stats_dma));
3219 	mc.mb[6] = MSW(MSD(stats_dma));
3220 	mc.mb[7] = LSW(MSD(stats_dma));
3221 	mc.mb[8] = dwords;
3222 	mc.mb[9] = vha->vp_idx;
3223 	mc.mb[10] = options;
3224 
3225 	rval = qla24xx_send_mb_cmd(vha, &mc);
3226 
3227 	if (rval == QLA_SUCCESS) {
3228 		if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
3229 			ql_dbg(ql_dbg_mbx, vha, 0x1089,
3230 			    "Failed mb[0]=%x.\n", mcp->mb[0]);
3231 			rval = QLA_FUNCTION_FAILED;
3232 		} else {
3233 			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108a,
3234 			    "Done %s.\n", __func__);
3235 			/* Re-endianize - firmware data is le32. */
3236 			for ( ; dwords--; iter++)
3237 				le32_to_cpus(iter);
3238 		}
3239 	} else {
3240 		/* Failed. */
3241 		ql_dbg(ql_dbg_mbx, vha, 0x108b, "Failed=%x.\n", rval);
3242 	}
3243 
3244 	return rval;
3245 }
3246 
3247 int
3248 qla24xx_abort_command(srb_t *sp)
3249 {
3250 	int		rval;
3251 	unsigned long   flags = 0;
3252 
3253 	struct abort_entry_24xx *abt;
3254 	dma_addr_t	abt_dma;
3255 	uint32_t	handle;
3256 	fc_port_t	*fcport = sp->fcport;
3257 	struct scsi_qla_host *vha = fcport->vha;
3258 	struct qla_hw_data *ha = vha->hw;
3259 	struct req_que *req;
3260 	struct qla_qpair *qpair = sp->qpair;
3261 
3262 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c,
3263 	    "Entered %s.\n", __func__);
3264 
3265 	if (sp->qpair)
3266 		req = sp->qpair->req;
3267 	else
3268 		return QLA_ERR_NO_QPAIR;
3269 
3270 	if (ql2xasynctmfenable)
3271 		return qla24xx_async_abort_command(sp);
3272 
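	/*
	 * Locate the outstanding-command handle for this srb under the
	 * queue-pair lock; that handle, tagged with the request queue id,
	 * is what the abort IOCB refers to.
	 */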
3273 	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
3274 	for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
3275 		if (req->outstanding_cmds[handle] == sp)
3276 			break;
3277 	}
3278 	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
3279 	if (handle == req->num_outstanding_cmds) {
3280 		/* Command not found. */
3281 		return QLA_ERR_NOT_FOUND;
3282 	}
3283 
3284 	abt = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &abt_dma);
3285 	if (abt == NULL) {
3286 		ql_log(ql_log_warn, vha, 0x108d,
3287 		    "Failed to allocate abort IOCB.\n");
3288 		return QLA_MEMORY_ALLOC_FAILED;
3289 	}
3290 
3291 	abt->entry_type = ABORT_IOCB_TYPE;
3292 	abt->entry_count = 1;
3293 	abt->handle = make_handle(req->id, abt->handle);
3294 	abt->nport_handle = cpu_to_le16(fcport->loop_id);
3295 	abt->handle_to_abort = make_handle(req->id, handle);
3296 	abt->port_id[0] = fcport->d_id.b.al_pa;
3297 	abt->port_id[1] = fcport->d_id.b.area;
3298 	abt->port_id[2] = fcport->d_id.b.domain;
3299 	abt->vp_index = fcport->vha->vp_idx;
3300 
3301 	abt->req_que_no = cpu_to_le16(req->id);
3302 	/* Need to pass original sp */
3303 	qla_nvme_abort_set_option(abt, sp);
3304 
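	/*
	 * On completion the firmware reuses the nport_handle field of the
	 * abort IOCB to report the completion status; it is checked below
	 * once the IOCB has been issued.
	 */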
3305 	rval = qla2x00_issue_iocb(vha, abt, abt_dma, 0);
3306 	if (rval != QLA_SUCCESS) {
3307 		ql_dbg(ql_dbg_mbx, vha, 0x108e,
3308 		    "Failed to issue IOCB (%x).\n", rval);
3309 	} else if (abt->entry_status != 0) {
3310 		ql_dbg(ql_dbg_mbx, vha, 0x108f,
3311 		    "Failed to complete IOCB -- error status (%x).\n",
3312 		    abt->entry_status);
3313 		rval = QLA_FUNCTION_FAILED;
3314 	} else if (abt->nport_handle != cpu_to_le16(0)) {
3315 		ql_dbg(ql_dbg_mbx, vha, 0x1090,
3316 		    "Failed to complete IOCB -- completion status (%x).\n",
3317 		    le16_to_cpu(abt->nport_handle));
3318 		if (abt->nport_handle == cpu_to_le16(CS_IOCB_ERROR))
3319 			rval = QLA_FUNCTION_PARAMETER_ERROR;
3320 		else
3321 			rval = QLA_FUNCTION_FAILED;
3322 	} else {
3323 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1091,
3324 		    "Done %s.\n", __func__);
3325 	}
3326 	if (rval == QLA_SUCCESS)
3327 		qla_nvme_abort_process_comp_status(abt, sp);
3328 
3329 	qla_wait_nvme_release_cmd_kref(sp);
3330 
3331 	dma_pool_free(ha->s_dma_pool, abt, abt_dma);
3332 
3333 	return rval;
3334 }
3335 
3336 struct tsk_mgmt_cmd {
3337 	union {
3338 		struct tsk_mgmt_entry tsk;
3339 		struct sts_entry_24xx sts;
3340 	} p;
3341 };
3342 
3343 static int
3344 __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
3345     uint64_t l, int tag)
3346 {
3347 	int		rval, rval2;
3348 	struct tsk_mgmt_cmd *tsk;
3349 	struct sts_entry_24xx *sts;
3350 	dma_addr_t	tsk_dma;
3351 	scsi_qla_host_t *vha;
3352 	struct qla_hw_data *ha;
3353 	struct req_que *req;
3354 	struct qla_qpair *qpair;
3355 
3356 	vha = fcport->vha;
3357 	ha = vha->hw;
3358 	req = vha->req;
3359 
3360 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1092,
3361 	    "Entered %s.\n", __func__);
3362 
3363 	if (vha->vp_idx && vha->qpair) {
3364 		/* NPIV port */
3365 		qpair = vha->qpair;
3366 		req = qpair->req;
3367 	}
3368 
3369 	tsk = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma);
3370 	if (tsk == NULL) {
3371 		ql_log(ql_log_warn, vha, 0x1093,
3372 		    "Failed to allocate task management IOCB.\n");
3373 		return QLA_MEMORY_ALLOC_FAILED;
3374 	}
3375 
3376 	tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE;
3377 	tsk->p.tsk.entry_count = 1;
3378 	tsk->p.tsk.handle = make_handle(req->id, tsk->p.tsk.handle);
3379 	tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id);
3380 	tsk->p.tsk.timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
3381 	tsk->p.tsk.control_flags = cpu_to_le32(type);
3382 	tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa;
3383 	tsk->p.tsk.port_id[1] = fcport->d_id.b.area;
3384 	tsk->p.tsk.port_id[2] = fcport->d_id.b.domain;
3385 	tsk->p.tsk.vp_index = fcport->vha->vp_idx;
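	/*
	 * For a LUN reset the 64-bit LUN is first packed into SAM
	 * scsi_lun format and then swapped into the FCP wire layout
	 * expected by the firmware.
	 */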
3386 	if (type == TCF_LUN_RESET) {
3387 		int_to_scsilun(l, &tsk->p.tsk.lun);
3388 		host_to_fcp_swap((uint8_t *)&tsk->p.tsk.lun,
3389 		    sizeof(tsk->p.tsk.lun));
3390 	}
3391 
3392 	sts = &tsk->p.sts;
3393 	rval = qla2x00_issue_iocb(vha, tsk, tsk_dma, 0);
3394 	if (rval != QLA_SUCCESS) {
3395 		ql_dbg(ql_dbg_mbx, vha, 0x1094,
3396 		    "Failed to issue %s reset IOCB (%x).\n", name, rval);
3397 	} else if (sts->entry_status != 0) {
3398 		ql_dbg(ql_dbg_mbx, vha, 0x1095,
3399 		    "Failed to complete IOCB -- error status (%x).\n",
3400 		    sts->entry_status);
3401 		rval = QLA_FUNCTION_FAILED;
3402 	} else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) {
3403 		ql_dbg(ql_dbg_mbx, vha, 0x1096,
3404 		    "Failed to complete IOCB -- completion status (%x).\n",
3405 		    le16_to_cpu(sts->comp_status));
3406 		rval = QLA_FUNCTION_FAILED;
3407 	} else if (le16_to_cpu(sts->scsi_status) &
3408 	    SS_RESPONSE_INFO_LEN_VALID) {
3409 		if (le32_to_cpu(sts->rsp_data_len) < 4) {
3410 			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1097,
3411 			    "Ignoring inconsistent data length -- not enough "
3412 			    "response info (%d).\n",
3413 			    le32_to_cpu(sts->rsp_data_len));
3414 		} else if (sts->data[3]) {
3415 			ql_dbg(ql_dbg_mbx, vha, 0x1098,
3416 			    "Failed to complete IOCB -- response (%x).\n",
3417 			    sts->data[3]);
3418 			rval = QLA_FUNCTION_FAILED;
3419 		}
3420 	}
3421 
3422 	/* Issue marker IOCB. */
3423 	rval2 = qla2x00_marker(vha, ha->base_qpair, fcport->loop_id, l,
3424 	    type == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
3425 	if (rval2 != QLA_SUCCESS) {
3426 		ql_dbg(ql_dbg_mbx, vha, 0x1099,
3427 		    "Failed to issue marker IOCB (%x).\n", rval2);
3428 	} else {
3429 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109a,
3430 		    "Done %s.\n", __func__);
3431 	}
3432 
3433 	dma_pool_free(ha->s_dma_pool, tsk, tsk_dma);
3434 
3435 	return rval;
3436 }
3437 
3438 int
3439 qla24xx_abort_target(struct fc_port *fcport, uint64_t l, int tag)
3440 {
3441 	struct qla_hw_data *ha = fcport->vha->hw;
3442 
3443 	if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha))
3444 		return qla2x00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag);
3445 
3446 	return __qla24xx_issue_tmf("Target", TCF_TARGET_RESET, fcport, l, tag);
3447 }
3448 
3449 int
3450 qla24xx_lun_reset(struct fc_port *fcport, uint64_t l, int tag)
3451 {
3452 	struct qla_hw_data *ha = fcport->vha->hw;
3453 
3454 	if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha))
3455 		return qla2x00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag);
3456 
3457 	return __qla24xx_issue_tmf("Lun", TCF_LUN_RESET, fcport, l, tag);
3458 }
3459 
3460 int
3461 qla2x00_system_error(scsi_qla_host_t *vha)
3462 {
3463 	int rval;
3464 	mbx_cmd_t mc;
3465 	mbx_cmd_t *mcp = &mc;
3466 	struct qla_hw_data *ha = vha->hw;
3467 
3468 	if (!IS_QLA23XX(ha) && !IS_FWI2_CAPABLE(ha))
3469 		return QLA_FUNCTION_FAILED;
3470 
3471 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109b,
3472 	    "Entered %s.\n", __func__);
3473 
3474 	mcp->mb[0] = MBC_GEN_SYSTEM_ERROR;
3475 	mcp->out_mb = MBX_0;
3476 	mcp->in_mb = MBX_0;
3477 	mcp->tov = 5;
3478 	mcp->flags = 0;
3479 	rval = qla2x00_mailbox_command(vha, mcp);
3480 
3481 	if (rval != QLA_SUCCESS) {
3482 		ql_dbg(ql_dbg_mbx, vha, 0x109c, "Failed=%x.\n", rval);
3483 	} else {
3484 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109d,
3485 		    "Done %s.\n", __func__);
3486 	}
3487 
3488 	return rval;
3489 }
3490 
3491 int
3492 qla2x00_write_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t data)
3493 {
3494 	int rval;
3495 	mbx_cmd_t mc;
3496 	mbx_cmd_t *mcp = &mc;
3497 
3498 	if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) &&
3499 	    !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
3500 		return QLA_FUNCTION_FAILED;
3501 
3502 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1182,
3503 	    "Entered %s.\n", __func__);
3504 
3505 	mcp->mb[0] = MBC_WRITE_SERDES;
3506 	mcp->mb[1] = addr;
3507 	if (IS_QLA2031(vha->hw))
3508 		mcp->mb[2] = data & 0xff;
3509 	else
3510 		mcp->mb[2] = data;
3511 
3512 	mcp->mb[3] = 0;
3513 	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
3514 	mcp->in_mb = MBX_0;
3515 	mcp->tov = MBX_TOV_SECONDS;
3516 	mcp->flags = 0;
3517 	rval = qla2x00_mailbox_command(vha, mcp);
3518 
3519 	if (rval != QLA_SUCCESS) {
3520 		ql_dbg(ql_dbg_mbx, vha, 0x1183,
3521 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3522 	} else {
3523 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1184,
3524 		    "Done %s.\n", __func__);
3525 	}
3526 
3527 	return rval;
3528 }
3529 
3530 int
3531 qla2x00_read_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t *data)
3532 {
3533 	int rval;
3534 	mbx_cmd_t mc;
3535 	mbx_cmd_t *mcp = &mc;
3536 
3537 	if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) &&
3538 	    !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
3539 		return QLA_FUNCTION_FAILED;
3540 
3541 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1185,
3542 	    "Entered %s.\n", __func__);
3543 
3544 	mcp->mb[0] = MBC_READ_SERDES;
3545 	mcp->mb[1] = addr;
3546 	mcp->mb[3] = 0;
3547 	mcp->out_mb = MBX_3|MBX_1|MBX_0;
3548 	mcp->in_mb = MBX_1|MBX_0;
3549 	mcp->tov = MBX_TOV_SECONDS;
3550 	mcp->flags = 0;
3551 	rval = qla2x00_mailbox_command(vha, mcp);
3552 
3553 	if (IS_QLA2031(vha->hw))
3554 		*data = mcp->mb[1] & 0xff;
3555 	else
3556 		*data = mcp->mb[1];
3557 
3558 	if (rval != QLA_SUCCESS) {
3559 		ql_dbg(ql_dbg_mbx, vha, 0x1186,
3560 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3561 	} else {
3562 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1187,
3563 		    "Done %s.\n", __func__);
3564 	}
3565 
3566 	return rval;
3567 }
3568 
3569 int
3570 qla8044_write_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t data)
3571 {
3572 	int rval;
3573 	mbx_cmd_t mc;
3574 	mbx_cmd_t *mcp = &mc;
3575 
3576 	if (!IS_QLA8044(vha->hw))
3577 		return QLA_FUNCTION_FAILED;
3578 
3579 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x11a0,
3580 	    "Entered %s.\n", __func__);
3581 
3582 	mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG;
3583 	mcp->mb[1] = HCS_WRITE_SERDES;
3584 	mcp->mb[3] = LSW(addr);
3585 	mcp->mb[4] = MSW(addr);
3586 	mcp->mb[5] = LSW(data);
3587 	mcp->mb[6] = MSW(data);
3588 	mcp->out_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_1|MBX_0;
3589 	mcp->in_mb = MBX_0;
3590 	mcp->tov = MBX_TOV_SECONDS;
3591 	mcp->flags = 0;
3592 	rval = qla2x00_mailbox_command(vha, mcp);
3593 
3594 	if (rval != QLA_SUCCESS) {
3595 		ql_dbg(ql_dbg_mbx, vha, 0x11a1,
3596 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3597 	} else {
3598 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1188,
3599 		    "Done %s.\n", __func__);
3600 	}
3601 
3602 	return rval;
3603 }
3604 
3605 int
3606 qla8044_read_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t *data)
3607 {
3608 	int rval;
3609 	mbx_cmd_t mc;
3610 	mbx_cmd_t *mcp = &mc;
3611 
3612 	if (!IS_QLA8044(vha->hw))
3613 		return QLA_FUNCTION_FAILED;
3614 
3615 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1189,
3616 	    "Entered %s.\n", __func__);
3617 
3618 	mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG;
3619 	mcp->mb[1] = HCS_READ_SERDES;
3620 	mcp->mb[3] = LSW(addr);
3621 	mcp->mb[4] = MSW(addr);
3622 	mcp->out_mb = MBX_4|MBX_3|MBX_1|MBX_0;
3623 	mcp->in_mb = MBX_2|MBX_1|MBX_0;
3624 	mcp->tov = MBX_TOV_SECONDS;
3625 	mcp->flags = 0;
3626 	rval = qla2x00_mailbox_command(vha, mcp);
3627 
3628 	*data = mcp->mb[2] << 16 | mcp->mb[1];
3629 
3630 	if (rval != QLA_SUCCESS) {
3631 		ql_dbg(ql_dbg_mbx, vha, 0x118a,
3632 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3633 	} else {
3634 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118b,
3635 		    "Done %s.\n", __func__);
3636 	}
3637 
3638 	return rval;
3639 }
3640 
3641 /**
3642  * qla2x00_set_serdes_params() -
3643  * qla2x00_set_serdes_params() - Set serial link (serdes) options.
3644  * @sw_em_1g: serial link options
3645  * @sw_em_2g: serial link options
3646  * @sw_em_4g: serial link options
3647  *
3648  * Returns
3649  * Returns qla2x00 local function return status code.
3650 int
3651 qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g,
3652     uint16_t sw_em_2g, uint16_t sw_em_4g)
3653 {
3654 	int rval;
3655 	mbx_cmd_t mc;
3656 	mbx_cmd_t *mcp = &mc;
3657 
3658 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109e,
3659 	    "Entered %s.\n", __func__);
3660 
3661 	mcp->mb[0] = MBC_SERDES_PARAMS;
3662 	mcp->mb[1] = BIT_0;
3663 	mcp->mb[2] = sw_em_1g | BIT_15;
3664 	mcp->mb[3] = sw_em_2g | BIT_15;
3665 	mcp->mb[4] = sw_em_4g | BIT_15;
3666 	mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3667 	mcp->in_mb = MBX_0;
3668 	mcp->tov = MBX_TOV_SECONDS;
3669 	mcp->flags = 0;
3670 	rval = qla2x00_mailbox_command(vha, mcp);
3671 
3672 	if (rval != QLA_SUCCESS) {
3673 		/*EMPTY*/
3674 		ql_dbg(ql_dbg_mbx, vha, 0x109f,
3675 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3676 	} else {
3677 		/*EMPTY*/
3678 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a0,
3679 		    "Done %s.\n", __func__);
3680 	}
3681 
3682 	return rval;
3683 }
3684 
3685 int
3686 qla2x00_stop_firmware(scsi_qla_host_t *vha)
3687 {
3688 	int rval;
3689 	mbx_cmd_t mc;
3690 	mbx_cmd_t *mcp = &mc;
3691 
3692 	if (!IS_FWI2_CAPABLE(vha->hw))
3693 		return QLA_FUNCTION_FAILED;
3694 
3695 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a1,
3696 	    "Entered %s.\n", __func__);
3697 
3698 	mcp->mb[0] = MBC_STOP_FIRMWARE;
3699 	mcp->mb[1] = 0;
3700 	mcp->out_mb = MBX_1|MBX_0;
3701 	mcp->in_mb = MBX_0;
3702 	mcp->tov = 5;
3703 	mcp->flags = 0;
3704 	rval = qla2x00_mailbox_command(vha, mcp);
3705 
3706 	if (rval != QLA_SUCCESS) {
3707 		ql_dbg(ql_dbg_mbx, vha, 0x10a2, "Failed=%x.\n", rval);
3708 		if (mcp->mb[0] == MBS_INVALID_COMMAND)
3709 			rval = QLA_INVALID_COMMAND;
3710 	} else {
3711 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a3,
3712 		    "Done %s.\n", __func__);
3713 	}
3714 
3715 	return rval;
3716 }
3717 
3718 int
3719 qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma,
3720     uint16_t buffers)
3721 {
3722 	int rval;
3723 	mbx_cmd_t mc;
3724 	mbx_cmd_t *mcp = &mc;
3725 
3726 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a4,
3727 	    "Entered %s.\n", __func__);
3728 
3729 	if (!IS_FWI2_CAPABLE(vha->hw))
3730 		return QLA_FUNCTION_FAILED;
3731 
3732 	if (unlikely(pci_channel_offline(vha->hw->pdev)))
3733 		return QLA_FUNCTION_FAILED;
3734 
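	/*
	 * The 64-bit EFT buffer address is split across four 16-bit mailbox
	 * registers: mb[2]/mb[3] hold the low dword, mb[4]/mb[5] the high
	 * dword (via the MSD macro).
	 */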
3735 	mcp->mb[0] = MBC_TRACE_CONTROL;
3736 	mcp->mb[1] = TC_EFT_ENABLE;
3737 	mcp->mb[2] = LSW(eft_dma);
3738 	mcp->mb[3] = MSW(eft_dma);
3739 	mcp->mb[4] = LSW(MSD(eft_dma));
3740 	mcp->mb[5] = MSW(MSD(eft_dma));
3741 	mcp->mb[6] = buffers;
3742 	mcp->mb[7] = TC_AEN_DISABLE;
3743 	mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3744 	mcp->in_mb = MBX_1|MBX_0;
3745 	mcp->tov = MBX_TOV_SECONDS;
3746 	mcp->flags = 0;
3747 	rval = qla2x00_mailbox_command(vha, mcp);
3748 	if (rval != QLA_SUCCESS) {
3749 		ql_dbg(ql_dbg_mbx, vha, 0x10a5,
3750 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
3751 		    rval, mcp->mb[0], mcp->mb[1]);
3752 	} else {
3753 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a6,
3754 		    "Done %s.\n", __func__);
3755 	}
3756 
3757 	return rval;
3758 }
3759 
3760 int
3761 qla2x00_disable_eft_trace(scsi_qla_host_t *vha)
3762 {
3763 	int rval;
3764 	mbx_cmd_t mc;
3765 	mbx_cmd_t *mcp = &mc;
3766 
3767 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a7,
3768 	    "Entered %s.\n", __func__);
3769 
3770 	if (!IS_FWI2_CAPABLE(vha->hw))
3771 		return QLA_FUNCTION_FAILED;
3772 
3773 	if (unlikely(pci_channel_offline(vha->hw->pdev)))
3774 		return QLA_FUNCTION_FAILED;
3775 
3776 	mcp->mb[0] = MBC_TRACE_CONTROL;
3777 	mcp->mb[1] = TC_EFT_DISABLE;
3778 	mcp->out_mb = MBX_1|MBX_0;
3779 	mcp->in_mb = MBX_1|MBX_0;
3780 	mcp->tov = MBX_TOV_SECONDS;
3781 	mcp->flags = 0;
3782 	rval = qla2x00_mailbox_command(vha, mcp);
3783 	if (rval != QLA_SUCCESS) {
3784 		ql_dbg(ql_dbg_mbx, vha, 0x10a8,
3785 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
3786 		    rval, mcp->mb[0], mcp->mb[1]);
3787 	} else {
3788 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a9,
3789 		    "Done %s.\n", __func__);
3790 	}
3791 
3792 	return rval;
3793 }
3794 
3795 int
3796 qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
3797     uint16_t buffers, uint16_t *mb, uint32_t *dwords)
3798 {
3799 	int rval;
3800 	mbx_cmd_t mc;
3801 	mbx_cmd_t *mcp = &mc;
3802 
3803 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10aa,
3804 	    "Entered %s.\n", __func__);
3805 
3806 	if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw) &&
3807 	    !IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) &&
3808 	    !IS_QLA28XX(vha->hw))
3809 		return QLA_FUNCTION_FAILED;
3810 
3811 	if (unlikely(pci_channel_offline(vha->hw->pdev)))
3812 		return QLA_FUNCTION_FAILED;
3813 
3814 	mcp->mb[0] = MBC_TRACE_CONTROL;
3815 	mcp->mb[1] = TC_FCE_ENABLE;
3816 	mcp->mb[2] = LSW(fce_dma);
3817 	mcp->mb[3] = MSW(fce_dma);
3818 	mcp->mb[4] = LSW(MSD(fce_dma));
3819 	mcp->mb[5] = MSW(MSD(fce_dma));
3820 	mcp->mb[6] = buffers;
3821 	mcp->mb[7] = TC_AEN_DISABLE;
3822 	mcp->mb[8] = 0;
3823 	mcp->mb[9] = TC_FCE_DEFAULT_RX_SIZE;
3824 	mcp->mb[10] = TC_FCE_DEFAULT_TX_SIZE;
3825 	mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|
3826 	    MBX_1|MBX_0;
3827 	mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3828 	mcp->tov = MBX_TOV_SECONDS;
3829 	mcp->flags = 0;
3830 	rval = qla2x00_mailbox_command(vha, mcp);
3831 	if (rval != QLA_SUCCESS) {
3832 		ql_dbg(ql_dbg_mbx, vha, 0x10ab,
3833 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
3834 		    rval, mcp->mb[0], mcp->mb[1]);
3835 	} else {
3836 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ac,
3837 		    "Done %s.\n", __func__);
3838 
3839 		if (mb)
3840 			memcpy(mb, mcp->mb, 8 * sizeof(*mb));
3841 		if (dwords)
3842 			*dwords = buffers;
3843 	}
3844 
3845 	return rval;
3846 }
3847 
3848 int
3849 qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd)
3850 {
3851 	int rval;
3852 	mbx_cmd_t mc;
3853 	mbx_cmd_t *mcp = &mc;
3854 
3855 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ad,
3856 	    "Entered %s.\n", __func__);
3857 
3858 	if (!IS_FWI2_CAPABLE(vha->hw))
3859 		return QLA_FUNCTION_FAILED;
3860 
3861 	if (unlikely(pci_channel_offline(vha->hw->pdev)))
3862 		return QLA_FUNCTION_FAILED;
3863 
3864 	mcp->mb[0] = MBC_TRACE_CONTROL;
3865 	mcp->mb[1] = TC_FCE_DISABLE;
3866 	mcp->mb[2] = TC_FCE_DISABLE_TRACE;
3867 	mcp->out_mb = MBX_2|MBX_1|MBX_0;
3868 	mcp->in_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|
3869 	    MBX_1|MBX_0;
3870 	mcp->tov = MBX_TOV_SECONDS;
3871 	mcp->flags = 0;
3872 	rval = qla2x00_mailbox_command(vha, mcp);
3873 	if (rval != QLA_SUCCESS) {
3874 		ql_dbg(ql_dbg_mbx, vha, 0x10ae,
3875 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
3876 		    rval, mcp->mb[0], mcp->mb[1]);
3877 	} else {
3878 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10af,
3879 		    "Done %s.\n", __func__);
3880 
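		/*
		 * Reassemble the 64-bit FCE write and read pointers from the
		 * four 16-bit mailbox registers returned in mb[2..5] and
		 * mb[6..9] respectively.
		 */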
3881 		if (wr)
3882 			*wr = (uint64_t) mcp->mb[5] << 48 |
3883 			    (uint64_t) mcp->mb[4] << 32 |
3884 			    (uint64_t) mcp->mb[3] << 16 |
3885 			    (uint64_t) mcp->mb[2];
3886 		if (rd)
3887 			*rd = (uint64_t) mcp->mb[9] << 48 |
3888 			    (uint64_t) mcp->mb[8] << 32 |
3889 			    (uint64_t) mcp->mb[7] << 16 |
3890 			    (uint64_t) mcp->mb[6];
3891 	}
3892 
3893 	return rval;
3894 }
3895 
3896 int
3897 qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
3898 	uint16_t *port_speed, uint16_t *mb)
3899 {
3900 	int rval;
3901 	mbx_cmd_t mc;
3902 	mbx_cmd_t *mcp = &mc;
3903 
3904 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b0,
3905 	    "Entered %s.\n", __func__);
3906 
3907 	if (!IS_IIDMA_CAPABLE(vha->hw))
3908 		return QLA_FUNCTION_FAILED;
3909 
3910 	mcp->mb[0] = MBC_PORT_PARAMS;
3911 	mcp->mb[1] = loop_id;
3912 	mcp->mb[2] = mcp->mb[3] = 0;
3913 	mcp->mb[9] = vha->vp_idx;
3914 	mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
3915 	mcp->in_mb = MBX_3|MBX_1|MBX_0;
3916 	mcp->tov = MBX_TOV_SECONDS;
3917 	mcp->flags = 0;
3918 	rval = qla2x00_mailbox_command(vha, mcp);
3919 
3920 	/* Return mailbox statuses. */
3921 	if (mb) {
3922 		mb[0] = mcp->mb[0];
3923 		mb[1] = mcp->mb[1];
3924 		mb[3] = mcp->mb[3];
3925 	}
3926 
3927 	if (rval != QLA_SUCCESS) {
3928 		ql_dbg(ql_dbg_mbx, vha, 0x10b1, "Failed=%x.\n", rval);
3929 	} else {
3930 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b2,
3931 		    "Done %s.\n", __func__);
3932 		if (port_speed)
3933 			*port_speed = mcp->mb[3];
3934 	}
3935 
3936 	return rval;
3937 }
3938 
3939 int
3940 qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
3941     uint16_t port_speed, uint16_t *mb)
3942 {
3943 	int rval;
3944 	mbx_cmd_t mc;
3945 	mbx_cmd_t *mcp = &mc;
3946 
3947 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b3,
3948 	    "Entered %s.\n", __func__);
3949 
3950 	if (!IS_IIDMA_CAPABLE(vha->hw))
3951 		return QLA_FUNCTION_FAILED;
3952 
3953 	mcp->mb[0] = MBC_PORT_PARAMS;
3954 	mcp->mb[1] = loop_id;
3955 	mcp->mb[2] = BIT_0;
3956 	mcp->mb[3] = port_speed & 0x3F;
3957 	mcp->mb[9] = vha->vp_idx;
3958 	mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
3959 	mcp->in_mb = MBX_3|MBX_1|MBX_0;
3960 	mcp->tov = MBX_TOV_SECONDS;
3961 	mcp->flags = 0;
3962 	rval = qla2x00_mailbox_command(vha, mcp);
3963 
3964 	/* Return mailbox statuses. */
3965 	if (mb) {
3966 		mb[0] = mcp->mb[0];
3967 		mb[1] = mcp->mb[1];
3968 		mb[3] = mcp->mb[3];
3969 	}
3970 
3971 	if (rval != QLA_SUCCESS) {
3972 		ql_dbg(ql_dbg_mbx, vha, 0x10b4,
3973 		    "Failed=%x.\n", rval);
3974 	} else {
3975 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b5,
3976 		    "Done %s.\n", __func__);
3977 	}
3978 
3979 	return rval;
3980 }
3981 
3982 void
3983 qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
3984 	struct vp_rpt_id_entry_24xx *rptid_entry)
3985 {
3986 	struct qla_hw_data *ha = vha->hw;
3987 	scsi_qla_host_t *vp = NULL;
3988 	unsigned long   flags;
3989 	int found;
3990 	port_id_t id;
3991 	struct fc_port *fcport;
3992 
3993 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6,
3994 	    "Entered %s.\n", __func__);
3995 
3996 	if (rptid_entry->entry_status != 0)
3997 		return;
3998 
3999 	id.b.domain = rptid_entry->port_id[2];
4000 	id.b.area   = rptid_entry->port_id[1];
4001 	id.b.al_pa  = rptid_entry->port_id[0];
4002 	id.b.rsvd_1 = 0;
4003 	ha->flags.n2n_ae = 0;
4004 
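	/*
	 * rptid_entry->format selects the report layout: 0 = loop,
	 * 1 = fabric (topology refined below from the f1 flags),
	 * 2 = N2N direct attach.
	 */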
4005 	if (rptid_entry->format == 0) {
4006 		/* loop */
4007 		ql_dbg(ql_dbg_async, vha, 0x10b7,
4008 		    "Format 0 : Number of VPs setup %d, number of "
4009 		    "VPs acquired %d.\n", rptid_entry->vp_setup,
4010 		    rptid_entry->vp_acquired);
4011 		ql_dbg(ql_dbg_async, vha, 0x10b8,
4012 		    "Primary port id %02x%02x%02x.\n",
4013 		    rptid_entry->port_id[2], rptid_entry->port_id[1],
4014 		    rptid_entry->port_id[0]);
4015 		ha->current_topology = ISP_CFG_NL;
4016 		qla_update_host_map(vha, id);
4017 
4018 	} else if (rptid_entry->format == 1) {
4019 		/* fabric */
4020 		ql_dbg(ql_dbg_async, vha, 0x10b9,
4021 		    "Format 1: VP[%d] enabled - status %d - with "
4022 		    "port id %02x%02x%02x.\n", rptid_entry->vp_idx,
4023 			rptid_entry->vp_status,
4024 		    rptid_entry->port_id[2], rptid_entry->port_id[1],
4025 		    rptid_entry->port_id[0]);
4026 		ql_dbg(ql_dbg_async, vha, 0x5075,
4027 		   "Format 1: Remote WWPN %8phC.\n",
4028 		   rptid_entry->u.f1.port_name);
4029 
4030 		ql_dbg(ql_dbg_async, vha, 0x5075,
4031 		   "Format 1: WWPN %8phC.\n",
4032 		   vha->port_name);
4033 
4034 		switch (rptid_entry->u.f1.flags & TOPO_MASK) {
4035 		case TOPO_N2N:
4036 			ha->current_topology = ISP_CFG_N;
4037 			spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
4038 			list_for_each_entry(fcport, &vha->vp_fcports, list) {
4039 				fcport->scan_state = QLA_FCPORT_SCAN;
4040 				fcport->n2n_flag = 0;
4041 			}
4042 			id.b24 = 0;
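			/*
			 * The port with the higher WWPN takes AL_PA 1 and
			 * assigns AL_PA 2 to the peer; the lower-WWPN side
			 * waits for the remote end to initiate the login.
			 */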
4043 			if (wwn_to_u64(vha->port_name) >
4044 			    wwn_to_u64(rptid_entry->u.f1.port_name)) {
4045 				vha->d_id.b24 = 0;
4046 				vha->d_id.b.al_pa = 1;
4047 				ha->flags.n2n_bigger = 1;
4048 
4049 				id.b.al_pa = 2;
4050 				ql_dbg(ql_dbg_async, vha, 0x5075,
4051 				    "Format 1: assign local id %x remote id %x\n",
4052 				    vha->d_id.b24, id.b24);
4053 			} else {
4054 				ql_dbg(ql_dbg_async, vha, 0x5075,
4055 				    "Format 1: Remote login - Waiting for WWPN %8phC.\n",
4056 				    rptid_entry->u.f1.port_name);
4057 				ha->flags.n2n_bigger = 0;
4058 			}
4059 
4060 			fcport = qla2x00_find_fcport_by_wwpn(vha,
4061 			    rptid_entry->u.f1.port_name, 1);
4062 			spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
4063 
4065 			if (fcport) {
4066 				fcport->plogi_nack_done_deadline = jiffies + HZ;
4067 				fcport->dm_login_expire = jiffies +
4068 					QLA_N2N_WAIT_TIME * HZ;
4069 				fcport->scan_state = QLA_FCPORT_FOUND;
4070 				fcport->n2n_flag = 1;
4071 				fcport->keep_nport_handle = 1;
4072 				fcport->login_retry = vha->hw->login_retry_count;
4073 				fcport->fc4_type = FS_FC4TYPE_FCP;
4074 				if (vha->flags.nvme_enabled)
4075 					fcport->fc4_type |= FS_FC4TYPE_NVME;
4076 
4077 				if (wwn_to_u64(vha->port_name) >
4078 				    wwn_to_u64(fcport->port_name)) {
4079 					fcport->d_id = id;
4080 				}
4081 
4082 				switch (fcport->disc_state) {
4083 				case DSC_DELETED:
4084 					set_bit(RELOGIN_NEEDED,
4085 					    &vha->dpc_flags);
4086 					break;
4087 				case DSC_DELETE_PEND:
4088 					break;
4089 				default:
4090 					qlt_schedule_sess_for_deletion(fcport);
4091 					break;
4092 				}
4093 			} else {
4094 				qla24xx_post_newsess_work(vha, &id,
4095 				    rptid_entry->u.f1.port_name,
4096 				    rptid_entry->u.f1.node_name,
4097 				    NULL,
4098 				    FS_FCP_IS_N2N);
4099 			}
4100 
4101 			/* if our portname is higher, then initiate N2N login */
4102 
4103 			set_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags);
4104 			return;
4105 		case TOPO_FL:
4106 			ha->current_topology = ISP_CFG_FL;
4107 			break;
4108 		case TOPO_F:
4109 			ha->current_topology = ISP_CFG_F;
4110 			break;
4111 		default:
4112 			break;
4113 		}
4114 
4115 		ha->flags.gpsc_supported = 1;
4116 		ha->current_topology = ISP_CFG_F;
4117 		/* buffer to buffer credit flag */
4118 		vha->flags.bbcr_enable = (rptid_entry->u.f1.bbcr & 0xf) != 0;
4119 
4120 		if (rptid_entry->vp_idx == 0) {
4121 			if (rptid_entry->vp_status == VP_STAT_COMPL) {
4122 				/* FA-WWN is only for physical port */
4123 				if (qla_ini_mode_enabled(vha) &&
4124 				    ha->flags.fawwpn_enabled &&
4125 				    (rptid_entry->u.f1.flags &
4126 				     BIT_6)) {
4127 					memcpy(vha->port_name,
4128 					    rptid_entry->u.f1.port_name,
4129 					    WWN_SIZE);
4130 				}
4131 
4132 				qla_update_host_map(vha, id);
4133 			}
4134 
4135 			set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
4136 			set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
4137 		} else {
4138 			if (rptid_entry->vp_status != VP_STAT_COMPL &&
4139 				rptid_entry->vp_status != VP_STAT_ID_CHG) {
4140 				ql_dbg(ql_dbg_mbx, vha, 0x10ba,
4141 				    "Could not acquire ID for VP[%d].\n",
4142 				    rptid_entry->vp_idx);
4143 				return;
4144 			}
4145 
4146 			found = 0;
4147 			spin_lock_irqsave(&ha->vport_slock, flags);
4148 			list_for_each_entry(vp, &ha->vp_list, list) {
4149 				if (rptid_entry->vp_idx == vp->vp_idx) {
4150 					found = 1;
4151 					break;
4152 				}
4153 			}
4154 			spin_unlock_irqrestore(&ha->vport_slock, flags);
4155 
4156 			if (!found)
4157 				return;
4158 
4159 			qla_update_host_map(vp, id);
4160 
4161 			/*
4162 			 * Cannot configure here as we are still sitting on the
4163 			 * response queue. Handle it in dpc context.
4164 			 */
4165 			set_bit(VP_IDX_ACQUIRED, &vp->vp_flags);
4166 			set_bit(REGISTER_FC4_NEEDED, &vp->dpc_flags);
4167 			set_bit(REGISTER_FDMI_NEEDED, &vp->dpc_flags);
4168 		}
4169 		set_bit(VP_DPC_NEEDED, &vha->dpc_flags);
4170 		qla2xxx_wake_dpc(vha);
4171 	} else if (rptid_entry->format == 2) {
4172 		ql_dbg(ql_dbg_async, vha, 0x505f,
4173 		    "RIDA: format 2/N2N Primary port id %02x%02x%02x.\n",
4174 		    rptid_entry->port_id[2], rptid_entry->port_id[1],
4175 		    rptid_entry->port_id[0]);
4176 
4177 		ql_dbg(ql_dbg_async, vha, 0x5075,
4178 		    "N2N: Remote WWPN %8phC.\n",
4179 		    rptid_entry->u.f2.port_name);
4180 
4181 		/* N2N.  direct connect */
4182 		ha->current_topology = ISP_CFG_N;
4183 		ha->flags.rida_fmt2 = 1;
4184 		vha->d_id.b.domain = rptid_entry->port_id[2];
4185 		vha->d_id.b.area = rptid_entry->port_id[1];
4186 		vha->d_id.b.al_pa = rptid_entry->port_id[0];
4187 
4188 		ha->flags.n2n_ae = 1;
4189 		spin_lock_irqsave(&ha->vport_slock, flags);
4190 		qla_update_vp_map(vha, SET_AL_PA);
4191 		spin_unlock_irqrestore(&ha->vport_slock, flags);
4192 
4193 		list_for_each_entry(fcport, &vha->vp_fcports, list) {
4194 			fcport->scan_state = QLA_FCPORT_SCAN;
4195 			fcport->n2n_flag = 0;
4196 		}
4197 
4198 		fcport = qla2x00_find_fcport_by_wwpn(vha,
4199 		    rptid_entry->u.f2.port_name, 1);
4200 
4201 		if (fcport) {
4202 			fcport->login_retry = vha->hw->login_retry_count;
4203 			fcport->plogi_nack_done_deadline = jiffies + HZ;
4204 			fcport->scan_state = QLA_FCPORT_FOUND;
4205 			fcport->keep_nport_handle = 1;
4206 			fcport->n2n_flag = 1;
4207 			fcport->d_id.b.domain =
4208 				rptid_entry->u.f2.remote_nport_id[2];
4209 			fcport->d_id.b.area =
4210 				rptid_entry->u.f2.remote_nport_id[1];
4211 			fcport->d_id.b.al_pa =
4212 				rptid_entry->u.f2.remote_nport_id[0];
4213 
4214 			/*
4215 			 * For the case where the remote port sends a PRLO, the
4216 			 * FW sends up RIDA Format 2 as an indication of session
4217 			 * loss. In other words, the FW state changes from PRLI
4218 			 * complete back to PLOGI complete. Delete the
4219 			 * session and let relogin drive the reconnect.
4220 			 */
4221 			if (atomic_read(&fcport->state) == FCS_ONLINE)
4222 				qlt_schedule_sess_for_deletion(fcport);
4223 		}
4224 	}
4225 }
4226 
4227 /*
4228  * qla24xx_modify_vp_config
4229  *	Change VP configuration for vha
4230  *
4231  * Input:
4232  *	vha = adapter block pointer.
4233  *
4234  * Returns:
4235  *	qla2xxx local function return status code.
4236  *
4237  * Context:
4238  *	Kernel context.
4239  */
4240 int
4241 qla24xx_modify_vp_config(scsi_qla_host_t *vha)
4242 {
4243 	int		rval;
4244 	struct vp_config_entry_24xx *vpmod;
4245 	dma_addr_t	vpmod_dma;
4246 	struct qla_hw_data *ha = vha->hw;
4247 	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
4248 
4249 	/* This can be called by the parent */
4250 
4251 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10bb,
4252 	    "Entered %s.\n", __func__);
4253 
4254 	vpmod = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma);
4255 	if (!vpmod) {
4256 		ql_log(ql_log_warn, vha, 0x10bc,
4257 		    "Failed to allocate modify VP IOCB.\n");
4258 		return QLA_MEMORY_ALLOC_FAILED;
4259 	}
4260 
4261 	vpmod->entry_type = VP_CONFIG_IOCB_TYPE;
4262 	vpmod->entry_count = 1;
4263 	vpmod->command = VCT_COMMAND_MOD_ENABLE_VPS;
4264 	vpmod->vp_count = 1;
4265 	vpmod->vp_index1 = vha->vp_idx;
4266 	vpmod->options_idx1 = BIT_3|BIT_4|BIT_5;
4267 
4268 	qlt_modify_vp_config(vha, vpmod);
4269 
4270 	memcpy(vpmod->node_name_idx1, vha->node_name, WWN_SIZE);
4271 	memcpy(vpmod->port_name_idx1, vha->port_name, WWN_SIZE);
4272 	vpmod->entry_count = 1;
4273 
4274 	rval = qla2x00_issue_iocb(base_vha, vpmod, vpmod_dma, 0);
4275 	if (rval != QLA_SUCCESS) {
4276 		ql_dbg(ql_dbg_mbx, vha, 0x10bd,
4277 		    "Failed to issue VP config IOCB (%x).\n", rval);
4278 	} else if (vpmod->comp_status != 0) {
4279 		ql_dbg(ql_dbg_mbx, vha, 0x10be,
4280 		    "Failed to complete IOCB -- error status (%x).\n",
4281 		    vpmod->comp_status);
4282 		rval = QLA_FUNCTION_FAILED;
4283 	} else if (vpmod->comp_status != cpu_to_le16(CS_COMPLETE)) {
4284 		ql_dbg(ql_dbg_mbx, vha, 0x10bf,
4285 		    "Failed to complete IOCB -- completion status (%x).\n",
4286 		    le16_to_cpu(vpmod->comp_status));
4287 		rval = QLA_FUNCTION_FAILED;
4288 	} else {
4289 		/* EMPTY */
4290 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c0,
4291 		    "Done %s.\n", __func__);
4292 		fc_vport_set_state(vha->fc_vport, FC_VPORT_INITIALIZING);
4293 	}
4294 	dma_pool_free(ha->s_dma_pool, vpmod, vpmod_dma);
4295 
4296 	return rval;
4297 }
4298 
4299 /*
4300  * qla2x00_send_change_request
4301  *	Register to receive, or disable, RSCN requests from the fabric controller
4302  *
4303  * Input:
4304  *	ha = adapter block pointer
4305  *	format = registration format:
4306  *		0 - Reserved
4307  *		1 - Fabric detected registration
4308  *		2 - N_port detected registration
4309  *		3 - Full registration
4310  *		FF - clear registration
4311  *	vp_idx = Virtual port index
4312  *
4313  * Returns:
4314  *	qla2x00 local function return status code.
4315  *
4316  * Context:
4317  *	Kernel Context
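 *
 * Note: on failure this routine returns BIT_1 rather than a standard
 * QLA_* status, so callers should simply test for a non-zero return.
 *
 * Hypothetical usage (full registration on the physical port):
 *
 *	if (qla2x00_send_change_request(vha, 0x3, 0))
 *		... treat as a registration failure ...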
4318  */
4319 
4320 int
4321 qla2x00_send_change_request(scsi_qla_host_t *vha, uint16_t format,
4322 			    uint16_t vp_idx)
4323 {
4324 	int rval;
4325 	mbx_cmd_t mc;
4326 	mbx_cmd_t *mcp = &mc;
4327 
4328 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c7,
4329 	    "Entered %s.\n", __func__);
4330 
4331 	mcp->mb[0] = MBC_SEND_CHANGE_REQUEST;
4332 	mcp->mb[1] = format;
4333 	mcp->mb[9] = vp_idx;
4334 	mcp->out_mb = MBX_9|MBX_1|MBX_0;
4335 	mcp->in_mb = MBX_0|MBX_1;
4336 	mcp->tov = MBX_TOV_SECONDS;
4337 	mcp->flags = 0;
4338 	rval = qla2x00_mailbox_command(vha, mcp);
4339 
4340 	if (rval == QLA_SUCCESS) {
4341 		if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
4342 			rval = BIT_1;
4343 		}
4344 	} else
4345 		rval = BIT_1;
4346 
4347 	return rval;
4348 }
4349 
4350 int
4351 qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
4352     uint32_t size)
4353 {
4354 	int rval;
4355 	mbx_cmd_t mc;
4356 	mbx_cmd_t *mcp = &mc;
4357 
4358 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1009,
4359 	    "Entered %s.\n", __func__);
4360 
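	/*
	 * Use the extended dump command when the RISC address does not fit
	 * in 16 bits or the ISP is FWI2-capable; otherwise fall back to the
	 * legacy MBC_DUMP_RISC_RAM form with a 16-bit address.
	 */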
4361 	if (MSW(addr) || IS_FWI2_CAPABLE(vha->hw)) {
4362 		mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
4363 		mcp->mb[8] = MSW(addr);
4364 		mcp->mb[10] = 0;
4365 		mcp->out_mb = MBX_10|MBX_8|MBX_0;
4366 	} else {
4367 		mcp->mb[0] = MBC_DUMP_RISC_RAM;
4368 		mcp->out_mb = MBX_0;
4369 	}
4370 	mcp->mb[1] = LSW(addr);
4371 	mcp->mb[2] = MSW(req_dma);
4372 	mcp->mb[3] = LSW(req_dma);
4373 	mcp->mb[6] = MSW(MSD(req_dma));
4374 	mcp->mb[7] = LSW(MSD(req_dma));
4375 	mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
4376 	if (IS_FWI2_CAPABLE(vha->hw)) {
4377 		mcp->mb[4] = MSW(size);
4378 		mcp->mb[5] = LSW(size);
4379 		mcp->out_mb |= MBX_5|MBX_4;
4380 	} else {
4381 		mcp->mb[4] = LSW(size);
4382 		mcp->out_mb |= MBX_4;
4383 	}
4384 
4385 	mcp->in_mb = MBX_0;
4386 	mcp->tov = MBX_TOV_SECONDS;
4387 	mcp->flags = 0;
4388 	rval = qla2x00_mailbox_command(vha, mcp);
4389 
4390 	if (rval != QLA_SUCCESS) {
4391 		ql_dbg(ql_dbg_mbx, vha, 0x1008,
4392 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4393 	} else {
4394 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1007,
4395 		    "Done %s.\n", __func__);
4396 	}
4397 
4398 	return rval;
4399 }
4400 /* 84XX Support **************************************************************/
4401 
4402 struct cs84xx_mgmt_cmd {
4403 	union {
4404 		struct verify_chip_entry_84xx req;
4405 		struct verify_chip_rsp_84xx rsp;
4406 	} p;
4407 };
4408 
4409 int
4410 qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status)
4411 {
4412 	int rval, retry;
4413 	struct cs84xx_mgmt_cmd *mn;
4414 	dma_addr_t mn_dma;
4415 	uint16_t options;
4416 	unsigned long flags;
4417 	struct qla_hw_data *ha = vha->hw;
4418 
4419 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c8,
4420 	    "Entered %s.\n", __func__);
4421 
4422 	mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
4423 	if (mn == NULL) {
4424 		return QLA_MEMORY_ALLOC_FAILED;
4425 	}
4426 
4427 	/* Force Update? */
4428 	options = ha->cs84xx->fw_update ? VCO_FORCE_UPDATE : 0;
4429 	/* Diagnostic firmware? */
4430 	/* options |= MENLO_DIAG_FW; */
4431 	/* We update the firmware with only one data sequence. */
4432 	options |= VCO_END_OF_DATA;
4433 
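	/*
	 * If the firmware-update pass fails, the verify IOCB is retried once
	 * with VCO_DONT_UPDATE_FW set (and VCO_FORCE_UPDATE cleared) so the
	 * chip is only verified, not updated.
	 */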
4434 	do {
4435 		retry = 0;
4436 		memset(mn, 0, sizeof(*mn));
4437 		mn->p.req.entry_type = VERIFY_CHIP_IOCB_TYPE;
4438 		mn->p.req.entry_count = 1;
4439 		mn->p.req.options = cpu_to_le16(options);
4440 
4441 		ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111c,
4442 		    "Dump of Verify Request.\n");
4443 		ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111e,
4444 		    mn, sizeof(*mn));
4445 
4446 		rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
4447 		if (rval != QLA_SUCCESS) {
4448 			ql_dbg(ql_dbg_mbx, vha, 0x10cb,
4449 			    "Failed to issue verify IOCB (%x).\n", rval);
4450 			goto verify_done;
4451 		}
4452 
4453 		ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1110,
4454 		    "Dump of Verify Response.\n");
4455 		ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1118,
4456 		    mn, sizeof(*mn));
4457 
4458 		status[0] = le16_to_cpu(mn->p.rsp.comp_status);
4459 		status[1] = status[0] == CS_VCS_CHIP_FAILURE ?
4460 		    le16_to_cpu(mn->p.rsp.failure_code) : 0;
4461 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ce,
4462 		    "cs=%x fc=%x.\n", status[0], status[1]);
4463 
4464 		if (status[0] != CS_COMPLETE) {
4465 			rval = QLA_FUNCTION_FAILED;
4466 			if (!(options & VCO_DONT_UPDATE_FW)) {
4467 				ql_dbg(ql_dbg_mbx, vha, 0x10cf,
4468 				    "Firmware update failed. Retrying "
4469 				    "without updating firmware.\n");
4470 				options |= VCO_DONT_UPDATE_FW;
4471 				options &= ~VCO_FORCE_UPDATE;
4472 				retry = 1;
4473 			}
4474 		} else {
4475 			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d0,
4476 			    "Firmware updated to %x.\n",
4477 			    le32_to_cpu(mn->p.rsp.fw_ver));
4478 
4479 			/* NOTE: we only update OP firmware. */
4480 			spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
4481 			ha->cs84xx->op_fw_version =
4482 			    le32_to_cpu(mn->p.rsp.fw_ver);
4483 			spin_unlock_irqrestore(&ha->cs84xx->access_lock,
4484 			    flags);
4485 		}
4486 	} while (retry);
4487 
4488 verify_done:
4489 	dma_pool_free(ha->s_dma_pool, mn, mn_dma);
4490 
4491 	if (rval != QLA_SUCCESS) {
4492 		ql_dbg(ql_dbg_mbx, vha, 0x10d1,
4493 		    "Failed=%x.\n", rval);
4494 	} else {
4495 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d2,
4496 		    "Done %s.\n", __func__);
4497 	}
4498 
4499 	return rval;
4500 }
4501 
4502 int
4503 qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
4504 {
4505 	int rval;
4506 	unsigned long flags;
4507 	mbx_cmd_t mc;
4508 	mbx_cmd_t *mcp = &mc;
4509 	struct qla_hw_data *ha = vha->hw;
4510 
4511 	if (!ha->flags.fw_started)
4512 		return QLA_SUCCESS;
4513 
4514 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d3,
4515 	    "Entered %s.\n", __func__);
4516 
4517 	if (IS_SHADOW_REG_CAPABLE(ha))
4518 		req->options |= BIT_13;
4519 
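	/*
	 * Request-queue attributes are passed through the mailbox registers:
	 * mb[2]/mb[3] and mb[6]/mb[7] carry the 64-bit queue DMA address,
	 * mb[5] the ring length, mb[4] the queue id and mb[11] the owning
	 * vport index.
	 */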
4520 	mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
4521 	mcp->mb[1] = req->options;
4522 	mcp->mb[2] = MSW(LSD(req->dma));
4523 	mcp->mb[3] = LSW(LSD(req->dma));
4524 	mcp->mb[6] = MSW(MSD(req->dma));
4525 	mcp->mb[7] = LSW(MSD(req->dma));
4526 	mcp->mb[5] = req->length;
4527 	if (req->rsp)
4528 		mcp->mb[10] = req->rsp->id;
4529 	mcp->mb[12] = req->qos;
4530 	mcp->mb[11] = req->vp_idx;
4531 	mcp->mb[13] = req->rid;
4532 	if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
4533 		mcp->mb[15] = 0;
4534 
4535 	mcp->mb[4] = req->id;
4536 	/* que in ptr index */
4537 	mcp->mb[8] = 0;
4538 	/* que out ptr index */
4539 	mcp->mb[9] = *req->out_ptr = 0;
4540 	mcp->out_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7|
4541 			MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4542 	mcp->in_mb = MBX_0;
4543 	mcp->flags = MBX_DMA_OUT;
4544 	mcp->tov = MBX_TOV_SECONDS * 2;
4545 
4546 	if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
4547 	    IS_QLA28XX(ha))
4548 		mcp->in_mb |= MBX_1;
4549 	if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
4550 		mcp->out_mb |= MBX_15;
4551 		/* debug q create issue in SR-IOV */
4552 		mcp->in_mb |= MBX_9 | MBX_8 | MBX_7;
4553 	}
4554 
4555 	spin_lock_irqsave(&ha->hardware_lock, flags);
4556 	if (!(req->options & BIT_0)) {
4557 		wrt_reg_dword(req->req_q_in, 0);
4558 		if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
4559 			wrt_reg_dword(req->req_q_out, 0);
4560 	}
4561 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
4562 
4563 	rval = qla2x00_mailbox_command(vha, mcp);
4564 	if (rval != QLA_SUCCESS) {
4565 		ql_dbg(ql_dbg_mbx, vha, 0x10d4,
4566 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4567 	} else {
4568 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d5,
4569 		    "Done %s.\n", __func__);
4570 	}
4571 
4572 	return rval;
4573 }
4574 
4575 int
4576 qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
4577 {
4578 	int rval;
4579 	unsigned long flags;
4580 	mbx_cmd_t mc;
4581 	mbx_cmd_t *mcp = &mc;
4582 	struct qla_hw_data *ha = vha->hw;
4583 
4584 	if (!ha->flags.fw_started)
4585 		return QLA_SUCCESS;
4586 
4587 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d6,
4588 	    "Entered %s.\n", __func__);
4589 
4590 	if (IS_SHADOW_REG_CAPABLE(ha))
4591 		rsp->options |= BIT_13;
4592 
4593 	mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
4594 	mcp->mb[1] = rsp->options;
4595 	mcp->mb[2] = MSW(LSD(rsp->dma));
4596 	mcp->mb[3] = LSW(LSD(rsp->dma));
4597 	mcp->mb[6] = MSW(MSD(rsp->dma));
4598 	mcp->mb[7] = LSW(MSD(rsp->dma));
4599 	mcp->mb[5] = rsp->length;
4600 	mcp->mb[14] = rsp->msix->entry;
4601 	mcp->mb[13] = rsp->rid;
4602 	if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
4603 		mcp->mb[15] = 0;
4604 
4605 	mcp->mb[4] = rsp->id;
4606 	/* que in ptr index */
4607 	mcp->mb[8] = *rsp->in_ptr = 0;
4608 	/* que out ptr index */
4609 	mcp->mb[9] = 0;
4610 	mcp->out_mb = MBX_14|MBX_13|MBX_9|MBX_8|MBX_7
4611 			|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4612 	mcp->in_mb = MBX_0;
4613 	mcp->flags = MBX_DMA_OUT;
4614 	mcp->tov = MBX_TOV_SECONDS * 2;
4615 
4616 	if (IS_QLA81XX(ha)) {
4617 		mcp->out_mb |= MBX_12|MBX_11|MBX_10;
4618 		mcp->in_mb |= MBX_1;
4619 	} else if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
4620 		mcp->out_mb |= MBX_15|MBX_12|MBX_11|MBX_10;
4621 		mcp->in_mb |= MBX_1;
4622 		/* debug q create issue in SR-IOV */
4623 		mcp->in_mb |= MBX_9 | MBX_8 | MBX_7;
4624 	}
4625 
4626 	spin_lock_irqsave(&ha->hardware_lock, flags);
4627 	if (!(rsp->options & BIT_0)) {
4628 		wrt_reg_dword(rsp->rsp_q_out, 0);
4629 		if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
4630 			wrt_reg_dword(rsp->rsp_q_in, 0);
4631 	}
4632 
4633 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
4634 
4635 	rval = qla2x00_mailbox_command(vha, mcp);
4636 	if (rval != QLA_SUCCESS) {
4637 		ql_dbg(ql_dbg_mbx, vha, 0x10d7,
4638 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4639 	} else {
4640 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d8,
4641 		    "Done %s.\n", __func__);
4642 	}
4643 
4644 	return rval;
4645 }
4646 
4647 int
4648 qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb)
4649 {
4650 	int rval;
4651 	mbx_cmd_t mc;
4652 	mbx_cmd_t *mcp = &mc;
4653 
4654 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d9,
4655 	    "Entered %s.\n", __func__);
4656 
4657 	mcp->mb[0] = MBC_IDC_ACK;
4658 	memcpy(&mcp->mb[1], mb, QLA_IDC_ACK_REGS * sizeof(uint16_t));
4659 	mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4660 	mcp->in_mb = MBX_0;
4661 	mcp->tov = MBX_TOV_SECONDS;
4662 	mcp->flags = 0;
4663 	rval = qla2x00_mailbox_command(vha, mcp);
4664 
4665 	if (rval != QLA_SUCCESS) {
4666 		ql_dbg(ql_dbg_mbx, vha, 0x10da,
4667 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4668 	} else {
4669 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10db,
4670 		    "Done %s.\n", __func__);
4671 	}
4672 
4673 	return rval;
4674 }
4675 
4676 int
4677 qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size)
4678 {
4679 	int rval;
4680 	mbx_cmd_t mc;
4681 	mbx_cmd_t *mcp = &mc;
4682 
4683 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10dc,
4684 	    "Entered %s.\n", __func__);
4685 
4686 	if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
4687 	    !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
4688 		return QLA_FUNCTION_FAILED;
4689 
4690 	mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
4691 	mcp->mb[1] = FAC_OPT_CMD_GET_SECTOR_SIZE;
4692 	mcp->out_mb = MBX_1|MBX_0;
4693 	mcp->in_mb = MBX_1|MBX_0;
4694 	mcp->tov = MBX_TOV_SECONDS;
4695 	mcp->flags = 0;
4696 	rval = qla2x00_mailbox_command(vha, mcp);
4697 
4698 	if (rval != QLA_SUCCESS) {
4699 		ql_dbg(ql_dbg_mbx, vha, 0x10dd,
4700 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
4701 		    rval, mcp->mb[0], mcp->mb[1]);
4702 	} else {
4703 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10de,
4704 		    "Done %s.\n", __func__);
4705 		*sector_size = mcp->mb[1];
4706 	}
4707 
4708 	return rval;
4709 }
4710 
4711 int
4712 qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable)
4713 {
4714 	int rval;
4715 	mbx_cmd_t mc;
4716 	mbx_cmd_t *mcp = &mc;
4717 
4718 	if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
4719 	    !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
4720 		return QLA_FUNCTION_FAILED;
4721 
4722 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10df,
4723 	    "Entered %s.\n", __func__);
4724 
4725 	mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
4726 	mcp->mb[1] = enable ? FAC_OPT_CMD_WRITE_ENABLE :
4727 	    FAC_OPT_CMD_WRITE_PROTECT;
4728 	mcp->out_mb = MBX_1|MBX_0;
4729 	mcp->in_mb = MBX_1|MBX_0;
4730 	mcp->tov = MBX_TOV_SECONDS;
4731 	mcp->flags = 0;
4732 	rval = qla2x00_mailbox_command(vha, mcp);
4733 
4734 	if (rval != QLA_SUCCESS) {
4735 		ql_dbg(ql_dbg_mbx, vha, 0x10e0,
4736 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
4737 		    rval, mcp->mb[0], mcp->mb[1]);
4738 	} else {
4739 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e1,
4740 		    "Done %s.\n", __func__);
4741 	}
4742 
4743 	return rval;
4744 }
4745 
4746 int
4747 qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish)
4748 {
4749 	int rval;
4750 	mbx_cmd_t mc;
4751 	mbx_cmd_t *mcp = &mc;
4752 
4753 	if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
4754 	    !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
4755 		return QLA_FUNCTION_FAILED;
4756 
4757 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2,
4758 	    "Entered %s.\n", __func__);
4759 
4760 	mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
4761 	mcp->mb[1] = FAC_OPT_CMD_ERASE_SECTOR;
4762 	mcp->mb[2] = LSW(start);
4763 	mcp->mb[3] = MSW(start);
4764 	mcp->mb[4] = LSW(finish);
4765 	mcp->mb[5] = MSW(finish);
4766 	mcp->out_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4767 	mcp->in_mb = MBX_2|MBX_1|MBX_0;
4768 	mcp->tov = MBX_TOV_SECONDS;
4769 	mcp->flags = 0;
4770 	rval = qla2x00_mailbox_command(vha, mcp);
4771 
4772 	if (rval != QLA_SUCCESS) {
4773 		ql_dbg(ql_dbg_mbx, vha, 0x10e3,
4774 		    "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
4775 		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
4776 	} else {
4777 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4,
4778 		    "Done %s.\n", __func__);
4779 	}
4780 
4781 	return rval;
4782 }
4783 
4784 int
4785 qla81xx_fac_semaphore_access(scsi_qla_host_t *vha, int lock)
4786 {
4787 	int rval = QLA_SUCCESS;
4788 	mbx_cmd_t mc;
4789 	mbx_cmd_t *mcp = &mc;
4790 	struct qla_hw_data *ha = vha->hw;
4791 
4792 	if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
4793 	    !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
4794 		return rval;
4795 
4796 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2,
4797 	    "Entered %s.\n", __func__);
4798 
4799 	mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
4800 	mcp->mb[1] = (lock ? FAC_OPT_CMD_LOCK_SEMAPHORE :
4801 	    FAC_OPT_CMD_UNLOCK_SEMAPHORE);
4802 	mcp->out_mb = MBX_1|MBX_0;
4803 	mcp->in_mb = MBX_1|MBX_0;
4804 	mcp->tov = MBX_TOV_SECONDS;
4805 	mcp->flags = 0;
4806 	rval = qla2x00_mailbox_command(vha, mcp);
4807 
4808 	if (rval != QLA_SUCCESS) {
4809 		ql_dbg(ql_dbg_mbx, vha, 0x10e3,
4810 		    "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
4811 		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
4812 	} else {
4813 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4,
4814 		    "Done %s.\n", __func__);
4815 	}
4816 
4817 	return rval;
4818 }
4819 
4820 int
4821 qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)
4822 {
4823 	int rval = 0;
4824 	mbx_cmd_t mc;
4825 	mbx_cmd_t *mcp = &mc;
4826 
4827 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e5,
4828 	    "Entered %s.\n", __func__);
4829 
4830 	mcp->mb[0] = MBC_RESTART_MPI_FW;
4831 	mcp->out_mb = MBX_0;
4832 	mcp->in_mb = MBX_0|MBX_1;
4833 	mcp->tov = MBX_TOV_SECONDS;
4834 	mcp->flags = 0;
4835 	rval = qla2x00_mailbox_command(vha, mcp);
4836 
4837 	if (rval != QLA_SUCCESS) {
4838 		ql_dbg(ql_dbg_mbx, vha, 0x10e6,
4839 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
4840 		    rval, mcp->mb[0], mcp->mb[1]);
4841 	} else {
4842 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e7,
4843 		    "Done %s.\n", __func__);
4844 	}
4845 
4846 	return rval;
4847 }
4848 
4849 int
4850 qla82xx_set_driver_version(scsi_qla_host_t *vha, char *version)
4851 {
4852 	int rval;
4853 	mbx_cmd_t mc;
4854 	mbx_cmd_t *mcp = &mc;
4855 	int i;
4856 	int len;
4857 	__le16 *str;
4858 	struct qla_hw_data *ha = vha->hw;
4859 
4860 	if (!IS_P3P_TYPE(ha))
4861 		return QLA_FUNCTION_FAILED;
4862 
4863 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117b,
4864 	    "Entered %s.\n", __func__);
4865 
4866 	str = (__force __le16 *)version;
4867 	len = strlen(version);
4868 
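	/*
	 * The version string is packed two characters per mailbox register
	 * into mb[4..15] (up to 24 bytes); any remaining registers are
	 * zeroed.
	 */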
4869 	mcp->mb[0] = MBC_SET_RNID_PARAMS;
4870 	mcp->mb[1] = RNID_TYPE_SET_VERSION << 8;
4871 	mcp->out_mb = MBX_1|MBX_0;
4872 	for (i = 4; i < 16 && len; i++, str++, len -= 2) {
4873 		mcp->mb[i] = le16_to_cpup(str);
4874 		mcp->out_mb |= 1<<i;
4875 	}
4876 	for (; i < 16; i++) {
4877 		mcp->mb[i] = 0;
4878 		mcp->out_mb |= 1<<i;
4879 	}
4880 	mcp->in_mb = MBX_1|MBX_0;
4881 	mcp->tov = MBX_TOV_SECONDS;
4882 	mcp->flags = 0;
4883 	rval = qla2x00_mailbox_command(vha, mcp);
4884 
4885 	if (rval != QLA_SUCCESS) {
4886 		ql_dbg(ql_dbg_mbx, vha, 0x117c,
4887 		    "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
4888 	} else {
4889 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117d,
4890 		    "Done %s.\n", __func__);
4891 	}
4892 
4893 	return rval;
4894 }
4895 
4896 int
4897 qla25xx_set_driver_version(scsi_qla_host_t *vha, char *version)
4898 {
4899 	int rval;
4900 	mbx_cmd_t mc;
4901 	mbx_cmd_t *mcp = &mc;
4902 	int len;
4903 	uint16_t dwlen;
4904 	uint8_t *str;
4905 	dma_addr_t str_dma;
4906 	struct qla_hw_data *ha = vha->hw;
4907 
4908 	if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha) ||
4909 	    IS_P3P_TYPE(ha))
4910 		return QLA_FUNCTION_FAILED;
4911 
4912 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117e,
4913 	    "Entered %s.\n", __func__);
4914 
4915 	str = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &str_dma);
4916 	if (!str) {
4917 		ql_log(ql_log_warn, vha, 0x117f,
4918 		    "Failed to allocate driver version param.\n");
4919 		return QLA_MEMORY_ALLOC_FAILED;
4920 	}
4921 
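	/*
	 * str[0] is the descriptor length in dwords (7 -> 28 bytes total),
	 * which leaves 24 bytes after the 4-byte header for the version
	 * string.
	 */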
4922 	memcpy(str, "\x7\x3\x11\x0", 4);
4923 	dwlen = str[0];
4924 	len = dwlen * 4 - 4;
4925 	memset(str + 4, 0, len);
4926 	if (len > strlen(version))
4927 		len = strlen(version);
4928 	memcpy(str + 4, version, len);
4929 
4930 	mcp->mb[0] = MBC_SET_RNID_PARAMS;
4931 	mcp->mb[1] = RNID_TYPE_SET_VERSION << 8 | dwlen;
4932 	mcp->mb[2] = MSW(LSD(str_dma));
4933 	mcp->mb[3] = LSW(LSD(str_dma));
4934 	mcp->mb[6] = MSW(MSD(str_dma));
4935 	mcp->mb[7] = LSW(MSD(str_dma));
4936 	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
4937 	mcp->in_mb = MBX_1|MBX_0;
4938 	mcp->tov = MBX_TOV_SECONDS;
4939 	mcp->flags = 0;
4940 	rval = qla2x00_mailbox_command(vha, mcp);
4941 
4942 	if (rval != QLA_SUCCESS) {
4943 		ql_dbg(ql_dbg_mbx, vha, 0x1180,
4944 		    "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
4945 	} else {
4946 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1181,
4947 		    "Done %s.\n", __func__);
4948 	}
4949 
4950 	dma_pool_free(ha->s_dma_pool, str, str_dma);
4951 
4952 	return rval;
4953 }
4954 
4955 int
4956 qla24xx_get_port_login_templ(scsi_qla_host_t *vha, dma_addr_t buf_dma,
4957 			     void *buf, uint16_t bufsiz)
4958 {
4959 	int rval, i;
4960 	mbx_cmd_t mc;
4961 	mbx_cmd_t *mcp = &mc;
4962 	uint32_t	*bp;
4963 
4964 	if (!IS_FWI2_CAPABLE(vha->hw))
4965 		return QLA_FUNCTION_FAILED;
4966 
4967 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159,
4968 	    "Entered %s.\n", __func__);
4969 
4970 	mcp->mb[0] = MBC_GET_RNID_PARAMS;
4971 	mcp->mb[1] = RNID_TYPE_PORT_LOGIN << 8;
4972 	mcp->mb[2] = MSW(buf_dma);
4973 	mcp->mb[3] = LSW(buf_dma);
4974 	mcp->mb[6] = MSW(MSD(buf_dma));
4975 	mcp->mb[7] = LSW(MSD(buf_dma));
4976 	mcp->mb[8] = bufsiz/4;
4977 	mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4978 	mcp->in_mb = MBX_1|MBX_0;
4979 	mcp->tov = MBX_TOV_SECONDS;
4980 	mcp->flags = 0;
4981 	rval = qla2x00_mailbox_command(vha, mcp);
4982 
4983 	if (rval != QLA_SUCCESS) {
4984 		ql_dbg(ql_dbg_mbx, vha, 0x115a,
4985 		    "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
4986 	} else {
4987 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b,
4988 		    "Done %s.\n", __func__);
4989 		bp = (uint32_t *) buf;
4990 		for (i = 0; i < (bufsiz-4)/4; i++, bp++)
4991 			*bp = le32_to_cpu((__force __le32)*bp);
4992 	}
4993 
4994 	return rval;
4995 }
4996 
4997 #define PUREX_CMD_COUNT	4
4998 int
4999 qla25xx_set_els_cmds_supported(scsi_qla_host_t *vha)
5000 {
5001 	int rval;
5002 	mbx_cmd_t mc;
5003 	mbx_cmd_t *mcp = &mc;
5004 	uint8_t *els_cmd_map;
5005 	uint8_t active_cnt = 0;
5006 	dma_addr_t els_cmd_map_dma;
5007 	uint8_t cmd_opcode[PUREX_CMD_COUNT];
5008 	uint8_t i, index, purex_bit;
5009 	struct qla_hw_data *ha = vha->hw;
5010 
5011 	if (!IS_QLA25XX(ha) && !IS_QLA2031(ha) &&
5012 	    !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
5013 		return QLA_SUCCESS;
5014 
5015 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1197,
5016 	    "Entered %s.\n", __func__);
5017 
5018 	els_cmd_map = dma_alloc_coherent(&ha->pdev->dev, ELS_CMD_MAP_SIZE,
5019 	    &els_cmd_map_dma, GFP_KERNEL);
5020 	if (!els_cmd_map) {
5021 		ql_log(ql_log_warn, vha, 0x7101,
5022 		    "Failed to allocate RDP els command param.\n");
5023 		return QLA_MEMORY_ALLOC_FAILED;
5024 	}
5025 
5026 	/* List of Purex ELS */
5027 	if (ql2xrdpenable) {
5028 		cmd_opcode[active_cnt] = ELS_RDP;
5029 		active_cnt++;
5030 	}
5031 	if (ha->flags.scm_supported_f) {
5032 		cmd_opcode[active_cnt] = ELS_FPIN;
5033 		active_cnt++;
5034 	}
5035 	if (ha->flags.edif_enabled) {
5036 		cmd_opcode[active_cnt] = ELS_AUTH_ELS;
5037 		active_cnt++;
5038 	}
5039 
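	/*
	 * els_cmd_map is a bitmap indexed by ELS opcode (byte = opcode / 8,
	 * bit = opcode % 8); the Purex ELS commands set here are delivered
	 * to the driver by the firmware as PUREX IOCBs.
	 */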
5040 	for (i = 0; i < active_cnt; i++) {
5041 		index = cmd_opcode[i] / 8;
5042 		purex_bit = cmd_opcode[i] % 8;
5043 		els_cmd_map[index] |= 1 << purex_bit;
5044 	}
5045 
5046 	mcp->mb[0] = MBC_SET_RNID_PARAMS;
5047 	mcp->mb[1] = RNID_TYPE_ELS_CMD << 8;
5048 	mcp->mb[2] = MSW(LSD(els_cmd_map_dma));
5049 	mcp->mb[3] = LSW(LSD(els_cmd_map_dma));
5050 	mcp->mb[6] = MSW(MSD(els_cmd_map_dma));
5051 	mcp->mb[7] = LSW(MSD(els_cmd_map_dma));
5052 	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
5053 	mcp->in_mb = MBX_1|MBX_0;
5054 	mcp->tov = MBX_TOV_SECONDS;
5055 	mcp->flags = MBX_DMA_OUT;
5056 	mcp->buf_size = ELS_CMD_MAP_SIZE;
5057 	rval = qla2x00_mailbox_command(vha, mcp);
5058 
5059 	if (rval != QLA_SUCCESS) {
5060 		ql_dbg(ql_dbg_mbx, vha, 0x118d,
5061 		    "Failed=%x (%x,%x).\n", rval, mcp->mb[0], mcp->mb[1]);
5062 	} else {
5063 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c,
5064 		    "Done %s.\n", __func__);
5065 	}
5066 
5067 	dma_free_coherent(&ha->pdev->dev, ELS_CMD_MAP_SIZE,
5068 	   els_cmd_map, els_cmd_map_dma);
5069 
5070 	return rval;
5071 }
5072 
5073 static int
5074 qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp)
5075 {
5076 	int rval;
5077 	mbx_cmd_t mc;
5078 	mbx_cmd_t *mcp = &mc;
5079 
5080 	if (!IS_FWI2_CAPABLE(vha->hw))
5081 		return QLA_FUNCTION_FAILED;
5082 
5083 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159,
5084 	    "Entered %s.\n", __func__);
5085 
5086 	mcp->mb[0] = MBC_GET_RNID_PARAMS;
5087 	mcp->mb[1] = RNID_TYPE_ASIC_TEMP << 8;
5088 	mcp->out_mb = MBX_1|MBX_0;
5089 	mcp->in_mb = MBX_1|MBX_0;
5090 	mcp->tov = MBX_TOV_SECONDS;
5091 	mcp->flags = 0;
5092 	rval = qla2x00_mailbox_command(vha, mcp);
5093 	*temp = mcp->mb[1];
5094 
5095 	if (rval != QLA_SUCCESS) {
5096 		ql_dbg(ql_dbg_mbx, vha, 0x115a,
5097 		    "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
5098 	} else {
5099 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b,
5100 		    "Done %s.\n", __func__);
5101 	}
5102 
5103 	return rval;
5104 }
5105 
5106 int
5107 qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
5108 	uint16_t dev, uint16_t off, uint16_t len, uint16_t opt)
5109 {
5110 	int rval;
5111 	mbx_cmd_t mc;
5112 	mbx_cmd_t *mcp = &mc;
5113 	struct qla_hw_data *ha = vha->hw;
5114 
5115 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
5116 	    "Entered %s.\n", __func__);
5117 
5118 	if (!IS_FWI2_CAPABLE(ha))
5119 		return QLA_FUNCTION_FAILED;
5120 
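	/*
	 * A single-byte read (len == 1) sets BIT_0 in the option word and
	 * returns the byte in mb[1] instead of through the DMA buffer.
	 */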
5121 	if (len == 1)
5122 		opt |= BIT_0;
5123 
5124 	mcp->mb[0] = MBC_READ_SFP;
5125 	mcp->mb[1] = dev;
5126 	mcp->mb[2] = MSW(LSD(sfp_dma));
5127 	mcp->mb[3] = LSW(LSD(sfp_dma));
5128 	mcp->mb[6] = MSW(MSD(sfp_dma));
5129 	mcp->mb[7] = LSW(MSD(sfp_dma));
5130 	mcp->mb[8] = len;
5131 	mcp->mb[9] = off;
5132 	mcp->mb[10] = opt;
5133 	mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
5134 	mcp->in_mb = MBX_1|MBX_0;
5135 	mcp->tov = MBX_TOV_SECONDS;
5136 	mcp->flags = 0;
5137 	rval = qla2x00_mailbox_command(vha, mcp);
5138 
5139 	if (opt & BIT_0)
5140 		*sfp = mcp->mb[1];
5141 
5142 	if (rval != QLA_SUCCESS) {
5143 		ql_dbg(ql_dbg_mbx, vha, 0x10e9,
5144 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5145 		if (mcp->mb[0] == MBS_COMMAND_ERROR && mcp->mb[1] == 0x22) {
5146 			/* sfp is not there */
5147 			rval = QLA_INTERFACE_ERROR;
5148 		}
5149 	} else {
5150 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
5151 		    "Done %s.\n", __func__);
5152 	}
5153 
5154 	return rval;
5155 }
5156 
5157 int
5158 qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
5159 	uint16_t dev, uint16_t off, uint16_t len, uint16_t opt)
5160 {
5161 	int rval;
5162 	mbx_cmd_t mc;
5163 	mbx_cmd_t *mcp = &mc;
5164 	struct qla_hw_data *ha = vha->hw;
5165 
5166 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10eb,
5167 	    "Entered %s.\n", __func__);
5168 
5169 	if (!IS_FWI2_CAPABLE(ha))
5170 		return QLA_FUNCTION_FAILED;
5171 
5172 	if (len == 1)
5173 		opt |= BIT_0;
5174 
5175 	if (opt & BIT_0)
5176 		len = *sfp;
5177 
5178 	mcp->mb[0] = MBC_WRITE_SFP;
5179 	mcp->mb[1] = dev;
5180 	mcp->mb[2] = MSW(LSD(sfp_dma));
5181 	mcp->mb[3] = LSW(LSD(sfp_dma));
5182 	mcp->mb[6] = MSW(MSD(sfp_dma));
5183 	mcp->mb[7] = LSW(MSD(sfp_dma));
5184 	mcp->mb[8] = len;
5185 	mcp->mb[9] = off;
5186 	mcp->mb[10] = opt;
5187 	mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
5188 	mcp->in_mb = MBX_1|MBX_0;
5189 	mcp->tov = MBX_TOV_SECONDS;
5190 	mcp->flags = 0;
5191 	rval = qla2x00_mailbox_command(vha, mcp);
5192 
5193 	if (rval != QLA_SUCCESS) {
5194 		ql_dbg(ql_dbg_mbx, vha, 0x10ec,
5195 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5196 	} else {
5197 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ed,
5198 		    "Done %s.\n", __func__);
5199 	}
5200 
5201 	return rval;
5202 }
5203 
5204 int
5205 qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma,
5206     uint16_t size_in_bytes, uint16_t *actual_size)
5207 {
5208 	int rval;
5209 	mbx_cmd_t mc;
5210 	mbx_cmd_t *mcp = &mc;
5211 
5212 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ee,
5213 	    "Entered %s.\n", __func__);
5214 
5215 	if (!IS_CNA_CAPABLE(vha->hw))
5216 		return QLA_FUNCTION_FAILED;
5217 
5218 	mcp->mb[0] = MBC_GET_XGMAC_STATS;
5219 	mcp->mb[2] = MSW(stats_dma);
5220 	mcp->mb[3] = LSW(stats_dma);
5221 	mcp->mb[6] = MSW(MSD(stats_dma));
5222 	mcp->mb[7] = LSW(MSD(stats_dma));
5223 	mcp->mb[8] = size_in_bytes >> 2;
5224 	mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
5225 	mcp->in_mb = MBX_2|MBX_1|MBX_0;
5226 	mcp->tov = MBX_TOV_SECONDS;
5227 	mcp->flags = 0;
5228 	rval = qla2x00_mailbox_command(vha, mcp);
5229 
5230 	if (rval != QLA_SUCCESS) {
5231 		ql_dbg(ql_dbg_mbx, vha, 0x10ef,
5232 		    "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
5233 		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
5234 	} else {
5235 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f0,
5236 		    "Done %s.\n", __func__);
5237 
5239 		*actual_size = mcp->mb[2] << 2;
5240 	}
5241 
5242 	return rval;
5243 }
5244 
5245 int
5246 qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma,
5247     uint16_t size)
5248 {
5249 	int rval;
5250 	mbx_cmd_t mc;
5251 	mbx_cmd_t *mcp = &mc;
5252 
5253 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f1,
5254 	    "Entered %s.\n", __func__);
5255 
5256 	if (!IS_CNA_CAPABLE(vha->hw))
5257 		return QLA_FUNCTION_FAILED;
5258 
5259 	mcp->mb[0] = MBC_GET_DCBX_PARAMS;
5260 	mcp->mb[1] = 0;
5261 	mcp->mb[2] = MSW(tlv_dma);
5262 	mcp->mb[3] = LSW(tlv_dma);
5263 	mcp->mb[6] = MSW(MSD(tlv_dma));
5264 	mcp->mb[7] = LSW(MSD(tlv_dma));
5265 	mcp->mb[8] = size;
5266 	mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
5267 	mcp->in_mb = MBX_2|MBX_1|MBX_0;
5268 	mcp->tov = MBX_TOV_SECONDS;
5269 	mcp->flags = 0;
5270 	rval = qla2x00_mailbox_command(vha, mcp);
5271 
5272 	if (rval != QLA_SUCCESS) {
5273 		ql_dbg(ql_dbg_mbx, vha, 0x10f2,
5274 		    "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
5275 		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
5276 	} else {
5277 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f3,
5278 		    "Done %s.\n", __func__);
5279 	}
5280 
5281 	return rval;
5282 }
5283 
5284 int
5285 qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data)
5286 {
5287 	int rval;
5288 	mbx_cmd_t mc;
5289 	mbx_cmd_t *mcp = &mc;
5290 
5291 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f4,
5292 	    "Entered %s.\n", __func__);
5293 
5294 	if (!IS_FWI2_CAPABLE(vha->hw))
5295 		return QLA_FUNCTION_FAILED;
5296 
5297 	mcp->mb[0] = MBC_READ_RAM_EXTENDED;
5298 	mcp->mb[1] = LSW(risc_addr);
5299 	mcp->mb[8] = MSW(risc_addr);
5300 	mcp->out_mb = MBX_8|MBX_1|MBX_0;
5301 	mcp->in_mb = MBX_3|MBX_2|MBX_0;
5302 	mcp->tov = MBX_TOV_SECONDS;
5303 	mcp->flags = 0;
5304 	rval = qla2x00_mailbox_command(vha, mcp);
5305 	if (rval != QLA_SUCCESS) {
5306 		ql_dbg(ql_dbg_mbx, vha, 0x10f5,
5307 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5308 	} else {
5309 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f6,
5310 		    "Done %s.\n", __func__);
5311 		*data = mcp->mb[3] << 16 | mcp->mb[2];
5312 	}
5313 
5314 	return rval;
5315 }
5316 
5317 int
5318 qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
5319 	uint16_t *mresp)
5320 {
5321 	int rval;
5322 	mbx_cmd_t mc;
5323 	mbx_cmd_t *mcp = &mc;
5324 
5325 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f7,
5326 	    "Entered %s.\n", __func__);
5327 
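	/*
	 * Loopback parameters use the extended mailbox set: mb[10]/mb[11]
	 * transfer size, mb[14]/mb[15]/mb[20]/mb[21] send buffer address,
	 * mb[16]/mb[17]/mb[6]/mb[7] receive buffer address and
	 * mb[18]/mb[19] iteration count.
	 */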
5328 	memset(mcp->mb, 0, sizeof(mcp->mb));
5329 	mcp->mb[0] = MBC_DIAGNOSTIC_LOOP_BACK;
5330 	mcp->mb[1] = mreq->options | BIT_6;	/* BIT_6 specifies 64-bit addressing */
5331 
5332 	/* transfer count */
5333 	mcp->mb[10] = LSW(mreq->transfer_size);
5334 	mcp->mb[11] = MSW(mreq->transfer_size);
5335 
5336 	/* send data address */
5337 	mcp->mb[14] = LSW(mreq->send_dma);
5338 	mcp->mb[15] = MSW(mreq->send_dma);
5339 	mcp->mb[20] = LSW(MSD(mreq->send_dma));
5340 	mcp->mb[21] = MSW(MSD(mreq->send_dma));
5341 
5342 	/* receive data address */
5343 	mcp->mb[16] = LSW(mreq->rcv_dma);
5344 	mcp->mb[17] = MSW(mreq->rcv_dma);
5345 	mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
5346 	mcp->mb[7] = MSW(MSD(mreq->rcv_dma));
5347 
5348 	/* Iteration count */
5349 	mcp->mb[18] = LSW(mreq->iteration_count);
5350 	mcp->mb[19] = MSW(mreq->iteration_count);
5351 
5352 	mcp->out_mb = MBX_21|MBX_20|MBX_19|MBX_18|MBX_17|MBX_16|MBX_15|
5353 	    MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
5354 	if (IS_CNA_CAPABLE(vha->hw))
5355 		mcp->out_mb |= MBX_2;
5356 	mcp->in_mb = MBX_19|MBX_18|MBX_3|MBX_2|MBX_1|MBX_0;
5357 
5358 	mcp->buf_size = mreq->transfer_size;
5359 	mcp->tov = MBX_TOV_SECONDS;
5360 	mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5361 
5362 	rval = qla2x00_mailbox_command(vha, mcp);
5363 
5364 	if (rval != QLA_SUCCESS) {
5365 		ql_dbg(ql_dbg_mbx, vha, 0x10f8,
5366 		    "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[18]=%x "
5367 		    "mb[19]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2],
5368 		    mcp->mb[3], mcp->mb[18], mcp->mb[19]);
5369 	} else {
5370 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f9,
5371 		    "Done %s.\n", __func__);
5372 	}
5373 
5374 	/* Copy mailbox information */
5375 	memcpy(mresp, mcp->mb, 64);
5376 	return rval;
5377 }
5378 
5379 int
5380 qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
5381 	uint16_t *mresp)
5382 {
5383 	int rval;
5384 	mbx_cmd_t mc;
5385 	mbx_cmd_t *mcp = &mc;
5386 	struct qla_hw_data *ha = vha->hw;
5387 
5388 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fa,
5389 	    "Entered %s.\n", __func__);
5390 
5391 	memset(mcp->mb, 0, sizeof(mcp->mb));
5392 	mcp->mb[0] = MBC_DIAGNOSTIC_ECHO;
5393 	/* BIT_6 specifies 64bit address */
5394 	mcp->mb[1] = mreq->options | BIT_15 | BIT_6;
5395 	if (IS_CNA_CAPABLE(ha)) {
5396 		mcp->mb[2] = vha->fcoe_fcf_idx;
5397 	}
5398 	mcp->mb[16] = LSW(mreq->rcv_dma);
5399 	mcp->mb[17] = MSW(mreq->rcv_dma);
5400 	mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
5401 	mcp->mb[7] = MSW(MSD(mreq->rcv_dma));
5402 
5403 	mcp->mb[10] = LSW(mreq->transfer_size);
5404 
5405 	mcp->mb[14] = LSW(mreq->send_dma);
5406 	mcp->mb[15] = MSW(mreq->send_dma);
5407 	mcp->mb[20] = LSW(MSD(mreq->send_dma));
5408 	mcp->mb[21] = MSW(MSD(mreq->send_dma));
5409 
5410 	mcp->out_mb = MBX_21|MBX_20|MBX_17|MBX_16|MBX_15|
5411 	    MBX_14|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
5412 	if (IS_CNA_CAPABLE(ha))
5413 		mcp->out_mb |= MBX_2;
5414 
5415 	mcp->in_mb = MBX_0;
5416 	if (IS_CNA_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
5417 	    IS_QLA2031(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
5418 		mcp->in_mb |= MBX_1;
5419 	if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) ||
5420 	    IS_QLA28XX(ha))
5421 		mcp->in_mb |= MBX_3;
5422 
5423 	mcp->tov = MBX_TOV_SECONDS;
5424 	mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5425 	mcp->buf_size = mreq->transfer_size;
5426 
5427 	rval = qla2x00_mailbox_command(vha, mcp);
5428 
5429 	if (rval != QLA_SUCCESS) {
5430 		ql_dbg(ql_dbg_mbx, vha, 0x10fb,
5431 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
5432 		    rval, mcp->mb[0], mcp->mb[1]);
5433 	} else {
5434 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fc,
5435 		    "Done %s.\n", __func__);
5436 	}
5437 
5438 	/* Copy mailbox information */
5439 	memcpy(mresp, mcp->mb, 64);
5440 	return rval;
5441 }
5442 
5443 int
5444 qla84xx_reset_chip(scsi_qla_host_t *vha, uint16_t enable_diagnostic)
5445 {
5446 	int rval;
5447 	mbx_cmd_t mc;
5448 	mbx_cmd_t *mcp = &mc;
5449 
5450 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fd,
5451 	    "Entered %s enable_diag=%d.\n", __func__, enable_diagnostic);
5452 
5453 	mcp->mb[0] = MBC_ISP84XX_RESET;
5454 	mcp->mb[1] = enable_diagnostic;
5455 	mcp->out_mb = MBX_1|MBX_0;
5456 	mcp->in_mb = MBX_1|MBX_0;
5457 	mcp->tov = MBX_TOV_SECONDS;
5458 	mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5459 	rval = qla2x00_mailbox_command(vha, mcp);
5460 
5461 	if (rval != QLA_SUCCESS)
5462 		ql_dbg(ql_dbg_mbx, vha, 0x10fe, "Failed=%x.\n", rval);
5463 	else
5464 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ff,
5465 		    "Done %s.\n", __func__);
5466 
5467 	return rval;
5468 }
5469 
5470 int
5471 qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
5472 {
5473 	int rval;
5474 	mbx_cmd_t mc;
5475 	mbx_cmd_t *mcp = &mc;
5476 
5477 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1100,
5478 	    "Entered %s.\n", __func__);
5479 
5480 	if (!IS_FWI2_CAPABLE(vha->hw))
5481 		return QLA_FUNCTION_FAILED;
5482 
5483 	mcp->mb[0] = MBC_WRITE_RAM_WORD_EXTENDED;
5484 	mcp->mb[1] = LSW(risc_addr);
5485 	mcp->mb[2] = LSW(data);
5486 	mcp->mb[3] = MSW(data);
5487 	mcp->mb[8] = MSW(risc_addr);
5488 	mcp->out_mb = MBX_8|MBX_3|MBX_2|MBX_1|MBX_0;
5489 	mcp->in_mb = MBX_1|MBX_0;
5490 	mcp->tov = MBX_TOV_SECONDS;
5491 	mcp->flags = 0;
5492 	rval = qla2x00_mailbox_command(vha, mcp);
5493 	if (rval != QLA_SUCCESS) {
5494 		ql_dbg(ql_dbg_mbx, vha, 0x1101,
5495 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
5496 		    rval, mcp->mb[0], mcp->mb[1]);
5497 	} else {
5498 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1102,
5499 		    "Done %s.\n", __func__);
5500 	}
5501 
5502 	return rval;
5503 }
5504 
5505 int
5506 qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb)
5507 {
5508 	int rval;
5509 	uint32_t stat, timer;
5510 	uint16_t mb0 = 0;
5511 	struct qla_hw_data *ha = vha->hw;
5512 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
5513 
5514 	rval = QLA_SUCCESS;
5515 
5516 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1103,
5517 	    "Entered %s.\n", __func__);
5518 
5519 	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
5520 
5521 	/* Write the MBC data to the registers */
5522 	wrt_reg_word(&reg->mailbox0, MBC_WRITE_MPI_REGISTER);
5523 	wrt_reg_word(&reg->mailbox1, mb[0]);
5524 	wrt_reg_word(&reg->mailbox2, mb[1]);
5525 	wrt_reg_word(&reg->mailbox3, mb[2]);
5526 	wrt_reg_word(&reg->mailbox4, mb[3]);
5527 
5528 	wrt_reg_dword(&reg->hccr, HCCRX_SET_HOST_INT);
5529 
5530 	/* Poll for MBC interrupt */
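	/* Worst case ~30 seconds: 6,000,000 iterations of udelay(5). */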
5531 	for (timer = 6000000; timer; timer--) {
5532 		/* Check for pending interrupts. */
5533 		stat = rd_reg_dword(&reg->host_status);
5534 		if (stat & HSRX_RISC_INT) {
5535 			stat &= 0xff;
5536 
5537 			if (stat == 0x1 || stat == 0x2 ||
5538 			    stat == 0x10 || stat == 0x11) {
5539 				set_bit(MBX_INTERRUPT,
5540 				    &ha->mbx_cmd_flags);
5541 				mb0 = rd_reg_word(&reg->mailbox0);
5542 				wrt_reg_dword(&reg->hccr,
5543 				    HCCRX_CLR_RISC_INT);
5544 				rd_reg_dword(&reg->hccr);
5545 				break;
5546 			}
5547 		}
5548 		udelay(5);
5549 	}
5550 
5551 	if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags))
5552 		rval = mb0 & MBS_MASK;
5553 	else
5554 		rval = QLA_FUNCTION_FAILED;
5555 
5556 	if (rval != QLA_SUCCESS) {
5557 		ql_dbg(ql_dbg_mbx, vha, 0x1104,
5558 		    "Failed=%x mb[0]=%x.\n", rval, mb[0]);
5559 	} else {
5560 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1105,
5561 		    "Done %s.\n", __func__);
5562 	}
5563 
5564 	return rval;
5565 }
5566 
5567 /* Set the specified data rate */
5568 int
5569 qla2x00_set_data_rate(scsi_qla_host_t *vha, uint16_t mode)
5570 {
5571 	int rval;
5572 	mbx_cmd_t mc;
5573 	mbx_cmd_t *mcp = &mc;
5574 	struct qla_hw_data *ha = vha->hw;
5575 	uint16_t val;
5576 
5577 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106,
5578 	    "Entered %s speed:0x%x mode:0x%x.\n", __func__, ha->set_data_rate,
5579 	    mode);
5580 
5581 	if (!IS_FWI2_CAPABLE(ha))
5582 		return QLA_FUNCTION_FAILED;
5583 
5584 	memset(mcp, 0, sizeof(*mcp));
5585 	switch (ha->set_data_rate) {
5586 	case PORT_SPEED_AUTO:
5587 	case PORT_SPEED_4GB:
5588 	case PORT_SPEED_8GB:
5589 	case PORT_SPEED_16GB:
5590 	case PORT_SPEED_32GB:
5591 		val = ha->set_data_rate;
5592 		break;
5593 	default:
5594 		ql_log(ql_log_warn, vha, 0x1199,
5595 		    "Unrecognized speed setting:%d. Setting Autoneg\n",
5596 		    ha->set_data_rate);
5597 		val = ha->set_data_rate = PORT_SPEED_AUTO;
5598 		break;
5599 	}
5600 
5601 	mcp->mb[0] = MBC_DATA_RATE;
5602 	mcp->mb[1] = mode;
5603 	mcp->mb[2] = val;
5604 
5605 	mcp->out_mb = MBX_2|MBX_1|MBX_0;
5606 	mcp->in_mb = MBX_2|MBX_1|MBX_0;
5607 	if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
5608 		mcp->in_mb |= MBX_4|MBX_3;
5609 	mcp->tov = MBX_TOV_SECONDS;
5610 	mcp->flags = 0;
5611 	rval = qla2x00_mailbox_command(vha, mcp);
5612 	if (rval != QLA_SUCCESS) {
5613 		ql_dbg(ql_dbg_mbx, vha, 0x1107,
5614 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5615 	} else {
5616 		if (mcp->mb[1] != 0x7)
5617 			ql_dbg(ql_dbg_mbx, vha, 0x1179,
5618 				"Speed set:0x%x\n", mcp->mb[1]);
5619 
5620 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108,
5621 		    "Done %s.\n", __func__);
5622 	}
5623 
5624 	return rval;
5625 }
5626 
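/*
 * Illustrative usage sketch (not part of the driver): a caller that wants
 * to pin the link to a fixed speed stores one of the PORT_SPEED_* values
 * accepted by the switch above in ha->set_data_rate and then issues the
 * mailbox command.  The "mode" word below is the firmware-defined mode
 * argument and is assumed to be supplied by the caller.
 *
 *	vha->hw->set_data_rate = PORT_SPEED_16GB;
 *	if (qla2x00_set_data_rate(vha, mode) != QLA_SUCCESS)
 *		ql_log(ql_log_warn, vha, 0xffff,
 *		    "Unable to set data rate.\n");
 */
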
5627 int
5628 qla2x00_get_data_rate(scsi_qla_host_t *vha)
5629 {
5630 	int rval;
5631 	mbx_cmd_t mc;
5632 	mbx_cmd_t *mcp = &mc;
5633 	struct qla_hw_data *ha = vha->hw;
5634 
5635 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106,
5636 	    "Entered %s.\n", __func__);
5637 
5638 	if (!IS_FWI2_CAPABLE(ha))
5639 		return QLA_FUNCTION_FAILED;
5640 
5641 	mcp->mb[0] = MBC_DATA_RATE;
5642 	mcp->mb[1] = QLA_GET_DATA_RATE;
5643 	mcp->out_mb = MBX_1|MBX_0;
5644 	mcp->in_mb = MBX_2|MBX_1|MBX_0;
5645 	if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
5646 		mcp->in_mb |= MBX_4|MBX_3;
5647 	mcp->tov = MBX_TOV_SECONDS;
5648 	mcp->flags = 0;
5649 	rval = qla2x00_mailbox_command(vha, mcp);
5650 	if (rval != QLA_SUCCESS) {
5651 		ql_dbg(ql_dbg_mbx, vha, 0x1107,
5652 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5653 	} else {
5654 		if (mcp->mb[1] != 0x7)
5655 			ha->link_data_rate = mcp->mb[1];
5656 
5657 		if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
5658 			if (mcp->mb[4] & BIT_0)
5659 				ql_log(ql_log_info, vha, 0x11a2,
5660 				    "FEC=enabled (data rate).\n");
5661 		}
5662 
5663 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108,
5664 		    "Done %s.\n", __func__);
5665 		if (mcp->mb[1] != 0x7)
5666 			ha->link_data_rate = mcp->mb[1];
5667 	}
5668 
5669 	return rval;
5670 }
5671 
5672 int
5673 qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb)
5674 {
5675 	int rval;
5676 	mbx_cmd_t mc;
5677 	mbx_cmd_t *mcp = &mc;
5678 	struct qla_hw_data *ha = vha->hw;
5679 
5680 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1109,
5681 	    "Entered %s.\n", __func__);
5682 
5683 	if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA8044(ha) &&
5684 	    !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
5685 		return QLA_FUNCTION_FAILED;
5686 	mcp->mb[0] = MBC_GET_PORT_CONFIG;
5687 	mcp->out_mb = MBX_0;
5688 	mcp->in_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5689 	mcp->tov = MBX_TOV_SECONDS;
5690 	mcp->flags = 0;
5691 
5692 	rval = qla2x00_mailbox_command(vha, mcp);
5693 
5694 	if (rval != QLA_SUCCESS) {
5695 		ql_dbg(ql_dbg_mbx, vha, 0x110a,
5696 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5697 	} else {
5698 		/* Copy all bits to preserve original value */
5699 		memcpy(mb, &mcp->mb[1], sizeof(uint16_t) * 4);
5700 
5701 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110b,
5702 		    "Done %s.\n", __func__);
5703 	}
5704 	return rval;
5705 }
5706 
5707 int
5708 qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb)
5709 {
5710 	int rval;
5711 	mbx_cmd_t mc;
5712 	mbx_cmd_t *mcp = &mc;
5713 
5714 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110c,
5715 	    "Entered %s.\n", __func__);
5716 
5717 	mcp->mb[0] = MBC_SET_PORT_CONFIG;
5718 	/* Copy all bits to preserve original setting */
5719 	memcpy(&mcp->mb[1], mb, sizeof(uint16_t) * 4);
5720 	mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5721 	mcp->in_mb = MBX_0;
5722 	mcp->tov = MBX_TOV_SECONDS;
5723 	mcp->flags = 0;
5724 	rval = qla2x00_mailbox_command(vha, mcp);
5725 
5726 	if (rval != QLA_SUCCESS) {
5727 		ql_dbg(ql_dbg_mbx, vha, 0x110d,
5728 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5729 	} else
5730 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110e,
5731 		    "Done %s.\n", __func__);
5732 
5733 	return rval;
5734 }
5735 
5736 
5737 int
5738 qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority,
5739 		uint16_t *mb)
5740 {
5741 	int rval;
5742 	mbx_cmd_t mc;
5743 	mbx_cmd_t *mcp = &mc;
5744 	struct qla_hw_data *ha = vha->hw;
5745 
5746 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110f,
5747 	    "Entered %s.\n", __func__);
5748 
5749 	if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
5750 		return QLA_FUNCTION_FAILED;
5751 
5752 	mcp->mb[0] = MBC_PORT_PARAMS;
5753 	mcp->mb[1] = loop_id;
5754 	if (ha->flags.fcp_prio_enabled)
5755 		mcp->mb[2] = BIT_1;
5756 	else
5757 		mcp->mb[2] = BIT_2;
5758 	mcp->mb[4] = priority & 0xf;
5759 	mcp->mb[9] = vha->vp_idx;
5760 	mcp->out_mb = MBX_9|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5761 	mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0;
5762 	mcp->tov = MBX_TOV_SECONDS;
5763 	mcp->flags = 0;
5764 	rval = qla2x00_mailbox_command(vha, mcp);
5765 	if (mb != NULL) {
5766 		mb[0] = mcp->mb[0];
5767 		mb[1] = mcp->mb[1];
5768 		mb[3] = mcp->mb[3];
5769 		mb[4] = mcp->mb[4];
5770 	}
5771 
5772 	if (rval != QLA_SUCCESS) {
5773 		ql_dbg(ql_dbg_mbx, vha, 0x10cd, "Failed=%x.\n", rval);
5774 	} else {
5775 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10cc,
5776 		    "Done %s.\n", __func__);
5777 	}
5778 
5779 	return rval;
5780 }
5781 
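/*
 * Illustrative usage sketch (not part of the driver): the mb argument is
 * optional, so a caller that only needs the return status may pass NULL.
 * "fcport" and "prio" are assumed to be supplied by the caller; the
 * priority value is masked to 4 bits by the routine above.
 *
 *	if (qla24xx_set_fcp_prio(vha, fcport->loop_id, prio, NULL) !=
 *	    QLA_SUCCESS)
 *		ql_log(ql_log_warn, vha, 0xffff,
 *		    "FCP priority update failed for loop_id %x.\n",
 *		    fcport->loop_id);
 */
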
5782 int
5783 qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp)
5784 {
5785 	int rval = QLA_FUNCTION_FAILED;
5786 	struct qla_hw_data *ha = vha->hw;
5787 	uint8_t byte;
5788 
5789 	if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha)) {
5790 		ql_dbg(ql_dbg_mbx, vha, 0x1150,
5791 		    "Thermal not supported by this card.\n");
5792 		return rval;
5793 	}
5794 
5795 	if (IS_QLA25XX(ha)) {
5796 		if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
5797 		    ha->pdev->subsystem_device == 0x0175) {
5798 			rval = qla2x00_read_sfp(vha, 0, &byte,
5799 			    0x98, 0x1, 1, BIT_13|BIT_0);
5800 			*temp = byte;
5801 			return rval;
5802 		}
5803 		if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
5804 		    ha->pdev->subsystem_device == 0x338e) {
5805 			rval = qla2x00_read_sfp(vha, 0, &byte,
5806 			    0x98, 0x1, 1, BIT_15|BIT_14|BIT_0);
5807 			*temp = byte;
5808 			return rval;
5809 		}
5810 		ql_dbg(ql_dbg_mbx, vha, 0x10c9,
5811 		    "Thermal not supported by this card.\n");
5812 		return rval;
5813 	}
5814 
5815 	if (IS_QLA82XX(ha)) {
5816 		*temp = qla82xx_read_temperature(vha);
5817 		rval = QLA_SUCCESS;
5818 		return rval;
5819 	} else if (IS_QLA8044(ha)) {
5820 		*temp = qla8044_read_temperature(vha);
5821 		rval = QLA_SUCCESS;
5822 		return rval;
5823 	}
5824 
5825 	rval = qla2x00_read_asic_temperature(vha, temp);
5826 	return rval;
5827 }
5828 
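/*
 * Illustrative usage sketch (not part of the driver): the routine above
 * selects the proper temperature source (SFP, P3P register or ASIC
 * mailbox) for the chip type, so a caller only supplies a uint16_t.
 *
 *	uint16_t temp;
 *
 *	if (qla2x00_get_thermal_temp(vha, &temp) == QLA_SUCCESS)
 *		ql_log(ql_log_info, vha, 0xffff,
 *		    "Board temperature: %d C.\n", temp);
 */
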
5829 int
5830 qla82xx_mbx_intr_enable(scsi_qla_host_t *vha)
5831 {
5832 	int rval;
5833 	struct qla_hw_data *ha = vha->hw;
5834 	mbx_cmd_t mc;
5835 	mbx_cmd_t *mcp = &mc;
5836 
5837 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1017,
5838 	    "Entered %s.\n", __func__);
5839 
5840 	if (!IS_FWI2_CAPABLE(ha))
5841 		return QLA_FUNCTION_FAILED;
5842 
5843 	memset(mcp, 0, sizeof(mbx_cmd_t));
5844 	mcp->mb[0] = MBC_TOGGLE_INTERRUPT;
5845 	mcp->mb[1] = 1;
5846 
5847 	mcp->out_mb = MBX_1|MBX_0;
5848 	mcp->in_mb = MBX_0;
5849 	mcp->tov = MBX_TOV_SECONDS;
5850 	mcp->flags = 0;
5851 
5852 	rval = qla2x00_mailbox_command(vha, mcp);
5853 	if (rval != QLA_SUCCESS) {
5854 		ql_dbg(ql_dbg_mbx, vha, 0x1016,
5855 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5856 	} else {
5857 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100e,
5858 		    "Done %s.\n", __func__);
5859 	}
5860 
5861 	return rval;
5862 }
5863 
5864 int
5865 qla82xx_mbx_intr_disable(scsi_qla_host_t *vha)
5866 {
5867 	int rval;
5868 	struct qla_hw_data *ha = vha->hw;
5869 	mbx_cmd_t mc;
5870 	mbx_cmd_t *mcp = &mc;
5871 
5872 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100d,
5873 	    "Entered %s.\n", __func__);
5874 
5875 	if (!IS_P3P_TYPE(ha))
5876 		return QLA_FUNCTION_FAILED;
5877 
5878 	memset(mcp, 0, sizeof(mbx_cmd_t));
5879 	mcp->mb[0] = MBC_TOGGLE_INTERRUPT;
5880 	mcp->mb[1] = 0;
5881 
5882 	mcp->out_mb = MBX_1|MBX_0;
5883 	mcp->in_mb = MBX_0;
5884 	mcp->tov = MBX_TOV_SECONDS;
5885 	mcp->flags = 0;
5886 
5887 	rval = qla2x00_mailbox_command(vha, mcp);
5888 	if (rval != QLA_SUCCESS) {
5889 		ql_dbg(ql_dbg_mbx, vha, 0x100c,
5890 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5891 	} else {
5892 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100b,
5893 		    "Done %s.\n", __func__);
5894 	}
5895 
5896 	return rval;
5897 }
5898 
5899 int
5900 qla82xx_md_get_template_size(scsi_qla_host_t *vha)
5901 {
5902 	struct qla_hw_data *ha = vha->hw;
5903 	mbx_cmd_t mc;
5904 	mbx_cmd_t *mcp = &mc;
5905 	int rval = QLA_FUNCTION_FAILED;
5906 
5907 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111f,
5908 	    "Entered %s.\n", __func__);
5909 
5910 	memset(mcp->mb, 0, sizeof(mcp->mb));
5911 	mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5912 	mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5913 	mcp->mb[2] = LSW(RQST_TMPLT_SIZE);
5914 	mcp->mb[3] = MSW(RQST_TMPLT_SIZE);
5915 
5916 	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
5917 	mcp->in_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|
5918 	    MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5919 
5920 	mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5921 	mcp->tov = MBX_TOV_SECONDS;
5922 	rval = qla2x00_mailbox_command(vha, mcp);
5923 
5924 	/* Always copy back return mailbox values. */
5925 	if (rval != QLA_SUCCESS) {
5926 		ql_dbg(ql_dbg_mbx, vha, 0x1120,
5927 		    "mailbox command FAILED=0x%x, subcode=%x.\n",
5928 		    (mcp->mb[1] << 16) | mcp->mb[0],
5929 		    (mcp->mb[3] << 16) | mcp->mb[2]);
5930 	} else {
5931 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1121,
5932 		    "Done %s.\n", __func__);
5933 		ha->md_template_size = ((mcp->mb[3] << 16) | mcp->mb[2]);
5934 		if (!ha->md_template_size) {
5935 			ql_dbg(ql_dbg_mbx, vha, 0x1122,
5936 			    "Null template size obtained.\n");
5937 			rval = QLA_FUNCTION_FAILED;
5938 		}
5939 	}
5940 	return rval;
5941 }
5942 
5943 int
5944 qla82xx_md_get_template(scsi_qla_host_t *vha)
5945 {
5946 	struct qla_hw_data *ha = vha->hw;
5947 	mbx_cmd_t mc;
5948 	mbx_cmd_t *mcp = &mc;
5949 	int rval = QLA_FUNCTION_FAILED;
5950 
5951 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1123,
5952 	    "Entered %s.\n", __func__);
5953 
5954 	ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev,
5955 	   ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL);
5956 	if (!ha->md_tmplt_hdr) {
5957 		ql_log(ql_log_warn, vha, 0x1124,
5958 		    "Unable to allocate memory for Minidump template.\n");
5959 		return rval;
5960 	}
5961 
5962 	memset(mcp->mb, 0, sizeof(mcp->mb));
5963 	mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5964 	mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5965 	mcp->mb[2] = LSW(RQST_TMPLT);
5966 	mcp->mb[3] = MSW(RQST_TMPLT);
5967 	mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma));
5968 	mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma));
5969 	mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma));
5970 	mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma));
5971 	mcp->mb[8] = LSW(ha->md_template_size);
5972 	mcp->mb[9] = MSW(ha->md_template_size);
5973 
5974 	mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5975 	mcp->tov = MBX_TOV_SECONDS;
5976 	mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8|
5977 	    MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5978 	mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
5979 	rval = qla2x00_mailbox_command(vha, mcp);
5980 
5981 	if (rval != QLA_SUCCESS) {
5982 		ql_dbg(ql_dbg_mbx, vha, 0x1125,
5983 		    "mailbox command FAILED=0x%x, subcode=%x.\n",
5984 		    ((mcp->mb[1] << 16) | mcp->mb[0]),
5985 		    ((mcp->mb[3] << 16) | mcp->mb[2]));
5986 	} else
5987 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1126,
5988 		    "Done %s.\n", __func__);
5989 	return rval;
5990 }
5991 
5992 int
5993 qla8044_md_get_template(scsi_qla_host_t *vha)
5994 {
5995 	struct qla_hw_data *ha = vha->hw;
5996 	mbx_cmd_t mc;
5997 	mbx_cmd_t *mcp = &mc;
5998 	int rval = QLA_FUNCTION_FAILED;
5999 	int offset = 0, size = MINIDUMP_SIZE_36K;
6000 
6001 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11f,
6002 	    "Entered %s.\n", __func__);
6003 
6004 	ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev,
6005 	   ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL);
6006 	if (!ha->md_tmplt_hdr) {
6007 		ql_log(ql_log_warn, vha, 0xb11b,
6008 		    "Unable to allocate memory for Minidump template.\n");
6009 		return rval;
6010 	}
6011 
6012 	memset(mcp->mb, 0, sizeof(mcp->mb));
6013 	while (offset < ha->md_template_size) {
6014 		mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
6015 		mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
6016 		mcp->mb[2] = LSW(RQST_TMPLT);
6017 		mcp->mb[3] = MSW(RQST_TMPLT);
6018 		mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma + offset));
6019 		mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma + offset));
6020 		mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma + offset));
6021 		mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma + offset));
6022 		mcp->mb[8] = LSW(size);
6023 		mcp->mb[9] = MSW(size);
6024 		mcp->mb[10] = offset & 0x0000FFFF;
6025 		mcp->mb[11] = offset & 0xFFFF0000;
6026 		mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
6027 		mcp->tov = MBX_TOV_SECONDS;
6028 		mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8|
6029 			MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
6030 		mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
6031 		rval = qla2x00_mailbox_command(vha, mcp);
6032 
6033 		if (rval != QLA_SUCCESS) {
6034 			ql_dbg(ql_dbg_mbx, vha, 0xb11c,
6035 				"mailbox command FAILED=0x%x, subcode=%x.\n",
6036 				((mcp->mb[1] << 16) | mcp->mb[0]),
6037 				((mcp->mb[3] << 16) | mcp->mb[2]));
6038 			return rval;
6039 		} else
6040 			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11d,
6041 				"Done %s.\n", __func__);
6042 		offset = offset + size;
6043 	}
6044 	return rval;
6045 }
6046 
6047 int
6048 qla81xx_set_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
6049 {
6050 	int rval;
6051 	struct qla_hw_data *ha = vha->hw;
6052 	mbx_cmd_t mc;
6053 	mbx_cmd_t *mcp = &mc;
6054 
6055 	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
6056 		return QLA_FUNCTION_FAILED;
6057 
6058 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1133,
6059 	    "Entered %s.\n", __func__);
6060 
6061 	memset(mcp, 0, sizeof(mbx_cmd_t));
6062 	mcp->mb[0] = MBC_SET_LED_CONFIG;
6063 	mcp->mb[1] = led_cfg[0];
6064 	mcp->mb[2] = led_cfg[1];
6065 	if (IS_QLA8031(ha)) {
6066 		mcp->mb[3] = led_cfg[2];
6067 		mcp->mb[4] = led_cfg[3];
6068 		mcp->mb[5] = led_cfg[4];
6069 		mcp->mb[6] = led_cfg[5];
6070 	}
6071 
6072 	mcp->out_mb = MBX_2|MBX_1|MBX_0;
6073 	if (IS_QLA8031(ha))
6074 		mcp->out_mb |= MBX_6|MBX_5|MBX_4|MBX_3;
6075 	mcp->in_mb = MBX_0;
6076 	mcp->tov = MBX_TOV_SECONDS;
6077 	mcp->flags = 0;
6078 
6079 	rval = qla2x00_mailbox_command(vha, mcp);
6080 	if (rval != QLA_SUCCESS) {
6081 		ql_dbg(ql_dbg_mbx, vha, 0x1134,
6082 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6083 	} else {
6084 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1135,
6085 		    "Done %s.\n", __func__);
6086 	}
6087 
6088 	return rval;
6089 }
6090 
6091 int
6092 qla81xx_get_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
6093 {
6094 	int rval;
6095 	struct qla_hw_data *ha = vha->hw;
6096 	mbx_cmd_t mc;
6097 	mbx_cmd_t *mcp = &mc;
6098 
6099 	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
6100 		return QLA_FUNCTION_FAILED;
6101 
6102 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1136,
6103 	    "Entered %s.\n", __func__);
6104 
6105 	memset(mcp, 0, sizeof(mbx_cmd_t));
6106 	mcp->mb[0] = MBC_GET_LED_CONFIG;
6107 
6108 	mcp->out_mb = MBX_0;
6109 	mcp->in_mb = MBX_2|MBX_1|MBX_0;
6110 	if (IS_QLA8031(ha))
6111 		mcp->in_mb |= MBX_6|MBX_5|MBX_4|MBX_3;
6112 	mcp->tov = MBX_TOV_SECONDS;
6113 	mcp->flags = 0;
6114 
6115 	rval = qla2x00_mailbox_command(vha, mcp);
6116 	if (rval != QLA_SUCCESS) {
6117 		ql_dbg(ql_dbg_mbx, vha, 0x1137,
6118 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6119 	} else {
6120 		led_cfg[0] = mcp->mb[1];
6121 		led_cfg[1] = mcp->mb[2];
6122 		if (IS_QLA8031(ha)) {
6123 			led_cfg[2] = mcp->mb[3];
6124 			led_cfg[3] = mcp->mb[4];
6125 			led_cfg[4] = mcp->mb[5];
6126 			led_cfg[5] = mcp->mb[6];
6127 		}
6128 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1138,
6129 		    "Done %s.\n", __func__);
6130 	}
6131 
6132 	return rval;
6133 }
6134 
6135 int
6136 qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable)
6137 {
6138 	int rval;
6139 	struct qla_hw_data *ha = vha->hw;
6140 	mbx_cmd_t mc;
6141 	mbx_cmd_t *mcp = &mc;
6142 
6143 	if (!IS_P3P_TYPE(ha))
6144 		return QLA_FUNCTION_FAILED;
6145 
6146 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1127,
6147 		"Entered %s.\n", __func__);
6148 
6149 	memset(mcp, 0, sizeof(mbx_cmd_t));
6150 	mcp->mb[0] = MBC_SET_LED_CONFIG;
6151 	if (enable)
6152 		mcp->mb[7] = 0xE;
6153 	else
6154 		mcp->mb[7] = 0xD;
6155 
6156 	mcp->out_mb = MBX_7|MBX_0;
6157 	mcp->in_mb = MBX_0;
6158 	mcp->tov = MBX_TOV_SECONDS;
6159 	mcp->flags = 0;
6160 
6161 	rval = qla2x00_mailbox_command(vha, mcp);
6162 	if (rval != QLA_SUCCESS) {
6163 		ql_dbg(ql_dbg_mbx, vha, 0x1128,
6164 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6165 	} else {
6166 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1129,
6167 		    "Done %s.\n", __func__);
6168 	}
6169 
6170 	return rval;
6171 }
6172 
6173 int
6174 qla83xx_wr_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data)
6175 {
6176 	int rval;
6177 	struct qla_hw_data *ha = vha->hw;
6178 	mbx_cmd_t mc;
6179 	mbx_cmd_t *mcp = &mc;
6180 
6181 	if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
6182 		return QLA_FUNCTION_FAILED;
6183 
6184 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1130,
6185 	    "Entered %s.\n", __func__);
6186 
6187 	mcp->mb[0] = MBC_WRITE_REMOTE_REG;
6188 	mcp->mb[1] = LSW(reg);
6189 	mcp->mb[2] = MSW(reg);
6190 	mcp->mb[3] = LSW(data);
6191 	mcp->mb[4] = MSW(data);
6192 	mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
6193 
6194 	mcp->in_mb = MBX_1|MBX_0;
6195 	mcp->tov = MBX_TOV_SECONDS;
6196 	mcp->flags = 0;
6197 	rval = qla2x00_mailbox_command(vha, mcp);
6198 
6199 	if (rval != QLA_SUCCESS) {
6200 		ql_dbg(ql_dbg_mbx, vha, 0x1131,
6201 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6202 	} else {
6203 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1132,
6204 		    "Done %s.\n", __func__);
6205 	}
6206 
6207 	return rval;
6208 }
6209 
6210 int
6211 qla2x00_port_logout(scsi_qla_host_t *vha, struct fc_port *fcport)
6212 {
6213 	int rval;
6214 	struct qla_hw_data *ha = vha->hw;
6215 	mbx_cmd_t mc;
6216 	mbx_cmd_t *mcp = &mc;
6217 
6218 	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
6219 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113b,
6220 		    "Implicit LOGO Unsupported.\n");
6221 		return QLA_FUNCTION_FAILED;
6222 	}
6223 
6224 
6225 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113c,
6226 	    "Entering %s.\n",  __func__);
6227 
6228 	/* Perform Implicit LOGO. */
6229 	mcp->mb[0] = MBC_PORT_LOGOUT;
6230 	mcp->mb[1] = fcport->loop_id;
6231 	mcp->mb[10] = BIT_15;
6232 	mcp->out_mb = MBX_10|MBX_1|MBX_0;
6233 	mcp->in_mb = MBX_0;
6234 	mcp->tov = MBX_TOV_SECONDS;
6235 	mcp->flags = 0;
6236 	rval = qla2x00_mailbox_command(vha, mcp);
6237 	if (rval != QLA_SUCCESS)
6238 		ql_dbg(ql_dbg_mbx, vha, 0x113d,
6239 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6240 	else
6241 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113e,
6242 		    "Done %s.\n", __func__);
6243 
6244 	return rval;
6245 }
6246 
6247 int
6248 qla83xx_rd_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t *data)
6249 {
6250 	int rval;
6251 	mbx_cmd_t mc;
6252 	mbx_cmd_t *mcp = &mc;
6253 	struct qla_hw_data *ha = vha->hw;
6254 	unsigned long retry_max_time = jiffies + (2 * HZ);
6255 
6256 	if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
6257 		return QLA_FUNCTION_FAILED;
6258 
6259 	ql_dbg(ql_dbg_mbx, vha, 0x114b, "Entered %s.\n", __func__);
6260 
6261 retry_rd_reg:
6262 	mcp->mb[0] = MBC_READ_REMOTE_REG;
6263 	mcp->mb[1] = LSW(reg);
6264 	mcp->mb[2] = MSW(reg);
6265 	mcp->out_mb = MBX_2|MBX_1|MBX_0;
6266 	mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0;
6267 	mcp->tov = MBX_TOV_SECONDS;
6268 	mcp->flags = 0;
6269 	rval = qla2x00_mailbox_command(vha, mcp);
6270 
6271 	if (rval != QLA_SUCCESS) {
6272 		ql_dbg(ql_dbg_mbx, vha, 0x114c,
6273 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
6274 		    rval, mcp->mb[0], mcp->mb[1]);
6275 	} else {
6276 		*data = (mcp->mb[3] | (mcp->mb[4] << 16));
6277 		if (*data == QLA8XXX_BAD_VALUE) {
6278 			/*
6279 			 * During soft-reset CAMRAM register reads might
6280 			 * return 0xbad0bad0. So retry for MAX of 2 sec
6281 			 * while reading camram registers.
6282 			 */
6283 			if (time_after(jiffies, retry_max_time)) {
6284 				ql_dbg(ql_dbg_mbx, vha, 0x1141,
6285 				    "Failure to read CAMRAM register. "
6286 				    "data=0x%x.\n", *data);
6287 				return QLA_FUNCTION_FAILED;
6288 			}
6289 			msleep(100);
6290 			goto retry_rd_reg;
6291 		}
6292 		ql_dbg(ql_dbg_mbx, vha, 0x1142, "Done %s.\n", __func__);
6293 	}
6294 
6295 	return rval;
6296 }
6297 
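/*
 * Illustrative usage sketch (not part of the driver): a remote (CAMRAM)
 * register read only needs the register offset and a place for the 32-bit
 * result; the retry on 0xbad0bad0 described above is handled internally.
 * "reg_off" is a hypothetical register offset supplied by the caller.
 *
 *	uint32_t data;
 *
 *	if (qla83xx_rd_reg(vha, reg_off, &data) == QLA_SUCCESS)
 *		ql_dbg(ql_dbg_mbx, vha, 0xffff,
 *		    "Remote reg 0x%x = 0x%x.\n", reg_off, data);
 */
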
6298 int
6299 qla83xx_restart_nic_firmware(scsi_qla_host_t *vha)
6300 {
6301 	int rval;
6302 	mbx_cmd_t mc;
6303 	mbx_cmd_t *mcp = &mc;
6304 	struct qla_hw_data *ha = vha->hw;
6305 
6306 	if (!IS_QLA83XX(ha))
6307 		return QLA_FUNCTION_FAILED;
6308 
6309 	ql_dbg(ql_dbg_mbx, vha, 0x1143, "Entered %s.\n", __func__);
6310 
6311 	mcp->mb[0] = MBC_RESTART_NIC_FIRMWARE;
6312 	mcp->out_mb = MBX_0;
6313 	mcp->in_mb = MBX_1|MBX_0;
6314 	mcp->tov = MBX_TOV_SECONDS;
6315 	mcp->flags = 0;
6316 	rval = qla2x00_mailbox_command(vha, mcp);
6317 
6318 	if (rval != QLA_SUCCESS) {
6319 		ql_dbg(ql_dbg_mbx, vha, 0x1144,
6320 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
6321 		    rval, mcp->mb[0], mcp->mb[1]);
6322 		qla2xxx_dump_fw(vha);
6323 	} else {
6324 		ql_dbg(ql_dbg_mbx, vha, 0x1145, "Done %s.\n", __func__);
6325 	}
6326 
6327 	return rval;
6328 }
6329 
6330 int
6331 qla83xx_access_control(scsi_qla_host_t *vha, uint16_t options,
6332 	uint32_t start_addr, uint32_t end_addr, uint16_t *sector_size)
6333 {
6334 	int rval;
6335 	mbx_cmd_t mc;
6336 	mbx_cmd_t *mcp = &mc;
6337 	uint8_t subcode = (uint8_t)options;
6338 	struct qla_hw_data *ha = vha->hw;
6339 
6340 	if (!IS_QLA8031(ha))
6341 		return QLA_FUNCTION_FAILED;
6342 
6343 	ql_dbg(ql_dbg_mbx, vha, 0x1146, "Entered %s.\n", __func__);
6344 
6345 	mcp->mb[0] = MBC_SET_ACCESS_CONTROL;
6346 	mcp->mb[1] = options;
6347 	mcp->out_mb = MBX_1|MBX_0;
6348 	if (subcode & BIT_2) {
6349 		mcp->mb[2] = LSW(start_addr);
6350 		mcp->mb[3] = MSW(start_addr);
6351 		mcp->mb[4] = LSW(end_addr);
6352 		mcp->mb[5] = MSW(end_addr);
6353 		mcp->out_mb |= MBX_5|MBX_4|MBX_3|MBX_2;
6354 	}
6355 	mcp->in_mb = MBX_2|MBX_1|MBX_0;
6356 	if (!(subcode & (BIT_2 | BIT_5)))
6357 		mcp->in_mb |= MBX_4|MBX_3;
6358 	mcp->tov = MBX_TOV_SECONDS;
6359 	mcp->flags = 0;
6360 	rval = qla2x00_mailbox_command(vha, mcp);
6361 
6362 	if (rval != QLA_SUCCESS) {
6363 		ql_dbg(ql_dbg_mbx, vha, 0x1147,
6364 		    "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[4]=%x.\n",
6365 		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3],
6366 		    mcp->mb[4]);
6367 		qla2xxx_dump_fw(vha);
6368 	} else {
6369 		if (subcode & BIT_5)
6370 			*sector_size = mcp->mb[1];
6371 		else if (subcode & (BIT_6 | BIT_7)) {
6372 			ql_dbg(ql_dbg_mbx, vha, 0x1148,
6373 			    "Driver-lock id=%x%x", mcp->mb[4], mcp->mb[3]);
6374 		} else if (subcode & (BIT_3 | BIT_4)) {
6375 			ql_dbg(ql_dbg_mbx, vha, 0x1149,
6376 			    "Flash-lock id=%x%x", mcp->mb[4], mcp->mb[3]);
6377 		}
6378 		ql_dbg(ql_dbg_mbx, vha, 0x114a, "Done %s.\n", __func__);
6379 	}
6380 
6381 	return rval;
6382 }
6383 
6384 int
6385 qla2x00_dump_mctp_data(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
6386 	uint32_t size)
6387 {
6388 	int rval;
6389 	mbx_cmd_t mc;
6390 	mbx_cmd_t *mcp = &mc;
6391 
6392 	if (!IS_MCTP_CAPABLE(vha->hw))
6393 		return QLA_FUNCTION_FAILED;
6394 
6395 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114f,
6396 	    "Entered %s.\n", __func__);
6397 
6398 	mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
6399 	mcp->mb[1] = LSW(addr);
6400 	mcp->mb[2] = MSW(req_dma);
6401 	mcp->mb[3] = LSW(req_dma);
6402 	mcp->mb[4] = MSW(size);
6403 	mcp->mb[5] = LSW(size);
6404 	mcp->mb[6] = MSW(MSD(req_dma));
6405 	mcp->mb[7] = LSW(MSD(req_dma));
6406 	mcp->mb[8] = MSW(addr);
6407 	/* Set the RAM ID valid bit. */
6408 	/* For MCTP, the RAM ID is 0x40. */
6409 	mcp->mb[10] = BIT_7 | 0x40;
6410 
6411 	mcp->out_mb = MBX_10|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|
6412 	    MBX_0;
6413 
6414 	mcp->in_mb = MBX_0;
6415 	mcp->tov = MBX_TOV_SECONDS;
6416 	mcp->flags = 0;
6417 	rval = qla2x00_mailbox_command(vha, mcp);
6418 
6419 	if (rval != QLA_SUCCESS) {
6420 		ql_dbg(ql_dbg_mbx, vha, 0x114e,
6421 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6422 	} else {
6423 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114d,
6424 		    "Done %s.\n", __func__);
6425 	}
6426 
6427 	return rval;
6428 }
6429 
6430 int
6431 qla26xx_dport_diagnostics(scsi_qla_host_t *vha,
6432 	void *dd_buf, uint size, uint options)
6433 {
6434 	int rval;
6435 	mbx_cmd_t mc;
6436 	mbx_cmd_t *mcp = &mc;
6437 	dma_addr_t dd_dma;
6438 
6439 	if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) &&
6440 	    !IS_QLA28XX(vha->hw))
6441 		return QLA_FUNCTION_FAILED;
6442 
6443 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x119f,
6444 	    "Entered %s.\n", __func__);
6445 
6446 	dd_dma = dma_map_single(&vha->hw->pdev->dev,
6447 	    dd_buf, size, DMA_FROM_DEVICE);
6448 	if (dma_mapping_error(&vha->hw->pdev->dev, dd_dma)) {
6449 		ql_log(ql_log_warn, vha, 0x1194, "Failed to map dma buffer.\n");
6450 		return QLA_MEMORY_ALLOC_FAILED;
6451 	}
6452 
6453 	memset(dd_buf, 0, size);
6454 
6455 	mcp->mb[0] = MBC_DPORT_DIAGNOSTICS;
6456 	mcp->mb[1] = options;
6457 	mcp->mb[2] = MSW(LSD(dd_dma));
6458 	mcp->mb[3] = LSW(LSD(dd_dma));
6459 	mcp->mb[6] = MSW(MSD(dd_dma));
6460 	mcp->mb[7] = LSW(MSD(dd_dma));
6461 	mcp->mb[8] = size;
6462 	mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
6463 	mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
6464 	mcp->buf_size = size;
6465 	mcp->flags = MBX_DMA_IN;
6466 	mcp->tov = MBX_TOV_SECONDS * 4;
6467 	rval = qla2x00_mailbox_command(vha, mcp);
6468 
6469 	if (rval != QLA_SUCCESS) {
6470 		ql_dbg(ql_dbg_mbx, vha, 0x1195, "Failed=%x.\n", rval);
6471 	} else {
6472 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1196,
6473 		    "Done %s.\n", __func__);
6474 	}
6475 
6476 	dma_unmap_single(&vha->hw->pdev->dev, dd_dma,
6477 	    size, DMA_FROM_DEVICE);
6478 
6479 	return rval;
6480 }
6481 
6482 int
6483 qla26xx_dport_diagnostics_v2(scsi_qla_host_t *vha,
6484 			     struct qla_dport_diag_v2 *dd,  mbx_cmd_t *mcp)
6485 {
6486 	int rval;
6487 	dma_addr_t dd_dma;
6488 	uint size = sizeof(dd->buf);
6489 	uint16_t options = dd->options;
6490 
6491 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x119f,
6492 	       "Entered %s.\n", __func__);
6493 
6494 	dd_dma = dma_map_single(&vha->hw->pdev->dev,
6495 				dd->buf, size, DMA_FROM_DEVICE);
6496 	if (dma_mapping_error(&vha->hw->pdev->dev, dd_dma)) {
6497 		ql_log(ql_log_warn, vha, 0x1194,
6498 		       "Failed to map dma buffer.\n");
6499 		return QLA_MEMORY_ALLOC_FAILED;
6500 	}
6501 
6502 	memset(dd->buf, 0, size);
6503 
6504 	mcp->mb[0] = MBC_DPORT_DIAGNOSTICS;
6505 	mcp->mb[1] = options;
6506 	mcp->mb[2] = MSW(LSD(dd_dma));
6507 	mcp->mb[3] = LSW(LSD(dd_dma));
6508 	mcp->mb[6] = MSW(MSD(dd_dma));
6509 	mcp->mb[7] = LSW(MSD(dd_dma));
6510 	mcp->mb[8] = size;
6511 	mcp->out_mb = MBX_8 | MBX_7 | MBX_6 | MBX_3 | MBX_2 | MBX_1 | MBX_0;
6512 	mcp->in_mb = MBX_3 | MBX_2 | MBX_1 | MBX_0;
6513 	mcp->buf_size = size;
6514 	mcp->flags = MBX_DMA_IN;
6515 	mcp->tov = MBX_TOV_SECONDS * 4;
6516 	rval = qla2x00_mailbox_command(vha, mcp);
6517 
6518 	if (rval != QLA_SUCCESS) {
6519 		ql_dbg(ql_dbg_mbx, vha, 0x1195, "Failed=%x.\n", rval);
6520 	} else {
6521 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1196,
6522 		       "Done %s.\n", __func__);
6523 	}
6524 
6525 	dma_unmap_single(&vha->hw->pdev->dev, dd_dma, size, DMA_FROM_DEVICE);
6526 
6527 	return rval;
6528 }
6529 
6530 static void qla2x00_async_mb_sp_done(srb_t *sp, int res)
6531 {
6532 	sp->u.iocb_cmd.u.mbx.rc = res;
6533 
6534 	complete(&sp->u.iocb_cmd.u.mbx.comp);
6535 	/* Don't free sp here; the caller does the free. */
6536 }
6537 
6538 /*
6539  * This routine uses the IOCB interface to send a mailbox command.
6540  * This allows non-critical (non chip-setup) commands to go
6541  * out in parallel.
6542  */
6543 int qla24xx_send_mb_cmd(struct scsi_qla_host *vha, mbx_cmd_t *mcp)
6544 {
6545 	int rval = QLA_FUNCTION_FAILED;
6546 	srb_t *sp;
6547 	struct srb_iocb *c;
6548 
6549 	if (!vha->hw->flags.fw_started)
6550 		goto done;
6551 
6552 	/* ref: INIT */
6553 	sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
6554 	if (!sp)
6555 		goto done;
6556 
6557 	c = &sp->u.iocb_cmd;
6558 	init_completion(&c->u.mbx.comp);
6559 
6560 	sp->type = SRB_MB_IOCB;
6561 	sp->name = mb_to_str(mcp->mb[0]);
6562 	qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
6563 			      qla2x00_async_mb_sp_done);
6564 
6565 	memcpy(sp->u.iocb_cmd.u.mbx.out_mb, mcp->mb, SIZEOF_IOCB_MB_REG);
6566 
6567 	rval = qla2x00_start_sp(sp);
6568 	if (rval != QLA_SUCCESS) {
6569 		ql_dbg(ql_dbg_mbx, vha, 0x1018,
6570 		    "%s: %s Failed submission. %x.\n",
6571 		    __func__, sp->name, rval);
6572 		goto done_free_sp;
6573 	}
6574 
6575 	ql_dbg(ql_dbg_mbx, vha, 0x113f, "MB:%s hndl %x submitted\n",
6576 	    sp->name, sp->handle);
6577 
6578 	wait_for_completion(&c->u.mbx.comp);
6579 	memcpy(mcp->mb, sp->u.iocb_cmd.u.mbx.in_mb, SIZEOF_IOCB_MB_REG);
6580 
6581 	rval = c->u.mbx.rc;
6582 	switch (rval) {
6583 	case QLA_FUNCTION_TIMEOUT:
6584 		ql_dbg(ql_dbg_mbx, vha, 0x1140, "%s: %s Timeout. %x.\n",
6585 		    __func__, sp->name, rval);
6586 		break;
6587 	case QLA_SUCCESS:
6588 		ql_dbg(ql_dbg_mbx, vha, 0x119d, "%s: %s done.\n",
6589 		    __func__, sp->name);
6590 		break;
6591 	default:
6592 		ql_dbg(ql_dbg_mbx, vha, 0x119e, "%s: %s Failed. %x.\n",
6593 		    __func__, sp->name, rval);
6594 		break;
6595 	}
6596 
6597 done_free_sp:
6598 	/* ref: INIT */
6599 	kref_put(&sp->cmd_kref, qla2x00_sp_release);
6600 done:
6601 	return rval;
6602 }
6603 
6604 /*
6605  * qla24xx_gpdb_wait
6606  * NOTE: Do not call this routine from DPC thread
6607  */
6608 int qla24xx_gpdb_wait(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
6609 {
6610 	int rval = QLA_FUNCTION_FAILED;
6611 	dma_addr_t pd_dma;
6612 	struct port_database_24xx *pd;
6613 	struct qla_hw_data *ha = vha->hw;
6614 	mbx_cmd_t mc;
6615 
6616 	if (!vha->hw->flags.fw_started)
6617 		goto done;
6618 
6619 	pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
6620 	if (pd == NULL) {
6621 		ql_log(ql_log_warn, vha, 0xd047,
6622 		    "Failed to allocate port database structure.\n");
6623 		goto done_free_sp;
6624 	}
6625 
6626 	memset(&mc, 0, sizeof(mc));
6627 	mc.mb[0] = MBC_GET_PORT_DATABASE;
6628 	mc.mb[1] = fcport->loop_id;
6629 	mc.mb[2] = MSW(pd_dma);
6630 	mc.mb[3] = LSW(pd_dma);
6631 	mc.mb[6] = MSW(MSD(pd_dma));
6632 	mc.mb[7] = LSW(MSD(pd_dma));
6633 	mc.mb[9] = vha->vp_idx;
6634 	mc.mb[10] = opt;
6635 
6636 	rval = qla24xx_send_mb_cmd(vha, &mc);
6637 	if (rval != QLA_SUCCESS) {
6638 		ql_dbg(ql_dbg_mbx, vha, 0x1193,
6639 		    "%s: %8phC fail\n", __func__, fcport->port_name);
6640 		goto done_free_sp;
6641 	}
6642 
6643 	rval = __qla24xx_parse_gpdb(vha, fcport, pd);
6644 
6645 	ql_dbg(ql_dbg_mbx, vha, 0x1197, "%s: %8phC done\n",
6646 	    __func__, fcport->port_name);
6647 
6648 done_free_sp:
6649 	if (pd)
6650 		dma_pool_free(ha->s_dma_pool, pd, pd_dma);
6651 done:
6652 	return rval;
6653 }
6654 
6655 int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport,
6656     struct port_database_24xx *pd)
6657 {
6658 	int rval = QLA_SUCCESS;
6659 	uint64_t zero = 0;
6660 	u8 current_login_state, last_login_state;
6661 
6662 	if (NVME_TARGET(vha->hw, fcport)) {
6663 		current_login_state = pd->current_login_state >> 4;
6664 		last_login_state = pd->last_login_state >> 4;
6665 	} else {
6666 		current_login_state = pd->current_login_state & 0xf;
6667 		last_login_state = pd->last_login_state & 0xf;
6668 	}
6669 
6670 	/* Check for logged in state. */
6671 	if (current_login_state != PDS_PRLI_COMPLETE) {
6672 		ql_dbg(ql_dbg_mbx, vha, 0x119a,
6673 		    "Unable to verify login-state (%x/%x) for loop_id %x.\n",
6674 		    current_login_state, last_login_state, fcport->loop_id);
6675 		rval = QLA_FUNCTION_FAILED;
6676 		goto gpd_error_out;
6677 	}
6678 
6679 	if (fcport->loop_id == FC_NO_LOOP_ID ||
6680 	    (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
6681 	     memcmp(fcport->port_name, pd->port_name, 8))) {
6682 		/* We lost the device mid way. */
6683 		rval = QLA_NOT_LOGGED_IN;
6684 		goto gpd_error_out;
6685 	}
6686 
6687 	/* Names are little-endian. */
6688 	memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
6689 	memcpy(fcport->port_name, pd->port_name, WWN_SIZE);
6690 
6691 	/* Get port_id of device. */
6692 	fcport->d_id.b.domain = pd->port_id[0];
6693 	fcport->d_id.b.area = pd->port_id[1];
6694 	fcport->d_id.b.al_pa = pd->port_id[2];
6695 	fcport->d_id.b.rsvd_1 = 0;
6696 
6697 	ql_dbg(ql_dbg_disc, vha, 0x2062,
6698 	     "%8phC SVC Param w3 %02x%02x",
6699 	     fcport->port_name,
6700 	     pd->prli_svc_param_word_3[1],
6701 	     pd->prli_svc_param_word_3[0]);
6702 
6703 	if (NVME_TARGET(vha->hw, fcport)) {
6704 		fcport->port_type = FCT_NVME;
6705 		if ((pd->prli_svc_param_word_3[0] & BIT_5) == 0)
6706 			fcport->port_type |= FCT_NVME_INITIATOR;
6707 		if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
6708 			fcport->port_type |= FCT_NVME_TARGET;
6709 		if ((pd->prli_svc_param_word_3[0] & BIT_3) == 0)
6710 			fcport->port_type |= FCT_NVME_DISCOVERY;
6711 	} else {
6712 		/* Not a target; must be an initiator or unknown type. */
6713 		if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
6714 			fcport->port_type = FCT_INITIATOR;
6715 		else
6716 			fcport->port_type = FCT_TARGET;
6717 	}
6718 	/* Pass back COS information. */
6719 	fcport->supported_classes = (pd->flags & PDF_CLASS_2) ?
6720 		FC_COS_CLASS2 : FC_COS_CLASS3;
6721 
6722 	if (pd->prli_svc_param_word_3[0] & BIT_7) {
6723 		fcport->flags |= FCF_CONF_COMP_SUPPORTED;
6724 		fcport->conf_compl_supported = 1;
6725 	}
6726 
6727 gpd_error_out:
6728 	return rval;
6729 }
6730 
6731 /*
6732  * qla24xx_gidlist_wait
6733  * NOTE: don't call this routine from DPC thread.
6734  */
6735 int qla24xx_gidlist_wait(struct scsi_qla_host *vha,
6736 	void *id_list, dma_addr_t id_list_dma, uint16_t *entries)
6737 {
6738 	int rval = QLA_FUNCTION_FAILED;
6739 	mbx_cmd_t mc;
6740 
6741 	if (!vha->hw->flags.fw_started)
6742 		goto done;
6743 
6744 	memset(&mc, 0, sizeof(mc));
6745 	mc.mb[0] = MBC_GET_ID_LIST;
6746 	mc.mb[2] = MSW(id_list_dma);
6747 	mc.mb[3] = LSW(id_list_dma);
6748 	mc.mb[6] = MSW(MSD(id_list_dma));
6749 	mc.mb[7] = LSW(MSD(id_list_dma));
6750 	mc.mb[8] = 0;
6751 	mc.mb[9] = vha->vp_idx;
6752 
6753 	rval = qla24xx_send_mb_cmd(vha, &mc);
6754 	if (rval != QLA_SUCCESS) {
6755 		ql_dbg(ql_dbg_mbx, vha, 0x119b,
6756 		    "%s:  fail\n", __func__);
6757 	} else {
6758 		*entries = mc.mb[1];
6759 		ql_dbg(ql_dbg_mbx, vha, 0x119c,
6760 		    "%s:  done\n", __func__);
6761 	}
6762 done:
6763 	return rval;
6764 }
6765 
6766 int qla27xx_set_zio_threshold(scsi_qla_host_t *vha, uint16_t value)
6767 {
6768 	int rval;
6769 	mbx_cmd_t	mc;
6770 	mbx_cmd_t	*mcp = &mc;
6771 
6772 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1200,
6773 	    "Entered %s\n", __func__);
6774 
6775 	memset(mcp->mb, 0, sizeof(mcp->mb));
6776 	mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
6777 	mcp->mb[1] = 1;
6778 	mcp->mb[2] = value;
6779 	mcp->out_mb = MBX_2 | MBX_1 | MBX_0;
6780 	mcp->in_mb = MBX_2 | MBX_0;
6781 	mcp->tov = MBX_TOV_SECONDS;
6782 	mcp->flags = 0;
6783 
6784 	rval = qla2x00_mailbox_command(vha, mcp);
6785 
6786 	ql_dbg(ql_dbg_mbx, vha, 0x1201, "%s %x\n",
6787 	    (rval != QLA_SUCCESS) ? "Failed"  : "Done", rval);
6788 
6789 	return rval;
6790 }
6791 
6792 int qla27xx_get_zio_threshold(scsi_qla_host_t *vha, uint16_t *value)
6793 {
6794 	int rval;
6795 	mbx_cmd_t	mc;
6796 	mbx_cmd_t	*mcp = &mc;
6797 
6798 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1203,
6799 	    "Entered %s\n", __func__);
6800 
6801 	memset(mcp->mb, 0, sizeof(mcp->mb));
6802 	mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
6803 	mcp->mb[1] = 0;
6804 	mcp->out_mb = MBX_1 | MBX_0;
6805 	mcp->in_mb = MBX_2 | MBX_0;
6806 	mcp->tov = MBX_TOV_SECONDS;
6807 	mcp->flags = 0;
6808 
6809 	rval = qla2x00_mailbox_command(vha, mcp);
6810 	if (rval == QLA_SUCCESS)
6811 		*value = mc.mb[2];
6812 
6813 	ql_dbg(ql_dbg_mbx, vha, 0x1205, "%s %x\n",
6814 	    (rval != QLA_SUCCESS) ? "Failed" : "Done", rval);
6815 
6816 	return rval;
6817 }
6818 
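/*
 * Illustrative usage sketch (not part of the driver): the get/set pair
 * above shares one mailbox opcode, selected by mb[1].  Reading the current
 * ZIO threshold before programming a new one could look like this;
 * "new_thresh" is a caller-supplied value and is assumed here.
 *
 *	uint16_t cur;
 *
 *	if (qla27xx_get_zio_threshold(vha, &cur) == QLA_SUCCESS &&
 *	    cur != new_thresh)
 *		qla27xx_set_zio_threshold(vha, new_thresh);
 */
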
6819 int
6820 qla2x00_read_sfp_dev(struct scsi_qla_host *vha, char *buf, int count)
6821 {
6822 	struct qla_hw_data *ha = vha->hw;
6823 	uint16_t iter, addr, offset;
6824 	dma_addr_t phys_addr;
6825 	int rval, c;
6826 	u8 *sfp_data;
6827 
6828 	memset(ha->sfp_data, 0, SFP_DEV_SIZE);
6829 	addr = 0xa0;
6830 	phys_addr = ha->sfp_data_dma;
6831 	sfp_data = ha->sfp_data;
6832 	offset = c = 0;
6833 
6834 	for (iter = 0; iter < SFP_DEV_SIZE / SFP_BLOCK_SIZE; iter++) {
6835 		if (iter == 4) {
6836 			/* Skip to next device address. */
6837 			addr = 0xa2;
6838 			offset = 0;
6839 		}
6840 
6841 		rval = qla2x00_read_sfp(vha, phys_addr, sfp_data,
6842 		    addr, offset, SFP_BLOCK_SIZE, BIT_1);
6843 		if (rval != QLA_SUCCESS) {
6844 			ql_log(ql_log_warn, vha, 0x706d,
6845 			    "Unable to read SFP data (%x/%x/%x).\n", rval,
6846 			    addr, offset);
6847 
6848 			return rval;
6849 		}
6850 
6851 		if (buf && (c < count)) {
6852 			u16 sz;
6853 
6854 			if ((count - c) >= SFP_BLOCK_SIZE)
6855 				sz = SFP_BLOCK_SIZE;
6856 			else
6857 				sz = count - c;
6858 
6859 			memcpy(buf, sfp_data, sz);
6860 			buf += SFP_BLOCK_SIZE;
6861 			c += sz;
6862 		}
6863 		phys_addr += SFP_BLOCK_SIZE;
6864 		sfp_data  += SFP_BLOCK_SIZE;
6865 		offset += SFP_BLOCK_SIZE;
6866 	}
6867 
6868 	return rval;
6869 }
6870 
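/*
 * Illustrative usage sketch (not part of the driver): the routine above
 * walks both SFP device addresses (0xa0, then 0xa2) in SFP_BLOCK_SIZE
 * chunks, so a caller just provides a buffer of up to SFP_DEV_SIZE bytes.
 *
 *	char sfp[SFP_DEV_SIZE];
 *
 *	if (qla2x00_read_sfp_dev(vha, sfp, sizeof(sfp)) != QLA_SUCCESS)
 *		ql_log(ql_log_warn, vha, 0xffff, "SFP read failed.\n");
 */
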
6871 int qla24xx_res_count_wait(struct scsi_qla_host *vha,
6872     uint16_t *out_mb, int out_mb_sz)
6873 {
6874 	int rval = QLA_FUNCTION_FAILED;
6875 	mbx_cmd_t mc;
6876 
6877 	if (!vha->hw->flags.fw_started)
6878 		goto done;
6879 
6880 	memset(&mc, 0, sizeof(mc));
6881 	mc.mb[0] = MBC_GET_RESOURCE_COUNTS;
6882 
6883 	rval = qla24xx_send_mb_cmd(vha, &mc);
6884 	if (rval != QLA_SUCCESS) {
6885 		ql_dbg(ql_dbg_mbx, vha, 0xffff,
6886 			"%s:  fail\n", __func__);
6887 	} else {
6888 		if (out_mb_sz <= SIZEOF_IOCB_MB_REG)
6889 			memcpy(out_mb, mc.mb, out_mb_sz);
6890 		else
6891 			memcpy(out_mb, mc.mb, SIZEOF_IOCB_MB_REG);
6892 
6893 		ql_dbg(ql_dbg_mbx, vha, 0xffff,
6894 			"%s:  done\n", __func__);
6895 	}
6896 done:
6897 	return rval;
6898 }
6899 
6900 int qla28xx_secure_flash_update(scsi_qla_host_t *vha, uint16_t opts,
6901     uint16_t region, uint32_t len, dma_addr_t sfub_dma_addr,
6902     uint32_t sfub_len)
6903 {
6904 	int		rval;
6905 	mbx_cmd_t mc;
6906 	mbx_cmd_t *mcp = &mc;
6907 
6908 	mcp->mb[0] = MBC_SECURE_FLASH_UPDATE;
6909 	mcp->mb[1] = opts;
6910 	mcp->mb[2] = region;
6911 	mcp->mb[3] = MSW(len);
6912 	mcp->mb[4] = LSW(len);
6913 	mcp->mb[5] = MSW(sfub_dma_addr);
6914 	mcp->mb[6] = LSW(sfub_dma_addr);
6915 	mcp->mb[7] = MSW(MSD(sfub_dma_addr));
6916 	mcp->mb[8] = LSW(MSD(sfub_dma_addr));
6917 	mcp->mb[9] = sfub_len;
6918 	mcp->out_mb =
6919 	    MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
6920 	mcp->in_mb = MBX_2|MBX_1|MBX_0;
6921 	mcp->tov = MBX_TOV_SECONDS;
6922 	mcp->flags = 0;
6923 	rval = qla2x00_mailbox_command(vha, mcp);
6924 
6925 	if (rval != QLA_SUCCESS) {
6926 		ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s(%ld): failed rval 0x%x, %x %x %x",
6927 			__func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1],
6928 			mcp->mb[2]);
6929 	}
6930 
6931 	return rval;
6932 }
6933 
6934 int qla2xxx_write_remote_register(scsi_qla_host_t *vha, uint32_t addr,
6935     uint32_t data)
6936 {
6937 	int rval;
6938 	mbx_cmd_t mc;
6939 	mbx_cmd_t *mcp = &mc;
6940 
6941 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
6942 	    "Entered %s.\n", __func__);
6943 
6944 	mcp->mb[0] = MBC_WRITE_REMOTE_REG;
6945 	mcp->mb[1] = LSW(addr);
6946 	mcp->mb[2] = MSW(addr);
6947 	mcp->mb[3] = LSW(data);
6948 	mcp->mb[4] = MSW(data);
6949 	mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
6950 	mcp->in_mb = MBX_1|MBX_0;
6951 	mcp->tov = MBX_TOV_SECONDS;
6952 	mcp->flags = 0;
6953 	rval = qla2x00_mailbox_command(vha, mcp);
6954 
6955 	if (rval != QLA_SUCCESS) {
6956 		ql_dbg(ql_dbg_mbx, vha, 0x10e9,
6957 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6958 	} else {
6959 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
6960 		    "Done %s.\n", __func__);
6961 	}
6962 
6963 	return rval;
6964 }
6965 
6966 int qla2xxx_read_remote_register(scsi_qla_host_t *vha, uint32_t addr,
6967     uint32_t *data)
6968 {
6969 	int rval;
6970 	mbx_cmd_t mc;
6971 	mbx_cmd_t *mcp = &mc;
6972 
6973 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
6974 	    "Entered %s.\n", __func__);
6975 
6976 	mcp->mb[0] = MBC_READ_REMOTE_REG;
6977 	mcp->mb[1] = LSW(addr);
6978 	mcp->mb[2] = MSW(addr);
6979 	mcp->out_mb = MBX_2|MBX_1|MBX_0;
6980 	mcp->in_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
6981 	mcp->tov = MBX_TOV_SECONDS;
6982 	mcp->flags = 0;
6983 	rval = qla2x00_mailbox_command(vha, mcp);
6984 
6985 	*data = (uint32_t)((((uint32_t)mcp->mb[4]) << 16) | mcp->mb[3]);
6986 
6987 	if (rval != QLA_SUCCESS) {
6988 		ql_dbg(ql_dbg_mbx, vha, 0x10e9,
6989 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6990 	} else {
6991 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
6992 		    "Done %s.\n", __func__);
6993 	}
6994 
6995 	return rval;
6996 }
6997 
6998 int
6999 ql26xx_led_config(scsi_qla_host_t *vha, uint16_t options, uint16_t *led)
7000 {
7001 	struct qla_hw_data *ha = vha->hw;
7002 	mbx_cmd_t mc;
7003 	mbx_cmd_t *mcp = &mc;
7004 	int rval;
7005 
7006 	if (!IS_QLA2031(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
7007 		return QLA_FUNCTION_FAILED;
7008 
7009 	ql_dbg(ql_dbg_mbx, vha, 0x7070, "Entered %s (options=%x).\n",
7010 	    __func__, options);
7011 
7012 	mcp->mb[0] = MBC_SET_GET_FC_LED_CONFIG;
7013 	mcp->mb[1] = options;
7014 	mcp->out_mb = MBX_1|MBX_0;
7015 	mcp->in_mb = MBX_1|MBX_0;
7016 	if (options & BIT_0) {
7017 		if (options & BIT_1) {
7018 			mcp->mb[2] = led[2];
7019 			mcp->out_mb |= MBX_2;
7020 		}
7021 		if (options & BIT_2) {
7022 			mcp->mb[3] = led[0];
7023 			mcp->out_mb |= MBX_3;
7024 		}
7025 		if (options & BIT_3) {
7026 			mcp->mb[4] = led[1];
7027 			mcp->out_mb |= MBX_4;
7028 		}
7029 	} else {
7030 		mcp->in_mb |= MBX_4|MBX_3|MBX_2;
7031 	}
7032 	mcp->tov = MBX_TOV_SECONDS;
7033 	mcp->flags = 0;
7034 	rval = qla2x00_mailbox_command(vha, mcp);
7035 	if (rval) {
7036 		ql_dbg(ql_dbg_mbx, vha, 0x7071, "Failed %s %x (mb=%x,%x)\n",
7037 		    __func__, rval, mcp->mb[0], mcp->mb[1]);
7038 		return rval;
7039 	}
7040 
7041 	if (options & BIT_0) {
7042 		ha->beacon_blink_led = 0;
7043 		ql_dbg(ql_dbg_mbx, vha, 0x7072, "Done %s\n", __func__);
7044 	} else {
7045 		led[2] = mcp->mb[2];
7046 		led[0] = mcp->mb[3];
7047 		led[1] = mcp->mb[4];
7048 		ql_dbg(ql_dbg_mbx, vha, 0x7073, "Done %s (led=%x,%x,%x)\n",
7049 		    __func__, led[0], led[1], led[2]);
7050 	}
7051 
7052 	return rval;
7053 }
7054 
7055 /**
7056  * qla_no_op_mb() - Check that the firmware is still alive and able to
7057  * generate an interrupt. Otherwise, the mailbox timeout will trigger a
7058  * firmware dump and reset.
7059  * @vha: host adapter pointer
7060  * Return: None
7061  */
7062 void qla_no_op_mb(struct scsi_qla_host *vha)
7063 {
7064 	mbx_cmd_t mc;
7065 	mbx_cmd_t *mcp = &mc;
7066 	int rval;
7067 
7068 	memset(&mc, 0, sizeof(mc));
7069 	mcp->mb[0] = 0;	/* no-op command is opcode 0 */
7070 	mcp->out_mb = MBX_0;
7071 	mcp->in_mb = MBX_0;
7072 	mcp->tov = 5;
7073 	mcp->flags = 0;
7074 	rval = qla2x00_mailbox_command(vha, mcp);
7075 
7076 	if (rval) {
7077 		ql_dbg(ql_dbg_async, vha, 0x7071,
7078 			"Failed %s %x\n", __func__, rval);
7079 	}
7080 }
7081 
7082 int qla_mailbox_passthru(scsi_qla_host_t *vha,
7083 			 uint16_t *mbx_in, uint16_t *mbx_out)
7084 {
7085 	mbx_cmd_t mc;
7086 	mbx_cmd_t *mcp = &mc;
7087 	int rval = -EINVAL;
7088 
7089 	memset(&mc, 0, sizeof(mc));
7090 	/* Receive the contents of all 32 mailbox registers. */
7091 	memcpy(&mcp->mb, (char *)mbx_in, (32 * sizeof(uint16_t)));
7092 
7093 	mcp->out_mb = 0xFFFFFFFF;
7094 	mcp->in_mb = 0xFFFFFFFF;
7095 
7096 	mcp->tov = MBX_TOV_SECONDS;
7097 	mcp->flags = 0;
7098 	mcp->bufp = NULL;
7099 
7100 	rval = qla2x00_mailbox_command(vha, mcp);
7101 
7102 	if (rval != QLA_SUCCESS) {
7103 		ql_dbg(ql_dbg_mbx, vha, 0xf0a2,
7104 			"Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
7105 	} else {
7106 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xf0a3, "Done %s.\n",
7107 		       __func__);
7108 		/* Pass back the contents of all 32 mailbox registers. */
7109 		memcpy(mbx_out, &mcp->mb, 32 * sizeof(uint16_t));
7110 	}
7111 
7112 	return rval;
7113 }
7114
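/*
 * Illustrative usage sketch (not part of the driver): the passthru
 * interface hands all 32 mailbox registers to the firmware and returns
 * all 32, so a caller fills in only the registers its command needs.
 * MBC_GET_FIRMWARE_VERSION is used purely as an example opcode; mailbox 0
 * of the output always carries the completion status.
 *
 *	uint16_t mb_in[32] = { 0 }, mb_out[32] = { 0 };
 *
 *	mb_in[0] = MBC_GET_FIRMWARE_VERSION;
 *	if (qla_mailbox_passthru(vha, mb_in, mb_out) == QLA_SUCCESS)
 *		ql_dbg(ql_dbg_mbx, vha, 0xffff,
 *		    "Passthru complete, mb[0]=%x.\n", mb_out[0]);
 */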