// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2014 QLogic Corporation
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/delay.h>
#include <linux/gfp.h>

static struct mb_cmd_name {
	uint16_t cmd;
	const char *str;
} mb_str[] = {
	{MBC_GET_PORT_DATABASE,		"GPDB"},
	{MBC_GET_ID_LIST,		"GIDList"},
	{MBC_GET_LINK_PRIV_STATS,	"Stats"},
	{MBC_GET_RESOURCE_COUNTS,	"ResCnt"},
};

static const char *mb_to_str(uint16_t cmd)
{
	int i;
	struct mb_cmd_name *e;

	for (i = 0; i < ARRAY_SIZE(mb_str); i++) {
		e = mb_str + i;
		if (cmd == e->cmd)
			return e->str;
	}
	return "unknown";
}

static struct rom_cmd {
	uint16_t cmd;
} rom_cmds[] = {
	{ MBC_LOAD_RAM },
	{ MBC_EXECUTE_FIRMWARE },
	{ MBC_READ_RAM_WORD },
	{ MBC_MAILBOX_REGISTER_TEST },
	{ MBC_VERIFY_CHECKSUM },
	{ MBC_GET_FIRMWARE_VERSION },
	{ MBC_LOAD_RISC_RAM },
	{ MBC_DUMP_RISC_RAM },
	{ MBC_LOAD_RISC_RAM_EXTENDED },
	{ MBC_DUMP_RISC_RAM_EXTENDED },
	{ MBC_WRITE_RAM_WORD_EXTENDED },
	{ MBC_READ_RAM_EXTENDED },
	{ MBC_GET_RESOURCE_COUNTS },
	{ MBC_SET_FIRMWARE_OPTION },
	{ MBC_MID_INITIALIZE_FIRMWARE },
	{ MBC_GET_FIRMWARE_STATE },
	{ MBC_GET_MEM_OFFLOAD_CNTRL_STAT },
	{ MBC_GET_RETRY_COUNT },
	{ MBC_TRACE_CONTROL },
	{ MBC_INITIALIZE_MULTIQ },
	{ MBC_IOCB_COMMAND_A64 },
	{ MBC_GET_ADAPTER_LOOP_ID },
	{ MBC_READ_SFP },
	{ MBC_SET_RNID_PARAMS },
	{ MBC_GET_RNID_PARAMS },
	{ MBC_GET_SET_ZIO_THRESHOLD },
};

static int is_rom_cmd(uint16_t cmd)
{
	int i;
	struct rom_cmd *wc;

	for (i = 0; i < ARRAY_SIZE(rom_cmds); i++) {
		wc = rom_cmds + i;
		if (wc->cmd == cmd)
			return 1;
	}

	return 0;
}

/*
 * qla2x00_mailbox_command
 *	Issues a mailbox command and waits for completion.
 *
 * Input:
 *	ha = adapter block pointer.
 *	mcp = driver internal mbx struct pointer.
 *
 * Output:
 *	mb[MAX_MAILBOX_REGISTER_COUNT] = returned mailbox data.
 *
 * Returns:
 *	0 : QLA_SUCCESS = cmd performed successfully
 *	1 : QLA_FUNCTION_FAILED (error encountered)
 *	6 : QLA_FUNCTION_TIMEOUT (timeout condition encountered)
 *
 * Context:
 *	Kernel context.
 */
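/*
 * Illustrative usage sketch (not part of the original source): callers in
 * this file build an mbx_cmd_t on the stack, select which mailbox registers
 * to write and read back via the out_mb/in_mb bitmasks, set a timeout, and
 * then invoke this routine, e.g.:
 *
 *	mbx_cmd_t mc;
 *
 *	mc.mb[0] = MBC_GET_FIRMWARE_STATE;
 *	mc.out_mb = MBX_0;
 *	mc.in_mb = MBX_1|MBX_0;
 *	mc.tov = MBX_TOV_SECONDS;
 *	mc.flags = 0;
 *	rval = qla2x00_mailbox_command(vha, &mc);
 *	(the returned registers are then read from mc.mb[])
 */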
static int
qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
{
	int rval, i;
	unsigned long flags = 0;
	device_reg_t *reg;
	uint8_t abort_active, eeh_delay;
	uint8_t io_lock_on;
	uint16_t command = 0;
	uint16_t *iptr;
	__le16 __iomem *optr;
	uint32_t cnt;
	uint32_t mboxes;
	unsigned long wait_time;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
	u32 chip_reset;


	ql_dbg(ql_dbg_mbx, vha, 0x1000, "Entered %s.\n", __func__);

	if (ha->pdev->error_state == pci_channel_io_perm_failure) {
		ql_log(ql_log_warn, vha, 0x1001,
		    "PCI channel failed permanently, exiting.\n");
		return QLA_FUNCTION_TIMEOUT;
	}

	if (vha->device_flags & DFLG_DEV_FAILED) {
		ql_log(ql_log_warn, vha, 0x1002,
		    "Device in failed state, exiting.\n");
		return QLA_FUNCTION_TIMEOUT;
	}

	/* if PCI error, then avoid mbx processing.*/
	if (test_bit(PFLG_DISCONNECTED, &base_vha->dpc_flags) &&
	    test_bit(UNLOADING, &base_vha->dpc_flags)) {
		ql_log(ql_log_warn, vha, 0xd04e,
		    "PCI error, exiting.\n");
		return QLA_FUNCTION_TIMEOUT;
	}
	eeh_delay = 0;
	reg = ha->iobase;
	io_lock_on = base_vha->flags.init_done;

	rval = QLA_SUCCESS;
	abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
	chip_reset = ha->chip_reset;

	if (ha->flags.pci_channel_io_perm_failure) {
		ql_log(ql_log_warn, vha, 0x1003,
		    "Perm failure on EEH timeout MBX, exiting.\n");
		return QLA_FUNCTION_TIMEOUT;
	}

	if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
		/* Setting Link-Down error */
		mcp->mb[0] = MBS_LINK_DOWN_ERROR;
		ql_log(ql_log_warn, vha, 0x1004,
		    "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
		return QLA_FUNCTION_TIMEOUT;
	}

	/* check if ISP abort is active and return cmd with timeout */
	if (((test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
	    test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) &&
	    !is_rom_cmd(mcp->mb[0])) || ha->flags.eeh_busy) {
		ql_log(ql_log_info, vha, 0x1005,
		    "Cmd 0x%x aborted with timeout since ISP Abort is pending\n",
		    mcp->mb[0]);
		return QLA_FUNCTION_TIMEOUT;
	}

	atomic_inc(&ha->num_pend_mbx_stage1);
	/*
	 * Wait for active mailbox commands to finish by waiting at most tov
	 * seconds. This is to serialize actual issuing of mailbox cmds during
	 * non ISP abort time.
	 */
	if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) {
		/* Timeout occurred. Return error. */
		ql_log(ql_log_warn, vha, 0xd035,
		    "Cmd access timeout, cmd=0x%x, Exiting.\n",
		    mcp->mb[0]);
		vha->hw_err_cnt++;
		atomic_dec(&ha->num_pend_mbx_stage1);
		return QLA_FUNCTION_TIMEOUT;
	}
	atomic_dec(&ha->num_pend_mbx_stage1);
	if (ha->flags.purge_mbox || chip_reset != ha->chip_reset ||
	    ha->flags.eeh_busy) {
		ql_log(ql_log_warn, vha, 0xd035,
		    "Error detected: purge[%d] eeh[%d] cmd=0x%x, Exiting.\n",
		    ha->flags.purge_mbox, ha->flags.eeh_busy, mcp->mb[0]);
		rval = QLA_ABORTED;
		goto premature_exit;
	}


	/* Save mailbox command for debug */
	ha->mcp = mcp;

	ql_dbg(ql_dbg_mbx, vha, 0x1006,
	    "Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]);

	spin_lock_irqsave(&ha->hardware_lock, flags);

	if (ha->flags.purge_mbox || chip_reset != ha->chip_reset ||
	    ha->flags.mbox_busy) {
		rval = QLA_ABORTED;
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		goto premature_exit;
	}
	ha->flags.mbox_busy = 1;

	/* Load mailbox registers. */
	if (IS_P3P_TYPE(ha))
		optr = &reg->isp82.mailbox_in[0];
	else if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha)))
		optr = &reg->isp24.mailbox0;
	else
		optr = MAILBOX_REG(ha, &reg->isp, 0);

	iptr = mcp->mb;
	command = mcp->mb[0];
	mboxes = mcp->out_mb;

	ql_dbg(ql_dbg_mbx, vha, 0x1111,
	    "Mailbox registers (OUT):\n");
	for (cnt = 0; cnt < ha->mbx_count; cnt++) {
		if (IS_QLA2200(ha) && cnt == 8)
			optr = MAILBOX_REG(ha, &reg->isp, 8);
		if (mboxes & BIT_0) {
			ql_dbg(ql_dbg_mbx, vha, 0x1112,
			    "mbox[%d]<-0x%04x\n", cnt, *iptr);
			wrt_reg_word(optr, *iptr);
		}

		mboxes >>= 1;
		optr++;
		iptr++;
	}

	ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1117,
	    "I/O Address = %p.\n", optr);

	/* Issue set host interrupt command to send cmd out. */
	ha->flags.mbox_int = 0;
	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);

	/* Unlock mbx registers and wait for interrupt */
	ql_dbg(ql_dbg_mbx, vha, 0x100f,
	    "Going to unlock irq & waiting for interrupts. "
	    "jiffies=%lx.\n", jiffies);

	/* Wait for mbx cmd completion until timeout */
	atomic_inc(&ha->num_pend_mbx_stage2);
	if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) {
		set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);

		if (IS_P3P_TYPE(ha))
			wrt_reg_dword(&reg->isp82.hint, HINT_MBX_INT_PENDING);
		else if (IS_FWI2_CAPABLE(ha))
			wrt_reg_dword(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
		else
			wrt_reg_word(&reg->isp.hccr, HCCR_SET_HOST_INT);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		wait_time = jiffies;
		atomic_inc(&ha->num_pend_mbx_stage3);
		if (!wait_for_completion_timeout(&ha->mbx_intr_comp,
		    mcp->tov * HZ)) {
			if (chip_reset != ha->chip_reset) {
				eeh_delay = ha->flags.eeh_busy ? 1 : 0;

				spin_lock_irqsave(&ha->hardware_lock, flags);
				ha->flags.mbox_busy = 0;
				spin_unlock_irqrestore(&ha->hardware_lock,
				    flags);
				atomic_dec(&ha->num_pend_mbx_stage2);
				atomic_dec(&ha->num_pend_mbx_stage3);
				rval = QLA_ABORTED;
				goto premature_exit;
			}
			ql_dbg(ql_dbg_mbx, vha, 0x117a,
			    "cmd=%x Timeout.\n", command);
			spin_lock_irqsave(&ha->hardware_lock, flags);
			clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
			spin_unlock_irqrestore(&ha->hardware_lock, flags);

		} else if (ha->flags.purge_mbox ||
		    chip_reset != ha->chip_reset) {
			eeh_delay = ha->flags.eeh_busy ? 1 : 0;

			spin_lock_irqsave(&ha->hardware_lock, flags);
			ha->flags.mbox_busy = 0;
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
			atomic_dec(&ha->num_pend_mbx_stage2);
			atomic_dec(&ha->num_pend_mbx_stage3);
			rval = QLA_ABORTED;
			goto premature_exit;
		}
		atomic_dec(&ha->num_pend_mbx_stage3);

		if (time_after(jiffies, wait_time + 5 * HZ))
			ql_log(ql_log_warn, vha, 0x1015, "cmd=0x%x, waited %d msecs\n",
			    command, jiffies_to_msecs(jiffies - wait_time));
	} else {
		ql_dbg(ql_dbg_mbx, vha, 0x1011,
		    "Cmd=%x Polling Mode.\n", command);

		if (IS_P3P_TYPE(ha)) {
			if (rd_reg_dword(&reg->isp82.hint) &
			    HINT_MBX_INT_PENDING) {
				ha->flags.mbox_busy = 0;
				spin_unlock_irqrestore(&ha->hardware_lock,
				    flags);
				atomic_dec(&ha->num_pend_mbx_stage2);
				ql_dbg(ql_dbg_mbx, vha, 0x1012,
				    "Pending mailbox timeout, exiting.\n");
				vha->hw_err_cnt++;
				rval = QLA_FUNCTION_TIMEOUT;
				goto premature_exit;
			}
			wrt_reg_dword(&reg->isp82.hint, HINT_MBX_INT_PENDING);
		} else if (IS_FWI2_CAPABLE(ha))
			wrt_reg_dword(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
		else
			wrt_reg_word(&reg->isp.hccr, HCCR_SET_HOST_INT);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */
		while (!ha->flags.mbox_int) {
			if (ha->flags.purge_mbox ||
			    chip_reset != ha->chip_reset) {
				eeh_delay = ha->flags.eeh_busy ? 1 : 0;

				spin_lock_irqsave(&ha->hardware_lock, flags);
				ha->flags.mbox_busy = 0;
				spin_unlock_irqrestore(&ha->hardware_lock,
				    flags);
				atomic_dec(&ha->num_pend_mbx_stage2);
				rval = QLA_ABORTED;
				goto premature_exit;
			}

			if (time_after(jiffies, wait_time))
				break;

			/* Check for pending interrupts. */
			qla2x00_poll(ha->rsp_q_map[0]);

			if (!ha->flags.mbox_int &&
			    !(IS_QLA2200(ha) &&
			    command == MBC_LOAD_RISC_RAM_EXTENDED))
				msleep(10);
		} /* while */
		ql_dbg(ql_dbg_mbx, vha, 0x1013,
		    "Waited %d sec.\n",
		    (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ));
	}
	atomic_dec(&ha->num_pend_mbx_stage2);

	/* Check whether we timed out */
	if (ha->flags.mbox_int) {
		uint16_t *iptr2;

		ql_dbg(ql_dbg_mbx, vha, 0x1014,
		    "Cmd=%x completed.\n", command);

		/* Got interrupt. Clear the flag. */
		ha->flags.mbox_int = 0;
		clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);

		if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
			spin_lock_irqsave(&ha->hardware_lock, flags);
			ha->flags.mbox_busy = 0;
			spin_unlock_irqrestore(&ha->hardware_lock, flags);

			/* Setting Link-Down error */
			mcp->mb[0] = MBS_LINK_DOWN_ERROR;
			ha->mcp = NULL;
			rval = QLA_FUNCTION_FAILED;
			ql_log(ql_log_warn, vha, 0xd048,
			    "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
			goto premature_exit;
		}

		if (ha->mailbox_out[0] != MBS_COMMAND_COMPLETE) {
			ql_dbg(ql_dbg_mbx, vha, 0x11ff,
			    "mb_out[0] = %#x <> %#x\n", ha->mailbox_out[0],
			    MBS_COMMAND_COMPLETE);
			rval = QLA_FUNCTION_FAILED;
		}

		/* Load return mailbox registers. */
		iptr2 = mcp->mb;
		iptr = (uint16_t *)&ha->mailbox_out[0];
		mboxes = mcp->in_mb;

		ql_dbg(ql_dbg_mbx, vha, 0x1113,
		    "Mailbox registers (IN):\n");
		for (cnt = 0; cnt < ha->mbx_count; cnt++) {
			if (mboxes & BIT_0) {
				*iptr2 = *iptr;
				ql_dbg(ql_dbg_mbx, vha, 0x1114,
				    "mbox[%d]->0x%04x\n", cnt, *iptr2);
			}

			mboxes >>= 1;
			iptr2++;
			iptr++;
		}
	} else {

		uint16_t mb[8];
		uint32_t ictrl, host_status, hccr;
		uint16_t w;

		if (IS_FWI2_CAPABLE(ha)) {
			mb[0] = rd_reg_word(&reg->isp24.mailbox0);
			mb[1] = rd_reg_word(&reg->isp24.mailbox1);
			mb[2] = rd_reg_word(&reg->isp24.mailbox2);
			mb[3] = rd_reg_word(&reg->isp24.mailbox3);
			mb[7] = rd_reg_word(&reg->isp24.mailbox7);
			ictrl = rd_reg_dword(&reg->isp24.ictrl);
			host_status = rd_reg_dword(&reg->isp24.host_status);
			hccr = rd_reg_dword(&reg->isp24.hccr);

			ql_log(ql_log_warn, vha, 0xd04c,
			    "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
			    "mb[0-3]=[0x%x 0x%x 0x%x 0x%x] mb7 0x%x host_status 0x%x hccr 0x%x\n",
			    command, ictrl, jiffies, mb[0], mb[1], mb[2], mb[3],
			    mb[7], host_status, hccr);
			vha->hw_err_cnt++;

		} else {
			mb[0] = RD_MAILBOX_REG(ha, &reg->isp, 0);
			ictrl = rd_reg_word(&reg->isp.ictrl);
			ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119,
			    "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
			    "mb[0]=0x%x\n", command, ictrl, jiffies, mb[0]);
			vha->hw_err_cnt++;
		}
		ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019);

		/* Capture FW dump only, if PCI device active */
		if (!pci_channel_offline(vha->hw->pdev)) {
			pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
			if (w == 0xffff || ictrl == 0xffffffff ||
			    (chip_reset != ha->chip_reset)) {
				/* This is a special case: if the driver is
				 * being unloaded and the PCI device goes into
				 * a bad state due to a PCI error condition,
				 * only the PCI ERR flag will be set.
				 * Do a premature exit for that case.
				 */
				spin_lock_irqsave(&ha->hardware_lock, flags);
				ha->flags.mbox_busy = 0;
				spin_unlock_irqrestore(&ha->hardware_lock,
				    flags);
				rval = QLA_FUNCTION_TIMEOUT;
				goto premature_exit;
			}

			/* Attempt to capture firmware dump for further
			 * analysis of the current firmware state. We do not
			 * need to do this if we are intentionally generating
			 * a dump.
			 */
			if (mcp->mb[0] != MBC_GEN_SYSTEM_ERROR)
				qla2xxx_dump_fw(vha);
			rval = QLA_FUNCTION_TIMEOUT;
		}
	}
	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->flags.mbox_busy = 0;
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	/* Clean up */
	ha->mcp = NULL;

	if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) {
		ql_dbg(ql_dbg_mbx, vha, 0x101a,
		    "Checking for additional resp interrupt.\n");

		/* polling mode for non isp_abort commands. */
		qla2x00_poll(ha->rsp_q_map[0]);
	}

	if (rval == QLA_FUNCTION_TIMEOUT &&
	    mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) {
		if (!io_lock_on || (mcp->flags & IOCTL_CMD) ||
		    ha->flags.eeh_busy) {
			/* not in dpc. schedule it for dpc to take over. */
			ql_dbg(ql_dbg_mbx, vha, 0x101b,
			    "Timeout, schedule isp_abort_needed.\n");

			if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
			    !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
			    !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
				if (IS_QLA82XX(ha)) {
					ql_dbg(ql_dbg_mbx, vha, 0x112a,
					    "disabling pause transmit on port "
					    "0 & 1.\n");
					qla82xx_wr_32(ha,
					    QLA82XX_CRB_NIU + 0x98,
					    CRB_NIU_XG_PAUSE_CTL_P0|
					    CRB_NIU_XG_PAUSE_CTL_P1);
				}
				ql_log(ql_log_info, base_vha, 0x101c,
				    "Mailbox cmd timeout occurred, cmd=0x%x, "
				    "mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP "
				    "abort.\n", command, mcp->mb[0],
				    ha->flags.eeh_busy);
				vha->hw_err_cnt++;
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
			}
		} else if (current == ha->dpc_thread) {
			/* call abort directly since we are in the DPC thread */
			ql_dbg(ql_dbg_mbx, vha, 0x101d,
			    "Timeout, calling abort_isp.\n");

			if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
			    !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
			    !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
				if (IS_QLA82XX(ha)) {
					ql_dbg(ql_dbg_mbx, vha, 0x112b,
					    "disabling pause transmit on port "
					    "0 & 1.\n");
					qla82xx_wr_32(ha,
					    QLA82XX_CRB_NIU + 0x98,
					    CRB_NIU_XG_PAUSE_CTL_P0|
					    CRB_NIU_XG_PAUSE_CTL_P1);
				}
				ql_log(ql_log_info, base_vha, 0x101e,
				    "Mailbox cmd timeout occurred, cmd=0x%x, "
				    "mb[0]=0x%x. Scheduling ISP abort ",
				    command, mcp->mb[0]);
				vha->hw_err_cnt++;
				set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
				clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				/* Allow next mbx cmd to come in. */
				complete(&ha->mbx_cmd_comp);
				if (ha->isp_ops->abort_isp(vha) &&
				    !ha->flags.eeh_busy) {
					/* Failed. retry later. */
					set_bit(ISP_ABORT_NEEDED,
					    &vha->dpc_flags);
				}
				clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
				ql_dbg(ql_dbg_mbx, vha, 0x101f,
				    "Finished abort_isp.\n");
				goto mbx_done;
			}
		}
	}

premature_exit:
	/* Allow next mbx cmd to come in. */
	complete(&ha->mbx_cmd_comp);

mbx_done:
	if (rval == QLA_ABORTED) {
		ql_log(ql_log_info, vha, 0xd035,
		    "Chip Reset in progress. Purging Mbox cmd=0x%x.\n",
		    mcp->mb[0]);
	} else if (rval) {
		if (ql2xextended_error_logging & (ql_dbg_disc|ql_dbg_mbx)) {
			pr_warn("%s [%s]-%04x:%ld: **** Failed=%x", QL_MSGHDR,
			    dev_name(&ha->pdev->dev), 0x1020+0x800,
			    vha->host_no, rval);
			mboxes = mcp->in_mb;
			cnt = 4;
			for (i = 0; i < ha->mbx_count && cnt; i++, mboxes >>= 1)
				if (mboxes & BIT_0) {
					printk(" mb[%u]=%x", i, mcp->mb[i]);
					cnt--;
				}
			pr_warn(" cmd=%x ****\n", command);
		}
		if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha))) {
			ql_dbg(ql_dbg_mbx, vha, 0x1198,
			    "host_status=%#x intr_ctrl=%#x intr_status=%#x\n",
			    rd_reg_dword(&reg->isp24.host_status),
			    rd_reg_dword(&reg->isp24.ictrl),
			    rd_reg_dword(&reg->isp24.istatus));
		} else {
			ql_dbg(ql_dbg_mbx, vha, 0x1206,
			    "ctrl_status=%#x ictrl=%#x istatus=%#x\n",
			    rd_reg_word(&reg->isp.ctrl_status),
			    rd_reg_word(&reg->isp.ictrl),
			    rd_reg_word(&reg->isp.istatus));
		}
	} else {
		ql_dbg(ql_dbg_mbx, base_vha, 0x1021, "Done %s.\n", __func__);
	}

	i = 500;
	while (i && eeh_delay && (ha->pci_error_state < QLA_PCI_SLOT_RESET)) {
		/*
		 * The caller of this mailbox encountered a PCI error.
		 * Hold the thread until the PCIe link reset completes to
		 * make sure the caller does not unmap DMA while recovery is
		 * in progress.
		 */
		msleep(1);
		i--;
	}
	return rval;
}

int
qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
    uint32_t risc_code_size)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1022,
	    "Entered %s.\n", __func__);

	if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) {
		mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED;
		mcp->mb[8] = MSW(risc_addr);
		mcp->out_mb = MBX_8|MBX_0;
	} else {
		mcp->mb[0] = MBC_LOAD_RISC_RAM;
		mcp->out_mb = MBX_0;
	}
	mcp->mb[1] = LSW(risc_addr);
	mcp->mb[2] = MSW(req_dma);
	mcp->mb[3] = LSW(req_dma);
	mcp->mb[6] = MSW(MSD(req_dma));
	mcp->mb[7] = LSW(MSD(req_dma));
	mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
	if (IS_FWI2_CAPABLE(ha)) {
		mcp->mb[4] = MSW(risc_code_size);
		mcp->mb[5] = LSW(risc_code_size);
		mcp->out_mb |= MBX_5|MBX_4;
	} else {
		mcp->mb[4] = LSW(risc_code_size);
		mcp->out_mb |= MBX_4;
	}

	mcp->in_mb = MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1023,
		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
		    rval, mcp->mb[0], mcp->mb[1]);
		vha->hw_err_cnt++;
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1024,
		    "Done %s.\n", __func__);
	}

	return rval;
}

#define	NVME_ENABLE_FLAG	BIT_3
#define	EDIF_HW_SUPPORT		BIT_10

/*
 * qla2x00_execute_fw
 *	Start adapter firmware.
 *
 * Input:
 *	ha = adapter block pointer.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	u8 semaphore = 0;
#define EXE_FW_FORCE_SEMAPHORE BIT_7
	u8 retry = 3;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1025,
	    "Entered %s.\n", __func__);

again:
	mcp->mb[0] = MBC_EXECUTE_FIRMWARE;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_0;
	if (IS_FWI2_CAPABLE(ha)) {
		mcp->mb[1] = MSW(risc_addr);
		mcp->mb[2] = LSW(risc_addr);
		mcp->mb[3] = 0;
		mcp->mb[4] = 0;
		mcp->mb[11] = 0;

		/* Enable BPM? */
		if (ha->flags.lr_detected) {
			mcp->mb[4] = BIT_0;
			if (IS_BPM_RANGE_CAPABLE(ha))
				mcp->mb[4] |=
				    ha->lr_distance << LR_DIST_FW_POS;
		}

		if (ql2xnvmeenable && (IS_QLA27XX(ha) || IS_QLA28XX(ha)))
			mcp->mb[4] |= NVME_ENABLE_FLAG;

		if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
			struct nvram_81xx *nv = ha->nvram;
			/* set minimum speed if specified in nvram */
			if (nv->min_supported_speed >= 2 &&
			    nv->min_supported_speed <= 5) {
				mcp->mb[4] |= BIT_4;
				mcp->mb[11] |= nv->min_supported_speed & 0xF;
				mcp->out_mb |= MBX_11;
				mcp->in_mb |= BIT_5;
				vha->min_supported_speed =
				    nv->min_supported_speed;
			}
		}

		if (ha->flags.exlogins_enabled)
			mcp->mb[4] |= ENABLE_EXTENDED_LOGIN;

		if (ha->flags.exchoffld_enabled)
			mcp->mb[4] |= ENABLE_EXCHANGE_OFFLD;

		if (semaphore)
			mcp->mb[11] |= EXE_FW_FORCE_SEMAPHORE;

		mcp->out_mb |= MBX_4 | MBX_3 | MBX_2 | MBX_1 | MBX_11;
		mcp->in_mb |= MBX_5 | MBX_3 | MBX_2 | MBX_1;
	} else {
		mcp->mb[1] = LSW(risc_addr);
		mcp->out_mb |= MBX_1;
		if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
			mcp->mb[2] = 0;
			mcp->out_mb |= MBX_2;
		}
	}

	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		if (IS_QLA28XX(ha) && rval == QLA_COMMAND_ERROR &&
		    mcp->mb[1] == 0x27 && retry) {
			semaphore = 1;
			retry--;
			ql_dbg(ql_dbg_async, vha, 0x1026,
			    "Exe FW: force semaphore.\n");
			goto again;
		}

		ql_dbg(ql_dbg_mbx, vha, 0x1026,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
		vha->hw_err_cnt++;
		return rval;
	}

	if (!IS_FWI2_CAPABLE(ha))
		goto done;

	ha->fw_ability_mask = mcp->mb[3] << 16 | mcp->mb[2];
	ql_dbg(ql_dbg_mbx, vha, 0x119a,
	    "fw_ability_mask=%x.\n", ha->fw_ability_mask);
	ql_dbg(ql_dbg_mbx, vha, 0x1027, "exchanges=%x.\n", mcp->mb[1]);
	if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
		ha->max_supported_speed = mcp->mb[2] & (BIT_0|BIT_1);
		ql_dbg(ql_dbg_mbx, vha, 0x119b, "max_supported_speed=%s.\n",
		    ha->max_supported_speed == 0 ? "16Gps" :
		    ha->max_supported_speed == 1 ? "32Gps" :
		    ha->max_supported_speed == 2 ? "64Gps" : "unknown");
		if (vha->min_supported_speed) {
			ha->min_supported_speed = mcp->mb[5] &
			    (BIT_0 | BIT_1 | BIT_2);
			ql_dbg(ql_dbg_mbx, vha, 0x119c,
			    "min_supported_speed=%s.\n",
			    ha->min_supported_speed == 6 ? "64Gps" :
			    ha->min_supported_speed == 5 ? "32Gps" :
			    ha->min_supported_speed == 4 ? "16Gps" :
			    ha->min_supported_speed == 3 ? "8Gps" :
			    ha->min_supported_speed == 2 ? "4Gps" : "unknown");
		}
	}

	if (IS_QLA28XX(ha) && (mcp->mb[5] & EDIF_HW_SUPPORT)) {
		ha->flags.edif_hw = 1;
		ql_log(ql_log_info, vha, 0xffff,
		    "%s: edif HW\n", __func__);
	}

done:
	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1028,
	    "Done %s.\n", __func__);

	return rval;
}

/*
 * qla_get_exlogin_status
 *	Get extended login status
 *	uses the memory offload control/status Mailbox
 *
 * Input:
 *	ha: adapter state pointer.
 *	fwopt: firmware options
 *
 * Returns:
 *	qla2x00 local function status
 *
 * Context:
 *	Kernel context.
 */
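/*
 * Usage sketch (illustrative only, not taken from the original file): the
 * two out-parameters receive the buffer size and extended-login count that
 * the firmware returns in mb[4] and mb[10], e.g.:
 *
 *	uint16_t size = 0, cnt = 0;
 *
 *	if (qla_get_exlogin_status(vha, &size, &cnt) == QLA_SUCCESS)
 *		(size and cnt are then used to size the extended-login buffer)
 */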
#define	FETCH_XLOGINS_STAT	0x8
int
qla_get_exlogin_status(scsi_qla_host_t *vha, uint16_t *buf_sz,
    uint16_t *ex_logins_cnt)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118f,
	    "Entered %s\n", __func__);

	memset(mcp->mb, 0, sizeof(mcp->mb));
	mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
	mcp->mb[1] = FETCH_XLOGINS_STAT;
	mcp->out_mb = MBX_1|MBX_0;
	mcp->in_mb = MBX_10|MBX_4|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;

	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1115, "Failed=%x.\n", rval);
	} else {
		*buf_sz = mcp->mb[4];
		*ex_logins_cnt = mcp->mb[10];

		ql_log(ql_log_info, vha, 0x1190,
		    "buffer size 0x%x, exchange login count=%d\n",
		    mcp->mb[4], mcp->mb[10]);

		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1116,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla_set_exlogin_mem_cfg
 *	Set extended login memory configuration.
 *	The mailbox command needs to be issued before init_cb is set.
 *
 * Input:
 *	ha: adapter state pointer.
 *	buffer: buffer pointer
 *	phys_addr: physical address of buffer
 *	size: size of buffer
 *	TARGET_QUEUE_LOCK must be released
 *	ADAPTER_STATE_LOCK must be released
 *
 * Returns:
 *	qla2x00 local function status code.
 *
 * Context:
 *	Kernel context.
 */
#define CONFIG_XLOGINS_MEM	0x9
int
qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111a,
	    "Entered %s.\n", __func__);

	memset(mcp->mb, 0, sizeof(mcp->mb));
	mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
	mcp->mb[1] = CONFIG_XLOGINS_MEM;
	mcp->mb[2] = MSW(phys_addr);
	mcp->mb[3] = LSW(phys_addr);
	mcp->mb[6] = MSW(MSD(phys_addr));
	mcp->mb[7] = LSW(MSD(phys_addr));
	mcp->mb[8] = MSW(ha->exlogin_size);
	mcp->mb[9] = LSW(ha->exlogin_size);
	mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_11|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x111b,
		    "EXlogin Failed=%x. MB0=%x MB11=%x\n",
		    rval, mcp->mb[0], mcp->mb[11]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla_get_exchoffld_status
 *	Get exchange offload status
 *	uses the memory offload control/status Mailbox
 *
 * Input:
 *	ha: adapter state pointer.
 *	fwopt: firmware options
 *
 * Returns:
 *	qla2x00 local function status
 *
 * Context:
 *	Kernel context.
 */
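/*
 * Note (not in the original source): like the extended-login helpers above,
 * the exchange-offload helpers below reuse MBC_GET_MEM_OFFLOAD_CNTRL_STAT
 * and select the operation through the sub-code placed in mb[1]
 * (FETCH_XCHOFFLD_STAT to query, CONFIG_XCHOFFLD_MEM to configure).
 */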
#define FETCH_XCHOFFLD_STAT	0x2
int
qla_get_exchoffld_status(scsi_qla_host_t *vha, uint16_t *buf_sz,
    uint16_t *ex_logins_cnt)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1019,
	    "Entered %s\n", __func__);

	memset(mcp->mb, 0, sizeof(mcp->mb));
	mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
	mcp->mb[1] = FETCH_XCHOFFLD_STAT;
	mcp->out_mb = MBX_1|MBX_0;
	mcp->in_mb = MBX_10|MBX_4|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;

	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1155, "Failed=%x.\n", rval);
	} else {
		*buf_sz = mcp->mb[4];
		*ex_logins_cnt = mcp->mb[10];

		ql_log(ql_log_info, vha, 0x118e,
		    "buffer size 0x%x, exchange offload count=%d\n",
		    mcp->mb[4], mcp->mb[10]);

		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1156,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla_set_exchoffld_mem_cfg
 *	Set exchange offload memory configuration.
 *	The mailbox command needs to be issued before init_cb is set.
 *
 * Input:
 *	ha: adapter state pointer.
 *	buffer: buffer pointer
 *	phys_addr: physical address of buffer
 *	size: size of buffer
 *	TARGET_QUEUE_LOCK must be released
 *	ADAPTER_STATE_LOCK must be released
 *
 * Returns:
 *	qla2x00 local function status code.
 *
 * Context:
 *	Kernel context.
 */
#define CONFIG_XCHOFFLD_MEM	0x3
int
qla_set_exchoffld_mem_cfg(scsi_qla_host_t *vha)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1157,
	    "Entered %s.\n", __func__);

	memset(mcp->mb, 0, sizeof(mcp->mb));
	mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
	mcp->mb[1] = CONFIG_XCHOFFLD_MEM;
	mcp->mb[2] = MSW(ha->exchoffld_buf_dma);
	mcp->mb[3] = LSW(ha->exchoffld_buf_dma);
	mcp->mb[6] = MSW(MSD(ha->exchoffld_buf_dma));
	mcp->mb[7] = LSW(MSD(ha->exchoffld_buf_dma));
	mcp->mb[8] = MSW(ha->exchoffld_size);
	mcp->mb[9] = LSW(ha->exchoffld_size);
	mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_11|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x1158, "Failed=%x.\n", rval);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1192,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2x00_get_fw_version
 *	Get firmware version.
 *
 * Input:
 *	ha:		adapter state pointer.
 *	major:		pointer for major number.
 *	minor:		pointer for minor number.
 *	subminor:	pointer for subminor number.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
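/*
 * Note (not in the original source): the firmware returns the version in
 * mb[1..3] (major/minor/subminor), attributes in mb[6], and, on
 * FWI2-capable parts, extended attributes in mb[15..17]; the routine below
 * copies those registers into struct qla_hw_data for later use.
 */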
int
qla2x00_get_fw_version(scsi_qla_host_t *vha)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1029,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_GET_FIRMWARE_VERSION;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	if (IS_QLA81XX(vha->hw) || IS_QLA8031(ha) || IS_QLA8044(ha))
		mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8;
	if (IS_FWI2_CAPABLE(ha))
		mcp->in_mb |= MBX_17|MBX_16|MBX_15;
	if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
		mcp->in_mb |=
		    MBX_25|MBX_24|MBX_23|MBX_22|MBX_21|MBX_20|MBX_19|MBX_18|
		    MBX_14|MBX_13|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7;

	mcp->flags = 0;
	mcp->tov = MBX_TOV_SECONDS;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS)
		goto failed;

	/* Return mailbox data. */
	ha->fw_major_version = mcp->mb[1];
	ha->fw_minor_version = mcp->mb[2];
	ha->fw_subminor_version = mcp->mb[3];
	ha->fw_attributes = mcp->mb[6];
	if (IS_QLA2100(vha->hw) || IS_QLA2200(vha->hw))
		ha->fw_memory_size = 0x1FFFF;		/* Defaults to 128KB. */
	else
		ha->fw_memory_size = (mcp->mb[5] << 16) | mcp->mb[4];

	if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
		ha->mpi_version[0] = mcp->mb[10] & 0xff;
		ha->mpi_version[1] = mcp->mb[11] >> 8;
		ha->mpi_version[2] = mcp->mb[11] & 0xff;
		ha->mpi_capabilities = (mcp->mb[12] << 16) | mcp->mb[13];
		ha->phy_version[0] = mcp->mb[8] & 0xff;
		ha->phy_version[1] = mcp->mb[9] >> 8;
		ha->phy_version[2] = mcp->mb[9] & 0xff;
	}

	if (IS_FWI2_CAPABLE(ha)) {
		ha->fw_attributes_h = mcp->mb[15];
		ha->fw_attributes_ext[0] = mcp->mb[16];
		ha->fw_attributes_ext[1] = mcp->mb[17];
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1139,
		    "%s: FW_attributes Upper: 0x%x, Lower: 0x%x.\n",
		    __func__, mcp->mb[15], mcp->mb[6]);
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x112f,
		    "%s: Ext_FwAttributes Upper: 0x%x, Lower: 0x%x.\n",
		    __func__, mcp->mb[17], mcp->mb[16]);

		if (ha->fw_attributes_h & 0x4)
			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118d,
			    "%s: Firmware supports Extended Login 0x%x\n",
			    __func__, ha->fw_attributes_h);

		if (ha->fw_attributes_h & 0x8)
			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1191,
			    "%s: Firmware supports Exchange Offload 0x%x\n",
			    __func__, ha->fw_attributes_h);

		/*
		 * FW supports nvme and driver load parameter requested nvme.
		 * BIT 26 of fw_attributes indicates NVMe support.
		 */
		if ((ha->fw_attributes_h &
		    (FW_ATTR_H_NVME | FW_ATTR_H_NVME_UPDATED)) &&
		    ql2xnvmeenable) {
			if (ha->fw_attributes_h & FW_ATTR_H_NVME_FBURST)
				vha->flags.nvme_first_burst = 1;

			vha->flags.nvme_enabled = 1;
			ql_log(ql_log_info, vha, 0xd302,
			    "%s: FC-NVMe is Enabled (0x%x)\n",
			    __func__, ha->fw_attributes_h);
		}

		/* BIT_13 of Extended FW Attributes informs about NVMe2 support */
		if (ha->fw_attributes_ext[0] & FW_ATTR_EXT0_NVME2) {
			ql_log(ql_log_info, vha, 0xd302,
			    "Firmware supports NVMe2 0x%x\n",
			    ha->fw_attributes_ext[0]);
			vha->flags.nvme2_enabled = 1;
		}

		if (IS_QLA28XX(ha) && ha->flags.edif_hw && ql2xsecenable &&
		    (ha->fw_attributes_ext[0] & FW_ATTR_EXT0_EDIF)) {
			ha->flags.edif_enabled = 1;
			ql_log(ql_log_info, vha, 0xffff,
			    "%s: edif is enabled\n", __func__);
		}
	}

	if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
		ha->serdes_version[0] = mcp->mb[7] & 0xff;
		ha->serdes_version[1] = mcp->mb[8] >> 8;
		ha->serdes_version[2] = mcp->mb[8] & 0xff;
		ha->mpi_version[0] = mcp->mb[10] & 0xff;
		ha->mpi_version[1] = mcp->mb[11] >> 8;
		ha->mpi_version[2] = mcp->mb[11] & 0xff;
		ha->pep_version[0] = mcp->mb[13] & 0xff;
		ha->pep_version[1] = mcp->mb[14] >> 8;
		ha->pep_version[2] = mcp->mb[14] & 0xff;
		ha->fw_shared_ram_start = (mcp->mb[19] << 16) | mcp->mb[18];
		ha->fw_shared_ram_end = (mcp->mb[21] << 16) | mcp->mb[20];
		ha->fw_ddr_ram_start = (mcp->mb[23] << 16) | mcp->mb[22];
		ha->fw_ddr_ram_end = (mcp->mb[25] << 16) | mcp->mb[24];
		if (IS_QLA28XX(ha)) {
			if (mcp->mb[16] & BIT_10)
				ha->flags.secure_fw = 1;

			ql_log(ql_log_info, vha, 0xffff,
			    "Secure Flash Update in FW: %s\n",
			    (ha->flags.secure_fw) ? "Supported" :
			    "Not Supported");
		}

		if (ha->flags.scm_supported_a &&
		    (ha->fw_attributes_ext[0] & FW_ATTR_EXT0_SCM_SUPPORTED)) {
			ha->flags.scm_supported_f = 1;
			ha->sf_init_cb->flags |= cpu_to_le16(BIT_13);
		}
		ql_log(ql_log_info, vha, 0x11a3, "SCM in FW: %s\n",
		    (ha->flags.scm_supported_f) ? "Supported" :
		    "Not Supported");

		if (vha->flags.nvme2_enabled) {
			/* set BIT_15 of special feature control block for SLER */
			ha->sf_init_cb->flags |= cpu_to_le16(BIT_15);
			/* set BIT_14 of special feature control block for PI CTRL*/
			ha->sf_init_cb->flags |= cpu_to_le16(BIT_14);
		}
	}

failed:
	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x102a, "Failed=%x.\n", rval);
	} else {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102b,
		    "Done %s.\n", __func__);
	}
	return rval;
}

/*
 * qla2x00_get_fw_options
 *	Get firmware options.
 *
 * Input:
 *	ha = adapter block pointer.
 *	fwopt = pointer for firmware options.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
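/*
 * Illustrative sketch (not from the original source): the get/set firmware
 * option routines below are typically used as a read-modify-write pair on a
 * local fwopts array, e.g. (BIT_5 here is a hypothetical option bit):
 *
 *	uint16_t fwopts[16] = { 0 };
 *
 *	if (qla2x00_get_fw_options(vha, fwopts) == QLA_SUCCESS) {
 *		fwopts[1] |= BIT_5;
 *		qla2x00_set_fw_options(vha, fwopts);
 *	}
 */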
int
qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102c,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_GET_FIRMWARE_OPTION;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x102d, "Failed=%x.\n", rval);
	} else {
		fwopts[0] = mcp->mb[0];
		fwopts[1] = mcp->mb[1];
		fwopts[2] = mcp->mb[2];
		fwopts[3] = mcp->mb[3];

		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102e,
		    "Done %s.\n", __func__);
	}

	return rval;
}


/*
 * qla2x00_set_fw_options
 *	Set firmware options.
 *
 * Input:
 *	ha = adapter block pointer.
 *	fwopt = pointer for firmware options.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102f,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_SET_FIRMWARE_OPTION;
	mcp->mb[1] = fwopts[1];
	mcp->mb[2] = fwopts[2];
	mcp->mb[3] = fwopts[3];
	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_0;
	if (IS_FWI2_CAPABLE(vha->hw)) {
		mcp->in_mb |= MBX_1;
		mcp->mb[10] = fwopts[10];
		mcp->out_mb |= MBX_10;
	} else {
		mcp->mb[10] = fwopts[10];
		mcp->mb[11] = fwopts[11];
		mcp->mb[12] = 0;	/* Undocumented, but used */
		mcp->out_mb |= MBX_12|MBX_11|MBX_10;
	}
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	fwopts[0] = mcp->mb[0];

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x1030,
		    "Failed=%x (%x/%x).\n", rval, mcp->mb[0], mcp->mb[1]);
	} else {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1031,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2x00_mbx_reg_test
 *	Mailbox register wrap test.
 *
 * Input:
 *	ha = adapter block pointer.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
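/*
 * Note (not in the original source): the wrap test below writes fixed bit
 * patterns to mb[1..7] and expects the interface to echo them back
 * unchanged; any mismatch in the returned registers downgrades rval to
 * QLA_FUNCTION_FAILED.
 */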
int
qla2x00_mbx_reg_test(scsi_qla_host_t *vha)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1032,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST;
	mcp->mb[1] = 0xAAAA;
	mcp->mb[2] = 0x5555;
	mcp->mb[3] = 0xAA55;
	mcp->mb[4] = 0x55AA;
	mcp->mb[5] = 0xA5A5;
	mcp->mb[6] = 0x5A5A;
	mcp->mb[7] = 0x2525;
	mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval == QLA_SUCCESS) {
		if (mcp->mb[1] != 0xAAAA || mcp->mb[2] != 0x5555 ||
		    mcp->mb[3] != 0xAA55 || mcp->mb[4] != 0x55AA)
			rval = QLA_FUNCTION_FAILED;
		if (mcp->mb[5] != 0xA5A5 || mcp->mb[6] != 0x5A5A ||
		    mcp->mb[7] != 0x2525)
			rval = QLA_FUNCTION_FAILED;
	}

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x1033, "Failed=%x.\n", rval);
		vha->hw_err_cnt++;
	} else {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1034,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2x00_verify_checksum
 *	Verify firmware checksum.
 *
 * Input:
 *	ha = adapter block pointer.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1035,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_VERIFY_CHECKSUM;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_0;
	if (IS_FWI2_CAPABLE(vha->hw)) {
		mcp->mb[1] = MSW(risc_addr);
		mcp->mb[2] = LSW(risc_addr);
		mcp->out_mb |= MBX_2|MBX_1;
		mcp->in_mb |= MBX_2|MBX_1;
	} else {
		mcp->mb[1] = LSW(risc_addr);
		mcp->out_mb |= MBX_1;
		mcp->in_mb |= MBX_1;
	}

	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1036,
		    "Failed=%x chm sum=%x.\n", rval, IS_FWI2_CAPABLE(vha->hw) ?
		    (mcp->mb[2] << 16) | mcp->mb[1] : mcp->mb[1]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1037,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2x00_issue_iocb
 *	Issue IOCB using mailbox command
 *
 * Input:
 *	ha = adapter state pointer.
 *	buffer = buffer pointer.
 *	phys_addr = physical address of buffer.
 *	size = size of buffer.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
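/*
 * Note (not in the original source): the caller passes an IOCB that has
 * already been built in a DMA-able buffer; mb[2..7] carry its 64-bit
 * physical address and the completion status entry is written back into
 * the same buffer (hence the reserved-bit masking below).
 * qla2x00_issue_iocb() further down is a thin wrapper that supplies the
 * default MBX_TOV_SECONDS timeout.
 */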
int
qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
    dma_addr_t phys_addr, size_t size, uint32_t tov)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	if (!vha->hw->flags.fw_started)
		return QLA_INVALID_COMMAND;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1038,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_IOCB_COMMAND_A64;
	mcp->mb[1] = 0;
	mcp->mb[2] = MSW(LSD(phys_addr));
	mcp->mb[3] = LSW(LSD(phys_addr));
	mcp->mb[6] = MSW(MSD(phys_addr));
	mcp->mb[7] = LSW(MSD(phys_addr));
	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_1|MBX_0;
	mcp->tov = tov;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x1039, "Failed=%x.\n", rval);
	} else {
		sts_entry_t *sts_entry = buffer;

		/* Mask reserved bits. */
		sts_entry->entry_status &=
		    IS_FWI2_CAPABLE(vha->hw) ? RF_MASK_24XX : RF_MASK;
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103a,
		    "Done %s (status=%x).\n", __func__,
		    sts_entry->entry_status);
	}

	return rval;
}

int
qla2x00_issue_iocb(scsi_qla_host_t *vha, void *buffer, dma_addr_t phys_addr,
    size_t size)
{
	return qla2x00_issue_iocb_timeout(vha, buffer, phys_addr, size,
	    MBX_TOV_SECONDS);
}

/*
 * qla2x00_abort_command
 *	Abort command aborts a specified IOCB.
 *
 * Input:
 *	ha = adapter block pointer.
 *	sp = SB structure pointer.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_abort_command(srb_t *sp)
{
	unsigned long flags = 0;
	int rval;
	uint32_t handle = 0;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	fc_port_t *fcport = sp->fcport;
	scsi_qla_host_t *vha = fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103b,
	    "Entered %s.\n", __func__);

	if (sp->qpair)
		req = sp->qpair->req;
	else
		req = vha->req;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
		if (req->outstanding_cmds[handle] == sp)
			break;
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (handle == req->num_outstanding_cmds) {
		/* command not found */
		return QLA_FUNCTION_FAILED;
	}

	mcp->mb[0] = MBC_ABORT_COMMAND;
	if (HAS_EXTENDED_IDS(ha))
		mcp->mb[1] = fcport->loop_id;
	else
		mcp->mb[1] = fcport->loop_id << 8;
	mcp->mb[2] = (uint16_t)handle;
	mcp->mb[3] = (uint16_t)(handle >> 16);
	mcp->mb[6] = (uint16_t)cmd->device->lun;
	mcp->out_mb = MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x103c, "Failed=%x.\n", rval);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103d,
		    "Done %s.\n", __func__);
	}

	return rval;
}

int
qla2x00_abort_target(struct fc_port *fcport, uint64_t l, int tag)
{
	int rval, rval2;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	scsi_qla_host_t *vha;

	vha = fcport->vha;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103e,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_ABORT_TARGET;
	mcp->out_mb = MBX_9|MBX_2|MBX_1|MBX_0;
	if (HAS_EXTENDED_IDS(vha->hw)) {
		mcp->mb[1] = fcport->loop_id;
		mcp->mb[10] = 0;
		mcp->out_mb |= MBX_10;
	} else {
		mcp->mb[1] = fcport->loop_id << 8;
	}
	mcp->mb[2] = vha->hw->loop_reset_delay;
	mcp->mb[9] = vha->vp_idx;

	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103f,
		    "Failed=%x.\n", rval);
	}

	/* Issue marker IOCB. */
	rval2 = qla2x00_marker(vha, vha->hw->base_qpair, fcport->loop_id, 0,
	    MK_SYNC_ID);
	if (rval2 != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1040,
		    "Failed to issue marker IOCB (%x).\n", rval2);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1041,
		    "Done %s.\n", __func__);
	}

	return rval;
}

int
qla2x00_lun_reset(struct fc_port *fcport, uint64_t l, int tag)
{
	int rval, rval2;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	scsi_qla_host_t *vha;

	vha = fcport->vha;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1042,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_LUN_RESET;
	mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
	if (HAS_EXTENDED_IDS(vha->hw))
		mcp->mb[1] = fcport->loop_id;
	else
		mcp->mb[1] = fcport->loop_id << 8;
	mcp->mb[2] = (u32)l;
	mcp->mb[3] = 0;
	mcp->mb[9] = vha->vp_idx;

	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1043, "Failed=%x.\n", rval);
	}

	/* Issue marker IOCB. */
	rval2 = qla2x00_marker(vha, vha->hw->base_qpair, fcport->loop_id, l,
	    MK_SYNC_ID_LUN);
	if (rval2 != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1044,
		    "Failed to issue marker IOCB (%x).\n", rval2);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1045,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2x00_get_adapter_id
 *	Get adapter ID and topology.
 *
 * Input:
 *	ha = adapter block pointer.
 *	id = pointer for loop ID.
 *	al_pa = pointer for AL_PA.
 *	area = pointer for area.
 *	domain = pointer for domain.
 *	top = pointer for topology.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
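/*
 * Note (not in the original source): on completion the loop ID is returned
 * in mb[1], AL_PA and area in mb[2], domain in mb[3], topology in mb[6] and
 * switch capabilities in mb[7]; CNA and 27xx/28xx parts return additional
 * FCoE, FA-WWN and SCM data in the higher registers handled below.
 */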
int
qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
    uint8_t *area, uint8_t *domain, uint16_t *top, uint16_t *sw_cap)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1046,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID;
	mcp->mb[9] = vha->vp_idx;
	mcp->out_mb = MBX_9|MBX_0;
	mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	if (IS_CNA_CAPABLE(vha->hw))
		mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10;
	if (IS_FWI2_CAPABLE(vha->hw))
		mcp->in_mb |= MBX_19|MBX_18|MBX_17|MBX_16;
	if (IS_QLA27XX(vha->hw) || IS_QLA28XX(vha->hw))
		mcp->in_mb |= MBX_15|MBX_21|MBX_22|MBX_23;

	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (mcp->mb[0] == MBS_COMMAND_ERROR)
		rval = QLA_COMMAND_ERROR;
	else if (mcp->mb[0] == MBS_INVALID_COMMAND)
		rval = QLA_INVALID_COMMAND;

	/* Return data. */
	*id = mcp->mb[1];
	*al_pa = LSB(mcp->mb[2]);
	*area = MSB(mcp->mb[2]);
	*domain = LSB(mcp->mb[3]);
	*top = mcp->mb[6];
	*sw_cap = mcp->mb[7];

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x1047, "Failed=%x.\n", rval);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1048,
		    "Done %s.\n", __func__);

		if (IS_CNA_CAPABLE(vha->hw)) {
			vha->fcoe_vlan_id = mcp->mb[9] & 0xfff;
			vha->fcoe_fcf_idx = mcp->mb[10];
			vha->fcoe_vn_port_mac[5] = mcp->mb[11] >> 8;
			vha->fcoe_vn_port_mac[4] = mcp->mb[11] & 0xff;
			vha->fcoe_vn_port_mac[3] = mcp->mb[12] >> 8;
			vha->fcoe_vn_port_mac[2] = mcp->mb[12] & 0xff;
			vha->fcoe_vn_port_mac[1] = mcp->mb[13] >> 8;
			vha->fcoe_vn_port_mac[0] = mcp->mb[13] & 0xff;
		}
		/* If FA-WWN supported */
		if (IS_FAWWN_CAPABLE(vha->hw)) {
			if (mcp->mb[7] & BIT_14) {
				vha->port_name[0] = MSB(mcp->mb[16]);
				vha->port_name[1] = LSB(mcp->mb[16]);
				vha->port_name[2] = MSB(mcp->mb[17]);
				vha->port_name[3] = LSB(mcp->mb[17]);
				vha->port_name[4] = MSB(mcp->mb[18]);
				vha->port_name[5] = LSB(mcp->mb[18]);
				vha->port_name[6] = MSB(mcp->mb[19]);
				vha->port_name[7] = LSB(mcp->mb[19]);
				fc_host_port_name(vha->host) =
				    wwn_to_u64(vha->port_name);
				ql_dbg(ql_dbg_mbx, vha, 0x10ca,
				    "FA-WWN acquired %016llx\n",
				    wwn_to_u64(vha->port_name));
			}
		}

		if (IS_QLA27XX(vha->hw) || IS_QLA28XX(vha->hw)) {
			vha->bbcr = mcp->mb[15];
			if (mcp->mb[7] & SCM_EDC_ACC_RECEIVED) {
				ql_log(ql_log_info, vha, 0x11a4,
				    "SCM: EDC ELS completed, flags 0x%x\n",
				    mcp->mb[21]);
			}
			if (mcp->mb[7] & SCM_RDF_ACC_RECEIVED) {
				vha->hw->flags.scm_enabled = 1;
				vha->scm_fabric_connection_flags |=
				    SCM_FLAG_RDF_COMPLETED;
				ql_log(ql_log_info, vha, 0x11a5,
				    "SCM: RDF ELS completed, flags 0x%x\n",
				    mcp->mb[23]);
			}
		}
	}

	return rval;
}

/*
 * qla2x00_get_retry_cnt
 *	Get current firmware login retry count and delay.
 *
 * Input:
 *	ha = adapter block pointer.
 *	retry_cnt = pointer to login retry count.
 *	tov = pointer to login timeout value.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
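/*
 * Note (not in the original source): mb[3] comes back in 100 ms units, so
 * the conversion below halves it for *r_a_tov and divides by a further 10
 * to obtain ratov in seconds (e.g. mb[3] == 200 yields *r_a_tov == 100 and
 * ratov == 10); *retry_cnt and *tov are only updated when the firmware's
 * retry * timeout product exceeds the values the caller already holds.
 */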
int
qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov,
    uint16_t *r_a_tov)
{
	int rval;
	uint16_t ratov;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1049,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_GET_RETRY_COUNT;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x104a,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		/* Convert returned data and check our values. */
		*r_a_tov = mcp->mb[3] / 2;
		ratov = (mcp->mb[3]/2) / 10;  /* mb[3] value is in 100ms */
		if (mcp->mb[1] * ratov > (*retry_cnt) * (*tov)) {
			/* Update to the larger values */
			*retry_cnt = (uint8_t)mcp->mb[1];
			*tov = ratov;
		}

		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104b,
		    "Done %s mb3=%d ratov=%d.\n", __func__, mcp->mb[3], ratov);
	}

	return rval;
}

/*
 * qla2x00_init_firmware
 *	Initialize adapter firmware.
 *
 * Input:
 *	ha = adapter block pointer.
 *	dptr = Initialization control block pointer.
 *	size = size of initialization control block.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104c,
	    "Entered %s.\n", __func__);

	if (IS_P3P_TYPE(ha) && ql2xdbwr)
		qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr,
		    (0x04 | (ha->portnum << 5) | (0 << 8) | (0 << 16)));

	if (ha->flags.npiv_supported)
		mcp->mb[0] = MBC_MID_INITIALIZE_FIRMWARE;
	else
		mcp->mb[0] = MBC_INITIALIZE_FIRMWARE;

	mcp->mb[1] = 0;
	mcp->mb[2] = MSW(ha->init_cb_dma);
	mcp->mb[3] = LSW(ha->init_cb_dma);
	mcp->mb[6] = MSW(MSD(ha->init_cb_dma));
	mcp->mb[7] = LSW(MSD(ha->init_cb_dma));
	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	if (ha->ex_init_cb && ha->ex_init_cb->ex_version) {
		mcp->mb[1] = BIT_0;
		mcp->mb[10] = MSW(ha->ex_init_cb_dma);
		mcp->mb[11] = LSW(ha->ex_init_cb_dma);
		mcp->mb[12] = MSW(MSD(ha->ex_init_cb_dma));
		mcp->mb[13] = LSW(MSD(ha->ex_init_cb_dma));
		mcp->mb[14] = sizeof(*ha->ex_init_cb);
		mcp->out_mb |= MBX_14|MBX_13|MBX_12|MBX_11|MBX_10;
	}

	if (ha->flags.scm_supported_f || vha->flags.nvme2_enabled) {
		mcp->mb[1] |= BIT_1;
		mcp->mb[16] = MSW(ha->sf_init_cb_dma);
		mcp->mb[17] = LSW(ha->sf_init_cb_dma);
		mcp->mb[18] = MSW(MSD(ha->sf_init_cb_dma));
		mcp->mb[19] = LSW(MSD(ha->sf_init_cb_dma));
		mcp->mb[15] = sizeof(*ha->sf_init_cb);
		mcp->out_mb |= MBX_19|MBX_18|MBX_17|MBX_16|MBX_15;
	}

	/* 1 and 2 should normally be captured. */
	mcp->in_mb = MBX_2|MBX_1|MBX_0;
	if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
		/* mb3 is additional info about the installed SFP. */
		mcp->in_mb |= MBX_3;
	mcp->buf_size = size;
	mcp->flags = MBX_DMA_OUT;
	mcp->tov = MBX_TOV_SECONDS;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x104d,
		    "Failed=%x mb[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x.\n",
		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3]);
		if (ha->init_cb) {
			ql_dbg(ql_dbg_mbx, vha, 0x104d, "init_cb:\n");
			ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha,
			    0x0104d, ha->init_cb, sizeof(*ha->init_cb));
		}
		if (ha->ex_init_cb && ha->ex_init_cb->ex_version) {
			ql_dbg(ql_dbg_mbx, vha, 0x104d, "ex_init_cb:\n");
			ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha,
			    0x0104d, ha->ex_init_cb, sizeof(*ha->ex_init_cb));
		}
	} else {
		if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
			if (mcp->mb[2] == 6 || mcp->mb[3] == 2)
				ql_dbg(ql_dbg_mbx, vha, 0x119d,
				    "Invalid SFP/Validation Failed\n");
		}
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104e,
		    "Done %s.\n", __func__);
	}

	return rval;
}


/*
 * qla2x00_get_port_database
 *	Issue normal/enhanced get port database mailbox command
 *	and copy device name as necessary.
 *
 * Input:
 *	ha = adapter state pointer.
 *	dev = structure pointer.
 *	opt = enhanced cmd option byte.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	port_database_t *pd;
	struct port_database_24xx *pd24;
	dma_addr_t pd_dma;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104f,
	    "Entered %s.\n", __func__);

	pd24 = NULL;
	pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
	if (pd == NULL) {
		ql_log(ql_log_warn, vha, 0x1050,
		    "Failed to allocate port database structure.\n");
		fcport->query = 0;
		return QLA_MEMORY_ALLOC_FAILED;
	}

	mcp->mb[0] = MBC_GET_PORT_DATABASE;
	if (opt != 0 && !IS_FWI2_CAPABLE(ha))
		mcp->mb[0] = MBC_ENHANCED_GET_PORT_DATABASE;
	mcp->mb[2] = MSW(pd_dma);
	mcp->mb[3] = LSW(pd_dma);
	mcp->mb[6] = MSW(MSD(pd_dma));
	mcp->mb[7] = LSW(MSD(pd_dma));
	mcp->mb[9] = vha->vp_idx;
	mcp->out_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
	mcp->in_mb = MBX_0;
	if (IS_FWI2_CAPABLE(ha)) {
		mcp->mb[1] = fcport->loop_id;
		mcp->mb[10] = opt;
		mcp->out_mb |= MBX_10|MBX_1;
		mcp->in_mb |= MBX_1;
	} else if (HAS_EXTENDED_IDS(ha)) {
		mcp->mb[1] = fcport->loop_id;
		mcp->mb[10] = opt;
		mcp->out_mb |= MBX_10|MBX_1;
	} else {
		mcp->mb[1] = fcport->loop_id << 8 | opt;
		mcp->out_mb |= MBX_1;
	}
	mcp->buf_size = IS_FWI2_CAPABLE(ha) ?
	    PORT_DATABASE_24XX_SIZE : PORT_DATABASE_SIZE;
	mcp->flags = MBX_DMA_IN;
	mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS)
		goto gpd_error_out;

	if (IS_FWI2_CAPABLE(ha)) {
		uint64_t zero = 0;
		u8 current_login_state, last_login_state;

		pd24 = (struct port_database_24xx *) pd;

		/* Check for logged in state. */
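		/*
		 * Note (not in the original source): the 24xx port database
		 * packs two login states into each byte; the code below
		 * selects the upper nibble for FC-NVMe sessions and the lower
		 * nibble for FCP sessions before checking PRLI completion.
		 */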
*/ 2008 if (NVME_TARGET(ha, fcport)) { 2009 current_login_state = pd24->current_login_state >> 4; 2010 last_login_state = pd24->last_login_state >> 4; 2011 } else { 2012 current_login_state = pd24->current_login_state & 0xf; 2013 last_login_state = pd24->last_login_state & 0xf; 2014 } 2015 fcport->current_login_state = pd24->current_login_state; 2016 fcport->last_login_state = pd24->last_login_state; 2017 2018 /* Check for logged in state. */ 2019 if (current_login_state != PDS_PRLI_COMPLETE && 2020 last_login_state != PDS_PRLI_COMPLETE) { 2021 ql_dbg(ql_dbg_mbx, vha, 0x119a, 2022 "Unable to verify login-state (%x/%x) for loop_id %x.\n", 2023 current_login_state, last_login_state, 2024 fcport->loop_id); 2025 rval = QLA_FUNCTION_FAILED; 2026 2027 if (!fcport->query) 2028 goto gpd_error_out; 2029 } 2030 2031 if (fcport->loop_id == FC_NO_LOOP_ID || 2032 (memcmp(fcport->port_name, (uint8_t *)&zero, 8) && 2033 memcmp(fcport->port_name, pd24->port_name, 8))) { 2034 /* We lost the device mid way. */ 2035 rval = QLA_NOT_LOGGED_IN; 2036 goto gpd_error_out; 2037 } 2038 2039 /* Names are little-endian. */ 2040 memcpy(fcport->node_name, pd24->node_name, WWN_SIZE); 2041 memcpy(fcport->port_name, pd24->port_name, WWN_SIZE); 2042 2043 /* Get port_id of device. */ 2044 fcport->d_id.b.domain = pd24->port_id[0]; 2045 fcport->d_id.b.area = pd24->port_id[1]; 2046 fcport->d_id.b.al_pa = pd24->port_id[2]; 2047 fcport->d_id.b.rsvd_1 = 0; 2048 2049 /* If not target must be initiator or unknown type. */ 2050 if ((pd24->prli_svc_param_word_3[0] & BIT_4) == 0) 2051 fcport->port_type = FCT_INITIATOR; 2052 else 2053 fcport->port_type = FCT_TARGET; 2054 2055 /* Passback COS information. */ 2056 fcport->supported_classes = (pd24->flags & PDF_CLASS_2) ? 2057 FC_COS_CLASS2 : FC_COS_CLASS3; 2058 2059 if (pd24->prli_svc_param_word_3[0] & BIT_7) 2060 fcport->flags |= FCF_CONF_COMP_SUPPORTED; 2061 } else { 2062 uint64_t zero = 0; 2063 2064 /* Check for logged in state. */ 2065 if (pd->master_state != PD_STATE_PORT_LOGGED_IN && 2066 pd->slave_state != PD_STATE_PORT_LOGGED_IN) { 2067 ql_dbg(ql_dbg_mbx, vha, 0x100a, 2068 "Unable to verify login-state (%x/%x) - " 2069 "portid=%02x%02x%02x.\n", pd->master_state, 2070 pd->slave_state, fcport->d_id.b.domain, 2071 fcport->d_id.b.area, fcport->d_id.b.al_pa); 2072 rval = QLA_FUNCTION_FAILED; 2073 goto gpd_error_out; 2074 } 2075 2076 if (fcport->loop_id == FC_NO_LOOP_ID || 2077 (memcmp(fcport->port_name, (uint8_t *)&zero, 8) && 2078 memcmp(fcport->port_name, pd->port_name, 8))) { 2079 /* We lost the device mid way. */ 2080 rval = QLA_NOT_LOGGED_IN; 2081 goto gpd_error_out; 2082 } 2083 2084 /* Names are little-endian. */ 2085 memcpy(fcport->node_name, pd->node_name, WWN_SIZE); 2086 memcpy(fcport->port_name, pd->port_name, WWN_SIZE); 2087 2088 /* Get port_id of device. */ 2089 fcport->d_id.b.domain = pd->port_id[0]; 2090 fcport->d_id.b.area = pd->port_id[3]; 2091 fcport->d_id.b.al_pa = pd->port_id[2]; 2092 fcport->d_id.b.rsvd_1 = 0; 2093 2094 /* If not target must be initiator or unknown type. */ 2095 if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0) 2096 fcport->port_type = FCT_INITIATOR; 2097 else 2098 fcport->port_type = FCT_TARGET; 2099 2100 /* Passback COS information. */ 2101 fcport->supported_classes = (pd->options & BIT_4) ? 
2102 FC_COS_CLASS2 : FC_COS_CLASS3; 2103 } 2104 2105 gpd_error_out: 2106 dma_pool_free(ha->s_dma_pool, pd, pd_dma); 2107 fcport->query = 0; 2108 2109 if (rval != QLA_SUCCESS) { 2110 ql_dbg(ql_dbg_mbx, vha, 0x1052, 2111 "Failed=%x mb[0]=%x mb[1]=%x.\n", rval, 2112 mcp->mb[0], mcp->mb[1]); 2113 } else { 2114 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1053, 2115 "Done %s.\n", __func__); 2116 } 2117 2118 return rval; 2119 } 2120 2121 int 2122 qla24xx_get_port_database(scsi_qla_host_t *vha, u16 nport_handle, 2123 struct port_database_24xx *pdb) 2124 { 2125 mbx_cmd_t mc; 2126 mbx_cmd_t *mcp = &mc; 2127 dma_addr_t pdb_dma; 2128 int rval; 2129 2130 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1115, 2131 "Entered %s.\n", __func__); 2132 2133 memset(pdb, 0, sizeof(*pdb)); 2134 2135 pdb_dma = dma_map_single(&vha->hw->pdev->dev, pdb, 2136 sizeof(*pdb), DMA_FROM_DEVICE); 2137 if (!pdb_dma) { 2138 ql_log(ql_log_warn, vha, 0x1116, "Failed to map dma buffer.\n"); 2139 return QLA_MEMORY_ALLOC_FAILED; 2140 } 2141 2142 mcp->mb[0] = MBC_GET_PORT_DATABASE; 2143 mcp->mb[1] = nport_handle; 2144 mcp->mb[2] = MSW(LSD(pdb_dma)); 2145 mcp->mb[3] = LSW(LSD(pdb_dma)); 2146 mcp->mb[6] = MSW(MSD(pdb_dma)); 2147 mcp->mb[7] = LSW(MSD(pdb_dma)); 2148 mcp->mb[9] = 0; 2149 mcp->mb[10] = 0; 2150 mcp->out_mb = MBX_10|MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 2151 mcp->in_mb = MBX_1|MBX_0; 2152 mcp->buf_size = sizeof(*pdb); 2153 mcp->flags = MBX_DMA_IN; 2154 mcp->tov = vha->hw->login_timeout * 2; 2155 rval = qla2x00_mailbox_command(vha, mcp); 2156 2157 if (rval != QLA_SUCCESS) { 2158 ql_dbg(ql_dbg_mbx, vha, 0x111a, 2159 "Failed=%x mb[0]=%x mb[1]=%x.\n", 2160 rval, mcp->mb[0], mcp->mb[1]); 2161 } else { 2162 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111b, 2163 "Done %s.\n", __func__); 2164 } 2165 2166 dma_unmap_single(&vha->hw->pdev->dev, pdb_dma, 2167 sizeof(*pdb), DMA_FROM_DEVICE); 2168 2169 return rval; 2170 } 2171 2172 /* 2173 * qla2x00_get_firmware_state 2174 * Get adapter firmware state. 2175 * 2176 * Input: 2177 * ha = adapter block pointer. 2178 * dptr = pointer for firmware state. 2179 * TARGET_QUEUE_LOCK must be released. 2180 * ADAPTER_STATE_LOCK must be released. 2181 * 2182 * Returns: 2183 * qla2x00 local function return status code. 2184 * 2185 * Context: 2186 * Kernel context. 2187 */ 2188 int 2189 qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states) 2190 { 2191 int rval; 2192 mbx_cmd_t mc; 2193 mbx_cmd_t *mcp = &mc; 2194 struct qla_hw_data *ha = vha->hw; 2195 2196 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1054, 2197 "Entered %s.\n", __func__); 2198 2199 mcp->mb[0] = MBC_GET_FIRMWARE_STATE; 2200 mcp->out_mb = MBX_0; 2201 if (IS_FWI2_CAPABLE(vha->hw)) 2202 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 2203 else 2204 mcp->in_mb = MBX_1|MBX_0; 2205 mcp->tov = MBX_TOV_SECONDS; 2206 mcp->flags = 0; 2207 rval = qla2x00_mailbox_command(vha, mcp); 2208 2209 /* Return firmware states. 
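	   On FWI2-capable adapters mailboxes 1-6 are copied out, so the
	   caller's states[] array must have room for at least 6 entries.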
*/ 2210 states[0] = mcp->mb[1]; 2211 if (IS_FWI2_CAPABLE(vha->hw)) { 2212 states[1] = mcp->mb[2]; 2213 states[2] = mcp->mb[3]; /* SFP info */ 2214 states[3] = mcp->mb[4]; 2215 states[4] = mcp->mb[5]; 2216 states[5] = mcp->mb[6]; /* DPORT status */ 2217 } 2218 2219 if (rval != QLA_SUCCESS) { 2220 /*EMPTY*/ 2221 ql_dbg(ql_dbg_mbx, vha, 0x1055, "Failed=%x.\n", rval); 2222 } else { 2223 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 2224 if (mcp->mb[2] == 6 || mcp->mb[3] == 2) 2225 ql_dbg(ql_dbg_mbx, vha, 0x119e, 2226 "Invalid SFP/Validation Failed\n"); 2227 } 2228 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1056, 2229 "Done %s.\n", __func__); 2230 } 2231 2232 return rval; 2233 } 2234 2235 /* 2236 * qla2x00_get_port_name 2237 * Issue get port name mailbox command. 2238 * Returned name is in big endian format. 2239 * 2240 * Input: 2241 * ha = adapter block pointer. 2242 * loop_id = loop ID of device. 2243 * name = pointer for name. 2244 * TARGET_QUEUE_LOCK must be released. 2245 * ADAPTER_STATE_LOCK must be released. 2246 * 2247 * Returns: 2248 * qla2x00 local function return status code. 2249 * 2250 * Context: 2251 * Kernel context. 2252 */ 2253 int 2254 qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name, 2255 uint8_t opt) 2256 { 2257 int rval; 2258 mbx_cmd_t mc; 2259 mbx_cmd_t *mcp = &mc; 2260 2261 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1057, 2262 "Entered %s.\n", __func__); 2263 2264 mcp->mb[0] = MBC_GET_PORT_NAME; 2265 mcp->mb[9] = vha->vp_idx; 2266 mcp->out_mb = MBX_9|MBX_1|MBX_0; 2267 if (HAS_EXTENDED_IDS(vha->hw)) { 2268 mcp->mb[1] = loop_id; 2269 mcp->mb[10] = opt; 2270 mcp->out_mb |= MBX_10; 2271 } else { 2272 mcp->mb[1] = loop_id << 8 | opt; 2273 } 2274 2275 mcp->in_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 2276 mcp->tov = MBX_TOV_SECONDS; 2277 mcp->flags = 0; 2278 rval = qla2x00_mailbox_command(vha, mcp); 2279 2280 if (rval != QLA_SUCCESS) { 2281 /*EMPTY*/ 2282 ql_dbg(ql_dbg_mbx, vha, 0x1058, "Failed=%x.\n", rval); 2283 } else { 2284 if (name != NULL) { 2285 /* This function returns name in big endian. */ 2286 name[0] = MSB(mcp->mb[2]); 2287 name[1] = LSB(mcp->mb[2]); 2288 name[2] = MSB(mcp->mb[3]); 2289 name[3] = LSB(mcp->mb[3]); 2290 name[4] = MSB(mcp->mb[6]); 2291 name[5] = LSB(mcp->mb[6]); 2292 name[6] = MSB(mcp->mb[7]); 2293 name[7] = LSB(mcp->mb[7]); 2294 } 2295 2296 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1059, 2297 "Done %s.\n", __func__); 2298 } 2299 2300 return rval; 2301 } 2302 2303 /* 2304 * qla24xx_link_initialization 2305 * Issue link initialization mailbox command. 2306 * 2307 * Input: 2308 * ha = adapter block pointer. 2309 * TARGET_QUEUE_LOCK must be released. 2310 * ADAPTER_STATE_LOCK must be released. 2311 * 2312 * Returns: 2313 * qla2x00 local function return status code. 2314 * 2315 * Context: 2316 * Kernel context. 
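 *
 * Note: only issued on FWI2-capable, non-CNA adapters; all other adapters
 * get QLA_FUNCTION_FAILED back immediately.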
2317 */ 2318 int 2319 qla24xx_link_initialize(scsi_qla_host_t *vha) 2320 { 2321 int rval; 2322 mbx_cmd_t mc; 2323 mbx_cmd_t *mcp = &mc; 2324 2325 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1152, 2326 "Entered %s.\n", __func__); 2327 2328 if (!IS_FWI2_CAPABLE(vha->hw) || IS_CNA_CAPABLE(vha->hw)) 2329 return QLA_FUNCTION_FAILED; 2330 2331 mcp->mb[0] = MBC_LINK_INITIALIZATION; 2332 mcp->mb[1] = BIT_4; 2333 if (vha->hw->operating_mode == LOOP) 2334 mcp->mb[1] |= BIT_6; 2335 else 2336 mcp->mb[1] |= BIT_5; 2337 mcp->mb[2] = 0; 2338 mcp->mb[3] = 0; 2339 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 2340 mcp->in_mb = MBX_0; 2341 mcp->tov = MBX_TOV_SECONDS; 2342 mcp->flags = 0; 2343 rval = qla2x00_mailbox_command(vha, mcp); 2344 2345 if (rval != QLA_SUCCESS) { 2346 ql_dbg(ql_dbg_mbx, vha, 0x1153, "Failed=%x.\n", rval); 2347 } else { 2348 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1154, 2349 "Done %s.\n", __func__); 2350 } 2351 2352 return rval; 2353 } 2354 2355 /* 2356 * qla2x00_lip_reset 2357 * Issue LIP reset mailbox command. 2358 * 2359 * Input: 2360 * ha = adapter block pointer. 2361 * TARGET_QUEUE_LOCK must be released. 2362 * ADAPTER_STATE_LOCK must be released. 2363 * 2364 * Returns: 2365 * qla2x00 local function return status code. 2366 * 2367 * Context: 2368 * Kernel context. 2369 */ 2370 int 2371 qla2x00_lip_reset(scsi_qla_host_t *vha) 2372 { 2373 int rval; 2374 mbx_cmd_t mc; 2375 mbx_cmd_t *mcp = &mc; 2376 2377 ql_dbg(ql_dbg_disc, vha, 0x105a, 2378 "Entered %s.\n", __func__); 2379 2380 if (IS_CNA_CAPABLE(vha->hw)) { 2381 /* Logout across all FCFs. */ 2382 mcp->mb[0] = MBC_LIP_FULL_LOGIN; 2383 mcp->mb[1] = BIT_1; 2384 mcp->mb[2] = 0; 2385 mcp->out_mb = MBX_2|MBX_1|MBX_0; 2386 } else if (IS_FWI2_CAPABLE(vha->hw)) { 2387 mcp->mb[0] = MBC_LIP_FULL_LOGIN; 2388 mcp->mb[1] = BIT_4; 2389 mcp->mb[2] = 0; 2390 mcp->mb[3] = vha->hw->loop_reset_delay; 2391 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 2392 } else { 2393 mcp->mb[0] = MBC_LIP_RESET; 2394 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 2395 if (HAS_EXTENDED_IDS(vha->hw)) { 2396 mcp->mb[1] = 0x00ff; 2397 mcp->mb[10] = 0; 2398 mcp->out_mb |= MBX_10; 2399 } else { 2400 mcp->mb[1] = 0xff00; 2401 } 2402 mcp->mb[2] = vha->hw->loop_reset_delay; 2403 mcp->mb[3] = 0; 2404 } 2405 mcp->in_mb = MBX_0; 2406 mcp->tov = MBX_TOV_SECONDS; 2407 mcp->flags = 0; 2408 rval = qla2x00_mailbox_command(vha, mcp); 2409 2410 if (rval != QLA_SUCCESS) { 2411 /*EMPTY*/ 2412 ql_dbg(ql_dbg_mbx, vha, 0x105b, "Failed=%x.\n", rval); 2413 } else { 2414 /*EMPTY*/ 2415 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105c, 2416 "Done %s.\n", __func__); 2417 } 2418 2419 return rval; 2420 } 2421 2422 /* 2423 * qla2x00_send_sns 2424 * Send SNS command. 2425 * 2426 * Input: 2427 * ha = adapter block pointer. 2428 * sns = pointer for command. 2429 * cmd_size = command size. 2430 * buf_size = response/command size. 2431 * TARGET_QUEUE_LOCK must be released. 2432 * ADAPTER_STATE_LOCK must be released. 2433 * 2434 * Returns: 2435 * qla2x00 local function return status code. 2436 * 2437 * Context: 2438 * Kernel context. 
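 *
 * Note: three variants are issued depending on the adapter - a full-login
 * LIP with logout across all FCFs on CNA parts, a full-login LIP on other
 * FWI2-capable parts, and a plain LIP reset on legacy ISPs.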
2439 */ 2440 int 2441 qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address, 2442 uint16_t cmd_size, size_t buf_size) 2443 { 2444 int rval; 2445 mbx_cmd_t mc; 2446 mbx_cmd_t *mcp = &mc; 2447 2448 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105d, 2449 "Entered %s.\n", __func__); 2450 2451 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105e, 2452 "Retry cnt=%d ratov=%d total tov=%d.\n", 2453 vha->hw->retry_count, vha->hw->login_timeout, mcp->tov); 2454 2455 mcp->mb[0] = MBC_SEND_SNS_COMMAND; 2456 mcp->mb[1] = cmd_size; 2457 mcp->mb[2] = MSW(sns_phys_address); 2458 mcp->mb[3] = LSW(sns_phys_address); 2459 mcp->mb[6] = MSW(MSD(sns_phys_address)); 2460 mcp->mb[7] = LSW(MSD(sns_phys_address)); 2461 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 2462 mcp->in_mb = MBX_0|MBX_1; 2463 mcp->buf_size = buf_size; 2464 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN; 2465 mcp->tov = (vha->hw->login_timeout * 2) + (vha->hw->login_timeout / 2); 2466 rval = qla2x00_mailbox_command(vha, mcp); 2467 2468 if (rval != QLA_SUCCESS) { 2469 /*EMPTY*/ 2470 ql_dbg(ql_dbg_mbx, vha, 0x105f, 2471 "Failed=%x mb[0]=%x mb[1]=%x.\n", 2472 rval, mcp->mb[0], mcp->mb[1]); 2473 } else { 2474 /*EMPTY*/ 2475 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1060, 2476 "Done %s.\n", __func__); 2477 } 2478 2479 return rval; 2480 } 2481 2482 int 2483 qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, 2484 uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt) 2485 { 2486 int rval; 2487 2488 struct logio_entry_24xx *lg; 2489 dma_addr_t lg_dma; 2490 uint32_t iop[2]; 2491 struct qla_hw_data *ha = vha->hw; 2492 struct req_que *req; 2493 2494 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1061, 2495 "Entered %s.\n", __func__); 2496 2497 if (vha->vp_idx && vha->qpair) 2498 req = vha->qpair->req; 2499 else 2500 req = ha->req_q_map[0]; 2501 2502 lg = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma); 2503 if (lg == NULL) { 2504 ql_log(ql_log_warn, vha, 0x1062, 2505 "Failed to allocate login IOCB.\n"); 2506 return QLA_MEMORY_ALLOC_FAILED; 2507 } 2508 2509 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE; 2510 lg->entry_count = 1; 2511 lg->handle = make_handle(req->id, lg->handle); 2512 lg->nport_handle = cpu_to_le16(loop_id); 2513 lg->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI); 2514 if (opt & BIT_0) 2515 lg->control_flags |= cpu_to_le16(LCF_COND_PLOGI); 2516 if (opt & BIT_1) 2517 lg->control_flags |= cpu_to_le16(LCF_SKIP_PRLI); 2518 lg->port_id[0] = al_pa; 2519 lg->port_id[1] = area; 2520 lg->port_id[2] = domain; 2521 lg->vp_index = vha->vp_idx; 2522 rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0, 2523 (ha->r_a_tov / 10 * 2) + 2); 2524 if (rval != QLA_SUCCESS) { 2525 ql_dbg(ql_dbg_mbx, vha, 0x1063, 2526 "Failed to issue login IOCB (%x).\n", rval); 2527 } else if (lg->entry_status != 0) { 2528 ql_dbg(ql_dbg_mbx, vha, 0x1064, 2529 "Failed to complete IOCB -- error status (%x).\n", 2530 lg->entry_status); 2531 rval = QLA_FUNCTION_FAILED; 2532 } else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) { 2533 iop[0] = le32_to_cpu(lg->io_parameter[0]); 2534 iop[1] = le32_to_cpu(lg->io_parameter[1]); 2535 2536 ql_dbg(ql_dbg_mbx, vha, 0x1065, 2537 "Failed to complete IOCB -- completion status (%x) " 2538 "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status), 2539 iop[0], iop[1]); 2540 2541 switch (iop[0]) { 2542 case LSC_SCODE_PORTID_USED: 2543 mb[0] = MBS_PORT_ID_USED; 2544 mb[1] = LSW(iop[1]); 2545 break; 2546 case LSC_SCODE_NPORT_USED: 2547 mb[0] = MBS_LOOP_ID_USED; 2548 break; 2549 case LSC_SCODE_NOLINK: 2550 case LSC_SCODE_NOIOCB: 
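		/*
		 * LSC_SCODE_NOLINK and every status below, including any
		 * unrecognized code, is reported to the caller as a plain
		 * MBS_COMMAND_ERROR.
		 */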
2551 case LSC_SCODE_NOXCB: 2552 case LSC_SCODE_CMD_FAILED: 2553 case LSC_SCODE_NOFABRIC: 2554 case LSC_SCODE_FW_NOT_READY: 2555 case LSC_SCODE_NOT_LOGGED_IN: 2556 case LSC_SCODE_NOPCB: 2557 case LSC_SCODE_ELS_REJECT: 2558 case LSC_SCODE_CMD_PARAM_ERR: 2559 case LSC_SCODE_NONPORT: 2560 case LSC_SCODE_LOGGED_IN: 2561 case LSC_SCODE_NOFLOGI_ACC: 2562 default: 2563 mb[0] = MBS_COMMAND_ERROR; 2564 break; 2565 } 2566 } else { 2567 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1066, 2568 "Done %s.\n", __func__); 2569 2570 iop[0] = le32_to_cpu(lg->io_parameter[0]); 2571 2572 mb[0] = MBS_COMMAND_COMPLETE; 2573 mb[1] = 0; 2574 if (iop[0] & BIT_4) { 2575 if (iop[0] & BIT_8) 2576 mb[1] |= BIT_1; 2577 } else 2578 mb[1] = BIT_0; 2579 2580 /* Passback COS information. */ 2581 mb[10] = 0; 2582 if (lg->io_parameter[7] || lg->io_parameter[8]) 2583 mb[10] |= BIT_0; /* Class 2. */ 2584 if (lg->io_parameter[9] || lg->io_parameter[10]) 2585 mb[10] |= BIT_1; /* Class 3. */ 2586 if (lg->io_parameter[0] & cpu_to_le32(BIT_7)) 2587 mb[10] |= BIT_7; /* Confirmed Completion 2588 * Allowed 2589 */ 2590 } 2591 2592 dma_pool_free(ha->s_dma_pool, lg, lg_dma); 2593 2594 return rval; 2595 } 2596 2597 /* 2598 * qla2x00_login_fabric 2599 * Issue login fabric port mailbox command. 2600 * 2601 * Input: 2602 * ha = adapter block pointer. 2603 * loop_id = device loop ID. 2604 * domain = device domain. 2605 * area = device area. 2606 * al_pa = device AL_PA. 2607 * status = pointer for return status. 2608 * opt = command options. 2609 * TARGET_QUEUE_LOCK must be released. 2610 * ADAPTER_STATE_LOCK must be released. 2611 * 2612 * Returns: 2613 * qla2x00 local function return status code. 2614 * 2615 * Context: 2616 * Kernel context. 2617 */ 2618 int 2619 qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, 2620 uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt) 2621 { 2622 int rval; 2623 mbx_cmd_t mc; 2624 mbx_cmd_t *mcp = &mc; 2625 struct qla_hw_data *ha = vha->hw; 2626 2627 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1067, 2628 "Entered %s.\n", __func__); 2629 2630 mcp->mb[0] = MBC_LOGIN_FABRIC_PORT; 2631 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 2632 if (HAS_EXTENDED_IDS(ha)) { 2633 mcp->mb[1] = loop_id; 2634 mcp->mb[10] = opt; 2635 mcp->out_mb |= MBX_10; 2636 } else { 2637 mcp->mb[1] = (loop_id << 8) | opt; 2638 } 2639 mcp->mb[2] = domain; 2640 mcp->mb[3] = area << 8 | al_pa; 2641 2642 mcp->in_mb = MBX_7|MBX_6|MBX_2|MBX_1|MBX_0; 2643 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2); 2644 mcp->flags = 0; 2645 rval = qla2x00_mailbox_command(vha, mcp); 2646 2647 /* Return mailbox statuses. */ 2648 if (mb != NULL) { 2649 mb[0] = mcp->mb[0]; 2650 mb[1] = mcp->mb[1]; 2651 mb[2] = mcp->mb[2]; 2652 mb[6] = mcp->mb[6]; 2653 mb[7] = mcp->mb[7]; 2654 /* COS retrieved from Get-Port-Database mailbox command. */ 2655 mb[10] = 0; 2656 } 2657 2658 if (rval != QLA_SUCCESS) { 2659 /* RLU tmp code: need to change main mailbox_command function to 2660 * return ok even when the mailbox completion value is not 2661 * SUCCESS. The caller needs to be responsible to interpret 2662 * the return values of this mailbox command if we're not 2663 * to change too much of the existing code. 
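		 * The hexadecimal values checked below are firmware mailbox
		 * completion statuses (believed to correspond to the MBS_*
		 * codes in the 0x4001-0x4006 range).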
2664 */ 2665 if (mcp->mb[0] == 0x4001 || mcp->mb[0] == 0x4002 || 2666 mcp->mb[0] == 0x4003 || mcp->mb[0] == 0x4005 || 2667 mcp->mb[0] == 0x4006) 2668 rval = QLA_SUCCESS; 2669 2670 /*EMPTY*/ 2671 ql_dbg(ql_dbg_mbx, vha, 0x1068, 2672 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", 2673 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); 2674 } else { 2675 /*EMPTY*/ 2676 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1069, 2677 "Done %s.\n", __func__); 2678 } 2679 2680 return rval; 2681 } 2682 2683 /* 2684 * qla2x00_login_local_device 2685 * Issue login loop port mailbox command. 2686 * 2687 * Input: 2688 * ha = adapter block pointer. 2689 * loop_id = device loop ID. 2690 * opt = command options. 2691 * 2692 * Returns: 2693 * Return status code. 2694 * 2695 * Context: 2696 * Kernel context. 2697 * 2698 */ 2699 int 2700 qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport, 2701 uint16_t *mb_ret, uint8_t opt) 2702 { 2703 int rval; 2704 mbx_cmd_t mc; 2705 mbx_cmd_t *mcp = &mc; 2706 struct qla_hw_data *ha = vha->hw; 2707 2708 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106a, 2709 "Entered %s.\n", __func__); 2710 2711 if (IS_FWI2_CAPABLE(ha)) 2712 return qla24xx_login_fabric(vha, fcport->loop_id, 2713 fcport->d_id.b.domain, fcport->d_id.b.area, 2714 fcport->d_id.b.al_pa, mb_ret, opt); 2715 2716 mcp->mb[0] = MBC_LOGIN_LOOP_PORT; 2717 if (HAS_EXTENDED_IDS(ha)) 2718 mcp->mb[1] = fcport->loop_id; 2719 else 2720 mcp->mb[1] = fcport->loop_id << 8; 2721 mcp->mb[2] = opt; 2722 mcp->out_mb = MBX_2|MBX_1|MBX_0; 2723 mcp->in_mb = MBX_7|MBX_6|MBX_1|MBX_0; 2724 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2); 2725 mcp->flags = 0; 2726 rval = qla2x00_mailbox_command(vha, mcp); 2727 2728 /* Return mailbox statuses. */ 2729 if (mb_ret != NULL) { 2730 mb_ret[0] = mcp->mb[0]; 2731 mb_ret[1] = mcp->mb[1]; 2732 mb_ret[6] = mcp->mb[6]; 2733 mb_ret[7] = mcp->mb[7]; 2734 } 2735 2736 if (rval != QLA_SUCCESS) { 2737 /* AV tmp code: need to change main mailbox_command function to 2738 * return ok even when the mailbox completion value is not 2739 * SUCCESS. The caller needs to be responsible to interpret 2740 * the return values of this mailbox command if we're not 2741 * to change too much of the existing code. 
2742 */ 2743 if (mcp->mb[0] == 0x4005 || mcp->mb[0] == 0x4006) 2744 rval = QLA_SUCCESS; 2745 2746 ql_dbg(ql_dbg_mbx, vha, 0x106b, 2747 "Failed=%x mb[0]=%x mb[1]=%x mb[6]=%x mb[7]=%x.\n", 2748 rval, mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]); 2749 } else { 2750 /*EMPTY*/ 2751 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106c, 2752 "Done %s.\n", __func__); 2753 } 2754 2755 return (rval); 2756 } 2757 2758 int 2759 qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, 2760 uint8_t area, uint8_t al_pa) 2761 { 2762 int rval; 2763 struct logio_entry_24xx *lg; 2764 dma_addr_t lg_dma; 2765 struct qla_hw_data *ha = vha->hw; 2766 struct req_que *req; 2767 2768 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106d, 2769 "Entered %s.\n", __func__); 2770 2771 lg = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma); 2772 if (lg == NULL) { 2773 ql_log(ql_log_warn, vha, 0x106e, 2774 "Failed to allocate logout IOCB.\n"); 2775 return QLA_MEMORY_ALLOC_FAILED; 2776 } 2777 2778 req = vha->req; 2779 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE; 2780 lg->entry_count = 1; 2781 lg->handle = make_handle(req->id, lg->handle); 2782 lg->nport_handle = cpu_to_le16(loop_id); 2783 lg->control_flags = 2784 cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO| 2785 LCF_FREE_NPORT); 2786 lg->port_id[0] = al_pa; 2787 lg->port_id[1] = area; 2788 lg->port_id[2] = domain; 2789 lg->vp_index = vha->vp_idx; 2790 rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0, 2791 (ha->r_a_tov / 10 * 2) + 2); 2792 if (rval != QLA_SUCCESS) { 2793 ql_dbg(ql_dbg_mbx, vha, 0x106f, 2794 "Failed to issue logout IOCB (%x).\n", rval); 2795 } else if (lg->entry_status != 0) { 2796 ql_dbg(ql_dbg_mbx, vha, 0x1070, 2797 "Failed to complete IOCB -- error status (%x).\n", 2798 lg->entry_status); 2799 rval = QLA_FUNCTION_FAILED; 2800 } else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) { 2801 ql_dbg(ql_dbg_mbx, vha, 0x1071, 2802 "Failed to complete IOCB -- completion status (%x) " 2803 "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status), 2804 le32_to_cpu(lg->io_parameter[0]), 2805 le32_to_cpu(lg->io_parameter[1])); 2806 } else { 2807 /*EMPTY*/ 2808 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1072, 2809 "Done %s.\n", __func__); 2810 } 2811 2812 dma_pool_free(ha->s_dma_pool, lg, lg_dma); 2813 2814 return rval; 2815 } 2816 2817 /* 2818 * qla2x00_fabric_logout 2819 * Issue logout fabric port mailbox command. 2820 * 2821 * Input: 2822 * ha = adapter block pointer. 2823 * loop_id = device loop ID. 2824 * TARGET_QUEUE_LOCK must be released. 2825 * ADAPTER_STATE_LOCK must be released. 2826 * 2827 * Returns: 2828 * qla2x00 local function return status code. 2829 * 2830 * Context: 2831 * Kernel context. 
2832 */ 2833 int 2834 qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, 2835 uint8_t area, uint8_t al_pa) 2836 { 2837 int rval; 2838 mbx_cmd_t mc; 2839 mbx_cmd_t *mcp = &mc; 2840 2841 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1073, 2842 "Entered %s.\n", __func__); 2843 2844 mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT; 2845 mcp->out_mb = MBX_1|MBX_0; 2846 if (HAS_EXTENDED_IDS(vha->hw)) { 2847 mcp->mb[1] = loop_id; 2848 mcp->mb[10] = 0; 2849 mcp->out_mb |= MBX_10; 2850 } else { 2851 mcp->mb[1] = loop_id << 8; 2852 } 2853 2854 mcp->in_mb = MBX_1|MBX_0; 2855 mcp->tov = MBX_TOV_SECONDS; 2856 mcp->flags = 0; 2857 rval = qla2x00_mailbox_command(vha, mcp); 2858 2859 if (rval != QLA_SUCCESS) { 2860 /*EMPTY*/ 2861 ql_dbg(ql_dbg_mbx, vha, 0x1074, 2862 "Failed=%x mb[1]=%x.\n", rval, mcp->mb[1]); 2863 } else { 2864 /*EMPTY*/ 2865 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1075, 2866 "Done %s.\n", __func__); 2867 } 2868 2869 return rval; 2870 } 2871 2872 /* 2873 * qla2x00_full_login_lip 2874 * Issue full login LIP mailbox command. 2875 * 2876 * Input: 2877 * ha = adapter block pointer. 2878 * TARGET_QUEUE_LOCK must be released. 2879 * ADAPTER_STATE_LOCK must be released. 2880 * 2881 * Returns: 2882 * qla2x00 local function return status code. 2883 * 2884 * Context: 2885 * Kernel context. 2886 */ 2887 int 2888 qla2x00_full_login_lip(scsi_qla_host_t *vha) 2889 { 2890 int rval; 2891 mbx_cmd_t mc; 2892 mbx_cmd_t *mcp = &mc; 2893 2894 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1076, 2895 "Entered %s.\n", __func__); 2896 2897 mcp->mb[0] = MBC_LIP_FULL_LOGIN; 2898 mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? BIT_4 : 0; 2899 mcp->mb[2] = 0; 2900 mcp->mb[3] = 0; 2901 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 2902 mcp->in_mb = MBX_0; 2903 mcp->tov = MBX_TOV_SECONDS; 2904 mcp->flags = 0; 2905 rval = qla2x00_mailbox_command(vha, mcp); 2906 2907 if (rval != QLA_SUCCESS) { 2908 /*EMPTY*/ 2909 ql_dbg(ql_dbg_mbx, vha, 0x1077, "Failed=%x.\n", rval); 2910 } else { 2911 /*EMPTY*/ 2912 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1078, 2913 "Done %s.\n", __func__); 2914 } 2915 2916 return rval; 2917 } 2918 2919 /* 2920 * qla2x00_get_id_list 2921 * 2922 * Input: 2923 * ha = adapter block pointer. 2924 * 2925 * Returns: 2926 * qla2x00 local function return status code. 2927 * 2928 * Context: 2929 * Kernel context. 
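 *
 * Note:
 *	id_list/id_list_dma = buffer (and its DMA address) that receives the
 *	returned ID list; entries = updated with the entry count from mb[1].
 *	Typical use (illustrative): qla2x00_get_id_list(vha, ha->gid_list,
 *	ha->gid_list_dma, &entries);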
2930 */ 2931 int 2932 qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma, 2933 uint16_t *entries) 2934 { 2935 int rval; 2936 mbx_cmd_t mc; 2937 mbx_cmd_t *mcp = &mc; 2938 2939 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1079, 2940 "Entered %s.\n", __func__); 2941 2942 if (id_list == NULL) 2943 return QLA_FUNCTION_FAILED; 2944 2945 mcp->mb[0] = MBC_GET_ID_LIST; 2946 mcp->out_mb = MBX_0; 2947 if (IS_FWI2_CAPABLE(vha->hw)) { 2948 mcp->mb[2] = MSW(id_list_dma); 2949 mcp->mb[3] = LSW(id_list_dma); 2950 mcp->mb[6] = MSW(MSD(id_list_dma)); 2951 mcp->mb[7] = LSW(MSD(id_list_dma)); 2952 mcp->mb[8] = 0; 2953 mcp->mb[9] = vha->vp_idx; 2954 mcp->out_mb |= MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2; 2955 } else { 2956 mcp->mb[1] = MSW(id_list_dma); 2957 mcp->mb[2] = LSW(id_list_dma); 2958 mcp->mb[3] = MSW(MSD(id_list_dma)); 2959 mcp->mb[6] = LSW(MSD(id_list_dma)); 2960 mcp->out_mb |= MBX_6|MBX_3|MBX_2|MBX_1; 2961 } 2962 mcp->in_mb = MBX_1|MBX_0; 2963 mcp->tov = MBX_TOV_SECONDS; 2964 mcp->flags = 0; 2965 rval = qla2x00_mailbox_command(vha, mcp); 2966 2967 if (rval != QLA_SUCCESS) { 2968 /*EMPTY*/ 2969 ql_dbg(ql_dbg_mbx, vha, 0x107a, "Failed=%x.\n", rval); 2970 } else { 2971 *entries = mcp->mb[1]; 2972 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107b, 2973 "Done %s.\n", __func__); 2974 } 2975 2976 return rval; 2977 } 2978 2979 /* 2980 * qla2x00_get_resource_cnts 2981 * Get current firmware resource counts. 2982 * 2983 * Input: 2984 * ha = adapter block pointer. 2985 * 2986 * Returns: 2987 * qla2x00 local function return status code. 2988 * 2989 * Context: 2990 * Kernel context. 2991 */ 2992 int 2993 qla2x00_get_resource_cnts(scsi_qla_host_t *vha) 2994 { 2995 struct qla_hw_data *ha = vha->hw; 2996 int rval; 2997 mbx_cmd_t mc; 2998 mbx_cmd_t *mcp = &mc; 2999 3000 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107c, 3001 "Entered %s.\n", __func__); 3002 3003 mcp->mb[0] = MBC_GET_RESOURCE_COUNTS; 3004 mcp->out_mb = MBX_0; 3005 mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 3006 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || 3007 IS_QLA27XX(ha) || IS_QLA28XX(ha)) 3008 mcp->in_mb |= MBX_12; 3009 mcp->tov = MBX_TOV_SECONDS; 3010 mcp->flags = 0; 3011 rval = qla2x00_mailbox_command(vha, mcp); 3012 3013 if (rval != QLA_SUCCESS) { 3014 /*EMPTY*/ 3015 ql_dbg(ql_dbg_mbx, vha, 0x107d, 3016 "Failed mb[0]=%x.\n", mcp->mb[0]); 3017 } else { 3018 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107e, 3019 "Done %s mb1=%x mb2=%x mb3=%x mb6=%x mb7=%x mb10=%x " 3020 "mb11=%x mb12=%x.\n", __func__, mcp->mb[1], mcp->mb[2], 3021 mcp->mb[3], mcp->mb[6], mcp->mb[7], mcp->mb[10], 3022 mcp->mb[11], mcp->mb[12]); 3023 3024 ha->orig_fw_tgt_xcb_count = mcp->mb[1]; 3025 ha->cur_fw_tgt_xcb_count = mcp->mb[2]; 3026 ha->cur_fw_xcb_count = mcp->mb[3]; 3027 ha->orig_fw_xcb_count = mcp->mb[6]; 3028 ha->cur_fw_iocb_count = mcp->mb[7]; 3029 ha->orig_fw_iocb_count = mcp->mb[10]; 3030 if (ha->flags.npiv_supported) 3031 ha->max_npiv_vports = mcp->mb[11]; 3032 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) || 3033 IS_QLA28XX(ha)) 3034 ha->fw_max_fcf_count = mcp->mb[12]; 3035 } 3036 3037 return (rval); 3038 } 3039 3040 /* 3041 * qla2x00_get_fcal_position_map 3042 * Get FCAL (LILP) position map using mailbox command 3043 * 3044 * Input: 3045 * ha = adapter state pointer. 3046 * pos_map = buffer pointer (can be NULL). 3047 * 3048 * Returns: 3049 * qla2x00 local function return status code. 3050 * 3051 * Context: 3052 * Kernel context. 
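 *
 * Note: when pos_map is non-NULL it receives FCAL_MAP_SIZE bytes.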
3053 */ 3054 int 3055 qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map) 3056 { 3057 int rval; 3058 mbx_cmd_t mc; 3059 mbx_cmd_t *mcp = &mc; 3060 char *pmap; 3061 dma_addr_t pmap_dma; 3062 struct qla_hw_data *ha = vha->hw; 3063 3064 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107f, 3065 "Entered %s.\n", __func__); 3066 3067 pmap = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma); 3068 if (pmap == NULL) { 3069 ql_log(ql_log_warn, vha, 0x1080, 3070 "Memory alloc failed.\n"); 3071 return QLA_MEMORY_ALLOC_FAILED; 3072 } 3073 3074 mcp->mb[0] = MBC_GET_FC_AL_POSITION_MAP; 3075 mcp->mb[2] = MSW(pmap_dma); 3076 mcp->mb[3] = LSW(pmap_dma); 3077 mcp->mb[6] = MSW(MSD(pmap_dma)); 3078 mcp->mb[7] = LSW(MSD(pmap_dma)); 3079 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0; 3080 mcp->in_mb = MBX_1|MBX_0; 3081 mcp->buf_size = FCAL_MAP_SIZE; 3082 mcp->flags = MBX_DMA_IN; 3083 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2); 3084 rval = qla2x00_mailbox_command(vha, mcp); 3085 3086 if (rval == QLA_SUCCESS) { 3087 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1081, 3088 "mb0/mb1=%x/%X FC/AL position map size (%x).\n", 3089 mcp->mb[0], mcp->mb[1], (unsigned)pmap[0]); 3090 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111d, 3091 pmap, pmap[0] + 1); 3092 3093 if (pos_map) 3094 memcpy(pos_map, pmap, FCAL_MAP_SIZE); 3095 } 3096 dma_pool_free(ha->s_dma_pool, pmap, pmap_dma); 3097 3098 if (rval != QLA_SUCCESS) { 3099 ql_dbg(ql_dbg_mbx, vha, 0x1082, "Failed=%x.\n", rval); 3100 } else { 3101 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1083, 3102 "Done %s.\n", __func__); 3103 } 3104 3105 return rval; 3106 } 3107 3108 /* 3109 * qla2x00_get_link_status 3110 * 3111 * Input: 3112 * ha = adapter block pointer. 3113 * loop_id = device loop ID. 3114 * ret_buf = pointer to link status return buffer. 3115 * 3116 * Returns: 3117 * 0 = success. 3118 * BIT_0 = mem alloc error. 3119 * BIT_1 = mailbox error. 3120 */ 3121 int 3122 qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id, 3123 struct link_statistics *stats, dma_addr_t stats_dma) 3124 { 3125 int rval; 3126 mbx_cmd_t mc; 3127 mbx_cmd_t *mcp = &mc; 3128 uint32_t *iter = (uint32_t *)stats; 3129 ushort dwords = offsetof(typeof(*stats), link_up_cnt)/sizeof(*iter); 3130 struct qla_hw_data *ha = vha->hw; 3131 3132 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1084, 3133 "Entered %s.\n", __func__); 3134 3135 mcp->mb[0] = MBC_GET_LINK_STATUS; 3136 mcp->mb[2] = MSW(LSD(stats_dma)); 3137 mcp->mb[3] = LSW(LSD(stats_dma)); 3138 mcp->mb[6] = MSW(MSD(stats_dma)); 3139 mcp->mb[7] = LSW(MSD(stats_dma)); 3140 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0; 3141 mcp->in_mb = MBX_0; 3142 if (IS_FWI2_CAPABLE(ha)) { 3143 mcp->mb[1] = loop_id; 3144 mcp->mb[4] = 0; 3145 mcp->mb[10] = 0; 3146 mcp->out_mb |= MBX_10|MBX_4|MBX_1; 3147 mcp->in_mb |= MBX_1; 3148 } else if (HAS_EXTENDED_IDS(ha)) { 3149 mcp->mb[1] = loop_id; 3150 mcp->mb[10] = 0; 3151 mcp->out_mb |= MBX_10|MBX_1; 3152 } else { 3153 mcp->mb[1] = loop_id << 8; 3154 mcp->out_mb |= MBX_1; 3155 } 3156 mcp->tov = MBX_TOV_SECONDS; 3157 mcp->flags = IOCTL_CMD; 3158 rval = qla2x00_mailbox_command(vha, mcp); 3159 3160 if (rval == QLA_SUCCESS) { 3161 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) { 3162 ql_dbg(ql_dbg_mbx, vha, 0x1085, 3163 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 3164 rval = QLA_FUNCTION_FAILED; 3165 } else { 3166 /* Re-endianize - firmware data is le32. 
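		   Only the words preceding the link_up_cnt field are
		   converted; dwords was derived from offsetof() above.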
*/ 3167 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1086, 3168 "Done %s.\n", __func__); 3169 for ( ; dwords--; iter++) 3170 le32_to_cpus(iter); 3171 } 3172 } else { 3173 /* Failed. */ 3174 ql_dbg(ql_dbg_mbx, vha, 0x1087, "Failed=%x.\n", rval); 3175 } 3176 3177 return rval; 3178 } 3179 3180 int 3181 qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats, 3182 dma_addr_t stats_dma, uint16_t options) 3183 { 3184 int rval; 3185 mbx_cmd_t mc; 3186 mbx_cmd_t *mcp = &mc; 3187 uint32_t *iter = (uint32_t *)stats; 3188 ushort dwords = sizeof(*stats)/sizeof(*iter); 3189 3190 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1088, 3191 "Entered %s.\n", __func__); 3192 3193 memset(&mc, 0, sizeof(mc)); 3194 mc.mb[0] = MBC_GET_LINK_PRIV_STATS; 3195 mc.mb[2] = MSW(LSD(stats_dma)); 3196 mc.mb[3] = LSW(LSD(stats_dma)); 3197 mc.mb[6] = MSW(MSD(stats_dma)); 3198 mc.mb[7] = LSW(MSD(stats_dma)); 3199 mc.mb[8] = dwords; 3200 mc.mb[9] = vha->vp_idx; 3201 mc.mb[10] = options; 3202 3203 rval = qla24xx_send_mb_cmd(vha, &mc); 3204 3205 if (rval == QLA_SUCCESS) { 3206 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) { 3207 ql_dbg(ql_dbg_mbx, vha, 0x1089, 3208 "Failed mb[0]=%x.\n", mcp->mb[0]); 3209 rval = QLA_FUNCTION_FAILED; 3210 } else { 3211 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108a, 3212 "Done %s.\n", __func__); 3213 /* Re-endianize - firmware data is le32. */ 3214 for ( ; dwords--; iter++) 3215 le32_to_cpus(iter); 3216 } 3217 } else { 3218 /* Failed. */ 3219 ql_dbg(ql_dbg_mbx, vha, 0x108b, "Failed=%x.\n", rval); 3220 } 3221 3222 return rval; 3223 } 3224 3225 int 3226 qla24xx_abort_command(srb_t *sp) 3227 { 3228 int rval; 3229 unsigned long flags = 0; 3230 3231 struct abort_entry_24xx *abt; 3232 dma_addr_t abt_dma; 3233 uint32_t handle; 3234 fc_port_t *fcport = sp->fcport; 3235 struct scsi_qla_host *vha = fcport->vha; 3236 struct qla_hw_data *ha = vha->hw; 3237 struct req_que *req; 3238 struct qla_qpair *qpair = sp->qpair; 3239 3240 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c, 3241 "Entered %s.\n", __func__); 3242 3243 if (sp->qpair) 3244 req = sp->qpair->req; 3245 else 3246 return QLA_ERR_NO_QPAIR; 3247 3248 if (ql2xasynctmfenable) 3249 return qla24xx_async_abort_command(sp); 3250 3251 spin_lock_irqsave(qpair->qp_lock_ptr, flags); 3252 for (handle = 1; handle < req->num_outstanding_cmds; handle++) { 3253 if (req->outstanding_cmds[handle] == sp) 3254 break; 3255 } 3256 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); 3257 if (handle == req->num_outstanding_cmds) { 3258 /* Command not found. 
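		   The SRB is not in this request queue's outstanding_cmds[]
		   array, so there is nothing left to abort.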
*/ 3259 return QLA_ERR_NOT_FOUND; 3260 } 3261 3262 abt = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &abt_dma); 3263 if (abt == NULL) { 3264 ql_log(ql_log_warn, vha, 0x108d, 3265 "Failed to allocate abort IOCB.\n"); 3266 return QLA_MEMORY_ALLOC_FAILED; 3267 } 3268 3269 abt->entry_type = ABORT_IOCB_TYPE; 3270 abt->entry_count = 1; 3271 abt->handle = make_handle(req->id, abt->handle); 3272 abt->nport_handle = cpu_to_le16(fcport->loop_id); 3273 abt->handle_to_abort = make_handle(req->id, handle); 3274 abt->port_id[0] = fcport->d_id.b.al_pa; 3275 abt->port_id[1] = fcport->d_id.b.area; 3276 abt->port_id[2] = fcport->d_id.b.domain; 3277 abt->vp_index = fcport->vha->vp_idx; 3278 3279 abt->req_que_no = cpu_to_le16(req->id); 3280 /* Need to pass original sp */ 3281 qla_nvme_abort_set_option(abt, sp); 3282 3283 rval = qla2x00_issue_iocb(vha, abt, abt_dma, 0); 3284 if (rval != QLA_SUCCESS) { 3285 ql_dbg(ql_dbg_mbx, vha, 0x108e, 3286 "Failed to issue IOCB (%x).\n", rval); 3287 } else if (abt->entry_status != 0) { 3288 ql_dbg(ql_dbg_mbx, vha, 0x108f, 3289 "Failed to complete IOCB -- error status (%x).\n", 3290 abt->entry_status); 3291 rval = QLA_FUNCTION_FAILED; 3292 } else if (abt->nport_handle != cpu_to_le16(0)) { 3293 ql_dbg(ql_dbg_mbx, vha, 0x1090, 3294 "Failed to complete IOCB -- completion status (%x).\n", 3295 le16_to_cpu(abt->nport_handle)); 3296 if (abt->nport_handle == cpu_to_le16(CS_IOCB_ERROR)) 3297 rval = QLA_FUNCTION_PARAMETER_ERROR; 3298 else 3299 rval = QLA_FUNCTION_FAILED; 3300 } else { 3301 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1091, 3302 "Done %s.\n", __func__); 3303 } 3304 if (rval == QLA_SUCCESS) 3305 qla_nvme_abort_process_comp_status(abt, sp); 3306 3307 qla_wait_nvme_release_cmd_kref(sp); 3308 3309 dma_pool_free(ha->s_dma_pool, abt, abt_dma); 3310 3311 return rval; 3312 } 3313 3314 struct tsk_mgmt_cmd { 3315 union { 3316 struct tsk_mgmt_entry tsk; 3317 struct sts_entry_24xx sts; 3318 } p; 3319 }; 3320 3321 static int 3322 __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport, 3323 uint64_t l, int tag) 3324 { 3325 int rval, rval2; 3326 struct tsk_mgmt_cmd *tsk; 3327 struct sts_entry_24xx *sts; 3328 dma_addr_t tsk_dma; 3329 scsi_qla_host_t *vha; 3330 struct qla_hw_data *ha; 3331 struct req_que *req; 3332 struct qla_qpair *qpair; 3333 3334 vha = fcport->vha; 3335 ha = vha->hw; 3336 req = vha->req; 3337 3338 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1092, 3339 "Entered %s.\n", __func__); 3340 3341 if (vha->vp_idx && vha->qpair) { 3342 /* NPIV port */ 3343 qpair = vha->qpair; 3344 req = qpair->req; 3345 } 3346 3347 tsk = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma); 3348 if (tsk == NULL) { 3349 ql_log(ql_log_warn, vha, 0x1093, 3350 "Failed to allocate task management IOCB.\n"); 3351 return QLA_MEMORY_ALLOC_FAILED; 3352 } 3353 3354 tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE; 3355 tsk->p.tsk.entry_count = 1; 3356 tsk->p.tsk.handle = make_handle(req->id, tsk->p.tsk.handle); 3357 tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id); 3358 tsk->p.tsk.timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); 3359 tsk->p.tsk.control_flags = cpu_to_le32(type); 3360 tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa; 3361 tsk->p.tsk.port_id[1] = fcport->d_id.b.area; 3362 tsk->p.tsk.port_id[2] = fcport->d_id.b.domain; 3363 tsk->p.tsk.vp_index = fcport->vha->vp_idx; 3364 if (type == TCF_LUN_RESET) { 3365 int_to_scsilun(l, &tsk->p.tsk.lun); 3366 host_to_fcp_swap((uint8_t *)&tsk->p.tsk.lun, 3367 sizeof(tsk->p.tsk.lun)); 3368 } 3369 3370 sts = &tsk->p.sts; 3371 rval = 
qla2x00_issue_iocb(vha, tsk, tsk_dma, 0); 3372 if (rval != QLA_SUCCESS) { 3373 ql_dbg(ql_dbg_mbx, vha, 0x1094, 3374 "Failed to issue %s reset IOCB (%x).\n", name, rval); 3375 } else if (sts->entry_status != 0) { 3376 ql_dbg(ql_dbg_mbx, vha, 0x1095, 3377 "Failed to complete IOCB -- error status (%x).\n", 3378 sts->entry_status); 3379 rval = QLA_FUNCTION_FAILED; 3380 } else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) { 3381 ql_dbg(ql_dbg_mbx, vha, 0x1096, 3382 "Failed to complete IOCB -- completion status (%x).\n", 3383 le16_to_cpu(sts->comp_status)); 3384 rval = QLA_FUNCTION_FAILED; 3385 } else if (le16_to_cpu(sts->scsi_status) & 3386 SS_RESPONSE_INFO_LEN_VALID) { 3387 if (le32_to_cpu(sts->rsp_data_len) < 4) { 3388 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1097, 3389 "Ignoring inconsistent data length -- not enough " 3390 "response info (%d).\n", 3391 le32_to_cpu(sts->rsp_data_len)); 3392 } else if (sts->data[3]) { 3393 ql_dbg(ql_dbg_mbx, vha, 0x1098, 3394 "Failed to complete IOCB -- response (%x).\n", 3395 sts->data[3]); 3396 rval = QLA_FUNCTION_FAILED; 3397 } 3398 } 3399 3400 /* Issue marker IOCB. */ 3401 rval2 = qla2x00_marker(vha, ha->base_qpair, fcport->loop_id, l, 3402 type == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID); 3403 if (rval2 != QLA_SUCCESS) { 3404 ql_dbg(ql_dbg_mbx, vha, 0x1099, 3405 "Failed to issue marker IOCB (%x).\n", rval2); 3406 } else { 3407 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109a, 3408 "Done %s.\n", __func__); 3409 } 3410 3411 dma_pool_free(ha->s_dma_pool, tsk, tsk_dma); 3412 3413 return rval; 3414 } 3415 3416 int 3417 qla24xx_abort_target(struct fc_port *fcport, uint64_t l, int tag) 3418 { 3419 struct qla_hw_data *ha = fcport->vha->hw; 3420 3421 if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha)) 3422 return qla2x00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag); 3423 3424 return __qla24xx_issue_tmf("Target", TCF_TARGET_RESET, fcport, l, tag); 3425 } 3426 3427 int 3428 qla24xx_lun_reset(struct fc_port *fcport, uint64_t l, int tag) 3429 { 3430 struct qla_hw_data *ha = fcport->vha->hw; 3431 3432 if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha)) 3433 return qla2x00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag); 3434 3435 return __qla24xx_issue_tmf("Lun", TCF_LUN_RESET, fcport, l, tag); 3436 } 3437 3438 int 3439 qla2x00_system_error(scsi_qla_host_t *vha) 3440 { 3441 int rval; 3442 mbx_cmd_t mc; 3443 mbx_cmd_t *mcp = &mc; 3444 struct qla_hw_data *ha = vha->hw; 3445 3446 if (!IS_QLA23XX(ha) && !IS_FWI2_CAPABLE(ha)) 3447 return QLA_FUNCTION_FAILED; 3448 3449 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109b, 3450 "Entered %s.\n", __func__); 3451 3452 mcp->mb[0] = MBC_GEN_SYSTEM_ERROR; 3453 mcp->out_mb = MBX_0; 3454 mcp->in_mb = MBX_0; 3455 mcp->tov = 5; 3456 mcp->flags = 0; 3457 rval = qla2x00_mailbox_command(vha, mcp); 3458 3459 if (rval != QLA_SUCCESS) { 3460 ql_dbg(ql_dbg_mbx, vha, 0x109c, "Failed=%x.\n", rval); 3461 } else { 3462 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109d, 3463 "Done %s.\n", __func__); 3464 } 3465 3466 return rval; 3467 } 3468 3469 int 3470 qla2x00_write_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t data) 3471 { 3472 int rval; 3473 mbx_cmd_t mc; 3474 mbx_cmd_t *mcp = &mc; 3475 3476 if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) && 3477 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw)) 3478 return QLA_FUNCTION_FAILED; 3479 3480 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1182, 3481 "Entered %s.\n", __func__); 3482 3483 mcp->mb[0] = MBC_WRITE_SERDES; 3484 mcp->mb[1] = addr; 3485 if (IS_QLA2031(vha->hw)) 3486 mcp->mb[2] = data & 
0xff; 3487 else 3488 mcp->mb[2] = data; 3489 3490 mcp->mb[3] = 0; 3491 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 3492 mcp->in_mb = MBX_0; 3493 mcp->tov = MBX_TOV_SECONDS; 3494 mcp->flags = 0; 3495 rval = qla2x00_mailbox_command(vha, mcp); 3496 3497 if (rval != QLA_SUCCESS) { 3498 ql_dbg(ql_dbg_mbx, vha, 0x1183, 3499 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 3500 } else { 3501 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1184, 3502 "Done %s.\n", __func__); 3503 } 3504 3505 return rval; 3506 } 3507 3508 int 3509 qla2x00_read_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t *data) 3510 { 3511 int rval; 3512 mbx_cmd_t mc; 3513 mbx_cmd_t *mcp = &mc; 3514 3515 if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) && 3516 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw)) 3517 return QLA_FUNCTION_FAILED; 3518 3519 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1185, 3520 "Entered %s.\n", __func__); 3521 3522 mcp->mb[0] = MBC_READ_SERDES; 3523 mcp->mb[1] = addr; 3524 mcp->mb[3] = 0; 3525 mcp->out_mb = MBX_3|MBX_1|MBX_0; 3526 mcp->in_mb = MBX_1|MBX_0; 3527 mcp->tov = MBX_TOV_SECONDS; 3528 mcp->flags = 0; 3529 rval = qla2x00_mailbox_command(vha, mcp); 3530 3531 if (IS_QLA2031(vha->hw)) 3532 *data = mcp->mb[1] & 0xff; 3533 else 3534 *data = mcp->mb[1]; 3535 3536 if (rval != QLA_SUCCESS) { 3537 ql_dbg(ql_dbg_mbx, vha, 0x1186, 3538 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 3539 } else { 3540 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1187, 3541 "Done %s.\n", __func__); 3542 } 3543 3544 return rval; 3545 } 3546 3547 int 3548 qla8044_write_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t data) 3549 { 3550 int rval; 3551 mbx_cmd_t mc; 3552 mbx_cmd_t *mcp = &mc; 3553 3554 if (!IS_QLA8044(vha->hw)) 3555 return QLA_FUNCTION_FAILED; 3556 3557 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x11a0, 3558 "Entered %s.\n", __func__); 3559 3560 mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG; 3561 mcp->mb[1] = HCS_WRITE_SERDES; 3562 mcp->mb[3] = LSW(addr); 3563 mcp->mb[4] = MSW(addr); 3564 mcp->mb[5] = LSW(data); 3565 mcp->mb[6] = MSW(data); 3566 mcp->out_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_1|MBX_0; 3567 mcp->in_mb = MBX_0; 3568 mcp->tov = MBX_TOV_SECONDS; 3569 mcp->flags = 0; 3570 rval = qla2x00_mailbox_command(vha, mcp); 3571 3572 if (rval != QLA_SUCCESS) { 3573 ql_dbg(ql_dbg_mbx, vha, 0x11a1, 3574 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 3575 } else { 3576 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1188, 3577 "Done %s.\n", __func__); 3578 } 3579 3580 return rval; 3581 } 3582 3583 int 3584 qla8044_read_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t *data) 3585 { 3586 int rval; 3587 mbx_cmd_t mc; 3588 mbx_cmd_t *mcp = &mc; 3589 3590 if (!IS_QLA8044(vha->hw)) 3591 return QLA_FUNCTION_FAILED; 3592 3593 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1189, 3594 "Entered %s.\n", __func__); 3595 3596 mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG; 3597 mcp->mb[1] = HCS_READ_SERDES; 3598 mcp->mb[3] = LSW(addr); 3599 mcp->mb[4] = MSW(addr); 3600 mcp->out_mb = MBX_4|MBX_3|MBX_1|MBX_0; 3601 mcp->in_mb = MBX_2|MBX_1|MBX_0; 3602 mcp->tov = MBX_TOV_SECONDS; 3603 mcp->flags = 0; 3604 rval = qla2x00_mailbox_command(vha, mcp); 3605 3606 *data = mcp->mb[2] << 16 | mcp->mb[1]; 3607 3608 if (rval != QLA_SUCCESS) { 3609 ql_dbg(ql_dbg_mbx, vha, 0x118a, 3610 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 3611 } else { 3612 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118b, 3613 "Done %s.\n", __func__); 3614 } 3615 3616 return rval; 3617 } 3618 3619 /** 3620 * qla2x00_set_serdes_params() - 3621 * @vha: HA context 3622 * @sw_em_1g: serial link options 
3623 * @sw_em_2g: serial link options 3624 * @sw_em_4g: serial link options 3625 * 3626 * Returns 3627 */ 3628 int 3629 qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g, 3630 uint16_t sw_em_2g, uint16_t sw_em_4g) 3631 { 3632 int rval; 3633 mbx_cmd_t mc; 3634 mbx_cmd_t *mcp = &mc; 3635 3636 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109e, 3637 "Entered %s.\n", __func__); 3638 3639 mcp->mb[0] = MBC_SERDES_PARAMS; 3640 mcp->mb[1] = BIT_0; 3641 mcp->mb[2] = sw_em_1g | BIT_15; 3642 mcp->mb[3] = sw_em_2g | BIT_15; 3643 mcp->mb[4] = sw_em_4g | BIT_15; 3644 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 3645 mcp->in_mb = MBX_0; 3646 mcp->tov = MBX_TOV_SECONDS; 3647 mcp->flags = 0; 3648 rval = qla2x00_mailbox_command(vha, mcp); 3649 3650 if (rval != QLA_SUCCESS) { 3651 /*EMPTY*/ 3652 ql_dbg(ql_dbg_mbx, vha, 0x109f, 3653 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 3654 } else { 3655 /*EMPTY*/ 3656 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a0, 3657 "Done %s.\n", __func__); 3658 } 3659 3660 return rval; 3661 } 3662 3663 int 3664 qla2x00_stop_firmware(scsi_qla_host_t *vha) 3665 { 3666 int rval; 3667 mbx_cmd_t mc; 3668 mbx_cmd_t *mcp = &mc; 3669 3670 if (!IS_FWI2_CAPABLE(vha->hw)) 3671 return QLA_FUNCTION_FAILED; 3672 3673 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a1, 3674 "Entered %s.\n", __func__); 3675 3676 mcp->mb[0] = MBC_STOP_FIRMWARE; 3677 mcp->mb[1] = 0; 3678 mcp->out_mb = MBX_1|MBX_0; 3679 mcp->in_mb = MBX_0; 3680 mcp->tov = 5; 3681 mcp->flags = 0; 3682 rval = qla2x00_mailbox_command(vha, mcp); 3683 3684 if (rval != QLA_SUCCESS) { 3685 ql_dbg(ql_dbg_mbx, vha, 0x10a2, "Failed=%x.\n", rval); 3686 if (mcp->mb[0] == MBS_INVALID_COMMAND) 3687 rval = QLA_INVALID_COMMAND; 3688 } else { 3689 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a3, 3690 "Done %s.\n", __func__); 3691 } 3692 3693 return rval; 3694 } 3695 3696 int 3697 qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma, 3698 uint16_t buffers) 3699 { 3700 int rval; 3701 mbx_cmd_t mc; 3702 mbx_cmd_t *mcp = &mc; 3703 3704 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a4, 3705 "Entered %s.\n", __func__); 3706 3707 if (!IS_FWI2_CAPABLE(vha->hw)) 3708 return QLA_FUNCTION_FAILED; 3709 3710 if (unlikely(pci_channel_offline(vha->hw->pdev))) 3711 return QLA_FUNCTION_FAILED; 3712 3713 mcp->mb[0] = MBC_TRACE_CONTROL; 3714 mcp->mb[1] = TC_EFT_ENABLE; 3715 mcp->mb[2] = LSW(eft_dma); 3716 mcp->mb[3] = MSW(eft_dma); 3717 mcp->mb[4] = LSW(MSD(eft_dma)); 3718 mcp->mb[5] = MSW(MSD(eft_dma)); 3719 mcp->mb[6] = buffers; 3720 mcp->mb[7] = TC_AEN_DISABLE; 3721 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 3722 mcp->in_mb = MBX_1|MBX_0; 3723 mcp->tov = MBX_TOV_SECONDS; 3724 mcp->flags = 0; 3725 rval = qla2x00_mailbox_command(vha, mcp); 3726 if (rval != QLA_SUCCESS) { 3727 ql_dbg(ql_dbg_mbx, vha, 0x10a5, 3728 "Failed=%x mb[0]=%x mb[1]=%x.\n", 3729 rval, mcp->mb[0], mcp->mb[1]); 3730 } else { 3731 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a6, 3732 "Done %s.\n", __func__); 3733 } 3734 3735 return rval; 3736 } 3737 3738 int 3739 qla2x00_disable_eft_trace(scsi_qla_host_t *vha) 3740 { 3741 int rval; 3742 mbx_cmd_t mc; 3743 mbx_cmd_t *mcp = &mc; 3744 3745 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a7, 3746 "Entered %s.\n", __func__); 3747 3748 if (!IS_FWI2_CAPABLE(vha->hw)) 3749 return QLA_FUNCTION_FAILED; 3750 3751 if (unlikely(pci_channel_offline(vha->hw->pdev))) 3752 return QLA_FUNCTION_FAILED; 3753 3754 mcp->mb[0] = MBC_TRACE_CONTROL; 3755 mcp->mb[1] = TC_EFT_DISABLE; 3756 mcp->out_mb = MBX_1|MBX_0; 3757 mcp->in_mb = 
MBX_1|MBX_0; 3758 mcp->tov = MBX_TOV_SECONDS; 3759 mcp->flags = 0; 3760 rval = qla2x00_mailbox_command(vha, mcp); 3761 if (rval != QLA_SUCCESS) { 3762 ql_dbg(ql_dbg_mbx, vha, 0x10a8, 3763 "Failed=%x mb[0]=%x mb[1]=%x.\n", 3764 rval, mcp->mb[0], mcp->mb[1]); 3765 } else { 3766 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a9, 3767 "Done %s.\n", __func__); 3768 } 3769 3770 return rval; 3771 } 3772 3773 int 3774 qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma, 3775 uint16_t buffers, uint16_t *mb, uint32_t *dwords) 3776 { 3777 int rval; 3778 mbx_cmd_t mc; 3779 mbx_cmd_t *mcp = &mc; 3780 3781 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10aa, 3782 "Entered %s.\n", __func__); 3783 3784 if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw) && 3785 !IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) && 3786 !IS_QLA28XX(vha->hw)) 3787 return QLA_FUNCTION_FAILED; 3788 3789 if (unlikely(pci_channel_offline(vha->hw->pdev))) 3790 return QLA_FUNCTION_FAILED; 3791 3792 mcp->mb[0] = MBC_TRACE_CONTROL; 3793 mcp->mb[1] = TC_FCE_ENABLE; 3794 mcp->mb[2] = LSW(fce_dma); 3795 mcp->mb[3] = MSW(fce_dma); 3796 mcp->mb[4] = LSW(MSD(fce_dma)); 3797 mcp->mb[5] = MSW(MSD(fce_dma)); 3798 mcp->mb[6] = buffers; 3799 mcp->mb[7] = TC_AEN_DISABLE; 3800 mcp->mb[8] = 0; 3801 mcp->mb[9] = TC_FCE_DEFAULT_RX_SIZE; 3802 mcp->mb[10] = TC_FCE_DEFAULT_TX_SIZE; 3803 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2| 3804 MBX_1|MBX_0; 3805 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 3806 mcp->tov = MBX_TOV_SECONDS; 3807 mcp->flags = 0; 3808 rval = qla2x00_mailbox_command(vha, mcp); 3809 if (rval != QLA_SUCCESS) { 3810 ql_dbg(ql_dbg_mbx, vha, 0x10ab, 3811 "Failed=%x mb[0]=%x mb[1]=%x.\n", 3812 rval, mcp->mb[0], mcp->mb[1]); 3813 } else { 3814 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ac, 3815 "Done %s.\n", __func__); 3816 3817 if (mb) 3818 memcpy(mb, mcp->mb, 8 * sizeof(*mb)); 3819 if (dwords) 3820 *dwords = buffers; 3821 } 3822 3823 return rval; 3824 } 3825 3826 int 3827 qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd) 3828 { 3829 int rval; 3830 mbx_cmd_t mc; 3831 mbx_cmd_t *mcp = &mc; 3832 3833 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ad, 3834 "Entered %s.\n", __func__); 3835 3836 if (!IS_FWI2_CAPABLE(vha->hw)) 3837 return QLA_FUNCTION_FAILED; 3838 3839 if (unlikely(pci_channel_offline(vha->hw->pdev))) 3840 return QLA_FUNCTION_FAILED; 3841 3842 mcp->mb[0] = MBC_TRACE_CONTROL; 3843 mcp->mb[1] = TC_FCE_DISABLE; 3844 mcp->mb[2] = TC_FCE_DISABLE_TRACE; 3845 mcp->out_mb = MBX_2|MBX_1|MBX_0; 3846 mcp->in_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2| 3847 MBX_1|MBX_0; 3848 mcp->tov = MBX_TOV_SECONDS; 3849 mcp->flags = 0; 3850 rval = qla2x00_mailbox_command(vha, mcp); 3851 if (rval != QLA_SUCCESS) { 3852 ql_dbg(ql_dbg_mbx, vha, 0x10ae, 3853 "Failed=%x mb[0]=%x mb[1]=%x.\n", 3854 rval, mcp->mb[0], mcp->mb[1]); 3855 } else { 3856 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10af, 3857 "Done %s.\n", __func__); 3858 3859 if (wr) 3860 *wr = (uint64_t) mcp->mb[5] << 48 | 3861 (uint64_t) mcp->mb[4] << 32 | 3862 (uint64_t) mcp->mb[3] << 16 | 3863 (uint64_t) mcp->mb[2]; 3864 if (rd) 3865 *rd = (uint64_t) mcp->mb[9] << 48 | 3866 (uint64_t) mcp->mb[8] << 32 | 3867 (uint64_t) mcp->mb[7] << 16 | 3868 (uint64_t) mcp->mb[6]; 3869 } 3870 3871 return rval; 3872 } 3873 3874 int 3875 qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id, 3876 uint16_t *port_speed, uint16_t *mb) 3877 { 3878 int rval; 3879 mbx_cmd_t mc; 3880 mbx_cmd_t *mcp = &mc; 3881 3882 ql_dbg(ql_dbg_mbx + 
ql_dbg_verbose, vha, 0x10b0, 3883 "Entered %s.\n", __func__); 3884 3885 if (!IS_IIDMA_CAPABLE(vha->hw)) 3886 return QLA_FUNCTION_FAILED; 3887 3888 mcp->mb[0] = MBC_PORT_PARAMS; 3889 mcp->mb[1] = loop_id; 3890 mcp->mb[2] = mcp->mb[3] = 0; 3891 mcp->mb[9] = vha->vp_idx; 3892 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0; 3893 mcp->in_mb = MBX_3|MBX_1|MBX_0; 3894 mcp->tov = MBX_TOV_SECONDS; 3895 mcp->flags = 0; 3896 rval = qla2x00_mailbox_command(vha, mcp); 3897 3898 /* Return mailbox statuses. */ 3899 if (mb) { 3900 mb[0] = mcp->mb[0]; 3901 mb[1] = mcp->mb[1]; 3902 mb[3] = mcp->mb[3]; 3903 } 3904 3905 if (rval != QLA_SUCCESS) { 3906 ql_dbg(ql_dbg_mbx, vha, 0x10b1, "Failed=%x.\n", rval); 3907 } else { 3908 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b2, 3909 "Done %s.\n", __func__); 3910 if (port_speed) 3911 *port_speed = mcp->mb[3]; 3912 } 3913 3914 return rval; 3915 } 3916 3917 int 3918 qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id, 3919 uint16_t port_speed, uint16_t *mb) 3920 { 3921 int rval; 3922 mbx_cmd_t mc; 3923 mbx_cmd_t *mcp = &mc; 3924 3925 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b3, 3926 "Entered %s.\n", __func__); 3927 3928 if (!IS_IIDMA_CAPABLE(vha->hw)) 3929 return QLA_FUNCTION_FAILED; 3930 3931 mcp->mb[0] = MBC_PORT_PARAMS; 3932 mcp->mb[1] = loop_id; 3933 mcp->mb[2] = BIT_0; 3934 mcp->mb[3] = port_speed & 0x3F; 3935 mcp->mb[9] = vha->vp_idx; 3936 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0; 3937 mcp->in_mb = MBX_3|MBX_1|MBX_0; 3938 mcp->tov = MBX_TOV_SECONDS; 3939 mcp->flags = 0; 3940 rval = qla2x00_mailbox_command(vha, mcp); 3941 3942 /* Return mailbox statuses. */ 3943 if (mb) { 3944 mb[0] = mcp->mb[0]; 3945 mb[1] = mcp->mb[1]; 3946 mb[3] = mcp->mb[3]; 3947 } 3948 3949 if (rval != QLA_SUCCESS) { 3950 ql_dbg(ql_dbg_mbx, vha, 0x10b4, 3951 "Failed=%x.\n", rval); 3952 } else { 3953 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b5, 3954 "Done %s.\n", __func__); 3955 } 3956 3957 return rval; 3958 } 3959 3960 void 3961 qla24xx_report_id_acquisition(scsi_qla_host_t *vha, 3962 struct vp_rpt_id_entry_24xx *rptid_entry) 3963 { 3964 struct qla_hw_data *ha = vha->hw; 3965 scsi_qla_host_t *vp = NULL; 3966 unsigned long flags; 3967 int found; 3968 port_id_t id; 3969 struct fc_port *fcport; 3970 3971 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6, 3972 "Entered %s.\n", __func__); 3973 3974 if (rptid_entry->entry_status != 0) 3975 return; 3976 3977 id.b.domain = rptid_entry->port_id[2]; 3978 id.b.area = rptid_entry->port_id[1]; 3979 id.b.al_pa = rptid_entry->port_id[0]; 3980 id.b.rsvd_1 = 0; 3981 ha->flags.n2n_ae = 0; 3982 3983 if (rptid_entry->format == 0) { 3984 /* loop */ 3985 ql_dbg(ql_dbg_async, vha, 0x10b7, 3986 "Format 0 : Number of VPs setup %d, number of " 3987 "VPs acquired %d.\n", rptid_entry->vp_setup, 3988 rptid_entry->vp_acquired); 3989 ql_dbg(ql_dbg_async, vha, 0x10b8, 3990 "Primary port id %02x%02x%02x.\n", 3991 rptid_entry->port_id[2], rptid_entry->port_id[1], 3992 rptid_entry->port_id[0]); 3993 ha->current_topology = ISP_CFG_NL; 3994 qlt_update_host_map(vha, id); 3995 3996 } else if (rptid_entry->format == 1) { 3997 /* fabric */ 3998 ql_dbg(ql_dbg_async, vha, 0x10b9, 3999 "Format 1: VP[%d] enabled - status %d - with " 4000 "port id %02x%02x%02x.\n", rptid_entry->vp_idx, 4001 rptid_entry->vp_status, 4002 rptid_entry->port_id[2], rptid_entry->port_id[1], 4003 rptid_entry->port_id[0]); 4004 ql_dbg(ql_dbg_async, vha, 0x5075, 4005 "Format 1: Remote WWPN %8phC.\n", 4006 rptid_entry->u.f1.port_name); 4007 4008 ql_dbg(ql_dbg_async, vha, 0x5075, 4009 "Format 1: 
WWPN %8phC.\n", 4010 vha->port_name); 4011 4012 switch (rptid_entry->u.f1.flags & TOPO_MASK) { 4013 case TOPO_N2N: 4014 ha->current_topology = ISP_CFG_N; 4015 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); 4016 list_for_each_entry(fcport, &vha->vp_fcports, list) { 4017 fcport->scan_state = QLA_FCPORT_SCAN; 4018 fcport->n2n_flag = 0; 4019 } 4020 id.b24 = 0; 4021 if (wwn_to_u64(vha->port_name) > 4022 wwn_to_u64(rptid_entry->u.f1.port_name)) { 4023 vha->d_id.b24 = 0; 4024 vha->d_id.b.al_pa = 1; 4025 ha->flags.n2n_bigger = 1; 4026 4027 id.b.al_pa = 2; 4028 ql_dbg(ql_dbg_async, vha, 0x5075, 4029 "Format 1: assign local id %x remote id %x\n", 4030 vha->d_id.b24, id.b24); 4031 } else { 4032 ql_dbg(ql_dbg_async, vha, 0x5075, 4033 "Format 1: Remote login - Waiting for WWPN %8phC.\n", 4034 rptid_entry->u.f1.port_name); 4035 ha->flags.n2n_bigger = 0; 4036 } 4037 4038 fcport = qla2x00_find_fcport_by_wwpn(vha, 4039 rptid_entry->u.f1.port_name, 1); 4040 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 4041 4042 4043 if (fcport) { 4044 fcport->plogi_nack_done_deadline = jiffies + HZ; 4045 fcport->dm_login_expire = jiffies + 4046 QLA_N2N_WAIT_TIME * HZ; 4047 fcport->scan_state = QLA_FCPORT_FOUND; 4048 fcport->n2n_flag = 1; 4049 fcport->keep_nport_handle = 1; 4050 fcport->login_retry = vha->hw->login_retry_count; 4051 fcport->fc4_type = FS_FC4TYPE_FCP; 4052 if (vha->flags.nvme_enabled) 4053 fcport->fc4_type |= FS_FC4TYPE_NVME; 4054 4055 if (wwn_to_u64(vha->port_name) > 4056 wwn_to_u64(fcport->port_name)) { 4057 fcport->d_id = id; 4058 } 4059 4060 switch (fcport->disc_state) { 4061 case DSC_DELETED: 4062 set_bit(RELOGIN_NEEDED, 4063 &vha->dpc_flags); 4064 break; 4065 case DSC_DELETE_PEND: 4066 break; 4067 default: 4068 qlt_schedule_sess_for_deletion(fcport); 4069 break; 4070 } 4071 } else { 4072 qla24xx_post_newsess_work(vha, &id, 4073 rptid_entry->u.f1.port_name, 4074 rptid_entry->u.f1.node_name, 4075 NULL, 4076 FS_FCP_IS_N2N); 4077 } 4078 4079 /* if our portname is higher then initiate N2N login */ 4080 4081 set_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags); 4082 return; 4083 case TOPO_FL: 4084 ha->current_topology = ISP_CFG_FL; 4085 break; 4086 case TOPO_F: 4087 ha->current_topology = ISP_CFG_F; 4088 break; 4089 default: 4090 break; 4091 } 4092 4093 ha->flags.gpsc_supported = 1; 4094 ha->current_topology = ISP_CFG_F; 4095 /* buffer to buffer credit flag */ 4096 vha->flags.bbcr_enable = (rptid_entry->u.f1.bbcr & 0xf) != 0; 4097 4098 if (rptid_entry->vp_idx == 0) { 4099 if (rptid_entry->vp_status == VP_STAT_COMPL) { 4100 /* FA-WWN is only for physical port */ 4101 if (qla_ini_mode_enabled(vha) && 4102 ha->flags.fawwpn_enabled && 4103 (rptid_entry->u.f1.flags & 4104 BIT_6)) { 4105 memcpy(vha->port_name, 4106 rptid_entry->u.f1.port_name, 4107 WWN_SIZE); 4108 } 4109 4110 qlt_update_host_map(vha, id); 4111 } 4112 4113 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags); 4114 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags); 4115 } else { 4116 if (rptid_entry->vp_status != VP_STAT_COMPL && 4117 rptid_entry->vp_status != VP_STAT_ID_CHG) { 4118 ql_dbg(ql_dbg_mbx, vha, 0x10ba, 4119 "Could not acquire ID for VP[%d].\n", 4120 rptid_entry->vp_idx); 4121 return; 4122 } 4123 4124 found = 0; 4125 spin_lock_irqsave(&ha->vport_slock, flags); 4126 list_for_each_entry(vp, &ha->vp_list, list) { 4127 if (rptid_entry->vp_idx == vp->vp_idx) { 4128 found = 1; 4129 break; 4130 } 4131 } 4132 spin_unlock_irqrestore(&ha->vport_slock, flags); 4133 4134 if (!found) 4135 return; 4136 4137 qlt_update_host_map(vp, id); 4138 4139 /* 4140 * 
Cannot configure here as we are still sitting on the 4141 * response queue. Handle it in dpc context. 4142 */ 4143 set_bit(VP_IDX_ACQUIRED, &vp->vp_flags); 4144 set_bit(REGISTER_FC4_NEEDED, &vp->dpc_flags); 4145 set_bit(REGISTER_FDMI_NEEDED, &vp->dpc_flags); 4146 } 4147 set_bit(VP_DPC_NEEDED, &vha->dpc_flags); 4148 qla2xxx_wake_dpc(vha); 4149 } else if (rptid_entry->format == 2) { 4150 ql_dbg(ql_dbg_async, vha, 0x505f, 4151 "RIDA: format 2/N2N Primary port id %02x%02x%02x.\n", 4152 rptid_entry->port_id[2], rptid_entry->port_id[1], 4153 rptid_entry->port_id[0]); 4154 4155 ql_dbg(ql_dbg_async, vha, 0x5075, 4156 "N2N: Remote WWPN %8phC.\n", 4157 rptid_entry->u.f2.port_name); 4158 4159 /* N2N. direct connect */ 4160 ha->current_topology = ISP_CFG_N; 4161 ha->flags.rida_fmt2 = 1; 4162 vha->d_id.b.domain = rptid_entry->port_id[2]; 4163 vha->d_id.b.area = rptid_entry->port_id[1]; 4164 vha->d_id.b.al_pa = rptid_entry->port_id[0]; 4165 4166 ha->flags.n2n_ae = 1; 4167 spin_lock_irqsave(&ha->vport_slock, flags); 4168 qlt_update_vp_map(vha, SET_AL_PA); 4169 spin_unlock_irqrestore(&ha->vport_slock, flags); 4170 4171 list_for_each_entry(fcport, &vha->vp_fcports, list) { 4172 fcport->scan_state = QLA_FCPORT_SCAN; 4173 fcport->n2n_flag = 0; 4174 } 4175 4176 fcport = qla2x00_find_fcport_by_wwpn(vha, 4177 rptid_entry->u.f2.port_name, 1); 4178 4179 if (fcport) { 4180 fcport->login_retry = vha->hw->login_retry_count; 4181 fcport->plogi_nack_done_deadline = jiffies + HZ; 4182 fcport->scan_state = QLA_FCPORT_FOUND; 4183 fcport->keep_nport_handle = 1; 4184 fcport->n2n_flag = 1; 4185 fcport->d_id.b.domain = 4186 rptid_entry->u.f2.remote_nport_id[2]; 4187 fcport->d_id.b.area = 4188 rptid_entry->u.f2.remote_nport_id[1]; 4189 fcport->d_id.b.al_pa = 4190 rptid_entry->u.f2.remote_nport_id[0]; 4191 4192 /* 4193 * When the remote port sends a PRLO, the FW sends up RIDA 4194 * Format 2 as an indication of session loss. In other words, 4195 * the FW state changes from PRLI complete back to PLOGI 4196 * complete. Delete the session and let relogin drive the 4197 * reconnect. 4198 */ 4199 if (atomic_read(&fcport->state) == FCS_ONLINE) 4200 qlt_schedule_sess_for_deletion(fcport); 4201 } 4202 } 4203 } 4204 4205 /* 4206 * qla24xx_modify_vp_config 4207 * Change VP configuration for vha 4208 * 4209 * Input: 4210 * vha = adapter block pointer. 4211 * 4212 * Returns: 4213 * qla2xxx local function return status code. 4214 * 4215 * Context: 4216 * Kernel context.
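 *
 * Note: this may be invoked for a virtual port, but the VP config
 * IOCB itself is always submitted through the base (physical) port.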
4217 */ 4218 int 4219 qla24xx_modify_vp_config(scsi_qla_host_t *vha) 4220 { 4221 int rval; 4222 struct vp_config_entry_24xx *vpmod; 4223 dma_addr_t vpmod_dma; 4224 struct qla_hw_data *ha = vha->hw; 4225 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 4226 4227 /* This can be called by the parent */ 4228 4229 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10bb, 4230 "Entered %s.\n", __func__); 4231 4232 vpmod = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma); 4233 if (!vpmod) { 4234 ql_log(ql_log_warn, vha, 0x10bc, 4235 "Failed to allocate modify VP IOCB.\n"); 4236 return QLA_MEMORY_ALLOC_FAILED; 4237 } 4238 4239 vpmod->entry_type = VP_CONFIG_IOCB_TYPE; 4240 vpmod->entry_count = 1; 4241 vpmod->command = VCT_COMMAND_MOD_ENABLE_VPS; 4242 vpmod->vp_count = 1; 4243 vpmod->vp_index1 = vha->vp_idx; 4244 vpmod->options_idx1 = BIT_3|BIT_4|BIT_5; 4245 4246 qlt_modify_vp_config(vha, vpmod); 4247 4248 memcpy(vpmod->node_name_idx1, vha->node_name, WWN_SIZE); 4249 memcpy(vpmod->port_name_idx1, vha->port_name, WWN_SIZE); 4250 vpmod->entry_count = 1; 4251 4252 rval = qla2x00_issue_iocb(base_vha, vpmod, vpmod_dma, 0); 4253 if (rval != QLA_SUCCESS) { 4254 ql_dbg(ql_dbg_mbx, vha, 0x10bd, 4255 "Failed to issue VP config IOCB (%x).\n", rval); 4256 } else if (vpmod->comp_status != 0) { 4257 ql_dbg(ql_dbg_mbx, vha, 0x10be, 4258 "Failed to complete IOCB -- error status (%x).\n", 4259 vpmod->comp_status); 4260 rval = QLA_FUNCTION_FAILED; 4261 } else if (vpmod->comp_status != cpu_to_le16(CS_COMPLETE)) { 4262 ql_dbg(ql_dbg_mbx, vha, 0x10bf, 4263 "Failed to complete IOCB -- completion status (%x).\n", 4264 le16_to_cpu(vpmod->comp_status)); 4265 rval = QLA_FUNCTION_FAILED; 4266 } else { 4267 /* EMPTY */ 4268 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c0, 4269 "Done %s.\n", __func__); 4270 fc_vport_set_state(vha->fc_vport, FC_VPORT_INITIALIZING); 4271 } 4272 dma_pool_free(ha->s_dma_pool, vpmod, vpmod_dma); 4273 4274 return rval; 4275 } 4276 4277 /* 4278 * qla2x00_send_change_request 4279 * Receive or disable RSCN request from fabric controller 4280 * 4281 * Input: 4282 * ha = adapter block pointer 4283 * format = registration format: 4284 * 0 - Reserved 4285 * 1 - Fabric detected registration 4286 * 2 - N_port detected registration 4287 * 3 - Full registration 4288 * FF - clear registration 4289 * vp_idx = Virtual port index 4290 * 4291 * Returns: 4292 * qla2x00 local function return status code. 
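 *
 * Illustrative call (a hedged sketch, not a quote of an existing
 * caller): full RSCN registration for the physical port would be
 * requested as qla2x00_send_change_request(vha, 0x3, 0).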
4293 * 4294 * Context: 4295 * Kernel Context 4296 */ 4297 4298 int 4299 qla2x00_send_change_request(scsi_qla_host_t *vha, uint16_t format, 4300 uint16_t vp_idx) 4301 { 4302 int rval; 4303 mbx_cmd_t mc; 4304 mbx_cmd_t *mcp = &mc; 4305 4306 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c7, 4307 "Entered %s.\n", __func__); 4308 4309 mcp->mb[0] = MBC_SEND_CHANGE_REQUEST; 4310 mcp->mb[1] = format; 4311 mcp->mb[9] = vp_idx; 4312 mcp->out_mb = MBX_9|MBX_1|MBX_0; 4313 mcp->in_mb = MBX_0|MBX_1; 4314 mcp->tov = MBX_TOV_SECONDS; 4315 mcp->flags = 0; 4316 rval = qla2x00_mailbox_command(vha, mcp); 4317 4318 if (rval == QLA_SUCCESS) { 4319 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) { 4320 rval = BIT_1; 4321 } 4322 } else 4323 rval = BIT_1; 4324 4325 return rval; 4326 } 4327 4328 int 4329 qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr, 4330 uint32_t size) 4331 { 4332 int rval; 4333 mbx_cmd_t mc; 4334 mbx_cmd_t *mcp = &mc; 4335 4336 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1009, 4337 "Entered %s.\n", __func__); 4338 4339 if (MSW(addr) || IS_FWI2_CAPABLE(vha->hw)) { 4340 mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED; 4341 mcp->mb[8] = MSW(addr); 4342 mcp->mb[10] = 0; 4343 mcp->out_mb = MBX_10|MBX_8|MBX_0; 4344 } else { 4345 mcp->mb[0] = MBC_DUMP_RISC_RAM; 4346 mcp->out_mb = MBX_0; 4347 } 4348 mcp->mb[1] = LSW(addr); 4349 mcp->mb[2] = MSW(req_dma); 4350 mcp->mb[3] = LSW(req_dma); 4351 mcp->mb[6] = MSW(MSD(req_dma)); 4352 mcp->mb[7] = LSW(MSD(req_dma)); 4353 mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1; 4354 if (IS_FWI2_CAPABLE(vha->hw)) { 4355 mcp->mb[4] = MSW(size); 4356 mcp->mb[5] = LSW(size); 4357 mcp->out_mb |= MBX_5|MBX_4; 4358 } else { 4359 mcp->mb[4] = LSW(size); 4360 mcp->out_mb |= MBX_4; 4361 } 4362 4363 mcp->in_mb = MBX_0; 4364 mcp->tov = MBX_TOV_SECONDS; 4365 mcp->flags = 0; 4366 rval = qla2x00_mailbox_command(vha, mcp); 4367 4368 if (rval != QLA_SUCCESS) { 4369 ql_dbg(ql_dbg_mbx, vha, 0x1008, 4370 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4371 } else { 4372 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1007, 4373 "Done %s.\n", __func__); 4374 } 4375 4376 return rval; 4377 } 4378 /* 84XX Support **************************************************************/ 4379 4380 struct cs84xx_mgmt_cmd { 4381 union { 4382 struct verify_chip_entry_84xx req; 4383 struct verify_chip_rsp_84xx rsp; 4384 } p; 4385 }; 4386 4387 int 4388 qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status) 4389 { 4390 int rval, retry; 4391 struct cs84xx_mgmt_cmd *mn; 4392 dma_addr_t mn_dma; 4393 uint16_t options; 4394 unsigned long flags; 4395 struct qla_hw_data *ha = vha->hw; 4396 4397 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c8, 4398 "Entered %s.\n", __func__); 4399 4400 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma); 4401 if (mn == NULL) { 4402 return QLA_MEMORY_ALLOC_FAILED; 4403 } 4404 4405 /* Force Update? */ 4406 options = ha->cs84xx->fw_update ? VCO_FORCE_UPDATE : 0; 4407 /* Diagnostic firmware? */ 4408 /* options |= MENLO_DIAG_FW; */ 4409 /* We update the firmware with only one data sequence. 
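 * (the image fits in a single Verify Chip IOCB), so END_OF_DATA is
 * set on that one request below.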
*/ 4410 options |= VCO_END_OF_DATA; 4411 4412 do { 4413 retry = 0; 4414 memset(mn, 0, sizeof(*mn)); 4415 mn->p.req.entry_type = VERIFY_CHIP_IOCB_TYPE; 4416 mn->p.req.entry_count = 1; 4417 mn->p.req.options = cpu_to_le16(options); 4418 4419 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111c, 4420 "Dump of Verify Request.\n"); 4421 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111e, 4422 mn, sizeof(*mn)); 4423 4424 rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120); 4425 if (rval != QLA_SUCCESS) { 4426 ql_dbg(ql_dbg_mbx, vha, 0x10cb, 4427 "Failed to issue verify IOCB (%x).\n", rval); 4428 goto verify_done; 4429 } 4430 4431 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1110, 4432 "Dump of Verify Response.\n"); 4433 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1118, 4434 mn, sizeof(*mn)); 4435 4436 status[0] = le16_to_cpu(mn->p.rsp.comp_status); 4437 status[1] = status[0] == CS_VCS_CHIP_FAILURE ? 4438 le16_to_cpu(mn->p.rsp.failure_code) : 0; 4439 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ce, 4440 "cs=%x fc=%x.\n", status[0], status[1]); 4441 4442 if (status[0] != CS_COMPLETE) { 4443 rval = QLA_FUNCTION_FAILED; 4444 if (!(options & VCO_DONT_UPDATE_FW)) { 4445 ql_dbg(ql_dbg_mbx, vha, 0x10cf, 4446 "Firmware update failed. Retrying " 4447 "without update firmware.\n"); 4448 options |= VCO_DONT_UPDATE_FW; 4449 options &= ~VCO_FORCE_UPDATE; 4450 retry = 1; 4451 } 4452 } else { 4453 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d0, 4454 "Firmware updated to %x.\n", 4455 le32_to_cpu(mn->p.rsp.fw_ver)); 4456 4457 /* NOTE: we only update OP firmware. */ 4458 spin_lock_irqsave(&ha->cs84xx->access_lock, flags); 4459 ha->cs84xx->op_fw_version = 4460 le32_to_cpu(mn->p.rsp.fw_ver); 4461 spin_unlock_irqrestore(&ha->cs84xx->access_lock, 4462 flags); 4463 } 4464 } while (retry); 4465 4466 verify_done: 4467 dma_pool_free(ha->s_dma_pool, mn, mn_dma); 4468 4469 if (rval != QLA_SUCCESS) { 4470 ql_dbg(ql_dbg_mbx, vha, 0x10d1, 4471 "Failed=%x.\n", rval); 4472 } else { 4473 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d2, 4474 "Done %s.\n", __func__); 4475 } 4476 4477 return rval; 4478 } 4479 4480 int 4481 qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req) 4482 { 4483 int rval; 4484 unsigned long flags; 4485 mbx_cmd_t mc; 4486 mbx_cmd_t *mcp = &mc; 4487 struct qla_hw_data *ha = vha->hw; 4488 4489 if (!ha->flags.fw_started) 4490 return QLA_SUCCESS; 4491 4492 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d3, 4493 "Entered %s.\n", __func__); 4494 4495 if (IS_SHADOW_REG_CAPABLE(ha)) 4496 req->options |= BIT_13; 4497 4498 mcp->mb[0] = MBC_INITIALIZE_MULTIQ; 4499 mcp->mb[1] = req->options; 4500 mcp->mb[2] = MSW(LSD(req->dma)); 4501 mcp->mb[3] = LSW(LSD(req->dma)); 4502 mcp->mb[6] = MSW(MSD(req->dma)); 4503 mcp->mb[7] = LSW(MSD(req->dma)); 4504 mcp->mb[5] = req->length; 4505 if (req->rsp) 4506 mcp->mb[10] = req->rsp->id; 4507 mcp->mb[12] = req->qos; 4508 mcp->mb[11] = req->vp_idx; 4509 mcp->mb[13] = req->rid; 4510 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) 4511 mcp->mb[15] = 0; 4512 4513 mcp->mb[4] = req->id; 4514 /* que in ptr index */ 4515 mcp->mb[8] = 0; 4516 /* que out ptr index */ 4517 mcp->mb[9] = *req->out_ptr = 0; 4518 mcp->out_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7| 4519 MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 4520 mcp->in_mb = MBX_0; 4521 mcp->flags = MBX_DMA_OUT; 4522 mcp->tov = MBX_TOV_SECONDS * 2; 4523 4524 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) || 4525 IS_QLA28XX(ha)) 4526 mcp->in_mb |= MBX_1; 4527 if (IS_QLA83XX(ha) || 
IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 4528 mcp->out_mb |= MBX_15; 4529 /* debug q create issue in SR-IOV */ 4530 mcp->in_mb |= MBX_9 | MBX_8 | MBX_7; 4531 } 4532 4533 spin_lock_irqsave(&ha->hardware_lock, flags); 4534 if (!(req->options & BIT_0)) { 4535 wrt_reg_dword(req->req_q_in, 0); 4536 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 4537 wrt_reg_dword(req->req_q_out, 0); 4538 } 4539 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4540 4541 rval = qla2x00_mailbox_command(vha, mcp); 4542 if (rval != QLA_SUCCESS) { 4543 ql_dbg(ql_dbg_mbx, vha, 0x10d4, 4544 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4545 } else { 4546 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d5, 4547 "Done %s.\n", __func__); 4548 } 4549 4550 return rval; 4551 } 4552 4553 int 4554 qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp) 4555 { 4556 int rval; 4557 unsigned long flags; 4558 mbx_cmd_t mc; 4559 mbx_cmd_t *mcp = &mc; 4560 struct qla_hw_data *ha = vha->hw; 4561 4562 if (!ha->flags.fw_started) 4563 return QLA_SUCCESS; 4564 4565 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d6, 4566 "Entered %s.\n", __func__); 4567 4568 if (IS_SHADOW_REG_CAPABLE(ha)) 4569 rsp->options |= BIT_13; 4570 4571 mcp->mb[0] = MBC_INITIALIZE_MULTIQ; 4572 mcp->mb[1] = rsp->options; 4573 mcp->mb[2] = MSW(LSD(rsp->dma)); 4574 mcp->mb[3] = LSW(LSD(rsp->dma)); 4575 mcp->mb[6] = MSW(MSD(rsp->dma)); 4576 mcp->mb[7] = LSW(MSD(rsp->dma)); 4577 mcp->mb[5] = rsp->length; 4578 mcp->mb[14] = rsp->msix->entry; 4579 mcp->mb[13] = rsp->rid; 4580 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) 4581 mcp->mb[15] = 0; 4582 4583 mcp->mb[4] = rsp->id; 4584 /* que in ptr index */ 4585 mcp->mb[8] = *rsp->in_ptr = 0; 4586 /* que out ptr index */ 4587 mcp->mb[9] = 0; 4588 mcp->out_mb = MBX_14|MBX_13|MBX_9|MBX_8|MBX_7 4589 |MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 4590 mcp->in_mb = MBX_0; 4591 mcp->flags = MBX_DMA_OUT; 4592 mcp->tov = MBX_TOV_SECONDS * 2; 4593 4594 if (IS_QLA81XX(ha)) { 4595 mcp->out_mb |= MBX_12|MBX_11|MBX_10; 4596 mcp->in_mb |= MBX_1; 4597 } else if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 4598 mcp->out_mb |= MBX_15|MBX_12|MBX_11|MBX_10; 4599 mcp->in_mb |= MBX_1; 4600 /* debug q create issue in SR-IOV */ 4601 mcp->in_mb |= MBX_9 | MBX_8 | MBX_7; 4602 } 4603 4604 spin_lock_irqsave(&ha->hardware_lock, flags); 4605 if (!(rsp->options & BIT_0)) { 4606 wrt_reg_dword(rsp->rsp_q_out, 0); 4607 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 4608 wrt_reg_dword(rsp->rsp_q_in, 0); 4609 } 4610 4611 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4612 4613 rval = qla2x00_mailbox_command(vha, mcp); 4614 if (rval != QLA_SUCCESS) { 4615 ql_dbg(ql_dbg_mbx, vha, 0x10d7, 4616 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4617 } else { 4618 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d8, 4619 "Done %s.\n", __func__); 4620 } 4621 4622 return rval; 4623 } 4624 4625 int 4626 qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb) 4627 { 4628 int rval; 4629 mbx_cmd_t mc; 4630 mbx_cmd_t *mcp = &mc; 4631 4632 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d9, 4633 "Entered %s.\n", __func__); 4634 4635 mcp->mb[0] = MBC_IDC_ACK; 4636 memcpy(&mcp->mb[1], mb, QLA_IDC_ACK_REGS * sizeof(uint16_t)); 4637 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 4638 mcp->in_mb = MBX_0; 4639 mcp->tov = MBX_TOV_SECONDS; 4640 mcp->flags = 0; 4641 rval = qla2x00_mailbox_command(vha, mcp); 4642 4643 if (rval != QLA_SUCCESS) { 4644 ql_dbg(ql_dbg_mbx, vha, 0x10da, 4645 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4646 } else 
{ 4647 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10db, 4648 "Done %s.\n", __func__); 4649 } 4650 4651 return rval; 4652 } 4653 4654 int 4655 qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size) 4656 { 4657 int rval; 4658 mbx_cmd_t mc; 4659 mbx_cmd_t *mcp = &mc; 4660 4661 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10dc, 4662 "Entered %s.\n", __func__); 4663 4664 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) && 4665 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw)) 4666 return QLA_FUNCTION_FAILED; 4667 4668 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; 4669 mcp->mb[1] = FAC_OPT_CMD_GET_SECTOR_SIZE; 4670 mcp->out_mb = MBX_1|MBX_0; 4671 mcp->in_mb = MBX_1|MBX_0; 4672 mcp->tov = MBX_TOV_SECONDS; 4673 mcp->flags = 0; 4674 rval = qla2x00_mailbox_command(vha, mcp); 4675 4676 if (rval != QLA_SUCCESS) { 4677 ql_dbg(ql_dbg_mbx, vha, 0x10dd, 4678 "Failed=%x mb[0]=%x mb[1]=%x.\n", 4679 rval, mcp->mb[0], mcp->mb[1]); 4680 } else { 4681 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10de, 4682 "Done %s.\n", __func__); 4683 *sector_size = mcp->mb[1]; 4684 } 4685 4686 return rval; 4687 } 4688 4689 int 4690 qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable) 4691 { 4692 int rval; 4693 mbx_cmd_t mc; 4694 mbx_cmd_t *mcp = &mc; 4695 4696 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) && 4697 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw)) 4698 return QLA_FUNCTION_FAILED; 4699 4700 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10df, 4701 "Entered %s.\n", __func__); 4702 4703 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; 4704 mcp->mb[1] = enable ? FAC_OPT_CMD_WRITE_ENABLE : 4705 FAC_OPT_CMD_WRITE_PROTECT; 4706 mcp->out_mb = MBX_1|MBX_0; 4707 mcp->in_mb = MBX_1|MBX_0; 4708 mcp->tov = MBX_TOV_SECONDS; 4709 mcp->flags = 0; 4710 rval = qla2x00_mailbox_command(vha, mcp); 4711 4712 if (rval != QLA_SUCCESS) { 4713 ql_dbg(ql_dbg_mbx, vha, 0x10e0, 4714 "Failed=%x mb[0]=%x mb[1]=%x.\n", 4715 rval, mcp->mb[0], mcp->mb[1]); 4716 } else { 4717 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e1, 4718 "Done %s.\n", __func__); 4719 } 4720 4721 return rval; 4722 } 4723 4724 int 4725 qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish) 4726 { 4727 int rval; 4728 mbx_cmd_t mc; 4729 mbx_cmd_t *mcp = &mc; 4730 4731 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) && 4732 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw)) 4733 return QLA_FUNCTION_FAILED; 4734 4735 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2, 4736 "Entered %s.\n", __func__); 4737 4738 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; 4739 mcp->mb[1] = FAC_OPT_CMD_ERASE_SECTOR; 4740 mcp->mb[2] = LSW(start); 4741 mcp->mb[3] = MSW(start); 4742 mcp->mb[4] = LSW(finish); 4743 mcp->mb[5] = MSW(finish); 4744 mcp->out_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 4745 mcp->in_mb = MBX_2|MBX_1|MBX_0; 4746 mcp->tov = MBX_TOV_SECONDS; 4747 mcp->flags = 0; 4748 rval = qla2x00_mailbox_command(vha, mcp); 4749 4750 if (rval != QLA_SUCCESS) { 4751 ql_dbg(ql_dbg_mbx, vha, 0x10e3, 4752 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", 4753 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); 4754 } else { 4755 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4, 4756 "Done %s.\n", __func__); 4757 } 4758 4759 return rval; 4760 } 4761 4762 int 4763 qla81xx_fac_semaphore_access(scsi_qla_host_t *vha, int lock) 4764 { 4765 int rval = QLA_SUCCESS; 4766 mbx_cmd_t mc; 4767 mbx_cmd_t *mcp = &mc; 4768 struct qla_hw_data *ha = vha->hw; 4769 4770 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && 4771 !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 4772 return rval; 4773 4774 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2, 4775 
"Entered %s.\n", __func__); 4776 4777 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; 4778 mcp->mb[1] = (lock ? FAC_OPT_CMD_LOCK_SEMAPHORE : 4779 FAC_OPT_CMD_UNLOCK_SEMAPHORE); 4780 mcp->out_mb = MBX_1|MBX_0; 4781 mcp->in_mb = MBX_1|MBX_0; 4782 mcp->tov = MBX_TOV_SECONDS; 4783 mcp->flags = 0; 4784 rval = qla2x00_mailbox_command(vha, mcp); 4785 4786 if (rval != QLA_SUCCESS) { 4787 ql_dbg(ql_dbg_mbx, vha, 0x10e3, 4788 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", 4789 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); 4790 } else { 4791 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4, 4792 "Done %s.\n", __func__); 4793 } 4794 4795 return rval; 4796 } 4797 4798 int 4799 qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha) 4800 { 4801 int rval = 0; 4802 mbx_cmd_t mc; 4803 mbx_cmd_t *mcp = &mc; 4804 4805 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e5, 4806 "Entered %s.\n", __func__); 4807 4808 mcp->mb[0] = MBC_RESTART_MPI_FW; 4809 mcp->out_mb = MBX_0; 4810 mcp->in_mb = MBX_0|MBX_1; 4811 mcp->tov = MBX_TOV_SECONDS; 4812 mcp->flags = 0; 4813 rval = qla2x00_mailbox_command(vha, mcp); 4814 4815 if (rval != QLA_SUCCESS) { 4816 ql_dbg(ql_dbg_mbx, vha, 0x10e6, 4817 "Failed=%x mb[0]=%x mb[1]=%x.\n", 4818 rval, mcp->mb[0], mcp->mb[1]); 4819 } else { 4820 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e7, 4821 "Done %s.\n", __func__); 4822 } 4823 4824 return rval; 4825 } 4826 4827 int 4828 qla82xx_set_driver_version(scsi_qla_host_t *vha, char *version) 4829 { 4830 int rval; 4831 mbx_cmd_t mc; 4832 mbx_cmd_t *mcp = &mc; 4833 int i; 4834 int len; 4835 __le16 *str; 4836 struct qla_hw_data *ha = vha->hw; 4837 4838 if (!IS_P3P_TYPE(ha)) 4839 return QLA_FUNCTION_FAILED; 4840 4841 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117b, 4842 "Entered %s.\n", __func__); 4843 4844 str = (__force __le16 *)version; 4845 len = strlen(version); 4846 4847 mcp->mb[0] = MBC_SET_RNID_PARAMS; 4848 mcp->mb[1] = RNID_TYPE_SET_VERSION << 8; 4849 mcp->out_mb = MBX_1|MBX_0; 4850 for (i = 4; i < 16 && len; i++, str++, len -= 2) { 4851 mcp->mb[i] = le16_to_cpup(str); 4852 mcp->out_mb |= 1<<i; 4853 } 4854 for (; i < 16; i++) { 4855 mcp->mb[i] = 0; 4856 mcp->out_mb |= 1<<i; 4857 } 4858 mcp->in_mb = MBX_1|MBX_0; 4859 mcp->tov = MBX_TOV_SECONDS; 4860 mcp->flags = 0; 4861 rval = qla2x00_mailbox_command(vha, mcp); 4862 4863 if (rval != QLA_SUCCESS) { 4864 ql_dbg(ql_dbg_mbx, vha, 0x117c, 4865 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]); 4866 } else { 4867 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117d, 4868 "Done %s.\n", __func__); 4869 } 4870 4871 return rval; 4872 } 4873 4874 int 4875 qla25xx_set_driver_version(scsi_qla_host_t *vha, char *version) 4876 { 4877 int rval; 4878 mbx_cmd_t mc; 4879 mbx_cmd_t *mcp = &mc; 4880 int len; 4881 uint16_t dwlen; 4882 uint8_t *str; 4883 dma_addr_t str_dma; 4884 struct qla_hw_data *ha = vha->hw; 4885 4886 if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha) || 4887 IS_P3P_TYPE(ha)) 4888 return QLA_FUNCTION_FAILED; 4889 4890 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117e, 4891 "Entered %s.\n", __func__); 4892 4893 str = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &str_dma); 4894 if (!str) { 4895 ql_log(ql_log_warn, vha, 0x117f, 4896 "Failed to allocate driver version param.\n"); 4897 return QLA_MEMORY_ALLOC_FAILED; 4898 } 4899 4900 memcpy(str, "\x7\x3\x11\x0", 4); 4901 dwlen = str[0]; 4902 len = dwlen * 4 - 4; 4903 memset(str + 4, 0, len); 4904 if (len > strlen(version)) 4905 len = strlen(version); 4906 memcpy(str + 4, version, len); 4907 4908 mcp->mb[0] = MBC_SET_RNID_PARAMS; 4909 mcp->mb[1] = 
RNID_TYPE_SET_VERSION << 8 | dwlen; 4910 mcp->mb[2] = MSW(LSD(str_dma)); 4911 mcp->mb[3] = LSW(LSD(str_dma)); 4912 mcp->mb[6] = MSW(MSD(str_dma)); 4913 mcp->mb[7] = LSW(MSD(str_dma)); 4914 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 4915 mcp->in_mb = MBX_1|MBX_0; 4916 mcp->tov = MBX_TOV_SECONDS; 4917 mcp->flags = 0; 4918 rval = qla2x00_mailbox_command(vha, mcp); 4919 4920 if (rval != QLA_SUCCESS) { 4921 ql_dbg(ql_dbg_mbx, vha, 0x1180, 4922 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]); 4923 } else { 4924 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1181, 4925 "Done %s.\n", __func__); 4926 } 4927 4928 dma_pool_free(ha->s_dma_pool, str, str_dma); 4929 4930 return rval; 4931 } 4932 4933 int 4934 qla24xx_get_port_login_templ(scsi_qla_host_t *vha, dma_addr_t buf_dma, 4935 void *buf, uint16_t bufsiz) 4936 { 4937 int rval, i; 4938 mbx_cmd_t mc; 4939 mbx_cmd_t *mcp = &mc; 4940 uint32_t *bp; 4941 4942 if (!IS_FWI2_CAPABLE(vha->hw)) 4943 return QLA_FUNCTION_FAILED; 4944 4945 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159, 4946 "Entered %s.\n", __func__); 4947 4948 mcp->mb[0] = MBC_GET_RNID_PARAMS; 4949 mcp->mb[1] = RNID_TYPE_PORT_LOGIN << 8; 4950 mcp->mb[2] = MSW(buf_dma); 4951 mcp->mb[3] = LSW(buf_dma); 4952 mcp->mb[6] = MSW(MSD(buf_dma)); 4953 mcp->mb[7] = LSW(MSD(buf_dma)); 4954 mcp->mb[8] = bufsiz/4; 4955 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 4956 mcp->in_mb = MBX_1|MBX_0; 4957 mcp->tov = MBX_TOV_SECONDS; 4958 mcp->flags = 0; 4959 rval = qla2x00_mailbox_command(vha, mcp); 4960 4961 if (rval != QLA_SUCCESS) { 4962 ql_dbg(ql_dbg_mbx, vha, 0x115a, 4963 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]); 4964 } else { 4965 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b, 4966 "Done %s.\n", __func__); 4967 bp = (uint32_t *) buf; 4968 for (i = 0; i < (bufsiz-4)/4; i++, bp++) 4969 *bp = le32_to_cpu((__force __le32)*bp); 4970 } 4971 4972 return rval; 4973 } 4974 4975 #define PUREX_CMD_COUNT 4 4976 int 4977 qla25xx_set_els_cmds_supported(scsi_qla_host_t *vha) 4978 { 4979 int rval; 4980 mbx_cmd_t mc; 4981 mbx_cmd_t *mcp = &mc; 4982 uint8_t *els_cmd_map; 4983 uint8_t active_cnt = 0; 4984 dma_addr_t els_cmd_map_dma; 4985 uint8_t cmd_opcode[PUREX_CMD_COUNT]; 4986 uint8_t i, index, purex_bit; 4987 struct qla_hw_data *ha = vha->hw; 4988 4989 if (!IS_QLA25XX(ha) && !IS_QLA2031(ha) && 4990 !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 4991 return QLA_SUCCESS; 4992 4993 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1197, 4994 "Entered %s.\n", __func__); 4995 4996 els_cmd_map = dma_alloc_coherent(&ha->pdev->dev, ELS_CMD_MAP_SIZE, 4997 &els_cmd_map_dma, GFP_KERNEL); 4998 if (!els_cmd_map) { 4999 ql_log(ql_log_warn, vha, 0x7101, 5000 "Failed to allocate RDP els command param.\n"); 5001 return QLA_MEMORY_ALLOC_FAILED; 5002 } 5003 5004 /* List of Purex ELS */ 5005 if (ql2xrdpenable) { 5006 cmd_opcode[active_cnt] = ELS_RDP; 5007 active_cnt++; 5008 } 5009 if (ha->flags.scm_supported_f) { 5010 cmd_opcode[active_cnt] = ELS_FPIN; 5011 active_cnt++; 5012 } 5013 if (ha->flags.edif_enabled) { 5014 cmd_opcode[active_cnt] = ELS_AUTH_ELS; 5015 active_cnt++; 5016 } 5017 5018 for (i = 0; i < active_cnt; i++) { 5019 index = cmd_opcode[i] / 8; 5020 purex_bit = cmd_opcode[i] % 8; 5021 els_cmd_map[index] |= 1 << purex_bit; 5022 } 5023 5024 mcp->mb[0] = MBC_SET_RNID_PARAMS; 5025 mcp->mb[1] = RNID_TYPE_ELS_CMD << 8; 5026 mcp->mb[2] = MSW(LSD(els_cmd_map_dma)); 5027 mcp->mb[3] = LSW(LSD(els_cmd_map_dma)); 5028 mcp->mb[6] = MSW(MSD(els_cmd_map_dma)); 5029 mcp->mb[7] = LSW(MSD(els_cmd_map_dma)); 
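/*
 * The ELS bitmap built above carries one bit per ELS opcode
 * (byte index = opcode / 8, bit = opcode % 8); assuming ELS_RDP is
 * opcode 0x18, for example, it lands in byte 3, bit 0. The map's
 * 64-bit DMA address is passed to the firmware split across four
 * 16-bit mailbox registers: the low dword as MSW/LSW in mb[2]/mb[3]
 * and the high dword as MSW/LSW in mb[6]/mb[7].
 */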
5030 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 5031 mcp->in_mb = MBX_1|MBX_0; 5032 mcp->tov = MBX_TOV_SECONDS; 5033 mcp->flags = MBX_DMA_OUT; 5034 mcp->buf_size = ELS_CMD_MAP_SIZE; 5035 rval = qla2x00_mailbox_command(vha, mcp); 5036 5037 if (rval != QLA_SUCCESS) { 5038 ql_dbg(ql_dbg_mbx, vha, 0x118d, 5039 "Failed=%x (%x,%x).\n", rval, mcp->mb[0], mcp->mb[1]); 5040 } else { 5041 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c, 5042 "Done %s.\n", __func__); 5043 } 5044 5045 dma_free_coherent(&ha->pdev->dev, ELS_CMD_MAP_SIZE, 5046 els_cmd_map, els_cmd_map_dma); 5047 5048 return rval; 5049 } 5050 5051 static int 5052 qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp) 5053 { 5054 int rval; 5055 mbx_cmd_t mc; 5056 mbx_cmd_t *mcp = &mc; 5057 5058 if (!IS_FWI2_CAPABLE(vha->hw)) 5059 return QLA_FUNCTION_FAILED; 5060 5061 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159, 5062 "Entered %s.\n", __func__); 5063 5064 mcp->mb[0] = MBC_GET_RNID_PARAMS; 5065 mcp->mb[1] = RNID_TYPE_ASIC_TEMP << 8; 5066 mcp->out_mb = MBX_1|MBX_0; 5067 mcp->in_mb = MBX_1|MBX_0; 5068 mcp->tov = MBX_TOV_SECONDS; 5069 mcp->flags = 0; 5070 rval = qla2x00_mailbox_command(vha, mcp); 5071 *temp = mcp->mb[1]; 5072 5073 if (rval != QLA_SUCCESS) { 5074 ql_dbg(ql_dbg_mbx, vha, 0x115a, 5075 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]); 5076 } else { 5077 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b, 5078 "Done %s.\n", __func__); 5079 } 5080 5081 return rval; 5082 } 5083 5084 int 5085 qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp, 5086 uint16_t dev, uint16_t off, uint16_t len, uint16_t opt) 5087 { 5088 int rval; 5089 mbx_cmd_t mc; 5090 mbx_cmd_t *mcp = &mc; 5091 struct qla_hw_data *ha = vha->hw; 5092 5093 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8, 5094 "Entered %s.\n", __func__); 5095 5096 if (!IS_FWI2_CAPABLE(ha)) 5097 return QLA_FUNCTION_FAILED; 5098 5099 if (len == 1) 5100 opt |= BIT_0; 5101 5102 mcp->mb[0] = MBC_READ_SFP; 5103 mcp->mb[1] = dev; 5104 mcp->mb[2] = MSW(LSD(sfp_dma)); 5105 mcp->mb[3] = LSW(LSD(sfp_dma)); 5106 mcp->mb[6] = MSW(MSD(sfp_dma)); 5107 mcp->mb[7] = LSW(MSD(sfp_dma)); 5108 mcp->mb[8] = len; 5109 mcp->mb[9] = off; 5110 mcp->mb[10] = opt; 5111 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 5112 mcp->in_mb = MBX_1|MBX_0; 5113 mcp->tov = MBX_TOV_SECONDS; 5114 mcp->flags = 0; 5115 rval = qla2x00_mailbox_command(vha, mcp); 5116 5117 if (opt & BIT_0) 5118 *sfp = mcp->mb[1]; 5119 5120 if (rval != QLA_SUCCESS) { 5121 ql_dbg(ql_dbg_mbx, vha, 0x10e9, 5122 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5123 if (mcp->mb[0] == MBS_COMMAND_ERROR && mcp->mb[1] == 0x22) { 5124 /* sfp is not there */ 5125 rval = QLA_INTERFACE_ERROR; 5126 } 5127 } else { 5128 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea, 5129 "Done %s.\n", __func__); 5130 } 5131 5132 return rval; 5133 } 5134 5135 int 5136 qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp, 5137 uint16_t dev, uint16_t off, uint16_t len, uint16_t opt) 5138 { 5139 int rval; 5140 mbx_cmd_t mc; 5141 mbx_cmd_t *mcp = &mc; 5142 struct qla_hw_data *ha = vha->hw; 5143 5144 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10eb, 5145 "Entered %s.\n", __func__); 5146 5147 if (!IS_FWI2_CAPABLE(ha)) 5148 return QLA_FUNCTION_FAILED; 5149 5150 if (len == 1) 5151 opt |= BIT_0; 5152 5153 if (opt & BIT_0) 5154 len = *sfp; 5155 5156 mcp->mb[0] = MBC_WRITE_SFP; 5157 mcp->mb[1] = dev; 5158 mcp->mb[2] = MSW(LSD(sfp_dma)); 5159 mcp->mb[3] = LSW(LSD(sfp_dma)); 5160 mcp->mb[6] = MSW(MSD(sfp_dma)); 
5161 mcp->mb[7] = LSW(MSD(sfp_dma)); 5162 mcp->mb[8] = len; 5163 mcp->mb[9] = off; 5164 mcp->mb[10] = opt; 5165 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 5166 mcp->in_mb = MBX_1|MBX_0; 5167 mcp->tov = MBX_TOV_SECONDS; 5168 mcp->flags = 0; 5169 rval = qla2x00_mailbox_command(vha, mcp); 5170 5171 if (rval != QLA_SUCCESS) { 5172 ql_dbg(ql_dbg_mbx, vha, 0x10ec, 5173 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5174 } else { 5175 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ed, 5176 "Done %s.\n", __func__); 5177 } 5178 5179 return rval; 5180 } 5181 5182 int 5183 qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma, 5184 uint16_t size_in_bytes, uint16_t *actual_size) 5185 { 5186 int rval; 5187 mbx_cmd_t mc; 5188 mbx_cmd_t *mcp = &mc; 5189 5190 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ee, 5191 "Entered %s.\n", __func__); 5192 5193 if (!IS_CNA_CAPABLE(vha->hw)) 5194 return QLA_FUNCTION_FAILED; 5195 5196 mcp->mb[0] = MBC_GET_XGMAC_STATS; 5197 mcp->mb[2] = MSW(stats_dma); 5198 mcp->mb[3] = LSW(stats_dma); 5199 mcp->mb[6] = MSW(MSD(stats_dma)); 5200 mcp->mb[7] = LSW(MSD(stats_dma)); 5201 mcp->mb[8] = size_in_bytes >> 2; 5202 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0; 5203 mcp->in_mb = MBX_2|MBX_1|MBX_0; 5204 mcp->tov = MBX_TOV_SECONDS; 5205 mcp->flags = 0; 5206 rval = qla2x00_mailbox_command(vha, mcp); 5207 5208 if (rval != QLA_SUCCESS) { 5209 ql_dbg(ql_dbg_mbx, vha, 0x10ef, 5210 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", 5211 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); 5212 } else { 5213 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f0, 5214 "Done %s.\n", __func__); 5215 5216 5217 *actual_size = mcp->mb[2] << 2; 5218 } 5219 5220 return rval; 5221 } 5222 5223 int 5224 qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma, 5225 uint16_t size) 5226 { 5227 int rval; 5228 mbx_cmd_t mc; 5229 mbx_cmd_t *mcp = &mc; 5230 5231 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f1, 5232 "Entered %s.\n", __func__); 5233 5234 if (!IS_CNA_CAPABLE(vha->hw)) 5235 return QLA_FUNCTION_FAILED; 5236 5237 mcp->mb[0] = MBC_GET_DCBX_PARAMS; 5238 mcp->mb[1] = 0; 5239 mcp->mb[2] = MSW(tlv_dma); 5240 mcp->mb[3] = LSW(tlv_dma); 5241 mcp->mb[6] = MSW(MSD(tlv_dma)); 5242 mcp->mb[7] = LSW(MSD(tlv_dma)); 5243 mcp->mb[8] = size; 5244 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 5245 mcp->in_mb = MBX_2|MBX_1|MBX_0; 5246 mcp->tov = MBX_TOV_SECONDS; 5247 mcp->flags = 0; 5248 rval = qla2x00_mailbox_command(vha, mcp); 5249 5250 if (rval != QLA_SUCCESS) { 5251 ql_dbg(ql_dbg_mbx, vha, 0x10f2, 5252 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", 5253 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); 5254 } else { 5255 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f3, 5256 "Done %s.\n", __func__); 5257 } 5258 5259 return rval; 5260 } 5261 5262 int 5263 qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data) 5264 { 5265 int rval; 5266 mbx_cmd_t mc; 5267 mbx_cmd_t *mcp = &mc; 5268 5269 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f4, 5270 "Entered %s.\n", __func__); 5271 5272 if (!IS_FWI2_CAPABLE(vha->hw)) 5273 return QLA_FUNCTION_FAILED; 5274 5275 mcp->mb[0] = MBC_READ_RAM_EXTENDED; 5276 mcp->mb[1] = LSW(risc_addr); 5277 mcp->mb[8] = MSW(risc_addr); 5278 mcp->out_mb = MBX_8|MBX_1|MBX_0; 5279 mcp->in_mb = MBX_3|MBX_2|MBX_0; 5280 mcp->tov = MBX_TOV_SECONDS; 5281 mcp->flags = 0; 5282 rval = qla2x00_mailbox_command(vha, mcp); 5283 if (rval != QLA_SUCCESS) { 5284 ql_dbg(ql_dbg_mbx, vha, 0x10f5, 5285 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5286 } else { 5287 
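/*
 * On success the 32-bit RISC RAM word comes back split across the
 * mailbox registers: mb[2] holds the low 16 bits and mb[3] the high
 * 16 bits, reassembled below as (mb[3] << 16) | mb[2].
 */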
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f6, 5288 "Done %s.\n", __func__); 5289 *data = mcp->mb[3] << 16 | mcp->mb[2]; 5290 } 5291 5292 return rval; 5293 } 5294 5295 int 5296 qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, 5297 uint16_t *mresp) 5298 { 5299 int rval; 5300 mbx_cmd_t mc; 5301 mbx_cmd_t *mcp = &mc; 5302 5303 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f7, 5304 "Entered %s.\n", __func__); 5305 5306 memset(mcp->mb, 0 , sizeof(mcp->mb)); 5307 mcp->mb[0] = MBC_DIAGNOSTIC_LOOP_BACK; 5308 mcp->mb[1] = mreq->options | BIT_6; // BIT_6 specifies 64 bit addressing 5309 5310 /* transfer count */ 5311 mcp->mb[10] = LSW(mreq->transfer_size); 5312 mcp->mb[11] = MSW(mreq->transfer_size); 5313 5314 /* send data address */ 5315 mcp->mb[14] = LSW(mreq->send_dma); 5316 mcp->mb[15] = MSW(mreq->send_dma); 5317 mcp->mb[20] = LSW(MSD(mreq->send_dma)); 5318 mcp->mb[21] = MSW(MSD(mreq->send_dma)); 5319 5320 /* receive data address */ 5321 mcp->mb[16] = LSW(mreq->rcv_dma); 5322 mcp->mb[17] = MSW(mreq->rcv_dma); 5323 mcp->mb[6] = LSW(MSD(mreq->rcv_dma)); 5324 mcp->mb[7] = MSW(MSD(mreq->rcv_dma)); 5325 5326 /* Iteration count */ 5327 mcp->mb[18] = LSW(mreq->iteration_count); 5328 mcp->mb[19] = MSW(mreq->iteration_count); 5329 5330 mcp->out_mb = MBX_21|MBX_20|MBX_19|MBX_18|MBX_17|MBX_16|MBX_15| 5331 MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0; 5332 if (IS_CNA_CAPABLE(vha->hw)) 5333 mcp->out_mb |= MBX_2; 5334 mcp->in_mb = MBX_19|MBX_18|MBX_3|MBX_2|MBX_1|MBX_0; 5335 5336 mcp->buf_size = mreq->transfer_size; 5337 mcp->tov = MBX_TOV_SECONDS; 5338 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; 5339 5340 rval = qla2x00_mailbox_command(vha, mcp); 5341 5342 if (rval != QLA_SUCCESS) { 5343 ql_dbg(ql_dbg_mbx, vha, 0x10f8, 5344 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[18]=%x " 5345 "mb[19]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], 5346 mcp->mb[3], mcp->mb[18], mcp->mb[19]); 5347 } else { 5348 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f9, 5349 "Done %s.\n", __func__); 5350 } 5351 5352 /* Copy mailbox information */ 5353 memcpy( mresp, mcp->mb, 64); 5354 return rval; 5355 } 5356 5357 int 5358 qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, 5359 uint16_t *mresp) 5360 { 5361 int rval; 5362 mbx_cmd_t mc; 5363 mbx_cmd_t *mcp = &mc; 5364 struct qla_hw_data *ha = vha->hw; 5365 5366 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fa, 5367 "Entered %s.\n", __func__); 5368 5369 memset(mcp->mb, 0 , sizeof(mcp->mb)); 5370 mcp->mb[0] = MBC_DIAGNOSTIC_ECHO; 5371 /* BIT_6 specifies 64bit address */ 5372 mcp->mb[1] = mreq->options | BIT_15 | BIT_6; 5373 if (IS_CNA_CAPABLE(ha)) { 5374 mcp->mb[2] = vha->fcoe_fcf_idx; 5375 } 5376 mcp->mb[16] = LSW(mreq->rcv_dma); 5377 mcp->mb[17] = MSW(mreq->rcv_dma); 5378 mcp->mb[6] = LSW(MSD(mreq->rcv_dma)); 5379 mcp->mb[7] = MSW(MSD(mreq->rcv_dma)); 5380 5381 mcp->mb[10] = LSW(mreq->transfer_size); 5382 5383 mcp->mb[14] = LSW(mreq->send_dma); 5384 mcp->mb[15] = MSW(mreq->send_dma); 5385 mcp->mb[20] = LSW(MSD(mreq->send_dma)); 5386 mcp->mb[21] = MSW(MSD(mreq->send_dma)); 5387 5388 mcp->out_mb = MBX_21|MBX_20|MBX_17|MBX_16|MBX_15| 5389 MBX_14|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0; 5390 if (IS_CNA_CAPABLE(ha)) 5391 mcp->out_mb |= MBX_2; 5392 5393 mcp->in_mb = MBX_0; 5394 if (IS_CNA_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || 5395 IS_QLA2031(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) 5396 mcp->in_mb |= MBX_1; 5397 if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) || 5398 IS_QLA28XX(ha)) 5399 mcp->in_mb |= MBX_3; 
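/*
 * out_mb/in_mb are bitmasks interpreted by qla2x00_mailbox_command():
 * each MBX_n bit selects a mailbox register that is loaded into the
 * hardware before the command is issued (out_mb) or copied back from
 * the hardware on completion (in_mb).
 */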
5400 5401 mcp->tov = MBX_TOV_SECONDS; 5402 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; 5403 mcp->buf_size = mreq->transfer_size; 5404 5405 rval = qla2x00_mailbox_command(vha, mcp); 5406 5407 if (rval != QLA_SUCCESS) { 5408 ql_dbg(ql_dbg_mbx, vha, 0x10fb, 5409 "Failed=%x mb[0]=%x mb[1]=%x.\n", 5410 rval, mcp->mb[0], mcp->mb[1]); 5411 } else { 5412 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fc, 5413 "Done %s.\n", __func__); 5414 } 5415 5416 /* Copy mailbox information */ 5417 memcpy(mresp, mcp->mb, 64); 5418 return rval; 5419 } 5420 5421 int 5422 qla84xx_reset_chip(scsi_qla_host_t *vha, uint16_t enable_diagnostic) 5423 { 5424 int rval; 5425 mbx_cmd_t mc; 5426 mbx_cmd_t *mcp = &mc; 5427 5428 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fd, 5429 "Entered %s enable_diag=%d.\n", __func__, enable_diagnostic); 5430 5431 mcp->mb[0] = MBC_ISP84XX_RESET; 5432 mcp->mb[1] = enable_diagnostic; 5433 mcp->out_mb = MBX_1|MBX_0; 5434 mcp->in_mb = MBX_1|MBX_0; 5435 mcp->tov = MBX_TOV_SECONDS; 5436 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; 5437 rval = qla2x00_mailbox_command(vha, mcp); 5438 5439 if (rval != QLA_SUCCESS) 5440 ql_dbg(ql_dbg_mbx, vha, 0x10fe, "Failed=%x.\n", rval); 5441 else 5442 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ff, 5443 "Done %s.\n", __func__); 5444 5445 return rval; 5446 } 5447 5448 int 5449 qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data) 5450 { 5451 int rval; 5452 mbx_cmd_t mc; 5453 mbx_cmd_t *mcp = &mc; 5454 5455 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1100, 5456 "Entered %s.\n", __func__); 5457 5458 if (!IS_FWI2_CAPABLE(vha->hw)) 5459 return QLA_FUNCTION_FAILED; 5460 5461 mcp->mb[0] = MBC_WRITE_RAM_WORD_EXTENDED; 5462 mcp->mb[1] = LSW(risc_addr); 5463 mcp->mb[2] = LSW(data); 5464 mcp->mb[3] = MSW(data); 5465 mcp->mb[8] = MSW(risc_addr); 5466 mcp->out_mb = MBX_8|MBX_3|MBX_2|MBX_1|MBX_0; 5467 mcp->in_mb = MBX_1|MBX_0; 5468 mcp->tov = MBX_TOV_SECONDS; 5469 mcp->flags = 0; 5470 rval = qla2x00_mailbox_command(vha, mcp); 5471 if (rval != QLA_SUCCESS) { 5472 ql_dbg(ql_dbg_mbx, vha, 0x1101, 5473 "Failed=%x mb[0]=%x mb[1]=%x.\n", 5474 rval, mcp->mb[0], mcp->mb[1]); 5475 } else { 5476 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1102, 5477 "Done %s.\n", __func__); 5478 } 5479 5480 return rval; 5481 } 5482 5483 int 5484 qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb) 5485 { 5486 int rval; 5487 uint32_t stat, timer; 5488 uint16_t mb0 = 0; 5489 struct qla_hw_data *ha = vha->hw; 5490 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 5491 5492 rval = QLA_SUCCESS; 5493 5494 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1103, 5495 "Entered %s.\n", __func__); 5496 5497 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); 5498 5499 /* Write the MBC data to the registers */ 5500 wrt_reg_word(®->mailbox0, MBC_WRITE_MPI_REGISTER); 5501 wrt_reg_word(®->mailbox1, mb[0]); 5502 wrt_reg_word(®->mailbox2, mb[1]); 5503 wrt_reg_word(®->mailbox3, mb[2]); 5504 wrt_reg_word(®->mailbox4, mb[3]); 5505 5506 wrt_reg_dword(®->hccr, HCCRX_SET_HOST_INT); 5507 5508 /* Poll for MBC interrupt */ 5509 for (timer = 6000000; timer; timer--) { 5510 /* Check for pending interrupts. 
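 * The regular mailbox path is bypassed here: the host status
 * register is polled directly, and 6,000,000 iterations with a 5 us
 * delay give the MPI register write roughly 30 seconds to complete.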
*/ 5511 stat = rd_reg_dword(®->host_status); 5512 if (stat & HSRX_RISC_INT) { 5513 stat &= 0xff; 5514 5515 if (stat == 0x1 || stat == 0x2 || 5516 stat == 0x10 || stat == 0x11) { 5517 set_bit(MBX_INTERRUPT, 5518 &ha->mbx_cmd_flags); 5519 mb0 = rd_reg_word(®->mailbox0); 5520 wrt_reg_dword(®->hccr, 5521 HCCRX_CLR_RISC_INT); 5522 rd_reg_dword(®->hccr); 5523 break; 5524 } 5525 } 5526 udelay(5); 5527 } 5528 5529 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) 5530 rval = mb0 & MBS_MASK; 5531 else 5532 rval = QLA_FUNCTION_FAILED; 5533 5534 if (rval != QLA_SUCCESS) { 5535 ql_dbg(ql_dbg_mbx, vha, 0x1104, 5536 "Failed=%x mb[0]=%x.\n", rval, mb[0]); 5537 } else { 5538 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1105, 5539 "Done %s.\n", __func__); 5540 } 5541 5542 return rval; 5543 } 5544 5545 /* Set the specified data rate */ 5546 int 5547 qla2x00_set_data_rate(scsi_qla_host_t *vha, uint16_t mode) 5548 { 5549 int rval; 5550 mbx_cmd_t mc; 5551 mbx_cmd_t *mcp = &mc; 5552 struct qla_hw_data *ha = vha->hw; 5553 uint16_t val; 5554 5555 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106, 5556 "Entered %s speed:0x%x mode:0x%x.\n", __func__, ha->set_data_rate, 5557 mode); 5558 5559 if (!IS_FWI2_CAPABLE(ha)) 5560 return QLA_FUNCTION_FAILED; 5561 5562 memset(mcp, 0, sizeof(*mcp)); 5563 switch (ha->set_data_rate) { 5564 case PORT_SPEED_AUTO: 5565 case PORT_SPEED_4GB: 5566 case PORT_SPEED_8GB: 5567 case PORT_SPEED_16GB: 5568 case PORT_SPEED_32GB: 5569 val = ha->set_data_rate; 5570 break; 5571 default: 5572 ql_log(ql_log_warn, vha, 0x1199, 5573 "Unrecognized speed setting:%d. Setting Autoneg\n", 5574 ha->set_data_rate); 5575 val = ha->set_data_rate = PORT_SPEED_AUTO; 5576 break; 5577 } 5578 5579 mcp->mb[0] = MBC_DATA_RATE; 5580 mcp->mb[1] = mode; 5581 mcp->mb[2] = val; 5582 5583 mcp->out_mb = MBX_2|MBX_1|MBX_0; 5584 mcp->in_mb = MBX_2|MBX_1|MBX_0; 5585 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) 5586 mcp->in_mb |= MBX_4|MBX_3; 5587 mcp->tov = MBX_TOV_SECONDS; 5588 mcp->flags = 0; 5589 rval = qla2x00_mailbox_command(vha, mcp); 5590 if (rval != QLA_SUCCESS) { 5591 ql_dbg(ql_dbg_mbx, vha, 0x1107, 5592 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5593 } else { 5594 if (mcp->mb[1] != 0x7) 5595 ql_dbg(ql_dbg_mbx, vha, 0x1179, 5596 "Speed set:0x%x\n", mcp->mb[1]); 5597 5598 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108, 5599 "Done %s.\n", __func__); 5600 } 5601 5602 return rval; 5603 } 5604 5605 int 5606 qla2x00_get_data_rate(scsi_qla_host_t *vha) 5607 { 5608 int rval; 5609 mbx_cmd_t mc; 5610 mbx_cmd_t *mcp = &mc; 5611 struct qla_hw_data *ha = vha->hw; 5612 5613 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106, 5614 "Entered %s.\n", __func__); 5615 5616 if (!IS_FWI2_CAPABLE(ha)) 5617 return QLA_FUNCTION_FAILED; 5618 5619 mcp->mb[0] = MBC_DATA_RATE; 5620 mcp->mb[1] = QLA_GET_DATA_RATE; 5621 mcp->out_mb = MBX_1|MBX_0; 5622 mcp->in_mb = MBX_2|MBX_1|MBX_0; 5623 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) 5624 mcp->in_mb |= MBX_3; 5625 mcp->tov = MBX_TOV_SECONDS; 5626 mcp->flags = 0; 5627 rval = qla2x00_mailbox_command(vha, mcp); 5628 if (rval != QLA_SUCCESS) { 5629 ql_dbg(ql_dbg_mbx, vha, 0x1107, 5630 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5631 } else { 5632 if (mcp->mb[1] != 0x7) 5633 ha->link_data_rate = mcp->mb[1]; 5634 5635 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 5636 if (mcp->mb[4] & BIT_0) 5637 ql_log(ql_log_info, vha, 0x11a2, 5638 "FEC=enabled (data rate).\n"); 5639 } 5640 5641 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108, 5642 "Done %s.\n", __func__); 5643 if 
(mcp->mb[1] != 0x7) 5644 ha->link_data_rate = mcp->mb[1]; 5645 } 5646 5647 return rval; 5648 } 5649 5650 int 5651 qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb) 5652 { 5653 int rval; 5654 mbx_cmd_t mc; 5655 mbx_cmd_t *mcp = &mc; 5656 struct qla_hw_data *ha = vha->hw; 5657 5658 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1109, 5659 "Entered %s.\n", __func__); 5660 5661 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA8044(ha) && 5662 !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 5663 return QLA_FUNCTION_FAILED; 5664 mcp->mb[0] = MBC_GET_PORT_CONFIG; 5665 mcp->out_mb = MBX_0; 5666 mcp->in_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 5667 mcp->tov = MBX_TOV_SECONDS; 5668 mcp->flags = 0; 5669 5670 rval = qla2x00_mailbox_command(vha, mcp); 5671 5672 if (rval != QLA_SUCCESS) { 5673 ql_dbg(ql_dbg_mbx, vha, 0x110a, 5674 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5675 } else { 5676 /* Copy all bits to preserve original value */ 5677 memcpy(mb, &mcp->mb[1], sizeof(uint16_t) * 4); 5678 5679 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110b, 5680 "Done %s.\n", __func__); 5681 } 5682 return rval; 5683 } 5684 5685 int 5686 qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb) 5687 { 5688 int rval; 5689 mbx_cmd_t mc; 5690 mbx_cmd_t *mcp = &mc; 5691 5692 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110c, 5693 "Entered %s.\n", __func__); 5694 5695 mcp->mb[0] = MBC_SET_PORT_CONFIG; 5696 /* Copy all bits to preserve original setting */ 5697 memcpy(&mcp->mb[1], mb, sizeof(uint16_t) * 4); 5698 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 5699 mcp->in_mb = MBX_0; 5700 mcp->tov = MBX_TOV_SECONDS; 5701 mcp->flags = 0; 5702 rval = qla2x00_mailbox_command(vha, mcp); 5703 5704 if (rval != QLA_SUCCESS) { 5705 ql_dbg(ql_dbg_mbx, vha, 0x110d, 5706 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5707 } else 5708 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110e, 5709 "Done %s.\n", __func__); 5710 5711 return rval; 5712 } 5713 5714 5715 int 5716 qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority, 5717 uint16_t *mb) 5718 { 5719 int rval; 5720 mbx_cmd_t mc; 5721 mbx_cmd_t *mcp = &mc; 5722 struct qla_hw_data *ha = vha->hw; 5723 5724 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110f, 5725 "Entered %s.\n", __func__); 5726 5727 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha)) 5728 return QLA_FUNCTION_FAILED; 5729 5730 mcp->mb[0] = MBC_PORT_PARAMS; 5731 mcp->mb[1] = loop_id; 5732 if (ha->flags.fcp_prio_enabled) 5733 mcp->mb[2] = BIT_1; 5734 else 5735 mcp->mb[2] = BIT_2; 5736 mcp->mb[4] = priority & 0xf; 5737 mcp->mb[9] = vha->vp_idx; 5738 mcp->out_mb = MBX_9|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 5739 mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0; 5740 mcp->tov = MBX_TOV_SECONDS; 5741 mcp->flags = 0; 5742 rval = qla2x00_mailbox_command(vha, mcp); 5743 if (mb != NULL) { 5744 mb[0] = mcp->mb[0]; 5745 mb[1] = mcp->mb[1]; 5746 mb[3] = mcp->mb[3]; 5747 mb[4] = mcp->mb[4]; 5748 } 5749 5750 if (rval != QLA_SUCCESS) { 5751 ql_dbg(ql_dbg_mbx, vha, 0x10cd, "Failed=%x.\n", rval); 5752 } else { 5753 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10cc, 5754 "Done %s.\n", __func__); 5755 } 5756 5757 return rval; 5758 } 5759 5760 int 5761 qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp) 5762 { 5763 int rval = QLA_FUNCTION_FAILED; 5764 struct qla_hw_data *ha = vha->hw; 5765 uint8_t byte; 5766 5767 if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha)) { 5768 ql_dbg(ql_dbg_mbx, vha, 0x1150, 5769 "Thermal not supported by this card.\n"); 5770 return rval; 5771 } 5772 5773 if (IS_QLA25XX(ha)) { 5774 if (ha->pdev->subsystem_vendor 
== PCI_VENDOR_ID_QLOGIC && 5775 ha->pdev->subsystem_device == 0x0175) { 5776 rval = qla2x00_read_sfp(vha, 0, &byte, 5777 0x98, 0x1, 1, BIT_13|BIT_0); 5778 *temp = byte; 5779 return rval; 5780 } 5781 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP && 5782 ha->pdev->subsystem_device == 0x338e) { 5783 rval = qla2x00_read_sfp(vha, 0, &byte, 5784 0x98, 0x1, 1, BIT_15|BIT_14|BIT_0); 5785 *temp = byte; 5786 return rval; 5787 } 5788 ql_dbg(ql_dbg_mbx, vha, 0x10c9, 5789 "Thermal not supported by this card.\n"); 5790 return rval; 5791 } 5792 5793 if (IS_QLA82XX(ha)) { 5794 *temp = qla82xx_read_temperature(vha); 5795 rval = QLA_SUCCESS; 5796 return rval; 5797 } else if (IS_QLA8044(ha)) { 5798 *temp = qla8044_read_temperature(vha); 5799 rval = QLA_SUCCESS; 5800 return rval; 5801 } 5802 5803 rval = qla2x00_read_asic_temperature(vha, temp); 5804 return rval; 5805 } 5806 5807 int 5808 qla82xx_mbx_intr_enable(scsi_qla_host_t *vha) 5809 { 5810 int rval; 5811 struct qla_hw_data *ha = vha->hw; 5812 mbx_cmd_t mc; 5813 mbx_cmd_t *mcp = &mc; 5814 5815 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1017, 5816 "Entered %s.\n", __func__); 5817 5818 if (!IS_FWI2_CAPABLE(ha)) 5819 return QLA_FUNCTION_FAILED; 5820 5821 memset(mcp, 0, sizeof(mbx_cmd_t)); 5822 mcp->mb[0] = MBC_TOGGLE_INTERRUPT; 5823 mcp->mb[1] = 1; 5824 5825 mcp->out_mb = MBX_1|MBX_0; 5826 mcp->in_mb = MBX_0; 5827 mcp->tov = MBX_TOV_SECONDS; 5828 mcp->flags = 0; 5829 5830 rval = qla2x00_mailbox_command(vha, mcp); 5831 if (rval != QLA_SUCCESS) { 5832 ql_dbg(ql_dbg_mbx, vha, 0x1016, 5833 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5834 } else { 5835 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100e, 5836 "Done %s.\n", __func__); 5837 } 5838 5839 return rval; 5840 } 5841 5842 int 5843 qla82xx_mbx_intr_disable(scsi_qla_host_t *vha) 5844 { 5845 int rval; 5846 struct qla_hw_data *ha = vha->hw; 5847 mbx_cmd_t mc; 5848 mbx_cmd_t *mcp = &mc; 5849 5850 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100d, 5851 "Entered %s.\n", __func__); 5852 5853 if (!IS_P3P_TYPE(ha)) 5854 return QLA_FUNCTION_FAILED; 5855 5856 memset(mcp, 0, sizeof(mbx_cmd_t)); 5857 mcp->mb[0] = MBC_TOGGLE_INTERRUPT; 5858 mcp->mb[1] = 0; 5859 5860 mcp->out_mb = MBX_1|MBX_0; 5861 mcp->in_mb = MBX_0; 5862 mcp->tov = MBX_TOV_SECONDS; 5863 mcp->flags = 0; 5864 5865 rval = qla2x00_mailbox_command(vha, mcp); 5866 if (rval != QLA_SUCCESS) { 5867 ql_dbg(ql_dbg_mbx, vha, 0x100c, 5868 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5869 } else { 5870 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100b, 5871 "Done %s.\n", __func__); 5872 } 5873 5874 return rval; 5875 } 5876 5877 int 5878 qla82xx_md_get_template_size(scsi_qla_host_t *vha) 5879 { 5880 struct qla_hw_data *ha = vha->hw; 5881 mbx_cmd_t mc; 5882 mbx_cmd_t *mcp = &mc; 5883 int rval = QLA_FUNCTION_FAILED; 5884 5885 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111f, 5886 "Entered %s.\n", __func__); 5887 5888 memset(mcp->mb, 0 , sizeof(mcp->mb)); 5889 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); 5890 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); 5891 mcp->mb[2] = LSW(RQST_TMPLT_SIZE); 5892 mcp->mb[3] = MSW(RQST_TMPLT_SIZE); 5893 5894 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 5895 mcp->in_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8| 5896 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 5897 5898 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; 5899 mcp->tov = MBX_TOV_SECONDS; 5900 rval = qla2x00_mailbox_command(vha, mcp); 5901 5902 /* Always copy back return mailbox values. 
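 * On success the 32-bit minidump template size is returned in mb[2]
 * (low word) and mb[3] (high word); on failure mb[0]/mb[1] carry the
 * mailbox status and mb[2]/mb[3] the subcode, as logged below.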
*/ 5903 if (rval != QLA_SUCCESS) { 5904 ql_dbg(ql_dbg_mbx, vha, 0x1120, 5905 "mailbox command FAILED=0x%x, subcode=%x.\n", 5906 (mcp->mb[1] << 16) | mcp->mb[0], 5907 (mcp->mb[3] << 16) | mcp->mb[2]); 5908 } else { 5909 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1121, 5910 "Done %s.\n", __func__); 5911 ha->md_template_size = ((mcp->mb[3] << 16) | mcp->mb[2]); 5912 if (!ha->md_template_size) { 5913 ql_dbg(ql_dbg_mbx, vha, 0x1122, 5914 "Null template size obtained.\n"); 5915 rval = QLA_FUNCTION_FAILED; 5916 } 5917 } 5918 return rval; 5919 } 5920 5921 int 5922 qla82xx_md_get_template(scsi_qla_host_t *vha) 5923 { 5924 struct qla_hw_data *ha = vha->hw; 5925 mbx_cmd_t mc; 5926 mbx_cmd_t *mcp = &mc; 5927 int rval = QLA_FUNCTION_FAILED; 5928 5929 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1123, 5930 "Entered %s.\n", __func__); 5931 5932 ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev, 5933 ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL); 5934 if (!ha->md_tmplt_hdr) { 5935 ql_log(ql_log_warn, vha, 0x1124, 5936 "Unable to allocate memory for Minidump template.\n"); 5937 return rval; 5938 } 5939 5940 memset(mcp->mb, 0 , sizeof(mcp->mb)); 5941 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); 5942 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); 5943 mcp->mb[2] = LSW(RQST_TMPLT); 5944 mcp->mb[3] = MSW(RQST_TMPLT); 5945 mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma)); 5946 mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma)); 5947 mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma)); 5948 mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma)); 5949 mcp->mb[8] = LSW(ha->md_template_size); 5950 mcp->mb[9] = MSW(ha->md_template_size); 5951 5952 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; 5953 mcp->tov = MBX_TOV_SECONDS; 5954 mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8| 5955 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 5956 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0; 5957 rval = qla2x00_mailbox_command(vha, mcp); 5958 5959 if (rval != QLA_SUCCESS) { 5960 ql_dbg(ql_dbg_mbx, vha, 0x1125, 5961 "mailbox command FAILED=0x%x, subcode=%x.\n", 5962 ((mcp->mb[1] << 16) | mcp->mb[0]), 5963 ((mcp->mb[3] << 16) | mcp->mb[2])); 5964 } else 5965 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1126, 5966 "Done %s.\n", __func__); 5967 return rval; 5968 } 5969 5970 int 5971 qla8044_md_get_template(scsi_qla_host_t *vha) 5972 { 5973 struct qla_hw_data *ha = vha->hw; 5974 mbx_cmd_t mc; 5975 mbx_cmd_t *mcp = &mc; 5976 int rval = QLA_FUNCTION_FAILED; 5977 int offset = 0, size = MINIDUMP_SIZE_36K; 5978 5979 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11f, 5980 "Entered %s.\n", __func__); 5981 5982 ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev, 5983 ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL); 5984 if (!ha->md_tmplt_hdr) { 5985 ql_log(ql_log_warn, vha, 0xb11b, 5986 "Unable to allocate memory for Minidump template.\n"); 5987 return rval; 5988 } 5989 5990 memset(mcp->mb, 0 , sizeof(mcp->mb)); 5991 while (offset < ha->md_template_size) { 5992 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); 5993 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); 5994 mcp->mb[2] = LSW(RQST_TMPLT); 5995 mcp->mb[3] = MSW(RQST_TMPLT); 5996 mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma + offset)); 5997 mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma + offset)); 5998 mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma + offset)); 5999 mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma + offset)); 6000 mcp->mb[8] = LSW(size); 6001 mcp->mb[9] = MSW(size); 6002 mcp->mb[10] = offset & 0x0000FFFF; 6003 mcp->mb[11] = offset & 0xFFFF0000; 6004 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; 
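/*
 * Unlike the 82xx variant above, which fetches the whole template in
 * one request, the 8044 pulls it down in MINIDUMP_SIZE_36K chunks,
 * passing the running byte offset to the firmware in mb[10]/mb[11]
 * on each pass.
 */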
6005 mcp->tov = MBX_TOV_SECONDS; 6006 mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8| 6007 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 6008 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0; 6009 rval = qla2x00_mailbox_command(vha, mcp); 6010 6011 if (rval != QLA_SUCCESS) { 6012 ql_dbg(ql_dbg_mbx, vha, 0xb11c, 6013 "mailbox command FAILED=0x%x, subcode=%x.\n", 6014 ((mcp->mb[1] << 16) | mcp->mb[0]), 6015 ((mcp->mb[3] << 16) | mcp->mb[2])); 6016 return rval; 6017 } else 6018 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11d, 6019 "Done %s.\n", __func__); 6020 offset = offset + size; 6021 } 6022 return rval; 6023 } 6024 6025 int 6026 qla81xx_set_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg) 6027 { 6028 int rval; 6029 struct qla_hw_data *ha = vha->hw; 6030 mbx_cmd_t mc; 6031 mbx_cmd_t *mcp = &mc; 6032 6033 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha)) 6034 return QLA_FUNCTION_FAILED; 6035 6036 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1133, 6037 "Entered %s.\n", __func__); 6038 6039 memset(mcp, 0, sizeof(mbx_cmd_t)); 6040 mcp->mb[0] = MBC_SET_LED_CONFIG; 6041 mcp->mb[1] = led_cfg[0]; 6042 mcp->mb[2] = led_cfg[1]; 6043 if (IS_QLA8031(ha)) { 6044 mcp->mb[3] = led_cfg[2]; 6045 mcp->mb[4] = led_cfg[3]; 6046 mcp->mb[5] = led_cfg[4]; 6047 mcp->mb[6] = led_cfg[5]; 6048 } 6049 6050 mcp->out_mb = MBX_2|MBX_1|MBX_0; 6051 if (IS_QLA8031(ha)) 6052 mcp->out_mb |= MBX_6|MBX_5|MBX_4|MBX_3; 6053 mcp->in_mb = MBX_0; 6054 mcp->tov = MBX_TOV_SECONDS; 6055 mcp->flags = 0; 6056 6057 rval = qla2x00_mailbox_command(vha, mcp); 6058 if (rval != QLA_SUCCESS) { 6059 ql_dbg(ql_dbg_mbx, vha, 0x1134, 6060 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 6061 } else { 6062 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1135, 6063 "Done %s.\n", __func__); 6064 } 6065 6066 return rval; 6067 } 6068 6069 int 6070 qla81xx_get_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg) 6071 { 6072 int rval; 6073 struct qla_hw_data *ha = vha->hw; 6074 mbx_cmd_t mc; 6075 mbx_cmd_t *mcp = &mc; 6076 6077 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha)) 6078 return QLA_FUNCTION_FAILED; 6079 6080 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1136, 6081 "Entered %s.\n", __func__); 6082 6083 memset(mcp, 0, sizeof(mbx_cmd_t)); 6084 mcp->mb[0] = MBC_GET_LED_CONFIG; 6085 6086 mcp->out_mb = MBX_0; 6087 mcp->in_mb = MBX_2|MBX_1|MBX_0; 6088 if (IS_QLA8031(ha)) 6089 mcp->in_mb |= MBX_6|MBX_5|MBX_4|MBX_3; 6090 mcp->tov = MBX_TOV_SECONDS; 6091 mcp->flags = 0; 6092 6093 rval = qla2x00_mailbox_command(vha, mcp); 6094 if (rval != QLA_SUCCESS) { 6095 ql_dbg(ql_dbg_mbx, vha, 0x1137, 6096 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 6097 } else { 6098 led_cfg[0] = mcp->mb[1]; 6099 led_cfg[1] = mcp->mb[2]; 6100 if (IS_QLA8031(ha)) { 6101 led_cfg[2] = mcp->mb[3]; 6102 led_cfg[3] = mcp->mb[4]; 6103 led_cfg[4] = mcp->mb[5]; 6104 led_cfg[5] = mcp->mb[6]; 6105 } 6106 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1138, 6107 "Done %s.\n", __func__); 6108 } 6109 6110 return rval; 6111 } 6112 6113 int 6114 qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable) 6115 { 6116 int rval; 6117 struct qla_hw_data *ha = vha->hw; 6118 mbx_cmd_t mc; 6119 mbx_cmd_t *mcp = &mc; 6120 6121 if (!IS_P3P_TYPE(ha)) 6122 return QLA_FUNCTION_FAILED; 6123 6124 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1127, 6125 "Entered %s.\n", __func__); 6126 6127 memset(mcp, 0, sizeof(mbx_cmd_t)); 6128 mcp->mb[0] = MBC_SET_LED_CONFIG; 6129 if (enable) 6130 mcp->mb[7] = 0xE; 6131 else 6132 mcp->mb[7] = 0xD; 6133 6134 mcp->out_mb = MBX_7|MBX_0; 6135 mcp->in_mb = MBX_0; 6136 mcp->tov = MBX_TOV_SECONDS; 6137 mcp->flags = 0; 6138 6139 rval = 
qla2x00_mailbox_command(vha, mcp); 6140 if (rval != QLA_SUCCESS) { 6141 ql_dbg(ql_dbg_mbx, vha, 0x1128, 6142 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 6143 } else { 6144 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1129, 6145 "Done %s.\n", __func__); 6146 } 6147 6148 return rval; 6149 } 6150 6151 int 6152 qla83xx_wr_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data) 6153 { 6154 int rval; 6155 struct qla_hw_data *ha = vha->hw; 6156 mbx_cmd_t mc; 6157 mbx_cmd_t *mcp = &mc; 6158 6159 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 6160 return QLA_FUNCTION_FAILED; 6161 6162 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1130, 6163 "Entered %s.\n", __func__); 6164 6165 mcp->mb[0] = MBC_WRITE_REMOTE_REG; 6166 mcp->mb[1] = LSW(reg); 6167 mcp->mb[2] = MSW(reg); 6168 mcp->mb[3] = LSW(data); 6169 mcp->mb[4] = MSW(data); 6170 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 6171 6172 mcp->in_mb = MBX_1|MBX_0; 6173 mcp->tov = MBX_TOV_SECONDS; 6174 mcp->flags = 0; 6175 rval = qla2x00_mailbox_command(vha, mcp); 6176 6177 if (rval != QLA_SUCCESS) { 6178 ql_dbg(ql_dbg_mbx, vha, 0x1131, 6179 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 6180 } else { 6181 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1132, 6182 "Done %s.\n", __func__); 6183 } 6184 6185 return rval; 6186 } 6187 6188 int 6189 qla2x00_port_logout(scsi_qla_host_t *vha, struct fc_port *fcport) 6190 { 6191 int rval; 6192 struct qla_hw_data *ha = vha->hw; 6193 mbx_cmd_t mc; 6194 mbx_cmd_t *mcp = &mc; 6195 6196 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 6197 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113b, 6198 "Implicit LOGO Unsupported.\n"); 6199 return QLA_FUNCTION_FAILED; 6200 } 6201 6202 6203 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113c, 6204 "Entering %s.\n", __func__); 6205 6206 /* Perform Implicit LOGO. */ 6207 mcp->mb[0] = MBC_PORT_LOGOUT; 6208 mcp->mb[1] = fcport->loop_id; 6209 mcp->mb[10] = BIT_15; 6210 mcp->out_mb = MBX_10|MBX_1|MBX_0; 6211 mcp->in_mb = MBX_0; 6212 mcp->tov = MBX_TOV_SECONDS; 6213 mcp->flags = 0; 6214 rval = qla2x00_mailbox_command(vha, mcp); 6215 if (rval != QLA_SUCCESS) 6216 ql_dbg(ql_dbg_mbx, vha, 0x113d, 6217 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 6218 else 6219 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113e, 6220 "Done %s.\n", __func__); 6221 6222 return rval; 6223 } 6224 6225 int 6226 qla83xx_rd_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t *data) 6227 { 6228 int rval; 6229 mbx_cmd_t mc; 6230 mbx_cmd_t *mcp = &mc; 6231 struct qla_hw_data *ha = vha->hw; 6232 unsigned long retry_max_time = jiffies + (2 * HZ); 6233 6234 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 6235 return QLA_FUNCTION_FAILED; 6236 6237 ql_dbg(ql_dbg_mbx, vha, 0x114b, "Entered %s.\n", __func__); 6238 6239 retry_rd_reg: 6240 mcp->mb[0] = MBC_READ_REMOTE_REG; 6241 mcp->mb[1] = LSW(reg); 6242 mcp->mb[2] = MSW(reg); 6243 mcp->out_mb = MBX_2|MBX_1|MBX_0; 6244 mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0; 6245 mcp->tov = MBX_TOV_SECONDS; 6246 mcp->flags = 0; 6247 rval = qla2x00_mailbox_command(vha, mcp); 6248 6249 if (rval != QLA_SUCCESS) { 6250 ql_dbg(ql_dbg_mbx, vha, 0x114c, 6251 "Failed=%x mb[0]=%x mb[1]=%x.\n", 6252 rval, mcp->mb[0], mcp->mb[1]); 6253 } else { 6254 *data = (mcp->mb[3] | (mcp->mb[4] << 16)); 6255 if (*data == QLA8XXX_BAD_VALUE) { 6256 /* 6257 * During soft-reset CAMRAM register reads might 6258 * return 0xbad0bad0. So retry for MAX of 2 sec 6259 * while reading camram registers. 6260 */ 6261 if (time_after(jiffies, retry_max_time)) { 6262 ql_dbg(ql_dbg_mbx, vha, 0x1141, 6263 "Failure to read CAMRAM register. 
" 6264 "data=0x%x.\n", *data); 6265 return QLA_FUNCTION_FAILED; 6266 } 6267 msleep(100); 6268 goto retry_rd_reg; 6269 } 6270 ql_dbg(ql_dbg_mbx, vha, 0x1142, "Done %s.\n", __func__); 6271 } 6272 6273 return rval; 6274 } 6275 6276 int 6277 qla83xx_restart_nic_firmware(scsi_qla_host_t *vha) 6278 { 6279 int rval; 6280 mbx_cmd_t mc; 6281 mbx_cmd_t *mcp = &mc; 6282 struct qla_hw_data *ha = vha->hw; 6283 6284 if (!IS_QLA83XX(ha)) 6285 return QLA_FUNCTION_FAILED; 6286 6287 ql_dbg(ql_dbg_mbx, vha, 0x1143, "Entered %s.\n", __func__); 6288 6289 mcp->mb[0] = MBC_RESTART_NIC_FIRMWARE; 6290 mcp->out_mb = MBX_0; 6291 mcp->in_mb = MBX_1|MBX_0; 6292 mcp->tov = MBX_TOV_SECONDS; 6293 mcp->flags = 0; 6294 rval = qla2x00_mailbox_command(vha, mcp); 6295 6296 if (rval != QLA_SUCCESS) { 6297 ql_dbg(ql_dbg_mbx, vha, 0x1144, 6298 "Failed=%x mb[0]=%x mb[1]=%x.\n", 6299 rval, mcp->mb[0], mcp->mb[1]); 6300 qla2xxx_dump_fw(vha); 6301 } else { 6302 ql_dbg(ql_dbg_mbx, vha, 0x1145, "Done %s.\n", __func__); 6303 } 6304 6305 return rval; 6306 } 6307 6308 int 6309 qla83xx_access_control(scsi_qla_host_t *vha, uint16_t options, 6310 uint32_t start_addr, uint32_t end_addr, uint16_t *sector_size) 6311 { 6312 int rval; 6313 mbx_cmd_t mc; 6314 mbx_cmd_t *mcp = &mc; 6315 uint8_t subcode = (uint8_t)options; 6316 struct qla_hw_data *ha = vha->hw; 6317 6318 if (!IS_QLA8031(ha)) 6319 return QLA_FUNCTION_FAILED; 6320 6321 ql_dbg(ql_dbg_mbx, vha, 0x1146, "Entered %s.\n", __func__); 6322 6323 mcp->mb[0] = MBC_SET_ACCESS_CONTROL; 6324 mcp->mb[1] = options; 6325 mcp->out_mb = MBX_1|MBX_0; 6326 if (subcode & BIT_2) { 6327 mcp->mb[2] = LSW(start_addr); 6328 mcp->mb[3] = MSW(start_addr); 6329 mcp->mb[4] = LSW(end_addr); 6330 mcp->mb[5] = MSW(end_addr); 6331 mcp->out_mb |= MBX_5|MBX_4|MBX_3|MBX_2; 6332 } 6333 mcp->in_mb = MBX_2|MBX_1|MBX_0; 6334 if (!(subcode & (BIT_2 | BIT_5))) 6335 mcp->in_mb |= MBX_4|MBX_3; 6336 mcp->tov = MBX_TOV_SECONDS; 6337 mcp->flags = 0; 6338 rval = qla2x00_mailbox_command(vha, mcp); 6339 6340 if (rval != QLA_SUCCESS) { 6341 ql_dbg(ql_dbg_mbx, vha, 0x1147, 6342 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[4]=%x.\n", 6343 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3], 6344 mcp->mb[4]); 6345 qla2xxx_dump_fw(vha); 6346 } else { 6347 if (subcode & BIT_5) 6348 *sector_size = mcp->mb[1]; 6349 else if (subcode & (BIT_6 | BIT_7)) { 6350 ql_dbg(ql_dbg_mbx, vha, 0x1148, 6351 "Driver-lock id=%x%x", mcp->mb[4], mcp->mb[3]); 6352 } else if (subcode & (BIT_3 | BIT_4)) { 6353 ql_dbg(ql_dbg_mbx, vha, 0x1149, 6354 "Flash-lock id=%x%x", mcp->mb[4], mcp->mb[3]); 6355 } 6356 ql_dbg(ql_dbg_mbx, vha, 0x114a, "Done %s.\n", __func__); 6357 } 6358 6359 return rval; 6360 } 6361 6362 int 6363 qla2x00_dump_mctp_data(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr, 6364 uint32_t size) 6365 { 6366 int rval; 6367 mbx_cmd_t mc; 6368 mbx_cmd_t *mcp = &mc; 6369 6370 if (!IS_MCTP_CAPABLE(vha->hw)) 6371 return QLA_FUNCTION_FAILED; 6372 6373 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114f, 6374 "Entered %s.\n", __func__); 6375 6376 mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED; 6377 mcp->mb[1] = LSW(addr); 6378 mcp->mb[2] = MSW(req_dma); 6379 mcp->mb[3] = LSW(req_dma); 6380 mcp->mb[4] = MSW(size); 6381 mcp->mb[5] = LSW(size); 6382 mcp->mb[6] = MSW(MSD(req_dma)); 6383 mcp->mb[7] = LSW(MSD(req_dma)); 6384 mcp->mb[8] = MSW(addr); 6385 /* Setting RAM ID to valid */ 6386 /* For MCTP RAM ID is 0x40 */ 6387 mcp->mb[10] = BIT_7 | 0x40; 6388 6389 mcp->out_mb |= MBX_10|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1| 6390 MBX_0; 6391 6392 mcp->in_mb = 
MBX_0; 6393 mcp->tov = MBX_TOV_SECONDS; 6394 mcp->flags = 0; 6395 rval = qla2x00_mailbox_command(vha, mcp); 6396 6397 if (rval != QLA_SUCCESS) { 6398 ql_dbg(ql_dbg_mbx, vha, 0x114e, 6399 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 6400 } else { 6401 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114d, 6402 "Done %s.\n", __func__); 6403 } 6404 6405 return rval; 6406 } 6407 6408 int 6409 qla26xx_dport_diagnostics(scsi_qla_host_t *vha, 6410 void *dd_buf, uint size, uint options) 6411 { 6412 int rval; 6413 mbx_cmd_t mc; 6414 mbx_cmd_t *mcp = &mc; 6415 dma_addr_t dd_dma; 6416 6417 if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) && 6418 !IS_QLA28XX(vha->hw)) 6419 return QLA_FUNCTION_FAILED; 6420 6421 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x119f, 6422 "Entered %s.\n", __func__); 6423 6424 dd_dma = dma_map_single(&vha->hw->pdev->dev, 6425 dd_buf, size, DMA_FROM_DEVICE); 6426 if (dma_mapping_error(&vha->hw->pdev->dev, dd_dma)) { 6427 ql_log(ql_log_warn, vha, 0x1194, "Failed to map dma buffer.\n"); 6428 return QLA_MEMORY_ALLOC_FAILED; 6429 } 6430 6431 memset(dd_buf, 0, size); 6432 6433 mcp->mb[0] = MBC_DPORT_DIAGNOSTICS; 6434 mcp->mb[1] = options; 6435 mcp->mb[2] = MSW(LSD(dd_dma)); 6436 mcp->mb[3] = LSW(LSD(dd_dma)); 6437 mcp->mb[6] = MSW(MSD(dd_dma)); 6438 mcp->mb[7] = LSW(MSD(dd_dma)); 6439 mcp->mb[8] = size; 6440 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 6441 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0; 6442 mcp->buf_size = size; 6443 mcp->flags = MBX_DMA_IN; 6444 mcp->tov = MBX_TOV_SECONDS * 4; 6445 rval = qla2x00_mailbox_command(vha, mcp); 6446 6447 if (rval != QLA_SUCCESS) { 6448 ql_dbg(ql_dbg_mbx, vha, 0x1195, "Failed=%x.\n", rval); 6449 } else { 6450 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1196, 6451 "Done %s.\n", __func__); 6452 } 6453 6454 dma_unmap_single(&vha->hw->pdev->dev, dd_dma, 6455 size, DMA_FROM_DEVICE); 6456 6457 return rval; 6458 } 6459 6460 static void qla2x00_async_mb_sp_done(srb_t *sp, int res) 6461 { 6462 sp->u.iocb_cmd.u.mbx.rc = res; 6463 6464 complete(&sp->u.iocb_cmd.u.mbx.comp); 6465 /* don't free sp here. Let the caller do the free */ 6466 } 6467 6468 /* 6469 * This mailbox uses the iocb interface to send MB command. 6470 * This allows non-critial (non chip setup) command to go 6471 * out in parrallel. 6472 */ 6473 int qla24xx_send_mb_cmd(struct scsi_qla_host *vha, mbx_cmd_t *mcp) 6474 { 6475 int rval = QLA_FUNCTION_FAILED; 6476 srb_t *sp; 6477 struct srb_iocb *c; 6478 6479 if (!vha->hw->flags.fw_started) 6480 goto done; 6481 6482 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL); 6483 if (!sp) 6484 goto done; 6485 6486 sp->type = SRB_MB_IOCB; 6487 sp->name = mb_to_str(mcp->mb[0]); 6488 6489 c = &sp->u.iocb_cmd; 6490 c->timeout = qla2x00_async_iocb_timeout; 6491 init_completion(&c->u.mbx.comp); 6492 6493 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); 6494 6495 memcpy(sp->u.iocb_cmd.u.mbx.out_mb, mcp->mb, SIZEOF_IOCB_MB_REG); 6496 6497 sp->done = qla2x00_async_mb_sp_done; 6498 6499 rval = qla2x00_start_sp(sp); 6500 if (rval != QLA_SUCCESS) { 6501 ql_dbg(ql_dbg_mbx, vha, 0x1018, 6502 "%s: %s Failed submission. %x.\n", 6503 __func__, sp->name, rval); 6504 goto done_free_sp; 6505 } 6506 6507 ql_dbg(ql_dbg_mbx, vha, 0x113f, "MB:%s hndl %x submitted\n", 6508 sp->name, sp->handle); 6509 6510 wait_for_completion(&c->u.mbx.comp); 6511 memcpy(mcp->mb, sp->u.iocb_cmd.u.mbx.in_mb, SIZEOF_IOCB_MB_REG); 6512 6513 rval = c->u.mbx.rc; 6514 switch (rval) { 6515 case QLA_FUNCTION_TIMEOUT: 6516 ql_dbg(ql_dbg_mbx, vha, 0x1140, "%s: %s Timeout. 
%x.\n", 6517 __func__, sp->name, rval); 6518 break; 6519 case QLA_SUCCESS: 6520 ql_dbg(ql_dbg_mbx, vha, 0x119d, "%s: %s done.\n", 6521 __func__, sp->name); 6522 break; 6523 default: 6524 ql_dbg(ql_dbg_mbx, vha, 0x119e, "%s: %s Failed. %x.\n", 6525 __func__, sp->name, rval); 6526 break; 6527 } 6528 6529 done_free_sp: 6530 sp->free(sp); 6531 done: 6532 return rval; 6533 } 6534 6535 /* 6536 * qla24xx_gpdb_wait 6537 * NOTE: Do not call this routine from DPC thread 6538 */ 6539 int qla24xx_gpdb_wait(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt) 6540 { 6541 int rval = QLA_FUNCTION_FAILED; 6542 dma_addr_t pd_dma; 6543 struct port_database_24xx *pd; 6544 struct qla_hw_data *ha = vha->hw; 6545 mbx_cmd_t mc; 6546 6547 if (!vha->hw->flags.fw_started) 6548 goto done; 6549 6550 pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma); 6551 if (pd == NULL) { 6552 ql_log(ql_log_warn, vha, 0xd047, 6553 "Failed to allocate port database structure.\n"); 6554 goto done_free_sp; 6555 } 6556 6557 memset(&mc, 0, sizeof(mc)); 6558 mc.mb[0] = MBC_GET_PORT_DATABASE; 6559 mc.mb[1] = fcport->loop_id; 6560 mc.mb[2] = MSW(pd_dma); 6561 mc.mb[3] = LSW(pd_dma); 6562 mc.mb[6] = MSW(MSD(pd_dma)); 6563 mc.mb[7] = LSW(MSD(pd_dma)); 6564 mc.mb[9] = vha->vp_idx; 6565 mc.mb[10] = opt; 6566 6567 rval = qla24xx_send_mb_cmd(vha, &mc); 6568 if (rval != QLA_SUCCESS) { 6569 ql_dbg(ql_dbg_mbx, vha, 0x1193, 6570 "%s: %8phC fail\n", __func__, fcport->port_name); 6571 goto done_free_sp; 6572 } 6573 6574 rval = __qla24xx_parse_gpdb(vha, fcport, pd); 6575 6576 ql_dbg(ql_dbg_mbx, vha, 0x1197, "%s: %8phC done\n", 6577 __func__, fcport->port_name); 6578 6579 done_free_sp: 6580 if (pd) 6581 dma_pool_free(ha->s_dma_pool, pd, pd_dma); 6582 done: 6583 return rval; 6584 } 6585 6586 int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, 6587 struct port_database_24xx *pd) 6588 { 6589 int rval = QLA_SUCCESS; 6590 uint64_t zero = 0; 6591 u8 current_login_state, last_login_state; 6592 6593 if (NVME_TARGET(vha->hw, fcport)) { 6594 current_login_state = pd->current_login_state >> 4; 6595 last_login_state = pd->last_login_state >> 4; 6596 } else { 6597 current_login_state = pd->current_login_state & 0xf; 6598 last_login_state = pd->last_login_state & 0xf; 6599 } 6600 6601 /* Check for logged in state. */ 6602 if (current_login_state != PDS_PRLI_COMPLETE) { 6603 ql_dbg(ql_dbg_mbx, vha, 0x119a, 6604 "Unable to verify login-state (%x/%x) for loop_id %x.\n", 6605 current_login_state, last_login_state, fcport->loop_id); 6606 rval = QLA_FUNCTION_FAILED; 6607 goto gpd_error_out; 6608 } 6609 6610 if (fcport->loop_id == FC_NO_LOOP_ID || 6611 (memcmp(fcport->port_name, (uint8_t *)&zero, 8) && 6612 memcmp(fcport->port_name, pd->port_name, 8))) { 6613 /* We lost the device mid way. */ 6614 rval = QLA_NOT_LOGGED_IN; 6615 goto gpd_error_out; 6616 } 6617 6618 /* Names are little-endian. */ 6619 memcpy(fcport->node_name, pd->node_name, WWN_SIZE); 6620 memcpy(fcport->port_name, pd->port_name, WWN_SIZE); 6621 6622 /* Get port_id of device. 
*/ 6623 fcport->d_id.b.domain = pd->port_id[0]; 6624 fcport->d_id.b.area = pd->port_id[1]; 6625 fcport->d_id.b.al_pa = pd->port_id[2]; 6626 fcport->d_id.b.rsvd_1 = 0; 6627 6628 ql_dbg(ql_dbg_disc, vha, 0x2062, 6629 "%8phC SVC Param w3 %02x%02x", 6630 fcport->port_name, 6631 pd->prli_svc_param_word_3[1], 6632 pd->prli_svc_param_word_3[0]); 6633 6634 if (NVME_TARGET(vha->hw, fcport)) { 6635 fcport->port_type = FCT_NVME; 6636 if ((pd->prli_svc_param_word_3[0] & BIT_5) == 0) 6637 fcport->port_type |= FCT_NVME_INITIATOR; 6638 if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0) 6639 fcport->port_type |= FCT_NVME_TARGET; 6640 if ((pd->prli_svc_param_word_3[0] & BIT_3) == 0) 6641 fcport->port_type |= FCT_NVME_DISCOVERY; 6642 } else { 6643 /* If not target must be initiator or unknown type. */ 6644 if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0) 6645 fcport->port_type = FCT_INITIATOR; 6646 else 6647 fcport->port_type = FCT_TARGET; 6648 } 6649 /* Passback COS information. */ 6650 fcport->supported_classes = (pd->flags & PDF_CLASS_2) ? 6651 FC_COS_CLASS2 : FC_COS_CLASS3; 6652 6653 if (pd->prli_svc_param_word_3[0] & BIT_7) { 6654 fcport->flags |= FCF_CONF_COMP_SUPPORTED; 6655 fcport->conf_compl_supported = 1; 6656 } 6657 6658 gpd_error_out: 6659 return rval; 6660 } 6661 6662 /* 6663 * qla24xx_gidlist__wait 6664 * NOTE: don't call this routine from DPC thread. 6665 */ 6666 int qla24xx_gidlist_wait(struct scsi_qla_host *vha, 6667 void *id_list, dma_addr_t id_list_dma, uint16_t *entries) 6668 { 6669 int rval = QLA_FUNCTION_FAILED; 6670 mbx_cmd_t mc; 6671 6672 if (!vha->hw->flags.fw_started) 6673 goto done; 6674 6675 memset(&mc, 0, sizeof(mc)); 6676 mc.mb[0] = MBC_GET_ID_LIST; 6677 mc.mb[2] = MSW(id_list_dma); 6678 mc.mb[3] = LSW(id_list_dma); 6679 mc.mb[6] = MSW(MSD(id_list_dma)); 6680 mc.mb[7] = LSW(MSD(id_list_dma)); 6681 mc.mb[8] = 0; 6682 mc.mb[9] = vha->vp_idx; 6683 6684 rval = qla24xx_send_mb_cmd(vha, &mc); 6685 if (rval != QLA_SUCCESS) { 6686 ql_dbg(ql_dbg_mbx, vha, 0x119b, 6687 "%s: fail\n", __func__); 6688 } else { 6689 *entries = mc.mb[1]; 6690 ql_dbg(ql_dbg_mbx, vha, 0x119c, 6691 "%s: done\n", __func__); 6692 } 6693 done: 6694 return rval; 6695 } 6696 6697 int qla27xx_set_zio_threshold(scsi_qla_host_t *vha, uint16_t value) 6698 { 6699 int rval; 6700 mbx_cmd_t mc; 6701 mbx_cmd_t *mcp = &mc; 6702 6703 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1200, 6704 "Entered %s\n", __func__); 6705 6706 memset(mcp->mb, 0 , sizeof(mcp->mb)); 6707 mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD; 6708 mcp->mb[1] = 1; 6709 mcp->mb[2] = value; 6710 mcp->out_mb = MBX_2 | MBX_1 | MBX_0; 6711 mcp->in_mb = MBX_2 | MBX_0; 6712 mcp->tov = MBX_TOV_SECONDS; 6713 mcp->flags = 0; 6714 6715 rval = qla2x00_mailbox_command(vha, mcp); 6716 6717 ql_dbg(ql_dbg_mbx, vha, 0x1201, "%s %x\n", 6718 (rval != QLA_SUCCESS) ? "Failed" : "Done", rval); 6719 6720 return rval; 6721 } 6722 6723 int qla27xx_get_zio_threshold(scsi_qla_host_t *vha, uint16_t *value) 6724 { 6725 int rval; 6726 mbx_cmd_t mc; 6727 mbx_cmd_t *mcp = &mc; 6728 6729 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1203, 6730 "Entered %s\n", __func__); 6731 6732 memset(mcp->mb, 0, sizeof(mcp->mb)); 6733 mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD; 6734 mcp->mb[1] = 0; 6735 mcp->out_mb = MBX_1 | MBX_0; 6736 mcp->in_mb = MBX_2 | MBX_0; 6737 mcp->tov = MBX_TOV_SECONDS; 6738 mcp->flags = 0; 6739 6740 rval = qla2x00_mailbox_command(vha, mcp); 6741 if (rval == QLA_SUCCESS) 6742 *value = mc.mb[2]; 6743 6744 ql_dbg(ql_dbg_mbx, vha, 0x1205, "%s %x\n", 6745 (rval != QLA_SUCCESS) ? 
"Failed" : "Done", rval); 6746 6747 return rval; 6748 } 6749 6750 int 6751 qla2x00_read_sfp_dev(struct scsi_qla_host *vha, char *buf, int count) 6752 { 6753 struct qla_hw_data *ha = vha->hw; 6754 uint16_t iter, addr, offset; 6755 dma_addr_t phys_addr; 6756 int rval, c; 6757 u8 *sfp_data; 6758 6759 memset(ha->sfp_data, 0, SFP_DEV_SIZE); 6760 addr = 0xa0; 6761 phys_addr = ha->sfp_data_dma; 6762 sfp_data = ha->sfp_data; 6763 offset = c = 0; 6764 6765 for (iter = 0; iter < SFP_DEV_SIZE / SFP_BLOCK_SIZE; iter++) { 6766 if (iter == 4) { 6767 /* Skip to next device address. */ 6768 addr = 0xa2; 6769 offset = 0; 6770 } 6771 6772 rval = qla2x00_read_sfp(vha, phys_addr, sfp_data, 6773 addr, offset, SFP_BLOCK_SIZE, BIT_1); 6774 if (rval != QLA_SUCCESS) { 6775 ql_log(ql_log_warn, vha, 0x706d, 6776 "Unable to read SFP data (%x/%x/%x).\n", rval, 6777 addr, offset); 6778 6779 return rval; 6780 } 6781 6782 if (buf && (c < count)) { 6783 u16 sz; 6784 6785 if ((count - c) >= SFP_BLOCK_SIZE) 6786 sz = SFP_BLOCK_SIZE; 6787 else 6788 sz = count - c; 6789 6790 memcpy(buf, sfp_data, sz); 6791 buf += SFP_BLOCK_SIZE; 6792 c += sz; 6793 } 6794 phys_addr += SFP_BLOCK_SIZE; 6795 sfp_data += SFP_BLOCK_SIZE; 6796 offset += SFP_BLOCK_SIZE; 6797 } 6798 6799 return rval; 6800 } 6801 6802 int qla24xx_res_count_wait(struct scsi_qla_host *vha, 6803 uint16_t *out_mb, int out_mb_sz) 6804 { 6805 int rval = QLA_FUNCTION_FAILED; 6806 mbx_cmd_t mc; 6807 6808 if (!vha->hw->flags.fw_started) 6809 goto done; 6810 6811 memset(&mc, 0, sizeof(mc)); 6812 mc.mb[0] = MBC_GET_RESOURCE_COUNTS; 6813 6814 rval = qla24xx_send_mb_cmd(vha, &mc); 6815 if (rval != QLA_SUCCESS) { 6816 ql_dbg(ql_dbg_mbx, vha, 0xffff, 6817 "%s: fail\n", __func__); 6818 } else { 6819 if (out_mb_sz <= SIZEOF_IOCB_MB_REG) 6820 memcpy(out_mb, mc.mb, out_mb_sz); 6821 else 6822 memcpy(out_mb, mc.mb, SIZEOF_IOCB_MB_REG); 6823 6824 ql_dbg(ql_dbg_mbx, vha, 0xffff, 6825 "%s: done\n", __func__); 6826 } 6827 done: 6828 return rval; 6829 } 6830 6831 int qla28xx_secure_flash_update(scsi_qla_host_t *vha, uint16_t opts, 6832 uint16_t region, uint32_t len, dma_addr_t sfub_dma_addr, 6833 uint32_t sfub_len) 6834 { 6835 int rval; 6836 mbx_cmd_t mc; 6837 mbx_cmd_t *mcp = &mc; 6838 6839 mcp->mb[0] = MBC_SECURE_FLASH_UPDATE; 6840 mcp->mb[1] = opts; 6841 mcp->mb[2] = region; 6842 mcp->mb[3] = MSW(len); 6843 mcp->mb[4] = LSW(len); 6844 mcp->mb[5] = MSW(sfub_dma_addr); 6845 mcp->mb[6] = LSW(sfub_dma_addr); 6846 mcp->mb[7] = MSW(MSD(sfub_dma_addr)); 6847 mcp->mb[8] = LSW(MSD(sfub_dma_addr)); 6848 mcp->mb[9] = sfub_len; 6849 mcp->out_mb = 6850 MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 6851 mcp->in_mb = MBX_2|MBX_1|MBX_0; 6852 mcp->tov = MBX_TOV_SECONDS; 6853 mcp->flags = 0; 6854 rval = qla2x00_mailbox_command(vha, mcp); 6855 6856 if (rval != QLA_SUCCESS) { 6857 ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s(%ld): failed rval 0x%x, %x %x %x", 6858 __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1], 6859 mcp->mb[2]); 6860 } 6861 6862 return rval; 6863 } 6864 6865 int qla2xxx_write_remote_register(scsi_qla_host_t *vha, uint32_t addr, 6866 uint32_t data) 6867 { 6868 int rval; 6869 mbx_cmd_t mc; 6870 mbx_cmd_t *mcp = &mc; 6871 6872 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8, 6873 "Entered %s.\n", __func__); 6874 6875 mcp->mb[0] = MBC_WRITE_REMOTE_REG; 6876 mcp->mb[1] = LSW(addr); 6877 mcp->mb[2] = MSW(addr); 6878 mcp->mb[3] = LSW(data); 6879 mcp->mb[4] = MSW(data); 6880 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 6881 mcp->in_mb = MBX_1|MBX_0; 6882 mcp->tov = MBX_TOV_SECONDS; 6883 
mcp->flags = 0; 6884 rval = qla2x00_mailbox_command(vha, mcp); 6885 6886 if (rval != QLA_SUCCESS) { 6887 ql_dbg(ql_dbg_mbx, vha, 0x10e9, 6888 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 6889 } else { 6890 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea, 6891 "Done %s.\n", __func__); 6892 } 6893 6894 return rval; 6895 } 6896 6897 int qla2xxx_read_remote_register(scsi_qla_host_t *vha, uint32_t addr, 6898 uint32_t *data) 6899 { 6900 int rval; 6901 mbx_cmd_t mc; 6902 mbx_cmd_t *mcp = &mc; 6903 6904 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8, 6905 "Entered %s.\n", __func__); 6906 6907 mcp->mb[0] = MBC_READ_REMOTE_REG; 6908 mcp->mb[1] = LSW(addr); 6909 mcp->mb[2] = MSW(addr); 6910 mcp->out_mb = MBX_2|MBX_1|MBX_0; 6911 mcp->in_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 6912 mcp->tov = MBX_TOV_SECONDS; 6913 mcp->flags = 0; 6914 rval = qla2x00_mailbox_command(vha, mcp); 6915 6916 *data = (uint32_t)((((uint32_t)mcp->mb[4]) << 16) | mcp->mb[3]); 6917 6918 if (rval != QLA_SUCCESS) { 6919 ql_dbg(ql_dbg_mbx, vha, 0x10e9, 6920 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 6921 } else { 6922 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea, 6923 "Done %s.\n", __func__); 6924 } 6925 6926 return rval; 6927 } 6928 6929 int 6930 ql26xx_led_config(scsi_qla_host_t *vha, uint16_t options, uint16_t *led) 6931 { 6932 struct qla_hw_data *ha = vha->hw; 6933 mbx_cmd_t mc; 6934 mbx_cmd_t *mcp = &mc; 6935 int rval; 6936 6937 if (!IS_QLA2031(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 6938 return QLA_FUNCTION_FAILED; 6939 6940 ql_dbg(ql_dbg_mbx, vha, 0x7070, "Entered %s (options=%x).\n", 6941 __func__, options); 6942 6943 mcp->mb[0] = MBC_SET_GET_FC_LED_CONFIG; 6944 mcp->mb[1] = options; 6945 mcp->out_mb = MBX_1|MBX_0; 6946 mcp->in_mb = MBX_1|MBX_0; 6947 if (options & BIT_0) { 6948 if (options & BIT_1) { 6949 mcp->mb[2] = led[2]; 6950 mcp->out_mb |= MBX_2; 6951 } 6952 if (options & BIT_2) { 6953 mcp->mb[3] = led[0]; 6954 mcp->out_mb |= MBX_3; 6955 } 6956 if (options & BIT_3) { 6957 mcp->mb[4] = led[1]; 6958 mcp->out_mb |= MBX_4; 6959 } 6960 } else { 6961 mcp->in_mb |= MBX_4|MBX_3|MBX_2; 6962 } 6963 mcp->tov = MBX_TOV_SECONDS; 6964 mcp->flags = 0; 6965 rval = qla2x00_mailbox_command(vha, mcp); 6966 if (rval) { 6967 ql_dbg(ql_dbg_mbx, vha, 0x7071, "Failed %s %x (mb=%x,%x)\n", 6968 __func__, rval, mcp->mb[0], mcp->mb[1]); 6969 return rval; 6970 } 6971 6972 if (options & BIT_0) { 6973 ha->beacon_blink_led = 0; 6974 ql_dbg(ql_dbg_mbx, vha, 0x7072, "Done %s\n", __func__); 6975 } else { 6976 led[2] = mcp->mb[2]; 6977 led[0] = mcp->mb[3]; 6978 led[1] = mcp->mb[4]; 6979 ql_dbg(ql_dbg_mbx, vha, 0x7073, "Done %s (led=%x,%x,%x)\n", 6980 __func__, led[0], led[1], led[2]); 6981 } 6982 6983 return rval; 6984 } 6985 6986 /** 6987 * qla_no_op_mb(): This MB is used to check if FW is still alive and 6988 * able to generate an interrupt. 
Otherwise, a timeout will trigger 6989 * FW dump + reset 6990 * @vha: host adapter pointer 6991 * Return: None 6992 */ 6993 void qla_no_op_mb(struct scsi_qla_host *vha) 6994 { 6995 mbx_cmd_t mc; 6996 mbx_cmd_t *mcp = &mc; 6997 int rval; 6998 6999 memset(&mc, 0, sizeof(mc)); 7000 mcp->mb[0] = 0; // noop cmd= 0 7001 mcp->out_mb = MBX_0; 7002 mcp->in_mb = MBX_0; 7003 mcp->tov = 5; 7004 mcp->flags = 0; 7005 rval = qla2x00_mailbox_command(vha, mcp); 7006 7007 if (rval) { 7008 ql_dbg(ql_dbg_async, vha, 0x7071, 7009 "Failed %s %x\n", __func__, rval); 7010 } 7011 } 7012 7013 int qla_mailbox_passthru(scsi_qla_host_t *vha, 7014 uint16_t *mbx_in, uint16_t *mbx_out) 7015 { 7016 mbx_cmd_t mc; 7017 mbx_cmd_t *mcp = &mc; 7018 int rval = -EINVAL; 7019 7020 memset(&mc, 0, sizeof(mc)); 7021 /* Receiving all 32 register's contents */ 7022 memcpy(&mcp->mb, (char *)mbx_in, (32 * sizeof(uint16_t))); 7023 7024 mcp->out_mb = 0xFFFFFFFF; 7025 mcp->in_mb = 0xFFFFFFFF; 7026 7027 mcp->tov = MBX_TOV_SECONDS; 7028 mcp->flags = 0; 7029 mcp->bufp = NULL; 7030 7031 rval = qla2x00_mailbox_command(vha, mcp); 7032 7033 if (rval != QLA_SUCCESS) { 7034 ql_dbg(ql_dbg_mbx, vha, 0xf0a2, 7035 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 7036 } else { 7037 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xf0a3, "Done %s.\n", 7038 __func__); 7039 /* passing all 32 register's contents */ 7040 memcpy(mbx_out, &mcp->mb, 32 * sizeof(uint16_t)); 7041 } 7042 7043 return rval; 7044 } 7045