// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2014 QLogic Corporation
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/delay.h>
#include <linux/gfp.h>

#ifdef CONFIG_PPC
#define IS_PPCARCH	true
#else
#define IS_PPCARCH	false
#endif

static struct mb_cmd_name {
	uint16_t cmd;
	const char *str;
} mb_str[] = {
	{MBC_GET_PORT_DATABASE,		"GPDB"},
	{MBC_GET_ID_LIST,		"GIDList"},
	{MBC_GET_LINK_PRIV_STATS,	"Stats"},
	{MBC_GET_RESOURCE_COUNTS,	"ResCnt"},
};

static const char *mb_to_str(uint16_t cmd)
{
	int i;
	struct mb_cmd_name *e;

	for (i = 0; i < ARRAY_SIZE(mb_str); i++) {
		e = mb_str + i;
		if (cmd == e->cmd)
			return e->str;
	}
	return "unknown";
}

static struct rom_cmd {
	uint16_t cmd;
} rom_cmds[] = {
	{ MBC_LOAD_RAM },
	{ MBC_EXECUTE_FIRMWARE },
	{ MBC_READ_RAM_WORD },
	{ MBC_MAILBOX_REGISTER_TEST },
	{ MBC_VERIFY_CHECKSUM },
	{ MBC_GET_FIRMWARE_VERSION },
	{ MBC_LOAD_RISC_RAM },
	{ MBC_DUMP_RISC_RAM },
	{ MBC_LOAD_RISC_RAM_EXTENDED },
	{ MBC_DUMP_RISC_RAM_EXTENDED },
	{ MBC_WRITE_RAM_WORD_EXTENDED },
	{ MBC_READ_RAM_EXTENDED },
	{ MBC_GET_RESOURCE_COUNTS },
	{ MBC_SET_FIRMWARE_OPTION },
	{ MBC_MID_INITIALIZE_FIRMWARE },
	{ MBC_GET_FIRMWARE_STATE },
	{ MBC_GET_MEM_OFFLOAD_CNTRL_STAT },
	{ MBC_GET_RETRY_COUNT },
	{ MBC_TRACE_CONTROL },
	{ MBC_INITIALIZE_MULTIQ },
	{ MBC_IOCB_COMMAND_A64 },
	{ MBC_GET_ADAPTER_LOOP_ID },
	{ MBC_READ_SFP },
	{ MBC_SET_RNID_PARAMS },
	{ MBC_GET_RNID_PARAMS },
	{ MBC_GET_SET_ZIO_THRESHOLD },
};

static int is_rom_cmd(uint16_t cmd)
{
	int i;
	struct rom_cmd *wc;

	for (i = 0; i < ARRAY_SIZE(rom_cmds); i++) {
		wc = rom_cmds + i;
		if (wc->cmd == cmd)
			return 1;
	}

	return 0;
}

/*
 * qla2x00_mailbox_command
 *	Issue a mailbox command and wait for completion.
 *
 * Input:
 *	ha = adapter block pointer.
 *	mcp = driver internal mbx struct pointer.
 *
 * Output:
 *	mb[MAX_MAILBOX_REGISTER_COUNT] = returned mailbox data.
 *
 * Returns:
 *	0 : QLA_SUCCESS = command performed successfully
 *	1 : QLA_FUNCTION_FAILED (error encountered)
 *	6 : QLA_FUNCTION_TIMEOUT (timeout condition encountered)
 *
 * Context:
 *	Kernel context.
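 *
 * Usage sketch, modelled on callers later in this file (e.g.
 * qla2x00_get_firmware_state()); mb[0] selects the mailbox command,
 * out_mb/in_mb are bitmaps of mailbox registers to write/read, and tov
 * is the completion timeout in seconds:
 *
 *	mbx_cmd_t mc;
 *	mbx_cmd_t *mcp = &mc;
 *
 *	mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
 *	mcp->out_mb = MBX_0;
 *	mcp->in_mb = MBX_1|MBX_0;
 *	mcp->tov = MBX_TOV_SECONDS;
 *	mcp->flags = 0;
 *	rval = qla2x00_mailbox_command(vha, mcp);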
 */
static int
qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
{
	int rval, i;
	unsigned long flags = 0;
	device_reg_t *reg;
	uint8_t abort_active, eeh_delay;
	uint8_t io_lock_on;
	uint16_t command = 0;
	uint16_t *iptr;
	__le16 __iomem *optr;
	uint32_t cnt;
	uint32_t mboxes;
	unsigned long wait_time;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
	u32 chip_reset;


	ql_dbg(ql_dbg_mbx, vha, 0x1000, "Entered %s.\n", __func__);

	if (ha->pdev->error_state == pci_channel_io_perm_failure) {
		ql_log(ql_log_warn, vha, 0x1001,
		    "PCI channel failed permanently, exiting.\n");
		return QLA_FUNCTION_TIMEOUT;
	}

	if (vha->device_flags & DFLG_DEV_FAILED) {
		ql_log(ql_log_warn, vha, 0x1002,
		    "Device in failed state, exiting.\n");
		return QLA_FUNCTION_TIMEOUT;
	}

	/* if PCI error, then avoid mbx processing.*/
	if (test_bit(PFLG_DISCONNECTED, &base_vha->dpc_flags) &&
	    test_bit(UNLOADING, &base_vha->dpc_flags)) {
		ql_log(ql_log_warn, vha, 0xd04e,
		    "PCI error, exiting.\n");
		return QLA_FUNCTION_TIMEOUT;
	}
	eeh_delay = 0;
	reg = ha->iobase;
	io_lock_on = base_vha->flags.init_done;

	rval = QLA_SUCCESS;
	abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
	chip_reset = ha->chip_reset;

	if (ha->flags.pci_channel_io_perm_failure) {
		ql_log(ql_log_warn, vha, 0x1003,
		    "Perm failure on EEH timeout MBX, exiting.\n");
		return QLA_FUNCTION_TIMEOUT;
	}

	if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
		/* Setting Link-Down error */
		mcp->mb[0] = MBS_LINK_DOWN_ERROR;
		ql_log(ql_log_warn, vha, 0x1004,
		    "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
		return QLA_FUNCTION_TIMEOUT;
	}

	/* check if ISP abort is active and return cmd with timeout */
	if (((test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
	    test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) &&
	    !is_rom_cmd(mcp->mb[0])) || ha->flags.eeh_busy) {
		ql_log(ql_log_info, vha, 0x1005,
		    "Cmd 0x%x aborted with timeout since ISP Abort is pending\n",
		    mcp->mb[0]);
		return QLA_FUNCTION_TIMEOUT;
	}

	atomic_inc(&ha->num_pend_mbx_stage1);
	/*
	 * Wait for active mailbox commands to finish by waiting at most tov
	 * seconds. This is to serialize actual issuing of mailbox cmds during
	 * non ISP abort time.
	 */
	if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) {
		/* Timeout occurred. Return error.
		 */
		ql_log(ql_log_warn, vha, 0xd035,
		    "Cmd access timeout, cmd=0x%x, Exiting.\n",
		    mcp->mb[0]);
		vha->hw_err_cnt++;
		atomic_dec(&ha->num_pend_mbx_stage1);
		return QLA_FUNCTION_TIMEOUT;
	}
	atomic_dec(&ha->num_pend_mbx_stage1);
	if (ha->flags.purge_mbox || chip_reset != ha->chip_reset ||
	    ha->flags.eeh_busy) {
		ql_log(ql_log_warn, vha, 0xd035,
		    "Error detected: purge[%d] eeh[%d] cmd=0x%x, Exiting.\n",
		    ha->flags.purge_mbox, ha->flags.eeh_busy, mcp->mb[0]);
		rval = QLA_ABORTED;
		goto premature_exit;
	}


	/* Save mailbox command for debug */
	ha->mcp = mcp;

	ql_dbg(ql_dbg_mbx, vha, 0x1006,
	    "Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]);

	spin_lock_irqsave(&ha->hardware_lock, flags);

	if (ha->flags.purge_mbox || chip_reset != ha->chip_reset ||
	    ha->flags.mbox_busy) {
		rval = QLA_ABORTED;
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		goto premature_exit;
	}
	ha->flags.mbox_busy = 1;

	/* Load mailbox registers. */
	if (IS_P3P_TYPE(ha))
		optr = &reg->isp82.mailbox_in[0];
	else if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha)))
		optr = &reg->isp24.mailbox0;
	else
		optr = MAILBOX_REG(ha, &reg->isp, 0);

	iptr = mcp->mb;
	command = mcp->mb[0];
	mboxes = mcp->out_mb;

	ql_dbg(ql_dbg_mbx, vha, 0x1111,
	    "Mailbox registers (OUT):\n");
	for (cnt = 0; cnt < ha->mbx_count; cnt++) {
		if (IS_QLA2200(ha) && cnt == 8)
			optr = MAILBOX_REG(ha, &reg->isp, 8);
		if (mboxes & BIT_0) {
			ql_dbg(ql_dbg_mbx, vha, 0x1112,
			    "mbox[%d]<-0x%04x\n", cnt, *iptr);
			wrt_reg_word(optr, *iptr);
		}

		mboxes >>= 1;
		optr++;
		iptr++;
	}

	ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1117,
	    "I/O Address = %p.\n", optr);

	/* Issue set host interrupt command to send cmd out. */
	ha->flags.mbox_int = 0;
	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);

	/* Unlock mbx registers and wait for interrupt */
	ql_dbg(ql_dbg_mbx, vha, 0x100f,
	    "Going to unlock irq & waiting for interrupts. "
	    "jiffies=%lx.\n", jiffies);

	/* Wait for mbx cmd completion until timeout */
	atomic_inc(&ha->num_pend_mbx_stage2);
	if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) {
		set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);

		if (IS_P3P_TYPE(ha))
			wrt_reg_dword(&reg->isp82.hint, HINT_MBX_INT_PENDING);
		else if (IS_FWI2_CAPABLE(ha))
			wrt_reg_dword(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
		else
			wrt_reg_word(&reg->isp.hccr, HCCR_SET_HOST_INT);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		wait_time = jiffies;
		atomic_inc(&ha->num_pend_mbx_stage3);
		if (!wait_for_completion_timeout(&ha->mbx_intr_comp,
		    mcp->tov * HZ)) {
			if (chip_reset != ha->chip_reset) {
				eeh_delay = ha->flags.eeh_busy ? 1 : 0;

				spin_lock_irqsave(&ha->hardware_lock, flags);
				ha->flags.mbox_busy = 0;
				spin_unlock_irqrestore(&ha->hardware_lock,
				    flags);
				atomic_dec(&ha->num_pend_mbx_stage2);
				atomic_dec(&ha->num_pend_mbx_stage3);
				rval = QLA_ABORTED;
				goto premature_exit;
			}
			ql_dbg(ql_dbg_mbx, vha, 0x117a,
			    "cmd=%x Timeout.\n", command);
			spin_lock_irqsave(&ha->hardware_lock, flags);
			clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
			spin_unlock_irqrestore(&ha->hardware_lock, flags);

		} else if (ha->flags.purge_mbox ||
		    chip_reset != ha->chip_reset) {
			eeh_delay = ha->flags.eeh_busy ? 1 : 0;

			spin_lock_irqsave(&ha->hardware_lock, flags);
			ha->flags.mbox_busy = 0;
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
			atomic_dec(&ha->num_pend_mbx_stage2);
			atomic_dec(&ha->num_pend_mbx_stage3);
			rval = QLA_ABORTED;
			goto premature_exit;
		}
		atomic_dec(&ha->num_pend_mbx_stage3);

		if (time_after(jiffies, wait_time + 5 * HZ))
			ql_log(ql_log_warn, vha, 0x1015, "cmd=0x%x, waited %d msecs\n",
			    command, jiffies_to_msecs(jiffies - wait_time));
	} else {
		ql_dbg(ql_dbg_mbx, vha, 0x1011,
		    "Cmd=%x Polling Mode.\n", command);

		if (IS_P3P_TYPE(ha)) {
			if (rd_reg_dword(&reg->isp82.hint) &
			    HINT_MBX_INT_PENDING) {
				ha->flags.mbox_busy = 0;
				spin_unlock_irqrestore(&ha->hardware_lock,
				    flags);
				atomic_dec(&ha->num_pend_mbx_stage2);
				ql_dbg(ql_dbg_mbx, vha, 0x1012,
				    "Pending mailbox timeout, exiting.\n");
				vha->hw_err_cnt++;
				rval = QLA_FUNCTION_TIMEOUT;
				goto premature_exit;
			}
			wrt_reg_dword(&reg->isp82.hint, HINT_MBX_INT_PENDING);
		} else if (IS_FWI2_CAPABLE(ha))
			wrt_reg_dword(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
		else
			wrt_reg_word(&reg->isp.hccr, HCCR_SET_HOST_INT);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */
		while (!ha->flags.mbox_int) {
			if (ha->flags.purge_mbox ||
			    chip_reset != ha->chip_reset) {
				eeh_delay = ha->flags.eeh_busy ? 1 : 0;

				spin_lock_irqsave(&ha->hardware_lock, flags);
				ha->flags.mbox_busy = 0;
				spin_unlock_irqrestore(&ha->hardware_lock,
				    flags);
				atomic_dec(&ha->num_pend_mbx_stage2);
				rval = QLA_ABORTED;
				goto premature_exit;
			}

			if (time_after(jiffies, wait_time))
				break;

			/* Check for pending interrupts. */
			qla2x00_poll(ha->rsp_q_map[0]);

			if (!ha->flags.mbox_int &&
			    !(IS_QLA2200(ha) &&
			    command == MBC_LOAD_RISC_RAM_EXTENDED))
				msleep(10);
		} /* while */
		ql_dbg(ql_dbg_mbx, vha, 0x1013,
		    "Waited %d sec.\n",
		    (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ));
	}
	atomic_dec(&ha->num_pend_mbx_stage2);

	/* Check whether we timed out */
	if (ha->flags.mbox_int) {
		uint16_t *iptr2;

		ql_dbg(ql_dbg_mbx, vha, 0x1014,
		    "Cmd=%x completed.\n", command);

		/* Got interrupt. Clear the flag. */
		ha->flags.mbox_int = 0;
		clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);

		if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
			spin_lock_irqsave(&ha->hardware_lock, flags);
			ha->flags.mbox_busy = 0;
			spin_unlock_irqrestore(&ha->hardware_lock, flags);

			/* Setting Link-Down error */
			mcp->mb[0] = MBS_LINK_DOWN_ERROR;
			ha->mcp = NULL;
			rval = QLA_FUNCTION_FAILED;
			ql_log(ql_log_warn, vha, 0xd048,
			    "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
			goto premature_exit;
		}

		if (ha->mailbox_out[0] != MBS_COMMAND_COMPLETE) {
			ql_dbg(ql_dbg_mbx, vha, 0x11ff,
			    "mb_out[0] = %#x <> %#x\n", ha->mailbox_out[0],
			    MBS_COMMAND_COMPLETE);
			rval = QLA_FUNCTION_FAILED;
		}

		/* Load return mailbox registers.
		 */
		iptr2 = mcp->mb;
		iptr = (uint16_t *)&ha->mailbox_out[0];
		mboxes = mcp->in_mb;

		ql_dbg(ql_dbg_mbx, vha, 0x1113,
		    "Mailbox registers (IN):\n");
		for (cnt = 0; cnt < ha->mbx_count; cnt++) {
			if (mboxes & BIT_0) {
				*iptr2 = *iptr;
				ql_dbg(ql_dbg_mbx, vha, 0x1114,
				    "mbox[%d]->0x%04x\n", cnt, *iptr2);
			}

			mboxes >>= 1;
			iptr2++;
			iptr++;
		}
	} else {

		uint16_t mb[8];
		uint32_t ictrl, host_status, hccr;
		uint16_t w;

		if (IS_FWI2_CAPABLE(ha)) {
			mb[0] = rd_reg_word(&reg->isp24.mailbox0);
			mb[1] = rd_reg_word(&reg->isp24.mailbox1);
			mb[2] = rd_reg_word(&reg->isp24.mailbox2);
			mb[3] = rd_reg_word(&reg->isp24.mailbox3);
			mb[7] = rd_reg_word(&reg->isp24.mailbox7);
			ictrl = rd_reg_dword(&reg->isp24.ictrl);
			host_status = rd_reg_dword(&reg->isp24.host_status);
			hccr = rd_reg_dword(&reg->isp24.hccr);

			ql_log(ql_log_warn, vha, 0xd04c,
			    "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
			    "mb[0-3]=[0x%x 0x%x 0x%x 0x%x] mb7 0x%x host_status 0x%x hccr 0x%x\n",
			    command, ictrl, jiffies, mb[0], mb[1], mb[2], mb[3],
			    mb[7], host_status, hccr);
			vha->hw_err_cnt++;

		} else {
			mb[0] = RD_MAILBOX_REG(ha, &reg->isp, 0);
			ictrl = rd_reg_word(&reg->isp.ictrl);
			ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119,
			    "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
			    "mb[0]=0x%x\n", command, ictrl, jiffies, mb[0]);
			vha->hw_err_cnt++;
		}
		ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019);

		/* Capture FW dump only, if PCI device active */
		if (!pci_channel_offline(vha->hw->pdev)) {
			pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
			if (w == 0xffff || ictrl == 0xffffffff ||
			    (chip_reset != ha->chip_reset)) {
				/* This is a special case: if a driver unload
				 * is in progress and the PCI device goes into
				 * a bad state due to a PCI error condition,
				 * only the PCI ERR flag would be set.
				 * We do a premature exit for that case.
				 */
				spin_lock_irqsave(&ha->hardware_lock, flags);
				ha->flags.mbox_busy = 0;
				spin_unlock_irqrestore(&ha->hardware_lock,
				    flags);
				rval = QLA_FUNCTION_TIMEOUT;
				goto premature_exit;
			}

			/* Attempt to capture firmware dump for further
			 * analysis of the current firmware state. We do not
			 * need to do this if we are intentionally generating
			 * a dump.
			 */
			if (mcp->mb[0] != MBC_GEN_SYSTEM_ERROR)
				qla2xxx_dump_fw(vha);
			rval = QLA_FUNCTION_TIMEOUT;
		}
	}
	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->flags.mbox_busy = 0;
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	/* Clean up */
	ha->mcp = NULL;

	if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) {
		ql_dbg(ql_dbg_mbx, vha, 0x101a,
		    "Checking for additional resp interrupt.\n");

		/* polling mode for non isp_abort commands. */
		qla2x00_poll(ha->rsp_q_map[0]);
	}

	if (rval == QLA_FUNCTION_TIMEOUT &&
	    mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) {
		if (!io_lock_on || (mcp->flags & IOCTL_CMD) ||
		    ha->flags.eeh_busy) {
			/* not in dpc. schedule it for dpc to take over.
			 */
			ql_dbg(ql_dbg_mbx, vha, 0x101b,
			    "Timeout, schedule isp_abort_needed.\n");

			if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
			    !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
			    !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
				if (IS_QLA82XX(ha)) {
					ql_dbg(ql_dbg_mbx, vha, 0x112a,
					    "disabling pause transmit on port "
					    "0 & 1.\n");
					qla82xx_wr_32(ha,
					    QLA82XX_CRB_NIU + 0x98,
					    CRB_NIU_XG_PAUSE_CTL_P0|
					    CRB_NIU_XG_PAUSE_CTL_P1);
				}
				ql_log(ql_log_info, base_vha, 0x101c,
				    "Mailbox cmd timeout occurred, cmd=0x%x, "
				    "mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP "
				    "abort.\n", command, mcp->mb[0],
				    ha->flags.eeh_busy);
				vha->hw_err_cnt++;
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
			}
		} else if (current == ha->dpc_thread) {
			/* call abort directly since we are in the DPC thread */
			ql_dbg(ql_dbg_mbx, vha, 0x101d,
			    "Timeout, calling abort_isp.\n");

			if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
			    !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
			    !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
				if (IS_QLA82XX(ha)) {
					ql_dbg(ql_dbg_mbx, vha, 0x112b,
					    "disabling pause transmit on port "
					    "0 & 1.\n");
					qla82xx_wr_32(ha,
					    QLA82XX_CRB_NIU + 0x98,
					    CRB_NIU_XG_PAUSE_CTL_P0|
					    CRB_NIU_XG_PAUSE_CTL_P1);
				}
				ql_log(ql_log_info, base_vha, 0x101e,
				    "Mailbox cmd timeout occurred, cmd=0x%x, "
				    "mb[0]=0x%x. Scheduling ISP abort ",
				    command, mcp->mb[0]);
				vha->hw_err_cnt++;
				set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
				clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				/* Allow next mbx cmd to come in. */
				complete(&ha->mbx_cmd_comp);
				if (ha->isp_ops->abort_isp(vha) &&
				    !ha->flags.eeh_busy) {
					/* Failed. retry later. */
					set_bit(ISP_ABORT_NEEDED,
					    &vha->dpc_flags);
				}
				clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
				ql_dbg(ql_dbg_mbx, vha, 0x101f,
				    "Finished abort_isp.\n");
				goto mbx_done;
			}
		}
	}

premature_exit:
	/* Allow next mbx cmd to come in. */
	complete(&ha->mbx_cmd_comp);

mbx_done:
	if (rval == QLA_ABORTED) {
		ql_log(ql_log_info, vha, 0xd035,
		    "Chip Reset in progress. Purging Mbox cmd=0x%x.\n",
		    mcp->mb[0]);
	} else if (rval) {
		if (ql2xextended_error_logging & (ql_dbg_disc|ql_dbg_mbx)) {
			pr_warn("%s [%s]-%04x:%ld: **** Failed=%x", QL_MSGHDR,
			    dev_name(&ha->pdev->dev), 0x1020+0x800,
			    vha->host_no, rval);
			mboxes = mcp->in_mb;
			cnt = 4;
			for (i = 0; i < ha->mbx_count && cnt; i++, mboxes >>= 1)
				if (mboxes & BIT_0) {
					printk(" mb[%u]=%x", i, mcp->mb[i]);
					cnt--;
				}
			pr_warn(" cmd=%x ****\n", command);
		}
		if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha))) {
			ql_dbg(ql_dbg_mbx, vha, 0x1198,
			    "host_status=%#x intr_ctrl=%#x intr_status=%#x\n",
			    rd_reg_dword(&reg->isp24.host_status),
			    rd_reg_dword(&reg->isp24.ictrl),
			    rd_reg_dword(&reg->isp24.istatus));
		} else {
			ql_dbg(ql_dbg_mbx, vha, 0x1206,
			    "ctrl_status=%#x ictrl=%#x istatus=%#x\n",
			    rd_reg_word(&reg->isp.ctrl_status),
			    rd_reg_word(&reg->isp.ictrl),
			    rd_reg_word(&reg->isp.istatus));
		}
	} else {
		ql_dbg(ql_dbg_mbx, base_vha, 0x1021, "Done %s.\n", __func__);
	}

	i = 500;
	while (i && eeh_delay && (ha->pci_error_state < QLA_PCI_SLOT_RESET)) {
		/*
		 * The caller of this mailbox encountered a PCI error.
		 * Hold the thread until the PCIe link reset completes to
		 * make sure the caller does not unmap DMA while recovery
		 * is in progress.
611 */ 612 msleep(1); 613 i--; 614 } 615 return rval; 616 } 617 618 int 619 qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr, 620 uint32_t risc_code_size) 621 { 622 int rval; 623 struct qla_hw_data *ha = vha->hw; 624 mbx_cmd_t mc; 625 mbx_cmd_t *mcp = &mc; 626 627 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1022, 628 "Entered %s.\n", __func__); 629 630 if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) { 631 mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED; 632 mcp->mb[8] = MSW(risc_addr); 633 mcp->out_mb = MBX_8|MBX_0; 634 } else { 635 mcp->mb[0] = MBC_LOAD_RISC_RAM; 636 mcp->out_mb = MBX_0; 637 } 638 mcp->mb[1] = LSW(risc_addr); 639 mcp->mb[2] = MSW(req_dma); 640 mcp->mb[3] = LSW(req_dma); 641 mcp->mb[6] = MSW(MSD(req_dma)); 642 mcp->mb[7] = LSW(MSD(req_dma)); 643 mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1; 644 if (IS_FWI2_CAPABLE(ha)) { 645 mcp->mb[4] = MSW(risc_code_size); 646 mcp->mb[5] = LSW(risc_code_size); 647 mcp->out_mb |= MBX_5|MBX_4; 648 } else { 649 mcp->mb[4] = LSW(risc_code_size); 650 mcp->out_mb |= MBX_4; 651 } 652 653 mcp->in_mb = MBX_1|MBX_0; 654 mcp->tov = MBX_TOV_SECONDS; 655 mcp->flags = 0; 656 rval = qla2x00_mailbox_command(vha, mcp); 657 658 if (rval != QLA_SUCCESS) { 659 ql_dbg(ql_dbg_mbx, vha, 0x1023, 660 "Failed=%x mb[0]=%x mb[1]=%x.\n", 661 rval, mcp->mb[0], mcp->mb[1]); 662 vha->hw_err_cnt++; 663 } else { 664 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1024, 665 "Done %s.\n", __func__); 666 } 667 668 return rval; 669 } 670 671 #define NVME_ENABLE_FLAG BIT_3 672 #define EDIF_HW_SUPPORT BIT_10 673 674 /* 675 * qla2x00_execute_fw 676 * Start adapter firmware. 677 * 678 * Input: 679 * ha = adapter block pointer. 680 * TARGET_QUEUE_LOCK must be released. 681 * ADAPTER_STATE_LOCK must be released. 682 * 683 * Returns: 684 * qla2x00 local function return status code. 685 * 686 * Context: 687 * Kernel context. 688 */ 689 int 690 qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr) 691 { 692 int rval; 693 struct qla_hw_data *ha = vha->hw; 694 mbx_cmd_t mc; 695 mbx_cmd_t *mcp = &mc; 696 u8 semaphore = 0; 697 #define EXE_FW_FORCE_SEMAPHORE BIT_7 698 u8 retry = 5; 699 700 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1025, 701 "Entered %s.\n", __func__); 702 703 again: 704 mcp->mb[0] = MBC_EXECUTE_FIRMWARE; 705 mcp->out_mb = MBX_0; 706 mcp->in_mb = MBX_0; 707 if (IS_FWI2_CAPABLE(ha)) { 708 mcp->mb[1] = MSW(risc_addr); 709 mcp->mb[2] = LSW(risc_addr); 710 mcp->mb[3] = 0; 711 mcp->mb[4] = 0; 712 mcp->mb[11] = 0; 713 714 /* Enable BPM? 
*/ 715 if (ha->flags.lr_detected) { 716 mcp->mb[4] = BIT_0; 717 if (IS_BPM_RANGE_CAPABLE(ha)) 718 mcp->mb[4] |= 719 ha->lr_distance << LR_DIST_FW_POS; 720 } 721 722 if (ql2xnvmeenable && (IS_QLA27XX(ha) || IS_QLA28XX(ha))) 723 mcp->mb[4] |= NVME_ENABLE_FLAG; 724 725 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 726 struct nvram_81xx *nv = ha->nvram; 727 /* set minimum speed if specified in nvram */ 728 if (nv->min_supported_speed >= 2 && 729 nv->min_supported_speed <= 5) { 730 mcp->mb[4] |= BIT_4; 731 mcp->mb[11] |= nv->min_supported_speed & 0xF; 732 mcp->out_mb |= MBX_11; 733 mcp->in_mb |= BIT_5; 734 vha->min_supported_speed = 735 nv->min_supported_speed; 736 } 737 738 if (IS_PPCARCH) 739 mcp->mb[11] |= BIT_4; 740 } 741 742 if (ha->flags.exlogins_enabled) 743 mcp->mb[4] |= ENABLE_EXTENDED_LOGIN; 744 745 if (ha->flags.exchoffld_enabled) 746 mcp->mb[4] |= ENABLE_EXCHANGE_OFFLD; 747 748 if (semaphore) 749 mcp->mb[11] |= EXE_FW_FORCE_SEMAPHORE; 750 751 mcp->out_mb |= MBX_4 | MBX_3 | MBX_2 | MBX_1 | MBX_11; 752 mcp->in_mb |= MBX_5 | MBX_3 | MBX_2 | MBX_1; 753 } else { 754 mcp->mb[1] = LSW(risc_addr); 755 mcp->out_mb |= MBX_1; 756 if (IS_QLA2322(ha) || IS_QLA6322(ha)) { 757 mcp->mb[2] = 0; 758 mcp->out_mb |= MBX_2; 759 } 760 } 761 762 mcp->tov = MBX_TOV_SECONDS; 763 mcp->flags = 0; 764 rval = qla2x00_mailbox_command(vha, mcp); 765 766 if (rval != QLA_SUCCESS) { 767 if (IS_QLA28XX(ha) && rval == QLA_COMMAND_ERROR && 768 mcp->mb[1] == 0x27 && retry) { 769 semaphore = 1; 770 retry--; 771 ql_dbg(ql_dbg_async, vha, 0x1026, 772 "Exe FW: force semaphore.\n"); 773 goto again; 774 } 775 776 if (retry) { 777 retry--; 778 ql_dbg(ql_dbg_async, vha, 0x509d, 779 "Exe FW retry: mb[0]=%x retry[%d]\n", mcp->mb[0], retry); 780 goto again; 781 } 782 ql_dbg(ql_dbg_mbx, vha, 0x1026, 783 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 784 vha->hw_err_cnt++; 785 return rval; 786 } 787 788 if (!IS_FWI2_CAPABLE(ha)) 789 goto done; 790 791 ha->fw_ability_mask = mcp->mb[3] << 16 | mcp->mb[2]; 792 ql_dbg(ql_dbg_mbx, vha, 0x119a, 793 "fw_ability_mask=%x.\n", ha->fw_ability_mask); 794 ql_dbg(ql_dbg_mbx, vha, 0x1027, "exchanges=%x.\n", mcp->mb[1]); 795 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 796 ha->max_supported_speed = mcp->mb[2] & (BIT_0|BIT_1); 797 ql_dbg(ql_dbg_mbx, vha, 0x119b, "max_supported_speed=%s.\n", 798 ha->max_supported_speed == 0 ? "16Gps" : 799 ha->max_supported_speed == 1 ? "32Gps" : 800 ha->max_supported_speed == 2 ? "64Gps" : "unknown"); 801 if (vha->min_supported_speed) { 802 ha->min_supported_speed = mcp->mb[5] & 803 (BIT_0 | BIT_1 | BIT_2); 804 ql_dbg(ql_dbg_mbx, vha, 0x119c, 805 "min_supported_speed=%s.\n", 806 ha->min_supported_speed == 6 ? "64Gps" : 807 ha->min_supported_speed == 5 ? "32Gps" : 808 ha->min_supported_speed == 4 ? "16Gps" : 809 ha->min_supported_speed == 3 ? "8Gps" : 810 ha->min_supported_speed == 2 ? "4Gps" : "unknown"); 811 } 812 } 813 814 if (IS_QLA28XX(ha) && (mcp->mb[5] & EDIF_HW_SUPPORT)) { 815 ha->flags.edif_hw = 1; 816 ql_log(ql_log_info, vha, 0xffff, 817 "%s: edif HW\n", __func__); 818 } 819 820 done: 821 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1028, 822 "Done %s.\n", __func__); 823 824 return rval; 825 } 826 827 /* 828 * qla_get_exlogin_status 829 * Get extended login status 830 * uses the memory offload control/status Mailbox 831 * 832 * Input: 833 * ha: adapter state pointer. 834 * fwopt: firmware options 835 * 836 * Returns: 837 * qla2x00 local function status 838 * 839 * Context: 840 * Kernel context. 
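 *
 * Calling sketch (a simplified illustration; the variable names and the
 * sizing arithmetic are not taken from this driver). On success, *buf_sz
 * returns mb[4] and *ex_logins_cnt returns mb[10], from which the
 * extended-login DMA buffer is sized:
 *
 *	uint16_t buf_sz, xlogin_cnt;
 *
 *	rval = qla_get_exlogin_status(vha, &buf_sz, &xlogin_cnt);
 *	if (rval == QLA_SUCCESS && xlogin_cnt)
 *		ha->exlogin_size = buf_sz * xlogin_cnt;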
 */
#define	FETCH_XLOGINS_STAT	0x8
int
qla_get_exlogin_status(scsi_qla_host_t *vha, uint16_t *buf_sz,
    uint16_t *ex_logins_cnt)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118f,
	    "Entered %s\n", __func__);

	memset(mcp->mb, 0, sizeof(mcp->mb));
	mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
	mcp->mb[1] = FETCH_XLOGINS_STAT;
	mcp->out_mb = MBX_1|MBX_0;
	mcp->in_mb = MBX_10|MBX_4|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;

	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1115, "Failed=%x.\n", rval);
	} else {
		*buf_sz = mcp->mb[4];
		*ex_logins_cnt = mcp->mb[10];

		ql_log(ql_log_info, vha, 0x1190,
		    "buffer size 0x%x, exchange login count=%d\n",
		    mcp->mb[4], mcp->mb[10]);

		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1116,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla_set_exlogin_mem_cfg
 *	Set extended login memory configuration.
 *	This mailbox needs to be issued before init_cb is set.
 *
 * Input:
 *	ha:		adapter state pointer.
 *	buffer:		buffer pointer
 *	phys_addr:	physical address of buffer
 *	size:		size of buffer
 *	TARGET_QUEUE_LOCK must be released
 *	ADAPTER_STATE_LOCK must be released
 *
 * Returns:
 *	qla2x00 local function status code.
 *
 * Context:
 *	Kernel context.
 */
#define CONFIG_XLOGINS_MEM	0x9
int
qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111a,
	    "Entered %s.\n", __func__);

	memset(mcp->mb, 0, sizeof(mcp->mb));
	mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
	mcp->mb[1] = CONFIG_XLOGINS_MEM;
	mcp->mb[2] = MSW(phys_addr);
	mcp->mb[3] = LSW(phys_addr);
	mcp->mb[6] = MSW(MSD(phys_addr));
	mcp->mb[7] = LSW(MSD(phys_addr));
	mcp->mb[8] = MSW(ha->exlogin_size);
	mcp->mb[9] = LSW(ha->exlogin_size);
	mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_11|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x111b,
		    "EXlogin Failed=%x. MB0=%x MB11=%x\n",
		    rval, mcp->mb[0], mcp->mb[11]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla_get_exchoffld_status
 *	Get exchange offload status.
 *	Uses the memory offload control/status mailbox.
 *
 * Input:
 *	ha:		adapter state pointer.
 *	fwopt:		firmware options
 *
 * Returns:
 *	qla2x00 local function status
 *
 * Context:
 *	Kernel context.
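 *
 * Simplified flow (illustrative only; the in-tree caller allocates the
 * ha->exchoffld_buf DMA buffer between the two steps): query the
 * required size and count here, then program the buffer into the
 * firmware with qla_set_exchoffld_mem_cfg() below.
 *
 *	uint16_t sz, cnt;
 *
 *	rval = qla_get_exchoffld_status(vha, &sz, &cnt);
 *	if (rval == QLA_SUCCESS && cnt)
 *		rval = qla_set_exchoffld_mem_cfg(vha);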
 */
#define FETCH_XCHOFFLD_STAT	0x2
int
qla_get_exchoffld_status(scsi_qla_host_t *vha, uint16_t *buf_sz,
    uint16_t *ex_logins_cnt)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1019,
	    "Entered %s\n", __func__);

	memset(mcp->mb, 0, sizeof(mcp->mb));
	mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
	mcp->mb[1] = FETCH_XCHOFFLD_STAT;
	mcp->out_mb = MBX_1|MBX_0;
	mcp->in_mb = MBX_10|MBX_4|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;

	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1155, "Failed=%x.\n", rval);
	} else {
		*buf_sz = mcp->mb[4];
		*ex_logins_cnt = mcp->mb[10];

		ql_log(ql_log_info, vha, 0x118e,
		    "buffer size 0x%x, exchange offload count=%d\n",
		    mcp->mb[4], mcp->mb[10]);

		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1156,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla_set_exchoffld_mem_cfg
 *	Set exchange offload memory configuration.
 *	This mailbox needs to be issued before init_cb is set.
 *
 * Input:
 *	ha:		adapter state pointer.
 *	buffer:		buffer pointer
 *	phys_addr:	physical address of buffer
 *	size:		size of buffer
 *	TARGET_QUEUE_LOCK must be released
 *	ADAPTER_STATE_LOCK must be released
 *
 * Returns:
 *	qla2x00 local function status code.
 *
 * Context:
 *	Kernel context.
 */
#define CONFIG_XCHOFFLD_MEM	0x3
int
qla_set_exchoffld_mem_cfg(scsi_qla_host_t *vha)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1157,
	    "Entered %s.\n", __func__);

	memset(mcp->mb, 0, sizeof(mcp->mb));
	mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
	mcp->mb[1] = CONFIG_XCHOFFLD_MEM;
	mcp->mb[2] = MSW(ha->exchoffld_buf_dma);
	mcp->mb[3] = LSW(ha->exchoffld_buf_dma);
	mcp->mb[6] = MSW(MSD(ha->exchoffld_buf_dma));
	mcp->mb[7] = LSW(MSD(ha->exchoffld_buf_dma));
	mcp->mb[8] = MSW(ha->exchoffld_size);
	mcp->mb[9] = LSW(ha->exchoffld_size);
	mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_11|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x1158, "Failed=%x.\n", rval);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1192,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2x00_get_fw_version
 *	Get firmware version.
 *
 * Input:
 *	ha:		adapter state pointer.
 *
 * Output:
 *	major/minor/subminor and related firmware version fields are stored
 *	in the adapter's struct qla_hw_data.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
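 *
 * Minimal usage sketch (the 0x0000 debug id is a placeholder, not an id
 * used by this driver):
 *
 *	if (qla2x00_get_fw_version(vha) == QLA_SUCCESS)
 *		ql_log(ql_log_info, vha, 0x0000, "fw %d.%02d.%02d\n",
 *		    vha->hw->fw_major_version,
 *		    vha->hw->fw_minor_version,
 *		    vha->hw->fw_subminor_version);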
1061 */ 1062 int 1063 qla2x00_get_fw_version(scsi_qla_host_t *vha) 1064 { 1065 int rval; 1066 mbx_cmd_t mc; 1067 mbx_cmd_t *mcp = &mc; 1068 struct qla_hw_data *ha = vha->hw; 1069 1070 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1029, 1071 "Entered %s.\n", __func__); 1072 1073 mcp->mb[0] = MBC_GET_FIRMWARE_VERSION; 1074 mcp->out_mb = MBX_0; 1075 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 1076 if (IS_QLA81XX(vha->hw) || IS_QLA8031(ha) || IS_QLA8044(ha)) 1077 mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8; 1078 if (IS_FWI2_CAPABLE(ha)) 1079 mcp->in_mb |= MBX_17|MBX_16|MBX_15; 1080 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) 1081 mcp->in_mb |= 1082 MBX_25|MBX_24|MBX_23|MBX_22|MBX_21|MBX_20|MBX_19|MBX_18| 1083 MBX_14|MBX_13|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7; 1084 1085 mcp->flags = 0; 1086 mcp->tov = MBX_TOV_SECONDS; 1087 rval = qla2x00_mailbox_command(vha, mcp); 1088 if (rval != QLA_SUCCESS) 1089 goto failed; 1090 1091 /* Return mailbox data. */ 1092 ha->fw_major_version = mcp->mb[1]; 1093 ha->fw_minor_version = mcp->mb[2]; 1094 ha->fw_subminor_version = mcp->mb[3]; 1095 ha->fw_attributes = mcp->mb[6]; 1096 if (IS_QLA2100(vha->hw) || IS_QLA2200(vha->hw)) 1097 ha->fw_memory_size = 0x1FFFF; /* Defaults to 128KB. */ 1098 else 1099 ha->fw_memory_size = (mcp->mb[5] << 16) | mcp->mb[4]; 1100 1101 if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) || IS_QLA8044(ha)) { 1102 ha->mpi_version[0] = mcp->mb[10] & 0xff; 1103 ha->mpi_version[1] = mcp->mb[11] >> 8; 1104 ha->mpi_version[2] = mcp->mb[11] & 0xff; 1105 ha->mpi_capabilities = (mcp->mb[12] << 16) | mcp->mb[13]; 1106 ha->phy_version[0] = mcp->mb[8] & 0xff; 1107 ha->phy_version[1] = mcp->mb[9] >> 8; 1108 ha->phy_version[2] = mcp->mb[9] & 0xff; 1109 } 1110 1111 if (IS_FWI2_CAPABLE(ha)) { 1112 ha->fw_attributes_h = mcp->mb[15]; 1113 ha->fw_attributes_ext[0] = mcp->mb[16]; 1114 ha->fw_attributes_ext[1] = mcp->mb[17]; 1115 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1139, 1116 "%s: FW_attributes Upper: 0x%x, Lower: 0x%x.\n", 1117 __func__, mcp->mb[15], mcp->mb[6]); 1118 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x112f, 1119 "%s: Ext_FwAttributes Upper: 0x%x, Lower: 0x%x.\n", 1120 __func__, mcp->mb[17], mcp->mb[16]); 1121 1122 if (ha->fw_attributes_h & 0x4) 1123 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118d, 1124 "%s: Firmware supports Extended Login 0x%x\n", 1125 __func__, ha->fw_attributes_h); 1126 1127 if (ha->fw_attributes_h & 0x8) 1128 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1191, 1129 "%s: Firmware supports Exchange Offload 0x%x\n", 1130 __func__, ha->fw_attributes_h); 1131 1132 /* 1133 * FW supports nvme and driver load parameter requested nvme. 1134 * BIT 26 of fw_attributes indicates NVMe support. 
		 */
		if ((ha->fw_attributes_h &
		    (FW_ATTR_H_NVME | FW_ATTR_H_NVME_UPDATED)) &&
		    ql2xnvmeenable) {
			if (ha->fw_attributes_h & FW_ATTR_H_NVME_FBURST)
				vha->flags.nvme_first_burst = 1;

			vha->flags.nvme_enabled = 1;
			ql_log(ql_log_info, vha, 0xd302,
			    "%s: FC-NVMe is Enabled (0x%x)\n",
			    __func__, ha->fw_attributes_h);
		}

		/* BIT_13 of Extended FW Attributes informs about NVMe2 support */
		if (ha->fw_attributes_ext[0] & FW_ATTR_EXT0_NVME2) {
			ql_log(ql_log_info, vha, 0xd302,
			    "Firmware supports NVMe2 0x%x\n",
			    ha->fw_attributes_ext[0]);
			vha->flags.nvme2_enabled = 1;
		}

		if (IS_QLA28XX(ha) && ha->flags.edif_hw && ql2xsecenable &&
		    (ha->fw_attributes_ext[0] & FW_ATTR_EXT0_EDIF)) {
			ha->flags.edif_enabled = 1;
			ql_log(ql_log_info, vha, 0xffff,
			    "%s: edif is enabled\n", __func__);
		}
	}

	if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
		ha->serdes_version[0] = mcp->mb[7] & 0xff;
		ha->serdes_version[1] = mcp->mb[8] >> 8;
		ha->serdes_version[2] = mcp->mb[8] & 0xff;
		ha->mpi_version[0] = mcp->mb[10] & 0xff;
		ha->mpi_version[1] = mcp->mb[11] >> 8;
		ha->mpi_version[2] = mcp->mb[11] & 0xff;
		ha->pep_version[0] = mcp->mb[13] & 0xff;
		ha->pep_version[1] = mcp->mb[14] >> 8;
		ha->pep_version[2] = mcp->mb[14] & 0xff;
		ha->fw_shared_ram_start = (mcp->mb[19] << 16) | mcp->mb[18];
		ha->fw_shared_ram_end = (mcp->mb[21] << 16) | mcp->mb[20];
		ha->fw_ddr_ram_start = (mcp->mb[23] << 16) | mcp->mb[22];
		ha->fw_ddr_ram_end = (mcp->mb[25] << 16) | mcp->mb[24];
		if (IS_QLA28XX(ha)) {
			if (mcp->mb[16] & BIT_10)
				ha->flags.secure_fw = 1;

			ql_log(ql_log_info, vha, 0xffff,
			    "Secure Flash Update in FW: %s\n",
			    (ha->flags.secure_fw) ? "Supported" :
			    "Not Supported");
		}

		if (ha->flags.scm_supported_a &&
		    (ha->fw_attributes_ext[0] & FW_ATTR_EXT0_SCM_SUPPORTED)) {
			ha->flags.scm_supported_f = 1;
			ha->sf_init_cb->flags |= cpu_to_le16(BIT_13);
		}
		ql_log(ql_log_info, vha, 0x11a3, "SCM in FW: %s\n",
		    (ha->flags.scm_supported_f) ? "Supported" :
		    "Not Supported");

		if (vha->flags.nvme2_enabled) {
			/* set BIT_15 of special feature control block for SLER */
			ha->sf_init_cb->flags |= cpu_to_le16(BIT_15);
			/* set BIT_14 of special feature control block for PI CTRL */
			ha->sf_init_cb->flags |= cpu_to_le16(BIT_14);
		}
	}

failed:
	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x102a, "Failed=%x.\n", rval);
	} else {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102b,
		    "Done %s.\n", __func__);
	}
	return rval;
}

/*
 * qla2x00_get_fw_options
 *	Get firmware options.
 *
 * Input:
 *	ha = adapter block pointer.
 *	fwopt = pointer for firmware options.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
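 *
 * Usage sketch for a read-modify-write of the firmware option words
 * (the array size and BIT_11 below are illustrative, not taken from
 * this driver):
 *
 *	uint16_t fwopts[16] = { 0 };
 *
 *	if (qla2x00_get_fw_options(vha, fwopts) == QLA_SUCCESS) {
 *		fwopts[2] |= BIT_11;
 *		qla2x00_set_fw_options(vha, fwopts);
 *	}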
1230 */ 1231 int 1232 qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts) 1233 { 1234 int rval; 1235 mbx_cmd_t mc; 1236 mbx_cmd_t *mcp = &mc; 1237 1238 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102c, 1239 "Entered %s.\n", __func__); 1240 1241 mcp->mb[0] = MBC_GET_FIRMWARE_OPTION; 1242 mcp->out_mb = MBX_0; 1243 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0; 1244 mcp->tov = MBX_TOV_SECONDS; 1245 mcp->flags = 0; 1246 rval = qla2x00_mailbox_command(vha, mcp); 1247 1248 if (rval != QLA_SUCCESS) { 1249 /*EMPTY*/ 1250 ql_dbg(ql_dbg_mbx, vha, 0x102d, "Failed=%x.\n", rval); 1251 } else { 1252 fwopts[0] = mcp->mb[0]; 1253 fwopts[1] = mcp->mb[1]; 1254 fwopts[2] = mcp->mb[2]; 1255 fwopts[3] = mcp->mb[3]; 1256 1257 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102e, 1258 "Done %s.\n", __func__); 1259 } 1260 1261 return rval; 1262 } 1263 1264 1265 /* 1266 * qla2x00_set_fw_options 1267 * Set firmware options. 1268 * 1269 * Input: 1270 * ha = adapter block pointer. 1271 * fwopt = pointer for firmware options. 1272 * 1273 * Returns: 1274 * qla2x00 local function return status code. 1275 * 1276 * Context: 1277 * Kernel context. 1278 */ 1279 int 1280 qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts) 1281 { 1282 int rval; 1283 mbx_cmd_t mc; 1284 mbx_cmd_t *mcp = &mc; 1285 1286 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102f, 1287 "Entered %s.\n", __func__); 1288 1289 mcp->mb[0] = MBC_SET_FIRMWARE_OPTION; 1290 mcp->mb[1] = fwopts[1]; 1291 mcp->mb[2] = fwopts[2]; 1292 mcp->mb[3] = fwopts[3]; 1293 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 1294 mcp->in_mb = MBX_0; 1295 if (IS_FWI2_CAPABLE(vha->hw)) { 1296 mcp->in_mb |= MBX_1; 1297 mcp->mb[10] = fwopts[10]; 1298 mcp->out_mb |= MBX_10; 1299 } else { 1300 mcp->mb[10] = fwopts[10]; 1301 mcp->mb[11] = fwopts[11]; 1302 mcp->mb[12] = 0; /* Undocumented, but used */ 1303 mcp->out_mb |= MBX_12|MBX_11|MBX_10; 1304 } 1305 mcp->tov = MBX_TOV_SECONDS; 1306 mcp->flags = 0; 1307 rval = qla2x00_mailbox_command(vha, mcp); 1308 1309 fwopts[0] = mcp->mb[0]; 1310 1311 if (rval != QLA_SUCCESS) { 1312 /*EMPTY*/ 1313 ql_dbg(ql_dbg_mbx, vha, 0x1030, 1314 "Failed=%x (%x/%x).\n", rval, mcp->mb[0], mcp->mb[1]); 1315 } else { 1316 /*EMPTY*/ 1317 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1031, 1318 "Done %s.\n", __func__); 1319 } 1320 1321 return rval; 1322 } 1323 1324 /* 1325 * qla2x00_mbx_reg_test 1326 * Mailbox register wrap test. 1327 * 1328 * Input: 1329 * ha = adapter block pointer. 1330 * TARGET_QUEUE_LOCK must be released. 1331 * ADAPTER_STATE_LOCK must be released. 1332 * 1333 * Returns: 1334 * qla2x00 local function return status code. 1335 * 1336 * Context: 1337 * Kernel context. 
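 *
 * Typically run during chip diagnostics to verify the mailbox interface;
 * a failing wrap test is treated as a hardware error. Sketch (the 0x0000
 * debug id is a placeholder):
 *
 *	if (qla2x00_mbx_reg_test(vha) != QLA_SUCCESS)
 *		ql_log(ql_log_warn, vha, 0x0000,
 *		    "Mailbox register wrap test failed.\n");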
1338 */ 1339 int 1340 qla2x00_mbx_reg_test(scsi_qla_host_t *vha) 1341 { 1342 int rval; 1343 mbx_cmd_t mc; 1344 mbx_cmd_t *mcp = &mc; 1345 1346 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1032, 1347 "Entered %s.\n", __func__); 1348 1349 mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST; 1350 mcp->mb[1] = 0xAAAA; 1351 mcp->mb[2] = 0x5555; 1352 mcp->mb[3] = 0xAA55; 1353 mcp->mb[4] = 0x55AA; 1354 mcp->mb[5] = 0xA5A5; 1355 mcp->mb[6] = 0x5A5A; 1356 mcp->mb[7] = 0x2525; 1357 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 1358 mcp->in_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 1359 mcp->tov = MBX_TOV_SECONDS; 1360 mcp->flags = 0; 1361 rval = qla2x00_mailbox_command(vha, mcp); 1362 1363 if (rval == QLA_SUCCESS) { 1364 if (mcp->mb[1] != 0xAAAA || mcp->mb[2] != 0x5555 || 1365 mcp->mb[3] != 0xAA55 || mcp->mb[4] != 0x55AA) 1366 rval = QLA_FUNCTION_FAILED; 1367 if (mcp->mb[5] != 0xA5A5 || mcp->mb[6] != 0x5A5A || 1368 mcp->mb[7] != 0x2525) 1369 rval = QLA_FUNCTION_FAILED; 1370 } 1371 1372 if (rval != QLA_SUCCESS) { 1373 /*EMPTY*/ 1374 ql_dbg(ql_dbg_mbx, vha, 0x1033, "Failed=%x.\n", rval); 1375 vha->hw_err_cnt++; 1376 } else { 1377 /*EMPTY*/ 1378 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1034, 1379 "Done %s.\n", __func__); 1380 } 1381 1382 return rval; 1383 } 1384 1385 /* 1386 * qla2x00_verify_checksum 1387 * Verify firmware checksum. 1388 * 1389 * Input: 1390 * ha = adapter block pointer. 1391 * TARGET_QUEUE_LOCK must be released. 1392 * ADAPTER_STATE_LOCK must be released. 1393 * 1394 * Returns: 1395 * qla2x00 local function return status code. 1396 * 1397 * Context: 1398 * Kernel context. 1399 */ 1400 int 1401 qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr) 1402 { 1403 int rval; 1404 mbx_cmd_t mc; 1405 mbx_cmd_t *mcp = &mc; 1406 1407 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1035, 1408 "Entered %s.\n", __func__); 1409 1410 mcp->mb[0] = MBC_VERIFY_CHECKSUM; 1411 mcp->out_mb = MBX_0; 1412 mcp->in_mb = MBX_0; 1413 if (IS_FWI2_CAPABLE(vha->hw)) { 1414 mcp->mb[1] = MSW(risc_addr); 1415 mcp->mb[2] = LSW(risc_addr); 1416 mcp->out_mb |= MBX_2|MBX_1; 1417 mcp->in_mb |= MBX_2|MBX_1; 1418 } else { 1419 mcp->mb[1] = LSW(risc_addr); 1420 mcp->out_mb |= MBX_1; 1421 mcp->in_mb |= MBX_1; 1422 } 1423 1424 mcp->tov = MBX_TOV_SECONDS; 1425 mcp->flags = 0; 1426 rval = qla2x00_mailbox_command(vha, mcp); 1427 1428 if (rval != QLA_SUCCESS) { 1429 ql_dbg(ql_dbg_mbx, vha, 0x1036, 1430 "Failed=%x chm sum=%x.\n", rval, IS_FWI2_CAPABLE(vha->hw) ? 1431 (mcp->mb[2] << 16) | mcp->mb[1] : mcp->mb[1]); 1432 } else { 1433 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1037, 1434 "Done %s.\n", __func__); 1435 } 1436 1437 return rval; 1438 } 1439 1440 /* 1441 * qla2x00_issue_iocb 1442 * Issue IOCB using mailbox command 1443 * 1444 * Input: 1445 * ha = adapter state pointer. 1446 * buffer = buffer pointer. 1447 * phys_addr = physical address of buffer. 1448 * size = size of buffer. 1449 * TARGET_QUEUE_LOCK must be released. 1450 * ADAPTER_STATE_LOCK must be released. 1451 * 1452 * Returns: 1453 * qla2x00 local function return status code. 1454 * 1455 * Context: 1456 * Kernel context. 
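 *
 * Calling sketch (assumes the caller has already built an IOCB in a
 * DMA-coherent buffer; the names below are placeholders). The firmware
 * writes the completion entry back into the same buffer, whose
 * entry_status is then masked by qla2x00_issue_iocb_timeout():
 *
 *	rval = qla2x00_issue_iocb(vha, pt_iocb, pt_iocb_dma,
 *	    sizeof(*pt_iocb));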
1457 */ 1458 int 1459 qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer, 1460 dma_addr_t phys_addr, size_t size, uint32_t tov) 1461 { 1462 int rval; 1463 mbx_cmd_t mc; 1464 mbx_cmd_t *mcp = &mc; 1465 1466 if (!vha->hw->flags.fw_started) 1467 return QLA_INVALID_COMMAND; 1468 1469 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1038, 1470 "Entered %s.\n", __func__); 1471 1472 mcp->mb[0] = MBC_IOCB_COMMAND_A64; 1473 mcp->mb[1] = 0; 1474 mcp->mb[2] = MSW(LSD(phys_addr)); 1475 mcp->mb[3] = LSW(LSD(phys_addr)); 1476 mcp->mb[6] = MSW(MSD(phys_addr)); 1477 mcp->mb[7] = LSW(MSD(phys_addr)); 1478 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 1479 mcp->in_mb = MBX_1|MBX_0; 1480 mcp->tov = tov; 1481 mcp->flags = 0; 1482 rval = qla2x00_mailbox_command(vha, mcp); 1483 1484 if (rval != QLA_SUCCESS) { 1485 /*EMPTY*/ 1486 ql_dbg(ql_dbg_mbx, vha, 0x1039, "Failed=%x.\n", rval); 1487 } else { 1488 sts_entry_t *sts_entry = buffer; 1489 1490 /* Mask reserved bits. */ 1491 sts_entry->entry_status &= 1492 IS_FWI2_CAPABLE(vha->hw) ? RF_MASK_24XX : RF_MASK; 1493 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103a, 1494 "Done %s (status=%x).\n", __func__, 1495 sts_entry->entry_status); 1496 } 1497 1498 return rval; 1499 } 1500 1501 int 1502 qla2x00_issue_iocb(scsi_qla_host_t *vha, void *buffer, dma_addr_t phys_addr, 1503 size_t size) 1504 { 1505 return qla2x00_issue_iocb_timeout(vha, buffer, phys_addr, size, 1506 MBX_TOV_SECONDS); 1507 } 1508 1509 /* 1510 * qla2x00_abort_command 1511 * Abort command aborts a specified IOCB. 1512 * 1513 * Input: 1514 * ha = adapter block pointer. 1515 * sp = SB structure pointer. 1516 * 1517 * Returns: 1518 * qla2x00 local function return status code. 1519 * 1520 * Context: 1521 * Kernel context. 1522 */ 1523 int 1524 qla2x00_abort_command(srb_t *sp) 1525 { 1526 unsigned long flags = 0; 1527 int rval; 1528 uint32_t handle = 0; 1529 mbx_cmd_t mc; 1530 mbx_cmd_t *mcp = &mc; 1531 fc_port_t *fcport = sp->fcport; 1532 scsi_qla_host_t *vha = fcport->vha; 1533 struct qla_hw_data *ha = vha->hw; 1534 struct req_que *req; 1535 struct scsi_cmnd *cmd = GET_CMD_SP(sp); 1536 1537 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103b, 1538 "Entered %s.\n", __func__); 1539 1540 if (sp->qpair) 1541 req = sp->qpair->req; 1542 else 1543 req = vha->req; 1544 1545 spin_lock_irqsave(&ha->hardware_lock, flags); 1546 for (handle = 1; handle < req->num_outstanding_cmds; handle++) { 1547 if (req->outstanding_cmds[handle] == sp) 1548 break; 1549 } 1550 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1551 1552 if (handle == req->num_outstanding_cmds) { 1553 /* command not found */ 1554 return QLA_FUNCTION_FAILED; 1555 } 1556 1557 mcp->mb[0] = MBC_ABORT_COMMAND; 1558 if (HAS_EXTENDED_IDS(ha)) 1559 mcp->mb[1] = fcport->loop_id; 1560 else 1561 mcp->mb[1] = fcport->loop_id << 8; 1562 mcp->mb[2] = (uint16_t)handle; 1563 mcp->mb[3] = (uint16_t)(handle >> 16); 1564 mcp->mb[6] = (uint16_t)cmd->device->lun; 1565 mcp->out_mb = MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 1566 mcp->in_mb = MBX_0; 1567 mcp->tov = MBX_TOV_SECONDS; 1568 mcp->flags = 0; 1569 rval = qla2x00_mailbox_command(vha, mcp); 1570 1571 if (rval != QLA_SUCCESS) { 1572 ql_dbg(ql_dbg_mbx, vha, 0x103c, "Failed=%x.\n", rval); 1573 } else { 1574 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103d, 1575 "Done %s.\n", __func__); 1576 } 1577 1578 return rval; 1579 } 1580 1581 int 1582 qla2x00_abort_target(struct fc_port *fcport, uint64_t l, int tag) 1583 { 1584 int rval, rval2; 1585 mbx_cmd_t mc; 1586 mbx_cmd_t *mcp = &mc; 1587 scsi_qla_host_t *vha; 1588 1589 vha = 
fcport->vha; 1590 1591 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103e, 1592 "Entered %s.\n", __func__); 1593 1594 mcp->mb[0] = MBC_ABORT_TARGET; 1595 mcp->out_mb = MBX_9|MBX_2|MBX_1|MBX_0; 1596 if (HAS_EXTENDED_IDS(vha->hw)) { 1597 mcp->mb[1] = fcport->loop_id; 1598 mcp->mb[10] = 0; 1599 mcp->out_mb |= MBX_10; 1600 } else { 1601 mcp->mb[1] = fcport->loop_id << 8; 1602 } 1603 mcp->mb[2] = vha->hw->loop_reset_delay; 1604 mcp->mb[9] = vha->vp_idx; 1605 1606 mcp->in_mb = MBX_0; 1607 mcp->tov = MBX_TOV_SECONDS; 1608 mcp->flags = 0; 1609 rval = qla2x00_mailbox_command(vha, mcp); 1610 if (rval != QLA_SUCCESS) { 1611 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103f, 1612 "Failed=%x.\n", rval); 1613 } 1614 1615 /* Issue marker IOCB. */ 1616 rval2 = qla2x00_marker(vha, vha->hw->base_qpair, fcport->loop_id, 0, 1617 MK_SYNC_ID); 1618 if (rval2 != QLA_SUCCESS) { 1619 ql_dbg(ql_dbg_mbx, vha, 0x1040, 1620 "Failed to issue marker IOCB (%x).\n", rval2); 1621 } else { 1622 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1041, 1623 "Done %s.\n", __func__); 1624 } 1625 1626 return rval; 1627 } 1628 1629 int 1630 qla2x00_lun_reset(struct fc_port *fcport, uint64_t l, int tag) 1631 { 1632 int rval, rval2; 1633 mbx_cmd_t mc; 1634 mbx_cmd_t *mcp = &mc; 1635 scsi_qla_host_t *vha; 1636 1637 vha = fcport->vha; 1638 1639 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1042, 1640 "Entered %s.\n", __func__); 1641 1642 mcp->mb[0] = MBC_LUN_RESET; 1643 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0; 1644 if (HAS_EXTENDED_IDS(vha->hw)) 1645 mcp->mb[1] = fcport->loop_id; 1646 else 1647 mcp->mb[1] = fcport->loop_id << 8; 1648 mcp->mb[2] = (u32)l; 1649 mcp->mb[3] = 0; 1650 mcp->mb[9] = vha->vp_idx; 1651 1652 mcp->in_mb = MBX_0; 1653 mcp->tov = MBX_TOV_SECONDS; 1654 mcp->flags = 0; 1655 rval = qla2x00_mailbox_command(vha, mcp); 1656 if (rval != QLA_SUCCESS) { 1657 ql_dbg(ql_dbg_mbx, vha, 0x1043, "Failed=%x.\n", rval); 1658 } 1659 1660 /* Issue marker IOCB. */ 1661 rval2 = qla2x00_marker(vha, vha->hw->base_qpair, fcport->loop_id, l, 1662 MK_SYNC_ID_LUN); 1663 if (rval2 != QLA_SUCCESS) { 1664 ql_dbg(ql_dbg_mbx, vha, 0x1044, 1665 "Failed to issue marker IOCB (%x).\n", rval2); 1666 } else { 1667 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1045, 1668 "Done %s.\n", __func__); 1669 } 1670 1671 return rval; 1672 } 1673 1674 /* 1675 * qla2x00_get_adapter_id 1676 * Get adapter ID and topology. 1677 * 1678 * Input: 1679 * ha = adapter block pointer. 1680 * id = pointer for loop ID. 1681 * al_pa = pointer for AL_PA. 1682 * area = pointer for area. 1683 * domain = pointer for domain. 1684 * top = pointer for topology. 1685 * TARGET_QUEUE_LOCK must be released. 1686 * ADAPTER_STATE_LOCK must be released. 1687 * 1688 * Returns: 1689 * qla2x00 local function return status code. 1690 * 1691 * Context: 1692 * Kernel context. 
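 *
 * Calling sketch mirroring the documented parameters (variable names
 * are illustrative):
 *
 *	uint16_t loop_id, topo, sw_cap;
 *	uint8_t al_pa, area, domain;
 *
 *	rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa, &area,
 *	    &domain, &topo, &sw_cap);
 *	if (rval == QLA_SUCCESS)
 *		vha->loop_id = loop_id;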
1693 */ 1694 int 1695 qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa, 1696 uint8_t *area, uint8_t *domain, uint16_t *top, uint16_t *sw_cap) 1697 { 1698 int rval; 1699 mbx_cmd_t mc; 1700 mbx_cmd_t *mcp = &mc; 1701 1702 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1046, 1703 "Entered %s.\n", __func__); 1704 1705 mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID; 1706 mcp->mb[9] = vha->vp_idx; 1707 mcp->out_mb = MBX_9|MBX_0; 1708 mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 1709 if (IS_CNA_CAPABLE(vha->hw)) 1710 mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10; 1711 if (IS_FWI2_CAPABLE(vha->hw)) 1712 mcp->in_mb |= MBX_19|MBX_18|MBX_17|MBX_16; 1713 if (IS_QLA27XX(vha->hw) || IS_QLA28XX(vha->hw)) 1714 mcp->in_mb |= MBX_15|MBX_21|MBX_22|MBX_23; 1715 1716 mcp->tov = MBX_TOV_SECONDS; 1717 mcp->flags = 0; 1718 rval = qla2x00_mailbox_command(vha, mcp); 1719 if (mcp->mb[0] == MBS_COMMAND_ERROR) 1720 rval = QLA_COMMAND_ERROR; 1721 else if (mcp->mb[0] == MBS_INVALID_COMMAND) 1722 rval = QLA_INVALID_COMMAND; 1723 1724 /* Return data. */ 1725 *id = mcp->mb[1]; 1726 *al_pa = LSB(mcp->mb[2]); 1727 *area = MSB(mcp->mb[2]); 1728 *domain = LSB(mcp->mb[3]); 1729 *top = mcp->mb[6]; 1730 *sw_cap = mcp->mb[7]; 1731 1732 if (rval != QLA_SUCCESS) { 1733 /*EMPTY*/ 1734 ql_dbg(ql_dbg_mbx, vha, 0x1047, "Failed=%x.\n", rval); 1735 } else { 1736 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1048, 1737 "Done %s.\n", __func__); 1738 1739 if (IS_CNA_CAPABLE(vha->hw)) { 1740 vha->fcoe_vlan_id = mcp->mb[9] & 0xfff; 1741 vha->fcoe_fcf_idx = mcp->mb[10]; 1742 vha->fcoe_vn_port_mac[5] = mcp->mb[11] >> 8; 1743 vha->fcoe_vn_port_mac[4] = mcp->mb[11] & 0xff; 1744 vha->fcoe_vn_port_mac[3] = mcp->mb[12] >> 8; 1745 vha->fcoe_vn_port_mac[2] = mcp->mb[12] & 0xff; 1746 vha->fcoe_vn_port_mac[1] = mcp->mb[13] >> 8; 1747 vha->fcoe_vn_port_mac[0] = mcp->mb[13] & 0xff; 1748 } 1749 /* If FA-WWN supported */ 1750 if (IS_FAWWN_CAPABLE(vha->hw)) { 1751 if (mcp->mb[7] & BIT_14) { 1752 vha->port_name[0] = MSB(mcp->mb[16]); 1753 vha->port_name[1] = LSB(mcp->mb[16]); 1754 vha->port_name[2] = MSB(mcp->mb[17]); 1755 vha->port_name[3] = LSB(mcp->mb[17]); 1756 vha->port_name[4] = MSB(mcp->mb[18]); 1757 vha->port_name[5] = LSB(mcp->mb[18]); 1758 vha->port_name[6] = MSB(mcp->mb[19]); 1759 vha->port_name[7] = LSB(mcp->mb[19]); 1760 fc_host_port_name(vha->host) = 1761 wwn_to_u64(vha->port_name); 1762 ql_dbg(ql_dbg_mbx, vha, 0x10ca, 1763 "FA-WWN acquired %016llx\n", 1764 wwn_to_u64(vha->port_name)); 1765 } 1766 } 1767 1768 if (IS_QLA27XX(vha->hw) || IS_QLA28XX(vha->hw)) { 1769 vha->bbcr = mcp->mb[15]; 1770 if (mcp->mb[7] & SCM_EDC_ACC_RECEIVED) { 1771 ql_log(ql_log_info, vha, 0x11a4, 1772 "SCM: EDC ELS completed, flags 0x%x\n", 1773 mcp->mb[21]); 1774 } 1775 if (mcp->mb[7] & SCM_RDF_ACC_RECEIVED) { 1776 vha->hw->flags.scm_enabled = 1; 1777 vha->scm_fabric_connection_flags |= 1778 SCM_FLAG_RDF_COMPLETED; 1779 ql_log(ql_log_info, vha, 0x11a5, 1780 "SCM: RDF ELS completed, flags 0x%x\n", 1781 mcp->mb[23]); 1782 } 1783 } 1784 } 1785 1786 return rval; 1787 } 1788 1789 /* 1790 * qla2x00_get_retry_cnt 1791 * Get current firmware login retry count and delay. 1792 * 1793 * Input: 1794 * ha = adapter block pointer. 1795 * retry_cnt = pointer to login retry count. 1796 * tov = pointer to login timeout value. 1797 * 1798 * Returns: 1799 * qla2x00 local function return status code. 1800 * 1801 * Context: 1802 * Kernel context. 
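 *
 * Note that *retry_cnt and *tov must already hold the caller's current
 * values on entry; they are only overwritten when the firmware reports a
 * larger retry_count * timeout product. Sketch (variable names are
 * illustrative):
 *
 *	uint8_t retries = ha->retry_count;
 *	uint8_t tov = ha->login_timeout;
 *	uint16_t r_a_tov;
 *
 *	if (qla2x00_get_retry_cnt(vha, &retries, &tov, &r_a_tov) ==
 *	    QLA_SUCCESS)
 *		ha->r_a_tov = r_a_tov;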
1803 */ 1804 int 1805 qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov, 1806 uint16_t *r_a_tov) 1807 { 1808 int rval; 1809 uint16_t ratov; 1810 mbx_cmd_t mc; 1811 mbx_cmd_t *mcp = &mc; 1812 1813 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1049, 1814 "Entered %s.\n", __func__); 1815 1816 mcp->mb[0] = MBC_GET_RETRY_COUNT; 1817 mcp->out_mb = MBX_0; 1818 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0; 1819 mcp->tov = MBX_TOV_SECONDS; 1820 mcp->flags = 0; 1821 rval = qla2x00_mailbox_command(vha, mcp); 1822 1823 if (rval != QLA_SUCCESS) { 1824 /*EMPTY*/ 1825 ql_dbg(ql_dbg_mbx, vha, 0x104a, 1826 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 1827 } else { 1828 /* Convert returned data and check our values. */ 1829 *r_a_tov = mcp->mb[3] / 2; 1830 ratov = (mcp->mb[3]/2) / 10; /* mb[3] value is in 100ms */ 1831 if (mcp->mb[1] * ratov > (*retry_cnt) * (*tov)) { 1832 /* Update to the larger values */ 1833 *retry_cnt = (uint8_t)mcp->mb[1]; 1834 *tov = ratov; 1835 } 1836 1837 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104b, 1838 "Done %s mb3=%d ratov=%d.\n", __func__, mcp->mb[3], ratov); 1839 } 1840 1841 return rval; 1842 } 1843 1844 /* 1845 * qla2x00_init_firmware 1846 * Initialize adapter firmware. 1847 * 1848 * Input: 1849 * ha = adapter block pointer. 1850 * dptr = Initialization control block pointer. 1851 * size = size of initialization control block. 1852 * TARGET_QUEUE_LOCK must be released. 1853 * ADAPTER_STATE_LOCK must be released. 1854 * 1855 * Returns: 1856 * qla2x00 local function return status code. 1857 * 1858 * Context: 1859 * Kernel context. 1860 */ 1861 int 1862 qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size) 1863 { 1864 int rval; 1865 mbx_cmd_t mc; 1866 mbx_cmd_t *mcp = &mc; 1867 struct qla_hw_data *ha = vha->hw; 1868 1869 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104c, 1870 "Entered %s.\n", __func__); 1871 1872 if (IS_P3P_TYPE(ha) && ql2xdbwr) 1873 qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr, 1874 (0x04 | (ha->portnum << 5) | (0 << 8) | (0 << 16))); 1875 1876 if (ha->flags.npiv_supported) 1877 mcp->mb[0] = MBC_MID_INITIALIZE_FIRMWARE; 1878 else 1879 mcp->mb[0] = MBC_INITIALIZE_FIRMWARE; 1880 1881 mcp->mb[1] = 0; 1882 mcp->mb[2] = MSW(ha->init_cb_dma); 1883 mcp->mb[3] = LSW(ha->init_cb_dma); 1884 mcp->mb[6] = MSW(MSD(ha->init_cb_dma)); 1885 mcp->mb[7] = LSW(MSD(ha->init_cb_dma)); 1886 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 1887 if (ha->ex_init_cb && ha->ex_init_cb->ex_version) { 1888 mcp->mb[1] = BIT_0; 1889 mcp->mb[10] = MSW(ha->ex_init_cb_dma); 1890 mcp->mb[11] = LSW(ha->ex_init_cb_dma); 1891 mcp->mb[12] = MSW(MSD(ha->ex_init_cb_dma)); 1892 mcp->mb[13] = LSW(MSD(ha->ex_init_cb_dma)); 1893 mcp->mb[14] = sizeof(*ha->ex_init_cb); 1894 mcp->out_mb |= MBX_14|MBX_13|MBX_12|MBX_11|MBX_10; 1895 } 1896 1897 if (ha->flags.scm_supported_f || vha->flags.nvme2_enabled) { 1898 mcp->mb[1] |= BIT_1; 1899 mcp->mb[16] = MSW(ha->sf_init_cb_dma); 1900 mcp->mb[17] = LSW(ha->sf_init_cb_dma); 1901 mcp->mb[18] = MSW(MSD(ha->sf_init_cb_dma)); 1902 mcp->mb[19] = LSW(MSD(ha->sf_init_cb_dma)); 1903 mcp->mb[15] = sizeof(*ha->sf_init_cb); 1904 mcp->out_mb |= MBX_19|MBX_18|MBX_17|MBX_16|MBX_15; 1905 } 1906 1907 /* 1 and 2 should normally be captured. */ 1908 mcp->in_mb = MBX_2|MBX_1|MBX_0; 1909 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) 1910 /* mb3 is additional info about the installed SFP. 
*/ 1911 mcp->in_mb |= MBX_3; 1912 mcp->buf_size = size; 1913 mcp->flags = MBX_DMA_OUT; 1914 mcp->tov = MBX_TOV_SECONDS; 1915 rval = qla2x00_mailbox_command(vha, mcp); 1916 1917 if (rval != QLA_SUCCESS) { 1918 /*EMPTY*/ 1919 ql_dbg(ql_dbg_mbx, vha, 0x104d, 1920 "Failed=%x mb[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x.\n", 1921 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3]); 1922 if (ha->init_cb) { 1923 ql_dbg(ql_dbg_mbx, vha, 0x104d, "init_cb:\n"); 1924 ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 1925 0x0104d, ha->init_cb, sizeof(*ha->init_cb)); 1926 } 1927 if (ha->ex_init_cb && ha->ex_init_cb->ex_version) { 1928 ql_dbg(ql_dbg_mbx, vha, 0x104d, "ex_init_cb:\n"); 1929 ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 1930 0x0104d, ha->ex_init_cb, sizeof(*ha->ex_init_cb)); 1931 } 1932 } else { 1933 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 1934 if (mcp->mb[2] == 6 || mcp->mb[3] == 2) 1935 ql_dbg(ql_dbg_mbx, vha, 0x119d, 1936 "Invalid SFP/Validation Failed\n"); 1937 } 1938 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104e, 1939 "Done %s.\n", __func__); 1940 } 1941 1942 return rval; 1943 } 1944 1945 1946 /* 1947 * qla2x00_get_port_database 1948 * Issue normal/enhanced get port database mailbox command 1949 * and copy device name as necessary. 1950 * 1951 * Input: 1952 * ha = adapter state pointer. 1953 * dev = structure pointer. 1954 * opt = enhanced cmd option byte. 1955 * 1956 * Returns: 1957 * qla2x00 local function return status code. 1958 * 1959 * Context: 1960 * Kernel context. 1961 */ 1962 int 1963 qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt) 1964 { 1965 int rval; 1966 mbx_cmd_t mc; 1967 mbx_cmd_t *mcp = &mc; 1968 port_database_t *pd; 1969 struct port_database_24xx *pd24; 1970 dma_addr_t pd_dma; 1971 struct qla_hw_data *ha = vha->hw; 1972 1973 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104f, 1974 "Entered %s.\n", __func__); 1975 1976 pd24 = NULL; 1977 pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma); 1978 if (pd == NULL) { 1979 ql_log(ql_log_warn, vha, 0x1050, 1980 "Failed to allocate port database structure.\n"); 1981 fcport->query = 0; 1982 return QLA_MEMORY_ALLOC_FAILED; 1983 } 1984 1985 mcp->mb[0] = MBC_GET_PORT_DATABASE; 1986 if (opt != 0 && !IS_FWI2_CAPABLE(ha)) 1987 mcp->mb[0] = MBC_ENHANCED_GET_PORT_DATABASE; 1988 mcp->mb[2] = MSW(pd_dma); 1989 mcp->mb[3] = LSW(pd_dma); 1990 mcp->mb[6] = MSW(MSD(pd_dma)); 1991 mcp->mb[7] = LSW(MSD(pd_dma)); 1992 mcp->mb[9] = vha->vp_idx; 1993 mcp->out_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0; 1994 mcp->in_mb = MBX_0; 1995 if (IS_FWI2_CAPABLE(ha)) { 1996 mcp->mb[1] = fcport->loop_id; 1997 mcp->mb[10] = opt; 1998 mcp->out_mb |= MBX_10|MBX_1; 1999 mcp->in_mb |= MBX_1; 2000 } else if (HAS_EXTENDED_IDS(ha)) { 2001 mcp->mb[1] = fcport->loop_id; 2002 mcp->mb[10] = opt; 2003 mcp->out_mb |= MBX_10|MBX_1; 2004 } else { 2005 mcp->mb[1] = fcport->loop_id << 8 | opt; 2006 mcp->out_mb |= MBX_1; 2007 } 2008 mcp->buf_size = IS_FWI2_CAPABLE(ha) ? 2009 PORT_DATABASE_24XX_SIZE : PORT_DATABASE_SIZE; 2010 mcp->flags = MBX_DMA_IN; 2011 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2); 2012 rval = qla2x00_mailbox_command(vha, mcp); 2013 if (rval != QLA_SUCCESS) 2014 goto gpd_error_out; 2015 2016 if (IS_FWI2_CAPABLE(ha)) { 2017 uint64_t zero = 0; 2018 u8 current_login_state, last_login_state; 2019 2020 pd24 = (struct port_database_24xx *) pd; 2021 2022 /* Check for logged in state. 
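 * For NVMe targets the firmware reports the PRLI state in the upper
 * nibble of the current/last login-state bytes, while for FCP it is
 * kept in the lower nibble, hence the different shift and mask below.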
*/ 2023 if (NVME_TARGET(ha, fcport)) { 2024 current_login_state = pd24->current_login_state >> 4; 2025 last_login_state = pd24->last_login_state >> 4; 2026 } else { 2027 current_login_state = pd24->current_login_state & 0xf; 2028 last_login_state = pd24->last_login_state & 0xf; 2029 } 2030 fcport->current_login_state = pd24->current_login_state; 2031 fcport->last_login_state = pd24->last_login_state; 2032 2033 /* Check for logged in state. */ 2034 if (current_login_state != PDS_PRLI_COMPLETE && 2035 last_login_state != PDS_PRLI_COMPLETE) { 2036 ql_dbg(ql_dbg_mbx, vha, 0x119a, 2037 "Unable to verify login-state (%x/%x) for loop_id %x.\n", 2038 current_login_state, last_login_state, 2039 fcport->loop_id); 2040 rval = QLA_FUNCTION_FAILED; 2041 2042 if (!fcport->query) 2043 goto gpd_error_out; 2044 } 2045 2046 if (fcport->loop_id == FC_NO_LOOP_ID || 2047 (memcmp(fcport->port_name, (uint8_t *)&zero, 8) && 2048 memcmp(fcport->port_name, pd24->port_name, 8))) { 2049 /* We lost the device mid way. */ 2050 rval = QLA_NOT_LOGGED_IN; 2051 goto gpd_error_out; 2052 } 2053 2054 /* Names are little-endian. */ 2055 memcpy(fcport->node_name, pd24->node_name, WWN_SIZE); 2056 memcpy(fcport->port_name, pd24->port_name, WWN_SIZE); 2057 2058 /* Get port_id of device. */ 2059 fcport->d_id.b.domain = pd24->port_id[0]; 2060 fcport->d_id.b.area = pd24->port_id[1]; 2061 fcport->d_id.b.al_pa = pd24->port_id[2]; 2062 fcport->d_id.b.rsvd_1 = 0; 2063 2064 /* If not target must be initiator or unknown type. */ 2065 if ((pd24->prli_svc_param_word_3[0] & BIT_4) == 0) 2066 fcport->port_type = FCT_INITIATOR; 2067 else 2068 fcport->port_type = FCT_TARGET; 2069 2070 /* Passback COS information. */ 2071 fcport->supported_classes = (pd24->flags & PDF_CLASS_2) ? 2072 FC_COS_CLASS2 : FC_COS_CLASS3; 2073 2074 if (pd24->prli_svc_param_word_3[0] & BIT_7) 2075 fcport->flags |= FCF_CONF_COMP_SUPPORTED; 2076 } else { 2077 uint64_t zero = 0; 2078 2079 /* Check for logged in state. */ 2080 if (pd->master_state != PD_STATE_PORT_LOGGED_IN && 2081 pd->slave_state != PD_STATE_PORT_LOGGED_IN) { 2082 ql_dbg(ql_dbg_mbx, vha, 0x100a, 2083 "Unable to verify login-state (%x/%x) - " 2084 "portid=%02x%02x%02x.\n", pd->master_state, 2085 pd->slave_state, fcport->d_id.b.domain, 2086 fcport->d_id.b.area, fcport->d_id.b.al_pa); 2087 rval = QLA_FUNCTION_FAILED; 2088 goto gpd_error_out; 2089 } 2090 2091 if (fcport->loop_id == FC_NO_LOOP_ID || 2092 (memcmp(fcport->port_name, (uint8_t *)&zero, 8) && 2093 memcmp(fcport->port_name, pd->port_name, 8))) { 2094 /* We lost the device mid way. */ 2095 rval = QLA_NOT_LOGGED_IN; 2096 goto gpd_error_out; 2097 } 2098 2099 /* Names are little-endian. */ 2100 memcpy(fcport->node_name, pd->node_name, WWN_SIZE); 2101 memcpy(fcport->port_name, pd->port_name, WWN_SIZE); 2102 2103 /* Get port_id of device. */ 2104 fcport->d_id.b.domain = pd->port_id[0]; 2105 fcport->d_id.b.area = pd->port_id[3]; 2106 fcport->d_id.b.al_pa = pd->port_id[2]; 2107 fcport->d_id.b.rsvd_1 = 0; 2108 2109 /* If not target must be initiator or unknown type. */ 2110 if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0) 2111 fcport->port_type = FCT_INITIATOR; 2112 else 2113 fcport->port_type = FCT_TARGET; 2114 2115 /* Passback COS information. */ 2116 fcport->supported_classes = (pd->options & BIT_4) ? 
2117 FC_COS_CLASS2 : FC_COS_CLASS3; 2118 } 2119 2120 gpd_error_out: 2121 dma_pool_free(ha->s_dma_pool, pd, pd_dma); 2122 fcport->query = 0; 2123 2124 if (rval != QLA_SUCCESS) { 2125 ql_dbg(ql_dbg_mbx, vha, 0x1052, 2126 "Failed=%x mb[0]=%x mb[1]=%x.\n", rval, 2127 mcp->mb[0], mcp->mb[1]); 2128 } else { 2129 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1053, 2130 "Done %s.\n", __func__); 2131 } 2132 2133 return rval; 2134 } 2135 2136 int 2137 qla24xx_get_port_database(scsi_qla_host_t *vha, u16 nport_handle, 2138 struct port_database_24xx *pdb) 2139 { 2140 mbx_cmd_t mc; 2141 mbx_cmd_t *mcp = &mc; 2142 dma_addr_t pdb_dma; 2143 int rval; 2144 2145 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1115, 2146 "Entered %s.\n", __func__); 2147 2148 memset(pdb, 0, sizeof(*pdb)); 2149 2150 pdb_dma = dma_map_single(&vha->hw->pdev->dev, pdb, 2151 sizeof(*pdb), DMA_FROM_DEVICE); 2152 if (!pdb_dma) { 2153 ql_log(ql_log_warn, vha, 0x1116, "Failed to map dma buffer.\n"); 2154 return QLA_MEMORY_ALLOC_FAILED; 2155 } 2156 2157 mcp->mb[0] = MBC_GET_PORT_DATABASE; 2158 mcp->mb[1] = nport_handle; 2159 mcp->mb[2] = MSW(LSD(pdb_dma)); 2160 mcp->mb[3] = LSW(LSD(pdb_dma)); 2161 mcp->mb[6] = MSW(MSD(pdb_dma)); 2162 mcp->mb[7] = LSW(MSD(pdb_dma)); 2163 mcp->mb[9] = 0; 2164 mcp->mb[10] = 0; 2165 mcp->out_mb = MBX_10|MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 2166 mcp->in_mb = MBX_1|MBX_0; 2167 mcp->buf_size = sizeof(*pdb); 2168 mcp->flags = MBX_DMA_IN; 2169 mcp->tov = vha->hw->login_timeout * 2; 2170 rval = qla2x00_mailbox_command(vha, mcp); 2171 2172 if (rval != QLA_SUCCESS) { 2173 ql_dbg(ql_dbg_mbx, vha, 0x111a, 2174 "Failed=%x mb[0]=%x mb[1]=%x.\n", 2175 rval, mcp->mb[0], mcp->mb[1]); 2176 } else { 2177 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111b, 2178 "Done %s.\n", __func__); 2179 } 2180 2181 dma_unmap_single(&vha->hw->pdev->dev, pdb_dma, 2182 sizeof(*pdb), DMA_FROM_DEVICE); 2183 2184 return rval; 2185 } 2186 2187 /* 2188 * qla2x00_get_firmware_state 2189 * Get adapter firmware state. 2190 * 2191 * Input: 2192 * ha = adapter block pointer. 2193 * dptr = pointer for firmware state. 2194 * TARGET_QUEUE_LOCK must be released. 2195 * ADAPTER_STATE_LOCK must be released. 2196 * 2197 * Returns: 2198 * qla2x00 local function return status code. 2199 * 2200 * Context: 2201 * Kernel context. 2202 */ 2203 int 2204 qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states) 2205 { 2206 int rval; 2207 mbx_cmd_t mc; 2208 mbx_cmd_t *mcp = &mc; 2209 struct qla_hw_data *ha = vha->hw; 2210 2211 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1054, 2212 "Entered %s.\n", __func__); 2213 2214 mcp->mb[0] = MBC_GET_FIRMWARE_STATE; 2215 mcp->out_mb = MBX_0; 2216 if (IS_FWI2_CAPABLE(vha->hw)) 2217 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 2218 else 2219 mcp->in_mb = MBX_1|MBX_0; 2220 mcp->tov = MBX_TOV_SECONDS; 2221 mcp->flags = 0; 2222 rval = qla2x00_mailbox_command(vha, mcp); 2223 2224 /* Return firmware states. 
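	 * Added note (editor, not in the original source): states[0] is the
	 * overall firmware state; on FWI2-capable adapters states[2] (mb[3])
	 * carries the SFP status and states[5] (mb[6]) the D-Port diagnostic
	 * status, as marked in the assignments below.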
*/ 2225 states[0] = mcp->mb[1]; 2226 if (IS_FWI2_CAPABLE(vha->hw)) { 2227 states[1] = mcp->mb[2]; 2228 states[2] = mcp->mb[3]; /* SFP info */ 2229 states[3] = mcp->mb[4]; 2230 states[4] = mcp->mb[5]; 2231 states[5] = mcp->mb[6]; /* DPORT status */ 2232 } 2233 2234 if (rval != QLA_SUCCESS) { 2235 /*EMPTY*/ 2236 ql_dbg(ql_dbg_mbx, vha, 0x1055, "Failed=%x.\n", rval); 2237 } else { 2238 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 2239 if (mcp->mb[2] == 6 || mcp->mb[3] == 2) 2240 ql_dbg(ql_dbg_mbx, vha, 0x119e, 2241 "Invalid SFP/Validation Failed\n"); 2242 } 2243 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1056, 2244 "Done %s.\n", __func__); 2245 } 2246 2247 return rval; 2248 } 2249 2250 /* 2251 * qla2x00_get_port_name 2252 * Issue get port name mailbox command. 2253 * Returned name is in big endian format. 2254 * 2255 * Input: 2256 * ha = adapter block pointer. 2257 * loop_id = loop ID of device. 2258 * name = pointer for name. 2259 * TARGET_QUEUE_LOCK must be released. 2260 * ADAPTER_STATE_LOCK must be released. 2261 * 2262 * Returns: 2263 * qla2x00 local function return status code. 2264 * 2265 * Context: 2266 * Kernel context. 2267 */ 2268 int 2269 qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name, 2270 uint8_t opt) 2271 { 2272 int rval; 2273 mbx_cmd_t mc; 2274 mbx_cmd_t *mcp = &mc; 2275 2276 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1057, 2277 "Entered %s.\n", __func__); 2278 2279 mcp->mb[0] = MBC_GET_PORT_NAME; 2280 mcp->mb[9] = vha->vp_idx; 2281 mcp->out_mb = MBX_9|MBX_1|MBX_0; 2282 if (HAS_EXTENDED_IDS(vha->hw)) { 2283 mcp->mb[1] = loop_id; 2284 mcp->mb[10] = opt; 2285 mcp->out_mb |= MBX_10; 2286 } else { 2287 mcp->mb[1] = loop_id << 8 | opt; 2288 } 2289 2290 mcp->in_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 2291 mcp->tov = MBX_TOV_SECONDS; 2292 mcp->flags = 0; 2293 rval = qla2x00_mailbox_command(vha, mcp); 2294 2295 if (rval != QLA_SUCCESS) { 2296 /*EMPTY*/ 2297 ql_dbg(ql_dbg_mbx, vha, 0x1058, "Failed=%x.\n", rval); 2298 } else { 2299 if (name != NULL) { 2300 /* This function returns name in big endian. */ 2301 name[0] = MSB(mcp->mb[2]); 2302 name[1] = LSB(mcp->mb[2]); 2303 name[2] = MSB(mcp->mb[3]); 2304 name[3] = LSB(mcp->mb[3]); 2305 name[4] = MSB(mcp->mb[6]); 2306 name[5] = LSB(mcp->mb[6]); 2307 name[6] = MSB(mcp->mb[7]); 2308 name[7] = LSB(mcp->mb[7]); 2309 } 2310 2311 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1059, 2312 "Done %s.\n", __func__); 2313 } 2314 2315 return rval; 2316 } 2317 2318 /* 2319 * qla24xx_link_initialization 2320 * Issue link initialization mailbox command. 2321 * 2322 * Input: 2323 * ha = adapter block pointer. 2324 * TARGET_QUEUE_LOCK must be released. 2325 * ADAPTER_STATE_LOCK must be released. 2326 * 2327 * Returns: 2328 * qla2x00 local function return status code. 2329 * 2330 * Context: 2331 * Kernel context. 
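 *
 * Illustrative call (editor's sketch, not taken from the original source):
 *
 *	rval = qla24xx_link_initialize(vha);
 *
 * Note that the routine returns QLA_FUNCTION_FAILED immediately on
 * non-FWI2-capable or CNA-capable parts.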
2332 */ 2333 int 2334 qla24xx_link_initialize(scsi_qla_host_t *vha) 2335 { 2336 int rval; 2337 mbx_cmd_t mc; 2338 mbx_cmd_t *mcp = &mc; 2339 2340 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1152, 2341 "Entered %s.\n", __func__); 2342 2343 if (!IS_FWI2_CAPABLE(vha->hw) || IS_CNA_CAPABLE(vha->hw)) 2344 return QLA_FUNCTION_FAILED; 2345 2346 mcp->mb[0] = MBC_LINK_INITIALIZATION; 2347 mcp->mb[1] = BIT_4; 2348 if (vha->hw->operating_mode == LOOP) 2349 mcp->mb[1] |= BIT_6; 2350 else 2351 mcp->mb[1] |= BIT_5; 2352 mcp->mb[2] = 0; 2353 mcp->mb[3] = 0; 2354 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 2355 mcp->in_mb = MBX_0; 2356 mcp->tov = MBX_TOV_SECONDS; 2357 mcp->flags = 0; 2358 rval = qla2x00_mailbox_command(vha, mcp); 2359 2360 if (rval != QLA_SUCCESS) { 2361 ql_dbg(ql_dbg_mbx, vha, 0x1153, "Failed=%x.\n", rval); 2362 } else { 2363 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1154, 2364 "Done %s.\n", __func__); 2365 } 2366 2367 return rval; 2368 } 2369 2370 /* 2371 * qla2x00_lip_reset 2372 * Issue LIP reset mailbox command. 2373 * 2374 * Input: 2375 * ha = adapter block pointer. 2376 * TARGET_QUEUE_LOCK must be released. 2377 * ADAPTER_STATE_LOCK must be released. 2378 * 2379 * Returns: 2380 * qla2x00 local function return status code. 2381 * 2382 * Context: 2383 * Kernel context. 2384 */ 2385 int 2386 qla2x00_lip_reset(scsi_qla_host_t *vha) 2387 { 2388 int rval; 2389 mbx_cmd_t mc; 2390 mbx_cmd_t *mcp = &mc; 2391 2392 ql_dbg(ql_dbg_disc, vha, 0x105a, 2393 "Entered %s.\n", __func__); 2394 2395 if (IS_CNA_CAPABLE(vha->hw)) { 2396 /* Logout across all FCFs. */ 2397 mcp->mb[0] = MBC_LIP_FULL_LOGIN; 2398 mcp->mb[1] = BIT_1; 2399 mcp->mb[2] = 0; 2400 mcp->out_mb = MBX_2|MBX_1|MBX_0; 2401 } else if (IS_FWI2_CAPABLE(vha->hw)) { 2402 mcp->mb[0] = MBC_LIP_FULL_LOGIN; 2403 mcp->mb[1] = BIT_4; 2404 mcp->mb[2] = 0; 2405 mcp->mb[3] = vha->hw->loop_reset_delay; 2406 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 2407 } else { 2408 mcp->mb[0] = MBC_LIP_RESET; 2409 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 2410 if (HAS_EXTENDED_IDS(vha->hw)) { 2411 mcp->mb[1] = 0x00ff; 2412 mcp->mb[10] = 0; 2413 mcp->out_mb |= MBX_10; 2414 } else { 2415 mcp->mb[1] = 0xff00; 2416 } 2417 mcp->mb[2] = vha->hw->loop_reset_delay; 2418 mcp->mb[3] = 0; 2419 } 2420 mcp->in_mb = MBX_0; 2421 mcp->tov = MBX_TOV_SECONDS; 2422 mcp->flags = 0; 2423 rval = qla2x00_mailbox_command(vha, mcp); 2424 2425 if (rval != QLA_SUCCESS) { 2426 /*EMPTY*/ 2427 ql_dbg(ql_dbg_mbx, vha, 0x105b, "Failed=%x.\n", rval); 2428 } else { 2429 /*EMPTY*/ 2430 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105c, 2431 "Done %s.\n", __func__); 2432 } 2433 2434 return rval; 2435 } 2436 2437 /* 2438 * qla2x00_send_sns 2439 * Send SNS command. 2440 * 2441 * Input: 2442 * ha = adapter block pointer. 2443 * sns = pointer for command. 2444 * cmd_size = command size. 2445 * buf_size = response/command size. 2446 * TARGET_QUEUE_LOCK must be released. 2447 * ADAPTER_STATE_LOCK must be released. 2448 * 2449 * Returns: 2450 * qla2x00 local function return status code. 2451 * 2452 * Context: 2453 * Kernel context. 
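 *
 * Illustrative call (editor's sketch; ha->sns_cmd_dma, GA_NXT_SNS_CMD_SIZE
 * and struct sns_cmd_pkt follow the naming used by the driver's GS code and
 * are assumptions here, not part of this file):
 *
 *	rval = qla2x00_send_sns(vha, ha->sns_cmd_dma,
 *	    GA_NXT_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt));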
2454 */ 2455 int 2456 qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address, 2457 uint16_t cmd_size, size_t buf_size) 2458 { 2459 int rval; 2460 mbx_cmd_t mc; 2461 mbx_cmd_t *mcp = &mc; 2462 2463 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105d, 2464 "Entered %s.\n", __func__); 2465 2466 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105e, 2467 "Retry cnt=%d ratov=%d total tov=%d.\n", 2468 vha->hw->retry_count, vha->hw->login_timeout, mcp->tov); 2469 2470 mcp->mb[0] = MBC_SEND_SNS_COMMAND; 2471 mcp->mb[1] = cmd_size; 2472 mcp->mb[2] = MSW(sns_phys_address); 2473 mcp->mb[3] = LSW(sns_phys_address); 2474 mcp->mb[6] = MSW(MSD(sns_phys_address)); 2475 mcp->mb[7] = LSW(MSD(sns_phys_address)); 2476 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 2477 mcp->in_mb = MBX_0|MBX_1; 2478 mcp->buf_size = buf_size; 2479 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN; 2480 mcp->tov = (vha->hw->login_timeout * 2) + (vha->hw->login_timeout / 2); 2481 rval = qla2x00_mailbox_command(vha, mcp); 2482 2483 if (rval != QLA_SUCCESS) { 2484 /*EMPTY*/ 2485 ql_dbg(ql_dbg_mbx, vha, 0x105f, 2486 "Failed=%x mb[0]=%x mb[1]=%x.\n", 2487 rval, mcp->mb[0], mcp->mb[1]); 2488 } else { 2489 /*EMPTY*/ 2490 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1060, 2491 "Done %s.\n", __func__); 2492 } 2493 2494 return rval; 2495 } 2496 2497 int 2498 qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, 2499 uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt) 2500 { 2501 int rval; 2502 2503 struct logio_entry_24xx *lg; 2504 dma_addr_t lg_dma; 2505 uint32_t iop[2]; 2506 struct qla_hw_data *ha = vha->hw; 2507 struct req_que *req; 2508 2509 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1061, 2510 "Entered %s.\n", __func__); 2511 2512 if (vha->vp_idx && vha->qpair) 2513 req = vha->qpair->req; 2514 else 2515 req = ha->req_q_map[0]; 2516 2517 lg = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma); 2518 if (lg == NULL) { 2519 ql_log(ql_log_warn, vha, 0x1062, 2520 "Failed to allocate login IOCB.\n"); 2521 return QLA_MEMORY_ALLOC_FAILED; 2522 } 2523 2524 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE; 2525 lg->entry_count = 1; 2526 lg->handle = make_handle(req->id, lg->handle); 2527 lg->nport_handle = cpu_to_le16(loop_id); 2528 lg->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI); 2529 if (opt & BIT_0) 2530 lg->control_flags |= cpu_to_le16(LCF_COND_PLOGI); 2531 if (opt & BIT_1) 2532 lg->control_flags |= cpu_to_le16(LCF_SKIP_PRLI); 2533 lg->port_id[0] = al_pa; 2534 lg->port_id[1] = area; 2535 lg->port_id[2] = domain; 2536 lg->vp_index = vha->vp_idx; 2537 rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0, 2538 (ha->r_a_tov / 10 * 2) + 2); 2539 if (rval != QLA_SUCCESS) { 2540 ql_dbg(ql_dbg_mbx, vha, 0x1063, 2541 "Failed to issue login IOCB (%x).\n", rval); 2542 } else if (lg->entry_status != 0) { 2543 ql_dbg(ql_dbg_mbx, vha, 0x1064, 2544 "Failed to complete IOCB -- error status (%x).\n", 2545 lg->entry_status); 2546 rval = QLA_FUNCTION_FAILED; 2547 } else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) { 2548 iop[0] = le32_to_cpu(lg->io_parameter[0]); 2549 iop[1] = le32_to_cpu(lg->io_parameter[1]); 2550 2551 ql_dbg(ql_dbg_mbx, vha, 0x1065, 2552 "Failed to complete IOCB -- completion status (%x) " 2553 "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status), 2554 iop[0], iop[1]); 2555 2556 switch (iop[0]) { 2557 case LSC_SCODE_PORTID_USED: 2558 mb[0] = MBS_PORT_ID_USED; 2559 mb[1] = LSW(iop[1]); 2560 break; 2561 case LSC_SCODE_NPORT_USED: 2562 mb[0] = MBS_LOOP_ID_USED; 2563 break; 2564 case LSC_SCODE_NOLINK: 2565 case LSC_SCODE_NOIOCB: 
2566 case LSC_SCODE_NOXCB: 2567 case LSC_SCODE_CMD_FAILED: 2568 case LSC_SCODE_NOFABRIC: 2569 case LSC_SCODE_FW_NOT_READY: 2570 case LSC_SCODE_NOT_LOGGED_IN: 2571 case LSC_SCODE_NOPCB: 2572 case LSC_SCODE_ELS_REJECT: 2573 case LSC_SCODE_CMD_PARAM_ERR: 2574 case LSC_SCODE_NONPORT: 2575 case LSC_SCODE_LOGGED_IN: 2576 case LSC_SCODE_NOFLOGI_ACC: 2577 default: 2578 mb[0] = MBS_COMMAND_ERROR; 2579 break; 2580 } 2581 } else { 2582 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1066, 2583 "Done %s.\n", __func__); 2584 2585 iop[0] = le32_to_cpu(lg->io_parameter[0]); 2586 2587 mb[0] = MBS_COMMAND_COMPLETE; 2588 mb[1] = 0; 2589 if (iop[0] & BIT_4) { 2590 if (iop[0] & BIT_8) 2591 mb[1] |= BIT_1; 2592 } else 2593 mb[1] = BIT_0; 2594 2595 /* Passback COS information. */ 2596 mb[10] = 0; 2597 if (lg->io_parameter[7] || lg->io_parameter[8]) 2598 mb[10] |= BIT_0; /* Class 2. */ 2599 if (lg->io_parameter[9] || lg->io_parameter[10]) 2600 mb[10] |= BIT_1; /* Class 3. */ 2601 if (lg->io_parameter[0] & cpu_to_le32(BIT_7)) 2602 mb[10] |= BIT_7; /* Confirmed Completion 2603 * Allowed 2604 */ 2605 } 2606 2607 dma_pool_free(ha->s_dma_pool, lg, lg_dma); 2608 2609 return rval; 2610 } 2611 2612 /* 2613 * qla2x00_login_fabric 2614 * Issue login fabric port mailbox command. 2615 * 2616 * Input: 2617 * ha = adapter block pointer. 2618 * loop_id = device loop ID. 2619 * domain = device domain. 2620 * area = device area. 2621 * al_pa = device AL_PA. 2622 * status = pointer for return status. 2623 * opt = command options. 2624 * TARGET_QUEUE_LOCK must be released. 2625 * ADAPTER_STATE_LOCK must be released. 2626 * 2627 * Returns: 2628 * qla2x00 local function return status code. 2629 * 2630 * Context: 2631 * Kernel context. 2632 */ 2633 int 2634 qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, 2635 uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt) 2636 { 2637 int rval; 2638 mbx_cmd_t mc; 2639 mbx_cmd_t *mcp = &mc; 2640 struct qla_hw_data *ha = vha->hw; 2641 2642 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1067, 2643 "Entered %s.\n", __func__); 2644 2645 mcp->mb[0] = MBC_LOGIN_FABRIC_PORT; 2646 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 2647 if (HAS_EXTENDED_IDS(ha)) { 2648 mcp->mb[1] = loop_id; 2649 mcp->mb[10] = opt; 2650 mcp->out_mb |= MBX_10; 2651 } else { 2652 mcp->mb[1] = (loop_id << 8) | opt; 2653 } 2654 mcp->mb[2] = domain; 2655 mcp->mb[3] = area << 8 | al_pa; 2656 2657 mcp->in_mb = MBX_7|MBX_6|MBX_2|MBX_1|MBX_0; 2658 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2); 2659 mcp->flags = 0; 2660 rval = qla2x00_mailbox_command(vha, mcp); 2661 2662 /* Return mailbox statuses. */ 2663 if (mb != NULL) { 2664 mb[0] = mcp->mb[0]; 2665 mb[1] = mcp->mb[1]; 2666 mb[2] = mcp->mb[2]; 2667 mb[6] = mcp->mb[6]; 2668 mb[7] = mcp->mb[7]; 2669 /* COS retrieved from Get-Port-Database mailbox command. */ 2670 mb[10] = 0; 2671 } 2672 2673 if (rval != QLA_SUCCESS) { 2674 /* RLU tmp code: need to change main mailbox_command function to 2675 * return ok even when the mailbox completion value is not 2676 * SUCCESS. The caller needs to be responsible to interpret 2677 * the return values of this mailbox command if we're not 2678 * to change too much of the existing code. 
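	 * Added note (editor, not in the original source): the raw 0x40xx
	 * values tested below are the firmware's MBS_* mailbox completion
	 * statuses; returning success here leaves it to the login path to
	 * decode mb[0] on its own.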
2679 */ 2680 if (mcp->mb[0] == 0x4001 || mcp->mb[0] == 0x4002 || 2681 mcp->mb[0] == 0x4003 || mcp->mb[0] == 0x4005 || 2682 mcp->mb[0] == 0x4006) 2683 rval = QLA_SUCCESS; 2684 2685 /*EMPTY*/ 2686 ql_dbg(ql_dbg_mbx, vha, 0x1068, 2687 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", 2688 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); 2689 } else { 2690 /*EMPTY*/ 2691 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1069, 2692 "Done %s.\n", __func__); 2693 } 2694 2695 return rval; 2696 } 2697 2698 /* 2699 * qla2x00_login_local_device 2700 * Issue login loop port mailbox command. 2701 * 2702 * Input: 2703 * ha = adapter block pointer. 2704 * loop_id = device loop ID. 2705 * opt = command options. 2706 * 2707 * Returns: 2708 * Return status code. 2709 * 2710 * Context: 2711 * Kernel context. 2712 * 2713 */ 2714 int 2715 qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport, 2716 uint16_t *mb_ret, uint8_t opt) 2717 { 2718 int rval; 2719 mbx_cmd_t mc; 2720 mbx_cmd_t *mcp = &mc; 2721 struct qla_hw_data *ha = vha->hw; 2722 2723 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106a, 2724 "Entered %s.\n", __func__); 2725 2726 if (IS_FWI2_CAPABLE(ha)) 2727 return qla24xx_login_fabric(vha, fcport->loop_id, 2728 fcport->d_id.b.domain, fcport->d_id.b.area, 2729 fcport->d_id.b.al_pa, mb_ret, opt); 2730 2731 mcp->mb[0] = MBC_LOGIN_LOOP_PORT; 2732 if (HAS_EXTENDED_IDS(ha)) 2733 mcp->mb[1] = fcport->loop_id; 2734 else 2735 mcp->mb[1] = fcport->loop_id << 8; 2736 mcp->mb[2] = opt; 2737 mcp->out_mb = MBX_2|MBX_1|MBX_0; 2738 mcp->in_mb = MBX_7|MBX_6|MBX_1|MBX_0; 2739 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2); 2740 mcp->flags = 0; 2741 rval = qla2x00_mailbox_command(vha, mcp); 2742 2743 /* Return mailbox statuses. */ 2744 if (mb_ret != NULL) { 2745 mb_ret[0] = mcp->mb[0]; 2746 mb_ret[1] = mcp->mb[1]; 2747 mb_ret[6] = mcp->mb[6]; 2748 mb_ret[7] = mcp->mb[7]; 2749 } 2750 2751 if (rval != QLA_SUCCESS) { 2752 /* AV tmp code: need to change main mailbox_command function to 2753 * return ok even when the mailbox completion value is not 2754 * SUCCESS. The caller needs to be responsible to interpret 2755 * the return values of this mailbox command if we're not 2756 * to change too much of the existing code. 
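	 * (see the editor's note in qla2x00_login_fabric above on the raw
	 * 0x40xx completion codes)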
2757 */ 2758 if (mcp->mb[0] == 0x4005 || mcp->mb[0] == 0x4006) 2759 rval = QLA_SUCCESS; 2760 2761 ql_dbg(ql_dbg_mbx, vha, 0x106b, 2762 "Failed=%x mb[0]=%x mb[1]=%x mb[6]=%x mb[7]=%x.\n", 2763 rval, mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]); 2764 } else { 2765 /*EMPTY*/ 2766 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106c, 2767 "Done %s.\n", __func__); 2768 } 2769 2770 return (rval); 2771 } 2772 2773 int 2774 qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, 2775 uint8_t area, uint8_t al_pa) 2776 { 2777 int rval; 2778 struct logio_entry_24xx *lg; 2779 dma_addr_t lg_dma; 2780 struct qla_hw_data *ha = vha->hw; 2781 struct req_que *req; 2782 2783 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106d, 2784 "Entered %s.\n", __func__); 2785 2786 lg = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma); 2787 if (lg == NULL) { 2788 ql_log(ql_log_warn, vha, 0x106e, 2789 "Failed to allocate logout IOCB.\n"); 2790 return QLA_MEMORY_ALLOC_FAILED; 2791 } 2792 2793 req = vha->req; 2794 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE; 2795 lg->entry_count = 1; 2796 lg->handle = make_handle(req->id, lg->handle); 2797 lg->nport_handle = cpu_to_le16(loop_id); 2798 lg->control_flags = 2799 cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO| 2800 LCF_FREE_NPORT); 2801 lg->port_id[0] = al_pa; 2802 lg->port_id[1] = area; 2803 lg->port_id[2] = domain; 2804 lg->vp_index = vha->vp_idx; 2805 rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0, 2806 (ha->r_a_tov / 10 * 2) + 2); 2807 if (rval != QLA_SUCCESS) { 2808 ql_dbg(ql_dbg_mbx, vha, 0x106f, 2809 "Failed to issue logout IOCB (%x).\n", rval); 2810 } else if (lg->entry_status != 0) { 2811 ql_dbg(ql_dbg_mbx, vha, 0x1070, 2812 "Failed to complete IOCB -- error status (%x).\n", 2813 lg->entry_status); 2814 rval = QLA_FUNCTION_FAILED; 2815 } else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) { 2816 ql_dbg(ql_dbg_mbx, vha, 0x1071, 2817 "Failed to complete IOCB -- completion status (%x) " 2818 "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status), 2819 le32_to_cpu(lg->io_parameter[0]), 2820 le32_to_cpu(lg->io_parameter[1])); 2821 } else { 2822 /*EMPTY*/ 2823 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1072, 2824 "Done %s.\n", __func__); 2825 } 2826 2827 dma_pool_free(ha->s_dma_pool, lg, lg_dma); 2828 2829 return rval; 2830 } 2831 2832 /* 2833 * qla2x00_fabric_logout 2834 * Issue logout fabric port mailbox command. 2835 * 2836 * Input: 2837 * ha = adapter block pointer. 2838 * loop_id = device loop ID. 2839 * TARGET_QUEUE_LOCK must be released. 2840 * ADAPTER_STATE_LOCK must be released. 2841 * 2842 * Returns: 2843 * qla2x00 local function return status code. 2844 * 2845 * Context: 2846 * Kernel context. 
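 *
 * Illustrative call (editor's sketch, not taken from the original source;
 * fcport is assumed to be a valid fc_port_t):
 *
 *	qla2x00_fabric_logout(vha, fcport->loop_id, fcport->d_id.b.domain,
 *	    fcport->d_id.b.area, fcport->d_id.b.al_pa);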
2847 */ 2848 int 2849 qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, 2850 uint8_t area, uint8_t al_pa) 2851 { 2852 int rval; 2853 mbx_cmd_t mc; 2854 mbx_cmd_t *mcp = &mc; 2855 2856 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1073, 2857 "Entered %s.\n", __func__); 2858 2859 mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT; 2860 mcp->out_mb = MBX_1|MBX_0; 2861 if (HAS_EXTENDED_IDS(vha->hw)) { 2862 mcp->mb[1] = loop_id; 2863 mcp->mb[10] = 0; 2864 mcp->out_mb |= MBX_10; 2865 } else { 2866 mcp->mb[1] = loop_id << 8; 2867 } 2868 2869 mcp->in_mb = MBX_1|MBX_0; 2870 mcp->tov = MBX_TOV_SECONDS; 2871 mcp->flags = 0; 2872 rval = qla2x00_mailbox_command(vha, mcp); 2873 2874 if (rval != QLA_SUCCESS) { 2875 /*EMPTY*/ 2876 ql_dbg(ql_dbg_mbx, vha, 0x1074, 2877 "Failed=%x mb[1]=%x.\n", rval, mcp->mb[1]); 2878 } else { 2879 /*EMPTY*/ 2880 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1075, 2881 "Done %s.\n", __func__); 2882 } 2883 2884 return rval; 2885 } 2886 2887 /* 2888 * qla2x00_full_login_lip 2889 * Issue full login LIP mailbox command. 2890 * 2891 * Input: 2892 * ha = adapter block pointer. 2893 * TARGET_QUEUE_LOCK must be released. 2894 * ADAPTER_STATE_LOCK must be released. 2895 * 2896 * Returns: 2897 * qla2x00 local function return status code. 2898 * 2899 * Context: 2900 * Kernel context. 2901 */ 2902 int 2903 qla2x00_full_login_lip(scsi_qla_host_t *vha) 2904 { 2905 int rval; 2906 mbx_cmd_t mc; 2907 mbx_cmd_t *mcp = &mc; 2908 2909 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1076, 2910 "Entered %s.\n", __func__); 2911 2912 mcp->mb[0] = MBC_LIP_FULL_LOGIN; 2913 mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? BIT_4 : 0; 2914 mcp->mb[2] = 0; 2915 mcp->mb[3] = 0; 2916 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 2917 mcp->in_mb = MBX_0; 2918 mcp->tov = MBX_TOV_SECONDS; 2919 mcp->flags = 0; 2920 rval = qla2x00_mailbox_command(vha, mcp); 2921 2922 if (rval != QLA_SUCCESS) { 2923 /*EMPTY*/ 2924 ql_dbg(ql_dbg_mbx, vha, 0x1077, "Failed=%x.\n", rval); 2925 } else { 2926 /*EMPTY*/ 2927 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1078, 2928 "Done %s.\n", __func__); 2929 } 2930 2931 return rval; 2932 } 2933 2934 /* 2935 * qla2x00_get_id_list 2936 * 2937 * Input: 2938 * ha = adapter block pointer. 2939 * 2940 * Returns: 2941 * qla2x00 local function return status code. 2942 * 2943 * Context: 2944 * Kernel context. 
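 *
 * Illustrative call (editor's sketch; ha->gid_list and ha->gid_list_dma
 * follow the naming used elsewhere in the driver and are assumptions here):
 *
 *	uint16_t entries = 0;
 *
 *	rval = qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma,
 *	    &entries);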
2945 */ 2946 int 2947 qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma, 2948 uint16_t *entries) 2949 { 2950 int rval; 2951 mbx_cmd_t mc; 2952 mbx_cmd_t *mcp = &mc; 2953 2954 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1079, 2955 "Entered %s.\n", __func__); 2956 2957 if (id_list == NULL) 2958 return QLA_FUNCTION_FAILED; 2959 2960 mcp->mb[0] = MBC_GET_ID_LIST; 2961 mcp->out_mb = MBX_0; 2962 if (IS_FWI2_CAPABLE(vha->hw)) { 2963 mcp->mb[2] = MSW(id_list_dma); 2964 mcp->mb[3] = LSW(id_list_dma); 2965 mcp->mb[6] = MSW(MSD(id_list_dma)); 2966 mcp->mb[7] = LSW(MSD(id_list_dma)); 2967 mcp->mb[8] = 0; 2968 mcp->mb[9] = vha->vp_idx; 2969 mcp->out_mb |= MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2; 2970 } else { 2971 mcp->mb[1] = MSW(id_list_dma); 2972 mcp->mb[2] = LSW(id_list_dma); 2973 mcp->mb[3] = MSW(MSD(id_list_dma)); 2974 mcp->mb[6] = LSW(MSD(id_list_dma)); 2975 mcp->out_mb |= MBX_6|MBX_3|MBX_2|MBX_1; 2976 } 2977 mcp->in_mb = MBX_1|MBX_0; 2978 mcp->tov = MBX_TOV_SECONDS; 2979 mcp->flags = 0; 2980 rval = qla2x00_mailbox_command(vha, mcp); 2981 2982 if (rval != QLA_SUCCESS) { 2983 /*EMPTY*/ 2984 ql_dbg(ql_dbg_mbx, vha, 0x107a, "Failed=%x.\n", rval); 2985 } else { 2986 *entries = mcp->mb[1]; 2987 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107b, 2988 "Done %s.\n", __func__); 2989 } 2990 2991 return rval; 2992 } 2993 2994 /* 2995 * qla2x00_get_resource_cnts 2996 * Get current firmware resource counts. 2997 * 2998 * Input: 2999 * ha = adapter block pointer. 3000 * 3001 * Returns: 3002 * qla2x00 local function return status code. 3003 * 3004 * Context: 3005 * Kernel context. 3006 */ 3007 int 3008 qla2x00_get_resource_cnts(scsi_qla_host_t *vha) 3009 { 3010 struct qla_hw_data *ha = vha->hw; 3011 int rval; 3012 mbx_cmd_t mc; 3013 mbx_cmd_t *mcp = &mc; 3014 3015 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107c, 3016 "Entered %s.\n", __func__); 3017 3018 mcp->mb[0] = MBC_GET_RESOURCE_COUNTS; 3019 mcp->out_mb = MBX_0; 3020 mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 3021 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || 3022 IS_QLA27XX(ha) || IS_QLA28XX(ha)) 3023 mcp->in_mb |= MBX_12; 3024 mcp->tov = MBX_TOV_SECONDS; 3025 mcp->flags = 0; 3026 rval = qla2x00_mailbox_command(vha, mcp); 3027 3028 if (rval != QLA_SUCCESS) { 3029 /*EMPTY*/ 3030 ql_dbg(ql_dbg_mbx, vha, 0x107d, 3031 "Failed mb[0]=%x.\n", mcp->mb[0]); 3032 } else { 3033 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107e, 3034 "Done %s mb1=%x mb2=%x mb3=%x mb6=%x mb7=%x mb10=%x " 3035 "mb11=%x mb12=%x.\n", __func__, mcp->mb[1], mcp->mb[2], 3036 mcp->mb[3], mcp->mb[6], mcp->mb[7], mcp->mb[10], 3037 mcp->mb[11], mcp->mb[12]); 3038 3039 ha->orig_fw_tgt_xcb_count = mcp->mb[1]; 3040 ha->cur_fw_tgt_xcb_count = mcp->mb[2]; 3041 ha->cur_fw_xcb_count = mcp->mb[3]; 3042 ha->orig_fw_xcb_count = mcp->mb[6]; 3043 ha->cur_fw_iocb_count = mcp->mb[7]; 3044 ha->orig_fw_iocb_count = mcp->mb[10]; 3045 if (ha->flags.npiv_supported) 3046 ha->max_npiv_vports = mcp->mb[11]; 3047 if (IS_QLA81XX(ha) || IS_QLA83XX(ha)) 3048 ha->fw_max_fcf_count = mcp->mb[12]; 3049 } 3050 3051 return (rval); 3052 } 3053 3054 /* 3055 * qla2x00_get_fcal_position_map 3056 * Get FCAL (LILP) position map using mailbox command 3057 * 3058 * Input: 3059 * ha = adapter state pointer. 3060 * pos_map = buffer pointer (can be NULL). 3061 * 3062 * Returns: 3063 * qla2x00 local function return status code. 3064 * 3065 * Context: 3066 * Kernel context. 
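 *
 * Illustrative call (editor's sketch; per the code below, byte 0 of the
 * returned map holds the entry count and the AL_PA values follow):
 *
 *	char map[FCAL_MAP_SIZE];
 *	uint8_t entries;
 *
 *	if (qla2x00_get_fcal_position_map(vha, map) == QLA_SUCCESS)
 *		entries = map[0];	// map[1..entries] = AL_PA values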
3067 */ 3068 int 3069 qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map) 3070 { 3071 int rval; 3072 mbx_cmd_t mc; 3073 mbx_cmd_t *mcp = &mc; 3074 char *pmap; 3075 dma_addr_t pmap_dma; 3076 struct qla_hw_data *ha = vha->hw; 3077 3078 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107f, 3079 "Entered %s.\n", __func__); 3080 3081 pmap = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma); 3082 if (pmap == NULL) { 3083 ql_log(ql_log_warn, vha, 0x1080, 3084 "Memory alloc failed.\n"); 3085 return QLA_MEMORY_ALLOC_FAILED; 3086 } 3087 3088 mcp->mb[0] = MBC_GET_FC_AL_POSITION_MAP; 3089 mcp->mb[2] = MSW(pmap_dma); 3090 mcp->mb[3] = LSW(pmap_dma); 3091 mcp->mb[6] = MSW(MSD(pmap_dma)); 3092 mcp->mb[7] = LSW(MSD(pmap_dma)); 3093 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0; 3094 mcp->in_mb = MBX_1|MBX_0; 3095 mcp->buf_size = FCAL_MAP_SIZE; 3096 mcp->flags = MBX_DMA_IN; 3097 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2); 3098 rval = qla2x00_mailbox_command(vha, mcp); 3099 3100 if (rval == QLA_SUCCESS) { 3101 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1081, 3102 "mb0/mb1=%x/%X FC/AL position map size (%x).\n", 3103 mcp->mb[0], mcp->mb[1], (unsigned)pmap[0]); 3104 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111d, 3105 pmap, pmap[0] + 1); 3106 3107 if (pos_map) 3108 memcpy(pos_map, pmap, FCAL_MAP_SIZE); 3109 } 3110 dma_pool_free(ha->s_dma_pool, pmap, pmap_dma); 3111 3112 if (rval != QLA_SUCCESS) { 3113 ql_dbg(ql_dbg_mbx, vha, 0x1082, "Failed=%x.\n", rval); 3114 } else { 3115 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1083, 3116 "Done %s.\n", __func__); 3117 } 3118 3119 return rval; 3120 } 3121 3122 /* 3123 * qla2x00_get_link_status 3124 * 3125 * Input: 3126 * ha = adapter block pointer. 3127 * loop_id = device loop ID. 3128 * ret_buf = pointer to link status return buffer. 3129 * 3130 * Returns: 3131 * 0 = success. 3132 * BIT_0 = mem alloc error. 3133 * BIT_1 = mailbox error. 3134 */ 3135 int 3136 qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id, 3137 struct link_statistics *stats, dma_addr_t stats_dma) 3138 { 3139 int rval; 3140 mbx_cmd_t mc; 3141 mbx_cmd_t *mcp = &mc; 3142 uint32_t *iter = (uint32_t *)stats; 3143 ushort dwords = offsetof(typeof(*stats), link_up_cnt)/sizeof(*iter); 3144 struct qla_hw_data *ha = vha->hw; 3145 3146 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1084, 3147 "Entered %s.\n", __func__); 3148 3149 mcp->mb[0] = MBC_GET_LINK_STATUS; 3150 mcp->mb[2] = MSW(LSD(stats_dma)); 3151 mcp->mb[3] = LSW(LSD(stats_dma)); 3152 mcp->mb[6] = MSW(MSD(stats_dma)); 3153 mcp->mb[7] = LSW(MSD(stats_dma)); 3154 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0; 3155 mcp->in_mb = MBX_0; 3156 if (IS_FWI2_CAPABLE(ha)) { 3157 mcp->mb[1] = loop_id; 3158 mcp->mb[4] = 0; 3159 mcp->mb[10] = 0; 3160 mcp->out_mb |= MBX_10|MBX_4|MBX_1; 3161 mcp->in_mb |= MBX_1; 3162 } else if (HAS_EXTENDED_IDS(ha)) { 3163 mcp->mb[1] = loop_id; 3164 mcp->mb[10] = 0; 3165 mcp->out_mb |= MBX_10|MBX_1; 3166 } else { 3167 mcp->mb[1] = loop_id << 8; 3168 mcp->out_mb |= MBX_1; 3169 } 3170 mcp->tov = MBX_TOV_SECONDS; 3171 mcp->flags = IOCTL_CMD; 3172 rval = qla2x00_mailbox_command(vha, mcp); 3173 3174 if (rval == QLA_SUCCESS) { 3175 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) { 3176 ql_dbg(ql_dbg_mbx, vha, 0x1085, 3177 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 3178 rval = QLA_FUNCTION_FAILED; 3179 } else { 3180 /* Re-endianize - firmware data is le32. 
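		 * Added note (editor, not in the original source): only the
		 * fields preceding link_up_cnt are swapped here (dwords is
		 * computed from offsetof above); the rest of the buffer is
		 * left exactly as the firmware wrote it.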
*/ 3181 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1086, 3182 "Done %s.\n", __func__); 3183 for ( ; dwords--; iter++) 3184 le32_to_cpus(iter); 3185 } 3186 } else { 3187 /* Failed. */ 3188 ql_dbg(ql_dbg_mbx, vha, 0x1087, "Failed=%x.\n", rval); 3189 } 3190 3191 return rval; 3192 } 3193 3194 int 3195 qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats, 3196 dma_addr_t stats_dma, uint16_t options) 3197 { 3198 int rval; 3199 mbx_cmd_t mc; 3200 mbx_cmd_t *mcp = &mc; 3201 uint32_t *iter = (uint32_t *)stats; 3202 ushort dwords = sizeof(*stats)/sizeof(*iter); 3203 3204 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1088, 3205 "Entered %s.\n", __func__); 3206 3207 memset(&mc, 0, sizeof(mc)); 3208 mc.mb[0] = MBC_GET_LINK_PRIV_STATS; 3209 mc.mb[2] = MSW(LSD(stats_dma)); 3210 mc.mb[3] = LSW(LSD(stats_dma)); 3211 mc.mb[6] = MSW(MSD(stats_dma)); 3212 mc.mb[7] = LSW(MSD(stats_dma)); 3213 mc.mb[8] = dwords; 3214 mc.mb[9] = vha->vp_idx; 3215 mc.mb[10] = options; 3216 3217 rval = qla24xx_send_mb_cmd(vha, &mc); 3218 3219 if (rval == QLA_SUCCESS) { 3220 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) { 3221 ql_dbg(ql_dbg_mbx, vha, 0x1089, 3222 "Failed mb[0]=%x.\n", mcp->mb[0]); 3223 rval = QLA_FUNCTION_FAILED; 3224 } else { 3225 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108a, 3226 "Done %s.\n", __func__); 3227 /* Re-endianize - firmware data is le32. */ 3228 for ( ; dwords--; iter++) 3229 le32_to_cpus(iter); 3230 } 3231 } else { 3232 /* Failed. */ 3233 ql_dbg(ql_dbg_mbx, vha, 0x108b, "Failed=%x.\n", rval); 3234 } 3235 3236 return rval; 3237 } 3238 3239 int 3240 qla24xx_abort_command(srb_t *sp) 3241 { 3242 int rval; 3243 unsigned long flags = 0; 3244 3245 struct abort_entry_24xx *abt; 3246 dma_addr_t abt_dma; 3247 uint32_t handle; 3248 fc_port_t *fcport = sp->fcport; 3249 struct scsi_qla_host *vha = fcport->vha; 3250 struct qla_hw_data *ha = vha->hw; 3251 struct req_que *req; 3252 struct qla_qpair *qpair = sp->qpair; 3253 3254 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c, 3255 "Entered %s.\n", __func__); 3256 3257 if (sp->qpair) 3258 req = sp->qpair->req; 3259 else 3260 return QLA_ERR_NO_QPAIR; 3261 3262 if (ql2xasynctmfenable) 3263 return qla24xx_async_abort_command(sp); 3264 3265 spin_lock_irqsave(qpair->qp_lock_ptr, flags); 3266 for (handle = 1; handle < req->num_outstanding_cmds; handle++) { 3267 if (req->outstanding_cmds[handle] == sp) 3268 break; 3269 } 3270 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); 3271 if (handle == req->num_outstanding_cmds) { 3272 /* Command not found. 
*/ 3273 return QLA_ERR_NOT_FOUND; 3274 } 3275 3276 abt = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &abt_dma); 3277 if (abt == NULL) { 3278 ql_log(ql_log_warn, vha, 0x108d, 3279 "Failed to allocate abort IOCB.\n"); 3280 return QLA_MEMORY_ALLOC_FAILED; 3281 } 3282 3283 abt->entry_type = ABORT_IOCB_TYPE; 3284 abt->entry_count = 1; 3285 abt->handle = make_handle(req->id, abt->handle); 3286 abt->nport_handle = cpu_to_le16(fcport->loop_id); 3287 abt->handle_to_abort = make_handle(req->id, handle); 3288 abt->port_id[0] = fcport->d_id.b.al_pa; 3289 abt->port_id[1] = fcport->d_id.b.area; 3290 abt->port_id[2] = fcport->d_id.b.domain; 3291 abt->vp_index = fcport->vha->vp_idx; 3292 3293 abt->req_que_no = cpu_to_le16(req->id); 3294 /* Need to pass original sp */ 3295 qla_nvme_abort_set_option(abt, sp); 3296 3297 rval = qla2x00_issue_iocb(vha, abt, abt_dma, 0); 3298 if (rval != QLA_SUCCESS) { 3299 ql_dbg(ql_dbg_mbx, vha, 0x108e, 3300 "Failed to issue IOCB (%x).\n", rval); 3301 } else if (abt->entry_status != 0) { 3302 ql_dbg(ql_dbg_mbx, vha, 0x108f, 3303 "Failed to complete IOCB -- error status (%x).\n", 3304 abt->entry_status); 3305 rval = QLA_FUNCTION_FAILED; 3306 } else if (abt->nport_handle != cpu_to_le16(0)) { 3307 ql_dbg(ql_dbg_mbx, vha, 0x1090, 3308 "Failed to complete IOCB -- completion status (%x).\n", 3309 le16_to_cpu(abt->nport_handle)); 3310 if (abt->nport_handle == cpu_to_le16(CS_IOCB_ERROR)) 3311 rval = QLA_FUNCTION_PARAMETER_ERROR; 3312 else 3313 rval = QLA_FUNCTION_FAILED; 3314 } else { 3315 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1091, 3316 "Done %s.\n", __func__); 3317 } 3318 if (rval == QLA_SUCCESS) 3319 qla_nvme_abort_process_comp_status(abt, sp); 3320 3321 qla_wait_nvme_release_cmd_kref(sp); 3322 3323 dma_pool_free(ha->s_dma_pool, abt, abt_dma); 3324 3325 return rval; 3326 } 3327 3328 struct tsk_mgmt_cmd { 3329 union { 3330 struct tsk_mgmt_entry tsk; 3331 struct sts_entry_24xx sts; 3332 } p; 3333 }; 3334 3335 static int 3336 __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport, 3337 uint64_t l, int tag) 3338 { 3339 int rval, rval2; 3340 struct tsk_mgmt_cmd *tsk; 3341 struct sts_entry_24xx *sts; 3342 dma_addr_t tsk_dma; 3343 scsi_qla_host_t *vha; 3344 struct qla_hw_data *ha; 3345 struct req_que *req; 3346 struct qla_qpair *qpair; 3347 3348 vha = fcport->vha; 3349 ha = vha->hw; 3350 req = vha->req; 3351 3352 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1092, 3353 "Entered %s.\n", __func__); 3354 3355 if (vha->vp_idx && vha->qpair) { 3356 /* NPIV port */ 3357 qpair = vha->qpair; 3358 req = qpair->req; 3359 } 3360 3361 tsk = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma); 3362 if (tsk == NULL) { 3363 ql_log(ql_log_warn, vha, 0x1093, 3364 "Failed to allocate task management IOCB.\n"); 3365 return QLA_MEMORY_ALLOC_FAILED; 3366 } 3367 3368 tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE; 3369 tsk->p.tsk.entry_count = 1; 3370 tsk->p.tsk.handle = make_handle(req->id, tsk->p.tsk.handle); 3371 tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id); 3372 tsk->p.tsk.timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); 3373 tsk->p.tsk.control_flags = cpu_to_le32(type); 3374 tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa; 3375 tsk->p.tsk.port_id[1] = fcport->d_id.b.area; 3376 tsk->p.tsk.port_id[2] = fcport->d_id.b.domain; 3377 tsk->p.tsk.vp_index = fcport->vha->vp_idx; 3378 if (type == TCF_LUN_RESET) { 3379 int_to_scsilun(l, &tsk->p.tsk.lun); 3380 host_to_fcp_swap((uint8_t *)&tsk->p.tsk.lun, 3381 sizeof(tsk->p.tsk.lun)); 3382 } 3383 3384 sts = &tsk->p.sts; 3385 rval = 
qla2x00_issue_iocb(vha, tsk, tsk_dma, 0); 3386 if (rval != QLA_SUCCESS) { 3387 ql_dbg(ql_dbg_mbx, vha, 0x1094, 3388 "Failed to issue %s reset IOCB (%x).\n", name, rval); 3389 } else if (sts->entry_status != 0) { 3390 ql_dbg(ql_dbg_mbx, vha, 0x1095, 3391 "Failed to complete IOCB -- error status (%x).\n", 3392 sts->entry_status); 3393 rval = QLA_FUNCTION_FAILED; 3394 } else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) { 3395 ql_dbg(ql_dbg_mbx, vha, 0x1096, 3396 "Failed to complete IOCB -- completion status (%x).\n", 3397 le16_to_cpu(sts->comp_status)); 3398 rval = QLA_FUNCTION_FAILED; 3399 } else if (le16_to_cpu(sts->scsi_status) & 3400 SS_RESPONSE_INFO_LEN_VALID) { 3401 if (le32_to_cpu(sts->rsp_data_len) < 4) { 3402 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1097, 3403 "Ignoring inconsistent data length -- not enough " 3404 "response info (%d).\n", 3405 le32_to_cpu(sts->rsp_data_len)); 3406 } else if (sts->data[3]) { 3407 ql_dbg(ql_dbg_mbx, vha, 0x1098, 3408 "Failed to complete IOCB -- response (%x).\n", 3409 sts->data[3]); 3410 rval = QLA_FUNCTION_FAILED; 3411 } 3412 } 3413 3414 /* Issue marker IOCB. */ 3415 rval2 = qla2x00_marker(vha, ha->base_qpair, fcport->loop_id, l, 3416 type == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID); 3417 if (rval2 != QLA_SUCCESS) { 3418 ql_dbg(ql_dbg_mbx, vha, 0x1099, 3419 "Failed to issue marker IOCB (%x).\n", rval2); 3420 } else { 3421 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109a, 3422 "Done %s.\n", __func__); 3423 } 3424 3425 dma_pool_free(ha->s_dma_pool, tsk, tsk_dma); 3426 3427 return rval; 3428 } 3429 3430 int 3431 qla24xx_abort_target(struct fc_port *fcport, uint64_t l, int tag) 3432 { 3433 struct qla_hw_data *ha = fcport->vha->hw; 3434 3435 if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha)) 3436 return qla2x00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag); 3437 3438 return __qla24xx_issue_tmf("Target", TCF_TARGET_RESET, fcport, l, tag); 3439 } 3440 3441 int 3442 qla24xx_lun_reset(struct fc_port *fcport, uint64_t l, int tag) 3443 { 3444 struct qla_hw_data *ha = fcport->vha->hw; 3445 3446 if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha)) 3447 return qla2x00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag); 3448 3449 return __qla24xx_issue_tmf("Lun", TCF_LUN_RESET, fcport, l, tag); 3450 } 3451 3452 int 3453 qla2x00_system_error(scsi_qla_host_t *vha) 3454 { 3455 int rval; 3456 mbx_cmd_t mc; 3457 mbx_cmd_t *mcp = &mc; 3458 struct qla_hw_data *ha = vha->hw; 3459 3460 if (!IS_QLA23XX(ha) && !IS_FWI2_CAPABLE(ha)) 3461 return QLA_FUNCTION_FAILED; 3462 3463 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109b, 3464 "Entered %s.\n", __func__); 3465 3466 mcp->mb[0] = MBC_GEN_SYSTEM_ERROR; 3467 mcp->out_mb = MBX_0; 3468 mcp->in_mb = MBX_0; 3469 mcp->tov = 5; 3470 mcp->flags = 0; 3471 rval = qla2x00_mailbox_command(vha, mcp); 3472 3473 if (rval != QLA_SUCCESS) { 3474 ql_dbg(ql_dbg_mbx, vha, 0x109c, "Failed=%x.\n", rval); 3475 } else { 3476 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109d, 3477 "Done %s.\n", __func__); 3478 } 3479 3480 return rval; 3481 } 3482 3483 int 3484 qla2x00_write_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t data) 3485 { 3486 int rval; 3487 mbx_cmd_t mc; 3488 mbx_cmd_t *mcp = &mc; 3489 3490 if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) && 3491 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw)) 3492 return QLA_FUNCTION_FAILED; 3493 3494 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1182, 3495 "Entered %s.\n", __func__); 3496 3497 mcp->mb[0] = MBC_WRITE_SERDES; 3498 mcp->mb[1] = addr; 3499 if (IS_QLA2031(vha->hw)) 3500 mcp->mb[2] = data & 
0xff; 3501 else 3502 mcp->mb[2] = data; 3503 3504 mcp->mb[3] = 0; 3505 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 3506 mcp->in_mb = MBX_0; 3507 mcp->tov = MBX_TOV_SECONDS; 3508 mcp->flags = 0; 3509 rval = qla2x00_mailbox_command(vha, mcp); 3510 3511 if (rval != QLA_SUCCESS) { 3512 ql_dbg(ql_dbg_mbx, vha, 0x1183, 3513 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 3514 } else { 3515 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1184, 3516 "Done %s.\n", __func__); 3517 } 3518 3519 return rval; 3520 } 3521 3522 int 3523 qla2x00_read_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t *data) 3524 { 3525 int rval; 3526 mbx_cmd_t mc; 3527 mbx_cmd_t *mcp = &mc; 3528 3529 if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) && 3530 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw)) 3531 return QLA_FUNCTION_FAILED; 3532 3533 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1185, 3534 "Entered %s.\n", __func__); 3535 3536 mcp->mb[0] = MBC_READ_SERDES; 3537 mcp->mb[1] = addr; 3538 mcp->mb[3] = 0; 3539 mcp->out_mb = MBX_3|MBX_1|MBX_0; 3540 mcp->in_mb = MBX_1|MBX_0; 3541 mcp->tov = MBX_TOV_SECONDS; 3542 mcp->flags = 0; 3543 rval = qla2x00_mailbox_command(vha, mcp); 3544 3545 if (IS_QLA2031(vha->hw)) 3546 *data = mcp->mb[1] & 0xff; 3547 else 3548 *data = mcp->mb[1]; 3549 3550 if (rval != QLA_SUCCESS) { 3551 ql_dbg(ql_dbg_mbx, vha, 0x1186, 3552 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 3553 } else { 3554 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1187, 3555 "Done %s.\n", __func__); 3556 } 3557 3558 return rval; 3559 } 3560 3561 int 3562 qla8044_write_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t data) 3563 { 3564 int rval; 3565 mbx_cmd_t mc; 3566 mbx_cmd_t *mcp = &mc; 3567 3568 if (!IS_QLA8044(vha->hw)) 3569 return QLA_FUNCTION_FAILED; 3570 3571 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x11a0, 3572 "Entered %s.\n", __func__); 3573 3574 mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG; 3575 mcp->mb[1] = HCS_WRITE_SERDES; 3576 mcp->mb[3] = LSW(addr); 3577 mcp->mb[4] = MSW(addr); 3578 mcp->mb[5] = LSW(data); 3579 mcp->mb[6] = MSW(data); 3580 mcp->out_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_1|MBX_0; 3581 mcp->in_mb = MBX_0; 3582 mcp->tov = MBX_TOV_SECONDS; 3583 mcp->flags = 0; 3584 rval = qla2x00_mailbox_command(vha, mcp); 3585 3586 if (rval != QLA_SUCCESS) { 3587 ql_dbg(ql_dbg_mbx, vha, 0x11a1, 3588 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 3589 } else { 3590 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1188, 3591 "Done %s.\n", __func__); 3592 } 3593 3594 return rval; 3595 } 3596 3597 int 3598 qla8044_read_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t *data) 3599 { 3600 int rval; 3601 mbx_cmd_t mc; 3602 mbx_cmd_t *mcp = &mc; 3603 3604 if (!IS_QLA8044(vha->hw)) 3605 return QLA_FUNCTION_FAILED; 3606 3607 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1189, 3608 "Entered %s.\n", __func__); 3609 3610 mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG; 3611 mcp->mb[1] = HCS_READ_SERDES; 3612 mcp->mb[3] = LSW(addr); 3613 mcp->mb[4] = MSW(addr); 3614 mcp->out_mb = MBX_4|MBX_3|MBX_1|MBX_0; 3615 mcp->in_mb = MBX_2|MBX_1|MBX_0; 3616 mcp->tov = MBX_TOV_SECONDS; 3617 mcp->flags = 0; 3618 rval = qla2x00_mailbox_command(vha, mcp); 3619 3620 *data = mcp->mb[2] << 16 | mcp->mb[1]; 3621 3622 if (rval != QLA_SUCCESS) { 3623 ql_dbg(ql_dbg_mbx, vha, 0x118a, 3624 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 3625 } else { 3626 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118b, 3627 "Done %s.\n", __func__); 3628 } 3629 3630 return rval; 3631 } 3632 3633 /** 3634 * qla2x00_set_serdes_params() - 3635 * @vha: HA context 3636 * @sw_em_1g: serial link options 
3637 * @sw_em_2g: serial link options 3638 * @sw_em_4g: serial link options 3639 * 3640 * Returns 3641 */ 3642 int 3643 qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g, 3644 uint16_t sw_em_2g, uint16_t sw_em_4g) 3645 { 3646 int rval; 3647 mbx_cmd_t mc; 3648 mbx_cmd_t *mcp = &mc; 3649 3650 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109e, 3651 "Entered %s.\n", __func__); 3652 3653 mcp->mb[0] = MBC_SERDES_PARAMS; 3654 mcp->mb[1] = BIT_0; 3655 mcp->mb[2] = sw_em_1g | BIT_15; 3656 mcp->mb[3] = sw_em_2g | BIT_15; 3657 mcp->mb[4] = sw_em_4g | BIT_15; 3658 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 3659 mcp->in_mb = MBX_0; 3660 mcp->tov = MBX_TOV_SECONDS; 3661 mcp->flags = 0; 3662 rval = qla2x00_mailbox_command(vha, mcp); 3663 3664 if (rval != QLA_SUCCESS) { 3665 /*EMPTY*/ 3666 ql_dbg(ql_dbg_mbx, vha, 0x109f, 3667 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 3668 } else { 3669 /*EMPTY*/ 3670 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a0, 3671 "Done %s.\n", __func__); 3672 } 3673 3674 return rval; 3675 } 3676 3677 int 3678 qla2x00_stop_firmware(scsi_qla_host_t *vha) 3679 { 3680 int rval; 3681 mbx_cmd_t mc; 3682 mbx_cmd_t *mcp = &mc; 3683 3684 if (!IS_FWI2_CAPABLE(vha->hw)) 3685 return QLA_FUNCTION_FAILED; 3686 3687 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a1, 3688 "Entered %s.\n", __func__); 3689 3690 mcp->mb[0] = MBC_STOP_FIRMWARE; 3691 mcp->mb[1] = 0; 3692 mcp->out_mb = MBX_1|MBX_0; 3693 mcp->in_mb = MBX_0; 3694 mcp->tov = 5; 3695 mcp->flags = 0; 3696 rval = qla2x00_mailbox_command(vha, mcp); 3697 3698 if (rval != QLA_SUCCESS) { 3699 ql_dbg(ql_dbg_mbx, vha, 0x10a2, "Failed=%x.\n", rval); 3700 if (mcp->mb[0] == MBS_INVALID_COMMAND) 3701 rval = QLA_INVALID_COMMAND; 3702 } else { 3703 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a3, 3704 "Done %s.\n", __func__); 3705 } 3706 3707 return rval; 3708 } 3709 3710 int 3711 qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma, 3712 uint16_t buffers) 3713 { 3714 int rval; 3715 mbx_cmd_t mc; 3716 mbx_cmd_t *mcp = &mc; 3717 3718 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a4, 3719 "Entered %s.\n", __func__); 3720 3721 if (!IS_FWI2_CAPABLE(vha->hw)) 3722 return QLA_FUNCTION_FAILED; 3723 3724 if (unlikely(pci_channel_offline(vha->hw->pdev))) 3725 return QLA_FUNCTION_FAILED; 3726 3727 mcp->mb[0] = MBC_TRACE_CONTROL; 3728 mcp->mb[1] = TC_EFT_ENABLE; 3729 mcp->mb[2] = LSW(eft_dma); 3730 mcp->mb[3] = MSW(eft_dma); 3731 mcp->mb[4] = LSW(MSD(eft_dma)); 3732 mcp->mb[5] = MSW(MSD(eft_dma)); 3733 mcp->mb[6] = buffers; 3734 mcp->mb[7] = TC_AEN_DISABLE; 3735 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 3736 mcp->in_mb = MBX_1|MBX_0; 3737 mcp->tov = MBX_TOV_SECONDS; 3738 mcp->flags = 0; 3739 rval = qla2x00_mailbox_command(vha, mcp); 3740 if (rval != QLA_SUCCESS) { 3741 ql_dbg(ql_dbg_mbx, vha, 0x10a5, 3742 "Failed=%x mb[0]=%x mb[1]=%x.\n", 3743 rval, mcp->mb[0], mcp->mb[1]); 3744 } else { 3745 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a6, 3746 "Done %s.\n", __func__); 3747 } 3748 3749 return rval; 3750 } 3751 3752 int 3753 qla2x00_disable_eft_trace(scsi_qla_host_t *vha) 3754 { 3755 int rval; 3756 mbx_cmd_t mc; 3757 mbx_cmd_t *mcp = &mc; 3758 3759 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a7, 3760 "Entered %s.\n", __func__); 3761 3762 if (!IS_FWI2_CAPABLE(vha->hw)) 3763 return QLA_FUNCTION_FAILED; 3764 3765 if (unlikely(pci_channel_offline(vha->hw->pdev))) 3766 return QLA_FUNCTION_FAILED; 3767 3768 mcp->mb[0] = MBC_TRACE_CONTROL; 3769 mcp->mb[1] = TC_EFT_DISABLE; 3770 mcp->out_mb = MBX_1|MBX_0; 3771 mcp->in_mb = 
MBX_1|MBX_0; 3772 mcp->tov = MBX_TOV_SECONDS; 3773 mcp->flags = 0; 3774 rval = qla2x00_mailbox_command(vha, mcp); 3775 if (rval != QLA_SUCCESS) { 3776 ql_dbg(ql_dbg_mbx, vha, 0x10a8, 3777 "Failed=%x mb[0]=%x mb[1]=%x.\n", 3778 rval, mcp->mb[0], mcp->mb[1]); 3779 } else { 3780 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a9, 3781 "Done %s.\n", __func__); 3782 } 3783 3784 return rval; 3785 } 3786 3787 int 3788 qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma, 3789 uint16_t buffers, uint16_t *mb, uint32_t *dwords) 3790 { 3791 int rval; 3792 mbx_cmd_t mc; 3793 mbx_cmd_t *mcp = &mc; 3794 3795 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10aa, 3796 "Entered %s.\n", __func__); 3797 3798 if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw) && 3799 !IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) && 3800 !IS_QLA28XX(vha->hw)) 3801 return QLA_FUNCTION_FAILED; 3802 3803 if (unlikely(pci_channel_offline(vha->hw->pdev))) 3804 return QLA_FUNCTION_FAILED; 3805 3806 mcp->mb[0] = MBC_TRACE_CONTROL; 3807 mcp->mb[1] = TC_FCE_ENABLE; 3808 mcp->mb[2] = LSW(fce_dma); 3809 mcp->mb[3] = MSW(fce_dma); 3810 mcp->mb[4] = LSW(MSD(fce_dma)); 3811 mcp->mb[5] = MSW(MSD(fce_dma)); 3812 mcp->mb[6] = buffers; 3813 mcp->mb[7] = TC_AEN_DISABLE; 3814 mcp->mb[8] = 0; 3815 mcp->mb[9] = TC_FCE_DEFAULT_RX_SIZE; 3816 mcp->mb[10] = TC_FCE_DEFAULT_TX_SIZE; 3817 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2| 3818 MBX_1|MBX_0; 3819 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 3820 mcp->tov = MBX_TOV_SECONDS; 3821 mcp->flags = 0; 3822 rval = qla2x00_mailbox_command(vha, mcp); 3823 if (rval != QLA_SUCCESS) { 3824 ql_dbg(ql_dbg_mbx, vha, 0x10ab, 3825 "Failed=%x mb[0]=%x mb[1]=%x.\n", 3826 rval, mcp->mb[0], mcp->mb[1]); 3827 } else { 3828 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ac, 3829 "Done %s.\n", __func__); 3830 3831 if (mb) 3832 memcpy(mb, mcp->mb, 8 * sizeof(*mb)); 3833 if (dwords) 3834 *dwords = buffers; 3835 } 3836 3837 return rval; 3838 } 3839 3840 int 3841 qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd) 3842 { 3843 int rval; 3844 mbx_cmd_t mc; 3845 mbx_cmd_t *mcp = &mc; 3846 3847 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ad, 3848 "Entered %s.\n", __func__); 3849 3850 if (!IS_FWI2_CAPABLE(vha->hw)) 3851 return QLA_FUNCTION_FAILED; 3852 3853 if (unlikely(pci_channel_offline(vha->hw->pdev))) 3854 return QLA_FUNCTION_FAILED; 3855 3856 mcp->mb[0] = MBC_TRACE_CONTROL; 3857 mcp->mb[1] = TC_FCE_DISABLE; 3858 mcp->mb[2] = TC_FCE_DISABLE_TRACE; 3859 mcp->out_mb = MBX_2|MBX_1|MBX_0; 3860 mcp->in_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2| 3861 MBX_1|MBX_0; 3862 mcp->tov = MBX_TOV_SECONDS; 3863 mcp->flags = 0; 3864 rval = qla2x00_mailbox_command(vha, mcp); 3865 if (rval != QLA_SUCCESS) { 3866 ql_dbg(ql_dbg_mbx, vha, 0x10ae, 3867 "Failed=%x mb[0]=%x mb[1]=%x.\n", 3868 rval, mcp->mb[0], mcp->mb[1]); 3869 } else { 3870 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10af, 3871 "Done %s.\n", __func__); 3872 3873 if (wr) 3874 *wr = (uint64_t) mcp->mb[5] << 48 | 3875 (uint64_t) mcp->mb[4] << 32 | 3876 (uint64_t) mcp->mb[3] << 16 | 3877 (uint64_t) mcp->mb[2]; 3878 if (rd) 3879 *rd = (uint64_t) mcp->mb[9] << 48 | 3880 (uint64_t) mcp->mb[8] << 32 | 3881 (uint64_t) mcp->mb[7] << 16 | 3882 (uint64_t) mcp->mb[6]; 3883 } 3884 3885 return rval; 3886 } 3887 3888 int 3889 qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id, 3890 uint16_t *port_speed, uint16_t *mb) 3891 { 3892 int rval; 3893 mbx_cmd_t mc; 3894 mbx_cmd_t *mcp = &mc; 3895 3896 ql_dbg(ql_dbg_mbx + 
ql_dbg_verbose, vha, 0x10b0, 3897 "Entered %s.\n", __func__); 3898 3899 if (!IS_IIDMA_CAPABLE(vha->hw)) 3900 return QLA_FUNCTION_FAILED; 3901 3902 mcp->mb[0] = MBC_PORT_PARAMS; 3903 mcp->mb[1] = loop_id; 3904 mcp->mb[2] = mcp->mb[3] = 0; 3905 mcp->mb[9] = vha->vp_idx; 3906 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0; 3907 mcp->in_mb = MBX_3|MBX_1|MBX_0; 3908 mcp->tov = MBX_TOV_SECONDS; 3909 mcp->flags = 0; 3910 rval = qla2x00_mailbox_command(vha, mcp); 3911 3912 /* Return mailbox statuses. */ 3913 if (mb) { 3914 mb[0] = mcp->mb[0]; 3915 mb[1] = mcp->mb[1]; 3916 mb[3] = mcp->mb[3]; 3917 } 3918 3919 if (rval != QLA_SUCCESS) { 3920 ql_dbg(ql_dbg_mbx, vha, 0x10b1, "Failed=%x.\n", rval); 3921 } else { 3922 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b2, 3923 "Done %s.\n", __func__); 3924 if (port_speed) 3925 *port_speed = mcp->mb[3]; 3926 } 3927 3928 return rval; 3929 } 3930 3931 int 3932 qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id, 3933 uint16_t port_speed, uint16_t *mb) 3934 { 3935 int rval; 3936 mbx_cmd_t mc; 3937 mbx_cmd_t *mcp = &mc; 3938 3939 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b3, 3940 "Entered %s.\n", __func__); 3941 3942 if (!IS_IIDMA_CAPABLE(vha->hw)) 3943 return QLA_FUNCTION_FAILED; 3944 3945 mcp->mb[0] = MBC_PORT_PARAMS; 3946 mcp->mb[1] = loop_id; 3947 mcp->mb[2] = BIT_0; 3948 mcp->mb[3] = port_speed & 0x3F; 3949 mcp->mb[9] = vha->vp_idx; 3950 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0; 3951 mcp->in_mb = MBX_3|MBX_1|MBX_0; 3952 mcp->tov = MBX_TOV_SECONDS; 3953 mcp->flags = 0; 3954 rval = qla2x00_mailbox_command(vha, mcp); 3955 3956 /* Return mailbox statuses. */ 3957 if (mb) { 3958 mb[0] = mcp->mb[0]; 3959 mb[1] = mcp->mb[1]; 3960 mb[3] = mcp->mb[3]; 3961 } 3962 3963 if (rval != QLA_SUCCESS) { 3964 ql_dbg(ql_dbg_mbx, vha, 0x10b4, 3965 "Failed=%x.\n", rval); 3966 } else { 3967 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b5, 3968 "Done %s.\n", __func__); 3969 } 3970 3971 return rval; 3972 } 3973 3974 void 3975 qla24xx_report_id_acquisition(scsi_qla_host_t *vha, 3976 struct vp_rpt_id_entry_24xx *rptid_entry) 3977 { 3978 struct qla_hw_data *ha = vha->hw; 3979 scsi_qla_host_t *vp = NULL; 3980 unsigned long flags; 3981 int found; 3982 port_id_t id; 3983 struct fc_port *fcport; 3984 3985 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6, 3986 "Entered %s.\n", __func__); 3987 3988 if (rptid_entry->entry_status != 0) 3989 return; 3990 3991 id.b.domain = rptid_entry->port_id[2]; 3992 id.b.area = rptid_entry->port_id[1]; 3993 id.b.al_pa = rptid_entry->port_id[0]; 3994 id.b.rsvd_1 = 0; 3995 ha->flags.n2n_ae = 0; 3996 3997 if (rptid_entry->format == 0) { 3998 /* loop */ 3999 ql_dbg(ql_dbg_async, vha, 0x10b7, 4000 "Format 0 : Number of VPs setup %d, number of " 4001 "VPs acquired %d.\n", rptid_entry->vp_setup, 4002 rptid_entry->vp_acquired); 4003 ql_dbg(ql_dbg_async, vha, 0x10b8, 4004 "Primary port id %02x%02x%02x.\n", 4005 rptid_entry->port_id[2], rptid_entry->port_id[1], 4006 rptid_entry->port_id[0]); 4007 ha->current_topology = ISP_CFG_NL; 4008 qlt_update_host_map(vha, id); 4009 4010 } else if (rptid_entry->format == 1) { 4011 /* fabric */ 4012 ql_dbg(ql_dbg_async, vha, 0x10b9, 4013 "Format 1: VP[%d] enabled - status %d - with " 4014 "port id %02x%02x%02x.\n", rptid_entry->vp_idx, 4015 rptid_entry->vp_status, 4016 rptid_entry->port_id[2], rptid_entry->port_id[1], 4017 rptid_entry->port_id[0]); 4018 ql_dbg(ql_dbg_async, vha, 0x5075, 4019 "Format 1: Remote WWPN %8phC.\n", 4020 rptid_entry->u.f1.port_name); 4021 4022 ql_dbg(ql_dbg_async, vha, 0x5075, 4023 "Format 1: 
WWPN %8phC.\n", 4024 vha->port_name); 4025 4026 switch (rptid_entry->u.f1.flags & TOPO_MASK) { 4027 case TOPO_N2N: 4028 ha->current_topology = ISP_CFG_N; 4029 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); 4030 list_for_each_entry(fcport, &vha->vp_fcports, list) { 4031 fcport->scan_state = QLA_FCPORT_SCAN; 4032 fcport->n2n_flag = 0; 4033 } 4034 id.b24 = 0; 4035 if (wwn_to_u64(vha->port_name) > 4036 wwn_to_u64(rptid_entry->u.f1.port_name)) { 4037 vha->d_id.b24 = 0; 4038 vha->d_id.b.al_pa = 1; 4039 ha->flags.n2n_bigger = 1; 4040 4041 id.b.al_pa = 2; 4042 ql_dbg(ql_dbg_async, vha, 0x5075, 4043 "Format 1: assign local id %x remote id %x\n", 4044 vha->d_id.b24, id.b24); 4045 } else { 4046 ql_dbg(ql_dbg_async, vha, 0x5075, 4047 "Format 1: Remote login - Waiting for WWPN %8phC.\n", 4048 rptid_entry->u.f1.port_name); 4049 ha->flags.n2n_bigger = 0; 4050 } 4051 4052 fcport = qla2x00_find_fcport_by_wwpn(vha, 4053 rptid_entry->u.f1.port_name, 1); 4054 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 4055 4056 4057 if (fcport) { 4058 fcport->plogi_nack_done_deadline = jiffies + HZ; 4059 fcport->dm_login_expire = jiffies + 4060 QLA_N2N_WAIT_TIME * HZ; 4061 fcport->scan_state = QLA_FCPORT_FOUND; 4062 fcport->n2n_flag = 1; 4063 fcport->keep_nport_handle = 1; 4064 fcport->login_retry = vha->hw->login_retry_count; 4065 fcport->fc4_type = FS_FC4TYPE_FCP; 4066 if (vha->flags.nvme_enabled) 4067 fcport->fc4_type |= FS_FC4TYPE_NVME; 4068 4069 if (wwn_to_u64(vha->port_name) > 4070 wwn_to_u64(fcport->port_name)) { 4071 fcport->d_id = id; 4072 } 4073 4074 switch (fcport->disc_state) { 4075 case DSC_DELETED: 4076 set_bit(RELOGIN_NEEDED, 4077 &vha->dpc_flags); 4078 break; 4079 case DSC_DELETE_PEND: 4080 break; 4081 default: 4082 qlt_schedule_sess_for_deletion(fcport); 4083 break; 4084 } 4085 } else { 4086 qla24xx_post_newsess_work(vha, &id, 4087 rptid_entry->u.f1.port_name, 4088 rptid_entry->u.f1.node_name, 4089 NULL, 4090 FS_FCP_IS_N2N); 4091 } 4092 4093 /* if our portname is higher then initiate N2N login */ 4094 4095 set_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags); 4096 return; 4097 case TOPO_FL: 4098 ha->current_topology = ISP_CFG_FL; 4099 break; 4100 case TOPO_F: 4101 ha->current_topology = ISP_CFG_F; 4102 break; 4103 default: 4104 break; 4105 } 4106 4107 ha->flags.gpsc_supported = 1; 4108 ha->current_topology = ISP_CFG_F; 4109 /* buffer to buffer credit flag */ 4110 vha->flags.bbcr_enable = (rptid_entry->u.f1.bbcr & 0xf) != 0; 4111 4112 if (rptid_entry->vp_idx == 0) { 4113 if (rptid_entry->vp_status == VP_STAT_COMPL) { 4114 /* FA-WWN is only for physical port */ 4115 if (qla_ini_mode_enabled(vha) && 4116 ha->flags.fawwpn_enabled && 4117 (rptid_entry->u.f1.flags & 4118 BIT_6)) { 4119 memcpy(vha->port_name, 4120 rptid_entry->u.f1.port_name, 4121 WWN_SIZE); 4122 } 4123 4124 qlt_update_host_map(vha, id); 4125 } 4126 4127 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags); 4128 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags); 4129 } else { 4130 if (rptid_entry->vp_status != VP_STAT_COMPL && 4131 rptid_entry->vp_status != VP_STAT_ID_CHG) { 4132 ql_dbg(ql_dbg_mbx, vha, 0x10ba, 4133 "Could not acquire ID for VP[%d].\n", 4134 rptid_entry->vp_idx); 4135 return; 4136 } 4137 4138 found = 0; 4139 spin_lock_irqsave(&ha->vport_slock, flags); 4140 list_for_each_entry(vp, &ha->vp_list, list) { 4141 if (rptid_entry->vp_idx == vp->vp_idx) { 4142 found = 1; 4143 break; 4144 } 4145 } 4146 spin_unlock_irqrestore(&ha->vport_slock, flags); 4147 4148 if (!found) 4149 return; 4150 4151 qlt_update_host_map(vp, id); 4152 4153 /* 4154 * 
Cannot configure here as we are still sitting on the 4155 * response queue. Handle it in dpc context. 4156 */ 4157 set_bit(VP_IDX_ACQUIRED, &vp->vp_flags); 4158 set_bit(REGISTER_FC4_NEEDED, &vp->dpc_flags); 4159 set_bit(REGISTER_FDMI_NEEDED, &vp->dpc_flags); 4160 } 4161 set_bit(VP_DPC_NEEDED, &vha->dpc_flags); 4162 qla2xxx_wake_dpc(vha); 4163 } else if (rptid_entry->format == 2) { 4164 ql_dbg(ql_dbg_async, vha, 0x505f, 4165 "RIDA: format 2/N2N Primary port id %02x%02x%02x.\n", 4166 rptid_entry->port_id[2], rptid_entry->port_id[1], 4167 rptid_entry->port_id[0]); 4168 4169 ql_dbg(ql_dbg_async, vha, 0x5075, 4170 "N2N: Remote WWPN %8phC.\n", 4171 rptid_entry->u.f2.port_name); 4172 4173 /* N2N (direct connect). */ 4174 ha->current_topology = ISP_CFG_N; 4175 ha->flags.rida_fmt2 = 1; 4176 vha->d_id.b.domain = rptid_entry->port_id[2]; 4177 vha->d_id.b.area = rptid_entry->port_id[1]; 4178 vha->d_id.b.al_pa = rptid_entry->port_id[0]; 4179 4180 ha->flags.n2n_ae = 1; 4181 spin_lock_irqsave(&ha->vport_slock, flags); 4182 qlt_update_vp_map(vha, SET_AL_PA); 4183 spin_unlock_irqrestore(&ha->vport_slock, flags); 4184 4185 list_for_each_entry(fcport, &vha->vp_fcports, list) { 4186 fcport->scan_state = QLA_FCPORT_SCAN; 4187 fcport->n2n_flag = 0; 4188 } 4189 4190 fcport = qla2x00_find_fcport_by_wwpn(vha, 4191 rptid_entry->u.f2.port_name, 1); 4192 4193 if (fcport) { 4194 fcport->login_retry = vha->hw->login_retry_count; 4195 fcport->plogi_nack_done_deadline = jiffies + HZ; 4196 fcport->scan_state = QLA_FCPORT_FOUND; 4197 fcport->keep_nport_handle = 1; 4198 fcport->n2n_flag = 1; 4199 fcport->d_id.b.domain = 4200 rptid_entry->u.f2.remote_nport_id[2]; 4201 fcport->d_id.b.area = 4202 rptid_entry->u.f2.remote_nport_id[1]; 4203 fcport->d_id.b.al_pa = 4204 rptid_entry->u.f2.remote_nport_id[0]; 4205 4206 /* 4207 * When the remote port sends a PRLO, the FW sends up 4208 * RIDA Format 2 as an indication of session loss. In 4209 * other words, the FW state changes from PRLI complete 4210 * back to PLOGI complete. Delete the session and let 4211 * relogin drive the reconnect. 4212 */ 4213 if (atomic_read(&fcport->state) == FCS_ONLINE) 4214 qlt_schedule_sess_for_deletion(fcport); 4215 } 4216 } 4217 } 4218 4219 /* 4220 * qla24xx_modify_vp_config 4221 * Change VP configuration for vha 4222 * 4223 * Input: 4224 * vha = adapter block pointer. 4225 * 4226 * Returns: 4227 * qla2xxx local function return status code. 4228 * 4229 * Context: 4230 * Kernel context.
4231 */ 4232 int 4233 qla24xx_modify_vp_config(scsi_qla_host_t *vha) 4234 { 4235 int rval; 4236 struct vp_config_entry_24xx *vpmod; 4237 dma_addr_t vpmod_dma; 4238 struct qla_hw_data *ha = vha->hw; 4239 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 4240 4241 /* This can be called by the parent */ 4242 4243 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10bb, 4244 "Entered %s.\n", __func__); 4245 4246 vpmod = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma); 4247 if (!vpmod) { 4248 ql_log(ql_log_warn, vha, 0x10bc, 4249 "Failed to allocate modify VP IOCB.\n"); 4250 return QLA_MEMORY_ALLOC_FAILED; 4251 } 4252 4253 vpmod->entry_type = VP_CONFIG_IOCB_TYPE; 4254 vpmod->entry_count = 1; 4255 vpmod->command = VCT_COMMAND_MOD_ENABLE_VPS; 4256 vpmod->vp_count = 1; 4257 vpmod->vp_index1 = vha->vp_idx; 4258 vpmod->options_idx1 = BIT_3|BIT_4|BIT_5; 4259 4260 qlt_modify_vp_config(vha, vpmod); 4261 4262 memcpy(vpmod->node_name_idx1, vha->node_name, WWN_SIZE); 4263 memcpy(vpmod->port_name_idx1, vha->port_name, WWN_SIZE); 4264 4265 4266 rval = qla2x00_issue_iocb(base_vha, vpmod, vpmod_dma, 0); 4267 if (rval != QLA_SUCCESS) { 4268 ql_dbg(ql_dbg_mbx, vha, 0x10bd, 4269 "Failed to issue VP config IOCB (%x).\n", rval); 4270 } else if (vpmod->comp_status != 0) { 4271 ql_dbg(ql_dbg_mbx, vha, 0x10be, 4272 "Failed to complete IOCB -- error status (%x).\n", 4273 vpmod->comp_status); 4274 rval = QLA_FUNCTION_FAILED; 4275 } else if (vpmod->comp_status != cpu_to_le16(CS_COMPLETE)) { 4276 ql_dbg(ql_dbg_mbx, vha, 0x10bf, 4277 "Failed to complete IOCB -- completion status (%x).\n", 4278 le16_to_cpu(vpmod->comp_status)); 4279 rval = QLA_FUNCTION_FAILED; 4280 } else { 4281 4282 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c0, 4283 "Done %s.\n", __func__); 4284 fc_vport_set_state(vha->fc_vport, FC_VPORT_INITIALIZING); 4285 } 4286 dma_pool_free(ha->s_dma_pool, vpmod, vpmod_dma); 4287 4288 return rval; 4289 } 4290 4291 /* 4292 * qla2x00_send_change_request 4293 * Register for, or clear registration of, RSCN notifications from the fabric controller 4294 * 4295 * Input: 4296 * ha = adapter block pointer 4297 * format = registration format: 4298 * 0 - Reserved 4299 * 1 - Fabric detected registration 4300 * 2 - N_port detected registration 4301 * 3 - Full registration 4302 * FF - clear registration 4303 * vp_idx = Virtual port index 4304 * 4305 * Returns: 4306 * qla2x00 local function return status code.
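 * (QLA_SUCCESS when the change request completes; BIT_1 on any mailbox failure or non-complete status)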
4307 * 4308 * Context: 4309 * Kernel Context 4310 */ 4311 4312 int 4313 qla2x00_send_change_request(scsi_qla_host_t *vha, uint16_t format, 4314 uint16_t vp_idx) 4315 { 4316 int rval; 4317 mbx_cmd_t mc; 4318 mbx_cmd_t *mcp = &mc; 4319 4320 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c7, 4321 "Entered %s.\n", __func__); 4322 4323 mcp->mb[0] = MBC_SEND_CHANGE_REQUEST; 4324 mcp->mb[1] = format; 4325 mcp->mb[9] = vp_idx; 4326 mcp->out_mb = MBX_9|MBX_1|MBX_0; 4327 mcp->in_mb = MBX_0|MBX_1; 4328 mcp->tov = MBX_TOV_SECONDS; 4329 mcp->flags = 0; 4330 rval = qla2x00_mailbox_command(vha, mcp); 4331 4332 if (rval == QLA_SUCCESS) { 4333 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) { 4334 rval = BIT_1; 4335 } 4336 } else 4337 rval = BIT_1; 4338 4339 return rval; 4340 } 4341 4342 int 4343 qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr, 4344 uint32_t size) 4345 { 4346 int rval; 4347 mbx_cmd_t mc; 4348 mbx_cmd_t *mcp = &mc; 4349 4350 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1009, 4351 "Entered %s.\n", __func__); 4352 4353 if (MSW(addr) || IS_FWI2_CAPABLE(vha->hw)) { 4354 mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED; 4355 mcp->mb[8] = MSW(addr); 4356 mcp->mb[10] = 0; 4357 mcp->out_mb = MBX_10|MBX_8|MBX_0; 4358 } else { 4359 mcp->mb[0] = MBC_DUMP_RISC_RAM; 4360 mcp->out_mb = MBX_0; 4361 } 4362 mcp->mb[1] = LSW(addr); 4363 mcp->mb[2] = MSW(req_dma); 4364 mcp->mb[3] = LSW(req_dma); 4365 mcp->mb[6] = MSW(MSD(req_dma)); 4366 mcp->mb[7] = LSW(MSD(req_dma)); 4367 mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1; 4368 if (IS_FWI2_CAPABLE(vha->hw)) { 4369 mcp->mb[4] = MSW(size); 4370 mcp->mb[5] = LSW(size); 4371 mcp->out_mb |= MBX_5|MBX_4; 4372 } else { 4373 mcp->mb[4] = LSW(size); 4374 mcp->out_mb |= MBX_4; 4375 } 4376 4377 mcp->in_mb = MBX_0; 4378 mcp->tov = MBX_TOV_SECONDS; 4379 mcp->flags = 0; 4380 rval = qla2x00_mailbox_command(vha, mcp); 4381 4382 if (rval != QLA_SUCCESS) { 4383 ql_dbg(ql_dbg_mbx, vha, 0x1008, 4384 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4385 } else { 4386 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1007, 4387 "Done %s.\n", __func__); 4388 } 4389 4390 return rval; 4391 } 4392 /* 84XX Support **************************************************************/ 4393 4394 struct cs84xx_mgmt_cmd { 4395 union { 4396 struct verify_chip_entry_84xx req; 4397 struct verify_chip_rsp_84xx rsp; 4398 } p; 4399 }; 4400 4401 int 4402 qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status) 4403 { 4404 int rval, retry; 4405 struct cs84xx_mgmt_cmd *mn; 4406 dma_addr_t mn_dma; 4407 uint16_t options; 4408 unsigned long flags; 4409 struct qla_hw_data *ha = vha->hw; 4410 4411 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c8, 4412 "Entered %s.\n", __func__); 4413 4414 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma); 4415 if (mn == NULL) { 4416 return QLA_MEMORY_ALLOC_FAILED; 4417 } 4418 4419 /* Force Update? */ 4420 options = ha->cs84xx->fw_update ? VCO_FORCE_UPDATE : 0; 4421 /* Diagnostic firmware? */ 4422 /* options |= MENLO_DIAG_FW; */ 4423 /* We update the firmware with only one data sequence. 
*/ 4424 options |= VCO_END_OF_DATA; 4425 4426 do { 4427 retry = 0; 4428 memset(mn, 0, sizeof(*mn)); 4429 mn->p.req.entry_type = VERIFY_CHIP_IOCB_TYPE; 4430 mn->p.req.entry_count = 1; 4431 mn->p.req.options = cpu_to_le16(options); 4432 4433 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111c, 4434 "Dump of Verify Request.\n"); 4435 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111e, 4436 mn, sizeof(*mn)); 4437 4438 rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120); 4439 if (rval != QLA_SUCCESS) { 4440 ql_dbg(ql_dbg_mbx, vha, 0x10cb, 4441 "Failed to issue verify IOCB (%x).\n", rval); 4442 goto verify_done; 4443 } 4444 4445 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1110, 4446 "Dump of Verify Response.\n"); 4447 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1118, 4448 mn, sizeof(*mn)); 4449 4450 status[0] = le16_to_cpu(mn->p.rsp.comp_status); 4451 status[1] = status[0] == CS_VCS_CHIP_FAILURE ? 4452 le16_to_cpu(mn->p.rsp.failure_code) : 0; 4453 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ce, 4454 "cs=%x fc=%x.\n", status[0], status[1]); 4455 4456 if (status[0] != CS_COMPLETE) { 4457 rval = QLA_FUNCTION_FAILED; 4458 if (!(options & VCO_DONT_UPDATE_FW)) { 4459 ql_dbg(ql_dbg_mbx, vha, 0x10cf, 4460 "Firmware update failed. Retrying " 4461 "without firmware update.\n"); 4462 options |= VCO_DONT_UPDATE_FW; 4463 options &= ~VCO_FORCE_UPDATE; 4464 retry = 1; 4465 } 4466 } else { 4467 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d0, 4468 "Firmware updated to %x.\n", 4469 le32_to_cpu(mn->p.rsp.fw_ver)); 4470 4471 /* NOTE: we only update OP firmware. */ 4472 spin_lock_irqsave(&ha->cs84xx->access_lock, flags); 4473 ha->cs84xx->op_fw_version = 4474 le32_to_cpu(mn->p.rsp.fw_ver); 4475 spin_unlock_irqrestore(&ha->cs84xx->access_lock, 4476 flags); 4477 } 4478 } while (retry); 4479 4480 verify_done: 4481 dma_pool_free(ha->s_dma_pool, mn, mn_dma); 4482 4483 if (rval != QLA_SUCCESS) { 4484 ql_dbg(ql_dbg_mbx, vha, 0x10d1, 4485 "Failed=%x.\n", rval); 4486 } else { 4487 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d2, 4488 "Done %s.\n", __func__); 4489 } 4490 4491 return rval; 4492 } 4493 4494 int 4495 qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req) 4496 { 4497 int rval; 4498 unsigned long flags; 4499 mbx_cmd_t mc; 4500 mbx_cmd_t *mcp = &mc; 4501 struct qla_hw_data *ha = vha->hw; 4502 4503 if (!ha->flags.fw_started) 4504 return QLA_SUCCESS; 4505 4506 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d3, 4507 "Entered %s.\n", __func__); 4508 4509 if (IS_SHADOW_REG_CAPABLE(ha)) 4510 req->options |= BIT_13; 4511 4512 mcp->mb[0] = MBC_INITIALIZE_MULTIQ; 4513 mcp->mb[1] = req->options; 4514 mcp->mb[2] = MSW(LSD(req->dma)); 4515 mcp->mb[3] = LSW(LSD(req->dma)); 4516 mcp->mb[6] = MSW(MSD(req->dma)); 4517 mcp->mb[7] = LSW(MSD(req->dma)); 4518 mcp->mb[5] = req->length; 4519 if (req->rsp) 4520 mcp->mb[10] = req->rsp->id; 4521 mcp->mb[12] = req->qos; 4522 mcp->mb[11] = req->vp_idx; 4523 mcp->mb[13] = req->rid; 4524 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) 4525 mcp->mb[15] = 0; 4526 4527 mcp->mb[4] = req->id; 4528 /* que in ptr index */ 4529 mcp->mb[8] = 0; 4530 /* que out ptr index */ 4531 mcp->mb[9] = *req->out_ptr = 0; 4532 mcp->out_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7| 4533 MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 4534 mcp->in_mb = MBX_0; 4535 mcp->flags = MBX_DMA_OUT; 4536 mcp->tov = MBX_TOV_SECONDS * 2; 4537 4538 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) || 4539 IS_QLA28XX(ha)) 4540 mcp->in_mb |= MBX_1; 4541 if (IS_QLA83XX(ha) || 
IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 4542 mcp->out_mb |= MBX_15; 4543 /* debug q create issue in SR-IOV */ 4544 mcp->in_mb |= MBX_9 | MBX_8 | MBX_7; 4545 } 4546 4547 spin_lock_irqsave(&ha->hardware_lock, flags); 4548 if (!(req->options & BIT_0)) { 4549 wrt_reg_dword(req->req_q_in, 0); 4550 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 4551 wrt_reg_dword(req->req_q_out, 0); 4552 } 4553 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4554 4555 rval = qla2x00_mailbox_command(vha, mcp); 4556 if (rval != QLA_SUCCESS) { 4557 ql_dbg(ql_dbg_mbx, vha, 0x10d4, 4558 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4559 } else { 4560 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d5, 4561 "Done %s.\n", __func__); 4562 } 4563 4564 return rval; 4565 } 4566 4567 int 4568 qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp) 4569 { 4570 int rval; 4571 unsigned long flags; 4572 mbx_cmd_t mc; 4573 mbx_cmd_t *mcp = &mc; 4574 struct qla_hw_data *ha = vha->hw; 4575 4576 if (!ha->flags.fw_started) 4577 return QLA_SUCCESS; 4578 4579 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d6, 4580 "Entered %s.\n", __func__); 4581 4582 if (IS_SHADOW_REG_CAPABLE(ha)) 4583 rsp->options |= BIT_13; 4584 4585 mcp->mb[0] = MBC_INITIALIZE_MULTIQ; 4586 mcp->mb[1] = rsp->options; 4587 mcp->mb[2] = MSW(LSD(rsp->dma)); 4588 mcp->mb[3] = LSW(LSD(rsp->dma)); 4589 mcp->mb[6] = MSW(MSD(rsp->dma)); 4590 mcp->mb[7] = LSW(MSD(rsp->dma)); 4591 mcp->mb[5] = rsp->length; 4592 mcp->mb[14] = rsp->msix->entry; 4593 mcp->mb[13] = rsp->rid; 4594 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) 4595 mcp->mb[15] = 0; 4596 4597 mcp->mb[4] = rsp->id; 4598 /* que in ptr index */ 4599 mcp->mb[8] = *rsp->in_ptr = 0; 4600 /* que out ptr index */ 4601 mcp->mb[9] = 0; 4602 mcp->out_mb = MBX_14|MBX_13|MBX_9|MBX_8|MBX_7 4603 |MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 4604 mcp->in_mb = MBX_0; 4605 mcp->flags = MBX_DMA_OUT; 4606 mcp->tov = MBX_TOV_SECONDS * 2; 4607 4608 if (IS_QLA81XX(ha)) { 4609 mcp->out_mb |= MBX_12|MBX_11|MBX_10; 4610 mcp->in_mb |= MBX_1; 4611 } else if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 4612 mcp->out_mb |= MBX_15|MBX_12|MBX_11|MBX_10; 4613 mcp->in_mb |= MBX_1; 4614 /* debug q create issue in SR-IOV */ 4615 mcp->in_mb |= MBX_9 | MBX_8 | MBX_7; 4616 } 4617 4618 spin_lock_irqsave(&ha->hardware_lock, flags); 4619 if (!(rsp->options & BIT_0)) { 4620 wrt_reg_dword(rsp->rsp_q_out, 0); 4621 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 4622 wrt_reg_dword(rsp->rsp_q_in, 0); 4623 } 4624 4625 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4626 4627 rval = qla2x00_mailbox_command(vha, mcp); 4628 if (rval != QLA_SUCCESS) { 4629 ql_dbg(ql_dbg_mbx, vha, 0x10d7, 4630 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4631 } else { 4632 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d8, 4633 "Done %s.\n", __func__); 4634 } 4635 4636 return rval; 4637 } 4638 4639 int 4640 qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb) 4641 { 4642 int rval; 4643 mbx_cmd_t mc; 4644 mbx_cmd_t *mcp = &mc; 4645 4646 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d9, 4647 "Entered %s.\n", __func__); 4648 4649 mcp->mb[0] = MBC_IDC_ACK; 4650 memcpy(&mcp->mb[1], mb, QLA_IDC_ACK_REGS * sizeof(uint16_t)); 4651 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 4652 mcp->in_mb = MBX_0; 4653 mcp->tov = MBX_TOV_SECONDS; 4654 mcp->flags = 0; 4655 rval = qla2x00_mailbox_command(vha, mcp); 4656 4657 if (rval != QLA_SUCCESS) { 4658 ql_dbg(ql_dbg_mbx, vha, 0x10da, 4659 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4660 } else 
{ 4661 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10db, 4662 "Done %s.\n", __func__); 4663 } 4664 4665 return rval; 4666 } 4667 4668 int 4669 qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size) 4670 { 4671 int rval; 4672 mbx_cmd_t mc; 4673 mbx_cmd_t *mcp = &mc; 4674 4675 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10dc, 4676 "Entered %s.\n", __func__); 4677 4678 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) && 4679 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw)) 4680 return QLA_FUNCTION_FAILED; 4681 4682 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; 4683 mcp->mb[1] = FAC_OPT_CMD_GET_SECTOR_SIZE; 4684 mcp->out_mb = MBX_1|MBX_0; 4685 mcp->in_mb = MBX_1|MBX_0; 4686 mcp->tov = MBX_TOV_SECONDS; 4687 mcp->flags = 0; 4688 rval = qla2x00_mailbox_command(vha, mcp); 4689 4690 if (rval != QLA_SUCCESS) { 4691 ql_dbg(ql_dbg_mbx, vha, 0x10dd, 4692 "Failed=%x mb[0]=%x mb[1]=%x.\n", 4693 rval, mcp->mb[0], mcp->mb[1]); 4694 } else { 4695 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10de, 4696 "Done %s.\n", __func__); 4697 *sector_size = mcp->mb[1]; 4698 } 4699 4700 return rval; 4701 } 4702 4703 int 4704 qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable) 4705 { 4706 int rval; 4707 mbx_cmd_t mc; 4708 mbx_cmd_t *mcp = &mc; 4709 4710 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) && 4711 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw)) 4712 return QLA_FUNCTION_FAILED; 4713 4714 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10df, 4715 "Entered %s.\n", __func__); 4716 4717 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; 4718 mcp->mb[1] = enable ? FAC_OPT_CMD_WRITE_ENABLE : 4719 FAC_OPT_CMD_WRITE_PROTECT; 4720 mcp->out_mb = MBX_1|MBX_0; 4721 mcp->in_mb = MBX_1|MBX_0; 4722 mcp->tov = MBX_TOV_SECONDS; 4723 mcp->flags = 0; 4724 rval = qla2x00_mailbox_command(vha, mcp); 4725 4726 if (rval != QLA_SUCCESS) { 4727 ql_dbg(ql_dbg_mbx, vha, 0x10e0, 4728 "Failed=%x mb[0]=%x mb[1]=%x.\n", 4729 rval, mcp->mb[0], mcp->mb[1]); 4730 } else { 4731 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e1, 4732 "Done %s.\n", __func__); 4733 } 4734 4735 return rval; 4736 } 4737 4738 int 4739 qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish) 4740 { 4741 int rval; 4742 mbx_cmd_t mc; 4743 mbx_cmd_t *mcp = &mc; 4744 4745 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) && 4746 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw)) 4747 return QLA_FUNCTION_FAILED; 4748 4749 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2, 4750 "Entered %s.\n", __func__); 4751 4752 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; 4753 mcp->mb[1] = FAC_OPT_CMD_ERASE_SECTOR; 4754 mcp->mb[2] = LSW(start); 4755 mcp->mb[3] = MSW(start); 4756 mcp->mb[4] = LSW(finish); 4757 mcp->mb[5] = MSW(finish); 4758 mcp->out_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 4759 mcp->in_mb = MBX_2|MBX_1|MBX_0; 4760 mcp->tov = MBX_TOV_SECONDS; 4761 mcp->flags = 0; 4762 rval = qla2x00_mailbox_command(vha, mcp); 4763 4764 if (rval != QLA_SUCCESS) { 4765 ql_dbg(ql_dbg_mbx, vha, 0x10e3, 4766 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", 4767 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); 4768 } else { 4769 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4, 4770 "Done %s.\n", __func__); 4771 } 4772 4773 return rval; 4774 } 4775 4776 int 4777 qla81xx_fac_semaphore_access(scsi_qla_host_t *vha, int lock) 4778 { 4779 int rval = QLA_SUCCESS; 4780 mbx_cmd_t mc; 4781 mbx_cmd_t *mcp = &mc; 4782 struct qla_hw_data *ha = vha->hw; 4783 4784 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && 4785 !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 4786 return rval; 4787 4788 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2, 4789 
"Entered %s.\n", __func__); 4790 4791 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; 4792 mcp->mb[1] = (lock ? FAC_OPT_CMD_LOCK_SEMAPHORE : 4793 FAC_OPT_CMD_UNLOCK_SEMAPHORE); 4794 mcp->out_mb = MBX_1|MBX_0; 4795 mcp->in_mb = MBX_1|MBX_0; 4796 mcp->tov = MBX_TOV_SECONDS; 4797 mcp->flags = 0; 4798 rval = qla2x00_mailbox_command(vha, mcp); 4799 4800 if (rval != QLA_SUCCESS) { 4801 ql_dbg(ql_dbg_mbx, vha, 0x10e3, 4802 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", 4803 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); 4804 } else { 4805 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4, 4806 "Done %s.\n", __func__); 4807 } 4808 4809 return rval; 4810 } 4811 4812 int 4813 qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha) 4814 { 4815 int rval = 0; 4816 mbx_cmd_t mc; 4817 mbx_cmd_t *mcp = &mc; 4818 4819 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e5, 4820 "Entered %s.\n", __func__); 4821 4822 mcp->mb[0] = MBC_RESTART_MPI_FW; 4823 mcp->out_mb = MBX_0; 4824 mcp->in_mb = MBX_0|MBX_1; 4825 mcp->tov = MBX_TOV_SECONDS; 4826 mcp->flags = 0; 4827 rval = qla2x00_mailbox_command(vha, mcp); 4828 4829 if (rval != QLA_SUCCESS) { 4830 ql_dbg(ql_dbg_mbx, vha, 0x10e6, 4831 "Failed=%x mb[0]=%x mb[1]=%x.\n", 4832 rval, mcp->mb[0], mcp->mb[1]); 4833 } else { 4834 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e7, 4835 "Done %s.\n", __func__); 4836 } 4837 4838 return rval; 4839 } 4840 4841 int 4842 qla82xx_set_driver_version(scsi_qla_host_t *vha, char *version) 4843 { 4844 int rval; 4845 mbx_cmd_t mc; 4846 mbx_cmd_t *mcp = &mc; 4847 int i; 4848 int len; 4849 __le16 *str; 4850 struct qla_hw_data *ha = vha->hw; 4851 4852 if (!IS_P3P_TYPE(ha)) 4853 return QLA_FUNCTION_FAILED; 4854 4855 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117b, 4856 "Entered %s.\n", __func__); 4857 4858 str = (__force __le16 *)version; 4859 len = strlen(version); 4860 4861 mcp->mb[0] = MBC_SET_RNID_PARAMS; 4862 mcp->mb[1] = RNID_TYPE_SET_VERSION << 8; 4863 mcp->out_mb = MBX_1|MBX_0; 4864 for (i = 4; i < 16 && len; i++, str++, len -= 2) { 4865 mcp->mb[i] = le16_to_cpup(str); 4866 mcp->out_mb |= 1<<i; 4867 } 4868 for (; i < 16; i++) { 4869 mcp->mb[i] = 0; 4870 mcp->out_mb |= 1<<i; 4871 } 4872 mcp->in_mb = MBX_1|MBX_0; 4873 mcp->tov = MBX_TOV_SECONDS; 4874 mcp->flags = 0; 4875 rval = qla2x00_mailbox_command(vha, mcp); 4876 4877 if (rval != QLA_SUCCESS) { 4878 ql_dbg(ql_dbg_mbx, vha, 0x117c, 4879 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]); 4880 } else { 4881 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117d, 4882 "Done %s.\n", __func__); 4883 } 4884 4885 return rval; 4886 } 4887 4888 int 4889 qla25xx_set_driver_version(scsi_qla_host_t *vha, char *version) 4890 { 4891 int rval; 4892 mbx_cmd_t mc; 4893 mbx_cmd_t *mcp = &mc; 4894 int len; 4895 uint16_t dwlen; 4896 uint8_t *str; 4897 dma_addr_t str_dma; 4898 struct qla_hw_data *ha = vha->hw; 4899 4900 if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha) || 4901 IS_P3P_TYPE(ha)) 4902 return QLA_FUNCTION_FAILED; 4903 4904 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117e, 4905 "Entered %s.\n", __func__); 4906 4907 str = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &str_dma); 4908 if (!str) { 4909 ql_log(ql_log_warn, vha, 0x117f, 4910 "Failed to allocate driver version param.\n"); 4911 return QLA_MEMORY_ALLOC_FAILED; 4912 } 4913 4914 memcpy(str, "\x7\x3\x11\x0", 4); 4915 dwlen = str[0]; 4916 len = dwlen * 4 - 4; 4917 memset(str + 4, 0, len); 4918 if (len > strlen(version)) 4919 len = strlen(version); 4920 memcpy(str + 4, version, len); 4921 4922 mcp->mb[0] = MBC_SET_RNID_PARAMS; 4923 mcp->mb[1] = 
RNID_TYPE_SET_VERSION << 8 | dwlen; 4924 mcp->mb[2] = MSW(LSD(str_dma)); 4925 mcp->mb[3] = LSW(LSD(str_dma)); 4926 mcp->mb[6] = MSW(MSD(str_dma)); 4927 mcp->mb[7] = LSW(MSD(str_dma)); 4928 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 4929 mcp->in_mb = MBX_1|MBX_0; 4930 mcp->tov = MBX_TOV_SECONDS; 4931 mcp->flags = 0; 4932 rval = qla2x00_mailbox_command(vha, mcp); 4933 4934 if (rval != QLA_SUCCESS) { 4935 ql_dbg(ql_dbg_mbx, vha, 0x1180, 4936 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]); 4937 } else { 4938 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1181, 4939 "Done %s.\n", __func__); 4940 } 4941 4942 dma_pool_free(ha->s_dma_pool, str, str_dma); 4943 4944 return rval; 4945 } 4946 4947 int 4948 qla24xx_get_port_login_templ(scsi_qla_host_t *vha, dma_addr_t buf_dma, 4949 void *buf, uint16_t bufsiz) 4950 { 4951 int rval, i; 4952 mbx_cmd_t mc; 4953 mbx_cmd_t *mcp = &mc; 4954 uint32_t *bp; 4955 4956 if (!IS_FWI2_CAPABLE(vha->hw)) 4957 return QLA_FUNCTION_FAILED; 4958 4959 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159, 4960 "Entered %s.\n", __func__); 4961 4962 mcp->mb[0] = MBC_GET_RNID_PARAMS; 4963 mcp->mb[1] = RNID_TYPE_PORT_LOGIN << 8; 4964 mcp->mb[2] = MSW(buf_dma); 4965 mcp->mb[3] = LSW(buf_dma); 4966 mcp->mb[6] = MSW(MSD(buf_dma)); 4967 mcp->mb[7] = LSW(MSD(buf_dma)); 4968 mcp->mb[8] = bufsiz/4; 4969 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 4970 mcp->in_mb = MBX_1|MBX_0; 4971 mcp->tov = MBX_TOV_SECONDS; 4972 mcp->flags = 0; 4973 rval = qla2x00_mailbox_command(vha, mcp); 4974 4975 if (rval != QLA_SUCCESS) { 4976 ql_dbg(ql_dbg_mbx, vha, 0x115a, 4977 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]); 4978 } else { 4979 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b, 4980 "Done %s.\n", __func__); 4981 bp = (uint32_t *) buf; 4982 for (i = 0; i < (bufsiz-4)/4; i++, bp++) 4983 *bp = le32_to_cpu((__force __le32)*bp); 4984 } 4985 4986 return rval; 4987 } 4988 4989 #define PUREX_CMD_COUNT 4 4990 int 4991 qla25xx_set_els_cmds_supported(scsi_qla_host_t *vha) 4992 { 4993 int rval; 4994 mbx_cmd_t mc; 4995 mbx_cmd_t *mcp = &mc; 4996 uint8_t *els_cmd_map; 4997 uint8_t active_cnt = 0; 4998 dma_addr_t els_cmd_map_dma; 4999 uint8_t cmd_opcode[PUREX_CMD_COUNT]; 5000 uint8_t i, index, purex_bit; 5001 struct qla_hw_data *ha = vha->hw; 5002 5003 if (!IS_QLA25XX(ha) && !IS_QLA2031(ha) && 5004 !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 5005 return QLA_SUCCESS; 5006 5007 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1197, 5008 "Entered %s.\n", __func__); 5009 5010 els_cmd_map = dma_alloc_coherent(&ha->pdev->dev, ELS_CMD_MAP_SIZE, 5011 &els_cmd_map_dma, GFP_KERNEL); 5012 if (!els_cmd_map) { 5013 ql_log(ql_log_warn, vha, 0x7101, 5014 "Failed to allocate RDP els command param.\n"); 5015 return QLA_MEMORY_ALLOC_FAILED; 5016 } 5017 5018 /* List of Purex ELS */ 5019 if (ql2xrdpenable) { 5020 cmd_opcode[active_cnt] = ELS_RDP; 5021 active_cnt++; 5022 } 5023 if (ha->flags.scm_supported_f) { 5024 cmd_opcode[active_cnt] = ELS_FPIN; 5025 active_cnt++; 5026 } 5027 if (ha->flags.edif_enabled) { 5028 cmd_opcode[active_cnt] = ELS_AUTH_ELS; 5029 active_cnt++; 5030 } 5031 5032 for (i = 0; i < active_cnt; i++) { 5033 index = cmd_opcode[i] / 8; 5034 purex_bit = cmd_opcode[i] % 8; 5035 els_cmd_map[index] |= 1 << purex_bit; 5036 } 5037 5038 mcp->mb[0] = MBC_SET_RNID_PARAMS; 5039 mcp->mb[1] = RNID_TYPE_ELS_CMD << 8; 5040 mcp->mb[2] = MSW(LSD(els_cmd_map_dma)); 5041 mcp->mb[3] = LSW(LSD(els_cmd_map_dma)); 5042 mcp->mb[6] = MSW(MSD(els_cmd_map_dma)); 5043 mcp->mb[7] = LSW(MSD(els_cmd_map_dma)); 
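/* mb[2..3] carry the low 32 bits and mb[6..7] the high 32 bits of the ELS opcode bitmap DMA address; the ELS_CMD_MAP_SIZE-byte map is DMA'd out with the command. */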
5044 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 5045 mcp->in_mb = MBX_1|MBX_0; 5046 mcp->tov = MBX_TOV_SECONDS; 5047 mcp->flags = MBX_DMA_OUT; 5048 mcp->buf_size = ELS_CMD_MAP_SIZE; 5049 rval = qla2x00_mailbox_command(vha, mcp); 5050 5051 if (rval != QLA_SUCCESS) { 5052 ql_dbg(ql_dbg_mbx, vha, 0x118d, 5053 "Failed=%x (%x,%x).\n", rval, mcp->mb[0], mcp->mb[1]); 5054 } else { 5055 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c, 5056 "Done %s.\n", __func__); 5057 } 5058 5059 dma_free_coherent(&ha->pdev->dev, ELS_CMD_MAP_SIZE, 5060 els_cmd_map, els_cmd_map_dma); 5061 5062 return rval; 5063 } 5064 5065 static int 5066 qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp) 5067 { 5068 int rval; 5069 mbx_cmd_t mc; 5070 mbx_cmd_t *mcp = &mc; 5071 5072 if (!IS_FWI2_CAPABLE(vha->hw)) 5073 return QLA_FUNCTION_FAILED; 5074 5075 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159, 5076 "Entered %s.\n", __func__); 5077 5078 mcp->mb[0] = MBC_GET_RNID_PARAMS; 5079 mcp->mb[1] = RNID_TYPE_ASIC_TEMP << 8; 5080 mcp->out_mb = MBX_1|MBX_0; 5081 mcp->in_mb = MBX_1|MBX_0; 5082 mcp->tov = MBX_TOV_SECONDS; 5083 mcp->flags = 0; 5084 rval = qla2x00_mailbox_command(vha, mcp); 5085 *temp = mcp->mb[1]; 5086 5087 if (rval != QLA_SUCCESS) { 5088 ql_dbg(ql_dbg_mbx, vha, 0x115a, 5089 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]); 5090 } else { 5091 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b, 5092 "Done %s.\n", __func__); 5093 } 5094 5095 return rval; 5096 } 5097 5098 int 5099 qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp, 5100 uint16_t dev, uint16_t off, uint16_t len, uint16_t opt) 5101 { 5102 int rval; 5103 mbx_cmd_t mc; 5104 mbx_cmd_t *mcp = &mc; 5105 struct qla_hw_data *ha = vha->hw; 5106 5107 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8, 5108 "Entered %s.\n", __func__); 5109 5110 if (!IS_FWI2_CAPABLE(ha)) 5111 return QLA_FUNCTION_FAILED; 5112 5113 if (len == 1) 5114 opt |= BIT_0; 5115 5116 mcp->mb[0] = MBC_READ_SFP; 5117 mcp->mb[1] = dev; 5118 mcp->mb[2] = MSW(LSD(sfp_dma)); 5119 mcp->mb[3] = LSW(LSD(sfp_dma)); 5120 mcp->mb[6] = MSW(MSD(sfp_dma)); 5121 mcp->mb[7] = LSW(MSD(sfp_dma)); 5122 mcp->mb[8] = len; 5123 mcp->mb[9] = off; 5124 mcp->mb[10] = opt; 5125 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 5126 mcp->in_mb = MBX_1|MBX_0; 5127 mcp->tov = MBX_TOV_SECONDS; 5128 mcp->flags = 0; 5129 rval = qla2x00_mailbox_command(vha, mcp); 5130 5131 if (opt & BIT_0) 5132 *sfp = mcp->mb[1]; 5133 5134 if (rval != QLA_SUCCESS) { 5135 ql_dbg(ql_dbg_mbx, vha, 0x10e9, 5136 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5137 if (mcp->mb[0] == MBS_COMMAND_ERROR && mcp->mb[1] == 0x22) { 5138 /* sfp is not there */ 5139 rval = QLA_INTERFACE_ERROR; 5140 } 5141 } else { 5142 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea, 5143 "Done %s.\n", __func__); 5144 } 5145 5146 return rval; 5147 } 5148 5149 int 5150 qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp, 5151 uint16_t dev, uint16_t off, uint16_t len, uint16_t opt) 5152 { 5153 int rval; 5154 mbx_cmd_t mc; 5155 mbx_cmd_t *mcp = &mc; 5156 struct qla_hw_data *ha = vha->hw; 5157 5158 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10eb, 5159 "Entered %s.\n", __func__); 5160 5161 if (!IS_FWI2_CAPABLE(ha)) 5162 return QLA_FUNCTION_FAILED; 5163 5164 if (len == 1) 5165 opt |= BIT_0; 5166 5167 if (opt & BIT_0) 5168 len = *sfp; 5169 5170 mcp->mb[0] = MBC_WRITE_SFP; 5171 mcp->mb[1] = dev; 5172 mcp->mb[2] = MSW(LSD(sfp_dma)); 5173 mcp->mb[3] = LSW(LSD(sfp_dma)); 5174 mcp->mb[6] = MSW(MSD(sfp_dma)); 
5175 mcp->mb[7] = LSW(MSD(sfp_dma)); 5176 mcp->mb[8] = len; 5177 mcp->mb[9] = off; 5178 mcp->mb[10] = opt; 5179 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 5180 mcp->in_mb = MBX_1|MBX_0; 5181 mcp->tov = MBX_TOV_SECONDS; 5182 mcp->flags = 0; 5183 rval = qla2x00_mailbox_command(vha, mcp); 5184 5185 if (rval != QLA_SUCCESS) { 5186 ql_dbg(ql_dbg_mbx, vha, 0x10ec, 5187 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5188 } else { 5189 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ed, 5190 "Done %s.\n", __func__); 5191 } 5192 5193 return rval; 5194 } 5195 5196 int 5197 qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma, 5198 uint16_t size_in_bytes, uint16_t *actual_size) 5199 { 5200 int rval; 5201 mbx_cmd_t mc; 5202 mbx_cmd_t *mcp = &mc; 5203 5204 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ee, 5205 "Entered %s.\n", __func__); 5206 5207 if (!IS_CNA_CAPABLE(vha->hw)) 5208 return QLA_FUNCTION_FAILED; 5209 5210 mcp->mb[0] = MBC_GET_XGMAC_STATS; 5211 mcp->mb[2] = MSW(stats_dma); 5212 mcp->mb[3] = LSW(stats_dma); 5213 mcp->mb[6] = MSW(MSD(stats_dma)); 5214 mcp->mb[7] = LSW(MSD(stats_dma)); 5215 mcp->mb[8] = size_in_bytes >> 2; 5216 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0; 5217 mcp->in_mb = MBX_2|MBX_1|MBX_0; 5218 mcp->tov = MBX_TOV_SECONDS; 5219 mcp->flags = 0; 5220 rval = qla2x00_mailbox_command(vha, mcp); 5221 5222 if (rval != QLA_SUCCESS) { 5223 ql_dbg(ql_dbg_mbx, vha, 0x10ef, 5224 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", 5225 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); 5226 } else { 5227 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f0, 5228 "Done %s.\n", __func__); 5229 5230 5231 *actual_size = mcp->mb[2] << 2; 5232 } 5233 5234 return rval; 5235 } 5236 5237 int 5238 qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma, 5239 uint16_t size) 5240 { 5241 int rval; 5242 mbx_cmd_t mc; 5243 mbx_cmd_t *mcp = &mc; 5244 5245 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f1, 5246 "Entered %s.\n", __func__); 5247 5248 if (!IS_CNA_CAPABLE(vha->hw)) 5249 return QLA_FUNCTION_FAILED; 5250 5251 mcp->mb[0] = MBC_GET_DCBX_PARAMS; 5252 mcp->mb[1] = 0; 5253 mcp->mb[2] = MSW(tlv_dma); 5254 mcp->mb[3] = LSW(tlv_dma); 5255 mcp->mb[6] = MSW(MSD(tlv_dma)); 5256 mcp->mb[7] = LSW(MSD(tlv_dma)); 5257 mcp->mb[8] = size; 5258 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 5259 mcp->in_mb = MBX_2|MBX_1|MBX_0; 5260 mcp->tov = MBX_TOV_SECONDS; 5261 mcp->flags = 0; 5262 rval = qla2x00_mailbox_command(vha, mcp); 5263 5264 if (rval != QLA_SUCCESS) { 5265 ql_dbg(ql_dbg_mbx, vha, 0x10f2, 5266 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", 5267 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); 5268 } else { 5269 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f3, 5270 "Done %s.\n", __func__); 5271 } 5272 5273 return rval; 5274 } 5275 5276 int 5277 qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data) 5278 { 5279 int rval; 5280 mbx_cmd_t mc; 5281 mbx_cmd_t *mcp = &mc; 5282 5283 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f4, 5284 "Entered %s.\n", __func__); 5285 5286 if (!IS_FWI2_CAPABLE(vha->hw)) 5287 return QLA_FUNCTION_FAILED; 5288 5289 mcp->mb[0] = MBC_READ_RAM_EXTENDED; 5290 mcp->mb[1] = LSW(risc_addr); 5291 mcp->mb[8] = MSW(risc_addr); 5292 mcp->out_mb = MBX_8|MBX_1|MBX_0; 5293 mcp->in_mb = MBX_3|MBX_2|MBX_0; 5294 mcp->tov = MBX_TOV_SECONDS; 5295 mcp->flags = 0; 5296 rval = qla2x00_mailbox_command(vha, mcp); 5297 if (rval != QLA_SUCCESS) { 5298 ql_dbg(ql_dbg_mbx, vha, 0x10f5, 5299 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5300 } else { 5301 
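/* Reassemble the 32-bit word from the returned mailboxes: mb[2] holds the low 16 bits and mb[3] the high 16 bits. */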
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f6, 5302 "Done %s.\n", __func__); 5303 *data = mcp->mb[3] << 16 | mcp->mb[2]; 5304 } 5305 5306 return rval; 5307 } 5308 5309 int 5310 qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, 5311 uint16_t *mresp) 5312 { 5313 int rval; 5314 mbx_cmd_t mc; 5315 mbx_cmd_t *mcp = &mc; 5316 5317 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f7, 5318 "Entered %s.\n", __func__); 5319 5320 memset(mcp->mb, 0 , sizeof(mcp->mb)); 5321 mcp->mb[0] = MBC_DIAGNOSTIC_LOOP_BACK; 5322 mcp->mb[1] = mreq->options | BIT_6; // BIT_6 specifies 64 bit addressing 5323 5324 /* transfer count */ 5325 mcp->mb[10] = LSW(mreq->transfer_size); 5326 mcp->mb[11] = MSW(mreq->transfer_size); 5327 5328 /* send data address */ 5329 mcp->mb[14] = LSW(mreq->send_dma); 5330 mcp->mb[15] = MSW(mreq->send_dma); 5331 mcp->mb[20] = LSW(MSD(mreq->send_dma)); 5332 mcp->mb[21] = MSW(MSD(mreq->send_dma)); 5333 5334 /* receive data address */ 5335 mcp->mb[16] = LSW(mreq->rcv_dma); 5336 mcp->mb[17] = MSW(mreq->rcv_dma); 5337 mcp->mb[6] = LSW(MSD(mreq->rcv_dma)); 5338 mcp->mb[7] = MSW(MSD(mreq->rcv_dma)); 5339 5340 /* Iteration count */ 5341 mcp->mb[18] = LSW(mreq->iteration_count); 5342 mcp->mb[19] = MSW(mreq->iteration_count); 5343 5344 mcp->out_mb = MBX_21|MBX_20|MBX_19|MBX_18|MBX_17|MBX_16|MBX_15| 5345 MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0; 5346 if (IS_CNA_CAPABLE(vha->hw)) 5347 mcp->out_mb |= MBX_2; 5348 mcp->in_mb = MBX_19|MBX_18|MBX_3|MBX_2|MBX_1|MBX_0; 5349 5350 mcp->buf_size = mreq->transfer_size; 5351 mcp->tov = MBX_TOV_SECONDS; 5352 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; 5353 5354 rval = qla2x00_mailbox_command(vha, mcp); 5355 5356 if (rval != QLA_SUCCESS) { 5357 ql_dbg(ql_dbg_mbx, vha, 0x10f8, 5358 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[18]=%x " 5359 "mb[19]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], 5360 mcp->mb[3], mcp->mb[18], mcp->mb[19]); 5361 } else { 5362 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f9, 5363 "Done %s.\n", __func__); 5364 } 5365 5366 /* Copy mailbox information */ 5367 memcpy( mresp, mcp->mb, 64); 5368 return rval; 5369 } 5370 5371 int 5372 qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, 5373 uint16_t *mresp) 5374 { 5375 int rval; 5376 mbx_cmd_t mc; 5377 mbx_cmd_t *mcp = &mc; 5378 struct qla_hw_data *ha = vha->hw; 5379 5380 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fa, 5381 "Entered %s.\n", __func__); 5382 5383 memset(mcp->mb, 0 , sizeof(mcp->mb)); 5384 mcp->mb[0] = MBC_DIAGNOSTIC_ECHO; 5385 /* BIT_6 specifies 64bit address */ 5386 mcp->mb[1] = mreq->options | BIT_15 | BIT_6; 5387 if (IS_CNA_CAPABLE(ha)) { 5388 mcp->mb[2] = vha->fcoe_fcf_idx; 5389 } 5390 mcp->mb[16] = LSW(mreq->rcv_dma); 5391 mcp->mb[17] = MSW(mreq->rcv_dma); 5392 mcp->mb[6] = LSW(MSD(mreq->rcv_dma)); 5393 mcp->mb[7] = MSW(MSD(mreq->rcv_dma)); 5394 5395 mcp->mb[10] = LSW(mreq->transfer_size); 5396 5397 mcp->mb[14] = LSW(mreq->send_dma); 5398 mcp->mb[15] = MSW(mreq->send_dma); 5399 mcp->mb[20] = LSW(MSD(mreq->send_dma)); 5400 mcp->mb[21] = MSW(MSD(mreq->send_dma)); 5401 5402 mcp->out_mb = MBX_21|MBX_20|MBX_17|MBX_16|MBX_15| 5403 MBX_14|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0; 5404 if (IS_CNA_CAPABLE(ha)) 5405 mcp->out_mb |= MBX_2; 5406 5407 mcp->in_mb = MBX_0; 5408 if (IS_CNA_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || 5409 IS_QLA2031(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) 5410 mcp->in_mb |= MBX_1; 5411 if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) || 5412 IS_QLA28XX(ha)) 5413 mcp->in_mb |= MBX_3; 
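/* Both buffers are DMA-mapped for the echo: the frame at mreq->send_dma is transmitted and the looped-back data (mreq->transfer_size bytes) lands at mreq->rcv_dma. */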
5414 5415 mcp->tov = MBX_TOV_SECONDS; 5416 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; 5417 mcp->buf_size = mreq->transfer_size; 5418 5419 rval = qla2x00_mailbox_command(vha, mcp); 5420 5421 if (rval != QLA_SUCCESS) { 5422 ql_dbg(ql_dbg_mbx, vha, 0x10fb, 5423 "Failed=%x mb[0]=%x mb[1]=%x.\n", 5424 rval, mcp->mb[0], mcp->mb[1]); 5425 } else { 5426 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fc, 5427 "Done %s.\n", __func__); 5428 } 5429 5430 /* Copy mailbox information */ 5431 memcpy(mresp, mcp->mb, 64); 5432 return rval; 5433 } 5434 5435 int 5436 qla84xx_reset_chip(scsi_qla_host_t *vha, uint16_t enable_diagnostic) 5437 { 5438 int rval; 5439 mbx_cmd_t mc; 5440 mbx_cmd_t *mcp = &mc; 5441 5442 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fd, 5443 "Entered %s enable_diag=%d.\n", __func__, enable_diagnostic); 5444 5445 mcp->mb[0] = MBC_ISP84XX_RESET; 5446 mcp->mb[1] = enable_diagnostic; 5447 mcp->out_mb = MBX_1|MBX_0; 5448 mcp->in_mb = MBX_1|MBX_0; 5449 mcp->tov = MBX_TOV_SECONDS; 5450 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; 5451 rval = qla2x00_mailbox_command(vha, mcp); 5452 5453 if (rval != QLA_SUCCESS) 5454 ql_dbg(ql_dbg_mbx, vha, 0x10fe, "Failed=%x.\n", rval); 5455 else 5456 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ff, 5457 "Done %s.\n", __func__); 5458 5459 return rval; 5460 } 5461 5462 int 5463 qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data) 5464 { 5465 int rval; 5466 mbx_cmd_t mc; 5467 mbx_cmd_t *mcp = &mc; 5468 5469 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1100, 5470 "Entered %s.\n", __func__); 5471 5472 if (!IS_FWI2_CAPABLE(vha->hw)) 5473 return QLA_FUNCTION_FAILED; 5474 5475 mcp->mb[0] = MBC_WRITE_RAM_WORD_EXTENDED; 5476 mcp->mb[1] = LSW(risc_addr); 5477 mcp->mb[2] = LSW(data); 5478 mcp->mb[3] = MSW(data); 5479 mcp->mb[8] = MSW(risc_addr); 5480 mcp->out_mb = MBX_8|MBX_3|MBX_2|MBX_1|MBX_0; 5481 mcp->in_mb = MBX_1|MBX_0; 5482 mcp->tov = MBX_TOV_SECONDS; 5483 mcp->flags = 0; 5484 rval = qla2x00_mailbox_command(vha, mcp); 5485 if (rval != QLA_SUCCESS) { 5486 ql_dbg(ql_dbg_mbx, vha, 0x1101, 5487 "Failed=%x mb[0]=%x mb[1]=%x.\n", 5488 rval, mcp->mb[0], mcp->mb[1]); 5489 } else { 5490 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1102, 5491 "Done %s.\n", __func__); 5492 } 5493 5494 return rval; 5495 } 5496 5497 int 5498 qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb) 5499 { 5500 int rval; 5501 uint32_t stat, timer; 5502 uint16_t mb0 = 0; 5503 struct qla_hw_data *ha = vha->hw; 5504 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 5505 5506 rval = QLA_SUCCESS; 5507 5508 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1103, 5509 "Entered %s.\n", __func__); 5510 5511 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); 5512 5513 /* Write the MBC data to the registers */ 5514 wrt_reg_word(&reg->mailbox0, MBC_WRITE_MPI_REGISTER); 5515 wrt_reg_word(&reg->mailbox1, mb[0]); 5516 wrt_reg_word(&reg->mailbox2, mb[1]); 5517 wrt_reg_word(&reg->mailbox3, mb[2]); 5518 wrt_reg_word(&reg->mailbox4, mb[3]); 5519 5520 wrt_reg_dword(&reg->hccr, HCCRX_SET_HOST_INT); 5521 5522 /* Poll for MBC interrupt */ 5523 for (timer = 6000000; timer; timer--) { 5524 /* Check for pending interrupts. 
*/ 5525 stat = rd_reg_dword(&reg->host_status); 5526 if (stat & HSRX_RISC_INT) { 5527 stat &= 0xff; 5528 5529 if (stat == 0x1 || stat == 0x2 || 5530 stat == 0x10 || stat == 0x11) { 5531 set_bit(MBX_INTERRUPT, 5532 &ha->mbx_cmd_flags); 5533 mb0 = rd_reg_word(&reg->mailbox0); 5534 wrt_reg_dword(&reg->hccr, 5535 HCCRX_CLR_RISC_INT); 5536 rd_reg_dword(&reg->hccr); 5537 break; 5538 } 5539 } 5540 udelay(5); 5541 } 5542 5543 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) 5544 rval = mb0 & MBS_MASK; 5545 else 5546 rval = QLA_FUNCTION_FAILED; 5547 5548 if (rval != QLA_SUCCESS) { 5549 ql_dbg(ql_dbg_mbx, vha, 0x1104, 5550 "Failed=%x mb[0]=%x.\n", rval, mb[0]); 5551 } else { 5552 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1105, 5553 "Done %s.\n", __func__); 5554 } 5555 5556 return rval; 5557 } 5558 5559 /* Set the specified data rate */ 5560 int 5561 qla2x00_set_data_rate(scsi_qla_host_t *vha, uint16_t mode) 5562 { 5563 int rval; 5564 mbx_cmd_t mc; 5565 mbx_cmd_t *mcp = &mc; 5566 struct qla_hw_data *ha = vha->hw; 5567 uint16_t val; 5568 5569 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106, 5570 "Entered %s speed:0x%x mode:0x%x.\n", __func__, ha->set_data_rate, 5571 mode); 5572 5573 if (!IS_FWI2_CAPABLE(ha)) 5574 return QLA_FUNCTION_FAILED; 5575 5576 memset(mcp, 0, sizeof(*mcp)); 5577 switch (ha->set_data_rate) { 5578 case PORT_SPEED_AUTO: 5579 case PORT_SPEED_4GB: 5580 case PORT_SPEED_8GB: 5581 case PORT_SPEED_16GB: 5582 case PORT_SPEED_32GB: 5583 val = ha->set_data_rate; 5584 break; 5585 default: 5586 ql_log(ql_log_warn, vha, 0x1199, 5587 "Unrecognized speed setting:%d. Setting Autoneg\n", 5588 ha->set_data_rate); 5589 val = ha->set_data_rate = PORT_SPEED_AUTO; 5590 break; 5591 } 5592 5593 mcp->mb[0] = MBC_DATA_RATE; 5594 mcp->mb[1] = mode; 5595 mcp->mb[2] = val; 5596 5597 mcp->out_mb = MBX_2|MBX_1|MBX_0; 5598 mcp->in_mb = MBX_2|MBX_1|MBX_0; 5599 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) 5600 mcp->in_mb |= MBX_4|MBX_3; 5601 mcp->tov = MBX_TOV_SECONDS; 5602 mcp->flags = 0; 5603 rval = qla2x00_mailbox_command(vha, mcp); 5604 if (rval != QLA_SUCCESS) { 5605 ql_dbg(ql_dbg_mbx, vha, 0x1107, 5606 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5607 } else { 5608 if (mcp->mb[1] != 0x7) 5609 ql_dbg(ql_dbg_mbx, vha, 0x1179, 5610 "Speed set:0x%x\n", mcp->mb[1]); 5611 5612 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108, 5613 "Done %s.\n", __func__); 5614 } 5615 5616 return rval; 5617 } 5618 5619 int 5620 qla2x00_get_data_rate(scsi_qla_host_t *vha) 5621 { 5622 int rval; 5623 mbx_cmd_t mc; 5624 mbx_cmd_t *mcp = &mc; 5625 struct qla_hw_data *ha = vha->hw; 5626 5627 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106, 5628 "Entered %s.\n", __func__); 5629 5630 if (!IS_FWI2_CAPABLE(ha)) 5631 return QLA_FUNCTION_FAILED; 5632 5633 mcp->mb[0] = MBC_DATA_RATE; 5634 mcp->mb[1] = QLA_GET_DATA_RATE; 5635 mcp->out_mb = MBX_1|MBX_0; 5636 mcp->in_mb = MBX_2|MBX_1|MBX_0; 5637 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) 5638 mcp->in_mb |= MBX_4|MBX_3; 5639 mcp->tov = MBX_TOV_SECONDS; 5640 mcp->flags = 0; 5641 rval = qla2x00_mailbox_command(vha, mcp); 5642 if (rval != QLA_SUCCESS) { 5643 ql_dbg(ql_dbg_mbx, vha, 0x1107, 5644 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5645 } else { 5646 if (mcp->mb[1] != 0x7) 5647 ha->link_data_rate = mcp->mb[1]; 5648 5649 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 5650 if (mcp->mb[4] & BIT_0) 5651 ql_log(ql_log_info, vha, 0x11a2, 5652 "FEC=enabled (data rate).\n"); 5653 } 5654 5655 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108, 5656 "Done %s.\n", __func__); 
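/* mb[1] reports the current link data rate; ha->link_data_rate is only updated when the firmware returns a value other than 0x7. */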
5657 if (mcp->mb[1] != 0x7) 5658 ha->link_data_rate = mcp->mb[1]; 5659 } 5660 5661 return rval; 5662 } 5663 5664 int 5665 qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb) 5666 { 5667 int rval; 5668 mbx_cmd_t mc; 5669 mbx_cmd_t *mcp = &mc; 5670 struct qla_hw_data *ha = vha->hw; 5671 5672 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1109, 5673 "Entered %s.\n", __func__); 5674 5675 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA8044(ha) && 5676 !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 5677 return QLA_FUNCTION_FAILED; 5678 mcp->mb[0] = MBC_GET_PORT_CONFIG; 5679 mcp->out_mb = MBX_0; 5680 mcp->in_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 5681 mcp->tov = MBX_TOV_SECONDS; 5682 mcp->flags = 0; 5683 5684 rval = qla2x00_mailbox_command(vha, mcp); 5685 5686 if (rval != QLA_SUCCESS) { 5687 ql_dbg(ql_dbg_mbx, vha, 0x110a, 5688 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5689 } else { 5690 /* Copy all bits to preserve original value */ 5691 memcpy(mb, &mcp->mb[1], sizeof(uint16_t) * 4); 5692 5693 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110b, 5694 "Done %s.\n", __func__); 5695 } 5696 return rval; 5697 } 5698 5699 int 5700 qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb) 5701 { 5702 int rval; 5703 mbx_cmd_t mc; 5704 mbx_cmd_t *mcp = &mc; 5705 5706 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110c, 5707 "Entered %s.\n", __func__); 5708 5709 mcp->mb[0] = MBC_SET_PORT_CONFIG; 5710 /* Copy all bits to preserve original setting */ 5711 memcpy(&mcp->mb[1], mb, sizeof(uint16_t) * 4); 5712 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 5713 mcp->in_mb = MBX_0; 5714 mcp->tov = MBX_TOV_SECONDS; 5715 mcp->flags = 0; 5716 rval = qla2x00_mailbox_command(vha, mcp); 5717 5718 if (rval != QLA_SUCCESS) { 5719 ql_dbg(ql_dbg_mbx, vha, 0x110d, 5720 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5721 } else 5722 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110e, 5723 "Done %s.\n", __func__); 5724 5725 return rval; 5726 } 5727 5728 5729 int 5730 qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority, 5731 uint16_t *mb) 5732 { 5733 int rval; 5734 mbx_cmd_t mc; 5735 mbx_cmd_t *mcp = &mc; 5736 struct qla_hw_data *ha = vha->hw; 5737 5738 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110f, 5739 "Entered %s.\n", __func__); 5740 5741 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha)) 5742 return QLA_FUNCTION_FAILED; 5743 5744 mcp->mb[0] = MBC_PORT_PARAMS; 5745 mcp->mb[1] = loop_id; 5746 if (ha->flags.fcp_prio_enabled) 5747 mcp->mb[2] = BIT_1; 5748 else 5749 mcp->mb[2] = BIT_2; 5750 mcp->mb[4] = priority & 0xf; 5751 mcp->mb[9] = vha->vp_idx; 5752 mcp->out_mb = MBX_9|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 5753 mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0; 5754 mcp->tov = MBX_TOV_SECONDS; 5755 mcp->flags = 0; 5756 rval = qla2x00_mailbox_command(vha, mcp); 5757 if (mb != NULL) { 5758 mb[0] = mcp->mb[0]; 5759 mb[1] = mcp->mb[1]; 5760 mb[3] = mcp->mb[3]; 5761 mb[4] = mcp->mb[4]; 5762 } 5763 5764 if (rval != QLA_SUCCESS) { 5765 ql_dbg(ql_dbg_mbx, vha, 0x10cd, "Failed=%x.\n", rval); 5766 } else { 5767 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10cc, 5768 "Done %s.\n", __func__); 5769 } 5770 5771 return rval; 5772 } 5773 5774 int 5775 qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp) 5776 { 5777 int rval = QLA_FUNCTION_FAILED; 5778 struct qla_hw_data *ha = vha->hw; 5779 uint8_t byte; 5780 5781 if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha)) { 5782 ql_dbg(ql_dbg_mbx, vha, 0x1150, 5783 "Thermal not supported by this card.\n"); 5784 return rval; 5785 } 5786 5787 if (IS_QLA25XX(ha)) { 5788 if 
(ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC && 5789 ha->pdev->subsystem_device == 0x0175) { 5790 rval = qla2x00_read_sfp(vha, 0, &byte, 5791 0x98, 0x1, 1, BIT_13|BIT_0); 5792 *temp = byte; 5793 return rval; 5794 } 5795 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP && 5796 ha->pdev->subsystem_device == 0x338e) { 5797 rval = qla2x00_read_sfp(vha, 0, &byte, 5798 0x98, 0x1, 1, BIT_15|BIT_14|BIT_0); 5799 *temp = byte; 5800 return rval; 5801 } 5802 ql_dbg(ql_dbg_mbx, vha, 0x10c9, 5803 "Thermal not supported by this card.\n"); 5804 return rval; 5805 } 5806 5807 if (IS_QLA82XX(ha)) { 5808 *temp = qla82xx_read_temperature(vha); 5809 rval = QLA_SUCCESS; 5810 return rval; 5811 } else if (IS_QLA8044(ha)) { 5812 *temp = qla8044_read_temperature(vha); 5813 rval = QLA_SUCCESS; 5814 return rval; 5815 } 5816 5817 rval = qla2x00_read_asic_temperature(vha, temp); 5818 return rval; 5819 } 5820 5821 int 5822 qla82xx_mbx_intr_enable(scsi_qla_host_t *vha) 5823 { 5824 int rval; 5825 struct qla_hw_data *ha = vha->hw; 5826 mbx_cmd_t mc; 5827 mbx_cmd_t *mcp = &mc; 5828 5829 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1017, 5830 "Entered %s.\n", __func__); 5831 5832 if (!IS_FWI2_CAPABLE(ha)) 5833 return QLA_FUNCTION_FAILED; 5834 5835 memset(mcp, 0, sizeof(mbx_cmd_t)); 5836 mcp->mb[0] = MBC_TOGGLE_INTERRUPT; 5837 mcp->mb[1] = 1; 5838 5839 mcp->out_mb = MBX_1|MBX_0; 5840 mcp->in_mb = MBX_0; 5841 mcp->tov = MBX_TOV_SECONDS; 5842 mcp->flags = 0; 5843 5844 rval = qla2x00_mailbox_command(vha, mcp); 5845 if (rval != QLA_SUCCESS) { 5846 ql_dbg(ql_dbg_mbx, vha, 0x1016, 5847 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5848 } else { 5849 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100e, 5850 "Done %s.\n", __func__); 5851 } 5852 5853 return rval; 5854 } 5855 5856 int 5857 qla82xx_mbx_intr_disable(scsi_qla_host_t *vha) 5858 { 5859 int rval; 5860 struct qla_hw_data *ha = vha->hw; 5861 mbx_cmd_t mc; 5862 mbx_cmd_t *mcp = &mc; 5863 5864 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100d, 5865 "Entered %s.\n", __func__); 5866 5867 if (!IS_P3P_TYPE(ha)) 5868 return QLA_FUNCTION_FAILED; 5869 5870 memset(mcp, 0, sizeof(mbx_cmd_t)); 5871 mcp->mb[0] = MBC_TOGGLE_INTERRUPT; 5872 mcp->mb[1] = 0; 5873 5874 mcp->out_mb = MBX_1|MBX_0; 5875 mcp->in_mb = MBX_0; 5876 mcp->tov = MBX_TOV_SECONDS; 5877 mcp->flags = 0; 5878 5879 rval = qla2x00_mailbox_command(vha, mcp); 5880 if (rval != QLA_SUCCESS) { 5881 ql_dbg(ql_dbg_mbx, vha, 0x100c, 5882 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5883 } else { 5884 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100b, 5885 "Done %s.\n", __func__); 5886 } 5887 5888 return rval; 5889 } 5890 5891 int 5892 qla82xx_md_get_template_size(scsi_qla_host_t *vha) 5893 { 5894 struct qla_hw_data *ha = vha->hw; 5895 mbx_cmd_t mc; 5896 mbx_cmd_t *mcp = &mc; 5897 int rval = QLA_FUNCTION_FAILED; 5898 5899 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111f, 5900 "Entered %s.\n", __func__); 5901 5902 memset(mcp->mb, 0 , sizeof(mcp->mb)); 5903 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); 5904 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); 5905 mcp->mb[2] = LSW(RQST_TMPLT_SIZE); 5906 mcp->mb[3] = MSW(RQST_TMPLT_SIZE); 5907 5908 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 5909 mcp->in_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8| 5910 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 5911 5912 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; 5913 mcp->tov = MBX_TOV_SECONDS; 5914 rval = qla2x00_mailbox_command(vha, mcp); 5915 5916 /* Always copy back return mailbox values. 
*/ 5917 if (rval != QLA_SUCCESS) { 5918 ql_dbg(ql_dbg_mbx, vha, 0x1120, 5919 "mailbox command FAILED=0x%x, subcode=%x.\n", 5920 (mcp->mb[1] << 16) | mcp->mb[0], 5921 (mcp->mb[3] << 16) | mcp->mb[2]); 5922 } else { 5923 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1121, 5924 "Done %s.\n", __func__); 5925 ha->md_template_size = ((mcp->mb[3] << 16) | mcp->mb[2]); 5926 if (!ha->md_template_size) { 5927 ql_dbg(ql_dbg_mbx, vha, 0x1122, 5928 "Null template size obtained.\n"); 5929 rval = QLA_FUNCTION_FAILED; 5930 } 5931 } 5932 return rval; 5933 } 5934 5935 int 5936 qla82xx_md_get_template(scsi_qla_host_t *vha) 5937 { 5938 struct qla_hw_data *ha = vha->hw; 5939 mbx_cmd_t mc; 5940 mbx_cmd_t *mcp = &mc; 5941 int rval = QLA_FUNCTION_FAILED; 5942 5943 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1123, 5944 "Entered %s.\n", __func__); 5945 5946 ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev, 5947 ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL); 5948 if (!ha->md_tmplt_hdr) { 5949 ql_log(ql_log_warn, vha, 0x1124, 5950 "Unable to allocate memory for Minidump template.\n"); 5951 return rval; 5952 } 5953 5954 memset(mcp->mb, 0 , sizeof(mcp->mb)); 5955 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); 5956 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); 5957 mcp->mb[2] = LSW(RQST_TMPLT); 5958 mcp->mb[3] = MSW(RQST_TMPLT); 5959 mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma)); 5960 mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma)); 5961 mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma)); 5962 mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma)); 5963 mcp->mb[8] = LSW(ha->md_template_size); 5964 mcp->mb[9] = MSW(ha->md_template_size); 5965 5966 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; 5967 mcp->tov = MBX_TOV_SECONDS; 5968 mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8| 5969 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 5970 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0; 5971 rval = qla2x00_mailbox_command(vha, mcp); 5972 5973 if (rval != QLA_SUCCESS) { 5974 ql_dbg(ql_dbg_mbx, vha, 0x1125, 5975 "mailbox command FAILED=0x%x, subcode=%x.\n", 5976 ((mcp->mb[1] << 16) | mcp->mb[0]), 5977 ((mcp->mb[3] << 16) | mcp->mb[2])); 5978 } else 5979 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1126, 5980 "Done %s.\n", __func__); 5981 return rval; 5982 } 5983 5984 int 5985 qla8044_md_get_template(scsi_qla_host_t *vha) 5986 { 5987 struct qla_hw_data *ha = vha->hw; 5988 mbx_cmd_t mc; 5989 mbx_cmd_t *mcp = &mc; 5990 int rval = QLA_FUNCTION_FAILED; 5991 int offset = 0, size = MINIDUMP_SIZE_36K; 5992 5993 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11f, 5994 "Entered %s.\n", __func__); 5995 5996 ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev, 5997 ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL); 5998 if (!ha->md_tmplt_hdr) { 5999 ql_log(ql_log_warn, vha, 0xb11b, 6000 "Unable to allocate memory for Minidump template.\n"); 6001 return rval; 6002 } 6003 6004 memset(mcp->mb, 0 , sizeof(mcp->mb)); 6005 while (offset < ha->md_template_size) { 6006 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); 6007 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); 6008 mcp->mb[2] = LSW(RQST_TMPLT); 6009 mcp->mb[3] = MSW(RQST_TMPLT); 6010 mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma + offset)); 6011 mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma + offset)); 6012 mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma + offset)); 6013 mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma + offset)); 6014 mcp->mb[8] = LSW(size); 6015 mcp->mb[9] = MSW(size); 6016 mcp->mb[10] = offset & 0x0000FFFF; 6017 mcp->mb[11] = offset & 0xFFFF0000; 6018 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; 
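/* The template is fetched in MINIDUMP_SIZE_36K chunks; each pass advances the DMA address and byte offset until ha->md_template_size bytes have been read. */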
6019 mcp->tov = MBX_TOV_SECONDS; 6020 mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8| 6021 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 6022 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0; 6023 rval = qla2x00_mailbox_command(vha, mcp); 6024 6025 if (rval != QLA_SUCCESS) { 6026 ql_dbg(ql_dbg_mbx, vha, 0xb11c, 6027 "mailbox command FAILED=0x%x, subcode=%x.\n", 6028 ((mcp->mb[1] << 16) | mcp->mb[0]), 6029 ((mcp->mb[3] << 16) | mcp->mb[2])); 6030 return rval; 6031 } else 6032 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11d, 6033 "Done %s.\n", __func__); 6034 offset = offset + size; 6035 } 6036 return rval; 6037 } 6038 6039 int 6040 qla81xx_set_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg) 6041 { 6042 int rval; 6043 struct qla_hw_data *ha = vha->hw; 6044 mbx_cmd_t mc; 6045 mbx_cmd_t *mcp = &mc; 6046 6047 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha)) 6048 return QLA_FUNCTION_FAILED; 6049 6050 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1133, 6051 "Entered %s.\n", __func__); 6052 6053 memset(mcp, 0, sizeof(mbx_cmd_t)); 6054 mcp->mb[0] = MBC_SET_LED_CONFIG; 6055 mcp->mb[1] = led_cfg[0]; 6056 mcp->mb[2] = led_cfg[1]; 6057 if (IS_QLA8031(ha)) { 6058 mcp->mb[3] = led_cfg[2]; 6059 mcp->mb[4] = led_cfg[3]; 6060 mcp->mb[5] = led_cfg[4]; 6061 mcp->mb[6] = led_cfg[5]; 6062 } 6063 6064 mcp->out_mb = MBX_2|MBX_1|MBX_0; 6065 if (IS_QLA8031(ha)) 6066 mcp->out_mb |= MBX_6|MBX_5|MBX_4|MBX_3; 6067 mcp->in_mb = MBX_0; 6068 mcp->tov = MBX_TOV_SECONDS; 6069 mcp->flags = 0; 6070 6071 rval = qla2x00_mailbox_command(vha, mcp); 6072 if (rval != QLA_SUCCESS) { 6073 ql_dbg(ql_dbg_mbx, vha, 0x1134, 6074 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 6075 } else { 6076 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1135, 6077 "Done %s.\n", __func__); 6078 } 6079 6080 return rval; 6081 } 6082 6083 int 6084 qla81xx_get_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg) 6085 { 6086 int rval; 6087 struct qla_hw_data *ha = vha->hw; 6088 mbx_cmd_t mc; 6089 mbx_cmd_t *mcp = &mc; 6090 6091 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha)) 6092 return QLA_FUNCTION_FAILED; 6093 6094 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1136, 6095 "Entered %s.\n", __func__); 6096 6097 memset(mcp, 0, sizeof(mbx_cmd_t)); 6098 mcp->mb[0] = MBC_GET_LED_CONFIG; 6099 6100 mcp->out_mb = MBX_0; 6101 mcp->in_mb = MBX_2|MBX_1|MBX_0; 6102 if (IS_QLA8031(ha)) 6103 mcp->in_mb |= MBX_6|MBX_5|MBX_4|MBX_3; 6104 mcp->tov = MBX_TOV_SECONDS; 6105 mcp->flags = 0; 6106 6107 rval = qla2x00_mailbox_command(vha, mcp); 6108 if (rval != QLA_SUCCESS) { 6109 ql_dbg(ql_dbg_mbx, vha, 0x1137, 6110 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 6111 } else { 6112 led_cfg[0] = mcp->mb[1]; 6113 led_cfg[1] = mcp->mb[2]; 6114 if (IS_QLA8031(ha)) { 6115 led_cfg[2] = mcp->mb[3]; 6116 led_cfg[3] = mcp->mb[4]; 6117 led_cfg[4] = mcp->mb[5]; 6118 led_cfg[5] = mcp->mb[6]; 6119 } 6120 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1138, 6121 "Done %s.\n", __func__); 6122 } 6123 6124 return rval; 6125 } 6126 6127 int 6128 qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable) 6129 { 6130 int rval; 6131 struct qla_hw_data *ha = vha->hw; 6132 mbx_cmd_t mc; 6133 mbx_cmd_t *mcp = &mc; 6134 6135 if (!IS_P3P_TYPE(ha)) 6136 return QLA_FUNCTION_FAILED; 6137 6138 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1127, 6139 "Entered %s.\n", __func__); 6140 6141 memset(mcp, 0, sizeof(mbx_cmd_t)); 6142 mcp->mb[0] = MBC_SET_LED_CONFIG; 6143 if (enable) 6144 mcp->mb[7] = 0xE; 6145 else 6146 mcp->mb[7] = 0xD; 6147 6148 mcp->out_mb = MBX_7|MBX_0; 6149 mcp->in_mb = MBX_0; 6150 mcp->tov = MBX_TOV_SECONDS; 6151 mcp->flags = 0; 6152 6153 rval = 
int
qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	if (!IS_P3P_TYPE(ha))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1127,
	    "Entered %s.\n", __func__);

	memset(mcp, 0, sizeof(mbx_cmd_t));
	mcp->mb[0] = MBC_SET_LED_CONFIG;
	if (enable)
		mcp->mb[7] = 0xE;
	else
		mcp->mb[7] = 0xD;

	mcp->out_mb = MBX_7|MBX_0;
	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;

	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1128,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1129,
		    "Done %s.\n", __func__);
	}

	return rval;
}

int
qla83xx_wr_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1130,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_WRITE_REMOTE_REG;
	mcp->mb[1] = LSW(reg);
	mcp->mb[2] = MSW(reg);
	mcp->mb[3] = LSW(data);
	mcp->mb[4] = MSW(data);
	mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;

	mcp->in_mb = MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1131,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1132,
		    "Done %s.\n", __func__);
	}

	return rval;
}

int
qla2x00_port_logout(scsi_qla_host_t *vha, struct fc_port *fcport)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113b,
		    "Implicit LOGO Unsupported.\n");
		return QLA_FUNCTION_FAILED;
	}

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113c,
	    "Entering %s.\n", __func__);

	/* Perform Implicit LOGO. */
	mcp->mb[0] = MBC_PORT_LOGOUT;
	mcp->mb[1] = fcport->loop_id;
	mcp->mb[10] = BIT_15;
	mcp->out_mb = MBX_10|MBX_1|MBX_0;
	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS)
		ql_dbg(ql_dbg_mbx, vha, 0x113d,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	else
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113e,
		    "Done %s.\n", __func__);

	return rval;
}

int
qla83xx_rd_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t *data)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;
	unsigned long retry_max_time = jiffies + (2 * HZ);

	if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx, vha, 0x114b, "Entered %s.\n", __func__);

retry_rd_reg:
	mcp->mb[0] = MBC_READ_REMOTE_REG;
	mcp->mb[1] = LSW(reg);
	mcp->mb[2] = MSW(reg);
	mcp->out_mb = MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x114c,
		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
		    rval, mcp->mb[0], mcp->mb[1]);
	} else {
		*data = (mcp->mb[3] | (mcp->mb[4] << 16));
		if (*data == QLA8XXX_BAD_VALUE) {
			/*
			 * During soft-reset CAMRAM register reads might
			 * return 0xbad0bad0. So retry for MAX of 2 sec
			 * while reading camram registers.
			 */
			if (time_after(jiffies, retry_max_time)) {
				ql_dbg(ql_dbg_mbx, vha, 0x1141,
				    "Failure to read CAMRAM register. "
				    "data=0x%x.\n", *data);
				return QLA_FUNCTION_FAILED;
			}
			msleep(100);
			goto retry_rd_reg;
		}
		ql_dbg(ql_dbg_mbx, vha, 0x1142, "Done %s.\n", __func__);
	}

	return rval;
}

int
qla83xx_restart_nic_firmware(scsi_qla_host_t *vha)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA83XX(ha))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx, vha, 0x1143, "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_RESTART_NIC_FIRMWARE;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1144,
		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
		    rval, mcp->mb[0], mcp->mb[1]);
		qla2xxx_dump_fw(vha);
	} else {
		ql_dbg(ql_dbg_mbx, vha, 0x1145, "Done %s.\n", __func__);
	}

	return rval;
}

int
qla83xx_access_control(scsi_qla_host_t *vha, uint16_t options,
	uint32_t start_addr, uint32_t end_addr, uint16_t *sector_size)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	uint8_t subcode = (uint8_t)options;
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA8031(ha))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx, vha, 0x1146, "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_SET_ACCESS_CONTROL;
	mcp->mb[1] = options;
	mcp->out_mb = MBX_1|MBX_0;
	if (subcode & BIT_2) {
		mcp->mb[2] = LSW(start_addr);
		mcp->mb[3] = MSW(start_addr);
		mcp->mb[4] = LSW(end_addr);
		mcp->mb[5] = MSW(end_addr);
		mcp->out_mb |= MBX_5|MBX_4|MBX_3|MBX_2;
	}
	mcp->in_mb = MBX_2|MBX_1|MBX_0;
	if (!(subcode & (BIT_2 | BIT_5)))
		mcp->in_mb |= MBX_4|MBX_3;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1147,
		    "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[4]=%x.\n",
		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3],
		    mcp->mb[4]);
		qla2xxx_dump_fw(vha);
	} else {
		if (subcode & BIT_5)
			*sector_size = mcp->mb[1];
		else if (subcode & (BIT_6 | BIT_7)) {
			ql_dbg(ql_dbg_mbx, vha, 0x1148,
			    "Driver-lock id=%x%x", mcp->mb[4], mcp->mb[3]);
		} else if (subcode & (BIT_3 | BIT_4)) {
			ql_dbg(ql_dbg_mbx, vha, 0x1149,
			    "Flash-lock id=%x%x", mcp->mb[4], mcp->mb[3]);
		}
		ql_dbg(ql_dbg_mbx, vha, 0x114a, "Done %s.\n", __func__);
	}

	return rval;
}

int
qla2x00_dump_mctp_data(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
	uint32_t size)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	if (!IS_MCTP_CAPABLE(vha->hw))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114f,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
	mcp->mb[1] = LSW(addr);
	mcp->mb[2] = MSW(req_dma);
	mcp->mb[3] = LSW(req_dma);
	mcp->mb[4] = MSW(size);
	mcp->mb[5] = LSW(size);
	mcp->mb[6] = MSW(MSD(req_dma));
	mcp->mb[7] = LSW(MSD(req_dma));
	mcp->mb[8] = MSW(addr);
	/* Setting RAM ID to valid */
	/* For MCTP RAM ID is 0x40 */
	mcp->mb[10] = BIT_7 | 0x40;

	mcp->out_mb = MBX_10|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|
	    MBX_0;

	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x114e,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114d,
		    "Done %s.\n", __func__);
	}

	return rval;
}

int
qla26xx_dport_diagnostics(scsi_qla_host_t *vha,
	void *dd_buf, uint size, uint options)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	dma_addr_t dd_dma;

	if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) &&
	    !IS_QLA28XX(vha->hw))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x119f,
	    "Entered %s.\n", __func__);

	dd_dma = dma_map_single(&vha->hw->pdev->dev,
	    dd_buf, size, DMA_FROM_DEVICE);
	if (dma_mapping_error(&vha->hw->pdev->dev, dd_dma)) {
		ql_log(ql_log_warn, vha, 0x1194, "Failed to map dma buffer.\n");
		return QLA_MEMORY_ALLOC_FAILED;
	}

	memset(dd_buf, 0, size);

	mcp->mb[0] = MBC_DPORT_DIAGNOSTICS;
	mcp->mb[1] = options;
	mcp->mb[2] = MSW(LSD(dd_dma));
	mcp->mb[3] = LSW(LSD(dd_dma));
	mcp->mb[6] = MSW(MSD(dd_dma));
	mcp->mb[7] = LSW(MSD(dd_dma));
	mcp->mb[8] = size;
	mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->buf_size = size;
	mcp->flags = MBX_DMA_IN;
	mcp->tov = MBX_TOV_SECONDS * 4;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1195, "Failed=%x.\n", rval);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1196,
		    "Done %s.\n", __func__);
	}

	dma_unmap_single(&vha->hw->pdev->dev, dd_dma,
	    size, DMA_FROM_DEVICE);

	return rval;
}

static void qla2x00_async_mb_sp_done(srb_t *sp, int res)
{
	sp->u.iocb_cmd.u.mbx.rc = res;

	complete(&sp->u.iocb_cmd.u.mbx.comp);
	/* don't free sp here. Let the caller do the free */
}

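/*
 * Illustrative sketch (not part of the original driver): allocating a result
 * buffer for qla26xx_dport_diagnostics().  The helper name, the buffer length
 * and the 'options' word are placeholders; the real option encoding and the
 * layout of the returned data are defined by the D-Port diagnostics firmware
 * interface.
 */
static int __maybe_unused qla26xx_dport_diag_example(scsi_qla_host_t *vha,
	uint options)
{
	const uint len = 1024;	/* assumed result-buffer size */
	void *buf;
	int rval;

	buf = kzalloc(len, GFP_KERNEL);
	if (!buf)
		return QLA_MEMORY_ALLOC_FAILED;

	/* The routine above maps/unmaps the buffer for DMA internally. */
	rval = qla26xx_dport_diagnostics(vha, buf, len, options);

	kfree(buf);
	return rval;
}
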
/*
 * This mailbox uses the iocb interface to send MB commands.
 * This allows non-critical (non chip setup) commands to go
 * out in parallel.
 */
int qla24xx_send_mb_cmd(struct scsi_qla_host *vha, mbx_cmd_t *mcp)
{
	int rval = QLA_FUNCTION_FAILED;
	srb_t *sp;
	struct srb_iocb *c;

	if (!vha->hw->flags.fw_started)
		goto done;

	/* ref: INIT */
	sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
	if (!sp)
		goto done;

	c = &sp->u.iocb_cmd;
	init_completion(&c->u.mbx.comp);

	sp->type = SRB_MB_IOCB;
	sp->name = mb_to_str(mcp->mb[0]);
	qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
	    qla2x00_async_mb_sp_done);

	memcpy(sp->u.iocb_cmd.u.mbx.out_mb, mcp->mb, SIZEOF_IOCB_MB_REG);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1018,
		    "%s: %s Failed submission. %x.\n",
		    __func__, sp->name, rval);
		goto done_free_sp;
	}

	ql_dbg(ql_dbg_mbx, vha, 0x113f, "MB:%s hndl %x submitted\n",
	    sp->name, sp->handle);

	wait_for_completion(&c->u.mbx.comp);
	memcpy(mcp->mb, sp->u.iocb_cmd.u.mbx.in_mb, SIZEOF_IOCB_MB_REG);

	rval = c->u.mbx.rc;
	switch (rval) {
	case QLA_FUNCTION_TIMEOUT:
		ql_dbg(ql_dbg_mbx, vha, 0x1140, "%s: %s Timeout. %x.\n",
		    __func__, sp->name, rval);
		break;
	case QLA_SUCCESS:
		ql_dbg(ql_dbg_mbx, vha, 0x119d, "%s: %s done.\n",
		    __func__, sp->name);
		break;
	default:
		ql_dbg(ql_dbg_mbx, vha, 0x119e, "%s: %s Failed. %x.\n",
		    __func__, sp->name, rval);
		break;
	}

done_free_sp:
	/* ref: INIT */
	kref_put(&sp->cmd_kref, qla2x00_sp_release);
done:
	return rval;
}

/*
 * qla24xx_gpdb_wait
 * NOTE: Do not call this routine from DPC thread
 */
int qla24xx_gpdb_wait(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
{
	int rval = QLA_FUNCTION_FAILED;
	dma_addr_t pd_dma;
	struct port_database_24xx *pd;
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t mc;

	if (!vha->hw->flags.fw_started)
		goto done;

	pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
	if (pd == NULL) {
		ql_log(ql_log_warn, vha, 0xd047,
		    "Failed to allocate port database structure.\n");
		goto done_free_sp;
	}

	memset(&mc, 0, sizeof(mc));
	mc.mb[0] = MBC_GET_PORT_DATABASE;
	mc.mb[1] = fcport->loop_id;
	mc.mb[2] = MSW(pd_dma);
	mc.mb[3] = LSW(pd_dma);
	mc.mb[6] = MSW(MSD(pd_dma));
	mc.mb[7] = LSW(MSD(pd_dma));
	mc.mb[9] = vha->vp_idx;
	mc.mb[10] = opt;

	rval = qla24xx_send_mb_cmd(vha, &mc);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1193,
		    "%s: %8phC fail\n", __func__, fcport->port_name);
		goto done_free_sp;
	}

	rval = __qla24xx_parse_gpdb(vha, fcport, pd);

	ql_dbg(ql_dbg_mbx, vha, 0x1197, "%s: %8phC done\n",
	    __func__, fcport->port_name);

done_free_sp:
	if (pd)
		dma_pool_free(ha->s_dma_pool, pd, pd_dma);
done:
	return rval;
}

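/*
 * Illustrative sketch (not part of the original driver): refreshing a port's
 * login state through qla24xx_gpdb_wait() from process context.  The helper
 * name is hypothetical and 'opt' is passed as 0 (no special options); as the
 * note above says, this must not be called from the DPC thread because it
 * sleeps on a completion.
 */
static int __maybe_unused qla24xx_refresh_fcport_example(scsi_qla_host_t *vha,
	fc_port_t *fcport)
{
	return qla24xx_gpdb_wait(vha, fcport, 0);
}
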
int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport,
	struct port_database_24xx *pd)
{
	int rval = QLA_SUCCESS;
	uint64_t zero = 0;
	u8 current_login_state, last_login_state;

	if (NVME_TARGET(vha->hw, fcport)) {
		current_login_state = pd->current_login_state >> 4;
		last_login_state = pd->last_login_state >> 4;
	} else {
		current_login_state = pd->current_login_state & 0xf;
		last_login_state = pd->last_login_state & 0xf;
	}

	/* Check for logged in state. */
	if (current_login_state != PDS_PRLI_COMPLETE) {
		ql_dbg(ql_dbg_mbx, vha, 0x119a,
		    "Unable to verify login-state (%x/%x) for loop_id %x.\n",
		    current_login_state, last_login_state, fcport->loop_id);
		rval = QLA_FUNCTION_FAILED;
		goto gpd_error_out;
	}

	if (fcport->loop_id == FC_NO_LOOP_ID ||
	    (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
	     memcmp(fcport->port_name, pd->port_name, 8))) {
		/* We lost the device mid way. */
		rval = QLA_NOT_LOGGED_IN;
		goto gpd_error_out;
	}

	/* Names are little-endian. */
	memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
	memcpy(fcport->port_name, pd->port_name, WWN_SIZE);

	/* Get port_id of device. */
	fcport->d_id.b.domain = pd->port_id[0];
	fcport->d_id.b.area = pd->port_id[1];
	fcport->d_id.b.al_pa = pd->port_id[2];
	fcport->d_id.b.rsvd_1 = 0;

	ql_dbg(ql_dbg_disc, vha, 0x2062,
	    "%8phC SVC Param w3 %02x%02x",
	    fcport->port_name,
	    pd->prli_svc_param_word_3[1],
	    pd->prli_svc_param_word_3[0]);

	if (NVME_TARGET(vha->hw, fcport)) {
		fcport->port_type = FCT_NVME;
		if ((pd->prli_svc_param_word_3[0] & BIT_5) == 0)
			fcport->port_type |= FCT_NVME_INITIATOR;
		if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
			fcport->port_type |= FCT_NVME_TARGET;
		if ((pd->prli_svc_param_word_3[0] & BIT_3) == 0)
			fcport->port_type |= FCT_NVME_DISCOVERY;
	} else {
		/* If not target must be initiator or unknown type. */
		if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
			fcport->port_type = FCT_INITIATOR;
		else
			fcport->port_type = FCT_TARGET;
	}
	/* Passback COS information. */
	fcport->supported_classes = (pd->flags & PDF_CLASS_2) ?
	    FC_COS_CLASS2 : FC_COS_CLASS3;

	if (pd->prli_svc_param_word_3[0] & BIT_7) {
		fcport->flags |= FCF_CONF_COMP_SUPPORTED;
		fcport->conf_compl_supported = 1;
	}

gpd_error_out:
	return rval;
}

/*
 * qla24xx_gidlist_wait
 * NOTE: don't call this routine from DPC thread.
 */
int qla24xx_gidlist_wait(struct scsi_qla_host *vha,
	void *id_list, dma_addr_t id_list_dma, uint16_t *entries)
{
	int rval = QLA_FUNCTION_FAILED;
	mbx_cmd_t mc;

	if (!vha->hw->flags.fw_started)
		goto done;

	memset(&mc, 0, sizeof(mc));
	mc.mb[0] = MBC_GET_ID_LIST;
	mc.mb[2] = MSW(id_list_dma);
	mc.mb[3] = LSW(id_list_dma);
	mc.mb[6] = MSW(MSD(id_list_dma));
	mc.mb[7] = LSW(MSD(id_list_dma));
	mc.mb[8] = 0;
	mc.mb[9] = vha->vp_idx;

	rval = qla24xx_send_mb_cmd(vha, &mc);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x119b,
		    "%s: fail\n", __func__);
	} else {
		*entries = mc.mb[1];
		ql_dbg(ql_dbg_mbx, vha, 0x119c,
		    "%s: done\n", __func__);
	}
done:
	return rval;
}

int qla27xx_set_zio_threshold(scsi_qla_host_t *vha, uint16_t value)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1200,
	    "Entered %s\n", __func__);

	memset(mcp->mb, 0, sizeof(mcp->mb));
	mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
	mcp->mb[1] = 1;
	mcp->mb[2] = value;
	mcp->out_mb = MBX_2 | MBX_1 | MBX_0;
	mcp->in_mb = MBX_2 | MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;

	rval = qla2x00_mailbox_command(vha, mcp);

	ql_dbg(ql_dbg_mbx, vha, 0x1201, "%s %x\n",
	    (rval != QLA_SUCCESS) ? "Failed" : "Done", rval);

	return rval;
}

int qla27xx_get_zio_threshold(scsi_qla_host_t *vha, uint16_t *value)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1203,
	    "Entered %s\n", __func__);

	memset(mcp->mb, 0, sizeof(mcp->mb));
	mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
	mcp->mb[1] = 0;
	mcp->out_mb = MBX_1 | MBX_0;
	mcp->in_mb = MBX_2 | MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;

	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval == QLA_SUCCESS)
		*value = mc.mb[2];

	ql_dbg(ql_dbg_mbx, vha, 0x1205, "%s %x\n",
	    (rval != QLA_SUCCESS) ? "Failed" : "Done", rval);

	return rval;
}

int
qla2x00_read_sfp_dev(struct scsi_qla_host *vha, char *buf, int count)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t iter, addr, offset;
	dma_addr_t phys_addr;
	int rval, c;
	u8 *sfp_data;

	memset(ha->sfp_data, 0, SFP_DEV_SIZE);
	addr = 0xa0;
	phys_addr = ha->sfp_data_dma;
	sfp_data = ha->sfp_data;
	offset = c = 0;

	for (iter = 0; iter < SFP_DEV_SIZE / SFP_BLOCK_SIZE; iter++) {
		if (iter == 4) {
			/* Skip to next device address. */
			addr = 0xa2;
			offset = 0;
		}

		rval = qla2x00_read_sfp(vha, phys_addr, sfp_data,
		    addr, offset, SFP_BLOCK_SIZE, BIT_1);
		if (rval != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x706d,
			    "Unable to read SFP data (%x/%x/%x).\n", rval,
			    addr, offset);

			return rval;
		}

		if (buf && (c < count)) {
			u16 sz;

			if ((count - c) >= SFP_BLOCK_SIZE)
				sz = SFP_BLOCK_SIZE;
			else
				sz = count - c;

			memcpy(buf, sfp_data, sz);
			buf += SFP_BLOCK_SIZE;
			c += sz;
		}
		phys_addr += SFP_BLOCK_SIZE;
		sfp_data += SFP_BLOCK_SIZE;
		offset += SFP_BLOCK_SIZE;
	}

	return rval;
}

int qla24xx_res_count_wait(struct scsi_qla_host *vha,
	uint16_t *out_mb, int out_mb_sz)
{
	int rval = QLA_FUNCTION_FAILED;
	mbx_cmd_t mc;

	if (!vha->hw->flags.fw_started)
		goto done;

	memset(&mc, 0, sizeof(mc));
	mc.mb[0] = MBC_GET_RESOURCE_COUNTS;

	rval = qla24xx_send_mb_cmd(vha, &mc);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0xffff,
		    "%s: fail\n", __func__);
	} else {
		if (out_mb_sz <= SIZEOF_IOCB_MB_REG)
			memcpy(out_mb, mc.mb, out_mb_sz);
		else
			memcpy(out_mb, mc.mb, SIZEOF_IOCB_MB_REG);

		ql_dbg(ql_dbg_mbx, vha, 0xffff,
		    "%s: done\n", __func__);
	}
done:
	return rval;
}

int qla28xx_secure_flash_update(scsi_qla_host_t *vha, uint16_t opts,
	uint16_t region, uint32_t len, dma_addr_t sfub_dma_addr,
	uint32_t sfub_len)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	mcp->mb[0] = MBC_SECURE_FLASH_UPDATE;
	mcp->mb[1] = opts;
	mcp->mb[2] = region;
	mcp->mb[3] = MSW(len);
	mcp->mb[4] = LSW(len);
	mcp->mb[5] = MSW(sfub_dma_addr);
	mcp->mb[6] = LSW(sfub_dma_addr);
	mcp->mb[7] = MSW(MSD(sfub_dma_addr));
	mcp->mb[8] = LSW(MSD(sfub_dma_addr));
	mcp->mb[9] = sfub_len;
	mcp->out_mb =
	    MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_2|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s(%ld): failed rval 0x%x, %x %x %x",
		    __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1],
		    mcp->mb[2]);
	}

	return rval;
}

int qla2xxx_write_remote_register(scsi_qla_host_t *vha, uint32_t addr,
	uint32_t data)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_WRITE_REMOTE_REG;
	mcp->mb[1] = LSW(addr);
	mcp->mb[2] = MSW(addr);
	mcp->mb[3] = LSW(data);
	mcp->mb[4] = MSW(data);
	mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x10e9,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
		    "Done %s.\n", __func__);
	}

	return rval;
}

int qla2xxx_read_remote_register(scsi_qla_host_t *vha, uint32_t addr,
	uint32_t *data)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_READ_REMOTE_REG;
	mcp->mb[1] = LSW(addr);
	mcp->mb[2] = MSW(addr);
	mcp->out_mb = MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	*data = (uint32_t)((((uint32_t)mcp->mb[4]) << 16) | mcp->mb[3]);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x10e9,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
		    "Done %s.\n", __func__);
	}

	return rval;
}

int
ql26xx_led_config(scsi_qla_host_t *vha, uint16_t options, uint16_t *led)
{
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	int rval;

	if (!IS_QLA2031(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx, vha, 0x7070, "Entered %s (options=%x).\n",
	    __func__, options);

	mcp->mb[0] = MBC_SET_GET_FC_LED_CONFIG;
	mcp->mb[1] = options;
	mcp->out_mb = MBX_1|MBX_0;
	mcp->in_mb = MBX_1|MBX_0;
	if (options & BIT_0) {
		if (options & BIT_1) {
			mcp->mb[2] = led[2];
			mcp->out_mb |= MBX_2;
		}
		if (options & BIT_2) {
			mcp->mb[3] = led[0];
			mcp->out_mb |= MBX_3;
		}
		if (options & BIT_3) {
			mcp->mb[4] = led[1];
			mcp->out_mb |= MBX_4;
		}
	} else {
		mcp->in_mb |= MBX_4|MBX_3|MBX_2;
	}
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval) {
		ql_dbg(ql_dbg_mbx, vha, 0x7071, "Failed %s %x (mb=%x,%x)\n",
		    __func__, rval, mcp->mb[0], mcp->mb[1]);
		return rval;
	}

	if (options & BIT_0) {
		ha->beacon_blink_led = 0;
		ql_dbg(ql_dbg_mbx, vha, 0x7072, "Done %s\n", __func__);
	} else {
		led[2] = mcp->mb[2];
		led[0] = mcp->mb[3];
		led[1] = mcp->mb[4];
		ql_dbg(ql_dbg_mbx, vha, 0x7073, "Done %s (led=%x,%x,%x)\n",
		    __func__, led[0], led[1], led[2]);
	}

	return rval;
}

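/*
 * Illustrative sketch (not part of the original driver): a generic
 * read-modify-write of a remote register using qla2xxx_read_remote_register()
 * and qla2xxx_write_remote_register().  The helper name and the clear/set
 * parameters are hypothetical; which registers are safe to modify this way is
 * entirely hardware specific.
 */
static int __maybe_unused qla2xxx_rmw_remote_register_example(
	scsi_qla_host_t *vha, uint32_t addr, uint32_t clear_bits,
	uint32_t set_bits)
{
	uint32_t val = 0;
	int rval;

	rval = qla2xxx_read_remote_register(vha, addr, &val);
	if (rval != QLA_SUCCESS)
		return rval;

	val = (val & ~clear_bits) | set_bits;
	return qla2xxx_write_remote_register(vha, addr, val);
}
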
/**
 * qla_no_op_mb(): This MB is used to check if FW is still alive and
 * able to generate an interrupt. Otherwise, a timeout will trigger
 * a FW dump + reset.
 * @vha: host adapter pointer
 * Return: None
 */
void qla_no_op_mb(struct scsi_qla_host *vha)
{
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	int rval;

	memset(&mc, 0, sizeof(mc));
	mcp->mb[0] = 0; /* mailbox command 0 = no-op */
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_0;
	mcp->tov = 5;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval) {
		ql_dbg(ql_dbg_async, vha, 0x7071,
		    "Failed %s %x\n", __func__, rval);
	}
}

int qla_mailbox_passthru(scsi_qla_host_t *vha,
	uint16_t *mbx_in, uint16_t *mbx_out)
{
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	int rval = -EINVAL;

	memset(&mc, 0, sizeof(mc));
	/* Pass in the contents of all 32 mailbox registers. */
	memcpy(&mcp->mb, (char *)mbx_in, (32 * sizeof(uint16_t)));

	mcp->out_mb = 0xFFFFFFFF;
	mcp->in_mb = 0xFFFFFFFF;

	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	mcp->bufp = NULL;

	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0xf0a2,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xf0a3, "Done %s.\n",
		    __func__);
		/* Pass back the contents of all 32 mailbox registers. */
		memcpy(mbx_out, &mcp->mb, 32 * sizeof(uint16_t));
	}

	return rval;
}

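/*
 * Illustrative sketch (not part of the original driver): passing a single
 * mailbox command through qla_mailbox_passthru().  The helper name is
 * hypothetical; MBC_GET_FIRMWARE_VERSION is used here only because it is a
 * harmless read-only command, and the returned register contents land in
 * mbx_out[].
 */
static int __maybe_unused qla_fw_version_passthru_example(scsi_qla_host_t *vha,
	uint16_t mbx_out[32])
{
	uint16_t mbx_in[32] = { 0 };

	mbx_in[0] = MBC_GET_FIRMWARE_VERSION;

	return qla_mailbox_passthru(vha, mbx_in, mbx_out);
}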