// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2014 QLogic Corporation
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/delay.h>
#include <linux/gfp.h>

static struct mb_cmd_name {
	uint16_t cmd;
	const char *str;
} mb_str[] = {
	{MBC_GET_PORT_DATABASE,		"GPDB"},
	{MBC_GET_ID_LIST,		"GIDList"},
	{MBC_GET_LINK_PRIV_STATS,	"Stats"},
	{MBC_GET_RESOURCE_COUNTS,	"ResCnt"},
};

static const char *mb_to_str(uint16_t cmd)
{
	int i;
	struct mb_cmd_name *e;

	for (i = 0; i < ARRAY_SIZE(mb_str); i++) {
		e = mb_str + i;
		if (cmd == e->cmd)
			return e->str;
	}
	return "unknown";
}

static struct rom_cmd {
	uint16_t cmd;
} rom_cmds[] = {
	{ MBC_LOAD_RAM },
	{ MBC_EXECUTE_FIRMWARE },
	{ MBC_READ_RAM_WORD },
	{ MBC_MAILBOX_REGISTER_TEST },
	{ MBC_VERIFY_CHECKSUM },
	{ MBC_GET_FIRMWARE_VERSION },
	{ MBC_LOAD_RISC_RAM },
	{ MBC_DUMP_RISC_RAM },
	{ MBC_LOAD_RISC_RAM_EXTENDED },
	{ MBC_DUMP_RISC_RAM_EXTENDED },
	{ MBC_WRITE_RAM_WORD_EXTENDED },
	{ MBC_READ_RAM_EXTENDED },
	{ MBC_GET_RESOURCE_COUNTS },
	{ MBC_SET_FIRMWARE_OPTION },
	{ MBC_MID_INITIALIZE_FIRMWARE },
	{ MBC_GET_FIRMWARE_STATE },
	{ MBC_GET_MEM_OFFLOAD_CNTRL_STAT },
	{ MBC_GET_RETRY_COUNT },
	{ MBC_TRACE_CONTROL },
	{ MBC_INITIALIZE_MULTIQ },
	{ MBC_IOCB_COMMAND_A64 },
	{ MBC_GET_ADAPTER_LOOP_ID },
	{ MBC_READ_SFP },
	{ MBC_SET_RNID_PARAMS },
	{ MBC_GET_RNID_PARAMS },
	{ MBC_GET_SET_ZIO_THRESHOLD },
};

static int is_rom_cmd(uint16_t cmd)
{
	int i;
	struct rom_cmd *wc;

	for (i = 0; i < ARRAY_SIZE(rom_cmds); i++) {
		wc = rom_cmds + i;
		if (wc->cmd == cmd)
			return 1;
	}

	return 0;
}

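/*
 * Mailbox command flow (a summary of qla2x00_mailbox_command() below):
 *
 * 1. Serialize on ha->mbx_cmd_comp so only one mailbox command is
 *    outstanding per HBA at a time.
 * 2. Write mcp->mb[] into the chip mailbox registers selected by the
 *    mcp->out_mb bitmap and set the host interrupt to start the command.
 * 3. Wait for the completion interrupt, or poll the response queue when
 *    interrupts cannot be used yet.
 * 4. Copy back the registers selected by mcp->in_mb into mcp->mb[] and
 *    release ha->mbx_cmd_comp.
 *
 * ROM commands (see is_rom_cmd()) are the only commands allowed to
 * proceed while an ISP abort is pending.
 */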
/*
 * qla2x00_mailbox_command
 *	Issue mailbox command and wait for completion.
 *
 * Input:
 *	ha = adapter block pointer.
 *	mcp = driver internal mbx struct pointer.
 *
 * Output:
 *	mb[MAX_MAILBOX_REGISTER_COUNT] = returned mailbox data.
 *
 * Returns:
 *	0 : QLA_SUCCESS = cmd performed successfully
 *	1 : QLA_FUNCTION_FAILED   (error encountered)
 *	6 : QLA_FUNCTION_TIMEOUT (timeout condition encountered)
 *
 * Context:
 *	Kernel context.
 */
static int
qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
{
	int rval, i;
	unsigned long flags = 0;
	device_reg_t *reg;
	uint8_t abort_active;
	uint8_t io_lock_on;
	uint16_t command = 0;
	uint16_t *iptr;
	__le16 __iomem *optr;
	uint32_t cnt;
	uint32_t mboxes;
	unsigned long wait_time;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
	u32 chip_reset;


	ql_dbg(ql_dbg_mbx, vha, 0x1000, "Entered %s.\n", __func__);

	if (ha->pdev->error_state == pci_channel_io_perm_failure) {
		ql_log(ql_log_warn, vha, 0x1001,
		    "PCI channel failed permanently, exiting.\n");
		return QLA_FUNCTION_TIMEOUT;
	}

	if (vha->device_flags & DFLG_DEV_FAILED) {
		ql_log(ql_log_warn, vha, 0x1002,
		    "Device in failed state, exiting.\n");
		return QLA_FUNCTION_TIMEOUT;
	}

	/* if PCI error, then avoid mbx processing.*/
	if (test_bit(PFLG_DISCONNECTED, &base_vha->dpc_flags) &&
	    test_bit(UNLOADING, &base_vha->dpc_flags)) {
		ql_log(ql_log_warn, vha, 0xd04e,
		    "PCI error, exiting.\n");
		return QLA_FUNCTION_TIMEOUT;
	}

	reg = ha->iobase;
	io_lock_on = base_vha->flags.init_done;

	rval = QLA_SUCCESS;
	abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
	chip_reset = ha->chip_reset;

	if (ha->flags.pci_channel_io_perm_failure) {
		ql_log(ql_log_warn, vha, 0x1003,
		    "Perm failure on EEH timeout MBX, exiting.\n");
		return QLA_FUNCTION_TIMEOUT;
	}

	if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
		/* Setting Link-Down error */
		mcp->mb[0] = MBS_LINK_DOWN_ERROR;
		ql_log(ql_log_warn, vha, 0x1004,
		    "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
		return QLA_FUNCTION_TIMEOUT;
	}

	/* check if ISP abort is active and return cmd with timeout */
	if ((test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
	    test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) &&
	    !is_rom_cmd(mcp->mb[0])) {
		ql_log(ql_log_info, vha, 0x1005,
		    "Cmd 0x%x aborted with timeout since ISP Abort is pending\n",
		    mcp->mb[0]);
		return QLA_FUNCTION_TIMEOUT;
	}

	atomic_inc(&ha->num_pend_mbx_stage1);
	/*
	 * Wait for active mailbox commands to finish by waiting at most tov
	 * seconds. This is to serialize actual issuing of mailbox cmds during
	 * non ISP abort time.
	 */
	if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) {
		/* Timeout occurred. Return error. */
		ql_log(ql_log_warn, vha, 0xd035,
		    "Cmd access timeout, cmd=0x%x, Exiting.\n",
		    mcp->mb[0]);
		vha->hw_err_cnt++;
		atomic_dec(&ha->num_pend_mbx_stage1);
		return QLA_FUNCTION_TIMEOUT;
	}
	atomic_dec(&ha->num_pend_mbx_stage1);
	if (ha->flags.purge_mbox || chip_reset != ha->chip_reset) {
		rval = QLA_ABORTED;
		goto premature_exit;
	}


	/* Save mailbox command for debug */
	ha->mcp = mcp;

	ql_dbg(ql_dbg_mbx, vha, 0x1006,
	    "Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]);

	spin_lock_irqsave(&ha->hardware_lock, flags);

	if (ha->flags.purge_mbox || chip_reset != ha->chip_reset ||
	    ha->flags.mbox_busy) {
		rval = QLA_ABORTED;
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		goto premature_exit;
	}
	ha->flags.mbox_busy = 1;

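	/*
	 * mcp->out_mb and mcp->in_mb are bitmaps: bit n set means mailbox
	 * register n is written to (OUT) or read back from (IN) the chip.
	 * The loops below walk one register per bit.
	 */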
	/* Load mailbox registers. */
	if (IS_P3P_TYPE(ha))
		optr = &reg->isp82.mailbox_in[0];
	else if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha)))
		optr = &reg->isp24.mailbox0;
	else
		optr = MAILBOX_REG(ha, &reg->isp, 0);

	iptr = mcp->mb;
	command = mcp->mb[0];
	mboxes = mcp->out_mb;

	ql_dbg(ql_dbg_mbx, vha, 0x1111,
	    "Mailbox registers (OUT):\n");
	for (cnt = 0; cnt < ha->mbx_count; cnt++) {
		if (IS_QLA2200(ha) && cnt == 8)
			optr = MAILBOX_REG(ha, &reg->isp, 8);
		if (mboxes & BIT_0) {
			ql_dbg(ql_dbg_mbx, vha, 0x1112,
			    "mbox[%d]<-0x%04x\n", cnt, *iptr);
			wrt_reg_word(optr, *iptr);
		}

		mboxes >>= 1;
		optr++;
		iptr++;
	}

	ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1117,
	    "I/O Address = %p.\n", optr);

	/* Issue set host interrupt command to send cmd out. */
	ha->flags.mbox_int = 0;
	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);

	/* Unlock mbx registers and wait for interrupt */
	ql_dbg(ql_dbg_mbx, vha, 0x100f,
	    "Going to unlock irq & waiting for interrupts. "
	    "jiffies=%lx.\n", jiffies);

	/* Wait for mbx cmd completion until timeout */
	atomic_inc(&ha->num_pend_mbx_stage2);
	if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) {
		set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);

		if (IS_P3P_TYPE(ha))
			wrt_reg_dword(&reg->isp82.hint, HINT_MBX_INT_PENDING);
		else if (IS_FWI2_CAPABLE(ha))
			wrt_reg_dword(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
		else
			wrt_reg_word(&reg->isp.hccr, HCCR_SET_HOST_INT);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		wait_time = jiffies;
		atomic_inc(&ha->num_pend_mbx_stage3);
		if (!wait_for_completion_timeout(&ha->mbx_intr_comp,
		    mcp->tov * HZ)) {
			if (chip_reset != ha->chip_reset) {
				spin_lock_irqsave(&ha->hardware_lock, flags);
				ha->flags.mbox_busy = 0;
				spin_unlock_irqrestore(&ha->hardware_lock,
				    flags);
				atomic_dec(&ha->num_pend_mbx_stage2);
				atomic_dec(&ha->num_pend_mbx_stage3);
				rval = QLA_ABORTED;
				goto premature_exit;
			}
			ql_dbg(ql_dbg_mbx, vha, 0x117a,
			    "cmd=%x Timeout.\n", command);
			spin_lock_irqsave(&ha->hardware_lock, flags);
			clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
			spin_unlock_irqrestore(&ha->hardware_lock, flags);

		} else if (ha->flags.purge_mbox ||
		    chip_reset != ha->chip_reset) {
			spin_lock_irqsave(&ha->hardware_lock, flags);
			ha->flags.mbox_busy = 0;
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
			atomic_dec(&ha->num_pend_mbx_stage2);
			atomic_dec(&ha->num_pend_mbx_stage3);
			rval = QLA_ABORTED;
			goto premature_exit;
		}
		atomic_dec(&ha->num_pend_mbx_stage3);

		if (time_after(jiffies, wait_time + 5 * HZ))
			ql_log(ql_log_warn, vha, 0x1015, "cmd=0x%x, waited %d msecs\n",
			    command, jiffies_to_msecs(jiffies - wait_time));
	} else {
		ql_dbg(ql_dbg_mbx, vha, 0x1011,
		    "Cmd=%x Polling Mode.\n", command);

		if (IS_P3P_TYPE(ha)) {
			if (rd_reg_dword(&reg->isp82.hint) &
			    HINT_MBX_INT_PENDING) {
				ha->flags.mbox_busy = 0;
				spin_unlock_irqrestore(&ha->hardware_lock,
				    flags);
				atomic_dec(&ha->num_pend_mbx_stage2);
				ql_dbg(ql_dbg_mbx, vha, 0x1012,
				    "Pending mailbox timeout, exiting.\n");
				vha->hw_err_cnt++;
				rval = QLA_FUNCTION_TIMEOUT;
				goto premature_exit;
			}
			wrt_reg_dword(&reg->isp82.hint, HINT_MBX_INT_PENDING);
		} else if (IS_FWI2_CAPABLE(ha))
			wrt_reg_dword(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
		else
			wrt_reg_word(&reg->isp.hccr, HCCR_SET_HOST_INT);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */
		while (!ha->flags.mbox_int) {
			if (ha->flags.purge_mbox ||
			    chip_reset != ha->chip_reset) {
				spin_lock_irqsave(&ha->hardware_lock, flags);
				ha->flags.mbox_busy = 0;
				spin_unlock_irqrestore(&ha->hardware_lock,
				    flags);
				atomic_dec(&ha->num_pend_mbx_stage2);
				rval = QLA_ABORTED;
				goto premature_exit;
			}

			if (time_after(jiffies, wait_time))
				break;

			/* Check for pending interrupts. */
			qla2x00_poll(ha->rsp_q_map[0]);

			if (!ha->flags.mbox_int &&
			    !(IS_QLA2200(ha) &&
			    command == MBC_LOAD_RISC_RAM_EXTENDED))
				msleep(10);
		} /* while */
		ql_dbg(ql_dbg_mbx, vha, 0x1013,
		    "Waited %d sec.\n",
		    (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ));
	}
	atomic_dec(&ha->num_pend_mbx_stage2);

	/* Check whether we timed out */
	if (ha->flags.mbox_int) {
		uint16_t *iptr2;

		ql_dbg(ql_dbg_mbx, vha, 0x1014,
		    "Cmd=%x completed.\n", command);

		/* Got interrupt. Clear the flag. */
		ha->flags.mbox_int = 0;
		clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);

		if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
			spin_lock_irqsave(&ha->hardware_lock, flags);
			ha->flags.mbox_busy = 0;
			spin_unlock_irqrestore(&ha->hardware_lock, flags);

			/* Setting Link-Down error */
			mcp->mb[0] = MBS_LINK_DOWN_ERROR;
			ha->mcp = NULL;
			rval = QLA_FUNCTION_FAILED;
			ql_log(ql_log_warn, vha, 0xd048,
			    "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
			goto premature_exit;
		}

		if (ha->mailbox_out[0] != MBS_COMMAND_COMPLETE) {
			ql_dbg(ql_dbg_mbx, vha, 0x11ff,
			    "mb_out[0] = %#x <> %#x\n", ha->mailbox_out[0],
			    MBS_COMMAND_COMPLETE);
			rval = QLA_FUNCTION_FAILED;
		}

		/* Load return mailbox registers. */
		iptr2 = mcp->mb;
		iptr = (uint16_t *)&ha->mailbox_out[0];
		mboxes = mcp->in_mb;

		ql_dbg(ql_dbg_mbx, vha, 0x1113,
		    "Mailbox registers (IN):\n");
		for (cnt = 0; cnt < ha->mbx_count; cnt++) {
			if (mboxes & BIT_0) {
				*iptr2 = *iptr;
				ql_dbg(ql_dbg_mbx, vha, 0x1114,
				    "mbox[%d]->0x%04x\n", cnt, *iptr2);
			}

			mboxes >>= 1;
			iptr2++;
			iptr++;
		}
	} else {

		uint16_t mb[8];
		uint32_t ictrl, host_status, hccr;
		uint16_t w;

		if (IS_FWI2_CAPABLE(ha)) {
			mb[0] = rd_reg_word(&reg->isp24.mailbox0);
			mb[1] = rd_reg_word(&reg->isp24.mailbox1);
			mb[2] = rd_reg_word(&reg->isp24.mailbox2);
			mb[3] = rd_reg_word(&reg->isp24.mailbox3);
			mb[7] = rd_reg_word(&reg->isp24.mailbox7);
			ictrl = rd_reg_dword(&reg->isp24.ictrl);
			host_status = rd_reg_dword(&reg->isp24.host_status);
			hccr = rd_reg_dword(&reg->isp24.hccr);

			ql_log(ql_log_warn, vha, 0xd04c,
			    "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
			    "mb[0-3]=[0x%x 0x%x 0x%x 0x%x] mb7 0x%x host_status 0x%x hccr 0x%x\n",
			    command, ictrl, jiffies, mb[0], mb[1], mb[2], mb[3],
			    mb[7], host_status, hccr);
			vha->hw_err_cnt++;

		} else {
			mb[0] = RD_MAILBOX_REG(ha, &reg->isp, 0);
			ictrl = rd_reg_word(&reg->isp.ictrl);
			ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119,
			    "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
			    "mb[0]=0x%x\n", command, ictrl, jiffies, mb[0]);
			vha->hw_err_cnt++;
		}
		ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019);

		/* Capture FW dump only, if PCI device active */
		if (!pci_channel_offline(vha->hw->pdev)) {
			pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
			if (w == 0xffff || ictrl == 0xffffffff ||
			    (chip_reset != ha->chip_reset)) {
				/* This is special case if there is unload
				 * of driver happening and if PCI device go
				 * into bad state due to PCI error condition
				 * then only PCI ERR flag would be set.
				 * we will do premature exit for above case.
				 */
				spin_lock_irqsave(&ha->hardware_lock, flags);
				ha->flags.mbox_busy = 0;
				spin_unlock_irqrestore(&ha->hardware_lock,
				    flags);
				rval = QLA_FUNCTION_TIMEOUT;
				goto premature_exit;
			}

			/* Attempt to capture firmware dump for further
			 * analysis of the current firmware state. We do not
			 * need to do this if we are intentionally generating
			 * a dump.
			 */
			if (mcp->mb[0] != MBC_GEN_SYSTEM_ERROR)
				qla2xxx_dump_fw(vha);
			rval = QLA_FUNCTION_TIMEOUT;
		}
	}
	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->flags.mbox_busy = 0;
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	/* Clean up */
	ha->mcp = NULL;

	if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) {
		ql_dbg(ql_dbg_mbx, vha, 0x101a,
		    "Checking for additional resp interrupt.\n");

		/* polling mode for non isp_abort commands. */
		qla2x00_poll(ha->rsp_q_map[0]);
	}

	if (rval == QLA_FUNCTION_TIMEOUT &&
	    mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) {
		if (!io_lock_on || (mcp->flags & IOCTL_CMD) ||
		    ha->flags.eeh_busy) {
			/* not in dpc. schedule it for dpc to take over. */
			ql_dbg(ql_dbg_mbx, vha, 0x101b,
			    "Timeout, schedule isp_abort_needed.\n");

			if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
			    !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
			    !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
				if (IS_QLA82XX(ha)) {
					ql_dbg(ql_dbg_mbx, vha, 0x112a,
					    "disabling pause transmit on port "
					    "0 & 1.\n");
					qla82xx_wr_32(ha,
					    QLA82XX_CRB_NIU + 0x98,
					    CRB_NIU_XG_PAUSE_CTL_P0|
					    CRB_NIU_XG_PAUSE_CTL_P1);
				}
				ql_log(ql_log_info, base_vha, 0x101c,
				    "Mailbox cmd timeout occurred, cmd=0x%x, "
				    "mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP "
				    "abort.\n", command, mcp->mb[0],
				    ha->flags.eeh_busy);
				vha->hw_err_cnt++;
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
			}
		} else if (current == ha->dpc_thread) {
			/* call abort directly since we are in the DPC thread */
			ql_dbg(ql_dbg_mbx, vha, 0x101d,
			    "Timeout, calling abort_isp.\n");

			if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
			    !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
			    !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
				if (IS_QLA82XX(ha)) {
					ql_dbg(ql_dbg_mbx, vha, 0x112b,
					    "disabling pause transmit on port "
					    "0 & 1.\n");
					qla82xx_wr_32(ha,
					    QLA82XX_CRB_NIU + 0x98,
					    CRB_NIU_XG_PAUSE_CTL_P0|
					    CRB_NIU_XG_PAUSE_CTL_P1);
				}
				ql_log(ql_log_info, base_vha, 0x101e,
				    "Mailbox cmd timeout occurred, cmd=0x%x, "
				    "mb[0]=0x%x. Scheduling ISP abort ",
				    command, mcp->mb[0]);
				vha->hw_err_cnt++;
				set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
				clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				/* Allow next mbx cmd to come in. */
				complete(&ha->mbx_cmd_comp);
				if (ha->isp_ops->abort_isp(vha)) {
					/* Failed. retry later. */
					set_bit(ISP_ABORT_NEEDED,
					    &vha->dpc_flags);
				}
				clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
				ql_dbg(ql_dbg_mbx, vha, 0x101f,
				    "Finished abort_isp.\n");
				goto mbx_done;
			}
		}
	}

premature_exit:
	/* Allow next mbx cmd to come in. */
	complete(&ha->mbx_cmd_comp);

mbx_done:
	if (rval == QLA_ABORTED) {
		ql_log(ql_log_info, vha, 0xd035,
		    "Chip Reset in progress. Purging Mbox cmd=0x%x.\n",
		    mcp->mb[0]);
	} else if (rval) {
		if (ql2xextended_error_logging & (ql_dbg_disc|ql_dbg_mbx)) {
			pr_warn("%s [%s]-%04x:%ld: **** Failed=%x", QL_MSGHDR,
			    dev_name(&ha->pdev->dev), 0x1020+0x800,
			    vha->host_no, rval);
			mboxes = mcp->in_mb;
			cnt = 4;
			for (i = 0; i < ha->mbx_count && cnt; i++, mboxes >>= 1)
				if (mboxes & BIT_0) {
					printk(" mb[%u]=%x", i, mcp->mb[i]);
					cnt--;
				}
			pr_warn(" cmd=%x ****\n", command);
		}
		if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha))) {
			ql_dbg(ql_dbg_mbx, vha, 0x1198,
			    "host_status=%#x intr_ctrl=%#x intr_status=%#x\n",
			    rd_reg_dword(&reg->isp24.host_status),
			    rd_reg_dword(&reg->isp24.ictrl),
			    rd_reg_dword(&reg->isp24.istatus));
		} else {
			ql_dbg(ql_dbg_mbx, vha, 0x1206,
			    "ctrl_status=%#x ictrl=%#x istatus=%#x\n",
			    rd_reg_word(&reg->isp.ctrl_status),
			    rd_reg_word(&reg->isp.ictrl),
			    rd_reg_word(&reg->isp.istatus));
		}
	} else {
		ql_dbg(ql_dbg_mbx, base_vha, 0x1021, "Done %s.\n", __func__);
	}

	return rval;
}

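/*
 * The wrappers below all follow the same basic pattern (a sketch, not an
 * additional API): fill a local mbx_cmd_t, pick the outgoing/returning
 * registers with out_mb/in_mb, set tov and flags, call
 * qla2x00_mailbox_command() and then look at mcp->mb[] on return, e.g.:
 *
 *	mbx_cmd_t mc = { };
 *
 *	mc.mb[0] = MBC_GET_FIRMWARE_STATE;
 *	mc.out_mb = MBX_0;
 *	mc.in_mb = MBX_1|MBX_0;
 *	mc.tov = MBX_TOV_SECONDS;
 *	rval = qla2x00_mailbox_command(vha, &mc);
 */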
int
qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
    uint32_t risc_code_size)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1022,
	    "Entered %s.\n", __func__);

	if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) {
		mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED;
		mcp->mb[8] = MSW(risc_addr);
		mcp->out_mb = MBX_8|MBX_0;
	} else {
		mcp->mb[0] = MBC_LOAD_RISC_RAM;
		mcp->out_mb = MBX_0;
	}
	mcp->mb[1] = LSW(risc_addr);
	mcp->mb[2] = MSW(req_dma);
	mcp->mb[3] = LSW(req_dma);
	mcp->mb[6] = MSW(MSD(req_dma));
	mcp->mb[7] = LSW(MSD(req_dma));
	mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
	if (IS_FWI2_CAPABLE(ha)) {
		mcp->mb[4] = MSW(risc_code_size);
		mcp->mb[5] = LSW(risc_code_size);
		mcp->out_mb |= MBX_5|MBX_4;
	} else {
		mcp->mb[4] = LSW(risc_code_size);
		mcp->out_mb |= MBX_4;
	}

	mcp->in_mb = MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1023,
		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
		    rval, mcp->mb[0], mcp->mb[1]);
		vha->hw_err_cnt++;
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1024,
		    "Done %s.\n", __func__);
	}

	return rval;
}

#define NVME_ENABLE_FLAG	BIT_3

/*
 * qla2x00_execute_fw
 *	Start adapter firmware.
 *
 * Input:
 *	ha = adapter block pointer.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	u8 semaphore = 0;
#define EXE_FW_FORCE_SEMAPHORE BIT_7
	u8 retry = 3;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1025,
	    "Entered %s.\n", __func__);

again:
	mcp->mb[0] = MBC_EXECUTE_FIRMWARE;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_0;
	if (IS_FWI2_CAPABLE(ha)) {
		mcp->mb[1] = MSW(risc_addr);
		mcp->mb[2] = LSW(risc_addr);
		mcp->mb[3] = 0;
		mcp->mb[4] = 0;
		mcp->mb[11] = 0;

		/* Enable BPM? */
		if (ha->flags.lr_detected) {
			mcp->mb[4] = BIT_0;
			if (IS_BPM_RANGE_CAPABLE(ha))
				mcp->mb[4] |=
				    ha->lr_distance << LR_DIST_FW_POS;
		}

		if (ql2xnvmeenable && (IS_QLA27XX(ha) || IS_QLA28XX(ha)))
			mcp->mb[4] |= NVME_ENABLE_FLAG;

		if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
			struct nvram_81xx *nv = ha->nvram;
			/* set minimum speed if specified in nvram */
			if (nv->min_supported_speed >= 2 &&
			    nv->min_supported_speed <= 5) {
				mcp->mb[4] |= BIT_4;
				mcp->mb[11] |= nv->min_supported_speed & 0xF;
				mcp->out_mb |= MBX_11;
				mcp->in_mb |= BIT_5;
				vha->min_supported_speed =
				    nv->min_supported_speed;
			}
		}

		if (ha->flags.exlogins_enabled)
			mcp->mb[4] |= ENABLE_EXTENDED_LOGIN;

		if (ha->flags.exchoffld_enabled)
			mcp->mb[4] |= ENABLE_EXCHANGE_OFFLD;

		if (semaphore)
			mcp->mb[11] |= EXE_FW_FORCE_SEMAPHORE;

		mcp->out_mb |= MBX_4 | MBX_3 | MBX_2 | MBX_1 | MBX_11;
		mcp->in_mb |= MBX_3 | MBX_2 | MBX_1;
	} else {
		mcp->mb[1] = LSW(risc_addr);
		mcp->out_mb |= MBX_1;
		if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
			mcp->mb[2] = 0;
			mcp->out_mb |= MBX_2;
		}
	}

	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		if (IS_QLA28XX(ha) && rval == QLA_COMMAND_ERROR &&
		    mcp->mb[1] == 0x27 && retry) {
			semaphore = 1;
			retry--;
			ql_dbg(ql_dbg_async, vha, 0x1026,
			    "Exe FW: force semaphore.\n");
			goto again;
		}

		ql_dbg(ql_dbg_mbx, vha, 0x1026,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
		vha->hw_err_cnt++;
		return rval;
	}

	if (!IS_FWI2_CAPABLE(ha))
		goto done;

	ha->fw_ability_mask = mcp->mb[3] << 16 | mcp->mb[2];
	ql_dbg(ql_dbg_mbx, vha, 0x119a,
	    "fw_ability_mask=%x.\n", ha->fw_ability_mask);
	ql_dbg(ql_dbg_mbx, vha, 0x1027, "exchanges=%x.\n", mcp->mb[1]);
	if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
		ha->max_supported_speed = mcp->mb[2] & (BIT_0|BIT_1);
		ql_dbg(ql_dbg_mbx, vha, 0x119b, "max_supported_speed=%s.\n",
		    ha->max_supported_speed == 0 ? "16Gps" :
		    ha->max_supported_speed == 1 ? "32Gps" :
		    ha->max_supported_speed == 2 ? "64Gps" : "unknown");
		if (vha->min_supported_speed) {
			ha->min_supported_speed = mcp->mb[5] &
			    (BIT_0 | BIT_1 | BIT_2);
			ql_dbg(ql_dbg_mbx, vha, 0x119c,
			    "min_supported_speed=%s.\n",
			    ha->min_supported_speed == 6 ? "64Gps" :
			    ha->min_supported_speed == 5 ? "32Gps" :
			    ha->min_supported_speed == 4 ? "16Gps" :
			    ha->min_supported_speed == 3 ? "8Gps" :
			    ha->min_supported_speed == 2 ? "4Gps" : "unknown");
		}
	}

done:
	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1028,
	    "Done %s.\n", __func__);

	return rval;
}

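/*
 * The next four helpers all issue the memory offload control/status
 * mailbox (MBC_GET_MEM_OFFLOAD_CNTRL_STAT); mb[1] selects the
 * sub-operation: FETCH_XLOGINS_STAT/CONFIG_XLOGINS_MEM for extended
 * logins and FETCH_XCHOFFLD_STAT/CONFIG_XCHOFFLD_MEM for exchange
 * offload.
 */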
/*
 * qla_get_exlogin_status
 *	Get extended login status
 *	uses the memory offload control/status Mailbox
 *
 * Input:
 *	ha:		adapter state pointer.
 *	fwopt:		firmware options
 *
 * Returns:
 *	qla2x00 local function status
 *
 * Context:
 *	Kernel context.
 */
#define	FETCH_XLOGINS_STAT	0x8
int
qla_get_exlogin_status(scsi_qla_host_t *vha, uint16_t *buf_sz,
	uint16_t *ex_logins_cnt)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118f,
	    "Entered %s\n", __func__);

	memset(mcp->mb, 0 , sizeof(mcp->mb));
	mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
	mcp->mb[1] = FETCH_XLOGINS_STAT;
	mcp->out_mb = MBX_1|MBX_0;
	mcp->in_mb = MBX_10|MBX_4|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;

	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1115, "Failed=%x.\n", rval);
	} else {
		*buf_sz = mcp->mb[4];
		*ex_logins_cnt = mcp->mb[10];

		ql_log(ql_log_info, vha, 0x1190,
		    "buffer size 0x%x, exchange login count=%d\n",
		    mcp->mb[4], mcp->mb[10]);

		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1116,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla_set_exlogin_mem_cfg
 *	set extended login memory configuration
 *	Mbx needs to be issued before init_cb is set
 *
 * Input:
 *	ha:		adapter state pointer.
 *	buffer:		buffer pointer
 *	phys_addr:	physical address of buffer
 *	size:		size of buffer
 *	TARGET_QUEUE_LOCK must be released
 *	ADAPTER_STATE_LOCK must be released
 *
 * Returns:
 *	qla2x00 local function status code.
 *
 * Context:
 *	Kernel context.
 */
#define CONFIG_XLOGINS_MEM	0x9
int
qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111a,
	    "Entered %s.\n", __func__);

	memset(mcp->mb, 0 , sizeof(mcp->mb));
	mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
	mcp->mb[1] = CONFIG_XLOGINS_MEM;
	mcp->mb[2] = MSW(phys_addr);
	mcp->mb[3] = LSW(phys_addr);
	mcp->mb[6] = MSW(MSD(phys_addr));
	mcp->mb[7] = LSW(MSD(phys_addr));
	mcp->mb[8] = MSW(ha->exlogin_size);
	mcp->mb[9] = LSW(ha->exlogin_size);
	mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_11|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x111b,
		    "EXlogin Failed=%x. MB0=%x MB11=%x\n",
		    rval, mcp->mb[0], mcp->mb[11]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla_get_exchoffld_status
 *	Get exchange offload status
 *	uses the memory offload control/status Mailbox
 *
 * Input:
 *	ha:		adapter state pointer.
 *	fwopt:		firmware options
 *
 * Returns:
 *	qla2x00 local function status
 *
 * Context:
 *	Kernel context.
 */
#define FETCH_XCHOFFLD_STAT	0x2
int
qla_get_exchoffld_status(scsi_qla_host_t *vha, uint16_t *buf_sz,
	uint16_t *ex_logins_cnt)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1019,
	    "Entered %s\n", __func__);

	memset(mcp->mb, 0 , sizeof(mcp->mb));
	mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
	mcp->mb[1] = FETCH_XCHOFFLD_STAT;
	mcp->out_mb = MBX_1|MBX_0;
	mcp->in_mb = MBX_10|MBX_4|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;

	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1155, "Failed=%x.\n", rval);
	} else {
		*buf_sz = mcp->mb[4];
		*ex_logins_cnt = mcp->mb[10];

		ql_log(ql_log_info, vha, 0x118e,
		    "buffer size 0x%x, exchange offload count=%d\n",
		    mcp->mb[4], mcp->mb[10]);

		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1156,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla_set_exchoffld_mem_cfg
 *	Set exchange offload memory configuration
 *	Mbx needs to be issued before init_cb is set
 *
 * Input:
 *	ha:		adapter state pointer.
 *	buffer:		buffer pointer
 *	phys_addr:	physical address of buffer
 *	size:		size of buffer
 *	TARGET_QUEUE_LOCK must be released
 *	ADAPTER_STATE_LOCK must be released
 *
 * Returns:
 *	qla2x00 local function status code.
 *
 * Context:
 *	Kernel context.
 */
#define CONFIG_XCHOFFLD_MEM	0x3
int
qla_set_exchoffld_mem_cfg(scsi_qla_host_t *vha)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1157,
	    "Entered %s.\n", __func__);

	memset(mcp->mb, 0 , sizeof(mcp->mb));
	mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
	mcp->mb[1] = CONFIG_XCHOFFLD_MEM;
	mcp->mb[2] = MSW(ha->exchoffld_buf_dma);
	mcp->mb[3] = LSW(ha->exchoffld_buf_dma);
	mcp->mb[6] = MSW(MSD(ha->exchoffld_buf_dma));
	mcp->mb[7] = LSW(MSD(ha->exchoffld_buf_dma));
	mcp->mb[8] = MSW(ha->exchoffld_size);
	mcp->mb[9] = LSW(ha->exchoffld_size);
	mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_11|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x1158, "Failed=%x.\n", rval);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1192,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2x00_get_fw_version
 *	Get firmware version.
 *
 * Input:
 *	ha:		adapter state pointer.
 *	major:		pointer for major number.
 *	minor:		pointer for minor number.
 *	subminor:	pointer for subminor number.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_get_fw_version(scsi_qla_host_t *vha)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1029,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_GET_FIRMWARE_VERSION;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	if (IS_QLA81XX(vha->hw) || IS_QLA8031(ha) || IS_QLA8044(ha))
		mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8;
	if (IS_FWI2_CAPABLE(ha))
		mcp->in_mb |= MBX_17|MBX_16|MBX_15;
	if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
		mcp->in_mb |=
		    MBX_25|MBX_24|MBX_23|MBX_22|MBX_21|MBX_20|MBX_19|MBX_18|
		    MBX_14|MBX_13|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7;

	mcp->flags = 0;
	mcp->tov = MBX_TOV_SECONDS;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS)
		goto failed;

	/* Return mailbox data. */
	ha->fw_major_version = mcp->mb[1];
	ha->fw_minor_version = mcp->mb[2];
	ha->fw_subminor_version = mcp->mb[3];
	ha->fw_attributes = mcp->mb[6];
	if (IS_QLA2100(vha->hw) || IS_QLA2200(vha->hw))
		ha->fw_memory_size = 0x1FFFF;		/* Defaults to 128KB. */
	else
		ha->fw_memory_size = (mcp->mb[5] << 16) | mcp->mb[4];

	if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
		ha->mpi_version[0] = mcp->mb[10] & 0xff;
		ha->mpi_version[1] = mcp->mb[11] >> 8;
		ha->mpi_version[2] = mcp->mb[11] & 0xff;
		ha->mpi_capabilities = (mcp->mb[12] << 16) | mcp->mb[13];
		ha->phy_version[0] = mcp->mb[8] & 0xff;
		ha->phy_version[1] = mcp->mb[9] >> 8;
		ha->phy_version[2] = mcp->mb[9] & 0xff;
	}

	if (IS_FWI2_CAPABLE(ha)) {
		ha->fw_attributes_h = mcp->mb[15];
		ha->fw_attributes_ext[0] = mcp->mb[16];
		ha->fw_attributes_ext[1] = mcp->mb[17];
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1139,
		    "%s: FW_attributes Upper: 0x%x, Lower: 0x%x.\n",
		    __func__, mcp->mb[15], mcp->mb[6]);
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x112f,
		    "%s: Ext_FwAttributes Upper: 0x%x, Lower: 0x%x.\n",
		    __func__, mcp->mb[17], mcp->mb[16]);

		if (ha->fw_attributes_h & 0x4)
			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118d,
			    "%s: Firmware supports Extended Login 0x%x\n",
			    __func__, ha->fw_attributes_h);

		if (ha->fw_attributes_h & 0x8)
			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1191,
			    "%s: Firmware supports Exchange Offload 0x%x\n",
			    __func__, ha->fw_attributes_h);

		/*
		 * FW supports nvme and driver load parameter requested nvme.
		 * BIT 26 of fw_attributes indicates NVMe support.
		 */
		if ((ha->fw_attributes_h &
		    (FW_ATTR_H_NVME | FW_ATTR_H_NVME_UPDATED)) &&
		    ql2xnvmeenable) {
			if (ha->fw_attributes_h & FW_ATTR_H_NVME_FBURST)
				vha->flags.nvme_first_burst = 1;

			vha->flags.nvme_enabled = 1;
			ql_log(ql_log_info, vha, 0xd302,
			    "%s: FC-NVMe is Enabled (0x%x)\n",
			    __func__, ha->fw_attributes_h);
		}

		/* BIT_13 of Extended FW Attributes informs about NVMe2 support */
		if (ha->fw_attributes_ext[0] & FW_ATTR_EXT0_NVME2) {
			ql_log(ql_log_info, vha, 0xd302,
			    "Firmware supports NVMe2 0x%x\n",
			    ha->fw_attributes_ext[0]);
			vha->flags.nvme2_enabled = 1;
		}
	}

	if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
		ha->serdes_version[0] = mcp->mb[7] & 0xff;
		ha->serdes_version[1] = mcp->mb[8] >> 8;
		ha->serdes_version[2] = mcp->mb[8] & 0xff;
		ha->mpi_version[0] = mcp->mb[10] & 0xff;
		ha->mpi_version[1] = mcp->mb[11] >> 8;
		ha->mpi_version[2] = mcp->mb[11] & 0xff;
		ha->pep_version[0] = mcp->mb[13] & 0xff;
		ha->pep_version[1] = mcp->mb[14] >> 8;
		ha->pep_version[2] = mcp->mb[14] & 0xff;
		ha->fw_shared_ram_start = (mcp->mb[19] << 16) | mcp->mb[18];
		ha->fw_shared_ram_end = (mcp->mb[21] << 16) | mcp->mb[20];
		ha->fw_ddr_ram_start = (mcp->mb[23] << 16) | mcp->mb[22];
		ha->fw_ddr_ram_end = (mcp->mb[25] << 16) | mcp->mb[24];
		if (IS_QLA28XX(ha)) {
			if (mcp->mb[16] & BIT_10)
				ha->flags.secure_fw = 1;

			ql_log(ql_log_info, vha, 0xffff,
			    "Secure Flash Update in FW: %s\n",
			    (ha->flags.secure_fw) ? "Supported" :
			    "Not Supported");
		}

		if (ha->flags.scm_supported_a &&
		    (ha->fw_attributes_ext[0] & FW_ATTR_EXT0_SCM_SUPPORTED)) {
			ha->flags.scm_supported_f = 1;
			ha->sf_init_cb->flags |= cpu_to_le16(BIT_13);
		}
		ql_log(ql_log_info, vha, 0x11a3, "SCM in FW: %s\n",
		    (ha->flags.scm_supported_f) ? "Supported" :
		    "Not Supported");

		if (vha->flags.nvme2_enabled) {
			/* set BIT_15 of special feature control block for SLER */
			ha->sf_init_cb->flags |= cpu_to_le16(BIT_15);
			/* set BIT_14 of special feature control block for PI CTRL*/
			ha->sf_init_cb->flags |= cpu_to_le16(BIT_14);
		}
	}

failed:
	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x102a, "Failed=%x.\n", rval);
	} else {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102b,
		    "Done %s.\n", __func__);
	}
	return rval;
}

/*
 * qla2x00_get_fw_options
 *	Get firmware options.
 *
 * Input:
 *	ha = adapter block pointer.
 *	fwopt = pointer for firmware options.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102c,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_GET_FIRMWARE_OPTION;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x102d, "Failed=%x.\n", rval);
	} else {
		fwopts[0] = mcp->mb[0];
		fwopts[1] = mcp->mb[1];
		fwopts[2] = mcp->mb[2];
		fwopts[3] = mcp->mb[3];

		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102e,
		    "Done %s.\n", __func__);
	}

	return rval;
}


/*
 * qla2x00_set_fw_options
 *	Set firmware options.
 *
 * Input:
 *	ha = adapter block pointer.
 *	fwopt = pointer for firmware options.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102f,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_SET_FIRMWARE_OPTION;
	mcp->mb[1] = fwopts[1];
	mcp->mb[2] = fwopts[2];
	mcp->mb[3] = fwopts[3];
	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_0;
	if (IS_FWI2_CAPABLE(vha->hw)) {
		mcp->in_mb |= MBX_1;
		mcp->mb[10] = fwopts[10];
		mcp->out_mb |= MBX_10;
	} else {
		mcp->mb[10] = fwopts[10];
		mcp->mb[11] = fwopts[11];
		mcp->mb[12] = 0;	/* Undocumented, but used */
		mcp->out_mb |= MBX_12|MBX_11|MBX_10;
	}
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	fwopts[0] = mcp->mb[0];

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x1030,
		    "Failed=%x (%x/%x).\n", rval, mcp->mb[0], mcp->mb[1]);
	} else {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1031,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2x00_mbx_reg_test
 *	Mailbox register wrap test.
 *
 * Input:
 *	ha = adapter block pointer.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_mbx_reg_test(scsi_qla_host_t *vha)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1032,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST;
	mcp->mb[1] = 0xAAAA;
	mcp->mb[2] = 0x5555;
	mcp->mb[3] = 0xAA55;
	mcp->mb[4] = 0x55AA;
	mcp->mb[5] = 0xA5A5;
	mcp->mb[6] = 0x5A5A;
	mcp->mb[7] = 0x2525;
	mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval == QLA_SUCCESS) {
		if (mcp->mb[1] != 0xAAAA || mcp->mb[2] != 0x5555 ||
		    mcp->mb[3] != 0xAA55 || mcp->mb[4] != 0x55AA)
			rval = QLA_FUNCTION_FAILED;
		if (mcp->mb[5] != 0xA5A5 || mcp->mb[6] != 0x5A5A ||
		    mcp->mb[7] != 0x2525)
			rval = QLA_FUNCTION_FAILED;
	}

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x1033, "Failed=%x.\n", rval);
		vha->hw_err_cnt++;
	} else {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1034,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2x00_verify_checksum
 *	Verify firmware checksum.
 *
 * Input:
 *	ha = adapter block pointer.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1035,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_VERIFY_CHECKSUM;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_0;
	if (IS_FWI2_CAPABLE(vha->hw)) {
		mcp->mb[1] = MSW(risc_addr);
		mcp->mb[2] = LSW(risc_addr);
		mcp->out_mb |= MBX_2|MBX_1;
		mcp->in_mb |= MBX_2|MBX_1;
	} else {
		mcp->mb[1] = LSW(risc_addr);
		mcp->out_mb |= MBX_1;
		mcp->in_mb |= MBX_1;
	}

	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1036,
		    "Failed=%x chk sum=%x.\n", rval, IS_FWI2_CAPABLE(vha->hw) ?
		    (mcp->mb[2] << 16) | mcp->mb[1] : mcp->mb[1]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1037,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2x00_issue_iocb
 *	Issue IOCB using mailbox command
 *
 * Input:
 *	ha = adapter state pointer.
 *	buffer = buffer pointer.
 *	phys_addr = physical address of buffer.
 *	size = size of buffer.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
    dma_addr_t phys_addr, size_t size, uint32_t tov)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	if (!vha->hw->flags.fw_started)
		return QLA_INVALID_COMMAND;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1038,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_IOCB_COMMAND_A64;
	mcp->mb[1] = 0;
	mcp->mb[2] = MSW(LSD(phys_addr));
	mcp->mb[3] = LSW(LSD(phys_addr));
	mcp->mb[6] = MSW(MSD(phys_addr));
	mcp->mb[7] = LSW(MSD(phys_addr));
	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_1|MBX_0;
	mcp->tov = tov;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x1039, "Failed=%x.\n", rval);
	} else {
		sts_entry_t *sts_entry = buffer;

		/* Mask reserved bits. */
		sts_entry->entry_status &=
		    IS_FWI2_CAPABLE(vha->hw) ? RF_MASK_24XX : RF_MASK;
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103a,
		    "Done %s (status=%x).\n", __func__,
		    sts_entry->entry_status);
	}

	return rval;
}

int
qla2x00_issue_iocb(scsi_qla_host_t *vha, void *buffer, dma_addr_t phys_addr,
    size_t size)
{
	return qla2x00_issue_iocb_timeout(vha, buffer, phys_addr, size,
	    MBX_TOV_SECONDS);
}

/*
 * qla2x00_abort_command
 *	Abort command aborts a specified IOCB.
 *
 * Input:
 *	ha = adapter block pointer.
 *	sp = SRB structure pointer.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_abort_command(srb_t *sp)
{
	unsigned long flags = 0;
	int rval;
	uint32_t handle = 0;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	fc_port_t *fcport = sp->fcport;
	scsi_qla_host_t *vha = fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103b,
	    "Entered %s.\n", __func__);

	if (sp->qpair)
		req = sp->qpair->req;
	else
		req = vha->req;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
		if (req->outstanding_cmds[handle] == sp)
			break;
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (handle == req->num_outstanding_cmds) {
		/* command not found */
		return QLA_FUNCTION_FAILED;
	}

	mcp->mb[0] = MBC_ABORT_COMMAND;
	if (HAS_EXTENDED_IDS(ha))
		mcp->mb[1] = fcport->loop_id;
	else
		mcp->mb[1] = fcport->loop_id << 8;
	mcp->mb[2] = (uint16_t)handle;
	mcp->mb[3] = (uint16_t)(handle >> 16);
	mcp->mb[6] = (uint16_t)cmd->device->lun;
	mcp->out_mb = MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x103c, "Failed=%x.\n", rval);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103d,
		    "Done %s.\n", __func__);
	}

	return rval;
}

int
qla2x00_abort_target(struct fc_port *fcport, uint64_t l, int tag)
{
	int rval, rval2;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	scsi_qla_host_t *vha;

	vha = fcport->vha;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103e,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_ABORT_TARGET;
	mcp->out_mb = MBX_9|MBX_2|MBX_1|MBX_0;
	if (HAS_EXTENDED_IDS(vha->hw)) {
		mcp->mb[1] = fcport->loop_id;
		mcp->mb[10] = 0;
		mcp->out_mb |= MBX_10;
	} else {
		mcp->mb[1] = fcport->loop_id << 8;
	}
	mcp->mb[2] = vha->hw->loop_reset_delay;
	mcp->mb[9] = vha->vp_idx;

	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103f,
		    "Failed=%x.\n", rval);
	}

	/* Issue marker IOCB. */
	rval2 = qla2x00_marker(vha, vha->hw->base_qpair, fcport->loop_id, 0,
	    MK_SYNC_ID);
	if (rval2 != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1040,
		    "Failed to issue marker IOCB (%x).\n", rval2);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1041,
		    "Done %s.\n", __func__);
	}

	return rval;
}

int
qla2x00_lun_reset(struct fc_port *fcport, uint64_t l, int tag)
{
	int rval, rval2;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	scsi_qla_host_t *vha;

	vha = fcport->vha;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1042,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_LUN_RESET;
	mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
	if (HAS_EXTENDED_IDS(vha->hw))
		mcp->mb[1] = fcport->loop_id;
	else
		mcp->mb[1] = fcport->loop_id << 8;
	mcp->mb[2] = (u32)l;
	mcp->mb[3] = 0;
	mcp->mb[9] = vha->vp_idx;

	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1043, "Failed=%x.\n", rval);
	}

	/* Issue marker IOCB. */
	rval2 = qla2x00_marker(vha, vha->hw->base_qpair, fcport->loop_id, l,
	    MK_SYNC_ID_LUN);
	if (rval2 != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1044,
		    "Failed to issue marker IOCB (%x).\n", rval2);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1045,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2x00_get_adapter_id
 *	Get adapter ID and topology.
 *
 * Input:
 *	ha = adapter block pointer.
 *	id = pointer for loop ID.
 *	al_pa = pointer for AL_PA.
 *	area = pointer for area.
 *	domain = pointer for domain.
 *	top = pointer for topology.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
    uint8_t *area, uint8_t *domain, uint16_t *top, uint16_t *sw_cap)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1046,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID;
	mcp->mb[9] = vha->vp_idx;
	mcp->out_mb = MBX_9|MBX_0;
	mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	if (IS_CNA_CAPABLE(vha->hw))
		mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10;
	if (IS_FWI2_CAPABLE(vha->hw))
		mcp->in_mb |= MBX_19|MBX_18|MBX_17|MBX_16;
	if (IS_QLA27XX(vha->hw) || IS_QLA28XX(vha->hw)) {
		mcp->in_mb |= MBX_15;
		mcp->out_mb |= MBX_7|MBX_21|MBX_22|MBX_23;
	}

	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (mcp->mb[0] == MBS_COMMAND_ERROR)
		rval = QLA_COMMAND_ERROR;
	else if (mcp->mb[0] == MBS_INVALID_COMMAND)
		rval = QLA_INVALID_COMMAND;

	/* Return data. */
	*id = mcp->mb[1];
	*al_pa = LSB(mcp->mb[2]);
	*area = MSB(mcp->mb[2]);
	*domain = LSB(mcp->mb[3]);
	*top = mcp->mb[6];
	*sw_cap = mcp->mb[7];

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x1047, "Failed=%x.\n", rval);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1048,
		    "Done %s.\n", __func__);

		if (IS_CNA_CAPABLE(vha->hw)) {
			vha->fcoe_vlan_id = mcp->mb[9] & 0xfff;
			vha->fcoe_fcf_idx = mcp->mb[10];
			vha->fcoe_vn_port_mac[5] = mcp->mb[11] >> 8;
			vha->fcoe_vn_port_mac[4] = mcp->mb[11] & 0xff;
			vha->fcoe_vn_port_mac[3] = mcp->mb[12] >> 8;
			vha->fcoe_vn_port_mac[2] = mcp->mb[12] & 0xff;
			vha->fcoe_vn_port_mac[1] = mcp->mb[13] >> 8;
			vha->fcoe_vn_port_mac[0] = mcp->mb[13] & 0xff;
		}
		/* If FA-WWN supported */
		if (IS_FAWWN_CAPABLE(vha->hw)) {
			if (mcp->mb[7] & BIT_14) {
				vha->port_name[0] = MSB(mcp->mb[16]);
				vha->port_name[1] = LSB(mcp->mb[16]);
				vha->port_name[2] = MSB(mcp->mb[17]);
				vha->port_name[3] = LSB(mcp->mb[17]);
				vha->port_name[4] = MSB(mcp->mb[18]);
				vha->port_name[5] = LSB(mcp->mb[18]);
				vha->port_name[6] = MSB(mcp->mb[19]);
				vha->port_name[7] = LSB(mcp->mb[19]);
				fc_host_port_name(vha->host) =
				    wwn_to_u64(vha->port_name);
				ql_dbg(ql_dbg_mbx, vha, 0x10ca,
				    "FA-WWN acquired %016llx\n",
				    wwn_to_u64(vha->port_name));
			}
		}

		if (IS_QLA27XX(vha->hw) || IS_QLA28XX(vha->hw)) {
			vha->bbcr = mcp->mb[15];
			if (mcp->mb[7] & SCM_EDC_ACC_RECEIVED) {
				ql_log(ql_log_info, vha, 0x11a4,
				    "SCM: EDC ELS completed, flags 0x%x\n",
				    mcp->mb[21]);
			}
			if (mcp->mb[7] & SCM_RDF_ACC_RECEIVED) {
				vha->hw->flags.scm_enabled = 1;
				vha->scm_fabric_connection_flags |=
				    SCM_FLAG_RDF_COMPLETED;
				ql_log(ql_log_info, vha, 0x11a5,
				    "SCM: RDF ELS completed, flags 0x%x\n",
				    mcp->mb[23]);
			}
		}
	}

	return rval;
}

/*
 * qla2x00_get_retry_cnt
 *	Get current firmware login retry count and delay.
 *
 * Input:
 *	ha = adapter block pointer.
 *	retry_cnt = pointer to login retry count.
 *	tov = pointer to login timeout value.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov,
    uint16_t *r_a_tov)
{
	int rval;
	uint16_t ratov;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1049,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_GET_RETRY_COUNT;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x104a,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		/* Convert returned data and check our values. */
		*r_a_tov = mcp->mb[3] / 2;
		ratov = (mcp->mb[3]/2) / 10;  /* mb[3] value is in 100ms */
		if (mcp->mb[1] * ratov > (*retry_cnt) * (*tov)) {
			/* Update to the larger values */
			*retry_cnt = (uint8_t)mcp->mb[1];
			*tov = ratov;
		}

		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104b,
		    "Done %s mb3=%d ratov=%d.\n", __func__, mcp->mb[3], ratov);
	}

	return rval;
}

/*
 * qla2x00_init_firmware
 *	Initialize adapter firmware.
 *
 * Input:
 *	ha = adapter block pointer.
 *	dptr = Initialization control block pointer.
 *	size = size of initialization control block.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104c,
	    "Entered %s.\n", __func__);

	if (IS_P3P_TYPE(ha) && ql2xdbwr)
		qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr,
		    (0x04 | (ha->portnum << 5) | (0 << 8) | (0 << 16)));

	if (ha->flags.npiv_supported)
		mcp->mb[0] = MBC_MID_INITIALIZE_FIRMWARE;
	else
		mcp->mb[0] = MBC_INITIALIZE_FIRMWARE;

	mcp->mb[1] = 0;
	mcp->mb[2] = MSW(ha->init_cb_dma);
	mcp->mb[3] = LSW(ha->init_cb_dma);
	mcp->mb[6] = MSW(MSD(ha->init_cb_dma));
	mcp->mb[7] = LSW(MSD(ha->init_cb_dma));
	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	if (ha->ex_init_cb && ha->ex_init_cb->ex_version) {
		mcp->mb[1] = BIT_0;
		mcp->mb[10] = MSW(ha->ex_init_cb_dma);
		mcp->mb[11] = LSW(ha->ex_init_cb_dma);
		mcp->mb[12] = MSW(MSD(ha->ex_init_cb_dma));
		mcp->mb[13] = LSW(MSD(ha->ex_init_cb_dma));
		mcp->mb[14] = sizeof(*ha->ex_init_cb);
		mcp->out_mb |= MBX_14|MBX_13|MBX_12|MBX_11|MBX_10;
	}

	if (ha->flags.scm_supported_f || vha->flags.nvme2_enabled) {
		mcp->mb[1] |= BIT_1;
		mcp->mb[16] = MSW(ha->sf_init_cb_dma);
		mcp->mb[17] = LSW(ha->sf_init_cb_dma);
		mcp->mb[18] = MSW(MSD(ha->sf_init_cb_dma));
		mcp->mb[19] = LSW(MSD(ha->sf_init_cb_dma));
		mcp->mb[15] = sizeof(*ha->sf_init_cb);
		mcp->out_mb |= MBX_19|MBX_18|MBX_17|MBX_16|MBX_15;
	}

	/* 1 and 2 should normally be captured. */
	mcp->in_mb = MBX_2|MBX_1|MBX_0;
	if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
		/* mb3 is additional info about the installed SFP. */
		mcp->in_mb |= MBX_3;
	mcp->buf_size = size;
	mcp->flags = MBX_DMA_OUT;
	mcp->tov = MBX_TOV_SECONDS;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x104d,
		    "Failed=%x mb[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x.\n",
		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3]);
		if (ha->init_cb) {
			ql_dbg(ql_dbg_mbx, vha, 0x104d, "init_cb:\n");
			ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha,
			    0x0104d, ha->init_cb, sizeof(*ha->init_cb));
		}
		if (ha->ex_init_cb && ha->ex_init_cb->ex_version) {
			ql_dbg(ql_dbg_mbx, vha, 0x104d, "ex_init_cb:\n");
			ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha,
			    0x0104d, ha->ex_init_cb, sizeof(*ha->ex_init_cb));
		}
	} else {
		if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
			if (mcp->mb[2] == 6 || mcp->mb[3] == 2)
				ql_dbg(ql_dbg_mbx, vha, 0x119d,
				    "Invalid SFP/Validation Failed\n");
		}
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104e,
		    "Done %s.\n", __func__);
	}

	return rval;
}


/*
 * qla2x00_get_port_database
 *	Issue normal/enhanced get port database mailbox command
 *	and copy device name as necessary.
 *
 * Input:
 *	ha = adapter state pointer.
 *	dev = structure pointer.
 *	opt = enhanced cmd option byte.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	port_database_t *pd;
	struct port_database_24xx *pd24;
	dma_addr_t pd_dma;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104f,
	    "Entered %s.\n", __func__);

	pd24 = NULL;
	pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
	if (pd == NULL) {
		ql_log(ql_log_warn, vha, 0x1050,
		    "Failed to allocate port database structure.\n");
		fcport->query = 0;
		return QLA_MEMORY_ALLOC_FAILED;
	}

	mcp->mb[0] = MBC_GET_PORT_DATABASE;
	if (opt != 0 && !IS_FWI2_CAPABLE(ha))
		mcp->mb[0] = MBC_ENHANCED_GET_PORT_DATABASE;
	mcp->mb[2] = MSW(pd_dma);
	mcp->mb[3] = LSW(pd_dma);
	mcp->mb[6] = MSW(MSD(pd_dma));
	mcp->mb[7] = LSW(MSD(pd_dma));
	mcp->mb[9] = vha->vp_idx;
	mcp->out_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
	mcp->in_mb = MBX_0;
	if (IS_FWI2_CAPABLE(ha)) {
		mcp->mb[1] = fcport->loop_id;
		mcp->mb[10] = opt;
		mcp->out_mb |= MBX_10|MBX_1;
		mcp->in_mb |= MBX_1;
	} else if (HAS_EXTENDED_IDS(ha)) {
		mcp->mb[1] = fcport->loop_id;
		mcp->mb[10] = opt;
		mcp->out_mb |= MBX_10|MBX_1;
	} else {
		mcp->mb[1] = fcport->loop_id << 8 | opt;
		mcp->out_mb |= MBX_1;
	}
	mcp->buf_size = IS_FWI2_CAPABLE(ha) ?
	    PORT_DATABASE_24XX_SIZE : PORT_DATABASE_SIZE;
	mcp->flags = MBX_DMA_IN;
	mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS)
		goto gpd_error_out;

	if (IS_FWI2_CAPABLE(ha)) {
		uint64_t zero = 0;
		u8 current_login_state, last_login_state;

		pd24 = (struct port_database_24xx *) pd;

		/* Check for logged in state. */
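		/*
		 * For FC-NVMe targets the firmware reports the login state
		 * in the upper nibble of these fields; otherwise the lower
		 * nibble is used, hence the shift/mask below.
		 */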
*/ 1974 if (NVME_TARGET(ha, fcport)) { 1975 current_login_state = pd24->current_login_state >> 4; 1976 last_login_state = pd24->last_login_state >> 4; 1977 } else { 1978 current_login_state = pd24->current_login_state & 0xf; 1979 last_login_state = pd24->last_login_state & 0xf; 1980 } 1981 fcport->current_login_state = pd24->current_login_state; 1982 fcport->last_login_state = pd24->last_login_state; 1983 1984 /* Check for logged in state. */ 1985 if (current_login_state != PDS_PRLI_COMPLETE && 1986 last_login_state != PDS_PRLI_COMPLETE) { 1987 ql_dbg(ql_dbg_mbx, vha, 0x119a, 1988 "Unable to verify login-state (%x/%x) for loop_id %x.\n", 1989 current_login_state, last_login_state, 1990 fcport->loop_id); 1991 rval = QLA_FUNCTION_FAILED; 1992 1993 if (!fcport->query) 1994 goto gpd_error_out; 1995 } 1996 1997 if (fcport->loop_id == FC_NO_LOOP_ID || 1998 (memcmp(fcport->port_name, (uint8_t *)&zero, 8) && 1999 memcmp(fcport->port_name, pd24->port_name, 8))) { 2000 /* We lost the device mid way. */ 2001 rval = QLA_NOT_LOGGED_IN; 2002 goto gpd_error_out; 2003 } 2004 2005 /* Names are little-endian. */ 2006 memcpy(fcport->node_name, pd24->node_name, WWN_SIZE); 2007 memcpy(fcport->port_name, pd24->port_name, WWN_SIZE); 2008 2009 /* Get port_id of device. */ 2010 fcport->d_id.b.domain = pd24->port_id[0]; 2011 fcport->d_id.b.area = pd24->port_id[1]; 2012 fcport->d_id.b.al_pa = pd24->port_id[2]; 2013 fcport->d_id.b.rsvd_1 = 0; 2014 2015 /* If not target must be initiator or unknown type. */ 2016 if ((pd24->prli_svc_param_word_3[0] & BIT_4) == 0) 2017 fcport->port_type = FCT_INITIATOR; 2018 else 2019 fcport->port_type = FCT_TARGET; 2020 2021 /* Passback COS information. */ 2022 fcport->supported_classes = (pd24->flags & PDF_CLASS_2) ? 2023 FC_COS_CLASS2 : FC_COS_CLASS3; 2024 2025 if (pd24->prli_svc_param_word_3[0] & BIT_7) 2026 fcport->flags |= FCF_CONF_COMP_SUPPORTED; 2027 } else { 2028 uint64_t zero = 0; 2029 2030 /* Check for logged in state. */ 2031 if (pd->master_state != PD_STATE_PORT_LOGGED_IN && 2032 pd->slave_state != PD_STATE_PORT_LOGGED_IN) { 2033 ql_dbg(ql_dbg_mbx, vha, 0x100a, 2034 "Unable to verify login-state (%x/%x) - " 2035 "portid=%02x%02x%02x.\n", pd->master_state, 2036 pd->slave_state, fcport->d_id.b.domain, 2037 fcport->d_id.b.area, fcport->d_id.b.al_pa); 2038 rval = QLA_FUNCTION_FAILED; 2039 goto gpd_error_out; 2040 } 2041 2042 if (fcport->loop_id == FC_NO_LOOP_ID || 2043 (memcmp(fcport->port_name, (uint8_t *)&zero, 8) && 2044 memcmp(fcport->port_name, pd->port_name, 8))) { 2045 /* We lost the device mid way. */ 2046 rval = QLA_NOT_LOGGED_IN; 2047 goto gpd_error_out; 2048 } 2049 2050 /* Names are little-endian. */ 2051 memcpy(fcport->node_name, pd->node_name, WWN_SIZE); 2052 memcpy(fcport->port_name, pd->port_name, WWN_SIZE); 2053 2054 /* Get port_id of device. */ 2055 fcport->d_id.b.domain = pd->port_id[0]; 2056 fcport->d_id.b.area = pd->port_id[3]; 2057 fcport->d_id.b.al_pa = pd->port_id[2]; 2058 fcport->d_id.b.rsvd_1 = 0; 2059 2060 /* If not target must be initiator or unknown type. */ 2061 if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0) 2062 fcport->port_type = FCT_INITIATOR; 2063 else 2064 fcport->port_type = FCT_TARGET; 2065 2066 /* Passback COS information. */ 2067 fcport->supported_classes = (pd->options & BIT_4) ? 
2068 FC_COS_CLASS2 : FC_COS_CLASS3; 2069 } 2070 2071 gpd_error_out: 2072 dma_pool_free(ha->s_dma_pool, pd, pd_dma); 2073 fcport->query = 0; 2074 2075 if (rval != QLA_SUCCESS) { 2076 ql_dbg(ql_dbg_mbx, vha, 0x1052, 2077 "Failed=%x mb[0]=%x mb[1]=%x.\n", rval, 2078 mcp->mb[0], mcp->mb[1]); 2079 } else { 2080 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1053, 2081 "Done %s.\n", __func__); 2082 } 2083 2084 return rval; 2085 } 2086 2087 int 2088 qla24xx_get_port_database(scsi_qla_host_t *vha, u16 nport_handle, 2089 struct port_database_24xx *pdb) 2090 { 2091 mbx_cmd_t mc; 2092 mbx_cmd_t *mcp = &mc; 2093 dma_addr_t pdb_dma; 2094 int rval; 2095 2096 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1115, 2097 "Entered %s.\n", __func__); 2098 2099 memset(pdb, 0, sizeof(*pdb)); 2100 2101 pdb_dma = dma_map_single(&vha->hw->pdev->dev, pdb, 2102 sizeof(*pdb), DMA_FROM_DEVICE); 2103 if (!pdb_dma) { 2104 ql_log(ql_log_warn, vha, 0x1116, "Failed to map dma buffer.\n"); 2105 return QLA_MEMORY_ALLOC_FAILED; 2106 } 2107 2108 mcp->mb[0] = MBC_GET_PORT_DATABASE; 2109 mcp->mb[1] = nport_handle; 2110 mcp->mb[2] = MSW(LSD(pdb_dma)); 2111 mcp->mb[3] = LSW(LSD(pdb_dma)); 2112 mcp->mb[6] = MSW(MSD(pdb_dma)); 2113 mcp->mb[7] = LSW(MSD(pdb_dma)); 2114 mcp->mb[9] = 0; 2115 mcp->mb[10] = 0; 2116 mcp->out_mb = MBX_10|MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 2117 mcp->in_mb = MBX_1|MBX_0; 2118 mcp->buf_size = sizeof(*pdb); 2119 mcp->flags = MBX_DMA_IN; 2120 mcp->tov = vha->hw->login_timeout * 2; 2121 rval = qla2x00_mailbox_command(vha, mcp); 2122 2123 if (rval != QLA_SUCCESS) { 2124 ql_dbg(ql_dbg_mbx, vha, 0x111a, 2125 "Failed=%x mb[0]=%x mb[1]=%x.\n", 2126 rval, mcp->mb[0], mcp->mb[1]); 2127 } else { 2128 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111b, 2129 "Done %s.\n", __func__); 2130 } 2131 2132 dma_unmap_single(&vha->hw->pdev->dev, pdb_dma, 2133 sizeof(*pdb), DMA_FROM_DEVICE); 2134 2135 return rval; 2136 } 2137 2138 /* 2139 * qla2x00_get_firmware_state 2140 * Get adapter firmware state. 2141 * 2142 * Input: 2143 * ha = adapter block pointer. 2144 * dptr = pointer for firmware state. 2145 * TARGET_QUEUE_LOCK must be released. 2146 * ADAPTER_STATE_LOCK must be released. 2147 * 2148 * Returns: 2149 * qla2x00 local function return status code. 2150 * 2151 * Context: 2152 * Kernel context. 2153 */ 2154 int 2155 qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states) 2156 { 2157 int rval; 2158 mbx_cmd_t mc; 2159 mbx_cmd_t *mcp = &mc; 2160 struct qla_hw_data *ha = vha->hw; 2161 2162 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1054, 2163 "Entered %s.\n", __func__); 2164 2165 mcp->mb[0] = MBC_GET_FIRMWARE_STATE; 2166 mcp->out_mb = MBX_0; 2167 if (IS_FWI2_CAPABLE(vha->hw)) 2168 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 2169 else 2170 mcp->in_mb = MBX_1|MBX_0; 2171 mcp->tov = MBX_TOV_SECONDS; 2172 mcp->flags = 0; 2173 rval = qla2x00_mailbox_command(vha, mcp); 2174 2175 /* Return firmware states. 
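 *
 *    Example caller (illustrative sketch only; FSTATE_READY is the
 *    firmware-ready value defined for mb[1] in qla_def.h and is an
 *    assumption here):
 *
 *        uint16_t state[6] = { 0 };
 *        bool fw_ready;
 *
 *        fw_ready = (qla2x00_get_firmware_state(vha, state) == QLA_SUCCESS &&
 *            state[0] == FSTATE_READY);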
*/ 2176 states[0] = mcp->mb[1]; 2177 if (IS_FWI2_CAPABLE(vha->hw)) { 2178 states[1] = mcp->mb[2]; 2179 states[2] = mcp->mb[3]; /* SFP info */ 2180 states[3] = mcp->mb[4]; 2181 states[4] = mcp->mb[5]; 2182 states[5] = mcp->mb[6]; /* DPORT status */ 2183 } 2184 2185 if (rval != QLA_SUCCESS) { 2186 /*EMPTY*/ 2187 ql_dbg(ql_dbg_mbx, vha, 0x1055, "Failed=%x.\n", rval); 2188 } else { 2189 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 2190 if (mcp->mb[2] == 6 || mcp->mb[3] == 2) 2191 ql_dbg(ql_dbg_mbx, vha, 0x119e, 2192 "Invalid SFP/Validation Failed\n"); 2193 } 2194 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1056, 2195 "Done %s.\n", __func__); 2196 } 2197 2198 return rval; 2199 } 2200 2201 /* 2202 * qla2x00_get_port_name 2203 * Issue get port name mailbox command. 2204 * Returned name is in big endian format. 2205 * 2206 * Input: 2207 * ha = adapter block pointer. 2208 * loop_id = loop ID of device. 2209 * name = pointer for name. 2210 * TARGET_QUEUE_LOCK must be released. 2211 * ADAPTER_STATE_LOCK must be released. 2212 * 2213 * Returns: 2214 * qla2x00 local function return status code. 2215 * 2216 * Context: 2217 * Kernel context. 2218 */ 2219 int 2220 qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name, 2221 uint8_t opt) 2222 { 2223 int rval; 2224 mbx_cmd_t mc; 2225 mbx_cmd_t *mcp = &mc; 2226 2227 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1057, 2228 "Entered %s.\n", __func__); 2229 2230 mcp->mb[0] = MBC_GET_PORT_NAME; 2231 mcp->mb[9] = vha->vp_idx; 2232 mcp->out_mb = MBX_9|MBX_1|MBX_0; 2233 if (HAS_EXTENDED_IDS(vha->hw)) { 2234 mcp->mb[1] = loop_id; 2235 mcp->mb[10] = opt; 2236 mcp->out_mb |= MBX_10; 2237 } else { 2238 mcp->mb[1] = loop_id << 8 | opt; 2239 } 2240 2241 mcp->in_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 2242 mcp->tov = MBX_TOV_SECONDS; 2243 mcp->flags = 0; 2244 rval = qla2x00_mailbox_command(vha, mcp); 2245 2246 if (rval != QLA_SUCCESS) { 2247 /*EMPTY*/ 2248 ql_dbg(ql_dbg_mbx, vha, 0x1058, "Failed=%x.\n", rval); 2249 } else { 2250 if (name != NULL) { 2251 /* This function returns name in big endian. */ 2252 name[0] = MSB(mcp->mb[2]); 2253 name[1] = LSB(mcp->mb[2]); 2254 name[2] = MSB(mcp->mb[3]); 2255 name[3] = LSB(mcp->mb[3]); 2256 name[4] = MSB(mcp->mb[6]); 2257 name[5] = LSB(mcp->mb[6]); 2258 name[6] = MSB(mcp->mb[7]); 2259 name[7] = LSB(mcp->mb[7]); 2260 } 2261 2262 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1059, 2263 "Done %s.\n", __func__); 2264 } 2265 2266 return rval; 2267 } 2268 2269 /* 2270 * qla24xx_link_initialization 2271 * Issue link initialization mailbox command. 2272 * 2273 * Input: 2274 * ha = adapter block pointer. 2275 * TARGET_QUEUE_LOCK must be released. 2276 * ADAPTER_STATE_LOCK must be released. 2277 * 2278 * Returns: 2279 * qla2x00 local function return status code. 2280 * 2281 * Context: 2282 * Kernel context. 
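 *
 * Example (illustrative sketch only; the debug message id below is a
 * placeholder, not one allocated in qla_dbg.c):
 *
 *	if (qla24xx_link_initialize(vha) != QLA_SUCCESS)
 *		ql_dbg(ql_dbg_disc, vha, 0xffff,
 *		    "Link initialization failed.\n");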
2283 */ 2284 int 2285 qla24xx_link_initialize(scsi_qla_host_t *vha) 2286 { 2287 int rval; 2288 mbx_cmd_t mc; 2289 mbx_cmd_t *mcp = &mc; 2290 2291 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1152, 2292 "Entered %s.\n", __func__); 2293 2294 if (!IS_FWI2_CAPABLE(vha->hw) || IS_CNA_CAPABLE(vha->hw)) 2295 return QLA_FUNCTION_FAILED; 2296 2297 mcp->mb[0] = MBC_LINK_INITIALIZATION; 2298 mcp->mb[1] = BIT_4; 2299 if (vha->hw->operating_mode == LOOP) 2300 mcp->mb[1] |= BIT_6; 2301 else 2302 mcp->mb[1] |= BIT_5; 2303 mcp->mb[2] = 0; 2304 mcp->mb[3] = 0; 2305 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 2306 mcp->in_mb = MBX_0; 2307 mcp->tov = MBX_TOV_SECONDS; 2308 mcp->flags = 0; 2309 rval = qla2x00_mailbox_command(vha, mcp); 2310 2311 if (rval != QLA_SUCCESS) { 2312 ql_dbg(ql_dbg_mbx, vha, 0x1153, "Failed=%x.\n", rval); 2313 } else { 2314 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1154, 2315 "Done %s.\n", __func__); 2316 } 2317 2318 return rval; 2319 } 2320 2321 /* 2322 * qla2x00_lip_reset 2323 * Issue LIP reset mailbox command. 2324 * 2325 * Input: 2326 * ha = adapter block pointer. 2327 * TARGET_QUEUE_LOCK must be released. 2328 * ADAPTER_STATE_LOCK must be released. 2329 * 2330 * Returns: 2331 * qla2x00 local function return status code. 2332 * 2333 * Context: 2334 * Kernel context. 2335 */ 2336 int 2337 qla2x00_lip_reset(scsi_qla_host_t *vha) 2338 { 2339 int rval; 2340 mbx_cmd_t mc; 2341 mbx_cmd_t *mcp = &mc; 2342 2343 ql_dbg(ql_dbg_disc, vha, 0x105a, 2344 "Entered %s.\n", __func__); 2345 2346 if (IS_CNA_CAPABLE(vha->hw)) { 2347 /* Logout across all FCFs. */ 2348 mcp->mb[0] = MBC_LIP_FULL_LOGIN; 2349 mcp->mb[1] = BIT_1; 2350 mcp->mb[2] = 0; 2351 mcp->out_mb = MBX_2|MBX_1|MBX_0; 2352 } else if (IS_FWI2_CAPABLE(vha->hw)) { 2353 mcp->mb[0] = MBC_LIP_FULL_LOGIN; 2354 mcp->mb[1] = BIT_4; 2355 mcp->mb[2] = 0; 2356 mcp->mb[3] = vha->hw->loop_reset_delay; 2357 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 2358 } else { 2359 mcp->mb[0] = MBC_LIP_RESET; 2360 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 2361 if (HAS_EXTENDED_IDS(vha->hw)) { 2362 mcp->mb[1] = 0x00ff; 2363 mcp->mb[10] = 0; 2364 mcp->out_mb |= MBX_10; 2365 } else { 2366 mcp->mb[1] = 0xff00; 2367 } 2368 mcp->mb[2] = vha->hw->loop_reset_delay; 2369 mcp->mb[3] = 0; 2370 } 2371 mcp->in_mb = MBX_0; 2372 mcp->tov = MBX_TOV_SECONDS; 2373 mcp->flags = 0; 2374 rval = qla2x00_mailbox_command(vha, mcp); 2375 2376 if (rval != QLA_SUCCESS) { 2377 /*EMPTY*/ 2378 ql_dbg(ql_dbg_mbx, vha, 0x105b, "Failed=%x.\n", rval); 2379 } else { 2380 /*EMPTY*/ 2381 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105c, 2382 "Done %s.\n", __func__); 2383 } 2384 2385 return rval; 2386 } 2387 2388 /* 2389 * qla2x00_send_sns 2390 * Send SNS command. 2391 * 2392 * Input: 2393 * ha = adapter block pointer. 2394 * sns = pointer for command. 2395 * cmd_size = command size. 2396 * buf_size = response/command size. 2397 * TARGET_QUEUE_LOCK must be released. 2398 * ADAPTER_STATE_LOCK must be released. 2399 * 2400 * Returns: 2401 * qla2x00 local function return status code. 2402 * 2403 * Context: 2404 * Kernel context. 
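 *
 * Example (sketch of the call pattern used by the legacy SNS helpers
 * in qla_gs.c; ha->sns_cmd, ha->sns_cmd_dma, GA_NXT_SNS_CMD_SIZE and
 * struct sns_cmd_pkt are assumptions based on that file, and the
 * command length appears to be passed in 16-bit words, hence the
 * division by two):
 *
 *	memset(ha->sns_cmd, 0, sizeof(struct sns_cmd_pkt));
 *	// ... build the GA_NXT request in ha->sns_cmd ...
 *	rval = qla2x00_send_sns(vha, ha->sns_cmd_dma,
 *	    GA_NXT_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt));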
2405 */ 2406 int 2407 qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address, 2408 uint16_t cmd_size, size_t buf_size) 2409 { 2410 int rval; 2411 mbx_cmd_t mc; 2412 mbx_cmd_t *mcp = &mc; 2413 2414 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105d, 2415 "Entered %s.\n", __func__); 2416 2417 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105e, 2418 "Retry cnt=%d ratov=%d total tov=%d.\n", 2419 vha->hw->retry_count, vha->hw->login_timeout, mcp->tov); 2420 2421 mcp->mb[0] = MBC_SEND_SNS_COMMAND; 2422 mcp->mb[1] = cmd_size; 2423 mcp->mb[2] = MSW(sns_phys_address); 2424 mcp->mb[3] = LSW(sns_phys_address); 2425 mcp->mb[6] = MSW(MSD(sns_phys_address)); 2426 mcp->mb[7] = LSW(MSD(sns_phys_address)); 2427 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 2428 mcp->in_mb = MBX_0|MBX_1; 2429 mcp->buf_size = buf_size; 2430 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN; 2431 mcp->tov = (vha->hw->login_timeout * 2) + (vha->hw->login_timeout / 2); 2432 rval = qla2x00_mailbox_command(vha, mcp); 2433 2434 if (rval != QLA_SUCCESS) { 2435 /*EMPTY*/ 2436 ql_dbg(ql_dbg_mbx, vha, 0x105f, 2437 "Failed=%x mb[0]=%x mb[1]=%x.\n", 2438 rval, mcp->mb[0], mcp->mb[1]); 2439 } else { 2440 /*EMPTY*/ 2441 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1060, 2442 "Done %s.\n", __func__); 2443 } 2444 2445 return rval; 2446 } 2447 2448 int 2449 qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, 2450 uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt) 2451 { 2452 int rval; 2453 2454 struct logio_entry_24xx *lg; 2455 dma_addr_t lg_dma; 2456 uint32_t iop[2]; 2457 struct qla_hw_data *ha = vha->hw; 2458 struct req_que *req; 2459 2460 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1061, 2461 "Entered %s.\n", __func__); 2462 2463 if (vha->vp_idx && vha->qpair) 2464 req = vha->qpair->req; 2465 else 2466 req = ha->req_q_map[0]; 2467 2468 lg = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma); 2469 if (lg == NULL) { 2470 ql_log(ql_log_warn, vha, 0x1062, 2471 "Failed to allocate login IOCB.\n"); 2472 return QLA_MEMORY_ALLOC_FAILED; 2473 } 2474 2475 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE; 2476 lg->entry_count = 1; 2477 lg->handle = make_handle(req->id, lg->handle); 2478 lg->nport_handle = cpu_to_le16(loop_id); 2479 lg->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI); 2480 if (opt & BIT_0) 2481 lg->control_flags |= cpu_to_le16(LCF_COND_PLOGI); 2482 if (opt & BIT_1) 2483 lg->control_flags |= cpu_to_le16(LCF_SKIP_PRLI); 2484 lg->port_id[0] = al_pa; 2485 lg->port_id[1] = area; 2486 lg->port_id[2] = domain; 2487 lg->vp_index = vha->vp_idx; 2488 rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0, 2489 (ha->r_a_tov / 10 * 2) + 2); 2490 if (rval != QLA_SUCCESS) { 2491 ql_dbg(ql_dbg_mbx, vha, 0x1063, 2492 "Failed to issue login IOCB (%x).\n", rval); 2493 } else if (lg->entry_status != 0) { 2494 ql_dbg(ql_dbg_mbx, vha, 0x1064, 2495 "Failed to complete IOCB -- error status (%x).\n", 2496 lg->entry_status); 2497 rval = QLA_FUNCTION_FAILED; 2498 } else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) { 2499 iop[0] = le32_to_cpu(lg->io_parameter[0]); 2500 iop[1] = le32_to_cpu(lg->io_parameter[1]); 2501 2502 ql_dbg(ql_dbg_mbx, vha, 0x1065, 2503 "Failed to complete IOCB -- completion status (%x) " 2504 "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status), 2505 iop[0], iop[1]); 2506 2507 switch (iop[0]) { 2508 case LSC_SCODE_PORTID_USED: 2509 mb[0] = MBS_PORT_ID_USED; 2510 mb[1] = LSW(iop[1]); 2511 break; 2512 case LSC_SCODE_NPORT_USED: 2513 mb[0] = MBS_LOOP_ID_USED; 2514 break; 2515 case LSC_SCODE_NOLINK: 2516 case LSC_SCODE_NOIOCB: 
2517 case LSC_SCODE_NOXCB: 2518 case LSC_SCODE_CMD_FAILED: 2519 case LSC_SCODE_NOFABRIC: 2520 case LSC_SCODE_FW_NOT_READY: 2521 case LSC_SCODE_NOT_LOGGED_IN: 2522 case LSC_SCODE_NOPCB: 2523 case LSC_SCODE_ELS_REJECT: 2524 case LSC_SCODE_CMD_PARAM_ERR: 2525 case LSC_SCODE_NONPORT: 2526 case LSC_SCODE_LOGGED_IN: 2527 case LSC_SCODE_NOFLOGI_ACC: 2528 default: 2529 mb[0] = MBS_COMMAND_ERROR; 2530 break; 2531 } 2532 } else { 2533 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1066, 2534 "Done %s.\n", __func__); 2535 2536 iop[0] = le32_to_cpu(lg->io_parameter[0]); 2537 2538 mb[0] = MBS_COMMAND_COMPLETE; 2539 mb[1] = 0; 2540 if (iop[0] & BIT_4) { 2541 if (iop[0] & BIT_8) 2542 mb[1] |= BIT_1; 2543 } else 2544 mb[1] = BIT_0; 2545 2546 /* Passback COS information. */ 2547 mb[10] = 0; 2548 if (lg->io_parameter[7] || lg->io_parameter[8]) 2549 mb[10] |= BIT_0; /* Class 2. */ 2550 if (lg->io_parameter[9] || lg->io_parameter[10]) 2551 mb[10] |= BIT_1; /* Class 3. */ 2552 if (lg->io_parameter[0] & cpu_to_le32(BIT_7)) 2553 mb[10] |= BIT_7; /* Confirmed Completion 2554 * Allowed 2555 */ 2556 } 2557 2558 dma_pool_free(ha->s_dma_pool, lg, lg_dma); 2559 2560 return rval; 2561 } 2562 2563 /* 2564 * qla2x00_login_fabric 2565 * Issue login fabric port mailbox command. 2566 * 2567 * Input: 2568 * ha = adapter block pointer. 2569 * loop_id = device loop ID. 2570 * domain = device domain. 2571 * area = device area. 2572 * al_pa = device AL_PA. 2573 * status = pointer for return status. 2574 * opt = command options. 2575 * TARGET_QUEUE_LOCK must be released. 2576 * ADAPTER_STATE_LOCK must be released. 2577 * 2578 * Returns: 2579 * qla2x00 local function return status code. 2580 * 2581 * Context: 2582 * Kernel context. 2583 */ 2584 int 2585 qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, 2586 uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt) 2587 { 2588 int rval; 2589 mbx_cmd_t mc; 2590 mbx_cmd_t *mcp = &mc; 2591 struct qla_hw_data *ha = vha->hw; 2592 2593 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1067, 2594 "Entered %s.\n", __func__); 2595 2596 mcp->mb[0] = MBC_LOGIN_FABRIC_PORT; 2597 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 2598 if (HAS_EXTENDED_IDS(ha)) { 2599 mcp->mb[1] = loop_id; 2600 mcp->mb[10] = opt; 2601 mcp->out_mb |= MBX_10; 2602 } else { 2603 mcp->mb[1] = (loop_id << 8) | opt; 2604 } 2605 mcp->mb[2] = domain; 2606 mcp->mb[3] = area << 8 | al_pa; 2607 2608 mcp->in_mb = MBX_7|MBX_6|MBX_2|MBX_1|MBX_0; 2609 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2); 2610 mcp->flags = 0; 2611 rval = qla2x00_mailbox_command(vha, mcp); 2612 2613 /* Return mailbox statuses. */ 2614 if (mb != NULL) { 2615 mb[0] = mcp->mb[0]; 2616 mb[1] = mcp->mb[1]; 2617 mb[2] = mcp->mb[2]; 2618 mb[6] = mcp->mb[6]; 2619 mb[7] = mcp->mb[7]; 2620 /* COS retrieved from Get-Port-Database mailbox command. */ 2621 mb[10] = 0; 2622 } 2623 2624 if (rval != QLA_SUCCESS) { 2625 /* RLU tmp code: need to change main mailbox_command function to 2626 * return ok even when the mailbox completion value is not 2627 * SUCCESS. The caller needs to be responsible to interpret 2628 * the return values of this mailbox command if we're not 2629 * to change too much of the existing code. 
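 *
 * The raw values tested below appear to be the standard mailbox
 * completion codes from qla_def.h (an assumption worth verifying);
 * spelled out with the named constants the check would read roughly:
 *
 *	if (mcp->mb[0] == MBS_INVALID_COMMAND ||
 *	    mcp->mb[0] == MBS_HOST_INTERFACE_ERROR ||
 *	    mcp->mb[0] == MBS_TEST_FAILED ||
 *	    mcp->mb[0] == MBS_COMMAND_ERROR ||
 *	    mcp->mb[0] == MBS_COMMAND_PARAMETER_ERROR)
 *		rval = QLA_SUCCESS;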
2630 */ 2631 if (mcp->mb[0] == 0x4001 || mcp->mb[0] == 0x4002 || 2632 mcp->mb[0] == 0x4003 || mcp->mb[0] == 0x4005 || 2633 mcp->mb[0] == 0x4006) 2634 rval = QLA_SUCCESS; 2635 2636 /*EMPTY*/ 2637 ql_dbg(ql_dbg_mbx, vha, 0x1068, 2638 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", 2639 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); 2640 } else { 2641 /*EMPTY*/ 2642 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1069, 2643 "Done %s.\n", __func__); 2644 } 2645 2646 return rval; 2647 } 2648 2649 /* 2650 * qla2x00_login_local_device 2651 * Issue login loop port mailbox command. 2652 * 2653 * Input: 2654 * ha = adapter block pointer. 2655 * loop_id = device loop ID. 2656 * opt = command options. 2657 * 2658 * Returns: 2659 * Return status code. 2660 * 2661 * Context: 2662 * Kernel context. 2663 * 2664 */ 2665 int 2666 qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport, 2667 uint16_t *mb_ret, uint8_t opt) 2668 { 2669 int rval; 2670 mbx_cmd_t mc; 2671 mbx_cmd_t *mcp = &mc; 2672 struct qla_hw_data *ha = vha->hw; 2673 2674 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106a, 2675 "Entered %s.\n", __func__); 2676 2677 if (IS_FWI2_CAPABLE(ha)) 2678 return qla24xx_login_fabric(vha, fcport->loop_id, 2679 fcport->d_id.b.domain, fcport->d_id.b.area, 2680 fcport->d_id.b.al_pa, mb_ret, opt); 2681 2682 mcp->mb[0] = MBC_LOGIN_LOOP_PORT; 2683 if (HAS_EXTENDED_IDS(ha)) 2684 mcp->mb[1] = fcport->loop_id; 2685 else 2686 mcp->mb[1] = fcport->loop_id << 8; 2687 mcp->mb[2] = opt; 2688 mcp->out_mb = MBX_2|MBX_1|MBX_0; 2689 mcp->in_mb = MBX_7|MBX_6|MBX_1|MBX_0; 2690 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2); 2691 mcp->flags = 0; 2692 rval = qla2x00_mailbox_command(vha, mcp); 2693 2694 /* Return mailbox statuses. */ 2695 if (mb_ret != NULL) { 2696 mb_ret[0] = mcp->mb[0]; 2697 mb_ret[1] = mcp->mb[1]; 2698 mb_ret[6] = mcp->mb[6]; 2699 mb_ret[7] = mcp->mb[7]; 2700 } 2701 2702 if (rval != QLA_SUCCESS) { 2703 /* AV tmp code: need to change main mailbox_command function to 2704 * return ok even when the mailbox completion value is not 2705 * SUCCESS. The caller needs to be responsible to interpret 2706 * the return values of this mailbox command if we're not 2707 * to change too much of the existing code. 
2708 */ 2709 if (mcp->mb[0] == 0x4005 || mcp->mb[0] == 0x4006) 2710 rval = QLA_SUCCESS; 2711 2712 ql_dbg(ql_dbg_mbx, vha, 0x106b, 2713 "Failed=%x mb[0]=%x mb[1]=%x mb[6]=%x mb[7]=%x.\n", 2714 rval, mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]); 2715 } else { 2716 /*EMPTY*/ 2717 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106c, 2718 "Done %s.\n", __func__); 2719 } 2720 2721 return (rval); 2722 } 2723 2724 int 2725 qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, 2726 uint8_t area, uint8_t al_pa) 2727 { 2728 int rval; 2729 struct logio_entry_24xx *lg; 2730 dma_addr_t lg_dma; 2731 struct qla_hw_data *ha = vha->hw; 2732 struct req_que *req; 2733 2734 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106d, 2735 "Entered %s.\n", __func__); 2736 2737 lg = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma); 2738 if (lg == NULL) { 2739 ql_log(ql_log_warn, vha, 0x106e, 2740 "Failed to allocate logout IOCB.\n"); 2741 return QLA_MEMORY_ALLOC_FAILED; 2742 } 2743 2744 req = vha->req; 2745 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE; 2746 lg->entry_count = 1; 2747 lg->handle = make_handle(req->id, lg->handle); 2748 lg->nport_handle = cpu_to_le16(loop_id); 2749 lg->control_flags = 2750 cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO| 2751 LCF_FREE_NPORT); 2752 lg->port_id[0] = al_pa; 2753 lg->port_id[1] = area; 2754 lg->port_id[2] = domain; 2755 lg->vp_index = vha->vp_idx; 2756 rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0, 2757 (ha->r_a_tov / 10 * 2) + 2); 2758 if (rval != QLA_SUCCESS) { 2759 ql_dbg(ql_dbg_mbx, vha, 0x106f, 2760 "Failed to issue logout IOCB (%x).\n", rval); 2761 } else if (lg->entry_status != 0) { 2762 ql_dbg(ql_dbg_mbx, vha, 0x1070, 2763 "Failed to complete IOCB -- error status (%x).\n", 2764 lg->entry_status); 2765 rval = QLA_FUNCTION_FAILED; 2766 } else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) { 2767 ql_dbg(ql_dbg_mbx, vha, 0x1071, 2768 "Failed to complete IOCB -- completion status (%x) " 2769 "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status), 2770 le32_to_cpu(lg->io_parameter[0]), 2771 le32_to_cpu(lg->io_parameter[1])); 2772 } else { 2773 /*EMPTY*/ 2774 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1072, 2775 "Done %s.\n", __func__); 2776 } 2777 2778 dma_pool_free(ha->s_dma_pool, lg, lg_dma); 2779 2780 return rval; 2781 } 2782 2783 /* 2784 * qla2x00_fabric_logout 2785 * Issue logout fabric port mailbox command. 2786 * 2787 * Input: 2788 * ha = adapter block pointer. 2789 * loop_id = device loop ID. 2790 * TARGET_QUEUE_LOCK must be released. 2791 * ADAPTER_STATE_LOCK must be released. 2792 * 2793 * Returns: 2794 * qla2x00 local function return status code. 2795 * 2796 * Context: 2797 * Kernel context. 
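 *
 * Example (illustrative sketch; reaching this routine through the
 * ha->isp_ops->fabric_logout method is an assumption based on the
 * driver's ops table):
 *
 *	ha->isp_ops->fabric_logout(vha, fcport->loop_id,
 *	    fcport->d_id.b.domain, fcport->d_id.b.area,
 *	    fcport->d_id.b.al_pa);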
2798 */ 2799 int 2800 qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, 2801 uint8_t area, uint8_t al_pa) 2802 { 2803 int rval; 2804 mbx_cmd_t mc; 2805 mbx_cmd_t *mcp = &mc; 2806 2807 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1073, 2808 "Entered %s.\n", __func__); 2809 2810 mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT; 2811 mcp->out_mb = MBX_1|MBX_0; 2812 if (HAS_EXTENDED_IDS(vha->hw)) { 2813 mcp->mb[1] = loop_id; 2814 mcp->mb[10] = 0; 2815 mcp->out_mb |= MBX_10; 2816 } else { 2817 mcp->mb[1] = loop_id << 8; 2818 } 2819 2820 mcp->in_mb = MBX_1|MBX_0; 2821 mcp->tov = MBX_TOV_SECONDS; 2822 mcp->flags = 0; 2823 rval = qla2x00_mailbox_command(vha, mcp); 2824 2825 if (rval != QLA_SUCCESS) { 2826 /*EMPTY*/ 2827 ql_dbg(ql_dbg_mbx, vha, 0x1074, 2828 "Failed=%x mb[1]=%x.\n", rval, mcp->mb[1]); 2829 } else { 2830 /*EMPTY*/ 2831 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1075, 2832 "Done %s.\n", __func__); 2833 } 2834 2835 return rval; 2836 } 2837 2838 /* 2839 * qla2x00_full_login_lip 2840 * Issue full login LIP mailbox command. 2841 * 2842 * Input: 2843 * ha = adapter block pointer. 2844 * TARGET_QUEUE_LOCK must be released. 2845 * ADAPTER_STATE_LOCK must be released. 2846 * 2847 * Returns: 2848 * qla2x00 local function return status code. 2849 * 2850 * Context: 2851 * Kernel context. 2852 */ 2853 int 2854 qla2x00_full_login_lip(scsi_qla_host_t *vha) 2855 { 2856 int rval; 2857 mbx_cmd_t mc; 2858 mbx_cmd_t *mcp = &mc; 2859 2860 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1076, 2861 "Entered %s.\n", __func__); 2862 2863 mcp->mb[0] = MBC_LIP_FULL_LOGIN; 2864 mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? BIT_4 : 0; 2865 mcp->mb[2] = 0; 2866 mcp->mb[3] = 0; 2867 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 2868 mcp->in_mb = MBX_0; 2869 mcp->tov = MBX_TOV_SECONDS; 2870 mcp->flags = 0; 2871 rval = qla2x00_mailbox_command(vha, mcp); 2872 2873 if (rval != QLA_SUCCESS) { 2874 /*EMPTY*/ 2875 ql_dbg(ql_dbg_mbx, vha, 0x1077, "Failed=%x.\n", rval); 2876 } else { 2877 /*EMPTY*/ 2878 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1078, 2879 "Done %s.\n", __func__); 2880 } 2881 2882 return rval; 2883 } 2884 2885 /* 2886 * qla2x00_get_id_list 2887 * 2888 * Input: 2889 * ha = adapter block pointer. 2890 * 2891 * Returns: 2892 * qla2x00 local function return status code. 2893 * 2894 * Context: 2895 * Kernel context. 
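 *
 * Example (sketch; ha->gid_list and ha->gid_list_dma are assumed to be
 * the buffers the driver sets aside for this command at init time):
 *
 *	uint16_t entries = 0;
 *	int rval;
 *
 *	rval = qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma,
 *	    &entries);
 *
 *    On success, 'entries' holds the number of ID-list elements written
 *    to ha->gid_list.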
2896 */ 2897 int 2898 qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma, 2899 uint16_t *entries) 2900 { 2901 int rval; 2902 mbx_cmd_t mc; 2903 mbx_cmd_t *mcp = &mc; 2904 2905 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1079, 2906 "Entered %s.\n", __func__); 2907 2908 if (id_list == NULL) 2909 return QLA_FUNCTION_FAILED; 2910 2911 mcp->mb[0] = MBC_GET_ID_LIST; 2912 mcp->out_mb = MBX_0; 2913 if (IS_FWI2_CAPABLE(vha->hw)) { 2914 mcp->mb[2] = MSW(id_list_dma); 2915 mcp->mb[3] = LSW(id_list_dma); 2916 mcp->mb[6] = MSW(MSD(id_list_dma)); 2917 mcp->mb[7] = LSW(MSD(id_list_dma)); 2918 mcp->mb[8] = 0; 2919 mcp->mb[9] = vha->vp_idx; 2920 mcp->out_mb |= MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2; 2921 } else { 2922 mcp->mb[1] = MSW(id_list_dma); 2923 mcp->mb[2] = LSW(id_list_dma); 2924 mcp->mb[3] = MSW(MSD(id_list_dma)); 2925 mcp->mb[6] = LSW(MSD(id_list_dma)); 2926 mcp->out_mb |= MBX_6|MBX_3|MBX_2|MBX_1; 2927 } 2928 mcp->in_mb = MBX_1|MBX_0; 2929 mcp->tov = MBX_TOV_SECONDS; 2930 mcp->flags = 0; 2931 rval = qla2x00_mailbox_command(vha, mcp); 2932 2933 if (rval != QLA_SUCCESS) { 2934 /*EMPTY*/ 2935 ql_dbg(ql_dbg_mbx, vha, 0x107a, "Failed=%x.\n", rval); 2936 } else { 2937 *entries = mcp->mb[1]; 2938 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107b, 2939 "Done %s.\n", __func__); 2940 } 2941 2942 return rval; 2943 } 2944 2945 /* 2946 * qla2x00_get_resource_cnts 2947 * Get current firmware resource counts. 2948 * 2949 * Input: 2950 * ha = adapter block pointer. 2951 * 2952 * Returns: 2953 * qla2x00 local function return status code. 2954 * 2955 * Context: 2956 * Kernel context. 2957 */ 2958 int 2959 qla2x00_get_resource_cnts(scsi_qla_host_t *vha) 2960 { 2961 struct qla_hw_data *ha = vha->hw; 2962 int rval; 2963 mbx_cmd_t mc; 2964 mbx_cmd_t *mcp = &mc; 2965 2966 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107c, 2967 "Entered %s.\n", __func__); 2968 2969 mcp->mb[0] = MBC_GET_RESOURCE_COUNTS; 2970 mcp->out_mb = MBX_0; 2971 mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 2972 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || 2973 IS_QLA27XX(ha) || IS_QLA28XX(ha)) 2974 mcp->in_mb |= MBX_12; 2975 mcp->tov = MBX_TOV_SECONDS; 2976 mcp->flags = 0; 2977 rval = qla2x00_mailbox_command(vha, mcp); 2978 2979 if (rval != QLA_SUCCESS) { 2980 /*EMPTY*/ 2981 ql_dbg(ql_dbg_mbx, vha, 0x107d, 2982 "Failed mb[0]=%x.\n", mcp->mb[0]); 2983 } else { 2984 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107e, 2985 "Done %s mb1=%x mb2=%x mb3=%x mb6=%x mb7=%x mb10=%x " 2986 "mb11=%x mb12=%x.\n", __func__, mcp->mb[1], mcp->mb[2], 2987 mcp->mb[3], mcp->mb[6], mcp->mb[7], mcp->mb[10], 2988 mcp->mb[11], mcp->mb[12]); 2989 2990 ha->orig_fw_tgt_xcb_count = mcp->mb[1]; 2991 ha->cur_fw_tgt_xcb_count = mcp->mb[2]; 2992 ha->cur_fw_xcb_count = mcp->mb[3]; 2993 ha->orig_fw_xcb_count = mcp->mb[6]; 2994 ha->cur_fw_iocb_count = mcp->mb[7]; 2995 ha->orig_fw_iocb_count = mcp->mb[10]; 2996 if (ha->flags.npiv_supported) 2997 ha->max_npiv_vports = mcp->mb[11]; 2998 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) || 2999 IS_QLA28XX(ha)) 3000 ha->fw_max_fcf_count = mcp->mb[12]; 3001 } 3002 3003 return (rval); 3004 } 3005 3006 /* 3007 * qla2x00_get_fcal_position_map 3008 * Get FCAL (LILP) position map using mailbox command 3009 * 3010 * Input: 3011 * ha = adapter state pointer. 3012 * pos_map = buffer pointer (can be NULL). 3013 * 3014 * Returns: 3015 * qla2x00 local function return status code. 3016 * 3017 * Context: 3018 * Kernel context. 
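 *
 * Example (illustrative sketch; judging from the dump length used
 * below, byte 0 of the returned map holds the count of AL_PA entries
 * that follow):
 *
 *	char map[FCAL_MAP_SIZE];
 *	uint8_t count;
 *
 *	if (qla2x00_get_fcal_position_map(vha, map) == QLA_SUCCESS) {
 *		count = map[0];
 *		// map[1] .. map[count] are the AL_PA values on the loop
 *	}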
3019 */ 3020 int 3021 qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map) 3022 { 3023 int rval; 3024 mbx_cmd_t mc; 3025 mbx_cmd_t *mcp = &mc; 3026 char *pmap; 3027 dma_addr_t pmap_dma; 3028 struct qla_hw_data *ha = vha->hw; 3029 3030 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107f, 3031 "Entered %s.\n", __func__); 3032 3033 pmap = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma); 3034 if (pmap == NULL) { 3035 ql_log(ql_log_warn, vha, 0x1080, 3036 "Memory alloc failed.\n"); 3037 return QLA_MEMORY_ALLOC_FAILED; 3038 } 3039 3040 mcp->mb[0] = MBC_GET_FC_AL_POSITION_MAP; 3041 mcp->mb[2] = MSW(pmap_dma); 3042 mcp->mb[3] = LSW(pmap_dma); 3043 mcp->mb[6] = MSW(MSD(pmap_dma)); 3044 mcp->mb[7] = LSW(MSD(pmap_dma)); 3045 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0; 3046 mcp->in_mb = MBX_1|MBX_0; 3047 mcp->buf_size = FCAL_MAP_SIZE; 3048 mcp->flags = MBX_DMA_IN; 3049 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2); 3050 rval = qla2x00_mailbox_command(vha, mcp); 3051 3052 if (rval == QLA_SUCCESS) { 3053 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1081, 3054 "mb0/mb1=%x/%X FC/AL position map size (%x).\n", 3055 mcp->mb[0], mcp->mb[1], (unsigned)pmap[0]); 3056 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111d, 3057 pmap, pmap[0] + 1); 3058 3059 if (pos_map) 3060 memcpy(pos_map, pmap, FCAL_MAP_SIZE); 3061 } 3062 dma_pool_free(ha->s_dma_pool, pmap, pmap_dma); 3063 3064 if (rval != QLA_SUCCESS) { 3065 ql_dbg(ql_dbg_mbx, vha, 0x1082, "Failed=%x.\n", rval); 3066 } else { 3067 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1083, 3068 "Done %s.\n", __func__); 3069 } 3070 3071 return rval; 3072 } 3073 3074 /* 3075 * qla2x00_get_link_status 3076 * 3077 * Input: 3078 * ha = adapter block pointer. 3079 * loop_id = device loop ID. 3080 * ret_buf = pointer to link status return buffer. 3081 * 3082 * Returns: 3083 * 0 = success. 3084 * BIT_0 = mem alloc error. 3085 * BIT_1 = mailbox error. 3086 */ 3087 int 3088 qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id, 3089 struct link_statistics *stats, dma_addr_t stats_dma) 3090 { 3091 int rval; 3092 mbx_cmd_t mc; 3093 mbx_cmd_t *mcp = &mc; 3094 uint32_t *iter = (uint32_t *)stats; 3095 ushort dwords = offsetof(typeof(*stats), link_up_cnt)/sizeof(*iter); 3096 struct qla_hw_data *ha = vha->hw; 3097 3098 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1084, 3099 "Entered %s.\n", __func__); 3100 3101 mcp->mb[0] = MBC_GET_LINK_STATUS; 3102 mcp->mb[2] = MSW(LSD(stats_dma)); 3103 mcp->mb[3] = LSW(LSD(stats_dma)); 3104 mcp->mb[6] = MSW(MSD(stats_dma)); 3105 mcp->mb[7] = LSW(MSD(stats_dma)); 3106 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0; 3107 mcp->in_mb = MBX_0; 3108 if (IS_FWI2_CAPABLE(ha)) { 3109 mcp->mb[1] = loop_id; 3110 mcp->mb[4] = 0; 3111 mcp->mb[10] = 0; 3112 mcp->out_mb |= MBX_10|MBX_4|MBX_1; 3113 mcp->in_mb |= MBX_1; 3114 } else if (HAS_EXTENDED_IDS(ha)) { 3115 mcp->mb[1] = loop_id; 3116 mcp->mb[10] = 0; 3117 mcp->out_mb |= MBX_10|MBX_1; 3118 } else { 3119 mcp->mb[1] = loop_id << 8; 3120 mcp->out_mb |= MBX_1; 3121 } 3122 mcp->tov = MBX_TOV_SECONDS; 3123 mcp->flags = IOCTL_CMD; 3124 rval = qla2x00_mailbox_command(vha, mcp); 3125 3126 if (rval == QLA_SUCCESS) { 3127 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) { 3128 ql_dbg(ql_dbg_mbx, vha, 0x1085, 3129 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 3130 rval = QLA_FUNCTION_FAILED; 3131 } else { 3132 /* Re-endianize - firmware data is le32. 
*/ 3133 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1086, 3134 "Done %s.\n", __func__); 3135 for ( ; dwords--; iter++) 3136 le32_to_cpus(iter); 3137 } 3138 } else { 3139 /* Failed. */ 3140 ql_dbg(ql_dbg_mbx, vha, 0x1087, "Failed=%x.\n", rval); 3141 } 3142 3143 return rval; 3144 } 3145 3146 int 3147 qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats, 3148 dma_addr_t stats_dma, uint16_t options) 3149 { 3150 int rval; 3151 mbx_cmd_t mc; 3152 mbx_cmd_t *mcp = &mc; 3153 uint32_t *iter = (uint32_t *)stats; 3154 ushort dwords = sizeof(*stats)/sizeof(*iter); 3155 3156 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1088, 3157 "Entered %s.\n", __func__); 3158 3159 memset(&mc, 0, sizeof(mc)); 3160 mc.mb[0] = MBC_GET_LINK_PRIV_STATS; 3161 mc.mb[2] = MSW(LSD(stats_dma)); 3162 mc.mb[3] = LSW(LSD(stats_dma)); 3163 mc.mb[6] = MSW(MSD(stats_dma)); 3164 mc.mb[7] = LSW(MSD(stats_dma)); 3165 mc.mb[8] = dwords; 3166 mc.mb[9] = vha->vp_idx; 3167 mc.mb[10] = options; 3168 3169 rval = qla24xx_send_mb_cmd(vha, &mc); 3170 3171 if (rval == QLA_SUCCESS) { 3172 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) { 3173 ql_dbg(ql_dbg_mbx, vha, 0x1089, 3174 "Failed mb[0]=%x.\n", mcp->mb[0]); 3175 rval = QLA_FUNCTION_FAILED; 3176 } else { 3177 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108a, 3178 "Done %s.\n", __func__); 3179 /* Re-endianize - firmware data is le32. */ 3180 for ( ; dwords--; iter++) 3181 le32_to_cpus(iter); 3182 } 3183 } else { 3184 /* Failed. */ 3185 ql_dbg(ql_dbg_mbx, vha, 0x108b, "Failed=%x.\n", rval); 3186 } 3187 3188 return rval; 3189 } 3190 3191 int 3192 qla24xx_abort_command(srb_t *sp) 3193 { 3194 int rval; 3195 unsigned long flags = 0; 3196 3197 struct abort_entry_24xx *abt; 3198 dma_addr_t abt_dma; 3199 uint32_t handle; 3200 fc_port_t *fcport = sp->fcport; 3201 struct scsi_qla_host *vha = fcport->vha; 3202 struct qla_hw_data *ha = vha->hw; 3203 struct req_que *req = vha->req; 3204 struct qla_qpair *qpair = sp->qpair; 3205 3206 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c, 3207 "Entered %s.\n", __func__); 3208 3209 if (sp->qpair) 3210 req = sp->qpair->req; 3211 else 3212 return QLA_FUNCTION_FAILED; 3213 3214 if (ql2xasynctmfenable) 3215 return qla24xx_async_abort_command(sp); 3216 3217 spin_lock_irqsave(qpair->qp_lock_ptr, flags); 3218 for (handle = 1; handle < req->num_outstanding_cmds; handle++) { 3219 if (req->outstanding_cmds[handle] == sp) 3220 break; 3221 } 3222 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); 3223 if (handle == req->num_outstanding_cmds) { 3224 /* Command not found. 
*/ 3225 return QLA_FUNCTION_FAILED; 3226 } 3227 3228 abt = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &abt_dma); 3229 if (abt == NULL) { 3230 ql_log(ql_log_warn, vha, 0x108d, 3231 "Failed to allocate abort IOCB.\n"); 3232 return QLA_MEMORY_ALLOC_FAILED; 3233 } 3234 3235 abt->entry_type = ABORT_IOCB_TYPE; 3236 abt->entry_count = 1; 3237 abt->handle = make_handle(req->id, abt->handle); 3238 abt->nport_handle = cpu_to_le16(fcport->loop_id); 3239 abt->handle_to_abort = make_handle(req->id, handle); 3240 abt->port_id[0] = fcport->d_id.b.al_pa; 3241 abt->port_id[1] = fcport->d_id.b.area; 3242 abt->port_id[2] = fcport->d_id.b.domain; 3243 abt->vp_index = fcport->vha->vp_idx; 3244 3245 abt->req_que_no = cpu_to_le16(req->id); 3246 /* Need to pass original sp */ 3247 qla_nvme_abort_set_option(abt, sp); 3248 3249 rval = qla2x00_issue_iocb(vha, abt, abt_dma, 0); 3250 if (rval != QLA_SUCCESS) { 3251 ql_dbg(ql_dbg_mbx, vha, 0x108e, 3252 "Failed to issue IOCB (%x).\n", rval); 3253 } else if (abt->entry_status != 0) { 3254 ql_dbg(ql_dbg_mbx, vha, 0x108f, 3255 "Failed to complete IOCB -- error status (%x).\n", 3256 abt->entry_status); 3257 rval = QLA_FUNCTION_FAILED; 3258 } else if (abt->nport_handle != cpu_to_le16(0)) { 3259 ql_dbg(ql_dbg_mbx, vha, 0x1090, 3260 "Failed to complete IOCB -- completion status (%x).\n", 3261 le16_to_cpu(abt->nport_handle)); 3262 if (abt->nport_handle == cpu_to_le16(CS_IOCB_ERROR)) 3263 rval = QLA_FUNCTION_PARAMETER_ERROR; 3264 else 3265 rval = QLA_FUNCTION_FAILED; 3266 } else { 3267 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1091, 3268 "Done %s.\n", __func__); 3269 } 3270 if (rval == QLA_SUCCESS) 3271 qla_nvme_abort_process_comp_status(abt, sp); 3272 3273 qla_wait_nvme_release_cmd_kref(sp); 3274 3275 dma_pool_free(ha->s_dma_pool, abt, abt_dma); 3276 3277 return rval; 3278 } 3279 3280 struct tsk_mgmt_cmd { 3281 union { 3282 struct tsk_mgmt_entry tsk; 3283 struct sts_entry_24xx sts; 3284 } p; 3285 }; 3286 3287 static int 3288 __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport, 3289 uint64_t l, int tag) 3290 { 3291 int rval, rval2; 3292 struct tsk_mgmt_cmd *tsk; 3293 struct sts_entry_24xx *sts; 3294 dma_addr_t tsk_dma; 3295 scsi_qla_host_t *vha; 3296 struct qla_hw_data *ha; 3297 struct req_que *req; 3298 struct qla_qpair *qpair; 3299 3300 vha = fcport->vha; 3301 ha = vha->hw; 3302 req = vha->req; 3303 3304 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1092, 3305 "Entered %s.\n", __func__); 3306 3307 if (vha->vp_idx && vha->qpair) { 3308 /* NPIV port */ 3309 qpair = vha->qpair; 3310 req = qpair->req; 3311 } 3312 3313 tsk = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma); 3314 if (tsk == NULL) { 3315 ql_log(ql_log_warn, vha, 0x1093, 3316 "Failed to allocate task management IOCB.\n"); 3317 return QLA_MEMORY_ALLOC_FAILED; 3318 } 3319 3320 tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE; 3321 tsk->p.tsk.entry_count = 1; 3322 tsk->p.tsk.handle = make_handle(req->id, tsk->p.tsk.handle); 3323 tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id); 3324 tsk->p.tsk.timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); 3325 tsk->p.tsk.control_flags = cpu_to_le32(type); 3326 tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa; 3327 tsk->p.tsk.port_id[1] = fcport->d_id.b.area; 3328 tsk->p.tsk.port_id[2] = fcport->d_id.b.domain; 3329 tsk->p.tsk.vp_index = fcport->vha->vp_idx; 3330 if (type == TCF_LUN_RESET) { 3331 int_to_scsilun(l, &tsk->p.tsk.lun); 3332 host_to_fcp_swap((uint8_t *)&tsk->p.tsk.lun, 3333 sizeof(tsk->p.tsk.lun)); 3334 } 3335 3336 sts = &tsk->p.sts; 3337 rval = 
qla2x00_issue_iocb(vha, tsk, tsk_dma, 0); 3338 if (rval != QLA_SUCCESS) { 3339 ql_dbg(ql_dbg_mbx, vha, 0x1094, 3340 "Failed to issue %s reset IOCB (%x).\n", name, rval); 3341 } else if (sts->entry_status != 0) { 3342 ql_dbg(ql_dbg_mbx, vha, 0x1095, 3343 "Failed to complete IOCB -- error status (%x).\n", 3344 sts->entry_status); 3345 rval = QLA_FUNCTION_FAILED; 3346 } else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) { 3347 ql_dbg(ql_dbg_mbx, vha, 0x1096, 3348 "Failed to complete IOCB -- completion status (%x).\n", 3349 le16_to_cpu(sts->comp_status)); 3350 rval = QLA_FUNCTION_FAILED; 3351 } else if (le16_to_cpu(sts->scsi_status) & 3352 SS_RESPONSE_INFO_LEN_VALID) { 3353 if (le32_to_cpu(sts->rsp_data_len) < 4) { 3354 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1097, 3355 "Ignoring inconsistent data length -- not enough " 3356 "response info (%d).\n", 3357 le32_to_cpu(sts->rsp_data_len)); 3358 } else if (sts->data[3]) { 3359 ql_dbg(ql_dbg_mbx, vha, 0x1098, 3360 "Failed to complete IOCB -- response (%x).\n", 3361 sts->data[3]); 3362 rval = QLA_FUNCTION_FAILED; 3363 } 3364 } 3365 3366 /* Issue marker IOCB. */ 3367 rval2 = qla2x00_marker(vha, ha->base_qpair, fcport->loop_id, l, 3368 type == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID); 3369 if (rval2 != QLA_SUCCESS) { 3370 ql_dbg(ql_dbg_mbx, vha, 0x1099, 3371 "Failed to issue marker IOCB (%x).\n", rval2); 3372 } else { 3373 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109a, 3374 "Done %s.\n", __func__); 3375 } 3376 3377 dma_pool_free(ha->s_dma_pool, tsk, tsk_dma); 3378 3379 return rval; 3380 } 3381 3382 int 3383 qla24xx_abort_target(struct fc_port *fcport, uint64_t l, int tag) 3384 { 3385 struct qla_hw_data *ha = fcport->vha->hw; 3386 3387 if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha)) 3388 return qla2x00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag); 3389 3390 return __qla24xx_issue_tmf("Target", TCF_TARGET_RESET, fcport, l, tag); 3391 } 3392 3393 int 3394 qla24xx_lun_reset(struct fc_port *fcport, uint64_t l, int tag) 3395 { 3396 struct qla_hw_data *ha = fcport->vha->hw; 3397 3398 if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha)) 3399 return qla2x00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag); 3400 3401 return __qla24xx_issue_tmf("Lun", TCF_LUN_RESET, fcport, l, tag); 3402 } 3403 3404 int 3405 qla2x00_system_error(scsi_qla_host_t *vha) 3406 { 3407 int rval; 3408 mbx_cmd_t mc; 3409 mbx_cmd_t *mcp = &mc; 3410 struct qla_hw_data *ha = vha->hw; 3411 3412 if (!IS_QLA23XX(ha) && !IS_FWI2_CAPABLE(ha)) 3413 return QLA_FUNCTION_FAILED; 3414 3415 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109b, 3416 "Entered %s.\n", __func__); 3417 3418 mcp->mb[0] = MBC_GEN_SYSTEM_ERROR; 3419 mcp->out_mb = MBX_0; 3420 mcp->in_mb = MBX_0; 3421 mcp->tov = 5; 3422 mcp->flags = 0; 3423 rval = qla2x00_mailbox_command(vha, mcp); 3424 3425 if (rval != QLA_SUCCESS) { 3426 ql_dbg(ql_dbg_mbx, vha, 0x109c, "Failed=%x.\n", rval); 3427 } else { 3428 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109d, 3429 "Done %s.\n", __func__); 3430 } 3431 3432 return rval; 3433 } 3434 3435 int 3436 qla2x00_write_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t data) 3437 { 3438 int rval; 3439 mbx_cmd_t mc; 3440 mbx_cmd_t *mcp = &mc; 3441 3442 if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) && 3443 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw)) 3444 return QLA_FUNCTION_FAILED; 3445 3446 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1182, 3447 "Entered %s.\n", __func__); 3448 3449 mcp->mb[0] = MBC_WRITE_SERDES; 3450 mcp->mb[1] = addr; 3451 if (IS_QLA2031(vha->hw)) 3452 mcp->mb[2] = data & 
0xff; 3453 else 3454 mcp->mb[2] = data; 3455 3456 mcp->mb[3] = 0; 3457 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 3458 mcp->in_mb = MBX_0; 3459 mcp->tov = MBX_TOV_SECONDS; 3460 mcp->flags = 0; 3461 rval = qla2x00_mailbox_command(vha, mcp); 3462 3463 if (rval != QLA_SUCCESS) { 3464 ql_dbg(ql_dbg_mbx, vha, 0x1183, 3465 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 3466 } else { 3467 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1184, 3468 "Done %s.\n", __func__); 3469 } 3470 3471 return rval; 3472 } 3473 3474 int 3475 qla2x00_read_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t *data) 3476 { 3477 int rval; 3478 mbx_cmd_t mc; 3479 mbx_cmd_t *mcp = &mc; 3480 3481 if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) && 3482 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw)) 3483 return QLA_FUNCTION_FAILED; 3484 3485 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1185, 3486 "Entered %s.\n", __func__); 3487 3488 mcp->mb[0] = MBC_READ_SERDES; 3489 mcp->mb[1] = addr; 3490 mcp->mb[3] = 0; 3491 mcp->out_mb = MBX_3|MBX_1|MBX_0; 3492 mcp->in_mb = MBX_1|MBX_0; 3493 mcp->tov = MBX_TOV_SECONDS; 3494 mcp->flags = 0; 3495 rval = qla2x00_mailbox_command(vha, mcp); 3496 3497 if (IS_QLA2031(vha->hw)) 3498 *data = mcp->mb[1] & 0xff; 3499 else 3500 *data = mcp->mb[1]; 3501 3502 if (rval != QLA_SUCCESS) { 3503 ql_dbg(ql_dbg_mbx, vha, 0x1186, 3504 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 3505 } else { 3506 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1187, 3507 "Done %s.\n", __func__); 3508 } 3509 3510 return rval; 3511 } 3512 3513 int 3514 qla8044_write_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t data) 3515 { 3516 int rval; 3517 mbx_cmd_t mc; 3518 mbx_cmd_t *mcp = &mc; 3519 3520 if (!IS_QLA8044(vha->hw)) 3521 return QLA_FUNCTION_FAILED; 3522 3523 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x11a0, 3524 "Entered %s.\n", __func__); 3525 3526 mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG; 3527 mcp->mb[1] = HCS_WRITE_SERDES; 3528 mcp->mb[3] = LSW(addr); 3529 mcp->mb[4] = MSW(addr); 3530 mcp->mb[5] = LSW(data); 3531 mcp->mb[6] = MSW(data); 3532 mcp->out_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_1|MBX_0; 3533 mcp->in_mb = MBX_0; 3534 mcp->tov = MBX_TOV_SECONDS; 3535 mcp->flags = 0; 3536 rval = qla2x00_mailbox_command(vha, mcp); 3537 3538 if (rval != QLA_SUCCESS) { 3539 ql_dbg(ql_dbg_mbx, vha, 0x11a1, 3540 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 3541 } else { 3542 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1188, 3543 "Done %s.\n", __func__); 3544 } 3545 3546 return rval; 3547 } 3548 3549 int 3550 qla8044_read_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t *data) 3551 { 3552 int rval; 3553 mbx_cmd_t mc; 3554 mbx_cmd_t *mcp = &mc; 3555 3556 if (!IS_QLA8044(vha->hw)) 3557 return QLA_FUNCTION_FAILED; 3558 3559 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1189, 3560 "Entered %s.\n", __func__); 3561 3562 mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG; 3563 mcp->mb[1] = HCS_READ_SERDES; 3564 mcp->mb[3] = LSW(addr); 3565 mcp->mb[4] = MSW(addr); 3566 mcp->out_mb = MBX_4|MBX_3|MBX_1|MBX_0; 3567 mcp->in_mb = MBX_2|MBX_1|MBX_0; 3568 mcp->tov = MBX_TOV_SECONDS; 3569 mcp->flags = 0; 3570 rval = qla2x00_mailbox_command(vha, mcp); 3571 3572 *data = mcp->mb[2] << 16 | mcp->mb[1]; 3573 3574 if (rval != QLA_SUCCESS) { 3575 ql_dbg(ql_dbg_mbx, vha, 0x118a, 3576 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 3577 } else { 3578 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118b, 3579 "Done %s.\n", __func__); 3580 } 3581 3582 return rval; 3583 } 3584 3585 /** 3586 * qla2x00_set_serdes_params() - 3587 * @vha: HA context 3588 * @sw_em_1g: serial link options 
3589 * @sw_em_2g: serial link options 3590 * @sw_em_4g: serial link options 3591 * 3592 * Returns 3593 */ 3594 int 3595 qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g, 3596 uint16_t sw_em_2g, uint16_t sw_em_4g) 3597 { 3598 int rval; 3599 mbx_cmd_t mc; 3600 mbx_cmd_t *mcp = &mc; 3601 3602 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109e, 3603 "Entered %s.\n", __func__); 3604 3605 mcp->mb[0] = MBC_SERDES_PARAMS; 3606 mcp->mb[1] = BIT_0; 3607 mcp->mb[2] = sw_em_1g | BIT_15; 3608 mcp->mb[3] = sw_em_2g | BIT_15; 3609 mcp->mb[4] = sw_em_4g | BIT_15; 3610 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 3611 mcp->in_mb = MBX_0; 3612 mcp->tov = MBX_TOV_SECONDS; 3613 mcp->flags = 0; 3614 rval = qla2x00_mailbox_command(vha, mcp); 3615 3616 if (rval != QLA_SUCCESS) { 3617 /*EMPTY*/ 3618 ql_dbg(ql_dbg_mbx, vha, 0x109f, 3619 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 3620 } else { 3621 /*EMPTY*/ 3622 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a0, 3623 "Done %s.\n", __func__); 3624 } 3625 3626 return rval; 3627 } 3628 3629 int 3630 qla2x00_stop_firmware(scsi_qla_host_t *vha) 3631 { 3632 int rval; 3633 mbx_cmd_t mc; 3634 mbx_cmd_t *mcp = &mc; 3635 3636 if (!IS_FWI2_CAPABLE(vha->hw)) 3637 return QLA_FUNCTION_FAILED; 3638 3639 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a1, 3640 "Entered %s.\n", __func__); 3641 3642 mcp->mb[0] = MBC_STOP_FIRMWARE; 3643 mcp->mb[1] = 0; 3644 mcp->out_mb = MBX_1|MBX_0; 3645 mcp->in_mb = MBX_0; 3646 mcp->tov = 5; 3647 mcp->flags = 0; 3648 rval = qla2x00_mailbox_command(vha, mcp); 3649 3650 if (rval != QLA_SUCCESS) { 3651 ql_dbg(ql_dbg_mbx, vha, 0x10a2, "Failed=%x.\n", rval); 3652 if (mcp->mb[0] == MBS_INVALID_COMMAND) 3653 rval = QLA_INVALID_COMMAND; 3654 } else { 3655 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a3, 3656 "Done %s.\n", __func__); 3657 } 3658 3659 return rval; 3660 } 3661 3662 int 3663 qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma, 3664 uint16_t buffers) 3665 { 3666 int rval; 3667 mbx_cmd_t mc; 3668 mbx_cmd_t *mcp = &mc; 3669 3670 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a4, 3671 "Entered %s.\n", __func__); 3672 3673 if (!IS_FWI2_CAPABLE(vha->hw)) 3674 return QLA_FUNCTION_FAILED; 3675 3676 if (unlikely(pci_channel_offline(vha->hw->pdev))) 3677 return QLA_FUNCTION_FAILED; 3678 3679 mcp->mb[0] = MBC_TRACE_CONTROL; 3680 mcp->mb[1] = TC_EFT_ENABLE; 3681 mcp->mb[2] = LSW(eft_dma); 3682 mcp->mb[3] = MSW(eft_dma); 3683 mcp->mb[4] = LSW(MSD(eft_dma)); 3684 mcp->mb[5] = MSW(MSD(eft_dma)); 3685 mcp->mb[6] = buffers; 3686 mcp->mb[7] = TC_AEN_DISABLE; 3687 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 3688 mcp->in_mb = MBX_1|MBX_0; 3689 mcp->tov = MBX_TOV_SECONDS; 3690 mcp->flags = 0; 3691 rval = qla2x00_mailbox_command(vha, mcp); 3692 if (rval != QLA_SUCCESS) { 3693 ql_dbg(ql_dbg_mbx, vha, 0x10a5, 3694 "Failed=%x mb[0]=%x mb[1]=%x.\n", 3695 rval, mcp->mb[0], mcp->mb[1]); 3696 } else { 3697 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a6, 3698 "Done %s.\n", __func__); 3699 } 3700 3701 return rval; 3702 } 3703 3704 int 3705 qla2x00_disable_eft_trace(scsi_qla_host_t *vha) 3706 { 3707 int rval; 3708 mbx_cmd_t mc; 3709 mbx_cmd_t *mcp = &mc; 3710 3711 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a7, 3712 "Entered %s.\n", __func__); 3713 3714 if (!IS_FWI2_CAPABLE(vha->hw)) 3715 return QLA_FUNCTION_FAILED; 3716 3717 if (unlikely(pci_channel_offline(vha->hw->pdev))) 3718 return QLA_FUNCTION_FAILED; 3719 3720 mcp->mb[0] = MBC_TRACE_CONTROL; 3721 mcp->mb[1] = TC_EFT_DISABLE; 3722 mcp->out_mb = MBX_1|MBX_0; 3723 mcp->in_mb = 
MBX_1|MBX_0; 3724 mcp->tov = MBX_TOV_SECONDS; 3725 mcp->flags = 0; 3726 rval = qla2x00_mailbox_command(vha, mcp); 3727 if (rval != QLA_SUCCESS) { 3728 ql_dbg(ql_dbg_mbx, vha, 0x10a8, 3729 "Failed=%x mb[0]=%x mb[1]=%x.\n", 3730 rval, mcp->mb[0], mcp->mb[1]); 3731 } else { 3732 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a9, 3733 "Done %s.\n", __func__); 3734 } 3735 3736 return rval; 3737 } 3738 3739 int 3740 qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma, 3741 uint16_t buffers, uint16_t *mb, uint32_t *dwords) 3742 { 3743 int rval; 3744 mbx_cmd_t mc; 3745 mbx_cmd_t *mcp = &mc; 3746 3747 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10aa, 3748 "Entered %s.\n", __func__); 3749 3750 if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw) && 3751 !IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) && 3752 !IS_QLA28XX(vha->hw)) 3753 return QLA_FUNCTION_FAILED; 3754 3755 if (unlikely(pci_channel_offline(vha->hw->pdev))) 3756 return QLA_FUNCTION_FAILED; 3757 3758 mcp->mb[0] = MBC_TRACE_CONTROL; 3759 mcp->mb[1] = TC_FCE_ENABLE; 3760 mcp->mb[2] = LSW(fce_dma); 3761 mcp->mb[3] = MSW(fce_dma); 3762 mcp->mb[4] = LSW(MSD(fce_dma)); 3763 mcp->mb[5] = MSW(MSD(fce_dma)); 3764 mcp->mb[6] = buffers; 3765 mcp->mb[7] = TC_AEN_DISABLE; 3766 mcp->mb[8] = 0; 3767 mcp->mb[9] = TC_FCE_DEFAULT_RX_SIZE; 3768 mcp->mb[10] = TC_FCE_DEFAULT_TX_SIZE; 3769 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2| 3770 MBX_1|MBX_0; 3771 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 3772 mcp->tov = MBX_TOV_SECONDS; 3773 mcp->flags = 0; 3774 rval = qla2x00_mailbox_command(vha, mcp); 3775 if (rval != QLA_SUCCESS) { 3776 ql_dbg(ql_dbg_mbx, vha, 0x10ab, 3777 "Failed=%x mb[0]=%x mb[1]=%x.\n", 3778 rval, mcp->mb[0], mcp->mb[1]); 3779 } else { 3780 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ac, 3781 "Done %s.\n", __func__); 3782 3783 if (mb) 3784 memcpy(mb, mcp->mb, 8 * sizeof(*mb)); 3785 if (dwords) 3786 *dwords = buffers; 3787 } 3788 3789 return rval; 3790 } 3791 3792 int 3793 qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd) 3794 { 3795 int rval; 3796 mbx_cmd_t mc; 3797 mbx_cmd_t *mcp = &mc; 3798 3799 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ad, 3800 "Entered %s.\n", __func__); 3801 3802 if (!IS_FWI2_CAPABLE(vha->hw)) 3803 return QLA_FUNCTION_FAILED; 3804 3805 if (unlikely(pci_channel_offline(vha->hw->pdev))) 3806 return QLA_FUNCTION_FAILED; 3807 3808 mcp->mb[0] = MBC_TRACE_CONTROL; 3809 mcp->mb[1] = TC_FCE_DISABLE; 3810 mcp->mb[2] = TC_FCE_DISABLE_TRACE; 3811 mcp->out_mb = MBX_2|MBX_1|MBX_0; 3812 mcp->in_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2| 3813 MBX_1|MBX_0; 3814 mcp->tov = MBX_TOV_SECONDS; 3815 mcp->flags = 0; 3816 rval = qla2x00_mailbox_command(vha, mcp); 3817 if (rval != QLA_SUCCESS) { 3818 ql_dbg(ql_dbg_mbx, vha, 0x10ae, 3819 "Failed=%x mb[0]=%x mb[1]=%x.\n", 3820 rval, mcp->mb[0], mcp->mb[1]); 3821 } else { 3822 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10af, 3823 "Done %s.\n", __func__); 3824 3825 if (wr) 3826 *wr = (uint64_t) mcp->mb[5] << 48 | 3827 (uint64_t) mcp->mb[4] << 32 | 3828 (uint64_t) mcp->mb[3] << 16 | 3829 (uint64_t) mcp->mb[2]; 3830 if (rd) 3831 *rd = (uint64_t) mcp->mb[9] << 48 | 3832 (uint64_t) mcp->mb[8] << 32 | 3833 (uint64_t) mcp->mb[7] << 16 | 3834 (uint64_t) mcp->mb[6]; 3835 } 3836 3837 return rval; 3838 } 3839 3840 int 3841 qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id, 3842 uint16_t *port_speed, uint16_t *mb) 3843 { 3844 int rval; 3845 mbx_cmd_t mc; 3846 mbx_cmd_t *mcp = &mc; 3847 3848 ql_dbg(ql_dbg_mbx + 
ql_dbg_verbose, vha, 0x10b0, 3849 "Entered %s.\n", __func__); 3850 3851 if (!IS_IIDMA_CAPABLE(vha->hw)) 3852 return QLA_FUNCTION_FAILED; 3853 3854 mcp->mb[0] = MBC_PORT_PARAMS; 3855 mcp->mb[1] = loop_id; 3856 mcp->mb[2] = mcp->mb[3] = 0; 3857 mcp->mb[9] = vha->vp_idx; 3858 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0; 3859 mcp->in_mb = MBX_3|MBX_1|MBX_0; 3860 mcp->tov = MBX_TOV_SECONDS; 3861 mcp->flags = 0; 3862 rval = qla2x00_mailbox_command(vha, mcp); 3863 3864 /* Return mailbox statuses. */ 3865 if (mb) { 3866 mb[0] = mcp->mb[0]; 3867 mb[1] = mcp->mb[1]; 3868 mb[3] = mcp->mb[3]; 3869 } 3870 3871 if (rval != QLA_SUCCESS) { 3872 ql_dbg(ql_dbg_mbx, vha, 0x10b1, "Failed=%x.\n", rval); 3873 } else { 3874 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b2, 3875 "Done %s.\n", __func__); 3876 if (port_speed) 3877 *port_speed = mcp->mb[3]; 3878 } 3879 3880 return rval; 3881 } 3882 3883 int 3884 qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id, 3885 uint16_t port_speed, uint16_t *mb) 3886 { 3887 int rval; 3888 mbx_cmd_t mc; 3889 mbx_cmd_t *mcp = &mc; 3890 3891 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b3, 3892 "Entered %s.\n", __func__); 3893 3894 if (!IS_IIDMA_CAPABLE(vha->hw)) 3895 return QLA_FUNCTION_FAILED; 3896 3897 mcp->mb[0] = MBC_PORT_PARAMS; 3898 mcp->mb[1] = loop_id; 3899 mcp->mb[2] = BIT_0; 3900 mcp->mb[3] = port_speed & 0x3F; 3901 mcp->mb[9] = vha->vp_idx; 3902 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0; 3903 mcp->in_mb = MBX_3|MBX_1|MBX_0; 3904 mcp->tov = MBX_TOV_SECONDS; 3905 mcp->flags = 0; 3906 rval = qla2x00_mailbox_command(vha, mcp); 3907 3908 /* Return mailbox statuses. */ 3909 if (mb) { 3910 mb[0] = mcp->mb[0]; 3911 mb[1] = mcp->mb[1]; 3912 mb[3] = mcp->mb[3]; 3913 } 3914 3915 if (rval != QLA_SUCCESS) { 3916 ql_dbg(ql_dbg_mbx, vha, 0x10b4, 3917 "Failed=%x.\n", rval); 3918 } else { 3919 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b5, 3920 "Done %s.\n", __func__); 3921 } 3922 3923 return rval; 3924 } 3925 3926 void 3927 qla24xx_report_id_acquisition(scsi_qla_host_t *vha, 3928 struct vp_rpt_id_entry_24xx *rptid_entry) 3929 { 3930 struct qla_hw_data *ha = vha->hw; 3931 scsi_qla_host_t *vp = NULL; 3932 unsigned long flags; 3933 int found; 3934 port_id_t id; 3935 struct fc_port *fcport; 3936 3937 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6, 3938 "Entered %s.\n", __func__); 3939 3940 if (rptid_entry->entry_status != 0) 3941 return; 3942 3943 id.b.domain = rptid_entry->port_id[2]; 3944 id.b.area = rptid_entry->port_id[1]; 3945 id.b.al_pa = rptid_entry->port_id[0]; 3946 id.b.rsvd_1 = 0; 3947 ha->flags.n2n_ae = 0; 3948 3949 if (rptid_entry->format == 0) { 3950 /* loop */ 3951 ql_dbg(ql_dbg_async, vha, 0x10b7, 3952 "Format 0 : Number of VPs setup %d, number of " 3953 "VPs acquired %d.\n", rptid_entry->vp_setup, 3954 rptid_entry->vp_acquired); 3955 ql_dbg(ql_dbg_async, vha, 0x10b8, 3956 "Primary port id %02x%02x%02x.\n", 3957 rptid_entry->port_id[2], rptid_entry->port_id[1], 3958 rptid_entry->port_id[0]); 3959 ha->current_topology = ISP_CFG_NL; 3960 qlt_update_host_map(vha, id); 3961 3962 } else if (rptid_entry->format == 1) { 3963 /* fabric */ 3964 ql_dbg(ql_dbg_async, vha, 0x10b9, 3965 "Format 1: VP[%d] enabled - status %d - with " 3966 "port id %02x%02x%02x.\n", rptid_entry->vp_idx, 3967 rptid_entry->vp_status, 3968 rptid_entry->port_id[2], rptid_entry->port_id[1], 3969 rptid_entry->port_id[0]); 3970 ql_dbg(ql_dbg_async, vha, 0x5075, 3971 "Format 1: Remote WWPN %8phC.\n", 3972 rptid_entry->u.f1.port_name); 3973 3974 ql_dbg(ql_dbg_async, vha, 0x5075, 3975 "Format 1: 
WWPN %8phC.\n", 3976 vha->port_name); 3977 3978 switch (rptid_entry->u.f1.flags & TOPO_MASK) { 3979 case TOPO_N2N: 3980 ha->current_topology = ISP_CFG_N; 3981 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); 3982 list_for_each_entry(fcport, &vha->vp_fcports, list) { 3983 fcport->scan_state = QLA_FCPORT_SCAN; 3984 fcport->n2n_flag = 0; 3985 } 3986 id.b24 = 0; 3987 if (wwn_to_u64(vha->port_name) > 3988 wwn_to_u64(rptid_entry->u.f1.port_name)) { 3989 vha->d_id.b24 = 0; 3990 vha->d_id.b.al_pa = 1; 3991 ha->flags.n2n_bigger = 1; 3992 3993 id.b.al_pa = 2; 3994 ql_dbg(ql_dbg_async, vha, 0x5075, 3995 "Format 1: assign local id %x remote id %x\n", 3996 vha->d_id.b24, id.b24); 3997 } else { 3998 ql_dbg(ql_dbg_async, vha, 0x5075, 3999 "Format 1: Remote login - Waiting for WWPN %8phC.\n", 4000 rptid_entry->u.f1.port_name); 4001 ha->flags.n2n_bigger = 0; 4002 } 4003 4004 fcport = qla2x00_find_fcport_by_wwpn(vha, 4005 rptid_entry->u.f1.port_name, 1); 4006 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 4007 4008 4009 if (fcport) { 4010 fcport->plogi_nack_done_deadline = jiffies + HZ; 4011 fcport->dm_login_expire = jiffies + 4012 QLA_N2N_WAIT_TIME * HZ; 4013 fcport->scan_state = QLA_FCPORT_FOUND; 4014 fcport->n2n_flag = 1; 4015 fcport->keep_nport_handle = 1; 4016 4017 if (wwn_to_u64(vha->port_name) > 4018 wwn_to_u64(fcport->port_name)) { 4019 fcport->d_id = id; 4020 } 4021 4022 switch (fcport->disc_state) { 4023 case DSC_DELETED: 4024 set_bit(RELOGIN_NEEDED, 4025 &vha->dpc_flags); 4026 break; 4027 case DSC_DELETE_PEND: 4028 break; 4029 default: 4030 qlt_schedule_sess_for_deletion(fcport); 4031 break; 4032 } 4033 } else { 4034 qla24xx_post_newsess_work(vha, &id, 4035 rptid_entry->u.f1.port_name, 4036 rptid_entry->u.f1.node_name, 4037 NULL, 4038 FS_FCP_IS_N2N); 4039 } 4040 4041 /* if our portname is higher then initiate N2N login */ 4042 4043 set_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags); 4044 return; 4045 case TOPO_FL: 4046 ha->current_topology = ISP_CFG_FL; 4047 break; 4048 case TOPO_F: 4049 ha->current_topology = ISP_CFG_F; 4050 break; 4051 default: 4052 break; 4053 } 4054 4055 ha->flags.gpsc_supported = 1; 4056 ha->current_topology = ISP_CFG_F; 4057 /* buffer to buffer credit flag */ 4058 vha->flags.bbcr_enable = (rptid_entry->u.f1.bbcr & 0xf) != 0; 4059 4060 if (rptid_entry->vp_idx == 0) { 4061 if (rptid_entry->vp_status == VP_STAT_COMPL) { 4062 /* FA-WWN is only for physical port */ 4063 if (qla_ini_mode_enabled(vha) && 4064 ha->flags.fawwpn_enabled && 4065 (rptid_entry->u.f1.flags & 4066 BIT_6)) { 4067 memcpy(vha->port_name, 4068 rptid_entry->u.f1.port_name, 4069 WWN_SIZE); 4070 } 4071 4072 qlt_update_host_map(vha, id); 4073 } 4074 4075 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags); 4076 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags); 4077 } else { 4078 if (rptid_entry->vp_status != VP_STAT_COMPL && 4079 rptid_entry->vp_status != VP_STAT_ID_CHG) { 4080 ql_dbg(ql_dbg_mbx, vha, 0x10ba, 4081 "Could not acquire ID for VP[%d].\n", 4082 rptid_entry->vp_idx); 4083 return; 4084 } 4085 4086 found = 0; 4087 spin_lock_irqsave(&ha->vport_slock, flags); 4088 list_for_each_entry(vp, &ha->vp_list, list) { 4089 if (rptid_entry->vp_idx == vp->vp_idx) { 4090 found = 1; 4091 break; 4092 } 4093 } 4094 spin_unlock_irqrestore(&ha->vport_slock, flags); 4095 4096 if (!found) 4097 return; 4098 4099 qlt_update_host_map(vp, id); 4100 4101 /* 4102 * Cannot configure here as we are still sitting on the 4103 * response queue. Handle it in dpc context. 
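 *
 * A sketch of the deferral pattern used throughout this handler
 * (the DPC thread -- assumed to be qla2x00_do_dpc() -- later acts on
 * these bits from process context):
 *
 *	set_bit(REGISTER_FDMI_NEEDED, &vp->dpc_flags);
 *	set_bit(VP_DPC_NEEDED, &vha->dpc_flags);
 *	qla2xxx_wake_dpc(vha);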
4104 */ 4105 set_bit(VP_IDX_ACQUIRED, &vp->vp_flags); 4106 set_bit(REGISTER_FC4_NEEDED, &vp->dpc_flags); 4107 set_bit(REGISTER_FDMI_NEEDED, &vp->dpc_flags); 4108 } 4109 set_bit(VP_DPC_NEEDED, &vha->dpc_flags); 4110 qla2xxx_wake_dpc(vha); 4111 } else if (rptid_entry->format == 2) { 4112 ql_dbg(ql_dbg_async, vha, 0x505f, 4113 "RIDA: format 2/N2N Primary port id %02x%02x%02x.\n", 4114 rptid_entry->port_id[2], rptid_entry->port_id[1], 4115 rptid_entry->port_id[0]); 4116 4117 ql_dbg(ql_dbg_async, vha, 0x5075, 4118 "N2N: Remote WWPN %8phC.\n", 4119 rptid_entry->u.f2.port_name); 4120 4121 /* N2N. direct connect */ 4122 ha->current_topology = ISP_CFG_N; 4123 ha->flags.rida_fmt2 = 1; 4124 vha->d_id.b.domain = rptid_entry->port_id[2]; 4125 vha->d_id.b.area = rptid_entry->port_id[1]; 4126 vha->d_id.b.al_pa = rptid_entry->port_id[0]; 4127 4128 ha->flags.n2n_ae = 1; 4129 spin_lock_irqsave(&ha->vport_slock, flags); 4130 qlt_update_vp_map(vha, SET_AL_PA); 4131 spin_unlock_irqrestore(&ha->vport_slock, flags); 4132 4133 list_for_each_entry(fcport, &vha->vp_fcports, list) { 4134 fcport->scan_state = QLA_FCPORT_SCAN; 4135 fcport->n2n_flag = 0; 4136 } 4137 4138 fcport = qla2x00_find_fcport_by_wwpn(vha, 4139 rptid_entry->u.f2.port_name, 1); 4140 4141 if (fcport) { 4142 fcport->login_retry = vha->hw->login_retry_count; 4143 fcport->plogi_nack_done_deadline = jiffies + HZ; 4144 fcport->scan_state = QLA_FCPORT_FOUND; 4145 fcport->keep_nport_handle = 1; 4146 fcport->n2n_flag = 1; 4147 fcport->d_id.b.domain = 4148 rptid_entry->u.f2.remote_nport_id[2]; 4149 fcport->d_id.b.area = 4150 rptid_entry->u.f2.remote_nport_id[1]; 4151 fcport->d_id.b.al_pa = 4152 rptid_entry->u.f2.remote_nport_id[0]; 4153 } 4154 } 4155 } 4156 4157 /* 4158 * qla24xx_modify_vp_config 4159 * Change VP configuration for vha 4160 * 4161 * Input: 4162 * vha = adapter block pointer. 4163 * 4164 * Returns: 4165 * qla2xxx local function return status code. 4166 * 4167 * Context: 4168 * Kernel context. 
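 *
 * Note:
 *	The VP config IOCB is issued through the base (physical) port on
 *	behalf of the virtual port.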
4169 */ 4170 int 4171 qla24xx_modify_vp_config(scsi_qla_host_t *vha) 4172 { 4173 int rval; 4174 struct vp_config_entry_24xx *vpmod; 4175 dma_addr_t vpmod_dma; 4176 struct qla_hw_data *ha = vha->hw; 4177 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 4178 4179 /* This can be called by the parent */ 4180 4181 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10bb, 4182 "Entered %s.\n", __func__); 4183 4184 vpmod = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma); 4185 if (!vpmod) { 4186 ql_log(ql_log_warn, vha, 0x10bc, 4187 "Failed to allocate modify VP IOCB.\n"); 4188 return QLA_MEMORY_ALLOC_FAILED; 4189 } 4190 4191 vpmod->entry_type = VP_CONFIG_IOCB_TYPE; 4192 vpmod->entry_count = 1; 4193 vpmod->command = VCT_COMMAND_MOD_ENABLE_VPS; 4194 vpmod->vp_count = 1; 4195 vpmod->vp_index1 = vha->vp_idx; 4196 vpmod->options_idx1 = BIT_3|BIT_4|BIT_5; 4197 4198 qlt_modify_vp_config(vha, vpmod); 4199 4200 memcpy(vpmod->node_name_idx1, vha->node_name, WWN_SIZE); 4201 memcpy(vpmod->port_name_idx1, vha->port_name, WWN_SIZE); 4202 vpmod->entry_count = 1; 4203 4204 rval = qla2x00_issue_iocb(base_vha, vpmod, vpmod_dma, 0); 4205 if (rval != QLA_SUCCESS) { 4206 ql_dbg(ql_dbg_mbx, vha, 0x10bd, 4207 "Failed to issue VP config IOCB (%x).\n", rval); 4208 } else if (vpmod->comp_status != 0) { 4209 ql_dbg(ql_dbg_mbx, vha, 0x10be, 4210 "Failed to complete IOCB -- error status (%x).\n", 4211 vpmod->comp_status); 4212 rval = QLA_FUNCTION_FAILED; 4213 } else if (vpmod->comp_status != cpu_to_le16(CS_COMPLETE)) { 4214 ql_dbg(ql_dbg_mbx, vha, 0x10bf, 4215 "Failed to complete IOCB -- completion status (%x).\n", 4216 le16_to_cpu(vpmod->comp_status)); 4217 rval = QLA_FUNCTION_FAILED; 4218 } else { 4219 /* EMPTY */ 4220 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c0, 4221 "Done %s.\n", __func__); 4222 fc_vport_set_state(vha->fc_vport, FC_VPORT_INITIALIZING); 4223 } 4224 dma_pool_free(ha->s_dma_pool, vpmod, vpmod_dma); 4225 4226 return rval; 4227 } 4228 4229 /* 4230 * qla2x00_send_change_request 4231 * Receive or disable RSCN request from fabric controller 4232 * 4233 * Input: 4234 * ha = adapter block pointer 4235 * format = registration format: 4236 * 0 - Reserved 4237 * 1 - Fabric detected registration 4238 * 2 - N_port detected registration 4239 * 3 - Full registration 4240 * FF - clear registration 4241 * vp_idx = Virtual port index 4242 * 4243 * Returns: 4244 * qla2x00 local function return status code. 
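 *	BIT_1 is returned when the request is rejected or the mailbox
 *	command fails.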
4245 * 4246 * Context: 4247 * Kernel Context 4248 */ 4249 4250 int 4251 qla2x00_send_change_request(scsi_qla_host_t *vha, uint16_t format, 4252 uint16_t vp_idx) 4253 { 4254 int rval; 4255 mbx_cmd_t mc; 4256 mbx_cmd_t *mcp = &mc; 4257 4258 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c7, 4259 "Entered %s.\n", __func__); 4260 4261 mcp->mb[0] = MBC_SEND_CHANGE_REQUEST; 4262 mcp->mb[1] = format; 4263 mcp->mb[9] = vp_idx; 4264 mcp->out_mb = MBX_9|MBX_1|MBX_0; 4265 mcp->in_mb = MBX_0|MBX_1; 4266 mcp->tov = MBX_TOV_SECONDS; 4267 mcp->flags = 0; 4268 rval = qla2x00_mailbox_command(vha, mcp); 4269 4270 if (rval == QLA_SUCCESS) { 4271 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) { 4272 rval = BIT_1; 4273 } 4274 } else 4275 rval = BIT_1; 4276 4277 return rval; 4278 } 4279 4280 int 4281 qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr, 4282 uint32_t size) 4283 { 4284 int rval; 4285 mbx_cmd_t mc; 4286 mbx_cmd_t *mcp = &mc; 4287 4288 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1009, 4289 "Entered %s.\n", __func__); 4290 4291 if (MSW(addr) || IS_FWI2_CAPABLE(vha->hw)) { 4292 mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED; 4293 mcp->mb[8] = MSW(addr); 4294 mcp->mb[10] = 0; 4295 mcp->out_mb = MBX_10|MBX_8|MBX_0; 4296 } else { 4297 mcp->mb[0] = MBC_DUMP_RISC_RAM; 4298 mcp->out_mb = MBX_0; 4299 } 4300 mcp->mb[1] = LSW(addr); 4301 mcp->mb[2] = MSW(req_dma); 4302 mcp->mb[3] = LSW(req_dma); 4303 mcp->mb[6] = MSW(MSD(req_dma)); 4304 mcp->mb[7] = LSW(MSD(req_dma)); 4305 mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1; 4306 if (IS_FWI2_CAPABLE(vha->hw)) { 4307 mcp->mb[4] = MSW(size); 4308 mcp->mb[5] = LSW(size); 4309 mcp->out_mb |= MBX_5|MBX_4; 4310 } else { 4311 mcp->mb[4] = LSW(size); 4312 mcp->out_mb |= MBX_4; 4313 } 4314 4315 mcp->in_mb = MBX_0; 4316 mcp->tov = MBX_TOV_SECONDS; 4317 mcp->flags = 0; 4318 rval = qla2x00_mailbox_command(vha, mcp); 4319 4320 if (rval != QLA_SUCCESS) { 4321 ql_dbg(ql_dbg_mbx, vha, 0x1008, 4322 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4323 } else { 4324 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1007, 4325 "Done %s.\n", __func__); 4326 } 4327 4328 return rval; 4329 } 4330 /* 84XX Support **************************************************************/ 4331 4332 struct cs84xx_mgmt_cmd { 4333 union { 4334 struct verify_chip_entry_84xx req; 4335 struct verify_chip_rsp_84xx rsp; 4336 } p; 4337 }; 4338 4339 int 4340 qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status) 4341 { 4342 int rval, retry; 4343 struct cs84xx_mgmt_cmd *mn; 4344 dma_addr_t mn_dma; 4345 uint16_t options; 4346 unsigned long flags; 4347 struct qla_hw_data *ha = vha->hw; 4348 4349 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c8, 4350 "Entered %s.\n", __func__); 4351 4352 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma); 4353 if (mn == NULL) { 4354 return QLA_MEMORY_ALLOC_FAILED; 4355 } 4356 4357 /* Force Update? */ 4358 options = ha->cs84xx->fw_update ? VCO_FORCE_UPDATE : 0; 4359 /* Diagnostic firmware? */ 4360 /* options |= MENLO_DIAG_FW; */ 4361 /* We update the firmware with only one data sequence. 
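VCO_END_OF_DATA marks this as the last (and only) data segment of the verify/update request.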
*/ 4362 options |= VCO_END_OF_DATA; 4363 4364 do { 4365 retry = 0; 4366 memset(mn, 0, sizeof(*mn)); 4367 mn->p.req.entry_type = VERIFY_CHIP_IOCB_TYPE; 4368 mn->p.req.entry_count = 1; 4369 mn->p.req.options = cpu_to_le16(options); 4370 4371 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111c, 4372 "Dump of Verify Request.\n"); 4373 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111e, 4374 mn, sizeof(*mn)); 4375 4376 rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120); 4377 if (rval != QLA_SUCCESS) { 4378 ql_dbg(ql_dbg_mbx, vha, 0x10cb, 4379 "Failed to issue verify IOCB (%x).\n", rval); 4380 goto verify_done; 4381 } 4382 4383 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1110, 4384 "Dump of Verify Response.\n"); 4385 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1118, 4386 mn, sizeof(*mn)); 4387 4388 status[0] = le16_to_cpu(mn->p.rsp.comp_status); 4389 status[1] = status[0] == CS_VCS_CHIP_FAILURE ? 4390 le16_to_cpu(mn->p.rsp.failure_code) : 0; 4391 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ce, 4392 "cs=%x fc=%x.\n", status[0], status[1]); 4393 4394 if (status[0] != CS_COMPLETE) { 4395 rval = QLA_FUNCTION_FAILED; 4396 if (!(options & VCO_DONT_UPDATE_FW)) { 4397 ql_dbg(ql_dbg_mbx, vha, 0x10cf, 4398 "Firmware update failed. Retrying " 4399 "without update firmware.\n"); 4400 options |= VCO_DONT_UPDATE_FW; 4401 options &= ~VCO_FORCE_UPDATE; 4402 retry = 1; 4403 } 4404 } else { 4405 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d0, 4406 "Firmware updated to %x.\n", 4407 le32_to_cpu(mn->p.rsp.fw_ver)); 4408 4409 /* NOTE: we only update OP firmware. */ 4410 spin_lock_irqsave(&ha->cs84xx->access_lock, flags); 4411 ha->cs84xx->op_fw_version = 4412 le32_to_cpu(mn->p.rsp.fw_ver); 4413 spin_unlock_irqrestore(&ha->cs84xx->access_lock, 4414 flags); 4415 } 4416 } while (retry); 4417 4418 verify_done: 4419 dma_pool_free(ha->s_dma_pool, mn, mn_dma); 4420 4421 if (rval != QLA_SUCCESS) { 4422 ql_dbg(ql_dbg_mbx, vha, 0x10d1, 4423 "Failed=%x.\n", rval); 4424 } else { 4425 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d2, 4426 "Done %s.\n", __func__); 4427 } 4428 4429 return rval; 4430 } 4431 4432 int 4433 qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req) 4434 { 4435 int rval; 4436 unsigned long flags; 4437 mbx_cmd_t mc; 4438 mbx_cmd_t *mcp = &mc; 4439 struct qla_hw_data *ha = vha->hw; 4440 4441 if (!ha->flags.fw_started) 4442 return QLA_SUCCESS; 4443 4444 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d3, 4445 "Entered %s.\n", __func__); 4446 4447 if (IS_SHADOW_REG_CAPABLE(ha)) 4448 req->options |= BIT_13; 4449 4450 mcp->mb[0] = MBC_INITIALIZE_MULTIQ; 4451 mcp->mb[1] = req->options; 4452 mcp->mb[2] = MSW(LSD(req->dma)); 4453 mcp->mb[3] = LSW(LSD(req->dma)); 4454 mcp->mb[6] = MSW(MSD(req->dma)); 4455 mcp->mb[7] = LSW(MSD(req->dma)); 4456 mcp->mb[5] = req->length; 4457 if (req->rsp) 4458 mcp->mb[10] = req->rsp->id; 4459 mcp->mb[12] = req->qos; 4460 mcp->mb[11] = req->vp_idx; 4461 mcp->mb[13] = req->rid; 4462 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) 4463 mcp->mb[15] = 0; 4464 4465 mcp->mb[4] = req->id; 4466 /* que in ptr index */ 4467 mcp->mb[8] = 0; 4468 /* que out ptr index */ 4469 mcp->mb[9] = *req->out_ptr = 0; 4470 mcp->out_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7| 4471 MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 4472 mcp->in_mb = MBX_0; 4473 mcp->flags = MBX_DMA_OUT; 4474 mcp->tov = MBX_TOV_SECONDS * 2; 4475 4476 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) || 4477 IS_QLA28XX(ha)) 4478 mcp->in_mb |= MBX_1; 4479 if (IS_QLA83XX(ha) || 
IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 4480 mcp->out_mb |= MBX_15; 4481 /* debug q create issue in SR-IOV */ 4482 mcp->in_mb |= MBX_9 | MBX_8 | MBX_7; 4483 } 4484 4485 spin_lock_irqsave(&ha->hardware_lock, flags); 4486 if (!(req->options & BIT_0)) { 4487 wrt_reg_dword(req->req_q_in, 0); 4488 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 4489 wrt_reg_dword(req->req_q_out, 0); 4490 } 4491 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4492 4493 rval = qla2x00_mailbox_command(vha, mcp); 4494 if (rval != QLA_SUCCESS) { 4495 ql_dbg(ql_dbg_mbx, vha, 0x10d4, 4496 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4497 } else { 4498 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d5, 4499 "Done %s.\n", __func__); 4500 } 4501 4502 return rval; 4503 } 4504 4505 int 4506 qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp) 4507 { 4508 int rval; 4509 unsigned long flags; 4510 mbx_cmd_t mc; 4511 mbx_cmd_t *mcp = &mc; 4512 struct qla_hw_data *ha = vha->hw; 4513 4514 if (!ha->flags.fw_started) 4515 return QLA_SUCCESS; 4516 4517 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d6, 4518 "Entered %s.\n", __func__); 4519 4520 if (IS_SHADOW_REG_CAPABLE(ha)) 4521 rsp->options |= BIT_13; 4522 4523 mcp->mb[0] = MBC_INITIALIZE_MULTIQ; 4524 mcp->mb[1] = rsp->options; 4525 mcp->mb[2] = MSW(LSD(rsp->dma)); 4526 mcp->mb[3] = LSW(LSD(rsp->dma)); 4527 mcp->mb[6] = MSW(MSD(rsp->dma)); 4528 mcp->mb[7] = LSW(MSD(rsp->dma)); 4529 mcp->mb[5] = rsp->length; 4530 mcp->mb[14] = rsp->msix->entry; 4531 mcp->mb[13] = rsp->rid; 4532 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) 4533 mcp->mb[15] = 0; 4534 4535 mcp->mb[4] = rsp->id; 4536 /* que in ptr index */ 4537 mcp->mb[8] = *rsp->in_ptr = 0; 4538 /* que out ptr index */ 4539 mcp->mb[9] = 0; 4540 mcp->out_mb = MBX_14|MBX_13|MBX_9|MBX_8|MBX_7 4541 |MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 4542 mcp->in_mb = MBX_0; 4543 mcp->flags = MBX_DMA_OUT; 4544 mcp->tov = MBX_TOV_SECONDS * 2; 4545 4546 if (IS_QLA81XX(ha)) { 4547 mcp->out_mb |= MBX_12|MBX_11|MBX_10; 4548 mcp->in_mb |= MBX_1; 4549 } else if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 4550 mcp->out_mb |= MBX_15|MBX_12|MBX_11|MBX_10; 4551 mcp->in_mb |= MBX_1; 4552 /* debug q create issue in SR-IOV */ 4553 mcp->in_mb |= MBX_9 | MBX_8 | MBX_7; 4554 } 4555 4556 spin_lock_irqsave(&ha->hardware_lock, flags); 4557 if (!(rsp->options & BIT_0)) { 4558 wrt_reg_dword(rsp->rsp_q_out, 0); 4559 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 4560 wrt_reg_dword(rsp->rsp_q_in, 0); 4561 } 4562 4563 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4564 4565 rval = qla2x00_mailbox_command(vha, mcp); 4566 if (rval != QLA_SUCCESS) { 4567 ql_dbg(ql_dbg_mbx, vha, 0x10d7, 4568 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4569 } else { 4570 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d8, 4571 "Done %s.\n", __func__); 4572 } 4573 4574 return rval; 4575 } 4576 4577 int 4578 qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb) 4579 { 4580 int rval; 4581 mbx_cmd_t mc; 4582 mbx_cmd_t *mcp = &mc; 4583 4584 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d9, 4585 "Entered %s.\n", __func__); 4586 4587 mcp->mb[0] = MBC_IDC_ACK; 4588 memcpy(&mcp->mb[1], mb, QLA_IDC_ACK_REGS * sizeof(uint16_t)); 4589 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 4590 mcp->in_mb = MBX_0; 4591 mcp->tov = MBX_TOV_SECONDS; 4592 mcp->flags = 0; 4593 rval = qla2x00_mailbox_command(vha, mcp); 4594 4595 if (rval != QLA_SUCCESS) { 4596 ql_dbg(ql_dbg_mbx, vha, 0x10da, 4597 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4598 } else 
{ 4599 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10db, 4600 "Done %s.\n", __func__); 4601 } 4602 4603 return rval; 4604 } 4605 4606 int 4607 qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size) 4608 { 4609 int rval; 4610 mbx_cmd_t mc; 4611 mbx_cmd_t *mcp = &mc; 4612 4613 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10dc, 4614 "Entered %s.\n", __func__); 4615 4616 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) && 4617 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw)) 4618 return QLA_FUNCTION_FAILED; 4619 4620 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; 4621 mcp->mb[1] = FAC_OPT_CMD_GET_SECTOR_SIZE; 4622 mcp->out_mb = MBX_1|MBX_0; 4623 mcp->in_mb = MBX_1|MBX_0; 4624 mcp->tov = MBX_TOV_SECONDS; 4625 mcp->flags = 0; 4626 rval = qla2x00_mailbox_command(vha, mcp); 4627 4628 if (rval != QLA_SUCCESS) { 4629 ql_dbg(ql_dbg_mbx, vha, 0x10dd, 4630 "Failed=%x mb[0]=%x mb[1]=%x.\n", 4631 rval, mcp->mb[0], mcp->mb[1]); 4632 } else { 4633 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10de, 4634 "Done %s.\n", __func__); 4635 *sector_size = mcp->mb[1]; 4636 } 4637 4638 return rval; 4639 } 4640 4641 int 4642 qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable) 4643 { 4644 int rval; 4645 mbx_cmd_t mc; 4646 mbx_cmd_t *mcp = &mc; 4647 4648 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) && 4649 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw)) 4650 return QLA_FUNCTION_FAILED; 4651 4652 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10df, 4653 "Entered %s.\n", __func__); 4654 4655 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; 4656 mcp->mb[1] = enable ? FAC_OPT_CMD_WRITE_ENABLE : 4657 FAC_OPT_CMD_WRITE_PROTECT; 4658 mcp->out_mb = MBX_1|MBX_0; 4659 mcp->in_mb = MBX_1|MBX_0; 4660 mcp->tov = MBX_TOV_SECONDS; 4661 mcp->flags = 0; 4662 rval = qla2x00_mailbox_command(vha, mcp); 4663 4664 if (rval != QLA_SUCCESS) { 4665 ql_dbg(ql_dbg_mbx, vha, 0x10e0, 4666 "Failed=%x mb[0]=%x mb[1]=%x.\n", 4667 rval, mcp->mb[0], mcp->mb[1]); 4668 } else { 4669 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e1, 4670 "Done %s.\n", __func__); 4671 } 4672 4673 return rval; 4674 } 4675 4676 int 4677 qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish) 4678 { 4679 int rval; 4680 mbx_cmd_t mc; 4681 mbx_cmd_t *mcp = &mc; 4682 4683 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) && 4684 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw)) 4685 return QLA_FUNCTION_FAILED; 4686 4687 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2, 4688 "Entered %s.\n", __func__); 4689 4690 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; 4691 mcp->mb[1] = FAC_OPT_CMD_ERASE_SECTOR; 4692 mcp->mb[2] = LSW(start); 4693 mcp->mb[3] = MSW(start); 4694 mcp->mb[4] = LSW(finish); 4695 mcp->mb[5] = MSW(finish); 4696 mcp->out_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 4697 mcp->in_mb = MBX_2|MBX_1|MBX_0; 4698 mcp->tov = MBX_TOV_SECONDS; 4699 mcp->flags = 0; 4700 rval = qla2x00_mailbox_command(vha, mcp); 4701 4702 if (rval != QLA_SUCCESS) { 4703 ql_dbg(ql_dbg_mbx, vha, 0x10e3, 4704 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", 4705 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); 4706 } else { 4707 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4, 4708 "Done %s.\n", __func__); 4709 } 4710 4711 return rval; 4712 } 4713 4714 int 4715 qla81xx_fac_semaphore_access(scsi_qla_host_t *vha, int lock) 4716 { 4717 int rval = QLA_SUCCESS; 4718 mbx_cmd_t mc; 4719 mbx_cmd_t *mcp = &mc; 4720 struct qla_hw_data *ha = vha->hw; 4721 4722 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && 4723 !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 4724 return rval; 4725 4726 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2, 4727 
"Entered %s.\n", __func__); 4728 4729 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; 4730 mcp->mb[1] = (lock ? FAC_OPT_CMD_LOCK_SEMAPHORE : 4731 FAC_OPT_CMD_UNLOCK_SEMAPHORE); 4732 mcp->out_mb = MBX_1|MBX_0; 4733 mcp->in_mb = MBX_1|MBX_0; 4734 mcp->tov = MBX_TOV_SECONDS; 4735 mcp->flags = 0; 4736 rval = qla2x00_mailbox_command(vha, mcp); 4737 4738 if (rval != QLA_SUCCESS) { 4739 ql_dbg(ql_dbg_mbx, vha, 0x10e3, 4740 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", 4741 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); 4742 } else { 4743 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4, 4744 "Done %s.\n", __func__); 4745 } 4746 4747 return rval; 4748 } 4749 4750 int 4751 qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha) 4752 { 4753 int rval = 0; 4754 mbx_cmd_t mc; 4755 mbx_cmd_t *mcp = &mc; 4756 4757 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e5, 4758 "Entered %s.\n", __func__); 4759 4760 mcp->mb[0] = MBC_RESTART_MPI_FW; 4761 mcp->out_mb = MBX_0; 4762 mcp->in_mb = MBX_0|MBX_1; 4763 mcp->tov = MBX_TOV_SECONDS; 4764 mcp->flags = 0; 4765 rval = qla2x00_mailbox_command(vha, mcp); 4766 4767 if (rval != QLA_SUCCESS) { 4768 ql_dbg(ql_dbg_mbx, vha, 0x10e6, 4769 "Failed=%x mb[0]=%x mb[1]=%x.\n", 4770 rval, mcp->mb[0], mcp->mb[1]); 4771 } else { 4772 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e7, 4773 "Done %s.\n", __func__); 4774 } 4775 4776 return rval; 4777 } 4778 4779 int 4780 qla82xx_set_driver_version(scsi_qla_host_t *vha, char *version) 4781 { 4782 int rval; 4783 mbx_cmd_t mc; 4784 mbx_cmd_t *mcp = &mc; 4785 int i; 4786 int len; 4787 __le16 *str; 4788 struct qla_hw_data *ha = vha->hw; 4789 4790 if (!IS_P3P_TYPE(ha)) 4791 return QLA_FUNCTION_FAILED; 4792 4793 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117b, 4794 "Entered %s.\n", __func__); 4795 4796 str = (__force __le16 *)version; 4797 len = strlen(version); 4798 4799 mcp->mb[0] = MBC_SET_RNID_PARAMS; 4800 mcp->mb[1] = RNID_TYPE_SET_VERSION << 8; 4801 mcp->out_mb = MBX_1|MBX_0; 4802 for (i = 4; i < 16 && len; i++, str++, len -= 2) { 4803 mcp->mb[i] = le16_to_cpup(str); 4804 mcp->out_mb |= 1<<i; 4805 } 4806 for (; i < 16; i++) { 4807 mcp->mb[i] = 0; 4808 mcp->out_mb |= 1<<i; 4809 } 4810 mcp->in_mb = MBX_1|MBX_0; 4811 mcp->tov = MBX_TOV_SECONDS; 4812 mcp->flags = 0; 4813 rval = qla2x00_mailbox_command(vha, mcp); 4814 4815 if (rval != QLA_SUCCESS) { 4816 ql_dbg(ql_dbg_mbx, vha, 0x117c, 4817 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]); 4818 } else { 4819 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117d, 4820 "Done %s.\n", __func__); 4821 } 4822 4823 return rval; 4824 } 4825 4826 int 4827 qla25xx_set_driver_version(scsi_qla_host_t *vha, char *version) 4828 { 4829 int rval; 4830 mbx_cmd_t mc; 4831 mbx_cmd_t *mcp = &mc; 4832 int len; 4833 uint16_t dwlen; 4834 uint8_t *str; 4835 dma_addr_t str_dma; 4836 struct qla_hw_data *ha = vha->hw; 4837 4838 if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha) || 4839 IS_P3P_TYPE(ha)) 4840 return QLA_FUNCTION_FAILED; 4841 4842 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117e, 4843 "Entered %s.\n", __func__); 4844 4845 str = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &str_dma); 4846 if (!str) { 4847 ql_log(ql_log_warn, vha, 0x117f, 4848 "Failed to allocate driver version param.\n"); 4849 return QLA_MEMORY_ALLOC_FAILED; 4850 } 4851 4852 memcpy(str, "\x7\x3\x11\x0", 4); 4853 dwlen = str[0]; 4854 len = dwlen * 4 - 4; 4855 memset(str + 4, 0, len); 4856 if (len > strlen(version)) 4857 len = strlen(version); 4858 memcpy(str + 4, version, len); 4859 4860 mcp->mb[0] = MBC_SET_RNID_PARAMS; 4861 mcp->mb[1] = 
RNID_TYPE_SET_VERSION << 8 | dwlen; 4862 mcp->mb[2] = MSW(LSD(str_dma)); 4863 mcp->mb[3] = LSW(LSD(str_dma)); 4864 mcp->mb[6] = MSW(MSD(str_dma)); 4865 mcp->mb[7] = LSW(MSD(str_dma)); 4866 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 4867 mcp->in_mb = MBX_1|MBX_0; 4868 mcp->tov = MBX_TOV_SECONDS; 4869 mcp->flags = 0; 4870 rval = qla2x00_mailbox_command(vha, mcp); 4871 4872 if (rval != QLA_SUCCESS) { 4873 ql_dbg(ql_dbg_mbx, vha, 0x1180, 4874 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]); 4875 } else { 4876 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1181, 4877 "Done %s.\n", __func__); 4878 } 4879 4880 dma_pool_free(ha->s_dma_pool, str, str_dma); 4881 4882 return rval; 4883 } 4884 4885 int 4886 qla24xx_get_port_login_templ(scsi_qla_host_t *vha, dma_addr_t buf_dma, 4887 void *buf, uint16_t bufsiz) 4888 { 4889 int rval, i; 4890 mbx_cmd_t mc; 4891 mbx_cmd_t *mcp = &mc; 4892 uint32_t *bp; 4893 4894 if (!IS_FWI2_CAPABLE(vha->hw)) 4895 return QLA_FUNCTION_FAILED; 4896 4897 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159, 4898 "Entered %s.\n", __func__); 4899 4900 mcp->mb[0] = MBC_GET_RNID_PARAMS; 4901 mcp->mb[1] = RNID_TYPE_PORT_LOGIN << 8; 4902 mcp->mb[2] = MSW(buf_dma); 4903 mcp->mb[3] = LSW(buf_dma); 4904 mcp->mb[6] = MSW(MSD(buf_dma)); 4905 mcp->mb[7] = LSW(MSD(buf_dma)); 4906 mcp->mb[8] = bufsiz/4; 4907 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 4908 mcp->in_mb = MBX_1|MBX_0; 4909 mcp->tov = MBX_TOV_SECONDS; 4910 mcp->flags = 0; 4911 rval = qla2x00_mailbox_command(vha, mcp); 4912 4913 if (rval != QLA_SUCCESS) { 4914 ql_dbg(ql_dbg_mbx, vha, 0x115a, 4915 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]); 4916 } else { 4917 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b, 4918 "Done %s.\n", __func__); 4919 bp = (uint32_t *) buf; 4920 for (i = 0; i < (bufsiz-4)/4; i++, bp++) 4921 *bp = le32_to_cpu((__force __le32)*bp); 4922 } 4923 4924 return rval; 4925 } 4926 4927 #define PUREX_CMD_COUNT 2 4928 int 4929 qla25xx_set_els_cmds_supported(scsi_qla_host_t *vha) 4930 { 4931 int rval; 4932 mbx_cmd_t mc; 4933 mbx_cmd_t *mcp = &mc; 4934 uint8_t *els_cmd_map; 4935 dma_addr_t els_cmd_map_dma; 4936 uint8_t cmd_opcode[PUREX_CMD_COUNT]; 4937 uint8_t i, index, purex_bit; 4938 struct qla_hw_data *ha = vha->hw; 4939 4940 if (!IS_QLA25XX(ha) && !IS_QLA2031(ha) && 4941 !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 4942 return QLA_SUCCESS; 4943 4944 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1197, 4945 "Entered %s.\n", __func__); 4946 4947 els_cmd_map = dma_alloc_coherent(&ha->pdev->dev, ELS_CMD_MAP_SIZE, 4948 &els_cmd_map_dma, GFP_KERNEL); 4949 if (!els_cmd_map) { 4950 ql_log(ql_log_warn, vha, 0x7101, 4951 "Failed to allocate RDP els command param.\n"); 4952 return QLA_MEMORY_ALLOC_FAILED; 4953 } 4954 4955 /* List of Purex ELS */ 4956 cmd_opcode[0] = ELS_FPIN; 4957 cmd_opcode[1] = ELS_RDP; 4958 4959 for (i = 0; i < PUREX_CMD_COUNT; i++) { 4960 index = cmd_opcode[i] / 8; 4961 purex_bit = cmd_opcode[i] % 8; 4962 els_cmd_map[index] |= 1 << purex_bit; 4963 } 4964 4965 mcp->mb[0] = MBC_SET_RNID_PARAMS; 4966 mcp->mb[1] = RNID_TYPE_ELS_CMD << 8; 4967 mcp->mb[2] = MSW(LSD(els_cmd_map_dma)); 4968 mcp->mb[3] = LSW(LSD(els_cmd_map_dma)); 4969 mcp->mb[6] = MSW(MSD(els_cmd_map_dma)); 4970 mcp->mb[7] = LSW(MSD(els_cmd_map_dma)); 4971 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 4972 mcp->in_mb = MBX_1|MBX_0; 4973 mcp->tov = MBX_TOV_SECONDS; 4974 mcp->flags = MBX_DMA_OUT; 4975 mcp->buf_size = ELS_CMD_MAP_SIZE; 4976 rval = qla2x00_mailbox_command(vha, mcp); 4977 4978 if (rval != 
QLA_SUCCESS) { 4979 ql_dbg(ql_dbg_mbx, vha, 0x118d, 4980 "Failed=%x (%x,%x).\n", rval, mcp->mb[0], mcp->mb[1]); 4981 } else { 4982 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c, 4983 "Done %s.\n", __func__); 4984 } 4985 4986 dma_free_coherent(&ha->pdev->dev, ELS_CMD_MAP_SIZE, 4987 els_cmd_map, els_cmd_map_dma); 4988 4989 return rval; 4990 } 4991 4992 static int 4993 qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp) 4994 { 4995 int rval; 4996 mbx_cmd_t mc; 4997 mbx_cmd_t *mcp = &mc; 4998 4999 if (!IS_FWI2_CAPABLE(vha->hw)) 5000 return QLA_FUNCTION_FAILED; 5001 5002 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159, 5003 "Entered %s.\n", __func__); 5004 5005 mcp->mb[0] = MBC_GET_RNID_PARAMS; 5006 mcp->mb[1] = RNID_TYPE_ASIC_TEMP << 8; 5007 mcp->out_mb = MBX_1|MBX_0; 5008 mcp->in_mb = MBX_1|MBX_0; 5009 mcp->tov = MBX_TOV_SECONDS; 5010 mcp->flags = 0; 5011 rval = qla2x00_mailbox_command(vha, mcp); 5012 *temp = mcp->mb[1]; 5013 5014 if (rval != QLA_SUCCESS) { 5015 ql_dbg(ql_dbg_mbx, vha, 0x115a, 5016 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]); 5017 } else { 5018 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b, 5019 "Done %s.\n", __func__); 5020 } 5021 5022 return rval; 5023 } 5024 5025 int 5026 qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp, 5027 uint16_t dev, uint16_t off, uint16_t len, uint16_t opt) 5028 { 5029 int rval; 5030 mbx_cmd_t mc; 5031 mbx_cmd_t *mcp = &mc; 5032 struct qla_hw_data *ha = vha->hw; 5033 5034 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8, 5035 "Entered %s.\n", __func__); 5036 5037 if (!IS_FWI2_CAPABLE(ha)) 5038 return QLA_FUNCTION_FAILED; 5039 5040 if (len == 1) 5041 opt |= BIT_0; 5042 5043 mcp->mb[0] = MBC_READ_SFP; 5044 mcp->mb[1] = dev; 5045 mcp->mb[2] = MSW(LSD(sfp_dma)); 5046 mcp->mb[3] = LSW(LSD(sfp_dma)); 5047 mcp->mb[6] = MSW(MSD(sfp_dma)); 5048 mcp->mb[7] = LSW(MSD(sfp_dma)); 5049 mcp->mb[8] = len; 5050 mcp->mb[9] = off; 5051 mcp->mb[10] = opt; 5052 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 5053 mcp->in_mb = MBX_1|MBX_0; 5054 mcp->tov = MBX_TOV_SECONDS; 5055 mcp->flags = 0; 5056 rval = qla2x00_mailbox_command(vha, mcp); 5057 5058 if (opt & BIT_0) 5059 *sfp = mcp->mb[1]; 5060 5061 if (rval != QLA_SUCCESS) { 5062 ql_dbg(ql_dbg_mbx, vha, 0x10e9, 5063 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5064 if (mcp->mb[0] == MBS_COMMAND_ERROR && mcp->mb[1] == 0x22) { 5065 /* sfp is not there */ 5066 rval = QLA_INTERFACE_ERROR; 5067 } 5068 } else { 5069 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea, 5070 "Done %s.\n", __func__); 5071 } 5072 5073 return rval; 5074 } 5075 5076 int 5077 qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp, 5078 uint16_t dev, uint16_t off, uint16_t len, uint16_t opt) 5079 { 5080 int rval; 5081 mbx_cmd_t mc; 5082 mbx_cmd_t *mcp = &mc; 5083 struct qla_hw_data *ha = vha->hw; 5084 5085 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10eb, 5086 "Entered %s.\n", __func__); 5087 5088 if (!IS_FWI2_CAPABLE(ha)) 5089 return QLA_FUNCTION_FAILED; 5090 5091 if (len == 1) 5092 opt |= BIT_0; 5093 5094 if (opt & BIT_0) 5095 len = *sfp; 5096 5097 mcp->mb[0] = MBC_WRITE_SFP; 5098 mcp->mb[1] = dev; 5099 mcp->mb[2] = MSW(LSD(sfp_dma)); 5100 mcp->mb[3] = LSW(LSD(sfp_dma)); 5101 mcp->mb[6] = MSW(MSD(sfp_dma)); 5102 mcp->mb[7] = LSW(MSD(sfp_dma)); 5103 mcp->mb[8] = len; 5104 mcp->mb[9] = off; 5105 mcp->mb[10] = opt; 5106 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 5107 mcp->in_mb = MBX_1|MBX_0; 5108 mcp->tov = MBX_TOV_SECONDS; 5109 
mcp->flags = 0; 5110 rval = qla2x00_mailbox_command(vha, mcp); 5111 5112 if (rval != QLA_SUCCESS) { 5113 ql_dbg(ql_dbg_mbx, vha, 0x10ec, 5114 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5115 } else { 5116 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ed, 5117 "Done %s.\n", __func__); 5118 } 5119 5120 return rval; 5121 } 5122 5123 int 5124 qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma, 5125 uint16_t size_in_bytes, uint16_t *actual_size) 5126 { 5127 int rval; 5128 mbx_cmd_t mc; 5129 mbx_cmd_t *mcp = &mc; 5130 5131 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ee, 5132 "Entered %s.\n", __func__); 5133 5134 if (!IS_CNA_CAPABLE(vha->hw)) 5135 return QLA_FUNCTION_FAILED; 5136 5137 mcp->mb[0] = MBC_GET_XGMAC_STATS; 5138 mcp->mb[2] = MSW(stats_dma); 5139 mcp->mb[3] = LSW(stats_dma); 5140 mcp->mb[6] = MSW(MSD(stats_dma)); 5141 mcp->mb[7] = LSW(MSD(stats_dma)); 5142 mcp->mb[8] = size_in_bytes >> 2; 5143 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0; 5144 mcp->in_mb = MBX_2|MBX_1|MBX_0; 5145 mcp->tov = MBX_TOV_SECONDS; 5146 mcp->flags = 0; 5147 rval = qla2x00_mailbox_command(vha, mcp); 5148 5149 if (rval != QLA_SUCCESS) { 5150 ql_dbg(ql_dbg_mbx, vha, 0x10ef, 5151 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", 5152 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); 5153 } else { 5154 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f0, 5155 "Done %s.\n", __func__); 5156 5157 5158 *actual_size = mcp->mb[2] << 2; 5159 } 5160 5161 return rval; 5162 } 5163 5164 int 5165 qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma, 5166 uint16_t size) 5167 { 5168 int rval; 5169 mbx_cmd_t mc; 5170 mbx_cmd_t *mcp = &mc; 5171 5172 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f1, 5173 "Entered %s.\n", __func__); 5174 5175 if (!IS_CNA_CAPABLE(vha->hw)) 5176 return QLA_FUNCTION_FAILED; 5177 5178 mcp->mb[0] = MBC_GET_DCBX_PARAMS; 5179 mcp->mb[1] = 0; 5180 mcp->mb[2] = MSW(tlv_dma); 5181 mcp->mb[3] = LSW(tlv_dma); 5182 mcp->mb[6] = MSW(MSD(tlv_dma)); 5183 mcp->mb[7] = LSW(MSD(tlv_dma)); 5184 mcp->mb[8] = size; 5185 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 5186 mcp->in_mb = MBX_2|MBX_1|MBX_0; 5187 mcp->tov = MBX_TOV_SECONDS; 5188 mcp->flags = 0; 5189 rval = qla2x00_mailbox_command(vha, mcp); 5190 5191 if (rval != QLA_SUCCESS) { 5192 ql_dbg(ql_dbg_mbx, vha, 0x10f2, 5193 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", 5194 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); 5195 } else { 5196 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f3, 5197 "Done %s.\n", __func__); 5198 } 5199 5200 return rval; 5201 } 5202 5203 int 5204 qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data) 5205 { 5206 int rval; 5207 mbx_cmd_t mc; 5208 mbx_cmd_t *mcp = &mc; 5209 5210 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f4, 5211 "Entered %s.\n", __func__); 5212 5213 if (!IS_FWI2_CAPABLE(vha->hw)) 5214 return QLA_FUNCTION_FAILED; 5215 5216 mcp->mb[0] = MBC_READ_RAM_EXTENDED; 5217 mcp->mb[1] = LSW(risc_addr); 5218 mcp->mb[8] = MSW(risc_addr); 5219 mcp->out_mb = MBX_8|MBX_1|MBX_0; 5220 mcp->in_mb = MBX_3|MBX_2|MBX_0; 5221 mcp->tov = MBX_TOV_SECONDS; 5222 mcp->flags = 0; 5223 rval = qla2x00_mailbox_command(vha, mcp); 5224 if (rval != QLA_SUCCESS) { 5225 ql_dbg(ql_dbg_mbx, vha, 0x10f5, 5226 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5227 } else { 5228 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f6, 5229 "Done %s.\n", __func__); 5230 *data = mcp->mb[3] << 16 | mcp->mb[2]; 5231 } 5232 5233 return rval; 5234 } 5235 5236 int 5237 qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, 5238 
uint16_t *mresp) 5239 { 5240 int rval; 5241 mbx_cmd_t mc; 5242 mbx_cmd_t *mcp = &mc; 5243 5244 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f7, 5245 "Entered %s.\n", __func__); 5246 5247 memset(mcp->mb, 0 , sizeof(mcp->mb)); 5248 mcp->mb[0] = MBC_DIAGNOSTIC_LOOP_BACK; 5249 mcp->mb[1] = mreq->options | BIT_6; // BIT_6 specifies 64 bit addressing 5250 5251 /* transfer count */ 5252 mcp->mb[10] = LSW(mreq->transfer_size); 5253 mcp->mb[11] = MSW(mreq->transfer_size); 5254 5255 /* send data address */ 5256 mcp->mb[14] = LSW(mreq->send_dma); 5257 mcp->mb[15] = MSW(mreq->send_dma); 5258 mcp->mb[20] = LSW(MSD(mreq->send_dma)); 5259 mcp->mb[21] = MSW(MSD(mreq->send_dma)); 5260 5261 /* receive data address */ 5262 mcp->mb[16] = LSW(mreq->rcv_dma); 5263 mcp->mb[17] = MSW(mreq->rcv_dma); 5264 mcp->mb[6] = LSW(MSD(mreq->rcv_dma)); 5265 mcp->mb[7] = MSW(MSD(mreq->rcv_dma)); 5266 5267 /* Iteration count */ 5268 mcp->mb[18] = LSW(mreq->iteration_count); 5269 mcp->mb[19] = MSW(mreq->iteration_count); 5270 5271 mcp->out_mb = MBX_21|MBX_20|MBX_19|MBX_18|MBX_17|MBX_16|MBX_15| 5272 MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0; 5273 if (IS_CNA_CAPABLE(vha->hw)) 5274 mcp->out_mb |= MBX_2; 5275 mcp->in_mb = MBX_19|MBX_18|MBX_3|MBX_2|MBX_1|MBX_0; 5276 5277 mcp->buf_size = mreq->transfer_size; 5278 mcp->tov = MBX_TOV_SECONDS; 5279 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; 5280 5281 rval = qla2x00_mailbox_command(vha, mcp); 5282 5283 if (rval != QLA_SUCCESS) { 5284 ql_dbg(ql_dbg_mbx, vha, 0x10f8, 5285 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[18]=%x " 5286 "mb[19]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], 5287 mcp->mb[3], mcp->mb[18], mcp->mb[19]); 5288 } else { 5289 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f9, 5290 "Done %s.\n", __func__); 5291 } 5292 5293 /* Copy mailbox information */ 5294 memcpy( mresp, mcp->mb, 64); 5295 return rval; 5296 } 5297 5298 int 5299 qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, 5300 uint16_t *mresp) 5301 { 5302 int rval; 5303 mbx_cmd_t mc; 5304 mbx_cmd_t *mcp = &mc; 5305 struct qla_hw_data *ha = vha->hw; 5306 5307 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fa, 5308 "Entered %s.\n", __func__); 5309 5310 memset(mcp->mb, 0 , sizeof(mcp->mb)); 5311 mcp->mb[0] = MBC_DIAGNOSTIC_ECHO; 5312 /* BIT_6 specifies 64bit address */ 5313 mcp->mb[1] = mreq->options | BIT_15 | BIT_6; 5314 if (IS_CNA_CAPABLE(ha)) { 5315 mcp->mb[2] = vha->fcoe_fcf_idx; 5316 } 5317 mcp->mb[16] = LSW(mreq->rcv_dma); 5318 mcp->mb[17] = MSW(mreq->rcv_dma); 5319 mcp->mb[6] = LSW(MSD(mreq->rcv_dma)); 5320 mcp->mb[7] = MSW(MSD(mreq->rcv_dma)); 5321 5322 mcp->mb[10] = LSW(mreq->transfer_size); 5323 5324 mcp->mb[14] = LSW(mreq->send_dma); 5325 mcp->mb[15] = MSW(mreq->send_dma); 5326 mcp->mb[20] = LSW(MSD(mreq->send_dma)); 5327 mcp->mb[21] = MSW(MSD(mreq->send_dma)); 5328 5329 mcp->out_mb = MBX_21|MBX_20|MBX_17|MBX_16|MBX_15| 5330 MBX_14|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0; 5331 if (IS_CNA_CAPABLE(ha)) 5332 mcp->out_mb |= MBX_2; 5333 5334 mcp->in_mb = MBX_0; 5335 if (IS_CNA_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || 5336 IS_QLA2031(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) 5337 mcp->in_mb |= MBX_1; 5338 if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) || 5339 IS_QLA28XX(ha)) 5340 mcp->in_mb |= MBX_3; 5341 5342 mcp->tov = MBX_TOV_SECONDS; 5343 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; 5344 mcp->buf_size = mreq->transfer_size; 5345 5346 rval = qla2x00_mailbox_command(vha, mcp); 5347 5348 if (rval != QLA_SUCCESS) { 5349 ql_dbg(ql_dbg_mbx, vha, 0x10fb, 
5350 "Failed=%x mb[0]=%x mb[1]=%x.\n", 5351 rval, mcp->mb[0], mcp->mb[1]); 5352 } else { 5353 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fc, 5354 "Done %s.\n", __func__); 5355 } 5356 5357 /* Copy mailbox information */ 5358 memcpy(mresp, mcp->mb, 64); 5359 return rval; 5360 } 5361 5362 int 5363 qla84xx_reset_chip(scsi_qla_host_t *vha, uint16_t enable_diagnostic) 5364 { 5365 int rval; 5366 mbx_cmd_t mc; 5367 mbx_cmd_t *mcp = &mc; 5368 5369 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fd, 5370 "Entered %s enable_diag=%d.\n", __func__, enable_diagnostic); 5371 5372 mcp->mb[0] = MBC_ISP84XX_RESET; 5373 mcp->mb[1] = enable_diagnostic; 5374 mcp->out_mb = MBX_1|MBX_0; 5375 mcp->in_mb = MBX_1|MBX_0; 5376 mcp->tov = MBX_TOV_SECONDS; 5377 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; 5378 rval = qla2x00_mailbox_command(vha, mcp); 5379 5380 if (rval != QLA_SUCCESS) 5381 ql_dbg(ql_dbg_mbx, vha, 0x10fe, "Failed=%x.\n", rval); 5382 else 5383 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ff, 5384 "Done %s.\n", __func__); 5385 5386 return rval; 5387 } 5388 5389 int 5390 qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data) 5391 { 5392 int rval; 5393 mbx_cmd_t mc; 5394 mbx_cmd_t *mcp = &mc; 5395 5396 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1100, 5397 "Entered %s.\n", __func__); 5398 5399 if (!IS_FWI2_CAPABLE(vha->hw)) 5400 return QLA_FUNCTION_FAILED; 5401 5402 mcp->mb[0] = MBC_WRITE_RAM_WORD_EXTENDED; 5403 mcp->mb[1] = LSW(risc_addr); 5404 mcp->mb[2] = LSW(data); 5405 mcp->mb[3] = MSW(data); 5406 mcp->mb[8] = MSW(risc_addr); 5407 mcp->out_mb = MBX_8|MBX_3|MBX_2|MBX_1|MBX_0; 5408 mcp->in_mb = MBX_1|MBX_0; 5409 mcp->tov = MBX_TOV_SECONDS; 5410 mcp->flags = 0; 5411 rval = qla2x00_mailbox_command(vha, mcp); 5412 if (rval != QLA_SUCCESS) { 5413 ql_dbg(ql_dbg_mbx, vha, 0x1101, 5414 "Failed=%x mb[0]=%x mb[1]=%x.\n", 5415 rval, mcp->mb[0], mcp->mb[1]); 5416 } else { 5417 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1102, 5418 "Done %s.\n", __func__); 5419 } 5420 5421 return rval; 5422 } 5423 5424 int 5425 qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb) 5426 { 5427 int rval; 5428 uint32_t stat, timer; 5429 uint16_t mb0 = 0; 5430 struct qla_hw_data *ha = vha->hw; 5431 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 5432 5433 rval = QLA_SUCCESS; 5434 5435 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1103, 5436 "Entered %s.\n", __func__); 5437 5438 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); 5439 5440 /* Write the MBC data to the registers */ 5441 wrt_reg_word(®->mailbox0, MBC_WRITE_MPI_REGISTER); 5442 wrt_reg_word(®->mailbox1, mb[0]); 5443 wrt_reg_word(®->mailbox2, mb[1]); 5444 wrt_reg_word(®->mailbox3, mb[2]); 5445 wrt_reg_word(®->mailbox4, mb[3]); 5446 5447 wrt_reg_dword(®->hccr, HCCRX_SET_HOST_INT); 5448 5449 /* Poll for MBC interrupt */ 5450 for (timer = 6000000; timer; timer--) { 5451 /* Check for pending interrupts. 
*/ 5452 stat = rd_reg_dword(®->host_status); 5453 if (stat & HSRX_RISC_INT) { 5454 stat &= 0xff; 5455 5456 if (stat == 0x1 || stat == 0x2 || 5457 stat == 0x10 || stat == 0x11) { 5458 set_bit(MBX_INTERRUPT, 5459 &ha->mbx_cmd_flags); 5460 mb0 = rd_reg_word(®->mailbox0); 5461 wrt_reg_dword(®->hccr, 5462 HCCRX_CLR_RISC_INT); 5463 rd_reg_dword(®->hccr); 5464 break; 5465 } 5466 } 5467 udelay(5); 5468 } 5469 5470 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) 5471 rval = mb0 & MBS_MASK; 5472 else 5473 rval = QLA_FUNCTION_FAILED; 5474 5475 if (rval != QLA_SUCCESS) { 5476 ql_dbg(ql_dbg_mbx, vha, 0x1104, 5477 "Failed=%x mb[0]=%x.\n", rval, mb[0]); 5478 } else { 5479 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1105, 5480 "Done %s.\n", __func__); 5481 } 5482 5483 return rval; 5484 } 5485 5486 /* Set the specified data rate */ 5487 int 5488 qla2x00_set_data_rate(scsi_qla_host_t *vha, uint16_t mode) 5489 { 5490 int rval; 5491 mbx_cmd_t mc; 5492 mbx_cmd_t *mcp = &mc; 5493 struct qla_hw_data *ha = vha->hw; 5494 uint16_t val; 5495 5496 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106, 5497 "Entered %s speed:0x%x mode:0x%x.\n", __func__, ha->set_data_rate, 5498 mode); 5499 5500 if (!IS_FWI2_CAPABLE(ha)) 5501 return QLA_FUNCTION_FAILED; 5502 5503 memset(mcp, 0, sizeof(*mcp)); 5504 switch (ha->set_data_rate) { 5505 case PORT_SPEED_AUTO: 5506 case PORT_SPEED_4GB: 5507 case PORT_SPEED_8GB: 5508 case PORT_SPEED_16GB: 5509 case PORT_SPEED_32GB: 5510 val = ha->set_data_rate; 5511 break; 5512 default: 5513 ql_log(ql_log_warn, vha, 0x1199, 5514 "Unrecognized speed setting:%d. Setting Autoneg\n", 5515 ha->set_data_rate); 5516 val = ha->set_data_rate = PORT_SPEED_AUTO; 5517 break; 5518 } 5519 5520 mcp->mb[0] = MBC_DATA_RATE; 5521 mcp->mb[1] = mode; 5522 mcp->mb[2] = val; 5523 5524 mcp->out_mb = MBX_2|MBX_1|MBX_0; 5525 mcp->in_mb = MBX_2|MBX_1|MBX_0; 5526 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) 5527 mcp->in_mb |= MBX_4|MBX_3; 5528 mcp->tov = MBX_TOV_SECONDS; 5529 mcp->flags = 0; 5530 rval = qla2x00_mailbox_command(vha, mcp); 5531 if (rval != QLA_SUCCESS) { 5532 ql_dbg(ql_dbg_mbx, vha, 0x1107, 5533 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5534 } else { 5535 if (mcp->mb[1] != 0x7) 5536 ql_dbg(ql_dbg_mbx, vha, 0x1179, 5537 "Speed set:0x%x\n", mcp->mb[1]); 5538 5539 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108, 5540 "Done %s.\n", __func__); 5541 } 5542 5543 return rval; 5544 } 5545 5546 int 5547 qla2x00_get_data_rate(scsi_qla_host_t *vha) 5548 { 5549 int rval; 5550 mbx_cmd_t mc; 5551 mbx_cmd_t *mcp = &mc; 5552 struct qla_hw_data *ha = vha->hw; 5553 5554 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106, 5555 "Entered %s.\n", __func__); 5556 5557 if (!IS_FWI2_CAPABLE(ha)) 5558 return QLA_FUNCTION_FAILED; 5559 5560 mcp->mb[0] = MBC_DATA_RATE; 5561 mcp->mb[1] = QLA_GET_DATA_RATE; 5562 mcp->out_mb = MBX_1|MBX_0; 5563 mcp->in_mb = MBX_2|MBX_1|MBX_0; 5564 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) 5565 mcp->in_mb |= MBX_3; 5566 mcp->tov = MBX_TOV_SECONDS; 5567 mcp->flags = 0; 5568 rval = qla2x00_mailbox_command(vha, mcp); 5569 if (rval != QLA_SUCCESS) { 5570 ql_dbg(ql_dbg_mbx, vha, 0x1107, 5571 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5572 } else { 5573 if (mcp->mb[1] != 0x7) 5574 ha->link_data_rate = mcp->mb[1]; 5575 5576 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 5577 if (mcp->mb[4] & BIT_0) 5578 ql_log(ql_log_info, vha, 0x11a2, 5579 "FEC=enabled (data rate).\n"); 5580 } 5581 5582 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108, 5583 "Done %s.\n", __func__); 5584 if 
(mcp->mb[1] != 0x7) 5585 ha->link_data_rate = mcp->mb[1]; 5586 } 5587 5588 return rval; 5589 } 5590 5591 int 5592 qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb) 5593 { 5594 int rval; 5595 mbx_cmd_t mc; 5596 mbx_cmd_t *mcp = &mc; 5597 struct qla_hw_data *ha = vha->hw; 5598 5599 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1109, 5600 "Entered %s.\n", __func__); 5601 5602 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA8044(ha) && 5603 !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 5604 return QLA_FUNCTION_FAILED; 5605 mcp->mb[0] = MBC_GET_PORT_CONFIG; 5606 mcp->out_mb = MBX_0; 5607 mcp->in_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 5608 mcp->tov = MBX_TOV_SECONDS; 5609 mcp->flags = 0; 5610 5611 rval = qla2x00_mailbox_command(vha, mcp); 5612 5613 if (rval != QLA_SUCCESS) { 5614 ql_dbg(ql_dbg_mbx, vha, 0x110a, 5615 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5616 } else { 5617 /* Copy all bits to preserve original value */ 5618 memcpy(mb, &mcp->mb[1], sizeof(uint16_t) * 4); 5619 5620 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110b, 5621 "Done %s.\n", __func__); 5622 } 5623 return rval; 5624 } 5625 5626 int 5627 qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb) 5628 { 5629 int rval; 5630 mbx_cmd_t mc; 5631 mbx_cmd_t *mcp = &mc; 5632 5633 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110c, 5634 "Entered %s.\n", __func__); 5635 5636 mcp->mb[0] = MBC_SET_PORT_CONFIG; 5637 /* Copy all bits to preserve original setting */ 5638 memcpy(&mcp->mb[1], mb, sizeof(uint16_t) * 4); 5639 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 5640 mcp->in_mb = MBX_0; 5641 mcp->tov = MBX_TOV_SECONDS; 5642 mcp->flags = 0; 5643 rval = qla2x00_mailbox_command(vha, mcp); 5644 5645 if (rval != QLA_SUCCESS) { 5646 ql_dbg(ql_dbg_mbx, vha, 0x110d, 5647 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5648 } else 5649 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110e, 5650 "Done %s.\n", __func__); 5651 5652 return rval; 5653 } 5654 5655 5656 int 5657 qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority, 5658 uint16_t *mb) 5659 { 5660 int rval; 5661 mbx_cmd_t mc; 5662 mbx_cmd_t *mcp = &mc; 5663 struct qla_hw_data *ha = vha->hw; 5664 5665 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110f, 5666 "Entered %s.\n", __func__); 5667 5668 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha)) 5669 return QLA_FUNCTION_FAILED; 5670 5671 mcp->mb[0] = MBC_PORT_PARAMS; 5672 mcp->mb[1] = loop_id; 5673 if (ha->flags.fcp_prio_enabled) 5674 mcp->mb[2] = BIT_1; 5675 else 5676 mcp->mb[2] = BIT_2; 5677 mcp->mb[4] = priority & 0xf; 5678 mcp->mb[9] = vha->vp_idx; 5679 mcp->out_mb = MBX_9|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 5680 mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0; 5681 mcp->tov = MBX_TOV_SECONDS; 5682 mcp->flags = 0; 5683 rval = qla2x00_mailbox_command(vha, mcp); 5684 if (mb != NULL) { 5685 mb[0] = mcp->mb[0]; 5686 mb[1] = mcp->mb[1]; 5687 mb[3] = mcp->mb[3]; 5688 mb[4] = mcp->mb[4]; 5689 } 5690 5691 if (rval != QLA_SUCCESS) { 5692 ql_dbg(ql_dbg_mbx, vha, 0x10cd, "Failed=%x.\n", rval); 5693 } else { 5694 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10cc, 5695 "Done %s.\n", __func__); 5696 } 5697 5698 return rval; 5699 } 5700 5701 int 5702 qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp) 5703 { 5704 int rval = QLA_FUNCTION_FAILED; 5705 struct qla_hw_data *ha = vha->hw; 5706 uint8_t byte; 5707 5708 if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha)) { 5709 ql_dbg(ql_dbg_mbx, vha, 0x1150, 5710 "Thermal not supported by this card.\n"); 5711 return rval; 5712 } 5713 5714 if (IS_QLA25XX(ha)) { 5715 if (ha->pdev->subsystem_vendor 
== PCI_VENDOR_ID_QLOGIC && 5716 ha->pdev->subsystem_device == 0x0175) { 5717 rval = qla2x00_read_sfp(vha, 0, &byte, 5718 0x98, 0x1, 1, BIT_13|BIT_0); 5719 *temp = byte; 5720 return rval; 5721 } 5722 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP && 5723 ha->pdev->subsystem_device == 0x338e) { 5724 rval = qla2x00_read_sfp(vha, 0, &byte, 5725 0x98, 0x1, 1, BIT_15|BIT_14|BIT_0); 5726 *temp = byte; 5727 return rval; 5728 } 5729 ql_dbg(ql_dbg_mbx, vha, 0x10c9, 5730 "Thermal not supported by this card.\n"); 5731 return rval; 5732 } 5733 5734 if (IS_QLA82XX(ha)) { 5735 *temp = qla82xx_read_temperature(vha); 5736 rval = QLA_SUCCESS; 5737 return rval; 5738 } else if (IS_QLA8044(ha)) { 5739 *temp = qla8044_read_temperature(vha); 5740 rval = QLA_SUCCESS; 5741 return rval; 5742 } 5743 5744 rval = qla2x00_read_asic_temperature(vha, temp); 5745 return rval; 5746 } 5747 5748 int 5749 qla82xx_mbx_intr_enable(scsi_qla_host_t *vha) 5750 { 5751 int rval; 5752 struct qla_hw_data *ha = vha->hw; 5753 mbx_cmd_t mc; 5754 mbx_cmd_t *mcp = &mc; 5755 5756 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1017, 5757 "Entered %s.\n", __func__); 5758 5759 if (!IS_FWI2_CAPABLE(ha)) 5760 return QLA_FUNCTION_FAILED; 5761 5762 memset(mcp, 0, sizeof(mbx_cmd_t)); 5763 mcp->mb[0] = MBC_TOGGLE_INTERRUPT; 5764 mcp->mb[1] = 1; 5765 5766 mcp->out_mb = MBX_1|MBX_0; 5767 mcp->in_mb = MBX_0; 5768 mcp->tov = MBX_TOV_SECONDS; 5769 mcp->flags = 0; 5770 5771 rval = qla2x00_mailbox_command(vha, mcp); 5772 if (rval != QLA_SUCCESS) { 5773 ql_dbg(ql_dbg_mbx, vha, 0x1016, 5774 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5775 } else { 5776 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100e, 5777 "Done %s.\n", __func__); 5778 } 5779 5780 return rval; 5781 } 5782 5783 int 5784 qla82xx_mbx_intr_disable(scsi_qla_host_t *vha) 5785 { 5786 int rval; 5787 struct qla_hw_data *ha = vha->hw; 5788 mbx_cmd_t mc; 5789 mbx_cmd_t *mcp = &mc; 5790 5791 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100d, 5792 "Entered %s.\n", __func__); 5793 5794 if (!IS_P3P_TYPE(ha)) 5795 return QLA_FUNCTION_FAILED; 5796 5797 memset(mcp, 0, sizeof(mbx_cmd_t)); 5798 mcp->mb[0] = MBC_TOGGLE_INTERRUPT; 5799 mcp->mb[1] = 0; 5800 5801 mcp->out_mb = MBX_1|MBX_0; 5802 mcp->in_mb = MBX_0; 5803 mcp->tov = MBX_TOV_SECONDS; 5804 mcp->flags = 0; 5805 5806 rval = qla2x00_mailbox_command(vha, mcp); 5807 if (rval != QLA_SUCCESS) { 5808 ql_dbg(ql_dbg_mbx, vha, 0x100c, 5809 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5810 } else { 5811 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100b, 5812 "Done %s.\n", __func__); 5813 } 5814 5815 return rval; 5816 } 5817 5818 int 5819 qla82xx_md_get_template_size(scsi_qla_host_t *vha) 5820 { 5821 struct qla_hw_data *ha = vha->hw; 5822 mbx_cmd_t mc; 5823 mbx_cmd_t *mcp = &mc; 5824 int rval = QLA_FUNCTION_FAILED; 5825 5826 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111f, 5827 "Entered %s.\n", __func__); 5828 5829 memset(mcp->mb, 0 , sizeof(mcp->mb)); 5830 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); 5831 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); 5832 mcp->mb[2] = LSW(RQST_TMPLT_SIZE); 5833 mcp->mb[3] = MSW(RQST_TMPLT_SIZE); 5834 5835 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 5836 mcp->in_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8| 5837 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 5838 5839 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; 5840 mcp->tov = MBX_TOV_SECONDS; 5841 rval = qla2x00_mailbox_command(vha, mcp); 5842 5843 /* Always copy back return mailbox values. 
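On success, mb[2]/mb[3] hold the 32-bit minidump template size; on failure, mb[0]/mb[1] and mb[2]/mb[3] identify the failed command and subcode logged below.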
*/ 5844 if (rval != QLA_SUCCESS) { 5845 ql_dbg(ql_dbg_mbx, vha, 0x1120, 5846 "mailbox command FAILED=0x%x, subcode=%x.\n", 5847 (mcp->mb[1] << 16) | mcp->mb[0], 5848 (mcp->mb[3] << 16) | mcp->mb[2]); 5849 } else { 5850 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1121, 5851 "Done %s.\n", __func__); 5852 ha->md_template_size = ((mcp->mb[3] << 16) | mcp->mb[2]); 5853 if (!ha->md_template_size) { 5854 ql_dbg(ql_dbg_mbx, vha, 0x1122, 5855 "Null template size obtained.\n"); 5856 rval = QLA_FUNCTION_FAILED; 5857 } 5858 } 5859 return rval; 5860 } 5861 5862 int 5863 qla82xx_md_get_template(scsi_qla_host_t *vha) 5864 { 5865 struct qla_hw_data *ha = vha->hw; 5866 mbx_cmd_t mc; 5867 mbx_cmd_t *mcp = &mc; 5868 int rval = QLA_FUNCTION_FAILED; 5869 5870 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1123, 5871 "Entered %s.\n", __func__); 5872 5873 ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev, 5874 ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL); 5875 if (!ha->md_tmplt_hdr) { 5876 ql_log(ql_log_warn, vha, 0x1124, 5877 "Unable to allocate memory for Minidump template.\n"); 5878 return rval; 5879 } 5880 5881 memset(mcp->mb, 0 , sizeof(mcp->mb)); 5882 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); 5883 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); 5884 mcp->mb[2] = LSW(RQST_TMPLT); 5885 mcp->mb[3] = MSW(RQST_TMPLT); 5886 mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma)); 5887 mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma)); 5888 mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma)); 5889 mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma)); 5890 mcp->mb[8] = LSW(ha->md_template_size); 5891 mcp->mb[9] = MSW(ha->md_template_size); 5892 5893 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; 5894 mcp->tov = MBX_TOV_SECONDS; 5895 mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8| 5896 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 5897 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0; 5898 rval = qla2x00_mailbox_command(vha, mcp); 5899 5900 if (rval != QLA_SUCCESS) { 5901 ql_dbg(ql_dbg_mbx, vha, 0x1125, 5902 "mailbox command FAILED=0x%x, subcode=%x.\n", 5903 ((mcp->mb[1] << 16) | mcp->mb[0]), 5904 ((mcp->mb[3] << 16) | mcp->mb[2])); 5905 } else 5906 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1126, 5907 "Done %s.\n", __func__); 5908 return rval; 5909 } 5910 5911 int 5912 qla8044_md_get_template(scsi_qla_host_t *vha) 5913 { 5914 struct qla_hw_data *ha = vha->hw; 5915 mbx_cmd_t mc; 5916 mbx_cmd_t *mcp = &mc; 5917 int rval = QLA_FUNCTION_FAILED; 5918 int offset = 0, size = MINIDUMP_SIZE_36K; 5919 5920 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11f, 5921 "Entered %s.\n", __func__); 5922 5923 ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev, 5924 ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL); 5925 if (!ha->md_tmplt_hdr) { 5926 ql_log(ql_log_warn, vha, 0xb11b, 5927 "Unable to allocate memory for Minidump template.\n"); 5928 return rval; 5929 } 5930 5931 memset(mcp->mb, 0 , sizeof(mcp->mb)); 5932 while (offset < ha->md_template_size) { 5933 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); 5934 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); 5935 mcp->mb[2] = LSW(RQST_TMPLT); 5936 mcp->mb[3] = MSW(RQST_TMPLT); 5937 mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma + offset)); 5938 mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma + offset)); 5939 mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma + offset)); 5940 mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma + offset)); 5941 mcp->mb[8] = LSW(size); 5942 mcp->mb[9] = MSW(size); 5943 mcp->mb[10] = offset & 0x0000FFFF; 5944 mcp->mb[11] = offset & 0xFFFF0000; 5945 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; 
5946 mcp->tov = MBX_TOV_SECONDS; 5947 mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8| 5948 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 5949 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0; 5950 rval = qla2x00_mailbox_command(vha, mcp); 5951 5952 if (rval != QLA_SUCCESS) { 5953 ql_dbg(ql_dbg_mbx, vha, 0xb11c, 5954 "mailbox command FAILED=0x%x, subcode=%x.\n", 5955 ((mcp->mb[1] << 16) | mcp->mb[0]), 5956 ((mcp->mb[3] << 16) | mcp->mb[2])); 5957 return rval; 5958 } else 5959 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11d, 5960 "Done %s.\n", __func__); 5961 offset = offset + size; 5962 } 5963 return rval; 5964 } 5965 5966 int 5967 qla81xx_set_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg) 5968 { 5969 int rval; 5970 struct qla_hw_data *ha = vha->hw; 5971 mbx_cmd_t mc; 5972 mbx_cmd_t *mcp = &mc; 5973 5974 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha)) 5975 return QLA_FUNCTION_FAILED; 5976 5977 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1133, 5978 "Entered %s.\n", __func__); 5979 5980 memset(mcp, 0, sizeof(mbx_cmd_t)); 5981 mcp->mb[0] = MBC_SET_LED_CONFIG; 5982 mcp->mb[1] = led_cfg[0]; 5983 mcp->mb[2] = led_cfg[1]; 5984 if (IS_QLA8031(ha)) { 5985 mcp->mb[3] = led_cfg[2]; 5986 mcp->mb[4] = led_cfg[3]; 5987 mcp->mb[5] = led_cfg[4]; 5988 mcp->mb[6] = led_cfg[5]; 5989 } 5990 5991 mcp->out_mb = MBX_2|MBX_1|MBX_0; 5992 if (IS_QLA8031(ha)) 5993 mcp->out_mb |= MBX_6|MBX_5|MBX_4|MBX_3; 5994 mcp->in_mb = MBX_0; 5995 mcp->tov = MBX_TOV_SECONDS; 5996 mcp->flags = 0; 5997 5998 rval = qla2x00_mailbox_command(vha, mcp); 5999 if (rval != QLA_SUCCESS) { 6000 ql_dbg(ql_dbg_mbx, vha, 0x1134, 6001 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 6002 } else { 6003 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1135, 6004 "Done %s.\n", __func__); 6005 } 6006 6007 return rval; 6008 } 6009 6010 int 6011 qla81xx_get_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg) 6012 { 6013 int rval; 6014 struct qla_hw_data *ha = vha->hw; 6015 mbx_cmd_t mc; 6016 mbx_cmd_t *mcp = &mc; 6017 6018 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha)) 6019 return QLA_FUNCTION_FAILED; 6020 6021 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1136, 6022 "Entered %s.\n", __func__); 6023 6024 memset(mcp, 0, sizeof(mbx_cmd_t)); 6025 mcp->mb[0] = MBC_GET_LED_CONFIG; 6026 6027 mcp->out_mb = MBX_0; 6028 mcp->in_mb = MBX_2|MBX_1|MBX_0; 6029 if (IS_QLA8031(ha)) 6030 mcp->in_mb |= MBX_6|MBX_5|MBX_4|MBX_3; 6031 mcp->tov = MBX_TOV_SECONDS; 6032 mcp->flags = 0; 6033 6034 rval = qla2x00_mailbox_command(vha, mcp); 6035 if (rval != QLA_SUCCESS) { 6036 ql_dbg(ql_dbg_mbx, vha, 0x1137, 6037 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 6038 } else { 6039 led_cfg[0] = mcp->mb[1]; 6040 led_cfg[1] = mcp->mb[2]; 6041 if (IS_QLA8031(ha)) { 6042 led_cfg[2] = mcp->mb[3]; 6043 led_cfg[3] = mcp->mb[4]; 6044 led_cfg[4] = mcp->mb[5]; 6045 led_cfg[5] = mcp->mb[6]; 6046 } 6047 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1138, 6048 "Done %s.\n", __func__); 6049 } 6050 6051 return rval; 6052 } 6053 6054 int 6055 qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable) 6056 { 6057 int rval; 6058 struct qla_hw_data *ha = vha->hw; 6059 mbx_cmd_t mc; 6060 mbx_cmd_t *mcp = &mc; 6061 6062 if (!IS_P3P_TYPE(ha)) 6063 return QLA_FUNCTION_FAILED; 6064 6065 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1127, 6066 "Entered %s.\n", __func__); 6067 6068 memset(mcp, 0, sizeof(mbx_cmd_t)); 6069 mcp->mb[0] = MBC_SET_LED_CONFIG; 6070 if (enable) 6071 mcp->mb[7] = 0xE; 6072 else 6073 mcp->mb[7] = 0xD; 6074 6075 mcp->out_mb = MBX_7|MBX_0; 6076 mcp->in_mb = MBX_0; 6077 mcp->tov = MBX_TOV_SECONDS; 6078 mcp->flags = 0; 6079 6080 rval = 
qla2x00_mailbox_command(vha, mcp); 6081 if (rval != QLA_SUCCESS) { 6082 ql_dbg(ql_dbg_mbx, vha, 0x1128, 6083 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 6084 } else { 6085 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1129, 6086 "Done %s.\n", __func__); 6087 } 6088 6089 return rval; 6090 } 6091 6092 int 6093 qla83xx_wr_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data) 6094 { 6095 int rval; 6096 struct qla_hw_data *ha = vha->hw; 6097 mbx_cmd_t mc; 6098 mbx_cmd_t *mcp = &mc; 6099 6100 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 6101 return QLA_FUNCTION_FAILED; 6102 6103 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1130, 6104 "Entered %s.\n", __func__); 6105 6106 mcp->mb[0] = MBC_WRITE_REMOTE_REG; 6107 mcp->mb[1] = LSW(reg); 6108 mcp->mb[2] = MSW(reg); 6109 mcp->mb[3] = LSW(data); 6110 mcp->mb[4] = MSW(data); 6111 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 6112 6113 mcp->in_mb = MBX_1|MBX_0; 6114 mcp->tov = MBX_TOV_SECONDS; 6115 mcp->flags = 0; 6116 rval = qla2x00_mailbox_command(vha, mcp); 6117 6118 if (rval != QLA_SUCCESS) { 6119 ql_dbg(ql_dbg_mbx, vha, 0x1131, 6120 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 6121 } else { 6122 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1132, 6123 "Done %s.\n", __func__); 6124 } 6125 6126 return rval; 6127 } 6128 6129 int 6130 qla2x00_port_logout(scsi_qla_host_t *vha, struct fc_port *fcport) 6131 { 6132 int rval; 6133 struct qla_hw_data *ha = vha->hw; 6134 mbx_cmd_t mc; 6135 mbx_cmd_t *mcp = &mc; 6136 6137 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 6138 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113b, 6139 "Implicit LOGO Unsupported.\n"); 6140 return QLA_FUNCTION_FAILED; 6141 } 6142 6143 6144 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113c, 6145 "Entering %s.\n", __func__); 6146 6147 /* Perform Implicit LOGO. */ 6148 mcp->mb[0] = MBC_PORT_LOGOUT; 6149 mcp->mb[1] = fcport->loop_id; 6150 mcp->mb[10] = BIT_15; 6151 mcp->out_mb = MBX_10|MBX_1|MBX_0; 6152 mcp->in_mb = MBX_0; 6153 mcp->tov = MBX_TOV_SECONDS; 6154 mcp->flags = 0; 6155 rval = qla2x00_mailbox_command(vha, mcp); 6156 if (rval != QLA_SUCCESS) 6157 ql_dbg(ql_dbg_mbx, vha, 0x113d, 6158 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 6159 else 6160 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113e, 6161 "Done %s.\n", __func__); 6162 6163 return rval; 6164 } 6165 6166 int 6167 qla83xx_rd_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t *data) 6168 { 6169 int rval; 6170 mbx_cmd_t mc; 6171 mbx_cmd_t *mcp = &mc; 6172 struct qla_hw_data *ha = vha->hw; 6173 unsigned long retry_max_time = jiffies + (2 * HZ); 6174 6175 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 6176 return QLA_FUNCTION_FAILED; 6177 6178 ql_dbg(ql_dbg_mbx, vha, 0x114b, "Entered %s.\n", __func__); 6179 6180 retry_rd_reg: 6181 mcp->mb[0] = MBC_READ_REMOTE_REG; 6182 mcp->mb[1] = LSW(reg); 6183 mcp->mb[2] = MSW(reg); 6184 mcp->out_mb = MBX_2|MBX_1|MBX_0; 6185 mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0; 6186 mcp->tov = MBX_TOV_SECONDS; 6187 mcp->flags = 0; 6188 rval = qla2x00_mailbox_command(vha, mcp); 6189 6190 if (rval != QLA_SUCCESS) { 6191 ql_dbg(ql_dbg_mbx, vha, 0x114c, 6192 "Failed=%x mb[0]=%x mb[1]=%x.\n", 6193 rval, mcp->mb[0], mcp->mb[1]); 6194 } else { 6195 *data = (mcp->mb[3] | (mcp->mb[4] << 16)); 6196 if (*data == QLA8XXX_BAD_VALUE) { 6197 /* 6198 * During soft-reset CAMRAM register reads might 6199 * return 0xbad0bad0. So retry for MAX of 2 sec 6200 * while reading camram registers. 6201 */ 6202 if (time_after(jiffies, retry_max_time)) { 6203 ql_dbg(ql_dbg_mbx, vha, 0x1141, 6204 "Failure to read CAMRAM register. 
" 6205 "data=0x%x.\n", *data); 6206 return QLA_FUNCTION_FAILED; 6207 } 6208 msleep(100); 6209 goto retry_rd_reg; 6210 } 6211 ql_dbg(ql_dbg_mbx, vha, 0x1142, "Done %s.\n", __func__); 6212 } 6213 6214 return rval; 6215 } 6216 6217 int 6218 qla83xx_restart_nic_firmware(scsi_qla_host_t *vha) 6219 { 6220 int rval; 6221 mbx_cmd_t mc; 6222 mbx_cmd_t *mcp = &mc; 6223 struct qla_hw_data *ha = vha->hw; 6224 6225 if (!IS_QLA83XX(ha)) 6226 return QLA_FUNCTION_FAILED; 6227 6228 ql_dbg(ql_dbg_mbx, vha, 0x1143, "Entered %s.\n", __func__); 6229 6230 mcp->mb[0] = MBC_RESTART_NIC_FIRMWARE; 6231 mcp->out_mb = MBX_0; 6232 mcp->in_mb = MBX_1|MBX_0; 6233 mcp->tov = MBX_TOV_SECONDS; 6234 mcp->flags = 0; 6235 rval = qla2x00_mailbox_command(vha, mcp); 6236 6237 if (rval != QLA_SUCCESS) { 6238 ql_dbg(ql_dbg_mbx, vha, 0x1144, 6239 "Failed=%x mb[0]=%x mb[1]=%x.\n", 6240 rval, mcp->mb[0], mcp->mb[1]); 6241 qla2xxx_dump_fw(vha); 6242 } else { 6243 ql_dbg(ql_dbg_mbx, vha, 0x1145, "Done %s.\n", __func__); 6244 } 6245 6246 return rval; 6247 } 6248 6249 int 6250 qla83xx_access_control(scsi_qla_host_t *vha, uint16_t options, 6251 uint32_t start_addr, uint32_t end_addr, uint16_t *sector_size) 6252 { 6253 int rval; 6254 mbx_cmd_t mc; 6255 mbx_cmd_t *mcp = &mc; 6256 uint8_t subcode = (uint8_t)options; 6257 struct qla_hw_data *ha = vha->hw; 6258 6259 if (!IS_QLA8031(ha)) 6260 return QLA_FUNCTION_FAILED; 6261 6262 ql_dbg(ql_dbg_mbx, vha, 0x1146, "Entered %s.\n", __func__); 6263 6264 mcp->mb[0] = MBC_SET_ACCESS_CONTROL; 6265 mcp->mb[1] = options; 6266 mcp->out_mb = MBX_1|MBX_0; 6267 if (subcode & BIT_2) { 6268 mcp->mb[2] = LSW(start_addr); 6269 mcp->mb[3] = MSW(start_addr); 6270 mcp->mb[4] = LSW(end_addr); 6271 mcp->mb[5] = MSW(end_addr); 6272 mcp->out_mb |= MBX_5|MBX_4|MBX_3|MBX_2; 6273 } 6274 mcp->in_mb = MBX_2|MBX_1|MBX_0; 6275 if (!(subcode & (BIT_2 | BIT_5))) 6276 mcp->in_mb |= MBX_4|MBX_3; 6277 mcp->tov = MBX_TOV_SECONDS; 6278 mcp->flags = 0; 6279 rval = qla2x00_mailbox_command(vha, mcp); 6280 6281 if (rval != QLA_SUCCESS) { 6282 ql_dbg(ql_dbg_mbx, vha, 0x1147, 6283 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[4]=%x.\n", 6284 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3], 6285 mcp->mb[4]); 6286 qla2xxx_dump_fw(vha); 6287 } else { 6288 if (subcode & BIT_5) 6289 *sector_size = mcp->mb[1]; 6290 else if (subcode & (BIT_6 | BIT_7)) { 6291 ql_dbg(ql_dbg_mbx, vha, 0x1148, 6292 "Driver-lock id=%x%x", mcp->mb[4], mcp->mb[3]); 6293 } else if (subcode & (BIT_3 | BIT_4)) { 6294 ql_dbg(ql_dbg_mbx, vha, 0x1149, 6295 "Flash-lock id=%x%x", mcp->mb[4], mcp->mb[3]); 6296 } 6297 ql_dbg(ql_dbg_mbx, vha, 0x114a, "Done %s.\n", __func__); 6298 } 6299 6300 return rval; 6301 } 6302 6303 int 6304 qla2x00_dump_mctp_data(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr, 6305 uint32_t size) 6306 { 6307 int rval; 6308 mbx_cmd_t mc; 6309 mbx_cmd_t *mcp = &mc; 6310 6311 if (!IS_MCTP_CAPABLE(vha->hw)) 6312 return QLA_FUNCTION_FAILED; 6313 6314 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114f, 6315 "Entered %s.\n", __func__); 6316 6317 mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED; 6318 mcp->mb[1] = LSW(addr); 6319 mcp->mb[2] = MSW(req_dma); 6320 mcp->mb[3] = LSW(req_dma); 6321 mcp->mb[4] = MSW(size); 6322 mcp->mb[5] = LSW(size); 6323 mcp->mb[6] = MSW(MSD(req_dma)); 6324 mcp->mb[7] = LSW(MSD(req_dma)); 6325 mcp->mb[8] = MSW(addr); 6326 /* Setting RAM ID to valid */ 6327 /* For MCTP RAM ID is 0x40 */ 6328 mcp->mb[10] = BIT_7 | 0x40; 6329 6330 mcp->out_mb |= MBX_10|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1| 6331 MBX_0; 6332 6333 mcp->in_mb = 

int
qla26xx_dport_diagnostics(scsi_qla_host_t *vha,
    void *dd_buf, uint size, uint options)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	dma_addr_t dd_dma;

	if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) &&
	    !IS_QLA28XX(vha->hw))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x119f,
	    "Entered %s.\n", __func__);

	dd_dma = dma_map_single(&vha->hw->pdev->dev,
	    dd_buf, size, DMA_FROM_DEVICE);
	if (dma_mapping_error(&vha->hw->pdev->dev, dd_dma)) {
		ql_log(ql_log_warn, vha, 0x1194, "Failed to map dma buffer.\n");
		return QLA_MEMORY_ALLOC_FAILED;
	}

	memset(dd_buf, 0, size);

	mcp->mb[0] = MBC_DPORT_DIAGNOSTICS;
	mcp->mb[1] = options;
	mcp->mb[2] = MSW(LSD(dd_dma));
	mcp->mb[3] = LSW(LSD(dd_dma));
	mcp->mb[6] = MSW(MSD(dd_dma));
	mcp->mb[7] = LSW(MSD(dd_dma));
	mcp->mb[8] = size;
	mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->buf_size = size;
	mcp->flags = MBX_DMA_IN;
	mcp->tov = MBX_TOV_SECONDS * 4;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1195, "Failed=%x.\n", rval);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1196,
		    "Done %s.\n", __func__);
	}

	dma_unmap_single(&vha->hw->pdev->dev, dd_dma,
	    size, DMA_FROM_DEVICE);

	return rval;
}

static void qla2x00_async_mb_sp_done(srb_t *sp, int res)
{
	sp->u.iocb_cmd.u.mbx.rc = res;

	complete(&sp->u.iocb_cmd.u.mbx.comp);
	/* don't free sp here. Let the caller do the free */
}

/*
 * This mailbox uses the IOCB interface to send a mailbox command.  This
 * allows non-critical (non chip-setup) commands to go out in parallel.
 */
int qla24xx_send_mb_cmd(struct scsi_qla_host *vha, mbx_cmd_t *mcp)
{
	int rval = QLA_FUNCTION_FAILED;
	srb_t *sp;
	struct srb_iocb *c;

	if (!vha->hw->flags.fw_started)
		goto done;

	sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
	if (!sp)
		goto done;

	sp->type = SRB_MB_IOCB;
	sp->name = mb_to_str(mcp->mb[0]);

	c = &sp->u.iocb_cmd;
	c->timeout = qla2x00_async_iocb_timeout;
	init_completion(&c->u.mbx.comp);

	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	memcpy(sp->u.iocb_cmd.u.mbx.out_mb, mcp->mb, SIZEOF_IOCB_MB_REG);

	sp->done = qla2x00_async_mb_sp_done;

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1018,
		    "%s: %s Failed submission. %x.\n",
		    __func__, sp->name, rval);
		goto done_free_sp;
	}

	ql_dbg(ql_dbg_mbx, vha, 0x113f, "MB:%s hndl %x submitted\n",
	    sp->name, sp->handle);

	wait_for_completion(&c->u.mbx.comp);
	memcpy(mcp->mb, sp->u.iocb_cmd.u.mbx.in_mb, SIZEOF_IOCB_MB_REG);

	rval = c->u.mbx.rc;
	switch (rval) {
	case QLA_FUNCTION_TIMEOUT:
		ql_dbg(ql_dbg_mbx, vha, 0x1140, "%s: %s Timeout. %x.\n",
		    __func__, sp->name, rval);
		break;
	case QLA_SUCCESS:
		ql_dbg(ql_dbg_mbx, vha, 0x119d, "%s: %s done.\n",
		    __func__, sp->name);
		break;
	default:
		ql_dbg(ql_dbg_mbx, vha, 0x119e, "%s: %s Failed. %x.\n",
		    __func__, sp->name, rval);
		break;
	}

done_free_sp:
	sp->free(sp);
done:
	return rval;
}
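
/*
 * Illustrative sketch only (not called by the driver): query the firmware
 * state over the MB-IOCB path instead of the polled mailbox interface.
 * Assumes mb[1] carries the firmware state on completion, as it does for
 * the classic MBC_GET_FIRMWARE_STATE mailbox command.
 */
static int __maybe_unused
qla24xx_example_fw_state_mbiocb(struct scsi_qla_host *vha, uint16_t *state)
{
	mbx_cmd_t mc;
	int rval;

	memset(&mc, 0, sizeof(mc));
	mc.mb[0] = MBC_GET_FIRMWARE_STATE;

	rval = qla24xx_send_mb_cmd(vha, &mc);
	if (rval == QLA_SUCCESS)
		*state = mc.mb[1];

	return rval;
}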
%x.\n", 6458 __func__, sp->name, rval); 6459 break; 6460 case QLA_SUCCESS: 6461 ql_dbg(ql_dbg_mbx, vha, 0x119d, "%s: %s done.\n", 6462 __func__, sp->name); 6463 break; 6464 default: 6465 ql_dbg(ql_dbg_mbx, vha, 0x119e, "%s: %s Failed. %x.\n", 6466 __func__, sp->name, rval); 6467 break; 6468 } 6469 6470 done_free_sp: 6471 sp->free(sp); 6472 done: 6473 return rval; 6474 } 6475 6476 /* 6477 * qla24xx_gpdb_wait 6478 * NOTE: Do not call this routine from DPC thread 6479 */ 6480 int qla24xx_gpdb_wait(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt) 6481 { 6482 int rval = QLA_FUNCTION_FAILED; 6483 dma_addr_t pd_dma; 6484 struct port_database_24xx *pd; 6485 struct qla_hw_data *ha = vha->hw; 6486 mbx_cmd_t mc; 6487 6488 if (!vha->hw->flags.fw_started) 6489 goto done; 6490 6491 pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma); 6492 if (pd == NULL) { 6493 ql_log(ql_log_warn, vha, 0xd047, 6494 "Failed to allocate port database structure.\n"); 6495 goto done_free_sp; 6496 } 6497 6498 memset(&mc, 0, sizeof(mc)); 6499 mc.mb[0] = MBC_GET_PORT_DATABASE; 6500 mc.mb[1] = fcport->loop_id; 6501 mc.mb[2] = MSW(pd_dma); 6502 mc.mb[3] = LSW(pd_dma); 6503 mc.mb[6] = MSW(MSD(pd_dma)); 6504 mc.mb[7] = LSW(MSD(pd_dma)); 6505 mc.mb[9] = vha->vp_idx; 6506 mc.mb[10] = opt; 6507 6508 rval = qla24xx_send_mb_cmd(vha, &mc); 6509 if (rval != QLA_SUCCESS) { 6510 ql_dbg(ql_dbg_mbx, vha, 0x1193, 6511 "%s: %8phC fail\n", __func__, fcport->port_name); 6512 goto done_free_sp; 6513 } 6514 6515 rval = __qla24xx_parse_gpdb(vha, fcport, pd); 6516 6517 ql_dbg(ql_dbg_mbx, vha, 0x1197, "%s: %8phC done\n", 6518 __func__, fcport->port_name); 6519 6520 done_free_sp: 6521 if (pd) 6522 dma_pool_free(ha->s_dma_pool, pd, pd_dma); 6523 done: 6524 return rval; 6525 } 6526 6527 int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, 6528 struct port_database_24xx *pd) 6529 { 6530 int rval = QLA_SUCCESS; 6531 uint64_t zero = 0; 6532 u8 current_login_state, last_login_state; 6533 6534 if (NVME_TARGET(vha->hw, fcport)) { 6535 current_login_state = pd->current_login_state >> 4; 6536 last_login_state = pd->last_login_state >> 4; 6537 } else { 6538 current_login_state = pd->current_login_state & 0xf; 6539 last_login_state = pd->last_login_state & 0xf; 6540 } 6541 6542 /* Check for logged in state. */ 6543 if (current_login_state != PDS_PRLI_COMPLETE) { 6544 ql_dbg(ql_dbg_mbx, vha, 0x119a, 6545 "Unable to verify login-state (%x/%x) for loop_id %x.\n", 6546 current_login_state, last_login_state, fcport->loop_id); 6547 rval = QLA_FUNCTION_FAILED; 6548 goto gpd_error_out; 6549 } 6550 6551 if (fcport->loop_id == FC_NO_LOOP_ID || 6552 (memcmp(fcport->port_name, (uint8_t *)&zero, 8) && 6553 memcmp(fcport->port_name, pd->port_name, 8))) { 6554 /* We lost the device mid way. */ 6555 rval = QLA_NOT_LOGGED_IN; 6556 goto gpd_error_out; 6557 } 6558 6559 /* Names are little-endian. */ 6560 memcpy(fcport->node_name, pd->node_name, WWN_SIZE); 6561 memcpy(fcport->port_name, pd->port_name, WWN_SIZE); 6562 6563 /* Get port_id of device. 

/*
 * qla24xx_gidlist_wait
 * NOTE: don't call this routine from DPC thread.
 */
int qla24xx_gidlist_wait(struct scsi_qla_host *vha,
    void *id_list, dma_addr_t id_list_dma, uint16_t *entries)
{
	int rval = QLA_FUNCTION_FAILED;
	mbx_cmd_t mc;

	if (!vha->hw->flags.fw_started)
		goto done;

	memset(&mc, 0, sizeof(mc));
	mc.mb[0] = MBC_GET_ID_LIST;
	mc.mb[2] = MSW(id_list_dma);
	mc.mb[3] = LSW(id_list_dma);
	mc.mb[6] = MSW(MSD(id_list_dma));
	mc.mb[7] = LSW(MSD(id_list_dma));
	mc.mb[8] = 0;
	mc.mb[9] = vha->vp_idx;

	rval = qla24xx_send_mb_cmd(vha, &mc);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x119b,
		    "%s: fail\n", __func__);
	} else {
		*entries = mc.mb[1];
		ql_dbg(ql_dbg_mbx, vha, 0x119c,
		    "%s: done\n", __func__);
	}
done:
	return rval;
}

int qla27xx_set_zio_threshold(scsi_qla_host_t *vha, uint16_t value)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1200,
	    "Entered %s\n", __func__);

	memset(mcp->mb, 0, sizeof(mcp->mb));
	mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
	mcp->mb[1] = 1;
	mcp->mb[2] = value;
	mcp->out_mb = MBX_2 | MBX_1 | MBX_0;
	mcp->in_mb = MBX_2 | MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;

	rval = qla2x00_mailbox_command(vha, mcp);

	ql_dbg(ql_dbg_mbx, vha, 0x1201, "%s %x\n",
	    (rval != QLA_SUCCESS) ? "Failed" : "Done", rval);

	return rval;
}

int qla27xx_get_zio_threshold(scsi_qla_host_t *vha, uint16_t *value)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1203,
	    "Entered %s\n", __func__);

	memset(mcp->mb, 0, sizeof(mcp->mb));
	mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
	mcp->mb[1] = 0;
	mcp->out_mb = MBX_1 | MBX_0;
	mcp->in_mb = MBX_2 | MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;

	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval == QLA_SUCCESS)
		*value = mc.mb[2];

	ql_dbg(ql_dbg_mbx, vha, 0x1205, "%s %x\n",
	    (rval != QLA_SUCCESS) ? "Failed" : "Done", rval);

	return rval;
}
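
/*
 * Illustrative sketch only (not called by the driver): bump the ZIO
 * threshold by a caller-supplied delta using the get/set wrappers above.
 * mb[1]=0 selects "get" and mb[1]=1 selects "set" inside those helpers;
 * no clamping of the new value is attempted here.
 */
static int __maybe_unused
qla27xx_example_bump_zio_threshold(scsi_qla_host_t *vha, uint16_t delta)
{
	uint16_t cur = 0;
	int rval;

	rval = qla27xx_get_zio_threshold(vha, &cur);
	if (rval != QLA_SUCCESS)
		return rval;

	return qla27xx_set_zio_threshold(vha, cur + delta);
}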
"Failed" : "Done", rval); 6681 6682 return rval; 6683 } 6684 6685 int 6686 qla2x00_read_sfp_dev(struct scsi_qla_host *vha, char *buf, int count) 6687 { 6688 struct qla_hw_data *ha = vha->hw; 6689 uint16_t iter, addr, offset; 6690 dma_addr_t phys_addr; 6691 int rval, c; 6692 u8 *sfp_data; 6693 6694 memset(ha->sfp_data, 0, SFP_DEV_SIZE); 6695 addr = 0xa0; 6696 phys_addr = ha->sfp_data_dma; 6697 sfp_data = ha->sfp_data; 6698 offset = c = 0; 6699 6700 for (iter = 0; iter < SFP_DEV_SIZE / SFP_BLOCK_SIZE; iter++) { 6701 if (iter == 4) { 6702 /* Skip to next device address. */ 6703 addr = 0xa2; 6704 offset = 0; 6705 } 6706 6707 rval = qla2x00_read_sfp(vha, phys_addr, sfp_data, 6708 addr, offset, SFP_BLOCK_SIZE, BIT_1); 6709 if (rval != QLA_SUCCESS) { 6710 ql_log(ql_log_warn, vha, 0x706d, 6711 "Unable to read SFP data (%x/%x/%x).\n", rval, 6712 addr, offset); 6713 6714 return rval; 6715 } 6716 6717 if (buf && (c < count)) { 6718 u16 sz; 6719 6720 if ((count - c) >= SFP_BLOCK_SIZE) 6721 sz = SFP_BLOCK_SIZE; 6722 else 6723 sz = count - c; 6724 6725 memcpy(buf, sfp_data, sz); 6726 buf += SFP_BLOCK_SIZE; 6727 c += sz; 6728 } 6729 phys_addr += SFP_BLOCK_SIZE; 6730 sfp_data += SFP_BLOCK_SIZE; 6731 offset += SFP_BLOCK_SIZE; 6732 } 6733 6734 return rval; 6735 } 6736 6737 int qla24xx_res_count_wait(struct scsi_qla_host *vha, 6738 uint16_t *out_mb, int out_mb_sz) 6739 { 6740 int rval = QLA_FUNCTION_FAILED; 6741 mbx_cmd_t mc; 6742 6743 if (!vha->hw->flags.fw_started) 6744 goto done; 6745 6746 memset(&mc, 0, sizeof(mc)); 6747 mc.mb[0] = MBC_GET_RESOURCE_COUNTS; 6748 6749 rval = qla24xx_send_mb_cmd(vha, &mc); 6750 if (rval != QLA_SUCCESS) { 6751 ql_dbg(ql_dbg_mbx, vha, 0xffff, 6752 "%s: fail\n", __func__); 6753 } else { 6754 if (out_mb_sz <= SIZEOF_IOCB_MB_REG) 6755 memcpy(out_mb, mc.mb, out_mb_sz); 6756 else 6757 memcpy(out_mb, mc.mb, SIZEOF_IOCB_MB_REG); 6758 6759 ql_dbg(ql_dbg_mbx, vha, 0xffff, 6760 "%s: done\n", __func__); 6761 } 6762 done: 6763 return rval; 6764 } 6765 6766 int qla28xx_secure_flash_update(scsi_qla_host_t *vha, uint16_t opts, 6767 uint16_t region, uint32_t len, dma_addr_t sfub_dma_addr, 6768 uint32_t sfub_len) 6769 { 6770 int rval; 6771 mbx_cmd_t mc; 6772 mbx_cmd_t *mcp = &mc; 6773 6774 mcp->mb[0] = MBC_SECURE_FLASH_UPDATE; 6775 mcp->mb[1] = opts; 6776 mcp->mb[2] = region; 6777 mcp->mb[3] = MSW(len); 6778 mcp->mb[4] = LSW(len); 6779 mcp->mb[5] = MSW(sfub_dma_addr); 6780 mcp->mb[6] = LSW(sfub_dma_addr); 6781 mcp->mb[7] = MSW(MSD(sfub_dma_addr)); 6782 mcp->mb[8] = LSW(MSD(sfub_dma_addr)); 6783 mcp->mb[9] = sfub_len; 6784 mcp->out_mb = 6785 MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 6786 mcp->in_mb = MBX_2|MBX_1|MBX_0; 6787 mcp->tov = MBX_TOV_SECONDS; 6788 mcp->flags = 0; 6789 rval = qla2x00_mailbox_command(vha, mcp); 6790 6791 if (rval != QLA_SUCCESS) { 6792 ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s(%ld): failed rval 0x%x, %x %x %x", 6793 __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1], 6794 mcp->mb[2]); 6795 } 6796 6797 return rval; 6798 } 6799 6800 int qla2xxx_write_remote_register(scsi_qla_host_t *vha, uint32_t addr, 6801 uint32_t data) 6802 { 6803 int rval; 6804 mbx_cmd_t mc; 6805 mbx_cmd_t *mcp = &mc; 6806 6807 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8, 6808 "Entered %s.\n", __func__); 6809 6810 mcp->mb[0] = MBC_WRITE_REMOTE_REG; 6811 mcp->mb[1] = LSW(addr); 6812 mcp->mb[2] = MSW(addr); 6813 mcp->mb[3] = LSW(data); 6814 mcp->mb[4] = MSW(data); 6815 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 6816 mcp->in_mb = MBX_1|MBX_0; 6817 mcp->tov = MBX_TOV_SECONDS; 6818 

int qla2xxx_read_remote_register(scsi_qla_host_t *vha, uint32_t addr,
    uint32_t *data)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_READ_REMOTE_REG;
	mcp->mb[1] = LSW(addr);
	mcp->mb[2] = MSW(addr);
	mcp->out_mb = MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	*data = (uint32_t)((((uint32_t)mcp->mb[4]) << 16) | mcp->mb[3]);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x10e9,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
		    "Done %s.\n", __func__);
	}

	return rval;
}

int
ql26xx_led_config(scsi_qla_host_t *vha, uint16_t options, uint16_t *led)
{
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	int rval;

	if (!IS_QLA2031(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx, vha, 0x7070, "Entered %s (options=%x).\n",
	    __func__, options);

	mcp->mb[0] = MBC_SET_GET_FC_LED_CONFIG;
	mcp->mb[1] = options;
	mcp->out_mb = MBX_1|MBX_0;
	mcp->in_mb = MBX_1|MBX_0;
	if (options & BIT_0) {
		if (options & BIT_1) {
			mcp->mb[2] = led[2];
			mcp->out_mb |= MBX_2;
		}
		if (options & BIT_2) {
			mcp->mb[3] = led[0];
			mcp->out_mb |= MBX_3;
		}
		if (options & BIT_3) {
			mcp->mb[4] = led[1];
			mcp->out_mb |= MBX_4;
		}
	} else {
		mcp->in_mb |= MBX_4|MBX_3|MBX_2;
	}
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval) {
		ql_dbg(ql_dbg_mbx, vha, 0x7071, "Failed %s %x (mb=%x,%x)\n",
		    __func__, rval, mcp->mb[0], mcp->mb[1]);
		return rval;
	}

	if (options & BIT_0) {
		ha->beacon_blink_led = 0;
		ql_dbg(ql_dbg_mbx, vha, 0x7072, "Done %s\n", __func__);
	} else {
		led[2] = mcp->mb[2];
		led[0] = mcp->mb[3];
		led[1] = mcp->mb[4];
		ql_dbg(ql_dbg_mbx, vha, 0x7073, "Done %s (led=%x,%x,%x)\n",
		    __func__, led[0], led[1], led[2]);
	}

	return rval;
}
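
/*
 * Illustrative sketch only (not called by the driver): read back the FC
 * LED configuration with ql26xx_led_config().  Passing options=0 selects
 * the "get" path above, which fills the three LED words in "led"; setting
 * BIT_0 in options would select the "set" path instead.
 */
static int __maybe_unused
ql26xx_example_get_led_config(scsi_qla_host_t *vha, uint16_t *led)
{
	/* led[] must hold at least three words (see the helper above). */
	return ql26xx_led_config(vha, 0, led);
}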