// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2014 QLogic Corporation
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/delay.h>
#include <linux/gfp.h>

static struct mb_cmd_name {
	uint16_t cmd;
	const char *str;
} mb_str[] = {
	{MBC_GET_PORT_DATABASE, "GPDB"},
	{MBC_GET_ID_LIST, "GIDList"},
	{MBC_GET_LINK_PRIV_STATS, "Stats"},
	{MBC_GET_RESOURCE_COUNTS, "ResCnt"},
};

static const char *mb_to_str(uint16_t cmd)
{
	int i;
	struct mb_cmd_name *e;

	for (i = 0; i < ARRAY_SIZE(mb_str); i++) {
		e = mb_str + i;
		if (cmd == e->cmd)
			return e->str;
	}
	return "unknown";
}

static struct rom_cmd {
	uint16_t cmd;
} rom_cmds[] = {
	{ MBC_LOAD_RAM },
	{ MBC_EXECUTE_FIRMWARE },
	{ MBC_READ_RAM_WORD },
	{ MBC_MAILBOX_REGISTER_TEST },
	{ MBC_VERIFY_CHECKSUM },
	{ MBC_GET_FIRMWARE_VERSION },
	{ MBC_LOAD_RISC_RAM },
	{ MBC_DUMP_RISC_RAM },
	{ MBC_LOAD_RISC_RAM_EXTENDED },
	{ MBC_DUMP_RISC_RAM_EXTENDED },
	{ MBC_WRITE_RAM_WORD_EXTENDED },
	{ MBC_READ_RAM_EXTENDED },
	{ MBC_GET_RESOURCE_COUNTS },
	{ MBC_SET_FIRMWARE_OPTION },
	{ MBC_MID_INITIALIZE_FIRMWARE },
	{ MBC_GET_FIRMWARE_STATE },
	{ MBC_GET_MEM_OFFLOAD_CNTRL_STAT },
	{ MBC_GET_RETRY_COUNT },
	{ MBC_TRACE_CONTROL },
	{ MBC_INITIALIZE_MULTIQ },
	{ MBC_IOCB_COMMAND_A64 },
	{ MBC_GET_ADAPTER_LOOP_ID },
	{ MBC_READ_SFP },
	{ MBC_SET_RNID_PARAMS },
	{ MBC_GET_RNID_PARAMS },
	{ MBC_GET_SET_ZIO_THRESHOLD },
};

static int is_rom_cmd(uint16_t cmd)
{
	int i;
	struct rom_cmd *wc;

	for (i = 0; i < ARRAY_SIZE(rom_cmds); i++) {
		wc = rom_cmds + i;
		if (wc->cmd == cmd)
			return 1;
	}

	return 0;
}

/*
 * qla2x00_mailbox_command
 *	Issue mailbox command and wait for completion.
 *
 * Input:
 *	ha = adapter block pointer.
 *	mcp = driver internal mbx struct pointer.
 *
 * Output:
 *	mb[MAX_MAILBOX_REGISTER_COUNT] = returned mailbox data.
 *
 * Returns:
 *	0 : QLA_SUCCESS = cmd performed successfully
 *	1 : QLA_FUNCTION_FAILED (error encountered)
 *	6 : QLA_FUNCTION_TIMEOUT (timeout condition encountered)
 *
 * Context:
 *	Kernel context.
 */
static int
qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
{
	int rval, i;
	unsigned long flags = 0;
	device_reg_t *reg;
	uint8_t abort_active, eeh_delay;
	uint8_t io_lock_on;
	uint16_t command = 0;
	uint16_t *iptr;
	__le16 __iomem *optr;
	uint32_t cnt;
	uint32_t mboxes;
	unsigned long wait_time;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
	u32 chip_reset;


	ql_dbg(ql_dbg_mbx, vha, 0x1000, "Entered %s.\n", __func__);

	if (ha->pdev->error_state == pci_channel_io_perm_failure) {
		ql_log(ql_log_warn, vha, 0x1001,
		    "PCI channel failed permanently, exiting.\n");
		return QLA_FUNCTION_TIMEOUT;
	}

	if (vha->device_flags & DFLG_DEV_FAILED) {
		ql_log(ql_log_warn, vha, 0x1002,
		    "Device in failed state, exiting.\n");
		return QLA_FUNCTION_TIMEOUT;
	}

	/* if PCI error, then avoid mbx processing.*/
	if (test_bit(PFLG_DISCONNECTED, &base_vha->dpc_flags) &&
	    test_bit(UNLOADING, &base_vha->dpc_flags)) {
		ql_log(ql_log_warn, vha, 0xd04e,
		    "PCI error, exiting.\n");
		return QLA_FUNCTION_TIMEOUT;
	}
	eeh_delay = 0;
	reg = ha->iobase;
	io_lock_on = base_vha->flags.init_done;

	rval = QLA_SUCCESS;
	abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
	chip_reset = ha->chip_reset;

	if (ha->flags.pci_channel_io_perm_failure) {
		ql_log(ql_log_warn, vha, 0x1003,
		    "Perm failure on EEH timeout MBX, exiting.\n");
		return QLA_FUNCTION_TIMEOUT;
	}

	if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
		/* Setting Link-Down error */
		mcp->mb[0] = MBS_LINK_DOWN_ERROR;
		ql_log(ql_log_warn, vha, 0x1004,
		    "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
		return QLA_FUNCTION_TIMEOUT;
	}

	/* check if ISP abort is active and return cmd with timeout */
	if (((test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
	      test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
	      test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) &&
	      !is_rom_cmd(mcp->mb[0])) || ha->flags.eeh_busy) {
		ql_log(ql_log_info, vha, 0x1005,
		    "Cmd 0x%x aborted with timeout since ISP Abort is pending\n",
		    mcp->mb[0]);
		return QLA_FUNCTION_TIMEOUT;
	}

	atomic_inc(&ha->num_pend_mbx_stage1);
	/*
	 * Wait for active mailbox commands to finish by waiting at most tov
	 * seconds. This is to serialize actual issuing of mailbox cmds during
	 * non ISP abort time.
	 */
	if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) {
		/* Timeout occurred. Return error. */
		ql_log(ql_log_warn, vha, 0xd035,
		    "Cmd access timeout, cmd=0x%x, Exiting.\n",
		    mcp->mb[0]);
		vha->hw_err_cnt++;
		atomic_dec(&ha->num_pend_mbx_stage1);
		return QLA_FUNCTION_TIMEOUT;
	}
	atomic_dec(&ha->num_pend_mbx_stage1);
	if (ha->flags.purge_mbox || chip_reset != ha->chip_reset ||
	    ha->flags.eeh_busy) {
		ql_log(ql_log_warn, vha, 0xd035,
		    "Error detected: purge[%d] eeh[%d] cmd=0x%x, Exiting.\n",
		    ha->flags.purge_mbox, ha->flags.eeh_busy, mcp->mb[0]);
		rval = QLA_ABORTED;
		goto premature_exit;
	}


	/* Save mailbox command for debug */
	ha->mcp = mcp;

	ql_dbg(ql_dbg_mbx, vha, 0x1006,
	    "Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]);

	spin_lock_irqsave(&ha->hardware_lock, flags);

	if (ha->flags.purge_mbox || chip_reset != ha->chip_reset ||
	    ha->flags.mbox_busy) {
		rval = QLA_ABORTED;
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		goto premature_exit;
	}
	ha->flags.mbox_busy = 1;

	/* Load mailbox registers. */
	if (IS_P3P_TYPE(ha))
		optr = &reg->isp82.mailbox_in[0];
	else if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha)))
		optr = &reg->isp24.mailbox0;
	else
		optr = MAILBOX_REG(ha, &reg->isp, 0);

	iptr = mcp->mb;
	command = mcp->mb[0];
	mboxes = mcp->out_mb;

	ql_dbg(ql_dbg_mbx, vha, 0x1111,
	    "Mailbox registers (OUT):\n");
	for (cnt = 0; cnt < ha->mbx_count; cnt++) {
		if (IS_QLA2200(ha) && cnt == 8)
			optr = MAILBOX_REG(ha, &reg->isp, 8);
		if (mboxes & BIT_0) {
			ql_dbg(ql_dbg_mbx, vha, 0x1112,
			    "mbox[%d]<-0x%04x\n", cnt, *iptr);
			wrt_reg_word(optr, *iptr);
		}

		mboxes >>= 1;
		optr++;
		iptr++;
	}

	ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1117,
	    "I/O Address = %p.\n", optr);

	/* Issue set host interrupt command to send cmd out. */
	ha->flags.mbox_int = 0;
	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);

	/* Unlock mbx registers and wait for interrupt */
	ql_dbg(ql_dbg_mbx, vha, 0x100f,
	    "Going to unlock irq & waiting for interrupts. "
	    "jiffies=%lx.\n", jiffies);

	/* Wait for mbx cmd completion until timeout */
	atomic_inc(&ha->num_pend_mbx_stage2);
	if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) {
		set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);

		if (IS_P3P_TYPE(ha))
			wrt_reg_dword(&reg->isp82.hint, HINT_MBX_INT_PENDING);
		else if (IS_FWI2_CAPABLE(ha))
			wrt_reg_dword(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
		else
			wrt_reg_word(&reg->isp.hccr, HCCR_SET_HOST_INT);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		wait_time = jiffies;
		atomic_inc(&ha->num_pend_mbx_stage3);
		if (!wait_for_completion_timeout(&ha->mbx_intr_comp,
		    mcp->tov * HZ)) {
			if (chip_reset != ha->chip_reset) {
				eeh_delay = ha->flags.eeh_busy ? 1 : 0;

				spin_lock_irqsave(&ha->hardware_lock, flags);
				ha->flags.mbox_busy = 0;
				spin_unlock_irqrestore(&ha->hardware_lock,
				    flags);
				atomic_dec(&ha->num_pend_mbx_stage2);
				atomic_dec(&ha->num_pend_mbx_stage3);
				rval = QLA_ABORTED;
				goto premature_exit;
			}
			ql_dbg(ql_dbg_mbx, vha, 0x117a,
			    "cmd=%x Timeout.\n", command);
			spin_lock_irqsave(&ha->hardware_lock, flags);
			clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
			spin_unlock_irqrestore(&ha->hardware_lock, flags);

		} else if (ha->flags.purge_mbox ||
		    chip_reset != ha->chip_reset) {
			eeh_delay = ha->flags.eeh_busy ? 1 : 0;

			spin_lock_irqsave(&ha->hardware_lock, flags);
			ha->flags.mbox_busy = 0;
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
			atomic_dec(&ha->num_pend_mbx_stage2);
			atomic_dec(&ha->num_pend_mbx_stage3);
			rval = QLA_ABORTED;
			goto premature_exit;
		}
		atomic_dec(&ha->num_pend_mbx_stage3);

		if (time_after(jiffies, wait_time + 5 * HZ))
			ql_log(ql_log_warn, vha, 0x1015, "cmd=0x%x, waited %d msecs\n",
			    command, jiffies_to_msecs(jiffies - wait_time));
	} else {
		ql_dbg(ql_dbg_mbx, vha, 0x1011,
		    "Cmd=%x Polling Mode.\n", command);

		if (IS_P3P_TYPE(ha)) {
			if (rd_reg_dword(&reg->isp82.hint) &
			    HINT_MBX_INT_PENDING) {
				ha->flags.mbox_busy = 0;
				spin_unlock_irqrestore(&ha->hardware_lock,
				    flags);
				atomic_dec(&ha->num_pend_mbx_stage2);
				ql_dbg(ql_dbg_mbx, vha, 0x1012,
				    "Pending mailbox timeout, exiting.\n");
				vha->hw_err_cnt++;
				rval = QLA_FUNCTION_TIMEOUT;
				goto premature_exit;
			}
			wrt_reg_dword(&reg->isp82.hint, HINT_MBX_INT_PENDING);
		} else if (IS_FWI2_CAPABLE(ha))
			wrt_reg_dword(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
		else
			wrt_reg_word(&reg->isp.hccr, HCCR_SET_HOST_INT);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */
		while (!ha->flags.mbox_int) {
			if (ha->flags.purge_mbox ||
			    chip_reset != ha->chip_reset) {
				eeh_delay = ha->flags.eeh_busy ? 1 : 0;

				spin_lock_irqsave(&ha->hardware_lock, flags);
				ha->flags.mbox_busy = 0;
				spin_unlock_irqrestore(&ha->hardware_lock,
				    flags);
				atomic_dec(&ha->num_pend_mbx_stage2);
				rval = QLA_ABORTED;
				goto premature_exit;
			}

			if (time_after(jiffies, wait_time))
				break;

			/* Check for pending interrupts. */
			qla2x00_poll(ha->rsp_q_map[0]);

			if (!ha->flags.mbox_int &&
			    !(IS_QLA2200(ha) &&
			    command == MBC_LOAD_RISC_RAM_EXTENDED))
				msleep(10);
		} /* while */
		ql_dbg(ql_dbg_mbx, vha, 0x1013,
		    "Waited %d sec.\n",
		    (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ));
	}
	atomic_dec(&ha->num_pend_mbx_stage2);

	/* Check whether we timed out */
	if (ha->flags.mbox_int) {
		uint16_t *iptr2;

		ql_dbg(ql_dbg_mbx, vha, 0x1014,
		    "Cmd=%x completed.\n", command);

		/* Got interrupt. Clear the flag. */
		ha->flags.mbox_int = 0;
		clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);

		if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
			spin_lock_irqsave(&ha->hardware_lock, flags);
			ha->flags.mbox_busy = 0;
			spin_unlock_irqrestore(&ha->hardware_lock, flags);

			/* Setting Link-Down error */
			mcp->mb[0] = MBS_LINK_DOWN_ERROR;
			ha->mcp = NULL;
			rval = QLA_FUNCTION_FAILED;
			ql_log(ql_log_warn, vha, 0xd048,
			    "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
			goto premature_exit;
		}

		if (ha->mailbox_out[0] != MBS_COMMAND_COMPLETE) {
			ql_dbg(ql_dbg_mbx, vha, 0x11ff,
			    "mb_out[0] = %#x <> %#x\n", ha->mailbox_out[0],
			    MBS_COMMAND_COMPLETE);
			rval = QLA_FUNCTION_FAILED;
		}

		/* Load return mailbox registers. */
		iptr2 = mcp->mb;
		iptr = (uint16_t *)&ha->mailbox_out[0];
		mboxes = mcp->in_mb;

		ql_dbg(ql_dbg_mbx, vha, 0x1113,
		    "Mailbox registers (IN):\n");
		for (cnt = 0; cnt < ha->mbx_count; cnt++) {
			if (mboxes & BIT_0) {
				*iptr2 = *iptr;
				ql_dbg(ql_dbg_mbx, vha, 0x1114,
				    "mbox[%d]->0x%04x\n", cnt, *iptr2);
			}

			mboxes >>= 1;
			iptr2++;
			iptr++;
		}
	} else {

		uint16_t mb[8];
		uint32_t ictrl, host_status, hccr;
		uint16_t w;

		if (IS_FWI2_CAPABLE(ha)) {
			mb[0] = rd_reg_word(&reg->isp24.mailbox0);
			mb[1] = rd_reg_word(&reg->isp24.mailbox1);
			mb[2] = rd_reg_word(&reg->isp24.mailbox2);
			mb[3] = rd_reg_word(&reg->isp24.mailbox3);
			mb[7] = rd_reg_word(&reg->isp24.mailbox7);
			ictrl = rd_reg_dword(&reg->isp24.ictrl);
			host_status = rd_reg_dword(&reg->isp24.host_status);
			hccr = rd_reg_dword(&reg->isp24.hccr);

			ql_log(ql_log_warn, vha, 0xd04c,
			    "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
			    "mb[0-3]=[0x%x 0x%x 0x%x 0x%x] mb7 0x%x host_status 0x%x hccr 0x%x\n",
			    command, ictrl, jiffies, mb[0], mb[1], mb[2], mb[3],
			    mb[7], host_status, hccr);
			vha->hw_err_cnt++;

		} else {
			mb[0] = RD_MAILBOX_REG(ha, &reg->isp, 0);
			ictrl = rd_reg_word(&reg->isp.ictrl);
			ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119,
			    "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
			    "mb[0]=0x%x\n", command, ictrl, jiffies, mb[0]);
			vha->hw_err_cnt++;
		}
		ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019);

		/* Capture FW dump only, if PCI device active */
		if (!pci_channel_offline(vha->hw->pdev)) {
			pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
			if (w == 0xffff || ictrl == 0xffffffff ||
			    (chip_reset != ha->chip_reset)) {
				/* This is a special case: if the driver is
				 * being unloaded and the PCI device goes into
				 * a bad state due to a PCI error condition,
				 * only the PCI ERR flag will be set, so do a
				 * premature exit for that case.
				 */
				spin_lock_irqsave(&ha->hardware_lock, flags);
				ha->flags.mbox_busy = 0;
				spin_unlock_irqrestore(&ha->hardware_lock,
				    flags);
				rval = QLA_FUNCTION_TIMEOUT;
				goto premature_exit;
			}

			/* Attempt to capture a firmware dump for further
			 * analysis of the current firmware state. We do not
			 * need to do this if we are intentionally generating
			 * a dump.
			 */
			if (mcp->mb[0] != MBC_GEN_SYSTEM_ERROR)
				qla2xxx_dump_fw(vha);
			rval = QLA_FUNCTION_TIMEOUT;
		}
	}
	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->flags.mbox_busy = 0;
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	/* Clean up */
	ha->mcp = NULL;

	if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) {
		ql_dbg(ql_dbg_mbx, vha, 0x101a,
		    "Checking for additional resp interrupt.\n");

		/* polling mode for non isp_abort commands. */
		qla2x00_poll(ha->rsp_q_map[0]);
	}

	if (rval == QLA_FUNCTION_TIMEOUT &&
	    mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) {
		if (!io_lock_on || (mcp->flags & IOCTL_CMD) ||
		    ha->flags.eeh_busy) {
			/* not in dpc. schedule it for dpc to take over. */
			ql_dbg(ql_dbg_mbx, vha, 0x101b,
			    "Timeout, schedule isp_abort_needed.\n");

			if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
			    !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
			    !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
				if (IS_QLA82XX(ha)) {
					ql_dbg(ql_dbg_mbx, vha, 0x112a,
					    "disabling pause transmit on port "
					    "0 & 1.\n");
					qla82xx_wr_32(ha,
					    QLA82XX_CRB_NIU + 0x98,
					    CRB_NIU_XG_PAUSE_CTL_P0|
					    CRB_NIU_XG_PAUSE_CTL_P1);
				}
				ql_log(ql_log_info, base_vha, 0x101c,
				    "Mailbox cmd timeout occurred, cmd=0x%x, "
				    "mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP "
				    "abort.\n", command, mcp->mb[0],
				    ha->flags.eeh_busy);
				vha->hw_err_cnt++;
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
			}
		} else if (current == ha->dpc_thread) {
			/* call abort directly since we are in the DPC thread */
			ql_dbg(ql_dbg_mbx, vha, 0x101d,
			    "Timeout, calling abort_isp.\n");

			if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
			    !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
			    !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
				if (IS_QLA82XX(ha)) {
					ql_dbg(ql_dbg_mbx, vha, 0x112b,
					    "disabling pause transmit on port "
					    "0 & 1.\n");
					qla82xx_wr_32(ha,
					    QLA82XX_CRB_NIU + 0x98,
					    CRB_NIU_XG_PAUSE_CTL_P0|
					    CRB_NIU_XG_PAUSE_CTL_P1);
				}
				ql_log(ql_log_info, base_vha, 0x101e,
				    "Mailbox cmd timeout occurred, cmd=0x%x, "
				    "mb[0]=0x%x. Scheduling ISP abort ",
				    command, mcp->mb[0]);
				vha->hw_err_cnt++;
				set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
				clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				/* Allow next mbx cmd to come in. */
				complete(&ha->mbx_cmd_comp);
				if (ha->isp_ops->abort_isp(vha) &&
				    !ha->flags.eeh_busy) {
					/* Failed. retry later. */
					set_bit(ISP_ABORT_NEEDED,
					    &vha->dpc_flags);
				}
				clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
				ql_dbg(ql_dbg_mbx, vha, 0x101f,
				    "Finished abort_isp.\n");
				goto mbx_done;
			}
		}
	}

premature_exit:
	/* Allow next mbx cmd to come in. */
	complete(&ha->mbx_cmd_comp);

mbx_done:
	if (rval == QLA_ABORTED) {
		ql_log(ql_log_info, vha, 0xd035,
		    "Chip Reset in progress. Purging Mbox cmd=0x%x.\n",
		    mcp->mb[0]);
	} else if (rval) {
		if (ql2xextended_error_logging & (ql_dbg_disc|ql_dbg_mbx)) {
			pr_warn("%s [%s]-%04x:%ld: **** Failed=%x", QL_MSGHDR,
			    dev_name(&ha->pdev->dev), 0x1020+0x800,
			    vha->host_no, rval);
			mboxes = mcp->in_mb;
			cnt = 4;
			for (i = 0; i < ha->mbx_count && cnt; i++, mboxes >>= 1)
				if (mboxes & BIT_0) {
					printk(" mb[%u]=%x", i, mcp->mb[i]);
					cnt--;
				}
			pr_warn(" cmd=%x ****\n", command);
		}
		if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha))) {
			ql_dbg(ql_dbg_mbx, vha, 0x1198,
			    "host_status=%#x intr_ctrl=%#x intr_status=%#x\n",
			    rd_reg_dword(&reg->isp24.host_status),
			    rd_reg_dword(&reg->isp24.ictrl),
			    rd_reg_dword(&reg->isp24.istatus));
		} else {
			ql_dbg(ql_dbg_mbx, vha, 0x1206,
			    "ctrl_status=%#x ictrl=%#x istatus=%#x\n",
			    rd_reg_word(&reg->isp.ctrl_status),
			    rd_reg_word(&reg->isp.ictrl),
			    rd_reg_word(&reg->isp.istatus));
		}
	} else {
		ql_dbg(ql_dbg_mbx, base_vha, 0x1021, "Done %s.\n", __func__);
	}

	i = 500;
	while (i && eeh_delay && (ha->pci_error_state < QLA_PCI_SLOT_RESET)) {
		/*
		 * The caller of this mailbox encountered a PCI error.
		 * Hold the thread until the PCIe link reset completes to
		 * make sure the caller does not unmap DMA while recovery
		 * is in progress.
		 */
		msleep(1);
		i--;
	}
	return rval;
}

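/*
 * Illustrative sketch (not built, see #if 0): the calling pattern every
 * MBC_* wrapper in the rest of this file follows. A wrapper fills mcp->mb[]
 * with the command opcode and parameters, sets the out_mb/in_mb bitmasks
 * for the registers it writes and expects back, then calls
 * qla2x00_mailbox_command() and checks rval. The helper name below is
 * hypothetical; the real wrappers start with qla2x00_load_ram() below.
 */
#if 0
static int qla2x00_example_mbx_wrapper(scsi_qla_host_t *vha, uint16_t *fw_state)
{
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	int rval;

	mcp->mb[0] = MBC_GET_FIRMWARE_STATE;	/* opcode always goes in mb[0] */
	mcp->out_mb = MBX_0;			/* registers loaded on the way out */
	mcp->in_mb = MBX_1|MBX_0;		/* registers copied back on completion */
	mcp->tov = MBX_TOV_SECONDS;		/* per-command timeout, in seconds */
	mcp->flags = 0;

	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval == QLA_SUCCESS)
		*fw_state = mcp->mb[1];		/* returned data lands back in mcp->mb[] */

	return rval;
}
#endif
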
int
qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
    uint32_t risc_code_size)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1022,
	    "Entered %s.\n", __func__);

	if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) {
		mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED;
		mcp->mb[8] = MSW(risc_addr);
		mcp->out_mb = MBX_8|MBX_0;
	} else {
		mcp->mb[0] = MBC_LOAD_RISC_RAM;
		mcp->out_mb = MBX_0;
	}
	mcp->mb[1] = LSW(risc_addr);
	mcp->mb[2] = MSW(req_dma);
	mcp->mb[3] = LSW(req_dma);
	mcp->mb[6] = MSW(MSD(req_dma));
	mcp->mb[7] = LSW(MSD(req_dma));
	mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
	if (IS_FWI2_CAPABLE(ha)) {
		mcp->mb[4] = MSW(risc_code_size);
		mcp->mb[5] = LSW(risc_code_size);
		mcp->out_mb |= MBX_5|MBX_4;
	} else {
		mcp->mb[4] = LSW(risc_code_size);
		mcp->out_mb |= MBX_4;
	}

	mcp->in_mb = MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1023,
		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
		    rval, mcp->mb[0], mcp->mb[1]);
		vha->hw_err_cnt++;
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1024,
		    "Done %s.\n", __func__);
	}

	return rval;
}

#define NVME_ENABLE_FLAG	BIT_3

/*
 * qla2x00_execute_fw
 *	Start adapter firmware.
 *
 * Input:
 *	ha = adapter block pointer.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	u8 semaphore = 0;
#define EXE_FW_FORCE_SEMAPHORE BIT_7
	u8 retry = 3;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1025,
	    "Entered %s.\n", __func__);

again:
	mcp->mb[0] = MBC_EXECUTE_FIRMWARE;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_0;
	if (IS_FWI2_CAPABLE(ha)) {
		mcp->mb[1] = MSW(risc_addr);
		mcp->mb[2] = LSW(risc_addr);
		mcp->mb[3] = 0;
		mcp->mb[4] = 0;
		mcp->mb[11] = 0;

		/* Enable BPM? */
		if (ha->flags.lr_detected) {
			mcp->mb[4] = BIT_0;
			if (IS_BPM_RANGE_CAPABLE(ha))
				mcp->mb[4] |=
				    ha->lr_distance << LR_DIST_FW_POS;
		}

		if (ql2xnvmeenable && (IS_QLA27XX(ha) || IS_QLA28XX(ha)))
			mcp->mb[4] |= NVME_ENABLE_FLAG;

		if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
			struct nvram_81xx *nv = ha->nvram;
			/* set minimum speed if specified in nvram */
			if (nv->min_supported_speed >= 2 &&
			    nv->min_supported_speed <= 5) {
				mcp->mb[4] |= BIT_4;
				mcp->mb[11] |= nv->min_supported_speed & 0xF;
				mcp->out_mb |= MBX_11;
				mcp->in_mb |= BIT_5;
				vha->min_supported_speed =
				    nv->min_supported_speed;
			}
		}

		if (ha->flags.exlogins_enabled)
			mcp->mb[4] |= ENABLE_EXTENDED_LOGIN;

		if (ha->flags.exchoffld_enabled)
			mcp->mb[4] |= ENABLE_EXCHANGE_OFFLD;

		if (semaphore)
			mcp->mb[11] |= EXE_FW_FORCE_SEMAPHORE;

		mcp->out_mb |= MBX_4 | MBX_3 | MBX_2 | MBX_1 | MBX_11;
		mcp->in_mb |= MBX_5 | MBX_3 | MBX_2 | MBX_1;
	} else {
		mcp->mb[1] = LSW(risc_addr);
		mcp->out_mb |= MBX_1;
		if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
			mcp->mb[2] = 0;
			mcp->out_mb |= MBX_2;
		}
	}

	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		if (IS_QLA28XX(ha) && rval == QLA_COMMAND_ERROR &&
		    mcp->mb[1] == 0x27 && retry) {
			semaphore = 1;
			retry--;
			ql_dbg(ql_dbg_async, vha, 0x1026,
			    "Exe FW: force semaphore.\n");
			goto again;
		}

		ql_dbg(ql_dbg_mbx, vha, 0x1026,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
		vha->hw_err_cnt++;
		return rval;
	}

	if (!IS_FWI2_CAPABLE(ha))
		goto done;

	ha->fw_ability_mask = mcp->mb[3] << 16 | mcp->mb[2];
	ql_dbg(ql_dbg_mbx, vha, 0x119a,
	    "fw_ability_mask=%x.\n", ha->fw_ability_mask);
	ql_dbg(ql_dbg_mbx, vha, 0x1027, "exchanges=%x.\n", mcp->mb[1]);
	if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
		ha->max_supported_speed = mcp->mb[2] & (BIT_0|BIT_1);
		ql_dbg(ql_dbg_mbx, vha, 0x119b, "max_supported_speed=%s.\n",
		    ha->max_supported_speed == 0 ? "16Gps" :
		    ha->max_supported_speed == 1 ? "32Gps" :
		    ha->max_supported_speed == 2 ? "64Gps" : "unknown");
		if (vha->min_supported_speed) {
			ha->min_supported_speed = mcp->mb[5] &
			    (BIT_0 | BIT_1 | BIT_2);
			ql_dbg(ql_dbg_mbx, vha, 0x119c,
			    "min_supported_speed=%s.\n",
			    ha->min_supported_speed == 6 ? "64Gps" :
			    ha->min_supported_speed == 5 ? "32Gps" :
			    ha->min_supported_speed == 4 ? "16Gps" :
			    ha->min_supported_speed == 3 ? "8Gps" :
			    ha->min_supported_speed == 2 ? "4Gps" : "unknown");
		}
	}

	if (IS_QLA28XX(ha) && (mcp->mb[5] & BIT_10) && ql2xsecenable) {
		ha->flags.edif_enabled = 1;
		ql_log(ql_log_info, vha, 0xffff,
		    "%s: edif is enabled\n", __func__);
	}

done:
	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1028,
	    "Done %s.\n", __func__);

	return rval;
}

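/*
 * Illustrative sketch (not built): the order these two mailbox commands are
 * used in during firmware bring-up. The RISC image is first copied into
 * adapter RAM with MBC_LOAD_RISC_RAM(_EXTENDED) and then started with
 * MBC_EXECUTE_FIRMWARE. The real sequence (per-segment iteration, checksum
 * verification, error handling) lives in the initialization code; the
 * helper below is a hypothetical, minimal outline.
 */
#if 0
static int qla2x00_example_start_fw(scsi_qla_host_t *vha, dma_addr_t fw_dma,
    uint32_t risc_addr, uint32_t dwords)
{
	int rval;

	/* Copy one firmware segment from host memory into RISC RAM. */
	rval = qla2x00_load_ram(vha, fw_dma, risc_addr, dwords);
	if (rval != QLA_SUCCESS)
		return rval;

	/* The firmware only begins executing after MBC_EXECUTE_FIRMWARE. */
	return qla2x00_execute_fw(vha, risc_addr);
}
#endif
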
/*
 * qla_get_exlogin_status
 *	Get extended login status
 *	uses the memory offload control/status Mailbox
 *
 * Input:
 *	ha: adapter state pointer.
 *	fwopt: firmware options
 *
 * Returns:
 *	qla2x00 local function status
 *
 * Context:
 *	Kernel context.
 */
#define FETCH_XLOGINS_STAT	0x8
int
qla_get_exlogin_status(scsi_qla_host_t *vha, uint16_t *buf_sz,
	uint16_t *ex_logins_cnt)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118f,
	    "Entered %s\n", __func__);

	memset(mcp->mb, 0, sizeof(mcp->mb));
	mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
	mcp->mb[1] = FETCH_XLOGINS_STAT;
	mcp->out_mb = MBX_1|MBX_0;
	mcp->in_mb = MBX_10|MBX_4|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;

	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1115, "Failed=%x.\n", rval);
	} else {
		*buf_sz = mcp->mb[4];
		*ex_logins_cnt = mcp->mb[10];

		ql_log(ql_log_info, vha, 0x1190,
		    "buffer size 0x%x, exchange login count=%d\n",
		    mcp->mb[4], mcp->mb[10]);

		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1116,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla_set_exlogin_mem_cfg
 *	set extended login memory configuration
 *	Mbx needs to be issued before init_cb is set
 *
 * Input:
 *	ha: adapter state pointer.
 *	buffer: buffer pointer
 *	phys_addr: physical address of buffer
 *	size: size of buffer
 *	TARGET_QUEUE_LOCK must be released
 *	ADAPTER_STATE_LOCK must be released
 *
 * Returns:
 *	qla2x00 local function status code.
 *
 * Context:
 *	Kernel context.
 */
#define CONFIG_XLOGINS_MEM	0x9
int
qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111a,
	    "Entered %s.\n", __func__);

	memset(mcp->mb, 0, sizeof(mcp->mb));
	mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
	mcp->mb[1] = CONFIG_XLOGINS_MEM;
	mcp->mb[2] = MSW(phys_addr);
	mcp->mb[3] = LSW(phys_addr);
	mcp->mb[6] = MSW(MSD(phys_addr));
	mcp->mb[7] = LSW(MSD(phys_addr));
	mcp->mb[8] = MSW(ha->exlogin_size);
	mcp->mb[9] = LSW(ha->exlogin_size);
	mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_11|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x111b,
		    "EXlogin Failed=%x. MB0=%x MB11=%x\n",
		    rval, mcp->mb[0], mcp->mb[11]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla_get_exchoffld_status
 *	Get exchange offload status
 *	uses the memory offload control/status Mailbox
 *
 * Input:
 *	ha: adapter state pointer.
 *	fwopt: firmware options
 *
 * Returns:
 *	qla2x00 local function status
 *
 * Context:
 *	Kernel context.
 */
#define FETCH_XCHOFFLD_STAT	0x2
int
qla_get_exchoffld_status(scsi_qla_host_t *vha, uint16_t *buf_sz,
	uint16_t *ex_logins_cnt)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1019,
	    "Entered %s\n", __func__);

	memset(mcp->mb, 0, sizeof(mcp->mb));
	mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
	mcp->mb[1] = FETCH_XCHOFFLD_STAT;
	mcp->out_mb = MBX_1|MBX_0;
	mcp->in_mb = MBX_10|MBX_4|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;

	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1155, "Failed=%x.\n", rval);
	} else {
		*buf_sz = mcp->mb[4];
		*ex_logins_cnt = mcp->mb[10];

		ql_log(ql_log_info, vha, 0x118e,
		    "buffer size 0x%x, exchange offload count=%d\n",
		    mcp->mb[4], mcp->mb[10]);

		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1156,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla_set_exchoffld_mem_cfg
 *	Set exchange offload memory configuration
 *	Mbx needs to be issued before init_cb is set
 *
 * Input:
 *	ha: adapter state pointer.
 *	buffer: buffer pointer
 *	phys_addr: physical address of buffer
 *	size: size of buffer
 *	TARGET_QUEUE_LOCK must be released
 *	ADAPTER_STATE_LOCK must be released
 *
 * Returns:
 *	qla2x00 local function status code.
 *
 * Context:
 *	Kernel context.
 */
#define CONFIG_XCHOFFLD_MEM	0x3
int
qla_set_exchoffld_mem_cfg(scsi_qla_host_t *vha)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1157,
	    "Entered %s.\n", __func__);

	memset(mcp->mb, 0, sizeof(mcp->mb));
	mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
	mcp->mb[1] = CONFIG_XCHOFFLD_MEM;
	mcp->mb[2] = MSW(ha->exchoffld_buf_dma);
	mcp->mb[3] = LSW(ha->exchoffld_buf_dma);
	mcp->mb[6] = MSW(MSD(ha->exchoffld_buf_dma));
	mcp->mb[7] = LSW(MSD(ha->exchoffld_buf_dma));
	mcp->mb[8] = MSW(ha->exchoffld_size);
	mcp->mb[9] = LSW(ha->exchoffld_size);
	mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_11|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x1158, "Failed=%x.\n", rval);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1192,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2x00_get_fw_version
 *	Get firmware version.
 *
 * Input:
 *	ha: adapter state pointer.
 *	major: pointer for major number.
 *	minor: pointer for minor number.
 *	subminor: pointer for subminor number.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_get_fw_version(scsi_qla_host_t *vha)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1029,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_GET_FIRMWARE_VERSION;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	if (IS_QLA81XX(vha->hw) || IS_QLA8031(ha) || IS_QLA8044(ha))
		mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8;
	if (IS_FWI2_CAPABLE(ha))
		mcp->in_mb |= MBX_17|MBX_16|MBX_15;
	if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
		mcp->in_mb |=
		    MBX_25|MBX_24|MBX_23|MBX_22|MBX_21|MBX_20|MBX_19|MBX_18|
		    MBX_14|MBX_13|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7;

	mcp->flags = 0;
	mcp->tov = MBX_TOV_SECONDS;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS)
		goto failed;

	/* Return mailbox data. */
	ha->fw_major_version = mcp->mb[1];
	ha->fw_minor_version = mcp->mb[2];
	ha->fw_subminor_version = mcp->mb[3];
	ha->fw_attributes = mcp->mb[6];
	if (IS_QLA2100(vha->hw) || IS_QLA2200(vha->hw))
		ha->fw_memory_size = 0x1FFFF;		/* Defaults to 128KB. */
	else
		ha->fw_memory_size = (mcp->mb[5] << 16) | mcp->mb[4];

	if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
		ha->mpi_version[0] = mcp->mb[10] & 0xff;
		ha->mpi_version[1] = mcp->mb[11] >> 8;
		ha->mpi_version[2] = mcp->mb[11] & 0xff;
		ha->mpi_capabilities = (mcp->mb[12] << 16) | mcp->mb[13];
		ha->phy_version[0] = mcp->mb[8] & 0xff;
		ha->phy_version[1] = mcp->mb[9] >> 8;
		ha->phy_version[2] = mcp->mb[9] & 0xff;
	}

	if (IS_FWI2_CAPABLE(ha)) {
		ha->fw_attributes_h = mcp->mb[15];
		ha->fw_attributes_ext[0] = mcp->mb[16];
		ha->fw_attributes_ext[1] = mcp->mb[17];
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1139,
		    "%s: FW_attributes Upper: 0x%x, Lower: 0x%x.\n",
		    __func__, mcp->mb[15], mcp->mb[6]);
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x112f,
		    "%s: Ext_FwAttributes Upper: 0x%x, Lower: 0x%x.\n",
		    __func__, mcp->mb[17], mcp->mb[16]);

		if (ha->fw_attributes_h & 0x4)
			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118d,
			    "%s: Firmware supports Extended Login 0x%x\n",
			    __func__, ha->fw_attributes_h);

		if (ha->fw_attributes_h & 0x8)
			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1191,
			    "%s: Firmware supports Exchange Offload 0x%x\n",
			    __func__, ha->fw_attributes_h);

		/*
		 * FW supports nvme and driver load parameter requested nvme.
		 * BIT 26 of fw_attributes indicates NVMe support.
		 */
		if ((ha->fw_attributes_h &
		    (FW_ATTR_H_NVME | FW_ATTR_H_NVME_UPDATED)) &&
		    ql2xnvmeenable) {
			if (ha->fw_attributes_h & FW_ATTR_H_NVME_FBURST)
				vha->flags.nvme_first_burst = 1;

			vha->flags.nvme_enabled = 1;
			ql_log(ql_log_info, vha, 0xd302,
			    "%s: FC-NVMe is Enabled (0x%x)\n",
			    __func__, ha->fw_attributes_h);
		}

		/* BIT_13 of Extended FW Attributes informs about NVMe2 support */
		if (ha->fw_attributes_ext[0] & FW_ATTR_EXT0_NVME2) {
			ql_log(ql_log_info, vha, 0xd302,
			    "Firmware supports NVMe2 0x%x\n",
			    ha->fw_attributes_ext[0]);
			vha->flags.nvme2_enabled = 1;
		}
	}

	if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
		ha->serdes_version[0] = mcp->mb[7] & 0xff;
		ha->serdes_version[1] = mcp->mb[8] >> 8;
		ha->serdes_version[2] = mcp->mb[8] & 0xff;
		ha->mpi_version[0] = mcp->mb[10] & 0xff;
		ha->mpi_version[1] = mcp->mb[11] >> 8;
		ha->mpi_version[2] = mcp->mb[11] & 0xff;
		ha->pep_version[0] = mcp->mb[13] & 0xff;
		ha->pep_version[1] = mcp->mb[14] >> 8;
		ha->pep_version[2] = mcp->mb[14] & 0xff;
		ha->fw_shared_ram_start = (mcp->mb[19] << 16) | mcp->mb[18];
		ha->fw_shared_ram_end = (mcp->mb[21] << 16) | mcp->mb[20];
		ha->fw_ddr_ram_start = (mcp->mb[23] << 16) | mcp->mb[22];
		ha->fw_ddr_ram_end = (mcp->mb[25] << 16) | mcp->mb[24];
		if (IS_QLA28XX(ha)) {
			if (mcp->mb[16] & BIT_10)
				ha->flags.secure_fw = 1;

			ql_log(ql_log_info, vha, 0xffff,
			    "Secure Flash Update in FW: %s\n",
			    (ha->flags.secure_fw) ? "Supported" :
			    "Not Supported");
		}

		if (ha->flags.scm_supported_a &&
		    (ha->fw_attributes_ext[0] & FW_ATTR_EXT0_SCM_SUPPORTED)) {
			ha->flags.scm_supported_f = 1;
			ha->sf_init_cb->flags |= cpu_to_le16(BIT_13);
		}
		ql_log(ql_log_info, vha, 0x11a3, "SCM in FW: %s\n",
		    (ha->flags.scm_supported_f) ? "Supported" :
		    "Not Supported");

		if (vha->flags.nvme2_enabled) {
			/* set BIT_15 of special feature control block for SLER */
			ha->sf_init_cb->flags |= cpu_to_le16(BIT_15);
			/* set BIT_14 of special feature control block for PI CTRL*/
			ha->sf_init_cb->flags |= cpu_to_le16(BIT_14);
		}
	}

failed:
	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x102a, "Failed=%x.\n", rval);
	} else {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102b,
		    "Done %s.\n", __func__);
	}
	return rval;
}

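/*
 * Illustrative sketch (not built): how the attribute word cached by
 * qla2x00_get_fw_version() can be consulted after the command completes.
 * This mirrors the FC-NVMe check done inline above; the helper name is
 * hypothetical.
 */
#if 0
static bool qla2x00_example_fw_supports_nvme(struct qla_hw_data *ha)
{
	/* Same test as above: NVMe attribute bit set and module parameter on. */
	return (ha->fw_attributes_h &
	    (FW_ATTR_H_NVME | FW_ATTR_H_NVME_UPDATED)) && ql2xnvmeenable;
}
#endif
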
/*
 * qla2x00_get_fw_options
 *	Get firmware options.
 *
 * Input:
 *	ha = adapter block pointer.
 *	fwopt = pointer for firmware options.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102c,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_GET_FIRMWARE_OPTION;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x102d, "Failed=%x.\n", rval);
	} else {
		fwopts[0] = mcp->mb[0];
		fwopts[1] = mcp->mb[1];
		fwopts[2] = mcp->mb[2];
		fwopts[3] = mcp->mb[3];

		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102e,
		    "Done %s.\n", __func__);
	}

	return rval;
}


/*
 * qla2x00_set_fw_options
 *	Set firmware options.
 *
 * Input:
 *	ha = adapter block pointer.
 *	fwopt = pointer for firmware options.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102f,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_SET_FIRMWARE_OPTION;
	mcp->mb[1] = fwopts[1];
	mcp->mb[2] = fwopts[2];
	mcp->mb[3] = fwopts[3];
	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_0;
	if (IS_FWI2_CAPABLE(vha->hw)) {
		mcp->in_mb |= MBX_1;
		mcp->mb[10] = fwopts[10];
		mcp->out_mb |= MBX_10;
	} else {
		mcp->mb[10] = fwopts[10];
		mcp->mb[11] = fwopts[11];
		mcp->mb[12] = 0;	/* Undocumented, but used */
		mcp->out_mb |= MBX_12|MBX_11|MBX_10;
	}
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	fwopts[0] = mcp->mb[0];

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x1030,
		    "Failed=%x (%x/%x).\n", rval, mcp->mb[0], mcp->mb[1]);
	} else {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1031,
		    "Done %s.\n", __func__);
	}

	return rval;
}

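/*
 * Illustrative sketch (not built): firmware options are typically handled
 * as a read-modify-write pair - fetch the current option words with
 * qla2x00_get_fw_options(), adjust the bits of interest, then push them
 * back with qla2x00_set_fw_options(). Bit meanings are firmware specific;
 * the BIT_0 tweak below is a placeholder, not a documented option.
 */
#if 0
static int qla2x00_example_update_fw_options(scsi_qla_host_t *vha)
{
	uint16_t fwopts[16] = { 0 };
	int rval;

	rval = qla2x00_get_fw_options(vha, fwopts);
	if (rval != QLA_SUCCESS)
		return rval;

	fwopts[2] |= BIT_0;		/* placeholder option bit */

	return qla2x00_set_fw_options(vha, fwopts);
}
#endif
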
/*
 * qla2x00_mbx_reg_test
 *	Mailbox register wrap test.
 *
 * Input:
 *	ha = adapter block pointer.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_mbx_reg_test(scsi_qla_host_t *vha)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1032,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST;
	mcp->mb[1] = 0xAAAA;
	mcp->mb[2] = 0x5555;
	mcp->mb[3] = 0xAA55;
	mcp->mb[4] = 0x55AA;
	mcp->mb[5] = 0xA5A5;
	mcp->mb[6] = 0x5A5A;
	mcp->mb[7] = 0x2525;
	mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval == QLA_SUCCESS) {
		if (mcp->mb[1] != 0xAAAA || mcp->mb[2] != 0x5555 ||
		    mcp->mb[3] != 0xAA55 || mcp->mb[4] != 0x55AA)
			rval = QLA_FUNCTION_FAILED;
		if (mcp->mb[5] != 0xA5A5 || mcp->mb[6] != 0x5A5A ||
		    mcp->mb[7] != 0x2525)
			rval = QLA_FUNCTION_FAILED;
	}

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x1033, "Failed=%x.\n", rval);
		vha->hw_err_cnt++;
	} else {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1034,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2x00_verify_checksum
 *	Verify firmware checksum.
 *
 * Input:
 *	ha = adapter block pointer.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1035,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_VERIFY_CHECKSUM;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_0;
	if (IS_FWI2_CAPABLE(vha->hw)) {
		mcp->mb[1] = MSW(risc_addr);
		mcp->mb[2] = LSW(risc_addr);
		mcp->out_mb |= MBX_2|MBX_1;
		mcp->in_mb |= MBX_2|MBX_1;
	} else {
		mcp->mb[1] = LSW(risc_addr);
		mcp->out_mb |= MBX_1;
		mcp->in_mb |= MBX_1;
	}

	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1036,
		    "Failed=%x chm sum=%x.\n", rval, IS_FWI2_CAPABLE(vha->hw) ?
		    (mcp->mb[2] << 16) | mcp->mb[1] : mcp->mb[1]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1037,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2x00_issue_iocb
 *	Issue IOCB using mailbox command
 *
 * Input:
 *	ha = adapter state pointer.
 *	buffer = buffer pointer.
 *	phys_addr = physical address of buffer.
 *	size = size of buffer.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
    dma_addr_t phys_addr, size_t size, uint32_t tov)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	if (!vha->hw->flags.fw_started)
		return QLA_INVALID_COMMAND;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1038,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_IOCB_COMMAND_A64;
	mcp->mb[1] = 0;
	mcp->mb[2] = MSW(LSD(phys_addr));
	mcp->mb[3] = LSW(LSD(phys_addr));
	mcp->mb[6] = MSW(MSD(phys_addr));
	mcp->mb[7] = LSW(MSD(phys_addr));
	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_1|MBX_0;
	mcp->tov = tov;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x1039, "Failed=%x.\n", rval);
	} else {
		sts_entry_t *sts_entry = buffer;

		/* Mask reserved bits. */
		sts_entry->entry_status &=
		    IS_FWI2_CAPABLE(vha->hw) ? RF_MASK_24XX : RF_MASK;
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103a,
		    "Done %s (status=%x).\n", __func__,
		    sts_entry->entry_status);
	}

	return rval;
}

int
qla2x00_issue_iocb(scsi_qla_host_t *vha, void *buffer, dma_addr_t phys_addr,
    size_t size)
{
	return qla2x00_issue_iocb_timeout(vha, buffer, phys_addr, size,
	    MBX_TOV_SECONDS);
}

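/*
 * Illustrative sketch (not built): callers of qla2x00_issue_iocb() hand the
 * firmware a DMA-coherent buffer holding the pre-built IOCB; the firmware
 * writes the completion back into the same buffer. Allocating from
 * ha->s_dma_pool is one option used elsewhere in this file; the size is
 * assumed to fit within a pool entry and error handling is trimmed.
 */
#if 0
static int qla2x00_example_issue_iocb(scsi_qla_host_t *vha, void *iocb,
    size_t size)
{
	struct qla_hw_data *ha = vha->hw;
	dma_addr_t buf_dma;
	void *buf;
	int rval;

	buf = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &buf_dma);
	if (!buf)
		return QLA_MEMORY_ALLOC_FAILED;

	memcpy(buf, iocb, size);		/* request IOCB in */
	rval = qla2x00_issue_iocb(vha, buf, buf_dma, size);
	if (rval == QLA_SUCCESS)
		memcpy(iocb, buf, size);	/* response IOCB out */

	dma_pool_free(ha->s_dma_pool, buf, buf_dma);
	return rval;
}
#endif
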
/*
 * qla2x00_abort_command
 *	Abort command aborts a specified IOCB.
 *
 * Input:
 *	ha = adapter block pointer.
 *	sp = SRB structure pointer.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_abort_command(srb_t *sp)
{
	unsigned long flags = 0;
	int rval;
	uint32_t handle = 0;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	fc_port_t *fcport = sp->fcport;
	scsi_qla_host_t *vha = fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103b,
	    "Entered %s.\n", __func__);

	if (sp->qpair)
		req = sp->qpair->req;
	else
		req = vha->req;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
		if (req->outstanding_cmds[handle] == sp)
			break;
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (handle == req->num_outstanding_cmds) {
		/* command not found */
		return QLA_FUNCTION_FAILED;
	}

	mcp->mb[0] = MBC_ABORT_COMMAND;
	if (HAS_EXTENDED_IDS(ha))
		mcp->mb[1] = fcport->loop_id;
	else
		mcp->mb[1] = fcport->loop_id << 8;
	mcp->mb[2] = (uint16_t)handle;
	mcp->mb[3] = (uint16_t)(handle >> 16);
	mcp->mb[6] = (uint16_t)cmd->device->lun;
	mcp->out_mb = MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x103c, "Failed=%x.\n", rval);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103d,
		    "Done %s.\n", __func__);
	}

	return rval;
}

int
qla2x00_abort_target(struct fc_port *fcport, uint64_t l, int tag)
{
	int rval, rval2;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	scsi_qla_host_t *vha;

	vha = fcport->vha;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103e,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_ABORT_TARGET;
	mcp->out_mb = MBX_9|MBX_2|MBX_1|MBX_0;
	if (HAS_EXTENDED_IDS(vha->hw)) {
		mcp->mb[1] = fcport->loop_id;
		mcp->mb[10] = 0;
		mcp->out_mb |= MBX_10;
	} else {
		mcp->mb[1] = fcport->loop_id << 8;
	}
	mcp->mb[2] = vha->hw->loop_reset_delay;
	mcp->mb[9] = vha->vp_idx;

	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103f,
		    "Failed=%x.\n", rval);
	}

	/* Issue marker IOCB. */
	rval2 = qla2x00_marker(vha, vha->hw->base_qpair, fcport->loop_id, 0,
	    MK_SYNC_ID);
	if (rval2 != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1040,
		    "Failed to issue marker IOCB (%x).\n", rval2);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1041,
		    "Done %s.\n", __func__);
	}

	return rval;
}

int
qla2x00_lun_reset(struct fc_port *fcport, uint64_t l, int tag)
{
	int rval, rval2;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	scsi_qla_host_t *vha;

	vha = fcport->vha;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1042,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_LUN_RESET;
	mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
	if (HAS_EXTENDED_IDS(vha->hw))
		mcp->mb[1] = fcport->loop_id;
	else
		mcp->mb[1] = fcport->loop_id << 8;
	mcp->mb[2] = (u32)l;
	mcp->mb[3] = 0;
	mcp->mb[9] = vha->vp_idx;

	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1043, "Failed=%x.\n", rval);
	}

	/* Issue marker IOCB. */
	rval2 = qla2x00_marker(vha, vha->hw->base_qpair, fcport->loop_id, l,
	    MK_SYNC_ID_LUN);
	if (rval2 != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1044,
		    "Failed to issue marker IOCB (%x).\n", rval2);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1045,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2x00_get_adapter_id
 *	Get adapter ID and topology.
 *
 * Input:
 *	ha = adapter block pointer.
 *	id = pointer for loop ID.
 *	al_pa = pointer for AL_PA.
 *	area = pointer for area.
 *	domain = pointer for domain.
 *	top = pointer for topology.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
    uint8_t *area, uint8_t *domain, uint16_t *top, uint16_t *sw_cap)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1046,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID;
	mcp->mb[9] = vha->vp_idx;
	mcp->out_mb = MBX_9|MBX_0;
	mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	if (IS_CNA_CAPABLE(vha->hw))
		mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10;
	if (IS_FWI2_CAPABLE(vha->hw))
		mcp->in_mb |= MBX_19|MBX_18|MBX_17|MBX_16;
	if (IS_QLA27XX(vha->hw) || IS_QLA28XX(vha->hw)) {
		mcp->in_mb |= MBX_15;
		mcp->out_mb |= MBX_7|MBX_21|MBX_22|MBX_23;
	}

	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (mcp->mb[0] == MBS_COMMAND_ERROR)
		rval = QLA_COMMAND_ERROR;
	else if (mcp->mb[0] == MBS_INVALID_COMMAND)
		rval = QLA_INVALID_COMMAND;

	/* Return data. */
	*id = mcp->mb[1];
	*al_pa = LSB(mcp->mb[2]);
	*area = MSB(mcp->mb[2]);
	*domain = LSB(mcp->mb[3]);
	*top = mcp->mb[6];
	*sw_cap = mcp->mb[7];

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x1047, "Failed=%x.\n", rval);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1048,
		    "Done %s.\n", __func__);

		if (IS_CNA_CAPABLE(vha->hw)) {
			vha->fcoe_vlan_id = mcp->mb[9] & 0xfff;
			vha->fcoe_fcf_idx = mcp->mb[10];
			vha->fcoe_vn_port_mac[5] = mcp->mb[11] >> 8;
			vha->fcoe_vn_port_mac[4] = mcp->mb[11] & 0xff;
			vha->fcoe_vn_port_mac[3] = mcp->mb[12] >> 8;
			vha->fcoe_vn_port_mac[2] = mcp->mb[12] & 0xff;
			vha->fcoe_vn_port_mac[1] = mcp->mb[13] >> 8;
			vha->fcoe_vn_port_mac[0] = mcp->mb[13] & 0xff;
		}
		/* If FA-WWN supported */
		if (IS_FAWWN_CAPABLE(vha->hw)) {
			if (mcp->mb[7] & BIT_14) {
				vha->port_name[0] = MSB(mcp->mb[16]);
				vha->port_name[1] = LSB(mcp->mb[16]);
				vha->port_name[2] = MSB(mcp->mb[17]);
				vha->port_name[3] = LSB(mcp->mb[17]);
				vha->port_name[4] = MSB(mcp->mb[18]);
				vha->port_name[5] = LSB(mcp->mb[18]);
				vha->port_name[6] = MSB(mcp->mb[19]);
				vha->port_name[7] = LSB(mcp->mb[19]);
				fc_host_port_name(vha->host) =
				    wwn_to_u64(vha->port_name);
				ql_dbg(ql_dbg_mbx, vha, 0x10ca,
				    "FA-WWN acquired %016llx\n",
				    wwn_to_u64(vha->port_name));
			}
		}

		if (IS_QLA27XX(vha->hw) || IS_QLA28XX(vha->hw)) {
			vha->bbcr = mcp->mb[15];
			if (mcp->mb[7] & SCM_EDC_ACC_RECEIVED) {
				ql_log(ql_log_info, vha, 0x11a4,
				    "SCM: EDC ELS completed, flags 0x%x\n",
				    mcp->mb[21]);
			}
			if (mcp->mb[7] & SCM_RDF_ACC_RECEIVED) {
				vha->hw->flags.scm_enabled = 1;
				vha->scm_fabric_connection_flags |=
				    SCM_FLAG_RDF_COMPLETED;
				ql_log(ql_log_info, vha, 0x11a5,
				    "SCM: RDF ELS completed, flags 0x%x\n",
				    mcp->mb[23]);
			}
		}
	}

	return rval;
}

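/*
 * Illustrative sketch (not built): how the out-parameters of
 * qla2x00_get_adapter_id() map onto the loop id, the 24-bit FC port id
 * (domain/area/al_pa, most significant byte first) and the topology word.
 * Hypothetical helper, shown only to document the packing.
 */
#if 0
static void qla2x00_example_read_port_id(scsi_qla_host_t *vha)
{
	uint16_t loop_id, topo, sw_cap;
	uint8_t al_pa, area, domain;

	if (qla2x00_get_adapter_id(vha, &loop_id, &al_pa, &area, &domain,
	    &topo, &sw_cap) != QLA_SUCCESS)
		return;

	ql_dbg(ql_dbg_mbx, vha, 0x1048,
	    "loop_id=%x portid=%02x%02x%02x topo=%x\n",
	    loop_id, domain, area, al_pa, topo);
}
#endif
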
/*
 * qla2x00_get_retry_cnt
 *	Get current firmware login retry count and delay.
 *
 * Input:
 *	ha = adapter block pointer.
 *	retry_cnt = pointer to login retry count.
 *	tov = pointer to login timeout value.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov,
    uint16_t *r_a_tov)
{
	int rval;
	uint16_t ratov;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1049,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_GET_RETRY_COUNT;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x104a,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		/* Convert returned data and check our values. */
		*r_a_tov = mcp->mb[3] / 2;
		ratov = (mcp->mb[3]/2) / 10;  /* mb[3] value is in 100ms */
		if (mcp->mb[1] * ratov > (*retry_cnt) * (*tov)) {
			/* Update to the larger values */
			*retry_cnt = (uint8_t)mcp->mb[1];
			*tov = ratov;
		}

		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104b,
		    "Done %s mb3=%d ratov=%d.\n", __func__, mcp->mb[3], ratov);
	}

	return rval;
}

/*
 * qla2x00_init_firmware
 *	Initialize adapter firmware.
 *
 * Input:
 *	ha = adapter block pointer.
 *	dptr = Initialization control block pointer.
 *	size = size of initialization control block.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104c,
	    "Entered %s.\n", __func__);

	if (IS_P3P_TYPE(ha) && ql2xdbwr)
		qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr,
		    (0x04 | (ha->portnum << 5) | (0 << 8) | (0 << 16)));

	if (ha->flags.npiv_supported)
		mcp->mb[0] = MBC_MID_INITIALIZE_FIRMWARE;
	else
		mcp->mb[0] = MBC_INITIALIZE_FIRMWARE;

	mcp->mb[1] = 0;
	mcp->mb[2] = MSW(ha->init_cb_dma);
	mcp->mb[3] = LSW(ha->init_cb_dma);
	mcp->mb[6] = MSW(MSD(ha->init_cb_dma));
	mcp->mb[7] = LSW(MSD(ha->init_cb_dma));
	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	if (ha->ex_init_cb && ha->ex_init_cb->ex_version) {
		mcp->mb[1] = BIT_0;
		mcp->mb[10] = MSW(ha->ex_init_cb_dma);
		mcp->mb[11] = LSW(ha->ex_init_cb_dma);
		mcp->mb[12] = MSW(MSD(ha->ex_init_cb_dma));
		mcp->mb[13] = LSW(MSD(ha->ex_init_cb_dma));
		mcp->mb[14] = sizeof(*ha->ex_init_cb);
		mcp->out_mb |= MBX_14|MBX_13|MBX_12|MBX_11|MBX_10;
	}

	if (ha->flags.scm_supported_f || vha->flags.nvme2_enabled) {
		mcp->mb[1] |= BIT_1;
		mcp->mb[16] = MSW(ha->sf_init_cb_dma);
		mcp->mb[17] = LSW(ha->sf_init_cb_dma);
		mcp->mb[18] = MSW(MSD(ha->sf_init_cb_dma));
		mcp->mb[19] = LSW(MSD(ha->sf_init_cb_dma));
		mcp->mb[15] = sizeof(*ha->sf_init_cb);
		mcp->out_mb |= MBX_19|MBX_18|MBX_17|MBX_16|MBX_15;
	}

	/* 1 and 2 should normally be captured. */
	mcp->in_mb = MBX_2|MBX_1|MBX_0;
	if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
		/* mb3 is additional info about the installed SFP. */
		mcp->in_mb |= MBX_3;
	mcp->buf_size = size;
	mcp->flags = MBX_DMA_OUT;
	mcp->tov = MBX_TOV_SECONDS;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x104d,
		    "Failed=%x mb[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x.\n",
		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3]);
		if (ha->init_cb) {
			ql_dbg(ql_dbg_mbx, vha, 0x104d, "init_cb:\n");
			ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha,
			    0x0104d, ha->init_cb, sizeof(*ha->init_cb));
		}
		if (ha->ex_init_cb && ha->ex_init_cb->ex_version) {
			ql_dbg(ql_dbg_mbx, vha, 0x104d, "ex_init_cb:\n");
			ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha,
			    0x0104d, ha->ex_init_cb, sizeof(*ha->ex_init_cb));
		}
	} else {
		if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
			if (mcp->mb[2] == 6 || mcp->mb[3] == 2)
				ql_dbg(ql_dbg_mbx, vha, 0x119d,
				    "Invalid SFP/Validation Failed\n");
		}
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104e,
		    "Done %s.\n", __func__);
	}

	return rval;
}


/*
 * qla2x00_get_port_database
 *	Issue normal/enhanced get port database mailbox command
 *	and copy device name as necessary.
 *
 * Input:
 *	ha = adapter state pointer.
 *	dev = structure pointer.
 *	opt = enhanced cmd option byte.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	port_database_t *pd;
	struct port_database_24xx *pd24;
	dma_addr_t pd_dma;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104f,
	    "Entered %s.\n", __func__);

	pd24 = NULL;
	pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
	if (pd == NULL) {
		ql_log(ql_log_warn, vha, 0x1050,
		    "Failed to allocate port database structure.\n");
		fcport->query = 0;
		return QLA_MEMORY_ALLOC_FAILED;
	}

	mcp->mb[0] = MBC_GET_PORT_DATABASE;
	if (opt != 0 && !IS_FWI2_CAPABLE(ha))
		mcp->mb[0] = MBC_ENHANCED_GET_PORT_DATABASE;
	mcp->mb[2] = MSW(pd_dma);
	mcp->mb[3] = LSW(pd_dma);
	mcp->mb[6] = MSW(MSD(pd_dma));
	mcp->mb[7] = LSW(MSD(pd_dma));
	mcp->mb[9] = vha->vp_idx;
	mcp->out_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
	mcp->in_mb = MBX_0;
	if (IS_FWI2_CAPABLE(ha)) {
		mcp->mb[1] = fcport->loop_id;
		mcp->mb[10] = opt;
		mcp->out_mb |= MBX_10|MBX_1;
		mcp->in_mb |= MBX_1;
	} else if (HAS_EXTENDED_IDS(ha)) {
		mcp->mb[1] = fcport->loop_id;
		mcp->mb[10] = opt;
		mcp->out_mb |= MBX_10|MBX_1;
	} else {
		mcp->mb[1] = fcport->loop_id << 8 | opt;
		mcp->out_mb |= MBX_1;
	}
	mcp->buf_size = IS_FWI2_CAPABLE(ha) ?
	    PORT_DATABASE_24XX_SIZE : PORT_DATABASE_SIZE;
	mcp->flags = MBX_DMA_IN;
	mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS)
		goto gpd_error_out;

	if (IS_FWI2_CAPABLE(ha)) {
		uint64_t zero = 0;
		u8 current_login_state, last_login_state;

		pd24 = (struct port_database_24xx *) pd;

		/* Check for logged in state. */
*/ 2002 if (NVME_TARGET(ha, fcport)) { 2003 current_login_state = pd24->current_login_state >> 4; 2004 last_login_state = pd24->last_login_state >> 4; 2005 } else { 2006 current_login_state = pd24->current_login_state & 0xf; 2007 last_login_state = pd24->last_login_state & 0xf; 2008 } 2009 fcport->current_login_state = pd24->current_login_state; 2010 fcport->last_login_state = pd24->last_login_state; 2011 2012 /* Check for logged in state. */ 2013 if (current_login_state != PDS_PRLI_COMPLETE && 2014 last_login_state != PDS_PRLI_COMPLETE) { 2015 ql_dbg(ql_dbg_mbx, vha, 0x119a, 2016 "Unable to verify login-state (%x/%x) for loop_id %x.\n", 2017 current_login_state, last_login_state, 2018 fcport->loop_id); 2019 rval = QLA_FUNCTION_FAILED; 2020 2021 if (!fcport->query) 2022 goto gpd_error_out; 2023 } 2024 2025 if (fcport->loop_id == FC_NO_LOOP_ID || 2026 (memcmp(fcport->port_name, (uint8_t *)&zero, 8) && 2027 memcmp(fcport->port_name, pd24->port_name, 8))) { 2028 /* We lost the device mid way. */ 2029 rval = QLA_NOT_LOGGED_IN; 2030 goto gpd_error_out; 2031 } 2032 2033 /* Names are little-endian. */ 2034 memcpy(fcport->node_name, pd24->node_name, WWN_SIZE); 2035 memcpy(fcport->port_name, pd24->port_name, WWN_SIZE); 2036 2037 /* Get port_id of device. */ 2038 fcport->d_id.b.domain = pd24->port_id[0]; 2039 fcport->d_id.b.area = pd24->port_id[1]; 2040 fcport->d_id.b.al_pa = pd24->port_id[2]; 2041 fcport->d_id.b.rsvd_1 = 0; 2042 2043 /* If not target must be initiator or unknown type. */ 2044 if ((pd24->prli_svc_param_word_3[0] & BIT_4) == 0) 2045 fcport->port_type = FCT_INITIATOR; 2046 else 2047 fcport->port_type = FCT_TARGET; 2048 2049 /* Passback COS information. */ 2050 fcport->supported_classes = (pd24->flags & PDF_CLASS_2) ? 2051 FC_COS_CLASS2 : FC_COS_CLASS3; 2052 2053 if (pd24->prli_svc_param_word_3[0] & BIT_7) 2054 fcport->flags |= FCF_CONF_COMP_SUPPORTED; 2055 } else { 2056 uint64_t zero = 0; 2057 2058 /* Check for logged in state. */ 2059 if (pd->master_state != PD_STATE_PORT_LOGGED_IN && 2060 pd->slave_state != PD_STATE_PORT_LOGGED_IN) { 2061 ql_dbg(ql_dbg_mbx, vha, 0x100a, 2062 "Unable to verify login-state (%x/%x) - " 2063 "portid=%02x%02x%02x.\n", pd->master_state, 2064 pd->slave_state, fcport->d_id.b.domain, 2065 fcport->d_id.b.area, fcport->d_id.b.al_pa); 2066 rval = QLA_FUNCTION_FAILED; 2067 goto gpd_error_out; 2068 } 2069 2070 if (fcport->loop_id == FC_NO_LOOP_ID || 2071 (memcmp(fcport->port_name, (uint8_t *)&zero, 8) && 2072 memcmp(fcport->port_name, pd->port_name, 8))) { 2073 /* We lost the device mid way. */ 2074 rval = QLA_NOT_LOGGED_IN; 2075 goto gpd_error_out; 2076 } 2077 2078 /* Names are little-endian. */ 2079 memcpy(fcport->node_name, pd->node_name, WWN_SIZE); 2080 memcpy(fcport->port_name, pd->port_name, WWN_SIZE); 2081 2082 /* Get port_id of device. */ 2083 fcport->d_id.b.domain = pd->port_id[0]; 2084 fcport->d_id.b.area = pd->port_id[3]; 2085 fcport->d_id.b.al_pa = pd->port_id[2]; 2086 fcport->d_id.b.rsvd_1 = 0; 2087 2088 /* If not target must be initiator or unknown type. */ 2089 if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0) 2090 fcport->port_type = FCT_INITIATOR; 2091 else 2092 fcport->port_type = FCT_TARGET; 2093 2094 /* Passback COS information. */ 2095 fcport->supported_classes = (pd->options & BIT_4) ? 
2096 FC_COS_CLASS2 : FC_COS_CLASS3; 2097 } 2098 2099 gpd_error_out: 2100 dma_pool_free(ha->s_dma_pool, pd, pd_dma); 2101 fcport->query = 0; 2102 2103 if (rval != QLA_SUCCESS) { 2104 ql_dbg(ql_dbg_mbx, vha, 0x1052, 2105 "Failed=%x mb[0]=%x mb[1]=%x.\n", rval, 2106 mcp->mb[0], mcp->mb[1]); 2107 } else { 2108 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1053, 2109 "Done %s.\n", __func__); 2110 } 2111 2112 return rval; 2113 } 2114 2115 int 2116 qla24xx_get_port_database(scsi_qla_host_t *vha, u16 nport_handle, 2117 struct port_database_24xx *pdb) 2118 { 2119 mbx_cmd_t mc; 2120 mbx_cmd_t *mcp = &mc; 2121 dma_addr_t pdb_dma; 2122 int rval; 2123 2124 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1115, 2125 "Entered %s.\n", __func__); 2126 2127 memset(pdb, 0, sizeof(*pdb)); 2128 2129 pdb_dma = dma_map_single(&vha->hw->pdev->dev, pdb, 2130 sizeof(*pdb), DMA_FROM_DEVICE); 2131 if (!pdb_dma) { 2132 ql_log(ql_log_warn, vha, 0x1116, "Failed to map dma buffer.\n"); 2133 return QLA_MEMORY_ALLOC_FAILED; 2134 } 2135 2136 mcp->mb[0] = MBC_GET_PORT_DATABASE; 2137 mcp->mb[1] = nport_handle; 2138 mcp->mb[2] = MSW(LSD(pdb_dma)); 2139 mcp->mb[3] = LSW(LSD(pdb_dma)); 2140 mcp->mb[6] = MSW(MSD(pdb_dma)); 2141 mcp->mb[7] = LSW(MSD(pdb_dma)); 2142 mcp->mb[9] = 0; 2143 mcp->mb[10] = 0; 2144 mcp->out_mb = MBX_10|MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 2145 mcp->in_mb = MBX_1|MBX_0; 2146 mcp->buf_size = sizeof(*pdb); 2147 mcp->flags = MBX_DMA_IN; 2148 mcp->tov = vha->hw->login_timeout * 2; 2149 rval = qla2x00_mailbox_command(vha, mcp); 2150 2151 if (rval != QLA_SUCCESS) { 2152 ql_dbg(ql_dbg_mbx, vha, 0x111a, 2153 "Failed=%x mb[0]=%x mb[1]=%x.\n", 2154 rval, mcp->mb[0], mcp->mb[1]); 2155 } else { 2156 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111b, 2157 "Done %s.\n", __func__); 2158 } 2159 2160 dma_unmap_single(&vha->hw->pdev->dev, pdb_dma, 2161 sizeof(*pdb), DMA_FROM_DEVICE); 2162 2163 return rval; 2164 } 2165 2166 /* 2167 * qla2x00_get_firmware_state 2168 * Get adapter firmware state. 2169 * 2170 * Input: 2171 * ha = adapter block pointer. 2172 * dptr = pointer for firmware state. 2173 * TARGET_QUEUE_LOCK must be released. 2174 * ADAPTER_STATE_LOCK must be released. 2175 * 2176 * Returns: 2177 * qla2x00 local function return status code. 2178 * 2179 * Context: 2180 * Kernel context. 2181 */ 2182 int 2183 qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states) 2184 { 2185 int rval; 2186 mbx_cmd_t mc; 2187 mbx_cmd_t *mcp = &mc; 2188 struct qla_hw_data *ha = vha->hw; 2189 2190 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1054, 2191 "Entered %s.\n", __func__); 2192 2193 mcp->mb[0] = MBC_GET_FIRMWARE_STATE; 2194 mcp->out_mb = MBX_0; 2195 if (IS_FWI2_CAPABLE(vha->hw)) 2196 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 2197 else 2198 mcp->in_mb = MBX_1|MBX_0; 2199 mcp->tov = MBX_TOV_SECONDS; 2200 mcp->flags = 0; 2201 rval = qla2x00_mailbox_command(vha, mcp); 2202 2203 /* Return firmware states. 
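 * mb[1] is the current firmware state; on FWI2-capable ISPs
 * mb[2]-mb[6] add link/SFP/D-Port detail (see the per-field
 * comments below).  The copy-out happens before rval is checked,
 * so on failure these values may be stale.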
*/ 2204 states[0] = mcp->mb[1]; 2205 if (IS_FWI2_CAPABLE(vha->hw)) { 2206 states[1] = mcp->mb[2]; 2207 states[2] = mcp->mb[3]; /* SFP info */ 2208 states[3] = mcp->mb[4]; 2209 states[4] = mcp->mb[5]; 2210 states[5] = mcp->mb[6]; /* DPORT status */ 2211 } 2212 2213 if (rval != QLA_SUCCESS) { 2214 /*EMPTY*/ 2215 ql_dbg(ql_dbg_mbx, vha, 0x1055, "Failed=%x.\n", rval); 2216 } else { 2217 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 2218 if (mcp->mb[2] == 6 || mcp->mb[3] == 2) 2219 ql_dbg(ql_dbg_mbx, vha, 0x119e, 2220 "Invalid SFP/Validation Failed\n"); 2221 } 2222 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1056, 2223 "Done %s.\n", __func__); 2224 } 2225 2226 return rval; 2227 } 2228 2229 /* 2230 * qla2x00_get_port_name 2231 * Issue get port name mailbox command. 2232 * Returned name is in big endian format. 2233 * 2234 * Input: 2235 * ha = adapter block pointer. 2236 * loop_id = loop ID of device. 2237 * name = pointer for name. 2238 * TARGET_QUEUE_LOCK must be released. 2239 * ADAPTER_STATE_LOCK must be released. 2240 * 2241 * Returns: 2242 * qla2x00 local function return status code. 2243 * 2244 * Context: 2245 * Kernel context. 2246 */ 2247 int 2248 qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name, 2249 uint8_t opt) 2250 { 2251 int rval; 2252 mbx_cmd_t mc; 2253 mbx_cmd_t *mcp = &mc; 2254 2255 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1057, 2256 "Entered %s.\n", __func__); 2257 2258 mcp->mb[0] = MBC_GET_PORT_NAME; 2259 mcp->mb[9] = vha->vp_idx; 2260 mcp->out_mb = MBX_9|MBX_1|MBX_0; 2261 if (HAS_EXTENDED_IDS(vha->hw)) { 2262 mcp->mb[1] = loop_id; 2263 mcp->mb[10] = opt; 2264 mcp->out_mb |= MBX_10; 2265 } else { 2266 mcp->mb[1] = loop_id << 8 | opt; 2267 } 2268 2269 mcp->in_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 2270 mcp->tov = MBX_TOV_SECONDS; 2271 mcp->flags = 0; 2272 rval = qla2x00_mailbox_command(vha, mcp); 2273 2274 if (rval != QLA_SUCCESS) { 2275 /*EMPTY*/ 2276 ql_dbg(ql_dbg_mbx, vha, 0x1058, "Failed=%x.\n", rval); 2277 } else { 2278 if (name != NULL) { 2279 /* This function returns name in big endian. */ 2280 name[0] = MSB(mcp->mb[2]); 2281 name[1] = LSB(mcp->mb[2]); 2282 name[2] = MSB(mcp->mb[3]); 2283 name[3] = LSB(mcp->mb[3]); 2284 name[4] = MSB(mcp->mb[6]); 2285 name[5] = LSB(mcp->mb[6]); 2286 name[6] = MSB(mcp->mb[7]); 2287 name[7] = LSB(mcp->mb[7]); 2288 } 2289 2290 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1059, 2291 "Done %s.\n", __func__); 2292 } 2293 2294 return rval; 2295 } 2296 2297 /* 2298 * qla24xx_link_initialization 2299 * Issue link initialization mailbox command. 2300 * 2301 * Input: 2302 * ha = adapter block pointer. 2303 * TARGET_QUEUE_LOCK must be released. 2304 * ADAPTER_STATE_LOCK must be released. 2305 * 2306 * Returns: 2307 * qla2x00 local function return status code. 2308 * 2309 * Context: 2310 * Kernel context. 
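 *
 * Example (hypothetical caller; error handling is illustrative only):
 *
 *	if (qla24xx_link_initialize(vha) != QLA_SUCCESS)
 *		return QLA_FUNCTION_FAILED;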
2311 */ 2312 int 2313 qla24xx_link_initialize(scsi_qla_host_t *vha) 2314 { 2315 int rval; 2316 mbx_cmd_t mc; 2317 mbx_cmd_t *mcp = &mc; 2318 2319 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1152, 2320 "Entered %s.\n", __func__); 2321 2322 if (!IS_FWI2_CAPABLE(vha->hw) || IS_CNA_CAPABLE(vha->hw)) 2323 return QLA_FUNCTION_FAILED; 2324 2325 mcp->mb[0] = MBC_LINK_INITIALIZATION; 2326 mcp->mb[1] = BIT_4; 2327 if (vha->hw->operating_mode == LOOP) 2328 mcp->mb[1] |= BIT_6; 2329 else 2330 mcp->mb[1] |= BIT_5; 2331 mcp->mb[2] = 0; 2332 mcp->mb[3] = 0; 2333 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 2334 mcp->in_mb = MBX_0; 2335 mcp->tov = MBX_TOV_SECONDS; 2336 mcp->flags = 0; 2337 rval = qla2x00_mailbox_command(vha, mcp); 2338 2339 if (rval != QLA_SUCCESS) { 2340 ql_dbg(ql_dbg_mbx, vha, 0x1153, "Failed=%x.\n", rval); 2341 } else { 2342 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1154, 2343 "Done %s.\n", __func__); 2344 } 2345 2346 return rval; 2347 } 2348 2349 /* 2350 * qla2x00_lip_reset 2351 * Issue LIP reset mailbox command. 2352 * 2353 * Input: 2354 * ha = adapter block pointer. 2355 * TARGET_QUEUE_LOCK must be released. 2356 * ADAPTER_STATE_LOCK must be released. 2357 * 2358 * Returns: 2359 * qla2x00 local function return status code. 2360 * 2361 * Context: 2362 * Kernel context. 2363 */ 2364 int 2365 qla2x00_lip_reset(scsi_qla_host_t *vha) 2366 { 2367 int rval; 2368 mbx_cmd_t mc; 2369 mbx_cmd_t *mcp = &mc; 2370 2371 ql_dbg(ql_dbg_disc, vha, 0x105a, 2372 "Entered %s.\n", __func__); 2373 2374 if (IS_CNA_CAPABLE(vha->hw)) { 2375 /* Logout across all FCFs. */ 2376 mcp->mb[0] = MBC_LIP_FULL_LOGIN; 2377 mcp->mb[1] = BIT_1; 2378 mcp->mb[2] = 0; 2379 mcp->out_mb = MBX_2|MBX_1|MBX_0; 2380 } else if (IS_FWI2_CAPABLE(vha->hw)) { 2381 mcp->mb[0] = MBC_LIP_FULL_LOGIN; 2382 mcp->mb[1] = BIT_4; 2383 mcp->mb[2] = 0; 2384 mcp->mb[3] = vha->hw->loop_reset_delay; 2385 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 2386 } else { 2387 mcp->mb[0] = MBC_LIP_RESET; 2388 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 2389 if (HAS_EXTENDED_IDS(vha->hw)) { 2390 mcp->mb[1] = 0x00ff; 2391 mcp->mb[10] = 0; 2392 mcp->out_mb |= MBX_10; 2393 } else { 2394 mcp->mb[1] = 0xff00; 2395 } 2396 mcp->mb[2] = vha->hw->loop_reset_delay; 2397 mcp->mb[3] = 0; 2398 } 2399 mcp->in_mb = MBX_0; 2400 mcp->tov = MBX_TOV_SECONDS; 2401 mcp->flags = 0; 2402 rval = qla2x00_mailbox_command(vha, mcp); 2403 2404 if (rval != QLA_SUCCESS) { 2405 /*EMPTY*/ 2406 ql_dbg(ql_dbg_mbx, vha, 0x105b, "Failed=%x.\n", rval); 2407 } else { 2408 /*EMPTY*/ 2409 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105c, 2410 "Done %s.\n", __func__); 2411 } 2412 2413 return rval; 2414 } 2415 2416 /* 2417 * qla2x00_send_sns 2418 * Send SNS command. 2419 * 2420 * Input: 2421 * ha = adapter block pointer. 2422 * sns = pointer for command. 2423 * cmd_size = command size. 2424 * buf_size = response/command size. 2425 * TARGET_QUEUE_LOCK must be released. 2426 * ADAPTER_STATE_LOCK must be released. 2427 * 2428 * Returns: 2429 * qla2x00 local function return status code. 2430 * 2431 * Context: 2432 * Kernel context. 
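 *
 * Example (sketch of a name-server GID_PT query; the GID_PT_* sizes
 * and the pre-allocated ha->sns_cmd buffer are assumptions of this
 * example, not requirements of the interface):
 *
 *	rval = qla2x00_send_sns(vha, ha->sns_cmd_dma,
 *	    GID_PT_SNS_CMD_SIZE / 2, GID_PT_SNS_DATA_SIZE);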
2433 */ 2434 int 2435 qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address, 2436 uint16_t cmd_size, size_t buf_size) 2437 { 2438 int rval; 2439 mbx_cmd_t mc; 2440 mbx_cmd_t *mcp = &mc; 2441 2442 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105d, 2443 "Entered %s.\n", __func__); 2444 2445 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105e, 2446 "Retry cnt=%d ratov=%d total tov=%d.\n", 2447 vha->hw->retry_count, vha->hw->login_timeout, mcp->tov); 2448 2449 mcp->mb[0] = MBC_SEND_SNS_COMMAND; 2450 mcp->mb[1] = cmd_size; 2451 mcp->mb[2] = MSW(sns_phys_address); 2452 mcp->mb[3] = LSW(sns_phys_address); 2453 mcp->mb[6] = MSW(MSD(sns_phys_address)); 2454 mcp->mb[7] = LSW(MSD(sns_phys_address)); 2455 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 2456 mcp->in_mb = MBX_0|MBX_1; 2457 mcp->buf_size = buf_size; 2458 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN; 2459 mcp->tov = (vha->hw->login_timeout * 2) + (vha->hw->login_timeout / 2); 2460 rval = qla2x00_mailbox_command(vha, mcp); 2461 2462 if (rval != QLA_SUCCESS) { 2463 /*EMPTY*/ 2464 ql_dbg(ql_dbg_mbx, vha, 0x105f, 2465 "Failed=%x mb[0]=%x mb[1]=%x.\n", 2466 rval, mcp->mb[0], mcp->mb[1]); 2467 } else { 2468 /*EMPTY*/ 2469 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1060, 2470 "Done %s.\n", __func__); 2471 } 2472 2473 return rval; 2474 } 2475 2476 int 2477 qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, 2478 uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt) 2479 { 2480 int rval; 2481 2482 struct logio_entry_24xx *lg; 2483 dma_addr_t lg_dma; 2484 uint32_t iop[2]; 2485 struct qla_hw_data *ha = vha->hw; 2486 struct req_que *req; 2487 2488 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1061, 2489 "Entered %s.\n", __func__); 2490 2491 if (vha->vp_idx && vha->qpair) 2492 req = vha->qpair->req; 2493 else 2494 req = ha->req_q_map[0]; 2495 2496 lg = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma); 2497 if (lg == NULL) { 2498 ql_log(ql_log_warn, vha, 0x1062, 2499 "Failed to allocate login IOCB.\n"); 2500 return QLA_MEMORY_ALLOC_FAILED; 2501 } 2502 2503 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE; 2504 lg->entry_count = 1; 2505 lg->handle = make_handle(req->id, lg->handle); 2506 lg->nport_handle = cpu_to_le16(loop_id); 2507 lg->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI); 2508 if (opt & BIT_0) 2509 lg->control_flags |= cpu_to_le16(LCF_COND_PLOGI); 2510 if (opt & BIT_1) 2511 lg->control_flags |= cpu_to_le16(LCF_SKIP_PRLI); 2512 lg->port_id[0] = al_pa; 2513 lg->port_id[1] = area; 2514 lg->port_id[2] = domain; 2515 lg->vp_index = vha->vp_idx; 2516 rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0, 2517 (ha->r_a_tov / 10 * 2) + 2); 2518 if (rval != QLA_SUCCESS) { 2519 ql_dbg(ql_dbg_mbx, vha, 0x1063, 2520 "Failed to issue login IOCB (%x).\n", rval); 2521 } else if (lg->entry_status != 0) { 2522 ql_dbg(ql_dbg_mbx, vha, 0x1064, 2523 "Failed to complete IOCB -- error status (%x).\n", 2524 lg->entry_status); 2525 rval = QLA_FUNCTION_FAILED; 2526 } else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) { 2527 iop[0] = le32_to_cpu(lg->io_parameter[0]); 2528 iop[1] = le32_to_cpu(lg->io_parameter[1]); 2529 2530 ql_dbg(ql_dbg_mbx, vha, 0x1065, 2531 "Failed to complete IOCB -- completion status (%x) " 2532 "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status), 2533 iop[0], iop[1]); 2534 2535 switch (iop[0]) { 2536 case LSC_SCODE_PORTID_USED: 2537 mb[0] = MBS_PORT_ID_USED; 2538 mb[1] = LSW(iop[1]); 2539 break; 2540 case LSC_SCODE_NPORT_USED: 2541 mb[0] = MBS_LOOP_ID_USED; 2542 break; 2543 case LSC_SCODE_NOLINK: 2544 case LSC_SCODE_NOIOCB: 
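		/*
		 * The remaining login status codes (link down, out of
		 * IOCB/exchange resources, no fabric, firmware not ready,
		 * ELS reject, parameter error, ...) are collapsed into a
		 * generic MBS_COMMAND_ERROR below.
		 */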
2545 case LSC_SCODE_NOXCB: 2546 case LSC_SCODE_CMD_FAILED: 2547 case LSC_SCODE_NOFABRIC: 2548 case LSC_SCODE_FW_NOT_READY: 2549 case LSC_SCODE_NOT_LOGGED_IN: 2550 case LSC_SCODE_NOPCB: 2551 case LSC_SCODE_ELS_REJECT: 2552 case LSC_SCODE_CMD_PARAM_ERR: 2553 case LSC_SCODE_NONPORT: 2554 case LSC_SCODE_LOGGED_IN: 2555 case LSC_SCODE_NOFLOGI_ACC: 2556 default: 2557 mb[0] = MBS_COMMAND_ERROR; 2558 break; 2559 } 2560 } else { 2561 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1066, 2562 "Done %s.\n", __func__); 2563 2564 iop[0] = le32_to_cpu(lg->io_parameter[0]); 2565 2566 mb[0] = MBS_COMMAND_COMPLETE; 2567 mb[1] = 0; 2568 if (iop[0] & BIT_4) { 2569 if (iop[0] & BIT_8) 2570 mb[1] |= BIT_1; 2571 } else 2572 mb[1] = BIT_0; 2573 2574 /* Passback COS information. */ 2575 mb[10] = 0; 2576 if (lg->io_parameter[7] || lg->io_parameter[8]) 2577 mb[10] |= BIT_0; /* Class 2. */ 2578 if (lg->io_parameter[9] || lg->io_parameter[10]) 2579 mb[10] |= BIT_1; /* Class 3. */ 2580 if (lg->io_parameter[0] & cpu_to_le32(BIT_7)) 2581 mb[10] |= BIT_7; /* Confirmed Completion 2582 * Allowed 2583 */ 2584 } 2585 2586 dma_pool_free(ha->s_dma_pool, lg, lg_dma); 2587 2588 return rval; 2589 } 2590 2591 /* 2592 * qla2x00_login_fabric 2593 * Issue login fabric port mailbox command. 2594 * 2595 * Input: 2596 * ha = adapter block pointer. 2597 * loop_id = device loop ID. 2598 * domain = device domain. 2599 * area = device area. 2600 * al_pa = device AL_PA. 2601 * status = pointer for return status. 2602 * opt = command options. 2603 * TARGET_QUEUE_LOCK must be released. 2604 * ADAPTER_STATE_LOCK must be released. 2605 * 2606 * Returns: 2607 * qla2x00 local function return status code. 2608 * 2609 * Context: 2610 * Kernel context. 2611 */ 2612 int 2613 qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, 2614 uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt) 2615 { 2616 int rval; 2617 mbx_cmd_t mc; 2618 mbx_cmd_t *mcp = &mc; 2619 struct qla_hw_data *ha = vha->hw; 2620 2621 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1067, 2622 "Entered %s.\n", __func__); 2623 2624 mcp->mb[0] = MBC_LOGIN_FABRIC_PORT; 2625 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 2626 if (HAS_EXTENDED_IDS(ha)) { 2627 mcp->mb[1] = loop_id; 2628 mcp->mb[10] = opt; 2629 mcp->out_mb |= MBX_10; 2630 } else { 2631 mcp->mb[1] = (loop_id << 8) | opt; 2632 } 2633 mcp->mb[2] = domain; 2634 mcp->mb[3] = area << 8 | al_pa; 2635 2636 mcp->in_mb = MBX_7|MBX_6|MBX_2|MBX_1|MBX_0; 2637 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2); 2638 mcp->flags = 0; 2639 rval = qla2x00_mailbox_command(vha, mcp); 2640 2641 /* Return mailbox statuses. */ 2642 if (mb != NULL) { 2643 mb[0] = mcp->mb[0]; 2644 mb[1] = mcp->mb[1]; 2645 mb[2] = mcp->mb[2]; 2646 mb[6] = mcp->mb[6]; 2647 mb[7] = mcp->mb[7]; 2648 /* COS retrieved from Get-Port-Database mailbox command. */ 2649 mb[10] = 0; 2650 } 2651 2652 if (rval != QLA_SUCCESS) { 2653 /* RLU tmp code: need to change main mailbox_command function to 2654 * return ok even when the mailbox completion value is not 2655 * SUCCESS. The caller needs to be responsible to interpret 2656 * the return values of this mailbox command if we're not 2657 * to change too much of the existing code. 
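 * The hex values below are the standard MBS_* mailbox completion
 * codes (invalid command, host interface error, test failed,
 * command error, command parameter error); callers inspect
 * mb[0]/mb[1] for the real outcome.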
2658 */ 2659 if (mcp->mb[0] == 0x4001 || mcp->mb[0] == 0x4002 || 2660 mcp->mb[0] == 0x4003 || mcp->mb[0] == 0x4005 || 2661 mcp->mb[0] == 0x4006) 2662 rval = QLA_SUCCESS; 2663 2664 /*EMPTY*/ 2665 ql_dbg(ql_dbg_mbx, vha, 0x1068, 2666 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", 2667 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); 2668 } else { 2669 /*EMPTY*/ 2670 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1069, 2671 "Done %s.\n", __func__); 2672 } 2673 2674 return rval; 2675 } 2676 2677 /* 2678 * qla2x00_login_local_device 2679 * Issue login loop port mailbox command. 2680 * 2681 * Input: 2682 * ha = adapter block pointer. 2683 * loop_id = device loop ID. 2684 * opt = command options. 2685 * 2686 * Returns: 2687 * Return status code. 2688 * 2689 * Context: 2690 * Kernel context. 2691 * 2692 */ 2693 int 2694 qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport, 2695 uint16_t *mb_ret, uint8_t opt) 2696 { 2697 int rval; 2698 mbx_cmd_t mc; 2699 mbx_cmd_t *mcp = &mc; 2700 struct qla_hw_data *ha = vha->hw; 2701 2702 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106a, 2703 "Entered %s.\n", __func__); 2704 2705 if (IS_FWI2_CAPABLE(ha)) 2706 return qla24xx_login_fabric(vha, fcport->loop_id, 2707 fcport->d_id.b.domain, fcport->d_id.b.area, 2708 fcport->d_id.b.al_pa, mb_ret, opt); 2709 2710 mcp->mb[0] = MBC_LOGIN_LOOP_PORT; 2711 if (HAS_EXTENDED_IDS(ha)) 2712 mcp->mb[1] = fcport->loop_id; 2713 else 2714 mcp->mb[1] = fcport->loop_id << 8; 2715 mcp->mb[2] = opt; 2716 mcp->out_mb = MBX_2|MBX_1|MBX_0; 2717 mcp->in_mb = MBX_7|MBX_6|MBX_1|MBX_0; 2718 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2); 2719 mcp->flags = 0; 2720 rval = qla2x00_mailbox_command(vha, mcp); 2721 2722 /* Return mailbox statuses. */ 2723 if (mb_ret != NULL) { 2724 mb_ret[0] = mcp->mb[0]; 2725 mb_ret[1] = mcp->mb[1]; 2726 mb_ret[6] = mcp->mb[6]; 2727 mb_ret[7] = mcp->mb[7]; 2728 } 2729 2730 if (rval != QLA_SUCCESS) { 2731 /* AV tmp code: need to change main mailbox_command function to 2732 * return ok even when the mailbox completion value is not 2733 * SUCCESS. The caller needs to be responsible to interpret 2734 * the return values of this mailbox command if we're not 2735 * to change too much of the existing code. 
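 * 0x4005/0x4006 are the MBS_ command-error and command-parameter-error
 * completion codes.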
2736 */ 2737 if (mcp->mb[0] == 0x4005 || mcp->mb[0] == 0x4006) 2738 rval = QLA_SUCCESS; 2739 2740 ql_dbg(ql_dbg_mbx, vha, 0x106b, 2741 "Failed=%x mb[0]=%x mb[1]=%x mb[6]=%x mb[7]=%x.\n", 2742 rval, mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]); 2743 } else { 2744 /*EMPTY*/ 2745 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106c, 2746 "Done %s.\n", __func__); 2747 } 2748 2749 return (rval); 2750 } 2751 2752 int 2753 qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, 2754 uint8_t area, uint8_t al_pa) 2755 { 2756 int rval; 2757 struct logio_entry_24xx *lg; 2758 dma_addr_t lg_dma; 2759 struct qla_hw_data *ha = vha->hw; 2760 struct req_que *req; 2761 2762 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106d, 2763 "Entered %s.\n", __func__); 2764 2765 lg = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma); 2766 if (lg == NULL) { 2767 ql_log(ql_log_warn, vha, 0x106e, 2768 "Failed to allocate logout IOCB.\n"); 2769 return QLA_MEMORY_ALLOC_FAILED; 2770 } 2771 2772 req = vha->req; 2773 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE; 2774 lg->entry_count = 1; 2775 lg->handle = make_handle(req->id, lg->handle); 2776 lg->nport_handle = cpu_to_le16(loop_id); 2777 lg->control_flags = 2778 cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO| 2779 LCF_FREE_NPORT); 2780 lg->port_id[0] = al_pa; 2781 lg->port_id[1] = area; 2782 lg->port_id[2] = domain; 2783 lg->vp_index = vha->vp_idx; 2784 rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0, 2785 (ha->r_a_tov / 10 * 2) + 2); 2786 if (rval != QLA_SUCCESS) { 2787 ql_dbg(ql_dbg_mbx, vha, 0x106f, 2788 "Failed to issue logout IOCB (%x).\n", rval); 2789 } else if (lg->entry_status != 0) { 2790 ql_dbg(ql_dbg_mbx, vha, 0x1070, 2791 "Failed to complete IOCB -- error status (%x).\n", 2792 lg->entry_status); 2793 rval = QLA_FUNCTION_FAILED; 2794 } else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) { 2795 ql_dbg(ql_dbg_mbx, vha, 0x1071, 2796 "Failed to complete IOCB -- completion status (%x) " 2797 "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status), 2798 le32_to_cpu(lg->io_parameter[0]), 2799 le32_to_cpu(lg->io_parameter[1])); 2800 } else { 2801 /*EMPTY*/ 2802 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1072, 2803 "Done %s.\n", __func__); 2804 } 2805 2806 dma_pool_free(ha->s_dma_pool, lg, lg_dma); 2807 2808 return rval; 2809 } 2810 2811 /* 2812 * qla2x00_fabric_logout 2813 * Issue logout fabric port mailbox command. 2814 * 2815 * Input: 2816 * ha = adapter block pointer. 2817 * loop_id = device loop ID. 2818 * TARGET_QUEUE_LOCK must be released. 2819 * ADAPTER_STATE_LOCK must be released. 2820 * 2821 * Returns: 2822 * qla2x00 local function return status code. 2823 * 2824 * Context: 2825 * Kernel context. 
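 *
 * Note: FWI2-capable ISPs use the LOGO IOCB path in
 * qla24xx_fabric_logout() above (implicit logout, N_Port handle
 * freed); this mailbox form serves the older ISPs.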
2826 */ 2827 int 2828 qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, 2829 uint8_t area, uint8_t al_pa) 2830 { 2831 int rval; 2832 mbx_cmd_t mc; 2833 mbx_cmd_t *mcp = &mc; 2834 2835 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1073, 2836 "Entered %s.\n", __func__); 2837 2838 mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT; 2839 mcp->out_mb = MBX_1|MBX_0; 2840 if (HAS_EXTENDED_IDS(vha->hw)) { 2841 mcp->mb[1] = loop_id; 2842 mcp->mb[10] = 0; 2843 mcp->out_mb |= MBX_10; 2844 } else { 2845 mcp->mb[1] = loop_id << 8; 2846 } 2847 2848 mcp->in_mb = MBX_1|MBX_0; 2849 mcp->tov = MBX_TOV_SECONDS; 2850 mcp->flags = 0; 2851 rval = qla2x00_mailbox_command(vha, mcp); 2852 2853 if (rval != QLA_SUCCESS) { 2854 /*EMPTY*/ 2855 ql_dbg(ql_dbg_mbx, vha, 0x1074, 2856 "Failed=%x mb[1]=%x.\n", rval, mcp->mb[1]); 2857 } else { 2858 /*EMPTY*/ 2859 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1075, 2860 "Done %s.\n", __func__); 2861 } 2862 2863 return rval; 2864 } 2865 2866 /* 2867 * qla2x00_full_login_lip 2868 * Issue full login LIP mailbox command. 2869 * 2870 * Input: 2871 * ha = adapter block pointer. 2872 * TARGET_QUEUE_LOCK must be released. 2873 * ADAPTER_STATE_LOCK must be released. 2874 * 2875 * Returns: 2876 * qla2x00 local function return status code. 2877 * 2878 * Context: 2879 * Kernel context. 2880 */ 2881 int 2882 qla2x00_full_login_lip(scsi_qla_host_t *vha) 2883 { 2884 int rval; 2885 mbx_cmd_t mc; 2886 mbx_cmd_t *mcp = &mc; 2887 2888 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1076, 2889 "Entered %s.\n", __func__); 2890 2891 mcp->mb[0] = MBC_LIP_FULL_LOGIN; 2892 mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? BIT_4 : 0; 2893 mcp->mb[2] = 0; 2894 mcp->mb[3] = 0; 2895 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 2896 mcp->in_mb = MBX_0; 2897 mcp->tov = MBX_TOV_SECONDS; 2898 mcp->flags = 0; 2899 rval = qla2x00_mailbox_command(vha, mcp); 2900 2901 if (rval != QLA_SUCCESS) { 2902 /*EMPTY*/ 2903 ql_dbg(ql_dbg_mbx, vha, 0x1077, "Failed=%x.\n", rval); 2904 } else { 2905 /*EMPTY*/ 2906 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1078, 2907 "Done %s.\n", __func__); 2908 } 2909 2910 return rval; 2911 } 2912 2913 /* 2914 * qla2x00_get_id_list 2915 * 2916 * Input: 2917 * ha = adapter block pointer. 2918 * 2919 * Returns: 2920 * qla2x00 local function return status code. 2921 * 2922 * Context: 2923 * Kernel context. 
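 *
 * On success *entries is set to the number of ID-list entries the
 * firmware wrote to the id_list buffer.
 *
 * Example (hypothetical caller; assumes ha->gid_list was allocated
 * with a matching DMA handle):
 *
 *	uint16_t entries = 0;
 *
 *	rval = qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma,
 *	    &entries);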
2924 */ 2925 int 2926 qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma, 2927 uint16_t *entries) 2928 { 2929 int rval; 2930 mbx_cmd_t mc; 2931 mbx_cmd_t *mcp = &mc; 2932 2933 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1079, 2934 "Entered %s.\n", __func__); 2935 2936 if (id_list == NULL) 2937 return QLA_FUNCTION_FAILED; 2938 2939 mcp->mb[0] = MBC_GET_ID_LIST; 2940 mcp->out_mb = MBX_0; 2941 if (IS_FWI2_CAPABLE(vha->hw)) { 2942 mcp->mb[2] = MSW(id_list_dma); 2943 mcp->mb[3] = LSW(id_list_dma); 2944 mcp->mb[6] = MSW(MSD(id_list_dma)); 2945 mcp->mb[7] = LSW(MSD(id_list_dma)); 2946 mcp->mb[8] = 0; 2947 mcp->mb[9] = vha->vp_idx; 2948 mcp->out_mb |= MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2; 2949 } else { 2950 mcp->mb[1] = MSW(id_list_dma); 2951 mcp->mb[2] = LSW(id_list_dma); 2952 mcp->mb[3] = MSW(MSD(id_list_dma)); 2953 mcp->mb[6] = LSW(MSD(id_list_dma)); 2954 mcp->out_mb |= MBX_6|MBX_3|MBX_2|MBX_1; 2955 } 2956 mcp->in_mb = MBX_1|MBX_0; 2957 mcp->tov = MBX_TOV_SECONDS; 2958 mcp->flags = 0; 2959 rval = qla2x00_mailbox_command(vha, mcp); 2960 2961 if (rval != QLA_SUCCESS) { 2962 /*EMPTY*/ 2963 ql_dbg(ql_dbg_mbx, vha, 0x107a, "Failed=%x.\n", rval); 2964 } else { 2965 *entries = mcp->mb[1]; 2966 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107b, 2967 "Done %s.\n", __func__); 2968 } 2969 2970 return rval; 2971 } 2972 2973 /* 2974 * qla2x00_get_resource_cnts 2975 * Get current firmware resource counts. 2976 * 2977 * Input: 2978 * ha = adapter block pointer. 2979 * 2980 * Returns: 2981 * qla2x00 local function return status code. 2982 * 2983 * Context: 2984 * Kernel context. 2985 */ 2986 int 2987 qla2x00_get_resource_cnts(scsi_qla_host_t *vha) 2988 { 2989 struct qla_hw_data *ha = vha->hw; 2990 int rval; 2991 mbx_cmd_t mc; 2992 mbx_cmd_t *mcp = &mc; 2993 2994 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107c, 2995 "Entered %s.\n", __func__); 2996 2997 mcp->mb[0] = MBC_GET_RESOURCE_COUNTS; 2998 mcp->out_mb = MBX_0; 2999 mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 3000 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || 3001 IS_QLA27XX(ha) || IS_QLA28XX(ha)) 3002 mcp->in_mb |= MBX_12; 3003 mcp->tov = MBX_TOV_SECONDS; 3004 mcp->flags = 0; 3005 rval = qla2x00_mailbox_command(vha, mcp); 3006 3007 if (rval != QLA_SUCCESS) { 3008 /*EMPTY*/ 3009 ql_dbg(ql_dbg_mbx, vha, 0x107d, 3010 "Failed mb[0]=%x.\n", mcp->mb[0]); 3011 } else { 3012 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107e, 3013 "Done %s mb1=%x mb2=%x mb3=%x mb6=%x mb7=%x mb10=%x " 3014 "mb11=%x mb12=%x.\n", __func__, mcp->mb[1], mcp->mb[2], 3015 mcp->mb[3], mcp->mb[6], mcp->mb[7], mcp->mb[10], 3016 mcp->mb[11], mcp->mb[12]); 3017 3018 ha->orig_fw_tgt_xcb_count = mcp->mb[1]; 3019 ha->cur_fw_tgt_xcb_count = mcp->mb[2]; 3020 ha->cur_fw_xcb_count = mcp->mb[3]; 3021 ha->orig_fw_xcb_count = mcp->mb[6]; 3022 ha->cur_fw_iocb_count = mcp->mb[7]; 3023 ha->orig_fw_iocb_count = mcp->mb[10]; 3024 if (ha->flags.npiv_supported) 3025 ha->max_npiv_vports = mcp->mb[11]; 3026 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) || 3027 IS_QLA28XX(ha)) 3028 ha->fw_max_fcf_count = mcp->mb[12]; 3029 } 3030 3031 return (rval); 3032 } 3033 3034 /* 3035 * qla2x00_get_fcal_position_map 3036 * Get FCAL (LILP) position map using mailbox command 3037 * 3038 * Input: 3039 * ha = adapter state pointer. 3040 * pos_map = buffer pointer (can be NULL). 3041 * 3042 * Returns: 3043 * qla2x00 local function return status code. 3044 * 3045 * Context: 3046 * Kernel context. 
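 *
 * If pos_map is supplied it must hold at least FCAL_MAP_SIZE bytes;
 * byte 0 of the returned map is the count of AL_PA entries that
 * follow.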
3047 */ 3048 int 3049 qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map) 3050 { 3051 int rval; 3052 mbx_cmd_t mc; 3053 mbx_cmd_t *mcp = &mc; 3054 char *pmap; 3055 dma_addr_t pmap_dma; 3056 struct qla_hw_data *ha = vha->hw; 3057 3058 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107f, 3059 "Entered %s.\n", __func__); 3060 3061 pmap = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma); 3062 if (pmap == NULL) { 3063 ql_log(ql_log_warn, vha, 0x1080, 3064 "Memory alloc failed.\n"); 3065 return QLA_MEMORY_ALLOC_FAILED; 3066 } 3067 3068 mcp->mb[0] = MBC_GET_FC_AL_POSITION_MAP; 3069 mcp->mb[2] = MSW(pmap_dma); 3070 mcp->mb[3] = LSW(pmap_dma); 3071 mcp->mb[6] = MSW(MSD(pmap_dma)); 3072 mcp->mb[7] = LSW(MSD(pmap_dma)); 3073 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0; 3074 mcp->in_mb = MBX_1|MBX_0; 3075 mcp->buf_size = FCAL_MAP_SIZE; 3076 mcp->flags = MBX_DMA_IN; 3077 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2); 3078 rval = qla2x00_mailbox_command(vha, mcp); 3079 3080 if (rval == QLA_SUCCESS) { 3081 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1081, 3082 "mb0/mb1=%x/%X FC/AL position map size (%x).\n", 3083 mcp->mb[0], mcp->mb[1], (unsigned)pmap[0]); 3084 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111d, 3085 pmap, pmap[0] + 1); 3086 3087 if (pos_map) 3088 memcpy(pos_map, pmap, FCAL_MAP_SIZE); 3089 } 3090 dma_pool_free(ha->s_dma_pool, pmap, pmap_dma); 3091 3092 if (rval != QLA_SUCCESS) { 3093 ql_dbg(ql_dbg_mbx, vha, 0x1082, "Failed=%x.\n", rval); 3094 } else { 3095 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1083, 3096 "Done %s.\n", __func__); 3097 } 3098 3099 return rval; 3100 } 3101 3102 /* 3103 * qla2x00_get_link_status 3104 * 3105 * Input: 3106 * ha = adapter block pointer. 3107 * loop_id = device loop ID. 3108 * ret_buf = pointer to link status return buffer. 3109 * 3110 * Returns: 3111 * 0 = success. 3112 * BIT_0 = mem alloc error. 3113 * BIT_1 = mailbox error. 3114 */ 3115 int 3116 qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id, 3117 struct link_statistics *stats, dma_addr_t stats_dma) 3118 { 3119 int rval; 3120 mbx_cmd_t mc; 3121 mbx_cmd_t *mcp = &mc; 3122 uint32_t *iter = (uint32_t *)stats; 3123 ushort dwords = offsetof(typeof(*stats), link_up_cnt)/sizeof(*iter); 3124 struct qla_hw_data *ha = vha->hw; 3125 3126 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1084, 3127 "Entered %s.\n", __func__); 3128 3129 mcp->mb[0] = MBC_GET_LINK_STATUS; 3130 mcp->mb[2] = MSW(LSD(stats_dma)); 3131 mcp->mb[3] = LSW(LSD(stats_dma)); 3132 mcp->mb[6] = MSW(MSD(stats_dma)); 3133 mcp->mb[7] = LSW(MSD(stats_dma)); 3134 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0; 3135 mcp->in_mb = MBX_0; 3136 if (IS_FWI2_CAPABLE(ha)) { 3137 mcp->mb[1] = loop_id; 3138 mcp->mb[4] = 0; 3139 mcp->mb[10] = 0; 3140 mcp->out_mb |= MBX_10|MBX_4|MBX_1; 3141 mcp->in_mb |= MBX_1; 3142 } else if (HAS_EXTENDED_IDS(ha)) { 3143 mcp->mb[1] = loop_id; 3144 mcp->mb[10] = 0; 3145 mcp->out_mb |= MBX_10|MBX_1; 3146 } else { 3147 mcp->mb[1] = loop_id << 8; 3148 mcp->out_mb |= MBX_1; 3149 } 3150 mcp->tov = MBX_TOV_SECONDS; 3151 mcp->flags = IOCTL_CMD; 3152 rval = qla2x00_mailbox_command(vha, mcp); 3153 3154 if (rval == QLA_SUCCESS) { 3155 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) { 3156 ql_dbg(ql_dbg_mbx, vha, 0x1085, 3157 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 3158 rval = QLA_FUNCTION_FAILED; 3159 } else { 3160 /* Re-endianize - firmware data is le32. 
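 * Only the leading counters (up to link_up_cnt, per the offsetof()
 * above) are converted here, presumably because the legacy command
 * returns no more than that.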
*/ 3161 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1086, 3162 "Done %s.\n", __func__); 3163 for ( ; dwords--; iter++) 3164 le32_to_cpus(iter); 3165 } 3166 } else { 3167 /* Failed. */ 3168 ql_dbg(ql_dbg_mbx, vha, 0x1087, "Failed=%x.\n", rval); 3169 } 3170 3171 return rval; 3172 } 3173 3174 int 3175 qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats, 3176 dma_addr_t stats_dma, uint16_t options) 3177 { 3178 int rval; 3179 mbx_cmd_t mc; 3180 mbx_cmd_t *mcp = &mc; 3181 uint32_t *iter = (uint32_t *)stats; 3182 ushort dwords = sizeof(*stats)/sizeof(*iter); 3183 3184 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1088, 3185 "Entered %s.\n", __func__); 3186 3187 memset(&mc, 0, sizeof(mc)); 3188 mc.mb[0] = MBC_GET_LINK_PRIV_STATS; 3189 mc.mb[2] = MSW(LSD(stats_dma)); 3190 mc.mb[3] = LSW(LSD(stats_dma)); 3191 mc.mb[6] = MSW(MSD(stats_dma)); 3192 mc.mb[7] = LSW(MSD(stats_dma)); 3193 mc.mb[8] = dwords; 3194 mc.mb[9] = vha->vp_idx; 3195 mc.mb[10] = options; 3196 3197 rval = qla24xx_send_mb_cmd(vha, &mc); 3198 3199 if (rval == QLA_SUCCESS) { 3200 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) { 3201 ql_dbg(ql_dbg_mbx, vha, 0x1089, 3202 "Failed mb[0]=%x.\n", mcp->mb[0]); 3203 rval = QLA_FUNCTION_FAILED; 3204 } else { 3205 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108a, 3206 "Done %s.\n", __func__); 3207 /* Re-endianize - firmware data is le32. */ 3208 for ( ; dwords--; iter++) 3209 le32_to_cpus(iter); 3210 } 3211 } else { 3212 /* Failed. */ 3213 ql_dbg(ql_dbg_mbx, vha, 0x108b, "Failed=%x.\n", rval); 3214 } 3215 3216 return rval; 3217 } 3218 3219 int 3220 qla24xx_abort_command(srb_t *sp) 3221 { 3222 int rval; 3223 unsigned long flags = 0; 3224 3225 struct abort_entry_24xx *abt; 3226 dma_addr_t abt_dma; 3227 uint32_t handle; 3228 fc_port_t *fcport = sp->fcport; 3229 struct scsi_qla_host *vha = fcport->vha; 3230 struct qla_hw_data *ha = vha->hw; 3231 struct req_que *req = vha->req; 3232 struct qla_qpair *qpair = sp->qpair; 3233 3234 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c, 3235 "Entered %s.\n", __func__); 3236 3237 if (sp->qpair) 3238 req = sp->qpair->req; 3239 else 3240 return QLA_FUNCTION_FAILED; 3241 3242 if (ql2xasynctmfenable) 3243 return qla24xx_async_abort_command(sp); 3244 3245 spin_lock_irqsave(qpair->qp_lock_ptr, flags); 3246 for (handle = 1; handle < req->num_outstanding_cmds; handle++) { 3247 if (req->outstanding_cmds[handle] == sp) 3248 break; 3249 } 3250 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); 3251 if (handle == req->num_outstanding_cmds) { 3252 /* Command not found. 
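 * The sp is no longer in this queue's outstanding_cmds[], most
 * likely because it already completed, so there is nothing left
 * to abort.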
*/ 3253 return QLA_FUNCTION_FAILED; 3254 } 3255 3256 abt = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &abt_dma); 3257 if (abt == NULL) { 3258 ql_log(ql_log_warn, vha, 0x108d, 3259 "Failed to allocate abort IOCB.\n"); 3260 return QLA_MEMORY_ALLOC_FAILED; 3261 } 3262 3263 abt->entry_type = ABORT_IOCB_TYPE; 3264 abt->entry_count = 1; 3265 abt->handle = make_handle(req->id, abt->handle); 3266 abt->nport_handle = cpu_to_le16(fcport->loop_id); 3267 abt->handle_to_abort = make_handle(req->id, handle); 3268 abt->port_id[0] = fcport->d_id.b.al_pa; 3269 abt->port_id[1] = fcport->d_id.b.area; 3270 abt->port_id[2] = fcport->d_id.b.domain; 3271 abt->vp_index = fcport->vha->vp_idx; 3272 3273 abt->req_que_no = cpu_to_le16(req->id); 3274 /* Need to pass original sp */ 3275 qla_nvme_abort_set_option(abt, sp); 3276 3277 rval = qla2x00_issue_iocb(vha, abt, abt_dma, 0); 3278 if (rval != QLA_SUCCESS) { 3279 ql_dbg(ql_dbg_mbx, vha, 0x108e, 3280 "Failed to issue IOCB (%x).\n", rval); 3281 } else if (abt->entry_status != 0) { 3282 ql_dbg(ql_dbg_mbx, vha, 0x108f, 3283 "Failed to complete IOCB -- error status (%x).\n", 3284 abt->entry_status); 3285 rval = QLA_FUNCTION_FAILED; 3286 } else if (abt->nport_handle != cpu_to_le16(0)) { 3287 ql_dbg(ql_dbg_mbx, vha, 0x1090, 3288 "Failed to complete IOCB -- completion status (%x).\n", 3289 le16_to_cpu(abt->nport_handle)); 3290 if (abt->nport_handle == cpu_to_le16(CS_IOCB_ERROR)) 3291 rval = QLA_FUNCTION_PARAMETER_ERROR; 3292 else 3293 rval = QLA_FUNCTION_FAILED; 3294 } else { 3295 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1091, 3296 "Done %s.\n", __func__); 3297 } 3298 if (rval == QLA_SUCCESS) 3299 qla_nvme_abort_process_comp_status(abt, sp); 3300 3301 qla_wait_nvme_release_cmd_kref(sp); 3302 3303 dma_pool_free(ha->s_dma_pool, abt, abt_dma); 3304 3305 return rval; 3306 } 3307 3308 struct tsk_mgmt_cmd { 3309 union { 3310 struct tsk_mgmt_entry tsk; 3311 struct sts_entry_24xx sts; 3312 } p; 3313 }; 3314 3315 static int 3316 __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport, 3317 uint64_t l, int tag) 3318 { 3319 int rval, rval2; 3320 struct tsk_mgmt_cmd *tsk; 3321 struct sts_entry_24xx *sts; 3322 dma_addr_t tsk_dma; 3323 scsi_qla_host_t *vha; 3324 struct qla_hw_data *ha; 3325 struct req_que *req; 3326 struct qla_qpair *qpair; 3327 3328 vha = fcport->vha; 3329 ha = vha->hw; 3330 req = vha->req; 3331 3332 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1092, 3333 "Entered %s.\n", __func__); 3334 3335 if (vha->vp_idx && vha->qpair) { 3336 /* NPIV port */ 3337 qpair = vha->qpair; 3338 req = qpair->req; 3339 } 3340 3341 tsk = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma); 3342 if (tsk == NULL) { 3343 ql_log(ql_log_warn, vha, 0x1093, 3344 "Failed to allocate task management IOCB.\n"); 3345 return QLA_MEMORY_ALLOC_FAILED; 3346 } 3347 3348 tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE; 3349 tsk->p.tsk.entry_count = 1; 3350 tsk->p.tsk.handle = make_handle(req->id, tsk->p.tsk.handle); 3351 tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id); 3352 tsk->p.tsk.timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); 3353 tsk->p.tsk.control_flags = cpu_to_le32(type); 3354 tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa; 3355 tsk->p.tsk.port_id[1] = fcport->d_id.b.area; 3356 tsk->p.tsk.port_id[2] = fcport->d_id.b.domain; 3357 tsk->p.tsk.vp_index = fcport->vha->vp_idx; 3358 if (type == TCF_LUN_RESET) { 3359 int_to_scsilun(l, &tsk->p.tsk.lun); 3360 host_to_fcp_swap((uint8_t *)&tsk->p.tsk.lun, 3361 sizeof(tsk->p.tsk.lun)); 3362 } 3363 3364 sts = &tsk->p.sts; 3365 rval = 
qla2x00_issue_iocb(vha, tsk, tsk_dma, 0); 3366 if (rval != QLA_SUCCESS) { 3367 ql_dbg(ql_dbg_mbx, vha, 0x1094, 3368 "Failed to issue %s reset IOCB (%x).\n", name, rval); 3369 } else if (sts->entry_status != 0) { 3370 ql_dbg(ql_dbg_mbx, vha, 0x1095, 3371 "Failed to complete IOCB -- error status (%x).\n", 3372 sts->entry_status); 3373 rval = QLA_FUNCTION_FAILED; 3374 } else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) { 3375 ql_dbg(ql_dbg_mbx, vha, 0x1096, 3376 "Failed to complete IOCB -- completion status (%x).\n", 3377 le16_to_cpu(sts->comp_status)); 3378 rval = QLA_FUNCTION_FAILED; 3379 } else if (le16_to_cpu(sts->scsi_status) & 3380 SS_RESPONSE_INFO_LEN_VALID) { 3381 if (le32_to_cpu(sts->rsp_data_len) < 4) { 3382 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1097, 3383 "Ignoring inconsistent data length -- not enough " 3384 "response info (%d).\n", 3385 le32_to_cpu(sts->rsp_data_len)); 3386 } else if (sts->data[3]) { 3387 ql_dbg(ql_dbg_mbx, vha, 0x1098, 3388 "Failed to complete IOCB -- response (%x).\n", 3389 sts->data[3]); 3390 rval = QLA_FUNCTION_FAILED; 3391 } 3392 } 3393 3394 /* Issue marker IOCB. */ 3395 rval2 = qla2x00_marker(vha, ha->base_qpair, fcport->loop_id, l, 3396 type == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID); 3397 if (rval2 != QLA_SUCCESS) { 3398 ql_dbg(ql_dbg_mbx, vha, 0x1099, 3399 "Failed to issue marker IOCB (%x).\n", rval2); 3400 } else { 3401 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109a, 3402 "Done %s.\n", __func__); 3403 } 3404 3405 dma_pool_free(ha->s_dma_pool, tsk, tsk_dma); 3406 3407 return rval; 3408 } 3409 3410 int 3411 qla24xx_abort_target(struct fc_port *fcport, uint64_t l, int tag) 3412 { 3413 struct qla_hw_data *ha = fcport->vha->hw; 3414 3415 if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha)) 3416 return qla2x00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag); 3417 3418 return __qla24xx_issue_tmf("Target", TCF_TARGET_RESET, fcport, l, tag); 3419 } 3420 3421 int 3422 qla24xx_lun_reset(struct fc_port *fcport, uint64_t l, int tag) 3423 { 3424 struct qla_hw_data *ha = fcport->vha->hw; 3425 3426 if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha)) 3427 return qla2x00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag); 3428 3429 return __qla24xx_issue_tmf("Lun", TCF_LUN_RESET, fcport, l, tag); 3430 } 3431 3432 int 3433 qla2x00_system_error(scsi_qla_host_t *vha) 3434 { 3435 int rval; 3436 mbx_cmd_t mc; 3437 mbx_cmd_t *mcp = &mc; 3438 struct qla_hw_data *ha = vha->hw; 3439 3440 if (!IS_QLA23XX(ha) && !IS_FWI2_CAPABLE(ha)) 3441 return QLA_FUNCTION_FAILED; 3442 3443 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109b, 3444 "Entered %s.\n", __func__); 3445 3446 mcp->mb[0] = MBC_GEN_SYSTEM_ERROR; 3447 mcp->out_mb = MBX_0; 3448 mcp->in_mb = MBX_0; 3449 mcp->tov = 5; 3450 mcp->flags = 0; 3451 rval = qla2x00_mailbox_command(vha, mcp); 3452 3453 if (rval != QLA_SUCCESS) { 3454 ql_dbg(ql_dbg_mbx, vha, 0x109c, "Failed=%x.\n", rval); 3455 } else { 3456 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109d, 3457 "Done %s.\n", __func__); 3458 } 3459 3460 return rval; 3461 } 3462 3463 int 3464 qla2x00_write_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t data) 3465 { 3466 int rval; 3467 mbx_cmd_t mc; 3468 mbx_cmd_t *mcp = &mc; 3469 3470 if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) && 3471 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw)) 3472 return QLA_FUNCTION_FAILED; 3473 3474 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1182, 3475 "Entered %s.\n", __func__); 3476 3477 mcp->mb[0] = MBC_WRITE_SERDES; 3478 mcp->mb[1] = addr; 3479 if (IS_QLA2031(vha->hw)) 3480 mcp->mb[2] = data & 
0xff; 3481 else 3482 mcp->mb[2] = data; 3483 3484 mcp->mb[3] = 0; 3485 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 3486 mcp->in_mb = MBX_0; 3487 mcp->tov = MBX_TOV_SECONDS; 3488 mcp->flags = 0; 3489 rval = qla2x00_mailbox_command(vha, mcp); 3490 3491 if (rval != QLA_SUCCESS) { 3492 ql_dbg(ql_dbg_mbx, vha, 0x1183, 3493 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 3494 } else { 3495 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1184, 3496 "Done %s.\n", __func__); 3497 } 3498 3499 return rval; 3500 } 3501 3502 int 3503 qla2x00_read_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t *data) 3504 { 3505 int rval; 3506 mbx_cmd_t mc; 3507 mbx_cmd_t *mcp = &mc; 3508 3509 if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) && 3510 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw)) 3511 return QLA_FUNCTION_FAILED; 3512 3513 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1185, 3514 "Entered %s.\n", __func__); 3515 3516 mcp->mb[0] = MBC_READ_SERDES; 3517 mcp->mb[1] = addr; 3518 mcp->mb[3] = 0; 3519 mcp->out_mb = MBX_3|MBX_1|MBX_0; 3520 mcp->in_mb = MBX_1|MBX_0; 3521 mcp->tov = MBX_TOV_SECONDS; 3522 mcp->flags = 0; 3523 rval = qla2x00_mailbox_command(vha, mcp); 3524 3525 if (IS_QLA2031(vha->hw)) 3526 *data = mcp->mb[1] & 0xff; 3527 else 3528 *data = mcp->mb[1]; 3529 3530 if (rval != QLA_SUCCESS) { 3531 ql_dbg(ql_dbg_mbx, vha, 0x1186, 3532 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 3533 } else { 3534 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1187, 3535 "Done %s.\n", __func__); 3536 } 3537 3538 return rval; 3539 } 3540 3541 int 3542 qla8044_write_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t data) 3543 { 3544 int rval; 3545 mbx_cmd_t mc; 3546 mbx_cmd_t *mcp = &mc; 3547 3548 if (!IS_QLA8044(vha->hw)) 3549 return QLA_FUNCTION_FAILED; 3550 3551 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x11a0, 3552 "Entered %s.\n", __func__); 3553 3554 mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG; 3555 mcp->mb[1] = HCS_WRITE_SERDES; 3556 mcp->mb[3] = LSW(addr); 3557 mcp->mb[4] = MSW(addr); 3558 mcp->mb[5] = LSW(data); 3559 mcp->mb[6] = MSW(data); 3560 mcp->out_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_1|MBX_0; 3561 mcp->in_mb = MBX_0; 3562 mcp->tov = MBX_TOV_SECONDS; 3563 mcp->flags = 0; 3564 rval = qla2x00_mailbox_command(vha, mcp); 3565 3566 if (rval != QLA_SUCCESS) { 3567 ql_dbg(ql_dbg_mbx, vha, 0x11a1, 3568 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 3569 } else { 3570 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1188, 3571 "Done %s.\n", __func__); 3572 } 3573 3574 return rval; 3575 } 3576 3577 int 3578 qla8044_read_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t *data) 3579 { 3580 int rval; 3581 mbx_cmd_t mc; 3582 mbx_cmd_t *mcp = &mc; 3583 3584 if (!IS_QLA8044(vha->hw)) 3585 return QLA_FUNCTION_FAILED; 3586 3587 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1189, 3588 "Entered %s.\n", __func__); 3589 3590 mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG; 3591 mcp->mb[1] = HCS_READ_SERDES; 3592 mcp->mb[3] = LSW(addr); 3593 mcp->mb[4] = MSW(addr); 3594 mcp->out_mb = MBX_4|MBX_3|MBX_1|MBX_0; 3595 mcp->in_mb = MBX_2|MBX_1|MBX_0; 3596 mcp->tov = MBX_TOV_SECONDS; 3597 mcp->flags = 0; 3598 rval = qla2x00_mailbox_command(vha, mcp); 3599 3600 *data = mcp->mb[2] << 16 | mcp->mb[1]; 3601 3602 if (rval != QLA_SUCCESS) { 3603 ql_dbg(ql_dbg_mbx, vha, 0x118a, 3604 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 3605 } else { 3606 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118b, 3607 "Done %s.\n", __func__); 3608 } 3609 3610 return rval; 3611 } 3612 3613 /** 3614 * qla2x00_set_serdes_params() - 3615 * @vha: HA context 3616 * @sw_em_1g: serial link options 
3617 * @sw_em_2g: serial link options 3618 * @sw_em_4g: serial link options 3619 * 3620 * Returns 3621 */ 3622 int 3623 qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g, 3624 uint16_t sw_em_2g, uint16_t sw_em_4g) 3625 { 3626 int rval; 3627 mbx_cmd_t mc; 3628 mbx_cmd_t *mcp = &mc; 3629 3630 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109e, 3631 "Entered %s.\n", __func__); 3632 3633 mcp->mb[0] = MBC_SERDES_PARAMS; 3634 mcp->mb[1] = BIT_0; 3635 mcp->mb[2] = sw_em_1g | BIT_15; 3636 mcp->mb[3] = sw_em_2g | BIT_15; 3637 mcp->mb[4] = sw_em_4g | BIT_15; 3638 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 3639 mcp->in_mb = MBX_0; 3640 mcp->tov = MBX_TOV_SECONDS; 3641 mcp->flags = 0; 3642 rval = qla2x00_mailbox_command(vha, mcp); 3643 3644 if (rval != QLA_SUCCESS) { 3645 /*EMPTY*/ 3646 ql_dbg(ql_dbg_mbx, vha, 0x109f, 3647 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 3648 } else { 3649 /*EMPTY*/ 3650 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a0, 3651 "Done %s.\n", __func__); 3652 } 3653 3654 return rval; 3655 } 3656 3657 int 3658 qla2x00_stop_firmware(scsi_qla_host_t *vha) 3659 { 3660 int rval; 3661 mbx_cmd_t mc; 3662 mbx_cmd_t *mcp = &mc; 3663 3664 if (!IS_FWI2_CAPABLE(vha->hw)) 3665 return QLA_FUNCTION_FAILED; 3666 3667 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a1, 3668 "Entered %s.\n", __func__); 3669 3670 mcp->mb[0] = MBC_STOP_FIRMWARE; 3671 mcp->mb[1] = 0; 3672 mcp->out_mb = MBX_1|MBX_0; 3673 mcp->in_mb = MBX_0; 3674 mcp->tov = 5; 3675 mcp->flags = 0; 3676 rval = qla2x00_mailbox_command(vha, mcp); 3677 3678 if (rval != QLA_SUCCESS) { 3679 ql_dbg(ql_dbg_mbx, vha, 0x10a2, "Failed=%x.\n", rval); 3680 if (mcp->mb[0] == MBS_INVALID_COMMAND) 3681 rval = QLA_INVALID_COMMAND; 3682 } else { 3683 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a3, 3684 "Done %s.\n", __func__); 3685 } 3686 3687 return rval; 3688 } 3689 3690 int 3691 qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma, 3692 uint16_t buffers) 3693 { 3694 int rval; 3695 mbx_cmd_t mc; 3696 mbx_cmd_t *mcp = &mc; 3697 3698 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a4, 3699 "Entered %s.\n", __func__); 3700 3701 if (!IS_FWI2_CAPABLE(vha->hw)) 3702 return QLA_FUNCTION_FAILED; 3703 3704 if (unlikely(pci_channel_offline(vha->hw->pdev))) 3705 return QLA_FUNCTION_FAILED; 3706 3707 mcp->mb[0] = MBC_TRACE_CONTROL; 3708 mcp->mb[1] = TC_EFT_ENABLE; 3709 mcp->mb[2] = LSW(eft_dma); 3710 mcp->mb[3] = MSW(eft_dma); 3711 mcp->mb[4] = LSW(MSD(eft_dma)); 3712 mcp->mb[5] = MSW(MSD(eft_dma)); 3713 mcp->mb[6] = buffers; 3714 mcp->mb[7] = TC_AEN_DISABLE; 3715 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 3716 mcp->in_mb = MBX_1|MBX_0; 3717 mcp->tov = MBX_TOV_SECONDS; 3718 mcp->flags = 0; 3719 rval = qla2x00_mailbox_command(vha, mcp); 3720 if (rval != QLA_SUCCESS) { 3721 ql_dbg(ql_dbg_mbx, vha, 0x10a5, 3722 "Failed=%x mb[0]=%x mb[1]=%x.\n", 3723 rval, mcp->mb[0], mcp->mb[1]); 3724 } else { 3725 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a6, 3726 "Done %s.\n", __func__); 3727 } 3728 3729 return rval; 3730 } 3731 3732 int 3733 qla2x00_disable_eft_trace(scsi_qla_host_t *vha) 3734 { 3735 int rval; 3736 mbx_cmd_t mc; 3737 mbx_cmd_t *mcp = &mc; 3738 3739 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a7, 3740 "Entered %s.\n", __func__); 3741 3742 if (!IS_FWI2_CAPABLE(vha->hw)) 3743 return QLA_FUNCTION_FAILED; 3744 3745 if (unlikely(pci_channel_offline(vha->hw->pdev))) 3746 return QLA_FUNCTION_FAILED; 3747 3748 mcp->mb[0] = MBC_TRACE_CONTROL; 3749 mcp->mb[1] = TC_EFT_DISABLE; 3750 mcp->out_mb = MBX_1|MBX_0; 3751 mcp->in_mb = 
MBX_1|MBX_0; 3752 mcp->tov = MBX_TOV_SECONDS; 3753 mcp->flags = 0; 3754 rval = qla2x00_mailbox_command(vha, mcp); 3755 if (rval != QLA_SUCCESS) { 3756 ql_dbg(ql_dbg_mbx, vha, 0x10a8, 3757 "Failed=%x mb[0]=%x mb[1]=%x.\n", 3758 rval, mcp->mb[0], mcp->mb[1]); 3759 } else { 3760 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a9, 3761 "Done %s.\n", __func__); 3762 } 3763 3764 return rval; 3765 } 3766 3767 int 3768 qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma, 3769 uint16_t buffers, uint16_t *mb, uint32_t *dwords) 3770 { 3771 int rval; 3772 mbx_cmd_t mc; 3773 mbx_cmd_t *mcp = &mc; 3774 3775 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10aa, 3776 "Entered %s.\n", __func__); 3777 3778 if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw) && 3779 !IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) && 3780 !IS_QLA28XX(vha->hw)) 3781 return QLA_FUNCTION_FAILED; 3782 3783 if (unlikely(pci_channel_offline(vha->hw->pdev))) 3784 return QLA_FUNCTION_FAILED; 3785 3786 mcp->mb[0] = MBC_TRACE_CONTROL; 3787 mcp->mb[1] = TC_FCE_ENABLE; 3788 mcp->mb[2] = LSW(fce_dma); 3789 mcp->mb[3] = MSW(fce_dma); 3790 mcp->mb[4] = LSW(MSD(fce_dma)); 3791 mcp->mb[5] = MSW(MSD(fce_dma)); 3792 mcp->mb[6] = buffers; 3793 mcp->mb[7] = TC_AEN_DISABLE; 3794 mcp->mb[8] = 0; 3795 mcp->mb[9] = TC_FCE_DEFAULT_RX_SIZE; 3796 mcp->mb[10] = TC_FCE_DEFAULT_TX_SIZE; 3797 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2| 3798 MBX_1|MBX_0; 3799 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 3800 mcp->tov = MBX_TOV_SECONDS; 3801 mcp->flags = 0; 3802 rval = qla2x00_mailbox_command(vha, mcp); 3803 if (rval != QLA_SUCCESS) { 3804 ql_dbg(ql_dbg_mbx, vha, 0x10ab, 3805 "Failed=%x mb[0]=%x mb[1]=%x.\n", 3806 rval, mcp->mb[0], mcp->mb[1]); 3807 } else { 3808 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ac, 3809 "Done %s.\n", __func__); 3810 3811 if (mb) 3812 memcpy(mb, mcp->mb, 8 * sizeof(*mb)); 3813 if (dwords) 3814 *dwords = buffers; 3815 } 3816 3817 return rval; 3818 } 3819 3820 int 3821 qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd) 3822 { 3823 int rval; 3824 mbx_cmd_t mc; 3825 mbx_cmd_t *mcp = &mc; 3826 3827 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ad, 3828 "Entered %s.\n", __func__); 3829 3830 if (!IS_FWI2_CAPABLE(vha->hw)) 3831 return QLA_FUNCTION_FAILED; 3832 3833 if (unlikely(pci_channel_offline(vha->hw->pdev))) 3834 return QLA_FUNCTION_FAILED; 3835 3836 mcp->mb[0] = MBC_TRACE_CONTROL; 3837 mcp->mb[1] = TC_FCE_DISABLE; 3838 mcp->mb[2] = TC_FCE_DISABLE_TRACE; 3839 mcp->out_mb = MBX_2|MBX_1|MBX_0; 3840 mcp->in_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2| 3841 MBX_1|MBX_0; 3842 mcp->tov = MBX_TOV_SECONDS; 3843 mcp->flags = 0; 3844 rval = qla2x00_mailbox_command(vha, mcp); 3845 if (rval != QLA_SUCCESS) { 3846 ql_dbg(ql_dbg_mbx, vha, 0x10ae, 3847 "Failed=%x mb[0]=%x mb[1]=%x.\n", 3848 rval, mcp->mb[0], mcp->mb[1]); 3849 } else { 3850 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10af, 3851 "Done %s.\n", __func__); 3852 3853 if (wr) 3854 *wr = (uint64_t) mcp->mb[5] << 48 | 3855 (uint64_t) mcp->mb[4] << 32 | 3856 (uint64_t) mcp->mb[3] << 16 | 3857 (uint64_t) mcp->mb[2]; 3858 if (rd) 3859 *rd = (uint64_t) mcp->mb[9] << 48 | 3860 (uint64_t) mcp->mb[8] << 32 | 3861 (uint64_t) mcp->mb[7] << 16 | 3862 (uint64_t) mcp->mb[6]; 3863 } 3864 3865 return rval; 3866 } 3867 3868 int 3869 qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id, 3870 uint16_t *port_speed, uint16_t *mb) 3871 { 3872 int rval; 3873 mbx_cmd_t mc; 3874 mbx_cmd_t *mcp = &mc; 3875 3876 ql_dbg(ql_dbg_mbx + 
ql_dbg_verbose, vha, 0x10b0, 3877 "Entered %s.\n", __func__); 3878 3879 if (!IS_IIDMA_CAPABLE(vha->hw)) 3880 return QLA_FUNCTION_FAILED; 3881 3882 mcp->mb[0] = MBC_PORT_PARAMS; 3883 mcp->mb[1] = loop_id; 3884 mcp->mb[2] = mcp->mb[3] = 0; 3885 mcp->mb[9] = vha->vp_idx; 3886 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0; 3887 mcp->in_mb = MBX_3|MBX_1|MBX_0; 3888 mcp->tov = MBX_TOV_SECONDS; 3889 mcp->flags = 0; 3890 rval = qla2x00_mailbox_command(vha, mcp); 3891 3892 /* Return mailbox statuses. */ 3893 if (mb) { 3894 mb[0] = mcp->mb[0]; 3895 mb[1] = mcp->mb[1]; 3896 mb[3] = mcp->mb[3]; 3897 } 3898 3899 if (rval != QLA_SUCCESS) { 3900 ql_dbg(ql_dbg_mbx, vha, 0x10b1, "Failed=%x.\n", rval); 3901 } else { 3902 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b2, 3903 "Done %s.\n", __func__); 3904 if (port_speed) 3905 *port_speed = mcp->mb[3]; 3906 } 3907 3908 return rval; 3909 } 3910 3911 int 3912 qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id, 3913 uint16_t port_speed, uint16_t *mb) 3914 { 3915 int rval; 3916 mbx_cmd_t mc; 3917 mbx_cmd_t *mcp = &mc; 3918 3919 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b3, 3920 "Entered %s.\n", __func__); 3921 3922 if (!IS_IIDMA_CAPABLE(vha->hw)) 3923 return QLA_FUNCTION_FAILED; 3924 3925 mcp->mb[0] = MBC_PORT_PARAMS; 3926 mcp->mb[1] = loop_id; 3927 mcp->mb[2] = BIT_0; 3928 mcp->mb[3] = port_speed & 0x3F; 3929 mcp->mb[9] = vha->vp_idx; 3930 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0; 3931 mcp->in_mb = MBX_3|MBX_1|MBX_0; 3932 mcp->tov = MBX_TOV_SECONDS; 3933 mcp->flags = 0; 3934 rval = qla2x00_mailbox_command(vha, mcp); 3935 3936 /* Return mailbox statuses. */ 3937 if (mb) { 3938 mb[0] = mcp->mb[0]; 3939 mb[1] = mcp->mb[1]; 3940 mb[3] = mcp->mb[3]; 3941 } 3942 3943 if (rval != QLA_SUCCESS) { 3944 ql_dbg(ql_dbg_mbx, vha, 0x10b4, 3945 "Failed=%x.\n", rval); 3946 } else { 3947 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b5, 3948 "Done %s.\n", __func__); 3949 } 3950 3951 return rval; 3952 } 3953 3954 void 3955 qla24xx_report_id_acquisition(scsi_qla_host_t *vha, 3956 struct vp_rpt_id_entry_24xx *rptid_entry) 3957 { 3958 struct qla_hw_data *ha = vha->hw; 3959 scsi_qla_host_t *vp = NULL; 3960 unsigned long flags; 3961 int found; 3962 port_id_t id; 3963 struct fc_port *fcport; 3964 3965 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6, 3966 "Entered %s.\n", __func__); 3967 3968 if (rptid_entry->entry_status != 0) 3969 return; 3970 3971 id.b.domain = rptid_entry->port_id[2]; 3972 id.b.area = rptid_entry->port_id[1]; 3973 id.b.al_pa = rptid_entry->port_id[0]; 3974 id.b.rsvd_1 = 0; 3975 ha->flags.n2n_ae = 0; 3976 3977 if (rptid_entry->format == 0) { 3978 /* loop */ 3979 ql_dbg(ql_dbg_async, vha, 0x10b7, 3980 "Format 0 : Number of VPs setup %d, number of " 3981 "VPs acquired %d.\n", rptid_entry->vp_setup, 3982 rptid_entry->vp_acquired); 3983 ql_dbg(ql_dbg_async, vha, 0x10b8, 3984 "Primary port id %02x%02x%02x.\n", 3985 rptid_entry->port_id[2], rptid_entry->port_id[1], 3986 rptid_entry->port_id[0]); 3987 ha->current_topology = ISP_CFG_NL; 3988 qlt_update_host_map(vha, id); 3989 3990 } else if (rptid_entry->format == 1) { 3991 /* fabric */ 3992 ql_dbg(ql_dbg_async, vha, 0x10b9, 3993 "Format 1: VP[%d] enabled - status %d - with " 3994 "port id %02x%02x%02x.\n", rptid_entry->vp_idx, 3995 rptid_entry->vp_status, 3996 rptid_entry->port_id[2], rptid_entry->port_id[1], 3997 rptid_entry->port_id[0]); 3998 ql_dbg(ql_dbg_async, vha, 0x5075, 3999 "Format 1: Remote WWPN %8phC.\n", 4000 rptid_entry->u.f1.port_name); 4001 4002 ql_dbg(ql_dbg_async, vha, 0x5075, 4003 "Format 1: 
WWPN %8phC.\n", 4004 vha->port_name); 4005 4006 switch (rptid_entry->u.f1.flags & TOPO_MASK) { 4007 case TOPO_N2N: 4008 ha->current_topology = ISP_CFG_N; 4009 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); 4010 list_for_each_entry(fcport, &vha->vp_fcports, list) { 4011 fcport->scan_state = QLA_FCPORT_SCAN; 4012 fcport->n2n_flag = 0; 4013 } 4014 id.b24 = 0; 4015 if (wwn_to_u64(vha->port_name) > 4016 wwn_to_u64(rptid_entry->u.f1.port_name)) { 4017 vha->d_id.b24 = 0; 4018 vha->d_id.b.al_pa = 1; 4019 ha->flags.n2n_bigger = 1; 4020 4021 id.b.al_pa = 2; 4022 ql_dbg(ql_dbg_async, vha, 0x5075, 4023 "Format 1: assign local id %x remote id %x\n", 4024 vha->d_id.b24, id.b24); 4025 } else { 4026 ql_dbg(ql_dbg_async, vha, 0x5075, 4027 "Format 1: Remote login - Waiting for WWPN %8phC.\n", 4028 rptid_entry->u.f1.port_name); 4029 ha->flags.n2n_bigger = 0; 4030 } 4031 4032 fcport = qla2x00_find_fcport_by_wwpn(vha, 4033 rptid_entry->u.f1.port_name, 1); 4034 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 4035 4036 4037 if (fcport) { 4038 fcport->plogi_nack_done_deadline = jiffies + HZ; 4039 fcport->dm_login_expire = jiffies + 4040 QLA_N2N_WAIT_TIME * HZ; 4041 fcport->scan_state = QLA_FCPORT_FOUND; 4042 fcport->n2n_flag = 1; 4043 fcport->keep_nport_handle = 1; 4044 4045 if (wwn_to_u64(vha->port_name) > 4046 wwn_to_u64(fcport->port_name)) { 4047 fcport->d_id = id; 4048 } 4049 4050 switch (fcport->disc_state) { 4051 case DSC_DELETED: 4052 set_bit(RELOGIN_NEEDED, 4053 &vha->dpc_flags); 4054 break; 4055 case DSC_DELETE_PEND: 4056 break; 4057 default: 4058 qlt_schedule_sess_for_deletion(fcport); 4059 break; 4060 } 4061 } else { 4062 qla24xx_post_newsess_work(vha, &id, 4063 rptid_entry->u.f1.port_name, 4064 rptid_entry->u.f1.node_name, 4065 NULL, 4066 FS_FCP_IS_N2N); 4067 } 4068 4069 /* if our portname is higher then initiate N2N login */ 4070 4071 set_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags); 4072 return; 4073 case TOPO_FL: 4074 ha->current_topology = ISP_CFG_FL; 4075 break; 4076 case TOPO_F: 4077 ha->current_topology = ISP_CFG_F; 4078 break; 4079 default: 4080 break; 4081 } 4082 4083 ha->flags.gpsc_supported = 1; 4084 ha->current_topology = ISP_CFG_F; 4085 /* buffer to buffer credit flag */ 4086 vha->flags.bbcr_enable = (rptid_entry->u.f1.bbcr & 0xf) != 0; 4087 4088 if (rptid_entry->vp_idx == 0) { 4089 if (rptid_entry->vp_status == VP_STAT_COMPL) { 4090 /* FA-WWN is only for physical port */ 4091 if (qla_ini_mode_enabled(vha) && 4092 ha->flags.fawwpn_enabled && 4093 (rptid_entry->u.f1.flags & 4094 BIT_6)) { 4095 memcpy(vha->port_name, 4096 rptid_entry->u.f1.port_name, 4097 WWN_SIZE); 4098 } 4099 4100 qlt_update_host_map(vha, id); 4101 } 4102 4103 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags); 4104 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags); 4105 } else { 4106 if (rptid_entry->vp_status != VP_STAT_COMPL && 4107 rptid_entry->vp_status != VP_STAT_ID_CHG) { 4108 ql_dbg(ql_dbg_mbx, vha, 0x10ba, 4109 "Could not acquire ID for VP[%d].\n", 4110 rptid_entry->vp_idx); 4111 return; 4112 } 4113 4114 found = 0; 4115 spin_lock_irqsave(&ha->vport_slock, flags); 4116 list_for_each_entry(vp, &ha->vp_list, list) { 4117 if (rptid_entry->vp_idx == vp->vp_idx) { 4118 found = 1; 4119 break; 4120 } 4121 } 4122 spin_unlock_irqrestore(&ha->vport_slock, flags); 4123 4124 if (!found) 4125 return; 4126 4127 qlt_update_host_map(vp, id); 4128 4129 /* 4130 * Cannot configure here as we are still sitting on the 4131 * response queue. Handle it in dpc context. 
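 * The VP_IDX_ACQUIRED/REGISTER_* bits set below are serviced by the
 * DPC thread, which is woken via qla2xxx_wake_dpc() at the end of
 * this format-1 branch and performs the actual fabric registration.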
4132 */ 4133 set_bit(VP_IDX_ACQUIRED, &vp->vp_flags); 4134 set_bit(REGISTER_FC4_NEEDED, &vp->dpc_flags); 4135 set_bit(REGISTER_FDMI_NEEDED, &vp->dpc_flags); 4136 } 4137 set_bit(VP_DPC_NEEDED, &vha->dpc_flags); 4138 qla2xxx_wake_dpc(vha); 4139 } else if (rptid_entry->format == 2) { 4140 ql_dbg(ql_dbg_async, vha, 0x505f, 4141 "RIDA: format 2/N2N Primary port id %02x%02x%02x.\n", 4142 rptid_entry->port_id[2], rptid_entry->port_id[1], 4143 rptid_entry->port_id[0]); 4144 4145 ql_dbg(ql_dbg_async, vha, 0x5075, 4146 "N2N: Remote WWPN %8phC.\n", 4147 rptid_entry->u.f2.port_name); 4148 4149 /* N2N. direct connect */ 4150 ha->current_topology = ISP_CFG_N; 4151 ha->flags.rida_fmt2 = 1; 4152 vha->d_id.b.domain = rptid_entry->port_id[2]; 4153 vha->d_id.b.area = rptid_entry->port_id[1]; 4154 vha->d_id.b.al_pa = rptid_entry->port_id[0]; 4155 4156 ha->flags.n2n_ae = 1; 4157 spin_lock_irqsave(&ha->vport_slock, flags); 4158 qlt_update_vp_map(vha, SET_AL_PA); 4159 spin_unlock_irqrestore(&ha->vport_slock, flags); 4160 4161 list_for_each_entry(fcport, &vha->vp_fcports, list) { 4162 fcport->scan_state = QLA_FCPORT_SCAN; 4163 fcport->n2n_flag = 0; 4164 } 4165 4166 fcport = qla2x00_find_fcport_by_wwpn(vha, 4167 rptid_entry->u.f2.port_name, 1); 4168 4169 if (fcport) { 4170 fcport->login_retry = vha->hw->login_retry_count; 4171 fcport->plogi_nack_done_deadline = jiffies + HZ; 4172 fcport->scan_state = QLA_FCPORT_FOUND; 4173 fcport->keep_nport_handle = 1; 4174 fcport->n2n_flag = 1; 4175 fcport->d_id.b.domain = 4176 rptid_entry->u.f2.remote_nport_id[2]; 4177 fcport->d_id.b.area = 4178 rptid_entry->u.f2.remote_nport_id[1]; 4179 fcport->d_id.b.al_pa = 4180 rptid_entry->u.f2.remote_nport_id[0]; 4181 } 4182 } 4183 } 4184 4185 /* 4186 * qla24xx_modify_vp_config 4187 * Change VP configuration for vha 4188 * 4189 * Input: 4190 * vha = adapter block pointer. 4191 * 4192 * Returns: 4193 * qla2xxx local function return status code. 4194 * 4195 * Context: 4196 * Kernel context. 
4197 */ 4198 int 4199 qla24xx_modify_vp_config(scsi_qla_host_t *vha) 4200 { 4201 int rval; 4202 struct vp_config_entry_24xx *vpmod; 4203 dma_addr_t vpmod_dma; 4204 struct qla_hw_data *ha = vha->hw; 4205 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 4206 4207 /* This can be called by the parent */ 4208 4209 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10bb, 4210 "Entered %s.\n", __func__); 4211 4212 vpmod = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma); 4213 if (!vpmod) { 4214 ql_log(ql_log_warn, vha, 0x10bc, 4215 "Failed to allocate modify VP IOCB.\n"); 4216 return QLA_MEMORY_ALLOC_FAILED; 4217 } 4218 4219 vpmod->entry_type = VP_CONFIG_IOCB_TYPE; 4220 vpmod->entry_count = 1; 4221 vpmod->command = VCT_COMMAND_MOD_ENABLE_VPS; 4222 vpmod->vp_count = 1; 4223 vpmod->vp_index1 = vha->vp_idx; 4224 vpmod->options_idx1 = BIT_3|BIT_4|BIT_5; 4225 4226 qlt_modify_vp_config(vha, vpmod); 4227 4228 memcpy(vpmod->node_name_idx1, vha->node_name, WWN_SIZE); 4229 memcpy(vpmod->port_name_idx1, vha->port_name, WWN_SIZE); 4230 vpmod->entry_count = 1; 4231 4232 rval = qla2x00_issue_iocb(base_vha, vpmod, vpmod_dma, 0); 4233 if (rval != QLA_SUCCESS) { 4234 ql_dbg(ql_dbg_mbx, vha, 0x10bd, 4235 "Failed to issue VP config IOCB (%x).\n", rval); 4236 } else if (vpmod->comp_status != 0) { 4237 ql_dbg(ql_dbg_mbx, vha, 0x10be, 4238 "Failed to complete IOCB -- error status (%x).\n", 4239 vpmod->comp_status); 4240 rval = QLA_FUNCTION_FAILED; 4241 } else if (vpmod->comp_status != cpu_to_le16(CS_COMPLETE)) { 4242 ql_dbg(ql_dbg_mbx, vha, 0x10bf, 4243 "Failed to complete IOCB -- completion status (%x).\n", 4244 le16_to_cpu(vpmod->comp_status)); 4245 rval = QLA_FUNCTION_FAILED; 4246 } else { 4247 /* EMPTY */ 4248 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c0, 4249 "Done %s.\n", __func__); 4250 fc_vport_set_state(vha->fc_vport, FC_VPORT_INITIALIZING); 4251 } 4252 dma_pool_free(ha->s_dma_pool, vpmod, vpmod_dma); 4253 4254 return rval; 4255 } 4256 4257 /* 4258 * qla2x00_send_change_request 4259 * Receive or disable RSCN request from fabric controller 4260 * 4261 * Input: 4262 * ha = adapter block pointer 4263 * format = registration format: 4264 * 0 - Reserved 4265 * 1 - Fabric detected registration 4266 * 2 - N_port detected registration 4267 * 3 - Full registration 4268 * FF - clear registration 4269 * vp_idx = Virtual port index 4270 * 4271 * Returns: 4272 * qla2x00 local function return status code. 
4273 * 4274 * Context: 4275 * Kernel Context 4276 */ 4277 4278 int 4279 qla2x00_send_change_request(scsi_qla_host_t *vha, uint16_t format, 4280 uint16_t vp_idx) 4281 { 4282 int rval; 4283 mbx_cmd_t mc; 4284 mbx_cmd_t *mcp = &mc; 4285 4286 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c7, 4287 "Entered %s.\n", __func__); 4288 4289 mcp->mb[0] = MBC_SEND_CHANGE_REQUEST; 4290 mcp->mb[1] = format; 4291 mcp->mb[9] = vp_idx; 4292 mcp->out_mb = MBX_9|MBX_1|MBX_0; 4293 mcp->in_mb = MBX_0|MBX_1; 4294 mcp->tov = MBX_TOV_SECONDS; 4295 mcp->flags = 0; 4296 rval = qla2x00_mailbox_command(vha, mcp); 4297 4298 if (rval == QLA_SUCCESS) { 4299 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) { 4300 rval = BIT_1; 4301 } 4302 } else 4303 rval = BIT_1; 4304 4305 return rval; 4306 } 4307 4308 int 4309 qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr, 4310 uint32_t size) 4311 { 4312 int rval; 4313 mbx_cmd_t mc; 4314 mbx_cmd_t *mcp = &mc; 4315 4316 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1009, 4317 "Entered %s.\n", __func__); 4318 4319 if (MSW(addr) || IS_FWI2_CAPABLE(vha->hw)) { 4320 mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED; 4321 mcp->mb[8] = MSW(addr); 4322 mcp->mb[10] = 0; 4323 mcp->out_mb = MBX_10|MBX_8|MBX_0; 4324 } else { 4325 mcp->mb[0] = MBC_DUMP_RISC_RAM; 4326 mcp->out_mb = MBX_0; 4327 } 4328 mcp->mb[1] = LSW(addr); 4329 mcp->mb[2] = MSW(req_dma); 4330 mcp->mb[3] = LSW(req_dma); 4331 mcp->mb[6] = MSW(MSD(req_dma)); 4332 mcp->mb[7] = LSW(MSD(req_dma)); 4333 mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1; 4334 if (IS_FWI2_CAPABLE(vha->hw)) { 4335 mcp->mb[4] = MSW(size); 4336 mcp->mb[5] = LSW(size); 4337 mcp->out_mb |= MBX_5|MBX_4; 4338 } else { 4339 mcp->mb[4] = LSW(size); 4340 mcp->out_mb |= MBX_4; 4341 } 4342 4343 mcp->in_mb = MBX_0; 4344 mcp->tov = MBX_TOV_SECONDS; 4345 mcp->flags = 0; 4346 rval = qla2x00_mailbox_command(vha, mcp); 4347 4348 if (rval != QLA_SUCCESS) { 4349 ql_dbg(ql_dbg_mbx, vha, 0x1008, 4350 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4351 } else { 4352 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1007, 4353 "Done %s.\n", __func__); 4354 } 4355 4356 return rval; 4357 } 4358 /* 84XX Support **************************************************************/ 4359 4360 struct cs84xx_mgmt_cmd { 4361 union { 4362 struct verify_chip_entry_84xx req; 4363 struct verify_chip_rsp_84xx rsp; 4364 } p; 4365 }; 4366 4367 int 4368 qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status) 4369 { 4370 int rval, retry; 4371 struct cs84xx_mgmt_cmd *mn; 4372 dma_addr_t mn_dma; 4373 uint16_t options; 4374 unsigned long flags; 4375 struct qla_hw_data *ha = vha->hw; 4376 4377 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c8, 4378 "Entered %s.\n", __func__); 4379 4380 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma); 4381 if (mn == NULL) { 4382 return QLA_MEMORY_ALLOC_FAILED; 4383 } 4384 4385 /* Force Update? */ 4386 options = ha->cs84xx->fw_update ? VCO_FORCE_UPDATE : 0; 4387 /* Diagnostic firmware? */ 4388 /* options |= MENLO_DIAG_FW; */ 4389 /* We update the firmware with only one data sequence. 
*/ 4390 options |= VCO_END_OF_DATA; 4391 4392 do { 4393 retry = 0; 4394 memset(mn, 0, sizeof(*mn)); 4395 mn->p.req.entry_type = VERIFY_CHIP_IOCB_TYPE; 4396 mn->p.req.entry_count = 1; 4397 mn->p.req.options = cpu_to_le16(options); 4398 4399 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111c, 4400 "Dump of Verify Request.\n"); 4401 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111e, 4402 mn, sizeof(*mn)); 4403 4404 rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120); 4405 if (rval != QLA_SUCCESS) { 4406 ql_dbg(ql_dbg_mbx, vha, 0x10cb, 4407 "Failed to issue verify IOCB (%x).\n", rval); 4408 goto verify_done; 4409 } 4410 4411 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1110, 4412 "Dump of Verify Response.\n"); 4413 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1118, 4414 mn, sizeof(*mn)); 4415 4416 status[0] = le16_to_cpu(mn->p.rsp.comp_status); 4417 status[1] = status[0] == CS_VCS_CHIP_FAILURE ? 4418 le16_to_cpu(mn->p.rsp.failure_code) : 0; 4419 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ce, 4420 "cs=%x fc=%x.\n", status[0], status[1]); 4421 4422 if (status[0] != CS_COMPLETE) { 4423 rval = QLA_FUNCTION_FAILED; 4424 if (!(options & VCO_DONT_UPDATE_FW)) { 4425 ql_dbg(ql_dbg_mbx, vha, 0x10cf, 4426 "Firmware update failed. Retrying " 4427 "without update firmware.\n"); 4428 options |= VCO_DONT_UPDATE_FW; 4429 options &= ~VCO_FORCE_UPDATE; 4430 retry = 1; 4431 } 4432 } else { 4433 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d0, 4434 "Firmware updated to %x.\n", 4435 le32_to_cpu(mn->p.rsp.fw_ver)); 4436 4437 /* NOTE: we only update OP firmware. */ 4438 spin_lock_irqsave(&ha->cs84xx->access_lock, flags); 4439 ha->cs84xx->op_fw_version = 4440 le32_to_cpu(mn->p.rsp.fw_ver); 4441 spin_unlock_irqrestore(&ha->cs84xx->access_lock, 4442 flags); 4443 } 4444 } while (retry); 4445 4446 verify_done: 4447 dma_pool_free(ha->s_dma_pool, mn, mn_dma); 4448 4449 if (rval != QLA_SUCCESS) { 4450 ql_dbg(ql_dbg_mbx, vha, 0x10d1, 4451 "Failed=%x.\n", rval); 4452 } else { 4453 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d2, 4454 "Done %s.\n", __func__); 4455 } 4456 4457 return rval; 4458 } 4459 4460 int 4461 qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req) 4462 { 4463 int rval; 4464 unsigned long flags; 4465 mbx_cmd_t mc; 4466 mbx_cmd_t *mcp = &mc; 4467 struct qla_hw_data *ha = vha->hw; 4468 4469 if (!ha->flags.fw_started) 4470 return QLA_SUCCESS; 4471 4472 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d3, 4473 "Entered %s.\n", __func__); 4474 4475 if (IS_SHADOW_REG_CAPABLE(ha)) 4476 req->options |= BIT_13; 4477 4478 mcp->mb[0] = MBC_INITIALIZE_MULTIQ; 4479 mcp->mb[1] = req->options; 4480 mcp->mb[2] = MSW(LSD(req->dma)); 4481 mcp->mb[3] = LSW(LSD(req->dma)); 4482 mcp->mb[6] = MSW(MSD(req->dma)); 4483 mcp->mb[7] = LSW(MSD(req->dma)); 4484 mcp->mb[5] = req->length; 4485 if (req->rsp) 4486 mcp->mb[10] = req->rsp->id; 4487 mcp->mb[12] = req->qos; 4488 mcp->mb[11] = req->vp_idx; 4489 mcp->mb[13] = req->rid; 4490 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) 4491 mcp->mb[15] = 0; 4492 4493 mcp->mb[4] = req->id; 4494 /* que in ptr index */ 4495 mcp->mb[8] = 0; 4496 /* que out ptr index */ 4497 mcp->mb[9] = *req->out_ptr = 0; 4498 mcp->out_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7| 4499 MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 4500 mcp->in_mb = MBX_0; 4501 mcp->flags = MBX_DMA_OUT; 4502 mcp->tov = MBX_TOV_SECONDS * 2; 4503 4504 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) || 4505 IS_QLA28XX(ha)) 4506 mcp->in_mb |= MBX_1; 4507 if (IS_QLA83XX(ha) || 
IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 4508 mcp->out_mb |= MBX_15; 4509 /* debug q create issue in SR-IOV */ 4510 mcp->in_mb |= MBX_9 | MBX_8 | MBX_7; 4511 } 4512 4513 spin_lock_irqsave(&ha->hardware_lock, flags); 4514 if (!(req->options & BIT_0)) { 4515 wrt_reg_dword(req->req_q_in, 0); 4516 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 4517 wrt_reg_dword(req->req_q_out, 0); 4518 } 4519 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4520 4521 rval = qla2x00_mailbox_command(vha, mcp); 4522 if (rval != QLA_SUCCESS) { 4523 ql_dbg(ql_dbg_mbx, vha, 0x10d4, 4524 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4525 } else { 4526 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d5, 4527 "Done %s.\n", __func__); 4528 } 4529 4530 return rval; 4531 } 4532 4533 int 4534 qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp) 4535 { 4536 int rval; 4537 unsigned long flags; 4538 mbx_cmd_t mc; 4539 mbx_cmd_t *mcp = &mc; 4540 struct qla_hw_data *ha = vha->hw; 4541 4542 if (!ha->flags.fw_started) 4543 return QLA_SUCCESS; 4544 4545 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d6, 4546 "Entered %s.\n", __func__); 4547 4548 if (IS_SHADOW_REG_CAPABLE(ha)) 4549 rsp->options |= BIT_13; 4550 4551 mcp->mb[0] = MBC_INITIALIZE_MULTIQ; 4552 mcp->mb[1] = rsp->options; 4553 mcp->mb[2] = MSW(LSD(rsp->dma)); 4554 mcp->mb[3] = LSW(LSD(rsp->dma)); 4555 mcp->mb[6] = MSW(MSD(rsp->dma)); 4556 mcp->mb[7] = LSW(MSD(rsp->dma)); 4557 mcp->mb[5] = rsp->length; 4558 mcp->mb[14] = rsp->msix->entry; 4559 mcp->mb[13] = rsp->rid; 4560 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) 4561 mcp->mb[15] = 0; 4562 4563 mcp->mb[4] = rsp->id; 4564 /* que in ptr index */ 4565 mcp->mb[8] = *rsp->in_ptr = 0; 4566 /* que out ptr index */ 4567 mcp->mb[9] = 0; 4568 mcp->out_mb = MBX_14|MBX_13|MBX_9|MBX_8|MBX_7 4569 |MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 4570 mcp->in_mb = MBX_0; 4571 mcp->flags = MBX_DMA_OUT; 4572 mcp->tov = MBX_TOV_SECONDS * 2; 4573 4574 if (IS_QLA81XX(ha)) { 4575 mcp->out_mb |= MBX_12|MBX_11|MBX_10; 4576 mcp->in_mb |= MBX_1; 4577 } else if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 4578 mcp->out_mb |= MBX_15|MBX_12|MBX_11|MBX_10; 4579 mcp->in_mb |= MBX_1; 4580 /* debug q create issue in SR-IOV */ 4581 mcp->in_mb |= MBX_9 | MBX_8 | MBX_7; 4582 } 4583 4584 spin_lock_irqsave(&ha->hardware_lock, flags); 4585 if (!(rsp->options & BIT_0)) { 4586 wrt_reg_dword(rsp->rsp_q_out, 0); 4587 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 4588 wrt_reg_dword(rsp->rsp_q_in, 0); 4589 } 4590 4591 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4592 4593 rval = qla2x00_mailbox_command(vha, mcp); 4594 if (rval != QLA_SUCCESS) { 4595 ql_dbg(ql_dbg_mbx, vha, 0x10d7, 4596 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4597 } else { 4598 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d8, 4599 "Done %s.\n", __func__); 4600 } 4601 4602 return rval; 4603 } 4604 4605 int 4606 qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb) 4607 { 4608 int rval; 4609 mbx_cmd_t mc; 4610 mbx_cmd_t *mcp = &mc; 4611 4612 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d9, 4613 "Entered %s.\n", __func__); 4614 4615 mcp->mb[0] = MBC_IDC_ACK; 4616 memcpy(&mcp->mb[1], mb, QLA_IDC_ACK_REGS * sizeof(uint16_t)); 4617 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 4618 mcp->in_mb = MBX_0; 4619 mcp->tov = MBX_TOV_SECONDS; 4620 mcp->flags = 0; 4621 rval = qla2x00_mailbox_command(vha, mcp); 4622 4623 if (rval != QLA_SUCCESS) { 4624 ql_dbg(ql_dbg_mbx, vha, 0x10da, 4625 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4626 } else 
{ 4627 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10db, 4628 "Done %s.\n", __func__); 4629 } 4630 4631 return rval; 4632 } 4633 4634 int 4635 qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size) 4636 { 4637 int rval; 4638 mbx_cmd_t mc; 4639 mbx_cmd_t *mcp = &mc; 4640 4641 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10dc, 4642 "Entered %s.\n", __func__); 4643 4644 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) && 4645 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw)) 4646 return QLA_FUNCTION_FAILED; 4647 4648 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; 4649 mcp->mb[1] = FAC_OPT_CMD_GET_SECTOR_SIZE; 4650 mcp->out_mb = MBX_1|MBX_0; 4651 mcp->in_mb = MBX_1|MBX_0; 4652 mcp->tov = MBX_TOV_SECONDS; 4653 mcp->flags = 0; 4654 rval = qla2x00_mailbox_command(vha, mcp); 4655 4656 if (rval != QLA_SUCCESS) { 4657 ql_dbg(ql_dbg_mbx, vha, 0x10dd, 4658 "Failed=%x mb[0]=%x mb[1]=%x.\n", 4659 rval, mcp->mb[0], mcp->mb[1]); 4660 } else { 4661 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10de, 4662 "Done %s.\n", __func__); 4663 *sector_size = mcp->mb[1]; 4664 } 4665 4666 return rval; 4667 } 4668 4669 int 4670 qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable) 4671 { 4672 int rval; 4673 mbx_cmd_t mc; 4674 mbx_cmd_t *mcp = &mc; 4675 4676 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) && 4677 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw)) 4678 return QLA_FUNCTION_FAILED; 4679 4680 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10df, 4681 "Entered %s.\n", __func__); 4682 4683 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; 4684 mcp->mb[1] = enable ? FAC_OPT_CMD_WRITE_ENABLE : 4685 FAC_OPT_CMD_WRITE_PROTECT; 4686 mcp->out_mb = MBX_1|MBX_0; 4687 mcp->in_mb = MBX_1|MBX_0; 4688 mcp->tov = MBX_TOV_SECONDS; 4689 mcp->flags = 0; 4690 rval = qla2x00_mailbox_command(vha, mcp); 4691 4692 if (rval != QLA_SUCCESS) { 4693 ql_dbg(ql_dbg_mbx, vha, 0x10e0, 4694 "Failed=%x mb[0]=%x mb[1]=%x.\n", 4695 rval, mcp->mb[0], mcp->mb[1]); 4696 } else { 4697 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e1, 4698 "Done %s.\n", __func__); 4699 } 4700 4701 return rval; 4702 } 4703 4704 int 4705 qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish) 4706 { 4707 int rval; 4708 mbx_cmd_t mc; 4709 mbx_cmd_t *mcp = &mc; 4710 4711 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) && 4712 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw)) 4713 return QLA_FUNCTION_FAILED; 4714 4715 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2, 4716 "Entered %s.\n", __func__); 4717 4718 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; 4719 mcp->mb[1] = FAC_OPT_CMD_ERASE_SECTOR; 4720 mcp->mb[2] = LSW(start); 4721 mcp->mb[3] = MSW(start); 4722 mcp->mb[4] = LSW(finish); 4723 mcp->mb[5] = MSW(finish); 4724 mcp->out_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 4725 mcp->in_mb = MBX_2|MBX_1|MBX_0; 4726 mcp->tov = MBX_TOV_SECONDS; 4727 mcp->flags = 0; 4728 rval = qla2x00_mailbox_command(vha, mcp); 4729 4730 if (rval != QLA_SUCCESS) { 4731 ql_dbg(ql_dbg_mbx, vha, 0x10e3, 4732 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", 4733 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); 4734 } else { 4735 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4, 4736 "Done %s.\n", __func__); 4737 } 4738 4739 return rval; 4740 } 4741 4742 int 4743 qla81xx_fac_semaphore_access(scsi_qla_host_t *vha, int lock) 4744 { 4745 int rval = QLA_SUCCESS; 4746 mbx_cmd_t mc; 4747 mbx_cmd_t *mcp = &mc; 4748 struct qla_hw_data *ha = vha->hw; 4749 4750 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && 4751 !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 4752 return rval; 4753 4754 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2, 4755 
"Entered %s.\n", __func__); 4756 4757 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; 4758 mcp->mb[1] = (lock ? FAC_OPT_CMD_LOCK_SEMAPHORE : 4759 FAC_OPT_CMD_UNLOCK_SEMAPHORE); 4760 mcp->out_mb = MBX_1|MBX_0; 4761 mcp->in_mb = MBX_1|MBX_0; 4762 mcp->tov = MBX_TOV_SECONDS; 4763 mcp->flags = 0; 4764 rval = qla2x00_mailbox_command(vha, mcp); 4765 4766 if (rval != QLA_SUCCESS) { 4767 ql_dbg(ql_dbg_mbx, vha, 0x10e3, 4768 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", 4769 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); 4770 } else { 4771 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4, 4772 "Done %s.\n", __func__); 4773 } 4774 4775 return rval; 4776 } 4777 4778 int 4779 qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha) 4780 { 4781 int rval = 0; 4782 mbx_cmd_t mc; 4783 mbx_cmd_t *mcp = &mc; 4784 4785 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e5, 4786 "Entered %s.\n", __func__); 4787 4788 mcp->mb[0] = MBC_RESTART_MPI_FW; 4789 mcp->out_mb = MBX_0; 4790 mcp->in_mb = MBX_0|MBX_1; 4791 mcp->tov = MBX_TOV_SECONDS; 4792 mcp->flags = 0; 4793 rval = qla2x00_mailbox_command(vha, mcp); 4794 4795 if (rval != QLA_SUCCESS) { 4796 ql_dbg(ql_dbg_mbx, vha, 0x10e6, 4797 "Failed=%x mb[0]=%x mb[1]=%x.\n", 4798 rval, mcp->mb[0], mcp->mb[1]); 4799 } else { 4800 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e7, 4801 "Done %s.\n", __func__); 4802 } 4803 4804 return rval; 4805 } 4806 4807 int 4808 qla82xx_set_driver_version(scsi_qla_host_t *vha, char *version) 4809 { 4810 int rval; 4811 mbx_cmd_t mc; 4812 mbx_cmd_t *mcp = &mc; 4813 int i; 4814 int len; 4815 __le16 *str; 4816 struct qla_hw_data *ha = vha->hw; 4817 4818 if (!IS_P3P_TYPE(ha)) 4819 return QLA_FUNCTION_FAILED; 4820 4821 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117b, 4822 "Entered %s.\n", __func__); 4823 4824 str = (__force __le16 *)version; 4825 len = strlen(version); 4826 4827 mcp->mb[0] = MBC_SET_RNID_PARAMS; 4828 mcp->mb[1] = RNID_TYPE_SET_VERSION << 8; 4829 mcp->out_mb = MBX_1|MBX_0; 4830 for (i = 4; i < 16 && len; i++, str++, len -= 2) { 4831 mcp->mb[i] = le16_to_cpup(str); 4832 mcp->out_mb |= 1<<i; 4833 } 4834 for (; i < 16; i++) { 4835 mcp->mb[i] = 0; 4836 mcp->out_mb |= 1<<i; 4837 } 4838 mcp->in_mb = MBX_1|MBX_0; 4839 mcp->tov = MBX_TOV_SECONDS; 4840 mcp->flags = 0; 4841 rval = qla2x00_mailbox_command(vha, mcp); 4842 4843 if (rval != QLA_SUCCESS) { 4844 ql_dbg(ql_dbg_mbx, vha, 0x117c, 4845 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]); 4846 } else { 4847 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117d, 4848 "Done %s.\n", __func__); 4849 } 4850 4851 return rval; 4852 } 4853 4854 int 4855 qla25xx_set_driver_version(scsi_qla_host_t *vha, char *version) 4856 { 4857 int rval; 4858 mbx_cmd_t mc; 4859 mbx_cmd_t *mcp = &mc; 4860 int len; 4861 uint16_t dwlen; 4862 uint8_t *str; 4863 dma_addr_t str_dma; 4864 struct qla_hw_data *ha = vha->hw; 4865 4866 if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha) || 4867 IS_P3P_TYPE(ha)) 4868 return QLA_FUNCTION_FAILED; 4869 4870 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117e, 4871 "Entered %s.\n", __func__); 4872 4873 str = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &str_dma); 4874 if (!str) { 4875 ql_log(ql_log_warn, vha, 0x117f, 4876 "Failed to allocate driver version param.\n"); 4877 return QLA_MEMORY_ALLOC_FAILED; 4878 } 4879 4880 memcpy(str, "\x7\x3\x11\x0", 4); 4881 dwlen = str[0]; 4882 len = dwlen * 4 - 4; 4883 memset(str + 4, 0, len); 4884 if (len > strlen(version)) 4885 len = strlen(version); 4886 memcpy(str + 4, version, len); 4887 4888 mcp->mb[0] = MBC_SET_RNID_PARAMS; 4889 mcp->mb[1] = 
RNID_TYPE_SET_VERSION << 8 | dwlen; 4890 mcp->mb[2] = MSW(LSD(str_dma)); 4891 mcp->mb[3] = LSW(LSD(str_dma)); 4892 mcp->mb[6] = MSW(MSD(str_dma)); 4893 mcp->mb[7] = LSW(MSD(str_dma)); 4894 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 4895 mcp->in_mb = MBX_1|MBX_0; 4896 mcp->tov = MBX_TOV_SECONDS; 4897 mcp->flags = 0; 4898 rval = qla2x00_mailbox_command(vha, mcp); 4899 4900 if (rval != QLA_SUCCESS) { 4901 ql_dbg(ql_dbg_mbx, vha, 0x1180, 4902 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]); 4903 } else { 4904 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1181, 4905 "Done %s.\n", __func__); 4906 } 4907 4908 dma_pool_free(ha->s_dma_pool, str, str_dma); 4909 4910 return rval; 4911 } 4912 4913 int 4914 qla24xx_get_port_login_templ(scsi_qla_host_t *vha, dma_addr_t buf_dma, 4915 void *buf, uint16_t bufsiz) 4916 { 4917 int rval, i; 4918 mbx_cmd_t mc; 4919 mbx_cmd_t *mcp = &mc; 4920 uint32_t *bp; 4921 4922 if (!IS_FWI2_CAPABLE(vha->hw)) 4923 return QLA_FUNCTION_FAILED; 4924 4925 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159, 4926 "Entered %s.\n", __func__); 4927 4928 mcp->mb[0] = MBC_GET_RNID_PARAMS; 4929 mcp->mb[1] = RNID_TYPE_PORT_LOGIN << 8; 4930 mcp->mb[2] = MSW(buf_dma); 4931 mcp->mb[3] = LSW(buf_dma); 4932 mcp->mb[6] = MSW(MSD(buf_dma)); 4933 mcp->mb[7] = LSW(MSD(buf_dma)); 4934 mcp->mb[8] = bufsiz/4; 4935 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 4936 mcp->in_mb = MBX_1|MBX_0; 4937 mcp->tov = MBX_TOV_SECONDS; 4938 mcp->flags = 0; 4939 rval = qla2x00_mailbox_command(vha, mcp); 4940 4941 if (rval != QLA_SUCCESS) { 4942 ql_dbg(ql_dbg_mbx, vha, 0x115a, 4943 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]); 4944 } else { 4945 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b, 4946 "Done %s.\n", __func__); 4947 bp = (uint32_t *) buf; 4948 for (i = 0; i < (bufsiz-4)/4; i++, bp++) 4949 *bp = le32_to_cpu((__force __le32)*bp); 4950 } 4951 4952 return rval; 4953 } 4954 4955 #define PUREX_CMD_COUNT 4 4956 int 4957 qla25xx_set_els_cmds_supported(scsi_qla_host_t *vha) 4958 { 4959 int rval; 4960 mbx_cmd_t mc; 4961 mbx_cmd_t *mcp = &mc; 4962 uint8_t *els_cmd_map; 4963 uint8_t active_cnt = 0; 4964 dma_addr_t els_cmd_map_dma; 4965 uint8_t cmd_opcode[PUREX_CMD_COUNT]; 4966 uint8_t i, index, purex_bit; 4967 struct qla_hw_data *ha = vha->hw; 4968 4969 if (!IS_QLA25XX(ha) && !IS_QLA2031(ha) && 4970 !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 4971 return QLA_SUCCESS; 4972 4973 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1197, 4974 "Entered %s.\n", __func__); 4975 4976 els_cmd_map = dma_alloc_coherent(&ha->pdev->dev, ELS_CMD_MAP_SIZE, 4977 &els_cmd_map_dma, GFP_KERNEL); 4978 if (!els_cmd_map) { 4979 ql_log(ql_log_warn, vha, 0x7101, 4980 "Failed to allocate RDP els command param.\n"); 4981 return QLA_MEMORY_ALLOC_FAILED; 4982 } 4983 4984 /* List of Purex ELS */ 4985 if (ql2xrdpenable) { 4986 cmd_opcode[active_cnt] = ELS_RDP; 4987 active_cnt++; 4988 } 4989 if (ha->flags.scm_supported_f) { 4990 cmd_opcode[active_cnt] = ELS_FPIN; 4991 active_cnt++; 4992 } 4993 if (ha->flags.edif_enabled) { 4994 cmd_opcode[active_cnt] = ELS_AUTH_ELS; 4995 active_cnt++; 4996 } 4997 4998 for (i = 0; i < active_cnt; i++) { 4999 index = cmd_opcode[i] / 8; 5000 purex_bit = cmd_opcode[i] % 8; 5001 els_cmd_map[index] |= 1 << purex_bit; 5002 } 5003 5004 mcp->mb[0] = MBC_SET_RNID_PARAMS; 5005 mcp->mb[1] = RNID_TYPE_ELS_CMD << 8; 5006 mcp->mb[2] = MSW(LSD(els_cmd_map_dma)); 5007 mcp->mb[3] = LSW(LSD(els_cmd_map_dma)); 5008 mcp->mb[6] = MSW(MSD(els_cmd_map_dma)); 5009 mcp->mb[7] = LSW(MSD(els_cmd_map_dma)); 
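/* mb[2]/mb[3] hold the low dword and mb[6]/mb[7] the high dword of the ELS opcode bitmap's 64-bit DMA address; the bitmap built above marks each supported PUREX ELS opcode by setting bit (opcode % 8) of byte (opcode / 8). */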
5010 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 5011 mcp->in_mb = MBX_1|MBX_0; 5012 mcp->tov = MBX_TOV_SECONDS; 5013 mcp->flags = MBX_DMA_OUT; 5014 mcp->buf_size = ELS_CMD_MAP_SIZE; 5015 rval = qla2x00_mailbox_command(vha, mcp); 5016 5017 if (rval != QLA_SUCCESS) { 5018 ql_dbg(ql_dbg_mbx, vha, 0x118d, 5019 "Failed=%x (%x,%x).\n", rval, mcp->mb[0], mcp->mb[1]); 5020 } else { 5021 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c, 5022 "Done %s.\n", __func__); 5023 } 5024 5025 dma_free_coherent(&ha->pdev->dev, ELS_CMD_MAP_SIZE, 5026 els_cmd_map, els_cmd_map_dma); 5027 5028 return rval; 5029 } 5030 5031 static int 5032 qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp) 5033 { 5034 int rval; 5035 mbx_cmd_t mc; 5036 mbx_cmd_t *mcp = &mc; 5037 5038 if (!IS_FWI2_CAPABLE(vha->hw)) 5039 return QLA_FUNCTION_FAILED; 5040 5041 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159, 5042 "Entered %s.\n", __func__); 5043 5044 mcp->mb[0] = MBC_GET_RNID_PARAMS; 5045 mcp->mb[1] = RNID_TYPE_ASIC_TEMP << 8; 5046 mcp->out_mb = MBX_1|MBX_0; 5047 mcp->in_mb = MBX_1|MBX_0; 5048 mcp->tov = MBX_TOV_SECONDS; 5049 mcp->flags = 0; 5050 rval = qla2x00_mailbox_command(vha, mcp); 5051 *temp = mcp->mb[1]; 5052 5053 if (rval != QLA_SUCCESS) { 5054 ql_dbg(ql_dbg_mbx, vha, 0x115a, 5055 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]); 5056 } else { 5057 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b, 5058 "Done %s.\n", __func__); 5059 } 5060 5061 return rval; 5062 } 5063 5064 int 5065 qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp, 5066 uint16_t dev, uint16_t off, uint16_t len, uint16_t opt) 5067 { 5068 int rval; 5069 mbx_cmd_t mc; 5070 mbx_cmd_t *mcp = &mc; 5071 struct qla_hw_data *ha = vha->hw; 5072 5073 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8, 5074 "Entered %s.\n", __func__); 5075 5076 if (!IS_FWI2_CAPABLE(ha)) 5077 return QLA_FUNCTION_FAILED; 5078 5079 if (len == 1) 5080 opt |= BIT_0; 5081 5082 mcp->mb[0] = MBC_READ_SFP; 5083 mcp->mb[1] = dev; 5084 mcp->mb[2] = MSW(LSD(sfp_dma)); 5085 mcp->mb[3] = LSW(LSD(sfp_dma)); 5086 mcp->mb[6] = MSW(MSD(sfp_dma)); 5087 mcp->mb[7] = LSW(MSD(sfp_dma)); 5088 mcp->mb[8] = len; 5089 mcp->mb[9] = off; 5090 mcp->mb[10] = opt; 5091 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 5092 mcp->in_mb = MBX_1|MBX_0; 5093 mcp->tov = MBX_TOV_SECONDS; 5094 mcp->flags = 0; 5095 rval = qla2x00_mailbox_command(vha, mcp); 5096 5097 if (opt & BIT_0) 5098 *sfp = mcp->mb[1]; 5099 5100 if (rval != QLA_SUCCESS) { 5101 ql_dbg(ql_dbg_mbx, vha, 0x10e9, 5102 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5103 if (mcp->mb[0] == MBS_COMMAND_ERROR && mcp->mb[1] == 0x22) { 5104 /* sfp is not there */ 5105 rval = QLA_INTERFACE_ERROR; 5106 } 5107 } else { 5108 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea, 5109 "Done %s.\n", __func__); 5110 } 5111 5112 return rval; 5113 } 5114 5115 int 5116 qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp, 5117 uint16_t dev, uint16_t off, uint16_t len, uint16_t opt) 5118 { 5119 int rval; 5120 mbx_cmd_t mc; 5121 mbx_cmd_t *mcp = &mc; 5122 struct qla_hw_data *ha = vha->hw; 5123 5124 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10eb, 5125 "Entered %s.\n", __func__); 5126 5127 if (!IS_FWI2_CAPABLE(ha)) 5128 return QLA_FUNCTION_FAILED; 5129 5130 if (len == 1) 5131 opt |= BIT_0; 5132 5133 if (opt & BIT_0) 5134 len = *sfp; 5135 5136 mcp->mb[0] = MBC_WRITE_SFP; 5137 mcp->mb[1] = dev; 5138 mcp->mb[2] = MSW(LSD(sfp_dma)); 5139 mcp->mb[3] = LSW(LSD(sfp_dma)); 5140 mcp->mb[6] = MSW(MSD(sfp_dma)); 
5141 mcp->mb[7] = LSW(MSD(sfp_dma)); 5142 mcp->mb[8] = len; 5143 mcp->mb[9] = off; 5144 mcp->mb[10] = opt; 5145 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 5146 mcp->in_mb = MBX_1|MBX_0; 5147 mcp->tov = MBX_TOV_SECONDS; 5148 mcp->flags = 0; 5149 rval = qla2x00_mailbox_command(vha, mcp); 5150 5151 if (rval != QLA_SUCCESS) { 5152 ql_dbg(ql_dbg_mbx, vha, 0x10ec, 5153 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5154 } else { 5155 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ed, 5156 "Done %s.\n", __func__); 5157 } 5158 5159 return rval; 5160 } 5161 5162 int 5163 qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma, 5164 uint16_t size_in_bytes, uint16_t *actual_size) 5165 { 5166 int rval; 5167 mbx_cmd_t mc; 5168 mbx_cmd_t *mcp = &mc; 5169 5170 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ee, 5171 "Entered %s.\n", __func__); 5172 5173 if (!IS_CNA_CAPABLE(vha->hw)) 5174 return QLA_FUNCTION_FAILED; 5175 5176 mcp->mb[0] = MBC_GET_XGMAC_STATS; 5177 mcp->mb[2] = MSW(stats_dma); 5178 mcp->mb[3] = LSW(stats_dma); 5179 mcp->mb[6] = MSW(MSD(stats_dma)); 5180 mcp->mb[7] = LSW(MSD(stats_dma)); 5181 mcp->mb[8] = size_in_bytes >> 2; 5182 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0; 5183 mcp->in_mb = MBX_2|MBX_1|MBX_0; 5184 mcp->tov = MBX_TOV_SECONDS; 5185 mcp->flags = 0; 5186 rval = qla2x00_mailbox_command(vha, mcp); 5187 5188 if (rval != QLA_SUCCESS) { 5189 ql_dbg(ql_dbg_mbx, vha, 0x10ef, 5190 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", 5191 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); 5192 } else { 5193 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f0, 5194 "Done %s.\n", __func__); 5195 5196 5197 *actual_size = mcp->mb[2] << 2; 5198 } 5199 5200 return rval; 5201 } 5202 5203 int 5204 qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma, 5205 uint16_t size) 5206 { 5207 int rval; 5208 mbx_cmd_t mc; 5209 mbx_cmd_t *mcp = &mc; 5210 5211 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f1, 5212 "Entered %s.\n", __func__); 5213 5214 if (!IS_CNA_CAPABLE(vha->hw)) 5215 return QLA_FUNCTION_FAILED; 5216 5217 mcp->mb[0] = MBC_GET_DCBX_PARAMS; 5218 mcp->mb[1] = 0; 5219 mcp->mb[2] = MSW(tlv_dma); 5220 mcp->mb[3] = LSW(tlv_dma); 5221 mcp->mb[6] = MSW(MSD(tlv_dma)); 5222 mcp->mb[7] = LSW(MSD(tlv_dma)); 5223 mcp->mb[8] = size; 5224 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 5225 mcp->in_mb = MBX_2|MBX_1|MBX_0; 5226 mcp->tov = MBX_TOV_SECONDS; 5227 mcp->flags = 0; 5228 rval = qla2x00_mailbox_command(vha, mcp); 5229 5230 if (rval != QLA_SUCCESS) { 5231 ql_dbg(ql_dbg_mbx, vha, 0x10f2, 5232 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", 5233 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); 5234 } else { 5235 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f3, 5236 "Done %s.\n", __func__); 5237 } 5238 5239 return rval; 5240 } 5241 5242 int 5243 qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data) 5244 { 5245 int rval; 5246 mbx_cmd_t mc; 5247 mbx_cmd_t *mcp = &mc; 5248 5249 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f4, 5250 "Entered %s.\n", __func__); 5251 5252 if (!IS_FWI2_CAPABLE(vha->hw)) 5253 return QLA_FUNCTION_FAILED; 5254 5255 mcp->mb[0] = MBC_READ_RAM_EXTENDED; 5256 mcp->mb[1] = LSW(risc_addr); 5257 mcp->mb[8] = MSW(risc_addr); 5258 mcp->out_mb = MBX_8|MBX_1|MBX_0; 5259 mcp->in_mb = MBX_3|MBX_2|MBX_0; 5260 mcp->tov = MBX_TOV_SECONDS; 5261 mcp->flags = 0; 5262 rval = qla2x00_mailbox_command(vha, mcp); 5263 if (rval != QLA_SUCCESS) { 5264 ql_dbg(ql_dbg_mbx, vha, 0x10f5, 5265 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5266 } else { 5267 
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f6, 5268 "Done %s.\n", __func__); 5269 *data = mcp->mb[3] << 16 | mcp->mb[2]; 5270 } 5271 5272 return rval; 5273 } 5274 5275 int 5276 qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, 5277 uint16_t *mresp) 5278 { 5279 int rval; 5280 mbx_cmd_t mc; 5281 mbx_cmd_t *mcp = &mc; 5282 5283 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f7, 5284 "Entered %s.\n", __func__); 5285 5286 memset(mcp->mb, 0 , sizeof(mcp->mb)); 5287 mcp->mb[0] = MBC_DIAGNOSTIC_LOOP_BACK; 5288 mcp->mb[1] = mreq->options | BIT_6; // BIT_6 specifies 64 bit addressing 5289 5290 /* transfer count */ 5291 mcp->mb[10] = LSW(mreq->transfer_size); 5292 mcp->mb[11] = MSW(mreq->transfer_size); 5293 5294 /* send data address */ 5295 mcp->mb[14] = LSW(mreq->send_dma); 5296 mcp->mb[15] = MSW(mreq->send_dma); 5297 mcp->mb[20] = LSW(MSD(mreq->send_dma)); 5298 mcp->mb[21] = MSW(MSD(mreq->send_dma)); 5299 5300 /* receive data address */ 5301 mcp->mb[16] = LSW(mreq->rcv_dma); 5302 mcp->mb[17] = MSW(mreq->rcv_dma); 5303 mcp->mb[6] = LSW(MSD(mreq->rcv_dma)); 5304 mcp->mb[7] = MSW(MSD(mreq->rcv_dma)); 5305 5306 /* Iteration count */ 5307 mcp->mb[18] = LSW(mreq->iteration_count); 5308 mcp->mb[19] = MSW(mreq->iteration_count); 5309 5310 mcp->out_mb = MBX_21|MBX_20|MBX_19|MBX_18|MBX_17|MBX_16|MBX_15| 5311 MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0; 5312 if (IS_CNA_CAPABLE(vha->hw)) 5313 mcp->out_mb |= MBX_2; 5314 mcp->in_mb = MBX_19|MBX_18|MBX_3|MBX_2|MBX_1|MBX_0; 5315 5316 mcp->buf_size = mreq->transfer_size; 5317 mcp->tov = MBX_TOV_SECONDS; 5318 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; 5319 5320 rval = qla2x00_mailbox_command(vha, mcp); 5321 5322 if (rval != QLA_SUCCESS) { 5323 ql_dbg(ql_dbg_mbx, vha, 0x10f8, 5324 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[18]=%x " 5325 "mb[19]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], 5326 mcp->mb[3], mcp->mb[18], mcp->mb[19]); 5327 } else { 5328 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f9, 5329 "Done %s.\n", __func__); 5330 } 5331 5332 /* Copy mailbox information */ 5333 memcpy( mresp, mcp->mb, 64); 5334 return rval; 5335 } 5336 5337 int 5338 qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, 5339 uint16_t *mresp) 5340 { 5341 int rval; 5342 mbx_cmd_t mc; 5343 mbx_cmd_t *mcp = &mc; 5344 struct qla_hw_data *ha = vha->hw; 5345 5346 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fa, 5347 "Entered %s.\n", __func__); 5348 5349 memset(mcp->mb, 0 , sizeof(mcp->mb)); 5350 mcp->mb[0] = MBC_DIAGNOSTIC_ECHO; 5351 /* BIT_6 specifies 64bit address */ 5352 mcp->mb[1] = mreq->options | BIT_15 | BIT_6; 5353 if (IS_CNA_CAPABLE(ha)) { 5354 mcp->mb[2] = vha->fcoe_fcf_idx; 5355 } 5356 mcp->mb[16] = LSW(mreq->rcv_dma); 5357 mcp->mb[17] = MSW(mreq->rcv_dma); 5358 mcp->mb[6] = LSW(MSD(mreq->rcv_dma)); 5359 mcp->mb[7] = MSW(MSD(mreq->rcv_dma)); 5360 5361 mcp->mb[10] = LSW(mreq->transfer_size); 5362 5363 mcp->mb[14] = LSW(mreq->send_dma); 5364 mcp->mb[15] = MSW(mreq->send_dma); 5365 mcp->mb[20] = LSW(MSD(mreq->send_dma)); 5366 mcp->mb[21] = MSW(MSD(mreq->send_dma)); 5367 5368 mcp->out_mb = MBX_21|MBX_20|MBX_17|MBX_16|MBX_15| 5369 MBX_14|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0; 5370 if (IS_CNA_CAPABLE(ha)) 5371 mcp->out_mb |= MBX_2; 5372 5373 mcp->in_mb = MBX_0; 5374 if (IS_CNA_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || 5375 IS_QLA2031(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) 5376 mcp->in_mb |= MBX_1; 5377 if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) || 5378 IS_QLA28XX(ha)) 5379 mcp->in_mb |= MBX_3; 
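/* Echo diagnostic setup mirrors the loopback test above: mb[14]/mb[15] and mb[20]/mb[21] carry the send buffer DMA address, mb[16]/mb[17] and mb[6]/mb[7] the receive buffer DMA address, mb[10] the transfer size, and BIT_6 in mb[1] selects 64-bit addressing. */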
5380 5381 mcp->tov = MBX_TOV_SECONDS; 5382 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; 5383 mcp->buf_size = mreq->transfer_size; 5384 5385 rval = qla2x00_mailbox_command(vha, mcp); 5386 5387 if (rval != QLA_SUCCESS) { 5388 ql_dbg(ql_dbg_mbx, vha, 0x10fb, 5389 "Failed=%x mb[0]=%x mb[1]=%x.\n", 5390 rval, mcp->mb[0], mcp->mb[1]); 5391 } else { 5392 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fc, 5393 "Done %s.\n", __func__); 5394 } 5395 5396 /* Copy mailbox information */ 5397 memcpy(mresp, mcp->mb, 64); 5398 return rval; 5399 } 5400 5401 int 5402 qla84xx_reset_chip(scsi_qla_host_t *vha, uint16_t enable_diagnostic) 5403 { 5404 int rval; 5405 mbx_cmd_t mc; 5406 mbx_cmd_t *mcp = &mc; 5407 5408 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fd, 5409 "Entered %s enable_diag=%d.\n", __func__, enable_diagnostic); 5410 5411 mcp->mb[0] = MBC_ISP84XX_RESET; 5412 mcp->mb[1] = enable_diagnostic; 5413 mcp->out_mb = MBX_1|MBX_0; 5414 mcp->in_mb = MBX_1|MBX_0; 5415 mcp->tov = MBX_TOV_SECONDS; 5416 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; 5417 rval = qla2x00_mailbox_command(vha, mcp); 5418 5419 if (rval != QLA_SUCCESS) 5420 ql_dbg(ql_dbg_mbx, vha, 0x10fe, "Failed=%x.\n", rval); 5421 else 5422 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ff, 5423 "Done %s.\n", __func__); 5424 5425 return rval; 5426 } 5427 5428 int 5429 qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data) 5430 { 5431 int rval; 5432 mbx_cmd_t mc; 5433 mbx_cmd_t *mcp = &mc; 5434 5435 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1100, 5436 "Entered %s.\n", __func__); 5437 5438 if (!IS_FWI2_CAPABLE(vha->hw)) 5439 return QLA_FUNCTION_FAILED; 5440 5441 mcp->mb[0] = MBC_WRITE_RAM_WORD_EXTENDED; 5442 mcp->mb[1] = LSW(risc_addr); 5443 mcp->mb[2] = LSW(data); 5444 mcp->mb[3] = MSW(data); 5445 mcp->mb[8] = MSW(risc_addr); 5446 mcp->out_mb = MBX_8|MBX_3|MBX_2|MBX_1|MBX_0; 5447 mcp->in_mb = MBX_1|MBX_0; 5448 mcp->tov = MBX_TOV_SECONDS; 5449 mcp->flags = 0; 5450 rval = qla2x00_mailbox_command(vha, mcp); 5451 if (rval != QLA_SUCCESS) { 5452 ql_dbg(ql_dbg_mbx, vha, 0x1101, 5453 "Failed=%x mb[0]=%x mb[1]=%x.\n", 5454 rval, mcp->mb[0], mcp->mb[1]); 5455 } else { 5456 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1102, 5457 "Done %s.\n", __func__); 5458 } 5459 5460 return rval; 5461 } 5462 5463 int 5464 qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb) 5465 { 5466 int rval; 5467 uint32_t stat, timer; 5468 uint16_t mb0 = 0; 5469 struct qla_hw_data *ha = vha->hw; 5470 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 5471 5472 rval = QLA_SUCCESS; 5473 5474 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1103, 5475 "Entered %s.\n", __func__); 5476 5477 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); 5478 5479 /* Write the MBC data to the registers */ 5480 wrt_reg_word(&reg->mailbox0, MBC_WRITE_MPI_REGISTER); 5481 wrt_reg_word(&reg->mailbox1, mb[0]); 5482 wrt_reg_word(&reg->mailbox2, mb[1]); 5483 wrt_reg_word(&reg->mailbox3, mb[2]); 5484 wrt_reg_word(&reg->mailbox4, mb[3]); 5485 5486 wrt_reg_dword(&reg->hccr, HCCRX_SET_HOST_INT); 5487 5488 /* Poll for MBC interrupt */ 5489 for (timer = 6000000; timer; timer--) { 5490 /* Check for pending interrupts.
*/ 5491 stat = rd_reg_dword(&reg->host_status); 5492 if (stat & HSRX_RISC_INT) { 5493 stat &= 0xff; 5494 5495 if (stat == 0x1 || stat == 0x2 || 5496 stat == 0x10 || stat == 0x11) { 5497 set_bit(MBX_INTERRUPT, 5498 &ha->mbx_cmd_flags); 5499 mb0 = rd_reg_word(&reg->mailbox0); 5500 wrt_reg_dword(&reg->hccr, 5501 HCCRX_CLR_RISC_INT); 5502 rd_reg_dword(&reg->hccr); 5503 break; 5504 } 5505 } 5506 udelay(5); 5507 } 5508 5509 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) 5510 rval = mb0 & MBS_MASK; 5511 else 5512 rval = QLA_FUNCTION_FAILED; 5513 5514 if (rval != QLA_SUCCESS) { 5515 ql_dbg(ql_dbg_mbx, vha, 0x1104, 5516 "Failed=%x mb[0]=%x.\n", rval, mb[0]); 5517 } else { 5518 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1105, 5519 "Done %s.\n", __func__); 5520 } 5521 5522 return rval; 5523 } 5524 5525 /* Set the specified data rate */ 5526 int 5527 qla2x00_set_data_rate(scsi_qla_host_t *vha, uint16_t mode) 5528 { 5529 int rval; 5530 mbx_cmd_t mc; 5531 mbx_cmd_t *mcp = &mc; 5532 struct qla_hw_data *ha = vha->hw; 5533 uint16_t val; 5534 5535 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106, 5536 "Entered %s speed:0x%x mode:0x%x.\n", __func__, ha->set_data_rate, 5537 mode); 5538 5539 if (!IS_FWI2_CAPABLE(ha)) 5540 return QLA_FUNCTION_FAILED; 5541 5542 memset(mcp, 0, sizeof(*mcp)); 5543 switch (ha->set_data_rate) { 5544 case PORT_SPEED_AUTO: 5545 case PORT_SPEED_4GB: 5546 case PORT_SPEED_8GB: 5547 case PORT_SPEED_16GB: 5548 case PORT_SPEED_32GB: 5549 val = ha->set_data_rate; 5550 break; 5551 default: 5552 ql_log(ql_log_warn, vha, 0x1199, 5553 "Unrecognized speed setting:%d. Setting Autoneg\n", 5554 ha->set_data_rate); 5555 val = ha->set_data_rate = PORT_SPEED_AUTO; 5556 break; 5557 } 5558 5559 mcp->mb[0] = MBC_DATA_RATE; 5560 mcp->mb[1] = mode; 5561 mcp->mb[2] = val; 5562 5563 mcp->out_mb = MBX_2|MBX_1|MBX_0; 5564 mcp->in_mb = MBX_2|MBX_1|MBX_0; 5565 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) 5566 mcp->in_mb |= MBX_4|MBX_3; 5567 mcp->tov = MBX_TOV_SECONDS; 5568 mcp->flags = 0; 5569 rval = qla2x00_mailbox_command(vha, mcp); 5570 if (rval != QLA_SUCCESS) { 5571 ql_dbg(ql_dbg_mbx, vha, 0x1107, 5572 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5573 } else { 5574 if (mcp->mb[1] != 0x7) 5575 ql_dbg(ql_dbg_mbx, vha, 0x1179, 5576 "Speed set:0x%x\n", mcp->mb[1]); 5577 5578 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108, 5579 "Done %s.\n", __func__); 5580 } 5581 5582 return rval; 5583 } 5584 5585 int 5586 qla2x00_get_data_rate(scsi_qla_host_t *vha) 5587 { 5588 int rval; 5589 mbx_cmd_t mc; 5590 mbx_cmd_t *mcp = &mc; 5591 struct qla_hw_data *ha = vha->hw; 5592 5593 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106, 5594 "Entered %s.\n", __func__); 5595 5596 if (!IS_FWI2_CAPABLE(ha)) 5597 return QLA_FUNCTION_FAILED; 5598 5599 mcp->mb[0] = MBC_DATA_RATE; 5600 mcp->mb[1] = QLA_GET_DATA_RATE; 5601 mcp->out_mb = MBX_1|MBX_0; 5602 mcp->in_mb = MBX_2|MBX_1|MBX_0; 5603 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) 5604 mcp->in_mb |= MBX_3; 5605 mcp->tov = MBX_TOV_SECONDS; 5606 mcp->flags = 0; 5607 rval = qla2x00_mailbox_command(vha, mcp); 5608 if (rval != QLA_SUCCESS) { 5609 ql_dbg(ql_dbg_mbx, vha, 0x1107, 5610 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5611 } else { 5612 if (mcp->mb[1] != 0x7) 5613 ha->link_data_rate = mcp->mb[1]; 5614 5615 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 5616 if (mcp->mb[4] & BIT_0) 5617 ql_log(ql_log_info, vha, 0x11a2, 5618 "FEC=enabled (data rate).\n"); 5619 } 5620 5621 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108, 5622 "Done %s.\n", __func__); 5623 if
(mcp->mb[1] != 0x7) 5624 ha->link_data_rate = mcp->mb[1]; 5625 } 5626 5627 return rval; 5628 } 5629 5630 int 5631 qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb) 5632 { 5633 int rval; 5634 mbx_cmd_t mc; 5635 mbx_cmd_t *mcp = &mc; 5636 struct qla_hw_data *ha = vha->hw; 5637 5638 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1109, 5639 "Entered %s.\n", __func__); 5640 5641 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA8044(ha) && 5642 !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 5643 return QLA_FUNCTION_FAILED; 5644 mcp->mb[0] = MBC_GET_PORT_CONFIG; 5645 mcp->out_mb = MBX_0; 5646 mcp->in_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 5647 mcp->tov = MBX_TOV_SECONDS; 5648 mcp->flags = 0; 5649 5650 rval = qla2x00_mailbox_command(vha, mcp); 5651 5652 if (rval != QLA_SUCCESS) { 5653 ql_dbg(ql_dbg_mbx, vha, 0x110a, 5654 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5655 } else { 5656 /* Copy all bits to preserve original value */ 5657 memcpy(mb, &mcp->mb[1], sizeof(uint16_t) * 4); 5658 5659 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110b, 5660 "Done %s.\n", __func__); 5661 } 5662 return rval; 5663 } 5664 5665 int 5666 qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb) 5667 { 5668 int rval; 5669 mbx_cmd_t mc; 5670 mbx_cmd_t *mcp = &mc; 5671 5672 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110c, 5673 "Entered %s.\n", __func__); 5674 5675 mcp->mb[0] = MBC_SET_PORT_CONFIG; 5676 /* Copy all bits to preserve original setting */ 5677 memcpy(&mcp->mb[1], mb, sizeof(uint16_t) * 4); 5678 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 5679 mcp->in_mb = MBX_0; 5680 mcp->tov = MBX_TOV_SECONDS; 5681 mcp->flags = 0; 5682 rval = qla2x00_mailbox_command(vha, mcp); 5683 5684 if (rval != QLA_SUCCESS) { 5685 ql_dbg(ql_dbg_mbx, vha, 0x110d, 5686 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5687 } else 5688 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110e, 5689 "Done %s.\n", __func__); 5690 5691 return rval; 5692 } 5693 5694 5695 int 5696 qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority, 5697 uint16_t *mb) 5698 { 5699 int rval; 5700 mbx_cmd_t mc; 5701 mbx_cmd_t *mcp = &mc; 5702 struct qla_hw_data *ha = vha->hw; 5703 5704 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110f, 5705 "Entered %s.\n", __func__); 5706 5707 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha)) 5708 return QLA_FUNCTION_FAILED; 5709 5710 mcp->mb[0] = MBC_PORT_PARAMS; 5711 mcp->mb[1] = loop_id; 5712 if (ha->flags.fcp_prio_enabled) 5713 mcp->mb[2] = BIT_1; 5714 else 5715 mcp->mb[2] = BIT_2; 5716 mcp->mb[4] = priority & 0xf; 5717 mcp->mb[9] = vha->vp_idx; 5718 mcp->out_mb = MBX_9|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 5719 mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0; 5720 mcp->tov = MBX_TOV_SECONDS; 5721 mcp->flags = 0; 5722 rval = qla2x00_mailbox_command(vha, mcp); 5723 if (mb != NULL) { 5724 mb[0] = mcp->mb[0]; 5725 mb[1] = mcp->mb[1]; 5726 mb[3] = mcp->mb[3]; 5727 mb[4] = mcp->mb[4]; 5728 } 5729 5730 if (rval != QLA_SUCCESS) { 5731 ql_dbg(ql_dbg_mbx, vha, 0x10cd, "Failed=%x.\n", rval); 5732 } else { 5733 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10cc, 5734 "Done %s.\n", __func__); 5735 } 5736 5737 return rval; 5738 } 5739 5740 int 5741 qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp) 5742 { 5743 int rval = QLA_FUNCTION_FAILED; 5744 struct qla_hw_data *ha = vha->hw; 5745 uint8_t byte; 5746 5747 if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha)) { 5748 ql_dbg(ql_dbg_mbx, vha, 0x1150, 5749 "Thermal not supported by this card.\n"); 5750 return rval; 5751 } 5752 5753 if (IS_QLA25XX(ha)) { 5754 if (ha->pdev->subsystem_vendor 
== PCI_VENDOR_ID_QLOGIC && 5755 ha->pdev->subsystem_device == 0x0175) { 5756 rval = qla2x00_read_sfp(vha, 0, &byte, 5757 0x98, 0x1, 1, BIT_13|BIT_0); 5758 *temp = byte; 5759 return rval; 5760 } 5761 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP && 5762 ha->pdev->subsystem_device == 0x338e) { 5763 rval = qla2x00_read_sfp(vha, 0, &byte, 5764 0x98, 0x1, 1, BIT_15|BIT_14|BIT_0); 5765 *temp = byte; 5766 return rval; 5767 } 5768 ql_dbg(ql_dbg_mbx, vha, 0x10c9, 5769 "Thermal not supported by this card.\n"); 5770 return rval; 5771 } 5772 5773 if (IS_QLA82XX(ha)) { 5774 *temp = qla82xx_read_temperature(vha); 5775 rval = QLA_SUCCESS; 5776 return rval; 5777 } else if (IS_QLA8044(ha)) { 5778 *temp = qla8044_read_temperature(vha); 5779 rval = QLA_SUCCESS; 5780 return rval; 5781 } 5782 5783 rval = qla2x00_read_asic_temperature(vha, temp); 5784 return rval; 5785 } 5786 5787 int 5788 qla82xx_mbx_intr_enable(scsi_qla_host_t *vha) 5789 { 5790 int rval; 5791 struct qla_hw_data *ha = vha->hw; 5792 mbx_cmd_t mc; 5793 mbx_cmd_t *mcp = &mc; 5794 5795 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1017, 5796 "Entered %s.\n", __func__); 5797 5798 if (!IS_FWI2_CAPABLE(ha)) 5799 return QLA_FUNCTION_FAILED; 5800 5801 memset(mcp, 0, sizeof(mbx_cmd_t)); 5802 mcp->mb[0] = MBC_TOGGLE_INTERRUPT; 5803 mcp->mb[1] = 1; 5804 5805 mcp->out_mb = MBX_1|MBX_0; 5806 mcp->in_mb = MBX_0; 5807 mcp->tov = MBX_TOV_SECONDS; 5808 mcp->flags = 0; 5809 5810 rval = qla2x00_mailbox_command(vha, mcp); 5811 if (rval != QLA_SUCCESS) { 5812 ql_dbg(ql_dbg_mbx, vha, 0x1016, 5813 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5814 } else { 5815 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100e, 5816 "Done %s.\n", __func__); 5817 } 5818 5819 return rval; 5820 } 5821 5822 int 5823 qla82xx_mbx_intr_disable(scsi_qla_host_t *vha) 5824 { 5825 int rval; 5826 struct qla_hw_data *ha = vha->hw; 5827 mbx_cmd_t mc; 5828 mbx_cmd_t *mcp = &mc; 5829 5830 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100d, 5831 "Entered %s.\n", __func__); 5832 5833 if (!IS_P3P_TYPE(ha)) 5834 return QLA_FUNCTION_FAILED; 5835 5836 memset(mcp, 0, sizeof(mbx_cmd_t)); 5837 mcp->mb[0] = MBC_TOGGLE_INTERRUPT; 5838 mcp->mb[1] = 0; 5839 5840 mcp->out_mb = MBX_1|MBX_0; 5841 mcp->in_mb = MBX_0; 5842 mcp->tov = MBX_TOV_SECONDS; 5843 mcp->flags = 0; 5844 5845 rval = qla2x00_mailbox_command(vha, mcp); 5846 if (rval != QLA_SUCCESS) { 5847 ql_dbg(ql_dbg_mbx, vha, 0x100c, 5848 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5849 } else { 5850 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100b, 5851 "Done %s.\n", __func__); 5852 } 5853 5854 return rval; 5855 } 5856 5857 int 5858 qla82xx_md_get_template_size(scsi_qla_host_t *vha) 5859 { 5860 struct qla_hw_data *ha = vha->hw; 5861 mbx_cmd_t mc; 5862 mbx_cmd_t *mcp = &mc; 5863 int rval = QLA_FUNCTION_FAILED; 5864 5865 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111f, 5866 "Entered %s.\n", __func__); 5867 5868 memset(mcp->mb, 0 , sizeof(mcp->mb)); 5869 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); 5870 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); 5871 mcp->mb[2] = LSW(RQST_TMPLT_SIZE); 5872 mcp->mb[3] = MSW(RQST_TMPLT_SIZE); 5873 5874 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 5875 mcp->in_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8| 5876 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 5877 5878 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; 5879 mcp->tov = MBX_TOV_SECONDS; 5880 rval = qla2x00_mailbox_command(vha, mcp); 5881 5882 /* Always copy back return mailbox values. 
*/ 5883 if (rval != QLA_SUCCESS) { 5884 ql_dbg(ql_dbg_mbx, vha, 0x1120, 5885 "mailbox command FAILED=0x%x, subcode=%x.\n", 5886 (mcp->mb[1] << 16) | mcp->mb[0], 5887 (mcp->mb[3] << 16) | mcp->mb[2]); 5888 } else { 5889 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1121, 5890 "Done %s.\n", __func__); 5891 ha->md_template_size = ((mcp->mb[3] << 16) | mcp->mb[2]); 5892 if (!ha->md_template_size) { 5893 ql_dbg(ql_dbg_mbx, vha, 0x1122, 5894 "Null template size obtained.\n"); 5895 rval = QLA_FUNCTION_FAILED; 5896 } 5897 } 5898 return rval; 5899 } 5900 5901 int 5902 qla82xx_md_get_template(scsi_qla_host_t *vha) 5903 { 5904 struct qla_hw_data *ha = vha->hw; 5905 mbx_cmd_t mc; 5906 mbx_cmd_t *mcp = &mc; 5907 int rval = QLA_FUNCTION_FAILED; 5908 5909 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1123, 5910 "Entered %s.\n", __func__); 5911 5912 ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev, 5913 ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL); 5914 if (!ha->md_tmplt_hdr) { 5915 ql_log(ql_log_warn, vha, 0x1124, 5916 "Unable to allocate memory for Minidump template.\n"); 5917 return rval; 5918 } 5919 5920 memset(mcp->mb, 0 , sizeof(mcp->mb)); 5921 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); 5922 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); 5923 mcp->mb[2] = LSW(RQST_TMPLT); 5924 mcp->mb[3] = MSW(RQST_TMPLT); 5925 mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma)); 5926 mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma)); 5927 mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma)); 5928 mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma)); 5929 mcp->mb[8] = LSW(ha->md_template_size); 5930 mcp->mb[9] = MSW(ha->md_template_size); 5931 5932 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; 5933 mcp->tov = MBX_TOV_SECONDS; 5934 mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8| 5935 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 5936 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0; 5937 rval = qla2x00_mailbox_command(vha, mcp); 5938 5939 if (rval != QLA_SUCCESS) { 5940 ql_dbg(ql_dbg_mbx, vha, 0x1125, 5941 "mailbox command FAILED=0x%x, subcode=%x.\n", 5942 ((mcp->mb[1] << 16) | mcp->mb[0]), 5943 ((mcp->mb[3] << 16) | mcp->mb[2])); 5944 } else 5945 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1126, 5946 "Done %s.\n", __func__); 5947 return rval; 5948 } 5949 5950 int 5951 qla8044_md_get_template(scsi_qla_host_t *vha) 5952 { 5953 struct qla_hw_data *ha = vha->hw; 5954 mbx_cmd_t mc; 5955 mbx_cmd_t *mcp = &mc; 5956 int rval = QLA_FUNCTION_FAILED; 5957 int offset = 0, size = MINIDUMP_SIZE_36K; 5958 5959 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11f, 5960 "Entered %s.\n", __func__); 5961 5962 ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev, 5963 ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL); 5964 if (!ha->md_tmplt_hdr) { 5965 ql_log(ql_log_warn, vha, 0xb11b, 5966 "Unable to allocate memory for Minidump template.\n"); 5967 return rval; 5968 } 5969 5970 memset(mcp->mb, 0 , sizeof(mcp->mb)); 5971 while (offset < ha->md_template_size) { 5972 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); 5973 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); 5974 mcp->mb[2] = LSW(RQST_TMPLT); 5975 mcp->mb[3] = MSW(RQST_TMPLT); 5976 mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma + offset)); 5977 mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma + offset)); 5978 mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma + offset)); 5979 mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma + offset)); 5980 mcp->mb[8] = LSW(size); 5981 mcp->mb[9] = MSW(size); 5982 mcp->mb[10] = offset & 0x0000FFFF; 5983 mcp->mb[11] = offset & 0xFFFF0000; 5984 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; 
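/* Each loop iteration fetches one MINIDUMP_SIZE_36K chunk of the template: mb[4]-mb[7] carry the DMA address at the current offset, mb[8]/mb[9] the chunk size, and mb[10]/mb[11] the offset itself. */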
5985 mcp->tov = MBX_TOV_SECONDS; 5986 mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8| 5987 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 5988 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0; 5989 rval = qla2x00_mailbox_command(vha, mcp); 5990 5991 if (rval != QLA_SUCCESS) { 5992 ql_dbg(ql_dbg_mbx, vha, 0xb11c, 5993 "mailbox command FAILED=0x%x, subcode=%x.\n", 5994 ((mcp->mb[1] << 16) | mcp->mb[0]), 5995 ((mcp->mb[3] << 16) | mcp->mb[2])); 5996 return rval; 5997 } else 5998 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11d, 5999 "Done %s.\n", __func__); 6000 offset = offset + size; 6001 } 6002 return rval; 6003 } 6004 6005 int 6006 qla81xx_set_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg) 6007 { 6008 int rval; 6009 struct qla_hw_data *ha = vha->hw; 6010 mbx_cmd_t mc; 6011 mbx_cmd_t *mcp = &mc; 6012 6013 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha)) 6014 return QLA_FUNCTION_FAILED; 6015 6016 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1133, 6017 "Entered %s.\n", __func__); 6018 6019 memset(mcp, 0, sizeof(mbx_cmd_t)); 6020 mcp->mb[0] = MBC_SET_LED_CONFIG; 6021 mcp->mb[1] = led_cfg[0]; 6022 mcp->mb[2] = led_cfg[1]; 6023 if (IS_QLA8031(ha)) { 6024 mcp->mb[3] = led_cfg[2]; 6025 mcp->mb[4] = led_cfg[3]; 6026 mcp->mb[5] = led_cfg[4]; 6027 mcp->mb[6] = led_cfg[5]; 6028 } 6029 6030 mcp->out_mb = MBX_2|MBX_1|MBX_0; 6031 if (IS_QLA8031(ha)) 6032 mcp->out_mb |= MBX_6|MBX_5|MBX_4|MBX_3; 6033 mcp->in_mb = MBX_0; 6034 mcp->tov = MBX_TOV_SECONDS; 6035 mcp->flags = 0; 6036 6037 rval = qla2x00_mailbox_command(vha, mcp); 6038 if (rval != QLA_SUCCESS) { 6039 ql_dbg(ql_dbg_mbx, vha, 0x1134, 6040 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 6041 } else { 6042 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1135, 6043 "Done %s.\n", __func__); 6044 } 6045 6046 return rval; 6047 } 6048 6049 int 6050 qla81xx_get_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg) 6051 { 6052 int rval; 6053 struct qla_hw_data *ha = vha->hw; 6054 mbx_cmd_t mc; 6055 mbx_cmd_t *mcp = &mc; 6056 6057 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha)) 6058 return QLA_FUNCTION_FAILED; 6059 6060 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1136, 6061 "Entered %s.\n", __func__); 6062 6063 memset(mcp, 0, sizeof(mbx_cmd_t)); 6064 mcp->mb[0] = MBC_GET_LED_CONFIG; 6065 6066 mcp->out_mb = MBX_0; 6067 mcp->in_mb = MBX_2|MBX_1|MBX_0; 6068 if (IS_QLA8031(ha)) 6069 mcp->in_mb |= MBX_6|MBX_5|MBX_4|MBX_3; 6070 mcp->tov = MBX_TOV_SECONDS; 6071 mcp->flags = 0; 6072 6073 rval = qla2x00_mailbox_command(vha, mcp); 6074 if (rval != QLA_SUCCESS) { 6075 ql_dbg(ql_dbg_mbx, vha, 0x1137, 6076 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 6077 } else { 6078 led_cfg[0] = mcp->mb[1]; 6079 led_cfg[1] = mcp->mb[2]; 6080 if (IS_QLA8031(ha)) { 6081 led_cfg[2] = mcp->mb[3]; 6082 led_cfg[3] = mcp->mb[4]; 6083 led_cfg[4] = mcp->mb[5]; 6084 led_cfg[5] = mcp->mb[6]; 6085 } 6086 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1138, 6087 "Done %s.\n", __func__); 6088 } 6089 6090 return rval; 6091 } 6092 6093 int 6094 qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable) 6095 { 6096 int rval; 6097 struct qla_hw_data *ha = vha->hw; 6098 mbx_cmd_t mc; 6099 mbx_cmd_t *mcp = &mc; 6100 6101 if (!IS_P3P_TYPE(ha)) 6102 return QLA_FUNCTION_FAILED; 6103 6104 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1127, 6105 "Entered %s.\n", __func__); 6106 6107 memset(mcp, 0, sizeof(mbx_cmd_t)); 6108 mcp->mb[0] = MBC_SET_LED_CONFIG; 6109 if (enable) 6110 mcp->mb[7] = 0xE; 6111 else 6112 mcp->mb[7] = 0xD; 6113 6114 mcp->out_mb = MBX_7|MBX_0; 6115 mcp->in_mb = MBX_0; 6116 mcp->tov = MBX_TOV_SECONDS; 6117 mcp->flags = 0; 6118 6119 rval = 
qla2x00_mailbox_command(vha, mcp); 6120 if (rval != QLA_SUCCESS) { 6121 ql_dbg(ql_dbg_mbx, vha, 0x1128, 6122 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 6123 } else { 6124 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1129, 6125 "Done %s.\n", __func__); 6126 } 6127 6128 return rval; 6129 } 6130 6131 int 6132 qla83xx_wr_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data) 6133 { 6134 int rval; 6135 struct qla_hw_data *ha = vha->hw; 6136 mbx_cmd_t mc; 6137 mbx_cmd_t *mcp = &mc; 6138 6139 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 6140 return QLA_FUNCTION_FAILED; 6141 6142 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1130, 6143 "Entered %s.\n", __func__); 6144 6145 mcp->mb[0] = MBC_WRITE_REMOTE_REG; 6146 mcp->mb[1] = LSW(reg); 6147 mcp->mb[2] = MSW(reg); 6148 mcp->mb[3] = LSW(data); 6149 mcp->mb[4] = MSW(data); 6150 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 6151 6152 mcp->in_mb = MBX_1|MBX_0; 6153 mcp->tov = MBX_TOV_SECONDS; 6154 mcp->flags = 0; 6155 rval = qla2x00_mailbox_command(vha, mcp); 6156 6157 if (rval != QLA_SUCCESS) { 6158 ql_dbg(ql_dbg_mbx, vha, 0x1131, 6159 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 6160 } else { 6161 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1132, 6162 "Done %s.\n", __func__); 6163 } 6164 6165 return rval; 6166 } 6167 6168 int 6169 qla2x00_port_logout(scsi_qla_host_t *vha, struct fc_port *fcport) 6170 { 6171 int rval; 6172 struct qla_hw_data *ha = vha->hw; 6173 mbx_cmd_t mc; 6174 mbx_cmd_t *mcp = &mc; 6175 6176 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 6177 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113b, 6178 "Implicit LOGO Unsupported.\n"); 6179 return QLA_FUNCTION_FAILED; 6180 } 6181 6182 6183 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113c, 6184 "Entering %s.\n", __func__); 6185 6186 /* Perform Implicit LOGO. */ 6187 mcp->mb[0] = MBC_PORT_LOGOUT; 6188 mcp->mb[1] = fcport->loop_id; 6189 mcp->mb[10] = BIT_15; 6190 mcp->out_mb = MBX_10|MBX_1|MBX_0; 6191 mcp->in_mb = MBX_0; 6192 mcp->tov = MBX_TOV_SECONDS; 6193 mcp->flags = 0; 6194 rval = qla2x00_mailbox_command(vha, mcp); 6195 if (rval != QLA_SUCCESS) 6196 ql_dbg(ql_dbg_mbx, vha, 0x113d, 6197 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 6198 else 6199 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113e, 6200 "Done %s.\n", __func__); 6201 6202 return rval; 6203 } 6204 6205 int 6206 qla83xx_rd_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t *data) 6207 { 6208 int rval; 6209 mbx_cmd_t mc; 6210 mbx_cmd_t *mcp = &mc; 6211 struct qla_hw_data *ha = vha->hw; 6212 unsigned long retry_max_time = jiffies + (2 * HZ); 6213 6214 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 6215 return QLA_FUNCTION_FAILED; 6216 6217 ql_dbg(ql_dbg_mbx, vha, 0x114b, "Entered %s.\n", __func__); 6218 6219 retry_rd_reg: 6220 mcp->mb[0] = MBC_READ_REMOTE_REG; 6221 mcp->mb[1] = LSW(reg); 6222 mcp->mb[2] = MSW(reg); 6223 mcp->out_mb = MBX_2|MBX_1|MBX_0; 6224 mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0; 6225 mcp->tov = MBX_TOV_SECONDS; 6226 mcp->flags = 0; 6227 rval = qla2x00_mailbox_command(vha, mcp); 6228 6229 if (rval != QLA_SUCCESS) { 6230 ql_dbg(ql_dbg_mbx, vha, 0x114c, 6231 "Failed=%x mb[0]=%x mb[1]=%x.\n", 6232 rval, mcp->mb[0], mcp->mb[1]); 6233 } else { 6234 *data = (mcp->mb[3] | (mcp->mb[4] << 16)); 6235 if (*data == QLA8XXX_BAD_VALUE) { 6236 /* 6237 * During soft-reset CAMRAM register reads might 6238 * return 0xbad0bad0. So retry for MAX of 2 sec 6239 * while reading camram registers. 6240 */ 6241 if (time_after(jiffies, retry_max_time)) { 6242 ql_dbg(ql_dbg_mbx, vha, 0x1141, 6243 "Failure to read CAMRAM register. 
" 6244 "data=0x%x.\n", *data); 6245 return QLA_FUNCTION_FAILED; 6246 } 6247 msleep(100); 6248 goto retry_rd_reg; 6249 } 6250 ql_dbg(ql_dbg_mbx, vha, 0x1142, "Done %s.\n", __func__); 6251 } 6252 6253 return rval; 6254 } 6255 6256 int 6257 qla83xx_restart_nic_firmware(scsi_qla_host_t *vha) 6258 { 6259 int rval; 6260 mbx_cmd_t mc; 6261 mbx_cmd_t *mcp = &mc; 6262 struct qla_hw_data *ha = vha->hw; 6263 6264 if (!IS_QLA83XX(ha)) 6265 return QLA_FUNCTION_FAILED; 6266 6267 ql_dbg(ql_dbg_mbx, vha, 0x1143, "Entered %s.\n", __func__); 6268 6269 mcp->mb[0] = MBC_RESTART_NIC_FIRMWARE; 6270 mcp->out_mb = MBX_0; 6271 mcp->in_mb = MBX_1|MBX_0; 6272 mcp->tov = MBX_TOV_SECONDS; 6273 mcp->flags = 0; 6274 rval = qla2x00_mailbox_command(vha, mcp); 6275 6276 if (rval != QLA_SUCCESS) { 6277 ql_dbg(ql_dbg_mbx, vha, 0x1144, 6278 "Failed=%x mb[0]=%x mb[1]=%x.\n", 6279 rval, mcp->mb[0], mcp->mb[1]); 6280 qla2xxx_dump_fw(vha); 6281 } else { 6282 ql_dbg(ql_dbg_mbx, vha, 0x1145, "Done %s.\n", __func__); 6283 } 6284 6285 return rval; 6286 } 6287 6288 int 6289 qla83xx_access_control(scsi_qla_host_t *vha, uint16_t options, 6290 uint32_t start_addr, uint32_t end_addr, uint16_t *sector_size) 6291 { 6292 int rval; 6293 mbx_cmd_t mc; 6294 mbx_cmd_t *mcp = &mc; 6295 uint8_t subcode = (uint8_t)options; 6296 struct qla_hw_data *ha = vha->hw; 6297 6298 if (!IS_QLA8031(ha)) 6299 return QLA_FUNCTION_FAILED; 6300 6301 ql_dbg(ql_dbg_mbx, vha, 0x1146, "Entered %s.\n", __func__); 6302 6303 mcp->mb[0] = MBC_SET_ACCESS_CONTROL; 6304 mcp->mb[1] = options; 6305 mcp->out_mb = MBX_1|MBX_0; 6306 if (subcode & BIT_2) { 6307 mcp->mb[2] = LSW(start_addr); 6308 mcp->mb[3] = MSW(start_addr); 6309 mcp->mb[4] = LSW(end_addr); 6310 mcp->mb[5] = MSW(end_addr); 6311 mcp->out_mb |= MBX_5|MBX_4|MBX_3|MBX_2; 6312 } 6313 mcp->in_mb = MBX_2|MBX_1|MBX_0; 6314 if (!(subcode & (BIT_2 | BIT_5))) 6315 mcp->in_mb |= MBX_4|MBX_3; 6316 mcp->tov = MBX_TOV_SECONDS; 6317 mcp->flags = 0; 6318 rval = qla2x00_mailbox_command(vha, mcp); 6319 6320 if (rval != QLA_SUCCESS) { 6321 ql_dbg(ql_dbg_mbx, vha, 0x1147, 6322 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[4]=%x.\n", 6323 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3], 6324 mcp->mb[4]); 6325 qla2xxx_dump_fw(vha); 6326 } else { 6327 if (subcode & BIT_5) 6328 *sector_size = mcp->mb[1]; 6329 else if (subcode & (BIT_6 | BIT_7)) { 6330 ql_dbg(ql_dbg_mbx, vha, 0x1148, 6331 "Driver-lock id=%x%x", mcp->mb[4], mcp->mb[3]); 6332 } else if (subcode & (BIT_3 | BIT_4)) { 6333 ql_dbg(ql_dbg_mbx, vha, 0x1149, 6334 "Flash-lock id=%x%x", mcp->mb[4], mcp->mb[3]); 6335 } 6336 ql_dbg(ql_dbg_mbx, vha, 0x114a, "Done %s.\n", __func__); 6337 } 6338 6339 return rval; 6340 } 6341 6342 int 6343 qla2x00_dump_mctp_data(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr, 6344 uint32_t size) 6345 { 6346 int rval; 6347 mbx_cmd_t mc; 6348 mbx_cmd_t *mcp = &mc; 6349 6350 if (!IS_MCTP_CAPABLE(vha->hw)) 6351 return QLA_FUNCTION_FAILED; 6352 6353 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114f, 6354 "Entered %s.\n", __func__); 6355 6356 mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED; 6357 mcp->mb[1] = LSW(addr); 6358 mcp->mb[2] = MSW(req_dma); 6359 mcp->mb[3] = LSW(req_dma); 6360 mcp->mb[4] = MSW(size); 6361 mcp->mb[5] = LSW(size); 6362 mcp->mb[6] = MSW(MSD(req_dma)); 6363 mcp->mb[7] = LSW(MSD(req_dma)); 6364 mcp->mb[8] = MSW(addr); 6365 /* Setting RAM ID to valid */ 6366 /* For MCTP RAM ID is 0x40 */ 6367 mcp->mb[10] = BIT_7 | 0x40; 6368 6369 mcp->out_mb |= MBX_10|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1| 6370 MBX_0; 6371 6372 mcp->in_mb = 
int
qla26xx_dport_diagnostics(scsi_qla_host_t *vha,
    void *dd_buf, uint size, uint options)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	dma_addr_t dd_dma;

	if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) &&
	    !IS_QLA28XX(vha->hw))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x119f,
	    "Entered %s.\n", __func__);

	dd_dma = dma_map_single(&vha->hw->pdev->dev,
	    dd_buf, size, DMA_FROM_DEVICE);
	if (dma_mapping_error(&vha->hw->pdev->dev, dd_dma)) {
		ql_log(ql_log_warn, vha, 0x1194, "Failed to map dma buffer.\n");
		return QLA_MEMORY_ALLOC_FAILED;
	}

	memset(dd_buf, 0, size);

	mcp->mb[0] = MBC_DPORT_DIAGNOSTICS;
	mcp->mb[1] = options;
	mcp->mb[2] = MSW(LSD(dd_dma));
	mcp->mb[3] = LSW(LSD(dd_dma));
	mcp->mb[6] = MSW(MSD(dd_dma));
	mcp->mb[7] = LSW(MSD(dd_dma));
	mcp->mb[8] = size;
	mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->buf_size = size;
	mcp->flags = MBX_DMA_IN;
	mcp->tov = MBX_TOV_SECONDS * 4;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1195, "Failed=%x.\n", rval);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1196,
		    "Done %s.\n", __func__);
	}

	dma_unmap_single(&vha->hw->pdev->dev, dd_dma,
	    size, DMA_FROM_DEVICE);

	return rval;
}

static void qla2x00_async_mb_sp_done(srb_t *sp, int res)
{
	sp->u.iocb_cmd.u.mbx.rc = res;

	complete(&sp->u.iocb_cmd.u.mbx.comp);
	/* don't free sp here. Let the caller do the free */
}

/*
 * This mailbox uses the iocb interface to send MB command.
 * This allows non-critical (non chip setup) commands to go
 * out in parallel.
 */
int qla24xx_send_mb_cmd(struct scsi_qla_host *vha, mbx_cmd_t *mcp)
{
	int rval = QLA_FUNCTION_FAILED;
	srb_t *sp;
	struct srb_iocb *c;

	if (!vha->hw->flags.fw_started)
		goto done;

	sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
	if (!sp)
		goto done;

	sp->type = SRB_MB_IOCB;
	sp->name = mb_to_str(mcp->mb[0]);

	c = &sp->u.iocb_cmd;
	c->timeout = qla2x00_async_iocb_timeout;
	init_completion(&c->u.mbx.comp);

	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	memcpy(sp->u.iocb_cmd.u.mbx.out_mb, mcp->mb, SIZEOF_IOCB_MB_REG);

	sp->done = qla2x00_async_mb_sp_done;

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1018,
		    "%s: %s Failed submission. %x.\n",
		    __func__, sp->name, rval);
		goto done_free_sp;
	}

	ql_dbg(ql_dbg_mbx, vha, 0x113f, "MB:%s hndl %x submitted\n",
	    sp->name, sp->handle);

	wait_for_completion(&c->u.mbx.comp);
	memcpy(mcp->mb, sp->u.iocb_cmd.u.mbx.in_mb, SIZEOF_IOCB_MB_REG);

	rval = c->u.mbx.rc;
	switch (rval) {
	case QLA_FUNCTION_TIMEOUT:
		ql_dbg(ql_dbg_mbx, vha, 0x1140, "%s: %s Timeout. %x.\n",
		    __func__, sp->name, rval);
		break;
	case QLA_SUCCESS:
		ql_dbg(ql_dbg_mbx, vha, 0x119d, "%s: %s done.\n",
		    __func__, sp->name);
		break;
	default:
		ql_dbg(ql_dbg_mbx, vha, 0x119e, "%s: %s Failed. %x.\n",
		    __func__, sp->name, rval);
		break;
	}

done_free_sp:
	sp->free(sp);
done:
	return rval;
}
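/*
 * Illustrative sketch (not part of the driver): a minimal caller of
 * qla24xx_send_mb_cmd().  The wait helpers below (qla24xx_gpdb_wait(),
 * qla24xx_gidlist_wait(), qla24xx_res_count_wait()) follow this same
 * pattern; the command and function name used here are examples only,
 * and the assumption that the state comes back in mb[1] is marked below.
 */
static int __maybe_unused qla24xx_example_mb_via_iocb(struct scsi_qla_host *vha,
    uint16_t *fw_state)
{
	mbx_cmd_t mc;
	int rval;

	memset(&mc, 0, sizeof(mc));
	mc.mb[0] = MBC_GET_FIRMWARE_STATE;

	rval = qla24xx_send_mb_cmd(vha, &mc);
	if (rval == QLA_SUCCESS)
		*fw_state = mc.mb[1];	/* assumption: state returned in mb[1] */

	return rval;
}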
%x.\n", 6497 __func__, sp->name, rval); 6498 break; 6499 case QLA_SUCCESS: 6500 ql_dbg(ql_dbg_mbx, vha, 0x119d, "%s: %s done.\n", 6501 __func__, sp->name); 6502 break; 6503 default: 6504 ql_dbg(ql_dbg_mbx, vha, 0x119e, "%s: %s Failed. %x.\n", 6505 __func__, sp->name, rval); 6506 break; 6507 } 6508 6509 done_free_sp: 6510 sp->free(sp); 6511 done: 6512 return rval; 6513 } 6514 6515 /* 6516 * qla24xx_gpdb_wait 6517 * NOTE: Do not call this routine from DPC thread 6518 */ 6519 int qla24xx_gpdb_wait(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt) 6520 { 6521 int rval = QLA_FUNCTION_FAILED; 6522 dma_addr_t pd_dma; 6523 struct port_database_24xx *pd; 6524 struct qla_hw_data *ha = vha->hw; 6525 mbx_cmd_t mc; 6526 6527 if (!vha->hw->flags.fw_started) 6528 goto done; 6529 6530 pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma); 6531 if (pd == NULL) { 6532 ql_log(ql_log_warn, vha, 0xd047, 6533 "Failed to allocate port database structure.\n"); 6534 goto done_free_sp; 6535 } 6536 6537 memset(&mc, 0, sizeof(mc)); 6538 mc.mb[0] = MBC_GET_PORT_DATABASE; 6539 mc.mb[1] = fcport->loop_id; 6540 mc.mb[2] = MSW(pd_dma); 6541 mc.mb[3] = LSW(pd_dma); 6542 mc.mb[6] = MSW(MSD(pd_dma)); 6543 mc.mb[7] = LSW(MSD(pd_dma)); 6544 mc.mb[9] = vha->vp_idx; 6545 mc.mb[10] = opt; 6546 6547 rval = qla24xx_send_mb_cmd(vha, &mc); 6548 if (rval != QLA_SUCCESS) { 6549 ql_dbg(ql_dbg_mbx, vha, 0x1193, 6550 "%s: %8phC fail\n", __func__, fcport->port_name); 6551 goto done_free_sp; 6552 } 6553 6554 rval = __qla24xx_parse_gpdb(vha, fcport, pd); 6555 6556 ql_dbg(ql_dbg_mbx, vha, 0x1197, "%s: %8phC done\n", 6557 __func__, fcport->port_name); 6558 6559 done_free_sp: 6560 if (pd) 6561 dma_pool_free(ha->s_dma_pool, pd, pd_dma); 6562 done: 6563 return rval; 6564 } 6565 6566 int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, 6567 struct port_database_24xx *pd) 6568 { 6569 int rval = QLA_SUCCESS; 6570 uint64_t zero = 0; 6571 u8 current_login_state, last_login_state; 6572 6573 if (NVME_TARGET(vha->hw, fcport)) { 6574 current_login_state = pd->current_login_state >> 4; 6575 last_login_state = pd->last_login_state >> 4; 6576 } else { 6577 current_login_state = pd->current_login_state & 0xf; 6578 last_login_state = pd->last_login_state & 0xf; 6579 } 6580 6581 /* Check for logged in state. */ 6582 if (current_login_state != PDS_PRLI_COMPLETE) { 6583 ql_dbg(ql_dbg_mbx, vha, 0x119a, 6584 "Unable to verify login-state (%x/%x) for loop_id %x.\n", 6585 current_login_state, last_login_state, fcport->loop_id); 6586 rval = QLA_FUNCTION_FAILED; 6587 goto gpd_error_out; 6588 } 6589 6590 if (fcport->loop_id == FC_NO_LOOP_ID || 6591 (memcmp(fcport->port_name, (uint8_t *)&zero, 8) && 6592 memcmp(fcport->port_name, pd->port_name, 8))) { 6593 /* We lost the device mid way. */ 6594 rval = QLA_NOT_LOGGED_IN; 6595 goto gpd_error_out; 6596 } 6597 6598 /* Names are little-endian. */ 6599 memcpy(fcport->node_name, pd->node_name, WWN_SIZE); 6600 memcpy(fcport->port_name, pd->port_name, WWN_SIZE); 6601 6602 /* Get port_id of device. 
int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport,
    struct port_database_24xx *pd)
{
	int rval = QLA_SUCCESS;
	uint64_t zero = 0;
	u8 current_login_state, last_login_state;

	if (NVME_TARGET(vha->hw, fcport)) {
		current_login_state = pd->current_login_state >> 4;
		last_login_state = pd->last_login_state >> 4;
	} else {
		current_login_state = pd->current_login_state & 0xf;
		last_login_state = pd->last_login_state & 0xf;
	}

	/* Check for logged in state. */
	if (current_login_state != PDS_PRLI_COMPLETE) {
		ql_dbg(ql_dbg_mbx, vha, 0x119a,
		    "Unable to verify login-state (%x/%x) for loop_id %x.\n",
		    current_login_state, last_login_state, fcport->loop_id);
		rval = QLA_FUNCTION_FAILED;
		goto gpd_error_out;
	}

	if (fcport->loop_id == FC_NO_LOOP_ID ||
	    (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
	     memcmp(fcport->port_name, pd->port_name, 8))) {
		/* We lost the device mid way. */
		rval = QLA_NOT_LOGGED_IN;
		goto gpd_error_out;
	}

	/* Names are little-endian. */
	memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
	memcpy(fcport->port_name, pd->port_name, WWN_SIZE);

	/* Get port_id of device. */
	fcport->d_id.b.domain = pd->port_id[0];
	fcport->d_id.b.area = pd->port_id[1];
	fcport->d_id.b.al_pa = pd->port_id[2];
	fcport->d_id.b.rsvd_1 = 0;

	ql_dbg(ql_dbg_disc, vha, 0x2062,
	    "%8phC SVC Param w3 %02x%02x",
	    fcport->port_name,
	    pd->prli_svc_param_word_3[1],
	    pd->prli_svc_param_word_3[0]);

	if (NVME_TARGET(vha->hw, fcport)) {
		fcport->port_type = FCT_NVME;
		if ((pd->prli_svc_param_word_3[0] & BIT_5) == 0)
			fcport->port_type |= FCT_NVME_INITIATOR;
		if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
			fcport->port_type |= FCT_NVME_TARGET;
		if ((pd->prli_svc_param_word_3[0] & BIT_3) == 0)
			fcport->port_type |= FCT_NVME_DISCOVERY;
	} else {
		/* If not target must be initiator or unknown type. */
		if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
			fcport->port_type = FCT_INITIATOR;
		else
			fcport->port_type = FCT_TARGET;
	}
	/* Passback COS information. */
	fcport->supported_classes = (pd->flags & PDF_CLASS_2) ?
	    FC_COS_CLASS2 : FC_COS_CLASS3;

	if (pd->prli_svc_param_word_3[0] & BIT_7) {
		fcport->flags |= FCF_CONF_COMP_SUPPORTED;
		fcport->conf_compl_supported = 1;
	}

gpd_error_out:
	return rval;
}

/*
 * qla24xx_gidlist_wait
 * NOTE: don't call this routine from DPC thread.
 */
int qla24xx_gidlist_wait(struct scsi_qla_host *vha,
    void *id_list, dma_addr_t id_list_dma, uint16_t *entries)
{
	int rval = QLA_FUNCTION_FAILED;
	mbx_cmd_t mc;

	if (!vha->hw->flags.fw_started)
		goto done;

	memset(&mc, 0, sizeof(mc));
	mc.mb[0] = MBC_GET_ID_LIST;
	mc.mb[2] = MSW(id_list_dma);
	mc.mb[3] = LSW(id_list_dma);
	mc.mb[6] = MSW(MSD(id_list_dma));
	mc.mb[7] = LSW(MSD(id_list_dma));
	mc.mb[8] = 0;
	mc.mb[9] = vha->vp_idx;

	rval = qla24xx_send_mb_cmd(vha, &mc);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x119b,
		    "%s: fail\n", __func__);
	} else {
		*entries = mc.mb[1];
		ql_dbg(ql_dbg_mbx, vha, 0x119c,
		    "%s: done\n", __func__);
	}
done:
	return rval;
}

int qla27xx_set_zio_threshold(scsi_qla_host_t *vha, uint16_t value)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1200,
	    "Entered %s\n", __func__);

	memset(mcp->mb, 0, sizeof(mcp->mb));
	mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
	mcp->mb[1] = 1;
	mcp->mb[2] = value;
	mcp->out_mb = MBX_2 | MBX_1 | MBX_0;
	mcp->in_mb = MBX_2 | MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;

	rval = qla2x00_mailbox_command(vha, mcp);

	ql_dbg(ql_dbg_mbx, vha, 0x1201, "%s %x\n",
	    (rval != QLA_SUCCESS) ? "Failed" : "Done", rval);

	return rval;
}

int qla27xx_get_zio_threshold(scsi_qla_host_t *vha, uint16_t *value)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1203,
	    "Entered %s\n", __func__);

	memset(mcp->mb, 0, sizeof(mcp->mb));
	mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
	mcp->mb[1] = 0;
	mcp->out_mb = MBX_1 | MBX_0;
	mcp->in_mb = MBX_2 | MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;

	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval == QLA_SUCCESS)
		*value = mc.mb[2];

	ql_dbg(ql_dbg_mbx, vha, 0x1205, "%s %x\n",
	    (rval != QLA_SUCCESS) ? "Failed" : "Done", rval);

	return rval;
}
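/*
 * Illustrative sketch (not part of the driver): reading the current ZIO
 * threshold with qla27xx_get_zio_threshold() and only writing a new one
 * via qla27xx_set_zio_threshold() when it differs.  The function name is
 * hypothetical.
 */
static int __maybe_unused qla27xx_example_update_zio(scsi_qla_host_t *vha,
    uint16_t new_threshold)
{
	uint16_t cur = 0;
	int rval;

	rval = qla27xx_get_zio_threshold(vha, &cur);
	if (rval != QLA_SUCCESS || cur == new_threshold)
		return rval;

	return qla27xx_set_zio_threshold(vha, new_threshold);
}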
"Failed" : "Done", rval); 6726 6727 return rval; 6728 } 6729 6730 int 6731 qla2x00_read_sfp_dev(struct scsi_qla_host *vha, char *buf, int count) 6732 { 6733 struct qla_hw_data *ha = vha->hw; 6734 uint16_t iter, addr, offset; 6735 dma_addr_t phys_addr; 6736 int rval, c; 6737 u8 *sfp_data; 6738 6739 memset(ha->sfp_data, 0, SFP_DEV_SIZE); 6740 addr = 0xa0; 6741 phys_addr = ha->sfp_data_dma; 6742 sfp_data = ha->sfp_data; 6743 offset = c = 0; 6744 6745 for (iter = 0; iter < SFP_DEV_SIZE / SFP_BLOCK_SIZE; iter++) { 6746 if (iter == 4) { 6747 /* Skip to next device address. */ 6748 addr = 0xa2; 6749 offset = 0; 6750 } 6751 6752 rval = qla2x00_read_sfp(vha, phys_addr, sfp_data, 6753 addr, offset, SFP_BLOCK_SIZE, BIT_1); 6754 if (rval != QLA_SUCCESS) { 6755 ql_log(ql_log_warn, vha, 0x706d, 6756 "Unable to read SFP data (%x/%x/%x).\n", rval, 6757 addr, offset); 6758 6759 return rval; 6760 } 6761 6762 if (buf && (c < count)) { 6763 u16 sz; 6764 6765 if ((count - c) >= SFP_BLOCK_SIZE) 6766 sz = SFP_BLOCK_SIZE; 6767 else 6768 sz = count - c; 6769 6770 memcpy(buf, sfp_data, sz); 6771 buf += SFP_BLOCK_SIZE; 6772 c += sz; 6773 } 6774 phys_addr += SFP_BLOCK_SIZE; 6775 sfp_data += SFP_BLOCK_SIZE; 6776 offset += SFP_BLOCK_SIZE; 6777 } 6778 6779 return rval; 6780 } 6781 6782 int qla24xx_res_count_wait(struct scsi_qla_host *vha, 6783 uint16_t *out_mb, int out_mb_sz) 6784 { 6785 int rval = QLA_FUNCTION_FAILED; 6786 mbx_cmd_t mc; 6787 6788 if (!vha->hw->flags.fw_started) 6789 goto done; 6790 6791 memset(&mc, 0, sizeof(mc)); 6792 mc.mb[0] = MBC_GET_RESOURCE_COUNTS; 6793 6794 rval = qla24xx_send_mb_cmd(vha, &mc); 6795 if (rval != QLA_SUCCESS) { 6796 ql_dbg(ql_dbg_mbx, vha, 0xffff, 6797 "%s: fail\n", __func__); 6798 } else { 6799 if (out_mb_sz <= SIZEOF_IOCB_MB_REG) 6800 memcpy(out_mb, mc.mb, out_mb_sz); 6801 else 6802 memcpy(out_mb, mc.mb, SIZEOF_IOCB_MB_REG); 6803 6804 ql_dbg(ql_dbg_mbx, vha, 0xffff, 6805 "%s: done\n", __func__); 6806 } 6807 done: 6808 return rval; 6809 } 6810 6811 int qla28xx_secure_flash_update(scsi_qla_host_t *vha, uint16_t opts, 6812 uint16_t region, uint32_t len, dma_addr_t sfub_dma_addr, 6813 uint32_t sfub_len) 6814 { 6815 int rval; 6816 mbx_cmd_t mc; 6817 mbx_cmd_t *mcp = &mc; 6818 6819 mcp->mb[0] = MBC_SECURE_FLASH_UPDATE; 6820 mcp->mb[1] = opts; 6821 mcp->mb[2] = region; 6822 mcp->mb[3] = MSW(len); 6823 mcp->mb[4] = LSW(len); 6824 mcp->mb[5] = MSW(sfub_dma_addr); 6825 mcp->mb[6] = LSW(sfub_dma_addr); 6826 mcp->mb[7] = MSW(MSD(sfub_dma_addr)); 6827 mcp->mb[8] = LSW(MSD(sfub_dma_addr)); 6828 mcp->mb[9] = sfub_len; 6829 mcp->out_mb = 6830 MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 6831 mcp->in_mb = MBX_2|MBX_1|MBX_0; 6832 mcp->tov = MBX_TOV_SECONDS; 6833 mcp->flags = 0; 6834 rval = qla2x00_mailbox_command(vha, mcp); 6835 6836 if (rval != QLA_SUCCESS) { 6837 ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s(%ld): failed rval 0x%x, %x %x %x", 6838 __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1], 6839 mcp->mb[2]); 6840 } 6841 6842 return rval; 6843 } 6844 6845 int qla2xxx_write_remote_register(scsi_qla_host_t *vha, uint32_t addr, 6846 uint32_t data) 6847 { 6848 int rval; 6849 mbx_cmd_t mc; 6850 mbx_cmd_t *mcp = &mc; 6851 6852 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8, 6853 "Entered %s.\n", __func__); 6854 6855 mcp->mb[0] = MBC_WRITE_REMOTE_REG; 6856 mcp->mb[1] = LSW(addr); 6857 mcp->mb[2] = MSW(addr); 6858 mcp->mb[3] = LSW(data); 6859 mcp->mb[4] = MSW(data); 6860 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 6861 mcp->in_mb = MBX_1|MBX_0; 6862 mcp->tov = MBX_TOV_SECONDS; 6863 
int qla2xxx_read_remote_register(scsi_qla_host_t *vha, uint32_t addr,
    uint32_t *data)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_READ_REMOTE_REG;
	mcp->mb[1] = LSW(addr);
	mcp->mb[2] = MSW(addr);
	mcp->out_mb = MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	*data = (uint32_t)((((uint32_t)mcp->mb[4]) << 16) | mcp->mb[3]);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x10e9,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
		    "Done %s.\n", __func__);
	}

	return rval;
}

int
ql26xx_led_config(scsi_qla_host_t *vha, uint16_t options, uint16_t *led)
{
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	int rval;

	if (!IS_QLA2031(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx, vha, 0x7070, "Entered %s (options=%x).\n",
	    __func__, options);

	mcp->mb[0] = MBC_SET_GET_FC_LED_CONFIG;
	mcp->mb[1] = options;
	mcp->out_mb = MBX_1|MBX_0;
	mcp->in_mb = MBX_1|MBX_0;
	if (options & BIT_0) {
		if (options & BIT_1) {
			mcp->mb[2] = led[2];
			mcp->out_mb |= MBX_2;
		}
		if (options & BIT_2) {
			mcp->mb[3] = led[0];
			mcp->out_mb |= MBX_3;
		}
		if (options & BIT_3) {
			mcp->mb[4] = led[1];
			mcp->out_mb |= MBX_4;
		}
	} else {
		mcp->in_mb |= MBX_4|MBX_3|MBX_2;
	}
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval) {
		ql_dbg(ql_dbg_mbx, vha, 0x7071, "Failed %s %x (mb=%x,%x)\n",
		    __func__, rval, mcp->mb[0], mcp->mb[1]);
		return rval;
	}

	if (options & BIT_0) {
		ha->beacon_blink_led = 0;
		ql_dbg(ql_dbg_mbx, vha, 0x7072, "Done %s\n", __func__);
	} else {
		led[2] = mcp->mb[2];
		led[0] = mcp->mb[3];
		led[1] = mcp->mb[4];
		ql_dbg(ql_dbg_mbx, vha, 0x7073, "Done %s (led=%x,%x,%x)\n",
		    __func__, led[0], led[1], led[2]);
	}

	return rval;
}

/**
 * qla_no_op_mb() - This MB is used to check if FW is still alive and
 * able to generate an interrupt. Otherwise, a timeout will trigger
 * FW dump + reset
 * @vha: host adapter pointer
 * Return: None
 */
void qla_no_op_mb(struct scsi_qla_host *vha)
{
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	int rval;

	memset(&mc, 0, sizeof(mc));
	mcp->mb[0] = 0; /* noop cmd = 0 */
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_0;
	mcp->tov = 5;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval) {
		ql_dbg(ql_dbg_async, vha, 0x7071,
		    "Failed %s %x\n", __func__, rval);
	}
}
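/*
 * Illustrative sketch (not part of the driver): qla_no_op_mb() is intended
 * as a firmware liveness probe, e.g. invoked from a periodic work item.
 * The function name here is hypothetical; the driver's real heartbeat
 * plumbing lives elsewhere.
 */
static void __maybe_unused qla_example_heartbeat(struct scsi_qla_host *vha)
{
	if (vha->hw->flags.fw_started)
		qla_no_op_mb(vha);
}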