1 /* 2 * QLogic Fibre Channel HBA Driver 3 * Copyright (c) 2003-2014 QLogic Corporation 4 * 5 * See LICENSE.qla2xxx for copyright and licensing details. 6 */ 7 #include "qla_def.h" 8 #include "qla_target.h" 9 10 #include <linux/delay.h> 11 #include <linux/gfp.h> 12 13 static struct mb_cmd_name { 14 uint16_t cmd; 15 const char *str; 16 } mb_str[] = { 17 {MBC_GET_PORT_DATABASE, "GPDB"}, 18 {MBC_GET_ID_LIST, "GIDList"}, 19 {MBC_GET_LINK_PRIV_STATS, "Stats"}, 20 {MBC_GET_RESOURCE_COUNTS, "ResCnt"}, 21 }; 22 23 static const char *mb_to_str(uint16_t cmd) 24 { 25 int i; 26 struct mb_cmd_name *e; 27 28 for (i = 0; i < ARRAY_SIZE(mb_str); i++) { 29 e = mb_str + i; 30 if (cmd == e->cmd) 31 return e->str; 32 } 33 return "unknown"; 34 } 35 36 static struct rom_cmd { 37 uint16_t cmd; 38 } rom_cmds[] = { 39 { MBC_LOAD_RAM }, 40 { MBC_EXECUTE_FIRMWARE }, 41 { MBC_READ_RAM_WORD }, 42 { MBC_MAILBOX_REGISTER_TEST }, 43 { MBC_VERIFY_CHECKSUM }, 44 { MBC_GET_FIRMWARE_VERSION }, 45 { MBC_LOAD_RISC_RAM }, 46 { MBC_DUMP_RISC_RAM }, 47 { MBC_LOAD_RISC_RAM_EXTENDED }, 48 { MBC_DUMP_RISC_RAM_EXTENDED }, 49 { MBC_WRITE_RAM_WORD_EXTENDED }, 50 { MBC_READ_RAM_EXTENDED }, 51 { MBC_GET_RESOURCE_COUNTS }, 52 { MBC_SET_FIRMWARE_OPTION }, 53 { MBC_MID_INITIALIZE_FIRMWARE }, 54 { MBC_GET_FIRMWARE_STATE }, 55 { MBC_GET_MEM_OFFLOAD_CNTRL_STAT }, 56 { MBC_GET_RETRY_COUNT }, 57 { MBC_TRACE_CONTROL }, 58 { MBC_INITIALIZE_MULTIQ }, 59 { MBC_IOCB_COMMAND_A64 }, 60 { MBC_GET_ADAPTER_LOOP_ID }, 61 { MBC_READ_SFP }, 62 { MBC_GET_RNID_PARAMS }, 63 { MBC_GET_SET_ZIO_THRESHOLD }, 64 }; 65 66 static int is_rom_cmd(uint16_t cmd) 67 { 68 int i; 69 struct rom_cmd *wc; 70 71 for (i = 0; i < ARRAY_SIZE(rom_cmds); i++) { 72 wc = rom_cmds + i; 73 if (wc->cmd == cmd) 74 return 1; 75 } 76 77 return 0; 78 } 79 80 /* 81 * qla2x00_mailbox_command 82 * Issue mailbox command and waits for completion. 83 * 84 * Input: 85 * ha = adapter block pointer. 86 * mcp = driver internal mbx struct pointer. 87 * 88 * Output: 89 * mb[MAX_MAILBOX_REGISTER_COUNT] = returned mailbox data. 90 * 91 * Returns: 92 * 0 : QLA_SUCCESS = cmd performed success 93 * 1 : QLA_FUNCTION_FAILED (error encountered) 94 * 6 : QLA_FUNCTION_TIMEOUT (timeout condition encountered) 95 * 96 * Context: 97 * Kernel context. 
98 */ 99 static int 100 qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) 101 { 102 int rval, i; 103 unsigned long flags = 0; 104 device_reg_t *reg; 105 uint8_t abort_active; 106 uint8_t io_lock_on; 107 uint16_t command = 0; 108 uint16_t *iptr; 109 __le16 __iomem *optr; 110 uint32_t cnt; 111 uint32_t mboxes; 112 unsigned long wait_time; 113 struct qla_hw_data *ha = vha->hw; 114 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 115 u32 chip_reset; 116 117 118 ql_dbg(ql_dbg_mbx, vha, 0x1000, "Entered %s.\n", __func__); 119 120 if (ha->pdev->error_state == pci_channel_io_perm_failure) { 121 ql_log(ql_log_warn, vha, 0x1001, 122 "PCI channel failed permanently, exiting.\n"); 123 return QLA_FUNCTION_TIMEOUT; 124 } 125 126 if (vha->device_flags & DFLG_DEV_FAILED) { 127 ql_log(ql_log_warn, vha, 0x1002, 128 "Device in failed state, exiting.\n"); 129 return QLA_FUNCTION_TIMEOUT; 130 } 131 132 /* if PCI error, then avoid mbx processing.*/ 133 if (test_bit(PFLG_DISCONNECTED, &base_vha->dpc_flags) && 134 test_bit(UNLOADING, &base_vha->dpc_flags)) { 135 ql_log(ql_log_warn, vha, 0xd04e, 136 "PCI error, exiting.\n"); 137 return QLA_FUNCTION_TIMEOUT; 138 } 139 140 reg = ha->iobase; 141 io_lock_on = base_vha->flags.init_done; 142 143 rval = QLA_SUCCESS; 144 abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); 145 chip_reset = ha->chip_reset; 146 147 if (ha->flags.pci_channel_io_perm_failure) { 148 ql_log(ql_log_warn, vha, 0x1003, 149 "Perm failure on EEH timeout MBX, exiting.\n"); 150 return QLA_FUNCTION_TIMEOUT; 151 } 152 153 if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) { 154 /* Setting Link-Down error */ 155 mcp->mb[0] = MBS_LINK_DOWN_ERROR; 156 ql_log(ql_log_warn, vha, 0x1004, 157 "FW hung = %d.\n", ha->flags.isp82xx_fw_hung); 158 return QLA_FUNCTION_TIMEOUT; 159 } 160 161 /* check if ISP abort is active and return cmd with timeout */ 162 if ((test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) || 163 test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) || 164 test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) && 165 !is_rom_cmd(mcp->mb[0])) { 166 ql_log(ql_log_info, vha, 0x1005, 167 "Cmd 0x%x aborted with timeout since ISP Abort is pending\n", 168 mcp->mb[0]); 169 return QLA_FUNCTION_TIMEOUT; 170 } 171 172 atomic_inc(&ha->num_pend_mbx_stage1); 173 /* 174 * Wait for active mailbox commands to finish by waiting at most tov 175 * seconds. This is to serialize actual issuing of mailbox cmds during 176 * non ISP abort time. 177 */ 178 if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) { 179 /* Timeout occurred. Return error. */ 180 ql_log(ql_log_warn, vha, 0xd035, 181 "Cmd access timeout, cmd=0x%x, Exiting.\n", 182 mcp->mb[0]); 183 atomic_dec(&ha->num_pend_mbx_stage1); 184 return QLA_FUNCTION_TIMEOUT; 185 } 186 atomic_dec(&ha->num_pend_mbx_stage1); 187 if (ha->flags.purge_mbox || chip_reset != ha->chip_reset) { 188 rval = QLA_ABORTED; 189 goto premature_exit; 190 } 191 192 193 /* Save mailbox command for debug */ 194 ha->mcp = mcp; 195 196 ql_dbg(ql_dbg_mbx, vha, 0x1006, 197 "Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]); 198 199 spin_lock_irqsave(&ha->hardware_lock, flags); 200 201 if (ha->flags.purge_mbox || chip_reset != ha->chip_reset || 202 ha->flags.mbox_busy) { 203 rval = QLA_ABORTED; 204 spin_unlock_irqrestore(&ha->hardware_lock, flags); 205 goto premature_exit; 206 } 207 ha->flags.mbox_busy = 1; 208 209 /* Load mailbox registers. 
*/ 210 if (IS_P3P_TYPE(ha)) 211 optr = ®->isp82.mailbox_in[0]; 212 else if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha))) 213 optr = ®->isp24.mailbox0; 214 else 215 optr = MAILBOX_REG(ha, ®->isp, 0); 216 217 iptr = mcp->mb; 218 command = mcp->mb[0]; 219 mboxes = mcp->out_mb; 220 221 ql_dbg(ql_dbg_mbx, vha, 0x1111, 222 "Mailbox registers (OUT):\n"); 223 for (cnt = 0; cnt < ha->mbx_count; cnt++) { 224 if (IS_QLA2200(ha) && cnt == 8) 225 optr = MAILBOX_REG(ha, ®->isp, 8); 226 if (mboxes & BIT_0) { 227 ql_dbg(ql_dbg_mbx, vha, 0x1112, 228 "mbox[%d]<-0x%04x\n", cnt, *iptr); 229 wrt_reg_word(optr, *iptr); 230 } 231 232 mboxes >>= 1; 233 optr++; 234 iptr++; 235 } 236 237 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1117, 238 "I/O Address = %p.\n", optr); 239 240 /* Issue set host interrupt command to send cmd out. */ 241 ha->flags.mbox_int = 0; 242 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); 243 244 /* Unlock mbx registers and wait for interrupt */ 245 ql_dbg(ql_dbg_mbx, vha, 0x100f, 246 "Going to unlock irq & waiting for interrupts. " 247 "jiffies=%lx.\n", jiffies); 248 249 /* Wait for mbx cmd completion until timeout */ 250 atomic_inc(&ha->num_pend_mbx_stage2); 251 if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) { 252 set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags); 253 254 if (IS_P3P_TYPE(ha)) 255 wrt_reg_dword(®->isp82.hint, HINT_MBX_INT_PENDING); 256 else if (IS_FWI2_CAPABLE(ha)) 257 wrt_reg_dword(®->isp24.hccr, HCCRX_SET_HOST_INT); 258 else 259 wrt_reg_word(®->isp.hccr, HCCR_SET_HOST_INT); 260 spin_unlock_irqrestore(&ha->hardware_lock, flags); 261 262 wait_time = jiffies; 263 atomic_inc(&ha->num_pend_mbx_stage3); 264 if (!wait_for_completion_timeout(&ha->mbx_intr_comp, 265 mcp->tov * HZ)) { 266 if (chip_reset != ha->chip_reset) { 267 spin_lock_irqsave(&ha->hardware_lock, flags); 268 ha->flags.mbox_busy = 0; 269 spin_unlock_irqrestore(&ha->hardware_lock, 270 flags); 271 atomic_dec(&ha->num_pend_mbx_stage2); 272 atomic_dec(&ha->num_pend_mbx_stage3); 273 rval = QLA_ABORTED; 274 goto premature_exit; 275 } 276 ql_dbg(ql_dbg_mbx, vha, 0x117a, 277 "cmd=%x Timeout.\n", command); 278 spin_lock_irqsave(&ha->hardware_lock, flags); 279 clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags); 280 spin_unlock_irqrestore(&ha->hardware_lock, flags); 281 282 } else if (ha->flags.purge_mbox || 283 chip_reset != ha->chip_reset) { 284 spin_lock_irqsave(&ha->hardware_lock, flags); 285 ha->flags.mbox_busy = 0; 286 spin_unlock_irqrestore(&ha->hardware_lock, flags); 287 atomic_dec(&ha->num_pend_mbx_stage2); 288 atomic_dec(&ha->num_pend_mbx_stage3); 289 rval = QLA_ABORTED; 290 goto premature_exit; 291 } 292 atomic_dec(&ha->num_pend_mbx_stage3); 293 294 if (time_after(jiffies, wait_time + 5 * HZ)) 295 ql_log(ql_log_warn, vha, 0x1015, "cmd=0x%x, waited %d msecs\n", 296 command, jiffies_to_msecs(jiffies - wait_time)); 297 } else { 298 ql_dbg(ql_dbg_mbx, vha, 0x1011, 299 "Cmd=%x Polling Mode.\n", command); 300 301 if (IS_P3P_TYPE(ha)) { 302 if (rd_reg_dword(®->isp82.hint) & 303 HINT_MBX_INT_PENDING) { 304 ha->flags.mbox_busy = 0; 305 spin_unlock_irqrestore(&ha->hardware_lock, 306 flags); 307 atomic_dec(&ha->num_pend_mbx_stage2); 308 ql_dbg(ql_dbg_mbx, vha, 0x1012, 309 "Pending mailbox timeout, exiting.\n"); 310 rval = QLA_FUNCTION_TIMEOUT; 311 goto premature_exit; 312 } 313 wrt_reg_dword(®->isp82.hint, HINT_MBX_INT_PENDING); 314 } else if (IS_FWI2_CAPABLE(ha)) 315 wrt_reg_dword(®->isp24.hccr, HCCRX_SET_HOST_INT); 316 else 317 wrt_reg_word(®->isp.hccr, HCCR_SET_HOST_INT); 318 spin_unlock_irqrestore(&ha->hardware_lock, flags); 
319 320 wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */ 321 while (!ha->flags.mbox_int) { 322 if (ha->flags.purge_mbox || 323 chip_reset != ha->chip_reset) { 324 spin_lock_irqsave(&ha->hardware_lock, flags); 325 ha->flags.mbox_busy = 0; 326 spin_unlock_irqrestore(&ha->hardware_lock, 327 flags); 328 atomic_dec(&ha->num_pend_mbx_stage2); 329 rval = QLA_ABORTED; 330 goto premature_exit; 331 } 332 333 if (time_after(jiffies, wait_time)) 334 break; 335 336 /* 337 * Check if it's UNLOADING, cause we cannot poll in 338 * this case, or else a NULL pointer dereference 339 * is triggered. 340 */ 341 if (unlikely(test_bit(UNLOADING, &base_vha->dpc_flags))) 342 return QLA_FUNCTION_TIMEOUT; 343 344 /* Check for pending interrupts. */ 345 qla2x00_poll(ha->rsp_q_map[0]); 346 347 if (!ha->flags.mbox_int && 348 !(IS_QLA2200(ha) && 349 command == MBC_LOAD_RISC_RAM_EXTENDED)) 350 msleep(10); 351 } /* while */ 352 ql_dbg(ql_dbg_mbx, vha, 0x1013, 353 "Waited %d sec.\n", 354 (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ)); 355 } 356 atomic_dec(&ha->num_pend_mbx_stage2); 357 358 /* Check whether we timed out */ 359 if (ha->flags.mbox_int) { 360 uint16_t *iptr2; 361 362 ql_dbg(ql_dbg_mbx, vha, 0x1014, 363 "Cmd=%x completed.\n", command); 364 365 /* Got interrupt. Clear the flag. */ 366 ha->flags.mbox_int = 0; 367 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); 368 369 if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) { 370 spin_lock_irqsave(&ha->hardware_lock, flags); 371 ha->flags.mbox_busy = 0; 372 spin_unlock_irqrestore(&ha->hardware_lock, flags); 373 374 /* Setting Link-Down error */ 375 mcp->mb[0] = MBS_LINK_DOWN_ERROR; 376 ha->mcp = NULL; 377 rval = QLA_FUNCTION_FAILED; 378 ql_log(ql_log_warn, vha, 0xd048, 379 "FW hung = %d.\n", ha->flags.isp82xx_fw_hung); 380 goto premature_exit; 381 } 382 383 if (ha->mailbox_out[0] != MBS_COMMAND_COMPLETE) { 384 ql_dbg(ql_dbg_mbx, vha, 0x11ff, 385 "mb_out[0] = %#x <> %#x\n", ha->mailbox_out[0], 386 MBS_COMMAND_COMPLETE); 387 rval = QLA_FUNCTION_FAILED; 388 } 389 390 /* Load return mailbox registers. 
*/ 391 iptr2 = mcp->mb; 392 iptr = (uint16_t *)&ha->mailbox_out[0]; 393 mboxes = mcp->in_mb; 394 395 ql_dbg(ql_dbg_mbx, vha, 0x1113, 396 "Mailbox registers (IN):\n"); 397 for (cnt = 0; cnt < ha->mbx_count; cnt++) { 398 if (mboxes & BIT_0) { 399 *iptr2 = *iptr; 400 ql_dbg(ql_dbg_mbx, vha, 0x1114, 401 "mbox[%d]->0x%04x\n", cnt, *iptr2); 402 } 403 404 mboxes >>= 1; 405 iptr2++; 406 iptr++; 407 } 408 } else { 409 410 uint16_t mb[8]; 411 uint32_t ictrl, host_status, hccr; 412 uint16_t w; 413 414 if (IS_FWI2_CAPABLE(ha)) { 415 mb[0] = rd_reg_word(®->isp24.mailbox0); 416 mb[1] = rd_reg_word(®->isp24.mailbox1); 417 mb[2] = rd_reg_word(®->isp24.mailbox2); 418 mb[3] = rd_reg_word(®->isp24.mailbox3); 419 mb[7] = rd_reg_word(®->isp24.mailbox7); 420 ictrl = rd_reg_dword(®->isp24.ictrl); 421 host_status = rd_reg_dword(®->isp24.host_status); 422 hccr = rd_reg_dword(®->isp24.hccr); 423 424 ql_log(ql_log_warn, vha, 0xd04c, 425 "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx " 426 "mb[0-3]=[0x%x 0x%x 0x%x 0x%x] mb7 0x%x host_status 0x%x hccr 0x%x\n", 427 command, ictrl, jiffies, mb[0], mb[1], mb[2], mb[3], 428 mb[7], host_status, hccr); 429 430 } else { 431 mb[0] = RD_MAILBOX_REG(ha, ®->isp, 0); 432 ictrl = rd_reg_word(®->isp.ictrl); 433 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119, 434 "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx " 435 "mb[0]=0x%x\n", command, ictrl, jiffies, mb[0]); 436 } 437 ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019); 438 439 /* Capture FW dump only, if PCI device active */ 440 if (!pci_channel_offline(vha->hw->pdev)) { 441 pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w); 442 if (w == 0xffff || ictrl == 0xffffffff || 443 (chip_reset != ha->chip_reset)) { 444 /* This is special case if there is unload 445 * of driver happening and if PCI device go 446 * into bad state due to PCI error condition 447 * then only PCI ERR flag would be set. 448 * we will do premature exit for above case. 449 */ 450 spin_lock_irqsave(&ha->hardware_lock, flags); 451 ha->flags.mbox_busy = 0; 452 spin_unlock_irqrestore(&ha->hardware_lock, 453 flags); 454 rval = QLA_FUNCTION_TIMEOUT; 455 goto premature_exit; 456 } 457 458 /* Attempt to capture firmware dump for further 459 * anallysis of the current formware state. we do not 460 * need to do this if we are intentionally generating 461 * a dump 462 */ 463 if (mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) 464 qla2xxx_dump_fw(vha); 465 rval = QLA_FUNCTION_TIMEOUT; 466 } 467 } 468 spin_lock_irqsave(&ha->hardware_lock, flags); 469 ha->flags.mbox_busy = 0; 470 spin_unlock_irqrestore(&ha->hardware_lock, flags); 471 472 /* Clean up */ 473 ha->mcp = NULL; 474 475 if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) { 476 ql_dbg(ql_dbg_mbx, vha, 0x101a, 477 "Checking for additional resp interrupt.\n"); 478 479 /* polling mode for non isp_abort commands. */ 480 qla2x00_poll(ha->rsp_q_map[0]); 481 } 482 483 if (rval == QLA_FUNCTION_TIMEOUT && 484 mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) { 485 if (!io_lock_on || (mcp->flags & IOCTL_CMD) || 486 ha->flags.eeh_busy) { 487 /* not in dpc. schedule it for dpc to take over. 
*/ 488 ql_dbg(ql_dbg_mbx, vha, 0x101b, 489 "Timeout, schedule isp_abort_needed.\n"); 490 491 if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) && 492 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) && 493 !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) { 494 if (IS_QLA82XX(ha)) { 495 ql_dbg(ql_dbg_mbx, vha, 0x112a, 496 "disabling pause transmit on port " 497 "0 & 1.\n"); 498 qla82xx_wr_32(ha, 499 QLA82XX_CRB_NIU + 0x98, 500 CRB_NIU_XG_PAUSE_CTL_P0| 501 CRB_NIU_XG_PAUSE_CTL_P1); 502 } 503 ql_log(ql_log_info, base_vha, 0x101c, 504 "Mailbox cmd timeout occurred, cmd=0x%x, " 505 "mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP " 506 "abort.\n", command, mcp->mb[0], 507 ha->flags.eeh_busy); 508 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 509 qla2xxx_wake_dpc(vha); 510 } 511 } else if (current == ha->dpc_thread) { 512 /* call abort directly since we are in the DPC thread */ 513 ql_dbg(ql_dbg_mbx, vha, 0x101d, 514 "Timeout, calling abort_isp.\n"); 515 516 if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) && 517 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) && 518 !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) { 519 if (IS_QLA82XX(ha)) { 520 ql_dbg(ql_dbg_mbx, vha, 0x112b, 521 "disabling pause transmit on port " 522 "0 & 1.\n"); 523 qla82xx_wr_32(ha, 524 QLA82XX_CRB_NIU + 0x98, 525 CRB_NIU_XG_PAUSE_CTL_P0| 526 CRB_NIU_XG_PAUSE_CTL_P1); 527 } 528 ql_log(ql_log_info, base_vha, 0x101e, 529 "Mailbox cmd timeout occurred, cmd=0x%x, " 530 "mb[0]=0x%x. Scheduling ISP abort ", 531 command, mcp->mb[0]); 532 set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags); 533 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 534 /* Allow next mbx cmd to come in. */ 535 complete(&ha->mbx_cmd_comp); 536 if (ha->isp_ops->abort_isp(vha)) { 537 /* Failed. retry later. */ 538 set_bit(ISP_ABORT_NEEDED, 539 &vha->dpc_flags); 540 } 541 clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags); 542 ql_dbg(ql_dbg_mbx, vha, 0x101f, 543 "Finished abort_isp.\n"); 544 goto mbx_done; 545 } 546 } 547 } 548 549 premature_exit: 550 /* Allow next mbx cmd to come in. */ 551 complete(&ha->mbx_cmd_comp); 552 553 mbx_done: 554 if (rval == QLA_ABORTED) { 555 ql_log(ql_log_info, vha, 0xd035, 556 "Chip Reset in progress. 
Purging Mbox cmd=0x%x.\n", 557 mcp->mb[0]); 558 } else if (rval) { 559 if (ql2xextended_error_logging & (ql_dbg_disc|ql_dbg_mbx)) { 560 pr_warn("%s [%s]-%04x:%ld: **** Failed=%x", QL_MSGHDR, 561 dev_name(&ha->pdev->dev), 0x1020+0x800, 562 vha->host_no, rval); 563 mboxes = mcp->in_mb; 564 cnt = 4; 565 for (i = 0; i < ha->mbx_count && cnt; i++, mboxes >>= 1) 566 if (mboxes & BIT_0) { 567 printk(" mb[%u]=%x", i, mcp->mb[i]); 568 cnt--; 569 } 570 pr_warn(" cmd=%x ****\n", command); 571 } 572 if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha))) { 573 ql_dbg(ql_dbg_mbx, vha, 0x1198, 574 "host_status=%#x intr_ctrl=%#x intr_status=%#x\n", 575 rd_reg_dword(®->isp24.host_status), 576 rd_reg_dword(®->isp24.ictrl), 577 rd_reg_dword(®->isp24.istatus)); 578 } else { 579 ql_dbg(ql_dbg_mbx, vha, 0x1206, 580 "ctrl_status=%#x ictrl=%#x istatus=%#x\n", 581 rd_reg_word(®->isp.ctrl_status), 582 rd_reg_word(®->isp.ictrl), 583 rd_reg_word(®->isp.istatus)); 584 } 585 } else { 586 ql_dbg(ql_dbg_mbx, base_vha, 0x1021, "Done %s.\n", __func__); 587 } 588 589 return rval; 590 } 591 592 int 593 qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr, 594 uint32_t risc_code_size) 595 { 596 int rval; 597 struct qla_hw_data *ha = vha->hw; 598 mbx_cmd_t mc; 599 mbx_cmd_t *mcp = &mc; 600 601 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1022, 602 "Entered %s.\n", __func__); 603 604 if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) { 605 mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED; 606 mcp->mb[8] = MSW(risc_addr); 607 mcp->out_mb = MBX_8|MBX_0; 608 } else { 609 mcp->mb[0] = MBC_LOAD_RISC_RAM; 610 mcp->out_mb = MBX_0; 611 } 612 mcp->mb[1] = LSW(risc_addr); 613 mcp->mb[2] = MSW(req_dma); 614 mcp->mb[3] = LSW(req_dma); 615 mcp->mb[6] = MSW(MSD(req_dma)); 616 mcp->mb[7] = LSW(MSD(req_dma)); 617 mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1; 618 if (IS_FWI2_CAPABLE(ha)) { 619 mcp->mb[4] = MSW(risc_code_size); 620 mcp->mb[5] = LSW(risc_code_size); 621 mcp->out_mb |= MBX_5|MBX_4; 622 } else { 623 mcp->mb[4] = LSW(risc_code_size); 624 mcp->out_mb |= MBX_4; 625 } 626 627 mcp->in_mb = MBX_1|MBX_0; 628 mcp->tov = MBX_TOV_SECONDS; 629 mcp->flags = 0; 630 rval = qla2x00_mailbox_command(vha, mcp); 631 632 if (rval != QLA_SUCCESS) { 633 ql_dbg(ql_dbg_mbx, vha, 0x1023, 634 "Failed=%x mb[0]=%x mb[1]=%x.\n", 635 rval, mcp->mb[0], mcp->mb[1]); 636 } else { 637 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1024, 638 "Done %s.\n", __func__); 639 } 640 641 return rval; 642 } 643 644 #define NVME_ENABLE_FLAG BIT_3 645 646 /* 647 * qla2x00_execute_fw 648 * Start adapter firmware. 649 * 650 * Input: 651 * ha = adapter block pointer. 652 * TARGET_QUEUE_LOCK must be released. 653 * ADAPTER_STATE_LOCK must be released. 654 * 655 * Returns: 656 * qla2x00 local function return status code. 657 * 658 * Context: 659 * Kernel context. 660 */ 661 int 662 qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr) 663 { 664 int rval; 665 struct qla_hw_data *ha = vha->hw; 666 mbx_cmd_t mc; 667 mbx_cmd_t *mcp = &mc; 668 u8 semaphore = 0; 669 #define EXE_FW_FORCE_SEMAPHORE BIT_7 670 u8 retry = 3; 671 672 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1025, 673 "Entered %s.\n", __func__); 674 675 again: 676 mcp->mb[0] = MBC_EXECUTE_FIRMWARE; 677 mcp->out_mb = MBX_0; 678 mcp->in_mb = MBX_0; 679 if (IS_FWI2_CAPABLE(ha)) { 680 mcp->mb[1] = MSW(risc_addr); 681 mcp->mb[2] = LSW(risc_addr); 682 mcp->mb[3] = 0; 683 mcp->mb[4] = 0; 684 mcp->mb[11] = 0; 685 686 /* Enable BPM? 
*/ 687 if (ha->flags.lr_detected) { 688 mcp->mb[4] = BIT_0; 689 if (IS_BPM_RANGE_CAPABLE(ha)) 690 mcp->mb[4] |= 691 ha->lr_distance << LR_DIST_FW_POS; 692 } 693 694 if (ql2xnvmeenable && (IS_QLA27XX(ha) || IS_QLA28XX(ha))) 695 mcp->mb[4] |= NVME_ENABLE_FLAG; 696 697 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 698 struct nvram_81xx *nv = ha->nvram; 699 /* set minimum speed if specified in nvram */ 700 if (nv->min_supported_speed >= 2 && 701 nv->min_supported_speed <= 5) { 702 mcp->mb[4] |= BIT_4; 703 mcp->mb[11] |= nv->min_supported_speed & 0xF; 704 mcp->out_mb |= MBX_11; 705 mcp->in_mb |= BIT_5; 706 vha->min_supported_speed = 707 nv->min_supported_speed; 708 } 709 } 710 711 if (ha->flags.exlogins_enabled) 712 mcp->mb[4] |= ENABLE_EXTENDED_LOGIN; 713 714 if (ha->flags.exchoffld_enabled) 715 mcp->mb[4] |= ENABLE_EXCHANGE_OFFLD; 716 717 if (semaphore) 718 mcp->mb[11] |= EXE_FW_FORCE_SEMAPHORE; 719 720 mcp->out_mb |= MBX_4 | MBX_3 | MBX_2 | MBX_1 | MBX_11; 721 mcp->in_mb |= MBX_3 | MBX_2 | MBX_1; 722 } else { 723 mcp->mb[1] = LSW(risc_addr); 724 mcp->out_mb |= MBX_1; 725 if (IS_QLA2322(ha) || IS_QLA6322(ha)) { 726 mcp->mb[2] = 0; 727 mcp->out_mb |= MBX_2; 728 } 729 } 730 731 mcp->tov = MBX_TOV_SECONDS; 732 mcp->flags = 0; 733 rval = qla2x00_mailbox_command(vha, mcp); 734 735 if (rval != QLA_SUCCESS) { 736 if (IS_QLA28XX(ha) && rval == QLA_COMMAND_ERROR && 737 mcp->mb[1] == 0x27 && retry) { 738 semaphore = 1; 739 retry--; 740 ql_dbg(ql_dbg_async, vha, 0x1026, 741 "Exe FW: force semaphore.\n"); 742 goto again; 743 } 744 745 ql_dbg(ql_dbg_mbx, vha, 0x1026, 746 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 747 return rval; 748 } 749 750 if (!IS_FWI2_CAPABLE(ha)) 751 goto done; 752 753 ha->fw_ability_mask = mcp->mb[3] << 16 | mcp->mb[2]; 754 ql_dbg(ql_dbg_mbx, vha, 0x119a, 755 "fw_ability_mask=%x.\n", ha->fw_ability_mask); 756 ql_dbg(ql_dbg_mbx, vha, 0x1027, "exchanges=%x.\n", mcp->mb[1]); 757 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 758 ha->max_supported_speed = mcp->mb[2] & (BIT_0|BIT_1); 759 ql_dbg(ql_dbg_mbx, vha, 0x119b, "max_supported_speed=%s.\n", 760 ha->max_supported_speed == 0 ? "16Gps" : 761 ha->max_supported_speed == 1 ? "32Gps" : 762 ha->max_supported_speed == 2 ? "64Gps" : "unknown"); 763 if (vha->min_supported_speed) { 764 ha->min_supported_speed = mcp->mb[5] & 765 (BIT_0 | BIT_1 | BIT_2); 766 ql_dbg(ql_dbg_mbx, vha, 0x119c, 767 "min_supported_speed=%s.\n", 768 ha->min_supported_speed == 6 ? "64Gps" : 769 ha->min_supported_speed == 5 ? "32Gps" : 770 ha->min_supported_speed == 4 ? "16Gps" : 771 ha->min_supported_speed == 3 ? "8Gps" : 772 ha->min_supported_speed == 2 ? "4Gps" : "unknown"); 773 } 774 } 775 776 done: 777 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1028, 778 "Done %s.\n", __func__); 779 780 return rval; 781 } 782 783 /* 784 * qla_get_exlogin_status 785 * Get extended login status 786 * uses the memory offload control/status Mailbox 787 * 788 * Input: 789 * ha: adapter state pointer. 790 * fwopt: firmware options 791 * 792 * Returns: 793 * qla2x00 local function status 794 * 795 * Context: 796 * Kernel context. 
797 */ 798 #define FETCH_XLOGINS_STAT 0x8 799 int 800 qla_get_exlogin_status(scsi_qla_host_t *vha, uint16_t *buf_sz, 801 uint16_t *ex_logins_cnt) 802 { 803 int rval; 804 mbx_cmd_t mc; 805 mbx_cmd_t *mcp = &mc; 806 807 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118f, 808 "Entered %s\n", __func__); 809 810 memset(mcp->mb, 0 , sizeof(mcp->mb)); 811 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT; 812 mcp->mb[1] = FETCH_XLOGINS_STAT; 813 mcp->out_mb = MBX_1|MBX_0; 814 mcp->in_mb = MBX_10|MBX_4|MBX_0; 815 mcp->tov = MBX_TOV_SECONDS; 816 mcp->flags = 0; 817 818 rval = qla2x00_mailbox_command(vha, mcp); 819 if (rval != QLA_SUCCESS) { 820 ql_dbg(ql_dbg_mbx, vha, 0x1115, "Failed=%x.\n", rval); 821 } else { 822 *buf_sz = mcp->mb[4]; 823 *ex_logins_cnt = mcp->mb[10]; 824 825 ql_log(ql_log_info, vha, 0x1190, 826 "buffer size 0x%x, exchange login count=%d\n", 827 mcp->mb[4], mcp->mb[10]); 828 829 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1116, 830 "Done %s.\n", __func__); 831 } 832 833 return rval; 834 } 835 836 /* 837 * qla_set_exlogin_mem_cfg 838 * set extended login memory configuration 839 * Mbx needs to be issues before init_cb is set 840 * 841 * Input: 842 * ha: adapter state pointer. 843 * buffer: buffer pointer 844 * phys_addr: physical address of buffer 845 * size: size of buffer 846 * TARGET_QUEUE_LOCK must be released 847 * ADAPTER_STATE_LOCK must be release 848 * 849 * Returns: 850 * qla2x00 local funxtion status code. 851 * 852 * Context: 853 * Kernel context. 854 */ 855 #define CONFIG_XLOGINS_MEM 0x3 856 int 857 qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr) 858 { 859 int rval; 860 mbx_cmd_t mc; 861 mbx_cmd_t *mcp = &mc; 862 struct qla_hw_data *ha = vha->hw; 863 864 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111a, 865 "Entered %s.\n", __func__); 866 867 memset(mcp->mb, 0 , sizeof(mcp->mb)); 868 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT; 869 mcp->mb[1] = CONFIG_XLOGINS_MEM; 870 mcp->mb[2] = MSW(phys_addr); 871 mcp->mb[3] = LSW(phys_addr); 872 mcp->mb[6] = MSW(MSD(phys_addr)); 873 mcp->mb[7] = LSW(MSD(phys_addr)); 874 mcp->mb[8] = MSW(ha->exlogin_size); 875 mcp->mb[9] = LSW(ha->exlogin_size); 876 mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 877 mcp->in_mb = MBX_11|MBX_0; 878 mcp->tov = MBX_TOV_SECONDS; 879 mcp->flags = 0; 880 rval = qla2x00_mailbox_command(vha, mcp); 881 if (rval != QLA_SUCCESS) { 882 /*EMPTY*/ 883 ql_dbg(ql_dbg_mbx, vha, 0x111b, "Failed=%x.\n", rval); 884 } else { 885 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c, 886 "Done %s.\n", __func__); 887 } 888 889 return rval; 890 } 891 892 /* 893 * qla_get_exchoffld_status 894 * Get exchange offload status 895 * uses the memory offload control/status Mailbox 896 * 897 * Input: 898 * ha: adapter state pointer. 899 * fwopt: firmware options 900 * 901 * Returns: 902 * qla2x00 local function status 903 * 904 * Context: 905 * Kernel context. 
906 */ 907 #define FETCH_XCHOFFLD_STAT 0x2 908 int 909 qla_get_exchoffld_status(scsi_qla_host_t *vha, uint16_t *buf_sz, 910 uint16_t *ex_logins_cnt) 911 { 912 int rval; 913 mbx_cmd_t mc; 914 mbx_cmd_t *mcp = &mc; 915 916 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1019, 917 "Entered %s\n", __func__); 918 919 memset(mcp->mb, 0 , sizeof(mcp->mb)); 920 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT; 921 mcp->mb[1] = FETCH_XCHOFFLD_STAT; 922 mcp->out_mb = MBX_1|MBX_0; 923 mcp->in_mb = MBX_10|MBX_4|MBX_0; 924 mcp->tov = MBX_TOV_SECONDS; 925 mcp->flags = 0; 926 927 rval = qla2x00_mailbox_command(vha, mcp); 928 if (rval != QLA_SUCCESS) { 929 ql_dbg(ql_dbg_mbx, vha, 0x1155, "Failed=%x.\n", rval); 930 } else { 931 *buf_sz = mcp->mb[4]; 932 *ex_logins_cnt = mcp->mb[10]; 933 934 ql_log(ql_log_info, vha, 0x118e, 935 "buffer size 0x%x, exchange offload count=%d\n", 936 mcp->mb[4], mcp->mb[10]); 937 938 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1156, 939 "Done %s.\n", __func__); 940 } 941 942 return rval; 943 } 944 945 /* 946 * qla_set_exchoffld_mem_cfg 947 * Set exchange offload memory configuration 948 * Mbx needs to be issues before init_cb is set 949 * 950 * Input: 951 * ha: adapter state pointer. 952 * buffer: buffer pointer 953 * phys_addr: physical address of buffer 954 * size: size of buffer 955 * TARGET_QUEUE_LOCK must be released 956 * ADAPTER_STATE_LOCK must be release 957 * 958 * Returns: 959 * qla2x00 local funxtion status code. 960 * 961 * Context: 962 * Kernel context. 963 */ 964 #define CONFIG_XCHOFFLD_MEM 0x3 965 int 966 qla_set_exchoffld_mem_cfg(scsi_qla_host_t *vha) 967 { 968 int rval; 969 mbx_cmd_t mc; 970 mbx_cmd_t *mcp = &mc; 971 struct qla_hw_data *ha = vha->hw; 972 973 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1157, 974 "Entered %s.\n", __func__); 975 976 memset(mcp->mb, 0 , sizeof(mcp->mb)); 977 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT; 978 mcp->mb[1] = CONFIG_XCHOFFLD_MEM; 979 mcp->mb[2] = MSW(ha->exchoffld_buf_dma); 980 mcp->mb[3] = LSW(ha->exchoffld_buf_dma); 981 mcp->mb[6] = MSW(MSD(ha->exchoffld_buf_dma)); 982 mcp->mb[7] = LSW(MSD(ha->exchoffld_buf_dma)); 983 mcp->mb[8] = MSW(ha->exchoffld_size); 984 mcp->mb[9] = LSW(ha->exchoffld_size); 985 mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 986 mcp->in_mb = MBX_11|MBX_0; 987 mcp->tov = MBX_TOV_SECONDS; 988 mcp->flags = 0; 989 rval = qla2x00_mailbox_command(vha, mcp); 990 if (rval != QLA_SUCCESS) { 991 /*EMPTY*/ 992 ql_dbg(ql_dbg_mbx, vha, 0x1158, "Failed=%x.\n", rval); 993 } else { 994 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1192, 995 "Done %s.\n", __func__); 996 } 997 998 return rval; 999 } 1000 1001 /* 1002 * qla2x00_get_fw_version 1003 * Get firmware version. 1004 * 1005 * Input: 1006 * ha: adapter state pointer. 1007 * major: pointer for major number. 1008 * minor: pointer for minor number. 1009 * subminor: pointer for subminor number. 1010 * 1011 * Returns: 1012 * qla2x00 local function return status code. 1013 * 1014 * Context: 1015 * Kernel context. 
1016 */ 1017 int 1018 qla2x00_get_fw_version(scsi_qla_host_t *vha) 1019 { 1020 int rval; 1021 mbx_cmd_t mc; 1022 mbx_cmd_t *mcp = &mc; 1023 struct qla_hw_data *ha = vha->hw; 1024 1025 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1029, 1026 "Entered %s.\n", __func__); 1027 1028 mcp->mb[0] = MBC_GET_FIRMWARE_VERSION; 1029 mcp->out_mb = MBX_0; 1030 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 1031 if (IS_QLA81XX(vha->hw) || IS_QLA8031(ha) || IS_QLA8044(ha)) 1032 mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8; 1033 if (IS_FWI2_CAPABLE(ha)) 1034 mcp->in_mb |= MBX_17|MBX_16|MBX_15; 1035 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) 1036 mcp->in_mb |= 1037 MBX_25|MBX_24|MBX_23|MBX_22|MBX_21|MBX_20|MBX_19|MBX_18| 1038 MBX_14|MBX_13|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7; 1039 1040 mcp->flags = 0; 1041 mcp->tov = MBX_TOV_SECONDS; 1042 rval = qla2x00_mailbox_command(vha, mcp); 1043 if (rval != QLA_SUCCESS) 1044 goto failed; 1045 1046 /* Return mailbox data. */ 1047 ha->fw_major_version = mcp->mb[1]; 1048 ha->fw_minor_version = mcp->mb[2]; 1049 ha->fw_subminor_version = mcp->mb[3]; 1050 ha->fw_attributes = mcp->mb[6]; 1051 if (IS_QLA2100(vha->hw) || IS_QLA2200(vha->hw)) 1052 ha->fw_memory_size = 0x1FFFF; /* Defaults to 128KB. */ 1053 else 1054 ha->fw_memory_size = (mcp->mb[5] << 16) | mcp->mb[4]; 1055 1056 if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) || IS_QLA8044(ha)) { 1057 ha->mpi_version[0] = mcp->mb[10] & 0xff; 1058 ha->mpi_version[1] = mcp->mb[11] >> 8; 1059 ha->mpi_version[2] = mcp->mb[11] & 0xff; 1060 ha->mpi_capabilities = (mcp->mb[12] << 16) | mcp->mb[13]; 1061 ha->phy_version[0] = mcp->mb[8] & 0xff; 1062 ha->phy_version[1] = mcp->mb[9] >> 8; 1063 ha->phy_version[2] = mcp->mb[9] & 0xff; 1064 } 1065 1066 if (IS_FWI2_CAPABLE(ha)) { 1067 ha->fw_attributes_h = mcp->mb[15]; 1068 ha->fw_attributes_ext[0] = mcp->mb[16]; 1069 ha->fw_attributes_ext[1] = mcp->mb[17]; 1070 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1139, 1071 "%s: FW_attributes Upper: 0x%x, Lower: 0x%x.\n", 1072 __func__, mcp->mb[15], mcp->mb[6]); 1073 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x112f, 1074 "%s: Ext_FwAttributes Upper: 0x%x, Lower: 0x%x.\n", 1075 __func__, mcp->mb[17], mcp->mb[16]); 1076 1077 if (ha->fw_attributes_h & 0x4) 1078 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118d, 1079 "%s: Firmware supports Extended Login 0x%x\n", 1080 __func__, ha->fw_attributes_h); 1081 1082 if (ha->fw_attributes_h & 0x8) 1083 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1191, 1084 "%s: Firmware supports Exchange Offload 0x%x\n", 1085 __func__, ha->fw_attributes_h); 1086 1087 /* 1088 * FW supports nvme and driver load parameter requested nvme. 1089 * BIT 26 of fw_attributes indicates NVMe support. 
1090 */ 1091 if ((ha->fw_attributes_h & 1092 (FW_ATTR_H_NVME | FW_ATTR_H_NVME_UPDATED)) && 1093 ql2xnvmeenable) { 1094 if (ha->fw_attributes_h & FW_ATTR_H_NVME_FBURST) 1095 vha->flags.nvme_first_burst = 1; 1096 1097 vha->flags.nvme_enabled = 1; 1098 ql_log(ql_log_info, vha, 0xd302, 1099 "%s: FC-NVMe is Enabled (0x%x)\n", 1100 __func__, ha->fw_attributes_h); 1101 } 1102 } 1103 1104 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 1105 ha->serdes_version[0] = mcp->mb[7] & 0xff; 1106 ha->serdes_version[1] = mcp->mb[8] >> 8; 1107 ha->serdes_version[2] = mcp->mb[8] & 0xff; 1108 ha->mpi_version[0] = mcp->mb[10] & 0xff; 1109 ha->mpi_version[1] = mcp->mb[11] >> 8; 1110 ha->mpi_version[2] = mcp->mb[11] & 0xff; 1111 ha->pep_version[0] = mcp->mb[13] & 0xff; 1112 ha->pep_version[1] = mcp->mb[14] >> 8; 1113 ha->pep_version[2] = mcp->mb[14] & 0xff; 1114 ha->fw_shared_ram_start = (mcp->mb[19] << 16) | mcp->mb[18]; 1115 ha->fw_shared_ram_end = (mcp->mb[21] << 16) | mcp->mb[20]; 1116 ha->fw_ddr_ram_start = (mcp->mb[23] << 16) | mcp->mb[22]; 1117 ha->fw_ddr_ram_end = (mcp->mb[25] << 16) | mcp->mb[24]; 1118 if (IS_QLA28XX(ha)) { 1119 if (mcp->mb[16] & BIT_10) 1120 ha->flags.secure_fw = 1; 1121 1122 ql_log(ql_log_info, vha, 0xffff, 1123 "Secure Flash Update in FW: %s\n", 1124 (ha->flags.secure_fw) ? "Supported" : 1125 "Not Supported"); 1126 } 1127 } 1128 1129 failed: 1130 if (rval != QLA_SUCCESS) { 1131 /*EMPTY*/ 1132 ql_dbg(ql_dbg_mbx, vha, 0x102a, "Failed=%x.\n", rval); 1133 } else { 1134 /*EMPTY*/ 1135 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102b, 1136 "Done %s.\n", __func__); 1137 } 1138 return rval; 1139 } 1140 1141 /* 1142 * qla2x00_get_fw_options 1143 * Set firmware options. 1144 * 1145 * Input: 1146 * ha = adapter block pointer. 1147 * fwopt = pointer for firmware options. 1148 * 1149 * Returns: 1150 * qla2x00 local function return status code. 1151 * 1152 * Context: 1153 * Kernel context. 1154 */ 1155 int 1156 qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts) 1157 { 1158 int rval; 1159 mbx_cmd_t mc; 1160 mbx_cmd_t *mcp = &mc; 1161 1162 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102c, 1163 "Entered %s.\n", __func__); 1164 1165 mcp->mb[0] = MBC_GET_FIRMWARE_OPTION; 1166 mcp->out_mb = MBX_0; 1167 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0; 1168 mcp->tov = MBX_TOV_SECONDS; 1169 mcp->flags = 0; 1170 rval = qla2x00_mailbox_command(vha, mcp); 1171 1172 if (rval != QLA_SUCCESS) { 1173 /*EMPTY*/ 1174 ql_dbg(ql_dbg_mbx, vha, 0x102d, "Failed=%x.\n", rval); 1175 } else { 1176 fwopts[0] = mcp->mb[0]; 1177 fwopts[1] = mcp->mb[1]; 1178 fwopts[2] = mcp->mb[2]; 1179 fwopts[3] = mcp->mb[3]; 1180 1181 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102e, 1182 "Done %s.\n", __func__); 1183 } 1184 1185 return rval; 1186 } 1187 1188 1189 /* 1190 * qla2x00_set_fw_options 1191 * Set firmware options. 1192 * 1193 * Input: 1194 * ha = adapter block pointer. 1195 * fwopt = pointer for firmware options. 1196 * 1197 * Returns: 1198 * qla2x00 local function return status code. 1199 * 1200 * Context: 1201 * Kernel context. 
1202 */ 1203 int 1204 qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts) 1205 { 1206 int rval; 1207 mbx_cmd_t mc; 1208 mbx_cmd_t *mcp = &mc; 1209 1210 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102f, 1211 "Entered %s.\n", __func__); 1212 1213 mcp->mb[0] = MBC_SET_FIRMWARE_OPTION; 1214 mcp->mb[1] = fwopts[1]; 1215 mcp->mb[2] = fwopts[2]; 1216 mcp->mb[3] = fwopts[3]; 1217 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 1218 mcp->in_mb = MBX_0; 1219 if (IS_FWI2_CAPABLE(vha->hw)) { 1220 mcp->in_mb |= MBX_1; 1221 mcp->mb[10] = fwopts[10]; 1222 mcp->out_mb |= MBX_10; 1223 } else { 1224 mcp->mb[10] = fwopts[10]; 1225 mcp->mb[11] = fwopts[11]; 1226 mcp->mb[12] = 0; /* Undocumented, but used */ 1227 mcp->out_mb |= MBX_12|MBX_11|MBX_10; 1228 } 1229 mcp->tov = MBX_TOV_SECONDS; 1230 mcp->flags = 0; 1231 rval = qla2x00_mailbox_command(vha, mcp); 1232 1233 fwopts[0] = mcp->mb[0]; 1234 1235 if (rval != QLA_SUCCESS) { 1236 /*EMPTY*/ 1237 ql_dbg(ql_dbg_mbx, vha, 0x1030, 1238 "Failed=%x (%x/%x).\n", rval, mcp->mb[0], mcp->mb[1]); 1239 } else { 1240 /*EMPTY*/ 1241 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1031, 1242 "Done %s.\n", __func__); 1243 } 1244 1245 return rval; 1246 } 1247 1248 /* 1249 * qla2x00_mbx_reg_test 1250 * Mailbox register wrap test. 1251 * 1252 * Input: 1253 * ha = adapter block pointer. 1254 * TARGET_QUEUE_LOCK must be released. 1255 * ADAPTER_STATE_LOCK must be released. 1256 * 1257 * Returns: 1258 * qla2x00 local function return status code. 1259 * 1260 * Context: 1261 * Kernel context. 1262 */ 1263 int 1264 qla2x00_mbx_reg_test(scsi_qla_host_t *vha) 1265 { 1266 int rval; 1267 mbx_cmd_t mc; 1268 mbx_cmd_t *mcp = &mc; 1269 1270 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1032, 1271 "Entered %s.\n", __func__); 1272 1273 mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST; 1274 mcp->mb[1] = 0xAAAA; 1275 mcp->mb[2] = 0x5555; 1276 mcp->mb[3] = 0xAA55; 1277 mcp->mb[4] = 0x55AA; 1278 mcp->mb[5] = 0xA5A5; 1279 mcp->mb[6] = 0x5A5A; 1280 mcp->mb[7] = 0x2525; 1281 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 1282 mcp->in_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 1283 mcp->tov = MBX_TOV_SECONDS; 1284 mcp->flags = 0; 1285 rval = qla2x00_mailbox_command(vha, mcp); 1286 1287 if (rval == QLA_SUCCESS) { 1288 if (mcp->mb[1] != 0xAAAA || mcp->mb[2] != 0x5555 || 1289 mcp->mb[3] != 0xAA55 || mcp->mb[4] != 0x55AA) 1290 rval = QLA_FUNCTION_FAILED; 1291 if (mcp->mb[5] != 0xA5A5 || mcp->mb[6] != 0x5A5A || 1292 mcp->mb[7] != 0x2525) 1293 rval = QLA_FUNCTION_FAILED; 1294 } 1295 1296 if (rval != QLA_SUCCESS) { 1297 /*EMPTY*/ 1298 ql_dbg(ql_dbg_mbx, vha, 0x1033, "Failed=%x.\n", rval); 1299 } else { 1300 /*EMPTY*/ 1301 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1034, 1302 "Done %s.\n", __func__); 1303 } 1304 1305 return rval; 1306 } 1307 1308 /* 1309 * qla2x00_verify_checksum 1310 * Verify firmware checksum. 1311 * 1312 * Input: 1313 * ha = adapter block pointer. 1314 * TARGET_QUEUE_LOCK must be released. 1315 * ADAPTER_STATE_LOCK must be released. 1316 * 1317 * Returns: 1318 * qla2x00 local function return status code. 1319 * 1320 * Context: 1321 * Kernel context. 
1322 */ 1323 int 1324 qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr) 1325 { 1326 int rval; 1327 mbx_cmd_t mc; 1328 mbx_cmd_t *mcp = &mc; 1329 1330 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1035, 1331 "Entered %s.\n", __func__); 1332 1333 mcp->mb[0] = MBC_VERIFY_CHECKSUM; 1334 mcp->out_mb = MBX_0; 1335 mcp->in_mb = MBX_0; 1336 if (IS_FWI2_CAPABLE(vha->hw)) { 1337 mcp->mb[1] = MSW(risc_addr); 1338 mcp->mb[2] = LSW(risc_addr); 1339 mcp->out_mb |= MBX_2|MBX_1; 1340 mcp->in_mb |= MBX_2|MBX_1; 1341 } else { 1342 mcp->mb[1] = LSW(risc_addr); 1343 mcp->out_mb |= MBX_1; 1344 mcp->in_mb |= MBX_1; 1345 } 1346 1347 mcp->tov = MBX_TOV_SECONDS; 1348 mcp->flags = 0; 1349 rval = qla2x00_mailbox_command(vha, mcp); 1350 1351 if (rval != QLA_SUCCESS) { 1352 ql_dbg(ql_dbg_mbx, vha, 0x1036, 1353 "Failed=%x chm sum=%x.\n", rval, IS_FWI2_CAPABLE(vha->hw) ? 1354 (mcp->mb[2] << 16) | mcp->mb[1] : mcp->mb[1]); 1355 } else { 1356 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1037, 1357 "Done %s.\n", __func__); 1358 } 1359 1360 return rval; 1361 } 1362 1363 /* 1364 * qla2x00_issue_iocb 1365 * Issue IOCB using mailbox command 1366 * 1367 * Input: 1368 * ha = adapter state pointer. 1369 * buffer = buffer pointer. 1370 * phys_addr = physical address of buffer. 1371 * size = size of buffer. 1372 * TARGET_QUEUE_LOCK must be released. 1373 * ADAPTER_STATE_LOCK must be released. 1374 * 1375 * Returns: 1376 * qla2x00 local function return status code. 1377 * 1378 * Context: 1379 * Kernel context. 1380 */ 1381 int 1382 qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer, 1383 dma_addr_t phys_addr, size_t size, uint32_t tov) 1384 { 1385 int rval; 1386 mbx_cmd_t mc; 1387 mbx_cmd_t *mcp = &mc; 1388 1389 if (!vha->hw->flags.fw_started) 1390 return QLA_INVALID_COMMAND; 1391 1392 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1038, 1393 "Entered %s.\n", __func__); 1394 1395 mcp->mb[0] = MBC_IOCB_COMMAND_A64; 1396 mcp->mb[1] = 0; 1397 mcp->mb[2] = MSW(LSD(phys_addr)); 1398 mcp->mb[3] = LSW(LSD(phys_addr)); 1399 mcp->mb[6] = MSW(MSD(phys_addr)); 1400 mcp->mb[7] = LSW(MSD(phys_addr)); 1401 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 1402 mcp->in_mb = MBX_1|MBX_0; 1403 mcp->tov = tov; 1404 mcp->flags = 0; 1405 rval = qla2x00_mailbox_command(vha, mcp); 1406 1407 if (rval != QLA_SUCCESS) { 1408 /*EMPTY*/ 1409 ql_dbg(ql_dbg_mbx, vha, 0x1039, "Failed=%x.\n", rval); 1410 } else { 1411 sts_entry_t *sts_entry = buffer; 1412 1413 /* Mask reserved bits. */ 1414 sts_entry->entry_status &= 1415 IS_FWI2_CAPABLE(vha->hw) ? RF_MASK_24XX : RF_MASK; 1416 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103a, 1417 "Done %s (status=%x).\n", __func__, 1418 sts_entry->entry_status); 1419 } 1420 1421 return rval; 1422 } 1423 1424 int 1425 qla2x00_issue_iocb(scsi_qla_host_t *vha, void *buffer, dma_addr_t phys_addr, 1426 size_t size) 1427 { 1428 return qla2x00_issue_iocb_timeout(vha, buffer, phys_addr, size, 1429 MBX_TOV_SECONDS); 1430 } 1431 1432 /* 1433 * qla2x00_abort_command 1434 * Abort command aborts a specified IOCB. 1435 * 1436 * Input: 1437 * ha = adapter block pointer. 1438 * sp = SB structure pointer. 1439 * 1440 * Returns: 1441 * qla2x00 local function return status code. 1442 * 1443 * Context: 1444 * Kernel context. 
1445 */ 1446 int 1447 qla2x00_abort_command(srb_t *sp) 1448 { 1449 unsigned long flags = 0; 1450 int rval; 1451 uint32_t handle = 0; 1452 mbx_cmd_t mc; 1453 mbx_cmd_t *mcp = &mc; 1454 fc_port_t *fcport = sp->fcport; 1455 scsi_qla_host_t *vha = fcport->vha; 1456 struct qla_hw_data *ha = vha->hw; 1457 struct req_que *req; 1458 struct scsi_cmnd *cmd = GET_CMD_SP(sp); 1459 1460 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103b, 1461 "Entered %s.\n", __func__); 1462 1463 if (sp->qpair) 1464 req = sp->qpair->req; 1465 else 1466 req = vha->req; 1467 1468 spin_lock_irqsave(&ha->hardware_lock, flags); 1469 for (handle = 1; handle < req->num_outstanding_cmds; handle++) { 1470 if (req->outstanding_cmds[handle] == sp) 1471 break; 1472 } 1473 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1474 1475 if (handle == req->num_outstanding_cmds) { 1476 /* command not found */ 1477 return QLA_FUNCTION_FAILED; 1478 } 1479 1480 mcp->mb[0] = MBC_ABORT_COMMAND; 1481 if (HAS_EXTENDED_IDS(ha)) 1482 mcp->mb[1] = fcport->loop_id; 1483 else 1484 mcp->mb[1] = fcport->loop_id << 8; 1485 mcp->mb[2] = (uint16_t)handle; 1486 mcp->mb[3] = (uint16_t)(handle >> 16); 1487 mcp->mb[6] = (uint16_t)cmd->device->lun; 1488 mcp->out_mb = MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 1489 mcp->in_mb = MBX_0; 1490 mcp->tov = MBX_TOV_SECONDS; 1491 mcp->flags = 0; 1492 rval = qla2x00_mailbox_command(vha, mcp); 1493 1494 if (rval != QLA_SUCCESS) { 1495 ql_dbg(ql_dbg_mbx, vha, 0x103c, "Failed=%x.\n", rval); 1496 } else { 1497 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103d, 1498 "Done %s.\n", __func__); 1499 } 1500 1501 return rval; 1502 } 1503 1504 int 1505 qla2x00_abort_target(struct fc_port *fcport, uint64_t l, int tag) 1506 { 1507 int rval, rval2; 1508 mbx_cmd_t mc; 1509 mbx_cmd_t *mcp = &mc; 1510 scsi_qla_host_t *vha; 1511 1512 vha = fcport->vha; 1513 1514 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103e, 1515 "Entered %s.\n", __func__); 1516 1517 mcp->mb[0] = MBC_ABORT_TARGET; 1518 mcp->out_mb = MBX_9|MBX_2|MBX_1|MBX_0; 1519 if (HAS_EXTENDED_IDS(vha->hw)) { 1520 mcp->mb[1] = fcport->loop_id; 1521 mcp->mb[10] = 0; 1522 mcp->out_mb |= MBX_10; 1523 } else { 1524 mcp->mb[1] = fcport->loop_id << 8; 1525 } 1526 mcp->mb[2] = vha->hw->loop_reset_delay; 1527 mcp->mb[9] = vha->vp_idx; 1528 1529 mcp->in_mb = MBX_0; 1530 mcp->tov = MBX_TOV_SECONDS; 1531 mcp->flags = 0; 1532 rval = qla2x00_mailbox_command(vha, mcp); 1533 if (rval != QLA_SUCCESS) { 1534 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103f, 1535 "Failed=%x.\n", rval); 1536 } 1537 1538 /* Issue marker IOCB. 
*/ 1539 rval2 = qla2x00_marker(vha, vha->hw->base_qpair, fcport->loop_id, 0, 1540 MK_SYNC_ID); 1541 if (rval2 != QLA_SUCCESS) { 1542 ql_dbg(ql_dbg_mbx, vha, 0x1040, 1543 "Failed to issue marker IOCB (%x).\n", rval2); 1544 } else { 1545 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1041, 1546 "Done %s.\n", __func__); 1547 } 1548 1549 return rval; 1550 } 1551 1552 int 1553 qla2x00_lun_reset(struct fc_port *fcport, uint64_t l, int tag) 1554 { 1555 int rval, rval2; 1556 mbx_cmd_t mc; 1557 mbx_cmd_t *mcp = &mc; 1558 scsi_qla_host_t *vha; 1559 1560 vha = fcport->vha; 1561 1562 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1042, 1563 "Entered %s.\n", __func__); 1564 1565 mcp->mb[0] = MBC_LUN_RESET; 1566 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0; 1567 if (HAS_EXTENDED_IDS(vha->hw)) 1568 mcp->mb[1] = fcport->loop_id; 1569 else 1570 mcp->mb[1] = fcport->loop_id << 8; 1571 mcp->mb[2] = (u32)l; 1572 mcp->mb[3] = 0; 1573 mcp->mb[9] = vha->vp_idx; 1574 1575 mcp->in_mb = MBX_0; 1576 mcp->tov = MBX_TOV_SECONDS; 1577 mcp->flags = 0; 1578 rval = qla2x00_mailbox_command(vha, mcp); 1579 if (rval != QLA_SUCCESS) { 1580 ql_dbg(ql_dbg_mbx, vha, 0x1043, "Failed=%x.\n", rval); 1581 } 1582 1583 /* Issue marker IOCB. */ 1584 rval2 = qla2x00_marker(vha, vha->hw->base_qpair, fcport->loop_id, l, 1585 MK_SYNC_ID_LUN); 1586 if (rval2 != QLA_SUCCESS) { 1587 ql_dbg(ql_dbg_mbx, vha, 0x1044, 1588 "Failed to issue marker IOCB (%x).\n", rval2); 1589 } else { 1590 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1045, 1591 "Done %s.\n", __func__); 1592 } 1593 1594 return rval; 1595 } 1596 1597 /* 1598 * qla2x00_get_adapter_id 1599 * Get adapter ID and topology. 1600 * 1601 * Input: 1602 * ha = adapter block pointer. 1603 * id = pointer for loop ID. 1604 * al_pa = pointer for AL_PA. 1605 * area = pointer for area. 1606 * domain = pointer for domain. 1607 * top = pointer for topology. 1608 * TARGET_QUEUE_LOCK must be released. 1609 * ADAPTER_STATE_LOCK must be released. 1610 * 1611 * Returns: 1612 * qla2x00 local function return status code. 1613 * 1614 * Context: 1615 * Kernel context. 1616 */ 1617 int 1618 qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa, 1619 uint8_t *area, uint8_t *domain, uint16_t *top, uint16_t *sw_cap) 1620 { 1621 int rval; 1622 mbx_cmd_t mc; 1623 mbx_cmd_t *mcp = &mc; 1624 1625 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1046, 1626 "Entered %s.\n", __func__); 1627 1628 mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID; 1629 mcp->mb[9] = vha->vp_idx; 1630 mcp->out_mb = MBX_9|MBX_0; 1631 mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 1632 if (IS_CNA_CAPABLE(vha->hw)) 1633 mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10; 1634 if (IS_FWI2_CAPABLE(vha->hw)) 1635 mcp->in_mb |= MBX_19|MBX_18|MBX_17|MBX_16; 1636 if (IS_QLA27XX(vha->hw) || IS_QLA28XX(vha->hw)) 1637 mcp->in_mb |= MBX_15; 1638 mcp->tov = MBX_TOV_SECONDS; 1639 mcp->flags = 0; 1640 rval = qla2x00_mailbox_command(vha, mcp); 1641 if (mcp->mb[0] == MBS_COMMAND_ERROR) 1642 rval = QLA_COMMAND_ERROR; 1643 else if (mcp->mb[0] == MBS_INVALID_COMMAND) 1644 rval = QLA_INVALID_COMMAND; 1645 1646 /* Return data. 
*/ 1647 *id = mcp->mb[1]; 1648 *al_pa = LSB(mcp->mb[2]); 1649 *area = MSB(mcp->mb[2]); 1650 *domain = LSB(mcp->mb[3]); 1651 *top = mcp->mb[6]; 1652 *sw_cap = mcp->mb[7]; 1653 1654 if (rval != QLA_SUCCESS) { 1655 /*EMPTY*/ 1656 ql_dbg(ql_dbg_mbx, vha, 0x1047, "Failed=%x.\n", rval); 1657 } else { 1658 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1048, 1659 "Done %s.\n", __func__); 1660 1661 if (IS_CNA_CAPABLE(vha->hw)) { 1662 vha->fcoe_vlan_id = mcp->mb[9] & 0xfff; 1663 vha->fcoe_fcf_idx = mcp->mb[10]; 1664 vha->fcoe_vn_port_mac[5] = mcp->mb[11] >> 8; 1665 vha->fcoe_vn_port_mac[4] = mcp->mb[11] & 0xff; 1666 vha->fcoe_vn_port_mac[3] = mcp->mb[12] >> 8; 1667 vha->fcoe_vn_port_mac[2] = mcp->mb[12] & 0xff; 1668 vha->fcoe_vn_port_mac[1] = mcp->mb[13] >> 8; 1669 vha->fcoe_vn_port_mac[0] = mcp->mb[13] & 0xff; 1670 } 1671 /* If FA-WWN supported */ 1672 if (IS_FAWWN_CAPABLE(vha->hw)) { 1673 if (mcp->mb[7] & BIT_14) { 1674 vha->port_name[0] = MSB(mcp->mb[16]); 1675 vha->port_name[1] = LSB(mcp->mb[16]); 1676 vha->port_name[2] = MSB(mcp->mb[17]); 1677 vha->port_name[3] = LSB(mcp->mb[17]); 1678 vha->port_name[4] = MSB(mcp->mb[18]); 1679 vha->port_name[5] = LSB(mcp->mb[18]); 1680 vha->port_name[6] = MSB(mcp->mb[19]); 1681 vha->port_name[7] = LSB(mcp->mb[19]); 1682 fc_host_port_name(vha->host) = 1683 wwn_to_u64(vha->port_name); 1684 ql_dbg(ql_dbg_mbx, vha, 0x10ca, 1685 "FA-WWN acquired %016llx\n", 1686 wwn_to_u64(vha->port_name)); 1687 } 1688 } 1689 1690 if (IS_QLA27XX(vha->hw) || IS_QLA28XX(vha->hw)) 1691 vha->bbcr = mcp->mb[15]; 1692 } 1693 1694 return rval; 1695 } 1696 1697 /* 1698 * qla2x00_get_retry_cnt 1699 * Get current firmware login retry count and delay. 1700 * 1701 * Input: 1702 * ha = adapter block pointer. 1703 * retry_cnt = pointer to login retry count. 1704 * tov = pointer to login timeout value. 1705 * 1706 * Returns: 1707 * qla2x00 local function return status code. 1708 * 1709 * Context: 1710 * Kernel context. 1711 */ 1712 int 1713 qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov, 1714 uint16_t *r_a_tov) 1715 { 1716 int rval; 1717 uint16_t ratov; 1718 mbx_cmd_t mc; 1719 mbx_cmd_t *mcp = &mc; 1720 1721 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1049, 1722 "Entered %s.\n", __func__); 1723 1724 mcp->mb[0] = MBC_GET_RETRY_COUNT; 1725 mcp->out_mb = MBX_0; 1726 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0; 1727 mcp->tov = MBX_TOV_SECONDS; 1728 mcp->flags = 0; 1729 rval = qla2x00_mailbox_command(vha, mcp); 1730 1731 if (rval != QLA_SUCCESS) { 1732 /*EMPTY*/ 1733 ql_dbg(ql_dbg_mbx, vha, 0x104a, 1734 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 1735 } else { 1736 /* Convert returned data and check our values. */ 1737 *r_a_tov = mcp->mb[3] / 2; 1738 ratov = (mcp->mb[3]/2) / 10; /* mb[3] value is in 100ms */ 1739 if (mcp->mb[1] * ratov > (*retry_cnt) * (*tov)) { 1740 /* Update to the larger values */ 1741 *retry_cnt = (uint8_t)mcp->mb[1]; 1742 *tov = ratov; 1743 } 1744 1745 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104b, 1746 "Done %s mb3=%d ratov=%d.\n", __func__, mcp->mb[3], ratov); 1747 } 1748 1749 return rval; 1750 } 1751 1752 /* 1753 * qla2x00_init_firmware 1754 * Initialize adapter firmware. 1755 * 1756 * Input: 1757 * ha = adapter block pointer. 1758 * dptr = Initialization control block pointer. 1759 * size = size of initialization control block. 1760 * TARGET_QUEUE_LOCK must be released. 1761 * ADAPTER_STATE_LOCK must be released. 1762 * 1763 * Returns: 1764 * qla2x00 local function return status code. 1765 * 1766 * Context: 1767 * Kernel context. 
1768 */ 1769 int 1770 qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size) 1771 { 1772 int rval; 1773 mbx_cmd_t mc; 1774 mbx_cmd_t *mcp = &mc; 1775 struct qla_hw_data *ha = vha->hw; 1776 1777 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104c, 1778 "Entered %s.\n", __func__); 1779 1780 if (IS_P3P_TYPE(ha) && ql2xdbwr) 1781 qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr, 1782 (0x04 | (ha->portnum << 5) | (0 << 8) | (0 << 16))); 1783 1784 if (ha->flags.npiv_supported) 1785 mcp->mb[0] = MBC_MID_INITIALIZE_FIRMWARE; 1786 else 1787 mcp->mb[0] = MBC_INITIALIZE_FIRMWARE; 1788 1789 mcp->mb[1] = 0; 1790 mcp->mb[2] = MSW(ha->init_cb_dma); 1791 mcp->mb[3] = LSW(ha->init_cb_dma); 1792 mcp->mb[6] = MSW(MSD(ha->init_cb_dma)); 1793 mcp->mb[7] = LSW(MSD(ha->init_cb_dma)); 1794 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 1795 if (ha->ex_init_cb && ha->ex_init_cb->ex_version) { 1796 mcp->mb[1] = BIT_0; 1797 mcp->mb[10] = MSW(ha->ex_init_cb_dma); 1798 mcp->mb[11] = LSW(ha->ex_init_cb_dma); 1799 mcp->mb[12] = MSW(MSD(ha->ex_init_cb_dma)); 1800 mcp->mb[13] = LSW(MSD(ha->ex_init_cb_dma)); 1801 mcp->mb[14] = sizeof(*ha->ex_init_cb); 1802 mcp->out_mb |= MBX_14|MBX_13|MBX_12|MBX_11|MBX_10; 1803 } 1804 /* 1 and 2 should normally be captured. */ 1805 mcp->in_mb = MBX_2|MBX_1|MBX_0; 1806 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) 1807 /* mb3 is additional info about the installed SFP. */ 1808 mcp->in_mb |= MBX_3; 1809 mcp->buf_size = size; 1810 mcp->flags = MBX_DMA_OUT; 1811 mcp->tov = MBX_TOV_SECONDS; 1812 rval = qla2x00_mailbox_command(vha, mcp); 1813 1814 if (rval != QLA_SUCCESS) { 1815 /*EMPTY*/ 1816 ql_dbg(ql_dbg_mbx, vha, 0x104d, 1817 "Failed=%x mb[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x.\n", 1818 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3]); 1819 if (ha->init_cb) { 1820 ql_dbg(ql_dbg_mbx, vha, 0x104d, "init_cb:\n"); 1821 ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 1822 0x0104d, ha->init_cb, sizeof(*ha->init_cb)); 1823 } 1824 if (ha->ex_init_cb && ha->ex_init_cb->ex_version) { 1825 ql_dbg(ql_dbg_mbx, vha, 0x104d, "ex_init_cb:\n"); 1826 ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 1827 0x0104d, ha->ex_init_cb, sizeof(*ha->ex_init_cb)); 1828 } 1829 } else { 1830 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 1831 if (mcp->mb[2] == 6 || mcp->mb[3] == 2) 1832 ql_dbg(ql_dbg_mbx, vha, 0x119d, 1833 "Invalid SFP/Validation Failed\n"); 1834 } 1835 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104e, 1836 "Done %s.\n", __func__); 1837 } 1838 1839 return rval; 1840 } 1841 1842 1843 /* 1844 * qla2x00_get_port_database 1845 * Issue normal/enhanced get port database mailbox command 1846 * and copy device name as necessary. 1847 * 1848 * Input: 1849 * ha = adapter state pointer. 1850 * dev = structure pointer. 1851 * opt = enhanced cmd option byte. 1852 * 1853 * Returns: 1854 * qla2x00 local function return status code. 1855 * 1856 * Context: 1857 * Kernel context. 
1858 */ 1859 int 1860 qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt) 1861 { 1862 int rval; 1863 mbx_cmd_t mc; 1864 mbx_cmd_t *mcp = &mc; 1865 port_database_t *pd; 1866 struct port_database_24xx *pd24; 1867 dma_addr_t pd_dma; 1868 struct qla_hw_data *ha = vha->hw; 1869 1870 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104f, 1871 "Entered %s.\n", __func__); 1872 1873 pd24 = NULL; 1874 pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma); 1875 if (pd == NULL) { 1876 ql_log(ql_log_warn, vha, 0x1050, 1877 "Failed to allocate port database structure.\n"); 1878 fcport->query = 0; 1879 return QLA_MEMORY_ALLOC_FAILED; 1880 } 1881 1882 mcp->mb[0] = MBC_GET_PORT_DATABASE; 1883 if (opt != 0 && !IS_FWI2_CAPABLE(ha)) 1884 mcp->mb[0] = MBC_ENHANCED_GET_PORT_DATABASE; 1885 mcp->mb[2] = MSW(pd_dma); 1886 mcp->mb[3] = LSW(pd_dma); 1887 mcp->mb[6] = MSW(MSD(pd_dma)); 1888 mcp->mb[7] = LSW(MSD(pd_dma)); 1889 mcp->mb[9] = vha->vp_idx; 1890 mcp->out_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0; 1891 mcp->in_mb = MBX_0; 1892 if (IS_FWI2_CAPABLE(ha)) { 1893 mcp->mb[1] = fcport->loop_id; 1894 mcp->mb[10] = opt; 1895 mcp->out_mb |= MBX_10|MBX_1; 1896 mcp->in_mb |= MBX_1; 1897 } else if (HAS_EXTENDED_IDS(ha)) { 1898 mcp->mb[1] = fcport->loop_id; 1899 mcp->mb[10] = opt; 1900 mcp->out_mb |= MBX_10|MBX_1; 1901 } else { 1902 mcp->mb[1] = fcport->loop_id << 8 | opt; 1903 mcp->out_mb |= MBX_1; 1904 } 1905 mcp->buf_size = IS_FWI2_CAPABLE(ha) ? 1906 PORT_DATABASE_24XX_SIZE : PORT_DATABASE_SIZE; 1907 mcp->flags = MBX_DMA_IN; 1908 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2); 1909 rval = qla2x00_mailbox_command(vha, mcp); 1910 if (rval != QLA_SUCCESS) 1911 goto gpd_error_out; 1912 1913 if (IS_FWI2_CAPABLE(ha)) { 1914 uint64_t zero = 0; 1915 u8 current_login_state, last_login_state; 1916 1917 pd24 = (struct port_database_24xx *) pd; 1918 1919 /* Check for logged in state. */ 1920 if (NVME_TARGET(ha, fcport)) { 1921 current_login_state = pd24->current_login_state >> 4; 1922 last_login_state = pd24->last_login_state >> 4; 1923 } else { 1924 current_login_state = pd24->current_login_state & 0xf; 1925 last_login_state = pd24->last_login_state & 0xf; 1926 } 1927 fcport->current_login_state = pd24->current_login_state; 1928 fcport->last_login_state = pd24->last_login_state; 1929 1930 /* Check for logged in state. */ 1931 if (current_login_state != PDS_PRLI_COMPLETE && 1932 last_login_state != PDS_PRLI_COMPLETE) { 1933 ql_dbg(ql_dbg_mbx, vha, 0x119a, 1934 "Unable to verify login-state (%x/%x) for loop_id %x.\n", 1935 current_login_state, last_login_state, 1936 fcport->loop_id); 1937 rval = QLA_FUNCTION_FAILED; 1938 1939 if (!fcport->query) 1940 goto gpd_error_out; 1941 } 1942 1943 if (fcport->loop_id == FC_NO_LOOP_ID || 1944 (memcmp(fcport->port_name, (uint8_t *)&zero, 8) && 1945 memcmp(fcport->port_name, pd24->port_name, 8))) { 1946 /* We lost the device mid way. */ 1947 rval = QLA_NOT_LOGGED_IN; 1948 goto gpd_error_out; 1949 } 1950 1951 /* Names are little-endian. */ 1952 memcpy(fcport->node_name, pd24->node_name, WWN_SIZE); 1953 memcpy(fcport->port_name, pd24->port_name, WWN_SIZE); 1954 1955 /* Get port_id of device. */ 1956 fcport->d_id.b.domain = pd24->port_id[0]; 1957 fcport->d_id.b.area = pd24->port_id[1]; 1958 fcport->d_id.b.al_pa = pd24->port_id[2]; 1959 fcport->d_id.b.rsvd_1 = 0; 1960 1961 /* If not target must be initiator or unknown type. 
*/ 1962 if ((pd24->prli_svc_param_word_3[0] & BIT_4) == 0) 1963 fcport->port_type = FCT_INITIATOR; 1964 else 1965 fcport->port_type = FCT_TARGET; 1966 1967 /* Passback COS information. */ 1968 fcport->supported_classes = (pd24->flags & PDF_CLASS_2) ? 1969 FC_COS_CLASS2 : FC_COS_CLASS3; 1970 1971 if (pd24->prli_svc_param_word_3[0] & BIT_7) 1972 fcport->flags |= FCF_CONF_COMP_SUPPORTED; 1973 } else { 1974 uint64_t zero = 0; 1975 1976 /* Check for logged in state. */ 1977 if (pd->master_state != PD_STATE_PORT_LOGGED_IN && 1978 pd->slave_state != PD_STATE_PORT_LOGGED_IN) { 1979 ql_dbg(ql_dbg_mbx, vha, 0x100a, 1980 "Unable to verify login-state (%x/%x) - " 1981 "portid=%02x%02x%02x.\n", pd->master_state, 1982 pd->slave_state, fcport->d_id.b.domain, 1983 fcport->d_id.b.area, fcport->d_id.b.al_pa); 1984 rval = QLA_FUNCTION_FAILED; 1985 goto gpd_error_out; 1986 } 1987 1988 if (fcport->loop_id == FC_NO_LOOP_ID || 1989 (memcmp(fcport->port_name, (uint8_t *)&zero, 8) && 1990 memcmp(fcport->port_name, pd->port_name, 8))) { 1991 /* We lost the device mid way. */ 1992 rval = QLA_NOT_LOGGED_IN; 1993 goto gpd_error_out; 1994 } 1995 1996 /* Names are little-endian. */ 1997 memcpy(fcport->node_name, pd->node_name, WWN_SIZE); 1998 memcpy(fcport->port_name, pd->port_name, WWN_SIZE); 1999 2000 /* Get port_id of device. */ 2001 fcport->d_id.b.domain = pd->port_id[0]; 2002 fcport->d_id.b.area = pd->port_id[3]; 2003 fcport->d_id.b.al_pa = pd->port_id[2]; 2004 fcport->d_id.b.rsvd_1 = 0; 2005 2006 /* If not target must be initiator or unknown type. */ 2007 if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0) 2008 fcport->port_type = FCT_INITIATOR; 2009 else 2010 fcport->port_type = FCT_TARGET; 2011 2012 /* Passback COS information. */ 2013 fcport->supported_classes = (pd->options & BIT_4) ? 
2014 FC_COS_CLASS2 : FC_COS_CLASS3; 2015 } 2016 2017 gpd_error_out: 2018 dma_pool_free(ha->s_dma_pool, pd, pd_dma); 2019 fcport->query = 0; 2020 2021 if (rval != QLA_SUCCESS) { 2022 ql_dbg(ql_dbg_mbx, vha, 0x1052, 2023 "Failed=%x mb[0]=%x mb[1]=%x.\n", rval, 2024 mcp->mb[0], mcp->mb[1]); 2025 } else { 2026 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1053, 2027 "Done %s.\n", __func__); 2028 } 2029 2030 return rval; 2031 } 2032 2033 int 2034 qla24xx_get_port_database(scsi_qla_host_t *vha, u16 nport_handle, 2035 struct port_database_24xx *pdb) 2036 { 2037 mbx_cmd_t mc; 2038 mbx_cmd_t *mcp = &mc; 2039 dma_addr_t pdb_dma; 2040 int rval; 2041 2042 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1115, 2043 "Entered %s.\n", __func__); 2044 2045 memset(pdb, 0, sizeof(*pdb)); 2046 2047 pdb_dma = dma_map_single(&vha->hw->pdev->dev, pdb, 2048 sizeof(*pdb), DMA_FROM_DEVICE); 2049 if (!pdb_dma) { 2050 ql_log(ql_log_warn, vha, 0x1116, "Failed to map dma buffer.\n"); 2051 return QLA_MEMORY_ALLOC_FAILED; 2052 } 2053 2054 mcp->mb[0] = MBC_GET_PORT_DATABASE; 2055 mcp->mb[1] = nport_handle; 2056 mcp->mb[2] = MSW(LSD(pdb_dma)); 2057 mcp->mb[3] = LSW(LSD(pdb_dma)); 2058 mcp->mb[6] = MSW(MSD(pdb_dma)); 2059 mcp->mb[7] = LSW(MSD(pdb_dma)); 2060 mcp->mb[9] = 0; 2061 mcp->mb[10] = 0; 2062 mcp->out_mb = MBX_10|MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 2063 mcp->in_mb = MBX_1|MBX_0; 2064 mcp->buf_size = sizeof(*pdb); 2065 mcp->flags = MBX_DMA_IN; 2066 mcp->tov = vha->hw->login_timeout * 2; 2067 rval = qla2x00_mailbox_command(vha, mcp); 2068 2069 if (rval != QLA_SUCCESS) { 2070 ql_dbg(ql_dbg_mbx, vha, 0x111a, 2071 "Failed=%x mb[0]=%x mb[1]=%x.\n", 2072 rval, mcp->mb[0], mcp->mb[1]); 2073 } else { 2074 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111b, 2075 "Done %s.\n", __func__); 2076 } 2077 2078 dma_unmap_single(&vha->hw->pdev->dev, pdb_dma, 2079 sizeof(*pdb), DMA_FROM_DEVICE); 2080 2081 return rval; 2082 } 2083 2084 /* 2085 * qla2x00_get_firmware_state 2086 * Get adapter firmware state. 2087 * 2088 * Input: 2089 * ha = adapter block pointer. 2090 * dptr = pointer for firmware state. 2091 * TARGET_QUEUE_LOCK must be released. 2092 * ADAPTER_STATE_LOCK must be released. 2093 * 2094 * Returns: 2095 * qla2x00 local function return status code. 2096 * 2097 * Context: 2098 * Kernel context. 2099 */ 2100 int 2101 qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states) 2102 { 2103 int rval; 2104 mbx_cmd_t mc; 2105 mbx_cmd_t *mcp = &mc; 2106 struct qla_hw_data *ha = vha->hw; 2107 2108 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1054, 2109 "Entered %s.\n", __func__); 2110 2111 mcp->mb[0] = MBC_GET_FIRMWARE_STATE; 2112 mcp->out_mb = MBX_0; 2113 if (IS_FWI2_CAPABLE(vha->hw)) 2114 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 2115 else 2116 mcp->in_mb = MBX_1|MBX_0; 2117 mcp->tov = MBX_TOV_SECONDS; 2118 mcp->flags = 0; 2119 rval = qla2x00_mailbox_command(vha, mcp); 2120 2121 /* Return firmware states. 
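	 * states[0] always carries the overall firmware state from mb[1];
	 * the remaining words are only filled in for FWI-2 capable
	 * (ISP24xx and later) adapters.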
*/ 2122 states[0] = mcp->mb[1]; 2123 if (IS_FWI2_CAPABLE(vha->hw)) { 2124 states[1] = mcp->mb[2]; 2125 states[2] = mcp->mb[3]; /* SFP info */ 2126 states[3] = mcp->mb[4]; 2127 states[4] = mcp->mb[5]; 2128 states[5] = mcp->mb[6]; /* DPORT status */ 2129 } 2130 2131 if (rval != QLA_SUCCESS) { 2132 /*EMPTY*/ 2133 ql_dbg(ql_dbg_mbx, vha, 0x1055, "Failed=%x.\n", rval); 2134 } else { 2135 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 2136 if (mcp->mb[2] == 6 || mcp->mb[3] == 2) 2137 ql_dbg(ql_dbg_mbx, vha, 0x119e, 2138 "Invalid SFP/Validation Failed\n"); 2139 } 2140 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1056, 2141 "Done %s.\n", __func__); 2142 } 2143 2144 return rval; 2145 } 2146 2147 /* 2148 * qla2x00_get_port_name 2149 * Issue get port name mailbox command. 2150 * Returned name is in big endian format. 2151 * 2152 * Input: 2153 * ha = adapter block pointer. 2154 * loop_id = loop ID of device. 2155 * name = pointer for name. 2156 * TARGET_QUEUE_LOCK must be released. 2157 * ADAPTER_STATE_LOCK must be released. 2158 * 2159 * Returns: 2160 * qla2x00 local function return status code. 2161 * 2162 * Context: 2163 * Kernel context. 2164 */ 2165 int 2166 qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name, 2167 uint8_t opt) 2168 { 2169 int rval; 2170 mbx_cmd_t mc; 2171 mbx_cmd_t *mcp = &mc; 2172 2173 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1057, 2174 "Entered %s.\n", __func__); 2175 2176 mcp->mb[0] = MBC_GET_PORT_NAME; 2177 mcp->mb[9] = vha->vp_idx; 2178 mcp->out_mb = MBX_9|MBX_1|MBX_0; 2179 if (HAS_EXTENDED_IDS(vha->hw)) { 2180 mcp->mb[1] = loop_id; 2181 mcp->mb[10] = opt; 2182 mcp->out_mb |= MBX_10; 2183 } else { 2184 mcp->mb[1] = loop_id << 8 | opt; 2185 } 2186 2187 mcp->in_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 2188 mcp->tov = MBX_TOV_SECONDS; 2189 mcp->flags = 0; 2190 rval = qla2x00_mailbox_command(vha, mcp); 2191 2192 if (rval != QLA_SUCCESS) { 2193 /*EMPTY*/ 2194 ql_dbg(ql_dbg_mbx, vha, 0x1058, "Failed=%x.\n", rval); 2195 } else { 2196 if (name != NULL) { 2197 /* This function returns name in big endian. */ 2198 name[0] = MSB(mcp->mb[2]); 2199 name[1] = LSB(mcp->mb[2]); 2200 name[2] = MSB(mcp->mb[3]); 2201 name[3] = LSB(mcp->mb[3]); 2202 name[4] = MSB(mcp->mb[6]); 2203 name[5] = LSB(mcp->mb[6]); 2204 name[6] = MSB(mcp->mb[7]); 2205 name[7] = LSB(mcp->mb[7]); 2206 } 2207 2208 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1059, 2209 "Done %s.\n", __func__); 2210 } 2211 2212 return rval; 2213 } 2214 2215 /* 2216 * qla24xx_link_initialization 2217 * Issue link initialization mailbox command. 2218 * 2219 * Input: 2220 * ha = adapter block pointer. 2221 * TARGET_QUEUE_LOCK must be released. 2222 * ADAPTER_STATE_LOCK must be released. 2223 * 2224 * Returns: 2225 * qla2x00 local function return status code. 2226 * 2227 * Context: 2228 * Kernel context. 
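 *
 * Illustrative use (a sketch, not taken from the driver; the recovery
 * action on failure is an assumption):
 *
 *	rval = qla24xx_link_initialize(vha);
 *	if (rval != QLA_SUCCESS)
 *		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);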
2229 */ 2230 int 2231 qla24xx_link_initialize(scsi_qla_host_t *vha) 2232 { 2233 int rval; 2234 mbx_cmd_t mc; 2235 mbx_cmd_t *mcp = &mc; 2236 2237 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1152, 2238 "Entered %s.\n", __func__); 2239 2240 if (!IS_FWI2_CAPABLE(vha->hw) || IS_CNA_CAPABLE(vha->hw)) 2241 return QLA_FUNCTION_FAILED; 2242 2243 mcp->mb[0] = MBC_LINK_INITIALIZATION; 2244 mcp->mb[1] = BIT_4; 2245 if (vha->hw->operating_mode == LOOP) 2246 mcp->mb[1] |= BIT_6; 2247 else 2248 mcp->mb[1] |= BIT_5; 2249 mcp->mb[2] = 0; 2250 mcp->mb[3] = 0; 2251 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 2252 mcp->in_mb = MBX_0; 2253 mcp->tov = MBX_TOV_SECONDS; 2254 mcp->flags = 0; 2255 rval = qla2x00_mailbox_command(vha, mcp); 2256 2257 if (rval != QLA_SUCCESS) { 2258 ql_dbg(ql_dbg_mbx, vha, 0x1153, "Failed=%x.\n", rval); 2259 } else { 2260 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1154, 2261 "Done %s.\n", __func__); 2262 } 2263 2264 return rval; 2265 } 2266 2267 /* 2268 * qla2x00_lip_reset 2269 * Issue LIP reset mailbox command. 2270 * 2271 * Input: 2272 * ha = adapter block pointer. 2273 * TARGET_QUEUE_LOCK must be released. 2274 * ADAPTER_STATE_LOCK must be released. 2275 * 2276 * Returns: 2277 * qla2x00 local function return status code. 2278 * 2279 * Context: 2280 * Kernel context. 2281 */ 2282 int 2283 qla2x00_lip_reset(scsi_qla_host_t *vha) 2284 { 2285 int rval; 2286 mbx_cmd_t mc; 2287 mbx_cmd_t *mcp = &mc; 2288 2289 ql_dbg(ql_dbg_disc, vha, 0x105a, 2290 "Entered %s.\n", __func__); 2291 2292 if (IS_CNA_CAPABLE(vha->hw)) { 2293 /* Logout across all FCFs. */ 2294 mcp->mb[0] = MBC_LIP_FULL_LOGIN; 2295 mcp->mb[1] = BIT_1; 2296 mcp->mb[2] = 0; 2297 mcp->out_mb = MBX_2|MBX_1|MBX_0; 2298 } else if (IS_FWI2_CAPABLE(vha->hw)) { 2299 mcp->mb[0] = MBC_LIP_FULL_LOGIN; 2300 mcp->mb[1] = BIT_4; 2301 mcp->mb[2] = 0; 2302 mcp->mb[3] = vha->hw->loop_reset_delay; 2303 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 2304 } else { 2305 mcp->mb[0] = MBC_LIP_RESET; 2306 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 2307 if (HAS_EXTENDED_IDS(vha->hw)) { 2308 mcp->mb[1] = 0x00ff; 2309 mcp->mb[10] = 0; 2310 mcp->out_mb |= MBX_10; 2311 } else { 2312 mcp->mb[1] = 0xff00; 2313 } 2314 mcp->mb[2] = vha->hw->loop_reset_delay; 2315 mcp->mb[3] = 0; 2316 } 2317 mcp->in_mb = MBX_0; 2318 mcp->tov = MBX_TOV_SECONDS; 2319 mcp->flags = 0; 2320 rval = qla2x00_mailbox_command(vha, mcp); 2321 2322 if (rval != QLA_SUCCESS) { 2323 /*EMPTY*/ 2324 ql_dbg(ql_dbg_mbx, vha, 0x105b, "Failed=%x.\n", rval); 2325 } else { 2326 /*EMPTY*/ 2327 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105c, 2328 "Done %s.\n", __func__); 2329 } 2330 2331 return rval; 2332 } 2333 2334 /* 2335 * qla2x00_send_sns 2336 * Send SNS command. 2337 * 2338 * Input: 2339 * ha = adapter block pointer. 2340 * sns = pointer for command. 2341 * cmd_size = command size. 2342 * buf_size = response/command size. 2343 * TARGET_QUEUE_LOCK must be released. 2344 * ADAPTER_STATE_LOCK must be released. 2345 * 2346 * Returns: 2347 * qla2x00 local function return status code. 2348 * 2349 * Context: 2350 * Kernel context. 
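 *
 * Illustrative use (a sketch; the GA_NXT buffer symbols mirror the
 * driver's SNS pass-through path and are assumptions here):
 *
 *	rval = qla2x00_send_sns(vha, ha->sns_cmd_dma,
 *	    GA_NXT_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt));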
2351 */ 2352 int 2353 qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address, 2354 uint16_t cmd_size, size_t buf_size) 2355 { 2356 int rval; 2357 mbx_cmd_t mc; 2358 mbx_cmd_t *mcp = &mc; 2359 2360 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105d, 2361 "Entered %s.\n", __func__); 2362 2363 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105e, 2364 "Retry cnt=%d ratov=%d total tov=%d.\n", 2365 vha->hw->retry_count, vha->hw->login_timeout, mcp->tov); 2366 2367 mcp->mb[0] = MBC_SEND_SNS_COMMAND; 2368 mcp->mb[1] = cmd_size; 2369 mcp->mb[2] = MSW(sns_phys_address); 2370 mcp->mb[3] = LSW(sns_phys_address); 2371 mcp->mb[6] = MSW(MSD(sns_phys_address)); 2372 mcp->mb[7] = LSW(MSD(sns_phys_address)); 2373 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 2374 mcp->in_mb = MBX_0|MBX_1; 2375 mcp->buf_size = buf_size; 2376 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN; 2377 mcp->tov = (vha->hw->login_timeout * 2) + (vha->hw->login_timeout / 2); 2378 rval = qla2x00_mailbox_command(vha, mcp); 2379 2380 if (rval != QLA_SUCCESS) { 2381 /*EMPTY*/ 2382 ql_dbg(ql_dbg_mbx, vha, 0x105f, 2383 "Failed=%x mb[0]=%x mb[1]=%x.\n", 2384 rval, mcp->mb[0], mcp->mb[1]); 2385 } else { 2386 /*EMPTY*/ 2387 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1060, 2388 "Done %s.\n", __func__); 2389 } 2390 2391 return rval; 2392 } 2393 2394 int 2395 qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, 2396 uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt) 2397 { 2398 int rval; 2399 2400 struct logio_entry_24xx *lg; 2401 dma_addr_t lg_dma; 2402 uint32_t iop[2]; 2403 struct qla_hw_data *ha = vha->hw; 2404 struct req_que *req; 2405 2406 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1061, 2407 "Entered %s.\n", __func__); 2408 2409 if (vha->vp_idx && vha->qpair) 2410 req = vha->qpair->req; 2411 else 2412 req = ha->req_q_map[0]; 2413 2414 lg = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma); 2415 if (lg == NULL) { 2416 ql_log(ql_log_warn, vha, 0x1062, 2417 "Failed to allocate login IOCB.\n"); 2418 return QLA_MEMORY_ALLOC_FAILED; 2419 } 2420 2421 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE; 2422 lg->entry_count = 1; 2423 lg->handle = make_handle(req->id, lg->handle); 2424 lg->nport_handle = cpu_to_le16(loop_id); 2425 lg->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI); 2426 if (opt & BIT_0) 2427 lg->control_flags |= cpu_to_le16(LCF_COND_PLOGI); 2428 if (opt & BIT_1) 2429 lg->control_flags |= cpu_to_le16(LCF_SKIP_PRLI); 2430 lg->port_id[0] = al_pa; 2431 lg->port_id[1] = area; 2432 lg->port_id[2] = domain; 2433 lg->vp_index = vha->vp_idx; 2434 rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0, 2435 (ha->r_a_tov / 10 * 2) + 2); 2436 if (rval != QLA_SUCCESS) { 2437 ql_dbg(ql_dbg_mbx, vha, 0x1063, 2438 "Failed to issue login IOCB (%x).\n", rval); 2439 } else if (lg->entry_status != 0) { 2440 ql_dbg(ql_dbg_mbx, vha, 0x1064, 2441 "Failed to complete IOCB -- error status (%x).\n", 2442 lg->entry_status); 2443 rval = QLA_FUNCTION_FAILED; 2444 } else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) { 2445 iop[0] = le32_to_cpu(lg->io_parameter[0]); 2446 iop[1] = le32_to_cpu(lg->io_parameter[1]); 2447 2448 ql_dbg(ql_dbg_mbx, vha, 0x1065, 2449 "Failed to complete IOCB -- completion status (%x) " 2450 "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status), 2451 iop[0], iop[1]); 2452 2453 switch (iop[0]) { 2454 case LSC_SCODE_PORTID_USED: 2455 mb[0] = MBS_PORT_ID_USED; 2456 mb[1] = LSW(iop[1]); 2457 break; 2458 case LSC_SCODE_NPORT_USED: 2459 mb[0] = MBS_LOOP_ID_USED; 2460 break; 2461 case LSC_SCODE_NOLINK: 2462 case LSC_SCODE_NOIOCB: 
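		/*
		 * Everything from LSC_SCODE_NOLINK onward, including unknown
		 * values caught by the default label, is reported to the
		 * caller as a generic mailbox command error.
		 */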
2463 case LSC_SCODE_NOXCB: 2464 case LSC_SCODE_CMD_FAILED: 2465 case LSC_SCODE_NOFABRIC: 2466 case LSC_SCODE_FW_NOT_READY: 2467 case LSC_SCODE_NOT_LOGGED_IN: 2468 case LSC_SCODE_NOPCB: 2469 case LSC_SCODE_ELS_REJECT: 2470 case LSC_SCODE_CMD_PARAM_ERR: 2471 case LSC_SCODE_NONPORT: 2472 case LSC_SCODE_LOGGED_IN: 2473 case LSC_SCODE_NOFLOGI_ACC: 2474 default: 2475 mb[0] = MBS_COMMAND_ERROR; 2476 break; 2477 } 2478 } else { 2479 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1066, 2480 "Done %s.\n", __func__); 2481 2482 iop[0] = le32_to_cpu(lg->io_parameter[0]); 2483 2484 mb[0] = MBS_COMMAND_COMPLETE; 2485 mb[1] = 0; 2486 if (iop[0] & BIT_4) { 2487 if (iop[0] & BIT_8) 2488 mb[1] |= BIT_1; 2489 } else 2490 mb[1] = BIT_0; 2491 2492 /* Passback COS information. */ 2493 mb[10] = 0; 2494 if (lg->io_parameter[7] || lg->io_parameter[8]) 2495 mb[10] |= BIT_0; /* Class 2. */ 2496 if (lg->io_parameter[9] || lg->io_parameter[10]) 2497 mb[10] |= BIT_1; /* Class 3. */ 2498 if (lg->io_parameter[0] & cpu_to_le32(BIT_7)) 2499 mb[10] |= BIT_7; /* Confirmed Completion 2500 * Allowed 2501 */ 2502 } 2503 2504 dma_pool_free(ha->s_dma_pool, lg, lg_dma); 2505 2506 return rval; 2507 } 2508 2509 /* 2510 * qla2x00_login_fabric 2511 * Issue login fabric port mailbox command. 2512 * 2513 * Input: 2514 * ha = adapter block pointer. 2515 * loop_id = device loop ID. 2516 * domain = device domain. 2517 * area = device area. 2518 * al_pa = device AL_PA. 2519 * status = pointer for return status. 2520 * opt = command options. 2521 * TARGET_QUEUE_LOCK must be released. 2522 * ADAPTER_STATE_LOCK must be released. 2523 * 2524 * Returns: 2525 * qla2x00 local function return status code. 2526 * 2527 * Context: 2528 * Kernel context. 2529 */ 2530 int 2531 qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, 2532 uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt) 2533 { 2534 int rval; 2535 mbx_cmd_t mc; 2536 mbx_cmd_t *mcp = &mc; 2537 struct qla_hw_data *ha = vha->hw; 2538 2539 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1067, 2540 "Entered %s.\n", __func__); 2541 2542 mcp->mb[0] = MBC_LOGIN_FABRIC_PORT; 2543 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 2544 if (HAS_EXTENDED_IDS(ha)) { 2545 mcp->mb[1] = loop_id; 2546 mcp->mb[10] = opt; 2547 mcp->out_mb |= MBX_10; 2548 } else { 2549 mcp->mb[1] = (loop_id << 8) | opt; 2550 } 2551 mcp->mb[2] = domain; 2552 mcp->mb[3] = area << 8 | al_pa; 2553 2554 mcp->in_mb = MBX_7|MBX_6|MBX_2|MBX_1|MBX_0; 2555 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2); 2556 mcp->flags = 0; 2557 rval = qla2x00_mailbox_command(vha, mcp); 2558 2559 /* Return mailbox statuses. */ 2560 if (mb != NULL) { 2561 mb[0] = mcp->mb[0]; 2562 mb[1] = mcp->mb[1]; 2563 mb[2] = mcp->mb[2]; 2564 mb[6] = mcp->mb[6]; 2565 mb[7] = mcp->mb[7]; 2566 /* COS retrieved from Get-Port-Database mailbox command. */ 2567 mb[10] = 0; 2568 } 2569 2570 if (rval != QLA_SUCCESS) { 2571 /* RLU tmp code: need to change main mailbox_command function to 2572 * return ok even when the mailbox completion value is not 2573 * SUCCESS. The caller needs to be responsible to interpret 2574 * the return values of this mailbox command if we're not 2575 * to change too much of the existing code. 
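		 * (The raw 0x40xx values tested below are firmware mailbox
		 * completion statuses; treating them as success here lets the
		 * caller interpret the returned mb[0] itself.)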
2576 */ 2577 if (mcp->mb[0] == 0x4001 || mcp->mb[0] == 0x4002 || 2578 mcp->mb[0] == 0x4003 || mcp->mb[0] == 0x4005 || 2579 mcp->mb[0] == 0x4006) 2580 rval = QLA_SUCCESS; 2581 2582 /*EMPTY*/ 2583 ql_dbg(ql_dbg_mbx, vha, 0x1068, 2584 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", 2585 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); 2586 } else { 2587 /*EMPTY*/ 2588 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1069, 2589 "Done %s.\n", __func__); 2590 } 2591 2592 return rval; 2593 } 2594 2595 /* 2596 * qla2x00_login_local_device 2597 * Issue login loop port mailbox command. 2598 * 2599 * Input: 2600 * ha = adapter block pointer. 2601 * loop_id = device loop ID. 2602 * opt = command options. 2603 * 2604 * Returns: 2605 * Return status code. 2606 * 2607 * Context: 2608 * Kernel context. 2609 * 2610 */ 2611 int 2612 qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport, 2613 uint16_t *mb_ret, uint8_t opt) 2614 { 2615 int rval; 2616 mbx_cmd_t mc; 2617 mbx_cmd_t *mcp = &mc; 2618 struct qla_hw_data *ha = vha->hw; 2619 2620 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106a, 2621 "Entered %s.\n", __func__); 2622 2623 if (IS_FWI2_CAPABLE(ha)) 2624 return qla24xx_login_fabric(vha, fcport->loop_id, 2625 fcport->d_id.b.domain, fcport->d_id.b.area, 2626 fcport->d_id.b.al_pa, mb_ret, opt); 2627 2628 mcp->mb[0] = MBC_LOGIN_LOOP_PORT; 2629 if (HAS_EXTENDED_IDS(ha)) 2630 mcp->mb[1] = fcport->loop_id; 2631 else 2632 mcp->mb[1] = fcport->loop_id << 8; 2633 mcp->mb[2] = opt; 2634 mcp->out_mb = MBX_2|MBX_1|MBX_0; 2635 mcp->in_mb = MBX_7|MBX_6|MBX_1|MBX_0; 2636 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2); 2637 mcp->flags = 0; 2638 rval = qla2x00_mailbox_command(vha, mcp); 2639 2640 /* Return mailbox statuses. */ 2641 if (mb_ret != NULL) { 2642 mb_ret[0] = mcp->mb[0]; 2643 mb_ret[1] = mcp->mb[1]; 2644 mb_ret[6] = mcp->mb[6]; 2645 mb_ret[7] = mcp->mb[7]; 2646 } 2647 2648 if (rval != QLA_SUCCESS) { 2649 /* AV tmp code: need to change main mailbox_command function to 2650 * return ok even when the mailbox completion value is not 2651 * SUCCESS. The caller needs to be responsible to interpret 2652 * the return values of this mailbox command if we're not 2653 * to change too much of the existing code. 
		 */
		if (mcp->mb[0] == 0x4005 || mcp->mb[0] == 0x4006)
			rval = QLA_SUCCESS;

		ql_dbg(ql_dbg_mbx, vha, 0x106b,
		    "Failed=%x mb[0]=%x mb[1]=%x mb[6]=%x mb[7]=%x.\n",
		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]);
	} else {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106c,
		    "Done %s.\n", __func__);
	}

	return rval;
}

int
qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
    uint8_t area, uint8_t al_pa)
{
	int rval;
	struct logio_entry_24xx *lg;
	dma_addr_t lg_dma;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106d,
	    "Entered %s.\n", __func__);

	lg = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
	if (lg == NULL) {
		ql_log(ql_log_warn, vha, 0x106e,
		    "Failed to allocate logout IOCB.\n");
		return QLA_MEMORY_ALLOC_FAILED;
	}

	req = vha->req;
	lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	lg->entry_count = 1;
	lg->handle = make_handle(req->id, lg->handle);
	lg->nport_handle = cpu_to_le16(loop_id);
	lg->control_flags =
	    cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO|
		LCF_FREE_NPORT);
	lg->port_id[0] = al_pa;
	lg->port_id[1] = area;
	lg->port_id[2] = domain;
	lg->vp_index = vha->vp_idx;
	rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0,
	    (ha->r_a_tov / 10 * 2) + 2);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x106f,
		    "Failed to issue logout IOCB (%x).\n", rval);
	} else if (lg->entry_status != 0) {
		ql_dbg(ql_dbg_mbx, vha, 0x1070,
		    "Failed to complete IOCB -- error status (%x).\n",
		    lg->entry_status);
		rval = QLA_FUNCTION_FAILED;
	} else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) {
		ql_dbg(ql_dbg_mbx, vha, 0x1071,
		    "Failed to complete IOCB -- completion status (%x) "
		    "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
		    le32_to_cpu(lg->io_parameter[0]),
		    le32_to_cpu(lg->io_parameter[1]));
	} else {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1072,
		    "Done %s.\n", __func__);
	}

	dma_pool_free(ha->s_dma_pool, lg, lg_dma);

	return rval;
}

/*
 * qla2x00_fabric_logout
 *	Issue logout fabric port mailbox command.
 *
 * Input:
 *	ha = adapter block pointer.
 *	loop_id = device loop ID.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
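 *
 * Illustrative use (a sketch; reaching this routine through the
 * isp_ops method table is an assumption stated here for context):
 *
 *	ha->isp_ops->fabric_logout(vha, fcport->loop_id,
 *	    fcport->d_id.b.domain, fcport->d_id.b.area,
 *	    fcport->d_id.b.al_pa);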
2744 */ 2745 int 2746 qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, 2747 uint8_t area, uint8_t al_pa) 2748 { 2749 int rval; 2750 mbx_cmd_t mc; 2751 mbx_cmd_t *mcp = &mc; 2752 2753 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1073, 2754 "Entered %s.\n", __func__); 2755 2756 mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT; 2757 mcp->out_mb = MBX_1|MBX_0; 2758 if (HAS_EXTENDED_IDS(vha->hw)) { 2759 mcp->mb[1] = loop_id; 2760 mcp->mb[10] = 0; 2761 mcp->out_mb |= MBX_10; 2762 } else { 2763 mcp->mb[1] = loop_id << 8; 2764 } 2765 2766 mcp->in_mb = MBX_1|MBX_0; 2767 mcp->tov = MBX_TOV_SECONDS; 2768 mcp->flags = 0; 2769 rval = qla2x00_mailbox_command(vha, mcp); 2770 2771 if (rval != QLA_SUCCESS) { 2772 /*EMPTY*/ 2773 ql_dbg(ql_dbg_mbx, vha, 0x1074, 2774 "Failed=%x mb[1]=%x.\n", rval, mcp->mb[1]); 2775 } else { 2776 /*EMPTY*/ 2777 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1075, 2778 "Done %s.\n", __func__); 2779 } 2780 2781 return rval; 2782 } 2783 2784 /* 2785 * qla2x00_full_login_lip 2786 * Issue full login LIP mailbox command. 2787 * 2788 * Input: 2789 * ha = adapter block pointer. 2790 * TARGET_QUEUE_LOCK must be released. 2791 * ADAPTER_STATE_LOCK must be released. 2792 * 2793 * Returns: 2794 * qla2x00 local function return status code. 2795 * 2796 * Context: 2797 * Kernel context. 2798 */ 2799 int 2800 qla2x00_full_login_lip(scsi_qla_host_t *vha) 2801 { 2802 int rval; 2803 mbx_cmd_t mc; 2804 mbx_cmd_t *mcp = &mc; 2805 2806 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1076, 2807 "Entered %s.\n", __func__); 2808 2809 mcp->mb[0] = MBC_LIP_FULL_LOGIN; 2810 mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? BIT_4 : 0; 2811 mcp->mb[2] = 0; 2812 mcp->mb[3] = 0; 2813 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 2814 mcp->in_mb = MBX_0; 2815 mcp->tov = MBX_TOV_SECONDS; 2816 mcp->flags = 0; 2817 rval = qla2x00_mailbox_command(vha, mcp); 2818 2819 if (rval != QLA_SUCCESS) { 2820 /*EMPTY*/ 2821 ql_dbg(ql_dbg_mbx, vha, 0x1077, "Failed=%x.\n", rval); 2822 } else { 2823 /*EMPTY*/ 2824 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1078, 2825 "Done %s.\n", __func__); 2826 } 2827 2828 return rval; 2829 } 2830 2831 /* 2832 * qla2x00_get_id_list 2833 * 2834 * Input: 2835 * ha = adapter block pointer. 2836 * 2837 * Returns: 2838 * qla2x00 local function return status code. 2839 * 2840 * Context: 2841 * Kernel context. 
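 *
 * Illustrative use (a sketch; the pre-allocated gid_list buffer in
 * struct qla_hw_data is an assumption here):
 *
 *	uint16_t entries = 0;
 *
 *	rval = qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma,
 *	    &entries);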
2842 */ 2843 int 2844 qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma, 2845 uint16_t *entries) 2846 { 2847 int rval; 2848 mbx_cmd_t mc; 2849 mbx_cmd_t *mcp = &mc; 2850 2851 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1079, 2852 "Entered %s.\n", __func__); 2853 2854 if (id_list == NULL) 2855 return QLA_FUNCTION_FAILED; 2856 2857 mcp->mb[0] = MBC_GET_ID_LIST; 2858 mcp->out_mb = MBX_0; 2859 if (IS_FWI2_CAPABLE(vha->hw)) { 2860 mcp->mb[2] = MSW(id_list_dma); 2861 mcp->mb[3] = LSW(id_list_dma); 2862 mcp->mb[6] = MSW(MSD(id_list_dma)); 2863 mcp->mb[7] = LSW(MSD(id_list_dma)); 2864 mcp->mb[8] = 0; 2865 mcp->mb[9] = vha->vp_idx; 2866 mcp->out_mb |= MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2; 2867 } else { 2868 mcp->mb[1] = MSW(id_list_dma); 2869 mcp->mb[2] = LSW(id_list_dma); 2870 mcp->mb[3] = MSW(MSD(id_list_dma)); 2871 mcp->mb[6] = LSW(MSD(id_list_dma)); 2872 mcp->out_mb |= MBX_6|MBX_3|MBX_2|MBX_1; 2873 } 2874 mcp->in_mb = MBX_1|MBX_0; 2875 mcp->tov = MBX_TOV_SECONDS; 2876 mcp->flags = 0; 2877 rval = qla2x00_mailbox_command(vha, mcp); 2878 2879 if (rval != QLA_SUCCESS) { 2880 /*EMPTY*/ 2881 ql_dbg(ql_dbg_mbx, vha, 0x107a, "Failed=%x.\n", rval); 2882 } else { 2883 *entries = mcp->mb[1]; 2884 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107b, 2885 "Done %s.\n", __func__); 2886 } 2887 2888 return rval; 2889 } 2890 2891 /* 2892 * qla2x00_get_resource_cnts 2893 * Get current firmware resource counts. 2894 * 2895 * Input: 2896 * ha = adapter block pointer. 2897 * 2898 * Returns: 2899 * qla2x00 local function return status code. 2900 * 2901 * Context: 2902 * Kernel context. 2903 */ 2904 int 2905 qla2x00_get_resource_cnts(scsi_qla_host_t *vha) 2906 { 2907 struct qla_hw_data *ha = vha->hw; 2908 int rval; 2909 mbx_cmd_t mc; 2910 mbx_cmd_t *mcp = &mc; 2911 2912 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107c, 2913 "Entered %s.\n", __func__); 2914 2915 mcp->mb[0] = MBC_GET_RESOURCE_COUNTS; 2916 mcp->out_mb = MBX_0; 2917 mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 2918 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || 2919 IS_QLA27XX(ha) || IS_QLA28XX(ha)) 2920 mcp->in_mb |= MBX_12; 2921 mcp->tov = MBX_TOV_SECONDS; 2922 mcp->flags = 0; 2923 rval = qla2x00_mailbox_command(vha, mcp); 2924 2925 if (rval != QLA_SUCCESS) { 2926 /*EMPTY*/ 2927 ql_dbg(ql_dbg_mbx, vha, 0x107d, 2928 "Failed mb[0]=%x.\n", mcp->mb[0]); 2929 } else { 2930 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107e, 2931 "Done %s mb1=%x mb2=%x mb3=%x mb6=%x mb7=%x mb10=%x " 2932 "mb11=%x mb12=%x.\n", __func__, mcp->mb[1], mcp->mb[2], 2933 mcp->mb[3], mcp->mb[6], mcp->mb[7], mcp->mb[10], 2934 mcp->mb[11], mcp->mb[12]); 2935 2936 ha->orig_fw_tgt_xcb_count = mcp->mb[1]; 2937 ha->cur_fw_tgt_xcb_count = mcp->mb[2]; 2938 ha->cur_fw_xcb_count = mcp->mb[3]; 2939 ha->orig_fw_xcb_count = mcp->mb[6]; 2940 ha->cur_fw_iocb_count = mcp->mb[7]; 2941 ha->orig_fw_iocb_count = mcp->mb[10]; 2942 if (ha->flags.npiv_supported) 2943 ha->max_npiv_vports = mcp->mb[11]; 2944 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) || 2945 IS_QLA28XX(ha)) 2946 ha->fw_max_fcf_count = mcp->mb[12]; 2947 } 2948 2949 return (rval); 2950 } 2951 2952 /* 2953 * qla2x00_get_fcal_position_map 2954 * Get FCAL (LILP) position map using mailbox command 2955 * 2956 * Input: 2957 * ha = adapter state pointer. 2958 * pos_map = buffer pointer (can be NULL). 2959 * 2960 * Returns: 2961 * qla2x00 local function return status code. 2962 * 2963 * Context: 2964 * Kernel context. 
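 *
 * Illustrative use (a sketch; the caller-side buffer is an assumption):
 *
 *	char map[FCAL_MAP_SIZE];
 *
 *	if (qla2x00_get_fcal_position_map(vha, map) == QLA_SUCCESS)
 *		entries = map[0];
 *
 * map[0] holds the number of AL_PA entries that follow it.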
2965 */ 2966 int 2967 qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map) 2968 { 2969 int rval; 2970 mbx_cmd_t mc; 2971 mbx_cmd_t *mcp = &mc; 2972 char *pmap; 2973 dma_addr_t pmap_dma; 2974 struct qla_hw_data *ha = vha->hw; 2975 2976 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107f, 2977 "Entered %s.\n", __func__); 2978 2979 pmap = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma); 2980 if (pmap == NULL) { 2981 ql_log(ql_log_warn, vha, 0x1080, 2982 "Memory alloc failed.\n"); 2983 return QLA_MEMORY_ALLOC_FAILED; 2984 } 2985 2986 mcp->mb[0] = MBC_GET_FC_AL_POSITION_MAP; 2987 mcp->mb[2] = MSW(pmap_dma); 2988 mcp->mb[3] = LSW(pmap_dma); 2989 mcp->mb[6] = MSW(MSD(pmap_dma)); 2990 mcp->mb[7] = LSW(MSD(pmap_dma)); 2991 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0; 2992 mcp->in_mb = MBX_1|MBX_0; 2993 mcp->buf_size = FCAL_MAP_SIZE; 2994 mcp->flags = MBX_DMA_IN; 2995 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2); 2996 rval = qla2x00_mailbox_command(vha, mcp); 2997 2998 if (rval == QLA_SUCCESS) { 2999 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1081, 3000 "mb0/mb1=%x/%X FC/AL position map size (%x).\n", 3001 mcp->mb[0], mcp->mb[1], (unsigned)pmap[0]); 3002 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111d, 3003 pmap, pmap[0] + 1); 3004 3005 if (pos_map) 3006 memcpy(pos_map, pmap, FCAL_MAP_SIZE); 3007 } 3008 dma_pool_free(ha->s_dma_pool, pmap, pmap_dma); 3009 3010 if (rval != QLA_SUCCESS) { 3011 ql_dbg(ql_dbg_mbx, vha, 0x1082, "Failed=%x.\n", rval); 3012 } else { 3013 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1083, 3014 "Done %s.\n", __func__); 3015 } 3016 3017 return rval; 3018 } 3019 3020 /* 3021 * qla2x00_get_link_status 3022 * 3023 * Input: 3024 * ha = adapter block pointer. 3025 * loop_id = device loop ID. 3026 * ret_buf = pointer to link status return buffer. 3027 * 3028 * Returns: 3029 * 0 = success. 3030 * BIT_0 = mem alloc error. 3031 * BIT_1 = mailbox error. 3032 */ 3033 int 3034 qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id, 3035 struct link_statistics *stats, dma_addr_t stats_dma) 3036 { 3037 int rval; 3038 mbx_cmd_t mc; 3039 mbx_cmd_t *mcp = &mc; 3040 uint32_t *iter = (uint32_t *)stats; 3041 ushort dwords = offsetof(typeof(*stats), link_up_cnt)/sizeof(*iter); 3042 struct qla_hw_data *ha = vha->hw; 3043 3044 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1084, 3045 "Entered %s.\n", __func__); 3046 3047 mcp->mb[0] = MBC_GET_LINK_STATUS; 3048 mcp->mb[2] = MSW(LSD(stats_dma)); 3049 mcp->mb[3] = LSW(LSD(stats_dma)); 3050 mcp->mb[6] = MSW(MSD(stats_dma)); 3051 mcp->mb[7] = LSW(MSD(stats_dma)); 3052 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0; 3053 mcp->in_mb = MBX_0; 3054 if (IS_FWI2_CAPABLE(ha)) { 3055 mcp->mb[1] = loop_id; 3056 mcp->mb[4] = 0; 3057 mcp->mb[10] = 0; 3058 mcp->out_mb |= MBX_10|MBX_4|MBX_1; 3059 mcp->in_mb |= MBX_1; 3060 } else if (HAS_EXTENDED_IDS(ha)) { 3061 mcp->mb[1] = loop_id; 3062 mcp->mb[10] = 0; 3063 mcp->out_mb |= MBX_10|MBX_1; 3064 } else { 3065 mcp->mb[1] = loop_id << 8; 3066 mcp->out_mb |= MBX_1; 3067 } 3068 mcp->tov = MBX_TOV_SECONDS; 3069 mcp->flags = IOCTL_CMD; 3070 rval = qla2x00_mailbox_command(vha, mcp); 3071 3072 if (rval == QLA_SUCCESS) { 3073 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) { 3074 ql_dbg(ql_dbg_mbx, vha, 0x1085, 3075 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 3076 rval = QLA_FUNCTION_FAILED; 3077 } else { 3078 /* Re-endianize - firmware data is le32. 
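			 * Only the words up to link_up_cnt are converted in
			 * place, matching the dwords count computed above.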
*/ 3079 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1086, 3080 "Done %s.\n", __func__); 3081 for ( ; dwords--; iter++) 3082 le32_to_cpus(iter); 3083 } 3084 } else { 3085 /* Failed. */ 3086 ql_dbg(ql_dbg_mbx, vha, 0x1087, "Failed=%x.\n", rval); 3087 } 3088 3089 return rval; 3090 } 3091 3092 int 3093 qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats, 3094 dma_addr_t stats_dma, uint16_t options) 3095 { 3096 int rval; 3097 mbx_cmd_t mc; 3098 mbx_cmd_t *mcp = &mc; 3099 uint32_t *iter = (uint32_t *)stats; 3100 ushort dwords = sizeof(*stats)/sizeof(*iter); 3101 3102 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1088, 3103 "Entered %s.\n", __func__); 3104 3105 memset(&mc, 0, sizeof(mc)); 3106 mc.mb[0] = MBC_GET_LINK_PRIV_STATS; 3107 mc.mb[2] = MSW(LSD(stats_dma)); 3108 mc.mb[3] = LSW(LSD(stats_dma)); 3109 mc.mb[6] = MSW(MSD(stats_dma)); 3110 mc.mb[7] = LSW(MSD(stats_dma)); 3111 mc.mb[8] = dwords; 3112 mc.mb[9] = vha->vp_idx; 3113 mc.mb[10] = options; 3114 3115 rval = qla24xx_send_mb_cmd(vha, &mc); 3116 3117 if (rval == QLA_SUCCESS) { 3118 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) { 3119 ql_dbg(ql_dbg_mbx, vha, 0x1089, 3120 "Failed mb[0]=%x.\n", mcp->mb[0]); 3121 rval = QLA_FUNCTION_FAILED; 3122 } else { 3123 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108a, 3124 "Done %s.\n", __func__); 3125 /* Re-endianize - firmware data is le32. */ 3126 for ( ; dwords--; iter++) 3127 le32_to_cpus(iter); 3128 } 3129 } else { 3130 /* Failed. */ 3131 ql_dbg(ql_dbg_mbx, vha, 0x108b, "Failed=%x.\n", rval); 3132 } 3133 3134 return rval; 3135 } 3136 3137 int 3138 qla24xx_abort_command(srb_t *sp) 3139 { 3140 int rval; 3141 unsigned long flags = 0; 3142 3143 struct abort_entry_24xx *abt; 3144 dma_addr_t abt_dma; 3145 uint32_t handle; 3146 fc_port_t *fcport = sp->fcport; 3147 struct scsi_qla_host *vha = fcport->vha; 3148 struct qla_hw_data *ha = vha->hw; 3149 struct req_que *req = vha->req; 3150 struct qla_qpair *qpair = sp->qpair; 3151 3152 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c, 3153 "Entered %s.\n", __func__); 3154 3155 if (sp->qpair) 3156 req = sp->qpair->req; 3157 else 3158 return QLA_FUNCTION_FAILED; 3159 3160 if (ql2xasynctmfenable) 3161 return qla24xx_async_abort_command(sp); 3162 3163 spin_lock_irqsave(qpair->qp_lock_ptr, flags); 3164 for (handle = 1; handle < req->num_outstanding_cmds; handle++) { 3165 if (req->outstanding_cmds[handle] == sp) 3166 break; 3167 } 3168 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); 3169 if (handle == req->num_outstanding_cmds) { 3170 /* Command not found. 
*/ 3171 return QLA_FUNCTION_FAILED; 3172 } 3173 3174 abt = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &abt_dma); 3175 if (abt == NULL) { 3176 ql_log(ql_log_warn, vha, 0x108d, 3177 "Failed to allocate abort IOCB.\n"); 3178 return QLA_MEMORY_ALLOC_FAILED; 3179 } 3180 3181 abt->entry_type = ABORT_IOCB_TYPE; 3182 abt->entry_count = 1; 3183 abt->handle = make_handle(req->id, abt->handle); 3184 abt->nport_handle = cpu_to_le16(fcport->loop_id); 3185 abt->handle_to_abort = make_handle(req->id, handle); 3186 abt->port_id[0] = fcport->d_id.b.al_pa; 3187 abt->port_id[1] = fcport->d_id.b.area; 3188 abt->port_id[2] = fcport->d_id.b.domain; 3189 abt->vp_index = fcport->vha->vp_idx; 3190 3191 abt->req_que_no = cpu_to_le16(req->id); 3192 3193 rval = qla2x00_issue_iocb(vha, abt, abt_dma, 0); 3194 if (rval != QLA_SUCCESS) { 3195 ql_dbg(ql_dbg_mbx, vha, 0x108e, 3196 "Failed to issue IOCB (%x).\n", rval); 3197 } else if (abt->entry_status != 0) { 3198 ql_dbg(ql_dbg_mbx, vha, 0x108f, 3199 "Failed to complete IOCB -- error status (%x).\n", 3200 abt->entry_status); 3201 rval = QLA_FUNCTION_FAILED; 3202 } else if (abt->nport_handle != cpu_to_le16(0)) { 3203 ql_dbg(ql_dbg_mbx, vha, 0x1090, 3204 "Failed to complete IOCB -- completion status (%x).\n", 3205 le16_to_cpu(abt->nport_handle)); 3206 if (abt->nport_handle == cpu_to_le16(CS_IOCB_ERROR)) 3207 rval = QLA_FUNCTION_PARAMETER_ERROR; 3208 else 3209 rval = QLA_FUNCTION_FAILED; 3210 } else { 3211 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1091, 3212 "Done %s.\n", __func__); 3213 } 3214 3215 dma_pool_free(ha->s_dma_pool, abt, abt_dma); 3216 3217 return rval; 3218 } 3219 3220 struct tsk_mgmt_cmd { 3221 union { 3222 struct tsk_mgmt_entry tsk; 3223 struct sts_entry_24xx sts; 3224 } p; 3225 }; 3226 3227 static int 3228 __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport, 3229 uint64_t l, int tag) 3230 { 3231 int rval, rval2; 3232 struct tsk_mgmt_cmd *tsk; 3233 struct sts_entry_24xx *sts; 3234 dma_addr_t tsk_dma; 3235 scsi_qla_host_t *vha; 3236 struct qla_hw_data *ha; 3237 struct req_que *req; 3238 struct qla_qpair *qpair; 3239 3240 vha = fcport->vha; 3241 ha = vha->hw; 3242 req = vha->req; 3243 3244 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1092, 3245 "Entered %s.\n", __func__); 3246 3247 if (vha->vp_idx && vha->qpair) { 3248 /* NPIV port */ 3249 qpair = vha->qpair; 3250 req = qpair->req; 3251 } 3252 3253 tsk = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma); 3254 if (tsk == NULL) { 3255 ql_log(ql_log_warn, vha, 0x1093, 3256 "Failed to allocate task management IOCB.\n"); 3257 return QLA_MEMORY_ALLOC_FAILED; 3258 } 3259 3260 tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE; 3261 tsk->p.tsk.entry_count = 1; 3262 tsk->p.tsk.handle = make_handle(req->id, tsk->p.tsk.handle); 3263 tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id); 3264 tsk->p.tsk.timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); 3265 tsk->p.tsk.control_flags = cpu_to_le32(type); 3266 tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa; 3267 tsk->p.tsk.port_id[1] = fcport->d_id.b.area; 3268 tsk->p.tsk.port_id[2] = fcport->d_id.b.domain; 3269 tsk->p.tsk.vp_index = fcport->vha->vp_idx; 3270 if (type == TCF_LUN_RESET) { 3271 int_to_scsilun(l, &tsk->p.tsk.lun); 3272 host_to_fcp_swap((uint8_t *)&tsk->p.tsk.lun, 3273 sizeof(tsk->p.tsk.lun)); 3274 } 3275 3276 sts = &tsk->p.sts; 3277 rval = qla2x00_issue_iocb(vha, tsk, tsk_dma, 0); 3278 if (rval != QLA_SUCCESS) { 3279 ql_dbg(ql_dbg_mbx, vha, 0x1094, 3280 "Failed to issue %s reset IOCB (%x).\n", name, rval); 3281 } else if (sts->entry_status != 0) { 3282 
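		/*
		 * A non-zero entry status means the firmware rejected the
		 * IOCB itself as malformed; task-management failures are
		 * reported through the completion status handled below.
		 */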
ql_dbg(ql_dbg_mbx, vha, 0x1095, 3283 "Failed to complete IOCB -- error status (%x).\n", 3284 sts->entry_status); 3285 rval = QLA_FUNCTION_FAILED; 3286 } else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) { 3287 ql_dbg(ql_dbg_mbx, vha, 0x1096, 3288 "Failed to complete IOCB -- completion status (%x).\n", 3289 le16_to_cpu(sts->comp_status)); 3290 rval = QLA_FUNCTION_FAILED; 3291 } else if (le16_to_cpu(sts->scsi_status) & 3292 SS_RESPONSE_INFO_LEN_VALID) { 3293 if (le32_to_cpu(sts->rsp_data_len) < 4) { 3294 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1097, 3295 "Ignoring inconsistent data length -- not enough " 3296 "response info (%d).\n", 3297 le32_to_cpu(sts->rsp_data_len)); 3298 } else if (sts->data[3]) { 3299 ql_dbg(ql_dbg_mbx, vha, 0x1098, 3300 "Failed to complete IOCB -- response (%x).\n", 3301 sts->data[3]); 3302 rval = QLA_FUNCTION_FAILED; 3303 } 3304 } 3305 3306 /* Issue marker IOCB. */ 3307 rval2 = qla2x00_marker(vha, ha->base_qpair, fcport->loop_id, l, 3308 type == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID); 3309 if (rval2 != QLA_SUCCESS) { 3310 ql_dbg(ql_dbg_mbx, vha, 0x1099, 3311 "Failed to issue marker IOCB (%x).\n", rval2); 3312 } else { 3313 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109a, 3314 "Done %s.\n", __func__); 3315 } 3316 3317 dma_pool_free(ha->s_dma_pool, tsk, tsk_dma); 3318 3319 return rval; 3320 } 3321 3322 int 3323 qla24xx_abort_target(struct fc_port *fcport, uint64_t l, int tag) 3324 { 3325 struct qla_hw_data *ha = fcport->vha->hw; 3326 3327 if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha)) 3328 return qla2x00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag); 3329 3330 return __qla24xx_issue_tmf("Target", TCF_TARGET_RESET, fcport, l, tag); 3331 } 3332 3333 int 3334 qla24xx_lun_reset(struct fc_port *fcport, uint64_t l, int tag) 3335 { 3336 struct qla_hw_data *ha = fcport->vha->hw; 3337 3338 if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha)) 3339 return qla2x00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag); 3340 3341 return __qla24xx_issue_tmf("Lun", TCF_LUN_RESET, fcport, l, tag); 3342 } 3343 3344 int 3345 qla2x00_system_error(scsi_qla_host_t *vha) 3346 { 3347 int rval; 3348 mbx_cmd_t mc; 3349 mbx_cmd_t *mcp = &mc; 3350 struct qla_hw_data *ha = vha->hw; 3351 3352 if (!IS_QLA23XX(ha) && !IS_FWI2_CAPABLE(ha)) 3353 return QLA_FUNCTION_FAILED; 3354 3355 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109b, 3356 "Entered %s.\n", __func__); 3357 3358 mcp->mb[0] = MBC_GEN_SYSTEM_ERROR; 3359 mcp->out_mb = MBX_0; 3360 mcp->in_mb = MBX_0; 3361 mcp->tov = 5; 3362 mcp->flags = 0; 3363 rval = qla2x00_mailbox_command(vha, mcp); 3364 3365 if (rval != QLA_SUCCESS) { 3366 ql_dbg(ql_dbg_mbx, vha, 0x109c, "Failed=%x.\n", rval); 3367 } else { 3368 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109d, 3369 "Done %s.\n", __func__); 3370 } 3371 3372 return rval; 3373 } 3374 3375 int 3376 qla2x00_write_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t data) 3377 { 3378 int rval; 3379 mbx_cmd_t mc; 3380 mbx_cmd_t *mcp = &mc; 3381 3382 if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) && 3383 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw)) 3384 return QLA_FUNCTION_FAILED; 3385 3386 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1182, 3387 "Entered %s.\n", __func__); 3388 3389 mcp->mb[0] = MBC_WRITE_SERDES; 3390 mcp->mb[1] = addr; 3391 if (IS_QLA2031(vha->hw)) 3392 mcp->mb[2] = data & 0xff; 3393 else 3394 mcp->mb[2] = data; 3395 3396 mcp->mb[3] = 0; 3397 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 3398 mcp->in_mb = MBX_0; 3399 mcp->tov = MBX_TOV_SECONDS; 3400 mcp->flags = 0; 3401 rval = 
qla2x00_mailbox_command(vha, mcp); 3402 3403 if (rval != QLA_SUCCESS) { 3404 ql_dbg(ql_dbg_mbx, vha, 0x1183, 3405 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 3406 } else { 3407 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1184, 3408 "Done %s.\n", __func__); 3409 } 3410 3411 return rval; 3412 } 3413 3414 int 3415 qla2x00_read_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t *data) 3416 { 3417 int rval; 3418 mbx_cmd_t mc; 3419 mbx_cmd_t *mcp = &mc; 3420 3421 if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) && 3422 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw)) 3423 return QLA_FUNCTION_FAILED; 3424 3425 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1185, 3426 "Entered %s.\n", __func__); 3427 3428 mcp->mb[0] = MBC_READ_SERDES; 3429 mcp->mb[1] = addr; 3430 mcp->mb[3] = 0; 3431 mcp->out_mb = MBX_3|MBX_1|MBX_0; 3432 mcp->in_mb = MBX_1|MBX_0; 3433 mcp->tov = MBX_TOV_SECONDS; 3434 mcp->flags = 0; 3435 rval = qla2x00_mailbox_command(vha, mcp); 3436 3437 if (IS_QLA2031(vha->hw)) 3438 *data = mcp->mb[1] & 0xff; 3439 else 3440 *data = mcp->mb[1]; 3441 3442 if (rval != QLA_SUCCESS) { 3443 ql_dbg(ql_dbg_mbx, vha, 0x1186, 3444 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 3445 } else { 3446 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1187, 3447 "Done %s.\n", __func__); 3448 } 3449 3450 return rval; 3451 } 3452 3453 int 3454 qla8044_write_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t data) 3455 { 3456 int rval; 3457 mbx_cmd_t mc; 3458 mbx_cmd_t *mcp = &mc; 3459 3460 if (!IS_QLA8044(vha->hw)) 3461 return QLA_FUNCTION_FAILED; 3462 3463 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x11a0, 3464 "Entered %s.\n", __func__); 3465 3466 mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG; 3467 mcp->mb[1] = HCS_WRITE_SERDES; 3468 mcp->mb[3] = LSW(addr); 3469 mcp->mb[4] = MSW(addr); 3470 mcp->mb[5] = LSW(data); 3471 mcp->mb[6] = MSW(data); 3472 mcp->out_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_1|MBX_0; 3473 mcp->in_mb = MBX_0; 3474 mcp->tov = MBX_TOV_SECONDS; 3475 mcp->flags = 0; 3476 rval = qla2x00_mailbox_command(vha, mcp); 3477 3478 if (rval != QLA_SUCCESS) { 3479 ql_dbg(ql_dbg_mbx, vha, 0x11a1, 3480 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 3481 } else { 3482 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1188, 3483 "Done %s.\n", __func__); 3484 } 3485 3486 return rval; 3487 } 3488 3489 int 3490 qla8044_read_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t *data) 3491 { 3492 int rval; 3493 mbx_cmd_t mc; 3494 mbx_cmd_t *mcp = &mc; 3495 3496 if (!IS_QLA8044(vha->hw)) 3497 return QLA_FUNCTION_FAILED; 3498 3499 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1189, 3500 "Entered %s.\n", __func__); 3501 3502 mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG; 3503 mcp->mb[1] = HCS_READ_SERDES; 3504 mcp->mb[3] = LSW(addr); 3505 mcp->mb[4] = MSW(addr); 3506 mcp->out_mb = MBX_4|MBX_3|MBX_1|MBX_0; 3507 mcp->in_mb = MBX_2|MBX_1|MBX_0; 3508 mcp->tov = MBX_TOV_SECONDS; 3509 mcp->flags = 0; 3510 rval = qla2x00_mailbox_command(vha, mcp); 3511 3512 *data = mcp->mb[2] << 16 | mcp->mb[1]; 3513 3514 if (rval != QLA_SUCCESS) { 3515 ql_dbg(ql_dbg_mbx, vha, 0x118a, 3516 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 3517 } else { 3518 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118b, 3519 "Done %s.\n", __func__); 3520 } 3521 3522 return rval; 3523 } 3524 3525 /** 3526 * qla2x00_set_serdes_params() - 3527 * @vha: HA context 3528 * @sw_em_1g: serial link options 3529 * @sw_em_2g: serial link options 3530 * @sw_em_4g: serial link options 3531 * 3532 * Returns 3533 */ 3534 int 3535 qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g, 3536 uint16_t 
sw_em_2g, uint16_t sw_em_4g) 3537 { 3538 int rval; 3539 mbx_cmd_t mc; 3540 mbx_cmd_t *mcp = &mc; 3541 3542 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109e, 3543 "Entered %s.\n", __func__); 3544 3545 mcp->mb[0] = MBC_SERDES_PARAMS; 3546 mcp->mb[1] = BIT_0; 3547 mcp->mb[2] = sw_em_1g | BIT_15; 3548 mcp->mb[3] = sw_em_2g | BIT_15; 3549 mcp->mb[4] = sw_em_4g | BIT_15; 3550 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 3551 mcp->in_mb = MBX_0; 3552 mcp->tov = MBX_TOV_SECONDS; 3553 mcp->flags = 0; 3554 rval = qla2x00_mailbox_command(vha, mcp); 3555 3556 if (rval != QLA_SUCCESS) { 3557 /*EMPTY*/ 3558 ql_dbg(ql_dbg_mbx, vha, 0x109f, 3559 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 3560 } else { 3561 /*EMPTY*/ 3562 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a0, 3563 "Done %s.\n", __func__); 3564 } 3565 3566 return rval; 3567 } 3568 3569 int 3570 qla2x00_stop_firmware(scsi_qla_host_t *vha) 3571 { 3572 int rval; 3573 mbx_cmd_t mc; 3574 mbx_cmd_t *mcp = &mc; 3575 3576 if (!IS_FWI2_CAPABLE(vha->hw)) 3577 return QLA_FUNCTION_FAILED; 3578 3579 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a1, 3580 "Entered %s.\n", __func__); 3581 3582 mcp->mb[0] = MBC_STOP_FIRMWARE; 3583 mcp->mb[1] = 0; 3584 mcp->out_mb = MBX_1|MBX_0; 3585 mcp->in_mb = MBX_0; 3586 mcp->tov = 5; 3587 mcp->flags = 0; 3588 rval = qla2x00_mailbox_command(vha, mcp); 3589 3590 if (rval != QLA_SUCCESS) { 3591 ql_dbg(ql_dbg_mbx, vha, 0x10a2, "Failed=%x.\n", rval); 3592 if (mcp->mb[0] == MBS_INVALID_COMMAND) 3593 rval = QLA_INVALID_COMMAND; 3594 } else { 3595 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a3, 3596 "Done %s.\n", __func__); 3597 } 3598 3599 return rval; 3600 } 3601 3602 int 3603 qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma, 3604 uint16_t buffers) 3605 { 3606 int rval; 3607 mbx_cmd_t mc; 3608 mbx_cmd_t *mcp = &mc; 3609 3610 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a4, 3611 "Entered %s.\n", __func__); 3612 3613 if (!IS_FWI2_CAPABLE(vha->hw)) 3614 return QLA_FUNCTION_FAILED; 3615 3616 if (unlikely(pci_channel_offline(vha->hw->pdev))) 3617 return QLA_FUNCTION_FAILED; 3618 3619 mcp->mb[0] = MBC_TRACE_CONTROL; 3620 mcp->mb[1] = TC_EFT_ENABLE; 3621 mcp->mb[2] = LSW(eft_dma); 3622 mcp->mb[3] = MSW(eft_dma); 3623 mcp->mb[4] = LSW(MSD(eft_dma)); 3624 mcp->mb[5] = MSW(MSD(eft_dma)); 3625 mcp->mb[6] = buffers; 3626 mcp->mb[7] = TC_AEN_DISABLE; 3627 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 3628 mcp->in_mb = MBX_1|MBX_0; 3629 mcp->tov = MBX_TOV_SECONDS; 3630 mcp->flags = 0; 3631 rval = qla2x00_mailbox_command(vha, mcp); 3632 if (rval != QLA_SUCCESS) { 3633 ql_dbg(ql_dbg_mbx, vha, 0x10a5, 3634 "Failed=%x mb[0]=%x mb[1]=%x.\n", 3635 rval, mcp->mb[0], mcp->mb[1]); 3636 } else { 3637 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a6, 3638 "Done %s.\n", __func__); 3639 } 3640 3641 return rval; 3642 } 3643 3644 int 3645 qla2x00_disable_eft_trace(scsi_qla_host_t *vha) 3646 { 3647 int rval; 3648 mbx_cmd_t mc; 3649 mbx_cmd_t *mcp = &mc; 3650 3651 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a7, 3652 "Entered %s.\n", __func__); 3653 3654 if (!IS_FWI2_CAPABLE(vha->hw)) 3655 return QLA_FUNCTION_FAILED; 3656 3657 if (unlikely(pci_channel_offline(vha->hw->pdev))) 3658 return QLA_FUNCTION_FAILED; 3659 3660 mcp->mb[0] = MBC_TRACE_CONTROL; 3661 mcp->mb[1] = TC_EFT_DISABLE; 3662 mcp->out_mb = MBX_1|MBX_0; 3663 mcp->in_mb = MBX_1|MBX_0; 3664 mcp->tov = MBX_TOV_SECONDS; 3665 mcp->flags = 0; 3666 rval = qla2x00_mailbox_command(vha, mcp); 3667 if (rval != QLA_SUCCESS) { 3668 ql_dbg(ql_dbg_mbx, vha, 0x10a8, 3669 "Failed=%x 
mb[0]=%x mb[1]=%x.\n", 3670 rval, mcp->mb[0], mcp->mb[1]); 3671 } else { 3672 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a9, 3673 "Done %s.\n", __func__); 3674 } 3675 3676 return rval; 3677 } 3678 3679 int 3680 qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma, 3681 uint16_t buffers, uint16_t *mb, uint32_t *dwords) 3682 { 3683 int rval; 3684 mbx_cmd_t mc; 3685 mbx_cmd_t *mcp = &mc; 3686 3687 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10aa, 3688 "Entered %s.\n", __func__); 3689 3690 if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw) && 3691 !IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) && 3692 !IS_QLA28XX(vha->hw)) 3693 return QLA_FUNCTION_FAILED; 3694 3695 if (unlikely(pci_channel_offline(vha->hw->pdev))) 3696 return QLA_FUNCTION_FAILED; 3697 3698 mcp->mb[0] = MBC_TRACE_CONTROL; 3699 mcp->mb[1] = TC_FCE_ENABLE; 3700 mcp->mb[2] = LSW(fce_dma); 3701 mcp->mb[3] = MSW(fce_dma); 3702 mcp->mb[4] = LSW(MSD(fce_dma)); 3703 mcp->mb[5] = MSW(MSD(fce_dma)); 3704 mcp->mb[6] = buffers; 3705 mcp->mb[7] = TC_AEN_DISABLE; 3706 mcp->mb[8] = 0; 3707 mcp->mb[9] = TC_FCE_DEFAULT_RX_SIZE; 3708 mcp->mb[10] = TC_FCE_DEFAULT_TX_SIZE; 3709 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2| 3710 MBX_1|MBX_0; 3711 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 3712 mcp->tov = MBX_TOV_SECONDS; 3713 mcp->flags = 0; 3714 rval = qla2x00_mailbox_command(vha, mcp); 3715 if (rval != QLA_SUCCESS) { 3716 ql_dbg(ql_dbg_mbx, vha, 0x10ab, 3717 "Failed=%x mb[0]=%x mb[1]=%x.\n", 3718 rval, mcp->mb[0], mcp->mb[1]); 3719 } else { 3720 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ac, 3721 "Done %s.\n", __func__); 3722 3723 if (mb) 3724 memcpy(mb, mcp->mb, 8 * sizeof(*mb)); 3725 if (dwords) 3726 *dwords = buffers; 3727 } 3728 3729 return rval; 3730 } 3731 3732 int 3733 qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd) 3734 { 3735 int rval; 3736 mbx_cmd_t mc; 3737 mbx_cmd_t *mcp = &mc; 3738 3739 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ad, 3740 "Entered %s.\n", __func__); 3741 3742 if (!IS_FWI2_CAPABLE(vha->hw)) 3743 return QLA_FUNCTION_FAILED; 3744 3745 if (unlikely(pci_channel_offline(vha->hw->pdev))) 3746 return QLA_FUNCTION_FAILED; 3747 3748 mcp->mb[0] = MBC_TRACE_CONTROL; 3749 mcp->mb[1] = TC_FCE_DISABLE; 3750 mcp->mb[2] = TC_FCE_DISABLE_TRACE; 3751 mcp->out_mb = MBX_2|MBX_1|MBX_0; 3752 mcp->in_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2| 3753 MBX_1|MBX_0; 3754 mcp->tov = MBX_TOV_SECONDS; 3755 mcp->flags = 0; 3756 rval = qla2x00_mailbox_command(vha, mcp); 3757 if (rval != QLA_SUCCESS) { 3758 ql_dbg(ql_dbg_mbx, vha, 0x10ae, 3759 "Failed=%x mb[0]=%x mb[1]=%x.\n", 3760 rval, mcp->mb[0], mcp->mb[1]); 3761 } else { 3762 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10af, 3763 "Done %s.\n", __func__); 3764 3765 if (wr) 3766 *wr = (uint64_t) mcp->mb[5] << 48 | 3767 (uint64_t) mcp->mb[4] << 32 | 3768 (uint64_t) mcp->mb[3] << 16 | 3769 (uint64_t) mcp->mb[2]; 3770 if (rd) 3771 *rd = (uint64_t) mcp->mb[9] << 48 | 3772 (uint64_t) mcp->mb[8] << 32 | 3773 (uint64_t) mcp->mb[7] << 16 | 3774 (uint64_t) mcp->mb[6]; 3775 } 3776 3777 return rval; 3778 } 3779 3780 int 3781 qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id, 3782 uint16_t *port_speed, uint16_t *mb) 3783 { 3784 int rval; 3785 mbx_cmd_t mc; 3786 mbx_cmd_t *mcp = &mc; 3787 3788 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b0, 3789 "Entered %s.\n", __func__); 3790 3791 if (!IS_IIDMA_CAPABLE(vha->hw)) 3792 return QLA_FUNCTION_FAILED; 3793 3794 mcp->mb[0] = MBC_PORT_PARAMS; 3795 mcp->mb[1] = loop_id; 
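	/*
	 * mb[2]/mb[3] stay zero to select the query form of the
	 * port-parameters command; the firmware returns the current iIDMA
	 * speed in mb[3].
	 */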
3796 mcp->mb[2] = mcp->mb[3] = 0; 3797 mcp->mb[9] = vha->vp_idx; 3798 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0; 3799 mcp->in_mb = MBX_3|MBX_1|MBX_0; 3800 mcp->tov = MBX_TOV_SECONDS; 3801 mcp->flags = 0; 3802 rval = qla2x00_mailbox_command(vha, mcp); 3803 3804 /* Return mailbox statuses. */ 3805 if (mb) { 3806 mb[0] = mcp->mb[0]; 3807 mb[1] = mcp->mb[1]; 3808 mb[3] = mcp->mb[3]; 3809 } 3810 3811 if (rval != QLA_SUCCESS) { 3812 ql_dbg(ql_dbg_mbx, vha, 0x10b1, "Failed=%x.\n", rval); 3813 } else { 3814 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b2, 3815 "Done %s.\n", __func__); 3816 if (port_speed) 3817 *port_speed = mcp->mb[3]; 3818 } 3819 3820 return rval; 3821 } 3822 3823 int 3824 qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id, 3825 uint16_t port_speed, uint16_t *mb) 3826 { 3827 int rval; 3828 mbx_cmd_t mc; 3829 mbx_cmd_t *mcp = &mc; 3830 3831 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b3, 3832 "Entered %s.\n", __func__); 3833 3834 if (!IS_IIDMA_CAPABLE(vha->hw)) 3835 return QLA_FUNCTION_FAILED; 3836 3837 mcp->mb[0] = MBC_PORT_PARAMS; 3838 mcp->mb[1] = loop_id; 3839 mcp->mb[2] = BIT_0; 3840 mcp->mb[3] = port_speed & 0x3F; 3841 mcp->mb[9] = vha->vp_idx; 3842 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0; 3843 mcp->in_mb = MBX_3|MBX_1|MBX_0; 3844 mcp->tov = MBX_TOV_SECONDS; 3845 mcp->flags = 0; 3846 rval = qla2x00_mailbox_command(vha, mcp); 3847 3848 /* Return mailbox statuses. */ 3849 if (mb) { 3850 mb[0] = mcp->mb[0]; 3851 mb[1] = mcp->mb[1]; 3852 mb[3] = mcp->mb[3]; 3853 } 3854 3855 if (rval != QLA_SUCCESS) { 3856 ql_dbg(ql_dbg_mbx, vha, 0x10b4, 3857 "Failed=%x.\n", rval); 3858 } else { 3859 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b5, 3860 "Done %s.\n", __func__); 3861 } 3862 3863 return rval; 3864 } 3865 3866 void 3867 qla24xx_report_id_acquisition(scsi_qla_host_t *vha, 3868 struct vp_rpt_id_entry_24xx *rptid_entry) 3869 { 3870 struct qla_hw_data *ha = vha->hw; 3871 scsi_qla_host_t *vp = NULL; 3872 unsigned long flags; 3873 int found; 3874 port_id_t id; 3875 struct fc_port *fcport; 3876 3877 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6, 3878 "Entered %s.\n", __func__); 3879 3880 if (rptid_entry->entry_status != 0) 3881 return; 3882 3883 id.b.domain = rptid_entry->port_id[2]; 3884 id.b.area = rptid_entry->port_id[1]; 3885 id.b.al_pa = rptid_entry->port_id[0]; 3886 id.b.rsvd_1 = 0; 3887 ha->flags.n2n_ae = 0; 3888 3889 if (rptid_entry->format == 0) { 3890 /* loop */ 3891 ql_dbg(ql_dbg_async, vha, 0x10b7, 3892 "Format 0 : Number of VPs setup %d, number of " 3893 "VPs acquired %d.\n", rptid_entry->vp_setup, 3894 rptid_entry->vp_acquired); 3895 ql_dbg(ql_dbg_async, vha, 0x10b8, 3896 "Primary port id %02x%02x%02x.\n", 3897 rptid_entry->port_id[2], rptid_entry->port_id[1], 3898 rptid_entry->port_id[0]); 3899 ha->current_topology = ISP_CFG_NL; 3900 qlt_update_host_map(vha, id); 3901 3902 } else if (rptid_entry->format == 1) { 3903 /* fabric */ 3904 ql_dbg(ql_dbg_async, vha, 0x10b9, 3905 "Format 1: VP[%d] enabled - status %d - with " 3906 "port id %02x%02x%02x.\n", rptid_entry->vp_idx, 3907 rptid_entry->vp_status, 3908 rptid_entry->port_id[2], rptid_entry->port_id[1], 3909 rptid_entry->port_id[0]); 3910 ql_dbg(ql_dbg_async, vha, 0x5075, 3911 "Format 1: Remote WWPN %8phC.\n", 3912 rptid_entry->u.f1.port_name); 3913 3914 ql_dbg(ql_dbg_async, vha, 0x5075, 3915 "Format 1: WWPN %8phC.\n", 3916 vha->port_name); 3917 3918 switch (rptid_entry->u.f1.flags & TOPO_MASK) { 3919 case TOPO_N2N: 3920 ha->current_topology = ISP_CFG_N; 3921 spin_lock_irqsave(&vha->hw->tgt.sess_lock, 
flags); 3922 list_for_each_entry(fcport, &vha->vp_fcports, list) { 3923 fcport->scan_state = QLA_FCPORT_SCAN; 3924 fcport->n2n_flag = 0; 3925 } 3926 id.b24 = 0; 3927 if (wwn_to_u64(vha->port_name) > 3928 wwn_to_u64(rptid_entry->u.f1.port_name)) { 3929 vha->d_id.b24 = 0; 3930 vha->d_id.b.al_pa = 1; 3931 ha->flags.n2n_bigger = 1; 3932 3933 id.b.al_pa = 2; 3934 ql_dbg(ql_dbg_async, vha, 0x5075, 3935 "Format 1: assign local id %x remote id %x\n", 3936 vha->d_id.b24, id.b24); 3937 } else { 3938 ql_dbg(ql_dbg_async, vha, 0x5075, 3939 "Format 1: Remote login - Waiting for WWPN %8phC.\n", 3940 rptid_entry->u.f1.port_name); 3941 ha->flags.n2n_bigger = 0; 3942 } 3943 3944 fcport = qla2x00_find_fcport_by_wwpn(vha, 3945 rptid_entry->u.f1.port_name, 1); 3946 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 3947 3948 3949 if (fcport) { 3950 fcport->plogi_nack_done_deadline = jiffies + HZ; 3951 fcport->dm_login_expire = jiffies + 2*HZ; 3952 fcport->scan_state = QLA_FCPORT_FOUND; 3953 fcport->n2n_flag = 1; 3954 fcport->keep_nport_handle = 1; 3955 fcport->fc4_type = FS_FC4TYPE_FCP; 3956 if (vha->flags.nvme_enabled) 3957 fcport->fc4_type |= FS_FC4TYPE_NVME; 3958 3959 if (wwn_to_u64(vha->port_name) > 3960 wwn_to_u64(fcport->port_name)) { 3961 fcport->d_id = id; 3962 } 3963 3964 switch (fcport->disc_state) { 3965 case DSC_DELETED: 3966 set_bit(RELOGIN_NEEDED, 3967 &vha->dpc_flags); 3968 break; 3969 case DSC_DELETE_PEND: 3970 break; 3971 default: 3972 qlt_schedule_sess_for_deletion(fcport); 3973 break; 3974 } 3975 } else { 3976 qla24xx_post_newsess_work(vha, &id, 3977 rptid_entry->u.f1.port_name, 3978 rptid_entry->u.f1.node_name, 3979 NULL, 3980 FS_FCP_IS_N2N); 3981 } 3982 3983 /* if our portname is higher then initiate N2N login */ 3984 3985 set_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags); 3986 return; 3987 break; 3988 case TOPO_FL: 3989 ha->current_topology = ISP_CFG_FL; 3990 break; 3991 case TOPO_F: 3992 ha->current_topology = ISP_CFG_F; 3993 break; 3994 default: 3995 break; 3996 } 3997 3998 ha->flags.gpsc_supported = 1; 3999 ha->current_topology = ISP_CFG_F; 4000 /* buffer to buffer credit flag */ 4001 vha->flags.bbcr_enable = (rptid_entry->u.f1.bbcr & 0xf) != 0; 4002 4003 if (rptid_entry->vp_idx == 0) { 4004 if (rptid_entry->vp_status == VP_STAT_COMPL) { 4005 /* FA-WWN is only for physical port */ 4006 if (qla_ini_mode_enabled(vha) && 4007 ha->flags.fawwpn_enabled && 4008 (rptid_entry->u.f1.flags & 4009 BIT_6)) { 4010 memcpy(vha->port_name, 4011 rptid_entry->u.f1.port_name, 4012 WWN_SIZE); 4013 } 4014 4015 qlt_update_host_map(vha, id); 4016 } 4017 4018 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags); 4019 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags); 4020 } else { 4021 if (rptid_entry->vp_status != VP_STAT_COMPL && 4022 rptid_entry->vp_status != VP_STAT_ID_CHG) { 4023 ql_dbg(ql_dbg_mbx, vha, 0x10ba, 4024 "Could not acquire ID for VP[%d].\n", 4025 rptid_entry->vp_idx); 4026 return; 4027 } 4028 4029 found = 0; 4030 spin_lock_irqsave(&ha->vport_slock, flags); 4031 list_for_each_entry(vp, &ha->vp_list, list) { 4032 if (rptid_entry->vp_idx == vp->vp_idx) { 4033 found = 1; 4034 break; 4035 } 4036 } 4037 spin_unlock_irqrestore(&ha->vport_slock, flags); 4038 4039 if (!found) 4040 return; 4041 4042 qlt_update_host_map(vp, id); 4043 4044 /* 4045 * Cannot configure here as we are still sitting on the 4046 * response queue. Handle it in dpc context. 
4047 */ 4048 set_bit(VP_IDX_ACQUIRED, &vp->vp_flags); 4049 set_bit(REGISTER_FC4_NEEDED, &vp->dpc_flags); 4050 set_bit(REGISTER_FDMI_NEEDED, &vp->dpc_flags); 4051 } 4052 set_bit(VP_DPC_NEEDED, &vha->dpc_flags); 4053 qla2xxx_wake_dpc(vha); 4054 } else if (rptid_entry->format == 2) { 4055 ql_dbg(ql_dbg_async, vha, 0x505f, 4056 "RIDA: format 2/N2N Primary port id %02x%02x%02x.\n", 4057 rptid_entry->port_id[2], rptid_entry->port_id[1], 4058 rptid_entry->port_id[0]); 4059 4060 ql_dbg(ql_dbg_async, vha, 0x5075, 4061 "N2N: Remote WWPN %8phC.\n", 4062 rptid_entry->u.f2.port_name); 4063 4064 /* N2N. direct connect */ 4065 ha->current_topology = ISP_CFG_N; 4066 ha->flags.rida_fmt2 = 1; 4067 vha->d_id.b.domain = rptid_entry->port_id[2]; 4068 vha->d_id.b.area = rptid_entry->port_id[1]; 4069 vha->d_id.b.al_pa = rptid_entry->port_id[0]; 4070 4071 ha->flags.n2n_ae = 1; 4072 spin_lock_irqsave(&ha->vport_slock, flags); 4073 qlt_update_vp_map(vha, SET_AL_PA); 4074 spin_unlock_irqrestore(&ha->vport_slock, flags); 4075 4076 list_for_each_entry(fcport, &vha->vp_fcports, list) { 4077 fcport->scan_state = QLA_FCPORT_SCAN; 4078 fcport->n2n_flag = 0; 4079 } 4080 4081 fcport = qla2x00_find_fcport_by_wwpn(vha, 4082 rptid_entry->u.f2.port_name, 1); 4083 4084 if (fcport) { 4085 fcport->login_retry = vha->hw->login_retry_count; 4086 fcport->plogi_nack_done_deadline = jiffies + HZ; 4087 fcport->scan_state = QLA_FCPORT_FOUND; 4088 fcport->keep_nport_handle = 1; 4089 fcport->n2n_flag = 1; 4090 fcport->d_id.b.domain = 4091 rptid_entry->u.f2.remote_nport_id[2]; 4092 fcport->d_id.b.area = 4093 rptid_entry->u.f2.remote_nport_id[1]; 4094 fcport->d_id.b.al_pa = 4095 rptid_entry->u.f2.remote_nport_id[0]; 4096 } 4097 } 4098 } 4099 4100 /* 4101 * qla24xx_modify_vp_config 4102 * Change VP configuration for vha 4103 * 4104 * Input: 4105 * vha = adapter block pointer. 4106 * 4107 * Returns: 4108 * qla2xxx local function return status code. 4109 * 4110 * Context: 4111 * Kernel context. 
4112 */ 4113 int 4114 qla24xx_modify_vp_config(scsi_qla_host_t *vha) 4115 { 4116 int rval; 4117 struct vp_config_entry_24xx *vpmod; 4118 dma_addr_t vpmod_dma; 4119 struct qla_hw_data *ha = vha->hw; 4120 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 4121 4122 /* This can be called by the parent */ 4123 4124 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10bb, 4125 "Entered %s.\n", __func__); 4126 4127 vpmod = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma); 4128 if (!vpmod) { 4129 ql_log(ql_log_warn, vha, 0x10bc, 4130 "Failed to allocate modify VP IOCB.\n"); 4131 return QLA_MEMORY_ALLOC_FAILED; 4132 } 4133 4134 vpmod->entry_type = VP_CONFIG_IOCB_TYPE; 4135 vpmod->entry_count = 1; 4136 vpmod->command = VCT_COMMAND_MOD_ENABLE_VPS; 4137 vpmod->vp_count = 1; 4138 vpmod->vp_index1 = vha->vp_idx; 4139 vpmod->options_idx1 = BIT_3|BIT_4|BIT_5; 4140 4141 qlt_modify_vp_config(vha, vpmod); 4142 4143 memcpy(vpmod->node_name_idx1, vha->node_name, WWN_SIZE); 4144 memcpy(vpmod->port_name_idx1, vha->port_name, WWN_SIZE); 4145 vpmod->entry_count = 1; 4146 4147 rval = qla2x00_issue_iocb(base_vha, vpmod, vpmod_dma, 0); 4148 if (rval != QLA_SUCCESS) { 4149 ql_dbg(ql_dbg_mbx, vha, 0x10bd, 4150 "Failed to issue VP config IOCB (%x).\n", rval); 4151 } else if (vpmod->comp_status != 0) { 4152 ql_dbg(ql_dbg_mbx, vha, 0x10be, 4153 "Failed to complete IOCB -- error status (%x).\n", 4154 vpmod->comp_status); 4155 rval = QLA_FUNCTION_FAILED; 4156 } else if (vpmod->comp_status != cpu_to_le16(CS_COMPLETE)) { 4157 ql_dbg(ql_dbg_mbx, vha, 0x10bf, 4158 "Failed to complete IOCB -- completion status (%x).\n", 4159 le16_to_cpu(vpmod->comp_status)); 4160 rval = QLA_FUNCTION_FAILED; 4161 } else { 4162 /* EMPTY */ 4163 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c0, 4164 "Done %s.\n", __func__); 4165 fc_vport_set_state(vha->fc_vport, FC_VPORT_INITIALIZING); 4166 } 4167 dma_pool_free(ha->s_dma_pool, vpmod, vpmod_dma); 4168 4169 return rval; 4170 } 4171 4172 /* 4173 * qla2x00_send_change_request 4174 * Receive or disable RSCN request from fabric controller 4175 * 4176 * Input: 4177 * ha = adapter block pointer 4178 * format = registration format: 4179 * 0 - Reserved 4180 * 1 - Fabric detected registration 4181 * 2 - N_port detected registration 4182 * 3 - Full registration 4183 * FF - clear registration 4184 * vp_idx = Virtual port index 4185 * 4186 * Returns: 4187 * qla2x00 local function return status code. 
4188 * 4189 * Context: 4190 * Kernel Context 4191 */ 4192 4193 int 4194 qla2x00_send_change_request(scsi_qla_host_t *vha, uint16_t format, 4195 uint16_t vp_idx) 4196 { 4197 int rval; 4198 mbx_cmd_t mc; 4199 mbx_cmd_t *mcp = &mc; 4200 4201 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c7, 4202 "Entered %s.\n", __func__); 4203 4204 mcp->mb[0] = MBC_SEND_CHANGE_REQUEST; 4205 mcp->mb[1] = format; 4206 mcp->mb[9] = vp_idx; 4207 mcp->out_mb = MBX_9|MBX_1|MBX_0; 4208 mcp->in_mb = MBX_0|MBX_1; 4209 mcp->tov = MBX_TOV_SECONDS; 4210 mcp->flags = 0; 4211 rval = qla2x00_mailbox_command(vha, mcp); 4212 4213 if (rval == QLA_SUCCESS) { 4214 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) { 4215 rval = BIT_1; 4216 } 4217 } else 4218 rval = BIT_1; 4219 4220 return rval; 4221 } 4222 4223 int 4224 qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr, 4225 uint32_t size) 4226 { 4227 int rval; 4228 mbx_cmd_t mc; 4229 mbx_cmd_t *mcp = &mc; 4230 4231 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1009, 4232 "Entered %s.\n", __func__); 4233 4234 if (MSW(addr) || IS_FWI2_CAPABLE(vha->hw)) { 4235 mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED; 4236 mcp->mb[8] = MSW(addr); 4237 mcp->out_mb = MBX_8|MBX_0; 4238 } else { 4239 mcp->mb[0] = MBC_DUMP_RISC_RAM; 4240 mcp->out_mb = MBX_0; 4241 } 4242 mcp->mb[1] = LSW(addr); 4243 mcp->mb[2] = MSW(req_dma); 4244 mcp->mb[3] = LSW(req_dma); 4245 mcp->mb[6] = MSW(MSD(req_dma)); 4246 mcp->mb[7] = LSW(MSD(req_dma)); 4247 mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1; 4248 if (IS_FWI2_CAPABLE(vha->hw)) { 4249 mcp->mb[4] = MSW(size); 4250 mcp->mb[5] = LSW(size); 4251 mcp->out_mb |= MBX_5|MBX_4; 4252 } else { 4253 mcp->mb[4] = LSW(size); 4254 mcp->out_mb |= MBX_4; 4255 } 4256 4257 mcp->in_mb = MBX_0; 4258 mcp->tov = MBX_TOV_SECONDS; 4259 mcp->flags = 0; 4260 rval = qla2x00_mailbox_command(vha, mcp); 4261 4262 if (rval != QLA_SUCCESS) { 4263 ql_dbg(ql_dbg_mbx, vha, 0x1008, 4264 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4265 } else { 4266 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1007, 4267 "Done %s.\n", __func__); 4268 } 4269 4270 return rval; 4271 } 4272 /* 84XX Support **************************************************************/ 4273 4274 struct cs84xx_mgmt_cmd { 4275 union { 4276 struct verify_chip_entry_84xx req; 4277 struct verify_chip_rsp_84xx rsp; 4278 } p; 4279 }; 4280 4281 int 4282 qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status) 4283 { 4284 int rval, retry; 4285 struct cs84xx_mgmt_cmd *mn; 4286 dma_addr_t mn_dma; 4287 uint16_t options; 4288 unsigned long flags; 4289 struct qla_hw_data *ha = vha->hw; 4290 4291 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c8, 4292 "Entered %s.\n", __func__); 4293 4294 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma); 4295 if (mn == NULL) { 4296 return QLA_MEMORY_ALLOC_FAILED; 4297 } 4298 4299 /* Force Update? */ 4300 options = ha->cs84xx->fw_update ? VCO_FORCE_UPDATE : 0; 4301 /* Diagnostic firmware? */ 4302 /* options |= MENLO_DIAG_FW; */ 4303 /* We update the firmware with only one data sequence. 
*/ 4304 options |= VCO_END_OF_DATA; 4305 4306 do { 4307 retry = 0; 4308 memset(mn, 0, sizeof(*mn)); 4309 mn->p.req.entry_type = VERIFY_CHIP_IOCB_TYPE; 4310 mn->p.req.entry_count = 1; 4311 mn->p.req.options = cpu_to_le16(options); 4312 4313 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111c, 4314 "Dump of Verify Request.\n"); 4315 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111e, 4316 mn, sizeof(*mn)); 4317 4318 rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120); 4319 if (rval != QLA_SUCCESS) { 4320 ql_dbg(ql_dbg_mbx, vha, 0x10cb, 4321 "Failed to issue verify IOCB (%x).\n", rval); 4322 goto verify_done; 4323 } 4324 4325 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1110, 4326 "Dump of Verify Response.\n"); 4327 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1118, 4328 mn, sizeof(*mn)); 4329 4330 status[0] = le16_to_cpu(mn->p.rsp.comp_status); 4331 status[1] = status[0] == CS_VCS_CHIP_FAILURE ? 4332 le16_to_cpu(mn->p.rsp.failure_code) : 0; 4333 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ce, 4334 "cs=%x fc=%x.\n", status[0], status[1]); 4335 4336 if (status[0] != CS_COMPLETE) { 4337 rval = QLA_FUNCTION_FAILED; 4338 if (!(options & VCO_DONT_UPDATE_FW)) { 4339 ql_dbg(ql_dbg_mbx, vha, 0x10cf, 4340 "Firmware update failed. Retrying " 4341 "without update firmware.\n"); 4342 options |= VCO_DONT_UPDATE_FW; 4343 options &= ~VCO_FORCE_UPDATE; 4344 retry = 1; 4345 } 4346 } else { 4347 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d0, 4348 "Firmware updated to %x.\n", 4349 le32_to_cpu(mn->p.rsp.fw_ver)); 4350 4351 /* NOTE: we only update OP firmware. */ 4352 spin_lock_irqsave(&ha->cs84xx->access_lock, flags); 4353 ha->cs84xx->op_fw_version = 4354 le32_to_cpu(mn->p.rsp.fw_ver); 4355 spin_unlock_irqrestore(&ha->cs84xx->access_lock, 4356 flags); 4357 } 4358 } while (retry); 4359 4360 verify_done: 4361 dma_pool_free(ha->s_dma_pool, mn, mn_dma); 4362 4363 if (rval != QLA_SUCCESS) { 4364 ql_dbg(ql_dbg_mbx, vha, 0x10d1, 4365 "Failed=%x.\n", rval); 4366 } else { 4367 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d2, 4368 "Done %s.\n", __func__); 4369 } 4370 4371 return rval; 4372 } 4373 4374 int 4375 qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req) 4376 { 4377 int rval; 4378 unsigned long flags; 4379 mbx_cmd_t mc; 4380 mbx_cmd_t *mcp = &mc; 4381 struct qla_hw_data *ha = vha->hw; 4382 4383 if (!ha->flags.fw_started) 4384 return QLA_SUCCESS; 4385 4386 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d3, 4387 "Entered %s.\n", __func__); 4388 4389 if (IS_SHADOW_REG_CAPABLE(ha)) 4390 req->options |= BIT_13; 4391 4392 mcp->mb[0] = MBC_INITIALIZE_MULTIQ; 4393 mcp->mb[1] = req->options; 4394 mcp->mb[2] = MSW(LSD(req->dma)); 4395 mcp->mb[3] = LSW(LSD(req->dma)); 4396 mcp->mb[6] = MSW(MSD(req->dma)); 4397 mcp->mb[7] = LSW(MSD(req->dma)); 4398 mcp->mb[5] = req->length; 4399 if (req->rsp) 4400 mcp->mb[10] = req->rsp->id; 4401 mcp->mb[12] = req->qos; 4402 mcp->mb[11] = req->vp_idx; 4403 mcp->mb[13] = req->rid; 4404 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) 4405 mcp->mb[15] = 0; 4406 4407 mcp->mb[4] = req->id; 4408 /* que in ptr index */ 4409 mcp->mb[8] = 0; 4410 /* que out ptr index */ 4411 mcp->mb[9] = *req->out_ptr = 0; 4412 mcp->out_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7| 4413 MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 4414 mcp->in_mb = MBX_0; 4415 mcp->flags = MBX_DMA_OUT; 4416 mcp->tov = MBX_TOV_SECONDS * 2; 4417 4418 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) || 4419 IS_QLA28XX(ha)) 4420 mcp->in_mb |= MBX_1; 4421 if (IS_QLA83XX(ha) || 
IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 4422 mcp->out_mb |= MBX_15; 4423 /* debug q create issue in SR-IOV */ 4424 mcp->in_mb |= MBX_9 | MBX_8 | MBX_7; 4425 } 4426 4427 spin_lock_irqsave(&ha->hardware_lock, flags); 4428 if (!(req->options & BIT_0)) { 4429 wrt_reg_dword(req->req_q_in, 0); 4430 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 4431 wrt_reg_dword(req->req_q_out, 0); 4432 } 4433 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4434 4435 rval = qla2x00_mailbox_command(vha, mcp); 4436 if (rval != QLA_SUCCESS) { 4437 ql_dbg(ql_dbg_mbx, vha, 0x10d4, 4438 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4439 } else { 4440 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d5, 4441 "Done %s.\n", __func__); 4442 } 4443 4444 return rval; 4445 } 4446 4447 int 4448 qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp) 4449 { 4450 int rval; 4451 unsigned long flags; 4452 mbx_cmd_t mc; 4453 mbx_cmd_t *mcp = &mc; 4454 struct qla_hw_data *ha = vha->hw; 4455 4456 if (!ha->flags.fw_started) 4457 return QLA_SUCCESS; 4458 4459 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d6, 4460 "Entered %s.\n", __func__); 4461 4462 if (IS_SHADOW_REG_CAPABLE(ha)) 4463 rsp->options |= BIT_13; 4464 4465 mcp->mb[0] = MBC_INITIALIZE_MULTIQ; 4466 mcp->mb[1] = rsp->options; 4467 mcp->mb[2] = MSW(LSD(rsp->dma)); 4468 mcp->mb[3] = LSW(LSD(rsp->dma)); 4469 mcp->mb[6] = MSW(MSD(rsp->dma)); 4470 mcp->mb[7] = LSW(MSD(rsp->dma)); 4471 mcp->mb[5] = rsp->length; 4472 mcp->mb[14] = rsp->msix->entry; 4473 mcp->mb[13] = rsp->rid; 4474 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) 4475 mcp->mb[15] = 0; 4476 4477 mcp->mb[4] = rsp->id; 4478 /* que in ptr index */ 4479 mcp->mb[8] = *rsp->in_ptr = 0; 4480 /* que out ptr index */ 4481 mcp->mb[9] = 0; 4482 mcp->out_mb = MBX_14|MBX_13|MBX_9|MBX_8|MBX_7 4483 |MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 4484 mcp->in_mb = MBX_0; 4485 mcp->flags = MBX_DMA_OUT; 4486 mcp->tov = MBX_TOV_SECONDS * 2; 4487 4488 if (IS_QLA81XX(ha)) { 4489 mcp->out_mb |= MBX_12|MBX_11|MBX_10; 4490 mcp->in_mb |= MBX_1; 4491 } else if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 4492 mcp->out_mb |= MBX_15|MBX_12|MBX_11|MBX_10; 4493 mcp->in_mb |= MBX_1; 4494 /* debug q create issue in SR-IOV */ 4495 mcp->in_mb |= MBX_9 | MBX_8 | MBX_7; 4496 } 4497 4498 spin_lock_irqsave(&ha->hardware_lock, flags); 4499 if (!(rsp->options & BIT_0)) { 4500 wrt_reg_dword(rsp->rsp_q_out, 0); 4501 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 4502 wrt_reg_dword(rsp->rsp_q_in, 0); 4503 } 4504 4505 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4506 4507 rval = qla2x00_mailbox_command(vha, mcp); 4508 if (rval != QLA_SUCCESS) { 4509 ql_dbg(ql_dbg_mbx, vha, 0x10d7, 4510 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4511 } else { 4512 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d8, 4513 "Done %s.\n", __func__); 4514 } 4515 4516 return rval; 4517 } 4518 4519 int 4520 qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb) 4521 { 4522 int rval; 4523 mbx_cmd_t mc; 4524 mbx_cmd_t *mcp = &mc; 4525 4526 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d9, 4527 "Entered %s.\n", __func__); 4528 4529 mcp->mb[0] = MBC_IDC_ACK; 4530 memcpy(&mcp->mb[1], mb, QLA_IDC_ACK_REGS * sizeof(uint16_t)); 4531 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 4532 mcp->in_mb = MBX_0; 4533 mcp->tov = MBX_TOV_SECONDS; 4534 mcp->flags = 0; 4535 rval = qla2x00_mailbox_command(vha, mcp); 4536 4537 if (rval != QLA_SUCCESS) { 4538 ql_dbg(ql_dbg_mbx, vha, 0x10da, 4539 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4540 } else 
{ 4541 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10db, 4542 "Done %s.\n", __func__); 4543 } 4544 4545 return rval; 4546 } 4547 4548 int 4549 qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size) 4550 { 4551 int rval; 4552 mbx_cmd_t mc; 4553 mbx_cmd_t *mcp = &mc; 4554 4555 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10dc, 4556 "Entered %s.\n", __func__); 4557 4558 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) && 4559 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw)) 4560 return QLA_FUNCTION_FAILED; 4561 4562 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; 4563 mcp->mb[1] = FAC_OPT_CMD_GET_SECTOR_SIZE; 4564 mcp->out_mb = MBX_1|MBX_0; 4565 mcp->in_mb = MBX_1|MBX_0; 4566 mcp->tov = MBX_TOV_SECONDS; 4567 mcp->flags = 0; 4568 rval = qla2x00_mailbox_command(vha, mcp); 4569 4570 if (rval != QLA_SUCCESS) { 4571 ql_dbg(ql_dbg_mbx, vha, 0x10dd, 4572 "Failed=%x mb[0]=%x mb[1]=%x.\n", 4573 rval, mcp->mb[0], mcp->mb[1]); 4574 } else { 4575 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10de, 4576 "Done %s.\n", __func__); 4577 *sector_size = mcp->mb[1]; 4578 } 4579 4580 return rval; 4581 } 4582 4583 int 4584 qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable) 4585 { 4586 int rval; 4587 mbx_cmd_t mc; 4588 mbx_cmd_t *mcp = &mc; 4589 4590 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) && 4591 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw)) 4592 return QLA_FUNCTION_FAILED; 4593 4594 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10df, 4595 "Entered %s.\n", __func__); 4596 4597 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; 4598 mcp->mb[1] = enable ? FAC_OPT_CMD_WRITE_ENABLE : 4599 FAC_OPT_CMD_WRITE_PROTECT; 4600 mcp->out_mb = MBX_1|MBX_0; 4601 mcp->in_mb = MBX_1|MBX_0; 4602 mcp->tov = MBX_TOV_SECONDS; 4603 mcp->flags = 0; 4604 rval = qla2x00_mailbox_command(vha, mcp); 4605 4606 if (rval != QLA_SUCCESS) { 4607 ql_dbg(ql_dbg_mbx, vha, 0x10e0, 4608 "Failed=%x mb[0]=%x mb[1]=%x.\n", 4609 rval, mcp->mb[0], mcp->mb[1]); 4610 } else { 4611 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e1, 4612 "Done %s.\n", __func__); 4613 } 4614 4615 return rval; 4616 } 4617 4618 int 4619 qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish) 4620 { 4621 int rval; 4622 mbx_cmd_t mc; 4623 mbx_cmd_t *mcp = &mc; 4624 4625 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) && 4626 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw)) 4627 return QLA_FUNCTION_FAILED; 4628 4629 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2, 4630 "Entered %s.\n", __func__); 4631 4632 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; 4633 mcp->mb[1] = FAC_OPT_CMD_ERASE_SECTOR; 4634 mcp->mb[2] = LSW(start); 4635 mcp->mb[3] = MSW(start); 4636 mcp->mb[4] = LSW(finish); 4637 mcp->mb[5] = MSW(finish); 4638 mcp->out_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 4639 mcp->in_mb = MBX_2|MBX_1|MBX_0; 4640 mcp->tov = MBX_TOV_SECONDS; 4641 mcp->flags = 0; 4642 rval = qla2x00_mailbox_command(vha, mcp); 4643 4644 if (rval != QLA_SUCCESS) { 4645 ql_dbg(ql_dbg_mbx, vha, 0x10e3, 4646 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", 4647 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); 4648 } else { 4649 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4, 4650 "Done %s.\n", __func__); 4651 } 4652 4653 return rval; 4654 } 4655 4656 int 4657 qla81xx_fac_semaphore_access(scsi_qla_host_t *vha, int lock) 4658 { 4659 int rval = QLA_SUCCESS; 4660 mbx_cmd_t mc; 4661 mbx_cmd_t *mcp = &mc; 4662 struct qla_hw_data *ha = vha->hw; 4663 4664 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && 4665 !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 4666 return rval; 4667 4668 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2, 4669 
"Entered %s.\n", __func__); 4670 4671 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; 4672 mcp->mb[1] = (lock ? FAC_OPT_CMD_LOCK_SEMAPHORE : 4673 FAC_OPT_CMD_UNLOCK_SEMAPHORE); 4674 mcp->out_mb = MBX_1|MBX_0; 4675 mcp->in_mb = MBX_1|MBX_0; 4676 mcp->tov = MBX_TOV_SECONDS; 4677 mcp->flags = 0; 4678 rval = qla2x00_mailbox_command(vha, mcp); 4679 4680 if (rval != QLA_SUCCESS) { 4681 ql_dbg(ql_dbg_mbx, vha, 0x10e3, 4682 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", 4683 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); 4684 } else { 4685 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4, 4686 "Done %s.\n", __func__); 4687 } 4688 4689 return rval; 4690 } 4691 4692 int 4693 qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha) 4694 { 4695 int rval = 0; 4696 mbx_cmd_t mc; 4697 mbx_cmd_t *mcp = &mc; 4698 4699 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e5, 4700 "Entered %s.\n", __func__); 4701 4702 mcp->mb[0] = MBC_RESTART_MPI_FW; 4703 mcp->out_mb = MBX_0; 4704 mcp->in_mb = MBX_0|MBX_1; 4705 mcp->tov = MBX_TOV_SECONDS; 4706 mcp->flags = 0; 4707 rval = qla2x00_mailbox_command(vha, mcp); 4708 4709 if (rval != QLA_SUCCESS) { 4710 ql_dbg(ql_dbg_mbx, vha, 0x10e6, 4711 "Failed=%x mb[0]=%x mb[1]=%x.\n", 4712 rval, mcp->mb[0], mcp->mb[1]); 4713 } else { 4714 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e7, 4715 "Done %s.\n", __func__); 4716 } 4717 4718 return rval; 4719 } 4720 4721 int 4722 qla82xx_set_driver_version(scsi_qla_host_t *vha, char *version) 4723 { 4724 int rval; 4725 mbx_cmd_t mc; 4726 mbx_cmd_t *mcp = &mc; 4727 int i; 4728 int len; 4729 __le16 *str; 4730 struct qla_hw_data *ha = vha->hw; 4731 4732 if (!IS_P3P_TYPE(ha)) 4733 return QLA_FUNCTION_FAILED; 4734 4735 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117b, 4736 "Entered %s.\n", __func__); 4737 4738 str = (__force __le16 *)version; 4739 len = strlen(version); 4740 4741 mcp->mb[0] = MBC_SET_RNID_PARAMS; 4742 mcp->mb[1] = RNID_TYPE_SET_VERSION << 8; 4743 mcp->out_mb = MBX_1|MBX_0; 4744 for (i = 4; i < 16 && len; i++, str++, len -= 2) { 4745 mcp->mb[i] = le16_to_cpup(str); 4746 mcp->out_mb |= 1<<i; 4747 } 4748 for (; i < 16; i++) { 4749 mcp->mb[i] = 0; 4750 mcp->out_mb |= 1<<i; 4751 } 4752 mcp->in_mb = MBX_1|MBX_0; 4753 mcp->tov = MBX_TOV_SECONDS; 4754 mcp->flags = 0; 4755 rval = qla2x00_mailbox_command(vha, mcp); 4756 4757 if (rval != QLA_SUCCESS) { 4758 ql_dbg(ql_dbg_mbx, vha, 0x117c, 4759 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]); 4760 } else { 4761 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117d, 4762 "Done %s.\n", __func__); 4763 } 4764 4765 return rval; 4766 } 4767 4768 int 4769 qla25xx_set_driver_version(scsi_qla_host_t *vha, char *version) 4770 { 4771 int rval; 4772 mbx_cmd_t mc; 4773 mbx_cmd_t *mcp = &mc; 4774 int len; 4775 uint16_t dwlen; 4776 uint8_t *str; 4777 dma_addr_t str_dma; 4778 struct qla_hw_data *ha = vha->hw; 4779 4780 if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha) || 4781 IS_P3P_TYPE(ha)) 4782 return QLA_FUNCTION_FAILED; 4783 4784 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117e, 4785 "Entered %s.\n", __func__); 4786 4787 str = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &str_dma); 4788 if (!str) { 4789 ql_log(ql_log_warn, vha, 0x117f, 4790 "Failed to allocate driver version param.\n"); 4791 return QLA_MEMORY_ALLOC_FAILED; 4792 } 4793 4794 memcpy(str, "\x7\x3\x11\x0", 4); 4795 dwlen = str[0]; 4796 len = dwlen * 4 - 4; 4797 memset(str + 4, 0, len); 4798 if (len > strlen(version)) 4799 len = strlen(version); 4800 memcpy(str + 4, version, len); 4801 4802 mcp->mb[0] = MBC_SET_RNID_PARAMS; 4803 mcp->mb[1] = 
RNID_TYPE_SET_VERSION << 8 | dwlen; 4804 mcp->mb[2] = MSW(LSD(str_dma)); 4805 mcp->mb[3] = LSW(LSD(str_dma)); 4806 mcp->mb[6] = MSW(MSD(str_dma)); 4807 mcp->mb[7] = LSW(MSD(str_dma)); 4808 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 4809 mcp->in_mb = MBX_1|MBX_0; 4810 mcp->tov = MBX_TOV_SECONDS; 4811 mcp->flags = 0; 4812 rval = qla2x00_mailbox_command(vha, mcp); 4813 4814 if (rval != QLA_SUCCESS) { 4815 ql_dbg(ql_dbg_mbx, vha, 0x1180, 4816 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]); 4817 } else { 4818 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1181, 4819 "Done %s.\n", __func__); 4820 } 4821 4822 dma_pool_free(ha->s_dma_pool, str, str_dma); 4823 4824 return rval; 4825 } 4826 4827 int 4828 qla24xx_get_port_login_templ(scsi_qla_host_t *vha, dma_addr_t buf_dma, 4829 void *buf, uint16_t bufsiz) 4830 { 4831 int rval, i; 4832 mbx_cmd_t mc; 4833 mbx_cmd_t *mcp = &mc; 4834 uint32_t *bp; 4835 4836 if (!IS_FWI2_CAPABLE(vha->hw)) 4837 return QLA_FUNCTION_FAILED; 4838 4839 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159, 4840 "Entered %s.\n", __func__); 4841 4842 mcp->mb[0] = MBC_GET_RNID_PARAMS; 4843 mcp->mb[1] = RNID_TYPE_PORT_LOGIN << 8; 4844 mcp->mb[2] = MSW(buf_dma); 4845 mcp->mb[3] = LSW(buf_dma); 4846 mcp->mb[6] = MSW(MSD(buf_dma)); 4847 mcp->mb[7] = LSW(MSD(buf_dma)); 4848 mcp->mb[8] = bufsiz/4; 4849 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 4850 mcp->in_mb = MBX_1|MBX_0; 4851 mcp->tov = MBX_TOV_SECONDS; 4852 mcp->flags = 0; 4853 rval = qla2x00_mailbox_command(vha, mcp); 4854 4855 if (rval != QLA_SUCCESS) { 4856 ql_dbg(ql_dbg_mbx, vha, 0x115a, 4857 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]); 4858 } else { 4859 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b, 4860 "Done %s.\n", __func__); 4861 bp = (uint32_t *) buf; 4862 for (i = 0; i < (bufsiz-4)/4; i++, bp++) 4863 *bp = le32_to_cpu((__force __le32)*bp); 4864 } 4865 4866 return rval; 4867 } 4868 4869 int 4870 qla25xx_set_els_cmds_supported(scsi_qla_host_t *vha) 4871 { 4872 int rval; 4873 mbx_cmd_t mc; 4874 mbx_cmd_t *mcp = &mc; 4875 uint8_t *els_cmd_map; 4876 dma_addr_t els_cmd_map_dma; 4877 uint cmd_opcode = ELS_COMMAND_RDP; 4878 uint index = cmd_opcode / 8; 4879 uint bit = cmd_opcode % 8; 4880 struct qla_hw_data *ha = vha->hw; 4881 4882 if (!IS_QLA25XX(ha) && !IS_QLA2031(ha) && !IS_QLA27XX(ha)) 4883 return QLA_SUCCESS; 4884 4885 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1197, 4886 "Entered %s.\n", __func__); 4887 4888 els_cmd_map = dma_alloc_coherent(&ha->pdev->dev, ELS_CMD_MAP_SIZE, 4889 &els_cmd_map_dma, GFP_KERNEL); 4890 if (!els_cmd_map) { 4891 ql_log(ql_log_warn, vha, 0x7101, 4892 "Failed to allocate RDP els command param.\n"); 4893 return QLA_MEMORY_ALLOC_FAILED; 4894 } 4895 4896 els_cmd_map[index] |= 1 << bit; 4897 4898 mcp->mb[0] = MBC_SET_RNID_PARAMS; 4899 mcp->mb[1] = RNID_TYPE_ELS_CMD << 8; 4900 mcp->mb[2] = MSW(LSD(els_cmd_map_dma)); 4901 mcp->mb[3] = LSW(LSD(els_cmd_map_dma)); 4902 mcp->mb[6] = MSW(MSD(els_cmd_map_dma)); 4903 mcp->mb[7] = LSW(MSD(els_cmd_map_dma)); 4904 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 4905 mcp->in_mb = MBX_1|MBX_0; 4906 mcp->tov = MBX_TOV_SECONDS; 4907 mcp->flags = MBX_DMA_OUT; 4908 mcp->buf_size = ELS_CMD_MAP_SIZE; 4909 rval = qla2x00_mailbox_command(vha, mcp); 4910 4911 if (rval != QLA_SUCCESS) { 4912 ql_dbg(ql_dbg_mbx, vha, 0x118d, 4913 "Failed=%x (%x,%x).\n", rval, mcp->mb[0], mcp->mb[1]); 4914 } else { 4915 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c, 4916 "Done %s.\n", __func__); 4917 } 4918 4919 
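	/*
	 * Editor's note (descriptive comment only; the code is left
	 * unchanged): the els_cmd_map buffer above is allocated with
	 * dma_alloc_coherent() and a size of ELS_CMD_MAP_SIZE, but the
	 * dma_free_coherent() call just below passes DMA_POOL_SIZE.  The
	 * DMA API expects the free to use the same size as the matching
	 * allocation, so ELS_CMD_MAP_SIZE appears to be the intended
	 * argument here.  For reference, the map itself is a
	 * one-bit-per-ELS-opcode bitmap: the byte index is cmd_opcode / 8
	 * and the bit within that byte is cmd_opcode % 8, as computed for
	 * ELS_COMMAND_RDP above.
	 */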
dma_free_coherent(&ha->pdev->dev, DMA_POOL_SIZE, 4920 els_cmd_map, els_cmd_map_dma); 4921 4922 return rval; 4923 } 4924 4925 int 4926 qla24xx_get_buffer_credits(scsi_qla_host_t *vha, struct buffer_credit_24xx *bbc, 4927 dma_addr_t bbc_dma) 4928 { 4929 mbx_cmd_t mc; 4930 mbx_cmd_t *mcp = &mc; 4931 int rval; 4932 4933 if (!IS_FWI2_CAPABLE(vha->hw)) 4934 return QLA_FUNCTION_FAILED; 4935 4936 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118e, 4937 "Entered %s.\n", __func__); 4938 4939 mcp->mb[0] = MBC_GET_RNID_PARAMS; 4940 mcp->mb[1] = RNID_BUFFER_CREDITS << 8; 4941 mcp->mb[2] = MSW(LSD(bbc_dma)); 4942 mcp->mb[3] = LSW(LSD(bbc_dma)); 4943 mcp->mb[6] = MSW(MSD(bbc_dma)); 4944 mcp->mb[7] = LSW(MSD(bbc_dma)); 4945 mcp->mb[8] = sizeof(*bbc) / sizeof(*bbc->parameter); 4946 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 4947 mcp->in_mb = MBX_1|MBX_0; 4948 mcp->buf_size = sizeof(*bbc); 4949 mcp->flags = MBX_DMA_IN; 4950 mcp->tov = MBX_TOV_SECONDS; 4951 rval = qla2x00_mailbox_command(vha, mcp); 4952 4953 if (rval != QLA_SUCCESS) { 4954 ql_dbg(ql_dbg_mbx, vha, 0x118f, 4955 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]); 4956 } else { 4957 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1190, 4958 "Done %s.\n", __func__); 4959 } 4960 4961 return rval; 4962 } 4963 4964 static int 4965 qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp) 4966 { 4967 int rval; 4968 mbx_cmd_t mc; 4969 mbx_cmd_t *mcp = &mc; 4970 4971 if (!IS_FWI2_CAPABLE(vha->hw)) 4972 return QLA_FUNCTION_FAILED; 4973 4974 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159, 4975 "Entered %s.\n", __func__); 4976 4977 mcp->mb[0] = MBC_GET_RNID_PARAMS; 4978 mcp->mb[1] = RNID_TYPE_ASIC_TEMP << 8; 4979 mcp->out_mb = MBX_1|MBX_0; 4980 mcp->in_mb = MBX_1|MBX_0; 4981 mcp->tov = MBX_TOV_SECONDS; 4982 mcp->flags = 0; 4983 rval = qla2x00_mailbox_command(vha, mcp); 4984 *temp = mcp->mb[1]; 4985 4986 if (rval != QLA_SUCCESS) { 4987 ql_dbg(ql_dbg_mbx, vha, 0x115a, 4988 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]); 4989 } else { 4990 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b, 4991 "Done %s.\n", __func__); 4992 } 4993 4994 return rval; 4995 } 4996 4997 int 4998 qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp, 4999 uint16_t dev, uint16_t off, uint16_t len, uint16_t opt) 5000 { 5001 int rval; 5002 mbx_cmd_t mc; 5003 mbx_cmd_t *mcp = &mc; 5004 struct qla_hw_data *ha = vha->hw; 5005 5006 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8, 5007 "Entered %s.\n", __func__); 5008 5009 if (!IS_FWI2_CAPABLE(ha)) 5010 return QLA_FUNCTION_FAILED; 5011 5012 if (len == 1) 5013 opt |= BIT_0; 5014 5015 mcp->mb[0] = MBC_READ_SFP; 5016 mcp->mb[1] = dev; 5017 mcp->mb[2] = MSW(LSD(sfp_dma)); 5018 mcp->mb[3] = LSW(LSD(sfp_dma)); 5019 mcp->mb[6] = MSW(MSD(sfp_dma)); 5020 mcp->mb[7] = LSW(MSD(sfp_dma)); 5021 mcp->mb[8] = len; 5022 mcp->mb[9] = off; 5023 mcp->mb[10] = opt; 5024 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 5025 mcp->in_mb = MBX_1|MBX_0; 5026 mcp->tov = MBX_TOV_SECONDS; 5027 mcp->flags = 0; 5028 rval = qla2x00_mailbox_command(vha, mcp); 5029 5030 if (opt & BIT_0) 5031 *sfp = mcp->mb[1]; 5032 5033 if (rval != QLA_SUCCESS) { 5034 ql_dbg(ql_dbg_mbx, vha, 0x10e9, 5035 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5036 if (mcp->mb[0] == MBS_COMMAND_ERROR && mcp->mb[1] == 0x22) { 5037 /* sfp is not there */ 5038 rval = QLA_INTERFACE_ERROR; 5039 } 5040 } else { 5041 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea, 5042 "Done %s.\n", __func__); 5043 } 5044 5045 return rval; 5046 } 5047 
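/*
 * Usage sketch for qla2x00_read_sfp() above (illustrative only, modelled on
 * the qla2x00_get_thermal_temp() caller later in this file; the 0x98 device
 * address, 0x1 offset and option bits are taken from that caller).  A
 * single-byte read sets BIT_0 in 'opt', in which case the firmware returns
 * the byte in mb[1] and no DMA buffer is required:
 *
 *	uint8_t byte;
 *	int rval;
 *
 *	rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x1, 1, BIT_13 | BIT_0);
 *	if (rval == QLA_SUCCESS)
 *		*temp = byte;
 *
 * QLA_SUCCESS means the mailbox command completed; a missing module is
 * reported as QLA_INTERFACE_ERROR via the mb[0]/mb[1] check above.
 */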
5048 int 5049 qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp, 5050 uint16_t dev, uint16_t off, uint16_t len, uint16_t opt) 5051 { 5052 int rval; 5053 mbx_cmd_t mc; 5054 mbx_cmd_t *mcp = &mc; 5055 struct qla_hw_data *ha = vha->hw; 5056 5057 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10eb, 5058 "Entered %s.\n", __func__); 5059 5060 if (!IS_FWI2_CAPABLE(ha)) 5061 return QLA_FUNCTION_FAILED; 5062 5063 if (len == 1) 5064 opt |= BIT_0; 5065 5066 if (opt & BIT_0) 5067 len = *sfp; 5068 5069 mcp->mb[0] = MBC_WRITE_SFP; 5070 mcp->mb[1] = dev; 5071 mcp->mb[2] = MSW(LSD(sfp_dma)); 5072 mcp->mb[3] = LSW(LSD(sfp_dma)); 5073 mcp->mb[6] = MSW(MSD(sfp_dma)); 5074 mcp->mb[7] = LSW(MSD(sfp_dma)); 5075 mcp->mb[8] = len; 5076 mcp->mb[9] = off; 5077 mcp->mb[10] = opt; 5078 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 5079 mcp->in_mb = MBX_1|MBX_0; 5080 mcp->tov = MBX_TOV_SECONDS; 5081 mcp->flags = 0; 5082 rval = qla2x00_mailbox_command(vha, mcp); 5083 5084 if (rval != QLA_SUCCESS) { 5085 ql_dbg(ql_dbg_mbx, vha, 0x10ec, 5086 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5087 } else { 5088 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ed, 5089 "Done %s.\n", __func__); 5090 } 5091 5092 return rval; 5093 } 5094 5095 int 5096 qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma, 5097 uint16_t size_in_bytes, uint16_t *actual_size) 5098 { 5099 int rval; 5100 mbx_cmd_t mc; 5101 mbx_cmd_t *mcp = &mc; 5102 5103 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ee, 5104 "Entered %s.\n", __func__); 5105 5106 if (!IS_CNA_CAPABLE(vha->hw)) 5107 return QLA_FUNCTION_FAILED; 5108 5109 mcp->mb[0] = MBC_GET_XGMAC_STATS; 5110 mcp->mb[2] = MSW(stats_dma); 5111 mcp->mb[3] = LSW(stats_dma); 5112 mcp->mb[6] = MSW(MSD(stats_dma)); 5113 mcp->mb[7] = LSW(MSD(stats_dma)); 5114 mcp->mb[8] = size_in_bytes >> 2; 5115 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0; 5116 mcp->in_mb = MBX_2|MBX_1|MBX_0; 5117 mcp->tov = MBX_TOV_SECONDS; 5118 mcp->flags = 0; 5119 rval = qla2x00_mailbox_command(vha, mcp); 5120 5121 if (rval != QLA_SUCCESS) { 5122 ql_dbg(ql_dbg_mbx, vha, 0x10ef, 5123 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", 5124 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); 5125 } else { 5126 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f0, 5127 "Done %s.\n", __func__); 5128 5129 5130 *actual_size = mcp->mb[2] << 2; 5131 } 5132 5133 return rval; 5134 } 5135 5136 int 5137 qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma, 5138 uint16_t size) 5139 { 5140 int rval; 5141 mbx_cmd_t mc; 5142 mbx_cmd_t *mcp = &mc; 5143 5144 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f1, 5145 "Entered %s.\n", __func__); 5146 5147 if (!IS_CNA_CAPABLE(vha->hw)) 5148 return QLA_FUNCTION_FAILED; 5149 5150 mcp->mb[0] = MBC_GET_DCBX_PARAMS; 5151 mcp->mb[1] = 0; 5152 mcp->mb[2] = MSW(tlv_dma); 5153 mcp->mb[3] = LSW(tlv_dma); 5154 mcp->mb[6] = MSW(MSD(tlv_dma)); 5155 mcp->mb[7] = LSW(MSD(tlv_dma)); 5156 mcp->mb[8] = size; 5157 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 5158 mcp->in_mb = MBX_2|MBX_1|MBX_0; 5159 mcp->tov = MBX_TOV_SECONDS; 5160 mcp->flags = 0; 5161 rval = qla2x00_mailbox_command(vha, mcp); 5162 5163 if (rval != QLA_SUCCESS) { 5164 ql_dbg(ql_dbg_mbx, vha, 0x10f2, 5165 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", 5166 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); 5167 } else { 5168 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f3, 5169 "Done %s.\n", __func__); 5170 } 5171 5172 return rval; 5173 } 5174 5175 int 5176 qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, 
uint32_t *data) 5177 { 5178 int rval; 5179 mbx_cmd_t mc; 5180 mbx_cmd_t *mcp = &mc; 5181 5182 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f4, 5183 "Entered %s.\n", __func__); 5184 5185 if (!IS_FWI2_CAPABLE(vha->hw)) 5186 return QLA_FUNCTION_FAILED; 5187 5188 mcp->mb[0] = MBC_READ_RAM_EXTENDED; 5189 mcp->mb[1] = LSW(risc_addr); 5190 mcp->mb[8] = MSW(risc_addr); 5191 mcp->out_mb = MBX_8|MBX_1|MBX_0; 5192 mcp->in_mb = MBX_3|MBX_2|MBX_0; 5193 mcp->tov = 30; 5194 mcp->flags = 0; 5195 rval = qla2x00_mailbox_command(vha, mcp); 5196 if (rval != QLA_SUCCESS) { 5197 ql_dbg(ql_dbg_mbx, vha, 0x10f5, 5198 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5199 } else { 5200 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f6, 5201 "Done %s.\n", __func__); 5202 *data = mcp->mb[3] << 16 | mcp->mb[2]; 5203 } 5204 5205 return rval; 5206 } 5207 5208 int 5209 qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, 5210 uint16_t *mresp) 5211 { 5212 int rval; 5213 mbx_cmd_t mc; 5214 mbx_cmd_t *mcp = &mc; 5215 5216 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f7, 5217 "Entered %s.\n", __func__); 5218 5219 memset(mcp->mb, 0 , sizeof(mcp->mb)); 5220 mcp->mb[0] = MBC_DIAGNOSTIC_LOOP_BACK; 5221 mcp->mb[1] = mreq->options | BIT_6; // BIT_6 specifies 64 bit addressing 5222 5223 /* transfer count */ 5224 mcp->mb[10] = LSW(mreq->transfer_size); 5225 mcp->mb[11] = MSW(mreq->transfer_size); 5226 5227 /* send data address */ 5228 mcp->mb[14] = LSW(mreq->send_dma); 5229 mcp->mb[15] = MSW(mreq->send_dma); 5230 mcp->mb[20] = LSW(MSD(mreq->send_dma)); 5231 mcp->mb[21] = MSW(MSD(mreq->send_dma)); 5232 5233 /* receive data address */ 5234 mcp->mb[16] = LSW(mreq->rcv_dma); 5235 mcp->mb[17] = MSW(mreq->rcv_dma); 5236 mcp->mb[6] = LSW(MSD(mreq->rcv_dma)); 5237 mcp->mb[7] = MSW(MSD(mreq->rcv_dma)); 5238 5239 /* Iteration count */ 5240 mcp->mb[18] = LSW(mreq->iteration_count); 5241 mcp->mb[19] = MSW(mreq->iteration_count); 5242 5243 mcp->out_mb = MBX_21|MBX_20|MBX_19|MBX_18|MBX_17|MBX_16|MBX_15| 5244 MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0; 5245 if (IS_CNA_CAPABLE(vha->hw)) 5246 mcp->out_mb |= MBX_2; 5247 mcp->in_mb = MBX_19|MBX_18|MBX_3|MBX_2|MBX_1|MBX_0; 5248 5249 mcp->buf_size = mreq->transfer_size; 5250 mcp->tov = MBX_TOV_SECONDS; 5251 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; 5252 5253 rval = qla2x00_mailbox_command(vha, mcp); 5254 5255 if (rval != QLA_SUCCESS) { 5256 ql_dbg(ql_dbg_mbx, vha, 0x10f8, 5257 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[18]=%x " 5258 "mb[19]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], 5259 mcp->mb[3], mcp->mb[18], mcp->mb[19]); 5260 } else { 5261 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f9, 5262 "Done %s.\n", __func__); 5263 } 5264 5265 /* Copy mailbox information */ 5266 memcpy( mresp, mcp->mb, 64); 5267 return rval; 5268 } 5269 5270 int 5271 qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, 5272 uint16_t *mresp) 5273 { 5274 int rval; 5275 mbx_cmd_t mc; 5276 mbx_cmd_t *mcp = &mc; 5277 struct qla_hw_data *ha = vha->hw; 5278 5279 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fa, 5280 "Entered %s.\n", __func__); 5281 5282 memset(mcp->mb, 0 , sizeof(mcp->mb)); 5283 mcp->mb[0] = MBC_DIAGNOSTIC_ECHO; 5284 /* BIT_6 specifies 64bit address */ 5285 mcp->mb[1] = mreq->options | BIT_15 | BIT_6; 5286 if (IS_CNA_CAPABLE(ha)) { 5287 mcp->mb[2] = vha->fcoe_fcf_idx; 5288 } 5289 mcp->mb[16] = LSW(mreq->rcv_dma); 5290 mcp->mb[17] = MSW(mreq->rcv_dma); 5291 mcp->mb[6] = LSW(MSD(mreq->rcv_dma)); 5292 mcp->mb[7] = MSW(MSD(mreq->rcv_dma)); 5293 5294 mcp->mb[10] = 
LSW(mreq->transfer_size); 5295 5296 mcp->mb[14] = LSW(mreq->send_dma); 5297 mcp->mb[15] = MSW(mreq->send_dma); 5298 mcp->mb[20] = LSW(MSD(mreq->send_dma)); 5299 mcp->mb[21] = MSW(MSD(mreq->send_dma)); 5300 5301 mcp->out_mb = MBX_21|MBX_20|MBX_17|MBX_16|MBX_15| 5302 MBX_14|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0; 5303 if (IS_CNA_CAPABLE(ha)) 5304 mcp->out_mb |= MBX_2; 5305 5306 mcp->in_mb = MBX_0; 5307 if (IS_CNA_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || 5308 IS_QLA2031(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) 5309 mcp->in_mb |= MBX_1; 5310 if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) || 5311 IS_QLA28XX(ha)) 5312 mcp->in_mb |= MBX_3; 5313 5314 mcp->tov = MBX_TOV_SECONDS; 5315 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; 5316 mcp->buf_size = mreq->transfer_size; 5317 5318 rval = qla2x00_mailbox_command(vha, mcp); 5319 5320 if (rval != QLA_SUCCESS) { 5321 ql_dbg(ql_dbg_mbx, vha, 0x10fb, 5322 "Failed=%x mb[0]=%x mb[1]=%x.\n", 5323 rval, mcp->mb[0], mcp->mb[1]); 5324 } else { 5325 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fc, 5326 "Done %s.\n", __func__); 5327 } 5328 5329 /* Copy mailbox information */ 5330 memcpy(mresp, mcp->mb, 64); 5331 return rval; 5332 } 5333 5334 int 5335 qla84xx_reset_chip(scsi_qla_host_t *vha, uint16_t enable_diagnostic) 5336 { 5337 int rval; 5338 mbx_cmd_t mc; 5339 mbx_cmd_t *mcp = &mc; 5340 5341 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fd, 5342 "Entered %s enable_diag=%d.\n", __func__, enable_diagnostic); 5343 5344 mcp->mb[0] = MBC_ISP84XX_RESET; 5345 mcp->mb[1] = enable_diagnostic; 5346 mcp->out_mb = MBX_1|MBX_0; 5347 mcp->in_mb = MBX_1|MBX_0; 5348 mcp->tov = MBX_TOV_SECONDS; 5349 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; 5350 rval = qla2x00_mailbox_command(vha, mcp); 5351 5352 if (rval != QLA_SUCCESS) 5353 ql_dbg(ql_dbg_mbx, vha, 0x10fe, "Failed=%x.\n", rval); 5354 else 5355 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ff, 5356 "Done %s.\n", __func__); 5357 5358 return rval; 5359 } 5360 5361 int 5362 qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data) 5363 { 5364 int rval; 5365 mbx_cmd_t mc; 5366 mbx_cmd_t *mcp = &mc; 5367 5368 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1100, 5369 "Entered %s.\n", __func__); 5370 5371 if (!IS_FWI2_CAPABLE(vha->hw)) 5372 return QLA_FUNCTION_FAILED; 5373 5374 mcp->mb[0] = MBC_WRITE_RAM_WORD_EXTENDED; 5375 mcp->mb[1] = LSW(risc_addr); 5376 mcp->mb[2] = LSW(data); 5377 mcp->mb[3] = MSW(data); 5378 mcp->mb[8] = MSW(risc_addr); 5379 mcp->out_mb = MBX_8|MBX_3|MBX_2|MBX_1|MBX_0; 5380 mcp->in_mb = MBX_1|MBX_0; 5381 mcp->tov = 30; 5382 mcp->flags = 0; 5383 rval = qla2x00_mailbox_command(vha, mcp); 5384 if (rval != QLA_SUCCESS) { 5385 ql_dbg(ql_dbg_mbx, vha, 0x1101, 5386 "Failed=%x mb[0]=%x mb[1]=%x.\n", 5387 rval, mcp->mb[0], mcp->mb[1]); 5388 } else { 5389 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1102, 5390 "Done %s.\n", __func__); 5391 } 5392 5393 return rval; 5394 } 5395 5396 int 5397 qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb) 5398 { 5399 int rval; 5400 uint32_t stat, timer; 5401 uint16_t mb0 = 0; 5402 struct qla_hw_data *ha = vha->hw; 5403 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 5404 5405 rval = QLA_SUCCESS; 5406 5407 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1103, 5408 "Entered %s.\n", __func__); 5409 5410 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); 5411 5412 /* Write the MBC data to the registers */ 5413 wrt_reg_word(®->mailbox0, MBC_WRITE_MPI_REGISTER); 5414 wrt_reg_word(®->mailbox1, mb[0]); 5415 wrt_reg_word(®->mailbox2, mb[1]); 
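	/*
	 * Descriptive note: unlike the other routines in this file, this
	 * path programs the mailbox registers directly and busy-polls
	 * host_status below rather than going through
	 * qla2x00_mailbox_command(), presumably because it can be invoked
	 * from contexts where the normal, sleeping mailbox path is not
	 * usable.  mailbox0 carries the MBC_WRITE_MPI_REGISTER opcode and
	 * the caller's mb[0..3] land in mailbox1..mailbox4; the
	 * 6,000,000 x 5us poll loop below gives roughly a 30 second budget.
	 */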
5416 wrt_reg_word(®->mailbox3, mb[2]); 5417 wrt_reg_word(®->mailbox4, mb[3]); 5418 5419 wrt_reg_dword(®->hccr, HCCRX_SET_HOST_INT); 5420 5421 /* Poll for MBC interrupt */ 5422 for (timer = 6000000; timer; timer--) { 5423 /* Check for pending interrupts. */ 5424 stat = rd_reg_dword(®->host_status); 5425 if (stat & HSRX_RISC_INT) { 5426 stat &= 0xff; 5427 5428 if (stat == 0x1 || stat == 0x2 || 5429 stat == 0x10 || stat == 0x11) { 5430 set_bit(MBX_INTERRUPT, 5431 &ha->mbx_cmd_flags); 5432 mb0 = rd_reg_word(®->mailbox0); 5433 wrt_reg_dword(®->hccr, 5434 HCCRX_CLR_RISC_INT); 5435 rd_reg_dword(®->hccr); 5436 break; 5437 } 5438 } 5439 udelay(5); 5440 } 5441 5442 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) 5443 rval = mb0 & MBS_MASK; 5444 else 5445 rval = QLA_FUNCTION_FAILED; 5446 5447 if (rval != QLA_SUCCESS) { 5448 ql_dbg(ql_dbg_mbx, vha, 0x1104, 5449 "Failed=%x mb[0]=%x.\n", rval, mb[0]); 5450 } else { 5451 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1105, 5452 "Done %s.\n", __func__); 5453 } 5454 5455 return rval; 5456 } 5457 5458 /* Set the specified data rate */ 5459 int 5460 qla2x00_set_data_rate(scsi_qla_host_t *vha, uint16_t mode) 5461 { 5462 int rval; 5463 mbx_cmd_t mc; 5464 mbx_cmd_t *mcp = &mc; 5465 struct qla_hw_data *ha = vha->hw; 5466 uint16_t val; 5467 5468 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106, 5469 "Entered %s speed:0x%x mode:0x%x.\n", __func__, ha->set_data_rate, 5470 mode); 5471 5472 if (!IS_FWI2_CAPABLE(ha)) 5473 return QLA_FUNCTION_FAILED; 5474 5475 memset(mcp, 0, sizeof(*mcp)); 5476 switch (ha->set_data_rate) { 5477 case PORT_SPEED_AUTO: 5478 case PORT_SPEED_4GB: 5479 case PORT_SPEED_8GB: 5480 case PORT_SPEED_16GB: 5481 case PORT_SPEED_32GB: 5482 val = ha->set_data_rate; 5483 break; 5484 default: 5485 ql_log(ql_log_warn, vha, 0x1199, 5486 "Unrecognized speed setting:%d. 
Setting Autoneg\n", 5487 ha->set_data_rate); 5488 val = ha->set_data_rate = PORT_SPEED_AUTO; 5489 break; 5490 } 5491 5492 mcp->mb[0] = MBC_DATA_RATE; 5493 mcp->mb[1] = mode; 5494 mcp->mb[2] = val; 5495 5496 mcp->out_mb = MBX_2|MBX_1|MBX_0; 5497 mcp->in_mb = MBX_2|MBX_1|MBX_0; 5498 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) 5499 mcp->in_mb |= MBX_4|MBX_3; 5500 mcp->tov = MBX_TOV_SECONDS; 5501 mcp->flags = 0; 5502 rval = qla2x00_mailbox_command(vha, mcp); 5503 if (rval != QLA_SUCCESS) { 5504 ql_dbg(ql_dbg_mbx, vha, 0x1107, 5505 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5506 } else { 5507 if (mcp->mb[1] != 0x7) 5508 ql_dbg(ql_dbg_mbx, vha, 0x1179, 5509 "Speed set:0x%x\n", mcp->mb[1]); 5510 5511 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108, 5512 "Done %s.\n", __func__); 5513 } 5514 5515 return rval; 5516 } 5517 5518 int 5519 qla2x00_get_data_rate(scsi_qla_host_t *vha) 5520 { 5521 int rval; 5522 mbx_cmd_t mc; 5523 mbx_cmd_t *mcp = &mc; 5524 struct qla_hw_data *ha = vha->hw; 5525 5526 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106, 5527 "Entered %s.\n", __func__); 5528 5529 if (!IS_FWI2_CAPABLE(ha)) 5530 return QLA_FUNCTION_FAILED; 5531 5532 mcp->mb[0] = MBC_DATA_RATE; 5533 mcp->mb[1] = QLA_GET_DATA_RATE; 5534 mcp->out_mb = MBX_1|MBX_0; 5535 mcp->in_mb = MBX_2|MBX_1|MBX_0; 5536 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) 5537 mcp->in_mb |= MBX_3; 5538 mcp->tov = MBX_TOV_SECONDS; 5539 mcp->flags = 0; 5540 rval = qla2x00_mailbox_command(vha, mcp); 5541 if (rval != QLA_SUCCESS) { 5542 ql_dbg(ql_dbg_mbx, vha, 0x1107, 5543 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5544 } else { 5545 if (mcp->mb[1] != 0x7) 5546 ha->link_data_rate = mcp->mb[1]; 5547 5548 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 5549 if (mcp->mb[4] & BIT_0) 5550 ql_log(ql_log_info, vha, 0x11a2, 5551 "FEC=enabled (data rate).\n"); 5552 } 5553 5554 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108, 5555 "Done %s.\n", __func__); 5556 if (mcp->mb[1] != 0x7) 5557 ha->link_data_rate = mcp->mb[1]; 5558 } 5559 5560 return rval; 5561 } 5562 5563 int 5564 qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb) 5565 { 5566 int rval; 5567 mbx_cmd_t mc; 5568 mbx_cmd_t *mcp = &mc; 5569 struct qla_hw_data *ha = vha->hw; 5570 5571 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1109, 5572 "Entered %s.\n", __func__); 5573 5574 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA8044(ha) && 5575 !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 5576 return QLA_FUNCTION_FAILED; 5577 mcp->mb[0] = MBC_GET_PORT_CONFIG; 5578 mcp->out_mb = MBX_0; 5579 mcp->in_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 5580 mcp->tov = MBX_TOV_SECONDS; 5581 mcp->flags = 0; 5582 5583 rval = qla2x00_mailbox_command(vha, mcp); 5584 5585 if (rval != QLA_SUCCESS) { 5586 ql_dbg(ql_dbg_mbx, vha, 0x110a, 5587 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5588 } else { 5589 /* Copy all bits to preserve original value */ 5590 memcpy(mb, &mcp->mb[1], sizeof(uint16_t) * 4); 5591 5592 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110b, 5593 "Done %s.\n", __func__); 5594 } 5595 return rval; 5596 } 5597 5598 int 5599 qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb) 5600 { 5601 int rval; 5602 mbx_cmd_t mc; 5603 mbx_cmd_t *mcp = &mc; 5604 5605 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110c, 5606 "Entered %s.\n", __func__); 5607 5608 mcp->mb[0] = MBC_SET_PORT_CONFIG; 5609 /* Copy all bits to preserve original setting */ 5610 memcpy(&mcp->mb[1], mb, sizeof(uint16_t) * 4); 5611 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 5612 mcp->in_mb = MBX_0; 5613 mcp->tov = 
MBX_TOV_SECONDS; 5614 mcp->flags = 0; 5615 rval = qla2x00_mailbox_command(vha, mcp); 5616 5617 if (rval != QLA_SUCCESS) { 5618 ql_dbg(ql_dbg_mbx, vha, 0x110d, 5619 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5620 } else 5621 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110e, 5622 "Done %s.\n", __func__); 5623 5624 return rval; 5625 } 5626 5627 5628 int 5629 qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority, 5630 uint16_t *mb) 5631 { 5632 int rval; 5633 mbx_cmd_t mc; 5634 mbx_cmd_t *mcp = &mc; 5635 struct qla_hw_data *ha = vha->hw; 5636 5637 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110f, 5638 "Entered %s.\n", __func__); 5639 5640 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha)) 5641 return QLA_FUNCTION_FAILED; 5642 5643 mcp->mb[0] = MBC_PORT_PARAMS; 5644 mcp->mb[1] = loop_id; 5645 if (ha->flags.fcp_prio_enabled) 5646 mcp->mb[2] = BIT_1; 5647 else 5648 mcp->mb[2] = BIT_2; 5649 mcp->mb[4] = priority & 0xf; 5650 mcp->mb[9] = vha->vp_idx; 5651 mcp->out_mb = MBX_9|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 5652 mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0; 5653 mcp->tov = 30; 5654 mcp->flags = 0; 5655 rval = qla2x00_mailbox_command(vha, mcp); 5656 if (mb != NULL) { 5657 mb[0] = mcp->mb[0]; 5658 mb[1] = mcp->mb[1]; 5659 mb[3] = mcp->mb[3]; 5660 mb[4] = mcp->mb[4]; 5661 } 5662 5663 if (rval != QLA_SUCCESS) { 5664 ql_dbg(ql_dbg_mbx, vha, 0x10cd, "Failed=%x.\n", rval); 5665 } else { 5666 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10cc, 5667 "Done %s.\n", __func__); 5668 } 5669 5670 return rval; 5671 } 5672 5673 int 5674 qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp) 5675 { 5676 int rval = QLA_FUNCTION_FAILED; 5677 struct qla_hw_data *ha = vha->hw; 5678 uint8_t byte; 5679 5680 if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha)) { 5681 ql_dbg(ql_dbg_mbx, vha, 0x1150, 5682 "Thermal not supported by this card.\n"); 5683 return rval; 5684 } 5685 5686 if (IS_QLA25XX(ha)) { 5687 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC && 5688 ha->pdev->subsystem_device == 0x0175) { 5689 rval = qla2x00_read_sfp(vha, 0, &byte, 5690 0x98, 0x1, 1, BIT_13|BIT_0); 5691 *temp = byte; 5692 return rval; 5693 } 5694 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP && 5695 ha->pdev->subsystem_device == 0x338e) { 5696 rval = qla2x00_read_sfp(vha, 0, &byte, 5697 0x98, 0x1, 1, BIT_15|BIT_14|BIT_0); 5698 *temp = byte; 5699 return rval; 5700 } 5701 ql_dbg(ql_dbg_mbx, vha, 0x10c9, 5702 "Thermal not supported by this card.\n"); 5703 return rval; 5704 } 5705 5706 if (IS_QLA82XX(ha)) { 5707 *temp = qla82xx_read_temperature(vha); 5708 rval = QLA_SUCCESS; 5709 return rval; 5710 } else if (IS_QLA8044(ha)) { 5711 *temp = qla8044_read_temperature(vha); 5712 rval = QLA_SUCCESS; 5713 return rval; 5714 } 5715 5716 rval = qla2x00_read_asic_temperature(vha, temp); 5717 return rval; 5718 } 5719 5720 int 5721 qla82xx_mbx_intr_enable(scsi_qla_host_t *vha) 5722 { 5723 int rval; 5724 struct qla_hw_data *ha = vha->hw; 5725 mbx_cmd_t mc; 5726 mbx_cmd_t *mcp = &mc; 5727 5728 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1017, 5729 "Entered %s.\n", __func__); 5730 5731 if (!IS_FWI2_CAPABLE(ha)) 5732 return QLA_FUNCTION_FAILED; 5733 5734 memset(mcp, 0, sizeof(mbx_cmd_t)); 5735 mcp->mb[0] = MBC_TOGGLE_INTERRUPT; 5736 mcp->mb[1] = 1; 5737 5738 mcp->out_mb = MBX_1|MBX_0; 5739 mcp->in_mb = MBX_0; 5740 mcp->tov = 30; 5741 mcp->flags = 0; 5742 5743 rval = qla2x00_mailbox_command(vha, mcp); 5744 if (rval != QLA_SUCCESS) { 5745 ql_dbg(ql_dbg_mbx, vha, 0x1016, 5746 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5747 } else { 
5748 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100e, 5749 "Done %s.\n", __func__); 5750 } 5751 5752 return rval; 5753 } 5754 5755 int 5756 qla82xx_mbx_intr_disable(scsi_qla_host_t *vha) 5757 { 5758 int rval; 5759 struct qla_hw_data *ha = vha->hw; 5760 mbx_cmd_t mc; 5761 mbx_cmd_t *mcp = &mc; 5762 5763 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100d, 5764 "Entered %s.\n", __func__); 5765 5766 if (!IS_P3P_TYPE(ha)) 5767 return QLA_FUNCTION_FAILED; 5768 5769 memset(mcp, 0, sizeof(mbx_cmd_t)); 5770 mcp->mb[0] = MBC_TOGGLE_INTERRUPT; 5771 mcp->mb[1] = 0; 5772 5773 mcp->out_mb = MBX_1|MBX_0; 5774 mcp->in_mb = MBX_0; 5775 mcp->tov = 30; 5776 mcp->flags = 0; 5777 5778 rval = qla2x00_mailbox_command(vha, mcp); 5779 if (rval != QLA_SUCCESS) { 5780 ql_dbg(ql_dbg_mbx, vha, 0x100c, 5781 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5782 } else { 5783 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100b, 5784 "Done %s.\n", __func__); 5785 } 5786 5787 return rval; 5788 } 5789 5790 int 5791 qla82xx_md_get_template_size(scsi_qla_host_t *vha) 5792 { 5793 struct qla_hw_data *ha = vha->hw; 5794 mbx_cmd_t mc; 5795 mbx_cmd_t *mcp = &mc; 5796 int rval = QLA_FUNCTION_FAILED; 5797 5798 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111f, 5799 "Entered %s.\n", __func__); 5800 5801 memset(mcp->mb, 0 , sizeof(mcp->mb)); 5802 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); 5803 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); 5804 mcp->mb[2] = LSW(RQST_TMPLT_SIZE); 5805 mcp->mb[3] = MSW(RQST_TMPLT_SIZE); 5806 5807 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 5808 mcp->in_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8| 5809 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 5810 5811 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; 5812 mcp->tov = MBX_TOV_SECONDS; 5813 rval = qla2x00_mailbox_command(vha, mcp); 5814 5815 /* Always copy back return mailbox values. 
*/ 5816 if (rval != QLA_SUCCESS) { 5817 ql_dbg(ql_dbg_mbx, vha, 0x1120, 5818 "mailbox command FAILED=0x%x, subcode=%x.\n", 5819 (mcp->mb[1] << 16) | mcp->mb[0], 5820 (mcp->mb[3] << 16) | mcp->mb[2]); 5821 } else { 5822 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1121, 5823 "Done %s.\n", __func__); 5824 ha->md_template_size = ((mcp->mb[3] << 16) | mcp->mb[2]); 5825 if (!ha->md_template_size) { 5826 ql_dbg(ql_dbg_mbx, vha, 0x1122, 5827 "Null template size obtained.\n"); 5828 rval = QLA_FUNCTION_FAILED; 5829 } 5830 } 5831 return rval; 5832 } 5833 5834 int 5835 qla82xx_md_get_template(scsi_qla_host_t *vha) 5836 { 5837 struct qla_hw_data *ha = vha->hw; 5838 mbx_cmd_t mc; 5839 mbx_cmd_t *mcp = &mc; 5840 int rval = QLA_FUNCTION_FAILED; 5841 5842 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1123, 5843 "Entered %s.\n", __func__); 5844 5845 ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev, 5846 ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL); 5847 if (!ha->md_tmplt_hdr) { 5848 ql_log(ql_log_warn, vha, 0x1124, 5849 "Unable to allocate memory for Minidump template.\n"); 5850 return rval; 5851 } 5852 5853 memset(mcp->mb, 0 , sizeof(mcp->mb)); 5854 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); 5855 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); 5856 mcp->mb[2] = LSW(RQST_TMPLT); 5857 mcp->mb[3] = MSW(RQST_TMPLT); 5858 mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma)); 5859 mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma)); 5860 mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma)); 5861 mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma)); 5862 mcp->mb[8] = LSW(ha->md_template_size); 5863 mcp->mb[9] = MSW(ha->md_template_size); 5864 5865 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; 5866 mcp->tov = MBX_TOV_SECONDS; 5867 mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8| 5868 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 5869 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0; 5870 rval = qla2x00_mailbox_command(vha, mcp); 5871 5872 if (rval != QLA_SUCCESS) { 5873 ql_dbg(ql_dbg_mbx, vha, 0x1125, 5874 "mailbox command FAILED=0x%x, subcode=%x.\n", 5875 ((mcp->mb[1] << 16) | mcp->mb[0]), 5876 ((mcp->mb[3] << 16) | mcp->mb[2])); 5877 } else 5878 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1126, 5879 "Done %s.\n", __func__); 5880 return rval; 5881 } 5882 5883 int 5884 qla8044_md_get_template(scsi_qla_host_t *vha) 5885 { 5886 struct qla_hw_data *ha = vha->hw; 5887 mbx_cmd_t mc; 5888 mbx_cmd_t *mcp = &mc; 5889 int rval = QLA_FUNCTION_FAILED; 5890 int offset = 0, size = MINIDUMP_SIZE_36K; 5891 5892 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11f, 5893 "Entered %s.\n", __func__); 5894 5895 ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev, 5896 ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL); 5897 if (!ha->md_tmplt_hdr) { 5898 ql_log(ql_log_warn, vha, 0xb11b, 5899 "Unable to allocate memory for Minidump template.\n"); 5900 return rval; 5901 } 5902 5903 memset(mcp->mb, 0 , sizeof(mcp->mb)); 5904 while (offset < ha->md_template_size) { 5905 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); 5906 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); 5907 mcp->mb[2] = LSW(RQST_TMPLT); 5908 mcp->mb[3] = MSW(RQST_TMPLT); 5909 mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma + offset)); 5910 mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma + offset)); 5911 mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma + offset)); 5912 mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma + offset)); 5913 mcp->mb[8] = LSW(size); 5914 mcp->mb[9] = MSW(size); 5915 mcp->mb[10] = offset & 0x0000FFFF; 5916 mcp->mb[11] = offset & 0xFFFF0000; 5917 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; 
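		/*
		 * Descriptive note: the ISP8044 template is fetched in
		 * MINIDUMP_SIZE_36K chunks.  Each pass points mb[4..7] at
		 * the DMA buffer advanced by 'offset', puts the chunk size
		 * in mb[8..9] and the running offset in mb[10]/mb[11].  As
		 * written, mb[11] = offset & 0xFFFF0000 truncates to 0 in a
		 * 16-bit mailbox word; MSW(offset) looks like the intended
		 * encoding, but the code is left as-is here.
		 */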
5918 mcp->tov = MBX_TOV_SECONDS; 5919 mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8| 5920 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 5921 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0; 5922 rval = qla2x00_mailbox_command(vha, mcp); 5923 5924 if (rval != QLA_SUCCESS) { 5925 ql_dbg(ql_dbg_mbx, vha, 0xb11c, 5926 "mailbox command FAILED=0x%x, subcode=%x.\n", 5927 ((mcp->mb[1] << 16) | mcp->mb[0]), 5928 ((mcp->mb[3] << 16) | mcp->mb[2])); 5929 return rval; 5930 } else 5931 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11d, 5932 "Done %s.\n", __func__); 5933 offset = offset + size; 5934 } 5935 return rval; 5936 } 5937 5938 int 5939 qla81xx_set_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg) 5940 { 5941 int rval; 5942 struct qla_hw_data *ha = vha->hw; 5943 mbx_cmd_t mc; 5944 mbx_cmd_t *mcp = &mc; 5945 5946 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha)) 5947 return QLA_FUNCTION_FAILED; 5948 5949 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1133, 5950 "Entered %s.\n", __func__); 5951 5952 memset(mcp, 0, sizeof(mbx_cmd_t)); 5953 mcp->mb[0] = MBC_SET_LED_CONFIG; 5954 mcp->mb[1] = led_cfg[0]; 5955 mcp->mb[2] = led_cfg[1]; 5956 if (IS_QLA8031(ha)) { 5957 mcp->mb[3] = led_cfg[2]; 5958 mcp->mb[4] = led_cfg[3]; 5959 mcp->mb[5] = led_cfg[4]; 5960 mcp->mb[6] = led_cfg[5]; 5961 } 5962 5963 mcp->out_mb = MBX_2|MBX_1|MBX_0; 5964 if (IS_QLA8031(ha)) 5965 mcp->out_mb |= MBX_6|MBX_5|MBX_4|MBX_3; 5966 mcp->in_mb = MBX_0; 5967 mcp->tov = 30; 5968 mcp->flags = 0; 5969 5970 rval = qla2x00_mailbox_command(vha, mcp); 5971 if (rval != QLA_SUCCESS) { 5972 ql_dbg(ql_dbg_mbx, vha, 0x1134, 5973 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5974 } else { 5975 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1135, 5976 "Done %s.\n", __func__); 5977 } 5978 5979 return rval; 5980 } 5981 5982 int 5983 qla81xx_get_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg) 5984 { 5985 int rval; 5986 struct qla_hw_data *ha = vha->hw; 5987 mbx_cmd_t mc; 5988 mbx_cmd_t *mcp = &mc; 5989 5990 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha)) 5991 return QLA_FUNCTION_FAILED; 5992 5993 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1136, 5994 "Entered %s.\n", __func__); 5995 5996 memset(mcp, 0, sizeof(mbx_cmd_t)); 5997 mcp->mb[0] = MBC_GET_LED_CONFIG; 5998 5999 mcp->out_mb = MBX_0; 6000 mcp->in_mb = MBX_2|MBX_1|MBX_0; 6001 if (IS_QLA8031(ha)) 6002 mcp->in_mb |= MBX_6|MBX_5|MBX_4|MBX_3; 6003 mcp->tov = 30; 6004 mcp->flags = 0; 6005 6006 rval = qla2x00_mailbox_command(vha, mcp); 6007 if (rval != QLA_SUCCESS) { 6008 ql_dbg(ql_dbg_mbx, vha, 0x1137, 6009 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 6010 } else { 6011 led_cfg[0] = mcp->mb[1]; 6012 led_cfg[1] = mcp->mb[2]; 6013 if (IS_QLA8031(ha)) { 6014 led_cfg[2] = mcp->mb[3]; 6015 led_cfg[3] = mcp->mb[4]; 6016 led_cfg[4] = mcp->mb[5]; 6017 led_cfg[5] = mcp->mb[6]; 6018 } 6019 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1138, 6020 "Done %s.\n", __func__); 6021 } 6022 6023 return rval; 6024 } 6025 6026 int 6027 qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable) 6028 { 6029 int rval; 6030 struct qla_hw_data *ha = vha->hw; 6031 mbx_cmd_t mc; 6032 mbx_cmd_t *mcp = &mc; 6033 6034 if (!IS_P3P_TYPE(ha)) 6035 return QLA_FUNCTION_FAILED; 6036 6037 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1127, 6038 "Entered %s.\n", __func__); 6039 6040 memset(mcp, 0, sizeof(mbx_cmd_t)); 6041 mcp->mb[0] = MBC_SET_LED_CONFIG; 6042 if (enable) 6043 mcp->mb[7] = 0xE; 6044 else 6045 mcp->mb[7] = 0xD; 6046 6047 mcp->out_mb = MBX_7|MBX_0; 6048 mcp->in_mb = MBX_0; 6049 mcp->tov = MBX_TOV_SECONDS; 6050 mcp->flags = 0; 6051 6052 rval = qla2x00_mailbox_command(vha, 
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1128,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1129,
		    "Done %s.\n", __func__);
	}

	return rval;
}

int
qla83xx_wr_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1130,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_WRITE_REMOTE_REG;
	mcp->mb[1] = LSW(reg);
	mcp->mb[2] = MSW(reg);
	mcp->mb[3] = LSW(data);
	mcp->mb[4] = MSW(data);
	mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;

	mcp->in_mb = MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1131,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1132,
		    "Done %s.\n", __func__);
	}

	return rval;
}

int
qla2x00_port_logout(scsi_qla_host_t *vha, struct fc_port *fcport)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113b,
		    "Implicit LOGO Unsupported.\n");
		return QLA_FUNCTION_FAILED;
	}

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113c,
	    "Entering %s.\n", __func__);

	/* Perform Implicit LOGO. */
	mcp->mb[0] = MBC_PORT_LOGOUT;
	mcp->mb[1] = fcport->loop_id;
	mcp->mb[10] = BIT_15;
	mcp->out_mb = MBX_10|MBX_1|MBX_0;
	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS)
		ql_dbg(ql_dbg_mbx, vha, 0x113d,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	else
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113e,
		    "Done %s.\n", __func__);

	return rval;
}

int
qla83xx_rd_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t *data)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;
	unsigned long retry_max_time = jiffies + (2 * HZ);

	if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx, vha, 0x114b, "Entered %s.\n", __func__);

retry_rd_reg:
	mcp->mb[0] = MBC_READ_REMOTE_REG;
	mcp->mb[1] = LSW(reg);
	mcp->mb[2] = MSW(reg);
	mcp->out_mb = MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x114c,
		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
		    rval, mcp->mb[0], mcp->mb[1]);
	} else {
		*data = (mcp->mb[3] | (mcp->mb[4] << 16));
		if (*data == QLA8XXX_BAD_VALUE) {
			/*
			 * During soft-reset, CAMRAM register reads might
			 * return 0xbad0bad0, so retry for a maximum of 2
			 * seconds while reading CAMRAM registers.
			 */
			if (time_after(jiffies, retry_max_time)) {
				ql_dbg(ql_dbg_mbx, vha, 0x1141,
				    "Failure to read CAMRAM register. "
				    "data=0x%x.\n", *data);
				return QLA_FUNCTION_FAILED;
			}
			msleep(100);
			goto retry_rd_reg;
		}
		ql_dbg(ql_dbg_mbx, vha, 0x1142, "Done %s.\n", __func__);
	}

	return rval;
}

int
qla83xx_restart_nic_firmware(scsi_qla_host_t *vha)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA83XX(ha))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx, vha, 0x1143, "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_RESTART_NIC_FIRMWARE;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1144,
		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
		    rval, mcp->mb[0], mcp->mb[1]);
		qla2xxx_dump_fw(vha);
	} else {
		ql_dbg(ql_dbg_mbx, vha, 0x1145, "Done %s.\n", __func__);
	}

	return rval;
}

int
qla83xx_access_control(scsi_qla_host_t *vha, uint16_t options,
    uint32_t start_addr, uint32_t end_addr, uint16_t *sector_size)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	uint8_t subcode = (uint8_t)options;
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA8031(ha))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx, vha, 0x1146, "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_SET_ACCESS_CONTROL;
	mcp->mb[1] = options;
	mcp->out_mb = MBX_1|MBX_0;
	if (subcode & BIT_2) {
		mcp->mb[2] = LSW(start_addr);
		mcp->mb[3] = MSW(start_addr);
		mcp->mb[4] = LSW(end_addr);
		mcp->mb[5] = MSW(end_addr);
		mcp->out_mb |= MBX_5|MBX_4|MBX_3|MBX_2;
	}
	mcp->in_mb = MBX_2|MBX_1|MBX_0;
	if (!(subcode & (BIT_2 | BIT_5)))
		mcp->in_mb |= MBX_4|MBX_3;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1147,
		    "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[4]=%x.\n",
		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3],
		    mcp->mb[4]);
		qla2xxx_dump_fw(vha);
	} else {
		if (subcode & BIT_5)
			*sector_size = mcp->mb[1];
		else if (subcode & (BIT_6 | BIT_7)) {
			ql_dbg(ql_dbg_mbx, vha, 0x1148,
			    "Driver-lock id=%x%x", mcp->mb[4], mcp->mb[3]);
		} else if (subcode & (BIT_3 | BIT_4)) {
			ql_dbg(ql_dbg_mbx, vha, 0x1149,
			    "Flash-lock id=%x%x", mcp->mb[4], mcp->mb[3]);
		}
		ql_dbg(ql_dbg_mbx, vha, 0x114a, "Done %s.\n", __func__);
	}

	return rval;
}

int
qla2x00_dump_mctp_data(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
    uint32_t size)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	if (!IS_MCTP_CAPABLE(vha->hw))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114f,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
	mcp->mb[1] = LSW(addr);
	mcp->mb[2] = MSW(req_dma);
	mcp->mb[3] = LSW(req_dma);
	mcp->mb[4] = MSW(size);
	mcp->mb[5] = LSW(size);
	mcp->mb[6] = MSW(MSD(req_dma));
	mcp->mb[7] = LSW(MSD(req_dma));
	mcp->mb[8] = MSW(addr);
	/* Set RAM ID to valid; for MCTP the RAM ID is 0x40. */
	mcp->mb[10] = BIT_7 | 0x40;

	mcp->out_mb = MBX_10|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|
	    MBX_0;

	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x114e,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114d,
		    "Done %s.\n", __func__);
	}

	return rval;
}

int
qla26xx_dport_diagnostics(scsi_qla_host_t *vha,
    void *dd_buf, uint size, uint options)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	dma_addr_t dd_dma;

	if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) &&
	    !IS_QLA28XX(vha->hw))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x119f,
	    "Entered %s.\n", __func__);

	dd_dma = dma_map_single(&vha->hw->pdev->dev,
	    dd_buf, size, DMA_FROM_DEVICE);
	if (dma_mapping_error(&vha->hw->pdev->dev, dd_dma)) {
		ql_log(ql_log_warn, vha, 0x1194, "Failed to map dma buffer.\n");
		return QLA_MEMORY_ALLOC_FAILED;
	}

	memset(dd_buf, 0, size);

	mcp->mb[0] = MBC_DPORT_DIAGNOSTICS;
	mcp->mb[1] = options;
	mcp->mb[2] = MSW(LSD(dd_dma));
	mcp->mb[3] = LSW(LSD(dd_dma));
	mcp->mb[6] = MSW(MSD(dd_dma));
	mcp->mb[7] = LSW(MSD(dd_dma));
	mcp->mb[8] = size;
	mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->buf_size = size;
	mcp->flags = MBX_DMA_IN;
	mcp->tov = MBX_TOV_SECONDS * 4;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1195, "Failed=%x.\n", rval);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1196,
		    "Done %s.\n", __func__);
	}

	dma_unmap_single(&vha->hw->pdev->dev, dd_dma,
	    size, DMA_FROM_DEVICE);

	return rval;
}

static void qla2x00_async_mb_sp_done(srb_t *sp, int res)
{
	sp->u.iocb_cmd.u.mbx.rc = res;

	complete(&sp->u.iocb_cmd.u.mbx.comp);
	/* don't free sp here. Let the caller do the free */
}

/*
 * This routine uses the IOCB interface to send a mailbox command.  This
 * allows non-critical (non chip-setup) commands to go out in parallel.
 * See the illustrative usage sketch at the end of this file.
 */
int qla24xx_send_mb_cmd(struct scsi_qla_host *vha, mbx_cmd_t *mcp)
{
	int rval = QLA_FUNCTION_FAILED;
	srb_t *sp;
	struct srb_iocb *c;

	if (!vha->hw->flags.fw_started)
		goto done;

	sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
	if (!sp)
		goto done;

	sp->type = SRB_MB_IOCB;
	sp->name = mb_to_str(mcp->mb[0]);

	c = &sp->u.iocb_cmd;
	c->timeout = qla2x00_async_iocb_timeout;
	init_completion(&c->u.mbx.comp);

	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	memcpy(sp->u.iocb_cmd.u.mbx.out_mb, mcp->mb, SIZEOF_IOCB_MB_REG);

	sp->done = qla2x00_async_mb_sp_done;

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1018,
		    "%s: %s Failed submission. %x.\n",
		    __func__, sp->name, rval);
		goto done_free_sp;
	}

	ql_dbg(ql_dbg_mbx, vha, 0x113f, "MB:%s hndl %x submitted\n",
	    sp->name, sp->handle);

	wait_for_completion(&c->u.mbx.comp);
	memcpy(mcp->mb, sp->u.iocb_cmd.u.mbx.in_mb, SIZEOF_IOCB_MB_REG);

	rval = c->u.mbx.rc;
	switch (rval) {
	case QLA_FUNCTION_TIMEOUT:
		ql_dbg(ql_dbg_mbx, vha, 0x1140, "%s: %s Timeout. %x.\n",
		    __func__, sp->name, rval);
		break;
	case QLA_SUCCESS:
		ql_dbg(ql_dbg_mbx, vha, 0x119d, "%s: %s done.\n",
		    __func__, sp->name);
		break;
	default:
		ql_dbg(ql_dbg_mbx, vha, 0x119e, "%s: %s Failed. %x.\n",
		    __func__, sp->name, rval);
		break;
	}

done_free_sp:
	sp->free(sp);
done:
	return rval;
}

/*
 * qla24xx_gpdb_wait
 * NOTE: Do not call this routine from DPC thread
 */
int qla24xx_gpdb_wait(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
{
	int rval = QLA_FUNCTION_FAILED;
	dma_addr_t pd_dma;
	struct port_database_24xx *pd;
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t mc;

	if (!vha->hw->flags.fw_started)
		goto done;

	pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
	if (pd == NULL) {
		ql_log(ql_log_warn, vha, 0xd047,
		    "Failed to allocate port database structure.\n");
		goto done_free_sp;
	}

	memset(&mc, 0, sizeof(mc));
	mc.mb[0] = MBC_GET_PORT_DATABASE;
	mc.mb[1] = fcport->loop_id;
	mc.mb[2] = MSW(pd_dma);
	mc.mb[3] = LSW(pd_dma);
	mc.mb[6] = MSW(MSD(pd_dma));
	mc.mb[7] = LSW(MSD(pd_dma));
	mc.mb[9] = vha->vp_idx;
	mc.mb[10] = opt;

	rval = qla24xx_send_mb_cmd(vha, &mc);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1193,
		    "%s: %8phC fail\n", __func__, fcport->port_name);
		goto done_free_sp;
	}

	rval = __qla24xx_parse_gpdb(vha, fcport, pd);

	ql_dbg(ql_dbg_mbx, vha, 0x1197, "%s: %8phC done\n",
	    __func__, fcport->port_name);

done_free_sp:
	if (pd)
		dma_pool_free(ha->s_dma_pool, pd, pd_dma);
done:
	return rval;
}

int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport,
    struct port_database_24xx *pd)
{
	int rval = QLA_SUCCESS;
	uint64_t zero = 0;
	u8 current_login_state, last_login_state;

	if (NVME_TARGET(vha->hw, fcport)) {
		current_login_state = pd->current_login_state >> 4;
		last_login_state = pd->last_login_state >> 4;
	} else {
		current_login_state = pd->current_login_state & 0xf;
		last_login_state = pd->last_login_state & 0xf;
	}

	/* Check for logged in state. */
	if (current_login_state != PDS_PRLI_COMPLETE) {
		ql_dbg(ql_dbg_mbx, vha, 0x119a,
		    "Unable to verify login-state (%x/%x) for loop_id %x.\n",
		    current_login_state, last_login_state, fcport->loop_id);
		rval = QLA_FUNCTION_FAILED;
		goto gpd_error_out;
	}

	if (fcport->loop_id == FC_NO_LOOP_ID ||
	    (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
	     memcmp(fcport->port_name, pd->port_name, 8))) {
		/* We lost the device mid way. */
		rval = QLA_NOT_LOGGED_IN;
		goto gpd_error_out;
	}

	/* Names are little-endian. */
	memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
	memcpy(fcport->port_name, pd->port_name, WWN_SIZE);

	/* Get port_id of device. */
	fcport->d_id.b.domain = pd->port_id[0];
	fcport->d_id.b.area = pd->port_id[1];
	fcport->d_id.b.al_pa = pd->port_id[2];
	fcport->d_id.b.rsvd_1 = 0;

	if (NVME_TARGET(vha->hw, fcport)) {
		fcport->port_type = FCT_NVME;
		if ((pd->prli_svc_param_word_3[0] & BIT_5) == 0)
			fcport->port_type |= FCT_NVME_INITIATOR;
		if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
			fcport->port_type |= FCT_NVME_TARGET;
		if ((pd->prli_svc_param_word_3[0] & BIT_3) == 0)
			fcport->port_type |= FCT_NVME_DISCOVERY;
	} else {
		/* If not target must be initiator or unknown type. */
		if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
			fcport->port_type = FCT_INITIATOR;
		else
			fcport->port_type = FCT_TARGET;
	}
	/* Passback COS information. */
	fcport->supported_classes = (pd->flags & PDF_CLASS_2) ?
	    FC_COS_CLASS2 : FC_COS_CLASS3;

	if (pd->prli_svc_param_word_3[0] & BIT_7) {
		fcport->flags |= FCF_CONF_COMP_SUPPORTED;
		fcport->conf_compl_supported = 1;
	}

gpd_error_out:
	return rval;
}

/*
 * qla24xx_gidlist_wait
 * NOTE: don't call this routine from DPC thread.
 */
int qla24xx_gidlist_wait(struct scsi_qla_host *vha,
    void *id_list, dma_addr_t id_list_dma, uint16_t *entries)
{
	int rval = QLA_FUNCTION_FAILED;
	mbx_cmd_t mc;

	if (!vha->hw->flags.fw_started)
		goto done;

	memset(&mc, 0, sizeof(mc));
	mc.mb[0] = MBC_GET_ID_LIST;
	mc.mb[2] = MSW(id_list_dma);
	mc.mb[3] = LSW(id_list_dma);
	mc.mb[6] = MSW(MSD(id_list_dma));
	mc.mb[7] = LSW(MSD(id_list_dma));
	mc.mb[8] = 0;
	mc.mb[9] = vha->vp_idx;

	rval = qla24xx_send_mb_cmd(vha, &mc);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x119b,
		    "%s: fail\n", __func__);
	} else {
		*entries = mc.mb[1];
		ql_dbg(ql_dbg_mbx, vha, 0x119c,
		    "%s: done\n", __func__);
	}
done:
	return rval;
}

int qla27xx_set_zio_threshold(scsi_qla_host_t *vha, uint16_t value)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1200,
	    "Entered %s\n", __func__);

	memset(mcp->mb, 0, sizeof(mcp->mb));
	mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
	mcp->mb[1] = 1;
	mcp->mb[2] = value;
	mcp->out_mb = MBX_2 | MBX_1 | MBX_0;
	mcp->in_mb = MBX_2 | MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;

	rval = qla2x00_mailbox_command(vha, mcp);

	ql_dbg(ql_dbg_mbx, vha, 0x1201, "%s %x\n",
	    (rval != QLA_SUCCESS) ? "Failed" : "Done", rval);

	return rval;
}

int qla27xx_get_zio_threshold(scsi_qla_host_t *vha, uint16_t *value)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1203,
	    "Entered %s\n", __func__);

	memset(mcp->mb, 0, sizeof(mcp->mb));
	mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
	mcp->mb[1] = 0;
	mcp->out_mb = MBX_1 | MBX_0;
	mcp->in_mb = MBX_2 | MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;

	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval == QLA_SUCCESS)
		*value = mc.mb[2];

	ql_dbg(ql_dbg_mbx, vha, 0x1205, "%s %x\n",
	    (rval != QLA_SUCCESS) ? "Failed" : "Done", rval);

	return rval;
}

int
qla2x00_read_sfp_dev(struct scsi_qla_host *vha, char *buf, int count)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t iter, addr, offset;
	dma_addr_t phys_addr;
	int rval, c;
	u8 *sfp_data;

	memset(ha->sfp_data, 0, SFP_DEV_SIZE);
	addr = 0xa0;
	phys_addr = ha->sfp_data_dma;
	sfp_data = ha->sfp_data;
	offset = c = 0;

	for (iter = 0; iter < SFP_DEV_SIZE / SFP_BLOCK_SIZE; iter++) {
		if (iter == 4) {
			/* Skip to next device address. */
			addr = 0xa2;
			offset = 0;
		}

		rval = qla2x00_read_sfp(vha, phys_addr, sfp_data,
		    addr, offset, SFP_BLOCK_SIZE, BIT_1);
		if (rval != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x706d,
			    "Unable to read SFP data (%x/%x/%x).\n", rval,
			    addr, offset);

			return rval;
		}

		if (buf && (c < count)) {
			u16 sz;

			if ((count - c) >= SFP_BLOCK_SIZE)
				sz = SFP_BLOCK_SIZE;
			else
				sz = count - c;

			memcpy(buf, sfp_data, sz);
			buf += SFP_BLOCK_SIZE;
			c += sz;
		}
		phys_addr += SFP_BLOCK_SIZE;
		sfp_data += SFP_BLOCK_SIZE;
		offset += SFP_BLOCK_SIZE;
	}

	return rval;
}

int qla24xx_res_count_wait(struct scsi_qla_host *vha,
    uint16_t *out_mb, int out_mb_sz)
{
	int rval = QLA_FUNCTION_FAILED;
	mbx_cmd_t mc;

	if (!vha->hw->flags.fw_started)
		goto done;

	memset(&mc, 0, sizeof(mc));
	mc.mb[0] = MBC_GET_RESOURCE_COUNTS;

	rval = qla24xx_send_mb_cmd(vha, &mc);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0xffff,
		    "%s: fail\n", __func__);
	} else {
		if (out_mb_sz <= SIZEOF_IOCB_MB_REG)
			memcpy(out_mb, mc.mb, out_mb_sz);
		else
			memcpy(out_mb, mc.mb, SIZEOF_IOCB_MB_REG);

		ql_dbg(ql_dbg_mbx, vha, 0xffff,
		    "%s: done\n", __func__);
	}
done:
	return rval;
}

int qla28xx_secure_flash_update(scsi_qla_host_t *vha, uint16_t opts,
    uint16_t region, uint32_t len, dma_addr_t sfub_dma_addr,
    uint32_t sfub_len)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	mcp->mb[0] = MBC_SECURE_FLASH_UPDATE;
	mcp->mb[1] = opts;
	mcp->mb[2] = region;
	mcp->mb[3] = MSW(len);
	mcp->mb[4] = LSW(len);
	mcp->mb[5] = MSW(sfub_dma_addr);
	mcp->mb[6] = LSW(sfub_dma_addr);
	mcp->mb[7] = MSW(MSD(sfub_dma_addr));
	mcp->mb[8] = LSW(MSD(sfub_dma_addr));
	mcp->mb[9] = sfub_len;
	mcp->out_mb =
	    MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_2|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s(%ld): failed rval 0x%x, %x %x %x",
		    __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1],
		    mcp->mb[2]);
	}

	return rval;
}

int qla2xxx_write_remote_register(scsi_qla_host_t *vha, uint32_t addr,
    uint32_t data)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_WRITE_REMOTE_REG;
	mcp->mb[1] = LSW(addr);
	mcp->mb[2] = MSW(addr);
	mcp->mb[3] = LSW(data);
	mcp->mb[4] = MSW(data);
	mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x10e9,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
		    "Done %s.\n", __func__);
	}

	return rval;
}

int qla2xxx_read_remote_register(scsi_qla_host_t *vha, uint32_t addr,
    uint32_t *data)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_READ_REMOTE_REG;
	mcp->mb[1] = LSW(addr);
	mcp->mb[2] = MSW(addr);
	mcp->out_mb = MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	*data = (uint32_t)((((uint32_t)mcp->mb[4]) << 16) | mcp->mb[3]);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x10e9,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
		    "Done %s.\n", __func__);
	}

	return rval;
}

int
ql26xx_led_config(scsi_qla_host_t *vha, uint16_t options, uint16_t *led)
{
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	int rval;

	if (!IS_QLA2031(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx, vha, 0x7070, "Entered %s (options=%x).\n",
	    __func__, options);

	mcp->mb[0] = MBC_SET_GET_FC_LED_CONFIG;
	mcp->mb[1] = options;
	mcp->out_mb = MBX_1|MBX_0;
	mcp->in_mb = MBX_1|MBX_0;
	if (options & BIT_0) {
		if (options & BIT_1) {
			mcp->mb[2] = led[2];
			mcp->out_mb |= MBX_2;
		}
		if (options & BIT_2) {
			mcp->mb[3] = led[0];
			mcp->out_mb |= MBX_3;
		}
		if (options & BIT_3) {
			mcp->mb[4] = led[1];
			mcp->out_mb |= MBX_4;
		}
	} else {
		mcp->in_mb |= MBX_4|MBX_3|MBX_2;
	}
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval) {
		ql_dbg(ql_dbg_mbx, vha, 0x7071, "Failed %s %x (mb=%x,%x)\n",
		    __func__, rval, mcp->mb[0], mcp->mb[1]);
		return rval;
	}

	if (options & BIT_0) {
		ha->beacon_blink_led = 0;
		ql_dbg(ql_dbg_mbx, vha, 0x7072, "Done %s\n", __func__);
	} else {
		led[2] = mcp->mb[2];
		led[0] = mcp->mb[3];
		led[1] = mcp->mb[4];
		ql_dbg(ql_dbg_mbx, vha, 0x7073, "Done %s (led=%x,%x,%x)\n",
		    __func__, led[0], led[1], led[2]);
	}

	return rval;
}
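/*
 * Illustrative usage sketch (not part of the driver): how a caller might
 * issue a non-critical mailbox command through the IOCB-based
 * qla24xx_send_mb_cmd() path, here fetching the firmware resource counts
 * much as qla24xx_res_count_wait() does.  The calling context and error
 * handling below are assumptions for illustration only; the canonical
 * in-tree users remain qla24xx_gpdb_wait(), qla24xx_gidlist_wait() and
 * qla24xx_res_count_wait().
 *
 *	mbx_cmd_t mc;
 *
 *	memset(&mc, 0, sizeof(mc));
 *	mc.mb[0] = MBC_GET_RESOURCE_COUNTS;
 *	if (qla24xx_send_mb_cmd(vha, &mc) == QLA_SUCCESS) {
 *		// mc.mb[] now holds the mailbox registers returned by the
 *		// firmware for this command.
 *		ql_dbg(ql_dbg_mbx, vha, 0xffff,
 *		    "resource counts: mb[1]=%x mb[2]=%x\n",
 *		    mc.mb[1], mc.mb[2]);
 *	}
 */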