/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/delay.h>
#include <linux/gfp.h>

static struct mb_cmd_name {
	uint16_t cmd;
	const char *str;
} mb_str[] = {
	{MBC_GET_PORT_DATABASE,		"GPDB"},
	{MBC_GET_ID_LIST,		"GIDList"},
	{MBC_GET_LINK_PRIV_STATS,	"Stats"},
	{MBC_GET_RESOURCE_COUNTS,	"ResCnt"},
};

static const char *mb_to_str(uint16_t cmd)
{
	int i;
	struct mb_cmd_name *e;

	for (i = 0; i < ARRAY_SIZE(mb_str); i++) {
		e = mb_str + i;
		if (cmd == e->cmd)
			return e->str;
	}
	return "unknown";
}

static struct rom_cmd {
	uint16_t cmd;
} rom_cmds[] = {
	{ MBC_LOAD_RAM },
	{ MBC_EXECUTE_FIRMWARE },
	{ MBC_READ_RAM_WORD },
	{ MBC_MAILBOX_REGISTER_TEST },
	{ MBC_VERIFY_CHECKSUM },
	{ MBC_GET_FIRMWARE_VERSION },
	{ MBC_LOAD_RISC_RAM },
	{ MBC_DUMP_RISC_RAM },
	{ MBC_LOAD_RISC_RAM_EXTENDED },
	{ MBC_DUMP_RISC_RAM_EXTENDED },
	{ MBC_WRITE_RAM_WORD_EXTENDED },
	{ MBC_READ_RAM_EXTENDED },
	{ MBC_GET_RESOURCE_COUNTS },
	{ MBC_SET_FIRMWARE_OPTION },
	{ MBC_MID_INITIALIZE_FIRMWARE },
	{ MBC_GET_FIRMWARE_STATE },
	{ MBC_GET_MEM_OFFLOAD_CNTRL_STAT },
	{ MBC_GET_RETRY_COUNT },
	{ MBC_TRACE_CONTROL },
	{ MBC_INITIALIZE_MULTIQ },
	{ MBC_IOCB_COMMAND_A64 },
	{ MBC_GET_ADAPTER_LOOP_ID },
	{ MBC_READ_SFP },
};

static int is_rom_cmd(uint16_t cmd)
{
	int i;
	struct rom_cmd *wc;

	for (i = 0; i < ARRAY_SIZE(rom_cmds); i++) {
		wc = rom_cmds + i;
		if (wc->cmd == cmd)
			return 1;
	}

	return 0;
}

/*
 * qla2x00_mailbox_command
 *	Issue mailbox command and wait for completion.
 *
 * Input:
 *	ha = adapter block pointer.
 *	mcp = driver internal mbx struct pointer.
 *
 * Output:
 *	mb[MAX_MAILBOX_REGISTER_COUNT] = returned mailbox data.
 *
 * Returns:
 *	0 : QLA_SUCCESS (command performed successfully)
 *	1 : QLA_FUNCTION_FAILED (error encountered)
 *	6 : QLA_FUNCTION_TIMEOUT (timeout condition encountered)
 *
 * Context:
 *	Kernel context.
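 *
 * Illustrative caller sketch (a hedged example, not a verbatim excerpt;
 * it mirrors the pattern the wrapper routines later in this file use):
 * the caller fills a stack mbx_cmd_t, sets out_mb/in_mb as bitmaps where
 * MBX_n selects mailbox register n to write/read, then invokes this
 * routine.
 *
 *	mbx_cmd_t mc;
 *	mbx_cmd_t *mcp = &mc;
 *
 *	mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
 *	mcp->out_mb = MBX_0;		(write mailbox 0 only)
 *	mcp->in_mb = MBX_1|MBX_0;	(read back mailboxes 0-1)
 *	mcp->tov = MBX_TOV_SECONDS;
 *	mcp->flags = 0;
 *	rval = qla2x00_mailbox_command(vha, mcp);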
 */
static int
qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
{
	int		rval, i;
	unsigned long	flags = 0;
	device_reg_t	*reg;
	uint8_t		abort_active;
	uint8_t		io_lock_on;
	uint16_t	command = 0;
	uint16_t	*iptr;
	uint16_t __iomem *optr;
	uint32_t	cnt;
	uint32_t	mboxes;
	unsigned long	wait_time;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);


	ql_dbg(ql_dbg_mbx, vha, 0x1000, "Entered %s.\n", __func__);

	if (ha->pdev->error_state > pci_channel_io_frozen) {
		ql_log(ql_log_warn, vha, 0x1001,
		    "error_state is greater than pci_channel_io_frozen, "
		    "exiting.\n");
		return QLA_FUNCTION_TIMEOUT;
	}

	if (vha->device_flags & DFLG_DEV_FAILED) {
		ql_log(ql_log_warn, vha, 0x1002,
		    "Device in failed state, exiting.\n");
		return QLA_FUNCTION_TIMEOUT;
	}

	/* if PCI error, then avoid mbx processing.*/
	if (test_bit(PFLG_DISCONNECTED, &base_vha->dpc_flags) &&
	    test_bit(UNLOADING, &base_vha->dpc_flags)) {
		ql_log(ql_log_warn, vha, 0xd04e,
		    "PCI error, exiting.\n");
		return QLA_FUNCTION_TIMEOUT;
	}

	reg = ha->iobase;
	io_lock_on = base_vha->flags.init_done;

	rval = QLA_SUCCESS;
	abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);


	if (ha->flags.pci_channel_io_perm_failure) {
		ql_log(ql_log_warn, vha, 0x1003,
		    "Perm failure on EEH timeout MBX, exiting.\n");
		return QLA_FUNCTION_TIMEOUT;
	}

	if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
		/* Setting Link-Down error */
		mcp->mb[0] = MBS_LINK_DOWN_ERROR;
		ql_log(ql_log_warn, vha, 0x1004,
		    "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
		return QLA_FUNCTION_TIMEOUT;
	}

	/* check if ISP abort is active and return cmd with timeout */
	if ((test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
	    test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) &&
	    !is_rom_cmd(mcp->mb[0])) {
		ql_log(ql_log_info, vha, 0x1005,
		    "Cmd 0x%x aborted with timeout since ISP Abort is pending\n",
		    mcp->mb[0]);
		return QLA_FUNCTION_TIMEOUT;
	}

	/*
	 * Wait for active mailbox commands to finish by waiting at most tov
	 * seconds. This is to serialize actual issuing of mailbox cmds during
	 * non ISP abort time.
	 */
	if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) {
		/* Timeout occurred. Return error. */
		ql_log(ql_log_warn, vha, 0xd035,
		    "Cmd access timeout, cmd=0x%x, Exiting.\n",
		    mcp->mb[0]);
		return QLA_FUNCTION_TIMEOUT;
	}

	ha->flags.mbox_busy = 1;
	/* Save mailbox command for debug */
	ha->mcp = mcp;

	ql_dbg(ql_dbg_mbx, vha, 0x1006,
	    "Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]);

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Load mailbox registers.
 */
	if (IS_P3P_TYPE(ha))
		optr = (uint16_t __iomem *)&reg->isp82.mailbox_in[0];
	else if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha)))
		optr = (uint16_t __iomem *)&reg->isp24.mailbox0;
	else
		optr = (uint16_t __iomem *)MAILBOX_REG(ha, &reg->isp, 0);

	iptr = mcp->mb;
	command = mcp->mb[0];
	mboxes = mcp->out_mb;

	ql_dbg(ql_dbg_mbx, vha, 0x1111,
	    "Mailbox registers (OUT):\n");
	for (cnt = 0; cnt < ha->mbx_count; cnt++) {
		if (IS_QLA2200(ha) && cnt == 8)
			optr =
			    (uint16_t __iomem *)MAILBOX_REG(ha, &reg->isp, 8);
		if (mboxes & BIT_0) {
			ql_dbg(ql_dbg_mbx, vha, 0x1112,
			    "mbox[%d]<-0x%04x\n", cnt, *iptr);
			WRT_REG_WORD(optr, *iptr);
		}

		mboxes >>= 1;
		optr++;
		iptr++;
	}

	ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1117,
	    "I/O Address = %p.\n", optr);

	/* Issue set host interrupt command to send cmd out. */
	ha->flags.mbox_int = 0;
	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);

	/* Unlock mbx registers and wait for interrupt */
	ql_dbg(ql_dbg_mbx, vha, 0x100f,
	    "Going to unlock irq & waiting for interrupts. "
	    "jiffies=%lx.\n", jiffies);

	/* Wait for mbx cmd completion until timeout */

	if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) {
		set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);

		if (IS_P3P_TYPE(ha)) {
			if (RD_REG_DWORD(&reg->isp82.hint) &
			    HINT_MBX_INT_PENDING) {
				spin_unlock_irqrestore(&ha->hardware_lock,
				    flags);
				ha->flags.mbox_busy = 0;
				ql_dbg(ql_dbg_mbx, vha, 0x1010,
				    "Pending mailbox timeout, exiting.\n");
				rval = QLA_FUNCTION_TIMEOUT;
				goto premature_exit;
			}
			WRT_REG_DWORD(&reg->isp82.hint, HINT_MBX_INT_PENDING);
		} else if (IS_FWI2_CAPABLE(ha))
			WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
		else
			WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		wait_time = jiffies;
		if (!wait_for_completion_timeout(&ha->mbx_intr_comp,
		    mcp->tov * HZ)) {
			ql_dbg(ql_dbg_mbx, vha, 0x117a,
			    "cmd=%x Timeout.\n", command);
			spin_lock_irqsave(&ha->hardware_lock, flags);
			clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
		}
		if (time_after(jiffies, wait_time + 5 * HZ))
			ql_log(ql_log_warn, vha, 0x1015, "cmd=0x%x, waited %d msecs\n",
			    command, jiffies_to_msecs(jiffies - wait_time));
	} else {
		ql_dbg(ql_dbg_mbx, vha, 0x1011,
		    "Cmd=%x Polling Mode.\n", command);

		if (IS_P3P_TYPE(ha)) {
			if (RD_REG_DWORD(&reg->isp82.hint) &
			    HINT_MBX_INT_PENDING) {
				spin_unlock_irqrestore(&ha->hardware_lock,
				    flags);
				ha->flags.mbox_busy = 0;
				ql_dbg(ql_dbg_mbx, vha, 0x1012,
				    "Pending mailbox timeout, exiting.\n");
				rval = QLA_FUNCTION_TIMEOUT;
				goto premature_exit;
			}
			WRT_REG_DWORD(&reg->isp82.hint, HINT_MBX_INT_PENDING);
		} else if (IS_FWI2_CAPABLE(ha))
			WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
		else
			WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */
		while (!ha->flags.mbox_int) {
			if (time_after(jiffies, wait_time))
				break;

			/*
			 * Check if it's UNLOADING, cause we cannot poll in
			 * this case, or else a NULL pointer dereference
			 * is triggered.
			 */
			if (unlikely(test_bit(UNLOADING, &base_vha->dpc_flags)))
				return QLA_FUNCTION_TIMEOUT;

			/* Check for pending interrupts.
 */
			qla2x00_poll(ha->rsp_q_map[0]);

			if (!ha->flags.mbox_int &&
			    !(IS_QLA2200(ha) &&
			    command == MBC_LOAD_RISC_RAM_EXTENDED))
				msleep(10);
		} /* while */
		ql_dbg(ql_dbg_mbx, vha, 0x1013,
		    "Waited %d sec.\n",
		    (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ));
	}

	/* Check whether we timed out */
	if (ha->flags.mbox_int) {
		uint16_t *iptr2;

		ql_dbg(ql_dbg_mbx, vha, 0x1014,
		    "Cmd=%x completed.\n", command);

		/* Got interrupt. Clear the flag. */
		ha->flags.mbox_int = 0;
		clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);

		if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
			ha->flags.mbox_busy = 0;
			/* Setting Link-Down error */
			mcp->mb[0] = MBS_LINK_DOWN_ERROR;
			ha->mcp = NULL;
			rval = QLA_FUNCTION_FAILED;
			ql_log(ql_log_warn, vha, 0xd048,
			    "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
			goto premature_exit;
		}

		if (ha->mailbox_out[0] != MBS_COMMAND_COMPLETE)
			rval = QLA_FUNCTION_FAILED;

		/* Load return mailbox registers. */
		iptr2 = mcp->mb;
		iptr = (uint16_t *)&ha->mailbox_out[0];
		mboxes = mcp->in_mb;

		ql_dbg(ql_dbg_mbx, vha, 0x1113,
		    "Mailbox registers (IN):\n");
		for (cnt = 0; cnt < ha->mbx_count; cnt++) {
			if (mboxes & BIT_0) {
				*iptr2 = *iptr;
				ql_dbg(ql_dbg_mbx, vha, 0x1114,
				    "mbox[%d]->0x%04x\n", cnt, *iptr2);
			}

			mboxes >>= 1;
			iptr2++;
			iptr++;
		}
	} else {

		uint16_t mb[8];
		uint32_t ictrl, host_status, hccr;
		uint16_t w;

		if (IS_FWI2_CAPABLE(ha)) {
			mb[0] = RD_REG_WORD(&reg->isp24.mailbox0);
			mb[1] = RD_REG_WORD(&reg->isp24.mailbox1);
			mb[2] = RD_REG_WORD(&reg->isp24.mailbox2);
			mb[3] = RD_REG_WORD(&reg->isp24.mailbox3);
			mb[7] = RD_REG_WORD(&reg->isp24.mailbox7);
			ictrl = RD_REG_DWORD(&reg->isp24.ictrl);
			host_status = RD_REG_DWORD(&reg->isp24.host_status);
			hccr = RD_REG_DWORD(&reg->isp24.hccr);

			ql_log(ql_log_warn, vha, 0xd04c,
			    "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
			    "mb[0-3]=[0x%x 0x%x 0x%x 0x%x] mb7 0x%x host_status 0x%x hccr 0x%x\n",
			    command, ictrl, jiffies, mb[0], mb[1], mb[2], mb[3],
			    mb[7], host_status, hccr);

		} else {
			mb[0] = RD_MAILBOX_REG(ha, &reg->isp, 0);
			ictrl = RD_REG_WORD(&reg->isp.ictrl);
			ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119,
			    "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
			    "mb[0]=0x%x\n", command, ictrl, jiffies, mb[0]);
		}
		ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019);

		/* Capture FW dump only, if PCI device active */
		if (!pci_channel_offline(vha->hw->pdev)) {
			pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
			if (w == 0xffff || ictrl == 0xffffffff) {
				/* This is a special case: if the driver is
				 * being unloaded and the PCI device has gone
				 * into a bad state due to a PCI error, only
				 * the PCI ERR flag would be set.
				 * Do a premature exit for this case.
				 */
				ha->flags.mbox_busy = 0;
				rval = QLA_FUNCTION_TIMEOUT;
				goto premature_exit;
			}

			/* Attempt to capture firmware dump for further
			 * analysis of the current firmware state.  We do not
			 * need to do this if we are intentionally generating
			 * a dump.
			 */
			if (mcp->mb[0] != MBC_GEN_SYSTEM_ERROR)
				ha->isp_ops->fw_dump(vha, 0);
			rval = QLA_FUNCTION_TIMEOUT;
		}
	}

	ha->flags.mbox_busy = 0;

	/* Clean up */
	ha->mcp = NULL;

	if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) {
		ql_dbg(ql_dbg_mbx, vha, 0x101a,
		    "Checking for additional resp interrupt.\n");

		/* polling mode for non isp_abort commands. */
		qla2x00_poll(ha->rsp_q_map[0]);
	}

	if (rval == QLA_FUNCTION_TIMEOUT &&
	    mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) {
		if (!io_lock_on || (mcp->flags & IOCTL_CMD) ||
		    ha->flags.eeh_busy) {
			/* not in dpc. schedule it for dpc to take over. */
			ql_dbg(ql_dbg_mbx, vha, 0x101b,
			    "Timeout, schedule isp_abort_needed.\n");

			if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
			    !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
			    !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
				if (IS_QLA82XX(ha)) {
					ql_dbg(ql_dbg_mbx, vha, 0x112a,
					    "disabling pause transmit on port "
					    "0 & 1.\n");
					qla82xx_wr_32(ha,
					    QLA82XX_CRB_NIU + 0x98,
					    CRB_NIU_XG_PAUSE_CTL_P0|
					    CRB_NIU_XG_PAUSE_CTL_P1);
				}
				ql_log(ql_log_info, base_vha, 0x101c,
				    "Mailbox cmd timeout occurred, cmd=0x%x, "
				    "mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP "
				    "abort.\n", command, mcp->mb[0],
				    ha->flags.eeh_busy);
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
			}
		} else if (!abort_active) {
			/* call abort directly since we are in the DPC thread */
			ql_dbg(ql_dbg_mbx, vha, 0x101d,
			    "Timeout, calling abort_isp.\n");

			if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
			    !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
			    !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
				if (IS_QLA82XX(ha)) {
					ql_dbg(ql_dbg_mbx, vha, 0x112b,
					    "disabling pause transmit on port "
					    "0 & 1.\n");
					qla82xx_wr_32(ha,
					    QLA82XX_CRB_NIU + 0x98,
					    CRB_NIU_XG_PAUSE_CTL_P0|
					    CRB_NIU_XG_PAUSE_CTL_P1);
				}
				ql_log(ql_log_info, base_vha, 0x101e,
				    "Mailbox cmd timeout occurred, cmd=0x%x, "
				    "mb[0]=0x%x. Scheduling ISP abort ",
				    command, mcp->mb[0]);
				set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
				clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				/* Allow next mbx cmd to come in. */
				complete(&ha->mbx_cmd_comp);
				if (ha->isp_ops->abort_isp(vha)) {
					/* Failed. retry later. */
					set_bit(ISP_ABORT_NEEDED,
					    &vha->dpc_flags);
				}
				clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
				ql_dbg(ql_dbg_mbx, vha, 0x101f,
				    "Finished abort_isp.\n");
				goto mbx_done;
			}
		}
	}

premature_exit:
	/* Allow next mbx cmd to come in.
 */
	complete(&ha->mbx_cmd_comp);

mbx_done:
	if (rval) {
		if (ql2xextended_error_logging & (ql_dbg_disc|ql_dbg_mbx)) {
			pr_warn("%s [%s]-%04x:%ld: **** Failed", QL_MSGHDR,
			    dev_name(&ha->pdev->dev), 0x1020+0x800,
			    vha->host_no);
			mboxes = mcp->in_mb;
			cnt = 4;
			for (i = 0; i < ha->mbx_count && cnt; i++, mboxes >>= 1)
				if (mboxes & BIT_0) {
					printk(" mb[%u]=%x", i, mcp->mb[i]);
					cnt--;
				}
			pr_warn(" cmd=%x ****\n", command);
		}
		if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha))) {
			ql_dbg(ql_dbg_mbx, vha, 0x1198,
			    "host_status=%#x intr_ctrl=%#x intr_status=%#x\n",
			    RD_REG_DWORD(&reg->isp24.host_status),
			    RD_REG_DWORD(&reg->isp24.ictrl),
			    RD_REG_DWORD(&reg->isp24.istatus));
		} else {
			ql_dbg(ql_dbg_mbx, vha, 0x1206,
			    "ctrl_status=%#x ictrl=%#x istatus=%#x\n",
			    RD_REG_WORD(&reg->isp.ctrl_status),
			    RD_REG_WORD(&reg->isp.ictrl),
			    RD_REG_WORD(&reg->isp.istatus));
		}
	} else {
		ql_dbg(ql_dbg_mbx, base_vha, 0x1021, "Done %s.\n", __func__);
	}

	return rval;
}

int
qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
    uint32_t risc_code_size)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1022,
	    "Entered %s.\n", __func__);

	if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) {
		mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED;
		mcp->mb[8] = MSW(risc_addr);
		mcp->out_mb = MBX_8|MBX_0;
	} else {
		mcp->mb[0] = MBC_LOAD_RISC_RAM;
		mcp->out_mb = MBX_0;
	}
	mcp->mb[1] = LSW(risc_addr);
	mcp->mb[2] = MSW(req_dma);
	mcp->mb[3] = LSW(req_dma);
	mcp->mb[6] = MSW(MSD(req_dma));
	mcp->mb[7] = LSW(MSD(req_dma));
	mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
	if (IS_FWI2_CAPABLE(ha)) {
		mcp->mb[4] = MSW(risc_code_size);
		mcp->mb[5] = LSW(risc_code_size);
		mcp->out_mb |= MBX_5|MBX_4;
	} else {
		mcp->mb[4] = LSW(risc_code_size);
		mcp->out_mb |= MBX_4;
	}

	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1023,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1024,
		    "Done %s.\n", __func__);
	}

	return rval;
}

#define	EXTENDED_BB_CREDITS	BIT_0
#define	NVME_ENABLE_FLAG	BIT_3
static inline uint16_t qla25xx_set_sfp_lr_dist(struct qla_hw_data *ha)
{
	uint16_t mb4 = BIT_0;

	if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
		mb4 |= ha->long_range_distance << LR_DIST_FW_POS;

	return mb4;
}

static inline uint16_t qla25xx_set_nvr_lr_dist(struct qla_hw_data *ha)
{
	uint16_t mb4 = BIT_0;

	if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
		struct nvram_81xx *nv = ha->nvram;

		mb4 |= LR_DIST_FW_FIELD(nv->enhanced_features);
	}

	return mb4;
}

/*
 * qla2x00_execute_fw
 *	Start adapter firmware.
 *
 * Input:
 *	ha = adapter block pointer.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
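 *
 * Hedged sketch of the request this routine builds for FWI2-capable
 * (ISP24xx and later) adapters; the exact register usage is set up in
 * the body below, and the values here are illustrative only:
 *
 *	mcp->mb[0] = MBC_EXECUTE_FIRMWARE;
 *	mcp->mb[1] = MSW(risc_addr);
 *	mcp->mb[2] = LSW(risc_addr);
 *	mcp->mb[4] |= NVME_ENABLE_FLAG;		(optional feature bits)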
 */
int
qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1025,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_EXECUTE_FIRMWARE;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_0;
	if (IS_FWI2_CAPABLE(ha)) {
		mcp->mb[1] = MSW(risc_addr);
		mcp->mb[2] = LSW(risc_addr);
		mcp->mb[3] = 0;
		mcp->mb[4] = 0;
		ha->flags.using_lr_setting = 0;
		if (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
		    IS_QLA27XX(ha)) {
			if (ql2xautodetectsfp) {
				if (ha->flags.detected_lr_sfp) {
					mcp->mb[4] |=
					    qla25xx_set_sfp_lr_dist(ha);
					ha->flags.using_lr_setting = 1;
				}
			} else {
				struct nvram_81xx *nv = ha->nvram;
				/* set LR distance if specified in nvram */
				if (nv->enhanced_features &
				    NEF_LR_DIST_ENABLE) {
					mcp->mb[4] |=
					    qla25xx_set_nvr_lr_dist(ha);
					ha->flags.using_lr_setting = 1;
				}
			}
		}

		if (ql2xnvmeenable && IS_QLA27XX(ha))
			mcp->mb[4] |= NVME_ENABLE_FLAG;

		if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
			struct nvram_81xx *nv = ha->nvram;
			/* set minimum speed if specified in nvram */
			if (nv->min_link_speed >= 2 &&
			    nv->min_link_speed <= 5) {
				mcp->mb[4] |= BIT_4;
				mcp->mb[11] = nv->min_link_speed;
				mcp->out_mb |= MBX_11;
				mcp->in_mb |= BIT_5;
				vha->min_link_speed_feat = nv->min_link_speed;
			}
		}

		if (ha->flags.exlogins_enabled)
			mcp->mb[4] |= ENABLE_EXTENDED_LOGIN;

		if (ha->flags.exchoffld_enabled)
			mcp->mb[4] |= ENABLE_EXCHANGE_OFFLD;

		mcp->out_mb |= MBX_4|MBX_3|MBX_2|MBX_1;
		mcp->in_mb |= MBX_3 | MBX_2 | MBX_1;
	} else {
		mcp->mb[1] = LSW(risc_addr);
		mcp->out_mb |= MBX_1;
		if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
			mcp->mb[2] = 0;
			mcp->out_mb |= MBX_2;
		}
	}

	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1026,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		if (IS_FWI2_CAPABLE(ha)) {
			ha->fw_ability_mask = mcp->mb[3] << 16 | mcp->mb[2];
			ql_dbg(ql_dbg_mbx, vha, 0x119a,
			    "fw_ability_mask=%x.\n", ha->fw_ability_mask);
			ql_dbg(ql_dbg_mbx, vha, 0x1027,
			    "exchanges=%x.\n", mcp->mb[1]);
			if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
				ha->max_speed_sup = mcp->mb[2] & BIT_0;
				ql_dbg(ql_dbg_mbx, vha, 0x119b,
				    "Maximum speed supported=%s.\n",
				    ha->max_speed_sup ? "32Gps" : "16Gps");
				if (vha->min_link_speed_feat) {
					ha->min_link_speed = mcp->mb[5];
					ql_dbg(ql_dbg_mbx, vha, 0x119c,
					    "Minimum speed set=%s.\n",
					    mcp->mb[5] == 5 ? "32Gps" :
					    mcp->mb[5] == 4 ? "16Gps" :
					    mcp->mb[5] == 3 ? "8Gps" :
					    mcp->mb[5] == 2 ? "4Gps" :
					    "unknown");
				}
			}
		}
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1028,
		    "Done.\n");
	}

	return rval;
}

/*
 * qla_get_exlogin_status
 *	Get extended login status
 *	uses the memory offload control/status Mailbox
 *
 * Input:
 *	ha:		adapter state pointer.
 *	fwopt:		firmware options
 *
 * Returns:
 *	qla2x00 local function status
 *
 * Context:
 *	Kernel context.
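 *
 * Illustrative call (hedged; the local variable names are assumptions
 * for the example only):
 *
 *	uint16_t buf_sz, cnt;
 *
 *	rval = qla_get_exlogin_status(vha, &buf_sz, &cnt);
 *	(on success, buf_sz holds mb[4] and cnt holds mb[10])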
 */
#define FETCH_XLOGINS_STAT	0x8
int
qla_get_exlogin_status(scsi_qla_host_t *vha, uint16_t *buf_sz,
	uint16_t *ex_logins_cnt)
{
	int rval;
	mbx_cmd_t	mc;
	mbx_cmd_t	*mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118f,
	    "Entered %s\n", __func__);

	memset(mcp->mb, 0, sizeof(mcp->mb));
	mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
	mcp->mb[1] = FETCH_XLOGINS_STAT;
	mcp->out_mb = MBX_1|MBX_0;
	mcp->in_mb = MBX_10|MBX_4|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;

	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1115, "Failed=%x.\n", rval);
	} else {
		*buf_sz = mcp->mb[4];
		*ex_logins_cnt = mcp->mb[10];

		ql_log(ql_log_info, vha, 0x1190,
		    "buffer size 0x%x, exchange login count=%d\n",
		    mcp->mb[4], mcp->mb[10]);

		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1116,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla_set_exlogin_mem_cfg
 *	Set extended login memory configuration.
 *	Mbx needs to be issued before init_cb is set.
 *
 * Input:
 *	ha:		adapter state pointer.
 *	buffer:		buffer pointer
 *	phys_addr:	physical address of buffer
 *	size:		size of buffer
 *	TARGET_QUEUE_LOCK must be released
 *	ADAPTER_STATE_LOCK must be released
 *
 * Returns:
 *	qla2x00 local function status code.
 *
 * Context:
 *	Kernel context.
 */
#define CONFIG_XLOGINS_MEM	0x3
int
qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr)
{
	int rval;
	mbx_cmd_t	mc;
	mbx_cmd_t	*mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111a,
	    "Entered %s.\n", __func__);

	memset(mcp->mb, 0, sizeof(mcp->mb));
	mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
	mcp->mb[1] = CONFIG_XLOGINS_MEM;
	mcp->mb[2] = MSW(phys_addr);
	mcp->mb[3] = LSW(phys_addr);
	mcp->mb[6] = MSW(MSD(phys_addr));
	mcp->mb[7] = LSW(MSD(phys_addr));
	mcp->mb[8] = MSW(ha->exlogin_size);
	mcp->mb[9] = LSW(ha->exlogin_size);
	mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_11|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x111b, "Failed=%x.\n", rval);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla_get_exchoffld_status
 *	Get exchange offload status
 *	uses the memory offload control/status Mailbox
 *
 * Input:
 *	ha:		adapter state pointer.
 *	fwopt:		firmware options
 *
 * Returns:
 *	qla2x00 local function status
 *
 * Context:
 *	Kernel context.
 */
#define FETCH_XCHOFFLD_STAT	0x2
int
qla_get_exchoffld_status(scsi_qla_host_t *vha, uint16_t *buf_sz,
	uint16_t *ex_logins_cnt)
{
	int rval;
	mbx_cmd_t	mc;
	mbx_cmd_t	*mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1019,
	    "Entered %s\n", __func__);

	memset(mcp->mb, 0, sizeof(mcp->mb));
	mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
	mcp->mb[1] = FETCH_XCHOFFLD_STAT;
	mcp->out_mb = MBX_1|MBX_0;
	mcp->in_mb = MBX_10|MBX_4|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;

	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1155, "Failed=%x.\n", rval);
	} else {
		*buf_sz = mcp->mb[4];
		*ex_logins_cnt = mcp->mb[10];

		ql_log(ql_log_info, vha, 0x118e,
		    "buffer size 0x%x, exchange offload count=%d\n",
		    mcp->mb[4], mcp->mb[10]);

		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1156,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla_set_exchoffld_mem_cfg
 *	Set exchange offload memory configuration.
 *	Mbx needs to be issued before init_cb is set.
 *
 * Input:
 *	ha:		adapter state pointer.
 *	buffer:		buffer pointer
 *	phys_addr:	physical address of buffer
 *	size:		size of buffer
 *	TARGET_QUEUE_LOCK must be released
 *	ADAPTER_STATE_LOCK must be released
 *
 * Returns:
 *	qla2x00 local function status code.
 *
 * Context:
 *	Kernel context.
 */
#define CONFIG_XCHOFFLD_MEM	0x3
int
qla_set_exchoffld_mem_cfg(scsi_qla_host_t *vha)
{
	int rval;
	mbx_cmd_t	mc;
	mbx_cmd_t	*mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1157,
	    "Entered %s.\n", __func__);

	memset(mcp->mb, 0, sizeof(mcp->mb));
	mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
	mcp->mb[1] = CONFIG_XCHOFFLD_MEM;
	mcp->mb[2] = MSW(ha->exchoffld_buf_dma);
	mcp->mb[3] = LSW(ha->exchoffld_buf_dma);
	mcp->mb[6] = MSW(MSD(ha->exchoffld_buf_dma));
	mcp->mb[7] = LSW(MSD(ha->exchoffld_buf_dma));
	mcp->mb[8] = MSW(ha->exchoffld_size);
	mcp->mb[9] = LSW(ha->exchoffld_size);
	mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_11|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x1158, "Failed=%x.\n", rval);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1192,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2x00_get_fw_version
 *	Get firmware version.
 *
 * Input:
 *	ha:		adapter state pointer.
 *	major:		pointer for major number.
 *	minor:		pointer for minor number.
 *	subminor:	pointer for subminor number.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_get_fw_version(scsi_qla_host_t *vha)
{
	int		rval;
	mbx_cmd_t	mc;
	mbx_cmd_t	*mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1029,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_GET_FIRMWARE_VERSION;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	if (IS_QLA81XX(vha->hw) || IS_QLA8031(ha) || IS_QLA8044(ha))
		mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8;
	if (IS_FWI2_CAPABLE(ha))
		mcp->in_mb |= MBX_17|MBX_16|MBX_15;
	if (IS_QLA27XX(ha))
		mcp->in_mb |=
		    MBX_25|MBX_24|MBX_23|MBX_22|MBX_21|MBX_20|MBX_19|MBX_18|
		    MBX_14|MBX_13|MBX_11|MBX_10|MBX_9|MBX_8;

	mcp->flags = 0;
	mcp->tov = MBX_TOV_SECONDS;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS)
		goto failed;

	/* Return mailbox data. */
	ha->fw_major_version = mcp->mb[1];
	ha->fw_minor_version = mcp->mb[2];
	ha->fw_subminor_version = mcp->mb[3];
	ha->fw_attributes = mcp->mb[6];
	if (IS_QLA2100(vha->hw) || IS_QLA2200(vha->hw))
		ha->fw_memory_size = 0x1FFFF;		/* Defaults to 128KB. */
	else
		ha->fw_memory_size = (mcp->mb[5] << 16) | mcp->mb[4];

	if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
		ha->mpi_version[0] = mcp->mb[10] & 0xff;
		ha->mpi_version[1] = mcp->mb[11] >> 8;
		ha->mpi_version[2] = mcp->mb[11] & 0xff;
		ha->mpi_capabilities = (mcp->mb[12] << 16) | mcp->mb[13];
		ha->phy_version[0] = mcp->mb[8] & 0xff;
		ha->phy_version[1] = mcp->mb[9] >> 8;
		ha->phy_version[2] = mcp->mb[9] & 0xff;
	}

	if (IS_FWI2_CAPABLE(ha)) {
		ha->fw_attributes_h = mcp->mb[15];
		ha->fw_attributes_ext[0] = mcp->mb[16];
		ha->fw_attributes_ext[1] = mcp->mb[17];
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1139,
		    "%s: FW_attributes Upper: 0x%x, Lower: 0x%x.\n",
		    __func__, mcp->mb[15], mcp->mb[6]);
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x112f,
		    "%s: Ext_FwAttributes Upper: 0x%x, Lower: 0x%x.\n",
		    __func__, mcp->mb[17], mcp->mb[16]);

		if (ha->fw_attributes_h & 0x4)
			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118d,
			    "%s: Firmware supports Extended Login 0x%x\n",
			    __func__, ha->fw_attributes_h);

		if (ha->fw_attributes_h & 0x8)
			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1191,
			    "%s: Firmware supports Exchange Offload 0x%x\n",
			    __func__, ha->fw_attributes_h);

		/*
		 * FW supports nvme and driver load parameter requested nvme.
		 * BIT 26 of fw_attributes indicates NVMe support.
		 */
		if ((ha->fw_attributes_h & 0x400) && ql2xnvmeenable) {
			vha->flags.nvme_enabled = 1;
			ql_log(ql_log_info, vha, 0xd302,
			    "%s: FC-NVMe is Enabled (0x%x)\n",
			    __func__, ha->fw_attributes_h);
		}
	}

	if (IS_QLA27XX(ha)) {
		ha->mpi_version[0] = mcp->mb[10] & 0xff;
		ha->mpi_version[1] = mcp->mb[11] >> 8;
		ha->mpi_version[2] = mcp->mb[11] & 0xff;
		ha->pep_version[0] = mcp->mb[13] & 0xff;
		ha->pep_version[1] = mcp->mb[14] >> 8;
		ha->pep_version[2] = mcp->mb[14] & 0xff;
		ha->fw_shared_ram_start = (mcp->mb[19] << 16) | mcp->mb[18];
		ha->fw_shared_ram_end = (mcp->mb[21] << 16) | mcp->mb[20];
		ha->fw_ddr_ram_start = (mcp->mb[23] << 16) | mcp->mb[22];
		ha->fw_ddr_ram_end = (mcp->mb[25] << 16) | mcp->mb[24];
	}

failed:
	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x102a, "Failed=%x.\n", rval);
	} else {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102b,
		    "Done %s.\n", __func__);
	}
	return rval;
}

/*
 * qla2x00_get_fw_options
 *	Get firmware options.
 *
 * Input:
 *	ha = adapter block pointer.
 *	fwopt = pointer for firmware options.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102c,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_GET_FIRMWARE_OPTION;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x102d, "Failed=%x.\n", rval);
	} else {
		fwopts[0] = mcp->mb[0];
		fwopts[1] = mcp->mb[1];
		fwopts[2] = mcp->mb[2];
		fwopts[3] = mcp->mb[3];

		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102e,
		    "Done %s.\n", __func__);
	}

	return rval;
}


/*
 * qla2x00_set_fw_options
 *	Set firmware options.
 *
 * Input:
 *	ha = adapter block pointer.
 *	fwopt = pointer for firmware options.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
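 *
 * Illustrative get/modify/set sequence (hedged; the array name and the
 * particular option bit are assumptions for the example only):
 *
 *	uint16_t opts[16] = { 0 };
 *
 *	qla2x00_get_fw_options(vha, opts);
 *	opts[1] |= BIT_0;	(hypothetical option bit)
 *	qla2x00_set_fw_options(vha, opts);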
 */
int
qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102f,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_SET_FIRMWARE_OPTION;
	mcp->mb[1] = fwopts[1];
	mcp->mb[2] = fwopts[2];
	mcp->mb[3] = fwopts[3];
	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_0;
	if (IS_FWI2_CAPABLE(vha->hw)) {
		mcp->in_mb |= MBX_1;
		mcp->mb[10] = fwopts[10];
		mcp->out_mb |= MBX_10;
	} else {
		mcp->mb[10] = fwopts[10];
		mcp->mb[11] = fwopts[11];
		mcp->mb[12] = 0;	/* Undocumented, but used */
		mcp->out_mb |= MBX_12|MBX_11|MBX_10;
	}
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	fwopts[0] = mcp->mb[0];

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x1030,
		    "Failed=%x (%x/%x).\n", rval, mcp->mb[0], mcp->mb[1]);
	} else {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1031,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2x00_mbx_reg_test
 *	Mailbox register wrap test.
 *
 * Input:
 *	ha = adapter block pointer.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_mbx_reg_test(scsi_qla_host_t *vha)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1032,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST;
	mcp->mb[1] = 0xAAAA;
	mcp->mb[2] = 0x5555;
	mcp->mb[3] = 0xAA55;
	mcp->mb[4] = 0x55AA;
	mcp->mb[5] = 0xA5A5;
	mcp->mb[6] = 0x5A5A;
	mcp->mb[7] = 0x2525;
	mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval == QLA_SUCCESS) {
		if (mcp->mb[1] != 0xAAAA || mcp->mb[2] != 0x5555 ||
		    mcp->mb[3] != 0xAA55 || mcp->mb[4] != 0x55AA)
			rval = QLA_FUNCTION_FAILED;
		if (mcp->mb[5] != 0xA5A5 || mcp->mb[6] != 0x5A5A ||
		    mcp->mb[7] != 0x2525)
			rval = QLA_FUNCTION_FAILED;
	}

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x1033, "Failed=%x.\n", rval);
	} else {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1034,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2x00_verify_checksum
 *	Verify firmware checksum.
 *
 * Input:
 *	ha = adapter block pointer.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1035,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_VERIFY_CHECKSUM;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_0;
	if (IS_FWI2_CAPABLE(vha->hw)) {
		mcp->mb[1] = MSW(risc_addr);
		mcp->mb[2] = LSW(risc_addr);
		mcp->out_mb |= MBX_2|MBX_1;
		mcp->in_mb |= MBX_2|MBX_1;
	} else {
		mcp->mb[1] = LSW(risc_addr);
		mcp->out_mb |= MBX_1;
		mcp->in_mb |= MBX_1;
	}

	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1036,
		    "Failed=%x checksum=%x.\n", rval, IS_FWI2_CAPABLE(vha->hw) ?
		    (mcp->mb[2] << 16) | mcp->mb[1] : mcp->mb[1]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1037,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2x00_issue_iocb
 *	Issue IOCB using mailbox command
 *
 * Input:
 *	ha = adapter state pointer.
 *	buffer = buffer pointer.
 *	phys_addr = physical address of buffer.
 *	size = size of buffer.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
    dma_addr_t phys_addr, size_t size, uint32_t tov)
{
	int		rval;
	mbx_cmd_t	mc;
	mbx_cmd_t	*mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1038,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_IOCB_COMMAND_A64;
	mcp->mb[1] = 0;
	mcp->mb[2] = MSW(phys_addr);
	mcp->mb[3] = LSW(phys_addr);
	mcp->mb[6] = MSW(MSD(phys_addr));
	mcp->mb[7] = LSW(MSD(phys_addr));
	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_2|MBX_0;
	mcp->tov = tov;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x1039, "Failed=%x.\n", rval);
	} else {
		sts_entry_t *sts_entry = (sts_entry_t *) buffer;

		/* Mask reserved bits. */
		sts_entry->entry_status &=
		    IS_FWI2_CAPABLE(vha->hw) ? RF_MASK_24XX : RF_MASK;
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103a,
		    "Done %s.\n", __func__);
	}

	return rval;
}

int
qla2x00_issue_iocb(scsi_qla_host_t *vha, void *buffer, dma_addr_t phys_addr,
    size_t size)
{
	return qla2x00_issue_iocb_timeout(vha, buffer, phys_addr, size,
	    MBX_TOV_SECONDS);
}

/*
 * qla2x00_abort_command
 *	Abort command aborts a specified IOCB.
 *
 * Input:
 *	ha = adapter block pointer.
 *	sp = SRB structure pointer.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_abort_command(srb_t *sp)
{
	unsigned long   flags = 0;
	int		rval;
	uint32_t	handle = 0;
	mbx_cmd_t	mc;
	mbx_cmd_t	*mcp = &mc;
	fc_port_t	*fcport = sp->fcport;
	scsi_qla_host_t *vha = fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103b,
	    "Entered %s.\n", __func__);

	if (vha->flags.qpairs_available && sp->qpair)
		req = sp->qpair->req;
	else
		req = vha->req;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
		if (req->outstanding_cmds[handle] == sp)
			break;
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (handle == req->num_outstanding_cmds) {
		/* command not found */
		return QLA_FUNCTION_FAILED;
	}

	mcp->mb[0] = MBC_ABORT_COMMAND;
	if (HAS_EXTENDED_IDS(ha))
		mcp->mb[1] = fcport->loop_id;
	else
		mcp->mb[1] = fcport->loop_id << 8;
	mcp->mb[2] = (uint16_t)handle;
	mcp->mb[3] = (uint16_t)(handle >> 16);
	mcp->mb[6] = (uint16_t)cmd->device->lun;
	mcp->out_mb = MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x103c, "Failed=%x.\n", rval);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103d,
		    "Done %s.\n", __func__);
	}

	return rval;
}

int
qla2x00_abort_target(struct fc_port *fcport, uint64_t l, int tag)
{
	int rval, rval2;
	mbx_cmd_t  mc;
	mbx_cmd_t  *mcp = &mc;
	scsi_qla_host_t *vha;
	struct req_que *req;
	struct rsp_que *rsp;

	l = l;
	vha = fcport->vha;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103e,
	    "Entered %s.\n", __func__);

	req = vha->hw->req_q_map[0];
	rsp = req->rsp;
	mcp->mb[0] = MBC_ABORT_TARGET;
	mcp->out_mb = MBX_9|MBX_2|MBX_1|MBX_0;
	if (HAS_EXTENDED_IDS(vha->hw)) {
		mcp->mb[1] = fcport->loop_id;
		mcp->mb[10] = 0;
		mcp->out_mb |= MBX_10;
	} else {
		mcp->mb[1] = fcport->loop_id << 8;
	}
	mcp->mb[2] = vha->hw->loop_reset_delay;
	mcp->mb[9] = vha->vp_idx;

	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103f,
		    "Failed=%x.\n", rval);
	}

	/* Issue marker IOCB.
 */
	rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, 0,
	    MK_SYNC_ID);
	if (rval2 != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1040,
		    "Failed to issue marker IOCB (%x).\n", rval2);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1041,
		    "Done %s.\n", __func__);
	}

	return rval;
}

int
qla2x00_lun_reset(struct fc_port *fcport, uint64_t l, int tag)
{
	int rval, rval2;
	mbx_cmd_t  mc;
	mbx_cmd_t  *mcp = &mc;
	scsi_qla_host_t *vha;
	struct req_que *req;
	struct rsp_que *rsp;

	vha = fcport->vha;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1042,
	    "Entered %s.\n", __func__);

	req = vha->hw->req_q_map[0];
	rsp = req->rsp;
	mcp->mb[0] = MBC_LUN_RESET;
	mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
	if (HAS_EXTENDED_IDS(vha->hw))
		mcp->mb[1] = fcport->loop_id;
	else
		mcp->mb[1] = fcport->loop_id << 8;
	mcp->mb[2] = (u32)l;
	mcp->mb[3] = 0;
	mcp->mb[9] = vha->vp_idx;

	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1043, "Failed=%x.\n", rval);
	}

	/* Issue marker IOCB. */
	rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l,
	    MK_SYNC_ID_LUN);
	if (rval2 != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1044,
		    "Failed to issue marker IOCB (%x).\n", rval2);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1045,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2x00_get_adapter_id
 *	Get adapter ID and topology.
 *
 * Input:
 *	ha = adapter block pointer.
 *	id = pointer for loop ID.
 *	al_pa = pointer for AL_PA.
 *	area = pointer for area.
 *	domain = pointer for domain.
 *	top = pointer for topology.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
    uint8_t *area, uint8_t *domain, uint16_t *top, uint16_t *sw_cap)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1046,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID;
	mcp->mb[9] = vha->vp_idx;
	mcp->out_mb = MBX_9|MBX_0;
	mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	if (IS_CNA_CAPABLE(vha->hw))
		mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10;
	if (IS_FWI2_CAPABLE(vha->hw))
		mcp->in_mb |= MBX_19|MBX_18|MBX_17|MBX_16;
	if (IS_QLA27XX(vha->hw))
		mcp->in_mb |= MBX_15;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (mcp->mb[0] == MBS_COMMAND_ERROR)
		rval = QLA_COMMAND_ERROR;
	else if (mcp->mb[0] == MBS_INVALID_COMMAND)
		rval = QLA_INVALID_COMMAND;

	/* Return data.
 */
	*id = mcp->mb[1];
	*al_pa = LSB(mcp->mb[2]);
	*area = MSB(mcp->mb[2]);
	*domain = LSB(mcp->mb[3]);
	*top = mcp->mb[6];
	*sw_cap = mcp->mb[7];

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x1047, "Failed=%x.\n", rval);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1048,
		    "Done %s.\n", __func__);

		if (IS_CNA_CAPABLE(vha->hw)) {
			vha->fcoe_vlan_id = mcp->mb[9] & 0xfff;
			vha->fcoe_fcf_idx = mcp->mb[10];
			vha->fcoe_vn_port_mac[5] = mcp->mb[11] >> 8;
			vha->fcoe_vn_port_mac[4] = mcp->mb[11] & 0xff;
			vha->fcoe_vn_port_mac[3] = mcp->mb[12] >> 8;
			vha->fcoe_vn_port_mac[2] = mcp->mb[12] & 0xff;
			vha->fcoe_vn_port_mac[1] = mcp->mb[13] >> 8;
			vha->fcoe_vn_port_mac[0] = mcp->mb[13] & 0xff;
		}
		/* If FA-WWN supported */
		if (IS_FAWWN_CAPABLE(vha->hw)) {
			if (mcp->mb[7] & BIT_14) {
				vha->port_name[0] = MSB(mcp->mb[16]);
				vha->port_name[1] = LSB(mcp->mb[16]);
				vha->port_name[2] = MSB(mcp->mb[17]);
				vha->port_name[3] = LSB(mcp->mb[17]);
				vha->port_name[4] = MSB(mcp->mb[18]);
				vha->port_name[5] = LSB(mcp->mb[18]);
				vha->port_name[6] = MSB(mcp->mb[19]);
				vha->port_name[7] = LSB(mcp->mb[19]);
				fc_host_port_name(vha->host) =
				    wwn_to_u64(vha->port_name);
				ql_dbg(ql_dbg_mbx, vha, 0x10ca,
				    "FA-WWN acquired %016llx\n",
				    wwn_to_u64(vha->port_name));
			}
		}

		if (IS_QLA27XX(vha->hw))
			vha->bbcr = mcp->mb[15];
	}

	return rval;
}

/*
 * qla2x00_get_retry_cnt
 *	Get current firmware login retry count and delay.
 *
 * Input:
 *	ha = adapter block pointer.
 *	retry_cnt = pointer to login retry count.
 *	tov = pointer to login timeout value.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov,
    uint16_t *r_a_tov)
{
	int rval;
	uint16_t ratov;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1049,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_GET_RETRY_COUNT;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x104a,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		/* Convert returned data and check our values. */
		*r_a_tov = mcp->mb[3] / 2;
		ratov = (mcp->mb[3]/2) / 10;  /* mb[3] value is in 100ms */
		if (mcp->mb[1] * ratov > (*retry_cnt) * (*tov)) {
			/* Update to the larger values */
			*retry_cnt = (uint8_t)mcp->mb[1];
			*tov = ratov;
		}

		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104b,
		    "Done %s mb3=%d ratov=%d.\n", __func__, mcp->mb[3], ratov);
	}

	return rval;
}

/*
 * qla2x00_init_firmware
 *	Initialize adapter firmware.
 *
 * Input:
 *	ha = adapter block pointer.
 *	dptr = Initialization control block pointer.
 *	size = size of initialization control block.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
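 *
 * Illustrative call (hedged; callers typically pass the driver's
 * initialization control block size):
 *
 *	rval = qla2x00_init_firmware(vha, ha->init_cb_size);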
 */
int
qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104c,
	    "Entered %s.\n", __func__);

	if (IS_P3P_TYPE(ha) && ql2xdbwr)
		qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr,
		    (0x04 | (ha->portnum << 5) | (0 << 8) | (0 << 16)));

	if (ha->flags.npiv_supported)
		mcp->mb[0] = MBC_MID_INITIALIZE_FIRMWARE;
	else
		mcp->mb[0] = MBC_INITIALIZE_FIRMWARE;

	mcp->mb[1] = 0;
	mcp->mb[2] = MSW(ha->init_cb_dma);
	mcp->mb[3] = LSW(ha->init_cb_dma);
	mcp->mb[6] = MSW(MSD(ha->init_cb_dma));
	mcp->mb[7] = LSW(MSD(ha->init_cb_dma));
	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	if (ha->ex_init_cb && ha->ex_init_cb->ex_version) {
		mcp->mb[1] = BIT_0;
		mcp->mb[10] = MSW(ha->ex_init_cb_dma);
		mcp->mb[11] = LSW(ha->ex_init_cb_dma);
		mcp->mb[12] = MSW(MSD(ha->ex_init_cb_dma));
		mcp->mb[13] = LSW(MSD(ha->ex_init_cb_dma));
		mcp->mb[14] = sizeof(*ha->ex_init_cb);
		mcp->out_mb |= MBX_14|MBX_13|MBX_12|MBX_11|MBX_10;
	}
	/* 1 and 2 should normally be captured. */
	mcp->in_mb = MBX_2|MBX_1|MBX_0;
	if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
		/* mb3 is additional info about the installed SFP. */
		mcp->in_mb |= MBX_3;
	mcp->buf_size = size;
	mcp->flags = MBX_DMA_OUT;
	mcp->tov = MBX_TOV_SECONDS;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x104d,
		    "Failed=%x mb[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x,.\n",
		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3]);
	} else {
		if (IS_QLA27XX(ha)) {
			if (mcp->mb[2] == 6 || mcp->mb[3] == 2)
				ql_dbg(ql_dbg_mbx, vha, 0x119d,
				    "Invalid SFP/Validation Failed\n");
		}
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104e,
		    "Done %s.\n", __func__);
	}

	return rval;
}


/*
 * qla2x00_get_port_database
 *	Issue normal/enhanced get port database mailbox command
 *	and copy device name as necessary.
 *
 * Input:
 *	ha = adapter state pointer.
 *	dev = structure pointer.
 *	opt = enhanced cmd option byte.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	port_database_t *pd;
	struct port_database_24xx *pd24;
	dma_addr_t pd_dma;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104f,
	    "Entered %s.\n", __func__);

	pd24 = NULL;
	pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
	if (pd == NULL) {
		ql_log(ql_log_warn, vha, 0x1050,
		    "Failed to allocate port database structure.\n");
		fcport->query = 0;
		return QLA_MEMORY_ALLOC_FAILED;
	}

	mcp->mb[0] = MBC_GET_PORT_DATABASE;
	if (opt != 0 && !IS_FWI2_CAPABLE(ha))
		mcp->mb[0] = MBC_ENHANCED_GET_PORT_DATABASE;
	mcp->mb[2] = MSW(pd_dma);
	mcp->mb[3] = LSW(pd_dma);
	mcp->mb[6] = MSW(MSD(pd_dma));
	mcp->mb[7] = LSW(MSD(pd_dma));
	mcp->mb[9] = vha->vp_idx;
	mcp->out_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
	mcp->in_mb = MBX_0;
	if (IS_FWI2_CAPABLE(ha)) {
		mcp->mb[1] = fcport->loop_id;
		mcp->mb[10] = opt;
		mcp->out_mb |= MBX_10|MBX_1;
		mcp->in_mb |= MBX_1;
	} else if (HAS_EXTENDED_IDS(ha)) {
		mcp->mb[1] = fcport->loop_id;
		mcp->mb[10] = opt;
		mcp->out_mb |= MBX_10|MBX_1;
	} else {
		mcp->mb[1] = fcport->loop_id << 8 | opt;
		mcp->out_mb |= MBX_1;
	}
	mcp->buf_size = IS_FWI2_CAPABLE(ha) ?
	    PORT_DATABASE_24XX_SIZE : PORT_DATABASE_SIZE;
	mcp->flags = MBX_DMA_IN;
	mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS)
		goto gpd_error_out;

	if (IS_FWI2_CAPABLE(ha)) {
		uint64_t zero = 0;
		u8 current_login_state, last_login_state;

		pd24 = (struct port_database_24xx *) pd;

		/* Check for logged in state. */
		if (fcport->fc4f_nvme) {
			current_login_state = pd24->current_login_state >> 4;
			last_login_state = pd24->last_login_state >> 4;
		} else {
			current_login_state = pd24->current_login_state & 0xf;
			last_login_state = pd24->last_login_state & 0xf;
		}
		fcport->current_login_state = pd24->current_login_state;
		fcport->last_login_state = pd24->last_login_state;

		/* Check for logged in state. */
		if (current_login_state != PDS_PRLI_COMPLETE &&
		    last_login_state != PDS_PRLI_COMPLETE) {
			ql_dbg(ql_dbg_mbx, vha, 0x119a,
			    "Unable to verify login-state (%x/%x) for loop_id %x.\n",
			    current_login_state, last_login_state,
			    fcport->loop_id);
			rval = QLA_FUNCTION_FAILED;

			if (!fcport->query)
				goto gpd_error_out;
		}

		if (fcport->loop_id == FC_NO_LOOP_ID ||
		    (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
		     memcmp(fcport->port_name, pd24->port_name, 8))) {
			/* We lost the device mid way. */
			rval = QLA_NOT_LOGGED_IN;
			goto gpd_error_out;
		}

		/* Names are little-endian. */
		memcpy(fcport->node_name, pd24->node_name, WWN_SIZE);
		memcpy(fcport->port_name, pd24->port_name, WWN_SIZE);

		/* Get port_id of device. */
		fcport->d_id.b.domain = pd24->port_id[0];
		fcport->d_id.b.area = pd24->port_id[1];
		fcport->d_id.b.al_pa = pd24->port_id[2];
		fcport->d_id.b.rsvd_1 = 0;

		/* If not target must be initiator or unknown type.
 */
		if ((pd24->prli_svc_param_word_3[0] & BIT_4) == 0)
			fcport->port_type = FCT_INITIATOR;
		else
			fcport->port_type = FCT_TARGET;

		/* Passback COS information. */
		fcport->supported_classes = (pd24->flags & PDF_CLASS_2) ?
		    FC_COS_CLASS2 : FC_COS_CLASS3;

		if (pd24->prli_svc_param_word_3[0] & BIT_7)
			fcport->flags |= FCF_CONF_COMP_SUPPORTED;
	} else {
		uint64_t zero = 0;

		/* Check for logged in state. */
		if (pd->master_state != PD_STATE_PORT_LOGGED_IN &&
		    pd->slave_state != PD_STATE_PORT_LOGGED_IN) {
			ql_dbg(ql_dbg_mbx, vha, 0x100a,
			    "Unable to verify login-state (%x/%x) - "
			    "portid=%02x%02x%02x.\n", pd->master_state,
			    pd->slave_state, fcport->d_id.b.domain,
			    fcport->d_id.b.area, fcport->d_id.b.al_pa);
			rval = QLA_FUNCTION_FAILED;
			goto gpd_error_out;
		}

		if (fcport->loop_id == FC_NO_LOOP_ID ||
		    (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
		     memcmp(fcport->port_name, pd->port_name, 8))) {
			/* We lost the device mid way. */
			rval = QLA_NOT_LOGGED_IN;
			goto gpd_error_out;
		}

		/* Names are little-endian. */
		memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
		memcpy(fcport->port_name, pd->port_name, WWN_SIZE);

		/* Get port_id of device. */
		fcport->d_id.b.domain = pd->port_id[0];
		fcport->d_id.b.area = pd->port_id[3];
		fcport->d_id.b.al_pa = pd->port_id[2];
		fcport->d_id.b.rsvd_1 = 0;

		/* If not target must be initiator or unknown type. */
		if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
			fcport->port_type = FCT_INITIATOR;
		else
			fcport->port_type = FCT_TARGET;

		/* Passback COS information. */
		fcport->supported_classes = (pd->options & BIT_4) ?
		    FC_COS_CLASS2 : FC_COS_CLASS3;
	}

gpd_error_out:
	dma_pool_free(ha->s_dma_pool, pd, pd_dma);
	fcport->query = 0;

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1052,
		    "Failed=%x mb[0]=%x mb[1]=%x.\n", rval,
		    mcp->mb[0], mcp->mb[1]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1053,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2x00_get_firmware_state
 *	Get adapter firmware state.
 *
 * Input:
 *	ha = adapter block pointer.
 *	dptr = pointer for firmware state.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1054,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
	mcp->out_mb = MBX_0;
	if (IS_FWI2_CAPABLE(vha->hw))
		mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	else
		mcp->in_mb = MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	/* Return firmware states.
*/ 2002 states[0] = mcp->mb[1]; 2003 if (IS_FWI2_CAPABLE(vha->hw)) { 2004 states[1] = mcp->mb[2]; 2005 states[2] = mcp->mb[3]; /* SFP info */ 2006 states[3] = mcp->mb[4]; 2007 states[4] = mcp->mb[5]; 2008 states[5] = mcp->mb[6]; /* DPORT status */ 2009 } 2010 2011 if (rval != QLA_SUCCESS) { 2012 /*EMPTY*/ 2013 ql_dbg(ql_dbg_mbx, vha, 0x1055, "Failed=%x.\n", rval); 2014 } else { 2015 if (IS_QLA27XX(ha)) { 2016 if (mcp->mb[2] == 6 || mcp->mb[3] == 2) 2017 ql_dbg(ql_dbg_mbx, vha, 0x119e, 2018 "Invalid SFP/Validation Failed\n"); 2019 } 2020 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1056, 2021 "Done %s.\n", __func__); 2022 } 2023 2024 return rval; 2025 } 2026 2027 /* 2028 * qla2x00_get_port_name 2029 * Issue get port name mailbox command. 2030 * Returned name is in big endian format. 2031 * 2032 * Input: 2033 * ha = adapter block pointer. 2034 * loop_id = loop ID of device. 2035 * name = pointer for name. 2036 * TARGET_QUEUE_LOCK must be released. 2037 * ADAPTER_STATE_LOCK must be released. 2038 * 2039 * Returns: 2040 * qla2x00 local function return status code. 2041 * 2042 * Context: 2043 * Kernel context. 2044 */ 2045 int 2046 qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name, 2047 uint8_t opt) 2048 { 2049 int rval; 2050 mbx_cmd_t mc; 2051 mbx_cmd_t *mcp = &mc; 2052 2053 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1057, 2054 "Entered %s.\n", __func__); 2055 2056 mcp->mb[0] = MBC_GET_PORT_NAME; 2057 mcp->mb[9] = vha->vp_idx; 2058 mcp->out_mb = MBX_9|MBX_1|MBX_0; 2059 if (HAS_EXTENDED_IDS(vha->hw)) { 2060 mcp->mb[1] = loop_id; 2061 mcp->mb[10] = opt; 2062 mcp->out_mb |= MBX_10; 2063 } else { 2064 mcp->mb[1] = loop_id << 8 | opt; 2065 } 2066 2067 mcp->in_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 2068 mcp->tov = MBX_TOV_SECONDS; 2069 mcp->flags = 0; 2070 rval = qla2x00_mailbox_command(vha, mcp); 2071 2072 if (rval != QLA_SUCCESS) { 2073 /*EMPTY*/ 2074 ql_dbg(ql_dbg_mbx, vha, 0x1058, "Failed=%x.\n", rval); 2075 } else { 2076 if (name != NULL) { 2077 /* This function returns name in big endian. */ 2078 name[0] = MSB(mcp->mb[2]); 2079 name[1] = LSB(mcp->mb[2]); 2080 name[2] = MSB(mcp->mb[3]); 2081 name[3] = LSB(mcp->mb[3]); 2082 name[4] = MSB(mcp->mb[6]); 2083 name[5] = LSB(mcp->mb[6]); 2084 name[6] = MSB(mcp->mb[7]); 2085 name[7] = LSB(mcp->mb[7]); 2086 } 2087 2088 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1059, 2089 "Done %s.\n", __func__); 2090 } 2091 2092 return rval; 2093 } 2094 2095 /* 2096 * qla24xx_link_initialization 2097 * Issue link initialization mailbox command. 2098 * 2099 * Input: 2100 * ha = adapter block pointer. 2101 * TARGET_QUEUE_LOCK must be released. 2102 * ADAPTER_STATE_LOCK must be released. 2103 * 2104 * Returns: 2105 * qla2x00 local function return status code. 2106 * 2107 * Context: 2108 * Kernel context. 
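 *
 * Note: only issued on FWI2-capable, non-CNA adapters; mb[1] is set up
 * differently for loop and point-to-point operation (see the
 * operating_mode check below).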
2109 */ 2110 int 2111 qla24xx_link_initialize(scsi_qla_host_t *vha) 2112 { 2113 int rval; 2114 mbx_cmd_t mc; 2115 mbx_cmd_t *mcp = &mc; 2116 2117 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1152, 2118 "Entered %s.\n", __func__); 2119 2120 if (!IS_FWI2_CAPABLE(vha->hw) || IS_CNA_CAPABLE(vha->hw)) 2121 return QLA_FUNCTION_FAILED; 2122 2123 mcp->mb[0] = MBC_LINK_INITIALIZATION; 2124 mcp->mb[1] = BIT_4; 2125 if (vha->hw->operating_mode == LOOP) 2126 mcp->mb[1] |= BIT_6; 2127 else 2128 mcp->mb[1] |= BIT_5; 2129 mcp->mb[2] = 0; 2130 mcp->mb[3] = 0; 2131 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 2132 mcp->in_mb = MBX_0; 2133 mcp->tov = MBX_TOV_SECONDS; 2134 mcp->flags = 0; 2135 rval = qla2x00_mailbox_command(vha, mcp); 2136 2137 if (rval != QLA_SUCCESS) { 2138 ql_dbg(ql_dbg_mbx, vha, 0x1153, "Failed=%x.\n", rval); 2139 } else { 2140 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1154, 2141 "Done %s.\n", __func__); 2142 } 2143 2144 return rval; 2145 } 2146 2147 /* 2148 * qla2x00_lip_reset 2149 * Issue LIP reset mailbox command. 2150 * 2151 * Input: 2152 * ha = adapter block pointer. 2153 * TARGET_QUEUE_LOCK must be released. 2154 * ADAPTER_STATE_LOCK must be released. 2155 * 2156 * Returns: 2157 * qla2x00 local function return status code. 2158 * 2159 * Context: 2160 * Kernel context. 2161 */ 2162 int 2163 qla2x00_lip_reset(scsi_qla_host_t *vha) 2164 { 2165 int rval; 2166 mbx_cmd_t mc; 2167 mbx_cmd_t *mcp = &mc; 2168 2169 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105a, 2170 "Entered %s.\n", __func__); 2171 2172 if (IS_CNA_CAPABLE(vha->hw)) { 2173 /* Logout across all FCFs. */ 2174 mcp->mb[0] = MBC_LIP_FULL_LOGIN; 2175 mcp->mb[1] = BIT_1; 2176 mcp->mb[2] = 0; 2177 mcp->out_mb = MBX_2|MBX_1|MBX_0; 2178 } else if (IS_FWI2_CAPABLE(vha->hw)) { 2179 mcp->mb[0] = MBC_LIP_FULL_LOGIN; 2180 mcp->mb[1] = BIT_6; 2181 mcp->mb[2] = 0; 2182 mcp->mb[3] = vha->hw->loop_reset_delay; 2183 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 2184 } else { 2185 mcp->mb[0] = MBC_LIP_RESET; 2186 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 2187 if (HAS_EXTENDED_IDS(vha->hw)) { 2188 mcp->mb[1] = 0x00ff; 2189 mcp->mb[10] = 0; 2190 mcp->out_mb |= MBX_10; 2191 } else { 2192 mcp->mb[1] = 0xff00; 2193 } 2194 mcp->mb[2] = vha->hw->loop_reset_delay; 2195 mcp->mb[3] = 0; 2196 } 2197 mcp->in_mb = MBX_0; 2198 mcp->tov = MBX_TOV_SECONDS; 2199 mcp->flags = 0; 2200 rval = qla2x00_mailbox_command(vha, mcp); 2201 2202 if (rval != QLA_SUCCESS) { 2203 /*EMPTY*/ 2204 ql_dbg(ql_dbg_mbx, vha, 0x105b, "Failed=%x.\n", rval); 2205 } else { 2206 /*EMPTY*/ 2207 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105c, 2208 "Done %s.\n", __func__); 2209 } 2210 2211 return rval; 2212 } 2213 2214 /* 2215 * qla2x00_send_sns 2216 * Send SNS command. 2217 * 2218 * Input: 2219 * ha = adapter block pointer. 2220 * sns = pointer for command. 2221 * cmd_size = command size. 2222 * buf_size = response/command size. 2223 * TARGET_QUEUE_LOCK must be released. 2224 * ADAPTER_STATE_LOCK must be released. 2225 * 2226 * Returns: 2227 * qla2x00 local function return status code. 2228 * 2229 * Context: 2230 * Kernel context. 
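 *
 * Note: the buffer at sns_phys_address carries the SNS request on entry
 * and the response on completion (MBX_DMA_OUT|MBX_DMA_IN), and the
 * timeout is derived from the firmware login timeout rather than
 * MBX_TOV_SECONDS.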
2231 */ 2232 int 2233 qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address, 2234 uint16_t cmd_size, size_t buf_size) 2235 { 2236 int rval; 2237 mbx_cmd_t mc; 2238 mbx_cmd_t *mcp = &mc; 2239 2240 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105d, 2241 "Entered %s.\n", __func__); 2242 2243 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105e, 2244 "Retry cnt=%d ratov=%d total tov=%d.\n", 2245 vha->hw->retry_count, vha->hw->login_timeout, mcp->tov); 2246 2247 mcp->mb[0] = MBC_SEND_SNS_COMMAND; 2248 mcp->mb[1] = cmd_size; 2249 mcp->mb[2] = MSW(sns_phys_address); 2250 mcp->mb[3] = LSW(sns_phys_address); 2251 mcp->mb[6] = MSW(MSD(sns_phys_address)); 2252 mcp->mb[7] = LSW(MSD(sns_phys_address)); 2253 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 2254 mcp->in_mb = MBX_0|MBX_1; 2255 mcp->buf_size = buf_size; 2256 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN; 2257 mcp->tov = (vha->hw->login_timeout * 2) + (vha->hw->login_timeout / 2); 2258 rval = qla2x00_mailbox_command(vha, mcp); 2259 2260 if (rval != QLA_SUCCESS) { 2261 /*EMPTY*/ 2262 ql_dbg(ql_dbg_mbx, vha, 0x105f, 2263 "Failed=%x mb[0]=%x mb[1]=%x.\n", 2264 rval, mcp->mb[0], mcp->mb[1]); 2265 } else { 2266 /*EMPTY*/ 2267 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1060, 2268 "Done %s.\n", __func__); 2269 } 2270 2271 return rval; 2272 } 2273 2274 int 2275 qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, 2276 uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt) 2277 { 2278 int rval; 2279 2280 struct logio_entry_24xx *lg; 2281 dma_addr_t lg_dma; 2282 uint32_t iop[2]; 2283 struct qla_hw_data *ha = vha->hw; 2284 struct req_que *req; 2285 2286 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1061, 2287 "Entered %s.\n", __func__); 2288 2289 if (vha->vp_idx && vha->qpair) 2290 req = vha->qpair->req; 2291 else 2292 req = ha->req_q_map[0]; 2293 2294 lg = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma); 2295 if (lg == NULL) { 2296 ql_log(ql_log_warn, vha, 0x1062, 2297 "Failed to allocate login IOCB.\n"); 2298 return QLA_MEMORY_ALLOC_FAILED; 2299 } 2300 2301 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE; 2302 lg->entry_count = 1; 2303 lg->handle = MAKE_HANDLE(req->id, lg->handle); 2304 lg->nport_handle = cpu_to_le16(loop_id); 2305 lg->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI); 2306 if (opt & BIT_0) 2307 lg->control_flags |= cpu_to_le16(LCF_COND_PLOGI); 2308 if (opt & BIT_1) 2309 lg->control_flags |= cpu_to_le16(LCF_SKIP_PRLI); 2310 lg->port_id[0] = al_pa; 2311 lg->port_id[1] = area; 2312 lg->port_id[2] = domain; 2313 lg->vp_index = vha->vp_idx; 2314 rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0, 2315 (ha->r_a_tov / 10 * 2) + 2); 2316 if (rval != QLA_SUCCESS) { 2317 ql_dbg(ql_dbg_mbx, vha, 0x1063, 2318 "Failed to issue login IOCB (%x).\n", rval); 2319 } else if (lg->entry_status != 0) { 2320 ql_dbg(ql_dbg_mbx, vha, 0x1064, 2321 "Failed to complete IOCB -- error status (%x).\n", 2322 lg->entry_status); 2323 rval = QLA_FUNCTION_FAILED; 2324 } else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) { 2325 iop[0] = le32_to_cpu(lg->io_parameter[0]); 2326 iop[1] = le32_to_cpu(lg->io_parameter[1]); 2327 2328 ql_dbg(ql_dbg_mbx, vha, 0x1065, 2329 "Failed to complete IOCB -- completion status (%x) " 2330 "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status), 2331 iop[0], iop[1]); 2332 2333 switch (iop[0]) { 2334 case LSC_SCODE_PORTID_USED: 2335 mb[0] = MBS_PORT_ID_USED; 2336 mb[1] = LSW(iop[1]); 2337 break; 2338 case LSC_SCODE_NPORT_USED: 2339 mb[0] = MBS_LOOP_ID_USED; 2340 break; 2341 case LSC_SCODE_NOLINK: 2342 case LSC_SCODE_NOIOCB: 
2343 case LSC_SCODE_NOXCB: 2344 case LSC_SCODE_CMD_FAILED: 2345 case LSC_SCODE_NOFABRIC: 2346 case LSC_SCODE_FW_NOT_READY: 2347 case LSC_SCODE_NOT_LOGGED_IN: 2348 case LSC_SCODE_NOPCB: 2349 case LSC_SCODE_ELS_REJECT: 2350 case LSC_SCODE_CMD_PARAM_ERR: 2351 case LSC_SCODE_NONPORT: 2352 case LSC_SCODE_LOGGED_IN: 2353 case LSC_SCODE_NOFLOGI_ACC: 2354 default: 2355 mb[0] = MBS_COMMAND_ERROR; 2356 break; 2357 } 2358 } else { 2359 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1066, 2360 "Done %s.\n", __func__); 2361 2362 iop[0] = le32_to_cpu(lg->io_parameter[0]); 2363 2364 mb[0] = MBS_COMMAND_COMPLETE; 2365 mb[1] = 0; 2366 if (iop[0] & BIT_4) { 2367 if (iop[0] & BIT_8) 2368 mb[1] |= BIT_1; 2369 } else 2370 mb[1] = BIT_0; 2371 2372 /* Passback COS information. */ 2373 mb[10] = 0; 2374 if (lg->io_parameter[7] || lg->io_parameter[8]) 2375 mb[10] |= BIT_0; /* Class 2. */ 2376 if (lg->io_parameter[9] || lg->io_parameter[10]) 2377 mb[10] |= BIT_1; /* Class 3. */ 2378 if (lg->io_parameter[0] & cpu_to_le32(BIT_7)) 2379 mb[10] |= BIT_7; /* Confirmed Completion 2380 * Allowed 2381 */ 2382 } 2383 2384 dma_pool_free(ha->s_dma_pool, lg, lg_dma); 2385 2386 return rval; 2387 } 2388 2389 /* 2390 * qla2x00_login_fabric 2391 * Issue login fabric port mailbox command. 2392 * 2393 * Input: 2394 * ha = adapter block pointer. 2395 * loop_id = device loop ID. 2396 * domain = device domain. 2397 * area = device area. 2398 * al_pa = device AL_PA. 2399 * status = pointer for return status. 2400 * opt = command options. 2401 * TARGET_QUEUE_LOCK must be released. 2402 * ADAPTER_STATE_LOCK must be released. 2403 * 2404 * Returns: 2405 * qla2x00 local function return status code. 2406 * 2407 * Context: 2408 * Kernel context. 2409 */ 2410 int 2411 qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, 2412 uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt) 2413 { 2414 int rval; 2415 mbx_cmd_t mc; 2416 mbx_cmd_t *mcp = &mc; 2417 struct qla_hw_data *ha = vha->hw; 2418 2419 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1067, 2420 "Entered %s.\n", __func__); 2421 2422 mcp->mb[0] = MBC_LOGIN_FABRIC_PORT; 2423 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 2424 if (HAS_EXTENDED_IDS(ha)) { 2425 mcp->mb[1] = loop_id; 2426 mcp->mb[10] = opt; 2427 mcp->out_mb |= MBX_10; 2428 } else { 2429 mcp->mb[1] = (loop_id << 8) | opt; 2430 } 2431 mcp->mb[2] = domain; 2432 mcp->mb[3] = area << 8 | al_pa; 2433 2434 mcp->in_mb = MBX_7|MBX_6|MBX_2|MBX_1|MBX_0; 2435 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2); 2436 mcp->flags = 0; 2437 rval = qla2x00_mailbox_command(vha, mcp); 2438 2439 /* Return mailbox statuses. */ 2440 if (mb != NULL) { 2441 mb[0] = mcp->mb[0]; 2442 mb[1] = mcp->mb[1]; 2443 mb[2] = mcp->mb[2]; 2444 mb[6] = mcp->mb[6]; 2445 mb[7] = mcp->mb[7]; 2446 /* COS retrieved from Get-Port-Database mailbox command. */ 2447 mb[10] = 0; 2448 } 2449 2450 if (rval != QLA_SUCCESS) { 2451 /* RLU tmp code: need to change main mailbox_command function to 2452 * return ok even when the mailbox completion value is not 2453 * SUCCESS. The caller needs to be responsible to interpret 2454 * the return values of this mailbox command if we're not 2455 * to change too much of the existing code. 
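 * The mb[0] values accepted below (0x4001-0x4003, 0x4005 and 0x4006) are
 * firmware mailbox completion statuses other than MBS_COMMAND_COMPLETE;
 * they are handed back through mb[] for the caller to interpret.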
2456 */ 2457 if (mcp->mb[0] == 0x4001 || mcp->mb[0] == 0x4002 || 2458 mcp->mb[0] == 0x4003 || mcp->mb[0] == 0x4005 || 2459 mcp->mb[0] == 0x4006) 2460 rval = QLA_SUCCESS; 2461 2462 /*EMPTY*/ 2463 ql_dbg(ql_dbg_mbx, vha, 0x1068, 2464 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", 2465 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); 2466 } else { 2467 /*EMPTY*/ 2468 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1069, 2469 "Done %s.\n", __func__); 2470 } 2471 2472 return rval; 2473 } 2474 2475 /* 2476 * qla2x00_login_local_device 2477 * Issue login loop port mailbox command. 2478 * 2479 * Input: 2480 * ha = adapter block pointer. 2481 * loop_id = device loop ID. 2482 * opt = command options. 2483 * 2484 * Returns: 2485 * Return status code. 2486 * 2487 * Context: 2488 * Kernel context. 2489 * 2490 */ 2491 int 2492 qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport, 2493 uint16_t *mb_ret, uint8_t opt) 2494 { 2495 int rval; 2496 mbx_cmd_t mc; 2497 mbx_cmd_t *mcp = &mc; 2498 struct qla_hw_data *ha = vha->hw; 2499 2500 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106a, 2501 "Entered %s.\n", __func__); 2502 2503 if (IS_FWI2_CAPABLE(ha)) 2504 return qla24xx_login_fabric(vha, fcport->loop_id, 2505 fcport->d_id.b.domain, fcport->d_id.b.area, 2506 fcport->d_id.b.al_pa, mb_ret, opt); 2507 2508 mcp->mb[0] = MBC_LOGIN_LOOP_PORT; 2509 if (HAS_EXTENDED_IDS(ha)) 2510 mcp->mb[1] = fcport->loop_id; 2511 else 2512 mcp->mb[1] = fcport->loop_id << 8; 2513 mcp->mb[2] = opt; 2514 mcp->out_mb = MBX_2|MBX_1|MBX_0; 2515 mcp->in_mb = MBX_7|MBX_6|MBX_1|MBX_0; 2516 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2); 2517 mcp->flags = 0; 2518 rval = qla2x00_mailbox_command(vha, mcp); 2519 2520 /* Return mailbox statuses. */ 2521 if (mb_ret != NULL) { 2522 mb_ret[0] = mcp->mb[0]; 2523 mb_ret[1] = mcp->mb[1]; 2524 mb_ret[6] = mcp->mb[6]; 2525 mb_ret[7] = mcp->mb[7]; 2526 } 2527 2528 if (rval != QLA_SUCCESS) { 2529 /* AV tmp code: need to change main mailbox_command function to 2530 * return ok even when the mailbox completion value is not 2531 * SUCCESS. The caller needs to be responsible to interpret 2532 * the return values of this mailbox command if we're not 2533 * to change too much of the existing code. 
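 * 0x4005/0x4006 correspond to the firmware "command error" and "command
 * parameter error" completion statuses; they are reported back through
 * mb_ret[] rather than failing the call outright.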
2534 */ 2535 if (mcp->mb[0] == 0x4005 || mcp->mb[0] == 0x4006) 2536 rval = QLA_SUCCESS; 2537 2538 ql_dbg(ql_dbg_mbx, vha, 0x106b, 2539 "Failed=%x mb[0]=%x mb[1]=%x mb[6]=%x mb[7]=%x.\n", 2540 rval, mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]); 2541 } else { 2542 /*EMPTY*/ 2543 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106c, 2544 "Done %s.\n", __func__); 2545 } 2546 2547 return (rval); 2548 } 2549 2550 int 2551 qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, 2552 uint8_t area, uint8_t al_pa) 2553 { 2554 int rval; 2555 struct logio_entry_24xx *lg; 2556 dma_addr_t lg_dma; 2557 struct qla_hw_data *ha = vha->hw; 2558 struct req_que *req; 2559 2560 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106d, 2561 "Entered %s.\n", __func__); 2562 2563 lg = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma); 2564 if (lg == NULL) { 2565 ql_log(ql_log_warn, vha, 0x106e, 2566 "Failed to allocate logout IOCB.\n"); 2567 return QLA_MEMORY_ALLOC_FAILED; 2568 } 2569 2570 req = vha->req; 2571 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE; 2572 lg->entry_count = 1; 2573 lg->handle = MAKE_HANDLE(req->id, lg->handle); 2574 lg->nport_handle = cpu_to_le16(loop_id); 2575 lg->control_flags = 2576 cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO| 2577 LCF_FREE_NPORT); 2578 lg->port_id[0] = al_pa; 2579 lg->port_id[1] = area; 2580 lg->port_id[2] = domain; 2581 lg->vp_index = vha->vp_idx; 2582 rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0, 2583 (ha->r_a_tov / 10 * 2) + 2); 2584 if (rval != QLA_SUCCESS) { 2585 ql_dbg(ql_dbg_mbx, vha, 0x106f, 2586 "Failed to issue logout IOCB (%x).\n", rval); 2587 } else if (lg->entry_status != 0) { 2588 ql_dbg(ql_dbg_mbx, vha, 0x1070, 2589 "Failed to complete IOCB -- error status (%x).\n", 2590 lg->entry_status); 2591 rval = QLA_FUNCTION_FAILED; 2592 } else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) { 2593 ql_dbg(ql_dbg_mbx, vha, 0x1071, 2594 "Failed to complete IOCB -- completion status (%x) " 2595 "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status), 2596 le32_to_cpu(lg->io_parameter[0]), 2597 le32_to_cpu(lg->io_parameter[1])); 2598 } else { 2599 /*EMPTY*/ 2600 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1072, 2601 "Done %s.\n", __func__); 2602 } 2603 2604 dma_pool_free(ha->s_dma_pool, lg, lg_dma); 2605 2606 return rval; 2607 } 2608 2609 /* 2610 * qla2x00_fabric_logout 2611 * Issue logout fabric port mailbox command. 2612 * 2613 * Input: 2614 * ha = adapter block pointer. 2615 * loop_id = device loop ID. 2616 * TARGET_QUEUE_LOCK must be released. 2617 * ADAPTER_STATE_LOCK must be released. 2618 * 2619 * Returns: 2620 * qla2x00 local function return status code. 2621 * 2622 * Context: 2623 * Kernel context. 
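 *
 * Note: this is the mailbox-based logout used by pre-FWI2 ISPs; the
 * IOCB-based qla24xx_fabric_logout() above is its ISP24xx+ counterpart.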
2624 */ 2625 int 2626 qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, 2627 uint8_t area, uint8_t al_pa) 2628 { 2629 int rval; 2630 mbx_cmd_t mc; 2631 mbx_cmd_t *mcp = &mc; 2632 2633 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1073, 2634 "Entered %s.\n", __func__); 2635 2636 mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT; 2637 mcp->out_mb = MBX_1|MBX_0; 2638 if (HAS_EXTENDED_IDS(vha->hw)) { 2639 mcp->mb[1] = loop_id; 2640 mcp->mb[10] = 0; 2641 mcp->out_mb |= MBX_10; 2642 } else { 2643 mcp->mb[1] = loop_id << 8; 2644 } 2645 2646 mcp->in_mb = MBX_1|MBX_0; 2647 mcp->tov = MBX_TOV_SECONDS; 2648 mcp->flags = 0; 2649 rval = qla2x00_mailbox_command(vha, mcp); 2650 2651 if (rval != QLA_SUCCESS) { 2652 /*EMPTY*/ 2653 ql_dbg(ql_dbg_mbx, vha, 0x1074, 2654 "Failed=%x mb[1]=%x.\n", rval, mcp->mb[1]); 2655 } else { 2656 /*EMPTY*/ 2657 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1075, 2658 "Done %s.\n", __func__); 2659 } 2660 2661 return rval; 2662 } 2663 2664 /* 2665 * qla2x00_full_login_lip 2666 * Issue full login LIP mailbox command. 2667 * 2668 * Input: 2669 * ha = adapter block pointer. 2670 * TARGET_QUEUE_LOCK must be released. 2671 * ADAPTER_STATE_LOCK must be released. 2672 * 2673 * Returns: 2674 * qla2x00 local function return status code. 2675 * 2676 * Context: 2677 * Kernel context. 2678 */ 2679 int 2680 qla2x00_full_login_lip(scsi_qla_host_t *vha) 2681 { 2682 int rval; 2683 mbx_cmd_t mc; 2684 mbx_cmd_t *mcp = &mc; 2685 2686 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1076, 2687 "Entered %s.\n", __func__); 2688 2689 mcp->mb[0] = MBC_LIP_FULL_LOGIN; 2690 mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? BIT_3 : 0; 2691 mcp->mb[2] = 0; 2692 mcp->mb[3] = 0; 2693 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 2694 mcp->in_mb = MBX_0; 2695 mcp->tov = MBX_TOV_SECONDS; 2696 mcp->flags = 0; 2697 rval = qla2x00_mailbox_command(vha, mcp); 2698 2699 if (rval != QLA_SUCCESS) { 2700 /*EMPTY*/ 2701 ql_dbg(ql_dbg_mbx, vha, 0x1077, "Failed=%x.\n", rval); 2702 } else { 2703 /*EMPTY*/ 2704 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1078, 2705 "Done %s.\n", __func__); 2706 } 2707 2708 return rval; 2709 } 2710 2711 /* 2712 * qla2x00_get_id_list 2713 * 2714 * Input: 2715 * ha = adapter block pointer. 2716 * 2717 * Returns: 2718 * qla2x00 local function return status code. 2719 * 2720 * Context: 2721 * Kernel context. 
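 *
 * Note: issues the get ID list mailbox command; on success *entries is
 * set to the number of entries placed in the id_list buffer.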
2722 */ 2723 int 2724 qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma, 2725 uint16_t *entries) 2726 { 2727 int rval; 2728 mbx_cmd_t mc; 2729 mbx_cmd_t *mcp = &mc; 2730 2731 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1079, 2732 "Entered %s.\n", __func__); 2733 2734 if (id_list == NULL) 2735 return QLA_FUNCTION_FAILED; 2736 2737 mcp->mb[0] = MBC_GET_ID_LIST; 2738 mcp->out_mb = MBX_0; 2739 if (IS_FWI2_CAPABLE(vha->hw)) { 2740 mcp->mb[2] = MSW(id_list_dma); 2741 mcp->mb[3] = LSW(id_list_dma); 2742 mcp->mb[6] = MSW(MSD(id_list_dma)); 2743 mcp->mb[7] = LSW(MSD(id_list_dma)); 2744 mcp->mb[8] = 0; 2745 mcp->mb[9] = vha->vp_idx; 2746 mcp->out_mb |= MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2; 2747 } else { 2748 mcp->mb[1] = MSW(id_list_dma); 2749 mcp->mb[2] = LSW(id_list_dma); 2750 mcp->mb[3] = MSW(MSD(id_list_dma)); 2751 mcp->mb[6] = LSW(MSD(id_list_dma)); 2752 mcp->out_mb |= MBX_6|MBX_3|MBX_2|MBX_1; 2753 } 2754 mcp->in_mb = MBX_1|MBX_0; 2755 mcp->tov = MBX_TOV_SECONDS; 2756 mcp->flags = 0; 2757 rval = qla2x00_mailbox_command(vha, mcp); 2758 2759 if (rval != QLA_SUCCESS) { 2760 /*EMPTY*/ 2761 ql_dbg(ql_dbg_mbx, vha, 0x107a, "Failed=%x.\n", rval); 2762 } else { 2763 *entries = mcp->mb[1]; 2764 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107b, 2765 "Done %s.\n", __func__); 2766 } 2767 2768 return rval; 2769 } 2770 2771 /* 2772 * qla2x00_get_resource_cnts 2773 * Get current firmware resource counts. 2774 * 2775 * Input: 2776 * ha = adapter block pointer. 2777 * 2778 * Returns: 2779 * qla2x00 local function return status code. 2780 * 2781 * Context: 2782 * Kernel context. 2783 */ 2784 int 2785 qla2x00_get_resource_cnts(scsi_qla_host_t *vha) 2786 { 2787 struct qla_hw_data *ha = vha->hw; 2788 int rval; 2789 mbx_cmd_t mc; 2790 mbx_cmd_t *mcp = &mc; 2791 2792 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107c, 2793 "Entered %s.\n", __func__); 2794 2795 mcp->mb[0] = MBC_GET_RESOURCE_COUNTS; 2796 mcp->out_mb = MBX_0; 2797 mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 2798 if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw) || IS_QLA27XX(vha->hw)) 2799 mcp->in_mb |= MBX_12; 2800 mcp->tov = MBX_TOV_SECONDS; 2801 mcp->flags = 0; 2802 rval = qla2x00_mailbox_command(vha, mcp); 2803 2804 if (rval != QLA_SUCCESS) { 2805 /*EMPTY*/ 2806 ql_dbg(ql_dbg_mbx, vha, 0x107d, 2807 "Failed mb[0]=%x.\n", mcp->mb[0]); 2808 } else { 2809 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107e, 2810 "Done %s mb1=%x mb2=%x mb3=%x mb6=%x mb7=%x mb10=%x " 2811 "mb11=%x mb12=%x.\n", __func__, mcp->mb[1], mcp->mb[2], 2812 mcp->mb[3], mcp->mb[6], mcp->mb[7], mcp->mb[10], 2813 mcp->mb[11], mcp->mb[12]); 2814 2815 ha->orig_fw_tgt_xcb_count = mcp->mb[1]; 2816 ha->cur_fw_tgt_xcb_count = mcp->mb[2]; 2817 ha->cur_fw_xcb_count = mcp->mb[3]; 2818 ha->orig_fw_xcb_count = mcp->mb[6]; 2819 ha->cur_fw_iocb_count = mcp->mb[7]; 2820 ha->orig_fw_iocb_count = mcp->mb[10]; 2821 if (ha->flags.npiv_supported) 2822 ha->max_npiv_vports = mcp->mb[11]; 2823 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) 2824 ha->fw_max_fcf_count = mcp->mb[12]; 2825 } 2826 2827 return (rval); 2828 } 2829 2830 /* 2831 * qla2x00_get_fcal_position_map 2832 * Get FCAL (LILP) position map using mailbox command 2833 * 2834 * Input: 2835 * ha = adapter state pointer. 2836 * pos_map = buffer pointer (can be NULL). 2837 * 2838 * Returns: 2839 * qla2x00 local function return status code. 2840 * 2841 * Context: 2842 * Kernel context. 
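 *
 * Note: the first byte of the returned map holds its length (hence the
 * pmap[0] + 1 dump below); when pos_map is supplied the full
 * FCAL_MAP_SIZE buffer is copied out.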
2843 */ 2844 int 2845 qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map) 2846 { 2847 int rval; 2848 mbx_cmd_t mc; 2849 mbx_cmd_t *mcp = &mc; 2850 char *pmap; 2851 dma_addr_t pmap_dma; 2852 struct qla_hw_data *ha = vha->hw; 2853 2854 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107f, 2855 "Entered %s.\n", __func__); 2856 2857 pmap = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma); 2858 if (pmap == NULL) { 2859 ql_log(ql_log_warn, vha, 0x1080, 2860 "Memory alloc failed.\n"); 2861 return QLA_MEMORY_ALLOC_FAILED; 2862 } 2863 2864 mcp->mb[0] = MBC_GET_FC_AL_POSITION_MAP; 2865 mcp->mb[2] = MSW(pmap_dma); 2866 mcp->mb[3] = LSW(pmap_dma); 2867 mcp->mb[6] = MSW(MSD(pmap_dma)); 2868 mcp->mb[7] = LSW(MSD(pmap_dma)); 2869 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0; 2870 mcp->in_mb = MBX_1|MBX_0; 2871 mcp->buf_size = FCAL_MAP_SIZE; 2872 mcp->flags = MBX_DMA_IN; 2873 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2); 2874 rval = qla2x00_mailbox_command(vha, mcp); 2875 2876 if (rval == QLA_SUCCESS) { 2877 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1081, 2878 "mb0/mb1=%x/%X FC/AL position map size (%x).\n", 2879 mcp->mb[0], mcp->mb[1], (unsigned)pmap[0]); 2880 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111d, 2881 pmap, pmap[0] + 1); 2882 2883 if (pos_map) 2884 memcpy(pos_map, pmap, FCAL_MAP_SIZE); 2885 } 2886 dma_pool_free(ha->s_dma_pool, pmap, pmap_dma); 2887 2888 if (rval != QLA_SUCCESS) { 2889 ql_dbg(ql_dbg_mbx, vha, 0x1082, "Failed=%x.\n", rval); 2890 } else { 2891 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1083, 2892 "Done %s.\n", __func__); 2893 } 2894 2895 return rval; 2896 } 2897 2898 /* 2899 * qla2x00_get_link_status 2900 * 2901 * Input: 2902 * ha = adapter block pointer. 2903 * loop_id = device loop ID. 2904 * ret_buf = pointer to link status return buffer. 2905 * 2906 * Returns: 2907 * 0 = success. 2908 * BIT_0 = mem alloc error. 2909 * BIT_1 = mailbox error. 2910 */ 2911 int 2912 qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id, 2913 struct link_statistics *stats, dma_addr_t stats_dma) 2914 { 2915 int rval; 2916 mbx_cmd_t mc; 2917 mbx_cmd_t *mcp = &mc; 2918 uint32_t *iter = (void *)stats; 2919 ushort dwords = offsetof(typeof(*stats), link_up_cnt)/sizeof(*iter); 2920 struct qla_hw_data *ha = vha->hw; 2921 2922 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1084, 2923 "Entered %s.\n", __func__); 2924 2925 mcp->mb[0] = MBC_GET_LINK_STATUS; 2926 mcp->mb[2] = MSW(LSD(stats_dma)); 2927 mcp->mb[3] = LSW(LSD(stats_dma)); 2928 mcp->mb[6] = MSW(MSD(stats_dma)); 2929 mcp->mb[7] = LSW(MSD(stats_dma)); 2930 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0; 2931 mcp->in_mb = MBX_0; 2932 if (IS_FWI2_CAPABLE(ha)) { 2933 mcp->mb[1] = loop_id; 2934 mcp->mb[4] = 0; 2935 mcp->mb[10] = 0; 2936 mcp->out_mb |= MBX_10|MBX_4|MBX_1; 2937 mcp->in_mb |= MBX_1; 2938 } else if (HAS_EXTENDED_IDS(ha)) { 2939 mcp->mb[1] = loop_id; 2940 mcp->mb[10] = 0; 2941 mcp->out_mb |= MBX_10|MBX_1; 2942 } else { 2943 mcp->mb[1] = loop_id << 8; 2944 mcp->out_mb |= MBX_1; 2945 } 2946 mcp->tov = MBX_TOV_SECONDS; 2947 mcp->flags = IOCTL_CMD; 2948 rval = qla2x00_mailbox_command(vha, mcp); 2949 2950 if (rval == QLA_SUCCESS) { 2951 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) { 2952 ql_dbg(ql_dbg_mbx, vha, 0x1085, 2953 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 2954 rval = QLA_FUNCTION_FAILED; 2955 } else { 2956 /* Re-endianize - firmware data is le32. 
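 * Only the dwords preceding link_up_cnt are converted here, presumably
 * the portion of struct link_statistics that this older mailbox command
 * actually returns.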
*/ 2957 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1086, 2958 "Done %s.\n", __func__); 2959 for ( ; dwords--; iter++) 2960 le32_to_cpus(iter); 2961 } 2962 } else { 2963 /* Failed. */ 2964 ql_dbg(ql_dbg_mbx, vha, 0x1087, "Failed=%x.\n", rval); 2965 } 2966 2967 return rval; 2968 } 2969 2970 int 2971 qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats, 2972 dma_addr_t stats_dma, uint16_t options) 2973 { 2974 int rval; 2975 mbx_cmd_t mc; 2976 mbx_cmd_t *mcp = &mc; 2977 uint32_t *iter, dwords; 2978 2979 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1088, 2980 "Entered %s.\n", __func__); 2981 2982 memset(&mc, 0, sizeof(mc)); 2983 mc.mb[0] = MBC_GET_LINK_PRIV_STATS; 2984 mc.mb[2] = MSW(stats_dma); 2985 mc.mb[3] = LSW(stats_dma); 2986 mc.mb[6] = MSW(MSD(stats_dma)); 2987 mc.mb[7] = LSW(MSD(stats_dma)); 2988 mc.mb[8] = sizeof(struct link_statistics) / 4; 2989 mc.mb[9] = cpu_to_le16(vha->vp_idx); 2990 mc.mb[10] = cpu_to_le16(options); 2991 2992 rval = qla24xx_send_mb_cmd(vha, &mc); 2993 2994 if (rval == QLA_SUCCESS) { 2995 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) { 2996 ql_dbg(ql_dbg_mbx, vha, 0x1089, 2997 "Failed mb[0]=%x.\n", mcp->mb[0]); 2998 rval = QLA_FUNCTION_FAILED; 2999 } else { 3000 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108a, 3001 "Done %s.\n", __func__); 3002 /* Re-endianize - firmware data is le32. */ 3003 dwords = sizeof(struct link_statistics) / 4; 3004 iter = &stats->link_fail_cnt; 3005 for ( ; dwords--; iter++) 3006 le32_to_cpus(iter); 3007 } 3008 } else { 3009 /* Failed. */ 3010 ql_dbg(ql_dbg_mbx, vha, 0x108b, "Failed=%x.\n", rval); 3011 } 3012 3013 return rval; 3014 } 3015 3016 int 3017 qla24xx_abort_command(srb_t *sp) 3018 { 3019 int rval; 3020 unsigned long flags = 0; 3021 3022 struct abort_entry_24xx *abt; 3023 dma_addr_t abt_dma; 3024 uint32_t handle; 3025 fc_port_t *fcport = sp->fcport; 3026 struct scsi_qla_host *vha = fcport->vha; 3027 struct qla_hw_data *ha = vha->hw; 3028 struct req_que *req = vha->req; 3029 3030 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c, 3031 "Entered %s.\n", __func__); 3032 3033 if (vha->flags.qpairs_available && sp->qpair) 3034 req = sp->qpair->req; 3035 3036 if (ql2xasynctmfenable) 3037 return qla24xx_async_abort_command(sp); 3038 3039 spin_lock_irqsave(&ha->hardware_lock, flags); 3040 for (handle = 1; handle < req->num_outstanding_cmds; handle++) { 3041 if (req->outstanding_cmds[handle] == sp) 3042 break; 3043 } 3044 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3045 if (handle == req->num_outstanding_cmds) { 3046 /* Command not found. 
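 * The SRB is no longer in this request queue's outstanding_cmds[] array,
 * so there is nothing left to abort (it may already have completed).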
*/ 3047 return QLA_FUNCTION_FAILED; 3048 } 3049 3050 abt = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &abt_dma); 3051 if (abt == NULL) { 3052 ql_log(ql_log_warn, vha, 0x108d, 3053 "Failed to allocate abort IOCB.\n"); 3054 return QLA_MEMORY_ALLOC_FAILED; 3055 } 3056 3057 abt->entry_type = ABORT_IOCB_TYPE; 3058 abt->entry_count = 1; 3059 abt->handle = MAKE_HANDLE(req->id, abt->handle); 3060 abt->nport_handle = cpu_to_le16(fcport->loop_id); 3061 abt->handle_to_abort = MAKE_HANDLE(req->id, handle); 3062 abt->port_id[0] = fcport->d_id.b.al_pa; 3063 abt->port_id[1] = fcport->d_id.b.area; 3064 abt->port_id[2] = fcport->d_id.b.domain; 3065 abt->vp_index = fcport->vha->vp_idx; 3066 3067 abt->req_que_no = cpu_to_le16(req->id); 3068 3069 rval = qla2x00_issue_iocb(vha, abt, abt_dma, 0); 3070 if (rval != QLA_SUCCESS) { 3071 ql_dbg(ql_dbg_mbx, vha, 0x108e, 3072 "Failed to issue IOCB (%x).\n", rval); 3073 } else if (abt->entry_status != 0) { 3074 ql_dbg(ql_dbg_mbx, vha, 0x108f, 3075 "Failed to complete IOCB -- error status (%x).\n", 3076 abt->entry_status); 3077 rval = QLA_FUNCTION_FAILED; 3078 } else if (abt->nport_handle != cpu_to_le16(0)) { 3079 ql_dbg(ql_dbg_mbx, vha, 0x1090, 3080 "Failed to complete IOCB -- completion status (%x).\n", 3081 le16_to_cpu(abt->nport_handle)); 3082 if (abt->nport_handle == CS_IOCB_ERROR) 3083 rval = QLA_FUNCTION_PARAMETER_ERROR; 3084 else 3085 rval = QLA_FUNCTION_FAILED; 3086 } else { 3087 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1091, 3088 "Done %s.\n", __func__); 3089 } 3090 3091 dma_pool_free(ha->s_dma_pool, abt, abt_dma); 3092 3093 return rval; 3094 } 3095 3096 struct tsk_mgmt_cmd { 3097 union { 3098 struct tsk_mgmt_entry tsk; 3099 struct sts_entry_24xx sts; 3100 } p; 3101 }; 3102 3103 static int 3104 __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport, 3105 uint64_t l, int tag) 3106 { 3107 int rval, rval2; 3108 struct tsk_mgmt_cmd *tsk; 3109 struct sts_entry_24xx *sts; 3110 dma_addr_t tsk_dma; 3111 scsi_qla_host_t *vha; 3112 struct qla_hw_data *ha; 3113 struct req_que *req; 3114 struct rsp_que *rsp; 3115 struct qla_qpair *qpair; 3116 3117 vha = fcport->vha; 3118 ha = vha->hw; 3119 req = vha->req; 3120 3121 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1092, 3122 "Entered %s.\n", __func__); 3123 3124 if (vha->vp_idx && vha->qpair) { 3125 /* NPIV port */ 3126 qpair = vha->qpair; 3127 rsp = qpair->rsp; 3128 req = qpair->req; 3129 } else { 3130 rsp = req->rsp; 3131 } 3132 3133 tsk = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma); 3134 if (tsk == NULL) { 3135 ql_log(ql_log_warn, vha, 0x1093, 3136 "Failed to allocate task management IOCB.\n"); 3137 return QLA_MEMORY_ALLOC_FAILED; 3138 } 3139 3140 tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE; 3141 tsk->p.tsk.entry_count = 1; 3142 tsk->p.tsk.handle = MAKE_HANDLE(req->id, tsk->p.tsk.handle); 3143 tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id); 3144 tsk->p.tsk.timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); 3145 tsk->p.tsk.control_flags = cpu_to_le32(type); 3146 tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa; 3147 tsk->p.tsk.port_id[1] = fcport->d_id.b.area; 3148 tsk->p.tsk.port_id[2] = fcport->d_id.b.domain; 3149 tsk->p.tsk.vp_index = fcport->vha->vp_idx; 3150 if (type == TCF_LUN_RESET) { 3151 int_to_scsilun(l, &tsk->p.tsk.lun); 3152 host_to_fcp_swap((uint8_t *)&tsk->p.tsk.lun, 3153 sizeof(tsk->p.tsk.lun)); 3154 } 3155 3156 sts = &tsk->p.sts; 3157 rval = qla2x00_issue_iocb(vha, tsk, tsk_dma, 0); 3158 if (rval != QLA_SUCCESS) { 3159 ql_dbg(ql_dbg_mbx, vha, 0x1094, 3160 "Failed to issue %s reset IOCB 
(%x).\n", name, rval); 3161 } else if (sts->entry_status != 0) { 3162 ql_dbg(ql_dbg_mbx, vha, 0x1095, 3163 "Failed to complete IOCB -- error status (%x).\n", 3164 sts->entry_status); 3165 rval = QLA_FUNCTION_FAILED; 3166 } else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) { 3167 ql_dbg(ql_dbg_mbx, vha, 0x1096, 3168 "Failed to complete IOCB -- completion status (%x).\n", 3169 le16_to_cpu(sts->comp_status)); 3170 rval = QLA_FUNCTION_FAILED; 3171 } else if (le16_to_cpu(sts->scsi_status) & 3172 SS_RESPONSE_INFO_LEN_VALID) { 3173 if (le32_to_cpu(sts->rsp_data_len) < 4) { 3174 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1097, 3175 "Ignoring inconsistent data length -- not enough " 3176 "response info (%d).\n", 3177 le32_to_cpu(sts->rsp_data_len)); 3178 } else if (sts->data[3]) { 3179 ql_dbg(ql_dbg_mbx, vha, 0x1098, 3180 "Failed to complete IOCB -- response (%x).\n", 3181 sts->data[3]); 3182 rval = QLA_FUNCTION_FAILED; 3183 } 3184 } 3185 3186 /* Issue marker IOCB. */ 3187 rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l, 3188 type == TCF_LUN_RESET ? MK_SYNC_ID_LUN: MK_SYNC_ID); 3189 if (rval2 != QLA_SUCCESS) { 3190 ql_dbg(ql_dbg_mbx, vha, 0x1099, 3191 "Failed to issue marker IOCB (%x).\n", rval2); 3192 } else { 3193 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109a, 3194 "Done %s.\n", __func__); 3195 } 3196 3197 dma_pool_free(ha->s_dma_pool, tsk, tsk_dma); 3198 3199 return rval; 3200 } 3201 3202 int 3203 qla24xx_abort_target(struct fc_port *fcport, uint64_t l, int tag) 3204 { 3205 struct qla_hw_data *ha = fcport->vha->hw; 3206 3207 if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha)) 3208 return qla2x00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag); 3209 3210 return __qla24xx_issue_tmf("Target", TCF_TARGET_RESET, fcport, l, tag); 3211 } 3212 3213 int 3214 qla24xx_lun_reset(struct fc_port *fcport, uint64_t l, int tag) 3215 { 3216 struct qla_hw_data *ha = fcport->vha->hw; 3217 3218 if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha)) 3219 return qla2x00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag); 3220 3221 return __qla24xx_issue_tmf("Lun", TCF_LUN_RESET, fcport, l, tag); 3222 } 3223 3224 int 3225 qla2x00_system_error(scsi_qla_host_t *vha) 3226 { 3227 int rval; 3228 mbx_cmd_t mc; 3229 mbx_cmd_t *mcp = &mc; 3230 struct qla_hw_data *ha = vha->hw; 3231 3232 if (!IS_QLA23XX(ha) && !IS_FWI2_CAPABLE(ha)) 3233 return QLA_FUNCTION_FAILED; 3234 3235 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109b, 3236 "Entered %s.\n", __func__); 3237 3238 mcp->mb[0] = MBC_GEN_SYSTEM_ERROR; 3239 mcp->out_mb = MBX_0; 3240 mcp->in_mb = MBX_0; 3241 mcp->tov = 5; 3242 mcp->flags = 0; 3243 rval = qla2x00_mailbox_command(vha, mcp); 3244 3245 if (rval != QLA_SUCCESS) { 3246 ql_dbg(ql_dbg_mbx, vha, 0x109c, "Failed=%x.\n", rval); 3247 } else { 3248 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109d, 3249 "Done %s.\n", __func__); 3250 } 3251 3252 return rval; 3253 } 3254 3255 int 3256 qla2x00_write_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t data) 3257 { 3258 int rval; 3259 mbx_cmd_t mc; 3260 mbx_cmd_t *mcp = &mc; 3261 3262 if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) && 3263 !IS_QLA27XX(vha->hw)) 3264 return QLA_FUNCTION_FAILED; 3265 3266 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1182, 3267 "Entered %s.\n", __func__); 3268 3269 mcp->mb[0] = MBC_WRITE_SERDES; 3270 mcp->mb[1] = addr; 3271 if (IS_QLA2031(vha->hw)) 3272 mcp->mb[2] = data & 0xff; 3273 else 3274 mcp->mb[2] = data; 3275 3276 mcp->mb[3] = 0; 3277 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 3278 mcp->in_mb = MBX_0; 3279 mcp->tov = MBX_TOV_SECONDS; 3280 
mcp->flags = 0; 3281 rval = qla2x00_mailbox_command(vha, mcp); 3282 3283 if (rval != QLA_SUCCESS) { 3284 ql_dbg(ql_dbg_mbx, vha, 0x1183, 3285 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 3286 } else { 3287 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1184, 3288 "Done %s.\n", __func__); 3289 } 3290 3291 return rval; 3292 } 3293 3294 int 3295 qla2x00_read_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t *data) 3296 { 3297 int rval; 3298 mbx_cmd_t mc; 3299 mbx_cmd_t *mcp = &mc; 3300 3301 if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) && 3302 !IS_QLA27XX(vha->hw)) 3303 return QLA_FUNCTION_FAILED; 3304 3305 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1185, 3306 "Entered %s.\n", __func__); 3307 3308 mcp->mb[0] = MBC_READ_SERDES; 3309 mcp->mb[1] = addr; 3310 mcp->mb[3] = 0; 3311 mcp->out_mb = MBX_3|MBX_1|MBX_0; 3312 mcp->in_mb = MBX_1|MBX_0; 3313 mcp->tov = MBX_TOV_SECONDS; 3314 mcp->flags = 0; 3315 rval = qla2x00_mailbox_command(vha, mcp); 3316 3317 if (IS_QLA2031(vha->hw)) 3318 *data = mcp->mb[1] & 0xff; 3319 else 3320 *data = mcp->mb[1]; 3321 3322 if (rval != QLA_SUCCESS) { 3323 ql_dbg(ql_dbg_mbx, vha, 0x1186, 3324 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 3325 } else { 3326 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1187, 3327 "Done %s.\n", __func__); 3328 } 3329 3330 return rval; 3331 } 3332 3333 int 3334 qla8044_write_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t data) 3335 { 3336 int rval; 3337 mbx_cmd_t mc; 3338 mbx_cmd_t *mcp = &mc; 3339 3340 if (!IS_QLA8044(vha->hw)) 3341 return QLA_FUNCTION_FAILED; 3342 3343 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x11a0, 3344 "Entered %s.\n", __func__); 3345 3346 mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG; 3347 mcp->mb[1] = HCS_WRITE_SERDES; 3348 mcp->mb[3] = LSW(addr); 3349 mcp->mb[4] = MSW(addr); 3350 mcp->mb[5] = LSW(data); 3351 mcp->mb[6] = MSW(data); 3352 mcp->out_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_1|MBX_0; 3353 mcp->in_mb = MBX_0; 3354 mcp->tov = MBX_TOV_SECONDS; 3355 mcp->flags = 0; 3356 rval = qla2x00_mailbox_command(vha, mcp); 3357 3358 if (rval != QLA_SUCCESS) { 3359 ql_dbg(ql_dbg_mbx, vha, 0x11a1, 3360 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 3361 } else { 3362 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1188, 3363 "Done %s.\n", __func__); 3364 } 3365 3366 return rval; 3367 } 3368 3369 int 3370 qla8044_read_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t *data) 3371 { 3372 int rval; 3373 mbx_cmd_t mc; 3374 mbx_cmd_t *mcp = &mc; 3375 3376 if (!IS_QLA8044(vha->hw)) 3377 return QLA_FUNCTION_FAILED; 3378 3379 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1189, 3380 "Entered %s.\n", __func__); 3381 3382 mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG; 3383 mcp->mb[1] = HCS_READ_SERDES; 3384 mcp->mb[3] = LSW(addr); 3385 mcp->mb[4] = MSW(addr); 3386 mcp->out_mb = MBX_4|MBX_3|MBX_1|MBX_0; 3387 mcp->in_mb = MBX_2|MBX_1|MBX_0; 3388 mcp->tov = MBX_TOV_SECONDS; 3389 mcp->flags = 0; 3390 rval = qla2x00_mailbox_command(vha, mcp); 3391 3392 *data = mcp->mb[2] << 16 | mcp->mb[1]; 3393 3394 if (rval != QLA_SUCCESS) { 3395 ql_dbg(ql_dbg_mbx, vha, 0x118a, 3396 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 3397 } else { 3398 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118b, 3399 "Done %s.\n", __func__); 3400 } 3401 3402 return rval; 3403 } 3404 3405 /** 3406 * qla2x00_set_serdes_params() - 3407 * @vha: HA context 3408 * @sw_em_1g: 3409 * @sw_em_2g: 3410 * @sw_em_4g: 3411 * 3412 * Returns 3413 */ 3414 int 3415 qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g, 3416 uint16_t sw_em_2g, uint16_t sw_em_4g) 3417 { 3418 int rval; 3419 
mbx_cmd_t mc; 3420 mbx_cmd_t *mcp = &mc; 3421 3422 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109e, 3423 "Entered %s.\n", __func__); 3424 3425 mcp->mb[0] = MBC_SERDES_PARAMS; 3426 mcp->mb[1] = BIT_0; 3427 mcp->mb[2] = sw_em_1g | BIT_15; 3428 mcp->mb[3] = sw_em_2g | BIT_15; 3429 mcp->mb[4] = sw_em_4g | BIT_15; 3430 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 3431 mcp->in_mb = MBX_0; 3432 mcp->tov = MBX_TOV_SECONDS; 3433 mcp->flags = 0; 3434 rval = qla2x00_mailbox_command(vha, mcp); 3435 3436 if (rval != QLA_SUCCESS) { 3437 /*EMPTY*/ 3438 ql_dbg(ql_dbg_mbx, vha, 0x109f, 3439 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 3440 } else { 3441 /*EMPTY*/ 3442 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a0, 3443 "Done %s.\n", __func__); 3444 } 3445 3446 return rval; 3447 } 3448 3449 int 3450 qla2x00_stop_firmware(scsi_qla_host_t *vha) 3451 { 3452 int rval; 3453 mbx_cmd_t mc; 3454 mbx_cmd_t *mcp = &mc; 3455 3456 if (!IS_FWI2_CAPABLE(vha->hw)) 3457 return QLA_FUNCTION_FAILED; 3458 3459 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a1, 3460 "Entered %s.\n", __func__); 3461 3462 mcp->mb[0] = MBC_STOP_FIRMWARE; 3463 mcp->mb[1] = 0; 3464 mcp->out_mb = MBX_1|MBX_0; 3465 mcp->in_mb = MBX_0; 3466 mcp->tov = 5; 3467 mcp->flags = 0; 3468 rval = qla2x00_mailbox_command(vha, mcp); 3469 3470 if (rval != QLA_SUCCESS) { 3471 ql_dbg(ql_dbg_mbx, vha, 0x10a2, "Failed=%x.\n", rval); 3472 if (mcp->mb[0] == MBS_INVALID_COMMAND) 3473 rval = QLA_INVALID_COMMAND; 3474 } else { 3475 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a3, 3476 "Done %s.\n", __func__); 3477 } 3478 3479 return rval; 3480 } 3481 3482 int 3483 qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma, 3484 uint16_t buffers) 3485 { 3486 int rval; 3487 mbx_cmd_t mc; 3488 mbx_cmd_t *mcp = &mc; 3489 3490 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a4, 3491 "Entered %s.\n", __func__); 3492 3493 if (!IS_FWI2_CAPABLE(vha->hw)) 3494 return QLA_FUNCTION_FAILED; 3495 3496 if (unlikely(pci_channel_offline(vha->hw->pdev))) 3497 return QLA_FUNCTION_FAILED; 3498 3499 mcp->mb[0] = MBC_TRACE_CONTROL; 3500 mcp->mb[1] = TC_EFT_ENABLE; 3501 mcp->mb[2] = LSW(eft_dma); 3502 mcp->mb[3] = MSW(eft_dma); 3503 mcp->mb[4] = LSW(MSD(eft_dma)); 3504 mcp->mb[5] = MSW(MSD(eft_dma)); 3505 mcp->mb[6] = buffers; 3506 mcp->mb[7] = TC_AEN_DISABLE; 3507 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 3508 mcp->in_mb = MBX_1|MBX_0; 3509 mcp->tov = MBX_TOV_SECONDS; 3510 mcp->flags = 0; 3511 rval = qla2x00_mailbox_command(vha, mcp); 3512 if (rval != QLA_SUCCESS) { 3513 ql_dbg(ql_dbg_mbx, vha, 0x10a5, 3514 "Failed=%x mb[0]=%x mb[1]=%x.\n", 3515 rval, mcp->mb[0], mcp->mb[1]); 3516 } else { 3517 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a6, 3518 "Done %s.\n", __func__); 3519 } 3520 3521 return rval; 3522 } 3523 3524 int 3525 qla2x00_disable_eft_trace(scsi_qla_host_t *vha) 3526 { 3527 int rval; 3528 mbx_cmd_t mc; 3529 mbx_cmd_t *mcp = &mc; 3530 3531 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a7, 3532 "Entered %s.\n", __func__); 3533 3534 if (!IS_FWI2_CAPABLE(vha->hw)) 3535 return QLA_FUNCTION_FAILED; 3536 3537 if (unlikely(pci_channel_offline(vha->hw->pdev))) 3538 return QLA_FUNCTION_FAILED; 3539 3540 mcp->mb[0] = MBC_TRACE_CONTROL; 3541 mcp->mb[1] = TC_EFT_DISABLE; 3542 mcp->out_mb = MBX_1|MBX_0; 3543 mcp->in_mb = MBX_1|MBX_0; 3544 mcp->tov = MBX_TOV_SECONDS; 3545 mcp->flags = 0; 3546 rval = qla2x00_mailbox_command(vha, mcp); 3547 if (rval != QLA_SUCCESS) { 3548 ql_dbg(ql_dbg_mbx, vha, 0x10a8, 3549 "Failed=%x mb[0]=%x mb[1]=%x.\n", 3550 rval, mcp->mb[0], mcp->mb[1]); 
3551 } else { 3552 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a9, 3553 "Done %s.\n", __func__); 3554 } 3555 3556 return rval; 3557 } 3558 3559 int 3560 qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma, 3561 uint16_t buffers, uint16_t *mb, uint32_t *dwords) 3562 { 3563 int rval; 3564 mbx_cmd_t mc; 3565 mbx_cmd_t *mcp = &mc; 3566 3567 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10aa, 3568 "Entered %s.\n", __func__); 3569 3570 if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw) && 3571 !IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw)) 3572 return QLA_FUNCTION_FAILED; 3573 3574 if (unlikely(pci_channel_offline(vha->hw->pdev))) 3575 return QLA_FUNCTION_FAILED; 3576 3577 mcp->mb[0] = MBC_TRACE_CONTROL; 3578 mcp->mb[1] = TC_FCE_ENABLE; 3579 mcp->mb[2] = LSW(fce_dma); 3580 mcp->mb[3] = MSW(fce_dma); 3581 mcp->mb[4] = LSW(MSD(fce_dma)); 3582 mcp->mb[5] = MSW(MSD(fce_dma)); 3583 mcp->mb[6] = buffers; 3584 mcp->mb[7] = TC_AEN_DISABLE; 3585 mcp->mb[8] = 0; 3586 mcp->mb[9] = TC_FCE_DEFAULT_RX_SIZE; 3587 mcp->mb[10] = TC_FCE_DEFAULT_TX_SIZE; 3588 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2| 3589 MBX_1|MBX_0; 3590 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 3591 mcp->tov = MBX_TOV_SECONDS; 3592 mcp->flags = 0; 3593 rval = qla2x00_mailbox_command(vha, mcp); 3594 if (rval != QLA_SUCCESS) { 3595 ql_dbg(ql_dbg_mbx, vha, 0x10ab, 3596 "Failed=%x mb[0]=%x mb[1]=%x.\n", 3597 rval, mcp->mb[0], mcp->mb[1]); 3598 } else { 3599 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ac, 3600 "Done %s.\n", __func__); 3601 3602 if (mb) 3603 memcpy(mb, mcp->mb, 8 * sizeof(*mb)); 3604 if (dwords) 3605 *dwords = buffers; 3606 } 3607 3608 return rval; 3609 } 3610 3611 int 3612 qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd) 3613 { 3614 int rval; 3615 mbx_cmd_t mc; 3616 mbx_cmd_t *mcp = &mc; 3617 3618 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ad, 3619 "Entered %s.\n", __func__); 3620 3621 if (!IS_FWI2_CAPABLE(vha->hw)) 3622 return QLA_FUNCTION_FAILED; 3623 3624 if (unlikely(pci_channel_offline(vha->hw->pdev))) 3625 return QLA_FUNCTION_FAILED; 3626 3627 mcp->mb[0] = MBC_TRACE_CONTROL; 3628 mcp->mb[1] = TC_FCE_DISABLE; 3629 mcp->mb[2] = TC_FCE_DISABLE_TRACE; 3630 mcp->out_mb = MBX_2|MBX_1|MBX_0; 3631 mcp->in_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2| 3632 MBX_1|MBX_0; 3633 mcp->tov = MBX_TOV_SECONDS; 3634 mcp->flags = 0; 3635 rval = qla2x00_mailbox_command(vha, mcp); 3636 if (rval != QLA_SUCCESS) { 3637 ql_dbg(ql_dbg_mbx, vha, 0x10ae, 3638 "Failed=%x mb[0]=%x mb[1]=%x.\n", 3639 rval, mcp->mb[0], mcp->mb[1]); 3640 } else { 3641 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10af, 3642 "Done %s.\n", __func__); 3643 3644 if (wr) 3645 *wr = (uint64_t) mcp->mb[5] << 48 | 3646 (uint64_t) mcp->mb[4] << 32 | 3647 (uint64_t) mcp->mb[3] << 16 | 3648 (uint64_t) mcp->mb[2]; 3649 if (rd) 3650 *rd = (uint64_t) mcp->mb[9] << 48 | 3651 (uint64_t) mcp->mb[8] << 32 | 3652 (uint64_t) mcp->mb[7] << 16 | 3653 (uint64_t) mcp->mb[6]; 3654 } 3655 3656 return rval; 3657 } 3658 3659 int 3660 qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id, 3661 uint16_t *port_speed, uint16_t *mb) 3662 { 3663 int rval; 3664 mbx_cmd_t mc; 3665 mbx_cmd_t *mcp = &mc; 3666 3667 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b0, 3668 "Entered %s.\n", __func__); 3669 3670 if (!IS_IIDMA_CAPABLE(vha->hw)) 3671 return QLA_FUNCTION_FAILED; 3672 3673 mcp->mb[0] = MBC_PORT_PARAMS; 3674 mcp->mb[1] = loop_id; 3675 mcp->mb[2] = mcp->mb[3] = 0; 3676 mcp->mb[9] = vha->vp_idx; 3677 mcp->out_mb = 
MBX_9|MBX_3|MBX_2|MBX_1|MBX_0; 3678 mcp->in_mb = MBX_3|MBX_1|MBX_0; 3679 mcp->tov = MBX_TOV_SECONDS; 3680 mcp->flags = 0; 3681 rval = qla2x00_mailbox_command(vha, mcp); 3682 3683 /* Return mailbox statuses. */ 3684 if (mb != NULL) { 3685 mb[0] = mcp->mb[0]; 3686 mb[1] = mcp->mb[1]; 3687 mb[3] = mcp->mb[3]; 3688 } 3689 3690 if (rval != QLA_SUCCESS) { 3691 ql_dbg(ql_dbg_mbx, vha, 0x10b1, "Failed=%x.\n", rval); 3692 } else { 3693 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b2, 3694 "Done %s.\n", __func__); 3695 if (port_speed) 3696 *port_speed = mcp->mb[3]; 3697 } 3698 3699 return rval; 3700 } 3701 3702 int 3703 qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id, 3704 uint16_t port_speed, uint16_t *mb) 3705 { 3706 int rval; 3707 mbx_cmd_t mc; 3708 mbx_cmd_t *mcp = &mc; 3709 3710 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b3, 3711 "Entered %s.\n", __func__); 3712 3713 if (!IS_IIDMA_CAPABLE(vha->hw)) 3714 return QLA_FUNCTION_FAILED; 3715 3716 mcp->mb[0] = MBC_PORT_PARAMS; 3717 mcp->mb[1] = loop_id; 3718 mcp->mb[2] = BIT_0; 3719 if (IS_CNA_CAPABLE(vha->hw)) 3720 mcp->mb[3] = port_speed & (BIT_5|BIT_4|BIT_3|BIT_2|BIT_1|BIT_0); 3721 else 3722 mcp->mb[3] = port_speed & (BIT_2|BIT_1|BIT_0); 3723 mcp->mb[9] = vha->vp_idx; 3724 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0; 3725 mcp->in_mb = MBX_3|MBX_1|MBX_0; 3726 mcp->tov = MBX_TOV_SECONDS; 3727 mcp->flags = 0; 3728 rval = qla2x00_mailbox_command(vha, mcp); 3729 3730 /* Return mailbox statuses. */ 3731 if (mb != NULL) { 3732 mb[0] = mcp->mb[0]; 3733 mb[1] = mcp->mb[1]; 3734 mb[3] = mcp->mb[3]; 3735 } 3736 3737 if (rval != QLA_SUCCESS) { 3738 ql_dbg(ql_dbg_mbx, vha, 0x10b4, 3739 "Failed=%x.\n", rval); 3740 } else { 3741 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b5, 3742 "Done %s.\n", __func__); 3743 } 3744 3745 return rval; 3746 } 3747 3748 void 3749 qla24xx_report_id_acquisition(scsi_qla_host_t *vha, 3750 struct vp_rpt_id_entry_24xx *rptid_entry) 3751 { 3752 struct qla_hw_data *ha = vha->hw; 3753 scsi_qla_host_t *vp = NULL; 3754 unsigned long flags; 3755 int found; 3756 port_id_t id; 3757 struct fc_port *fcport; 3758 3759 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6, 3760 "Entered %s.\n", __func__); 3761 3762 if (rptid_entry->entry_status != 0) 3763 return; 3764 3765 id.b.domain = rptid_entry->port_id[2]; 3766 id.b.area = rptid_entry->port_id[1]; 3767 id.b.al_pa = rptid_entry->port_id[0]; 3768 id.b.rsvd_1 = 0; 3769 ha->flags.n2n_ae = 0; 3770 3771 if (rptid_entry->format == 0) { 3772 /* loop */ 3773 ql_dbg(ql_dbg_async, vha, 0x10b7, 3774 "Format 0 : Number of VPs setup %d, number of " 3775 "VPs acquired %d.\n", rptid_entry->vp_setup, 3776 rptid_entry->vp_acquired); 3777 ql_dbg(ql_dbg_async, vha, 0x10b8, 3778 "Primary port id %02x%02x%02x.\n", 3779 rptid_entry->port_id[2], rptid_entry->port_id[1], 3780 rptid_entry->port_id[0]); 3781 ha->current_topology = ISP_CFG_NL; 3782 qlt_update_host_map(vha, id); 3783 3784 } else if (rptid_entry->format == 1) { 3785 /* fabric */ 3786 ql_dbg(ql_dbg_async, vha, 0x10b9, 3787 "Format 1: VP[%d] enabled - status %d - with " 3788 "port id %02x%02x%02x.\n", rptid_entry->vp_idx, 3789 rptid_entry->vp_status, 3790 rptid_entry->port_id[2], rptid_entry->port_id[1], 3791 rptid_entry->port_id[0]); 3792 ql_dbg(ql_dbg_async, vha, 0x5075, 3793 "Format 1: Remote WWPN %8phC.\n", 3794 rptid_entry->u.f1.port_name); 3795 3796 ql_dbg(ql_dbg_async, vha, 0x5075, 3797 "Format 1: WWPN %8phC.\n", 3798 vha->port_name); 3799 3800 /* N2N. 
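 * On ISP27xx the flags field of this format-1 entry can also mark the
 * link as a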
direct connect */ 3801 if (IS_QLA27XX(ha) && 3802 ((rptid_entry->u.f1.flags>>1) & 0x7) == 2) { 3803 /* if our portname is higher then initiate N2N login */ 3804 if (wwn_to_u64(vha->port_name) > 3805 wwn_to_u64(rptid_entry->u.f1.port_name)) { 3806 // ??? qlt_update_host_map(vha, id); 3807 vha->n2n_id = 0x1; 3808 ql_dbg(ql_dbg_async, vha, 0x5075, 3809 "Format 1: Setting n2n_update_needed for id %d\n", 3810 vha->n2n_id); 3811 } else { 3812 ql_dbg(ql_dbg_async, vha, 0x5075, 3813 "Format 1: Remote login - Waiting for WWPN %8phC.\n", 3814 rptid_entry->u.f1.port_name); 3815 } 3816 3817 memcpy(vha->n2n_port_name, rptid_entry->u.f1.port_name, 3818 WWN_SIZE); 3819 set_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags); 3820 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags); 3821 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags); 3822 ha->flags.n2n_ae = 1; 3823 return; 3824 } 3825 3826 ha->flags.gpsc_supported = 1; 3827 ha->current_topology = ISP_CFG_F; 3828 /* buffer to buffer credit flag */ 3829 vha->flags.bbcr_enable = (rptid_entry->u.f1.bbcr & 0xf) != 0; 3830 3831 if (rptid_entry->vp_idx == 0) { 3832 if (rptid_entry->vp_status == VP_STAT_COMPL) { 3833 /* FA-WWN is only for physical port */ 3834 if (qla_ini_mode_enabled(vha) && 3835 ha->flags.fawwpn_enabled && 3836 (rptid_entry->u.f1.flags & 3837 BIT_6)) { 3838 memcpy(vha->port_name, 3839 rptid_entry->u.f1.port_name, 3840 WWN_SIZE); 3841 } 3842 3843 qlt_update_host_map(vha, id); 3844 } 3845 3846 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags); 3847 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags); 3848 } else { 3849 if (rptid_entry->vp_status != VP_STAT_COMPL && 3850 rptid_entry->vp_status != VP_STAT_ID_CHG) { 3851 ql_dbg(ql_dbg_mbx, vha, 0x10ba, 3852 "Could not acquire ID for VP[%d].\n", 3853 rptid_entry->vp_idx); 3854 return; 3855 } 3856 3857 found = 0; 3858 spin_lock_irqsave(&ha->vport_slock, flags); 3859 list_for_each_entry(vp, &ha->vp_list, list) { 3860 if (rptid_entry->vp_idx == vp->vp_idx) { 3861 found = 1; 3862 break; 3863 } 3864 } 3865 spin_unlock_irqrestore(&ha->vport_slock, flags); 3866 3867 if (!found) 3868 return; 3869 3870 qlt_update_host_map(vp, id); 3871 3872 /* 3873 * Cannot configure here as we are still sitting on the 3874 * response queue. Handle it in dpc context. 3875 */ 3876 set_bit(VP_IDX_ACQUIRED, &vp->vp_flags); 3877 set_bit(REGISTER_FC4_NEEDED, &vp->dpc_flags); 3878 set_bit(REGISTER_FDMI_NEEDED, &vp->dpc_flags); 3879 } 3880 set_bit(VP_DPC_NEEDED, &vha->dpc_flags); 3881 qla2xxx_wake_dpc(vha); 3882 } else if (rptid_entry->format == 2) { 3883 ql_dbg(ql_dbg_async, vha, 0x505f, 3884 "RIDA: format 2/N2N Primary port id %02x%02x%02x.\n", 3885 rptid_entry->port_id[2], rptid_entry->port_id[1], 3886 rptid_entry->port_id[0]); 3887 3888 ql_dbg(ql_dbg_async, vha, 0x5075, 3889 "N2N: Remote WWPN %8phC.\n", 3890 rptid_entry->u.f2.port_name); 3891 3892 /* N2N. 
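 * Format-2 report ID: record the local port ID, mark existing fcports
 * for rescan and schedule login or session creation for the remote port
 * the firmware reported on this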
direct connect */ 3893 ha->current_topology = ISP_CFG_N; 3894 ha->flags.rida_fmt2 = 1; 3895 vha->d_id.b.domain = rptid_entry->port_id[2]; 3896 vha->d_id.b.area = rptid_entry->port_id[1]; 3897 vha->d_id.b.al_pa = rptid_entry->port_id[0]; 3898 3899 ha->flags.n2n_ae = 1; 3900 spin_lock_irqsave(&ha->vport_slock, flags); 3901 qlt_update_vp_map(vha, SET_AL_PA); 3902 spin_unlock_irqrestore(&ha->vport_slock, flags); 3903 3904 list_for_each_entry(fcport, &vha->vp_fcports, list) { 3905 fcport->scan_state = QLA_FCPORT_SCAN; 3906 } 3907 3908 fcport = qla2x00_find_fcport_by_wwpn(vha, 3909 rptid_entry->u.f2.port_name, 1); 3910 3911 if (fcport) { 3912 fcport->plogi_nack_done_deadline = jiffies + HZ; 3913 fcport->scan_state = QLA_FCPORT_FOUND; 3914 switch (fcport->disc_state) { 3915 case DSC_DELETED: 3916 ql_dbg(ql_dbg_disc, vha, 0x210d, 3917 "%s %d %8phC login\n", 3918 __func__, __LINE__, fcport->port_name); 3919 qla24xx_fcport_handle_login(vha, fcport); 3920 break; 3921 case DSC_DELETE_PEND: 3922 break; 3923 default: 3924 qlt_schedule_sess_for_deletion(fcport); 3925 break; 3926 } 3927 } else { 3928 id.b.al_pa = rptid_entry->u.f2.remote_nport_id[0]; 3929 id.b.area = rptid_entry->u.f2.remote_nport_id[1]; 3930 id.b.domain = rptid_entry->u.f2.remote_nport_id[2]; 3931 qla24xx_post_newsess_work(vha, &id, 3932 rptid_entry->u.f2.port_name, 3933 rptid_entry->u.f2.node_name, 3934 NULL, 3935 FC4_TYPE_UNKNOWN); 3936 } 3937 } 3938 } 3939 3940 /* 3941 * qla24xx_modify_vp_config 3942 * Change VP configuration for vha 3943 * 3944 * Input: 3945 * vha = adapter block pointer. 3946 * 3947 * Returns: 3948 * qla2xxx local function return status code. 3949 * 3950 * Context: 3951 * Kernel context. 3952 */ 3953 int 3954 qla24xx_modify_vp_config(scsi_qla_host_t *vha) 3955 { 3956 int rval; 3957 struct vp_config_entry_24xx *vpmod; 3958 dma_addr_t vpmod_dma; 3959 struct qla_hw_data *ha = vha->hw; 3960 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 3961 3962 /* This can be called by the parent */ 3963 3964 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10bb, 3965 "Entered %s.\n", __func__); 3966 3967 vpmod = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma); 3968 if (!vpmod) { 3969 ql_log(ql_log_warn, vha, 0x10bc, 3970 "Failed to allocate modify VP IOCB.\n"); 3971 return QLA_MEMORY_ALLOC_FAILED; 3972 } 3973 3974 vpmod->entry_type = VP_CONFIG_IOCB_TYPE; 3975 vpmod->entry_count = 1; 3976 vpmod->command = VCT_COMMAND_MOD_ENABLE_VPS; 3977 vpmod->vp_count = 1; 3978 vpmod->vp_index1 = vha->vp_idx; 3979 vpmod->options_idx1 = BIT_3|BIT_4|BIT_5; 3980 3981 qlt_modify_vp_config(vha, vpmod); 3982 3983 memcpy(vpmod->node_name_idx1, vha->node_name, WWN_SIZE); 3984 memcpy(vpmod->port_name_idx1, vha->port_name, WWN_SIZE); 3985 vpmod->entry_count = 1; 3986 3987 rval = qla2x00_issue_iocb(base_vha, vpmod, vpmod_dma, 0); 3988 if (rval != QLA_SUCCESS) { 3989 ql_dbg(ql_dbg_mbx, vha, 0x10bd, 3990 "Failed to issue VP config IOCB (%x).\n", rval); 3991 } else if (vpmod->comp_status != 0) { 3992 ql_dbg(ql_dbg_mbx, vha, 0x10be, 3993 "Failed to complete IOCB -- error status (%x).\n", 3994 vpmod->comp_status); 3995 rval = QLA_FUNCTION_FAILED; 3996 } else if (vpmod->comp_status != cpu_to_le16(CS_COMPLETE)) { 3997 ql_dbg(ql_dbg_mbx, vha, 0x10bf, 3998 "Failed to complete IOCB -- completion status (%x).\n", 3999 le16_to_cpu(vpmod->comp_status)); 4000 rval = QLA_FUNCTION_FAILED; 4001 } else { 4002 /* EMPTY */ 4003 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c0, 4004 "Done %s.\n", __func__); 4005 fc_vport_set_state(vha->fc_vport, 
FC_VPORT_INITIALIZING); 4006 } 4007 dma_pool_free(ha->s_dma_pool, vpmod, vpmod_dma); 4008 4009 return rval; 4010 } 4011 4012 /* 4013 * qla2x00_send_change_request 4014 * Receive or disable RSCN request from fabric controller 4015 * 4016 * Input: 4017 * ha = adapter block pointer 4018 * format = registration format: 4019 * 0 - Reserved 4020 * 1 - Fabric detected registration 4021 * 2 - N_port detected registration 4022 * 3 - Full registration 4023 * FF - clear registration 4024 * vp_idx = Virtual port index 4025 * 4026 * Returns: 4027 * qla2x00 local function return status code. 4028 * 4029 * Context: 4030 * Kernel Context 4031 */ 4032 4033 int 4034 qla2x00_send_change_request(scsi_qla_host_t *vha, uint16_t format, 4035 uint16_t vp_idx) 4036 { 4037 int rval; 4038 mbx_cmd_t mc; 4039 mbx_cmd_t *mcp = &mc; 4040 4041 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c7, 4042 "Entered %s.\n", __func__); 4043 4044 mcp->mb[0] = MBC_SEND_CHANGE_REQUEST; 4045 mcp->mb[1] = format; 4046 mcp->mb[9] = vp_idx; 4047 mcp->out_mb = MBX_9|MBX_1|MBX_0; 4048 mcp->in_mb = MBX_0|MBX_1; 4049 mcp->tov = MBX_TOV_SECONDS; 4050 mcp->flags = 0; 4051 rval = qla2x00_mailbox_command(vha, mcp); 4052 4053 if (rval == QLA_SUCCESS) { 4054 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) { 4055 rval = BIT_1; 4056 } 4057 } else 4058 rval = BIT_1; 4059 4060 return rval; 4061 } 4062 4063 int 4064 qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr, 4065 uint32_t size) 4066 { 4067 int rval; 4068 mbx_cmd_t mc; 4069 mbx_cmd_t *mcp = &mc; 4070 4071 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1009, 4072 "Entered %s.\n", __func__); 4073 4074 if (MSW(addr) || IS_FWI2_CAPABLE(vha->hw)) { 4075 mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED; 4076 mcp->mb[8] = MSW(addr); 4077 mcp->out_mb = MBX_8|MBX_0; 4078 } else { 4079 mcp->mb[0] = MBC_DUMP_RISC_RAM; 4080 mcp->out_mb = MBX_0; 4081 } 4082 mcp->mb[1] = LSW(addr); 4083 mcp->mb[2] = MSW(req_dma); 4084 mcp->mb[3] = LSW(req_dma); 4085 mcp->mb[6] = MSW(MSD(req_dma)); 4086 mcp->mb[7] = LSW(MSD(req_dma)); 4087 mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1; 4088 if (IS_FWI2_CAPABLE(vha->hw)) { 4089 mcp->mb[4] = MSW(size); 4090 mcp->mb[5] = LSW(size); 4091 mcp->out_mb |= MBX_5|MBX_4; 4092 } else { 4093 mcp->mb[4] = LSW(size); 4094 mcp->out_mb |= MBX_4; 4095 } 4096 4097 mcp->in_mb = MBX_0; 4098 mcp->tov = MBX_TOV_SECONDS; 4099 mcp->flags = 0; 4100 rval = qla2x00_mailbox_command(vha, mcp); 4101 4102 if (rval != QLA_SUCCESS) { 4103 ql_dbg(ql_dbg_mbx, vha, 0x1008, 4104 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4105 } else { 4106 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1007, 4107 "Done %s.\n", __func__); 4108 } 4109 4110 return rval; 4111 } 4112 /* 84XX Support **************************************************************/ 4113 4114 struct cs84xx_mgmt_cmd { 4115 union { 4116 struct verify_chip_entry_84xx req; 4117 struct verify_chip_rsp_84xx rsp; 4118 } p; 4119 }; 4120 4121 int 4122 qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status) 4123 { 4124 int rval, retry; 4125 struct cs84xx_mgmt_cmd *mn; 4126 dma_addr_t mn_dma; 4127 uint16_t options; 4128 unsigned long flags; 4129 struct qla_hw_data *ha = vha->hw; 4130 4131 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c8, 4132 "Entered %s.\n", __func__); 4133 4134 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma); 4135 if (mn == NULL) { 4136 return QLA_MEMORY_ALLOC_FAILED; 4137 } 4138 4139 /* Force Update? */ 4140 options = ha->cs84xx->fw_update ? VCO_FORCE_UPDATE : 0; 4141 /* Diagnostic firmware? 
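 * (left disabled; setting MENLO_DIAG_FW below would presumably select
 * the CS84xx diagnostic image instead of the operational firmware, which
 * is the only image this routine updates)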
*/ 4142 /* options |= MENLO_DIAG_FW; */ 4143 /* We update the firmware with only one data sequence. */ 4144 options |= VCO_END_OF_DATA; 4145 4146 do { 4147 retry = 0; 4148 memset(mn, 0, sizeof(*mn)); 4149 mn->p.req.entry_type = VERIFY_CHIP_IOCB_TYPE; 4150 mn->p.req.entry_count = 1; 4151 mn->p.req.options = cpu_to_le16(options); 4152 4153 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111c, 4154 "Dump of Verify Request.\n"); 4155 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111e, 4156 (uint8_t *)mn, sizeof(*mn)); 4157 4158 rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120); 4159 if (rval != QLA_SUCCESS) { 4160 ql_dbg(ql_dbg_mbx, vha, 0x10cb, 4161 "Failed to issue verify IOCB (%x).\n", rval); 4162 goto verify_done; 4163 } 4164 4165 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1110, 4166 "Dump of Verify Response.\n"); 4167 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1118, 4168 (uint8_t *)mn, sizeof(*mn)); 4169 4170 status[0] = le16_to_cpu(mn->p.rsp.comp_status); 4171 status[1] = status[0] == CS_VCS_CHIP_FAILURE ? 4172 le16_to_cpu(mn->p.rsp.failure_code) : 0; 4173 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ce, 4174 "cs=%x fc=%x.\n", status[0], status[1]); 4175 4176 if (status[0] != CS_COMPLETE) { 4177 rval = QLA_FUNCTION_FAILED; 4178 if (!(options & VCO_DONT_UPDATE_FW)) { 4179 ql_dbg(ql_dbg_mbx, vha, 0x10cf, 4180 "Firmware update failed. Retrying " 4181 "without update firmware.\n"); 4182 options |= VCO_DONT_UPDATE_FW; 4183 options &= ~VCO_FORCE_UPDATE; 4184 retry = 1; 4185 } 4186 } else { 4187 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d0, 4188 "Firmware updated to %x.\n", 4189 le32_to_cpu(mn->p.rsp.fw_ver)); 4190 4191 /* NOTE: we only update OP firmware. */ 4192 spin_lock_irqsave(&ha->cs84xx->access_lock, flags); 4193 ha->cs84xx->op_fw_version = 4194 le32_to_cpu(mn->p.rsp.fw_ver); 4195 spin_unlock_irqrestore(&ha->cs84xx->access_lock, 4196 flags); 4197 } 4198 } while (retry); 4199 4200 verify_done: 4201 dma_pool_free(ha->s_dma_pool, mn, mn_dma); 4202 4203 if (rval != QLA_SUCCESS) { 4204 ql_dbg(ql_dbg_mbx, vha, 0x10d1, 4205 "Failed=%x.\n", rval); 4206 } else { 4207 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d2, 4208 "Done %s.\n", __func__); 4209 } 4210 4211 return rval; 4212 } 4213 4214 int 4215 qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req) 4216 { 4217 int rval; 4218 unsigned long flags; 4219 mbx_cmd_t mc; 4220 mbx_cmd_t *mcp = &mc; 4221 struct qla_hw_data *ha = vha->hw; 4222 4223 if (!ha->flags.fw_started) 4224 return QLA_SUCCESS; 4225 4226 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d3, 4227 "Entered %s.\n", __func__); 4228 4229 if (IS_SHADOW_REG_CAPABLE(ha)) 4230 req->options |= BIT_13; 4231 4232 mcp->mb[0] = MBC_INITIALIZE_MULTIQ; 4233 mcp->mb[1] = req->options; 4234 mcp->mb[2] = MSW(LSD(req->dma)); 4235 mcp->mb[3] = LSW(LSD(req->dma)); 4236 mcp->mb[6] = MSW(MSD(req->dma)); 4237 mcp->mb[7] = LSW(MSD(req->dma)); 4238 mcp->mb[5] = req->length; 4239 if (req->rsp) 4240 mcp->mb[10] = req->rsp->id; 4241 mcp->mb[12] = req->qos; 4242 mcp->mb[11] = req->vp_idx; 4243 mcp->mb[13] = req->rid; 4244 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) 4245 mcp->mb[15] = 0; 4246 4247 mcp->mb[4] = req->id; 4248 /* que in ptr index */ 4249 mcp->mb[8] = 0; 4250 /* que out ptr index */ 4251 mcp->mb[9] = *req->out_ptr = 0; 4252 mcp->out_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7| 4253 MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 4254 mcp->in_mb = MBX_0; 4255 mcp->flags = MBX_DMA_OUT; 4256 mcp->tov = MBX_TOV_SECONDS * 2; 4257 4258 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || 
IS_QLA27XX(ha)) 4259 mcp->in_mb |= MBX_1; 4260 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) { 4261 mcp->out_mb |= MBX_15; 4262 /* debug q create issue in SR-IOV */ 4263 mcp->in_mb |= MBX_9 | MBX_8 | MBX_7; 4264 } 4265 4266 spin_lock_irqsave(&ha->hardware_lock, flags); 4267 if (!(req->options & BIT_0)) { 4268 WRT_REG_DWORD(req->req_q_in, 0); 4269 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha)) 4270 WRT_REG_DWORD(req->req_q_out, 0); 4271 } 4272 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4273 4274 rval = qla2x00_mailbox_command(vha, mcp); 4275 if (rval != QLA_SUCCESS) { 4276 ql_dbg(ql_dbg_mbx, vha, 0x10d4, 4277 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4278 } else { 4279 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d5, 4280 "Done %s.\n", __func__); 4281 } 4282 4283 return rval; 4284 } 4285 4286 int 4287 qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp) 4288 { 4289 int rval; 4290 unsigned long flags; 4291 mbx_cmd_t mc; 4292 mbx_cmd_t *mcp = &mc; 4293 struct qla_hw_data *ha = vha->hw; 4294 4295 if (!ha->flags.fw_started) 4296 return QLA_SUCCESS; 4297 4298 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d6, 4299 "Entered %s.\n", __func__); 4300 4301 if (IS_SHADOW_REG_CAPABLE(ha)) 4302 rsp->options |= BIT_13; 4303 4304 mcp->mb[0] = MBC_INITIALIZE_MULTIQ; 4305 mcp->mb[1] = rsp->options; 4306 mcp->mb[2] = MSW(LSD(rsp->dma)); 4307 mcp->mb[3] = LSW(LSD(rsp->dma)); 4308 mcp->mb[6] = MSW(MSD(rsp->dma)); 4309 mcp->mb[7] = LSW(MSD(rsp->dma)); 4310 mcp->mb[5] = rsp->length; 4311 mcp->mb[14] = rsp->msix->entry; 4312 mcp->mb[13] = rsp->rid; 4313 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) 4314 mcp->mb[15] = 0; 4315 4316 mcp->mb[4] = rsp->id; 4317 /* que in ptr index */ 4318 mcp->mb[8] = *rsp->in_ptr = 0; 4319 /* que out ptr index */ 4320 mcp->mb[9] = 0; 4321 mcp->out_mb = MBX_14|MBX_13|MBX_9|MBX_8|MBX_7 4322 |MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 4323 mcp->in_mb = MBX_0; 4324 mcp->flags = MBX_DMA_OUT; 4325 mcp->tov = MBX_TOV_SECONDS * 2; 4326 4327 if (IS_QLA81XX(ha)) { 4328 mcp->out_mb |= MBX_12|MBX_11|MBX_10; 4329 mcp->in_mb |= MBX_1; 4330 } else if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) { 4331 mcp->out_mb |= MBX_15|MBX_12|MBX_11|MBX_10; 4332 mcp->in_mb |= MBX_1; 4333 /* debug q create issue in SR-IOV */ 4334 mcp->in_mb |= MBX_9 | MBX_8 | MBX_7; 4335 } 4336 4337 spin_lock_irqsave(&ha->hardware_lock, flags); 4338 if (!(rsp->options & BIT_0)) { 4339 WRT_REG_DWORD(rsp->rsp_q_out, 0); 4340 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha)) 4341 WRT_REG_DWORD(rsp->rsp_q_in, 0); 4342 } 4343 4344 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4345 4346 rval = qla2x00_mailbox_command(vha, mcp); 4347 if (rval != QLA_SUCCESS) { 4348 ql_dbg(ql_dbg_mbx, vha, 0x10d7, 4349 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4350 } else { 4351 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d8, 4352 "Done %s.\n", __func__); 4353 } 4354 4355 return rval; 4356 } 4357 4358 int 4359 qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb) 4360 { 4361 int rval; 4362 mbx_cmd_t mc; 4363 mbx_cmd_t *mcp = &mc; 4364 4365 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d9, 4366 "Entered %s.\n", __func__); 4367 4368 mcp->mb[0] = MBC_IDC_ACK; 4369 memcpy(&mcp->mb[1], mb, QLA_IDC_ACK_REGS * sizeof(uint16_t)); 4370 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 4371 mcp->in_mb = MBX_0; 4372 mcp->tov = MBX_TOV_SECONDS; 4373 mcp->flags = 0; 4374 rval = qla2x00_mailbox_command(vha, mcp); 4375 4376 if (rval != QLA_SUCCESS) { 4377 ql_dbg(ql_dbg_mbx, vha, 0x10da, 4378 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4379 } else { 4380 
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10db, 4381 "Done %s.\n", __func__); 4382 } 4383 4384 return rval; 4385 } 4386 4387 int 4388 qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size) 4389 { 4390 int rval; 4391 mbx_cmd_t mc; 4392 mbx_cmd_t *mcp = &mc; 4393 4394 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10dc, 4395 "Entered %s.\n", __func__); 4396 4397 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) && 4398 !IS_QLA27XX(vha->hw)) 4399 return QLA_FUNCTION_FAILED; 4400 4401 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; 4402 mcp->mb[1] = FAC_OPT_CMD_GET_SECTOR_SIZE; 4403 mcp->out_mb = MBX_1|MBX_0; 4404 mcp->in_mb = MBX_1|MBX_0; 4405 mcp->tov = MBX_TOV_SECONDS; 4406 mcp->flags = 0; 4407 rval = qla2x00_mailbox_command(vha, mcp); 4408 4409 if (rval != QLA_SUCCESS) { 4410 ql_dbg(ql_dbg_mbx, vha, 0x10dd, 4411 "Failed=%x mb[0]=%x mb[1]=%x.\n", 4412 rval, mcp->mb[0], mcp->mb[1]); 4413 } else { 4414 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10de, 4415 "Done %s.\n", __func__); 4416 *sector_size = mcp->mb[1]; 4417 } 4418 4419 return rval; 4420 } 4421 4422 int 4423 qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable) 4424 { 4425 int rval; 4426 mbx_cmd_t mc; 4427 mbx_cmd_t *mcp = &mc; 4428 4429 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) && 4430 !IS_QLA27XX(vha->hw)) 4431 return QLA_FUNCTION_FAILED; 4432 4433 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10df, 4434 "Entered %s.\n", __func__); 4435 4436 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; 4437 mcp->mb[1] = enable ? FAC_OPT_CMD_WRITE_ENABLE : 4438 FAC_OPT_CMD_WRITE_PROTECT; 4439 mcp->out_mb = MBX_1|MBX_0; 4440 mcp->in_mb = MBX_1|MBX_0; 4441 mcp->tov = MBX_TOV_SECONDS; 4442 mcp->flags = 0; 4443 rval = qla2x00_mailbox_command(vha, mcp); 4444 4445 if (rval != QLA_SUCCESS) { 4446 ql_dbg(ql_dbg_mbx, vha, 0x10e0, 4447 "Failed=%x mb[0]=%x mb[1]=%x.\n", 4448 rval, mcp->mb[0], mcp->mb[1]); 4449 } else { 4450 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e1, 4451 "Done %s.\n", __func__); 4452 } 4453 4454 return rval; 4455 } 4456 4457 int 4458 qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish) 4459 { 4460 int rval; 4461 mbx_cmd_t mc; 4462 mbx_cmd_t *mcp = &mc; 4463 4464 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) && 4465 !IS_QLA27XX(vha->hw)) 4466 return QLA_FUNCTION_FAILED; 4467 4468 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2, 4469 "Entered %s.\n", __func__); 4470 4471 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; 4472 mcp->mb[1] = FAC_OPT_CMD_ERASE_SECTOR; 4473 mcp->mb[2] = LSW(start); 4474 mcp->mb[3] = MSW(start); 4475 mcp->mb[4] = LSW(finish); 4476 mcp->mb[5] = MSW(finish); 4477 mcp->out_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 4478 mcp->in_mb = MBX_2|MBX_1|MBX_0; 4479 mcp->tov = MBX_TOV_SECONDS; 4480 mcp->flags = 0; 4481 rval = qla2x00_mailbox_command(vha, mcp); 4482 4483 if (rval != QLA_SUCCESS) { 4484 ql_dbg(ql_dbg_mbx, vha, 0x10e3, 4485 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", 4486 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); 4487 } else { 4488 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4, 4489 "Done %s.\n", __func__); 4490 } 4491 4492 return rval; 4493 } 4494 4495 int 4496 qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha) 4497 { 4498 int rval = 0; 4499 mbx_cmd_t mc; 4500 mbx_cmd_t *mcp = &mc; 4501 4502 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e5, 4503 "Entered %s.\n", __func__); 4504 4505 mcp->mb[0] = MBC_RESTART_MPI_FW; 4506 mcp->out_mb = MBX_0; 4507 mcp->in_mb = MBX_0|MBX_1; 4508 mcp->tov = MBX_TOV_SECONDS; 4509 mcp->flags = 0; 4510 rval = qla2x00_mailbox_command(vha, mcp); 4511 4512 if (rval != 
QLA_SUCCESS) { 4513 ql_dbg(ql_dbg_mbx, vha, 0x10e6, 4514 "Failed=%x mb[0]=%x mb[1]=%x.\n", 4515 rval, mcp->mb[0], mcp->mb[1]); 4516 } else { 4517 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e7, 4518 "Done %s.\n", __func__); 4519 } 4520 4521 return rval; 4522 } 4523 4524 int 4525 qla82xx_set_driver_version(scsi_qla_host_t *vha, char *version) 4526 { 4527 int rval; 4528 mbx_cmd_t mc; 4529 mbx_cmd_t *mcp = &mc; 4530 int i; 4531 int len; 4532 uint16_t *str; 4533 struct qla_hw_data *ha = vha->hw; 4534 4535 if (!IS_P3P_TYPE(ha)) 4536 return QLA_FUNCTION_FAILED; 4537 4538 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117b, 4539 "Entered %s.\n", __func__); 4540 4541 str = (void *)version; 4542 len = strlen(version); 4543 4544 mcp->mb[0] = MBC_SET_RNID_PARAMS; 4545 mcp->mb[1] = RNID_TYPE_SET_VERSION << 8; 4546 mcp->out_mb = MBX_1|MBX_0; 4547 for (i = 4; i < 16 && len; i++, str++, len -= 2) { 4548 mcp->mb[i] = cpu_to_le16p(str); 4549 mcp->out_mb |= 1<<i; 4550 } 4551 for (; i < 16; i++) { 4552 mcp->mb[i] = 0; 4553 mcp->out_mb |= 1<<i; 4554 } 4555 mcp->in_mb = MBX_1|MBX_0; 4556 mcp->tov = MBX_TOV_SECONDS; 4557 mcp->flags = 0; 4558 rval = qla2x00_mailbox_command(vha, mcp); 4559 4560 if (rval != QLA_SUCCESS) { 4561 ql_dbg(ql_dbg_mbx, vha, 0x117c, 4562 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]); 4563 } else { 4564 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117d, 4565 "Done %s.\n", __func__); 4566 } 4567 4568 return rval; 4569 } 4570 4571 int 4572 qla25xx_set_driver_version(scsi_qla_host_t *vha, char *version) 4573 { 4574 int rval; 4575 mbx_cmd_t mc; 4576 mbx_cmd_t *mcp = &mc; 4577 int len; 4578 uint16_t dwlen; 4579 uint8_t *str; 4580 dma_addr_t str_dma; 4581 struct qla_hw_data *ha = vha->hw; 4582 4583 if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha) || 4584 IS_P3P_TYPE(ha)) 4585 return QLA_FUNCTION_FAILED; 4586 4587 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117e, 4588 "Entered %s.\n", __func__); 4589 4590 str = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &str_dma); 4591 if (!str) { 4592 ql_log(ql_log_warn, vha, 0x117f, 4593 "Failed to allocate driver version param.\n"); 4594 return QLA_MEMORY_ALLOC_FAILED; 4595 } 4596 4597 memcpy(str, "\x7\x3\x11\x0", 4); 4598 dwlen = str[0]; 4599 len = dwlen * 4 - 4; 4600 memset(str + 4, 0, len); 4601 if (len > strlen(version)) 4602 len = strlen(version); 4603 memcpy(str + 4, version, len); 4604 4605 mcp->mb[0] = MBC_SET_RNID_PARAMS; 4606 mcp->mb[1] = RNID_TYPE_SET_VERSION << 8 | dwlen; 4607 mcp->mb[2] = MSW(LSD(str_dma)); 4608 mcp->mb[3] = LSW(LSD(str_dma)); 4609 mcp->mb[6] = MSW(MSD(str_dma)); 4610 mcp->mb[7] = LSW(MSD(str_dma)); 4611 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 4612 mcp->in_mb = MBX_1|MBX_0; 4613 mcp->tov = MBX_TOV_SECONDS; 4614 mcp->flags = 0; 4615 rval = qla2x00_mailbox_command(vha, mcp); 4616 4617 if (rval != QLA_SUCCESS) { 4618 ql_dbg(ql_dbg_mbx, vha, 0x1180, 4619 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]); 4620 } else { 4621 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1181, 4622 "Done %s.\n", __func__); 4623 } 4624 4625 dma_pool_free(ha->s_dma_pool, str, str_dma); 4626 4627 return rval; 4628 } 4629 4630 int 4631 qla24xx_get_port_login_templ(scsi_qla_host_t *vha, dma_addr_t buf_dma, 4632 void *buf, uint16_t bufsiz) 4633 { 4634 int rval, i; 4635 mbx_cmd_t mc; 4636 mbx_cmd_t *mcp = &mc; 4637 uint32_t *bp; 4638 4639 if (!IS_FWI2_CAPABLE(vha->hw)) 4640 return QLA_FUNCTION_FAILED; 4641 4642 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159, 4643 "Entered %s.\n", __func__); 4644 4645 mcp->mb[0] = 
MBC_GET_RNID_PARAMS; 4646 mcp->mb[1] = RNID_TYPE_PORT_LOGIN << 8; 4647 mcp->mb[2] = MSW(buf_dma); 4648 mcp->mb[3] = LSW(buf_dma); 4649 mcp->mb[6] = MSW(MSD(buf_dma)); 4650 mcp->mb[7] = LSW(MSD(buf_dma)); 4651 mcp->mb[8] = bufsiz/4; 4652 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 4653 mcp->in_mb = MBX_1|MBX_0; 4654 mcp->tov = MBX_TOV_SECONDS; 4655 mcp->flags = 0; 4656 rval = qla2x00_mailbox_command(vha, mcp); 4657 4658 if (rval != QLA_SUCCESS) { 4659 ql_dbg(ql_dbg_mbx, vha, 0x115a, 4660 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]); 4661 } else { 4662 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b, 4663 "Done %s.\n", __func__); 4664 bp = (uint32_t *) buf; 4665 for (i = 0; i < (bufsiz-4)/4; i++, bp++) 4666 *bp = cpu_to_be32(*bp); 4667 } 4668 4669 return rval; 4670 } 4671 4672 static int 4673 qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp) 4674 { 4675 int rval; 4676 mbx_cmd_t mc; 4677 mbx_cmd_t *mcp = &mc; 4678 4679 if (!IS_FWI2_CAPABLE(vha->hw)) 4680 return QLA_FUNCTION_FAILED; 4681 4682 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159, 4683 "Entered %s.\n", __func__); 4684 4685 mcp->mb[0] = MBC_GET_RNID_PARAMS; 4686 mcp->mb[1] = RNID_TYPE_ASIC_TEMP << 8; 4687 mcp->out_mb = MBX_1|MBX_0; 4688 mcp->in_mb = MBX_1|MBX_0; 4689 mcp->tov = MBX_TOV_SECONDS; 4690 mcp->flags = 0; 4691 rval = qla2x00_mailbox_command(vha, mcp); 4692 *temp = mcp->mb[1]; 4693 4694 if (rval != QLA_SUCCESS) { 4695 ql_dbg(ql_dbg_mbx, vha, 0x115a, 4696 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]); 4697 } else { 4698 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b, 4699 "Done %s.\n", __func__); 4700 } 4701 4702 return rval; 4703 } 4704 4705 int 4706 qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp, 4707 uint16_t dev, uint16_t off, uint16_t len, uint16_t opt) 4708 { 4709 int rval; 4710 mbx_cmd_t mc; 4711 mbx_cmd_t *mcp = &mc; 4712 struct qla_hw_data *ha = vha->hw; 4713 4714 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8, 4715 "Entered %s.\n", __func__); 4716 4717 if (!IS_FWI2_CAPABLE(ha)) 4718 return QLA_FUNCTION_FAILED; 4719 4720 if (len == 1) 4721 opt |= BIT_0; 4722 4723 mcp->mb[0] = MBC_READ_SFP; 4724 mcp->mb[1] = dev; 4725 mcp->mb[2] = MSW(sfp_dma); 4726 mcp->mb[3] = LSW(sfp_dma); 4727 mcp->mb[6] = MSW(MSD(sfp_dma)); 4728 mcp->mb[7] = LSW(MSD(sfp_dma)); 4729 mcp->mb[8] = len; 4730 mcp->mb[9] = off; 4731 mcp->mb[10] = opt; 4732 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 4733 mcp->in_mb = MBX_1|MBX_0; 4734 mcp->tov = MBX_TOV_SECONDS; 4735 mcp->flags = 0; 4736 rval = qla2x00_mailbox_command(vha, mcp); 4737 4738 if (opt & BIT_0) 4739 *sfp = mcp->mb[1]; 4740 4741 if (rval != QLA_SUCCESS) { 4742 ql_dbg(ql_dbg_mbx, vha, 0x10e9, 4743 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4744 if (mcp->mb[0] == MBS_COMMAND_ERROR && 4745 mcp->mb[1] == 0x22) 4746 /* sfp is not there */ 4747 rval = QLA_INTERFACE_ERROR; 4748 } else { 4749 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea, 4750 "Done %s.\n", __func__); 4751 } 4752 4753 return rval; 4754 } 4755 4756 int 4757 qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp, 4758 uint16_t dev, uint16_t off, uint16_t len, uint16_t opt) 4759 { 4760 int rval; 4761 mbx_cmd_t mc; 4762 mbx_cmd_t *mcp = &mc; 4763 struct qla_hw_data *ha = vha->hw; 4764 4765 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10eb, 4766 "Entered %s.\n", __func__); 4767 4768 if (!IS_FWI2_CAPABLE(ha)) 4769 return QLA_FUNCTION_FAILED; 4770 4771 if (len == 1) 4772 opt |= BIT_0; 4773 4774 if (opt & 
BIT_0) 4775 len = *sfp; 4776 4777 mcp->mb[0] = MBC_WRITE_SFP; 4778 mcp->mb[1] = dev; 4779 mcp->mb[2] = MSW(sfp_dma); 4780 mcp->mb[3] = LSW(sfp_dma); 4781 mcp->mb[6] = MSW(MSD(sfp_dma)); 4782 mcp->mb[7] = LSW(MSD(sfp_dma)); 4783 mcp->mb[8] = len; 4784 mcp->mb[9] = off; 4785 mcp->mb[10] = opt; 4786 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 4787 mcp->in_mb = MBX_1|MBX_0; 4788 mcp->tov = MBX_TOV_SECONDS; 4789 mcp->flags = 0; 4790 rval = qla2x00_mailbox_command(vha, mcp); 4791 4792 if (rval != QLA_SUCCESS) { 4793 ql_dbg(ql_dbg_mbx, vha, 0x10ec, 4794 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4795 } else { 4796 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ed, 4797 "Done %s.\n", __func__); 4798 } 4799 4800 return rval; 4801 } 4802 4803 int 4804 qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma, 4805 uint16_t size_in_bytes, uint16_t *actual_size) 4806 { 4807 int rval; 4808 mbx_cmd_t mc; 4809 mbx_cmd_t *mcp = &mc; 4810 4811 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ee, 4812 "Entered %s.\n", __func__); 4813 4814 if (!IS_CNA_CAPABLE(vha->hw)) 4815 return QLA_FUNCTION_FAILED; 4816 4817 mcp->mb[0] = MBC_GET_XGMAC_STATS; 4818 mcp->mb[2] = MSW(stats_dma); 4819 mcp->mb[3] = LSW(stats_dma); 4820 mcp->mb[6] = MSW(MSD(stats_dma)); 4821 mcp->mb[7] = LSW(MSD(stats_dma)); 4822 mcp->mb[8] = size_in_bytes >> 2; 4823 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0; 4824 mcp->in_mb = MBX_2|MBX_1|MBX_0; 4825 mcp->tov = MBX_TOV_SECONDS; 4826 mcp->flags = 0; 4827 rval = qla2x00_mailbox_command(vha, mcp); 4828 4829 if (rval != QLA_SUCCESS) { 4830 ql_dbg(ql_dbg_mbx, vha, 0x10ef, 4831 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", 4832 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); 4833 } else { 4834 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f0, 4835 "Done %s.\n", __func__); 4836 4837 4838 *actual_size = mcp->mb[2] << 2; 4839 } 4840 4841 return rval; 4842 } 4843 4844 int 4845 qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma, 4846 uint16_t size) 4847 { 4848 int rval; 4849 mbx_cmd_t mc; 4850 mbx_cmd_t *mcp = &mc; 4851 4852 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f1, 4853 "Entered %s.\n", __func__); 4854 4855 if (!IS_CNA_CAPABLE(vha->hw)) 4856 return QLA_FUNCTION_FAILED; 4857 4858 mcp->mb[0] = MBC_GET_DCBX_PARAMS; 4859 mcp->mb[1] = 0; 4860 mcp->mb[2] = MSW(tlv_dma); 4861 mcp->mb[3] = LSW(tlv_dma); 4862 mcp->mb[6] = MSW(MSD(tlv_dma)); 4863 mcp->mb[7] = LSW(MSD(tlv_dma)); 4864 mcp->mb[8] = size; 4865 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 4866 mcp->in_mb = MBX_2|MBX_1|MBX_0; 4867 mcp->tov = MBX_TOV_SECONDS; 4868 mcp->flags = 0; 4869 rval = qla2x00_mailbox_command(vha, mcp); 4870 4871 if (rval != QLA_SUCCESS) { 4872 ql_dbg(ql_dbg_mbx, vha, 0x10f2, 4873 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", 4874 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); 4875 } else { 4876 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f3, 4877 "Done %s.\n", __func__); 4878 } 4879 4880 return rval; 4881 } 4882 4883 int 4884 qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data) 4885 { 4886 int rval; 4887 mbx_cmd_t mc; 4888 mbx_cmd_t *mcp = &mc; 4889 4890 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f4, 4891 "Entered %s.\n", __func__); 4892 4893 if (!IS_FWI2_CAPABLE(vha->hw)) 4894 return QLA_FUNCTION_FAILED; 4895 4896 mcp->mb[0] = MBC_READ_RAM_EXTENDED; 4897 mcp->mb[1] = LSW(risc_addr); 4898 mcp->mb[8] = MSW(risc_addr); 4899 mcp->out_mb = MBX_8|MBX_1|MBX_0; 4900 mcp->in_mb = MBX_3|MBX_2|MBX_0; 4901 mcp->tov = 30; 4902 mcp->flags = 0; 4903 rval = 
qla2x00_mailbox_command(vha, mcp); 4904 if (rval != QLA_SUCCESS) { 4905 ql_dbg(ql_dbg_mbx, vha, 0x10f5, 4906 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4907 } else { 4908 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f6, 4909 "Done %s.\n", __func__); 4910 *data = mcp->mb[3] << 16 | mcp->mb[2]; 4911 } 4912 4913 return rval; 4914 } 4915 4916 int 4917 qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, 4918 uint16_t *mresp) 4919 { 4920 int rval; 4921 mbx_cmd_t mc; 4922 mbx_cmd_t *mcp = &mc; 4923 4924 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f7, 4925 "Entered %s.\n", __func__); 4926 4927 memset(mcp->mb, 0 , sizeof(mcp->mb)); 4928 mcp->mb[0] = MBC_DIAGNOSTIC_LOOP_BACK; 4929 mcp->mb[1] = mreq->options | BIT_6; // BIT_6 specifies 64 bit addressing 4930 4931 /* transfer count */ 4932 mcp->mb[10] = LSW(mreq->transfer_size); 4933 mcp->mb[11] = MSW(mreq->transfer_size); 4934 4935 /* send data address */ 4936 mcp->mb[14] = LSW(mreq->send_dma); 4937 mcp->mb[15] = MSW(mreq->send_dma); 4938 mcp->mb[20] = LSW(MSD(mreq->send_dma)); 4939 mcp->mb[21] = MSW(MSD(mreq->send_dma)); 4940 4941 /* receive data address */ 4942 mcp->mb[16] = LSW(mreq->rcv_dma); 4943 mcp->mb[17] = MSW(mreq->rcv_dma); 4944 mcp->mb[6] = LSW(MSD(mreq->rcv_dma)); 4945 mcp->mb[7] = MSW(MSD(mreq->rcv_dma)); 4946 4947 /* Iteration count */ 4948 mcp->mb[18] = LSW(mreq->iteration_count); 4949 mcp->mb[19] = MSW(mreq->iteration_count); 4950 4951 mcp->out_mb = MBX_21|MBX_20|MBX_19|MBX_18|MBX_17|MBX_16|MBX_15| 4952 MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0; 4953 if (IS_CNA_CAPABLE(vha->hw)) 4954 mcp->out_mb |= MBX_2; 4955 mcp->in_mb = MBX_19|MBX_18|MBX_3|MBX_2|MBX_1|MBX_0; 4956 4957 mcp->buf_size = mreq->transfer_size; 4958 mcp->tov = MBX_TOV_SECONDS; 4959 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; 4960 4961 rval = qla2x00_mailbox_command(vha, mcp); 4962 4963 if (rval != QLA_SUCCESS) { 4964 ql_dbg(ql_dbg_mbx, vha, 0x10f8, 4965 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[18]=%x " 4966 "mb[19]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], 4967 mcp->mb[3], mcp->mb[18], mcp->mb[19]); 4968 } else { 4969 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f9, 4970 "Done %s.\n", __func__); 4971 } 4972 4973 /* Copy mailbox information */ 4974 memcpy( mresp, mcp->mb, 64); 4975 return rval; 4976 } 4977 4978 int 4979 qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, 4980 uint16_t *mresp) 4981 { 4982 int rval; 4983 mbx_cmd_t mc; 4984 mbx_cmd_t *mcp = &mc; 4985 struct qla_hw_data *ha = vha->hw; 4986 4987 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fa, 4988 "Entered %s.\n", __func__); 4989 4990 memset(mcp->mb, 0 , sizeof(mcp->mb)); 4991 mcp->mb[0] = MBC_DIAGNOSTIC_ECHO; 4992 /* BIT_6 specifies 64bit address */ 4993 mcp->mb[1] = mreq->options | BIT_15 | BIT_6; 4994 if (IS_CNA_CAPABLE(ha)) { 4995 mcp->mb[2] = vha->fcoe_fcf_idx; 4996 } 4997 mcp->mb[16] = LSW(mreq->rcv_dma); 4998 mcp->mb[17] = MSW(mreq->rcv_dma); 4999 mcp->mb[6] = LSW(MSD(mreq->rcv_dma)); 5000 mcp->mb[7] = MSW(MSD(mreq->rcv_dma)); 5001 5002 mcp->mb[10] = LSW(mreq->transfer_size); 5003 5004 mcp->mb[14] = LSW(mreq->send_dma); 5005 mcp->mb[15] = MSW(mreq->send_dma); 5006 mcp->mb[20] = LSW(MSD(mreq->send_dma)); 5007 mcp->mb[21] = MSW(MSD(mreq->send_dma)); 5008 5009 mcp->out_mb = MBX_21|MBX_20|MBX_17|MBX_16|MBX_15| 5010 MBX_14|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0; 5011 if (IS_CNA_CAPABLE(ha)) 5012 mcp->out_mb |= MBX_2; 5013 5014 mcp->in_mb = MBX_0; 5015 if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || 5016 IS_CNA_CAPABLE(ha) || IS_QLA2031(ha)) 5017 
mcp->in_mb |= MBX_1; 5018 if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha)) 5019 mcp->in_mb |= MBX_3; 5020 5021 mcp->tov = MBX_TOV_SECONDS; 5022 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; 5023 mcp->buf_size = mreq->transfer_size; 5024 5025 rval = qla2x00_mailbox_command(vha, mcp); 5026 5027 if (rval != QLA_SUCCESS) { 5028 ql_dbg(ql_dbg_mbx, vha, 0x10fb, 5029 "Failed=%x mb[0]=%x mb[1]=%x.\n", 5030 rval, mcp->mb[0], mcp->mb[1]); 5031 } else { 5032 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fc, 5033 "Done %s.\n", __func__); 5034 } 5035 5036 /* Copy mailbox information */ 5037 memcpy(mresp, mcp->mb, 64); 5038 return rval; 5039 } 5040 5041 int 5042 qla84xx_reset_chip(scsi_qla_host_t *vha, uint16_t enable_diagnostic) 5043 { 5044 int rval; 5045 mbx_cmd_t mc; 5046 mbx_cmd_t *mcp = &mc; 5047 5048 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fd, 5049 "Entered %s enable_diag=%d.\n", __func__, enable_diagnostic); 5050 5051 mcp->mb[0] = MBC_ISP84XX_RESET; 5052 mcp->mb[1] = enable_diagnostic; 5053 mcp->out_mb = MBX_1|MBX_0; 5054 mcp->in_mb = MBX_1|MBX_0; 5055 mcp->tov = MBX_TOV_SECONDS; 5056 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; 5057 rval = qla2x00_mailbox_command(vha, mcp); 5058 5059 if (rval != QLA_SUCCESS) 5060 ql_dbg(ql_dbg_mbx, vha, 0x10fe, "Failed=%x.\n", rval); 5061 else 5062 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ff, 5063 "Done %s.\n", __func__); 5064 5065 return rval; 5066 } 5067 5068 int 5069 qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data) 5070 { 5071 int rval; 5072 mbx_cmd_t mc; 5073 mbx_cmd_t *mcp = &mc; 5074 5075 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1100, 5076 "Entered %s.\n", __func__); 5077 5078 if (!IS_FWI2_CAPABLE(vha->hw)) 5079 return QLA_FUNCTION_FAILED; 5080 5081 mcp->mb[0] = MBC_WRITE_RAM_WORD_EXTENDED; 5082 mcp->mb[1] = LSW(risc_addr); 5083 mcp->mb[2] = LSW(data); 5084 mcp->mb[3] = MSW(data); 5085 mcp->mb[8] = MSW(risc_addr); 5086 mcp->out_mb = MBX_8|MBX_3|MBX_2|MBX_1|MBX_0; 5087 mcp->in_mb = MBX_0; 5088 mcp->tov = 30; 5089 mcp->flags = 0; 5090 rval = qla2x00_mailbox_command(vha, mcp); 5091 if (rval != QLA_SUCCESS) { 5092 ql_dbg(ql_dbg_mbx, vha, 0x1101, 5093 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5094 } else { 5095 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1102, 5096 "Done %s.\n", __func__); 5097 } 5098 5099 return rval; 5100 } 5101 5102 int 5103 qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb) 5104 { 5105 int rval; 5106 uint32_t stat, timer; 5107 uint16_t mb0 = 0; 5108 struct qla_hw_data *ha = vha->hw; 5109 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 5110 5111 rval = QLA_SUCCESS; 5112 5113 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1103, 5114 "Entered %s.\n", __func__); 5115 5116 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); 5117 5118 /* Write the MBC data to the registers */ 5119 WRT_REG_WORD(®->mailbox0, MBC_WRITE_MPI_REGISTER); 5120 WRT_REG_WORD(®->mailbox1, mb[0]); 5121 WRT_REG_WORD(®->mailbox2, mb[1]); 5122 WRT_REG_WORD(®->mailbox3, mb[2]); 5123 WRT_REG_WORD(®->mailbox4, mb[3]); 5124 5125 WRT_REG_DWORD(®->hccr, HCCRX_SET_HOST_INT); 5126 5127 /* Poll for MBC interrupt */ 5128 for (timer = 6000000; timer; timer--) { 5129 /* Check for pending interrupts. 
*/ 5130 stat = RD_REG_DWORD(®->host_status); 5131 if (stat & HSRX_RISC_INT) { 5132 stat &= 0xff; 5133 5134 if (stat == 0x1 || stat == 0x2 || 5135 stat == 0x10 || stat == 0x11) { 5136 set_bit(MBX_INTERRUPT, 5137 &ha->mbx_cmd_flags); 5138 mb0 = RD_REG_WORD(®->mailbox0); 5139 WRT_REG_DWORD(®->hccr, 5140 HCCRX_CLR_RISC_INT); 5141 RD_REG_DWORD(®->hccr); 5142 break; 5143 } 5144 } 5145 udelay(5); 5146 } 5147 5148 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) 5149 rval = mb0 & MBS_MASK; 5150 else 5151 rval = QLA_FUNCTION_FAILED; 5152 5153 if (rval != QLA_SUCCESS) { 5154 ql_dbg(ql_dbg_mbx, vha, 0x1104, 5155 "Failed=%x mb[0]=%x.\n", rval, mb[0]); 5156 } else { 5157 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1105, 5158 "Done %s.\n", __func__); 5159 } 5160 5161 return rval; 5162 } 5163 5164 int 5165 qla2x00_get_data_rate(scsi_qla_host_t *vha) 5166 { 5167 int rval; 5168 mbx_cmd_t mc; 5169 mbx_cmd_t *mcp = &mc; 5170 struct qla_hw_data *ha = vha->hw; 5171 5172 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106, 5173 "Entered %s.\n", __func__); 5174 5175 if (!IS_FWI2_CAPABLE(ha)) 5176 return QLA_FUNCTION_FAILED; 5177 5178 mcp->mb[0] = MBC_DATA_RATE; 5179 mcp->mb[1] = 0; 5180 mcp->out_mb = MBX_1|MBX_0; 5181 mcp->in_mb = MBX_2|MBX_1|MBX_0; 5182 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) 5183 mcp->in_mb |= MBX_3; 5184 mcp->tov = MBX_TOV_SECONDS; 5185 mcp->flags = 0; 5186 rval = qla2x00_mailbox_command(vha, mcp); 5187 if (rval != QLA_SUCCESS) { 5188 ql_dbg(ql_dbg_mbx, vha, 0x1107, 5189 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5190 } else { 5191 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108, 5192 "Done %s.\n", __func__); 5193 if (mcp->mb[1] != 0x7) 5194 ha->link_data_rate = mcp->mb[1]; 5195 } 5196 5197 return rval; 5198 } 5199 5200 int 5201 qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb) 5202 { 5203 int rval; 5204 mbx_cmd_t mc; 5205 mbx_cmd_t *mcp = &mc; 5206 struct qla_hw_data *ha = vha->hw; 5207 5208 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1109, 5209 "Entered %s.\n", __func__); 5210 5211 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA8044(ha) && 5212 !IS_QLA27XX(ha)) 5213 return QLA_FUNCTION_FAILED; 5214 mcp->mb[0] = MBC_GET_PORT_CONFIG; 5215 mcp->out_mb = MBX_0; 5216 mcp->in_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 5217 mcp->tov = MBX_TOV_SECONDS; 5218 mcp->flags = 0; 5219 5220 rval = qla2x00_mailbox_command(vha, mcp); 5221 5222 if (rval != QLA_SUCCESS) { 5223 ql_dbg(ql_dbg_mbx, vha, 0x110a, 5224 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5225 } else { 5226 /* Copy all bits to preserve original value */ 5227 memcpy(mb, &mcp->mb[1], sizeof(uint16_t) * 4); 5228 5229 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110b, 5230 "Done %s.\n", __func__); 5231 } 5232 return rval; 5233 } 5234 5235 int 5236 qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb) 5237 { 5238 int rval; 5239 mbx_cmd_t mc; 5240 mbx_cmd_t *mcp = &mc; 5241 5242 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110c, 5243 "Entered %s.\n", __func__); 5244 5245 mcp->mb[0] = MBC_SET_PORT_CONFIG; 5246 /* Copy all bits to preserve original setting */ 5247 memcpy(&mcp->mb[1], mb, sizeof(uint16_t) * 4); 5248 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 5249 mcp->in_mb = MBX_0; 5250 mcp->tov = MBX_TOV_SECONDS; 5251 mcp->flags = 0; 5252 rval = qla2x00_mailbox_command(vha, mcp); 5253 5254 if (rval != QLA_SUCCESS) { 5255 ql_dbg(ql_dbg_mbx, vha, 0x110d, 5256 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5257 } else 5258 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110e, 5259 "Done %s.\n", __func__); 5260 5261 return rval; 5262 } 5263 5264 
5265 int 5266 qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority, 5267 uint16_t *mb) 5268 { 5269 int rval; 5270 mbx_cmd_t mc; 5271 mbx_cmd_t *mcp = &mc; 5272 struct qla_hw_data *ha = vha->hw; 5273 5274 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110f, 5275 "Entered %s.\n", __func__); 5276 5277 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha)) 5278 return QLA_FUNCTION_FAILED; 5279 5280 mcp->mb[0] = MBC_PORT_PARAMS; 5281 mcp->mb[1] = loop_id; 5282 if (ha->flags.fcp_prio_enabled) 5283 mcp->mb[2] = BIT_1; 5284 else 5285 mcp->mb[2] = BIT_2; 5286 mcp->mb[4] = priority & 0xf; 5287 mcp->mb[9] = vha->vp_idx; 5288 mcp->out_mb = MBX_9|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 5289 mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0; 5290 mcp->tov = 30; 5291 mcp->flags = 0; 5292 rval = qla2x00_mailbox_command(vha, mcp); 5293 if (mb != NULL) { 5294 mb[0] = mcp->mb[0]; 5295 mb[1] = mcp->mb[1]; 5296 mb[3] = mcp->mb[3]; 5297 mb[4] = mcp->mb[4]; 5298 } 5299 5300 if (rval != QLA_SUCCESS) { 5301 ql_dbg(ql_dbg_mbx, vha, 0x10cd, "Failed=%x.\n", rval); 5302 } else { 5303 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10cc, 5304 "Done %s.\n", __func__); 5305 } 5306 5307 return rval; 5308 } 5309 5310 int 5311 qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp) 5312 { 5313 int rval = QLA_FUNCTION_FAILED; 5314 struct qla_hw_data *ha = vha->hw; 5315 uint8_t byte; 5316 5317 if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha)) { 5318 ql_dbg(ql_dbg_mbx, vha, 0x1150, 5319 "Thermal not supported by this card.\n"); 5320 return rval; 5321 } 5322 5323 if (IS_QLA25XX(ha)) { 5324 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC && 5325 ha->pdev->subsystem_device == 0x0175) { 5326 rval = qla2x00_read_sfp(vha, 0, &byte, 5327 0x98, 0x1, 1, BIT_13|BIT_0); 5328 *temp = byte; 5329 return rval; 5330 } 5331 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP && 5332 ha->pdev->subsystem_device == 0x338e) { 5333 rval = qla2x00_read_sfp(vha, 0, &byte, 5334 0x98, 0x1, 1, BIT_15|BIT_14|BIT_0); 5335 *temp = byte; 5336 return rval; 5337 } 5338 ql_dbg(ql_dbg_mbx, vha, 0x10c9, 5339 "Thermal not supported by this card.\n"); 5340 return rval; 5341 } 5342 5343 if (IS_QLA82XX(ha)) { 5344 *temp = qla82xx_read_temperature(vha); 5345 rval = QLA_SUCCESS; 5346 return rval; 5347 } else if (IS_QLA8044(ha)) { 5348 *temp = qla8044_read_temperature(vha); 5349 rval = QLA_SUCCESS; 5350 return rval; 5351 } 5352 5353 rval = qla2x00_read_asic_temperature(vha, temp); 5354 return rval; 5355 } 5356 5357 int 5358 qla82xx_mbx_intr_enable(scsi_qla_host_t *vha) 5359 { 5360 int rval; 5361 struct qla_hw_data *ha = vha->hw; 5362 mbx_cmd_t mc; 5363 mbx_cmd_t *mcp = &mc; 5364 5365 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1017, 5366 "Entered %s.\n", __func__); 5367 5368 if (!IS_FWI2_CAPABLE(ha)) 5369 return QLA_FUNCTION_FAILED; 5370 5371 memset(mcp, 0, sizeof(mbx_cmd_t)); 5372 mcp->mb[0] = MBC_TOGGLE_INTERRUPT; 5373 mcp->mb[1] = 1; 5374 5375 mcp->out_mb = MBX_1|MBX_0; 5376 mcp->in_mb = MBX_0; 5377 mcp->tov = 30; 5378 mcp->flags = 0; 5379 5380 rval = qla2x00_mailbox_command(vha, mcp); 5381 if (rval != QLA_SUCCESS) { 5382 ql_dbg(ql_dbg_mbx, vha, 0x1016, 5383 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5384 } else { 5385 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100e, 5386 "Done %s.\n", __func__); 5387 } 5388 5389 return rval; 5390 } 5391 5392 int 5393 qla82xx_mbx_intr_disable(scsi_qla_host_t *vha) 5394 { 5395 int rval; 5396 struct qla_hw_data *ha = vha->hw; 5397 mbx_cmd_t mc; 5398 mbx_cmd_t *mcp = &mc; 5399 5400 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, 
vha, 0x100d, 5401 "Entered %s.\n", __func__); 5402 5403 if (!IS_P3P_TYPE(ha)) 5404 return QLA_FUNCTION_FAILED; 5405 5406 memset(mcp, 0, sizeof(mbx_cmd_t)); 5407 mcp->mb[0] = MBC_TOGGLE_INTERRUPT; 5408 mcp->mb[1] = 0; 5409 5410 mcp->out_mb = MBX_1|MBX_0; 5411 mcp->in_mb = MBX_0; 5412 mcp->tov = 30; 5413 mcp->flags = 0; 5414 5415 rval = qla2x00_mailbox_command(vha, mcp); 5416 if (rval != QLA_SUCCESS) { 5417 ql_dbg(ql_dbg_mbx, vha, 0x100c, 5418 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5419 } else { 5420 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100b, 5421 "Done %s.\n", __func__); 5422 } 5423 5424 return rval; 5425 } 5426 5427 int 5428 qla82xx_md_get_template_size(scsi_qla_host_t *vha) 5429 { 5430 struct qla_hw_data *ha = vha->hw; 5431 mbx_cmd_t mc; 5432 mbx_cmd_t *mcp = &mc; 5433 int rval = QLA_FUNCTION_FAILED; 5434 5435 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111f, 5436 "Entered %s.\n", __func__); 5437 5438 memset(mcp->mb, 0 , sizeof(mcp->mb)); 5439 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); 5440 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); 5441 mcp->mb[2] = LSW(RQST_TMPLT_SIZE); 5442 mcp->mb[3] = MSW(RQST_TMPLT_SIZE); 5443 5444 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 5445 mcp->in_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8| 5446 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 5447 5448 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; 5449 mcp->tov = MBX_TOV_SECONDS; 5450 rval = qla2x00_mailbox_command(vha, mcp); 5451 5452 /* Always copy back return mailbox values. */ 5453 if (rval != QLA_SUCCESS) { 5454 ql_dbg(ql_dbg_mbx, vha, 0x1120, 5455 "mailbox command FAILED=0x%x, subcode=%x.\n", 5456 (mcp->mb[1] << 16) | mcp->mb[0], 5457 (mcp->mb[3] << 16) | mcp->mb[2]); 5458 } else { 5459 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1121, 5460 "Done %s.\n", __func__); 5461 ha->md_template_size = ((mcp->mb[3] << 16) | mcp->mb[2]); 5462 if (!ha->md_template_size) { 5463 ql_dbg(ql_dbg_mbx, vha, 0x1122, 5464 "Null template size obtained.\n"); 5465 rval = QLA_FUNCTION_FAILED; 5466 } 5467 } 5468 return rval; 5469 } 5470 5471 int 5472 qla82xx_md_get_template(scsi_qla_host_t *vha) 5473 { 5474 struct qla_hw_data *ha = vha->hw; 5475 mbx_cmd_t mc; 5476 mbx_cmd_t *mcp = &mc; 5477 int rval = QLA_FUNCTION_FAILED; 5478 5479 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1123, 5480 "Entered %s.\n", __func__); 5481 5482 ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev, 5483 ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL); 5484 if (!ha->md_tmplt_hdr) { 5485 ql_log(ql_log_warn, vha, 0x1124, 5486 "Unable to allocate memory for Minidump template.\n"); 5487 return rval; 5488 } 5489 5490 memset(mcp->mb, 0 , sizeof(mcp->mb)); 5491 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); 5492 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); 5493 mcp->mb[2] = LSW(RQST_TMPLT); 5494 mcp->mb[3] = MSW(RQST_TMPLT); 5495 mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma)); 5496 mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma)); 5497 mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma)); 5498 mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma)); 5499 mcp->mb[8] = LSW(ha->md_template_size); 5500 mcp->mb[9] = MSW(ha->md_template_size); 5501 5502 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; 5503 mcp->tov = MBX_TOV_SECONDS; 5504 mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8| 5505 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 5506 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0; 5507 rval = qla2x00_mailbox_command(vha, mcp); 5508 5509 if (rval != QLA_SUCCESS) { 5510 ql_dbg(ql_dbg_mbx, vha, 0x1125, 5511 "mailbox command FAILED=0x%x, subcode=%x.\n", 
5512 ((mcp->mb[1] << 16) | mcp->mb[0]), 5513 ((mcp->mb[3] << 16) | mcp->mb[2])); 5514 } else 5515 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1126, 5516 "Done %s.\n", __func__); 5517 return rval; 5518 } 5519 5520 int 5521 qla8044_md_get_template(scsi_qla_host_t *vha) 5522 { 5523 struct qla_hw_data *ha = vha->hw; 5524 mbx_cmd_t mc; 5525 mbx_cmd_t *mcp = &mc; 5526 int rval = QLA_FUNCTION_FAILED; 5527 int offset = 0, size = MINIDUMP_SIZE_36K; 5528 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11f, 5529 "Entered %s.\n", __func__); 5530 5531 ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev, 5532 ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL); 5533 if (!ha->md_tmplt_hdr) { 5534 ql_log(ql_log_warn, vha, 0xb11b, 5535 "Unable to allocate memory for Minidump template.\n"); 5536 return rval; 5537 } 5538 5539 memset(mcp->mb, 0 , sizeof(mcp->mb)); 5540 while (offset < ha->md_template_size) { 5541 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); 5542 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); 5543 mcp->mb[2] = LSW(RQST_TMPLT); 5544 mcp->mb[3] = MSW(RQST_TMPLT); 5545 mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma + offset)); 5546 mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma + offset)); 5547 mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma + offset)); 5548 mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma + offset)); 5549 mcp->mb[8] = LSW(size); 5550 mcp->mb[9] = MSW(size); 5551 mcp->mb[10] = offset & 0x0000FFFF; 5552 mcp->mb[11] = offset & 0xFFFF0000; 5553 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; 5554 mcp->tov = MBX_TOV_SECONDS; 5555 mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8| 5556 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 5557 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0; 5558 rval = qla2x00_mailbox_command(vha, mcp); 5559 5560 if (rval != QLA_SUCCESS) { 5561 ql_dbg(ql_dbg_mbx, vha, 0xb11c, 5562 "mailbox command FAILED=0x%x, subcode=%x.\n", 5563 ((mcp->mb[1] << 16) | mcp->mb[0]), 5564 ((mcp->mb[3] << 16) | mcp->mb[2])); 5565 return rval; 5566 } else 5567 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11d, 5568 "Done %s.\n", __func__); 5569 offset = offset + size; 5570 } 5571 return rval; 5572 } 5573 5574 int 5575 qla81xx_set_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg) 5576 { 5577 int rval; 5578 struct qla_hw_data *ha = vha->hw; 5579 mbx_cmd_t mc; 5580 mbx_cmd_t *mcp = &mc; 5581 5582 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha)) 5583 return QLA_FUNCTION_FAILED; 5584 5585 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1133, 5586 "Entered %s.\n", __func__); 5587 5588 memset(mcp, 0, sizeof(mbx_cmd_t)); 5589 mcp->mb[0] = MBC_SET_LED_CONFIG; 5590 mcp->mb[1] = led_cfg[0]; 5591 mcp->mb[2] = led_cfg[1]; 5592 if (IS_QLA8031(ha)) { 5593 mcp->mb[3] = led_cfg[2]; 5594 mcp->mb[4] = led_cfg[3]; 5595 mcp->mb[5] = led_cfg[4]; 5596 mcp->mb[6] = led_cfg[5]; 5597 } 5598 5599 mcp->out_mb = MBX_2|MBX_1|MBX_0; 5600 if (IS_QLA8031(ha)) 5601 mcp->out_mb |= MBX_6|MBX_5|MBX_4|MBX_3; 5602 mcp->in_mb = MBX_0; 5603 mcp->tov = 30; 5604 mcp->flags = 0; 5605 5606 rval = qla2x00_mailbox_command(vha, mcp); 5607 if (rval != QLA_SUCCESS) { 5608 ql_dbg(ql_dbg_mbx, vha, 0x1134, 5609 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5610 } else { 5611 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1135, 5612 "Done %s.\n", __func__); 5613 } 5614 5615 return rval; 5616 } 5617 5618 int 5619 qla81xx_get_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg) 5620 { 5621 int rval; 5622 struct qla_hw_data *ha = vha->hw; 5623 mbx_cmd_t mc; 5624 mbx_cmd_t *mcp = &mc; 5625 5626 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha)) 5627 return QLA_FUNCTION_FAILED; 5628 5629 
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1136, 5630 "Entered %s.\n", __func__); 5631 5632 memset(mcp, 0, sizeof(mbx_cmd_t)); 5633 mcp->mb[0] = MBC_GET_LED_CONFIG; 5634 5635 mcp->out_mb = MBX_0; 5636 mcp->in_mb = MBX_2|MBX_1|MBX_0; 5637 if (IS_QLA8031(ha)) 5638 mcp->in_mb |= MBX_6|MBX_5|MBX_4|MBX_3; 5639 mcp->tov = 30; 5640 mcp->flags = 0; 5641 5642 rval = qla2x00_mailbox_command(vha, mcp); 5643 if (rval != QLA_SUCCESS) { 5644 ql_dbg(ql_dbg_mbx, vha, 0x1137, 5645 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5646 } else { 5647 led_cfg[0] = mcp->mb[1]; 5648 led_cfg[1] = mcp->mb[2]; 5649 if (IS_QLA8031(ha)) { 5650 led_cfg[2] = mcp->mb[3]; 5651 led_cfg[3] = mcp->mb[4]; 5652 led_cfg[4] = mcp->mb[5]; 5653 led_cfg[5] = mcp->mb[6]; 5654 } 5655 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1138, 5656 "Done %s.\n", __func__); 5657 } 5658 5659 return rval; 5660 } 5661 5662 int 5663 qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable) 5664 { 5665 int rval; 5666 struct qla_hw_data *ha = vha->hw; 5667 mbx_cmd_t mc; 5668 mbx_cmd_t *mcp = &mc; 5669 5670 if (!IS_P3P_TYPE(ha)) 5671 return QLA_FUNCTION_FAILED; 5672 5673 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1127, 5674 "Entered %s.\n", __func__); 5675 5676 memset(mcp, 0, sizeof(mbx_cmd_t)); 5677 mcp->mb[0] = MBC_SET_LED_CONFIG; 5678 if (enable) 5679 mcp->mb[7] = 0xE; 5680 else 5681 mcp->mb[7] = 0xD; 5682 5683 mcp->out_mb = MBX_7|MBX_0; 5684 mcp->in_mb = MBX_0; 5685 mcp->tov = MBX_TOV_SECONDS; 5686 mcp->flags = 0; 5687 5688 rval = qla2x00_mailbox_command(vha, mcp); 5689 if (rval != QLA_SUCCESS) { 5690 ql_dbg(ql_dbg_mbx, vha, 0x1128, 5691 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5692 } else { 5693 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1129, 5694 "Done %s.\n", __func__); 5695 } 5696 5697 return rval; 5698 } 5699 5700 int 5701 qla83xx_wr_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data) 5702 { 5703 int rval; 5704 struct qla_hw_data *ha = vha->hw; 5705 mbx_cmd_t mc; 5706 mbx_cmd_t *mcp = &mc; 5707 5708 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha)) 5709 return QLA_FUNCTION_FAILED; 5710 5711 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1130, 5712 "Entered %s.\n", __func__); 5713 5714 mcp->mb[0] = MBC_WRITE_REMOTE_REG; 5715 mcp->mb[1] = LSW(reg); 5716 mcp->mb[2] = MSW(reg); 5717 mcp->mb[3] = LSW(data); 5718 mcp->mb[4] = MSW(data); 5719 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 5720 5721 mcp->in_mb = MBX_1|MBX_0; 5722 mcp->tov = MBX_TOV_SECONDS; 5723 mcp->flags = 0; 5724 rval = qla2x00_mailbox_command(vha, mcp); 5725 5726 if (rval != QLA_SUCCESS) { 5727 ql_dbg(ql_dbg_mbx, vha, 0x1131, 5728 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5729 } else { 5730 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1132, 5731 "Done %s.\n", __func__); 5732 } 5733 5734 return rval; 5735 } 5736 5737 int 5738 qla2x00_port_logout(scsi_qla_host_t *vha, struct fc_port *fcport) 5739 { 5740 int rval; 5741 struct qla_hw_data *ha = vha->hw; 5742 mbx_cmd_t mc; 5743 mbx_cmd_t *mcp = &mc; 5744 5745 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 5746 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113b, 5747 "Implicit LOGO Unsupported.\n"); 5748 return QLA_FUNCTION_FAILED; 5749 } 5750 5751 5752 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113c, 5753 "Entering %s.\n", __func__); 5754 5755 /* Perform Implicit LOGO. 
*/ 5756 mcp->mb[0] = MBC_PORT_LOGOUT; 5757 mcp->mb[1] = fcport->loop_id; 5758 mcp->mb[10] = BIT_15; 5759 mcp->out_mb = MBX_10|MBX_1|MBX_0; 5760 mcp->in_mb = MBX_0; 5761 mcp->tov = MBX_TOV_SECONDS; 5762 mcp->flags = 0; 5763 rval = qla2x00_mailbox_command(vha, mcp); 5764 if (rval != QLA_SUCCESS) 5765 ql_dbg(ql_dbg_mbx, vha, 0x113d, 5766 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5767 else 5768 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113e, 5769 "Done %s.\n", __func__); 5770 5771 return rval; 5772 } 5773 5774 int 5775 qla83xx_rd_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t *data) 5776 { 5777 int rval; 5778 mbx_cmd_t mc; 5779 mbx_cmd_t *mcp = &mc; 5780 struct qla_hw_data *ha = vha->hw; 5781 unsigned long retry_max_time = jiffies + (2 * HZ); 5782 5783 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha)) 5784 return QLA_FUNCTION_FAILED; 5785 5786 ql_dbg(ql_dbg_mbx, vha, 0x114b, "Entered %s.\n", __func__); 5787 5788 retry_rd_reg: 5789 mcp->mb[0] = MBC_READ_REMOTE_REG; 5790 mcp->mb[1] = LSW(reg); 5791 mcp->mb[2] = MSW(reg); 5792 mcp->out_mb = MBX_2|MBX_1|MBX_0; 5793 mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0; 5794 mcp->tov = MBX_TOV_SECONDS; 5795 mcp->flags = 0; 5796 rval = qla2x00_mailbox_command(vha, mcp); 5797 5798 if (rval != QLA_SUCCESS) { 5799 ql_dbg(ql_dbg_mbx, vha, 0x114c, 5800 "Failed=%x mb[0]=%x mb[1]=%x.\n", 5801 rval, mcp->mb[0], mcp->mb[1]); 5802 } else { 5803 *data = (mcp->mb[3] | (mcp->mb[4] << 16)); 5804 if (*data == QLA8XXX_BAD_VALUE) { 5805 /* 5806 * During soft-reset CAMRAM register reads might 5807 * return 0xbad0bad0. So retry for MAX of 2 sec 5808 * while reading camram registers. 5809 */ 5810 if (time_after(jiffies, retry_max_time)) { 5811 ql_dbg(ql_dbg_mbx, vha, 0x1141, 5812 "Failure to read CAMRAM register. " 5813 "data=0x%x.\n", *data); 5814 return QLA_FUNCTION_FAILED; 5815 } 5816 msleep(100); 5817 goto retry_rd_reg; 5818 } 5819 ql_dbg(ql_dbg_mbx, vha, 0x1142, "Done %s.\n", __func__); 5820 } 5821 5822 return rval; 5823 } 5824 5825 int 5826 qla83xx_restart_nic_firmware(scsi_qla_host_t *vha) 5827 { 5828 int rval; 5829 mbx_cmd_t mc; 5830 mbx_cmd_t *mcp = &mc; 5831 struct qla_hw_data *ha = vha->hw; 5832 5833 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha)) 5834 return QLA_FUNCTION_FAILED; 5835 5836 ql_dbg(ql_dbg_mbx, vha, 0x1143, "Entered %s.\n", __func__); 5837 5838 mcp->mb[0] = MBC_RESTART_NIC_FIRMWARE; 5839 mcp->out_mb = MBX_0; 5840 mcp->in_mb = MBX_1|MBX_0; 5841 mcp->tov = MBX_TOV_SECONDS; 5842 mcp->flags = 0; 5843 rval = qla2x00_mailbox_command(vha, mcp); 5844 5845 if (rval != QLA_SUCCESS) { 5846 ql_dbg(ql_dbg_mbx, vha, 0x1144, 5847 "Failed=%x mb[0]=%x mb[1]=%x.\n", 5848 rval, mcp->mb[0], mcp->mb[1]); 5849 ha->isp_ops->fw_dump(vha, 0); 5850 } else { 5851 ql_dbg(ql_dbg_mbx, vha, 0x1145, "Done %s.\n", __func__); 5852 } 5853 5854 return rval; 5855 } 5856 5857 int 5858 qla83xx_access_control(scsi_qla_host_t *vha, uint16_t options, 5859 uint32_t start_addr, uint32_t end_addr, uint16_t *sector_size) 5860 { 5861 int rval; 5862 mbx_cmd_t mc; 5863 mbx_cmd_t *mcp = &mc; 5864 uint8_t subcode = (uint8_t)options; 5865 struct qla_hw_data *ha = vha->hw; 5866 5867 if (!IS_QLA8031(ha)) 5868 return QLA_FUNCTION_FAILED; 5869 5870 ql_dbg(ql_dbg_mbx, vha, 0x1146, "Entered %s.\n", __func__); 5871 5872 mcp->mb[0] = MBC_SET_ACCESS_CONTROL; 5873 mcp->mb[1] = options; 5874 mcp->out_mb = MBX_1|MBX_0; 5875 if (subcode & BIT_2) { 5876 mcp->mb[2] = LSW(start_addr); 5877 mcp->mb[3] = MSW(start_addr); 5878 mcp->mb[4] = LSW(end_addr); 5879 mcp->mb[5] = MSW(end_addr); 5880 mcp->out_mb |= MBX_5|MBX_4|MBX_3|MBX_2; 5881 } 5882 
mcp->in_mb = MBX_2|MBX_1|MBX_0; 5883 if (!(subcode & (BIT_2 | BIT_5))) 5884 mcp->in_mb |= MBX_4|MBX_3; 5885 mcp->tov = MBX_TOV_SECONDS; 5886 mcp->flags = 0; 5887 rval = qla2x00_mailbox_command(vha, mcp); 5888 5889 if (rval != QLA_SUCCESS) { 5890 ql_dbg(ql_dbg_mbx, vha, 0x1147, 5891 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[4]=%x.\n", 5892 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3], 5893 mcp->mb[4]); 5894 ha->isp_ops->fw_dump(vha, 0); 5895 } else { 5896 if (subcode & BIT_5) 5897 *sector_size = mcp->mb[1]; 5898 else if (subcode & (BIT_6 | BIT_7)) { 5899 ql_dbg(ql_dbg_mbx, vha, 0x1148, 5900 "Driver-lock id=%x%x", mcp->mb[4], mcp->mb[3]); 5901 } else if (subcode & (BIT_3 | BIT_4)) { 5902 ql_dbg(ql_dbg_mbx, vha, 0x1149, 5903 "Flash-lock id=%x%x", mcp->mb[4], mcp->mb[3]); 5904 } 5905 ql_dbg(ql_dbg_mbx, vha, 0x114a, "Done %s.\n", __func__); 5906 } 5907 5908 return rval; 5909 } 5910 5911 int 5912 qla2x00_dump_mctp_data(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr, 5913 uint32_t size) 5914 { 5915 int rval; 5916 mbx_cmd_t mc; 5917 mbx_cmd_t *mcp = &mc; 5918 5919 if (!IS_MCTP_CAPABLE(vha->hw)) 5920 return QLA_FUNCTION_FAILED; 5921 5922 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114f, 5923 "Entered %s.\n", __func__); 5924 5925 mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED; 5926 mcp->mb[1] = LSW(addr); 5927 mcp->mb[2] = MSW(req_dma); 5928 mcp->mb[3] = LSW(req_dma); 5929 mcp->mb[4] = MSW(size); 5930 mcp->mb[5] = LSW(size); 5931 mcp->mb[6] = MSW(MSD(req_dma)); 5932 mcp->mb[7] = LSW(MSD(req_dma)); 5933 mcp->mb[8] = MSW(addr); 5934 /* Setting RAM ID to valid */ 5935 mcp->mb[10] |= BIT_7; 5936 /* For MCTP RAM ID is 0x40 */ 5937 mcp->mb[10] |= 0x40; 5938 5939 mcp->out_mb |= MBX_10|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1| 5940 MBX_0; 5941 5942 mcp->in_mb = MBX_0; 5943 mcp->tov = MBX_TOV_SECONDS; 5944 mcp->flags = 0; 5945 rval = qla2x00_mailbox_command(vha, mcp); 5946 5947 if (rval != QLA_SUCCESS) { 5948 ql_dbg(ql_dbg_mbx, vha, 0x114e, 5949 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5950 } else { 5951 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114d, 5952 "Done %s.\n", __func__); 5953 } 5954 5955 return rval; 5956 } 5957 5958 int 5959 qla26xx_dport_diagnostics(scsi_qla_host_t *vha, 5960 void *dd_buf, uint size, uint options) 5961 { 5962 int rval; 5963 mbx_cmd_t mc; 5964 mbx_cmd_t *mcp = &mc; 5965 dma_addr_t dd_dma; 5966 5967 if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw)) 5968 return QLA_FUNCTION_FAILED; 5969 5970 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x119f, 5971 "Entered %s.\n", __func__); 5972 5973 dd_dma = dma_map_single(&vha->hw->pdev->dev, 5974 dd_buf, size, DMA_FROM_DEVICE); 5975 if (dma_mapping_error(&vha->hw->pdev->dev, dd_dma)) { 5976 ql_log(ql_log_warn, vha, 0x1194, "Failed to map dma buffer.\n"); 5977 return QLA_MEMORY_ALLOC_FAILED; 5978 } 5979 5980 memset(dd_buf, 0, size); 5981 5982 mcp->mb[0] = MBC_DPORT_DIAGNOSTICS; 5983 mcp->mb[1] = options; 5984 mcp->mb[2] = MSW(LSD(dd_dma)); 5985 mcp->mb[3] = LSW(LSD(dd_dma)); 5986 mcp->mb[6] = MSW(MSD(dd_dma)); 5987 mcp->mb[7] = LSW(MSD(dd_dma)); 5988 mcp->mb[8] = size; 5989 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 5990 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0; 5991 mcp->buf_size = size; 5992 mcp->flags = MBX_DMA_IN; 5993 mcp->tov = MBX_TOV_SECONDS * 4; 5994 rval = qla2x00_mailbox_command(vha, mcp); 5995 5996 if (rval != QLA_SUCCESS) { 5997 ql_dbg(ql_dbg_mbx, vha, 0x1195, "Failed=%x.\n", rval); 5998 } else { 5999 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1196, 6000 "Done %s.\n", __func__); 6001 } 6002 6003 
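/* Release the streaming DMA mapping regardless of the mailbox outcome. */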
dma_unmap_single(&vha->hw->pdev->dev, dd_dma, 6004 size, DMA_FROM_DEVICE); 6005 6006 return rval; 6007 } 6008 6009 static void qla2x00_async_mb_sp_done(void *s, int res) 6010 { 6011 struct srb *sp = s; 6012 6013 sp->u.iocb_cmd.u.mbx.rc = res; 6014 6015 complete(&sp->u.iocb_cmd.u.mbx.comp); 6016 /* don't free sp here. Let the caller do the free */ 6017 } 6018 6019 /* 6020 * This routine uses the IOCB interface to send a mailbox command. 6021 * It allows non-critical (non chip-setup) commands to go 6022 * out in parallel. 6023 */ 6024 int qla24xx_send_mb_cmd(struct scsi_qla_host *vha, mbx_cmd_t *mcp) 6025 { 6026 int rval = QLA_FUNCTION_FAILED; 6027 srb_t *sp; 6028 struct srb_iocb *c; 6029 6030 if (!vha->hw->flags.fw_started) 6031 goto done; 6032 6033 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL); 6034 if (!sp) 6035 goto done; 6036 6037 sp->type = SRB_MB_IOCB; 6038 sp->name = mb_to_str(mcp->mb[0]); 6039 6040 c = &sp->u.iocb_cmd; 6041 c->timeout = qla2x00_async_iocb_timeout; 6042 init_completion(&c->u.mbx.comp); 6043 6044 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); 6045 6046 memcpy(sp->u.iocb_cmd.u.mbx.out_mb, mcp->mb, SIZEOF_IOCB_MB_REG); 6047 6048 sp->done = qla2x00_async_mb_sp_done; 6049 6050 rval = qla2x00_start_sp(sp); 6051 if (rval != QLA_SUCCESS) { 6052 ql_dbg(ql_dbg_mbx, vha, 0x1018, 6053 "%s: %s Failed submission. %x.\n", 6054 __func__, sp->name, rval); 6055 goto done_free_sp; 6056 } 6057 6058 ql_dbg(ql_dbg_mbx, vha, 0x113f, "MB:%s hndl %x submitted\n", 6059 sp->name, sp->handle); 6060 6061 wait_for_completion(&c->u.mbx.comp); 6062 memcpy(mcp->mb, sp->u.iocb_cmd.u.mbx.in_mb, SIZEOF_IOCB_MB_REG); 6063 6064 rval = c->u.mbx.rc; 6065 switch (rval) { 6066 case QLA_FUNCTION_TIMEOUT: 6067 ql_dbg(ql_dbg_mbx, vha, 0x1140, "%s: %s Timeout. %x.\n", 6068 __func__, sp->name, rval); 6069 break; 6070 case QLA_SUCCESS: 6071 ql_dbg(ql_dbg_mbx, vha, 0x119d, "%s: %s done.\n", 6072 __func__, sp->name); 6073 sp->free(sp); 6074 break; 6075 default: 6076 ql_dbg(ql_dbg_mbx, vha, 0x119e, "%s: %s Failed. 
%x.\n", 6077 __func__, sp->name, rval); 6078 sp->free(sp); 6079 break; 6080 } 6081 6082 return rval; 6083 6084 done_free_sp: 6085 sp->free(sp); 6086 done: 6087 return rval; 6088 } 6089 6090 /* 6091 * qla24xx_gpdb_wait 6092 * NOTE: Do not call this routine from DPC thread 6093 */ 6094 int qla24xx_gpdb_wait(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt) 6095 { 6096 int rval = QLA_FUNCTION_FAILED; 6097 dma_addr_t pd_dma; 6098 struct port_database_24xx *pd; 6099 struct qla_hw_data *ha = vha->hw; 6100 mbx_cmd_t mc; 6101 6102 if (!vha->hw->flags.fw_started) 6103 goto done; 6104 6105 pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma); 6106 if (pd == NULL) { 6107 ql_log(ql_log_warn, vha, 0xd047, 6108 "Failed to allocate port database structure.\n"); 6109 goto done_free_sp; 6110 } 6111 6112 memset(&mc, 0, sizeof(mc)); 6113 mc.mb[0] = MBC_GET_PORT_DATABASE; 6114 mc.mb[1] = cpu_to_le16(fcport->loop_id); 6115 mc.mb[2] = MSW(pd_dma); 6116 mc.mb[3] = LSW(pd_dma); 6117 mc.mb[6] = MSW(MSD(pd_dma)); 6118 mc.mb[7] = LSW(MSD(pd_dma)); 6119 mc.mb[9] = cpu_to_le16(vha->vp_idx); 6120 mc.mb[10] = cpu_to_le16((uint16_t)opt); 6121 6122 rval = qla24xx_send_mb_cmd(vha, &mc); 6123 if (rval != QLA_SUCCESS) { 6124 ql_dbg(ql_dbg_mbx, vha, 0x1193, 6125 "%s: %8phC fail\n", __func__, fcport->port_name); 6126 goto done_free_sp; 6127 } 6128 6129 rval = __qla24xx_parse_gpdb(vha, fcport, pd); 6130 6131 ql_dbg(ql_dbg_mbx, vha, 0x1197, "%s: %8phC done\n", 6132 __func__, fcport->port_name); 6133 6134 done_free_sp: 6135 if (pd) 6136 dma_pool_free(ha->s_dma_pool, pd, pd_dma); 6137 done: 6138 return rval; 6139 } 6140 6141 int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, 6142 struct port_database_24xx *pd) 6143 { 6144 int rval = QLA_SUCCESS; 6145 uint64_t zero = 0; 6146 u8 current_login_state, last_login_state; 6147 6148 if (fcport->fc4f_nvme) { 6149 current_login_state = pd->current_login_state >> 4; 6150 last_login_state = pd->last_login_state >> 4; 6151 } else { 6152 current_login_state = pd->current_login_state & 0xf; 6153 last_login_state = pd->last_login_state & 0xf; 6154 } 6155 6156 /* Check for logged in state. */ 6157 if (current_login_state != PDS_PRLI_COMPLETE) { 6158 ql_dbg(ql_dbg_mbx, vha, 0x119a, 6159 "Unable to verify login-state (%x/%x) for loop_id %x.\n", 6160 current_login_state, last_login_state, fcport->loop_id); 6161 rval = QLA_FUNCTION_FAILED; 6162 goto gpd_error_out; 6163 } 6164 6165 if (fcport->loop_id == FC_NO_LOOP_ID || 6166 (memcmp(fcport->port_name, (uint8_t *)&zero, 8) && 6167 memcmp(fcport->port_name, pd->port_name, 8))) { 6168 /* We lost the device mid way. */ 6169 rval = QLA_NOT_LOGGED_IN; 6170 goto gpd_error_out; 6171 } 6172 6173 /* Names are little-endian. */ 6174 memcpy(fcport->node_name, pd->node_name, WWN_SIZE); 6175 memcpy(fcport->port_name, pd->port_name, WWN_SIZE); 6176 6177 /* Get port_id of device. */ 6178 fcport->d_id.b.domain = pd->port_id[0]; 6179 fcport->d_id.b.area = pd->port_id[1]; 6180 fcport->d_id.b.al_pa = pd->port_id[2]; 6181 fcport->d_id.b.rsvd_1 = 0; 6182 6183 if (fcport->fc4f_nvme) { 6184 fcport->nvme_prli_service_param = 6185 pd->prli_nvme_svc_param_word_3; 6186 fcport->port_type = FCT_NVME; 6187 } else { 6188 /* If not target must be initiator or unknown type. */ 6189 if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0) 6190 fcport->port_type = FCT_INITIATOR; 6191 else 6192 fcport->port_type = FCT_TARGET; 6193 } 6194 /* Passback COS information. */ 6195 fcport->supported_classes = (pd->flags & PDF_CLASS_2) ? 

/*
 * qla24xx_gidlist_wait
 * NOTE: don't call this routine from DPC thread.
 */
int qla24xx_gidlist_wait(struct scsi_qla_host *vha,
    void *id_list, dma_addr_t id_list_dma, uint16_t *entries)
{
	int rval = QLA_FUNCTION_FAILED;
	mbx_cmd_t mc;

	if (!vha->hw->flags.fw_started)
		goto done;

	memset(&mc, 0, sizeof(mc));
	mc.mb[0] = MBC_GET_ID_LIST;
	mc.mb[2] = MSW(id_list_dma);
	mc.mb[3] = LSW(id_list_dma);
	mc.mb[6] = MSW(MSD(id_list_dma));
	mc.mb[7] = LSW(MSD(id_list_dma));
	mc.mb[8] = 0;
	mc.mb[9] = cpu_to_le16(vha->vp_idx);

	rval = qla24xx_send_mb_cmd(vha, &mc);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x119b,
		    "%s: fail\n", __func__);
	} else {
		*entries = mc.mb[1];
		ql_dbg(ql_dbg_mbx, vha, 0x119c,
		    "%s: done\n", __func__);
	}
done:
	return rval;
}

int qla27xx_set_zio_threshold(scsi_qla_host_t *vha, uint16_t value)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1200,
	    "Entered %s\n", __func__);

	memset(mcp->mb, 0, sizeof(mcp->mb));
	mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
	mcp->mb[1] = cpu_to_le16(1);
	mcp->mb[2] = cpu_to_le16(value);
	mcp->out_mb = MBX_2 | MBX_1 | MBX_0;
	mcp->in_mb = MBX_2 | MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;

	rval = qla2x00_mailbox_command(vha, mcp);

	ql_dbg(ql_dbg_mbx, vha, 0x1201, "%s %x\n",
	    (rval != QLA_SUCCESS) ? "Failed" : "Done", rval);

	return rval;
}

int qla27xx_get_zio_threshold(scsi_qla_host_t *vha, uint16_t *value)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1203,
	    "Entered %s\n", __func__);

	memset(mcp->mb, 0, sizeof(mcp->mb));
	mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
	mcp->mb[1] = cpu_to_le16(0);
	mcp->out_mb = MBX_1 | MBX_0;
	mcp->in_mb = MBX_2 | MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;

	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval == QLA_SUCCESS)
		*value = mc.mb[2];

	ql_dbg(ql_dbg_mbx, vha, 0x1205, "%s %x\n",
	    (rval != QLA_SUCCESS) ? "Failed" : "Done", rval);

	return rval;
}

int
qla2x00_read_sfp_dev(struct scsi_qla_host *vha, char *buf, int count)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t iter, addr, offset;
	dma_addr_t phys_addr;
	int rval, c;
	u8 *sfp_data;

	memset(ha->sfp_data, 0, SFP_DEV_SIZE);
	addr = 0xa0;
	phys_addr = ha->sfp_data_dma;
	sfp_data = ha->sfp_data;
	offset = c = 0;

	for (iter = 0; iter < SFP_DEV_SIZE / SFP_BLOCK_SIZE; iter++) {
		if (iter == 4) {
			/* Skip to next device address. */
			addr = 0xa2;
			offset = 0;
		}

		rval = qla2x00_read_sfp(vha, phys_addr, sfp_data,
		    addr, offset, SFP_BLOCK_SIZE, BIT_1);
		if (rval != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x706d,
			    "Unable to read SFP data (%x/%x/%x).\n", rval,
			    addr, offset);

			return rval;
		}

		if (buf && (c < count)) {
			u16 sz;

			if ((count - c) >= SFP_BLOCK_SIZE)
				sz = SFP_BLOCK_SIZE;
			else
				sz = count - c;

			memcpy(buf, sfp_data, sz);
			buf += SFP_BLOCK_SIZE;
			c += sz;
		}
		phys_addr += SFP_BLOCK_SIZE;
		sfp_data += SFP_BLOCK_SIZE;
		offset += SFP_BLOCK_SIZE;
	}

	return rval;
}
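
/*
 * Illustrative use of qla2x00_read_sfp_dev() (a sketch only; the local
 * buffer and its handling are assumptions):
 *
 *	char buf[SFP_DEV_SIZE];
 *
 *	if (qla2x00_read_sfp_dev(vha, buf, sizeof(buf)) == QLA_SUCCESS)
 *		... buf holds the 0xa0 page followed by the 0xa2 page ...
 */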

int qla24xx_res_count_wait(struct scsi_qla_host *vha,
    uint16_t *out_mb, int out_mb_sz)
{
	int rval = QLA_FUNCTION_FAILED;
	mbx_cmd_t mc;

	if (!vha->hw->flags.fw_started)
		goto done;

	memset(&mc, 0, sizeof(mc));
	mc.mb[0] = MBC_GET_RESOURCE_COUNTS;

	rval = qla24xx_send_mb_cmd(vha, &mc);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0xffff,
		    "%s: fail\n", __func__);
	} else {
		if (out_mb_sz <= SIZEOF_IOCB_MB_REG)
			memcpy(out_mb, mc.mb, out_mb_sz);
		else
			memcpy(out_mb, mc.mb, SIZEOF_IOCB_MB_REG);

		ql_dbg(ql_dbg_mbx, vha, 0xffff,
		    "%s: done\n", __func__);
	}
done:
	return rval;
}