/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/delay.h>
#include <linux/gfp.h>

static struct mb_cmd_name {
	uint16_t cmd;
	const char *str;
} mb_str[] = {
	{MBC_GET_PORT_DATABASE,		"GPDB"},
	{MBC_GET_ID_LIST,		"GIDList"},
	{MBC_GET_LINK_PRIV_STATS,	"Stats"},
	{MBC_GET_RESOURCE_COUNTS,	"ResCnt"},
};

static const char *mb_to_str(uint16_t cmd)
{
	int i;
	struct mb_cmd_name *e;

	for (i = 0; i < ARRAY_SIZE(mb_str); i++) {
		e = mb_str + i;
		if (cmd == e->cmd)
			return e->str;
	}
	return "unknown";
}

static struct rom_cmd {
	uint16_t cmd;
} rom_cmds[] = {
	{ MBC_LOAD_RAM },
	{ MBC_EXECUTE_FIRMWARE },
	{ MBC_READ_RAM_WORD },
	{ MBC_MAILBOX_REGISTER_TEST },
	{ MBC_VERIFY_CHECKSUM },
	{ MBC_GET_FIRMWARE_VERSION },
	{ MBC_LOAD_RISC_RAM },
	{ MBC_DUMP_RISC_RAM },
	{ MBC_LOAD_RISC_RAM_EXTENDED },
	{ MBC_DUMP_RISC_RAM_EXTENDED },
	{ MBC_WRITE_RAM_WORD_EXTENDED },
	{ MBC_READ_RAM_EXTENDED },
	{ MBC_GET_RESOURCE_COUNTS },
	{ MBC_SET_FIRMWARE_OPTION },
	{ MBC_MID_INITIALIZE_FIRMWARE },
	{ MBC_GET_FIRMWARE_STATE },
	{ MBC_GET_MEM_OFFLOAD_CNTRL_STAT },
	{ MBC_GET_RETRY_COUNT },
	{ MBC_TRACE_CONTROL },
	{ MBC_INITIALIZE_MULTIQ },
	{ MBC_IOCB_COMMAND_A64 },
	{ MBC_GET_ADAPTER_LOOP_ID },
	{ MBC_READ_SFP },
	{ MBC_SET_RNID_PARAMS },
	{ MBC_GET_RNID_PARAMS },
	{ MBC_GET_SET_ZIO_THRESHOLD },
};

static int is_rom_cmd(uint16_t cmd)
{
	int i;
	struct rom_cmd *wc;

	for (i = 0; i < ARRAY_SIZE(rom_cmds); i++) {
		wc = rom_cmds + i;
		if (wc->cmd == cmd)
			return 1;
	}

	return 0;
}

/*
 * qla2x00_mailbox_command
 *	Issue mailbox command and wait for completion.
 *
 * Input:
 *	ha = adapter block pointer.
 *	mcp = driver internal mbx struct pointer.
 *
 * Output:
 *	mb[MAX_MAILBOX_REGISTER_COUNT] = returned mailbox data.
 *
 * Returns:
 *	0 : QLA_SUCCESS (cmd performed successfully)
 *	1 : QLA_FUNCTION_FAILED (error encountered)
 *	6 : QLA_FUNCTION_TIMEOUT (timeout condition encountered)
 *
 * Context:
 *	Kernel context.
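 *
 * Note:
 *	Only one mailbox command can be outstanding at a time; callers are
 *	serialized on ha->mbx_cmd_comp, which this routine completes again
 *	before returning.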
 */
static int
qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
{
	int rval, i;
	unsigned long flags = 0;
	device_reg_t *reg;
	uint8_t abort_active;
	uint8_t io_lock_on;
	uint16_t command = 0;
	uint16_t *iptr;
	__le16 __iomem *optr;
	uint32_t cnt;
	uint32_t mboxes;
	unsigned long wait_time;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
	u32 chip_reset;


	ql_dbg(ql_dbg_mbx, vha, 0x1000, "Entered %s.\n", __func__);

	if (ha->pdev->error_state == pci_channel_io_perm_failure) {
		ql_log(ql_log_warn, vha, 0x1001,
		    "PCI channel failed permanently, exiting.\n");
		return QLA_FUNCTION_TIMEOUT;
	}

	if (vha->device_flags & DFLG_DEV_FAILED) {
		ql_log(ql_log_warn, vha, 0x1002,
		    "Device in failed state, exiting.\n");
		return QLA_FUNCTION_TIMEOUT;
	}

	/* if PCI error, then avoid mbx processing.*/
	if (test_bit(PFLG_DISCONNECTED, &base_vha->dpc_flags) &&
	    test_bit(UNLOADING, &base_vha->dpc_flags)) {
		ql_log(ql_log_warn, vha, 0xd04e,
		    "PCI error, exiting.\n");
		return QLA_FUNCTION_TIMEOUT;
	}

	reg = ha->iobase;
	io_lock_on = base_vha->flags.init_done;

	rval = QLA_SUCCESS;
	abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
	chip_reset = ha->chip_reset;

	if (ha->flags.pci_channel_io_perm_failure) {
		ql_log(ql_log_warn, vha, 0x1003,
		    "Perm failure on EEH timeout MBX, exiting.\n");
		return QLA_FUNCTION_TIMEOUT;
	}

	if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
		/* Setting Link-Down error */
		mcp->mb[0] = MBS_LINK_DOWN_ERROR;
		ql_log(ql_log_warn, vha, 0x1004,
		    "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
		return QLA_FUNCTION_TIMEOUT;
	}

	/* check if ISP abort is active and return cmd with timeout */
	if ((test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
	    test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) &&
	    !is_rom_cmd(mcp->mb[0])) {
		ql_log(ql_log_info, vha, 0x1005,
		    "Cmd 0x%x aborted with timeout since ISP Abort is pending\n",
		    mcp->mb[0]);
		return QLA_FUNCTION_TIMEOUT;
	}

	atomic_inc(&ha->num_pend_mbx_stage1);
	/*
	 * Wait for active mailbox commands to finish by waiting at most tov
	 * seconds. This is to serialize actual issuing of mailbox cmds during
	 * non ISP abort time.
	 */
	if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) {
		/* Timeout occurred. Return error. */
		ql_log(ql_log_warn, vha, 0xd035,
		    "Cmd access timeout, cmd=0x%x, Exiting.\n",
		    mcp->mb[0]);
		atomic_dec(&ha->num_pend_mbx_stage1);
		return QLA_FUNCTION_TIMEOUT;
	}
	atomic_dec(&ha->num_pend_mbx_stage1);
	if (ha->flags.purge_mbox || chip_reset != ha->chip_reset) {
		rval = QLA_ABORTED;
		goto premature_exit;
	}


	/* Save mailbox command for debug */
	ha->mcp = mcp;

	ql_dbg(ql_dbg_mbx, vha, 0x1006,
	    "Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]);

	spin_lock_irqsave(&ha->hardware_lock, flags);

	if (ha->flags.purge_mbox || chip_reset != ha->chip_reset ||
	    ha->flags.mbox_busy) {
		rval = QLA_ABORTED;
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		goto premature_exit;
	}
	ha->flags.mbox_busy = 1;

	/* Load mailbox registers. */
	if (IS_P3P_TYPE(ha))
		optr = &reg->isp82.mailbox_in[0];
	else if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha)))
		optr = &reg->isp24.mailbox0;
	else
		optr = MAILBOX_REG(ha, &reg->isp, 0);

	iptr = mcp->mb;
	command = mcp->mb[0];
	mboxes = mcp->out_mb;

	ql_dbg(ql_dbg_mbx, vha, 0x1111,
	    "Mailbox registers (OUT):\n");
	for (cnt = 0; cnt < ha->mbx_count; cnt++) {
		if (IS_QLA2200(ha) && cnt == 8)
			optr = MAILBOX_REG(ha, &reg->isp, 8);
		if (mboxes & BIT_0) {
			ql_dbg(ql_dbg_mbx, vha, 0x1112,
			    "mbox[%d]<-0x%04x\n", cnt, *iptr);
			wrt_reg_word(optr, *iptr);
		}

		mboxes >>= 1;
		optr++;
		iptr++;
	}

	ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1117,
	    "I/O Address = %p.\n", optr);

	/* Issue set host interrupt command to send cmd out. */
	ha->flags.mbox_int = 0;
	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);

	/* Unlock mbx registers and wait for interrupt */
	ql_dbg(ql_dbg_mbx, vha, 0x100f,
	    "Going to unlock irq & waiting for interrupts. "
	    "jiffies=%lx.\n", jiffies);

	/* Wait for mbx cmd completion until timeout */
	atomic_inc(&ha->num_pend_mbx_stage2);
	if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) {
		set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);

		if (IS_P3P_TYPE(ha))
			wrt_reg_dword(&reg->isp82.hint, HINT_MBX_INT_PENDING);
		else if (IS_FWI2_CAPABLE(ha))
			wrt_reg_dword(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
		else
			wrt_reg_word(&reg->isp.hccr, HCCR_SET_HOST_INT);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		wait_time = jiffies;
		atomic_inc(&ha->num_pend_mbx_stage3);
		if (!wait_for_completion_timeout(&ha->mbx_intr_comp,
		    mcp->tov * HZ)) {
			if (chip_reset != ha->chip_reset) {
				spin_lock_irqsave(&ha->hardware_lock, flags);
				ha->flags.mbox_busy = 0;
				spin_unlock_irqrestore(&ha->hardware_lock,
				    flags);
				atomic_dec(&ha->num_pend_mbx_stage2);
				atomic_dec(&ha->num_pend_mbx_stage3);
				rval = QLA_ABORTED;
				goto premature_exit;
			}
			ql_dbg(ql_dbg_mbx, vha, 0x117a,
			    "cmd=%x Timeout.\n", command);
			spin_lock_irqsave(&ha->hardware_lock, flags);
			clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
			spin_unlock_irqrestore(&ha->hardware_lock, flags);

		} else if (ha->flags.purge_mbox ||
		    chip_reset != ha->chip_reset) {
			spin_lock_irqsave(&ha->hardware_lock, flags);
			ha->flags.mbox_busy = 0;
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
			atomic_dec(&ha->num_pend_mbx_stage2);
			atomic_dec(&ha->num_pend_mbx_stage3);
			rval = QLA_ABORTED;
			goto premature_exit;
		}
		atomic_dec(&ha->num_pend_mbx_stage3);

		if (time_after(jiffies, wait_time + 5 * HZ))
			ql_log(ql_log_warn, vha, 0x1015, "cmd=0x%x, waited %d msecs\n",
			    command, jiffies_to_msecs(jiffies - wait_time));
	} else {
		ql_dbg(ql_dbg_mbx, vha, 0x1011,
		    "Cmd=%x Polling Mode.\n", command);

		if (IS_P3P_TYPE(ha)) {
			if (rd_reg_dword(&reg->isp82.hint) &
			    HINT_MBX_INT_PENDING) {
				ha->flags.mbox_busy = 0;
				spin_unlock_irqrestore(&ha->hardware_lock,
				    flags);
				atomic_dec(&ha->num_pend_mbx_stage2);
				ql_dbg(ql_dbg_mbx, vha, 0x1012,
				    "Pending mailbox timeout, exiting.\n");
				rval = QLA_FUNCTION_TIMEOUT;
				goto premature_exit;
			}
			wrt_reg_dword(&reg->isp82.hint, HINT_MBX_INT_PENDING);
		} else if (IS_FWI2_CAPABLE(ha))
			wrt_reg_dword(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
		else
			wrt_reg_word(&reg->isp.hccr, HCCR_SET_HOST_INT);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
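
		/*
		 * Polling path: interrupts are not usable here (pre-init or
		 * ISP abort in progress), so the loop below services the
		 * response queue by hand until mbox_int is set or the
		 * timeout expires.
		 */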

		wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */
		while (!ha->flags.mbox_int) {
			if (ha->flags.purge_mbox ||
			    chip_reset != ha->chip_reset) {
				spin_lock_irqsave(&ha->hardware_lock, flags);
				ha->flags.mbox_busy = 0;
				spin_unlock_irqrestore(&ha->hardware_lock,
				    flags);
				atomic_dec(&ha->num_pend_mbx_stage2);
				rval = QLA_ABORTED;
				goto premature_exit;
			}

			if (time_after(jiffies, wait_time))
				break;

			/* Check for pending interrupts. */
			qla2x00_poll(ha->rsp_q_map[0]);

			if (!ha->flags.mbox_int &&
			    !(IS_QLA2200(ha) &&
			    command == MBC_LOAD_RISC_RAM_EXTENDED))
				msleep(10);
		} /* while */
		ql_dbg(ql_dbg_mbx, vha, 0x1013,
		    "Waited %d sec.\n",
		    (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ));
	}
	atomic_dec(&ha->num_pend_mbx_stage2);

	/* Check whether we timed out */
	if (ha->flags.mbox_int) {
		uint16_t *iptr2;

		ql_dbg(ql_dbg_mbx, vha, 0x1014,
		    "Cmd=%x completed.\n", command);

		/* Got interrupt. Clear the flag. */
		ha->flags.mbox_int = 0;
		clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);

		if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
			spin_lock_irqsave(&ha->hardware_lock, flags);
			ha->flags.mbox_busy = 0;
			spin_unlock_irqrestore(&ha->hardware_lock, flags);

			/* Setting Link-Down error */
			mcp->mb[0] = MBS_LINK_DOWN_ERROR;
			ha->mcp = NULL;
			rval = QLA_FUNCTION_FAILED;
			ql_log(ql_log_warn, vha, 0xd048,
			    "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
			goto premature_exit;
		}

		if (ha->mailbox_out[0] != MBS_COMMAND_COMPLETE) {
			ql_dbg(ql_dbg_mbx, vha, 0x11ff,
			    "mb_out[0] = %#x <> %#x\n", ha->mailbox_out[0],
			    MBS_COMMAND_COMPLETE);
			rval = QLA_FUNCTION_FAILED;
		}

		/* Load return mailbox registers. */
		iptr2 = mcp->mb;
		iptr = (uint16_t *)&ha->mailbox_out[0];
		mboxes = mcp->in_mb;

		ql_dbg(ql_dbg_mbx, vha, 0x1113,
		    "Mailbox registers (IN):\n");
		for (cnt = 0; cnt < ha->mbx_count; cnt++) {
			if (mboxes & BIT_0) {
				*iptr2 = *iptr;
				ql_dbg(ql_dbg_mbx, vha, 0x1114,
				    "mbox[%d]->0x%04x\n", cnt, *iptr2);
			}

			mboxes >>= 1;
			iptr2++;
			iptr++;
		}
	} else {

		uint16_t mb[8];
		uint32_t ictrl, host_status, hccr;
		uint16_t w;

		if (IS_FWI2_CAPABLE(ha)) {
			mb[0] = rd_reg_word(&reg->isp24.mailbox0);
			mb[1] = rd_reg_word(&reg->isp24.mailbox1);
			mb[2] = rd_reg_word(&reg->isp24.mailbox2);
			mb[3] = rd_reg_word(&reg->isp24.mailbox3);
			mb[7] = rd_reg_word(&reg->isp24.mailbox7);
			ictrl = rd_reg_dword(&reg->isp24.ictrl);
			host_status = rd_reg_dword(&reg->isp24.host_status);
			hccr = rd_reg_dword(&reg->isp24.hccr);

			ql_log(ql_log_warn, vha, 0xd04c,
			    "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
			    "mb[0-3]=[0x%x 0x%x 0x%x 0x%x] mb7 0x%x host_status 0x%x hccr 0x%x\n",
			    command, ictrl, jiffies, mb[0], mb[1], mb[2], mb[3],
			    mb[7], host_status, hccr);

		} else {
			mb[0] = RD_MAILBOX_REG(ha, &reg->isp, 0);
			ictrl = rd_reg_word(&reg->isp.ictrl);
			ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119,
			    "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
			    "mb[0]=0x%x\n", command, ictrl, jiffies, mb[0]);
		}
		ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019);

		/* Capture FW dump only, if PCI device active */
		if (!pci_channel_offline(vha->hw->pdev)) {
			pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
			if (w == 0xffff || ictrl == 0xffffffff ||
			    (chip_reset != ha->chip_reset)) {
				/* This is a special case: if the driver is
				 * unloading and the PCI device has gone into
				 * a bad state due to a PCI error, only the
				 * PCI ERR flag will be set, so do a premature
				 * exit for that case.
				 */
				spin_lock_irqsave(&ha->hardware_lock, flags);
				ha->flags.mbox_busy = 0;
				spin_unlock_irqrestore(&ha->hardware_lock,
				    flags);
				rval = QLA_FUNCTION_TIMEOUT;
				goto premature_exit;
			}

			/* Attempt to capture firmware dump for further
			 * analysis of the current firmware state. We do not
			 * need to do this if we are intentionally generating
			 * a dump.
			 */
			if (mcp->mb[0] != MBC_GEN_SYSTEM_ERROR)
				qla2xxx_dump_fw(vha);
			rval = QLA_FUNCTION_TIMEOUT;
		}
	}
	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->flags.mbox_busy = 0;
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	/* Clean up */
	ha->mcp = NULL;

	if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) {
		ql_dbg(ql_dbg_mbx, vha, 0x101a,
		    "Checking for additional resp interrupt.\n");

		/* polling mode for non isp_abort commands. */
		qla2x00_poll(ha->rsp_q_map[0]);
	}

	if (rval == QLA_FUNCTION_TIMEOUT &&
	    mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) {
		if (!io_lock_on || (mcp->flags & IOCTL_CMD) ||
		    ha->flags.eeh_busy) {
			/* not in dpc. schedule it for dpc to take over. */
			ql_dbg(ql_dbg_mbx, vha, 0x101b,
			    "Timeout, schedule isp_abort_needed.\n");

			if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
			    !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
			    !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
				if (IS_QLA82XX(ha)) {
					ql_dbg(ql_dbg_mbx, vha, 0x112a,
					    "disabling pause transmit on port "
					    "0 & 1.\n");
					qla82xx_wr_32(ha,
					    QLA82XX_CRB_NIU + 0x98,
					    CRB_NIU_XG_PAUSE_CTL_P0|
					    CRB_NIU_XG_PAUSE_CTL_P1);
				}
				ql_log(ql_log_info, base_vha, 0x101c,
				    "Mailbox cmd timeout occurred, cmd=0x%x, "
				    "mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP "
				    "abort.\n", command, mcp->mb[0],
				    ha->flags.eeh_busy);
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
			}
		} else if (current == ha->dpc_thread) {
			/* call abort directly since we are in the DPC thread */
			ql_dbg(ql_dbg_mbx, vha, 0x101d,
			    "Timeout, calling abort_isp.\n");

			if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
			    !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
			    !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
				if (IS_QLA82XX(ha)) {
					ql_dbg(ql_dbg_mbx, vha, 0x112b,
					    "disabling pause transmit on port "
					    "0 & 1.\n");
					qla82xx_wr_32(ha,
					    QLA82XX_CRB_NIU + 0x98,
					    CRB_NIU_XG_PAUSE_CTL_P0|
					    CRB_NIU_XG_PAUSE_CTL_P1);
				}
				ql_log(ql_log_info, base_vha, 0x101e,
				    "Mailbox cmd timeout occurred, cmd=0x%x, "
				    "mb[0]=0x%x. Scheduling ISP abort ",
				    command, mcp->mb[0]);
				set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
				clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				/* Allow next mbx cmd to come in. */
				complete(&ha->mbx_cmd_comp);
				if (ha->isp_ops->abort_isp(vha)) {
					/* Failed. retry later. */
					set_bit(ISP_ABORT_NEEDED,
					    &vha->dpc_flags);
				}
				clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
				ql_dbg(ql_dbg_mbx, vha, 0x101f,
				    "Finished abort_isp.\n");
				goto mbx_done;
			}
		}
	}

premature_exit:
	/* Allow next mbx cmd to come in. */
	complete(&ha->mbx_cmd_comp);

mbx_done:
	if (rval == QLA_ABORTED) {
		ql_log(ql_log_info, vha, 0xd035,
		    "Chip Reset in progress. Purging Mbox cmd=0x%x.\n",
		    mcp->mb[0]);
	} else if (rval) {
		if (ql2xextended_error_logging & (ql_dbg_disc|ql_dbg_mbx)) {
			pr_warn("%s [%s]-%04x:%ld: **** Failed=%x", QL_MSGHDR,
			    dev_name(&ha->pdev->dev), 0x1020+0x800,
			    vha->host_no, rval);
			mboxes = mcp->in_mb;
			cnt = 4;
			for (i = 0; i < ha->mbx_count && cnt; i++, mboxes >>= 1)
				if (mboxes & BIT_0) {
					printk(" mb[%u]=%x", i, mcp->mb[i]);
					cnt--;
				}
			pr_warn(" cmd=%x ****\n", command);
		}
		if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha))) {
			ql_dbg(ql_dbg_mbx, vha, 0x1198,
			    "host_status=%#x intr_ctrl=%#x intr_status=%#x\n",
			    rd_reg_dword(&reg->isp24.host_status),
			    rd_reg_dword(&reg->isp24.ictrl),
			    rd_reg_dword(&reg->isp24.istatus));
		} else {
			ql_dbg(ql_dbg_mbx, vha, 0x1206,
			    "ctrl_status=%#x ictrl=%#x istatus=%#x\n",
			    rd_reg_word(&reg->isp.ctrl_status),
			    rd_reg_word(&reg->isp.ictrl),
			    rd_reg_word(&reg->isp.istatus));
		}
	} else {
		ql_dbg(ql_dbg_mbx, base_vha, 0x1021, "Done %s.\n", __func__);
	}

	return rval;
}

int
qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
    uint32_t risc_code_size)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1022,
	    "Entered %s.\n", __func__);

	if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) {
		mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED;
		mcp->mb[8] = MSW(risc_addr);
		mcp->out_mb = MBX_8|MBX_0;
	} else {
		mcp->mb[0] = MBC_LOAD_RISC_RAM;
		mcp->out_mb = MBX_0;
	}
	mcp->mb[1] = LSW(risc_addr);
	mcp->mb[2] = MSW(req_dma);
	mcp->mb[3] = LSW(req_dma);
	mcp->mb[6] = MSW(MSD(req_dma));
	mcp->mb[7] = LSW(MSD(req_dma));
	mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
	if (IS_FWI2_CAPABLE(ha)) {
		mcp->mb[4] = MSW(risc_code_size);
		mcp->mb[5] = LSW(risc_code_size);
		mcp->out_mb |= MBX_5|MBX_4;
	} else {
		mcp->mb[4] = LSW(risc_code_size);
		mcp->out_mb |= MBX_4;
	}

	mcp->in_mb = MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1023,
		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
		    rval, mcp->mb[0], mcp->mb[1]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1024,
		    "Done %s.\n", __func__);
	}

	return rval;
}

#define	NVME_ENABLE_FLAG	BIT_3

/*
 * qla2x00_execute_fw
 *	Start adapter firmware.
 *
 * Input:
 *	ha = adapter block pointer.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	u8 semaphore = 0;
#define EXE_FW_FORCE_SEMAPHORE BIT_7
	u8 retry = 3;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1025,
	    "Entered %s.\n", __func__);

again:
	mcp->mb[0] = MBC_EXECUTE_FIRMWARE;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_0;
	if (IS_FWI2_CAPABLE(ha)) {
		mcp->mb[1] = MSW(risc_addr);
		mcp->mb[2] = LSW(risc_addr);
		mcp->mb[3] = 0;
		mcp->mb[4] = 0;
		mcp->mb[11] = 0;

		/* Enable BPM? */
		if (ha->flags.lr_detected) {
			mcp->mb[4] = BIT_0;
			if (IS_BPM_RANGE_CAPABLE(ha))
				mcp->mb[4] |=
				    ha->lr_distance << LR_DIST_FW_POS;
		}

		if (ql2xnvmeenable && (IS_QLA27XX(ha) || IS_QLA28XX(ha)))
			mcp->mb[4] |= NVME_ENABLE_FLAG;

		if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
			struct nvram_81xx *nv = ha->nvram;
			/* set minimum speed if specified in nvram */
			if (nv->min_supported_speed >= 2 &&
			    nv->min_supported_speed <= 5) {
				mcp->mb[4] |= BIT_4;
				mcp->mb[11] |= nv->min_supported_speed & 0xF;
				mcp->out_mb |= MBX_11;
				mcp->in_mb |= BIT_5;
				vha->min_supported_speed =
				    nv->min_supported_speed;
			}
		}

		if (ha->flags.exlogins_enabled)
			mcp->mb[4] |= ENABLE_EXTENDED_LOGIN;

		if (ha->flags.exchoffld_enabled)
			mcp->mb[4] |= ENABLE_EXCHANGE_OFFLD;

		if (semaphore)
			mcp->mb[11] |= EXE_FW_FORCE_SEMAPHORE;

		mcp->out_mb |= MBX_4 | MBX_3 | MBX_2 | MBX_1 | MBX_11;
		mcp->in_mb |= MBX_3 | MBX_2 | MBX_1;
	} else {
		mcp->mb[1] = LSW(risc_addr);
		mcp->out_mb |= MBX_1;
		if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
			mcp->mb[2] = 0;
			mcp->out_mb |= MBX_2;
		}
	}

	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		if (IS_QLA28XX(ha) && rval == QLA_COMMAND_ERROR &&
		    mcp->mb[1] == 0x27 && retry) {
			semaphore = 1;
			retry--;
			ql_dbg(ql_dbg_async, vha, 0x1026,
			    "Exe FW: force semaphore.\n");
			goto again;
		}

		ql_dbg(ql_dbg_mbx, vha, 0x1026,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
		return rval;
	}

	if (!IS_FWI2_CAPABLE(ha))
		goto done;

	ha->fw_ability_mask = mcp->mb[3] << 16 | mcp->mb[2];
	ql_dbg(ql_dbg_mbx, vha, 0x119a,
	    "fw_ability_mask=%x.\n", ha->fw_ability_mask);
	ql_dbg(ql_dbg_mbx, vha, 0x1027, "exchanges=%x.\n", mcp->mb[1]);
	if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
		ha->max_supported_speed = mcp->mb[2] & (BIT_0|BIT_1);
		ql_dbg(ql_dbg_mbx, vha, 0x119b, "max_supported_speed=%s.\n",
		    ha->max_supported_speed == 0 ? "16Gps" :
		    ha->max_supported_speed == 1 ? "32Gps" :
		    ha->max_supported_speed == 2 ? "64Gps" : "unknown");
		if (vha->min_supported_speed) {
			ha->min_supported_speed = mcp->mb[5] &
			    (BIT_0 | BIT_1 | BIT_2);
			ql_dbg(ql_dbg_mbx, vha, 0x119c,
			    "min_supported_speed=%s.\n",
			    ha->min_supported_speed == 6 ? "64Gps" :
			    ha->min_supported_speed == 5 ? "32Gps" :
			    ha->min_supported_speed == 4 ? "16Gps" :
			    ha->min_supported_speed == 3 ? "8Gps" :
			    ha->min_supported_speed == 2 ? "4Gps" : "unknown");
		}
	}

done:
	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1028,
	    "Done %s.\n", __func__);

	return rval;
}

/*
 * qla_get_exlogin_status
 *	Get extended login status
 *	uses the memory offload control/status Mailbox
 *
 * Input:
 *	ha:		adapter state pointer.
 *	fwopt:		firmware options
 *
 * Returns:
 *	qla2x00 local function status
 *
 * Context:
 *	Kernel context.
 */
#define	FETCH_XLOGINS_STAT	0x8
int
qla_get_exlogin_status(scsi_qla_host_t *vha, uint16_t *buf_sz,
    uint16_t *ex_logins_cnt)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118f,
	    "Entered %s\n", __func__);

	memset(mcp->mb, 0, sizeof(mcp->mb));
	mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
	mcp->mb[1] = FETCH_XLOGINS_STAT;
	mcp->out_mb = MBX_1|MBX_0;
	mcp->in_mb = MBX_10|MBX_4|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;

	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1115, "Failed=%x.\n", rval);
	} else {
		*buf_sz = mcp->mb[4];
		*ex_logins_cnt = mcp->mb[10];

		ql_log(ql_log_info, vha, 0x1190,
		    "buffer size 0x%x, exchange login count=%d\n",
		    mcp->mb[4], mcp->mb[10]);

		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1116,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla_set_exlogin_mem_cfg
 *	set extended login memory configuration
 *	Mbx needs to be issued before init_cb is set
 *
 * Input:
 *	ha:		adapter state pointer.
 *	buffer:		buffer pointer
 *	phys_addr:	physical address of buffer
 *	size:		size of buffer
 *	TARGET_QUEUE_LOCK must be released
 *	ADAPTER_STATE_LOCK must be released
 *
 * Returns:
 *	qla2x00 local function status code.
 *
 * Context:
 *	Kernel context.
 */
#define CONFIG_XLOGINS_MEM	0x9
int
qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111a,
	    "Entered %s.\n", __func__);

	memset(mcp->mb, 0, sizeof(mcp->mb));
	mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
	mcp->mb[1] = CONFIG_XLOGINS_MEM;
	mcp->mb[2] = MSW(phys_addr);
	mcp->mb[3] = LSW(phys_addr);
	mcp->mb[6] = MSW(MSD(phys_addr));
	mcp->mb[7] = LSW(MSD(phys_addr));
	mcp->mb[8] = MSW(ha->exlogin_size);
	mcp->mb[9] = LSW(ha->exlogin_size);
	mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_11|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x111b,
		    "EXlogin Failed=%x. MB0=%x MB11=%x\n",
		    rval, mcp->mb[0], mcp->mb[11]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla_get_exchoffld_status
 *	Get exchange offload status
 *	uses the memory offload control/status Mailbox
 *
 * Input:
 *	ha:		adapter state pointer.
 *	fwopt:		firmware options
 *
 * Returns:
 *	qla2x00 local function status
 *
 * Context:
 *	Kernel context.
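 *
 * Note:
 *	Like qla_get_exlogin_status(), this uses the
 *	MBC_GET_MEM_OFFLOAD_CNTRL_STAT mailbox with a sub-opcode in mb[1]
 *	(FETCH_XCHOFFLD_STAT); the buffer size is returned in mb[4] and the
 *	exchange count in mb[10].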
 */
#define FETCH_XCHOFFLD_STAT	0x2
int
qla_get_exchoffld_status(scsi_qla_host_t *vha, uint16_t *buf_sz,
    uint16_t *ex_logins_cnt)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1019,
	    "Entered %s\n", __func__);

	memset(mcp->mb, 0, sizeof(mcp->mb));
	mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
	mcp->mb[1] = FETCH_XCHOFFLD_STAT;
	mcp->out_mb = MBX_1|MBX_0;
	mcp->in_mb = MBX_10|MBX_4|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;

	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1155, "Failed=%x.\n", rval);
	} else {
		*buf_sz = mcp->mb[4];
		*ex_logins_cnt = mcp->mb[10];

		ql_log(ql_log_info, vha, 0x118e,
		    "buffer size 0x%x, exchange offload count=%d\n",
		    mcp->mb[4], mcp->mb[10]);

		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1156,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla_set_exchoffld_mem_cfg
 *	Set exchange offload memory configuration
 *	Mbx needs to be issued before init_cb is set
 *
 * Input:
 *	ha:		adapter state pointer.
 *	buffer:		buffer pointer
 *	phys_addr:	physical address of buffer
 *	size:		size of buffer
 *	TARGET_QUEUE_LOCK must be released
 *	ADAPTER_STATE_LOCK must be released
 *
 * Returns:
 *	qla2x00 local function status code.
 *
 * Context:
 *	Kernel context.
 */
#define CONFIG_XCHOFFLD_MEM	0x3
int
qla_set_exchoffld_mem_cfg(scsi_qla_host_t *vha)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1157,
	    "Entered %s.\n", __func__);

	memset(mcp->mb, 0, sizeof(mcp->mb));
	mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
	mcp->mb[1] = CONFIG_XCHOFFLD_MEM;
	mcp->mb[2] = MSW(ha->exchoffld_buf_dma);
	mcp->mb[3] = LSW(ha->exchoffld_buf_dma);
	mcp->mb[6] = MSW(MSD(ha->exchoffld_buf_dma));
	mcp->mb[7] = LSW(MSD(ha->exchoffld_buf_dma));
	mcp->mb[8] = MSW(ha->exchoffld_size);
	mcp->mb[9] = LSW(ha->exchoffld_size);
	mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_11|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x1158, "Failed=%x.\n", rval);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1192,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2x00_get_fw_version
 *	Get firmware version.
 *
 * Input:
 *	ha:		adapter state pointer.
 *	major:		pointer for major number.
 *	minor:		pointer for minor number.
 *	subminor:	pointer for subminor number.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
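 *
 * Note:
 *	mb[1]-mb[3] carry the major/minor/subminor version; additional
 *	mailboxes are requested on FWI2-capable and ISP27xx/28xx parts for
 *	firmware attributes, MPI/PHY/serdes versions and RAM ranges (see
 *	the per-chip handling below).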
 */
int
qla2x00_get_fw_version(scsi_qla_host_t *vha)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1029,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_GET_FIRMWARE_VERSION;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	if (IS_QLA81XX(vha->hw) || IS_QLA8031(ha) || IS_QLA8044(ha))
		mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8;
	if (IS_FWI2_CAPABLE(ha))
		mcp->in_mb |= MBX_17|MBX_16|MBX_15;
	if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
		mcp->in_mb |=
		    MBX_25|MBX_24|MBX_23|MBX_22|MBX_21|MBX_20|MBX_19|MBX_18|
		    MBX_14|MBX_13|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7;

	mcp->flags = 0;
	mcp->tov = MBX_TOV_SECONDS;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS)
		goto failed;

	/* Return mailbox data. */
	ha->fw_major_version = mcp->mb[1];
	ha->fw_minor_version = mcp->mb[2];
	ha->fw_subminor_version = mcp->mb[3];
	ha->fw_attributes = mcp->mb[6];
	if (IS_QLA2100(vha->hw) || IS_QLA2200(vha->hw))
		ha->fw_memory_size = 0x1FFFF;	/* Defaults to 128KB. */
	else
		ha->fw_memory_size = (mcp->mb[5] << 16) | mcp->mb[4];

	if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
		ha->mpi_version[0] = mcp->mb[10] & 0xff;
		ha->mpi_version[1] = mcp->mb[11] >> 8;
		ha->mpi_version[2] = mcp->mb[11] & 0xff;
		ha->mpi_capabilities = (mcp->mb[12] << 16) | mcp->mb[13];
		ha->phy_version[0] = mcp->mb[8] & 0xff;
		ha->phy_version[1] = mcp->mb[9] >> 8;
		ha->phy_version[2] = mcp->mb[9] & 0xff;
	}

	if (IS_FWI2_CAPABLE(ha)) {
		ha->fw_attributes_h = mcp->mb[15];
		ha->fw_attributes_ext[0] = mcp->mb[16];
		ha->fw_attributes_ext[1] = mcp->mb[17];
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1139,
		    "%s: FW_attributes Upper: 0x%x, Lower: 0x%x.\n",
		    __func__, mcp->mb[15], mcp->mb[6]);
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x112f,
		    "%s: Ext_FwAttributes Upper: 0x%x, Lower: 0x%x.\n",
		    __func__, mcp->mb[17], mcp->mb[16]);

		if (ha->fw_attributes_h & 0x4)
			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118d,
			    "%s: Firmware supports Extended Login 0x%x\n",
			    __func__, ha->fw_attributes_h);

		if (ha->fw_attributes_h & 0x8)
			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1191,
			    "%s: Firmware supports Exchange Offload 0x%x\n",
			    __func__, ha->fw_attributes_h);

		/*
		 * FW supports nvme and driver load parameter requested nvme.
		 * BIT 26 of fw_attributes indicates NVMe support.
		 */
1084 */ 1085 if ((ha->fw_attributes_h & 1086 (FW_ATTR_H_NVME | FW_ATTR_H_NVME_UPDATED)) && 1087 ql2xnvmeenable) { 1088 if (ha->fw_attributes_h & FW_ATTR_H_NVME_FBURST) 1089 vha->flags.nvme_first_burst = 1; 1090 1091 vha->flags.nvme_enabled = 1; 1092 ql_log(ql_log_info, vha, 0xd302, 1093 "%s: FC-NVMe is Enabled (0x%x)\n", 1094 __func__, ha->fw_attributes_h); 1095 } 1096 1097 /* BIT_13 of Extended FW Attributes informs about NVMe2 support */ 1098 if (ha->fw_attributes_ext[0] & FW_ATTR_EXT0_NVME2) { 1099 ql_log(ql_log_info, vha, 0xd302, 1100 "Firmware supports NVMe2 0x%x\n", 1101 ha->fw_attributes_ext[0]); 1102 vha->flags.nvme2_enabled = 1; 1103 } 1104 } 1105 1106 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 1107 ha->serdes_version[0] = mcp->mb[7] & 0xff; 1108 ha->serdes_version[1] = mcp->mb[8] >> 8; 1109 ha->serdes_version[2] = mcp->mb[8] & 0xff; 1110 ha->mpi_version[0] = mcp->mb[10] & 0xff; 1111 ha->mpi_version[1] = mcp->mb[11] >> 8; 1112 ha->mpi_version[2] = mcp->mb[11] & 0xff; 1113 ha->pep_version[0] = mcp->mb[13] & 0xff; 1114 ha->pep_version[1] = mcp->mb[14] >> 8; 1115 ha->pep_version[2] = mcp->mb[14] & 0xff; 1116 ha->fw_shared_ram_start = (mcp->mb[19] << 16) | mcp->mb[18]; 1117 ha->fw_shared_ram_end = (mcp->mb[21] << 16) | mcp->mb[20]; 1118 ha->fw_ddr_ram_start = (mcp->mb[23] << 16) | mcp->mb[22]; 1119 ha->fw_ddr_ram_end = (mcp->mb[25] << 16) | mcp->mb[24]; 1120 if (IS_QLA28XX(ha)) { 1121 if (mcp->mb[16] & BIT_10) 1122 ha->flags.secure_fw = 1; 1123 1124 ql_log(ql_log_info, vha, 0xffff, 1125 "Secure Flash Update in FW: %s\n", 1126 (ha->flags.secure_fw) ? "Supported" : 1127 "Not Supported"); 1128 } 1129 1130 if (ha->flags.scm_supported_a && 1131 (ha->fw_attributes_ext[0] & FW_ATTR_EXT0_SCM_SUPPORTED)) { 1132 ha->flags.scm_supported_f = 1; 1133 ha->sf_init_cb->flags |= BIT_13; 1134 } 1135 ql_log(ql_log_info, vha, 0x11a3, "SCM in FW: %s\n", 1136 (ha->flags.scm_supported_f) ? "Supported" : 1137 "Not Supported"); 1138 1139 if (vha->flags.nvme2_enabled) { 1140 /* set BIT_15 of special feature control block for SLER */ 1141 ha->sf_init_cb->flags |= BIT_15; 1142 /* set BIT_14 of special feature control block for PI CTRL*/ 1143 ha->sf_init_cb->flags |= BIT_14; 1144 } 1145 } 1146 1147 failed: 1148 if (rval != QLA_SUCCESS) { 1149 /*EMPTY*/ 1150 ql_dbg(ql_dbg_mbx, vha, 0x102a, "Failed=%x.\n", rval); 1151 } else { 1152 /*EMPTY*/ 1153 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102b, 1154 "Done %s.\n", __func__); 1155 } 1156 return rval; 1157 } 1158 1159 /* 1160 * qla2x00_get_fw_options 1161 * Set firmware options. 1162 * 1163 * Input: 1164 * ha = adapter block pointer. 1165 * fwopt = pointer for firmware options. 1166 * 1167 * Returns: 1168 * qla2x00 local function return status code. 1169 * 1170 * Context: 1171 * Kernel context. 
 */
int
qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102c,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_GET_FIRMWARE_OPTION;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x102d, "Failed=%x.\n", rval);
	} else {
		fwopts[0] = mcp->mb[0];
		fwopts[1] = mcp->mb[1];
		fwopts[2] = mcp->mb[2];
		fwopts[3] = mcp->mb[3];

		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102e,
		    "Done %s.\n", __func__);
	}

	return rval;
}


/*
 * qla2x00_set_fw_options
 *	Set firmware options.
 *
 * Input:
 *	ha = adapter block pointer.
 *	fwopt = pointer for firmware options.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102f,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_SET_FIRMWARE_OPTION;
	mcp->mb[1] = fwopts[1];
	mcp->mb[2] = fwopts[2];
	mcp->mb[3] = fwopts[3];
	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_0;
	if (IS_FWI2_CAPABLE(vha->hw)) {
		mcp->in_mb |= MBX_1;
		mcp->mb[10] = fwopts[10];
		mcp->out_mb |= MBX_10;
	} else {
		mcp->mb[10] = fwopts[10];
		mcp->mb[11] = fwopts[11];
		mcp->mb[12] = 0;	/* Undocumented, but used */
		mcp->out_mb |= MBX_12|MBX_11|MBX_10;
	}
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	fwopts[0] = mcp->mb[0];

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x1030,
		    "Failed=%x (%x/%x).\n", rval, mcp->mb[0], mcp->mb[1]);
	} else {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1031,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2x00_mbx_reg_test
 *	Mailbox register wrap test.
 *
 * Input:
 *	ha = adapter block pointer.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
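 *
 * Note:
 *	Seven known patterns are written to mb[1]-mb[7]; the firmware is
 *	expected to echo them back unchanged, and any mismatch is treated
 *	as QLA_FUNCTION_FAILED by the checks below.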
 */
int
qla2x00_mbx_reg_test(scsi_qla_host_t *vha)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1032,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST;
	mcp->mb[1] = 0xAAAA;
	mcp->mb[2] = 0x5555;
	mcp->mb[3] = 0xAA55;
	mcp->mb[4] = 0x55AA;
	mcp->mb[5] = 0xA5A5;
	mcp->mb[6] = 0x5A5A;
	mcp->mb[7] = 0x2525;
	mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval == QLA_SUCCESS) {
		if (mcp->mb[1] != 0xAAAA || mcp->mb[2] != 0x5555 ||
		    mcp->mb[3] != 0xAA55 || mcp->mb[4] != 0x55AA)
			rval = QLA_FUNCTION_FAILED;
		if (mcp->mb[5] != 0xA5A5 || mcp->mb[6] != 0x5A5A ||
		    mcp->mb[7] != 0x2525)
			rval = QLA_FUNCTION_FAILED;
	}

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x1033, "Failed=%x.\n", rval);
	} else {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1034,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2x00_verify_checksum
 *	Verify firmware checksum.
 *
 * Input:
 *	ha = adapter block pointer.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1035,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_VERIFY_CHECKSUM;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_0;
	if (IS_FWI2_CAPABLE(vha->hw)) {
		mcp->mb[1] = MSW(risc_addr);
		mcp->mb[2] = LSW(risc_addr);
		mcp->out_mb |= MBX_2|MBX_1;
		mcp->in_mb |= MBX_2|MBX_1;
	} else {
		mcp->mb[1] = LSW(risc_addr);
		mcp->out_mb |= MBX_1;
		mcp->in_mb |= MBX_1;
	}

	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1036,
		    "Failed=%x chm sum=%x.\n", rval, IS_FWI2_CAPABLE(vha->hw) ?
		    (mcp->mb[2] << 16) | mcp->mb[1] : mcp->mb[1]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1037,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2x00_issue_iocb
 *	Issue IOCB using mailbox command
 *
 * Input:
 *	ha = adapter state pointer.
 *	buffer = buffer pointer.
 *	phys_addr = physical address of buffer.
 *	size = size of buffer.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
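 *
 * Note:
 *	The IOCB is handed to the firmware by physical address through
 *	mb[2/3/6/7] (MBC_IOCB_COMMAND_A64); on success the reserved bits of
 *	the returned status entry are masked with RF_MASK/RF_MASK_24XX.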
 */
int
qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
    dma_addr_t phys_addr, size_t size, uint32_t tov)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	if (!vha->hw->flags.fw_started)
		return QLA_INVALID_COMMAND;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1038,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_IOCB_COMMAND_A64;
	mcp->mb[1] = 0;
	mcp->mb[2] = MSW(LSD(phys_addr));
	mcp->mb[3] = LSW(LSD(phys_addr));
	mcp->mb[6] = MSW(MSD(phys_addr));
	mcp->mb[7] = LSW(MSD(phys_addr));
	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_1|MBX_0;
	mcp->tov = tov;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x1039, "Failed=%x.\n", rval);
	} else {
		sts_entry_t *sts_entry = buffer;

		/* Mask reserved bits. */
		sts_entry->entry_status &=
		    IS_FWI2_CAPABLE(vha->hw) ? RF_MASK_24XX : RF_MASK;
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103a,
		    "Done %s (status=%x).\n", __func__,
		    sts_entry->entry_status);
	}

	return rval;
}

int
qla2x00_issue_iocb(scsi_qla_host_t *vha, void *buffer, dma_addr_t phys_addr,
    size_t size)
{
	return qla2x00_issue_iocb_timeout(vha, buffer, phys_addr, size,
	    MBX_TOV_SECONDS);
}

/*
 * qla2x00_abort_command
 *	Abort command aborts a specified IOCB.
 *
 * Input:
 *	ha = adapter block pointer.
 *	sp = SRB structure pointer.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_abort_command(srb_t *sp)
{
	unsigned long flags = 0;
	int rval;
	uint32_t handle = 0;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	fc_port_t *fcport = sp->fcport;
	scsi_qla_host_t *vha = fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103b,
	    "Entered %s.\n", __func__);

	if (sp->qpair)
		req = sp->qpair->req;
	else
		req = vha->req;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
		if (req->outstanding_cmds[handle] == sp)
			break;
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (handle == req->num_outstanding_cmds) {
		/* command not found */
		return QLA_FUNCTION_FAILED;
	}

	mcp->mb[0] = MBC_ABORT_COMMAND;
	if (HAS_EXTENDED_IDS(ha))
		mcp->mb[1] = fcport->loop_id;
	else
		mcp->mb[1] = fcport->loop_id << 8;
	mcp->mb[2] = (uint16_t)handle;
	mcp->mb[3] = (uint16_t)(handle >> 16);
	mcp->mb[6] = (uint16_t)cmd->device->lun;
	mcp->out_mb = MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x103c, "Failed=%x.\n", rval);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103d,
		    "Done %s.\n", __func__);
	}

	return rval;
}

int
qla2x00_abort_target(struct fc_port *fcport, uint64_t l, int tag)
{
	int rval, rval2;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	scsi_qla_host_t *vha;

	vha = fcport->vha;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103e,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_ABORT_TARGET;
	mcp->out_mb = MBX_9|MBX_2|MBX_1|MBX_0;
	if (HAS_EXTENDED_IDS(vha->hw)) {
		mcp->mb[1] = fcport->loop_id;
		mcp->mb[10] = 0;
		mcp->out_mb |= MBX_10;
	} else {
		mcp->mb[1] = fcport->loop_id << 8;
	}
	mcp->mb[2] = vha->hw->loop_reset_delay;
	mcp->mb[9] = vha->vp_idx;

	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103f,
		    "Failed=%x.\n", rval);
	}

	/* Issue marker IOCB. */
	rval2 = qla2x00_marker(vha, vha->hw->base_qpair, fcport->loop_id, 0,
	    MK_SYNC_ID);
	if (rval2 != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1040,
		    "Failed to issue marker IOCB (%x).\n", rval2);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1041,
		    "Done %s.\n", __func__);
	}

	return rval;
}

int
qla2x00_lun_reset(struct fc_port *fcport, uint64_t l, int tag)
{
	int rval, rval2;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	scsi_qla_host_t *vha;

	vha = fcport->vha;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1042,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_LUN_RESET;
	mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
	if (HAS_EXTENDED_IDS(vha->hw))
		mcp->mb[1] = fcport->loop_id;
	else
		mcp->mb[1] = fcport->loop_id << 8;
	mcp->mb[2] = (u32)l;
	mcp->mb[3] = 0;
	mcp->mb[9] = vha->vp_idx;

	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1043, "Failed=%x.\n", rval);
	}

	/* Issue marker IOCB. */
	rval2 = qla2x00_marker(vha, vha->hw->base_qpair, fcport->loop_id, l,
	    MK_SYNC_ID_LUN);
	if (rval2 != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1044,
		    "Failed to issue marker IOCB (%x).\n", rval2);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1045,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2x00_get_adapter_id
 *	Get adapter ID and topology.
 *
 * Input:
 *	ha = adapter block pointer.
 *	id = pointer for loop ID.
 *	al_pa = pointer for AL_PA.
 *	area = pointer for area.
 *	domain = pointer for domain.
 *	top = pointer for topology.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
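 *
 * Note:
 *	On ISP27xx/28xx parts mb[15] additionally returns the BB credit
 *	(vha->bbcr) and mb[7] carries the SCM EDC/RDF completion bits that
 *	are decoded after the command completes below.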
 */
int
qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
    uint8_t *area, uint8_t *domain, uint16_t *top, uint16_t *sw_cap)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1046,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID;
	mcp->mb[9] = vha->vp_idx;
	mcp->out_mb = MBX_9|MBX_0;
	mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	if (IS_CNA_CAPABLE(vha->hw))
		mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10;
	if (IS_FWI2_CAPABLE(vha->hw))
		mcp->in_mb |= MBX_19|MBX_18|MBX_17|MBX_16;
	if (IS_QLA27XX(vha->hw) || IS_QLA28XX(vha->hw)) {
		mcp->in_mb |= MBX_15;
		mcp->out_mb |= MBX_7|MBX_21|MBX_22|MBX_23;
	}

	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (mcp->mb[0] == MBS_COMMAND_ERROR)
		rval = QLA_COMMAND_ERROR;
	else if (mcp->mb[0] == MBS_INVALID_COMMAND)
		rval = QLA_INVALID_COMMAND;

	/* Return data. */
	*id = mcp->mb[1];
	*al_pa = LSB(mcp->mb[2]);
	*area = MSB(mcp->mb[2]);
	*domain = LSB(mcp->mb[3]);
	*top = mcp->mb[6];
	*sw_cap = mcp->mb[7];

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x1047, "Failed=%x.\n", rval);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1048,
		    "Done %s.\n", __func__);

		if (IS_CNA_CAPABLE(vha->hw)) {
			vha->fcoe_vlan_id = mcp->mb[9] & 0xfff;
			vha->fcoe_fcf_idx = mcp->mb[10];
			vha->fcoe_vn_port_mac[5] = mcp->mb[11] >> 8;
			vha->fcoe_vn_port_mac[4] = mcp->mb[11] & 0xff;
			vha->fcoe_vn_port_mac[3] = mcp->mb[12] >> 8;
			vha->fcoe_vn_port_mac[2] = mcp->mb[12] & 0xff;
			vha->fcoe_vn_port_mac[1] = mcp->mb[13] >> 8;
			vha->fcoe_vn_port_mac[0] = mcp->mb[13] & 0xff;
		}
		/* If FA-WWN supported */
		if (IS_FAWWN_CAPABLE(vha->hw)) {
			if (mcp->mb[7] & BIT_14) {
				vha->port_name[0] = MSB(mcp->mb[16]);
				vha->port_name[1] = LSB(mcp->mb[16]);
				vha->port_name[2] = MSB(mcp->mb[17]);
				vha->port_name[3] = LSB(mcp->mb[17]);
				vha->port_name[4] = MSB(mcp->mb[18]);
				vha->port_name[5] = LSB(mcp->mb[18]);
				vha->port_name[6] = MSB(mcp->mb[19]);
				vha->port_name[7] = LSB(mcp->mb[19]);
				fc_host_port_name(vha->host) =
				    wwn_to_u64(vha->port_name);
				ql_dbg(ql_dbg_mbx, vha, 0x10ca,
				    "FA-WWN acquired %016llx\n",
				    wwn_to_u64(vha->port_name));
			}
		}

		if (IS_QLA27XX(vha->hw) || IS_QLA28XX(vha->hw)) {
			vha->bbcr = mcp->mb[15];
			if (mcp->mb[7] & SCM_EDC_ACC_RECEIVED) {
				ql_log(ql_log_info, vha, 0x11a4,
				    "SCM: EDC ELS completed, flags 0x%x\n",
				    mcp->mb[21]);
			}
			if (mcp->mb[7] & SCM_RDF_ACC_RECEIVED) {
				vha->hw->flags.scm_enabled = 1;
				vha->scm_fabric_connection_flags |=
				    SCM_FLAG_RDF_COMPLETED;
				ql_log(ql_log_info, vha, 0x11a5,
				    "SCM: RDF ELS completed, flags 0x%x\n",
				    mcp->mb[23]);
			}
		}
	}

	return rval;
}

/*
 * qla2x00_get_retry_cnt
 *	Get current firmware login retry count and delay.
 *
 * Input:
 *	ha = adapter block pointer.
 *	retry_cnt = pointer to login retry count.
 *	tov = pointer to login timeout value.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
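 *
 * Note:
 *	r_a_tov is derived from mb[3] (reported in 100 ms units per the
 *	inline comment below); the larger of the firmware and caller
 *	retry-count/timeout products is kept.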
 */
int
qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov,
    uint16_t *r_a_tov)
{
	int rval;
	uint16_t ratov;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1049,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_GET_RETRY_COUNT;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x104a,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		/* Convert returned data and check our values. */
		*r_a_tov = mcp->mb[3] / 2;
		ratov = (mcp->mb[3]/2) / 10;  /* mb[3] value is in 100ms */
		if (mcp->mb[1] * ratov > (*retry_cnt) * (*tov)) {
			/* Update to the larger values */
			*retry_cnt = (uint8_t)mcp->mb[1];
			*tov = ratov;
		}

		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104b,
		    "Done %s mb3=%d ratov=%d.\n", __func__, mcp->mb[3], ratov);
	}

	return rval;
}

/*
 * qla2x00_init_firmware
 *	Initialize adapter firmware.
 *
 * Input:
 *	ha = adapter block pointer.
 *	dptr = Initialization control block pointer.
 *	size = size of initialization control block.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104c,
	    "Entered %s.\n", __func__);

	if (IS_P3P_TYPE(ha) && ql2xdbwr)
		qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr,
		    (0x04 | (ha->portnum << 5) | (0 << 8) | (0 << 16)));

	if (ha->flags.npiv_supported)
		mcp->mb[0] = MBC_MID_INITIALIZE_FIRMWARE;
	else
		mcp->mb[0] = MBC_INITIALIZE_FIRMWARE;

	mcp->mb[1] = 0;
	mcp->mb[2] = MSW(ha->init_cb_dma);
	mcp->mb[3] = LSW(ha->init_cb_dma);
	mcp->mb[6] = MSW(MSD(ha->init_cb_dma));
	mcp->mb[7] = LSW(MSD(ha->init_cb_dma));
	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	if (ha->ex_init_cb && ha->ex_init_cb->ex_version) {
		mcp->mb[1] = BIT_0;
		mcp->mb[10] = MSW(ha->ex_init_cb_dma);
		mcp->mb[11] = LSW(ha->ex_init_cb_dma);
		mcp->mb[12] = MSW(MSD(ha->ex_init_cb_dma));
		mcp->mb[13] = LSW(MSD(ha->ex_init_cb_dma));
		mcp->mb[14] = sizeof(*ha->ex_init_cb);
		mcp->out_mb |= MBX_14|MBX_13|MBX_12|MBX_11|MBX_10;
	}

	if (ha->flags.scm_supported_f || vha->flags.nvme2_enabled) {
		mcp->mb[1] |= BIT_1;
		mcp->mb[16] = MSW(ha->sf_init_cb_dma);
		mcp->mb[17] = LSW(ha->sf_init_cb_dma);
		mcp->mb[18] = MSW(MSD(ha->sf_init_cb_dma));
		mcp->mb[19] = LSW(MSD(ha->sf_init_cb_dma));
		mcp->mb[15] = sizeof(*ha->sf_init_cb);
		mcp->out_mb |= MBX_19|MBX_18|MBX_17|MBX_16|MBX_15;
	}

	/* 1 and 2 should normally be captured. */
	mcp->in_mb = MBX_2|MBX_1|MBX_0;
	if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
		/* mb3 is additional info about the installed SFP. */
		mcp->in_mb |= MBX_3;
	mcp->buf_size = size;
	mcp->flags = MBX_DMA_OUT;
	mcp->tov = MBX_TOV_SECONDS;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x104d,
		    "Failed=%x mb[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x.\n",
		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3]);
		if (ha->init_cb) {
			ql_dbg(ql_dbg_mbx, vha, 0x104d, "init_cb:\n");
			ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha,
			    0x0104d, ha->init_cb, sizeof(*ha->init_cb));
		}
		if (ha->ex_init_cb && ha->ex_init_cb->ex_version) {
			ql_dbg(ql_dbg_mbx, vha, 0x104d, "ex_init_cb:\n");
			ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha,
			    0x0104d, ha->ex_init_cb, sizeof(*ha->ex_init_cb));
		}
	} else {
		if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
			if (mcp->mb[2] == 6 || mcp->mb[3] == 2)
				ql_dbg(ql_dbg_mbx, vha, 0x119d,
				    "Invalid SFP/Validation Failed\n");
		}
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104e,
		    "Done %s.\n", __func__);
	}

	return rval;
}


/*
 * qla2x00_get_port_database
 *	Issue normal/enhanced get port database mailbox command
 *	and copy device name as necessary.
 *
 * Input:
 *	ha = adapter state pointer.
 *	dev = structure pointer.
 *	opt = enhanced cmd option byte.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	port_database_t *pd;
	struct port_database_24xx *pd24;
	dma_addr_t pd_dma;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104f,
	    "Entered %s.\n", __func__);

	pd24 = NULL;
	pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
	if (pd == NULL) {
		ql_log(ql_log_warn, vha, 0x1050,
		    "Failed to allocate port database structure.\n");
		fcport->query = 0;
		return QLA_MEMORY_ALLOC_FAILED;
	}

	mcp->mb[0] = MBC_GET_PORT_DATABASE;
	if (opt != 0 && !IS_FWI2_CAPABLE(ha))
		mcp->mb[0] = MBC_ENHANCED_GET_PORT_DATABASE;
	mcp->mb[2] = MSW(pd_dma);
	mcp->mb[3] = LSW(pd_dma);
	mcp->mb[6] = MSW(MSD(pd_dma));
	mcp->mb[7] = LSW(MSD(pd_dma));
	mcp->mb[9] = vha->vp_idx;
	mcp->out_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
	mcp->in_mb = MBX_0;
	if (IS_FWI2_CAPABLE(ha)) {
		mcp->mb[1] = fcport->loop_id;
		mcp->mb[10] = opt;
		mcp->out_mb |= MBX_10|MBX_1;
		mcp->in_mb |= MBX_1;
	} else if (HAS_EXTENDED_IDS(ha)) {
		mcp->mb[1] = fcport->loop_id;
		mcp->mb[10] = opt;
		mcp->out_mb |= MBX_10|MBX_1;
	} else {
		mcp->mb[1] = fcport->loop_id << 8 | opt;
		mcp->out_mb |= MBX_1;
	}
	mcp->buf_size = IS_FWI2_CAPABLE(ha) ?
	    PORT_DATABASE_24XX_SIZE : PORT_DATABASE_SIZE;
	mcp->flags = MBX_DMA_IN;
	mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS)
		goto gpd_error_out;

	if (IS_FWI2_CAPABLE(ha)) {
		uint64_t zero = 0;
		u8 current_login_state, last_login_state;

		pd24 = (struct port_database_24xx *) pd;

		/* Check for logged in state. */
		if (NVME_TARGET(ha, fcport)) {
			current_login_state = pd24->current_login_state >> 4;
			last_login_state = pd24->last_login_state >> 4;
		} else {
			current_login_state = pd24->current_login_state & 0xf;
			last_login_state = pd24->last_login_state & 0xf;
		}
		fcport->current_login_state = pd24->current_login_state;
		fcport->last_login_state = pd24->last_login_state;

		/* Check for logged in state. */
		if (current_login_state != PDS_PRLI_COMPLETE &&
		    last_login_state != PDS_PRLI_COMPLETE) {
			ql_dbg(ql_dbg_mbx, vha, 0x119a,
			    "Unable to verify login-state (%x/%x) for loop_id %x.\n",
			    current_login_state, last_login_state,
			    fcport->loop_id);
			rval = QLA_FUNCTION_FAILED;

			if (!fcport->query)
				goto gpd_error_out;
		}

		if (fcport->loop_id == FC_NO_LOOP_ID ||
		    (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
		     memcmp(fcport->port_name, pd24->port_name, 8))) {
			/* We lost the device mid way. */
			rval = QLA_NOT_LOGGED_IN;
			goto gpd_error_out;
		}

		/* Names are little-endian. */
		memcpy(fcport->node_name, pd24->node_name, WWN_SIZE);
		memcpy(fcport->port_name, pd24->port_name, WWN_SIZE);

		/* Get port_id of device. */
		fcport->d_id.b.domain = pd24->port_id[0];
		fcport->d_id.b.area = pd24->port_id[1];
		fcport->d_id.b.al_pa = pd24->port_id[2];
		fcport->d_id.b.rsvd_1 = 0;

		/* If not target must be initiator or unknown type. */
		if ((pd24->prli_svc_param_word_3[0] & BIT_4) == 0)
			fcport->port_type = FCT_INITIATOR;
		else
			fcport->port_type = FCT_TARGET;

		/* Passback COS information. */
		fcport->supported_classes = (pd24->flags & PDF_CLASS_2) ?
		    FC_COS_CLASS2 : FC_COS_CLASS3;

		if (pd24->prli_svc_param_word_3[0] & BIT_7)
			fcport->flags |= FCF_CONF_COMP_SUPPORTED;
	} else {
		uint64_t zero = 0;

		/* Check for logged in state. */
		if (pd->master_state != PD_STATE_PORT_LOGGED_IN &&
		    pd->slave_state != PD_STATE_PORT_LOGGED_IN) {
			ql_dbg(ql_dbg_mbx, vha, 0x100a,
			    "Unable to verify login-state (%x/%x) - "
			    "portid=%02x%02x%02x.\n", pd->master_state,
			    pd->slave_state, fcport->d_id.b.domain,
			    fcport->d_id.b.area, fcport->d_id.b.al_pa);
			rval = QLA_FUNCTION_FAILED;
			goto gpd_error_out;
		}

		if (fcport->loop_id == FC_NO_LOOP_ID ||
		    (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
		     memcmp(fcport->port_name, pd->port_name, 8))) {
			/* We lost the device mid way. */
			rval = QLA_NOT_LOGGED_IN;
			goto gpd_error_out;
		}

		/* Names are little-endian. */
		memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
		memcpy(fcport->port_name, pd->port_name, WWN_SIZE);

		/* Get port_id of device. */
		fcport->d_id.b.domain = pd->port_id[0];
		fcport->d_id.b.area = pd->port_id[3];
		fcport->d_id.b.al_pa = pd->port_id[2];
		fcport->d_id.b.rsvd_1 = 0;

		/* If not target must be initiator or unknown type. */
		if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
			fcport->port_type = FCT_INITIATOR;
		else
			fcport->port_type = FCT_TARGET;

		/* Passback COS information. */
		fcport->supported_classes = (pd->options & BIT_4) ?
		    FC_COS_CLASS2 : FC_COS_CLASS3;
2060 FC_COS_CLASS2 : FC_COS_CLASS3; 2061 } 2062 2063 gpd_error_out: 2064 dma_pool_free(ha->s_dma_pool, pd, pd_dma); 2065 fcport->query = 0; 2066 2067 if (rval != QLA_SUCCESS) { 2068 ql_dbg(ql_dbg_mbx, vha, 0x1052, 2069 "Failed=%x mb[0]=%x mb[1]=%x.\n", rval, 2070 mcp->mb[0], mcp->mb[1]); 2071 } else { 2072 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1053, 2073 "Done %s.\n", __func__); 2074 } 2075 2076 return rval; 2077 } 2078 2079 int 2080 qla24xx_get_port_database(scsi_qla_host_t *vha, u16 nport_handle, 2081 struct port_database_24xx *pdb) 2082 { 2083 mbx_cmd_t mc; 2084 mbx_cmd_t *mcp = &mc; 2085 dma_addr_t pdb_dma; 2086 int rval; 2087 2088 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1115, 2089 "Entered %s.\n", __func__); 2090 2091 memset(pdb, 0, sizeof(*pdb)); 2092 2093 pdb_dma = dma_map_single(&vha->hw->pdev->dev, pdb, 2094 sizeof(*pdb), DMA_FROM_DEVICE); 2095 if (!pdb_dma) { 2096 ql_log(ql_log_warn, vha, 0x1116, "Failed to map dma buffer.\n"); 2097 return QLA_MEMORY_ALLOC_FAILED; 2098 } 2099 2100 mcp->mb[0] = MBC_GET_PORT_DATABASE; 2101 mcp->mb[1] = nport_handle; 2102 mcp->mb[2] = MSW(LSD(pdb_dma)); 2103 mcp->mb[3] = LSW(LSD(pdb_dma)); 2104 mcp->mb[6] = MSW(MSD(pdb_dma)); 2105 mcp->mb[7] = LSW(MSD(pdb_dma)); 2106 mcp->mb[9] = 0; 2107 mcp->mb[10] = 0; 2108 mcp->out_mb = MBX_10|MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 2109 mcp->in_mb = MBX_1|MBX_0; 2110 mcp->buf_size = sizeof(*pdb); 2111 mcp->flags = MBX_DMA_IN; 2112 mcp->tov = vha->hw->login_timeout * 2; 2113 rval = qla2x00_mailbox_command(vha, mcp); 2114 2115 if (rval != QLA_SUCCESS) { 2116 ql_dbg(ql_dbg_mbx, vha, 0x111a, 2117 "Failed=%x mb[0]=%x mb[1]=%x.\n", 2118 rval, mcp->mb[0], mcp->mb[1]); 2119 } else { 2120 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111b, 2121 "Done %s.\n", __func__); 2122 } 2123 2124 dma_unmap_single(&vha->hw->pdev->dev, pdb_dma, 2125 sizeof(*pdb), DMA_FROM_DEVICE); 2126 2127 return rval; 2128 } 2129 2130 /* 2131 * qla2x00_get_firmware_state 2132 * Get adapter firmware state. 2133 * 2134 * Input: 2135 * ha = adapter block pointer. 2136 * dptr = pointer for firmware state. 2137 * TARGET_QUEUE_LOCK must be released. 2138 * ADAPTER_STATE_LOCK must be released. 2139 * 2140 * Returns: 2141 * qla2x00 local function return status code. 2142 * 2143 * Context: 2144 * Kernel context. 2145 */ 2146 int 2147 qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states) 2148 { 2149 int rval; 2150 mbx_cmd_t mc; 2151 mbx_cmd_t *mcp = &mc; 2152 struct qla_hw_data *ha = vha->hw; 2153 2154 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1054, 2155 "Entered %s.\n", __func__); 2156 2157 mcp->mb[0] = MBC_GET_FIRMWARE_STATE; 2158 mcp->out_mb = MBX_0; 2159 if (IS_FWI2_CAPABLE(vha->hw)) 2160 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 2161 else 2162 mcp->in_mb = MBX_1|MBX_0; 2163 mcp->tov = MBX_TOV_SECONDS; 2164 mcp->flags = 0; 2165 rval = qla2x00_mailbox_command(vha, mcp); 2166 2167 /* Return firmware states. 
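	 *
	 * mb[1] always carries the firmware state; on FWI2-capable ISPs
	 * the extra registers copied below include the SFP status in
	 * mb[3] (states[2]) and the D-Port status in mb[6] (states[5]).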
*/ 2168 states[0] = mcp->mb[1]; 2169 if (IS_FWI2_CAPABLE(vha->hw)) { 2170 states[1] = mcp->mb[2]; 2171 states[2] = mcp->mb[3]; /* SFP info */ 2172 states[3] = mcp->mb[4]; 2173 states[4] = mcp->mb[5]; 2174 states[5] = mcp->mb[6]; /* DPORT status */ 2175 } 2176 2177 if (rval != QLA_SUCCESS) { 2178 /*EMPTY*/ 2179 ql_dbg(ql_dbg_mbx, vha, 0x1055, "Failed=%x.\n", rval); 2180 } else { 2181 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 2182 if (mcp->mb[2] == 6 || mcp->mb[3] == 2) 2183 ql_dbg(ql_dbg_mbx, vha, 0x119e, 2184 "Invalid SFP/Validation Failed\n"); 2185 } 2186 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1056, 2187 "Done %s.\n", __func__); 2188 } 2189 2190 return rval; 2191 } 2192 2193 /* 2194 * qla2x00_get_port_name 2195 * Issue get port name mailbox command. 2196 * Returned name is in big endian format. 2197 * 2198 * Input: 2199 * ha = adapter block pointer. 2200 * loop_id = loop ID of device. 2201 * name = pointer for name. 2202 * TARGET_QUEUE_LOCK must be released. 2203 * ADAPTER_STATE_LOCK must be released. 2204 * 2205 * Returns: 2206 * qla2x00 local function return status code. 2207 * 2208 * Context: 2209 * Kernel context. 2210 */ 2211 int 2212 qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name, 2213 uint8_t opt) 2214 { 2215 int rval; 2216 mbx_cmd_t mc; 2217 mbx_cmd_t *mcp = &mc; 2218 2219 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1057, 2220 "Entered %s.\n", __func__); 2221 2222 mcp->mb[0] = MBC_GET_PORT_NAME; 2223 mcp->mb[9] = vha->vp_idx; 2224 mcp->out_mb = MBX_9|MBX_1|MBX_0; 2225 if (HAS_EXTENDED_IDS(vha->hw)) { 2226 mcp->mb[1] = loop_id; 2227 mcp->mb[10] = opt; 2228 mcp->out_mb |= MBX_10; 2229 } else { 2230 mcp->mb[1] = loop_id << 8 | opt; 2231 } 2232 2233 mcp->in_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 2234 mcp->tov = MBX_TOV_SECONDS; 2235 mcp->flags = 0; 2236 rval = qla2x00_mailbox_command(vha, mcp); 2237 2238 if (rval != QLA_SUCCESS) { 2239 /*EMPTY*/ 2240 ql_dbg(ql_dbg_mbx, vha, 0x1058, "Failed=%x.\n", rval); 2241 } else { 2242 if (name != NULL) { 2243 /* This function returns name in big endian. */ 2244 name[0] = MSB(mcp->mb[2]); 2245 name[1] = LSB(mcp->mb[2]); 2246 name[2] = MSB(mcp->mb[3]); 2247 name[3] = LSB(mcp->mb[3]); 2248 name[4] = MSB(mcp->mb[6]); 2249 name[5] = LSB(mcp->mb[6]); 2250 name[6] = MSB(mcp->mb[7]); 2251 name[7] = LSB(mcp->mb[7]); 2252 } 2253 2254 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1059, 2255 "Done %s.\n", __func__); 2256 } 2257 2258 return rval; 2259 } 2260 2261 /* 2262 * qla24xx_link_initialization 2263 * Issue link initialization mailbox command. 2264 * 2265 * Input: 2266 * ha = adapter block pointer. 2267 * TARGET_QUEUE_LOCK must be released. 2268 * ADAPTER_STATE_LOCK must be released. 2269 * 2270 * Returns: 2271 * qla2x00 local function return status code. 2272 * 2273 * Context: 2274 * Kernel context. 
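 *
 * Illustrative call sketch (a hedged example only -- the recovery action
 * shown is hypothetical and not a call site taken from this driver):
 *
 *	if (qla24xx_link_initialize(vha) != QLA_SUCCESS)
 *		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);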
2275 */ 2276 int 2277 qla24xx_link_initialize(scsi_qla_host_t *vha) 2278 { 2279 int rval; 2280 mbx_cmd_t mc; 2281 mbx_cmd_t *mcp = &mc; 2282 2283 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1152, 2284 "Entered %s.\n", __func__); 2285 2286 if (!IS_FWI2_CAPABLE(vha->hw) || IS_CNA_CAPABLE(vha->hw)) 2287 return QLA_FUNCTION_FAILED; 2288 2289 mcp->mb[0] = MBC_LINK_INITIALIZATION; 2290 mcp->mb[1] = BIT_4; 2291 if (vha->hw->operating_mode == LOOP) 2292 mcp->mb[1] |= BIT_6; 2293 else 2294 mcp->mb[1] |= BIT_5; 2295 mcp->mb[2] = 0; 2296 mcp->mb[3] = 0; 2297 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 2298 mcp->in_mb = MBX_0; 2299 mcp->tov = MBX_TOV_SECONDS; 2300 mcp->flags = 0; 2301 rval = qla2x00_mailbox_command(vha, mcp); 2302 2303 if (rval != QLA_SUCCESS) { 2304 ql_dbg(ql_dbg_mbx, vha, 0x1153, "Failed=%x.\n", rval); 2305 } else { 2306 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1154, 2307 "Done %s.\n", __func__); 2308 } 2309 2310 return rval; 2311 } 2312 2313 /* 2314 * qla2x00_lip_reset 2315 * Issue LIP reset mailbox command. 2316 * 2317 * Input: 2318 * ha = adapter block pointer. 2319 * TARGET_QUEUE_LOCK must be released. 2320 * ADAPTER_STATE_LOCK must be released. 2321 * 2322 * Returns: 2323 * qla2x00 local function return status code. 2324 * 2325 * Context: 2326 * Kernel context. 2327 */ 2328 int 2329 qla2x00_lip_reset(scsi_qla_host_t *vha) 2330 { 2331 int rval; 2332 mbx_cmd_t mc; 2333 mbx_cmd_t *mcp = &mc; 2334 2335 ql_dbg(ql_dbg_disc, vha, 0x105a, 2336 "Entered %s.\n", __func__); 2337 2338 if (IS_CNA_CAPABLE(vha->hw)) { 2339 /* Logout across all FCFs. */ 2340 mcp->mb[0] = MBC_LIP_FULL_LOGIN; 2341 mcp->mb[1] = BIT_1; 2342 mcp->mb[2] = 0; 2343 mcp->out_mb = MBX_2|MBX_1|MBX_0; 2344 } else if (IS_FWI2_CAPABLE(vha->hw)) { 2345 mcp->mb[0] = MBC_LIP_FULL_LOGIN; 2346 mcp->mb[1] = BIT_4; 2347 mcp->mb[2] = 0; 2348 mcp->mb[3] = vha->hw->loop_reset_delay; 2349 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 2350 } else { 2351 mcp->mb[0] = MBC_LIP_RESET; 2352 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 2353 if (HAS_EXTENDED_IDS(vha->hw)) { 2354 mcp->mb[1] = 0x00ff; 2355 mcp->mb[10] = 0; 2356 mcp->out_mb |= MBX_10; 2357 } else { 2358 mcp->mb[1] = 0xff00; 2359 } 2360 mcp->mb[2] = vha->hw->loop_reset_delay; 2361 mcp->mb[3] = 0; 2362 } 2363 mcp->in_mb = MBX_0; 2364 mcp->tov = MBX_TOV_SECONDS; 2365 mcp->flags = 0; 2366 rval = qla2x00_mailbox_command(vha, mcp); 2367 2368 if (rval != QLA_SUCCESS) { 2369 /*EMPTY*/ 2370 ql_dbg(ql_dbg_mbx, vha, 0x105b, "Failed=%x.\n", rval); 2371 } else { 2372 /*EMPTY*/ 2373 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105c, 2374 "Done %s.\n", __func__); 2375 } 2376 2377 return rval; 2378 } 2379 2380 /* 2381 * qla2x00_send_sns 2382 * Send SNS command. 2383 * 2384 * Input: 2385 * ha = adapter block pointer. 2386 * sns = pointer for command. 2387 * cmd_size = command size. 2388 * buf_size = response/command size. 2389 * TARGET_QUEUE_LOCK must be released. 2390 * ADAPTER_STATE_LOCK must be released. 2391 * 2392 * Returns: 2393 * qla2x00 local function return status code. 2394 * 2395 * Context: 2396 * Kernel context. 
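 *
 * Note: sns_phys_address must be the DMA address of a single buffer that
 * holds the SNS request on entry and receives the response in place (the
 * command is issued with MBX_DMA_OUT|MBX_DMA_IN below), so buf_size
 * should cover the larger of the two.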
2397 */ 2398 int 2399 qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address, 2400 uint16_t cmd_size, size_t buf_size) 2401 { 2402 int rval; 2403 mbx_cmd_t mc; 2404 mbx_cmd_t *mcp = &mc; 2405 2406 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105d, 2407 "Entered %s.\n", __func__); 2408 2409 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105e, 2410 "Retry cnt=%d ratov=%d total tov=%d.\n", 2411 vha->hw->retry_count, vha->hw->login_timeout, mcp->tov); 2412 2413 mcp->mb[0] = MBC_SEND_SNS_COMMAND; 2414 mcp->mb[1] = cmd_size; 2415 mcp->mb[2] = MSW(sns_phys_address); 2416 mcp->mb[3] = LSW(sns_phys_address); 2417 mcp->mb[6] = MSW(MSD(sns_phys_address)); 2418 mcp->mb[7] = LSW(MSD(sns_phys_address)); 2419 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 2420 mcp->in_mb = MBX_0|MBX_1; 2421 mcp->buf_size = buf_size; 2422 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN; 2423 mcp->tov = (vha->hw->login_timeout * 2) + (vha->hw->login_timeout / 2); 2424 rval = qla2x00_mailbox_command(vha, mcp); 2425 2426 if (rval != QLA_SUCCESS) { 2427 /*EMPTY*/ 2428 ql_dbg(ql_dbg_mbx, vha, 0x105f, 2429 "Failed=%x mb[0]=%x mb[1]=%x.\n", 2430 rval, mcp->mb[0], mcp->mb[1]); 2431 } else { 2432 /*EMPTY*/ 2433 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1060, 2434 "Done %s.\n", __func__); 2435 } 2436 2437 return rval; 2438 } 2439 2440 int 2441 qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, 2442 uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt) 2443 { 2444 int rval; 2445 2446 struct logio_entry_24xx *lg; 2447 dma_addr_t lg_dma; 2448 uint32_t iop[2]; 2449 struct qla_hw_data *ha = vha->hw; 2450 struct req_que *req; 2451 2452 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1061, 2453 "Entered %s.\n", __func__); 2454 2455 if (vha->vp_idx && vha->qpair) 2456 req = vha->qpair->req; 2457 else 2458 req = ha->req_q_map[0]; 2459 2460 lg = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma); 2461 if (lg == NULL) { 2462 ql_log(ql_log_warn, vha, 0x1062, 2463 "Failed to allocate login IOCB.\n"); 2464 return QLA_MEMORY_ALLOC_FAILED; 2465 } 2466 2467 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE; 2468 lg->entry_count = 1; 2469 lg->handle = make_handle(req->id, lg->handle); 2470 lg->nport_handle = cpu_to_le16(loop_id); 2471 lg->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI); 2472 if (opt & BIT_0) 2473 lg->control_flags |= cpu_to_le16(LCF_COND_PLOGI); 2474 if (opt & BIT_1) 2475 lg->control_flags |= cpu_to_le16(LCF_SKIP_PRLI); 2476 lg->port_id[0] = al_pa; 2477 lg->port_id[1] = area; 2478 lg->port_id[2] = domain; 2479 lg->vp_index = vha->vp_idx; 2480 rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0, 2481 (ha->r_a_tov / 10 * 2) + 2); 2482 if (rval != QLA_SUCCESS) { 2483 ql_dbg(ql_dbg_mbx, vha, 0x1063, 2484 "Failed to issue login IOCB (%x).\n", rval); 2485 } else if (lg->entry_status != 0) { 2486 ql_dbg(ql_dbg_mbx, vha, 0x1064, 2487 "Failed to complete IOCB -- error status (%x).\n", 2488 lg->entry_status); 2489 rval = QLA_FUNCTION_FAILED; 2490 } else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) { 2491 iop[0] = le32_to_cpu(lg->io_parameter[0]); 2492 iop[1] = le32_to_cpu(lg->io_parameter[1]); 2493 2494 ql_dbg(ql_dbg_mbx, vha, 0x1065, 2495 "Failed to complete IOCB -- completion status (%x) " 2496 "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status), 2497 iop[0], iop[1]); 2498 2499 switch (iop[0]) { 2500 case LSC_SCODE_PORTID_USED: 2501 mb[0] = MBS_PORT_ID_USED; 2502 mb[1] = LSW(iop[1]); 2503 break; 2504 case LSC_SCODE_NPORT_USED: 2505 mb[0] = MBS_LOOP_ID_USED; 2506 break; 2507 case LSC_SCODE_NOLINK: 2508 case LSC_SCODE_NOIOCB: 
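		/*
		 * Every remaining login status code, and anything
		 * unrecognized, is collapsed into a generic
		 * MBS_COMMAND_ERROR for the caller; only the port-ID-used
		 * and loop-ID-used cases above get dedicated mailbox
		 * statuses.
		 */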
2509 case LSC_SCODE_NOXCB: 2510 case LSC_SCODE_CMD_FAILED: 2511 case LSC_SCODE_NOFABRIC: 2512 case LSC_SCODE_FW_NOT_READY: 2513 case LSC_SCODE_NOT_LOGGED_IN: 2514 case LSC_SCODE_NOPCB: 2515 case LSC_SCODE_ELS_REJECT: 2516 case LSC_SCODE_CMD_PARAM_ERR: 2517 case LSC_SCODE_NONPORT: 2518 case LSC_SCODE_LOGGED_IN: 2519 case LSC_SCODE_NOFLOGI_ACC: 2520 default: 2521 mb[0] = MBS_COMMAND_ERROR; 2522 break; 2523 } 2524 } else { 2525 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1066, 2526 "Done %s.\n", __func__); 2527 2528 iop[0] = le32_to_cpu(lg->io_parameter[0]); 2529 2530 mb[0] = MBS_COMMAND_COMPLETE; 2531 mb[1] = 0; 2532 if (iop[0] & BIT_4) { 2533 if (iop[0] & BIT_8) 2534 mb[1] |= BIT_1; 2535 } else 2536 mb[1] = BIT_0; 2537 2538 /* Passback COS information. */ 2539 mb[10] = 0; 2540 if (lg->io_parameter[7] || lg->io_parameter[8]) 2541 mb[10] |= BIT_0; /* Class 2. */ 2542 if (lg->io_parameter[9] || lg->io_parameter[10]) 2543 mb[10] |= BIT_1; /* Class 3. */ 2544 if (lg->io_parameter[0] & cpu_to_le32(BIT_7)) 2545 mb[10] |= BIT_7; /* Confirmed Completion 2546 * Allowed 2547 */ 2548 } 2549 2550 dma_pool_free(ha->s_dma_pool, lg, lg_dma); 2551 2552 return rval; 2553 } 2554 2555 /* 2556 * qla2x00_login_fabric 2557 * Issue login fabric port mailbox command. 2558 * 2559 * Input: 2560 * ha = adapter block pointer. 2561 * loop_id = device loop ID. 2562 * domain = device domain. 2563 * area = device area. 2564 * al_pa = device AL_PA. 2565 * status = pointer for return status. 2566 * opt = command options. 2567 * TARGET_QUEUE_LOCK must be released. 2568 * ADAPTER_STATE_LOCK must be released. 2569 * 2570 * Returns: 2571 * qla2x00 local function return status code. 2572 * 2573 * Context: 2574 * Kernel context. 2575 */ 2576 int 2577 qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, 2578 uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt) 2579 { 2580 int rval; 2581 mbx_cmd_t mc; 2582 mbx_cmd_t *mcp = &mc; 2583 struct qla_hw_data *ha = vha->hw; 2584 2585 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1067, 2586 "Entered %s.\n", __func__); 2587 2588 mcp->mb[0] = MBC_LOGIN_FABRIC_PORT; 2589 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 2590 if (HAS_EXTENDED_IDS(ha)) { 2591 mcp->mb[1] = loop_id; 2592 mcp->mb[10] = opt; 2593 mcp->out_mb |= MBX_10; 2594 } else { 2595 mcp->mb[1] = (loop_id << 8) | opt; 2596 } 2597 mcp->mb[2] = domain; 2598 mcp->mb[3] = area << 8 | al_pa; 2599 2600 mcp->in_mb = MBX_7|MBX_6|MBX_2|MBX_1|MBX_0; 2601 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2); 2602 mcp->flags = 0; 2603 rval = qla2x00_mailbox_command(vha, mcp); 2604 2605 /* Return mailbox statuses. */ 2606 if (mb != NULL) { 2607 mb[0] = mcp->mb[0]; 2608 mb[1] = mcp->mb[1]; 2609 mb[2] = mcp->mb[2]; 2610 mb[6] = mcp->mb[6]; 2611 mb[7] = mcp->mb[7]; 2612 /* COS retrieved from Get-Port-Database mailbox command. */ 2613 mb[10] = 0; 2614 } 2615 2616 if (rval != QLA_SUCCESS) { 2617 /* RLU tmp code: need to change main mailbox_command function to 2618 * return ok even when the mailbox completion value is not 2619 * SUCCESS. The caller needs to be responsible to interpret 2620 * the return values of this mailbox command if we're not 2621 * to change too much of the existing code. 
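	 * The 0x400x values tested below are the standard ISP mailbox
	 * completion statuses (invalid command, host interface error,
	 * test failed, command error, command parameter error -- see the
	 * MBS_* definitions in qla_def.h); for those the call is reported
	 * as successful and the caller is expected to inspect mb[0].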
2622 */ 2623 if (mcp->mb[0] == 0x4001 || mcp->mb[0] == 0x4002 || 2624 mcp->mb[0] == 0x4003 || mcp->mb[0] == 0x4005 || 2625 mcp->mb[0] == 0x4006) 2626 rval = QLA_SUCCESS; 2627 2628 /*EMPTY*/ 2629 ql_dbg(ql_dbg_mbx, vha, 0x1068, 2630 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", 2631 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); 2632 } else { 2633 /*EMPTY*/ 2634 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1069, 2635 "Done %s.\n", __func__); 2636 } 2637 2638 return rval; 2639 } 2640 2641 /* 2642 * qla2x00_login_local_device 2643 * Issue login loop port mailbox command. 2644 * 2645 * Input: 2646 * ha = adapter block pointer. 2647 * loop_id = device loop ID. 2648 * opt = command options. 2649 * 2650 * Returns: 2651 * Return status code. 2652 * 2653 * Context: 2654 * Kernel context. 2655 * 2656 */ 2657 int 2658 qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport, 2659 uint16_t *mb_ret, uint8_t opt) 2660 { 2661 int rval; 2662 mbx_cmd_t mc; 2663 mbx_cmd_t *mcp = &mc; 2664 struct qla_hw_data *ha = vha->hw; 2665 2666 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106a, 2667 "Entered %s.\n", __func__); 2668 2669 if (IS_FWI2_CAPABLE(ha)) 2670 return qla24xx_login_fabric(vha, fcport->loop_id, 2671 fcport->d_id.b.domain, fcport->d_id.b.area, 2672 fcport->d_id.b.al_pa, mb_ret, opt); 2673 2674 mcp->mb[0] = MBC_LOGIN_LOOP_PORT; 2675 if (HAS_EXTENDED_IDS(ha)) 2676 mcp->mb[1] = fcport->loop_id; 2677 else 2678 mcp->mb[1] = fcport->loop_id << 8; 2679 mcp->mb[2] = opt; 2680 mcp->out_mb = MBX_2|MBX_1|MBX_0; 2681 mcp->in_mb = MBX_7|MBX_6|MBX_1|MBX_0; 2682 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2); 2683 mcp->flags = 0; 2684 rval = qla2x00_mailbox_command(vha, mcp); 2685 2686 /* Return mailbox statuses. */ 2687 if (mb_ret != NULL) { 2688 mb_ret[0] = mcp->mb[0]; 2689 mb_ret[1] = mcp->mb[1]; 2690 mb_ret[6] = mcp->mb[6]; 2691 mb_ret[7] = mcp->mb[7]; 2692 } 2693 2694 if (rval != QLA_SUCCESS) { 2695 /* AV tmp code: need to change main mailbox_command function to 2696 * return ok even when the mailbox completion value is not 2697 * SUCCESS. The caller needs to be responsible to interpret 2698 * the return values of this mailbox command if we're not 2699 * to change too much of the existing code. 
2700 */ 2701 if (mcp->mb[0] == 0x4005 || mcp->mb[0] == 0x4006) 2702 rval = QLA_SUCCESS; 2703 2704 ql_dbg(ql_dbg_mbx, vha, 0x106b, 2705 "Failed=%x mb[0]=%x mb[1]=%x mb[6]=%x mb[7]=%x.\n", 2706 rval, mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]); 2707 } else { 2708 /*EMPTY*/ 2709 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106c, 2710 "Done %s.\n", __func__); 2711 } 2712 2713 return (rval); 2714 } 2715 2716 int 2717 qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, 2718 uint8_t area, uint8_t al_pa) 2719 { 2720 int rval; 2721 struct logio_entry_24xx *lg; 2722 dma_addr_t lg_dma; 2723 struct qla_hw_data *ha = vha->hw; 2724 struct req_que *req; 2725 2726 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106d, 2727 "Entered %s.\n", __func__); 2728 2729 lg = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma); 2730 if (lg == NULL) { 2731 ql_log(ql_log_warn, vha, 0x106e, 2732 "Failed to allocate logout IOCB.\n"); 2733 return QLA_MEMORY_ALLOC_FAILED; 2734 } 2735 2736 req = vha->req; 2737 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE; 2738 lg->entry_count = 1; 2739 lg->handle = make_handle(req->id, lg->handle); 2740 lg->nport_handle = cpu_to_le16(loop_id); 2741 lg->control_flags = 2742 cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO| 2743 LCF_FREE_NPORT); 2744 lg->port_id[0] = al_pa; 2745 lg->port_id[1] = area; 2746 lg->port_id[2] = domain; 2747 lg->vp_index = vha->vp_idx; 2748 rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0, 2749 (ha->r_a_tov / 10 * 2) + 2); 2750 if (rval != QLA_SUCCESS) { 2751 ql_dbg(ql_dbg_mbx, vha, 0x106f, 2752 "Failed to issue logout IOCB (%x).\n", rval); 2753 } else if (lg->entry_status != 0) { 2754 ql_dbg(ql_dbg_mbx, vha, 0x1070, 2755 "Failed to complete IOCB -- error status (%x).\n", 2756 lg->entry_status); 2757 rval = QLA_FUNCTION_FAILED; 2758 } else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) { 2759 ql_dbg(ql_dbg_mbx, vha, 0x1071, 2760 "Failed to complete IOCB -- completion status (%x) " 2761 "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status), 2762 le32_to_cpu(lg->io_parameter[0]), 2763 le32_to_cpu(lg->io_parameter[1])); 2764 } else { 2765 /*EMPTY*/ 2766 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1072, 2767 "Done %s.\n", __func__); 2768 } 2769 2770 dma_pool_free(ha->s_dma_pool, lg, lg_dma); 2771 2772 return rval; 2773 } 2774 2775 /* 2776 * qla2x00_fabric_logout 2777 * Issue logout fabric port mailbox command. 2778 * 2779 * Input: 2780 * ha = adapter block pointer. 2781 * loop_id = device loop ID. 2782 * TARGET_QUEUE_LOCK must be released. 2783 * ADAPTER_STATE_LOCK must be released. 2784 * 2785 * Returns: 2786 * qla2x00 local function return status code. 2787 * 2788 * Context: 2789 * Kernel context. 
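 *
 * As with the other legacy mailbox commands in this file, adapters with
 * extended loop IDs pass the loop ID directly in mb[1] (and add mb[10]
 * to the out mask); older parts pack it into the upper byte of mb[1].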
2790 */ 2791 int 2792 qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, 2793 uint8_t area, uint8_t al_pa) 2794 { 2795 int rval; 2796 mbx_cmd_t mc; 2797 mbx_cmd_t *mcp = &mc; 2798 2799 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1073, 2800 "Entered %s.\n", __func__); 2801 2802 mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT; 2803 mcp->out_mb = MBX_1|MBX_0; 2804 if (HAS_EXTENDED_IDS(vha->hw)) { 2805 mcp->mb[1] = loop_id; 2806 mcp->mb[10] = 0; 2807 mcp->out_mb |= MBX_10; 2808 } else { 2809 mcp->mb[1] = loop_id << 8; 2810 } 2811 2812 mcp->in_mb = MBX_1|MBX_0; 2813 mcp->tov = MBX_TOV_SECONDS; 2814 mcp->flags = 0; 2815 rval = qla2x00_mailbox_command(vha, mcp); 2816 2817 if (rval != QLA_SUCCESS) { 2818 /*EMPTY*/ 2819 ql_dbg(ql_dbg_mbx, vha, 0x1074, 2820 "Failed=%x mb[1]=%x.\n", rval, mcp->mb[1]); 2821 } else { 2822 /*EMPTY*/ 2823 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1075, 2824 "Done %s.\n", __func__); 2825 } 2826 2827 return rval; 2828 } 2829 2830 /* 2831 * qla2x00_full_login_lip 2832 * Issue full login LIP mailbox command. 2833 * 2834 * Input: 2835 * ha = adapter block pointer. 2836 * TARGET_QUEUE_LOCK must be released. 2837 * ADAPTER_STATE_LOCK must be released. 2838 * 2839 * Returns: 2840 * qla2x00 local function return status code. 2841 * 2842 * Context: 2843 * Kernel context. 2844 */ 2845 int 2846 qla2x00_full_login_lip(scsi_qla_host_t *vha) 2847 { 2848 int rval; 2849 mbx_cmd_t mc; 2850 mbx_cmd_t *mcp = &mc; 2851 2852 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1076, 2853 "Entered %s.\n", __func__); 2854 2855 mcp->mb[0] = MBC_LIP_FULL_LOGIN; 2856 mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? BIT_4 : 0; 2857 mcp->mb[2] = 0; 2858 mcp->mb[3] = 0; 2859 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 2860 mcp->in_mb = MBX_0; 2861 mcp->tov = MBX_TOV_SECONDS; 2862 mcp->flags = 0; 2863 rval = qla2x00_mailbox_command(vha, mcp); 2864 2865 if (rval != QLA_SUCCESS) { 2866 /*EMPTY*/ 2867 ql_dbg(ql_dbg_mbx, vha, 0x1077, "Failed=%x.\n", rval); 2868 } else { 2869 /*EMPTY*/ 2870 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1078, 2871 "Done %s.\n", __func__); 2872 } 2873 2874 return rval; 2875 } 2876 2877 /* 2878 * qla2x00_get_id_list 2879 * 2880 * Input: 2881 * ha = adapter block pointer. 2882 * 2883 * Returns: 2884 * qla2x00 local function return status code. 2885 * 2886 * Context: 2887 * Kernel context. 
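 *
 * On success *entries is taken from mb[1]. Illustrative call sketch
 * (hedged; the gid_list buffer shown is the one the driver allocates
 * elsewhere, and the debug code 0x0000 is a placeholder):
 *
 *	uint16_t entries = 0;
 *
 *	if (qla2x00_get_id_list(vha, vha->hw->gid_list,
 *	    vha->hw->gid_list_dma, &entries) == QLA_SUCCESS)
 *		ql_dbg(ql_dbg_disc, vha, 0x0000, "%d IDs\n", entries);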
2888 */ 2889 int 2890 qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma, 2891 uint16_t *entries) 2892 { 2893 int rval; 2894 mbx_cmd_t mc; 2895 mbx_cmd_t *mcp = &mc; 2896 2897 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1079, 2898 "Entered %s.\n", __func__); 2899 2900 if (id_list == NULL) 2901 return QLA_FUNCTION_FAILED; 2902 2903 mcp->mb[0] = MBC_GET_ID_LIST; 2904 mcp->out_mb = MBX_0; 2905 if (IS_FWI2_CAPABLE(vha->hw)) { 2906 mcp->mb[2] = MSW(id_list_dma); 2907 mcp->mb[3] = LSW(id_list_dma); 2908 mcp->mb[6] = MSW(MSD(id_list_dma)); 2909 mcp->mb[7] = LSW(MSD(id_list_dma)); 2910 mcp->mb[8] = 0; 2911 mcp->mb[9] = vha->vp_idx; 2912 mcp->out_mb |= MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2; 2913 } else { 2914 mcp->mb[1] = MSW(id_list_dma); 2915 mcp->mb[2] = LSW(id_list_dma); 2916 mcp->mb[3] = MSW(MSD(id_list_dma)); 2917 mcp->mb[6] = LSW(MSD(id_list_dma)); 2918 mcp->out_mb |= MBX_6|MBX_3|MBX_2|MBX_1; 2919 } 2920 mcp->in_mb = MBX_1|MBX_0; 2921 mcp->tov = MBX_TOV_SECONDS; 2922 mcp->flags = 0; 2923 rval = qla2x00_mailbox_command(vha, mcp); 2924 2925 if (rval != QLA_SUCCESS) { 2926 /*EMPTY*/ 2927 ql_dbg(ql_dbg_mbx, vha, 0x107a, "Failed=%x.\n", rval); 2928 } else { 2929 *entries = mcp->mb[1]; 2930 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107b, 2931 "Done %s.\n", __func__); 2932 } 2933 2934 return rval; 2935 } 2936 2937 /* 2938 * qla2x00_get_resource_cnts 2939 * Get current firmware resource counts. 2940 * 2941 * Input: 2942 * ha = adapter block pointer. 2943 * 2944 * Returns: 2945 * qla2x00 local function return status code. 2946 * 2947 * Context: 2948 * Kernel context. 2949 */ 2950 int 2951 qla2x00_get_resource_cnts(scsi_qla_host_t *vha) 2952 { 2953 struct qla_hw_data *ha = vha->hw; 2954 int rval; 2955 mbx_cmd_t mc; 2956 mbx_cmd_t *mcp = &mc; 2957 2958 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107c, 2959 "Entered %s.\n", __func__); 2960 2961 mcp->mb[0] = MBC_GET_RESOURCE_COUNTS; 2962 mcp->out_mb = MBX_0; 2963 mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 2964 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || 2965 IS_QLA27XX(ha) || IS_QLA28XX(ha)) 2966 mcp->in_mb |= MBX_12; 2967 mcp->tov = MBX_TOV_SECONDS; 2968 mcp->flags = 0; 2969 rval = qla2x00_mailbox_command(vha, mcp); 2970 2971 if (rval != QLA_SUCCESS) { 2972 /*EMPTY*/ 2973 ql_dbg(ql_dbg_mbx, vha, 0x107d, 2974 "Failed mb[0]=%x.\n", mcp->mb[0]); 2975 } else { 2976 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107e, 2977 "Done %s mb1=%x mb2=%x mb3=%x mb6=%x mb7=%x mb10=%x " 2978 "mb11=%x mb12=%x.\n", __func__, mcp->mb[1], mcp->mb[2], 2979 mcp->mb[3], mcp->mb[6], mcp->mb[7], mcp->mb[10], 2980 mcp->mb[11], mcp->mb[12]); 2981 2982 ha->orig_fw_tgt_xcb_count = mcp->mb[1]; 2983 ha->cur_fw_tgt_xcb_count = mcp->mb[2]; 2984 ha->cur_fw_xcb_count = mcp->mb[3]; 2985 ha->orig_fw_xcb_count = mcp->mb[6]; 2986 ha->cur_fw_iocb_count = mcp->mb[7]; 2987 ha->orig_fw_iocb_count = mcp->mb[10]; 2988 if (ha->flags.npiv_supported) 2989 ha->max_npiv_vports = mcp->mb[11]; 2990 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) || 2991 IS_QLA28XX(ha)) 2992 ha->fw_max_fcf_count = mcp->mb[12]; 2993 } 2994 2995 return (rval); 2996 } 2997 2998 /* 2999 * qla2x00_get_fcal_position_map 3000 * Get FCAL (LILP) position map using mailbox command 3001 * 3002 * Input: 3003 * ha = adapter state pointer. 3004 * pos_map = buffer pointer (can be NULL). 3005 * 3006 * Returns: 3007 * qla2x00 local function return status code. 3008 * 3009 * Context: 3010 * Kernel context. 
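 *
 * When pos_map is non-NULL it must be able to hold FCAL_MAP_SIZE bytes;
 * the code below treats the first byte of the returned map as its
 * length when dumping it.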
3011 */ 3012 int 3013 qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map) 3014 { 3015 int rval; 3016 mbx_cmd_t mc; 3017 mbx_cmd_t *mcp = &mc; 3018 char *pmap; 3019 dma_addr_t pmap_dma; 3020 struct qla_hw_data *ha = vha->hw; 3021 3022 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107f, 3023 "Entered %s.\n", __func__); 3024 3025 pmap = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma); 3026 if (pmap == NULL) { 3027 ql_log(ql_log_warn, vha, 0x1080, 3028 "Memory alloc failed.\n"); 3029 return QLA_MEMORY_ALLOC_FAILED; 3030 } 3031 3032 mcp->mb[0] = MBC_GET_FC_AL_POSITION_MAP; 3033 mcp->mb[2] = MSW(pmap_dma); 3034 mcp->mb[3] = LSW(pmap_dma); 3035 mcp->mb[6] = MSW(MSD(pmap_dma)); 3036 mcp->mb[7] = LSW(MSD(pmap_dma)); 3037 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0; 3038 mcp->in_mb = MBX_1|MBX_0; 3039 mcp->buf_size = FCAL_MAP_SIZE; 3040 mcp->flags = MBX_DMA_IN; 3041 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2); 3042 rval = qla2x00_mailbox_command(vha, mcp); 3043 3044 if (rval == QLA_SUCCESS) { 3045 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1081, 3046 "mb0/mb1=%x/%X FC/AL position map size (%x).\n", 3047 mcp->mb[0], mcp->mb[1], (unsigned)pmap[0]); 3048 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111d, 3049 pmap, pmap[0] + 1); 3050 3051 if (pos_map) 3052 memcpy(pos_map, pmap, FCAL_MAP_SIZE); 3053 } 3054 dma_pool_free(ha->s_dma_pool, pmap, pmap_dma); 3055 3056 if (rval != QLA_SUCCESS) { 3057 ql_dbg(ql_dbg_mbx, vha, 0x1082, "Failed=%x.\n", rval); 3058 } else { 3059 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1083, 3060 "Done %s.\n", __func__); 3061 } 3062 3063 return rval; 3064 } 3065 3066 /* 3067 * qla2x00_get_link_status 3068 * 3069 * Input: 3070 * ha = adapter block pointer. 3071 * loop_id = device loop ID. 3072 * ret_buf = pointer to link status return buffer. 3073 * 3074 * Returns: 3075 * 0 = success. 3076 * BIT_0 = mem alloc error. 3077 * BIT_1 = mailbox error. 3078 */ 3079 int 3080 qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id, 3081 struct link_statistics *stats, dma_addr_t stats_dma) 3082 { 3083 int rval; 3084 mbx_cmd_t mc; 3085 mbx_cmd_t *mcp = &mc; 3086 uint32_t *iter = (uint32_t *)stats; 3087 ushort dwords = offsetof(typeof(*stats), link_up_cnt)/sizeof(*iter); 3088 struct qla_hw_data *ha = vha->hw; 3089 3090 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1084, 3091 "Entered %s.\n", __func__); 3092 3093 mcp->mb[0] = MBC_GET_LINK_STATUS; 3094 mcp->mb[2] = MSW(LSD(stats_dma)); 3095 mcp->mb[3] = LSW(LSD(stats_dma)); 3096 mcp->mb[6] = MSW(MSD(stats_dma)); 3097 mcp->mb[7] = LSW(MSD(stats_dma)); 3098 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0; 3099 mcp->in_mb = MBX_0; 3100 if (IS_FWI2_CAPABLE(ha)) { 3101 mcp->mb[1] = loop_id; 3102 mcp->mb[4] = 0; 3103 mcp->mb[10] = 0; 3104 mcp->out_mb |= MBX_10|MBX_4|MBX_1; 3105 mcp->in_mb |= MBX_1; 3106 } else if (HAS_EXTENDED_IDS(ha)) { 3107 mcp->mb[1] = loop_id; 3108 mcp->mb[10] = 0; 3109 mcp->out_mb |= MBX_10|MBX_1; 3110 } else { 3111 mcp->mb[1] = loop_id << 8; 3112 mcp->out_mb |= MBX_1; 3113 } 3114 mcp->tov = MBX_TOV_SECONDS; 3115 mcp->flags = IOCTL_CMD; 3116 rval = qla2x00_mailbox_command(vha, mcp); 3117 3118 if (rval == QLA_SUCCESS) { 3119 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) { 3120 ql_dbg(ql_dbg_mbx, vha, 0x1085, 3121 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 3122 rval = QLA_FUNCTION_FAILED; 3123 } else { 3124 /* Re-endianize - firmware data is le32. 
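			 *
			 * Only the words before the link_up_cnt field are
			 * converted (dwords was sized with offsetof()
			 * above); the remainder of struct link_statistics
			 * is left as-is here.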
*/ 3125 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1086, 3126 "Done %s.\n", __func__); 3127 for ( ; dwords--; iter++) 3128 le32_to_cpus(iter); 3129 } 3130 } else { 3131 /* Failed. */ 3132 ql_dbg(ql_dbg_mbx, vha, 0x1087, "Failed=%x.\n", rval); 3133 } 3134 3135 return rval; 3136 } 3137 3138 int 3139 qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats, 3140 dma_addr_t stats_dma, uint16_t options) 3141 { 3142 int rval; 3143 mbx_cmd_t mc; 3144 mbx_cmd_t *mcp = &mc; 3145 uint32_t *iter = (uint32_t *)stats; 3146 ushort dwords = sizeof(*stats)/sizeof(*iter); 3147 3148 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1088, 3149 "Entered %s.\n", __func__); 3150 3151 memset(&mc, 0, sizeof(mc)); 3152 mc.mb[0] = MBC_GET_LINK_PRIV_STATS; 3153 mc.mb[2] = MSW(LSD(stats_dma)); 3154 mc.mb[3] = LSW(LSD(stats_dma)); 3155 mc.mb[6] = MSW(MSD(stats_dma)); 3156 mc.mb[7] = LSW(MSD(stats_dma)); 3157 mc.mb[8] = dwords; 3158 mc.mb[9] = vha->vp_idx; 3159 mc.mb[10] = options; 3160 3161 rval = qla24xx_send_mb_cmd(vha, &mc); 3162 3163 if (rval == QLA_SUCCESS) { 3164 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) { 3165 ql_dbg(ql_dbg_mbx, vha, 0x1089, 3166 "Failed mb[0]=%x.\n", mcp->mb[0]); 3167 rval = QLA_FUNCTION_FAILED; 3168 } else { 3169 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108a, 3170 "Done %s.\n", __func__); 3171 /* Re-endianize - firmware data is le32. */ 3172 for ( ; dwords--; iter++) 3173 le32_to_cpus(iter); 3174 } 3175 } else { 3176 /* Failed. */ 3177 ql_dbg(ql_dbg_mbx, vha, 0x108b, "Failed=%x.\n", rval); 3178 } 3179 3180 return rval; 3181 } 3182 3183 int 3184 qla24xx_abort_command(srb_t *sp) 3185 { 3186 int rval; 3187 unsigned long flags = 0; 3188 3189 struct abort_entry_24xx *abt; 3190 dma_addr_t abt_dma; 3191 uint32_t handle; 3192 fc_port_t *fcport = sp->fcport; 3193 struct scsi_qla_host *vha = fcport->vha; 3194 struct qla_hw_data *ha = vha->hw; 3195 struct req_que *req = vha->req; 3196 struct qla_qpair *qpair = sp->qpair; 3197 3198 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c, 3199 "Entered %s.\n", __func__); 3200 3201 if (sp->qpair) 3202 req = sp->qpair->req; 3203 else 3204 return QLA_FUNCTION_FAILED; 3205 3206 if (ql2xasynctmfenable) 3207 return qla24xx_async_abort_command(sp); 3208 3209 spin_lock_irqsave(qpair->qp_lock_ptr, flags); 3210 for (handle = 1; handle < req->num_outstanding_cmds; handle++) { 3211 if (req->outstanding_cmds[handle] == sp) 3212 break; 3213 } 3214 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); 3215 if (handle == req->num_outstanding_cmds) { 3216 /* Command not found. 
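		 *
		 * The SRB is no longer in req->outstanding_cmds[], so
		 * there is nothing left to abort -- it has presumably
		 * already completed.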
*/ 3217 return QLA_FUNCTION_FAILED; 3218 } 3219 3220 abt = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &abt_dma); 3221 if (abt == NULL) { 3222 ql_log(ql_log_warn, vha, 0x108d, 3223 "Failed to allocate abort IOCB.\n"); 3224 return QLA_MEMORY_ALLOC_FAILED; 3225 } 3226 3227 abt->entry_type = ABORT_IOCB_TYPE; 3228 abt->entry_count = 1; 3229 abt->handle = make_handle(req->id, abt->handle); 3230 abt->nport_handle = cpu_to_le16(fcport->loop_id); 3231 abt->handle_to_abort = make_handle(req->id, handle); 3232 abt->port_id[0] = fcport->d_id.b.al_pa; 3233 abt->port_id[1] = fcport->d_id.b.area; 3234 abt->port_id[2] = fcport->d_id.b.domain; 3235 abt->vp_index = fcport->vha->vp_idx; 3236 3237 abt->req_que_no = cpu_to_le16(req->id); 3238 3239 rval = qla2x00_issue_iocb(vha, abt, abt_dma, 0); 3240 if (rval != QLA_SUCCESS) { 3241 ql_dbg(ql_dbg_mbx, vha, 0x108e, 3242 "Failed to issue IOCB (%x).\n", rval); 3243 } else if (abt->entry_status != 0) { 3244 ql_dbg(ql_dbg_mbx, vha, 0x108f, 3245 "Failed to complete IOCB -- error status (%x).\n", 3246 abt->entry_status); 3247 rval = QLA_FUNCTION_FAILED; 3248 } else if (abt->nport_handle != cpu_to_le16(0)) { 3249 ql_dbg(ql_dbg_mbx, vha, 0x1090, 3250 "Failed to complete IOCB -- completion status (%x).\n", 3251 le16_to_cpu(abt->nport_handle)); 3252 if (abt->nport_handle == cpu_to_le16(CS_IOCB_ERROR)) 3253 rval = QLA_FUNCTION_PARAMETER_ERROR; 3254 else 3255 rval = QLA_FUNCTION_FAILED; 3256 } else { 3257 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1091, 3258 "Done %s.\n", __func__); 3259 } 3260 3261 dma_pool_free(ha->s_dma_pool, abt, abt_dma); 3262 3263 return rval; 3264 } 3265 3266 struct tsk_mgmt_cmd { 3267 union { 3268 struct tsk_mgmt_entry tsk; 3269 struct sts_entry_24xx sts; 3270 } p; 3271 }; 3272 3273 static int 3274 __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport, 3275 uint64_t l, int tag) 3276 { 3277 int rval, rval2; 3278 struct tsk_mgmt_cmd *tsk; 3279 struct sts_entry_24xx *sts; 3280 dma_addr_t tsk_dma; 3281 scsi_qla_host_t *vha; 3282 struct qla_hw_data *ha; 3283 struct req_que *req; 3284 struct qla_qpair *qpair; 3285 3286 vha = fcport->vha; 3287 ha = vha->hw; 3288 req = vha->req; 3289 3290 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1092, 3291 "Entered %s.\n", __func__); 3292 3293 if (vha->vp_idx && vha->qpair) { 3294 /* NPIV port */ 3295 qpair = vha->qpair; 3296 req = qpair->req; 3297 } 3298 3299 tsk = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma); 3300 if (tsk == NULL) { 3301 ql_log(ql_log_warn, vha, 0x1093, 3302 "Failed to allocate task management IOCB.\n"); 3303 return QLA_MEMORY_ALLOC_FAILED; 3304 } 3305 3306 tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE; 3307 tsk->p.tsk.entry_count = 1; 3308 tsk->p.tsk.handle = make_handle(req->id, tsk->p.tsk.handle); 3309 tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id); 3310 tsk->p.tsk.timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); 3311 tsk->p.tsk.control_flags = cpu_to_le32(type); 3312 tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa; 3313 tsk->p.tsk.port_id[1] = fcport->d_id.b.area; 3314 tsk->p.tsk.port_id[2] = fcport->d_id.b.domain; 3315 tsk->p.tsk.vp_index = fcport->vha->vp_idx; 3316 if (type == TCF_LUN_RESET) { 3317 int_to_scsilun(l, &tsk->p.tsk.lun); 3318 host_to_fcp_swap((uint8_t *)&tsk->p.tsk.lun, 3319 sizeof(tsk->p.tsk.lun)); 3320 } 3321 3322 sts = &tsk->p.sts; 3323 rval = qla2x00_issue_iocb(vha, tsk, tsk_dma, 0); 3324 if (rval != QLA_SUCCESS) { 3325 ql_dbg(ql_dbg_mbx, vha, 0x1094, 3326 "Failed to issue %s reset IOCB (%x).\n", name, rval); 3327 } else if (sts->entry_status != 0) { 3328 
ql_dbg(ql_dbg_mbx, vha, 0x1095, 3329 "Failed to complete IOCB -- error status (%x).\n", 3330 sts->entry_status); 3331 rval = QLA_FUNCTION_FAILED; 3332 } else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) { 3333 ql_dbg(ql_dbg_mbx, vha, 0x1096, 3334 "Failed to complete IOCB -- completion status (%x).\n", 3335 le16_to_cpu(sts->comp_status)); 3336 rval = QLA_FUNCTION_FAILED; 3337 } else if (le16_to_cpu(sts->scsi_status) & 3338 SS_RESPONSE_INFO_LEN_VALID) { 3339 if (le32_to_cpu(sts->rsp_data_len) < 4) { 3340 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1097, 3341 "Ignoring inconsistent data length -- not enough " 3342 "response info (%d).\n", 3343 le32_to_cpu(sts->rsp_data_len)); 3344 } else if (sts->data[3]) { 3345 ql_dbg(ql_dbg_mbx, vha, 0x1098, 3346 "Failed to complete IOCB -- response (%x).\n", 3347 sts->data[3]); 3348 rval = QLA_FUNCTION_FAILED; 3349 } 3350 } 3351 3352 /* Issue marker IOCB. */ 3353 rval2 = qla2x00_marker(vha, ha->base_qpair, fcport->loop_id, l, 3354 type == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID); 3355 if (rval2 != QLA_SUCCESS) { 3356 ql_dbg(ql_dbg_mbx, vha, 0x1099, 3357 "Failed to issue marker IOCB (%x).\n", rval2); 3358 } else { 3359 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109a, 3360 "Done %s.\n", __func__); 3361 } 3362 3363 dma_pool_free(ha->s_dma_pool, tsk, tsk_dma); 3364 3365 return rval; 3366 } 3367 3368 int 3369 qla24xx_abort_target(struct fc_port *fcport, uint64_t l, int tag) 3370 { 3371 struct qla_hw_data *ha = fcport->vha->hw; 3372 3373 if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha)) 3374 return qla2x00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag); 3375 3376 return __qla24xx_issue_tmf("Target", TCF_TARGET_RESET, fcport, l, tag); 3377 } 3378 3379 int 3380 qla24xx_lun_reset(struct fc_port *fcport, uint64_t l, int tag) 3381 { 3382 struct qla_hw_data *ha = fcport->vha->hw; 3383 3384 if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha)) 3385 return qla2x00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag); 3386 3387 return __qla24xx_issue_tmf("Lun", TCF_LUN_RESET, fcport, l, tag); 3388 } 3389 3390 int 3391 qla2x00_system_error(scsi_qla_host_t *vha) 3392 { 3393 int rval; 3394 mbx_cmd_t mc; 3395 mbx_cmd_t *mcp = &mc; 3396 struct qla_hw_data *ha = vha->hw; 3397 3398 if (!IS_QLA23XX(ha) && !IS_FWI2_CAPABLE(ha)) 3399 return QLA_FUNCTION_FAILED; 3400 3401 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109b, 3402 "Entered %s.\n", __func__); 3403 3404 mcp->mb[0] = MBC_GEN_SYSTEM_ERROR; 3405 mcp->out_mb = MBX_0; 3406 mcp->in_mb = MBX_0; 3407 mcp->tov = 5; 3408 mcp->flags = 0; 3409 rval = qla2x00_mailbox_command(vha, mcp); 3410 3411 if (rval != QLA_SUCCESS) { 3412 ql_dbg(ql_dbg_mbx, vha, 0x109c, "Failed=%x.\n", rval); 3413 } else { 3414 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109d, 3415 "Done %s.\n", __func__); 3416 } 3417 3418 return rval; 3419 } 3420 3421 int 3422 qla2x00_write_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t data) 3423 { 3424 int rval; 3425 mbx_cmd_t mc; 3426 mbx_cmd_t *mcp = &mc; 3427 3428 if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) && 3429 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw)) 3430 return QLA_FUNCTION_FAILED; 3431 3432 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1182, 3433 "Entered %s.\n", __func__); 3434 3435 mcp->mb[0] = MBC_WRITE_SERDES; 3436 mcp->mb[1] = addr; 3437 if (IS_QLA2031(vha->hw)) 3438 mcp->mb[2] = data & 0xff; 3439 else 3440 mcp->mb[2] = data; 3441 3442 mcp->mb[3] = 0; 3443 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 3444 mcp->in_mb = MBX_0; 3445 mcp->tov = MBX_TOV_SECONDS; 3446 mcp->flags = 0; 3447 rval = 
qla2x00_mailbox_command(vha, mcp); 3448 3449 if (rval != QLA_SUCCESS) { 3450 ql_dbg(ql_dbg_mbx, vha, 0x1183, 3451 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 3452 } else { 3453 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1184, 3454 "Done %s.\n", __func__); 3455 } 3456 3457 return rval; 3458 } 3459 3460 int 3461 qla2x00_read_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t *data) 3462 { 3463 int rval; 3464 mbx_cmd_t mc; 3465 mbx_cmd_t *mcp = &mc; 3466 3467 if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) && 3468 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw)) 3469 return QLA_FUNCTION_FAILED; 3470 3471 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1185, 3472 "Entered %s.\n", __func__); 3473 3474 mcp->mb[0] = MBC_READ_SERDES; 3475 mcp->mb[1] = addr; 3476 mcp->mb[3] = 0; 3477 mcp->out_mb = MBX_3|MBX_1|MBX_0; 3478 mcp->in_mb = MBX_1|MBX_0; 3479 mcp->tov = MBX_TOV_SECONDS; 3480 mcp->flags = 0; 3481 rval = qla2x00_mailbox_command(vha, mcp); 3482 3483 if (IS_QLA2031(vha->hw)) 3484 *data = mcp->mb[1] & 0xff; 3485 else 3486 *data = mcp->mb[1]; 3487 3488 if (rval != QLA_SUCCESS) { 3489 ql_dbg(ql_dbg_mbx, vha, 0x1186, 3490 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 3491 } else { 3492 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1187, 3493 "Done %s.\n", __func__); 3494 } 3495 3496 return rval; 3497 } 3498 3499 int 3500 qla8044_write_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t data) 3501 { 3502 int rval; 3503 mbx_cmd_t mc; 3504 mbx_cmd_t *mcp = &mc; 3505 3506 if (!IS_QLA8044(vha->hw)) 3507 return QLA_FUNCTION_FAILED; 3508 3509 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x11a0, 3510 "Entered %s.\n", __func__); 3511 3512 mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG; 3513 mcp->mb[1] = HCS_WRITE_SERDES; 3514 mcp->mb[3] = LSW(addr); 3515 mcp->mb[4] = MSW(addr); 3516 mcp->mb[5] = LSW(data); 3517 mcp->mb[6] = MSW(data); 3518 mcp->out_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_1|MBX_0; 3519 mcp->in_mb = MBX_0; 3520 mcp->tov = MBX_TOV_SECONDS; 3521 mcp->flags = 0; 3522 rval = qla2x00_mailbox_command(vha, mcp); 3523 3524 if (rval != QLA_SUCCESS) { 3525 ql_dbg(ql_dbg_mbx, vha, 0x11a1, 3526 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 3527 } else { 3528 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1188, 3529 "Done %s.\n", __func__); 3530 } 3531 3532 return rval; 3533 } 3534 3535 int 3536 qla8044_read_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t *data) 3537 { 3538 int rval; 3539 mbx_cmd_t mc; 3540 mbx_cmd_t *mcp = &mc; 3541 3542 if (!IS_QLA8044(vha->hw)) 3543 return QLA_FUNCTION_FAILED; 3544 3545 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1189, 3546 "Entered %s.\n", __func__); 3547 3548 mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG; 3549 mcp->mb[1] = HCS_READ_SERDES; 3550 mcp->mb[3] = LSW(addr); 3551 mcp->mb[4] = MSW(addr); 3552 mcp->out_mb = MBX_4|MBX_3|MBX_1|MBX_0; 3553 mcp->in_mb = MBX_2|MBX_1|MBX_0; 3554 mcp->tov = MBX_TOV_SECONDS; 3555 mcp->flags = 0; 3556 rval = qla2x00_mailbox_command(vha, mcp); 3557 3558 *data = mcp->mb[2] << 16 | mcp->mb[1]; 3559 3560 if (rval != QLA_SUCCESS) { 3561 ql_dbg(ql_dbg_mbx, vha, 0x118a, 3562 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 3563 } else { 3564 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118b, 3565 "Done %s.\n", __func__); 3566 } 3567 3568 return rval; 3569 } 3570 3571 /** 3572 * qla2x00_set_serdes_params() - 3573 * @vha: HA context 3574 * @sw_em_1g: serial link options 3575 * @sw_em_2g: serial link options 3576 * @sw_em_4g: serial link options 3577 * 3578 * Returns 3579 */ 3580 int 3581 qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g, 3582 uint16_t 
sw_em_2g, uint16_t sw_em_4g) 3583 { 3584 int rval; 3585 mbx_cmd_t mc; 3586 mbx_cmd_t *mcp = &mc; 3587 3588 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109e, 3589 "Entered %s.\n", __func__); 3590 3591 mcp->mb[0] = MBC_SERDES_PARAMS; 3592 mcp->mb[1] = BIT_0; 3593 mcp->mb[2] = sw_em_1g | BIT_15; 3594 mcp->mb[3] = sw_em_2g | BIT_15; 3595 mcp->mb[4] = sw_em_4g | BIT_15; 3596 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 3597 mcp->in_mb = MBX_0; 3598 mcp->tov = MBX_TOV_SECONDS; 3599 mcp->flags = 0; 3600 rval = qla2x00_mailbox_command(vha, mcp); 3601 3602 if (rval != QLA_SUCCESS) { 3603 /*EMPTY*/ 3604 ql_dbg(ql_dbg_mbx, vha, 0x109f, 3605 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 3606 } else { 3607 /*EMPTY*/ 3608 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a0, 3609 "Done %s.\n", __func__); 3610 } 3611 3612 return rval; 3613 } 3614 3615 int 3616 qla2x00_stop_firmware(scsi_qla_host_t *vha) 3617 { 3618 int rval; 3619 mbx_cmd_t mc; 3620 mbx_cmd_t *mcp = &mc; 3621 3622 if (!IS_FWI2_CAPABLE(vha->hw)) 3623 return QLA_FUNCTION_FAILED; 3624 3625 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a1, 3626 "Entered %s.\n", __func__); 3627 3628 mcp->mb[0] = MBC_STOP_FIRMWARE; 3629 mcp->mb[1] = 0; 3630 mcp->out_mb = MBX_1|MBX_0; 3631 mcp->in_mb = MBX_0; 3632 mcp->tov = 5; 3633 mcp->flags = 0; 3634 rval = qla2x00_mailbox_command(vha, mcp); 3635 3636 if (rval != QLA_SUCCESS) { 3637 ql_dbg(ql_dbg_mbx, vha, 0x10a2, "Failed=%x.\n", rval); 3638 if (mcp->mb[0] == MBS_INVALID_COMMAND) 3639 rval = QLA_INVALID_COMMAND; 3640 } else { 3641 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a3, 3642 "Done %s.\n", __func__); 3643 } 3644 3645 return rval; 3646 } 3647 3648 int 3649 qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma, 3650 uint16_t buffers) 3651 { 3652 int rval; 3653 mbx_cmd_t mc; 3654 mbx_cmd_t *mcp = &mc; 3655 3656 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a4, 3657 "Entered %s.\n", __func__); 3658 3659 if (!IS_FWI2_CAPABLE(vha->hw)) 3660 return QLA_FUNCTION_FAILED; 3661 3662 if (unlikely(pci_channel_offline(vha->hw->pdev))) 3663 return QLA_FUNCTION_FAILED; 3664 3665 mcp->mb[0] = MBC_TRACE_CONTROL; 3666 mcp->mb[1] = TC_EFT_ENABLE; 3667 mcp->mb[2] = LSW(eft_dma); 3668 mcp->mb[3] = MSW(eft_dma); 3669 mcp->mb[4] = LSW(MSD(eft_dma)); 3670 mcp->mb[5] = MSW(MSD(eft_dma)); 3671 mcp->mb[6] = buffers; 3672 mcp->mb[7] = TC_AEN_DISABLE; 3673 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 3674 mcp->in_mb = MBX_1|MBX_0; 3675 mcp->tov = MBX_TOV_SECONDS; 3676 mcp->flags = 0; 3677 rval = qla2x00_mailbox_command(vha, mcp); 3678 if (rval != QLA_SUCCESS) { 3679 ql_dbg(ql_dbg_mbx, vha, 0x10a5, 3680 "Failed=%x mb[0]=%x mb[1]=%x.\n", 3681 rval, mcp->mb[0], mcp->mb[1]); 3682 } else { 3683 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a6, 3684 "Done %s.\n", __func__); 3685 } 3686 3687 return rval; 3688 } 3689 3690 int 3691 qla2x00_disable_eft_trace(scsi_qla_host_t *vha) 3692 { 3693 int rval; 3694 mbx_cmd_t mc; 3695 mbx_cmd_t *mcp = &mc; 3696 3697 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a7, 3698 "Entered %s.\n", __func__); 3699 3700 if (!IS_FWI2_CAPABLE(vha->hw)) 3701 return QLA_FUNCTION_FAILED; 3702 3703 if (unlikely(pci_channel_offline(vha->hw->pdev))) 3704 return QLA_FUNCTION_FAILED; 3705 3706 mcp->mb[0] = MBC_TRACE_CONTROL; 3707 mcp->mb[1] = TC_EFT_DISABLE; 3708 mcp->out_mb = MBX_1|MBX_0; 3709 mcp->in_mb = MBX_1|MBX_0; 3710 mcp->tov = MBX_TOV_SECONDS; 3711 mcp->flags = 0; 3712 rval = qla2x00_mailbox_command(vha, mcp); 3713 if (rval != QLA_SUCCESS) { 3714 ql_dbg(ql_dbg_mbx, vha, 0x10a8, 3715 "Failed=%x 
mb[0]=%x mb[1]=%x.\n", 3716 rval, mcp->mb[0], mcp->mb[1]); 3717 } else { 3718 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a9, 3719 "Done %s.\n", __func__); 3720 } 3721 3722 return rval; 3723 } 3724 3725 int 3726 qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma, 3727 uint16_t buffers, uint16_t *mb, uint32_t *dwords) 3728 { 3729 int rval; 3730 mbx_cmd_t mc; 3731 mbx_cmd_t *mcp = &mc; 3732 3733 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10aa, 3734 "Entered %s.\n", __func__); 3735 3736 if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw) && 3737 !IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) && 3738 !IS_QLA28XX(vha->hw)) 3739 return QLA_FUNCTION_FAILED; 3740 3741 if (unlikely(pci_channel_offline(vha->hw->pdev))) 3742 return QLA_FUNCTION_FAILED; 3743 3744 mcp->mb[0] = MBC_TRACE_CONTROL; 3745 mcp->mb[1] = TC_FCE_ENABLE; 3746 mcp->mb[2] = LSW(fce_dma); 3747 mcp->mb[3] = MSW(fce_dma); 3748 mcp->mb[4] = LSW(MSD(fce_dma)); 3749 mcp->mb[5] = MSW(MSD(fce_dma)); 3750 mcp->mb[6] = buffers; 3751 mcp->mb[7] = TC_AEN_DISABLE; 3752 mcp->mb[8] = 0; 3753 mcp->mb[9] = TC_FCE_DEFAULT_RX_SIZE; 3754 mcp->mb[10] = TC_FCE_DEFAULT_TX_SIZE; 3755 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2| 3756 MBX_1|MBX_0; 3757 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 3758 mcp->tov = MBX_TOV_SECONDS; 3759 mcp->flags = 0; 3760 rval = qla2x00_mailbox_command(vha, mcp); 3761 if (rval != QLA_SUCCESS) { 3762 ql_dbg(ql_dbg_mbx, vha, 0x10ab, 3763 "Failed=%x mb[0]=%x mb[1]=%x.\n", 3764 rval, mcp->mb[0], mcp->mb[1]); 3765 } else { 3766 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ac, 3767 "Done %s.\n", __func__); 3768 3769 if (mb) 3770 memcpy(mb, mcp->mb, 8 * sizeof(*mb)); 3771 if (dwords) 3772 *dwords = buffers; 3773 } 3774 3775 return rval; 3776 } 3777 3778 int 3779 qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd) 3780 { 3781 int rval; 3782 mbx_cmd_t mc; 3783 mbx_cmd_t *mcp = &mc; 3784 3785 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ad, 3786 "Entered %s.\n", __func__); 3787 3788 if (!IS_FWI2_CAPABLE(vha->hw)) 3789 return QLA_FUNCTION_FAILED; 3790 3791 if (unlikely(pci_channel_offline(vha->hw->pdev))) 3792 return QLA_FUNCTION_FAILED; 3793 3794 mcp->mb[0] = MBC_TRACE_CONTROL; 3795 mcp->mb[1] = TC_FCE_DISABLE; 3796 mcp->mb[2] = TC_FCE_DISABLE_TRACE; 3797 mcp->out_mb = MBX_2|MBX_1|MBX_0; 3798 mcp->in_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2| 3799 MBX_1|MBX_0; 3800 mcp->tov = MBX_TOV_SECONDS; 3801 mcp->flags = 0; 3802 rval = qla2x00_mailbox_command(vha, mcp); 3803 if (rval != QLA_SUCCESS) { 3804 ql_dbg(ql_dbg_mbx, vha, 0x10ae, 3805 "Failed=%x mb[0]=%x mb[1]=%x.\n", 3806 rval, mcp->mb[0], mcp->mb[1]); 3807 } else { 3808 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10af, 3809 "Done %s.\n", __func__); 3810 3811 if (wr) 3812 *wr = (uint64_t) mcp->mb[5] << 48 | 3813 (uint64_t) mcp->mb[4] << 32 | 3814 (uint64_t) mcp->mb[3] << 16 | 3815 (uint64_t) mcp->mb[2]; 3816 if (rd) 3817 *rd = (uint64_t) mcp->mb[9] << 48 | 3818 (uint64_t) mcp->mb[8] << 32 | 3819 (uint64_t) mcp->mb[7] << 16 | 3820 (uint64_t) mcp->mb[6]; 3821 } 3822 3823 return rval; 3824 } 3825 3826 int 3827 qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id, 3828 uint16_t *port_speed, uint16_t *mb) 3829 { 3830 int rval; 3831 mbx_cmd_t mc; 3832 mbx_cmd_t *mcp = &mc; 3833 3834 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b0, 3835 "Entered %s.\n", __func__); 3836 3837 if (!IS_IIDMA_CAPABLE(vha->hw)) 3838 return QLA_FUNCTION_FAILED; 3839 3840 mcp->mb[0] = MBC_PORT_PARAMS; 3841 mcp->mb[1] = loop_id; 
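	/*
	 * Leaving mb[2]/mb[3] clear selects the "get" form of the
	 * PORT_PARAMS command; the set variant below writes BIT_0 and the
	 * requested speed instead.
	 */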
3842 mcp->mb[2] = mcp->mb[3] = 0; 3843 mcp->mb[9] = vha->vp_idx; 3844 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0; 3845 mcp->in_mb = MBX_3|MBX_1|MBX_0; 3846 mcp->tov = MBX_TOV_SECONDS; 3847 mcp->flags = 0; 3848 rval = qla2x00_mailbox_command(vha, mcp); 3849 3850 /* Return mailbox statuses. */ 3851 if (mb) { 3852 mb[0] = mcp->mb[0]; 3853 mb[1] = mcp->mb[1]; 3854 mb[3] = mcp->mb[3]; 3855 } 3856 3857 if (rval != QLA_SUCCESS) { 3858 ql_dbg(ql_dbg_mbx, vha, 0x10b1, "Failed=%x.\n", rval); 3859 } else { 3860 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b2, 3861 "Done %s.\n", __func__); 3862 if (port_speed) 3863 *port_speed = mcp->mb[3]; 3864 } 3865 3866 return rval; 3867 } 3868 3869 int 3870 qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id, 3871 uint16_t port_speed, uint16_t *mb) 3872 { 3873 int rval; 3874 mbx_cmd_t mc; 3875 mbx_cmd_t *mcp = &mc; 3876 3877 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b3, 3878 "Entered %s.\n", __func__); 3879 3880 if (!IS_IIDMA_CAPABLE(vha->hw)) 3881 return QLA_FUNCTION_FAILED; 3882 3883 mcp->mb[0] = MBC_PORT_PARAMS; 3884 mcp->mb[1] = loop_id; 3885 mcp->mb[2] = BIT_0; 3886 mcp->mb[3] = port_speed & 0x3F; 3887 mcp->mb[9] = vha->vp_idx; 3888 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0; 3889 mcp->in_mb = MBX_3|MBX_1|MBX_0; 3890 mcp->tov = MBX_TOV_SECONDS; 3891 mcp->flags = 0; 3892 rval = qla2x00_mailbox_command(vha, mcp); 3893 3894 /* Return mailbox statuses. */ 3895 if (mb) { 3896 mb[0] = mcp->mb[0]; 3897 mb[1] = mcp->mb[1]; 3898 mb[3] = mcp->mb[3]; 3899 } 3900 3901 if (rval != QLA_SUCCESS) { 3902 ql_dbg(ql_dbg_mbx, vha, 0x10b4, 3903 "Failed=%x.\n", rval); 3904 } else { 3905 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b5, 3906 "Done %s.\n", __func__); 3907 } 3908 3909 return rval; 3910 } 3911 3912 void 3913 qla24xx_report_id_acquisition(scsi_qla_host_t *vha, 3914 struct vp_rpt_id_entry_24xx *rptid_entry) 3915 { 3916 struct qla_hw_data *ha = vha->hw; 3917 scsi_qla_host_t *vp = NULL; 3918 unsigned long flags; 3919 int found; 3920 port_id_t id; 3921 struct fc_port *fcport; 3922 3923 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6, 3924 "Entered %s.\n", __func__); 3925 3926 if (rptid_entry->entry_status != 0) 3927 return; 3928 3929 id.b.domain = rptid_entry->port_id[2]; 3930 id.b.area = rptid_entry->port_id[1]; 3931 id.b.al_pa = rptid_entry->port_id[0]; 3932 id.b.rsvd_1 = 0; 3933 ha->flags.n2n_ae = 0; 3934 3935 if (rptid_entry->format == 0) { 3936 /* loop */ 3937 ql_dbg(ql_dbg_async, vha, 0x10b7, 3938 "Format 0 : Number of VPs setup %d, number of " 3939 "VPs acquired %d.\n", rptid_entry->vp_setup, 3940 rptid_entry->vp_acquired); 3941 ql_dbg(ql_dbg_async, vha, 0x10b8, 3942 "Primary port id %02x%02x%02x.\n", 3943 rptid_entry->port_id[2], rptid_entry->port_id[1], 3944 rptid_entry->port_id[0]); 3945 ha->current_topology = ISP_CFG_NL; 3946 qlt_update_host_map(vha, id); 3947 3948 } else if (rptid_entry->format == 1) { 3949 /* fabric */ 3950 ql_dbg(ql_dbg_async, vha, 0x10b9, 3951 "Format 1: VP[%d] enabled - status %d - with " 3952 "port id %02x%02x%02x.\n", rptid_entry->vp_idx, 3953 rptid_entry->vp_status, 3954 rptid_entry->port_id[2], rptid_entry->port_id[1], 3955 rptid_entry->port_id[0]); 3956 ql_dbg(ql_dbg_async, vha, 0x5075, 3957 "Format 1: Remote WWPN %8phC.\n", 3958 rptid_entry->u.f1.port_name); 3959 3960 ql_dbg(ql_dbg_async, vha, 0x5075, 3961 "Format 1: WWPN %8phC.\n", 3962 vha->port_name); 3963 3964 switch (rptid_entry->u.f1.flags & TOPO_MASK) { 3965 case TOPO_N2N: 3966 ha->current_topology = ISP_CFG_N; 3967 spin_lock_irqsave(&vha->hw->tgt.sess_lock, 
flags); 3968 list_for_each_entry(fcport, &vha->vp_fcports, list) { 3969 fcport->scan_state = QLA_FCPORT_SCAN; 3970 fcport->n2n_flag = 0; 3971 } 3972 id.b24 = 0; 3973 if (wwn_to_u64(vha->port_name) > 3974 wwn_to_u64(rptid_entry->u.f1.port_name)) { 3975 vha->d_id.b24 = 0; 3976 vha->d_id.b.al_pa = 1; 3977 ha->flags.n2n_bigger = 1; 3978 3979 id.b.al_pa = 2; 3980 ql_dbg(ql_dbg_async, vha, 0x5075, 3981 "Format 1: assign local id %x remote id %x\n", 3982 vha->d_id.b24, id.b24); 3983 } else { 3984 ql_dbg(ql_dbg_async, vha, 0x5075, 3985 "Format 1: Remote login - Waiting for WWPN %8phC.\n", 3986 rptid_entry->u.f1.port_name); 3987 ha->flags.n2n_bigger = 0; 3988 } 3989 3990 fcport = qla2x00_find_fcport_by_wwpn(vha, 3991 rptid_entry->u.f1.port_name, 1); 3992 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 3993 3994 3995 if (fcport) { 3996 fcport->plogi_nack_done_deadline = jiffies + HZ; 3997 fcport->dm_login_expire = jiffies + 3998 QLA_N2N_WAIT_TIME * HZ; 3999 fcport->scan_state = QLA_FCPORT_FOUND; 4000 fcport->n2n_flag = 1; 4001 fcport->keep_nport_handle = 1; 4002 fcport->fc4_type = FS_FC4TYPE_FCP; 4003 if (vha->flags.nvme_enabled) 4004 fcport->fc4_type |= FS_FC4TYPE_NVME; 4005 4006 if (wwn_to_u64(vha->port_name) > 4007 wwn_to_u64(fcport->port_name)) { 4008 fcport->d_id = id; 4009 } 4010 4011 switch (fcport->disc_state) { 4012 case DSC_DELETED: 4013 set_bit(RELOGIN_NEEDED, 4014 &vha->dpc_flags); 4015 break; 4016 case DSC_DELETE_PEND: 4017 break; 4018 default: 4019 qlt_schedule_sess_for_deletion(fcport); 4020 break; 4021 } 4022 } else { 4023 qla24xx_post_newsess_work(vha, &id, 4024 rptid_entry->u.f1.port_name, 4025 rptid_entry->u.f1.node_name, 4026 NULL, 4027 FS_FCP_IS_N2N); 4028 } 4029 4030 /* if our portname is higher then initiate N2N login */ 4031 4032 set_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags); 4033 return; 4034 break; 4035 case TOPO_FL: 4036 ha->current_topology = ISP_CFG_FL; 4037 break; 4038 case TOPO_F: 4039 ha->current_topology = ISP_CFG_F; 4040 break; 4041 default: 4042 break; 4043 } 4044 4045 ha->flags.gpsc_supported = 1; 4046 ha->current_topology = ISP_CFG_F; 4047 /* buffer to buffer credit flag */ 4048 vha->flags.bbcr_enable = (rptid_entry->u.f1.bbcr & 0xf) != 0; 4049 4050 if (rptid_entry->vp_idx == 0) { 4051 if (rptid_entry->vp_status == VP_STAT_COMPL) { 4052 /* FA-WWN is only for physical port */ 4053 if (qla_ini_mode_enabled(vha) && 4054 ha->flags.fawwpn_enabled && 4055 (rptid_entry->u.f1.flags & 4056 BIT_6)) { 4057 memcpy(vha->port_name, 4058 rptid_entry->u.f1.port_name, 4059 WWN_SIZE); 4060 } 4061 4062 qlt_update_host_map(vha, id); 4063 } 4064 4065 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags); 4066 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags); 4067 } else { 4068 if (rptid_entry->vp_status != VP_STAT_COMPL && 4069 rptid_entry->vp_status != VP_STAT_ID_CHG) { 4070 ql_dbg(ql_dbg_mbx, vha, 0x10ba, 4071 "Could not acquire ID for VP[%d].\n", 4072 rptid_entry->vp_idx); 4073 return; 4074 } 4075 4076 found = 0; 4077 spin_lock_irqsave(&ha->vport_slock, flags); 4078 list_for_each_entry(vp, &ha->vp_list, list) { 4079 if (rptid_entry->vp_idx == vp->vp_idx) { 4080 found = 1; 4081 break; 4082 } 4083 } 4084 spin_unlock_irqrestore(&ha->vport_slock, flags); 4085 4086 if (!found) 4087 return; 4088 4089 qlt_update_host_map(vp, id); 4090 4091 /* 4092 * Cannot configure here as we are still sitting on the 4093 * response queue. Handle it in dpc context. 
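		 * The bits set below only mark the vport for deferred
		 * processing; the FC4/FDMI registration itself runs later
		 * from the DPC thread after VP_DPC_NEEDED is raised.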
4094 */ 4095 set_bit(VP_IDX_ACQUIRED, &vp->vp_flags); 4096 set_bit(REGISTER_FC4_NEEDED, &vp->dpc_flags); 4097 set_bit(REGISTER_FDMI_NEEDED, &vp->dpc_flags); 4098 } 4099 set_bit(VP_DPC_NEEDED, &vha->dpc_flags); 4100 qla2xxx_wake_dpc(vha); 4101 } else if (rptid_entry->format == 2) { 4102 ql_dbg(ql_dbg_async, vha, 0x505f, 4103 "RIDA: format 2/N2N Primary port id %02x%02x%02x.\n", 4104 rptid_entry->port_id[2], rptid_entry->port_id[1], 4105 rptid_entry->port_id[0]); 4106 4107 ql_dbg(ql_dbg_async, vha, 0x5075, 4108 "N2N: Remote WWPN %8phC.\n", 4109 rptid_entry->u.f2.port_name); 4110 4111 /* N2N. direct connect */ 4112 ha->current_topology = ISP_CFG_N; 4113 ha->flags.rida_fmt2 = 1; 4114 vha->d_id.b.domain = rptid_entry->port_id[2]; 4115 vha->d_id.b.area = rptid_entry->port_id[1]; 4116 vha->d_id.b.al_pa = rptid_entry->port_id[0]; 4117 4118 ha->flags.n2n_ae = 1; 4119 spin_lock_irqsave(&ha->vport_slock, flags); 4120 qlt_update_vp_map(vha, SET_AL_PA); 4121 spin_unlock_irqrestore(&ha->vport_slock, flags); 4122 4123 list_for_each_entry(fcport, &vha->vp_fcports, list) { 4124 fcport->scan_state = QLA_FCPORT_SCAN; 4125 fcport->n2n_flag = 0; 4126 } 4127 4128 fcport = qla2x00_find_fcport_by_wwpn(vha, 4129 rptid_entry->u.f2.port_name, 1); 4130 4131 if (fcport) { 4132 fcport->login_retry = vha->hw->login_retry_count; 4133 fcport->plogi_nack_done_deadline = jiffies + HZ; 4134 fcport->scan_state = QLA_FCPORT_FOUND; 4135 fcport->keep_nport_handle = 1; 4136 fcport->n2n_flag = 1; 4137 fcport->d_id.b.domain = 4138 rptid_entry->u.f2.remote_nport_id[2]; 4139 fcport->d_id.b.area = 4140 rptid_entry->u.f2.remote_nport_id[1]; 4141 fcport->d_id.b.al_pa = 4142 rptid_entry->u.f2.remote_nport_id[0]; 4143 } 4144 } 4145 } 4146 4147 /* 4148 * qla24xx_modify_vp_config 4149 * Change VP configuration for vha 4150 * 4151 * Input: 4152 * vha = adapter block pointer. 4153 * 4154 * Returns: 4155 * qla2xxx local function return status code. 4156 * 4157 * Context: 4158 * Kernel context. 
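*
* Note: the request is built as a VP Config IOCB
* (VCT_COMMAND_MOD_ENABLE_VPS) carrying this vport's node and port names;
* the IOCB is issued through the base physical host, and the routine may
* be called by the parent on the vport's behalf.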
4159 */ 4160 int 4161 qla24xx_modify_vp_config(scsi_qla_host_t *vha) 4162 { 4163 int rval; 4164 struct vp_config_entry_24xx *vpmod; 4165 dma_addr_t vpmod_dma; 4166 struct qla_hw_data *ha = vha->hw; 4167 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 4168 4169 /* This can be called by the parent */ 4170 4171 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10bb, 4172 "Entered %s.\n", __func__); 4173 4174 vpmod = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma); 4175 if (!vpmod) { 4176 ql_log(ql_log_warn, vha, 0x10bc, 4177 "Failed to allocate modify VP IOCB.\n"); 4178 return QLA_MEMORY_ALLOC_FAILED; 4179 } 4180 4181 vpmod->entry_type = VP_CONFIG_IOCB_TYPE; 4182 vpmod->entry_count = 1; 4183 vpmod->command = VCT_COMMAND_MOD_ENABLE_VPS; 4184 vpmod->vp_count = 1; 4185 vpmod->vp_index1 = vha->vp_idx; 4186 vpmod->options_idx1 = BIT_3|BIT_4|BIT_5; 4187 4188 qlt_modify_vp_config(vha, vpmod); 4189 4190 memcpy(vpmod->node_name_idx1, vha->node_name, WWN_SIZE); 4191 memcpy(vpmod->port_name_idx1, vha->port_name, WWN_SIZE); 4192 vpmod->entry_count = 1; 4193 4194 rval = qla2x00_issue_iocb(base_vha, vpmod, vpmod_dma, 0); 4195 if (rval != QLA_SUCCESS) { 4196 ql_dbg(ql_dbg_mbx, vha, 0x10bd, 4197 "Failed to issue VP config IOCB (%x).\n", rval); 4198 } else if (vpmod->comp_status != 0) { 4199 ql_dbg(ql_dbg_mbx, vha, 0x10be, 4200 "Failed to complete IOCB -- error status (%x).\n", 4201 vpmod->comp_status); 4202 rval = QLA_FUNCTION_FAILED; 4203 } else if (vpmod->comp_status != cpu_to_le16(CS_COMPLETE)) { 4204 ql_dbg(ql_dbg_mbx, vha, 0x10bf, 4205 "Failed to complete IOCB -- completion status (%x).\n", 4206 le16_to_cpu(vpmod->comp_status)); 4207 rval = QLA_FUNCTION_FAILED; 4208 } else { 4209 /* EMPTY */ 4210 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c0, 4211 "Done %s.\n", __func__); 4212 fc_vport_set_state(vha->fc_vport, FC_VPORT_INITIALIZING); 4213 } 4214 dma_pool_free(ha->s_dma_pool, vpmod, vpmod_dma); 4215 4216 return rval; 4217 } 4218 4219 /* 4220 * qla2x00_send_change_request 4221 * Receive or disable RSCN request from fabric controller 4222 * 4223 * Input: 4224 * ha = adapter block pointer 4225 * format = registration format: 4226 * 0 - Reserved 4227 * 1 - Fabric detected registration 4228 * 2 - N_port detected registration 4229 * 3 - Full registration 4230 * FF - clear registration 4231 * vp_idx = Virtual port index 4232 * 4233 * Returns: 4234 * qla2x00 local function return status code. 
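* (QLA_SUCCESS when the firmware reports MBS_COMMAND_COMPLETE,
* otherwise BIT_1 rather than the usual QLA_* error codes)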
4235 * 4236 * Context: 4237 * Kernel Context 4238 */ 4239 4240 int 4241 qla2x00_send_change_request(scsi_qla_host_t *vha, uint16_t format, 4242 uint16_t vp_idx) 4243 { 4244 int rval; 4245 mbx_cmd_t mc; 4246 mbx_cmd_t *mcp = &mc; 4247 4248 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c7, 4249 "Entered %s.\n", __func__); 4250 4251 mcp->mb[0] = MBC_SEND_CHANGE_REQUEST; 4252 mcp->mb[1] = format; 4253 mcp->mb[9] = vp_idx; 4254 mcp->out_mb = MBX_9|MBX_1|MBX_0; 4255 mcp->in_mb = MBX_0|MBX_1; 4256 mcp->tov = MBX_TOV_SECONDS; 4257 mcp->flags = 0; 4258 rval = qla2x00_mailbox_command(vha, mcp); 4259 4260 if (rval == QLA_SUCCESS) { 4261 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) { 4262 rval = BIT_1; 4263 } 4264 } else 4265 rval = BIT_1; 4266 4267 return rval; 4268 } 4269 4270 int 4271 qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr, 4272 uint32_t size) 4273 { 4274 int rval; 4275 mbx_cmd_t mc; 4276 mbx_cmd_t *mcp = &mc; 4277 4278 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1009, 4279 "Entered %s.\n", __func__); 4280 4281 if (MSW(addr) || IS_FWI2_CAPABLE(vha->hw)) { 4282 mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED; 4283 mcp->mb[8] = MSW(addr); 4284 mcp->out_mb = MBX_8|MBX_0; 4285 } else { 4286 mcp->mb[0] = MBC_DUMP_RISC_RAM; 4287 mcp->out_mb = MBX_0; 4288 } 4289 mcp->mb[1] = LSW(addr); 4290 mcp->mb[2] = MSW(req_dma); 4291 mcp->mb[3] = LSW(req_dma); 4292 mcp->mb[6] = MSW(MSD(req_dma)); 4293 mcp->mb[7] = LSW(MSD(req_dma)); 4294 mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1; 4295 if (IS_FWI2_CAPABLE(vha->hw)) { 4296 mcp->mb[4] = MSW(size); 4297 mcp->mb[5] = LSW(size); 4298 mcp->out_mb |= MBX_5|MBX_4; 4299 } else { 4300 mcp->mb[4] = LSW(size); 4301 mcp->out_mb |= MBX_4; 4302 } 4303 4304 mcp->in_mb = MBX_0; 4305 mcp->tov = MBX_TOV_SECONDS; 4306 mcp->flags = 0; 4307 rval = qla2x00_mailbox_command(vha, mcp); 4308 4309 if (rval != QLA_SUCCESS) { 4310 ql_dbg(ql_dbg_mbx, vha, 0x1008, 4311 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4312 } else { 4313 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1007, 4314 "Done %s.\n", __func__); 4315 } 4316 4317 return rval; 4318 } 4319 /* 84XX Support **************************************************************/ 4320 4321 struct cs84xx_mgmt_cmd { 4322 union { 4323 struct verify_chip_entry_84xx req; 4324 struct verify_chip_rsp_84xx rsp; 4325 } p; 4326 }; 4327 4328 int 4329 qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status) 4330 { 4331 int rval, retry; 4332 struct cs84xx_mgmt_cmd *mn; 4333 dma_addr_t mn_dma; 4334 uint16_t options; 4335 unsigned long flags; 4336 struct qla_hw_data *ha = vha->hw; 4337 4338 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c8, 4339 "Entered %s.\n", __func__); 4340 4341 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma); 4342 if (mn == NULL) { 4343 return QLA_MEMORY_ALLOC_FAILED; 4344 } 4345 4346 /* Force Update? */ 4347 options = ha->cs84xx->fw_update ? VCO_FORCE_UPDATE : 0; 4348 /* Diagnostic firmware? */ 4349 /* options |= MENLO_DIAG_FW; */ 4350 /* We update the firmware with only one data sequence. 
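Hence VCO_END_OF_DATA is set on the single verify IOCB below; if the
verify reports anything other than CS_COMPLETE, one retry is made with
VCO_DONT_UPDATE_FW set and VCO_FORCE_UPDATE cleared.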
*/ 4351 options |= VCO_END_OF_DATA; 4352 4353 do { 4354 retry = 0; 4355 memset(mn, 0, sizeof(*mn)); 4356 mn->p.req.entry_type = VERIFY_CHIP_IOCB_TYPE; 4357 mn->p.req.entry_count = 1; 4358 mn->p.req.options = cpu_to_le16(options); 4359 4360 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111c, 4361 "Dump of Verify Request.\n"); 4362 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111e, 4363 mn, sizeof(*mn)); 4364 4365 rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120); 4366 if (rval != QLA_SUCCESS) { 4367 ql_dbg(ql_dbg_mbx, vha, 0x10cb, 4368 "Failed to issue verify IOCB (%x).\n", rval); 4369 goto verify_done; 4370 } 4371 4372 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1110, 4373 "Dump of Verify Response.\n"); 4374 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1118, 4375 mn, sizeof(*mn)); 4376 4377 status[0] = le16_to_cpu(mn->p.rsp.comp_status); 4378 status[1] = status[0] == CS_VCS_CHIP_FAILURE ? 4379 le16_to_cpu(mn->p.rsp.failure_code) : 0; 4380 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ce, 4381 "cs=%x fc=%x.\n", status[0], status[1]); 4382 4383 if (status[0] != CS_COMPLETE) { 4384 rval = QLA_FUNCTION_FAILED; 4385 if (!(options & VCO_DONT_UPDATE_FW)) { 4386 ql_dbg(ql_dbg_mbx, vha, 0x10cf, 4387 "Firmware update failed. Retrying " 4388 "without update firmware.\n"); 4389 options |= VCO_DONT_UPDATE_FW; 4390 options &= ~VCO_FORCE_UPDATE; 4391 retry = 1; 4392 } 4393 } else { 4394 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d0, 4395 "Firmware updated to %x.\n", 4396 le32_to_cpu(mn->p.rsp.fw_ver)); 4397 4398 /* NOTE: we only update OP firmware. */ 4399 spin_lock_irqsave(&ha->cs84xx->access_lock, flags); 4400 ha->cs84xx->op_fw_version = 4401 le32_to_cpu(mn->p.rsp.fw_ver); 4402 spin_unlock_irqrestore(&ha->cs84xx->access_lock, 4403 flags); 4404 } 4405 } while (retry); 4406 4407 verify_done: 4408 dma_pool_free(ha->s_dma_pool, mn, mn_dma); 4409 4410 if (rval != QLA_SUCCESS) { 4411 ql_dbg(ql_dbg_mbx, vha, 0x10d1, 4412 "Failed=%x.\n", rval); 4413 } else { 4414 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d2, 4415 "Done %s.\n", __func__); 4416 } 4417 4418 return rval; 4419 } 4420 4421 int 4422 qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req) 4423 { 4424 int rval; 4425 unsigned long flags; 4426 mbx_cmd_t mc; 4427 mbx_cmd_t *mcp = &mc; 4428 struct qla_hw_data *ha = vha->hw; 4429 4430 if (!ha->flags.fw_started) 4431 return QLA_SUCCESS; 4432 4433 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d3, 4434 "Entered %s.\n", __func__); 4435 4436 if (IS_SHADOW_REG_CAPABLE(ha)) 4437 req->options |= BIT_13; 4438 4439 mcp->mb[0] = MBC_INITIALIZE_MULTIQ; 4440 mcp->mb[1] = req->options; 4441 mcp->mb[2] = MSW(LSD(req->dma)); 4442 mcp->mb[3] = LSW(LSD(req->dma)); 4443 mcp->mb[6] = MSW(MSD(req->dma)); 4444 mcp->mb[7] = LSW(MSD(req->dma)); 4445 mcp->mb[5] = req->length; 4446 if (req->rsp) 4447 mcp->mb[10] = req->rsp->id; 4448 mcp->mb[12] = req->qos; 4449 mcp->mb[11] = req->vp_idx; 4450 mcp->mb[13] = req->rid; 4451 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) 4452 mcp->mb[15] = 0; 4453 4454 mcp->mb[4] = req->id; 4455 /* que in ptr index */ 4456 mcp->mb[8] = 0; 4457 /* que out ptr index */ 4458 mcp->mb[9] = *req->out_ptr = 0; 4459 mcp->out_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7| 4460 MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 4461 mcp->in_mb = MBX_0; 4462 mcp->flags = MBX_DMA_OUT; 4463 mcp->tov = MBX_TOV_SECONDS * 2; 4464 4465 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) || 4466 IS_QLA28XX(ha)) 4467 mcp->in_mb |= MBX_1; 4468 if (IS_QLA83XX(ha) || 
IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 4469 mcp->out_mb |= MBX_15; 4470 /* debug q create issue in SR-IOV */ 4471 mcp->in_mb |= MBX_9 | MBX_8 | MBX_7; 4472 } 4473 4474 spin_lock_irqsave(&ha->hardware_lock, flags); 4475 if (!(req->options & BIT_0)) { 4476 wrt_reg_dword(req->req_q_in, 0); 4477 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 4478 wrt_reg_dword(req->req_q_out, 0); 4479 } 4480 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4481 4482 rval = qla2x00_mailbox_command(vha, mcp); 4483 if (rval != QLA_SUCCESS) { 4484 ql_dbg(ql_dbg_mbx, vha, 0x10d4, 4485 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4486 } else { 4487 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d5, 4488 "Done %s.\n", __func__); 4489 } 4490 4491 return rval; 4492 } 4493 4494 int 4495 qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp) 4496 { 4497 int rval; 4498 unsigned long flags; 4499 mbx_cmd_t mc; 4500 mbx_cmd_t *mcp = &mc; 4501 struct qla_hw_data *ha = vha->hw; 4502 4503 if (!ha->flags.fw_started) 4504 return QLA_SUCCESS; 4505 4506 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d6, 4507 "Entered %s.\n", __func__); 4508 4509 if (IS_SHADOW_REG_CAPABLE(ha)) 4510 rsp->options |= BIT_13; 4511 4512 mcp->mb[0] = MBC_INITIALIZE_MULTIQ; 4513 mcp->mb[1] = rsp->options; 4514 mcp->mb[2] = MSW(LSD(rsp->dma)); 4515 mcp->mb[3] = LSW(LSD(rsp->dma)); 4516 mcp->mb[6] = MSW(MSD(rsp->dma)); 4517 mcp->mb[7] = LSW(MSD(rsp->dma)); 4518 mcp->mb[5] = rsp->length; 4519 mcp->mb[14] = rsp->msix->entry; 4520 mcp->mb[13] = rsp->rid; 4521 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) 4522 mcp->mb[15] = 0; 4523 4524 mcp->mb[4] = rsp->id; 4525 /* que in ptr index */ 4526 mcp->mb[8] = *rsp->in_ptr = 0; 4527 /* que out ptr index */ 4528 mcp->mb[9] = 0; 4529 mcp->out_mb = MBX_14|MBX_13|MBX_9|MBX_8|MBX_7 4530 |MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 4531 mcp->in_mb = MBX_0; 4532 mcp->flags = MBX_DMA_OUT; 4533 mcp->tov = MBX_TOV_SECONDS * 2; 4534 4535 if (IS_QLA81XX(ha)) { 4536 mcp->out_mb |= MBX_12|MBX_11|MBX_10; 4537 mcp->in_mb |= MBX_1; 4538 } else if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 4539 mcp->out_mb |= MBX_15|MBX_12|MBX_11|MBX_10; 4540 mcp->in_mb |= MBX_1; 4541 /* debug q create issue in SR-IOV */ 4542 mcp->in_mb |= MBX_9 | MBX_8 | MBX_7; 4543 } 4544 4545 spin_lock_irqsave(&ha->hardware_lock, flags); 4546 if (!(rsp->options & BIT_0)) { 4547 wrt_reg_dword(rsp->rsp_q_out, 0); 4548 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 4549 wrt_reg_dword(rsp->rsp_q_in, 0); 4550 } 4551 4552 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4553 4554 rval = qla2x00_mailbox_command(vha, mcp); 4555 if (rval != QLA_SUCCESS) { 4556 ql_dbg(ql_dbg_mbx, vha, 0x10d7, 4557 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4558 } else { 4559 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d8, 4560 "Done %s.\n", __func__); 4561 } 4562 4563 return rval; 4564 } 4565 4566 int 4567 qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb) 4568 { 4569 int rval; 4570 mbx_cmd_t mc; 4571 mbx_cmd_t *mcp = &mc; 4572 4573 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d9, 4574 "Entered %s.\n", __func__); 4575 4576 mcp->mb[0] = MBC_IDC_ACK; 4577 memcpy(&mcp->mb[1], mb, QLA_IDC_ACK_REGS * sizeof(uint16_t)); 4578 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 4579 mcp->in_mb = MBX_0; 4580 mcp->tov = MBX_TOV_SECONDS; 4581 mcp->flags = 0; 4582 rval = qla2x00_mailbox_command(vha, mcp); 4583 4584 if (rval != QLA_SUCCESS) { 4585 ql_dbg(ql_dbg_mbx, vha, 0x10da, 4586 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4587 } else 
{ 4588 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10db, 4589 "Done %s.\n", __func__); 4590 } 4591 4592 return rval; 4593 } 4594 4595 int 4596 qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size) 4597 { 4598 int rval; 4599 mbx_cmd_t mc; 4600 mbx_cmd_t *mcp = &mc; 4601 4602 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10dc, 4603 "Entered %s.\n", __func__); 4604 4605 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) && 4606 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw)) 4607 return QLA_FUNCTION_FAILED; 4608 4609 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; 4610 mcp->mb[1] = FAC_OPT_CMD_GET_SECTOR_SIZE; 4611 mcp->out_mb = MBX_1|MBX_0; 4612 mcp->in_mb = MBX_1|MBX_0; 4613 mcp->tov = MBX_TOV_SECONDS; 4614 mcp->flags = 0; 4615 rval = qla2x00_mailbox_command(vha, mcp); 4616 4617 if (rval != QLA_SUCCESS) { 4618 ql_dbg(ql_dbg_mbx, vha, 0x10dd, 4619 "Failed=%x mb[0]=%x mb[1]=%x.\n", 4620 rval, mcp->mb[0], mcp->mb[1]); 4621 } else { 4622 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10de, 4623 "Done %s.\n", __func__); 4624 *sector_size = mcp->mb[1]; 4625 } 4626 4627 return rval; 4628 } 4629 4630 int 4631 qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable) 4632 { 4633 int rval; 4634 mbx_cmd_t mc; 4635 mbx_cmd_t *mcp = &mc; 4636 4637 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) && 4638 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw)) 4639 return QLA_FUNCTION_FAILED; 4640 4641 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10df, 4642 "Entered %s.\n", __func__); 4643 4644 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; 4645 mcp->mb[1] = enable ? FAC_OPT_CMD_WRITE_ENABLE : 4646 FAC_OPT_CMD_WRITE_PROTECT; 4647 mcp->out_mb = MBX_1|MBX_0; 4648 mcp->in_mb = MBX_1|MBX_0; 4649 mcp->tov = MBX_TOV_SECONDS; 4650 mcp->flags = 0; 4651 rval = qla2x00_mailbox_command(vha, mcp); 4652 4653 if (rval != QLA_SUCCESS) { 4654 ql_dbg(ql_dbg_mbx, vha, 0x10e0, 4655 "Failed=%x mb[0]=%x mb[1]=%x.\n", 4656 rval, mcp->mb[0], mcp->mb[1]); 4657 } else { 4658 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e1, 4659 "Done %s.\n", __func__); 4660 } 4661 4662 return rval; 4663 } 4664 4665 int 4666 qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish) 4667 { 4668 int rval; 4669 mbx_cmd_t mc; 4670 mbx_cmd_t *mcp = &mc; 4671 4672 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) && 4673 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw)) 4674 return QLA_FUNCTION_FAILED; 4675 4676 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2, 4677 "Entered %s.\n", __func__); 4678 4679 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; 4680 mcp->mb[1] = FAC_OPT_CMD_ERASE_SECTOR; 4681 mcp->mb[2] = LSW(start); 4682 mcp->mb[3] = MSW(start); 4683 mcp->mb[4] = LSW(finish); 4684 mcp->mb[5] = MSW(finish); 4685 mcp->out_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 4686 mcp->in_mb = MBX_2|MBX_1|MBX_0; 4687 mcp->tov = MBX_TOV_SECONDS; 4688 mcp->flags = 0; 4689 rval = qla2x00_mailbox_command(vha, mcp); 4690 4691 if (rval != QLA_SUCCESS) { 4692 ql_dbg(ql_dbg_mbx, vha, 0x10e3, 4693 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", 4694 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); 4695 } else { 4696 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4, 4697 "Done %s.\n", __func__); 4698 } 4699 4700 return rval; 4701 } 4702 4703 int 4704 qla81xx_fac_semaphore_access(scsi_qla_host_t *vha, int lock) 4705 { 4706 int rval = QLA_SUCCESS; 4707 mbx_cmd_t mc; 4708 mbx_cmd_t *mcp = &mc; 4709 struct qla_hw_data *ha = vha->hw; 4710 4711 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && 4712 !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 4713 return rval; 4714 4715 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2, 4716 
"Entered %s.\n", __func__); 4717 4718 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; 4719 mcp->mb[1] = (lock ? FAC_OPT_CMD_LOCK_SEMAPHORE : 4720 FAC_OPT_CMD_UNLOCK_SEMAPHORE); 4721 mcp->out_mb = MBX_1|MBX_0; 4722 mcp->in_mb = MBX_1|MBX_0; 4723 mcp->tov = MBX_TOV_SECONDS; 4724 mcp->flags = 0; 4725 rval = qla2x00_mailbox_command(vha, mcp); 4726 4727 if (rval != QLA_SUCCESS) { 4728 ql_dbg(ql_dbg_mbx, vha, 0x10e3, 4729 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", 4730 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); 4731 } else { 4732 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4, 4733 "Done %s.\n", __func__); 4734 } 4735 4736 return rval; 4737 } 4738 4739 int 4740 qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha) 4741 { 4742 int rval = 0; 4743 mbx_cmd_t mc; 4744 mbx_cmd_t *mcp = &mc; 4745 4746 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e5, 4747 "Entered %s.\n", __func__); 4748 4749 mcp->mb[0] = MBC_RESTART_MPI_FW; 4750 mcp->out_mb = MBX_0; 4751 mcp->in_mb = MBX_0|MBX_1; 4752 mcp->tov = MBX_TOV_SECONDS; 4753 mcp->flags = 0; 4754 rval = qla2x00_mailbox_command(vha, mcp); 4755 4756 if (rval != QLA_SUCCESS) { 4757 ql_dbg(ql_dbg_mbx, vha, 0x10e6, 4758 "Failed=%x mb[0]=%x mb[1]=%x.\n", 4759 rval, mcp->mb[0], mcp->mb[1]); 4760 } else { 4761 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e7, 4762 "Done %s.\n", __func__); 4763 } 4764 4765 return rval; 4766 } 4767 4768 int 4769 qla82xx_set_driver_version(scsi_qla_host_t *vha, char *version) 4770 { 4771 int rval; 4772 mbx_cmd_t mc; 4773 mbx_cmd_t *mcp = &mc; 4774 int i; 4775 int len; 4776 __le16 *str; 4777 struct qla_hw_data *ha = vha->hw; 4778 4779 if (!IS_P3P_TYPE(ha)) 4780 return QLA_FUNCTION_FAILED; 4781 4782 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117b, 4783 "Entered %s.\n", __func__); 4784 4785 str = (__force __le16 *)version; 4786 len = strlen(version); 4787 4788 mcp->mb[0] = MBC_SET_RNID_PARAMS; 4789 mcp->mb[1] = RNID_TYPE_SET_VERSION << 8; 4790 mcp->out_mb = MBX_1|MBX_0; 4791 for (i = 4; i < 16 && len; i++, str++, len -= 2) { 4792 mcp->mb[i] = le16_to_cpup(str); 4793 mcp->out_mb |= 1<<i; 4794 } 4795 for (; i < 16; i++) { 4796 mcp->mb[i] = 0; 4797 mcp->out_mb |= 1<<i; 4798 } 4799 mcp->in_mb = MBX_1|MBX_0; 4800 mcp->tov = MBX_TOV_SECONDS; 4801 mcp->flags = 0; 4802 rval = qla2x00_mailbox_command(vha, mcp); 4803 4804 if (rval != QLA_SUCCESS) { 4805 ql_dbg(ql_dbg_mbx, vha, 0x117c, 4806 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]); 4807 } else { 4808 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117d, 4809 "Done %s.\n", __func__); 4810 } 4811 4812 return rval; 4813 } 4814 4815 int 4816 qla25xx_set_driver_version(scsi_qla_host_t *vha, char *version) 4817 { 4818 int rval; 4819 mbx_cmd_t mc; 4820 mbx_cmd_t *mcp = &mc; 4821 int len; 4822 uint16_t dwlen; 4823 uint8_t *str; 4824 dma_addr_t str_dma; 4825 struct qla_hw_data *ha = vha->hw; 4826 4827 if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha) || 4828 IS_P3P_TYPE(ha)) 4829 return QLA_FUNCTION_FAILED; 4830 4831 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117e, 4832 "Entered %s.\n", __func__); 4833 4834 str = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &str_dma); 4835 if (!str) { 4836 ql_log(ql_log_warn, vha, 0x117f, 4837 "Failed to allocate driver version param.\n"); 4838 return QLA_MEMORY_ALLOC_FAILED; 4839 } 4840 4841 memcpy(str, "\x7\x3\x11\x0", 4); 4842 dwlen = str[0]; 4843 len = dwlen * 4 - 4; 4844 memset(str + 4, 0, len); 4845 if (len > strlen(version)) 4846 len = strlen(version); 4847 memcpy(str + 4, version, len); 4848 4849 mcp->mb[0] = MBC_SET_RNID_PARAMS; 4850 mcp->mb[1] = 
RNID_TYPE_SET_VERSION << 8 | dwlen; 4851 mcp->mb[2] = MSW(LSD(str_dma)); 4852 mcp->mb[3] = LSW(LSD(str_dma)); 4853 mcp->mb[6] = MSW(MSD(str_dma)); 4854 mcp->mb[7] = LSW(MSD(str_dma)); 4855 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 4856 mcp->in_mb = MBX_1|MBX_0; 4857 mcp->tov = MBX_TOV_SECONDS; 4858 mcp->flags = 0; 4859 rval = qla2x00_mailbox_command(vha, mcp); 4860 4861 if (rval != QLA_SUCCESS) { 4862 ql_dbg(ql_dbg_mbx, vha, 0x1180, 4863 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]); 4864 } else { 4865 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1181, 4866 "Done %s.\n", __func__); 4867 } 4868 4869 dma_pool_free(ha->s_dma_pool, str, str_dma); 4870 4871 return rval; 4872 } 4873 4874 int 4875 qla24xx_get_port_login_templ(scsi_qla_host_t *vha, dma_addr_t buf_dma, 4876 void *buf, uint16_t bufsiz) 4877 { 4878 int rval, i; 4879 mbx_cmd_t mc; 4880 mbx_cmd_t *mcp = &mc; 4881 uint32_t *bp; 4882 4883 if (!IS_FWI2_CAPABLE(vha->hw)) 4884 return QLA_FUNCTION_FAILED; 4885 4886 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159, 4887 "Entered %s.\n", __func__); 4888 4889 mcp->mb[0] = MBC_GET_RNID_PARAMS; 4890 mcp->mb[1] = RNID_TYPE_PORT_LOGIN << 8; 4891 mcp->mb[2] = MSW(buf_dma); 4892 mcp->mb[3] = LSW(buf_dma); 4893 mcp->mb[6] = MSW(MSD(buf_dma)); 4894 mcp->mb[7] = LSW(MSD(buf_dma)); 4895 mcp->mb[8] = bufsiz/4; 4896 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 4897 mcp->in_mb = MBX_1|MBX_0; 4898 mcp->tov = MBX_TOV_SECONDS; 4899 mcp->flags = 0; 4900 rval = qla2x00_mailbox_command(vha, mcp); 4901 4902 if (rval != QLA_SUCCESS) { 4903 ql_dbg(ql_dbg_mbx, vha, 0x115a, 4904 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]); 4905 } else { 4906 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b, 4907 "Done %s.\n", __func__); 4908 bp = (uint32_t *) buf; 4909 for (i = 0; i < (bufsiz-4)/4; i++, bp++) 4910 *bp = le32_to_cpu((__force __le32)*bp); 4911 } 4912 4913 return rval; 4914 } 4915 4916 #define PUREX_CMD_COUNT 2 4917 int 4918 qla25xx_set_els_cmds_supported(scsi_qla_host_t *vha) 4919 { 4920 int rval; 4921 mbx_cmd_t mc; 4922 mbx_cmd_t *mcp = &mc; 4923 uint8_t *els_cmd_map; 4924 dma_addr_t els_cmd_map_dma; 4925 uint8_t cmd_opcode[PUREX_CMD_COUNT]; 4926 uint8_t i, index, purex_bit; 4927 struct qla_hw_data *ha = vha->hw; 4928 4929 if (!IS_QLA25XX(ha) && !IS_QLA2031(ha) && 4930 !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 4931 return QLA_SUCCESS; 4932 4933 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1197, 4934 "Entered %s.\n", __func__); 4935 4936 els_cmd_map = dma_alloc_coherent(&ha->pdev->dev, ELS_CMD_MAP_SIZE, 4937 &els_cmd_map_dma, GFP_KERNEL); 4938 if (!els_cmd_map) { 4939 ql_log(ql_log_warn, vha, 0x7101, 4940 "Failed to allocate RDP els command param.\n"); 4941 return QLA_MEMORY_ALLOC_FAILED; 4942 } 4943 4944 /* List of Purex ELS */ 4945 cmd_opcode[0] = ELS_FPIN; 4946 cmd_opcode[1] = ELS_RDP; 4947 4948 for (i = 0; i < PUREX_CMD_COUNT; i++) { 4949 index = cmd_opcode[i] / 8; 4950 purex_bit = cmd_opcode[i] % 8; 4951 els_cmd_map[index] |= 1 << purex_bit; 4952 } 4953 4954 mcp->mb[0] = MBC_SET_RNID_PARAMS; 4955 mcp->mb[1] = RNID_TYPE_ELS_CMD << 8; 4956 mcp->mb[2] = MSW(LSD(els_cmd_map_dma)); 4957 mcp->mb[3] = LSW(LSD(els_cmd_map_dma)); 4958 mcp->mb[6] = MSW(MSD(els_cmd_map_dma)); 4959 mcp->mb[7] = LSW(MSD(els_cmd_map_dma)); 4960 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 4961 mcp->in_mb = MBX_1|MBX_0; 4962 mcp->tov = MBX_TOV_SECONDS; 4963 mcp->flags = MBX_DMA_OUT; 4964 mcp->buf_size = ELS_CMD_MAP_SIZE; 4965 rval = qla2x00_mailbox_command(vha, mcp); 4966 4967 if (rval != 
QLA_SUCCESS) { 4968 ql_dbg(ql_dbg_mbx, vha, 0x118d, 4969 "Failed=%x (%x,%x).\n", rval, mcp->mb[0], mcp->mb[1]); 4970 } else { 4971 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c, 4972 "Done %s.\n", __func__); 4973 } 4974 4975 dma_free_coherent(&ha->pdev->dev, ELS_CMD_MAP_SIZE, 4976 els_cmd_map, els_cmd_map_dma); 4977 4978 return rval; 4979 } 4980 4981 static int 4982 qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp) 4983 { 4984 int rval; 4985 mbx_cmd_t mc; 4986 mbx_cmd_t *mcp = &mc; 4987 4988 if (!IS_FWI2_CAPABLE(vha->hw)) 4989 return QLA_FUNCTION_FAILED; 4990 4991 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159, 4992 "Entered %s.\n", __func__); 4993 4994 mcp->mb[0] = MBC_GET_RNID_PARAMS; 4995 mcp->mb[1] = RNID_TYPE_ASIC_TEMP << 8; 4996 mcp->out_mb = MBX_1|MBX_0; 4997 mcp->in_mb = MBX_1|MBX_0; 4998 mcp->tov = MBX_TOV_SECONDS; 4999 mcp->flags = 0; 5000 rval = qla2x00_mailbox_command(vha, mcp); 5001 *temp = mcp->mb[1]; 5002 5003 if (rval != QLA_SUCCESS) { 5004 ql_dbg(ql_dbg_mbx, vha, 0x115a, 5005 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]); 5006 } else { 5007 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b, 5008 "Done %s.\n", __func__); 5009 } 5010 5011 return rval; 5012 } 5013 5014 int 5015 qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp, 5016 uint16_t dev, uint16_t off, uint16_t len, uint16_t opt) 5017 { 5018 int rval; 5019 mbx_cmd_t mc; 5020 mbx_cmd_t *mcp = &mc; 5021 struct qla_hw_data *ha = vha->hw; 5022 5023 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8, 5024 "Entered %s.\n", __func__); 5025 5026 if (!IS_FWI2_CAPABLE(ha)) 5027 return QLA_FUNCTION_FAILED; 5028 5029 if (len == 1) 5030 opt |= BIT_0; 5031 5032 mcp->mb[0] = MBC_READ_SFP; 5033 mcp->mb[1] = dev; 5034 mcp->mb[2] = MSW(LSD(sfp_dma)); 5035 mcp->mb[3] = LSW(LSD(sfp_dma)); 5036 mcp->mb[6] = MSW(MSD(sfp_dma)); 5037 mcp->mb[7] = LSW(MSD(sfp_dma)); 5038 mcp->mb[8] = len; 5039 mcp->mb[9] = off; 5040 mcp->mb[10] = opt; 5041 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 5042 mcp->in_mb = MBX_1|MBX_0; 5043 mcp->tov = MBX_TOV_SECONDS; 5044 mcp->flags = 0; 5045 rval = qla2x00_mailbox_command(vha, mcp); 5046 5047 if (opt & BIT_0) 5048 *sfp = mcp->mb[1]; 5049 5050 if (rval != QLA_SUCCESS) { 5051 ql_dbg(ql_dbg_mbx, vha, 0x10e9, 5052 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5053 if (mcp->mb[0] == MBS_COMMAND_ERROR && mcp->mb[1] == 0x22) { 5054 /* sfp is not there */ 5055 rval = QLA_INTERFACE_ERROR; 5056 } 5057 } else { 5058 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea, 5059 "Done %s.\n", __func__); 5060 } 5061 5062 return rval; 5063 } 5064 5065 int 5066 qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp, 5067 uint16_t dev, uint16_t off, uint16_t len, uint16_t opt) 5068 { 5069 int rval; 5070 mbx_cmd_t mc; 5071 mbx_cmd_t *mcp = &mc; 5072 struct qla_hw_data *ha = vha->hw; 5073 5074 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10eb, 5075 "Entered %s.\n", __func__); 5076 5077 if (!IS_FWI2_CAPABLE(ha)) 5078 return QLA_FUNCTION_FAILED; 5079 5080 if (len == 1) 5081 opt |= BIT_0; 5082 5083 if (opt & BIT_0) 5084 len = *sfp; 5085 5086 mcp->mb[0] = MBC_WRITE_SFP; 5087 mcp->mb[1] = dev; 5088 mcp->mb[2] = MSW(LSD(sfp_dma)); 5089 mcp->mb[3] = LSW(LSD(sfp_dma)); 5090 mcp->mb[6] = MSW(MSD(sfp_dma)); 5091 mcp->mb[7] = LSW(MSD(sfp_dma)); 5092 mcp->mb[8] = len; 5093 mcp->mb[9] = off; 5094 mcp->mb[10] = opt; 5095 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 5096 mcp->in_mb = MBX_1|MBX_0; 5097 mcp->tov = MBX_TOV_SECONDS; 5098 
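/*
 * Illustrative note (example value only): the mb[2]/mb[3]/mb[6]/mb[7]
 * assignments above split the 64-bit DMA address into four 16-bit words,
 * e.g. for sfp_dma = 0x0000001234567d00:
 *   mb[2] = MSW(LSD(sfp_dma)) = 0x3456, mb[3] = LSW(LSD(sfp_dma)) = 0x7d00,
 *   mb[6] = MSW(MSD(sfp_dma)) = 0x0000, mb[7] = LSW(MSD(sfp_dma)) = 0x0012.
 * For single-byte writes (BIT_0 set in opt) the byte taken from *sfp is
 * passed in mb[8] in place of a transfer length.
 */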
mcp->flags = 0; 5099 rval = qla2x00_mailbox_command(vha, mcp); 5100 5101 if (rval != QLA_SUCCESS) { 5102 ql_dbg(ql_dbg_mbx, vha, 0x10ec, 5103 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5104 } else { 5105 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ed, 5106 "Done %s.\n", __func__); 5107 } 5108 5109 return rval; 5110 } 5111 5112 int 5113 qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma, 5114 uint16_t size_in_bytes, uint16_t *actual_size) 5115 { 5116 int rval; 5117 mbx_cmd_t mc; 5118 mbx_cmd_t *mcp = &mc; 5119 5120 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ee, 5121 "Entered %s.\n", __func__); 5122 5123 if (!IS_CNA_CAPABLE(vha->hw)) 5124 return QLA_FUNCTION_FAILED; 5125 5126 mcp->mb[0] = MBC_GET_XGMAC_STATS; 5127 mcp->mb[2] = MSW(stats_dma); 5128 mcp->mb[3] = LSW(stats_dma); 5129 mcp->mb[6] = MSW(MSD(stats_dma)); 5130 mcp->mb[7] = LSW(MSD(stats_dma)); 5131 mcp->mb[8] = size_in_bytes >> 2; 5132 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0; 5133 mcp->in_mb = MBX_2|MBX_1|MBX_0; 5134 mcp->tov = MBX_TOV_SECONDS; 5135 mcp->flags = 0; 5136 rval = qla2x00_mailbox_command(vha, mcp); 5137 5138 if (rval != QLA_SUCCESS) { 5139 ql_dbg(ql_dbg_mbx, vha, 0x10ef, 5140 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", 5141 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); 5142 } else { 5143 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f0, 5144 "Done %s.\n", __func__); 5145 5146 5147 *actual_size = mcp->mb[2] << 2; 5148 } 5149 5150 return rval; 5151 } 5152 5153 int 5154 qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma, 5155 uint16_t size) 5156 { 5157 int rval; 5158 mbx_cmd_t mc; 5159 mbx_cmd_t *mcp = &mc; 5160 5161 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f1, 5162 "Entered %s.\n", __func__); 5163 5164 if (!IS_CNA_CAPABLE(vha->hw)) 5165 return QLA_FUNCTION_FAILED; 5166 5167 mcp->mb[0] = MBC_GET_DCBX_PARAMS; 5168 mcp->mb[1] = 0; 5169 mcp->mb[2] = MSW(tlv_dma); 5170 mcp->mb[3] = LSW(tlv_dma); 5171 mcp->mb[6] = MSW(MSD(tlv_dma)); 5172 mcp->mb[7] = LSW(MSD(tlv_dma)); 5173 mcp->mb[8] = size; 5174 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 5175 mcp->in_mb = MBX_2|MBX_1|MBX_0; 5176 mcp->tov = MBX_TOV_SECONDS; 5177 mcp->flags = 0; 5178 rval = qla2x00_mailbox_command(vha, mcp); 5179 5180 if (rval != QLA_SUCCESS) { 5181 ql_dbg(ql_dbg_mbx, vha, 0x10f2, 5182 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", 5183 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); 5184 } else { 5185 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f3, 5186 "Done %s.\n", __func__); 5187 } 5188 5189 return rval; 5190 } 5191 5192 int 5193 qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data) 5194 { 5195 int rval; 5196 mbx_cmd_t mc; 5197 mbx_cmd_t *mcp = &mc; 5198 5199 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f4, 5200 "Entered %s.\n", __func__); 5201 5202 if (!IS_FWI2_CAPABLE(vha->hw)) 5203 return QLA_FUNCTION_FAILED; 5204 5205 mcp->mb[0] = MBC_READ_RAM_EXTENDED; 5206 mcp->mb[1] = LSW(risc_addr); 5207 mcp->mb[8] = MSW(risc_addr); 5208 mcp->out_mb = MBX_8|MBX_1|MBX_0; 5209 mcp->in_mb = MBX_3|MBX_2|MBX_0; 5210 mcp->tov = MBX_TOV_SECONDS; 5211 mcp->flags = 0; 5212 rval = qla2x00_mailbox_command(vha, mcp); 5213 if (rval != QLA_SUCCESS) { 5214 ql_dbg(ql_dbg_mbx, vha, 0x10f5, 5215 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5216 } else { 5217 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f6, 5218 "Done %s.\n", __func__); 5219 *data = mcp->mb[3] << 16 | mcp->mb[2]; 5220 } 5221 5222 return rval; 5223 } 5224 5225 int 5226 qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, 5227 
uint16_t *mresp) 5228 { 5229 int rval; 5230 mbx_cmd_t mc; 5231 mbx_cmd_t *mcp = &mc; 5232 5233 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f7, 5234 "Entered %s.\n", __func__); 5235 5236 memset(mcp->mb, 0 , sizeof(mcp->mb)); 5237 mcp->mb[0] = MBC_DIAGNOSTIC_LOOP_BACK; 5238 mcp->mb[1] = mreq->options | BIT_6; // BIT_6 specifies 64 bit addressing 5239 5240 /* transfer count */ 5241 mcp->mb[10] = LSW(mreq->transfer_size); 5242 mcp->mb[11] = MSW(mreq->transfer_size); 5243 5244 /* send data address */ 5245 mcp->mb[14] = LSW(mreq->send_dma); 5246 mcp->mb[15] = MSW(mreq->send_dma); 5247 mcp->mb[20] = LSW(MSD(mreq->send_dma)); 5248 mcp->mb[21] = MSW(MSD(mreq->send_dma)); 5249 5250 /* receive data address */ 5251 mcp->mb[16] = LSW(mreq->rcv_dma); 5252 mcp->mb[17] = MSW(mreq->rcv_dma); 5253 mcp->mb[6] = LSW(MSD(mreq->rcv_dma)); 5254 mcp->mb[7] = MSW(MSD(mreq->rcv_dma)); 5255 5256 /* Iteration count */ 5257 mcp->mb[18] = LSW(mreq->iteration_count); 5258 mcp->mb[19] = MSW(mreq->iteration_count); 5259 5260 mcp->out_mb = MBX_21|MBX_20|MBX_19|MBX_18|MBX_17|MBX_16|MBX_15| 5261 MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0; 5262 if (IS_CNA_CAPABLE(vha->hw)) 5263 mcp->out_mb |= MBX_2; 5264 mcp->in_mb = MBX_19|MBX_18|MBX_3|MBX_2|MBX_1|MBX_0; 5265 5266 mcp->buf_size = mreq->transfer_size; 5267 mcp->tov = MBX_TOV_SECONDS; 5268 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; 5269 5270 rval = qla2x00_mailbox_command(vha, mcp); 5271 5272 if (rval != QLA_SUCCESS) { 5273 ql_dbg(ql_dbg_mbx, vha, 0x10f8, 5274 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[18]=%x " 5275 "mb[19]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], 5276 mcp->mb[3], mcp->mb[18], mcp->mb[19]); 5277 } else { 5278 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f9, 5279 "Done %s.\n", __func__); 5280 } 5281 5282 /* Copy mailbox information */ 5283 memcpy( mresp, mcp->mb, 64); 5284 return rval; 5285 } 5286 5287 int 5288 qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, 5289 uint16_t *mresp) 5290 { 5291 int rval; 5292 mbx_cmd_t mc; 5293 mbx_cmd_t *mcp = &mc; 5294 struct qla_hw_data *ha = vha->hw; 5295 5296 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fa, 5297 "Entered %s.\n", __func__); 5298 5299 memset(mcp->mb, 0 , sizeof(mcp->mb)); 5300 mcp->mb[0] = MBC_DIAGNOSTIC_ECHO; 5301 /* BIT_6 specifies 64bit address */ 5302 mcp->mb[1] = mreq->options | BIT_15 | BIT_6; 5303 if (IS_CNA_CAPABLE(ha)) { 5304 mcp->mb[2] = vha->fcoe_fcf_idx; 5305 } 5306 mcp->mb[16] = LSW(mreq->rcv_dma); 5307 mcp->mb[17] = MSW(mreq->rcv_dma); 5308 mcp->mb[6] = LSW(MSD(mreq->rcv_dma)); 5309 mcp->mb[7] = MSW(MSD(mreq->rcv_dma)); 5310 5311 mcp->mb[10] = LSW(mreq->transfer_size); 5312 5313 mcp->mb[14] = LSW(mreq->send_dma); 5314 mcp->mb[15] = MSW(mreq->send_dma); 5315 mcp->mb[20] = LSW(MSD(mreq->send_dma)); 5316 mcp->mb[21] = MSW(MSD(mreq->send_dma)); 5317 5318 mcp->out_mb = MBX_21|MBX_20|MBX_17|MBX_16|MBX_15| 5319 MBX_14|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0; 5320 if (IS_CNA_CAPABLE(ha)) 5321 mcp->out_mb |= MBX_2; 5322 5323 mcp->in_mb = MBX_0; 5324 if (IS_CNA_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || 5325 IS_QLA2031(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) 5326 mcp->in_mb |= MBX_1; 5327 if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) || 5328 IS_QLA28XX(ha)) 5329 mcp->in_mb |= MBX_3; 5330 5331 mcp->tov = MBX_TOV_SECONDS; 5332 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; 5333 mcp->buf_size = mreq->transfer_size; 5334 5335 rval = qla2x00_mailbox_command(vha, mcp); 5336 5337 if (rval != QLA_SUCCESS) { 5338 ql_dbg(ql_dbg_mbx, vha, 0x10fb, 
5339 "Failed=%x mb[0]=%x mb[1]=%x.\n", 5340 rval, mcp->mb[0], mcp->mb[1]); 5341 } else { 5342 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fc, 5343 "Done %s.\n", __func__); 5344 } 5345 5346 /* Copy mailbox information */ 5347 memcpy(mresp, mcp->mb, 64); 5348 return rval; 5349 } 5350 5351 int 5352 qla84xx_reset_chip(scsi_qla_host_t *vha, uint16_t enable_diagnostic) 5353 { 5354 int rval; 5355 mbx_cmd_t mc; 5356 mbx_cmd_t *mcp = &mc; 5357 5358 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fd, 5359 "Entered %s enable_diag=%d.\n", __func__, enable_diagnostic); 5360 5361 mcp->mb[0] = MBC_ISP84XX_RESET; 5362 mcp->mb[1] = enable_diagnostic; 5363 mcp->out_mb = MBX_1|MBX_0; 5364 mcp->in_mb = MBX_1|MBX_0; 5365 mcp->tov = MBX_TOV_SECONDS; 5366 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; 5367 rval = qla2x00_mailbox_command(vha, mcp); 5368 5369 if (rval != QLA_SUCCESS) 5370 ql_dbg(ql_dbg_mbx, vha, 0x10fe, "Failed=%x.\n", rval); 5371 else 5372 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ff, 5373 "Done %s.\n", __func__); 5374 5375 return rval; 5376 } 5377 5378 int 5379 qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data) 5380 { 5381 int rval; 5382 mbx_cmd_t mc; 5383 mbx_cmd_t *mcp = &mc; 5384 5385 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1100, 5386 "Entered %s.\n", __func__); 5387 5388 if (!IS_FWI2_CAPABLE(vha->hw)) 5389 return QLA_FUNCTION_FAILED; 5390 5391 mcp->mb[0] = MBC_WRITE_RAM_WORD_EXTENDED; 5392 mcp->mb[1] = LSW(risc_addr); 5393 mcp->mb[2] = LSW(data); 5394 mcp->mb[3] = MSW(data); 5395 mcp->mb[8] = MSW(risc_addr); 5396 mcp->out_mb = MBX_8|MBX_3|MBX_2|MBX_1|MBX_0; 5397 mcp->in_mb = MBX_1|MBX_0; 5398 mcp->tov = MBX_TOV_SECONDS; 5399 mcp->flags = 0; 5400 rval = qla2x00_mailbox_command(vha, mcp); 5401 if (rval != QLA_SUCCESS) { 5402 ql_dbg(ql_dbg_mbx, vha, 0x1101, 5403 "Failed=%x mb[0]=%x mb[1]=%x.\n", 5404 rval, mcp->mb[0], mcp->mb[1]); 5405 } else { 5406 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1102, 5407 "Done %s.\n", __func__); 5408 } 5409 5410 return rval; 5411 } 5412 5413 int 5414 qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb) 5415 { 5416 int rval; 5417 uint32_t stat, timer; 5418 uint16_t mb0 = 0; 5419 struct qla_hw_data *ha = vha->hw; 5420 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 5421 5422 rval = QLA_SUCCESS; 5423 5424 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1103, 5425 "Entered %s.\n", __func__); 5426 5427 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); 5428 5429 /* Write the MBC data to the registers */ 5430 wrt_reg_word(®->mailbox0, MBC_WRITE_MPI_REGISTER); 5431 wrt_reg_word(®->mailbox1, mb[0]); 5432 wrt_reg_word(®->mailbox2, mb[1]); 5433 wrt_reg_word(®->mailbox3, mb[2]); 5434 wrt_reg_word(®->mailbox4, mb[3]); 5435 5436 wrt_reg_dword(®->hccr, HCCRX_SET_HOST_INT); 5437 5438 /* Poll for MBC interrupt */ 5439 for (timer = 6000000; timer; timer--) { 5440 /* Check for pending interrupts. 
*/ 5441 stat = rd_reg_dword(®->host_status); 5442 if (stat & HSRX_RISC_INT) { 5443 stat &= 0xff; 5444 5445 if (stat == 0x1 || stat == 0x2 || 5446 stat == 0x10 || stat == 0x11) { 5447 set_bit(MBX_INTERRUPT, 5448 &ha->mbx_cmd_flags); 5449 mb0 = rd_reg_word(®->mailbox0); 5450 wrt_reg_dword(®->hccr, 5451 HCCRX_CLR_RISC_INT); 5452 rd_reg_dword(®->hccr); 5453 break; 5454 } 5455 } 5456 udelay(5); 5457 } 5458 5459 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) 5460 rval = mb0 & MBS_MASK; 5461 else 5462 rval = QLA_FUNCTION_FAILED; 5463 5464 if (rval != QLA_SUCCESS) { 5465 ql_dbg(ql_dbg_mbx, vha, 0x1104, 5466 "Failed=%x mb[0]=%x.\n", rval, mb[0]); 5467 } else { 5468 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1105, 5469 "Done %s.\n", __func__); 5470 } 5471 5472 return rval; 5473 } 5474 5475 /* Set the specified data rate */ 5476 int 5477 qla2x00_set_data_rate(scsi_qla_host_t *vha, uint16_t mode) 5478 { 5479 int rval; 5480 mbx_cmd_t mc; 5481 mbx_cmd_t *mcp = &mc; 5482 struct qla_hw_data *ha = vha->hw; 5483 uint16_t val; 5484 5485 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106, 5486 "Entered %s speed:0x%x mode:0x%x.\n", __func__, ha->set_data_rate, 5487 mode); 5488 5489 if (!IS_FWI2_CAPABLE(ha)) 5490 return QLA_FUNCTION_FAILED; 5491 5492 memset(mcp, 0, sizeof(*mcp)); 5493 switch (ha->set_data_rate) { 5494 case PORT_SPEED_AUTO: 5495 case PORT_SPEED_4GB: 5496 case PORT_SPEED_8GB: 5497 case PORT_SPEED_16GB: 5498 case PORT_SPEED_32GB: 5499 val = ha->set_data_rate; 5500 break; 5501 default: 5502 ql_log(ql_log_warn, vha, 0x1199, 5503 "Unrecognized speed setting:%d. Setting Autoneg\n", 5504 ha->set_data_rate); 5505 val = ha->set_data_rate = PORT_SPEED_AUTO; 5506 break; 5507 } 5508 5509 mcp->mb[0] = MBC_DATA_RATE; 5510 mcp->mb[1] = mode; 5511 mcp->mb[2] = val; 5512 5513 mcp->out_mb = MBX_2|MBX_1|MBX_0; 5514 mcp->in_mb = MBX_2|MBX_1|MBX_0; 5515 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) 5516 mcp->in_mb |= MBX_4|MBX_3; 5517 mcp->tov = MBX_TOV_SECONDS; 5518 mcp->flags = 0; 5519 rval = qla2x00_mailbox_command(vha, mcp); 5520 if (rval != QLA_SUCCESS) { 5521 ql_dbg(ql_dbg_mbx, vha, 0x1107, 5522 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5523 } else { 5524 if (mcp->mb[1] != 0x7) 5525 ql_dbg(ql_dbg_mbx, vha, 0x1179, 5526 "Speed set:0x%x\n", mcp->mb[1]); 5527 5528 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108, 5529 "Done %s.\n", __func__); 5530 } 5531 5532 return rval; 5533 } 5534 5535 int 5536 qla2x00_get_data_rate(scsi_qla_host_t *vha) 5537 { 5538 int rval; 5539 mbx_cmd_t mc; 5540 mbx_cmd_t *mcp = &mc; 5541 struct qla_hw_data *ha = vha->hw; 5542 5543 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106, 5544 "Entered %s.\n", __func__); 5545 5546 if (!IS_FWI2_CAPABLE(ha)) 5547 return QLA_FUNCTION_FAILED; 5548 5549 mcp->mb[0] = MBC_DATA_RATE; 5550 mcp->mb[1] = QLA_GET_DATA_RATE; 5551 mcp->out_mb = MBX_1|MBX_0; 5552 mcp->in_mb = MBX_2|MBX_1|MBX_0; 5553 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) 5554 mcp->in_mb |= MBX_3; 5555 mcp->tov = MBX_TOV_SECONDS; 5556 mcp->flags = 0; 5557 rval = qla2x00_mailbox_command(vha, mcp); 5558 if (rval != QLA_SUCCESS) { 5559 ql_dbg(ql_dbg_mbx, vha, 0x1107, 5560 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5561 } else { 5562 if (mcp->mb[1] != 0x7) 5563 ha->link_data_rate = mcp->mb[1]; 5564 5565 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 5566 if (mcp->mb[4] & BIT_0) 5567 ql_log(ql_log_info, vha, 0x11a2, 5568 "FEC=enabled (data rate).\n"); 5569 } 5570 5571 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108, 5572 "Done %s.\n", __func__); 5573 if 
(mcp->mb[1] != 0x7) 5574 ha->link_data_rate = mcp->mb[1]; 5575 } 5576 5577 return rval; 5578 } 5579 5580 int 5581 qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb) 5582 { 5583 int rval; 5584 mbx_cmd_t mc; 5585 mbx_cmd_t *mcp = &mc; 5586 struct qla_hw_data *ha = vha->hw; 5587 5588 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1109, 5589 "Entered %s.\n", __func__); 5590 5591 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA8044(ha) && 5592 !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 5593 return QLA_FUNCTION_FAILED; 5594 mcp->mb[0] = MBC_GET_PORT_CONFIG; 5595 mcp->out_mb = MBX_0; 5596 mcp->in_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 5597 mcp->tov = MBX_TOV_SECONDS; 5598 mcp->flags = 0; 5599 5600 rval = qla2x00_mailbox_command(vha, mcp); 5601 5602 if (rval != QLA_SUCCESS) { 5603 ql_dbg(ql_dbg_mbx, vha, 0x110a, 5604 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5605 } else { 5606 /* Copy all bits to preserve original value */ 5607 memcpy(mb, &mcp->mb[1], sizeof(uint16_t) * 4); 5608 5609 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110b, 5610 "Done %s.\n", __func__); 5611 } 5612 return rval; 5613 } 5614 5615 int 5616 qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb) 5617 { 5618 int rval; 5619 mbx_cmd_t mc; 5620 mbx_cmd_t *mcp = &mc; 5621 5622 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110c, 5623 "Entered %s.\n", __func__); 5624 5625 mcp->mb[0] = MBC_SET_PORT_CONFIG; 5626 /* Copy all bits to preserve original setting */ 5627 memcpy(&mcp->mb[1], mb, sizeof(uint16_t) * 4); 5628 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 5629 mcp->in_mb = MBX_0; 5630 mcp->tov = MBX_TOV_SECONDS; 5631 mcp->flags = 0; 5632 rval = qla2x00_mailbox_command(vha, mcp); 5633 5634 if (rval != QLA_SUCCESS) { 5635 ql_dbg(ql_dbg_mbx, vha, 0x110d, 5636 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5637 } else 5638 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110e, 5639 "Done %s.\n", __func__); 5640 5641 return rval; 5642 } 5643 5644 5645 int 5646 qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority, 5647 uint16_t *mb) 5648 { 5649 int rval; 5650 mbx_cmd_t mc; 5651 mbx_cmd_t *mcp = &mc; 5652 struct qla_hw_data *ha = vha->hw; 5653 5654 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110f, 5655 "Entered %s.\n", __func__); 5656 5657 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha)) 5658 return QLA_FUNCTION_FAILED; 5659 5660 mcp->mb[0] = MBC_PORT_PARAMS; 5661 mcp->mb[1] = loop_id; 5662 if (ha->flags.fcp_prio_enabled) 5663 mcp->mb[2] = BIT_1; 5664 else 5665 mcp->mb[2] = BIT_2; 5666 mcp->mb[4] = priority & 0xf; 5667 mcp->mb[9] = vha->vp_idx; 5668 mcp->out_mb = MBX_9|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 5669 mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0; 5670 mcp->tov = MBX_TOV_SECONDS; 5671 mcp->flags = 0; 5672 rval = qla2x00_mailbox_command(vha, mcp); 5673 if (mb != NULL) { 5674 mb[0] = mcp->mb[0]; 5675 mb[1] = mcp->mb[1]; 5676 mb[3] = mcp->mb[3]; 5677 mb[4] = mcp->mb[4]; 5678 } 5679 5680 if (rval != QLA_SUCCESS) { 5681 ql_dbg(ql_dbg_mbx, vha, 0x10cd, "Failed=%x.\n", rval); 5682 } else { 5683 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10cc, 5684 "Done %s.\n", __func__); 5685 } 5686 5687 return rval; 5688 } 5689 5690 int 5691 qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp) 5692 { 5693 int rval = QLA_FUNCTION_FAILED; 5694 struct qla_hw_data *ha = vha->hw; 5695 uint8_t byte; 5696 5697 if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha)) { 5698 ql_dbg(ql_dbg_mbx, vha, 0x1150, 5699 "Thermal not supported by this card.\n"); 5700 return rval; 5701 } 5702 5703 if (IS_QLA25XX(ha)) { 5704 if (ha->pdev->subsystem_vendor 
== PCI_VENDOR_ID_QLOGIC && 5705 ha->pdev->subsystem_device == 0x0175) { 5706 rval = qla2x00_read_sfp(vha, 0, &byte, 5707 0x98, 0x1, 1, BIT_13|BIT_0); 5708 *temp = byte; 5709 return rval; 5710 } 5711 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP && 5712 ha->pdev->subsystem_device == 0x338e) { 5713 rval = qla2x00_read_sfp(vha, 0, &byte, 5714 0x98, 0x1, 1, BIT_15|BIT_14|BIT_0); 5715 *temp = byte; 5716 return rval; 5717 } 5718 ql_dbg(ql_dbg_mbx, vha, 0x10c9, 5719 "Thermal not supported by this card.\n"); 5720 return rval; 5721 } 5722 5723 if (IS_QLA82XX(ha)) { 5724 *temp = qla82xx_read_temperature(vha); 5725 rval = QLA_SUCCESS; 5726 return rval; 5727 } else if (IS_QLA8044(ha)) { 5728 *temp = qla8044_read_temperature(vha); 5729 rval = QLA_SUCCESS; 5730 return rval; 5731 } 5732 5733 rval = qla2x00_read_asic_temperature(vha, temp); 5734 return rval; 5735 } 5736 5737 int 5738 qla82xx_mbx_intr_enable(scsi_qla_host_t *vha) 5739 { 5740 int rval; 5741 struct qla_hw_data *ha = vha->hw; 5742 mbx_cmd_t mc; 5743 mbx_cmd_t *mcp = &mc; 5744 5745 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1017, 5746 "Entered %s.\n", __func__); 5747 5748 if (!IS_FWI2_CAPABLE(ha)) 5749 return QLA_FUNCTION_FAILED; 5750 5751 memset(mcp, 0, sizeof(mbx_cmd_t)); 5752 mcp->mb[0] = MBC_TOGGLE_INTERRUPT; 5753 mcp->mb[1] = 1; 5754 5755 mcp->out_mb = MBX_1|MBX_0; 5756 mcp->in_mb = MBX_0; 5757 mcp->tov = MBX_TOV_SECONDS; 5758 mcp->flags = 0; 5759 5760 rval = qla2x00_mailbox_command(vha, mcp); 5761 if (rval != QLA_SUCCESS) { 5762 ql_dbg(ql_dbg_mbx, vha, 0x1016, 5763 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5764 } else { 5765 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100e, 5766 "Done %s.\n", __func__); 5767 } 5768 5769 return rval; 5770 } 5771 5772 int 5773 qla82xx_mbx_intr_disable(scsi_qla_host_t *vha) 5774 { 5775 int rval; 5776 struct qla_hw_data *ha = vha->hw; 5777 mbx_cmd_t mc; 5778 mbx_cmd_t *mcp = &mc; 5779 5780 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100d, 5781 "Entered %s.\n", __func__); 5782 5783 if (!IS_P3P_TYPE(ha)) 5784 return QLA_FUNCTION_FAILED; 5785 5786 memset(mcp, 0, sizeof(mbx_cmd_t)); 5787 mcp->mb[0] = MBC_TOGGLE_INTERRUPT; 5788 mcp->mb[1] = 0; 5789 5790 mcp->out_mb = MBX_1|MBX_0; 5791 mcp->in_mb = MBX_0; 5792 mcp->tov = MBX_TOV_SECONDS; 5793 mcp->flags = 0; 5794 5795 rval = qla2x00_mailbox_command(vha, mcp); 5796 if (rval != QLA_SUCCESS) { 5797 ql_dbg(ql_dbg_mbx, vha, 0x100c, 5798 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5799 } else { 5800 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100b, 5801 "Done %s.\n", __func__); 5802 } 5803 5804 return rval; 5805 } 5806 5807 int 5808 qla82xx_md_get_template_size(scsi_qla_host_t *vha) 5809 { 5810 struct qla_hw_data *ha = vha->hw; 5811 mbx_cmd_t mc; 5812 mbx_cmd_t *mcp = &mc; 5813 int rval = QLA_FUNCTION_FAILED; 5814 5815 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111f, 5816 "Entered %s.\n", __func__); 5817 5818 memset(mcp->mb, 0 , sizeof(mcp->mb)); 5819 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); 5820 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); 5821 mcp->mb[2] = LSW(RQST_TMPLT_SIZE); 5822 mcp->mb[3] = MSW(RQST_TMPLT_SIZE); 5823 5824 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 5825 mcp->in_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8| 5826 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 5827 5828 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; 5829 mcp->tov = MBX_TOV_SECONDS; 5830 rval = qla2x00_mailbox_command(vha, mcp); 5831 5832 /* Always copy back return mailbox values. 
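On failure the 32-bit status (mb[1]:mb[0]) and subcode (mb[3]:mb[2]) are
logged; on success mb[3]:mb[2] carries the minidump template size, which
must be non-zero for the call to be treated as successful.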
*/ 5833 if (rval != QLA_SUCCESS) { 5834 ql_dbg(ql_dbg_mbx, vha, 0x1120, 5835 "mailbox command FAILED=0x%x, subcode=%x.\n", 5836 (mcp->mb[1] << 16) | mcp->mb[0], 5837 (mcp->mb[3] << 16) | mcp->mb[2]); 5838 } else { 5839 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1121, 5840 "Done %s.\n", __func__); 5841 ha->md_template_size = ((mcp->mb[3] << 16) | mcp->mb[2]); 5842 if (!ha->md_template_size) { 5843 ql_dbg(ql_dbg_mbx, vha, 0x1122, 5844 "Null template size obtained.\n"); 5845 rval = QLA_FUNCTION_FAILED; 5846 } 5847 } 5848 return rval; 5849 } 5850 5851 int 5852 qla82xx_md_get_template(scsi_qla_host_t *vha) 5853 { 5854 struct qla_hw_data *ha = vha->hw; 5855 mbx_cmd_t mc; 5856 mbx_cmd_t *mcp = &mc; 5857 int rval = QLA_FUNCTION_FAILED; 5858 5859 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1123, 5860 "Entered %s.\n", __func__); 5861 5862 ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev, 5863 ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL); 5864 if (!ha->md_tmplt_hdr) { 5865 ql_log(ql_log_warn, vha, 0x1124, 5866 "Unable to allocate memory for Minidump template.\n"); 5867 return rval; 5868 } 5869 5870 memset(mcp->mb, 0 , sizeof(mcp->mb)); 5871 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); 5872 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); 5873 mcp->mb[2] = LSW(RQST_TMPLT); 5874 mcp->mb[3] = MSW(RQST_TMPLT); 5875 mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma)); 5876 mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma)); 5877 mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma)); 5878 mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma)); 5879 mcp->mb[8] = LSW(ha->md_template_size); 5880 mcp->mb[9] = MSW(ha->md_template_size); 5881 5882 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; 5883 mcp->tov = MBX_TOV_SECONDS; 5884 mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8| 5885 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 5886 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0; 5887 rval = qla2x00_mailbox_command(vha, mcp); 5888 5889 if (rval != QLA_SUCCESS) { 5890 ql_dbg(ql_dbg_mbx, vha, 0x1125, 5891 "mailbox command FAILED=0x%x, subcode=%x.\n", 5892 ((mcp->mb[1] << 16) | mcp->mb[0]), 5893 ((mcp->mb[3] << 16) | mcp->mb[2])); 5894 } else 5895 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1126, 5896 "Done %s.\n", __func__); 5897 return rval; 5898 } 5899 5900 int 5901 qla8044_md_get_template(scsi_qla_host_t *vha) 5902 { 5903 struct qla_hw_data *ha = vha->hw; 5904 mbx_cmd_t mc; 5905 mbx_cmd_t *mcp = &mc; 5906 int rval = QLA_FUNCTION_FAILED; 5907 int offset = 0, size = MINIDUMP_SIZE_36K; 5908 5909 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11f, 5910 "Entered %s.\n", __func__); 5911 5912 ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev, 5913 ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL); 5914 if (!ha->md_tmplt_hdr) { 5915 ql_log(ql_log_warn, vha, 0xb11b, 5916 "Unable to allocate memory for Minidump template.\n"); 5917 return rval; 5918 } 5919 5920 memset(mcp->mb, 0 , sizeof(mcp->mb)); 5921 while (offset < ha->md_template_size) { 5922 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); 5923 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); 5924 mcp->mb[2] = LSW(RQST_TMPLT); 5925 mcp->mb[3] = MSW(RQST_TMPLT); 5926 mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma + offset)); 5927 mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma + offset)); 5928 mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma + offset)); 5929 mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma + offset)); 5930 mcp->mb[8] = LSW(size); 5931 mcp->mb[9] = MSW(size); 5932 mcp->mb[10] = offset & 0x0000FFFF; 5933 mcp->mb[11] = offset & 0xFFFF0000; 5934 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; 
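/* Each pass requests the next MINIDUMP_SIZE_36K chunk of the template,
 * with the running byte offset supplied through mb[10]/mb[11] and the
 * DMA destination advanced by the same offset. */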
5935 mcp->tov = MBX_TOV_SECONDS; 5936 mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8| 5937 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 5938 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0; 5939 rval = qla2x00_mailbox_command(vha, mcp); 5940 5941 if (rval != QLA_SUCCESS) { 5942 ql_dbg(ql_dbg_mbx, vha, 0xb11c, 5943 "mailbox command FAILED=0x%x, subcode=%x.\n", 5944 ((mcp->mb[1] << 16) | mcp->mb[0]), 5945 ((mcp->mb[3] << 16) | mcp->mb[2])); 5946 return rval; 5947 } else 5948 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11d, 5949 "Done %s.\n", __func__); 5950 offset = offset + size; 5951 } 5952 return rval; 5953 } 5954 5955 int 5956 qla81xx_set_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg) 5957 { 5958 int rval; 5959 struct qla_hw_data *ha = vha->hw; 5960 mbx_cmd_t mc; 5961 mbx_cmd_t *mcp = &mc; 5962 5963 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha)) 5964 return QLA_FUNCTION_FAILED; 5965 5966 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1133, 5967 "Entered %s.\n", __func__); 5968 5969 memset(mcp, 0, sizeof(mbx_cmd_t)); 5970 mcp->mb[0] = MBC_SET_LED_CONFIG; 5971 mcp->mb[1] = led_cfg[0]; 5972 mcp->mb[2] = led_cfg[1]; 5973 if (IS_QLA8031(ha)) { 5974 mcp->mb[3] = led_cfg[2]; 5975 mcp->mb[4] = led_cfg[3]; 5976 mcp->mb[5] = led_cfg[4]; 5977 mcp->mb[6] = led_cfg[5]; 5978 } 5979 5980 mcp->out_mb = MBX_2|MBX_1|MBX_0; 5981 if (IS_QLA8031(ha)) 5982 mcp->out_mb |= MBX_6|MBX_5|MBX_4|MBX_3; 5983 mcp->in_mb = MBX_0; 5984 mcp->tov = MBX_TOV_SECONDS; 5985 mcp->flags = 0; 5986 5987 rval = qla2x00_mailbox_command(vha, mcp); 5988 if (rval != QLA_SUCCESS) { 5989 ql_dbg(ql_dbg_mbx, vha, 0x1134, 5990 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5991 } else { 5992 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1135, 5993 "Done %s.\n", __func__); 5994 } 5995 5996 return rval; 5997 } 5998 5999 int 6000 qla81xx_get_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg) 6001 { 6002 int rval; 6003 struct qla_hw_data *ha = vha->hw; 6004 mbx_cmd_t mc; 6005 mbx_cmd_t *mcp = &mc; 6006 6007 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha)) 6008 return QLA_FUNCTION_FAILED; 6009 6010 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1136, 6011 "Entered %s.\n", __func__); 6012 6013 memset(mcp, 0, sizeof(mbx_cmd_t)); 6014 mcp->mb[0] = MBC_GET_LED_CONFIG; 6015 6016 mcp->out_mb = MBX_0; 6017 mcp->in_mb = MBX_2|MBX_1|MBX_0; 6018 if (IS_QLA8031(ha)) 6019 mcp->in_mb |= MBX_6|MBX_5|MBX_4|MBX_3; 6020 mcp->tov = MBX_TOV_SECONDS; 6021 mcp->flags = 0; 6022 6023 rval = qla2x00_mailbox_command(vha, mcp); 6024 if (rval != QLA_SUCCESS) { 6025 ql_dbg(ql_dbg_mbx, vha, 0x1137, 6026 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 6027 } else { 6028 led_cfg[0] = mcp->mb[1]; 6029 led_cfg[1] = mcp->mb[2]; 6030 if (IS_QLA8031(ha)) { 6031 led_cfg[2] = mcp->mb[3]; 6032 led_cfg[3] = mcp->mb[4]; 6033 led_cfg[4] = mcp->mb[5]; 6034 led_cfg[5] = mcp->mb[6]; 6035 } 6036 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1138, 6037 "Done %s.\n", __func__); 6038 } 6039 6040 return rval; 6041 } 6042 6043 int 6044 qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable) 6045 { 6046 int rval; 6047 struct qla_hw_data *ha = vha->hw; 6048 mbx_cmd_t mc; 6049 mbx_cmd_t *mcp = &mc; 6050 6051 if (!IS_P3P_TYPE(ha)) 6052 return QLA_FUNCTION_FAILED; 6053 6054 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1127, 6055 "Entered %s.\n", __func__); 6056 6057 memset(mcp, 0, sizeof(mbx_cmd_t)); 6058 mcp->mb[0] = MBC_SET_LED_CONFIG; 6059 if (enable) 6060 mcp->mb[7] = 0xE; 6061 else 6062 mcp->mb[7] = 0xD; 6063 6064 mcp->out_mb = MBX_7|MBX_0; 6065 mcp->in_mb = MBX_0; 6066 mcp->tov = MBX_TOV_SECONDS; 6067 mcp->flags = 0; 6068 6069 rval = 
int
qla83xx_wr_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1130,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_WRITE_REMOTE_REG;
	mcp->mb[1] = LSW(reg);
	mcp->mb[2] = MSW(reg);
	mcp->mb[3] = LSW(data);
	mcp->mb[4] = MSW(data);
	mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;

	mcp->in_mb = MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1131,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1132,
		    "Done %s.\n", __func__);
	}

	return rval;
}

int
qla2x00_port_logout(scsi_qla_host_t *vha, struct fc_port *fcport)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113b,
		    "Implicit LOGO Unsupported.\n");
		return QLA_FUNCTION_FAILED;
	}

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113c,
	    "Entering %s.\n", __func__);

	/* Perform Implicit LOGO. */
	mcp->mb[0] = MBC_PORT_LOGOUT;
	mcp->mb[1] = fcport->loop_id;
	mcp->mb[10] = BIT_15;
	mcp->out_mb = MBX_10|MBX_1|MBX_0;
	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS)
		ql_dbg(ql_dbg_mbx, vha, 0x113d,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	else
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113e,
		    "Done %s.\n", __func__);

	return rval;
}

int
qla83xx_rd_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t *data)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;
	unsigned long retry_max_time = jiffies + (2 * HZ);

	if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx, vha, 0x114b, "Entered %s.\n", __func__);

retry_rd_reg:
	mcp->mb[0] = MBC_READ_REMOTE_REG;
	mcp->mb[1] = LSW(reg);
	mcp->mb[2] = MSW(reg);
	mcp->out_mb = MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x114c,
		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
		    rval, mcp->mb[0], mcp->mb[1]);
	} else {
		*data = (mcp->mb[3] | (mcp->mb[4] << 16));
		if (*data == QLA8XXX_BAD_VALUE) {
			/*
			 * During soft-reset CAMRAM register reads might
			 * return 0xbad0bad0. So retry for MAX of 2 sec
			 * while reading camram registers.
			 */
			if (time_after(jiffies, retry_max_time)) {
				ql_dbg(ql_dbg_mbx, vha, 0x1141,
				    "Failure to read CAMRAM register. "
				    "data=0x%x.\n", *data);
				return QLA_FUNCTION_FAILED;
			}
			msleep(100);
			goto retry_rd_reg;
		}
		ql_dbg(ql_dbg_mbx, vha, 0x1142, "Done %s.\n", __func__);
	}

	return rval;
}

int
qla83xx_restart_nic_firmware(scsi_qla_host_t *vha)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA83XX(ha))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx, vha, 0x1143, "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_RESTART_NIC_FIRMWARE;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1144,
		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
		    rval, mcp->mb[0], mcp->mb[1]);
		qla2xxx_dump_fw(vha);
	} else {
		ql_dbg(ql_dbg_mbx, vha, 0x1145, "Done %s.\n", __func__);
	}

	return rval;
}

int
qla83xx_access_control(scsi_qla_host_t *vha, uint16_t options,
    uint32_t start_addr, uint32_t end_addr, uint16_t *sector_size)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	uint8_t subcode = (uint8_t)options;
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA8031(ha))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx, vha, 0x1146, "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_SET_ACCESS_CONTROL;
	mcp->mb[1] = options;
	mcp->out_mb = MBX_1|MBX_0;
	if (subcode & BIT_2) {
		mcp->mb[2] = LSW(start_addr);
		mcp->mb[3] = MSW(start_addr);
		mcp->mb[4] = LSW(end_addr);
		mcp->mb[5] = MSW(end_addr);
		mcp->out_mb |= MBX_5|MBX_4|MBX_3|MBX_2;
	}
	mcp->in_mb = MBX_2|MBX_1|MBX_0;
	if (!(subcode & (BIT_2 | BIT_5)))
		mcp->in_mb |= MBX_4|MBX_3;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1147,
		    "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[4]=%x.\n",
		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3],
		    mcp->mb[4]);
		qla2xxx_dump_fw(vha);
	} else {
		if (subcode & BIT_5)
			*sector_size = mcp->mb[1];
		else if (subcode & (BIT_6 | BIT_7)) {
			ql_dbg(ql_dbg_mbx, vha, 0x1148,
			    "Driver-lock id=%x%x", mcp->mb[4], mcp->mb[3]);
		} else if (subcode & (BIT_3 | BIT_4)) {
			ql_dbg(ql_dbg_mbx, vha, 0x1149,
			    "Flash-lock id=%x%x", mcp->mb[4], mcp->mb[3]);
		}
		ql_dbg(ql_dbg_mbx, vha, 0x114a, "Done %s.\n", __func__);
	}

	return rval;
}

int
qla2x00_dump_mctp_data(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
    uint32_t size)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	if (!IS_MCTP_CAPABLE(vha->hw))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114f,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
	mcp->mb[1] = LSW(addr);
	mcp->mb[2] = MSW(req_dma);
	mcp->mb[3] = LSW(req_dma);
	mcp->mb[4] = MSW(size);
	mcp->mb[5] = LSW(size);
	mcp->mb[6] = MSW(MSD(req_dma));
	mcp->mb[7] = LSW(MSD(req_dma));
	mcp->mb[8] = MSW(addr);
	/* Set the RAM ID valid bit; for MCTP the RAM ID is 0x40. */
	mcp->mb[10] = BIT_7 | 0x40;

	mcp->out_mb = MBX_10|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|
	    MBX_0;

	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x114e,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114d,
		    "Done %s.\n", __func__);
	}

	return rval;
}

int
qla26xx_dport_diagnostics(scsi_qla_host_t *vha,
    void *dd_buf, uint size, uint options)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	dma_addr_t dd_dma;

	if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) &&
	    !IS_QLA28XX(vha->hw))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x119f,
	    "Entered %s.\n", __func__);

	dd_dma = dma_map_single(&vha->hw->pdev->dev,
	    dd_buf, size, DMA_FROM_DEVICE);
	if (dma_mapping_error(&vha->hw->pdev->dev, dd_dma)) {
		ql_log(ql_log_warn, vha, 0x1194, "Failed to map dma buffer.\n");
		return QLA_MEMORY_ALLOC_FAILED;
	}

	memset(dd_buf, 0, size);

	mcp->mb[0] = MBC_DPORT_DIAGNOSTICS;
	mcp->mb[1] = options;
	mcp->mb[2] = MSW(LSD(dd_dma));
	mcp->mb[3] = LSW(LSD(dd_dma));
	mcp->mb[6] = MSW(MSD(dd_dma));
	mcp->mb[7] = LSW(MSD(dd_dma));
	mcp->mb[8] = size;
	mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->buf_size = size;
	mcp->flags = MBX_DMA_IN;
	mcp->tov = MBX_TOV_SECONDS * 4;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1195, "Failed=%x.\n", rval);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1196,
		    "Done %s.\n", __func__);
	}

	dma_unmap_single(&vha->hw->pdev->dev, dd_dma,
	    size, DMA_FROM_DEVICE);

	return rval;
}

static void qla2x00_async_mb_sp_done(srb_t *sp, int res)
{
	sp->u.iocb_cmd.u.mbx.rc = res;

	complete(&sp->u.iocb_cmd.u.mbx.comp);
	/* Don't free sp here. Let the caller do the free. */
}

/*
 * This mailbox uses the iocb interface to send the MB command.
 * This allows non-critical (non chip-setup) commands to go
 * out in parallel.
 */
int qla24xx_send_mb_cmd(struct scsi_qla_host *vha, mbx_cmd_t *mcp)
{
	int rval = QLA_FUNCTION_FAILED;
	srb_t *sp;
	struct srb_iocb *c;

	if (!vha->hw->flags.fw_started)
		goto done;

	sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
	if (!sp)
		goto done;

	sp->type = SRB_MB_IOCB;
	sp->name = mb_to_str(mcp->mb[0]);

	c = &sp->u.iocb_cmd;
	c->timeout = qla2x00_async_iocb_timeout;
	init_completion(&c->u.mbx.comp);

	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	memcpy(sp->u.iocb_cmd.u.mbx.out_mb, mcp->mb, SIZEOF_IOCB_MB_REG);

	sp->done = qla2x00_async_mb_sp_done;

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1018,
		    "%s: %s Failed submission. %x.\n",
		    __func__, sp->name, rval);
		goto done_free_sp;
	}

	ql_dbg(ql_dbg_mbx, vha, 0x113f, "MB:%s hndl %x submitted\n",
	    sp->name, sp->handle);

	wait_for_completion(&c->u.mbx.comp);
	memcpy(mcp->mb, sp->u.iocb_cmd.u.mbx.in_mb, SIZEOF_IOCB_MB_REG);

	rval = c->u.mbx.rc;
	switch (rval) {
	case QLA_FUNCTION_TIMEOUT:
		ql_dbg(ql_dbg_mbx, vha, 0x1140, "%s: %s Timeout. %x.\n",
		    __func__, sp->name, rval);
		break;
	case QLA_SUCCESS:
		ql_dbg(ql_dbg_mbx, vha, 0x119d, "%s: %s done.\n",
		    __func__, sp->name);
		break;
	default:
		ql_dbg(ql_dbg_mbx, vha, 0x119e, "%s: %s Failed. %x.\n",
		    __func__, sp->name, rval);
		break;
	}

done_free_sp:
	sp->free(sp);
done:
	return rval;
}

/*
 * qla24xx_gpdb_wait
 * NOTE: Do not call this routine from DPC thread
 */
int qla24xx_gpdb_wait(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
{
	int rval = QLA_FUNCTION_FAILED;
	dma_addr_t pd_dma;
	struct port_database_24xx *pd;
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t mc;

	if (!vha->hw->flags.fw_started)
		goto done;

	pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
	if (pd == NULL) {
		ql_log(ql_log_warn, vha, 0xd047,
		    "Failed to allocate port database structure.\n");
		goto done_free_sp;
	}

	memset(&mc, 0, sizeof(mc));
	mc.mb[0] = MBC_GET_PORT_DATABASE;
	mc.mb[1] = fcport->loop_id;
	mc.mb[2] = MSW(pd_dma);
	mc.mb[3] = LSW(pd_dma);
	mc.mb[6] = MSW(MSD(pd_dma));
	mc.mb[7] = LSW(MSD(pd_dma));
	mc.mb[9] = vha->vp_idx;
	mc.mb[10] = opt;

	rval = qla24xx_send_mb_cmd(vha, &mc);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1193,
		    "%s: %8phC fail\n", __func__, fcport->port_name);
		goto done_free_sp;
	}

	rval = __qla24xx_parse_gpdb(vha, fcport, pd);

	ql_dbg(ql_dbg_mbx, vha, 0x1197, "%s: %8phC done\n",
	    __func__, fcport->port_name);

done_free_sp:
	if (pd)
		dma_pool_free(ha->s_dma_pool, pd, pd_dma);
done:
	return rval;
}

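/*
 * __qla24xx_parse_gpdb
 *	Parse a Get Port Database entry: verify the PRLI-complete login
 *	state, then copy the node/port names, port_id, port type and
 *	class-of-service information into *fcport.
 */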
int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport,
    struct port_database_24xx *pd)
{
	int rval = QLA_SUCCESS;
	uint64_t zero = 0;
	u8 current_login_state, last_login_state;

	if (NVME_TARGET(vha->hw, fcport)) {
		current_login_state = pd->current_login_state >> 4;
		last_login_state = pd->last_login_state >> 4;
	} else {
		current_login_state = pd->current_login_state & 0xf;
		last_login_state = pd->last_login_state & 0xf;
	}

	/* Check for logged in state. */
	if (current_login_state != PDS_PRLI_COMPLETE) {
		ql_dbg(ql_dbg_mbx, vha, 0x119a,
		    "Unable to verify login-state (%x/%x) for loop_id %x.\n",
		    current_login_state, last_login_state, fcport->loop_id);
		rval = QLA_FUNCTION_FAILED;
		goto gpd_error_out;
	}

	if (fcport->loop_id == FC_NO_LOOP_ID ||
	    (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
	     memcmp(fcport->port_name, pd->port_name, 8))) {
		/* We lost the device mid way. */
		rval = QLA_NOT_LOGGED_IN;
		goto gpd_error_out;
	}

	/* Names are little-endian. */
	memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
	memcpy(fcport->port_name, pd->port_name, WWN_SIZE);

	/* Get port_id of device. */
	fcport->d_id.b.domain = pd->port_id[0];
	fcport->d_id.b.area = pd->port_id[1];
	fcport->d_id.b.al_pa = pd->port_id[2];
	fcport->d_id.b.rsvd_1 = 0;

	if (NVME_TARGET(vha->hw, fcport)) {
		fcport->port_type = FCT_NVME;
		if ((pd->prli_svc_param_word_3[0] & BIT_5) == 0)
			fcport->port_type |= FCT_NVME_INITIATOR;
		if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
			fcport->port_type |= FCT_NVME_TARGET;
		if ((pd->prli_svc_param_word_3[0] & BIT_3) == 0)
			fcport->port_type |= FCT_NVME_DISCOVERY;
	} else {
		/* If not target must be initiator or unknown type. */
		if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
			fcport->port_type = FCT_INITIATOR;
		else
			fcport->port_type = FCT_TARGET;
	}
	/* Passback COS information. */
	fcport->supported_classes = (pd->flags & PDF_CLASS_2) ?
	    FC_COS_CLASS2 : FC_COS_CLASS3;

	if (pd->prli_svc_param_word_3[0] & BIT_7) {
		fcport->flags |= FCF_CONF_COMP_SUPPORTED;
		fcport->conf_compl_supported = 1;
	}

gpd_error_out:
	return rval;
}

/*
 * qla24xx_gidlist_wait
 * NOTE: don't call this routine from DPC thread.
 */
int qla24xx_gidlist_wait(struct scsi_qla_host *vha,
    void *id_list, dma_addr_t id_list_dma, uint16_t *entries)
{
	int rval = QLA_FUNCTION_FAILED;
	mbx_cmd_t mc;

	if (!vha->hw->flags.fw_started)
		goto done;

	memset(&mc, 0, sizeof(mc));
	mc.mb[0] = MBC_GET_ID_LIST;
	mc.mb[2] = MSW(id_list_dma);
	mc.mb[3] = LSW(id_list_dma);
	mc.mb[6] = MSW(MSD(id_list_dma));
	mc.mb[7] = LSW(MSD(id_list_dma));
	mc.mb[8] = 0;
	mc.mb[9] = vha->vp_idx;

	rval = qla24xx_send_mb_cmd(vha, &mc);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x119b,
		    "%s: fail\n", __func__);
	} else {
		*entries = mc.mb[1];
		ql_dbg(ql_dbg_mbx, vha, 0x119c,
		    "%s: done\n", __func__);
	}
done:
	return rval;
}

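/*
 * qla27xx_set_zio_threshold / qla27xx_get_zio_threshold
 *	Set (mb[1] = 1) or query (mb[1] = 0) the firmware ZIO threshold
 *	through the MBC_GET_SET_ZIO_THRESHOLD mailbox command.
 */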
int qla27xx_set_zio_threshold(scsi_qla_host_t *vha, uint16_t value)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1200,
	    "Entered %s\n", __func__);

	memset(mcp->mb, 0, sizeof(mcp->mb));
	mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
	mcp->mb[1] = 1;
	mcp->mb[2] = value;
	mcp->out_mb = MBX_2 | MBX_1 | MBX_0;
	mcp->in_mb = MBX_2 | MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;

	rval = qla2x00_mailbox_command(vha, mcp);

	ql_dbg(ql_dbg_mbx, vha, 0x1201, "%s %x\n",
	    (rval != QLA_SUCCESS) ? "Failed" : "Done", rval);

	return rval;
}

int qla27xx_get_zio_threshold(scsi_qla_host_t *vha, uint16_t *value)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1203,
	    "Entered %s\n", __func__);

	memset(mcp->mb, 0, sizeof(mcp->mb));
	mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
	mcp->mb[1] = 0;
	mcp->out_mb = MBX_1 | MBX_0;
	mcp->in_mb = MBX_2 | MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;

	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval == QLA_SUCCESS)
		*value = mc.mb[2];

	ql_dbg(ql_dbg_mbx, vha, 0x1205, "%s %x\n",
	    (rval != QLA_SUCCESS) ? "Failed" : "Done", rval);

	return rval;
}

int
qla2x00_read_sfp_dev(struct scsi_qla_host *vha, char *buf, int count)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t iter, addr, offset;
	dma_addr_t phys_addr;
	int rval, c;
	u8 *sfp_data;

	memset(ha->sfp_data, 0, SFP_DEV_SIZE);
	addr = 0xa0;
	phys_addr = ha->sfp_data_dma;
	sfp_data = ha->sfp_data;
	offset = c = 0;

	for (iter = 0; iter < SFP_DEV_SIZE / SFP_BLOCK_SIZE; iter++) {
		if (iter == 4) {
			/* Skip to next device address. */
			addr = 0xa2;
			offset = 0;
		}

		rval = qla2x00_read_sfp(vha, phys_addr, sfp_data,
		    addr, offset, SFP_BLOCK_SIZE, BIT_1);
		if (rval != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x706d,
			    "Unable to read SFP data (%x/%x/%x).\n", rval,
			    addr, offset);

			return rval;
		}

		if (buf && (c < count)) {
			u16 sz;

			if ((count - c) >= SFP_BLOCK_SIZE)
				sz = SFP_BLOCK_SIZE;
			else
				sz = count - c;

			memcpy(buf, sfp_data, sz);
			buf += SFP_BLOCK_SIZE;
			c += sz;
		}
		phys_addr += SFP_BLOCK_SIZE;
		sfp_data += SFP_BLOCK_SIZE;
		offset += SFP_BLOCK_SIZE;
	}

	return rval;
}

int qla24xx_res_count_wait(struct scsi_qla_host *vha,
    uint16_t *out_mb, int out_mb_sz)
{
	int rval = QLA_FUNCTION_FAILED;
	mbx_cmd_t mc;

	if (!vha->hw->flags.fw_started)
		goto done;

	memset(&mc, 0, sizeof(mc));
	mc.mb[0] = MBC_GET_RESOURCE_COUNTS;

	rval = qla24xx_send_mb_cmd(vha, &mc);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0xffff,
		    "%s: fail\n", __func__);
	} else {
		if (out_mb_sz <= SIZEOF_IOCB_MB_REG)
			memcpy(out_mb, mc.mb, out_mb_sz);
		else
			memcpy(out_mb, mc.mb, SIZEOF_IOCB_MB_REG);

		ql_dbg(ql_dbg_mbx, vha, 0xffff,
		    "%s: done\n", __func__);
	}
done:
	return rval;
}

int qla28xx_secure_flash_update(scsi_qla_host_t *vha, uint16_t opts,
    uint16_t region, uint32_t len, dma_addr_t sfub_dma_addr,
    uint32_t sfub_len)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	mcp->mb[0] = MBC_SECURE_FLASH_UPDATE;
	mcp->mb[1] = opts;
	mcp->mb[2] = region;
	mcp->mb[3] = MSW(len);
	mcp->mb[4] = LSW(len);
	mcp->mb[5] = MSW(sfub_dma_addr);
	mcp->mb[6] = LSW(sfub_dma_addr);
	mcp->mb[7] = MSW(MSD(sfub_dma_addr));
	mcp->mb[8] = LSW(MSD(sfub_dma_addr));
	mcp->mb[9] = sfub_len;
	mcp->out_mb =
	    MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_2|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s(%ld): failed rval 0x%x, %x %x %x",
		    __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1],
		    mcp->mb[2]);
	}

	return rval;
}

int qla2xxx_write_remote_register(scsi_qla_host_t *vha, uint32_t addr,
    uint32_t data)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_WRITE_REMOTE_REG;
	mcp->mb[1] = LSW(addr);
	mcp->mb[2] = MSW(addr);
	mcp->mb[3] = LSW(data);
	mcp->mb[4] = MSW(data);
	mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x10e9,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
		    "Done %s.\n", __func__);
	}

	return rval;
}

int qla2xxx_read_remote_register(scsi_qla_host_t *vha, uint32_t addr,
    uint32_t *data)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_READ_REMOTE_REG;
	mcp->mb[1] = LSW(addr);
	mcp->mb[2] = MSW(addr);
	mcp->out_mb = MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	*data = (uint32_t)((((uint32_t)mcp->mb[4]) << 16) | mcp->mb[3]);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x10e9,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
		    "Done %s.\n", __func__);
	}

	return rval;
}

int
ql26xx_led_config(scsi_qla_host_t *vha, uint16_t options, uint16_t *led)
{
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	int rval;

	if (!IS_QLA2031(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx, vha, 0x7070, "Entered %s (options=%x).\n",
	    __func__, options);

	mcp->mb[0] = MBC_SET_GET_FC_LED_CONFIG;
	mcp->mb[1] = options;
	mcp->out_mb = MBX_1|MBX_0;
	mcp->in_mb = MBX_1|MBX_0;
	if (options & BIT_0) {
		if (options & BIT_1) {
			mcp->mb[2] = led[2];
			mcp->out_mb |= MBX_2;
		}
		if (options & BIT_2) {
			mcp->mb[3] = led[0];
			mcp->out_mb |= MBX_3;
		}
		if (options & BIT_3) {
			mcp->mb[4] = led[1];
			mcp->out_mb |= MBX_4;
		}
	} else {
		mcp->in_mb |= MBX_4|MBX_3|MBX_2;
	}
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval) {
		ql_dbg(ql_dbg_mbx, vha, 0x7071, "Failed %s %x (mb=%x,%x)\n",
		    __func__, rval, mcp->mb[0], mcp->mb[1]);
		return rval;
	}

	if (options & BIT_0) {
		ha->beacon_blink_led = 0;
		ql_dbg(ql_dbg_mbx, vha, 0x7072, "Done %s\n", __func__);
	} else {
		led[2] = mcp->mb[2];
		led[0] = mcp->mb[3];
		led[1] = mcp->mb[4];
		ql_dbg(ql_dbg_mbx, vha, 0x7073, "Done %s (led=%x,%x,%x)\n",
		    __func__, led[0], led[1], led[2]);
	}

	return rval;
}