1 /* 2 * QLogic Fibre Channel HBA Driver 3 * Copyright (c) 2003-2014 QLogic Corporation 4 * 5 * See LICENSE.qla2xxx for copyright and licensing details. 6 */ 7 #include "qla_def.h" 8 #include "qla_target.h" 9 10 #include <linux/delay.h> 11 #include <linux/gfp.h> 12 13 static struct mb_cmd_name { 14 uint16_t cmd; 15 const char *str; 16 } mb_str[] = { 17 {MBC_GET_PORT_DATABASE, "GPDB"}, 18 {MBC_GET_ID_LIST, "GIDList"}, 19 {MBC_GET_LINK_PRIV_STATS, "Stats"}, 20 {MBC_GET_RESOURCE_COUNTS, "ResCnt"}, 21 }; 22 23 static const char *mb_to_str(uint16_t cmd) 24 { 25 int i; 26 struct mb_cmd_name *e; 27 28 for (i = 0; i < ARRAY_SIZE(mb_str); i++) { 29 e = mb_str + i; 30 if (cmd == e->cmd) 31 return e->str; 32 } 33 return "unknown"; 34 } 35 36 static struct rom_cmd { 37 uint16_t cmd; 38 } rom_cmds[] = { 39 { MBC_LOAD_RAM }, 40 { MBC_EXECUTE_FIRMWARE }, 41 { MBC_READ_RAM_WORD }, 42 { MBC_MAILBOX_REGISTER_TEST }, 43 { MBC_VERIFY_CHECKSUM }, 44 { MBC_GET_FIRMWARE_VERSION }, 45 { MBC_LOAD_RISC_RAM }, 46 { MBC_DUMP_RISC_RAM }, 47 { MBC_LOAD_RISC_RAM_EXTENDED }, 48 { MBC_DUMP_RISC_RAM_EXTENDED }, 49 { MBC_WRITE_RAM_WORD_EXTENDED }, 50 { MBC_READ_RAM_EXTENDED }, 51 { MBC_GET_RESOURCE_COUNTS }, 52 { MBC_SET_FIRMWARE_OPTION }, 53 { MBC_MID_INITIALIZE_FIRMWARE }, 54 { MBC_GET_FIRMWARE_STATE }, 55 { MBC_GET_MEM_OFFLOAD_CNTRL_STAT }, 56 { MBC_GET_RETRY_COUNT }, 57 { MBC_TRACE_CONTROL }, 58 { MBC_INITIALIZE_MULTIQ }, 59 { MBC_IOCB_COMMAND_A64 }, 60 { MBC_GET_ADAPTER_LOOP_ID }, 61 { MBC_READ_SFP }, 62 { MBC_GET_RNID_PARAMS }, 63 { MBC_GET_SET_ZIO_THRESHOLD }, 64 }; 65 66 static int is_rom_cmd(uint16_t cmd) 67 { 68 int i; 69 struct rom_cmd *wc; 70 71 for (i = 0; i < ARRAY_SIZE(rom_cmds); i++) { 72 wc = rom_cmds + i; 73 if (wc->cmd == cmd) 74 return 1; 75 } 76 77 return 0; 78 } 79 80 /* 81 * qla2x00_mailbox_command 82 * Issue mailbox command and waits for completion. 83 * 84 * Input: 85 * ha = adapter block pointer. 86 * mcp = driver internal mbx struct pointer. 
 *
 * Output:
 *	mb[MAX_MAILBOX_REGISTER_COUNT] = returned mailbox data.
 *
 * Returns:
 *	0 : QLA_SUCCESS = cmd performed success
 *	1 : QLA_FUNCTION_FAILED (error encountered)
 *	6 : QLA_FUNCTION_TIMEOUT (timeout condition encountered)
 *
 * Context:
 *	Kernel context.
 */
static int
qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
{
	int rval, i;
	unsigned long flags = 0;
	device_reg_t *reg;
	uint8_t abort_active;
	uint8_t io_lock_on;
	uint16_t command = 0;
	uint16_t *iptr;
	uint16_t __iomem *optr;
	uint32_t cnt;
	uint32_t mboxes;
	unsigned long wait_time;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
	u32 chip_reset;


	ql_dbg(ql_dbg_mbx, vha, 0x1000, "Entered %s.\n", __func__);

	/* Bail out early on dead/failed PCI or device states. */
	if (ha->pdev->error_state > pci_channel_io_frozen) {
		ql_log(ql_log_warn, vha, 0x1001,
		    "error_state is greater than pci_channel_io_frozen, "
		    "exiting.\n");
		return QLA_FUNCTION_TIMEOUT;
	}

	if (vha->device_flags & DFLG_DEV_FAILED) {
		ql_log(ql_log_warn, vha, 0x1002,
		    "Device in failed state, exiting.\n");
		return QLA_FUNCTION_TIMEOUT;
	}

	/* if PCI error, then avoid mbx processing.*/
	if (test_bit(PFLG_DISCONNECTED, &base_vha->dpc_flags) &&
	    test_bit(UNLOADING, &base_vha->dpc_flags)) {
		ql_log(ql_log_warn, vha, 0xd04e,
		    "PCI error, exiting.\n");
		return QLA_FUNCTION_TIMEOUT;
	}

	reg = ha->iobase;
	io_lock_on = base_vha->flags.init_done;

	rval = QLA_SUCCESS;
	abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
	/* Snapshot of the reset generation; a change means a chip reset ran. */
	chip_reset = ha->chip_reset;

	if (ha->flags.pci_channel_io_perm_failure) {
		ql_log(ql_log_warn, vha, 0x1003,
		    "Perm failure on EEH timeout MBX, exiting.\n");
		return QLA_FUNCTION_TIMEOUT;
	}

	if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
		/* Setting Link-Down error */
		mcp->mb[0] = MBS_LINK_DOWN_ERROR;
		ql_log(ql_log_warn, vha, 0x1004,
		    "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
		return QLA_FUNCTION_TIMEOUT;
	}

	/* check if ISP abort is active and return cmd with timeout */
	if ((test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
	    test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) &&
	    !is_rom_cmd(mcp->mb[0])) {
		ql_log(ql_log_info, vha, 0x1005,
		    "Cmd 0x%x aborted with timeout since ISP Abort is pending\n",
		    mcp->mb[0]);
		return QLA_FUNCTION_TIMEOUT;
	}

	atomic_inc(&ha->num_pend_mbx_stage1);
	/*
	 * Wait for active mailbox commands to finish by waiting at most tov
	 * seconds. This is to serialize actual issuing of mailbox cmds during
	 * non ISP abort time.
	 */
	if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) {
		/* Timeout occurred. Return error. */
		ql_log(ql_log_warn, vha, 0xd035,
		    "Cmd access timeout, cmd=0x%x, Exiting.\n",
		    mcp->mb[0]);
		atomic_dec(&ha->num_pend_mbx_stage1);
		return QLA_FUNCTION_TIMEOUT;
	}
	atomic_dec(&ha->num_pend_mbx_stage1);
	if (ha->flags.purge_mbox || chip_reset != ha->chip_reset) {
		rval = QLA_ABORTED;
		goto premature_exit;
	}


	/* Save mailbox command for debug */
	ha->mcp = mcp;

	ql_dbg(ql_dbg_mbx, vha, 0x1006,
	    "Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]);

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Re-check under the lock: abort/purge may have raced with us. */
	if (ha->flags.purge_mbox || chip_reset != ha->chip_reset ||
	    ha->flags.mbox_busy) {
		rval = QLA_ABORTED;
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		goto premature_exit;
	}
	ha->flags.mbox_busy = 1;

	/* Load mailbox registers. */
	if (IS_P3P_TYPE(ha))
		optr = (uint16_t __iomem *)&reg->isp82.mailbox_in[0];
	else if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha)))
		optr = (uint16_t __iomem *)&reg->isp24.mailbox0;
	else
		optr = (uint16_t __iomem *)MAILBOX_REG(ha, &reg->isp, 0);

	iptr = mcp->mb;
	command = mcp->mb[0];
	mboxes = mcp->out_mb;	/* bitmask of mb[] slots to write out */

	ql_dbg(ql_dbg_mbx, vha, 0x1111,
	    "Mailbox registers (OUT):\n");
	for (cnt = 0; cnt < ha->mbx_count; cnt++) {
		/* QLA2200: registers 8+ live in a second, non-adjacent bank. */
		if (IS_QLA2200(ha) && cnt == 8)
			optr =
			    (uint16_t __iomem *)MAILBOX_REG(ha, &reg->isp, 8);
		if (mboxes & BIT_0) {
			ql_dbg(ql_dbg_mbx, vha, 0x1112,
			    "mbox[%d]<-0x%04x\n", cnt, *iptr);
			WRT_REG_WORD(optr, *iptr);
		}

		mboxes >>= 1;
		optr++;
		iptr++;
	}

	ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1117,
	    "I/O Address = %p.\n", optr);

	/* Issue set host interrupt command to send cmd out. */
	ha->flags.mbox_int = 0;
	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);

	/* Unlock mbx registers and wait for interrupt */
	ql_dbg(ql_dbg_mbx, vha, 0x100f,
	    "Going to unlock irq & waiting for interrupts. "
	    "jiffies=%lx.\n", jiffies);

	/* Wait for mbx cmd completion until timeout */
	atomic_inc(&ha->num_pend_mbx_stage2);
	if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) {
		/* Interrupt-driven path: ring doorbell, sleep on completion. */
		set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);

		if (IS_P3P_TYPE(ha))
			WRT_REG_DWORD(&reg->isp82.hint, HINT_MBX_INT_PENDING);
		else if (IS_FWI2_CAPABLE(ha))
			WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
		else
			WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		wait_time = jiffies;
		atomic_inc(&ha->num_pend_mbx_stage3);
		if (!wait_for_completion_timeout(&ha->mbx_intr_comp,
		    mcp->tov * HZ)) {
			if (chip_reset != ha->chip_reset) {
				spin_lock_irqsave(&ha->hardware_lock, flags);
				ha->flags.mbox_busy = 0;
				spin_unlock_irqrestore(&ha->hardware_lock,
				    flags);
				atomic_dec(&ha->num_pend_mbx_stage2);
				atomic_dec(&ha->num_pend_mbx_stage3);
				rval = QLA_ABORTED;
				goto premature_exit;
			}
			ql_dbg(ql_dbg_mbx, vha, 0x117a,
			    "cmd=%x Timeout.\n", command);
			spin_lock_irqsave(&ha->hardware_lock, flags);
			clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
			spin_unlock_irqrestore(&ha->hardware_lock, flags);

		} else if (ha->flags.purge_mbox ||
		    chip_reset != ha->chip_reset) {
			spin_lock_irqsave(&ha->hardware_lock, flags);
			ha->flags.mbox_busy = 0;
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
			atomic_dec(&ha->num_pend_mbx_stage2);
			atomic_dec(&ha->num_pend_mbx_stage3);
			rval = QLA_ABORTED;
			goto premature_exit;
		}
		atomic_dec(&ha->num_pend_mbx_stage3);

		if (time_after(jiffies, wait_time + 5 * HZ))
			ql_log(ql_log_warn, vha, 0x1015, "cmd=0x%x, waited %d msecs\n",
			    command, jiffies_to_msecs(jiffies - wait_time));
	} else {
		/* Polling path: ring doorbell, then spin/poll the response. */
		ql_dbg(ql_dbg_mbx, vha, 0x1011,
		    "Cmd=%x Polling Mode.\n", command);

		if (IS_P3P_TYPE(ha)) {
			if (RD_REG_DWORD(&reg->isp82.hint) &
			    HINT_MBX_INT_PENDING) {
				ha->flags.mbox_busy = 0;
				spin_unlock_irqrestore(&ha->hardware_lock,
				    flags);
				atomic_dec(&ha->num_pend_mbx_stage2);
				ql_dbg(ql_dbg_mbx, vha, 0x1012,
				    "Pending mailbox timeout, exiting.\n");
				rval = QLA_FUNCTION_TIMEOUT;
				goto premature_exit;
			}
			WRT_REG_DWORD(&reg->isp82.hint, HINT_MBX_INT_PENDING);
		} else if (IS_FWI2_CAPABLE(ha))
			WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
		else
			WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */
		while (!ha->flags.mbox_int) {
			if (ha->flags.purge_mbox ||
			    chip_reset != ha->chip_reset) {
				spin_lock_irqsave(&ha->hardware_lock, flags);
				ha->flags.mbox_busy = 0;
				spin_unlock_irqrestore(&ha->hardware_lock,
				    flags);
				atomic_dec(&ha->num_pend_mbx_stage2);
				rval = QLA_ABORTED;
				goto premature_exit;
			}

			if (time_after(jiffies, wait_time))
				break;

			/*
			 * Check if it's UNLOADING, cause we cannot poll in
			 * this case, or else a NULL pointer dereference
			 * is triggered.
			 *
			 * NOTE(review): this return leaves mbox_busy set,
			 * skips the stage2 atomic_dec and never completes
			 * mbx_cmd_comp — presumably acceptable only because
			 * the driver is unloading; confirm against upstream.
			 */
			if (unlikely(test_bit(UNLOADING, &base_vha->dpc_flags)))
				return QLA_FUNCTION_TIMEOUT;

			/* Check for pending interrupts. */
			qla2x00_poll(ha->rsp_q_map[0]);

			if (!ha->flags.mbox_int &&
			    !(IS_QLA2200(ha) &&
			    command == MBC_LOAD_RISC_RAM_EXTENDED))
				msleep(10);
		} /* while */
		ql_dbg(ql_dbg_mbx, vha, 0x1013,
		    "Waited %d sec.\n",
		    (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ));
	}
	atomic_dec(&ha->num_pend_mbx_stage2);

	/* Check whether we timed out */
	if (ha->flags.mbox_int) {
		uint16_t *iptr2;

		ql_dbg(ql_dbg_mbx, vha, 0x1014,
		    "Cmd=%x completed.\n", command);

		/* Got interrupt. Clear the flag. */
		ha->flags.mbox_int = 0;
		clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);

		if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
			spin_lock_irqsave(&ha->hardware_lock, flags);
			ha->flags.mbox_busy = 0;
			spin_unlock_irqrestore(&ha->hardware_lock, flags);

			/* Setting Link-Down error */
			mcp->mb[0] = MBS_LINK_DOWN_ERROR;
			ha->mcp = NULL;
			rval = QLA_FUNCTION_FAILED;
			ql_log(ql_log_warn, vha, 0xd048,
			    "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
			goto premature_exit;
		}

		if (ha->mailbox_out[0] != MBS_COMMAND_COMPLETE) {
			ql_dbg(ql_dbg_mbx, vha, 0x11ff,
			    "mb_out[0] = %#x <> %#x\n", ha->mailbox_out[0],
			    MBS_COMMAND_COMPLETE);
			rval = QLA_FUNCTION_FAILED;
		}

		/* Load return mailbox registers. */
		iptr2 = mcp->mb;
		iptr = (uint16_t *)&ha->mailbox_out[0];
		mboxes = mcp->in_mb;	/* bitmask of mb[] slots to read back */

		ql_dbg(ql_dbg_mbx, vha, 0x1113,
		    "Mailbox registers (IN):\n");
		for (cnt = 0; cnt < ha->mbx_count; cnt++) {
			if (mboxes & BIT_0) {
				*iptr2 = *iptr;
				ql_dbg(ql_dbg_mbx, vha, 0x1114,
				    "mbox[%d]->0x%04x\n", cnt, *iptr2);
			}

			mboxes >>= 1;
			iptr2++;
			iptr++;
		}
	} else {
		/* Timed out: dump diagnostic register state. */
		uint16_t mb[8];
		uint32_t ictrl, host_status, hccr;
		uint16_t w;

		if (IS_FWI2_CAPABLE(ha)) {
			mb[0] = RD_REG_WORD(&reg->isp24.mailbox0);
			mb[1] = RD_REG_WORD(&reg->isp24.mailbox1);
			mb[2] = RD_REG_WORD(&reg->isp24.mailbox2);
			mb[3] = RD_REG_WORD(&reg->isp24.mailbox3);
			mb[7] = RD_REG_WORD(&reg->isp24.mailbox7);
			ictrl = RD_REG_DWORD(&reg->isp24.ictrl);
			host_status = RD_REG_DWORD(&reg->isp24.host_status);
			hccr = RD_REG_DWORD(&reg->isp24.hccr);

			ql_log(ql_log_warn, vha, 0xd04c,
			    "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
			    "mb[0-3]=[0x%x 0x%x 0x%x 0x%x] mb7 0x%x host_status 0x%x hccr 0x%x\n",
			    command, ictrl, jiffies, mb[0], mb[1], mb[2], mb[3],
			    mb[7], host_status, hccr);

		} else {
			mb[0] = RD_MAILBOX_REG(ha, &reg->isp, 0);
			ictrl = RD_REG_WORD(&reg->isp.ictrl);
			ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119,
			    "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
			    "mb[0]=0x%x\n", command, ictrl, jiffies, mb[0]);
		}
		ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019);

		/* Capture FW dump only, if PCI device active */
		if (!pci_channel_offline(vha->hw->pdev)) {
			pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
			if (w == 0xffff || ictrl == 0xffffffff ||
			    (chip_reset != ha->chip_reset)) {
				/* This is special case if there is unload
				 * of driver happening and if PCI device go
				 * into bad state due to PCI error condition
				 * then only PCI ERR flag would be set.
				 * we will do premature exit for above case.
				 */
				spin_lock_irqsave(&ha->hardware_lock, flags);
				ha->flags.mbox_busy = 0;
				spin_unlock_irqrestore(&ha->hardware_lock,
				    flags);
				rval = QLA_FUNCTION_TIMEOUT;
				goto premature_exit;
			}

			/* Attempt to capture firmware dump for further
			 * analysis of the current firmware state. we do not
			 * need to do this if we are intentionally generating
			 * a dump
			 */
			if (mcp->mb[0] != MBC_GEN_SYSTEM_ERROR)
				ha->isp_ops->fw_dump(vha, 0);
			rval = QLA_FUNCTION_TIMEOUT;
		}
	}
	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->flags.mbox_busy = 0;
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	/* Clean up */
	ha->mcp = NULL;

	if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) {
		ql_dbg(ql_dbg_mbx, vha, 0x101a,
		    "Checking for additional resp interrupt.\n");

		/* polling mode for non isp_abort commands. */
		qla2x00_poll(ha->rsp_q_map[0]);
	}

	if (rval == QLA_FUNCTION_TIMEOUT &&
	    mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) {
		if (!io_lock_on || (mcp->flags & IOCTL_CMD) ||
		    ha->flags.eeh_busy) {
			/* not in dpc. schedule it for dpc to take over. */
			ql_dbg(ql_dbg_mbx, vha, 0x101b,
			    "Timeout, schedule isp_abort_needed.\n");

			if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
			    !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
			    !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
				if (IS_QLA82XX(ha)) {
					ql_dbg(ql_dbg_mbx, vha, 0x112a,
					    "disabling pause transmit on port "
					    "0 & 1.\n");
					qla82xx_wr_32(ha,
					    QLA82XX_CRB_NIU + 0x98,
					    CRB_NIU_XG_PAUSE_CTL_P0|
					    CRB_NIU_XG_PAUSE_CTL_P1);
				}
				ql_log(ql_log_info, base_vha, 0x101c,
				    "Mailbox cmd timeout occurred, cmd=0x%x, "
				    "mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP "
				    "abort.\n", command, mcp->mb[0],
				    ha->flags.eeh_busy);
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
			}
		} else if (current == ha->dpc_thread) {
			/* call abort directly since we are in the DPC thread */
			ql_dbg(ql_dbg_mbx, vha, 0x101d,
			    "Timeout, calling abort_isp.\n");

			if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
			    !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
			    !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
				if (IS_QLA82XX(ha)) {
					ql_dbg(ql_dbg_mbx, vha, 0x112b,
					    "disabling pause transmit on port "
					    "0 & 1.\n");
					qla82xx_wr_32(ha,
					    QLA82XX_CRB_NIU + 0x98,
					    CRB_NIU_XG_PAUSE_CTL_P0|
					    CRB_NIU_XG_PAUSE_CTL_P1);
				}
				ql_log(ql_log_info, base_vha, 0x101e,
				    "Mailbox cmd timeout occurred, cmd=0x%x, "
				    "mb[0]=0x%x. Scheduling ISP abort ",
				    command, mcp->mb[0]);
				set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
				clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				/* Allow next mbx cmd to come in. */
				complete(&ha->mbx_cmd_comp);
				if (ha->isp_ops->abort_isp(vha)) {
					/* Failed. retry later. */
					set_bit(ISP_ABORT_NEEDED,
					    &vha->dpc_flags);
				}
				clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
				ql_dbg(ql_dbg_mbx, vha, 0x101f,
				    "Finished abort_isp.\n");
				goto mbx_done;
			}
		}
	}

premature_exit:
	/* Allow next mbx cmd to come in. */
	complete(&ha->mbx_cmd_comp);

mbx_done:
	if (rval == QLA_ABORTED) {
		ql_log(ql_log_info, vha, 0xd035,
		    "Chip Reset in progress. Purging Mbox cmd=0x%x.\n",
		    mcp->mb[0]);
	} else if (rval) {
		if (ql2xextended_error_logging & (ql_dbg_disc|ql_dbg_mbx)) {
			pr_warn("%s [%s]-%04x:%ld: **** Failed=%x", QL_MSGHDR,
			    dev_name(&ha->pdev->dev), 0x1020+0x800,
			    vha->host_no, rval);
			mboxes = mcp->in_mb;
			cnt = 4;	/* print at most four mb[] words */
			for (i = 0; i < ha->mbx_count && cnt; i++, mboxes >>= 1)
				if (mboxes & BIT_0) {
					printk(" mb[%u]=%x", i, mcp->mb[i]);
					cnt--;
				}
			pr_warn(" cmd=%x ****\n", command);
		}
		if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha))) {
			ql_dbg(ql_dbg_mbx, vha, 0x1198,
			    "host_status=%#x intr_ctrl=%#x intr_status=%#x\n",
			    RD_REG_DWORD(&reg->isp24.host_status),
			    RD_REG_DWORD(&reg->isp24.ictrl),
			    RD_REG_DWORD(&reg->isp24.istatus));
		} else {
			ql_dbg(ql_dbg_mbx, vha, 0x1206,
			    "ctrl_status=%#x ictrl=%#x istatus=%#x\n",
			    RD_REG_WORD(&reg->isp.ctrl_status),
			    RD_REG_WORD(&reg->isp.ictrl),
			    RD_REG_WORD(&reg->isp.istatus));
		}
	} else {
		ql_dbg(ql_dbg_mbx, base_vha, 0x1021, "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2x00_load_ram
 *	Load a firmware segment into RISC RAM via DMA.
 *
 * Input:
 *	vha = adapter block pointer.
 *	req_dma = DMA address of the staged firmware buffer.
 *	risc_addr = destination RISC RAM address.
 *	risc_code_size = transfer length (words).
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
    uint32_t risc_code_size)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1022,
	    "Entered %s.\n", __func__);

	/* Use the extended form when the address needs >16 bits or on FWI2. */
	if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) {
		mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED;
		mcp->mb[8] = MSW(risc_addr);
		mcp->out_mb = MBX_8|MBX_0;
	} else {
		mcp->mb[0] = MBC_LOAD_RISC_RAM;
		mcp->out_mb = MBX_0;
	}
	mcp->mb[1] = LSW(risc_addr);
	mcp->mb[2] = MSW(req_dma);
	mcp->mb[3] = LSW(req_dma);
	mcp->mb[6] = MSW(MSD(req_dma));
	mcp->mb[7] = LSW(MSD(req_dma));
	mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
	if (IS_FWI2_CAPABLE(ha)) {
		mcp->mb[4] = MSW(risc_code_size);
		mcp->mb[5] = LSW(risc_code_size);
		mcp->out_mb |= MBX_5|MBX_4;
	} else {
		mcp->mb[4] = LSW(risc_code_size);
		mcp->out_mb |= MBX_4;
	}

	mcp->in_mb = MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1023,
		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
		    rval, mcp->mb[0], mcp->mb[1]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1024,
		    "Done %s.\n", __func__);
	}

	return rval;
}

#define	EXTENDED_BB_CREDITS	BIT_0
#define	NVME_ENABLE_FLAG	BIT_3
/* Build the mb[4] long-range bits from the detected SFP distance. */
static inline uint16_t qla25xx_set_sfp_lr_dist(struct qla_hw_data *ha)
{
	uint16_t mb4 = BIT_0;

	if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
		mb4 |= ha->long_range_distance << LR_DIST_FW_POS;

	return mb4;
}

/* Build the mb[4] long-range bits from the NVRAM enhanced_features field. */
static inline uint16_t qla25xx_set_nvr_lr_dist(struct qla_hw_data *ha)
{
	uint16_t mb4 = BIT_0;

	if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
		struct nvram_81xx *nv = ha->nvram;

		mb4 |= LR_DIST_FW_FIELD(nv->enhanced_features);
	}

	return mb4;
}

/*
 * qla2x00_execute_fw
 *     Start adapter firmware.
 *
 * Input:
 *     ha = adapter block pointer.
 *     TARGET_QUEUE_LOCK must be released.
 *     ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *     qla2x00 local function return status code.
 *
 * Context:
 *     Kernel context.
 */
int
qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1025,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_EXECUTE_FIRMWARE;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_0;
	if (IS_FWI2_CAPABLE(ha)) {
		mcp->mb[1] = MSW(risc_addr);
		mcp->mb[2] = LSW(risc_addr);
		mcp->mb[3] = 0;
		mcp->mb[4] = 0;
		mcp->mb[11] = 0;
		ha->flags.using_lr_setting = 0;
		/* Long-range (LR) SFP distance: autodetect or from NVRAM. */
		if (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
		    IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
			if (ql2xautodetectsfp) {
				if (ha->flags.detected_lr_sfp) {
					mcp->mb[4] |=
					    qla25xx_set_sfp_lr_dist(ha);
					ha->flags.using_lr_setting = 1;
				}
			} else {
				struct nvram_81xx *nv = ha->nvram;
				/* set LR distance if specified in nvram */
				if (nv->enhanced_features &
				    NEF_LR_DIST_ENABLE) {
					mcp->mb[4] |=
					    qla25xx_set_nvr_lr_dist(ha);
					ha->flags.using_lr_setting = 1;
				}
			}
		}

		if (ql2xnvmeenable && (IS_QLA27XX(ha) || IS_QLA28XX(ha)))
			mcp->mb[4] |= NVME_ENABLE_FLAG;

		if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
			struct nvram_81xx *nv = ha->nvram;
			/* set minimum speed if specified in nvram */
			if (nv->min_supported_speed >= 2 &&
			    nv->min_supported_speed <= 5) {
				mcp->mb[4] |= BIT_4;
				mcp->mb[11] |= nv->min_supported_speed & 0xF;
				mcp->out_mb |= MBX_11;
				/*
				 * NOTE(review): BIT_5 used where the sibling
				 * code uses MBX_5; presumably identical values
				 * (mb[5] carries min speed back) — confirm.
				 */
				mcp->in_mb |= BIT_5;
				vha->min_supported_speed =
				    nv->min_supported_speed;
			}
		}

		if (ha->flags.exlogins_enabled)
			mcp->mb[4] |= ENABLE_EXTENDED_LOGIN;

		if (ha->flags.exchoffld_enabled)
			mcp->mb[4] |= ENABLE_EXCHANGE_OFFLD;

		mcp->out_mb |= MBX_4 | MBX_3 | MBX_2 | MBX_1 | MBX_11;
		mcp->in_mb |= MBX_3 | MBX_2 | MBX_1;
	} else {
		mcp->mb[1] = LSW(risc_addr);
		mcp->out_mb |= MBX_1;
		if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
			mcp->mb[2] = 0;
			mcp->out_mb |= MBX_2;
		}
	}

	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1026,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
		return rval;
	}

	if (!IS_FWI2_CAPABLE(ha))
		goto done;

	/* Record firmware-reported capabilities and link speed limits. */
	ha->fw_ability_mask = mcp->mb[3] << 16 | mcp->mb[2];
	ql_dbg(ql_dbg_mbx, vha, 0x119a,
	    "fw_ability_mask=%x.\n", ha->fw_ability_mask);
	ql_dbg(ql_dbg_mbx, vha, 0x1027, "exchanges=%x.\n", mcp->mb[1]);
	if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
		ha->max_supported_speed = mcp->mb[2] & (BIT_0|BIT_1);
		ql_dbg(ql_dbg_mbx, vha, 0x119b, "max_supported_speed=%s.\n",
		    ha->max_supported_speed == 0 ? "16Gps" :
		    ha->max_supported_speed == 1 ? "32Gps" :
		    ha->max_supported_speed == 2 ? "64Gps" : "unknown");
		if (vha->min_supported_speed) {
			ha->min_supported_speed = mcp->mb[5] &
			    (BIT_0 | BIT_1 | BIT_2);
			ql_dbg(ql_dbg_mbx, vha, 0x119c,
			    "min_supported_speed=%s.\n",
			    ha->min_supported_speed == 6 ? "64Gps" :
			    ha->min_supported_speed == 5 ? "32Gps" :
			    ha->min_supported_speed == 4 ? "16Gps" :
			    ha->min_supported_speed == 3 ? "8Gps" :
			    ha->min_supported_speed == 2 ? "4Gps" : "unknown");
		}
	}

done:
	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1028,
	    "Done %s.\n", __func__);

	return rval;
}

/*
 * qla_get_exlogin_status
 *	Get extended login status
 *	uses the memory offload control/status Mailbox
 *
 * Input:
 *	ha:		adapter state pointer.
 *	fwopt:	firmware options
 *
 * Returns:
 *	qla2x00 local function status
 *
 * Context:
 *	Kernel context.
818 */ 819 #define FETCH_XLOGINS_STAT 0x8 820 int 821 qla_get_exlogin_status(scsi_qla_host_t *vha, uint16_t *buf_sz, 822 uint16_t *ex_logins_cnt) 823 { 824 int rval; 825 mbx_cmd_t mc; 826 mbx_cmd_t *mcp = &mc; 827 828 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118f, 829 "Entered %s\n", __func__); 830 831 memset(mcp->mb, 0 , sizeof(mcp->mb)); 832 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT; 833 mcp->mb[1] = FETCH_XLOGINS_STAT; 834 mcp->out_mb = MBX_1|MBX_0; 835 mcp->in_mb = MBX_10|MBX_4|MBX_0; 836 mcp->tov = MBX_TOV_SECONDS; 837 mcp->flags = 0; 838 839 rval = qla2x00_mailbox_command(vha, mcp); 840 if (rval != QLA_SUCCESS) { 841 ql_dbg(ql_dbg_mbx, vha, 0x1115, "Failed=%x.\n", rval); 842 } else { 843 *buf_sz = mcp->mb[4]; 844 *ex_logins_cnt = mcp->mb[10]; 845 846 ql_log(ql_log_info, vha, 0x1190, 847 "buffer size 0x%x, exchange login count=%d\n", 848 mcp->mb[4], mcp->mb[10]); 849 850 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1116, 851 "Done %s.\n", __func__); 852 } 853 854 return rval; 855 } 856 857 /* 858 * qla_set_exlogin_mem_cfg 859 * set extended login memory configuration 860 * Mbx needs to be issues before init_cb is set 861 * 862 * Input: 863 * ha: adapter state pointer. 864 * buffer: buffer pointer 865 * phys_addr: physical address of buffer 866 * size: size of buffer 867 * TARGET_QUEUE_LOCK must be released 868 * ADAPTER_STATE_LOCK must be release 869 * 870 * Returns: 871 * qla2x00 local funxtion status code. 872 * 873 * Context: 874 * Kernel context. 
875 */ 876 #define CONFIG_XLOGINS_MEM 0x3 877 int 878 qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr) 879 { 880 int rval; 881 mbx_cmd_t mc; 882 mbx_cmd_t *mcp = &mc; 883 struct qla_hw_data *ha = vha->hw; 884 885 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111a, 886 "Entered %s.\n", __func__); 887 888 memset(mcp->mb, 0 , sizeof(mcp->mb)); 889 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT; 890 mcp->mb[1] = CONFIG_XLOGINS_MEM; 891 mcp->mb[2] = MSW(phys_addr); 892 mcp->mb[3] = LSW(phys_addr); 893 mcp->mb[6] = MSW(MSD(phys_addr)); 894 mcp->mb[7] = LSW(MSD(phys_addr)); 895 mcp->mb[8] = MSW(ha->exlogin_size); 896 mcp->mb[9] = LSW(ha->exlogin_size); 897 mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 898 mcp->in_mb = MBX_11|MBX_0; 899 mcp->tov = MBX_TOV_SECONDS; 900 mcp->flags = 0; 901 rval = qla2x00_mailbox_command(vha, mcp); 902 if (rval != QLA_SUCCESS) { 903 /*EMPTY*/ 904 ql_dbg(ql_dbg_mbx, vha, 0x111b, "Failed=%x.\n", rval); 905 } else { 906 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c, 907 "Done %s.\n", __func__); 908 } 909 910 return rval; 911 } 912 913 /* 914 * qla_get_exchoffld_status 915 * Get exchange offload status 916 * uses the memory offload control/status Mailbox 917 * 918 * Input: 919 * ha: adapter state pointer. 920 * fwopt: firmware options 921 * 922 * Returns: 923 * qla2x00 local function status 924 * 925 * Context: 926 * Kernel context. 
927 */ 928 #define FETCH_XCHOFFLD_STAT 0x2 929 int 930 qla_get_exchoffld_status(scsi_qla_host_t *vha, uint16_t *buf_sz, 931 uint16_t *ex_logins_cnt) 932 { 933 int rval; 934 mbx_cmd_t mc; 935 mbx_cmd_t *mcp = &mc; 936 937 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1019, 938 "Entered %s\n", __func__); 939 940 memset(mcp->mb, 0 , sizeof(mcp->mb)); 941 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT; 942 mcp->mb[1] = FETCH_XCHOFFLD_STAT; 943 mcp->out_mb = MBX_1|MBX_0; 944 mcp->in_mb = MBX_10|MBX_4|MBX_0; 945 mcp->tov = MBX_TOV_SECONDS; 946 mcp->flags = 0; 947 948 rval = qla2x00_mailbox_command(vha, mcp); 949 if (rval != QLA_SUCCESS) { 950 ql_dbg(ql_dbg_mbx, vha, 0x1155, "Failed=%x.\n", rval); 951 } else { 952 *buf_sz = mcp->mb[4]; 953 *ex_logins_cnt = mcp->mb[10]; 954 955 ql_log(ql_log_info, vha, 0x118e, 956 "buffer size 0x%x, exchange offload count=%d\n", 957 mcp->mb[4], mcp->mb[10]); 958 959 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1156, 960 "Done %s.\n", __func__); 961 } 962 963 return rval; 964 } 965 966 /* 967 * qla_set_exchoffld_mem_cfg 968 * Set exchange offload memory configuration 969 * Mbx needs to be issues before init_cb is set 970 * 971 * Input: 972 * ha: adapter state pointer. 973 * buffer: buffer pointer 974 * phys_addr: physical address of buffer 975 * size: size of buffer 976 * TARGET_QUEUE_LOCK must be released 977 * ADAPTER_STATE_LOCK must be release 978 * 979 * Returns: 980 * qla2x00 local funxtion status code. 981 * 982 * Context: 983 * Kernel context. 
984 */ 985 #define CONFIG_XCHOFFLD_MEM 0x3 986 int 987 qla_set_exchoffld_mem_cfg(scsi_qla_host_t *vha) 988 { 989 int rval; 990 mbx_cmd_t mc; 991 mbx_cmd_t *mcp = &mc; 992 struct qla_hw_data *ha = vha->hw; 993 994 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1157, 995 "Entered %s.\n", __func__); 996 997 memset(mcp->mb, 0 , sizeof(mcp->mb)); 998 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT; 999 mcp->mb[1] = CONFIG_XCHOFFLD_MEM; 1000 mcp->mb[2] = MSW(ha->exchoffld_buf_dma); 1001 mcp->mb[3] = LSW(ha->exchoffld_buf_dma); 1002 mcp->mb[6] = MSW(MSD(ha->exchoffld_buf_dma)); 1003 mcp->mb[7] = LSW(MSD(ha->exchoffld_buf_dma)); 1004 mcp->mb[8] = MSW(ha->exchoffld_size); 1005 mcp->mb[9] = LSW(ha->exchoffld_size); 1006 mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 1007 mcp->in_mb = MBX_11|MBX_0; 1008 mcp->tov = MBX_TOV_SECONDS; 1009 mcp->flags = 0; 1010 rval = qla2x00_mailbox_command(vha, mcp); 1011 if (rval != QLA_SUCCESS) { 1012 /*EMPTY*/ 1013 ql_dbg(ql_dbg_mbx, vha, 0x1158, "Failed=%x.\n", rval); 1014 } else { 1015 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1192, 1016 "Done %s.\n", __func__); 1017 } 1018 1019 return rval; 1020 } 1021 1022 /* 1023 * qla2x00_get_fw_version 1024 * Get firmware version. 1025 * 1026 * Input: 1027 * ha: adapter state pointer. 1028 * major: pointer for major number. 1029 * minor: pointer for minor number. 1030 * subminor: pointer for subminor number. 1031 * 1032 * Returns: 1033 * qla2x00 local function return status code. 1034 * 1035 * Context: 1036 * Kernel context. 
 */
int
qla2x00_get_fw_version(scsi_qla_host_t *vha)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1029,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_GET_FIRMWARE_VERSION;
	mcp->out_mb = MBX_0;
	/* Which mailbox registers to read back depends on chip family. */
	mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	if (IS_QLA81XX(vha->hw) || IS_QLA8031(ha) || IS_QLA8044(ha))
		mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8;
	if (IS_FWI2_CAPABLE(ha))
		mcp->in_mb |= MBX_17|MBX_16|MBX_15;
	if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
		mcp->in_mb |=
		    MBX_25|MBX_24|MBX_23|MBX_22|MBX_21|MBX_20|MBX_19|MBX_18|
		    MBX_14|MBX_13|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7;

	mcp->flags = 0;
	mcp->tov = MBX_TOV_SECONDS;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS)
		goto failed;

	/* Return mailbox data. */
	ha->fw_major_version = mcp->mb[1];
	ha->fw_minor_version = mcp->mb[2];
	ha->fw_subminor_version = mcp->mb[3];
	ha->fw_attributes = mcp->mb[6];
	if (IS_QLA2100(vha->hw) || IS_QLA2200(vha->hw))
		ha->fw_memory_size = 0x1FFFF;		/* Defaults to 128KB. */
	else
		ha->fw_memory_size = (mcp->mb[5] << 16) | mcp->mb[4];

	/* 81xx/83xx-class parts also report MPI and PHY firmware versions. */
	if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
		ha->mpi_version[0] = mcp->mb[10] & 0xff;
		ha->mpi_version[1] = mcp->mb[11] >> 8;
		ha->mpi_version[2] = mcp->mb[11] & 0xff;
		ha->mpi_capabilities = (mcp->mb[12] << 16) | mcp->mb[13];
		ha->phy_version[0] = mcp->mb[8] & 0xff;
		ha->phy_version[1] = mcp->mb[9] >> 8;
		ha->phy_version[2] = mcp->mb[9] & 0xff;
	}

	if (IS_FWI2_CAPABLE(ha)) {
		ha->fw_attributes_h = mcp->mb[15];
		ha->fw_attributes_ext[0] = mcp->mb[16];
		ha->fw_attributes_ext[1] = mcp->mb[17];
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1139,
		    "%s: FW_attributes Upper: 0x%x, Lower: 0x%x.\n",
		    __func__, mcp->mb[15], mcp->mb[6]);
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x112f,
		    "%s: Ext_FwAttributes Upper: 0x%x, Lower: 0x%x.\n",
		    __func__, mcp->mb[17], mcp->mb[16]);

		if (ha->fw_attributes_h & 0x4)
			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118d,
			    "%s: Firmware supports Extended Login 0x%x\n",
			    __func__, ha->fw_attributes_h);

		if (ha->fw_attributes_h & 0x8)
			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1191,
			    "%s: Firmware supports Exchange Offload 0x%x\n",
			    __func__, ha->fw_attributes_h);

		/*
		 * FW supports nvme and driver load parameter requested nvme.
		 * BIT 26 of fw_attributes indicates NVMe support.
		 */
		if ((ha->fw_attributes_h &
		    (FW_ATTR_H_NVME | FW_ATTR_H_NVME_UPDATED)) &&
			ql2xnvmeenable) {
			if (ha->fw_attributes_h & FW_ATTR_H_NVME_FBURST)
				vha->flags.nvme_first_burst = 1;

			vha->flags.nvme_enabled = 1;
			ql_log(ql_log_info, vha, 0xd302,
			    "%s: FC-NVMe is Enabled (0x%x)\n",
			    __func__, ha->fw_attributes_h);
		}
	}

	/* 27xx/28xx: serdes/MPI/PEP versions plus shared/DDR RAM ranges. */
	if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
		ha->serdes_version[0] = mcp->mb[7] & 0xff;
		ha->serdes_version[1] = mcp->mb[8] >> 8;
		ha->serdes_version[2] = mcp->mb[8] & 0xff;
		ha->mpi_version[0] = mcp->mb[10] & 0xff;
		ha->mpi_version[1] = mcp->mb[11] >> 8;
		ha->mpi_version[2] = mcp->mb[11] & 0xff;
		ha->pep_version[0] = mcp->mb[13] & 0xff;
		ha->pep_version[1] = mcp->mb[14] >> 8;
		ha->pep_version[2] = mcp->mb[14] & 0xff;
		ha->fw_shared_ram_start = (mcp->mb[19] << 16) | mcp->mb[18];
		ha->fw_shared_ram_end = (mcp->mb[21] << 16) | mcp->mb[20];
		ha->fw_ddr_ram_start = (mcp->mb[23] << 16) | mcp->mb[22];
		ha->fw_ddr_ram_end = (mcp->mb[25] << 16) | mcp->mb[24];
		if (IS_QLA28XX(ha)) {
			if (mcp->mb[16] & BIT_10) {
				ql_log(ql_log_info, vha, 0xffff,
				    "FW support secure flash updates\n");
				ha->flags.secure_fw = 1;
			}
		}
	}

failed:
	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x102a, "Failed=%x.\n", rval);
	} else {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102b,
		    "Done %s.\n", __func__);
	}
	return rval;
}

/*
 * qla2x00_get_fw_options
 *	Get firmware options.
 *
 * Input:
 *	ha = adapter block pointer.
 *	fwopt = pointer for firmware options.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
1173 */ 1174 int 1175 qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts) 1176 { 1177 int rval; 1178 mbx_cmd_t mc; 1179 mbx_cmd_t *mcp = &mc; 1180 1181 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102c, 1182 "Entered %s.\n", __func__); 1183 1184 mcp->mb[0] = MBC_GET_FIRMWARE_OPTION; 1185 mcp->out_mb = MBX_0; 1186 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0; 1187 mcp->tov = MBX_TOV_SECONDS; 1188 mcp->flags = 0; 1189 rval = qla2x00_mailbox_command(vha, mcp); 1190 1191 if (rval != QLA_SUCCESS) { 1192 /*EMPTY*/ 1193 ql_dbg(ql_dbg_mbx, vha, 0x102d, "Failed=%x.\n", rval); 1194 } else { 1195 fwopts[0] = mcp->mb[0]; 1196 fwopts[1] = mcp->mb[1]; 1197 fwopts[2] = mcp->mb[2]; 1198 fwopts[3] = mcp->mb[3]; 1199 1200 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102e, 1201 "Done %s.\n", __func__); 1202 } 1203 1204 return rval; 1205 } 1206 1207 1208 /* 1209 * qla2x00_set_fw_options 1210 * Set firmware options. 1211 * 1212 * Input: 1213 * ha = adapter block pointer. 1214 * fwopt = pointer for firmware options. 1215 * 1216 * Returns: 1217 * qla2x00 local function return status code. 1218 * 1219 * Context: 1220 * Kernel context. 
 */
int
qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102f,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_SET_FIRMWARE_OPTION;
	mcp->mb[1] = fwopts[1];
	mcp->mb[2] = fwopts[2];
	mcp->mb[3] = fwopts[3];
	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_0;
	if (IS_FWI2_CAPABLE(vha->hw)) {
		/* FWI2 parts return extra status in mb[1] and take mb[10]. */
		mcp->in_mb |= MBX_1;
		mcp->mb[10] = fwopts[10];
		mcp->out_mb |= MBX_10;
	} else {
		mcp->mb[10] = fwopts[10];
		mcp->mb[11] = fwopts[11];
		mcp->mb[12] = 0;	/* Undocumented, but used */
		mcp->out_mb |= MBX_12|MBX_11|MBX_10;
	}
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	/* Always hand the completion status word back to the caller. */
	fwopts[0] = mcp->mb[0];

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x1030,
		    "Failed=%x (%x/%x).\n", rval, mcp->mb[0], mcp->mb[1]);
	} else {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1031,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2x00_mbx_reg_test
 *	Mailbox register wrap test.
 *
 * Input:
 *	ha = adapter block pointer.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
1281 */ 1282 int 1283 qla2x00_mbx_reg_test(scsi_qla_host_t *vha) 1284 { 1285 int rval; 1286 mbx_cmd_t mc; 1287 mbx_cmd_t *mcp = &mc; 1288 1289 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1032, 1290 "Entered %s.\n", __func__); 1291 1292 mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST; 1293 mcp->mb[1] = 0xAAAA; 1294 mcp->mb[2] = 0x5555; 1295 mcp->mb[3] = 0xAA55; 1296 mcp->mb[4] = 0x55AA; 1297 mcp->mb[5] = 0xA5A5; 1298 mcp->mb[6] = 0x5A5A; 1299 mcp->mb[7] = 0x2525; 1300 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 1301 mcp->in_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 1302 mcp->tov = MBX_TOV_SECONDS; 1303 mcp->flags = 0; 1304 rval = qla2x00_mailbox_command(vha, mcp); 1305 1306 if (rval == QLA_SUCCESS) { 1307 if (mcp->mb[1] != 0xAAAA || mcp->mb[2] != 0x5555 || 1308 mcp->mb[3] != 0xAA55 || mcp->mb[4] != 0x55AA) 1309 rval = QLA_FUNCTION_FAILED; 1310 if (mcp->mb[5] != 0xA5A5 || mcp->mb[6] != 0x5A5A || 1311 mcp->mb[7] != 0x2525) 1312 rval = QLA_FUNCTION_FAILED; 1313 } 1314 1315 if (rval != QLA_SUCCESS) { 1316 /*EMPTY*/ 1317 ql_dbg(ql_dbg_mbx, vha, 0x1033, "Failed=%x.\n", rval); 1318 } else { 1319 /*EMPTY*/ 1320 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1034, 1321 "Done %s.\n", __func__); 1322 } 1323 1324 return rval; 1325 } 1326 1327 /* 1328 * qla2x00_verify_checksum 1329 * Verify firmware checksum. 1330 * 1331 * Input: 1332 * ha = adapter block pointer. 1333 * TARGET_QUEUE_LOCK must be released. 1334 * ADAPTER_STATE_LOCK must be released. 1335 * 1336 * Returns: 1337 * qla2x00 local function return status code. 1338 * 1339 * Context: 1340 * Kernel context. 
 */
int
qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1035,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_VERIFY_CHECKSUM;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_0;
	if (IS_FWI2_CAPABLE(vha->hw)) {
		/* FWI2 parts take a full 32-bit RISC address split in mb[1-2]. */
		mcp->mb[1] = MSW(risc_addr);
		mcp->mb[2] = LSW(risc_addr);
		mcp->out_mb |= MBX_2|MBX_1;
		mcp->in_mb |= MBX_2|MBX_1;
	} else {
		/* Older ISPs only address 16 bits of RISC RAM. */
		mcp->mb[1] = LSW(risc_addr);
		mcp->out_mb |= MBX_1;
		mcp->in_mb |= MBX_1;
	}

	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		/* On failure mb[1] (and mb[2] on FWI2) hold the bad checksum. */
		ql_dbg(ql_dbg_mbx, vha, 0x1036,
		    "Failed=%x chm sum=%x.\n", rval, IS_FWI2_CAPABLE(vha->hw) ?
		    (mcp->mb[2] << 16) | mcp->mb[1] : mcp->mb[1]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1037,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2x00_issue_iocb
 *	Issue IOCB using mailbox command
 *
 * Input:
 *	ha = adapter state pointer.
 *	buffer = buffer pointer.
 *	phys_addr = physical address of buffer.
 *	size = size of buffer.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
    dma_addr_t phys_addr, size_t size, uint32_t tov)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1038,
	    "Entered %s.\n", __func__);

	/* Firmware executes the IOCB at the given 64-bit DMA address. */
	mcp->mb[0] = MBC_IOCB_COMMAND_A64;
	mcp->mb[1] = 0;
	mcp->mb[2] = MSW(phys_addr);
	mcp->mb[3] = LSW(phys_addr);
	mcp->mb[6] = MSW(MSD(phys_addr));
	mcp->mb[7] = LSW(MSD(phys_addr));
	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_2|MBX_0;
	mcp->tov = tov;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x1039, "Failed=%x.\n", rval);
	} else {
		sts_entry_t *sts_entry = (sts_entry_t *) buffer;

		/* Mask reserved bits. */
		sts_entry->entry_status &=
		    IS_FWI2_CAPABLE(vha->hw) ? RF_MASK_24XX : RF_MASK;
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103a,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/* Convenience wrapper using the default mailbox timeout. */
int
qla2x00_issue_iocb(scsi_qla_host_t *vha, void *buffer, dma_addr_t phys_addr,
    size_t size)
{
	return qla2x00_issue_iocb_timeout(vha, buffer, phys_addr, size,
	    MBX_TOV_SECONDS);
}

/*
 * qla2x00_abort_command
 *	Abort command aborts a specified IOCB.
 *
 * Input:
 *	ha = adapter block pointer.
 *	sp = SB structure pointer.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_abort_command(srb_t *sp)
{
	unsigned long flags = 0;
	int rval;
	uint32_t handle = 0;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	fc_port_t *fcport = sp->fcport;
	scsi_qla_host_t *vha = fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103b,
	    "Entered %s.\n", __func__);

	if (vha->flags.qpairs_available && sp->qpair)
		req = sp->qpair->req;
	else
		req = vha->req;

	/* Scan the outstanding-command table for sp under the hardware lock. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
		if (req->outstanding_cmds[handle] == sp)
			break;
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (handle == req->num_outstanding_cmds) {
		/* command not found */
		return QLA_FUNCTION_FAILED;
	}

	mcp->mb[0] = MBC_ABORT_COMMAND;
	if (HAS_EXTENDED_IDS(ha))
		mcp->mb[1] = fcport->loop_id;
	else
		mcp->mb[1] = fcport->loop_id << 8;
	mcp->mb[2] = (uint16_t)handle;
	mcp->mb[3] = (uint16_t)(handle >> 16);
	mcp->mb[6] = (uint16_t)cmd->device->lun;
	mcp->out_mb = MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x103c, "Failed=%x.\n", rval);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103d,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2x00_abort_target
 *	Issue an abort-target mailbox command, then a sync marker IOCB.
 *	Returns the abort status; the marker status is only logged.
 */
int
qla2x00_abort_target(struct fc_port *fcport, uint64_t l, int tag)
{
	int rval, rval2;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	scsi_qla_host_t *vha;

	vha = fcport->vha;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103e,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_ABORT_TARGET;
	mcp->out_mb = MBX_9|MBX_2|MBX_1|MBX_0;
	if (HAS_EXTENDED_IDS(vha->hw)) {
		mcp->mb[1] = fcport->loop_id;
		mcp->mb[10] = 0;
		mcp->out_mb |= MBX_10;
	} else {
		mcp->mb[1] = fcport->loop_id << 8;
	}
	mcp->mb[2] = vha->hw->loop_reset_delay;
	mcp->mb[9] = vha->vp_idx;

	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103f,
		    "Failed=%x.\n", rval);
	}

	/* Issue marker IOCB. */
	rval2 = qla2x00_marker(vha, vha->hw->base_qpair, fcport->loop_id, 0,
	    MK_SYNC_ID);
	if (rval2 != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1040,
		    "Failed to issue marker IOCB (%x).\n", rval2);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1041,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2x00_lun_reset
 *	Issue a LUN-reset mailbox command, then a sync marker IOCB.
 *	Returns the reset status; the marker status is only logged.
 */
int
qla2x00_lun_reset(struct fc_port *fcport, uint64_t l, int tag)
{
	int rval, rval2;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	scsi_qla_host_t *vha;

	vha = fcport->vha;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1042,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_LUN_RESET;
	mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
	if (HAS_EXTENDED_IDS(vha->hw))
		mcp->mb[1] = fcport->loop_id;
	else
		mcp->mb[1] = fcport->loop_id << 8;
	mcp->mb[2] = (u32)l;
	mcp->mb[3] = 0;
	mcp->mb[9] = vha->vp_idx;

	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1043, "Failed=%x.\n", rval);
	}

	/* Issue marker IOCB. */
	rval2 = qla2x00_marker(vha, vha->hw->base_qpair, fcport->loop_id, l,
	    MK_SYNC_ID_LUN);
	if (rval2 != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1044,
		    "Failed to issue marker IOCB (%x).\n", rval2);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1045,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2x00_get_adapter_id
 *	Get adapter ID and topology.
 *
 * Input:
 *	ha = adapter block pointer.
 *	id = pointer for loop ID.
 *	al_pa = pointer for AL_PA.
 *	area = pointer for area.
 *	domain = pointer for domain.
 *	top = pointer for topology.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
    uint8_t *area, uint8_t *domain, uint16_t *top, uint16_t *sw_cap)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1046,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID;
	mcp->mb[9] = vha->vp_idx;
	mcp->out_mb = MBX_9|MBX_0;
	mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	/* CNA parts also return FCoE VLAN/FCF/VN-port MAC in mb[10-13]. */
	if (IS_CNA_CAPABLE(vha->hw))
		mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10;
	if (IS_FWI2_CAPABLE(vha->hw))
		mcp->in_mb |= MBX_19|MBX_18|MBX_17|MBX_16;
	if (IS_QLA27XX(vha->hw) || IS_QLA28XX(vha->hw))
		mcp->in_mb |= MBX_15;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	/* Map firmware completion codes onto driver status values. */
	if (mcp->mb[0] == MBS_COMMAND_ERROR)
		rval = QLA_COMMAND_ERROR;
	else if (mcp->mb[0] == MBS_INVALID_COMMAND)
		rval = QLA_INVALID_COMMAND;

	/* Return data. */
	*id = mcp->mb[1];
	*al_pa = LSB(mcp->mb[2]);
	*area = MSB(mcp->mb[2]);
	*domain = LSB(mcp->mb[3]);
	*top = mcp->mb[6];
	*sw_cap = mcp->mb[7];

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x1047, "Failed=%x.\n", rval);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1048,
		    "Done %s.\n", __func__);

		if (IS_CNA_CAPABLE(vha->hw)) {
			vha->fcoe_vlan_id = mcp->mb[9] & 0xfff;
			vha->fcoe_fcf_idx = mcp->mb[10];
			vha->fcoe_vn_port_mac[5] = mcp->mb[11] >> 8;
			vha->fcoe_vn_port_mac[4] = mcp->mb[11] & 0xff;
			vha->fcoe_vn_port_mac[3] = mcp->mb[12] >> 8;
			vha->fcoe_vn_port_mac[2] = mcp->mb[12] & 0xff;
			vha->fcoe_vn_port_mac[1] = mcp->mb[13] >> 8;
			vha->fcoe_vn_port_mac[0] = mcp->mb[13] & 0xff;
		}
		/* If FA-WWN supported */
		if (IS_FAWWN_CAPABLE(vha->hw)) {
			if (mcp->mb[7] & BIT_14) {
				/* Fabric-assigned WWN arrives in mb[16-19]. */
				vha->port_name[0] = MSB(mcp->mb[16]);
				vha->port_name[1] = LSB(mcp->mb[16]);
				vha->port_name[2] = MSB(mcp->mb[17]);
				vha->port_name[3] = LSB(mcp->mb[17]);
				vha->port_name[4] = MSB(mcp->mb[18]);
				vha->port_name[5] = LSB(mcp->mb[18]);
				vha->port_name[6] = MSB(mcp->mb[19]);
				vha->port_name[7] = LSB(mcp->mb[19]);
				fc_host_port_name(vha->host) =
				    wwn_to_u64(vha->port_name);
				ql_dbg(ql_dbg_mbx, vha, 0x10ca,
				    "FA-WWN acquired %016llx\n",
				    wwn_to_u64(vha->port_name));
			}
		}

		if (IS_QLA27XX(vha->hw) || IS_QLA28XX(vha->hw))
			vha->bbcr = mcp->mb[15];
	}

	return rval;
}

/*
 * qla2x00_get_retry_cnt
 *	Get current firmware login retry count and delay.
 *
 * Input:
 *	ha = adapter block pointer.
 *	retry_cnt = pointer to login retry count.
 *	tov = pointer to login timeout value.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov,
    uint16_t *r_a_tov)
{
	int rval;
	uint16_t ratov;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1049,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_GET_RETRY_COUNT;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x104a,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		/*
		 * Convert returned data and check our values.
		 * retry_cnt/tov are in-out: only overwritten when the
		 * firmware-reported product exceeds the caller's values.
		 */
		*r_a_tov = mcp->mb[3] / 2;
		ratov = (mcp->mb[3]/2) / 10;  /* mb[3] value is in 100ms */
		if (mcp->mb[1] * ratov > (*retry_cnt) * (*tov)) {
			/* Update to the larger values */
			*retry_cnt = (uint8_t)mcp->mb[1];
			*tov = ratov;
		}

		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104b,
		    "Done %s mb3=%d ratov=%d.\n", __func__, mcp->mb[3], ratov);
	}

	return rval;
}

/*
 * qla2x00_init_firmware
 *	Initialize adapter firmware.
 *
 * Input:
 *	ha = adapter block pointer.
 *	dptr = Initialization control block pointer.
 *	size = size of initialization control block.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104c,
	    "Entered %s.\n", __func__);

	/* P3P parts: ring the doorbell before handing over the init block. */
	if (IS_P3P_TYPE(ha) && ql2xdbwr)
		qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr,
			(0x04 | (ha->portnum << 5) | (0 << 8) | (0 << 16)));

	if (ha->flags.npiv_supported)
		mcp->mb[0] = MBC_MID_INITIALIZE_FIRMWARE;
	else
		mcp->mb[0] = MBC_INITIALIZE_FIRMWARE;

	mcp->mb[1] = 0;
	mcp->mb[2] = MSW(ha->init_cb_dma);
	mcp->mb[3] = LSW(ha->init_cb_dma);
	mcp->mb[6] = MSW(MSD(ha->init_cb_dma));
	mcp->mb[7] = LSW(MSD(ha->init_cb_dma));
	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	/* Pass the extended init block (mb[10-14]) when one is present. */
	if (ha->ex_init_cb && ha->ex_init_cb->ex_version) {
		mcp->mb[1] = BIT_0;
		mcp->mb[10] = MSW(ha->ex_init_cb_dma);
		mcp->mb[11] = LSW(ha->ex_init_cb_dma);
		mcp->mb[12] = MSW(MSD(ha->ex_init_cb_dma));
		mcp->mb[13] = LSW(MSD(ha->ex_init_cb_dma));
		mcp->mb[14] = sizeof(*ha->ex_init_cb);
		mcp->out_mb |= MBX_14|MBX_13|MBX_12|MBX_11|MBX_10;
	}
	/* 1 and 2 should normally be captured. */
	mcp->in_mb = MBX_2|MBX_1|MBX_0;
	if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
		/* mb3 is additional info about the installed SFP. */
		mcp->in_mb |= MBX_3;
	mcp->buf_size = size;
	mcp->flags = MBX_DMA_OUT;
	mcp->tov = MBX_TOV_SECONDS;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x104d,
		    "Failed=%x mb[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x.\n",
		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3]);
		/* Dump the init blocks to aid post-mortem analysis. */
		if (ha->init_cb) {
			ql_dbg(ql_dbg_mbx, vha, 0x104d, "init_cb:\n");
			ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha,
			    0x0104d, ha->init_cb, sizeof(*ha->init_cb));
		}
		if (ha->ex_init_cb && ha->ex_init_cb->ex_version) {
			ql_dbg(ql_dbg_mbx, vha, 0x104d, "ex_init_cb:\n");
			ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha,
			    0x0104d, ha->ex_init_cb, sizeof(*ha->ex_init_cb));
		}
	} else {
		if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
			if (mcp->mb[2] == 6 || mcp->mb[3] == 2)
				ql_dbg(ql_dbg_mbx, vha, 0x119d,
				    "Invalid SFP/Validation Failed\n");
		}
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104e,
		    "Done %s.\n", __func__);
	}

	return rval;
}


/*
 * qla2x00_get_port_database
 *	Issue normal/enhanced get port database mailbox command
 *	and copy device name as necessary.
 *
 * Input:
 *	ha = adapter state pointer.
 *	dev = structure pointer.
 *	opt = enhanced cmd option byte.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	port_database_t *pd;
	struct port_database_24xx *pd24;
	dma_addr_t pd_dma;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104f,
	    "Entered %s.\n", __func__);

	pd24 = NULL;
	/* DMA buffer the firmware fills with the port database. */
	pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
	if (pd == NULL) {
		ql_log(ql_log_warn, vha, 0x1050,
		    "Failed to allocate port database structure.\n");
		fcport->query = 0;
		return QLA_MEMORY_ALLOC_FAILED;
	}

	mcp->mb[0] = MBC_GET_PORT_DATABASE;
	if (opt != 0 && !IS_FWI2_CAPABLE(ha))
		mcp->mb[0] = MBC_ENHANCED_GET_PORT_DATABASE;
	mcp->mb[2] = MSW(pd_dma);
	mcp->mb[3] = LSW(pd_dma);
	mcp->mb[6] = MSW(MSD(pd_dma));
	mcp->mb[7] = LSW(MSD(pd_dma));
	mcp->mb[9] = vha->vp_idx;
	mcp->out_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
	mcp->in_mb = MBX_0;
	if (IS_FWI2_CAPABLE(ha)) {
		mcp->mb[1] = fcport->loop_id;
		mcp->mb[10] = opt;
		mcp->out_mb |= MBX_10|MBX_1;
		mcp->in_mb |= MBX_1;
	} else if (HAS_EXTENDED_IDS(ha)) {
		mcp->mb[1] = fcport->loop_id;
		mcp->mb[10] = opt;
		mcp->out_mb |= MBX_10|MBX_1;
	} else {
		/* Legacy ISPs pack loop id and option into one register. */
		mcp->mb[1] = fcport->loop_id << 8 | opt;
		mcp->out_mb |= MBX_1;
	}
	mcp->buf_size = IS_FWI2_CAPABLE(ha) ?
	    PORT_DATABASE_24XX_SIZE : PORT_DATABASE_SIZE;
	mcp->flags = MBX_DMA_IN;
	mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS)
		goto gpd_error_out;

	if (IS_FWI2_CAPABLE(ha)) {
		uint64_t zero = 0;
		u8 current_login_state, last_login_state;

		pd24 = (struct port_database_24xx *) pd;

		/*
		 * Check for logged in state.  For NVMe targets the login
		 * state lives in the upper nibble, otherwise in the lower.
		 */
		if (NVME_TARGET(ha, fcport)) {
			current_login_state = pd24->current_login_state >> 4;
			last_login_state = pd24->last_login_state >> 4;
		} else {
			current_login_state = pd24->current_login_state & 0xf;
			last_login_state = pd24->last_login_state & 0xf;
		}
		fcport->current_login_state = pd24->current_login_state;
		fcport->last_login_state = pd24->last_login_state;

		/* Check for logged in state. */
		if (current_login_state != PDS_PRLI_COMPLETE &&
		    last_login_state != PDS_PRLI_COMPLETE) {
			ql_dbg(ql_dbg_mbx, vha, 0x119a,
			    "Unable to verify login-state (%x/%x) for loop_id %x.\n",
			    current_login_state, last_login_state,
			    fcport->loop_id);
			rval = QLA_FUNCTION_FAILED;

			if (!fcport->query)
				goto gpd_error_out;
		}

		if (fcport->loop_id == FC_NO_LOOP_ID ||
		    (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
		     memcmp(fcport->port_name, pd24->port_name, 8))) {
			/* We lost the device mid way. */
			rval = QLA_NOT_LOGGED_IN;
			goto gpd_error_out;
		}

		/* Names are little-endian. */
		memcpy(fcport->node_name, pd24->node_name, WWN_SIZE);
		memcpy(fcport->port_name, pd24->port_name, WWN_SIZE);

		/* Get port_id of device. */
		fcport->d_id.b.domain = pd24->port_id[0];
		fcport->d_id.b.area = pd24->port_id[1];
		fcport->d_id.b.al_pa = pd24->port_id[2];
		fcport->d_id.b.rsvd_1 = 0;

		/* If not target must be initiator or unknown type. */
		if ((pd24->prli_svc_param_word_3[0] & BIT_4) == 0)
			fcport->port_type = FCT_INITIATOR;
		else
			fcport->port_type = FCT_TARGET;

		/* Passback COS information. */
		fcport->supported_classes = (pd24->flags & PDF_CLASS_2) ?
		    FC_COS_CLASS2 : FC_COS_CLASS3;

		if (pd24->prli_svc_param_word_3[0] & BIT_7)
			fcport->flags |= FCF_CONF_COMP_SUPPORTED;
	} else {
		uint64_t zero = 0;

		/* Check for logged in state. */
		if (pd->master_state != PD_STATE_PORT_LOGGED_IN &&
		    pd->slave_state != PD_STATE_PORT_LOGGED_IN) {
			ql_dbg(ql_dbg_mbx, vha, 0x100a,
			    "Unable to verify login-state (%x/%x) - "
			    "portid=%02x%02x%02x.\n", pd->master_state,
			    pd->slave_state, fcport->d_id.b.domain,
			    fcport->d_id.b.area, fcport->d_id.b.al_pa);
			rval = QLA_FUNCTION_FAILED;
			goto gpd_error_out;
		}

		if (fcport->loop_id == FC_NO_LOOP_ID ||
		    (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
		     memcmp(fcport->port_name, pd->port_name, 8))) {
			/* We lost the device mid way. */
			rval = QLA_NOT_LOGGED_IN;
			goto gpd_error_out;
		}

		/* Names are little-endian. */
		memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
		memcpy(fcport->port_name, pd->port_name, WWN_SIZE);

		/* Get port_id of device. */
		fcport->d_id.b.domain = pd->port_id[0];
		fcport->d_id.b.area = pd->port_id[3];
		fcport->d_id.b.al_pa = pd->port_id[2];
		fcport->d_id.b.rsvd_1 = 0;

		/* If not target must be initiator or unknown type. */
		if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
			fcport->port_type = FCT_INITIATOR;
		else
			fcport->port_type = FCT_TARGET;

		/* Passback COS information. */
		fcport->supported_classes = (pd->options & BIT_4) ?
		    FC_COS_CLASS2 : FC_COS_CLASS3;
	}

gpd_error_out:
	/* Single exit: always release the DMA buffer and clear the query flag. */
	dma_pool_free(ha->s_dma_pool, pd, pd_dma);
	fcport->query = 0;

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1052,
		    "Failed=%x mb[0]=%x mb[1]=%x.\n", rval,
		    mcp->mb[0], mcp->mb[1]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1053,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2x00_get_firmware_state
 *	Get adapter firmware state.
 *
 * Input:
 *	ha = adapter block pointer.
 *	dptr = pointer for firmware state.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1054,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
	mcp->out_mb = MBX_0;
	if (IS_FWI2_CAPABLE(vha->hw))
		mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	else
		mcp->in_mb = MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	/* Return firmware states. */
	states[0] = mcp->mb[1];
	if (IS_FWI2_CAPABLE(vha->hw)) {
		states[1] = mcp->mb[2];
		states[2] = mcp->mb[3];  /* SFP info */
		states[3] = mcp->mb[4];
		states[4] = mcp->mb[5];
		states[5] = mcp->mb[6];  /* DPORT status */
	}

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x1055, "Failed=%x.\n", rval);
	} else {
		if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
			if (mcp->mb[2] == 6 || mcp->mb[3] == 2)
				ql_dbg(ql_dbg_mbx, vha, 0x119e,
				    "Invalid SFP/Validation Failed\n");
		}
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1056,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2x00_get_port_name
 *	Issue get port name mailbox command.
 *	Returned name is in big endian format.
 *
 * Input:
 *	ha = adapter block pointer.
 *	loop_id = loop ID of device.
 *	name = pointer for name.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
2128 */ 2129 int 2130 qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name, 2131 uint8_t opt) 2132 { 2133 int rval; 2134 mbx_cmd_t mc; 2135 mbx_cmd_t *mcp = &mc; 2136 2137 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1057, 2138 "Entered %s.\n", __func__); 2139 2140 mcp->mb[0] = MBC_GET_PORT_NAME; 2141 mcp->mb[9] = vha->vp_idx; 2142 mcp->out_mb = MBX_9|MBX_1|MBX_0; 2143 if (HAS_EXTENDED_IDS(vha->hw)) { 2144 mcp->mb[1] = loop_id; 2145 mcp->mb[10] = opt; 2146 mcp->out_mb |= MBX_10; 2147 } else { 2148 mcp->mb[1] = loop_id << 8 | opt; 2149 } 2150 2151 mcp->in_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 2152 mcp->tov = MBX_TOV_SECONDS; 2153 mcp->flags = 0; 2154 rval = qla2x00_mailbox_command(vha, mcp); 2155 2156 if (rval != QLA_SUCCESS) { 2157 /*EMPTY*/ 2158 ql_dbg(ql_dbg_mbx, vha, 0x1058, "Failed=%x.\n", rval); 2159 } else { 2160 if (name != NULL) { 2161 /* This function returns name in big endian. */ 2162 name[0] = MSB(mcp->mb[2]); 2163 name[1] = LSB(mcp->mb[2]); 2164 name[2] = MSB(mcp->mb[3]); 2165 name[3] = LSB(mcp->mb[3]); 2166 name[4] = MSB(mcp->mb[6]); 2167 name[5] = LSB(mcp->mb[6]); 2168 name[6] = MSB(mcp->mb[7]); 2169 name[7] = LSB(mcp->mb[7]); 2170 } 2171 2172 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1059, 2173 "Done %s.\n", __func__); 2174 } 2175 2176 return rval; 2177 } 2178 2179 /* 2180 * qla24xx_link_initialization 2181 * Issue link initialization mailbox command. 2182 * 2183 * Input: 2184 * ha = adapter block pointer. 2185 * TARGET_QUEUE_LOCK must be released. 2186 * ADAPTER_STATE_LOCK must be released. 2187 * 2188 * Returns: 2189 * qla2x00 local function return status code. 2190 * 2191 * Context: 2192 * Kernel context. 
 */
int
qla24xx_link_initialize(scsi_qla_host_t *vha)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1152,
	    "Entered %s.\n", __func__);

	/* Only FWI2 (non-CNA) parts support this command. */
	if (!IS_FWI2_CAPABLE(vha->hw) || IS_CNA_CAPABLE(vha->hw))
		return QLA_FUNCTION_FAILED;

	mcp->mb[0] = MBC_LINK_INITIALIZATION;
	mcp->mb[1] = BIT_4;
	/* Select loop (BIT_6) or point-to-point (BIT_5) initialization. */
	if (vha->hw->operating_mode == LOOP)
		mcp->mb[1] |= BIT_6;
	else
		mcp->mb[1] |= BIT_5;
	mcp->mb[2] = 0;
	mcp->mb[3] = 0;
	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1153, "Failed=%x.\n", rval);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1154,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2x00_lip_reset
 *	Issue LIP reset mailbox command.
 *
 * Input:
 *	ha = adapter block pointer.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_lip_reset(scsi_qla_host_t *vha)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_disc, vha, 0x105a,
	    "Entered %s.\n", __func__);

	if (IS_CNA_CAPABLE(vha->hw)) {
		/* Logout across all FCFs. */
		mcp->mb[0] = MBC_LIP_FULL_LOGIN;
		mcp->mb[1] = BIT_1;
		mcp->mb[2] = 0;
		mcp->out_mb = MBX_2|MBX_1|MBX_0;
	} else if (IS_FWI2_CAPABLE(vha->hw)) {
		mcp->mb[0] = MBC_LIP_FULL_LOGIN;
		mcp->mb[1] = BIT_4;
		mcp->mb[2] = 0;
		mcp->mb[3] = vha->hw->loop_reset_delay;
		mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
	} else {
		mcp->mb[0] = MBC_LIP_RESET;
		mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
		if (HAS_EXTENDED_IDS(vha->hw)) {
			mcp->mb[1] = 0x00ff;
			mcp->mb[10] = 0;
			mcp->out_mb |= MBX_10;
		} else {
			mcp->mb[1] = 0xff00;
		}
		mcp->mb[2] = vha->hw->loop_reset_delay;
		mcp->mb[3] = 0;
	}
	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x105b, "Failed=%x.\n", rval);
	} else {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105c,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2x00_send_sns
 *	Send SNS command.
 *
 * Input:
 *	ha = adapter block pointer.
 *	sns = pointer for command.
 *	cmd_size = command size.
 *	buf_size = response/command size.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
2315 */ 2316 int 2317 qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address, 2318 uint16_t cmd_size, size_t buf_size) 2319 { 2320 int rval; 2321 mbx_cmd_t mc; 2322 mbx_cmd_t *mcp = &mc; 2323 2324 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105d, 2325 "Entered %s.\n", __func__); 2326 2327 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105e, 2328 "Retry cnt=%d ratov=%d total tov=%d.\n", 2329 vha->hw->retry_count, vha->hw->login_timeout, mcp->tov); 2330 2331 mcp->mb[0] = MBC_SEND_SNS_COMMAND; 2332 mcp->mb[1] = cmd_size; 2333 mcp->mb[2] = MSW(sns_phys_address); 2334 mcp->mb[3] = LSW(sns_phys_address); 2335 mcp->mb[6] = MSW(MSD(sns_phys_address)); 2336 mcp->mb[7] = LSW(MSD(sns_phys_address)); 2337 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 2338 mcp->in_mb = MBX_0|MBX_1; 2339 mcp->buf_size = buf_size; 2340 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN; 2341 mcp->tov = (vha->hw->login_timeout * 2) + (vha->hw->login_timeout / 2); 2342 rval = qla2x00_mailbox_command(vha, mcp); 2343 2344 if (rval != QLA_SUCCESS) { 2345 /*EMPTY*/ 2346 ql_dbg(ql_dbg_mbx, vha, 0x105f, 2347 "Failed=%x mb[0]=%x mb[1]=%x.\n", 2348 rval, mcp->mb[0], mcp->mb[1]); 2349 } else { 2350 /*EMPTY*/ 2351 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1060, 2352 "Done %s.\n", __func__); 2353 } 2354 2355 return rval; 2356 } 2357 2358 int 2359 qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, 2360 uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt) 2361 { 2362 int rval; 2363 2364 struct logio_entry_24xx *lg; 2365 dma_addr_t lg_dma; 2366 uint32_t iop[2]; 2367 struct qla_hw_data *ha = vha->hw; 2368 struct req_que *req; 2369 2370 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1061, 2371 "Entered %s.\n", __func__); 2372 2373 if (vha->vp_idx && vha->qpair) 2374 req = vha->qpair->req; 2375 else 2376 req = ha->req_q_map[0]; 2377 2378 lg = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma); 2379 if (lg == NULL) { 2380 ql_log(ql_log_warn, vha, 0x1062, 2381 "Failed to allocate login 
IOCB.\n"); 2382 return QLA_MEMORY_ALLOC_FAILED; 2383 } 2384 2385 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE; 2386 lg->entry_count = 1; 2387 lg->handle = MAKE_HANDLE(req->id, lg->handle); 2388 lg->nport_handle = cpu_to_le16(loop_id); 2389 lg->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI); 2390 if (opt & BIT_0) 2391 lg->control_flags |= cpu_to_le16(LCF_COND_PLOGI); 2392 if (opt & BIT_1) 2393 lg->control_flags |= cpu_to_le16(LCF_SKIP_PRLI); 2394 lg->port_id[0] = al_pa; 2395 lg->port_id[1] = area; 2396 lg->port_id[2] = domain; 2397 lg->vp_index = vha->vp_idx; 2398 rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0, 2399 (ha->r_a_tov / 10 * 2) + 2); 2400 if (rval != QLA_SUCCESS) { 2401 ql_dbg(ql_dbg_mbx, vha, 0x1063, 2402 "Failed to issue login IOCB (%x).\n", rval); 2403 } else if (lg->entry_status != 0) { 2404 ql_dbg(ql_dbg_mbx, vha, 0x1064, 2405 "Failed to complete IOCB -- error status (%x).\n", 2406 lg->entry_status); 2407 rval = QLA_FUNCTION_FAILED; 2408 } else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) { 2409 iop[0] = le32_to_cpu(lg->io_parameter[0]); 2410 iop[1] = le32_to_cpu(lg->io_parameter[1]); 2411 2412 ql_dbg(ql_dbg_mbx, vha, 0x1065, 2413 "Failed to complete IOCB -- completion status (%x) " 2414 "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status), 2415 iop[0], iop[1]); 2416 2417 switch (iop[0]) { 2418 case LSC_SCODE_PORTID_USED: 2419 mb[0] = MBS_PORT_ID_USED; 2420 mb[1] = LSW(iop[1]); 2421 break; 2422 case LSC_SCODE_NPORT_USED: 2423 mb[0] = MBS_LOOP_ID_USED; 2424 break; 2425 case LSC_SCODE_NOLINK: 2426 case LSC_SCODE_NOIOCB: 2427 case LSC_SCODE_NOXCB: 2428 case LSC_SCODE_CMD_FAILED: 2429 case LSC_SCODE_NOFABRIC: 2430 case LSC_SCODE_FW_NOT_READY: 2431 case LSC_SCODE_NOT_LOGGED_IN: 2432 case LSC_SCODE_NOPCB: 2433 case LSC_SCODE_ELS_REJECT: 2434 case LSC_SCODE_CMD_PARAM_ERR: 2435 case LSC_SCODE_NONPORT: 2436 case LSC_SCODE_LOGGED_IN: 2437 case LSC_SCODE_NOFLOGI_ACC: 2438 default: 2439 mb[0] = MBS_COMMAND_ERROR; 2440 break; 2441 } 2442 } else { 2443 
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1066, 2444 "Done %s.\n", __func__); 2445 2446 iop[0] = le32_to_cpu(lg->io_parameter[0]); 2447 2448 mb[0] = MBS_COMMAND_COMPLETE; 2449 mb[1] = 0; 2450 if (iop[0] & BIT_4) { 2451 if (iop[0] & BIT_8) 2452 mb[1] |= BIT_1; 2453 } else 2454 mb[1] = BIT_0; 2455 2456 /* Passback COS information. */ 2457 mb[10] = 0; 2458 if (lg->io_parameter[7] || lg->io_parameter[8]) 2459 mb[10] |= BIT_0; /* Class 2. */ 2460 if (lg->io_parameter[9] || lg->io_parameter[10]) 2461 mb[10] |= BIT_1; /* Class 3. */ 2462 if (lg->io_parameter[0] & cpu_to_le32(BIT_7)) 2463 mb[10] |= BIT_7; /* Confirmed Completion 2464 * Allowed 2465 */ 2466 } 2467 2468 dma_pool_free(ha->s_dma_pool, lg, lg_dma); 2469 2470 return rval; 2471 } 2472 2473 /* 2474 * qla2x00_login_fabric 2475 * Issue login fabric port mailbox command. 2476 * 2477 * Input: 2478 * ha = adapter block pointer. 2479 * loop_id = device loop ID. 2480 * domain = device domain. 2481 * area = device area. 2482 * al_pa = device AL_PA. 2483 * status = pointer for return status. 2484 * opt = command options. 2485 * TARGET_QUEUE_LOCK must be released. 2486 * ADAPTER_STATE_LOCK must be released. 2487 * 2488 * Returns: 2489 * qla2x00 local function return status code. 2490 * 2491 * Context: 2492 * Kernel context. 
 */
int
qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
    uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1067,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_LOGIN_FABRIC_PORT;
	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
	if (HAS_EXTENDED_IDS(ha)) {
		mcp->mb[1] = loop_id;
		mcp->mb[10] = opt;
		mcp->out_mb |= MBX_10;
	} else {
		/* Legacy format packs loop ID and options into mb[1]. */
		mcp->mb[1] = (loop_id << 8) | opt;
	}
	mcp->mb[2] = domain;
	mcp->mb[3] = area << 8 | al_pa;

	mcp->in_mb = MBX_7|MBX_6|MBX_2|MBX_1|MBX_0;
	mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	/* Return mailbox statuses. */
	if (mb != NULL) {
		mb[0] = mcp->mb[0];
		mb[1] = mcp->mb[1];
		mb[2] = mcp->mb[2];
		mb[6] = mcp->mb[6];
		mb[7] = mcp->mb[7];
		/* COS retrieved from Get-Port-Database mailbox command. */
		mb[10] = 0;
	}

	if (rval != QLA_SUCCESS) {
		/* RLU tmp code: need to change main mailbox_command function to
		 * return ok even when the mailbox completion value is not
		 * SUCCESS. The caller needs to be responsible to interpret
		 * the return values of this mailbox command if we're not
		 * to change too much of the existing code.
		 */
		/*
		 * 0x4001-0x4006 are login-specific completion codes the
		 * caller interprets via mb[0]; report success so they
		 * reach the caller instead of a generic failure.
		 */
		if (mcp->mb[0] == 0x4001 || mcp->mb[0] == 0x4002 ||
		    mcp->mb[0] == 0x4003 || mcp->mb[0] == 0x4005 ||
		    mcp->mb[0] == 0x4006)
			rval = QLA_SUCCESS;

		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x1068,
		    "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
	} else {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1069,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2x00_login_local_device
 *	Issue login loop port mailbox command.
 *
 * Input:
 *	ha = adapter block pointer.
 *	loop_id = device loop ID.
 *	opt = command options.
 *
 * Returns:
 *	Return status code.
 *
 * Context:
 *	Kernel context.
 *
 */
int
qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport,
    uint16_t *mb_ret, uint8_t opt)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106a,
	    "Entered %s.\n", __func__);

	/* FWI-2 adapters use the IOCB-based fabric login path instead. */
	if (IS_FWI2_CAPABLE(ha))
		return qla24xx_login_fabric(vha, fcport->loop_id,
		    fcport->d_id.b.domain, fcport->d_id.b.area,
		    fcport->d_id.b.al_pa, mb_ret, opt);

	mcp->mb[0] = MBC_LOGIN_LOOP_PORT;
	if (HAS_EXTENDED_IDS(ha))
		mcp->mb[1] = fcport->loop_id;
	else
		mcp->mb[1] = fcport->loop_id << 8;
	mcp->mb[2] = opt;
	mcp->out_mb = MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_7|MBX_6|MBX_1|MBX_0;
	mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	/* Return mailbox statuses. */
	if (mb_ret != NULL) {
		mb_ret[0] = mcp->mb[0];
		mb_ret[1] = mcp->mb[1];
		mb_ret[6] = mcp->mb[6];
		mb_ret[7] = mcp->mb[7];
	}

	if (rval != QLA_SUCCESS) {
		/* AV tmp code: need to change main mailbox_command function to
		 * return ok even when the mailbox completion value is not
		 * SUCCESS. The caller needs to be responsible to interpret
		 * the return values of this mailbox command if we're not
		 * to change too much of the existing code.
		 */
		if (mcp->mb[0] == 0x4005 || mcp->mb[0] == 0x4006)
			rval = QLA_SUCCESS;

		ql_dbg(ql_dbg_mbx, vha, 0x106b,
		    "Failed=%x mb[0]=%x mb[1]=%x mb[6]=%x mb[7]=%x.\n",
		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]);
	} else {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106c,
		    "Done %s.\n", __func__);
	}

	return (rval);
}

/*
 * qla24xx_fabric_logout
 *	Issue an implicit fabric logout via a LOGINOUT_PORT IOCB
 *	(ISP24xx path), freeing the N-port handle.
 */
int
qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
    uint8_t area, uint8_t al_pa)
{
	int rval;
	struct logio_entry_24xx *lg;
	dma_addr_t lg_dma;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106d,
	    "Entered %s.\n", __func__);

	lg = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
	if (lg == NULL) {
		ql_log(ql_log_warn, vha, 0x106e,
		    "Failed to allocate logout IOCB.\n");
		return QLA_MEMORY_ALLOC_FAILED;
	}

	req = vha->req;
	lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	lg->entry_count = 1;
	lg->handle = MAKE_HANDLE(req->id, lg->handle);
	lg->nport_handle = cpu_to_le16(loop_id);
	lg->control_flags =
	    cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO|
		LCF_FREE_NPORT);
	lg->port_id[0] = al_pa;
	lg->port_id[1] = area;
	lg->port_id[2] = domain;
	lg->vp_index = vha->vp_idx;
	rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0,
	    (ha->r_a_tov / 10 * 2) + 2);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x106f,
		    "Failed to issue logout IOCB (%x).\n", rval);
	} else if (lg->entry_status != 0) {
		ql_dbg(ql_dbg_mbx, vha, 0x1070,
		    "Failed to complete IOCB -- error status (%x).\n",
		    lg->entry_status);
		rval = QLA_FUNCTION_FAILED;
	} else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) {
		ql_dbg(ql_dbg_mbx, vha, 0x1071,
		    "Failed to complete IOCB -- completion status (%x) "
		    "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
		    le32_to_cpu(lg->io_parameter[0]),
		    le32_to_cpu(lg->io_parameter[1]));
	} else {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1072,
		    "Done %s.\n", __func__);
	}

	dma_pool_free(ha->s_dma_pool, lg, lg_dma);

	return rval;
}

/*
 * qla2x00_fabric_logout
 *	Issue logout fabric port mailbox command.
 *
 * Input:
 *	ha = adapter block pointer.
 *	loop_id = device loop ID.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
    uint8_t area, uint8_t al_pa)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1073,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT;
	mcp->out_mb = MBX_1|MBX_0;
	if (HAS_EXTENDED_IDS(vha->hw)) {
		mcp->mb[1] = loop_id;
		mcp->mb[10] = 0;
		mcp->out_mb |= MBX_10;
	} else {
		mcp->mb[1] = loop_id << 8;
	}

	mcp->in_mb = MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x1074,
		    "Failed=%x mb[1]=%x.\n", rval, mcp->mb[1]);
	} else {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1075,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2x00_full_login_lip
 *	Issue full login LIP mailbox command.
 *
 * Input:
 *	ha = adapter block pointer.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_full_login_lip(scsi_qla_host_t *vha)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1076,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_LIP_FULL_LOGIN;
	mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? BIT_4 : 0;
	mcp->mb[2] = 0;
	mcp->mb[3] = 0;
	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x1077, "Failed=%x.\n", rval);
	} else {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1078,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2x00_get_id_list
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma,
    uint16_t *entries)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1079,
	    "Entered %s.\n", __func__);

	if (id_list == NULL)
		return QLA_FUNCTION_FAILED;

	mcp->mb[0] = MBC_GET_ID_LIST;
	mcp->out_mb = MBX_0;
	/* DMA address register layout differs between FWI-2 and legacy. */
	if (IS_FWI2_CAPABLE(vha->hw)) {
		mcp->mb[2] = MSW(id_list_dma);
		mcp->mb[3] = LSW(id_list_dma);
		mcp->mb[6] = MSW(MSD(id_list_dma));
		mcp->mb[7] = LSW(MSD(id_list_dma));
		mcp->mb[8] = 0;
		mcp->mb[9] = vha->vp_idx;
		mcp->out_mb |= MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2;
	} else {
		mcp->mb[1] = MSW(id_list_dma);
		mcp->mb[2] = LSW(id_list_dma);
		mcp->mb[3] = MSW(MSD(id_list_dma));
		mcp->mb[6] = LSW(MSD(id_list_dma));
		mcp->out_mb |= MBX_6|MBX_3|MBX_2|MBX_1;
	}
	mcp->in_mb = MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x107a, "Failed=%x.\n", rval);
	} else {
		/* Firmware reports the entry count in mb[1]. */
		*entries = mcp->mb[1];
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107b,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2x00_get_resource_cnts
 *	Get current firmware resource counts.
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_get_resource_cnts(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107c,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_GET_RESOURCE_COUNTS;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	/* mb[12] (FCF count) is only returned on 81xx/83xx/27xx/28xx. */
	if (IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
	    IS_QLA27XX(ha) || IS_QLA28XX(ha))
		mcp->in_mb |= MBX_12;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x107d,
		    "Failed mb[0]=%x.\n", mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107e,
		    "Done %s mb1=%x mb2=%x mb3=%x mb6=%x mb7=%x mb10=%x "
		    "mb11=%x mb12=%x.\n", __func__, mcp->mb[1], mcp->mb[2],
		    mcp->mb[3], mcp->mb[6], mcp->mb[7], mcp->mb[10],
		    mcp->mb[11], mcp->mb[12]);

		/* Cache the counters on the HA for later sizing decisions. */
		ha->orig_fw_tgt_xcb_count = mcp->mb[1];
		ha->cur_fw_tgt_xcb_count = mcp->mb[2];
		ha->cur_fw_xcb_count = mcp->mb[3];
		ha->orig_fw_xcb_count = mcp->mb[6];
		ha->cur_fw_iocb_count = mcp->mb[7];
		ha->orig_fw_iocb_count = mcp->mb[10];
		if (ha->flags.npiv_supported)
			ha->max_npiv_vports = mcp->mb[11];
		if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
		    IS_QLA28XX(ha))
			ha->fw_max_fcf_count = mcp->mb[12];
	}

	return (rval);
}

/*
 * qla2x00_get_fcal_position_map
 *	Get FCAL (LILP) position map using mailbox command
 *
 * Input:
 *	ha = adapter state
pointer.
 *	pos_map = buffer pointer (can be NULL).
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	char *pmap;
	dma_addr_t pmap_dma;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107f,
	    "Entered %s.\n", __func__);

	/* DMA-able bounce buffer the firmware writes the map into. */
	pmap = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma);
	if (pmap == NULL) {
		ql_log(ql_log_warn, vha, 0x1080,
		    "Memory alloc failed.\n");
		return QLA_MEMORY_ALLOC_FAILED;
	}

	mcp->mb[0] = MBC_GET_FC_AL_POSITION_MAP;
	mcp->mb[2] = MSW(pmap_dma);
	mcp->mb[3] = LSW(pmap_dma);
	mcp->mb[6] = MSW(MSD(pmap_dma));
	mcp->mb[7] = LSW(MSD(pmap_dma));
	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
	mcp->in_mb = MBX_1|MBX_0;
	mcp->buf_size = FCAL_MAP_SIZE;
	mcp->flags = MBX_DMA_IN;
	mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval == QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1081,
		    "mb0/mb1=%x/%X FC/AL position map size (%x).\n",
		    mcp->mb[0], mcp->mb[1], (unsigned)pmap[0]);
		/* pmap[0] holds the position-map length. */
		ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111d,
		    pmap, pmap[0] + 1);

		if (pos_map)
			memcpy(pos_map, pmap, FCAL_MAP_SIZE);
	}
	dma_pool_free(ha->s_dma_pool, pmap, pmap_dma);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1082, "Failed=%x.\n", rval);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1083,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2x00_get_link_status
 *
 * Input:
 *	ha = adapter block pointer.
 *	loop_id = device loop ID.
 *	ret_buf = pointer to link status return buffer.
 *
 * Returns:
 *	0 = success.
 *	BIT_0 = mem alloc error.
 *	BIT_1 = mailbox error.
 */
int
qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
    struct link_statistics *stats, dma_addr_t stats_dma)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	uint32_t *iter = (void *)stats;
	/* Only byte-swap counters up to (not including) link_up_cnt. */
	ushort dwords = offsetof(typeof(*stats), link_up_cnt)/sizeof(*iter);
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1084,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_GET_LINK_STATUS;
	mcp->mb[2] = MSW(LSD(stats_dma));
	mcp->mb[3] = LSW(LSD(stats_dma));
	mcp->mb[6] = MSW(MSD(stats_dma));
	mcp->mb[7] = LSW(MSD(stats_dma));
	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
	mcp->in_mb = MBX_0;
	if (IS_FWI2_CAPABLE(ha)) {
		mcp->mb[1] = loop_id;
		mcp->mb[4] = 0;
		mcp->mb[10] = 0;
		mcp->out_mb |= MBX_10|MBX_4|MBX_1;
		mcp->in_mb |= MBX_1;
	} else if (HAS_EXTENDED_IDS(ha)) {
		mcp->mb[1] = loop_id;
		mcp->mb[10] = 0;
		mcp->out_mb |= MBX_10|MBX_1;
	} else {
		mcp->mb[1] = loop_id << 8;
		mcp->out_mb |= MBX_1;
	}
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = IOCTL_CMD;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval == QLA_SUCCESS) {
		if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
			ql_dbg(ql_dbg_mbx, vha, 0x1085,
			    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
			rval = QLA_FUNCTION_FAILED;
		} else {
			/* Re-endianize - firmware data is le32. */
			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1086,
			    "Done %s.\n", __func__);
			for ( ; dwords--; iter++)
				le32_to_cpus(iter);
		}
	} else {
		/* Failed. */
		ql_dbg(ql_dbg_mbx, vha, 0x1087, "Failed=%x.\n", rval);
	}

	return rval;
}

/*
 * qla24xx_get_isp_stats
 *	Fetch ISP24xx private link statistics through the asynchronous
 *	mailbox path (qla24xx_send_mb_cmd); results land in *stats.
 */
int
qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
    dma_addr_t stats_dma, uint16_t options)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;	/* alias of mc; completion read through mcp */
	uint32_t *iter, dwords;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1088,
	    "Entered %s.\n", __func__);

	memset(&mc, 0, sizeof(mc));
	mc.mb[0] = MBC_GET_LINK_PRIV_STATS;
	mc.mb[2] = MSW(stats_dma);
	mc.mb[3] = LSW(stats_dma);
	mc.mb[6] = MSW(MSD(stats_dma));
	mc.mb[7] = LSW(MSD(stats_dma));
	mc.mb[8] = sizeof(struct link_statistics) / 4;
	/*
	 * NOTE(review): mb[] is treated as host-order u16 everywhere else in
	 * this file; the cpu_to_le16() on the next two stores looks wrong on
	 * big-endian hosts -- confirm against qla24xx_send_mb_cmd().
	 */
	mc.mb[9] = cpu_to_le16(vha->vp_idx);
	mc.mb[10] = cpu_to_le16(options);

	rval = qla24xx_send_mb_cmd(vha, &mc);

	if (rval == QLA_SUCCESS) {
		if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
			ql_dbg(ql_dbg_mbx, vha, 0x1089,
			    "Failed mb[0]=%x.\n", mcp->mb[0]);
			rval = QLA_FUNCTION_FAILED;
		} else {
			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108a,
			    "Done %s.\n", __func__);
			/* Re-endianize - firmware data is le32. */
			dwords = sizeof(struct link_statistics) / 4;
			iter = &stats->link_fail_cnt;
			for ( ; dwords--; iter++)
				le32_to_cpus(iter);
		}
	} else {
		/* Failed. */
		ql_dbg(ql_dbg_mbx, vha, 0x108b, "Failed=%x.\n", rval);
	}

	return rval;
}

/*
 * qla24xx_abort_command
 *	Abort an outstanding SRB by issuing an ABORT IOCB on its
 *	queue pair.  Returns a qla2x00 local status code.
 */
int
qla24xx_abort_command(srb_t *sp)
{
	int rval;
	unsigned long flags = 0;

	struct abort_entry_24xx *abt;
	dma_addr_t abt_dma;
	uint32_t handle;
	fc_port_t *fcport = sp->fcport;
	struct scsi_qla_host *vha = fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = vha->req;
	struct qla_qpair *qpair = sp->qpair;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c,
	    "Entered %s.\n", __func__);

	if (vha->flags.qpairs_available && sp->qpair)
		req = sp->qpair->req;
	else
		return QLA_FUNCTION_FAILED;

	if (ql2xasynctmfenable)
		return qla24xx_async_abort_command(sp);

	/* Locate the SRB's handle under the queue-pair lock. */
	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
	for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
		if (req->outstanding_cmds[handle] == sp)
			break;
	}
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
	if (handle == req->num_outstanding_cmds) {
		/* Command not found. */
		return QLA_FUNCTION_FAILED;
	}

	abt = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &abt_dma);
	if (abt == NULL) {
		ql_log(ql_log_warn, vha, 0x108d,
		    "Failed to allocate abort IOCB.\n");
		return QLA_MEMORY_ALLOC_FAILED;
	}

	abt->entry_type = ABORT_IOCB_TYPE;
	abt->entry_count = 1;
	abt->handle = MAKE_HANDLE(req->id, abt->handle);
	abt->nport_handle = cpu_to_le16(fcport->loop_id);
	abt->handle_to_abort = MAKE_HANDLE(req->id, handle);
	abt->port_id[0] = fcport->d_id.b.al_pa;
	abt->port_id[1] = fcport->d_id.b.area;
	abt->port_id[2] = fcport->d_id.b.domain;
	abt->vp_index = fcport->vha->vp_idx;

	abt->req_que_no = cpu_to_le16(req->id);

	rval = qla2x00_issue_iocb(vha, abt, abt_dma, 0);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x108e,
		    "Failed to issue IOCB (%x).\n", rval);
	} else if (abt->entry_status != 0) {
		ql_dbg(ql_dbg_mbx, vha, 0x108f,
		    "Failed to complete IOCB -- error status (%x).\n",
		    abt->entry_status);
		rval = QLA_FUNCTION_FAILED;
	} else if (abt->nport_handle != cpu_to_le16(0)) {
		/* Firmware reuses nport_handle as the completion status. */
		ql_dbg(ql_dbg_mbx, vha, 0x1090,
		    "Failed to complete IOCB -- completion status (%x).\n",
		    le16_to_cpu(abt->nport_handle));
		/*
		 * NOTE(review): compares a __le16 field against the
		 * host-order CS_IOCB_ERROR constant -- only correct on
		 * little-endian hosts; confirm whether cpu_to_le16() is
		 * needed here.
		 */
		if (abt->nport_handle == CS_IOCB_ERROR)
			rval = QLA_FUNCTION_PARAMETER_ERROR;
		else
			rval = QLA_FUNCTION_FAILED;
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1091,
		    "Done %s.\n", __func__);
	}

	dma_pool_free(ha->s_dma_pool, abt, abt_dma);

	return rval;
}

/* Task-management IOCB overlaid with its status response entry. */
struct tsk_mgmt_cmd {
	union {
		struct tsk_mgmt_entry tsk;
		struct sts_entry_24xx sts;
	} p;
};

/*
 * __qla24xx_issue_tmf
 *	Issue a task-management IOCB (target or LUN reset) for @fcport,
 *	check the overlaid status entry, then post a marker IOCB.
 *	@name is only used in log messages; @l is the LUN for
 *	TCF_LUN_RESET.
 */
static int
__qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
    uint64_t l, int tag)
{
	int rval, rval2;
	struct tsk_mgmt_cmd *tsk;
	struct sts_entry_24xx *sts;
	dma_addr_t tsk_dma;
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	struct req_que *req;
	struct qla_qpair *qpair;

	vha = fcport->vha;
	ha = vha->hw;
	req = vha->req;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1092,
	    "Entered %s.\n", __func__);

	if (vha->vp_idx && vha->qpair) {
		/* NPIV port */
		qpair = vha->qpair;
		req = qpair->req;
	}

	tsk = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma);
	if (tsk == NULL) {
		ql_log(ql_log_warn, vha, 0x1093,
		    "Failed to allocate task management IOCB.\n");
		return QLA_MEMORY_ALLOC_FAILED;
	}

	tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE;
	tsk->p.tsk.entry_count = 1;
	tsk->p.tsk.handle = MAKE_HANDLE(req->id, tsk->p.tsk.handle);
	tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id);
	tsk->p.tsk.timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
	tsk->p.tsk.control_flags = cpu_to_le32(type);
	tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa;
	tsk->p.tsk.port_id[1] = fcport->d_id.b.area;
	tsk->p.tsk.port_id[2] = fcport->d_id.b.domain;
	tsk->p.tsk.vp_index = fcport->vha->vp_idx;
	if (type == TCF_LUN_RESET) {
		int_to_scsilun(l, &tsk->p.tsk.lun);
		host_to_fcp_swap((uint8_t *)&tsk->p.tsk.lun,
		    sizeof(tsk->p.tsk.lun));
	}

	/* On completion the same buffer holds the status entry. */
	sts = &tsk->p.sts;
	rval = qla2x00_issue_iocb(vha, tsk, tsk_dma, 0);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1094,
		    "Failed to issue %s reset IOCB (%x).\n", name, rval);
	} else if (sts->entry_status != 0) {
		ql_dbg(ql_dbg_mbx, vha, 0x1095,
		    "Failed to complete IOCB -- error status (%x).\n",
		    sts->entry_status);
		rval = QLA_FUNCTION_FAILED;
	} else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) {
		ql_dbg(ql_dbg_mbx, vha, 0x1096,
		    "Failed to complete IOCB -- completion status (%x).\n",
		    le16_to_cpu(sts->comp_status));
		rval = QLA_FUNCTION_FAILED;
	} else if (le16_to_cpu(sts->scsi_status) &
	    SS_RESPONSE_INFO_LEN_VALID) {
		if (le32_to_cpu(sts->rsp_data_len) < 4) {
			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1097,
			    "Ignoring inconsistent data length -- not enough "
			    "response info (%d).\n",
			    le32_to_cpu(sts->rsp_data_len));
		} else if (sts->data[3]) {
			ql_dbg(ql_dbg_mbx, vha, 0x1098,
			    "Failed to complete IOCB -- response (%x).\n",
			    sts->data[3]);
			rval = QLA_FUNCTION_FAILED;
		}
	}

	/* Issue marker IOCB. */
	rval2 = qla2x00_marker(vha, ha->base_qpair, fcport->loop_id, l,
	    type == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
	if (rval2 != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1099,
		    "Failed to issue marker IOCB (%x).\n", rval2);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109a,
		    "Done %s.\n", __func__);
	}

	dma_pool_free(ha->s_dma_pool, tsk, tsk_dma);

	return rval;
}

/* Target reset: async TMF when enabled, else synchronous IOCB path. */
int
qla24xx_abort_target(struct fc_port *fcport, uint64_t l, int tag)
{
	struct qla_hw_data *ha = fcport->vha->hw;

	if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha))
		return qla2x00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag);

	return __qla24xx_issue_tmf("Target", TCF_TARGET_RESET, fcport, l, tag);
}

/* LUN reset: async TMF when enabled, else synchronous IOCB path. */
int
qla24xx_lun_reset(struct fc_port *fcport, uint64_t l, int tag)
{
	struct qla_hw_data *ha = fcport->vha->hw;

	if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha))
		return qla2x00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag);

	return __qla24xx_issue_tmf("Lun", TCF_LUN_RESET, fcport, l, tag);
}

/*
 * qla2x00_system_error
 *	Ask the firmware to generate a system error (for diagnostics).
 */
int
qla2x00_system_error(scsi_qla_host_t *vha)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA23XX(ha) && !IS_FWI2_CAPABLE(ha))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109b,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_GEN_SYSTEM_ERROR;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_0;
	mcp->tov = 5;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x109c, "Failed=%x.\n", rval);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109d,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2x00_write_serdes_word
 *	Write one SerDes register (25xx/2031/27xx/28xx only).
 */
int
qla2x00_write_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t data)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) &&
	    !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1182,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_WRITE_SERDES;
	mcp->mb[1] = addr;
	/* 2031 SerDes registers are 8 bits wide. */
	if (IS_QLA2031(vha->hw))
		mcp->mb[2] = data & 0xff;
	else
		mcp->mb[2] = data;

	mcp->mb[3] = 0;
	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1183,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1184,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2x00_read_serdes_word
 *	Read one SerDes register into *data (25xx/2031/27xx/28xx only).
 */
int
qla2x00_read_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t *data)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) &&
	    !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1185,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_READ_SERDES;
	mcp->mb[1] = addr;
	mcp->mb[3] = 0;
	mcp->out_mb = MBX_3|MBX_1|MBX_0;
	mcp->in_mb = MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	/*
	 * NOTE(review): *data is written even when the command failed, so
	 * callers may observe stale register contents on error -- confirm
	 * callers check the return code before using *data.
	 */
	if (IS_QLA2031(vha->hw))
		*data = mcp->mb[1] & 0xff;
	else
		*data = mcp->mb[1];

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1186,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1187,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla8044_write_serdes_word
 *	Write one Ethernet SerDes register on ISP8044.
 */
int
qla8044_write_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t data)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	if (!IS_QLA8044(vha->hw))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x11a0,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG;
	mcp->mb[1] = HCS_WRITE_SERDES;
	mcp->mb[3] = LSW(addr);
	mcp->mb[4] = MSW(addr);
	mcp->mb[5] = LSW(data);
	mcp->mb[6] = MSW(data);
	mcp->out_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_1|MBX_0;
	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x11a1,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1188,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla8044_read_serdes_word
 *	Read one Ethernet SerDes register on ISP8044 into *data.
 */
int
qla8044_read_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t *data)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	if (!IS_QLA8044(vha->hw))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1189,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG;
	mcp->mb[1] = HCS_READ_SERDES;
	mcp->mb[3] = LSW(addr);
	mcp->mb[4] = MSW(addr);
	mcp->out_mb = MBX_4|MBX_3|MBX_1|MBX_0;
	mcp->in_mb = MBX_2|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	/*
	 * 32-bit value is returned split across mb[2]/mb[1].
	 * NOTE(review): written even on failure -- see read_serdes_word.
	 */
	*data = mcp->mb[2] << 16 | mcp->mb[1];

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x118a,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118b,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/**
 * qla2x00_set_serdes_params() -
 * @vha: HA context
 * @sw_em_1g: serial link options
 * @sw_em_2g: serial link options
 * @sw_em_4g: serial link options
 *
 * Returns
 */
int
qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g,
    uint16_t sw_em_2g, uint16_t sw_em_4g)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109e,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_SERDES_PARAMS;
	mcp->mb[1] = BIT_0;
	/* BIT_15 presumably marks each value as valid -- TODO confirm. */
	mcp->mb[2] = sw_em_1g | BIT_15;
	mcp->mb[3] = sw_em_2g | BIT_15;
	mcp->mb[4] = sw_em_4g | BIT_15;
	mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x109f,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a0,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2x00_stop_firmware
 *	Issue the Stop Firmware mailbox command (FWI-2 only).
 */
int
qla2x00_stop_firmware(scsi_qla_host_t *vha)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	if (!IS_FWI2_CAPABLE(vha->hw))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a1,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_STOP_FIRMWARE;
	mcp->mb[1] = 0;
	mcp->out_mb = MBX_1|MBX_0;
	mcp->in_mb = MBX_0;
	mcp->tov = 5;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
3555 if (rval != QLA_SUCCESS) { 3556 ql_dbg(ql_dbg_mbx, vha, 0x10a2, "Failed=%x.\n", rval); 3557 if (mcp->mb[0] == MBS_INVALID_COMMAND) 3558 rval = QLA_INVALID_COMMAND; 3559 } else { 3560 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a3, 3561 "Done %s.\n", __func__); 3562 } 3563 3564 return rval; 3565 } 3566 3567 int 3568 qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma, 3569 uint16_t buffers) 3570 { 3571 int rval; 3572 mbx_cmd_t mc; 3573 mbx_cmd_t *mcp = &mc; 3574 3575 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a4, 3576 "Entered %s.\n", __func__); 3577 3578 if (!IS_FWI2_CAPABLE(vha->hw)) 3579 return QLA_FUNCTION_FAILED; 3580 3581 if (unlikely(pci_channel_offline(vha->hw->pdev))) 3582 return QLA_FUNCTION_FAILED; 3583 3584 mcp->mb[0] = MBC_TRACE_CONTROL; 3585 mcp->mb[1] = TC_EFT_ENABLE; 3586 mcp->mb[2] = LSW(eft_dma); 3587 mcp->mb[3] = MSW(eft_dma); 3588 mcp->mb[4] = LSW(MSD(eft_dma)); 3589 mcp->mb[5] = MSW(MSD(eft_dma)); 3590 mcp->mb[6] = buffers; 3591 mcp->mb[7] = TC_AEN_DISABLE; 3592 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 3593 mcp->in_mb = MBX_1|MBX_0; 3594 mcp->tov = MBX_TOV_SECONDS; 3595 mcp->flags = 0; 3596 rval = qla2x00_mailbox_command(vha, mcp); 3597 if (rval != QLA_SUCCESS) { 3598 ql_dbg(ql_dbg_mbx, vha, 0x10a5, 3599 "Failed=%x mb[0]=%x mb[1]=%x.\n", 3600 rval, mcp->mb[0], mcp->mb[1]); 3601 } else { 3602 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a6, 3603 "Done %s.\n", __func__); 3604 } 3605 3606 return rval; 3607 } 3608 3609 int 3610 qla2x00_disable_eft_trace(scsi_qla_host_t *vha) 3611 { 3612 int rval; 3613 mbx_cmd_t mc; 3614 mbx_cmd_t *mcp = &mc; 3615 3616 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a7, 3617 "Entered %s.\n", __func__); 3618 3619 if (!IS_FWI2_CAPABLE(vha->hw)) 3620 return QLA_FUNCTION_FAILED; 3621 3622 if (unlikely(pci_channel_offline(vha->hw->pdev))) 3623 return QLA_FUNCTION_FAILED; 3624 3625 mcp->mb[0] = MBC_TRACE_CONTROL; 3626 mcp->mb[1] = TC_EFT_DISABLE; 3627 mcp->out_mb = MBX_1|MBX_0; 3628 
mcp->in_mb = MBX_1|MBX_0; 3629 mcp->tov = MBX_TOV_SECONDS; 3630 mcp->flags = 0; 3631 rval = qla2x00_mailbox_command(vha, mcp); 3632 if (rval != QLA_SUCCESS) { 3633 ql_dbg(ql_dbg_mbx, vha, 0x10a8, 3634 "Failed=%x mb[0]=%x mb[1]=%x.\n", 3635 rval, mcp->mb[0], mcp->mb[1]); 3636 } else { 3637 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a9, 3638 "Done %s.\n", __func__); 3639 } 3640 3641 return rval; 3642 } 3643 3644 int 3645 qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma, 3646 uint16_t buffers, uint16_t *mb, uint32_t *dwords) 3647 { 3648 int rval; 3649 mbx_cmd_t mc; 3650 mbx_cmd_t *mcp = &mc; 3651 3652 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10aa, 3653 "Entered %s.\n", __func__); 3654 3655 if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw) && 3656 !IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) && 3657 !IS_QLA28XX(vha->hw)) 3658 return QLA_FUNCTION_FAILED; 3659 3660 if (unlikely(pci_channel_offline(vha->hw->pdev))) 3661 return QLA_FUNCTION_FAILED; 3662 3663 mcp->mb[0] = MBC_TRACE_CONTROL; 3664 mcp->mb[1] = TC_FCE_ENABLE; 3665 mcp->mb[2] = LSW(fce_dma); 3666 mcp->mb[3] = MSW(fce_dma); 3667 mcp->mb[4] = LSW(MSD(fce_dma)); 3668 mcp->mb[5] = MSW(MSD(fce_dma)); 3669 mcp->mb[6] = buffers; 3670 mcp->mb[7] = TC_AEN_DISABLE; 3671 mcp->mb[8] = 0; 3672 mcp->mb[9] = TC_FCE_DEFAULT_RX_SIZE; 3673 mcp->mb[10] = TC_FCE_DEFAULT_TX_SIZE; 3674 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2| 3675 MBX_1|MBX_0; 3676 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 3677 mcp->tov = MBX_TOV_SECONDS; 3678 mcp->flags = 0; 3679 rval = qla2x00_mailbox_command(vha, mcp); 3680 if (rval != QLA_SUCCESS) { 3681 ql_dbg(ql_dbg_mbx, vha, 0x10ab, 3682 "Failed=%x mb[0]=%x mb[1]=%x.\n", 3683 rval, mcp->mb[0], mcp->mb[1]); 3684 } else { 3685 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ac, 3686 "Done %s.\n", __func__); 3687 3688 if (mb) 3689 memcpy(mb, mcp->mb, 8 * sizeof(*mb)); 3690 if (dwords) 3691 *dwords = buffers; 3692 } 3693 3694 return rval; 3695 } 3696 
/*
 * qla2x00_disable_fce_trace
 *	Ask the firmware to disable FCE (Fibre Channel Event) tracing
 *	via MBC_TRACE_CONTROL and optionally return the final trace
 *	buffer pointers.
 *
 * Input:
 *	vha = adapter block pointer.
 *	wr = optional out: 64-bit write pointer, assembled from mb[5..2].
 *	rd = optional out: 64-bit read pointer, assembled from mb[9..6].
 *
 * Returns:
 *	qla2xxx local function return status code.
 */
int
qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ad,
	    "Entered %s.\n", __func__);

	if (!IS_FWI2_CAPABLE(vha->hw))
		return QLA_FUNCTION_FAILED;

	/* Don't touch the mailbox while the PCI channel is offline. */
	if (unlikely(pci_channel_offline(vha->hw->pdev)))
		return QLA_FUNCTION_FAILED;

	mcp->mb[0] = MBC_TRACE_CONTROL;
	mcp->mb[1] = TC_FCE_DISABLE;
	mcp->mb[2] = TC_FCE_DISABLE_TRACE;
	mcp->out_mb = MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|
	    MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x10ae,
		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
		    rval, mcp->mb[0], mcp->mb[1]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10af,
		    "Done %s.\n", __func__);

		/* Reassemble 64-bit pointers from the 16-bit mailboxes. */
		if (wr)
			*wr = (uint64_t) mcp->mb[5] << 48 |
			    (uint64_t) mcp->mb[4] << 32 |
			    (uint64_t) mcp->mb[3] << 16 |
			    (uint64_t) mcp->mb[2];
		if (rd)
			*rd = (uint64_t) mcp->mb[9] << 48 |
			    (uint64_t) mcp->mb[8] << 32 |
			    (uint64_t) mcp->mb[7] << 16 |
			    (uint64_t) mcp->mb[6];
	}

	return rval;
}

/*
 * qla2x00_get_idma_speed
 *	Query the iIDMA port speed for @loop_id via MBC_PORT_PARAMS.
 *	On success the speed (returned mb[3]) is stored through
 *	@port_speed; raw statuses mb[0], mb[1], mb[3] are copied to
 *	@mb when supplied, regardless of success.
 */
int
qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
    uint16_t *port_speed, uint16_t *mb)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b0,
	    "Entered %s.\n", __func__);

	if (!IS_IIDMA_CAPABLE(vha->hw))
		return QLA_FUNCTION_FAILED;

	mcp->mb[0] = MBC_PORT_PARAMS;
	mcp->mb[1] = loop_id;
	mcp->mb[2] = mcp->mb[3] = 0;	/* mb[2]=0: query only (set path uses BIT_0) */
	mcp->mb[9] = vha->vp_idx;
	mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_3|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval =
qla2x00_mailbox_command(vha, mcp); 3768 3769 /* Return mailbox statuses. */ 3770 if (mb) { 3771 mb[0] = mcp->mb[0]; 3772 mb[1] = mcp->mb[1]; 3773 mb[3] = mcp->mb[3]; 3774 } 3775 3776 if (rval != QLA_SUCCESS) { 3777 ql_dbg(ql_dbg_mbx, vha, 0x10b1, "Failed=%x.\n", rval); 3778 } else { 3779 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b2, 3780 "Done %s.\n", __func__); 3781 if (port_speed) 3782 *port_speed = mcp->mb[3]; 3783 } 3784 3785 return rval; 3786 } 3787 3788 int 3789 qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id, 3790 uint16_t port_speed, uint16_t *mb) 3791 { 3792 int rval; 3793 mbx_cmd_t mc; 3794 mbx_cmd_t *mcp = &mc; 3795 3796 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b3, 3797 "Entered %s.\n", __func__); 3798 3799 if (!IS_IIDMA_CAPABLE(vha->hw)) 3800 return QLA_FUNCTION_FAILED; 3801 3802 mcp->mb[0] = MBC_PORT_PARAMS; 3803 mcp->mb[1] = loop_id; 3804 mcp->mb[2] = BIT_0; 3805 mcp->mb[3] = port_speed & 0x3F; 3806 mcp->mb[9] = vha->vp_idx; 3807 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0; 3808 mcp->in_mb = MBX_3|MBX_1|MBX_0; 3809 mcp->tov = MBX_TOV_SECONDS; 3810 mcp->flags = 0; 3811 rval = qla2x00_mailbox_command(vha, mcp); 3812 3813 /* Return mailbox statuses. 
*/ 3814 if (mb) { 3815 mb[0] = mcp->mb[0]; 3816 mb[1] = mcp->mb[1]; 3817 mb[3] = mcp->mb[3]; 3818 } 3819 3820 if (rval != QLA_SUCCESS) { 3821 ql_dbg(ql_dbg_mbx, vha, 0x10b4, 3822 "Failed=%x.\n", rval); 3823 } else { 3824 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b5, 3825 "Done %s.\n", __func__); 3826 } 3827 3828 return rval; 3829 } 3830 3831 void 3832 qla24xx_report_id_acquisition(scsi_qla_host_t *vha, 3833 struct vp_rpt_id_entry_24xx *rptid_entry) 3834 { 3835 struct qla_hw_data *ha = vha->hw; 3836 scsi_qla_host_t *vp = NULL; 3837 unsigned long flags; 3838 int found; 3839 port_id_t id; 3840 struct fc_port *fcport; 3841 3842 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6, 3843 "Entered %s.\n", __func__); 3844 3845 if (rptid_entry->entry_status != 0) 3846 return; 3847 3848 id.b.domain = rptid_entry->port_id[2]; 3849 id.b.area = rptid_entry->port_id[1]; 3850 id.b.al_pa = rptid_entry->port_id[0]; 3851 id.b.rsvd_1 = 0; 3852 ha->flags.n2n_ae = 0; 3853 3854 if (rptid_entry->format == 0) { 3855 /* loop */ 3856 ql_dbg(ql_dbg_async, vha, 0x10b7, 3857 "Format 0 : Number of VPs setup %d, number of " 3858 "VPs acquired %d.\n", rptid_entry->vp_setup, 3859 rptid_entry->vp_acquired); 3860 ql_dbg(ql_dbg_async, vha, 0x10b8, 3861 "Primary port id %02x%02x%02x.\n", 3862 rptid_entry->port_id[2], rptid_entry->port_id[1], 3863 rptid_entry->port_id[0]); 3864 ha->current_topology = ISP_CFG_NL; 3865 qlt_update_host_map(vha, id); 3866 3867 } else if (rptid_entry->format == 1) { 3868 /* fabric */ 3869 ql_dbg(ql_dbg_async, vha, 0x10b9, 3870 "Format 1: VP[%d] enabled - status %d - with " 3871 "port id %02x%02x%02x.\n", rptid_entry->vp_idx, 3872 rptid_entry->vp_status, 3873 rptid_entry->port_id[2], rptid_entry->port_id[1], 3874 rptid_entry->port_id[0]); 3875 ql_dbg(ql_dbg_async, vha, 0x5075, 3876 "Format 1: Remote WWPN %8phC.\n", 3877 rptid_entry->u.f1.port_name); 3878 3879 ql_dbg(ql_dbg_async, vha, 0x5075, 3880 "Format 1: WWPN %8phC.\n", 3881 vha->port_name); 3882 3883 switch 
(rptid_entry->u.f1.flags & TOPO_MASK) { 3884 case TOPO_N2N: 3885 ha->current_topology = ISP_CFG_N; 3886 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); 3887 list_for_each_entry(fcport, &vha->vp_fcports, list) { 3888 fcport->scan_state = QLA_FCPORT_SCAN; 3889 fcport->n2n_flag = 0; 3890 } 3891 3892 fcport = qla2x00_find_fcport_by_wwpn(vha, 3893 rptid_entry->u.f1.port_name, 1); 3894 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 3895 3896 if (fcport) { 3897 fcport->plogi_nack_done_deadline = jiffies + HZ; 3898 fcport->dm_login_expire = jiffies + 2*HZ; 3899 fcport->scan_state = QLA_FCPORT_FOUND; 3900 fcport->n2n_flag = 1; 3901 fcport->keep_nport_handle = 1; 3902 fcport->fc4_type = FS_FC4TYPE_FCP; 3903 if (vha->flags.nvme_enabled) 3904 fcport->fc4_type |= FS_FC4TYPE_NVME; 3905 3906 switch (fcport->disc_state) { 3907 case DSC_DELETED: 3908 set_bit(RELOGIN_NEEDED, 3909 &vha->dpc_flags); 3910 break; 3911 case DSC_DELETE_PEND: 3912 break; 3913 default: 3914 qlt_schedule_sess_for_deletion(fcport); 3915 break; 3916 } 3917 } else { 3918 id.b24 = 0; 3919 if (wwn_to_u64(vha->port_name) > 3920 wwn_to_u64(rptid_entry->u.f1.port_name)) { 3921 vha->d_id.b24 = 0; 3922 vha->d_id.b.al_pa = 1; 3923 ha->flags.n2n_bigger = 1; 3924 3925 id.b.al_pa = 2; 3926 ql_dbg(ql_dbg_async, vha, 0x5075, 3927 "Format 1: assign local id %x remote id %x\n", 3928 vha->d_id.b24, id.b24); 3929 } else { 3930 ql_dbg(ql_dbg_async, vha, 0x5075, 3931 "Format 1: Remote login - Waiting for WWPN %8phC.\n", 3932 rptid_entry->u.f1.port_name); 3933 ha->flags.n2n_bigger = 0; 3934 } 3935 qla24xx_post_newsess_work(vha, &id, 3936 rptid_entry->u.f1.port_name, 3937 rptid_entry->u.f1.node_name, 3938 NULL, 3939 FS_FCP_IS_N2N); 3940 } 3941 3942 /* if our portname is higher then initiate N2N login */ 3943 3944 set_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags); 3945 ha->flags.n2n_ae = 1; 3946 return; 3947 break; 3948 case TOPO_FL: 3949 ha->current_topology = ISP_CFG_FL; 3950 break; 3951 case TOPO_F: 3952 ha->current_topology 
= ISP_CFG_F; 3953 break; 3954 default: 3955 break; 3956 } 3957 3958 ha->flags.gpsc_supported = 1; 3959 ha->current_topology = ISP_CFG_F; 3960 /* buffer to buffer credit flag */ 3961 vha->flags.bbcr_enable = (rptid_entry->u.f1.bbcr & 0xf) != 0; 3962 3963 if (rptid_entry->vp_idx == 0) { 3964 if (rptid_entry->vp_status == VP_STAT_COMPL) { 3965 /* FA-WWN is only for physical port */ 3966 if (qla_ini_mode_enabled(vha) && 3967 ha->flags.fawwpn_enabled && 3968 (rptid_entry->u.f1.flags & 3969 BIT_6)) { 3970 memcpy(vha->port_name, 3971 rptid_entry->u.f1.port_name, 3972 WWN_SIZE); 3973 } 3974 3975 qlt_update_host_map(vha, id); 3976 } 3977 3978 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags); 3979 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags); 3980 } else { 3981 if (rptid_entry->vp_status != VP_STAT_COMPL && 3982 rptid_entry->vp_status != VP_STAT_ID_CHG) { 3983 ql_dbg(ql_dbg_mbx, vha, 0x10ba, 3984 "Could not acquire ID for VP[%d].\n", 3985 rptid_entry->vp_idx); 3986 return; 3987 } 3988 3989 found = 0; 3990 spin_lock_irqsave(&ha->vport_slock, flags); 3991 list_for_each_entry(vp, &ha->vp_list, list) { 3992 if (rptid_entry->vp_idx == vp->vp_idx) { 3993 found = 1; 3994 break; 3995 } 3996 } 3997 spin_unlock_irqrestore(&ha->vport_slock, flags); 3998 3999 if (!found) 4000 return; 4001 4002 qlt_update_host_map(vp, id); 4003 4004 /* 4005 * Cannot configure here as we are still sitting on the 4006 * response queue. Handle it in dpc context. 
4007 */ 4008 set_bit(VP_IDX_ACQUIRED, &vp->vp_flags); 4009 set_bit(REGISTER_FC4_NEEDED, &vp->dpc_flags); 4010 set_bit(REGISTER_FDMI_NEEDED, &vp->dpc_flags); 4011 } 4012 set_bit(VP_DPC_NEEDED, &vha->dpc_flags); 4013 qla2xxx_wake_dpc(vha); 4014 } else if (rptid_entry->format == 2) { 4015 ql_dbg(ql_dbg_async, vha, 0x505f, 4016 "RIDA: format 2/N2N Primary port id %02x%02x%02x.\n", 4017 rptid_entry->port_id[2], rptid_entry->port_id[1], 4018 rptid_entry->port_id[0]); 4019 4020 ql_dbg(ql_dbg_async, vha, 0x5075, 4021 "N2N: Remote WWPN %8phC.\n", 4022 rptid_entry->u.f2.port_name); 4023 4024 /* N2N. direct connect */ 4025 ha->current_topology = ISP_CFG_N; 4026 ha->flags.rida_fmt2 = 1; 4027 vha->d_id.b.domain = rptid_entry->port_id[2]; 4028 vha->d_id.b.area = rptid_entry->port_id[1]; 4029 vha->d_id.b.al_pa = rptid_entry->port_id[0]; 4030 4031 ha->flags.n2n_ae = 1; 4032 spin_lock_irqsave(&ha->vport_slock, flags); 4033 qlt_update_vp_map(vha, SET_AL_PA); 4034 spin_unlock_irqrestore(&ha->vport_slock, flags); 4035 4036 list_for_each_entry(fcport, &vha->vp_fcports, list) { 4037 fcport->scan_state = QLA_FCPORT_SCAN; 4038 fcport->n2n_flag = 0; 4039 } 4040 4041 fcport = qla2x00_find_fcport_by_wwpn(vha, 4042 rptid_entry->u.f2.port_name, 1); 4043 4044 if (fcport) { 4045 fcport->login_retry = vha->hw->login_retry_count; 4046 fcport->plogi_nack_done_deadline = jiffies + HZ; 4047 fcport->scan_state = QLA_FCPORT_FOUND; 4048 fcport->keep_nport_handle = 1; 4049 fcport->n2n_flag = 1; 4050 fcport->d_id.b.domain = 4051 rptid_entry->u.f2.remote_nport_id[2]; 4052 fcport->d_id.b.area = 4053 rptid_entry->u.f2.remote_nport_id[1]; 4054 fcport->d_id.b.al_pa = 4055 rptid_entry->u.f2.remote_nport_id[0]; 4056 } 4057 } 4058 } 4059 4060 /* 4061 * qla24xx_modify_vp_config 4062 * Change VP configuration for vha 4063 * 4064 * Input: 4065 * vha = adapter block pointer. 4066 * 4067 * Returns: 4068 * qla2xxx local function return status code. 4069 * 4070 * Context: 4071 * Kernel context. 
4072 */ 4073 int 4074 qla24xx_modify_vp_config(scsi_qla_host_t *vha) 4075 { 4076 int rval; 4077 struct vp_config_entry_24xx *vpmod; 4078 dma_addr_t vpmod_dma; 4079 struct qla_hw_data *ha = vha->hw; 4080 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 4081 4082 /* This can be called by the parent */ 4083 4084 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10bb, 4085 "Entered %s.\n", __func__); 4086 4087 vpmod = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma); 4088 if (!vpmod) { 4089 ql_log(ql_log_warn, vha, 0x10bc, 4090 "Failed to allocate modify VP IOCB.\n"); 4091 return QLA_MEMORY_ALLOC_FAILED; 4092 } 4093 4094 vpmod->entry_type = VP_CONFIG_IOCB_TYPE; 4095 vpmod->entry_count = 1; 4096 vpmod->command = VCT_COMMAND_MOD_ENABLE_VPS; 4097 vpmod->vp_count = 1; 4098 vpmod->vp_index1 = vha->vp_idx; 4099 vpmod->options_idx1 = BIT_3|BIT_4|BIT_5; 4100 4101 qlt_modify_vp_config(vha, vpmod); 4102 4103 memcpy(vpmod->node_name_idx1, vha->node_name, WWN_SIZE); 4104 memcpy(vpmod->port_name_idx1, vha->port_name, WWN_SIZE); 4105 vpmod->entry_count = 1; 4106 4107 rval = qla2x00_issue_iocb(base_vha, vpmod, vpmod_dma, 0); 4108 if (rval != QLA_SUCCESS) { 4109 ql_dbg(ql_dbg_mbx, vha, 0x10bd, 4110 "Failed to issue VP config IOCB (%x).\n", rval); 4111 } else if (vpmod->comp_status != 0) { 4112 ql_dbg(ql_dbg_mbx, vha, 0x10be, 4113 "Failed to complete IOCB -- error status (%x).\n", 4114 vpmod->comp_status); 4115 rval = QLA_FUNCTION_FAILED; 4116 } else if (vpmod->comp_status != cpu_to_le16(CS_COMPLETE)) { 4117 ql_dbg(ql_dbg_mbx, vha, 0x10bf, 4118 "Failed to complete IOCB -- completion status (%x).\n", 4119 le16_to_cpu(vpmod->comp_status)); 4120 rval = QLA_FUNCTION_FAILED; 4121 } else { 4122 /* EMPTY */ 4123 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c0, 4124 "Done %s.\n", __func__); 4125 fc_vport_set_state(vha->fc_vport, FC_VPORT_INITIALIZING); 4126 } 4127 dma_pool_free(ha->s_dma_pool, vpmod, vpmod_dma); 4128 4129 return rval; 4130 } 4131 4132 /* 4133 * 
 * qla2x00_send_change_request
 *	Receive or disable RSCN request from fabric controller
 *
 * Input:
 *	ha = adapter block pointer
 *	format = registration format:
 *		0 - Reserved
 *		1 - Fabric detected registration
 *		2 - N_port detected registration
 *		3 - Full registration
 *		FF - clear registration
 *	vp_idx = Virtual port index
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel Context
 */

int
qla2x00_send_change_request(scsi_qla_host_t *vha, uint16_t format,
    uint16_t vp_idx)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c7,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_SEND_CHANGE_REQUEST;
	mcp->mb[1] = format;
	mcp->mb[9] = vp_idx;
	mcp->out_mb = MBX_9|MBX_1|MBX_0;
	mcp->in_mb = MBX_0|MBX_1;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	/*
	 * NOTE(review): failure is reported as BIT_1 rather than a
	 * QLA_* status code; callers appear to depend on this quirk,
	 * so it is preserved as-is.
	 */
	if (rval == QLA_SUCCESS) {
		if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
			rval = BIT_1;
		}
	} else
		rval = BIT_1;

	return rval;
}

/*
 * qla2x00_dump_ram
 *	Dump a region of RISC RAM at @addr into the host DMA buffer at
 *	@req_dma.  The extended command is used whenever the address
 *	needs more than 16 bits or the chip is FWI2-capable.
 */
int
qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
    uint32_t size)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1009,
	    "Entered %s.\n", __func__);

	if (MSW(addr) || IS_FWI2_CAPABLE(vha->hw)) {
		mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
		mcp->mb[8] = MSW(addr);
		mcp->out_mb = MBX_8|MBX_0;
	} else {
		mcp->mb[0] = MBC_DUMP_RISC_RAM;
		mcp->out_mb = MBX_0;
	}
	mcp->mb[1] = LSW(addr);
	mcp->mb[2] = MSW(req_dma);
	mcp->mb[3] = LSW(req_dma);
	mcp->mb[6] = MSW(MSD(req_dma));
	mcp->mb[7] = LSW(MSD(req_dma));
	mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
	if (IS_FWI2_CAPABLE(vha->hw)) {
mcp->mb[4] = MSW(size); 4210 mcp->mb[5] = LSW(size); 4211 mcp->out_mb |= MBX_5|MBX_4; 4212 } else { 4213 mcp->mb[4] = LSW(size); 4214 mcp->out_mb |= MBX_4; 4215 } 4216 4217 mcp->in_mb = MBX_0; 4218 mcp->tov = MBX_TOV_SECONDS; 4219 mcp->flags = 0; 4220 rval = qla2x00_mailbox_command(vha, mcp); 4221 4222 if (rval != QLA_SUCCESS) { 4223 ql_dbg(ql_dbg_mbx, vha, 0x1008, 4224 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4225 } else { 4226 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1007, 4227 "Done %s.\n", __func__); 4228 } 4229 4230 return rval; 4231 } 4232 /* 84XX Support **************************************************************/ 4233 4234 struct cs84xx_mgmt_cmd { 4235 union { 4236 struct verify_chip_entry_84xx req; 4237 struct verify_chip_rsp_84xx rsp; 4238 } p; 4239 }; 4240 4241 int 4242 qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status) 4243 { 4244 int rval, retry; 4245 struct cs84xx_mgmt_cmd *mn; 4246 dma_addr_t mn_dma; 4247 uint16_t options; 4248 unsigned long flags; 4249 struct qla_hw_data *ha = vha->hw; 4250 4251 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c8, 4252 "Entered %s.\n", __func__); 4253 4254 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma); 4255 if (mn == NULL) { 4256 return QLA_MEMORY_ALLOC_FAILED; 4257 } 4258 4259 /* Force Update? */ 4260 options = ha->cs84xx->fw_update ? VCO_FORCE_UPDATE : 0; 4261 /* Diagnostic firmware? */ 4262 /* options |= MENLO_DIAG_FW; */ 4263 /* We update the firmware with only one data sequence. 
*/ 4264 options |= VCO_END_OF_DATA; 4265 4266 do { 4267 retry = 0; 4268 memset(mn, 0, sizeof(*mn)); 4269 mn->p.req.entry_type = VERIFY_CHIP_IOCB_TYPE; 4270 mn->p.req.entry_count = 1; 4271 mn->p.req.options = cpu_to_le16(options); 4272 4273 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111c, 4274 "Dump of Verify Request.\n"); 4275 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111e, 4276 mn, sizeof(*mn)); 4277 4278 rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120); 4279 if (rval != QLA_SUCCESS) { 4280 ql_dbg(ql_dbg_mbx, vha, 0x10cb, 4281 "Failed to issue verify IOCB (%x).\n", rval); 4282 goto verify_done; 4283 } 4284 4285 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1110, 4286 "Dump of Verify Response.\n"); 4287 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1118, 4288 mn, sizeof(*mn)); 4289 4290 status[0] = le16_to_cpu(mn->p.rsp.comp_status); 4291 status[1] = status[0] == CS_VCS_CHIP_FAILURE ? 4292 le16_to_cpu(mn->p.rsp.failure_code) : 0; 4293 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ce, 4294 "cs=%x fc=%x.\n", status[0], status[1]); 4295 4296 if (status[0] != CS_COMPLETE) { 4297 rval = QLA_FUNCTION_FAILED; 4298 if (!(options & VCO_DONT_UPDATE_FW)) { 4299 ql_dbg(ql_dbg_mbx, vha, 0x10cf, 4300 "Firmware update failed. Retrying " 4301 "without update firmware.\n"); 4302 options |= VCO_DONT_UPDATE_FW; 4303 options &= ~VCO_FORCE_UPDATE; 4304 retry = 1; 4305 } 4306 } else { 4307 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d0, 4308 "Firmware updated to %x.\n", 4309 le32_to_cpu(mn->p.rsp.fw_ver)); 4310 4311 /* NOTE: we only update OP firmware. 
*/ 4312 spin_lock_irqsave(&ha->cs84xx->access_lock, flags); 4313 ha->cs84xx->op_fw_version = 4314 le32_to_cpu(mn->p.rsp.fw_ver); 4315 spin_unlock_irqrestore(&ha->cs84xx->access_lock, 4316 flags); 4317 } 4318 } while (retry); 4319 4320 verify_done: 4321 dma_pool_free(ha->s_dma_pool, mn, mn_dma); 4322 4323 if (rval != QLA_SUCCESS) { 4324 ql_dbg(ql_dbg_mbx, vha, 0x10d1, 4325 "Failed=%x.\n", rval); 4326 } else { 4327 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d2, 4328 "Done %s.\n", __func__); 4329 } 4330 4331 return rval; 4332 } 4333 4334 int 4335 qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req) 4336 { 4337 int rval; 4338 unsigned long flags; 4339 mbx_cmd_t mc; 4340 mbx_cmd_t *mcp = &mc; 4341 struct qla_hw_data *ha = vha->hw; 4342 4343 if (!ha->flags.fw_started) 4344 return QLA_SUCCESS; 4345 4346 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d3, 4347 "Entered %s.\n", __func__); 4348 4349 if (IS_SHADOW_REG_CAPABLE(ha)) 4350 req->options |= BIT_13; 4351 4352 mcp->mb[0] = MBC_INITIALIZE_MULTIQ; 4353 mcp->mb[1] = req->options; 4354 mcp->mb[2] = MSW(LSD(req->dma)); 4355 mcp->mb[3] = LSW(LSD(req->dma)); 4356 mcp->mb[6] = MSW(MSD(req->dma)); 4357 mcp->mb[7] = LSW(MSD(req->dma)); 4358 mcp->mb[5] = req->length; 4359 if (req->rsp) 4360 mcp->mb[10] = req->rsp->id; 4361 mcp->mb[12] = req->qos; 4362 mcp->mb[11] = req->vp_idx; 4363 mcp->mb[13] = req->rid; 4364 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) 4365 mcp->mb[15] = 0; 4366 4367 mcp->mb[4] = req->id; 4368 /* que in ptr index */ 4369 mcp->mb[8] = 0; 4370 /* que out ptr index */ 4371 mcp->mb[9] = *req->out_ptr = 0; 4372 mcp->out_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7| 4373 MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 4374 mcp->in_mb = MBX_0; 4375 mcp->flags = MBX_DMA_OUT; 4376 mcp->tov = MBX_TOV_SECONDS * 2; 4377 4378 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) || 4379 IS_QLA28XX(ha)) 4380 mcp->in_mb |= MBX_1; 4381 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || 
IS_QLA28XX(ha)) { 4382 mcp->out_mb |= MBX_15; 4383 /* debug q create issue in SR-IOV */ 4384 mcp->in_mb |= MBX_9 | MBX_8 | MBX_7; 4385 } 4386 4387 spin_lock_irqsave(&ha->hardware_lock, flags); 4388 if (!(req->options & BIT_0)) { 4389 WRT_REG_DWORD(req->req_q_in, 0); 4390 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 4391 WRT_REG_DWORD(req->req_q_out, 0); 4392 } 4393 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4394 4395 rval = qla2x00_mailbox_command(vha, mcp); 4396 if (rval != QLA_SUCCESS) { 4397 ql_dbg(ql_dbg_mbx, vha, 0x10d4, 4398 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4399 } else { 4400 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d5, 4401 "Done %s.\n", __func__); 4402 } 4403 4404 return rval; 4405 } 4406 4407 int 4408 qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp) 4409 { 4410 int rval; 4411 unsigned long flags; 4412 mbx_cmd_t mc; 4413 mbx_cmd_t *mcp = &mc; 4414 struct qla_hw_data *ha = vha->hw; 4415 4416 if (!ha->flags.fw_started) 4417 return QLA_SUCCESS; 4418 4419 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d6, 4420 "Entered %s.\n", __func__); 4421 4422 if (IS_SHADOW_REG_CAPABLE(ha)) 4423 rsp->options |= BIT_13; 4424 4425 mcp->mb[0] = MBC_INITIALIZE_MULTIQ; 4426 mcp->mb[1] = rsp->options; 4427 mcp->mb[2] = MSW(LSD(rsp->dma)); 4428 mcp->mb[3] = LSW(LSD(rsp->dma)); 4429 mcp->mb[6] = MSW(MSD(rsp->dma)); 4430 mcp->mb[7] = LSW(MSD(rsp->dma)); 4431 mcp->mb[5] = rsp->length; 4432 mcp->mb[14] = rsp->msix->entry; 4433 mcp->mb[13] = rsp->rid; 4434 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) 4435 mcp->mb[15] = 0; 4436 4437 mcp->mb[4] = rsp->id; 4438 /* que in ptr index */ 4439 mcp->mb[8] = *rsp->in_ptr = 0; 4440 /* que out ptr index */ 4441 mcp->mb[9] = 0; 4442 mcp->out_mb = MBX_14|MBX_13|MBX_9|MBX_8|MBX_7 4443 |MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 4444 mcp->in_mb = MBX_0; 4445 mcp->flags = MBX_DMA_OUT; 4446 mcp->tov = MBX_TOV_SECONDS * 2; 4447 4448 if (IS_QLA81XX(ha)) { 4449 mcp->out_mb |= 
MBX_12|MBX_11|MBX_10; 4450 mcp->in_mb |= MBX_1; 4451 } else if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 4452 mcp->out_mb |= MBX_15|MBX_12|MBX_11|MBX_10; 4453 mcp->in_mb |= MBX_1; 4454 /* debug q create issue in SR-IOV */ 4455 mcp->in_mb |= MBX_9 | MBX_8 | MBX_7; 4456 } 4457 4458 spin_lock_irqsave(&ha->hardware_lock, flags); 4459 if (!(rsp->options & BIT_0)) { 4460 WRT_REG_DWORD(rsp->rsp_q_out, 0); 4461 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 4462 WRT_REG_DWORD(rsp->rsp_q_in, 0); 4463 } 4464 4465 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4466 4467 rval = qla2x00_mailbox_command(vha, mcp); 4468 if (rval != QLA_SUCCESS) { 4469 ql_dbg(ql_dbg_mbx, vha, 0x10d7, 4470 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4471 } else { 4472 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d8, 4473 "Done %s.\n", __func__); 4474 } 4475 4476 return rval; 4477 } 4478 4479 int 4480 qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb) 4481 { 4482 int rval; 4483 mbx_cmd_t mc; 4484 mbx_cmd_t *mcp = &mc; 4485 4486 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d9, 4487 "Entered %s.\n", __func__); 4488 4489 mcp->mb[0] = MBC_IDC_ACK; 4490 memcpy(&mcp->mb[1], mb, QLA_IDC_ACK_REGS * sizeof(uint16_t)); 4491 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 4492 mcp->in_mb = MBX_0; 4493 mcp->tov = MBX_TOV_SECONDS; 4494 mcp->flags = 0; 4495 rval = qla2x00_mailbox_command(vha, mcp); 4496 4497 if (rval != QLA_SUCCESS) { 4498 ql_dbg(ql_dbg_mbx, vha, 0x10da, 4499 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4500 } else { 4501 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10db, 4502 "Done %s.\n", __func__); 4503 } 4504 4505 return rval; 4506 } 4507 4508 int 4509 qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size) 4510 { 4511 int rval; 4512 mbx_cmd_t mc; 4513 mbx_cmd_t *mcp = &mc; 4514 4515 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10dc, 4516 "Entered %s.\n", __func__); 4517 4518 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) && 4519 
!IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw)) 4520 return QLA_FUNCTION_FAILED; 4521 4522 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; 4523 mcp->mb[1] = FAC_OPT_CMD_GET_SECTOR_SIZE; 4524 mcp->out_mb = MBX_1|MBX_0; 4525 mcp->in_mb = MBX_1|MBX_0; 4526 mcp->tov = MBX_TOV_SECONDS; 4527 mcp->flags = 0; 4528 rval = qla2x00_mailbox_command(vha, mcp); 4529 4530 if (rval != QLA_SUCCESS) { 4531 ql_dbg(ql_dbg_mbx, vha, 0x10dd, 4532 "Failed=%x mb[0]=%x mb[1]=%x.\n", 4533 rval, mcp->mb[0], mcp->mb[1]); 4534 } else { 4535 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10de, 4536 "Done %s.\n", __func__); 4537 *sector_size = mcp->mb[1]; 4538 } 4539 4540 return rval; 4541 } 4542 4543 int 4544 qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable) 4545 { 4546 int rval; 4547 mbx_cmd_t mc; 4548 mbx_cmd_t *mcp = &mc; 4549 4550 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) && 4551 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw)) 4552 return QLA_FUNCTION_FAILED; 4553 4554 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10df, 4555 "Entered %s.\n", __func__); 4556 4557 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; 4558 mcp->mb[1] = enable ? 
FAC_OPT_CMD_WRITE_ENABLE : 4559 FAC_OPT_CMD_WRITE_PROTECT; 4560 mcp->out_mb = MBX_1|MBX_0; 4561 mcp->in_mb = MBX_1|MBX_0; 4562 mcp->tov = MBX_TOV_SECONDS; 4563 mcp->flags = 0; 4564 rval = qla2x00_mailbox_command(vha, mcp); 4565 4566 if (rval != QLA_SUCCESS) { 4567 ql_dbg(ql_dbg_mbx, vha, 0x10e0, 4568 "Failed=%x mb[0]=%x mb[1]=%x.\n", 4569 rval, mcp->mb[0], mcp->mb[1]); 4570 } else { 4571 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e1, 4572 "Done %s.\n", __func__); 4573 } 4574 4575 return rval; 4576 } 4577 4578 int 4579 qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish) 4580 { 4581 int rval; 4582 mbx_cmd_t mc; 4583 mbx_cmd_t *mcp = &mc; 4584 4585 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) && 4586 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw)) 4587 return QLA_FUNCTION_FAILED; 4588 4589 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2, 4590 "Entered %s.\n", __func__); 4591 4592 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; 4593 mcp->mb[1] = FAC_OPT_CMD_ERASE_SECTOR; 4594 mcp->mb[2] = LSW(start); 4595 mcp->mb[3] = MSW(start); 4596 mcp->mb[4] = LSW(finish); 4597 mcp->mb[5] = MSW(finish); 4598 mcp->out_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 4599 mcp->in_mb = MBX_2|MBX_1|MBX_0; 4600 mcp->tov = MBX_TOV_SECONDS; 4601 mcp->flags = 0; 4602 rval = qla2x00_mailbox_command(vha, mcp); 4603 4604 if (rval != QLA_SUCCESS) { 4605 ql_dbg(ql_dbg_mbx, vha, 0x10e3, 4606 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", 4607 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); 4608 } else { 4609 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4, 4610 "Done %s.\n", __func__); 4611 } 4612 4613 return rval; 4614 } 4615 4616 int 4617 qla81xx_fac_semaphore_access(scsi_qla_host_t *vha, int lock) 4618 { 4619 int rval = QLA_SUCCESS; 4620 mbx_cmd_t mc; 4621 mbx_cmd_t *mcp = &mc; 4622 struct qla_hw_data *ha = vha->hw; 4623 4624 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && 4625 !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 4626 return rval; 4627 4628 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2, 4629 
"Entered %s.\n", __func__); 4630 4631 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; 4632 mcp->mb[1] = (lock ? FAC_OPT_CMD_LOCK_SEMAPHORE : 4633 FAC_OPT_CMD_UNLOCK_SEMAPHORE); 4634 mcp->out_mb = MBX_1|MBX_0; 4635 mcp->in_mb = MBX_1|MBX_0; 4636 mcp->tov = MBX_TOV_SECONDS; 4637 mcp->flags = 0; 4638 rval = qla2x00_mailbox_command(vha, mcp); 4639 4640 if (rval != QLA_SUCCESS) { 4641 ql_dbg(ql_dbg_mbx, vha, 0x10e3, 4642 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", 4643 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); 4644 } else { 4645 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4, 4646 "Done %s.\n", __func__); 4647 } 4648 4649 return rval; 4650 } 4651 4652 int 4653 qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha) 4654 { 4655 int rval = 0; 4656 mbx_cmd_t mc; 4657 mbx_cmd_t *mcp = &mc; 4658 4659 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e5, 4660 "Entered %s.\n", __func__); 4661 4662 mcp->mb[0] = MBC_RESTART_MPI_FW; 4663 mcp->out_mb = MBX_0; 4664 mcp->in_mb = MBX_0|MBX_1; 4665 mcp->tov = MBX_TOV_SECONDS; 4666 mcp->flags = 0; 4667 rval = qla2x00_mailbox_command(vha, mcp); 4668 4669 if (rval != QLA_SUCCESS) { 4670 ql_dbg(ql_dbg_mbx, vha, 0x10e6, 4671 "Failed=%x mb[0]=%x mb[1]=%x.\n", 4672 rval, mcp->mb[0], mcp->mb[1]); 4673 } else { 4674 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e7, 4675 "Done %s.\n", __func__); 4676 } 4677 4678 return rval; 4679 } 4680 4681 int 4682 qla82xx_set_driver_version(scsi_qla_host_t *vha, char *version) 4683 { 4684 int rval; 4685 mbx_cmd_t mc; 4686 mbx_cmd_t *mcp = &mc; 4687 int i; 4688 int len; 4689 uint16_t *str; 4690 struct qla_hw_data *ha = vha->hw; 4691 4692 if (!IS_P3P_TYPE(ha)) 4693 return QLA_FUNCTION_FAILED; 4694 4695 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117b, 4696 "Entered %s.\n", __func__); 4697 4698 str = (void *)version; 4699 len = strlen(version); 4700 4701 mcp->mb[0] = MBC_SET_RNID_PARAMS; 4702 mcp->mb[1] = RNID_TYPE_SET_VERSION << 8; 4703 mcp->out_mb = MBX_1|MBX_0; 4704 for (i = 4; i < 16 && len; i++, str++, len -= 2) { 4705 
mcp->mb[i] = cpu_to_le16p(str); 4706 mcp->out_mb |= 1<<i; 4707 } 4708 for (; i < 16; i++) { 4709 mcp->mb[i] = 0; 4710 mcp->out_mb |= 1<<i; 4711 } 4712 mcp->in_mb = MBX_1|MBX_0; 4713 mcp->tov = MBX_TOV_SECONDS; 4714 mcp->flags = 0; 4715 rval = qla2x00_mailbox_command(vha, mcp); 4716 4717 if (rval != QLA_SUCCESS) { 4718 ql_dbg(ql_dbg_mbx, vha, 0x117c, 4719 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]); 4720 } else { 4721 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117d, 4722 "Done %s.\n", __func__); 4723 } 4724 4725 return rval; 4726 } 4727 4728 int 4729 qla25xx_set_driver_version(scsi_qla_host_t *vha, char *version) 4730 { 4731 int rval; 4732 mbx_cmd_t mc; 4733 mbx_cmd_t *mcp = &mc; 4734 int len; 4735 uint16_t dwlen; 4736 uint8_t *str; 4737 dma_addr_t str_dma; 4738 struct qla_hw_data *ha = vha->hw; 4739 4740 if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha) || 4741 IS_P3P_TYPE(ha)) 4742 return QLA_FUNCTION_FAILED; 4743 4744 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117e, 4745 "Entered %s.\n", __func__); 4746 4747 str = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &str_dma); 4748 if (!str) { 4749 ql_log(ql_log_warn, vha, 0x117f, 4750 "Failed to allocate driver version param.\n"); 4751 return QLA_MEMORY_ALLOC_FAILED; 4752 } 4753 4754 memcpy(str, "\x7\x3\x11\x0", 4); 4755 dwlen = str[0]; 4756 len = dwlen * 4 - 4; 4757 memset(str + 4, 0, len); 4758 if (len > strlen(version)) 4759 len = strlen(version); 4760 memcpy(str + 4, version, len); 4761 4762 mcp->mb[0] = MBC_SET_RNID_PARAMS; 4763 mcp->mb[1] = RNID_TYPE_SET_VERSION << 8 | dwlen; 4764 mcp->mb[2] = MSW(LSD(str_dma)); 4765 mcp->mb[3] = LSW(LSD(str_dma)); 4766 mcp->mb[6] = MSW(MSD(str_dma)); 4767 mcp->mb[7] = LSW(MSD(str_dma)); 4768 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 4769 mcp->in_mb = MBX_1|MBX_0; 4770 mcp->tov = MBX_TOV_SECONDS; 4771 mcp->flags = 0; 4772 rval = qla2x00_mailbox_command(vha, mcp); 4773 4774 if (rval != QLA_SUCCESS) { 4775 ql_dbg(ql_dbg_mbx, vha, 0x1180, 4776 
"Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]); 4777 } else { 4778 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1181, 4779 "Done %s.\n", __func__); 4780 } 4781 4782 dma_pool_free(ha->s_dma_pool, str, str_dma); 4783 4784 return rval; 4785 } 4786 4787 int 4788 qla24xx_get_port_login_templ(scsi_qla_host_t *vha, dma_addr_t buf_dma, 4789 void *buf, uint16_t bufsiz) 4790 { 4791 int rval, i; 4792 mbx_cmd_t mc; 4793 mbx_cmd_t *mcp = &mc; 4794 uint32_t *bp; 4795 4796 if (!IS_FWI2_CAPABLE(vha->hw)) 4797 return QLA_FUNCTION_FAILED; 4798 4799 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159, 4800 "Entered %s.\n", __func__); 4801 4802 mcp->mb[0] = MBC_GET_RNID_PARAMS; 4803 mcp->mb[1] = RNID_TYPE_PORT_LOGIN << 8; 4804 mcp->mb[2] = MSW(buf_dma); 4805 mcp->mb[3] = LSW(buf_dma); 4806 mcp->mb[6] = MSW(MSD(buf_dma)); 4807 mcp->mb[7] = LSW(MSD(buf_dma)); 4808 mcp->mb[8] = bufsiz/4; 4809 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 4810 mcp->in_mb = MBX_1|MBX_0; 4811 mcp->tov = MBX_TOV_SECONDS; 4812 mcp->flags = 0; 4813 rval = qla2x00_mailbox_command(vha, mcp); 4814 4815 if (rval != QLA_SUCCESS) { 4816 ql_dbg(ql_dbg_mbx, vha, 0x115a, 4817 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]); 4818 } else { 4819 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b, 4820 "Done %s.\n", __func__); 4821 bp = (uint32_t *) buf; 4822 for (i = 0; i < (bufsiz-4)/4; i++, bp++) 4823 *bp = le32_to_cpu(*bp); 4824 } 4825 4826 return rval; 4827 } 4828 4829 static int 4830 qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp) 4831 { 4832 int rval; 4833 mbx_cmd_t mc; 4834 mbx_cmd_t *mcp = &mc; 4835 4836 if (!IS_FWI2_CAPABLE(vha->hw)) 4837 return QLA_FUNCTION_FAILED; 4838 4839 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159, 4840 "Entered %s.\n", __func__); 4841 4842 mcp->mb[0] = MBC_GET_RNID_PARAMS; 4843 mcp->mb[1] = RNID_TYPE_ASIC_TEMP << 8; 4844 mcp->out_mb = MBX_1|MBX_0; 4845 mcp->in_mb = MBX_1|MBX_0; 4846 mcp->tov = MBX_TOV_SECONDS; 4847 mcp->flags = 0; 
/*
 * qla2x00_read_sfp() - Read SFP transceiver data via MBC_READ_SFP.
 *
 * @vha:     host adapter context.
 * @sfp_dma: DMA address of the receive buffer for multi-byte reads.
 * @sfp:     CPU pointer; for single-byte reads the byte is returned here
 *           straight from mailbox register 1 instead of via DMA.
 * @dev:     SFP device address.
 * @off:     offset within the device.
 * @len:     transfer length in bytes.
 * @opt:     option bits passed through in mb[10]; BIT_0 selects the
 *           single-byte (register-based) transfer mode.
 *
 * Returns QLA_SUCCESS, QLA_FUNCTION_FAILED on pre-FWI2 hardware, a mailbox
 * failure code, or QLA_INTERFACE_ERROR when firmware reports the SFP is
 * absent (MBS_COMMAND_ERROR with mb[1] == 0x22).
 */
int
qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
	uint16_t dev, uint16_t off, uint16_t len, uint16_t opt)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
	    "Entered %s.\n", __func__);

	if (!IS_FWI2_CAPABLE(ha))
		return QLA_FUNCTION_FAILED;

	/* A one-byte read is forced into register mode (BIT_0). */
	if (len == 1)
		opt |= BIT_0;

	mcp->mb[0] = MBC_READ_SFP;
	mcp->mb[1] = dev;
	/* 64-bit DMA address: mb[2]/mb[3] low dword, mb[6]/mb[7] high dword. */
	mcp->mb[2] = MSW(sfp_dma);
	mcp->mb[3] = LSW(sfp_dma);
	mcp->mb[6] = MSW(MSD(sfp_dma));
	mcp->mb[7] = LSW(MSD(sfp_dma));
	mcp->mb[8] = len;
	mcp->mb[9] = off;
	mcp->mb[10] = opt;
	mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	/* In register mode the data byte comes back in mailbox 1. */
	if (opt & BIT_0)
		*sfp = mcp->mb[1];

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x10e9,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
		if (mcp->mb[0] == MBS_COMMAND_ERROR && mcp->mb[1] == 0x22) {
			/* sfp is not there */
			rval = QLA_INTERFACE_ERROR;
		}
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
		    "Done %s.\n", __func__);
	}

	return rval;
}
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10eb, 4923 "Entered %s.\n", __func__); 4924 4925 if (!IS_FWI2_CAPABLE(ha)) 4926 return QLA_FUNCTION_FAILED; 4927 4928 if (len == 1) 4929 opt |= BIT_0; 4930 4931 if (opt & BIT_0) 4932 len = *sfp; 4933 4934 mcp->mb[0] = MBC_WRITE_SFP; 4935 mcp->mb[1] = dev; 4936 mcp->mb[2] = MSW(sfp_dma); 4937 mcp->mb[3] = LSW(sfp_dma); 4938 mcp->mb[6] = MSW(MSD(sfp_dma)); 4939 mcp->mb[7] = LSW(MSD(sfp_dma)); 4940 mcp->mb[8] = len; 4941 mcp->mb[9] = off; 4942 mcp->mb[10] = opt; 4943 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 4944 mcp->in_mb = MBX_1|MBX_0; 4945 mcp->tov = MBX_TOV_SECONDS; 4946 mcp->flags = 0; 4947 rval = qla2x00_mailbox_command(vha, mcp); 4948 4949 if (rval != QLA_SUCCESS) { 4950 ql_dbg(ql_dbg_mbx, vha, 0x10ec, 4951 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4952 } else { 4953 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ed, 4954 "Done %s.\n", __func__); 4955 } 4956 4957 return rval; 4958 } 4959 4960 int 4961 qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma, 4962 uint16_t size_in_bytes, uint16_t *actual_size) 4963 { 4964 int rval; 4965 mbx_cmd_t mc; 4966 mbx_cmd_t *mcp = &mc; 4967 4968 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ee, 4969 "Entered %s.\n", __func__); 4970 4971 if (!IS_CNA_CAPABLE(vha->hw)) 4972 return QLA_FUNCTION_FAILED; 4973 4974 mcp->mb[0] = MBC_GET_XGMAC_STATS; 4975 mcp->mb[2] = MSW(stats_dma); 4976 mcp->mb[3] = LSW(stats_dma); 4977 mcp->mb[6] = MSW(MSD(stats_dma)); 4978 mcp->mb[7] = LSW(MSD(stats_dma)); 4979 mcp->mb[8] = size_in_bytes >> 2; 4980 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0; 4981 mcp->in_mb = MBX_2|MBX_1|MBX_0; 4982 mcp->tov = MBX_TOV_SECONDS; 4983 mcp->flags = 0; 4984 rval = qla2x00_mailbox_command(vha, mcp); 4985 4986 if (rval != QLA_SUCCESS) { 4987 ql_dbg(ql_dbg_mbx, vha, 0x10ef, 4988 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", 4989 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); 4990 } else { 4991 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 
0x10f0, 4992 "Done %s.\n", __func__); 4993 4994 4995 *actual_size = mcp->mb[2] << 2; 4996 } 4997 4998 return rval; 4999 } 5000 5001 int 5002 qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma, 5003 uint16_t size) 5004 { 5005 int rval; 5006 mbx_cmd_t mc; 5007 mbx_cmd_t *mcp = &mc; 5008 5009 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f1, 5010 "Entered %s.\n", __func__); 5011 5012 if (!IS_CNA_CAPABLE(vha->hw)) 5013 return QLA_FUNCTION_FAILED; 5014 5015 mcp->mb[0] = MBC_GET_DCBX_PARAMS; 5016 mcp->mb[1] = 0; 5017 mcp->mb[2] = MSW(tlv_dma); 5018 mcp->mb[3] = LSW(tlv_dma); 5019 mcp->mb[6] = MSW(MSD(tlv_dma)); 5020 mcp->mb[7] = LSW(MSD(tlv_dma)); 5021 mcp->mb[8] = size; 5022 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 5023 mcp->in_mb = MBX_2|MBX_1|MBX_0; 5024 mcp->tov = MBX_TOV_SECONDS; 5025 mcp->flags = 0; 5026 rval = qla2x00_mailbox_command(vha, mcp); 5027 5028 if (rval != QLA_SUCCESS) { 5029 ql_dbg(ql_dbg_mbx, vha, 0x10f2, 5030 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", 5031 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); 5032 } else { 5033 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f3, 5034 "Done %s.\n", __func__); 5035 } 5036 5037 return rval; 5038 } 5039 5040 int 5041 qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data) 5042 { 5043 int rval; 5044 mbx_cmd_t mc; 5045 mbx_cmd_t *mcp = &mc; 5046 5047 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f4, 5048 "Entered %s.\n", __func__); 5049 5050 if (!IS_FWI2_CAPABLE(vha->hw)) 5051 return QLA_FUNCTION_FAILED; 5052 5053 mcp->mb[0] = MBC_READ_RAM_EXTENDED; 5054 mcp->mb[1] = LSW(risc_addr); 5055 mcp->mb[8] = MSW(risc_addr); 5056 mcp->out_mb = MBX_8|MBX_1|MBX_0; 5057 mcp->in_mb = MBX_3|MBX_2|MBX_0; 5058 mcp->tov = 30; 5059 mcp->flags = 0; 5060 rval = qla2x00_mailbox_command(vha, mcp); 5061 if (rval != QLA_SUCCESS) { 5062 ql_dbg(ql_dbg_mbx, vha, 0x10f5, 5063 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5064 } else { 5065 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f6, 5066 "Done 
%s.\n", __func__); 5067 *data = mcp->mb[3] << 16 | mcp->mb[2]; 5068 } 5069 5070 return rval; 5071 } 5072 5073 int 5074 qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, 5075 uint16_t *mresp) 5076 { 5077 int rval; 5078 mbx_cmd_t mc; 5079 mbx_cmd_t *mcp = &mc; 5080 5081 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f7, 5082 "Entered %s.\n", __func__); 5083 5084 memset(mcp->mb, 0 , sizeof(mcp->mb)); 5085 mcp->mb[0] = MBC_DIAGNOSTIC_LOOP_BACK; 5086 mcp->mb[1] = mreq->options | BIT_6; // BIT_6 specifies 64 bit addressing 5087 5088 /* transfer count */ 5089 mcp->mb[10] = LSW(mreq->transfer_size); 5090 mcp->mb[11] = MSW(mreq->transfer_size); 5091 5092 /* send data address */ 5093 mcp->mb[14] = LSW(mreq->send_dma); 5094 mcp->mb[15] = MSW(mreq->send_dma); 5095 mcp->mb[20] = LSW(MSD(mreq->send_dma)); 5096 mcp->mb[21] = MSW(MSD(mreq->send_dma)); 5097 5098 /* receive data address */ 5099 mcp->mb[16] = LSW(mreq->rcv_dma); 5100 mcp->mb[17] = MSW(mreq->rcv_dma); 5101 mcp->mb[6] = LSW(MSD(mreq->rcv_dma)); 5102 mcp->mb[7] = MSW(MSD(mreq->rcv_dma)); 5103 5104 /* Iteration count */ 5105 mcp->mb[18] = LSW(mreq->iteration_count); 5106 mcp->mb[19] = MSW(mreq->iteration_count); 5107 5108 mcp->out_mb = MBX_21|MBX_20|MBX_19|MBX_18|MBX_17|MBX_16|MBX_15| 5109 MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0; 5110 if (IS_CNA_CAPABLE(vha->hw)) 5111 mcp->out_mb |= MBX_2; 5112 mcp->in_mb = MBX_19|MBX_18|MBX_3|MBX_2|MBX_1|MBX_0; 5113 5114 mcp->buf_size = mreq->transfer_size; 5115 mcp->tov = MBX_TOV_SECONDS; 5116 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; 5117 5118 rval = qla2x00_mailbox_command(vha, mcp); 5119 5120 if (rval != QLA_SUCCESS) { 5121 ql_dbg(ql_dbg_mbx, vha, 0x10f8, 5122 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[18]=%x " 5123 "mb[19]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], 5124 mcp->mb[3], mcp->mb[18], mcp->mb[19]); 5125 } else { 5126 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f9, 5127 "Done %s.\n", __func__); 5128 } 5129 5130 /* 
/*
 * qla2x00_echo_test() - Run the MBC_DIAGNOSTIC_ECHO loopback diagnostic:
 * firmware echoes a DMA buffer from send_dma back into rcv_dma.
 *
 * @vha:   host adapter context.
 * @mreq:  echo request (option bits, transfer size, send/receive DMA
 *         addresses, all pre-mapped by the caller).
 * @mresp: 64 bytes of raw mailbox registers copied back unconditionally,
 *         so the caller can inspect firmware status even on failure.
 *
 * Returns QLA_SUCCESS or a mailbox failure code.
 */
int
qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
	uint16_t *mresp)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fa,
	    "Entered %s.\n", __func__);

	memset(mcp->mb, 0 , sizeof(mcp->mb));
	mcp->mb[0] = MBC_DIAGNOSTIC_ECHO;
	/* BIT_6 specifies 64bit address */
	/* NOTE(review): BIT_15's meaning is not evident from this file —
	 * confirm against the firmware interface spec. */
	mcp->mb[1] = mreq->options | BIT_15 | BIT_6;
	if (IS_CNA_CAPABLE(ha)) {
		mcp->mb[2] = vha->fcoe_fcf_idx;
	}
	/* Receive buffer: low dword in mb[16]/mb[17], high in mb[6]/mb[7]. */
	mcp->mb[16] = LSW(mreq->rcv_dma);
	mcp->mb[17] = MSW(mreq->rcv_dma);
	mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
	mcp->mb[7] = MSW(MSD(mreq->rcv_dma));

	mcp->mb[10] = LSW(mreq->transfer_size);

	/* Send buffer: low dword in mb[14]/mb[15], high in mb[20]/mb[21]. */
	mcp->mb[14] = LSW(mreq->send_dma);
	mcp->mb[15] = MSW(mreq->send_dma);
	mcp->mb[20] = LSW(MSD(mreq->send_dma));
	mcp->mb[21] = MSW(MSD(mreq->send_dma));

	mcp->out_mb = MBX_21|MBX_20|MBX_17|MBX_16|MBX_15|
	    MBX_14|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
	if (IS_CNA_CAPABLE(ha))
		mcp->out_mb |= MBX_2;

	/* Returned registers vary by chip generation. */
	mcp->in_mb = MBX_0;
	if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
	    IS_CNA_CAPABLE(ha) || IS_QLA2031(ha))
		mcp->in_mb |= MBX_1;
	if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha))
		mcp->in_mb |= MBX_3;

	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
	mcp->buf_size = mreq->transfer_size;

	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x10fb,
		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
		    rval, mcp->mb[0], mcp->mb[1]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fc,
		    "Done %s.\n", __func__);
	}

	/* Copy mailbox information */
	memcpy(mresp, mcp->mb, 64);
	return rval;
}
qla84xx_reset_chip(scsi_qla_host_t *vha, uint16_t enable_diagnostic) 5200 { 5201 int rval; 5202 mbx_cmd_t mc; 5203 mbx_cmd_t *mcp = &mc; 5204 5205 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fd, 5206 "Entered %s enable_diag=%d.\n", __func__, enable_diagnostic); 5207 5208 mcp->mb[0] = MBC_ISP84XX_RESET; 5209 mcp->mb[1] = enable_diagnostic; 5210 mcp->out_mb = MBX_1|MBX_0; 5211 mcp->in_mb = MBX_1|MBX_0; 5212 mcp->tov = MBX_TOV_SECONDS; 5213 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; 5214 rval = qla2x00_mailbox_command(vha, mcp); 5215 5216 if (rval != QLA_SUCCESS) 5217 ql_dbg(ql_dbg_mbx, vha, 0x10fe, "Failed=%x.\n", rval); 5218 else 5219 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ff, 5220 "Done %s.\n", __func__); 5221 5222 return rval; 5223 } 5224 5225 int 5226 qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data) 5227 { 5228 int rval; 5229 mbx_cmd_t mc; 5230 mbx_cmd_t *mcp = &mc; 5231 5232 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1100, 5233 "Entered %s.\n", __func__); 5234 5235 if (!IS_FWI2_CAPABLE(vha->hw)) 5236 return QLA_FUNCTION_FAILED; 5237 5238 mcp->mb[0] = MBC_WRITE_RAM_WORD_EXTENDED; 5239 mcp->mb[1] = LSW(risc_addr); 5240 mcp->mb[2] = LSW(data); 5241 mcp->mb[3] = MSW(data); 5242 mcp->mb[8] = MSW(risc_addr); 5243 mcp->out_mb = MBX_8|MBX_3|MBX_2|MBX_1|MBX_0; 5244 mcp->in_mb = MBX_1|MBX_0; 5245 mcp->tov = 30; 5246 mcp->flags = 0; 5247 rval = qla2x00_mailbox_command(vha, mcp); 5248 if (rval != QLA_SUCCESS) { 5249 ql_dbg(ql_dbg_mbx, vha, 0x1101, 5250 "Failed=%x mb[0]=%x mb[1]=%x.\n", 5251 rval, mcp->mb[0], mcp->mb[1]); 5252 } else { 5253 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1102, 5254 "Done %s.\n", __func__); 5255 } 5256 5257 return rval; 5258 } 5259 5260 int 5261 qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb) 5262 { 5263 int rval; 5264 uint32_t stat, timer; 5265 uint16_t mb0 = 0; 5266 struct qla_hw_data *ha = vha->hw; 5267 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 5268 5269 rval = 
QLA_SUCCESS; 5270 5271 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1103, 5272 "Entered %s.\n", __func__); 5273 5274 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); 5275 5276 /* Write the MBC data to the registers */ 5277 WRT_REG_WORD(®->mailbox0, MBC_WRITE_MPI_REGISTER); 5278 WRT_REG_WORD(®->mailbox1, mb[0]); 5279 WRT_REG_WORD(®->mailbox2, mb[1]); 5280 WRT_REG_WORD(®->mailbox3, mb[2]); 5281 WRT_REG_WORD(®->mailbox4, mb[3]); 5282 5283 WRT_REG_DWORD(®->hccr, HCCRX_SET_HOST_INT); 5284 5285 /* Poll for MBC interrupt */ 5286 for (timer = 6000000; timer; timer--) { 5287 /* Check for pending interrupts. */ 5288 stat = RD_REG_DWORD(®->host_status); 5289 if (stat & HSRX_RISC_INT) { 5290 stat &= 0xff; 5291 5292 if (stat == 0x1 || stat == 0x2 || 5293 stat == 0x10 || stat == 0x11) { 5294 set_bit(MBX_INTERRUPT, 5295 &ha->mbx_cmd_flags); 5296 mb0 = RD_REG_WORD(®->mailbox0); 5297 WRT_REG_DWORD(®->hccr, 5298 HCCRX_CLR_RISC_INT); 5299 RD_REG_DWORD(®->hccr); 5300 break; 5301 } 5302 } 5303 udelay(5); 5304 } 5305 5306 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) 5307 rval = mb0 & MBS_MASK; 5308 else 5309 rval = QLA_FUNCTION_FAILED; 5310 5311 if (rval != QLA_SUCCESS) { 5312 ql_dbg(ql_dbg_mbx, vha, 0x1104, 5313 "Failed=%x mb[0]=%x.\n", rval, mb[0]); 5314 } else { 5315 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1105, 5316 "Done %s.\n", __func__); 5317 } 5318 5319 return rval; 5320 } 5321 5322 /* Set the specified data rate */ 5323 int 5324 qla2x00_set_data_rate(scsi_qla_host_t *vha, uint16_t mode) 5325 { 5326 int rval; 5327 mbx_cmd_t mc; 5328 mbx_cmd_t *mcp = &mc; 5329 struct qla_hw_data *ha = vha->hw; 5330 uint16_t val; 5331 5332 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106, 5333 "Entered %s speed:0x%x mode:0x%x.\n", __func__, ha->set_data_rate, 5334 mode); 5335 5336 if (!IS_FWI2_CAPABLE(ha)) 5337 return QLA_FUNCTION_FAILED; 5338 5339 memset(mcp, 0, sizeof(*mcp)); 5340 switch (ha->set_data_rate) { 5341 case PORT_SPEED_AUTO: 5342 case PORT_SPEED_4GB: 5343 case 
PORT_SPEED_8GB: 5344 case PORT_SPEED_16GB: 5345 case PORT_SPEED_32GB: 5346 val = ha->set_data_rate; 5347 break; 5348 default: 5349 ql_log(ql_log_warn, vha, 0x1199, 5350 "Unrecognized speed setting:%d. Setting Autoneg\n", 5351 ha->set_data_rate); 5352 val = ha->set_data_rate = PORT_SPEED_AUTO; 5353 break; 5354 } 5355 5356 mcp->mb[0] = MBC_DATA_RATE; 5357 mcp->mb[1] = mode; 5358 mcp->mb[2] = val; 5359 5360 mcp->out_mb = MBX_2|MBX_1|MBX_0; 5361 mcp->in_mb = MBX_2|MBX_1|MBX_0; 5362 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) 5363 mcp->in_mb |= MBX_4|MBX_3; 5364 mcp->tov = MBX_TOV_SECONDS; 5365 mcp->flags = 0; 5366 rval = qla2x00_mailbox_command(vha, mcp); 5367 if (rval != QLA_SUCCESS) { 5368 ql_dbg(ql_dbg_mbx, vha, 0x1107, 5369 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5370 } else { 5371 if (mcp->mb[1] != 0x7) 5372 ql_dbg(ql_dbg_mbx, vha, 0x1179, 5373 "Speed set:0x%x\n", mcp->mb[1]); 5374 5375 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108, 5376 "Done %s.\n", __func__); 5377 } 5378 5379 return rval; 5380 } 5381 5382 int 5383 qla2x00_get_data_rate(scsi_qla_host_t *vha) 5384 { 5385 int rval; 5386 mbx_cmd_t mc; 5387 mbx_cmd_t *mcp = &mc; 5388 struct qla_hw_data *ha = vha->hw; 5389 5390 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106, 5391 "Entered %s.\n", __func__); 5392 5393 if (!IS_FWI2_CAPABLE(ha)) 5394 return QLA_FUNCTION_FAILED; 5395 5396 mcp->mb[0] = MBC_DATA_RATE; 5397 mcp->mb[1] = QLA_GET_DATA_RATE; 5398 mcp->out_mb = MBX_1|MBX_0; 5399 mcp->in_mb = MBX_2|MBX_1|MBX_0; 5400 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) 5401 mcp->in_mb |= MBX_3; 5402 mcp->tov = MBX_TOV_SECONDS; 5403 mcp->flags = 0; 5404 rval = qla2x00_mailbox_command(vha, mcp); 5405 if (rval != QLA_SUCCESS) { 5406 ql_dbg(ql_dbg_mbx, vha, 0x1107, 5407 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5408 } else { 5409 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108, 5410 "Done %s.\n", __func__); 5411 if (mcp->mb[1] != 0x7) 5412 ha->link_data_rate = mcp->mb[1]; 5413 } 5414 5415 
return rval; 5416 } 5417 5418 int 5419 qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb) 5420 { 5421 int rval; 5422 mbx_cmd_t mc; 5423 mbx_cmd_t *mcp = &mc; 5424 struct qla_hw_data *ha = vha->hw; 5425 5426 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1109, 5427 "Entered %s.\n", __func__); 5428 5429 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA8044(ha) && 5430 !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 5431 return QLA_FUNCTION_FAILED; 5432 mcp->mb[0] = MBC_GET_PORT_CONFIG; 5433 mcp->out_mb = MBX_0; 5434 mcp->in_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 5435 mcp->tov = MBX_TOV_SECONDS; 5436 mcp->flags = 0; 5437 5438 rval = qla2x00_mailbox_command(vha, mcp); 5439 5440 if (rval != QLA_SUCCESS) { 5441 ql_dbg(ql_dbg_mbx, vha, 0x110a, 5442 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5443 } else { 5444 /* Copy all bits to preserve original value */ 5445 memcpy(mb, &mcp->mb[1], sizeof(uint16_t) * 4); 5446 5447 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110b, 5448 "Done %s.\n", __func__); 5449 } 5450 return rval; 5451 } 5452 5453 int 5454 qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb) 5455 { 5456 int rval; 5457 mbx_cmd_t mc; 5458 mbx_cmd_t *mcp = &mc; 5459 5460 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110c, 5461 "Entered %s.\n", __func__); 5462 5463 mcp->mb[0] = MBC_SET_PORT_CONFIG; 5464 /* Copy all bits to preserve original setting */ 5465 memcpy(&mcp->mb[1], mb, sizeof(uint16_t) * 4); 5466 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 5467 mcp->in_mb = MBX_0; 5468 mcp->tov = MBX_TOV_SECONDS; 5469 mcp->flags = 0; 5470 rval = qla2x00_mailbox_command(vha, mcp); 5471 5472 if (rval != QLA_SUCCESS) { 5473 ql_dbg(ql_dbg_mbx, vha, 0x110d, 5474 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5475 } else 5476 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110e, 5477 "Done %s.\n", __func__); 5478 5479 return rval; 5480 } 5481 5482 5483 int 5484 qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority, 5485 uint16_t *mb) 5486 { 5487 int rval; 5488 mbx_cmd_t 
mc; 5489 mbx_cmd_t *mcp = &mc; 5490 struct qla_hw_data *ha = vha->hw; 5491 5492 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110f, 5493 "Entered %s.\n", __func__); 5494 5495 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha)) 5496 return QLA_FUNCTION_FAILED; 5497 5498 mcp->mb[0] = MBC_PORT_PARAMS; 5499 mcp->mb[1] = loop_id; 5500 if (ha->flags.fcp_prio_enabled) 5501 mcp->mb[2] = BIT_1; 5502 else 5503 mcp->mb[2] = BIT_2; 5504 mcp->mb[4] = priority & 0xf; 5505 mcp->mb[9] = vha->vp_idx; 5506 mcp->out_mb = MBX_9|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 5507 mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0; 5508 mcp->tov = 30; 5509 mcp->flags = 0; 5510 rval = qla2x00_mailbox_command(vha, mcp); 5511 if (mb != NULL) { 5512 mb[0] = mcp->mb[0]; 5513 mb[1] = mcp->mb[1]; 5514 mb[3] = mcp->mb[3]; 5515 mb[4] = mcp->mb[4]; 5516 } 5517 5518 if (rval != QLA_SUCCESS) { 5519 ql_dbg(ql_dbg_mbx, vha, 0x10cd, "Failed=%x.\n", rval); 5520 } else { 5521 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10cc, 5522 "Done %s.\n", __func__); 5523 } 5524 5525 return rval; 5526 } 5527 5528 int 5529 qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp) 5530 { 5531 int rval = QLA_FUNCTION_FAILED; 5532 struct qla_hw_data *ha = vha->hw; 5533 uint8_t byte; 5534 5535 if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha)) { 5536 ql_dbg(ql_dbg_mbx, vha, 0x1150, 5537 "Thermal not supported by this card.\n"); 5538 return rval; 5539 } 5540 5541 if (IS_QLA25XX(ha)) { 5542 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC && 5543 ha->pdev->subsystem_device == 0x0175) { 5544 rval = qla2x00_read_sfp(vha, 0, &byte, 5545 0x98, 0x1, 1, BIT_13|BIT_0); 5546 *temp = byte; 5547 return rval; 5548 } 5549 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP && 5550 ha->pdev->subsystem_device == 0x338e) { 5551 rval = qla2x00_read_sfp(vha, 0, &byte, 5552 0x98, 0x1, 1, BIT_15|BIT_14|BIT_0); 5553 *temp = byte; 5554 return rval; 5555 } 5556 ql_dbg(ql_dbg_mbx, vha, 0x10c9, 5557 "Thermal not supported by this card.\n"); 5558 return rval; 
/*
 * qla82xx_mbx_intr_enable() - Enable firmware interrupt delivery via the
 * MBC_TOGGLE_INTERRUPT mailbox command (mb[1] = 1 means enable).
 *
 * @vha: host adapter context.
 *
 * NOTE(review): this function gates on IS_FWI2_CAPABLE() while its
 * counterpart qla82xx_mbx_intr_disable() gates on IS_P3P_TYPE(); confirm
 * whether the asymmetry is intentional.
 *
 * Returns QLA_SUCCESS, QLA_FUNCTION_FAILED on unsupported hardware, or the
 * failure code from qla2x00_mailbox_command().
 */
int
qla82xx_mbx_intr_enable(scsi_qla_host_t *vha)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1017,
	    "Entered %s.\n", __func__);

	if (!IS_FWI2_CAPABLE(ha))
		return QLA_FUNCTION_FAILED;

	memset(mcp, 0, sizeof(mbx_cmd_t));
	mcp->mb[0] = MBC_TOGGLE_INTERRUPT;
	mcp->mb[1] = 1;	/* 1 = enable interrupts */

	mcp->out_mb = MBX_1|MBX_0;
	mcp->in_mb = MBX_0;
	mcp->tov = 30;
	mcp->flags = 0;

	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1016,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100e,
		    "Done %s.\n", __func__);
	}

	return rval;
}

	return rval;
}

/*
 * qla82xx_md_get_template_size() - Query minidump template size.
 * @vha: adapter to operate on.
 *
 * Issues MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE / RQST_TMPLT_SIZE and stores
 * the 32-bit size (mb[3]:mb[2]) in ha->md_template_size.
 *
 * Returns QLA_SUCCESS on success; QLA_FUNCTION_FAILED if the command
 * fails or the firmware reports a zero template size.
 */
int
qla82xx_md_get_template_size(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	int rval = QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111f,
	    "Entered %s.\n", __func__);

	memset(mcp->mb, 0, sizeof(mcp->mb));
	/* 32-bit command/subcode values split across 16-bit registers. */
	mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
	mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
	mcp->mb[2] = LSW(RQST_TMPLT_SIZE);
	mcp->mb[3] = MSW(RQST_TMPLT_SIZE);

	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|
	    MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;

	mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
	mcp->tov = MBX_TOV_SECONDS;
	rval = qla2x00_mailbox_command(vha, mcp);

	/* Always copy back return mailbox values. */
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1120,
		    "mailbox command FAILED=0x%x, subcode=%x.\n",
		    (mcp->mb[1] << 16) | mcp->mb[0],
		    (mcp->mb[3] << 16) | mcp->mb[2]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1121,
		    "Done %s.\n", __func__);
		/* Template size comes back as mb[3]:mb[2]. */
		ha->md_template_size = ((mcp->mb[3] << 16) | mcp->mb[2]);
		if (!ha->md_template_size) {
			ql_dbg(ql_dbg_mbx, vha, 0x1122,
			    "Null template size obtained.\n");
			rval = QLA_FUNCTION_FAILED;
		}
	}
	return rval;
}

/*
 * qla82xx_md_get_template() - Fetch the minidump template (ISP82xx).
 * @vha: adapter to operate on.
 *
 * Allocates a coherent DMA buffer of ha->md_template_size bytes
 * (ha->md_tmplt_hdr / ha->md_tmplt_hdr_dma) and asks the firmware to
 * DMA the whole template into it in one request.
 *
 * NOTE(review): the DMA buffer intentionally stays allocated on
 * success for later minidump capture; ownership/freeing is handled
 * elsewhere in the driver — confirm against the teardown path.
 */
int
qla82xx_md_get_template(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	int rval = QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1123,
	    "Entered %s.\n", __func__);

	ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev,
	    ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL);
	if (!ha->md_tmplt_hdr) {
		ql_log(ql_log_warn, vha, 0x1124,
		    "Unable to allocate memory for Minidump template.\n");
		return rval;
	}

	memset(mcp->mb, 0, sizeof(mcp->mb));
	mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
	mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
	mcp->mb[2] = LSW(RQST_TMPLT);
	mcp->mb[3] = MSW(RQST_TMPLT);
	/* 64-bit DMA address split across mb[4..7] (low to high words). */
	mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma));
	mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma));
	mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma));
	mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma));
	mcp->mb[8] = LSW(ha->md_template_size);
	mcp->mb[9] = MSW(ha->md_template_size);

	mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8|
	    MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1125,
		    "mailbox command FAILED=0x%x, subcode=%x.\n",
		    ((mcp->mb[1] << 16) | mcp->mb[0]),
		    ((mcp->mb[3] << 16) | mcp->mb[2]));
	} else
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1126,
		    "Done %s.\n", __func__);
	return rval;
}

/*
 * qla8044_md_get_template() - Fetch the minidump template (ISP8044).
 * @vha: adapter to operate on.
 *
 * Unlike the 82xx variant above, the 8044 firmware transfers the
 * template in MINIDUMP_SIZE_36K chunks, with the running byte offset
 * passed in mb[10]/mb[11] on each iteration.
 */
int
qla8044_md_get_template(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	int rval = QLA_FUNCTION_FAILED;
	int offset = 0, size = MINIDUMP_SIZE_36K;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11f,
	    "Entered %s.\n", __func__);

	ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev,
	    ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL);
	if (!ha->md_tmplt_hdr) {
		ql_log(ql_log_warn, vha, 0xb11b,
		    "Unable to allocate memory for Minidump template.\n");
		return rval;
	}

	memset(mcp->mb, 0, sizeof(mcp->mb));
	while (offset < ha->md_template_size) {
		mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
		mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
mcp->mb[2] = LSW(RQST_TMPLT); 5763 mcp->mb[3] = MSW(RQST_TMPLT); 5764 mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma + offset)); 5765 mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma + offset)); 5766 mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma + offset)); 5767 mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma + offset)); 5768 mcp->mb[8] = LSW(size); 5769 mcp->mb[9] = MSW(size); 5770 mcp->mb[10] = offset & 0x0000FFFF; 5771 mcp->mb[11] = offset & 0xFFFF0000; 5772 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; 5773 mcp->tov = MBX_TOV_SECONDS; 5774 mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8| 5775 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 5776 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0; 5777 rval = qla2x00_mailbox_command(vha, mcp); 5778 5779 if (rval != QLA_SUCCESS) { 5780 ql_dbg(ql_dbg_mbx, vha, 0xb11c, 5781 "mailbox command FAILED=0x%x, subcode=%x.\n", 5782 ((mcp->mb[1] << 16) | mcp->mb[0]), 5783 ((mcp->mb[3] << 16) | mcp->mb[2])); 5784 return rval; 5785 } else 5786 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11d, 5787 "Done %s.\n", __func__); 5788 offset = offset + size; 5789 } 5790 return rval; 5791 } 5792 5793 int 5794 qla81xx_set_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg) 5795 { 5796 int rval; 5797 struct qla_hw_data *ha = vha->hw; 5798 mbx_cmd_t mc; 5799 mbx_cmd_t *mcp = &mc; 5800 5801 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha)) 5802 return QLA_FUNCTION_FAILED; 5803 5804 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1133, 5805 "Entered %s.\n", __func__); 5806 5807 memset(mcp, 0, sizeof(mbx_cmd_t)); 5808 mcp->mb[0] = MBC_SET_LED_CONFIG; 5809 mcp->mb[1] = led_cfg[0]; 5810 mcp->mb[2] = led_cfg[1]; 5811 if (IS_QLA8031(ha)) { 5812 mcp->mb[3] = led_cfg[2]; 5813 mcp->mb[4] = led_cfg[3]; 5814 mcp->mb[5] = led_cfg[4]; 5815 mcp->mb[6] = led_cfg[5]; 5816 } 5817 5818 mcp->out_mb = MBX_2|MBX_1|MBX_0; 5819 if (IS_QLA8031(ha)) 5820 mcp->out_mb |= MBX_6|MBX_5|MBX_4|MBX_3; 5821 mcp->in_mb = MBX_0; 5822 mcp->tov = 30; 5823 mcp->flags = 0; 5824 5825 rval = qla2x00_mailbox_command(vha, mcp); 5826 if 
(rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1134,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1135,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla81xx_get_led_config() - Read back the LED configuration registers.
 * @vha: adapter to operate on.
 * @led_cfg: output array; 2 entries are filled on ISP81xx, 6 on
 *           ISP8031.  Only written on success.
 */
int
qla81xx_get_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1136,
	    "Entered %s.\n", __func__);

	memset(mcp, 0, sizeof(mbx_cmd_t));
	mcp->mb[0] = MBC_GET_LED_CONFIG;

	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_2|MBX_1|MBX_0;
	if (IS_QLA8031(ha))
		mcp->in_mb |= MBX_6|MBX_5|MBX_4|MBX_3;
	mcp->tov = 30;
	mcp->flags = 0;

	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1137,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		/* Mirror of the layout programmed by set_led_config(). */
		led_cfg[0] = mcp->mb[1];
		led_cfg[1] = mcp->mb[2];
		if (IS_QLA8031(ha)) {
			led_cfg[2] = mcp->mb[3];
			led_cfg[3] = mcp->mb[4];
			led_cfg[4] = mcp->mb[5];
			led_cfg[5] = mcp->mb[6];
		}
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1138,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla82xx_mbx_beacon_ctl() - Turn the beacon LED on or off (P3P parts).
 * @vha: adapter to operate on.
 * @enable: nonzero to enable the beacon, zero to disable.
 */
int
qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	if (!IS_P3P_TYPE(ha))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1127,
	    "Entered %s.\n", __func__);

	memset(mcp, 0, sizeof(mbx_cmd_t));
	mcp->mb[0] = MBC_SET_LED_CONFIG;
	/* 0xE = beacon on, 0xD = beacon off (firmware-defined codes). */
	if (enable)
		mcp->mb[7] = 0xE;
	else
		mcp->mb[7] = 0xD;

	mcp->out_mb = MBX_7|MBX_0;
	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;

	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1128,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1129,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla83xx_wr_reg() - Write a 32-bit value to a remote register.
 * @vha: adapter to operate on.
 * @reg: 32-bit register address.
 * @data: 32-bit value to write.
 *
 * Issues MBC_WRITE_REMOTE_REG with address in mb[1]/mb[2] and data in
 * mb[3]/mb[4] (low word first).
 */
int
qla83xx_wr_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1130,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_WRITE_REMOTE_REG;
	mcp->mb[1] = LSW(reg);
	mcp->mb[2] = MSW(reg);
	mcp->mb[3] = LSW(data);
	mcp->mb[4] = MSW(data);
	mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;

	mcp->in_mb = MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1131,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1132,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2x00_port_logout() - Implicitly log out a fabric port.
 * @vha: adapter to operate on.
 * @fcport: port to log out (loop_id is used).
 *
 * Not supported on ISP2100/ISP2200.  BIT_15 in mb[10] selects the
 * implicit-LOGO option of MBC_PORT_LOGOUT.
 */
int
qla2x00_port_logout(scsi_qla_host_t *vha, struct fc_port *fcport)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113b,
		    "Implicit LOGO Unsupported.\n");
		return QLA_FUNCTION_FAILED;
	}


	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113c,
	    "Entering %s.\n", __func__);

	/* Perform Implicit LOGO.
	 */
	mcp->mb[0] = MBC_PORT_LOGOUT;
	mcp->mb[1] = fcport->loop_id;
	mcp->mb[10] = BIT_15;	/* implicit LOGO option */
	mcp->out_mb = MBX_10|MBX_1|MBX_0;
	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS)
		ql_dbg(ql_dbg_mbx, vha, 0x113d,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	else
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113e,
		    "Done %s.\n", __func__);

	return rval;
}

/*
 * qla83xx_rd_reg() - Read a 32-bit remote (CAMRAM) register.
 * @vha: adapter to operate on.
 * @reg: 32-bit register address.
 * @data: output; receives mb[4]:mb[3] on success.
 *
 * Retries for up to 2 seconds if the firmware returns the 0xbad0bad0
 * sentinel, which CAMRAM reads can produce during a soft reset.
 */
int
qla83xx_rd_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t *data)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;
	unsigned long retry_max_time = jiffies + (2 * HZ);

	if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx, vha, 0x114b, "Entered %s.\n", __func__);

retry_rd_reg:
	mcp->mb[0] = MBC_READ_REMOTE_REG;
	mcp->mb[1] = LSW(reg);
	mcp->mb[2] = MSW(reg);
	mcp->out_mb = MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x114c,
		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
		    rval, mcp->mb[0], mcp->mb[1]);
	} else {
		*data = (mcp->mb[3] | (mcp->mb[4] << 16));
		if (*data == QLA8XXX_BAD_VALUE) {
			/*
			 * During soft-reset CAMRAM register reads might
			 * return 0xbad0bad0. So retry for MAX of 2 sec
			 * while reading camram registers.
			 */
			if (time_after(jiffies, retry_max_time)) {
				ql_dbg(ql_dbg_mbx, vha, 0x1141,
				    "Failure to read CAMRAM register. "
				    "data=0x%x.\n", *data);
				return QLA_FUNCTION_FAILED;
			}
			msleep(100);
			goto retry_rd_reg;
		}
		ql_dbg(ql_dbg_mbx, vha, 0x1142, "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla83xx_restart_nic_firmware() - Restart the NIC-side firmware
 * (ISP8031 only).  Triggers a firmware dump on failure for post-mortem
 * analysis.
 */
int
qla83xx_restart_nic_firmware(scsi_qla_host_t *vha)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA83XX(ha))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx, vha, 0x1143, "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_RESTART_NIC_FIRMWARE;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1144,
		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
		    rval, mcp->mb[0], mcp->mb[1]);
		/* Capture firmware state for debugging. */
		ha->isp_ops->fw_dump(vha, 0);
	} else {
		ql_dbg(ql_dbg_mbx, vha, 0x1145, "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla83xx_access_control() - Issue MBC_SET_ACCESS_CONTROL (ISP8031).
 * @vha: adapter to operate on.
 * @options: subcode selecting the access-control operation; the low
 *           byte's bits select the variant (see BIT_* checks below).
 * @start_addr/@end_addr: address range, only sent when BIT_2 is set.
 * @sector_size: output, only written when BIT_5 is set and the command
 *               succeeds.
 */
int
qla83xx_access_control(scsi_qla_host_t *vha, uint16_t options,
    uint32_t start_addr, uint32_t end_addr, uint16_t *sector_size)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	uint8_t subcode = (uint8_t)options;
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA8031(ha))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx, vha, 0x1146, "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_SET_ACCESS_CONTROL;
	mcp->mb[1] = options;
	mcp->out_mb = MBX_1|MBX_0;
	if (subcode & BIT_2) {
		/* Range-based variant: send start/end addresses too. */
		mcp->mb[2] = LSW(start_addr);
		mcp->mb[3] = MSW(start_addr);
		mcp->mb[4] = LSW(end_addr);
		mcp->mb[5] = MSW(end_addr);
		mcp->out_mb |= MBX_5|MBX_4|MBX_3|MBX_2;
	}
	mcp->in_mb = MBX_2|MBX_1|MBX_0;
	if (!(subcode & (BIT_2 | BIT_5)))
		mcp->in_mb |= MBX_4|MBX_3;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
6107 6108 if (rval != QLA_SUCCESS) { 6109 ql_dbg(ql_dbg_mbx, vha, 0x1147, 6110 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[4]=%x.\n", 6111 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3], 6112 mcp->mb[4]); 6113 ha->isp_ops->fw_dump(vha, 0); 6114 } else { 6115 if (subcode & BIT_5) 6116 *sector_size = mcp->mb[1]; 6117 else if (subcode & (BIT_6 | BIT_7)) { 6118 ql_dbg(ql_dbg_mbx, vha, 0x1148, 6119 "Driver-lock id=%x%x", mcp->mb[4], mcp->mb[3]); 6120 } else if (subcode & (BIT_3 | BIT_4)) { 6121 ql_dbg(ql_dbg_mbx, vha, 0x1149, 6122 "Flash-lock id=%x%x", mcp->mb[4], mcp->mb[3]); 6123 } 6124 ql_dbg(ql_dbg_mbx, vha, 0x114a, "Done %s.\n", __func__); 6125 } 6126 6127 return rval; 6128 } 6129 6130 int 6131 qla2x00_dump_mctp_data(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr, 6132 uint32_t size) 6133 { 6134 int rval; 6135 mbx_cmd_t mc; 6136 mbx_cmd_t *mcp = &mc; 6137 6138 if (!IS_MCTP_CAPABLE(vha->hw)) 6139 return QLA_FUNCTION_FAILED; 6140 6141 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114f, 6142 "Entered %s.\n", __func__); 6143 6144 mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED; 6145 mcp->mb[1] = LSW(addr); 6146 mcp->mb[2] = MSW(req_dma); 6147 mcp->mb[3] = LSW(req_dma); 6148 mcp->mb[4] = MSW(size); 6149 mcp->mb[5] = LSW(size); 6150 mcp->mb[6] = MSW(MSD(req_dma)); 6151 mcp->mb[7] = LSW(MSD(req_dma)); 6152 mcp->mb[8] = MSW(addr); 6153 /* Setting RAM ID to valid */ 6154 mcp->mb[10] |= BIT_7; 6155 /* For MCTP RAM ID is 0x40 */ 6156 mcp->mb[10] |= 0x40; 6157 6158 mcp->out_mb |= MBX_10|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1| 6159 MBX_0; 6160 6161 mcp->in_mb = MBX_0; 6162 mcp->tov = MBX_TOV_SECONDS; 6163 mcp->flags = 0; 6164 rval = qla2x00_mailbox_command(vha, mcp); 6165 6166 if (rval != QLA_SUCCESS) { 6167 ql_dbg(ql_dbg_mbx, vha, 0x114e, 6168 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 6169 } else { 6170 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114d, 6171 "Done %s.\n", __func__); 6172 } 6173 6174 return rval; 6175 } 6176 6177 int 6178 
qla26xx_dport_diagnostics(scsi_qla_host_t *vha,
    void *dd_buf, uint size, uint options)
{
	/*
	 * Run D-Port diagnostics and DMA the results into dd_buf.
	 * The buffer is streaming-mapped for the duration of the
	 * command and unmapped before returning.
	 *
	 * NOTE(review): dd_buf is zeroed *after* dma_map_single(); the
	 * DMA API expects CPU writes to a DMA_FROM_DEVICE mapping to
	 * happen before mapping (or under sync) — confirm on
	 * non-coherent platforms.
	 */
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	dma_addr_t dd_dma;

	if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) &&
	    !IS_QLA28XX(vha->hw))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x119f,
	    "Entered %s.\n", __func__);

	dd_dma = dma_map_single(&vha->hw->pdev->dev,
	    dd_buf, size, DMA_FROM_DEVICE);
	if (dma_mapping_error(&vha->hw->pdev->dev, dd_dma)) {
		ql_log(ql_log_warn, vha, 0x1194, "Failed to map dma buffer.\n");
		return QLA_MEMORY_ALLOC_FAILED;
	}

	memset(dd_buf, 0, size);

	mcp->mb[0] = MBC_DPORT_DIAGNOSTICS;
	mcp->mb[1] = options;
	/* 64-bit buffer address across mb[2,3] (low) and mb[6,7] (high). */
	mcp->mb[2] = MSW(LSD(dd_dma));
	mcp->mb[3] = LSW(LSD(dd_dma));
	mcp->mb[6] = MSW(MSD(dd_dma));
	mcp->mb[7] = LSW(MSD(dd_dma));
	mcp->mb[8] = size;
	mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->buf_size = size;
	mcp->flags = MBX_DMA_IN;
	mcp->tov = MBX_TOV_SECONDS * 4;	/* diagnostics can be slow */
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1195, "Failed=%x.\n", rval);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1196,
		    "Done %s.\n", __func__);
	}

	dma_unmap_single(&vha->hw->pdev->dev, dd_dma,
	    size, DMA_FROM_DEVICE);

	return rval;
}

/* srb completion callback: record the result and wake the waiter. */
static void qla2x00_async_mb_sp_done(srb_t *sp, int res)
{
	sp->u.iocb_cmd.u.mbx.rc = res;

	complete(&sp->u.iocb_cmd.u.mbx.comp);
	/* don't free sp here. Let the caller do the free */
}

/*
 * This mailbox uses the iocb interface to send MB command.
 * This allows non-critical (non chip setup) commands to go
 * out in parallel.
 */
int qla24xx_send_mb_cmd(struct scsi_qla_host *vha, mbx_cmd_t *mcp)
{
	/*
	 * Submit mcp->mb[] as an MB IOCB (SRB_MB_IOCB) and block until
	 * the completion callback fires.  On return, mcp->mb[] holds
	 * the firmware's reply registers.  Must not be called from a
	 * context that cannot sleep.
	 */
	int rval = QLA_FUNCTION_FAILED;
	srb_t *sp;
	struct srb_iocb *c;

	if (!vha->hw->flags.fw_started)
		goto done;

	sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
	if (!sp)
		goto done;

	sp->type = SRB_MB_IOCB;
	sp->name = mb_to_str(mcp->mb[0]);

	c = &sp->u.iocb_cmd;
	c->timeout = qla2x00_async_iocb_timeout;
	init_completion(&c->u.mbx.comp);

	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	/* Copy the request registers into the IOCB payload. */
	memcpy(sp->u.iocb_cmd.u.mbx.out_mb, mcp->mb, SIZEOF_IOCB_MB_REG);

	sp->done = qla2x00_async_mb_sp_done;

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1018,
		    "%s: %s Failed submission. %x.\n",
		    __func__, sp->name, rval);
		goto done_free_sp;
	}

	ql_dbg(ql_dbg_mbx, vha, 0x113f, "MB:%s hndl %x submitted\n",
	    sp->name, sp->handle);

	/* Wait for qla2x00_async_mb_sp_done() (or the timeout path). */
	wait_for_completion(&c->u.mbx.comp);
	memcpy(mcp->mb, sp->u.iocb_cmd.u.mbx.in_mb, SIZEOF_IOCB_MB_REG);

	rval = c->u.mbx.rc;
	switch (rval) {
	case QLA_FUNCTION_TIMEOUT:
		ql_dbg(ql_dbg_mbx, vha, 0x1140, "%s: %s Timeout. %x.\n",
		    __func__, sp->name, rval);
		break;
	case QLA_SUCCESS:
		ql_dbg(ql_dbg_mbx, vha, 0x119d, "%s: %s done.\n",
		    __func__, sp->name);
		break;
	default:
		ql_dbg(ql_dbg_mbx, vha, 0x119e, "%s: %s Failed. %x.\n",
		    __func__, sp->name, rval);
		break;
	}

done_free_sp:
	sp->free(sp);
done:
	return rval;
}

/*
 * qla24xx_gpdb_wait
 * NOTE: Do not call this routine from DPC thread
 */
int qla24xx_gpdb_wait(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
{
	/*
	 * Fetch the 24xx port database for fcport via the IOCB mailbox
	 * path, then parse it with __qla24xx_parse_gpdb().
	 *
	 * NOTE(review): mb[] is host-order u16 yet some fields are set
	 * with cpu_to_le16() — harmless on little-endian hosts, but the
	 * annotations are inconsistent; verify on big-endian.
	 */
	int rval = QLA_FUNCTION_FAILED;
	dma_addr_t pd_dma;
	struct port_database_24xx *pd;
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t mc;

	if (!vha->hw->flags.fw_started)
		goto done;

	pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
	if (pd == NULL) {
		ql_log(ql_log_warn, vha, 0xd047,
		    "Failed to allocate port database structure.\n");
		goto done_free_sp;	/* pd is NULL; label only returns */
	}

	memset(&mc, 0, sizeof(mc));
	mc.mb[0] = MBC_GET_PORT_DATABASE;
	mc.mb[1] = cpu_to_le16(fcport->loop_id);
	mc.mb[2] = MSW(pd_dma);
	mc.mb[3] = LSW(pd_dma);
	mc.mb[6] = MSW(MSD(pd_dma));
	mc.mb[7] = LSW(MSD(pd_dma));
	mc.mb[9] = cpu_to_le16(vha->vp_idx);
	mc.mb[10] = cpu_to_le16((uint16_t)opt);

	rval = qla24xx_send_mb_cmd(vha, &mc);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1193,
		    "%s: %8phC fail\n", __func__, fcport->port_name);
		goto done_free_sp;
	}

	rval = __qla24xx_parse_gpdb(vha, fcport, pd);

	ql_dbg(ql_dbg_mbx, vha, 0x1197, "%s: %8phC done\n",
	    __func__, fcport->port_name);

done_free_sp:
	if (pd)
		dma_pool_free(ha->s_dma_pool, pd, pd_dma);
done:
	return rval;
}

/*
 * __qla24xx_parse_gpdb() - Validate a fetched port database entry and
 * copy its fields (names, port id, port type, COS) into fcport.
 *
 * Returns QLA_SUCCESS, QLA_FUNCTION_FAILED if the port is not fully
 * logged in (PRLI incomplete), or QLA_NOT_LOGGED_IN if the WWPN no
 * longer matches (device lost mid-way).
 */
int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport,
    struct port_database_24xx *pd)
{
	int rval = QLA_SUCCESS;
	uint64_t zero = 0;
	u8 current_login_state, last_login_state;

	/* NVMe targets report login state in the high nibble. */
	if (NVME_TARGET(vha->hw, fcport)) {
		current_login_state = pd->current_login_state >> 4;
		last_login_state = pd->last_login_state >> 4;
	} else {
		current_login_state =
		    pd->current_login_state & 0xf;
		last_login_state = pd->last_login_state & 0xf;
	}

	/* Check for logged in state. */
	if (current_login_state != PDS_PRLI_COMPLETE) {
		ql_dbg(ql_dbg_mbx, vha, 0x119a,
		    "Unable to verify login-state (%x/%x) for loop_id %x.\n",
		    current_login_state, last_login_state, fcport->loop_id);
		rval = QLA_FUNCTION_FAILED;
		goto gpd_error_out;
	}

	/* A non-zero cached WWPN that no longer matches means the
	 * database entry belongs to a different device now. */
	if (fcport->loop_id == FC_NO_LOOP_ID ||
	    (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
	     memcmp(fcport->port_name, pd->port_name, 8))) {
		/* We lost the device mid way. */
		rval = QLA_NOT_LOGGED_IN;
		goto gpd_error_out;
	}

	/* Names are little-endian. */
	memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
	memcpy(fcport->port_name, pd->port_name, WWN_SIZE);

	/* Get port_id of device. */
	fcport->d_id.b.domain = pd->port_id[0];
	fcport->d_id.b.area = pd->port_id[1];
	fcport->d_id.b.al_pa = pd->port_id[2];
	fcport->d_id.b.rsvd_1 = 0;

	if (NVME_TARGET(vha->hw, fcport)) {
		/* PRLI service-parameter bits are active-low here:
		 * a cleared bit means the role is supported. */
		fcport->port_type = FCT_NVME;
		if ((pd->prli_svc_param_word_3[0] & BIT_5) == 0)
			fcport->port_type |= FCT_NVME_INITIATOR;
		if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
			fcport->port_type |= FCT_NVME_TARGET;
		if ((pd->prli_svc_param_word_3[0] & BIT_3) == 0)
			fcport->port_type |= FCT_NVME_DISCOVERY;
	} else {
		/* If not target must be initiator or unknown type. */
		if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
			fcport->port_type = FCT_INITIATOR;
		else
			fcport->port_type = FCT_TARGET;
	}
	/* Passback COS information. */
	fcport->supported_classes = (pd->flags & PDF_CLASS_2) ?
	    FC_COS_CLASS2 : FC_COS_CLASS3;

	if (pd->prli_svc_param_word_3[0] & BIT_7) {
		fcport->flags |= FCF_CONF_COMP_SUPPORTED;
		fcport->conf_compl_supported = 1;
	}

gpd_error_out:
	return rval;
}

/*
 * qla24xx_gidlist__wait
 * NOTE: don't call this routine from DPC thread.
 */
int qla24xx_gidlist_wait(struct scsi_qla_host *vha,
	void *id_list, dma_addr_t id_list_dma, uint16_t *entries)
{
	/*
	 * Fetch the firmware's ID list into the caller-supplied DMA
	 * buffer via the IOCB mailbox path; on success *entries gets
	 * the entry count from mb[1].
	 */
	int rval = QLA_FUNCTION_FAILED;
	mbx_cmd_t mc;

	if (!vha->hw->flags.fw_started)
		goto done;

	memset(&mc, 0, sizeof(mc));
	mc.mb[0] = MBC_GET_ID_LIST;
	mc.mb[2] = MSW(id_list_dma);
	mc.mb[3] = LSW(id_list_dma);
	mc.mb[6] = MSW(MSD(id_list_dma));
	mc.mb[7] = LSW(MSD(id_list_dma));
	mc.mb[8] = 0;
	mc.mb[9] = cpu_to_le16(vha->vp_idx);

	rval = qla24xx_send_mb_cmd(vha, &mc);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x119b,
		    "%s: fail\n", __func__);
	} else {
		*entries = mc.mb[1];
		ql_dbg(ql_dbg_mbx, vha, 0x119c,
		    "%s: done\n", __func__);
	}
done:
	return rval;
}

/*
 * qla27xx_set_zio_threshold() - Program the ZIO threshold.
 * @vha: adapter to operate on.
 * @value: new threshold; mb[1] = 1 selects the "set" operation.
 */
int qla27xx_set_zio_threshold(scsi_qla_host_t *vha, uint16_t value)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1200,
	    "Entered %s\n", __func__);

	memset(mcp->mb, 0, sizeof(mcp->mb));
	mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
	mcp->mb[1] = cpu_to_le16(1);
	mcp->mb[2] = cpu_to_le16(value);
	mcp->out_mb = MBX_2 | MBX_1 | MBX_0;
	mcp->in_mb = MBX_2 | MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;

	rval = qla2x00_mailbox_command(vha, mcp);

	ql_dbg(ql_dbg_mbx, vha, 0x1201, "%s %x\n",
	    (rval != QLA_SUCCESS) ?
"Failed" : "Done", rval); 6482 6483 return rval; 6484 } 6485 6486 int qla27xx_get_zio_threshold(scsi_qla_host_t *vha, uint16_t *value) 6487 { 6488 int rval; 6489 mbx_cmd_t mc; 6490 mbx_cmd_t *mcp = &mc; 6491 6492 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1203, 6493 "Entered %s\n", __func__); 6494 6495 memset(mcp->mb, 0, sizeof(mcp->mb)); 6496 mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD; 6497 mcp->mb[1] = cpu_to_le16(0); 6498 mcp->out_mb = MBX_1 | MBX_0; 6499 mcp->in_mb = MBX_2 | MBX_0; 6500 mcp->tov = MBX_TOV_SECONDS; 6501 mcp->flags = 0; 6502 6503 rval = qla2x00_mailbox_command(vha, mcp); 6504 if (rval == QLA_SUCCESS) 6505 *value = mc.mb[2]; 6506 6507 ql_dbg(ql_dbg_mbx, vha, 0x1205, "%s %x\n", 6508 (rval != QLA_SUCCESS) ? "Failed" : "Done", rval); 6509 6510 return rval; 6511 } 6512 6513 int 6514 qla2x00_read_sfp_dev(struct scsi_qla_host *vha, char *buf, int count) 6515 { 6516 struct qla_hw_data *ha = vha->hw; 6517 uint16_t iter, addr, offset; 6518 dma_addr_t phys_addr; 6519 int rval, c; 6520 u8 *sfp_data; 6521 6522 memset(ha->sfp_data, 0, SFP_DEV_SIZE); 6523 addr = 0xa0; 6524 phys_addr = ha->sfp_data_dma; 6525 sfp_data = ha->sfp_data; 6526 offset = c = 0; 6527 6528 for (iter = 0; iter < SFP_DEV_SIZE / SFP_BLOCK_SIZE; iter++) { 6529 if (iter == 4) { 6530 /* Skip to next device address. 
*/ 6531 addr = 0xa2; 6532 offset = 0; 6533 } 6534 6535 rval = qla2x00_read_sfp(vha, phys_addr, sfp_data, 6536 addr, offset, SFP_BLOCK_SIZE, BIT_1); 6537 if (rval != QLA_SUCCESS) { 6538 ql_log(ql_log_warn, vha, 0x706d, 6539 "Unable to read SFP data (%x/%x/%x).\n", rval, 6540 addr, offset); 6541 6542 return rval; 6543 } 6544 6545 if (buf && (c < count)) { 6546 u16 sz; 6547 6548 if ((count - c) >= SFP_BLOCK_SIZE) 6549 sz = SFP_BLOCK_SIZE; 6550 else 6551 sz = count - c; 6552 6553 memcpy(buf, sfp_data, sz); 6554 buf += SFP_BLOCK_SIZE; 6555 c += sz; 6556 } 6557 phys_addr += SFP_BLOCK_SIZE; 6558 sfp_data += SFP_BLOCK_SIZE; 6559 offset += SFP_BLOCK_SIZE; 6560 } 6561 6562 return rval; 6563 } 6564 6565 int qla24xx_res_count_wait(struct scsi_qla_host *vha, 6566 uint16_t *out_mb, int out_mb_sz) 6567 { 6568 int rval = QLA_FUNCTION_FAILED; 6569 mbx_cmd_t mc; 6570 6571 if (!vha->hw->flags.fw_started) 6572 goto done; 6573 6574 memset(&mc, 0, sizeof(mc)); 6575 mc.mb[0] = MBC_GET_RESOURCE_COUNTS; 6576 6577 rval = qla24xx_send_mb_cmd(vha, &mc); 6578 if (rval != QLA_SUCCESS) { 6579 ql_dbg(ql_dbg_mbx, vha, 0xffff, 6580 "%s: fail\n", __func__); 6581 } else { 6582 if (out_mb_sz <= SIZEOF_IOCB_MB_REG) 6583 memcpy(out_mb, mc.mb, out_mb_sz); 6584 else 6585 memcpy(out_mb, mc.mb, SIZEOF_IOCB_MB_REG); 6586 6587 ql_dbg(ql_dbg_mbx, vha, 0xffff, 6588 "%s: done\n", __func__); 6589 } 6590 done: 6591 return rval; 6592 } 6593 6594 int qla28xx_secure_flash_update(scsi_qla_host_t *vha, uint16_t opts, 6595 uint16_t region, uint32_t len, dma_addr_t sfub_dma_addr, 6596 uint32_t sfub_len) 6597 { 6598 int rval; 6599 mbx_cmd_t mc; 6600 mbx_cmd_t *mcp = &mc; 6601 6602 mcp->mb[0] = MBC_SECURE_FLASH_UPDATE; 6603 mcp->mb[1] = opts; 6604 mcp->mb[2] = region; 6605 mcp->mb[3] = MSW(len); 6606 mcp->mb[4] = LSW(len); 6607 mcp->mb[5] = MSW(sfub_dma_addr); 6608 mcp->mb[6] = LSW(sfub_dma_addr); 6609 mcp->mb[7] = MSW(MSD(sfub_dma_addr)); 6610 mcp->mb[8] = LSW(MSD(sfub_dma_addr)); 6611 mcp->mb[9] = sfub_len; 6612 
	mcp->out_mb =
	    MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_2|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s(%ld): failed rval 0x%x, %x %x %x",
		    __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1],
		    mcp->mb[2]);
	}

	return rval;
}

/*
 * qla2xxx_write_remote_register() - Write a 32-bit value to a remote
 * register via MBC_WRITE_REMOTE_REG.
 * @vha: adapter to operate on.
 * @addr: 32-bit register address (mb[1]/mb[2], low word first).
 * @data: 32-bit value to write (mb[3]/mb[4]).
 */
int qla2xxx_write_remote_register(scsi_qla_host_t *vha, uint32_t addr,
    uint32_t data)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_WRITE_REMOTE_REG;
	mcp->mb[1] = LSW(addr);
	mcp->mb[2] = MSW(addr);
	mcp->mb[3] = LSW(data);
	mcp->mb[4] = MSW(data);
	mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x10e9,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2xxx_read_remote_register() - Read a 32-bit remote register via
 * MBC_READ_REMOTE_REG.
 * @vha: adapter to operate on.
 * @addr: 32-bit register address.
 * @data: output; receives mb[4]:mb[3].
 *
 * NOTE(review): *data is written unconditionally, even when the
 * mailbox command fails — callers must check the return status before
 * trusting the value.
 */
int qla2xxx_read_remote_register(scsi_qla_host_t *vha, uint32_t addr,
    uint32_t *data)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_READ_REMOTE_REG;
	mcp->mb[1] = LSW(addr);
	mcp->mb[2] = MSW(addr);
	mcp->out_mb = MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	*data = (uint32_t)((((uint32_t)mcp->mb[4]) << 16) | mcp->mb[3]);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x10e9,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
		    "Done %s.\n", __func__);
	}

	return rval;
}