/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/delay.h>
#include <linux/gfp.h>

static struct mb_cmd_name {
	uint16_t cmd;
	const char *str;
} mb_str[] = {
	{MBC_GET_PORT_DATABASE,		"GPDB"},
	{MBC_GET_ID_LIST,		"GIDList"},
	{MBC_GET_LINK_PRIV_STATS,	"Stats"},
	{MBC_GET_RESOURCE_COUNTS,	"ResCnt"},
};

static const char *mb_to_str(uint16_t cmd)
{
	int i;
	struct mb_cmd_name *e;

	for (i = 0; i < ARRAY_SIZE(mb_str); i++) {
		e = mb_str + i;
		if (cmd == e->cmd)
			return e->str;
	}
	return "unknown";
}

static struct rom_cmd {
	uint16_t cmd;
} rom_cmds[] = {
	{ MBC_LOAD_RAM },
	{ MBC_EXECUTE_FIRMWARE },
	{ MBC_READ_RAM_WORD },
	{ MBC_MAILBOX_REGISTER_TEST },
	{ MBC_VERIFY_CHECKSUM },
	{ MBC_GET_FIRMWARE_VERSION },
	{ MBC_LOAD_RISC_RAM },
	{ MBC_DUMP_RISC_RAM },
	{ MBC_LOAD_RISC_RAM_EXTENDED },
	{ MBC_DUMP_RISC_RAM_EXTENDED },
	{ MBC_WRITE_RAM_WORD_EXTENDED },
	{ MBC_READ_RAM_EXTENDED },
	{ MBC_GET_RESOURCE_COUNTS },
	{ MBC_SET_FIRMWARE_OPTION },
	{ MBC_MID_INITIALIZE_FIRMWARE },
	{ MBC_GET_FIRMWARE_STATE },
	{ MBC_GET_MEM_OFFLOAD_CNTRL_STAT },
	{ MBC_GET_RETRY_COUNT },
	{ MBC_TRACE_CONTROL },
	{ MBC_INITIALIZE_MULTIQ },
	{ MBC_IOCB_COMMAND_A64 },
	{ MBC_GET_ADAPTER_LOOP_ID },
	{ MBC_READ_SFP },
	{ MBC_GET_RNID_PARAMS },
	{ MBC_GET_SET_ZIO_THRESHOLD },
};

static int is_rom_cmd(uint16_t cmd)
{
	int i;
	struct rom_cmd *wc;

	for (i = 0; i < ARRAY_SIZE(rom_cmds); i++) {
		wc = rom_cmds + i;
		if (wc->cmd == cmd)
			return 1;
	}

	return 0;
}

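/*
 * Note: is_rom_cmd() is consulted by qla2x00_mailbox_command() below.
 * Mailbox commands listed in rom_cmds[] are still allowed to execute
 * while an ISP abort is pending; any other command is failed early
 * with QLA_FUNCTION_TIMEOUT until the abort completes.
 */
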
/*
 * qla2x00_mailbox_command
 *	Issue mailbox command and wait for completion.
 *
 * Input:
 *	ha = adapter block pointer.
 *	mcp = driver internal mbx struct pointer.
 *
 * Output:
 *	mb[MAX_MAILBOX_REGISTER_COUNT] = returned mailbox data.
 *
 * Returns:
 *	0 : QLA_SUCCESS = cmd performed successfully
 *	1 : QLA_FUNCTION_FAILED (error encountered)
 *	6 : QLA_FUNCTION_TIMEOUT (timeout condition encountered)
 *
 * Context:
 *	Kernel context.
 */
static int
qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
{
	int		rval, i;
	unsigned long	flags = 0;
	device_reg_t	*reg;
	uint8_t		abort_active;
	uint8_t		io_lock_on;
	uint16_t	command = 0;
	uint16_t	*iptr;
	uint16_t __iomem *optr;
	uint32_t	cnt;
	uint32_t	mboxes;
	unsigned long	wait_time;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
	u32 chip_reset;


	ql_dbg(ql_dbg_mbx, vha, 0x1000, "Entered %s.\n", __func__);

	if (ha->pdev->error_state > pci_channel_io_frozen) {
		ql_log(ql_log_warn, vha, 0x1001,
		    "error_state is greater than pci_channel_io_frozen, "
		    "exiting.\n");
		return QLA_FUNCTION_TIMEOUT;
	}

	if (vha->device_flags & DFLG_DEV_FAILED) {
		ql_log(ql_log_warn, vha, 0x1002,
		    "Device in failed state, exiting.\n");
		return QLA_FUNCTION_TIMEOUT;
	}

	/* if PCI error, then avoid mbx processing.*/
	if (test_bit(PFLG_DISCONNECTED, &base_vha->dpc_flags) &&
	    test_bit(UNLOADING, &base_vha->dpc_flags)) {
		ql_log(ql_log_warn, vha, 0xd04e,
		    "PCI error, exiting.\n");
		return QLA_FUNCTION_TIMEOUT;
	}

	reg = ha->iobase;
	io_lock_on = base_vha->flags.init_done;

	rval = QLA_SUCCESS;
	abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
	chip_reset = ha->chip_reset;

	if (ha->flags.pci_channel_io_perm_failure) {
		ql_log(ql_log_warn, vha, 0x1003,
		    "Perm failure on EEH timeout MBX, exiting.\n");
		return QLA_FUNCTION_TIMEOUT;
	}

	if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
		/* Setting Link-Down error */
		mcp->mb[0] = MBS_LINK_DOWN_ERROR;
		ql_log(ql_log_warn, vha, 0x1004,
		    "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
		return QLA_FUNCTION_TIMEOUT;
	}

	/* check if ISP abort is active and return cmd with timeout */
	if ((test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
	    test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) &&
	    !is_rom_cmd(mcp->mb[0])) {
		ql_log(ql_log_info, vha, 0x1005,
		    "Cmd 0x%x aborted with timeout since ISP Abort is pending\n",
		    mcp->mb[0]);
		return QLA_FUNCTION_TIMEOUT;
	}

	atomic_inc(&ha->num_pend_mbx_stage1);
	/*
	 * Wait for active mailbox commands to finish by waiting at most tov
	 * seconds. This is to serialize actual issuing of mailbox cmds during
	 * non ISP abort time.
	 */
	if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) {
		/* Timeout occurred. Return error. */
		ql_log(ql_log_warn, vha, 0xd035,
		    "Cmd access timeout, cmd=0x%x, Exiting.\n",
		    mcp->mb[0]);
		atomic_dec(&ha->num_pend_mbx_stage1);
		return QLA_FUNCTION_TIMEOUT;
	}
	atomic_dec(&ha->num_pend_mbx_stage1);
	if (ha->flags.purge_mbox || chip_reset != ha->chip_reset) {
		rval = QLA_ABORTED;
		goto premature_exit;
	}


	/* Save mailbox command for debug */
	ha->mcp = mcp;

	ql_dbg(ql_dbg_mbx, vha, 0x1006,
	    "Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]);

	spin_lock_irqsave(&ha->hardware_lock, flags);

	if (ha->flags.purge_mbox || chip_reset != ha->chip_reset ||
	    ha->flags.mbox_busy) {
		rval = QLA_ABORTED;
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		goto premature_exit;
	}
	ha->flags.mbox_busy = 1;

	/* Load mailbox registers. */
	if (IS_P3P_TYPE(ha))
		optr = (uint16_t __iomem *)&reg->isp82.mailbox_in[0];
	else if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha)))
		optr = (uint16_t __iomem *)&reg->isp24.mailbox0;
	else
		optr = (uint16_t __iomem *)MAILBOX_REG(ha, &reg->isp, 0);

	iptr = mcp->mb;
	command = mcp->mb[0];
	mboxes = mcp->out_mb;

	ql_dbg(ql_dbg_mbx, vha, 0x1111,
	    "Mailbox registers (OUT):\n");
	for (cnt = 0; cnt < ha->mbx_count; cnt++) {
		if (IS_QLA2200(ha) && cnt == 8)
			optr = (uint16_t __iomem *)MAILBOX_REG(ha, &reg->isp, 8);
		if (mboxes & BIT_0) {
			ql_dbg(ql_dbg_mbx, vha, 0x1112,
			    "mbox[%d]<-0x%04x\n", cnt, *iptr);
			WRT_REG_WORD(optr, *iptr);
		}

		mboxes >>= 1;
		optr++;
		iptr++;
	}

	ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1117,
	    "I/O Address = %p.\n", optr);

	/* Issue set host interrupt command to send cmd out. */
	ha->flags.mbox_int = 0;
	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);

	/* Unlock mbx registers and wait for interrupt */
	ql_dbg(ql_dbg_mbx, vha, 0x100f,
	    "Going to unlock irq & waiting for interrupts. "
	    "jiffies=%lx.\n", jiffies);

	/* Wait for mbx cmd completion until timeout */
	atomic_inc(&ha->num_pend_mbx_stage2);
	if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) {
		set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);

		if (IS_P3P_TYPE(ha))
			WRT_REG_DWORD(&reg->isp82.hint, HINT_MBX_INT_PENDING);
		else if (IS_FWI2_CAPABLE(ha))
			WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
		else
			WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		wait_time = jiffies;
		atomic_inc(&ha->num_pend_mbx_stage3);
		if (!wait_for_completion_timeout(&ha->mbx_intr_comp,
		    mcp->tov * HZ)) {
			if (chip_reset != ha->chip_reset) {
				spin_lock_irqsave(&ha->hardware_lock, flags);
				ha->flags.mbox_busy = 0;
				spin_unlock_irqrestore(&ha->hardware_lock,
				    flags);
				atomic_dec(&ha->num_pend_mbx_stage2);
				atomic_dec(&ha->num_pend_mbx_stage3);
				rval = QLA_ABORTED;
				goto premature_exit;
			}
			ql_dbg(ql_dbg_mbx, vha, 0x117a,
			    "cmd=%x Timeout.\n", command);
			spin_lock_irqsave(&ha->hardware_lock, flags);
			clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
			spin_unlock_irqrestore(&ha->hardware_lock, flags);

		} else if (ha->flags.purge_mbox ||
		    chip_reset != ha->chip_reset) {
			spin_lock_irqsave(&ha->hardware_lock, flags);
			ha->flags.mbox_busy = 0;
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
			atomic_dec(&ha->num_pend_mbx_stage2);
			atomic_dec(&ha->num_pend_mbx_stage3);
			rval = QLA_ABORTED;
			goto premature_exit;
		}
		atomic_dec(&ha->num_pend_mbx_stage3);

		if (time_after(jiffies, wait_time + 5 * HZ))
			ql_log(ql_log_warn, vha, 0x1015, "cmd=0x%x, waited %d msecs\n",
			    command, jiffies_to_msecs(jiffies - wait_time));
	} else {
		ql_dbg(ql_dbg_mbx, vha, 0x1011,
		    "Cmd=%x Polling Mode.\n", command);

		if (IS_P3P_TYPE(ha)) {
			if (RD_REG_DWORD(&reg->isp82.hint) &
			    HINT_MBX_INT_PENDING) {
				ha->flags.mbox_busy = 0;
				spin_unlock_irqrestore(&ha->hardware_lock,
				    flags);
				atomic_dec(&ha->num_pend_mbx_stage2);
				ql_dbg(ql_dbg_mbx, vha, 0x1012,
				    "Pending mailbox timeout, exiting.\n");
				rval = QLA_FUNCTION_TIMEOUT;
				goto premature_exit;
			}
			WRT_REG_DWORD(&reg->isp82.hint, HINT_MBX_INT_PENDING);
		} else if (IS_FWI2_CAPABLE(ha))
			WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
		else
			WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */
		while (!ha->flags.mbox_int) {
			if (ha->flags.purge_mbox ||
			    chip_reset != ha->chip_reset) {
				spin_lock_irqsave(&ha->hardware_lock, flags);
				ha->flags.mbox_busy = 0;
				spin_unlock_irqrestore(&ha->hardware_lock,
				    flags);
				atomic_dec(&ha->num_pend_mbx_stage2);
				rval = QLA_ABORTED;
				goto premature_exit;
			}

			if (time_after(jiffies, wait_time))
				break;

			/*
			 * Check if it's UNLOADING, cause we cannot poll in
			 * this case, or else a NULL pointer dereference
			 * is triggered.
			 */
			if (unlikely(test_bit(UNLOADING, &base_vha->dpc_flags)))
				return QLA_FUNCTION_TIMEOUT;

			/* Check for pending interrupts. */
			qla2x00_poll(ha->rsp_q_map[0]);

			if (!ha->flags.mbox_int &&
			    !(IS_QLA2200(ha) &&
			    command == MBC_LOAD_RISC_RAM_EXTENDED))
				msleep(10);
		} /* while */
		ql_dbg(ql_dbg_mbx, vha, 0x1013,
		    "Waited %d sec.\n",
		    (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ));
	}
	atomic_dec(&ha->num_pend_mbx_stage2);

	/* Check whether we timed out */
	if (ha->flags.mbox_int) {
		uint16_t *iptr2;

		ql_dbg(ql_dbg_mbx, vha, 0x1014,
		    "Cmd=%x completed.\n", command);

		/* Got interrupt. Clear the flag. */
		ha->flags.mbox_int = 0;
		clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);

		if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
			spin_lock_irqsave(&ha->hardware_lock, flags);
			ha->flags.mbox_busy = 0;
			spin_unlock_irqrestore(&ha->hardware_lock, flags);

			/* Setting Link-Down error */
			mcp->mb[0] = MBS_LINK_DOWN_ERROR;
			ha->mcp = NULL;
			rval = QLA_FUNCTION_FAILED;
			ql_log(ql_log_warn, vha, 0xd048,
			    "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
			goto premature_exit;
		}

		if (ha->mailbox_out[0] != MBS_COMMAND_COMPLETE) {
			ql_dbg(ql_dbg_mbx, vha, 0x11ff,
			    "mb_out[0] = %#x <> %#x\n", ha->mailbox_out[0],
			    MBS_COMMAND_COMPLETE);
			rval = QLA_FUNCTION_FAILED;
		}

		/* Load return mailbox registers. */
		iptr2 = mcp->mb;
		iptr = (uint16_t *)&ha->mailbox_out[0];
		mboxes = mcp->in_mb;

		ql_dbg(ql_dbg_mbx, vha, 0x1113,
		    "Mailbox registers (IN):\n");
		for (cnt = 0; cnt < ha->mbx_count; cnt++) {
			if (mboxes & BIT_0) {
				*iptr2 = *iptr;
				ql_dbg(ql_dbg_mbx, vha, 0x1114,
				    "mbox[%d]->0x%04x\n", cnt, *iptr2);
			}

			mboxes >>= 1;
			iptr2++;
			iptr++;
		}
	} else {

		uint16_t mb[8];
		uint32_t ictrl, host_status, hccr;
		uint16_t w;

		if (IS_FWI2_CAPABLE(ha)) {
			mb[0] = RD_REG_WORD(&reg->isp24.mailbox0);
			mb[1] = RD_REG_WORD(&reg->isp24.mailbox1);
			mb[2] = RD_REG_WORD(&reg->isp24.mailbox2);
			mb[3] = RD_REG_WORD(&reg->isp24.mailbox3);
			mb[7] = RD_REG_WORD(&reg->isp24.mailbox7);
			ictrl = RD_REG_DWORD(&reg->isp24.ictrl);
			host_status = RD_REG_DWORD(&reg->isp24.host_status);
			hccr = RD_REG_DWORD(&reg->isp24.hccr);

			ql_log(ql_log_warn, vha, 0xd04c,
			    "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
			    "mb[0-3]=[0x%x 0x%x 0x%x 0x%x] mb7 0x%x host_status 0x%x hccr 0x%x\n",
			    command, ictrl, jiffies, mb[0], mb[1], mb[2], mb[3],
			    mb[7], host_status, hccr);

		} else {
			mb[0] = RD_MAILBOX_REG(ha, &reg->isp, 0);
			ictrl = RD_REG_WORD(&reg->isp.ictrl);
			ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119,
			    "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
			    "mb[0]=0x%x\n", command, ictrl, jiffies, mb[0]);
		}
		ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019);

		/* Capture FW dump only, if PCI device active */
		if (!pci_channel_offline(vha->hw->pdev)) {
			pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
			if (w == 0xffff || ictrl == 0xffffffff ||
			    (chip_reset != ha->chip_reset)) {
				/* This is a special case: if the driver is
				 * being unloaded and the PCI device has gone
				 * into a bad state due to a PCI error, only
				 * the PCI ERR flag is set, so do a premature
				 * exit for this case.
				 */
				spin_lock_irqsave(&ha->hardware_lock, flags);
				ha->flags.mbox_busy = 0;
				spin_unlock_irqrestore(&ha->hardware_lock,
				    flags);
				rval = QLA_FUNCTION_TIMEOUT;
				goto premature_exit;
			}

			/* Attempt to capture firmware dump for further
			 * analysis of the current firmware state. We do not
			 * need to do this if we are intentionally generating
			 * a dump.
			 */
			if (mcp->mb[0] != MBC_GEN_SYSTEM_ERROR)
				ha->isp_ops->fw_dump(vha, 0);
			rval = QLA_FUNCTION_TIMEOUT;
		}
	}
	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->flags.mbox_busy = 0;
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	/* Clean up */
	ha->mcp = NULL;

	if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) {
		ql_dbg(ql_dbg_mbx, vha, 0x101a,
		    "Checking for additional resp interrupt.\n");

		/* polling mode for non isp_abort commands. */
		qla2x00_poll(ha->rsp_q_map[0]);
	}

	if (rval == QLA_FUNCTION_TIMEOUT &&
	    mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) {
		if (!io_lock_on || (mcp->flags & IOCTL_CMD) ||
		    ha->flags.eeh_busy) {
			/* Not in dpc. Schedule it for dpc to take over. */
			ql_dbg(ql_dbg_mbx, vha, 0x101b,
			    "Timeout, schedule isp_abort_needed.\n");

			if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
			    !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
			    !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
				if (IS_QLA82XX(ha)) {
					ql_dbg(ql_dbg_mbx, vha, 0x112a,
					    "disabling pause transmit on port "
					    "0 & 1.\n");
					qla82xx_wr_32(ha,
					    QLA82XX_CRB_NIU + 0x98,
					    CRB_NIU_XG_PAUSE_CTL_P0|
					    CRB_NIU_XG_PAUSE_CTL_P1);
				}
				ql_log(ql_log_info, base_vha, 0x101c,
				    "Mailbox cmd timeout occurred, cmd=0x%x, "
				    "mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP "
				    "abort.\n", command, mcp->mb[0],
				    ha->flags.eeh_busy);
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
			}
		} else if (current == ha->dpc_thread) {
			/* call abort directly since we are in the DPC thread */
			ql_dbg(ql_dbg_mbx, vha, 0x101d,
			    "Timeout, calling abort_isp.\n");

			if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
			    !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
			    !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
				if (IS_QLA82XX(ha)) {
					ql_dbg(ql_dbg_mbx, vha, 0x112b,
					    "disabling pause transmit on port "
					    "0 & 1.\n");
					qla82xx_wr_32(ha,
					    QLA82XX_CRB_NIU + 0x98,
					    CRB_NIU_XG_PAUSE_CTL_P0|
					    CRB_NIU_XG_PAUSE_CTL_P1);
				}
				ql_log(ql_log_info, base_vha, 0x101e,
				    "Mailbox cmd timeout occurred, cmd=0x%x, "
				    "mb[0]=0x%x. Scheduling ISP abort ",
				    command, mcp->mb[0]);
				set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
				clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				/* Allow next mbx cmd to come in. */
				complete(&ha->mbx_cmd_comp);
				if (ha->isp_ops->abort_isp(vha)) {
					/* Failed. retry later. */
					set_bit(ISP_ABORT_NEEDED,
					    &vha->dpc_flags);
				}
				clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
				ql_dbg(ql_dbg_mbx, vha, 0x101f,
				    "Finished abort_isp.\n");
				goto mbx_done;
			}
		}
	}

premature_exit:
	/* Allow next mbx cmd to come in. */
	complete(&ha->mbx_cmd_comp);

mbx_done:
	if (rval == QLA_ABORTED) {
		ql_log(ql_log_info, vha, 0xd035,
		    "Chip Reset in progress. Purging Mbox cmd=0x%x.\n",
		    mcp->mb[0]);
	} else if (rval) {
		if (ql2xextended_error_logging & (ql_dbg_disc|ql_dbg_mbx)) {
			pr_warn("%s [%s]-%04x:%ld: **** Failed=%x", QL_MSGHDR,
			    dev_name(&ha->pdev->dev), 0x1020+0x800,
			    vha->host_no, rval);
			mboxes = mcp->in_mb;
			cnt = 4;
			for (i = 0; i < ha->mbx_count && cnt; i++, mboxes >>= 1)
				if (mboxes & BIT_0) {
					printk(" mb[%u]=%x", i, mcp->mb[i]);
					cnt--;
				}
			pr_warn(" cmd=%x ****\n", command);
		}
		if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha))) {
			ql_dbg(ql_dbg_mbx, vha, 0x1198,
			    "host_status=%#x intr_ctrl=%#x intr_status=%#x\n",
			    RD_REG_DWORD(&reg->isp24.host_status),
			    RD_REG_DWORD(&reg->isp24.ictrl),
			    RD_REG_DWORD(&reg->isp24.istatus));
		} else {
			ql_dbg(ql_dbg_mbx, vha, 0x1206,
			    "ctrl_status=%#x ictrl=%#x istatus=%#x\n",
			    RD_REG_WORD(&reg->isp.ctrl_status),
			    RD_REG_WORD(&reg->isp.ictrl),
			    RD_REG_WORD(&reg->isp.istatus));
		}
	} else {
		ql_dbg(ql_dbg_mbx, base_vha, 0x1021, "Done %s.\n", __func__);
	}

	return rval;
}

int
qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
    uint32_t risc_code_size)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1022,
	    "Entered %s.\n", __func__);

	if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) {
		mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED;
		mcp->mb[8] = MSW(risc_addr);
		mcp->out_mb = MBX_8|MBX_0;
	} else {
		mcp->mb[0] = MBC_LOAD_RISC_RAM;
		mcp->out_mb = MBX_0;
	}
	mcp->mb[1] = LSW(risc_addr);
	mcp->mb[2] = MSW(req_dma);
	mcp->mb[3] = LSW(req_dma);
	mcp->mb[6] = MSW(MSD(req_dma));
	mcp->mb[7] = LSW(MSD(req_dma));
	mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
	if (IS_FWI2_CAPABLE(ha)) {
		mcp->mb[4] = MSW(risc_code_size);
		mcp->mb[5] = LSW(risc_code_size);
		mcp->out_mb |= MBX_5|MBX_4;
	} else {
		mcp->mb[4] = LSW(risc_code_size);
		mcp->out_mb |= MBX_4;
	}

	mcp->in_mb = MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1023,
		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
		    rval, mcp->mb[0], mcp->mb[1]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1024,
		    "Done %s.\n", __func__);
	}

	return rval;
}

#define	EXTENDED_BB_CREDITS	BIT_0
#define	NVME_ENABLE_FLAG	BIT_3
static inline uint16_t qla25xx_set_sfp_lr_dist(struct qla_hw_data *ha)
{
	uint16_t mb4 = BIT_0;

	if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
		mb4 |= ha->long_range_distance << LR_DIST_FW_POS;

	return mb4;
}

static inline uint16_t qla25xx_set_nvr_lr_dist(struct qla_hw_data *ha)
{
	uint16_t mb4 = BIT_0;

	if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
		struct nvram_81xx *nv = ha->nvram;

		mb4 |= LR_DIST_FW_FIELD(nv->enhanced_features);
	}

	return mb4;
}

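/*
 * Summary (for reference): on FWI2-capable adapters, qla2x00_execute_fw()
 * below assembles mailbox register 4 from several option bits before
 * starting the firmware: BIT_0 plus a long-range distance field from the
 * helpers above, NVME_ENABLE_FLAG (BIT_3) when FC-NVMe is requested,
 * BIT_4 together with mb[11] for an NVRAM-specified minimum link speed,
 * and ENABLE_EXTENDED_LOGIN/ENABLE_EXCHANGE_OFFLD when those features
 * are enabled.
 */
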
/*
 * qla2x00_execute_fw
 *	Start adapter firmware.
 *
 * Input:
 *	ha = adapter block pointer.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1025,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_EXECUTE_FIRMWARE;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_0;
	if (IS_FWI2_CAPABLE(ha)) {
		mcp->mb[1] = MSW(risc_addr);
		mcp->mb[2] = LSW(risc_addr);
		mcp->mb[3] = 0;
		mcp->mb[4] = 0;
		ha->flags.using_lr_setting = 0;
		if (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
		    IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
			if (ql2xautodetectsfp) {
				if (ha->flags.detected_lr_sfp) {
					mcp->mb[4] |=
					    qla25xx_set_sfp_lr_dist(ha);
					ha->flags.using_lr_setting = 1;
				}
			} else {
				struct nvram_81xx *nv = ha->nvram;
				/* set LR distance if specified in nvram */
				if (nv->enhanced_features &
				    NEF_LR_DIST_ENABLE) {
					mcp->mb[4] |=
					    qla25xx_set_nvr_lr_dist(ha);
					ha->flags.using_lr_setting = 1;
				}
			}
		}

		if (ql2xnvmeenable && (IS_QLA27XX(ha) || IS_QLA28XX(ha)))
			mcp->mb[4] |= NVME_ENABLE_FLAG;

		if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
			struct nvram_81xx *nv = ha->nvram;
			/* set minimum speed if specified in nvram */
			if (nv->min_supported_speed >= 2 &&
			    nv->min_supported_speed <= 5) {
				mcp->mb[4] |= BIT_4;
				mcp->mb[11] |= nv->min_supported_speed & 0xF;
				mcp->out_mb |= MBX_11;
				mcp->in_mb |= BIT_5;
				vha->min_supported_speed =
				    nv->min_supported_speed;
			}
		}

		if (ha->flags.exlogins_enabled)
			mcp->mb[4] |= ENABLE_EXTENDED_LOGIN;

		if (ha->flags.exchoffld_enabled)
			mcp->mb[4] |= ENABLE_EXCHANGE_OFFLD;

		mcp->out_mb |= MBX_4|MBX_3|MBX_2|MBX_1;
		mcp->in_mb |= MBX_3 | MBX_2 | MBX_1;
	} else {
		mcp->mb[1] = LSW(risc_addr);
		mcp->out_mb |= MBX_1;
		if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
			mcp->mb[2] = 0;
			mcp->out_mb |= MBX_2;
		}
	}

	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1026,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
		return rval;
	}

	if (!IS_FWI2_CAPABLE(ha))
		goto done;

	ha->fw_ability_mask = mcp->mb[3] << 16 | mcp->mb[2];
	ql_dbg(ql_dbg_mbx, vha, 0x119a,
	    "fw_ability_mask=%x.\n", ha->fw_ability_mask);
	ql_dbg(ql_dbg_mbx, vha, 0x1027, "exchanges=%x.\n", mcp->mb[1]);
	if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
		ha->max_supported_speed = mcp->mb[2] & (BIT_0|BIT_1);
		ql_dbg(ql_dbg_mbx, vha, 0x119b, "max_supported_speed=%s.\n",
		    ha->max_supported_speed == 0 ? "16Gps" :
		    ha->max_supported_speed == 1 ? "32Gps" :
		    ha->max_supported_speed == 2 ? "64Gps" : "unknown");
		if (vha->min_supported_speed) {
			ha->min_supported_speed = mcp->mb[5] &
			    (BIT_0 | BIT_1 | BIT_2);
			ql_dbg(ql_dbg_mbx, vha, 0x119c,
			    "min_supported_speed=%s.\n",
			    ha->min_supported_speed == 6 ? "64Gps" :
			    ha->min_supported_speed == 5 ? "32Gps" :
			    ha->min_supported_speed == 4 ? "16Gps" :
			    ha->min_supported_speed == 3 ? "8Gps" :
			    ha->min_supported_speed == 2 ?
			    "4Gps" : "unknown");
		}
	}

done:
	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1028,
	    "Done %s.\n", __func__);

	return rval;
}

/*
 * qla_get_exlogin_status
 *	Get extended login status
 *	uses the memory offload control/status Mailbox
 *
 * Input:
 *	ha:		adapter state pointer.
 *	fwopt:		firmware options
 *
 * Returns:
 *	qla2x00 local function status
 *
 * Context:
 *	Kernel context.
 */
#define	FETCH_XLOGINS_STAT	0x8
int
qla_get_exlogin_status(scsi_qla_host_t *vha, uint16_t *buf_sz,
	uint16_t *ex_logins_cnt)
{
	int rval;
	mbx_cmd_t	mc;
	mbx_cmd_t	*mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118f,
	    "Entered %s\n", __func__);

	memset(mcp->mb, 0, sizeof(mcp->mb));
	mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
	mcp->mb[1] = FETCH_XLOGINS_STAT;
	mcp->out_mb = MBX_1|MBX_0;
	mcp->in_mb = MBX_10|MBX_4|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;

	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1115, "Failed=%x.\n", rval);
	} else {
		*buf_sz = mcp->mb[4];
		*ex_logins_cnt = mcp->mb[10];

		ql_log(ql_log_info, vha, 0x1190,
		    "buffer size 0x%x, exchange login count=%d\n",
		    mcp->mb[4], mcp->mb[10]);

		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1116,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla_set_exlogin_mem_cfg
 *	Set extended login memory configuration
 *	Mbx needs to be issued before init_cb is set
 *
 * Input:
 *	ha:		adapter state pointer.
 *	buffer:		buffer pointer
 *	phys_addr:	physical address of buffer
 *	size:		size of buffer
 *	TARGET_QUEUE_LOCK must be released
 *	ADAPTER_STATE_LOCK must be released
 *
 * Returns:
 *	qla2x00 local function status code.
 *
 * Context:
 *	Kernel context.
 */
#define CONFIG_XLOGINS_MEM	0x3
int
qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr)
{
	int rval;
	mbx_cmd_t	mc;
	mbx_cmd_t	*mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111a,
	    "Entered %s.\n", __func__);

	memset(mcp->mb, 0, sizeof(mcp->mb));
	mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
	mcp->mb[1] = CONFIG_XLOGINS_MEM;
	mcp->mb[2] = MSW(phys_addr);
	mcp->mb[3] = LSW(phys_addr);
	mcp->mb[6] = MSW(MSD(phys_addr));
	mcp->mb[7] = LSW(MSD(phys_addr));
	mcp->mb[8] = MSW(ha->exlogin_size);
	mcp->mb[9] = LSW(ha->exlogin_size);
	mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_11|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x111b, "Failed=%x.\n", rval);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla_get_exchoffld_status
 *	Get exchange offload status
 *	uses the memory offload control/status Mailbox
 *
 * Input:
 *	ha:		adapter state pointer.
 *	fwopt:		firmware options
 *
 * Returns:
 *	qla2x00 local function status
 *
 * Context:
 *	Kernel context.
 */
#define FETCH_XCHOFFLD_STAT	0x2
int
qla_get_exchoffld_status(scsi_qla_host_t *vha, uint16_t *buf_sz,
	uint16_t *ex_logins_cnt)
{
	int rval;
	mbx_cmd_t	mc;
	mbx_cmd_t	*mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1019,
	    "Entered %s\n", __func__);

	memset(mcp->mb, 0, sizeof(mcp->mb));
	mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
	mcp->mb[1] = FETCH_XCHOFFLD_STAT;
	mcp->out_mb = MBX_1|MBX_0;
	mcp->in_mb = MBX_10|MBX_4|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;

	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1155, "Failed=%x.\n", rval);
	} else {
		*buf_sz = mcp->mb[4];
		*ex_logins_cnt = mcp->mb[10];

		ql_log(ql_log_info, vha, 0x118e,
		    "buffer size 0x%x, exchange offload count=%d\n",
		    mcp->mb[4], mcp->mb[10]);

		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1156,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla_set_exchoffld_mem_cfg
 *	Set exchange offload memory configuration
 *	Mbx needs to be issued before init_cb is set
 *
 * Input:
 *	ha:		adapter state pointer.
 *	buffer:		buffer pointer
 *	phys_addr:	physical address of buffer
 *	size:		size of buffer
 *	TARGET_QUEUE_LOCK must be released
 *	ADAPTER_STATE_LOCK must be released
 *
 * Returns:
 *	qla2x00 local function status code.
 *
 * Context:
 *	Kernel context.
 */
#define CONFIG_XCHOFFLD_MEM	0x3
int
qla_set_exchoffld_mem_cfg(scsi_qla_host_t *vha)
{
	int rval;
	mbx_cmd_t	mc;
	mbx_cmd_t	*mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1157,
	    "Entered %s.\n", __func__);

	memset(mcp->mb, 0, sizeof(mcp->mb));
	mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
	mcp->mb[1] = CONFIG_XCHOFFLD_MEM;
	mcp->mb[2] = MSW(ha->exchoffld_buf_dma);
	mcp->mb[3] = LSW(ha->exchoffld_buf_dma);
	mcp->mb[6] = MSW(MSD(ha->exchoffld_buf_dma));
	mcp->mb[7] = LSW(MSD(ha->exchoffld_buf_dma));
	mcp->mb[8] = MSW(ha->exchoffld_size);
	mcp->mb[9] = LSW(ha->exchoffld_size);
	mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_11|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x1158, "Failed=%x.\n", rval);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1192,
		    "Done %s.\n", __func__);
	}

	return rval;
}

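/*
 * For reference, the four wrappers above all issue
 * MBC_GET_MEM_OFFLOAD_CNTRL_STAT with a sub-operation code in mb[1]:
 * FETCH_XLOGINS_STAT (0x8) and FETCH_XCHOFFLD_STAT (0x2) query the
 * required buffer size (mb[4]) and entry count (mb[10]), while
 * CONFIG_XLOGINS_MEM and CONFIG_XCHOFFLD_MEM (both 0x3) hand the
 * driver-allocated DMA buffer to the firmware and must be issued
 * before the init control block is sent.
 */
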
/*
 * qla2x00_get_fw_version
 *	Get firmware version.
 *
 * Input:
 *	ha:		adapter state pointer.
 *	major:		pointer for major number.
 *	minor:		pointer for minor number.
 *	subminor:	pointer for subminor number.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_get_fw_version(scsi_qla_host_t *vha)
{
	int		rval;
	mbx_cmd_t	mc;
	mbx_cmd_t	*mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1029,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_GET_FIRMWARE_VERSION;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	if (IS_QLA81XX(vha->hw) || IS_QLA8031(ha) || IS_QLA8044(ha))
		mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8;
	if (IS_FWI2_CAPABLE(ha))
		mcp->in_mb |= MBX_17|MBX_16|MBX_15;
	if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
		mcp->in_mb |=
		    MBX_25|MBX_24|MBX_23|MBX_22|MBX_21|MBX_20|MBX_19|MBX_18|
		    MBX_14|MBX_13|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7;

	mcp->flags = 0;
	mcp->tov = MBX_TOV_SECONDS;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS)
		goto failed;

	/* Return mailbox data. */
	ha->fw_major_version = mcp->mb[1];
	ha->fw_minor_version = mcp->mb[2];
	ha->fw_subminor_version = mcp->mb[3];
	ha->fw_attributes = mcp->mb[6];
	if (IS_QLA2100(vha->hw) || IS_QLA2200(vha->hw))
		ha->fw_memory_size = 0x1FFFF;		/* Defaults to 128KB. */
	else
		ha->fw_memory_size = (mcp->mb[5] << 16) | mcp->mb[4];

	if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
		ha->mpi_version[0] = mcp->mb[10] & 0xff;
		ha->mpi_version[1] = mcp->mb[11] >> 8;
		ha->mpi_version[2] = mcp->mb[11] & 0xff;
		ha->mpi_capabilities = (mcp->mb[12] << 16) | mcp->mb[13];
		ha->phy_version[0] = mcp->mb[8] & 0xff;
		ha->phy_version[1] = mcp->mb[9] >> 8;
		ha->phy_version[2] = mcp->mb[9] & 0xff;
	}

	if (IS_FWI2_CAPABLE(ha)) {
		ha->fw_attributes_h = mcp->mb[15];
		ha->fw_attributes_ext[0] = mcp->mb[16];
		ha->fw_attributes_ext[1] = mcp->mb[17];
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1139,
		    "%s: FW_attributes Upper: 0x%x, Lower: 0x%x.\n",
		    __func__, mcp->mb[15], mcp->mb[6]);
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x112f,
		    "%s: Ext_FwAttributes Upper: 0x%x, Lower: 0x%x.\n",
		    __func__, mcp->mb[17], mcp->mb[16]);

		if (ha->fw_attributes_h & 0x4)
			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118d,
			    "%s: Firmware supports Extended Login 0x%x\n",
			    __func__, ha->fw_attributes_h);

		if (ha->fw_attributes_h & 0x8)
			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1191,
			    "%s: Firmware supports Exchange Offload 0x%x\n",
			    __func__, ha->fw_attributes_h);

		/*
		 * FW supports nvme and driver load parameter requested nvme.
		 * BIT 26 of fw_attributes indicates NVMe support.
		 */
		if ((ha->fw_attributes_h &
		    (FW_ATTR_H_NVME | FW_ATTR_H_NVME_UPDATED)) &&
		    ql2xnvmeenable) {
			if (ha->fw_attributes_h & FW_ATTR_H_NVME_FBURST)
				vha->flags.nvme_first_burst = 1;

			vha->flags.nvme_enabled = 1;
			ql_log(ql_log_info, vha, 0xd302,
			    "%s: FC-NVMe is Enabled (0x%x)\n",
			    __func__, ha->fw_attributes_h);
		}
	}

	if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
		ha->serdes_version[0] = mcp->mb[7] & 0xff;
		ha->serdes_version[1] = mcp->mb[8] >> 8;
		ha->serdes_version[2] = mcp->mb[8] & 0xff;
		ha->mpi_version[0] = mcp->mb[10] & 0xff;
		ha->mpi_version[1] = mcp->mb[11] >> 8;
		ha->mpi_version[2] = mcp->mb[11] & 0xff;
		ha->pep_version[0] = mcp->mb[13] & 0xff;
		ha->pep_version[1] = mcp->mb[14] >> 8;
		ha->pep_version[2] = mcp->mb[14] & 0xff;
		ha->fw_shared_ram_start = (mcp->mb[19] << 16) | mcp->mb[18];
		ha->fw_shared_ram_end = (mcp->mb[21] << 16) | mcp->mb[20];
		ha->fw_ddr_ram_start = (mcp->mb[23] << 16) | mcp->mb[22];
		ha->fw_ddr_ram_end = (mcp->mb[25] << 16) | mcp->mb[24];
		if (IS_QLA28XX(ha)) {
			if (mcp->mb[16] & BIT_10) {
				ql_log(ql_log_info, vha, 0xffff,
				    "FW support secure flash updates\n");
				ha->flags.secure_fw = 1;
			}
		}
	}

failed:
	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x102a, "Failed=%x.\n", rval);
	} else {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102b,
		    "Done %s.\n", __func__);
	}
	return rval;
}

/*
 * qla2x00_get_fw_options
 *	Get firmware options.
 *
 * Input:
 *	ha = adapter block pointer.
 *	fwopt = pointer for firmware options.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102c,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_GET_FIRMWARE_OPTION;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x102d, "Failed=%x.\n", rval);
	} else {
		fwopts[0] = mcp->mb[0];
		fwopts[1] = mcp->mb[1];
		fwopts[2] = mcp->mb[2];
		fwopts[3] = mcp->mb[3];

		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102e,
		    "Done %s.\n", __func__);
	}

	return rval;
}

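/*
 * qla2x00_get_fw_options() above is a minimal example of the wrapper
 * pattern used throughout this file: fill mcp->mb[] with the request,
 * describe which registers are written/read with the MBX_n bitmaps in
 * out_mb/in_mb, set tov/flags, call qla2x00_mailbox_command(), then
 * pick up the results from mcp->mb[].  An illustrative sketch (not
 * driver code; "MBC_EXAMPLE" is a placeholder command):
 *
 *	mbx_cmd_t mc, *mcp = &mc;
 *
 *	mcp->mb[0] = MBC_EXAMPLE;
 *	mcp->out_mb = MBX_0;		// registers sent to firmware
 *	mcp->in_mb = MBX_1|MBX_0;	// registers read back on completion
 *	mcp->tov = MBX_TOV_SECONDS;
 *	mcp->flags = 0;
 *	rval = qla2x00_mailbox_command(vha, mcp);
 *	if (rval == QLA_SUCCESS)
 *		result = mcp->mb[1];
 */
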
/*
 * qla2x00_set_fw_options
 *	Set firmware options.
 *
 * Input:
 *	ha = adapter block pointer.
 *	fwopt = pointer for firmware options.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102f,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_SET_FIRMWARE_OPTION;
	mcp->mb[1] = fwopts[1];
	mcp->mb[2] = fwopts[2];
	mcp->mb[3] = fwopts[3];
	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_0;
	if (IS_FWI2_CAPABLE(vha->hw)) {
		mcp->in_mb |= MBX_1;
		mcp->mb[10] = fwopts[10];
		mcp->out_mb |= MBX_10;
	} else {
		mcp->mb[10] = fwopts[10];
		mcp->mb[11] = fwopts[11];
		mcp->mb[12] = 0;	/* Undocumented, but used */
		mcp->out_mb |= MBX_12|MBX_11|MBX_10;
	}
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	fwopts[0] = mcp->mb[0];

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x1030,
		    "Failed=%x (%x/%x).\n", rval, mcp->mb[0], mcp->mb[1]);
	} else {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1031,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2x00_mbx_reg_test
 *	Mailbox register wrap test.
 *
 * Input:
 *	ha = adapter block pointer.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_mbx_reg_test(scsi_qla_host_t *vha)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1032,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST;
	mcp->mb[1] = 0xAAAA;
	mcp->mb[2] = 0x5555;
	mcp->mb[3] = 0xAA55;
	mcp->mb[4] = 0x55AA;
	mcp->mb[5] = 0xA5A5;
	mcp->mb[6] = 0x5A5A;
	mcp->mb[7] = 0x2525;
	mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval == QLA_SUCCESS) {
		if (mcp->mb[1] != 0xAAAA || mcp->mb[2] != 0x5555 ||
		    mcp->mb[3] != 0xAA55 || mcp->mb[4] != 0x55AA)
			rval = QLA_FUNCTION_FAILED;
		if (mcp->mb[5] != 0xA5A5 || mcp->mb[6] != 0x5A5A ||
		    mcp->mb[7] != 0x2525)
			rval = QLA_FUNCTION_FAILED;
	}

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x1033, "Failed=%x.\n", rval);
	} else {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1034,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2x00_verify_checksum
 *	Verify firmware checksum.
 *
 * Input:
 *	ha = adapter block pointer.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1035,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_VERIFY_CHECKSUM;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_0;
	if (IS_FWI2_CAPABLE(vha->hw)) {
		mcp->mb[1] = MSW(risc_addr);
		mcp->mb[2] = LSW(risc_addr);
		mcp->out_mb |= MBX_2|MBX_1;
		mcp->in_mb |= MBX_2|MBX_1;
	} else {
		mcp->mb[1] = LSW(risc_addr);
		mcp->out_mb |= MBX_1;
		mcp->in_mb |= MBX_1;
	}

	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1036,
		    "Failed=%x chm sum=%x.\n", rval, IS_FWI2_CAPABLE(vha->hw) ?
		    (mcp->mb[2] << 16) | mcp->mb[1] : mcp->mb[1]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1037,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2x00_issue_iocb
 *	Issue IOCB using mailbox command
 *
 * Input:
 *	ha = adapter state pointer.
 *	buffer = buffer pointer.
 *	phys_addr = physical address of buffer.
 *	size = size of buffer.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
    dma_addr_t phys_addr, size_t size, uint32_t tov)
{
	int		rval;
	mbx_cmd_t	mc;
	mbx_cmd_t	*mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1038,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_IOCB_COMMAND_A64;
	mcp->mb[1] = 0;
	mcp->mb[2] = MSW(phys_addr);
	mcp->mb[3] = LSW(phys_addr);
	mcp->mb[6] = MSW(MSD(phys_addr));
	mcp->mb[7] = LSW(MSD(phys_addr));
	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_2|MBX_0;
	mcp->tov = tov;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x1039, "Failed=%x.\n", rval);
	} else {
		sts_entry_t *sts_entry = (sts_entry_t *) buffer;

		/* Mask reserved bits. */
		sts_entry->entry_status &=
		    IS_FWI2_CAPABLE(vha->hw) ? RF_MASK_24XX : RF_MASK;
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103a,
		    "Done %s.\n", __func__);
	}

	return rval;
}

int
qla2x00_issue_iocb(scsi_qla_host_t *vha, void *buffer, dma_addr_t phys_addr,
    size_t size)
{
	return qla2x00_issue_iocb_timeout(vha, buffer, phys_addr, size,
	    MBX_TOV_SECONDS);
}

/*
 * qla2x00_abort_command
 *	Abort command aborts a specified IOCB.
 *
 * Input:
 *	ha = adapter block pointer.
 *	sp = SB structure pointer.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_abort_command(srb_t *sp)
{
	unsigned long   flags = 0;
	int		rval;
	uint32_t	handle = 0;
	mbx_cmd_t	mc;
	mbx_cmd_t	*mcp = &mc;
	fc_port_t	*fcport = sp->fcport;
	scsi_qla_host_t *vha = fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103b,
	    "Entered %s.\n", __func__);

	if (vha->flags.qpairs_available && sp->qpair)
		req = sp->qpair->req;
	else
		req = vha->req;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
		if (req->outstanding_cmds[handle] == sp)
			break;
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (handle == req->num_outstanding_cmds) {
		/* command not found */
		return QLA_FUNCTION_FAILED;
	}

	mcp->mb[0] = MBC_ABORT_COMMAND;
	if (HAS_EXTENDED_IDS(ha))
		mcp->mb[1] = fcport->loop_id;
	else
		mcp->mb[1] = fcport->loop_id << 8;
	mcp->mb[2] = (uint16_t)handle;
	mcp->mb[3] = (uint16_t)(handle >> 16);
	mcp->mb[6] = (uint16_t)cmd->device->lun;
	mcp->out_mb = MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x103c, "Failed=%x.\n", rval);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103d,
		    "Done %s.\n", __func__);
	}

	return rval;
}

int
qla2x00_abort_target(struct fc_port *fcport, uint64_t l, int tag)
{
	int rval, rval2;
	mbx_cmd_t  mc;
	mbx_cmd_t  *mcp = &mc;
	scsi_qla_host_t *vha;

	vha = fcport->vha;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103e,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_ABORT_TARGET;
	mcp->out_mb = MBX_9|MBX_2|MBX_1|MBX_0;
	if (HAS_EXTENDED_IDS(vha->hw)) {
		mcp->mb[1] = fcport->loop_id;
		mcp->mb[10] = 0;
		mcp->out_mb |= MBX_10;
	} else {
		mcp->mb[1] = fcport->loop_id << 8;
	}
	mcp->mb[2] = vha->hw->loop_reset_delay;
	mcp->mb[9] = vha->vp_idx;

	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103f,
		    "Failed=%x.\n", rval);
	}

	/* Issue marker IOCB. */
	rval2 = qla2x00_marker(vha, vha->hw->base_qpair, fcport->loop_id, 0,
	    MK_SYNC_ID);
	if (rval2 != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1040,
		    "Failed to issue marker IOCB (%x).\n", rval2);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1041,
		    "Done %s.\n", __func__);
	}

	return rval;
}

int
qla2x00_lun_reset(struct fc_port *fcport, uint64_t l, int tag)
{
	int rval, rval2;
	mbx_cmd_t  mc;
	mbx_cmd_t  *mcp = &mc;
	scsi_qla_host_t *vha;

	vha = fcport->vha;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1042,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_LUN_RESET;
	mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
	if (HAS_EXTENDED_IDS(vha->hw))
		mcp->mb[1] = fcport->loop_id;
	else
		mcp->mb[1] = fcport->loop_id << 8;
	mcp->mb[2] = (u32)l;
	mcp->mb[3] = 0;
	mcp->mb[9] = vha->vp_idx;

	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1043, "Failed=%x.\n", rval);
	}

	/* Issue marker IOCB. */
	rval2 = qla2x00_marker(vha, vha->hw->base_qpair, fcport->loop_id, l,
	    MK_SYNC_ID_LUN);
	if (rval2 != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1044,
		    "Failed to issue marker IOCB (%x).\n", rval2);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1045,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2x00_get_adapter_id
 *	Get adapter ID and topology.
 *
 * Input:
 *	ha = adapter block pointer.
 *	id = pointer for loop ID.
 *	al_pa = pointer for AL_PA.
 *	area = pointer for area.
 *	domain = pointer for domain.
 *	top = pointer for topology.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
    uint8_t *area, uint8_t *domain, uint16_t *top, uint16_t *sw_cap)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1046,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID;
	mcp->mb[9] = vha->vp_idx;
	mcp->out_mb = MBX_9|MBX_0;
	mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	if (IS_CNA_CAPABLE(vha->hw))
		mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10;
	if (IS_FWI2_CAPABLE(vha->hw))
		mcp->in_mb |= MBX_19|MBX_18|MBX_17|MBX_16;
	if (IS_QLA27XX(vha->hw) || IS_QLA28XX(vha->hw))
		mcp->in_mb |= MBX_15;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (mcp->mb[0] == MBS_COMMAND_ERROR)
		rval = QLA_COMMAND_ERROR;
	else if (mcp->mb[0] == MBS_INVALID_COMMAND)
		rval = QLA_INVALID_COMMAND;

	/* Return data. */
	*id = mcp->mb[1];
	*al_pa = LSB(mcp->mb[2]);
	*area = MSB(mcp->mb[2]);
	*domain	= LSB(mcp->mb[3]);
	*top = mcp->mb[6];
	*sw_cap = mcp->mb[7];

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x1047, "Failed=%x.\n", rval);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1048,
		    "Done %s.\n", __func__);

		if (IS_CNA_CAPABLE(vha->hw)) {
			vha->fcoe_vlan_id = mcp->mb[9] & 0xfff;
			vha->fcoe_fcf_idx = mcp->mb[10];
			vha->fcoe_vn_port_mac[5] = mcp->mb[11] >> 8;
			vha->fcoe_vn_port_mac[4] = mcp->mb[11] & 0xff;
			vha->fcoe_vn_port_mac[3] = mcp->mb[12] >> 8;
			vha->fcoe_vn_port_mac[2] = mcp->mb[12] & 0xff;
			vha->fcoe_vn_port_mac[1] = mcp->mb[13] >> 8;
			vha->fcoe_vn_port_mac[0] = mcp->mb[13] & 0xff;
		}
		/* If FA-WWN supported */
		if (IS_FAWWN_CAPABLE(vha->hw)) {
			if (mcp->mb[7] & BIT_14) {
				vha->port_name[0] = MSB(mcp->mb[16]);
				vha->port_name[1] = LSB(mcp->mb[16]);
				vha->port_name[2] = MSB(mcp->mb[17]);
				vha->port_name[3] = LSB(mcp->mb[17]);
				vha->port_name[4] = MSB(mcp->mb[18]);
				vha->port_name[5] = LSB(mcp->mb[18]);
				vha->port_name[6] = MSB(mcp->mb[19]);
				vha->port_name[7] = LSB(mcp->mb[19]);
				fc_host_port_name(vha->host) =
				    wwn_to_u64(vha->port_name);
				ql_dbg(ql_dbg_mbx, vha, 0x10ca,
				    "FA-WWN acquired %016llx\n",
				    wwn_to_u64(vha->port_name));
			}
		}

		if (IS_QLA27XX(vha->hw) || IS_QLA28XX(vha->hw))
			vha->bbcr = mcp->mb[15];
	}

	return rval;
}

/*
 * qla2x00_get_retry_cnt
 *	Get current firmware login retry count and delay.
 *
 * Input:
 *	ha = adapter block pointer.
 *	retry_cnt = pointer to login retry count.
 *	tov = pointer to login timeout value.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov,
    uint16_t *r_a_tov)
{
	int rval;
	uint16_t ratov;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1049,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_GET_RETRY_COUNT;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x104a,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		/* Convert returned data and check our values. */
		*r_a_tov = mcp->mb[3] / 2;
		ratov = (mcp->mb[3]/2) / 10;  /* mb[3] value is in 100ms */
		if (mcp->mb[1] * ratov > (*retry_cnt) * (*tov)) {
			/* Update to the larger values */
			*retry_cnt = (uint8_t)mcp->mb[1];
			*tov = ratov;
		}

		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104b,
		    "Done %s mb3=%d ratov=%d.\n", __func__, mcp->mb[3], ratov);
	}

	return rval;
}

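/*
 * Worked example of the conversion above (illustrative, assuming mb[3]
 * is reported in 100 ms units as noted in the code): mb[3] = 200 gives
 * *r_a_tov = 200 / 2 = 100 and ratov = 100 / 10 = 10 seconds, which is
 * then compared against retry_cnt * tov.
 */
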
/*
 * qla2x00_init_firmware
 *	Initialize adapter firmware.
 *
 * Input:
 *	ha = adapter block pointer.
 *	dptr = Initialization control block pointer.
 *	size = size of initialization control block.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104c,
	    "Entered %s.\n", __func__);

	if (IS_P3P_TYPE(ha) && ql2xdbwr)
		qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr,
		    (0x04 | (ha->portnum << 5) | (0 << 8) | (0 << 16)));

	if (ha->flags.npiv_supported)
		mcp->mb[0] = MBC_MID_INITIALIZE_FIRMWARE;
	else
		mcp->mb[0] = MBC_INITIALIZE_FIRMWARE;

	mcp->mb[1] = 0;
	mcp->mb[2] = MSW(ha->init_cb_dma);
	mcp->mb[3] = LSW(ha->init_cb_dma);
	mcp->mb[6] = MSW(MSD(ha->init_cb_dma));
	mcp->mb[7] = LSW(MSD(ha->init_cb_dma));
	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	if (ha->ex_init_cb && ha->ex_init_cb->ex_version) {
		mcp->mb[1] = BIT_0;
		mcp->mb[10] = MSW(ha->ex_init_cb_dma);
		mcp->mb[11] = LSW(ha->ex_init_cb_dma);
		mcp->mb[12] = MSW(MSD(ha->ex_init_cb_dma));
		mcp->mb[13] = LSW(MSD(ha->ex_init_cb_dma));
		mcp->mb[14] = sizeof(*ha->ex_init_cb);
		mcp->out_mb |= MBX_14|MBX_13|MBX_12|MBX_11|MBX_10;
	}
	/* 1 and 2 should normally be captured. */
	mcp->in_mb = MBX_2|MBX_1|MBX_0;
	if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
		/* mb3 is additional info about the installed SFP. */
		mcp->in_mb |= MBX_3;
	mcp->buf_size = size;
	mcp->flags = MBX_DMA_OUT;
	mcp->tov = MBX_TOV_SECONDS;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x104d,
		    "Failed=%x mb[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x.\n",
		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3]);
		if (ha->init_cb) {
			ql_dbg(ql_dbg_mbx, vha, 0x104d, "init_cb:\n");
			ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha,
			    0x0104d, ha->init_cb, sizeof(*ha->init_cb));
		}
		if (ha->ex_init_cb && ha->ex_init_cb->ex_version) {
			ql_dbg(ql_dbg_mbx, vha, 0x104d, "ex_init_cb:\n");
			ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha,
			    0x0104d, ha->ex_init_cb, sizeof(*ha->ex_init_cb));
		}
	} else {
		if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
			if (mcp->mb[2] == 6 || mcp->mb[3] == 2)
				ql_dbg(ql_dbg_mbx, vha, 0x119d,
				    "Invalid SFP/Validation Failed\n");
		}
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104e,
		    "Done %s.\n", __func__);
	}

	return rval;
}


/*
 * qla2x00_get_port_database
 *	Issue normal/enhanced get port database mailbox command
 *	and copy device name as necessary.
 *
 * Input:
 *	ha = adapter state pointer.
 *	dev = structure pointer.
 *	opt = enhanced cmd option byte.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	port_database_t *pd;
	struct port_database_24xx *pd24;
	dma_addr_t pd_dma;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104f,
	    "Entered %s.\n", __func__);

	pd24 = NULL;
	pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
	if (pd == NULL) {
		ql_log(ql_log_warn, vha, 0x1050,
		    "Failed to allocate port database structure.\n");
		fcport->query = 0;
		return QLA_MEMORY_ALLOC_FAILED;
	}

	mcp->mb[0] = MBC_GET_PORT_DATABASE;
	if (opt != 0 && !IS_FWI2_CAPABLE(ha))
		mcp->mb[0] = MBC_ENHANCED_GET_PORT_DATABASE;
	mcp->mb[2] = MSW(pd_dma);
	mcp->mb[3] = LSW(pd_dma);
	mcp->mb[6] = MSW(MSD(pd_dma));
	mcp->mb[7] = LSW(MSD(pd_dma));
	mcp->mb[9] = vha->vp_idx;
	mcp->out_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
	mcp->in_mb = MBX_0;
	if (IS_FWI2_CAPABLE(ha)) {
		mcp->mb[1] = fcport->loop_id;
		mcp->mb[10] = opt;
		mcp->out_mb |= MBX_10|MBX_1;
		mcp->in_mb |= MBX_1;
	} else if (HAS_EXTENDED_IDS(ha)) {
		mcp->mb[1] = fcport->loop_id;
		mcp->mb[10] = opt;
		mcp->out_mb |= MBX_10|MBX_1;
	} else {
		mcp->mb[1] = fcport->loop_id << 8 | opt;
		mcp->out_mb |= MBX_1;
	}
	mcp->buf_size = IS_FWI2_CAPABLE(ha) ?
	    PORT_DATABASE_24XX_SIZE : PORT_DATABASE_SIZE;
	mcp->flags = MBX_DMA_IN;
	mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS)
		goto gpd_error_out;

	if (IS_FWI2_CAPABLE(ha)) {
		uint64_t zero = 0;
		u8 current_login_state, last_login_state;

		pd24 = (struct port_database_24xx *) pd;

		/* Check for logged in state. */
		if (fcport->fc4f_nvme) {
			current_login_state = pd24->current_login_state >> 4;
			last_login_state = pd24->last_login_state >> 4;
		} else {
			current_login_state = pd24->current_login_state & 0xf;
			last_login_state = pd24->last_login_state & 0xf;
		}
		fcport->current_login_state = pd24->current_login_state;
		fcport->last_login_state = pd24->last_login_state;

		/* Check for logged in state. */
		if (current_login_state != PDS_PRLI_COMPLETE &&
		    last_login_state != PDS_PRLI_COMPLETE) {
			ql_dbg(ql_dbg_mbx, vha, 0x119a,
			    "Unable to verify login-state (%x/%x) for loop_id %x.\n",
			    current_login_state, last_login_state,
			    fcport->loop_id);
			rval = QLA_FUNCTION_FAILED;

			if (!fcport->query)
				goto gpd_error_out;
		}

		if (fcport->loop_id == FC_NO_LOOP_ID ||
		    (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
		     memcmp(fcport->port_name, pd24->port_name, 8))) {
			/* We lost the device mid way. */
			rval = QLA_NOT_LOGGED_IN;
			goto gpd_error_out;
		}

		/* Names are little-endian. */
		memcpy(fcport->node_name, pd24->node_name, WWN_SIZE);
		memcpy(fcport->port_name, pd24->port_name, WWN_SIZE);

		/* Get port_id of device. */
		fcport->d_id.b.domain = pd24->port_id[0];
		fcport->d_id.b.area = pd24->port_id[1];
		fcport->d_id.b.al_pa = pd24->port_id[2];
		fcport->d_id.b.rsvd_1 = 0;

		/* If not target must be initiator or unknown type. */
*/ 1976 if ((pd24->prli_svc_param_word_3[0] & BIT_4) == 0) 1977 fcport->port_type = FCT_INITIATOR; 1978 else 1979 fcport->port_type = FCT_TARGET; 1980 1981 /* Passback COS information. */ 1982 fcport->supported_classes = (pd24->flags & PDF_CLASS_2) ? 1983 FC_COS_CLASS2 : FC_COS_CLASS3; 1984 1985 if (pd24->prli_svc_param_word_3[0] & BIT_7) 1986 fcport->flags |= FCF_CONF_COMP_SUPPORTED; 1987 } else { 1988 uint64_t zero = 0; 1989 1990 /* Check for logged in state. */ 1991 if (pd->master_state != PD_STATE_PORT_LOGGED_IN && 1992 pd->slave_state != PD_STATE_PORT_LOGGED_IN) { 1993 ql_dbg(ql_dbg_mbx, vha, 0x100a, 1994 "Unable to verify login-state (%x/%x) - " 1995 "portid=%02x%02x%02x.\n", pd->master_state, 1996 pd->slave_state, fcport->d_id.b.domain, 1997 fcport->d_id.b.area, fcport->d_id.b.al_pa); 1998 rval = QLA_FUNCTION_FAILED; 1999 goto gpd_error_out; 2000 } 2001 2002 if (fcport->loop_id == FC_NO_LOOP_ID || 2003 (memcmp(fcport->port_name, (uint8_t *)&zero, 8) && 2004 memcmp(fcport->port_name, pd->port_name, 8))) { 2005 /* We lost the device mid way. */ 2006 rval = QLA_NOT_LOGGED_IN; 2007 goto gpd_error_out; 2008 } 2009 2010 /* Names are little-endian. */ 2011 memcpy(fcport->node_name, pd->node_name, WWN_SIZE); 2012 memcpy(fcport->port_name, pd->port_name, WWN_SIZE); 2013 2014 /* Get port_id of device. */ 2015 fcport->d_id.b.domain = pd->port_id[0]; 2016 fcport->d_id.b.area = pd->port_id[3]; 2017 fcport->d_id.b.al_pa = pd->port_id[2]; 2018 fcport->d_id.b.rsvd_1 = 0; 2019 2020 /* If not target must be initiator or unknown type. */ 2021 if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0) 2022 fcport->port_type = FCT_INITIATOR; 2023 else 2024 fcport->port_type = FCT_TARGET; 2025 2026 /* Passback COS information. */ 2027 fcport->supported_classes = (pd->options & BIT_4) ? 2028 FC_COS_CLASS2 : FC_COS_CLASS3; 2029 } 2030 2031 gpd_error_out: 2032 dma_pool_free(ha->s_dma_pool, pd, pd_dma); 2033 fcport->query = 0; 2034 2035 if (rval != QLA_SUCCESS) { 2036 ql_dbg(ql_dbg_mbx, vha, 0x1052, 2037 "Failed=%x mb[0]=%x mb[1]=%x.\n", rval, 2038 mcp->mb[0], mcp->mb[1]); 2039 } else { 2040 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1053, 2041 "Done %s.\n", __func__); 2042 } 2043 2044 return rval; 2045 } 2046 2047 /* 2048 * qla2x00_get_firmware_state 2049 * Get adapter firmware state. 2050 * 2051 * Input: 2052 * ha = adapter block pointer. 2053 * dptr = pointer for firmware state. 2054 * TARGET_QUEUE_LOCK must be released. 2055 * ADAPTER_STATE_LOCK must be released. 2056 * 2057 * Returns: 2058 * qla2x00 local function return status code. 2059 * 2060 * Context: 2061 * Kernel context. 2062 */ 2063 int 2064 qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states) 2065 { 2066 int rval; 2067 mbx_cmd_t mc; 2068 mbx_cmd_t *mcp = &mc; 2069 struct qla_hw_data *ha = vha->hw; 2070 2071 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1054, 2072 "Entered %s.\n", __func__); 2073 2074 mcp->mb[0] = MBC_GET_FIRMWARE_STATE; 2075 mcp->out_mb = MBX_0; 2076 if (IS_FWI2_CAPABLE(vha->hw)) 2077 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 2078 else 2079 mcp->in_mb = MBX_1|MBX_0; 2080 mcp->tov = MBX_TOV_SECONDS; 2081 mcp->flags = 0; 2082 rval = qla2x00_mailbox_command(vha, mcp); 2083 2084 /* Return firmware states. 
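 * states[0] always comes from mb[1]; on FWI-2 capable adapters mb[2]
 * through mb[6] are copied into states[1]-states[5] as well (mb[3]
 * carries SFP info and mb[6] DPORT status, as noted below).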
*/ 2085 states[0] = mcp->mb[1]; 2086 if (IS_FWI2_CAPABLE(vha->hw)) { 2087 states[1] = mcp->mb[2]; 2088 states[2] = mcp->mb[3]; /* SFP info */ 2089 states[3] = mcp->mb[4]; 2090 states[4] = mcp->mb[5]; 2091 states[5] = mcp->mb[6]; /* DPORT status */ 2092 } 2093 2094 if (rval != QLA_SUCCESS) { 2095 /*EMPTY*/ 2096 ql_dbg(ql_dbg_mbx, vha, 0x1055, "Failed=%x.\n", rval); 2097 } else { 2098 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 2099 if (mcp->mb[2] == 6 || mcp->mb[3] == 2) 2100 ql_dbg(ql_dbg_mbx, vha, 0x119e, 2101 "Invalid SFP/Validation Failed\n"); 2102 } 2103 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1056, 2104 "Done %s.\n", __func__); 2105 } 2106 2107 return rval; 2108 } 2109 2110 /* 2111 * qla2x00_get_port_name 2112 * Issue get port name mailbox command. 2113 * Returned name is in big endian format. 2114 * 2115 * Input: 2116 * ha = adapter block pointer. 2117 * loop_id = loop ID of device. 2118 * name = pointer for name. 2119 * TARGET_QUEUE_LOCK must be released. 2120 * ADAPTER_STATE_LOCK must be released. 2121 * 2122 * Returns: 2123 * qla2x00 local function return status code. 2124 * 2125 * Context: 2126 * Kernel context. 2127 */ 2128 int 2129 qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name, 2130 uint8_t opt) 2131 { 2132 int rval; 2133 mbx_cmd_t mc; 2134 mbx_cmd_t *mcp = &mc; 2135 2136 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1057, 2137 "Entered %s.\n", __func__); 2138 2139 mcp->mb[0] = MBC_GET_PORT_NAME; 2140 mcp->mb[9] = vha->vp_idx; 2141 mcp->out_mb = MBX_9|MBX_1|MBX_0; 2142 if (HAS_EXTENDED_IDS(vha->hw)) { 2143 mcp->mb[1] = loop_id; 2144 mcp->mb[10] = opt; 2145 mcp->out_mb |= MBX_10; 2146 } else { 2147 mcp->mb[1] = loop_id << 8 | opt; 2148 } 2149 2150 mcp->in_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 2151 mcp->tov = MBX_TOV_SECONDS; 2152 mcp->flags = 0; 2153 rval = qla2x00_mailbox_command(vha, mcp); 2154 2155 if (rval != QLA_SUCCESS) { 2156 /*EMPTY*/ 2157 ql_dbg(ql_dbg_mbx, vha, 0x1058, "Failed=%x.\n", rval); 2158 } else { 2159 if (name != NULL) { 2160 /* This function returns name in big endian. */ 2161 name[0] = MSB(mcp->mb[2]); 2162 name[1] = LSB(mcp->mb[2]); 2163 name[2] = MSB(mcp->mb[3]); 2164 name[3] = LSB(mcp->mb[3]); 2165 name[4] = MSB(mcp->mb[6]); 2166 name[5] = LSB(mcp->mb[6]); 2167 name[6] = MSB(mcp->mb[7]); 2168 name[7] = LSB(mcp->mb[7]); 2169 } 2170 2171 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1059, 2172 "Done %s.\n", __func__); 2173 } 2174 2175 return rval; 2176 } 2177 2178 /* 2179 * qla24xx_link_initialization 2180 * Issue link initialization mailbox command. 2181 * 2182 * Input: 2183 * ha = adapter block pointer. 2184 * TARGET_QUEUE_LOCK must be released. 2185 * ADAPTER_STATE_LOCK must be released. 2186 * 2187 * Returns: 2188 * qla2x00 local function return status code. 2189 * 2190 * Context: 2191 * Kernel context. 
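 *
 * Example (hypothetical sketch, not a documented call sequence):
 * re-initialize the link after a configuration change and fall back
 * to a full ISP abort if the mailbox command fails:
 *
 *	if (qla24xx_link_initialize(vha) != QLA_SUCCESS)
 *		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);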
2192 */ 2193 int 2194 qla24xx_link_initialize(scsi_qla_host_t *vha) 2195 { 2196 int rval; 2197 mbx_cmd_t mc; 2198 mbx_cmd_t *mcp = &mc; 2199 2200 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1152, 2201 "Entered %s.\n", __func__); 2202 2203 if (!IS_FWI2_CAPABLE(vha->hw) || IS_CNA_CAPABLE(vha->hw)) 2204 return QLA_FUNCTION_FAILED; 2205 2206 mcp->mb[0] = MBC_LINK_INITIALIZATION; 2207 mcp->mb[1] = BIT_4; 2208 if (vha->hw->operating_mode == LOOP) 2209 mcp->mb[1] |= BIT_6; 2210 else 2211 mcp->mb[1] |= BIT_5; 2212 mcp->mb[2] = 0; 2213 mcp->mb[3] = 0; 2214 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 2215 mcp->in_mb = MBX_0; 2216 mcp->tov = MBX_TOV_SECONDS; 2217 mcp->flags = 0; 2218 rval = qla2x00_mailbox_command(vha, mcp); 2219 2220 if (rval != QLA_SUCCESS) { 2221 ql_dbg(ql_dbg_mbx, vha, 0x1153, "Failed=%x.\n", rval); 2222 } else { 2223 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1154, 2224 "Done %s.\n", __func__); 2225 } 2226 2227 return rval; 2228 } 2229 2230 /* 2231 * qla2x00_lip_reset 2232 * Issue LIP reset mailbox command. 2233 * 2234 * Input: 2235 * ha = adapter block pointer. 2236 * TARGET_QUEUE_LOCK must be released. 2237 * ADAPTER_STATE_LOCK must be released. 2238 * 2239 * Returns: 2240 * qla2x00 local function return status code. 2241 * 2242 * Context: 2243 * Kernel context. 2244 */ 2245 int 2246 qla2x00_lip_reset(scsi_qla_host_t *vha) 2247 { 2248 int rval; 2249 mbx_cmd_t mc; 2250 mbx_cmd_t *mcp = &mc; 2251 2252 ql_dbg(ql_dbg_disc, vha, 0x105a, 2253 "Entered %s.\n", __func__); 2254 2255 if (IS_CNA_CAPABLE(vha->hw)) { 2256 /* Logout across all FCFs. */ 2257 mcp->mb[0] = MBC_LIP_FULL_LOGIN; 2258 mcp->mb[1] = BIT_1; 2259 mcp->mb[2] = 0; 2260 mcp->out_mb = MBX_2|MBX_1|MBX_0; 2261 } else if (IS_FWI2_CAPABLE(vha->hw)) { 2262 mcp->mb[0] = MBC_LIP_FULL_LOGIN; 2263 mcp->mb[1] = BIT_4; 2264 mcp->mb[2] = 0; 2265 mcp->mb[3] = vha->hw->loop_reset_delay; 2266 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 2267 } else { 2268 mcp->mb[0] = MBC_LIP_RESET; 2269 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 2270 if (HAS_EXTENDED_IDS(vha->hw)) { 2271 mcp->mb[1] = 0x00ff; 2272 mcp->mb[10] = 0; 2273 mcp->out_mb |= MBX_10; 2274 } else { 2275 mcp->mb[1] = 0xff00; 2276 } 2277 mcp->mb[2] = vha->hw->loop_reset_delay; 2278 mcp->mb[3] = 0; 2279 } 2280 mcp->in_mb = MBX_0; 2281 mcp->tov = MBX_TOV_SECONDS; 2282 mcp->flags = 0; 2283 rval = qla2x00_mailbox_command(vha, mcp); 2284 2285 if (rval != QLA_SUCCESS) { 2286 /*EMPTY*/ 2287 ql_dbg(ql_dbg_mbx, vha, 0x105b, "Failed=%x.\n", rval); 2288 } else { 2289 /*EMPTY*/ 2290 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105c, 2291 "Done %s.\n", __func__); 2292 } 2293 2294 return rval; 2295 } 2296 2297 /* 2298 * qla2x00_send_sns 2299 * Send SNS command. 2300 * 2301 * Input: 2302 * ha = adapter block pointer. 2303 * sns = pointer for command. 2304 * cmd_size = command size. 2305 * buf_size = response/command size. 2306 * TARGET_QUEUE_LOCK must be released. 2307 * ADAPTER_STATE_LOCK must be released. 2308 * 2309 * Returns: 2310 * qla2x00 local function return status code. 2311 * 2312 * Context: 2313 * Kernel context. 
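 *
 * Note: the buffer at sns_phys_address carries the outgoing SNS
 * request (length passed in mb[1] as cmd_size) and is also mapped for
 * the firmware's response, which is why the command is issued with
 * both MBX_DMA_OUT and MBX_DMA_IN set and buf_size describing the
 * whole buffer.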
2314 */ 2315 int 2316 qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address, 2317 uint16_t cmd_size, size_t buf_size) 2318 { 2319 int rval; 2320 mbx_cmd_t mc; 2321 mbx_cmd_t *mcp = &mc; 2322 2323 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105d, 2324 "Entered %s.\n", __func__); 2325 2326 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105e, 2327 "Retry cnt=%d ratov=%d total tov=%d.\n", 2328 vha->hw->retry_count, vha->hw->login_timeout, mcp->tov); 2329 2330 mcp->mb[0] = MBC_SEND_SNS_COMMAND; 2331 mcp->mb[1] = cmd_size; 2332 mcp->mb[2] = MSW(sns_phys_address); 2333 mcp->mb[3] = LSW(sns_phys_address); 2334 mcp->mb[6] = MSW(MSD(sns_phys_address)); 2335 mcp->mb[7] = LSW(MSD(sns_phys_address)); 2336 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 2337 mcp->in_mb = MBX_0|MBX_1; 2338 mcp->buf_size = buf_size; 2339 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN; 2340 mcp->tov = (vha->hw->login_timeout * 2) + (vha->hw->login_timeout / 2); 2341 rval = qla2x00_mailbox_command(vha, mcp); 2342 2343 if (rval != QLA_SUCCESS) { 2344 /*EMPTY*/ 2345 ql_dbg(ql_dbg_mbx, vha, 0x105f, 2346 "Failed=%x mb[0]=%x mb[1]=%x.\n", 2347 rval, mcp->mb[0], mcp->mb[1]); 2348 } else { 2349 /*EMPTY*/ 2350 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1060, 2351 "Done %s.\n", __func__); 2352 } 2353 2354 return rval; 2355 } 2356 2357 int 2358 qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, 2359 uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt) 2360 { 2361 int rval; 2362 2363 struct logio_entry_24xx *lg; 2364 dma_addr_t lg_dma; 2365 uint32_t iop[2]; 2366 struct qla_hw_data *ha = vha->hw; 2367 struct req_que *req; 2368 2369 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1061, 2370 "Entered %s.\n", __func__); 2371 2372 if (vha->vp_idx && vha->qpair) 2373 req = vha->qpair->req; 2374 else 2375 req = ha->req_q_map[0]; 2376 2377 lg = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma); 2378 if (lg == NULL) { 2379 ql_log(ql_log_warn, vha, 0x1062, 2380 "Failed to allocate login IOCB.\n"); 2381 return QLA_MEMORY_ALLOC_FAILED; 2382 } 2383 2384 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE; 2385 lg->entry_count = 1; 2386 lg->handle = MAKE_HANDLE(req->id, lg->handle); 2387 lg->nport_handle = cpu_to_le16(loop_id); 2388 lg->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI); 2389 if (opt & BIT_0) 2390 lg->control_flags |= cpu_to_le16(LCF_COND_PLOGI); 2391 if (opt & BIT_1) 2392 lg->control_flags |= cpu_to_le16(LCF_SKIP_PRLI); 2393 lg->port_id[0] = al_pa; 2394 lg->port_id[1] = area; 2395 lg->port_id[2] = domain; 2396 lg->vp_index = vha->vp_idx; 2397 rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0, 2398 (ha->r_a_tov / 10 * 2) + 2); 2399 if (rval != QLA_SUCCESS) { 2400 ql_dbg(ql_dbg_mbx, vha, 0x1063, 2401 "Failed to issue login IOCB (%x).\n", rval); 2402 } else if (lg->entry_status != 0) { 2403 ql_dbg(ql_dbg_mbx, vha, 0x1064, 2404 "Failed to complete IOCB -- error status (%x).\n", 2405 lg->entry_status); 2406 rval = QLA_FUNCTION_FAILED; 2407 } else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) { 2408 iop[0] = le32_to_cpu(lg->io_parameter[0]); 2409 iop[1] = le32_to_cpu(lg->io_parameter[1]); 2410 2411 ql_dbg(ql_dbg_mbx, vha, 0x1065, 2412 "Failed to complete IOCB -- completion status (%x) " 2413 "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status), 2414 iop[0], iop[1]); 2415 2416 switch (iop[0]) { 2417 case LSC_SCODE_PORTID_USED: 2418 mb[0] = MBS_PORT_ID_USED; 2419 mb[1] = LSW(iop[1]); 2420 break; 2421 case LSC_SCODE_NPORT_USED: 2422 mb[0] = MBS_LOOP_ID_USED; 2423 break; 2424 case LSC_SCODE_NOLINK: 2425 case LSC_SCODE_NOIOCB: 
2426 case LSC_SCODE_NOXCB: 2427 case LSC_SCODE_CMD_FAILED: 2428 case LSC_SCODE_NOFABRIC: 2429 case LSC_SCODE_FW_NOT_READY: 2430 case LSC_SCODE_NOT_LOGGED_IN: 2431 case LSC_SCODE_NOPCB: 2432 case LSC_SCODE_ELS_REJECT: 2433 case LSC_SCODE_CMD_PARAM_ERR: 2434 case LSC_SCODE_NONPORT: 2435 case LSC_SCODE_LOGGED_IN: 2436 case LSC_SCODE_NOFLOGI_ACC: 2437 default: 2438 mb[0] = MBS_COMMAND_ERROR; 2439 break; 2440 } 2441 } else { 2442 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1066, 2443 "Done %s.\n", __func__); 2444 2445 iop[0] = le32_to_cpu(lg->io_parameter[0]); 2446 2447 mb[0] = MBS_COMMAND_COMPLETE; 2448 mb[1] = 0; 2449 if (iop[0] & BIT_4) { 2450 if (iop[0] & BIT_8) 2451 mb[1] |= BIT_1; 2452 } else 2453 mb[1] = BIT_0; 2454 2455 /* Passback COS information. */ 2456 mb[10] = 0; 2457 if (lg->io_parameter[7] || lg->io_parameter[8]) 2458 mb[10] |= BIT_0; /* Class 2. */ 2459 if (lg->io_parameter[9] || lg->io_parameter[10]) 2460 mb[10] |= BIT_1; /* Class 3. */ 2461 if (lg->io_parameter[0] & cpu_to_le32(BIT_7)) 2462 mb[10] |= BIT_7; /* Confirmed Completion 2463 * Allowed 2464 */ 2465 } 2466 2467 dma_pool_free(ha->s_dma_pool, lg, lg_dma); 2468 2469 return rval; 2470 } 2471 2472 /* 2473 * qla2x00_login_fabric 2474 * Issue login fabric port mailbox command. 2475 * 2476 * Input: 2477 * ha = adapter block pointer. 2478 * loop_id = device loop ID. 2479 * domain = device domain. 2480 * area = device area. 2481 * al_pa = device AL_PA. 2482 * status = pointer for return status. 2483 * opt = command options. 2484 * TARGET_QUEUE_LOCK must be released. 2485 * ADAPTER_STATE_LOCK must be released. 2486 * 2487 * Returns: 2488 * qla2x00 local function return status code. 2489 * 2490 * Context: 2491 * Kernel context. 2492 */ 2493 int 2494 qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, 2495 uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt) 2496 { 2497 int rval; 2498 mbx_cmd_t mc; 2499 mbx_cmd_t *mcp = &mc; 2500 struct qla_hw_data *ha = vha->hw; 2501 2502 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1067, 2503 "Entered %s.\n", __func__); 2504 2505 mcp->mb[0] = MBC_LOGIN_FABRIC_PORT; 2506 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 2507 if (HAS_EXTENDED_IDS(ha)) { 2508 mcp->mb[1] = loop_id; 2509 mcp->mb[10] = opt; 2510 mcp->out_mb |= MBX_10; 2511 } else { 2512 mcp->mb[1] = (loop_id << 8) | opt; 2513 } 2514 mcp->mb[2] = domain; 2515 mcp->mb[3] = area << 8 | al_pa; 2516 2517 mcp->in_mb = MBX_7|MBX_6|MBX_2|MBX_1|MBX_0; 2518 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2); 2519 mcp->flags = 0; 2520 rval = qla2x00_mailbox_command(vha, mcp); 2521 2522 /* Return mailbox statuses. */ 2523 if (mb != NULL) { 2524 mb[0] = mcp->mb[0]; 2525 mb[1] = mcp->mb[1]; 2526 mb[2] = mcp->mb[2]; 2527 mb[6] = mcp->mb[6]; 2528 mb[7] = mcp->mb[7]; 2529 /* COS retrieved from Get-Port-Database mailbox command. */ 2530 mb[10] = 0; 2531 } 2532 2533 if (rval != QLA_SUCCESS) { 2534 /* RLU tmp code: need to change main mailbox_command function to 2535 * return ok even when the mailbox completion value is not 2536 * SUCCESS. The caller needs to be responsible to interpret 2537 * the return values of this mailbox command if we're not 2538 * to change too much of the existing code. 
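 * The mb[0] values tested below are firmware mailbox completion
 * statuses (invalid command, host interface error, test failed,
 * command error and command parameter error); they are handed back
 * to the caller through mb[0] while rval is forced to QLA_SUCCESS.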
2539 */ 2540 if (mcp->mb[0] == 0x4001 || mcp->mb[0] == 0x4002 || 2541 mcp->mb[0] == 0x4003 || mcp->mb[0] == 0x4005 || 2542 mcp->mb[0] == 0x4006) 2543 rval = QLA_SUCCESS; 2544 2545 /*EMPTY*/ 2546 ql_dbg(ql_dbg_mbx, vha, 0x1068, 2547 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", 2548 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); 2549 } else { 2550 /*EMPTY*/ 2551 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1069, 2552 "Done %s.\n", __func__); 2553 } 2554 2555 return rval; 2556 } 2557 2558 /* 2559 * qla2x00_login_local_device 2560 * Issue login loop port mailbox command. 2561 * 2562 * Input: 2563 * ha = adapter block pointer. 2564 * loop_id = device loop ID. 2565 * opt = command options. 2566 * 2567 * Returns: 2568 * Return status code. 2569 * 2570 * Context: 2571 * Kernel context. 2572 * 2573 */ 2574 int 2575 qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport, 2576 uint16_t *mb_ret, uint8_t opt) 2577 { 2578 int rval; 2579 mbx_cmd_t mc; 2580 mbx_cmd_t *mcp = &mc; 2581 struct qla_hw_data *ha = vha->hw; 2582 2583 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106a, 2584 "Entered %s.\n", __func__); 2585 2586 if (IS_FWI2_CAPABLE(ha)) 2587 return qla24xx_login_fabric(vha, fcport->loop_id, 2588 fcport->d_id.b.domain, fcport->d_id.b.area, 2589 fcport->d_id.b.al_pa, mb_ret, opt); 2590 2591 mcp->mb[0] = MBC_LOGIN_LOOP_PORT; 2592 if (HAS_EXTENDED_IDS(ha)) 2593 mcp->mb[1] = fcport->loop_id; 2594 else 2595 mcp->mb[1] = fcport->loop_id << 8; 2596 mcp->mb[2] = opt; 2597 mcp->out_mb = MBX_2|MBX_1|MBX_0; 2598 mcp->in_mb = MBX_7|MBX_6|MBX_1|MBX_0; 2599 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2); 2600 mcp->flags = 0; 2601 rval = qla2x00_mailbox_command(vha, mcp); 2602 2603 /* Return mailbox statuses. */ 2604 if (mb_ret != NULL) { 2605 mb_ret[0] = mcp->mb[0]; 2606 mb_ret[1] = mcp->mb[1]; 2607 mb_ret[6] = mcp->mb[6]; 2608 mb_ret[7] = mcp->mb[7]; 2609 } 2610 2611 if (rval != QLA_SUCCESS) { 2612 /* AV tmp code: need to change main mailbox_command function to 2613 * return ok even when the mailbox completion value is not 2614 * SUCCESS. The caller needs to be responsible to interpret 2615 * the return values of this mailbox command if we're not 2616 * to change too much of the existing code. 
2617 */ 2618 if (mcp->mb[0] == 0x4005 || mcp->mb[0] == 0x4006) 2619 rval = QLA_SUCCESS; 2620 2621 ql_dbg(ql_dbg_mbx, vha, 0x106b, 2622 "Failed=%x mb[0]=%x mb[1]=%x mb[6]=%x mb[7]=%x.\n", 2623 rval, mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]); 2624 } else { 2625 /*EMPTY*/ 2626 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106c, 2627 "Done %s.\n", __func__); 2628 } 2629 2630 return (rval); 2631 } 2632 2633 int 2634 qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, 2635 uint8_t area, uint8_t al_pa) 2636 { 2637 int rval; 2638 struct logio_entry_24xx *lg; 2639 dma_addr_t lg_dma; 2640 struct qla_hw_data *ha = vha->hw; 2641 struct req_que *req; 2642 2643 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106d, 2644 "Entered %s.\n", __func__); 2645 2646 lg = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma); 2647 if (lg == NULL) { 2648 ql_log(ql_log_warn, vha, 0x106e, 2649 "Failed to allocate logout IOCB.\n"); 2650 return QLA_MEMORY_ALLOC_FAILED; 2651 } 2652 2653 req = vha->req; 2654 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE; 2655 lg->entry_count = 1; 2656 lg->handle = MAKE_HANDLE(req->id, lg->handle); 2657 lg->nport_handle = cpu_to_le16(loop_id); 2658 lg->control_flags = 2659 cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO| 2660 LCF_FREE_NPORT); 2661 lg->port_id[0] = al_pa; 2662 lg->port_id[1] = area; 2663 lg->port_id[2] = domain; 2664 lg->vp_index = vha->vp_idx; 2665 rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0, 2666 (ha->r_a_tov / 10 * 2) + 2); 2667 if (rval != QLA_SUCCESS) { 2668 ql_dbg(ql_dbg_mbx, vha, 0x106f, 2669 "Failed to issue logout IOCB (%x).\n", rval); 2670 } else if (lg->entry_status != 0) { 2671 ql_dbg(ql_dbg_mbx, vha, 0x1070, 2672 "Failed to complete IOCB -- error status (%x).\n", 2673 lg->entry_status); 2674 rval = QLA_FUNCTION_FAILED; 2675 } else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) { 2676 ql_dbg(ql_dbg_mbx, vha, 0x1071, 2677 "Failed to complete IOCB -- completion status (%x) " 2678 "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status), 2679 le32_to_cpu(lg->io_parameter[0]), 2680 le32_to_cpu(lg->io_parameter[1])); 2681 } else { 2682 /*EMPTY*/ 2683 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1072, 2684 "Done %s.\n", __func__); 2685 } 2686 2687 dma_pool_free(ha->s_dma_pool, lg, lg_dma); 2688 2689 return rval; 2690 } 2691 2692 /* 2693 * qla2x00_fabric_logout 2694 * Issue logout fabric port mailbox command. 2695 * 2696 * Input: 2697 * ha = adapter block pointer. 2698 * loop_id = device loop ID. 2699 * TARGET_QUEUE_LOCK must be released. 2700 * ADAPTER_STATE_LOCK must be released. 2701 * 2702 * Returns: 2703 * qla2x00 local function return status code. 2704 * 2705 * Context: 2706 * Kernel context. 
2707 */ 2708 int 2709 qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, 2710 uint8_t area, uint8_t al_pa) 2711 { 2712 int rval; 2713 mbx_cmd_t mc; 2714 mbx_cmd_t *mcp = &mc; 2715 2716 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1073, 2717 "Entered %s.\n", __func__); 2718 2719 mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT; 2720 mcp->out_mb = MBX_1|MBX_0; 2721 if (HAS_EXTENDED_IDS(vha->hw)) { 2722 mcp->mb[1] = loop_id; 2723 mcp->mb[10] = 0; 2724 mcp->out_mb |= MBX_10; 2725 } else { 2726 mcp->mb[1] = loop_id << 8; 2727 } 2728 2729 mcp->in_mb = MBX_1|MBX_0; 2730 mcp->tov = MBX_TOV_SECONDS; 2731 mcp->flags = 0; 2732 rval = qla2x00_mailbox_command(vha, mcp); 2733 2734 if (rval != QLA_SUCCESS) { 2735 /*EMPTY*/ 2736 ql_dbg(ql_dbg_mbx, vha, 0x1074, 2737 "Failed=%x mb[1]=%x.\n", rval, mcp->mb[1]); 2738 } else { 2739 /*EMPTY*/ 2740 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1075, 2741 "Done %s.\n", __func__); 2742 } 2743 2744 return rval; 2745 } 2746 2747 /* 2748 * qla2x00_full_login_lip 2749 * Issue full login LIP mailbox command. 2750 * 2751 * Input: 2752 * ha = adapter block pointer. 2753 * TARGET_QUEUE_LOCK must be released. 2754 * ADAPTER_STATE_LOCK must be released. 2755 * 2756 * Returns: 2757 * qla2x00 local function return status code. 2758 * 2759 * Context: 2760 * Kernel context. 2761 */ 2762 int 2763 qla2x00_full_login_lip(scsi_qla_host_t *vha) 2764 { 2765 int rval; 2766 mbx_cmd_t mc; 2767 mbx_cmd_t *mcp = &mc; 2768 2769 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1076, 2770 "Entered %s.\n", __func__); 2771 2772 mcp->mb[0] = MBC_LIP_FULL_LOGIN; 2773 mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? BIT_4 : 0; 2774 mcp->mb[2] = 0; 2775 mcp->mb[3] = 0; 2776 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 2777 mcp->in_mb = MBX_0; 2778 mcp->tov = MBX_TOV_SECONDS; 2779 mcp->flags = 0; 2780 rval = qla2x00_mailbox_command(vha, mcp); 2781 2782 if (rval != QLA_SUCCESS) { 2783 /*EMPTY*/ 2784 ql_dbg(ql_dbg_mbx, vha, 0x1077, "Failed=%x.\n", rval); 2785 } else { 2786 /*EMPTY*/ 2787 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1078, 2788 "Done %s.\n", __func__); 2789 } 2790 2791 return rval; 2792 } 2793 2794 /* 2795 * qla2x00_get_id_list 2796 * 2797 * Input: 2798 * ha = adapter block pointer. 2799 * 2800 * Returns: 2801 * qla2x00 local function return status code. 2802 * 2803 * Context: 2804 * Kernel context. 
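 *
 * id_list/id_list_dma describe a caller-allocated DMA buffer that the
 * firmware fills with the current ID list; on success the entry count
 * reported by the firmware in mb[1] is returned through *entries.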
2805 */ 2806 int 2807 qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma, 2808 uint16_t *entries) 2809 { 2810 int rval; 2811 mbx_cmd_t mc; 2812 mbx_cmd_t *mcp = &mc; 2813 2814 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1079, 2815 "Entered %s.\n", __func__); 2816 2817 if (id_list == NULL) 2818 return QLA_FUNCTION_FAILED; 2819 2820 mcp->mb[0] = MBC_GET_ID_LIST; 2821 mcp->out_mb = MBX_0; 2822 if (IS_FWI2_CAPABLE(vha->hw)) { 2823 mcp->mb[2] = MSW(id_list_dma); 2824 mcp->mb[3] = LSW(id_list_dma); 2825 mcp->mb[6] = MSW(MSD(id_list_dma)); 2826 mcp->mb[7] = LSW(MSD(id_list_dma)); 2827 mcp->mb[8] = 0; 2828 mcp->mb[9] = vha->vp_idx; 2829 mcp->out_mb |= MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2; 2830 } else { 2831 mcp->mb[1] = MSW(id_list_dma); 2832 mcp->mb[2] = LSW(id_list_dma); 2833 mcp->mb[3] = MSW(MSD(id_list_dma)); 2834 mcp->mb[6] = LSW(MSD(id_list_dma)); 2835 mcp->out_mb |= MBX_6|MBX_3|MBX_2|MBX_1; 2836 } 2837 mcp->in_mb = MBX_1|MBX_0; 2838 mcp->tov = MBX_TOV_SECONDS; 2839 mcp->flags = 0; 2840 rval = qla2x00_mailbox_command(vha, mcp); 2841 2842 if (rval != QLA_SUCCESS) { 2843 /*EMPTY*/ 2844 ql_dbg(ql_dbg_mbx, vha, 0x107a, "Failed=%x.\n", rval); 2845 } else { 2846 *entries = mcp->mb[1]; 2847 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107b, 2848 "Done %s.\n", __func__); 2849 } 2850 2851 return rval; 2852 } 2853 2854 /* 2855 * qla2x00_get_resource_cnts 2856 * Get current firmware resource counts. 2857 * 2858 * Input: 2859 * ha = adapter block pointer. 2860 * 2861 * Returns: 2862 * qla2x00 local function return status code. 2863 * 2864 * Context: 2865 * Kernel context. 2866 */ 2867 int 2868 qla2x00_get_resource_cnts(scsi_qla_host_t *vha) 2869 { 2870 struct qla_hw_data *ha = vha->hw; 2871 int rval; 2872 mbx_cmd_t mc; 2873 mbx_cmd_t *mcp = &mc; 2874 2875 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107c, 2876 "Entered %s.\n", __func__); 2877 2878 mcp->mb[0] = MBC_GET_RESOURCE_COUNTS; 2879 mcp->out_mb = MBX_0; 2880 mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 2881 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || 2882 IS_QLA27XX(ha) || IS_QLA28XX(ha)) 2883 mcp->in_mb |= MBX_12; 2884 mcp->tov = MBX_TOV_SECONDS; 2885 mcp->flags = 0; 2886 rval = qla2x00_mailbox_command(vha, mcp); 2887 2888 if (rval != QLA_SUCCESS) { 2889 /*EMPTY*/ 2890 ql_dbg(ql_dbg_mbx, vha, 0x107d, 2891 "Failed mb[0]=%x.\n", mcp->mb[0]); 2892 } else { 2893 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107e, 2894 "Done %s mb1=%x mb2=%x mb3=%x mb6=%x mb7=%x mb10=%x " 2895 "mb11=%x mb12=%x.\n", __func__, mcp->mb[1], mcp->mb[2], 2896 mcp->mb[3], mcp->mb[6], mcp->mb[7], mcp->mb[10], 2897 mcp->mb[11], mcp->mb[12]); 2898 2899 ha->orig_fw_tgt_xcb_count = mcp->mb[1]; 2900 ha->cur_fw_tgt_xcb_count = mcp->mb[2]; 2901 ha->cur_fw_xcb_count = mcp->mb[3]; 2902 ha->orig_fw_xcb_count = mcp->mb[6]; 2903 ha->cur_fw_iocb_count = mcp->mb[7]; 2904 ha->orig_fw_iocb_count = mcp->mb[10]; 2905 if (ha->flags.npiv_supported) 2906 ha->max_npiv_vports = mcp->mb[11]; 2907 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) || 2908 IS_QLA28XX(ha)) 2909 ha->fw_max_fcf_count = mcp->mb[12]; 2910 } 2911 2912 return (rval); 2913 } 2914 2915 /* 2916 * qla2x00_get_fcal_position_map 2917 * Get FCAL (LILP) position map using mailbox command 2918 * 2919 * Input: 2920 * ha = adapter state pointer. 2921 * pos_map = buffer pointer (can be NULL). 2922 * 2923 * Returns: 2924 * qla2x00 local function return status code. 2925 * 2926 * Context: 2927 * Kernel context. 
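 *
 * The map is DMA'd into a FCAL_MAP_SIZE pool buffer and, when pos_map
 * is non-NULL, copied out to it; byte 0 of the returned map is the
 * count of positions that follow (hence the pmap[0] + 1 byte dump
 * below).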
2928 */ 2929 int 2930 qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map) 2931 { 2932 int rval; 2933 mbx_cmd_t mc; 2934 mbx_cmd_t *mcp = &mc; 2935 char *pmap; 2936 dma_addr_t pmap_dma; 2937 struct qla_hw_data *ha = vha->hw; 2938 2939 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107f, 2940 "Entered %s.\n", __func__); 2941 2942 pmap = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma); 2943 if (pmap == NULL) { 2944 ql_log(ql_log_warn, vha, 0x1080, 2945 "Memory alloc failed.\n"); 2946 return QLA_MEMORY_ALLOC_FAILED; 2947 } 2948 2949 mcp->mb[0] = MBC_GET_FC_AL_POSITION_MAP; 2950 mcp->mb[2] = MSW(pmap_dma); 2951 mcp->mb[3] = LSW(pmap_dma); 2952 mcp->mb[6] = MSW(MSD(pmap_dma)); 2953 mcp->mb[7] = LSW(MSD(pmap_dma)); 2954 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0; 2955 mcp->in_mb = MBX_1|MBX_0; 2956 mcp->buf_size = FCAL_MAP_SIZE; 2957 mcp->flags = MBX_DMA_IN; 2958 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2); 2959 rval = qla2x00_mailbox_command(vha, mcp); 2960 2961 if (rval == QLA_SUCCESS) { 2962 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1081, 2963 "mb0/mb1=%x/%X FC/AL position map size (%x).\n", 2964 mcp->mb[0], mcp->mb[1], (unsigned)pmap[0]); 2965 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111d, 2966 pmap, pmap[0] + 1); 2967 2968 if (pos_map) 2969 memcpy(pos_map, pmap, FCAL_MAP_SIZE); 2970 } 2971 dma_pool_free(ha->s_dma_pool, pmap, pmap_dma); 2972 2973 if (rval != QLA_SUCCESS) { 2974 ql_dbg(ql_dbg_mbx, vha, 0x1082, "Failed=%x.\n", rval); 2975 } else { 2976 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1083, 2977 "Done %s.\n", __func__); 2978 } 2979 2980 return rval; 2981 } 2982 2983 /* 2984 * qla2x00_get_link_status 2985 * 2986 * Input: 2987 * ha = adapter block pointer. 2988 * loop_id = device loop ID. 2989 * ret_buf = pointer to link status return buffer. 2990 * 2991 * Returns: 2992 * 0 = success. 2993 * BIT_0 = mem alloc error. 2994 * BIT_1 = mailbox error. 2995 */ 2996 int 2997 qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id, 2998 struct link_statistics *stats, dma_addr_t stats_dma) 2999 { 3000 int rval; 3001 mbx_cmd_t mc; 3002 mbx_cmd_t *mcp = &mc; 3003 uint32_t *iter = (void *)stats; 3004 ushort dwords = offsetof(typeof(*stats), link_up_cnt)/sizeof(*iter); 3005 struct qla_hw_data *ha = vha->hw; 3006 3007 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1084, 3008 "Entered %s.\n", __func__); 3009 3010 mcp->mb[0] = MBC_GET_LINK_STATUS; 3011 mcp->mb[2] = MSW(LSD(stats_dma)); 3012 mcp->mb[3] = LSW(LSD(stats_dma)); 3013 mcp->mb[6] = MSW(MSD(stats_dma)); 3014 mcp->mb[7] = LSW(MSD(stats_dma)); 3015 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0; 3016 mcp->in_mb = MBX_0; 3017 if (IS_FWI2_CAPABLE(ha)) { 3018 mcp->mb[1] = loop_id; 3019 mcp->mb[4] = 0; 3020 mcp->mb[10] = 0; 3021 mcp->out_mb |= MBX_10|MBX_4|MBX_1; 3022 mcp->in_mb |= MBX_1; 3023 } else if (HAS_EXTENDED_IDS(ha)) { 3024 mcp->mb[1] = loop_id; 3025 mcp->mb[10] = 0; 3026 mcp->out_mb |= MBX_10|MBX_1; 3027 } else { 3028 mcp->mb[1] = loop_id << 8; 3029 mcp->out_mb |= MBX_1; 3030 } 3031 mcp->tov = MBX_TOV_SECONDS; 3032 mcp->flags = IOCTL_CMD; 3033 rval = qla2x00_mailbox_command(vha, mcp); 3034 3035 if (rval == QLA_SUCCESS) { 3036 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) { 3037 ql_dbg(ql_dbg_mbx, vha, 0x1085, 3038 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 3039 rval = QLA_FUNCTION_FAILED; 3040 } else { 3041 /* Re-endianize - firmware data is le32. 
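 * Only the leading counters are converted: dwords was computed above
 * as offsetof(..., link_up_cnt) / sizeof(*iter), so the loop stops at
 * the link_up_cnt field.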
*/ 3042 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1086, 3043 "Done %s.\n", __func__); 3044 for ( ; dwords--; iter++) 3045 le32_to_cpus(iter); 3046 } 3047 } else { 3048 /* Failed. */ 3049 ql_dbg(ql_dbg_mbx, vha, 0x1087, "Failed=%x.\n", rval); 3050 } 3051 3052 return rval; 3053 } 3054 3055 int 3056 qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats, 3057 dma_addr_t stats_dma, uint16_t options) 3058 { 3059 int rval; 3060 mbx_cmd_t mc; 3061 mbx_cmd_t *mcp = &mc; 3062 uint32_t *iter, dwords; 3063 3064 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1088, 3065 "Entered %s.\n", __func__); 3066 3067 memset(&mc, 0, sizeof(mc)); 3068 mc.mb[0] = MBC_GET_LINK_PRIV_STATS; 3069 mc.mb[2] = MSW(stats_dma); 3070 mc.mb[3] = LSW(stats_dma); 3071 mc.mb[6] = MSW(MSD(stats_dma)); 3072 mc.mb[7] = LSW(MSD(stats_dma)); 3073 mc.mb[8] = sizeof(struct link_statistics) / 4; 3074 mc.mb[9] = cpu_to_le16(vha->vp_idx); 3075 mc.mb[10] = cpu_to_le16(options); 3076 3077 rval = qla24xx_send_mb_cmd(vha, &mc); 3078 3079 if (rval == QLA_SUCCESS) { 3080 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) { 3081 ql_dbg(ql_dbg_mbx, vha, 0x1089, 3082 "Failed mb[0]=%x.\n", mcp->mb[0]); 3083 rval = QLA_FUNCTION_FAILED; 3084 } else { 3085 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108a, 3086 "Done %s.\n", __func__); 3087 /* Re-endianize - firmware data is le32. */ 3088 dwords = sizeof(struct link_statistics) / 4; 3089 iter = &stats->link_fail_cnt; 3090 for ( ; dwords--; iter++) 3091 le32_to_cpus(iter); 3092 } 3093 } else { 3094 /* Failed. */ 3095 ql_dbg(ql_dbg_mbx, vha, 0x108b, "Failed=%x.\n", rval); 3096 } 3097 3098 return rval; 3099 } 3100 3101 int 3102 qla24xx_abort_command(srb_t *sp) 3103 { 3104 int rval; 3105 unsigned long flags = 0; 3106 3107 struct abort_entry_24xx *abt; 3108 dma_addr_t abt_dma; 3109 uint32_t handle; 3110 fc_port_t *fcport = sp->fcport; 3111 struct scsi_qla_host *vha = fcport->vha; 3112 struct qla_hw_data *ha = vha->hw; 3113 struct req_que *req = vha->req; 3114 struct qla_qpair *qpair = sp->qpair; 3115 3116 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c, 3117 "Entered %s.\n", __func__); 3118 3119 if (vha->flags.qpairs_available && sp->qpair) 3120 req = sp->qpair->req; 3121 else 3122 return QLA_FUNCTION_FAILED; 3123 3124 if (ql2xasynctmfenable) 3125 return qla24xx_async_abort_command(sp); 3126 3127 spin_lock_irqsave(qpair->qp_lock_ptr, flags); 3128 for (handle = 1; handle < req->num_outstanding_cmds; handle++) { 3129 if (req->outstanding_cmds[handle] == sp) 3130 break; 3131 } 3132 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); 3133 if (handle == req->num_outstanding_cmds) { 3134 /* Command not found. 
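 * The scan of req->outstanding_cmds above did not find sp, so there
 * is no firmware handle to abort.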
*/ 3135 return QLA_FUNCTION_FAILED; 3136 } 3137 3138 abt = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &abt_dma); 3139 if (abt == NULL) { 3140 ql_log(ql_log_warn, vha, 0x108d, 3141 "Failed to allocate abort IOCB.\n"); 3142 return QLA_MEMORY_ALLOC_FAILED; 3143 } 3144 3145 abt->entry_type = ABORT_IOCB_TYPE; 3146 abt->entry_count = 1; 3147 abt->handle = MAKE_HANDLE(req->id, abt->handle); 3148 abt->nport_handle = cpu_to_le16(fcport->loop_id); 3149 abt->handle_to_abort = MAKE_HANDLE(req->id, handle); 3150 abt->port_id[0] = fcport->d_id.b.al_pa; 3151 abt->port_id[1] = fcport->d_id.b.area; 3152 abt->port_id[2] = fcport->d_id.b.domain; 3153 abt->vp_index = fcport->vha->vp_idx; 3154 3155 abt->req_que_no = cpu_to_le16(req->id); 3156 3157 rval = qla2x00_issue_iocb(vha, abt, abt_dma, 0); 3158 if (rval != QLA_SUCCESS) { 3159 ql_dbg(ql_dbg_mbx, vha, 0x108e, 3160 "Failed to issue IOCB (%x).\n", rval); 3161 } else if (abt->entry_status != 0) { 3162 ql_dbg(ql_dbg_mbx, vha, 0x108f, 3163 "Failed to complete IOCB -- error status (%x).\n", 3164 abt->entry_status); 3165 rval = QLA_FUNCTION_FAILED; 3166 } else if (abt->nport_handle != cpu_to_le16(0)) { 3167 ql_dbg(ql_dbg_mbx, vha, 0x1090, 3168 "Failed to complete IOCB -- completion status (%x).\n", 3169 le16_to_cpu(abt->nport_handle)); 3170 if (abt->nport_handle == CS_IOCB_ERROR) 3171 rval = QLA_FUNCTION_PARAMETER_ERROR; 3172 else 3173 rval = QLA_FUNCTION_FAILED; 3174 } else { 3175 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1091, 3176 "Done %s.\n", __func__); 3177 } 3178 3179 dma_pool_free(ha->s_dma_pool, abt, abt_dma); 3180 3181 return rval; 3182 } 3183 3184 struct tsk_mgmt_cmd { 3185 union { 3186 struct tsk_mgmt_entry tsk; 3187 struct sts_entry_24xx sts; 3188 } p; 3189 }; 3190 3191 static int 3192 __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport, 3193 uint64_t l, int tag) 3194 { 3195 int rval, rval2; 3196 struct tsk_mgmt_cmd *tsk; 3197 struct sts_entry_24xx *sts; 3198 dma_addr_t tsk_dma; 3199 scsi_qla_host_t *vha; 3200 struct qla_hw_data *ha; 3201 struct req_que *req; 3202 struct qla_qpair *qpair; 3203 3204 vha = fcport->vha; 3205 ha = vha->hw; 3206 req = vha->req; 3207 3208 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1092, 3209 "Entered %s.\n", __func__); 3210 3211 if (vha->vp_idx && vha->qpair) { 3212 /* NPIV port */ 3213 qpair = vha->qpair; 3214 req = qpair->req; 3215 } 3216 3217 tsk = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma); 3218 if (tsk == NULL) { 3219 ql_log(ql_log_warn, vha, 0x1093, 3220 "Failed to allocate task management IOCB.\n"); 3221 return QLA_MEMORY_ALLOC_FAILED; 3222 } 3223 3224 tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE; 3225 tsk->p.tsk.entry_count = 1; 3226 tsk->p.tsk.handle = MAKE_HANDLE(req->id, tsk->p.tsk.handle); 3227 tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id); 3228 tsk->p.tsk.timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); 3229 tsk->p.tsk.control_flags = cpu_to_le32(type); 3230 tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa; 3231 tsk->p.tsk.port_id[1] = fcport->d_id.b.area; 3232 tsk->p.tsk.port_id[2] = fcport->d_id.b.domain; 3233 tsk->p.tsk.vp_index = fcport->vha->vp_idx; 3234 if (type == TCF_LUN_RESET) { 3235 int_to_scsilun(l, &tsk->p.tsk.lun); 3236 host_to_fcp_swap((uint8_t *)&tsk->p.tsk.lun, 3237 sizeof(tsk->p.tsk.lun)); 3238 } 3239 3240 sts = &tsk->p.sts; 3241 rval = qla2x00_issue_iocb(vha, tsk, tsk_dma, 0); 3242 if (rval != QLA_SUCCESS) { 3243 ql_dbg(ql_dbg_mbx, vha, 0x1094, 3244 "Failed to issue %s reset IOCB (%x).\n", name, rval); 3245 } else if (sts->entry_status != 0) { 3246 
ql_dbg(ql_dbg_mbx, vha, 0x1095, 3247 "Failed to complete IOCB -- error status (%x).\n", 3248 sts->entry_status); 3249 rval = QLA_FUNCTION_FAILED; 3250 } else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) { 3251 ql_dbg(ql_dbg_mbx, vha, 0x1096, 3252 "Failed to complete IOCB -- completion status (%x).\n", 3253 le16_to_cpu(sts->comp_status)); 3254 rval = QLA_FUNCTION_FAILED; 3255 } else if (le16_to_cpu(sts->scsi_status) & 3256 SS_RESPONSE_INFO_LEN_VALID) { 3257 if (le32_to_cpu(sts->rsp_data_len) < 4) { 3258 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1097, 3259 "Ignoring inconsistent data length -- not enough " 3260 "response info (%d).\n", 3261 le32_to_cpu(sts->rsp_data_len)); 3262 } else if (sts->data[3]) { 3263 ql_dbg(ql_dbg_mbx, vha, 0x1098, 3264 "Failed to complete IOCB -- response (%x).\n", 3265 sts->data[3]); 3266 rval = QLA_FUNCTION_FAILED; 3267 } 3268 } 3269 3270 /* Issue marker IOCB. */ 3271 rval2 = qla2x00_marker(vha, ha->base_qpair, fcport->loop_id, l, 3272 type == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID); 3273 if (rval2 != QLA_SUCCESS) { 3274 ql_dbg(ql_dbg_mbx, vha, 0x1099, 3275 "Failed to issue marker IOCB (%x).\n", rval2); 3276 } else { 3277 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109a, 3278 "Done %s.\n", __func__); 3279 } 3280 3281 dma_pool_free(ha->s_dma_pool, tsk, tsk_dma); 3282 3283 return rval; 3284 } 3285 3286 int 3287 qla24xx_abort_target(struct fc_port *fcport, uint64_t l, int tag) 3288 { 3289 struct qla_hw_data *ha = fcport->vha->hw; 3290 3291 if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha)) 3292 return qla2x00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag); 3293 3294 return __qla24xx_issue_tmf("Target", TCF_TARGET_RESET, fcport, l, tag); 3295 } 3296 3297 int 3298 qla24xx_lun_reset(struct fc_port *fcport, uint64_t l, int tag) 3299 { 3300 struct qla_hw_data *ha = fcport->vha->hw; 3301 3302 if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha)) 3303 return qla2x00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag); 3304 3305 return __qla24xx_issue_tmf("Lun", TCF_LUN_RESET, fcport, l, tag); 3306 } 3307 3308 int 3309 qla2x00_system_error(scsi_qla_host_t *vha) 3310 { 3311 int rval; 3312 mbx_cmd_t mc; 3313 mbx_cmd_t *mcp = &mc; 3314 struct qla_hw_data *ha = vha->hw; 3315 3316 if (!IS_QLA23XX(ha) && !IS_FWI2_CAPABLE(ha)) 3317 return QLA_FUNCTION_FAILED; 3318 3319 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109b, 3320 "Entered %s.\n", __func__); 3321 3322 mcp->mb[0] = MBC_GEN_SYSTEM_ERROR; 3323 mcp->out_mb = MBX_0; 3324 mcp->in_mb = MBX_0; 3325 mcp->tov = 5; 3326 mcp->flags = 0; 3327 rval = qla2x00_mailbox_command(vha, mcp); 3328 3329 if (rval != QLA_SUCCESS) { 3330 ql_dbg(ql_dbg_mbx, vha, 0x109c, "Failed=%x.\n", rval); 3331 } else { 3332 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109d, 3333 "Done %s.\n", __func__); 3334 } 3335 3336 return rval; 3337 } 3338 3339 int 3340 qla2x00_write_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t data) 3341 { 3342 int rval; 3343 mbx_cmd_t mc; 3344 mbx_cmd_t *mcp = &mc; 3345 3346 if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) && 3347 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw)) 3348 return QLA_FUNCTION_FAILED; 3349 3350 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1182, 3351 "Entered %s.\n", __func__); 3352 3353 mcp->mb[0] = MBC_WRITE_SERDES; 3354 mcp->mb[1] = addr; 3355 if (IS_QLA2031(vha->hw)) 3356 mcp->mb[2] = data & 0xff; 3357 else 3358 mcp->mb[2] = data; 3359 3360 mcp->mb[3] = 0; 3361 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 3362 mcp->in_mb = MBX_0; 3363 mcp->tov = MBX_TOV_SECONDS; 3364 mcp->flags = 0; 3365 rval = 
qla2x00_mailbox_command(vha, mcp); 3366 3367 if (rval != QLA_SUCCESS) { 3368 ql_dbg(ql_dbg_mbx, vha, 0x1183, 3369 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 3370 } else { 3371 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1184, 3372 "Done %s.\n", __func__); 3373 } 3374 3375 return rval; 3376 } 3377 3378 int 3379 qla2x00_read_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t *data) 3380 { 3381 int rval; 3382 mbx_cmd_t mc; 3383 mbx_cmd_t *mcp = &mc; 3384 3385 if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) && 3386 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw)) 3387 return QLA_FUNCTION_FAILED; 3388 3389 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1185, 3390 "Entered %s.\n", __func__); 3391 3392 mcp->mb[0] = MBC_READ_SERDES; 3393 mcp->mb[1] = addr; 3394 mcp->mb[3] = 0; 3395 mcp->out_mb = MBX_3|MBX_1|MBX_0; 3396 mcp->in_mb = MBX_1|MBX_0; 3397 mcp->tov = MBX_TOV_SECONDS; 3398 mcp->flags = 0; 3399 rval = qla2x00_mailbox_command(vha, mcp); 3400 3401 if (IS_QLA2031(vha->hw)) 3402 *data = mcp->mb[1] & 0xff; 3403 else 3404 *data = mcp->mb[1]; 3405 3406 if (rval != QLA_SUCCESS) { 3407 ql_dbg(ql_dbg_mbx, vha, 0x1186, 3408 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 3409 } else { 3410 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1187, 3411 "Done %s.\n", __func__); 3412 } 3413 3414 return rval; 3415 } 3416 3417 int 3418 qla8044_write_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t data) 3419 { 3420 int rval; 3421 mbx_cmd_t mc; 3422 mbx_cmd_t *mcp = &mc; 3423 3424 if (!IS_QLA8044(vha->hw)) 3425 return QLA_FUNCTION_FAILED; 3426 3427 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x11a0, 3428 "Entered %s.\n", __func__); 3429 3430 mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG; 3431 mcp->mb[1] = HCS_WRITE_SERDES; 3432 mcp->mb[3] = LSW(addr); 3433 mcp->mb[4] = MSW(addr); 3434 mcp->mb[5] = LSW(data); 3435 mcp->mb[6] = MSW(data); 3436 mcp->out_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_1|MBX_0; 3437 mcp->in_mb = MBX_0; 3438 mcp->tov = MBX_TOV_SECONDS; 3439 mcp->flags = 0; 3440 rval = qla2x00_mailbox_command(vha, mcp); 3441 3442 if (rval != QLA_SUCCESS) { 3443 ql_dbg(ql_dbg_mbx, vha, 0x11a1, 3444 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 3445 } else { 3446 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1188, 3447 "Done %s.\n", __func__); 3448 } 3449 3450 return rval; 3451 } 3452 3453 int 3454 qla8044_read_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t *data) 3455 { 3456 int rval; 3457 mbx_cmd_t mc; 3458 mbx_cmd_t *mcp = &mc; 3459 3460 if (!IS_QLA8044(vha->hw)) 3461 return QLA_FUNCTION_FAILED; 3462 3463 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1189, 3464 "Entered %s.\n", __func__); 3465 3466 mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG; 3467 mcp->mb[1] = HCS_READ_SERDES; 3468 mcp->mb[3] = LSW(addr); 3469 mcp->mb[4] = MSW(addr); 3470 mcp->out_mb = MBX_4|MBX_3|MBX_1|MBX_0; 3471 mcp->in_mb = MBX_2|MBX_1|MBX_0; 3472 mcp->tov = MBX_TOV_SECONDS; 3473 mcp->flags = 0; 3474 rval = qla2x00_mailbox_command(vha, mcp); 3475 3476 *data = mcp->mb[2] << 16 | mcp->mb[1]; 3477 3478 if (rval != QLA_SUCCESS) { 3479 ql_dbg(ql_dbg_mbx, vha, 0x118a, 3480 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 3481 } else { 3482 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118b, 3483 "Done %s.\n", __func__); 3484 } 3485 3486 return rval; 3487 } 3488 3489 /** 3490 * qla2x00_set_serdes_params() - 3491 * @vha: HA context 3492 * @sw_em_1g: serial link options 3493 * @sw_em_2g: serial link options 3494 * @sw_em_4g: serial link options 3495 * 3496 * Returns 3497 */ 3498 int 3499 qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g, 3500 uint16_t 
sw_em_2g, uint16_t sw_em_4g) 3501 { 3502 int rval; 3503 mbx_cmd_t mc; 3504 mbx_cmd_t *mcp = &mc; 3505 3506 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109e, 3507 "Entered %s.\n", __func__); 3508 3509 mcp->mb[0] = MBC_SERDES_PARAMS; 3510 mcp->mb[1] = BIT_0; 3511 mcp->mb[2] = sw_em_1g | BIT_15; 3512 mcp->mb[3] = sw_em_2g | BIT_15; 3513 mcp->mb[4] = sw_em_4g | BIT_15; 3514 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 3515 mcp->in_mb = MBX_0; 3516 mcp->tov = MBX_TOV_SECONDS; 3517 mcp->flags = 0; 3518 rval = qla2x00_mailbox_command(vha, mcp); 3519 3520 if (rval != QLA_SUCCESS) { 3521 /*EMPTY*/ 3522 ql_dbg(ql_dbg_mbx, vha, 0x109f, 3523 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 3524 } else { 3525 /*EMPTY*/ 3526 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a0, 3527 "Done %s.\n", __func__); 3528 } 3529 3530 return rval; 3531 } 3532 3533 int 3534 qla2x00_stop_firmware(scsi_qla_host_t *vha) 3535 { 3536 int rval; 3537 mbx_cmd_t mc; 3538 mbx_cmd_t *mcp = &mc; 3539 3540 if (!IS_FWI2_CAPABLE(vha->hw)) 3541 return QLA_FUNCTION_FAILED; 3542 3543 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a1, 3544 "Entered %s.\n", __func__); 3545 3546 mcp->mb[0] = MBC_STOP_FIRMWARE; 3547 mcp->mb[1] = 0; 3548 mcp->out_mb = MBX_1|MBX_0; 3549 mcp->in_mb = MBX_0; 3550 mcp->tov = 5; 3551 mcp->flags = 0; 3552 rval = qla2x00_mailbox_command(vha, mcp); 3553 3554 if (rval != QLA_SUCCESS) { 3555 ql_dbg(ql_dbg_mbx, vha, 0x10a2, "Failed=%x.\n", rval); 3556 if (mcp->mb[0] == MBS_INVALID_COMMAND) 3557 rval = QLA_INVALID_COMMAND; 3558 } else { 3559 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a3, 3560 "Done %s.\n", __func__); 3561 } 3562 3563 return rval; 3564 } 3565 3566 int 3567 qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma, 3568 uint16_t buffers) 3569 { 3570 int rval; 3571 mbx_cmd_t mc; 3572 mbx_cmd_t *mcp = &mc; 3573 3574 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a4, 3575 "Entered %s.\n", __func__); 3576 3577 if (!IS_FWI2_CAPABLE(vha->hw)) 3578 return QLA_FUNCTION_FAILED; 3579 3580 if (unlikely(pci_channel_offline(vha->hw->pdev))) 3581 return QLA_FUNCTION_FAILED; 3582 3583 mcp->mb[0] = MBC_TRACE_CONTROL; 3584 mcp->mb[1] = TC_EFT_ENABLE; 3585 mcp->mb[2] = LSW(eft_dma); 3586 mcp->mb[3] = MSW(eft_dma); 3587 mcp->mb[4] = LSW(MSD(eft_dma)); 3588 mcp->mb[5] = MSW(MSD(eft_dma)); 3589 mcp->mb[6] = buffers; 3590 mcp->mb[7] = TC_AEN_DISABLE; 3591 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 3592 mcp->in_mb = MBX_1|MBX_0; 3593 mcp->tov = MBX_TOV_SECONDS; 3594 mcp->flags = 0; 3595 rval = qla2x00_mailbox_command(vha, mcp); 3596 if (rval != QLA_SUCCESS) { 3597 ql_dbg(ql_dbg_mbx, vha, 0x10a5, 3598 "Failed=%x mb[0]=%x mb[1]=%x.\n", 3599 rval, mcp->mb[0], mcp->mb[1]); 3600 } else { 3601 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a6, 3602 "Done %s.\n", __func__); 3603 } 3604 3605 return rval; 3606 } 3607 3608 int 3609 qla2x00_disable_eft_trace(scsi_qla_host_t *vha) 3610 { 3611 int rval; 3612 mbx_cmd_t mc; 3613 mbx_cmd_t *mcp = &mc; 3614 3615 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a7, 3616 "Entered %s.\n", __func__); 3617 3618 if (!IS_FWI2_CAPABLE(vha->hw)) 3619 return QLA_FUNCTION_FAILED; 3620 3621 if (unlikely(pci_channel_offline(vha->hw->pdev))) 3622 return QLA_FUNCTION_FAILED; 3623 3624 mcp->mb[0] = MBC_TRACE_CONTROL; 3625 mcp->mb[1] = TC_EFT_DISABLE; 3626 mcp->out_mb = MBX_1|MBX_0; 3627 mcp->in_mb = MBX_1|MBX_0; 3628 mcp->tov = MBX_TOV_SECONDS; 3629 mcp->flags = 0; 3630 rval = qla2x00_mailbox_command(vha, mcp); 3631 if (rval != QLA_SUCCESS) { 3632 ql_dbg(ql_dbg_mbx, vha, 0x10a8, 3633 "Failed=%x 
mb[0]=%x mb[1]=%x.\n", 3634 rval, mcp->mb[0], mcp->mb[1]); 3635 } else { 3636 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a9, 3637 "Done %s.\n", __func__); 3638 } 3639 3640 return rval; 3641 } 3642 3643 int 3644 qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma, 3645 uint16_t buffers, uint16_t *mb, uint32_t *dwords) 3646 { 3647 int rval; 3648 mbx_cmd_t mc; 3649 mbx_cmd_t *mcp = &mc; 3650 3651 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10aa, 3652 "Entered %s.\n", __func__); 3653 3654 if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw) && 3655 !IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) && 3656 !IS_QLA28XX(vha->hw)) 3657 return QLA_FUNCTION_FAILED; 3658 3659 if (unlikely(pci_channel_offline(vha->hw->pdev))) 3660 return QLA_FUNCTION_FAILED; 3661 3662 mcp->mb[0] = MBC_TRACE_CONTROL; 3663 mcp->mb[1] = TC_FCE_ENABLE; 3664 mcp->mb[2] = LSW(fce_dma); 3665 mcp->mb[3] = MSW(fce_dma); 3666 mcp->mb[4] = LSW(MSD(fce_dma)); 3667 mcp->mb[5] = MSW(MSD(fce_dma)); 3668 mcp->mb[6] = buffers; 3669 mcp->mb[7] = TC_AEN_DISABLE; 3670 mcp->mb[8] = 0; 3671 mcp->mb[9] = TC_FCE_DEFAULT_RX_SIZE; 3672 mcp->mb[10] = TC_FCE_DEFAULT_TX_SIZE; 3673 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2| 3674 MBX_1|MBX_0; 3675 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 3676 mcp->tov = MBX_TOV_SECONDS; 3677 mcp->flags = 0; 3678 rval = qla2x00_mailbox_command(vha, mcp); 3679 if (rval != QLA_SUCCESS) { 3680 ql_dbg(ql_dbg_mbx, vha, 0x10ab, 3681 "Failed=%x mb[0]=%x mb[1]=%x.\n", 3682 rval, mcp->mb[0], mcp->mb[1]); 3683 } else { 3684 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ac, 3685 "Done %s.\n", __func__); 3686 3687 if (mb) 3688 memcpy(mb, mcp->mb, 8 * sizeof(*mb)); 3689 if (dwords) 3690 *dwords = buffers; 3691 } 3692 3693 return rval; 3694 } 3695 3696 int 3697 qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd) 3698 { 3699 int rval; 3700 mbx_cmd_t mc; 3701 mbx_cmd_t *mcp = &mc; 3702 3703 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ad, 3704 "Entered %s.\n", __func__); 3705 3706 if (!IS_FWI2_CAPABLE(vha->hw)) 3707 return QLA_FUNCTION_FAILED; 3708 3709 if (unlikely(pci_channel_offline(vha->hw->pdev))) 3710 return QLA_FUNCTION_FAILED; 3711 3712 mcp->mb[0] = MBC_TRACE_CONTROL; 3713 mcp->mb[1] = TC_FCE_DISABLE; 3714 mcp->mb[2] = TC_FCE_DISABLE_TRACE; 3715 mcp->out_mb = MBX_2|MBX_1|MBX_0; 3716 mcp->in_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2| 3717 MBX_1|MBX_0; 3718 mcp->tov = MBX_TOV_SECONDS; 3719 mcp->flags = 0; 3720 rval = qla2x00_mailbox_command(vha, mcp); 3721 if (rval != QLA_SUCCESS) { 3722 ql_dbg(ql_dbg_mbx, vha, 0x10ae, 3723 "Failed=%x mb[0]=%x mb[1]=%x.\n", 3724 rval, mcp->mb[0], mcp->mb[1]); 3725 } else { 3726 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10af, 3727 "Done %s.\n", __func__); 3728 3729 if (wr) 3730 *wr = (uint64_t) mcp->mb[5] << 48 | 3731 (uint64_t) mcp->mb[4] << 32 | 3732 (uint64_t) mcp->mb[3] << 16 | 3733 (uint64_t) mcp->mb[2]; 3734 if (rd) 3735 *rd = (uint64_t) mcp->mb[9] << 48 | 3736 (uint64_t) mcp->mb[8] << 32 | 3737 (uint64_t) mcp->mb[7] << 16 | 3738 (uint64_t) mcp->mb[6]; 3739 } 3740 3741 return rval; 3742 } 3743 3744 int 3745 qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id, 3746 uint16_t *port_speed, uint16_t *mb) 3747 { 3748 int rval; 3749 mbx_cmd_t mc; 3750 mbx_cmd_t *mcp = &mc; 3751 3752 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b0, 3753 "Entered %s.\n", __func__); 3754 3755 if (!IS_IIDMA_CAPABLE(vha->hw)) 3756 return QLA_FUNCTION_FAILED; 3757 3758 mcp->mb[0] = MBC_PORT_PARAMS; 3759 mcp->mb[1] = loop_id; 
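	/*
	 * The "get" form of MBC_PORT_PARAMS leaves mb[2]/mb[3] zero; the
	 * set variant below passes BIT_0 in mb[2] and the requested
	 * speed in mb[3].
	 */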
3760 mcp->mb[2] = mcp->mb[3] = 0; 3761 mcp->mb[9] = vha->vp_idx; 3762 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0; 3763 mcp->in_mb = MBX_3|MBX_1|MBX_0; 3764 mcp->tov = MBX_TOV_SECONDS; 3765 mcp->flags = 0; 3766 rval = qla2x00_mailbox_command(vha, mcp); 3767 3768 /* Return mailbox statuses. */ 3769 if (mb) { 3770 mb[0] = mcp->mb[0]; 3771 mb[1] = mcp->mb[1]; 3772 mb[3] = mcp->mb[3]; 3773 } 3774 3775 if (rval != QLA_SUCCESS) { 3776 ql_dbg(ql_dbg_mbx, vha, 0x10b1, "Failed=%x.\n", rval); 3777 } else { 3778 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b2, 3779 "Done %s.\n", __func__); 3780 if (port_speed) 3781 *port_speed = mcp->mb[3]; 3782 } 3783 3784 return rval; 3785 } 3786 3787 int 3788 qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id, 3789 uint16_t port_speed, uint16_t *mb) 3790 { 3791 int rval; 3792 mbx_cmd_t mc; 3793 mbx_cmd_t *mcp = &mc; 3794 3795 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b3, 3796 "Entered %s.\n", __func__); 3797 3798 if (!IS_IIDMA_CAPABLE(vha->hw)) 3799 return QLA_FUNCTION_FAILED; 3800 3801 mcp->mb[0] = MBC_PORT_PARAMS; 3802 mcp->mb[1] = loop_id; 3803 mcp->mb[2] = BIT_0; 3804 mcp->mb[3] = port_speed & 0x3F; 3805 mcp->mb[9] = vha->vp_idx; 3806 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0; 3807 mcp->in_mb = MBX_3|MBX_1|MBX_0; 3808 mcp->tov = MBX_TOV_SECONDS; 3809 mcp->flags = 0; 3810 rval = qla2x00_mailbox_command(vha, mcp); 3811 3812 /* Return mailbox statuses. */ 3813 if (mb) { 3814 mb[0] = mcp->mb[0]; 3815 mb[1] = mcp->mb[1]; 3816 mb[3] = mcp->mb[3]; 3817 } 3818 3819 if (rval != QLA_SUCCESS) { 3820 ql_dbg(ql_dbg_mbx, vha, 0x10b4, 3821 "Failed=%x.\n", rval); 3822 } else { 3823 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b5, 3824 "Done %s.\n", __func__); 3825 } 3826 3827 return rval; 3828 } 3829 3830 void 3831 qla24xx_report_id_acquisition(scsi_qla_host_t *vha, 3832 struct vp_rpt_id_entry_24xx *rptid_entry) 3833 { 3834 struct qla_hw_data *ha = vha->hw; 3835 scsi_qla_host_t *vp = NULL; 3836 unsigned long flags; 3837 int found; 3838 port_id_t id; 3839 struct fc_port *fcport; 3840 3841 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6, 3842 "Entered %s.\n", __func__); 3843 3844 if (rptid_entry->entry_status != 0) 3845 return; 3846 3847 id.b.domain = rptid_entry->port_id[2]; 3848 id.b.area = rptid_entry->port_id[1]; 3849 id.b.al_pa = rptid_entry->port_id[0]; 3850 id.b.rsvd_1 = 0; 3851 ha->flags.n2n_ae = 0; 3852 3853 if (rptid_entry->format == 0) { 3854 /* loop */ 3855 ql_dbg(ql_dbg_async, vha, 0x10b7, 3856 "Format 0 : Number of VPs setup %d, number of " 3857 "VPs acquired %d.\n", rptid_entry->vp_setup, 3858 rptid_entry->vp_acquired); 3859 ql_dbg(ql_dbg_async, vha, 0x10b8, 3860 "Primary port id %02x%02x%02x.\n", 3861 rptid_entry->port_id[2], rptid_entry->port_id[1], 3862 rptid_entry->port_id[0]); 3863 ha->current_topology = ISP_CFG_NL; 3864 qlt_update_host_map(vha, id); 3865 3866 } else if (rptid_entry->format == 1) { 3867 /* fabric */ 3868 ql_dbg(ql_dbg_async, vha, 0x10b9, 3869 "Format 1: VP[%d] enabled - status %d - with " 3870 "port id %02x%02x%02x.\n", rptid_entry->vp_idx, 3871 rptid_entry->vp_status, 3872 rptid_entry->port_id[2], rptid_entry->port_id[1], 3873 rptid_entry->port_id[0]); 3874 ql_dbg(ql_dbg_async, vha, 0x5075, 3875 "Format 1: Remote WWPN %8phC.\n", 3876 rptid_entry->u.f1.port_name); 3877 3878 ql_dbg(ql_dbg_async, vha, 0x5075, 3879 "Format 1: WWPN %8phC.\n", 3880 vha->port_name); 3881 3882 switch (rptid_entry->u.f1.flags & TOPO_MASK) { 3883 case TOPO_N2N: 3884 ha->current_topology = ISP_CFG_N; 3885 spin_lock_irqsave(&vha->hw->tgt.sess_lock, 
flags); 3886 list_for_each_entry(fcport, &vha->vp_fcports, list) { 3887 fcport->scan_state = QLA_FCPORT_SCAN; 3888 fcport->n2n_flag = 0; 3889 } 3890 3891 fcport = qla2x00_find_fcport_by_wwpn(vha, 3892 rptid_entry->u.f1.port_name, 1); 3893 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 3894 3895 if (fcport) { 3896 fcport->plogi_nack_done_deadline = jiffies + HZ; 3897 fcport->dm_login_expire = jiffies + 2*HZ; 3898 fcport->scan_state = QLA_FCPORT_FOUND; 3899 fcport->n2n_flag = 1; 3900 fcport->keep_nport_handle = 1; 3901 if (vha->flags.nvme_enabled) 3902 fcport->fc4f_nvme = 1; 3903 3904 switch (fcport->disc_state) { 3905 case DSC_DELETED: 3906 set_bit(RELOGIN_NEEDED, 3907 &vha->dpc_flags); 3908 break; 3909 case DSC_DELETE_PEND: 3910 break; 3911 default: 3912 qlt_schedule_sess_for_deletion(fcport); 3913 break; 3914 } 3915 } else { 3916 id.b24 = 0; 3917 if (wwn_to_u64(vha->port_name) > 3918 wwn_to_u64(rptid_entry->u.f1.port_name)) { 3919 vha->d_id.b24 = 0; 3920 vha->d_id.b.al_pa = 1; 3921 ha->flags.n2n_bigger = 1; 3922 3923 id.b.al_pa = 2; 3924 ql_dbg(ql_dbg_async, vha, 0x5075, 3925 "Format 1: assign local id %x remote id %x\n", 3926 vha->d_id.b24, id.b24); 3927 } else { 3928 ql_dbg(ql_dbg_async, vha, 0x5075, 3929 "Format 1: Remote login - Waiting for WWPN %8phC.\n", 3930 rptid_entry->u.f1.port_name); 3931 ha->flags.n2n_bigger = 0; 3932 } 3933 qla24xx_post_newsess_work(vha, &id, 3934 rptid_entry->u.f1.port_name, 3935 rptid_entry->u.f1.node_name, 3936 NULL, 3937 FS_FCP_IS_N2N); 3938 } 3939 3940 /* if our portname is higher then initiate N2N login */ 3941 3942 set_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags); 3943 ha->flags.n2n_ae = 1; 3944 return; 3945 break; 3946 case TOPO_FL: 3947 ha->current_topology = ISP_CFG_FL; 3948 break; 3949 case TOPO_F: 3950 ha->current_topology = ISP_CFG_F; 3951 break; 3952 default: 3953 break; 3954 } 3955 3956 ha->flags.gpsc_supported = 1; 3957 ha->current_topology = ISP_CFG_F; 3958 /* buffer to buffer credit flag */ 3959 vha->flags.bbcr_enable = (rptid_entry->u.f1.bbcr & 0xf) != 0; 3960 3961 if (rptid_entry->vp_idx == 0) { 3962 if (rptid_entry->vp_status == VP_STAT_COMPL) { 3963 /* FA-WWN is only for physical port */ 3964 if (qla_ini_mode_enabled(vha) && 3965 ha->flags.fawwpn_enabled && 3966 (rptid_entry->u.f1.flags & 3967 BIT_6)) { 3968 memcpy(vha->port_name, 3969 rptid_entry->u.f1.port_name, 3970 WWN_SIZE); 3971 } 3972 3973 qlt_update_host_map(vha, id); 3974 } 3975 3976 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags); 3977 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags); 3978 } else { 3979 if (rptid_entry->vp_status != VP_STAT_COMPL && 3980 rptid_entry->vp_status != VP_STAT_ID_CHG) { 3981 ql_dbg(ql_dbg_mbx, vha, 0x10ba, 3982 "Could not acquire ID for VP[%d].\n", 3983 rptid_entry->vp_idx); 3984 return; 3985 } 3986 3987 found = 0; 3988 spin_lock_irqsave(&ha->vport_slock, flags); 3989 list_for_each_entry(vp, &ha->vp_list, list) { 3990 if (rptid_entry->vp_idx == vp->vp_idx) { 3991 found = 1; 3992 break; 3993 } 3994 } 3995 spin_unlock_irqrestore(&ha->vport_slock, flags); 3996 3997 if (!found) 3998 return; 3999 4000 qlt_update_host_map(vp, id); 4001 4002 /* 4003 * Cannot configure here as we are still sitting on the 4004 * response queue. Handle it in dpc context. 
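 * The VP_IDX_ACQUIRED/REGISTER_FC4_NEEDED/REGISTER_FDMI_NEEDED bits
 * set below are serviced by the DPC thread once it is woken through
 * qla2xxx_wake_dpc().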
4005 */ 4006 set_bit(VP_IDX_ACQUIRED, &vp->vp_flags); 4007 set_bit(REGISTER_FC4_NEEDED, &vp->dpc_flags); 4008 set_bit(REGISTER_FDMI_NEEDED, &vp->dpc_flags); 4009 } 4010 set_bit(VP_DPC_NEEDED, &vha->dpc_flags); 4011 qla2xxx_wake_dpc(vha); 4012 } else if (rptid_entry->format == 2) { 4013 ql_dbg(ql_dbg_async, vha, 0x505f, 4014 "RIDA: format 2/N2N Primary port id %02x%02x%02x.\n", 4015 rptid_entry->port_id[2], rptid_entry->port_id[1], 4016 rptid_entry->port_id[0]); 4017 4018 ql_dbg(ql_dbg_async, vha, 0x5075, 4019 "N2N: Remote WWPN %8phC.\n", 4020 rptid_entry->u.f2.port_name); 4021 4022 /* N2N. direct connect */ 4023 ha->current_topology = ISP_CFG_N; 4024 ha->flags.rida_fmt2 = 1; 4025 vha->d_id.b.domain = rptid_entry->port_id[2]; 4026 vha->d_id.b.area = rptid_entry->port_id[1]; 4027 vha->d_id.b.al_pa = rptid_entry->port_id[0]; 4028 4029 ha->flags.n2n_ae = 1; 4030 spin_lock_irqsave(&ha->vport_slock, flags); 4031 qlt_update_vp_map(vha, SET_AL_PA); 4032 spin_unlock_irqrestore(&ha->vport_slock, flags); 4033 4034 list_for_each_entry(fcport, &vha->vp_fcports, list) { 4035 fcport->scan_state = QLA_FCPORT_SCAN; 4036 fcport->n2n_flag = 0; 4037 } 4038 4039 fcport = qla2x00_find_fcport_by_wwpn(vha, 4040 rptid_entry->u.f2.port_name, 1); 4041 4042 if (fcport) { 4043 fcport->login_retry = vha->hw->login_retry_count; 4044 fcport->plogi_nack_done_deadline = jiffies + HZ; 4045 fcport->scan_state = QLA_FCPORT_FOUND; 4046 fcport->keep_nport_handle = 1; 4047 fcport->n2n_flag = 1; 4048 fcport->d_id.b.domain = 4049 rptid_entry->u.f2.remote_nport_id[2]; 4050 fcport->d_id.b.area = 4051 rptid_entry->u.f2.remote_nport_id[1]; 4052 fcport->d_id.b.al_pa = 4053 rptid_entry->u.f2.remote_nport_id[0]; 4054 } 4055 } 4056 } 4057 4058 /* 4059 * qla24xx_modify_vp_config 4060 * Change VP configuration for vha 4061 * 4062 * Input: 4063 * vha = adapter block pointer. 4064 * 4065 * Returns: 4066 * qla2xxx local function return status code. 4067 * 4068 * Context: 4069 * Kernel context. 
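* Note: the VP Config IOCB is issued through the base vha (physical port).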
4070 */ 4071 int 4072 qla24xx_modify_vp_config(scsi_qla_host_t *vha) 4073 { 4074 int rval; 4075 struct vp_config_entry_24xx *vpmod; 4076 dma_addr_t vpmod_dma; 4077 struct qla_hw_data *ha = vha->hw; 4078 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 4079 4080 /* This can be called by the parent */ 4081 4082 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10bb, 4083 "Entered %s.\n", __func__); 4084 4085 vpmod = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma); 4086 if (!vpmod) { 4087 ql_log(ql_log_warn, vha, 0x10bc, 4088 "Failed to allocate modify VP IOCB.\n"); 4089 return QLA_MEMORY_ALLOC_FAILED; 4090 } 4091 4092 vpmod->entry_type = VP_CONFIG_IOCB_TYPE; 4093 vpmod->entry_count = 1; 4094 vpmod->command = VCT_COMMAND_MOD_ENABLE_VPS; 4095 vpmod->vp_count = 1; 4096 vpmod->vp_index1 = vha->vp_idx; 4097 vpmod->options_idx1 = BIT_3|BIT_4|BIT_5; 4098 4099 qlt_modify_vp_config(vha, vpmod); 4100 4101 memcpy(vpmod->node_name_idx1, vha->node_name, WWN_SIZE); 4102 memcpy(vpmod->port_name_idx1, vha->port_name, WWN_SIZE); 4103 4104 4105 rval = qla2x00_issue_iocb(base_vha, vpmod, vpmod_dma, 0); 4106 if (rval != QLA_SUCCESS) { 4107 ql_dbg(ql_dbg_mbx, vha, 0x10bd, 4108 "Failed to issue VP config IOCB (%x).\n", rval); 4109 } else if (vpmod->comp_status != 0) { 4110 ql_dbg(ql_dbg_mbx, vha, 0x10be, 4111 "Failed to complete IOCB -- error status (%x).\n", 4112 vpmod->comp_status); 4113 rval = QLA_FUNCTION_FAILED; 4119 } else { 4120 /* EMPTY */ 4121 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c0, 4122 "Done %s.\n", __func__); 4123 fc_vport_set_state(vha->fc_vport, FC_VPORT_INITIALIZING); 4124 } 4125 dma_pool_free(ha->s_dma_pool, vpmod, vpmod_dma); 4126 4127 return rval; 4128 } 4129 4130 /* 4131 * qla2x00_send_change_request 4132 * Register for or disable RSCN notifications from the fabric controller 4133 * 4134 * Input: 4135 * vha = adapter block pointer 4136 * format = registration format: 4137 * 0 - Reserved 4138 * 1 - Fabric detected registration 4139 * 2 - N_port detected registration 4140 * 3 - Full registration 4141 * FF - clear registration 4142 * vp_idx = Virtual port index 4143 * 4144 * Returns: 4145 * qla2x00 local function return status code.
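* (BIT_1 is returned if the mailbox command fails or the firmware does not report MBS_COMMAND_COMPLETE.)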
4146 * 4147 * Context: 4148 * Kernel Context 4149 */ 4150 4151 int 4152 qla2x00_send_change_request(scsi_qla_host_t *vha, uint16_t format, 4153 uint16_t vp_idx) 4154 { 4155 int rval; 4156 mbx_cmd_t mc; 4157 mbx_cmd_t *mcp = &mc; 4158 4159 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c7, 4160 "Entered %s.\n", __func__); 4161 4162 mcp->mb[0] = MBC_SEND_CHANGE_REQUEST; 4163 mcp->mb[1] = format; 4164 mcp->mb[9] = vp_idx; 4165 mcp->out_mb = MBX_9|MBX_1|MBX_0; 4166 mcp->in_mb = MBX_0|MBX_1; 4167 mcp->tov = MBX_TOV_SECONDS; 4168 mcp->flags = 0; 4169 rval = qla2x00_mailbox_command(vha, mcp); 4170 4171 if (rval == QLA_SUCCESS) { 4172 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) { 4173 rval = BIT_1; 4174 } 4175 } else 4176 rval = BIT_1; 4177 4178 return rval; 4179 } 4180 4181 int 4182 qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr, 4183 uint32_t size) 4184 { 4185 int rval; 4186 mbx_cmd_t mc; 4187 mbx_cmd_t *mcp = &mc; 4188 4189 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1009, 4190 "Entered %s.\n", __func__); 4191 4192 if (MSW(addr) || IS_FWI2_CAPABLE(vha->hw)) { 4193 mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED; 4194 mcp->mb[8] = MSW(addr); 4195 mcp->out_mb = MBX_8|MBX_0; 4196 } else { 4197 mcp->mb[0] = MBC_DUMP_RISC_RAM; 4198 mcp->out_mb = MBX_0; 4199 } 4200 mcp->mb[1] = LSW(addr); 4201 mcp->mb[2] = MSW(req_dma); 4202 mcp->mb[3] = LSW(req_dma); 4203 mcp->mb[6] = MSW(MSD(req_dma)); 4204 mcp->mb[7] = LSW(MSD(req_dma)); 4205 mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1; 4206 if (IS_FWI2_CAPABLE(vha->hw)) { 4207 mcp->mb[4] = MSW(size); 4208 mcp->mb[5] = LSW(size); 4209 mcp->out_mb |= MBX_5|MBX_4; 4210 } else { 4211 mcp->mb[4] = LSW(size); 4212 mcp->out_mb |= MBX_4; 4213 } 4214 4215 mcp->in_mb = MBX_0; 4216 mcp->tov = MBX_TOV_SECONDS; 4217 mcp->flags = 0; 4218 rval = qla2x00_mailbox_command(vha, mcp); 4219 4220 if (rval != QLA_SUCCESS) { 4221 ql_dbg(ql_dbg_mbx, vha, 0x1008, 4222 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4223 } else { 4224 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1007, 4225 "Done %s.\n", __func__); 4226 } 4227 4228 return rval; 4229 } 4230 /* 84XX Support **************************************************************/ 4231 4232 struct cs84xx_mgmt_cmd { 4233 union { 4234 struct verify_chip_entry_84xx req; 4235 struct verify_chip_rsp_84xx rsp; 4236 } p; 4237 }; 4238 4239 int 4240 qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status) 4241 { 4242 int rval, retry; 4243 struct cs84xx_mgmt_cmd *mn; 4244 dma_addr_t mn_dma; 4245 uint16_t options; 4246 unsigned long flags; 4247 struct qla_hw_data *ha = vha->hw; 4248 4249 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c8, 4250 "Entered %s.\n", __func__); 4251 4252 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma); 4253 if (mn == NULL) { 4254 return QLA_MEMORY_ALLOC_FAILED; 4255 } 4256 4257 /* Force Update? */ 4258 options = ha->cs84xx->fw_update ? VCO_FORCE_UPDATE : 0; 4259 /* Diagnostic firmware? */ 4260 /* options |= MENLO_DIAG_FW; */ 4261 /* We update the firmware with only one data sequence. 
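* hence VCO_END_OF_DATA is set on this single Verify Chip IOCB.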
*/ 4262 options |= VCO_END_OF_DATA; 4263 4264 do { 4265 retry = 0; 4266 memset(mn, 0, sizeof(*mn)); 4267 mn->p.req.entry_type = VERIFY_CHIP_IOCB_TYPE; 4268 mn->p.req.entry_count = 1; 4269 mn->p.req.options = cpu_to_le16(options); 4270 4271 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111c, 4272 "Dump of Verify Request.\n"); 4273 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111e, 4274 mn, sizeof(*mn)); 4275 4276 rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120); 4277 if (rval != QLA_SUCCESS) { 4278 ql_dbg(ql_dbg_mbx, vha, 0x10cb, 4279 "Failed to issue verify IOCB (%x).\n", rval); 4280 goto verify_done; 4281 } 4282 4283 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1110, 4284 "Dump of Verify Response.\n"); 4285 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1118, 4286 mn, sizeof(*mn)); 4287 4288 status[0] = le16_to_cpu(mn->p.rsp.comp_status); 4289 status[1] = status[0] == CS_VCS_CHIP_FAILURE ? 4290 le16_to_cpu(mn->p.rsp.failure_code) : 0; 4291 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ce, 4292 "cs=%x fc=%x.\n", status[0], status[1]); 4293 4294 if (status[0] != CS_COMPLETE) { 4295 rval = QLA_FUNCTION_FAILED; 4296 if (!(options & VCO_DONT_UPDATE_FW)) { 4297 ql_dbg(ql_dbg_mbx, vha, 0x10cf, 4298 "Firmware update failed. Retrying " 4299 "without update firmware.\n"); 4300 options |= VCO_DONT_UPDATE_FW; 4301 options &= ~VCO_FORCE_UPDATE; 4302 retry = 1; 4303 } 4304 } else { 4305 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d0, 4306 "Firmware updated to %x.\n", 4307 le32_to_cpu(mn->p.rsp.fw_ver)); 4308 4309 /* NOTE: we only update OP firmware. */ 4310 spin_lock_irqsave(&ha->cs84xx->access_lock, flags); 4311 ha->cs84xx->op_fw_version = 4312 le32_to_cpu(mn->p.rsp.fw_ver); 4313 spin_unlock_irqrestore(&ha->cs84xx->access_lock, 4314 flags); 4315 } 4316 } while (retry); 4317 4318 verify_done: 4319 dma_pool_free(ha->s_dma_pool, mn, mn_dma); 4320 4321 if (rval != QLA_SUCCESS) { 4322 ql_dbg(ql_dbg_mbx, vha, 0x10d1, 4323 "Failed=%x.\n", rval); 4324 } else { 4325 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d2, 4326 "Done %s.\n", __func__); 4327 } 4328 4329 return rval; 4330 } 4331 4332 int 4333 qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req) 4334 { 4335 int rval; 4336 unsigned long flags; 4337 mbx_cmd_t mc; 4338 mbx_cmd_t *mcp = &mc; 4339 struct qla_hw_data *ha = vha->hw; 4340 4341 if (!ha->flags.fw_started) 4342 return QLA_SUCCESS; 4343 4344 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d3, 4345 "Entered %s.\n", __func__); 4346 4347 if (IS_SHADOW_REG_CAPABLE(ha)) 4348 req->options |= BIT_13; 4349 4350 mcp->mb[0] = MBC_INITIALIZE_MULTIQ; 4351 mcp->mb[1] = req->options; 4352 mcp->mb[2] = MSW(LSD(req->dma)); 4353 mcp->mb[3] = LSW(LSD(req->dma)); 4354 mcp->mb[6] = MSW(MSD(req->dma)); 4355 mcp->mb[7] = LSW(MSD(req->dma)); 4356 mcp->mb[5] = req->length; 4357 if (req->rsp) 4358 mcp->mb[10] = req->rsp->id; 4359 mcp->mb[12] = req->qos; 4360 mcp->mb[11] = req->vp_idx; 4361 mcp->mb[13] = req->rid; 4362 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) 4363 mcp->mb[15] = 0; 4364 4365 mcp->mb[4] = req->id; 4366 /* que in ptr index */ 4367 mcp->mb[8] = 0; 4368 /* que out ptr index */ 4369 mcp->mb[9] = *req->out_ptr = 0; 4370 mcp->out_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7| 4371 MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 4372 mcp->in_mb = MBX_0; 4373 mcp->flags = MBX_DMA_OUT; 4374 mcp->tov = MBX_TOV_SECONDS * 2; 4375 4376 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) || 4377 IS_QLA28XX(ha)) 4378 mcp->in_mb |= MBX_1; 4379 if (IS_QLA83XX(ha) || 
IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 4380 mcp->out_mb |= MBX_15; 4381 /* debug q create issue in SR-IOV */ 4382 mcp->in_mb |= MBX_9 | MBX_8 | MBX_7; 4383 } 4384 4385 spin_lock_irqsave(&ha->hardware_lock, flags); 4386 if (!(req->options & BIT_0)) { 4387 WRT_REG_DWORD(req->req_q_in, 0); 4388 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 4389 WRT_REG_DWORD(req->req_q_out, 0); 4390 } 4391 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4392 4393 rval = qla2x00_mailbox_command(vha, mcp); 4394 if (rval != QLA_SUCCESS) { 4395 ql_dbg(ql_dbg_mbx, vha, 0x10d4, 4396 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4397 } else { 4398 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d5, 4399 "Done %s.\n", __func__); 4400 } 4401 4402 return rval; 4403 } 4404 4405 int 4406 qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp) 4407 { 4408 int rval; 4409 unsigned long flags; 4410 mbx_cmd_t mc; 4411 mbx_cmd_t *mcp = &mc; 4412 struct qla_hw_data *ha = vha->hw; 4413 4414 if (!ha->flags.fw_started) 4415 return QLA_SUCCESS; 4416 4417 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d6, 4418 "Entered %s.\n", __func__); 4419 4420 if (IS_SHADOW_REG_CAPABLE(ha)) 4421 rsp->options |= BIT_13; 4422 4423 mcp->mb[0] = MBC_INITIALIZE_MULTIQ; 4424 mcp->mb[1] = rsp->options; 4425 mcp->mb[2] = MSW(LSD(rsp->dma)); 4426 mcp->mb[3] = LSW(LSD(rsp->dma)); 4427 mcp->mb[6] = MSW(MSD(rsp->dma)); 4428 mcp->mb[7] = LSW(MSD(rsp->dma)); 4429 mcp->mb[5] = rsp->length; 4430 mcp->mb[14] = rsp->msix->entry; 4431 mcp->mb[13] = rsp->rid; 4432 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) 4433 mcp->mb[15] = 0; 4434 4435 mcp->mb[4] = rsp->id; 4436 /* que in ptr index */ 4437 mcp->mb[8] = *rsp->in_ptr = 0; 4438 /* que out ptr index */ 4439 mcp->mb[9] = 0; 4440 mcp->out_mb = MBX_14|MBX_13|MBX_9|MBX_8|MBX_7 4441 |MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 4442 mcp->in_mb = MBX_0; 4443 mcp->flags = MBX_DMA_OUT; 4444 mcp->tov = MBX_TOV_SECONDS * 2; 4445 4446 if (IS_QLA81XX(ha)) { 4447 mcp->out_mb |= MBX_12|MBX_11|MBX_10; 4448 mcp->in_mb |= MBX_1; 4449 } else if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 4450 mcp->out_mb |= MBX_15|MBX_12|MBX_11|MBX_10; 4451 mcp->in_mb |= MBX_1; 4452 /* debug q create issue in SR-IOV */ 4453 mcp->in_mb |= MBX_9 | MBX_8 | MBX_7; 4454 } 4455 4456 spin_lock_irqsave(&ha->hardware_lock, flags); 4457 if (!(rsp->options & BIT_0)) { 4458 WRT_REG_DWORD(rsp->rsp_q_out, 0); 4459 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 4460 WRT_REG_DWORD(rsp->rsp_q_in, 0); 4461 } 4462 4463 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4464 4465 rval = qla2x00_mailbox_command(vha, mcp); 4466 if (rval != QLA_SUCCESS) { 4467 ql_dbg(ql_dbg_mbx, vha, 0x10d7, 4468 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4469 } else { 4470 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d8, 4471 "Done %s.\n", __func__); 4472 } 4473 4474 return rval; 4475 } 4476 4477 int 4478 qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb) 4479 { 4480 int rval; 4481 mbx_cmd_t mc; 4482 mbx_cmd_t *mcp = &mc; 4483 4484 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d9, 4485 "Entered %s.\n", __func__); 4486 4487 mcp->mb[0] = MBC_IDC_ACK; 4488 memcpy(&mcp->mb[1], mb, QLA_IDC_ACK_REGS * sizeof(uint16_t)); 4489 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 4490 mcp->in_mb = MBX_0; 4491 mcp->tov = MBX_TOV_SECONDS; 4492 mcp->flags = 0; 4493 rval = qla2x00_mailbox_command(vha, mcp); 4494 4495 if (rval != QLA_SUCCESS) { 4496 ql_dbg(ql_dbg_mbx, vha, 0x10da, 4497 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4498 } else 
{ 4499 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10db, 4500 "Done %s.\n", __func__); 4501 } 4502 4503 return rval; 4504 } 4505 4506 int 4507 qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size) 4508 { 4509 int rval; 4510 mbx_cmd_t mc; 4511 mbx_cmd_t *mcp = &mc; 4512 4513 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10dc, 4514 "Entered %s.\n", __func__); 4515 4516 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) && 4517 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw)) 4518 return QLA_FUNCTION_FAILED; 4519 4520 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; 4521 mcp->mb[1] = FAC_OPT_CMD_GET_SECTOR_SIZE; 4522 mcp->out_mb = MBX_1|MBX_0; 4523 mcp->in_mb = MBX_1|MBX_0; 4524 mcp->tov = MBX_TOV_SECONDS; 4525 mcp->flags = 0; 4526 rval = qla2x00_mailbox_command(vha, mcp); 4527 4528 if (rval != QLA_SUCCESS) { 4529 ql_dbg(ql_dbg_mbx, vha, 0x10dd, 4530 "Failed=%x mb[0]=%x mb[1]=%x.\n", 4531 rval, mcp->mb[0], mcp->mb[1]); 4532 } else { 4533 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10de, 4534 "Done %s.\n", __func__); 4535 *sector_size = mcp->mb[1]; 4536 } 4537 4538 return rval; 4539 } 4540 4541 int 4542 qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable) 4543 { 4544 int rval; 4545 mbx_cmd_t mc; 4546 mbx_cmd_t *mcp = &mc; 4547 4548 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) && 4549 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw)) 4550 return QLA_FUNCTION_FAILED; 4551 4552 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10df, 4553 "Entered %s.\n", __func__); 4554 4555 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; 4556 mcp->mb[1] = enable ? FAC_OPT_CMD_WRITE_ENABLE : 4557 FAC_OPT_CMD_WRITE_PROTECT; 4558 mcp->out_mb = MBX_1|MBX_0; 4559 mcp->in_mb = MBX_1|MBX_0; 4560 mcp->tov = MBX_TOV_SECONDS; 4561 mcp->flags = 0; 4562 rval = qla2x00_mailbox_command(vha, mcp); 4563 4564 if (rval != QLA_SUCCESS) { 4565 ql_dbg(ql_dbg_mbx, vha, 0x10e0, 4566 "Failed=%x mb[0]=%x mb[1]=%x.\n", 4567 rval, mcp->mb[0], mcp->mb[1]); 4568 } else { 4569 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e1, 4570 "Done %s.\n", __func__); 4571 } 4572 4573 return rval; 4574 } 4575 4576 int 4577 qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish) 4578 { 4579 int rval; 4580 mbx_cmd_t mc; 4581 mbx_cmd_t *mcp = &mc; 4582 4583 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) && 4584 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw)) 4585 return QLA_FUNCTION_FAILED; 4586 4587 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2, 4588 "Entered %s.\n", __func__); 4589 4590 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; 4591 mcp->mb[1] = FAC_OPT_CMD_ERASE_SECTOR; 4592 mcp->mb[2] = LSW(start); 4593 mcp->mb[3] = MSW(start); 4594 mcp->mb[4] = LSW(finish); 4595 mcp->mb[5] = MSW(finish); 4596 mcp->out_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 4597 mcp->in_mb = MBX_2|MBX_1|MBX_0; 4598 mcp->tov = MBX_TOV_SECONDS; 4599 mcp->flags = 0; 4600 rval = qla2x00_mailbox_command(vha, mcp); 4601 4602 if (rval != QLA_SUCCESS) { 4603 ql_dbg(ql_dbg_mbx, vha, 0x10e3, 4604 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", 4605 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); 4606 } else { 4607 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4, 4608 "Done %s.\n", __func__); 4609 } 4610 4611 return rval; 4612 } 4613 4614 int 4615 qla81xx_fac_semaphore_access(scsi_qla_host_t *vha, int lock) 4616 { 4617 int rval = QLA_SUCCESS; 4618 mbx_cmd_t mc; 4619 mbx_cmd_t *mcp = &mc; 4620 struct qla_hw_data *ha = vha->hw; 4621 4622 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && 4623 !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 4624 return rval; 4625 4626 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2, 4627 
"Entered %s.\n", __func__); 4628 4629 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; 4630 mcp->mb[1] = (lock ? FAC_OPT_CMD_LOCK_SEMAPHORE : 4631 FAC_OPT_CMD_UNLOCK_SEMAPHORE); 4632 mcp->out_mb = MBX_1|MBX_0; 4633 mcp->in_mb = MBX_1|MBX_0; 4634 mcp->tov = MBX_TOV_SECONDS; 4635 mcp->flags = 0; 4636 rval = qla2x00_mailbox_command(vha, mcp); 4637 4638 if (rval != QLA_SUCCESS) { 4639 ql_dbg(ql_dbg_mbx, vha, 0x10e3, 4640 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", 4641 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); 4642 } else { 4643 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4, 4644 "Done %s.\n", __func__); 4645 } 4646 4647 return rval; 4648 } 4649 4650 int 4651 qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha) 4652 { 4653 int rval = 0; 4654 mbx_cmd_t mc; 4655 mbx_cmd_t *mcp = &mc; 4656 4657 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e5, 4658 "Entered %s.\n", __func__); 4659 4660 mcp->mb[0] = MBC_RESTART_MPI_FW; 4661 mcp->out_mb = MBX_0; 4662 mcp->in_mb = MBX_0|MBX_1; 4663 mcp->tov = MBX_TOV_SECONDS; 4664 mcp->flags = 0; 4665 rval = qla2x00_mailbox_command(vha, mcp); 4666 4667 if (rval != QLA_SUCCESS) { 4668 ql_dbg(ql_dbg_mbx, vha, 0x10e6, 4669 "Failed=%x mb[0]=%x mb[1]=%x.\n", 4670 rval, mcp->mb[0], mcp->mb[1]); 4671 } else { 4672 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e7, 4673 "Done %s.\n", __func__); 4674 } 4675 4676 return rval; 4677 } 4678 4679 int 4680 qla82xx_set_driver_version(scsi_qla_host_t *vha, char *version) 4681 { 4682 int rval; 4683 mbx_cmd_t mc; 4684 mbx_cmd_t *mcp = &mc; 4685 int i; 4686 int len; 4687 uint16_t *str; 4688 struct qla_hw_data *ha = vha->hw; 4689 4690 if (!IS_P3P_TYPE(ha)) 4691 return QLA_FUNCTION_FAILED; 4692 4693 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117b, 4694 "Entered %s.\n", __func__); 4695 4696 str = (void *)version; 4697 len = strlen(version); 4698 4699 mcp->mb[0] = MBC_SET_RNID_PARAMS; 4700 mcp->mb[1] = RNID_TYPE_SET_VERSION << 8; 4701 mcp->out_mb = MBX_1|MBX_0; 4702 for (i = 4; i < 16 && len; i++, str++, len -= 2) { 4703 mcp->mb[i] = cpu_to_le16p(str); 4704 mcp->out_mb |= 1<<i; 4705 } 4706 for (; i < 16; i++) { 4707 mcp->mb[i] = 0; 4708 mcp->out_mb |= 1<<i; 4709 } 4710 mcp->in_mb = MBX_1|MBX_0; 4711 mcp->tov = MBX_TOV_SECONDS; 4712 mcp->flags = 0; 4713 rval = qla2x00_mailbox_command(vha, mcp); 4714 4715 if (rval != QLA_SUCCESS) { 4716 ql_dbg(ql_dbg_mbx, vha, 0x117c, 4717 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]); 4718 } else { 4719 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117d, 4720 "Done %s.\n", __func__); 4721 } 4722 4723 return rval; 4724 } 4725 4726 int 4727 qla25xx_set_driver_version(scsi_qla_host_t *vha, char *version) 4728 { 4729 int rval; 4730 mbx_cmd_t mc; 4731 mbx_cmd_t *mcp = &mc; 4732 int len; 4733 uint16_t dwlen; 4734 uint8_t *str; 4735 dma_addr_t str_dma; 4736 struct qla_hw_data *ha = vha->hw; 4737 4738 if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha) || 4739 IS_P3P_TYPE(ha)) 4740 return QLA_FUNCTION_FAILED; 4741 4742 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117e, 4743 "Entered %s.\n", __func__); 4744 4745 str = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &str_dma); 4746 if (!str) { 4747 ql_log(ql_log_warn, vha, 0x117f, 4748 "Failed to allocate driver version param.\n"); 4749 return QLA_MEMORY_ALLOC_FAILED; 4750 } 4751 4752 memcpy(str, "\x7\x3\x11\x0", 4); 4753 dwlen = str[0]; 4754 len = dwlen * 4 - 4; 4755 memset(str + 4, 0, len); 4756 if (len > strlen(version)) 4757 len = strlen(version); 4758 memcpy(str + 4, version, len); 4759 4760 mcp->mb[0] = MBC_SET_RNID_PARAMS; 4761 mcp->mb[1] = 
RNID_TYPE_SET_VERSION << 8 | dwlen; 4762 mcp->mb[2] = MSW(LSD(str_dma)); 4763 mcp->mb[3] = LSW(LSD(str_dma)); 4764 mcp->mb[6] = MSW(MSD(str_dma)); 4765 mcp->mb[7] = LSW(MSD(str_dma)); 4766 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 4767 mcp->in_mb = MBX_1|MBX_0; 4768 mcp->tov = MBX_TOV_SECONDS; 4769 mcp->flags = 0; 4770 rval = qla2x00_mailbox_command(vha, mcp); 4771 4772 if (rval != QLA_SUCCESS) { 4773 ql_dbg(ql_dbg_mbx, vha, 0x1180, 4774 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]); 4775 } else { 4776 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1181, 4777 "Done %s.\n", __func__); 4778 } 4779 4780 dma_pool_free(ha->s_dma_pool, str, str_dma); 4781 4782 return rval; 4783 } 4784 4785 int 4786 qla24xx_get_port_login_templ(scsi_qla_host_t *vha, dma_addr_t buf_dma, 4787 void *buf, uint16_t bufsiz) 4788 { 4789 int rval, i; 4790 mbx_cmd_t mc; 4791 mbx_cmd_t *mcp = &mc; 4792 uint32_t *bp; 4793 4794 if (!IS_FWI2_CAPABLE(vha->hw)) 4795 return QLA_FUNCTION_FAILED; 4796 4797 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159, 4798 "Entered %s.\n", __func__); 4799 4800 mcp->mb[0] = MBC_GET_RNID_PARAMS; 4801 mcp->mb[1] = RNID_TYPE_PORT_LOGIN << 8; 4802 mcp->mb[2] = MSW(buf_dma); 4803 mcp->mb[3] = LSW(buf_dma); 4804 mcp->mb[6] = MSW(MSD(buf_dma)); 4805 mcp->mb[7] = LSW(MSD(buf_dma)); 4806 mcp->mb[8] = bufsiz/4; 4807 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 4808 mcp->in_mb = MBX_1|MBX_0; 4809 mcp->tov = MBX_TOV_SECONDS; 4810 mcp->flags = 0; 4811 rval = qla2x00_mailbox_command(vha, mcp); 4812 4813 if (rval != QLA_SUCCESS) { 4814 ql_dbg(ql_dbg_mbx, vha, 0x115a, 4815 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]); 4816 } else { 4817 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b, 4818 "Done %s.\n", __func__); 4819 bp = (uint32_t *) buf; 4820 for (i = 0; i < (bufsiz-4)/4; i++, bp++) 4821 *bp = le32_to_cpu(*bp); 4822 } 4823 4824 return rval; 4825 } 4826 4827 static int 4828 qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp) 4829 { 4830 int rval; 4831 mbx_cmd_t mc; 4832 mbx_cmd_t *mcp = &mc; 4833 4834 if (!IS_FWI2_CAPABLE(vha->hw)) 4835 return QLA_FUNCTION_FAILED; 4836 4837 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159, 4838 "Entered %s.\n", __func__); 4839 4840 mcp->mb[0] = MBC_GET_RNID_PARAMS; 4841 mcp->mb[1] = RNID_TYPE_ASIC_TEMP << 8; 4842 mcp->out_mb = MBX_1|MBX_0; 4843 mcp->in_mb = MBX_1|MBX_0; 4844 mcp->tov = MBX_TOV_SECONDS; 4845 mcp->flags = 0; 4846 rval = qla2x00_mailbox_command(vha, mcp); 4847 *temp = mcp->mb[1]; 4848 4849 if (rval != QLA_SUCCESS) { 4850 ql_dbg(ql_dbg_mbx, vha, 0x115a, 4851 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]); 4852 } else { 4853 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b, 4854 "Done %s.\n", __func__); 4855 } 4856 4857 return rval; 4858 } 4859 4860 int 4861 qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp, 4862 uint16_t dev, uint16_t off, uint16_t len, uint16_t opt) 4863 { 4864 int rval; 4865 mbx_cmd_t mc; 4866 mbx_cmd_t *mcp = &mc; 4867 struct qla_hw_data *ha = vha->hw; 4868 4869 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8, 4870 "Entered %s.\n", __func__); 4871 4872 if (!IS_FWI2_CAPABLE(ha)) 4873 return QLA_FUNCTION_FAILED; 4874 4875 if (len == 1) 4876 opt |= BIT_0; 4877 4878 mcp->mb[0] = MBC_READ_SFP; 4879 mcp->mb[1] = dev; 4880 mcp->mb[2] = MSW(sfp_dma); 4881 mcp->mb[3] = LSW(sfp_dma); 4882 mcp->mb[6] = MSW(MSD(sfp_dma)); 4883 mcp->mb[7] = LSW(MSD(sfp_dma)); 4884 mcp->mb[8] = len; 4885 mcp->mb[9] = off; 4886 mcp->mb[10] = opt; 4887 mcp->out_mb = 
MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 4888 mcp->in_mb = MBX_1|MBX_0; 4889 mcp->tov = MBX_TOV_SECONDS; 4890 mcp->flags = 0; 4891 rval = qla2x00_mailbox_command(vha, mcp); 4892 4893 if (opt & BIT_0) 4894 *sfp = mcp->mb[1]; 4895 4896 if (rval != QLA_SUCCESS) { 4897 ql_dbg(ql_dbg_mbx, vha, 0x10e9, 4898 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4899 if (mcp->mb[0] == MBS_COMMAND_ERROR && mcp->mb[1] == 0x22) { 4900 /* sfp is not there */ 4901 rval = QLA_INTERFACE_ERROR; 4902 } 4903 } else { 4904 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea, 4905 "Done %s.\n", __func__); 4906 } 4907 4908 return rval; 4909 } 4910 4911 int 4912 qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp, 4913 uint16_t dev, uint16_t off, uint16_t len, uint16_t opt) 4914 { 4915 int rval; 4916 mbx_cmd_t mc; 4917 mbx_cmd_t *mcp = &mc; 4918 struct qla_hw_data *ha = vha->hw; 4919 4920 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10eb, 4921 "Entered %s.\n", __func__); 4922 4923 if (!IS_FWI2_CAPABLE(ha)) 4924 return QLA_FUNCTION_FAILED; 4925 4926 if (len == 1) 4927 opt |= BIT_0; 4928 4929 if (opt & BIT_0) 4930 len = *sfp; 4931 4932 mcp->mb[0] = MBC_WRITE_SFP; 4933 mcp->mb[1] = dev; 4934 mcp->mb[2] = MSW(sfp_dma); 4935 mcp->mb[3] = LSW(sfp_dma); 4936 mcp->mb[6] = MSW(MSD(sfp_dma)); 4937 mcp->mb[7] = LSW(MSD(sfp_dma)); 4938 mcp->mb[8] = len; 4939 mcp->mb[9] = off; 4940 mcp->mb[10] = opt; 4941 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 4942 mcp->in_mb = MBX_1|MBX_0; 4943 mcp->tov = MBX_TOV_SECONDS; 4944 mcp->flags = 0; 4945 rval = qla2x00_mailbox_command(vha, mcp); 4946 4947 if (rval != QLA_SUCCESS) { 4948 ql_dbg(ql_dbg_mbx, vha, 0x10ec, 4949 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4950 } else { 4951 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ed, 4952 "Done %s.\n", __func__); 4953 } 4954 4955 return rval; 4956 } 4957 4958 int 4959 qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma, 4960 uint16_t size_in_bytes, uint16_t *actual_size) 4961 { 4962 int rval; 4963 mbx_cmd_t mc; 4964 mbx_cmd_t *mcp = &mc; 4965 4966 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ee, 4967 "Entered %s.\n", __func__); 4968 4969 if (!IS_CNA_CAPABLE(vha->hw)) 4970 return QLA_FUNCTION_FAILED; 4971 4972 mcp->mb[0] = MBC_GET_XGMAC_STATS; 4973 mcp->mb[2] = MSW(stats_dma); 4974 mcp->mb[3] = LSW(stats_dma); 4975 mcp->mb[6] = MSW(MSD(stats_dma)); 4976 mcp->mb[7] = LSW(MSD(stats_dma)); 4977 mcp->mb[8] = size_in_bytes >> 2; 4978 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0; 4979 mcp->in_mb = MBX_2|MBX_1|MBX_0; 4980 mcp->tov = MBX_TOV_SECONDS; 4981 mcp->flags = 0; 4982 rval = qla2x00_mailbox_command(vha, mcp); 4983 4984 if (rval != QLA_SUCCESS) { 4985 ql_dbg(ql_dbg_mbx, vha, 0x10ef, 4986 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", 4987 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); 4988 } else { 4989 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f0, 4990 "Done %s.\n", __func__); 4991 4992 4993 *actual_size = mcp->mb[2] << 2; 4994 } 4995 4996 return rval; 4997 } 4998 4999 int 5000 qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma, 5001 uint16_t size) 5002 { 5003 int rval; 5004 mbx_cmd_t mc; 5005 mbx_cmd_t *mcp = &mc; 5006 5007 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f1, 5008 "Entered %s.\n", __func__); 5009 5010 if (!IS_CNA_CAPABLE(vha->hw)) 5011 return QLA_FUNCTION_FAILED; 5012 5013 mcp->mb[0] = MBC_GET_DCBX_PARAMS; 5014 mcp->mb[1] = 0; 5015 mcp->mb[2] = MSW(tlv_dma); 5016 mcp->mb[3] = LSW(tlv_dma); 5017 mcp->mb[6] = MSW(MSD(tlv_dma)); 5018 mcp->mb[7] = 
LSW(MSD(tlv_dma)); 5019 mcp->mb[8] = size; 5020 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 5021 mcp->in_mb = MBX_2|MBX_1|MBX_0; 5022 mcp->tov = MBX_TOV_SECONDS; 5023 mcp->flags = 0; 5024 rval = qla2x00_mailbox_command(vha, mcp); 5025 5026 if (rval != QLA_SUCCESS) { 5027 ql_dbg(ql_dbg_mbx, vha, 0x10f2, 5028 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", 5029 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); 5030 } else { 5031 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f3, 5032 "Done %s.\n", __func__); 5033 } 5034 5035 return rval; 5036 } 5037 5038 int 5039 qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data) 5040 { 5041 int rval; 5042 mbx_cmd_t mc; 5043 mbx_cmd_t *mcp = &mc; 5044 5045 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f4, 5046 "Entered %s.\n", __func__); 5047 5048 if (!IS_FWI2_CAPABLE(vha->hw)) 5049 return QLA_FUNCTION_FAILED; 5050 5051 mcp->mb[0] = MBC_READ_RAM_EXTENDED; 5052 mcp->mb[1] = LSW(risc_addr); 5053 mcp->mb[8] = MSW(risc_addr); 5054 mcp->out_mb = MBX_8|MBX_1|MBX_0; 5055 mcp->in_mb = MBX_3|MBX_2|MBX_0; 5056 mcp->tov = 30; 5057 mcp->flags = 0; 5058 rval = qla2x00_mailbox_command(vha, mcp); 5059 if (rval != QLA_SUCCESS) { 5060 ql_dbg(ql_dbg_mbx, vha, 0x10f5, 5061 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5062 } else { 5063 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f6, 5064 "Done %s.\n", __func__); 5065 *data = mcp->mb[3] << 16 | mcp->mb[2]; 5066 } 5067 5068 return rval; 5069 } 5070 5071 int 5072 qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, 5073 uint16_t *mresp) 5074 { 5075 int rval; 5076 mbx_cmd_t mc; 5077 mbx_cmd_t *mcp = &mc; 5078 5079 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f7, 5080 "Entered %s.\n", __func__); 5081 5082 memset(mcp->mb, 0 , sizeof(mcp->mb)); 5083 mcp->mb[0] = MBC_DIAGNOSTIC_LOOP_BACK; 5084 mcp->mb[1] = mreq->options | BIT_6; // BIT_6 specifies 64 bit addressing 5085 5086 /* transfer count */ 5087 mcp->mb[10] = LSW(mreq->transfer_size); 5088 mcp->mb[11] = MSW(mreq->transfer_size); 5089 5090 /* send data address */ 5091 mcp->mb[14] = LSW(mreq->send_dma); 5092 mcp->mb[15] = MSW(mreq->send_dma); 5093 mcp->mb[20] = LSW(MSD(mreq->send_dma)); 5094 mcp->mb[21] = MSW(MSD(mreq->send_dma)); 5095 5096 /* receive data address */ 5097 mcp->mb[16] = LSW(mreq->rcv_dma); 5098 mcp->mb[17] = MSW(mreq->rcv_dma); 5099 mcp->mb[6] = LSW(MSD(mreq->rcv_dma)); 5100 mcp->mb[7] = MSW(MSD(mreq->rcv_dma)); 5101 5102 /* Iteration count */ 5103 mcp->mb[18] = LSW(mreq->iteration_count); 5104 mcp->mb[19] = MSW(mreq->iteration_count); 5105 5106 mcp->out_mb = MBX_21|MBX_20|MBX_19|MBX_18|MBX_17|MBX_16|MBX_15| 5107 MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0; 5108 if (IS_CNA_CAPABLE(vha->hw)) 5109 mcp->out_mb |= MBX_2; 5110 mcp->in_mb = MBX_19|MBX_18|MBX_3|MBX_2|MBX_1|MBX_0; 5111 5112 mcp->buf_size = mreq->transfer_size; 5113 mcp->tov = MBX_TOV_SECONDS; 5114 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; 5115 5116 rval = qla2x00_mailbox_command(vha, mcp); 5117 5118 if (rval != QLA_SUCCESS) { 5119 ql_dbg(ql_dbg_mbx, vha, 0x10f8, 5120 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[18]=%x " 5121 "mb[19]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], 5122 mcp->mb[3], mcp->mb[18], mcp->mb[19]); 5123 } else { 5124 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f9, 5125 "Done %s.\n", __func__); 5126 } 5127 5128 /* Copy mailbox information */ 5129 memcpy( mresp, mcp->mb, 64); 5130 return rval; 5131 } 5132 5133 int 5134 qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, 5135 uint16_t *mresp) 5136 { 
5137 int rval; 5138 mbx_cmd_t mc; 5139 mbx_cmd_t *mcp = &mc; 5140 struct qla_hw_data *ha = vha->hw; 5141 5142 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fa, 5143 "Entered %s.\n", __func__); 5144 5145 memset(mcp->mb, 0 , sizeof(mcp->mb)); 5146 mcp->mb[0] = MBC_DIAGNOSTIC_ECHO; 5147 /* BIT_6 specifies 64bit address */ 5148 mcp->mb[1] = mreq->options | BIT_15 | BIT_6; 5149 if (IS_CNA_CAPABLE(ha)) { 5150 mcp->mb[2] = vha->fcoe_fcf_idx; 5151 } 5152 mcp->mb[16] = LSW(mreq->rcv_dma); 5153 mcp->mb[17] = MSW(mreq->rcv_dma); 5154 mcp->mb[6] = LSW(MSD(mreq->rcv_dma)); 5155 mcp->mb[7] = MSW(MSD(mreq->rcv_dma)); 5156 5157 mcp->mb[10] = LSW(mreq->transfer_size); 5158 5159 mcp->mb[14] = LSW(mreq->send_dma); 5160 mcp->mb[15] = MSW(mreq->send_dma); 5161 mcp->mb[20] = LSW(MSD(mreq->send_dma)); 5162 mcp->mb[21] = MSW(MSD(mreq->send_dma)); 5163 5164 mcp->out_mb = MBX_21|MBX_20|MBX_17|MBX_16|MBX_15| 5165 MBX_14|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0; 5166 if (IS_CNA_CAPABLE(ha)) 5167 mcp->out_mb |= MBX_2; 5168 5169 mcp->in_mb = MBX_0; 5170 if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || 5171 IS_CNA_CAPABLE(ha) || IS_QLA2031(ha)) 5172 mcp->in_mb |= MBX_1; 5173 if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha)) 5174 mcp->in_mb |= MBX_3; 5175 5176 mcp->tov = MBX_TOV_SECONDS; 5177 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; 5178 mcp->buf_size = mreq->transfer_size; 5179 5180 rval = qla2x00_mailbox_command(vha, mcp); 5181 5182 if (rval != QLA_SUCCESS) { 5183 ql_dbg(ql_dbg_mbx, vha, 0x10fb, 5184 "Failed=%x mb[0]=%x mb[1]=%x.\n", 5185 rval, mcp->mb[0], mcp->mb[1]); 5186 } else { 5187 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fc, 5188 "Done %s.\n", __func__); 5189 } 5190 5191 /* Copy mailbox information */ 5192 memcpy(mresp, mcp->mb, 64); 5193 return rval; 5194 } 5195 5196 int 5197 qla84xx_reset_chip(scsi_qla_host_t *vha, uint16_t enable_diagnostic) 5198 { 5199 int rval; 5200 mbx_cmd_t mc; 5201 mbx_cmd_t *mcp = &mc; 5202 5203 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fd, 5204 "Entered %s enable_diag=%d.\n", __func__, enable_diagnostic); 5205 5206 mcp->mb[0] = MBC_ISP84XX_RESET; 5207 mcp->mb[1] = enable_diagnostic; 5208 mcp->out_mb = MBX_1|MBX_0; 5209 mcp->in_mb = MBX_1|MBX_0; 5210 mcp->tov = MBX_TOV_SECONDS; 5211 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; 5212 rval = qla2x00_mailbox_command(vha, mcp); 5213 5214 if (rval != QLA_SUCCESS) 5215 ql_dbg(ql_dbg_mbx, vha, 0x10fe, "Failed=%x.\n", rval); 5216 else 5217 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ff, 5218 "Done %s.\n", __func__); 5219 5220 return rval; 5221 } 5222 5223 int 5224 qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data) 5225 { 5226 int rval; 5227 mbx_cmd_t mc; 5228 mbx_cmd_t *mcp = &mc; 5229 5230 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1100, 5231 "Entered %s.\n", __func__); 5232 5233 if (!IS_FWI2_CAPABLE(vha->hw)) 5234 return QLA_FUNCTION_FAILED; 5235 5236 mcp->mb[0] = MBC_WRITE_RAM_WORD_EXTENDED; 5237 mcp->mb[1] = LSW(risc_addr); 5238 mcp->mb[2] = LSW(data); 5239 mcp->mb[3] = MSW(data); 5240 mcp->mb[8] = MSW(risc_addr); 5241 mcp->out_mb = MBX_8|MBX_3|MBX_2|MBX_1|MBX_0; 5242 mcp->in_mb = MBX_1|MBX_0; 5243 mcp->tov = 30; 5244 mcp->flags = 0; 5245 rval = qla2x00_mailbox_command(vha, mcp); 5246 if (rval != QLA_SUCCESS) { 5247 ql_dbg(ql_dbg_mbx, vha, 0x1101, 5248 "Failed=%x mb[0]=%x mb[1]=%x.\n", 5249 rval, mcp->mb[0], mcp->mb[1]); 5250 } else { 5251 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1102, 5252 "Done %s.\n", __func__); 5253 } 5254 5255 return rval; 5256 } 5257 5258 int 5259 qla81xx_write_mpi_register(scsi_qla_host_t *vha, 
uint16_t *mb) 5260 { 5261 int rval; 5262 uint32_t stat, timer; 5263 uint16_t mb0 = 0; 5264 struct qla_hw_data *ha = vha->hw; 5265 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 5266 5267 rval = QLA_SUCCESS; 5268 5269 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1103, 5270 "Entered %s.\n", __func__); 5271 5272 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); 5273 5274 /* Write the MBC data to the registers */ 5275 WRT_REG_WORD(®->mailbox0, MBC_WRITE_MPI_REGISTER); 5276 WRT_REG_WORD(®->mailbox1, mb[0]); 5277 WRT_REG_WORD(®->mailbox2, mb[1]); 5278 WRT_REG_WORD(®->mailbox3, mb[2]); 5279 WRT_REG_WORD(®->mailbox4, mb[3]); 5280 5281 WRT_REG_DWORD(®->hccr, HCCRX_SET_HOST_INT); 5282 5283 /* Poll for MBC interrupt */ 5284 for (timer = 6000000; timer; timer--) { 5285 /* Check for pending interrupts. */ 5286 stat = RD_REG_DWORD(®->host_status); 5287 if (stat & HSRX_RISC_INT) { 5288 stat &= 0xff; 5289 5290 if (stat == 0x1 || stat == 0x2 || 5291 stat == 0x10 || stat == 0x11) { 5292 set_bit(MBX_INTERRUPT, 5293 &ha->mbx_cmd_flags); 5294 mb0 = RD_REG_WORD(®->mailbox0); 5295 WRT_REG_DWORD(®->hccr, 5296 HCCRX_CLR_RISC_INT); 5297 RD_REG_DWORD(®->hccr); 5298 break; 5299 } 5300 } 5301 udelay(5); 5302 } 5303 5304 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) 5305 rval = mb0 & MBS_MASK; 5306 else 5307 rval = QLA_FUNCTION_FAILED; 5308 5309 if (rval != QLA_SUCCESS) { 5310 ql_dbg(ql_dbg_mbx, vha, 0x1104, 5311 "Failed=%x mb[0]=%x.\n", rval, mb[0]); 5312 } else { 5313 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1105, 5314 "Done %s.\n", __func__); 5315 } 5316 5317 return rval; 5318 } 5319 5320 /* Set the specified data rate */ 5321 int 5322 qla2x00_set_data_rate(scsi_qla_host_t *vha, uint16_t mode) 5323 { 5324 int rval; 5325 mbx_cmd_t mc; 5326 mbx_cmd_t *mcp = &mc; 5327 struct qla_hw_data *ha = vha->hw; 5328 uint16_t val; 5329 5330 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106, 5331 "Entered %s speed:0x%x mode:0x%x.\n", __func__, ha->set_data_rate, 5332 mode); 5333 5334 if (!IS_FWI2_CAPABLE(ha)) 5335 return QLA_FUNCTION_FAILED; 5336 5337 memset(mcp, 0, sizeof(*mcp)); 5338 switch (ha->set_data_rate) { 5339 case PORT_SPEED_AUTO: 5340 case PORT_SPEED_4GB: 5341 case PORT_SPEED_8GB: 5342 case PORT_SPEED_16GB: 5343 case PORT_SPEED_32GB: 5344 val = ha->set_data_rate; 5345 break; 5346 default: 5347 ql_log(ql_log_warn, vha, 0x1199, 5348 "Unrecognized speed setting:%d. 
Setting Autoneg\n", 5349 ha->set_data_rate); 5350 val = ha->set_data_rate = PORT_SPEED_AUTO; 5351 break; 5352 } 5353 5354 mcp->mb[0] = MBC_DATA_RATE; 5355 mcp->mb[1] = mode; 5356 mcp->mb[2] = val; 5357 5358 mcp->out_mb = MBX_2|MBX_1|MBX_0; 5359 mcp->in_mb = MBX_2|MBX_1|MBX_0; 5360 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) 5361 mcp->in_mb |= MBX_4|MBX_3; 5362 mcp->tov = MBX_TOV_SECONDS; 5363 mcp->flags = 0; 5364 rval = qla2x00_mailbox_command(vha, mcp); 5365 if (rval != QLA_SUCCESS) { 5366 ql_dbg(ql_dbg_mbx, vha, 0x1107, 5367 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5368 } else { 5369 if (mcp->mb[1] != 0x7) 5370 ql_dbg(ql_dbg_mbx, vha, 0x1179, 5371 "Speed set:0x%x\n", mcp->mb[1]); 5372 5373 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108, 5374 "Done %s.\n", __func__); 5375 } 5376 5377 return rval; 5378 } 5379 5380 int 5381 qla2x00_get_data_rate(scsi_qla_host_t *vha) 5382 { 5383 int rval; 5384 mbx_cmd_t mc; 5385 mbx_cmd_t *mcp = &mc; 5386 struct qla_hw_data *ha = vha->hw; 5387 5388 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106, 5389 "Entered %s.\n", __func__); 5390 5391 if (!IS_FWI2_CAPABLE(ha)) 5392 return QLA_FUNCTION_FAILED; 5393 5394 mcp->mb[0] = MBC_DATA_RATE; 5395 mcp->mb[1] = QLA_GET_DATA_RATE; 5396 mcp->out_mb = MBX_1|MBX_0; 5397 mcp->in_mb = MBX_2|MBX_1|MBX_0; 5398 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) 5399 mcp->in_mb |= MBX_3; 5400 mcp->tov = MBX_TOV_SECONDS; 5401 mcp->flags = 0; 5402 rval = qla2x00_mailbox_command(vha, mcp); 5403 if (rval != QLA_SUCCESS) { 5404 ql_dbg(ql_dbg_mbx, vha, 0x1107, 5405 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5406 } else { 5407 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108, 5408 "Done %s.\n", __func__); 5409 if (mcp->mb[1] != 0x7) 5410 ha->link_data_rate = mcp->mb[1]; 5411 } 5412 5413 return rval; 5414 } 5415 5416 int 5417 qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb) 5418 { 5419 int rval; 5420 mbx_cmd_t mc; 5421 mbx_cmd_t *mcp = &mc; 5422 struct qla_hw_data *ha = vha->hw; 5423 5424 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1109, 5425 "Entered %s.\n", __func__); 5426 5427 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA8044(ha) && 5428 !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 5429 return QLA_FUNCTION_FAILED; 5430 mcp->mb[0] = MBC_GET_PORT_CONFIG; 5431 mcp->out_mb = MBX_0; 5432 mcp->in_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 5433 mcp->tov = MBX_TOV_SECONDS; 5434 mcp->flags = 0; 5435 5436 rval = qla2x00_mailbox_command(vha, mcp); 5437 5438 if (rval != QLA_SUCCESS) { 5439 ql_dbg(ql_dbg_mbx, vha, 0x110a, 5440 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5441 } else { 5442 /* Copy all bits to preserve original value */ 5443 memcpy(mb, &mcp->mb[1], sizeof(uint16_t) * 4); 5444 5445 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110b, 5446 "Done %s.\n", __func__); 5447 } 5448 return rval; 5449 } 5450 5451 int 5452 qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb) 5453 { 5454 int rval; 5455 mbx_cmd_t mc; 5456 mbx_cmd_t *mcp = &mc; 5457 5458 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110c, 5459 "Entered %s.\n", __func__); 5460 5461 mcp->mb[0] = MBC_SET_PORT_CONFIG; 5462 /* Copy all bits to preserve original setting */ 5463 memcpy(&mcp->mb[1], mb, sizeof(uint16_t) * 4); 5464 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 5465 mcp->in_mb = MBX_0; 5466 mcp->tov = MBX_TOV_SECONDS; 5467 mcp->flags = 0; 5468 rval = qla2x00_mailbox_command(vha, mcp); 5469 5470 if (rval != QLA_SUCCESS) { 5471 ql_dbg(ql_dbg_mbx, vha, 0x110d, 5472 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5473 } else 5474 ql_dbg(ql_dbg_mbx + 
ql_dbg_verbose, vha, 0x110e, 5475 "Done %s.\n", __func__); 5476 5477 return rval; 5478 } 5479 5480 5481 int 5482 qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority, 5483 uint16_t *mb) 5484 { 5485 int rval; 5486 mbx_cmd_t mc; 5487 mbx_cmd_t *mcp = &mc; 5488 struct qla_hw_data *ha = vha->hw; 5489 5490 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110f, 5491 "Entered %s.\n", __func__); 5492 5493 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha)) 5494 return QLA_FUNCTION_FAILED; 5495 5496 mcp->mb[0] = MBC_PORT_PARAMS; 5497 mcp->mb[1] = loop_id; 5498 if (ha->flags.fcp_prio_enabled) 5499 mcp->mb[2] = BIT_1; 5500 else 5501 mcp->mb[2] = BIT_2; 5502 mcp->mb[4] = priority & 0xf; 5503 mcp->mb[9] = vha->vp_idx; 5504 mcp->out_mb = MBX_9|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 5505 mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0; 5506 mcp->tov = 30; 5507 mcp->flags = 0; 5508 rval = qla2x00_mailbox_command(vha, mcp); 5509 if (mb != NULL) { 5510 mb[0] = mcp->mb[0]; 5511 mb[1] = mcp->mb[1]; 5512 mb[3] = mcp->mb[3]; 5513 mb[4] = mcp->mb[4]; 5514 } 5515 5516 if (rval != QLA_SUCCESS) { 5517 ql_dbg(ql_dbg_mbx, vha, 0x10cd, "Failed=%x.\n", rval); 5518 } else { 5519 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10cc, 5520 "Done %s.\n", __func__); 5521 } 5522 5523 return rval; 5524 } 5525 5526 int 5527 qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp) 5528 { 5529 int rval = QLA_FUNCTION_FAILED; 5530 struct qla_hw_data *ha = vha->hw; 5531 uint8_t byte; 5532 5533 if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha)) { 5534 ql_dbg(ql_dbg_mbx, vha, 0x1150, 5535 "Thermal not supported by this card.\n"); 5536 return rval; 5537 } 5538 5539 if (IS_QLA25XX(ha)) { 5540 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC && 5541 ha->pdev->subsystem_device == 0x0175) { 5542 rval = qla2x00_read_sfp(vha, 0, &byte, 5543 0x98, 0x1, 1, BIT_13|BIT_0); 5544 *temp = byte; 5545 return rval; 5546 } 5547 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP && 5548 ha->pdev->subsystem_device == 0x338e) { 5549 rval = qla2x00_read_sfp(vha, 0, &byte, 5550 0x98, 0x1, 1, BIT_15|BIT_14|BIT_0); 5551 *temp = byte; 5552 return rval; 5553 } 5554 ql_dbg(ql_dbg_mbx, vha, 0x10c9, 5555 "Thermal not supported by this card.\n"); 5556 return rval; 5557 } 5558 5559 if (IS_QLA82XX(ha)) { 5560 *temp = qla82xx_read_temperature(vha); 5561 rval = QLA_SUCCESS; 5562 return rval; 5563 } else if (IS_QLA8044(ha)) { 5564 *temp = qla8044_read_temperature(vha); 5565 rval = QLA_SUCCESS; 5566 return rval; 5567 } 5568 5569 rval = qla2x00_read_asic_temperature(vha, temp); 5570 return rval; 5571 } 5572 5573 int 5574 qla82xx_mbx_intr_enable(scsi_qla_host_t *vha) 5575 { 5576 int rval; 5577 struct qla_hw_data *ha = vha->hw; 5578 mbx_cmd_t mc; 5579 mbx_cmd_t *mcp = &mc; 5580 5581 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1017, 5582 "Entered %s.\n", __func__); 5583 5584 if (!IS_FWI2_CAPABLE(ha)) 5585 return QLA_FUNCTION_FAILED; 5586 5587 memset(mcp, 0, sizeof(mbx_cmd_t)); 5588 mcp->mb[0] = MBC_TOGGLE_INTERRUPT; 5589 mcp->mb[1] = 1; 5590 5591 mcp->out_mb = MBX_1|MBX_0; 5592 mcp->in_mb = MBX_0; 5593 mcp->tov = 30; 5594 mcp->flags = 0; 5595 5596 rval = qla2x00_mailbox_command(vha, mcp); 5597 if (rval != QLA_SUCCESS) { 5598 ql_dbg(ql_dbg_mbx, vha, 0x1016, 5599 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5600 } else { 5601 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100e, 5602 "Done %s.\n", __func__); 5603 } 5604 5605 return rval; 5606 } 5607 5608 int 5609 qla82xx_mbx_intr_disable(scsi_qla_host_t *vha) 5610 { 5611 int rval; 5612 struct qla_hw_data *ha = 
vha->hw; 5613 mbx_cmd_t mc; 5614 mbx_cmd_t *mcp = &mc; 5615 5616 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100d, 5617 "Entered %s.\n", __func__); 5618 5619 if (!IS_P3P_TYPE(ha)) 5620 return QLA_FUNCTION_FAILED; 5621 5622 memset(mcp, 0, sizeof(mbx_cmd_t)); 5623 mcp->mb[0] = MBC_TOGGLE_INTERRUPT; 5624 mcp->mb[1] = 0; 5625 5626 mcp->out_mb = MBX_1|MBX_0; 5627 mcp->in_mb = MBX_0; 5628 mcp->tov = 30; 5629 mcp->flags = 0; 5630 5631 rval = qla2x00_mailbox_command(vha, mcp); 5632 if (rval != QLA_SUCCESS) { 5633 ql_dbg(ql_dbg_mbx, vha, 0x100c, 5634 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5635 } else { 5636 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100b, 5637 "Done %s.\n", __func__); 5638 } 5639 5640 return rval; 5641 } 5642 5643 int 5644 qla82xx_md_get_template_size(scsi_qla_host_t *vha) 5645 { 5646 struct qla_hw_data *ha = vha->hw; 5647 mbx_cmd_t mc; 5648 mbx_cmd_t *mcp = &mc; 5649 int rval = QLA_FUNCTION_FAILED; 5650 5651 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111f, 5652 "Entered %s.\n", __func__); 5653 5654 memset(mcp->mb, 0 , sizeof(mcp->mb)); 5655 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); 5656 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); 5657 mcp->mb[2] = LSW(RQST_TMPLT_SIZE); 5658 mcp->mb[3] = MSW(RQST_TMPLT_SIZE); 5659 5660 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 5661 mcp->in_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8| 5662 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 5663 5664 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; 5665 mcp->tov = MBX_TOV_SECONDS; 5666 rval = qla2x00_mailbox_command(vha, mcp); 5667 5668 /* Always copy back return mailbox values. */ 5669 if (rval != QLA_SUCCESS) { 5670 ql_dbg(ql_dbg_mbx, vha, 0x1120, 5671 "mailbox command FAILED=0x%x, subcode=%x.\n", 5672 (mcp->mb[1] << 16) | mcp->mb[0], 5673 (mcp->mb[3] << 16) | mcp->mb[2]); 5674 } else { 5675 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1121, 5676 "Done %s.\n", __func__); 5677 ha->md_template_size = ((mcp->mb[3] << 16) | mcp->mb[2]); 5678 if (!ha->md_template_size) { 5679 ql_dbg(ql_dbg_mbx, vha, 0x1122, 5680 "Null template size obtained.\n"); 5681 rval = QLA_FUNCTION_FAILED; 5682 } 5683 } 5684 return rval; 5685 } 5686 5687 int 5688 qla82xx_md_get_template(scsi_qla_host_t *vha) 5689 { 5690 struct qla_hw_data *ha = vha->hw; 5691 mbx_cmd_t mc; 5692 mbx_cmd_t *mcp = &mc; 5693 int rval = QLA_FUNCTION_FAILED; 5694 5695 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1123, 5696 "Entered %s.\n", __func__); 5697 5698 ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev, 5699 ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL); 5700 if (!ha->md_tmplt_hdr) { 5701 ql_log(ql_log_warn, vha, 0x1124, 5702 "Unable to allocate memory for Minidump template.\n"); 5703 return rval; 5704 } 5705 5706 memset(mcp->mb, 0 , sizeof(mcp->mb)); 5707 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); 5708 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); 5709 mcp->mb[2] = LSW(RQST_TMPLT); 5710 mcp->mb[3] = MSW(RQST_TMPLT); 5711 mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma)); 5712 mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma)); 5713 mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma)); 5714 mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma)); 5715 mcp->mb[8] = LSW(ha->md_template_size); 5716 mcp->mb[9] = MSW(ha->md_template_size); 5717 5718 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; 5719 mcp->tov = MBX_TOV_SECONDS; 5720 mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8| 5721 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 5722 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0; 5723 rval = qla2x00_mailbox_command(vha, mcp); 5724 5725 if (rval != 
QLA_SUCCESS) { 5726 ql_dbg(ql_dbg_mbx, vha, 0x1125, 5727 "mailbox command FAILED=0x%x, subcode=%x.\n", 5728 ((mcp->mb[1] << 16) | mcp->mb[0]), 5729 ((mcp->mb[3] << 16) | mcp->mb[2])); 5730 } else 5731 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1126, 5732 "Done %s.\n", __func__); 5733 return rval; 5734 } 5735 5736 int 5737 qla8044_md_get_template(scsi_qla_host_t *vha) 5738 { 5739 struct qla_hw_data *ha = vha->hw; 5740 mbx_cmd_t mc; 5741 mbx_cmd_t *mcp = &mc; 5742 int rval = QLA_FUNCTION_FAILED; 5743 int offset = 0, size = MINIDUMP_SIZE_36K; 5744 5745 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11f, 5746 "Entered %s.\n", __func__); 5747 5748 ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev, 5749 ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL); 5750 if (!ha->md_tmplt_hdr) { 5751 ql_log(ql_log_warn, vha, 0xb11b, 5752 "Unable to allocate memory for Minidump template.\n"); 5753 return rval; 5754 } 5755 5756 memset(mcp->mb, 0 , sizeof(mcp->mb)); 5757 while (offset < ha->md_template_size) { 5758 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); 5759 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); 5760 mcp->mb[2] = LSW(RQST_TMPLT); 5761 mcp->mb[3] = MSW(RQST_TMPLT); 5762 mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma + offset)); 5763 mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma + offset)); 5764 mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma + offset)); 5765 mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma + offset)); 5766 mcp->mb[8] = LSW(size); 5767 mcp->mb[9] = MSW(size); 5768 mcp->mb[10] = offset & 0x0000FFFF; 5769 mcp->mb[11] = offset & 0xFFFF0000; 5770 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; 5771 mcp->tov = MBX_TOV_SECONDS; 5772 mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8| 5773 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 5774 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0; 5775 rval = qla2x00_mailbox_command(vha, mcp); 5776 5777 if (rval != QLA_SUCCESS) { 5778 ql_dbg(ql_dbg_mbx, vha, 0xb11c, 5779 "mailbox command FAILED=0x%x, subcode=%x.\n", 5780 ((mcp->mb[1] << 16) | mcp->mb[0]), 5781 ((mcp->mb[3] << 16) | mcp->mb[2])); 5782 return rval; 5783 } else 5784 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11d, 5785 "Done %s.\n", __func__); 5786 offset = offset + size; 5787 } 5788 return rval; 5789 } 5790 5791 int 5792 qla81xx_set_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg) 5793 { 5794 int rval; 5795 struct qla_hw_data *ha = vha->hw; 5796 mbx_cmd_t mc; 5797 mbx_cmd_t *mcp = &mc; 5798 5799 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha)) 5800 return QLA_FUNCTION_FAILED; 5801 5802 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1133, 5803 "Entered %s.\n", __func__); 5804 5805 memset(mcp, 0, sizeof(mbx_cmd_t)); 5806 mcp->mb[0] = MBC_SET_LED_CONFIG; 5807 mcp->mb[1] = led_cfg[0]; 5808 mcp->mb[2] = led_cfg[1]; 5809 if (IS_QLA8031(ha)) { 5810 mcp->mb[3] = led_cfg[2]; 5811 mcp->mb[4] = led_cfg[3]; 5812 mcp->mb[5] = led_cfg[4]; 5813 mcp->mb[6] = led_cfg[5]; 5814 } 5815 5816 mcp->out_mb = MBX_2|MBX_1|MBX_0; 5817 if (IS_QLA8031(ha)) 5818 mcp->out_mb |= MBX_6|MBX_5|MBX_4|MBX_3; 5819 mcp->in_mb = MBX_0; 5820 mcp->tov = 30; 5821 mcp->flags = 0; 5822 5823 rval = qla2x00_mailbox_command(vha, mcp); 5824 if (rval != QLA_SUCCESS) { 5825 ql_dbg(ql_dbg_mbx, vha, 0x1134, 5826 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5827 } else { 5828 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1135, 5829 "Done %s.\n", __func__); 5830 } 5831 5832 return rval; 5833 } 5834 5835 int 5836 qla81xx_get_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg) 5837 { 5838 int rval; 5839 struct qla_hw_data *ha = vha->hw; 5840 mbx_cmd_t mc; 5841 mbx_cmd_t *mcp 
= &mc; 5842 5843 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha)) 5844 return QLA_FUNCTION_FAILED; 5845 5846 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1136, 5847 "Entered %s.\n", __func__); 5848 5849 memset(mcp, 0, sizeof(mbx_cmd_t)); 5850 mcp->mb[0] = MBC_GET_LED_CONFIG; 5851 5852 mcp->out_mb = MBX_0; 5853 mcp->in_mb = MBX_2|MBX_1|MBX_0; 5854 if (IS_QLA8031(ha)) 5855 mcp->in_mb |= MBX_6|MBX_5|MBX_4|MBX_3; 5856 mcp->tov = 30; 5857 mcp->flags = 0; 5858 5859 rval = qla2x00_mailbox_command(vha, mcp); 5860 if (rval != QLA_SUCCESS) { 5861 ql_dbg(ql_dbg_mbx, vha, 0x1137, 5862 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5863 } else { 5864 led_cfg[0] = mcp->mb[1]; 5865 led_cfg[1] = mcp->mb[2]; 5866 if (IS_QLA8031(ha)) { 5867 led_cfg[2] = mcp->mb[3]; 5868 led_cfg[3] = mcp->mb[4]; 5869 led_cfg[4] = mcp->mb[5]; 5870 led_cfg[5] = mcp->mb[6]; 5871 } 5872 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1138, 5873 "Done %s.\n", __func__); 5874 } 5875 5876 return rval; 5877 } 5878 5879 int 5880 qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable) 5881 { 5882 int rval; 5883 struct qla_hw_data *ha = vha->hw; 5884 mbx_cmd_t mc; 5885 mbx_cmd_t *mcp = &mc; 5886 5887 if (!IS_P3P_TYPE(ha)) 5888 return QLA_FUNCTION_FAILED; 5889 5890 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1127, 5891 "Entered %s.\n", __func__); 5892 5893 memset(mcp, 0, sizeof(mbx_cmd_t)); 5894 mcp->mb[0] = MBC_SET_LED_CONFIG; 5895 if (enable) 5896 mcp->mb[7] = 0xE; 5897 else 5898 mcp->mb[7] = 0xD; 5899 5900 mcp->out_mb = MBX_7|MBX_0; 5901 mcp->in_mb = MBX_0; 5902 mcp->tov = MBX_TOV_SECONDS; 5903 mcp->flags = 0; 5904 5905 rval = qla2x00_mailbox_command(vha, mcp); 5906 if (rval != QLA_SUCCESS) { 5907 ql_dbg(ql_dbg_mbx, vha, 0x1128, 5908 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5909 } else { 5910 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1129, 5911 "Done %s.\n", __func__); 5912 } 5913 5914 return rval; 5915 } 5916 5917 int 5918 qla83xx_wr_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data) 5919 { 5920 int rval; 5921 struct qla_hw_data *ha = vha->hw; 5922 mbx_cmd_t mc; 5923 mbx_cmd_t *mcp = &mc; 5924 5925 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 5926 return QLA_FUNCTION_FAILED; 5927 5928 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1130, 5929 "Entered %s.\n", __func__); 5930 5931 mcp->mb[0] = MBC_WRITE_REMOTE_REG; 5932 mcp->mb[1] = LSW(reg); 5933 mcp->mb[2] = MSW(reg); 5934 mcp->mb[3] = LSW(data); 5935 mcp->mb[4] = MSW(data); 5936 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 5937 5938 mcp->in_mb = MBX_1|MBX_0; 5939 mcp->tov = MBX_TOV_SECONDS; 5940 mcp->flags = 0; 5941 rval = qla2x00_mailbox_command(vha, mcp); 5942 5943 if (rval != QLA_SUCCESS) { 5944 ql_dbg(ql_dbg_mbx, vha, 0x1131, 5945 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5946 } else { 5947 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1132, 5948 "Done %s.\n", __func__); 5949 } 5950 5951 return rval; 5952 } 5953 5954 int 5955 qla2x00_port_logout(scsi_qla_host_t *vha, struct fc_port *fcport) 5956 { 5957 int rval; 5958 struct qla_hw_data *ha = vha->hw; 5959 mbx_cmd_t mc; 5960 mbx_cmd_t *mcp = &mc; 5961 5962 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 5963 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113b, 5964 "Implicit LOGO Unsupported.\n"); 5965 return QLA_FUNCTION_FAILED; 5966 } 5967 5968 5969 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113c, 5970 "Entering %s.\n", __func__); 5971 5972 /* Perform Implicit LOGO. 
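* (mb[10] BIT_15 is set below to request the implicit form.)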
*/ 5973 mcp->mb[0] = MBC_PORT_LOGOUT; 5974 mcp->mb[1] = fcport->loop_id; 5975 mcp->mb[10] = BIT_15; 5976 mcp->out_mb = MBX_10|MBX_1|MBX_0; 5977 mcp->in_mb = MBX_0; 5978 mcp->tov = MBX_TOV_SECONDS; 5979 mcp->flags = 0; 5980 rval = qla2x00_mailbox_command(vha, mcp); 5981 if (rval != QLA_SUCCESS) 5982 ql_dbg(ql_dbg_mbx, vha, 0x113d, 5983 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5984 else 5985 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113e, 5986 "Done %s.\n", __func__); 5987 5988 return rval; 5989 } 5990 5991 int 5992 qla83xx_rd_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t *data) 5993 { 5994 int rval; 5995 mbx_cmd_t mc; 5996 mbx_cmd_t *mcp = &mc; 5997 struct qla_hw_data *ha = vha->hw; 5998 unsigned long retry_max_time = jiffies + (2 * HZ); 5999 6000 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 6001 return QLA_FUNCTION_FAILED; 6002 6003 ql_dbg(ql_dbg_mbx, vha, 0x114b, "Entered %s.\n", __func__); 6004 6005 retry_rd_reg: 6006 mcp->mb[0] = MBC_READ_REMOTE_REG; 6007 mcp->mb[1] = LSW(reg); 6008 mcp->mb[2] = MSW(reg); 6009 mcp->out_mb = MBX_2|MBX_1|MBX_0; 6010 mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0; 6011 mcp->tov = MBX_TOV_SECONDS; 6012 mcp->flags = 0; 6013 rval = qla2x00_mailbox_command(vha, mcp); 6014 6015 if (rval != QLA_SUCCESS) { 6016 ql_dbg(ql_dbg_mbx, vha, 0x114c, 6017 "Failed=%x mb[0]=%x mb[1]=%x.\n", 6018 rval, mcp->mb[0], mcp->mb[1]); 6019 } else { 6020 *data = (mcp->mb[3] | (mcp->mb[4] << 16)); 6021 if (*data == QLA8XXX_BAD_VALUE) { 6022 /* 6023 * During soft-reset CAMRAM register reads might 6024 * return 0xbad0bad0. So retry for MAX of 2 sec 6025 * while reading camram registers. 6026 */ 6027 if (time_after(jiffies, retry_max_time)) { 6028 ql_dbg(ql_dbg_mbx, vha, 0x1141, 6029 "Failure to read CAMRAM register. " 6030 "data=0x%x.\n", *data); 6031 return QLA_FUNCTION_FAILED; 6032 } 6033 msleep(100); 6034 goto retry_rd_reg; 6035 } 6036 ql_dbg(ql_dbg_mbx, vha, 0x1142, "Done %s.\n", __func__); 6037 } 6038 6039 return rval; 6040 } 6041 6042 int 6043 qla83xx_restart_nic_firmware(scsi_qla_host_t *vha) 6044 { 6045 int rval; 6046 mbx_cmd_t mc; 6047 mbx_cmd_t *mcp = &mc; 6048 struct qla_hw_data *ha = vha->hw; 6049 6050 if (!IS_QLA83XX(ha)) 6051 return QLA_FUNCTION_FAILED; 6052 6053 ql_dbg(ql_dbg_mbx, vha, 0x1143, "Entered %s.\n", __func__); 6054 6055 mcp->mb[0] = MBC_RESTART_NIC_FIRMWARE; 6056 mcp->out_mb = MBX_0; 6057 mcp->in_mb = MBX_1|MBX_0; 6058 mcp->tov = MBX_TOV_SECONDS; 6059 mcp->flags = 0; 6060 rval = qla2x00_mailbox_command(vha, mcp); 6061 6062 if (rval != QLA_SUCCESS) { 6063 ql_dbg(ql_dbg_mbx, vha, 0x1144, 6064 "Failed=%x mb[0]=%x mb[1]=%x.\n", 6065 rval, mcp->mb[0], mcp->mb[1]); 6066 ha->isp_ops->fw_dump(vha, 0); 6067 } else { 6068 ql_dbg(ql_dbg_mbx, vha, 0x1145, "Done %s.\n", __func__); 6069 } 6070 6071 return rval; 6072 } 6073 6074 int 6075 qla83xx_access_control(scsi_qla_host_t *vha, uint16_t options, 6076 uint32_t start_addr, uint32_t end_addr, uint16_t *sector_size) 6077 { 6078 int rval; 6079 mbx_cmd_t mc; 6080 mbx_cmd_t *mcp = &mc; 6081 uint8_t subcode = (uint8_t)options; 6082 struct qla_hw_data *ha = vha->hw; 6083 6084 if (!IS_QLA8031(ha)) 6085 return QLA_FUNCTION_FAILED; 6086 6087 ql_dbg(ql_dbg_mbx, vha, 0x1146, "Entered %s.\n", __func__); 6088 6089 mcp->mb[0] = MBC_SET_ACCESS_CONTROL; 6090 mcp->mb[1] = options; 6091 mcp->out_mb = MBX_1|MBX_0; 6092 if (subcode & BIT_2) { 6093 mcp->mb[2] = LSW(start_addr); 6094 mcp->mb[3] = MSW(start_addr); 6095 mcp->mb[4] = LSW(end_addr); 6096 mcp->mb[5] = MSW(end_addr); 6097 mcp->out_mb |= MBX_5|MBX_4|MBX_3|MBX_2; 6098 } 6099 
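/* Lock/unlock subcodes also return an owner id in mb[3]/mb[4], so those registers are added to the input mask unless this is an address-range (BIT_2) or sector-size (BIT_5) request. */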
int
qla83xx_access_control(scsi_qla_host_t *vha, uint16_t options,
    uint32_t start_addr, uint32_t end_addr, uint16_t *sector_size)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	uint8_t subcode = (uint8_t)options;
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA8031(ha))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx, vha, 0x1146, "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_SET_ACCESS_CONTROL;
	mcp->mb[1] = options;
	mcp->out_mb = MBX_1|MBX_0;
	if (subcode & BIT_2) {
		mcp->mb[2] = LSW(start_addr);
		mcp->mb[3] = MSW(start_addr);
		mcp->mb[4] = LSW(end_addr);
		mcp->mb[5] = MSW(end_addr);
		mcp->out_mb |= MBX_5|MBX_4|MBX_3|MBX_2;
	}

	mcp->in_mb = MBX_2|MBX_1|MBX_0;
	if (!(subcode & (BIT_2 | BIT_5)))
		mcp->in_mb |= MBX_4|MBX_3;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1147,
		    "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[4]=%x.\n",
		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3],
		    mcp->mb[4]);
		ha->isp_ops->fw_dump(vha, 0);
	} else {
		if (subcode & BIT_5)
			*sector_size = mcp->mb[1];
		else if (subcode & (BIT_6 | BIT_7)) {
			ql_dbg(ql_dbg_mbx, vha, 0x1148,
			    "Driver-lock id=%x%x", mcp->mb[4], mcp->mb[3]);
		} else if (subcode & (BIT_3 | BIT_4)) {
			ql_dbg(ql_dbg_mbx, vha, 0x1149,
			    "Flash-lock id=%x%x", mcp->mb[4], mcp->mb[3]);
		}
		ql_dbg(ql_dbg_mbx, vha, 0x114a, "Done %s.\n", __func__);
	}

	return rval;
}

int
qla2x00_dump_mctp_data(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
    uint32_t size)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	if (!IS_MCTP_CAPABLE(vha->hw))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114f,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
	mcp->mb[1] = LSW(addr);
	mcp->mb[2] = MSW(req_dma);
	mcp->mb[3] = LSW(req_dma);
	mcp->mb[4] = MSW(size);
	mcp->mb[5] = LSW(size);
	mcp->mb[6] = MSW(MSD(req_dma));
	mcp->mb[7] = LSW(MSD(req_dma));
	mcp->mb[8] = MSW(addr);
	/* Set RAM ID to valid; mcp is on the stack, so assign (do not OR). */
	mcp->mb[10] = BIT_7;
	/* For MCTP the RAM ID is 0x40. */
	mcp->mb[10] |= 0x40;

	mcp->out_mb = MBX_10|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|
	    MBX_0;

	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x114e,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114d,
		    "Done %s.\n", __func__);
	}

	return rval;
}

int
qla26xx_dport_diagnostics(scsi_qla_host_t *vha,
	void *dd_buf, uint size, uint options)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	dma_addr_t dd_dma;

	if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) &&
	    !IS_QLA28XX(vha->hw))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x119f,
	    "Entered %s.\n", __func__);

	dd_dma = dma_map_single(&vha->hw->pdev->dev,
	    dd_buf, size, DMA_FROM_DEVICE);
	if (dma_mapping_error(&vha->hw->pdev->dev, dd_dma)) {
		ql_log(ql_log_warn, vha, 0x1194, "Failed to map dma buffer.\n");
		return QLA_MEMORY_ALLOC_FAILED;
	}

	memset(dd_buf, 0, size);

	mcp->mb[0] = MBC_DPORT_DIAGNOSTICS;
	mcp->mb[1] = options;
	mcp->mb[2] = MSW(LSD(dd_dma));
	mcp->mb[3] = LSW(LSD(dd_dma));
	mcp->mb[6] = MSW(MSD(dd_dma));
	mcp->mb[7] = LSW(MSD(dd_dma));
	mcp->mb[8] = size;
	mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->buf_size = size;
	mcp->flags = MBX_DMA_IN;
	mcp->tov = MBX_TOV_SECONDS * 4;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1195, "Failed=%x.\n", rval);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1196,
		    "Done %s.\n", __func__);
	}

	dma_unmap_single(&vha->hw->pdev->dev, dd_dma,
	    size, DMA_FROM_DEVICE);

	return rval;
}
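/*
 * Illustrative sketch only (not part of the original driver): the helper
 * above maps the caller's buffer with dma_map_single() itself, so dd_buf
 * must be physically contiguous, DMA-able kernel memory (e.g. kmalloc
 * backed), not stack or vmalloc memory.  The options value 0 used here is a
 * placeholder for the firmware-defined D-Port option bits.
 */
static int __maybe_unused qla26xx_example_dport_diag(scsi_qla_host_t *vha,
	void *dd_buf, uint dd_len)
{
	int rval;

	rval = qla26xx_dport_diagnostics(vha, dd_buf, dd_len, 0);
	if (rval != QLA_SUCCESS)
		ql_dbg(ql_dbg_mbx, vha, 0xffff,
		    "example: D-Port diagnostics failed=%x\n", rval);

	return rval;
}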
static void qla2x00_async_mb_sp_done(srb_t *sp, int res)
{
	sp->u.iocb_cmd.u.mbx.rc = res;

	complete(&sp->u.iocb_cmd.u.mbx.comp);
	/* Don't free sp here. Let the caller do the free. */
}

/*
 * This mailbox uses the IOCB interface to send a mailbox command.
 * This allows non-critical (non chip-setup) commands to go
 * out in parallel.
 */
int qla24xx_send_mb_cmd(struct scsi_qla_host *vha, mbx_cmd_t *mcp)
{
	int rval = QLA_FUNCTION_FAILED;
	srb_t *sp;
	struct srb_iocb *c;

	if (!vha->hw->flags.fw_started)
		goto done;

	sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
	if (!sp)
		goto done;

	sp->type = SRB_MB_IOCB;
	sp->name = mb_to_str(mcp->mb[0]);

	c = &sp->u.iocb_cmd;
	c->timeout = qla2x00_async_iocb_timeout;
	init_completion(&c->u.mbx.comp);

	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	memcpy(sp->u.iocb_cmd.u.mbx.out_mb, mcp->mb, SIZEOF_IOCB_MB_REG);

	sp->done = qla2x00_async_mb_sp_done;

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1018,
		    "%s: %s Failed submission. %x.\n",
		    __func__, sp->name, rval);
		goto done_free_sp;
	}

	ql_dbg(ql_dbg_mbx, vha, 0x113f, "MB:%s hndl %x submitted\n",
	    sp->name, sp->handle);

	wait_for_completion(&c->u.mbx.comp);
	memcpy(mcp->mb, sp->u.iocb_cmd.u.mbx.in_mb, SIZEOF_IOCB_MB_REG);

	rval = c->u.mbx.rc;
	switch (rval) {
	case QLA_FUNCTION_TIMEOUT:
		ql_dbg(ql_dbg_mbx, vha, 0x1140, "%s: %s Timeout. %x.\n",
		    __func__, sp->name, rval);
		break;
	case QLA_SUCCESS:
		ql_dbg(ql_dbg_mbx, vha, 0x119d, "%s: %s done.\n",
		    __func__, sp->name);
		sp->free(sp);
		break;
	default:
		ql_dbg(ql_dbg_mbx, vha, 0x119e, "%s: %s Failed. %x.\n",
		    __func__, sp->name, rval);
		sp->free(sp);
		break;
	}

	return rval;

done_free_sp:
	sp->free(sp);
done:
	return rval;
}
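/*
 * Illustrative sketch only (not part of the original driver): the wrappers
 * below (qla24xx_gpdb_wait(), qla24xx_gidlist_wait(), qla24xx_res_count_wait())
 * all follow the same pattern when driving a mailbox command over the IOCB
 * path: zero an mbx_cmd_t, fill in mb[], call qla24xx_send_mb_cmd(), then
 * read the returned registers back from mb[] on success.
 */
static int __maybe_unused qla24xx_example_mb_over_iocb(struct scsi_qla_host *vha,
	uint16_t *mb1)
{
	mbx_cmd_t mc;
	int rval;

	memset(&mc, 0, sizeof(mc));
	/* Mirrors qla24xx_res_count_wait() further below. */
	mc.mb[0] = MBC_GET_RESOURCE_COUNTS;

	rval = qla24xx_send_mb_cmd(vha, &mc);
	if (rval == QLA_SUCCESS)
		*mb1 = mc.mb[1];

	return rval;
}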
%x.\n", 6293 __func__, sp->name, rval); 6294 sp->free(sp); 6295 break; 6296 } 6297 6298 return rval; 6299 6300 done_free_sp: 6301 sp->free(sp); 6302 done: 6303 return rval; 6304 } 6305 6306 /* 6307 * qla24xx_gpdb_wait 6308 * NOTE: Do not call this routine from DPC thread 6309 */ 6310 int qla24xx_gpdb_wait(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt) 6311 { 6312 int rval = QLA_FUNCTION_FAILED; 6313 dma_addr_t pd_dma; 6314 struct port_database_24xx *pd; 6315 struct qla_hw_data *ha = vha->hw; 6316 mbx_cmd_t mc; 6317 6318 if (!vha->hw->flags.fw_started) 6319 goto done; 6320 6321 pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma); 6322 if (pd == NULL) { 6323 ql_log(ql_log_warn, vha, 0xd047, 6324 "Failed to allocate port database structure.\n"); 6325 goto done_free_sp; 6326 } 6327 6328 memset(&mc, 0, sizeof(mc)); 6329 mc.mb[0] = MBC_GET_PORT_DATABASE; 6330 mc.mb[1] = cpu_to_le16(fcport->loop_id); 6331 mc.mb[2] = MSW(pd_dma); 6332 mc.mb[3] = LSW(pd_dma); 6333 mc.mb[6] = MSW(MSD(pd_dma)); 6334 mc.mb[7] = LSW(MSD(pd_dma)); 6335 mc.mb[9] = cpu_to_le16(vha->vp_idx); 6336 mc.mb[10] = cpu_to_le16((uint16_t)opt); 6337 6338 rval = qla24xx_send_mb_cmd(vha, &mc); 6339 if (rval != QLA_SUCCESS) { 6340 ql_dbg(ql_dbg_mbx, vha, 0x1193, 6341 "%s: %8phC fail\n", __func__, fcport->port_name); 6342 goto done_free_sp; 6343 } 6344 6345 rval = __qla24xx_parse_gpdb(vha, fcport, pd); 6346 6347 ql_dbg(ql_dbg_mbx, vha, 0x1197, "%s: %8phC done\n", 6348 __func__, fcport->port_name); 6349 6350 done_free_sp: 6351 if (pd) 6352 dma_pool_free(ha->s_dma_pool, pd, pd_dma); 6353 done: 6354 return rval; 6355 } 6356 6357 int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, 6358 struct port_database_24xx *pd) 6359 { 6360 int rval = QLA_SUCCESS; 6361 uint64_t zero = 0; 6362 u8 current_login_state, last_login_state; 6363 6364 if (fcport->fc4f_nvme) { 6365 current_login_state = pd->current_login_state >> 4; 6366 last_login_state = pd->last_login_state >> 4; 6367 } else { 6368 current_login_state = pd->current_login_state & 0xf; 6369 last_login_state = pd->last_login_state & 0xf; 6370 } 6371 6372 /* Check for logged in state. */ 6373 if (current_login_state != PDS_PRLI_COMPLETE) { 6374 ql_dbg(ql_dbg_mbx, vha, 0x119a, 6375 "Unable to verify login-state (%x/%x) for loop_id %x.\n", 6376 current_login_state, last_login_state, fcport->loop_id); 6377 rval = QLA_FUNCTION_FAILED; 6378 goto gpd_error_out; 6379 } 6380 6381 if (fcport->loop_id == FC_NO_LOOP_ID || 6382 (memcmp(fcport->port_name, (uint8_t *)&zero, 8) && 6383 memcmp(fcport->port_name, pd->port_name, 8))) { 6384 /* We lost the device mid way. */ 6385 rval = QLA_NOT_LOGGED_IN; 6386 goto gpd_error_out; 6387 } 6388 6389 /* Names are little-endian. */ 6390 memcpy(fcport->node_name, pd->node_name, WWN_SIZE); 6391 memcpy(fcport->port_name, pd->port_name, WWN_SIZE); 6392 6393 /* Get port_id of device. */ 6394 fcport->d_id.b.domain = pd->port_id[0]; 6395 fcport->d_id.b.area = pd->port_id[1]; 6396 fcport->d_id.b.al_pa = pd->port_id[2]; 6397 fcport->d_id.b.rsvd_1 = 0; 6398 6399 if (fcport->fc4f_nvme) { 6400 fcport->port_type = 0; 6401 if ((pd->prli_svc_param_word_3[0] & BIT_5) == 0) 6402 fcport->port_type |= FCT_NVME_INITIATOR; 6403 if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0) 6404 fcport->port_type |= FCT_NVME_TARGET; 6405 if ((pd->prli_svc_param_word_3[0] & BIT_3) == 0) 6406 fcport->port_type |= FCT_NVME_DISCOVERY; 6407 } else { 6408 /* If not target must be initiator or unknown type. 
		if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
			fcport->port_type = FCT_INITIATOR;
		else
			fcport->port_type = FCT_TARGET;
	}
	/* Passback COS information. */
	fcport->supported_classes = (pd->flags & PDF_CLASS_2) ?
	    FC_COS_CLASS2 : FC_COS_CLASS3;

	if (pd->prli_svc_param_word_3[0] & BIT_7) {
		fcport->flags |= FCF_CONF_COMP_SUPPORTED;
		fcport->conf_compl_supported = 1;
	}

gpd_error_out:
	return rval;
}

/*
 * qla24xx_gidlist_wait
 * NOTE: Do not call this routine from DPC thread.
 */
int qla24xx_gidlist_wait(struct scsi_qla_host *vha,
	void *id_list, dma_addr_t id_list_dma, uint16_t *entries)
{
	int rval = QLA_FUNCTION_FAILED;
	mbx_cmd_t mc;

	if (!vha->hw->flags.fw_started)
		goto done;

	memset(&mc, 0, sizeof(mc));
	mc.mb[0] = MBC_GET_ID_LIST;
	mc.mb[2] = MSW(id_list_dma);
	mc.mb[3] = LSW(id_list_dma);
	mc.mb[6] = MSW(MSD(id_list_dma));
	mc.mb[7] = LSW(MSD(id_list_dma));
	mc.mb[8] = 0;
	mc.mb[9] = cpu_to_le16(vha->vp_idx);

	rval = qla24xx_send_mb_cmd(vha, &mc);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x119b,
		    "%s: fail\n", __func__);
	} else {
		*entries = mc.mb[1];
		ql_dbg(ql_dbg_mbx, vha, 0x119c,
		    "%s: done\n", __func__);
	}
done:
	return rval;
}

int qla27xx_set_zio_threshold(scsi_qla_host_t *vha, uint16_t value)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1200,
	    "Entered %s\n", __func__);

	memset(mcp->mb, 0, sizeof(mcp->mb));
	mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
	mcp->mb[1] = cpu_to_le16(1);
	mcp->mb[2] = cpu_to_le16(value);
	mcp->out_mb = MBX_2 | MBX_1 | MBX_0;
	mcp->in_mb = MBX_2 | MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;

	rval = qla2x00_mailbox_command(vha, mcp);

	ql_dbg(ql_dbg_mbx, vha, 0x1201, "%s %x\n",
	    (rval != QLA_SUCCESS) ? "Failed" : "Done", rval);

	return rval;
}

int qla27xx_get_zio_threshold(scsi_qla_host_t *vha, uint16_t *value)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1203,
	    "Entered %s\n", __func__);

	memset(mcp->mb, 0, sizeof(mcp->mb));
	mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
	mcp->mb[1] = cpu_to_le16(0);
	mcp->out_mb = MBX_1 | MBX_0;
	mcp->in_mb = MBX_2 | MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;

	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval == QLA_SUCCESS)
		*value = mc.mb[2];

	ql_dbg(ql_dbg_mbx, vha, 0x1205, "%s %x\n",
	    (rval != QLA_SUCCESS) ? "Failed" : "Done", rval);

	return rval;
}
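/*
 * Illustrative sketch only (not part of the original driver): in
 * MBC_GET_SET_ZIO_THRESHOLD, mb[1] = 1 selects "set" and mb[1] = 0 selects
 * "get", so the two helpers above can be combined into a read-then-update
 * sequence.  The threshold value passed in is whatever the caller chooses;
 * no particular value is implied here.
 */
static int __maybe_unused qla27xx_example_update_zio_threshold(
	scsi_qla_host_t *vha, uint16_t new_threshold)
{
	uint16_t cur = 0;
	int rval;

	rval = qla27xx_get_zio_threshold(vha, &cur);
	if (rval != QLA_SUCCESS)
		return rval;

	/* Skip the second mailbox command if nothing would change. */
	if (cur == new_threshold)
		return QLA_SUCCESS;

	return qla27xx_set_zio_threshold(vha, new_threshold);
}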
int
qla2x00_read_sfp_dev(struct scsi_qla_host *vha, char *buf, int count)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t iter, addr, offset;
	dma_addr_t phys_addr;
	int rval, c;
	u8 *sfp_data;

	memset(ha->sfp_data, 0, SFP_DEV_SIZE);
	addr = 0xa0;
	phys_addr = ha->sfp_data_dma;
	sfp_data = ha->sfp_data;
	offset = c = 0;

	for (iter = 0; iter < SFP_DEV_SIZE / SFP_BLOCK_SIZE; iter++) {
		if (iter == 4) {
			/* Skip to next device address. */
			addr = 0xa2;
			offset = 0;
		}

		rval = qla2x00_read_sfp(vha, phys_addr, sfp_data,
		    addr, offset, SFP_BLOCK_SIZE, BIT_1);
		if (rval != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x706d,
			    "Unable to read SFP data (%x/%x/%x).\n", rval,
			    addr, offset);

			return rval;
		}

		if (buf && (c < count)) {
			u16 sz;

			if ((count - c) >= SFP_BLOCK_SIZE)
				sz = SFP_BLOCK_SIZE;
			else
				sz = count - c;

			memcpy(buf, sfp_data, sz);
			buf += SFP_BLOCK_SIZE;
			c += sz;
		}
		phys_addr += SFP_BLOCK_SIZE;
		sfp_data += SFP_BLOCK_SIZE;
		offset += SFP_BLOCK_SIZE;
	}

	return rval;
}

int qla24xx_res_count_wait(struct scsi_qla_host *vha,
    uint16_t *out_mb, int out_mb_sz)
{
	int rval = QLA_FUNCTION_FAILED;
	mbx_cmd_t mc;

	if (!vha->hw->flags.fw_started)
		goto done;

	memset(&mc, 0, sizeof(mc));
	mc.mb[0] = MBC_GET_RESOURCE_COUNTS;

	rval = qla24xx_send_mb_cmd(vha, &mc);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0xffff,
		    "%s: fail\n", __func__);
	} else {
		if (out_mb_sz <= SIZEOF_IOCB_MB_REG)
			memcpy(out_mb, mc.mb, out_mb_sz);
		else
			memcpy(out_mb, mc.mb, SIZEOF_IOCB_MB_REG);

		ql_dbg(ql_dbg_mbx, vha, 0xffff,
		    "%s: done\n", __func__);
	}
done:
	return rval;
}

int qla28xx_secure_flash_update(scsi_qla_host_t *vha, uint16_t opts,
    uint16_t region, uint32_t len, dma_addr_t sfub_dma_addr,
    uint32_t sfub_len)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	mcp->mb[0] = MBC_SECURE_FLASH_UPDATE;
	mcp->mb[1] = opts;
	mcp->mb[2] = region;
	mcp->mb[3] = MSW(len);
	mcp->mb[4] = LSW(len);
	mcp->mb[5] = MSW(sfub_dma_addr);
	mcp->mb[6] = LSW(sfub_dma_addr);
	mcp->mb[7] = MSW(MSD(sfub_dma_addr));
	mcp->mb[8] = LSW(MSD(sfub_dma_addr));
	mcp->mb[9] = sfub_len;
	mcp->out_mb =
	    MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_2|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0xffff,
		    "%s(%ld): failed rval 0x%x, %x %x %x",
		    __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1],
		    mcp->mb[2]);
	}

	return rval;
}

int qla2xxx_write_remote_register(scsi_qla_host_t *vha, uint32_t addr,
    uint32_t data)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_WRITE_REMOTE_REG;
	mcp->mb[1] = LSW(addr);
	mcp->mb[2] = MSW(addr);
	mcp->mb[3] = LSW(data);
	mcp->mb[4] = MSW(data);
	mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x10e9,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
		    "Done %s.\n", __func__);
	}

	return rval;
}
int qla2xxx_read_remote_register(scsi_qla_host_t *vha, uint32_t addr,
    uint32_t *data)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_READ_REMOTE_REG;
	mcp->mb[1] = LSW(addr);
	mcp->mb[2] = MSW(addr);
	mcp->out_mb = MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	*data = (uint32_t)((((uint32_t)mcp->mb[4]) << 16) | mcp->mb[3]);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x10e9,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
		    "Done %s.\n", __func__);
	}

	return rval;
}
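/*
 * Illustrative sketch only (not part of the original driver): a
 * read-modify-write of a remote register built from the two helpers above.
 * The bit handling is generic; no specific register or bit layout is implied.
 */
static int __maybe_unused qla2xxx_example_rmw_remote_register(
	scsi_qla_host_t *vha, uint32_t addr, uint32_t set_bits)
{
	uint32_t val = 0;
	int rval;

	rval = qla2xxx_read_remote_register(vha, addr, &val);
	if (rval != QLA_SUCCESS)
		return rval;

	return qla2xxx_write_remote_register(vha, addr, val | set_bits);
}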