/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/delay.h>
#include <linux/gfp.h>

static struct mb_cmd_name {
	uint16_t cmd;
	const char *str;
} mb_str[] = {
	{MBC_GET_PORT_DATABASE,		"GPDB"},
	{MBC_GET_ID_LIST,		"GIDList"},
	{MBC_GET_LINK_PRIV_STATS,	"Stats"},
	{MBC_GET_RESOURCE_COUNTS,	"ResCnt"},
};

static const char *mb_to_str(uint16_t cmd)
{
	int i;
	struct mb_cmd_name *e;

	for (i = 0; i < ARRAY_SIZE(mb_str); i++) {
		e = mb_str + i;
		if (cmd == e->cmd)
			return e->str;
	}
	return "unknown";
}

static struct rom_cmd {
	uint16_t cmd;
} rom_cmds[] = {
	{ MBC_LOAD_RAM },
	{ MBC_EXECUTE_FIRMWARE },
	{ MBC_READ_RAM_WORD },
	{ MBC_MAILBOX_REGISTER_TEST },
	{ MBC_VERIFY_CHECKSUM },
	{ MBC_GET_FIRMWARE_VERSION },
	{ MBC_LOAD_RISC_RAM },
	{ MBC_DUMP_RISC_RAM },
	{ MBC_LOAD_RISC_RAM_EXTENDED },
	{ MBC_DUMP_RISC_RAM_EXTENDED },
	{ MBC_WRITE_RAM_WORD_EXTENDED },
	{ MBC_READ_RAM_EXTENDED },
	{ MBC_GET_RESOURCE_COUNTS },
	{ MBC_SET_FIRMWARE_OPTION },
	{ MBC_MID_INITIALIZE_FIRMWARE },
	{ MBC_GET_FIRMWARE_STATE },
	{ MBC_GET_MEM_OFFLOAD_CNTRL_STAT },
	{ MBC_GET_RETRY_COUNT },
	{ MBC_TRACE_CONTROL },
	{ MBC_INITIALIZE_MULTIQ },
	{ MBC_IOCB_COMMAND_A64 },
	{ MBC_GET_ADAPTER_LOOP_ID },
	{ MBC_READ_SFP },
	{ MBC_GET_RNID_PARAMS },
	{ MBC_GET_SET_ZIO_THRESHOLD },
};

static int is_rom_cmd(uint16_t cmd)
{
	int i;
	struct rom_cmd *wc;

	for (i = 0; i < ARRAY_SIZE(rom_cmds); i++) {
		wc = rom_cmds + i;
		if (wc->cmd == cmd)
			return 1;
	}

	return 0;
}
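
/*
 * Illustrative sketch (not part of the driver): how the two lookup helpers
 * above are typically combined when tracing a mailbox opcode.  The function
 * name and the ql_dbg code used here are hypothetical.
 */
static void __maybe_unused qla2x00_trace_mb_opcode(scsi_qla_host_t *vha,
    uint16_t cmd)
{
	ql_dbg(ql_dbg_mbx, vha, 0x1000,
	    "mbx opcode 0x%x (%s)%s\n", cmd, mb_to_str(cmd),
	    is_rom_cmd(cmd) ? " [allowed during ISP abort]" : "");
}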

/*
 * qla2x00_mailbox_command
 *	Issues a mailbox command and waits for completion.
 *
 * Input:
 *	ha = adapter block pointer.
 *	mcp = driver internal mbx struct pointer.
 *
 * Output:
 *	mb[MAX_MAILBOX_REGISTER_COUNT] = returned mailbox data.
 *
 * Returns:
 *	0 : QLA_SUCCESS = cmd performed successfully
 *	1 : QLA_FUNCTION_FAILED (error encountered)
 *	6 : QLA_FUNCTION_TIMEOUT (timeout condition encountered)
 *
 * Context:
 *	Kernel context.
 */
static int
qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
{
	int		rval, i;
	unsigned long	flags = 0;
	device_reg_t	*reg;
	uint8_t		abort_active;
	uint8_t		io_lock_on;
	uint16_t	command = 0;
	uint16_t	*iptr;
	uint16_t __iomem *optr;
	uint32_t	cnt;
	uint32_t	mboxes;
	unsigned long	wait_time;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
	u32 chip_reset;


	ql_dbg(ql_dbg_mbx, vha, 0x1000, "Entered %s.\n", __func__);

	if (ha->pdev->error_state > pci_channel_io_frozen) {
		ql_log(ql_log_warn, vha, 0x1001,
		    "error_state is greater than pci_channel_io_frozen, "
		    "exiting.\n");
		return QLA_FUNCTION_TIMEOUT;
	}

	if (vha->device_flags & DFLG_DEV_FAILED) {
		ql_log(ql_log_warn, vha, 0x1002,
		    "Device in failed state, exiting.\n");
		return QLA_FUNCTION_TIMEOUT;
	}

	/* If PCI error, then avoid mbx processing. */
	if (test_bit(PFLG_DISCONNECTED, &base_vha->dpc_flags) &&
	    test_bit(UNLOADING, &base_vha->dpc_flags)) {
		ql_log(ql_log_warn, vha, 0xd04e,
		    "PCI error, exiting.\n");
		return QLA_FUNCTION_TIMEOUT;
	}

	reg = ha->iobase;
	io_lock_on = base_vha->flags.init_done;

	rval = QLA_SUCCESS;
	abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
	chip_reset = ha->chip_reset;

	if (ha->flags.pci_channel_io_perm_failure) {
		ql_log(ql_log_warn, vha, 0x1003,
		    "Perm failure on EEH timeout MBX, exiting.\n");
		return QLA_FUNCTION_TIMEOUT;
	}

	if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
		/* Setting Link-Down error */
		mcp->mb[0] = MBS_LINK_DOWN_ERROR;
		ql_log(ql_log_warn, vha, 0x1004,
		    "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
		return QLA_FUNCTION_TIMEOUT;
	}

	/* Check if ISP abort is active and return cmd with timeout. */
	if ((test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
	    test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) &&
	    !is_rom_cmd(mcp->mb[0])) {
		ql_log(ql_log_info, vha, 0x1005,
		    "Cmd 0x%x aborted with timeout since ISP Abort is pending\n",
		    mcp->mb[0]);
		return QLA_FUNCTION_TIMEOUT;
	}

	atomic_inc(&ha->num_pend_mbx_stage1);
	/*
	 * Wait for active mailbox commands to finish by waiting at most tov
	 * seconds. This is to serialize actual issuing of mailbox cmds during
	 * non ISP abort time.
	 */
	if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) {
		/* Timeout occurred. Return error. */
		ql_log(ql_log_warn, vha, 0xd035,
		    "Cmd access timeout, cmd=0x%x, Exiting.\n",
		    mcp->mb[0]);
		atomic_dec(&ha->num_pend_mbx_stage1);
		return QLA_FUNCTION_TIMEOUT;
	}
	atomic_dec(&ha->num_pend_mbx_stage1);
	if (ha->flags.purge_mbox || chip_reset != ha->chip_reset) {
		rval = QLA_ABORTED;
		goto premature_exit;
	}


	/* Save mailbox command for debug */
	ha->mcp = mcp;

	ql_dbg(ql_dbg_mbx, vha, 0x1006,
	    "Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]);

	spin_lock_irqsave(&ha->hardware_lock, flags);

	if (ha->flags.purge_mbox || chip_reset != ha->chip_reset ||
	    ha->flags.mbox_busy) {
		rval = QLA_ABORTED;
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		goto premature_exit;
	}
	ha->flags.mbox_busy = 1;

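	/*
	 * mcp->out_mb is a bit mask: if bit N is set, the value in mcp->mb[N]
	 * is written to outgoing mailbox register N by the loop below (the
	 * mask is shifted right once per register).
	 */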
	/* Load mailbox registers. */
	if (IS_P3P_TYPE(ha))
		optr = (uint16_t __iomem *)&reg->isp82.mailbox_in[0];
	else if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha)))
		optr = (uint16_t __iomem *)&reg->isp24.mailbox0;
	else
		optr = (uint16_t __iomem *)MAILBOX_REG(ha, &reg->isp, 0);

	iptr = mcp->mb;
	command = mcp->mb[0];
	mboxes = mcp->out_mb;

	ql_dbg(ql_dbg_mbx, vha, 0x1111,
	    "Mailbox registers (OUT):\n");
	for (cnt = 0; cnt < ha->mbx_count; cnt++) {
		if (IS_QLA2200(ha) && cnt == 8)
			optr =
			    (uint16_t __iomem *)MAILBOX_REG(ha, &reg->isp, 8);
		if (mboxes & BIT_0) {
			ql_dbg(ql_dbg_mbx, vha, 0x1112,
			    "mbox[%d]<-0x%04x\n", cnt, *iptr);
			WRT_REG_WORD(optr, *iptr);
		}

		mboxes >>= 1;
		optr++;
		iptr++;
	}

	ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1117,
	    "I/O Address = %p.\n", optr);

	/* Issue set host interrupt command to send cmd out. */
	ha->flags.mbox_int = 0;
	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);

	/* Unlock mbx registers and wait for interrupt */
	ql_dbg(ql_dbg_mbx, vha, 0x100f,
	    "Going to unlock irq & waiting for interrupts. "
	    "jiffies=%lx.\n", jiffies);

	/* Wait for mbx cmd completion until timeout */
	atomic_inc(&ha->num_pend_mbx_stage2);
	if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) {
		set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);

		if (IS_P3P_TYPE(ha))
			WRT_REG_DWORD(&reg->isp82.hint, HINT_MBX_INT_PENDING);
		else if (IS_FWI2_CAPABLE(ha))
			WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
		else
			WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		wait_time = jiffies;
		atomic_inc(&ha->num_pend_mbx_stage3);
		if (!wait_for_completion_timeout(&ha->mbx_intr_comp,
		    mcp->tov * HZ)) {
			if (chip_reset != ha->chip_reset) {
				spin_lock_irqsave(&ha->hardware_lock, flags);
				ha->flags.mbox_busy = 0;
				spin_unlock_irqrestore(&ha->hardware_lock,
				    flags);
				atomic_dec(&ha->num_pend_mbx_stage2);
				atomic_dec(&ha->num_pend_mbx_stage3);
				rval = QLA_ABORTED;
				goto premature_exit;
			}
			ql_dbg(ql_dbg_mbx, vha, 0x117a,
			    "cmd=%x Timeout.\n", command);
			spin_lock_irqsave(&ha->hardware_lock, flags);
			clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
			spin_unlock_irqrestore(&ha->hardware_lock, flags);

		} else if (ha->flags.purge_mbox ||
		    chip_reset != ha->chip_reset) {
			spin_lock_irqsave(&ha->hardware_lock, flags);
			ha->flags.mbox_busy = 0;
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
			atomic_dec(&ha->num_pend_mbx_stage2);
			atomic_dec(&ha->num_pend_mbx_stage3);
			rval = QLA_ABORTED;
			goto premature_exit;
		}
		atomic_dec(&ha->num_pend_mbx_stage3);

		if (time_after(jiffies, wait_time + 5 * HZ))
			ql_log(ql_log_warn, vha, 0x1015, "cmd=0x%x, waited %d msecs\n",
			    command, jiffies_to_msecs(jiffies - wait_time));
	} else {
		ql_dbg(ql_dbg_mbx, vha, 0x1011,
		    "Cmd=%x Polling Mode.\n", command);

		if (IS_P3P_TYPE(ha)) {
			if (RD_REG_DWORD(&reg->isp82.hint) &
			    HINT_MBX_INT_PENDING) {
				ha->flags.mbox_busy = 0;
				spin_unlock_irqrestore(&ha->hardware_lock,
				    flags);
				atomic_dec(&ha->num_pend_mbx_stage2);
				ql_dbg(ql_dbg_mbx, vha, 0x1012,
				    "Pending mailbox timeout, exiting.\n");
				rval = QLA_FUNCTION_TIMEOUT;
				goto premature_exit;
			}
			WRT_REG_DWORD(&reg->isp82.hint, HINT_MBX_INT_PENDING);
		} else if (IS_FWI2_CAPABLE(ha))
			WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
		else
			WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */
		while (!ha->flags.mbox_int) {
			if (ha->flags.purge_mbox ||
			    chip_reset != ha->chip_reset) {
				spin_lock_irqsave(&ha->hardware_lock, flags);
				ha->flags.mbox_busy = 0;
				spin_unlock_irqrestore(&ha->hardware_lock,
				    flags);
				atomic_dec(&ha->num_pend_mbx_stage2);
				rval = QLA_ABORTED;
				goto premature_exit;
			}

			if (time_after(jiffies, wait_time))
				break;

			/*
			 * Check if it's UNLOADING, cause we cannot poll in
			 * this case, or else a NULL pointer dereference
			 * is triggered.
			 */
			if (unlikely(test_bit(UNLOADING, &base_vha->dpc_flags)))
				return QLA_FUNCTION_TIMEOUT;

			/* Check for pending interrupts. */
			qla2x00_poll(ha->rsp_q_map[0]);

			if (!ha->flags.mbox_int &&
			    !(IS_QLA2200(ha) &&
			    command == MBC_LOAD_RISC_RAM_EXTENDED))
				msleep(10);
		} /* while */
		ql_dbg(ql_dbg_mbx, vha, 0x1013,
		    "Waited %d sec.\n",
		    (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ));
	}
	atomic_dec(&ha->num_pend_mbx_stage2);

	/* Check whether we timed out */
	if (ha->flags.mbox_int) {
		uint16_t *iptr2;

		ql_dbg(ql_dbg_mbx, vha, 0x1014,
		    "Cmd=%x completed.\n", command);

		/* Got interrupt. Clear the flag. */
		ha->flags.mbox_int = 0;
		clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);

		if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
			spin_lock_irqsave(&ha->hardware_lock, flags);
			ha->flags.mbox_busy = 0;
			spin_unlock_irqrestore(&ha->hardware_lock, flags);

			/* Setting Link-Down error */
			mcp->mb[0] = MBS_LINK_DOWN_ERROR;
			ha->mcp = NULL;
			rval = QLA_FUNCTION_FAILED;
			ql_log(ql_log_warn, vha, 0xd048,
			    "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
			goto premature_exit;
		}

		if (ha->mailbox_out[0] != MBS_COMMAND_COMPLETE) {
			ql_dbg(ql_dbg_mbx, vha, 0x11ff,
			    "mb_out[0] = %#x <> %#x\n", ha->mailbox_out[0],
			    MBS_COMMAND_COMPLETE);
			rval = QLA_FUNCTION_FAILED;
		}

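		/*
		 * mcp->in_mb works like out_mb above: bit N selects which of
		 * the completion registers, saved by the interrupt path in
		 * ha->mailbox_out[], are copied back into mcp->mb[N].
		 */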
		/* Load return mailbox registers. */
		iptr2 = mcp->mb;
		iptr = (uint16_t *)&ha->mailbox_out[0];
		mboxes = mcp->in_mb;

		ql_dbg(ql_dbg_mbx, vha, 0x1113,
		    "Mailbox registers (IN):\n");
		for (cnt = 0; cnt < ha->mbx_count; cnt++) {
			if (mboxes & BIT_0) {
				*iptr2 = *iptr;
				ql_dbg(ql_dbg_mbx, vha, 0x1114,
				    "mbox[%d]->0x%04x\n", cnt, *iptr2);
			}

			mboxes >>= 1;
			iptr2++;
			iptr++;
		}
	} else {

		uint16_t mb[8];
		uint32_t ictrl, host_status, hccr;
		uint16_t w;

		if (IS_FWI2_CAPABLE(ha)) {
			mb[0] = RD_REG_WORD(&reg->isp24.mailbox0);
			mb[1] = RD_REG_WORD(&reg->isp24.mailbox1);
			mb[2] = RD_REG_WORD(&reg->isp24.mailbox2);
			mb[3] = RD_REG_WORD(&reg->isp24.mailbox3);
			mb[7] = RD_REG_WORD(&reg->isp24.mailbox7);
			ictrl = RD_REG_DWORD(&reg->isp24.ictrl);
			host_status = RD_REG_DWORD(&reg->isp24.host_status);
			hccr = RD_REG_DWORD(&reg->isp24.hccr);

			ql_log(ql_log_warn, vha, 0xd04c,
			    "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
			    "mb[0-3]=[0x%x 0x%x 0x%x 0x%x] mb7 0x%x host_status 0x%x hccr 0x%x\n",
			    command, ictrl, jiffies, mb[0], mb[1], mb[2], mb[3],
			    mb[7], host_status, hccr);

		} else {
			mb[0] = RD_MAILBOX_REG(ha, &reg->isp, 0);
			ictrl = RD_REG_WORD(&reg->isp.ictrl);
			ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119,
			    "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
			    "mb[0]=0x%x\n", command, ictrl, jiffies, mb[0]);
		}
		ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019);

		/* Capture FW dump only, if PCI device active */
		if (!pci_channel_offline(vha->hw->pdev)) {
			pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
			if (w == 0xffff || ictrl == 0xffffffff ||
			    (chip_reset != ha->chip_reset)) {
				/* This is a special case: if the driver is
				 * being unloaded and the PCI device goes
				 * into a bad state due to a PCI error
				 * condition, then only the PCI ERR flag
				 * would be set; do a premature exit here.
				 */
				spin_lock_irqsave(&ha->hardware_lock, flags);
				ha->flags.mbox_busy = 0;
				spin_unlock_irqrestore(&ha->hardware_lock,
				    flags);
				rval = QLA_FUNCTION_TIMEOUT;
				goto premature_exit;
			}

			/* Attempt to capture a firmware dump for further
			 * analysis of the current firmware state. We do not
			 * need to do this if we are intentionally generating
			 * a dump.
			 */
			if (mcp->mb[0] != MBC_GEN_SYSTEM_ERROR)
				ha->isp_ops->fw_dump(vha, 0);
			rval = QLA_FUNCTION_TIMEOUT;
		}
	}
	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->flags.mbox_busy = 0;
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	/* Clean up */
	ha->mcp = NULL;

	if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) {
		ql_dbg(ql_dbg_mbx, vha, 0x101a,
		    "Checking for additional resp interrupt.\n");

		/* polling mode for non isp_abort commands. */
		qla2x00_poll(ha->rsp_q_map[0]);
	}

	if (rval == QLA_FUNCTION_TIMEOUT &&
	    mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) {
		if (!io_lock_on || (mcp->flags & IOCTL_CMD) ||
		    ha->flags.eeh_busy) {
			/* not in dpc. schedule it for dpc to take over. */
			ql_dbg(ql_dbg_mbx, vha, 0x101b,
			    "Timeout, schedule isp_abort_needed.\n");

			if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
			    !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
			    !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
				if (IS_QLA82XX(ha)) {
					ql_dbg(ql_dbg_mbx, vha, 0x112a,
					    "disabling pause transmit on port "
					    "0 & 1.\n");
					qla82xx_wr_32(ha,
					    QLA82XX_CRB_NIU + 0x98,
					    CRB_NIU_XG_PAUSE_CTL_P0|
					    CRB_NIU_XG_PAUSE_CTL_P1);
				}
				ql_log(ql_log_info, base_vha, 0x101c,
				    "Mailbox cmd timeout occurred, cmd=0x%x, "
				    "mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP "
				    "abort.\n", command, mcp->mb[0],
				    ha->flags.eeh_busy);
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
			}
		} else if (current == ha->dpc_thread) {
			/* call abort directly since we are in the DPC thread */
			ql_dbg(ql_dbg_mbx, vha, 0x101d,
			    "Timeout, calling abort_isp.\n");

			if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
			    !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
			    !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
				if (IS_QLA82XX(ha)) {
					ql_dbg(ql_dbg_mbx, vha, 0x112b,
					    "disabling pause transmit on port "
					    "0 & 1.\n");
					qla82xx_wr_32(ha,
					    QLA82XX_CRB_NIU + 0x98,
					    CRB_NIU_XG_PAUSE_CTL_P0|
					    CRB_NIU_XG_PAUSE_CTL_P1);
				}
				ql_log(ql_log_info, base_vha, 0x101e,
				    "Mailbox cmd timeout occurred, cmd=0x%x, "
				    "mb[0]=0x%x. Scheduling ISP abort ",
				    command, mcp->mb[0]);
				set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
				clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				/* Allow next mbx cmd to come in. */
				complete(&ha->mbx_cmd_comp);
				if (ha->isp_ops->abort_isp(vha)) {
					/* Failed. retry later. */
					set_bit(ISP_ABORT_NEEDED,
					    &vha->dpc_flags);
				}
				clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
				ql_dbg(ql_dbg_mbx, vha, 0x101f,
				    "Finished abort_isp.\n");
				goto mbx_done;
			}
		}
	}

premature_exit:
	/* Allow next mbx cmd to come in. */
	complete(&ha->mbx_cmd_comp);

mbx_done:
	if (rval == QLA_ABORTED) {
		ql_log(ql_log_info, vha, 0xd035,
		    "Chip Reset in progress. Purging Mbox cmd=0x%x.\n",
		    mcp->mb[0]);
	} else if (rval) {
		if (ql2xextended_error_logging & (ql_dbg_disc|ql_dbg_mbx)) {
			pr_warn("%s [%s]-%04x:%ld: **** Failed=%x", QL_MSGHDR,
			    dev_name(&ha->pdev->dev), 0x1020+0x800,
			    vha->host_no, rval);
			mboxes = mcp->in_mb;
			cnt = 4;
			for (i = 0; i < ha->mbx_count && cnt; i++, mboxes >>= 1)
				if (mboxes & BIT_0) {
					printk(" mb[%u]=%x", i, mcp->mb[i]);
					cnt--;
				}
			pr_warn(" cmd=%x ****\n", command);
		}
		if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha))) {
			ql_dbg(ql_dbg_mbx, vha, 0x1198,
			    "host_status=%#x intr_ctrl=%#x intr_status=%#x\n",
			    RD_REG_DWORD(&reg->isp24.host_status),
			    RD_REG_DWORD(&reg->isp24.ictrl),
			    RD_REG_DWORD(&reg->isp24.istatus));
		} else {
			ql_dbg(ql_dbg_mbx, vha, 0x1206,
			    "ctrl_status=%#x ictrl=%#x istatus=%#x\n",
			    RD_REG_WORD(&reg->isp.ctrl_status),
			    RD_REG_WORD(&reg->isp.ictrl),
			    RD_REG_WORD(&reg->isp.istatus));
		}
	} else {
		ql_dbg(ql_dbg_mbx, base_vha, 0x1021, "Done %s.\n", __func__);
	}

	return rval;
}

int
qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
    uint32_t risc_code_size)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1022,
	    "Entered %s.\n", __func__);

	if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) {
		mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED;
		mcp->mb[8] = MSW(risc_addr);
		mcp->out_mb = MBX_8|MBX_0;
	} else {
		mcp->mb[0] = MBC_LOAD_RISC_RAM;
		mcp->out_mb = MBX_0;
	}
	mcp->mb[1] = LSW(risc_addr);
	mcp->mb[2] = MSW(req_dma);
	mcp->mb[3] = LSW(req_dma);
	mcp->mb[6] = MSW(MSD(req_dma));
	mcp->mb[7] = LSW(MSD(req_dma));
	mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
	if (IS_FWI2_CAPABLE(ha)) {
		mcp->mb[4] = MSW(risc_code_size);
		mcp->mb[5] = LSW(risc_code_size);
		mcp->out_mb |= MBX_5|MBX_4;
	} else {
		mcp->mb[4] = LSW(risc_code_size);
		mcp->out_mb |= MBX_4;
	}

	mcp->in_mb = MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1023,
		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
		    rval, mcp->mb[0], mcp->mb[1]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1024,
		    "Done %s.\n", __func__);
	}

	return rval;
}

#define	EXTENDED_BB_CREDITS	BIT_0
#define	NVME_ENABLE_FLAG	BIT_3
static inline uint16_t qla25xx_set_sfp_lr_dist(struct qla_hw_data *ha)
{
	uint16_t mb4 = BIT_0;

	if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
		mb4 |= ha->long_range_distance << LR_DIST_FW_POS;

	return mb4;
}

static inline uint16_t qla25xx_set_nvr_lr_dist(struct qla_hw_data *ha)
{
	uint16_t mb4 = BIT_0;

	if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
		struct nvram_81xx *nv = ha->nvram;

		mb4 |= LR_DIST_FW_FIELD(nv->enhanced_features);
	}

	return mb4;
}
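
/*
 * Illustrative sketch (not part of the driver): the typical calling pattern
 * for qla2x00_mailbox_command(), shown here with MBC_GET_FIRMWARE_STATE.
 * The wrapper name is hypothetical; the real wrapper for this opcode is
 * qla2x00_get_firmware_state() further down in this file.
 */
static int __maybe_unused qla2x00_example_get_fw_state(scsi_qla_host_t *vha,
    uint16_t *state)
{
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	int rval;

	mcp->mb[0] = MBC_GET_FIRMWARE_STATE;	/* opcode in mailbox 0 */
	mcp->out_mb = MBX_0;			/* registers written to fw */
	mcp->in_mb = MBX_1|MBX_0;		/* registers read back */
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval == QLA_SUCCESS)
		*state = mcp->mb[1];
	return rval;
}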

/*
 * qla2x00_execute_fw
 *	Start adapter firmware.
 *
 * Input:
 *	ha = adapter block pointer.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1025,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_EXECUTE_FIRMWARE;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_0;
	if (IS_FWI2_CAPABLE(ha)) {
		mcp->mb[1] = MSW(risc_addr);
		mcp->mb[2] = LSW(risc_addr);
		mcp->mb[3] = 0;
		mcp->mb[4] = 0;
		mcp->mb[11] = 0;
		ha->flags.using_lr_setting = 0;
		if (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
		    IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
			if (ql2xautodetectsfp) {
				if (ha->flags.detected_lr_sfp) {
					mcp->mb[4] |=
					    qla25xx_set_sfp_lr_dist(ha);
					ha->flags.using_lr_setting = 1;
				}
			} else {
				struct nvram_81xx *nv = ha->nvram;
				/* set LR distance if specified in nvram */
				if (nv->enhanced_features &
				    NEF_LR_DIST_ENABLE) {
					mcp->mb[4] |=
					    qla25xx_set_nvr_lr_dist(ha);
					ha->flags.using_lr_setting = 1;
				}
			}
		}

		if (ql2xnvmeenable && (IS_QLA27XX(ha) || IS_QLA28XX(ha)))
			mcp->mb[4] |= NVME_ENABLE_FLAG;

		if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
			struct nvram_81xx *nv = ha->nvram;
			/* set minimum speed if specified in nvram */
			if (nv->min_supported_speed >= 2 &&
			    nv->min_supported_speed <= 5) {
				mcp->mb[4] |= BIT_4;
				mcp->mb[11] |= nv->min_supported_speed & 0xF;
				mcp->out_mb |= MBX_11;
				mcp->in_mb |= BIT_5;
				vha->min_supported_speed =
				    nv->min_supported_speed;
			}
		}

		if (ha->flags.exlogins_enabled)
			mcp->mb[4] |= ENABLE_EXTENDED_LOGIN;

		if (ha->flags.exchoffld_enabled)
			mcp->mb[4] |= ENABLE_EXCHANGE_OFFLD;

		mcp->out_mb |= MBX_4 | MBX_3 | MBX_2 | MBX_1 | MBX_11;
		mcp->in_mb |= MBX_3 | MBX_2 | MBX_1;
	} else {
		mcp->mb[1] = LSW(risc_addr);
		mcp->out_mb |= MBX_1;
		if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
			mcp->mb[2] = 0;
			mcp->out_mb |= MBX_2;
		}
	}

	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1026,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
		return rval;
	}

	if (!IS_FWI2_CAPABLE(ha))
		goto done;

	ha->fw_ability_mask = mcp->mb[3] << 16 | mcp->mb[2];
	ql_dbg(ql_dbg_mbx, vha, 0x119a,
	    "fw_ability_mask=%x.\n", ha->fw_ability_mask);
	ql_dbg(ql_dbg_mbx, vha, 0x1027, "exchanges=%x.\n", mcp->mb[1]);
	if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
		ha->max_supported_speed = mcp->mb[2] & (BIT_0|BIT_1);
		ql_dbg(ql_dbg_mbx, vha, 0x119b, "max_supported_speed=%s.\n",
		    ha->max_supported_speed == 0 ? "16Gps" :
		    ha->max_supported_speed == 1 ? "32Gps" :
		    ha->max_supported_speed == 2 ? "64Gps" : "unknown");
		if (vha->min_supported_speed) {
			ha->min_supported_speed = mcp->mb[5] &
			    (BIT_0 | BIT_1 | BIT_2);
			ql_dbg(ql_dbg_mbx, vha, 0x119c,
			    "min_supported_speed=%s.\n",
			    ha->min_supported_speed == 6 ? "64Gps" :
			    ha->min_supported_speed == 5 ? "32Gps" :
			    ha->min_supported_speed == 4 ? "16Gps" :
			    ha->min_supported_speed == 3 ? "8Gps" :
			    ha->min_supported_speed == 2 ? "4Gps" : "unknown");
		}
	}

done:
	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1028,
	    "Done %s.\n", __func__);

	return rval;
}

/*
 * qla_get_exlogin_status
 *	Get extended login status
 *	uses the memory offload control/status Mailbox
 *
 * Input:
 *	ha:		adapter state pointer.
 *	fwopt:		firmware options
 *
 * Returns:
 *	qla2x00 local function status
 *
 * Context:
 *	Kernel context.
 */
#define	FETCH_XLOGINS_STAT	0x8
int
qla_get_exlogin_status(scsi_qla_host_t *vha, uint16_t *buf_sz,
    uint16_t *ex_logins_cnt)
{
	int rval;
	mbx_cmd_t	mc;
	mbx_cmd_t	*mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118f,
	    "Entered %s\n", __func__);

	memset(mcp->mb, 0 , sizeof(mcp->mb));
	mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
	mcp->mb[1] = FETCH_XLOGINS_STAT;
	mcp->out_mb = MBX_1|MBX_0;
	mcp->in_mb = MBX_10|MBX_4|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;

	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1115, "Failed=%x.\n", rval);
	} else {
		*buf_sz = mcp->mb[4];
		*ex_logins_cnt = mcp->mb[10];

		ql_log(ql_log_info, vha, 0x1190,
		    "buffer size 0x%x, exchange login count=%d\n",
		    mcp->mb[4], mcp->mb[10]);

		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1116,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla_set_exlogin_mem_cfg
 *	set extended login memory configuration
 *	Mbx needs to be issued before init_cb is set
 *
 * Input:
 *	ha:		adapter state pointer.
 *	buffer:		buffer pointer
 *	phys_addr:	physical address of buffer
 *	size:		size of buffer
 *	TARGET_QUEUE_LOCK must be released
 *	ADAPTER_STATE_LOCK must be released
 *
 * Returns:
 *	qla2x00 local function status code.
 *
 * Context:
 *	Kernel context.
 */
#define CONFIG_XLOGINS_MEM	0x3
int
qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr)
{
	int rval;
	mbx_cmd_t	mc;
	mbx_cmd_t	*mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111a,
	    "Entered %s.\n", __func__);

	memset(mcp->mb, 0 , sizeof(mcp->mb));
	mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
	mcp->mb[1] = CONFIG_XLOGINS_MEM;
	mcp->mb[2] = MSW(phys_addr);
	mcp->mb[3] = LSW(phys_addr);
	mcp->mb[6] = MSW(MSD(phys_addr));
	mcp->mb[7] = LSW(MSD(phys_addr));
	mcp->mb[8] = MSW(ha->exlogin_size);
	mcp->mb[9] = LSW(ha->exlogin_size);
	mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_11|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x111b, "Failed=%x.\n", rval);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla_get_exchoffld_status
 *	Get exchange offload status
 *	uses the memory offload control/status Mailbox
 *
 * Input:
 *	ha:		adapter state pointer.
 *	fwopt:		firmware options
 *
 * Returns:
 *	qla2x00 local function status
 *
 * Context:
 *	Kernel context.
 */
#define	FETCH_XCHOFFLD_STAT	0x2
int
qla_get_exchoffld_status(scsi_qla_host_t *vha, uint16_t *buf_sz,
    uint16_t *ex_logins_cnt)
{
	int rval;
	mbx_cmd_t	mc;
	mbx_cmd_t	*mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1019,
	    "Entered %s\n", __func__);

	memset(mcp->mb, 0 , sizeof(mcp->mb));
	mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
	mcp->mb[1] = FETCH_XCHOFFLD_STAT;
	mcp->out_mb = MBX_1|MBX_0;
	mcp->in_mb = MBX_10|MBX_4|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;

	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1155, "Failed=%x.\n", rval);
	} else {
		*buf_sz = mcp->mb[4];
		*ex_logins_cnt = mcp->mb[10];

		ql_log(ql_log_info, vha, 0x118e,
		    "buffer size 0x%x, exchange offload count=%d\n",
		    mcp->mb[4], mcp->mb[10]);

		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1156,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla_set_exchoffld_mem_cfg
 *	Set exchange offload memory configuration
 *	Mbx needs to be issued before init_cb is set
 *
 * Input:
 *	ha:		adapter state pointer.
 *	buffer:		buffer pointer
 *	phys_addr:	physical address of buffer
 *	size:		size of buffer
 *	TARGET_QUEUE_LOCK must be released
 *	ADAPTER_STATE_LOCK must be released
 *
 * Returns:
 *	qla2x00 local function status code.
 *
 * Context:
 *	Kernel context.
 */
#define CONFIG_XCHOFFLD_MEM	0x3
int
qla_set_exchoffld_mem_cfg(scsi_qla_host_t *vha)
{
	int rval;
	mbx_cmd_t	mc;
	mbx_cmd_t	*mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1157,
	    "Entered %s.\n", __func__);

	memset(mcp->mb, 0 , sizeof(mcp->mb));
	mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
	mcp->mb[1] = CONFIG_XCHOFFLD_MEM;
	mcp->mb[2] = MSW(ha->exchoffld_buf_dma);
	mcp->mb[3] = LSW(ha->exchoffld_buf_dma);
	mcp->mb[6] = MSW(MSD(ha->exchoffld_buf_dma));
	mcp->mb[7] = LSW(MSD(ha->exchoffld_buf_dma));
	mcp->mb[8] = MSW(ha->exchoffld_size);
	mcp->mb[9] = LSW(ha->exchoffld_size);
	mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_11|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x1158, "Failed=%x.\n", rval);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1192,
		    "Done %s.\n", __func__);
	}

	return rval;
}

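/*
 * Note: the four helpers above are variants of one mailbox command,
 * MBC_GET_MEM_OFFLOAD_CNTRL_STAT; mb[1] selects the sub-operation
 * (FETCH_XLOGINS_STAT, CONFIG_XLOGINS_MEM, FETCH_XCHOFFLD_STAT or
 * CONFIG_XCHOFFLD_MEM).
 *
 * Illustrative sketch (not part of the driver): the expected pairing is to
 * query the per-entry buffer size first and then hand the firmware a DMA
 * buffer of that size.  The function name, the sizing arithmetic and the use
 * of the exlogin_* fields exactly as below are assumptions made only for
 * this example; see qla2x00_set_exlogins_buffer() in qla_init.c for the real
 * user.
 */
static int __maybe_unused qla2x00_example_setup_exlogin(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t size = 0, cnt = 0;
	int rval;

	rval = qla_get_exlogin_status(vha, &size, &cnt);
	if (rval != QLA_SUCCESS)
		return rval;

	/* size is the per-login footprint reported by the firmware. */
	ha->exlogin_size = size * cnt;
	ha->exlogin_buf = dma_alloc_coherent(&ha->pdev->dev, ha->exlogin_size,
	    &ha->exlogin_buf_dma, GFP_KERNEL);
	if (!ha->exlogin_buf)
		return QLA_MEMORY_ALLOC_FAILED;

	return qla_set_exlogin_mem_cfg(vha, ha->exlogin_buf_dma);
}
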
/*
 * qla2x00_get_fw_version
 *	Get firmware version.
 *
 * Input:
 *	ha:		adapter state pointer.
 *	major:		pointer for major number.
 *	minor:		pointer for minor number.
 *	subminor:	pointer for subminor number.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_get_fw_version(scsi_qla_host_t *vha)
{
	int		rval;
	mbx_cmd_t	mc;
	mbx_cmd_t	*mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1029,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_GET_FIRMWARE_VERSION;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	if (IS_QLA81XX(vha->hw) || IS_QLA8031(ha) || IS_QLA8044(ha))
		mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8;
	if (IS_FWI2_CAPABLE(ha))
		mcp->in_mb |= MBX_17|MBX_16|MBX_15;
	if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
		mcp->in_mb |=
		    MBX_25|MBX_24|MBX_23|MBX_22|MBX_21|MBX_20|MBX_19|MBX_18|
		    MBX_14|MBX_13|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7;

	mcp->flags = 0;
	mcp->tov = MBX_TOV_SECONDS;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS)
		goto failed;

	/* Return mailbox data. */
	ha->fw_major_version = mcp->mb[1];
	ha->fw_minor_version = mcp->mb[2];
	ha->fw_subminor_version = mcp->mb[3];
	ha->fw_attributes = mcp->mb[6];
	if (IS_QLA2100(vha->hw) || IS_QLA2200(vha->hw))
		ha->fw_memory_size = 0x1FFFF;		/* Defaults to 128KB. */
	else
		ha->fw_memory_size = (mcp->mb[5] << 16) | mcp->mb[4];

	if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
		ha->mpi_version[0] = mcp->mb[10] & 0xff;
		ha->mpi_version[1] = mcp->mb[11] >> 8;
		ha->mpi_version[2] = mcp->mb[11] & 0xff;
		ha->mpi_capabilities = (mcp->mb[12] << 16) | mcp->mb[13];
		ha->phy_version[0] = mcp->mb[8] & 0xff;
		ha->phy_version[1] = mcp->mb[9] >> 8;
		ha->phy_version[2] = mcp->mb[9] & 0xff;
	}

	if (IS_FWI2_CAPABLE(ha)) {
		ha->fw_attributes_h = mcp->mb[15];
		ha->fw_attributes_ext[0] = mcp->mb[16];
		ha->fw_attributes_ext[1] = mcp->mb[17];
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1139,
		    "%s: FW_attributes Upper: 0x%x, Lower: 0x%x.\n",
		    __func__, mcp->mb[15], mcp->mb[6]);
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x112f,
		    "%s: Ext_FwAttributes Upper: 0x%x, Lower: 0x%x.\n",
		    __func__, mcp->mb[17], mcp->mb[16]);

		if (ha->fw_attributes_h & 0x4)
			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118d,
			    "%s: Firmware supports Extended Login 0x%x\n",
			    __func__, ha->fw_attributes_h);

		if (ha->fw_attributes_h & 0x8)
			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1191,
			    "%s: Firmware supports Exchange Offload 0x%x\n",
			    __func__, ha->fw_attributes_h);

		/*
		 * FW supports nvme and driver load parameter requested nvme.
		 * BIT 26 of fw_attributes indicates NVMe support.
		 */
		if ((ha->fw_attributes_h &
		    (FW_ATTR_H_NVME | FW_ATTR_H_NVME_UPDATED)) &&
		    ql2xnvmeenable) {
			if (ha->fw_attributes_h & FW_ATTR_H_NVME_FBURST)
				vha->flags.nvme_first_burst = 1;

			vha->flags.nvme_enabled = 1;
			ql_log(ql_log_info, vha, 0xd302,
			    "%s: FC-NVMe is Enabled (0x%x)\n",
			    __func__, ha->fw_attributes_h);
		}
	}

	if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
		ha->serdes_version[0] = mcp->mb[7] & 0xff;
		ha->serdes_version[1] = mcp->mb[8] >> 8;
		ha->serdes_version[2] = mcp->mb[8] & 0xff;
		ha->mpi_version[0] = mcp->mb[10] & 0xff;
		ha->mpi_version[1] = mcp->mb[11] >> 8;
		ha->mpi_version[2] = mcp->mb[11] & 0xff;
		ha->pep_version[0] = mcp->mb[13] & 0xff;
		ha->pep_version[1] = mcp->mb[14] >> 8;
		ha->pep_version[2] = mcp->mb[14] & 0xff;
		ha->fw_shared_ram_start = (mcp->mb[19] << 16) | mcp->mb[18];
		ha->fw_shared_ram_end = (mcp->mb[21] << 16) | mcp->mb[20];
		ha->fw_ddr_ram_start = (mcp->mb[23] << 16) | mcp->mb[22];
		ha->fw_ddr_ram_end = (mcp->mb[25] << 16) | mcp->mb[24];
		if (IS_QLA28XX(ha)) {
			if (mcp->mb[16] & BIT_10) {
				ql_log(ql_log_info, vha, 0xffff,
				    "FW support secure flash updates\n");
				ha->flags.secure_fw = 1;
			}
		}
	}

failed:
	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x102a, "Failed=%x.\n", rval);
	} else {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102b,
		    "Done %s.\n", __func__);
	}
	return rval;
}

/*
 * qla2x00_get_fw_options
 *	Get firmware options.
 *
 * Input:
 *	ha = adapter block pointer.
 *	fwopt = pointer for firmware options.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102c,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_GET_FIRMWARE_OPTION;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x102d, "Failed=%x.\n", rval);
	} else {
		fwopts[0] = mcp->mb[0];
		fwopts[1] = mcp->mb[1];
		fwopts[2] = mcp->mb[2];
		fwopts[3] = mcp->mb[3];

		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102e,
		    "Done %s.\n", __func__);
	}

	return rval;
}

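/*
 * Illustrative sketch (not part of the driver): qla2x00_get_fw_options()
 * above and qla2x00_set_fw_options() below are normally used as a
 * read-modify-write pair.  The function name and the specific option bit
 * toggled here are hypothetical.
 */
static int __maybe_unused qla2x00_example_toggle_fw_option(scsi_qla_host_t *vha)
{
	uint16_t fwopts[16] = { 0 };
	int rval;

	rval = qla2x00_get_fw_options(vha, fwopts);
	if (rval != QLA_SUCCESS)
		return rval;

	fwopts[1] |= BIT_0;		/* hypothetical option bit */

	return qla2x00_set_fw_options(vha, fwopts);
}
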
/*
 * qla2x00_set_fw_options
 *	Set firmware options.
 *
 * Input:
 *	ha = adapter block pointer.
 *	fwopt = pointer for firmware options.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102f,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_SET_FIRMWARE_OPTION;
	mcp->mb[1] = fwopts[1];
	mcp->mb[2] = fwopts[2];
	mcp->mb[3] = fwopts[3];
	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_0;
	if (IS_FWI2_CAPABLE(vha->hw)) {
		mcp->in_mb |= MBX_1;
		mcp->mb[10] = fwopts[10];
		mcp->out_mb |= MBX_10;
	} else {
		mcp->mb[10] = fwopts[10];
		mcp->mb[11] = fwopts[11];
		mcp->mb[12] = 0;	/* Undocumented, but used */
		mcp->out_mb |= MBX_12|MBX_11|MBX_10;
	}
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	fwopts[0] = mcp->mb[0];

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x1030,
		    "Failed=%x (%x/%x).\n", rval, mcp->mb[0], mcp->mb[1]);
	} else {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1031,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2x00_mbx_reg_test
 *	Mailbox register wrap test.
 *
 * Input:
 *	ha = adapter block pointer.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_mbx_reg_test(scsi_qla_host_t *vha)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1032,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST;
	mcp->mb[1] = 0xAAAA;
	mcp->mb[2] = 0x5555;
	mcp->mb[3] = 0xAA55;
	mcp->mb[4] = 0x55AA;
	mcp->mb[5] = 0xA5A5;
	mcp->mb[6] = 0x5A5A;
	mcp->mb[7] = 0x2525;
	mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval == QLA_SUCCESS) {
		if (mcp->mb[1] != 0xAAAA || mcp->mb[2] != 0x5555 ||
		    mcp->mb[3] != 0xAA55 || mcp->mb[4] != 0x55AA)
			rval = QLA_FUNCTION_FAILED;
		if (mcp->mb[5] != 0xA5A5 || mcp->mb[6] != 0x5A5A ||
		    mcp->mb[7] != 0x2525)
			rval = QLA_FUNCTION_FAILED;
	}

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x1033, "Failed=%x.\n", rval);
	} else {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1034,
		    "Done %s.\n", __func__);
	}

	return rval;
}

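/*
 * Note on the register wrap test above: the seven constants written to
 * mb[1]-mb[7] are simply echoed back by the firmware; any mismatch in the
 * returned values marks the diagnostic as failed.
 */
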
/*
 * qla2x00_verify_checksum
 *	Verify firmware checksum.
 *
 * Input:
 *	ha = adapter block pointer.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1035,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_VERIFY_CHECKSUM;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_0;
	if (IS_FWI2_CAPABLE(vha->hw)) {
		mcp->mb[1] = MSW(risc_addr);
		mcp->mb[2] = LSW(risc_addr);
		mcp->out_mb |= MBX_2|MBX_1;
		mcp->in_mb |= MBX_2|MBX_1;
	} else {
		mcp->mb[1] = LSW(risc_addr);
		mcp->out_mb |= MBX_1;
		mcp->in_mb |= MBX_1;
	}

	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1036,
		    "Failed=%x chm sum=%x.\n", rval, IS_FWI2_CAPABLE(vha->hw) ?
		    (mcp->mb[2] << 16) | mcp->mb[1] : mcp->mb[1]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1037,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2x00_issue_iocb
 *	Issue IOCB using mailbox command
 *
 * Input:
 *	ha = adapter state pointer.
 *	buffer = buffer pointer.
 *	phys_addr = physical address of buffer.
 *	size = size of buffer.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
    dma_addr_t phys_addr, size_t size, uint32_t tov)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1038,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_IOCB_COMMAND_A64;
	mcp->mb[1] = 0;
	mcp->mb[2] = MSW(phys_addr);
	mcp->mb[3] = LSW(phys_addr);
	mcp->mb[6] = MSW(MSD(phys_addr));
	mcp->mb[7] = LSW(MSD(phys_addr));
	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_2|MBX_0;
	mcp->tov = tov;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x1039, "Failed=%x.\n", rval);
	} else {
		sts_entry_t *sts_entry = (sts_entry_t *) buffer;

		/* Mask reserved bits. */
		sts_entry->entry_status &=
		    IS_FWI2_CAPABLE(vha->hw) ? RF_MASK_24XX : RF_MASK;
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103a,
		    "Done %s.\n", __func__);
	}

	return rval;
}

int
qla2x00_issue_iocb(scsi_qla_host_t *vha, void *buffer, dma_addr_t phys_addr,
    size_t size)
{
	return qla2x00_issue_iocb_timeout(vha, buffer, phys_addr, size,
	    MBX_TOV_SECONDS);
}

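/*
 * Illustrative sketch (not part of the driver): issuing a caller-built IOCB
 * through qla2x00_issue_iocb() above.  The function name is hypothetical and
 * real users (e.g. the CT pass-through code) build their own IOCBs; note that
 * the firmware writes the completion status back into the same buffer, which
 * is why qla2x00_issue_iocb_timeout() masks sts_entry->entry_status.
 */
static int __maybe_unused qla2x00_example_issue_iocb(scsi_qla_host_t *vha,
    void *iocb, size_t len)
{
	struct qla_hw_data *ha = vha->hw;
	dma_addr_t iocb_dma;
	void *iocb_buf;
	int rval;

	/* len must fit within one s_dma_pool buffer. */
	iocb_buf = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &iocb_dma);
	if (!iocb_buf)
		return QLA_MEMORY_ALLOC_FAILED;

	memcpy(iocb_buf, iocb, len);		/* caller-built IOCB */
	rval = qla2x00_issue_iocb(vha, iocb_buf, iocb_dma, len);

	dma_pool_free(ha->s_dma_pool, iocb_buf, iocb_dma);
	return rval;
}
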
/*
 * qla2x00_abort_command
 *	Abort command aborts a specified IOCB.
 *
 * Input:
 *	ha = adapter block pointer.
 *	sp = SRB structure pointer.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_abort_command(srb_t *sp)
{
	unsigned long   flags = 0;
	int		rval;
	uint32_t	handle = 0;
	mbx_cmd_t	mc;
	mbx_cmd_t	*mcp = &mc;
	fc_port_t	*fcport = sp->fcport;
	scsi_qla_host_t *vha = fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103b,
	    "Entered %s.\n", __func__);

	if (vha->flags.qpairs_available && sp->qpair)
		req = sp->qpair->req;
	else
		req = vha->req;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
		if (req->outstanding_cmds[handle] == sp)
			break;
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (handle == req->num_outstanding_cmds) {
		/* command not found */
		return QLA_FUNCTION_FAILED;
	}

	mcp->mb[0] = MBC_ABORT_COMMAND;
	if (HAS_EXTENDED_IDS(ha))
		mcp->mb[1] = fcport->loop_id;
	else
		mcp->mb[1] = fcport->loop_id << 8;
	mcp->mb[2] = (uint16_t)handle;
	mcp->mb[3] = (uint16_t)(handle >> 16);
	mcp->mb[6] = (uint16_t)cmd->device->lun;
	mcp->out_mb = MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x103c, "Failed=%x.\n", rval);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103d,
		    "Done %s.\n", __func__);
	}

	return rval;
}

int
qla2x00_abort_target(struct fc_port *fcport, uint64_t l, int tag)
{
	int rval, rval2;
	mbx_cmd_t  mc;
	mbx_cmd_t  *mcp = &mc;
	scsi_qla_host_t *vha;

	vha = fcport->vha;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103e,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_ABORT_TARGET;
	mcp->out_mb = MBX_9|MBX_2|MBX_1|MBX_0;
	if (HAS_EXTENDED_IDS(vha->hw)) {
		mcp->mb[1] = fcport->loop_id;
		mcp->mb[10] = 0;
		mcp->out_mb |= MBX_10;
	} else {
		mcp->mb[1] = fcport->loop_id << 8;
	}
	mcp->mb[2] = vha->hw->loop_reset_delay;
	mcp->mb[9] = vha->vp_idx;

	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103f,
		    "Failed=%x.\n", rval);
	}

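	/*
	 * After a target or LUN reset the firmware expects a marker IOCB
	 * before new commands are sent to that nexus, so the marker below is
	 * issued regardless of whether the reset mailbox command succeeded.
	 */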
	/* Issue marker IOCB. */
	rval2 = qla2x00_marker(vha, vha->hw->base_qpair, fcport->loop_id, 0,
	    MK_SYNC_ID);
	if (rval2 != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1040,
		    "Failed to issue marker IOCB (%x).\n", rval2);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1041,
		    "Done %s.\n", __func__);
	}

	return rval;
}

int
qla2x00_lun_reset(struct fc_port *fcport, uint64_t l, int tag)
{
	int rval, rval2;
	mbx_cmd_t  mc;
	mbx_cmd_t  *mcp = &mc;
	scsi_qla_host_t *vha;

	vha = fcport->vha;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1042,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_LUN_RESET;
	mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
	if (HAS_EXTENDED_IDS(vha->hw))
		mcp->mb[1] = fcport->loop_id;
	else
		mcp->mb[1] = fcport->loop_id << 8;
	mcp->mb[2] = (u32)l;
	mcp->mb[3] = 0;
	mcp->mb[9] = vha->vp_idx;

	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1043, "Failed=%x.\n", rval);
	}

	/* Issue marker IOCB. */
	rval2 = qla2x00_marker(vha, vha->hw->base_qpair, fcport->loop_id, l,
	    MK_SYNC_ID_LUN);
	if (rval2 != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1044,
		    "Failed to issue marker IOCB (%x).\n", rval2);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1045,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2x00_get_adapter_id
 *	Get adapter ID and topology.
 *
 * Input:
 *	ha = adapter block pointer.
 *	id = pointer for loop ID.
 *	al_pa = pointer for AL_PA.
 *	area = pointer for area.
 *	domain = pointer for domain.
 *	top = pointer for topology.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
    uint8_t *area, uint8_t *domain, uint16_t *top, uint16_t *sw_cap)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1046,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID;
	mcp->mb[9] = vha->vp_idx;
	mcp->out_mb = MBX_9|MBX_0;
	mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	if (IS_CNA_CAPABLE(vha->hw))
		mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10;
	if (IS_FWI2_CAPABLE(vha->hw))
		mcp->in_mb |= MBX_19|MBX_18|MBX_17|MBX_16;
	if (IS_QLA27XX(vha->hw) || IS_QLA28XX(vha->hw))
		mcp->in_mb |= MBX_15;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (mcp->mb[0] == MBS_COMMAND_ERROR)
		rval = QLA_COMMAND_ERROR;
	else if (mcp->mb[0] == MBS_INVALID_COMMAND)
		rval = QLA_INVALID_COMMAND;

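	/*
	 * Note: the fields below are copied out unconditionally, i.e. even
	 * when rval indicates a failure; callers should check rval before
	 * relying on them.
	 */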
	/* Return data. */
	*id = mcp->mb[1];
	*al_pa = LSB(mcp->mb[2]);
	*area = MSB(mcp->mb[2]);
	*domain = LSB(mcp->mb[3]);
	*top = mcp->mb[6];
	*sw_cap = mcp->mb[7];

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x1047, "Failed=%x.\n", rval);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1048,
		    "Done %s.\n", __func__);

		if (IS_CNA_CAPABLE(vha->hw)) {
			vha->fcoe_vlan_id = mcp->mb[9] & 0xfff;
			vha->fcoe_fcf_idx = mcp->mb[10];
			vha->fcoe_vn_port_mac[5] = mcp->mb[11] >> 8;
			vha->fcoe_vn_port_mac[4] = mcp->mb[11] & 0xff;
			vha->fcoe_vn_port_mac[3] = mcp->mb[12] >> 8;
			vha->fcoe_vn_port_mac[2] = mcp->mb[12] & 0xff;
			vha->fcoe_vn_port_mac[1] = mcp->mb[13] >> 8;
			vha->fcoe_vn_port_mac[0] = mcp->mb[13] & 0xff;
		}
		/* If FA-WWN supported */
		if (IS_FAWWN_CAPABLE(vha->hw)) {
			if (mcp->mb[7] & BIT_14) {
				vha->port_name[0] = MSB(mcp->mb[16]);
				vha->port_name[1] = LSB(mcp->mb[16]);
				vha->port_name[2] = MSB(mcp->mb[17]);
				vha->port_name[3] = LSB(mcp->mb[17]);
				vha->port_name[4] = MSB(mcp->mb[18]);
				vha->port_name[5] = LSB(mcp->mb[18]);
				vha->port_name[6] = MSB(mcp->mb[19]);
				vha->port_name[7] = LSB(mcp->mb[19]);
				fc_host_port_name(vha->host) =
				    wwn_to_u64(vha->port_name);
				ql_dbg(ql_dbg_mbx, vha, 0x10ca,
				    "FA-WWN acquired %016llx\n",
				    wwn_to_u64(vha->port_name));
			}
		}

		if (IS_QLA27XX(vha->hw) || IS_QLA28XX(vha->hw))
			vha->bbcr = mcp->mb[15];
	}

	return rval;
}

/*
 * qla2x00_get_retry_cnt
 *	Get current firmware login retry count and delay.
 *
 * Input:
 *	ha = adapter block pointer.
 *	retry_cnt = pointer to login retry count.
 *	tov = pointer to login timeout value.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov,
    uint16_t *r_a_tov)
{
	int rval;
	uint16_t ratov;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1049,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_GET_RETRY_COUNT;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x104a,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		/* Convert returned data and check our values. */
		*r_a_tov = mcp->mb[3] / 2;
		ratov = (mcp->mb[3]/2) / 10;	/* mb[3] value is in 100ms */
		if (mcp->mb[1] * ratov > (*retry_cnt) * (*tov)) {
			/* Update to the larger values */
			*retry_cnt = (uint8_t)mcp->mb[1];
			*tov = ratov;
		}

		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104b,
		    "Done %s mb3=%d ratov=%d.\n", __func__, mcp->mb[3], ratov);
	}

	return rval;
}

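/*
 * Illustrative sketch (not part of the driver): how the bytes returned by
 * qla2x00_get_adapter_id() combine into the 24-bit FC port ID.  The helper
 * name is hypothetical; the driver normally keeps the same three bytes in
 * vha->d_id.b.{domain,area,al_pa}.
 */
static uint32_t __maybe_unused qla2x00_example_port_id(uint8_t domain,
    uint8_t area, uint8_t al_pa)
{
	/* D_ID = domain (bits 23-16) | area (bits 15-8) | AL_PA (bits 7-0) */
	return ((uint32_t)domain << 16) | ((uint32_t)area << 8) | al_pa;
}
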
/*
 * qla2x00_init_firmware
 *	Initialize adapter firmware.
 *
 * Input:
 *	ha = adapter block pointer.
 *	dptr = Initialization control block pointer.
 *	size = size of initialization control block.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104c,
	    "Entered %s.\n", __func__);

	if (IS_P3P_TYPE(ha) && ql2xdbwr)
		qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr,
		    (0x04 | (ha->portnum << 5) | (0 << 8) | (0 << 16)));

	if (ha->flags.npiv_supported)
		mcp->mb[0] = MBC_MID_INITIALIZE_FIRMWARE;
	else
		mcp->mb[0] = MBC_INITIALIZE_FIRMWARE;

	mcp->mb[1] = 0;
	mcp->mb[2] = MSW(ha->init_cb_dma);
	mcp->mb[3] = LSW(ha->init_cb_dma);
	mcp->mb[6] = MSW(MSD(ha->init_cb_dma));
	mcp->mb[7] = LSW(MSD(ha->init_cb_dma));
	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	if (ha->ex_init_cb && ha->ex_init_cb->ex_version) {
		mcp->mb[1] = BIT_0;
		mcp->mb[10] = MSW(ha->ex_init_cb_dma);
		mcp->mb[11] = LSW(ha->ex_init_cb_dma);
		mcp->mb[12] = MSW(MSD(ha->ex_init_cb_dma));
		mcp->mb[13] = LSW(MSD(ha->ex_init_cb_dma));
		mcp->mb[14] = sizeof(*ha->ex_init_cb);
		mcp->out_mb |= MBX_14|MBX_13|MBX_12|MBX_11|MBX_10;
	}
	/* 1 and 2 should normally be captured. */
	mcp->in_mb = MBX_2|MBX_1|MBX_0;
	if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
		/* mb3 is additional info about the installed SFP. */
		mcp->in_mb |= MBX_3;
	mcp->buf_size = size;
	mcp->flags = MBX_DMA_OUT;
	mcp->tov = MBX_TOV_SECONDS;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x104d,
		    "Failed=%x mb[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x.\n",
		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3]);
		if (ha->init_cb) {
			ql_dbg(ql_dbg_mbx, vha, 0x104d, "init_cb:\n");
			ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha,
			    0x0104d, ha->init_cb, sizeof(*ha->init_cb));
		}
		if (ha->ex_init_cb && ha->ex_init_cb->ex_version) {
			ql_dbg(ql_dbg_mbx, vha, 0x104d, "ex_init_cb:\n");
			ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha,
			    0x0104d, ha->ex_init_cb, sizeof(*ha->ex_init_cb));
		}
	} else {
		if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
			if (mcp->mb[2] == 6 || mcp->mb[3] == 2)
				ql_dbg(ql_dbg_mbx, vha, 0x119d,
				    "Invalid SFP/Validation Failed\n");
		}
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104e,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2x00_get_port_database
 *	Issue normal/enhanced get port database mailbox command
 *	and copy device name as necessary.
 *
 * Input:
 *	ha = adapter state pointer.
 *	dev = structure pointer.
 *	opt = enhanced cmd option byte.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	port_database_t *pd;
	struct port_database_24xx *pd24;
	dma_addr_t pd_dma;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104f,
	    "Entered %s.\n", __func__);

	pd24 = NULL;
	pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
	if (pd == NULL) {
		ql_log(ql_log_warn, vha, 0x1050,
		    "Failed to allocate port database structure.\n");
		fcport->query = 0;
		return QLA_MEMORY_ALLOC_FAILED;
	}

	mcp->mb[0] = MBC_GET_PORT_DATABASE;
	if (opt != 0 && !IS_FWI2_CAPABLE(ha))
		mcp->mb[0] = MBC_ENHANCED_GET_PORT_DATABASE;
	mcp->mb[2] = MSW(pd_dma);
	mcp->mb[3] = LSW(pd_dma);
	mcp->mb[6] = MSW(MSD(pd_dma));
	mcp->mb[7] = LSW(MSD(pd_dma));
	mcp->mb[9] = vha->vp_idx;
	mcp->out_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
	mcp->in_mb = MBX_0;
	if (IS_FWI2_CAPABLE(ha)) {
		mcp->mb[1] = fcport->loop_id;
		mcp->mb[10] = opt;
		mcp->out_mb |= MBX_10|MBX_1;
		mcp->in_mb |= MBX_1;
	} else if (HAS_EXTENDED_IDS(ha)) {
		mcp->mb[1] = fcport->loop_id;
		mcp->mb[10] = opt;
		mcp->out_mb |= MBX_10|MBX_1;
	} else {
		mcp->mb[1] = fcport->loop_id << 8 | opt;
		mcp->out_mb |= MBX_1;
	}
	mcp->buf_size = IS_FWI2_CAPABLE(ha) ?
	    PORT_DATABASE_24XX_SIZE : PORT_DATABASE_SIZE;
	mcp->flags = MBX_DMA_IN;
	mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS)
		goto gpd_error_out;

	if (IS_FWI2_CAPABLE(ha)) {
		uint64_t zero = 0;
		u8 current_login_state, last_login_state;

		pd24 = (struct port_database_24xx *) pd;

		/* Check for logged in state. */
		if (NVME_TARGET(ha, fcport)) {
			current_login_state = pd24->current_login_state >> 4;
			last_login_state = pd24->last_login_state >> 4;
		} else {
			current_login_state = pd24->current_login_state & 0xf;
			last_login_state = pd24->last_login_state & 0xf;
		}
		fcport->current_login_state = pd24->current_login_state;
		fcport->last_login_state = pd24->last_login_state;

		/* Check for logged in state. */
		if (current_login_state != PDS_PRLI_COMPLETE &&
		    last_login_state != PDS_PRLI_COMPLETE) {
			ql_dbg(ql_dbg_mbx, vha, 0x119a,
			    "Unable to verify login-state (%x/%x) for loop_id %x.\n",
			    current_login_state, last_login_state,
			    fcport->loop_id);
			rval = QLA_FUNCTION_FAILED;

			if (!fcport->query)
				goto gpd_error_out;
		}

		if (fcport->loop_id == FC_NO_LOOP_ID ||
		    (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
		     memcmp(fcport->port_name, pd24->port_name, 8))) {
			/* We lost the device mid way. */
			rval = QLA_NOT_LOGGED_IN;
			goto gpd_error_out;
		}

		/* Names are little-endian. */
		memcpy(fcport->node_name, pd24->node_name, WWN_SIZE);
		memcpy(fcport->port_name, pd24->port_name, WWN_SIZE);

		/* Get port_id of device. */
		fcport->d_id.b.domain = pd24->port_id[0];
		fcport->d_id.b.area = pd24->port_id[1];
		fcport->d_id.b.al_pa = pd24->port_id[2];
		fcport->d_id.b.rsvd_1 = 0;

*/ 1977 if ((pd24->prli_svc_param_word_3[0] & BIT_4) == 0) 1978 fcport->port_type = FCT_INITIATOR; 1979 else 1980 fcport->port_type = FCT_TARGET; 1981 1982 /* Passback COS information. */ 1983 fcport->supported_classes = (pd24->flags & PDF_CLASS_2) ? 1984 FC_COS_CLASS2 : FC_COS_CLASS3; 1985 1986 if (pd24->prli_svc_param_word_3[0] & BIT_7) 1987 fcport->flags |= FCF_CONF_COMP_SUPPORTED; 1988 } else { 1989 uint64_t zero = 0; 1990 1991 /* Check for logged in state. */ 1992 if (pd->master_state != PD_STATE_PORT_LOGGED_IN && 1993 pd->slave_state != PD_STATE_PORT_LOGGED_IN) { 1994 ql_dbg(ql_dbg_mbx, vha, 0x100a, 1995 "Unable to verify login-state (%x/%x) - " 1996 "portid=%02x%02x%02x.\n", pd->master_state, 1997 pd->slave_state, fcport->d_id.b.domain, 1998 fcport->d_id.b.area, fcport->d_id.b.al_pa); 1999 rval = QLA_FUNCTION_FAILED; 2000 goto gpd_error_out; 2001 } 2002 2003 if (fcport->loop_id == FC_NO_LOOP_ID || 2004 (memcmp(fcport->port_name, (uint8_t *)&zero, 8) && 2005 memcmp(fcport->port_name, pd->port_name, 8))) { 2006 /* We lost the device mid way. */ 2007 rval = QLA_NOT_LOGGED_IN; 2008 goto gpd_error_out; 2009 } 2010 2011 /* Names are little-endian. */ 2012 memcpy(fcport->node_name, pd->node_name, WWN_SIZE); 2013 memcpy(fcport->port_name, pd->port_name, WWN_SIZE); 2014 2015 /* Get port_id of device. */ 2016 fcport->d_id.b.domain = pd->port_id[0]; 2017 fcport->d_id.b.area = pd->port_id[3]; 2018 fcport->d_id.b.al_pa = pd->port_id[2]; 2019 fcport->d_id.b.rsvd_1 = 0; 2020 2021 /* If not target must be initiator or unknown type. */ 2022 if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0) 2023 fcport->port_type = FCT_INITIATOR; 2024 else 2025 fcport->port_type = FCT_TARGET; 2026 2027 /* Passback COS information. */ 2028 fcport->supported_classes = (pd->options & BIT_4) ? 2029 FC_COS_CLASS2 : FC_COS_CLASS3; 2030 } 2031 2032 gpd_error_out: 2033 dma_pool_free(ha->s_dma_pool, pd, pd_dma); 2034 fcport->query = 0; 2035 2036 if (rval != QLA_SUCCESS) { 2037 ql_dbg(ql_dbg_mbx, vha, 0x1052, 2038 "Failed=%x mb[0]=%x mb[1]=%x.\n", rval, 2039 mcp->mb[0], mcp->mb[1]); 2040 } else { 2041 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1053, 2042 "Done %s.\n", __func__); 2043 } 2044 2045 return rval; 2046 } 2047 2048 /* 2049 * qla2x00_get_firmware_state 2050 * Get adapter firmware state. 2051 * 2052 * Input: 2053 * ha = adapter block pointer. 2054 * dptr = pointer for firmware state. 2055 * TARGET_QUEUE_LOCK must be released. 2056 * ADAPTER_STATE_LOCK must be released. 2057 * 2058 * Returns: 2059 * qla2x00 local function return status code. 2060 * 2061 * Context: 2062 * Kernel context. 2063 */ 2064 int 2065 qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states) 2066 { 2067 int rval; 2068 mbx_cmd_t mc; 2069 mbx_cmd_t *mcp = &mc; 2070 struct qla_hw_data *ha = vha->hw; 2071 2072 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1054, 2073 "Entered %s.\n", __func__); 2074 2075 mcp->mb[0] = MBC_GET_FIRMWARE_STATE; 2076 mcp->out_mb = MBX_0; 2077 if (IS_FWI2_CAPABLE(vha->hw)) 2078 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 2079 else 2080 mcp->in_mb = MBX_1|MBX_0; 2081 mcp->tov = MBX_TOV_SECONDS; 2082 mcp->flags = 0; 2083 rval = qla2x00_mailbox_command(vha, mcp); 2084 2085 /* Return firmware states. 
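	 * mb[1] holds the firmware state; on FWI2-capable adapters mb[2]-mb[6]
	 * carry additional status, including SFP (mb[3]) and D-Port (mb[6])
	 * information.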
*/ 2086 states[0] = mcp->mb[1]; 2087 if (IS_FWI2_CAPABLE(vha->hw)) { 2088 states[1] = mcp->mb[2]; 2089 states[2] = mcp->mb[3]; /* SFP info */ 2090 states[3] = mcp->mb[4]; 2091 states[4] = mcp->mb[5]; 2092 states[5] = mcp->mb[6]; /* DPORT status */ 2093 } 2094 2095 if (rval != QLA_SUCCESS) { 2096 /*EMPTY*/ 2097 ql_dbg(ql_dbg_mbx, vha, 0x1055, "Failed=%x.\n", rval); 2098 } else { 2099 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 2100 if (mcp->mb[2] == 6 || mcp->mb[3] == 2) 2101 ql_dbg(ql_dbg_mbx, vha, 0x119e, 2102 "Invalid SFP/Validation Failed\n"); 2103 } 2104 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1056, 2105 "Done %s.\n", __func__); 2106 } 2107 2108 return rval; 2109 } 2110 2111 /* 2112 * qla2x00_get_port_name 2113 * Issue get port name mailbox command. 2114 * Returned name is in big endian format. 2115 * 2116 * Input: 2117 * ha = adapter block pointer. 2118 * loop_id = loop ID of device. 2119 * name = pointer for name. 2120 * TARGET_QUEUE_LOCK must be released. 2121 * ADAPTER_STATE_LOCK must be released. 2122 * 2123 * Returns: 2124 * qla2x00 local function return status code. 2125 * 2126 * Context: 2127 * Kernel context. 2128 */ 2129 int 2130 qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name, 2131 uint8_t opt) 2132 { 2133 int rval; 2134 mbx_cmd_t mc; 2135 mbx_cmd_t *mcp = &mc; 2136 2137 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1057, 2138 "Entered %s.\n", __func__); 2139 2140 mcp->mb[0] = MBC_GET_PORT_NAME; 2141 mcp->mb[9] = vha->vp_idx; 2142 mcp->out_mb = MBX_9|MBX_1|MBX_0; 2143 if (HAS_EXTENDED_IDS(vha->hw)) { 2144 mcp->mb[1] = loop_id; 2145 mcp->mb[10] = opt; 2146 mcp->out_mb |= MBX_10; 2147 } else { 2148 mcp->mb[1] = loop_id << 8 | opt; 2149 } 2150 2151 mcp->in_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 2152 mcp->tov = MBX_TOV_SECONDS; 2153 mcp->flags = 0; 2154 rval = qla2x00_mailbox_command(vha, mcp); 2155 2156 if (rval != QLA_SUCCESS) { 2157 /*EMPTY*/ 2158 ql_dbg(ql_dbg_mbx, vha, 0x1058, "Failed=%x.\n", rval); 2159 } else { 2160 if (name != NULL) { 2161 /* This function returns name in big endian. */ 2162 name[0] = MSB(mcp->mb[2]); 2163 name[1] = LSB(mcp->mb[2]); 2164 name[2] = MSB(mcp->mb[3]); 2165 name[3] = LSB(mcp->mb[3]); 2166 name[4] = MSB(mcp->mb[6]); 2167 name[5] = LSB(mcp->mb[6]); 2168 name[6] = MSB(mcp->mb[7]); 2169 name[7] = LSB(mcp->mb[7]); 2170 } 2171 2172 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1059, 2173 "Done %s.\n", __func__); 2174 } 2175 2176 return rval; 2177 } 2178 2179 /* 2180 * qla24xx_link_initialization 2181 * Issue link initialization mailbox command. 2182 * 2183 * Input: 2184 * ha = adapter block pointer. 2185 * TARGET_QUEUE_LOCK must be released. 2186 * ADAPTER_STATE_LOCK must be released. 2187 * 2188 * Returns: 2189 * qla2x00 local function return status code. 2190 * 2191 * Context: 2192 * Kernel context. 
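 *
 * Note: only issued on FWI2-capable, non-CNA adapters; on anything else
 * the function returns QLA_FUNCTION_FAILED without touching the mailbox.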
2193 */ 2194 int 2195 qla24xx_link_initialize(scsi_qla_host_t *vha) 2196 { 2197 int rval; 2198 mbx_cmd_t mc; 2199 mbx_cmd_t *mcp = &mc; 2200 2201 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1152, 2202 "Entered %s.\n", __func__); 2203 2204 if (!IS_FWI2_CAPABLE(vha->hw) || IS_CNA_CAPABLE(vha->hw)) 2205 return QLA_FUNCTION_FAILED; 2206 2207 mcp->mb[0] = MBC_LINK_INITIALIZATION; 2208 mcp->mb[1] = BIT_4; 2209 if (vha->hw->operating_mode == LOOP) 2210 mcp->mb[1] |= BIT_6; 2211 else 2212 mcp->mb[1] |= BIT_5; 2213 mcp->mb[2] = 0; 2214 mcp->mb[3] = 0; 2215 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 2216 mcp->in_mb = MBX_0; 2217 mcp->tov = MBX_TOV_SECONDS; 2218 mcp->flags = 0; 2219 rval = qla2x00_mailbox_command(vha, mcp); 2220 2221 if (rval != QLA_SUCCESS) { 2222 ql_dbg(ql_dbg_mbx, vha, 0x1153, "Failed=%x.\n", rval); 2223 } else { 2224 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1154, 2225 "Done %s.\n", __func__); 2226 } 2227 2228 return rval; 2229 } 2230 2231 /* 2232 * qla2x00_lip_reset 2233 * Issue LIP reset mailbox command. 2234 * 2235 * Input: 2236 * ha = adapter block pointer. 2237 * TARGET_QUEUE_LOCK must be released. 2238 * ADAPTER_STATE_LOCK must be released. 2239 * 2240 * Returns: 2241 * qla2x00 local function return status code. 2242 * 2243 * Context: 2244 * Kernel context. 2245 */ 2246 int 2247 qla2x00_lip_reset(scsi_qla_host_t *vha) 2248 { 2249 int rval; 2250 mbx_cmd_t mc; 2251 mbx_cmd_t *mcp = &mc; 2252 2253 ql_dbg(ql_dbg_disc, vha, 0x105a, 2254 "Entered %s.\n", __func__); 2255 2256 if (IS_CNA_CAPABLE(vha->hw)) { 2257 /* Logout across all FCFs. */ 2258 mcp->mb[0] = MBC_LIP_FULL_LOGIN; 2259 mcp->mb[1] = BIT_1; 2260 mcp->mb[2] = 0; 2261 mcp->out_mb = MBX_2|MBX_1|MBX_0; 2262 } else if (IS_FWI2_CAPABLE(vha->hw)) { 2263 mcp->mb[0] = MBC_LIP_FULL_LOGIN; 2264 mcp->mb[1] = BIT_4; 2265 mcp->mb[2] = 0; 2266 mcp->mb[3] = vha->hw->loop_reset_delay; 2267 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 2268 } else { 2269 mcp->mb[0] = MBC_LIP_RESET; 2270 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 2271 if (HAS_EXTENDED_IDS(vha->hw)) { 2272 mcp->mb[1] = 0x00ff; 2273 mcp->mb[10] = 0; 2274 mcp->out_mb |= MBX_10; 2275 } else { 2276 mcp->mb[1] = 0xff00; 2277 } 2278 mcp->mb[2] = vha->hw->loop_reset_delay; 2279 mcp->mb[3] = 0; 2280 } 2281 mcp->in_mb = MBX_0; 2282 mcp->tov = MBX_TOV_SECONDS; 2283 mcp->flags = 0; 2284 rval = qla2x00_mailbox_command(vha, mcp); 2285 2286 if (rval != QLA_SUCCESS) { 2287 /*EMPTY*/ 2288 ql_dbg(ql_dbg_mbx, vha, 0x105b, "Failed=%x.\n", rval); 2289 } else { 2290 /*EMPTY*/ 2291 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105c, 2292 "Done %s.\n", __func__); 2293 } 2294 2295 return rval; 2296 } 2297 2298 /* 2299 * qla2x00_send_sns 2300 * Send SNS command. 2301 * 2302 * Input: 2303 * ha = adapter block pointer. 2304 * sns = pointer for command. 2305 * cmd_size = command size. 2306 * buf_size = response/command size. 2307 * TARGET_QUEUE_LOCK must be released. 2308 * ADAPTER_STATE_LOCK must be released. 2309 * 2310 * Returns: 2311 * qla2x00 local function return status code. 2312 * 2313 * Context: 2314 * Kernel context. 
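 *
 * Note: the buffer at sns_phys_address is used for both the outgoing SNS
 * command and the response (MBX_DMA_OUT|MBX_DMA_IN), so buf_size must
 * cover whichever of the two is larger.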
 */
int
qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address,
    uint16_t cmd_size, size_t buf_size)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105d,
	    "Entered %s.\n", __func__);

	/* Compute the command timeout before reporting it below. */
	mcp->tov = (vha->hw->login_timeout * 2) + (vha->hw->login_timeout / 2);

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105e,
	    "Retry cnt=%d ratov=%d total tov=%d.\n",
	    vha->hw->retry_count, vha->hw->login_timeout, mcp->tov);

	mcp->mb[0] = MBC_SEND_SNS_COMMAND;
	mcp->mb[1] = cmd_size;
	mcp->mb[2] = MSW(sns_phys_address);
	mcp->mb[3] = LSW(sns_phys_address);
	mcp->mb[6] = MSW(MSD(sns_phys_address));
	mcp->mb[7] = LSW(MSD(sns_phys_address));
	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_0|MBX_1;
	mcp->buf_size = buf_size;
	mcp->flags = MBX_DMA_OUT|MBX_DMA_IN;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x105f,
		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
		    rval, mcp->mb[0], mcp->mb[1]);
	} else {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1060,
		    "Done %s.\n", __func__);
	}

	return rval;
}

int
qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
    uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt)
{
	int rval;

	struct logio_entry_24xx *lg;
	dma_addr_t lg_dma;
	uint32_t iop[2];
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1061,
	    "Entered %s.\n", __func__);

	if (vha->vp_idx && vha->qpair)
		req = vha->qpair->req;
	else
		req = ha->req_q_map[0];

	lg = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
	if (lg == NULL) {
		ql_log(ql_log_warn, vha, 0x1062,
		    "Failed to allocate login IOCB.\n");
		return QLA_MEMORY_ALLOC_FAILED;
	}

	lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	lg->entry_count = 1;
	lg->handle = MAKE_HANDLE(req->id, lg->handle);
	lg->nport_handle = cpu_to_le16(loop_id);
	lg->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
	if (opt & BIT_0)
		lg->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
	if (opt & BIT_1)
		lg->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
	lg->port_id[0] = al_pa;
	lg->port_id[1] = area;
	lg->port_id[2] = domain;
	lg->vp_index = vha->vp_idx;
	rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0,
	    (ha->r_a_tov / 10 * 2) + 2);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1063,
		    "Failed to issue login IOCB (%x).\n", rval);
	} else if (lg->entry_status != 0) {
		ql_dbg(ql_dbg_mbx, vha, 0x1064,
		    "Failed to complete IOCB -- error status (%x).\n",
		    lg->entry_status);
		rval = QLA_FUNCTION_FAILED;
	} else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) {
		iop[0] = le32_to_cpu(lg->io_parameter[0]);
		iop[1] = le32_to_cpu(lg->io_parameter[1]);

		ql_dbg(ql_dbg_mbx, vha, 0x1065,
		    "Failed to complete IOCB -- completion status (%x) "
		    "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
		    iop[0], iop[1]);

		switch (iop[0]) {
		case LSC_SCODE_PORTID_USED:
			mb[0] = MBS_PORT_ID_USED;
			mb[1] = LSW(iop[1]);
			break;
		case LSC_SCODE_NPORT_USED:
			mb[0] = MBS_LOOP_ID_USED;
			break;
		case LSC_SCODE_NOLINK:
		case LSC_SCODE_NOIOCB:
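		/*
		 * The remaining login status codes, and anything
		 * unrecognized, are reported as a generic command error.
		 */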
2427 case LSC_SCODE_NOXCB: 2428 case LSC_SCODE_CMD_FAILED: 2429 case LSC_SCODE_NOFABRIC: 2430 case LSC_SCODE_FW_NOT_READY: 2431 case LSC_SCODE_NOT_LOGGED_IN: 2432 case LSC_SCODE_NOPCB: 2433 case LSC_SCODE_ELS_REJECT: 2434 case LSC_SCODE_CMD_PARAM_ERR: 2435 case LSC_SCODE_NONPORT: 2436 case LSC_SCODE_LOGGED_IN: 2437 case LSC_SCODE_NOFLOGI_ACC: 2438 default: 2439 mb[0] = MBS_COMMAND_ERROR; 2440 break; 2441 } 2442 } else { 2443 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1066, 2444 "Done %s.\n", __func__); 2445 2446 iop[0] = le32_to_cpu(lg->io_parameter[0]); 2447 2448 mb[0] = MBS_COMMAND_COMPLETE; 2449 mb[1] = 0; 2450 if (iop[0] & BIT_4) { 2451 if (iop[0] & BIT_8) 2452 mb[1] |= BIT_1; 2453 } else 2454 mb[1] = BIT_0; 2455 2456 /* Passback COS information. */ 2457 mb[10] = 0; 2458 if (lg->io_parameter[7] || lg->io_parameter[8]) 2459 mb[10] |= BIT_0; /* Class 2. */ 2460 if (lg->io_parameter[9] || lg->io_parameter[10]) 2461 mb[10] |= BIT_1; /* Class 3. */ 2462 if (lg->io_parameter[0] & cpu_to_le32(BIT_7)) 2463 mb[10] |= BIT_7; /* Confirmed Completion 2464 * Allowed 2465 */ 2466 } 2467 2468 dma_pool_free(ha->s_dma_pool, lg, lg_dma); 2469 2470 return rval; 2471 } 2472 2473 /* 2474 * qla2x00_login_fabric 2475 * Issue login fabric port mailbox command. 2476 * 2477 * Input: 2478 * ha = adapter block pointer. 2479 * loop_id = device loop ID. 2480 * domain = device domain. 2481 * area = device area. 2482 * al_pa = device AL_PA. 2483 * status = pointer for return status. 2484 * opt = command options. 2485 * TARGET_QUEUE_LOCK must be released. 2486 * ADAPTER_STATE_LOCK must be released. 2487 * 2488 * Returns: 2489 * qla2x00 local function return status code. 2490 * 2491 * Context: 2492 * Kernel context. 2493 */ 2494 int 2495 qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, 2496 uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt) 2497 { 2498 int rval; 2499 mbx_cmd_t mc; 2500 mbx_cmd_t *mcp = &mc; 2501 struct qla_hw_data *ha = vha->hw; 2502 2503 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1067, 2504 "Entered %s.\n", __func__); 2505 2506 mcp->mb[0] = MBC_LOGIN_FABRIC_PORT; 2507 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 2508 if (HAS_EXTENDED_IDS(ha)) { 2509 mcp->mb[1] = loop_id; 2510 mcp->mb[10] = opt; 2511 mcp->out_mb |= MBX_10; 2512 } else { 2513 mcp->mb[1] = (loop_id << 8) | opt; 2514 } 2515 mcp->mb[2] = domain; 2516 mcp->mb[3] = area << 8 | al_pa; 2517 2518 mcp->in_mb = MBX_7|MBX_6|MBX_2|MBX_1|MBX_0; 2519 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2); 2520 mcp->flags = 0; 2521 rval = qla2x00_mailbox_command(vha, mcp); 2522 2523 /* Return mailbox statuses. */ 2524 if (mb != NULL) { 2525 mb[0] = mcp->mb[0]; 2526 mb[1] = mcp->mb[1]; 2527 mb[2] = mcp->mb[2]; 2528 mb[6] = mcp->mb[6]; 2529 mb[7] = mcp->mb[7]; 2530 /* COS retrieved from Get-Port-Database mailbox command. */ 2531 mb[10] = 0; 2532 } 2533 2534 if (rval != QLA_SUCCESS) { 2535 /* RLU tmp code: need to change main mailbox_command function to 2536 * return ok even when the mailbox completion value is not 2537 * SUCCESS. The caller needs to be responsible to interpret 2538 * the return values of this mailbox command if we're not 2539 * to change too much of the existing code. 
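	 * The mb[0] values 0x4001-0x4006 checked below are firmware mailbox
	 * completion statuses other than MBS_COMMAND_COMPLETE; they are handed
	 * back through mb[] for the caller to interpret.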
2540 */ 2541 if (mcp->mb[0] == 0x4001 || mcp->mb[0] == 0x4002 || 2542 mcp->mb[0] == 0x4003 || mcp->mb[0] == 0x4005 || 2543 mcp->mb[0] == 0x4006) 2544 rval = QLA_SUCCESS; 2545 2546 /*EMPTY*/ 2547 ql_dbg(ql_dbg_mbx, vha, 0x1068, 2548 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", 2549 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); 2550 } else { 2551 /*EMPTY*/ 2552 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1069, 2553 "Done %s.\n", __func__); 2554 } 2555 2556 return rval; 2557 } 2558 2559 /* 2560 * qla2x00_login_local_device 2561 * Issue login loop port mailbox command. 2562 * 2563 * Input: 2564 * ha = adapter block pointer. 2565 * loop_id = device loop ID. 2566 * opt = command options. 2567 * 2568 * Returns: 2569 * Return status code. 2570 * 2571 * Context: 2572 * Kernel context. 2573 * 2574 */ 2575 int 2576 qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport, 2577 uint16_t *mb_ret, uint8_t opt) 2578 { 2579 int rval; 2580 mbx_cmd_t mc; 2581 mbx_cmd_t *mcp = &mc; 2582 struct qla_hw_data *ha = vha->hw; 2583 2584 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106a, 2585 "Entered %s.\n", __func__); 2586 2587 if (IS_FWI2_CAPABLE(ha)) 2588 return qla24xx_login_fabric(vha, fcport->loop_id, 2589 fcport->d_id.b.domain, fcport->d_id.b.area, 2590 fcport->d_id.b.al_pa, mb_ret, opt); 2591 2592 mcp->mb[0] = MBC_LOGIN_LOOP_PORT; 2593 if (HAS_EXTENDED_IDS(ha)) 2594 mcp->mb[1] = fcport->loop_id; 2595 else 2596 mcp->mb[1] = fcport->loop_id << 8; 2597 mcp->mb[2] = opt; 2598 mcp->out_mb = MBX_2|MBX_1|MBX_0; 2599 mcp->in_mb = MBX_7|MBX_6|MBX_1|MBX_0; 2600 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2); 2601 mcp->flags = 0; 2602 rval = qla2x00_mailbox_command(vha, mcp); 2603 2604 /* Return mailbox statuses. */ 2605 if (mb_ret != NULL) { 2606 mb_ret[0] = mcp->mb[0]; 2607 mb_ret[1] = mcp->mb[1]; 2608 mb_ret[6] = mcp->mb[6]; 2609 mb_ret[7] = mcp->mb[7]; 2610 } 2611 2612 if (rval != QLA_SUCCESS) { 2613 /* AV tmp code: need to change main mailbox_command function to 2614 * return ok even when the mailbox completion value is not 2615 * SUCCESS. The caller needs to be responsible to interpret 2616 * the return values of this mailbox command if we're not 2617 * to change too much of the existing code. 
2618 */ 2619 if (mcp->mb[0] == 0x4005 || mcp->mb[0] == 0x4006) 2620 rval = QLA_SUCCESS; 2621 2622 ql_dbg(ql_dbg_mbx, vha, 0x106b, 2623 "Failed=%x mb[0]=%x mb[1]=%x mb[6]=%x mb[7]=%x.\n", 2624 rval, mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]); 2625 } else { 2626 /*EMPTY*/ 2627 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106c, 2628 "Done %s.\n", __func__); 2629 } 2630 2631 return (rval); 2632 } 2633 2634 int 2635 qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, 2636 uint8_t area, uint8_t al_pa) 2637 { 2638 int rval; 2639 struct logio_entry_24xx *lg; 2640 dma_addr_t lg_dma; 2641 struct qla_hw_data *ha = vha->hw; 2642 struct req_que *req; 2643 2644 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106d, 2645 "Entered %s.\n", __func__); 2646 2647 lg = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma); 2648 if (lg == NULL) { 2649 ql_log(ql_log_warn, vha, 0x106e, 2650 "Failed to allocate logout IOCB.\n"); 2651 return QLA_MEMORY_ALLOC_FAILED; 2652 } 2653 2654 req = vha->req; 2655 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE; 2656 lg->entry_count = 1; 2657 lg->handle = MAKE_HANDLE(req->id, lg->handle); 2658 lg->nport_handle = cpu_to_le16(loop_id); 2659 lg->control_flags = 2660 cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO| 2661 LCF_FREE_NPORT); 2662 lg->port_id[0] = al_pa; 2663 lg->port_id[1] = area; 2664 lg->port_id[2] = domain; 2665 lg->vp_index = vha->vp_idx; 2666 rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0, 2667 (ha->r_a_tov / 10 * 2) + 2); 2668 if (rval != QLA_SUCCESS) { 2669 ql_dbg(ql_dbg_mbx, vha, 0x106f, 2670 "Failed to issue logout IOCB (%x).\n", rval); 2671 } else if (lg->entry_status != 0) { 2672 ql_dbg(ql_dbg_mbx, vha, 0x1070, 2673 "Failed to complete IOCB -- error status (%x).\n", 2674 lg->entry_status); 2675 rval = QLA_FUNCTION_FAILED; 2676 } else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) { 2677 ql_dbg(ql_dbg_mbx, vha, 0x1071, 2678 "Failed to complete IOCB -- completion status (%x) " 2679 "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status), 2680 le32_to_cpu(lg->io_parameter[0]), 2681 le32_to_cpu(lg->io_parameter[1])); 2682 } else { 2683 /*EMPTY*/ 2684 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1072, 2685 "Done %s.\n", __func__); 2686 } 2687 2688 dma_pool_free(ha->s_dma_pool, lg, lg_dma); 2689 2690 return rval; 2691 } 2692 2693 /* 2694 * qla2x00_fabric_logout 2695 * Issue logout fabric port mailbox command. 2696 * 2697 * Input: 2698 * ha = adapter block pointer. 2699 * loop_id = device loop ID. 2700 * TARGET_QUEUE_LOCK must be released. 2701 * ADAPTER_STATE_LOCK must be released. 2702 * 2703 * Returns: 2704 * qla2x00 local function return status code. 2705 * 2706 * Context: 2707 * Kernel context. 
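 *
 * Note: domain, area and al_pa are accepted for symmetry with the
 * ISP24xx IOCB-based variant above, but this mailbox form only uses
 * loop_id.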
2708 */ 2709 int 2710 qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, 2711 uint8_t area, uint8_t al_pa) 2712 { 2713 int rval; 2714 mbx_cmd_t mc; 2715 mbx_cmd_t *mcp = &mc; 2716 2717 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1073, 2718 "Entered %s.\n", __func__); 2719 2720 mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT; 2721 mcp->out_mb = MBX_1|MBX_0; 2722 if (HAS_EXTENDED_IDS(vha->hw)) { 2723 mcp->mb[1] = loop_id; 2724 mcp->mb[10] = 0; 2725 mcp->out_mb |= MBX_10; 2726 } else { 2727 mcp->mb[1] = loop_id << 8; 2728 } 2729 2730 mcp->in_mb = MBX_1|MBX_0; 2731 mcp->tov = MBX_TOV_SECONDS; 2732 mcp->flags = 0; 2733 rval = qla2x00_mailbox_command(vha, mcp); 2734 2735 if (rval != QLA_SUCCESS) { 2736 /*EMPTY*/ 2737 ql_dbg(ql_dbg_mbx, vha, 0x1074, 2738 "Failed=%x mb[1]=%x.\n", rval, mcp->mb[1]); 2739 } else { 2740 /*EMPTY*/ 2741 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1075, 2742 "Done %s.\n", __func__); 2743 } 2744 2745 return rval; 2746 } 2747 2748 /* 2749 * qla2x00_full_login_lip 2750 * Issue full login LIP mailbox command. 2751 * 2752 * Input: 2753 * ha = adapter block pointer. 2754 * TARGET_QUEUE_LOCK must be released. 2755 * ADAPTER_STATE_LOCK must be released. 2756 * 2757 * Returns: 2758 * qla2x00 local function return status code. 2759 * 2760 * Context: 2761 * Kernel context. 2762 */ 2763 int 2764 qla2x00_full_login_lip(scsi_qla_host_t *vha) 2765 { 2766 int rval; 2767 mbx_cmd_t mc; 2768 mbx_cmd_t *mcp = &mc; 2769 2770 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1076, 2771 "Entered %s.\n", __func__); 2772 2773 mcp->mb[0] = MBC_LIP_FULL_LOGIN; 2774 mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? BIT_4 : 0; 2775 mcp->mb[2] = 0; 2776 mcp->mb[3] = 0; 2777 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 2778 mcp->in_mb = MBX_0; 2779 mcp->tov = MBX_TOV_SECONDS; 2780 mcp->flags = 0; 2781 rval = qla2x00_mailbox_command(vha, mcp); 2782 2783 if (rval != QLA_SUCCESS) { 2784 /*EMPTY*/ 2785 ql_dbg(ql_dbg_mbx, vha, 0x1077, "Failed=%x.\n", rval); 2786 } else { 2787 /*EMPTY*/ 2788 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1078, 2789 "Done %s.\n", __func__); 2790 } 2791 2792 return rval; 2793 } 2794 2795 /* 2796 * qla2x00_get_id_list 2797 * 2798 * Input: 2799 * ha = adapter block pointer. 2800 * 2801 * Returns: 2802 * qla2x00 local function return status code. 2803 * 2804 * Context: 2805 * Kernel context. 
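 *
 * Note: id_list/id_list_dma describe a DMA-able buffer that receives the
 * ID list; the number of entries reported by the firmware in mb[1] is
 * returned through *entries.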
2806 */ 2807 int 2808 qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma, 2809 uint16_t *entries) 2810 { 2811 int rval; 2812 mbx_cmd_t mc; 2813 mbx_cmd_t *mcp = &mc; 2814 2815 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1079, 2816 "Entered %s.\n", __func__); 2817 2818 if (id_list == NULL) 2819 return QLA_FUNCTION_FAILED; 2820 2821 mcp->mb[0] = MBC_GET_ID_LIST; 2822 mcp->out_mb = MBX_0; 2823 if (IS_FWI2_CAPABLE(vha->hw)) { 2824 mcp->mb[2] = MSW(id_list_dma); 2825 mcp->mb[3] = LSW(id_list_dma); 2826 mcp->mb[6] = MSW(MSD(id_list_dma)); 2827 mcp->mb[7] = LSW(MSD(id_list_dma)); 2828 mcp->mb[8] = 0; 2829 mcp->mb[9] = vha->vp_idx; 2830 mcp->out_mb |= MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2; 2831 } else { 2832 mcp->mb[1] = MSW(id_list_dma); 2833 mcp->mb[2] = LSW(id_list_dma); 2834 mcp->mb[3] = MSW(MSD(id_list_dma)); 2835 mcp->mb[6] = LSW(MSD(id_list_dma)); 2836 mcp->out_mb |= MBX_6|MBX_3|MBX_2|MBX_1; 2837 } 2838 mcp->in_mb = MBX_1|MBX_0; 2839 mcp->tov = MBX_TOV_SECONDS; 2840 mcp->flags = 0; 2841 rval = qla2x00_mailbox_command(vha, mcp); 2842 2843 if (rval != QLA_SUCCESS) { 2844 /*EMPTY*/ 2845 ql_dbg(ql_dbg_mbx, vha, 0x107a, "Failed=%x.\n", rval); 2846 } else { 2847 *entries = mcp->mb[1]; 2848 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107b, 2849 "Done %s.\n", __func__); 2850 } 2851 2852 return rval; 2853 } 2854 2855 /* 2856 * qla2x00_get_resource_cnts 2857 * Get current firmware resource counts. 2858 * 2859 * Input: 2860 * ha = adapter block pointer. 2861 * 2862 * Returns: 2863 * qla2x00 local function return status code. 2864 * 2865 * Context: 2866 * Kernel context. 2867 */ 2868 int 2869 qla2x00_get_resource_cnts(scsi_qla_host_t *vha) 2870 { 2871 struct qla_hw_data *ha = vha->hw; 2872 int rval; 2873 mbx_cmd_t mc; 2874 mbx_cmd_t *mcp = &mc; 2875 2876 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107c, 2877 "Entered %s.\n", __func__); 2878 2879 mcp->mb[0] = MBC_GET_RESOURCE_COUNTS; 2880 mcp->out_mb = MBX_0; 2881 mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 2882 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || 2883 IS_QLA27XX(ha) || IS_QLA28XX(ha)) 2884 mcp->in_mb |= MBX_12; 2885 mcp->tov = MBX_TOV_SECONDS; 2886 mcp->flags = 0; 2887 rval = qla2x00_mailbox_command(vha, mcp); 2888 2889 if (rval != QLA_SUCCESS) { 2890 /*EMPTY*/ 2891 ql_dbg(ql_dbg_mbx, vha, 0x107d, 2892 "Failed mb[0]=%x.\n", mcp->mb[0]); 2893 } else { 2894 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107e, 2895 "Done %s mb1=%x mb2=%x mb3=%x mb6=%x mb7=%x mb10=%x " 2896 "mb11=%x mb12=%x.\n", __func__, mcp->mb[1], mcp->mb[2], 2897 mcp->mb[3], mcp->mb[6], mcp->mb[7], mcp->mb[10], 2898 mcp->mb[11], mcp->mb[12]); 2899 2900 ha->orig_fw_tgt_xcb_count = mcp->mb[1]; 2901 ha->cur_fw_tgt_xcb_count = mcp->mb[2]; 2902 ha->cur_fw_xcb_count = mcp->mb[3]; 2903 ha->orig_fw_xcb_count = mcp->mb[6]; 2904 ha->cur_fw_iocb_count = mcp->mb[7]; 2905 ha->orig_fw_iocb_count = mcp->mb[10]; 2906 if (ha->flags.npiv_supported) 2907 ha->max_npiv_vports = mcp->mb[11]; 2908 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) || 2909 IS_QLA28XX(ha)) 2910 ha->fw_max_fcf_count = mcp->mb[12]; 2911 } 2912 2913 return (rval); 2914 } 2915 2916 /* 2917 * qla2x00_get_fcal_position_map 2918 * Get FCAL (LILP) position map using mailbox command 2919 * 2920 * Input: 2921 * ha = adapter state pointer. 2922 * pos_map = buffer pointer (can be NULL). 2923 * 2924 * Returns: 2925 * qla2x00 local function return status code. 2926 * 2927 * Context: 2928 * Kernel context. 
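 *
 * Note: when pos_map is non-NULL it receives FCAL_MAP_SIZE bytes; byte 0
 * of the map holds the length reported by the firmware.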
2929 */ 2930 int 2931 qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map) 2932 { 2933 int rval; 2934 mbx_cmd_t mc; 2935 mbx_cmd_t *mcp = &mc; 2936 char *pmap; 2937 dma_addr_t pmap_dma; 2938 struct qla_hw_data *ha = vha->hw; 2939 2940 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107f, 2941 "Entered %s.\n", __func__); 2942 2943 pmap = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma); 2944 if (pmap == NULL) { 2945 ql_log(ql_log_warn, vha, 0x1080, 2946 "Memory alloc failed.\n"); 2947 return QLA_MEMORY_ALLOC_FAILED; 2948 } 2949 2950 mcp->mb[0] = MBC_GET_FC_AL_POSITION_MAP; 2951 mcp->mb[2] = MSW(pmap_dma); 2952 mcp->mb[3] = LSW(pmap_dma); 2953 mcp->mb[6] = MSW(MSD(pmap_dma)); 2954 mcp->mb[7] = LSW(MSD(pmap_dma)); 2955 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0; 2956 mcp->in_mb = MBX_1|MBX_0; 2957 mcp->buf_size = FCAL_MAP_SIZE; 2958 mcp->flags = MBX_DMA_IN; 2959 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2); 2960 rval = qla2x00_mailbox_command(vha, mcp); 2961 2962 if (rval == QLA_SUCCESS) { 2963 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1081, 2964 "mb0/mb1=%x/%X FC/AL position map size (%x).\n", 2965 mcp->mb[0], mcp->mb[1], (unsigned)pmap[0]); 2966 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111d, 2967 pmap, pmap[0] + 1); 2968 2969 if (pos_map) 2970 memcpy(pos_map, pmap, FCAL_MAP_SIZE); 2971 } 2972 dma_pool_free(ha->s_dma_pool, pmap, pmap_dma); 2973 2974 if (rval != QLA_SUCCESS) { 2975 ql_dbg(ql_dbg_mbx, vha, 0x1082, "Failed=%x.\n", rval); 2976 } else { 2977 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1083, 2978 "Done %s.\n", __func__); 2979 } 2980 2981 return rval; 2982 } 2983 2984 /* 2985 * qla2x00_get_link_status 2986 * 2987 * Input: 2988 * ha = adapter block pointer. 2989 * loop_id = device loop ID. 2990 * ret_buf = pointer to link status return buffer. 2991 * 2992 * Returns: 2993 * 0 = success. 2994 * BIT_0 = mem alloc error. 2995 * BIT_1 = mailbox error. 2996 */ 2997 int 2998 qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id, 2999 struct link_statistics *stats, dma_addr_t stats_dma) 3000 { 3001 int rval; 3002 mbx_cmd_t mc; 3003 mbx_cmd_t *mcp = &mc; 3004 uint32_t *iter = (void *)stats; 3005 ushort dwords = offsetof(typeof(*stats), link_up_cnt)/sizeof(*iter); 3006 struct qla_hw_data *ha = vha->hw; 3007 3008 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1084, 3009 "Entered %s.\n", __func__); 3010 3011 mcp->mb[0] = MBC_GET_LINK_STATUS; 3012 mcp->mb[2] = MSW(LSD(stats_dma)); 3013 mcp->mb[3] = LSW(LSD(stats_dma)); 3014 mcp->mb[6] = MSW(MSD(stats_dma)); 3015 mcp->mb[7] = LSW(MSD(stats_dma)); 3016 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0; 3017 mcp->in_mb = MBX_0; 3018 if (IS_FWI2_CAPABLE(ha)) { 3019 mcp->mb[1] = loop_id; 3020 mcp->mb[4] = 0; 3021 mcp->mb[10] = 0; 3022 mcp->out_mb |= MBX_10|MBX_4|MBX_1; 3023 mcp->in_mb |= MBX_1; 3024 } else if (HAS_EXTENDED_IDS(ha)) { 3025 mcp->mb[1] = loop_id; 3026 mcp->mb[10] = 0; 3027 mcp->out_mb |= MBX_10|MBX_1; 3028 } else { 3029 mcp->mb[1] = loop_id << 8; 3030 mcp->out_mb |= MBX_1; 3031 } 3032 mcp->tov = MBX_TOV_SECONDS; 3033 mcp->flags = IOCTL_CMD; 3034 rval = qla2x00_mailbox_command(vha, mcp); 3035 3036 if (rval == QLA_SUCCESS) { 3037 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) { 3038 ql_dbg(ql_dbg_mbx, vha, 0x1085, 3039 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 3040 rval = QLA_FUNCTION_FAILED; 3041 } else { 3042 /* Re-endianize - firmware data is le32. 
*/ 3043 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1086, 3044 "Done %s.\n", __func__); 3045 for ( ; dwords--; iter++) 3046 le32_to_cpus(iter); 3047 } 3048 } else { 3049 /* Failed. */ 3050 ql_dbg(ql_dbg_mbx, vha, 0x1087, "Failed=%x.\n", rval); 3051 } 3052 3053 return rval; 3054 } 3055 3056 int 3057 qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats, 3058 dma_addr_t stats_dma, uint16_t options) 3059 { 3060 int rval; 3061 mbx_cmd_t mc; 3062 mbx_cmd_t *mcp = &mc; 3063 uint32_t *iter, dwords; 3064 3065 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1088, 3066 "Entered %s.\n", __func__); 3067 3068 memset(&mc, 0, sizeof(mc)); 3069 mc.mb[0] = MBC_GET_LINK_PRIV_STATS; 3070 mc.mb[2] = MSW(stats_dma); 3071 mc.mb[3] = LSW(stats_dma); 3072 mc.mb[6] = MSW(MSD(stats_dma)); 3073 mc.mb[7] = LSW(MSD(stats_dma)); 3074 mc.mb[8] = sizeof(struct link_statistics) / 4; 3075 mc.mb[9] = cpu_to_le16(vha->vp_idx); 3076 mc.mb[10] = cpu_to_le16(options); 3077 3078 rval = qla24xx_send_mb_cmd(vha, &mc); 3079 3080 if (rval == QLA_SUCCESS) { 3081 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) { 3082 ql_dbg(ql_dbg_mbx, vha, 0x1089, 3083 "Failed mb[0]=%x.\n", mcp->mb[0]); 3084 rval = QLA_FUNCTION_FAILED; 3085 } else { 3086 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108a, 3087 "Done %s.\n", __func__); 3088 /* Re-endianize - firmware data is le32. */ 3089 dwords = sizeof(struct link_statistics) / 4; 3090 iter = &stats->link_fail_cnt; 3091 for ( ; dwords--; iter++) 3092 le32_to_cpus(iter); 3093 } 3094 } else { 3095 /* Failed. */ 3096 ql_dbg(ql_dbg_mbx, vha, 0x108b, "Failed=%x.\n", rval); 3097 } 3098 3099 return rval; 3100 } 3101 3102 int 3103 qla24xx_abort_command(srb_t *sp) 3104 { 3105 int rval; 3106 unsigned long flags = 0; 3107 3108 struct abort_entry_24xx *abt; 3109 dma_addr_t abt_dma; 3110 uint32_t handle; 3111 fc_port_t *fcport = sp->fcport; 3112 struct scsi_qla_host *vha = fcport->vha; 3113 struct qla_hw_data *ha = vha->hw; 3114 struct req_que *req = vha->req; 3115 struct qla_qpair *qpair = sp->qpair; 3116 3117 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c, 3118 "Entered %s.\n", __func__); 3119 3120 if (vha->flags.qpairs_available && sp->qpair) 3121 req = sp->qpair->req; 3122 else 3123 return QLA_FUNCTION_FAILED; 3124 3125 if (ql2xasynctmfenable) 3126 return qla24xx_async_abort_command(sp); 3127 3128 spin_lock_irqsave(qpair->qp_lock_ptr, flags); 3129 for (handle = 1; handle < req->num_outstanding_cmds; handle++) { 3130 if (req->outstanding_cmds[handle] == sp) 3131 break; 3132 } 3133 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); 3134 if (handle == req->num_outstanding_cmds) { 3135 /* Command not found. 
 */
		return QLA_FUNCTION_FAILED;
	}

	abt = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &abt_dma);
	if (abt == NULL) {
		ql_log(ql_log_warn, vha, 0x108d,
		    "Failed to allocate abort IOCB.\n");
		return QLA_MEMORY_ALLOC_FAILED;
	}

	abt->entry_type = ABORT_IOCB_TYPE;
	abt->entry_count = 1;
	abt->handle = MAKE_HANDLE(req->id, abt->handle);
	abt->nport_handle = cpu_to_le16(fcport->loop_id);
	abt->handle_to_abort = MAKE_HANDLE(req->id, handle);
	abt->port_id[0] = fcport->d_id.b.al_pa;
	abt->port_id[1] = fcport->d_id.b.area;
	abt->port_id[2] = fcport->d_id.b.domain;
	abt->vp_index = fcport->vha->vp_idx;

	abt->req_que_no = cpu_to_le16(req->id);

	rval = qla2x00_issue_iocb(vha, abt, abt_dma, 0);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x108e,
		    "Failed to issue IOCB (%x).\n", rval);
	} else if (abt->entry_status != 0) {
		ql_dbg(ql_dbg_mbx, vha, 0x108f,
		    "Failed to complete IOCB -- error status (%x).\n",
		    abt->entry_status);
		rval = QLA_FUNCTION_FAILED;
	} else if (abt->nport_handle != cpu_to_le16(0)) {
		ql_dbg(ql_dbg_mbx, vha, 0x1090,
		    "Failed to complete IOCB -- completion status (%x).\n",
		    le16_to_cpu(abt->nport_handle));
		if (abt->nport_handle == cpu_to_le16(CS_IOCB_ERROR))
			rval = QLA_FUNCTION_PARAMETER_ERROR;
		else
			rval = QLA_FUNCTION_FAILED;
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1091,
		    "Done %s.\n", __func__);
	}

	dma_pool_free(ha->s_dma_pool, abt, abt_dma);

	return rval;
}

struct tsk_mgmt_cmd {
	union {
		struct tsk_mgmt_entry tsk;
		struct sts_entry_24xx sts;
	} p;
};

static int
__qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
    uint64_t l, int tag)
{
	int rval, rval2;
	struct tsk_mgmt_cmd *tsk;
	struct sts_entry_24xx *sts;
	dma_addr_t tsk_dma;
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	struct req_que *req;
	struct qla_qpair *qpair;

	vha = fcport->vha;
	ha = vha->hw;
	req = vha->req;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1092,
	    "Entered %s.\n", __func__);

	if (vha->vp_idx && vha->qpair) {
		/* NPIV port */
		qpair = vha->qpair;
		req = qpair->req;
	}

	tsk = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma);
	if (tsk == NULL) {
		ql_log(ql_log_warn, vha, 0x1093,
		    "Failed to allocate task management IOCB.\n");
		return QLA_MEMORY_ALLOC_FAILED;
	}

	tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE;
	tsk->p.tsk.entry_count = 1;
	tsk->p.tsk.handle = MAKE_HANDLE(req->id, tsk->p.tsk.handle);
	tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id);
	tsk->p.tsk.timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
	tsk->p.tsk.control_flags = cpu_to_le32(type);
	tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa;
	tsk->p.tsk.port_id[1] = fcport->d_id.b.area;
	tsk->p.tsk.port_id[2] = fcport->d_id.b.domain;
	tsk->p.tsk.vp_index = fcport->vha->vp_idx;
	if (type == TCF_LUN_RESET) {
		int_to_scsilun(l, &tsk->p.tsk.lun);
		host_to_fcp_swap((uint8_t *)&tsk->p.tsk.lun,
		    sizeof(tsk->p.tsk.lun));
	}

	sts = &tsk->p.sts;
	rval = qla2x00_issue_iocb(vha, tsk, tsk_dma, 0);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1094,
		    "Failed to issue %s reset IOCB (%x).\n", name, rval);
	} else if (sts->entry_status != 0) {
ql_dbg(ql_dbg_mbx, vha, 0x1095, 3248 "Failed to complete IOCB -- error status (%x).\n", 3249 sts->entry_status); 3250 rval = QLA_FUNCTION_FAILED; 3251 } else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) { 3252 ql_dbg(ql_dbg_mbx, vha, 0x1096, 3253 "Failed to complete IOCB -- completion status (%x).\n", 3254 le16_to_cpu(sts->comp_status)); 3255 rval = QLA_FUNCTION_FAILED; 3256 } else if (le16_to_cpu(sts->scsi_status) & 3257 SS_RESPONSE_INFO_LEN_VALID) { 3258 if (le32_to_cpu(sts->rsp_data_len) < 4) { 3259 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1097, 3260 "Ignoring inconsistent data length -- not enough " 3261 "response info (%d).\n", 3262 le32_to_cpu(sts->rsp_data_len)); 3263 } else if (sts->data[3]) { 3264 ql_dbg(ql_dbg_mbx, vha, 0x1098, 3265 "Failed to complete IOCB -- response (%x).\n", 3266 sts->data[3]); 3267 rval = QLA_FUNCTION_FAILED; 3268 } 3269 } 3270 3271 /* Issue marker IOCB. */ 3272 rval2 = qla2x00_marker(vha, ha->base_qpair, fcport->loop_id, l, 3273 type == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID); 3274 if (rval2 != QLA_SUCCESS) { 3275 ql_dbg(ql_dbg_mbx, vha, 0x1099, 3276 "Failed to issue marker IOCB (%x).\n", rval2); 3277 } else { 3278 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109a, 3279 "Done %s.\n", __func__); 3280 } 3281 3282 dma_pool_free(ha->s_dma_pool, tsk, tsk_dma); 3283 3284 return rval; 3285 } 3286 3287 int 3288 qla24xx_abort_target(struct fc_port *fcport, uint64_t l, int tag) 3289 { 3290 struct qla_hw_data *ha = fcport->vha->hw; 3291 3292 if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha)) 3293 return qla2x00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag); 3294 3295 return __qla24xx_issue_tmf("Target", TCF_TARGET_RESET, fcport, l, tag); 3296 } 3297 3298 int 3299 qla24xx_lun_reset(struct fc_port *fcport, uint64_t l, int tag) 3300 { 3301 struct qla_hw_data *ha = fcport->vha->hw; 3302 3303 if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha)) 3304 return qla2x00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag); 3305 3306 return __qla24xx_issue_tmf("Lun", TCF_LUN_RESET, fcport, l, tag); 3307 } 3308 3309 int 3310 qla2x00_system_error(scsi_qla_host_t *vha) 3311 { 3312 int rval; 3313 mbx_cmd_t mc; 3314 mbx_cmd_t *mcp = &mc; 3315 struct qla_hw_data *ha = vha->hw; 3316 3317 if (!IS_QLA23XX(ha) && !IS_FWI2_CAPABLE(ha)) 3318 return QLA_FUNCTION_FAILED; 3319 3320 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109b, 3321 "Entered %s.\n", __func__); 3322 3323 mcp->mb[0] = MBC_GEN_SYSTEM_ERROR; 3324 mcp->out_mb = MBX_0; 3325 mcp->in_mb = MBX_0; 3326 mcp->tov = 5; 3327 mcp->flags = 0; 3328 rval = qla2x00_mailbox_command(vha, mcp); 3329 3330 if (rval != QLA_SUCCESS) { 3331 ql_dbg(ql_dbg_mbx, vha, 0x109c, "Failed=%x.\n", rval); 3332 } else { 3333 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109d, 3334 "Done %s.\n", __func__); 3335 } 3336 3337 return rval; 3338 } 3339 3340 int 3341 qla2x00_write_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t data) 3342 { 3343 int rval; 3344 mbx_cmd_t mc; 3345 mbx_cmd_t *mcp = &mc; 3346 3347 if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) && 3348 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw)) 3349 return QLA_FUNCTION_FAILED; 3350 3351 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1182, 3352 "Entered %s.\n", __func__); 3353 3354 mcp->mb[0] = MBC_WRITE_SERDES; 3355 mcp->mb[1] = addr; 3356 if (IS_QLA2031(vha->hw)) 3357 mcp->mb[2] = data & 0xff; 3358 else 3359 mcp->mb[2] = data; 3360 3361 mcp->mb[3] = 0; 3362 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 3363 mcp->in_mb = MBX_0; 3364 mcp->tov = MBX_TOV_SECONDS; 3365 mcp->flags = 0; 3366 rval = 
qla2x00_mailbox_command(vha, mcp); 3367 3368 if (rval != QLA_SUCCESS) { 3369 ql_dbg(ql_dbg_mbx, vha, 0x1183, 3370 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 3371 } else { 3372 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1184, 3373 "Done %s.\n", __func__); 3374 } 3375 3376 return rval; 3377 } 3378 3379 int 3380 qla2x00_read_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t *data) 3381 { 3382 int rval; 3383 mbx_cmd_t mc; 3384 mbx_cmd_t *mcp = &mc; 3385 3386 if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) && 3387 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw)) 3388 return QLA_FUNCTION_FAILED; 3389 3390 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1185, 3391 "Entered %s.\n", __func__); 3392 3393 mcp->mb[0] = MBC_READ_SERDES; 3394 mcp->mb[1] = addr; 3395 mcp->mb[3] = 0; 3396 mcp->out_mb = MBX_3|MBX_1|MBX_0; 3397 mcp->in_mb = MBX_1|MBX_0; 3398 mcp->tov = MBX_TOV_SECONDS; 3399 mcp->flags = 0; 3400 rval = qla2x00_mailbox_command(vha, mcp); 3401 3402 if (IS_QLA2031(vha->hw)) 3403 *data = mcp->mb[1] & 0xff; 3404 else 3405 *data = mcp->mb[1]; 3406 3407 if (rval != QLA_SUCCESS) { 3408 ql_dbg(ql_dbg_mbx, vha, 0x1186, 3409 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 3410 } else { 3411 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1187, 3412 "Done %s.\n", __func__); 3413 } 3414 3415 return rval; 3416 } 3417 3418 int 3419 qla8044_write_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t data) 3420 { 3421 int rval; 3422 mbx_cmd_t mc; 3423 mbx_cmd_t *mcp = &mc; 3424 3425 if (!IS_QLA8044(vha->hw)) 3426 return QLA_FUNCTION_FAILED; 3427 3428 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x11a0, 3429 "Entered %s.\n", __func__); 3430 3431 mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG; 3432 mcp->mb[1] = HCS_WRITE_SERDES; 3433 mcp->mb[3] = LSW(addr); 3434 mcp->mb[4] = MSW(addr); 3435 mcp->mb[5] = LSW(data); 3436 mcp->mb[6] = MSW(data); 3437 mcp->out_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_1|MBX_0; 3438 mcp->in_mb = MBX_0; 3439 mcp->tov = MBX_TOV_SECONDS; 3440 mcp->flags = 0; 3441 rval = qla2x00_mailbox_command(vha, mcp); 3442 3443 if (rval != QLA_SUCCESS) { 3444 ql_dbg(ql_dbg_mbx, vha, 0x11a1, 3445 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 3446 } else { 3447 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1188, 3448 "Done %s.\n", __func__); 3449 } 3450 3451 return rval; 3452 } 3453 3454 int 3455 qla8044_read_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t *data) 3456 { 3457 int rval; 3458 mbx_cmd_t mc; 3459 mbx_cmd_t *mcp = &mc; 3460 3461 if (!IS_QLA8044(vha->hw)) 3462 return QLA_FUNCTION_FAILED; 3463 3464 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1189, 3465 "Entered %s.\n", __func__); 3466 3467 mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG; 3468 mcp->mb[1] = HCS_READ_SERDES; 3469 mcp->mb[3] = LSW(addr); 3470 mcp->mb[4] = MSW(addr); 3471 mcp->out_mb = MBX_4|MBX_3|MBX_1|MBX_0; 3472 mcp->in_mb = MBX_2|MBX_1|MBX_0; 3473 mcp->tov = MBX_TOV_SECONDS; 3474 mcp->flags = 0; 3475 rval = qla2x00_mailbox_command(vha, mcp); 3476 3477 *data = mcp->mb[2] << 16 | mcp->mb[1]; 3478 3479 if (rval != QLA_SUCCESS) { 3480 ql_dbg(ql_dbg_mbx, vha, 0x118a, 3481 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 3482 } else { 3483 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118b, 3484 "Done %s.\n", __func__); 3485 } 3486 3487 return rval; 3488 } 3489 3490 /** 3491 * qla2x00_set_serdes_params() - 3492 * @vha: HA context 3493 * @sw_em_1g: serial link options 3494 * @sw_em_2g: serial link options 3495 * @sw_em_4g: serial link options 3496 * 3497 * Returns 3498 */ 3499 int 3500 qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g, 3501 uint16_t 
sw_em_2g, uint16_t sw_em_4g) 3502 { 3503 int rval; 3504 mbx_cmd_t mc; 3505 mbx_cmd_t *mcp = &mc; 3506 3507 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109e, 3508 "Entered %s.\n", __func__); 3509 3510 mcp->mb[0] = MBC_SERDES_PARAMS; 3511 mcp->mb[1] = BIT_0; 3512 mcp->mb[2] = sw_em_1g | BIT_15; 3513 mcp->mb[3] = sw_em_2g | BIT_15; 3514 mcp->mb[4] = sw_em_4g | BIT_15; 3515 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 3516 mcp->in_mb = MBX_0; 3517 mcp->tov = MBX_TOV_SECONDS; 3518 mcp->flags = 0; 3519 rval = qla2x00_mailbox_command(vha, mcp); 3520 3521 if (rval != QLA_SUCCESS) { 3522 /*EMPTY*/ 3523 ql_dbg(ql_dbg_mbx, vha, 0x109f, 3524 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 3525 } else { 3526 /*EMPTY*/ 3527 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a0, 3528 "Done %s.\n", __func__); 3529 } 3530 3531 return rval; 3532 } 3533 3534 int 3535 qla2x00_stop_firmware(scsi_qla_host_t *vha) 3536 { 3537 int rval; 3538 mbx_cmd_t mc; 3539 mbx_cmd_t *mcp = &mc; 3540 3541 if (!IS_FWI2_CAPABLE(vha->hw)) 3542 return QLA_FUNCTION_FAILED; 3543 3544 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a1, 3545 "Entered %s.\n", __func__); 3546 3547 mcp->mb[0] = MBC_STOP_FIRMWARE; 3548 mcp->mb[1] = 0; 3549 mcp->out_mb = MBX_1|MBX_0; 3550 mcp->in_mb = MBX_0; 3551 mcp->tov = 5; 3552 mcp->flags = 0; 3553 rval = qla2x00_mailbox_command(vha, mcp); 3554 3555 if (rval != QLA_SUCCESS) { 3556 ql_dbg(ql_dbg_mbx, vha, 0x10a2, "Failed=%x.\n", rval); 3557 if (mcp->mb[0] == MBS_INVALID_COMMAND) 3558 rval = QLA_INVALID_COMMAND; 3559 } else { 3560 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a3, 3561 "Done %s.\n", __func__); 3562 } 3563 3564 return rval; 3565 } 3566 3567 int 3568 qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma, 3569 uint16_t buffers) 3570 { 3571 int rval; 3572 mbx_cmd_t mc; 3573 mbx_cmd_t *mcp = &mc; 3574 3575 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a4, 3576 "Entered %s.\n", __func__); 3577 3578 if (!IS_FWI2_CAPABLE(vha->hw)) 3579 return QLA_FUNCTION_FAILED; 3580 3581 if (unlikely(pci_channel_offline(vha->hw->pdev))) 3582 return QLA_FUNCTION_FAILED; 3583 3584 mcp->mb[0] = MBC_TRACE_CONTROL; 3585 mcp->mb[1] = TC_EFT_ENABLE; 3586 mcp->mb[2] = LSW(eft_dma); 3587 mcp->mb[3] = MSW(eft_dma); 3588 mcp->mb[4] = LSW(MSD(eft_dma)); 3589 mcp->mb[5] = MSW(MSD(eft_dma)); 3590 mcp->mb[6] = buffers; 3591 mcp->mb[7] = TC_AEN_DISABLE; 3592 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 3593 mcp->in_mb = MBX_1|MBX_0; 3594 mcp->tov = MBX_TOV_SECONDS; 3595 mcp->flags = 0; 3596 rval = qla2x00_mailbox_command(vha, mcp); 3597 if (rval != QLA_SUCCESS) { 3598 ql_dbg(ql_dbg_mbx, vha, 0x10a5, 3599 "Failed=%x mb[0]=%x mb[1]=%x.\n", 3600 rval, mcp->mb[0], mcp->mb[1]); 3601 } else { 3602 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a6, 3603 "Done %s.\n", __func__); 3604 } 3605 3606 return rval; 3607 } 3608 3609 int 3610 qla2x00_disable_eft_trace(scsi_qla_host_t *vha) 3611 { 3612 int rval; 3613 mbx_cmd_t mc; 3614 mbx_cmd_t *mcp = &mc; 3615 3616 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a7, 3617 "Entered %s.\n", __func__); 3618 3619 if (!IS_FWI2_CAPABLE(vha->hw)) 3620 return QLA_FUNCTION_FAILED; 3621 3622 if (unlikely(pci_channel_offline(vha->hw->pdev))) 3623 return QLA_FUNCTION_FAILED; 3624 3625 mcp->mb[0] = MBC_TRACE_CONTROL; 3626 mcp->mb[1] = TC_EFT_DISABLE; 3627 mcp->out_mb = MBX_1|MBX_0; 3628 mcp->in_mb = MBX_1|MBX_0; 3629 mcp->tov = MBX_TOV_SECONDS; 3630 mcp->flags = 0; 3631 rval = qla2x00_mailbox_command(vha, mcp); 3632 if (rval != QLA_SUCCESS) { 3633 ql_dbg(ql_dbg_mbx, vha, 0x10a8, 3634 "Failed=%x 
mb[0]=%x mb[1]=%x.\n", 3635 rval, mcp->mb[0], mcp->mb[1]); 3636 } else { 3637 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a9, 3638 "Done %s.\n", __func__); 3639 } 3640 3641 return rval; 3642 } 3643 3644 int 3645 qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma, 3646 uint16_t buffers, uint16_t *mb, uint32_t *dwords) 3647 { 3648 int rval; 3649 mbx_cmd_t mc; 3650 mbx_cmd_t *mcp = &mc; 3651 3652 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10aa, 3653 "Entered %s.\n", __func__); 3654 3655 if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw) && 3656 !IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) && 3657 !IS_QLA28XX(vha->hw)) 3658 return QLA_FUNCTION_FAILED; 3659 3660 if (unlikely(pci_channel_offline(vha->hw->pdev))) 3661 return QLA_FUNCTION_FAILED; 3662 3663 mcp->mb[0] = MBC_TRACE_CONTROL; 3664 mcp->mb[1] = TC_FCE_ENABLE; 3665 mcp->mb[2] = LSW(fce_dma); 3666 mcp->mb[3] = MSW(fce_dma); 3667 mcp->mb[4] = LSW(MSD(fce_dma)); 3668 mcp->mb[5] = MSW(MSD(fce_dma)); 3669 mcp->mb[6] = buffers; 3670 mcp->mb[7] = TC_AEN_DISABLE; 3671 mcp->mb[8] = 0; 3672 mcp->mb[9] = TC_FCE_DEFAULT_RX_SIZE; 3673 mcp->mb[10] = TC_FCE_DEFAULT_TX_SIZE; 3674 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2| 3675 MBX_1|MBX_0; 3676 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 3677 mcp->tov = MBX_TOV_SECONDS; 3678 mcp->flags = 0; 3679 rval = qla2x00_mailbox_command(vha, mcp); 3680 if (rval != QLA_SUCCESS) { 3681 ql_dbg(ql_dbg_mbx, vha, 0x10ab, 3682 "Failed=%x mb[0]=%x mb[1]=%x.\n", 3683 rval, mcp->mb[0], mcp->mb[1]); 3684 } else { 3685 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ac, 3686 "Done %s.\n", __func__); 3687 3688 if (mb) 3689 memcpy(mb, mcp->mb, 8 * sizeof(*mb)); 3690 if (dwords) 3691 *dwords = buffers; 3692 } 3693 3694 return rval; 3695 } 3696 3697 int 3698 qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd) 3699 { 3700 int rval; 3701 mbx_cmd_t mc; 3702 mbx_cmd_t *mcp = &mc; 3703 3704 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ad, 3705 "Entered %s.\n", __func__); 3706 3707 if (!IS_FWI2_CAPABLE(vha->hw)) 3708 return QLA_FUNCTION_FAILED; 3709 3710 if (unlikely(pci_channel_offline(vha->hw->pdev))) 3711 return QLA_FUNCTION_FAILED; 3712 3713 mcp->mb[0] = MBC_TRACE_CONTROL; 3714 mcp->mb[1] = TC_FCE_DISABLE; 3715 mcp->mb[2] = TC_FCE_DISABLE_TRACE; 3716 mcp->out_mb = MBX_2|MBX_1|MBX_0; 3717 mcp->in_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2| 3718 MBX_1|MBX_0; 3719 mcp->tov = MBX_TOV_SECONDS; 3720 mcp->flags = 0; 3721 rval = qla2x00_mailbox_command(vha, mcp); 3722 if (rval != QLA_SUCCESS) { 3723 ql_dbg(ql_dbg_mbx, vha, 0x10ae, 3724 "Failed=%x mb[0]=%x mb[1]=%x.\n", 3725 rval, mcp->mb[0], mcp->mb[1]); 3726 } else { 3727 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10af, 3728 "Done %s.\n", __func__); 3729 3730 if (wr) 3731 *wr = (uint64_t) mcp->mb[5] << 48 | 3732 (uint64_t) mcp->mb[4] << 32 | 3733 (uint64_t) mcp->mb[3] << 16 | 3734 (uint64_t) mcp->mb[2]; 3735 if (rd) 3736 *rd = (uint64_t) mcp->mb[9] << 48 | 3737 (uint64_t) mcp->mb[8] << 32 | 3738 (uint64_t) mcp->mb[7] << 16 | 3739 (uint64_t) mcp->mb[6]; 3740 } 3741 3742 return rval; 3743 } 3744 3745 int 3746 qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id, 3747 uint16_t *port_speed, uint16_t *mb) 3748 { 3749 int rval; 3750 mbx_cmd_t mc; 3751 mbx_cmd_t *mcp = &mc; 3752 3753 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b0, 3754 "Entered %s.\n", __func__); 3755 3756 if (!IS_IIDMA_CAPABLE(vha->hw)) 3757 return QLA_FUNCTION_FAILED; 3758 3759 mcp->mb[0] = MBC_PORT_PARAMS; 3760 mcp->mb[1] = loop_id; 
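	/*
	 * Leaving mb[2]/mb[3] clear queries the current settings;
	 * qla2x00_set_idma_speed() below sets BIT_0 in mb[2] and the
	 * requested speed in mb[3]. The current port speed comes back
	 * in mb[3].
	 */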
3761 mcp->mb[2] = mcp->mb[3] = 0; 3762 mcp->mb[9] = vha->vp_idx; 3763 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0; 3764 mcp->in_mb = MBX_3|MBX_1|MBX_0; 3765 mcp->tov = MBX_TOV_SECONDS; 3766 mcp->flags = 0; 3767 rval = qla2x00_mailbox_command(vha, mcp); 3768 3769 /* Return mailbox statuses. */ 3770 if (mb) { 3771 mb[0] = mcp->mb[0]; 3772 mb[1] = mcp->mb[1]; 3773 mb[3] = mcp->mb[3]; 3774 } 3775 3776 if (rval != QLA_SUCCESS) { 3777 ql_dbg(ql_dbg_mbx, vha, 0x10b1, "Failed=%x.\n", rval); 3778 } else { 3779 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b2, 3780 "Done %s.\n", __func__); 3781 if (port_speed) 3782 *port_speed = mcp->mb[3]; 3783 } 3784 3785 return rval; 3786 } 3787 3788 int 3789 qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id, 3790 uint16_t port_speed, uint16_t *mb) 3791 { 3792 int rval; 3793 mbx_cmd_t mc; 3794 mbx_cmd_t *mcp = &mc; 3795 3796 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b3, 3797 "Entered %s.\n", __func__); 3798 3799 if (!IS_IIDMA_CAPABLE(vha->hw)) 3800 return QLA_FUNCTION_FAILED; 3801 3802 mcp->mb[0] = MBC_PORT_PARAMS; 3803 mcp->mb[1] = loop_id; 3804 mcp->mb[2] = BIT_0; 3805 mcp->mb[3] = port_speed & 0x3F; 3806 mcp->mb[9] = vha->vp_idx; 3807 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0; 3808 mcp->in_mb = MBX_3|MBX_1|MBX_0; 3809 mcp->tov = MBX_TOV_SECONDS; 3810 mcp->flags = 0; 3811 rval = qla2x00_mailbox_command(vha, mcp); 3812 3813 /* Return mailbox statuses. */ 3814 if (mb) { 3815 mb[0] = mcp->mb[0]; 3816 mb[1] = mcp->mb[1]; 3817 mb[3] = mcp->mb[3]; 3818 } 3819 3820 if (rval != QLA_SUCCESS) { 3821 ql_dbg(ql_dbg_mbx, vha, 0x10b4, 3822 "Failed=%x.\n", rval); 3823 } else { 3824 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b5, 3825 "Done %s.\n", __func__); 3826 } 3827 3828 return rval; 3829 } 3830 3831 void 3832 qla24xx_report_id_acquisition(scsi_qla_host_t *vha, 3833 struct vp_rpt_id_entry_24xx *rptid_entry) 3834 { 3835 struct qla_hw_data *ha = vha->hw; 3836 scsi_qla_host_t *vp = NULL; 3837 unsigned long flags; 3838 int found; 3839 port_id_t id; 3840 struct fc_port *fcport; 3841 3842 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6, 3843 "Entered %s.\n", __func__); 3844 3845 if (rptid_entry->entry_status != 0) 3846 return; 3847 3848 id.b.domain = rptid_entry->port_id[2]; 3849 id.b.area = rptid_entry->port_id[1]; 3850 id.b.al_pa = rptid_entry->port_id[0]; 3851 id.b.rsvd_1 = 0; 3852 ha->flags.n2n_ae = 0; 3853 3854 if (rptid_entry->format == 0) { 3855 /* loop */ 3856 ql_dbg(ql_dbg_async, vha, 0x10b7, 3857 "Format 0 : Number of VPs setup %d, number of " 3858 "VPs acquired %d.\n", rptid_entry->vp_setup, 3859 rptid_entry->vp_acquired); 3860 ql_dbg(ql_dbg_async, vha, 0x10b8, 3861 "Primary port id %02x%02x%02x.\n", 3862 rptid_entry->port_id[2], rptid_entry->port_id[1], 3863 rptid_entry->port_id[0]); 3864 ha->current_topology = ISP_CFG_NL; 3865 qlt_update_host_map(vha, id); 3866 3867 } else if (rptid_entry->format == 1) { 3868 /* fabric */ 3869 ql_dbg(ql_dbg_async, vha, 0x10b9, 3870 "Format 1: VP[%d] enabled - status %d - with " 3871 "port id %02x%02x%02x.\n", rptid_entry->vp_idx, 3872 rptid_entry->vp_status, 3873 rptid_entry->port_id[2], rptid_entry->port_id[1], 3874 rptid_entry->port_id[0]); 3875 ql_dbg(ql_dbg_async, vha, 0x5075, 3876 "Format 1: Remote WWPN %8phC.\n", 3877 rptid_entry->u.f1.port_name); 3878 3879 ql_dbg(ql_dbg_async, vha, 0x5075, 3880 "Format 1: WWPN %8phC.\n", 3881 vha->port_name); 3882 3883 switch (rptid_entry->u.f1.flags & TOPO_MASK) { 3884 case TOPO_N2N: 3885 ha->current_topology = ISP_CFG_N; 3886 spin_lock_irqsave(&vha->hw->tgt.sess_lock, 
flags); 3887 list_for_each_entry(fcport, &vha->vp_fcports, list) { 3888 fcport->scan_state = QLA_FCPORT_SCAN; 3889 fcport->n2n_flag = 0; 3890 } 3891 3892 fcport = qla2x00_find_fcport_by_wwpn(vha, 3893 rptid_entry->u.f1.port_name, 1); 3894 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 3895 3896 if (fcport) { 3897 fcport->plogi_nack_done_deadline = jiffies + HZ; 3898 fcport->dm_login_expire = jiffies + 2*HZ; 3899 fcport->scan_state = QLA_FCPORT_FOUND; 3900 fcport->n2n_flag = 1; 3901 fcport->keep_nport_handle = 1; 3902 fcport->fc4_type = FS_FC4TYPE_FCP; 3903 if (vha->flags.nvme_enabled) 3904 fcport->fc4_type |= FS_FC4TYPE_NVME; 3905 3906 switch (fcport->disc_state) { 3907 case DSC_DELETED: 3908 set_bit(RELOGIN_NEEDED, 3909 &vha->dpc_flags); 3910 break; 3911 case DSC_DELETE_PEND: 3912 break; 3913 default: 3914 qlt_schedule_sess_for_deletion(fcport); 3915 break; 3916 } 3917 } else { 3918 id.b24 = 0; 3919 if (wwn_to_u64(vha->port_name) > 3920 wwn_to_u64(rptid_entry->u.f1.port_name)) { 3921 vha->d_id.b24 = 0; 3922 vha->d_id.b.al_pa = 1; 3923 ha->flags.n2n_bigger = 1; 3924 ha->flags.n2n_ae = 0; 3925 3926 id.b.al_pa = 2; 3927 ql_dbg(ql_dbg_async, vha, 0x5075, 3928 "Format 1: assign local id %x remote id %x\n", 3929 vha->d_id.b24, id.b24); 3930 } else { 3931 ql_dbg(ql_dbg_async, vha, 0x5075, 3932 "Format 1: Remote login - Waiting for WWPN %8phC.\n", 3933 rptid_entry->u.f1.port_name); 3934 ha->flags.n2n_bigger = 0; 3935 ha->flags.n2n_ae = 1; 3936 } 3937 qla24xx_post_newsess_work(vha, &id, 3938 rptid_entry->u.f1.port_name, 3939 rptid_entry->u.f1.node_name, 3940 NULL, 3941 FS_FCP_IS_N2N); 3942 } 3943 3944 /* if our portname is higher then initiate N2N login */ 3945 3946 set_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags); 3947 return; 3948 break; 3949 case TOPO_FL: 3950 ha->current_topology = ISP_CFG_FL; 3951 break; 3952 case TOPO_F: 3953 ha->current_topology = ISP_CFG_F; 3954 break; 3955 default: 3956 break; 3957 } 3958 3959 ha->flags.gpsc_supported = 1; 3960 ha->current_topology = ISP_CFG_F; 3961 /* buffer to buffer credit flag */ 3962 vha->flags.bbcr_enable = (rptid_entry->u.f1.bbcr & 0xf) != 0; 3963 3964 if (rptid_entry->vp_idx == 0) { 3965 if (rptid_entry->vp_status == VP_STAT_COMPL) { 3966 /* FA-WWN is only for physical port */ 3967 if (qla_ini_mode_enabled(vha) && 3968 ha->flags.fawwpn_enabled && 3969 (rptid_entry->u.f1.flags & 3970 BIT_6)) { 3971 memcpy(vha->port_name, 3972 rptid_entry->u.f1.port_name, 3973 WWN_SIZE); 3974 } 3975 3976 qlt_update_host_map(vha, id); 3977 } 3978 3979 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags); 3980 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags); 3981 } else { 3982 if (rptid_entry->vp_status != VP_STAT_COMPL && 3983 rptid_entry->vp_status != VP_STAT_ID_CHG) { 3984 ql_dbg(ql_dbg_mbx, vha, 0x10ba, 3985 "Could not acquire ID for VP[%d].\n", 3986 rptid_entry->vp_idx); 3987 return; 3988 } 3989 3990 found = 0; 3991 spin_lock_irqsave(&ha->vport_slock, flags); 3992 list_for_each_entry(vp, &ha->vp_list, list) { 3993 if (rptid_entry->vp_idx == vp->vp_idx) { 3994 found = 1; 3995 break; 3996 } 3997 } 3998 spin_unlock_irqrestore(&ha->vport_slock, flags); 3999 4000 if (!found) 4001 return; 4002 4003 qlt_update_host_map(vp, id); 4004 4005 /* 4006 * Cannot configure here as we are still sitting on the 4007 * response queue. Handle it in dpc context. 
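			 * The dpc thread is kicked via VP_DPC_NEEDED and
			 * qla2xxx_wake_dpc() just below.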
4008 */ 4009 set_bit(VP_IDX_ACQUIRED, &vp->vp_flags); 4010 set_bit(REGISTER_FC4_NEEDED, &vp->dpc_flags); 4011 set_bit(REGISTER_FDMI_NEEDED, &vp->dpc_flags); 4012 } 4013 set_bit(VP_DPC_NEEDED, &vha->dpc_flags); 4014 qla2xxx_wake_dpc(vha); 4015 } else if (rptid_entry->format == 2) { 4016 ql_dbg(ql_dbg_async, vha, 0x505f, 4017 "RIDA: format 2/N2N Primary port id %02x%02x%02x.\n", 4018 rptid_entry->port_id[2], rptid_entry->port_id[1], 4019 rptid_entry->port_id[0]); 4020 4021 ql_dbg(ql_dbg_async, vha, 0x5075, 4022 "N2N: Remote WWPN %8phC.\n", 4023 rptid_entry->u.f2.port_name); 4024 4025 /* N2N. direct connect */ 4026 ha->current_topology = ISP_CFG_N; 4027 ha->flags.rida_fmt2 = 1; 4028 vha->d_id.b.domain = rptid_entry->port_id[2]; 4029 vha->d_id.b.area = rptid_entry->port_id[1]; 4030 vha->d_id.b.al_pa = rptid_entry->port_id[0]; 4031 4032 ha->flags.n2n_ae = 1; 4033 spin_lock_irqsave(&ha->vport_slock, flags); 4034 qlt_update_vp_map(vha, SET_AL_PA); 4035 spin_unlock_irqrestore(&ha->vport_slock, flags); 4036 4037 list_for_each_entry(fcport, &vha->vp_fcports, list) { 4038 fcport->scan_state = QLA_FCPORT_SCAN; 4039 fcport->n2n_flag = 0; 4040 } 4041 4042 fcport = qla2x00_find_fcport_by_wwpn(vha, 4043 rptid_entry->u.f2.port_name, 1); 4044 4045 if (fcport) { 4046 fcport->login_retry = vha->hw->login_retry_count; 4047 fcport->plogi_nack_done_deadline = jiffies + HZ; 4048 fcport->scan_state = QLA_FCPORT_FOUND; 4049 fcport->keep_nport_handle = 1; 4050 fcport->n2n_flag = 1; 4051 fcport->d_id.b.domain = 4052 rptid_entry->u.f2.remote_nport_id[2]; 4053 fcport->d_id.b.area = 4054 rptid_entry->u.f2.remote_nport_id[1]; 4055 fcport->d_id.b.al_pa = 4056 rptid_entry->u.f2.remote_nport_id[0]; 4057 } 4058 } 4059 } 4060 4061 /* 4062 * qla24xx_modify_vp_config 4063 * Change VP configuration for vha 4064 * 4065 * Input: 4066 * vha = adapter block pointer. 4067 * 4068 * Returns: 4069 * qla2xxx local function return status code. 4070 * 4071 * Context: 4072 * Kernel context. 
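 *
 * Note: the routine builds a single VP_CONFIG IOCB (VCT_COMMAND_MOD_ENABLE_VPS)
 * from the s_dma_pool, copies this vha's node/port names into index 1, issues
 * it through the base (physical) port, and on success moves the fc_vport to
 * FC_VPORT_INITIALIZING.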
4073 */ 4074 int 4075 qla24xx_modify_vp_config(scsi_qla_host_t *vha) 4076 { 4077 int rval; 4078 struct vp_config_entry_24xx *vpmod; 4079 dma_addr_t vpmod_dma; 4080 struct qla_hw_data *ha = vha->hw; 4081 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 4082 4083 /* This can be called by the parent */ 4084 4085 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10bb, 4086 "Entered %s.\n", __func__); 4087 4088 vpmod = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma); 4089 if (!vpmod) { 4090 ql_log(ql_log_warn, vha, 0x10bc, 4091 "Failed to allocate modify VP IOCB.\n"); 4092 return QLA_MEMORY_ALLOC_FAILED; 4093 } 4094 4095 vpmod->entry_type = VP_CONFIG_IOCB_TYPE; 4096 vpmod->entry_count = 1; 4097 vpmod->command = VCT_COMMAND_MOD_ENABLE_VPS; 4098 vpmod->vp_count = 1; 4099 vpmod->vp_index1 = vha->vp_idx; 4100 vpmod->options_idx1 = BIT_3|BIT_4|BIT_5; 4101 4102 qlt_modify_vp_config(vha, vpmod); 4103 4104 memcpy(vpmod->node_name_idx1, vha->node_name, WWN_SIZE); 4105 memcpy(vpmod->port_name_idx1, vha->port_name, WWN_SIZE); 4106 vpmod->entry_count = 1; 4107 4108 rval = qla2x00_issue_iocb(base_vha, vpmod, vpmod_dma, 0); 4109 if (rval != QLA_SUCCESS) { 4110 ql_dbg(ql_dbg_mbx, vha, 0x10bd, 4111 "Failed to issue VP config IOCB (%x).\n", rval); 4112 } else if (vpmod->comp_status != 0) { 4113 ql_dbg(ql_dbg_mbx, vha, 0x10be, 4114 "Failed to complete IOCB -- error status (%x).\n", 4115 vpmod->comp_status); 4116 rval = QLA_FUNCTION_FAILED; 4117 } else if (vpmod->comp_status != cpu_to_le16(CS_COMPLETE)) { 4118 ql_dbg(ql_dbg_mbx, vha, 0x10bf, 4119 "Failed to complete IOCB -- completion status (%x).\n", 4120 le16_to_cpu(vpmod->comp_status)); 4121 rval = QLA_FUNCTION_FAILED; 4122 } else { 4123 /* EMPTY */ 4124 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c0, 4125 "Done %s.\n", __func__); 4126 fc_vport_set_state(vha->fc_vport, FC_VPORT_INITIALIZING); 4127 } 4128 dma_pool_free(ha->s_dma_pool, vpmod, vpmod_dma); 4129 4130 return rval; 4131 } 4132 4133 /* 4134 * qla2x00_send_change_request 4135 * Receive or disable RSCN request from fabric controller 4136 * 4137 * Input: 4138 * ha = adapter block pointer 4139 * format = registration format: 4140 * 0 - Reserved 4141 * 1 - Fabric detected registration 4142 * 2 - N_port detected registration 4143 * 3 - Full registration 4144 * FF - clear registration 4145 * vp_idx = Virtual port index 4146 * 4147 * Returns: 4148 * qla2x00 local function return status code. 
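 *
 * Note:
 *	On a mailbox error, or when mb[0] is not MBS_COMMAND_COMPLETE,
 *	BIT_1 is returned instead of a QLA_* code, so callers should only
 *	test the result against QLA_SUCCESS.  Illustrative sketch only
 *	(format 3 = full registration per the table above; the retry
 *	handling shown is an assumption, not this driver's actual caller):
 *
 *		if (qla2x00_send_change_request(vha, 0x3, vha->vp_idx) !=
 *		    QLA_SUCCESS)
 *			set_bit(SCR_PENDING, &vha->dpc_flags);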
4149 * 4150 * Context: 4151 * Kernel Context 4152 */ 4153 4154 int 4155 qla2x00_send_change_request(scsi_qla_host_t *vha, uint16_t format, 4156 uint16_t vp_idx) 4157 { 4158 int rval; 4159 mbx_cmd_t mc; 4160 mbx_cmd_t *mcp = &mc; 4161 4162 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c7, 4163 "Entered %s.\n", __func__); 4164 4165 mcp->mb[0] = MBC_SEND_CHANGE_REQUEST; 4166 mcp->mb[1] = format; 4167 mcp->mb[9] = vp_idx; 4168 mcp->out_mb = MBX_9|MBX_1|MBX_0; 4169 mcp->in_mb = MBX_0|MBX_1; 4170 mcp->tov = MBX_TOV_SECONDS; 4171 mcp->flags = 0; 4172 rval = qla2x00_mailbox_command(vha, mcp); 4173 4174 if (rval == QLA_SUCCESS) { 4175 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) { 4176 rval = BIT_1; 4177 } 4178 } else 4179 rval = BIT_1; 4180 4181 return rval; 4182 } 4183 4184 int 4185 qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr, 4186 uint32_t size) 4187 { 4188 int rval; 4189 mbx_cmd_t mc; 4190 mbx_cmd_t *mcp = &mc; 4191 4192 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1009, 4193 "Entered %s.\n", __func__); 4194 4195 if (MSW(addr) || IS_FWI2_CAPABLE(vha->hw)) { 4196 mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED; 4197 mcp->mb[8] = MSW(addr); 4198 mcp->out_mb = MBX_8|MBX_0; 4199 } else { 4200 mcp->mb[0] = MBC_DUMP_RISC_RAM; 4201 mcp->out_mb = MBX_0; 4202 } 4203 mcp->mb[1] = LSW(addr); 4204 mcp->mb[2] = MSW(req_dma); 4205 mcp->mb[3] = LSW(req_dma); 4206 mcp->mb[6] = MSW(MSD(req_dma)); 4207 mcp->mb[7] = LSW(MSD(req_dma)); 4208 mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1; 4209 if (IS_FWI2_CAPABLE(vha->hw)) { 4210 mcp->mb[4] = MSW(size); 4211 mcp->mb[5] = LSW(size); 4212 mcp->out_mb |= MBX_5|MBX_4; 4213 } else { 4214 mcp->mb[4] = LSW(size); 4215 mcp->out_mb |= MBX_4; 4216 } 4217 4218 mcp->in_mb = MBX_0; 4219 mcp->tov = MBX_TOV_SECONDS; 4220 mcp->flags = 0; 4221 rval = qla2x00_mailbox_command(vha, mcp); 4222 4223 if (rval != QLA_SUCCESS) { 4224 ql_dbg(ql_dbg_mbx, vha, 0x1008, 4225 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4226 } else { 4227 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1007, 4228 "Done %s.\n", __func__); 4229 } 4230 4231 return rval; 4232 } 4233 /* 84XX Support **************************************************************/ 4234 4235 struct cs84xx_mgmt_cmd { 4236 union { 4237 struct verify_chip_entry_84xx req; 4238 struct verify_chip_rsp_84xx rsp; 4239 } p; 4240 }; 4241 4242 int 4243 qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status) 4244 { 4245 int rval, retry; 4246 struct cs84xx_mgmt_cmd *mn; 4247 dma_addr_t mn_dma; 4248 uint16_t options; 4249 unsigned long flags; 4250 struct qla_hw_data *ha = vha->hw; 4251 4252 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c8, 4253 "Entered %s.\n", __func__); 4254 4255 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma); 4256 if (mn == NULL) { 4257 return QLA_MEMORY_ALLOC_FAILED; 4258 } 4259 4260 /* Force Update? */ 4261 options = ha->cs84xx->fw_update ? VCO_FORCE_UPDATE : 0; 4262 /* Diagnostic firmware? */ 4263 /* options |= MENLO_DIAG_FW; */ 4264 /* We update the firmware with only one data sequence. 
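 * If the verify IOCB then completes with a status other than CS_COMPLETE
 * while an update was requested, the loop below retries once with
 * VCO_DONT_UPDATE_FW set (and VCO_FORCE_UPDATE cleared), i.e. verify-only.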
*/ 4265 options |= VCO_END_OF_DATA; 4266 4267 do { 4268 retry = 0; 4269 memset(mn, 0, sizeof(*mn)); 4270 mn->p.req.entry_type = VERIFY_CHIP_IOCB_TYPE; 4271 mn->p.req.entry_count = 1; 4272 mn->p.req.options = cpu_to_le16(options); 4273 4274 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111c, 4275 "Dump of Verify Request.\n"); 4276 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111e, 4277 mn, sizeof(*mn)); 4278 4279 rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120); 4280 if (rval != QLA_SUCCESS) { 4281 ql_dbg(ql_dbg_mbx, vha, 0x10cb, 4282 "Failed to issue verify IOCB (%x).\n", rval); 4283 goto verify_done; 4284 } 4285 4286 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1110, 4287 "Dump of Verify Response.\n"); 4288 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1118, 4289 mn, sizeof(*mn)); 4290 4291 status[0] = le16_to_cpu(mn->p.rsp.comp_status); 4292 status[1] = status[0] == CS_VCS_CHIP_FAILURE ? 4293 le16_to_cpu(mn->p.rsp.failure_code) : 0; 4294 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ce, 4295 "cs=%x fc=%x.\n", status[0], status[1]); 4296 4297 if (status[0] != CS_COMPLETE) { 4298 rval = QLA_FUNCTION_FAILED; 4299 if (!(options & VCO_DONT_UPDATE_FW)) { 4300 ql_dbg(ql_dbg_mbx, vha, 0x10cf, 4301 "Firmware update failed. Retrying " 4302 "without update firmware.\n"); 4303 options |= VCO_DONT_UPDATE_FW; 4304 options &= ~VCO_FORCE_UPDATE; 4305 retry = 1; 4306 } 4307 } else { 4308 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d0, 4309 "Firmware updated to %x.\n", 4310 le32_to_cpu(mn->p.rsp.fw_ver)); 4311 4312 /* NOTE: we only update OP firmware. */ 4313 spin_lock_irqsave(&ha->cs84xx->access_lock, flags); 4314 ha->cs84xx->op_fw_version = 4315 le32_to_cpu(mn->p.rsp.fw_ver); 4316 spin_unlock_irqrestore(&ha->cs84xx->access_lock, 4317 flags); 4318 } 4319 } while (retry); 4320 4321 verify_done: 4322 dma_pool_free(ha->s_dma_pool, mn, mn_dma); 4323 4324 if (rval != QLA_SUCCESS) { 4325 ql_dbg(ql_dbg_mbx, vha, 0x10d1, 4326 "Failed=%x.\n", rval); 4327 } else { 4328 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d2, 4329 "Done %s.\n", __func__); 4330 } 4331 4332 return rval; 4333 } 4334 4335 int 4336 qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req) 4337 { 4338 int rval; 4339 unsigned long flags; 4340 mbx_cmd_t mc; 4341 mbx_cmd_t *mcp = &mc; 4342 struct qla_hw_data *ha = vha->hw; 4343 4344 if (!ha->flags.fw_started) 4345 return QLA_SUCCESS; 4346 4347 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d3, 4348 "Entered %s.\n", __func__); 4349 4350 if (IS_SHADOW_REG_CAPABLE(ha)) 4351 req->options |= BIT_13; 4352 4353 mcp->mb[0] = MBC_INITIALIZE_MULTIQ; 4354 mcp->mb[1] = req->options; 4355 mcp->mb[2] = MSW(LSD(req->dma)); 4356 mcp->mb[3] = LSW(LSD(req->dma)); 4357 mcp->mb[6] = MSW(MSD(req->dma)); 4358 mcp->mb[7] = LSW(MSD(req->dma)); 4359 mcp->mb[5] = req->length; 4360 if (req->rsp) 4361 mcp->mb[10] = req->rsp->id; 4362 mcp->mb[12] = req->qos; 4363 mcp->mb[11] = req->vp_idx; 4364 mcp->mb[13] = req->rid; 4365 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) 4366 mcp->mb[15] = 0; 4367 4368 mcp->mb[4] = req->id; 4369 /* que in ptr index */ 4370 mcp->mb[8] = 0; 4371 /* que out ptr index */ 4372 mcp->mb[9] = *req->out_ptr = 0; 4373 mcp->out_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7| 4374 MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 4375 mcp->in_mb = MBX_0; 4376 mcp->flags = MBX_DMA_OUT; 4377 mcp->tov = MBX_TOV_SECONDS * 2; 4378 4379 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) || 4380 IS_QLA28XX(ha)) 4381 mcp->in_mb |= MBX_1; 4382 if (IS_QLA83XX(ha) || 
IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 4383 mcp->out_mb |= MBX_15; 4384 /* debug q create issue in SR-IOV */ 4385 mcp->in_mb |= MBX_9 | MBX_8 | MBX_7; 4386 } 4387 4388 spin_lock_irqsave(&ha->hardware_lock, flags); 4389 if (!(req->options & BIT_0)) { 4390 WRT_REG_DWORD(req->req_q_in, 0); 4391 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 4392 WRT_REG_DWORD(req->req_q_out, 0); 4393 } 4394 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4395 4396 rval = qla2x00_mailbox_command(vha, mcp); 4397 if (rval != QLA_SUCCESS) { 4398 ql_dbg(ql_dbg_mbx, vha, 0x10d4, 4399 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4400 } else { 4401 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d5, 4402 "Done %s.\n", __func__); 4403 } 4404 4405 return rval; 4406 } 4407 4408 int 4409 qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp) 4410 { 4411 int rval; 4412 unsigned long flags; 4413 mbx_cmd_t mc; 4414 mbx_cmd_t *mcp = &mc; 4415 struct qla_hw_data *ha = vha->hw; 4416 4417 if (!ha->flags.fw_started) 4418 return QLA_SUCCESS; 4419 4420 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d6, 4421 "Entered %s.\n", __func__); 4422 4423 if (IS_SHADOW_REG_CAPABLE(ha)) 4424 rsp->options |= BIT_13; 4425 4426 mcp->mb[0] = MBC_INITIALIZE_MULTIQ; 4427 mcp->mb[1] = rsp->options; 4428 mcp->mb[2] = MSW(LSD(rsp->dma)); 4429 mcp->mb[3] = LSW(LSD(rsp->dma)); 4430 mcp->mb[6] = MSW(MSD(rsp->dma)); 4431 mcp->mb[7] = LSW(MSD(rsp->dma)); 4432 mcp->mb[5] = rsp->length; 4433 mcp->mb[14] = rsp->msix->entry; 4434 mcp->mb[13] = rsp->rid; 4435 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) 4436 mcp->mb[15] = 0; 4437 4438 mcp->mb[4] = rsp->id; 4439 /* que in ptr index */ 4440 mcp->mb[8] = *rsp->in_ptr = 0; 4441 /* que out ptr index */ 4442 mcp->mb[9] = 0; 4443 mcp->out_mb = MBX_14|MBX_13|MBX_9|MBX_8|MBX_7 4444 |MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 4445 mcp->in_mb = MBX_0; 4446 mcp->flags = MBX_DMA_OUT; 4447 mcp->tov = MBX_TOV_SECONDS * 2; 4448 4449 if (IS_QLA81XX(ha)) { 4450 mcp->out_mb |= MBX_12|MBX_11|MBX_10; 4451 mcp->in_mb |= MBX_1; 4452 } else if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 4453 mcp->out_mb |= MBX_15|MBX_12|MBX_11|MBX_10; 4454 mcp->in_mb |= MBX_1; 4455 /* debug q create issue in SR-IOV */ 4456 mcp->in_mb |= MBX_9 | MBX_8 | MBX_7; 4457 } 4458 4459 spin_lock_irqsave(&ha->hardware_lock, flags); 4460 if (!(rsp->options & BIT_0)) { 4461 WRT_REG_DWORD(rsp->rsp_q_out, 0); 4462 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 4463 WRT_REG_DWORD(rsp->rsp_q_in, 0); 4464 } 4465 4466 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4467 4468 rval = qla2x00_mailbox_command(vha, mcp); 4469 if (rval != QLA_SUCCESS) { 4470 ql_dbg(ql_dbg_mbx, vha, 0x10d7, 4471 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4472 } else { 4473 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d8, 4474 "Done %s.\n", __func__); 4475 } 4476 4477 return rval; 4478 } 4479 4480 int 4481 qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb) 4482 { 4483 int rval; 4484 mbx_cmd_t mc; 4485 mbx_cmd_t *mcp = &mc; 4486 4487 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d9, 4488 "Entered %s.\n", __func__); 4489 4490 mcp->mb[0] = MBC_IDC_ACK; 4491 memcpy(&mcp->mb[1], mb, QLA_IDC_ACK_REGS * sizeof(uint16_t)); 4492 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 4493 mcp->in_mb = MBX_0; 4494 mcp->tov = MBX_TOV_SECONDS; 4495 mcp->flags = 0; 4496 rval = qla2x00_mailbox_command(vha, mcp); 4497 4498 if (rval != QLA_SUCCESS) { 4499 ql_dbg(ql_dbg_mbx, vha, 0x10da, 4500 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4501 } else 
{ 4502 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10db, 4503 "Done %s.\n", __func__); 4504 } 4505 4506 return rval; 4507 } 4508 4509 int 4510 qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size) 4511 { 4512 int rval; 4513 mbx_cmd_t mc; 4514 mbx_cmd_t *mcp = &mc; 4515 4516 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10dc, 4517 "Entered %s.\n", __func__); 4518 4519 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) && 4520 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw)) 4521 return QLA_FUNCTION_FAILED; 4522 4523 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; 4524 mcp->mb[1] = FAC_OPT_CMD_GET_SECTOR_SIZE; 4525 mcp->out_mb = MBX_1|MBX_0; 4526 mcp->in_mb = MBX_1|MBX_0; 4527 mcp->tov = MBX_TOV_SECONDS; 4528 mcp->flags = 0; 4529 rval = qla2x00_mailbox_command(vha, mcp); 4530 4531 if (rval != QLA_SUCCESS) { 4532 ql_dbg(ql_dbg_mbx, vha, 0x10dd, 4533 "Failed=%x mb[0]=%x mb[1]=%x.\n", 4534 rval, mcp->mb[0], mcp->mb[1]); 4535 } else { 4536 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10de, 4537 "Done %s.\n", __func__); 4538 *sector_size = mcp->mb[1]; 4539 } 4540 4541 return rval; 4542 } 4543 4544 int 4545 qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable) 4546 { 4547 int rval; 4548 mbx_cmd_t mc; 4549 mbx_cmd_t *mcp = &mc; 4550 4551 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) && 4552 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw)) 4553 return QLA_FUNCTION_FAILED; 4554 4555 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10df, 4556 "Entered %s.\n", __func__); 4557 4558 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; 4559 mcp->mb[1] = enable ? FAC_OPT_CMD_WRITE_ENABLE : 4560 FAC_OPT_CMD_WRITE_PROTECT; 4561 mcp->out_mb = MBX_1|MBX_0; 4562 mcp->in_mb = MBX_1|MBX_0; 4563 mcp->tov = MBX_TOV_SECONDS; 4564 mcp->flags = 0; 4565 rval = qla2x00_mailbox_command(vha, mcp); 4566 4567 if (rval != QLA_SUCCESS) { 4568 ql_dbg(ql_dbg_mbx, vha, 0x10e0, 4569 "Failed=%x mb[0]=%x mb[1]=%x.\n", 4570 rval, mcp->mb[0], mcp->mb[1]); 4571 } else { 4572 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e1, 4573 "Done %s.\n", __func__); 4574 } 4575 4576 return rval; 4577 } 4578 4579 int 4580 qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish) 4581 { 4582 int rval; 4583 mbx_cmd_t mc; 4584 mbx_cmd_t *mcp = &mc; 4585 4586 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) && 4587 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw)) 4588 return QLA_FUNCTION_FAILED; 4589 4590 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2, 4591 "Entered %s.\n", __func__); 4592 4593 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; 4594 mcp->mb[1] = FAC_OPT_CMD_ERASE_SECTOR; 4595 mcp->mb[2] = LSW(start); 4596 mcp->mb[3] = MSW(start); 4597 mcp->mb[4] = LSW(finish); 4598 mcp->mb[5] = MSW(finish); 4599 mcp->out_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 4600 mcp->in_mb = MBX_2|MBX_1|MBX_0; 4601 mcp->tov = MBX_TOV_SECONDS; 4602 mcp->flags = 0; 4603 rval = qla2x00_mailbox_command(vha, mcp); 4604 4605 if (rval != QLA_SUCCESS) { 4606 ql_dbg(ql_dbg_mbx, vha, 0x10e3, 4607 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", 4608 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); 4609 } else { 4610 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4, 4611 "Done %s.\n", __func__); 4612 } 4613 4614 return rval; 4615 } 4616 4617 int 4618 qla81xx_fac_semaphore_access(scsi_qla_host_t *vha, int lock) 4619 { 4620 int rval = QLA_SUCCESS; 4621 mbx_cmd_t mc; 4622 mbx_cmd_t *mcp = &mc; 4623 struct qla_hw_data *ha = vha->hw; 4624 4625 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && 4626 !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 4627 return rval; 4628 4629 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2, 4630 
"Entered %s.\n", __func__); 4631 4632 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; 4633 mcp->mb[1] = (lock ? FAC_OPT_CMD_LOCK_SEMAPHORE : 4634 FAC_OPT_CMD_UNLOCK_SEMAPHORE); 4635 mcp->out_mb = MBX_1|MBX_0; 4636 mcp->in_mb = MBX_1|MBX_0; 4637 mcp->tov = MBX_TOV_SECONDS; 4638 mcp->flags = 0; 4639 rval = qla2x00_mailbox_command(vha, mcp); 4640 4641 if (rval != QLA_SUCCESS) { 4642 ql_dbg(ql_dbg_mbx, vha, 0x10e3, 4643 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", 4644 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); 4645 } else { 4646 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4, 4647 "Done %s.\n", __func__); 4648 } 4649 4650 return rval; 4651 } 4652 4653 int 4654 qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha) 4655 { 4656 int rval = 0; 4657 mbx_cmd_t mc; 4658 mbx_cmd_t *mcp = &mc; 4659 4660 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e5, 4661 "Entered %s.\n", __func__); 4662 4663 mcp->mb[0] = MBC_RESTART_MPI_FW; 4664 mcp->out_mb = MBX_0; 4665 mcp->in_mb = MBX_0|MBX_1; 4666 mcp->tov = MBX_TOV_SECONDS; 4667 mcp->flags = 0; 4668 rval = qla2x00_mailbox_command(vha, mcp); 4669 4670 if (rval != QLA_SUCCESS) { 4671 ql_dbg(ql_dbg_mbx, vha, 0x10e6, 4672 "Failed=%x mb[0]=%x mb[1]=%x.\n", 4673 rval, mcp->mb[0], mcp->mb[1]); 4674 } else { 4675 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e7, 4676 "Done %s.\n", __func__); 4677 } 4678 4679 return rval; 4680 } 4681 4682 int 4683 qla82xx_set_driver_version(scsi_qla_host_t *vha, char *version) 4684 { 4685 int rval; 4686 mbx_cmd_t mc; 4687 mbx_cmd_t *mcp = &mc; 4688 int i; 4689 int len; 4690 uint16_t *str; 4691 struct qla_hw_data *ha = vha->hw; 4692 4693 if (!IS_P3P_TYPE(ha)) 4694 return QLA_FUNCTION_FAILED; 4695 4696 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117b, 4697 "Entered %s.\n", __func__); 4698 4699 str = (void *)version; 4700 len = strlen(version); 4701 4702 mcp->mb[0] = MBC_SET_RNID_PARAMS; 4703 mcp->mb[1] = RNID_TYPE_SET_VERSION << 8; 4704 mcp->out_mb = MBX_1|MBX_0; 4705 for (i = 4; i < 16 && len; i++, str++, len -= 2) { 4706 mcp->mb[i] = cpu_to_le16p(str); 4707 mcp->out_mb |= 1<<i; 4708 } 4709 for (; i < 16; i++) { 4710 mcp->mb[i] = 0; 4711 mcp->out_mb |= 1<<i; 4712 } 4713 mcp->in_mb = MBX_1|MBX_0; 4714 mcp->tov = MBX_TOV_SECONDS; 4715 mcp->flags = 0; 4716 rval = qla2x00_mailbox_command(vha, mcp); 4717 4718 if (rval != QLA_SUCCESS) { 4719 ql_dbg(ql_dbg_mbx, vha, 0x117c, 4720 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]); 4721 } else { 4722 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117d, 4723 "Done %s.\n", __func__); 4724 } 4725 4726 return rval; 4727 } 4728 4729 int 4730 qla25xx_set_driver_version(scsi_qla_host_t *vha, char *version) 4731 { 4732 int rval; 4733 mbx_cmd_t mc; 4734 mbx_cmd_t *mcp = &mc; 4735 int len; 4736 uint16_t dwlen; 4737 uint8_t *str; 4738 dma_addr_t str_dma; 4739 struct qla_hw_data *ha = vha->hw; 4740 4741 if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha) || 4742 IS_P3P_TYPE(ha)) 4743 return QLA_FUNCTION_FAILED; 4744 4745 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117e, 4746 "Entered %s.\n", __func__); 4747 4748 str = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &str_dma); 4749 if (!str) { 4750 ql_log(ql_log_warn, vha, 0x117f, 4751 "Failed to allocate driver version param.\n"); 4752 return QLA_MEMORY_ALLOC_FAILED; 4753 } 4754 4755 memcpy(str, "\x7\x3\x11\x0", 4); 4756 dwlen = str[0]; 4757 len = dwlen * 4 - 4; 4758 memset(str + 4, 0, len); 4759 if (len > strlen(version)) 4760 len = strlen(version); 4761 memcpy(str + 4, version, len); 4762 4763 mcp->mb[0] = MBC_SET_RNID_PARAMS; 4764 mcp->mb[1] = 
RNID_TYPE_SET_VERSION << 8 | dwlen; 4765 mcp->mb[2] = MSW(LSD(str_dma)); 4766 mcp->mb[3] = LSW(LSD(str_dma)); 4767 mcp->mb[6] = MSW(MSD(str_dma)); 4768 mcp->mb[7] = LSW(MSD(str_dma)); 4769 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 4770 mcp->in_mb = MBX_1|MBX_0; 4771 mcp->tov = MBX_TOV_SECONDS; 4772 mcp->flags = 0; 4773 rval = qla2x00_mailbox_command(vha, mcp); 4774 4775 if (rval != QLA_SUCCESS) { 4776 ql_dbg(ql_dbg_mbx, vha, 0x1180, 4777 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]); 4778 } else { 4779 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1181, 4780 "Done %s.\n", __func__); 4781 } 4782 4783 dma_pool_free(ha->s_dma_pool, str, str_dma); 4784 4785 return rval; 4786 } 4787 4788 int 4789 qla24xx_get_port_login_templ(scsi_qla_host_t *vha, dma_addr_t buf_dma, 4790 void *buf, uint16_t bufsiz) 4791 { 4792 int rval, i; 4793 mbx_cmd_t mc; 4794 mbx_cmd_t *mcp = &mc; 4795 uint32_t *bp; 4796 4797 if (!IS_FWI2_CAPABLE(vha->hw)) 4798 return QLA_FUNCTION_FAILED; 4799 4800 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159, 4801 "Entered %s.\n", __func__); 4802 4803 mcp->mb[0] = MBC_GET_RNID_PARAMS; 4804 mcp->mb[1] = RNID_TYPE_PORT_LOGIN << 8; 4805 mcp->mb[2] = MSW(buf_dma); 4806 mcp->mb[3] = LSW(buf_dma); 4807 mcp->mb[6] = MSW(MSD(buf_dma)); 4808 mcp->mb[7] = LSW(MSD(buf_dma)); 4809 mcp->mb[8] = bufsiz/4; 4810 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 4811 mcp->in_mb = MBX_1|MBX_0; 4812 mcp->tov = MBX_TOV_SECONDS; 4813 mcp->flags = 0; 4814 rval = qla2x00_mailbox_command(vha, mcp); 4815 4816 if (rval != QLA_SUCCESS) { 4817 ql_dbg(ql_dbg_mbx, vha, 0x115a, 4818 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]); 4819 } else { 4820 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b, 4821 "Done %s.\n", __func__); 4822 bp = (uint32_t *) buf; 4823 for (i = 0; i < (bufsiz-4)/4; i++, bp++) 4824 *bp = le32_to_cpu(*bp); 4825 } 4826 4827 return rval; 4828 } 4829 4830 static int 4831 qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp) 4832 { 4833 int rval; 4834 mbx_cmd_t mc; 4835 mbx_cmd_t *mcp = &mc; 4836 4837 if (!IS_FWI2_CAPABLE(vha->hw)) 4838 return QLA_FUNCTION_FAILED; 4839 4840 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159, 4841 "Entered %s.\n", __func__); 4842 4843 mcp->mb[0] = MBC_GET_RNID_PARAMS; 4844 mcp->mb[1] = RNID_TYPE_ASIC_TEMP << 8; 4845 mcp->out_mb = MBX_1|MBX_0; 4846 mcp->in_mb = MBX_1|MBX_0; 4847 mcp->tov = MBX_TOV_SECONDS; 4848 mcp->flags = 0; 4849 rval = qla2x00_mailbox_command(vha, mcp); 4850 *temp = mcp->mb[1]; 4851 4852 if (rval != QLA_SUCCESS) { 4853 ql_dbg(ql_dbg_mbx, vha, 0x115a, 4854 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]); 4855 } else { 4856 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b, 4857 "Done %s.\n", __func__); 4858 } 4859 4860 return rval; 4861 } 4862 4863 int 4864 qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp, 4865 uint16_t dev, uint16_t off, uint16_t len, uint16_t opt) 4866 { 4867 int rval; 4868 mbx_cmd_t mc; 4869 mbx_cmd_t *mcp = &mc; 4870 struct qla_hw_data *ha = vha->hw; 4871 4872 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8, 4873 "Entered %s.\n", __func__); 4874 4875 if (!IS_FWI2_CAPABLE(ha)) 4876 return QLA_FUNCTION_FAILED; 4877 4878 if (len == 1) 4879 opt |= BIT_0; 4880 4881 mcp->mb[0] = MBC_READ_SFP; 4882 mcp->mb[1] = dev; 4883 mcp->mb[2] = MSW(sfp_dma); 4884 mcp->mb[3] = LSW(sfp_dma); 4885 mcp->mb[6] = MSW(MSD(sfp_dma)); 4886 mcp->mb[7] = LSW(MSD(sfp_dma)); 4887 mcp->mb[8] = len; 4888 mcp->mb[9] = off; 4889 mcp->mb[10] = opt; 4890 mcp->out_mb = 
MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 4891 mcp->in_mb = MBX_1|MBX_0; 4892 mcp->tov = MBX_TOV_SECONDS; 4893 mcp->flags = 0; 4894 rval = qla2x00_mailbox_command(vha, mcp); 4895 4896 if (opt & BIT_0) 4897 *sfp = mcp->mb[1]; 4898 4899 if (rval != QLA_SUCCESS) { 4900 ql_dbg(ql_dbg_mbx, vha, 0x10e9, 4901 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4902 if (mcp->mb[0] == MBS_COMMAND_ERROR && mcp->mb[1] == 0x22) { 4903 /* sfp is not there */ 4904 rval = QLA_INTERFACE_ERROR; 4905 } 4906 } else { 4907 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea, 4908 "Done %s.\n", __func__); 4909 } 4910 4911 return rval; 4912 } 4913 4914 int 4915 qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp, 4916 uint16_t dev, uint16_t off, uint16_t len, uint16_t opt) 4917 { 4918 int rval; 4919 mbx_cmd_t mc; 4920 mbx_cmd_t *mcp = &mc; 4921 struct qla_hw_data *ha = vha->hw; 4922 4923 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10eb, 4924 "Entered %s.\n", __func__); 4925 4926 if (!IS_FWI2_CAPABLE(ha)) 4927 return QLA_FUNCTION_FAILED; 4928 4929 if (len == 1) 4930 opt |= BIT_0; 4931 4932 if (opt & BIT_0) 4933 len = *sfp; 4934 4935 mcp->mb[0] = MBC_WRITE_SFP; 4936 mcp->mb[1] = dev; 4937 mcp->mb[2] = MSW(sfp_dma); 4938 mcp->mb[3] = LSW(sfp_dma); 4939 mcp->mb[6] = MSW(MSD(sfp_dma)); 4940 mcp->mb[7] = LSW(MSD(sfp_dma)); 4941 mcp->mb[8] = len; 4942 mcp->mb[9] = off; 4943 mcp->mb[10] = opt; 4944 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 4945 mcp->in_mb = MBX_1|MBX_0; 4946 mcp->tov = MBX_TOV_SECONDS; 4947 mcp->flags = 0; 4948 rval = qla2x00_mailbox_command(vha, mcp); 4949 4950 if (rval != QLA_SUCCESS) { 4951 ql_dbg(ql_dbg_mbx, vha, 0x10ec, 4952 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4953 } else { 4954 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ed, 4955 "Done %s.\n", __func__); 4956 } 4957 4958 return rval; 4959 } 4960 4961 int 4962 qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma, 4963 uint16_t size_in_bytes, uint16_t *actual_size) 4964 { 4965 int rval; 4966 mbx_cmd_t mc; 4967 mbx_cmd_t *mcp = &mc; 4968 4969 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ee, 4970 "Entered %s.\n", __func__); 4971 4972 if (!IS_CNA_CAPABLE(vha->hw)) 4973 return QLA_FUNCTION_FAILED; 4974 4975 mcp->mb[0] = MBC_GET_XGMAC_STATS; 4976 mcp->mb[2] = MSW(stats_dma); 4977 mcp->mb[3] = LSW(stats_dma); 4978 mcp->mb[6] = MSW(MSD(stats_dma)); 4979 mcp->mb[7] = LSW(MSD(stats_dma)); 4980 mcp->mb[8] = size_in_bytes >> 2; 4981 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0; 4982 mcp->in_mb = MBX_2|MBX_1|MBX_0; 4983 mcp->tov = MBX_TOV_SECONDS; 4984 mcp->flags = 0; 4985 rval = qla2x00_mailbox_command(vha, mcp); 4986 4987 if (rval != QLA_SUCCESS) { 4988 ql_dbg(ql_dbg_mbx, vha, 0x10ef, 4989 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", 4990 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); 4991 } else { 4992 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f0, 4993 "Done %s.\n", __func__); 4994 4995 4996 *actual_size = mcp->mb[2] << 2; 4997 } 4998 4999 return rval; 5000 } 5001 5002 int 5003 qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma, 5004 uint16_t size) 5005 { 5006 int rval; 5007 mbx_cmd_t mc; 5008 mbx_cmd_t *mcp = &mc; 5009 5010 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f1, 5011 "Entered %s.\n", __func__); 5012 5013 if (!IS_CNA_CAPABLE(vha->hw)) 5014 return QLA_FUNCTION_FAILED; 5015 5016 mcp->mb[0] = MBC_GET_DCBX_PARAMS; 5017 mcp->mb[1] = 0; 5018 mcp->mb[2] = MSW(tlv_dma); 5019 mcp->mb[3] = LSW(tlv_dma); 5020 mcp->mb[6] = MSW(MSD(tlv_dma)); 5021 mcp->mb[7] = 
LSW(MSD(tlv_dma)); 5022 mcp->mb[8] = size; 5023 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 5024 mcp->in_mb = MBX_2|MBX_1|MBX_0; 5025 mcp->tov = MBX_TOV_SECONDS; 5026 mcp->flags = 0; 5027 rval = qla2x00_mailbox_command(vha, mcp); 5028 5029 if (rval != QLA_SUCCESS) { 5030 ql_dbg(ql_dbg_mbx, vha, 0x10f2, 5031 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", 5032 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); 5033 } else { 5034 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f3, 5035 "Done %s.\n", __func__); 5036 } 5037 5038 return rval; 5039 } 5040 5041 int 5042 qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data) 5043 { 5044 int rval; 5045 mbx_cmd_t mc; 5046 mbx_cmd_t *mcp = &mc; 5047 5048 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f4, 5049 "Entered %s.\n", __func__); 5050 5051 if (!IS_FWI2_CAPABLE(vha->hw)) 5052 return QLA_FUNCTION_FAILED; 5053 5054 mcp->mb[0] = MBC_READ_RAM_EXTENDED; 5055 mcp->mb[1] = LSW(risc_addr); 5056 mcp->mb[8] = MSW(risc_addr); 5057 mcp->out_mb = MBX_8|MBX_1|MBX_0; 5058 mcp->in_mb = MBX_3|MBX_2|MBX_0; 5059 mcp->tov = 30; 5060 mcp->flags = 0; 5061 rval = qla2x00_mailbox_command(vha, mcp); 5062 if (rval != QLA_SUCCESS) { 5063 ql_dbg(ql_dbg_mbx, vha, 0x10f5, 5064 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5065 } else { 5066 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f6, 5067 "Done %s.\n", __func__); 5068 *data = mcp->mb[3] << 16 | mcp->mb[2]; 5069 } 5070 5071 return rval; 5072 } 5073 5074 int 5075 qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, 5076 uint16_t *mresp) 5077 { 5078 int rval; 5079 mbx_cmd_t mc; 5080 mbx_cmd_t *mcp = &mc; 5081 5082 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f7, 5083 "Entered %s.\n", __func__); 5084 5085 memset(mcp->mb, 0 , sizeof(mcp->mb)); 5086 mcp->mb[0] = MBC_DIAGNOSTIC_LOOP_BACK; 5087 mcp->mb[1] = mreq->options | BIT_6; // BIT_6 specifies 64 bit addressing 5088 5089 /* transfer count */ 5090 mcp->mb[10] = LSW(mreq->transfer_size); 5091 mcp->mb[11] = MSW(mreq->transfer_size); 5092 5093 /* send data address */ 5094 mcp->mb[14] = LSW(mreq->send_dma); 5095 mcp->mb[15] = MSW(mreq->send_dma); 5096 mcp->mb[20] = LSW(MSD(mreq->send_dma)); 5097 mcp->mb[21] = MSW(MSD(mreq->send_dma)); 5098 5099 /* receive data address */ 5100 mcp->mb[16] = LSW(mreq->rcv_dma); 5101 mcp->mb[17] = MSW(mreq->rcv_dma); 5102 mcp->mb[6] = LSW(MSD(mreq->rcv_dma)); 5103 mcp->mb[7] = MSW(MSD(mreq->rcv_dma)); 5104 5105 /* Iteration count */ 5106 mcp->mb[18] = LSW(mreq->iteration_count); 5107 mcp->mb[19] = MSW(mreq->iteration_count); 5108 5109 mcp->out_mb = MBX_21|MBX_20|MBX_19|MBX_18|MBX_17|MBX_16|MBX_15| 5110 MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0; 5111 if (IS_CNA_CAPABLE(vha->hw)) 5112 mcp->out_mb |= MBX_2; 5113 mcp->in_mb = MBX_19|MBX_18|MBX_3|MBX_2|MBX_1|MBX_0; 5114 5115 mcp->buf_size = mreq->transfer_size; 5116 mcp->tov = MBX_TOV_SECONDS; 5117 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; 5118 5119 rval = qla2x00_mailbox_command(vha, mcp); 5120 5121 if (rval != QLA_SUCCESS) { 5122 ql_dbg(ql_dbg_mbx, vha, 0x10f8, 5123 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[18]=%x " 5124 "mb[19]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], 5125 mcp->mb[3], mcp->mb[18], mcp->mb[19]); 5126 } else { 5127 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f9, 5128 "Done %s.\n", __func__); 5129 } 5130 5131 /* Copy mailbox information */ 5132 memcpy( mresp, mcp->mb, 64); 5133 return rval; 5134 } 5135 5136 int 5137 qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, 5138 uint16_t *mresp) 5139 { 
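	/*
	 * Diagnostic ECHO: same pattern as qla2x00_loopback_test() above but
	 * using MBC_DIAGNOSTIC_ECHO.  mb[1] carries the caller's options plus
	 * BIT_15 and BIT_6 (64-bit DMA addressing); the raw mailbox registers
	 * are copied back into mresp for the caller to decode.
	 */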
5140 int rval; 5141 mbx_cmd_t mc; 5142 mbx_cmd_t *mcp = &mc; 5143 struct qla_hw_data *ha = vha->hw; 5144 5145 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fa, 5146 "Entered %s.\n", __func__); 5147 5148 memset(mcp->mb, 0 , sizeof(mcp->mb)); 5149 mcp->mb[0] = MBC_DIAGNOSTIC_ECHO; 5150 /* BIT_6 specifies 64bit address */ 5151 mcp->mb[1] = mreq->options | BIT_15 | BIT_6; 5152 if (IS_CNA_CAPABLE(ha)) { 5153 mcp->mb[2] = vha->fcoe_fcf_idx; 5154 } 5155 mcp->mb[16] = LSW(mreq->rcv_dma); 5156 mcp->mb[17] = MSW(mreq->rcv_dma); 5157 mcp->mb[6] = LSW(MSD(mreq->rcv_dma)); 5158 mcp->mb[7] = MSW(MSD(mreq->rcv_dma)); 5159 5160 mcp->mb[10] = LSW(mreq->transfer_size); 5161 5162 mcp->mb[14] = LSW(mreq->send_dma); 5163 mcp->mb[15] = MSW(mreq->send_dma); 5164 mcp->mb[20] = LSW(MSD(mreq->send_dma)); 5165 mcp->mb[21] = MSW(MSD(mreq->send_dma)); 5166 5167 mcp->out_mb = MBX_21|MBX_20|MBX_17|MBX_16|MBX_15| 5168 MBX_14|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0; 5169 if (IS_CNA_CAPABLE(ha)) 5170 mcp->out_mb |= MBX_2; 5171 5172 mcp->in_mb = MBX_0; 5173 if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || 5174 IS_CNA_CAPABLE(ha) || IS_QLA2031(ha)) 5175 mcp->in_mb |= MBX_1; 5176 if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha)) 5177 mcp->in_mb |= MBX_3; 5178 5179 mcp->tov = MBX_TOV_SECONDS; 5180 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; 5181 mcp->buf_size = mreq->transfer_size; 5182 5183 rval = qla2x00_mailbox_command(vha, mcp); 5184 5185 if (rval != QLA_SUCCESS) { 5186 ql_dbg(ql_dbg_mbx, vha, 0x10fb, 5187 "Failed=%x mb[0]=%x mb[1]=%x.\n", 5188 rval, mcp->mb[0], mcp->mb[1]); 5189 } else { 5190 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fc, 5191 "Done %s.\n", __func__); 5192 } 5193 5194 /* Copy mailbox information */ 5195 memcpy(mresp, mcp->mb, 64); 5196 return rval; 5197 } 5198 5199 int 5200 qla84xx_reset_chip(scsi_qla_host_t *vha, uint16_t enable_diagnostic) 5201 { 5202 int rval; 5203 mbx_cmd_t mc; 5204 mbx_cmd_t *mcp = &mc; 5205 5206 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fd, 5207 "Entered %s enable_diag=%d.\n", __func__, enable_diagnostic); 5208 5209 mcp->mb[0] = MBC_ISP84XX_RESET; 5210 mcp->mb[1] = enable_diagnostic; 5211 mcp->out_mb = MBX_1|MBX_0; 5212 mcp->in_mb = MBX_1|MBX_0; 5213 mcp->tov = MBX_TOV_SECONDS; 5214 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; 5215 rval = qla2x00_mailbox_command(vha, mcp); 5216 5217 if (rval != QLA_SUCCESS) 5218 ql_dbg(ql_dbg_mbx, vha, 0x10fe, "Failed=%x.\n", rval); 5219 else 5220 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ff, 5221 "Done %s.\n", __func__); 5222 5223 return rval; 5224 } 5225 5226 int 5227 qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data) 5228 { 5229 int rval; 5230 mbx_cmd_t mc; 5231 mbx_cmd_t *mcp = &mc; 5232 5233 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1100, 5234 "Entered %s.\n", __func__); 5235 5236 if (!IS_FWI2_CAPABLE(vha->hw)) 5237 return QLA_FUNCTION_FAILED; 5238 5239 mcp->mb[0] = MBC_WRITE_RAM_WORD_EXTENDED; 5240 mcp->mb[1] = LSW(risc_addr); 5241 mcp->mb[2] = LSW(data); 5242 mcp->mb[3] = MSW(data); 5243 mcp->mb[8] = MSW(risc_addr); 5244 mcp->out_mb = MBX_8|MBX_3|MBX_2|MBX_1|MBX_0; 5245 mcp->in_mb = MBX_1|MBX_0; 5246 mcp->tov = 30; 5247 mcp->flags = 0; 5248 rval = qla2x00_mailbox_command(vha, mcp); 5249 if (rval != QLA_SUCCESS) { 5250 ql_dbg(ql_dbg_mbx, vha, 0x1101, 5251 "Failed=%x mb[0]=%x mb[1]=%x.\n", 5252 rval, mcp->mb[0], mcp->mb[1]); 5253 } else { 5254 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1102, 5255 "Done %s.\n", __func__); 5256 } 5257 5258 return rval; 5259 } 5260 5261 int 5262 qla81xx_write_mpi_register(scsi_qla_host_t *vha, 
uint16_t *mb) 5263 { 5264 int rval; 5265 uint32_t stat, timer; 5266 uint16_t mb0 = 0; 5267 struct qla_hw_data *ha = vha->hw; 5268 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 5269 5270 rval = QLA_SUCCESS; 5271 5272 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1103, 5273 "Entered %s.\n", __func__); 5274 5275 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); 5276 5277 /* Write the MBC data to the registers */ 5278 WRT_REG_WORD(®->mailbox0, MBC_WRITE_MPI_REGISTER); 5279 WRT_REG_WORD(®->mailbox1, mb[0]); 5280 WRT_REG_WORD(®->mailbox2, mb[1]); 5281 WRT_REG_WORD(®->mailbox3, mb[2]); 5282 WRT_REG_WORD(®->mailbox4, mb[3]); 5283 5284 WRT_REG_DWORD(®->hccr, HCCRX_SET_HOST_INT); 5285 5286 /* Poll for MBC interrupt */ 5287 for (timer = 6000000; timer; timer--) { 5288 /* Check for pending interrupts. */ 5289 stat = RD_REG_DWORD(®->host_status); 5290 if (stat & HSRX_RISC_INT) { 5291 stat &= 0xff; 5292 5293 if (stat == 0x1 || stat == 0x2 || 5294 stat == 0x10 || stat == 0x11) { 5295 set_bit(MBX_INTERRUPT, 5296 &ha->mbx_cmd_flags); 5297 mb0 = RD_REG_WORD(®->mailbox0); 5298 WRT_REG_DWORD(®->hccr, 5299 HCCRX_CLR_RISC_INT); 5300 RD_REG_DWORD(®->hccr); 5301 break; 5302 } 5303 } 5304 udelay(5); 5305 } 5306 5307 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) 5308 rval = mb0 & MBS_MASK; 5309 else 5310 rval = QLA_FUNCTION_FAILED; 5311 5312 if (rval != QLA_SUCCESS) { 5313 ql_dbg(ql_dbg_mbx, vha, 0x1104, 5314 "Failed=%x mb[0]=%x.\n", rval, mb[0]); 5315 } else { 5316 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1105, 5317 "Done %s.\n", __func__); 5318 } 5319 5320 return rval; 5321 } 5322 5323 /* Set the specified data rate */ 5324 int 5325 qla2x00_set_data_rate(scsi_qla_host_t *vha, uint16_t mode) 5326 { 5327 int rval; 5328 mbx_cmd_t mc; 5329 mbx_cmd_t *mcp = &mc; 5330 struct qla_hw_data *ha = vha->hw; 5331 uint16_t val; 5332 5333 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106, 5334 "Entered %s speed:0x%x mode:0x%x.\n", __func__, ha->set_data_rate, 5335 mode); 5336 5337 if (!IS_FWI2_CAPABLE(ha)) 5338 return QLA_FUNCTION_FAILED; 5339 5340 memset(mcp, 0, sizeof(*mcp)); 5341 switch (ha->set_data_rate) { 5342 case PORT_SPEED_AUTO: 5343 case PORT_SPEED_4GB: 5344 case PORT_SPEED_8GB: 5345 case PORT_SPEED_16GB: 5346 case PORT_SPEED_32GB: 5347 val = ha->set_data_rate; 5348 break; 5349 default: 5350 ql_log(ql_log_warn, vha, 0x1199, 5351 "Unrecognized speed setting:%d. 
Setting Autoneg\n", 5352 ha->set_data_rate); 5353 val = ha->set_data_rate = PORT_SPEED_AUTO; 5354 break; 5355 } 5356 5357 mcp->mb[0] = MBC_DATA_RATE; 5358 mcp->mb[1] = mode; 5359 mcp->mb[2] = val; 5360 5361 mcp->out_mb = MBX_2|MBX_1|MBX_0; 5362 mcp->in_mb = MBX_2|MBX_1|MBX_0; 5363 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) 5364 mcp->in_mb |= MBX_4|MBX_3; 5365 mcp->tov = MBX_TOV_SECONDS; 5366 mcp->flags = 0; 5367 rval = qla2x00_mailbox_command(vha, mcp); 5368 if (rval != QLA_SUCCESS) { 5369 ql_dbg(ql_dbg_mbx, vha, 0x1107, 5370 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5371 } else { 5372 if (mcp->mb[1] != 0x7) 5373 ql_dbg(ql_dbg_mbx, vha, 0x1179, 5374 "Speed set:0x%x\n", mcp->mb[1]); 5375 5376 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108, 5377 "Done %s.\n", __func__); 5378 } 5379 5380 return rval; 5381 } 5382 5383 int 5384 qla2x00_get_data_rate(scsi_qla_host_t *vha) 5385 { 5386 int rval; 5387 mbx_cmd_t mc; 5388 mbx_cmd_t *mcp = &mc; 5389 struct qla_hw_data *ha = vha->hw; 5390 5391 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106, 5392 "Entered %s.\n", __func__); 5393 5394 if (!IS_FWI2_CAPABLE(ha)) 5395 return QLA_FUNCTION_FAILED; 5396 5397 mcp->mb[0] = MBC_DATA_RATE; 5398 mcp->mb[1] = QLA_GET_DATA_RATE; 5399 mcp->out_mb = MBX_1|MBX_0; 5400 mcp->in_mb = MBX_2|MBX_1|MBX_0; 5401 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) 5402 mcp->in_mb |= MBX_3; 5403 mcp->tov = MBX_TOV_SECONDS; 5404 mcp->flags = 0; 5405 rval = qla2x00_mailbox_command(vha, mcp); 5406 if (rval != QLA_SUCCESS) { 5407 ql_dbg(ql_dbg_mbx, vha, 0x1107, 5408 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5409 } else { 5410 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108, 5411 "Done %s.\n", __func__); 5412 if (mcp->mb[1] != 0x7) 5413 ha->link_data_rate = mcp->mb[1]; 5414 } 5415 5416 return rval; 5417 } 5418 5419 int 5420 qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb) 5421 { 5422 int rval; 5423 mbx_cmd_t mc; 5424 mbx_cmd_t *mcp = &mc; 5425 struct qla_hw_data *ha = vha->hw; 5426 5427 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1109, 5428 "Entered %s.\n", __func__); 5429 5430 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA8044(ha) && 5431 !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 5432 return QLA_FUNCTION_FAILED; 5433 mcp->mb[0] = MBC_GET_PORT_CONFIG; 5434 mcp->out_mb = MBX_0; 5435 mcp->in_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 5436 mcp->tov = MBX_TOV_SECONDS; 5437 mcp->flags = 0; 5438 5439 rval = qla2x00_mailbox_command(vha, mcp); 5440 5441 if (rval != QLA_SUCCESS) { 5442 ql_dbg(ql_dbg_mbx, vha, 0x110a, 5443 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5444 } else { 5445 /* Copy all bits to preserve original value */ 5446 memcpy(mb, &mcp->mb[1], sizeof(uint16_t) * 4); 5447 5448 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110b, 5449 "Done %s.\n", __func__); 5450 } 5451 return rval; 5452 } 5453 5454 int 5455 qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb) 5456 { 5457 int rval; 5458 mbx_cmd_t mc; 5459 mbx_cmd_t *mcp = &mc; 5460 5461 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110c, 5462 "Entered %s.\n", __func__); 5463 5464 mcp->mb[0] = MBC_SET_PORT_CONFIG; 5465 /* Copy all bits to preserve original setting */ 5466 memcpy(&mcp->mb[1], mb, sizeof(uint16_t) * 4); 5467 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 5468 mcp->in_mb = MBX_0; 5469 mcp->tov = MBX_TOV_SECONDS; 5470 mcp->flags = 0; 5471 rval = qla2x00_mailbox_command(vha, mcp); 5472 5473 if (rval != QLA_SUCCESS) { 5474 ql_dbg(ql_dbg_mbx, vha, 0x110d, 5475 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5476 } else 5477 ql_dbg(ql_dbg_mbx + 
ql_dbg_verbose, vha, 0x110e, 5478 "Done %s.\n", __func__); 5479 5480 return rval; 5481 } 5482 5483 5484 int 5485 qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority, 5486 uint16_t *mb) 5487 { 5488 int rval; 5489 mbx_cmd_t mc; 5490 mbx_cmd_t *mcp = &mc; 5491 struct qla_hw_data *ha = vha->hw; 5492 5493 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110f, 5494 "Entered %s.\n", __func__); 5495 5496 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha)) 5497 return QLA_FUNCTION_FAILED; 5498 5499 mcp->mb[0] = MBC_PORT_PARAMS; 5500 mcp->mb[1] = loop_id; 5501 if (ha->flags.fcp_prio_enabled) 5502 mcp->mb[2] = BIT_1; 5503 else 5504 mcp->mb[2] = BIT_2; 5505 mcp->mb[4] = priority & 0xf; 5506 mcp->mb[9] = vha->vp_idx; 5507 mcp->out_mb = MBX_9|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 5508 mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0; 5509 mcp->tov = 30; 5510 mcp->flags = 0; 5511 rval = qla2x00_mailbox_command(vha, mcp); 5512 if (mb != NULL) { 5513 mb[0] = mcp->mb[0]; 5514 mb[1] = mcp->mb[1]; 5515 mb[3] = mcp->mb[3]; 5516 mb[4] = mcp->mb[4]; 5517 } 5518 5519 if (rval != QLA_SUCCESS) { 5520 ql_dbg(ql_dbg_mbx, vha, 0x10cd, "Failed=%x.\n", rval); 5521 } else { 5522 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10cc, 5523 "Done %s.\n", __func__); 5524 } 5525 5526 return rval; 5527 } 5528 5529 int 5530 qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp) 5531 { 5532 int rval = QLA_FUNCTION_FAILED; 5533 struct qla_hw_data *ha = vha->hw; 5534 uint8_t byte; 5535 5536 if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha)) { 5537 ql_dbg(ql_dbg_mbx, vha, 0x1150, 5538 "Thermal not supported by this card.\n"); 5539 return rval; 5540 } 5541 5542 if (IS_QLA25XX(ha)) { 5543 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC && 5544 ha->pdev->subsystem_device == 0x0175) { 5545 rval = qla2x00_read_sfp(vha, 0, &byte, 5546 0x98, 0x1, 1, BIT_13|BIT_0); 5547 *temp = byte; 5548 return rval; 5549 } 5550 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP && 5551 ha->pdev->subsystem_device == 0x338e) { 5552 rval = qla2x00_read_sfp(vha, 0, &byte, 5553 0x98, 0x1, 1, BIT_15|BIT_14|BIT_0); 5554 *temp = byte; 5555 return rval; 5556 } 5557 ql_dbg(ql_dbg_mbx, vha, 0x10c9, 5558 "Thermal not supported by this card.\n"); 5559 return rval; 5560 } 5561 5562 if (IS_QLA82XX(ha)) { 5563 *temp = qla82xx_read_temperature(vha); 5564 rval = QLA_SUCCESS; 5565 return rval; 5566 } else if (IS_QLA8044(ha)) { 5567 *temp = qla8044_read_temperature(vha); 5568 rval = QLA_SUCCESS; 5569 return rval; 5570 } 5571 5572 rval = qla2x00_read_asic_temperature(vha, temp); 5573 return rval; 5574 } 5575 5576 int 5577 qla82xx_mbx_intr_enable(scsi_qla_host_t *vha) 5578 { 5579 int rval; 5580 struct qla_hw_data *ha = vha->hw; 5581 mbx_cmd_t mc; 5582 mbx_cmd_t *mcp = &mc; 5583 5584 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1017, 5585 "Entered %s.\n", __func__); 5586 5587 if (!IS_FWI2_CAPABLE(ha)) 5588 return QLA_FUNCTION_FAILED; 5589 5590 memset(mcp, 0, sizeof(mbx_cmd_t)); 5591 mcp->mb[0] = MBC_TOGGLE_INTERRUPT; 5592 mcp->mb[1] = 1; 5593 5594 mcp->out_mb = MBX_1|MBX_0; 5595 mcp->in_mb = MBX_0; 5596 mcp->tov = 30; 5597 mcp->flags = 0; 5598 5599 rval = qla2x00_mailbox_command(vha, mcp); 5600 if (rval != QLA_SUCCESS) { 5601 ql_dbg(ql_dbg_mbx, vha, 0x1016, 5602 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5603 } else { 5604 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100e, 5605 "Done %s.\n", __func__); 5606 } 5607 5608 return rval; 5609 } 5610 5611 int 5612 qla82xx_mbx_intr_disable(scsi_qla_host_t *vha) 5613 { 5614 int rval; 5615 struct qla_hw_data *ha = 
vha->hw; 5616 mbx_cmd_t mc; 5617 mbx_cmd_t *mcp = &mc; 5618 5619 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100d, 5620 "Entered %s.\n", __func__); 5621 5622 if (!IS_P3P_TYPE(ha)) 5623 return QLA_FUNCTION_FAILED; 5624 5625 memset(mcp, 0, sizeof(mbx_cmd_t)); 5626 mcp->mb[0] = MBC_TOGGLE_INTERRUPT; 5627 mcp->mb[1] = 0; 5628 5629 mcp->out_mb = MBX_1|MBX_0; 5630 mcp->in_mb = MBX_0; 5631 mcp->tov = 30; 5632 mcp->flags = 0; 5633 5634 rval = qla2x00_mailbox_command(vha, mcp); 5635 if (rval != QLA_SUCCESS) { 5636 ql_dbg(ql_dbg_mbx, vha, 0x100c, 5637 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5638 } else { 5639 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100b, 5640 "Done %s.\n", __func__); 5641 } 5642 5643 return rval; 5644 } 5645 5646 int 5647 qla82xx_md_get_template_size(scsi_qla_host_t *vha) 5648 { 5649 struct qla_hw_data *ha = vha->hw; 5650 mbx_cmd_t mc; 5651 mbx_cmd_t *mcp = &mc; 5652 int rval = QLA_FUNCTION_FAILED; 5653 5654 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111f, 5655 "Entered %s.\n", __func__); 5656 5657 memset(mcp->mb, 0 , sizeof(mcp->mb)); 5658 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); 5659 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); 5660 mcp->mb[2] = LSW(RQST_TMPLT_SIZE); 5661 mcp->mb[3] = MSW(RQST_TMPLT_SIZE); 5662 5663 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 5664 mcp->in_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8| 5665 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 5666 5667 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; 5668 mcp->tov = MBX_TOV_SECONDS; 5669 rval = qla2x00_mailbox_command(vha, mcp); 5670 5671 /* Always copy back return mailbox values. */ 5672 if (rval != QLA_SUCCESS) { 5673 ql_dbg(ql_dbg_mbx, vha, 0x1120, 5674 "mailbox command FAILED=0x%x, subcode=%x.\n", 5675 (mcp->mb[1] << 16) | mcp->mb[0], 5676 (mcp->mb[3] << 16) | mcp->mb[2]); 5677 } else { 5678 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1121, 5679 "Done %s.\n", __func__); 5680 ha->md_template_size = ((mcp->mb[3] << 16) | mcp->mb[2]); 5681 if (!ha->md_template_size) { 5682 ql_dbg(ql_dbg_mbx, vha, 0x1122, 5683 "Null template size obtained.\n"); 5684 rval = QLA_FUNCTION_FAILED; 5685 } 5686 } 5687 return rval; 5688 } 5689 5690 int 5691 qla82xx_md_get_template(scsi_qla_host_t *vha) 5692 { 5693 struct qla_hw_data *ha = vha->hw; 5694 mbx_cmd_t mc; 5695 mbx_cmd_t *mcp = &mc; 5696 int rval = QLA_FUNCTION_FAILED; 5697 5698 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1123, 5699 "Entered %s.\n", __func__); 5700 5701 ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev, 5702 ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL); 5703 if (!ha->md_tmplt_hdr) { 5704 ql_log(ql_log_warn, vha, 0x1124, 5705 "Unable to allocate memory for Minidump template.\n"); 5706 return rval; 5707 } 5708 5709 memset(mcp->mb, 0 , sizeof(mcp->mb)); 5710 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); 5711 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); 5712 mcp->mb[2] = LSW(RQST_TMPLT); 5713 mcp->mb[3] = MSW(RQST_TMPLT); 5714 mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma)); 5715 mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma)); 5716 mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma)); 5717 mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma)); 5718 mcp->mb[8] = LSW(ha->md_template_size); 5719 mcp->mb[9] = MSW(ha->md_template_size); 5720 5721 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; 5722 mcp->tov = MBX_TOV_SECONDS; 5723 mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8| 5724 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 5725 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0; 5726 rval = qla2x00_mailbox_command(vha, mcp); 5727 5728 if (rval != 
QLA_SUCCESS) { 5729 ql_dbg(ql_dbg_mbx, vha, 0x1125, 5730 "mailbox command FAILED=0x%x, subcode=%x.\n", 5731 ((mcp->mb[1] << 16) | mcp->mb[0]), 5732 ((mcp->mb[3] << 16) | mcp->mb[2])); 5733 } else 5734 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1126, 5735 "Done %s.\n", __func__); 5736 return rval; 5737 } 5738 5739 int 5740 qla8044_md_get_template(scsi_qla_host_t *vha) 5741 { 5742 struct qla_hw_data *ha = vha->hw; 5743 mbx_cmd_t mc; 5744 mbx_cmd_t *mcp = &mc; 5745 int rval = QLA_FUNCTION_FAILED; 5746 int offset = 0, size = MINIDUMP_SIZE_36K; 5747 5748 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11f, 5749 "Entered %s.\n", __func__); 5750 5751 ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev, 5752 ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL); 5753 if (!ha->md_tmplt_hdr) { 5754 ql_log(ql_log_warn, vha, 0xb11b, 5755 "Unable to allocate memory for Minidump template.\n"); 5756 return rval; 5757 } 5758 5759 memset(mcp->mb, 0 , sizeof(mcp->mb)); 5760 while (offset < ha->md_template_size) { 5761 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); 5762 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); 5763 mcp->mb[2] = LSW(RQST_TMPLT); 5764 mcp->mb[3] = MSW(RQST_TMPLT); 5765 mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma + offset)); 5766 mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma + offset)); 5767 mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma + offset)); 5768 mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma + offset)); 5769 mcp->mb[8] = LSW(size); 5770 mcp->mb[9] = MSW(size); 5771 mcp->mb[10] = offset & 0x0000FFFF; 5772 mcp->mb[11] = offset & 0xFFFF0000; 5773 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; 5774 mcp->tov = MBX_TOV_SECONDS; 5775 mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8| 5776 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 5777 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0; 5778 rval = qla2x00_mailbox_command(vha, mcp); 5779 5780 if (rval != QLA_SUCCESS) { 5781 ql_dbg(ql_dbg_mbx, vha, 0xb11c, 5782 "mailbox command FAILED=0x%x, subcode=%x.\n", 5783 ((mcp->mb[1] << 16) | mcp->mb[0]), 5784 ((mcp->mb[3] << 16) | mcp->mb[2])); 5785 return rval; 5786 } else 5787 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11d, 5788 "Done %s.\n", __func__); 5789 offset = offset + size; 5790 } 5791 return rval; 5792 } 5793 5794 int 5795 qla81xx_set_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg) 5796 { 5797 int rval; 5798 struct qla_hw_data *ha = vha->hw; 5799 mbx_cmd_t mc; 5800 mbx_cmd_t *mcp = &mc; 5801 5802 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha)) 5803 return QLA_FUNCTION_FAILED; 5804 5805 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1133, 5806 "Entered %s.\n", __func__); 5807 5808 memset(mcp, 0, sizeof(mbx_cmd_t)); 5809 mcp->mb[0] = MBC_SET_LED_CONFIG; 5810 mcp->mb[1] = led_cfg[0]; 5811 mcp->mb[2] = led_cfg[1]; 5812 if (IS_QLA8031(ha)) { 5813 mcp->mb[3] = led_cfg[2]; 5814 mcp->mb[4] = led_cfg[3]; 5815 mcp->mb[5] = led_cfg[4]; 5816 mcp->mb[6] = led_cfg[5]; 5817 } 5818 5819 mcp->out_mb = MBX_2|MBX_1|MBX_0; 5820 if (IS_QLA8031(ha)) 5821 mcp->out_mb |= MBX_6|MBX_5|MBX_4|MBX_3; 5822 mcp->in_mb = MBX_0; 5823 mcp->tov = 30; 5824 mcp->flags = 0; 5825 5826 rval = qla2x00_mailbox_command(vha, mcp); 5827 if (rval != QLA_SUCCESS) { 5828 ql_dbg(ql_dbg_mbx, vha, 0x1134, 5829 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5830 } else { 5831 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1135, 5832 "Done %s.\n", __func__); 5833 } 5834 5835 return rval; 5836 } 5837 5838 int 5839 qla81xx_get_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg) 5840 { 5841 int rval; 5842 struct qla_hw_data *ha = vha->hw; 5843 mbx_cmd_t mc; 5844 mbx_cmd_t *mcp 
= &mc; 5845 5846 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha)) 5847 return QLA_FUNCTION_FAILED; 5848 5849 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1136, 5850 "Entered %s.\n", __func__); 5851 5852 memset(mcp, 0, sizeof(mbx_cmd_t)); 5853 mcp->mb[0] = MBC_GET_LED_CONFIG; 5854 5855 mcp->out_mb = MBX_0; 5856 mcp->in_mb = MBX_2|MBX_1|MBX_0; 5857 if (IS_QLA8031(ha)) 5858 mcp->in_mb |= MBX_6|MBX_5|MBX_4|MBX_3; 5859 mcp->tov = 30; 5860 mcp->flags = 0; 5861 5862 rval = qla2x00_mailbox_command(vha, mcp); 5863 if (rval != QLA_SUCCESS) { 5864 ql_dbg(ql_dbg_mbx, vha, 0x1137, 5865 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5866 } else { 5867 led_cfg[0] = mcp->mb[1]; 5868 led_cfg[1] = mcp->mb[2]; 5869 if (IS_QLA8031(ha)) { 5870 led_cfg[2] = mcp->mb[3]; 5871 led_cfg[3] = mcp->mb[4]; 5872 led_cfg[4] = mcp->mb[5]; 5873 led_cfg[5] = mcp->mb[6]; 5874 } 5875 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1138, 5876 "Done %s.\n", __func__); 5877 } 5878 5879 return rval; 5880 } 5881 5882 int 5883 qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable) 5884 { 5885 int rval; 5886 struct qla_hw_data *ha = vha->hw; 5887 mbx_cmd_t mc; 5888 mbx_cmd_t *mcp = &mc; 5889 5890 if (!IS_P3P_TYPE(ha)) 5891 return QLA_FUNCTION_FAILED; 5892 5893 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1127, 5894 "Entered %s.\n", __func__); 5895 5896 memset(mcp, 0, sizeof(mbx_cmd_t)); 5897 mcp->mb[0] = MBC_SET_LED_CONFIG; 5898 if (enable) 5899 mcp->mb[7] = 0xE; 5900 else 5901 mcp->mb[7] = 0xD; 5902 5903 mcp->out_mb = MBX_7|MBX_0; 5904 mcp->in_mb = MBX_0; 5905 mcp->tov = MBX_TOV_SECONDS; 5906 mcp->flags = 0; 5907 5908 rval = qla2x00_mailbox_command(vha, mcp); 5909 if (rval != QLA_SUCCESS) { 5910 ql_dbg(ql_dbg_mbx, vha, 0x1128, 5911 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5912 } else { 5913 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1129, 5914 "Done %s.\n", __func__); 5915 } 5916 5917 return rval; 5918 } 5919 5920 int 5921 qla83xx_wr_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data) 5922 { 5923 int rval; 5924 struct qla_hw_data *ha = vha->hw; 5925 mbx_cmd_t mc; 5926 mbx_cmd_t *mcp = &mc; 5927 5928 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 5929 return QLA_FUNCTION_FAILED; 5930 5931 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1130, 5932 "Entered %s.\n", __func__); 5933 5934 mcp->mb[0] = MBC_WRITE_REMOTE_REG; 5935 mcp->mb[1] = LSW(reg); 5936 mcp->mb[2] = MSW(reg); 5937 mcp->mb[3] = LSW(data); 5938 mcp->mb[4] = MSW(data); 5939 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 5940 5941 mcp->in_mb = MBX_1|MBX_0; 5942 mcp->tov = MBX_TOV_SECONDS; 5943 mcp->flags = 0; 5944 rval = qla2x00_mailbox_command(vha, mcp); 5945 5946 if (rval != QLA_SUCCESS) { 5947 ql_dbg(ql_dbg_mbx, vha, 0x1131, 5948 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5949 } else { 5950 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1132, 5951 "Done %s.\n", __func__); 5952 } 5953 5954 return rval; 5955 } 5956 5957 int 5958 qla2x00_port_logout(scsi_qla_host_t *vha, struct fc_port *fcport) 5959 { 5960 int rval; 5961 struct qla_hw_data *ha = vha->hw; 5962 mbx_cmd_t mc; 5963 mbx_cmd_t *mcp = &mc; 5964 5965 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 5966 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113b, 5967 "Implicit LOGO Unsupported.\n"); 5968 return QLA_FUNCTION_FAILED; 5969 } 5970 5971 5972 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113c, 5973 "Entering %s.\n", __func__); 5974 5975 /* Perform Implicit LOGO. 
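 * mb[10] BIT_15 requests the implicit option of MBC_PORT_LOGOUT: the
 * firmware releases its session for fcport->loop_id without initiating an
 * explicit logout exchange with the remote port (unsupported on
 * ISP2100/ISP2200, as checked above).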
*/ 5976 mcp->mb[0] = MBC_PORT_LOGOUT; 5977 mcp->mb[1] = fcport->loop_id; 5978 mcp->mb[10] = BIT_15; 5979 mcp->out_mb = MBX_10|MBX_1|MBX_0; 5980 mcp->in_mb = MBX_0; 5981 mcp->tov = MBX_TOV_SECONDS; 5982 mcp->flags = 0; 5983 rval = qla2x00_mailbox_command(vha, mcp); 5984 if (rval != QLA_SUCCESS) 5985 ql_dbg(ql_dbg_mbx, vha, 0x113d, 5986 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5987 else 5988 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113e, 5989 "Done %s.\n", __func__); 5990 5991 return rval; 5992 } 5993 5994 int 5995 qla83xx_rd_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t *data) 5996 { 5997 int rval; 5998 mbx_cmd_t mc; 5999 mbx_cmd_t *mcp = &mc; 6000 struct qla_hw_data *ha = vha->hw; 6001 unsigned long retry_max_time = jiffies + (2 * HZ); 6002 6003 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 6004 return QLA_FUNCTION_FAILED; 6005 6006 ql_dbg(ql_dbg_mbx, vha, 0x114b, "Entered %s.\n", __func__); 6007 6008 retry_rd_reg: 6009 mcp->mb[0] = MBC_READ_REMOTE_REG; 6010 mcp->mb[1] = LSW(reg); 6011 mcp->mb[2] = MSW(reg); 6012 mcp->out_mb = MBX_2|MBX_1|MBX_0; 6013 mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0; 6014 mcp->tov = MBX_TOV_SECONDS; 6015 mcp->flags = 0; 6016 rval = qla2x00_mailbox_command(vha, mcp); 6017 6018 if (rval != QLA_SUCCESS) { 6019 ql_dbg(ql_dbg_mbx, vha, 0x114c, 6020 "Failed=%x mb[0]=%x mb[1]=%x.\n", 6021 rval, mcp->mb[0], mcp->mb[1]); 6022 } else { 6023 *data = (mcp->mb[3] | (mcp->mb[4] << 16)); 6024 if (*data == QLA8XXX_BAD_VALUE) { 6025 /* 6026 * During soft-reset CAMRAM register reads might 6027 * return 0xbad0bad0. So retry for MAX of 2 sec 6028 * while reading camram registers. 6029 */ 6030 if (time_after(jiffies, retry_max_time)) { 6031 ql_dbg(ql_dbg_mbx, vha, 0x1141, 6032 "Failure to read CAMRAM register. " 6033 "data=0x%x.\n", *data); 6034 return QLA_FUNCTION_FAILED; 6035 } 6036 msleep(100); 6037 goto retry_rd_reg; 6038 } 6039 ql_dbg(ql_dbg_mbx, vha, 0x1142, "Done %s.\n", __func__); 6040 } 6041 6042 return rval; 6043 } 6044 6045 int 6046 qla83xx_restart_nic_firmware(scsi_qla_host_t *vha) 6047 { 6048 int rval; 6049 mbx_cmd_t mc; 6050 mbx_cmd_t *mcp = &mc; 6051 struct qla_hw_data *ha = vha->hw; 6052 6053 if (!IS_QLA83XX(ha)) 6054 return QLA_FUNCTION_FAILED; 6055 6056 ql_dbg(ql_dbg_mbx, vha, 0x1143, "Entered %s.\n", __func__); 6057 6058 mcp->mb[0] = MBC_RESTART_NIC_FIRMWARE; 6059 mcp->out_mb = MBX_0; 6060 mcp->in_mb = MBX_1|MBX_0; 6061 mcp->tov = MBX_TOV_SECONDS; 6062 mcp->flags = 0; 6063 rval = qla2x00_mailbox_command(vha, mcp); 6064 6065 if (rval != QLA_SUCCESS) { 6066 ql_dbg(ql_dbg_mbx, vha, 0x1144, 6067 "Failed=%x mb[0]=%x mb[1]=%x.\n", 6068 rval, mcp->mb[0], mcp->mb[1]); 6069 ha->isp_ops->fw_dump(vha, 0); 6070 } else { 6071 ql_dbg(ql_dbg_mbx, vha, 0x1145, "Done %s.\n", __func__); 6072 } 6073 6074 return rval; 6075 } 6076 6077 int 6078 qla83xx_access_control(scsi_qla_host_t *vha, uint16_t options, 6079 uint32_t start_addr, uint32_t end_addr, uint16_t *sector_size) 6080 { 6081 int rval; 6082 mbx_cmd_t mc; 6083 mbx_cmd_t *mcp = &mc; 6084 uint8_t subcode = (uint8_t)options; 6085 struct qla_hw_data *ha = vha->hw; 6086 6087 if (!IS_QLA8031(ha)) 6088 return QLA_FUNCTION_FAILED; 6089 6090 ql_dbg(ql_dbg_mbx, vha, 0x1146, "Entered %s.\n", __func__); 6091 6092 mcp->mb[0] = MBC_SET_ACCESS_CONTROL; 6093 mcp->mb[1] = options; 6094 mcp->out_mb = MBX_1|MBX_0; 6095 if (subcode & BIT_2) { 6096 mcp->mb[2] = LSW(start_addr); 6097 mcp->mb[3] = MSW(start_addr); 6098 mcp->mb[4] = LSW(end_addr); 6099 mcp->mb[5] = MSW(end_addr); 6100 mcp->out_mb |= MBX_5|MBX_4|MBX_3|MBX_2; 6101 } 6102 
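	/*
	 * Returned registers depend on the subcode: a sector-size query
	 * (BIT_5) reports the size in mb[1], while lock requests return a
	 * lock id in mb[3]/mb[4]; mb[3..4] are only collected when neither
	 * BIT_2 nor BIT_5 is set, matching the decode of the reply below.
	 */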
	mcp->in_mb = MBX_2|MBX_1|MBX_0;
	if (!(subcode & (BIT_2 | BIT_5)))
		mcp->in_mb |= MBX_4|MBX_3;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1147,
		    "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[4]=%x.\n",
		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3],
		    mcp->mb[4]);
		ha->isp_ops->fw_dump(vha, 0);
	} else {
		if (subcode & BIT_5)
			*sector_size = mcp->mb[1];
		else if (subcode & (BIT_6 | BIT_7)) {
			ql_dbg(ql_dbg_mbx, vha, 0x1148,
			    "Driver-lock id=%x%x", mcp->mb[4], mcp->mb[3]);
		} else if (subcode & (BIT_3 | BIT_4)) {
			ql_dbg(ql_dbg_mbx, vha, 0x1149,
			    "Flash-lock id=%x%x", mcp->mb[4], mcp->mb[3]);
		}
		ql_dbg(ql_dbg_mbx, vha, 0x114a, "Done %s.\n", __func__);
	}

	return rval;
}

int
qla2x00_dump_mctp_data(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
	uint32_t size)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	if (!IS_MCTP_CAPABLE(vha->hw))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114f,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
	mcp->mb[1] = LSW(addr);
	mcp->mb[2] = MSW(req_dma);
	mcp->mb[3] = LSW(req_dma);
	mcp->mb[4] = MSW(size);
	mcp->mb[5] = LSW(size);
	mcp->mb[6] = MSW(MSD(req_dma));
	mcp->mb[7] = LSW(MSD(req_dma));
	mcp->mb[8] = MSW(addr);
	/* Setting RAM ID to valid */
	/* For MCTP, the RAM ID is 0x40 */
	mcp->mb[10] = BIT_7 | 0x40;

	/*
	 * Use a plain assignment here: mc is stack-allocated and not
	 * zero-initialized, so OR-ing into out_mb would use an
	 * indeterminate value.
	 */
	mcp->out_mb = MBX_10|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|
	    MBX_0;

	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x114e,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114d,
		    "Done %s.\n", __func__);
	}

	return rval;
}

int
qla26xx_dport_diagnostics(scsi_qla_host_t *vha,
	void *dd_buf, uint size, uint options)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	dma_addr_t dd_dma;

	if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) &&
	    !IS_QLA28XX(vha->hw))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x119f,
	    "Entered %s.\n", __func__);

	dd_dma = dma_map_single(&vha->hw->pdev->dev,
	    dd_buf, size, DMA_FROM_DEVICE);
	if (dma_mapping_error(&vha->hw->pdev->dev, dd_dma)) {
		ql_log(ql_log_warn, vha, 0x1194, "Failed to map dma buffer.\n");
		return QLA_MEMORY_ALLOC_FAILED;
	}

	memset(dd_buf, 0, size);

	mcp->mb[0] = MBC_DPORT_DIAGNOSTICS;
	mcp->mb[1] = options;
	mcp->mb[2] = MSW(LSD(dd_dma));
	mcp->mb[3] = LSW(LSD(dd_dma));
	mcp->mb[6] = MSW(MSD(dd_dma));
	mcp->mb[7] = LSW(MSD(dd_dma));
	mcp->mb[8] = size;
	mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->buf_size = size;
	mcp->flags = MBX_DMA_IN;
	mcp->tov = MBX_TOV_SECONDS * 4;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1195, "Failed=%x.\n", rval);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1196,
		    "Done %s.\n", __func__);
	}

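	/* Unmap the diagnostic buffer whether or not the command succeeded. */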
	dma_unmap_single(&vha->hw->pdev->dev, dd_dma,
	    size, DMA_FROM_DEVICE);

	return rval;
}

static void qla2x00_async_mb_sp_done(srb_t *sp, int res)
{
	sp->u.iocb_cmd.u.mbx.rc = res;

	complete(&sp->u.iocb_cmd.u.mbx.comp);
	/* Don't free sp here; the caller does the free. */
}

/*
 * This mailbox uses the IOCB interface to send an MB command.
 * This allows non-critical (non chip-setup) commands to go
 * out in parallel.
 */
int qla24xx_send_mb_cmd(struct scsi_qla_host *vha, mbx_cmd_t *mcp)
{
	int rval = QLA_FUNCTION_FAILED;
	srb_t *sp;
	struct srb_iocb *c;

	if (!vha->hw->flags.fw_started)
		goto done;

	sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
	if (!sp)
		goto done;

	sp->type = SRB_MB_IOCB;
	sp->name = mb_to_str(mcp->mb[0]);

	c = &sp->u.iocb_cmd;
	c->timeout = qla2x00_async_iocb_timeout;
	init_completion(&c->u.mbx.comp);

	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	memcpy(sp->u.iocb_cmd.u.mbx.out_mb, mcp->mb, SIZEOF_IOCB_MB_REG);

	sp->done = qla2x00_async_mb_sp_done;

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1018,
		    "%s: %s Failed submission. %x.\n",
		    __func__, sp->name, rval);
		goto done_free_sp;
	}

	ql_dbg(ql_dbg_mbx, vha, 0x113f, "MB:%s hndl %x submitted\n",
	    sp->name, sp->handle);

	wait_for_completion(&c->u.mbx.comp);
	memcpy(mcp->mb, sp->u.iocb_cmd.u.mbx.in_mb, SIZEOF_IOCB_MB_REG);

	rval = c->u.mbx.rc;
	switch (rval) {
	case QLA_FUNCTION_TIMEOUT:
		ql_dbg(ql_dbg_mbx, vha, 0x1140, "%s: %s Timeout. %x.\n",
		    __func__, sp->name, rval);
		break;
	case QLA_SUCCESS:
		ql_dbg(ql_dbg_mbx, vha, 0x119d, "%s: %s done.\n",
		    __func__, sp->name);
		break;
	default:
		ql_dbg(ql_dbg_mbx, vha, 0x119e, "%s: %s Failed. %x.\n",
		    __func__, sp->name, rval);
		break;
	}

done_free_sp:
	sp->free(sp);
done:
	return rval;
}

/*
 * qla24xx_gpdb_wait
 * NOTE: Do not call this routine from the DPC thread.
 */
int qla24xx_gpdb_wait(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
{
	int rval = QLA_FUNCTION_FAILED;
	dma_addr_t pd_dma;
	struct port_database_24xx *pd;
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t mc;

	if (!vha->hw->flags.fw_started)
		goto done;

	pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
	if (pd == NULL) {
		ql_log(ql_log_warn, vha, 0xd047,
		    "Failed to allocate port database structure.\n");
		goto done_free_sp;
	}

	memset(&mc, 0, sizeof(mc));
	mc.mb[0] = MBC_GET_PORT_DATABASE;
	mc.mb[1] = cpu_to_le16(fcport->loop_id);
	mc.mb[2] = MSW(pd_dma);
	mc.mb[3] = LSW(pd_dma);
	mc.mb[6] = MSW(MSD(pd_dma));
	mc.mb[7] = LSW(MSD(pd_dma));
	mc.mb[9] = cpu_to_le16(vha->vp_idx);
	mc.mb[10] = cpu_to_le16((uint16_t)opt);

	rval = qla24xx_send_mb_cmd(vha, &mc);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1193,
		    "%s: %8phC fail\n", __func__, fcport->port_name);
		goto done_free_sp;
	}

	rval = __qla24xx_parse_gpdb(vha, fcport, pd);

	ql_dbg(ql_dbg_mbx, vha, 0x1197, "%s: %8phC done\n",
	    __func__, fcport->port_name);

done_free_sp:
	if (pd)
		dma_pool_free(ha->s_dma_pool, pd, pd_dma);
done:
	return rval;
}

int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport,
	struct port_database_24xx *pd)
{
	int rval = QLA_SUCCESS;
	uint64_t zero = 0;
	u8 current_login_state, last_login_state;

	if (NVME_TARGET(vha->hw, fcport)) {
		current_login_state = pd->current_login_state >> 4;
		last_login_state = pd->last_login_state >> 4;
	} else {
		current_login_state = pd->current_login_state & 0xf;
		last_login_state = pd->last_login_state & 0xf;
	}

	/* Check for logged-in state. */
	if (current_login_state != PDS_PRLI_COMPLETE) {
		ql_dbg(ql_dbg_mbx, vha, 0x119a,
		    "Unable to verify login-state (%x/%x) for loop_id %x.\n",
		    current_login_state, last_login_state, fcport->loop_id);
		rval = QLA_FUNCTION_FAILED;
		goto gpd_error_out;
	}

	if (fcport->loop_id == FC_NO_LOOP_ID ||
	    (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
	     memcmp(fcport->port_name, pd->port_name, 8))) {
		/* We lost the device midway. */
		rval = QLA_NOT_LOGGED_IN;
		goto gpd_error_out;
	}

	/* Names are little-endian. */
	memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
	memcpy(fcport->port_name, pd->port_name, WWN_SIZE);

	/* Get port_id of device. */
	fcport->d_id.b.domain = pd->port_id[0];
	fcport->d_id.b.area = pd->port_id[1];
	fcport->d_id.b.al_pa = pd->port_id[2];
	fcport->d_id.b.rsvd_1 = 0;

	if (NVME_TARGET(vha->hw, fcport)) {
		fcport->port_type = FCT_NVME;
		if ((pd->prli_svc_param_word_3[0] & BIT_5) == 0)
			fcport->port_type |= FCT_NVME_INITIATOR;
		if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
			fcport->port_type |= FCT_NVME_TARGET;
		if ((pd->prli_svc_param_word_3[0] & BIT_3) == 0)
			fcport->port_type |= FCT_NVME_DISCOVERY;
	} else {
		/* If not a target, it must be an initiator or unknown type. */
		if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
			fcport->port_type = FCT_INITIATOR;
		else
			fcport->port_type = FCT_TARGET;
	}
	/* Pass back COS information. */
	fcport->supported_classes = (pd->flags & PDF_CLASS_2) ?
	    FC_COS_CLASS2 : FC_COS_CLASS3;

	if (pd->prli_svc_param_word_3[0] & BIT_7) {
		fcport->flags |= FCF_CONF_COMP_SUPPORTED;
		fcport->conf_compl_supported = 1;
	}

gpd_error_out:
	return rval;
}

/*
 * qla24xx_gidlist_wait
 * NOTE: Do not call this routine from the DPC thread.
 */
int qla24xx_gidlist_wait(struct scsi_qla_host *vha,
	void *id_list, dma_addr_t id_list_dma, uint16_t *entries)
{
	int rval = QLA_FUNCTION_FAILED;
	mbx_cmd_t mc;

	if (!vha->hw->flags.fw_started)
		goto done;

	memset(&mc, 0, sizeof(mc));
	mc.mb[0] = MBC_GET_ID_LIST;
	mc.mb[2] = MSW(id_list_dma);
	mc.mb[3] = LSW(id_list_dma);
	mc.mb[6] = MSW(MSD(id_list_dma));
	mc.mb[7] = LSW(MSD(id_list_dma));
	mc.mb[8] = 0;
	mc.mb[9] = cpu_to_le16(vha->vp_idx);

	rval = qla24xx_send_mb_cmd(vha, &mc);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x119b,
		    "%s: fail\n", __func__);
	} else {
		*entries = mc.mb[1];
		ql_dbg(ql_dbg_mbx, vha, 0x119c,
		    "%s: done\n", __func__);
	}
done:
	return rval;
}

int qla27xx_set_zio_threshold(scsi_qla_host_t *vha, uint16_t value)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1200,
	    "Entered %s\n", __func__);

	memset(mcp->mb, 0, sizeof(mcp->mb));
	mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
	mcp->mb[1] = cpu_to_le16(1);
	mcp->mb[2] = cpu_to_le16(value);
	mcp->out_mb = MBX_2 | MBX_1 | MBX_0;
	mcp->in_mb = MBX_2 | MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;

	rval = qla2x00_mailbox_command(vha, mcp);

	ql_dbg(ql_dbg_mbx, vha, 0x1201, "%s %x\n",
	    (rval != QLA_SUCCESS) ? "Failed" : "Done", rval);

	return rval;
}

int qla27xx_get_zio_threshold(scsi_qla_host_t *vha, uint16_t *value)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1203,
	    "Entered %s\n", __func__);

	memset(mcp->mb, 0, sizeof(mcp->mb));
	mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
	mcp->mb[1] = cpu_to_le16(0);
	mcp->out_mb = MBX_1 | MBX_0;
	mcp->in_mb = MBX_2 | MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;

	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval == QLA_SUCCESS)
		*value = mc.mb[2];

	ql_dbg(ql_dbg_mbx, vha, 0x1205, "%s %x\n",
	    (rval != QLA_SUCCESS) ? "Failed" : "Done", rval);

	return rval;
}

int
qla2x00_read_sfp_dev(struct scsi_qla_host *vha, char *buf, int count)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t iter, addr, offset;
	dma_addr_t phys_addr;
	int rval, c;
	u8 *sfp_data;

	memset(ha->sfp_data, 0, SFP_DEV_SIZE);
	addr = 0xa0;
	phys_addr = ha->sfp_data_dma;
	sfp_data = ha->sfp_data;
	offset = c = 0;

	for (iter = 0; iter < SFP_DEV_SIZE / SFP_BLOCK_SIZE; iter++) {
		if (iter == 4) {
			/* Skip to the next device address. */
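			/*
			 * The first four blocks are read from the SFP
			 * serial-ID page (A0h); the remaining blocks come
			 * from the diagnostics page (A2h), per SFF-8472.
			 */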
			addr = 0xa2;
			offset = 0;
		}

		rval = qla2x00_read_sfp(vha, phys_addr, sfp_data,
		    addr, offset, SFP_BLOCK_SIZE, BIT_1);
		if (rval != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x706d,
			    "Unable to read SFP data (%x/%x/%x).\n", rval,
			    addr, offset);

			return rval;
		}

		if (buf && (c < count)) {
			u16 sz;

			if ((count - c) >= SFP_BLOCK_SIZE)
				sz = SFP_BLOCK_SIZE;
			else
				sz = count - c;

			memcpy(buf, sfp_data, sz);
			buf += SFP_BLOCK_SIZE;
			c += sz;
		}
		phys_addr += SFP_BLOCK_SIZE;
		sfp_data += SFP_BLOCK_SIZE;
		offset += SFP_BLOCK_SIZE;
	}

	return rval;
}

int qla24xx_res_count_wait(struct scsi_qla_host *vha,
	uint16_t *out_mb, int out_mb_sz)
{
	int rval = QLA_FUNCTION_FAILED;
	mbx_cmd_t mc;

	if (!vha->hw->flags.fw_started)
		goto done;

	memset(&mc, 0, sizeof(mc));
	mc.mb[0] = MBC_GET_RESOURCE_COUNTS;

	rval = qla24xx_send_mb_cmd(vha, &mc);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0xffff,
		    "%s: fail\n", __func__);
	} else {
		if (out_mb_sz <= SIZEOF_IOCB_MB_REG)
			memcpy(out_mb, mc.mb, out_mb_sz);
		else
			memcpy(out_mb, mc.mb, SIZEOF_IOCB_MB_REG);

		ql_dbg(ql_dbg_mbx, vha, 0xffff,
		    "%s: done\n", __func__);
	}
done:
	return rval;
}

int qla28xx_secure_flash_update(scsi_qla_host_t *vha, uint16_t opts,
	uint16_t region, uint32_t len, dma_addr_t sfub_dma_addr,
	uint32_t sfub_len)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	mcp->mb[0] = MBC_SECURE_FLASH_UPDATE;
	mcp->mb[1] = opts;
	mcp->mb[2] = region;
	mcp->mb[3] = MSW(len);
	mcp->mb[4] = LSW(len);
	mcp->mb[5] = MSW(sfub_dma_addr);
	mcp->mb[6] = LSW(sfub_dma_addr);
	mcp->mb[7] = MSW(MSD(sfub_dma_addr));
	mcp->mb[8] = LSW(MSD(sfub_dma_addr));
	mcp->mb[9] = sfub_len;
	mcp->out_mb =
	    MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_2|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0xffff,
		    "%s(%ld): failed rval 0x%x, %x %x %x",
		    __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1],
		    mcp->mb[2]);
	}

	return rval;
}

int qla2xxx_write_remote_register(scsi_qla_host_t *vha, uint32_t addr,
	uint32_t data)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_WRITE_REMOTE_REG;
	mcp->mb[1] = LSW(addr);
	mcp->mb[2] = MSW(addr);
	mcp->mb[3] = LSW(data);
	mcp->mb[4] = MSW(data);
	mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x10e9,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
		    "Done %s.\n", __func__);
	}

	return rval;
}

int qla2xxx_read_remote_register(scsi_qla_host_t *vha, uint32_t addr,
	uint32_t *data)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_READ_REMOTE_REG;
	mcp->mb[1] = LSW(addr);
	mcp->mb[2] = MSW(addr);
	mcp->out_mb = MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	*data = (uint32_t)((((uint32_t)mcp->mb[4]) << 16) | mcp->mb[3]);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x10e9,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
		    "Done %s.\n", __func__);
	}

	return rval;
}
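
/*
 * Illustrative sketch only (not part of the driver): a hypothetical caller
 * could pair the two remote-register accessors above to perform a
 * read-modify-write of a 32-bit register, e.g.:
 *
 *	uint32_t v;
 *
 *	if (qla2xxx_read_remote_register(vha, addr, &v) == QLA_SUCCESS) {
 *		v |= BIT_0;
 *		rval = qla2xxx_write_remote_register(vha, addr, v);
 *	}
 *
 * Both helpers issue a blocking mailbox command that sleeps waiting for
 * completion, so such a caller is expected to run in process context.
 */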