/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/delay.h>
#include <linux/gfp.h>

static struct mb_cmd_name {
	uint16_t cmd;
	const char *str;
} mb_str[] = {
	{MBC_GET_PORT_DATABASE, "GPDB"},
	{MBC_GET_ID_LIST, "GIDList"},
	{MBC_GET_LINK_PRIV_STATS, "Stats"},
	{MBC_GET_RESOURCE_COUNTS, "ResCnt"},
};

static const char *mb_to_str(uint16_t cmd)
{
	int i;
	struct mb_cmd_name *e;

	for (i = 0; i < ARRAY_SIZE(mb_str); i++) {
		e = mb_str + i;
		if (cmd == e->cmd)
			return e->str;
	}
	return "unknown";
}

static struct rom_cmd {
	uint16_t cmd;
} rom_cmds[] = {
	{ MBC_LOAD_RAM },
	{ MBC_EXECUTE_FIRMWARE },
	{ MBC_READ_RAM_WORD },
	{ MBC_MAILBOX_REGISTER_TEST },
	{ MBC_VERIFY_CHECKSUM },
	{ MBC_GET_FIRMWARE_VERSION },
	{ MBC_LOAD_RISC_RAM },
	{ MBC_DUMP_RISC_RAM },
	{ MBC_LOAD_RISC_RAM_EXTENDED },
	{ MBC_DUMP_RISC_RAM_EXTENDED },
	{ MBC_WRITE_RAM_WORD_EXTENDED },
	{ MBC_READ_RAM_EXTENDED },
	{ MBC_GET_RESOURCE_COUNTS },
	{ MBC_SET_FIRMWARE_OPTION },
	{ MBC_MID_INITIALIZE_FIRMWARE },
	{ MBC_GET_FIRMWARE_STATE },
	{ MBC_GET_MEM_OFFLOAD_CNTRL_STAT },
	{ MBC_GET_RETRY_COUNT },
	{ MBC_TRACE_CONTROL },
	{ MBC_INITIALIZE_MULTIQ },
	{ MBC_IOCB_COMMAND_A64 },
	{ MBC_GET_ADAPTER_LOOP_ID },
	{ MBC_READ_SFP },
	{ MBC_GET_RNID_PARAMS },
	{ MBC_GET_SET_ZIO_THRESHOLD },
};

static int is_rom_cmd(uint16_t cmd)
{
	int i;
	struct rom_cmd *wc;

	for (i = 0; i < ARRAY_SIZE(rom_cmds); i++) {
		wc = rom_cmds + i;
		if (wc->cmd == cmd)
			return 1;
	}

	return 0;
}

/*
 * qla2x00_mailbox_command
 *	Issues a mailbox command and waits for completion.
 *
 * Input:
 *	ha = adapter block pointer.
 *	mcp = driver internal mbx struct pointer.
 *
 * Output:
 *	mb[MAX_MAILBOX_REGISTER_COUNT] = returned mailbox data.
 *
 * Returns:
 *	0 : QLA_SUCCESS = cmd performed successfully
 *	1 : QLA_FUNCTION_FAILED (error encountered)
 *	6 : QLA_FUNCTION_TIMEOUT (timeout condition encountered)
 *
 * Context:
 *	Kernel context.
 */
static int
qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
{
	int rval, i;
	unsigned long flags = 0;
	device_reg_t *reg;
	uint8_t abort_active;
	uint8_t io_lock_on;
	uint16_t command = 0;
	uint16_t *iptr;
	uint16_t __iomem *optr;
	uint32_t cnt;
	uint32_t mboxes;
	unsigned long wait_time;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
	u32 chip_reset;


	ql_dbg(ql_dbg_mbx, vha, 0x1000, "Entered %s.\n", __func__);

	if (ha->pdev->error_state == pci_channel_io_perm_failure) {
		ql_log(ql_log_warn, vha, 0x1001,
		    "PCI channel failed permanently, exiting.\n");
		return QLA_FUNCTION_TIMEOUT;
	}

	if (vha->device_flags & DFLG_DEV_FAILED) {
		ql_log(ql_log_warn, vha, 0x1002,
		    "Device in failed state, exiting.\n");
		return QLA_FUNCTION_TIMEOUT;
	}

	/* if PCI error, then avoid mbx processing.*/
	if (test_bit(PFLG_DISCONNECTED, &base_vha->dpc_flags) &&
	    test_bit(UNLOADING, &base_vha->dpc_flags)) {
		ql_log(ql_log_warn, vha, 0xd04e,
		    "PCI error, exiting.\n");
		return QLA_FUNCTION_TIMEOUT;
	}

	reg = ha->iobase;
	io_lock_on = base_vha->flags.init_done;

	rval = QLA_SUCCESS;
	abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
	chip_reset = ha->chip_reset;

	if (ha->flags.pci_channel_io_perm_failure) {
		ql_log(ql_log_warn, vha, 0x1003,
		    "Perm failure on EEH timeout MBX, exiting.\n");
		return QLA_FUNCTION_TIMEOUT;
	}

	if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
		/* Setting Link-Down error */
		mcp->mb[0] = MBS_LINK_DOWN_ERROR;
		ql_log(ql_log_warn, vha, 0x1004,
		    "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
		return QLA_FUNCTION_TIMEOUT;
	}

	/* check if ISP abort is active and return cmd with timeout */
	if ((test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
	    test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) &&
	    !is_rom_cmd(mcp->mb[0])) {
		ql_log(ql_log_info, vha, 0x1005,
		    "Cmd 0x%x aborted with timeout since ISP Abort is pending\n",
		    mcp->mb[0]);
		return QLA_FUNCTION_TIMEOUT;
	}

	atomic_inc(&ha->num_pend_mbx_stage1);
	/*
	 * Wait for active mailbox commands to finish by waiting at most tov
	 * seconds. This is to serialize actual issuing of mailbox cmds during
	 * non ISP abort time.
	 */
	if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) {
		/* Timeout occurred. Return error. */
		ql_log(ql_log_warn, vha, 0xd035,
		    "Cmd access timeout, cmd=0x%x, Exiting.\n",
		    mcp->mb[0]);
		atomic_dec(&ha->num_pend_mbx_stage1);
		return QLA_FUNCTION_TIMEOUT;
	}
	atomic_dec(&ha->num_pend_mbx_stage1);
	if (ha->flags.purge_mbox || chip_reset != ha->chip_reset) {
		rval = QLA_ABORTED;
		goto premature_exit;
	}


	/* Save mailbox command for debug */
	ha->mcp = mcp;

	ql_dbg(ql_dbg_mbx, vha, 0x1006,
	    "Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]);

	spin_lock_irqsave(&ha->hardware_lock, flags);

	if (ha->flags.purge_mbox || chip_reset != ha->chip_reset ||
	    ha->flags.mbox_busy) {
		rval = QLA_ABORTED;
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		goto premature_exit;
	}
	ha->flags.mbox_busy = 1;

	/* Load mailbox registers. */
	if (IS_P3P_TYPE(ha))
		optr = (uint16_t __iomem *)&reg->isp82.mailbox_in[0];
	else if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha)))
		optr = (uint16_t __iomem *)&reg->isp24.mailbox0;
	else
		optr = (uint16_t __iomem *)MAILBOX_REG(ha, &reg->isp, 0);

	iptr = mcp->mb;
	command = mcp->mb[0];
	mboxes = mcp->out_mb;

	ql_dbg(ql_dbg_mbx, vha, 0x1111,
	    "Mailbox registers (OUT):\n");
	for (cnt = 0; cnt < ha->mbx_count; cnt++) {
		if (IS_QLA2200(ha) && cnt == 8)
			optr =
			    (uint16_t __iomem *)MAILBOX_REG(ha, &reg->isp, 8);
		if (mboxes & BIT_0) {
			ql_dbg(ql_dbg_mbx, vha, 0x1112,
			    "mbox[%d]<-0x%04x\n", cnt, *iptr);
			WRT_REG_WORD(optr, *iptr);
		}

		mboxes >>= 1;
		optr++;
		iptr++;
	}

	ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1117,
	    "I/O Address = %p.\n", optr);

	/* Issue set host interrupt command to send cmd out. */
	ha->flags.mbox_int = 0;
	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);

	/* Unlock mbx registers and wait for interrupt */
	ql_dbg(ql_dbg_mbx, vha, 0x100f,
	    "Going to unlock irq & waiting for interrupts. "
	    "jiffies=%lx.\n", jiffies);

	/* Wait for mbx cmd completion until timeout */
	atomic_inc(&ha->num_pend_mbx_stage2);
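	/*
	 * Descriptive note: two completion paths follow. When interrupts are
	 * usable (init done, no ISP abort in progress) the command is kicked
	 * off and we sleep on ha->mbx_intr_comp until the mailbox completion
	 * interrupt fires; otherwise the host interrupt is set and the
	 * response queue is polled via qla2x00_poll() until
	 * ha->flags.mbox_int is set or the timeout expires.
	 */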
	if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) {
		set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);

		if (IS_P3P_TYPE(ha))
			WRT_REG_DWORD(&reg->isp82.hint, HINT_MBX_INT_PENDING);
		else if (IS_FWI2_CAPABLE(ha))
			WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
		else
			WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		wait_time = jiffies;
		atomic_inc(&ha->num_pend_mbx_stage3);
		if (!wait_for_completion_timeout(&ha->mbx_intr_comp,
		    mcp->tov * HZ)) {
			if (chip_reset != ha->chip_reset) {
				spin_lock_irqsave(&ha->hardware_lock, flags);
				ha->flags.mbox_busy = 0;
				spin_unlock_irqrestore(&ha->hardware_lock,
				    flags);
				atomic_dec(&ha->num_pend_mbx_stage2);
				atomic_dec(&ha->num_pend_mbx_stage3);
				rval = QLA_ABORTED;
				goto premature_exit;
			}
			ql_dbg(ql_dbg_mbx, vha, 0x117a,
			    "cmd=%x Timeout.\n", command);
			spin_lock_irqsave(&ha->hardware_lock, flags);
			clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
			spin_unlock_irqrestore(&ha->hardware_lock, flags);

		} else if (ha->flags.purge_mbox ||
		    chip_reset != ha->chip_reset) {
			spin_lock_irqsave(&ha->hardware_lock, flags);
			ha->flags.mbox_busy = 0;
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
			atomic_dec(&ha->num_pend_mbx_stage2);
			atomic_dec(&ha->num_pend_mbx_stage3);
			rval = QLA_ABORTED;
			goto premature_exit;
		}
		atomic_dec(&ha->num_pend_mbx_stage3);

		if (time_after(jiffies, wait_time + 5 * HZ))
			ql_log(ql_log_warn, vha, 0x1015, "cmd=0x%x, waited %d msecs\n",
			    command, jiffies_to_msecs(jiffies - wait_time));
	} else {
		ql_dbg(ql_dbg_mbx, vha, 0x1011,
		    "Cmd=%x Polling Mode.\n", command);

		if (IS_P3P_TYPE(ha)) {
			if (RD_REG_DWORD(&reg->isp82.hint) &
			    HINT_MBX_INT_PENDING) {
				ha->flags.mbox_busy = 0;
				spin_unlock_irqrestore(&ha->hardware_lock,
				    flags);
				atomic_dec(&ha->num_pend_mbx_stage2);
				ql_dbg(ql_dbg_mbx, vha, 0x1012,
				    "Pending mailbox timeout, exiting.\n");
				rval = QLA_FUNCTION_TIMEOUT;
				goto premature_exit;
			}
			WRT_REG_DWORD(&reg->isp82.hint, HINT_MBX_INT_PENDING);
		} else if (IS_FWI2_CAPABLE(ha))
			WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
		else
			WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */
		while (!ha->flags.mbox_int) {
			if (ha->flags.purge_mbox ||
			    chip_reset != ha->chip_reset) {
				spin_lock_irqsave(&ha->hardware_lock, flags);
				ha->flags.mbox_busy = 0;
				spin_unlock_irqrestore(&ha->hardware_lock,
				    flags);
				atomic_dec(&ha->num_pend_mbx_stage2);
				rval = QLA_ABORTED;
				goto premature_exit;
			}

			if (time_after(jiffies, wait_time))
				break;

			/*
			 * Check if it's UNLOADING, cause we cannot poll in
			 * this case, or else a NULL pointer dereference
			 * is triggered.
			 */
			if (unlikely(test_bit(UNLOADING, &base_vha->dpc_flags)))
				return QLA_FUNCTION_TIMEOUT;

			/* Check for pending interrupts. */
			qla2x00_poll(ha->rsp_q_map[0]);

			if (!ha->flags.mbox_int &&
			    !(IS_QLA2200(ha) &&
			    command == MBC_LOAD_RISC_RAM_EXTENDED))
				msleep(10);
		} /* while */
		ql_dbg(ql_dbg_mbx, vha, 0x1013,
		    "Waited %d sec.\n",
		    (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ));
	}
	atomic_dec(&ha->num_pend_mbx_stage2);

	/* Check whether we timed out */
	if (ha->flags.mbox_int) {
		uint16_t *iptr2;

		ql_dbg(ql_dbg_mbx, vha, 0x1014,
		    "Cmd=%x completed.\n", command);

		/* Got interrupt. Clear the flag. */
		ha->flags.mbox_int = 0;
		clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);

		if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
			spin_lock_irqsave(&ha->hardware_lock, flags);
			ha->flags.mbox_busy = 0;
			spin_unlock_irqrestore(&ha->hardware_lock, flags);

			/* Setting Link-Down error */
			mcp->mb[0] = MBS_LINK_DOWN_ERROR;
			ha->mcp = NULL;
			rval = QLA_FUNCTION_FAILED;
			ql_log(ql_log_warn, vha, 0xd048,
			    "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
			goto premature_exit;
		}

		if (ha->mailbox_out[0] != MBS_COMMAND_COMPLETE) {
			ql_dbg(ql_dbg_mbx, vha, 0x11ff,
			    "mb_out[0] = %#x <> %#x\n", ha->mailbox_out[0],
			    MBS_COMMAND_COMPLETE);
			rval = QLA_FUNCTION_FAILED;
		}

		/* Load return mailbox registers. */
		iptr2 = mcp->mb;
		iptr = (uint16_t *)&ha->mailbox_out[0];
		mboxes = mcp->in_mb;

		ql_dbg(ql_dbg_mbx, vha, 0x1113,
		    "Mailbox registers (IN):\n");
		for (cnt = 0; cnt < ha->mbx_count; cnt++) {
			if (mboxes & BIT_0) {
				*iptr2 = *iptr;
				ql_dbg(ql_dbg_mbx, vha, 0x1114,
				    "mbox[%d]->0x%04x\n", cnt, *iptr2);
			}

			mboxes >>= 1;
			iptr2++;
			iptr++;
		}
	} else {

		uint16_t mb[8];
		uint32_t ictrl, host_status, hccr;
		uint16_t w;

		if (IS_FWI2_CAPABLE(ha)) {
			mb[0] = RD_REG_WORD(&reg->isp24.mailbox0);
			mb[1] = RD_REG_WORD(&reg->isp24.mailbox1);
			mb[2] = RD_REG_WORD(&reg->isp24.mailbox2);
			mb[3] = RD_REG_WORD(&reg->isp24.mailbox3);
			mb[7] = RD_REG_WORD(&reg->isp24.mailbox7);
			ictrl = RD_REG_DWORD(&reg->isp24.ictrl);
			host_status = RD_REG_DWORD(&reg->isp24.host_status);
			hccr = RD_REG_DWORD(&reg->isp24.hccr);

			ql_log(ql_log_warn, vha, 0xd04c,
			    "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
			    "mb[0-3]=[0x%x 0x%x 0x%x 0x%x] mb7 0x%x host_status 0x%x hccr 0x%x\n",
			    command, ictrl, jiffies, mb[0], mb[1], mb[2], mb[3],
			    mb[7], host_status, hccr);

		} else {
			mb[0] = RD_MAILBOX_REG(ha, &reg->isp, 0);
			ictrl = RD_REG_WORD(&reg->isp.ictrl);
			ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119,
			    "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
			    "mb[0]=0x%x\n", command, ictrl, jiffies, mb[0]);
		}
		ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019);

		/* Capture FW dump only, if PCI device active */
		if (!pci_channel_offline(vha->hw->pdev)) {
			pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
			if (w == 0xffff || ictrl == 0xffffffff ||
			    (chip_reset != ha->chip_reset)) {
				/* This is a special case: if the driver is
				 * unloading and the PCI device has gone into
				 * a bad state due to a PCI error condition,
				 * only the PCI ERR flag would be set, so do
				 * a premature exit for that case.
				 */
				spin_lock_irqsave(&ha->hardware_lock, flags);
				ha->flags.mbox_busy = 0;
				spin_unlock_irqrestore(&ha->hardware_lock,
				    flags);
				rval = QLA_FUNCTION_TIMEOUT;
				goto premature_exit;
			}

			/* Attempt to capture a firmware dump for further
			 * analysis of the current firmware state. We do not
			 * need to do this if we are intentionally generating
			 * a dump.
			 */
			if (mcp->mb[0] != MBC_GEN_SYSTEM_ERROR)
				ha->isp_ops->fw_dump(vha, 0);
			rval = QLA_FUNCTION_TIMEOUT;
		}
	}
	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->flags.mbox_busy = 0;
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	/* Clean up */
	ha->mcp = NULL;

	if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) {
		ql_dbg(ql_dbg_mbx, vha, 0x101a,
		    "Checking for additional resp interrupt.\n");

		/* polling mode for non isp_abort commands. */
		qla2x00_poll(ha->rsp_q_map[0]);
	}

	if (rval == QLA_FUNCTION_TIMEOUT &&
	    mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) {
		if (!io_lock_on || (mcp->flags & IOCTL_CMD) ||
		    ha->flags.eeh_busy) {
			/* not in dpc. schedule it for dpc to take over. */
			ql_dbg(ql_dbg_mbx, vha, 0x101b,
			    "Timeout, schedule isp_abort_needed.\n");

			if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
			    !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
			    !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
				if (IS_QLA82XX(ha)) {
					ql_dbg(ql_dbg_mbx, vha, 0x112a,
					    "disabling pause transmit on port "
					    "0 & 1.\n");
					qla82xx_wr_32(ha,
					    QLA82XX_CRB_NIU + 0x98,
					    CRB_NIU_XG_PAUSE_CTL_P0|
					    CRB_NIU_XG_PAUSE_CTL_P1);
				}
				ql_log(ql_log_info, base_vha, 0x101c,
				    "Mailbox cmd timeout occurred, cmd=0x%x, "
				    "mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP "
				    "abort.\n", command, mcp->mb[0],
				    ha->flags.eeh_busy);
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
			}
		} else if (current == ha->dpc_thread) {
			/* call abort directly since we are in the DPC thread */
			ql_dbg(ql_dbg_mbx, vha, 0x101d,
			    "Timeout, calling abort_isp.\n");

			if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
			    !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
			    !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
				if (IS_QLA82XX(ha)) {
					ql_dbg(ql_dbg_mbx, vha, 0x112b,
					    "disabling pause transmit on port "
					    "0 & 1.\n");
					qla82xx_wr_32(ha,
					    QLA82XX_CRB_NIU + 0x98,
					    CRB_NIU_XG_PAUSE_CTL_P0|
					    CRB_NIU_XG_PAUSE_CTL_P1);
				}
				ql_log(ql_log_info, base_vha, 0x101e,
				    "Mailbox cmd timeout occurred, cmd=0x%x, "
				    "mb[0]=0x%x. Scheduling ISP abort ",
				    command, mcp->mb[0]);
				set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
				clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				/* Allow next mbx cmd to come in. */
				complete(&ha->mbx_cmd_comp);
				if (ha->isp_ops->abort_isp(vha)) {
					/* Failed. retry later. */
					set_bit(ISP_ABORT_NEEDED,
					    &vha->dpc_flags);
				}
				clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
				ql_dbg(ql_dbg_mbx, vha, 0x101f,
				    "Finished abort_isp.\n");
				goto mbx_done;
			}
		}
	}

premature_exit:
	/* Allow next mbx cmd to come in. */
	complete(&ha->mbx_cmd_comp);

mbx_done:
	if (rval == QLA_ABORTED) {
		ql_log(ql_log_info, vha, 0xd035,
		    "Chip Reset in progress. Purging Mbox cmd=0x%x.\n",
		    mcp->mb[0]);
	} else if (rval) {
		if (ql2xextended_error_logging & (ql_dbg_disc|ql_dbg_mbx)) {
			pr_warn("%s [%s]-%04x:%ld: **** Failed=%x", QL_MSGHDR,
			    dev_name(&ha->pdev->dev), 0x1020+0x800,
			    vha->host_no, rval);
			mboxes = mcp->in_mb;
			cnt = 4;
			for (i = 0; i < ha->mbx_count && cnt; i++, mboxes >>= 1)
				if (mboxes & BIT_0) {
					printk(" mb[%u]=%x", i, mcp->mb[i]);
					cnt--;
				}
			pr_warn(" cmd=%x ****\n", command);
		}
		if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha))) {
			ql_dbg(ql_dbg_mbx, vha, 0x1198,
			    "host_status=%#x intr_ctrl=%#x intr_status=%#x\n",
			    RD_REG_DWORD(&reg->isp24.host_status),
			    RD_REG_DWORD(&reg->isp24.ictrl),
			    RD_REG_DWORD(&reg->isp24.istatus));
		} else {
			ql_dbg(ql_dbg_mbx, vha, 0x1206,
			    "ctrl_status=%#x ictrl=%#x istatus=%#x\n",
			    RD_REG_WORD(&reg->isp.ctrl_status),
			    RD_REG_WORD(&reg->isp.ictrl),
			    RD_REG_WORD(&reg->isp.istatus));
		}
	} else {
		ql_dbg(ql_dbg_mbx, base_vha, 0x1021, "Done %s.\n", __func__);
	}

	return rval;
}
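
/*
 * Typical caller pattern for qla2x00_mailbox_command() (an illustrative
 * sketch only, mirroring the wrapper functions below):
 *
 *	mbx_cmd_t mc, *mcp = &mc;
 *
 *	mcp->mb[0] = MBC_GET_FIRMWARE_STATE;	// command code
 *	mcp->out_mb = MBX_0;			// registers written to fw
 *	mcp->in_mb = MBX_1|MBX_0;		// registers to read back
 *	mcp->tov = MBX_TOV_SECONDS;		// timeout in seconds
 *	mcp->flags = 0;
 *	rval = qla2x00_mailbox_command(vha, mcp);
 *	// on success the requested registers are copied back into mcp->mb[]
 */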

int
qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
    uint32_t risc_code_size)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1022,
	    "Entered %s.\n", __func__);

	if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) {
		mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED;
		mcp->mb[8] = MSW(risc_addr);
		mcp->out_mb = MBX_8|MBX_0;
	} else {
		mcp->mb[0] = MBC_LOAD_RISC_RAM;
		mcp->out_mb = MBX_0;
	}
	mcp->mb[1] = LSW(risc_addr);
	mcp->mb[2] = MSW(req_dma);
	mcp->mb[3] = LSW(req_dma);
	mcp->mb[6] = MSW(MSD(req_dma));
	mcp->mb[7] = LSW(MSD(req_dma));
	mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
	if (IS_FWI2_CAPABLE(ha)) {
		mcp->mb[4] = MSW(risc_code_size);
		mcp->mb[5] = LSW(risc_code_size);
		mcp->out_mb |= MBX_5|MBX_4;
	} else {
		mcp->mb[4] = LSW(risc_code_size);
		mcp->out_mb |= MBX_4;
	}

	mcp->in_mb = MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1023,
		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
		    rval, mcp->mb[0], mcp->mb[1]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1024,
		    "Done %s.\n", __func__);
	}

	return rval;
}
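
/*
 * Worked example of the 64-bit DMA address split used above (assuming the
 * usual LSW/MSW 16-bit and LSD/MSD 32-bit helper macros from qla_def.h):
 * for req_dma = 0x0000001234567890,
 *	mb[3] = LSW(req_dma)      = 0x7890
 *	mb[2] = MSW(req_dma)      = 0x3456
 *	mb[7] = LSW(MSD(req_dma)) = 0x0012
 *	mb[6] = MSW(MSD(req_dma)) = 0x0000
 */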

#define	NVME_ENABLE_FLAG	BIT_3

/*
 * qla2x00_execute_fw
 *	Start adapter firmware.
 *
 * Input:
 *	ha = adapter block pointer.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	u8 semaphore = 0;
#define EXE_FW_FORCE_SEMAPHORE BIT_7
	u8 retry = 3;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1025,
	    "Entered %s.\n", __func__);

again:
	mcp->mb[0] = MBC_EXECUTE_FIRMWARE;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_0;
	if (IS_FWI2_CAPABLE(ha)) {
		mcp->mb[1] = MSW(risc_addr);
		mcp->mb[2] = LSW(risc_addr);
		mcp->mb[3] = 0;
		mcp->mb[4] = 0;
		mcp->mb[11] = 0;

		/* Enable BPM? */
		if (ha->flags.lr_detected) {
			mcp->mb[4] = BIT_0;
			if (IS_BPM_RANGE_CAPABLE(ha))
				mcp->mb[4] |=
				    ha->lr_distance << LR_DIST_FW_POS;
		}

		if (ql2xnvmeenable && (IS_QLA27XX(ha) || IS_QLA28XX(ha)))
			mcp->mb[4] |= NVME_ENABLE_FLAG;

		if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
			struct nvram_81xx *nv = ha->nvram;
			/* set minimum speed if specified in nvram */
			if (nv->min_supported_speed >= 2 &&
			    nv->min_supported_speed <= 5) {
				mcp->mb[4] |= BIT_4;
				mcp->mb[11] |= nv->min_supported_speed & 0xF;
				mcp->out_mb |= MBX_11;
				mcp->in_mb |= BIT_5;
				vha->min_supported_speed =
				    nv->min_supported_speed;
			}
		}

		if (ha->flags.exlogins_enabled)
			mcp->mb[4] |= ENABLE_EXTENDED_LOGIN;

		if (ha->flags.exchoffld_enabled)
			mcp->mb[4] |= ENABLE_EXCHANGE_OFFLD;

		if (semaphore)
			mcp->mb[11] |= EXE_FW_FORCE_SEMAPHORE;

		mcp->out_mb |= MBX_4 | MBX_3 | MBX_2 | MBX_1 | MBX_11;
		mcp->in_mb |= MBX_3 | MBX_2 | MBX_1;
	} else {
		mcp->mb[1] = LSW(risc_addr);
		mcp->out_mb |= MBX_1;
		if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
			mcp->mb[2] = 0;
			mcp->out_mb |= MBX_2;
		}
	}

	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		if (IS_QLA28XX(ha) && rval == QLA_COMMAND_ERROR &&
		    mcp->mb[1] == 0x27 && retry) {
			semaphore = 1;
			retry--;
			ql_dbg(ql_dbg_async, vha, 0x1026,
			    "Exe FW: force semaphore.\n");
			goto again;
		}

		ql_dbg(ql_dbg_mbx, vha, 0x1026,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
		return rval;
	}

	if (!IS_FWI2_CAPABLE(ha))
		goto done;

	ha->fw_ability_mask = mcp->mb[3] << 16 | mcp->mb[2];
	ql_dbg(ql_dbg_mbx, vha, 0x119a,
	    "fw_ability_mask=%x.\n", ha->fw_ability_mask);
	ql_dbg(ql_dbg_mbx, vha, 0x1027, "exchanges=%x.\n", mcp->mb[1]);
	if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
		ha->max_supported_speed = mcp->mb[2] & (BIT_0|BIT_1);
		ql_dbg(ql_dbg_mbx, vha, 0x119b, "max_supported_speed=%s.\n",
		    ha->max_supported_speed == 0 ? "16Gps" :
		    ha->max_supported_speed == 1 ? "32Gps" :
		    ha->max_supported_speed == 2 ? "64Gps" : "unknown");
		if (vha->min_supported_speed) {
			ha->min_supported_speed = mcp->mb[5] &
			    (BIT_0 | BIT_1 | BIT_2);
			ql_dbg(ql_dbg_mbx, vha, 0x119c,
			    "min_supported_speed=%s.\n",
			    ha->min_supported_speed == 6 ? "64Gps" :
			    ha->min_supported_speed == 5 ? "32Gps" :
			    ha->min_supported_speed == 4 ? "16Gps" :
			    ha->min_supported_speed == 3 ? "8Gps" :
			    ha->min_supported_speed == 2 ? "4Gps" : "unknown");
		}
	}

done:
	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1028,
	    "Done %s.\n", __func__);

	return rval;
}

/*
 * qla_get_exlogin_status
 *	Get extended login status
 *	uses the memory offload control/status Mailbox
 *
 * Input:
 *	ha: adapter state pointer.
 *	fwopt: firmware options
 *
 * Returns:
 *	qla2x00 local function status
 *
 * Context:
 *	Kernel context.
 */
#define	FETCH_XLOGINS_STAT	0x8
int
qla_get_exlogin_status(scsi_qla_host_t *vha, uint16_t *buf_sz,
    uint16_t *ex_logins_cnt)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118f,
	    "Entered %s\n", __func__);

	memset(mcp->mb, 0 , sizeof(mcp->mb));
	mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
	mcp->mb[1] = FETCH_XLOGINS_STAT;
	mcp->out_mb = MBX_1|MBX_0;
	mcp->in_mb = MBX_10|MBX_4|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;

	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1115, "Failed=%x.\n", rval);
	} else {
		*buf_sz = mcp->mb[4];
		*ex_logins_cnt = mcp->mb[10];

		ql_log(ql_log_info, vha, 0x1190,
		    "buffer size 0x%x, exchange login count=%d\n",
		    mcp->mb[4], mcp->mb[10]);

		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1116,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla_set_exlogin_mem_cfg
 *	set extended login memory configuration
 *	Mbx needs to be issued before init_cb is set
 *
 * Input:
 *	ha: adapter state pointer.
 *	buffer: buffer pointer
 *	phys_addr: physical address of buffer
 *	size: size of buffer
 *	TARGET_QUEUE_LOCK must be released
 *	ADAPTER_STATE_LOCK must be released
 *
 * Returns:
 *	qla2x00 local function status code.
 *
 * Context:
 *	Kernel context.
 */
#define CONFIG_XLOGINS_MEM	0x3
int
qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111a,
	    "Entered %s.\n", __func__);

	memset(mcp->mb, 0 , sizeof(mcp->mb));
	mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
	mcp->mb[1] = CONFIG_XLOGINS_MEM;
	mcp->mb[2] = MSW(phys_addr);
	mcp->mb[3] = LSW(phys_addr);
	mcp->mb[6] = MSW(MSD(phys_addr));
	mcp->mb[7] = LSW(MSD(phys_addr));
	mcp->mb[8] = MSW(ha->exlogin_size);
	mcp->mb[9] = LSW(ha->exlogin_size);
	mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_11|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x111b, "Failed=%x.\n", rval);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c,
		    "Done %s.\n", __func__);
	}

	return rval;
}
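
/*
 * Illustrative two-step flow (a sketch, not the driver's exact init code):
 * query the firmware for the extended-login buffer requirements, size the
 * host buffer from the returned values, then hand its DMA address to the
 * firmware before the init-firmware mailbox command is issued.
 *
 *	uint16_t sz = 0, cnt = 0;
 *
 *	if (qla_get_exlogin_status(vha, &sz, &cnt) == QLA_SUCCESS) {
 *		ha->exlogin_size = sz * cnt;	// assumed sizing
 *		buf = dma_alloc_coherent(&ha->pdev->dev, ha->exlogin_size,
 *		    &buf_dma, GFP_KERNEL);
 *		if (buf)
 *			qla_set_exlogin_mem_cfg(vha, buf_dma);
 *	}
 */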

/*
 * qla_get_exchoffld_status
 *	Get exchange offload status
 *	uses the memory offload control/status Mailbox
 *
 * Input:
 *	ha: adapter state pointer.
 *	fwopt: firmware options
 *
 * Returns:
 *	qla2x00 local function status
 *
 * Context:
 *	Kernel context.
 */
#define	FETCH_XCHOFFLD_STAT	0x2
int
qla_get_exchoffld_status(scsi_qla_host_t *vha, uint16_t *buf_sz,
    uint16_t *ex_logins_cnt)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1019,
	    "Entered %s\n", __func__);

	memset(mcp->mb, 0 , sizeof(mcp->mb));
	mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
	mcp->mb[1] = FETCH_XCHOFFLD_STAT;
	mcp->out_mb = MBX_1|MBX_0;
	mcp->in_mb = MBX_10|MBX_4|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;

	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1155, "Failed=%x.\n", rval);
	} else {
		*buf_sz = mcp->mb[4];
		*ex_logins_cnt = mcp->mb[10];

		ql_log(ql_log_info, vha, 0x118e,
		    "buffer size 0x%x, exchange offload count=%d\n",
		    mcp->mb[4], mcp->mb[10]);

		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1156,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla_set_exchoffld_mem_cfg
 *	Set exchange offload memory configuration
 *	Mbx needs to be issued before init_cb is set
 *
 * Input:
 *	ha: adapter state pointer.
 *	buffer: buffer pointer
 *	phys_addr: physical address of buffer
 *	size: size of buffer
 *	TARGET_QUEUE_LOCK must be released
 *	ADAPTER_STATE_LOCK must be released
 *
 * Returns:
 *	qla2x00 local function status code.
 *
 * Context:
 *	Kernel context.
 */
#define CONFIG_XCHOFFLD_MEM	0x3
int
qla_set_exchoffld_mem_cfg(scsi_qla_host_t *vha)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1157,
	    "Entered %s.\n", __func__);

	memset(mcp->mb, 0 , sizeof(mcp->mb));
	mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
	mcp->mb[1] = CONFIG_XCHOFFLD_MEM;
	mcp->mb[2] = MSW(ha->exchoffld_buf_dma);
	mcp->mb[3] = LSW(ha->exchoffld_buf_dma);
	mcp->mb[6] = MSW(MSD(ha->exchoffld_buf_dma));
	mcp->mb[7] = LSW(MSD(ha->exchoffld_buf_dma));
	mcp->mb[8] = MSW(ha->exchoffld_size);
	mcp->mb[9] = LSW(ha->exchoffld_size);
	mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_11|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x1158, "Failed=%x.\n", rval);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1192,
		    "Done %s.\n", __func__);
	}

	return rval;
}
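
/*
 * Descriptive note: the exchange offload configuration above mirrors the
 * extended-login flow: query sizes with qla_get_exchoffld_status(), size
 * the DMA buffer recorded in ha->exchoffld_buf_dma/ha->exchoffld_size
 * accordingly, and call qla_set_exchoffld_mem_cfg() before the
 * init-firmware mailbox command.
 */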

/*
 * qla2x00_get_fw_version
 *	Get firmware version.
 *
 * Input:
 *	ha: adapter state pointer.
 *	major: pointer for major number.
 *	minor: pointer for minor number.
 *	subminor: pointer for subminor number.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_get_fw_version(scsi_qla_host_t *vha)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1029,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_GET_FIRMWARE_VERSION;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	if (IS_QLA81XX(vha->hw) || IS_QLA8031(ha) || IS_QLA8044(ha))
		mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8;
	if (IS_FWI2_CAPABLE(ha))
		mcp->in_mb |= MBX_17|MBX_16|MBX_15;
	if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
		mcp->in_mb |=
		    MBX_25|MBX_24|MBX_23|MBX_22|MBX_21|MBX_20|MBX_19|MBX_18|
		    MBX_14|MBX_13|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7;

	mcp->flags = 0;
	mcp->tov = MBX_TOV_SECONDS;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS)
		goto failed;

	/* Return mailbox data. */
	ha->fw_major_version = mcp->mb[1];
	ha->fw_minor_version = mcp->mb[2];
	ha->fw_subminor_version = mcp->mb[3];
	ha->fw_attributes = mcp->mb[6];
	if (IS_QLA2100(vha->hw) || IS_QLA2200(vha->hw))
		ha->fw_memory_size = 0x1FFFF;	/* Defaults to 128KB. */
	else
		ha->fw_memory_size = (mcp->mb[5] << 16) | mcp->mb[4];

	if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
		ha->mpi_version[0] = mcp->mb[10] & 0xff;
		ha->mpi_version[1] = mcp->mb[11] >> 8;
		ha->mpi_version[2] = mcp->mb[11] & 0xff;
		ha->mpi_capabilities = (mcp->mb[12] << 16) | mcp->mb[13];
		ha->phy_version[0] = mcp->mb[8] & 0xff;
		ha->phy_version[1] = mcp->mb[9] >> 8;
		ha->phy_version[2] = mcp->mb[9] & 0xff;
	}

	if (IS_FWI2_CAPABLE(ha)) {
		ha->fw_attributes_h = mcp->mb[15];
		ha->fw_attributes_ext[0] = mcp->mb[16];
		ha->fw_attributes_ext[1] = mcp->mb[17];
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1139,
		    "%s: FW_attributes Upper: 0x%x, Lower: 0x%x.\n",
		    __func__, mcp->mb[15], mcp->mb[6]);
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x112f,
		    "%s: Ext_FwAttributes Upper: 0x%x, Lower: 0x%x.\n",
		    __func__, mcp->mb[17], mcp->mb[16]);

		if (ha->fw_attributes_h & 0x4)
			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118d,
			    "%s: Firmware supports Extended Login 0x%x\n",
			    __func__, ha->fw_attributes_h);

		if (ha->fw_attributes_h & 0x8)
			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1191,
			    "%s: Firmware supports Exchange Offload 0x%x\n",
			    __func__, ha->fw_attributes_h);

		/*
		 * FW supports nvme and driver load parameter requested nvme.
		 * BIT 26 of fw_attributes indicates NVMe support.
		 */
		if ((ha->fw_attributes_h &
		    (FW_ATTR_H_NVME | FW_ATTR_H_NVME_UPDATED)) &&
		    ql2xnvmeenable) {
			if (ha->fw_attributes_h & FW_ATTR_H_NVME_FBURST)
				vha->flags.nvme_first_burst = 1;

			vha->flags.nvme_enabled = 1;
			ql_log(ql_log_info, vha, 0xd302,
			    "%s: FC-NVMe is Enabled (0x%x)\n",
			    __func__, ha->fw_attributes_h);
		}
	}

	if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
		ha->serdes_version[0] = mcp->mb[7] & 0xff;
		ha->serdes_version[1] = mcp->mb[8] >> 8;
		ha->serdes_version[2] = mcp->mb[8] & 0xff;
		ha->mpi_version[0] = mcp->mb[10] & 0xff;
		ha->mpi_version[1] = mcp->mb[11] >> 8;
		ha->mpi_version[2] = mcp->mb[11] & 0xff;
		ha->pep_version[0] = mcp->mb[13] & 0xff;
		ha->pep_version[1] = mcp->mb[14] >> 8;
		ha->pep_version[2] = mcp->mb[14] & 0xff;
		ha->fw_shared_ram_start = (mcp->mb[19] << 16) | mcp->mb[18];
		ha->fw_shared_ram_end = (mcp->mb[21] << 16) | mcp->mb[20];
		ha->fw_ddr_ram_start = (mcp->mb[23] << 16) | mcp->mb[22];
		ha->fw_ddr_ram_end = (mcp->mb[25] << 16) | mcp->mb[24];
		if (IS_QLA28XX(ha)) {
			if (mcp->mb[16] & BIT_10)
				ha->flags.secure_fw = 1;

			ql_log(ql_log_info, vha, 0xffff,
			    "Secure Flash Update in FW: %s\n",
			    (ha->flags.secure_fw) ? "Supported" :
			    "Not Supported");
		}
	}

failed:
	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x102a, "Failed=%x.\n", rval);
	} else {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102b,
		    "Done %s.\n", __func__);
	}
	return rval;
}

/*
 * qla2x00_get_fw_options
 *	Get firmware options.
 *
 * Input:
 *	ha = adapter block pointer.
 *	fwopt = pointer for firmware options.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102c,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_GET_FIRMWARE_OPTION;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x102d, "Failed=%x.\n", rval);
	} else {
		fwopts[0] = mcp->mb[0];
		fwopts[1] = mcp->mb[1];
		fwopts[2] = mcp->mb[2];
		fwopts[3] = mcp->mb[3];

		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102e,
		    "Done %s.\n", __func__);
	}

	return rval;
}
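
/*
 * Illustrative read-modify-write of the firmware options (a sketch only;
 * the real option bits are defined by the firmware interface and used by
 * the driver's update_fw_options() helpers elsewhere in the driver):
 *
 *	uint16_t fwopts[16] = { 0 };
 *
 *	if (qla2x00_get_fw_options(vha, fwopts) == QLA_SUCCESS) {
 *		fwopts[1] |= BIT_0;	// example bit only
 *		qla2x00_set_fw_options(vha, fwopts);
 *	}
 */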

/*
 * qla2x00_set_fw_options
 *	Set firmware options.
 *
 * Input:
 *	ha = adapter block pointer.
 *	fwopt = pointer for firmware options.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102f,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_SET_FIRMWARE_OPTION;
	mcp->mb[1] = fwopts[1];
	mcp->mb[2] = fwopts[2];
	mcp->mb[3] = fwopts[3];
	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_0;
	if (IS_FWI2_CAPABLE(vha->hw)) {
		mcp->in_mb |= MBX_1;
		mcp->mb[10] = fwopts[10];
		mcp->out_mb |= MBX_10;
	} else {
		mcp->mb[10] = fwopts[10];
		mcp->mb[11] = fwopts[11];
		mcp->mb[12] = 0;	/* Undocumented, but used */
		mcp->out_mb |= MBX_12|MBX_11|MBX_10;
	}
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	fwopts[0] = mcp->mb[0];

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x1030,
		    "Failed=%x (%x/%x).\n", rval, mcp->mb[0], mcp->mb[1]);
	} else {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1031,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2x00_mbx_reg_test
 *	Mailbox register wrap test.
 *
 * Input:
 *	ha = adapter block pointer.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_mbx_reg_test(scsi_qla_host_t *vha)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1032,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST;
	mcp->mb[1] = 0xAAAA;
	mcp->mb[2] = 0x5555;
	mcp->mb[3] = 0xAA55;
	mcp->mb[4] = 0x55AA;
	mcp->mb[5] = 0xA5A5;
	mcp->mb[6] = 0x5A5A;
	mcp->mb[7] = 0x2525;
	mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval == QLA_SUCCESS) {
		if (mcp->mb[1] != 0xAAAA || mcp->mb[2] != 0x5555 ||
		    mcp->mb[3] != 0xAA55 || mcp->mb[4] != 0x55AA)
			rval = QLA_FUNCTION_FAILED;
		if (mcp->mb[5] != 0xA5A5 || mcp->mb[6] != 0x5A5A ||
		    mcp->mb[7] != 0x2525)
			rval = QLA_FUNCTION_FAILED;
	}

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x1033, "Failed=%x.\n", rval);
	} else {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1034,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2x00_verify_checksum
 *	Verify firmware checksum.
 *
 * Input:
 *	ha = adapter block pointer.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1035,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_VERIFY_CHECKSUM;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_0;
	if (IS_FWI2_CAPABLE(vha->hw)) {
		mcp->mb[1] = MSW(risc_addr);
		mcp->mb[2] = LSW(risc_addr);
		mcp->out_mb |= MBX_2|MBX_1;
		mcp->in_mb |= MBX_2|MBX_1;
	} else {
		mcp->mb[1] = LSW(risc_addr);
		mcp->out_mb |= MBX_1;
		mcp->in_mb |= MBX_1;
	}

	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1036,
		    "Failed=%x chm sum=%x.\n", rval, IS_FWI2_CAPABLE(vha->hw) ?
		    (mcp->mb[2] << 16) | mcp->mb[1] : mcp->mb[1]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1037,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2x00_issue_iocb
 *	Issue IOCB using mailbox command
 *
 * Input:
 *	ha = adapter state pointer.
 *	buffer = buffer pointer.
 *	phys_addr = physical address of buffer.
 *	size = size of buffer.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
    dma_addr_t phys_addr, size_t size, uint32_t tov)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	if (!vha->hw->flags.fw_started)
		return QLA_INVALID_COMMAND;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1038,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_IOCB_COMMAND_A64;
	mcp->mb[1] = 0;
	mcp->mb[2] = MSW(LSD(phys_addr));
	mcp->mb[3] = LSW(LSD(phys_addr));
	mcp->mb[6] = MSW(MSD(phys_addr));
	mcp->mb[7] = LSW(MSD(phys_addr));
	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_1|MBX_0;
	mcp->tov = tov;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x1039, "Failed=%x.\n", rval);
	} else {
		sts_entry_t *sts_entry = buffer;

		/* Mask reserved bits. */
		sts_entry->entry_status &=
		    IS_FWI2_CAPABLE(vha->hw) ? RF_MASK_24XX : RF_MASK;
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103a,
		    "Done %s (status=%x).\n", __func__,
		    sts_entry->entry_status);
	}

	return rval;
}

int
qla2x00_issue_iocb(scsi_qla_host_t *vha, void *buffer, dma_addr_t phys_addr,
    size_t size)
{
	return qla2x00_issue_iocb_timeout(vha, buffer, phys_addr, size,
	    MBX_TOV_SECONDS);
}
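
/*
 * Illustrative use of qla2x00_issue_iocb() (a sketch under assumptions, not
 * code taken from a real caller): the IOCB is built in a DMA-coherent
 * buffer, handed to the firmware by physical address, and the completion
 * status is read back from the same buffer.
 *
 *	iocb = dma_alloc_coherent(&ha->pdev->dev, sizeof(*iocb),
 *	    &iocb_dma, GFP_KERNEL);
 *	// ... fill in the IOCB entry ...
 *	rval = qla2x00_issue_iocb(vha, iocb, iocb_dma, sizeof(*iocb));
 *	// on success, inspect ((sts_entry_t *)iocb)->entry_status
 */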

/*
 * qla2x00_abort_command
 *	Abort command aborts a specified IOCB.
 *
 * Input:
 *	ha = adapter block pointer.
 *	sp = SB structure pointer.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_abort_command(srb_t *sp)
{
	unsigned long flags = 0;
	int rval;
	uint32_t handle = 0;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	fc_port_t *fcport = sp->fcport;
	scsi_qla_host_t *vha = fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103b,
	    "Entered %s.\n", __func__);

	if (sp->qpair)
		req = sp->qpair->req;
	else
		req = vha->req;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
		if (req->outstanding_cmds[handle] == sp)
			break;
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (handle == req->num_outstanding_cmds) {
		/* command not found */
		return QLA_FUNCTION_FAILED;
	}

	mcp->mb[0] = MBC_ABORT_COMMAND;
	if (HAS_EXTENDED_IDS(ha))
		mcp->mb[1] = fcport->loop_id;
	else
		mcp->mb[1] = fcport->loop_id << 8;
	mcp->mb[2] = (uint16_t)handle;
	mcp->mb[3] = (uint16_t)(handle >> 16);
	mcp->mb[6] = (uint16_t)cmd->device->lun;
	mcp->out_mb = MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x103c, "Failed=%x.\n", rval);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103d,
		    "Done %s.\n", __func__);
	}

	return rval;
}

int
qla2x00_abort_target(struct fc_port *fcport, uint64_t l, int tag)
{
	int rval, rval2;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	scsi_qla_host_t *vha;

	vha = fcport->vha;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103e,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_ABORT_TARGET;
	mcp->out_mb = MBX_9|MBX_2|MBX_1|MBX_0;
	if (HAS_EXTENDED_IDS(vha->hw)) {
		mcp->mb[1] = fcport->loop_id;
		mcp->mb[10] = 0;
		mcp->out_mb |= MBX_10;
	} else {
		mcp->mb[1] = fcport->loop_id << 8;
	}
	mcp->mb[2] = vha->hw->loop_reset_delay;
	mcp->mb[9] = vha->vp_idx;

	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103f,
		    "Failed=%x.\n", rval);
	}

	/* Issue marker IOCB. */
	rval2 = qla2x00_marker(vha, vha->hw->base_qpair, fcport->loop_id, 0,
	    MK_SYNC_ID);
	if (rval2 != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1040,
		    "Failed to issue marker IOCB (%x).\n", rval2);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1041,
		    "Done %s.\n", __func__);
	}

	return rval;
}

int
qla2x00_lun_reset(struct fc_port *fcport, uint64_t l, int tag)
{
	int rval, rval2;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	scsi_qla_host_t *vha;

	vha = fcport->vha;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1042,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_LUN_RESET;
	mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
	if (HAS_EXTENDED_IDS(vha->hw))
		mcp->mb[1] = fcport->loop_id;
	else
		mcp->mb[1] = fcport->loop_id << 8;
	mcp->mb[2] = (u32)l;
	mcp->mb[3] = 0;
	mcp->mb[9] = vha->vp_idx;

	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1043, "Failed=%x.\n", rval);
	}

	/* Issue marker IOCB. */
	rval2 = qla2x00_marker(vha, vha->hw->base_qpair, fcport->loop_id, l,
	    MK_SYNC_ID_LUN);
	if (rval2 != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1044,
		    "Failed to issue marker IOCB (%x).\n", rval2);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1045,
		    "Done %s.\n", __func__);
	}

	return rval;
}
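
/*
 * Descriptive note: qla2x00_abort_target() and qla2x00_lun_reset() follow
 * the same pattern: issue the task-management mailbox command, then send a
 * marker IOCB (MK_SYNC_ID / MK_SYNC_ID_LUN) so firmware and driver state
 * for that target/LUN are resynchronized. The returned status is that of
 * the mailbox command; a marker failure is only logged.
 */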

/*
 * qla2x00_get_adapter_id
 *	Get adapter ID and topology.
 *
 * Input:
 *	ha = adapter block pointer.
 *	id = pointer for loop ID.
 *	al_pa = pointer for AL_PA.
 *	area = pointer for area.
 *	domain = pointer for domain.
 *	top = pointer for topology.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
    uint8_t *area, uint8_t *domain, uint16_t *top, uint16_t *sw_cap)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1046,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID;
	mcp->mb[9] = vha->vp_idx;
	mcp->out_mb = MBX_9|MBX_0;
	mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	if (IS_CNA_CAPABLE(vha->hw))
		mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10;
	if (IS_FWI2_CAPABLE(vha->hw))
		mcp->in_mb |= MBX_19|MBX_18|MBX_17|MBX_16;
	if (IS_QLA27XX(vha->hw) || IS_QLA28XX(vha->hw))
		mcp->in_mb |= MBX_15;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (mcp->mb[0] == MBS_COMMAND_ERROR)
		rval = QLA_COMMAND_ERROR;
	else if (mcp->mb[0] == MBS_INVALID_COMMAND)
		rval = QLA_INVALID_COMMAND;

	/* Return data. */
	*id = mcp->mb[1];
	*al_pa = LSB(mcp->mb[2]);
	*area = MSB(mcp->mb[2]);
	*domain = LSB(mcp->mb[3]);
	*top = mcp->mb[6];
	*sw_cap = mcp->mb[7];

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x1047, "Failed=%x.\n", rval);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1048,
		    "Done %s.\n", __func__);

		if (IS_CNA_CAPABLE(vha->hw)) {
			vha->fcoe_vlan_id = mcp->mb[9] & 0xfff;
			vha->fcoe_fcf_idx = mcp->mb[10];
			vha->fcoe_vn_port_mac[5] = mcp->mb[11] >> 8;
			vha->fcoe_vn_port_mac[4] = mcp->mb[11] & 0xff;
			vha->fcoe_vn_port_mac[3] = mcp->mb[12] >> 8;
			vha->fcoe_vn_port_mac[2] = mcp->mb[12] & 0xff;
			vha->fcoe_vn_port_mac[1] = mcp->mb[13] >> 8;
			vha->fcoe_vn_port_mac[0] = mcp->mb[13] & 0xff;
		}
		/* If FA-WWN supported */
		if (IS_FAWWN_CAPABLE(vha->hw)) {
			if (mcp->mb[7] & BIT_14) {
				vha->port_name[0] = MSB(mcp->mb[16]);
				vha->port_name[1] = LSB(mcp->mb[16]);
				vha->port_name[2] = MSB(mcp->mb[17]);
				vha->port_name[3] = LSB(mcp->mb[17]);
				vha->port_name[4] = MSB(mcp->mb[18]);
				vha->port_name[5] = LSB(mcp->mb[18]);
				vha->port_name[6] = MSB(mcp->mb[19]);
				vha->port_name[7] = LSB(mcp->mb[19]);
				fc_host_port_name(vha->host) =
				    wwn_to_u64(vha->port_name);
				ql_dbg(ql_dbg_mbx, vha, 0x10ca,
				    "FA-WWN acquired %016llx\n",
				    wwn_to_u64(vha->port_name));
			}
		}

		if (IS_QLA27XX(vha->hw) || IS_QLA28XX(vha->hw))
			vha->bbcr = mcp->mb[15];
	}

	return rval;
}

/*
 * qla2x00_get_retry_cnt
 *	Get current firmware login retry count and delay.
 *
 * Input:
 *	ha = adapter block pointer.
 *	retry_cnt = pointer to login retry count.
 *	tov = pointer to login timeout value.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov,
    uint16_t *r_a_tov)
{
	int rval;
	uint16_t ratov;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1049,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_GET_RETRY_COUNT;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x104a,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		/* Convert returned data and check our values. */
		*r_a_tov = mcp->mb[3] / 2;
		ratov = (mcp->mb[3]/2) / 10;	/* mb[3] value is in 100ms */
		if (mcp->mb[1] * ratov > (*retry_cnt) * (*tov)) {
			/* Update to the larger values */
			*retry_cnt = (uint8_t)mcp->mb[1];
			*tov = ratov;
		}

		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104b,
		    "Done %s mb3=%d ratov=%d.\n", __func__, mcp->mb[3], ratov);
	}

	return rval;
}

/*
 * qla2x00_init_firmware
 *	Initialize adapter firmware.
 *
 * Input:
 *	ha = adapter block pointer.
 *	dptr = Initialization control block pointer.
 *	size = size of initialization control block.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104c,
	    "Entered %s.\n", __func__);

	if (IS_P3P_TYPE(ha) && ql2xdbwr)
		qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr,
		    (0x04 | (ha->portnum << 5) | (0 << 8) | (0 << 16)));

	if (ha->flags.npiv_supported)
		mcp->mb[0] = MBC_MID_INITIALIZE_FIRMWARE;
	else
		mcp->mb[0] = MBC_INITIALIZE_FIRMWARE;

	mcp->mb[1] = 0;
	mcp->mb[2] = MSW(ha->init_cb_dma);
	mcp->mb[3] = LSW(ha->init_cb_dma);
	mcp->mb[6] = MSW(MSD(ha->init_cb_dma));
	mcp->mb[7] = LSW(MSD(ha->init_cb_dma));
	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	if (ha->ex_init_cb && ha->ex_init_cb->ex_version) {
		mcp->mb[1] = BIT_0;
		mcp->mb[10] = MSW(ha->ex_init_cb_dma);
		mcp->mb[11] = LSW(ha->ex_init_cb_dma);
		mcp->mb[12] = MSW(MSD(ha->ex_init_cb_dma));
		mcp->mb[13] = LSW(MSD(ha->ex_init_cb_dma));
		mcp->mb[14] = sizeof(*ha->ex_init_cb);
		mcp->out_mb |= MBX_14|MBX_13|MBX_12|MBX_11|MBX_10;
	}
	/* 1 and 2 should normally be captured. */
	mcp->in_mb = MBX_2|MBX_1|MBX_0;
	if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
		/* mb3 is additional info about the installed SFP. */
		mcp->in_mb |= MBX_3;
	mcp->buf_size = size;
	mcp->flags = MBX_DMA_OUT;
	mcp->tov = MBX_TOV_SECONDS;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x104d,
		    "Failed=%x mb[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x.\n",
		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3]);
		if (ha->init_cb) {
			ql_dbg(ql_dbg_mbx, vha, 0x104d, "init_cb:\n");
			ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha,
			    0x0104d, ha->init_cb, sizeof(*ha->init_cb));
		}
		if (ha->ex_init_cb && ha->ex_init_cb->ex_version) {
			ql_dbg(ql_dbg_mbx, vha, 0x104d, "ex_init_cb:\n");
			ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha,
			    0x0104d, ha->ex_init_cb, sizeof(*ha->ex_init_cb));
		}
	} else {
		if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
			if (mcp->mb[2] == 6 || mcp->mb[3] == 2)
				ql_dbg(ql_dbg_mbx, vha, 0x119d,
				    "Invalid SFP/Validation Failed\n");
		}
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104e,
		    "Done %s.\n", __func__);
	}

	return rval;
}


/*
 * qla2x00_get_port_database
 *	Issue normal/enhanced get port database mailbox command
 *	and copy device name as necessary.
 *
 * Input:
 *	ha = adapter state pointer.
 *	dev = structure pointer.
 *	opt = enhanced cmd option byte.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	port_database_t *pd;
	struct port_database_24xx *pd24;
	dma_addr_t pd_dma;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104f,
	    "Entered %s.\n", __func__);

	pd24 = NULL;
	pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
	if (pd == NULL) {
		ql_log(ql_log_warn, vha, 0x1050,
		    "Failed to allocate port database structure.\n");
		fcport->query = 0;
		return QLA_MEMORY_ALLOC_FAILED;
	}

	mcp->mb[0] = MBC_GET_PORT_DATABASE;
	if (opt != 0 && !IS_FWI2_CAPABLE(ha))
		mcp->mb[0] = MBC_ENHANCED_GET_PORT_DATABASE;
	mcp->mb[2] = MSW(pd_dma);
	mcp->mb[3] = LSW(pd_dma);
	mcp->mb[6] = MSW(MSD(pd_dma));
	mcp->mb[7] = LSW(MSD(pd_dma));
	mcp->mb[9] = vha->vp_idx;
	mcp->out_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
	mcp->in_mb = MBX_0;
	if (IS_FWI2_CAPABLE(ha)) {
		mcp->mb[1] = fcport->loop_id;
		mcp->mb[10] = opt;
		mcp->out_mb |= MBX_10|MBX_1;
		mcp->in_mb |= MBX_1;
	} else if (HAS_EXTENDED_IDS(ha)) {
		mcp->mb[1] = fcport->loop_id;
		mcp->mb[10] = opt;
		mcp->out_mb |= MBX_10|MBX_1;
	} else {
		mcp->mb[1] = fcport->loop_id << 8 | opt;
		mcp->out_mb |= MBX_1;
	}
	mcp->buf_size = IS_FWI2_CAPABLE(ha) ?
	    PORT_DATABASE_24XX_SIZE : PORT_DATABASE_SIZE;
	mcp->flags = MBX_DMA_IN;
	mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS)
		goto gpd_error_out;

	if (IS_FWI2_CAPABLE(ha)) {
		uint64_t zero = 0;
		u8 current_login_state, last_login_state;

		pd24 = (struct port_database_24xx *) pd;

		/* Check for logged in state. */
		if (NVME_TARGET(ha, fcport)) {
			current_login_state = pd24->current_login_state >> 4;
			last_login_state = pd24->last_login_state >> 4;
		} else {
			current_login_state = pd24->current_login_state & 0xf;
			last_login_state = pd24->last_login_state & 0xf;
		}
		fcport->current_login_state = pd24->current_login_state;
		fcport->last_login_state = pd24->last_login_state;

		/* Check for logged in state. */
		if (current_login_state != PDS_PRLI_COMPLETE &&
		    last_login_state != PDS_PRLI_COMPLETE) {
			ql_dbg(ql_dbg_mbx, vha, 0x119a,
			    "Unable to verify login-state (%x/%x) for loop_id %x.\n",
			    current_login_state, last_login_state,
			    fcport->loop_id);
			rval = QLA_FUNCTION_FAILED;

			if (!fcport->query)
				goto gpd_error_out;
		}

		if (fcport->loop_id == FC_NO_LOOP_ID ||
		    (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
		     memcmp(fcport->port_name, pd24->port_name, 8))) {
			/* We lost the device mid way. */
			rval = QLA_NOT_LOGGED_IN;
			goto gpd_error_out;
		}

		/* Names are little-endian. */
		memcpy(fcport->node_name, pd24->node_name, WWN_SIZE);
		memcpy(fcport->port_name, pd24->port_name, WWN_SIZE);

		/* Get port_id of device. */
		fcport->d_id.b.domain = pd24->port_id[0];
		fcport->d_id.b.area = pd24->port_id[1];
		fcport->d_id.b.al_pa = pd24->port_id[2];
		fcport->d_id.b.rsvd_1 = 0;

		/* If not target must be initiator or unknown type. */
		if ((pd24->prli_svc_param_word_3[0] & BIT_4) == 0)
			fcport->port_type = FCT_INITIATOR;
		else
			fcport->port_type = FCT_TARGET;

		/* Passback COS information. */
		fcport->supported_classes = (pd24->flags & PDF_CLASS_2) ?
		    FC_COS_CLASS2 : FC_COS_CLASS3;

		if (pd24->prli_svc_param_word_3[0] & BIT_7)
			fcport->flags |= FCF_CONF_COMP_SUPPORTED;
	} else {
		uint64_t zero = 0;

		/* Check for logged in state. */
		if (pd->master_state != PD_STATE_PORT_LOGGED_IN &&
		    pd->slave_state != PD_STATE_PORT_LOGGED_IN) {
			ql_dbg(ql_dbg_mbx, vha, 0x100a,
			    "Unable to verify login-state (%x/%x) - "
			    "portid=%02x%02x%02x.\n", pd->master_state,
			    pd->slave_state, fcport->d_id.b.domain,
			    fcport->d_id.b.area, fcport->d_id.b.al_pa);
			rval = QLA_FUNCTION_FAILED;
			goto gpd_error_out;
		}

		if (fcport->loop_id == FC_NO_LOOP_ID ||
		    (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
		     memcmp(fcport->port_name, pd->port_name, 8))) {
			/* We lost the device mid way. */
			rval = QLA_NOT_LOGGED_IN;
			goto gpd_error_out;
		}

		/* Names are little-endian. */
		memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
		memcpy(fcport->port_name, pd->port_name, WWN_SIZE);

		/* Get port_id of device. */
		fcport->d_id.b.domain = pd->port_id[0];
		fcport->d_id.b.area = pd->port_id[3];
		fcport->d_id.b.al_pa = pd->port_id[2];
		fcport->d_id.b.rsvd_1 = 0;

		/* If not target must be initiator or unknown type. */
		if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
			fcport->port_type = FCT_INITIATOR;
		else
			fcport->port_type = FCT_TARGET;

		/* Passback COS information. */
		fcport->supported_classes = (pd->options & BIT_4) ?
		    FC_COS_CLASS2 : FC_COS_CLASS3;
2015 FC_COS_CLASS2 : FC_COS_CLASS3; 2016 } 2017 2018 gpd_error_out: 2019 dma_pool_free(ha->s_dma_pool, pd, pd_dma); 2020 fcport->query = 0; 2021 2022 if (rval != QLA_SUCCESS) { 2023 ql_dbg(ql_dbg_mbx, vha, 0x1052, 2024 "Failed=%x mb[0]=%x mb[1]=%x.\n", rval, 2025 mcp->mb[0], mcp->mb[1]); 2026 } else { 2027 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1053, 2028 "Done %s.\n", __func__); 2029 } 2030 2031 return rval; 2032 } 2033 2034 int 2035 qla24xx_get_port_database(scsi_qla_host_t *vha, u16 nport_handle, 2036 struct port_database_24xx *pdb) 2037 { 2038 mbx_cmd_t mc; 2039 mbx_cmd_t *mcp = &mc; 2040 dma_addr_t pdb_dma; 2041 int rval; 2042 2043 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1115, 2044 "Entered %s.\n", __func__); 2045 2046 memset(pdb, 0, sizeof(*pdb)); 2047 2048 pdb_dma = dma_map_single(&vha->hw->pdev->dev, pdb, 2049 sizeof(*pdb), DMA_FROM_DEVICE); 2050 if (!pdb_dma) { 2051 ql_log(ql_log_warn, vha, 0x1116, "Failed to map dma buffer.\n"); 2052 return QLA_MEMORY_ALLOC_FAILED; 2053 } 2054 2055 mcp->mb[0] = MBC_GET_PORT_DATABASE; 2056 mcp->mb[1] = nport_handle; 2057 mcp->mb[2] = MSW(LSD(pdb_dma)); 2058 mcp->mb[3] = LSW(LSD(pdb_dma)); 2059 mcp->mb[6] = MSW(MSD(pdb_dma)); 2060 mcp->mb[7] = LSW(MSD(pdb_dma)); 2061 mcp->mb[9] = 0; 2062 mcp->mb[10] = 0; 2063 mcp->out_mb = MBX_10|MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 2064 mcp->in_mb = MBX_1|MBX_0; 2065 mcp->buf_size = sizeof(*pdb); 2066 mcp->flags = MBX_DMA_IN; 2067 mcp->tov = vha->hw->login_timeout * 2; 2068 rval = qla2x00_mailbox_command(vha, mcp); 2069 2070 if (rval != QLA_SUCCESS) { 2071 ql_dbg(ql_dbg_mbx, vha, 0x111a, 2072 "Failed=%x mb[0]=%x mb[1]=%x.\n", 2073 rval, mcp->mb[0], mcp->mb[1]); 2074 } else { 2075 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111b, 2076 "Done %s.\n", __func__); 2077 } 2078 2079 dma_unmap_single(&vha->hw->pdev->dev, pdb_dma, 2080 sizeof(*pdb), DMA_FROM_DEVICE); 2081 2082 return rval; 2083 } 2084 2085 /* 2086 * qla2x00_get_firmware_state 2087 * Get adapter firmware state. 2088 * 2089 * Input: 2090 * ha = adapter block pointer. 2091 * dptr = pointer for firmware state. 2092 * TARGET_QUEUE_LOCK must be released. 2093 * ADAPTER_STATE_LOCK must be released. 2094 * 2095 * Returns: 2096 * qla2x00 local function return status code. 2097 * 2098 * Context: 2099 * Kernel context. 2100 */ 2101 int 2102 qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states) 2103 { 2104 int rval; 2105 mbx_cmd_t mc; 2106 mbx_cmd_t *mcp = &mc; 2107 struct qla_hw_data *ha = vha->hw; 2108 2109 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1054, 2110 "Entered %s.\n", __func__); 2111 2112 mcp->mb[0] = MBC_GET_FIRMWARE_STATE; 2113 mcp->out_mb = MBX_0; 2114 if (IS_FWI2_CAPABLE(vha->hw)) 2115 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 2116 else 2117 mcp->in_mb = MBX_1|MBX_0; 2118 mcp->tov = MBX_TOV_SECONDS; 2119 mcp->flags = 0; 2120 rval = qla2x00_mailbox_command(vha, mcp); 2121 2122 /* Return firmware states. 
*/ 2123 states[0] = mcp->mb[1]; 2124 if (IS_FWI2_CAPABLE(vha->hw)) { 2125 states[1] = mcp->mb[2]; 2126 states[2] = mcp->mb[3]; /* SFP info */ 2127 states[3] = mcp->mb[4]; 2128 states[4] = mcp->mb[5]; 2129 states[5] = mcp->mb[6]; /* DPORT status */ 2130 } 2131 2132 if (rval != QLA_SUCCESS) { 2133 /*EMPTY*/ 2134 ql_dbg(ql_dbg_mbx, vha, 0x1055, "Failed=%x.\n", rval); 2135 } else { 2136 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 2137 if (mcp->mb[2] == 6 || mcp->mb[3] == 2) 2138 ql_dbg(ql_dbg_mbx, vha, 0x119e, 2139 "Invalid SFP/Validation Failed\n"); 2140 } 2141 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1056, 2142 "Done %s.\n", __func__); 2143 } 2144 2145 return rval; 2146 } 2147 2148 /* 2149 * qla2x00_get_port_name 2150 * Issue get port name mailbox command. 2151 * Returned name is in big endian format. 2152 * 2153 * Input: 2154 * ha = adapter block pointer. 2155 * loop_id = loop ID of device. 2156 * name = pointer for name. 2157 * TARGET_QUEUE_LOCK must be released. 2158 * ADAPTER_STATE_LOCK must be released. 2159 * 2160 * Returns: 2161 * qla2x00 local function return status code. 2162 * 2163 * Context: 2164 * Kernel context. 2165 */ 2166 int 2167 qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name, 2168 uint8_t opt) 2169 { 2170 int rval; 2171 mbx_cmd_t mc; 2172 mbx_cmd_t *mcp = &mc; 2173 2174 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1057, 2175 "Entered %s.\n", __func__); 2176 2177 mcp->mb[0] = MBC_GET_PORT_NAME; 2178 mcp->mb[9] = vha->vp_idx; 2179 mcp->out_mb = MBX_9|MBX_1|MBX_0; 2180 if (HAS_EXTENDED_IDS(vha->hw)) { 2181 mcp->mb[1] = loop_id; 2182 mcp->mb[10] = opt; 2183 mcp->out_mb |= MBX_10; 2184 } else { 2185 mcp->mb[1] = loop_id << 8 | opt; 2186 } 2187 2188 mcp->in_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 2189 mcp->tov = MBX_TOV_SECONDS; 2190 mcp->flags = 0; 2191 rval = qla2x00_mailbox_command(vha, mcp); 2192 2193 if (rval != QLA_SUCCESS) { 2194 /*EMPTY*/ 2195 ql_dbg(ql_dbg_mbx, vha, 0x1058, "Failed=%x.\n", rval); 2196 } else { 2197 if (name != NULL) { 2198 /* This function returns name in big endian. */ 2199 name[0] = MSB(mcp->mb[2]); 2200 name[1] = LSB(mcp->mb[2]); 2201 name[2] = MSB(mcp->mb[3]); 2202 name[3] = LSB(mcp->mb[3]); 2203 name[4] = MSB(mcp->mb[6]); 2204 name[5] = LSB(mcp->mb[6]); 2205 name[6] = MSB(mcp->mb[7]); 2206 name[7] = LSB(mcp->mb[7]); 2207 } 2208 2209 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1059, 2210 "Done %s.\n", __func__); 2211 } 2212 2213 return rval; 2214 } 2215 2216 /* 2217 * qla24xx_link_initialization 2218 * Issue link initialization mailbox command. 2219 * 2220 * Input: 2221 * ha = adapter block pointer. 2222 * TARGET_QUEUE_LOCK must be released. 2223 * ADAPTER_STATE_LOCK must be released. 2224 * 2225 * Returns: 2226 * qla2x00 local function return status code. 2227 * 2228 * Context: 2229 * Kernel context. 
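 *
 * Illustrative caller sketch (not taken from this file; it only
 * assumes a valid vha reference and normal kernel context):
 *
 *	if (qla24xx_link_initialize(vha) != QLA_SUCCESS)
 *		... fall back to a LIP/ISP recovery path ...
 *
 * The routine itself rejects non-FWI2 and CNA adapters with
 * QLA_FUNCTION_FAILED, so callers do not need an extra ISP-type check.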
2230 */ 2231 int 2232 qla24xx_link_initialize(scsi_qla_host_t *vha) 2233 { 2234 int rval; 2235 mbx_cmd_t mc; 2236 mbx_cmd_t *mcp = &mc; 2237 2238 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1152, 2239 "Entered %s.\n", __func__); 2240 2241 if (!IS_FWI2_CAPABLE(vha->hw) || IS_CNA_CAPABLE(vha->hw)) 2242 return QLA_FUNCTION_FAILED; 2243 2244 mcp->mb[0] = MBC_LINK_INITIALIZATION; 2245 mcp->mb[1] = BIT_4; 2246 if (vha->hw->operating_mode == LOOP) 2247 mcp->mb[1] |= BIT_6; 2248 else 2249 mcp->mb[1] |= BIT_5; 2250 mcp->mb[2] = 0; 2251 mcp->mb[3] = 0; 2252 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 2253 mcp->in_mb = MBX_0; 2254 mcp->tov = MBX_TOV_SECONDS; 2255 mcp->flags = 0; 2256 rval = qla2x00_mailbox_command(vha, mcp); 2257 2258 if (rval != QLA_SUCCESS) { 2259 ql_dbg(ql_dbg_mbx, vha, 0x1153, "Failed=%x.\n", rval); 2260 } else { 2261 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1154, 2262 "Done %s.\n", __func__); 2263 } 2264 2265 return rval; 2266 } 2267 2268 /* 2269 * qla2x00_lip_reset 2270 * Issue LIP reset mailbox command. 2271 * 2272 * Input: 2273 * ha = adapter block pointer. 2274 * TARGET_QUEUE_LOCK must be released. 2275 * ADAPTER_STATE_LOCK must be released. 2276 * 2277 * Returns: 2278 * qla2x00 local function return status code. 2279 * 2280 * Context: 2281 * Kernel context. 2282 */ 2283 int 2284 qla2x00_lip_reset(scsi_qla_host_t *vha) 2285 { 2286 int rval; 2287 mbx_cmd_t mc; 2288 mbx_cmd_t *mcp = &mc; 2289 2290 ql_dbg(ql_dbg_disc, vha, 0x105a, 2291 "Entered %s.\n", __func__); 2292 2293 if (IS_CNA_CAPABLE(vha->hw)) { 2294 /* Logout across all FCFs. */ 2295 mcp->mb[0] = MBC_LIP_FULL_LOGIN; 2296 mcp->mb[1] = BIT_1; 2297 mcp->mb[2] = 0; 2298 mcp->out_mb = MBX_2|MBX_1|MBX_0; 2299 } else if (IS_FWI2_CAPABLE(vha->hw)) { 2300 mcp->mb[0] = MBC_LIP_FULL_LOGIN; 2301 mcp->mb[1] = BIT_4; 2302 mcp->mb[2] = 0; 2303 mcp->mb[3] = vha->hw->loop_reset_delay; 2304 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 2305 } else { 2306 mcp->mb[0] = MBC_LIP_RESET; 2307 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 2308 if (HAS_EXTENDED_IDS(vha->hw)) { 2309 mcp->mb[1] = 0x00ff; 2310 mcp->mb[10] = 0; 2311 mcp->out_mb |= MBX_10; 2312 } else { 2313 mcp->mb[1] = 0xff00; 2314 } 2315 mcp->mb[2] = vha->hw->loop_reset_delay; 2316 mcp->mb[3] = 0; 2317 } 2318 mcp->in_mb = MBX_0; 2319 mcp->tov = MBX_TOV_SECONDS; 2320 mcp->flags = 0; 2321 rval = qla2x00_mailbox_command(vha, mcp); 2322 2323 if (rval != QLA_SUCCESS) { 2324 /*EMPTY*/ 2325 ql_dbg(ql_dbg_mbx, vha, 0x105b, "Failed=%x.\n", rval); 2326 } else { 2327 /*EMPTY*/ 2328 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105c, 2329 "Done %s.\n", __func__); 2330 } 2331 2332 return rval; 2333 } 2334 2335 /* 2336 * qla2x00_send_sns 2337 * Send SNS command. 2338 * 2339 * Input: 2340 * ha = adapter block pointer. 2341 * sns = pointer for command. 2342 * cmd_size = command size. 2343 * buf_size = response/command size. 2344 * TARGET_QUEUE_LOCK must be released. 2345 * ADAPTER_STATE_LOCK must be released. 2346 * 2347 * Returns: 2348 * qla2x00 local function return status code. 2349 * 2350 * Context: 2351 * Kernel context. 
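 *
 * Note: cmd_size is handed to the firmware in mailbox register 1 and,
 * as the existing name-server callers do, is expressed in 16-bit
 * words. Illustrative sketch (assumes the legacy SNS buffer and the
 * GA_NXT_SNS_CMD_SIZE definition used by the GA_NXT query code):
 *
 *	rval = qla2x00_send_sns(vha, ha->sns_cmd_dma,
 *	    GA_NXT_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt));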
2352 */ 2353 int 2354 qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address, 2355 uint16_t cmd_size, size_t buf_size) 2356 { 2357 int rval; 2358 mbx_cmd_t mc; 2359 mbx_cmd_t *mcp = &mc; 2360 2361 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105d, 2362 "Entered %s.\n", __func__); 2363 2364 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105e, 2365 "Retry cnt=%d ratov=%d total tov=%d.\n", 2366 vha->hw->retry_count, vha->hw->login_timeout, mcp->tov); 2367 2368 mcp->mb[0] = MBC_SEND_SNS_COMMAND; 2369 mcp->mb[1] = cmd_size; 2370 mcp->mb[2] = MSW(sns_phys_address); 2371 mcp->mb[3] = LSW(sns_phys_address); 2372 mcp->mb[6] = MSW(MSD(sns_phys_address)); 2373 mcp->mb[7] = LSW(MSD(sns_phys_address)); 2374 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 2375 mcp->in_mb = MBX_0|MBX_1; 2376 mcp->buf_size = buf_size; 2377 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN; 2378 mcp->tov = (vha->hw->login_timeout * 2) + (vha->hw->login_timeout / 2); 2379 rval = qla2x00_mailbox_command(vha, mcp); 2380 2381 if (rval != QLA_SUCCESS) { 2382 /*EMPTY*/ 2383 ql_dbg(ql_dbg_mbx, vha, 0x105f, 2384 "Failed=%x mb[0]=%x mb[1]=%x.\n", 2385 rval, mcp->mb[0], mcp->mb[1]); 2386 } else { 2387 /*EMPTY*/ 2388 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1060, 2389 "Done %s.\n", __func__); 2390 } 2391 2392 return rval; 2393 } 2394 2395 int 2396 qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, 2397 uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt) 2398 { 2399 int rval; 2400 2401 struct logio_entry_24xx *lg; 2402 dma_addr_t lg_dma; 2403 uint32_t iop[2]; 2404 struct qla_hw_data *ha = vha->hw; 2405 struct req_que *req; 2406 2407 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1061, 2408 "Entered %s.\n", __func__); 2409 2410 if (vha->vp_idx && vha->qpair) 2411 req = vha->qpair->req; 2412 else 2413 req = ha->req_q_map[0]; 2414 2415 lg = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma); 2416 if (lg == NULL) { 2417 ql_log(ql_log_warn, vha, 0x1062, 2418 "Failed to allocate login IOCB.\n"); 2419 return QLA_MEMORY_ALLOC_FAILED; 2420 } 2421 2422 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE; 2423 lg->entry_count = 1; 2424 lg->handle = make_handle(req->id, lg->handle); 2425 lg->nport_handle = cpu_to_le16(loop_id); 2426 lg->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI); 2427 if (opt & BIT_0) 2428 lg->control_flags |= cpu_to_le16(LCF_COND_PLOGI); 2429 if (opt & BIT_1) 2430 lg->control_flags |= cpu_to_le16(LCF_SKIP_PRLI); 2431 lg->port_id[0] = al_pa; 2432 lg->port_id[1] = area; 2433 lg->port_id[2] = domain; 2434 lg->vp_index = vha->vp_idx; 2435 rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0, 2436 (ha->r_a_tov / 10 * 2) + 2); 2437 if (rval != QLA_SUCCESS) { 2438 ql_dbg(ql_dbg_mbx, vha, 0x1063, 2439 "Failed to issue login IOCB (%x).\n", rval); 2440 } else if (lg->entry_status != 0) { 2441 ql_dbg(ql_dbg_mbx, vha, 0x1064, 2442 "Failed to complete IOCB -- error status (%x).\n", 2443 lg->entry_status); 2444 rval = QLA_FUNCTION_FAILED; 2445 } else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) { 2446 iop[0] = le32_to_cpu(lg->io_parameter[0]); 2447 iop[1] = le32_to_cpu(lg->io_parameter[1]); 2448 2449 ql_dbg(ql_dbg_mbx, vha, 0x1065, 2450 "Failed to complete IOCB -- completion status (%x) " 2451 "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status), 2452 iop[0], iop[1]); 2453 2454 switch (iop[0]) { 2455 case LSC_SCODE_PORTID_USED: 2456 mb[0] = MBS_PORT_ID_USED; 2457 mb[1] = LSW(iop[1]); 2458 break; 2459 case LSC_SCODE_NPORT_USED: 2460 mb[0] = MBS_LOOP_ID_USED; 2461 break; 2462 case LSC_SCODE_NOLINK: 2463 case LSC_SCODE_NOIOCB: 
2464 case LSC_SCODE_NOXCB: 2465 case LSC_SCODE_CMD_FAILED: 2466 case LSC_SCODE_NOFABRIC: 2467 case LSC_SCODE_FW_NOT_READY: 2468 case LSC_SCODE_NOT_LOGGED_IN: 2469 case LSC_SCODE_NOPCB: 2470 case LSC_SCODE_ELS_REJECT: 2471 case LSC_SCODE_CMD_PARAM_ERR: 2472 case LSC_SCODE_NONPORT: 2473 case LSC_SCODE_LOGGED_IN: 2474 case LSC_SCODE_NOFLOGI_ACC: 2475 default: 2476 mb[0] = MBS_COMMAND_ERROR; 2477 break; 2478 } 2479 } else { 2480 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1066, 2481 "Done %s.\n", __func__); 2482 2483 iop[0] = le32_to_cpu(lg->io_parameter[0]); 2484 2485 mb[0] = MBS_COMMAND_COMPLETE; 2486 mb[1] = 0; 2487 if (iop[0] & BIT_4) { 2488 if (iop[0] & BIT_8) 2489 mb[1] |= BIT_1; 2490 } else 2491 mb[1] = BIT_0; 2492 2493 /* Passback COS information. */ 2494 mb[10] = 0; 2495 if (lg->io_parameter[7] || lg->io_parameter[8]) 2496 mb[10] |= BIT_0; /* Class 2. */ 2497 if (lg->io_parameter[9] || lg->io_parameter[10]) 2498 mb[10] |= BIT_1; /* Class 3. */ 2499 if (lg->io_parameter[0] & cpu_to_le32(BIT_7)) 2500 mb[10] |= BIT_7; /* Confirmed Completion 2501 * Allowed 2502 */ 2503 } 2504 2505 dma_pool_free(ha->s_dma_pool, lg, lg_dma); 2506 2507 return rval; 2508 } 2509 2510 /* 2511 * qla2x00_login_fabric 2512 * Issue login fabric port mailbox command. 2513 * 2514 * Input: 2515 * ha = adapter block pointer. 2516 * loop_id = device loop ID. 2517 * domain = device domain. 2518 * area = device area. 2519 * al_pa = device AL_PA. 2520 * status = pointer for return status. 2521 * opt = command options. 2522 * TARGET_QUEUE_LOCK must be released. 2523 * ADAPTER_STATE_LOCK must be released. 2524 * 2525 * Returns: 2526 * qla2x00 local function return status code. 2527 * 2528 * Context: 2529 * Kernel context. 2530 */ 2531 int 2532 qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, 2533 uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt) 2534 { 2535 int rval; 2536 mbx_cmd_t mc; 2537 mbx_cmd_t *mcp = &mc; 2538 struct qla_hw_data *ha = vha->hw; 2539 2540 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1067, 2541 "Entered %s.\n", __func__); 2542 2543 mcp->mb[0] = MBC_LOGIN_FABRIC_PORT; 2544 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 2545 if (HAS_EXTENDED_IDS(ha)) { 2546 mcp->mb[1] = loop_id; 2547 mcp->mb[10] = opt; 2548 mcp->out_mb |= MBX_10; 2549 } else { 2550 mcp->mb[1] = (loop_id << 8) | opt; 2551 } 2552 mcp->mb[2] = domain; 2553 mcp->mb[3] = area << 8 | al_pa; 2554 2555 mcp->in_mb = MBX_7|MBX_6|MBX_2|MBX_1|MBX_0; 2556 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2); 2557 mcp->flags = 0; 2558 rval = qla2x00_mailbox_command(vha, mcp); 2559 2560 /* Return mailbox statuses. */ 2561 if (mb != NULL) { 2562 mb[0] = mcp->mb[0]; 2563 mb[1] = mcp->mb[1]; 2564 mb[2] = mcp->mb[2]; 2565 mb[6] = mcp->mb[6]; 2566 mb[7] = mcp->mb[7]; 2567 /* COS retrieved from Get-Port-Database mailbox command. */ 2568 mb[10] = 0; 2569 } 2570 2571 if (rval != QLA_SUCCESS) { 2572 /* RLU tmp code: need to change main mailbox_command function to 2573 * return ok even when the mailbox completion value is not 2574 * SUCCESS. The caller needs to be responsible to interpret 2575 * the return values of this mailbox command if we're not 2576 * to change too much of the existing code. 
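		 *
		 * (The raw 0x40xx values tested below are firmware mailbox
		 * completion codes, e.g. 0x4005/0x4006 are the command
		 * error/parameter error statuses; they are left as literals
		 * here to match the historical code.)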
2577 */ 2578 if (mcp->mb[0] == 0x4001 || mcp->mb[0] == 0x4002 || 2579 mcp->mb[0] == 0x4003 || mcp->mb[0] == 0x4005 || 2580 mcp->mb[0] == 0x4006) 2581 rval = QLA_SUCCESS; 2582 2583 /*EMPTY*/ 2584 ql_dbg(ql_dbg_mbx, vha, 0x1068, 2585 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", 2586 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); 2587 } else { 2588 /*EMPTY*/ 2589 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1069, 2590 "Done %s.\n", __func__); 2591 } 2592 2593 return rval; 2594 } 2595 2596 /* 2597 * qla2x00_login_local_device 2598 * Issue login loop port mailbox command. 2599 * 2600 * Input: 2601 * ha = adapter block pointer. 2602 * loop_id = device loop ID. 2603 * opt = command options. 2604 * 2605 * Returns: 2606 * Return status code. 2607 * 2608 * Context: 2609 * Kernel context. 2610 * 2611 */ 2612 int 2613 qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport, 2614 uint16_t *mb_ret, uint8_t opt) 2615 { 2616 int rval; 2617 mbx_cmd_t mc; 2618 mbx_cmd_t *mcp = &mc; 2619 struct qla_hw_data *ha = vha->hw; 2620 2621 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106a, 2622 "Entered %s.\n", __func__); 2623 2624 if (IS_FWI2_CAPABLE(ha)) 2625 return qla24xx_login_fabric(vha, fcport->loop_id, 2626 fcport->d_id.b.domain, fcport->d_id.b.area, 2627 fcport->d_id.b.al_pa, mb_ret, opt); 2628 2629 mcp->mb[0] = MBC_LOGIN_LOOP_PORT; 2630 if (HAS_EXTENDED_IDS(ha)) 2631 mcp->mb[1] = fcport->loop_id; 2632 else 2633 mcp->mb[1] = fcport->loop_id << 8; 2634 mcp->mb[2] = opt; 2635 mcp->out_mb = MBX_2|MBX_1|MBX_0; 2636 mcp->in_mb = MBX_7|MBX_6|MBX_1|MBX_0; 2637 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2); 2638 mcp->flags = 0; 2639 rval = qla2x00_mailbox_command(vha, mcp); 2640 2641 /* Return mailbox statuses. */ 2642 if (mb_ret != NULL) { 2643 mb_ret[0] = mcp->mb[0]; 2644 mb_ret[1] = mcp->mb[1]; 2645 mb_ret[6] = mcp->mb[6]; 2646 mb_ret[7] = mcp->mb[7]; 2647 } 2648 2649 if (rval != QLA_SUCCESS) { 2650 /* AV tmp code: need to change main mailbox_command function to 2651 * return ok even when the mailbox completion value is not 2652 * SUCCESS. The caller needs to be responsible to interpret 2653 * the return values of this mailbox command if we're not 2654 * to change too much of the existing code. 
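		 *
		 * (Note that on FWI2-capable adapters this point is never
		 * reached; the call was redirected to qla24xx_login_fabric()
		 * at the top of this function.)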
2655 */ 2656 if (mcp->mb[0] == 0x4005 || mcp->mb[0] == 0x4006) 2657 rval = QLA_SUCCESS; 2658 2659 ql_dbg(ql_dbg_mbx, vha, 0x106b, 2660 "Failed=%x mb[0]=%x mb[1]=%x mb[6]=%x mb[7]=%x.\n", 2661 rval, mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]); 2662 } else { 2663 /*EMPTY*/ 2664 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106c, 2665 "Done %s.\n", __func__); 2666 } 2667 2668 return (rval); 2669 } 2670 2671 int 2672 qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, 2673 uint8_t area, uint8_t al_pa) 2674 { 2675 int rval; 2676 struct logio_entry_24xx *lg; 2677 dma_addr_t lg_dma; 2678 struct qla_hw_data *ha = vha->hw; 2679 struct req_que *req; 2680 2681 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106d, 2682 "Entered %s.\n", __func__); 2683 2684 lg = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma); 2685 if (lg == NULL) { 2686 ql_log(ql_log_warn, vha, 0x106e, 2687 "Failed to allocate logout IOCB.\n"); 2688 return QLA_MEMORY_ALLOC_FAILED; 2689 } 2690 2691 req = vha->req; 2692 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE; 2693 lg->entry_count = 1; 2694 lg->handle = make_handle(req->id, lg->handle); 2695 lg->nport_handle = cpu_to_le16(loop_id); 2696 lg->control_flags = 2697 cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO| 2698 LCF_FREE_NPORT); 2699 lg->port_id[0] = al_pa; 2700 lg->port_id[1] = area; 2701 lg->port_id[2] = domain; 2702 lg->vp_index = vha->vp_idx; 2703 rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0, 2704 (ha->r_a_tov / 10 * 2) + 2); 2705 if (rval != QLA_SUCCESS) { 2706 ql_dbg(ql_dbg_mbx, vha, 0x106f, 2707 "Failed to issue logout IOCB (%x).\n", rval); 2708 } else if (lg->entry_status != 0) { 2709 ql_dbg(ql_dbg_mbx, vha, 0x1070, 2710 "Failed to complete IOCB -- error status (%x).\n", 2711 lg->entry_status); 2712 rval = QLA_FUNCTION_FAILED; 2713 } else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) { 2714 ql_dbg(ql_dbg_mbx, vha, 0x1071, 2715 "Failed to complete IOCB -- completion status (%x) " 2716 "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status), 2717 le32_to_cpu(lg->io_parameter[0]), 2718 le32_to_cpu(lg->io_parameter[1])); 2719 } else { 2720 /*EMPTY*/ 2721 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1072, 2722 "Done %s.\n", __func__); 2723 } 2724 2725 dma_pool_free(ha->s_dma_pool, lg, lg_dma); 2726 2727 return rval; 2728 } 2729 2730 /* 2731 * qla2x00_fabric_logout 2732 * Issue logout fabric port mailbox command. 2733 * 2734 * Input: 2735 * ha = adapter block pointer. 2736 * loop_id = device loop ID. 2737 * TARGET_QUEUE_LOCK must be released. 2738 * ADAPTER_STATE_LOCK must be released. 2739 * 2740 * Returns: 2741 * qla2x00 local function return status code. 2742 * 2743 * Context: 2744 * Kernel context. 
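 *
 * Illustrative caller sketch (hedged; the driver normally reaches this
 * routine through the ha->isp_ops->fabric_logout method pointer):
 *
 *	ha->isp_ops->fabric_logout(vha, fcport->loop_id,
 *	    fcport->d_id.b.domain, fcport->d_id.b.area,
 *	    fcport->d_id.b.al_pa);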
2745 */ 2746 int 2747 qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, 2748 uint8_t area, uint8_t al_pa) 2749 { 2750 int rval; 2751 mbx_cmd_t mc; 2752 mbx_cmd_t *mcp = &mc; 2753 2754 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1073, 2755 "Entered %s.\n", __func__); 2756 2757 mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT; 2758 mcp->out_mb = MBX_1|MBX_0; 2759 if (HAS_EXTENDED_IDS(vha->hw)) { 2760 mcp->mb[1] = loop_id; 2761 mcp->mb[10] = 0; 2762 mcp->out_mb |= MBX_10; 2763 } else { 2764 mcp->mb[1] = loop_id << 8; 2765 } 2766 2767 mcp->in_mb = MBX_1|MBX_0; 2768 mcp->tov = MBX_TOV_SECONDS; 2769 mcp->flags = 0; 2770 rval = qla2x00_mailbox_command(vha, mcp); 2771 2772 if (rval != QLA_SUCCESS) { 2773 /*EMPTY*/ 2774 ql_dbg(ql_dbg_mbx, vha, 0x1074, 2775 "Failed=%x mb[1]=%x.\n", rval, mcp->mb[1]); 2776 } else { 2777 /*EMPTY*/ 2778 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1075, 2779 "Done %s.\n", __func__); 2780 } 2781 2782 return rval; 2783 } 2784 2785 /* 2786 * qla2x00_full_login_lip 2787 * Issue full login LIP mailbox command. 2788 * 2789 * Input: 2790 * ha = adapter block pointer. 2791 * TARGET_QUEUE_LOCK must be released. 2792 * ADAPTER_STATE_LOCK must be released. 2793 * 2794 * Returns: 2795 * qla2x00 local function return status code. 2796 * 2797 * Context: 2798 * Kernel context. 2799 */ 2800 int 2801 qla2x00_full_login_lip(scsi_qla_host_t *vha) 2802 { 2803 int rval; 2804 mbx_cmd_t mc; 2805 mbx_cmd_t *mcp = &mc; 2806 2807 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1076, 2808 "Entered %s.\n", __func__); 2809 2810 mcp->mb[0] = MBC_LIP_FULL_LOGIN; 2811 mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? BIT_4 : 0; 2812 mcp->mb[2] = 0; 2813 mcp->mb[3] = 0; 2814 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 2815 mcp->in_mb = MBX_0; 2816 mcp->tov = MBX_TOV_SECONDS; 2817 mcp->flags = 0; 2818 rval = qla2x00_mailbox_command(vha, mcp); 2819 2820 if (rval != QLA_SUCCESS) { 2821 /*EMPTY*/ 2822 ql_dbg(ql_dbg_mbx, vha, 0x1077, "Failed=%x.\n", rval); 2823 } else { 2824 /*EMPTY*/ 2825 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1078, 2826 "Done %s.\n", __func__); 2827 } 2828 2829 return rval; 2830 } 2831 2832 /* 2833 * qla2x00_get_id_list 2834 * 2835 * Input: 2836 * ha = adapter block pointer. 2837 * 2838 * Returns: 2839 * qla2x00 local function return status code. 2840 * 2841 * Context: 2842 * Kernel context. 
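 *
 * Illustrative caller sketch (the local-loop scan passes the
 * preallocated ha->gid_list / ha->gid_list_dma buffer):
 *
 *	uint16_t entries = 0;
 *
 *	if (qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma,
 *	    &entries) == QLA_SUCCESS)
 *		... walk 'entries' gid_list_info records ...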
2843 */ 2844 int 2845 qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma, 2846 uint16_t *entries) 2847 { 2848 int rval; 2849 mbx_cmd_t mc; 2850 mbx_cmd_t *mcp = &mc; 2851 2852 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1079, 2853 "Entered %s.\n", __func__); 2854 2855 if (id_list == NULL) 2856 return QLA_FUNCTION_FAILED; 2857 2858 mcp->mb[0] = MBC_GET_ID_LIST; 2859 mcp->out_mb = MBX_0; 2860 if (IS_FWI2_CAPABLE(vha->hw)) { 2861 mcp->mb[2] = MSW(id_list_dma); 2862 mcp->mb[3] = LSW(id_list_dma); 2863 mcp->mb[6] = MSW(MSD(id_list_dma)); 2864 mcp->mb[7] = LSW(MSD(id_list_dma)); 2865 mcp->mb[8] = 0; 2866 mcp->mb[9] = vha->vp_idx; 2867 mcp->out_mb |= MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2; 2868 } else { 2869 mcp->mb[1] = MSW(id_list_dma); 2870 mcp->mb[2] = LSW(id_list_dma); 2871 mcp->mb[3] = MSW(MSD(id_list_dma)); 2872 mcp->mb[6] = LSW(MSD(id_list_dma)); 2873 mcp->out_mb |= MBX_6|MBX_3|MBX_2|MBX_1; 2874 } 2875 mcp->in_mb = MBX_1|MBX_0; 2876 mcp->tov = MBX_TOV_SECONDS; 2877 mcp->flags = 0; 2878 rval = qla2x00_mailbox_command(vha, mcp); 2879 2880 if (rval != QLA_SUCCESS) { 2881 /*EMPTY*/ 2882 ql_dbg(ql_dbg_mbx, vha, 0x107a, "Failed=%x.\n", rval); 2883 } else { 2884 *entries = mcp->mb[1]; 2885 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107b, 2886 "Done %s.\n", __func__); 2887 } 2888 2889 return rval; 2890 } 2891 2892 /* 2893 * qla2x00_get_resource_cnts 2894 * Get current firmware resource counts. 2895 * 2896 * Input: 2897 * ha = adapter block pointer. 2898 * 2899 * Returns: 2900 * qla2x00 local function return status code. 2901 * 2902 * Context: 2903 * Kernel context. 2904 */ 2905 int 2906 qla2x00_get_resource_cnts(scsi_qla_host_t *vha) 2907 { 2908 struct qla_hw_data *ha = vha->hw; 2909 int rval; 2910 mbx_cmd_t mc; 2911 mbx_cmd_t *mcp = &mc; 2912 2913 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107c, 2914 "Entered %s.\n", __func__); 2915 2916 mcp->mb[0] = MBC_GET_RESOURCE_COUNTS; 2917 mcp->out_mb = MBX_0; 2918 mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 2919 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || 2920 IS_QLA27XX(ha) || IS_QLA28XX(ha)) 2921 mcp->in_mb |= MBX_12; 2922 mcp->tov = MBX_TOV_SECONDS; 2923 mcp->flags = 0; 2924 rval = qla2x00_mailbox_command(vha, mcp); 2925 2926 if (rval != QLA_SUCCESS) { 2927 /*EMPTY*/ 2928 ql_dbg(ql_dbg_mbx, vha, 0x107d, 2929 "Failed mb[0]=%x.\n", mcp->mb[0]); 2930 } else { 2931 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107e, 2932 "Done %s mb1=%x mb2=%x mb3=%x mb6=%x mb7=%x mb10=%x " 2933 "mb11=%x mb12=%x.\n", __func__, mcp->mb[1], mcp->mb[2], 2934 mcp->mb[3], mcp->mb[6], mcp->mb[7], mcp->mb[10], 2935 mcp->mb[11], mcp->mb[12]); 2936 2937 ha->orig_fw_tgt_xcb_count = mcp->mb[1]; 2938 ha->cur_fw_tgt_xcb_count = mcp->mb[2]; 2939 ha->cur_fw_xcb_count = mcp->mb[3]; 2940 ha->orig_fw_xcb_count = mcp->mb[6]; 2941 ha->cur_fw_iocb_count = mcp->mb[7]; 2942 ha->orig_fw_iocb_count = mcp->mb[10]; 2943 if (ha->flags.npiv_supported) 2944 ha->max_npiv_vports = mcp->mb[11]; 2945 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) || 2946 IS_QLA28XX(ha)) 2947 ha->fw_max_fcf_count = mcp->mb[12]; 2948 } 2949 2950 return (rval); 2951 } 2952 2953 /* 2954 * qla2x00_get_fcal_position_map 2955 * Get FCAL (LILP) position map using mailbox command 2956 * 2957 * Input: 2958 * ha = adapter state pointer. 2959 * pos_map = buffer pointer (can be NULL). 2960 * 2961 * Returns: 2962 * qla2x00 local function return status code. 2963 * 2964 * Context: 2965 * Kernel context. 
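 *
 * Note: when pos_map is supplied it must be at least FCAL_MAP_SIZE
 * bytes; byte 0 of the returned LILP map holds the count of AL_PA
 * entries that follow, which is why the debug dump below prints
 * pmap[0] + 1 bytes.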
2966 */ 2967 int 2968 qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map) 2969 { 2970 int rval; 2971 mbx_cmd_t mc; 2972 mbx_cmd_t *mcp = &mc; 2973 char *pmap; 2974 dma_addr_t pmap_dma; 2975 struct qla_hw_data *ha = vha->hw; 2976 2977 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107f, 2978 "Entered %s.\n", __func__); 2979 2980 pmap = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma); 2981 if (pmap == NULL) { 2982 ql_log(ql_log_warn, vha, 0x1080, 2983 "Memory alloc failed.\n"); 2984 return QLA_MEMORY_ALLOC_FAILED; 2985 } 2986 2987 mcp->mb[0] = MBC_GET_FC_AL_POSITION_MAP; 2988 mcp->mb[2] = MSW(pmap_dma); 2989 mcp->mb[3] = LSW(pmap_dma); 2990 mcp->mb[6] = MSW(MSD(pmap_dma)); 2991 mcp->mb[7] = LSW(MSD(pmap_dma)); 2992 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0; 2993 mcp->in_mb = MBX_1|MBX_0; 2994 mcp->buf_size = FCAL_MAP_SIZE; 2995 mcp->flags = MBX_DMA_IN; 2996 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2); 2997 rval = qla2x00_mailbox_command(vha, mcp); 2998 2999 if (rval == QLA_SUCCESS) { 3000 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1081, 3001 "mb0/mb1=%x/%X FC/AL position map size (%x).\n", 3002 mcp->mb[0], mcp->mb[1], (unsigned)pmap[0]); 3003 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111d, 3004 pmap, pmap[0] + 1); 3005 3006 if (pos_map) 3007 memcpy(pos_map, pmap, FCAL_MAP_SIZE); 3008 } 3009 dma_pool_free(ha->s_dma_pool, pmap, pmap_dma); 3010 3011 if (rval != QLA_SUCCESS) { 3012 ql_dbg(ql_dbg_mbx, vha, 0x1082, "Failed=%x.\n", rval); 3013 } else { 3014 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1083, 3015 "Done %s.\n", __func__); 3016 } 3017 3018 return rval; 3019 } 3020 3021 /* 3022 * qla2x00_get_link_status 3023 * 3024 * Input: 3025 * ha = adapter block pointer. 3026 * loop_id = device loop ID. 3027 * ret_buf = pointer to link status return buffer. 3028 * 3029 * Returns: 3030 * 0 = success. 3031 * BIT_0 = mem alloc error. 3032 * BIT_1 = mailbox error. 3033 */ 3034 int 3035 qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id, 3036 struct link_statistics *stats, dma_addr_t stats_dma) 3037 { 3038 int rval; 3039 mbx_cmd_t mc; 3040 mbx_cmd_t *mcp = &mc; 3041 uint32_t *iter = (void *)stats; 3042 ushort dwords = offsetof(typeof(*stats), link_up_cnt)/sizeof(*iter); 3043 struct qla_hw_data *ha = vha->hw; 3044 3045 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1084, 3046 "Entered %s.\n", __func__); 3047 3048 mcp->mb[0] = MBC_GET_LINK_STATUS; 3049 mcp->mb[2] = MSW(LSD(stats_dma)); 3050 mcp->mb[3] = LSW(LSD(stats_dma)); 3051 mcp->mb[6] = MSW(MSD(stats_dma)); 3052 mcp->mb[7] = LSW(MSD(stats_dma)); 3053 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0; 3054 mcp->in_mb = MBX_0; 3055 if (IS_FWI2_CAPABLE(ha)) { 3056 mcp->mb[1] = loop_id; 3057 mcp->mb[4] = 0; 3058 mcp->mb[10] = 0; 3059 mcp->out_mb |= MBX_10|MBX_4|MBX_1; 3060 mcp->in_mb |= MBX_1; 3061 } else if (HAS_EXTENDED_IDS(ha)) { 3062 mcp->mb[1] = loop_id; 3063 mcp->mb[10] = 0; 3064 mcp->out_mb |= MBX_10|MBX_1; 3065 } else { 3066 mcp->mb[1] = loop_id << 8; 3067 mcp->out_mb |= MBX_1; 3068 } 3069 mcp->tov = MBX_TOV_SECONDS; 3070 mcp->flags = IOCTL_CMD; 3071 rval = qla2x00_mailbox_command(vha, mcp); 3072 3073 if (rval == QLA_SUCCESS) { 3074 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) { 3075 ql_dbg(ql_dbg_mbx, vha, 0x1085, 3076 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 3077 rval = QLA_FUNCTION_FAILED; 3078 } else { 3079 /* Re-endianize - firmware data is le32. 
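			 * le32_to_cpus() swaps each 32-bit word in place;
			 * the loop below only walks the words ahead of the
			 * link_up_cnt field, matching the dwords count
			 * computed at the top of this function.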
*/ 3080 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1086, 3081 "Done %s.\n", __func__); 3082 for ( ; dwords--; iter++) 3083 le32_to_cpus(iter); 3084 } 3085 } else { 3086 /* Failed. */ 3087 ql_dbg(ql_dbg_mbx, vha, 0x1087, "Failed=%x.\n", rval); 3088 } 3089 3090 return rval; 3091 } 3092 3093 int 3094 qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats, 3095 dma_addr_t stats_dma, uint16_t options) 3096 { 3097 int rval; 3098 mbx_cmd_t mc; 3099 mbx_cmd_t *mcp = &mc; 3100 uint32_t *iter = (void *)stats; 3101 ushort dwords = sizeof(*stats)/sizeof(*iter); 3102 3103 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1088, 3104 "Entered %s.\n", __func__); 3105 3106 memset(&mc, 0, sizeof(mc)); 3107 mc.mb[0] = MBC_GET_LINK_PRIV_STATS; 3108 mc.mb[2] = MSW(LSD(stats_dma)); 3109 mc.mb[3] = LSW(LSD(stats_dma)); 3110 mc.mb[6] = MSW(MSD(stats_dma)); 3111 mc.mb[7] = LSW(MSD(stats_dma)); 3112 mc.mb[8] = dwords; 3113 mc.mb[9] = cpu_to_le16(vha->vp_idx); 3114 mc.mb[10] = cpu_to_le16(options); 3115 3116 rval = qla24xx_send_mb_cmd(vha, &mc); 3117 3118 if (rval == QLA_SUCCESS) { 3119 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) { 3120 ql_dbg(ql_dbg_mbx, vha, 0x1089, 3121 "Failed mb[0]=%x.\n", mcp->mb[0]); 3122 rval = QLA_FUNCTION_FAILED; 3123 } else { 3124 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108a, 3125 "Done %s.\n", __func__); 3126 /* Re-endianize - firmware data is le32. */ 3127 for ( ; dwords--; iter++) 3128 le32_to_cpus(iter); 3129 } 3130 } else { 3131 /* Failed. */ 3132 ql_dbg(ql_dbg_mbx, vha, 0x108b, "Failed=%x.\n", rval); 3133 } 3134 3135 return rval; 3136 } 3137 3138 int 3139 qla24xx_abort_command(srb_t *sp) 3140 { 3141 int rval; 3142 unsigned long flags = 0; 3143 3144 struct abort_entry_24xx *abt; 3145 dma_addr_t abt_dma; 3146 uint32_t handle; 3147 fc_port_t *fcport = sp->fcport; 3148 struct scsi_qla_host *vha = fcport->vha; 3149 struct qla_hw_data *ha = vha->hw; 3150 struct req_que *req = vha->req; 3151 struct qla_qpair *qpair = sp->qpair; 3152 3153 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c, 3154 "Entered %s.\n", __func__); 3155 3156 if (vha->flags.qpairs_available && sp->qpair) 3157 req = sp->qpair->req; 3158 else 3159 return QLA_FUNCTION_FAILED; 3160 3161 if (ql2xasynctmfenable) 3162 return qla24xx_async_abort_command(sp); 3163 3164 spin_lock_irqsave(qpair->qp_lock_ptr, flags); 3165 for (handle = 1; handle < req->num_outstanding_cmds; handle++) { 3166 if (req->outstanding_cmds[handle] == sp) 3167 break; 3168 } 3169 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); 3170 if (handle == req->num_outstanding_cmds) { 3171 /* Command not found. 
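		 * The scan above reached req->num_outstanding_cmds without
		 * matching sp, so the command has most likely already
		 * completed or been flushed; report failure to the caller.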
*/ 3172 return QLA_FUNCTION_FAILED; 3173 } 3174 3175 abt = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &abt_dma); 3176 if (abt == NULL) { 3177 ql_log(ql_log_warn, vha, 0x108d, 3178 "Failed to allocate abort IOCB.\n"); 3179 return QLA_MEMORY_ALLOC_FAILED; 3180 } 3181 3182 abt->entry_type = ABORT_IOCB_TYPE; 3183 abt->entry_count = 1; 3184 abt->handle = make_handle(req->id, abt->handle); 3185 abt->nport_handle = cpu_to_le16(fcport->loop_id); 3186 abt->handle_to_abort = make_handle(req->id, handle); 3187 abt->port_id[0] = fcport->d_id.b.al_pa; 3188 abt->port_id[1] = fcport->d_id.b.area; 3189 abt->port_id[2] = fcport->d_id.b.domain; 3190 abt->vp_index = fcport->vha->vp_idx; 3191 3192 abt->req_que_no = cpu_to_le16(req->id); 3193 3194 rval = qla2x00_issue_iocb(vha, abt, abt_dma, 0); 3195 if (rval != QLA_SUCCESS) { 3196 ql_dbg(ql_dbg_mbx, vha, 0x108e, 3197 "Failed to issue IOCB (%x).\n", rval); 3198 } else if (abt->entry_status != 0) { 3199 ql_dbg(ql_dbg_mbx, vha, 0x108f, 3200 "Failed to complete IOCB -- error status (%x).\n", 3201 abt->entry_status); 3202 rval = QLA_FUNCTION_FAILED; 3203 } else if (abt->nport_handle != cpu_to_le16(0)) { 3204 ql_dbg(ql_dbg_mbx, vha, 0x1090, 3205 "Failed to complete IOCB -- completion status (%x).\n", 3206 le16_to_cpu(abt->nport_handle)); 3207 if (abt->nport_handle == CS_IOCB_ERROR) 3208 rval = QLA_FUNCTION_PARAMETER_ERROR; 3209 else 3210 rval = QLA_FUNCTION_FAILED; 3211 } else { 3212 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1091, 3213 "Done %s.\n", __func__); 3214 } 3215 3216 dma_pool_free(ha->s_dma_pool, abt, abt_dma); 3217 3218 return rval; 3219 } 3220 3221 struct tsk_mgmt_cmd { 3222 union { 3223 struct tsk_mgmt_entry tsk; 3224 struct sts_entry_24xx sts; 3225 } p; 3226 }; 3227 3228 static int 3229 __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport, 3230 uint64_t l, int tag) 3231 { 3232 int rval, rval2; 3233 struct tsk_mgmt_cmd *tsk; 3234 struct sts_entry_24xx *sts; 3235 dma_addr_t tsk_dma; 3236 scsi_qla_host_t *vha; 3237 struct qla_hw_data *ha; 3238 struct req_que *req; 3239 struct qla_qpair *qpair; 3240 3241 vha = fcport->vha; 3242 ha = vha->hw; 3243 req = vha->req; 3244 3245 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1092, 3246 "Entered %s.\n", __func__); 3247 3248 if (vha->vp_idx && vha->qpair) { 3249 /* NPIV port */ 3250 qpair = vha->qpair; 3251 req = qpair->req; 3252 } 3253 3254 tsk = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma); 3255 if (tsk == NULL) { 3256 ql_log(ql_log_warn, vha, 0x1093, 3257 "Failed to allocate task management IOCB.\n"); 3258 return QLA_MEMORY_ALLOC_FAILED; 3259 } 3260 3261 tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE; 3262 tsk->p.tsk.entry_count = 1; 3263 tsk->p.tsk.handle = make_handle(req->id, tsk->p.tsk.handle); 3264 tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id); 3265 tsk->p.tsk.timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); 3266 tsk->p.tsk.control_flags = cpu_to_le32(type); 3267 tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa; 3268 tsk->p.tsk.port_id[1] = fcport->d_id.b.area; 3269 tsk->p.tsk.port_id[2] = fcport->d_id.b.domain; 3270 tsk->p.tsk.vp_index = fcport->vha->vp_idx; 3271 if (type == TCF_LUN_RESET) { 3272 int_to_scsilun(l, &tsk->p.tsk.lun); 3273 host_to_fcp_swap((uint8_t *)&tsk->p.tsk.lun, 3274 sizeof(tsk->p.tsk.lun)); 3275 } 3276 3277 sts = &tsk->p.sts; 3278 rval = qla2x00_issue_iocb(vha, tsk, tsk_dma, 0); 3279 if (rval != QLA_SUCCESS) { 3280 ql_dbg(ql_dbg_mbx, vha, 0x1094, 3281 "Failed to issue %s reset IOCB (%x).\n", name, rval); 3282 } else if (sts->entry_status != 0) { 3283 
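		/*
		 * A non-zero entry_status means the firmware flagged an
		 * error in the IOCB entry itself, as opposed to a command
		 * that executed and then failed (that case is reported
		 * through comp_status below).
		 */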
ql_dbg(ql_dbg_mbx, vha, 0x1095, 3284 "Failed to complete IOCB -- error status (%x).\n", 3285 sts->entry_status); 3286 rval = QLA_FUNCTION_FAILED; 3287 } else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) { 3288 ql_dbg(ql_dbg_mbx, vha, 0x1096, 3289 "Failed to complete IOCB -- completion status (%x).\n", 3290 le16_to_cpu(sts->comp_status)); 3291 rval = QLA_FUNCTION_FAILED; 3292 } else if (le16_to_cpu(sts->scsi_status) & 3293 SS_RESPONSE_INFO_LEN_VALID) { 3294 if (le32_to_cpu(sts->rsp_data_len) < 4) { 3295 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1097, 3296 "Ignoring inconsistent data length -- not enough " 3297 "response info (%d).\n", 3298 le32_to_cpu(sts->rsp_data_len)); 3299 } else if (sts->data[3]) { 3300 ql_dbg(ql_dbg_mbx, vha, 0x1098, 3301 "Failed to complete IOCB -- response (%x).\n", 3302 sts->data[3]); 3303 rval = QLA_FUNCTION_FAILED; 3304 } 3305 } 3306 3307 /* Issue marker IOCB. */ 3308 rval2 = qla2x00_marker(vha, ha->base_qpair, fcport->loop_id, l, 3309 type == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID); 3310 if (rval2 != QLA_SUCCESS) { 3311 ql_dbg(ql_dbg_mbx, vha, 0x1099, 3312 "Failed to issue marker IOCB (%x).\n", rval2); 3313 } else { 3314 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109a, 3315 "Done %s.\n", __func__); 3316 } 3317 3318 dma_pool_free(ha->s_dma_pool, tsk, tsk_dma); 3319 3320 return rval; 3321 } 3322 3323 int 3324 qla24xx_abort_target(struct fc_port *fcport, uint64_t l, int tag) 3325 { 3326 struct qla_hw_data *ha = fcport->vha->hw; 3327 3328 if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha)) 3329 return qla2x00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag); 3330 3331 return __qla24xx_issue_tmf("Target", TCF_TARGET_RESET, fcport, l, tag); 3332 } 3333 3334 int 3335 qla24xx_lun_reset(struct fc_port *fcport, uint64_t l, int tag) 3336 { 3337 struct qla_hw_data *ha = fcport->vha->hw; 3338 3339 if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha)) 3340 return qla2x00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag); 3341 3342 return __qla24xx_issue_tmf("Lun", TCF_LUN_RESET, fcport, l, tag); 3343 } 3344 3345 int 3346 qla2x00_system_error(scsi_qla_host_t *vha) 3347 { 3348 int rval; 3349 mbx_cmd_t mc; 3350 mbx_cmd_t *mcp = &mc; 3351 struct qla_hw_data *ha = vha->hw; 3352 3353 if (!IS_QLA23XX(ha) && !IS_FWI2_CAPABLE(ha)) 3354 return QLA_FUNCTION_FAILED; 3355 3356 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109b, 3357 "Entered %s.\n", __func__); 3358 3359 mcp->mb[0] = MBC_GEN_SYSTEM_ERROR; 3360 mcp->out_mb = MBX_0; 3361 mcp->in_mb = MBX_0; 3362 mcp->tov = 5; 3363 mcp->flags = 0; 3364 rval = qla2x00_mailbox_command(vha, mcp); 3365 3366 if (rval != QLA_SUCCESS) { 3367 ql_dbg(ql_dbg_mbx, vha, 0x109c, "Failed=%x.\n", rval); 3368 } else { 3369 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109d, 3370 "Done %s.\n", __func__); 3371 } 3372 3373 return rval; 3374 } 3375 3376 int 3377 qla2x00_write_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t data) 3378 { 3379 int rval; 3380 mbx_cmd_t mc; 3381 mbx_cmd_t *mcp = &mc; 3382 3383 if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) && 3384 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw)) 3385 return QLA_FUNCTION_FAILED; 3386 3387 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1182, 3388 "Entered %s.\n", __func__); 3389 3390 mcp->mb[0] = MBC_WRITE_SERDES; 3391 mcp->mb[1] = addr; 3392 if (IS_QLA2031(vha->hw)) 3393 mcp->mb[2] = data & 0xff; 3394 else 3395 mcp->mb[2] = data; 3396 3397 mcp->mb[3] = 0; 3398 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 3399 mcp->in_mb = MBX_0; 3400 mcp->tov = MBX_TOV_SECONDS; 3401 mcp->flags = 0; 3402 rval = 
qla2x00_mailbox_command(vha, mcp); 3403 3404 if (rval != QLA_SUCCESS) { 3405 ql_dbg(ql_dbg_mbx, vha, 0x1183, 3406 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 3407 } else { 3408 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1184, 3409 "Done %s.\n", __func__); 3410 } 3411 3412 return rval; 3413 } 3414 3415 int 3416 qla2x00_read_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t *data) 3417 { 3418 int rval; 3419 mbx_cmd_t mc; 3420 mbx_cmd_t *mcp = &mc; 3421 3422 if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) && 3423 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw)) 3424 return QLA_FUNCTION_FAILED; 3425 3426 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1185, 3427 "Entered %s.\n", __func__); 3428 3429 mcp->mb[0] = MBC_READ_SERDES; 3430 mcp->mb[1] = addr; 3431 mcp->mb[3] = 0; 3432 mcp->out_mb = MBX_3|MBX_1|MBX_0; 3433 mcp->in_mb = MBX_1|MBX_0; 3434 mcp->tov = MBX_TOV_SECONDS; 3435 mcp->flags = 0; 3436 rval = qla2x00_mailbox_command(vha, mcp); 3437 3438 if (IS_QLA2031(vha->hw)) 3439 *data = mcp->mb[1] & 0xff; 3440 else 3441 *data = mcp->mb[1]; 3442 3443 if (rval != QLA_SUCCESS) { 3444 ql_dbg(ql_dbg_mbx, vha, 0x1186, 3445 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 3446 } else { 3447 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1187, 3448 "Done %s.\n", __func__); 3449 } 3450 3451 return rval; 3452 } 3453 3454 int 3455 qla8044_write_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t data) 3456 { 3457 int rval; 3458 mbx_cmd_t mc; 3459 mbx_cmd_t *mcp = &mc; 3460 3461 if (!IS_QLA8044(vha->hw)) 3462 return QLA_FUNCTION_FAILED; 3463 3464 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x11a0, 3465 "Entered %s.\n", __func__); 3466 3467 mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG; 3468 mcp->mb[1] = HCS_WRITE_SERDES; 3469 mcp->mb[3] = LSW(addr); 3470 mcp->mb[4] = MSW(addr); 3471 mcp->mb[5] = LSW(data); 3472 mcp->mb[6] = MSW(data); 3473 mcp->out_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_1|MBX_0; 3474 mcp->in_mb = MBX_0; 3475 mcp->tov = MBX_TOV_SECONDS; 3476 mcp->flags = 0; 3477 rval = qla2x00_mailbox_command(vha, mcp); 3478 3479 if (rval != QLA_SUCCESS) { 3480 ql_dbg(ql_dbg_mbx, vha, 0x11a1, 3481 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 3482 } else { 3483 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1188, 3484 "Done %s.\n", __func__); 3485 } 3486 3487 return rval; 3488 } 3489 3490 int 3491 qla8044_read_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t *data) 3492 { 3493 int rval; 3494 mbx_cmd_t mc; 3495 mbx_cmd_t *mcp = &mc; 3496 3497 if (!IS_QLA8044(vha->hw)) 3498 return QLA_FUNCTION_FAILED; 3499 3500 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1189, 3501 "Entered %s.\n", __func__); 3502 3503 mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG; 3504 mcp->mb[1] = HCS_READ_SERDES; 3505 mcp->mb[3] = LSW(addr); 3506 mcp->mb[4] = MSW(addr); 3507 mcp->out_mb = MBX_4|MBX_3|MBX_1|MBX_0; 3508 mcp->in_mb = MBX_2|MBX_1|MBX_0; 3509 mcp->tov = MBX_TOV_SECONDS; 3510 mcp->flags = 0; 3511 rval = qla2x00_mailbox_command(vha, mcp); 3512 3513 *data = mcp->mb[2] << 16 | mcp->mb[1]; 3514 3515 if (rval != QLA_SUCCESS) { 3516 ql_dbg(ql_dbg_mbx, vha, 0x118a, 3517 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 3518 } else { 3519 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118b, 3520 "Done %s.\n", __func__); 3521 } 3522 3523 return rval; 3524 } 3525 3526 /** 3527 * qla2x00_set_serdes_params() - 3528 * @vha: HA context 3529 * @sw_em_1g: serial link options 3530 * @sw_em_2g: serial link options 3531 * @sw_em_4g: serial link options 3532 * 3533 * Returns 3534 */ 3535 int 3536 qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g, 3537 uint16_t 
sw_em_2g, uint16_t sw_em_4g) 3538 { 3539 int rval; 3540 mbx_cmd_t mc; 3541 mbx_cmd_t *mcp = &mc; 3542 3543 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109e, 3544 "Entered %s.\n", __func__); 3545 3546 mcp->mb[0] = MBC_SERDES_PARAMS; 3547 mcp->mb[1] = BIT_0; 3548 mcp->mb[2] = sw_em_1g | BIT_15; 3549 mcp->mb[3] = sw_em_2g | BIT_15; 3550 mcp->mb[4] = sw_em_4g | BIT_15; 3551 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 3552 mcp->in_mb = MBX_0; 3553 mcp->tov = MBX_TOV_SECONDS; 3554 mcp->flags = 0; 3555 rval = qla2x00_mailbox_command(vha, mcp); 3556 3557 if (rval != QLA_SUCCESS) { 3558 /*EMPTY*/ 3559 ql_dbg(ql_dbg_mbx, vha, 0x109f, 3560 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 3561 } else { 3562 /*EMPTY*/ 3563 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a0, 3564 "Done %s.\n", __func__); 3565 } 3566 3567 return rval; 3568 } 3569 3570 int 3571 qla2x00_stop_firmware(scsi_qla_host_t *vha) 3572 { 3573 int rval; 3574 mbx_cmd_t mc; 3575 mbx_cmd_t *mcp = &mc; 3576 3577 if (!IS_FWI2_CAPABLE(vha->hw)) 3578 return QLA_FUNCTION_FAILED; 3579 3580 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a1, 3581 "Entered %s.\n", __func__); 3582 3583 mcp->mb[0] = MBC_STOP_FIRMWARE; 3584 mcp->mb[1] = 0; 3585 mcp->out_mb = MBX_1|MBX_0; 3586 mcp->in_mb = MBX_0; 3587 mcp->tov = 5; 3588 mcp->flags = 0; 3589 rval = qla2x00_mailbox_command(vha, mcp); 3590 3591 if (rval != QLA_SUCCESS) { 3592 ql_dbg(ql_dbg_mbx, vha, 0x10a2, "Failed=%x.\n", rval); 3593 if (mcp->mb[0] == MBS_INVALID_COMMAND) 3594 rval = QLA_INVALID_COMMAND; 3595 } else { 3596 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a3, 3597 "Done %s.\n", __func__); 3598 } 3599 3600 return rval; 3601 } 3602 3603 int 3604 qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma, 3605 uint16_t buffers) 3606 { 3607 int rval; 3608 mbx_cmd_t mc; 3609 mbx_cmd_t *mcp = &mc; 3610 3611 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a4, 3612 "Entered %s.\n", __func__); 3613 3614 if (!IS_FWI2_CAPABLE(vha->hw)) 3615 return QLA_FUNCTION_FAILED; 3616 3617 if (unlikely(pci_channel_offline(vha->hw->pdev))) 3618 return QLA_FUNCTION_FAILED; 3619 3620 mcp->mb[0] = MBC_TRACE_CONTROL; 3621 mcp->mb[1] = TC_EFT_ENABLE; 3622 mcp->mb[2] = LSW(eft_dma); 3623 mcp->mb[3] = MSW(eft_dma); 3624 mcp->mb[4] = LSW(MSD(eft_dma)); 3625 mcp->mb[5] = MSW(MSD(eft_dma)); 3626 mcp->mb[6] = buffers; 3627 mcp->mb[7] = TC_AEN_DISABLE; 3628 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 3629 mcp->in_mb = MBX_1|MBX_0; 3630 mcp->tov = MBX_TOV_SECONDS; 3631 mcp->flags = 0; 3632 rval = qla2x00_mailbox_command(vha, mcp); 3633 if (rval != QLA_SUCCESS) { 3634 ql_dbg(ql_dbg_mbx, vha, 0x10a5, 3635 "Failed=%x mb[0]=%x mb[1]=%x.\n", 3636 rval, mcp->mb[0], mcp->mb[1]); 3637 } else { 3638 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a6, 3639 "Done %s.\n", __func__); 3640 } 3641 3642 return rval; 3643 } 3644 3645 int 3646 qla2x00_disable_eft_trace(scsi_qla_host_t *vha) 3647 { 3648 int rval; 3649 mbx_cmd_t mc; 3650 mbx_cmd_t *mcp = &mc; 3651 3652 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a7, 3653 "Entered %s.\n", __func__); 3654 3655 if (!IS_FWI2_CAPABLE(vha->hw)) 3656 return QLA_FUNCTION_FAILED; 3657 3658 if (unlikely(pci_channel_offline(vha->hw->pdev))) 3659 return QLA_FUNCTION_FAILED; 3660 3661 mcp->mb[0] = MBC_TRACE_CONTROL; 3662 mcp->mb[1] = TC_EFT_DISABLE; 3663 mcp->out_mb = MBX_1|MBX_0; 3664 mcp->in_mb = MBX_1|MBX_0; 3665 mcp->tov = MBX_TOV_SECONDS; 3666 mcp->flags = 0; 3667 rval = qla2x00_mailbox_command(vha, mcp); 3668 if (rval != QLA_SUCCESS) { 3669 ql_dbg(ql_dbg_mbx, vha, 0x10a8, 3670 "Failed=%x 
mb[0]=%x mb[1]=%x.\n", 3671 rval, mcp->mb[0], mcp->mb[1]); 3672 } else { 3673 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a9, 3674 "Done %s.\n", __func__); 3675 } 3676 3677 return rval; 3678 } 3679 3680 int 3681 qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma, 3682 uint16_t buffers, uint16_t *mb, uint32_t *dwords) 3683 { 3684 int rval; 3685 mbx_cmd_t mc; 3686 mbx_cmd_t *mcp = &mc; 3687 3688 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10aa, 3689 "Entered %s.\n", __func__); 3690 3691 if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw) && 3692 !IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) && 3693 !IS_QLA28XX(vha->hw)) 3694 return QLA_FUNCTION_FAILED; 3695 3696 if (unlikely(pci_channel_offline(vha->hw->pdev))) 3697 return QLA_FUNCTION_FAILED; 3698 3699 mcp->mb[0] = MBC_TRACE_CONTROL; 3700 mcp->mb[1] = TC_FCE_ENABLE; 3701 mcp->mb[2] = LSW(fce_dma); 3702 mcp->mb[3] = MSW(fce_dma); 3703 mcp->mb[4] = LSW(MSD(fce_dma)); 3704 mcp->mb[5] = MSW(MSD(fce_dma)); 3705 mcp->mb[6] = buffers; 3706 mcp->mb[7] = TC_AEN_DISABLE; 3707 mcp->mb[8] = 0; 3708 mcp->mb[9] = TC_FCE_DEFAULT_RX_SIZE; 3709 mcp->mb[10] = TC_FCE_DEFAULT_TX_SIZE; 3710 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2| 3711 MBX_1|MBX_0; 3712 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 3713 mcp->tov = MBX_TOV_SECONDS; 3714 mcp->flags = 0; 3715 rval = qla2x00_mailbox_command(vha, mcp); 3716 if (rval != QLA_SUCCESS) { 3717 ql_dbg(ql_dbg_mbx, vha, 0x10ab, 3718 "Failed=%x mb[0]=%x mb[1]=%x.\n", 3719 rval, mcp->mb[0], mcp->mb[1]); 3720 } else { 3721 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ac, 3722 "Done %s.\n", __func__); 3723 3724 if (mb) 3725 memcpy(mb, mcp->mb, 8 * sizeof(*mb)); 3726 if (dwords) 3727 *dwords = buffers; 3728 } 3729 3730 return rval; 3731 } 3732 3733 int 3734 qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd) 3735 { 3736 int rval; 3737 mbx_cmd_t mc; 3738 mbx_cmd_t *mcp = &mc; 3739 3740 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ad, 3741 "Entered %s.\n", __func__); 3742 3743 if (!IS_FWI2_CAPABLE(vha->hw)) 3744 return QLA_FUNCTION_FAILED; 3745 3746 if (unlikely(pci_channel_offline(vha->hw->pdev))) 3747 return QLA_FUNCTION_FAILED; 3748 3749 mcp->mb[0] = MBC_TRACE_CONTROL; 3750 mcp->mb[1] = TC_FCE_DISABLE; 3751 mcp->mb[2] = TC_FCE_DISABLE_TRACE; 3752 mcp->out_mb = MBX_2|MBX_1|MBX_0; 3753 mcp->in_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2| 3754 MBX_1|MBX_0; 3755 mcp->tov = MBX_TOV_SECONDS; 3756 mcp->flags = 0; 3757 rval = qla2x00_mailbox_command(vha, mcp); 3758 if (rval != QLA_SUCCESS) { 3759 ql_dbg(ql_dbg_mbx, vha, 0x10ae, 3760 "Failed=%x mb[0]=%x mb[1]=%x.\n", 3761 rval, mcp->mb[0], mcp->mb[1]); 3762 } else { 3763 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10af, 3764 "Done %s.\n", __func__); 3765 3766 if (wr) 3767 *wr = (uint64_t) mcp->mb[5] << 48 | 3768 (uint64_t) mcp->mb[4] << 32 | 3769 (uint64_t) mcp->mb[3] << 16 | 3770 (uint64_t) mcp->mb[2]; 3771 if (rd) 3772 *rd = (uint64_t) mcp->mb[9] << 48 | 3773 (uint64_t) mcp->mb[8] << 32 | 3774 (uint64_t) mcp->mb[7] << 16 | 3775 (uint64_t) mcp->mb[6]; 3776 } 3777 3778 return rval; 3779 } 3780 3781 int 3782 qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id, 3783 uint16_t *port_speed, uint16_t *mb) 3784 { 3785 int rval; 3786 mbx_cmd_t mc; 3787 mbx_cmd_t *mcp = &mc; 3788 3789 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b0, 3790 "Entered %s.\n", __func__); 3791 3792 if (!IS_IIDMA_CAPABLE(vha->hw)) 3793 return QLA_FUNCTION_FAILED; 3794 3795 mcp->mb[0] = MBC_PORT_PARAMS; 3796 mcp->mb[1] = loop_id; 
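	/*
	 * mb[2] = 0 requests the current iIDMA speed; compare with
	 * qla2x00_set_idma_speed() below, which sets BIT_0 in mb[2] and
	 * passes the new speed in mb[3].
	 */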
3797 mcp->mb[2] = mcp->mb[3] = 0; 3798 mcp->mb[9] = vha->vp_idx; 3799 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0; 3800 mcp->in_mb = MBX_3|MBX_1|MBX_0; 3801 mcp->tov = MBX_TOV_SECONDS; 3802 mcp->flags = 0; 3803 rval = qla2x00_mailbox_command(vha, mcp); 3804 3805 /* Return mailbox statuses. */ 3806 if (mb) { 3807 mb[0] = mcp->mb[0]; 3808 mb[1] = mcp->mb[1]; 3809 mb[3] = mcp->mb[3]; 3810 } 3811 3812 if (rval != QLA_SUCCESS) { 3813 ql_dbg(ql_dbg_mbx, vha, 0x10b1, "Failed=%x.\n", rval); 3814 } else { 3815 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b2, 3816 "Done %s.\n", __func__); 3817 if (port_speed) 3818 *port_speed = mcp->mb[3]; 3819 } 3820 3821 return rval; 3822 } 3823 3824 int 3825 qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id, 3826 uint16_t port_speed, uint16_t *mb) 3827 { 3828 int rval; 3829 mbx_cmd_t mc; 3830 mbx_cmd_t *mcp = &mc; 3831 3832 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b3, 3833 "Entered %s.\n", __func__); 3834 3835 if (!IS_IIDMA_CAPABLE(vha->hw)) 3836 return QLA_FUNCTION_FAILED; 3837 3838 mcp->mb[0] = MBC_PORT_PARAMS; 3839 mcp->mb[1] = loop_id; 3840 mcp->mb[2] = BIT_0; 3841 mcp->mb[3] = port_speed & 0x3F; 3842 mcp->mb[9] = vha->vp_idx; 3843 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0; 3844 mcp->in_mb = MBX_3|MBX_1|MBX_0; 3845 mcp->tov = MBX_TOV_SECONDS; 3846 mcp->flags = 0; 3847 rval = qla2x00_mailbox_command(vha, mcp); 3848 3849 /* Return mailbox statuses. */ 3850 if (mb) { 3851 mb[0] = mcp->mb[0]; 3852 mb[1] = mcp->mb[1]; 3853 mb[3] = mcp->mb[3]; 3854 } 3855 3856 if (rval != QLA_SUCCESS) { 3857 ql_dbg(ql_dbg_mbx, vha, 0x10b4, 3858 "Failed=%x.\n", rval); 3859 } else { 3860 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b5, 3861 "Done %s.\n", __func__); 3862 } 3863 3864 return rval; 3865 } 3866 3867 void 3868 qla24xx_report_id_acquisition(scsi_qla_host_t *vha, 3869 struct vp_rpt_id_entry_24xx *rptid_entry) 3870 { 3871 struct qla_hw_data *ha = vha->hw; 3872 scsi_qla_host_t *vp = NULL; 3873 unsigned long flags; 3874 int found; 3875 port_id_t id; 3876 struct fc_port *fcport; 3877 3878 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6, 3879 "Entered %s.\n", __func__); 3880 3881 if (rptid_entry->entry_status != 0) 3882 return; 3883 3884 id.b.domain = rptid_entry->port_id[2]; 3885 id.b.area = rptid_entry->port_id[1]; 3886 id.b.al_pa = rptid_entry->port_id[0]; 3887 id.b.rsvd_1 = 0; 3888 ha->flags.n2n_ae = 0; 3889 3890 if (rptid_entry->format == 0) { 3891 /* loop */ 3892 ql_dbg(ql_dbg_async, vha, 0x10b7, 3893 "Format 0 : Number of VPs setup %d, number of " 3894 "VPs acquired %d.\n", rptid_entry->vp_setup, 3895 rptid_entry->vp_acquired); 3896 ql_dbg(ql_dbg_async, vha, 0x10b8, 3897 "Primary port id %02x%02x%02x.\n", 3898 rptid_entry->port_id[2], rptid_entry->port_id[1], 3899 rptid_entry->port_id[0]); 3900 ha->current_topology = ISP_CFG_NL; 3901 qlt_update_host_map(vha, id); 3902 3903 } else if (rptid_entry->format == 1) { 3904 /* fabric */ 3905 ql_dbg(ql_dbg_async, vha, 0x10b9, 3906 "Format 1: VP[%d] enabled - status %d - with " 3907 "port id %02x%02x%02x.\n", rptid_entry->vp_idx, 3908 rptid_entry->vp_status, 3909 rptid_entry->port_id[2], rptid_entry->port_id[1], 3910 rptid_entry->port_id[0]); 3911 ql_dbg(ql_dbg_async, vha, 0x5075, 3912 "Format 1: Remote WWPN %8phC.\n", 3913 rptid_entry->u.f1.port_name); 3914 3915 ql_dbg(ql_dbg_async, vha, 0x5075, 3916 "Format 1: WWPN %8phC.\n", 3917 vha->port_name); 3918 3919 switch (rptid_entry->u.f1.flags & TOPO_MASK) { 3920 case TOPO_N2N: 3921 ha->current_topology = ISP_CFG_N; 3922 spin_lock_irqsave(&vha->hw->tgt.sess_lock, 
flags); 3923 list_for_each_entry(fcport, &vha->vp_fcports, list) { 3924 fcport->scan_state = QLA_FCPORT_SCAN; 3925 fcport->n2n_flag = 0; 3926 } 3927 id.b24 = 0; 3928 if (wwn_to_u64(vha->port_name) > 3929 wwn_to_u64(rptid_entry->u.f1.port_name)) { 3930 vha->d_id.b24 = 0; 3931 vha->d_id.b.al_pa = 1; 3932 ha->flags.n2n_bigger = 1; 3933 3934 id.b.al_pa = 2; 3935 ql_dbg(ql_dbg_async, vha, 0x5075, 3936 "Format 1: assign local id %x remote id %x\n", 3937 vha->d_id.b24, id.b24); 3938 } else { 3939 ql_dbg(ql_dbg_async, vha, 0x5075, 3940 "Format 1: Remote login - Waiting for WWPN %8phC.\n", 3941 rptid_entry->u.f1.port_name); 3942 ha->flags.n2n_bigger = 0; 3943 } 3944 3945 fcport = qla2x00_find_fcport_by_wwpn(vha, 3946 rptid_entry->u.f1.port_name, 1); 3947 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 3948 3949 3950 if (fcport) { 3951 fcport->plogi_nack_done_deadline = jiffies + HZ; 3952 fcport->dm_login_expire = jiffies + 2*HZ; 3953 fcport->scan_state = QLA_FCPORT_FOUND; 3954 fcport->n2n_flag = 1; 3955 fcport->keep_nport_handle = 1; 3956 fcport->fc4_type = FS_FC4TYPE_FCP; 3957 if (vha->flags.nvme_enabled) 3958 fcport->fc4_type |= FS_FC4TYPE_NVME; 3959 3960 if (wwn_to_u64(vha->port_name) > 3961 wwn_to_u64(fcport->port_name)) { 3962 fcport->d_id = id; 3963 } 3964 3965 switch (fcport->disc_state) { 3966 case DSC_DELETED: 3967 set_bit(RELOGIN_NEEDED, 3968 &vha->dpc_flags); 3969 break; 3970 case DSC_DELETE_PEND: 3971 break; 3972 default: 3973 qlt_schedule_sess_for_deletion(fcport); 3974 break; 3975 } 3976 } else { 3977 qla24xx_post_newsess_work(vha, &id, 3978 rptid_entry->u.f1.port_name, 3979 rptid_entry->u.f1.node_name, 3980 NULL, 3981 FS_FCP_IS_N2N); 3982 } 3983 3984 /* if our portname is higher then initiate N2N login */ 3985 3986 set_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags); 3987 return; 3988 break; 3989 case TOPO_FL: 3990 ha->current_topology = ISP_CFG_FL; 3991 break; 3992 case TOPO_F: 3993 ha->current_topology = ISP_CFG_F; 3994 break; 3995 default: 3996 break; 3997 } 3998 3999 ha->flags.gpsc_supported = 1; 4000 ha->current_topology = ISP_CFG_F; 4001 /* buffer to buffer credit flag */ 4002 vha->flags.bbcr_enable = (rptid_entry->u.f1.bbcr & 0xf) != 0; 4003 4004 if (rptid_entry->vp_idx == 0) { 4005 if (rptid_entry->vp_status == VP_STAT_COMPL) { 4006 /* FA-WWN is only for physical port */ 4007 if (qla_ini_mode_enabled(vha) && 4008 ha->flags.fawwpn_enabled && 4009 (rptid_entry->u.f1.flags & 4010 BIT_6)) { 4011 memcpy(vha->port_name, 4012 rptid_entry->u.f1.port_name, 4013 WWN_SIZE); 4014 } 4015 4016 qlt_update_host_map(vha, id); 4017 } 4018 4019 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags); 4020 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags); 4021 } else { 4022 if (rptid_entry->vp_status != VP_STAT_COMPL && 4023 rptid_entry->vp_status != VP_STAT_ID_CHG) { 4024 ql_dbg(ql_dbg_mbx, vha, 0x10ba, 4025 "Could not acquire ID for VP[%d].\n", 4026 rptid_entry->vp_idx); 4027 return; 4028 } 4029 4030 found = 0; 4031 spin_lock_irqsave(&ha->vport_slock, flags); 4032 list_for_each_entry(vp, &ha->vp_list, list) { 4033 if (rptid_entry->vp_idx == vp->vp_idx) { 4034 found = 1; 4035 break; 4036 } 4037 } 4038 spin_unlock_irqrestore(&ha->vport_slock, flags); 4039 4040 if (!found) 4041 return; 4042 4043 qlt_update_host_map(vp, id); 4044 4045 /* 4046 * Cannot configure here as we are still sitting on the 4047 * response queue. Handle it in dpc context. 
4048 */ 4049 set_bit(VP_IDX_ACQUIRED, &vp->vp_flags); 4050 set_bit(REGISTER_FC4_NEEDED, &vp->dpc_flags); 4051 set_bit(REGISTER_FDMI_NEEDED, &vp->dpc_flags); 4052 } 4053 set_bit(VP_DPC_NEEDED, &vha->dpc_flags); 4054 qla2xxx_wake_dpc(vha); 4055 } else if (rptid_entry->format == 2) { 4056 ql_dbg(ql_dbg_async, vha, 0x505f, 4057 "RIDA: format 2/N2N Primary port id %02x%02x%02x.\n", 4058 rptid_entry->port_id[2], rptid_entry->port_id[1], 4059 rptid_entry->port_id[0]); 4060 4061 ql_dbg(ql_dbg_async, vha, 0x5075, 4062 "N2N: Remote WWPN %8phC.\n", 4063 rptid_entry->u.f2.port_name); 4064 4065 /* N2N. direct connect */ 4066 ha->current_topology = ISP_CFG_N; 4067 ha->flags.rida_fmt2 = 1; 4068 vha->d_id.b.domain = rptid_entry->port_id[2]; 4069 vha->d_id.b.area = rptid_entry->port_id[1]; 4070 vha->d_id.b.al_pa = rptid_entry->port_id[0]; 4071 4072 ha->flags.n2n_ae = 1; 4073 spin_lock_irqsave(&ha->vport_slock, flags); 4074 qlt_update_vp_map(vha, SET_AL_PA); 4075 spin_unlock_irqrestore(&ha->vport_slock, flags); 4076 4077 list_for_each_entry(fcport, &vha->vp_fcports, list) { 4078 fcport->scan_state = QLA_FCPORT_SCAN; 4079 fcport->n2n_flag = 0; 4080 } 4081 4082 fcport = qla2x00_find_fcport_by_wwpn(vha, 4083 rptid_entry->u.f2.port_name, 1); 4084 4085 if (fcport) { 4086 fcport->login_retry = vha->hw->login_retry_count; 4087 fcport->plogi_nack_done_deadline = jiffies + HZ; 4088 fcport->scan_state = QLA_FCPORT_FOUND; 4089 fcport->keep_nport_handle = 1; 4090 fcport->n2n_flag = 1; 4091 fcport->d_id.b.domain = 4092 rptid_entry->u.f2.remote_nport_id[2]; 4093 fcport->d_id.b.area = 4094 rptid_entry->u.f2.remote_nport_id[1]; 4095 fcport->d_id.b.al_pa = 4096 rptid_entry->u.f2.remote_nport_id[0]; 4097 } 4098 } 4099 } 4100 4101 /* 4102 * qla24xx_modify_vp_config 4103 * Change VP configuration for vha 4104 * 4105 * Input: 4106 * vha = adapter block pointer. 4107 * 4108 * Returns: 4109 * qla2xxx local function return status code. 4110 * 4111 * Context: 4112 * Kernel context. 
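* Flow: the routine pulls a VP Config IOCB from the s_dma_pool, copies this vport's node/port names into the index-1 slots, issues the IOCB through the base (physical) port, and treats any non-zero completion status as a failure.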
4113 */ 4114 int 4115 qla24xx_modify_vp_config(scsi_qla_host_t *vha) 4116 { 4117 int rval; 4118 struct vp_config_entry_24xx *vpmod; 4119 dma_addr_t vpmod_dma; 4120 struct qla_hw_data *ha = vha->hw; 4121 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 4122 4123 /* This can be called by the parent */ 4124 4125 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10bb, 4126 "Entered %s.\n", __func__); 4127 4128 vpmod = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma); 4129 if (!vpmod) { 4130 ql_log(ql_log_warn, vha, 0x10bc, 4131 "Failed to allocate modify VP IOCB.\n"); 4132 return QLA_MEMORY_ALLOC_FAILED; 4133 } 4134 4135 vpmod->entry_type = VP_CONFIG_IOCB_TYPE; 4136 vpmod->entry_count = 1; 4137 vpmod->command = VCT_COMMAND_MOD_ENABLE_VPS; 4138 vpmod->vp_count = 1; 4139 vpmod->vp_index1 = vha->vp_idx; 4140 vpmod->options_idx1 = BIT_3|BIT_4|BIT_5; 4141 4142 qlt_modify_vp_config(vha, vpmod); 4143 4144 memcpy(vpmod->node_name_idx1, vha->node_name, WWN_SIZE); 4145 memcpy(vpmod->port_name_idx1, vha->port_name, WWN_SIZE); 4146 vpmod->entry_count = 1; 4147 4148 rval = qla2x00_issue_iocb(base_vha, vpmod, vpmod_dma, 0); 4149 if (rval != QLA_SUCCESS) { 4150 ql_dbg(ql_dbg_mbx, vha, 0x10bd, 4151 "Failed to issue VP config IOCB (%x).\n", rval); 4152 } else if (vpmod->comp_status != 0) { 4153 ql_dbg(ql_dbg_mbx, vha, 0x10be, 4154 "Failed to complete IOCB -- error status (%x).\n", 4155 vpmod->comp_status); 4156 rval = QLA_FUNCTION_FAILED; 4157 } else if (vpmod->comp_status != cpu_to_le16(CS_COMPLETE)) { 4158 ql_dbg(ql_dbg_mbx, vha, 0x10bf, 4159 "Failed to complete IOCB -- completion status (%x).\n", 4160 le16_to_cpu(vpmod->comp_status)); 4161 rval = QLA_FUNCTION_FAILED; 4162 } else { 4163 /* EMPTY */ 4164 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c0, 4165 "Done %s.\n", __func__); 4166 fc_vport_set_state(vha->fc_vport, FC_VPORT_INITIALIZING); 4167 } 4168 dma_pool_free(ha->s_dma_pool, vpmod, vpmod_dma); 4169 4170 return rval; 4171 } 4172 4173 /* 4174 * qla2x00_send_change_request 4175 * Receive or disable RSCN request from fabric controller 4176 * 4177 * Input: 4178 * ha = adapter block pointer 4179 * format = registration format: 4180 * 0 - Reserved 4181 * 1 - Fabric detected registration 4182 * 2 - N_port detected registration 4183 * 3 - Full registration 4184 * FF - clear registration 4185 * vp_idx = Virtual port index 4186 * 4187 * Returns: 4188 * qla2x00 local function return status code. 
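* * Note: callers in this driver typically request format 3 (full registration) so all fabric RSCNs are delivered. A minimal usage sketch (illustrative only): * * if (qla2x00_send_change_request(vha, 0x3, vha->vp_idx) != QLA_SUCCESS) * set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);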
4189 * 4190 * Context: 4191 * Kernel Context 4192 */ 4193 4194 int 4195 qla2x00_send_change_request(scsi_qla_host_t *vha, uint16_t format, 4196 uint16_t vp_idx) 4197 { 4198 int rval; 4199 mbx_cmd_t mc; 4200 mbx_cmd_t *mcp = &mc; 4201 4202 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c7, 4203 "Entered %s.\n", __func__); 4204 4205 mcp->mb[0] = MBC_SEND_CHANGE_REQUEST; 4206 mcp->mb[1] = format; 4207 mcp->mb[9] = vp_idx; 4208 mcp->out_mb = MBX_9|MBX_1|MBX_0; 4209 mcp->in_mb = MBX_0|MBX_1; 4210 mcp->tov = MBX_TOV_SECONDS; 4211 mcp->flags = 0; 4212 rval = qla2x00_mailbox_command(vha, mcp); 4213 4214 if (rval == QLA_SUCCESS) { 4215 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) { 4216 rval = BIT_1; 4217 } 4218 } else 4219 rval = BIT_1; 4220 4221 return rval; 4222 } 4223 4224 int 4225 qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr, 4226 uint32_t size) 4227 { 4228 int rval; 4229 mbx_cmd_t mc; 4230 mbx_cmd_t *mcp = &mc; 4231 4232 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1009, 4233 "Entered %s.\n", __func__); 4234 4235 if (MSW(addr) || IS_FWI2_CAPABLE(vha->hw)) { 4236 mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED; 4237 mcp->mb[8] = MSW(addr); 4238 mcp->out_mb = MBX_8|MBX_0; 4239 } else { 4240 mcp->mb[0] = MBC_DUMP_RISC_RAM; 4241 mcp->out_mb = MBX_0; 4242 } 4243 mcp->mb[1] = LSW(addr); 4244 mcp->mb[2] = MSW(req_dma); 4245 mcp->mb[3] = LSW(req_dma); 4246 mcp->mb[6] = MSW(MSD(req_dma)); 4247 mcp->mb[7] = LSW(MSD(req_dma)); 4248 mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1; 4249 if (IS_FWI2_CAPABLE(vha->hw)) { 4250 mcp->mb[4] = MSW(size); 4251 mcp->mb[5] = LSW(size); 4252 mcp->out_mb |= MBX_5|MBX_4; 4253 } else { 4254 mcp->mb[4] = LSW(size); 4255 mcp->out_mb |= MBX_4; 4256 } 4257 4258 mcp->in_mb = MBX_0; 4259 mcp->tov = MBX_TOV_SECONDS; 4260 mcp->flags = 0; 4261 rval = qla2x00_mailbox_command(vha, mcp); 4262 4263 if (rval != QLA_SUCCESS) { 4264 ql_dbg(ql_dbg_mbx, vha, 0x1008, 4265 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4266 } else { 4267 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1007, 4268 "Done %s.\n", __func__); 4269 } 4270 4271 return rval; 4272 } 4273 /* 84XX Support **************************************************************/ 4274 4275 struct cs84xx_mgmt_cmd { 4276 union { 4277 struct verify_chip_entry_84xx req; 4278 struct verify_chip_rsp_84xx rsp; 4279 } p; 4280 }; 4281 4282 int 4283 qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status) 4284 { 4285 int rval, retry; 4286 struct cs84xx_mgmt_cmd *mn; 4287 dma_addr_t mn_dma; 4288 uint16_t options; 4289 unsigned long flags; 4290 struct qla_hw_data *ha = vha->hw; 4291 4292 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c8, 4293 "Entered %s.\n", __func__); 4294 4295 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma); 4296 if (mn == NULL) { 4297 return QLA_MEMORY_ALLOC_FAILED; 4298 } 4299 4300 /* Force Update? */ 4301 options = ha->cs84xx->fw_update ? VCO_FORCE_UPDATE : 0; 4302 /* Diagnostic firmware? */ 4303 /* options |= MENLO_DIAG_FW; */ 4304 /* We update the firmware with only one data sequence. 
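If the verify/update reports a failure, the retry loop below runs once more with VCO_DONT_UPDATE_FW set (verify only, no firmware update).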
*/ 4305 options |= VCO_END_OF_DATA; 4306 4307 do { 4308 retry = 0; 4309 memset(mn, 0, sizeof(*mn)); 4310 mn->p.req.entry_type = VERIFY_CHIP_IOCB_TYPE; 4311 mn->p.req.entry_count = 1; 4312 mn->p.req.options = cpu_to_le16(options); 4313 4314 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111c, 4315 "Dump of Verify Request.\n"); 4316 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111e, 4317 mn, sizeof(*mn)); 4318 4319 rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120); 4320 if (rval != QLA_SUCCESS) { 4321 ql_dbg(ql_dbg_mbx, vha, 0x10cb, 4322 "Failed to issue verify IOCB (%x).\n", rval); 4323 goto verify_done; 4324 } 4325 4326 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1110, 4327 "Dump of Verify Response.\n"); 4328 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1118, 4329 mn, sizeof(*mn)); 4330 4331 status[0] = le16_to_cpu(mn->p.rsp.comp_status); 4332 status[1] = status[0] == CS_VCS_CHIP_FAILURE ? 4333 le16_to_cpu(mn->p.rsp.failure_code) : 0; 4334 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ce, 4335 "cs=%x fc=%x.\n", status[0], status[1]); 4336 4337 if (status[0] != CS_COMPLETE) { 4338 rval = QLA_FUNCTION_FAILED; 4339 if (!(options & VCO_DONT_UPDATE_FW)) { 4340 ql_dbg(ql_dbg_mbx, vha, 0x10cf, 4341 "Firmware update failed. Retrying " 4342 "without update firmware.\n"); 4343 options |= VCO_DONT_UPDATE_FW; 4344 options &= ~VCO_FORCE_UPDATE; 4345 retry = 1; 4346 } 4347 } else { 4348 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d0, 4349 "Firmware updated to %x.\n", 4350 le32_to_cpu(mn->p.rsp.fw_ver)); 4351 4352 /* NOTE: we only update OP firmware. */ 4353 spin_lock_irqsave(&ha->cs84xx->access_lock, flags); 4354 ha->cs84xx->op_fw_version = 4355 le32_to_cpu(mn->p.rsp.fw_ver); 4356 spin_unlock_irqrestore(&ha->cs84xx->access_lock, 4357 flags); 4358 } 4359 } while (retry); 4360 4361 verify_done: 4362 dma_pool_free(ha->s_dma_pool, mn, mn_dma); 4363 4364 if (rval != QLA_SUCCESS) { 4365 ql_dbg(ql_dbg_mbx, vha, 0x10d1, 4366 "Failed=%x.\n", rval); 4367 } else { 4368 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d2, 4369 "Done %s.\n", __func__); 4370 } 4371 4372 return rval; 4373 } 4374 4375 int 4376 qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req) 4377 { 4378 int rval; 4379 unsigned long flags; 4380 mbx_cmd_t mc; 4381 mbx_cmd_t *mcp = &mc; 4382 struct qla_hw_data *ha = vha->hw; 4383 4384 if (!ha->flags.fw_started) 4385 return QLA_SUCCESS; 4386 4387 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d3, 4388 "Entered %s.\n", __func__); 4389 4390 if (IS_SHADOW_REG_CAPABLE(ha)) 4391 req->options |= BIT_13; 4392 4393 mcp->mb[0] = MBC_INITIALIZE_MULTIQ; 4394 mcp->mb[1] = req->options; 4395 mcp->mb[2] = MSW(LSD(req->dma)); 4396 mcp->mb[3] = LSW(LSD(req->dma)); 4397 mcp->mb[6] = MSW(MSD(req->dma)); 4398 mcp->mb[7] = LSW(MSD(req->dma)); 4399 mcp->mb[5] = req->length; 4400 if (req->rsp) 4401 mcp->mb[10] = req->rsp->id; 4402 mcp->mb[12] = req->qos; 4403 mcp->mb[11] = req->vp_idx; 4404 mcp->mb[13] = req->rid; 4405 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) 4406 mcp->mb[15] = 0; 4407 4408 mcp->mb[4] = req->id; 4409 /* que in ptr index */ 4410 mcp->mb[8] = 0; 4411 /* que out ptr index */ 4412 mcp->mb[9] = *req->out_ptr = 0; 4413 mcp->out_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7| 4414 MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 4415 mcp->in_mb = MBX_0; 4416 mcp->flags = MBX_DMA_OUT; 4417 mcp->tov = MBX_TOV_SECONDS * 2; 4418 4419 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) || 4420 IS_QLA28XX(ha)) 4421 mcp->in_mb |= MBX_1; 4422 if (IS_QLA83XX(ha) || 
IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 4423 mcp->out_mb |= MBX_15; 4424 /* debug q create issue in SR-IOV */ 4425 mcp->in_mb |= MBX_9 | MBX_8 | MBX_7; 4426 } 4427 4428 spin_lock_irqsave(&ha->hardware_lock, flags); 4429 if (!(req->options & BIT_0)) { 4430 WRT_REG_DWORD(req->req_q_in, 0); 4431 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 4432 WRT_REG_DWORD(req->req_q_out, 0); 4433 } 4434 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4435 4436 rval = qla2x00_mailbox_command(vha, mcp); 4437 if (rval != QLA_SUCCESS) { 4438 ql_dbg(ql_dbg_mbx, vha, 0x10d4, 4439 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4440 } else { 4441 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d5, 4442 "Done %s.\n", __func__); 4443 } 4444 4445 return rval; 4446 } 4447 4448 int 4449 qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp) 4450 { 4451 int rval; 4452 unsigned long flags; 4453 mbx_cmd_t mc; 4454 mbx_cmd_t *mcp = &mc; 4455 struct qla_hw_data *ha = vha->hw; 4456 4457 if (!ha->flags.fw_started) 4458 return QLA_SUCCESS; 4459 4460 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d6, 4461 "Entered %s.\n", __func__); 4462 4463 if (IS_SHADOW_REG_CAPABLE(ha)) 4464 rsp->options |= BIT_13; 4465 4466 mcp->mb[0] = MBC_INITIALIZE_MULTIQ; 4467 mcp->mb[1] = rsp->options; 4468 mcp->mb[2] = MSW(LSD(rsp->dma)); 4469 mcp->mb[3] = LSW(LSD(rsp->dma)); 4470 mcp->mb[6] = MSW(MSD(rsp->dma)); 4471 mcp->mb[7] = LSW(MSD(rsp->dma)); 4472 mcp->mb[5] = rsp->length; 4473 mcp->mb[14] = rsp->msix->entry; 4474 mcp->mb[13] = rsp->rid; 4475 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) 4476 mcp->mb[15] = 0; 4477 4478 mcp->mb[4] = rsp->id; 4479 /* que in ptr index */ 4480 mcp->mb[8] = *rsp->in_ptr = 0; 4481 /* que out ptr index */ 4482 mcp->mb[9] = 0; 4483 mcp->out_mb = MBX_14|MBX_13|MBX_9|MBX_8|MBX_7 4484 |MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 4485 mcp->in_mb = MBX_0; 4486 mcp->flags = MBX_DMA_OUT; 4487 mcp->tov = MBX_TOV_SECONDS * 2; 4488 4489 if (IS_QLA81XX(ha)) { 4490 mcp->out_mb |= MBX_12|MBX_11|MBX_10; 4491 mcp->in_mb |= MBX_1; 4492 } else if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 4493 mcp->out_mb |= MBX_15|MBX_12|MBX_11|MBX_10; 4494 mcp->in_mb |= MBX_1; 4495 /* debug q create issue in SR-IOV */ 4496 mcp->in_mb |= MBX_9 | MBX_8 | MBX_7; 4497 } 4498 4499 spin_lock_irqsave(&ha->hardware_lock, flags); 4500 if (!(rsp->options & BIT_0)) { 4501 WRT_REG_DWORD(rsp->rsp_q_out, 0); 4502 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 4503 WRT_REG_DWORD(rsp->rsp_q_in, 0); 4504 } 4505 4506 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4507 4508 rval = qla2x00_mailbox_command(vha, mcp); 4509 if (rval != QLA_SUCCESS) { 4510 ql_dbg(ql_dbg_mbx, vha, 0x10d7, 4511 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4512 } else { 4513 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d8, 4514 "Done %s.\n", __func__); 4515 } 4516 4517 return rval; 4518 } 4519 4520 int 4521 qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb) 4522 { 4523 int rval; 4524 mbx_cmd_t mc; 4525 mbx_cmd_t *mcp = &mc; 4526 4527 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d9, 4528 "Entered %s.\n", __func__); 4529 4530 mcp->mb[0] = MBC_IDC_ACK; 4531 memcpy(&mcp->mb[1], mb, QLA_IDC_ACK_REGS * sizeof(uint16_t)); 4532 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 4533 mcp->in_mb = MBX_0; 4534 mcp->tov = MBX_TOV_SECONDS; 4535 mcp->flags = 0; 4536 rval = qla2x00_mailbox_command(vha, mcp); 4537 4538 if (rval != QLA_SUCCESS) { 4539 ql_dbg(ql_dbg_mbx, vha, 0x10da, 4540 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4541 } else 
{ 4542 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10db, 4543 "Done %s.\n", __func__); 4544 } 4545 4546 return rval; 4547 } 4548 4549 int 4550 qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size) 4551 { 4552 int rval; 4553 mbx_cmd_t mc; 4554 mbx_cmd_t *mcp = &mc; 4555 4556 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10dc, 4557 "Entered %s.\n", __func__); 4558 4559 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) && 4560 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw)) 4561 return QLA_FUNCTION_FAILED; 4562 4563 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; 4564 mcp->mb[1] = FAC_OPT_CMD_GET_SECTOR_SIZE; 4565 mcp->out_mb = MBX_1|MBX_0; 4566 mcp->in_mb = MBX_1|MBX_0; 4567 mcp->tov = MBX_TOV_SECONDS; 4568 mcp->flags = 0; 4569 rval = qla2x00_mailbox_command(vha, mcp); 4570 4571 if (rval != QLA_SUCCESS) { 4572 ql_dbg(ql_dbg_mbx, vha, 0x10dd, 4573 "Failed=%x mb[0]=%x mb[1]=%x.\n", 4574 rval, mcp->mb[0], mcp->mb[1]); 4575 } else { 4576 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10de, 4577 "Done %s.\n", __func__); 4578 *sector_size = mcp->mb[1]; 4579 } 4580 4581 return rval; 4582 } 4583 4584 int 4585 qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable) 4586 { 4587 int rval; 4588 mbx_cmd_t mc; 4589 mbx_cmd_t *mcp = &mc; 4590 4591 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) && 4592 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw)) 4593 return QLA_FUNCTION_FAILED; 4594 4595 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10df, 4596 "Entered %s.\n", __func__); 4597 4598 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; 4599 mcp->mb[1] = enable ? FAC_OPT_CMD_WRITE_ENABLE : 4600 FAC_OPT_CMD_WRITE_PROTECT; 4601 mcp->out_mb = MBX_1|MBX_0; 4602 mcp->in_mb = MBX_1|MBX_0; 4603 mcp->tov = MBX_TOV_SECONDS; 4604 mcp->flags = 0; 4605 rval = qla2x00_mailbox_command(vha, mcp); 4606 4607 if (rval != QLA_SUCCESS) { 4608 ql_dbg(ql_dbg_mbx, vha, 0x10e0, 4609 "Failed=%x mb[0]=%x mb[1]=%x.\n", 4610 rval, mcp->mb[0], mcp->mb[1]); 4611 } else { 4612 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e1, 4613 "Done %s.\n", __func__); 4614 } 4615 4616 return rval; 4617 } 4618 4619 int 4620 qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish) 4621 { 4622 int rval; 4623 mbx_cmd_t mc; 4624 mbx_cmd_t *mcp = &mc; 4625 4626 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) && 4627 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw)) 4628 return QLA_FUNCTION_FAILED; 4629 4630 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2, 4631 "Entered %s.\n", __func__); 4632 4633 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; 4634 mcp->mb[1] = FAC_OPT_CMD_ERASE_SECTOR; 4635 mcp->mb[2] = LSW(start); 4636 mcp->mb[3] = MSW(start); 4637 mcp->mb[4] = LSW(finish); 4638 mcp->mb[5] = MSW(finish); 4639 mcp->out_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 4640 mcp->in_mb = MBX_2|MBX_1|MBX_0; 4641 mcp->tov = MBX_TOV_SECONDS; 4642 mcp->flags = 0; 4643 rval = qla2x00_mailbox_command(vha, mcp); 4644 4645 if (rval != QLA_SUCCESS) { 4646 ql_dbg(ql_dbg_mbx, vha, 0x10e3, 4647 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", 4648 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); 4649 } else { 4650 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4, 4651 "Done %s.\n", __func__); 4652 } 4653 4654 return rval; 4655 } 4656 4657 int 4658 qla81xx_fac_semaphore_access(scsi_qla_host_t *vha, int lock) 4659 { 4660 int rval = QLA_SUCCESS; 4661 mbx_cmd_t mc; 4662 mbx_cmd_t *mcp = &mc; 4663 struct qla_hw_data *ha = vha->hw; 4664 4665 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && 4666 !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 4667 return rval; 4668 4669 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2, 4670 
"Entered %s.\n", __func__); 4671 4672 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; 4673 mcp->mb[1] = (lock ? FAC_OPT_CMD_LOCK_SEMAPHORE : 4674 FAC_OPT_CMD_UNLOCK_SEMAPHORE); 4675 mcp->out_mb = MBX_1|MBX_0; 4676 mcp->in_mb = MBX_1|MBX_0; 4677 mcp->tov = MBX_TOV_SECONDS; 4678 mcp->flags = 0; 4679 rval = qla2x00_mailbox_command(vha, mcp); 4680 4681 if (rval != QLA_SUCCESS) { 4682 ql_dbg(ql_dbg_mbx, vha, 0x10e3, 4683 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", 4684 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); 4685 } else { 4686 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4, 4687 "Done %s.\n", __func__); 4688 } 4689 4690 return rval; 4691 } 4692 4693 int 4694 qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha) 4695 { 4696 int rval = 0; 4697 mbx_cmd_t mc; 4698 mbx_cmd_t *mcp = &mc; 4699 4700 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e5, 4701 "Entered %s.\n", __func__); 4702 4703 mcp->mb[0] = MBC_RESTART_MPI_FW; 4704 mcp->out_mb = MBX_0; 4705 mcp->in_mb = MBX_0|MBX_1; 4706 mcp->tov = MBX_TOV_SECONDS; 4707 mcp->flags = 0; 4708 rval = qla2x00_mailbox_command(vha, mcp); 4709 4710 if (rval != QLA_SUCCESS) { 4711 ql_dbg(ql_dbg_mbx, vha, 0x10e6, 4712 "Failed=%x mb[0]=%x mb[1]=%x.\n", 4713 rval, mcp->mb[0], mcp->mb[1]); 4714 } else { 4715 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e7, 4716 "Done %s.\n", __func__); 4717 } 4718 4719 return rval; 4720 } 4721 4722 int 4723 qla82xx_set_driver_version(scsi_qla_host_t *vha, char *version) 4724 { 4725 int rval; 4726 mbx_cmd_t mc; 4727 mbx_cmd_t *mcp = &mc; 4728 int i; 4729 int len; 4730 uint16_t *str; 4731 struct qla_hw_data *ha = vha->hw; 4732 4733 if (!IS_P3P_TYPE(ha)) 4734 return QLA_FUNCTION_FAILED; 4735 4736 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117b, 4737 "Entered %s.\n", __func__); 4738 4739 str = (void *)version; 4740 len = strlen(version); 4741 4742 mcp->mb[0] = MBC_SET_RNID_PARAMS; 4743 mcp->mb[1] = RNID_TYPE_SET_VERSION << 8; 4744 mcp->out_mb = MBX_1|MBX_0; 4745 for (i = 4; i < 16 && len; i++, str++, len -= 2) { 4746 mcp->mb[i] = cpu_to_le16p(str); 4747 mcp->out_mb |= 1<<i; 4748 } 4749 for (; i < 16; i++) { 4750 mcp->mb[i] = 0; 4751 mcp->out_mb |= 1<<i; 4752 } 4753 mcp->in_mb = MBX_1|MBX_0; 4754 mcp->tov = MBX_TOV_SECONDS; 4755 mcp->flags = 0; 4756 rval = qla2x00_mailbox_command(vha, mcp); 4757 4758 if (rval != QLA_SUCCESS) { 4759 ql_dbg(ql_dbg_mbx, vha, 0x117c, 4760 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]); 4761 } else { 4762 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117d, 4763 "Done %s.\n", __func__); 4764 } 4765 4766 return rval; 4767 } 4768 4769 int 4770 qla25xx_set_driver_version(scsi_qla_host_t *vha, char *version) 4771 { 4772 int rval; 4773 mbx_cmd_t mc; 4774 mbx_cmd_t *mcp = &mc; 4775 int len; 4776 uint16_t dwlen; 4777 uint8_t *str; 4778 dma_addr_t str_dma; 4779 struct qla_hw_data *ha = vha->hw; 4780 4781 if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha) || 4782 IS_P3P_TYPE(ha)) 4783 return QLA_FUNCTION_FAILED; 4784 4785 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117e, 4786 "Entered %s.\n", __func__); 4787 4788 str = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &str_dma); 4789 if (!str) { 4790 ql_log(ql_log_warn, vha, 0x117f, 4791 "Failed to allocate driver version param.\n"); 4792 return QLA_MEMORY_ALLOC_FAILED; 4793 } 4794 4795 memcpy(str, "\x7\x3\x11\x0", 4); 4796 dwlen = str[0]; 4797 len = dwlen * 4 - 4; 4798 memset(str + 4, 0, len); 4799 if (len > strlen(version)) 4800 len = strlen(version); 4801 memcpy(str + 4, version, len); 4802 4803 mcp->mb[0] = MBC_SET_RNID_PARAMS; 4804 mcp->mb[1] = 
RNID_TYPE_SET_VERSION << 8 | dwlen; 4805 mcp->mb[2] = MSW(LSD(str_dma)); 4806 mcp->mb[3] = LSW(LSD(str_dma)); 4807 mcp->mb[6] = MSW(MSD(str_dma)); 4808 mcp->mb[7] = LSW(MSD(str_dma)); 4809 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 4810 mcp->in_mb = MBX_1|MBX_0; 4811 mcp->tov = MBX_TOV_SECONDS; 4812 mcp->flags = 0; 4813 rval = qla2x00_mailbox_command(vha, mcp); 4814 4815 if (rval != QLA_SUCCESS) { 4816 ql_dbg(ql_dbg_mbx, vha, 0x1180, 4817 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]); 4818 } else { 4819 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1181, 4820 "Done %s.\n", __func__); 4821 } 4822 4823 dma_pool_free(ha->s_dma_pool, str, str_dma); 4824 4825 return rval; 4826 } 4827 4828 int 4829 qla24xx_get_port_login_templ(scsi_qla_host_t *vha, dma_addr_t buf_dma, 4830 void *buf, uint16_t bufsiz) 4831 { 4832 int rval, i; 4833 mbx_cmd_t mc; 4834 mbx_cmd_t *mcp = &mc; 4835 uint32_t *bp; 4836 4837 if (!IS_FWI2_CAPABLE(vha->hw)) 4838 return QLA_FUNCTION_FAILED; 4839 4840 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159, 4841 "Entered %s.\n", __func__); 4842 4843 mcp->mb[0] = MBC_GET_RNID_PARAMS; 4844 mcp->mb[1] = RNID_TYPE_PORT_LOGIN << 8; 4845 mcp->mb[2] = MSW(buf_dma); 4846 mcp->mb[3] = LSW(buf_dma); 4847 mcp->mb[6] = MSW(MSD(buf_dma)); 4848 mcp->mb[7] = LSW(MSD(buf_dma)); 4849 mcp->mb[8] = bufsiz/4; 4850 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 4851 mcp->in_mb = MBX_1|MBX_0; 4852 mcp->tov = MBX_TOV_SECONDS; 4853 mcp->flags = 0; 4854 rval = qla2x00_mailbox_command(vha, mcp); 4855 4856 if (rval != QLA_SUCCESS) { 4857 ql_dbg(ql_dbg_mbx, vha, 0x115a, 4858 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]); 4859 } else { 4860 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b, 4861 "Done %s.\n", __func__); 4862 bp = (uint32_t *) buf; 4863 for (i = 0; i < (bufsiz-4)/4; i++, bp++) 4864 *bp = le32_to_cpu(*bp); 4865 } 4866 4867 return rval; 4868 } 4869 4870 int 4871 qla25xx_set_els_cmds_supported(scsi_qla_host_t *vha) 4872 { 4873 int rval; 4874 mbx_cmd_t mc; 4875 mbx_cmd_t *mcp = &mc; 4876 uint8_t *els_cmd_map; 4877 dma_addr_t els_cmd_map_dma; 4878 uint cmd_opcode = ELS_COMMAND_RDP; 4879 uint index = cmd_opcode / 8; 4880 uint bit = cmd_opcode % 8; 4881 struct qla_hw_data *ha = vha->hw; 4882 4883 if (!IS_QLA25XX(ha) && !IS_QLA2031(ha) && !IS_QLA27XX(ha)) 4884 return QLA_SUCCESS; 4885 4886 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1197, 4887 "Entered %s.\n", __func__); 4888 4889 els_cmd_map = dma_alloc_coherent(&ha->pdev->dev, ELS_CMD_MAP_SIZE, 4890 &els_cmd_map_dma, GFP_KERNEL); 4891 if (!els_cmd_map) { 4892 ql_log(ql_log_warn, vha, 0x7101, 4893 "Failed to allocate RDP els command param.\n"); 4894 return QLA_MEMORY_ALLOC_FAILED; 4895 } 4896 4897 memset(els_cmd_map, 0, ELS_CMD_MAP_SIZE); 4898 4899 els_cmd_map[index] |= 1 << bit; 4900 4901 mcp->mb[0] = MBC_SET_RNID_PARAMS; 4902 mcp->mb[1] = RNID_TYPE_ELS_CMD << 8; 4903 mcp->mb[2] = MSW(LSD(els_cmd_map_dma)); 4904 mcp->mb[3] = LSW(LSD(els_cmd_map_dma)); 4905 mcp->mb[6] = MSW(MSD(els_cmd_map_dma)); 4906 mcp->mb[7] = LSW(MSD(els_cmd_map_dma)); 4907 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 4908 mcp->in_mb = MBX_1|MBX_0; 4909 mcp->tov = MBX_TOV_SECONDS; 4910 mcp->flags = MBX_DMA_OUT; 4911 mcp->buf_size = ELS_CMD_MAP_SIZE; 4912 rval = qla2x00_mailbox_command(vha, mcp); 4913 4914 if (rval != QLA_SUCCESS) { 4915 ql_dbg(ql_dbg_mbx, vha, 0x118d, 4916 "Failed=%x (%x,%x).\n", rval, mcp->mb[0], mcp->mb[1]); 4917 } else { 4918 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c, 4919 "Done %s.\n", __func__); 4920 
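/* The bitmap sent above flags ELS_COMMAND_RDP as an ELS opcode handled by the driver. */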
} 4921 4922 dma_free_coherent(&ha->pdev->dev, ELS_CMD_MAP_SIZE, 4923 els_cmd_map, els_cmd_map_dma); 4924 4925 return rval; 4926 } 4927 4928 int 4929 qla24xx_get_buffer_credits(scsi_qla_host_t *vha, struct buffer_credit_24xx *bbc, 4930 dma_addr_t bbc_dma) 4931 { 4932 mbx_cmd_t mc; 4933 mbx_cmd_t *mcp = &mc; 4934 int rval; 4935 4936 if (!IS_FWI2_CAPABLE(vha->hw)) 4937 return QLA_FUNCTION_FAILED; 4938 4939 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118e, 4940 "Entered %s.\n", __func__); 4941 4942 mcp->mb[0] = MBC_GET_RNID_PARAMS; 4943 mcp->mb[1] = RNID_BUFFER_CREDITS << 8; 4944 mcp->mb[2] = MSW(LSD(bbc_dma)); 4945 mcp->mb[3] = LSW(LSD(bbc_dma)); 4946 mcp->mb[6] = MSW(MSD(bbc_dma)); 4947 mcp->mb[7] = LSW(MSD(bbc_dma)); 4948 mcp->mb[8] = sizeof(*bbc) / sizeof(*bbc->parameter); 4949 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 4950 mcp->in_mb = MBX_1|MBX_0; 4951 mcp->buf_size = sizeof(*bbc); 4952 mcp->flags = MBX_DMA_IN; 4953 mcp->tov = MBX_TOV_SECONDS; 4954 rval = qla2x00_mailbox_command(vha, mcp); 4955 4956 if (rval != QLA_SUCCESS) { 4957 ql_dbg(ql_dbg_mbx, vha, 0x118f, 4958 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]); 4959 } else { 4960 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1190, 4961 "Done %s.\n", __func__); 4962 } 4963 4964 return rval; 4965 } 4966 4967 static int 4968 qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp) 4969 { 4970 int rval; 4971 mbx_cmd_t mc; 4972 mbx_cmd_t *mcp = &mc; 4973 4974 if (!IS_FWI2_CAPABLE(vha->hw)) 4975 return QLA_FUNCTION_FAILED; 4976 4977 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159, 4978 "Entered %s.\n", __func__); 4979 4980 mcp->mb[0] = MBC_GET_RNID_PARAMS; 4981 mcp->mb[1] = RNID_TYPE_ASIC_TEMP << 8; 4982 mcp->out_mb = MBX_1|MBX_0; 4983 mcp->in_mb = MBX_1|MBX_0; 4984 mcp->tov = MBX_TOV_SECONDS; 4985 mcp->flags = 0; 4986 rval = qla2x00_mailbox_command(vha, mcp); 4987 *temp = mcp->mb[1]; 4988 4989 if (rval != QLA_SUCCESS) { 4990 ql_dbg(ql_dbg_mbx, vha, 0x115a, 4991 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]); 4992 } else { 4993 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b, 4994 "Done %s.\n", __func__); 4995 } 4996 4997 return rval; 4998 } 4999 5000 int 5001 qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp, 5002 uint16_t dev, uint16_t off, uint16_t len, uint16_t opt) 5003 { 5004 int rval; 5005 mbx_cmd_t mc; 5006 mbx_cmd_t *mcp = &mc; 5007 struct qla_hw_data *ha = vha->hw; 5008 5009 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8, 5010 "Entered %s.\n", __func__); 5011 5012 if (!IS_FWI2_CAPABLE(ha)) 5013 return QLA_FUNCTION_FAILED; 5014 5015 if (len == 1) 5016 opt |= BIT_0; 5017 5018 mcp->mb[0] = MBC_READ_SFP; 5019 mcp->mb[1] = dev; 5020 mcp->mb[2] = MSW(LSD(sfp_dma)); 5021 mcp->mb[3] = LSW(LSD(sfp_dma)); 5022 mcp->mb[6] = MSW(MSD(sfp_dma)); 5023 mcp->mb[7] = LSW(MSD(sfp_dma)); 5024 mcp->mb[8] = len; 5025 mcp->mb[9] = off; 5026 mcp->mb[10] = opt; 5027 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 5028 mcp->in_mb = MBX_1|MBX_0; 5029 mcp->tov = MBX_TOV_SECONDS; 5030 mcp->flags = 0; 5031 rval = qla2x00_mailbox_command(vha, mcp); 5032 5033 if (opt & BIT_0) 5034 *sfp = mcp->mb[1]; 5035 5036 if (rval != QLA_SUCCESS) { 5037 ql_dbg(ql_dbg_mbx, vha, 0x10e9, 5038 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5039 if (mcp->mb[0] == MBS_COMMAND_ERROR && mcp->mb[1] == 0x22) { 5040 /* sfp is not there */ 5041 rval = QLA_INTERFACE_ERROR; 5042 } 5043 } else { 5044 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea, 5045 "Done %s.\n", __func__); 5046 } 5047 5048 return rval;
5049 } 5050 5051 int 5052 qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp, 5053 uint16_t dev, uint16_t off, uint16_t len, uint16_t opt) 5054 { 5055 int rval; 5056 mbx_cmd_t mc; 5057 mbx_cmd_t *mcp = &mc; 5058 struct qla_hw_data *ha = vha->hw; 5059 5060 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10eb, 5061 "Entered %s.\n", __func__); 5062 5063 if (!IS_FWI2_CAPABLE(ha)) 5064 return QLA_FUNCTION_FAILED; 5065 5066 if (len == 1) 5067 opt |= BIT_0; 5068 5069 if (opt & BIT_0) 5070 len = *sfp; 5071 5072 mcp->mb[0] = MBC_WRITE_SFP; 5073 mcp->mb[1] = dev; 5074 mcp->mb[2] = MSW(LSD(sfp_dma)); 5075 mcp->mb[3] = LSW(LSD(sfp_dma)); 5076 mcp->mb[6] = MSW(MSD(sfp_dma)); 5077 mcp->mb[7] = LSW(MSD(sfp_dma)); 5078 mcp->mb[8] = len; 5079 mcp->mb[9] = off; 5080 mcp->mb[10] = opt; 5081 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 5082 mcp->in_mb = MBX_1|MBX_0; 5083 mcp->tov = MBX_TOV_SECONDS; 5084 mcp->flags = 0; 5085 rval = qla2x00_mailbox_command(vha, mcp); 5086 5087 if (rval != QLA_SUCCESS) { 5088 ql_dbg(ql_dbg_mbx, vha, 0x10ec, 5089 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5090 } else { 5091 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ed, 5092 "Done %s.\n", __func__); 5093 } 5094 5095 return rval; 5096 } 5097 5098 int 5099 qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma, 5100 uint16_t size_in_bytes, uint16_t *actual_size) 5101 { 5102 int rval; 5103 mbx_cmd_t mc; 5104 mbx_cmd_t *mcp = &mc; 5105 5106 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ee, 5107 "Entered %s.\n", __func__); 5108 5109 if (!IS_CNA_CAPABLE(vha->hw)) 5110 return QLA_FUNCTION_FAILED; 5111 5112 mcp->mb[0] = MBC_GET_XGMAC_STATS; 5113 mcp->mb[2] = MSW(stats_dma); 5114 mcp->mb[3] = LSW(stats_dma); 5115 mcp->mb[6] = MSW(MSD(stats_dma)); 5116 mcp->mb[7] = LSW(MSD(stats_dma)); 5117 mcp->mb[8] = size_in_bytes >> 2; 5118 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0; 5119 mcp->in_mb = MBX_2|MBX_1|MBX_0; 5120 mcp->tov = MBX_TOV_SECONDS; 5121 mcp->flags = 0; 5122 rval = qla2x00_mailbox_command(vha, mcp); 5123 5124 if (rval != QLA_SUCCESS) { 5125 ql_dbg(ql_dbg_mbx, vha, 0x10ef, 5126 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", 5127 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); 5128 } else { 5129 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f0, 5130 "Done %s.\n", __func__); 5131 5132 5133 *actual_size = mcp->mb[2] << 2; 5134 } 5135 5136 return rval; 5137 } 5138 5139 int 5140 qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma, 5141 uint16_t size) 5142 { 5143 int rval; 5144 mbx_cmd_t mc; 5145 mbx_cmd_t *mcp = &mc; 5146 5147 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f1, 5148 "Entered %s.\n", __func__); 5149 5150 if (!IS_CNA_CAPABLE(vha->hw)) 5151 return QLA_FUNCTION_FAILED; 5152 5153 mcp->mb[0] = MBC_GET_DCBX_PARAMS; 5154 mcp->mb[1] = 0; 5155 mcp->mb[2] = MSW(tlv_dma); 5156 mcp->mb[3] = LSW(tlv_dma); 5157 mcp->mb[6] = MSW(MSD(tlv_dma)); 5158 mcp->mb[7] = LSW(MSD(tlv_dma)); 5159 mcp->mb[8] = size; 5160 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 5161 mcp->in_mb = MBX_2|MBX_1|MBX_0; 5162 mcp->tov = MBX_TOV_SECONDS; 5163 mcp->flags = 0; 5164 rval = qla2x00_mailbox_command(vha, mcp); 5165 5166 if (rval != QLA_SUCCESS) { 5167 ql_dbg(ql_dbg_mbx, vha, 0x10f2, 5168 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", 5169 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); 5170 } else { 5171 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f3, 5172 "Done %s.\n", __func__); 5173 } 5174 5175 return rval; 5176 } 5177 5178 int 5179 qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t 
risc_addr, uint32_t *data) 5180 { 5181 int rval; 5182 mbx_cmd_t mc; 5183 mbx_cmd_t *mcp = &mc; 5184 5185 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f4, 5186 "Entered %s.\n", __func__); 5187 5188 if (!IS_FWI2_CAPABLE(vha->hw)) 5189 return QLA_FUNCTION_FAILED; 5190 5191 mcp->mb[0] = MBC_READ_RAM_EXTENDED; 5192 mcp->mb[1] = LSW(risc_addr); 5193 mcp->mb[8] = MSW(risc_addr); 5194 mcp->out_mb = MBX_8|MBX_1|MBX_0; 5195 mcp->in_mb = MBX_3|MBX_2|MBX_0; 5196 mcp->tov = 30; 5197 mcp->flags = 0; 5198 rval = qla2x00_mailbox_command(vha, mcp); 5199 if (rval != QLA_SUCCESS) { 5200 ql_dbg(ql_dbg_mbx, vha, 0x10f5, 5201 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5202 } else { 5203 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f6, 5204 "Done %s.\n", __func__); 5205 *data = mcp->mb[3] << 16 | mcp->mb[2]; 5206 } 5207 5208 return rval; 5209 } 5210 5211 int 5212 qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, 5213 uint16_t *mresp) 5214 { 5215 int rval; 5216 mbx_cmd_t mc; 5217 mbx_cmd_t *mcp = &mc; 5218 5219 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f7, 5220 "Entered %s.\n", __func__); 5221 5222 memset(mcp->mb, 0 , sizeof(mcp->mb)); 5223 mcp->mb[0] = MBC_DIAGNOSTIC_LOOP_BACK; 5224 mcp->mb[1] = mreq->options | BIT_6; // BIT_6 specifies 64 bit addressing 5225 5226 /* transfer count */ 5227 mcp->mb[10] = LSW(mreq->transfer_size); 5228 mcp->mb[11] = MSW(mreq->transfer_size); 5229 5230 /* send data address */ 5231 mcp->mb[14] = LSW(mreq->send_dma); 5232 mcp->mb[15] = MSW(mreq->send_dma); 5233 mcp->mb[20] = LSW(MSD(mreq->send_dma)); 5234 mcp->mb[21] = MSW(MSD(mreq->send_dma)); 5235 5236 /* receive data address */ 5237 mcp->mb[16] = LSW(mreq->rcv_dma); 5238 mcp->mb[17] = MSW(mreq->rcv_dma); 5239 mcp->mb[6] = LSW(MSD(mreq->rcv_dma)); 5240 mcp->mb[7] = MSW(MSD(mreq->rcv_dma)); 5241 5242 /* Iteration count */ 5243 mcp->mb[18] = LSW(mreq->iteration_count); 5244 mcp->mb[19] = MSW(mreq->iteration_count); 5245 5246 mcp->out_mb = MBX_21|MBX_20|MBX_19|MBX_18|MBX_17|MBX_16|MBX_15| 5247 MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0; 5248 if (IS_CNA_CAPABLE(vha->hw)) 5249 mcp->out_mb |= MBX_2; 5250 mcp->in_mb = MBX_19|MBX_18|MBX_3|MBX_2|MBX_1|MBX_0; 5251 5252 mcp->buf_size = mreq->transfer_size; 5253 mcp->tov = MBX_TOV_SECONDS; 5254 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; 5255 5256 rval = qla2x00_mailbox_command(vha, mcp); 5257 5258 if (rval != QLA_SUCCESS) { 5259 ql_dbg(ql_dbg_mbx, vha, 0x10f8, 5260 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[18]=%x " 5261 "mb[19]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], 5262 mcp->mb[3], mcp->mb[18], mcp->mb[19]); 5263 } else { 5264 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f9, 5265 "Done %s.\n", __func__); 5266 } 5267 5268 /* Copy mailbox information */ 5269 memcpy( mresp, mcp->mb, 64); 5270 return rval; 5271 } 5272 5273 int 5274 qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, 5275 uint16_t *mresp) 5276 { 5277 int rval; 5278 mbx_cmd_t mc; 5279 mbx_cmd_t *mcp = &mc; 5280 struct qla_hw_data *ha = vha->hw; 5281 5282 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fa, 5283 "Entered %s.\n", __func__); 5284 5285 memset(mcp->mb, 0 , sizeof(mcp->mb)); 5286 mcp->mb[0] = MBC_DIAGNOSTIC_ECHO; 5287 /* BIT_6 specifies 64bit address */ 5288 mcp->mb[1] = mreq->options | BIT_15 | BIT_6; 5289 if (IS_CNA_CAPABLE(ha)) { 5290 mcp->mb[2] = vha->fcoe_fcf_idx; 5291 } 5292 mcp->mb[16] = LSW(mreq->rcv_dma); 5293 mcp->mb[17] = MSW(mreq->rcv_dma); 5294 mcp->mb[6] = LSW(MSD(mreq->rcv_dma)); 5295 mcp->mb[7] = MSW(MSD(mreq->rcv_dma)); 5296 5297 
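/* Transfer count; the send-buffer address follows in mb[14/15/20/21], mirroring the loopback variant above. */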
mcp->mb[10] = LSW(mreq->transfer_size); 5298 5299 mcp->mb[14] = LSW(mreq->send_dma); 5300 mcp->mb[15] = MSW(mreq->send_dma); 5301 mcp->mb[20] = LSW(MSD(mreq->send_dma)); 5302 mcp->mb[21] = MSW(MSD(mreq->send_dma)); 5303 5304 mcp->out_mb = MBX_21|MBX_20|MBX_17|MBX_16|MBX_15| 5305 MBX_14|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0; 5306 if (IS_CNA_CAPABLE(ha)) 5307 mcp->out_mb |= MBX_2; 5308 5309 mcp->in_mb = MBX_0; 5310 if (IS_CNA_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || 5311 IS_QLA2031(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) 5312 mcp->in_mb |= MBX_1; 5313 if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) || 5314 IS_QLA28XX(ha)) 5315 mcp->in_mb |= MBX_3; 5316 5317 mcp->tov = MBX_TOV_SECONDS; 5318 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; 5319 mcp->buf_size = mreq->transfer_size; 5320 5321 rval = qla2x00_mailbox_command(vha, mcp); 5322 5323 if (rval != QLA_SUCCESS) { 5324 ql_dbg(ql_dbg_mbx, vha, 0x10fb, 5325 "Failed=%x mb[0]=%x mb[1]=%x.\n", 5326 rval, mcp->mb[0], mcp->mb[1]); 5327 } else { 5328 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fc, 5329 "Done %s.\n", __func__); 5330 } 5331 5332 /* Copy mailbox information */ 5333 memcpy(mresp, mcp->mb, 64); 5334 return rval; 5335 } 5336 5337 int 5338 qla84xx_reset_chip(scsi_qla_host_t *vha, uint16_t enable_diagnostic) 5339 { 5340 int rval; 5341 mbx_cmd_t mc; 5342 mbx_cmd_t *mcp = &mc; 5343 5344 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fd, 5345 "Entered %s enable_diag=%d.\n", __func__, enable_diagnostic); 5346 5347 mcp->mb[0] = MBC_ISP84XX_RESET; 5348 mcp->mb[1] = enable_diagnostic; 5349 mcp->out_mb = MBX_1|MBX_0; 5350 mcp->in_mb = MBX_1|MBX_0; 5351 mcp->tov = MBX_TOV_SECONDS; 5352 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; 5353 rval = qla2x00_mailbox_command(vha, mcp); 5354 5355 if (rval != QLA_SUCCESS) 5356 ql_dbg(ql_dbg_mbx, vha, 0x10fe, "Failed=%x.\n", rval); 5357 else 5358 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ff, 5359 "Done %s.\n", __func__); 5360 5361 return rval; 5362 } 5363 5364 int 5365 qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data) 5366 { 5367 int rval; 5368 mbx_cmd_t mc; 5369 mbx_cmd_t *mcp = &mc; 5370 5371 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1100, 5372 "Entered %s.\n", __func__); 5373 5374 if (!IS_FWI2_CAPABLE(vha->hw)) 5375 return QLA_FUNCTION_FAILED; 5376 5377 mcp->mb[0] = MBC_WRITE_RAM_WORD_EXTENDED; 5378 mcp->mb[1] = LSW(risc_addr); 5379 mcp->mb[2] = LSW(data); 5380 mcp->mb[3] = MSW(data); 5381 mcp->mb[8] = MSW(risc_addr); 5382 mcp->out_mb = MBX_8|MBX_3|MBX_2|MBX_1|MBX_0; 5383 mcp->in_mb = MBX_1|MBX_0; 5384 mcp->tov = 30; 5385 mcp->flags = 0; 5386 rval = qla2x00_mailbox_command(vha, mcp); 5387 if (rval != QLA_SUCCESS) { 5388 ql_dbg(ql_dbg_mbx, vha, 0x1101, 5389 "Failed=%x mb[0]=%x mb[1]=%x.\n", 5390 rval, mcp->mb[0], mcp->mb[1]); 5391 } else { 5392 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1102, 5393 "Done %s.\n", __func__); 5394 } 5395 5396 return rval; 5397 } 5398 5399 int 5400 qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb) 5401 { 5402 int rval; 5403 uint32_t stat, timer; 5404 uint16_t mb0 = 0; 5405 struct qla_hw_data *ha = vha->hw; 5406 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 5407 5408 rval = QLA_SUCCESS; 5409 5410 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1103, 5411 "Entered %s.\n", __func__); 5412 5413 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); 5414 5415 /* Write the MBC data to the registers */ 5416 WRT_REG_WORD(®->mailbox0, MBC_WRITE_MPI_REGISTER); 5417 WRT_REG_WORD(®->mailbox1, mb[0]); 5418 
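/* Caller-supplied mb[0..3] land in mailbox1..mailbox4; mailbox0, written above, carries the MBC_WRITE_MPI_REGISTER opcode. */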
WRT_REG_WORD(®->mailbox2, mb[1]); 5419 WRT_REG_WORD(®->mailbox3, mb[2]); 5420 WRT_REG_WORD(®->mailbox4, mb[3]); 5421 5422 WRT_REG_DWORD(®->hccr, HCCRX_SET_HOST_INT); 5423 5424 /* Poll for MBC interrupt */ 5425 for (timer = 6000000; timer; timer--) { 5426 /* Check for pending interrupts. */ 5427 stat = RD_REG_DWORD(®->host_status); 5428 if (stat & HSRX_RISC_INT) { 5429 stat &= 0xff; 5430 5431 if (stat == 0x1 || stat == 0x2 || 5432 stat == 0x10 || stat == 0x11) { 5433 set_bit(MBX_INTERRUPT, 5434 &ha->mbx_cmd_flags); 5435 mb0 = RD_REG_WORD(®->mailbox0); 5436 WRT_REG_DWORD(®->hccr, 5437 HCCRX_CLR_RISC_INT); 5438 RD_REG_DWORD(®->hccr); 5439 break; 5440 } 5441 } 5442 udelay(5); 5443 } 5444 5445 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) 5446 rval = mb0 & MBS_MASK; 5447 else 5448 rval = QLA_FUNCTION_FAILED; 5449 5450 if (rval != QLA_SUCCESS) { 5451 ql_dbg(ql_dbg_mbx, vha, 0x1104, 5452 "Failed=%x mb[0]=%x.\n", rval, mb[0]); 5453 } else { 5454 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1105, 5455 "Done %s.\n", __func__); 5456 } 5457 5458 return rval; 5459 } 5460 5461 /* Set the specified data rate */ 5462 int 5463 qla2x00_set_data_rate(scsi_qla_host_t *vha, uint16_t mode) 5464 { 5465 int rval; 5466 mbx_cmd_t mc; 5467 mbx_cmd_t *mcp = &mc; 5468 struct qla_hw_data *ha = vha->hw; 5469 uint16_t val; 5470 5471 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106, 5472 "Entered %s speed:0x%x mode:0x%x.\n", __func__, ha->set_data_rate, 5473 mode); 5474 5475 if (!IS_FWI2_CAPABLE(ha)) 5476 return QLA_FUNCTION_FAILED; 5477 5478 memset(mcp, 0, sizeof(*mcp)); 5479 switch (ha->set_data_rate) { 5480 case PORT_SPEED_AUTO: 5481 case PORT_SPEED_4GB: 5482 case PORT_SPEED_8GB: 5483 case PORT_SPEED_16GB: 5484 case PORT_SPEED_32GB: 5485 val = ha->set_data_rate; 5486 break; 5487 default: 5488 ql_log(ql_log_warn, vha, 0x1199, 5489 "Unrecognized speed setting:%d. 
Setting Autoneg\n", 5490 ha->set_data_rate); 5491 val = ha->set_data_rate = PORT_SPEED_AUTO; 5492 break; 5493 } 5494 5495 mcp->mb[0] = MBC_DATA_RATE; 5496 mcp->mb[1] = mode; 5497 mcp->mb[2] = val; 5498 5499 mcp->out_mb = MBX_2|MBX_1|MBX_0; 5500 mcp->in_mb = MBX_2|MBX_1|MBX_0; 5501 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) 5502 mcp->in_mb |= MBX_4|MBX_3; 5503 mcp->tov = MBX_TOV_SECONDS; 5504 mcp->flags = 0; 5505 rval = qla2x00_mailbox_command(vha, mcp); 5506 if (rval != QLA_SUCCESS) { 5507 ql_dbg(ql_dbg_mbx, vha, 0x1107, 5508 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5509 } else { 5510 if (mcp->mb[1] != 0x7) 5511 ql_dbg(ql_dbg_mbx, vha, 0x1179, 5512 "Speed set:0x%x\n", mcp->mb[1]); 5513 5514 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108, 5515 "Done %s.\n", __func__); 5516 } 5517 5518 return rval; 5519 } 5520 5521 int 5522 qla2x00_get_data_rate(scsi_qla_host_t *vha) 5523 { 5524 int rval; 5525 mbx_cmd_t mc; 5526 mbx_cmd_t *mcp = &mc; 5527 struct qla_hw_data *ha = vha->hw; 5528 5529 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106, 5530 "Entered %s.\n", __func__); 5531 5532 if (!IS_FWI2_CAPABLE(ha)) 5533 return QLA_FUNCTION_FAILED; 5534 5535 mcp->mb[0] = MBC_DATA_RATE; 5536 mcp->mb[1] = QLA_GET_DATA_RATE; 5537 mcp->out_mb = MBX_1|MBX_0; 5538 mcp->in_mb = MBX_2|MBX_1|MBX_0; 5539 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) 5540 mcp->in_mb |= MBX_3; 5541 mcp->tov = MBX_TOV_SECONDS; 5542 mcp->flags = 0; 5543 rval = qla2x00_mailbox_command(vha, mcp); 5544 if (rval != QLA_SUCCESS) { 5545 ql_dbg(ql_dbg_mbx, vha, 0x1107, 5546 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5547 } else { 5548 if (mcp->mb[1] != 0x7) 5549 ha->link_data_rate = mcp->mb[1]; 5550 5551 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 5552 if (mcp->mb[4] & BIT_0) 5553 ql_log(ql_log_info, vha, 0x11a2, 5554 "FEC=enabled (data rate).\n"); 5555 } 5556 5557 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108, 5558 "Done %s.\n", __func__); 5559 if (mcp->mb[1] != 0x7) 5560 ha->link_data_rate = mcp->mb[1]; 5561 } 5562 5563 return rval; 5564 } 5565 5566 int 5567 qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb) 5568 { 5569 int rval; 5570 mbx_cmd_t mc; 5571 mbx_cmd_t *mcp = &mc; 5572 struct qla_hw_data *ha = vha->hw; 5573 5574 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1109, 5575 "Entered %s.\n", __func__); 5576 5577 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA8044(ha) && 5578 !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 5579 return QLA_FUNCTION_FAILED; 5580 mcp->mb[0] = MBC_GET_PORT_CONFIG; 5581 mcp->out_mb = MBX_0; 5582 mcp->in_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 5583 mcp->tov = MBX_TOV_SECONDS; 5584 mcp->flags = 0; 5585 5586 rval = qla2x00_mailbox_command(vha, mcp); 5587 5588 if (rval != QLA_SUCCESS) { 5589 ql_dbg(ql_dbg_mbx, vha, 0x110a, 5590 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5591 } else { 5592 /* Copy all bits to preserve original value */ 5593 memcpy(mb, &mcp->mb[1], sizeof(uint16_t) * 4); 5594 5595 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110b, 5596 "Done %s.\n", __func__); 5597 } 5598 return rval; 5599 } 5600 5601 int 5602 qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb) 5603 { 5604 int rval; 5605 mbx_cmd_t mc; 5606 mbx_cmd_t *mcp = &mc; 5607 5608 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110c, 5609 "Entered %s.\n", __func__); 5610 5611 mcp->mb[0] = MBC_SET_PORT_CONFIG; 5612 /* Copy all bits to preserve original setting */ 5613 memcpy(&mcp->mb[1], mb, sizeof(uint16_t) * 4); 5614 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 5615 mcp->in_mb = MBX_0; 5616 mcp->tov = 
MBX_TOV_SECONDS; 5617 mcp->flags = 0; 5618 rval = qla2x00_mailbox_command(vha, mcp); 5619 5620 if (rval != QLA_SUCCESS) { 5621 ql_dbg(ql_dbg_mbx, vha, 0x110d, 5622 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5623 } else 5624 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110e, 5625 "Done %s.\n", __func__); 5626 5627 return rval; 5628 } 5629 5630 5631 int 5632 qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority, 5633 uint16_t *mb) 5634 { 5635 int rval; 5636 mbx_cmd_t mc; 5637 mbx_cmd_t *mcp = &mc; 5638 struct qla_hw_data *ha = vha->hw; 5639 5640 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110f, 5641 "Entered %s.\n", __func__); 5642 5643 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha)) 5644 return QLA_FUNCTION_FAILED; 5645 5646 mcp->mb[0] = MBC_PORT_PARAMS; 5647 mcp->mb[1] = loop_id; 5648 if (ha->flags.fcp_prio_enabled) 5649 mcp->mb[2] = BIT_1; 5650 else 5651 mcp->mb[2] = BIT_2; 5652 mcp->mb[4] = priority & 0xf; 5653 mcp->mb[9] = vha->vp_idx; 5654 mcp->out_mb = MBX_9|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 5655 mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0; 5656 mcp->tov = 30; 5657 mcp->flags = 0; 5658 rval = qla2x00_mailbox_command(vha, mcp); 5659 if (mb != NULL) { 5660 mb[0] = mcp->mb[0]; 5661 mb[1] = mcp->mb[1]; 5662 mb[3] = mcp->mb[3]; 5663 mb[4] = mcp->mb[4]; 5664 } 5665 5666 if (rval != QLA_SUCCESS) { 5667 ql_dbg(ql_dbg_mbx, vha, 0x10cd, "Failed=%x.\n", rval); 5668 } else { 5669 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10cc, 5670 "Done %s.\n", __func__); 5671 } 5672 5673 return rval; 5674 } 5675 5676 int 5677 qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp) 5678 { 5679 int rval = QLA_FUNCTION_FAILED; 5680 struct qla_hw_data *ha = vha->hw; 5681 uint8_t byte; 5682 5683 if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha)) { 5684 ql_dbg(ql_dbg_mbx, vha, 0x1150, 5685 "Thermal not supported by this card.\n"); 5686 return rval; 5687 } 5688 5689 if (IS_QLA25XX(ha)) { 5690 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC && 5691 ha->pdev->subsystem_device == 0x0175) { 5692 rval = qla2x00_read_sfp(vha, 0, &byte, 5693 0x98, 0x1, 1, BIT_13|BIT_0); 5694 *temp = byte; 5695 return rval; 5696 } 5697 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP && 5698 ha->pdev->subsystem_device == 0x338e) { 5699 rval = qla2x00_read_sfp(vha, 0, &byte, 5700 0x98, 0x1, 1, BIT_15|BIT_14|BIT_0); 5701 *temp = byte; 5702 return rval; 5703 } 5704 ql_dbg(ql_dbg_mbx, vha, 0x10c9, 5705 "Thermal not supported by this card.\n"); 5706 return rval; 5707 } 5708 5709 if (IS_QLA82XX(ha)) { 5710 *temp = qla82xx_read_temperature(vha); 5711 rval = QLA_SUCCESS; 5712 return rval; 5713 } else if (IS_QLA8044(ha)) { 5714 *temp = qla8044_read_temperature(vha); 5715 rval = QLA_SUCCESS; 5716 return rval; 5717 } 5718 5719 rval = qla2x00_read_asic_temperature(vha, temp); 5720 return rval; 5721 } 5722 5723 int 5724 qla82xx_mbx_intr_enable(scsi_qla_host_t *vha) 5725 { 5726 int rval; 5727 struct qla_hw_data *ha = vha->hw; 5728 mbx_cmd_t mc; 5729 mbx_cmd_t *mcp = &mc; 5730 5731 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1017, 5732 "Entered %s.\n", __func__); 5733 5734 if (!IS_FWI2_CAPABLE(ha)) 5735 return QLA_FUNCTION_FAILED; 5736 5737 memset(mcp, 0, sizeof(mbx_cmd_t)); 5738 mcp->mb[0] = MBC_TOGGLE_INTERRUPT; 5739 mcp->mb[1] = 1; 5740 5741 mcp->out_mb = MBX_1|MBX_0; 5742 mcp->in_mb = MBX_0; 5743 mcp->tov = 30; 5744 mcp->flags = 0; 5745 5746 rval = qla2x00_mailbox_command(vha, mcp); 5747 if (rval != QLA_SUCCESS) { 5748 ql_dbg(ql_dbg_mbx, vha, 0x1016, 5749 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5750 } else { 
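/* mb[1] = 1 asked the firmware to re-enable interrupt delivery; the disable variant below sends mb[1] = 0. */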
5751 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100e, 5752 "Done %s.\n", __func__); 5753 } 5754 5755 return rval; 5756 } 5757 5758 int 5759 qla82xx_mbx_intr_disable(scsi_qla_host_t *vha) 5760 { 5761 int rval; 5762 struct qla_hw_data *ha = vha->hw; 5763 mbx_cmd_t mc; 5764 mbx_cmd_t *mcp = &mc; 5765 5766 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100d, 5767 "Entered %s.\n", __func__); 5768 5769 if (!IS_P3P_TYPE(ha)) 5770 return QLA_FUNCTION_FAILED; 5771 5772 memset(mcp, 0, sizeof(mbx_cmd_t)); 5773 mcp->mb[0] = MBC_TOGGLE_INTERRUPT; 5774 mcp->mb[1] = 0; 5775 5776 mcp->out_mb = MBX_1|MBX_0; 5777 mcp->in_mb = MBX_0; 5778 mcp->tov = 30; 5779 mcp->flags = 0; 5780 5781 rval = qla2x00_mailbox_command(vha, mcp); 5782 if (rval != QLA_SUCCESS) { 5783 ql_dbg(ql_dbg_mbx, vha, 0x100c, 5784 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5785 } else { 5786 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100b, 5787 "Done %s.\n", __func__); 5788 } 5789 5790 return rval; 5791 } 5792 5793 int 5794 qla82xx_md_get_template_size(scsi_qla_host_t *vha) 5795 { 5796 struct qla_hw_data *ha = vha->hw; 5797 mbx_cmd_t mc; 5798 mbx_cmd_t *mcp = &mc; 5799 int rval = QLA_FUNCTION_FAILED; 5800 5801 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111f, 5802 "Entered %s.\n", __func__); 5803 5804 memset(mcp->mb, 0 , sizeof(mcp->mb)); 5805 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); 5806 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); 5807 mcp->mb[2] = LSW(RQST_TMPLT_SIZE); 5808 mcp->mb[3] = MSW(RQST_TMPLT_SIZE); 5809 5810 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 5811 mcp->in_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8| 5812 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 5813 5814 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; 5815 mcp->tov = MBX_TOV_SECONDS; 5816 rval = qla2x00_mailbox_command(vha, mcp); 5817 5818 /* Always copy back return mailbox values. 
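On success mb[3]/mb[2] hold the high/low words of the minidump template size; a zero size is treated as a failure below.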
*/ 5819 if (rval != QLA_SUCCESS) { 5820 ql_dbg(ql_dbg_mbx, vha, 0x1120, 5821 "mailbox command FAILED=0x%x, subcode=%x.\n", 5822 (mcp->mb[1] << 16) | mcp->mb[0], 5823 (mcp->mb[3] << 16) | mcp->mb[2]); 5824 } else { 5825 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1121, 5826 "Done %s.\n", __func__); 5827 ha->md_template_size = ((mcp->mb[3] << 16) | mcp->mb[2]); 5828 if (!ha->md_template_size) { 5829 ql_dbg(ql_dbg_mbx, vha, 0x1122, 5830 "Null template size obtained.\n"); 5831 rval = QLA_FUNCTION_FAILED; 5832 } 5833 } 5834 return rval; 5835 } 5836 5837 int 5838 qla82xx_md_get_template(scsi_qla_host_t *vha) 5839 { 5840 struct qla_hw_data *ha = vha->hw; 5841 mbx_cmd_t mc; 5842 mbx_cmd_t *mcp = &mc; 5843 int rval = QLA_FUNCTION_FAILED; 5844 5845 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1123, 5846 "Entered %s.\n", __func__); 5847 5848 ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev, 5849 ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL); 5850 if (!ha->md_tmplt_hdr) { 5851 ql_log(ql_log_warn, vha, 0x1124, 5852 "Unable to allocate memory for Minidump template.\n"); 5853 return rval; 5854 } 5855 5856 memset(mcp->mb, 0 , sizeof(mcp->mb)); 5857 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); 5858 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); 5859 mcp->mb[2] = LSW(RQST_TMPLT); 5860 mcp->mb[3] = MSW(RQST_TMPLT); 5861 mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma)); 5862 mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma)); 5863 mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma)); 5864 mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma)); 5865 mcp->mb[8] = LSW(ha->md_template_size); 5866 mcp->mb[9] = MSW(ha->md_template_size); 5867 5868 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; 5869 mcp->tov = MBX_TOV_SECONDS; 5870 mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8| 5871 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 5872 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0; 5873 rval = qla2x00_mailbox_command(vha, mcp); 5874 5875 if (rval != QLA_SUCCESS) { 5876 ql_dbg(ql_dbg_mbx, vha, 0x1125, 5877 "mailbox command FAILED=0x%x, subcode=%x.\n", 5878 ((mcp->mb[1] << 16) | mcp->mb[0]), 5879 ((mcp->mb[3] << 16) | mcp->mb[2])); 5880 } else 5881 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1126, 5882 "Done %s.\n", __func__); 5883 return rval; 5884 } 5885 5886 int 5887 qla8044_md_get_template(scsi_qla_host_t *vha) 5888 { 5889 struct qla_hw_data *ha = vha->hw; 5890 mbx_cmd_t mc; 5891 mbx_cmd_t *mcp = &mc; 5892 int rval = QLA_FUNCTION_FAILED; 5893 int offset = 0, size = MINIDUMP_SIZE_36K; 5894 5895 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11f, 5896 "Entered %s.\n", __func__); 5897 5898 ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev, 5899 ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL); 5900 if (!ha->md_tmplt_hdr) { 5901 ql_log(ql_log_warn, vha, 0xb11b, 5902 "Unable to allocate memory for Minidump template.\n"); 5903 return rval; 5904 } 5905 5906 memset(mcp->mb, 0 , sizeof(mcp->mb)); 5907 while (offset < ha->md_template_size) { 5908 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); 5909 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); 5910 mcp->mb[2] = LSW(RQST_TMPLT); 5911 mcp->mb[3] = MSW(RQST_TMPLT); 5912 mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma + offset)); 5913 mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma + offset)); 5914 mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma + offset)); 5915 mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma + offset)); 5916 mcp->mb[8] = LSW(size); 5917 mcp->mb[9] = MSW(size); 5918 mcp->mb[10] = offset & 0x0000FFFF; 5919 mcp->mb[11] = offset & 0xFFFF0000; 5920 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; 
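/* The template is fetched in MINIDUMP_SIZE_36K chunks; the DMA target and mb[10]/mb[11] track the current offset, which advances by one chunk per pass until md_template_size bytes have been pulled. */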
5921 mcp->tov = MBX_TOV_SECONDS; 5922 mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8| 5923 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 5924 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0; 5925 rval = qla2x00_mailbox_command(vha, mcp); 5926 5927 if (rval != QLA_SUCCESS) { 5928 ql_dbg(ql_dbg_mbx, vha, 0xb11c, 5929 "mailbox command FAILED=0x%x, subcode=%x.\n", 5930 ((mcp->mb[1] << 16) | mcp->mb[0]), 5931 ((mcp->mb[3] << 16) | mcp->mb[2])); 5932 return rval; 5933 } else 5934 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11d, 5935 "Done %s.\n", __func__); 5936 offset = offset + size; 5937 } 5938 return rval; 5939 } 5940 5941 int 5942 qla81xx_set_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg) 5943 { 5944 int rval; 5945 struct qla_hw_data *ha = vha->hw; 5946 mbx_cmd_t mc; 5947 mbx_cmd_t *mcp = &mc; 5948 5949 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha)) 5950 return QLA_FUNCTION_FAILED; 5951 5952 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1133, 5953 "Entered %s.\n", __func__); 5954 5955 memset(mcp, 0, sizeof(mbx_cmd_t)); 5956 mcp->mb[0] = MBC_SET_LED_CONFIG; 5957 mcp->mb[1] = led_cfg[0]; 5958 mcp->mb[2] = led_cfg[1]; 5959 if (IS_QLA8031(ha)) { 5960 mcp->mb[3] = led_cfg[2]; 5961 mcp->mb[4] = led_cfg[3]; 5962 mcp->mb[5] = led_cfg[4]; 5963 mcp->mb[6] = led_cfg[5]; 5964 } 5965 5966 mcp->out_mb = MBX_2|MBX_1|MBX_0; 5967 if (IS_QLA8031(ha)) 5968 mcp->out_mb |= MBX_6|MBX_5|MBX_4|MBX_3; 5969 mcp->in_mb = MBX_0; 5970 mcp->tov = 30; 5971 mcp->flags = 0; 5972 5973 rval = qla2x00_mailbox_command(vha, mcp); 5974 if (rval != QLA_SUCCESS) { 5975 ql_dbg(ql_dbg_mbx, vha, 0x1134, 5976 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5977 } else { 5978 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1135, 5979 "Done %s.\n", __func__); 5980 } 5981 5982 return rval; 5983 } 5984 5985 int 5986 qla81xx_get_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg) 5987 { 5988 int rval; 5989 struct qla_hw_data *ha = vha->hw; 5990 mbx_cmd_t mc; 5991 mbx_cmd_t *mcp = &mc; 5992 5993 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha)) 5994 return QLA_FUNCTION_FAILED; 5995 5996 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1136, 5997 "Entered %s.\n", __func__); 5998 5999 memset(mcp, 0, sizeof(mbx_cmd_t)); 6000 mcp->mb[0] = MBC_GET_LED_CONFIG; 6001 6002 mcp->out_mb = MBX_0; 6003 mcp->in_mb = MBX_2|MBX_1|MBX_0; 6004 if (IS_QLA8031(ha)) 6005 mcp->in_mb |= MBX_6|MBX_5|MBX_4|MBX_3; 6006 mcp->tov = 30; 6007 mcp->flags = 0; 6008 6009 rval = qla2x00_mailbox_command(vha, mcp); 6010 if (rval != QLA_SUCCESS) { 6011 ql_dbg(ql_dbg_mbx, vha, 0x1137, 6012 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 6013 } else { 6014 led_cfg[0] = mcp->mb[1]; 6015 led_cfg[1] = mcp->mb[2]; 6016 if (IS_QLA8031(ha)) { 6017 led_cfg[2] = mcp->mb[3]; 6018 led_cfg[3] = mcp->mb[4]; 6019 led_cfg[4] = mcp->mb[5]; 6020 led_cfg[5] = mcp->mb[6]; 6021 } 6022 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1138, 6023 "Done %s.\n", __func__); 6024 } 6025 6026 return rval; 6027 } 6028 6029 int 6030 qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable) 6031 { 6032 int rval; 6033 struct qla_hw_data *ha = vha->hw; 6034 mbx_cmd_t mc; 6035 mbx_cmd_t *mcp = &mc; 6036 6037 if (!IS_P3P_TYPE(ha)) 6038 return QLA_FUNCTION_FAILED; 6039 6040 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1127, 6041 "Entered %s.\n", __func__); 6042 6043 memset(mcp, 0, sizeof(mbx_cmd_t)); 6044 mcp->mb[0] = MBC_SET_LED_CONFIG; 6045 if (enable) 6046 mcp->mb[7] = 0xE; 6047 else 6048 mcp->mb[7] = 0xD; 6049 6050 mcp->out_mb = MBX_7|MBX_0; 6051 mcp->in_mb = MBX_0; 6052 mcp->tov = MBX_TOV_SECONDS; 6053 mcp->flags = 0; 6054 6055 rval = qla2x00_mailbox_command(vha, 
mcp); 6056 if (rval != QLA_SUCCESS) { 6057 ql_dbg(ql_dbg_mbx, vha, 0x1128, 6058 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 6059 } else { 6060 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1129, 6061 "Done %s.\n", __func__); 6062 } 6063 6064 return rval; 6065 } 6066 6067 int 6068 qla83xx_wr_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data) 6069 { 6070 int rval; 6071 struct qla_hw_data *ha = vha->hw; 6072 mbx_cmd_t mc; 6073 mbx_cmd_t *mcp = &mc; 6074 6075 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 6076 return QLA_FUNCTION_FAILED; 6077 6078 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1130, 6079 "Entered %s.\n", __func__); 6080 6081 mcp->mb[0] = MBC_WRITE_REMOTE_REG; 6082 mcp->mb[1] = LSW(reg); 6083 mcp->mb[2] = MSW(reg); 6084 mcp->mb[3] = LSW(data); 6085 mcp->mb[4] = MSW(data); 6086 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 6087 6088 mcp->in_mb = MBX_1|MBX_0; 6089 mcp->tov = MBX_TOV_SECONDS; 6090 mcp->flags = 0; 6091 rval = qla2x00_mailbox_command(vha, mcp); 6092 6093 if (rval != QLA_SUCCESS) { 6094 ql_dbg(ql_dbg_mbx, vha, 0x1131, 6095 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 6096 } else { 6097 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1132, 6098 "Done %s.\n", __func__); 6099 } 6100 6101 return rval; 6102 } 6103 6104 int 6105 qla2x00_port_logout(scsi_qla_host_t *vha, struct fc_port *fcport) 6106 { 6107 int rval; 6108 struct qla_hw_data *ha = vha->hw; 6109 mbx_cmd_t mc; 6110 mbx_cmd_t *mcp = &mc; 6111 6112 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 6113 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113b, 6114 "Implicit LOGO Unsupported.\n"); 6115 return QLA_FUNCTION_FAILED; 6116 } 6117 6118 6119 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113c, 6120 "Entering %s.\n", __func__); 6121 6122 /* Perform Implicit LOGO. */ 6123 mcp->mb[0] = MBC_PORT_LOGOUT; 6124 mcp->mb[1] = fcport->loop_id; 6125 mcp->mb[10] = BIT_15; 6126 mcp->out_mb = MBX_10|MBX_1|MBX_0; 6127 mcp->in_mb = MBX_0; 6128 mcp->tov = MBX_TOV_SECONDS; 6129 mcp->flags = 0; 6130 rval = qla2x00_mailbox_command(vha, mcp); 6131 if (rval != QLA_SUCCESS) 6132 ql_dbg(ql_dbg_mbx, vha, 0x113d, 6133 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 6134 else 6135 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113e, 6136 "Done %s.\n", __func__); 6137 6138 return rval; 6139 } 6140 6141 int 6142 qla83xx_rd_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t *data) 6143 { 6144 int rval; 6145 mbx_cmd_t mc; 6146 mbx_cmd_t *mcp = &mc; 6147 struct qla_hw_data *ha = vha->hw; 6148 unsigned long retry_max_time = jiffies + (2 * HZ); 6149 6150 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 6151 return QLA_FUNCTION_FAILED; 6152 6153 ql_dbg(ql_dbg_mbx, vha, 0x114b, "Entered %s.\n", __func__); 6154 6155 retry_rd_reg: 6156 mcp->mb[0] = MBC_READ_REMOTE_REG; 6157 mcp->mb[1] = LSW(reg); 6158 mcp->mb[2] = MSW(reg); 6159 mcp->out_mb = MBX_2|MBX_1|MBX_0; 6160 mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0; 6161 mcp->tov = MBX_TOV_SECONDS; 6162 mcp->flags = 0; 6163 rval = qla2x00_mailbox_command(vha, mcp); 6164 6165 if (rval != QLA_SUCCESS) { 6166 ql_dbg(ql_dbg_mbx, vha, 0x114c, 6167 "Failed=%x mb[0]=%x mb[1]=%x.\n", 6168 rval, mcp->mb[0], mcp->mb[1]); 6169 } else { 6170 *data = (mcp->mb[3] | (mcp->mb[4] << 16)); 6171 if (*data == QLA8XXX_BAD_VALUE) { 6172 /* 6173 * During soft-reset CAMRAM register reads might 6174 * return 0xbad0bad0. So retry for MAX of 2 sec 6175 * while reading camram registers. 6176 */ 6177 if (time_after(jiffies, retry_max_time)) { 6178 ql_dbg(ql_dbg_mbx, vha, 0x1141, 6179 "Failure to read CAMRAM register. 
" 6180 "data=0x%x.\n", *data); 6181 return QLA_FUNCTION_FAILED; 6182 } 6183 msleep(100); 6184 goto retry_rd_reg; 6185 } 6186 ql_dbg(ql_dbg_mbx, vha, 0x1142, "Done %s.\n", __func__); 6187 } 6188 6189 return rval; 6190 } 6191 6192 int 6193 qla83xx_restart_nic_firmware(scsi_qla_host_t *vha) 6194 { 6195 int rval; 6196 mbx_cmd_t mc; 6197 mbx_cmd_t *mcp = &mc; 6198 struct qla_hw_data *ha = vha->hw; 6199 6200 if (!IS_QLA83XX(ha)) 6201 return QLA_FUNCTION_FAILED; 6202 6203 ql_dbg(ql_dbg_mbx, vha, 0x1143, "Entered %s.\n", __func__); 6204 6205 mcp->mb[0] = MBC_RESTART_NIC_FIRMWARE; 6206 mcp->out_mb = MBX_0; 6207 mcp->in_mb = MBX_1|MBX_0; 6208 mcp->tov = MBX_TOV_SECONDS; 6209 mcp->flags = 0; 6210 rval = qla2x00_mailbox_command(vha, mcp); 6211 6212 if (rval != QLA_SUCCESS) { 6213 ql_dbg(ql_dbg_mbx, vha, 0x1144, 6214 "Failed=%x mb[0]=%x mb[1]=%x.\n", 6215 rval, mcp->mb[0], mcp->mb[1]); 6216 ha->isp_ops->fw_dump(vha, 0); 6217 } else { 6218 ql_dbg(ql_dbg_mbx, vha, 0x1145, "Done %s.\n", __func__); 6219 } 6220 6221 return rval; 6222 } 6223 6224 int 6225 qla83xx_access_control(scsi_qla_host_t *vha, uint16_t options, 6226 uint32_t start_addr, uint32_t end_addr, uint16_t *sector_size) 6227 { 6228 int rval; 6229 mbx_cmd_t mc; 6230 mbx_cmd_t *mcp = &mc; 6231 uint8_t subcode = (uint8_t)options; 6232 struct qla_hw_data *ha = vha->hw; 6233 6234 if (!IS_QLA8031(ha)) 6235 return QLA_FUNCTION_FAILED; 6236 6237 ql_dbg(ql_dbg_mbx, vha, 0x1146, "Entered %s.\n", __func__); 6238 6239 mcp->mb[0] = MBC_SET_ACCESS_CONTROL; 6240 mcp->mb[1] = options; 6241 mcp->out_mb = MBX_1|MBX_0; 6242 if (subcode & BIT_2) { 6243 mcp->mb[2] = LSW(start_addr); 6244 mcp->mb[3] = MSW(start_addr); 6245 mcp->mb[4] = LSW(end_addr); 6246 mcp->mb[5] = MSW(end_addr); 6247 mcp->out_mb |= MBX_5|MBX_4|MBX_3|MBX_2; 6248 } 6249 mcp->in_mb = MBX_2|MBX_1|MBX_0; 6250 if (!(subcode & (BIT_2 | BIT_5))) 6251 mcp->in_mb |= MBX_4|MBX_3; 6252 mcp->tov = MBX_TOV_SECONDS; 6253 mcp->flags = 0; 6254 rval = qla2x00_mailbox_command(vha, mcp); 6255 6256 if (rval != QLA_SUCCESS) { 6257 ql_dbg(ql_dbg_mbx, vha, 0x1147, 6258 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[4]=%x.\n", 6259 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3], 6260 mcp->mb[4]); 6261 ha->isp_ops->fw_dump(vha, 0); 6262 } else { 6263 if (subcode & BIT_5) 6264 *sector_size = mcp->mb[1]; 6265 else if (subcode & (BIT_6 | BIT_7)) { 6266 ql_dbg(ql_dbg_mbx, vha, 0x1148, 6267 "Driver-lock id=%x%x", mcp->mb[4], mcp->mb[3]); 6268 } else if (subcode & (BIT_3 | BIT_4)) { 6269 ql_dbg(ql_dbg_mbx, vha, 0x1149, 6270 "Flash-lock id=%x%x", mcp->mb[4], mcp->mb[3]); 6271 } 6272 ql_dbg(ql_dbg_mbx, vha, 0x114a, "Done %s.\n", __func__); 6273 } 6274 6275 return rval; 6276 } 6277 6278 int 6279 qla2x00_dump_mctp_data(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr, 6280 uint32_t size) 6281 { 6282 int rval; 6283 mbx_cmd_t mc; 6284 mbx_cmd_t *mcp = &mc; 6285 6286 if (!IS_MCTP_CAPABLE(vha->hw)) 6287 return QLA_FUNCTION_FAILED; 6288 6289 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114f, 6290 "Entered %s.\n", __func__); 6291 6292 mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED; 6293 mcp->mb[1] = LSW(addr); 6294 mcp->mb[2] = MSW(req_dma); 6295 mcp->mb[3] = LSW(req_dma); 6296 mcp->mb[4] = MSW(size); 6297 mcp->mb[5] = LSW(size); 6298 mcp->mb[6] = MSW(MSD(req_dma)); 6299 mcp->mb[7] = LSW(MSD(req_dma)); 6300 mcp->mb[8] = MSW(addr); 6301 /* Setting RAM ID to valid */ 6302 /* For MCTP RAM ID is 0x40 */ 6303 mcp->mb[10] = BIT_7 | 0x40; 6304 6305 mcp->out_mb |= MBX_10|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1| 6306 MBX_0; 6307 
6308 mcp->in_mb = MBX_0; 6309 mcp->tov = MBX_TOV_SECONDS; 6310 mcp->flags = 0; 6311 rval = qla2x00_mailbox_command(vha, mcp); 6312 6313 if (rval != QLA_SUCCESS) { 6314 ql_dbg(ql_dbg_mbx, vha, 0x114e, 6315 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 6316 } else { 6317 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114d, 6318 "Done %s.\n", __func__); 6319 } 6320 6321 return rval; 6322 } 6323 6324 int 6325 qla26xx_dport_diagnostics(scsi_qla_host_t *vha, 6326 void *dd_buf, uint size, uint options) 6327 { 6328 int rval; 6329 mbx_cmd_t mc; 6330 mbx_cmd_t *mcp = &mc; 6331 dma_addr_t dd_dma; 6332 6333 if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) && 6334 !IS_QLA28XX(vha->hw)) 6335 return QLA_FUNCTION_FAILED; 6336 6337 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x119f, 6338 "Entered %s.\n", __func__); 6339 6340 dd_dma = dma_map_single(&vha->hw->pdev->dev, 6341 dd_buf, size, DMA_FROM_DEVICE); 6342 if (dma_mapping_error(&vha->hw->pdev->dev, dd_dma)) { 6343 ql_log(ql_log_warn, vha, 0x1194, "Failed to map dma buffer.\n"); 6344 return QLA_MEMORY_ALLOC_FAILED; 6345 } 6346 6347 memset(dd_buf, 0, size); 6348 6349 mcp->mb[0] = MBC_DPORT_DIAGNOSTICS; 6350 mcp->mb[1] = options; 6351 mcp->mb[2] = MSW(LSD(dd_dma)); 6352 mcp->mb[3] = LSW(LSD(dd_dma)); 6353 mcp->mb[6] = MSW(MSD(dd_dma)); 6354 mcp->mb[7] = LSW(MSD(dd_dma)); 6355 mcp->mb[8] = size; 6356 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 6357 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0; 6358 mcp->buf_size = size; 6359 mcp->flags = MBX_DMA_IN; 6360 mcp->tov = MBX_TOV_SECONDS * 4; 6361 rval = qla2x00_mailbox_command(vha, mcp); 6362 6363 if (rval != QLA_SUCCESS) { 6364 ql_dbg(ql_dbg_mbx, vha, 0x1195, "Failed=%x.\n", rval); 6365 } else { 6366 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1196, 6367 "Done %s.\n", __func__); 6368 } 6369 6370 dma_unmap_single(&vha->hw->pdev->dev, dd_dma, 6371 size, DMA_FROM_DEVICE); 6372 6373 return rval; 6374 } 6375 6376 static void qla2x00_async_mb_sp_done(srb_t *sp, int res) 6377 { 6378 sp->u.iocb_cmd.u.mbx.rc = res; 6379 6380 complete(&sp->u.iocb_cmd.u.mbx.comp); 6381 /* don't free sp here. Let the caller do the free */ 6382 } 6383 6384 /* 6385 * This routine uses the IOCB interface to send a mailbox command. 6386 * This allows non-critical (non chip-setup) commands to go 6387 * out in parallel. 6388 */ 6389 int qla24xx_send_mb_cmd(struct scsi_qla_host *vha, mbx_cmd_t *mcp) 6390 { 6391 int rval = QLA_FUNCTION_FAILED; 6392 srb_t *sp; 6393 struct srb_iocb *c; 6394 6395 if (!vha->hw->flags.fw_started) 6396 goto done; 6397 6398 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL); 6399 if (!sp) 6400 goto done; 6401 6402 sp->type = SRB_MB_IOCB; 6403 sp->name = mb_to_str(mcp->mb[0]); 6404 6405 c = &sp->u.iocb_cmd; 6406 c->timeout = qla2x00_async_iocb_timeout; 6407 init_completion(&c->u.mbx.comp); 6408 6409 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); 6410 6411 memcpy(sp->u.iocb_cmd.u.mbx.out_mb, mcp->mb, SIZEOF_IOCB_MB_REG); 6412 6413 sp->done = qla2x00_async_mb_sp_done; 6414 6415 rval = qla2x00_start_sp(sp); 6416 if (rval != QLA_SUCCESS) { 6417 ql_dbg(ql_dbg_mbx, vha, 0x1018, 6418 "%s: %s Failed submission. %x.\n", 6419 __func__, sp->name, rval); 6420 goto done_free_sp; 6421 } 6422 6423 ql_dbg(ql_dbg_mbx, vha, 0x113f, "MB:%s hndl %x submitted\n", 6424 sp->name, sp->handle); 6425 6426 wait_for_completion(&c->u.mbx.comp); 6427 memcpy(mcp->mb, sp->u.iocb_cmd.u.mbx.in_mb, SIZEOF_IOCB_MB_REG); 6428 6429 rval = c->u.mbx.rc; 6430 switch (rval) { 6431 case QLA_FUNCTION_TIMEOUT: 6432 ql_dbg(ql_dbg_mbx, vha, 0x1140, "%s: %s Timeout. 
%x.\n", 6433 __func__, sp->name, rval); 6434 break; 6435 case QLA_SUCCESS: 6436 ql_dbg(ql_dbg_mbx, vha, 0x119d, "%s: %s done.\n", 6437 __func__, sp->name); 6438 break; 6439 default: 6440 ql_dbg(ql_dbg_mbx, vha, 0x119e, "%s: %s Failed. %x.\n", 6441 __func__, sp->name, rval); 6442 break; 6443 } 6444 6445 done_free_sp: 6446 sp->free(sp); 6447 done: 6448 return rval; 6449 } 6450 6451 /* 6452 * qla24xx_gpdb_wait 6453 * NOTE: Do not call this routine from DPC thread 6454 */ 6455 int qla24xx_gpdb_wait(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt) 6456 { 6457 int rval = QLA_FUNCTION_FAILED; 6458 dma_addr_t pd_dma; 6459 struct port_database_24xx *pd; 6460 struct qla_hw_data *ha = vha->hw; 6461 mbx_cmd_t mc; 6462 6463 if (!vha->hw->flags.fw_started) 6464 goto done; 6465 6466 pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma); 6467 if (pd == NULL) { 6468 ql_log(ql_log_warn, vha, 0xd047, 6469 "Failed to allocate port database structure.\n"); 6470 goto done_free_sp; 6471 } 6472 6473 memset(&mc, 0, sizeof(mc)); 6474 mc.mb[0] = MBC_GET_PORT_DATABASE; 6475 mc.mb[1] = cpu_to_le16(fcport->loop_id); 6476 mc.mb[2] = MSW(pd_dma); 6477 mc.mb[3] = LSW(pd_dma); 6478 mc.mb[6] = MSW(MSD(pd_dma)); 6479 mc.mb[7] = LSW(MSD(pd_dma)); 6480 mc.mb[9] = cpu_to_le16(vha->vp_idx); 6481 mc.mb[10] = cpu_to_le16((uint16_t)opt); 6482 6483 rval = qla24xx_send_mb_cmd(vha, &mc); 6484 if (rval != QLA_SUCCESS) { 6485 ql_dbg(ql_dbg_mbx, vha, 0x1193, 6486 "%s: %8phC fail\n", __func__, fcport->port_name); 6487 goto done_free_sp; 6488 } 6489 6490 rval = __qla24xx_parse_gpdb(vha, fcport, pd); 6491 6492 ql_dbg(ql_dbg_mbx, vha, 0x1197, "%s: %8phC done\n", 6493 __func__, fcport->port_name); 6494 6495 done_free_sp: 6496 if (pd) 6497 dma_pool_free(ha->s_dma_pool, pd, pd_dma); 6498 done: 6499 return rval; 6500 } 6501 6502 int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, 6503 struct port_database_24xx *pd) 6504 { 6505 int rval = QLA_SUCCESS; 6506 uint64_t zero = 0; 6507 u8 current_login_state, last_login_state; 6508 6509 if (NVME_TARGET(vha->hw, fcport)) { 6510 current_login_state = pd->current_login_state >> 4; 6511 last_login_state = pd->last_login_state >> 4; 6512 } else { 6513 current_login_state = pd->current_login_state & 0xf; 6514 last_login_state = pd->last_login_state & 0xf; 6515 } 6516 6517 /* Check for logged in state. */ 6518 if (current_login_state != PDS_PRLI_COMPLETE) { 6519 ql_dbg(ql_dbg_mbx, vha, 0x119a, 6520 "Unable to verify login-state (%x/%x) for loop_id %x.\n", 6521 current_login_state, last_login_state, fcport->loop_id); 6522 rval = QLA_FUNCTION_FAILED; 6523 goto gpd_error_out; 6524 } 6525 6526 if (fcport->loop_id == FC_NO_LOOP_ID || 6527 (memcmp(fcport->port_name, (uint8_t *)&zero, 8) && 6528 memcmp(fcport->port_name, pd->port_name, 8))) { 6529 /* We lost the device mid way. */ 6530 rval = QLA_NOT_LOGGED_IN; 6531 goto gpd_error_out; 6532 } 6533 6534 /* Names are little-endian. */ 6535 memcpy(fcport->node_name, pd->node_name, WWN_SIZE); 6536 memcpy(fcport->port_name, pd->port_name, WWN_SIZE); 6537 6538 /* Get port_id of device. 
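 * The 24xx port database returns the D_ID as three bytes
 * (domain, area, AL_PA); they are copied into fcport->d_id below.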
*/ 6539 fcport->d_id.b.domain = pd->port_id[0]; 6540 fcport->d_id.b.area = pd->port_id[1]; 6541 fcport->d_id.b.al_pa = pd->port_id[2]; 6542 fcport->d_id.b.rsvd_1 = 0; 6543 6544 if (NVME_TARGET(vha->hw, fcport)) { 6545 fcport->port_type = FCT_NVME; 6546 if ((pd->prli_svc_param_word_3[0] & BIT_5) == 0) 6547 fcport->port_type |= FCT_NVME_INITIATOR; 6548 if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0) 6549 fcport->port_type |= FCT_NVME_TARGET; 6550 if ((pd->prli_svc_param_word_3[0] & BIT_3) == 0) 6551 fcport->port_type |= FCT_NVME_DISCOVERY; 6552 } else { 6553 /* If not target must be initiator or unknown type. */ 6554 if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0) 6555 fcport->port_type = FCT_INITIATOR; 6556 else 6557 fcport->port_type = FCT_TARGET; 6558 } 6559 /* Passback COS information. */ 6560 fcport->supported_classes = (pd->flags & PDF_CLASS_2) ? 6561 FC_COS_CLASS2 : FC_COS_CLASS3; 6562 6563 if (pd->prli_svc_param_word_3[0] & BIT_7) { 6564 fcport->flags |= FCF_CONF_COMP_SUPPORTED; 6565 fcport->conf_compl_supported = 1; 6566 } 6567 6568 gpd_error_out: 6569 return rval; 6570 } 6571 6572 /* 6573 * qla24xx_gidlist_wait 6574 * NOTE: Do not call this routine from DPC thread. 6575 */ 6576 int qla24xx_gidlist_wait(struct scsi_qla_host *vha, 6577 void *id_list, dma_addr_t id_list_dma, uint16_t *entries) 6578 { 6579 int rval = QLA_FUNCTION_FAILED; 6580 mbx_cmd_t mc; 6581 6582 if (!vha->hw->flags.fw_started) 6583 goto done; 6584 6585 memset(&mc, 0, sizeof(mc)); 6586 mc.mb[0] = MBC_GET_ID_LIST; 6587 mc.mb[2] = MSW(id_list_dma); 6588 mc.mb[3] = LSW(id_list_dma); 6589 mc.mb[6] = MSW(MSD(id_list_dma)); 6590 mc.mb[7] = LSW(MSD(id_list_dma)); 6591 mc.mb[8] = 0; 6592 mc.mb[9] = cpu_to_le16(vha->vp_idx); 6593 6594 rval = qla24xx_send_mb_cmd(vha, &mc); 6595 if (rval != QLA_SUCCESS) { 6596 ql_dbg(ql_dbg_mbx, vha, 0x119b, 6597 "%s: fail\n", __func__); 6598 } else { 6599 *entries = mc.mb[1]; 6600 ql_dbg(ql_dbg_mbx, vha, 0x119c, 6601 "%s: done\n", __func__); 6602 } 6603 done: 6604 return rval; 6605 } 6606 6607 int qla27xx_set_zio_threshold(scsi_qla_host_t *vha, uint16_t value) 6608 { 6609 int rval; 6610 mbx_cmd_t mc; 6611 mbx_cmd_t *mcp = &mc; 6612 6613 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1200, 6614 "Entered %s\n", __func__); 6615 6616 memset(mcp->mb, 0, sizeof(mcp->mb)); 6617 mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD; 6618 mcp->mb[1] = cpu_to_le16(1); 6619 mcp->mb[2] = cpu_to_le16(value); 6620 mcp->out_mb = MBX_2 | MBX_1 | MBX_0; 6621 mcp->in_mb = MBX_2 | MBX_0; 6622 mcp->tov = MBX_TOV_SECONDS; 6623 mcp->flags = 0; 6624 6625 rval = qla2x00_mailbox_command(vha, mcp); 6626 6627 ql_dbg(ql_dbg_mbx, vha, 0x1201, "%s %x\n", 6628 (rval != QLA_SUCCESS) ? "Failed" : "Done", rval); 6629 6630 return rval; 6631 } 6632 6633 int qla27xx_get_zio_threshold(scsi_qla_host_t *vha, uint16_t *value) 6634 { 6635 int rval; 6636 mbx_cmd_t mc; 6637 mbx_cmd_t *mcp = &mc; 6638 6639 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1203, 6640 "Entered %s\n", __func__); 6641 6642 memset(mcp->mb, 0, sizeof(mcp->mb)); 6643 mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD; 6644 mcp->mb[1] = cpu_to_le16(0); 6645 mcp->out_mb = MBX_1 | MBX_0; 6646 mcp->in_mb = MBX_2 | MBX_0; 6647 mcp->tov = MBX_TOV_SECONDS; 6648 mcp->flags = 0; 6649 6650 rval = qla2x00_mailbox_command(vha, mcp); 6651 if (rval == QLA_SUCCESS) 6652 *value = mc.mb[2]; 6653 6654 ql_dbg(ql_dbg_mbx, vha, 0x1205, "%s %x\n", 6655 (rval != QLA_SUCCESS) ? 
"Failed" : "Done", rval); 6656 6657 return rval; 6658 } 6659 6660 int 6661 qla2x00_read_sfp_dev(struct scsi_qla_host *vha, char *buf, int count) 6662 { 6663 struct qla_hw_data *ha = vha->hw; 6664 uint16_t iter, addr, offset; 6665 dma_addr_t phys_addr; 6666 int rval, c; 6667 u8 *sfp_data; 6668 6669 memset(ha->sfp_data, 0, SFP_DEV_SIZE); 6670 addr = 0xa0; 6671 phys_addr = ha->sfp_data_dma; 6672 sfp_data = ha->sfp_data; 6673 offset = c = 0; 6674 6675 for (iter = 0; iter < SFP_DEV_SIZE / SFP_BLOCK_SIZE; iter++) { 6676 if (iter == 4) { 6677 /* Skip to next device address. */ 6678 addr = 0xa2; 6679 offset = 0; 6680 } 6681 6682 rval = qla2x00_read_sfp(vha, phys_addr, sfp_data, 6683 addr, offset, SFP_BLOCK_SIZE, BIT_1); 6684 if (rval != QLA_SUCCESS) { 6685 ql_log(ql_log_warn, vha, 0x706d, 6686 "Unable to read SFP data (%x/%x/%x).\n", rval, 6687 addr, offset); 6688 6689 return rval; 6690 } 6691 6692 if (buf && (c < count)) { 6693 u16 sz; 6694 6695 if ((count - c) >= SFP_BLOCK_SIZE) 6696 sz = SFP_BLOCK_SIZE; 6697 else 6698 sz = count - c; 6699 6700 memcpy(buf, sfp_data, sz); 6701 buf += SFP_BLOCK_SIZE; 6702 c += sz; 6703 } 6704 phys_addr += SFP_BLOCK_SIZE; 6705 sfp_data += SFP_BLOCK_SIZE; 6706 offset += SFP_BLOCK_SIZE; 6707 } 6708 6709 return rval; 6710 } 6711 6712 int qla24xx_res_count_wait(struct scsi_qla_host *vha, 6713 uint16_t *out_mb, int out_mb_sz) 6714 { 6715 int rval = QLA_FUNCTION_FAILED; 6716 mbx_cmd_t mc; 6717 6718 if (!vha->hw->flags.fw_started) 6719 goto done; 6720 6721 memset(&mc, 0, sizeof(mc)); 6722 mc.mb[0] = MBC_GET_RESOURCE_COUNTS; 6723 6724 rval = qla24xx_send_mb_cmd(vha, &mc); 6725 if (rval != QLA_SUCCESS) { 6726 ql_dbg(ql_dbg_mbx, vha, 0xffff, 6727 "%s: fail\n", __func__); 6728 } else { 6729 if (out_mb_sz <= SIZEOF_IOCB_MB_REG) 6730 memcpy(out_mb, mc.mb, out_mb_sz); 6731 else 6732 memcpy(out_mb, mc.mb, SIZEOF_IOCB_MB_REG); 6733 6734 ql_dbg(ql_dbg_mbx, vha, 0xffff, 6735 "%s: done\n", __func__); 6736 } 6737 done: 6738 return rval; 6739 } 6740 6741 int qla28xx_secure_flash_update(scsi_qla_host_t *vha, uint16_t opts, 6742 uint16_t region, uint32_t len, dma_addr_t sfub_dma_addr, 6743 uint32_t sfub_len) 6744 { 6745 int rval; 6746 mbx_cmd_t mc; 6747 mbx_cmd_t *mcp = &mc; 6748 6749 mcp->mb[0] = MBC_SECURE_FLASH_UPDATE; 6750 mcp->mb[1] = opts; 6751 mcp->mb[2] = region; 6752 mcp->mb[3] = MSW(len); 6753 mcp->mb[4] = LSW(len); 6754 mcp->mb[5] = MSW(sfub_dma_addr); 6755 mcp->mb[6] = LSW(sfub_dma_addr); 6756 mcp->mb[7] = MSW(MSD(sfub_dma_addr)); 6757 mcp->mb[8] = LSW(MSD(sfub_dma_addr)); 6758 mcp->mb[9] = sfub_len; 6759 mcp->out_mb = 6760 MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 6761 mcp->in_mb = MBX_2|MBX_1|MBX_0; 6762 mcp->tov = MBX_TOV_SECONDS; 6763 mcp->flags = 0; 6764 rval = qla2x00_mailbox_command(vha, mcp); 6765 6766 if (rval != QLA_SUCCESS) { 6767 ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s(%ld): failed rval 0x%x, %x %x %x", 6768 __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1], 6769 mcp->mb[2]); 6770 } 6771 6772 return rval; 6773 } 6774 6775 int qla2xxx_write_remote_register(scsi_qla_host_t *vha, uint32_t addr, 6776 uint32_t data) 6777 { 6778 int rval; 6779 mbx_cmd_t mc; 6780 mbx_cmd_t *mcp = &mc; 6781 6782 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8, 6783 "Entered %s.\n", __func__); 6784 6785 mcp->mb[0] = MBC_WRITE_REMOTE_REG; 6786 mcp->mb[1] = LSW(addr); 6787 mcp->mb[2] = MSW(addr); 6788 mcp->mb[3] = LSW(data); 6789 mcp->mb[4] = MSW(data); 6790 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 6791 mcp->in_mb = MBX_1|MBX_0; 6792 mcp->tov = MBX_TOV_SECONDS; 6793 
mcp->flags = 0; 6794 rval = qla2x00_mailbox_command(vha, mcp); 6795 6796 if (rval != QLA_SUCCESS) { 6797 ql_dbg(ql_dbg_mbx, vha, 0x10e9, 6798 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 6799 } else { 6800 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea, 6801 "Done %s.\n", __func__); 6802 } 6803 6804 return rval; 6805 } 6806 6807 int qla2xxx_read_remote_register(scsi_qla_host_t *vha, uint32_t addr, 6808 uint32_t *data) 6809 { 6810 int rval; 6811 mbx_cmd_t mc; 6812 mbx_cmd_t *mcp = &mc; 6813 6814 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8, 6815 "Entered %s.\n", __func__); 6816 6817 mcp->mb[0] = MBC_READ_REMOTE_REG; 6818 mcp->mb[1] = LSW(addr); 6819 mcp->mb[2] = MSW(addr); 6820 mcp->out_mb = MBX_2|MBX_1|MBX_0; 6821 mcp->in_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 6822 mcp->tov = MBX_TOV_SECONDS; 6823 mcp->flags = 0; 6824 rval = qla2x00_mailbox_command(vha, mcp); 6825 6826 *data = (uint32_t)((((uint32_t)mcp->mb[4]) << 16) | mcp->mb[3]); 6827 6828 if (rval != QLA_SUCCESS) { 6829 ql_dbg(ql_dbg_mbx, vha, 0x10e9, 6830 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 6831 } else { 6832 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea, 6833 "Done %s.\n", __func__); 6834 } 6835 6836 return rval; 6837 } 6838 6839 int 6840 ql26xx_led_config(scsi_qla_host_t *vha, uint16_t options, uint16_t *led) 6841 { 6842 struct qla_hw_data *ha = vha->hw; 6843 mbx_cmd_t mc; 6844 mbx_cmd_t *mcp = &mc; 6845 int rval; 6846 6847 if (!IS_QLA2031(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 6848 return QLA_FUNCTION_FAILED; 6849 6850 ql_dbg(ql_dbg_mbx, vha, 0x7070, "Entered %s (options=%x).\n", 6851 __func__, options); 6852 6853 mcp->mb[0] = MBC_SET_GET_FC_LED_CONFIG; 6854 mcp->mb[1] = options; 6855 mcp->out_mb = MBX_1|MBX_0; 6856 mcp->in_mb = MBX_1|MBX_0; 6857 if (options & BIT_0) { 6858 if (options & BIT_1) { 6859 mcp->mb[2] = led[2]; 6860 mcp->out_mb |= MBX_2; 6861 } 6862 if (options & BIT_2) { 6863 mcp->mb[3] = led[0]; 6864 mcp->out_mb |= MBX_3; 6865 } 6866 if (options & BIT_3) { 6867 mcp->mb[4] = led[1]; 6868 mcp->out_mb |= MBX_4; 6869 } 6870 } else { 6871 mcp->in_mb |= MBX_4|MBX_3|MBX_2; 6872 } 6873 mcp->tov = MBX_TOV_SECONDS; 6874 mcp->flags = 0; 6875 rval = qla2x00_mailbox_command(vha, mcp); 6876 if (rval) { 6877 ql_dbg(ql_dbg_mbx, vha, 0x7071, "Failed %s %x (mb=%x,%x)\n", 6878 __func__, rval, mcp->mb[0], mcp->mb[1]); 6879 return rval; 6880 } 6881 6882 if (options & BIT_0) { 6883 ha->beacon_blink_led = 0; 6884 ql_dbg(ql_dbg_mbx, vha, 0x7072, "Done %s\n", __func__); 6885 } else { 6886 led[2] = mcp->mb[2]; 6887 led[0] = mcp->mb[3]; 6888 led[1] = mcp->mb[4]; 6889 ql_dbg(ql_dbg_mbx, vha, 0x7073, "Done %s (led=%x,%x,%x)\n", 6890 __func__, led[0], led[1], led[2]); 6891 } 6892 6893 return rval; 6894 } 6895
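/*
 * Illustrative sketch only, not part of the driver: a hypothetical
 * helper showing how the qla2xxx_read_remote_register() and
 * qla2xxx_write_remote_register() wrappers above could be combined
 * into a read-modify-write of a remote 32-bit register. The helper
 * name and its clear/set calling convention are assumptions made for
 * illustration; error handling follows the QLA_SUCCESS convention
 * used throughout this file.
 */
static int __maybe_unused qla2xxx_rmw_remote_register_sketch(
    scsi_qla_host_t *vha, uint32_t addr, uint32_t clr, uint32_t set)
{
	uint32_t val;
	int rval;

	/* Read the current register contents through the mailbox interface. */
	rval = qla2xxx_read_remote_register(vha, addr, &val);
	if (rval != QLA_SUCCESS)
		return rval;

	/* Apply the requested bit changes and write the result back. */
	val &= ~clr;
	val |= set;

	return qla2xxx_write_remote_register(vha, addr, val);
}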