1 /* 2 * QLogic Fibre Channel HBA Driver 3 * Copyright (c) 2003-2014 QLogic Corporation 4 * 5 * See LICENSE.qla2xxx for copyright and licensing details. 6 */ 7 #include "qla_def.h" 8 #include "qla_target.h" 9 10 #include <linux/delay.h> 11 #include <linux/gfp.h> 12 13 static struct mb_cmd_name { 14 uint16_t cmd; 15 const char *str; 16 } mb_str[] = { 17 {MBC_GET_PORT_DATABASE, "GPDB"}, 18 {MBC_GET_ID_LIST, "GIDList"}, 19 {MBC_GET_LINK_PRIV_STATS, "Stats"}, 20 }; 21 22 static const char *mb_to_str(uint16_t cmd) 23 { 24 int i; 25 struct mb_cmd_name *e; 26 27 for (i = 0; i < ARRAY_SIZE(mb_str); i++) { 28 e = mb_str + i; 29 if (cmd == e->cmd) 30 return e->str; 31 } 32 return "unknown"; 33 } 34 35 static struct rom_cmd { 36 uint16_t cmd; 37 } rom_cmds[] = { 38 { MBC_LOAD_RAM }, 39 { MBC_EXECUTE_FIRMWARE }, 40 { MBC_READ_RAM_WORD }, 41 { MBC_MAILBOX_REGISTER_TEST }, 42 { MBC_VERIFY_CHECKSUM }, 43 { MBC_GET_FIRMWARE_VERSION }, 44 { MBC_LOAD_RISC_RAM }, 45 { MBC_DUMP_RISC_RAM }, 46 { MBC_LOAD_RISC_RAM_EXTENDED }, 47 { MBC_DUMP_RISC_RAM_EXTENDED }, 48 { MBC_WRITE_RAM_WORD_EXTENDED }, 49 { MBC_READ_RAM_EXTENDED }, 50 { MBC_GET_RESOURCE_COUNTS }, 51 { MBC_SET_FIRMWARE_OPTION }, 52 { MBC_MID_INITIALIZE_FIRMWARE }, 53 { MBC_GET_FIRMWARE_STATE }, 54 { MBC_GET_MEM_OFFLOAD_CNTRL_STAT }, 55 { MBC_GET_RETRY_COUNT }, 56 { MBC_TRACE_CONTROL }, 57 { MBC_INITIALIZE_MULTIQ }, 58 { MBC_IOCB_COMMAND_A64 }, 59 { MBC_GET_ADAPTER_LOOP_ID }, 60 { MBC_READ_SFP }, 61 }; 62 63 static int is_rom_cmd(uint16_t cmd) 64 { 65 int i; 66 struct rom_cmd *wc; 67 68 for (i = 0; i < ARRAY_SIZE(rom_cmds); i++) { 69 wc = rom_cmds + i; 70 if (wc->cmd == cmd) 71 return 1; 72 } 73 74 return 0; 75 } 76 77 /* 78 * qla2x00_mailbox_command 79 * Issue mailbox command and waits for completion. 80 * 81 * Input: 82 * ha = adapter block pointer. 83 * mcp = driver internal mbx struct pointer. 84 * 85 * Output: 86 * mb[MAX_MAILBOX_REGISTER_COUNT] = returned mailbox data. 87 * 88 * Returns: 89 * 0 : QLA_SUCCESS = cmd performed success 90 * 1 : QLA_FUNCTION_FAILED (error encountered) 91 * 6 : QLA_FUNCTION_TIMEOUT (timeout condition encountered) 92 * 93 * Context: 94 * Kernel context. 
95 */ 96 static int 97 qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) 98 { 99 int rval, i; 100 unsigned long flags = 0; 101 device_reg_t *reg; 102 uint8_t abort_active; 103 uint8_t io_lock_on; 104 uint16_t command = 0; 105 uint16_t *iptr; 106 uint16_t __iomem *optr; 107 uint32_t cnt; 108 uint32_t mboxes; 109 unsigned long wait_time; 110 struct qla_hw_data *ha = vha->hw; 111 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 112 113 114 ql_dbg(ql_dbg_mbx, vha, 0x1000, "Entered %s.\n", __func__); 115 116 if (ha->pdev->error_state > pci_channel_io_frozen) { 117 ql_log(ql_log_warn, vha, 0x1001, 118 "error_state is greater than pci_channel_io_frozen, " 119 "exiting.\n"); 120 return QLA_FUNCTION_TIMEOUT; 121 } 122 123 if (vha->device_flags & DFLG_DEV_FAILED) { 124 ql_log(ql_log_warn, vha, 0x1002, 125 "Device in failed state, exiting.\n"); 126 return QLA_FUNCTION_TIMEOUT; 127 } 128 129 /* if PCI error, then avoid mbx processing.*/ 130 if (test_bit(PFLG_DISCONNECTED, &base_vha->dpc_flags) && 131 test_bit(UNLOADING, &base_vha->dpc_flags)) { 132 ql_log(ql_log_warn, vha, 0xd04e, 133 "PCI error, exiting.\n"); 134 return QLA_FUNCTION_TIMEOUT; 135 } 136 137 reg = ha->iobase; 138 io_lock_on = base_vha->flags.init_done; 139 140 rval = QLA_SUCCESS; 141 abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); 142 143 144 if (ha->flags.pci_channel_io_perm_failure) { 145 ql_log(ql_log_warn, vha, 0x1003, 146 "Perm failure on EEH timeout MBX, exiting.\n"); 147 return QLA_FUNCTION_TIMEOUT; 148 } 149 150 if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) { 151 /* Setting Link-Down error */ 152 mcp->mb[0] = MBS_LINK_DOWN_ERROR; 153 ql_log(ql_log_warn, vha, 0x1004, 154 "FW hung = %d.\n", ha->flags.isp82xx_fw_hung); 155 return QLA_FUNCTION_TIMEOUT; 156 } 157 158 /* check if ISP abort is active and return cmd with timeout */ 159 if ((test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) || 160 test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) || 161 test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) && 162 !is_rom_cmd(mcp->mb[0])) { 163 ql_log(ql_log_info, vha, 0x1005, 164 "Cmd 0x%x aborted with timeout since ISP Abort is pending\n", 165 mcp->mb[0]); 166 return QLA_FUNCTION_TIMEOUT; 167 } 168 169 /* 170 * Wait for active mailbox commands to finish by waiting at most tov 171 * seconds. This is to serialize actual issuing of mailbox cmds during 172 * non ISP abort time. 173 */ 174 if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) { 175 /* Timeout occurred. Return error. */ 176 ql_log(ql_log_warn, vha, 0xd035, 177 "Cmd access timeout, cmd=0x%x, Exiting.\n", 178 mcp->mb[0]); 179 return QLA_FUNCTION_TIMEOUT; 180 } 181 182 ha->flags.mbox_busy = 1; 183 /* Save mailbox command for debug */ 184 ha->mcp = mcp; 185 186 ql_dbg(ql_dbg_mbx, vha, 0x1006, 187 "Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]); 188 189 spin_lock_irqsave(&ha->hardware_lock, flags); 190 191 /* Load mailbox registers. 
*/ 192 if (IS_P3P_TYPE(ha)) 193 optr = (uint16_t __iomem *)®->isp82.mailbox_in[0]; 194 else if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha))) 195 optr = (uint16_t __iomem *)®->isp24.mailbox0; 196 else 197 optr = (uint16_t __iomem *)MAILBOX_REG(ha, ®->isp, 0); 198 199 iptr = mcp->mb; 200 command = mcp->mb[0]; 201 mboxes = mcp->out_mb; 202 203 ql_dbg(ql_dbg_mbx, vha, 0x1111, 204 "Mailbox registers (OUT):\n"); 205 for (cnt = 0; cnt < ha->mbx_count; cnt++) { 206 if (IS_QLA2200(ha) && cnt == 8) 207 optr = 208 (uint16_t __iomem *)MAILBOX_REG(ha, ®->isp, 8); 209 if (mboxes & BIT_0) { 210 ql_dbg(ql_dbg_mbx, vha, 0x1112, 211 "mbox[%d]<-0x%04x\n", cnt, *iptr); 212 WRT_REG_WORD(optr, *iptr); 213 } 214 215 mboxes >>= 1; 216 optr++; 217 iptr++; 218 } 219 220 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1117, 221 "I/O Address = %p.\n", optr); 222 223 /* Issue set host interrupt command to send cmd out. */ 224 ha->flags.mbox_int = 0; 225 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); 226 227 /* Unlock mbx registers and wait for interrupt */ 228 ql_dbg(ql_dbg_mbx, vha, 0x100f, 229 "Going to unlock irq & waiting for interrupts. " 230 "jiffies=%lx.\n", jiffies); 231 232 /* Wait for mbx cmd completion until timeout */ 233 234 if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) { 235 set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags); 236 237 if (IS_P3P_TYPE(ha)) { 238 if (RD_REG_DWORD(®->isp82.hint) & 239 HINT_MBX_INT_PENDING) { 240 spin_unlock_irqrestore(&ha->hardware_lock, 241 flags); 242 ha->flags.mbox_busy = 0; 243 ql_dbg(ql_dbg_mbx, vha, 0x1010, 244 "Pending mailbox timeout, exiting.\n"); 245 rval = QLA_FUNCTION_TIMEOUT; 246 goto premature_exit; 247 } 248 WRT_REG_DWORD(®->isp82.hint, HINT_MBX_INT_PENDING); 249 } else if (IS_FWI2_CAPABLE(ha)) 250 WRT_REG_DWORD(®->isp24.hccr, HCCRX_SET_HOST_INT); 251 else 252 WRT_REG_WORD(®->isp.hccr, HCCR_SET_HOST_INT); 253 spin_unlock_irqrestore(&ha->hardware_lock, flags); 254 255 wait_time = jiffies; 256 if (!wait_for_completion_timeout(&ha->mbx_intr_comp, 257 mcp->tov * HZ)) { 258 ql_dbg(ql_dbg_mbx, vha, 0x117a, 259 "cmd=%x Timeout.\n", command); 260 spin_lock_irqsave(&ha->hardware_lock, flags); 261 clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags); 262 spin_unlock_irqrestore(&ha->hardware_lock, flags); 263 } 264 if (time_after(jiffies, wait_time + 5 * HZ)) 265 ql_log(ql_log_warn, vha, 0x1015, "cmd=0x%x, waited %d msecs\n", 266 command, jiffies_to_msecs(jiffies - wait_time)); 267 } else { 268 ql_dbg(ql_dbg_mbx, vha, 0x1011, 269 "Cmd=%x Polling Mode.\n", command); 270 271 if (IS_P3P_TYPE(ha)) { 272 if (RD_REG_DWORD(®->isp82.hint) & 273 HINT_MBX_INT_PENDING) { 274 spin_unlock_irqrestore(&ha->hardware_lock, 275 flags); 276 ha->flags.mbox_busy = 0; 277 ql_dbg(ql_dbg_mbx, vha, 0x1012, 278 "Pending mailbox timeout, exiting.\n"); 279 rval = QLA_FUNCTION_TIMEOUT; 280 goto premature_exit; 281 } 282 WRT_REG_DWORD(®->isp82.hint, HINT_MBX_INT_PENDING); 283 } else if (IS_FWI2_CAPABLE(ha)) 284 WRT_REG_DWORD(®->isp24.hccr, HCCRX_SET_HOST_INT); 285 else 286 WRT_REG_WORD(®->isp.hccr, HCCR_SET_HOST_INT); 287 spin_unlock_irqrestore(&ha->hardware_lock, flags); 288 289 wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */ 290 while (!ha->flags.mbox_int) { 291 if (time_after(jiffies, wait_time)) 292 break; 293 294 /* Check for pending interrupts. 
*/ 295 qla2x00_poll(ha->rsp_q_map[0]); 296 297 if (!ha->flags.mbox_int && 298 !(IS_QLA2200(ha) && 299 command == MBC_LOAD_RISC_RAM_EXTENDED)) 300 msleep(10); 301 } /* while */ 302 ql_dbg(ql_dbg_mbx, vha, 0x1013, 303 "Waited %d sec.\n", 304 (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ)); 305 } 306 307 /* Check whether we timed out */ 308 if (ha->flags.mbox_int) { 309 uint16_t *iptr2; 310 311 ql_dbg(ql_dbg_mbx, vha, 0x1014, 312 "Cmd=%x completed.\n", command); 313 314 /* Got interrupt. Clear the flag. */ 315 ha->flags.mbox_int = 0; 316 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); 317 318 if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) { 319 ha->flags.mbox_busy = 0; 320 /* Setting Link-Down error */ 321 mcp->mb[0] = MBS_LINK_DOWN_ERROR; 322 ha->mcp = NULL; 323 rval = QLA_FUNCTION_FAILED; 324 ql_log(ql_log_warn, vha, 0xd048, 325 "FW hung = %d.\n", ha->flags.isp82xx_fw_hung); 326 goto premature_exit; 327 } 328 329 if (ha->mailbox_out[0] != MBS_COMMAND_COMPLETE) 330 rval = QLA_FUNCTION_FAILED; 331 332 /* Load return mailbox registers. */ 333 iptr2 = mcp->mb; 334 iptr = (uint16_t *)&ha->mailbox_out[0]; 335 mboxes = mcp->in_mb; 336 337 ql_dbg(ql_dbg_mbx, vha, 0x1113, 338 "Mailbox registers (IN):\n"); 339 for (cnt = 0; cnt < ha->mbx_count; cnt++) { 340 if (mboxes & BIT_0) { 341 *iptr2 = *iptr; 342 ql_dbg(ql_dbg_mbx, vha, 0x1114, 343 "mbox[%d]->0x%04x\n", cnt, *iptr2); 344 } 345 346 mboxes >>= 1; 347 iptr2++; 348 iptr++; 349 } 350 } else { 351 352 uint16_t mb[8]; 353 uint32_t ictrl, host_status, hccr; 354 uint16_t w; 355 356 if (IS_FWI2_CAPABLE(ha)) { 357 mb[0] = RD_REG_WORD(®->isp24.mailbox0); 358 mb[1] = RD_REG_WORD(®->isp24.mailbox1); 359 mb[2] = RD_REG_WORD(®->isp24.mailbox2); 360 mb[3] = RD_REG_WORD(®->isp24.mailbox3); 361 mb[7] = RD_REG_WORD(®->isp24.mailbox7); 362 ictrl = RD_REG_DWORD(®->isp24.ictrl); 363 host_status = RD_REG_DWORD(®->isp24.host_status); 364 hccr = RD_REG_DWORD(®->isp24.hccr); 365 366 ql_log(ql_log_warn, vha, 0xd04c, 367 "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx " 368 "mb[0-3]=[0x%x 0x%x 0x%x 0x%x] mb7 0x%x host_status 0x%x hccr 0x%x\n", 369 command, ictrl, jiffies, mb[0], mb[1], mb[2], mb[3], 370 mb[7], host_status, hccr); 371 372 } else { 373 mb[0] = RD_MAILBOX_REG(ha, ®->isp, 0); 374 ictrl = RD_REG_WORD(®->isp.ictrl); 375 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119, 376 "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx " 377 "mb[0]=0x%x\n", command, ictrl, jiffies, mb[0]); 378 } 379 ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019); 380 381 /* Capture FW dump only, if PCI device active */ 382 if (!pci_channel_offline(vha->hw->pdev)) { 383 pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w); 384 if (w == 0xffff || ictrl == 0xffffffff) { 385 /* This is special case if there is unload 386 * of driver happening and if PCI device go 387 * into bad state due to PCI error condition 388 * then only PCI ERR flag would be set. 389 * we will do premature exit for above case. 390 */ 391 ha->flags.mbox_busy = 0; 392 rval = QLA_FUNCTION_TIMEOUT; 393 goto premature_exit; 394 } 395 396 /* Attempt to capture firmware dump for further 397 * anallysis of the current formware state. 
we do not 398 * need to do this if we are intentionally generating 399 * a dump 400 */ 401 if (mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) 402 ha->isp_ops->fw_dump(vha, 0); 403 rval = QLA_FUNCTION_TIMEOUT; 404 } 405 } 406 407 ha->flags.mbox_busy = 0; 408 409 /* Clean up */ 410 ha->mcp = NULL; 411 412 if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) { 413 ql_dbg(ql_dbg_mbx, vha, 0x101a, 414 "Checking for additional resp interrupt.\n"); 415 416 /* polling mode for non isp_abort commands. */ 417 qla2x00_poll(ha->rsp_q_map[0]); 418 } 419 420 if (rval == QLA_FUNCTION_TIMEOUT && 421 mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) { 422 if (!io_lock_on || (mcp->flags & IOCTL_CMD) || 423 ha->flags.eeh_busy) { 424 /* not in dpc. schedule it for dpc to take over. */ 425 ql_dbg(ql_dbg_mbx, vha, 0x101b, 426 "Timeout, schedule isp_abort_needed.\n"); 427 428 if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) && 429 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) && 430 !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) { 431 if (IS_QLA82XX(ha)) { 432 ql_dbg(ql_dbg_mbx, vha, 0x112a, 433 "disabling pause transmit on port " 434 "0 & 1.\n"); 435 qla82xx_wr_32(ha, 436 QLA82XX_CRB_NIU + 0x98, 437 CRB_NIU_XG_PAUSE_CTL_P0| 438 CRB_NIU_XG_PAUSE_CTL_P1); 439 } 440 ql_log(ql_log_info, base_vha, 0x101c, 441 "Mailbox cmd timeout occurred, cmd=0x%x, " 442 "mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP " 443 "abort.\n", command, mcp->mb[0], 444 ha->flags.eeh_busy); 445 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 446 qla2xxx_wake_dpc(vha); 447 } 448 } else if (!abort_active) { 449 /* call abort directly since we are in the DPC thread */ 450 ql_dbg(ql_dbg_mbx, vha, 0x101d, 451 "Timeout, calling abort_isp.\n"); 452 453 if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) && 454 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) && 455 !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) { 456 if (IS_QLA82XX(ha)) { 457 ql_dbg(ql_dbg_mbx, vha, 0x112b, 458 "disabling pause transmit on port " 459 "0 & 1.\n"); 460 qla82xx_wr_32(ha, 461 QLA82XX_CRB_NIU + 0x98, 462 CRB_NIU_XG_PAUSE_CTL_P0| 463 CRB_NIU_XG_PAUSE_CTL_P1); 464 } 465 ql_log(ql_log_info, base_vha, 0x101e, 466 "Mailbox cmd timeout occurred, cmd=0x%x, " 467 "mb[0]=0x%x. Scheduling ISP abort ", 468 command, mcp->mb[0]); 469 set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags); 470 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 471 /* Allow next mbx cmd to come in. */ 472 complete(&ha->mbx_cmd_comp); 473 if (ha->isp_ops->abort_isp(vha)) { 474 /* Failed. retry later. */ 475 set_bit(ISP_ABORT_NEEDED, 476 &vha->dpc_flags); 477 } 478 clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags); 479 ql_dbg(ql_dbg_mbx, vha, 0x101f, 480 "Finished abort_isp.\n"); 481 goto mbx_done; 482 } 483 } 484 } 485 486 premature_exit: 487 /* Allow next mbx cmd to come in. 
*/ 488 complete(&ha->mbx_cmd_comp); 489 490 mbx_done: 491 if (rval) { 492 if (ql2xextended_error_logging & (ql_dbg_disc|ql_dbg_mbx)) { 493 pr_warn("%s [%s]-%04x:%ld: **** Failed", QL_MSGHDR, 494 dev_name(&ha->pdev->dev), 0x1020+0x800, 495 vha->host_no); 496 mboxes = mcp->in_mb; 497 cnt = 4; 498 for (i = 0; i < ha->mbx_count && cnt; i++, mboxes >>= 1) 499 if (mboxes & BIT_0) { 500 printk(" mb[%u]=%x", i, mcp->mb[i]); 501 cnt--; 502 } 503 pr_warn(" cmd=%x ****\n", command); 504 } 505 ql_dbg(ql_dbg_mbx, vha, 0x1198, 506 "host_status=%#x intr_ctrl=%#x intr_status=%#x\n", 507 RD_REG_DWORD(®->isp24.host_status), 508 RD_REG_DWORD(®->isp24.ictrl), 509 RD_REG_DWORD(®->isp24.istatus)); 510 } else { 511 ql_dbg(ql_dbg_mbx, base_vha, 0x1021, "Done %s.\n", __func__); 512 } 513 514 return rval; 515 } 516 517 int 518 qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr, 519 uint32_t risc_code_size) 520 { 521 int rval; 522 struct qla_hw_data *ha = vha->hw; 523 mbx_cmd_t mc; 524 mbx_cmd_t *mcp = &mc; 525 526 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1022, 527 "Entered %s.\n", __func__); 528 529 if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) { 530 mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED; 531 mcp->mb[8] = MSW(risc_addr); 532 mcp->out_mb = MBX_8|MBX_0; 533 } else { 534 mcp->mb[0] = MBC_LOAD_RISC_RAM; 535 mcp->out_mb = MBX_0; 536 } 537 mcp->mb[1] = LSW(risc_addr); 538 mcp->mb[2] = MSW(req_dma); 539 mcp->mb[3] = LSW(req_dma); 540 mcp->mb[6] = MSW(MSD(req_dma)); 541 mcp->mb[7] = LSW(MSD(req_dma)); 542 mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1; 543 if (IS_FWI2_CAPABLE(ha)) { 544 mcp->mb[4] = MSW(risc_code_size); 545 mcp->mb[5] = LSW(risc_code_size); 546 mcp->out_mb |= MBX_5|MBX_4; 547 } else { 548 mcp->mb[4] = LSW(risc_code_size); 549 mcp->out_mb |= MBX_4; 550 } 551 552 mcp->in_mb = MBX_0; 553 mcp->tov = MBX_TOV_SECONDS; 554 mcp->flags = 0; 555 rval = qla2x00_mailbox_command(vha, mcp); 556 557 if (rval != QLA_SUCCESS) { 558 ql_dbg(ql_dbg_mbx, vha, 0x1023, 559 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 560 } else { 561 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1024, 562 "Done %s.\n", __func__); 563 } 564 565 return rval; 566 } 567 568 #define EXTENDED_BB_CREDITS BIT_0 569 #define NVME_ENABLE_FLAG BIT_3 570 static inline uint16_t qla25xx_set_sfp_lr_dist(struct qla_hw_data *ha) 571 { 572 uint16_t mb4 = BIT_0; 573 574 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) 575 mb4 |= ha->long_range_distance << LR_DIST_FW_POS; 576 577 return mb4; 578 } 579 580 static inline uint16_t qla25xx_set_nvr_lr_dist(struct qla_hw_data *ha) 581 { 582 uint16_t mb4 = BIT_0; 583 584 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) { 585 struct nvram_81xx *nv = ha->nvram; 586 587 mb4 |= LR_DIST_FW_FIELD(nv->enhanced_features); 588 } 589 590 return mb4; 591 } 592 593 /* 594 * qla2x00_execute_fw 595 * Start adapter firmware. 596 * 597 * Input: 598 * ha = adapter block pointer. 599 * TARGET_QUEUE_LOCK must be released. 600 * ADAPTER_STATE_LOCK must be released. 601 * 602 * Returns: 603 * qla2x00 local function return status code. 604 * 605 * Context: 606 * Kernel context. 
607 */ 608 int 609 qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr) 610 { 611 int rval; 612 struct qla_hw_data *ha = vha->hw; 613 mbx_cmd_t mc; 614 mbx_cmd_t *mcp = &mc; 615 616 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1025, 617 "Entered %s.\n", __func__); 618 619 mcp->mb[0] = MBC_EXECUTE_FIRMWARE; 620 mcp->out_mb = MBX_0; 621 mcp->in_mb = MBX_0; 622 if (IS_FWI2_CAPABLE(ha)) { 623 mcp->mb[1] = MSW(risc_addr); 624 mcp->mb[2] = LSW(risc_addr); 625 mcp->mb[3] = 0; 626 mcp->mb[4] = 0; 627 ha->flags.using_lr_setting = 0; 628 if (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) || 629 IS_QLA27XX(ha)) { 630 if (ql2xautodetectsfp) { 631 if (ha->flags.detected_lr_sfp) { 632 mcp->mb[4] |= 633 qla25xx_set_sfp_lr_dist(ha); 634 ha->flags.using_lr_setting = 1; 635 } 636 } else { 637 struct nvram_81xx *nv = ha->nvram; 638 /* set LR distance if specified in nvram */ 639 if (nv->enhanced_features & 640 NEF_LR_DIST_ENABLE) { 641 mcp->mb[4] |= 642 qla25xx_set_nvr_lr_dist(ha); 643 ha->flags.using_lr_setting = 1; 644 } 645 } 646 } 647 648 if (ql2xnvmeenable && IS_QLA27XX(ha)) 649 mcp->mb[4] |= NVME_ENABLE_FLAG; 650 651 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) { 652 struct nvram_81xx *nv = ha->nvram; 653 /* set minimum speed if specified in nvram */ 654 if (nv->min_link_speed >= 2 && 655 nv->min_link_speed <= 5) { 656 mcp->mb[4] |= BIT_4; 657 mcp->mb[11] = nv->min_link_speed; 658 mcp->out_mb |= MBX_11; 659 mcp->in_mb |= BIT_5; 660 vha->min_link_speed_feat = nv->min_link_speed; 661 } 662 } 663 664 if (ha->flags.exlogins_enabled) 665 mcp->mb[4] |= ENABLE_EXTENDED_LOGIN; 666 667 if (ha->flags.exchoffld_enabled) 668 mcp->mb[4] |= ENABLE_EXCHANGE_OFFLD; 669 670 mcp->out_mb |= MBX_4|MBX_3|MBX_2|MBX_1; 671 mcp->in_mb |= MBX_3 | MBX_2 | MBX_1; 672 } else { 673 mcp->mb[1] = LSW(risc_addr); 674 mcp->out_mb |= MBX_1; 675 if (IS_QLA2322(ha) || IS_QLA6322(ha)) { 676 mcp->mb[2] = 0; 677 mcp->out_mb |= MBX_2; 678 } 679 } 680 681 mcp->tov = MBX_TOV_SECONDS; 682 mcp->flags = 0; 683 rval = qla2x00_mailbox_command(vha, mcp); 684 685 if (rval != QLA_SUCCESS) { 686 ql_dbg(ql_dbg_mbx, vha, 0x1026, 687 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 688 } else { 689 if (IS_FWI2_CAPABLE(ha)) { 690 ha->fw_ability_mask = mcp->mb[3] << 16 | mcp->mb[2]; 691 ql_dbg(ql_dbg_mbx, vha, 0x119a, 692 "fw_ability_mask=%x.\n", ha->fw_ability_mask); 693 ql_dbg(ql_dbg_mbx, vha, 0x1027, 694 "exchanges=%x.\n", mcp->mb[1]); 695 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) { 696 ha->max_speed_sup = mcp->mb[2] & BIT_0; 697 ql_dbg(ql_dbg_mbx, vha, 0x119b, 698 "Maximum speed supported=%s.\n", 699 ha->max_speed_sup ? "32Gps" : "16Gps"); 700 if (vha->min_link_speed_feat) { 701 ha->min_link_speed = mcp->mb[5]; 702 ql_dbg(ql_dbg_mbx, vha, 0x119c, 703 "Minimum speed set=%s.\n", 704 mcp->mb[5] == 5 ? "32Gps" : 705 mcp->mb[5] == 4 ? "16Gps" : 706 mcp->mb[5] == 3 ? "8Gps" : 707 mcp->mb[5] == 2 ? "4Gps" : 708 "unknown"); 709 } 710 } 711 } 712 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1028, 713 "Done.\n"); 714 } 715 716 return rval; 717 } 718 719 /* 720 * qla_get_exlogin_status 721 * Get extended login status 722 * uses the memory offload control/status Mailbox 723 * 724 * Input: 725 * ha: adapter state pointer. 726 * fwopt: firmware options 727 * 728 * Returns: 729 * qla2x00 local function status 730 * 731 * Context: 732 * Kernel context. 
733 */ 734 #define FETCH_XLOGINS_STAT 0x8 735 int 736 qla_get_exlogin_status(scsi_qla_host_t *vha, uint16_t *buf_sz, 737 uint16_t *ex_logins_cnt) 738 { 739 int rval; 740 mbx_cmd_t mc; 741 mbx_cmd_t *mcp = &mc; 742 743 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118f, 744 "Entered %s\n", __func__); 745 746 memset(mcp->mb, 0 , sizeof(mcp->mb)); 747 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT; 748 mcp->mb[1] = FETCH_XLOGINS_STAT; 749 mcp->out_mb = MBX_1|MBX_0; 750 mcp->in_mb = MBX_10|MBX_4|MBX_0; 751 mcp->tov = MBX_TOV_SECONDS; 752 mcp->flags = 0; 753 754 rval = qla2x00_mailbox_command(vha, mcp); 755 if (rval != QLA_SUCCESS) { 756 ql_dbg(ql_dbg_mbx, vha, 0x1115, "Failed=%x.\n", rval); 757 } else { 758 *buf_sz = mcp->mb[4]; 759 *ex_logins_cnt = mcp->mb[10]; 760 761 ql_log(ql_log_info, vha, 0x1190, 762 "buffer size 0x%x, exchange login count=%d\n", 763 mcp->mb[4], mcp->mb[10]); 764 765 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1116, 766 "Done %s.\n", __func__); 767 } 768 769 return rval; 770 } 771 772 /* 773 * qla_set_exlogin_mem_cfg 774 * set extended login memory configuration 775 * Mbx needs to be issues before init_cb is set 776 * 777 * Input: 778 * ha: adapter state pointer. 779 * buffer: buffer pointer 780 * phys_addr: physical address of buffer 781 * size: size of buffer 782 * TARGET_QUEUE_LOCK must be released 783 * ADAPTER_STATE_LOCK must be release 784 * 785 * Returns: 786 * qla2x00 local funxtion status code. 787 * 788 * Context: 789 * Kernel context. 790 */ 791 #define CONFIG_XLOGINS_MEM 0x3 792 int 793 qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr) 794 { 795 int rval; 796 mbx_cmd_t mc; 797 mbx_cmd_t *mcp = &mc; 798 struct qla_hw_data *ha = vha->hw; 799 800 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111a, 801 "Entered %s.\n", __func__); 802 803 memset(mcp->mb, 0 , sizeof(mcp->mb)); 804 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT; 805 mcp->mb[1] = CONFIG_XLOGINS_MEM; 806 mcp->mb[2] = MSW(phys_addr); 807 mcp->mb[3] = LSW(phys_addr); 808 mcp->mb[6] = MSW(MSD(phys_addr)); 809 mcp->mb[7] = LSW(MSD(phys_addr)); 810 mcp->mb[8] = MSW(ha->exlogin_size); 811 mcp->mb[9] = LSW(ha->exlogin_size); 812 mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 813 mcp->in_mb = MBX_11|MBX_0; 814 mcp->tov = MBX_TOV_SECONDS; 815 mcp->flags = 0; 816 rval = qla2x00_mailbox_command(vha, mcp); 817 if (rval != QLA_SUCCESS) { 818 /*EMPTY*/ 819 ql_dbg(ql_dbg_mbx, vha, 0x111b, "Failed=%x.\n", rval); 820 } else { 821 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c, 822 "Done %s.\n", __func__); 823 } 824 825 return rval; 826 } 827 828 /* 829 * qla_get_exchoffld_status 830 * Get exchange offload status 831 * uses the memory offload control/status Mailbox 832 * 833 * Input: 834 * ha: adapter state pointer. 835 * fwopt: firmware options 836 * 837 * Returns: 838 * qla2x00 local function status 839 * 840 * Context: 841 * Kernel context. 
842 */ 843 #define FETCH_XCHOFFLD_STAT 0x2 844 int 845 qla_get_exchoffld_status(scsi_qla_host_t *vha, uint16_t *buf_sz, 846 uint16_t *ex_logins_cnt) 847 { 848 int rval; 849 mbx_cmd_t mc; 850 mbx_cmd_t *mcp = &mc; 851 852 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1019, 853 "Entered %s\n", __func__); 854 855 memset(mcp->mb, 0 , sizeof(mcp->mb)); 856 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT; 857 mcp->mb[1] = FETCH_XCHOFFLD_STAT; 858 mcp->out_mb = MBX_1|MBX_0; 859 mcp->in_mb = MBX_10|MBX_4|MBX_0; 860 mcp->tov = MBX_TOV_SECONDS; 861 mcp->flags = 0; 862 863 rval = qla2x00_mailbox_command(vha, mcp); 864 if (rval != QLA_SUCCESS) { 865 ql_dbg(ql_dbg_mbx, vha, 0x1155, "Failed=%x.\n", rval); 866 } else { 867 *buf_sz = mcp->mb[4]; 868 *ex_logins_cnt = mcp->mb[10]; 869 870 ql_log(ql_log_info, vha, 0x118e, 871 "buffer size 0x%x, exchange offload count=%d\n", 872 mcp->mb[4], mcp->mb[10]); 873 874 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1156, 875 "Done %s.\n", __func__); 876 } 877 878 return rval; 879 } 880 881 /* 882 * qla_set_exchoffld_mem_cfg 883 * Set exchange offload memory configuration 884 * Mbx needs to be issues before init_cb is set 885 * 886 * Input: 887 * ha: adapter state pointer. 888 * buffer: buffer pointer 889 * phys_addr: physical address of buffer 890 * size: size of buffer 891 * TARGET_QUEUE_LOCK must be released 892 * ADAPTER_STATE_LOCK must be release 893 * 894 * Returns: 895 * qla2x00 local funxtion status code. 896 * 897 * Context: 898 * Kernel context. 899 */ 900 #define CONFIG_XCHOFFLD_MEM 0x3 901 int 902 qla_set_exchoffld_mem_cfg(scsi_qla_host_t *vha) 903 { 904 int rval; 905 mbx_cmd_t mc; 906 mbx_cmd_t *mcp = &mc; 907 struct qla_hw_data *ha = vha->hw; 908 909 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1157, 910 "Entered %s.\n", __func__); 911 912 memset(mcp->mb, 0 , sizeof(mcp->mb)); 913 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT; 914 mcp->mb[1] = CONFIG_XCHOFFLD_MEM; 915 mcp->mb[2] = MSW(ha->exchoffld_buf_dma); 916 mcp->mb[3] = LSW(ha->exchoffld_buf_dma); 917 mcp->mb[6] = MSW(MSD(ha->exchoffld_buf_dma)); 918 mcp->mb[7] = LSW(MSD(ha->exchoffld_buf_dma)); 919 mcp->mb[8] = MSW(ha->exchoffld_size); 920 mcp->mb[9] = LSW(ha->exchoffld_size); 921 mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 922 mcp->in_mb = MBX_11|MBX_0; 923 mcp->tov = MBX_TOV_SECONDS; 924 mcp->flags = 0; 925 rval = qla2x00_mailbox_command(vha, mcp); 926 if (rval != QLA_SUCCESS) { 927 /*EMPTY*/ 928 ql_dbg(ql_dbg_mbx, vha, 0x1158, "Failed=%x.\n", rval); 929 } else { 930 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1192, 931 "Done %s.\n", __func__); 932 } 933 934 return rval; 935 } 936 937 /* 938 * qla2x00_get_fw_version 939 * Get firmware version. 940 * 941 * Input: 942 * ha: adapter state pointer. 943 * major: pointer for major number. 944 * minor: pointer for minor number. 945 * subminor: pointer for subminor number. 946 * 947 * Returns: 948 * qla2x00 local function return status code. 949 * 950 * Context: 951 * Kernel context. 
952 */ 953 int 954 qla2x00_get_fw_version(scsi_qla_host_t *vha) 955 { 956 int rval; 957 mbx_cmd_t mc; 958 mbx_cmd_t *mcp = &mc; 959 struct qla_hw_data *ha = vha->hw; 960 961 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1029, 962 "Entered %s.\n", __func__); 963 964 mcp->mb[0] = MBC_GET_FIRMWARE_VERSION; 965 mcp->out_mb = MBX_0; 966 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 967 if (IS_QLA81XX(vha->hw) || IS_QLA8031(ha) || IS_QLA8044(ha)) 968 mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8; 969 if (IS_FWI2_CAPABLE(ha)) 970 mcp->in_mb |= MBX_17|MBX_16|MBX_15; 971 if (IS_QLA27XX(ha)) 972 mcp->in_mb |= 973 MBX_25|MBX_24|MBX_23|MBX_22|MBX_21|MBX_20|MBX_19|MBX_18| 974 MBX_14|MBX_13|MBX_11|MBX_10|MBX_9|MBX_8; 975 976 mcp->flags = 0; 977 mcp->tov = MBX_TOV_SECONDS; 978 rval = qla2x00_mailbox_command(vha, mcp); 979 if (rval != QLA_SUCCESS) 980 goto failed; 981 982 /* Return mailbox data. */ 983 ha->fw_major_version = mcp->mb[1]; 984 ha->fw_minor_version = mcp->mb[2]; 985 ha->fw_subminor_version = mcp->mb[3]; 986 ha->fw_attributes = mcp->mb[6]; 987 if (IS_QLA2100(vha->hw) || IS_QLA2200(vha->hw)) 988 ha->fw_memory_size = 0x1FFFF; /* Defaults to 128KB. */ 989 else 990 ha->fw_memory_size = (mcp->mb[5] << 16) | mcp->mb[4]; 991 992 if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) || IS_QLA8044(ha)) { 993 ha->mpi_version[0] = mcp->mb[10] & 0xff; 994 ha->mpi_version[1] = mcp->mb[11] >> 8; 995 ha->mpi_version[2] = mcp->mb[11] & 0xff; 996 ha->mpi_capabilities = (mcp->mb[12] << 16) | mcp->mb[13]; 997 ha->phy_version[0] = mcp->mb[8] & 0xff; 998 ha->phy_version[1] = mcp->mb[9] >> 8; 999 ha->phy_version[2] = mcp->mb[9] & 0xff; 1000 } 1001 1002 if (IS_FWI2_CAPABLE(ha)) { 1003 ha->fw_attributes_h = mcp->mb[15]; 1004 ha->fw_attributes_ext[0] = mcp->mb[16]; 1005 ha->fw_attributes_ext[1] = mcp->mb[17]; 1006 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1139, 1007 "%s: FW_attributes Upper: 0x%x, Lower: 0x%x.\n", 1008 __func__, mcp->mb[15], mcp->mb[6]); 1009 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x112f, 1010 "%s: Ext_FwAttributes Upper: 0x%x, Lower: 0x%x.\n", 1011 __func__, mcp->mb[17], mcp->mb[16]); 1012 1013 if (ha->fw_attributes_h & 0x4) 1014 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118d, 1015 "%s: Firmware supports Extended Login 0x%x\n", 1016 __func__, ha->fw_attributes_h); 1017 1018 if (ha->fw_attributes_h & 0x8) 1019 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1191, 1020 "%s: Firmware supports Exchange Offload 0x%x\n", 1021 __func__, ha->fw_attributes_h); 1022 1023 /* 1024 * FW supports nvme and driver load parameter requested nvme. 1025 * BIT 26 of fw_attributes indicates NVMe support. 
1026 */ 1027 if ((ha->fw_attributes_h & 0x400) && ql2xnvmeenable) 1028 vha->flags.nvme_enabled = 1; 1029 1030 } 1031 1032 if (IS_QLA27XX(ha)) { 1033 ha->mpi_version[0] = mcp->mb[10] & 0xff; 1034 ha->mpi_version[1] = mcp->mb[11] >> 8; 1035 ha->mpi_version[2] = mcp->mb[11] & 0xff; 1036 ha->pep_version[0] = mcp->mb[13] & 0xff; 1037 ha->pep_version[1] = mcp->mb[14] >> 8; 1038 ha->pep_version[2] = mcp->mb[14] & 0xff; 1039 ha->fw_shared_ram_start = (mcp->mb[19] << 16) | mcp->mb[18]; 1040 ha->fw_shared_ram_end = (mcp->mb[21] << 16) | mcp->mb[20]; 1041 ha->fw_ddr_ram_start = (mcp->mb[23] << 16) | mcp->mb[22]; 1042 ha->fw_ddr_ram_end = (mcp->mb[25] << 16) | mcp->mb[24]; 1043 } 1044 1045 failed: 1046 if (rval != QLA_SUCCESS) { 1047 /*EMPTY*/ 1048 ql_dbg(ql_dbg_mbx, vha, 0x102a, "Failed=%x.\n", rval); 1049 } else { 1050 /*EMPTY*/ 1051 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102b, 1052 "Done %s.\n", __func__); 1053 } 1054 return rval; 1055 } 1056 1057 /* 1058 * qla2x00_get_fw_options 1059 * Set firmware options. 1060 * 1061 * Input: 1062 * ha = adapter block pointer. 1063 * fwopt = pointer for firmware options. 1064 * 1065 * Returns: 1066 * qla2x00 local function return status code. 1067 * 1068 * Context: 1069 * Kernel context. 1070 */ 1071 int 1072 qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts) 1073 { 1074 int rval; 1075 mbx_cmd_t mc; 1076 mbx_cmd_t *mcp = &mc; 1077 1078 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102c, 1079 "Entered %s.\n", __func__); 1080 1081 mcp->mb[0] = MBC_GET_FIRMWARE_OPTION; 1082 mcp->out_mb = MBX_0; 1083 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0; 1084 mcp->tov = MBX_TOV_SECONDS; 1085 mcp->flags = 0; 1086 rval = qla2x00_mailbox_command(vha, mcp); 1087 1088 if (rval != QLA_SUCCESS) { 1089 /*EMPTY*/ 1090 ql_dbg(ql_dbg_mbx, vha, 0x102d, "Failed=%x.\n", rval); 1091 } else { 1092 fwopts[0] = mcp->mb[0]; 1093 fwopts[1] = mcp->mb[1]; 1094 fwopts[2] = mcp->mb[2]; 1095 fwopts[3] = mcp->mb[3]; 1096 1097 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102e, 1098 "Done %s.\n", __func__); 1099 } 1100 1101 return rval; 1102 } 1103 1104 1105 /* 1106 * qla2x00_set_fw_options 1107 * Set firmware options. 1108 * 1109 * Input: 1110 * ha = adapter block pointer. 1111 * fwopt = pointer for firmware options. 1112 * 1113 * Returns: 1114 * qla2x00 local function return status code. 1115 * 1116 * Context: 1117 * Kernel context. 
1118 */ 1119 int 1120 qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts) 1121 { 1122 int rval; 1123 mbx_cmd_t mc; 1124 mbx_cmd_t *mcp = &mc; 1125 1126 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102f, 1127 "Entered %s.\n", __func__); 1128 1129 mcp->mb[0] = MBC_SET_FIRMWARE_OPTION; 1130 mcp->mb[1] = fwopts[1]; 1131 mcp->mb[2] = fwopts[2]; 1132 mcp->mb[3] = fwopts[3]; 1133 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 1134 mcp->in_mb = MBX_0; 1135 if (IS_FWI2_CAPABLE(vha->hw)) { 1136 mcp->in_mb |= MBX_1; 1137 mcp->mb[10] = fwopts[10]; 1138 mcp->out_mb |= MBX_10; 1139 } else { 1140 mcp->mb[10] = fwopts[10]; 1141 mcp->mb[11] = fwopts[11]; 1142 mcp->mb[12] = 0; /* Undocumented, but used */ 1143 mcp->out_mb |= MBX_12|MBX_11|MBX_10; 1144 } 1145 mcp->tov = MBX_TOV_SECONDS; 1146 mcp->flags = 0; 1147 rval = qla2x00_mailbox_command(vha, mcp); 1148 1149 fwopts[0] = mcp->mb[0]; 1150 1151 if (rval != QLA_SUCCESS) { 1152 /*EMPTY*/ 1153 ql_dbg(ql_dbg_mbx, vha, 0x1030, 1154 "Failed=%x (%x/%x).\n", rval, mcp->mb[0], mcp->mb[1]); 1155 } else { 1156 /*EMPTY*/ 1157 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1031, 1158 "Done %s.\n", __func__); 1159 } 1160 1161 return rval; 1162 } 1163 1164 /* 1165 * qla2x00_mbx_reg_test 1166 * Mailbox register wrap test. 1167 * 1168 * Input: 1169 * ha = adapter block pointer. 1170 * TARGET_QUEUE_LOCK must be released. 1171 * ADAPTER_STATE_LOCK must be released. 1172 * 1173 * Returns: 1174 * qla2x00 local function return status code. 1175 * 1176 * Context: 1177 * Kernel context. 1178 */ 1179 int 1180 qla2x00_mbx_reg_test(scsi_qla_host_t *vha) 1181 { 1182 int rval; 1183 mbx_cmd_t mc; 1184 mbx_cmd_t *mcp = &mc; 1185 1186 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1032, 1187 "Entered %s.\n", __func__); 1188 1189 mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST; 1190 mcp->mb[1] = 0xAAAA; 1191 mcp->mb[2] = 0x5555; 1192 mcp->mb[3] = 0xAA55; 1193 mcp->mb[4] = 0x55AA; 1194 mcp->mb[5] = 0xA5A5; 1195 mcp->mb[6] = 0x5A5A; 1196 mcp->mb[7] = 0x2525; 1197 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 1198 mcp->in_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 1199 mcp->tov = MBX_TOV_SECONDS; 1200 mcp->flags = 0; 1201 rval = qla2x00_mailbox_command(vha, mcp); 1202 1203 if (rval == QLA_SUCCESS) { 1204 if (mcp->mb[1] != 0xAAAA || mcp->mb[2] != 0x5555 || 1205 mcp->mb[3] != 0xAA55 || mcp->mb[4] != 0x55AA) 1206 rval = QLA_FUNCTION_FAILED; 1207 if (mcp->mb[5] != 0xA5A5 || mcp->mb[6] != 0x5A5A || 1208 mcp->mb[7] != 0x2525) 1209 rval = QLA_FUNCTION_FAILED; 1210 } 1211 1212 if (rval != QLA_SUCCESS) { 1213 /*EMPTY*/ 1214 ql_dbg(ql_dbg_mbx, vha, 0x1033, "Failed=%x.\n", rval); 1215 } else { 1216 /*EMPTY*/ 1217 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1034, 1218 "Done %s.\n", __func__); 1219 } 1220 1221 return rval; 1222 } 1223 1224 /* 1225 * qla2x00_verify_checksum 1226 * Verify firmware checksum. 1227 * 1228 * Input: 1229 * ha = adapter block pointer. 1230 * TARGET_QUEUE_LOCK must be released. 1231 * ADAPTER_STATE_LOCK must be released. 1232 * 1233 * Returns: 1234 * qla2x00 local function return status code. 1235 * 1236 * Context: 1237 * Kernel context. 
1238 */ 1239 int 1240 qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr) 1241 { 1242 int rval; 1243 mbx_cmd_t mc; 1244 mbx_cmd_t *mcp = &mc; 1245 1246 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1035, 1247 "Entered %s.\n", __func__); 1248 1249 mcp->mb[0] = MBC_VERIFY_CHECKSUM; 1250 mcp->out_mb = MBX_0; 1251 mcp->in_mb = MBX_0; 1252 if (IS_FWI2_CAPABLE(vha->hw)) { 1253 mcp->mb[1] = MSW(risc_addr); 1254 mcp->mb[2] = LSW(risc_addr); 1255 mcp->out_mb |= MBX_2|MBX_1; 1256 mcp->in_mb |= MBX_2|MBX_1; 1257 } else { 1258 mcp->mb[1] = LSW(risc_addr); 1259 mcp->out_mb |= MBX_1; 1260 mcp->in_mb |= MBX_1; 1261 } 1262 1263 mcp->tov = MBX_TOV_SECONDS; 1264 mcp->flags = 0; 1265 rval = qla2x00_mailbox_command(vha, mcp); 1266 1267 if (rval != QLA_SUCCESS) { 1268 ql_dbg(ql_dbg_mbx, vha, 0x1036, 1269 "Failed=%x chm sum=%x.\n", rval, IS_FWI2_CAPABLE(vha->hw) ? 1270 (mcp->mb[2] << 16) | mcp->mb[1] : mcp->mb[1]); 1271 } else { 1272 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1037, 1273 "Done %s.\n", __func__); 1274 } 1275 1276 return rval; 1277 } 1278 1279 /* 1280 * qla2x00_issue_iocb 1281 * Issue IOCB using mailbox command 1282 * 1283 * Input: 1284 * ha = adapter state pointer. 1285 * buffer = buffer pointer. 1286 * phys_addr = physical address of buffer. 1287 * size = size of buffer. 1288 * TARGET_QUEUE_LOCK must be released. 1289 * ADAPTER_STATE_LOCK must be released. 1290 * 1291 * Returns: 1292 * qla2x00 local function return status code. 1293 * 1294 * Context: 1295 * Kernel context. 1296 */ 1297 int 1298 qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer, 1299 dma_addr_t phys_addr, size_t size, uint32_t tov) 1300 { 1301 int rval; 1302 mbx_cmd_t mc; 1303 mbx_cmd_t *mcp = &mc; 1304 1305 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1038, 1306 "Entered %s.\n", __func__); 1307 1308 mcp->mb[0] = MBC_IOCB_COMMAND_A64; 1309 mcp->mb[1] = 0; 1310 mcp->mb[2] = MSW(phys_addr); 1311 mcp->mb[3] = LSW(phys_addr); 1312 mcp->mb[6] = MSW(MSD(phys_addr)); 1313 mcp->mb[7] = LSW(MSD(phys_addr)); 1314 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 1315 mcp->in_mb = MBX_2|MBX_0; 1316 mcp->tov = tov; 1317 mcp->flags = 0; 1318 rval = qla2x00_mailbox_command(vha, mcp); 1319 1320 if (rval != QLA_SUCCESS) { 1321 /*EMPTY*/ 1322 ql_dbg(ql_dbg_mbx, vha, 0x1039, "Failed=%x.\n", rval); 1323 } else { 1324 sts_entry_t *sts_entry = (sts_entry_t *) buffer; 1325 1326 /* Mask reserved bits. */ 1327 sts_entry->entry_status &= 1328 IS_FWI2_CAPABLE(vha->hw) ? RF_MASK_24XX : RF_MASK; 1329 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103a, 1330 "Done %s.\n", __func__); 1331 } 1332 1333 return rval; 1334 } 1335 1336 int 1337 qla2x00_issue_iocb(scsi_qla_host_t *vha, void *buffer, dma_addr_t phys_addr, 1338 size_t size) 1339 { 1340 return qla2x00_issue_iocb_timeout(vha, buffer, phys_addr, size, 1341 MBX_TOV_SECONDS); 1342 } 1343 1344 /* 1345 * qla2x00_abort_command 1346 * Abort command aborts a specified IOCB. 1347 * 1348 * Input: 1349 * ha = adapter block pointer. 1350 * sp = SB structure pointer. 1351 * 1352 * Returns: 1353 * qla2x00 local function return status code. 1354 * 1355 * Context: 1356 * Kernel context. 
1357 */ 1358 int 1359 qla2x00_abort_command(srb_t *sp) 1360 { 1361 unsigned long flags = 0; 1362 int rval; 1363 uint32_t handle = 0; 1364 mbx_cmd_t mc; 1365 mbx_cmd_t *mcp = &mc; 1366 fc_port_t *fcport = sp->fcport; 1367 scsi_qla_host_t *vha = fcport->vha; 1368 struct qla_hw_data *ha = vha->hw; 1369 struct req_que *req; 1370 struct scsi_cmnd *cmd = GET_CMD_SP(sp); 1371 1372 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103b, 1373 "Entered %s.\n", __func__); 1374 1375 if (vha->flags.qpairs_available && sp->qpair) 1376 req = sp->qpair->req; 1377 else 1378 req = vha->req; 1379 1380 spin_lock_irqsave(&ha->hardware_lock, flags); 1381 for (handle = 1; handle < req->num_outstanding_cmds; handle++) { 1382 if (req->outstanding_cmds[handle] == sp) 1383 break; 1384 } 1385 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1386 1387 if (handle == req->num_outstanding_cmds) { 1388 /* command not found */ 1389 return QLA_FUNCTION_FAILED; 1390 } 1391 1392 mcp->mb[0] = MBC_ABORT_COMMAND; 1393 if (HAS_EXTENDED_IDS(ha)) 1394 mcp->mb[1] = fcport->loop_id; 1395 else 1396 mcp->mb[1] = fcport->loop_id << 8; 1397 mcp->mb[2] = (uint16_t)handle; 1398 mcp->mb[3] = (uint16_t)(handle >> 16); 1399 mcp->mb[6] = (uint16_t)cmd->device->lun; 1400 mcp->out_mb = MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 1401 mcp->in_mb = MBX_0; 1402 mcp->tov = MBX_TOV_SECONDS; 1403 mcp->flags = 0; 1404 rval = qla2x00_mailbox_command(vha, mcp); 1405 1406 if (rval != QLA_SUCCESS) { 1407 ql_dbg(ql_dbg_mbx, vha, 0x103c, "Failed=%x.\n", rval); 1408 } else { 1409 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103d, 1410 "Done %s.\n", __func__); 1411 } 1412 1413 return rval; 1414 } 1415 1416 int 1417 qla2x00_abort_target(struct fc_port *fcport, uint64_t l, int tag) 1418 { 1419 int rval, rval2; 1420 mbx_cmd_t mc; 1421 mbx_cmd_t *mcp = &mc; 1422 scsi_qla_host_t *vha; 1423 struct req_que *req; 1424 struct rsp_que *rsp; 1425 1426 l = l; 1427 vha = fcport->vha; 1428 1429 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103e, 1430 "Entered %s.\n", __func__); 1431 1432 req = vha->hw->req_q_map[0]; 1433 rsp = req->rsp; 1434 mcp->mb[0] = MBC_ABORT_TARGET; 1435 mcp->out_mb = MBX_9|MBX_2|MBX_1|MBX_0; 1436 if (HAS_EXTENDED_IDS(vha->hw)) { 1437 mcp->mb[1] = fcport->loop_id; 1438 mcp->mb[10] = 0; 1439 mcp->out_mb |= MBX_10; 1440 } else { 1441 mcp->mb[1] = fcport->loop_id << 8; 1442 } 1443 mcp->mb[2] = vha->hw->loop_reset_delay; 1444 mcp->mb[9] = vha->vp_idx; 1445 1446 mcp->in_mb = MBX_0; 1447 mcp->tov = MBX_TOV_SECONDS; 1448 mcp->flags = 0; 1449 rval = qla2x00_mailbox_command(vha, mcp); 1450 if (rval != QLA_SUCCESS) { 1451 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103f, 1452 "Failed=%x.\n", rval); 1453 } 1454 1455 /* Issue marker IOCB. 
*/ 1456 rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, 0, 1457 MK_SYNC_ID); 1458 if (rval2 != QLA_SUCCESS) { 1459 ql_dbg(ql_dbg_mbx, vha, 0x1040, 1460 "Failed to issue marker IOCB (%x).\n", rval2); 1461 } else { 1462 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1041, 1463 "Done %s.\n", __func__); 1464 } 1465 1466 return rval; 1467 } 1468 1469 int 1470 qla2x00_lun_reset(struct fc_port *fcport, uint64_t l, int tag) 1471 { 1472 int rval, rval2; 1473 mbx_cmd_t mc; 1474 mbx_cmd_t *mcp = &mc; 1475 scsi_qla_host_t *vha; 1476 struct req_que *req; 1477 struct rsp_que *rsp; 1478 1479 vha = fcport->vha; 1480 1481 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1042, 1482 "Entered %s.\n", __func__); 1483 1484 req = vha->hw->req_q_map[0]; 1485 rsp = req->rsp; 1486 mcp->mb[0] = MBC_LUN_RESET; 1487 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0; 1488 if (HAS_EXTENDED_IDS(vha->hw)) 1489 mcp->mb[1] = fcport->loop_id; 1490 else 1491 mcp->mb[1] = fcport->loop_id << 8; 1492 mcp->mb[2] = (u32)l; 1493 mcp->mb[3] = 0; 1494 mcp->mb[9] = vha->vp_idx; 1495 1496 mcp->in_mb = MBX_0; 1497 mcp->tov = MBX_TOV_SECONDS; 1498 mcp->flags = 0; 1499 rval = qla2x00_mailbox_command(vha, mcp); 1500 if (rval != QLA_SUCCESS) { 1501 ql_dbg(ql_dbg_mbx, vha, 0x1043, "Failed=%x.\n", rval); 1502 } 1503 1504 /* Issue marker IOCB. */ 1505 rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l, 1506 MK_SYNC_ID_LUN); 1507 if (rval2 != QLA_SUCCESS) { 1508 ql_dbg(ql_dbg_mbx, vha, 0x1044, 1509 "Failed to issue marker IOCB (%x).\n", rval2); 1510 } else { 1511 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1045, 1512 "Done %s.\n", __func__); 1513 } 1514 1515 return rval; 1516 } 1517 1518 /* 1519 * qla2x00_get_adapter_id 1520 * Get adapter ID and topology. 1521 * 1522 * Input: 1523 * ha = adapter block pointer. 1524 * id = pointer for loop ID. 1525 * al_pa = pointer for AL_PA. 1526 * area = pointer for area. 1527 * domain = pointer for domain. 1528 * top = pointer for topology. 1529 * TARGET_QUEUE_LOCK must be released. 1530 * ADAPTER_STATE_LOCK must be released. 1531 * 1532 * Returns: 1533 * qla2x00 local function return status code. 1534 * 1535 * Context: 1536 * Kernel context. 1537 */ 1538 int 1539 qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa, 1540 uint8_t *area, uint8_t *domain, uint16_t *top, uint16_t *sw_cap) 1541 { 1542 int rval; 1543 mbx_cmd_t mc; 1544 mbx_cmd_t *mcp = &mc; 1545 1546 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1046, 1547 "Entered %s.\n", __func__); 1548 1549 mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID; 1550 mcp->mb[9] = vha->vp_idx; 1551 mcp->out_mb = MBX_9|MBX_0; 1552 mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 1553 if (IS_CNA_CAPABLE(vha->hw)) 1554 mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10; 1555 if (IS_FWI2_CAPABLE(vha->hw)) 1556 mcp->in_mb |= MBX_19|MBX_18|MBX_17|MBX_16; 1557 if (IS_QLA27XX(vha->hw)) 1558 mcp->in_mb |= MBX_15; 1559 mcp->tov = MBX_TOV_SECONDS; 1560 mcp->flags = 0; 1561 rval = qla2x00_mailbox_command(vha, mcp); 1562 if (mcp->mb[0] == MBS_COMMAND_ERROR) 1563 rval = QLA_COMMAND_ERROR; 1564 else if (mcp->mb[0] == MBS_INVALID_COMMAND) 1565 rval = QLA_INVALID_COMMAND; 1566 1567 /* Return data. 
*/ 1568 *id = mcp->mb[1]; 1569 *al_pa = LSB(mcp->mb[2]); 1570 *area = MSB(mcp->mb[2]); 1571 *domain = LSB(mcp->mb[3]); 1572 *top = mcp->mb[6]; 1573 *sw_cap = mcp->mb[7]; 1574 1575 if (rval != QLA_SUCCESS) { 1576 /*EMPTY*/ 1577 ql_dbg(ql_dbg_mbx, vha, 0x1047, "Failed=%x.\n", rval); 1578 } else { 1579 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1048, 1580 "Done %s.\n", __func__); 1581 1582 if (IS_CNA_CAPABLE(vha->hw)) { 1583 vha->fcoe_vlan_id = mcp->mb[9] & 0xfff; 1584 vha->fcoe_fcf_idx = mcp->mb[10]; 1585 vha->fcoe_vn_port_mac[5] = mcp->mb[11] >> 8; 1586 vha->fcoe_vn_port_mac[4] = mcp->mb[11] & 0xff; 1587 vha->fcoe_vn_port_mac[3] = mcp->mb[12] >> 8; 1588 vha->fcoe_vn_port_mac[2] = mcp->mb[12] & 0xff; 1589 vha->fcoe_vn_port_mac[1] = mcp->mb[13] >> 8; 1590 vha->fcoe_vn_port_mac[0] = mcp->mb[13] & 0xff; 1591 } 1592 /* If FA-WWN supported */ 1593 if (IS_FAWWN_CAPABLE(vha->hw)) { 1594 if (mcp->mb[7] & BIT_14) { 1595 vha->port_name[0] = MSB(mcp->mb[16]); 1596 vha->port_name[1] = LSB(mcp->mb[16]); 1597 vha->port_name[2] = MSB(mcp->mb[17]); 1598 vha->port_name[3] = LSB(mcp->mb[17]); 1599 vha->port_name[4] = MSB(mcp->mb[18]); 1600 vha->port_name[5] = LSB(mcp->mb[18]); 1601 vha->port_name[6] = MSB(mcp->mb[19]); 1602 vha->port_name[7] = LSB(mcp->mb[19]); 1603 fc_host_port_name(vha->host) = 1604 wwn_to_u64(vha->port_name); 1605 ql_dbg(ql_dbg_mbx, vha, 0x10ca, 1606 "FA-WWN acquired %016llx\n", 1607 wwn_to_u64(vha->port_name)); 1608 } 1609 } 1610 1611 if (IS_QLA27XX(vha->hw)) 1612 vha->bbcr = mcp->mb[15]; 1613 } 1614 1615 return rval; 1616 } 1617 1618 /* 1619 * qla2x00_get_retry_cnt 1620 * Get current firmware login retry count and delay. 1621 * 1622 * Input: 1623 * ha = adapter block pointer. 1624 * retry_cnt = pointer to login retry count. 1625 * tov = pointer to login timeout value. 1626 * 1627 * Returns: 1628 * qla2x00 local function return status code. 1629 * 1630 * Context: 1631 * Kernel context. 1632 */ 1633 int 1634 qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov, 1635 uint16_t *r_a_tov) 1636 { 1637 int rval; 1638 uint16_t ratov; 1639 mbx_cmd_t mc; 1640 mbx_cmd_t *mcp = &mc; 1641 1642 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1049, 1643 "Entered %s.\n", __func__); 1644 1645 mcp->mb[0] = MBC_GET_RETRY_COUNT; 1646 mcp->out_mb = MBX_0; 1647 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0; 1648 mcp->tov = MBX_TOV_SECONDS; 1649 mcp->flags = 0; 1650 rval = qla2x00_mailbox_command(vha, mcp); 1651 1652 if (rval != QLA_SUCCESS) { 1653 /*EMPTY*/ 1654 ql_dbg(ql_dbg_mbx, vha, 0x104a, 1655 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 1656 } else { 1657 /* Convert returned data and check our values. */ 1658 *r_a_tov = mcp->mb[3] / 2; 1659 ratov = (mcp->mb[3]/2) / 10; /* mb[3] value is in 100ms */ 1660 if (mcp->mb[1] * ratov > (*retry_cnt) * (*tov)) { 1661 /* Update to the larger values */ 1662 *retry_cnt = (uint8_t)mcp->mb[1]; 1663 *tov = ratov; 1664 } 1665 1666 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104b, 1667 "Done %s mb3=%d ratov=%d.\n", __func__, mcp->mb[3], ratov); 1668 } 1669 1670 return rval; 1671 } 1672 1673 /* 1674 * qla2x00_init_firmware 1675 * Initialize adapter firmware. 1676 * 1677 * Input: 1678 * ha = adapter block pointer. 1679 * dptr = Initialization control block pointer. 1680 * size = size of initialization control block. 1681 * TARGET_QUEUE_LOCK must be released. 1682 * ADAPTER_STATE_LOCK must be released. 1683 * 1684 * Returns: 1685 * qla2x00 local function return status code. 1686 * 1687 * Context: 1688 * Kernel context. 
1689 */ 1690 int 1691 qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size) 1692 { 1693 int rval; 1694 mbx_cmd_t mc; 1695 mbx_cmd_t *mcp = &mc; 1696 struct qla_hw_data *ha = vha->hw; 1697 1698 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104c, 1699 "Entered %s.\n", __func__); 1700 1701 if (IS_P3P_TYPE(ha) && ql2xdbwr) 1702 qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr, 1703 (0x04 | (ha->portnum << 5) | (0 << 8) | (0 << 16))); 1704 1705 if (ha->flags.npiv_supported) 1706 mcp->mb[0] = MBC_MID_INITIALIZE_FIRMWARE; 1707 else 1708 mcp->mb[0] = MBC_INITIALIZE_FIRMWARE; 1709 1710 mcp->mb[1] = 0; 1711 mcp->mb[2] = MSW(ha->init_cb_dma); 1712 mcp->mb[3] = LSW(ha->init_cb_dma); 1713 mcp->mb[6] = MSW(MSD(ha->init_cb_dma)); 1714 mcp->mb[7] = LSW(MSD(ha->init_cb_dma)); 1715 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 1716 if (ha->ex_init_cb && ha->ex_init_cb->ex_version) { 1717 mcp->mb[1] = BIT_0; 1718 mcp->mb[10] = MSW(ha->ex_init_cb_dma); 1719 mcp->mb[11] = LSW(ha->ex_init_cb_dma); 1720 mcp->mb[12] = MSW(MSD(ha->ex_init_cb_dma)); 1721 mcp->mb[13] = LSW(MSD(ha->ex_init_cb_dma)); 1722 mcp->mb[14] = sizeof(*ha->ex_init_cb); 1723 mcp->out_mb |= MBX_14|MBX_13|MBX_12|MBX_11|MBX_10; 1724 } 1725 /* 1 and 2 should normally be captured. */ 1726 mcp->in_mb = MBX_2|MBX_1|MBX_0; 1727 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) 1728 /* mb3 is additional info about the installed SFP. */ 1729 mcp->in_mb |= MBX_3; 1730 mcp->buf_size = size; 1731 mcp->flags = MBX_DMA_OUT; 1732 mcp->tov = MBX_TOV_SECONDS; 1733 rval = qla2x00_mailbox_command(vha, mcp); 1734 1735 if (rval != QLA_SUCCESS) { 1736 /*EMPTY*/ 1737 ql_dbg(ql_dbg_mbx, vha, 0x104d, 1738 "Failed=%x mb[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x,.\n", 1739 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3]); 1740 } else { 1741 if (IS_QLA27XX(ha)) { 1742 if (mcp->mb[2] == 6 || mcp->mb[3] == 2) 1743 ql_dbg(ql_dbg_mbx, vha, 0x119d, 1744 "Invalid SFP/Validation Failed\n"); 1745 } 1746 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104e, 1747 "Done %s.\n", __func__); 1748 } 1749 1750 return rval; 1751 } 1752 1753 1754 /* 1755 * qla2x00_get_port_database 1756 * Issue normal/enhanced get port database mailbox command 1757 * and copy device name as necessary. 1758 * 1759 * Input: 1760 * ha = adapter state pointer. 1761 * dev = structure pointer. 1762 * opt = enhanced cmd option byte. 1763 * 1764 * Returns: 1765 * qla2x00 local function return status code. 1766 * 1767 * Context: 1768 * Kernel context. 
1769 */ 1770 int 1771 qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt) 1772 { 1773 int rval; 1774 mbx_cmd_t mc; 1775 mbx_cmd_t *mcp = &mc; 1776 port_database_t *pd; 1777 struct port_database_24xx *pd24; 1778 dma_addr_t pd_dma; 1779 struct qla_hw_data *ha = vha->hw; 1780 1781 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104f, 1782 "Entered %s.\n", __func__); 1783 1784 pd24 = NULL; 1785 pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma); 1786 if (pd == NULL) { 1787 ql_log(ql_log_warn, vha, 0x1050, 1788 "Failed to allocate port database structure.\n"); 1789 return QLA_MEMORY_ALLOC_FAILED; 1790 } 1791 memset(pd, 0, max(PORT_DATABASE_SIZE, PORT_DATABASE_24XX_SIZE)); 1792 1793 mcp->mb[0] = MBC_GET_PORT_DATABASE; 1794 if (opt != 0 && !IS_FWI2_CAPABLE(ha)) 1795 mcp->mb[0] = MBC_ENHANCED_GET_PORT_DATABASE; 1796 mcp->mb[2] = MSW(pd_dma); 1797 mcp->mb[3] = LSW(pd_dma); 1798 mcp->mb[6] = MSW(MSD(pd_dma)); 1799 mcp->mb[7] = LSW(MSD(pd_dma)); 1800 mcp->mb[9] = vha->vp_idx; 1801 mcp->out_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0; 1802 mcp->in_mb = MBX_0; 1803 if (IS_FWI2_CAPABLE(ha)) { 1804 mcp->mb[1] = fcport->loop_id; 1805 mcp->mb[10] = opt; 1806 mcp->out_mb |= MBX_10|MBX_1; 1807 mcp->in_mb |= MBX_1; 1808 } else if (HAS_EXTENDED_IDS(ha)) { 1809 mcp->mb[1] = fcport->loop_id; 1810 mcp->mb[10] = opt; 1811 mcp->out_mb |= MBX_10|MBX_1; 1812 } else { 1813 mcp->mb[1] = fcport->loop_id << 8 | opt; 1814 mcp->out_mb |= MBX_1; 1815 } 1816 mcp->buf_size = IS_FWI2_CAPABLE(ha) ? 1817 PORT_DATABASE_24XX_SIZE : PORT_DATABASE_SIZE; 1818 mcp->flags = MBX_DMA_IN; 1819 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2); 1820 rval = qla2x00_mailbox_command(vha, mcp); 1821 if (rval != QLA_SUCCESS) 1822 goto gpd_error_out; 1823 1824 if (IS_FWI2_CAPABLE(ha)) { 1825 uint64_t zero = 0; 1826 pd24 = (struct port_database_24xx *) pd; 1827 1828 /* Check for logged in state. */ 1829 if (pd24->current_login_state != PDS_PRLI_COMPLETE && 1830 pd24->last_login_state != PDS_PRLI_COMPLETE) { 1831 ql_dbg(ql_dbg_mbx, vha, 0x1051, 1832 "Unable to verify login-state (%x/%x) for " 1833 "loop_id %x.\n", pd24->current_login_state, 1834 pd24->last_login_state, fcport->loop_id); 1835 rval = QLA_FUNCTION_FAILED; 1836 goto gpd_error_out; 1837 } 1838 1839 if (fcport->loop_id == FC_NO_LOOP_ID || 1840 (memcmp(fcport->port_name, (uint8_t *)&zero, 8) && 1841 memcmp(fcport->port_name, pd24->port_name, 8))) { 1842 /* We lost the device mid way. */ 1843 rval = QLA_NOT_LOGGED_IN; 1844 goto gpd_error_out; 1845 } 1846 1847 /* Names are little-endian. */ 1848 memcpy(fcport->node_name, pd24->node_name, WWN_SIZE); 1849 memcpy(fcport->port_name, pd24->port_name, WWN_SIZE); 1850 1851 /* Get port_id of device. */ 1852 fcport->d_id.b.domain = pd24->port_id[0]; 1853 fcport->d_id.b.area = pd24->port_id[1]; 1854 fcport->d_id.b.al_pa = pd24->port_id[2]; 1855 fcport->d_id.b.rsvd_1 = 0; 1856 1857 /* If not target must be initiator or unknown type. */ 1858 if ((pd24->prli_svc_param_word_3[0] & BIT_4) == 0) 1859 fcport->port_type = FCT_INITIATOR; 1860 else 1861 fcport->port_type = FCT_TARGET; 1862 1863 /* Passback COS information. */ 1864 fcport->supported_classes = (pd24->flags & PDF_CLASS_2) ? 1865 FC_COS_CLASS2 : FC_COS_CLASS3; 1866 1867 if (pd24->prli_svc_param_word_3[0] & BIT_7) 1868 fcport->flags |= FCF_CONF_COMP_SUPPORTED; 1869 } else { 1870 uint64_t zero = 0; 1871 1872 /* Check for logged in state. 
*/ 1873 if (pd->master_state != PD_STATE_PORT_LOGGED_IN && 1874 pd->slave_state != PD_STATE_PORT_LOGGED_IN) { 1875 ql_dbg(ql_dbg_mbx, vha, 0x100a, 1876 "Unable to verify login-state (%x/%x) - " 1877 "portid=%02x%02x%02x.\n", pd->master_state, 1878 pd->slave_state, fcport->d_id.b.domain, 1879 fcport->d_id.b.area, fcport->d_id.b.al_pa); 1880 rval = QLA_FUNCTION_FAILED; 1881 goto gpd_error_out; 1882 } 1883 1884 if (fcport->loop_id == FC_NO_LOOP_ID || 1885 (memcmp(fcport->port_name, (uint8_t *)&zero, 8) && 1886 memcmp(fcport->port_name, pd->port_name, 8))) { 1887 /* We lost the device mid way. */ 1888 rval = QLA_NOT_LOGGED_IN; 1889 goto gpd_error_out; 1890 } 1891 1892 /* Names are little-endian. */ 1893 memcpy(fcport->node_name, pd->node_name, WWN_SIZE); 1894 memcpy(fcport->port_name, pd->port_name, WWN_SIZE); 1895 1896 /* Get port_id of device. */ 1897 fcport->d_id.b.domain = pd->port_id[0]; 1898 fcport->d_id.b.area = pd->port_id[3]; 1899 fcport->d_id.b.al_pa = pd->port_id[2]; 1900 fcport->d_id.b.rsvd_1 = 0; 1901 1902 /* If not target must be initiator or unknown type. */ 1903 if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0) 1904 fcport->port_type = FCT_INITIATOR; 1905 else 1906 fcport->port_type = FCT_TARGET; 1907 1908 /* Passback COS information. */ 1909 fcport->supported_classes = (pd->options & BIT_4) ? 1910 FC_COS_CLASS2: FC_COS_CLASS3; 1911 } 1912 1913 gpd_error_out: 1914 dma_pool_free(ha->s_dma_pool, pd, pd_dma); 1915 1916 if (rval != QLA_SUCCESS) { 1917 ql_dbg(ql_dbg_mbx, vha, 0x1052, 1918 "Failed=%x mb[0]=%x mb[1]=%x.\n", rval, 1919 mcp->mb[0], mcp->mb[1]); 1920 } else { 1921 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1053, 1922 "Done %s.\n", __func__); 1923 } 1924 1925 return rval; 1926 } 1927 1928 /* 1929 * qla2x00_get_firmware_state 1930 * Get adapter firmware state. 1931 * 1932 * Input: 1933 * ha = adapter block pointer. 1934 * dptr = pointer for firmware state. 1935 * TARGET_QUEUE_LOCK must be released. 1936 * ADAPTER_STATE_LOCK must be released. 1937 * 1938 * Returns: 1939 * qla2x00 local function return status code. 1940 * 1941 * Context: 1942 * Kernel context. 1943 */ 1944 int 1945 qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states) 1946 { 1947 int rval; 1948 mbx_cmd_t mc; 1949 mbx_cmd_t *mcp = &mc; 1950 struct qla_hw_data *ha = vha->hw; 1951 1952 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1054, 1953 "Entered %s.\n", __func__); 1954 1955 mcp->mb[0] = MBC_GET_FIRMWARE_STATE; 1956 mcp->out_mb = MBX_0; 1957 if (IS_FWI2_CAPABLE(vha->hw)) 1958 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 1959 else 1960 mcp->in_mb = MBX_1|MBX_0; 1961 mcp->tov = MBX_TOV_SECONDS; 1962 mcp->flags = 0; 1963 rval = qla2x00_mailbox_command(vha, mcp); 1964 1965 /* Return firmware states. */ 1966 states[0] = mcp->mb[1]; 1967 if (IS_FWI2_CAPABLE(vha->hw)) { 1968 states[1] = mcp->mb[2]; 1969 states[2] = mcp->mb[3]; /* SFP info */ 1970 states[3] = mcp->mb[4]; 1971 states[4] = mcp->mb[5]; 1972 states[5] = mcp->mb[6]; /* DPORT status */ 1973 } 1974 1975 if (rval != QLA_SUCCESS) { 1976 /*EMPTY*/ 1977 ql_dbg(ql_dbg_mbx, vha, 0x1055, "Failed=%x.\n", rval); 1978 } else { 1979 if (IS_QLA27XX(ha)) { 1980 if (mcp->mb[2] == 6 || mcp->mb[3] == 2) 1981 ql_dbg(ql_dbg_mbx, vha, 0x119e, 1982 "Invalid SFP/Validation Failed\n"); 1983 } 1984 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1056, 1985 "Done %s.\n", __func__); 1986 } 1987 1988 return rval; 1989 } 1990 1991 /* 1992 * qla2x00_get_port_name 1993 * Issue get port name mailbox command. 1994 * Returned name is in big endian format. 
1995 * 1996 * Input: 1997 * ha = adapter block pointer. 1998 * loop_id = loop ID of device. 1999 * name = pointer for name. 2000 * TARGET_QUEUE_LOCK must be released. 2001 * ADAPTER_STATE_LOCK must be released. 2002 * 2003 * Returns: 2004 * qla2x00 local function return status code. 2005 * 2006 * Context: 2007 * Kernel context. 2008 */ 2009 int 2010 qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name, 2011 uint8_t opt) 2012 { 2013 int rval; 2014 mbx_cmd_t mc; 2015 mbx_cmd_t *mcp = &mc; 2016 2017 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1057, 2018 "Entered %s.\n", __func__); 2019 2020 mcp->mb[0] = MBC_GET_PORT_NAME; 2021 mcp->mb[9] = vha->vp_idx; 2022 mcp->out_mb = MBX_9|MBX_1|MBX_0; 2023 if (HAS_EXTENDED_IDS(vha->hw)) { 2024 mcp->mb[1] = loop_id; 2025 mcp->mb[10] = opt; 2026 mcp->out_mb |= MBX_10; 2027 } else { 2028 mcp->mb[1] = loop_id << 8 | opt; 2029 } 2030 2031 mcp->in_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 2032 mcp->tov = MBX_TOV_SECONDS; 2033 mcp->flags = 0; 2034 rval = qla2x00_mailbox_command(vha, mcp); 2035 2036 if (rval != QLA_SUCCESS) { 2037 /*EMPTY*/ 2038 ql_dbg(ql_dbg_mbx, vha, 0x1058, "Failed=%x.\n", rval); 2039 } else { 2040 if (name != NULL) { 2041 /* This function returns name in big endian. */ 2042 name[0] = MSB(mcp->mb[2]); 2043 name[1] = LSB(mcp->mb[2]); 2044 name[2] = MSB(mcp->mb[3]); 2045 name[3] = LSB(mcp->mb[3]); 2046 name[4] = MSB(mcp->mb[6]); 2047 name[5] = LSB(mcp->mb[6]); 2048 name[6] = MSB(mcp->mb[7]); 2049 name[7] = LSB(mcp->mb[7]); 2050 } 2051 2052 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1059, 2053 "Done %s.\n", __func__); 2054 } 2055 2056 return rval; 2057 } 2058 2059 /* 2060 * qla24xx_link_initialization 2061 * Issue link initialization mailbox command. 2062 * 2063 * Input: 2064 * ha = adapter block pointer. 2065 * TARGET_QUEUE_LOCK must be released. 2066 * ADAPTER_STATE_LOCK must be released. 2067 * 2068 * Returns: 2069 * qla2x00 local function return status code. 2070 * 2071 * Context: 2072 * Kernel context. 2073 */ 2074 int 2075 qla24xx_link_initialize(scsi_qla_host_t *vha) 2076 { 2077 int rval; 2078 mbx_cmd_t mc; 2079 mbx_cmd_t *mcp = &mc; 2080 2081 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1152, 2082 "Entered %s.\n", __func__); 2083 2084 if (!IS_FWI2_CAPABLE(vha->hw) || IS_CNA_CAPABLE(vha->hw)) 2085 return QLA_FUNCTION_FAILED; 2086 2087 mcp->mb[0] = MBC_LINK_INITIALIZATION; 2088 mcp->mb[1] = BIT_4; 2089 if (vha->hw->operating_mode == LOOP) 2090 mcp->mb[1] |= BIT_6; 2091 else 2092 mcp->mb[1] |= BIT_5; 2093 mcp->mb[2] = 0; 2094 mcp->mb[3] = 0; 2095 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 2096 mcp->in_mb = MBX_0; 2097 mcp->tov = MBX_TOV_SECONDS; 2098 mcp->flags = 0; 2099 rval = qla2x00_mailbox_command(vha, mcp); 2100 2101 if (rval != QLA_SUCCESS) { 2102 ql_dbg(ql_dbg_mbx, vha, 0x1153, "Failed=%x.\n", rval); 2103 } else { 2104 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1154, 2105 "Done %s.\n", __func__); 2106 } 2107 2108 return rval; 2109 } 2110 2111 /* 2112 * qla2x00_lip_reset 2113 * Issue LIP reset mailbox command. 2114 * 2115 * Input: 2116 * ha = adapter block pointer. 2117 * TARGET_QUEUE_LOCK must be released. 2118 * ADAPTER_STATE_LOCK must be released. 2119 * 2120 * Returns: 2121 * qla2x00 local function return status code. 2122 * 2123 * Context: 2124 * Kernel context. 
2125 */ 2126 int 2127 qla2x00_lip_reset(scsi_qla_host_t *vha) 2128 { 2129 int rval; 2130 mbx_cmd_t mc; 2131 mbx_cmd_t *mcp = &mc; 2132 2133 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105a, 2134 "Entered %s.\n", __func__); 2135 2136 if (IS_CNA_CAPABLE(vha->hw)) { 2137 /* Logout across all FCFs. */ 2138 mcp->mb[0] = MBC_LIP_FULL_LOGIN; 2139 mcp->mb[1] = BIT_1; 2140 mcp->mb[2] = 0; 2141 mcp->out_mb = MBX_2|MBX_1|MBX_0; 2142 } else if (IS_FWI2_CAPABLE(vha->hw)) { 2143 mcp->mb[0] = MBC_LIP_FULL_LOGIN; 2144 mcp->mb[1] = BIT_6; 2145 mcp->mb[2] = 0; 2146 mcp->mb[3] = vha->hw->loop_reset_delay; 2147 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 2148 } else { 2149 mcp->mb[0] = MBC_LIP_RESET; 2150 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 2151 if (HAS_EXTENDED_IDS(vha->hw)) { 2152 mcp->mb[1] = 0x00ff; 2153 mcp->mb[10] = 0; 2154 mcp->out_mb |= MBX_10; 2155 } else { 2156 mcp->mb[1] = 0xff00; 2157 } 2158 mcp->mb[2] = vha->hw->loop_reset_delay; 2159 mcp->mb[3] = 0; 2160 } 2161 mcp->in_mb = MBX_0; 2162 mcp->tov = MBX_TOV_SECONDS; 2163 mcp->flags = 0; 2164 rval = qla2x00_mailbox_command(vha, mcp); 2165 2166 if (rval != QLA_SUCCESS) { 2167 /*EMPTY*/ 2168 ql_dbg(ql_dbg_mbx, vha, 0x105b, "Failed=%x.\n", rval); 2169 } else { 2170 /*EMPTY*/ 2171 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105c, 2172 "Done %s.\n", __func__); 2173 } 2174 2175 return rval; 2176 } 2177 2178 /* 2179 * qla2x00_send_sns 2180 * Send SNS command. 2181 * 2182 * Input: 2183 * ha = adapter block pointer. 2184 * sns = pointer for command. 2185 * cmd_size = command size. 2186 * buf_size = response/command size. 2187 * TARGET_QUEUE_LOCK must be released. 2188 * ADAPTER_STATE_LOCK must be released. 2189 * 2190 * Returns: 2191 * qla2x00 local function return status code. 2192 * 2193 * Context: 2194 * Kernel context. 
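 *
 * Note: the request/response buffer is handed to the firmware by DMA
 * address only, and the mailbox timeout is derived from the login
 * timeout (2.5 x login_timeout) rather than MBX_TOV_SECONDS.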
2195 */ 2196 int 2197 qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address, 2198 uint16_t cmd_size, size_t buf_size) 2199 { 2200 int rval; 2201 mbx_cmd_t mc; 2202 mbx_cmd_t *mcp = &mc; 2203 2204 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105d, 2205 "Entered %s.\n", __func__); 2206 2207 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105e, 2208 "Retry cnt=%d ratov=%d total tov=%d.\n", 2209 vha->hw->retry_count, vha->hw->login_timeout, mcp->tov); 2210 2211 mcp->mb[0] = MBC_SEND_SNS_COMMAND; 2212 mcp->mb[1] = cmd_size; 2213 mcp->mb[2] = MSW(sns_phys_address); 2214 mcp->mb[3] = LSW(sns_phys_address); 2215 mcp->mb[6] = MSW(MSD(sns_phys_address)); 2216 mcp->mb[7] = LSW(MSD(sns_phys_address)); 2217 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 2218 mcp->in_mb = MBX_0|MBX_1; 2219 mcp->buf_size = buf_size; 2220 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN; 2221 mcp->tov = (vha->hw->login_timeout * 2) + (vha->hw->login_timeout / 2); 2222 rval = qla2x00_mailbox_command(vha, mcp); 2223 2224 if (rval != QLA_SUCCESS) { 2225 /*EMPTY*/ 2226 ql_dbg(ql_dbg_mbx, vha, 0x105f, 2227 "Failed=%x mb[0]=%x mb[1]=%x.\n", 2228 rval, mcp->mb[0], mcp->mb[1]); 2229 } else { 2230 /*EMPTY*/ 2231 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1060, 2232 "Done %s.\n", __func__); 2233 } 2234 2235 return rval; 2236 } 2237 2238 int 2239 qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, 2240 uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt) 2241 { 2242 int rval; 2243 2244 struct logio_entry_24xx *lg; 2245 dma_addr_t lg_dma; 2246 uint32_t iop[2]; 2247 struct qla_hw_data *ha = vha->hw; 2248 struct req_que *req; 2249 2250 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1061, 2251 "Entered %s.\n", __func__); 2252 2253 if (vha->vp_idx && vha->qpair) 2254 req = vha->qpair->req; 2255 else 2256 req = ha->req_q_map[0]; 2257 2258 lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma); 2259 if (lg == NULL) { 2260 ql_log(ql_log_warn, vha, 0x1062, 2261 "Failed to allocate login IOCB.\n"); 2262 return QLA_MEMORY_ALLOC_FAILED; 2263 } 2264 memset(lg, 0, sizeof(struct logio_entry_24xx)); 2265 2266 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE; 2267 lg->entry_count = 1; 2268 lg->handle = MAKE_HANDLE(req->id, lg->handle); 2269 lg->nport_handle = cpu_to_le16(loop_id); 2270 lg->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI); 2271 if (opt & BIT_0) 2272 lg->control_flags |= cpu_to_le16(LCF_COND_PLOGI); 2273 if (opt & BIT_1) 2274 lg->control_flags |= cpu_to_le16(LCF_SKIP_PRLI); 2275 lg->port_id[0] = al_pa; 2276 lg->port_id[1] = area; 2277 lg->port_id[2] = domain; 2278 lg->vp_index = vha->vp_idx; 2279 rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0, 2280 (ha->r_a_tov / 10 * 2) + 2); 2281 if (rval != QLA_SUCCESS) { 2282 ql_dbg(ql_dbg_mbx, vha, 0x1063, 2283 "Failed to issue login IOCB (%x).\n", rval); 2284 } else if (lg->entry_status != 0) { 2285 ql_dbg(ql_dbg_mbx, vha, 0x1064, 2286 "Failed to complete IOCB -- error status (%x).\n", 2287 lg->entry_status); 2288 rval = QLA_FUNCTION_FAILED; 2289 } else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) { 2290 iop[0] = le32_to_cpu(lg->io_parameter[0]); 2291 iop[1] = le32_to_cpu(lg->io_parameter[1]); 2292 2293 ql_dbg(ql_dbg_mbx, vha, 0x1065, 2294 "Failed to complete IOCB -- completion status (%x) " 2295 "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status), 2296 iop[0], iop[1]); 2297 2298 switch (iop[0]) { 2299 case LSC_SCODE_PORTID_USED: 2300 mb[0] = MBS_PORT_ID_USED; 2301 mb[1] = LSW(iop[1]); 2302 break; 2303 case LSC_SCODE_NPORT_USED: 2304 mb[0] = MBS_LOOP_ID_USED; 2305 break; 2306 
case LSC_SCODE_NOLINK: 2307 case LSC_SCODE_NOIOCB: 2308 case LSC_SCODE_NOXCB: 2309 case LSC_SCODE_CMD_FAILED: 2310 case LSC_SCODE_NOFABRIC: 2311 case LSC_SCODE_FW_NOT_READY: 2312 case LSC_SCODE_NOT_LOGGED_IN: 2313 case LSC_SCODE_NOPCB: 2314 case LSC_SCODE_ELS_REJECT: 2315 case LSC_SCODE_CMD_PARAM_ERR: 2316 case LSC_SCODE_NONPORT: 2317 case LSC_SCODE_LOGGED_IN: 2318 case LSC_SCODE_NOFLOGI_ACC: 2319 default: 2320 mb[0] = MBS_COMMAND_ERROR; 2321 break; 2322 } 2323 } else { 2324 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1066, 2325 "Done %s.\n", __func__); 2326 2327 iop[0] = le32_to_cpu(lg->io_parameter[0]); 2328 2329 mb[0] = MBS_COMMAND_COMPLETE; 2330 mb[1] = 0; 2331 if (iop[0] & BIT_4) { 2332 if (iop[0] & BIT_8) 2333 mb[1] |= BIT_1; 2334 } else 2335 mb[1] = BIT_0; 2336 2337 /* Passback COS information. */ 2338 mb[10] = 0; 2339 if (lg->io_parameter[7] || lg->io_parameter[8]) 2340 mb[10] |= BIT_0; /* Class 2. */ 2341 if (lg->io_parameter[9] || lg->io_parameter[10]) 2342 mb[10] |= BIT_1; /* Class 3. */ 2343 if (lg->io_parameter[0] & cpu_to_le32(BIT_7)) 2344 mb[10] |= BIT_7; /* Confirmed Completion 2345 * Allowed 2346 */ 2347 } 2348 2349 dma_pool_free(ha->s_dma_pool, lg, lg_dma); 2350 2351 return rval; 2352 } 2353 2354 /* 2355 * qla2x00_login_fabric 2356 * Issue login fabric port mailbox command. 2357 * 2358 * Input: 2359 * ha = adapter block pointer. 2360 * loop_id = device loop ID. 2361 * domain = device domain. 2362 * area = device area. 2363 * al_pa = device AL_PA. 2364 * status = pointer for return status. 2365 * opt = command options. 2366 * TARGET_QUEUE_LOCK must be released. 2367 * ADAPTER_STATE_LOCK must be released. 2368 * 2369 * Returns: 2370 * qla2x00 local function return status code. 2371 * 2372 * Context: 2373 * Kernel context. 2374 */ 2375 int 2376 qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, 2377 uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt) 2378 { 2379 int rval; 2380 mbx_cmd_t mc; 2381 mbx_cmd_t *mcp = &mc; 2382 struct qla_hw_data *ha = vha->hw; 2383 2384 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1067, 2385 "Entered %s.\n", __func__); 2386 2387 mcp->mb[0] = MBC_LOGIN_FABRIC_PORT; 2388 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 2389 if (HAS_EXTENDED_IDS(ha)) { 2390 mcp->mb[1] = loop_id; 2391 mcp->mb[10] = opt; 2392 mcp->out_mb |= MBX_10; 2393 } else { 2394 mcp->mb[1] = (loop_id << 8) | opt; 2395 } 2396 mcp->mb[2] = domain; 2397 mcp->mb[3] = area << 8 | al_pa; 2398 2399 mcp->in_mb = MBX_7|MBX_6|MBX_2|MBX_1|MBX_0; 2400 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2); 2401 mcp->flags = 0; 2402 rval = qla2x00_mailbox_command(vha, mcp); 2403 2404 /* Return mailbox statuses. */ 2405 if (mb != NULL) { 2406 mb[0] = mcp->mb[0]; 2407 mb[1] = mcp->mb[1]; 2408 mb[2] = mcp->mb[2]; 2409 mb[6] = mcp->mb[6]; 2410 mb[7] = mcp->mb[7]; 2411 /* COS retrieved from Get-Port-Database mailbox command. */ 2412 mb[10] = 0; 2413 } 2414 2415 if (rval != QLA_SUCCESS) { 2416 /* RLU tmp code: need to change main mailbox_command function to 2417 * return ok even when the mailbox completion value is not 2418 * SUCCESS. The caller needs to be responsible to interpret 2419 * the return values of this mailbox command if we're not 2420 * to change too much of the existing code. 
2421 */ 2422 if (mcp->mb[0] == 0x4001 || mcp->mb[0] == 0x4002 || 2423 mcp->mb[0] == 0x4003 || mcp->mb[0] == 0x4005 || 2424 mcp->mb[0] == 0x4006) 2425 rval = QLA_SUCCESS; 2426 2427 /*EMPTY*/ 2428 ql_dbg(ql_dbg_mbx, vha, 0x1068, 2429 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", 2430 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); 2431 } else { 2432 /*EMPTY*/ 2433 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1069, 2434 "Done %s.\n", __func__); 2435 } 2436 2437 return rval; 2438 } 2439 2440 /* 2441 * qla2x00_login_local_device 2442 * Issue login loop port mailbox command. 2443 * 2444 * Input: 2445 * ha = adapter block pointer. 2446 * loop_id = device loop ID. 2447 * opt = command options. 2448 * 2449 * Returns: 2450 * Return status code. 2451 * 2452 * Context: 2453 * Kernel context. 2454 * 2455 */ 2456 int 2457 qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport, 2458 uint16_t *mb_ret, uint8_t opt) 2459 { 2460 int rval; 2461 mbx_cmd_t mc; 2462 mbx_cmd_t *mcp = &mc; 2463 struct qla_hw_data *ha = vha->hw; 2464 2465 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106a, 2466 "Entered %s.\n", __func__); 2467 2468 if (IS_FWI2_CAPABLE(ha)) 2469 return qla24xx_login_fabric(vha, fcport->loop_id, 2470 fcport->d_id.b.domain, fcport->d_id.b.area, 2471 fcport->d_id.b.al_pa, mb_ret, opt); 2472 2473 mcp->mb[0] = MBC_LOGIN_LOOP_PORT; 2474 if (HAS_EXTENDED_IDS(ha)) 2475 mcp->mb[1] = fcport->loop_id; 2476 else 2477 mcp->mb[1] = fcport->loop_id << 8; 2478 mcp->mb[2] = opt; 2479 mcp->out_mb = MBX_2|MBX_1|MBX_0; 2480 mcp->in_mb = MBX_7|MBX_6|MBX_1|MBX_0; 2481 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2); 2482 mcp->flags = 0; 2483 rval = qla2x00_mailbox_command(vha, mcp); 2484 2485 /* Return mailbox statuses. */ 2486 if (mb_ret != NULL) { 2487 mb_ret[0] = mcp->mb[0]; 2488 mb_ret[1] = mcp->mb[1]; 2489 mb_ret[6] = mcp->mb[6]; 2490 mb_ret[7] = mcp->mb[7]; 2491 } 2492 2493 if (rval != QLA_SUCCESS) { 2494 /* AV tmp code: need to change main mailbox_command function to 2495 * return ok even when the mailbox completion value is not 2496 * SUCCESS. The caller needs to be responsible to interpret 2497 * the return values of this mailbox command if we're not 2498 * to change too much of the existing code. 
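		 * (Same convention as qla2x00_login_fabric() above:
		 * selected 0x40xx login completion statuses are passed
		 * back to the caller through mb[0] while the overall
		 * return value is forced to QLA_SUCCESS.)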
2499 */ 2500 if (mcp->mb[0] == 0x4005 || mcp->mb[0] == 0x4006) 2501 rval = QLA_SUCCESS; 2502 2503 ql_dbg(ql_dbg_mbx, vha, 0x106b, 2504 "Failed=%x mb[0]=%x mb[1]=%x mb[6]=%x mb[7]=%x.\n", 2505 rval, mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]); 2506 } else { 2507 /*EMPTY*/ 2508 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106c, 2509 "Done %s.\n", __func__); 2510 } 2511 2512 return (rval); 2513 } 2514 2515 int 2516 qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, 2517 uint8_t area, uint8_t al_pa) 2518 { 2519 int rval; 2520 struct logio_entry_24xx *lg; 2521 dma_addr_t lg_dma; 2522 struct qla_hw_data *ha = vha->hw; 2523 struct req_que *req; 2524 2525 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106d, 2526 "Entered %s.\n", __func__); 2527 2528 lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma); 2529 if (lg == NULL) { 2530 ql_log(ql_log_warn, vha, 0x106e, 2531 "Failed to allocate logout IOCB.\n"); 2532 return QLA_MEMORY_ALLOC_FAILED; 2533 } 2534 memset(lg, 0, sizeof(struct logio_entry_24xx)); 2535 2536 req = vha->req; 2537 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE; 2538 lg->entry_count = 1; 2539 lg->handle = MAKE_HANDLE(req->id, lg->handle); 2540 lg->nport_handle = cpu_to_le16(loop_id); 2541 lg->control_flags = 2542 cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO| 2543 LCF_FREE_NPORT); 2544 lg->port_id[0] = al_pa; 2545 lg->port_id[1] = area; 2546 lg->port_id[2] = domain; 2547 lg->vp_index = vha->vp_idx; 2548 rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0, 2549 (ha->r_a_tov / 10 * 2) + 2); 2550 if (rval != QLA_SUCCESS) { 2551 ql_dbg(ql_dbg_mbx, vha, 0x106f, 2552 "Failed to issue logout IOCB (%x).\n", rval); 2553 } else if (lg->entry_status != 0) { 2554 ql_dbg(ql_dbg_mbx, vha, 0x1070, 2555 "Failed to complete IOCB -- error status (%x).\n", 2556 lg->entry_status); 2557 rval = QLA_FUNCTION_FAILED; 2558 } else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) { 2559 ql_dbg(ql_dbg_mbx, vha, 0x1071, 2560 "Failed to complete IOCB -- completion status (%x) " 2561 "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status), 2562 le32_to_cpu(lg->io_parameter[0]), 2563 le32_to_cpu(lg->io_parameter[1])); 2564 } else { 2565 /*EMPTY*/ 2566 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1072, 2567 "Done %s.\n", __func__); 2568 } 2569 2570 dma_pool_free(ha->s_dma_pool, lg, lg_dma); 2571 2572 return rval; 2573 } 2574 2575 /* 2576 * qla2x00_fabric_logout 2577 * Issue logout fabric port mailbox command. 2578 * 2579 * Input: 2580 * ha = adapter block pointer. 2581 * loop_id = device loop ID. 2582 * TARGET_QUEUE_LOCK must be released. 2583 * ADAPTER_STATE_LOCK must be released. 2584 * 2585 * Returns: 2586 * qla2x00 local function return status code. 2587 * 2588 * Context: 2589 * Kernel context. 
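 *
 * On FWI-2 capable adapters the equivalent operation is performed by
 * qla24xx_fabric_logout() above, which issues a LOGO IOCB with the
 * implicit-logout and free-nport-handle flags instead of this
 * MBC_LOGOUT_FABRIC_PORT mailbox command.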
2590 */ 2591 int 2592 qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, 2593 uint8_t area, uint8_t al_pa) 2594 { 2595 int rval; 2596 mbx_cmd_t mc; 2597 mbx_cmd_t *mcp = &mc; 2598 2599 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1073, 2600 "Entered %s.\n", __func__); 2601 2602 mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT; 2603 mcp->out_mb = MBX_1|MBX_0; 2604 if (HAS_EXTENDED_IDS(vha->hw)) { 2605 mcp->mb[1] = loop_id; 2606 mcp->mb[10] = 0; 2607 mcp->out_mb |= MBX_10; 2608 } else { 2609 mcp->mb[1] = loop_id << 8; 2610 } 2611 2612 mcp->in_mb = MBX_1|MBX_0; 2613 mcp->tov = MBX_TOV_SECONDS; 2614 mcp->flags = 0; 2615 rval = qla2x00_mailbox_command(vha, mcp); 2616 2617 if (rval != QLA_SUCCESS) { 2618 /*EMPTY*/ 2619 ql_dbg(ql_dbg_mbx, vha, 0x1074, 2620 "Failed=%x mb[1]=%x.\n", rval, mcp->mb[1]); 2621 } else { 2622 /*EMPTY*/ 2623 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1075, 2624 "Done %s.\n", __func__); 2625 } 2626 2627 return rval; 2628 } 2629 2630 /* 2631 * qla2x00_full_login_lip 2632 * Issue full login LIP mailbox command. 2633 * 2634 * Input: 2635 * ha = adapter block pointer. 2636 * TARGET_QUEUE_LOCK must be released. 2637 * ADAPTER_STATE_LOCK must be released. 2638 * 2639 * Returns: 2640 * qla2x00 local function return status code. 2641 * 2642 * Context: 2643 * Kernel context. 2644 */ 2645 int 2646 qla2x00_full_login_lip(scsi_qla_host_t *vha) 2647 { 2648 int rval; 2649 mbx_cmd_t mc; 2650 mbx_cmd_t *mcp = &mc; 2651 2652 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1076, 2653 "Entered %s.\n", __func__); 2654 2655 mcp->mb[0] = MBC_LIP_FULL_LOGIN; 2656 mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? BIT_3 : 0; 2657 mcp->mb[2] = 0; 2658 mcp->mb[3] = 0; 2659 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 2660 mcp->in_mb = MBX_0; 2661 mcp->tov = MBX_TOV_SECONDS; 2662 mcp->flags = 0; 2663 rval = qla2x00_mailbox_command(vha, mcp); 2664 2665 if (rval != QLA_SUCCESS) { 2666 /*EMPTY*/ 2667 ql_dbg(ql_dbg_mbx, vha, 0x1077, "Failed=%x.\n", rval); 2668 } else { 2669 /*EMPTY*/ 2670 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1078, 2671 "Done %s.\n", __func__); 2672 } 2673 2674 return rval; 2675 } 2676 2677 /* 2678 * qla2x00_get_id_list 2679 * 2680 * Input: 2681 * ha = adapter block pointer. 2682 * 2683 * Returns: 2684 * qla2x00 local function return status code. 2685 * 2686 * Context: 2687 * Kernel context. 
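 *
 * Illustrative call sketch (buffer names here are illustrative; the
 * caller is assumed to own a DMA-coherent ID-list buffer):
 *
 *	rval = qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma,
 *	    &entries);
 *
 * On success, 'entries' holds the number of IDs placed in the buffer
 * (taken from mb[1]).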
2688 */ 2689 int 2690 qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma, 2691 uint16_t *entries) 2692 { 2693 int rval; 2694 mbx_cmd_t mc; 2695 mbx_cmd_t *mcp = &mc; 2696 2697 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1079, 2698 "Entered %s.\n", __func__); 2699 2700 if (id_list == NULL) 2701 return QLA_FUNCTION_FAILED; 2702 2703 mcp->mb[0] = MBC_GET_ID_LIST; 2704 mcp->out_mb = MBX_0; 2705 if (IS_FWI2_CAPABLE(vha->hw)) { 2706 mcp->mb[2] = MSW(id_list_dma); 2707 mcp->mb[3] = LSW(id_list_dma); 2708 mcp->mb[6] = MSW(MSD(id_list_dma)); 2709 mcp->mb[7] = LSW(MSD(id_list_dma)); 2710 mcp->mb[8] = 0; 2711 mcp->mb[9] = vha->vp_idx; 2712 mcp->out_mb |= MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2; 2713 } else { 2714 mcp->mb[1] = MSW(id_list_dma); 2715 mcp->mb[2] = LSW(id_list_dma); 2716 mcp->mb[3] = MSW(MSD(id_list_dma)); 2717 mcp->mb[6] = LSW(MSD(id_list_dma)); 2718 mcp->out_mb |= MBX_6|MBX_3|MBX_2|MBX_1; 2719 } 2720 mcp->in_mb = MBX_1|MBX_0; 2721 mcp->tov = MBX_TOV_SECONDS; 2722 mcp->flags = 0; 2723 rval = qla2x00_mailbox_command(vha, mcp); 2724 2725 if (rval != QLA_SUCCESS) { 2726 /*EMPTY*/ 2727 ql_dbg(ql_dbg_mbx, vha, 0x107a, "Failed=%x.\n", rval); 2728 } else { 2729 *entries = mcp->mb[1]; 2730 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107b, 2731 "Done %s.\n", __func__); 2732 } 2733 2734 return rval; 2735 } 2736 2737 /* 2738 * qla2x00_get_resource_cnts 2739 * Get current firmware resource counts. 2740 * 2741 * Input: 2742 * ha = adapter block pointer. 2743 * 2744 * Returns: 2745 * qla2x00 local function return status code. 2746 * 2747 * Context: 2748 * Kernel context. 2749 */ 2750 int 2751 qla2x00_get_resource_cnts(scsi_qla_host_t *vha) 2752 { 2753 struct qla_hw_data *ha = vha->hw; 2754 int rval; 2755 mbx_cmd_t mc; 2756 mbx_cmd_t *mcp = &mc; 2757 2758 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107c, 2759 "Entered %s.\n", __func__); 2760 2761 mcp->mb[0] = MBC_GET_RESOURCE_COUNTS; 2762 mcp->out_mb = MBX_0; 2763 mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 2764 if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw) || IS_QLA27XX(vha->hw)) 2765 mcp->in_mb |= MBX_12; 2766 mcp->tov = MBX_TOV_SECONDS; 2767 mcp->flags = 0; 2768 rval = qla2x00_mailbox_command(vha, mcp); 2769 2770 if (rval != QLA_SUCCESS) { 2771 /*EMPTY*/ 2772 ql_dbg(ql_dbg_mbx, vha, 0x107d, 2773 "Failed mb[0]=%x.\n", mcp->mb[0]); 2774 } else { 2775 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107e, 2776 "Done %s mb1=%x mb2=%x mb3=%x mb6=%x mb7=%x mb10=%x " 2777 "mb11=%x mb12=%x.\n", __func__, mcp->mb[1], mcp->mb[2], 2778 mcp->mb[3], mcp->mb[6], mcp->mb[7], mcp->mb[10], 2779 mcp->mb[11], mcp->mb[12]); 2780 2781 ha->orig_fw_tgt_xcb_count = mcp->mb[1]; 2782 ha->cur_fw_tgt_xcb_count = mcp->mb[2]; 2783 ha->cur_fw_xcb_count = mcp->mb[3]; 2784 ha->orig_fw_xcb_count = mcp->mb[6]; 2785 ha->cur_fw_iocb_count = mcp->mb[7]; 2786 ha->orig_fw_iocb_count = mcp->mb[10]; 2787 if (ha->flags.npiv_supported) 2788 ha->max_npiv_vports = mcp->mb[11]; 2789 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) 2790 ha->fw_max_fcf_count = mcp->mb[12]; 2791 } 2792 2793 return (rval); 2794 } 2795 2796 /* 2797 * qla2x00_get_fcal_position_map 2798 * Get FCAL (LILP) position map using mailbox command 2799 * 2800 * Input: 2801 * ha = adapter state pointer. 2802 * pos_map = buffer pointer (can be NULL). 2803 * 2804 * Returns: 2805 * qla2x00 local function return status code. 2806 * 2807 * Context: 2808 * Kernel context. 
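 *
 * On success the first byte of the returned map holds the number of
 * valid entries that follow (the function dumps pmap[0] + 1 bytes),
 * and the full FCAL_MAP_SIZE buffer is copied to pos_map when the
 * caller supplies one.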
2809 */ 2810 int 2811 qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map) 2812 { 2813 int rval; 2814 mbx_cmd_t mc; 2815 mbx_cmd_t *mcp = &mc; 2816 char *pmap; 2817 dma_addr_t pmap_dma; 2818 struct qla_hw_data *ha = vha->hw; 2819 2820 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107f, 2821 "Entered %s.\n", __func__); 2822 2823 pmap = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma); 2824 if (pmap == NULL) { 2825 ql_log(ql_log_warn, vha, 0x1080, 2826 "Memory alloc failed.\n"); 2827 return QLA_MEMORY_ALLOC_FAILED; 2828 } 2829 memset(pmap, 0, FCAL_MAP_SIZE); 2830 2831 mcp->mb[0] = MBC_GET_FC_AL_POSITION_MAP; 2832 mcp->mb[2] = MSW(pmap_dma); 2833 mcp->mb[3] = LSW(pmap_dma); 2834 mcp->mb[6] = MSW(MSD(pmap_dma)); 2835 mcp->mb[7] = LSW(MSD(pmap_dma)); 2836 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0; 2837 mcp->in_mb = MBX_1|MBX_0; 2838 mcp->buf_size = FCAL_MAP_SIZE; 2839 mcp->flags = MBX_DMA_IN; 2840 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2); 2841 rval = qla2x00_mailbox_command(vha, mcp); 2842 2843 if (rval == QLA_SUCCESS) { 2844 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1081, 2845 "mb0/mb1=%x/%X FC/AL position map size (%x).\n", 2846 mcp->mb[0], mcp->mb[1], (unsigned)pmap[0]); 2847 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111d, 2848 pmap, pmap[0] + 1); 2849 2850 if (pos_map) 2851 memcpy(pos_map, pmap, FCAL_MAP_SIZE); 2852 } 2853 dma_pool_free(ha->s_dma_pool, pmap, pmap_dma); 2854 2855 if (rval != QLA_SUCCESS) { 2856 ql_dbg(ql_dbg_mbx, vha, 0x1082, "Failed=%x.\n", rval); 2857 } else { 2858 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1083, 2859 "Done %s.\n", __func__); 2860 } 2861 2862 return rval; 2863 } 2864 2865 /* 2866 * qla2x00_get_link_status 2867 * 2868 * Input: 2869 * ha = adapter block pointer. 2870 * loop_id = device loop ID. 2871 * ret_buf = pointer to link status return buffer. 2872 * 2873 * Returns: 2874 * 0 = success. 2875 * BIT_0 = mem alloc error. 2876 * BIT_1 = mailbox error. 2877 */ 2878 int 2879 qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id, 2880 struct link_statistics *stats, dma_addr_t stats_dma) 2881 { 2882 int rval; 2883 mbx_cmd_t mc; 2884 mbx_cmd_t *mcp = &mc; 2885 uint32_t *iter = (void *)stats; 2886 ushort dwords = offsetof(typeof(*stats), link_up_cnt)/sizeof(*iter); 2887 struct qla_hw_data *ha = vha->hw; 2888 2889 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1084, 2890 "Entered %s.\n", __func__); 2891 2892 mcp->mb[0] = MBC_GET_LINK_STATUS; 2893 mcp->mb[2] = MSW(LSD(stats_dma)); 2894 mcp->mb[3] = LSW(LSD(stats_dma)); 2895 mcp->mb[6] = MSW(MSD(stats_dma)); 2896 mcp->mb[7] = LSW(MSD(stats_dma)); 2897 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0; 2898 mcp->in_mb = MBX_0; 2899 if (IS_FWI2_CAPABLE(ha)) { 2900 mcp->mb[1] = loop_id; 2901 mcp->mb[4] = 0; 2902 mcp->mb[10] = 0; 2903 mcp->out_mb |= MBX_10|MBX_4|MBX_1; 2904 mcp->in_mb |= MBX_1; 2905 } else if (HAS_EXTENDED_IDS(ha)) { 2906 mcp->mb[1] = loop_id; 2907 mcp->mb[10] = 0; 2908 mcp->out_mb |= MBX_10|MBX_1; 2909 } else { 2910 mcp->mb[1] = loop_id << 8; 2911 mcp->out_mb |= MBX_1; 2912 } 2913 mcp->tov = MBX_TOV_SECONDS; 2914 mcp->flags = IOCTL_CMD; 2915 rval = qla2x00_mailbox_command(vha, mcp); 2916 2917 if (rval == QLA_SUCCESS) { 2918 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) { 2919 ql_dbg(ql_dbg_mbx, vha, 0x1085, 2920 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 2921 rval = QLA_FUNCTION_FAILED; 2922 } else { 2923 /* Re-endianize - firmware data is le32. 
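			 * Only the leading counters, up to but not
			 * including link_up_cnt, are converted here;
			 * 'dwords' was computed from offsetof() above.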
*/ 2924 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1086, 2925 "Done %s.\n", __func__); 2926 for ( ; dwords--; iter++) 2927 le32_to_cpus(iter); 2928 } 2929 } else { 2930 /* Failed. */ 2931 ql_dbg(ql_dbg_mbx, vha, 0x1087, "Failed=%x.\n", rval); 2932 } 2933 2934 return rval; 2935 } 2936 2937 int 2938 qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats, 2939 dma_addr_t stats_dma, uint16_t options) 2940 { 2941 int rval; 2942 mbx_cmd_t mc; 2943 mbx_cmd_t *mcp = &mc; 2944 uint32_t *iter, dwords; 2945 2946 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1088, 2947 "Entered %s.\n", __func__); 2948 2949 memset(&mc, 0, sizeof(mc)); 2950 mc.mb[0] = MBC_GET_LINK_PRIV_STATS; 2951 mc.mb[2] = MSW(stats_dma); 2952 mc.mb[3] = LSW(stats_dma); 2953 mc.mb[6] = MSW(MSD(stats_dma)); 2954 mc.mb[7] = LSW(MSD(stats_dma)); 2955 mc.mb[8] = sizeof(struct link_statistics) / 4; 2956 mc.mb[9] = cpu_to_le16(vha->vp_idx); 2957 mc.mb[10] = cpu_to_le16(options); 2958 2959 rval = qla24xx_send_mb_cmd(vha, &mc); 2960 2961 if (rval == QLA_SUCCESS) { 2962 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) { 2963 ql_dbg(ql_dbg_mbx, vha, 0x1089, 2964 "Failed mb[0]=%x.\n", mcp->mb[0]); 2965 rval = QLA_FUNCTION_FAILED; 2966 } else { 2967 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108a, 2968 "Done %s.\n", __func__); 2969 /* Re-endianize - firmware data is le32. */ 2970 dwords = sizeof(struct link_statistics) / 4; 2971 iter = &stats->link_fail_cnt; 2972 for ( ; dwords--; iter++) 2973 le32_to_cpus(iter); 2974 } 2975 } else { 2976 /* Failed. */ 2977 ql_dbg(ql_dbg_mbx, vha, 0x108b, "Failed=%x.\n", rval); 2978 } 2979 2980 return rval; 2981 } 2982 2983 int 2984 qla24xx_abort_command(srb_t *sp) 2985 { 2986 int rval; 2987 unsigned long flags = 0; 2988 2989 struct abort_entry_24xx *abt; 2990 dma_addr_t abt_dma; 2991 uint32_t handle; 2992 fc_port_t *fcport = sp->fcport; 2993 struct scsi_qla_host *vha = fcport->vha; 2994 struct qla_hw_data *ha = vha->hw; 2995 struct req_que *req = vha->req; 2996 2997 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c, 2998 "Entered %s.\n", __func__); 2999 3000 if (vha->flags.qpairs_available && sp->qpair) 3001 req = sp->qpair->req; 3002 3003 if (ql2xasynctmfenable) 3004 return qla24xx_async_abort_command(sp); 3005 3006 spin_lock_irqsave(&ha->hardware_lock, flags); 3007 for (handle = 1; handle < req->num_outstanding_cmds; handle++) { 3008 if (req->outstanding_cmds[handle] == sp) 3009 break; 3010 } 3011 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3012 if (handle == req->num_outstanding_cmds) { 3013 /* Command not found. 
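		 * The SRB is not in this request queue's outstanding
		 * command array, so there is nothing left to abort.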
*/ 3014 return QLA_FUNCTION_FAILED; 3015 } 3016 3017 abt = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &abt_dma); 3018 if (abt == NULL) { 3019 ql_log(ql_log_warn, vha, 0x108d, 3020 "Failed to allocate abort IOCB.\n"); 3021 return QLA_MEMORY_ALLOC_FAILED; 3022 } 3023 memset(abt, 0, sizeof(struct abort_entry_24xx)); 3024 3025 abt->entry_type = ABORT_IOCB_TYPE; 3026 abt->entry_count = 1; 3027 abt->handle = MAKE_HANDLE(req->id, abt->handle); 3028 abt->nport_handle = cpu_to_le16(fcport->loop_id); 3029 abt->handle_to_abort = MAKE_HANDLE(req->id, handle); 3030 abt->port_id[0] = fcport->d_id.b.al_pa; 3031 abt->port_id[1] = fcport->d_id.b.area; 3032 abt->port_id[2] = fcport->d_id.b.domain; 3033 abt->vp_index = fcport->vha->vp_idx; 3034 3035 abt->req_que_no = cpu_to_le16(req->id); 3036 3037 rval = qla2x00_issue_iocb(vha, abt, abt_dma, 0); 3038 if (rval != QLA_SUCCESS) { 3039 ql_dbg(ql_dbg_mbx, vha, 0x108e, 3040 "Failed to issue IOCB (%x).\n", rval); 3041 } else if (abt->entry_status != 0) { 3042 ql_dbg(ql_dbg_mbx, vha, 0x108f, 3043 "Failed to complete IOCB -- error status (%x).\n", 3044 abt->entry_status); 3045 rval = QLA_FUNCTION_FAILED; 3046 } else if (abt->nport_handle != cpu_to_le16(0)) { 3047 ql_dbg(ql_dbg_mbx, vha, 0x1090, 3048 "Failed to complete IOCB -- completion status (%x).\n", 3049 le16_to_cpu(abt->nport_handle)); 3050 if (abt->nport_handle == CS_IOCB_ERROR) 3051 rval = QLA_FUNCTION_PARAMETER_ERROR; 3052 else 3053 rval = QLA_FUNCTION_FAILED; 3054 } else { 3055 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1091, 3056 "Done %s.\n", __func__); 3057 } 3058 3059 dma_pool_free(ha->s_dma_pool, abt, abt_dma); 3060 3061 return rval; 3062 } 3063 3064 struct tsk_mgmt_cmd { 3065 union { 3066 struct tsk_mgmt_entry tsk; 3067 struct sts_entry_24xx sts; 3068 } p; 3069 }; 3070 3071 static int 3072 __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport, 3073 uint64_t l, int tag) 3074 { 3075 int rval, rval2; 3076 struct tsk_mgmt_cmd *tsk; 3077 struct sts_entry_24xx *sts; 3078 dma_addr_t tsk_dma; 3079 scsi_qla_host_t *vha; 3080 struct qla_hw_data *ha; 3081 struct req_que *req; 3082 struct rsp_que *rsp; 3083 struct qla_qpair *qpair; 3084 3085 vha = fcport->vha; 3086 ha = vha->hw; 3087 req = vha->req; 3088 3089 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1092, 3090 "Entered %s.\n", __func__); 3091 3092 if (vha->vp_idx && vha->qpair) { 3093 /* NPIV port */ 3094 qpair = vha->qpair; 3095 rsp = qpair->rsp; 3096 req = qpair->req; 3097 } else { 3098 rsp = req->rsp; 3099 } 3100 3101 tsk = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma); 3102 if (tsk == NULL) { 3103 ql_log(ql_log_warn, vha, 0x1093, 3104 "Failed to allocate task management IOCB.\n"); 3105 return QLA_MEMORY_ALLOC_FAILED; 3106 } 3107 memset(tsk, 0, sizeof(struct tsk_mgmt_cmd)); 3108 3109 tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE; 3110 tsk->p.tsk.entry_count = 1; 3111 tsk->p.tsk.handle = MAKE_HANDLE(req->id, tsk->p.tsk.handle); 3112 tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id); 3113 tsk->p.tsk.timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); 3114 tsk->p.tsk.control_flags = cpu_to_le32(type); 3115 tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa; 3116 tsk->p.tsk.port_id[1] = fcport->d_id.b.area; 3117 tsk->p.tsk.port_id[2] = fcport->d_id.b.domain; 3118 tsk->p.tsk.vp_index = fcport->vha->vp_idx; 3119 if (type == TCF_LUN_RESET) { 3120 int_to_scsilun(l, &tsk->p.tsk.lun); 3121 host_to_fcp_swap((uint8_t *)&tsk->p.tsk.lun, 3122 sizeof(tsk->p.tsk.lun)); 3123 } 3124 3125 sts = &tsk->p.sts; 3126 rval = qla2x00_issue_iocb(vha, tsk, tsk_dma, 0); 
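	/*
	 * Evaluate the result in order: transport failure from
	 * qla2x00_issue_iocb(), IOCB entry error, IOCB completion
	 * status, and finally any FCP response-info code carried in
	 * the status entry.
	 */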
3127 if (rval != QLA_SUCCESS) { 3128 ql_dbg(ql_dbg_mbx, vha, 0x1094, 3129 "Failed to issue %s reset IOCB (%x).\n", name, rval); 3130 } else if (sts->entry_status != 0) { 3131 ql_dbg(ql_dbg_mbx, vha, 0x1095, 3132 "Failed to complete IOCB -- error status (%x).\n", 3133 sts->entry_status); 3134 rval = QLA_FUNCTION_FAILED; 3135 } else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) { 3136 ql_dbg(ql_dbg_mbx, vha, 0x1096, 3137 "Failed to complete IOCB -- completion status (%x).\n", 3138 le16_to_cpu(sts->comp_status)); 3139 rval = QLA_FUNCTION_FAILED; 3140 } else if (le16_to_cpu(sts->scsi_status) & 3141 SS_RESPONSE_INFO_LEN_VALID) { 3142 if (le32_to_cpu(sts->rsp_data_len) < 4) { 3143 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1097, 3144 "Ignoring inconsistent data length -- not enough " 3145 "response info (%d).\n", 3146 le32_to_cpu(sts->rsp_data_len)); 3147 } else if (sts->data[3]) { 3148 ql_dbg(ql_dbg_mbx, vha, 0x1098, 3149 "Failed to complete IOCB -- response (%x).\n", 3150 sts->data[3]); 3151 rval = QLA_FUNCTION_FAILED; 3152 } 3153 } 3154 3155 /* Issue marker IOCB. */ 3156 rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l, 3157 type == TCF_LUN_RESET ? MK_SYNC_ID_LUN: MK_SYNC_ID); 3158 if (rval2 != QLA_SUCCESS) { 3159 ql_dbg(ql_dbg_mbx, vha, 0x1099, 3160 "Failed to issue marker IOCB (%x).\n", rval2); 3161 } else { 3162 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109a, 3163 "Done %s.\n", __func__); 3164 } 3165 3166 dma_pool_free(ha->s_dma_pool, tsk, tsk_dma); 3167 3168 return rval; 3169 } 3170 3171 int 3172 qla24xx_abort_target(struct fc_port *fcport, uint64_t l, int tag) 3173 { 3174 struct qla_hw_data *ha = fcport->vha->hw; 3175 3176 if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha)) 3177 return qla2x00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag); 3178 3179 return __qla24xx_issue_tmf("Target", TCF_TARGET_RESET, fcport, l, tag); 3180 } 3181 3182 int 3183 qla24xx_lun_reset(struct fc_port *fcport, uint64_t l, int tag) 3184 { 3185 struct qla_hw_data *ha = fcport->vha->hw; 3186 3187 if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha)) 3188 return qla2x00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag); 3189 3190 return __qla24xx_issue_tmf("Lun", TCF_LUN_RESET, fcport, l, tag); 3191 } 3192 3193 int 3194 qla2x00_system_error(scsi_qla_host_t *vha) 3195 { 3196 int rval; 3197 mbx_cmd_t mc; 3198 mbx_cmd_t *mcp = &mc; 3199 struct qla_hw_data *ha = vha->hw; 3200 3201 if (!IS_QLA23XX(ha) && !IS_FWI2_CAPABLE(ha)) 3202 return QLA_FUNCTION_FAILED; 3203 3204 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109b, 3205 "Entered %s.\n", __func__); 3206 3207 mcp->mb[0] = MBC_GEN_SYSTEM_ERROR; 3208 mcp->out_mb = MBX_0; 3209 mcp->in_mb = MBX_0; 3210 mcp->tov = 5; 3211 mcp->flags = 0; 3212 rval = qla2x00_mailbox_command(vha, mcp); 3213 3214 if (rval != QLA_SUCCESS) { 3215 ql_dbg(ql_dbg_mbx, vha, 0x109c, "Failed=%x.\n", rval); 3216 } else { 3217 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109d, 3218 "Done %s.\n", __func__); 3219 } 3220 3221 return rval; 3222 } 3223 3224 int 3225 qla2x00_write_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t data) 3226 { 3227 int rval; 3228 mbx_cmd_t mc; 3229 mbx_cmd_t *mcp = &mc; 3230 3231 if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) && 3232 !IS_QLA27XX(vha->hw)) 3233 return QLA_FUNCTION_FAILED; 3234 3235 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1182, 3236 "Entered %s.\n", __func__); 3237 3238 mcp->mb[0] = MBC_WRITE_SERDES; 3239 mcp->mb[1] = addr; 3240 if (IS_QLA2031(vha->hw)) 3241 mcp->mb[2] = data & 0xff; 3242 else 3243 mcp->mb[2] = data; 3244 3245 mcp->mb[3] = 0; 3246 
mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 3247 mcp->in_mb = MBX_0; 3248 mcp->tov = MBX_TOV_SECONDS; 3249 mcp->flags = 0; 3250 rval = qla2x00_mailbox_command(vha, mcp); 3251 3252 if (rval != QLA_SUCCESS) { 3253 ql_dbg(ql_dbg_mbx, vha, 0x1183, 3254 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 3255 } else { 3256 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1184, 3257 "Done %s.\n", __func__); 3258 } 3259 3260 return rval; 3261 } 3262 3263 int 3264 qla2x00_read_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t *data) 3265 { 3266 int rval; 3267 mbx_cmd_t mc; 3268 mbx_cmd_t *mcp = &mc; 3269 3270 if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) && 3271 !IS_QLA27XX(vha->hw)) 3272 return QLA_FUNCTION_FAILED; 3273 3274 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1185, 3275 "Entered %s.\n", __func__); 3276 3277 mcp->mb[0] = MBC_READ_SERDES; 3278 mcp->mb[1] = addr; 3279 mcp->mb[3] = 0; 3280 mcp->out_mb = MBX_3|MBX_1|MBX_0; 3281 mcp->in_mb = MBX_1|MBX_0; 3282 mcp->tov = MBX_TOV_SECONDS; 3283 mcp->flags = 0; 3284 rval = qla2x00_mailbox_command(vha, mcp); 3285 3286 if (IS_QLA2031(vha->hw)) 3287 *data = mcp->mb[1] & 0xff; 3288 else 3289 *data = mcp->mb[1]; 3290 3291 if (rval != QLA_SUCCESS) { 3292 ql_dbg(ql_dbg_mbx, vha, 0x1186, 3293 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 3294 } else { 3295 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1187, 3296 "Done %s.\n", __func__); 3297 } 3298 3299 return rval; 3300 } 3301 3302 int 3303 qla8044_write_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t data) 3304 { 3305 int rval; 3306 mbx_cmd_t mc; 3307 mbx_cmd_t *mcp = &mc; 3308 3309 if (!IS_QLA8044(vha->hw)) 3310 return QLA_FUNCTION_FAILED; 3311 3312 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x11a0, 3313 "Entered %s.\n", __func__); 3314 3315 mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG; 3316 mcp->mb[1] = HCS_WRITE_SERDES; 3317 mcp->mb[3] = LSW(addr); 3318 mcp->mb[4] = MSW(addr); 3319 mcp->mb[5] = LSW(data); 3320 mcp->mb[6] = MSW(data); 3321 mcp->out_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_1|MBX_0; 3322 mcp->in_mb = MBX_0; 3323 mcp->tov = MBX_TOV_SECONDS; 3324 mcp->flags = 0; 3325 rval = qla2x00_mailbox_command(vha, mcp); 3326 3327 if (rval != QLA_SUCCESS) { 3328 ql_dbg(ql_dbg_mbx, vha, 0x11a1, 3329 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 3330 } else { 3331 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1188, 3332 "Done %s.\n", __func__); 3333 } 3334 3335 return rval; 3336 } 3337 3338 int 3339 qla8044_read_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t *data) 3340 { 3341 int rval; 3342 mbx_cmd_t mc; 3343 mbx_cmd_t *mcp = &mc; 3344 3345 if (!IS_QLA8044(vha->hw)) 3346 return QLA_FUNCTION_FAILED; 3347 3348 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1189, 3349 "Entered %s.\n", __func__); 3350 3351 mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG; 3352 mcp->mb[1] = HCS_READ_SERDES; 3353 mcp->mb[3] = LSW(addr); 3354 mcp->mb[4] = MSW(addr); 3355 mcp->out_mb = MBX_4|MBX_3|MBX_1|MBX_0; 3356 mcp->in_mb = MBX_2|MBX_1|MBX_0; 3357 mcp->tov = MBX_TOV_SECONDS; 3358 mcp->flags = 0; 3359 rval = qla2x00_mailbox_command(vha, mcp); 3360 3361 *data = mcp->mb[2] << 16 | mcp->mb[1]; 3362 3363 if (rval != QLA_SUCCESS) { 3364 ql_dbg(ql_dbg_mbx, vha, 0x118a, 3365 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 3366 } else { 3367 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118b, 3368 "Done %s.\n", __func__); 3369 } 3370 3371 return rval; 3372 } 3373 3374 /** 3375 * qla2x00_set_serdes_params() - 3376 * @ha: HA context 3377 * 3378 * Returns 3379 */ 3380 int 3381 qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g, 3382 uint16_t sw_em_2g, 
uint16_t sw_em_4g) 3383 { 3384 int rval; 3385 mbx_cmd_t mc; 3386 mbx_cmd_t *mcp = &mc; 3387 3388 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109e, 3389 "Entered %s.\n", __func__); 3390 3391 mcp->mb[0] = MBC_SERDES_PARAMS; 3392 mcp->mb[1] = BIT_0; 3393 mcp->mb[2] = sw_em_1g | BIT_15; 3394 mcp->mb[3] = sw_em_2g | BIT_15; 3395 mcp->mb[4] = sw_em_4g | BIT_15; 3396 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 3397 mcp->in_mb = MBX_0; 3398 mcp->tov = MBX_TOV_SECONDS; 3399 mcp->flags = 0; 3400 rval = qla2x00_mailbox_command(vha, mcp); 3401 3402 if (rval != QLA_SUCCESS) { 3403 /*EMPTY*/ 3404 ql_dbg(ql_dbg_mbx, vha, 0x109f, 3405 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 3406 } else { 3407 /*EMPTY*/ 3408 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a0, 3409 "Done %s.\n", __func__); 3410 } 3411 3412 return rval; 3413 } 3414 3415 int 3416 qla2x00_stop_firmware(scsi_qla_host_t *vha) 3417 { 3418 int rval; 3419 mbx_cmd_t mc; 3420 mbx_cmd_t *mcp = &mc; 3421 3422 if (!IS_FWI2_CAPABLE(vha->hw)) 3423 return QLA_FUNCTION_FAILED; 3424 3425 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a1, 3426 "Entered %s.\n", __func__); 3427 3428 mcp->mb[0] = MBC_STOP_FIRMWARE; 3429 mcp->mb[1] = 0; 3430 mcp->out_mb = MBX_1|MBX_0; 3431 mcp->in_mb = MBX_0; 3432 mcp->tov = 5; 3433 mcp->flags = 0; 3434 rval = qla2x00_mailbox_command(vha, mcp); 3435 3436 if (rval != QLA_SUCCESS) { 3437 ql_dbg(ql_dbg_mbx, vha, 0x10a2, "Failed=%x.\n", rval); 3438 if (mcp->mb[0] == MBS_INVALID_COMMAND) 3439 rval = QLA_INVALID_COMMAND; 3440 } else { 3441 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a3, 3442 "Done %s.\n", __func__); 3443 } 3444 3445 return rval; 3446 } 3447 3448 int 3449 qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma, 3450 uint16_t buffers) 3451 { 3452 int rval; 3453 mbx_cmd_t mc; 3454 mbx_cmd_t *mcp = &mc; 3455 3456 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a4, 3457 "Entered %s.\n", __func__); 3458 3459 if (!IS_FWI2_CAPABLE(vha->hw)) 3460 return QLA_FUNCTION_FAILED; 3461 3462 if (unlikely(pci_channel_offline(vha->hw->pdev))) 3463 return QLA_FUNCTION_FAILED; 3464 3465 mcp->mb[0] = MBC_TRACE_CONTROL; 3466 mcp->mb[1] = TC_EFT_ENABLE; 3467 mcp->mb[2] = LSW(eft_dma); 3468 mcp->mb[3] = MSW(eft_dma); 3469 mcp->mb[4] = LSW(MSD(eft_dma)); 3470 mcp->mb[5] = MSW(MSD(eft_dma)); 3471 mcp->mb[6] = buffers; 3472 mcp->mb[7] = TC_AEN_DISABLE; 3473 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 3474 mcp->in_mb = MBX_1|MBX_0; 3475 mcp->tov = MBX_TOV_SECONDS; 3476 mcp->flags = 0; 3477 rval = qla2x00_mailbox_command(vha, mcp); 3478 if (rval != QLA_SUCCESS) { 3479 ql_dbg(ql_dbg_mbx, vha, 0x10a5, 3480 "Failed=%x mb[0]=%x mb[1]=%x.\n", 3481 rval, mcp->mb[0], mcp->mb[1]); 3482 } else { 3483 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a6, 3484 "Done %s.\n", __func__); 3485 } 3486 3487 return rval; 3488 } 3489 3490 int 3491 qla2x00_disable_eft_trace(scsi_qla_host_t *vha) 3492 { 3493 int rval; 3494 mbx_cmd_t mc; 3495 mbx_cmd_t *mcp = &mc; 3496 3497 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a7, 3498 "Entered %s.\n", __func__); 3499 3500 if (!IS_FWI2_CAPABLE(vha->hw)) 3501 return QLA_FUNCTION_FAILED; 3502 3503 if (unlikely(pci_channel_offline(vha->hw->pdev))) 3504 return QLA_FUNCTION_FAILED; 3505 3506 mcp->mb[0] = MBC_TRACE_CONTROL; 3507 mcp->mb[1] = TC_EFT_DISABLE; 3508 mcp->out_mb = MBX_1|MBX_0; 3509 mcp->in_mb = MBX_1|MBX_0; 3510 mcp->tov = MBX_TOV_SECONDS; 3511 mcp->flags = 0; 3512 rval = qla2x00_mailbox_command(vha, mcp); 3513 if (rval != QLA_SUCCESS) { 3514 ql_dbg(ql_dbg_mbx, vha, 0x10a8, 3515 "Failed=%x mb[0]=%x 
mb[1]=%x.\n", 3516 rval, mcp->mb[0], mcp->mb[1]); 3517 } else { 3518 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a9, 3519 "Done %s.\n", __func__); 3520 } 3521 3522 return rval; 3523 } 3524 3525 int 3526 qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma, 3527 uint16_t buffers, uint16_t *mb, uint32_t *dwords) 3528 { 3529 int rval; 3530 mbx_cmd_t mc; 3531 mbx_cmd_t *mcp = &mc; 3532 3533 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10aa, 3534 "Entered %s.\n", __func__); 3535 3536 if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw) && 3537 !IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw)) 3538 return QLA_FUNCTION_FAILED; 3539 3540 if (unlikely(pci_channel_offline(vha->hw->pdev))) 3541 return QLA_FUNCTION_FAILED; 3542 3543 mcp->mb[0] = MBC_TRACE_CONTROL; 3544 mcp->mb[1] = TC_FCE_ENABLE; 3545 mcp->mb[2] = LSW(fce_dma); 3546 mcp->mb[3] = MSW(fce_dma); 3547 mcp->mb[4] = LSW(MSD(fce_dma)); 3548 mcp->mb[5] = MSW(MSD(fce_dma)); 3549 mcp->mb[6] = buffers; 3550 mcp->mb[7] = TC_AEN_DISABLE; 3551 mcp->mb[8] = 0; 3552 mcp->mb[9] = TC_FCE_DEFAULT_RX_SIZE; 3553 mcp->mb[10] = TC_FCE_DEFAULT_TX_SIZE; 3554 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2| 3555 MBX_1|MBX_0; 3556 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 3557 mcp->tov = MBX_TOV_SECONDS; 3558 mcp->flags = 0; 3559 rval = qla2x00_mailbox_command(vha, mcp); 3560 if (rval != QLA_SUCCESS) { 3561 ql_dbg(ql_dbg_mbx, vha, 0x10ab, 3562 "Failed=%x mb[0]=%x mb[1]=%x.\n", 3563 rval, mcp->mb[0], mcp->mb[1]); 3564 } else { 3565 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ac, 3566 "Done %s.\n", __func__); 3567 3568 if (mb) 3569 memcpy(mb, mcp->mb, 8 * sizeof(*mb)); 3570 if (dwords) 3571 *dwords = buffers; 3572 } 3573 3574 return rval; 3575 } 3576 3577 int 3578 qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd) 3579 { 3580 int rval; 3581 mbx_cmd_t mc; 3582 mbx_cmd_t *mcp = &mc; 3583 3584 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ad, 3585 "Entered %s.\n", __func__); 3586 3587 if (!IS_FWI2_CAPABLE(vha->hw)) 3588 return QLA_FUNCTION_FAILED; 3589 3590 if (unlikely(pci_channel_offline(vha->hw->pdev))) 3591 return QLA_FUNCTION_FAILED; 3592 3593 mcp->mb[0] = MBC_TRACE_CONTROL; 3594 mcp->mb[1] = TC_FCE_DISABLE; 3595 mcp->mb[2] = TC_FCE_DISABLE_TRACE; 3596 mcp->out_mb = MBX_2|MBX_1|MBX_0; 3597 mcp->in_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2| 3598 MBX_1|MBX_0; 3599 mcp->tov = MBX_TOV_SECONDS; 3600 mcp->flags = 0; 3601 rval = qla2x00_mailbox_command(vha, mcp); 3602 if (rval != QLA_SUCCESS) { 3603 ql_dbg(ql_dbg_mbx, vha, 0x10ae, 3604 "Failed=%x mb[0]=%x mb[1]=%x.\n", 3605 rval, mcp->mb[0], mcp->mb[1]); 3606 } else { 3607 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10af, 3608 "Done %s.\n", __func__); 3609 3610 if (wr) 3611 *wr = (uint64_t) mcp->mb[5] << 48 | 3612 (uint64_t) mcp->mb[4] << 32 | 3613 (uint64_t) mcp->mb[3] << 16 | 3614 (uint64_t) mcp->mb[2]; 3615 if (rd) 3616 *rd = (uint64_t) mcp->mb[9] << 48 | 3617 (uint64_t) mcp->mb[8] << 32 | 3618 (uint64_t) mcp->mb[7] << 16 | 3619 (uint64_t) mcp->mb[6]; 3620 } 3621 3622 return rval; 3623 } 3624 3625 int 3626 qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id, 3627 uint16_t *port_speed, uint16_t *mb) 3628 { 3629 int rval; 3630 mbx_cmd_t mc; 3631 mbx_cmd_t *mcp = &mc; 3632 3633 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b0, 3634 "Entered %s.\n", __func__); 3635 3636 if (!IS_IIDMA_CAPABLE(vha->hw)) 3637 return QLA_FUNCTION_FAILED; 3638 3639 mcp->mb[0] = MBC_PORT_PARAMS; 3640 mcp->mb[1] = loop_id; 3641 mcp->mb[2] = mcp->mb[3] = 0; 3642 
mcp->mb[9] = vha->vp_idx; 3643 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0; 3644 mcp->in_mb = MBX_3|MBX_1|MBX_0; 3645 mcp->tov = MBX_TOV_SECONDS; 3646 mcp->flags = 0; 3647 rval = qla2x00_mailbox_command(vha, mcp); 3648 3649 /* Return mailbox statuses. */ 3650 if (mb != NULL) { 3651 mb[0] = mcp->mb[0]; 3652 mb[1] = mcp->mb[1]; 3653 mb[3] = mcp->mb[3]; 3654 } 3655 3656 if (rval != QLA_SUCCESS) { 3657 ql_dbg(ql_dbg_mbx, vha, 0x10b1, "Failed=%x.\n", rval); 3658 } else { 3659 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b2, 3660 "Done %s.\n", __func__); 3661 if (port_speed) 3662 *port_speed = mcp->mb[3]; 3663 } 3664 3665 return rval; 3666 } 3667 3668 int 3669 qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id, 3670 uint16_t port_speed, uint16_t *mb) 3671 { 3672 int rval; 3673 mbx_cmd_t mc; 3674 mbx_cmd_t *mcp = &mc; 3675 3676 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b3, 3677 "Entered %s.\n", __func__); 3678 3679 if (!IS_IIDMA_CAPABLE(vha->hw)) 3680 return QLA_FUNCTION_FAILED; 3681 3682 mcp->mb[0] = MBC_PORT_PARAMS; 3683 mcp->mb[1] = loop_id; 3684 mcp->mb[2] = BIT_0; 3685 if (IS_CNA_CAPABLE(vha->hw)) 3686 mcp->mb[3] = port_speed & (BIT_5|BIT_4|BIT_3|BIT_2|BIT_1|BIT_0); 3687 else 3688 mcp->mb[3] = port_speed & (BIT_2|BIT_1|BIT_0); 3689 mcp->mb[9] = vha->vp_idx; 3690 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0; 3691 mcp->in_mb = MBX_3|MBX_1|MBX_0; 3692 mcp->tov = MBX_TOV_SECONDS; 3693 mcp->flags = 0; 3694 rval = qla2x00_mailbox_command(vha, mcp); 3695 3696 /* Return mailbox statuses. */ 3697 if (mb != NULL) { 3698 mb[0] = mcp->mb[0]; 3699 mb[1] = mcp->mb[1]; 3700 mb[3] = mcp->mb[3]; 3701 } 3702 3703 if (rval != QLA_SUCCESS) { 3704 ql_dbg(ql_dbg_mbx, vha, 0x10b4, 3705 "Failed=%x.\n", rval); 3706 } else { 3707 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b5, 3708 "Done %s.\n", __func__); 3709 } 3710 3711 return rval; 3712 } 3713 3714 void 3715 qla24xx_report_id_acquisition(scsi_qla_host_t *vha, 3716 struct vp_rpt_id_entry_24xx *rptid_entry) 3717 { 3718 struct qla_hw_data *ha = vha->hw; 3719 scsi_qla_host_t *vp = NULL; 3720 unsigned long flags; 3721 int found; 3722 port_id_t id; 3723 3724 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6, 3725 "Entered %s.\n", __func__); 3726 3727 if (rptid_entry->entry_status != 0) 3728 return; 3729 3730 id.b.domain = rptid_entry->port_id[2]; 3731 id.b.area = rptid_entry->port_id[1]; 3732 id.b.al_pa = rptid_entry->port_id[0]; 3733 id.b.rsvd_1 = 0; 3734 3735 if (rptid_entry->format == 0) { 3736 /* loop */ 3737 ql_dbg(ql_dbg_async, vha, 0x10b7, 3738 "Format 0 : Number of VPs setup %d, number of " 3739 "VPs acquired %d.\n", rptid_entry->vp_setup, 3740 rptid_entry->vp_acquired); 3741 ql_dbg(ql_dbg_async, vha, 0x10b8, 3742 "Primary port id %02x%02x%02x.\n", 3743 rptid_entry->port_id[2], rptid_entry->port_id[1], 3744 rptid_entry->port_id[0]); 3745 3746 qlt_update_host_map(vha, id); 3747 3748 } else if (rptid_entry->format == 1) { 3749 /* fabric */ 3750 ql_dbg(ql_dbg_async, vha, 0x10b9, 3751 "Format 1: VP[%d] enabled - status %d - with " 3752 "port id %02x%02x%02x.\n", rptid_entry->vp_idx, 3753 rptid_entry->vp_status, 3754 rptid_entry->port_id[2], rptid_entry->port_id[1], 3755 rptid_entry->port_id[0]); 3756 3757 /* buffer to buffer credit flag */ 3758 vha->flags.bbcr_enable = (rptid_entry->u.f1.bbcr & 0xf) != 0; 3759 3760 if (rptid_entry->vp_idx == 0) { 3761 if (rptid_entry->vp_status == VP_STAT_COMPL) { 3762 /* FA-WWN is only for physical port */ 3763 if (qla_ini_mode_enabled(vha) && 3764 ha->flags.fawwpn_enabled && 3765 (rptid_entry->u.f1.flags & 3766 
BIT_6)) { 3767 memcpy(vha->port_name, 3768 rptid_entry->u.f1.port_name, 3769 WWN_SIZE); 3770 } 3771 3772 qlt_update_host_map(vha, id); 3773 } 3774 3775 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags); 3776 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags); 3777 } else { 3778 if (rptid_entry->vp_status != VP_STAT_COMPL && 3779 rptid_entry->vp_status != VP_STAT_ID_CHG) { 3780 ql_dbg(ql_dbg_mbx, vha, 0x10ba, 3781 "Could not acquire ID for VP[%d].\n", 3782 rptid_entry->vp_idx); 3783 return; 3784 } 3785 3786 found = 0; 3787 spin_lock_irqsave(&ha->vport_slock, flags); 3788 list_for_each_entry(vp, &ha->vp_list, list) { 3789 if (rptid_entry->vp_idx == vp->vp_idx) { 3790 found = 1; 3791 break; 3792 } 3793 } 3794 spin_unlock_irqrestore(&ha->vport_slock, flags); 3795 3796 if (!found) 3797 return; 3798 3799 qlt_update_host_map(vp, id); 3800 3801 /* 3802 * Cannot configure here as we are still sitting on the 3803 * response queue. Handle it in dpc context. 3804 */ 3805 set_bit(VP_IDX_ACQUIRED, &vp->vp_flags); 3806 set_bit(REGISTER_FC4_NEEDED, &vp->dpc_flags); 3807 set_bit(REGISTER_FDMI_NEEDED, &vp->dpc_flags); 3808 } 3809 set_bit(VP_DPC_NEEDED, &vha->dpc_flags); 3810 qla2xxx_wake_dpc(vha); 3811 } else if (rptid_entry->format == 2) { 3812 ql_dbg(ql_dbg_async, vha, 0x505f, 3813 "RIDA: format 2/N2N Primary port id %02x%02x%02x.\n", 3814 rptid_entry->port_id[2], rptid_entry->port_id[1], 3815 rptid_entry->port_id[0]); 3816 3817 ql_dbg(ql_dbg_async, vha, 0x5075, 3818 "N2N: Remote WWPN %8phC.\n", 3819 rptid_entry->u.f2.port_name); 3820 3821 /* N2N. direct connect */ 3822 vha->d_id.b.domain = rptid_entry->port_id[2]; 3823 vha->d_id.b.area = rptid_entry->port_id[1]; 3824 vha->d_id.b.al_pa = rptid_entry->port_id[0]; 3825 3826 spin_lock_irqsave(&ha->vport_slock, flags); 3827 qlt_update_vp_map(vha, SET_AL_PA); 3828 spin_unlock_irqrestore(&ha->vport_slock, flags); 3829 } 3830 } 3831 3832 /* 3833 * qla24xx_modify_vp_config 3834 * Change VP configuration for vha 3835 * 3836 * Input: 3837 * vha = adapter block pointer. 3838 * 3839 * Returns: 3840 * qla2xxx local function return status code. 3841 * 3842 * Context: 3843 * Kernel context. 
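 *
 * The VP config IOCB is always issued through the base (physical)
 * port and carries this vport's index together with its node and
 * port names; on success the fc_vport state is moved to
 * FC_VPORT_INITIALIZING.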
3844 */ 3845 int 3846 qla24xx_modify_vp_config(scsi_qla_host_t *vha) 3847 { 3848 int rval; 3849 struct vp_config_entry_24xx *vpmod; 3850 dma_addr_t vpmod_dma; 3851 struct qla_hw_data *ha = vha->hw; 3852 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 3853 3854 /* This can be called by the parent */ 3855 3856 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10bb, 3857 "Entered %s.\n", __func__); 3858 3859 vpmod = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma); 3860 if (!vpmod) { 3861 ql_log(ql_log_warn, vha, 0x10bc, 3862 "Failed to allocate modify VP IOCB.\n"); 3863 return QLA_MEMORY_ALLOC_FAILED; 3864 } 3865 3866 memset(vpmod, 0, sizeof(struct vp_config_entry_24xx)); 3867 vpmod->entry_type = VP_CONFIG_IOCB_TYPE; 3868 vpmod->entry_count = 1; 3869 vpmod->command = VCT_COMMAND_MOD_ENABLE_VPS; 3870 vpmod->vp_count = 1; 3871 vpmod->vp_index1 = vha->vp_idx; 3872 vpmod->options_idx1 = BIT_3|BIT_4|BIT_5; 3873 3874 qlt_modify_vp_config(vha, vpmod); 3875 3876 memcpy(vpmod->node_name_idx1, vha->node_name, WWN_SIZE); 3877 memcpy(vpmod->port_name_idx1, vha->port_name, WWN_SIZE); 3878 vpmod->entry_count = 1; 3879 3880 rval = qla2x00_issue_iocb(base_vha, vpmod, vpmod_dma, 0); 3881 if (rval != QLA_SUCCESS) { 3882 ql_dbg(ql_dbg_mbx, vha, 0x10bd, 3883 "Failed to issue VP config IOCB (%x).\n", rval); 3884 } else if (vpmod->comp_status != 0) { 3885 ql_dbg(ql_dbg_mbx, vha, 0x10be, 3886 "Failed to complete IOCB -- error status (%x).\n", 3887 vpmod->comp_status); 3888 rval = QLA_FUNCTION_FAILED; 3889 } else if (vpmod->comp_status != cpu_to_le16(CS_COMPLETE)) { 3890 ql_dbg(ql_dbg_mbx, vha, 0x10bf, 3891 "Failed to complete IOCB -- completion status (%x).\n", 3892 le16_to_cpu(vpmod->comp_status)); 3893 rval = QLA_FUNCTION_FAILED; 3894 } else { 3895 /* EMPTY */ 3896 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c0, 3897 "Done %s.\n", __func__); 3898 fc_vport_set_state(vha->fc_vport, FC_VPORT_INITIALIZING); 3899 } 3900 dma_pool_free(ha->s_dma_pool, vpmod, vpmod_dma); 3901 3902 return rval; 3903 } 3904 3905 /* 3906 * qla24xx_control_vp 3907 * Enable a virtual port for given host 3908 * 3909 * Input: 3910 * ha = adapter block pointer. 3911 * vhba = virtual adapter (unused) 3912 * index = index number for enabled VP 3913 * 3914 * Returns: 3915 * qla2xxx local function return status code. 3916 * 3917 * Context: 3918 * Kernel context. 
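 *
 * Note: VP index 0 (the physical port) and indices at or above
 * ha->max_npiv_vports are rejected with QLA_PARAMETER_ERROR; the
 * firmware's VP index bitmap starts at 1, so bit (index - 1) is set
 * in the IOCB's vp_idx_map.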
3919 */ 3920 int 3921 qla24xx_control_vp(scsi_qla_host_t *vha, int cmd) 3922 { 3923 int rval; 3924 int map, pos; 3925 struct vp_ctrl_entry_24xx *vce; 3926 dma_addr_t vce_dma; 3927 struct qla_hw_data *ha = vha->hw; 3928 int vp_index = vha->vp_idx; 3929 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 3930 3931 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c1, 3932 "Entered %s enabling index %d.\n", __func__, vp_index); 3933 3934 if (vp_index == 0 || vp_index >= ha->max_npiv_vports) 3935 return QLA_PARAMETER_ERROR; 3936 3937 vce = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &vce_dma); 3938 if (!vce) { 3939 ql_log(ql_log_warn, vha, 0x10c2, 3940 "Failed to allocate VP control IOCB.\n"); 3941 return QLA_MEMORY_ALLOC_FAILED; 3942 } 3943 memset(vce, 0, sizeof(struct vp_ctrl_entry_24xx)); 3944 3945 vce->entry_type = VP_CTRL_IOCB_TYPE; 3946 vce->entry_count = 1; 3947 vce->command = cpu_to_le16(cmd); 3948 vce->vp_count = cpu_to_le16(1); 3949 3950 /* index map in firmware starts with 1; decrement index 3951 * this is ok as we never use index 0 3952 */ 3953 map = (vp_index - 1) / 8; 3954 pos = (vp_index - 1) & 7; 3955 mutex_lock(&ha->vport_lock); 3956 vce->vp_idx_map[map] |= 1 << pos; 3957 mutex_unlock(&ha->vport_lock); 3958 3959 rval = qla2x00_issue_iocb(base_vha, vce, vce_dma, 0); 3960 if (rval != QLA_SUCCESS) { 3961 ql_dbg(ql_dbg_mbx, vha, 0x10c3, 3962 "Failed to issue VP control IOCB (%x).\n", rval); 3963 } else if (vce->entry_status != 0) { 3964 ql_dbg(ql_dbg_mbx, vha, 0x10c4, 3965 "Failed to complete IOCB -- error status (%x).\n", 3966 vce->entry_status); 3967 rval = QLA_FUNCTION_FAILED; 3968 } else if (vce->comp_status != cpu_to_le16(CS_COMPLETE)) { 3969 ql_dbg(ql_dbg_mbx, vha, 0x10c5, 3970 "Failed to complete IOCB -- completion status (%x).\n", 3971 le16_to_cpu(vce->comp_status)); 3972 rval = QLA_FUNCTION_FAILED; 3973 } else { 3974 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c6, 3975 "Done %s.\n", __func__); 3976 } 3977 3978 dma_pool_free(ha->s_dma_pool, vce, vce_dma); 3979 3980 return rval; 3981 } 3982 3983 /* 3984 * qla2x00_send_change_request 3985 * Receive or disable RSCN request from fabric controller 3986 * 3987 * Input: 3988 * ha = adapter block pointer 3989 * format = registration format: 3990 * 0 - Reserved 3991 * 1 - Fabric detected registration 3992 * 2 - N_port detected registration 3993 * 3 - Full registration 3994 * FF - clear registration 3995 * vp_idx = Virtual port index 3996 * 3997 * Returns: 3998 * qla2x00 local function return status code. 
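 *	(Note that on a mailbox failure, or when mb[0] is not
 *	MBS_COMMAND_COMPLETE, the function returns BIT_1 rather than a
 *	QLA_* status code.)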
3999 * 4000 * Context: 4001 * Kernel Context 4002 */ 4003 4004 int 4005 qla2x00_send_change_request(scsi_qla_host_t *vha, uint16_t format, 4006 uint16_t vp_idx) 4007 { 4008 int rval; 4009 mbx_cmd_t mc; 4010 mbx_cmd_t *mcp = &mc; 4011 4012 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c7, 4013 "Entered %s.\n", __func__); 4014 4015 mcp->mb[0] = MBC_SEND_CHANGE_REQUEST; 4016 mcp->mb[1] = format; 4017 mcp->mb[9] = vp_idx; 4018 mcp->out_mb = MBX_9|MBX_1|MBX_0; 4019 mcp->in_mb = MBX_0|MBX_1; 4020 mcp->tov = MBX_TOV_SECONDS; 4021 mcp->flags = 0; 4022 rval = qla2x00_mailbox_command(vha, mcp); 4023 4024 if (rval == QLA_SUCCESS) { 4025 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) { 4026 rval = BIT_1; 4027 } 4028 } else 4029 rval = BIT_1; 4030 4031 return rval; 4032 } 4033 4034 int 4035 qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr, 4036 uint32_t size) 4037 { 4038 int rval; 4039 mbx_cmd_t mc; 4040 mbx_cmd_t *mcp = &mc; 4041 4042 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1009, 4043 "Entered %s.\n", __func__); 4044 4045 if (MSW(addr) || IS_FWI2_CAPABLE(vha->hw)) { 4046 mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED; 4047 mcp->mb[8] = MSW(addr); 4048 mcp->out_mb = MBX_8|MBX_0; 4049 } else { 4050 mcp->mb[0] = MBC_DUMP_RISC_RAM; 4051 mcp->out_mb = MBX_0; 4052 } 4053 mcp->mb[1] = LSW(addr); 4054 mcp->mb[2] = MSW(req_dma); 4055 mcp->mb[3] = LSW(req_dma); 4056 mcp->mb[6] = MSW(MSD(req_dma)); 4057 mcp->mb[7] = LSW(MSD(req_dma)); 4058 mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1; 4059 if (IS_FWI2_CAPABLE(vha->hw)) { 4060 mcp->mb[4] = MSW(size); 4061 mcp->mb[5] = LSW(size); 4062 mcp->out_mb |= MBX_5|MBX_4; 4063 } else { 4064 mcp->mb[4] = LSW(size); 4065 mcp->out_mb |= MBX_4; 4066 } 4067 4068 mcp->in_mb = MBX_0; 4069 mcp->tov = MBX_TOV_SECONDS; 4070 mcp->flags = 0; 4071 rval = qla2x00_mailbox_command(vha, mcp); 4072 4073 if (rval != QLA_SUCCESS) { 4074 ql_dbg(ql_dbg_mbx, vha, 0x1008, 4075 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4076 } else { 4077 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1007, 4078 "Done %s.\n", __func__); 4079 } 4080 4081 return rval; 4082 } 4083 /* 84XX Support **************************************************************/ 4084 4085 struct cs84xx_mgmt_cmd { 4086 union { 4087 struct verify_chip_entry_84xx req; 4088 struct verify_chip_rsp_84xx rsp; 4089 } p; 4090 }; 4091 4092 int 4093 qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status) 4094 { 4095 int rval, retry; 4096 struct cs84xx_mgmt_cmd *mn; 4097 dma_addr_t mn_dma; 4098 uint16_t options; 4099 unsigned long flags; 4100 struct qla_hw_data *ha = vha->hw; 4101 4102 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c8, 4103 "Entered %s.\n", __func__); 4104 4105 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma); 4106 if (mn == NULL) { 4107 return QLA_MEMORY_ALLOC_FAILED; 4108 } 4109 4110 /* Force Update? */ 4111 options = ha->cs84xx->fw_update ? VCO_FORCE_UPDATE : 0; 4112 /* Diagnostic firmware? */ 4113 /* options |= MENLO_DIAG_FW; */ 4114 /* We update the firmware with only one data sequence. 
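	 * That single sequence is therefore also the last one, which
	 * is why VCO_END_OF_DATA is set below.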
*/ 4115 options |= VCO_END_OF_DATA; 4116 4117 do { 4118 retry = 0; 4119 memset(mn, 0, sizeof(*mn)); 4120 mn->p.req.entry_type = VERIFY_CHIP_IOCB_TYPE; 4121 mn->p.req.entry_count = 1; 4122 mn->p.req.options = cpu_to_le16(options); 4123 4124 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111c, 4125 "Dump of Verify Request.\n"); 4126 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111e, 4127 (uint8_t *)mn, sizeof(*mn)); 4128 4129 rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120); 4130 if (rval != QLA_SUCCESS) { 4131 ql_dbg(ql_dbg_mbx, vha, 0x10cb, 4132 "Failed to issue verify IOCB (%x).\n", rval); 4133 goto verify_done; 4134 } 4135 4136 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1110, 4137 "Dump of Verify Response.\n"); 4138 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1118, 4139 (uint8_t *)mn, sizeof(*mn)); 4140 4141 status[0] = le16_to_cpu(mn->p.rsp.comp_status); 4142 status[1] = status[0] == CS_VCS_CHIP_FAILURE ? 4143 le16_to_cpu(mn->p.rsp.failure_code) : 0; 4144 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ce, 4145 "cs=%x fc=%x.\n", status[0], status[1]); 4146 4147 if (status[0] != CS_COMPLETE) { 4148 rval = QLA_FUNCTION_FAILED; 4149 if (!(options & VCO_DONT_UPDATE_FW)) { 4150 ql_dbg(ql_dbg_mbx, vha, 0x10cf, 4151 "Firmware update failed. Retrying " 4152 "without update firmware.\n"); 4153 options |= VCO_DONT_UPDATE_FW; 4154 options &= ~VCO_FORCE_UPDATE; 4155 retry = 1; 4156 } 4157 } else { 4158 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d0, 4159 "Firmware updated to %x.\n", 4160 le32_to_cpu(mn->p.rsp.fw_ver)); 4161 4162 /* NOTE: we only update OP firmware. */ 4163 spin_lock_irqsave(&ha->cs84xx->access_lock, flags); 4164 ha->cs84xx->op_fw_version = 4165 le32_to_cpu(mn->p.rsp.fw_ver); 4166 spin_unlock_irqrestore(&ha->cs84xx->access_lock, 4167 flags); 4168 } 4169 } while (retry); 4170 4171 verify_done: 4172 dma_pool_free(ha->s_dma_pool, mn, mn_dma); 4173 4174 if (rval != QLA_SUCCESS) { 4175 ql_dbg(ql_dbg_mbx, vha, 0x10d1, 4176 "Failed=%x.\n", rval); 4177 } else { 4178 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d2, 4179 "Done %s.\n", __func__); 4180 } 4181 4182 return rval; 4183 } 4184 4185 int 4186 qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req) 4187 { 4188 int rval; 4189 unsigned long flags; 4190 mbx_cmd_t mc; 4191 mbx_cmd_t *mcp = &mc; 4192 struct qla_hw_data *ha = vha->hw; 4193 4194 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d3, 4195 "Entered %s.\n", __func__); 4196 4197 if (IS_SHADOW_REG_CAPABLE(ha)) 4198 req->options |= BIT_13; 4199 4200 mcp->mb[0] = MBC_INITIALIZE_MULTIQ; 4201 mcp->mb[1] = req->options; 4202 mcp->mb[2] = MSW(LSD(req->dma)); 4203 mcp->mb[3] = LSW(LSD(req->dma)); 4204 mcp->mb[6] = MSW(MSD(req->dma)); 4205 mcp->mb[7] = LSW(MSD(req->dma)); 4206 mcp->mb[5] = req->length; 4207 if (req->rsp) 4208 mcp->mb[10] = req->rsp->id; 4209 mcp->mb[12] = req->qos; 4210 mcp->mb[11] = req->vp_idx; 4211 mcp->mb[13] = req->rid; 4212 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) 4213 mcp->mb[15] = 0; 4214 4215 mcp->mb[4] = req->id; 4216 /* que in ptr index */ 4217 mcp->mb[8] = 0; 4218 /* que out ptr index */ 4219 mcp->mb[9] = *req->out_ptr = 0; 4220 mcp->out_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7| 4221 MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 4222 mcp->in_mb = MBX_0; 4223 mcp->flags = MBX_DMA_OUT; 4224 mcp->tov = MBX_TOV_SECONDS * 2; 4225 4226 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) 4227 mcp->in_mb |= MBX_1; 4228 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) { 4229 mcp->out_mb |= MBX_15; 4230 /* debug q create issue in SR-IOV */ 4231 
mcp->in_mb |= MBX_9 | MBX_8 | MBX_7; 4232 } 4233 4234 spin_lock_irqsave(&ha->hardware_lock, flags); 4235 if (!(req->options & BIT_0)) { 4236 WRT_REG_DWORD(req->req_q_in, 0); 4237 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha)) 4238 WRT_REG_DWORD(req->req_q_out, 0); 4239 } 4240 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4241 4242 rval = qla2x00_mailbox_command(vha, mcp); 4243 if (rval != QLA_SUCCESS) { 4244 ql_dbg(ql_dbg_mbx, vha, 0x10d4, 4245 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4246 } else { 4247 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d5, 4248 "Done %s.\n", __func__); 4249 } 4250 4251 return rval; 4252 } 4253 4254 int 4255 qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp) 4256 { 4257 int rval; 4258 unsigned long flags; 4259 mbx_cmd_t mc; 4260 mbx_cmd_t *mcp = &mc; 4261 struct qla_hw_data *ha = vha->hw; 4262 4263 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d6, 4264 "Entered %s.\n", __func__); 4265 4266 if (IS_SHADOW_REG_CAPABLE(ha)) 4267 rsp->options |= BIT_13; 4268 4269 mcp->mb[0] = MBC_INITIALIZE_MULTIQ; 4270 mcp->mb[1] = rsp->options; 4271 mcp->mb[2] = MSW(LSD(rsp->dma)); 4272 mcp->mb[3] = LSW(LSD(rsp->dma)); 4273 mcp->mb[6] = MSW(MSD(rsp->dma)); 4274 mcp->mb[7] = LSW(MSD(rsp->dma)); 4275 mcp->mb[5] = rsp->length; 4276 mcp->mb[14] = rsp->msix->entry; 4277 mcp->mb[13] = rsp->rid; 4278 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) 4279 mcp->mb[15] = 0; 4280 4281 mcp->mb[4] = rsp->id; 4282 /* que in ptr index */ 4283 mcp->mb[8] = *rsp->in_ptr = 0; 4284 /* que out ptr index */ 4285 mcp->mb[9] = 0; 4286 mcp->out_mb = MBX_14|MBX_13|MBX_9|MBX_8|MBX_7 4287 |MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 4288 mcp->in_mb = MBX_0; 4289 mcp->flags = MBX_DMA_OUT; 4290 mcp->tov = MBX_TOV_SECONDS * 2; 4291 4292 if (IS_QLA81XX(ha)) { 4293 mcp->out_mb |= MBX_12|MBX_11|MBX_10; 4294 mcp->in_mb |= MBX_1; 4295 } else if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) { 4296 mcp->out_mb |= MBX_15|MBX_12|MBX_11|MBX_10; 4297 mcp->in_mb |= MBX_1; 4298 /* debug q create issue in SR-IOV */ 4299 mcp->in_mb |= MBX_9 | MBX_8 | MBX_7; 4300 } 4301 4302 spin_lock_irqsave(&ha->hardware_lock, flags); 4303 if (!(rsp->options & BIT_0)) { 4304 WRT_REG_DWORD(rsp->rsp_q_out, 0); 4305 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha)) 4306 WRT_REG_DWORD(rsp->rsp_q_in, 0); 4307 } 4308 4309 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4310 4311 rval = qla2x00_mailbox_command(vha, mcp); 4312 if (rval != QLA_SUCCESS) { 4313 ql_dbg(ql_dbg_mbx, vha, 0x10d7, 4314 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4315 } else { 4316 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d8, 4317 "Done %s.\n", __func__); 4318 } 4319 4320 return rval; 4321 } 4322 4323 int 4324 qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb) 4325 { 4326 int rval; 4327 mbx_cmd_t mc; 4328 mbx_cmd_t *mcp = &mc; 4329 4330 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d9, 4331 "Entered %s.\n", __func__); 4332 4333 mcp->mb[0] = MBC_IDC_ACK; 4334 memcpy(&mcp->mb[1], mb, QLA_IDC_ACK_REGS * sizeof(uint16_t)); 4335 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 4336 mcp->in_mb = MBX_0; 4337 mcp->tov = MBX_TOV_SECONDS; 4338 mcp->flags = 0; 4339 rval = qla2x00_mailbox_command(vha, mcp); 4340 4341 if (rval != QLA_SUCCESS) { 4342 ql_dbg(ql_dbg_mbx, vha, 0x10da, 4343 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4344 } else { 4345 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10db, 4346 "Done %s.\n", __func__); 4347 } 4348 4349 return rval; 4350 } 4351 4352 int 4353 qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size) 4354 { 4355 int rval; 4356 
mbx_cmd_t mc; 4357 mbx_cmd_t *mcp = &mc; 4358 4359 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10dc, 4360 "Entered %s.\n", __func__); 4361 4362 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) && 4363 !IS_QLA27XX(vha->hw)) 4364 return QLA_FUNCTION_FAILED; 4365 4366 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; 4367 mcp->mb[1] = FAC_OPT_CMD_GET_SECTOR_SIZE; 4368 mcp->out_mb = MBX_1|MBX_0; 4369 mcp->in_mb = MBX_1|MBX_0; 4370 mcp->tov = MBX_TOV_SECONDS; 4371 mcp->flags = 0; 4372 rval = qla2x00_mailbox_command(vha, mcp); 4373 4374 if (rval != QLA_SUCCESS) { 4375 ql_dbg(ql_dbg_mbx, vha, 0x10dd, 4376 "Failed=%x mb[0]=%x mb[1]=%x.\n", 4377 rval, mcp->mb[0], mcp->mb[1]); 4378 } else { 4379 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10de, 4380 "Done %s.\n", __func__); 4381 *sector_size = mcp->mb[1]; 4382 } 4383 4384 return rval; 4385 } 4386 4387 int 4388 qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable) 4389 { 4390 int rval; 4391 mbx_cmd_t mc; 4392 mbx_cmd_t *mcp = &mc; 4393 4394 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) && 4395 !IS_QLA27XX(vha->hw)) 4396 return QLA_FUNCTION_FAILED; 4397 4398 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10df, 4399 "Entered %s.\n", __func__); 4400 4401 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; 4402 mcp->mb[1] = enable ? FAC_OPT_CMD_WRITE_ENABLE : 4403 FAC_OPT_CMD_WRITE_PROTECT; 4404 mcp->out_mb = MBX_1|MBX_0; 4405 mcp->in_mb = MBX_1|MBX_0; 4406 mcp->tov = MBX_TOV_SECONDS; 4407 mcp->flags = 0; 4408 rval = qla2x00_mailbox_command(vha, mcp); 4409 4410 if (rval != QLA_SUCCESS) { 4411 ql_dbg(ql_dbg_mbx, vha, 0x10e0, 4412 "Failed=%x mb[0]=%x mb[1]=%x.\n", 4413 rval, mcp->mb[0], mcp->mb[1]); 4414 } else { 4415 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e1, 4416 "Done %s.\n", __func__); 4417 } 4418 4419 return rval; 4420 } 4421 4422 int 4423 qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish) 4424 { 4425 int rval; 4426 mbx_cmd_t mc; 4427 mbx_cmd_t *mcp = &mc; 4428 4429 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) && 4430 !IS_QLA27XX(vha->hw)) 4431 return QLA_FUNCTION_FAILED; 4432 4433 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2, 4434 "Entered %s.\n", __func__); 4435 4436 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; 4437 mcp->mb[1] = FAC_OPT_CMD_ERASE_SECTOR; 4438 mcp->mb[2] = LSW(start); 4439 mcp->mb[3] = MSW(start); 4440 mcp->mb[4] = LSW(finish); 4441 mcp->mb[5] = MSW(finish); 4442 mcp->out_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 4443 mcp->in_mb = MBX_2|MBX_1|MBX_0; 4444 mcp->tov = MBX_TOV_SECONDS; 4445 mcp->flags = 0; 4446 rval = qla2x00_mailbox_command(vha, mcp); 4447 4448 if (rval != QLA_SUCCESS) { 4449 ql_dbg(ql_dbg_mbx, vha, 0x10e3, 4450 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", 4451 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); 4452 } else { 4453 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4, 4454 "Done %s.\n", __func__); 4455 } 4456 4457 return rval; 4458 } 4459 4460 int 4461 qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha) 4462 { 4463 int rval = 0; 4464 mbx_cmd_t mc; 4465 mbx_cmd_t *mcp = &mc; 4466 4467 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e5, 4468 "Entered %s.\n", __func__); 4469 4470 mcp->mb[0] = MBC_RESTART_MPI_FW; 4471 mcp->out_mb = MBX_0; 4472 mcp->in_mb = MBX_0|MBX_1; 4473 mcp->tov = MBX_TOV_SECONDS; 4474 mcp->flags = 0; 4475 rval = qla2x00_mailbox_command(vha, mcp); 4476 4477 if (rval != QLA_SUCCESS) { 4478 ql_dbg(ql_dbg_mbx, vha, 0x10e6, 4479 "Failed=%x mb[0]=%x mb[1]=%x.\n", 4480 rval, mcp->mb[0], mcp->mb[1]); 4481 } else { 4482 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e7, 4483 "Done %s.\n", __func__); 4484 } 
4485 4486 return rval; 4487 } 4488 4489 int 4490 qla82xx_set_driver_version(scsi_qla_host_t *vha, char *version) 4491 { 4492 int rval; 4493 mbx_cmd_t mc; 4494 mbx_cmd_t *mcp = &mc; 4495 int i; 4496 int len; 4497 uint16_t *str; 4498 struct qla_hw_data *ha = vha->hw; 4499 4500 if (!IS_P3P_TYPE(ha)) 4501 return QLA_FUNCTION_FAILED; 4502 4503 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117b, 4504 "Entered %s.\n", __func__); 4505 4506 str = (void *)version; 4507 len = strlen(version); 4508 4509 mcp->mb[0] = MBC_SET_RNID_PARAMS; 4510 mcp->mb[1] = RNID_TYPE_SET_VERSION << 8; 4511 mcp->out_mb = MBX_1|MBX_0; 4512 for (i = 4; i < 16 && len; i++, str++, len -= 2) { 4513 mcp->mb[i] = cpu_to_le16p(str); 4514 mcp->out_mb |= 1<<i; 4515 } 4516 for (; i < 16; i++) { 4517 mcp->mb[i] = 0; 4518 mcp->out_mb |= 1<<i; 4519 } 4520 mcp->in_mb = MBX_1|MBX_0; 4521 mcp->tov = MBX_TOV_SECONDS; 4522 mcp->flags = 0; 4523 rval = qla2x00_mailbox_command(vha, mcp); 4524 4525 if (rval != QLA_SUCCESS) { 4526 ql_dbg(ql_dbg_mbx, vha, 0x117c, 4527 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]); 4528 } else { 4529 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117d, 4530 "Done %s.\n", __func__); 4531 } 4532 4533 return rval; 4534 } 4535 4536 int 4537 qla25xx_set_driver_version(scsi_qla_host_t *vha, char *version) 4538 { 4539 int rval; 4540 mbx_cmd_t mc; 4541 mbx_cmd_t *mcp = &mc; 4542 int len; 4543 uint16_t dwlen; 4544 uint8_t *str; 4545 dma_addr_t str_dma; 4546 struct qla_hw_data *ha = vha->hw; 4547 4548 if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha) || 4549 IS_P3P_TYPE(ha)) 4550 return QLA_FUNCTION_FAILED; 4551 4552 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117e, 4553 "Entered %s.\n", __func__); 4554 4555 str = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &str_dma); 4556 if (!str) { 4557 ql_log(ql_log_warn, vha, 0x117f, 4558 "Failed to allocate driver version param.\n"); 4559 return QLA_MEMORY_ALLOC_FAILED; 4560 } 4561 4562 memcpy(str, "\x7\x3\x11\x0", 4); 4563 dwlen = str[0]; 4564 len = dwlen * 4 - 4; 4565 memset(str + 4, 0, len); 4566 if (len > strlen(version)) 4567 len = strlen(version); 4568 memcpy(str + 4, version, len); 4569 4570 mcp->mb[0] = MBC_SET_RNID_PARAMS; 4571 mcp->mb[1] = RNID_TYPE_SET_VERSION << 8 | dwlen; 4572 mcp->mb[2] = MSW(LSD(str_dma)); 4573 mcp->mb[3] = LSW(LSD(str_dma)); 4574 mcp->mb[6] = MSW(MSD(str_dma)); 4575 mcp->mb[7] = LSW(MSD(str_dma)); 4576 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 4577 mcp->in_mb = MBX_1|MBX_0; 4578 mcp->tov = MBX_TOV_SECONDS; 4579 mcp->flags = 0; 4580 rval = qla2x00_mailbox_command(vha, mcp); 4581 4582 if (rval != QLA_SUCCESS) { 4583 ql_dbg(ql_dbg_mbx, vha, 0x1180, 4584 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]); 4585 } else { 4586 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1181, 4587 "Done %s.\n", __func__); 4588 } 4589 4590 dma_pool_free(ha->s_dma_pool, str, str_dma); 4591 4592 return rval; 4593 } 4594 4595 static int 4596 qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp) 4597 { 4598 int rval; 4599 mbx_cmd_t mc; 4600 mbx_cmd_t *mcp = &mc; 4601 4602 if (!IS_FWI2_CAPABLE(vha->hw)) 4603 return QLA_FUNCTION_FAILED; 4604 4605 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159, 4606 "Entered %s.\n", __func__); 4607 4608 mcp->mb[0] = MBC_GET_RNID_PARAMS; 4609 mcp->mb[1] = RNID_TYPE_ASIC_TEMP << 8; 4610 mcp->out_mb = MBX_1|MBX_0; 4611 mcp->in_mb = MBX_1|MBX_0; 4612 mcp->tov = MBX_TOV_SECONDS; 4613 mcp->flags = 0; 4614 rval = qla2x00_mailbox_command(vha, mcp); 4615 *temp = mcp->mb[1]; 4616 4617 if (rval != QLA_SUCCESS) { 
4618 ql_dbg(ql_dbg_mbx, vha, 0x115a, 4619 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]); 4620 } else { 4621 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b, 4622 "Done %s.\n", __func__); 4623 } 4624 4625 return rval; 4626 } 4627 4628 int 4629 qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp, 4630 uint16_t dev, uint16_t off, uint16_t len, uint16_t opt) 4631 { 4632 int rval; 4633 mbx_cmd_t mc; 4634 mbx_cmd_t *mcp = &mc; 4635 struct qla_hw_data *ha = vha->hw; 4636 4637 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8, 4638 "Entered %s.\n", __func__); 4639 4640 if (!IS_FWI2_CAPABLE(ha)) 4641 return QLA_FUNCTION_FAILED; 4642 4643 if (len == 1) 4644 opt |= BIT_0; 4645 4646 mcp->mb[0] = MBC_READ_SFP; 4647 mcp->mb[1] = dev; 4648 mcp->mb[2] = MSW(sfp_dma); 4649 mcp->mb[3] = LSW(sfp_dma); 4650 mcp->mb[6] = MSW(MSD(sfp_dma)); 4651 mcp->mb[7] = LSW(MSD(sfp_dma)); 4652 mcp->mb[8] = len; 4653 mcp->mb[9] = off; 4654 mcp->mb[10] = opt; 4655 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 4656 mcp->in_mb = MBX_1|MBX_0; 4657 mcp->tov = MBX_TOV_SECONDS; 4658 mcp->flags = 0; 4659 rval = qla2x00_mailbox_command(vha, mcp); 4660 4661 if (opt & BIT_0) 4662 *sfp = mcp->mb[1]; 4663 4664 if (rval != QLA_SUCCESS) { 4665 ql_dbg(ql_dbg_mbx, vha, 0x10e9, 4666 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4667 if (mcp->mb[0] == MBS_COMMAND_ERROR && 4668 mcp->mb[1] == 0x22) 4669 /* sfp is not there */ 4670 rval = QLA_INTERFACE_ERROR; 4671 } else { 4672 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea, 4673 "Done %s.\n", __func__); 4674 } 4675 4676 return rval; 4677 } 4678 4679 int 4680 qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp, 4681 uint16_t dev, uint16_t off, uint16_t len, uint16_t opt) 4682 { 4683 int rval; 4684 mbx_cmd_t mc; 4685 mbx_cmd_t *mcp = &mc; 4686 struct qla_hw_data *ha = vha->hw; 4687 4688 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10eb, 4689 "Entered %s.\n", __func__); 4690 4691 if (!IS_FWI2_CAPABLE(ha)) 4692 return QLA_FUNCTION_FAILED; 4693 4694 if (len == 1) 4695 opt |= BIT_0; 4696 4697 if (opt & BIT_0) 4698 len = *sfp; 4699 4700 mcp->mb[0] = MBC_WRITE_SFP; 4701 mcp->mb[1] = dev; 4702 mcp->mb[2] = MSW(sfp_dma); 4703 mcp->mb[3] = LSW(sfp_dma); 4704 mcp->mb[6] = MSW(MSD(sfp_dma)); 4705 mcp->mb[7] = LSW(MSD(sfp_dma)); 4706 mcp->mb[8] = len; 4707 mcp->mb[9] = off; 4708 mcp->mb[10] = opt; 4709 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 4710 mcp->in_mb = MBX_1|MBX_0; 4711 mcp->tov = MBX_TOV_SECONDS; 4712 mcp->flags = 0; 4713 rval = qla2x00_mailbox_command(vha, mcp); 4714 4715 if (rval != QLA_SUCCESS) { 4716 ql_dbg(ql_dbg_mbx, vha, 0x10ec, 4717 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4718 } else { 4719 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ed, 4720 "Done %s.\n", __func__); 4721 } 4722 4723 return rval; 4724 } 4725 4726 int 4727 qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma, 4728 uint16_t size_in_bytes, uint16_t *actual_size) 4729 { 4730 int rval; 4731 mbx_cmd_t mc; 4732 mbx_cmd_t *mcp = &mc; 4733 4734 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ee, 4735 "Entered %s.\n", __func__); 4736 4737 if (!IS_CNA_CAPABLE(vha->hw)) 4738 return QLA_FUNCTION_FAILED; 4739 4740 mcp->mb[0] = MBC_GET_XGMAC_STATS; 4741 mcp->mb[2] = MSW(stats_dma); 4742 mcp->mb[3] = LSW(stats_dma); 4743 mcp->mb[6] = MSW(MSD(stats_dma)); 4744 mcp->mb[7] = LSW(MSD(stats_dma)); 4745 mcp->mb[8] = size_in_bytes >> 2; 4746 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0; 4747 mcp->in_mb = MBX_2|MBX_1|MBX_0; 4748 
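/* mb[2] reports the returned statistics length in dwords; it is converted to bytes for *actual_size below. */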
mcp->tov = MBX_TOV_SECONDS; 4749 mcp->flags = 0; 4750 rval = qla2x00_mailbox_command(vha, mcp); 4751 4752 if (rval != QLA_SUCCESS) { 4753 ql_dbg(ql_dbg_mbx, vha, 0x10ef, 4754 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", 4755 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); 4756 } else { 4757 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f0, 4758 "Done %s.\n", __func__); 4759 4760 4761 *actual_size = mcp->mb[2] << 2; 4762 } 4763 4764 return rval; 4765 } 4766 4767 int 4768 qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma, 4769 uint16_t size) 4770 { 4771 int rval; 4772 mbx_cmd_t mc; 4773 mbx_cmd_t *mcp = &mc; 4774 4775 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f1, 4776 "Entered %s.\n", __func__); 4777 4778 if (!IS_CNA_CAPABLE(vha->hw)) 4779 return QLA_FUNCTION_FAILED; 4780 4781 mcp->mb[0] = MBC_GET_DCBX_PARAMS; 4782 mcp->mb[1] = 0; 4783 mcp->mb[2] = MSW(tlv_dma); 4784 mcp->mb[3] = LSW(tlv_dma); 4785 mcp->mb[6] = MSW(MSD(tlv_dma)); 4786 mcp->mb[7] = LSW(MSD(tlv_dma)); 4787 mcp->mb[8] = size; 4788 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 4789 mcp->in_mb = MBX_2|MBX_1|MBX_0; 4790 mcp->tov = MBX_TOV_SECONDS; 4791 mcp->flags = 0; 4792 rval = qla2x00_mailbox_command(vha, mcp); 4793 4794 if (rval != QLA_SUCCESS) { 4795 ql_dbg(ql_dbg_mbx, vha, 0x10f2, 4796 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", 4797 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); 4798 } else { 4799 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f3, 4800 "Done %s.\n", __func__); 4801 } 4802 4803 return rval; 4804 } 4805 4806 int 4807 qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data) 4808 { 4809 int rval; 4810 mbx_cmd_t mc; 4811 mbx_cmd_t *mcp = &mc; 4812 4813 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f4, 4814 "Entered %s.\n", __func__); 4815 4816 if (!IS_FWI2_CAPABLE(vha->hw)) 4817 return QLA_FUNCTION_FAILED; 4818 4819 mcp->mb[0] = MBC_READ_RAM_EXTENDED; 4820 mcp->mb[1] = LSW(risc_addr); 4821 mcp->mb[8] = MSW(risc_addr); 4822 mcp->out_mb = MBX_8|MBX_1|MBX_0; 4823 mcp->in_mb = MBX_3|MBX_2|MBX_0; 4824 mcp->tov = 30; 4825 mcp->flags = 0; 4826 rval = qla2x00_mailbox_command(vha, mcp); 4827 if (rval != QLA_SUCCESS) { 4828 ql_dbg(ql_dbg_mbx, vha, 0x10f5, 4829 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4830 } else { 4831 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f6, 4832 "Done %s.\n", __func__); 4833 *data = mcp->mb[3] << 16 | mcp->mb[2]; 4834 } 4835 4836 return rval; 4837 } 4838 4839 int 4840 qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, 4841 uint16_t *mresp) 4842 { 4843 int rval; 4844 mbx_cmd_t mc; 4845 mbx_cmd_t *mcp = &mc; 4846 4847 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f7, 4848 "Entered %s.\n", __func__); 4849 4850 memset(mcp->mb, 0 , sizeof(mcp->mb)); 4851 mcp->mb[0] = MBC_DIAGNOSTIC_LOOP_BACK; 4852 mcp->mb[1] = mreq->options | BIT_6; // BIT_6 specifies 64 bit addressing 4853 4854 /* transfer count */ 4855 mcp->mb[10] = LSW(mreq->transfer_size); 4856 mcp->mb[11] = MSW(mreq->transfer_size); 4857 4858 /* send data address */ 4859 mcp->mb[14] = LSW(mreq->send_dma); 4860 mcp->mb[15] = MSW(mreq->send_dma); 4861 mcp->mb[20] = LSW(MSD(mreq->send_dma)); 4862 mcp->mb[21] = MSW(MSD(mreq->send_dma)); 4863 4864 /* receive data address */ 4865 mcp->mb[16] = LSW(mreq->rcv_dma); 4866 mcp->mb[17] = MSW(mreq->rcv_dma); 4867 mcp->mb[6] = LSW(MSD(mreq->rcv_dma)); 4868 mcp->mb[7] = MSW(MSD(mreq->rcv_dma)); 4869 4870 /* Iteration count */ 4871 mcp->mb[18] = LSW(mreq->iteration_count); 4872 mcp->mb[19] = MSW(mreq->iteration_count); 4873 4874 mcp->out_mb = 
MBX_21|MBX_20|MBX_19|MBX_18|MBX_17|MBX_16|MBX_15| 4875 MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0; 4876 if (IS_CNA_CAPABLE(vha->hw)) 4877 mcp->out_mb |= MBX_2; 4878 mcp->in_mb = MBX_19|MBX_18|MBX_3|MBX_2|MBX_1|MBX_0; 4879 4880 mcp->buf_size = mreq->transfer_size; 4881 mcp->tov = MBX_TOV_SECONDS; 4882 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; 4883 4884 rval = qla2x00_mailbox_command(vha, mcp); 4885 4886 if (rval != QLA_SUCCESS) { 4887 ql_dbg(ql_dbg_mbx, vha, 0x10f8, 4888 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[18]=%x " 4889 "mb[19]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], 4890 mcp->mb[3], mcp->mb[18], mcp->mb[19]); 4891 } else { 4892 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f9, 4893 "Done %s.\n", __func__); 4894 } 4895 4896 /* Copy mailbox information */ 4897 memcpy( mresp, mcp->mb, 64); 4898 return rval; 4899 } 4900 4901 int 4902 qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, 4903 uint16_t *mresp) 4904 { 4905 int rval; 4906 mbx_cmd_t mc; 4907 mbx_cmd_t *mcp = &mc; 4908 struct qla_hw_data *ha = vha->hw; 4909 4910 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fa, 4911 "Entered %s.\n", __func__); 4912 4913 memset(mcp->mb, 0 , sizeof(mcp->mb)); 4914 mcp->mb[0] = MBC_DIAGNOSTIC_ECHO; 4915 /* BIT_6 specifies 64bit address */ 4916 mcp->mb[1] = mreq->options | BIT_15 | BIT_6; 4917 if (IS_CNA_CAPABLE(ha)) { 4918 mcp->mb[2] = vha->fcoe_fcf_idx; 4919 } 4920 mcp->mb[16] = LSW(mreq->rcv_dma); 4921 mcp->mb[17] = MSW(mreq->rcv_dma); 4922 mcp->mb[6] = LSW(MSD(mreq->rcv_dma)); 4923 mcp->mb[7] = MSW(MSD(mreq->rcv_dma)); 4924 4925 mcp->mb[10] = LSW(mreq->transfer_size); 4926 4927 mcp->mb[14] = LSW(mreq->send_dma); 4928 mcp->mb[15] = MSW(mreq->send_dma); 4929 mcp->mb[20] = LSW(MSD(mreq->send_dma)); 4930 mcp->mb[21] = MSW(MSD(mreq->send_dma)); 4931 4932 mcp->out_mb = MBX_21|MBX_20|MBX_17|MBX_16|MBX_15| 4933 MBX_14|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0; 4934 if (IS_CNA_CAPABLE(ha)) 4935 mcp->out_mb |= MBX_2; 4936 4937 mcp->in_mb = MBX_0; 4938 if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || 4939 IS_CNA_CAPABLE(ha) || IS_QLA2031(ha)) 4940 mcp->in_mb |= MBX_1; 4941 if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha)) 4942 mcp->in_mb |= MBX_3; 4943 4944 mcp->tov = MBX_TOV_SECONDS; 4945 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; 4946 mcp->buf_size = mreq->transfer_size; 4947 4948 rval = qla2x00_mailbox_command(vha, mcp); 4949 4950 if (rval != QLA_SUCCESS) { 4951 ql_dbg(ql_dbg_mbx, vha, 0x10fb, 4952 "Failed=%x mb[0]=%x mb[1]=%x.\n", 4953 rval, mcp->mb[0], mcp->mb[1]); 4954 } else { 4955 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fc, 4956 "Done %s.\n", __func__); 4957 } 4958 4959 /* Copy mailbox information */ 4960 memcpy(mresp, mcp->mb, 64); 4961 return rval; 4962 } 4963 4964 int 4965 qla84xx_reset_chip(scsi_qla_host_t *vha, uint16_t enable_diagnostic) 4966 { 4967 int rval; 4968 mbx_cmd_t mc; 4969 mbx_cmd_t *mcp = &mc; 4970 4971 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fd, 4972 "Entered %s enable_diag=%d.\n", __func__, enable_diagnostic); 4973 4974 mcp->mb[0] = MBC_ISP84XX_RESET; 4975 mcp->mb[1] = enable_diagnostic; 4976 mcp->out_mb = MBX_1|MBX_0; 4977 mcp->in_mb = MBX_1|MBX_0; 4978 mcp->tov = MBX_TOV_SECONDS; 4979 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; 4980 rval = qla2x00_mailbox_command(vha, mcp); 4981 4982 if (rval != QLA_SUCCESS) 4983 ql_dbg(ql_dbg_mbx, vha, 0x10fe, "Failed=%x.\n", rval); 4984 else 4985 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ff, 4986 "Done %s.\n", __func__); 4987 4988 return rval; 4989 } 4990 4991 int 4992 
qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data) 4993 { 4994 int rval; 4995 mbx_cmd_t mc; 4996 mbx_cmd_t *mcp = &mc; 4997 4998 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1100, 4999 "Entered %s.\n", __func__); 5000 5001 if (!IS_FWI2_CAPABLE(vha->hw)) 5002 return QLA_FUNCTION_FAILED; 5003 5004 mcp->mb[0] = MBC_WRITE_RAM_WORD_EXTENDED; 5005 mcp->mb[1] = LSW(risc_addr); 5006 mcp->mb[2] = LSW(data); 5007 mcp->mb[3] = MSW(data); 5008 mcp->mb[8] = MSW(risc_addr); 5009 mcp->out_mb = MBX_8|MBX_3|MBX_2|MBX_1|MBX_0; 5010 mcp->in_mb = MBX_0; 5011 mcp->tov = 30; 5012 mcp->flags = 0; 5013 rval = qla2x00_mailbox_command(vha, mcp); 5014 if (rval != QLA_SUCCESS) { 5015 ql_dbg(ql_dbg_mbx, vha, 0x1101, 5016 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5017 } else { 5018 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1102, 5019 "Done %s.\n", __func__); 5020 } 5021 5022 return rval; 5023 } 5024 5025 int 5026 qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb) 5027 { 5028 int rval; 5029 uint32_t stat, timer; 5030 uint16_t mb0 = 0; 5031 struct qla_hw_data *ha = vha->hw; 5032 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 5033 5034 rval = QLA_SUCCESS; 5035 5036 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1103, 5037 "Entered %s.\n", __func__); 5038 5039 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); 5040 5041 /* Write the MBC data to the registers */ 5042 WRT_REG_WORD(®->mailbox0, MBC_WRITE_MPI_REGISTER); 5043 WRT_REG_WORD(®->mailbox1, mb[0]); 5044 WRT_REG_WORD(®->mailbox2, mb[1]); 5045 WRT_REG_WORD(®->mailbox3, mb[2]); 5046 WRT_REG_WORD(®->mailbox4, mb[3]); 5047 5048 WRT_REG_DWORD(®->hccr, HCCRX_SET_HOST_INT); 5049 5050 /* Poll for MBC interrupt */ 5051 for (timer = 6000000; timer; timer--) { 5052 /* Check for pending interrupts. 
*/ 5053 stat = RD_REG_DWORD(®->host_status); 5054 if (stat & HSRX_RISC_INT) { 5055 stat &= 0xff; 5056 5057 if (stat == 0x1 || stat == 0x2 || 5058 stat == 0x10 || stat == 0x11) { 5059 set_bit(MBX_INTERRUPT, 5060 &ha->mbx_cmd_flags); 5061 mb0 = RD_REG_WORD(®->mailbox0); 5062 WRT_REG_DWORD(®->hccr, 5063 HCCRX_CLR_RISC_INT); 5064 RD_REG_DWORD(®->hccr); 5065 break; 5066 } 5067 } 5068 udelay(5); 5069 } 5070 5071 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) 5072 rval = mb0 & MBS_MASK; 5073 else 5074 rval = QLA_FUNCTION_FAILED; 5075 5076 if (rval != QLA_SUCCESS) { 5077 ql_dbg(ql_dbg_mbx, vha, 0x1104, 5078 "Failed=%x mb[0]=%x.\n", rval, mb[0]); 5079 } else { 5080 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1105, 5081 "Done %s.\n", __func__); 5082 } 5083 5084 return rval; 5085 } 5086 5087 int 5088 qla2x00_get_data_rate(scsi_qla_host_t *vha) 5089 { 5090 int rval; 5091 mbx_cmd_t mc; 5092 mbx_cmd_t *mcp = &mc; 5093 struct qla_hw_data *ha = vha->hw; 5094 5095 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106, 5096 "Entered %s.\n", __func__); 5097 5098 if (!IS_FWI2_CAPABLE(ha)) 5099 return QLA_FUNCTION_FAILED; 5100 5101 mcp->mb[0] = MBC_DATA_RATE; 5102 mcp->mb[1] = 0; 5103 mcp->out_mb = MBX_1|MBX_0; 5104 mcp->in_mb = MBX_2|MBX_1|MBX_0; 5105 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) 5106 mcp->in_mb |= MBX_3; 5107 mcp->tov = MBX_TOV_SECONDS; 5108 mcp->flags = 0; 5109 rval = qla2x00_mailbox_command(vha, mcp); 5110 if (rval != QLA_SUCCESS) { 5111 ql_dbg(ql_dbg_mbx, vha, 0x1107, 5112 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5113 } else { 5114 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108, 5115 "Done %s.\n", __func__); 5116 if (mcp->mb[1] != 0x7) 5117 ha->link_data_rate = mcp->mb[1]; 5118 } 5119 5120 return rval; 5121 } 5122 5123 int 5124 qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb) 5125 { 5126 int rval; 5127 mbx_cmd_t mc; 5128 mbx_cmd_t *mcp = &mc; 5129 struct qla_hw_data *ha = vha->hw; 5130 5131 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1109, 5132 "Entered %s.\n", __func__); 5133 5134 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA8044(ha) && 5135 !IS_QLA27XX(ha)) 5136 return QLA_FUNCTION_FAILED; 5137 mcp->mb[0] = MBC_GET_PORT_CONFIG; 5138 mcp->out_mb = MBX_0; 5139 mcp->in_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 5140 mcp->tov = MBX_TOV_SECONDS; 5141 mcp->flags = 0; 5142 5143 rval = qla2x00_mailbox_command(vha, mcp); 5144 5145 if (rval != QLA_SUCCESS) { 5146 ql_dbg(ql_dbg_mbx, vha, 0x110a, 5147 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5148 } else { 5149 /* Copy all bits to preserve original value */ 5150 memcpy(mb, &mcp->mb[1], sizeof(uint16_t) * 4); 5151 5152 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110b, 5153 "Done %s.\n", __func__); 5154 } 5155 return rval; 5156 } 5157 5158 int 5159 qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb) 5160 { 5161 int rval; 5162 mbx_cmd_t mc; 5163 mbx_cmd_t *mcp = &mc; 5164 5165 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110c, 5166 "Entered %s.\n", __func__); 5167 5168 mcp->mb[0] = MBC_SET_PORT_CONFIG; 5169 /* Copy all bits to preserve original setting */ 5170 memcpy(&mcp->mb[1], mb, sizeof(uint16_t) * 4); 5171 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 5172 mcp->in_mb = MBX_0; 5173 mcp->tov = MBX_TOV_SECONDS; 5174 mcp->flags = 0; 5175 rval = qla2x00_mailbox_command(vha, mcp); 5176 5177 if (rval != QLA_SUCCESS) { 5178 ql_dbg(ql_dbg_mbx, vha, 0x110d, 5179 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5180 } else 5181 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110e, 5182 "Done %s.\n", __func__); 5183 5184 return rval; 5185 } 5186 5187 
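/*
 * qla24xx_set_fcp_prio
 *	Set FCP priority for the port at the given loop ID via the
 *	MBC_PORT_PARAMS mailbox command (ISP24xx/25xx only). Whether
 *	priority handling is enabled or disabled follows
 *	ha->flags.fcp_prio_enabled; the completion mailbox registers
 *	are copied back to 'mb' when it is non-NULL.
 *
 * Context:
 *	Kernel context.
 */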
5188 int 5189 qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority, 5190 uint16_t *mb) 5191 { 5192 int rval; 5193 mbx_cmd_t mc; 5194 mbx_cmd_t *mcp = &mc; 5195 struct qla_hw_data *ha = vha->hw; 5196 5197 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110f, 5198 "Entered %s.\n", __func__); 5199 5200 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha)) 5201 return QLA_FUNCTION_FAILED; 5202 5203 mcp->mb[0] = MBC_PORT_PARAMS; 5204 mcp->mb[1] = loop_id; 5205 if (ha->flags.fcp_prio_enabled) 5206 mcp->mb[2] = BIT_1; 5207 else 5208 mcp->mb[2] = BIT_2; 5209 mcp->mb[4] = priority & 0xf; 5210 mcp->mb[9] = vha->vp_idx; 5211 mcp->out_mb = MBX_9|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 5212 mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0; 5213 mcp->tov = 30; 5214 mcp->flags = 0; 5215 rval = qla2x00_mailbox_command(vha, mcp); 5216 if (mb != NULL) { 5217 mb[0] = mcp->mb[0]; 5218 mb[1] = mcp->mb[1]; 5219 mb[3] = mcp->mb[3]; 5220 mb[4] = mcp->mb[4]; 5221 } 5222 5223 if (rval != QLA_SUCCESS) { 5224 ql_dbg(ql_dbg_mbx, vha, 0x10cd, "Failed=%x.\n", rval); 5225 } else { 5226 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10cc, 5227 "Done %s.\n", __func__); 5228 } 5229 5230 return rval; 5231 } 5232 5233 int 5234 qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp) 5235 { 5236 int rval = QLA_FUNCTION_FAILED; 5237 struct qla_hw_data *ha = vha->hw; 5238 uint8_t byte; 5239 5240 if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha)) { 5241 ql_dbg(ql_dbg_mbx, vha, 0x1150, 5242 "Thermal not supported by this card.\n"); 5243 return rval; 5244 } 5245 5246 if (IS_QLA25XX(ha)) { 5247 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC && 5248 ha->pdev->subsystem_device == 0x0175) { 5249 rval = qla2x00_read_sfp(vha, 0, &byte, 5250 0x98, 0x1, 1, BIT_13|BIT_0); 5251 *temp = byte; 5252 return rval; 5253 } 5254 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP && 5255 ha->pdev->subsystem_device == 0x338e) { 5256 rval = qla2x00_read_sfp(vha, 0, &byte, 5257 0x98, 0x1, 1, BIT_15|BIT_14|BIT_0); 5258 *temp = byte; 5259 return rval; 5260 } 5261 ql_dbg(ql_dbg_mbx, vha, 0x10c9, 5262 "Thermal not supported by this card.\n"); 5263 return rval; 5264 } 5265 5266 if (IS_QLA82XX(ha)) { 5267 *temp = qla82xx_read_temperature(vha); 5268 rval = QLA_SUCCESS; 5269 return rval; 5270 } else if (IS_QLA8044(ha)) { 5271 *temp = qla8044_read_temperature(vha); 5272 rval = QLA_SUCCESS; 5273 return rval; 5274 } 5275 5276 rval = qla2x00_read_asic_temperature(vha, temp); 5277 return rval; 5278 } 5279 5280 int 5281 qla82xx_mbx_intr_enable(scsi_qla_host_t *vha) 5282 { 5283 int rval; 5284 struct qla_hw_data *ha = vha->hw; 5285 mbx_cmd_t mc; 5286 mbx_cmd_t *mcp = &mc; 5287 5288 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1017, 5289 "Entered %s.\n", __func__); 5290 5291 if (!IS_FWI2_CAPABLE(ha)) 5292 return QLA_FUNCTION_FAILED; 5293 5294 memset(mcp, 0, sizeof(mbx_cmd_t)); 5295 mcp->mb[0] = MBC_TOGGLE_INTERRUPT; 5296 mcp->mb[1] = 1; 5297 5298 mcp->out_mb = MBX_1|MBX_0; 5299 mcp->in_mb = MBX_0; 5300 mcp->tov = 30; 5301 mcp->flags = 0; 5302 5303 rval = qla2x00_mailbox_command(vha, mcp); 5304 if (rval != QLA_SUCCESS) { 5305 ql_dbg(ql_dbg_mbx, vha, 0x1016, 5306 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5307 } else { 5308 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100e, 5309 "Done %s.\n", __func__); 5310 } 5311 5312 return rval; 5313 } 5314 5315 int 5316 qla82xx_mbx_intr_disable(scsi_qla_host_t *vha) 5317 { 5318 int rval; 5319 struct qla_hw_data *ha = vha->hw; 5320 mbx_cmd_t mc; 5321 mbx_cmd_t *mcp = &mc; 5322 5323 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, 
vha, 0x100d, 5324 "Entered %s.\n", __func__); 5325 5326 if (!IS_P3P_TYPE(ha)) 5327 return QLA_FUNCTION_FAILED; 5328 5329 memset(mcp, 0, sizeof(mbx_cmd_t)); 5330 mcp->mb[0] = MBC_TOGGLE_INTERRUPT; 5331 mcp->mb[1] = 0; 5332 5333 mcp->out_mb = MBX_1|MBX_0; 5334 mcp->in_mb = MBX_0; 5335 mcp->tov = 30; 5336 mcp->flags = 0; 5337 5338 rval = qla2x00_mailbox_command(vha, mcp); 5339 if (rval != QLA_SUCCESS) { 5340 ql_dbg(ql_dbg_mbx, vha, 0x100c, 5341 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5342 } else { 5343 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100b, 5344 "Done %s.\n", __func__); 5345 } 5346 5347 return rval; 5348 } 5349 5350 int 5351 qla82xx_md_get_template_size(scsi_qla_host_t *vha) 5352 { 5353 struct qla_hw_data *ha = vha->hw; 5354 mbx_cmd_t mc; 5355 mbx_cmd_t *mcp = &mc; 5356 int rval = QLA_FUNCTION_FAILED; 5357 5358 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111f, 5359 "Entered %s.\n", __func__); 5360 5361 memset(mcp->mb, 0 , sizeof(mcp->mb)); 5362 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); 5363 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); 5364 mcp->mb[2] = LSW(RQST_TMPLT_SIZE); 5365 mcp->mb[3] = MSW(RQST_TMPLT_SIZE); 5366 5367 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 5368 mcp->in_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8| 5369 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 5370 5371 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; 5372 mcp->tov = MBX_TOV_SECONDS; 5373 rval = qla2x00_mailbox_command(vha, mcp); 5374 5375 /* Always copy back return mailbox values. */ 5376 if (rval != QLA_SUCCESS) { 5377 ql_dbg(ql_dbg_mbx, vha, 0x1120, 5378 "mailbox command FAILED=0x%x, subcode=%x.\n", 5379 (mcp->mb[1] << 16) | mcp->mb[0], 5380 (mcp->mb[3] << 16) | mcp->mb[2]); 5381 } else { 5382 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1121, 5383 "Done %s.\n", __func__); 5384 ha->md_template_size = ((mcp->mb[3] << 16) | mcp->mb[2]); 5385 if (!ha->md_template_size) { 5386 ql_dbg(ql_dbg_mbx, vha, 0x1122, 5387 "Null template size obtained.\n"); 5388 rval = QLA_FUNCTION_FAILED; 5389 } 5390 } 5391 return rval; 5392 } 5393 5394 int 5395 qla82xx_md_get_template(scsi_qla_host_t *vha) 5396 { 5397 struct qla_hw_data *ha = vha->hw; 5398 mbx_cmd_t mc; 5399 mbx_cmd_t *mcp = &mc; 5400 int rval = QLA_FUNCTION_FAILED; 5401 5402 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1123, 5403 "Entered %s.\n", __func__); 5404 5405 ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev, 5406 ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL); 5407 if (!ha->md_tmplt_hdr) { 5408 ql_log(ql_log_warn, vha, 0x1124, 5409 "Unable to allocate memory for Minidump template.\n"); 5410 return rval; 5411 } 5412 5413 memset(mcp->mb, 0 , sizeof(mcp->mb)); 5414 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); 5415 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); 5416 mcp->mb[2] = LSW(RQST_TMPLT); 5417 mcp->mb[3] = MSW(RQST_TMPLT); 5418 mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma)); 5419 mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma)); 5420 mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma)); 5421 mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma)); 5422 mcp->mb[8] = LSW(ha->md_template_size); 5423 mcp->mb[9] = MSW(ha->md_template_size); 5424 5425 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; 5426 mcp->tov = MBX_TOV_SECONDS; 5427 mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8| 5428 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 5429 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0; 5430 rval = qla2x00_mailbox_command(vha, mcp); 5431 5432 if (rval != QLA_SUCCESS) { 5433 ql_dbg(ql_dbg_mbx, vha, 0x1125, 5434 "mailbox command FAILED=0x%x, subcode=%x.\n", 
5435 ((mcp->mb[1] << 16) | mcp->mb[0]), 5436 ((mcp->mb[3] << 16) | mcp->mb[2])); 5437 } else 5438 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1126, 5439 "Done %s.\n", __func__); 5440 return rval; 5441 } 5442 5443 int 5444 qla8044_md_get_template(scsi_qla_host_t *vha) 5445 { 5446 struct qla_hw_data *ha = vha->hw; 5447 mbx_cmd_t mc; 5448 mbx_cmd_t *mcp = &mc; 5449 int rval = QLA_FUNCTION_FAILED; 5450 int offset = 0, size = MINIDUMP_SIZE_36K; 5451 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11f, 5452 "Entered %s.\n", __func__); 5453 5454 ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev, 5455 ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL); 5456 if (!ha->md_tmplt_hdr) { 5457 ql_log(ql_log_warn, vha, 0xb11b, 5458 "Unable to allocate memory for Minidump template.\n"); 5459 return rval; 5460 } 5461 5462 memset(mcp->mb, 0 , sizeof(mcp->mb)); 5463 while (offset < ha->md_template_size) { 5464 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); 5465 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); 5466 mcp->mb[2] = LSW(RQST_TMPLT); 5467 mcp->mb[3] = MSW(RQST_TMPLT); 5468 mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma + offset)); 5469 mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma + offset)); 5470 mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma + offset)); 5471 mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma + offset)); 5472 mcp->mb[8] = LSW(size); 5473 mcp->mb[9] = MSW(size); 5474 mcp->mb[10] = offset & 0x0000FFFF; 5475 mcp->mb[11] = offset & 0xFFFF0000; 5476 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; 5477 mcp->tov = MBX_TOV_SECONDS; 5478 mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8| 5479 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 5480 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0; 5481 rval = qla2x00_mailbox_command(vha, mcp); 5482 5483 if (rval != QLA_SUCCESS) { 5484 ql_dbg(ql_dbg_mbx, vha, 0xb11c, 5485 "mailbox command FAILED=0x%x, subcode=%x.\n", 5486 ((mcp->mb[1] << 16) | mcp->mb[0]), 5487 ((mcp->mb[3] << 16) | mcp->mb[2])); 5488 return rval; 5489 } else 5490 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11d, 5491 "Done %s.\n", __func__); 5492 offset = offset + size; 5493 } 5494 return rval; 5495 } 5496 5497 int 5498 qla81xx_set_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg) 5499 { 5500 int rval; 5501 struct qla_hw_data *ha = vha->hw; 5502 mbx_cmd_t mc; 5503 mbx_cmd_t *mcp = &mc; 5504 5505 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha)) 5506 return QLA_FUNCTION_FAILED; 5507 5508 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1133, 5509 "Entered %s.\n", __func__); 5510 5511 memset(mcp, 0, sizeof(mbx_cmd_t)); 5512 mcp->mb[0] = MBC_SET_LED_CONFIG; 5513 mcp->mb[1] = led_cfg[0]; 5514 mcp->mb[2] = led_cfg[1]; 5515 if (IS_QLA8031(ha)) { 5516 mcp->mb[3] = led_cfg[2]; 5517 mcp->mb[4] = led_cfg[3]; 5518 mcp->mb[5] = led_cfg[4]; 5519 mcp->mb[6] = led_cfg[5]; 5520 } 5521 5522 mcp->out_mb = MBX_2|MBX_1|MBX_0; 5523 if (IS_QLA8031(ha)) 5524 mcp->out_mb |= MBX_6|MBX_5|MBX_4|MBX_3; 5525 mcp->in_mb = MBX_0; 5526 mcp->tov = 30; 5527 mcp->flags = 0; 5528 5529 rval = qla2x00_mailbox_command(vha, mcp); 5530 if (rval != QLA_SUCCESS) { 5531 ql_dbg(ql_dbg_mbx, vha, 0x1134, 5532 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5533 } else { 5534 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1135, 5535 "Done %s.\n", __func__); 5536 } 5537 5538 return rval; 5539 } 5540 5541 int 5542 qla81xx_get_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg) 5543 { 5544 int rval; 5545 struct qla_hw_data *ha = vha->hw; 5546 mbx_cmd_t mc; 5547 mbx_cmd_t *mcp = &mc; 5548 5549 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha)) 5550 return QLA_FUNCTION_FAILED; 5551 5552 
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1136, 5553 "Entered %s.\n", __func__); 5554 5555 memset(mcp, 0, sizeof(mbx_cmd_t)); 5556 mcp->mb[0] = MBC_GET_LED_CONFIG; 5557 5558 mcp->out_mb = MBX_0; 5559 mcp->in_mb = MBX_2|MBX_1|MBX_0; 5560 if (IS_QLA8031(ha)) 5561 mcp->in_mb |= MBX_6|MBX_5|MBX_4|MBX_3; 5562 mcp->tov = 30; 5563 mcp->flags = 0; 5564 5565 rval = qla2x00_mailbox_command(vha, mcp); 5566 if (rval != QLA_SUCCESS) { 5567 ql_dbg(ql_dbg_mbx, vha, 0x1137, 5568 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5569 } else { 5570 led_cfg[0] = mcp->mb[1]; 5571 led_cfg[1] = mcp->mb[2]; 5572 if (IS_QLA8031(ha)) { 5573 led_cfg[2] = mcp->mb[3]; 5574 led_cfg[3] = mcp->mb[4]; 5575 led_cfg[4] = mcp->mb[5]; 5576 led_cfg[5] = mcp->mb[6]; 5577 } 5578 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1138, 5579 "Done %s.\n", __func__); 5580 } 5581 5582 return rval; 5583 } 5584 5585 int 5586 qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable) 5587 { 5588 int rval; 5589 struct qla_hw_data *ha = vha->hw; 5590 mbx_cmd_t mc; 5591 mbx_cmd_t *mcp = &mc; 5592 5593 if (!IS_P3P_TYPE(ha)) 5594 return QLA_FUNCTION_FAILED; 5595 5596 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1127, 5597 "Entered %s.\n", __func__); 5598 5599 memset(mcp, 0, sizeof(mbx_cmd_t)); 5600 mcp->mb[0] = MBC_SET_LED_CONFIG; 5601 if (enable) 5602 mcp->mb[7] = 0xE; 5603 else 5604 mcp->mb[7] = 0xD; 5605 5606 mcp->out_mb = MBX_7|MBX_0; 5607 mcp->in_mb = MBX_0; 5608 mcp->tov = MBX_TOV_SECONDS; 5609 mcp->flags = 0; 5610 5611 rval = qla2x00_mailbox_command(vha, mcp); 5612 if (rval != QLA_SUCCESS) { 5613 ql_dbg(ql_dbg_mbx, vha, 0x1128, 5614 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5615 } else { 5616 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1129, 5617 "Done %s.\n", __func__); 5618 } 5619 5620 return rval; 5621 } 5622 5623 int 5624 qla83xx_wr_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data) 5625 { 5626 int rval; 5627 struct qla_hw_data *ha = vha->hw; 5628 mbx_cmd_t mc; 5629 mbx_cmd_t *mcp = &mc; 5630 5631 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha)) 5632 return QLA_FUNCTION_FAILED; 5633 5634 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1130, 5635 "Entered %s.\n", __func__); 5636 5637 mcp->mb[0] = MBC_WRITE_REMOTE_REG; 5638 mcp->mb[1] = LSW(reg); 5639 mcp->mb[2] = MSW(reg); 5640 mcp->mb[3] = LSW(data); 5641 mcp->mb[4] = MSW(data); 5642 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 5643 5644 mcp->in_mb = MBX_1|MBX_0; 5645 mcp->tov = MBX_TOV_SECONDS; 5646 mcp->flags = 0; 5647 rval = qla2x00_mailbox_command(vha, mcp); 5648 5649 if (rval != QLA_SUCCESS) { 5650 ql_dbg(ql_dbg_mbx, vha, 0x1131, 5651 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5652 } else { 5653 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1132, 5654 "Done %s.\n", __func__); 5655 } 5656 5657 return rval; 5658 } 5659 5660 int 5661 qla2x00_port_logout(scsi_qla_host_t *vha, struct fc_port *fcport) 5662 { 5663 int rval; 5664 struct qla_hw_data *ha = vha->hw; 5665 mbx_cmd_t mc; 5666 mbx_cmd_t *mcp = &mc; 5667 5668 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 5669 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113b, 5670 "Implicit LOGO Unsupported.\n"); 5671 return QLA_FUNCTION_FAILED; 5672 } 5673 5674 5675 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113c, 5676 "Entering %s.\n", __func__); 5677 5678 /* Perform Implicit LOGO. 
*/ 5679 mcp->mb[0] = MBC_PORT_LOGOUT; 5680 mcp->mb[1] = fcport->loop_id; 5681 mcp->mb[10] = BIT_15; 5682 mcp->out_mb = MBX_10|MBX_1|MBX_0; 5683 mcp->in_mb = MBX_0; 5684 mcp->tov = MBX_TOV_SECONDS; 5685 mcp->flags = 0; 5686 rval = qla2x00_mailbox_command(vha, mcp); 5687 if (rval != QLA_SUCCESS) 5688 ql_dbg(ql_dbg_mbx, vha, 0x113d, 5689 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5690 else 5691 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113e, 5692 "Done %s.\n", __func__); 5693 5694 return rval; 5695 } 5696 5697 int 5698 qla83xx_rd_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t *data) 5699 { 5700 int rval; 5701 mbx_cmd_t mc; 5702 mbx_cmd_t *mcp = &mc; 5703 struct qla_hw_data *ha = vha->hw; 5704 unsigned long retry_max_time = jiffies + (2 * HZ); 5705 5706 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha)) 5707 return QLA_FUNCTION_FAILED; 5708 5709 ql_dbg(ql_dbg_mbx, vha, 0x114b, "Entered %s.\n", __func__); 5710 5711 retry_rd_reg: 5712 mcp->mb[0] = MBC_READ_REMOTE_REG; 5713 mcp->mb[1] = LSW(reg); 5714 mcp->mb[2] = MSW(reg); 5715 mcp->out_mb = MBX_2|MBX_1|MBX_0; 5716 mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0; 5717 mcp->tov = MBX_TOV_SECONDS; 5718 mcp->flags = 0; 5719 rval = qla2x00_mailbox_command(vha, mcp); 5720 5721 if (rval != QLA_SUCCESS) { 5722 ql_dbg(ql_dbg_mbx, vha, 0x114c, 5723 "Failed=%x mb[0]=%x mb[1]=%x.\n", 5724 rval, mcp->mb[0], mcp->mb[1]); 5725 } else { 5726 *data = (mcp->mb[3] | (mcp->mb[4] << 16)); 5727 if (*data == QLA8XXX_BAD_VALUE) { 5728 /* 5729 * During soft-reset CAMRAM register reads might 5730 * return 0xbad0bad0. So retry for MAX of 2 sec 5731 * while reading camram registers. 5732 */ 5733 if (time_after(jiffies, retry_max_time)) { 5734 ql_dbg(ql_dbg_mbx, vha, 0x1141, 5735 "Failure to read CAMRAM register. " 5736 "data=0x%x.\n", *data); 5737 return QLA_FUNCTION_FAILED; 5738 } 5739 msleep(100); 5740 goto retry_rd_reg; 5741 } 5742 ql_dbg(ql_dbg_mbx, vha, 0x1142, "Done %s.\n", __func__); 5743 } 5744 5745 return rval; 5746 } 5747 5748 int 5749 qla83xx_restart_nic_firmware(scsi_qla_host_t *vha) 5750 { 5751 int rval; 5752 mbx_cmd_t mc; 5753 mbx_cmd_t *mcp = &mc; 5754 struct qla_hw_data *ha = vha->hw; 5755 5756 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha)) 5757 return QLA_FUNCTION_FAILED; 5758 5759 ql_dbg(ql_dbg_mbx, vha, 0x1143, "Entered %s.\n", __func__); 5760 5761 mcp->mb[0] = MBC_RESTART_NIC_FIRMWARE; 5762 mcp->out_mb = MBX_0; 5763 mcp->in_mb = MBX_1|MBX_0; 5764 mcp->tov = MBX_TOV_SECONDS; 5765 mcp->flags = 0; 5766 rval = qla2x00_mailbox_command(vha, mcp); 5767 5768 if (rval != QLA_SUCCESS) { 5769 ql_dbg(ql_dbg_mbx, vha, 0x1144, 5770 "Failed=%x mb[0]=%x mb[1]=%x.\n", 5771 rval, mcp->mb[0], mcp->mb[1]); 5772 ha->isp_ops->fw_dump(vha, 0); 5773 } else { 5774 ql_dbg(ql_dbg_mbx, vha, 0x1145, "Done %s.\n", __func__); 5775 } 5776 5777 return rval; 5778 } 5779 5780 int 5781 qla83xx_access_control(scsi_qla_host_t *vha, uint16_t options, 5782 uint32_t start_addr, uint32_t end_addr, uint16_t *sector_size) 5783 { 5784 int rval; 5785 mbx_cmd_t mc; 5786 mbx_cmd_t *mcp = &mc; 5787 uint8_t subcode = (uint8_t)options; 5788 struct qla_hw_data *ha = vha->hw; 5789 5790 if (!IS_QLA8031(ha)) 5791 return QLA_FUNCTION_FAILED; 5792 5793 ql_dbg(ql_dbg_mbx, vha, 0x1146, "Entered %s.\n", __func__); 5794 5795 mcp->mb[0] = MBC_SET_ACCESS_CONTROL; 5796 mcp->mb[1] = options; 5797 mcp->out_mb = MBX_1|MBX_0; 5798 if (subcode & BIT_2) { 5799 mcp->mb[2] = LSW(start_addr); 5800 mcp->mb[3] = MSW(start_addr); 5801 mcp->mb[4] = LSW(end_addr); 5802 mcp->mb[5] = MSW(end_addr); 5803 mcp->out_mb |= MBX_5|MBX_4|MBX_3|MBX_2; 5804 } 5805 
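/* On completion, sector-size requests (BIT_5) return the size in mb[1]; lock/unlock subcodes return the lock id in mb[3]/mb[4] (handled below). */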
mcp->in_mb = MBX_2|MBX_1|MBX_0; 5806 if (!(subcode & (BIT_2 | BIT_5))) 5807 mcp->in_mb |= MBX_4|MBX_3; 5808 mcp->tov = MBX_TOV_SECONDS; 5809 mcp->flags = 0; 5810 rval = qla2x00_mailbox_command(vha, mcp); 5811 5812 if (rval != QLA_SUCCESS) { 5813 ql_dbg(ql_dbg_mbx, vha, 0x1147, 5814 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[4]=%x.\n", 5815 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3], 5816 mcp->mb[4]); 5817 ha->isp_ops->fw_dump(vha, 0); 5818 } else { 5819 if (subcode & BIT_5) 5820 *sector_size = mcp->mb[1]; 5821 else if (subcode & (BIT_6 | BIT_7)) { 5822 ql_dbg(ql_dbg_mbx, vha, 0x1148, 5823 "Driver-lock id=%x%x", mcp->mb[4], mcp->mb[3]); 5824 } else if (subcode & (BIT_3 | BIT_4)) { 5825 ql_dbg(ql_dbg_mbx, vha, 0x1149, 5826 "Flash-lock id=%x%x", mcp->mb[4], mcp->mb[3]); 5827 } 5828 ql_dbg(ql_dbg_mbx, vha, 0x114a, "Done %s.\n", __func__); 5829 } 5830 5831 return rval; 5832 } 5833 5834 int 5835 qla2x00_dump_mctp_data(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr, 5836 uint32_t size) 5837 { 5838 int rval; 5839 mbx_cmd_t mc; 5840 mbx_cmd_t *mcp = &mc; 5841 5842 if (!IS_MCTP_CAPABLE(vha->hw)) 5843 return QLA_FUNCTION_FAILED; 5844 5845 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114f, 5846 "Entered %s.\n", __func__); 5847 5848 mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED; 5849 mcp->mb[1] = LSW(addr); 5850 mcp->mb[2] = MSW(req_dma); 5851 mcp->mb[3] = LSW(req_dma); 5852 mcp->mb[4] = MSW(size); 5853 mcp->mb[5] = LSW(size); 5854 mcp->mb[6] = MSW(MSD(req_dma)); 5855 mcp->mb[7] = LSW(MSD(req_dma)); 5856 mcp->mb[8] = MSW(addr); 5857 /* Setting RAM ID to valid */ 5858 mcp->mb[10] |= BIT_7; 5859 /* For MCTP RAM ID is 0x40 */ 5860 mcp->mb[10] |= 0x40; 5861 5862 mcp->out_mb |= MBX_10|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1| 5863 MBX_0; 5864 5865 mcp->in_mb = MBX_0; 5866 mcp->tov = MBX_TOV_SECONDS; 5867 mcp->flags = 0; 5868 rval = qla2x00_mailbox_command(vha, mcp); 5869 5870 if (rval != QLA_SUCCESS) { 5871 ql_dbg(ql_dbg_mbx, vha, 0x114e, 5872 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5873 } else { 5874 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114d, 5875 "Done %s.\n", __func__); 5876 } 5877 5878 return rval; 5879 } 5880 5881 int 5882 qla26xx_dport_diagnostics(scsi_qla_host_t *vha, 5883 void *dd_buf, uint size, uint options) 5884 { 5885 int rval; 5886 mbx_cmd_t mc; 5887 mbx_cmd_t *mcp = &mc; 5888 dma_addr_t dd_dma; 5889 5890 if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw)) 5891 return QLA_FUNCTION_FAILED; 5892 5893 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x119f, 5894 "Entered %s.\n", __func__); 5895 5896 dd_dma = dma_map_single(&vha->hw->pdev->dev, 5897 dd_buf, size, DMA_FROM_DEVICE); 5898 if (dma_mapping_error(&vha->hw->pdev->dev, dd_dma)) { 5899 ql_log(ql_log_warn, vha, 0x1194, "Failed to map dma buffer.\n"); 5900 return QLA_MEMORY_ALLOC_FAILED; 5901 } 5902 5903 memset(dd_buf, 0, size); 5904 5905 mcp->mb[0] = MBC_DPORT_DIAGNOSTICS; 5906 mcp->mb[1] = options; 5907 mcp->mb[2] = MSW(LSD(dd_dma)); 5908 mcp->mb[3] = LSW(LSD(dd_dma)); 5909 mcp->mb[6] = MSW(MSD(dd_dma)); 5910 mcp->mb[7] = LSW(MSD(dd_dma)); 5911 mcp->mb[8] = size; 5912 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 5913 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0; 5914 mcp->buf_size = size; 5915 mcp->flags = MBX_DMA_IN; 5916 mcp->tov = MBX_TOV_SECONDS * 4; 5917 rval = qla2x00_mailbox_command(vha, mcp); 5918 5919 if (rval != QLA_SUCCESS) { 5920 ql_dbg(ql_dbg_mbx, vha, 0x1195, "Failed=%x.\n", rval); 5921 } else { 5922 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1196, 5923 "Done %s.\n", __func__); 5924 } 5925 5926 
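/* Unmap the diagnostics buffer regardless of the mailbox command outcome. */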
dma_unmap_single(&vha->hw->pdev->dev, dd_dma, 5927 size, DMA_FROM_DEVICE); 5928 5929 return rval; 5930 } 5931 5932 static void qla2x00_async_mb_sp_done(void *s, int res) 5933 { 5934 struct srb *sp = s; 5935 5936 sp->u.iocb_cmd.u.mbx.rc = res; 5937 5938 complete(&sp->u.iocb_cmd.u.mbx.comp); 5939 /* don't free sp here. Let the caller do the free */ 5940 } 5941 5942 /* 5943 * This mailbox uses the IOCB interface to send a mailbox command. 5944 * This allows non-critical (non chip setup) commands to go 5945 * out in parallel. 5946 */ 5947 int qla24xx_send_mb_cmd(struct scsi_qla_host *vha, mbx_cmd_t *mcp) 5948 { 5949 int rval = QLA_FUNCTION_FAILED; 5950 srb_t *sp; 5951 struct srb_iocb *c; 5952 5953 if (!vha->hw->flags.fw_started) 5954 goto done; 5955 5956 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL); 5957 if (!sp) 5958 goto done; 5959 5960 sp->type = SRB_MB_IOCB; 5961 sp->name = mb_to_str(mcp->mb[0]); 5962 5963 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); 5964 5965 memcpy(sp->u.iocb_cmd.u.mbx.out_mb, mcp->mb, SIZEOF_IOCB_MB_REG); 5966 5967 c = &sp->u.iocb_cmd; 5968 c->timeout = qla2x00_async_iocb_timeout; 5969 init_completion(&c->u.mbx.comp); 5970 5971 sp->done = qla2x00_async_mb_sp_done; 5972 5973 rval = qla2x00_start_sp(sp); 5974 if (rval != QLA_SUCCESS) { 5975 ql_dbg(ql_dbg_mbx, vha, 0x1018, 5976 "%s: %s Failed submission. %x.\n", 5977 __func__, sp->name, rval); 5978 goto done_free_sp; 5979 } 5980 5981 ql_dbg(ql_dbg_mbx, vha, 0x113f, "MB:%s hndl %x submitted\n", 5982 sp->name, sp->handle); 5983 5984 wait_for_completion(&c->u.mbx.comp); 5985 memcpy(mcp->mb, sp->u.iocb_cmd.u.mbx.in_mb, SIZEOF_IOCB_MB_REG); 5986 5987 rval = c->u.mbx.rc; 5988 switch (rval) { 5989 case QLA_FUNCTION_TIMEOUT: 5990 ql_dbg(ql_dbg_mbx, vha, 0x1140, "%s: %s Timeout. %x.\n", 5991 __func__, sp->name, rval); 5992 break; 5993 case QLA_SUCCESS: 5994 ql_dbg(ql_dbg_mbx, vha, 0x119d, "%s: %s done.\n", 5995 __func__, sp->name); 5996 sp->free(sp); 5997 break; 5998 default: 5999 ql_dbg(ql_dbg_mbx, vha, 0x119e, "%s: %s Failed. 
%x.\n", 6000 __func__, sp->name, rval); 6001 sp->free(sp); 6002 break; 6003 } 6004 6005 return rval; 6006 6007 done_free_sp: 6008 sp->free(sp); 6009 done: 6010 return rval; 6011 } 6012 6013 /* 6014 * qla24xx_gpdb_wait 6015 * NOTE: Do not call this routine from DPC thread 6016 */ 6017 int qla24xx_gpdb_wait(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt) 6018 { 6019 int rval = QLA_FUNCTION_FAILED; 6020 dma_addr_t pd_dma; 6021 struct port_database_24xx *pd; 6022 struct qla_hw_data *ha = vha->hw; 6023 mbx_cmd_t mc; 6024 6025 if (!vha->hw->flags.fw_started) 6026 goto done; 6027 6028 pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma); 6029 if (pd == NULL) { 6030 ql_log(ql_log_warn, vha, 0xd047, 6031 "Failed to allocate port database structure.\n"); 6032 goto done_free_sp; 6033 } 6034 memset(pd, 0, max(PORT_DATABASE_SIZE, PORT_DATABASE_24XX_SIZE)); 6035 6036 memset(&mc, 0, sizeof(mc)); 6037 mc.mb[0] = MBC_GET_PORT_DATABASE; 6038 mc.mb[1] = cpu_to_le16(fcport->loop_id); 6039 mc.mb[2] = MSW(pd_dma); 6040 mc.mb[3] = LSW(pd_dma); 6041 mc.mb[6] = MSW(MSD(pd_dma)); 6042 mc.mb[7] = LSW(MSD(pd_dma)); 6043 mc.mb[9] = cpu_to_le16(vha->vp_idx); 6044 mc.mb[10] = cpu_to_le16((uint16_t)opt); 6045 6046 rval = qla24xx_send_mb_cmd(vha, &mc); 6047 if (rval != QLA_SUCCESS) { 6048 ql_dbg(ql_dbg_mbx, vha, 0x1193, 6049 "%s: %8phC fail\n", __func__, fcport->port_name); 6050 goto done_free_sp; 6051 } 6052 6053 rval = __qla24xx_parse_gpdb(vha, fcport, pd); 6054 6055 ql_dbg(ql_dbg_mbx, vha, 0x1197, "%s: %8phC done\n", 6056 __func__, fcport->port_name); 6057 6058 done_free_sp: 6059 if (pd) 6060 dma_pool_free(ha->s_dma_pool, pd, pd_dma); 6061 done: 6062 return rval; 6063 } 6064 6065 int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, 6066 struct port_database_24xx *pd) 6067 { 6068 int rval = QLA_SUCCESS; 6069 uint64_t zero = 0; 6070 u8 current_login_state, last_login_state; 6071 6072 if (fcport->fc4f_nvme) { 6073 current_login_state = pd->current_login_state >> 4; 6074 last_login_state = pd->last_login_state >> 4; 6075 } else { 6076 current_login_state = pd->current_login_state & 0xf; 6077 last_login_state = pd->last_login_state & 0xf; 6078 } 6079 6080 /* Check for logged in state. */ 6081 if (current_login_state != PDS_PRLI_COMPLETE && 6082 last_login_state != PDS_PRLI_COMPLETE) { 6083 ql_dbg(ql_dbg_mbx, vha, 0x119a, 6084 "Unable to verify login-state (%x/%x) for loop_id %x.\n", 6085 current_login_state, last_login_state, fcport->loop_id); 6086 rval = QLA_FUNCTION_FAILED; 6087 goto gpd_error_out; 6088 } 6089 6090 if (fcport->loop_id == FC_NO_LOOP_ID || 6091 (memcmp(fcport->port_name, (uint8_t *)&zero, 8) && 6092 memcmp(fcport->port_name, pd->port_name, 8))) { 6093 /* We lost the device mid way. */ 6094 rval = QLA_NOT_LOGGED_IN; 6095 goto gpd_error_out; 6096 } 6097 6098 /* Names are little-endian. */ 6099 memcpy(fcport->node_name, pd->node_name, WWN_SIZE); 6100 memcpy(fcport->port_name, pd->port_name, WWN_SIZE); 6101 6102 /* Get port_id of device. */ 6103 fcport->d_id.b.domain = pd->port_id[0]; 6104 fcport->d_id.b.area = pd->port_id[1]; 6105 fcport->d_id.b.al_pa = pd->port_id[2]; 6106 fcport->d_id.b.rsvd_1 = 0; 6107 6108 if (fcport->fc4f_nvme) { 6109 fcport->nvme_prli_service_param = 6110 pd->prli_nvme_svc_param_word_3; 6111 fcport->port_type = FCT_NVME; 6112 } else { 6113 /* If not target must be initiator or unknown type. 
*/ 6114 if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0) 6115 fcport->port_type = FCT_INITIATOR; 6116 else 6117 fcport->port_type = FCT_TARGET; 6118 } 6119 /* Passback COS information. */ 6120 fcport->supported_classes = (pd->flags & PDF_CLASS_2) ? 6121 FC_COS_CLASS2 : FC_COS_CLASS3; 6122 6123 if (pd->prli_svc_param_word_3[0] & BIT_7) { 6124 fcport->flags |= FCF_CONF_COMP_SUPPORTED; 6125 fcport->conf_compl_supported = 1; 6126 } 6127 6128 gpd_error_out: 6129 return rval; 6130 } 6131 6132 /* 6133 * qla24xx_gidlist_wait 6134 * NOTE: don't call this routine from DPC thread. 6135 */ 6136 int qla24xx_gidlist_wait(struct scsi_qla_host *vha, 6137 void *id_list, dma_addr_t id_list_dma, uint16_t *entries) 6138 { 6139 int rval = QLA_FUNCTION_FAILED; 6140 mbx_cmd_t mc; 6141 6142 if (!vha->hw->flags.fw_started) 6143 goto done; 6144 6145 memset(&mc, 0, sizeof(mc)); 6146 mc.mb[0] = MBC_GET_ID_LIST; 6147 mc.mb[2] = MSW(id_list_dma); 6148 mc.mb[3] = LSW(id_list_dma); 6149 mc.mb[6] = MSW(MSD(id_list_dma)); 6150 mc.mb[7] = LSW(MSD(id_list_dma)); 6151 mc.mb[8] = 0; 6152 mc.mb[9] = cpu_to_le16(vha->vp_idx); 6153 6154 rval = qla24xx_send_mb_cmd(vha, &mc); 6155 if (rval != QLA_SUCCESS) { 6156 ql_dbg(ql_dbg_mbx, vha, 0x119b, 6157 "%s: fail\n", __func__); 6158 } else { 6159 *entries = mc.mb[1]; 6160 ql_dbg(ql_dbg_mbx, vha, 0x119c, 6161 "%s: done\n", __func__); 6162 } 6163 done: 6164 return rval; 6165 } 6166 6167 int qla27xx_set_zio_threshold(scsi_qla_host_t *vha, uint16_t value) 6168 { 6169 int rval; 6170 mbx_cmd_t mc; 6171 mbx_cmd_t *mcp = &mc; 6172 6173 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1200, 6174 "Entered %s\n", __func__); 6175 6176 memset(mcp->mb, 0, sizeof(mcp->mb)); 6177 mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD; 6178 mcp->mb[1] = cpu_to_le16(1); 6179 mcp->mb[2] = cpu_to_le16(value); 6180 mcp->out_mb = MBX_2 | MBX_1 | MBX_0; 6181 mcp->in_mb = MBX_2 | MBX_0; 6182 mcp->tov = MBX_TOV_SECONDS; 6183 mcp->flags = 0; 6184 6185 rval = qla2x00_mailbox_command(vha, mcp); 6186 6187 ql_dbg(ql_dbg_mbx, vha, 0x1201, "%s %x\n", 6188 (rval != QLA_SUCCESS) ? "Failed" : "Done", rval); 6189 6190 return rval; 6191 } 6192 6193 int qla27xx_get_zio_threshold(scsi_qla_host_t *vha, uint16_t *value) 6194 { 6195 int rval; 6196 mbx_cmd_t mc; 6197 mbx_cmd_t *mcp = &mc; 6198 6199 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1203, 6200 "Entered %s\n", __func__); 6201 6202 memset(mcp->mb, 0, sizeof(mcp->mb)); 6203 mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD; 6204 mcp->mb[1] = cpu_to_le16(0); 6205 mcp->out_mb = MBX_1 | MBX_0; 6206 mcp->in_mb = MBX_2 | MBX_0; 6207 mcp->tov = MBX_TOV_SECONDS; 6208 mcp->flags = 0; 6209 6210 rval = qla2x00_mailbox_command(vha, mcp); 6211 if (rval == QLA_SUCCESS) 6212 *value = mc.mb[2]; 6213 6214 ql_dbg(ql_dbg_mbx, vha, 0x1205, "%s %x\n", 6215 (rval != QLA_SUCCESS) ? "Failed" : "Done", rval); 6216 6217 return rval; 6218 } 6219 6220 int 6221 qla2x00_read_sfp_dev(struct scsi_qla_host *vha, char *buf, int count) 6222 { 6223 struct qla_hw_data *ha = vha->hw; 6224 uint16_t iter, addr, offset; 6225 dma_addr_t phys_addr; 6226 int rval, c; 6227 u8 *sfp_data; 6228 6229 memset(ha->sfp_data, 0, SFP_DEV_SIZE); 6230 addr = 0xa0; 6231 phys_addr = ha->sfp_data_dma; 6232 sfp_data = ha->sfp_data; 6233 offset = c = 0; 6234 6235 for (iter = 0; iter < SFP_DEV_SIZE / SFP_BLOCK_SIZE; iter++) { 6236 if (iter == 4) { 6237 /* Skip to next device address. 
*/ 6238 addr = 0xa2; 6239 offset = 0; 6240 } 6241 6242 rval = qla2x00_read_sfp(vha, phys_addr, sfp_data, 6243 addr, offset, SFP_BLOCK_SIZE, BIT_1); 6244 if (rval != QLA_SUCCESS) { 6245 ql_log(ql_log_warn, vha, 0x706d, 6246 "Unable to read SFP data (%x/%x/%x).\n", rval, 6247 addr, offset); 6248 6249 return rval; 6250 } 6251 6252 if (buf && (c < count)) { 6253 u16 sz; 6254 6255 if ((count - c) >= SFP_BLOCK_SIZE) 6256 sz = SFP_BLOCK_SIZE; 6257 else 6258 sz = count - c; 6259 6260 memcpy(buf, sfp_data, sz); 6261 buf += SFP_BLOCK_SIZE; 6262 c += sz; 6263 } 6264 phys_addr += SFP_BLOCK_SIZE; 6265 sfp_data += SFP_BLOCK_SIZE; 6266 offset += SFP_BLOCK_SIZE; 6267 } 6268 6269 return rval; 6270 } 6271