/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include <linux/delay.h>
#include <linux/ktime.h>
#include <linux/pci.h>
#include <linux/ratelimit.h>
#include <linux/vmalloc.h>
#include <scsi/scsi_tcq.h>
#include <linux/utsname.h>


/* QLAFX00 specific Mailbox implementation functions */

/*
 * qlafx00_mailbox_command
 *	Issues a mailbox command and waits for completion.
 *
 * Input:
 *	ha = adapter block pointer.
 *	mcp = driver internal mbx struct pointer.
 *
 * Output:
 *	mb[MAX_MAILBOX_REGISTER_COUNT] = returned mailbox data.
 *
 * Returns:
 *	0 : QLA_SUCCESS = cmd performed successfully
 *	1 : QLA_FUNCTION_FAILED (error encountered)
 *	6 : QLA_FUNCTION_TIMEOUT (timeout condition encountered)
 *
 * Context:
 *	Kernel context.
 */
static int
qlafx00_mailbox_command(scsi_qla_host_t *vha, struct mbx_cmd_32 *mcp)
{
	int		rval;
	unsigned long	flags = 0;
	device_reg_t	*reg;
	uint8_t		abort_active;
	uint8_t		io_lock_on;
	uint16_t	command = 0;
	uint32_t	*iptr;
	uint32_t __iomem *optr;
	uint32_t	cnt;
	uint32_t	mboxes;
	unsigned long	wait_time;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	if (ha->pdev->error_state > pci_channel_io_frozen) {
		ql_log(ql_log_warn, vha, 0x115c,
		    "error_state is greater than pci_channel_io_frozen, "
		    "exiting.\n");
		return QLA_FUNCTION_TIMEOUT;
	}

	if (vha->device_flags & DFLG_DEV_FAILED) {
		ql_log(ql_log_warn, vha, 0x115f,
		    "Device in failed state, exiting.\n");
		return QLA_FUNCTION_TIMEOUT;
	}

	reg = ha->iobase;
	io_lock_on = base_vha->flags.init_done;

	rval = QLA_SUCCESS;
	abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);

	if (ha->flags.pci_channel_io_perm_failure) {
		ql_log(ql_log_warn, vha, 0x1175,
		    "Perm failure on EEH timeout MBX, exiting.\n");
		return QLA_FUNCTION_TIMEOUT;
	}

	if (ha->flags.isp82xx_fw_hung) {
		/* Setting Link-Down error */
		mcp->mb[0] = MBS_LINK_DOWN_ERROR;
		ql_log(ql_log_warn, vha, 0x1176,
		    "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
		rval = QLA_FUNCTION_FAILED;
		goto premature_exit;
	}

	/*
	 * Wait for active mailbox commands to finish by waiting at most tov
	 * seconds. This is to serialize actual issuing of mailbox cmds during
	 * non ISP abort time.
	 */
	if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) {
		/* Timeout occurred. Return error. */
		ql_log(ql_log_warn, vha, 0x1177,
		    "Cmd access timeout, cmd=0x%x, Exiting.\n",
		    mcp->mb[0]);
		return QLA_FUNCTION_TIMEOUT;
	}

	ha->flags.mbox_busy = 1;
	/* Save mailbox command for debug */
	ha->mcp32 = mcp;

	ql_dbg(ql_dbg_mbx, vha, 0x1178,
	    "Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]);

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Load mailbox registers. */
	optr = (uint32_t __iomem *)&reg->ispfx00.mailbox0;

	iptr = mcp->mb;
	command = mcp->mb[0];
	mboxes = mcp->out_mb;

	for (cnt = 0; cnt < ha->mbx_count; cnt++) {
		if (mboxes & BIT_0)
			WRT_REG_DWORD(optr, *iptr);

		mboxes >>= 1;
		optr++;
		iptr++;
	}

	/* Issue set host interrupt command to send cmd out. */
	ha->flags.mbox_int = 0;
	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);

	ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1172,
	    (uint8_t *)mcp->mb, 16);
	ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1173,
	    ((uint8_t *)mcp->mb + 0x10), 16);
	ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1174,
	    ((uint8_t *)mcp->mb + 0x20), 8);

	/* Unlock mbx registers and wait for interrupt */
	ql_dbg(ql_dbg_mbx, vha, 0x1179,
	    "Going to unlock irq & waiting for interrupts. "
	    "jiffies=%lx.\n", jiffies);

	/* Wait for mbx cmd completion until timeout */
	if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) {
		set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);

		QLAFX00_SET_HST_INTR(ha, ha->mbx_intr_code);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		wait_for_completion_timeout(&ha->mbx_intr_comp, mcp->tov * HZ);
	} else {
		ql_dbg(ql_dbg_mbx, vha, 0x112c,
		    "Cmd=%x Polling Mode.\n", command);

		QLAFX00_SET_HST_INTR(ha, ha->mbx_intr_code);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		wait_time = jiffies + mcp->tov * HZ;	/* wait at most tov secs */
		while (!ha->flags.mbox_int) {
			if (time_after(jiffies, wait_time))
				break;

			/* Check for pending interrupts. */
			qla2x00_poll(ha->rsp_q_map[0]);

			if (!ha->flags.mbox_int &&
			    !(IS_QLA2200(ha) &&
			    command == MBC_LOAD_RISC_RAM_EXTENDED))
				usleep_range(10000, 11000);
		} /* while */
		ql_dbg(ql_dbg_mbx, vha, 0x112d,
		    "Waited %d sec.\n",
		    (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ));
	}

	/* Check whether we timed out */
	if (ha->flags.mbox_int) {
		uint32_t *iptr2;

		ql_dbg(ql_dbg_mbx, vha, 0x112e,
		    "Cmd=%x completed.\n", command);

		/* Got interrupt. Clear the flag. */
		ha->flags.mbox_int = 0;
		clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);

		if (ha->mailbox_out32[0] != MBS_COMMAND_COMPLETE)
			rval = QLA_FUNCTION_FAILED;

		/* Load return mailbox registers. */
		iptr2 = mcp->mb;
		iptr = (uint32_t *)&ha->mailbox_out32[0];
		mboxes = mcp->in_mb;
		for (cnt = 0; cnt < ha->mbx_count; cnt++) {
			if (mboxes & BIT_0)
				*iptr2 = *iptr;

			mboxes >>= 1;
			iptr2++;
			iptr++;
		}
	} else {
		rval = QLA_FUNCTION_TIMEOUT;
	}

	ha->flags.mbox_busy = 0;

	/* Clean up */
	ha->mcp32 = NULL;

	if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) {
		ql_dbg(ql_dbg_mbx, vha, 0x113a,
		    "checking for additional resp interrupt.\n");

		/* polling mode for non isp_abort commands. */
		qla2x00_poll(ha->rsp_q_map[0]);
	}

	if (rval == QLA_FUNCTION_TIMEOUT &&
	    mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) {
		if (!io_lock_on || (mcp->flags & IOCTL_CMD) ||
		    ha->flags.eeh_busy) {
			/* not in dpc. schedule it for dpc to take over. */
			ql_dbg(ql_dbg_mbx, vha, 0x115d,
			    "Timeout, schedule isp_abort_needed.\n");

			if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
			    !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
			    !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {

				ql_log(ql_log_info, base_vha, 0x115e,
				    "Mailbox cmd timeout occurred, cmd=0x%x, "
				    "mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP "
				    "abort.\n", command, mcp->mb[0],
				    ha->flags.eeh_busy);
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
			}
		} else if (!abort_active) {
			/* call abort directly since we are in the DPC thread */
			ql_dbg(ql_dbg_mbx, vha, 0x1160,
			    "Timeout, calling abort_isp.\n");

			if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
			    !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
			    !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {

				ql_log(ql_log_info, base_vha, 0x1161,
				    "Mailbox cmd timeout occurred, cmd=0x%x, "
				    "mb[0]=0x%x. Scheduling ISP abort ",
				    command, mcp->mb[0]);

				set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
				clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				if (ha->isp_ops->abort_isp(vha)) {
					/* Failed. retry later. */
					set_bit(ISP_ABORT_NEEDED,
					    &vha->dpc_flags);
				}
				clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
				ql_dbg(ql_dbg_mbx, vha, 0x1162,
				    "Finished abort_isp.\n");
			}
		}
	}

premature_exit:
	/* Allow next mbx cmd to come in. */
	complete(&ha->mbx_cmd_comp);

	if (rval) {
		ql_log(ql_log_warn, base_vha, 0x1163,
		    "**** Failed mbx[0]=%x, mb[1]=%x, mb[2]=%x, "
		    "mb[3]=%x, cmd=%x ****.\n",
		    mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3], command);
	} else {
		ql_dbg(ql_dbg_mbx, base_vha, 0x1164, "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qlafx00_driver_shutdown
 *	Indicate a driver shutdown to firmware.
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Returns:
 *	local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qlafx00_driver_shutdown(scsi_qla_host_t *vha, int tmo)
{
	int rval;
	struct mbx_cmd_32 mc;
	struct mbx_cmd_32 *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1166,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_MR_DRV_SHUTDOWN;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_0;
	if (tmo)
		mcp->tov = tmo;
	else
		mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qlafx00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1167,
		    "Failed=%x.\n", rval);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1168,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qlafx00_get_firmware_state
 *	Get adapter firmware state.
 *
 * Input:
 *	ha = adapter block pointer.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qlafx00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
static int
qlafx00_get_firmware_state(scsi_qla_host_t *vha, uint32_t *states)
{
	int rval;
	struct mbx_cmd_32 mc;
	struct mbx_cmd_32 *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1169,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qlafx00_mailbox_command(vha, mcp);

	/* Return firmware states. */
	states[0] = mcp->mb[1];

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x116a,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x116b,
		    "Done %s.\n", __func__);
	}
	return rval;
}

/*
 * qlafx00_init_firmware
 *	Initialize adapter firmware.
 *
 * Input:
 *	ha = adapter block pointer.
 *	dptr = Initialization control block pointer.
 *	size = size of initialization control block.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qlafx00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qlafx00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
{
	int rval;
	struct mbx_cmd_32 mc;
	struct mbx_cmd_32 *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x116c,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_INITIALIZE_FIRMWARE;

	mcp->mb[1] = 0;
	mcp->mb[2] = MSD(ha->init_cb_dma);
	mcp->mb[3] = LSD(ha->init_cb_dma);

	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_0;
	mcp->buf_size = size;
	mcp->flags = MBX_DMA_OUT;
	mcp->tov = MBX_TOV_SECONDS;
	rval = qlafx00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x116d,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x116e,
		    "Done %s.\n", __func__);
	}
	return rval;
}

/*
 * qlafx00_mbx_reg_test
 *	Mailbox register wrap test: write known patterns to the outgoing
 *	mailbox registers and verify they are echoed back by the firmware.
 */
static int
qlafx00_mbx_reg_test(scsi_qla_host_t *vha)
{
	int rval;
	struct mbx_cmd_32 mc;
	struct mbx_cmd_32 *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x116f,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST;
	mcp->mb[1] = 0xAAAA;
	mcp->mb[2] = 0x5555;
	mcp->mb[3] = 0xAA55;
	mcp->mb[4] = 0x55AA;
	mcp->mb[5] = 0xA5A5;
	mcp->mb[6] = 0x5A5A;
	mcp->mb[7] = 0x2525;
	mcp->mb[8] = 0xBBBB;
	mcp->mb[9] = 0x6666;
	mcp->mb[10] = 0xBB66;
	mcp->mb[11] = 0x66BB;
	mcp->mb[12] = 0xB6B6;
	mcp->mb[13] = 0x6B6B;
	mcp->mb[14] = 0x3636;
	mcp->mb[15] = 0xCCCC;

	mcp->out_mb = MBX_15|MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|
			MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_15|MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|
			MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->buf_size = 0;
	mcp->flags = MBX_DMA_OUT;
	mcp->tov = MBX_TOV_SECONDS;
	rval = qlafx00_mailbox_command(vha, mcp);
	if (rval == QLA_SUCCESS) {
		if (mcp->mb[17] != 0xAAAA || mcp->mb[18] != 0x5555 ||
		    mcp->mb[19] != 0xAA55 || mcp->mb[20] != 0x55AA)
			rval = QLA_FUNCTION_FAILED;
		if (mcp->mb[21] != 0xA5A5 || mcp->mb[22] != 0x5A5A ||
		    mcp->mb[23] != 0x2525 || mcp->mb[24] != 0xBBBB)
			rval = QLA_FUNCTION_FAILED;
		if (mcp->mb[25] != 0x6666 || mcp->mb[26] != 0xBB66 ||
		    mcp->mb[27] != 0x66BB || mcp->mb[28] != 0xB6B6)
			rval = QLA_FUNCTION_FAILED;
		if (mcp->mb[29] != 0x6B6B || mcp->mb[30] != 0x3636 ||
		    mcp->mb[31] != 0xCCCC)
			rval = QLA_FUNCTION_FAILED;
	}

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1170,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1171,
		    "Done %s.\n", __func__);
	}
	return rval;
}

/**
 * qlafx00_pci_config() - Setup ISPFx00 PCI configuration registers.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
int
qlafx00_pci_config(scsi_qla_host_t *vha)
{
	uint16_t w;
	struct qla_hw_data *ha = vha->hw;

	pci_set_master(ha->pdev);
	pci_try_set_mwi(ha->pdev);

	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
	w &= ~PCI_COMMAND_INTX_DISABLE;
	pci_write_config_word(ha->pdev, PCI_COMMAND, w);

	/* PCIe -- adjust Maximum Read Request Size (2048). */
	if (pci_is_pcie(ha->pdev))
		pcie_set_readrq(ha->pdev, 2048);

	ha->chip_revision = ha->pdev->revision;

	return QLA_SUCCESS;
}

/**
 * qlafx00_soc_cpu_reset() - Perform warm reset of iSA (CPUs being reset on SOC).
 * @vha: HA context
 */
static inline void
qlafx00_soc_cpu_reset(scsi_qla_host_t *vha)
{
	unsigned long flags = 0;
	struct qla_hw_data *ha = vha->hw;
	int i, core;
	uint32_t cnt;
	uint32_t reg_val;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	QLAFX00_SET_HBA_SOC_REG(ha, 0x80004, 0);
	QLAFX00_SET_HBA_SOC_REG(ha, 0x82004, 0);

	/* stop the XOR DMA engines */
	QLAFX00_SET_HBA_SOC_REG(ha, 0x60920, 0x02);
	QLAFX00_SET_HBA_SOC_REG(ha, 0x60924, 0x02);
	QLAFX00_SET_HBA_SOC_REG(ha, 0xf0920, 0x02);
	QLAFX00_SET_HBA_SOC_REG(ha, 0xf0924, 0x02);

	/* stop the IDMA engines */
	reg_val = QLAFX00_GET_HBA_SOC_REG(ha, 0x60840);
	reg_val &= ~(1<<12);
	QLAFX00_SET_HBA_SOC_REG(ha, 0x60840, reg_val);

	reg_val = QLAFX00_GET_HBA_SOC_REG(ha, 0x60844);
	reg_val &= ~(1<<12);
	QLAFX00_SET_HBA_SOC_REG(ha, 0x60844, reg_val);

	reg_val = QLAFX00_GET_HBA_SOC_REG(ha, 0x60848);
	reg_val &= ~(1<<12);
	QLAFX00_SET_HBA_SOC_REG(ha, 0x60848, reg_val);

	reg_val = QLAFX00_GET_HBA_SOC_REG(ha, 0x6084C);
	reg_val &= ~(1<<12);
	QLAFX00_SET_HBA_SOC_REG(ha, 0x6084C, reg_val);

	for (i = 0; i < 100000; i++) {
		if ((QLAFX00_GET_HBA_SOC_REG(ha, 0xd0000) & 0x10000000) == 0 &&
		    (QLAFX00_GET_HBA_SOC_REG(ha, 0x10600) & 0x1) == 0)
			break;
		udelay(100);
	}

	/* Set all 4 cores in reset */
	for (i = 0; i < 4; i++) {
		QLAFX00_SET_HBA_SOC_REG(ha,
		    (SOC_SW_RST_CONTROL_REG_CORE0 + 8*i), (0xF01));
		QLAFX00_SET_HBA_SOC_REG(ha,
		    (SOC_SW_RST_CONTROL_REG_CORE0 + 4 + 8*i), (0x01010101));
	}

	/* Reset all units in Fabric */
	QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_RST_CONTROL_REG, (0x011f0101));

	/* */
	QLAFX00_SET_HBA_SOC_REG(ha, 0x10610, 1);
	QLAFX00_SET_HBA_SOC_REG(ha, 0x10600, 0);

	/* Set all 4 core Memory Power Down Registers */
	for (i = 0; i < 5; i++) {
		QLAFX00_SET_HBA_SOC_REG(ha,
		    (SOC_PWR_MANAGEMENT_PWR_DOWN_REG + 4*i), (0x0));
	}

	/* Reset all interrupt control registers */
	for (i = 0; i < 115; i++) {
		QLAFX00_SET_HBA_SOC_REG(ha,
		    (SOC_INTERRUPT_SOURCE_I_CONTROL_REG + 4*i), (0x0));
	}

	/* Reset Timers control registers. per core */
	for (core = 0; core < 4; core++)
		for (i = 0; i < 8; i++)
			QLAFX00_SET_HBA_SOC_REG(ha,
			    (SOC_CORE_TIMER_REG + 0x100*core + 4*i), (0x0));

	/* Reset per core IRQ ack register */
	for (core = 0; core < 4; core++)
		QLAFX00_SET_HBA_SOC_REG(ha,
		    (SOC_IRQ_ACK_REG + 0x100*core), (0x3FF));

	/* Set Fabric control and config to defaults */
	QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_CONTROL_REG, (0x2));
	QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_CONFIG_REG, (0x3));

	/* Kick in Fabric units */
	QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_RST_CONTROL_REG, (0x0));

	/* Kick in Core0 to start boot process */
	QLAFX00_SET_HBA_SOC_REG(ha, SOC_SW_RST_CONTROL_REG_CORE0, (0xF00));

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	/* Wait 10secs for soft-reset to complete. */
	for (cnt = 10; cnt; cnt--) {
		msleep(1000);
		barrier();
	}
}

/**
 * qlafx00_soft_reset() - Soft Reset ISPFx00.
 * @vha: HA context
 */
void
qlafx00_soft_reset(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;

	if (unlikely(pci_channel_offline(ha->pdev) &&
	    ha->flags.pci_channel_io_perm_failure))
		return;

	ha->isp_ops->disable_intrs(ha);
	qlafx00_soc_cpu_reset(vha);
}

/**
 * qlafx00_chip_diag() - Test ISPFx00 for proper operation.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
int
qlafx00_chip_diag(scsi_qla_host_t *vha)
{
	int rval = 0;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];

	ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length;

	rval = qlafx00_mbx_reg_test(vha);
	if (rval) {
		ql_log(ql_log_warn, vha, 0x1165,
		    "Failed mailbox send register test\n");
	} else {
		/* Flag a successful rval */
		rval = QLA_SUCCESS;
	}
	return rval;
}

void
qlafx00_config_rings(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;

	WRT_REG_DWORD(&reg->req_q_in, 0);
	WRT_REG_DWORD(&reg->req_q_out, 0);

	WRT_REG_DWORD(&reg->rsp_q_in, 0);
	WRT_REG_DWORD(&reg->rsp_q_out, 0);

	/* PCI posting */
	RD_REG_DWORD(&reg->rsp_q_out);
}

char *
qlafx00_pci_info_str(struct scsi_qla_host *vha, char *str)
{
	struct qla_hw_data *ha = vha->hw;

	if (pci_is_pcie(ha->pdev)) {
		strcpy(str, "PCIe iSA");
		return str;
	}
	return str;
}

char *
qlafx00_fw_version_str(struct scsi_qla_host *vha, char *str, size_t size)
{
	struct qla_hw_data *ha = vha->hw;

	snprintf(str, size, "%s", ha->mr.fw_version);
	return str;
}

void
qlafx00_enable_intrs(struct qla_hw_data *ha)
{
	unsigned long flags = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->interrupts_on = 1;
	QLAFX00_ENABLE_ICNTRL_REG(ha);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

void
qlafx00_disable_intrs(struct qla_hw_data *ha)
{
	unsigned long flags = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->interrupts_on = 0;
	QLAFX00_DISABLE_ICNTRL_REG(ha);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

int
qlafx00_abort_target(fc_port_t *fcport, uint64_t l, int tag)
{
	return qla2x00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag);
}

int
qlafx00_lun_reset(fc_port_t *fcport, uint64_t l, int tag)
{
	return qla2x00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag);
}

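/*
 * qlafx00_loop_reset
 *	Issue a target reset to each discovered target port when the
 *	ql2xtargetreset module parameter is enabled.
 *
 * Input:
 *	vha = adapter block pointer.
 *
 * Returns:
 *	QLA_SUCCESS.
 *
 * Context:
 *	Kernel context.
 */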
int
qlafx00_loop_reset(scsi_qla_host_t *vha)
{
	int ret;
	struct fc_port *fcport;
	struct qla_hw_data *ha = vha->hw;

	if (ql2xtargetreset) {
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			if (fcport->port_type != FCT_TARGET)
				continue;

			ret = ha->isp_ops->target_reset(fcport, 0, 0);
			if (ret != QLA_SUCCESS) {
				ql_dbg(ql_dbg_taskm, vha, 0x803d,
				    "Bus Reset failed: Reset=%d "
				    "d_id=%x.\n", ret, fcport->d_id.b24);
			}
		}
	}
	return QLA_SUCCESS;
}

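/*
 * qlafx00_iospace_config
 *	Reserve the PCI regions and map BAR0 (control registers) and
 *	BAR2 (queue/doorbell registers) for MMIO access.
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Returns:
 *	0 = success, -ENOMEM = failure.
 *
 * Context:
 *	Kernel context.
 */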
int
qlafx00_iospace_config(struct qla_hw_data *ha)
{
	if (pci_request_selected_regions(ha->pdev, ha->bars,
	    QLA2XXX_DRIVER_NAME)) {
		ql_log_pci(ql_log_fatal, ha->pdev, 0x014e,
		    "Failed to reserve PIO/MMIO regions (%s), aborting.\n",
		    pci_name(ha->pdev));
		goto iospace_error_exit;
	}

	/* Use MMIO operations for all accesses. */
	if (!(pci_resource_flags(ha->pdev, 0) & IORESOURCE_MEM)) {
		ql_log_pci(ql_log_warn, ha->pdev, 0x014f,
		    "Invalid pci I/O region size (%s).\n",
		    pci_name(ha->pdev));
		goto iospace_error_exit;
	}
	if (pci_resource_len(ha->pdev, 0) < BAR0_LEN_FX00) {
		ql_log_pci(ql_log_warn, ha->pdev, 0x0127,
		    "Invalid PCI mem BAR0 region size (%s), aborting\n",
		    pci_name(ha->pdev));
		goto iospace_error_exit;
	}

	ha->cregbase =
	    ioremap_nocache(pci_resource_start(ha->pdev, 0), BAR0_LEN_FX00);
	if (!ha->cregbase) {
		ql_log_pci(ql_log_fatal, ha->pdev, 0x0128,
		    "cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev));
		goto iospace_error_exit;
	}

	if (!(pci_resource_flags(ha->pdev, 2) & IORESOURCE_MEM)) {
		ql_log_pci(ql_log_warn, ha->pdev, 0x0129,
		    "region #2 not an MMIO resource (%s), aborting\n",
		    pci_name(ha->pdev));
		goto iospace_error_exit;
	}
	if (pci_resource_len(ha->pdev, 2) < BAR2_LEN_FX00) {
		ql_log_pci(ql_log_warn, ha->pdev, 0x012a,
		    "Invalid PCI mem BAR2 region size (%s), aborting\n",
		    pci_name(ha->pdev));
		goto iospace_error_exit;
	}

	ha->iobase =
	    ioremap_nocache(pci_resource_start(ha->pdev, 2), BAR2_LEN_FX00);
	if (!ha->iobase) {
		ql_log_pci(ql_log_fatal, ha->pdev, 0x012b,
		    "cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev));
		goto iospace_error_exit;
	}

	/* Determine queue resources */
	ha->max_req_queues = ha->max_rsp_queues = 1;

	ql_log_pci(ql_log_info, ha->pdev, 0x012c,
	    "Bars 0x%x, iobase0 0x%p, iobase2 0x%p\n",
	    ha->bars, ha->cregbase, ha->iobase);

	return 0;

iospace_error_exit:
	return -ENOMEM;
}

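/*
 * qlafx00_save_queue_ptrs
 *	Save the driver-allocated request/response ring parameters in the
 *	*_fx00 fields before they are overwritten with the firmware-supplied
 *	(BAR2 resident) values in qlafx00_config_queues().
 */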
static void
qlafx00_save_queue_ptrs(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];
	struct rsp_que *rsp = ha->rsp_q_map[0];

	req->length_fx00 = req->length;
	req->ring_fx00 = req->ring;
	req->dma_fx00 = req->dma;

	rsp->length_fx00 = rsp->length;
	rsp->ring_fx00 = rsp->ring;
	rsp->dma_fx00 = rsp->dma;

	ql_dbg(ql_dbg_init, vha, 0x012d,
	    "req: %p, ring_fx00: %p, length_fx00: 0x%x,"
	    "req->dma_fx00: 0x%llx\n", req, req->ring_fx00,
	    req->length_fx00, (u64)req->dma_fx00);

	ql_dbg(ql_dbg_init, vha, 0x012e,
	    "rsp: %p, ring_fx00: %p, length_fx00: 0x%x,"
	    "rsp->dma_fx00: 0x%llx\n", rsp, rsp->ring_fx00,
	    rsp->length_fx00, (u64)rsp->dma_fx00);
}

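/*
 * qlafx00_config_queues
 *	Point the request/response rings at the firmware-supplied offsets
 *	within BAR2, as reported through the AEN/init-value mailboxes.
 */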
static int
qlafx00_config_queues(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];
	struct rsp_que *rsp = ha->rsp_q_map[0];
	dma_addr_t bar2_hdl = pci_resource_start(ha->pdev, 2);

	req->length = ha->req_que_len;
	req->ring = (void __force *)ha->iobase + ha->req_que_off;
	req->dma = bar2_hdl + ha->req_que_off;
	if ((!req->ring) || (req->length == 0)) {
		ql_log_pci(ql_log_info, ha->pdev, 0x012f,
		    "Unable to allocate memory for req_ring\n");
		return QLA_FUNCTION_FAILED;
	}

	ql_dbg(ql_dbg_init, vha, 0x0130,
	    "req: %p req_ring pointer %p req len 0x%x "
	    "req off 0x%x, req->dma: 0x%llx\n",
	    req, req->ring, req->length,
	    ha->req_que_off, (u64)req->dma);

	rsp->length = ha->rsp_que_len;
	rsp->ring = (void __force *)ha->iobase + ha->rsp_que_off;
	rsp->dma = bar2_hdl + ha->rsp_que_off;
	if ((!rsp->ring) || (rsp->length == 0)) {
		ql_log_pci(ql_log_info, ha->pdev, 0x0131,
		    "Unable to allocate memory for rsp_ring\n");
		return QLA_FUNCTION_FAILED;
	}

	ql_dbg(ql_dbg_init, vha, 0x0132,
	    "rsp: %p rsp_ring pointer %p rsp len 0x%x "
	    "rsp off 0x%x, rsp->dma: 0x%llx\n",
	    rsp, rsp->ring, rsp->length,
	    ha->rsp_que_off, (u64)rsp->dma);

	return QLA_SUCCESS;
}

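/*
 * qlafx00_init_fw_ready
 *	Poll aenmailbox0 for up to 30 seconds until the firmware reports
 *	restart complete (or config-wait state), issuing a driver shutdown
 *	or soft reset as needed along the way.
 */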
static int
qlafx00_init_fw_ready(scsi_qla_host_t *vha)
{
	int rval = 0;
	unsigned long wtime;
	uint16_t wait_time;	/* Wait time */
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
	uint32_t aenmbx, aenmbx7 = 0;
	uint32_t pseudo_aen;
	uint32_t state[5];
	bool done = false;

	/* 30 seconds wait - Adjust if required */
	wait_time = 30;

	pseudo_aen = RD_REG_DWORD(&reg->pseudoaen);
	if (pseudo_aen == 1) {
		aenmbx7 = RD_REG_DWORD(&reg->initval7);
		ha->mbx_intr_code = MSW(aenmbx7);
		ha->rqstq_intr_code = LSW(aenmbx7);
		rval = qlafx00_driver_shutdown(vha, 10);
		if (rval != QLA_SUCCESS)
			qlafx00_soft_reset(vha);
	}

	/* wait time before firmware ready */
	wtime = jiffies + (wait_time * HZ);
	do {
		aenmbx = RD_REG_DWORD(&reg->aenmailbox0);
		barrier();
		ql_dbg(ql_dbg_mbx, vha, 0x0133,
		    "aenmbx: 0x%x\n", aenmbx);

		switch (aenmbx) {
		case MBA_FW_NOT_STARTED:
		case MBA_FW_STARTING:
			break;

		case MBA_SYSTEM_ERR:
		case MBA_REQ_TRANSFER_ERR:
		case MBA_RSP_TRANSFER_ERR:
		case MBA_FW_INIT_FAILURE:
			qlafx00_soft_reset(vha);
			break;

		case MBA_FW_RESTART_CMPLT:
			/* Set the mbx and rqstq intr code */
			aenmbx7 = RD_REG_DWORD(&reg->aenmailbox7);
			ha->mbx_intr_code = MSW(aenmbx7);
			ha->rqstq_intr_code = LSW(aenmbx7);
			ha->req_que_off = RD_REG_DWORD(&reg->aenmailbox1);
			ha->rsp_que_off = RD_REG_DWORD(&reg->aenmailbox3);
			ha->req_que_len = RD_REG_DWORD(&reg->aenmailbox5);
			ha->rsp_que_len = RD_REG_DWORD(&reg->aenmailbox6);
			WRT_REG_DWORD(&reg->aenmailbox0, 0);
			RD_REG_DWORD_RELAXED(&reg->aenmailbox0);
			ql_dbg(ql_dbg_init, vha, 0x0134,
			    "f/w returned mbx_intr_code: 0x%x, "
			    "rqstq_intr_code: 0x%x\n",
			    ha->mbx_intr_code, ha->rqstq_intr_code);
			QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);
			rval = QLA_SUCCESS;
			done = true;
			break;

		default:
			if ((aenmbx & 0xFF00) == MBA_FW_INIT_INPROGRESS)
				break;

			/* If fw is apparently not ready. In order to continue,
			 * we might need to issue Mbox cmd, but the problem is
			 * that the DoorBell vector values that come with the
			 * 8060 AEN are most likely gone by now (and thus no
			 * bell would be rung on the fw side when mbox cmd is
			 * issued). We have to therefore grab the 8060 AEN
			 * shadow regs (filled in by FW when the last 8060
			 * AEN was being posted).
			 * Do the following to determine what is needed in
			 * order to get the FW ready:
			 * 1. reload the 8060 AEN values from the shadow regs
			 * 2. clear int status to get rid of possible pending
			 *    interrupts
			 * 3. issue Get FW State Mbox cmd to determine fw state
			 * Set the mbx and rqstq intr code from Shadow Regs
			 */
			aenmbx7 = RD_REG_DWORD(&reg->initval7);
			ha->mbx_intr_code = MSW(aenmbx7);
			ha->rqstq_intr_code = LSW(aenmbx7);
			ha->req_que_off = RD_REG_DWORD(&reg->initval1);
			ha->rsp_que_off = RD_REG_DWORD(&reg->initval3);
			ha->req_que_len = RD_REG_DWORD(&reg->initval5);
			ha->rsp_que_len = RD_REG_DWORD(&reg->initval6);
			ql_dbg(ql_dbg_init, vha, 0x0135,
			    "f/w returned mbx_intr_code: 0x%x, "
			    "rqstq_intr_code: 0x%x\n",
			    ha->mbx_intr_code, ha->rqstq_intr_code);
			QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);

			/* Get the FW state */
			rval = qlafx00_get_firmware_state(vha, state);
			if (rval != QLA_SUCCESS) {
				/* Retry if timer has not expired */
				break;
			}

			if (state[0] == FSTATE_FX00_CONFIG_WAIT) {
				/* Firmware is waiting to be
				 * initialized by driver
				 */
				rval = QLA_SUCCESS;
				done = true;
				break;
			}

			/* Issue driver shutdown and wait until f/w recovers.
			 * Driver should continue to poll until 8060 AEN is
			 * received indicating firmware recovery.
			 */
			ql_dbg(ql_dbg_init, vha, 0x0136,
			    "Sending Driver shutdown fw_state 0x%x\n",
			    state[0]);

			rval = qlafx00_driver_shutdown(vha, 10);
			if (rval != QLA_SUCCESS) {
				rval = QLA_FUNCTION_FAILED;
				break;
			}
			msleep(500);

			wtime = jiffies + (wait_time * HZ);
			break;
		}

		if (!done) {
			if (time_after_eq(jiffies, wtime)) {
				ql_dbg(ql_dbg_init, vha, 0x0137,
				    "Init f/w failed: aen[7]: 0x%x\n",
				    RD_REG_DWORD(&reg->aenmailbox7));
				rval = QLA_FUNCTION_FAILED;
				done = true;
				break;
			}
			/* Delay for a while */
			msleep(500);
		}
	} while (!done);

	if (rval)
		ql_dbg(ql_dbg_init, vha, 0x0138,
		    "%s **** FAILED ****.\n", __func__);
	else
		ql_dbg(ql_dbg_init, vha, 0x0139,
		    "%s **** SUCCESS ****.\n", __func__);

	return rval;
}

/*
 * qlafx00_fw_ready() - Waits for firmware ready.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
int
qlafx00_fw_ready(scsi_qla_host_t *vha)
{
	int rval;
	unsigned long wtime;
	uint16_t wait_time;	/* Wait time if loop is coming ready */
	uint32_t state[5];

	rval = QLA_SUCCESS;

	wait_time = 10;

	/* wait time before firmware ready */
	wtime = jiffies + (wait_time * HZ);

	/* Wait for ISP to finish init */
	if (!vha->flags.init_done)
		ql_dbg(ql_dbg_init, vha, 0x013a,
		    "Waiting for init to complete...\n");

	do {
		rval = qlafx00_get_firmware_state(vha, state);

		if (rval == QLA_SUCCESS) {
			if (state[0] == FSTATE_FX00_INITIALIZED) {
				ql_dbg(ql_dbg_init, vha, 0x013b,
				    "fw_state=%x\n", state[0]);
				rval = QLA_SUCCESS;
				break;
			}
		}
		rval = QLA_FUNCTION_FAILED;

		if (time_after_eq(jiffies, wtime))
			break;

		/* Delay for a while */
		msleep(500);

		ql_dbg(ql_dbg_init, vha, 0x013c,
		    "fw_state=%x curr time=%lx.\n", state[0], jiffies);
	} while (1);

	if (rval)
		ql_dbg(ql_dbg_init, vha, 0x013d,
		    "Firmware ready **** FAILED ****.\n");
	else
		ql_dbg(ql_dbg_init, vha, 0x013e,
		    "Firmware ready **** SUCCESS ****.\n");

	return rval;
}

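/*
 * qlafx00_find_all_targets
 *	Scan the firmware-supplied target-node bit map, fetch node info for
 *	each set bit, and collect newly discovered remote ports on the
 *	new_fcports list.
 */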
static int
qlafx00_find_all_targets(scsi_qla_host_t *vha,
	struct list_head *new_fcports)
{
	int rval;
	uint16_t tgt_id;
	fc_port_t *fcport, *new_fcport;
	int found;
	struct qla_hw_data *ha = vha->hw;

	rval = QLA_SUCCESS;

	if (!test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))
		return QLA_FUNCTION_FAILED;

	if ((atomic_read(&vha->loop_down_timer) ||
	     STATE_TRANSITION(vha))) {
		atomic_set(&vha->loop_down_timer, 0);
		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		return QLA_FUNCTION_FAILED;
	}

	ql_dbg(ql_dbg_disc + ql_dbg_init, vha, 0x2088,
	    "Listing Target bit map...\n");
	ql_dump_buffer(ql_dbg_disc + ql_dbg_init, vha,
	    0x2089, (uint8_t *)ha->gid_list, 32);

	/* Allocate temporary rmtport for any new rmtports discovered. */
	new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (new_fcport == NULL)
		return QLA_MEMORY_ALLOC_FAILED;

	for_each_set_bit(tgt_id, (void *)ha->gid_list,
	    QLAFX00_TGT_NODE_LIST_SIZE) {

		/* Send get target node info */
		new_fcport->tgt_id = tgt_id;
		rval = qlafx00_fx_disc(vha, new_fcport,
		    FXDISC_GET_TGT_NODE_INFO);
		if (rval != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x208a,
			    "Target info scan failed -- assuming zero-entry "
			    "result...\n");
			continue;
		}

		/* Locate matching device in database. */
		found = 0;
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			if (memcmp(new_fcport->port_name,
			    fcport->port_name, WWN_SIZE))
				continue;

			found++;

			/*
			 * If tgt_id is same and state FCS_ONLINE, nothing
			 * changed.
			 */
			if (fcport->tgt_id == new_fcport->tgt_id &&
			    atomic_read(&fcport->state) == FCS_ONLINE)
				break;

			/*
			 * Tgt ID changed or device was marked to be updated.
			 */
			ql_dbg(ql_dbg_disc + ql_dbg_init, vha, 0x208b,
			    "TGT-ID Change(%s): Present tgt id: "
			    "0x%x state: 0x%x "
			    "wwnn = %llx wwpn = %llx.\n",
			    __func__, fcport->tgt_id,
			    atomic_read(&fcport->state),
			    (unsigned long long)wwn_to_u64(fcport->node_name),
			    (unsigned long long)wwn_to_u64(fcport->port_name));

			ql_log(ql_log_info, vha, 0x208c,
			    "TGT-ID Announce(%s): Discovered tgt "
			    "id 0x%x wwnn = %llx "
			    "wwpn = %llx.\n", __func__, new_fcport->tgt_id,
			    (unsigned long long)
			    wwn_to_u64(new_fcport->node_name),
			    (unsigned long long)
			    wwn_to_u64(new_fcport->port_name));

			if (atomic_read(&fcport->state) != FCS_ONLINE) {
				fcport->old_tgt_id = fcport->tgt_id;
				fcport->tgt_id = new_fcport->tgt_id;
				ql_log(ql_log_info, vha, 0x208d,
				    "TGT-ID: New fcport Added: %p\n", fcport);
				qla2x00_update_fcport(vha, fcport);
			} else {
				ql_log(ql_log_info, vha, 0x208e,
				    " Existing TGT-ID %x did not get "
				    " offline event from firmware.\n",
				    fcport->old_tgt_id);
				qla2x00_mark_device_lost(vha, fcport, 0, 0);
				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
				kfree(new_fcport);
				return rval;
			}
			break;
		}

		if (found)
			continue;

		/* If device was not in our fcports list, then add it. */
		list_add_tail(&new_fcport->list, new_fcports);

		/* Allocate a new replacement fcport. */
		new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
		if (new_fcport == NULL)
			return QLA_MEMORY_ALLOC_FAILED;
	}

	kfree(new_fcport);
	return rval;
}

/*
 * qlafx00_configure_all_targets
 *	Setup target devices with node ID's.
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Returns:
 *	0 = success.
 *	BIT_0 = error
 */
static int
qlafx00_configure_all_targets(scsi_qla_host_t *vha)
{
	int rval;
	fc_port_t *fcport, *rmptemp;
	LIST_HEAD(new_fcports);

	rval = qlafx00_fx_disc(vha, &vha->hw->mr.fcport,
	    FXDISC_GET_TGT_NODE_LIST);
	if (rval != QLA_SUCCESS) {
		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		return rval;
	}

	rval = qlafx00_find_all_targets(vha, &new_fcports);
	if (rval != QLA_SUCCESS) {
		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		return rval;
	}

	/*
	 * Delete all previous devices marked lost.
	 */
	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
			break;

		if (atomic_read(&fcport->state) == FCS_DEVICE_LOST) {
			if (fcport->port_type != FCT_INITIATOR)
				qla2x00_mark_device_lost(vha, fcport, 0, 0);
		}
	}

	/*
	 * Add the new devices to our devices list.
	 */
	list_for_each_entry_safe(fcport, rmptemp, &new_fcports, list) {
		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
			break;

		qla2x00_update_fcport(vha, fcport);
		list_move_tail(&fcport->list, &vha->vp_fcports);
		ql_log(ql_log_info, vha, 0x208f,
		    "Attach new target id 0x%x wwnn = %llx "
		    "wwpn = %llx.\n",
		    fcport->tgt_id,
		    (unsigned long long)wwn_to_u64(fcport->node_name),
		    (unsigned long long)wwn_to_u64(fcport->port_name));
	}

	/* Free all new device structures not processed. */
	list_for_each_entry_safe(fcport, rmptemp, &new_fcports, list) {
		list_del(&fcport->list);
		kfree(fcport);
	}

	return rval;
}

/*
 * qlafx00_configure_devices
 *	Updates Fibre Channel Device Database with what is actually on loop.
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Returns:
 *	0 = success.
 *	1 = error.
 *	2 = database was full and device was not configured.
 */
int
qlafx00_configure_devices(scsi_qla_host_t *vha)
{
	int rval;
	unsigned long flags;

	rval = QLA_SUCCESS;

	flags = vha->dpc_flags;

	ql_dbg(ql_dbg_disc, vha, 0x2090,
	    "Configure devices -- dpc flags =0x%lx\n", flags);

	rval = qlafx00_configure_all_targets(vha);

	if (rval == QLA_SUCCESS) {
		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
			rval = QLA_FUNCTION_FAILED;
		} else {
			atomic_set(&vha->loop_state, LOOP_READY);
			ql_log(ql_log_info, vha, 0x2091,
			    "Device Ready\n");
		}
	}

	if (rval) {
		ql_dbg(ql_dbg_disc, vha, 0x2092,
		    "%s *** FAILED ***.\n", __func__);
	} else {
		ql_dbg(ql_dbg_disc, vha, 0x2093,
		    "%s: exiting normally.\n", __func__);
	}
	return rval;
}

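/*
 * qlafx00_abort_isp_cleanup
 *	Common teardown for ISP error recovery and critical-temperature
 *	recovery: mark the adapter offline, fail or requeue outstanding
 *	commands, free IRQs and flag the appropriate recovery state.
 */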
static void
qlafx00_abort_isp_cleanup(scsi_qla_host_t *vha, bool critemp)
{
	struct qla_hw_data *ha = vha->hw;
	fc_port_t *fcport;

	vha->flags.online = 0;
	ha->mr.fw_hbt_en = 0;

	if (!critemp) {
		ha->flags.chip_reset_done = 0;
		clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		vha->qla_stats.total_isp_aborts++;
		ql_log(ql_log_info, vha, 0x013f,
		    "Performing ISP error recovery - ha = %p.\n", ha);
		ha->isp_ops->reset_chip(vha);
	}

	if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
		atomic_set(&vha->loop_state, LOOP_DOWN);
		atomic_set(&vha->loop_down_timer,
		    QLAFX00_LOOP_DOWN_TIME);
	} else {
		if (!atomic_read(&vha->loop_down_timer))
			atomic_set(&vha->loop_down_timer,
			    QLAFX00_LOOP_DOWN_TIME);
	}

	/* Clear all async request states across all VPs. */
	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		fcport->flags = 0;
		if (atomic_read(&fcport->state) == FCS_ONLINE)
			qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
	}

	if (!ha->flags.eeh_busy) {
		if (critemp) {
			qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
		} else {
			/* Requeue all commands in outstanding command list. */
			qla2x00_abort_all_cmds(vha, DID_RESET << 16);
		}
	}

	qla2x00_free_irqs(vha);
	if (critemp)
		set_bit(FX00_CRITEMP_RECOVERY, &vha->dpc_flags);
	else
		set_bit(FX00_RESET_RECOVERY, &vha->dpc_flags);

	/* Clear the Interrupts */
	QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);

	ql_log(ql_log_info, vha, 0x0140,
	    "%s Done - ha=%p.\n", __func__, ha);
}

/**
 * qlafx00_init_response_q_entries() - Initializes response queue entries.
 * @rsp: response queue
 *
 * Beginning of request ring has initialization control block already built
 * by nvram config routine.
 */
void
qlafx00_init_response_q_entries(struct rsp_que *rsp)
{
	uint16_t cnt;
	response_t *pkt;

	rsp->ring_ptr = rsp->ring;
	rsp->ring_index = 0;
	rsp->status_srb = NULL;
	pkt = rsp->ring_ptr;
	for (cnt = 0; cnt < rsp->length; cnt++) {
		pkt->signature = RESPONSE_PROCESSED;
		WRT_REG_DWORD((void __force __iomem *)&pkt->signature,
		    RESPONSE_PROCESSED);
		pkt++;
	}
}

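/*
 * qlafx00_rescan_isp
 *	Re-acquire IRQs and the firmware-supplied queue parameters after a
 *	firmware restart, then re-initialize the rings and re-register the
 *	host information.
 */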
int
qlafx00_rescan_isp(scsi_qla_host_t *vha)
{
	uint32_t status = QLA_FUNCTION_FAILED;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
	uint32_t aenmbx7;

	qla2x00_request_irqs(ha, ha->rsp_q_map[0]);

	aenmbx7 = RD_REG_DWORD(&reg->aenmailbox7);
	ha->mbx_intr_code = MSW(aenmbx7);
	ha->rqstq_intr_code = LSW(aenmbx7);
	ha->req_que_off = RD_REG_DWORD(&reg->aenmailbox1);
	ha->rsp_que_off = RD_REG_DWORD(&reg->aenmailbox3);
	ha->req_que_len = RD_REG_DWORD(&reg->aenmailbox5);
	ha->rsp_que_len = RD_REG_DWORD(&reg->aenmailbox6);

	ql_dbg(ql_dbg_disc, vha, 0x2094,
	    "fw returned mbx_intr_code: 0x%x, rqstq_intr_code: 0x%x "
	    " Req que offset 0x%x Rsp que offset 0x%x\n",
	    ha->mbx_intr_code, ha->rqstq_intr_code,
	    ha->req_que_off, ha->rsp_que_off);

	/* Clear the Interrupts */
	QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);

	status = qla2x00_init_rings(vha);
	if (!status) {
		vha->flags.online = 1;

		/* if no cable then assume it's good */
		if ((vha->device_flags & DFLG_NO_CABLE))
			status = 0;
		/* Register system information */
		if (qlafx00_fx_disc(vha,
		    &vha->hw->mr.fcport, FXDISC_REG_HOST_INFO))
			ql_dbg(ql_dbg_disc, vha, 0x2095,
			    "failed to register host info\n");
	}
	scsi_unblock_requests(vha->host);
	return status;
}

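/*
 * qlafx00_timer_routine
 *	One-second timer callback: monitor the firmware heartbeat and drive
 *	the reset-recovery, critical-temperature and host-info-resend state
 *	machines.
 */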
void
qlafx00_timer_routine(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	uint32_t fw_heart_beat;
	uint32_t aenmbx0;
	struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
	uint32_t tempc;

	/* Check firmware health */
	if (ha->mr.fw_hbt_cnt)
		ha->mr.fw_hbt_cnt--;
	else {
		if ((!ha->flags.mr_reset_hdlr_active) &&
		    (!test_bit(UNLOADING, &vha->dpc_flags)) &&
		    (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) &&
		    (ha->mr.fw_hbt_en)) {
			fw_heart_beat = RD_REG_DWORD(&reg->fwheartbeat);
			if (fw_heart_beat != ha->mr.old_fw_hbt_cnt) {
				ha->mr.old_fw_hbt_cnt = fw_heart_beat;
				ha->mr.fw_hbt_miss_cnt = 0;
			} else {
				ha->mr.fw_hbt_miss_cnt++;
				if (ha->mr.fw_hbt_miss_cnt ==
				    QLAFX00_HEARTBEAT_MISS_CNT) {
					set_bit(ISP_ABORT_NEEDED,
					    &vha->dpc_flags);
					qla2xxx_wake_dpc(vha);
					ha->mr.fw_hbt_miss_cnt = 0;
				}
			}
		}
		ha->mr.fw_hbt_cnt = QLAFX00_HEARTBEAT_INTERVAL;
	}

	if (test_bit(FX00_RESET_RECOVERY, &vha->dpc_flags)) {
		/* Reset recovery to be performed in timer routine */
		aenmbx0 = RD_REG_DWORD(&reg->aenmailbox0);
		if (ha->mr.fw_reset_timer_exp) {
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			qla2xxx_wake_dpc(vha);
			ha->mr.fw_reset_timer_exp = 0;
		} else if (aenmbx0 == MBA_FW_RESTART_CMPLT) {
			/* Wake up DPC to rescan the targets */
			set_bit(FX00_TARGET_SCAN, &vha->dpc_flags);
			clear_bit(FX00_RESET_RECOVERY, &vha->dpc_flags);
			qla2xxx_wake_dpc(vha);
			ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL;
		} else if ((aenmbx0 == MBA_FW_STARTING) &&
		    (!ha->mr.fw_hbt_en)) {
			ha->mr.fw_hbt_en = 1;
		} else if (!ha->mr.fw_reset_timer_tick) {
			if (aenmbx0 == ha->mr.old_aenmbx0_state)
				ha->mr.fw_reset_timer_exp = 1;
			ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL;
		} else if (aenmbx0 == 0xFFFFFFFF) {
			uint32_t data0, data1;

			data0 = QLAFX00_RD_REG(ha,
			    QLAFX00_BAR1_BASE_ADDR_REG);
			data1 = QLAFX00_RD_REG(ha,
			    QLAFX00_PEX0_WIN0_BASE_ADDR_REG);

			data0 &= 0xffff0000;
			data1 &= 0x0000ffff;

			QLAFX00_WR_REG(ha,
			    QLAFX00_PEX0_WIN0_BASE_ADDR_REG,
			    (data0 | data1));
		} else if ((aenmbx0 & 0xFF00) == MBA_FW_POLL_STATE) {
			ha->mr.fw_reset_timer_tick =
			    QLAFX00_MAX_RESET_INTERVAL;
		} else if (aenmbx0 == MBA_FW_RESET_FCT) {
			ha->mr.fw_reset_timer_tick =
			    QLAFX00_MAX_RESET_INTERVAL;
		}
		if (ha->mr.old_aenmbx0_state != aenmbx0) {
			ha->mr.old_aenmbx0_state = aenmbx0;
			ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL;
		}
		ha->mr.fw_reset_timer_tick--;
	}
	if (test_bit(FX00_CRITEMP_RECOVERY, &vha->dpc_flags)) {
		/*
		 * Critical temperature recovery to be
		 * performed in timer routine
		 */
		if (ha->mr.fw_critemp_timer_tick == 0) {
			tempc = QLAFX00_GET_TEMPERATURE(ha);
			ql_dbg(ql_dbg_timer, vha, 0x6012,
			    "ISPFx00(%s): Critical temp timer, "
			    "current SOC temperature: %d\n",
			    __func__, tempc);
			if (tempc < ha->mr.critical_temperature) {
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				clear_bit(FX00_CRITEMP_RECOVERY,
				    &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
			}
			ha->mr.fw_critemp_timer_tick =
			    QLAFX00_CRITEMP_INTERVAL;
		} else {
			ha->mr.fw_critemp_timer_tick--;
		}
	}
	if (ha->mr.host_info_resend) {
		/*
		 * Incomplete host info might be sent to firmware
		 * during system boot - info should be resent
		 */
		if (ha->mr.hinfo_resend_timer_tick == 0) {
			ha->mr.host_info_resend = false;
			set_bit(FX00_HOST_INFO_RESEND, &vha->dpc_flags);
			ha->mr.hinfo_resend_timer_tick =
			    QLAFX00_HINFO_RESEND_INTERVAL;
			qla2xxx_wake_dpc(vha);
		} else {
			ha->mr.hinfo_resend_timer_tick--;
		}
	}

}

/*
 * qlafx00_reset_initialize
 *	Re-initialize after an iSA device reset.
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Returns:
 *	0 = success
 */
int
qlafx00_reset_initialize(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;

	if (vha->device_flags & DFLG_DEV_FAILED) {
		ql_dbg(ql_dbg_init, vha, 0x0142,
		    "Device in failed state\n");
		return QLA_SUCCESS;
	}

	ha->flags.mr_reset_hdlr_active = 1;

	if (vha->flags.online) {
		scsi_block_requests(vha->host);
		qlafx00_abort_isp_cleanup(vha, false);
	}

	ql_log(ql_log_info, vha, 0x0143,
	    "(%s): succeeded.\n", __func__);
	ha->flags.mr_reset_hdlr_active = 0;
	return QLA_SUCCESS;
}

/*
 * qlafx00_abort_isp
 *	Resets ISP and aborts all outstanding commands.
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Returns:
 *	0 = success
 */
int
qlafx00_abort_isp(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;

	if (vha->flags.online) {
		if (unlikely(pci_channel_offline(ha->pdev) &&
		    ha->flags.pci_channel_io_perm_failure)) {
			clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
			return QLA_SUCCESS;
		}

		scsi_block_requests(vha->host);
		qlafx00_abort_isp_cleanup(vha, false);
	} else {
		scsi_block_requests(vha->host);
		clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		vha->qla_stats.total_isp_aborts++;
		ha->isp_ops->reset_chip(vha);
		set_bit(FX00_RESET_RECOVERY, &vha->dpc_flags);
		/* Clear the Interrupts */
		QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);
	}

	ql_log(ql_log_info, vha, 0x0145,
	    "(%s): succeeded.\n", __func__);

	return QLA_SUCCESS;
}

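/*
 * qlafx00_get_fcport
 *	Locate the fc_port entry matching the given target ID.
 */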
static inline fc_port_t*
qlafx00_get_fcport(struct scsi_qla_host *vha, int tgt_id)
{
	fc_port_t	*fcport;

	/* Check for matching device in remote port list. */
	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (fcport->tgt_id == tgt_id) {
			ql_dbg(ql_dbg_async, vha, 0x5072,
			    "Matching fcport(%p) found with TGT-ID: 0x%x "
			    "and Remote TGT_ID: 0x%x\n",
			    fcport, fcport->tgt_id, tgt_id);
			return fcport;
		}
	}
	return NULL;
}

static void
qlafx00_tgt_detach(struct scsi_qla_host *vha, int tgt_id)
{
	fc_port_t	*fcport;

	ql_log(ql_log_info, vha, 0x5073,
	    "Detach TGT-ID: 0x%x\n", tgt_id);

	fcport = qlafx00_get_fcport(vha, tgt_id);
	if (!fcport)
		return;

	qla2x00_mark_device_lost(vha, fcport, 0, 0);
}

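/*
 * qlafx00_process_aen
 *	Process a deferred asynchronous event and post the corresponding
 *	FC transport event to user space.
 */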
int
qlafx00_process_aen(struct scsi_qla_host *vha, struct qla_work_evt *evt)
{
	int rval = 0;
	uint32_t aen_code, aen_data;

	aen_code = FCH_EVT_VENDOR_UNIQUE;
	aen_data = evt->u.aenfx.evtcode;

	switch (evt->u.aenfx.evtcode) {
	case QLAFX00_MBA_PORT_UPDATE:		/* Port database update */
		if (evt->u.aenfx.mbx[1] == 0) {
			if (evt->u.aenfx.mbx[2] == 1) {
				if (!vha->flags.fw_tgt_reported)
					vha->flags.fw_tgt_reported = 1;
				atomic_set(&vha->loop_down_timer, 0);
				atomic_set(&vha->loop_state, LOOP_UP);
				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
			} else if (evt->u.aenfx.mbx[2] == 2) {
				qlafx00_tgt_detach(vha, evt->u.aenfx.mbx[3]);
			}
		} else if (evt->u.aenfx.mbx[1] == 0xffff) {
			if (evt->u.aenfx.mbx[2] == 1) {
				if (!vha->flags.fw_tgt_reported)
					vha->flags.fw_tgt_reported = 1;
				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
			} else if (evt->u.aenfx.mbx[2] == 2) {
				vha->device_flags |= DFLG_NO_CABLE;
				qla2x00_mark_all_devices_lost(vha, 1);
			}
		}
		break;
	case QLAFX00_MBA_LINK_UP:
		aen_code = FCH_EVT_LINKUP;
		aen_data = 0;
		break;
	case QLAFX00_MBA_LINK_DOWN:
		aen_code = FCH_EVT_LINKDOWN;
		aen_data = 0;
		break;
	case QLAFX00_MBA_TEMP_CRIT:	/* Critical temperature event */
		ql_log(ql_log_info, vha, 0x5082,
		    "Process critical temperature event "
		    "aenmb[0]: %x\n",
		    evt->u.aenfx.evtcode);
		scsi_block_requests(vha->host);
		qlafx00_abort_isp_cleanup(vha, true);
		scsi_unblock_requests(vha->host);
		break;
	}

	fc_host_post_event(vha->host, fc_get_event_number(),
	    aen_code, aen_data);

	return rval;
}

static void
qlafx00_update_host_attr(scsi_qla_host_t *vha, struct port_info_data *pinfo)
{
	u64 port_name = 0, node_name = 0;

	port_name = (unsigned long long)wwn_to_u64(pinfo->port_name);
	node_name = (unsigned long long)wwn_to_u64(pinfo->node_name);

	fc_host_node_name(vha->host) = node_name;
	fc_host_port_name(vha->host) = port_name;
	if (!pinfo->port_type)
		vha->hw->current_topology = ISP_CFG_F;
	if (pinfo->link_status == QLAFX00_LINK_STATUS_UP)
		atomic_set(&vha->loop_state, LOOP_READY);
	else if (pinfo->link_status == QLAFX00_LINK_STATUS_DOWN)
		atomic_set(&vha->loop_state, LOOP_DOWN);
	vha->hw->link_data_rate = (uint16_t)pinfo->link_config;
}

static void
qla2x00_fxdisc_iocb_timeout(void *data)
{
	srb_t *sp = (srb_t *)data;
	struct srb_iocb *lio = &sp->u.iocb_cmd;

	complete(&lio->u.fxiocb.fxiocb_comp);
}

static void
qla2x00_fxdisc_sp_done(void *data, void *ptr, int res)
{
	srb_t *sp = (srb_t *)ptr;
	struct srb_iocb *lio = &sp->u.iocb_cmd;

	complete(&lio->u.fxiocb.fxiocb_comp);
}

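/*
 * qlafx00_fx_disc
 *	Issue an FXDISC IOCB to retrieve configuration/port/target-node
 *	information from the firmware or to register host information,
 *	using DMA-coherent buffers for the request/response payloads.
 */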
int
qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t fx_type)
{
	srb_t *sp;
	struct srb_iocb *fdisc;
	int rval = QLA_FUNCTION_FAILED;
	struct qla_hw_data *ha = vha->hw;
	struct host_system_info *phost_info;
	struct register_host_info *preg_hsi;
	struct new_utsname *p_sysid = NULL;

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	fdisc = &sp->u.iocb_cmd;
	switch (fx_type) {
	case FXDISC_GET_CONFIG_INFO:
		fdisc->u.fxiocb.flags =
		    SRB_FXDISC_RESP_DMA_VALID;
		fdisc->u.fxiocb.rsp_len = sizeof(struct config_info_data);
		break;
	case FXDISC_GET_PORT_INFO:
		fdisc->u.fxiocb.flags =
		    SRB_FXDISC_RESP_DMA_VALID | SRB_FXDISC_REQ_DWRD_VALID;
		fdisc->u.fxiocb.rsp_len = QLAFX00_PORT_DATA_INFO;
		fdisc->u.fxiocb.req_data = cpu_to_le32(fcport->port_id);
		break;
	case FXDISC_GET_TGT_NODE_INFO:
		fdisc->u.fxiocb.flags =
		    SRB_FXDISC_RESP_DMA_VALID | SRB_FXDISC_REQ_DWRD_VALID;
		fdisc->u.fxiocb.rsp_len = QLAFX00_TGT_NODE_INFO;
		fdisc->u.fxiocb.req_data = cpu_to_le32(fcport->tgt_id);
		break;
	case FXDISC_GET_TGT_NODE_LIST:
		fdisc->u.fxiocb.flags =
		    SRB_FXDISC_RESP_DMA_VALID | SRB_FXDISC_REQ_DWRD_VALID;
		fdisc->u.fxiocb.rsp_len = QLAFX00_TGT_NODE_LIST_SIZE;
		break;
	case FXDISC_REG_HOST_INFO:
		fdisc->u.fxiocb.flags = SRB_FXDISC_REQ_DMA_VALID;
		fdisc->u.fxiocb.req_len = sizeof(struct register_host_info);
		p_sysid = utsname();
		if (!p_sysid) {
			ql_log(ql_log_warn, vha, 0x303c,
			    "Not able to get the system information\n");
			goto done_free_sp;
		}
		break;
	case FXDISC_ABORT_IOCTL:
	default:
		break;
	}

	if (fdisc->u.fxiocb.flags & SRB_FXDISC_REQ_DMA_VALID) {
		fdisc->u.fxiocb.req_addr = dma_alloc_coherent(&ha->pdev->dev,
		    fdisc->u.fxiocb.req_len,
		    &fdisc->u.fxiocb.req_dma_handle, GFP_KERNEL);
		if (!fdisc->u.fxiocb.req_addr)
			goto done_free_sp;

		if (fx_type == FXDISC_REG_HOST_INFO) {
			preg_hsi = (struct register_host_info *)
			    fdisc->u.fxiocb.req_addr;
			phost_info = &preg_hsi->hsi;
			memset(preg_hsi, 0, sizeof(struct register_host_info));
			phost_info->os_type = OS_TYPE_LINUX;
			strncpy(phost_info->sysname,
			    p_sysid->sysname, SYSNAME_LENGTH);
			strncpy(phost_info->nodename,
			    p_sysid->nodename, NODENAME_LENGTH);
			if (!strcmp(phost_info->nodename, "(none)"))
				ha->mr.host_info_resend = true;
			strncpy(phost_info->release,
			    p_sysid->release, RELEASE_LENGTH);
			strncpy(phost_info->version,
			    p_sysid->version, VERSION_LENGTH);
			strncpy(phost_info->machine,
			    p_sysid->machine, MACHINE_LENGTH);
			strncpy(phost_info->domainname,
			    p_sysid->domainname, DOMNAME_LENGTH);
			strncpy(phost_info->hostdriver,
			    QLA2XXX_VERSION, VERSION_LENGTH);
			preg_hsi->utc = (uint64_t)ktime_get_real_seconds();
			ql_dbg(ql_dbg_init, vha, 0x0149,
			    "ISP%04X: Host registration with firmware\n",
			    ha->pdev->device);
			ql_dbg(ql_dbg_init, vha, 0x014a,
			    "os_type = '%d', sysname = '%s', nodename = '%s'\n",
			    phost_info->os_type,
			    phost_info->sysname,
			    phost_info->nodename);
			ql_dbg(ql_dbg_init, vha, 0x014b,
			    "release = '%s', version = '%s'\n",
			    phost_info->release,
			    phost_info->version);
			ql_dbg(ql_dbg_init, vha, 0x014c,
			    "machine = '%s' "
			    "domainname = '%s', hostdriver = '%s'\n",
			    phost_info->machine,
			    phost_info->domainname,
			    phost_info->hostdriver);
			ql_dump_buffer(ql_dbg_init + ql_dbg_disc, vha, 0x014d,
			    (uint8_t *)phost_info,
			    sizeof(struct host_system_info));
		}
	}

	if (fdisc->u.fxiocb.flags & SRB_FXDISC_RESP_DMA_VALID) {
		fdisc->u.fxiocb.rsp_addr = dma_alloc_coherent(&ha->pdev->dev,
		    fdisc->u.fxiocb.rsp_len,
		    &fdisc->u.fxiocb.rsp_dma_handle, GFP_KERNEL);
		if (!fdisc->u.fxiocb.rsp_addr)
			goto done_unmap_req;
	}

	sp->type = SRB_FXIOCB_DCMD;
	sp->name = "fxdisc";
	qla2x00_init_timer(sp, FXDISC_TIMEOUT);
	fdisc->timeout = qla2x00_fxdisc_iocb_timeout;
	fdisc->u.fxiocb.req_func_type = cpu_to_le16(fx_type);
	sp->done = qla2x00_fxdisc_sp_done;

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_unmap_dma;

	wait_for_completion(&fdisc->u.fxiocb.fxiocb_comp);

	if (fx_type == FXDISC_GET_CONFIG_INFO) {
		struct config_info_data *pinfo =
		    (struct config_info_data *) fdisc->u.fxiocb.rsp_addr;
		strcpy(vha->hw->model_number, pinfo->model_num);
		strcpy(vha->hw->model_desc, pinfo->model_description);
		memcpy(&vha->hw->mr.symbolic_name, pinfo->symbolic_name,
		    sizeof(vha->hw->mr.symbolic_name));
		memcpy(&vha->hw->mr.serial_num, pinfo->serial_num,
		    sizeof(vha->hw->mr.serial_num));
		memcpy(&vha->hw->mr.hw_version, pinfo->hw_version,
		    sizeof(vha->hw->mr.hw_version));
		memcpy(&vha->hw->mr.fw_version, pinfo->fw_version,
		    sizeof(vha->hw->mr.fw_version));
		strim(vha->hw->mr.fw_version);
		memcpy(&vha->hw->mr.uboot_version, pinfo->uboot_version,
		    sizeof(vha->hw->mr.uboot_version));
		memcpy(&vha->hw->mr.fru_serial_num, pinfo->fru_serial_num,
		    sizeof(vha->hw->mr.fru_serial_num));
		vha->hw->mr.critical_temperature =
		    (pinfo->nominal_temp_value) ?
		    pinfo->nominal_temp_value : QLAFX00_CRITEMP_THRSHLD;
		ha->mr.extended_io_enabled = (pinfo->enabled_capabilities &
		    QLAFX00_EXTENDED_IO_EN_MASK) != 0;
	} else if (fx_type == FXDISC_GET_PORT_INFO) {
		struct port_info_data *pinfo =
		    (struct port_info_data *) fdisc->u.fxiocb.rsp_addr;
		memcpy(vha->node_name, pinfo->node_name, WWN_SIZE);
		memcpy(vha->port_name, pinfo->port_name, WWN_SIZE);
		vha->d_id.b.domain = pinfo->port_id[0];
		vha->d_id.b.area = pinfo->port_id[1];
		vha->d_id.b.al_pa = pinfo->port_id[2];
		qlafx00_update_host_attr(vha, pinfo);
		ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0141,
		    (uint8_t *)pinfo, 16);
	} else if (fx_type == FXDISC_GET_TGT_NODE_INFO) {
		struct qlafx00_tgt_node_info *pinfo =
		    (struct qlafx00_tgt_node_info *) fdisc->u.fxiocb.rsp_addr;
		memcpy(fcport->node_name, pinfo->tgt_node_wwnn, WWN_SIZE);
		memcpy(fcport->port_name, pinfo->tgt_node_wwpn, WWN_SIZE);
		fcport->port_type = FCT_TARGET;
		ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0144,
		    (uint8_t *)pinfo, 16);
	} else if (fx_type == FXDISC_GET_TGT_NODE_LIST) {
		struct qlafx00_tgt_node_info *pinfo =
		    (struct qlafx00_tgt_node_info *) fdisc->u.fxiocb.rsp_addr;
		ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0146,
		    (uint8_t *)pinfo, 16);
		memcpy(vha->hw->gid_list, pinfo, QLAFX00_TGT_NODE_LIST_SIZE);
	} else if (fx_type == FXDISC_ABORT_IOCTL)
		fdisc->u.fxiocb.result =
		    (fdisc->u.fxiocb.result ==
			cpu_to_le32(QLAFX00_IOCTL_ICOB_ABORT_SUCCESS)) ?
		    cpu_to_le32(QLA_SUCCESS) : cpu_to_le32(QLA_FUNCTION_FAILED);

	rval = le32_to_cpu(fdisc->u.fxiocb.result);

done_unmap_dma:
	if (fdisc->u.fxiocb.rsp_addr)
		dma_free_coherent(&ha->pdev->dev, fdisc->u.fxiocb.rsp_len,
		    fdisc->u.fxiocb.rsp_addr, fdisc->u.fxiocb.rsp_dma_handle);

done_unmap_req:
	if (fdisc->u.fxiocb.req_addr)
		dma_free_coherent(&ha->pdev->dev, fdisc->u.fxiocb.req_len,
		    fdisc->u.fxiocb.req_addr, fdisc->u.fxiocb.req_dma_handle);
done_free_sp:
	sp->free(vha, sp);
done:
	return rval;
}

/*
 * qlafx00_initialize_adapter
 *	Initialize board.
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Returns:
 *	0 = success
 */
int
qlafx00_initialize_adapter(scsi_qla_host_t *vha)
{
	int	rval;
	struct qla_hw_data *ha = vha->hw;
	uint32_t tempc;

	/* Clear adapter flags. */
	vha->flags.online = 0;
	ha->flags.chip_reset_done = 0;
	vha->flags.reset_active = 0;
	ha->flags.pci_channel_io_perm_failure = 0;
	ha->flags.eeh_busy = 0;
	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
	atomic_set(&vha->loop_state, LOOP_DOWN);
	vha->device_flags = DFLG_NO_CABLE;
	vha->dpc_flags = 0;
	vha->flags.management_server_logged_in = 0;
	ha->isp_abort_cnt = 0;
	ha->beacon_blink_led = 0;

	set_bit(0, ha->req_qid_map);
	set_bit(0, ha->rsp_qid_map);

	ql_dbg(ql_dbg_init, vha, 0x0147,
	    "Configuring PCI space...\n");

	rval = ha->isp_ops->pci_config(vha);
	if (rval) {
		ql_log(ql_log_warn, vha, 0x0148,
		    "Unable to configure PCI space.\n");
		return rval;
	}

	rval = qlafx00_init_fw_ready(vha);
	if (rval != QLA_SUCCESS)
		return rval;

	qlafx00_save_queue_ptrs(vha);

	rval = qlafx00_config_queues(vha);
	if (rval != QLA_SUCCESS)
		return rval;

	/*
	 * Allocate the array of outstanding commands
	 * now that we know the firmware resources.
	 */
	rval = qla2x00_alloc_outstanding_cmds(ha, vha->req);
	if (rval != QLA_SUCCESS)
		return rval;

	rval = qla2x00_init_rings(vha);
	ha->flags.chip_reset_done = 1;

	tempc = QLAFX00_GET_TEMPERATURE(ha);
	ql_dbg(ql_dbg_init, vha, 0x0152,
	    "ISPFx00(%s): Critical temp timer, current SOC temperature: 0x%x\n",
	    __func__, tempc);

	return rval;
}

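/*
 * qlafx00_fw_state_show
 *	sysfs helper: return the current firmware state, or ~0 when it
 *	cannot be queried (reset active or EEH busy).
 */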
/*
 * qlafx00_initialize_adapter
 *      Initialize board.
 *
 * Input:
 *      ha = adapter block pointer.
 *
 * Returns:
 *      0 = success
 */
int
qlafx00_initialize_adapter(scsi_qla_host_t *vha)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	uint32_t tempc;

	/* Clear adapter flags. */
	vha->flags.online = 0;
	ha->flags.chip_reset_done = 0;
	vha->flags.reset_active = 0;
	ha->flags.pci_channel_io_perm_failure = 0;
	ha->flags.eeh_busy = 0;
	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
	atomic_set(&vha->loop_state, LOOP_DOWN);
	vha->device_flags = DFLG_NO_CABLE;
	vha->dpc_flags = 0;
	vha->flags.management_server_logged_in = 0;
	ha->isp_abort_cnt = 0;
	ha->beacon_blink_led = 0;

	set_bit(0, ha->req_qid_map);
	set_bit(0, ha->rsp_qid_map);

	ql_dbg(ql_dbg_init, vha, 0x0147,
	    "Configuring PCI space...\n");

	rval = ha->isp_ops->pci_config(vha);
	if (rval) {
		ql_log(ql_log_warn, vha, 0x0148,
		    "Unable to configure PCI space.\n");
		return rval;
	}

	rval = qlafx00_init_fw_ready(vha);
	if (rval != QLA_SUCCESS)
		return rval;

	qlafx00_save_queue_ptrs(vha);

	rval = qlafx00_config_queues(vha);
	if (rval != QLA_SUCCESS)
		return rval;

	/*
	 * Allocate the array of outstanding commands
	 * now that we know the firmware resources.
	 */
	rval = qla2x00_alloc_outstanding_cmds(ha, vha->req);
	if (rval != QLA_SUCCESS)
		return rval;

	rval = qla2x00_init_rings(vha);
	ha->flags.chip_reset_done = 1;

	tempc = QLAFX00_GET_TEMPERATURE(ha);
	ql_dbg(ql_dbg_init, vha, 0x0152,
	    "ISPFx00(%s): Critical temp timer, current SOC temperature: 0x%x\n",
	    __func__, tempc);

	return rval;
}

uint32_t
qlafx00_fw_state_show(struct device *dev, struct device_attribute *attr,
		      char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	int rval = QLA_FUNCTION_FAILED;
	uint32_t state[1];

	if (qla2x00_reset_active(vha))
		ql_log(ql_log_warn, vha, 0x70ce,
		    "ISP reset active.\n");
	else if (!vha->hw->flags.eeh_busy) {
		rval = qlafx00_get_firmware_state(vha, state);
	}
	if (rval != QLA_SUCCESS)
		memset(state, -1, sizeof(state));

	return state[0];
}

void
qlafx00_get_host_speed(struct Scsi_Host *shost)
{
	struct qla_hw_data *ha = ((struct scsi_qla_host *)
	    (shost_priv(shost)))->hw;
	u32 speed = FC_PORTSPEED_UNKNOWN;

	switch (ha->link_data_rate) {
	case QLAFX00_PORT_SPEED_2G:
		speed = FC_PORTSPEED_2GBIT;
		break;
	case QLAFX00_PORT_SPEED_4G:
		speed = FC_PORTSPEED_4GBIT;
		break;
	case QLAFX00_PORT_SPEED_8G:
		speed = FC_PORTSPEED_8GBIT;
		break;
	case QLAFX00_PORT_SPEED_10G:
		speed = FC_PORTSPEED_10GBIT;
		break;
	}
	fc_host_speed(shost) = speed;
}

/* QLAFX00 specific ISR implementation functions */
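/*
 * Sense handling is two-stage on ISPFx00: the status IOCB carries only the
 * first chunk of sense data, while the firmware reports the full length
 * (tracked via SET_FW_SENSE_LEN()). Any remainder arrives in
 * STATUS_CONT_TYPE_FX00 entries and is appended by
 * qlafx00_status_cont_entry() through the cursor maintained with
 * SET_CMD_SENSE_PTR()/SET_CMD_SENSE_LEN(). While a continuation is
 * pending, rsp->status_srb points at the owning srb and final completion
 * is deferred until the outstanding firmware sense length drops to zero.
 */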
static inline void
qlafx00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
		     uint32_t sense_len, struct rsp_que *rsp, int res)
{
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct scsi_cmnd *cp = GET_CMD_SP(sp);
	uint32_t track_sense_len;

	SET_FW_SENSE_LEN(sp, sense_len);

	if (sense_len >= SCSI_SENSE_BUFFERSIZE)
		sense_len = SCSI_SENSE_BUFFERSIZE;

	SET_CMD_SENSE_LEN(sp, sense_len);
	SET_CMD_SENSE_PTR(sp, cp->sense_buffer);
	track_sense_len = sense_len;

	if (sense_len > par_sense_len)
		sense_len = par_sense_len;

	memcpy(cp->sense_buffer, sense_data, sense_len);

	SET_FW_SENSE_LEN(sp, GET_FW_SENSE_LEN(sp) - sense_len);

	SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len);
	track_sense_len -= sense_len;
	SET_CMD_SENSE_LEN(sp, track_sense_len);

	ql_dbg(ql_dbg_io, vha, 0x304d,
	    "sense_len=0x%x par_sense_len=0x%x track_sense_len=0x%x.\n",
	    sense_len, par_sense_len, track_sense_len);
	if (GET_FW_SENSE_LEN(sp) > 0) {
		rsp->status_srb = sp;
		cp->result = res;
	}

	if (sense_len) {
		ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3039,
		    "Check condition Sense data, nexus%ld:%d:%llu cmd=%p.\n",
		    sp->fcport->vha->host_no, cp->device->id, cp->device->lun,
		    cp);
		ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x3049,
		    cp->sense_buffer, sense_len);
	}
}

static void
qlafx00_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
		      struct tsk_mgmt_entry_fx00 *pkt, srb_t *sp,
		      __le16 sstatus, __le16 cpstatus)
{
	struct srb_iocb *tmf;

	tmf = &sp->u.iocb_cmd;
	if (cpstatus != cpu_to_le16((uint16_t)CS_COMPLETE) ||
	    (sstatus & cpu_to_le16((uint16_t)SS_RESPONSE_INFO_LEN_VALID)))
		cpstatus = cpu_to_le16((uint16_t)CS_INCOMPLETE);
	tmf->u.tmf.comp_status = cpstatus;
	sp->done(vha, sp, 0);
}

static void
qlafx00_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
			 struct abort_iocb_entry_fx00 *pkt)
{
	const char func[] = "ABT_IOCB";
	srb_t *sp;
	struct srb_iocb *abt;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	abt = &sp->u.iocb_cmd;
	abt->u.abt.comp_status = pkt->tgt_id_sts;
	sp->done(vha, sp, 0);
}

static void
qlafx00_ioctl_iosb_entry(scsi_qla_host_t *vha, struct req_que *req,
			 struct ioctl_iocb_entry_fx00 *pkt)
{
	const char func[] = "IOSB_IOCB";
	srb_t *sp;
	struct fc_bsg_job *bsg_job;
	struct srb_iocb *iocb_job;
	int res = 0;	/* default; the SRB_FXIOCB_DCMD path sets no result */
	struct qla_mt_iocb_rsp_fx00 fstatus;
	uint8_t	*fw_sts_ptr;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	if (sp->type == SRB_FXIOCB_DCMD) {
		iocb_job = &sp->u.iocb_cmd;
		iocb_job->u.fxiocb.seq_number = pkt->seq_no;
		iocb_job->u.fxiocb.fw_flags = pkt->fw_iotcl_flags;
		iocb_job->u.fxiocb.result = pkt->status;
		if (iocb_job->u.fxiocb.flags & SRB_FXDISC_RSP_DWRD_VALID)
			iocb_job->u.fxiocb.req_data = pkt->dataword_r;
	} else {
		bsg_job = sp->u.bsg_job;

		memset(&fstatus, 0, sizeof(struct qla_mt_iocb_rsp_fx00));

		fstatus.reserved_1 = pkt->reserved_0;
		fstatus.func_type = pkt->comp_func_num;
		fstatus.ioctl_flags = pkt->fw_iotcl_flags;
		fstatus.ioctl_data = pkt->dataword_r;
		fstatus.adapid = pkt->adapid;
		fstatus.reserved_2 = pkt->dataword_r_extra;
		fstatus.res_count = pkt->residuallen;
		fstatus.status = pkt->status;
		fstatus.seq_number = pkt->seq_no;
		memcpy(fstatus.reserved_3,
		    pkt->reserved_2, 20 * sizeof(uint8_t));

		fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
		    sizeof(struct fc_bsg_reply);

		memcpy(fw_sts_ptr, (uint8_t *)&fstatus,
		    sizeof(struct qla_mt_iocb_rsp_fx00));
		bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
		    sizeof(struct qla_mt_iocb_rsp_fx00) + sizeof(uint8_t);

		ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
		    sp->fcport->vha, 0x5080,
		    (uint8_t *)pkt, sizeof(struct ioctl_iocb_entry_fx00));

		ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
		    sp->fcport->vha, 0x5074,
		    (uint8_t *)fw_sts_ptr, sizeof(struct qla_mt_iocb_rsp_fx00));

		res = bsg_job->reply->result = DID_OK << 16;
		bsg_job->reply->reply_payload_rcv_len =
		    bsg_job->reply_payload.payload_len;
	}
	sp->done(vha, sp, res);
}
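/*
 * Completion handles returned by the firmware pack two values: the low
 * word indexes req->outstanding_cmds[] and the high word selects the
 * request queue. The decode used below (and in
 * qlafx00_multistatus_entry()) is simply:
 *
 *	handle = LSW(hindex);		- outstanding command slot
 *	que    = MSW(hindex);		- request queue number
 *	req    = ha->req_q_map[que];
 *
 * An out-of-range slot is treated as fatal and schedules an ISP abort via
 * ISP_ABORT_NEEDED.
 */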
/**
 * qlafx00_status_entry() - Process a Status IOCB entry.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 * @pkt: Entry pointer
 */
static void
qlafx00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
{
	srb_t *sp;
	fc_port_t *fcport;
	struct scsi_cmnd *cp;
	struct sts_entry_fx00 *sts;
	__le16 comp_status;
	__le16 scsi_status;
	__le16 lscsi_status;
	int32_t resid;
	uint32_t sense_len, par_sense_len, rsp_info_len, resid_len,
	    fw_resid_len;
	uint8_t *rsp_info = NULL, *sense_data = NULL;
	struct qla_hw_data *ha = vha->hw;
	uint32_t hindex, handle;
	uint16_t que;
	struct req_que *req;
	int logit = 1;
	int res = 0;

	sts = (struct sts_entry_fx00 *) pkt;

	comp_status = sts->comp_status;
	scsi_status = sts->scsi_status & cpu_to_le16((uint16_t)SS_MASK);
	hindex = sts->handle;
	handle = LSW(hindex);

	que = MSW(hindex);
	req = ha->req_q_map[que];

	/* Validate handle. */
	if (handle < req->num_outstanding_cmds)
		sp = req->outstanding_cmds[handle];
	else
		sp = NULL;

	if (sp == NULL) {
		ql_dbg(ql_dbg_io, vha, 0x3034,
		    "Invalid status handle (0x%x).\n", handle);

		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		qla2xxx_wake_dpc(vha);
		return;
	}

	if (sp->type == SRB_TM_CMD) {
		req->outstanding_cmds[handle] = NULL;
		qlafx00_tm_iocb_entry(vha, req, pkt, sp,
		    scsi_status, comp_status);
		return;
	}

	/* Fast path completion. */
	if (comp_status == CS_COMPLETE && scsi_status == 0) {
		qla2x00_process_completed_request(vha, req, handle);
		return;
	}

	req->outstanding_cmds[handle] = NULL;
	cp = GET_CMD_SP(sp);
	if (cp == NULL) {
		ql_dbg(ql_dbg_io, vha, 0x3048,
		    "Command already returned (0x%x/%p).\n",
		    handle, sp);

		return;
	}

	lscsi_status = scsi_status & cpu_to_le16((uint16_t)STATUS_MASK);

	fcport = sp->fcport;

	sense_len = par_sense_len = rsp_info_len = resid_len =
	    fw_resid_len = 0;
	if (scsi_status & cpu_to_le16((uint16_t)SS_SENSE_LEN_VALID))
		sense_len = sts->sense_len;
	if (scsi_status & cpu_to_le16(((uint16_t)SS_RESIDUAL_UNDER
	    | (uint16_t)SS_RESIDUAL_OVER)))
		resid_len = le32_to_cpu(sts->residual_len);
	if (comp_status == cpu_to_le16((uint16_t)CS_DATA_UNDERRUN))
		fw_resid_len = le32_to_cpu(sts->residual_len);
	rsp_info = sense_data = sts->data;
	par_sense_len = sizeof(sts->data);

	/* Check for overrun. */
	if (comp_status == CS_COMPLETE &&
	    scsi_status & cpu_to_le16((uint16_t)SS_RESIDUAL_OVER))
		comp_status = cpu_to_le16((uint16_t)CS_DATA_OVERRUN);

	/*
	 * Based on Host and scsi status generate status code for Linux
	 */
	switch (le16_to_cpu(comp_status)) {
	case CS_COMPLETE:
	case CS_QUEUE_FULL:
		if (scsi_status == 0) {
			res = DID_OK << 16;
			break;
		}
		if (scsi_status & cpu_to_le16(((uint16_t)SS_RESIDUAL_UNDER
		    | (uint16_t)SS_RESIDUAL_OVER))) {
			resid = resid_len;
			scsi_set_resid(cp, resid);

			if (!lscsi_status &&
			    ((unsigned)(scsi_bufflen(cp) - resid) <
			     cp->underflow)) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x3050,
				    "Mid-layer underflow "
				    "detected (0x%x of 0x%x bytes).\n",
				    resid, scsi_bufflen(cp));

				res = DID_ERROR << 16;
				break;
			}
		}
		res = DID_OK << 16 | le16_to_cpu(lscsi_status);

		if (lscsi_status ==
		    cpu_to_le16((uint16_t)SAM_STAT_TASK_SET_FULL)) {
			ql_dbg(ql_dbg_io, fcport->vha, 0x3051,
			    "QUEUE FULL detected.\n");
			break;
		}
		logit = 0;
		if (lscsi_status != cpu_to_le16((uint16_t)SS_CHECK_CONDITION))
			break;

		memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
		if (!(scsi_status & cpu_to_le16((uint16_t)SS_SENSE_LEN_VALID)))
			break;

		qlafx00_handle_sense(sp, sense_data, par_sense_len, sense_len,
		    rsp, res);
		break;

	case CS_DATA_UNDERRUN:
		/* Use F/W calculated residual length. */
		if (IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha))
			resid = fw_resid_len;
		else
			resid = resid_len;
		scsi_set_resid(cp, resid);
		if (scsi_status & cpu_to_le16((uint16_t)SS_RESIDUAL_UNDER)) {
			if ((IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha))
			    && fw_resid_len != resid_len) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x3052,
				    "Dropped frame(s) detected "
				    "(0x%x of 0x%x bytes).\n",
				    resid, scsi_bufflen(cp));

				res = DID_ERROR << 16 |
				    le16_to_cpu(lscsi_status);
				goto check_scsi_status;
			}

			if (!lscsi_status &&
			    ((unsigned)(scsi_bufflen(cp) - resid) <
			     cp->underflow)) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x3053,
				    "Mid-layer underflow "
				    "detected (0x%x of 0x%x bytes, "
				    "cp->underflow: 0x%x).\n",
				    resid, scsi_bufflen(cp), cp->underflow);

				res = DID_ERROR << 16;
				break;
			}
		} else if (lscsi_status !=
		    cpu_to_le16((uint16_t)SAM_STAT_TASK_SET_FULL) &&
		    lscsi_status != cpu_to_le16((uint16_t)SAM_STAT_BUSY)) {
			/*
			 * scsi status of task set and busy are considered
			 * to be task not completed.
			 */
			ql_dbg(ql_dbg_io, fcport->vha, 0x3054,
			    "Dropped frame(s) detected (0x%x "
			    "of 0x%x bytes).\n", resid,
			    scsi_bufflen(cp));

			res = DID_ERROR << 16 | le16_to_cpu(lscsi_status);
			goto check_scsi_status;
		} else {
			ql_dbg(ql_dbg_io, fcport->vha, 0x3055,
			    "scsi_status: 0x%x, lscsi_status: 0x%x\n",
			    scsi_status, lscsi_status);
		}

		res = DID_OK << 16 | le16_to_cpu(lscsi_status);
		logit = 0;

check_scsi_status:
		/*
		 * Check to see if SCSI Status is non zero. If so report SCSI
		 * Status.
		 */
		if (lscsi_status != 0) {
			if (lscsi_status ==
			    cpu_to_le16((uint16_t)SAM_STAT_TASK_SET_FULL)) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x3056,
				    "QUEUE FULL detected.\n");
				logit = 1;
				break;
			}
			if (lscsi_status !=
			    cpu_to_le16((uint16_t)SS_CHECK_CONDITION))
				break;

			memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
			if (!(scsi_status &
			    cpu_to_le16((uint16_t)SS_SENSE_LEN_VALID)))
				break;

			qlafx00_handle_sense(sp, sense_data, par_sense_len,
			    sense_len, rsp, res);
		}
		break;

	case CS_PORT_LOGGED_OUT:
	case CS_PORT_CONFIG_CHG:
	case CS_PORT_BUSY:
	case CS_INCOMPLETE:
	case CS_PORT_UNAVAILABLE:
	case CS_TIMEOUT:
	case CS_RESET:
		/*
		 * We are going to have the fc class block the rport
		 * while we try to recover so instruct the mid layer
		 * to requeue until the class decides how to handle this.
		 */
		res = DID_TRANSPORT_DISRUPTED << 16;

		ql_dbg(ql_dbg_io, fcport->vha, 0x3057,
		    "Port down status: port-state=0x%x.\n",
		    atomic_read(&fcport->state));

		if (atomic_read(&fcport->state) == FCS_ONLINE)
			qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
		break;

	case CS_ABORTED:
		res = DID_RESET << 16;
		break;

	default:
		res = DID_ERROR << 16;
		break;
	}

	if (logit)
		ql_dbg(ql_dbg_io, fcport->vha, 0x3058,
		    "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu "
		    "tgt_id: 0x%x lscsi_status: 0x%x cdb=%10phN len=0x%x "
		    "rsp_info=%p resid=0x%x fw_resid=0x%x sense_len=0x%x, "
		    "par_sense_len=0x%x, rsp_info_len=0x%x\n",
		    comp_status, scsi_status, res, vha->host_no,
		    cp->device->id, cp->device->lun, fcport->tgt_id,
		    lscsi_status, cp->cmnd, scsi_bufflen(cp),
		    rsp_info, resid_len, fw_resid_len, sense_len,
		    par_sense_len, rsp_info_len);

	if (rsp->status_srb == NULL)
		sp->done(ha, sp, res);
}
/**
 * qlafx00_status_cont_entry() - Process a Status Continuations entry.
 * @rsp: response queue
 * @pkt: Entry pointer
 *
 * Extended sense data.
 */
static void
qlafx00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
{
	uint8_t	sense_sz = 0;
	struct qla_hw_data *ha = rsp->hw;
	struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
	srb_t *sp = rsp->status_srb;
	struct scsi_cmnd *cp;
	uint32_t sense_len;
	uint8_t *sense_ptr;

	if (!sp) {
		ql_dbg(ql_dbg_io, vha, 0x3037,
		    "no SP, sp = %p\n", sp);
		return;
	}

	if (!GET_FW_SENSE_LEN(sp)) {
		ql_dbg(ql_dbg_io, vha, 0x304b,
		    "no fw sense data, sp = %p\n", sp);
		return;
	}
	cp = GET_CMD_SP(sp);
	if (cp == NULL) {
		ql_log(ql_log_warn, vha, 0x303b,
		    "cmd is NULL: already returned to OS (sp=%p).\n", sp);

		rsp->status_srb = NULL;
		return;
	}

	if (!GET_CMD_SENSE_LEN(sp)) {
		ql_dbg(ql_dbg_io, vha, 0x304c,
		    "no sense data, sp = %p\n", sp);
	} else {
		sense_len = GET_CMD_SENSE_LEN(sp);
		sense_ptr = GET_CMD_SENSE_PTR(sp);
		ql_dbg(ql_dbg_io, vha, 0x304f,
		    "sp=%p sense_len=0x%x sense_ptr=%p.\n",
		    sp, sense_len, sense_ptr);

		if (sense_len > sizeof(pkt->data))
			sense_sz = sizeof(pkt->data);
		else
			sense_sz = sense_len;

		/* Move sense data. */
		ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x304e,
		    (uint8_t *)pkt, sizeof(sts_cont_entry_t));
		memcpy(sense_ptr, pkt->data, sense_sz);
		ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x304a,
		    sense_ptr, sense_sz);

		sense_len -= sense_sz;
		sense_ptr += sense_sz;

		SET_CMD_SENSE_PTR(sp, sense_ptr);
		SET_CMD_SENSE_LEN(sp, sense_len);
	}
	sense_len = GET_FW_SENSE_LEN(sp);
	sense_len = (sense_len > sizeof(pkt->data)) ?
	    (sense_len - sizeof(pkt->data)) : 0;
	SET_FW_SENSE_LEN(sp, sense_len);

	/* Place command on done queue. */
	if (sense_len == 0) {
		rsp->status_srb = NULL;
		sp->done(ha, sp, cp->result);
	}
}

/**
 * qlafx00_multistatus_entry() - Process Multi response queue entries.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 * @pkt: Entry pointer
 */
static void
qlafx00_multistatus_entry(struct scsi_qla_host *vha,
			  struct rsp_que *rsp, void *pkt)
{
	srb_t *sp;
	struct multi_sts_entry_fx00 *stsmfx;
	struct qla_hw_data *ha = vha->hw;
	uint32_t handle, hindex, handle_count, i;
	uint16_t que;
	struct req_que *req;
	__le32 *handle_ptr;

	stsmfx = (struct multi_sts_entry_fx00 *) pkt;

	handle_count = stsmfx->handle_count;

	if (handle_count > MAX_HANDLE_COUNT) {
		ql_dbg(ql_dbg_io, vha, 0x3035,
		    "Invalid handle count (0x%x).\n", handle_count);
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		qla2xxx_wake_dpc(vha);
		return;
	}

	handle_ptr = &stsmfx->handles[0];

	for (i = 0; i < handle_count; i++) {
		hindex = le32_to_cpu(*handle_ptr);
		handle = LSW(hindex);
		que = MSW(hindex);
		req = ha->req_q_map[que];

		/* Validate handle. */
		if (handle < req->num_outstanding_cmds)
			sp = req->outstanding_cmds[handle];
		else
			sp = NULL;

		if (sp == NULL) {
			ql_dbg(ql_dbg_io, vha, 0x3044,
			    "Invalid status handle (0x%x).\n", handle);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			qla2xxx_wake_dpc(vha);
			return;
		}
		qla2x00_process_completed_request(vha, req, handle);
		handle_ptr++;
	}
}

/**
 * qlafx00_error_entry() - Process an error entry.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 * @pkt: Entry pointer
 * @estatus: error status of the entry
 * @etype: entry type
 */
static void
qlafx00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp,
		    struct sts_entry_fx00 *pkt, uint8_t estatus, uint8_t etype)
{
	srb_t *sp;
	struct qla_hw_data *ha = vha->hw;
	const char func[] = "ERROR-IOCB";
	uint16_t que = 0;
	struct req_que *req = NULL;
	int res = DID_ERROR << 16;

	ql_dbg(ql_dbg_async, vha, 0x507f,
	    "type of error status in response: 0x%x\n", estatus);

	req = ha->req_q_map[que];

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (sp) {
		sp->done(ha, sp, res);
		return;
	}

	set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	qla2xxx_wake_dpc(vha);
}
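/*
 * Response entries live in adapter memory, so the processing loop below
 * first copies each entry out with memcpy_fromio() into the local
 * rsp->rsp_pkt staging buffer before decoding it. The loop drains entries
 * until the local out-index catches up with the in-pointer sampled from
 * the chip, then publishes the new out-index with a single register write.
 */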
/**
 * qlafx00_process_response_queue() - Process response queue entries.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 */
static void
qlafx00_process_response_queue(struct scsi_qla_host *vha,
			       struct rsp_que *rsp)
{
	struct sts_entry_fx00 *pkt;
	response_t *lptr;
	uint16_t lreq_q_in = 0;
	uint16_t lreq_q_out = 0;

	lreq_q_in = RD_REG_DWORD(rsp->rsp_q_in);
	lreq_q_out = rsp->ring_index;

	while (lreq_q_in != lreq_q_out) {
		lptr = rsp->ring_ptr;
		memcpy_fromio(rsp->rsp_pkt, (void __iomem *)lptr,
		    sizeof(rsp->rsp_pkt));
		pkt = (struct sts_entry_fx00 *)rsp->rsp_pkt;

		rsp->ring_index++;
		lreq_q_out++;
		if (rsp->ring_index == rsp->length) {
			lreq_q_out = 0;
			rsp->ring_index = 0;
			rsp->ring_ptr = rsp->ring;
		} else {
			rsp->ring_ptr++;
		}

		if (pkt->entry_status != 0 &&
		    pkt->entry_type != IOCTL_IOSB_TYPE_FX00) {
			qlafx00_error_entry(vha, rsp,
			    (struct sts_entry_fx00 *)pkt, pkt->entry_status,
			    pkt->entry_type);
			continue;
		}

		switch (pkt->entry_type) {
		case STATUS_TYPE_FX00:
			qlafx00_status_entry(vha, rsp, pkt);
			break;

		case STATUS_CONT_TYPE_FX00:
			qlafx00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
			break;

		case MULTI_STATUS_TYPE_FX00:
			qlafx00_multistatus_entry(vha, rsp, pkt);
			break;

		case ABORT_IOCB_TYPE_FX00:
			qlafx00_abort_iocb_entry(vha, rsp->req,
			    (struct abort_iocb_entry_fx00 *)pkt);
			break;

		case IOCTL_IOSB_TYPE_FX00:
			qlafx00_ioctl_iosb_entry(vha, rsp->req,
			    (struct ioctl_iocb_entry_fx00 *)pkt);
			break;

		default:
			/* Type Not Supported. */
			ql_dbg(ql_dbg_async, vha, 0x5081,
			    "Received unknown response pkt type %x "
			    "entry status=%x.\n",
			    pkt->entry_type, pkt->entry_status);
			break;
		}
	}

	/* Adjust ring index */
	WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
}
/**
 * qlafx00_async_event() - Process asynchronous events.
 * @vha: SCSI driver HA context
 */
static void
qlafx00_async_event(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_fx00 __iomem *reg;
	int data_size = 1;

	reg = &ha->iobase->ispfx00;
	/* Setup to process RIO completion. */
	switch (ha->aenmb[0]) {
	case QLAFX00_MBA_SYSTEM_ERR:		/* System Error */
		ql_log(ql_log_warn, vha, 0x5079,
		    "ISP System Error - aenmb[0]=%x\n", ha->aenmb[0]);
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case QLAFX00_MBA_SHUTDOWN_RQSTD:	/* Shutdown requested */
		ql_dbg(ql_dbg_async, vha, 0x5076,
		    "Asynchronous FW shutdown requested.\n");
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		qla2xxx_wake_dpc(vha);
		break;

	case QLAFX00_MBA_PORT_UPDATE:		/* Port database update */
		ha->aenmb[1] = RD_REG_DWORD(&reg->aenmailbox1);
		ha->aenmb[2] = RD_REG_DWORD(&reg->aenmailbox2);
		ha->aenmb[3] = RD_REG_DWORD(&reg->aenmailbox3);
		ql_dbg(ql_dbg_async, vha, 0x5077,
		    "Asynchronous port Update received "
		    "aenmb[0]: %x, aenmb[1]: %x, aenmb[2]: %x, aenmb[3]: %x\n",
		    ha->aenmb[0], ha->aenmb[1], ha->aenmb[2], ha->aenmb[3]);
		data_size = 4;
		break;

	case QLAFX00_MBA_TEMP_OVER:		/* Over temperature event */
		ql_log(ql_log_info, vha, 0x5085,
		    "Asynchronous over temperature event received "
		    "aenmb[0]: %x\n",
		    ha->aenmb[0]);
		break;

	case QLAFX00_MBA_TEMP_NORM:		/* Normal temperature event */
		ql_log(ql_log_info, vha, 0x5086,
		    "Asynchronous normal temperature event received "
		    "aenmb[0]: %x\n",
		    ha->aenmb[0]);
		break;

	case QLAFX00_MBA_TEMP_CRIT:		/* Critical temperature event */
		ql_log(ql_log_info, vha, 0x5083,
		    "Asynchronous critical temperature event received "
		    "aenmb[0]: %x\n",
		    ha->aenmb[0]);
		break;

	default:
		ha->aenmb[1] = RD_REG_WORD(&reg->aenmailbox1);
		ha->aenmb[2] = RD_REG_WORD(&reg->aenmailbox2);
		ha->aenmb[3] = RD_REG_WORD(&reg->aenmailbox3);
		ha->aenmb[4] = RD_REG_WORD(&reg->aenmailbox4);
		ha->aenmb[5] = RD_REG_WORD(&reg->aenmailbox5);
		ha->aenmb[6] = RD_REG_WORD(&reg->aenmailbox6);
		ha->aenmb[7] = RD_REG_WORD(&reg->aenmailbox7);
		ql_dbg(ql_dbg_async, vha, 0x5078,
		    "AEN:%04x %04x %04x %04x :%04x %04x %04x %04x\n",
		    ha->aenmb[0], ha->aenmb[1], ha->aenmb[2], ha->aenmb[3],
		    ha->aenmb[4], ha->aenmb[5], ha->aenmb[6], ha->aenmb[7]);
		break;
	}
	qlafx00_post_aenfx_work(vha, ha->aenmb[0],
	    (uint32_t *)ha->aenmb, data_size);
}

/**
 * qlafx00_mbx_completion() - Process mailbox command completions.
 * @vha: SCSI driver HA context
 * @mb0: first completion mailbox value (read from the mailbox16 register)
 */
static void
qlafx00_mbx_completion(scsi_qla_host_t *vha, uint32_t mb0)
{
	uint16_t cnt;
	uint32_t __iomem *wptr;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;

	if (!ha->mcp32)
		ql_dbg(ql_dbg_async, vha, 0x507e, "MBX pointer ERROR.\n");

	/* Load return mailbox registers. */
	ha->flags.mbox_int = 1;
	ha->mailbox_out32[0] = mb0;
	wptr = (uint32_t __iomem *)&reg->mailbox17;

	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
		ha->mailbox_out32[cnt] = RD_REG_DWORD(wptr);
		wptr++;
	}
}
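/*
 * Interrupt demultiplexing: QLAFX00_RD_INTR_REG() returns a status word in
 * which QLAFX00_INTR_MB_CMPLT, QLAFX00_INTR_ASYNC_CMPLT and
 * QLAFX00_INTR_RSP_CMPLT may be set independently. The handler below
 * therefore services all three sources on each iteration, acknowledges
 * exactly the bits it consumed via QLAFX00_CLR_INTR_REG(), and re-reads
 * the interrupt register to flush the acknowledgement.
 */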
/**
 * qlafx00_intr_handler() - Process interrupts for the ISPFX00.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qlafx00_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	struct device_reg_fx00 __iomem *reg;
	int status;
	unsigned long iter;
	uint32_t stat;
	uint32_t mb[8];
	struct rsp_que *rsp;
	unsigned long flags;
	uint32_t clr_intr = 0;
	uint32_t intr_stat = 0;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x507d,
		    "%s: NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}

	ha = rsp->hw;
	reg = &ha->iobase->ispfx00;
	status = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return IRQ_HANDLED;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	for (iter = 50; iter--; clr_intr = 0) {
		stat = QLAFX00_RD_INTR_REG(ha);
		if (qla2x00_check_reg32_for_disconnect(vha, stat))
			break;
		intr_stat = stat & QLAFX00_HST_INT_STS_BITS;
		if (!intr_stat)
			break;

		if (stat & QLAFX00_INTR_MB_CMPLT) {
			mb[0] = RD_REG_WORD(&reg->mailbox16);
			qlafx00_mbx_completion(vha, mb[0]);
			status |= MBX_INTERRUPT;
			clr_intr |= QLAFX00_INTR_MB_CMPLT;
		}
		if (intr_stat & QLAFX00_INTR_ASYNC_CMPLT) {
			ha->aenmb[0] = RD_REG_WORD(&reg->aenmailbox0);
			qlafx00_async_event(vha);
			clr_intr |= QLAFX00_INTR_ASYNC_CMPLT;
		}
		if (intr_stat & QLAFX00_INTR_RSP_CMPLT) {
			qlafx00_process_response_queue(vha, rsp);
			clr_intr |= QLAFX00_INTR_RSP_CMPLT;
		}

		QLAFX00_CLR_INTR_REG(ha, clr_intr);
		QLAFX00_RD_INTR_REG(ha);
	}

	qla2x00_handle_mbx_completion(ha, status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return IRQ_HANDLED;
}

/* QLAFX00 specific IOCB implementation functions */
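/*
 * Data-segment descriptor (DSD) layout used by the IOCB builders below: a
 * command IOCB carries one DSD; each continuation Type 1 IOCB obtained
 * from qlafx00_prep_cont_type1_iocb() carries five more. A DSD is three
 * little-endian 32-bit words written through cur_dsd:
 *
 *	*cur_dsd++ = cpu_to_le32(LSD(sle_dma));		- address, low half
 *	*cur_dsd++ = cpu_to_le32(MSD(sle_dma));		- address, high half
 *	*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));	- segment length
 *
 * Builders stage each entry locally and spill a completed continuation to
 * the ring with memcpy_toio() whenever avail_dsds reaches zero.
 */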
static inline cont_a64_entry_t *
qlafx00_prep_cont_type1_iocb(struct req_que *req,
			     cont_a64_entry_t *lcont_pkt)
{
	cont_a64_entry_t *cont_pkt;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	lcont_pkt->entry_type = CONTINUE_A64_TYPE_FX00;

	return cont_pkt;
}

static inline void
qlafx00_build_scsi_iocbs(srb_t *sp, struct cmd_type_7_fx00 *cmd_pkt,
			 uint16_t tot_dsds, struct cmd_type_7_fx00 *lcmd_pkt)
{
	uint16_t avail_dsds;
	__le32 *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i, cont;
	struct req_que *req;
	cont_a64_entry_t lcont_pkt;
	cont_a64_entry_t *cont_pkt;

	vha = sp->fcport->vha;
	req = vha->req;

	cmd = GET_CMD_SP(sp);
	cont = 0;
	cont_pkt = NULL;

	/* Update entry type to indicate Command Type 7 IOCB */
	lcmd_pkt->entry_type = FX00_COMMAND_TYPE_7;

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		lcmd_pkt->byte_count = cpu_to_le32(0);
		return;
	}

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		lcmd_pkt->cntrl_flags = TMF_WRITE_DATA;
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		lcmd_pkt->cntrl_flags = TMF_READ_DATA;
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
	}

	/* One DSD is available in the Command Type 7 IOCB */
	avail_dsds = 1;
	cur_dsd = (__le32 *)&lcmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t sle_dma;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			memset(&lcont_pkt, 0, REQUEST_ENTRY_SIZE);
			cont_pkt =
			    qlafx00_prep_cont_type1_iocb(req, &lcont_pkt);
			cur_dsd = (__le32 *)lcont_pkt.dseg_0_address;
			avail_dsds = 5;
			cont = 1;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
		if (avail_dsds == 0 && cont == 1) {
			cont = 0;
			memcpy_toio((void __iomem *)cont_pkt, &lcont_pkt,
			    REQUEST_ENTRY_SIZE);
		}
	}
	if (avail_dsds != 0 && cont == 1) {
		memcpy_toio((void __iomem *)cont_pkt, &lcont_pkt,
		    REQUEST_ENTRY_SIZE);
	}
}
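/*
 * Note on the lcmd_pkt/lcont_pkt temporaries used here: the ISPFx00
 * request ring is treated as I/O memory, so IOCBs are assembled in plain
 * stack structures and pushed to the ring with memcpy_toio() followed by
 * wmb() before the in-index is advanced; the ring is never written
 * through ordinary stores.
 */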
/**
 * qlafx00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qlafx00_start_scsi(srb_t *sp)
{
	int nseg;
	unsigned long flags;
	uint32_t index;
	uint32_t handle;
	uint16_t cnt;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct cmd_type_7_fx00 *cmd_pkt;
	struct cmd_type_7_fx00 lcmd_pkt;
	struct scsi_lun llun;

	/* Setup device pointers. */
	rsp = ha->rsp_q_map[0];
	req = vha->req;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_DWORD_RELAXED(req->req_q_out);

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
				(req->ring_index - cnt);
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_type_7_fx00 *)req->ring_ptr;

	memset(&lcmd_pkt, 0, REQUEST_ENTRY_SIZE);

	lcmd_pkt.handle = MAKE_HANDLE(req->id, sp->handle);
	lcmd_pkt.reserved_0 = 0;
	lcmd_pkt.port_path_ctrl = 0;
	lcmd_pkt.reserved_1 = 0;
	lcmd_pkt.dseg_count = cpu_to_le16(tot_dsds);
	lcmd_pkt.tgt_idx = cpu_to_le16(sp->fcport->tgt_id);

	int_to_scsilun(cmd->device->lun, &llun);
	host_to_adap((uint8_t *)&llun, (uint8_t *)&lcmd_pkt.lun,
	    sizeof(lcmd_pkt.lun));

	/* Load SCSI command packet. */
	host_to_adap(cmd->cmnd, lcmd_pkt.fcp_cdb, sizeof(lcmd_pkt.fcp_cdb));
	lcmd_pkt.byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	qlafx00_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, &lcmd_pkt);

	/* Set total data segment count. */
	lcmd_pkt.entry_count = (uint8_t)req_cnt;

	/* Specify response queue number where completion should happen */
	lcmd_pkt.entry_status = (uint8_t) rsp->id;

	ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302e,
	    (uint8_t *)cmd->cmnd, cmd->cmd_len);
	ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x3032,
	    (uint8_t *)&lcmd_pkt, REQUEST_ENTRY_SIZE);

	memcpy_toio((void __iomem *)cmd_pkt, &lcmd_pkt, REQUEST_ENTRY_SIZE);
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);
	QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_SUCCESS;

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_FUNCTION_FAILED;
}

void
qlafx00_tm_iocb(srb_t *sp, struct tsk_mgmt_entry_fx00 *ptm_iocb)
{
	struct srb_iocb *fxio = &sp->u.iocb_cmd;
	scsi_qla_host_t *vha = sp->fcport->vha;
	struct req_que *req = vha->req;
	struct tsk_mgmt_entry_fx00 tm_iocb;
	struct scsi_lun llun;

	memset(&tm_iocb, 0, sizeof(struct tsk_mgmt_entry_fx00));
	tm_iocb.entry_type = TSK_MGMT_IOCB_TYPE_FX00;
	tm_iocb.entry_count = 1;
	tm_iocb.handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
	tm_iocb.reserved_0 = 0;
	tm_iocb.tgt_id = cpu_to_le16(sp->fcport->tgt_id);
	tm_iocb.control_flags = cpu_to_le32(fxio->u.tmf.flags);
	if (tm_iocb.control_flags == cpu_to_le32((uint32_t)TCF_LUN_RESET)) {
		int_to_scsilun(fxio->u.tmf.lun, &llun);
		host_to_adap((uint8_t *)&llun, (uint8_t *)&tm_iocb.lun,
		    sizeof(struct scsi_lun));
	}

	memcpy((void *)ptm_iocb, &tm_iocb,
	    sizeof(struct tsk_mgmt_entry_fx00));
	wmb();
}

void
qlafx00_abort_iocb(srb_t *sp, struct abort_iocb_entry_fx00 *pabt_iocb)
{
	struct srb_iocb *fxio = &sp->u.iocb_cmd;
	scsi_qla_host_t *vha = sp->fcport->vha;
	struct req_que *req = vha->req;
	struct abort_iocb_entry_fx00 abt_iocb;

	memset(&abt_iocb, 0, sizeof(struct abort_iocb_entry_fx00));
	abt_iocb.entry_type = ABORT_IOCB_TYPE_FX00;
	abt_iocb.entry_count = 1;
	abt_iocb.handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
	abt_iocb.abort_handle =
	    cpu_to_le32(MAKE_HANDLE(req->id, fxio->u.abt.cmd_hndl));
	abt_iocb.tgt_id_sts = cpu_to_le16(sp->fcport->tgt_id);
	abt_iocb.req_que_no = cpu_to_le16(req->id);

	memcpy((void *)pabt_iocb, &abt_iocb,
	    sizeof(struct abort_iocb_entry_fx00));
	wmb();
}
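/*
 * qlafx00_fxdisc_iocb() below serves two producers: driver-internal
 * SRB_FXIOCB_DCMD requests, which describe at most one request and one
 * response buffer (req_dsdcnt/rsp_dsdcnt of 1), and BSG pass-through
 * jobs, whose scatter/gather payloads may spill into continuation IOCBs,
 * with entry_cnt bumped for every continuation consumed.
 */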
void
qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb)
{
	struct srb_iocb *fxio = &sp->u.iocb_cmd;
	struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
	struct fc_bsg_job *bsg_job;
	struct fxdisc_entry_fx00 fx_iocb;
	uint8_t entry_cnt = 1;

	memset(&fx_iocb, 0, sizeof(struct fxdisc_entry_fx00));
	fx_iocb.entry_type = FX00_IOCB_TYPE;
	fx_iocb.handle = cpu_to_le32(sp->handle);
	fx_iocb.entry_count = entry_cnt;

	if (sp->type == SRB_FXIOCB_DCMD) {
		fx_iocb.func_num =
		    sp->u.iocb_cmd.u.fxiocb.req_func_type;
		fx_iocb.adapid = fxio->u.fxiocb.adapter_id;
		fx_iocb.adapid_hi = fxio->u.fxiocb.adapter_id_hi;
		fx_iocb.reserved_0 = fxio->u.fxiocb.reserved_0;
		fx_iocb.reserved_1 = fxio->u.fxiocb.reserved_1;
		fx_iocb.dataword_extra = fxio->u.fxiocb.req_data_extra;

		if (fxio->u.fxiocb.flags & SRB_FXDISC_REQ_DMA_VALID) {
			fx_iocb.req_dsdcnt = cpu_to_le16(1);
			fx_iocb.req_xfrcnt =
			    cpu_to_le16(fxio->u.fxiocb.req_len);
			fx_iocb.dseg_rq_address[0] =
			    cpu_to_le32(LSD(fxio->u.fxiocb.req_dma_handle));
			fx_iocb.dseg_rq_address[1] =
			    cpu_to_le32(MSD(fxio->u.fxiocb.req_dma_handle));
			fx_iocb.dseg_rq_len =
			    cpu_to_le32(fxio->u.fxiocb.req_len);
		}

		if (fxio->u.fxiocb.flags & SRB_FXDISC_RESP_DMA_VALID) {
			fx_iocb.rsp_dsdcnt = cpu_to_le16(1);
			fx_iocb.rsp_xfrcnt =
			    cpu_to_le16(fxio->u.fxiocb.rsp_len);
			fx_iocb.dseg_rsp_address[0] =
			    cpu_to_le32(LSD(fxio->u.fxiocb.rsp_dma_handle));
			fx_iocb.dseg_rsp_address[1] =
			    cpu_to_le32(MSD(fxio->u.fxiocb.rsp_dma_handle));
			fx_iocb.dseg_rsp_len =
			    cpu_to_le32(fxio->u.fxiocb.rsp_len);
		}

		if (fxio->u.fxiocb.flags & SRB_FXDISC_REQ_DWRD_VALID)
			fx_iocb.dataword = fxio->u.fxiocb.req_data;

		fx_iocb.flags = fxio->u.fxiocb.flags;
	} else {
		struct scatterlist *sg;

		bsg_job = sp->u.bsg_job;
		piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
		    &bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];

		fx_iocb.func_num = piocb_rqst->func_type;
		fx_iocb.adapid = piocb_rqst->adapid;
		fx_iocb.adapid_hi = piocb_rqst->adapid_hi;
		fx_iocb.reserved_0 = piocb_rqst->reserved_0;
		fx_iocb.reserved_1 = piocb_rqst->reserved_1;
		fx_iocb.dataword_extra = piocb_rqst->dataword_extra;
		fx_iocb.dataword = piocb_rqst->dataword;
		fx_iocb.req_xfrcnt = piocb_rqst->req_len;
		fx_iocb.rsp_xfrcnt = piocb_rqst->rsp_len;

		if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) {
			int avail_dsds, tot_dsds;
			cont_a64_entry_t lcont_pkt;
			cont_a64_entry_t *cont_pkt = NULL;
			__le32 *cur_dsd;
			int index = 0, cont = 0;

			fx_iocb.req_dsdcnt =
			    cpu_to_le16(bsg_job->request_payload.sg_cnt);
			tot_dsds =
			    bsg_job->request_payload.sg_cnt;
			cur_dsd = (__le32 *)&fx_iocb.dseg_rq_address[0];
			avail_dsds = 1;
			for_each_sg(bsg_job->request_payload.sg_list, sg,
			    tot_dsds, index) {
				dma_addr_t sle_dma;

				/* Allocate additional continuation packets? */
				if (avail_dsds == 0) {
					/*
					 * Five DSDs are available in the Cont.
					 * Type 1 IOCB.
					 */
					memset(&lcont_pkt, 0,
					    REQUEST_ENTRY_SIZE);
					cont_pkt =
					    qlafx00_prep_cont_type1_iocb(
						sp->fcport->vha->req,
						&lcont_pkt);
					cur_dsd = (__le32 *)
					    lcont_pkt.dseg_0_address;
					avail_dsds = 5;
					cont = 1;
					entry_cnt++;
				}

				sle_dma = sg_dma_address(sg);
				*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
				*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
				*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
				avail_dsds--;

				if (avail_dsds == 0 && cont == 1) {
					cont = 0;
					memcpy_toio(
					    (void __iomem *)cont_pkt,
					    &lcont_pkt, REQUEST_ENTRY_SIZE);
					ql_dump_buffer(
					    ql_dbg_user + ql_dbg_verbose,
					    sp->fcport->vha, 0x3042,
					    (uint8_t *)&lcont_pkt,
					    REQUEST_ENTRY_SIZE);
				}
			}
			if (avail_dsds != 0 && cont == 1) {
				memcpy_toio((void __iomem *)cont_pkt,
				    &lcont_pkt, REQUEST_ENTRY_SIZE);
				ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
				    sp->fcport->vha, 0x3043,
				    (uint8_t *)&lcont_pkt, REQUEST_ENTRY_SIZE);
			}
		}

		if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) {
			int avail_dsds, tot_dsds;
			cont_a64_entry_t lcont_pkt;
			cont_a64_entry_t *cont_pkt = NULL;
			__le32 *cur_dsd;
			int index = 0, cont = 0;

			fx_iocb.rsp_dsdcnt =
			    cpu_to_le16(bsg_job->reply_payload.sg_cnt);
			tot_dsds = bsg_job->reply_payload.sg_cnt;
			cur_dsd = (__le32 *)&fx_iocb.dseg_rsp_address[0];
			avail_dsds = 1;

			for_each_sg(bsg_job->reply_payload.sg_list, sg,
			    tot_dsds, index) {
				dma_addr_t sle_dma;

				/* Allocate additional continuation packets? */
				if (avail_dsds == 0) {
					/*
					 * Five DSDs are available in the Cont.
					 * Type 1 IOCB.
					 */
					memset(&lcont_pkt, 0,
					    REQUEST_ENTRY_SIZE);
					cont_pkt =
					    qlafx00_prep_cont_type1_iocb(
						sp->fcport->vha->req,
						&lcont_pkt);
					cur_dsd = (__le32 *)
					    lcont_pkt.dseg_0_address;
					avail_dsds = 5;
					cont = 1;
					entry_cnt++;
				}

				sle_dma = sg_dma_address(sg);
				*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
				*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
				*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
				avail_dsds--;

				if (avail_dsds == 0 && cont == 1) {
					cont = 0;
					memcpy_toio((void __iomem *)cont_pkt,
					    &lcont_pkt,
					    REQUEST_ENTRY_SIZE);
					ql_dump_buffer(
					    ql_dbg_user + ql_dbg_verbose,
					    sp->fcport->vha, 0x3045,
					    (uint8_t *)&lcont_pkt,
					    REQUEST_ENTRY_SIZE);
				}
			}
			if (avail_dsds != 0 && cont == 1) {
				memcpy_toio((void __iomem *)cont_pkt,
				    &lcont_pkt, REQUEST_ENTRY_SIZE);
				ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
				    sp->fcport->vha, 0x3046,
				    (uint8_t *)&lcont_pkt, REQUEST_ENTRY_SIZE);
			}
		}

		if (piocb_rqst->flags & SRB_FXDISC_REQ_DWRD_VALID)
			fx_iocb.dataword = piocb_rqst->dataword;
		fx_iocb.flags = piocb_rqst->flags;
		fx_iocb.entry_count = entry_cnt;
	}

	ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
	    sp->fcport->vha, 0x3047,
	    (uint8_t *)&fx_iocb, sizeof(struct fxdisc_entry_fx00));

	memcpy_toio((void __iomem *)pfxiocb, &fx_iocb,
	    sizeof(struct fxdisc_entry_fx00));
	wmb();
}