/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2013 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/ratelimit.h>
#include <linux/vmalloc.h>
#include <scsi/scsi_tcq.h>
#include <linux/utsname.h>

/* QLAFX00 specific Mailbox implementation functions */

/*
 * qlafx00_mailbox_command
 *	Issue a mailbox command and wait for completion.
 *
 * Input:
 *	ha = adapter block pointer.
 *	mcp = driver internal mbx struct pointer.
 *
 * Output:
 *	mb[MAX_MAILBOX_REGISTER_COUNT] = returned mailbox data.
 *
 * Returns:
 *	0 : QLA_SUCCESS = cmd performed successfully
 *	1 : QLA_FUNCTION_FAILED (error encountered)
 *	6 : QLA_FUNCTION_TIMEOUT (timeout condition encountered)
 *
 * Context:
 *	Kernel context.
 */
static int
qlafx00_mailbox_command(scsi_qla_host_t *vha, struct mbx_cmd_32 *mcp)
{
	int rval;
	unsigned long flags = 0;
	device_reg_t __iomem *reg;
	uint8_t abort_active;
	uint8_t io_lock_on;
	uint16_t command = 0;
	uint32_t *iptr;
	uint32_t __iomem *optr;
	uint32_t cnt;
	uint32_t mboxes;
	unsigned long wait_time;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	if (ha->pdev->error_state > pci_channel_io_frozen) {
		ql_log(ql_log_warn, vha, 0x115c,
		    "error_state is greater than pci_channel_io_frozen, "
		    "exiting.\n");
		return QLA_FUNCTION_TIMEOUT;
	}

	if (vha->device_flags & DFLG_DEV_FAILED) {
		ql_log(ql_log_warn, vha, 0x115f,
		    "Device in failed state, exiting.\n");
		return QLA_FUNCTION_TIMEOUT;
	}

	reg = ha->iobase;
	io_lock_on = base_vha->flags.init_done;

	rval = QLA_SUCCESS;
	abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);

	if (ha->flags.pci_channel_io_perm_failure) {
		ql_log(ql_log_warn, vha, 0x1175,
		    "Perm failure on EEH timeout MBX, exiting.\n");
		return QLA_FUNCTION_TIMEOUT;
	}

	if (ha->flags.isp82xx_fw_hung) {
		/* Setting Link-Down error */
		mcp->mb[0] = MBS_LINK_DOWN_ERROR;
		ql_log(ql_log_warn, vha, 0x1176,
		    "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
		rval = QLA_FUNCTION_FAILED;
		goto premature_exit;
	}

	/*
	 * Wait for active mailbox commands to finish by waiting at most tov
	 * seconds. This is to serialize actual issuing of mailbox cmds during
	 * non ISP abort time.
	 */
	if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) {
		/* Timeout occurred. Return error. */
		ql_log(ql_log_warn, vha, 0x1177,
		    "Cmd access timeout, cmd=0x%x, Exiting.\n",
		    mcp->mb[0]);
		return QLA_FUNCTION_TIMEOUT;
	}

	ha->flags.mbox_busy = 1;
	/* Save mailbox command for debug */
	ha->mcp32 = mcp;

	ql_dbg(ql_dbg_mbx, vha, 0x1178,
	    "Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]);

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Load mailbox registers. */
	optr = (uint32_t __iomem *)&reg->ispfx00.mailbox0;

	iptr = mcp->mb;
	command = mcp->mb[0];
	mboxes = mcp->out_mb;

	for (cnt = 0; cnt < ha->mbx_count; cnt++) {
		if (mboxes & BIT_0)
			WRT_REG_DWORD(optr, *iptr);

		mboxes >>= 1;
		optr++;
		iptr++;
	}

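	/*
	 * Note: mcp->out_mb is a bitmask (MBX_0 == BIT_0, MBX_1 == BIT_1,
	 * ...) consumed one bit per register by the loop above, so e.g.
	 * out_mb = MBX_3|MBX_0 writes only mailbox registers 0 and 3.
	 */
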
	/* Issue set host interrupt command to send cmd out. */
	ha->flags.mbox_int = 0;
	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);

	ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1172,
	    (uint8_t *)mcp->mb, 16);
	ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1173,
	    ((uint8_t *)mcp->mb + 0x10), 16);
	ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1174,
	    ((uint8_t *)mcp->mb + 0x20), 8);

	/* Unlock mbx registers and wait for interrupt */
	ql_dbg(ql_dbg_mbx, vha, 0x1179,
	    "Going to unlock irq & waiting for interrupts. "
	    "jiffies=%lx.\n", jiffies);

	/* Wait for mbx cmd completion until timeout */
	if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) {
		set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);

		QLAFX00_SET_HST_INTR(ha, ha->mbx_intr_code);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		wait_for_completion_timeout(&ha->mbx_intr_comp, mcp->tov * HZ);

		clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
	} else {
		ql_dbg(ql_dbg_mbx, vha, 0x112c,
		    "Cmd=%x Polling Mode.\n", command);

		QLAFX00_SET_HST_INTR(ha, ha->mbx_intr_code);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */
		while (!ha->flags.mbox_int) {
			if (time_after(jiffies, wait_time))
				break;

			/* Check for pending interrupts. */
			qla2x00_poll(ha->rsp_q_map[0]);

			if (!ha->flags.mbox_int &&
			    !(IS_QLA2200(ha) &&
			    command == MBC_LOAD_RISC_RAM_EXTENDED))
				usleep_range(10000, 11000);
		} /* while */
		ql_dbg(ql_dbg_mbx, vha, 0x112d,
		    "Waited %d sec.\n",
		    (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ));
	}

	/* Check whether we timed out */
	if (ha->flags.mbox_int) {
		uint32_t *iptr2;

		ql_dbg(ql_dbg_mbx, vha, 0x112e,
		    "Cmd=%x completed.\n", command);

		/* Got interrupt. Clear the flag. */
		ha->flags.mbox_int = 0;
		clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);

		if (ha->mailbox_out32[0] != MBS_COMMAND_COMPLETE)
			rval = QLA_FUNCTION_FAILED;

		/* Load return mailbox registers. */
		iptr2 = mcp->mb;
		iptr = (uint32_t *)&ha->mailbox_out32[0];
		mboxes = mcp->in_mb;
		for (cnt = 0; cnt < ha->mbx_count; cnt++) {
			if (mboxes & BIT_0)
				*iptr2 = *iptr;

			mboxes >>= 1;
			iptr2++;
			iptr++;
		}
	} else {
		rval = QLA_FUNCTION_TIMEOUT;
	}

	ha->flags.mbox_busy = 0;

	/* Clean up */
	ha->mcp32 = NULL;

	if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) {
		ql_dbg(ql_dbg_mbx, vha, 0x113a,
		    "checking for additional resp interrupt.\n");

		/* polling mode for non isp_abort commands. */
		qla2x00_poll(ha->rsp_q_map[0]);
	}

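	/*
	 * Mailbox timeout recovery: when not running from the DPC thread
	 * (or when an ioctl/EEH path is active), schedule an ISP abort for
	 * DPC to perform; otherwise call abort_isp() directly below, since
	 * the DPC thread is the one executing this command.
	 */
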
	if (rval == QLA_FUNCTION_TIMEOUT &&
	    mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) {
		if (!io_lock_on || (mcp->flags & IOCTL_CMD) ||
		    ha->flags.eeh_busy) {
			/* not in dpc. schedule it for dpc to take over. */
			ql_dbg(ql_dbg_mbx, vha, 0x115d,
			    "Timeout, schedule isp_abort_needed.\n");

			if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
			    !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
			    !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
				ql_log(ql_log_info, base_vha, 0x115e,
				    "Mailbox cmd timeout occurred, cmd=0x%x, "
				    "mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP "
				    "abort.\n", command, mcp->mb[0],
				    ha->flags.eeh_busy);
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
			}
		} else if (!abort_active) {
			/* call abort directly since we are in the DPC thread */
			ql_dbg(ql_dbg_mbx, vha, 0x1160,
			    "Timeout, calling abort_isp.\n");

			if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
			    !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
			    !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
				ql_log(ql_log_info, base_vha, 0x1161,
				    "Mailbox cmd timeout occurred, cmd=0x%x, "
				    "mb[0]=0x%x. Scheduling ISP abort.\n",
				    command, mcp->mb[0]);
				set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
				clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				if (ha->isp_ops->abort_isp(vha)) {
					/* Failed. retry later. */
					set_bit(ISP_ABORT_NEEDED,
					    &vha->dpc_flags);
				}
				clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
				ql_dbg(ql_dbg_mbx, vha, 0x1162,
				    "Finished abort_isp.\n");
			}
		}
	}

premature_exit:
	/* Allow next mbx cmd to come in. */
	complete(&ha->mbx_cmd_comp);

	if (rval) {
		ql_log(ql_log_warn, base_vha, 0x1163,
		    "**** Failed mbx[0]=%x, mb[1]=%x, mb[2]=%x, "
		    "mb[3]=%x, cmd=%x ****.\n",
		    mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3], command);
	} else {
		ql_dbg(ql_dbg_mbx, base_vha, 0x1164, "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qlafx00_driver_shutdown
 *	Indicate a driver shutdown to firmware.
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Returns:
 *	local function return status code.
 *
 * Context:
 *	Kernel context.
 */
static int
qlafx00_driver_shutdown(scsi_qla_host_t *vha, int tmo)
{
	int rval;
	struct mbx_cmd_32 mc;
	struct mbx_cmd_32 *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1166,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_MR_DRV_SHUTDOWN;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_0;
	if (tmo)
		mcp->tov = tmo;
	else
		mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qlafx00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1167,
		    "Failed=%x.\n", rval);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1168,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qlafx00_get_firmware_state
 *	Get adapter firmware state.
 *
 * Input:
 *	ha = adapter block pointer.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qlafx00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
static int
qlafx00_get_firmware_state(scsi_qla_host_t *vha, uint32_t *states)
{
	int rval;
	struct mbx_cmd_32 mc;
	struct mbx_cmd_32 *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1169,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qlafx00_mailbox_command(vha, mcp);

	/* Return firmware states. */
	states[0] = mcp->mb[1];

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x116a,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x116b,
		    "Done %s.\n", __func__);
	}
	return rval;
}

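/*
 * NOTE: on ISPFx00 only states[0] (taken from mailbox 1) is populated;
 * callers still pass a state array, apparently for symmetry with the
 * qla2x00_get_firmware_state() interface.
 */
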
/*
 * qlafx00_init_firmware
 *	Initialize adapter firmware.
 *
 * Input:
 *	ha = adapter block pointer.
 *	dptr = Initialization control block pointer.
 *	size = size of initialization control block.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qlafx00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qlafx00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
{
	int rval;
	struct mbx_cmd_32 mc;
	struct mbx_cmd_32 *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x116c,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_INITIALIZE_FIRMWARE;

	mcp->mb[1] = 0;
	mcp->mb[2] = MSD(ha->init_cb_dma);
	mcp->mb[3] = LSD(ha->init_cb_dma);

	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_0;
	mcp->buf_size = size;
	mcp->flags = MBX_DMA_OUT;
	mcp->tov = MBX_TOV_SECONDS;
	rval = qlafx00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x116d,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x116e,
		    "Done %s.\n", __func__);
	}
	return rval;
}

/*
 * qlafx00_mbx_reg_test
 */
static int
qlafx00_mbx_reg_test(scsi_qla_host_t *vha)
{
	int rval;
	struct mbx_cmd_32 mc;
	struct mbx_cmd_32 *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x116f,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST;
	mcp->mb[1] = 0xAAAA;
	mcp->mb[2] = 0x5555;
	mcp->mb[3] = 0xAA55;
	mcp->mb[4] = 0x55AA;
	mcp->mb[5] = 0xA5A5;
	mcp->mb[6] = 0x5A5A;
	mcp->mb[7] = 0x2525;
	mcp->mb[8] = 0xBBBB;
	mcp->mb[9] = 0x6666;
	mcp->mb[10] = 0xBB66;
	mcp->mb[11] = 0x66BB;
	mcp->mb[12] = 0xB6B6;
	mcp->mb[13] = 0x6B6B;
	mcp->mb[14] = 0x3636;
	mcp->mb[15] = 0xCCCC;

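	/*
	 * The firmware echoes each pattern written to mb[1]-mb[15] back at
	 * mb[17]-mb[31]; the checks below fail the test on any mismatch.
	 */
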
	mcp->out_mb = MBX_15|MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|
	    MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_15|MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|
	    MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->buf_size = 0;
	mcp->flags = MBX_DMA_OUT;
	mcp->tov = MBX_TOV_SECONDS;
	rval = qlafx00_mailbox_command(vha, mcp);
	if (rval == QLA_SUCCESS) {
		if (mcp->mb[17] != 0xAAAA || mcp->mb[18] != 0x5555 ||
		    mcp->mb[19] != 0xAA55 || mcp->mb[20] != 0x55AA)
			rval = QLA_FUNCTION_FAILED;
		if (mcp->mb[21] != 0xA5A5 || mcp->mb[22] != 0x5A5A ||
		    mcp->mb[23] != 0x2525 || mcp->mb[24] != 0xBBBB)
			rval = QLA_FUNCTION_FAILED;
		if (mcp->mb[25] != 0x6666 || mcp->mb[26] != 0xBB66 ||
		    mcp->mb[27] != 0x66BB || mcp->mb[28] != 0xB6B6)
			rval = QLA_FUNCTION_FAILED;
		if (mcp->mb[29] != 0x6B6B || mcp->mb[30] != 0x3636 ||
		    mcp->mb[31] != 0xCCCC)
			rval = QLA_FUNCTION_FAILED;
	}

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1170,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1171,
		    "Done %s.\n", __func__);
	}
	return rval;
}

/**
 * qlafx00_pci_config() - Setup ISPFx00 PCI configuration registers.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
int
qlafx00_pci_config(scsi_qla_host_t *vha)
{
	uint16_t w;
	struct qla_hw_data *ha = vha->hw;

	pci_set_master(ha->pdev);
	pci_try_set_mwi(ha->pdev);

	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
	w &= ~PCI_COMMAND_INTX_DISABLE;
	pci_write_config_word(ha->pdev, PCI_COMMAND, w);

	/* PCIe -- adjust Maximum Read Request Size (2048). */
	if (pci_find_capability(ha->pdev, PCI_CAP_ID_EXP))
		pcie_set_readrq(ha->pdev, 2048);

	ha->chip_revision = ha->pdev->revision;

	return QLA_SUCCESS;
}

/**
 * qlafx00_soc_cpu_reset() - Perform warm reset of iSA (CPUs being reset on SOC).
 * @vha: HA context
 */
static inline void
qlafx00_soc_cpu_reset(scsi_qla_host_t *vha)
{
	unsigned long flags = 0;
	struct qla_hw_data *ha = vha->hw;
	int i, core;
	uint32_t cnt;

	/* Set all 4 cores in reset */
	for (i = 0; i < 4; i++) {
		QLAFX00_SET_HBA_SOC_REG(ha,
		    (SOC_SW_RST_CONTROL_REG_CORE0 + 8*i), (0xF01));
	}

	/* Set all 4 core Clock gating control */
	for (i = 0; i < 4; i++) {
		QLAFX00_SET_HBA_SOC_REG(ha,
		    (SOC_SW_RST_CONTROL_REG_CORE0 + 4 + 8*i), (0x01010101));
	}

	/* Reset all units in Fabric */
	QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_RST_CONTROL_REG, (0x11F0101));

	/* Reset all interrupt control registers */
	for (i = 0; i < 115; i++) {
		QLAFX00_SET_HBA_SOC_REG(ha,
		    (SOC_INTERRUPT_SOURCE_I_CONTROL_REG + 4*i), (0x0));
	}

	/* Reset Timers control registers. per core */
	for (core = 0; core < 4; core++)
		for (i = 0; i < 8; i++)
			QLAFX00_SET_HBA_SOC_REG(ha,
			    (SOC_CORE_TIMER_REG + 0x100*core + 4*i), (0x0));

	/* Reset per core IRQ ack register */
	for (core = 0; core < 4; core++)
		QLAFX00_SET_HBA_SOC_REG(ha,
		    (SOC_IRQ_ACK_REG + 0x100*core), (0x3FF));

	/* Set Fabric control and config to defaults */
	QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_CONTROL_REG, (0x2));
	QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_CONFIG_REG, (0x3));

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Kick in Fabric units */
	QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_RST_CONTROL_REG, (0x0));

	/* Kick in Core0 to start boot process */
	QLAFX00_SET_HBA_SOC_REG(ha, SOC_SW_RST_CONTROL_REG_CORE0, (0xF00));

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	/*
	 * Wait 10secs for soft-reset to complete.
	 * Note: msleep() must not be called with a spinlock held, so the
	 * hardware_lock is dropped before this fixed worst-case wait.
	 */
	for (cnt = 10; cnt; cnt--) {
		msleep(1000);
		barrier();
	}
}

/**
 * qlafx00_soft_reset() - Soft Reset ISPFx00.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
void
qlafx00_soft_reset(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;

	if (unlikely(pci_channel_offline(ha->pdev) &&
	    ha->flags.pci_channel_io_perm_failure))
		return;

	ha->isp_ops->disable_intrs(ha);
	qlafx00_soc_cpu_reset(vha);
	ha->isp_ops->enable_intrs(ha);
}

/**
 * qlafx00_chip_diag() - Test ISPFx00 for proper operation.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
int
qlafx00_chip_diag(scsi_qla_host_t *vha)
{
	int rval = 0;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];

	ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length;

	rval = qlafx00_mbx_reg_test(vha);
	if (rval) {
		ql_log(ql_log_warn, vha, 0x1165,
		    "Failed mailbox send register test\n");
	} else {
		/* Flag a successful rval */
		rval = QLA_SUCCESS;
	}
	return rval;
}

void
qlafx00_config_rings(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
	struct init_cb_fx *icb;
	struct req_que *req = ha->req_q_map[0];
	struct rsp_que *rsp = ha->rsp_q_map[0];

	/* Setup ring parameters in initialization control block. */
	icb = (struct init_cb_fx *)ha->init_cb;
	icb->request_q_outpointer = __constant_cpu_to_le16(0);
	icb->response_q_inpointer = __constant_cpu_to_le16(0);
	icb->request_q_length = cpu_to_le16(req->length);
	icb->response_q_length = cpu_to_le16(rsp->length);
	icb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
	icb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
	icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
	icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));

	WRT_REG_DWORD(&reg->req_q_in, 0);
	WRT_REG_DWORD(&reg->req_q_out, 0);

	WRT_REG_DWORD(&reg->rsp_q_in, 0);
	WRT_REG_DWORD(&reg->rsp_q_out, 0);

	/* PCI posting */
	RD_REG_DWORD(&reg->rsp_q_out);
}

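/*
 * NOTE: the trailing read of rsp_q_out above flushes the posted MMIO
 * writes so that the zeroed queue pointers are guaranteed to reach the
 * adapter before initialization continues.
 */
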
char *
qlafx00_pci_info_str(struct scsi_qla_host *vha, char *str)
{
	struct qla_hw_data *ha = vha->hw;
	int pcie_reg;

	pcie_reg = pci_find_capability(ha->pdev, PCI_CAP_ID_EXP);
	if (pcie_reg) {
		strcpy(str, "PCIe iSA");
		return str;
	}
	return str;
}

char *
qlafx00_fw_version_str(struct scsi_qla_host *vha, char *str)
{
	struct qla_hw_data *ha = vha->hw;

	sprintf(str, "%s", ha->mr.fw_version);
	return str;
}

void
qlafx00_enable_intrs(struct qla_hw_data *ha)
{
	unsigned long flags = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->interrupts_on = 1;
	QLAFX00_ENABLE_ICNTRL_REG(ha);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

void
qlafx00_disable_intrs(struct qla_hw_data *ha)
{
	unsigned long flags = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->interrupts_on = 0;
	QLAFX00_DISABLE_ICNTRL_REG(ha);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

static void
qlafx00_tmf_iocb_timeout(void *data)
{
	srb_t *sp = (srb_t *)data;
	struct srb_iocb *tmf = &sp->u.iocb_cmd;

	tmf->u.tmf.comp_status = CS_TIMEOUT;
	complete(&tmf->u.tmf.comp);
}

static void
qlafx00_tmf_sp_done(void *data, void *ptr, int res)
{
	srb_t *sp = (srb_t *)ptr;
	struct srb_iocb *tmf = &sp->u.iocb_cmd;

	complete(&tmf->u.tmf.comp);
}

static int
qlafx00_async_tm_cmd(fc_port_t *fcport, uint32_t flags,
    uint32_t lun, uint32_t tag)
{
	scsi_qla_host_t *vha = fcport->vha;
	struct srb_iocb *tm_iocb;
	srb_t *sp;
	int rval = QLA_FUNCTION_FAILED;

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	tm_iocb = &sp->u.iocb_cmd;
	sp->type = SRB_TM_CMD;
	sp->name = "tmf";
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha));
	tm_iocb->u.tmf.flags = flags;
	tm_iocb->u.tmf.lun = lun;
	tm_iocb->u.tmf.data = tag;
	sp->done = qlafx00_tmf_sp_done;
	tm_iocb->timeout = qlafx00_tmf_iocb_timeout;
	init_completion(&tm_iocb->u.tmf.comp);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	ql_dbg(ql_dbg_async, vha, 0x507b,
	    "Task management command issued target_id=%x\n",
	    fcport->tgt_id);

	wait_for_completion(&tm_iocb->u.tmf.comp);

	rval = tm_iocb->u.tmf.comp_status == CS_COMPLETE ?
	    QLA_SUCCESS : QLA_FUNCTION_FAILED;

done_free_sp:
	sp->free(vha, sp);
done:
	return rval;
}

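/*
 * NOTE: despite the "async" name, qlafx00_async_tm_cmd() blocks on the
 * SRB completion; a firmware timeout is surfaced by the iocb timeout
 * handler setting comp_status to CS_TIMEOUT before completing. The two
 * wrappers below select the task-management flavor.
 */
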
int
qlafx00_abort_target(fc_port_t *fcport, unsigned int l, int tag)
{
	return qlafx00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag);
}

int
qlafx00_lun_reset(fc_port_t *fcport, unsigned int l, int tag)
{
	return qlafx00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag);
}

int
qlafx00_iospace_config(struct qla_hw_data *ha)
{
	if (pci_request_selected_regions(ha->pdev, ha->bars,
	    QLA2XXX_DRIVER_NAME)) {
		ql_log_pci(ql_log_fatal, ha->pdev, 0x014e,
		    "Failed to reserve PIO/MMIO regions (%s), aborting.\n",
		    pci_name(ha->pdev));
		goto iospace_error_exit;
	}

	/* Use MMIO operations for all accesses. */
	if (!(pci_resource_flags(ha->pdev, 0) & IORESOURCE_MEM)) {
		ql_log_pci(ql_log_warn, ha->pdev, 0x014f,
		    "Invalid pci I/O region size (%s).\n",
		    pci_name(ha->pdev));
		goto iospace_error_exit;
	}
	if (pci_resource_len(ha->pdev, 0) < BAR0_LEN_FX00) {
		ql_log_pci(ql_log_warn, ha->pdev, 0x0127,
		    "Invalid PCI mem BAR0 region size (%s), aborting\n",
		    pci_name(ha->pdev));
		goto iospace_error_exit;
	}

	ha->cregbase =
	    ioremap_nocache(pci_resource_start(ha->pdev, 0), BAR0_LEN_FX00);
	if (!ha->cregbase) {
		ql_log_pci(ql_log_fatal, ha->pdev, 0x0128,
		    "cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev));
		goto iospace_error_exit;
	}

	if (!(pci_resource_flags(ha->pdev, 2) & IORESOURCE_MEM)) {
		ql_log_pci(ql_log_warn, ha->pdev, 0x0129,
		    "region #2 not an MMIO resource (%s), aborting\n",
		    pci_name(ha->pdev));
		goto iospace_error_exit;
	}
	if (pci_resource_len(ha->pdev, 2) < BAR2_LEN_FX00) {
		ql_log_pci(ql_log_warn, ha->pdev, 0x012a,
		    "Invalid PCI mem BAR2 region size (%s), aborting\n",
		    pci_name(ha->pdev));
		goto iospace_error_exit;
	}

	ha->iobase =
	    ioremap_nocache(pci_resource_start(ha->pdev, 2), BAR2_LEN_FX00);
	if (!ha->iobase) {
		ql_log_pci(ql_log_fatal, ha->pdev, 0x012b,
		    "cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev));
		goto iospace_error_exit;
	}

	/* Determine queue resources */
	ha->max_req_queues = ha->max_rsp_queues = 1;

	ql_log_pci(ql_log_info, ha->pdev, 0x012c,
	    "Bars 0x%x, iobase0 0x%p, iobase2 0x%p\n",
	    ha->bars, ha->cregbase, ha->iobase);

	return 0;

iospace_error_exit:
	return -ENOMEM;
}

static void
qlafx00_save_queue_ptrs(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];
	struct rsp_que *rsp = ha->rsp_q_map[0];

	req->length_fx00 = req->length;
	req->ring_fx00 = req->ring;
	req->dma_fx00 = req->dma;

	rsp->length_fx00 = rsp->length;
	rsp->ring_fx00 = rsp->ring;
	rsp->dma_fx00 = rsp->dma;

	ql_dbg(ql_dbg_init, vha, 0x012d,
	    "req: %p, ring_fx00: %p, length_fx00: 0x%x, "
	    "req->dma_fx00: 0x%llx\n", req, req->ring_fx00,
	    req->length_fx00, (u64)req->dma_fx00);

	ql_dbg(ql_dbg_init, vha, 0x012e,
	    "rsp: %p, ring_fx00: %p, length_fx00: 0x%x, "
	    "rsp->dma_fx00: 0x%llx\n", rsp, rsp->ring_fx00,
	    rsp->length_fx00, (u64)rsp->dma_fx00);
}

static int
qlafx00_config_queues(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];
	struct rsp_que *rsp = ha->rsp_q_map[0];
	dma_addr_t bar2_hdl = pci_resource_start(ha->pdev, 2);

	req->length = ha->req_que_len;
	req->ring = (void *)ha->iobase + ha->req_que_off;
	req->dma = bar2_hdl + ha->req_que_off;
	if ((!req->ring) || (req->length == 0)) {
		ql_log_pci(ql_log_info, ha->pdev, 0x012f,
		    "Unable to allocate memory for req_ring\n");
		return QLA_FUNCTION_FAILED;
	}

	ql_dbg(ql_dbg_init, vha, 0x0130,
	    "req: %p req_ring pointer %p req len 0x%x "
	    "req off 0x%x, req->dma: 0x%llx\n",
	    req, req->ring, req->length,
	    ha->req_que_off, (u64)req->dma);

	rsp->length = ha->rsp_que_len;
	rsp->ring = (void *)ha->iobase + ha->rsp_que_off;
	rsp->dma = bar2_hdl + ha->rsp_que_off;
	if ((!rsp->ring) || (rsp->length == 0)) {
		ql_log_pci(ql_log_info, ha->pdev, 0x0131,
		    "Unable to allocate memory for rsp_ring\n");
		return QLA_FUNCTION_FAILED;
	}

	ql_dbg(ql_dbg_init, vha, 0x0132,
	    "rsp: %p rsp_ring pointer %p rsp len 0x%x "
	    "rsp off 0x%x, rsp->dma: 0x%llx\n",
	    rsp, rsp->ring, rsp->length,
	    ha->rsp_que_off, (u64)rsp->dma);

	return QLA_SUCCESS;
}

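/*
 * NOTE: on ISPFx00 the request/response rings are not allocated from
 * host DMA memory; they live in BAR2 MMIO space at firmware-reported
 * offsets, so the ring pointers map into ha->iobase and the "dma"
 * address is the BAR2 bus address plus the same offset.
 */
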
static int
qlafx00_init_fw_ready(scsi_qla_host_t *vha)
{
	int rval = 0;
	unsigned long wtime;
	uint16_t wait_time;	/* Wait time */
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
	uint32_t aenmbx, aenmbx7 = 0;
	uint32_t state[5];
	bool done = false;

	/* 30 seconds wait - Adjust if required */
	wait_time = 30;

	/* wait time before firmware ready */
	wtime = jiffies + (wait_time * HZ);
	do {
		aenmbx = RD_REG_DWORD(&reg->aenmailbox0);
		barrier();
		ql_dbg(ql_dbg_mbx, vha, 0x0133,
		    "aenmbx: 0x%x\n", aenmbx);

		switch (aenmbx) {
		case MBA_FW_NOT_STARTED:
		case MBA_FW_STARTING:
			break;

		case MBA_SYSTEM_ERR:
		case MBA_REQ_TRANSFER_ERR:
		case MBA_RSP_TRANSFER_ERR:
		case MBA_FW_INIT_FAILURE:
			qlafx00_soft_reset(vha);
			break;

		case MBA_FW_RESTART_CMPLT:
			/* Set the mbx and rqstq intr code */
			aenmbx7 = RD_REG_DWORD(&reg->aenmailbox7);
			ha->mbx_intr_code = MSW(aenmbx7);
			ha->rqstq_intr_code = LSW(aenmbx7);
			ha->req_que_off = RD_REG_DWORD(&reg->aenmailbox1);
			ha->rsp_que_off = RD_REG_DWORD(&reg->aenmailbox3);
			ha->req_que_len = RD_REG_DWORD(&reg->aenmailbox5);
			ha->rsp_que_len = RD_REG_DWORD(&reg->aenmailbox6);
			WRT_REG_DWORD(&reg->aenmailbox0, 0);
			RD_REG_DWORD_RELAXED(&reg->aenmailbox0);
			ql_dbg(ql_dbg_init, vha, 0x0134,
			    "f/w returned mbx_intr_code: 0x%x, "
			    "rqstq_intr_code: 0x%x\n",
			    ha->mbx_intr_code, ha->rqstq_intr_code);
			QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);
			rval = QLA_SUCCESS;
			done = true;
			break;

		default:
			/*
			 * If fw is apparently not ready, we might need to
			 * issue a mbox cmd in order to continue, but the
			 * problem is that the DoorBell vector values that
			 * come with the 8060 AEN are most likely gone by
			 * now (and thus no bell would be rung on the fw
			 * side when the mbox cmd is issued). We therefore
			 * have to grab the 8060 AEN shadow regs (filled in
			 * by FW when the last 8060 AEN was being posted).
			 * Do the following to determine what is needed in
			 * order to get the FW ready:
			 * 1. reload the 8060 AEN values from the shadow regs
			 * 2. clear int status to get rid of possible pending
			 *    interrupts
			 * 3. issue Get FW State Mbox cmd to determine fw
			 *    state; set the mbx and rqstq intr code from
			 *    the shadow regs
			 */
			aenmbx7 = RD_REG_DWORD(&reg->initval7);
			ha->mbx_intr_code = MSW(aenmbx7);
			ha->rqstq_intr_code = LSW(aenmbx7);
			ha->req_que_off = RD_REG_DWORD(&reg->initval1);
			ha->rsp_que_off = RD_REG_DWORD(&reg->initval3);
			ha->req_que_len = RD_REG_DWORD(&reg->initval5);
			ha->rsp_que_len = RD_REG_DWORD(&reg->initval6);
			ql_dbg(ql_dbg_init, vha, 0x0135,
			    "f/w returned mbx_intr_code: 0x%x, "
			    "rqstq_intr_code: 0x%x\n",
			    ha->mbx_intr_code, ha->rqstq_intr_code);
			QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);

			/* Get the FW state */
			rval = qlafx00_get_firmware_state(vha, state);
			if (rval != QLA_SUCCESS) {
				/* Retry if timer has not expired */
				break;
			}

			if (state[0] == FSTATE_FX00_CONFIG_WAIT) {
				/* Firmware is waiting to be
				 * initialized by driver
				 */
				rval = QLA_SUCCESS;
				done = true;
				break;
			}

			/* Issue driver shutdown and wait until f/w recovers.
			 * Driver should continue to poll until 8060 AEN is
			 * received indicating firmware recovery.
			 */
			ql_dbg(ql_dbg_init, vha, 0x0136,
			    "Sending Driver shutdown fw_state 0x%x\n",
			    state[0]);

			rval = qlafx00_driver_shutdown(vha, 10);
			if (rval != QLA_SUCCESS) {
				rval = QLA_FUNCTION_FAILED;
				break;
			}
			msleep(500);

			wtime = jiffies + (wait_time * HZ);
			break;
		}

		if (!done) {
			if (time_after_eq(jiffies, wtime)) {
				ql_dbg(ql_dbg_init, vha, 0x0137,
				    "Init f/w failed: aen[7]: 0x%x\n",
				    RD_REG_DWORD(&reg->aenmailbox7));
				rval = QLA_FUNCTION_FAILED;
				done = true;
				break;
			}
			/* Delay for a while */
			msleep(500);
		}
	} while (!done);

	if (rval)
		ql_dbg(ql_dbg_init, vha, 0x0138,
		    "%s **** FAILED ****.\n", __func__);
	else
		ql_dbg(ql_dbg_init, vha, 0x0139,
		    "%s **** SUCCESS ****.\n", __func__);

	return rval;
}

/*
 * qlafx00_fw_ready() - Waits for firmware ready.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
int
qlafx00_fw_ready(scsi_qla_host_t *vha)
{
	int rval;
	unsigned long wtime;
	uint16_t wait_time;	/* Wait time if loop is coming ready */
	uint32_t state[5];

	rval = QLA_SUCCESS;

	wait_time = 10;

	/* wait time before firmware ready */
	wtime = jiffies + (wait_time * HZ);

	/* Wait for ISP to finish init */
	if (!vha->flags.init_done)
		ql_dbg(ql_dbg_init, vha, 0x013a,
		    "Waiting for init to complete...\n");

	do {
		rval = qlafx00_get_firmware_state(vha, state);

		if (rval == QLA_SUCCESS) {
			if (state[0] == FSTATE_FX00_INITIALIZED) {
				ql_dbg(ql_dbg_init, vha, 0x013b,
				    "fw_state=%x\n", state[0]);
				rval = QLA_SUCCESS;
				break;
			}
		}
		rval = QLA_FUNCTION_FAILED;

		if (time_after_eq(jiffies, wtime))
			break;

		/* Delay for a while */
		msleep(500);

		ql_dbg(ql_dbg_init, vha, 0x013c,
		    "fw_state=%x curr time=%lx.\n", state[0], jiffies);
	} while (1);

	if (rval)
		ql_dbg(ql_dbg_init, vha, 0x013d,
		    "Firmware ready **** FAILED ****.\n");
	else
		ql_dbg(ql_dbg_init, vha, 0x013e,
		    "Firmware ready **** SUCCESS ****.\n");

	return rval;
}

static int
qlafx00_find_all_targets(scsi_qla_host_t *vha,
    struct list_head *new_fcports)
{
	int rval;
	uint16_t tgt_id;
	fc_port_t *fcport, *new_fcport;
	int found;
	struct qla_hw_data *ha = vha->hw;

	rval = QLA_SUCCESS;

	if (!test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))
		return QLA_FUNCTION_FAILED;

	if ((atomic_read(&vha->loop_down_timer) ||
	    STATE_TRANSITION(vha))) {
		atomic_set(&vha->loop_down_timer, 0);
		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		return QLA_FUNCTION_FAILED;
	}

	ql_dbg(ql_dbg_disc + ql_dbg_init, vha, 0x2088,
	    "Listing Target bit map...\n");
	ql_dump_buffer(ql_dbg_disc + ql_dbg_init, vha,
	    0x2089, (uint8_t *)ha->gid_list, 32);

	/* Allocate temporary rmtport for any new rmtports discovered. */
	new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (new_fcport == NULL)
		return QLA_MEMORY_ALLOC_FAILED;

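	/*
	 * ha->gid_list is used here as a bitmap of target IDs reported by
	 * the FXDISC_GET_TGT_NODE_LIST query (see
	 * qlafx00_configure_all_targets()); each set bit below is queried
	 * individually for its node info.
	 */
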
	for_each_set_bit(tgt_id, (void *)ha->gid_list,
	    QLAFX00_TGT_NODE_LIST_SIZE) {

		/* Send get target node info */
		new_fcport->tgt_id = tgt_id;
		rval = qlafx00_fx_disc(vha, new_fcport,
		    FXDISC_GET_TGT_NODE_INFO);
		if (rval != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x208a,
			    "Target info scan failed -- assuming zero-entry "
			    "result...\n");
			continue;
		}

		/* Locate matching device in database. */
		found = 0;
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			if (memcmp(new_fcport->port_name,
			    fcport->port_name, WWN_SIZE))
				continue;

			found++;

			/*
			 * If tgt_id is same and state FCS_ONLINE, nothing
			 * changed.
			 */
			if (fcport->tgt_id == new_fcport->tgt_id &&
			    atomic_read(&fcport->state) == FCS_ONLINE)
				break;

			/*
			 * Tgt ID changed or device was marked to be updated.
			 */
			ql_dbg(ql_dbg_disc + ql_dbg_init, vha, 0x208b,
			    "TGT-ID Change(%s): Present tgt id: "
			    "0x%x state: 0x%x "
			    "wwnn = %llx wwpn = %llx.\n",
			    __func__, fcport->tgt_id,
			    atomic_read(&fcport->state),
			    (unsigned long long)wwn_to_u64(fcport->node_name),
			    (unsigned long long)wwn_to_u64(fcport->port_name));

			ql_log(ql_log_info, vha, 0x208c,
			    "TGT-ID Announce(%s): Discovered tgt "
			    "id 0x%x wwnn = %llx "
			    "wwpn = %llx.\n", __func__, new_fcport->tgt_id,
			    (unsigned long long)
			    wwn_to_u64(new_fcport->node_name),
			    (unsigned long long)
			    wwn_to_u64(new_fcport->port_name));

			if (atomic_read(&fcport->state) != FCS_ONLINE) {
				fcport->old_tgt_id = fcport->tgt_id;
				fcport->tgt_id = new_fcport->tgt_id;
				ql_log(ql_log_info, vha, 0x208d,
				    "TGT-ID: New fcport Added: %p\n", fcport);
				qla2x00_update_fcport(vha, fcport);
			} else {
				ql_log(ql_log_info, vha, 0x208e,
				    "Existing TGT-ID %x did not get "
				    "offline event from firmware.\n",
				    fcport->old_tgt_id);
				qla2x00_mark_device_lost(vha, fcport, 0, 0);
				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
				kfree(new_fcport);
				return rval;
			}
			break;
		}

		if (found)
			continue;

		/* If device was not in our fcports list, then add it. */
		list_add_tail(&new_fcport->list, new_fcports);

		/* Allocate a new replacement fcport. */
		new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
		if (new_fcport == NULL)
			return QLA_MEMORY_ALLOC_FAILED;
	}

	kfree(new_fcport);
	return rval;
}

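/*
 * Ownership note: fcports added to the new_fcports list above are handed
 * to the caller (qlafx00_configure_all_targets()); the spare allocation
 * remaining when the scan loop exits is freed before returning.
 */
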
/*
 * qlafx00_configure_all_targets
 *	Setup target devices with node ID's.
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Returns:
 *	0 = success.
 *	BIT_0 = error
 */
static int
qlafx00_configure_all_targets(scsi_qla_host_t *vha)
{
	int rval;
	fc_port_t *fcport, *rmptemp;
	LIST_HEAD(new_fcports);

	rval = qlafx00_fx_disc(vha, &vha->hw->mr.fcport,
	    FXDISC_GET_TGT_NODE_LIST);
	if (rval != QLA_SUCCESS) {
		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		return rval;
	}

	rval = qlafx00_find_all_targets(vha, &new_fcports);
	if (rval != QLA_SUCCESS) {
		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		return rval;
	}

	/*
	 * Delete all previous devices marked lost.
	 */
	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
			break;

		if (atomic_read(&fcport->state) == FCS_DEVICE_LOST) {
			if (fcport->port_type != FCT_INITIATOR)
				qla2x00_mark_device_lost(vha, fcport, 0, 0);
		}
	}

	/*
	 * Add the new devices to our devices list.
	 */
	list_for_each_entry_safe(fcport, rmptemp, &new_fcports, list) {
		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
			break;

		qla2x00_update_fcport(vha, fcport);
		list_move_tail(&fcport->list, &vha->vp_fcports);
		ql_log(ql_log_info, vha, 0x208f,
		    "Attach new target id 0x%x wwnn = %llx "
		    "wwpn = %llx.\n",
		    fcport->tgt_id,
		    (unsigned long long)wwn_to_u64(fcport->node_name),
		    (unsigned long long)wwn_to_u64(fcport->port_name));
	}

	/* Free all new device structures not processed. */
	list_for_each_entry_safe(fcport, rmptemp, &new_fcports, list) {
		list_del(&fcport->list);
		kfree(fcport);
	}

	return rval;
}

/*
 * qlafx00_configure_devices
 *	Updates Fibre Channel Device Database with what is actually on loop.
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Returns:
 *	0 = success.
 *	1 = error.
 *	2 = database was full and device was not configured.
 */
int
qlafx00_configure_devices(scsi_qla_host_t *vha)
{
	int rval;
	unsigned long flags, save_flags;

	rval = QLA_SUCCESS;

	save_flags = flags = vha->dpc_flags;

	ql_dbg(ql_dbg_disc, vha, 0x2090,
	    "Configure devices -- dpc flags =0x%lx\n", flags);

	rval = qlafx00_configure_all_targets(vha);

	if (rval == QLA_SUCCESS) {
		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
			rval = QLA_FUNCTION_FAILED;
		} else {
			atomic_set(&vha->loop_state, LOOP_READY);
			ql_log(ql_log_info, vha, 0x2091,
			    "Device Ready\n");
		}
	}

	if (rval) {
		ql_dbg(ql_dbg_disc, vha, 0x2092,
		    "%s *** FAILED ***.\n", __func__);
	} else {
		ql_dbg(ql_dbg_disc, vha, 0x2093,
		    "%s: exiting normally.\n", __func__);
	}
	return rval;
}

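/*
 * ISP error recovery on ISPFx00: take the host offline, reset the SoC,
 * mark every online port lost, flush outstanding commands with
 * DID_RESET, free IRQs, and flag FX00_RESET_RECOVERY so that
 * qlafx00_timer_routine() drives the rest of the recovery.
 */
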
static void
qlafx00_abort_isp_cleanup(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	fc_port_t *fcport;

	vha->flags.online = 0;
	ha->flags.chip_reset_done = 0;
	ha->mr.fw_hbt_en = 0;
	clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	vha->qla_stats.total_isp_aborts++;

	ql_log(ql_log_info, vha, 0x013f,
	    "Performing ISP error recovery - ha = %p.\n", ha);

	ha->isp_ops->reset_chip(vha);

	if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
		atomic_set(&vha->loop_state, LOOP_DOWN);
		atomic_set(&vha->loop_down_timer,
		    QLAFX00_LOOP_DOWN_TIME);
	} else {
		if (!atomic_read(&vha->loop_down_timer))
			atomic_set(&vha->loop_down_timer,
			    QLAFX00_LOOP_DOWN_TIME);
	}

	/* Clear all async request states across all VPs. */
	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		fcport->flags = 0;
		if (atomic_read(&fcport->state) == FCS_ONLINE)
			qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
	}

	if (!ha->flags.eeh_busy) {
		/* Requeue all commands in outstanding command list. */
		qla2x00_abort_all_cmds(vha, DID_RESET << 16);
	}

	qla2x00_free_irqs(vha);
	set_bit(FX00_RESET_RECOVERY, &vha->dpc_flags);

	/* Clear the Interrupts */
	QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);

	ql_log(ql_log_info, vha, 0x0140,
	    "%s: Done - ha = %p.\n", __func__, ha);
}

/**
 * qlafx00_init_response_q_entries() - Initializes response queue entries.
 * @rsp: response queue
 *
 * Beginning of request ring has initialization control block already built
 * by nvram config routine.
 *
 * Returns 0 on success.
 */
void
qlafx00_init_response_q_entries(struct rsp_que *rsp)
{
	uint16_t cnt;
	response_t *pkt;

	rsp->ring_ptr = rsp->ring;
	rsp->ring_index = 0;
	rsp->status_srb = NULL;
	pkt = rsp->ring_ptr;
	for (cnt = 0; cnt < rsp->length; cnt++) {
		pkt->signature = RESPONSE_PROCESSED;
		WRT_REG_DWORD(&pkt->signature, RESPONSE_PROCESSED);
		pkt++;
	}
}

int
qlafx00_rescan_isp(scsi_qla_host_t *vha)
{
	uint32_t status = QLA_FUNCTION_FAILED;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
	uint32_t aenmbx7;

	qla2x00_request_irqs(ha, ha->rsp_q_map[0]);

	aenmbx7 = RD_REG_DWORD(&reg->aenmailbox7);
	ha->mbx_intr_code = MSW(aenmbx7);
	ha->rqstq_intr_code = LSW(aenmbx7);
	ha->req_que_off = RD_REG_DWORD(&reg->aenmailbox1);
	ha->rsp_que_off = RD_REG_DWORD(&reg->aenmailbox3);
	ha->req_que_len = RD_REG_DWORD(&reg->aenmailbox5);
	ha->rsp_que_len = RD_REG_DWORD(&reg->aenmailbox6);

	ql_dbg(ql_dbg_disc, vha, 0x2094,
	    "fw returned mbx_intr_code: 0x%x, rqstq_intr_code: 0x%x "
	    "Req que offset 0x%x Rsp que offset 0x%x\n",
	    ha->mbx_intr_code, ha->rqstq_intr_code,
	    ha->req_que_off, ha->rsp_que_off);

	/* Clear the Interrupts */
	QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);

	status = qla2x00_init_rings(vha);
	if (!status) {
		vha->flags.online = 1;

		/* if no cable then assume it's good */
		if ((vha->device_flags & DFLG_NO_CABLE))
			status = 0;
		/* Register system information */
		if (qlafx00_fx_disc(vha,
		    &vha->hw->mr.fcport, FXDISC_REG_HOST_INFO))
			ql_dbg(ql_dbg_disc, vha, 0x2095,
			    "failed to register host info\n");
	}
	scsi_unblock_requests(vha->host);
	return status;
}

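/*
 * Reset-recovery state machine: while FX00_RESET_RECOVERY is set the
 * routine below (run from the driver's periodic timer) watches
 * aenmailbox0, counting fw_reset_timer_tick down on each pass; a stalled
 * AEN value forces an ISP abort, while MBA_FW_RESTART_CMPLT schedules
 * FX00_TARGET_SCAN so the DPC thread calls qlafx00_rescan_isp().
 */
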
void
qlafx00_timer_routine(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	uint32_t fw_heart_beat;
	uint32_t aenmbx0;
	struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;

	/* Check firmware health */
	if (ha->mr.fw_hbt_cnt)
		ha->mr.fw_hbt_cnt--;
	else {
		if ((!ha->flags.mr_reset_hdlr_active) &&
		    (!test_bit(UNLOADING, &vha->dpc_flags)) &&
		    (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) &&
		    (ha->mr.fw_hbt_en)) {
			fw_heart_beat = RD_REG_DWORD(&reg->fwheartbeat);
			if (fw_heart_beat != ha->mr.old_fw_hbt_cnt) {
				ha->mr.old_fw_hbt_cnt = fw_heart_beat;
				ha->mr.fw_hbt_miss_cnt = 0;
			} else {
				ha->mr.fw_hbt_miss_cnt++;
				if (ha->mr.fw_hbt_miss_cnt ==
				    QLAFX00_HEARTBEAT_MISS_CNT) {
					set_bit(ISP_ABORT_NEEDED,
					    &vha->dpc_flags);
					qla2xxx_wake_dpc(vha);
					ha->mr.fw_hbt_miss_cnt = 0;
				}
			}
		}
		ha->mr.fw_hbt_cnt = QLAFX00_HEARTBEAT_INTERVAL;
	}

	if (test_bit(FX00_RESET_RECOVERY, &vha->dpc_flags)) {
		/* Reset recovery to be performed in timer routine */
		aenmbx0 = RD_REG_DWORD(&reg->aenmailbox0);
		if (ha->mr.fw_reset_timer_exp) {
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			qla2xxx_wake_dpc(vha);
			ha->mr.fw_reset_timer_exp = 0;
		} else if (aenmbx0 == MBA_FW_RESTART_CMPLT) {
			/* Wake up DPC to rescan the targets */
			set_bit(FX00_TARGET_SCAN, &vha->dpc_flags);
			clear_bit(FX00_RESET_RECOVERY, &vha->dpc_flags);
			qla2xxx_wake_dpc(vha);
			ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL;
		} else if ((aenmbx0 == MBA_FW_STARTING) &&
		    (!ha->mr.fw_hbt_en)) {
			ha->mr.fw_hbt_en = 1;
		} else if (!ha->mr.fw_reset_timer_tick) {
			if (aenmbx0 == ha->mr.old_aenmbx0_state)
				ha->mr.fw_reset_timer_exp = 1;
			ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL;
		} else if (aenmbx0 == 0xFFFFFFFF) {
			uint32_t data0, data1;

			data0 = QLAFX00_RD_REG(ha,
			    QLAFX00_BAR1_BASE_ADDR_REG);
			data1 = QLAFX00_RD_REG(ha,
			    QLAFX00_PEX0_WIN0_BASE_ADDR_REG);

			data0 &= 0xffff0000;
			data1 &= 0x0000ffff;

			QLAFX00_WR_REG(ha,
			    QLAFX00_PEX0_WIN0_BASE_ADDR_REG,
			    (data0 | data1));
		} else if ((aenmbx0 & 0xFF00) == MBA_FW_POLL_STATE) {
			ha->mr.fw_reset_timer_tick =
			    QLAFX00_MAX_RESET_INTERVAL;
		}
		ha->mr.old_aenmbx0_state = aenmbx0;
		ha->mr.fw_reset_timer_tick--;
	}
}

/*
 * qlafx00_reset_initialize
 *	Re-initialize after an iSA device reset.
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Returns:
 *	0 = success
 */
int
qlafx00_reset_initialize(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;

	if (vha->device_flags & DFLG_DEV_FAILED) {
		ql_dbg(ql_dbg_init, vha, 0x0142,
		    "Device in failed state\n");
		return QLA_SUCCESS;
	}

	ha->flags.mr_reset_hdlr_active = 1;

	if (vha->flags.online) {
		scsi_block_requests(vha->host);
		qlafx00_abort_isp_cleanup(vha);
	}

	ql_log(ql_log_info, vha, 0x0143,
	    "(%s): succeeded.\n", __func__);
	ha->flags.mr_reset_hdlr_active = 0;
	return QLA_SUCCESS;
}

/*
 * qlafx00_abort_isp
 *	Resets ISP and aborts all outstanding commands.
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Returns:
 *	0 = success
 */
int
qlafx00_abort_isp(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;

	if (vha->flags.online) {
		if (unlikely(pci_channel_offline(ha->pdev) &&
		    ha->flags.pci_channel_io_perm_failure)) {
			clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
			return QLA_SUCCESS;
		}

		scsi_block_requests(vha->host);
		qlafx00_abort_isp_cleanup(vha);
	}

	ql_log(ql_log_info, vha, 0x0145,
	    "(%s): succeeded.\n", __func__);

	return QLA_SUCCESS;
}

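/*
 * NOTE: qlafx00_abort_isp() only blocks the request queues and runs the
 * cleanup; firmware restart and target re-discovery are completed
 * asynchronously via the FX00_RESET_RECOVERY timer path and
 * qlafx00_rescan_isp().
 */
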
static inline fc_port_t *
qlafx00_get_fcport(struct scsi_qla_host *vha, int tgt_id)
{
	fc_port_t *fcport;

	/* Check for matching device in remote port list. */
	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (fcport->tgt_id == tgt_id) {
			ql_dbg(ql_dbg_async, vha, 0x5072,
			    "Matching fcport(%p) found with TGT-ID: 0x%x "
			    "and Remote TGT_ID: 0x%x\n",
			    fcport, fcport->tgt_id, tgt_id);
			return fcport;
		}
	}
	/*
	 * Return NULL explicitly on a miss; the list cursor itself never
	 * becomes NULL when the loop runs to completion.
	 */
	return NULL;
}

static void
qlafx00_tgt_detach(struct scsi_qla_host *vha, int tgt_id)
{
	fc_port_t *fcport;

	ql_log(ql_log_info, vha, 0x5073,
	    "Detach TGT-ID: 0x%x\n", tgt_id);

	fcport = qlafx00_get_fcport(vha, tgt_id);
	if (!fcport)
		return;

	qla2x00_mark_device_lost(vha, fcport, 0, 0);
}

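/*
 * Decode of QLAFX00_MBA_PORT_UPDATE as handled below: mbx[1] == 0 is a
 * per-target update (mbx[2] == 1 target online, mbx[2] == 2 detach the
 * target in mbx[3]); mbx[1] == 0xffff is a port-level update
 * (mbx[2] == 1 link up, mbx[2] == 2 no cable / all devices lost).
 */
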
int
qlafx00_process_aen(struct scsi_qla_host *vha, struct qla_work_evt *evt)
{
	int rval = 0;
	uint32_t aen_code, aen_data;

	aen_code = FCH_EVT_VENDOR_UNIQUE;
	aen_data = evt->u.aenfx.evtcode;

	switch (evt->u.aenfx.evtcode) {
	case QLAFX00_MBA_PORT_UPDATE:	/* Port database update */
		if (evt->u.aenfx.mbx[1] == 0) {
			if (evt->u.aenfx.mbx[2] == 1) {
				if (!vha->flags.fw_tgt_reported)
					vha->flags.fw_tgt_reported = 1;
				atomic_set(&vha->loop_down_timer, 0);
				atomic_set(&vha->loop_state, LOOP_UP);
				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
			} else if (evt->u.aenfx.mbx[2] == 2) {
				qlafx00_tgt_detach(vha, evt->u.aenfx.mbx[3]);
			}
		} else if (evt->u.aenfx.mbx[1] == 0xffff) {
			if (evt->u.aenfx.mbx[2] == 1) {
				if (!vha->flags.fw_tgt_reported)
					vha->flags.fw_tgt_reported = 1;
				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
			} else if (evt->u.aenfx.mbx[2] == 2) {
				vha->device_flags |= DFLG_NO_CABLE;
				qla2x00_mark_all_devices_lost(vha, 1);
			}
		}
		break;
	case QLAFX00_MBA_LINK_UP:
		aen_code = FCH_EVT_LINKUP;
		aen_data = 0;
		break;
	case QLAFX00_MBA_LINK_DOWN:
		aen_code = FCH_EVT_LINKDOWN;
		aen_data = 0;
		break;
	}

	fc_host_post_event(vha->host, fc_get_event_number(),
	    aen_code, aen_data);

	return rval;
}

static void
qlafx00_update_host_attr(scsi_qla_host_t *vha, struct port_info_data *pinfo)
{
	u64 port_name = 0, node_name = 0;

	port_name = (unsigned long long)wwn_to_u64(pinfo->port_name);
	node_name = (unsigned long long)wwn_to_u64(pinfo->node_name);

	fc_host_node_name(vha->host) = node_name;
	fc_host_port_name(vha->host) = port_name;
	if (!pinfo->port_type)
		vha->hw->current_topology = ISP_CFG_F;
	if (pinfo->link_status == QLAFX00_LINK_STATUS_UP)
		atomic_set(&vha->loop_state, LOOP_READY);
	else if (pinfo->link_status == QLAFX00_LINK_STATUS_DOWN)
		atomic_set(&vha->loop_state, LOOP_DOWN);
	vha->hw->link_data_rate = (uint16_t)pinfo->link_config;
}

static void
qla2x00_fxdisc_iocb_timeout(void *data)
{
	srb_t *sp = (srb_t *)data;
	struct srb_iocb *lio = &sp->u.iocb_cmd;

	complete(&lio->u.fxiocb.fxiocb_comp);
}

static void
qla2x00_fxdisc_sp_done(void *data, void *ptr, int res)
{
	srb_t *sp = (srb_t *)ptr;
	struct srb_iocb *lio = &sp->u.iocb_cmd;

	complete(&lio->u.fxiocb.fxiocb_comp);
}

int
qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t fx_type)
{
	srb_t *sp;
	struct srb_iocb *fdisc;
	int rval = QLA_FUNCTION_FAILED;
	struct qla_hw_data *ha = vha->hw;
	struct host_system_info *phost_info;
	struct register_host_info *preg_hsi;
	struct new_utsname *p_sysid = NULL;
	struct timeval tv;

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	fdisc = &sp->u.iocb_cmd;
	switch (fx_type) {
	case FXDISC_GET_CONFIG_INFO:
		fdisc->u.fxiocb.flags =
		    SRB_FXDISC_RESP_DMA_VALID;
		fdisc->u.fxiocb.rsp_len = sizeof(struct config_info_data);
		break;
	case FXDISC_GET_PORT_INFO:
		fdisc->u.fxiocb.flags =
		    SRB_FXDISC_RESP_DMA_VALID | SRB_FXDISC_REQ_DWRD_VALID;
		fdisc->u.fxiocb.rsp_len = QLAFX00_PORT_DATA_INFO;
		fdisc->u.fxiocb.req_data = fcport->port_id;
		break;
	case FXDISC_GET_TGT_NODE_INFO:
		fdisc->u.fxiocb.flags =
		    SRB_FXDISC_RESP_DMA_VALID | SRB_FXDISC_REQ_DWRD_VALID;
		fdisc->u.fxiocb.rsp_len = QLAFX00_TGT_NODE_INFO;
		fdisc->u.fxiocb.req_data = fcport->tgt_id;
		break;
	case FXDISC_GET_TGT_NODE_LIST:
		fdisc->u.fxiocb.flags =
		    SRB_FXDISC_RESP_DMA_VALID | SRB_FXDISC_REQ_DWRD_VALID;
		fdisc->u.fxiocb.rsp_len = QLAFX00_TGT_NODE_LIST_SIZE;
		break;
	case FXDISC_REG_HOST_INFO:
		fdisc->u.fxiocb.flags = SRB_FXDISC_REQ_DMA_VALID;
		fdisc->u.fxiocb.req_len = sizeof(struct register_host_info);
		p_sysid = utsname();
		if (!p_sysid) {
			ql_log(ql_log_warn, vha, 0x303c,
			    "Not able to get the system information\n");
			goto done_free_sp;
		}
		break;
	default:
		break;
	}

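	/*
	 * Request/response DMA buffers are allocated below only when the
	 * corresponding SRB_FXDISC_*_DMA_VALID flag was set above; the
	 * done_unmap_* goto ladder at the end frees them in reverse order.
	 */
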
	if (fdisc->u.fxiocb.flags & SRB_FXDISC_REQ_DMA_VALID) {
		fdisc->u.fxiocb.req_addr = dma_alloc_coherent(&ha->pdev->dev,
		    fdisc->u.fxiocb.req_len,
		    &fdisc->u.fxiocb.req_dma_handle, GFP_KERNEL);
		if (!fdisc->u.fxiocb.req_addr)
			goto done_free_sp;

		if (fx_type == FXDISC_REG_HOST_INFO) {
			preg_hsi = (struct register_host_info *)
			    fdisc->u.fxiocb.req_addr;
			phost_info = &preg_hsi->hsi;
			memset(preg_hsi, 0, sizeof(struct register_host_info));
			phost_info->os_type = OS_TYPE_LINUX;
			strncpy(phost_info->sysname,
			    p_sysid->sysname, SYSNAME_LENGTH);
			strncpy(phost_info->nodename,
			    p_sysid->nodename, NODENAME_LENGTH);
			strncpy(phost_info->release,
			    p_sysid->release, RELEASE_LENGTH);
			strncpy(phost_info->version,
			    p_sysid->version, VERSION_LENGTH);
			strncpy(phost_info->machine,
			    p_sysid->machine, MACHINE_LENGTH);
			strncpy(phost_info->domainname,
			    p_sysid->domainname, DOMNAME_LENGTH);
			strncpy(phost_info->hostdriver,
			    QLA2XXX_VERSION, VERSION_LENGTH);
			do_gettimeofday(&tv);
			preg_hsi->utc = (uint64_t)tv.tv_sec;
			ql_dbg(ql_dbg_init, vha, 0x0149,
			    "ISP%04X: Host registration with firmware\n",
			    ha->pdev->device);
			ql_dbg(ql_dbg_init, vha, 0x014a,
			    "os_type = '%d', sysname = '%s', "
			    "nodename = '%s'\n",
			    phost_info->os_type,
			    phost_info->sysname,
			    phost_info->nodename);
			ql_dbg(ql_dbg_init, vha, 0x014b,
			    "release = '%s', version = '%s'\n",
			    phost_info->release,
			    phost_info->version);
			ql_dbg(ql_dbg_init, vha, 0x014c,
			    "machine = '%s', "
			    "domainname = '%s', hostdriver = '%s'\n",
			    phost_info->machine,
			    phost_info->domainname,
			    phost_info->hostdriver);
			ql_dump_buffer(ql_dbg_init + ql_dbg_disc, vha, 0x014d,
			    (uint8_t *)phost_info,
			    sizeof(struct host_system_info));
		}
	}

	if (fdisc->u.fxiocb.flags & SRB_FXDISC_RESP_DMA_VALID) {
		fdisc->u.fxiocb.rsp_addr = dma_alloc_coherent(&ha->pdev->dev,
		    fdisc->u.fxiocb.rsp_len,
		    &fdisc->u.fxiocb.rsp_dma_handle, GFP_KERNEL);
		if (!fdisc->u.fxiocb.rsp_addr)
			goto done_unmap_req;
	}

	sp->type = SRB_FXIOCB_DCMD;
	sp->name = "fxdisc";
	qla2x00_init_timer(sp, FXDISC_TIMEOUT);
	fdisc->timeout = qla2x00_fxdisc_iocb_timeout;
	fdisc->u.fxiocb.req_func_type = fx_type;
	sp->done = qla2x00_fxdisc_sp_done;

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_unmap_dma;

	wait_for_completion(&fdisc->u.fxiocb.fxiocb_comp);

	if (fx_type == FXDISC_GET_CONFIG_INFO) {
		struct config_info_data *pinfo =
		    (struct config_info_data *) fdisc->u.fxiocb.rsp_addr;
		memcpy(&vha->hw->mr.product_name, pinfo->product_name,
		    sizeof(vha->hw->mr.product_name));
		memcpy(&vha->hw->mr.symbolic_name, pinfo->symbolic_name,
		    sizeof(vha->hw->mr.symbolic_name));
		memcpy(&vha->hw->mr.serial_num, pinfo->serial_num,
		    sizeof(vha->hw->mr.serial_num));
		memcpy(&vha->hw->mr.hw_version, pinfo->hw_version,
		    sizeof(vha->hw->mr.hw_version));
		memcpy(&vha->hw->mr.fw_version, pinfo->fw_version,
		    sizeof(vha->hw->mr.fw_version));
		strim(vha->hw->mr.fw_version);
		memcpy(&vha->hw->mr.uboot_version, pinfo->uboot_version,
		    sizeof(vha->hw->mr.uboot_version));
		memcpy(&vha->hw->mr.fru_serial_num, pinfo->fru_serial_num,
		    sizeof(vha->hw->mr.fru_serial_num));
	} else if (fx_type == FXDISC_GET_PORT_INFO) {
		struct port_info_data *pinfo =
		    (struct port_info_data *) fdisc->u.fxiocb.rsp_addr;
		memcpy(vha->node_name, pinfo->node_name, WWN_SIZE);
		memcpy(vha->port_name, pinfo->port_name, WWN_SIZE);
		vha->d_id.b.domain = pinfo->port_id[0];
		vha->d_id.b.area = pinfo->port_id[1];
		vha->d_id.b.al_pa = pinfo->port_id[2];
		qlafx00_update_host_attr(vha, pinfo);
		ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0141,
		    (uint8_t *)pinfo, 16);
	} else if (fx_type == FXDISC_GET_TGT_NODE_INFO) {
		struct qlafx00_tgt_node_info *pinfo =
		    (struct qlafx00_tgt_node_info *) fdisc->u.fxiocb.rsp_addr;
		memcpy(fcport->node_name, pinfo->tgt_node_wwnn, WWN_SIZE);
		memcpy(fcport->port_name, pinfo->tgt_node_wwpn, WWN_SIZE);
		fcport->port_type = FCT_TARGET;
		ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0144,
		    (uint8_t *)pinfo, 16);
	} else if (fx_type == FXDISC_GET_TGT_NODE_LIST) {
		struct qlafx00_tgt_node_info *pinfo =
		    (struct qlafx00_tgt_node_info *) fdisc->u.fxiocb.rsp_addr;
		ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0146,
		    (uint8_t *)pinfo, 16);
		memcpy(vha->hw->gid_list, pinfo, QLAFX00_TGT_NODE_LIST_SIZE);
	}
	rval = fdisc->u.fxiocb.result;

done_unmap_dma:
	if (fdisc->u.fxiocb.rsp_addr)
		dma_free_coherent(&ha->pdev->dev, fdisc->u.fxiocb.rsp_len,
		    fdisc->u.fxiocb.rsp_addr, fdisc->u.fxiocb.rsp_dma_handle);

done_unmap_req:
	if (fdisc->u.fxiocb.req_addr)
		dma_free_coherent(&ha->pdev->dev, fdisc->u.fxiocb.req_len,
		    fdisc->u.fxiocb.req_addr, fdisc->u.fxiocb.req_dma_handle);
done_free_sp:
	sp->free(vha, sp);
done:
	return rval;
}

static void
qlafx00_abort_iocb_timeout(void *data)
{
	srb_t *sp = (srb_t *)data;
	struct srb_iocb *abt = &sp->u.iocb_cmd;

	abt->u.abt.comp_status = CS_TIMEOUT;
	complete(&abt->u.abt.comp);
}

static void
qlafx00_abort_sp_done(void *data, void *ptr, int res)
{
	srb_t *sp = (srb_t *)ptr;
	struct srb_iocb *abt = &sp->u.iocb_cmd;

	complete(&abt->u.abt.comp);
}

static int
qlafx00_async_abt_cmd(srb_t *cmd_sp)
{
	scsi_qla_host_t *vha = cmd_sp->fcport->vha;
	fc_port_t *fcport = cmd_sp->fcport;
	struct srb_iocb *abt_iocb;
	srb_t *sp;
	int rval = QLA_FUNCTION_FAILED;

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	abt_iocb = &sp->u.iocb_cmd;
	sp->type = SRB_ABT_CMD;
	sp->name = "abort";
	qla2x00_init_timer(sp, FXDISC_TIMEOUT);
	abt_iocb->u.abt.cmd_hndl = cmd_sp->handle;
	sp->done = qlafx00_abort_sp_done;
	abt_iocb->timeout = qlafx00_abort_iocb_timeout;
	init_completion(&abt_iocb->u.abt.comp);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	ql_dbg(ql_dbg_async, vha, 0x507c,
	    "Abort command issued - hdl=%x, target_id=%x\n",
	    cmd_sp->handle, fcport->tgt_id);

	wait_for_completion(&abt_iocb->u.abt.comp);

	rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ?
	    QLA_SUCCESS : QLA_FUNCTION_FAILED;

done_free_sp:
	sp->free(vha, sp);
done:
	return rval;
}

int
qlafx00_abort_command(srb_t *sp)
{
	unsigned long flags = 0;
	uint32_t handle;
	fc_port_t *fcport = sp->fcport;
	struct scsi_qla_host *vha = fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = vha->req;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (handle = 1; handle < DEFAULT_OUTSTANDING_COMMANDS; handle++) {
		if (req->outstanding_cmds[handle] == sp)
			break;
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	if (handle == DEFAULT_OUTSTANDING_COMMANDS) {
		/* Command not found. */
		return QLA_FUNCTION_FAILED;
	}
	return qlafx00_async_abt_cmd(sp);
}

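/*
 * The handle scan above (under hardware_lock) confirms the command is
 * still outstanding before an abort IOCB is built for it; a command
 * that has already completed returns QLA_FUNCTION_FAILED instead of
 * being aborted.
 */
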
2069 */ 2070 rval = qla2x00_alloc_outstanding_cmds(ha, vha->req); 2071 if (rval != QLA_SUCCESS) 2072 return rval; 2073 2074 rval = qla2x00_init_rings(vha); 2075 ha->flags.chip_reset_done = 1; 2076 2077 return rval; 2078 } 2079 2080 uint32_t 2081 qlafx00_fw_state_show(struct device *dev, struct device_attribute *attr, 2082 char *buf) 2083 { 2084 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 2085 int rval = QLA_FUNCTION_FAILED; 2086 uint32_t state[1]; 2087 2088 if (qla2x00_reset_active(vha)) 2089 ql_log(ql_log_warn, vha, 0x70ce, 2090 "ISP reset active.\n"); 2091 else if (!vha->hw->flags.eeh_busy) { 2092 rval = qlafx00_get_firmware_state(vha, state); 2093 } 2094 if (rval != QLA_SUCCESS) 2095 memset(state, -1, sizeof(state)); 2096 2097 return state[0]; 2098 } 2099 2100 void 2101 qlafx00_get_host_speed(struct Scsi_Host *shost) 2102 { 2103 struct qla_hw_data *ha = ((struct scsi_qla_host *) 2104 (shost_priv(shost)))->hw; 2105 u32 speed = FC_PORTSPEED_UNKNOWN; 2106 2107 switch (ha->link_data_rate) { 2108 case QLAFX00_PORT_SPEED_2G: 2109 speed = FC_PORTSPEED_2GBIT; 2110 break; 2111 case QLAFX00_PORT_SPEED_4G: 2112 speed = FC_PORTSPEED_4GBIT; 2113 break; 2114 case QLAFX00_PORT_SPEED_8G: 2115 speed = FC_PORTSPEED_8GBIT; 2116 break; 2117 case QLAFX00_PORT_SPEED_10G: 2118 speed = FC_PORTSPEED_10GBIT; 2119 break; 2120 } 2121 fc_host_speed(shost) = speed; 2122 } 2123 2124 /** QLAFX00 specific ISR implementation functions */ 2125 2126 static inline void 2127 qlafx00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len, 2128 uint32_t sense_len, struct rsp_que *rsp, int res) 2129 { 2130 struct scsi_qla_host *vha = sp->fcport->vha; 2131 struct scsi_cmnd *cp = GET_CMD_SP(sp); 2132 uint32_t track_sense_len; 2133 2134 SET_FW_SENSE_LEN(sp, sense_len); 2135 2136 if (sense_len >= SCSI_SENSE_BUFFERSIZE) 2137 sense_len = SCSI_SENSE_BUFFERSIZE; 2138 2139 SET_CMD_SENSE_LEN(sp, sense_len); 2140 SET_CMD_SENSE_PTR(sp, cp->sense_buffer); 2141 track_sense_len = sense_len; 2142 2143 if (sense_len > par_sense_len) 2144 sense_len = par_sense_len; 2145 2146 memcpy(cp->sense_buffer, sense_data, sense_len); 2147 2148 SET_FW_SENSE_LEN(sp, GET_FW_SENSE_LEN(sp) - sense_len); 2149 2150 SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len); 2151 track_sense_len -= sense_len; 2152 SET_CMD_SENSE_LEN(sp, track_sense_len); 2153 2154 ql_dbg(ql_dbg_io, vha, 0x304d, 2155 "sense_len=0x%x par_sense_len=0x%x track_sense_len=0x%x.\n", 2156 sense_len, par_sense_len, track_sense_len); 2157 if (GET_FW_SENSE_LEN(sp) > 0) { 2158 rsp->status_srb = sp; 2159 cp->result = res; 2160 } 2161 2162 if (sense_len) { 2163 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3039, 2164 "Check condition Sense data, nexus%ld:%d:%d cmd=%p.\n", 2165 sp->fcport->vha->host_no, cp->device->id, cp->device->lun, 2166 cp); 2167 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x3049, 2168 cp->sense_buffer, sense_len); 2169 } 2170 } 2171 2172 static void 2173 qlafx00_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, 2174 struct tsk_mgmt_entry_fx00 *pkt, srb_t *sp, 2175 uint16_t sstatus, uint16_t cpstatus) 2176 { 2177 struct srb_iocb *tmf; 2178 2179 tmf = &sp->u.iocb_cmd; 2180 if (cpstatus != CS_COMPLETE || 2181 (sstatus & SS_RESPONSE_INFO_LEN_VALID)) 2182 cpstatus = CS_INCOMPLETE; 2183 tmf->u.tmf.comp_status = cpstatus; 2184 sp->done(vha, sp, 0); 2185 } 2186 2187 static void 2188 qlafx00_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, 2189 struct abort_iocb_entry_fx00 *pkt) 2190 { 2191 const char func[] = "ABT_IOCB"; 2192 srb_t *sp; 2193 struct 

uint32_t
qlafx00_fw_state_show(struct device *dev, struct device_attribute *attr,
		      char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	int rval = QLA_FUNCTION_FAILED;
	uint32_t state[1];

	if (qla2x00_reset_active(vha))
		ql_log(ql_log_warn, vha, 0x70ce,
		    "ISP reset active.\n");
	else if (!vha->hw->flags.eeh_busy) {
		rval = qlafx00_get_firmware_state(vha, state);
	}
	if (rval != QLA_SUCCESS)
		memset(state, -1, sizeof(state));

	return state[0];
}

void
qlafx00_get_host_speed(struct Scsi_Host *shost)
{
	struct qla_hw_data *ha = ((struct scsi_qla_host *)
	    (shost_priv(shost)))->hw;
	u32 speed = FC_PORTSPEED_UNKNOWN;

	switch (ha->link_data_rate) {
	case QLAFX00_PORT_SPEED_2G:
		speed = FC_PORTSPEED_2GBIT;
		break;
	case QLAFX00_PORT_SPEED_4G:
		speed = FC_PORTSPEED_4GBIT;
		break;
	case QLAFX00_PORT_SPEED_8G:
		speed = FC_PORTSPEED_8GBIT;
		break;
	case QLAFX00_PORT_SPEED_10G:
		speed = FC_PORTSPEED_10GBIT;
		break;
	}
	fc_host_speed(shost) = speed;
}

/* QLAFX00 specific ISR implementation functions */

static inline void
qlafx00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
		     uint32_t sense_len, struct rsp_que *rsp, int res)
{
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct scsi_cmnd *cp = GET_CMD_SP(sp);
	uint32_t track_sense_len;

	SET_FW_SENSE_LEN(sp, sense_len);

	if (sense_len >= SCSI_SENSE_BUFFERSIZE)
		sense_len = SCSI_SENSE_BUFFERSIZE;

	SET_CMD_SENSE_LEN(sp, sense_len);
	SET_CMD_SENSE_PTR(sp, cp->sense_buffer);
	track_sense_len = sense_len;

	if (sense_len > par_sense_len)
		sense_len = par_sense_len;

	memcpy(cp->sense_buffer, sense_data, sense_len);

	SET_FW_SENSE_LEN(sp, GET_FW_SENSE_LEN(sp) - sense_len);

	SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len);
	track_sense_len -= sense_len;
	SET_CMD_SENSE_LEN(sp, track_sense_len);

	ql_dbg(ql_dbg_io, vha, 0x304d,
	    "sense_len=0x%x par_sense_len=0x%x track_sense_len=0x%x.\n",
	    sense_len, par_sense_len, track_sense_len);
	if (GET_FW_SENSE_LEN(sp) > 0) {
		rsp->status_srb = sp;
		cp->result = res;
	}

	if (sense_len) {
		ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3039,
		    "Check condition Sense data, nexus%ld:%d:%d cmd=%p.\n",
		    sp->fcport->vha->host_no, cp->device->id, cp->device->lun,
		    cp);
		ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x3049,
		    cp->sense_buffer, sense_len);
	}
}
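
/*
 * Worked example (informal, sizes symbolic): if firmware reports sense_len
 * = S and the status IOCB carries par_sense_len = P inline bytes, the
 * routine above copies min(S, P, SCSI_SENSE_BUFFERSIZE) bytes now, leaves
 * CMD_SENSE_PTR/CMD_SENSE_LEN describing the unfilled remainder of
 * cp->sense_buffer, and sets FW_SENSE_LEN to the bytes firmware still
 * owes. A non-zero FW_SENSE_LEN parks the srb on rsp->status_srb so that
 * qlafx00_status_cont_entry() can drain the rest.
 */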

static void
qlafx00_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
		      struct tsk_mgmt_entry_fx00 *pkt, srb_t *sp,
		      uint16_t sstatus, uint16_t cpstatus)
{
	struct srb_iocb *tmf;

	tmf = &sp->u.iocb_cmd;
	if (cpstatus != CS_COMPLETE ||
	    (sstatus & SS_RESPONSE_INFO_LEN_VALID))
		cpstatus = CS_INCOMPLETE;
	tmf->u.tmf.comp_status = cpstatus;
	sp->done(vha, sp, 0);
}

static void
qlafx00_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
			 struct abort_iocb_entry_fx00 *pkt)
{
	const char func[] = "ABT_IOCB";
	srb_t *sp;
	struct srb_iocb *abt;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	abt = &sp->u.iocb_cmd;
	abt->u.abt.comp_status = le32_to_cpu(pkt->tgt_id_sts);
	sp->done(vha, sp, 0);
}

static void
qlafx00_ioctl_iosb_entry(scsi_qla_host_t *vha, struct req_que *req,
			 struct ioctl_iocb_entry_fx00 *pkt)
{
	const char func[] = "IOSB_IOCB";
	srb_t *sp;
	struct fc_bsg_job *bsg_job;
	struct srb_iocb *iocb_job;
	int res = 0;
	struct qla_mt_iocb_rsp_fx00 fstatus;
	uint8_t *fw_sts_ptr;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	if (sp->type == SRB_FXIOCB_DCMD) {
		iocb_job = &sp->u.iocb_cmd;
		iocb_job->u.fxiocb.seq_number = le32_to_cpu(pkt->seq_no);
		iocb_job->u.fxiocb.fw_flags = le32_to_cpu(pkt->fw_iotcl_flags);
		iocb_job->u.fxiocb.result = le32_to_cpu(pkt->status);
		if (iocb_job->u.fxiocb.flags & SRB_FXDISC_RSP_DWRD_VALID)
			iocb_job->u.fxiocb.req_data =
			    le32_to_cpu(pkt->dataword_r);
	} else {
		bsg_job = sp->u.bsg_job;

		memset(&fstatus, 0, sizeof(struct qla_mt_iocb_rsp_fx00));

		fstatus.reserved_1 = pkt->reserved_0;
		fstatus.func_type = pkt->comp_func_num;
		fstatus.ioctl_flags = pkt->fw_iotcl_flags;
		fstatus.ioctl_data = pkt->dataword_r;
		fstatus.adapid = pkt->adapid;
		fstatus.adapid_hi = pkt->adapid_hi;
		fstatus.reserved_2 = pkt->reserved_1;
		fstatus.res_count = pkt->residuallen;
		fstatus.status = pkt->status;
		fstatus.seq_number = pkt->seq_no;
		memcpy(fstatus.reserved_3,
		    pkt->reserved_2, 20 * sizeof(uint8_t));

		fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
		    sizeof(struct fc_bsg_reply);

		memcpy(fw_sts_ptr, (uint8_t *)&fstatus,
		    sizeof(struct qla_mt_iocb_rsp_fx00));
		bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
		    sizeof(struct qla_mt_iocb_rsp_fx00) + sizeof(uint8_t);

		ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
		    sp->fcport->vha, 0x5080,
		    (uint8_t *)pkt, sizeof(struct ioctl_iocb_entry_fx00));

		ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
		    sp->fcport->vha, 0x5074,
		    (uint8_t *)fw_sts_ptr, sizeof(struct qla_mt_iocb_rsp_fx00));

		res = bsg_job->reply->result = DID_OK << 16;
		bsg_job->reply->reply_payload_rcv_len =
		    bsg_job->reply_payload.payload_len;
	}
	sp->done(vha, sp, res);
}

/**
 * qlafx00_status_entry() - Process a Status IOCB entry.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 * @pkt: Entry pointer
 */
static void
qlafx00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
{
	srb_t *sp;
	fc_port_t *fcport;
	struct scsi_cmnd *cp;
	struct sts_entry_fx00 *sts;
	uint16_t comp_status;
	uint16_t scsi_status;
	uint16_t ox_id;
	uint8_t lscsi_status;
	int32_t resid;
	uint32_t sense_len, par_sense_len, rsp_info_len, resid_len,
	    fw_resid_len;
	uint8_t *rsp_info = NULL, *sense_data = NULL;
	struct qla_hw_data *ha = vha->hw;
	uint32_t hindex, handle;
	uint16_t que;
	struct req_que *req;
	int logit = 1;
	int res = 0;

	sts = (struct sts_entry_fx00 *) pkt;

	comp_status = le16_to_cpu(sts->comp_status);
	scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
	hindex = sts->handle;
	handle = LSW(hindex);

	que = MSW(hindex);
	req = ha->req_q_map[que];

	/* Validate handle. */
	if (handle < req->num_outstanding_cmds)
		sp = req->outstanding_cmds[handle];
	else
		sp = NULL;

	if (sp == NULL) {
		ql_dbg(ql_dbg_io, vha, 0x3034,
		    "Invalid status handle (0x%x).\n", handle);

		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		qla2xxx_wake_dpc(vha);
		return;
	}

	if (sp->type == SRB_TM_CMD) {
		req->outstanding_cmds[handle] = NULL;
		qlafx00_tm_iocb_entry(vha, req, pkt, sp,
		    scsi_status, comp_status);
		return;
	}

	/* Fast path completion. */
	if (comp_status == CS_COMPLETE && scsi_status == 0) {
		qla2x00_do_host_ramp_up(vha);
		qla2x00_process_completed_request(vha, req, handle);
		return;
	}

	req->outstanding_cmds[handle] = NULL;
	cp = GET_CMD_SP(sp);
	if (cp == NULL) {
		ql_dbg(ql_dbg_io, vha, 0x3048,
		    "Command already returned (0x%x/%p).\n",
		    handle, sp);

		return;
	}

	lscsi_status = scsi_status & STATUS_MASK;

	fcport = sp->fcport;

	ox_id = 0;
	sense_len = par_sense_len = rsp_info_len = resid_len =
	    fw_resid_len = 0;
	if (scsi_status & SS_SENSE_LEN_VALID)
		sense_len = le32_to_cpu(sts->sense_len);
	if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER))
		resid_len = le32_to_cpu(sts->residual_len);
	if (comp_status == CS_DATA_UNDERRUN)
		fw_resid_len = le32_to_cpu(sts->residual_len);
	rsp_info = sense_data = sts->data;
	par_sense_len = sizeof(sts->data);

	/* Check for overrun. */
	if (comp_status == CS_COMPLETE &&
	    scsi_status & SS_RESIDUAL_OVER)
		comp_status = CS_DATA_OVERRUN;

	/*
	 * Based on Host and scsi status generate status code for Linux
	 */
	switch (comp_status) {
	case CS_COMPLETE:
	case CS_QUEUE_FULL:
		if (scsi_status == 0) {
			res = DID_OK << 16;
			break;
		}
		if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
			resid = resid_len;
			scsi_set_resid(cp, resid);

			if (!lscsi_status &&
			    ((unsigned)(scsi_bufflen(cp) - resid) <
			     cp->underflow)) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x3050,
				    "Mid-layer underflow "
				    "detected (0x%x of 0x%x bytes).\n",
				    resid, scsi_bufflen(cp));

				res = DID_ERROR << 16;
				break;
			}
		}
		res = DID_OK << 16 | lscsi_status;

		if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
			ql_dbg(ql_dbg_io, fcport->vha, 0x3051,
			    "QUEUE FULL detected.\n");
			break;
		}
		logit = 0;
		if (lscsi_status != SS_CHECK_CONDITION)
			break;

		memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
		if (!(scsi_status & SS_SENSE_LEN_VALID))
			break;

		qlafx00_handle_sense(sp, sense_data, par_sense_len, sense_len,
		    rsp, res);
		break;

	case CS_DATA_UNDERRUN:
		/* Use F/W calculated residual length. */
		if (IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha))
			resid = fw_resid_len;
		else
			resid = resid_len;
		scsi_set_resid(cp, resid);
		if (scsi_status & SS_RESIDUAL_UNDER) {
			if ((IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha))
			    && fw_resid_len != resid_len) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x3052,
				    "Dropped frame(s) detected "
				    "(0x%x of 0x%x bytes).\n",
				    resid, scsi_bufflen(cp));

				res = DID_ERROR << 16 | lscsi_status;
				goto check_scsi_status;
			}

			if (!lscsi_status &&
			    ((unsigned)(scsi_bufflen(cp) - resid) <
			     cp->underflow)) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x3053,
				    "Mid-layer underflow "
				    "detected (0x%x of 0x%x bytes, "
				    "cp->underflow: 0x%x).\n",
				    resid, scsi_bufflen(cp), cp->underflow);

				res = DID_ERROR << 16;
				break;
			}
		} else if (lscsi_status != SAM_STAT_TASK_SET_FULL &&
		    lscsi_status != SAM_STAT_BUSY) {
			/*
			 * scsi status of task set and busy are considered
			 * to be task not completed.
			 */

			ql_dbg(ql_dbg_io, fcport->vha, 0x3054,
			    "Dropped frame(s) detected (0x%x "
			    "of 0x%x bytes).\n", resid,
			    scsi_bufflen(cp));

			res = DID_ERROR << 16 | lscsi_status;
			goto check_scsi_status;
		} else {
			ql_dbg(ql_dbg_io, fcport->vha, 0x3055,
			    "scsi_status: 0x%x, lscsi_status: 0x%x\n",
			    scsi_status, lscsi_status);
		}

		res = DID_OK << 16 | lscsi_status;
		logit = 0;

check_scsi_status:
		/*
		 * Check to see if SCSI Status is non zero. If so report SCSI
		 * Status.
		 */
		if (lscsi_status != 0) {
			if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x3056,
				    "QUEUE FULL detected.\n");
				logit = 1;
				break;
			}
			if (lscsi_status != SS_CHECK_CONDITION)
				break;

			memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
			if (!(scsi_status & SS_SENSE_LEN_VALID))
				break;

			qlafx00_handle_sense(sp, sense_data, par_sense_len,
			    sense_len, rsp, res);
		}
		break;

	case CS_PORT_LOGGED_OUT:
	case CS_PORT_CONFIG_CHG:
	case CS_PORT_BUSY:
	case CS_INCOMPLETE:
	case CS_PORT_UNAVAILABLE:
	case CS_TIMEOUT:
	case CS_RESET:

		/*
		 * We are going to have the fc class block the rport
		 * while we try to recover so instruct the mid layer
		 * to requeue until the class decides how to handle this.
		 */
		res = DID_TRANSPORT_DISRUPTED << 16;

		ql_dbg(ql_dbg_io, fcport->vha, 0x3057,
		    "Port down status: port-state=0x%x.\n",
		    atomic_read(&fcport->state));

		if (atomic_read(&fcport->state) == FCS_ONLINE)
			qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
		break;

	case CS_ABORTED:
		res = DID_RESET << 16;
		break;

	default:
		res = DID_ERROR << 16;
		break;
	}

	if (logit)
		ql_dbg(ql_dbg_io, fcport->vha, 0x3058,
		    "FCP command status: 0x%x-0x%x (0x%x) "
		    "nexus=%ld:%d:%d tgt_id: 0x%x lscsi_status: 0x%x "
		    "cdb=%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x len=0x%x "
		    "rsp_info=0x%x resid=0x%x fw_resid=0x%x "
		    "sense_len=0x%x, par_sense_len=0x%x, rsp_info_len=0x%x\n",
		    comp_status, scsi_status, res, vha->host_no,
		    cp->device->id, cp->device->lun, fcport->tgt_id,
		    lscsi_status, cp->cmnd[0], cp->cmnd[1], cp->cmnd[2],
		    cp->cmnd[3], cp->cmnd[4], cp->cmnd[5], cp->cmnd[6],
		    cp->cmnd[7], cp->cmnd[8], cp->cmnd[9], scsi_bufflen(cp),
		    rsp_info_len, resid_len, fw_resid_len, sense_len,
		    par_sense_len, rsp_info_len);

	if (!res)
		qla2x00_do_host_ramp_up(vha);

	if (rsp->status_srb == NULL)
		sp->done(ha, sp, res);
}
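
/*
 * Illustrative sketch (not compiled): the completion handle in
 * sts->handle packs the request-queue id in the upper 16 bits and the
 * outstanding-command index in the lower 16, mirroring MAKE_HANDLE() on
 * the submission side in qlafx00_start_scsi().
 */
#if 0
	hindex = MAKE_HANDLE(req->id, sp->handle);
	handle = LSW(hindex);	/* == sp->handle, outstanding_cmds index */
	que = MSW(hindex);	/* == req->id, request queue number */
#endif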
2501 */ 2502 res = DID_TRANSPORT_DISRUPTED << 16; 2503 2504 ql_dbg(ql_dbg_io, fcport->vha, 0x3057, 2505 "Port down status: port-state=0x%x.\n", 2506 atomic_read(&fcport->state)); 2507 2508 if (atomic_read(&fcport->state) == FCS_ONLINE) 2509 qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1); 2510 break; 2511 2512 case CS_ABORTED: 2513 res = DID_RESET << 16; 2514 break; 2515 2516 default: 2517 res = DID_ERROR << 16; 2518 break; 2519 } 2520 2521 if (logit) 2522 ql_dbg(ql_dbg_io, fcport->vha, 0x3058, 2523 "FCP command status: 0x%x-0x%x (0x%x) " 2524 "nexus=%ld:%d:%d tgt_id: 0x%x lscsi_status: 0x%x" 2525 "cdb=%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x len=0x%x " 2526 "rsp_info=0x%x resid=0x%x fw_resid=0x%x " 2527 "sense_len=0x%x, par_sense_len=0x%x, rsp_info_len=0x%x\n", 2528 comp_status, scsi_status, res, vha->host_no, 2529 cp->device->id, cp->device->lun, fcport->tgt_id, 2530 lscsi_status, cp->cmnd[0], cp->cmnd[1], cp->cmnd[2], 2531 cp->cmnd[3], cp->cmnd[4], cp->cmnd[5], cp->cmnd[6], 2532 cp->cmnd[7], cp->cmnd[8], cp->cmnd[9], scsi_bufflen(cp), 2533 rsp_info_len, resid_len, fw_resid_len, sense_len, 2534 par_sense_len, rsp_info_len); 2535 2536 if (!res) 2537 qla2x00_do_host_ramp_up(vha); 2538 2539 if (rsp->status_srb == NULL) 2540 sp->done(ha, sp, res); 2541 } 2542 2543 /** 2544 * qlafx00_status_cont_entry() - Process a Status Continuations entry. 2545 * @ha: SCSI driver HA context 2546 * @pkt: Entry pointer 2547 * 2548 * Extended sense data. 2549 */ 2550 static void 2551 qlafx00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt) 2552 { 2553 uint8_t sense_sz = 0; 2554 struct qla_hw_data *ha = rsp->hw; 2555 struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev); 2556 srb_t *sp = rsp->status_srb; 2557 struct scsi_cmnd *cp; 2558 uint32_t sense_len; 2559 uint8_t *sense_ptr; 2560 2561 if (!sp) { 2562 ql_dbg(ql_dbg_io, vha, 0x3037, 2563 "no SP, sp = %p\n", sp); 2564 return; 2565 } 2566 2567 if (!GET_FW_SENSE_LEN(sp)) { 2568 ql_dbg(ql_dbg_io, vha, 0x304b, 2569 "no fw sense data, sp = %p\n", sp); 2570 return; 2571 } 2572 cp = GET_CMD_SP(sp); 2573 if (cp == NULL) { 2574 ql_log(ql_log_warn, vha, 0x303b, 2575 "cmd is NULL: already returned to OS (sp=%p).\n", sp); 2576 2577 rsp->status_srb = NULL; 2578 return; 2579 } 2580 2581 if (!GET_CMD_SENSE_LEN(sp)) { 2582 ql_dbg(ql_dbg_io, vha, 0x304c, 2583 "no sense data, sp = %p\n", sp); 2584 } else { 2585 sense_len = GET_CMD_SENSE_LEN(sp); 2586 sense_ptr = GET_CMD_SENSE_PTR(sp); 2587 ql_dbg(ql_dbg_io, vha, 0x304f, 2588 "sp=%p sense_len=0x%x sense_ptr=%p.\n", 2589 sp, sense_len, sense_ptr); 2590 2591 if (sense_len > sizeof(pkt->data)) 2592 sense_sz = sizeof(pkt->data); 2593 else 2594 sense_sz = sense_len; 2595 2596 /* Move sense data. */ 2597 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x304e, 2598 (uint8_t *)pkt, sizeof(sts_cont_entry_t)); 2599 memcpy(sense_ptr, pkt->data, sense_sz); 2600 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x304a, 2601 sense_ptr, sense_sz); 2602 2603 sense_len -= sense_sz; 2604 sense_ptr += sense_sz; 2605 2606 SET_CMD_SENSE_PTR(sp, sense_ptr); 2607 SET_CMD_SENSE_LEN(sp, sense_len); 2608 } 2609 sense_len = GET_FW_SENSE_LEN(sp); 2610 sense_len = (sense_len > sizeof(pkt->data)) ? 2611 (sense_len - sizeof(pkt->data)) : 0; 2612 SET_FW_SENSE_LEN(sp, sense_len); 2613 2614 /* Place command on done queue. */ 2615 if (sense_len == 0) { 2616 rsp->status_srb = NULL; 2617 sp->done(ha, sp, cp->result); 2618 } 2619 } 2620 2621 /** 2622 * qlafx00_multistatus_entry() - Process Multi response queue entries. 

/**
 * qlafx00_multistatus_entry() - Process Multi response queue entries.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 * @pkt: Entry pointer
 */
static void
qlafx00_multistatus_entry(struct scsi_qla_host *vha,
			  struct rsp_que *rsp, void *pkt)
{
	srb_t *sp;
	struct multi_sts_entry_fx00 *stsmfx;
	struct qla_hw_data *ha = vha->hw;
	uint32_t handle, hindex, handle_count, i;
	uint16_t que;
	struct req_que *req;
	uint32_t *handle_ptr;

	stsmfx = (struct multi_sts_entry_fx00 *) pkt;

	handle_count = stsmfx->handle_count;

	if (handle_count > MAX_HANDLE_COUNT) {
		ql_dbg(ql_dbg_io, vha, 0x3035,
		    "Invalid handle count (0x%x).\n", handle_count);
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		qla2xxx_wake_dpc(vha);
		return;
	}

	handle_ptr = (uint32_t *) &stsmfx->handles[0];

	for (i = 0; i < handle_count; i++) {
		hindex = le32_to_cpu(*handle_ptr);
		handle = LSW(hindex);
		que = MSW(hindex);
		req = ha->req_q_map[que];

		/* Validate handle. */
		if (handle < req->num_outstanding_cmds)
			sp = req->outstanding_cmds[handle];
		else
			sp = NULL;

		if (sp == NULL) {
			ql_dbg(ql_dbg_io, vha, 0x3044,
			    "Invalid status handle (0x%x).\n", handle);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			qla2xxx_wake_dpc(vha);
			return;
		}
		qla2x00_process_completed_request(vha, req, handle);
		handle_ptr++;
	}
}

/**
 * qlafx00_error_entry() - Process an error entry.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 * @pkt: Entry pointer
 * @estatus: error status
 * @etype: entry type
 */
static void
qlafx00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp,
		    struct sts_entry_fx00 *pkt, uint8_t estatus, uint8_t etype)
{
	srb_t *sp;
	struct qla_hw_data *ha = vha->hw;
	const char func[] = "ERROR-IOCB";
	uint16_t que = MSW(pkt->handle);
	struct req_que *req = NULL;
	int res = DID_ERROR << 16;

	ql_dbg(ql_dbg_async, vha, 0x507f,
	    "type of error status in response: 0x%x\n", estatus);

	req = ha->req_q_map[que];

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (sp) {
		sp->done(ha, sp, res);
		return;
	}

	set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	qla2xxx_wake_dpc(vha);
}

/**
 * qlafx00_process_response_queue() - Process response queue entries.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 */
static void
qlafx00_process_response_queue(struct scsi_qla_host *vha,
			       struct rsp_que *rsp)
{
	struct sts_entry_fx00 *pkt;
	response_t *lptr;

	if (!vha->flags.online)
		return;

	while (RD_REG_DWORD(&(rsp->ring_ptr->signature)) !=
	    RESPONSE_PROCESSED) {
		lptr = rsp->ring_ptr;
		memcpy_fromio(rsp->rsp_pkt, lptr, sizeof(rsp->rsp_pkt));
		pkt = (struct sts_entry_fx00 *)rsp->rsp_pkt;

		rsp->ring_index++;
		if (rsp->ring_index == rsp->length) {
			rsp->ring_index = 0;
			rsp->ring_ptr = rsp->ring;
		} else {
			rsp->ring_ptr++;
		}

		if (pkt->entry_status != 0 &&
		    pkt->entry_type != IOCTL_IOSB_TYPE_FX00) {
			qlafx00_error_entry(vha, rsp,
			    (struct sts_entry_fx00 *)pkt, pkt->entry_status,
			    pkt->entry_type);
			goto next_iter;
		}

		switch (pkt->entry_type) {
		case STATUS_TYPE_FX00:
			qlafx00_status_entry(vha, rsp, pkt);
			break;

		case STATUS_CONT_TYPE_FX00:
			qlafx00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
			break;

		case MULTI_STATUS_TYPE_FX00:
			qlafx00_multistatus_entry(vha, rsp, pkt);
			break;

		case ABORT_IOCB_TYPE_FX00:
			qlafx00_abort_iocb_entry(vha, rsp->req,
			    (struct abort_iocb_entry_fx00 *)pkt);
			break;

		case IOCTL_IOSB_TYPE_FX00:
			qlafx00_ioctl_iosb_entry(vha, rsp->req,
			    (struct ioctl_iocb_entry_fx00 *)pkt);
			break;
		default:
			/* Type Not Supported. */
			ql_dbg(ql_dbg_async, vha, 0x5081,
			    "Received unknown response pkt type %x "
			    "entry status=%x.\n",
			    pkt->entry_type, pkt->entry_status);
			break;
		}
next_iter:
		WRT_REG_DWORD(&lptr->signature, RESPONSE_PROCESSED);
		wmb();
	}

	/* Adjust ring index */
	WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
}
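
/*
 * Worked example (informal, depth illustrative): with rsp->length = 512,
 * consuming the entry at ring_index 511 wraps ring_index to 0 and resets
 * ring_ptr to the ring base; any other consumption advances both by one.
 * Because each consumed entry is stamped RESPONSE_PROCESSED in place, the
 * while-loop stops at the first entry firmware has not yet overwritten
 * with fresh status.
 */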

/**
 * qlafx00_async_event() - Process asynchronous events.
 * @vha: SCSI driver HA context
 */
static void
qlafx00_async_event(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_fx00 __iomem *reg;
	int data_size = 1;

	reg = &ha->iobase->ispfx00;
	/* Setup to process RIO completion. */
	switch (ha->aenmb[0]) {
	case QLAFX00_MBA_SYSTEM_ERR:		/* System Error */
		ql_log(ql_log_warn, vha, 0x5079,
		    "ISP System Error - mbx1=%x\n", ha->aenmb[0]);
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case QLAFX00_MBA_SHUTDOWN_RQSTD:	/* Shutdown requested */
		ql_dbg(ql_dbg_async, vha, 0x5076,
		    "Asynchronous FW shutdown requested.\n");
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		qla2xxx_wake_dpc(vha);
		break;

	case QLAFX00_MBA_PORT_UPDATE:		/* Port database update */
		ha->aenmb[1] = RD_REG_WORD(&reg->aenmailbox1);
		ha->aenmb[2] = RD_REG_WORD(&reg->aenmailbox2);
		ha->aenmb[3] = RD_REG_WORD(&reg->aenmailbox3);
		ql_dbg(ql_dbg_async, vha, 0x5077,
		    "Asynchronous port Update received "
		    "aenmb[0]: %x, aenmb[1]: %x, aenmb[2]: %x, aenmb[3]: %x\n",
		    ha->aenmb[0], ha->aenmb[1], ha->aenmb[2], ha->aenmb[3]);
		data_size = 4;
		break;
	default:
		ha->aenmb[1] = RD_REG_WORD(&reg->aenmailbox1);
		ha->aenmb[2] = RD_REG_WORD(&reg->aenmailbox2);
		ha->aenmb[3] = RD_REG_WORD(&reg->aenmailbox3);
		ha->aenmb[4] = RD_REG_WORD(&reg->aenmailbox4);
		ha->aenmb[5] = RD_REG_WORD(&reg->aenmailbox5);
		ha->aenmb[6] = RD_REG_WORD(&reg->aenmailbox6);
		ha->aenmb[7] = RD_REG_WORD(&reg->aenmailbox7);
		ql_dbg(ql_dbg_async, vha, 0x5078,
		    "AEN:%04x %04x %04x %04x :%04x %04x %04x %04x\n",
		    ha->aenmb[0], ha->aenmb[1], ha->aenmb[2], ha->aenmb[3],
		    ha->aenmb[4], ha->aenmb[5], ha->aenmb[6], ha->aenmb[7]);
		break;
	}
	qlafx00_post_aenfx_work(vha, ha->aenmb[0],
	    (uint32_t *)ha->aenmb, data_size);
}

/**
 * qlafx00_mbx_completion() - Process mailbox command completions.
 * @vha: SCSI driver HA context
 * @mb0: value of mailbox register 16, the first completion status word
 */
static void
qlafx00_mbx_completion(scsi_qla_host_t *vha, uint32_t mb0)
{
	uint16_t cnt;
	uint16_t __iomem *wptr;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;

	if (!ha->mcp32)
		ql_dbg(ql_dbg_async, vha, 0x507e, "MBX pointer ERROR.\n");

	/* Load return mailbox registers. */
	ha->flags.mbox_int = 1;
	ha->mailbox_out32[0] = mb0;
	wptr = (uint16_t __iomem *)&reg->mailbox17;

	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
		ha->mailbox_out32[cnt] = RD_REG_WORD(wptr);
		wptr++;
	}
}
2873 */ 2874 irqreturn_t 2875 qlafx00_intr_handler(int irq, void *dev_id) 2876 { 2877 scsi_qla_host_t *vha; 2878 struct qla_hw_data *ha; 2879 struct device_reg_fx00 __iomem *reg; 2880 int status; 2881 unsigned long iter; 2882 uint32_t stat; 2883 uint32_t mb[8]; 2884 struct rsp_que *rsp; 2885 unsigned long flags; 2886 uint32_t clr_intr = 0; 2887 2888 rsp = (struct rsp_que *) dev_id; 2889 if (!rsp) { 2890 ql_log(ql_log_info, NULL, 0x507d, 2891 "%s: NULL response queue pointer.\n", __func__); 2892 return IRQ_NONE; 2893 } 2894 2895 ha = rsp->hw; 2896 reg = &ha->iobase->ispfx00; 2897 status = 0; 2898 2899 if (unlikely(pci_channel_offline(ha->pdev))) 2900 return IRQ_HANDLED; 2901 2902 spin_lock_irqsave(&ha->hardware_lock, flags); 2903 vha = pci_get_drvdata(ha->pdev); 2904 for (iter = 50; iter--; clr_intr = 0) { 2905 stat = QLAFX00_RD_INTR_REG(ha); 2906 if ((stat & QLAFX00_HST_INT_STS_BITS) == 0) 2907 break; 2908 2909 switch (stat & QLAFX00_HST_INT_STS_BITS) { 2910 case QLAFX00_INTR_MB_CMPLT: 2911 case QLAFX00_INTR_MB_RSP_CMPLT: 2912 case QLAFX00_INTR_MB_ASYNC_CMPLT: 2913 case QLAFX00_INTR_ALL_CMPLT: 2914 mb[0] = RD_REG_WORD(®->mailbox16); 2915 qlafx00_mbx_completion(vha, mb[0]); 2916 status |= MBX_INTERRUPT; 2917 clr_intr |= QLAFX00_INTR_MB_CMPLT; 2918 break; 2919 case QLAFX00_INTR_ASYNC_CMPLT: 2920 case QLAFX00_INTR_RSP_ASYNC_CMPLT: 2921 ha->aenmb[0] = RD_REG_WORD(®->aenmailbox0); 2922 qlafx00_async_event(vha); 2923 clr_intr |= QLAFX00_INTR_ASYNC_CMPLT; 2924 break; 2925 case QLAFX00_INTR_RSP_CMPLT: 2926 qlafx00_process_response_queue(vha, rsp); 2927 clr_intr |= QLAFX00_INTR_RSP_CMPLT; 2928 break; 2929 default: 2930 ql_dbg(ql_dbg_async, vha, 0x507a, 2931 "Unrecognized interrupt type (%d).\n", stat); 2932 break; 2933 } 2934 QLAFX00_CLR_INTR_REG(ha, clr_intr); 2935 QLAFX00_RD_INTR_REG(ha); 2936 } 2937 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2938 2939 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && 2940 (status & MBX_INTERRUPT) && ha->flags.mbox_int) { 2941 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); 2942 complete(&ha->mbx_intr_comp); 2943 } 2944 return IRQ_HANDLED; 2945 } 2946 2947 /** QLAFX00 specific IOCB implementation functions */ 2948 2949 static inline cont_a64_entry_t * 2950 qlafx00_prep_cont_type1_iocb(struct req_que *req, 2951 cont_a64_entry_t *lcont_pkt) 2952 { 2953 cont_a64_entry_t *cont_pkt; 2954 2955 /* Adjust ring index. */ 2956 req->ring_index++; 2957 if (req->ring_index == req->length) { 2958 req->ring_index = 0; 2959 req->ring_ptr = req->ring; 2960 } else { 2961 req->ring_ptr++; 2962 } 2963 2964 cont_pkt = (cont_a64_entry_t *)req->ring_ptr; 2965 2966 /* Load packet defaults. 

/* QLAFX00 specific IOCB implementation functions */

static inline cont_a64_entry_t *
qlafx00_prep_cont_type1_iocb(struct req_que *req,
			     cont_a64_entry_t *lcont_pkt)
{
	cont_a64_entry_t *cont_pkt;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&lcont_pkt->entry_type)) =
	    __constant_cpu_to_le32(CONTINUE_A64_TYPE_FX00);

	return cont_pkt;
}

static inline void
qlafx00_build_scsi_iocbs(srb_t *sp, struct cmd_type_7_fx00 *cmd_pkt,
			 uint16_t tot_dsds, struct cmd_type_7_fx00 *lcmd_pkt)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i, cont;
	struct req_que *req;
	cont_a64_entry_t lcont_pkt;
	cont_a64_entry_t *cont_pkt;

	vha = sp->fcport->vha;
	req = vha->req;

	cmd = GET_CMD_SP(sp);
	cont = 0;
	cont_pkt = NULL;

	/* Update entry type to indicate Command Type 7 IOCB */
	*((uint32_t *)(&lcmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(FX00_COMMAND_TYPE_7);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		lcmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		lcmd_pkt->cntrl_flags = TMF_WRITE_DATA;
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		lcmd_pkt->cntrl_flags = TMF_READ_DATA;
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
	}

	/* One DSD is available in the Command Type 7 IOCB */
	avail_dsds = 1;
	cur_dsd = (uint32_t *)&lcmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t sle_dma;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			memset(&lcont_pkt, 0, REQUEST_ENTRY_SIZE);
			cont_pkt =
			    qlafx00_prep_cont_type1_iocb(req, &lcont_pkt);
			cur_dsd = (uint32_t *)lcont_pkt.dseg_0_address;
			avail_dsds = 5;
			cont = 1;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
		if (avail_dsds == 0 && cont == 1) {
			cont = 0;
			memcpy_toio((void __iomem *)cont_pkt, &lcont_pkt,
			    REQUEST_ENTRY_SIZE);
		}
	}
	if (avail_dsds != 0 && cont == 1) {
		memcpy_toio((void __iomem *)cont_pkt, &lcont_pkt,
		    REQUEST_ENTRY_SIZE);
	}
}
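
/*
 * Worked example (informal): a command with 12 scatter/gather entries
 * places 1 DSD in the command IOCB and the remaining 11 in ceil(11 / 5) =
 * 3 Continuation Type 1 IOCBs, consuming 4 request-ring entries in total;
 * this follows from the capacities noted above (one DSD in the Command
 * Type 7 IOCB, five per continuation).
 */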

/**
 * qlafx00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qlafx00_start_scsi(srb_t *sp)
{
	int ret, nseg;
	unsigned long flags;
	uint32_t index;
	uint32_t handle;
	uint16_t cnt;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct cmd_type_7_fx00 *cmd_pkt;
	struct cmd_type_7_fx00 lcmd_pkt;
	struct scsi_lun llun;
	char tag[2];

	/* Setup device pointers. */
	ret = 0;

	rsp = ha->rsp_q_map[0];
	req = vha->req;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Forcing marker needed for now */
	vha->marker_needed = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_DWORD_RELAXED(req->req_q_out);

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_type_7_fx00 *)req->ring_ptr;

	memset(&lcmd_pkt, 0, REQUEST_ENTRY_SIZE);

	lcmd_pkt.handle = MAKE_HANDLE(req->id, sp->handle);
	lcmd_pkt.handle_hi = 0;
	lcmd_pkt.dseg_count = cpu_to_le16(tot_dsds);
	lcmd_pkt.tgt_idx = cpu_to_le16(sp->fcport->tgt_id);

	int_to_scsilun(cmd->device->lun, &llun);
	host_to_adap((uint8_t *)&llun, (uint8_t *)&lcmd_pkt.lun,
	    sizeof(lcmd_pkt.lun));

	/* Update tagged queuing modifier -- default is TSK_SIMPLE (0). */
	if (scsi_populate_tag_msg(cmd, tag)) {
		switch (tag[0]) {
		case HEAD_OF_QUEUE_TAG:
			lcmd_pkt.task = TSK_HEAD_OF_QUEUE;
			break;
		case ORDERED_QUEUE_TAG:
			lcmd_pkt.task = TSK_ORDERED;
			break;
		}
	}

	/* Load SCSI command packet. */
	host_to_adap(cmd->cmnd, lcmd_pkt.fcp_cdb, sizeof(lcmd_pkt.fcp_cdb));
	lcmd_pkt.byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	qlafx00_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, &lcmd_pkt);

	/* Set total data segment count. */
	lcmd_pkt.entry_count = (uint8_t)req_cnt;

	/* Specify response queue number where completion should happen */
	lcmd_pkt.entry_status = (uint8_t) rsp->id;

	ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302e,
	    (uint8_t *)cmd->cmnd, cmd->cmd_len);
	ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x3032,
	    (uint8_t *)&lcmd_pkt, REQUEST_ENTRY_SIZE);

	memcpy_toio((void __iomem *)cmd_pkt, &lcmd_pkt, REQUEST_ENTRY_SIZE);
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);
	QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_SUCCESS;

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_FUNCTION_FAILED;
}
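
/*
 * Worked example (informal): with req->length = 512, req->ring_index =
 * 500 and a hardware out-pointer of cnt = 10, the free-space computation
 * above yields req->cnt = 512 - (500 - 10) = 22 entries, so a request
 * needing req_cnt = 4 IOCBs plus the 2-entry safety margin still fits.
 * The numbers are illustrative; only the formula comes from the code.
 */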

void
qlafx00_tm_iocb(srb_t *sp, struct tsk_mgmt_entry_fx00 *ptm_iocb)
{
	struct srb_iocb *fxio = &sp->u.iocb_cmd;
	scsi_qla_host_t *vha = sp->fcport->vha;
	struct req_que *req = vha->req;
	struct tsk_mgmt_entry_fx00 tm_iocb;
	struct scsi_lun llun;

	memset(&tm_iocb, 0, sizeof(struct tsk_mgmt_entry_fx00));
	tm_iocb.entry_type = TSK_MGMT_IOCB_TYPE_FX00;
	tm_iocb.entry_count = 1;
	tm_iocb.handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
	tm_iocb.handle_hi = 0;
	tm_iocb.timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);
	tm_iocb.tgt_id = cpu_to_le16(sp->fcport->tgt_id);
	tm_iocb.control_flags = cpu_to_le32(fxio->u.tmf.flags);
	if (tm_iocb.control_flags == TCF_LUN_RESET) {
		int_to_scsilun(fxio->u.tmf.lun, &llun);
		host_to_adap((uint8_t *)&llun, (uint8_t *)&tm_iocb.lun,
		    sizeof(struct scsi_lun));
	}

	memcpy((void __iomem *)ptm_iocb, &tm_iocb,
	    sizeof(struct tsk_mgmt_entry_fx00));
	wmb();
}

void
qlafx00_abort_iocb(srb_t *sp, struct abort_iocb_entry_fx00 *pabt_iocb)
{
	struct srb_iocb *fxio = &sp->u.iocb_cmd;
	scsi_qla_host_t *vha = sp->fcport->vha;
	struct req_que *req = vha->req;
	struct abort_iocb_entry_fx00 abt_iocb;

	memset(&abt_iocb, 0, sizeof(struct abort_iocb_entry_fx00));
	abt_iocb.entry_type = ABORT_IOCB_TYPE_FX00;
	abt_iocb.entry_count = 1;
	abt_iocb.handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
	abt_iocb.abort_handle =
	    cpu_to_le32(MAKE_HANDLE(req->id, fxio->u.abt.cmd_hndl));
	abt_iocb.tgt_id_sts = cpu_to_le16(sp->fcport->tgt_id);
	abt_iocb.req_que_no = cpu_to_le16(req->id);

	memcpy((void __iomem *)pabt_iocb, &abt_iocb,
	    sizeof(struct abort_iocb_entry_fx00));
	wmb();
}

void
qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb)
{
	struct srb_iocb *fxio = &sp->u.iocb_cmd;
	struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
	struct fc_bsg_job *bsg_job;
	struct fxdisc_entry_fx00 fx_iocb;
	uint8_t entry_cnt = 1;

	memset(&fx_iocb, 0, sizeof(struct fxdisc_entry_fx00));
	fx_iocb.entry_type = FX00_IOCB_TYPE;
	fx_iocb.handle = cpu_to_le32(sp->handle);
	fx_iocb.entry_count = entry_cnt;

	if (sp->type == SRB_FXIOCB_DCMD) {
		fx_iocb.func_num =
		    cpu_to_le16(sp->u.iocb_cmd.u.fxiocb.req_func_type);
		fx_iocb.adapid = cpu_to_le32(fxio->u.fxiocb.adapter_id);
		fx_iocb.adapid_hi = cpu_to_le32(fxio->u.fxiocb.adapter_id_hi);
		fx_iocb.reserved_0 = cpu_to_le32(fxio->u.fxiocb.reserved_0);
		fx_iocb.reserved_1 = cpu_to_le32(fxio->u.fxiocb.reserved_1);
		fx_iocb.dataword_extra =
		    cpu_to_le32(fxio->u.fxiocb.req_data_extra);

		if (fxio->u.fxiocb.flags & SRB_FXDISC_REQ_DMA_VALID) {
			fx_iocb.req_dsdcnt = cpu_to_le16(1);
			fx_iocb.req_xfrcnt =
			    cpu_to_le16(fxio->u.fxiocb.req_len);
			fx_iocb.dseg_rq_address[0] =
			    cpu_to_le32(LSD(fxio->u.fxiocb.req_dma_handle));
			fx_iocb.dseg_rq_address[1] =
			    cpu_to_le32(MSD(fxio->u.fxiocb.req_dma_handle));
			fx_iocb.dseg_rq_len =
			    cpu_to_le32(fxio->u.fxiocb.req_len);
		}

		if (fxio->u.fxiocb.flags & SRB_FXDISC_RESP_DMA_VALID) {
			fx_iocb.rsp_dsdcnt = cpu_to_le16(1);
			fx_iocb.rsp_xfrcnt =
			    cpu_to_le16(fxio->u.fxiocb.rsp_len);
			fx_iocb.dseg_rsp_address[0] =
			    cpu_to_le32(LSD(fxio->u.fxiocb.rsp_dma_handle));
			fx_iocb.dseg_rsp_address[1] =
			    cpu_to_le32(MSD(fxio->u.fxiocb.rsp_dma_handle));
			fx_iocb.dseg_rsp_len =
			    cpu_to_le32(fxio->u.fxiocb.rsp_len);
		}

		if (fxio->u.fxiocb.flags & SRB_FXDISC_REQ_DWRD_VALID) {
			fx_iocb.dataword =
			    cpu_to_le32(fxio->u.fxiocb.req_data);
		}
		fx_iocb.flags = fxio->u.fxiocb.flags;
	} else {
		struct scatterlist *sg;
		bsg_job = sp->u.bsg_job;
		piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
		    &bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];

		fx_iocb.func_num = piocb_rqst->func_type;
		fx_iocb.adapid = piocb_rqst->adapid;
		fx_iocb.adapid_hi = piocb_rqst->adapid_hi;
		fx_iocb.reserved_0 = piocb_rqst->reserved_0;
		fx_iocb.reserved_1 = piocb_rqst->reserved_1;
		fx_iocb.dataword_extra = piocb_rqst->dataword_extra;
		fx_iocb.dataword = piocb_rqst->dataword;
		fx_iocb.req_xfrcnt = cpu_to_le16(piocb_rqst->req_len);
		fx_iocb.rsp_xfrcnt = cpu_to_le16(piocb_rqst->rsp_len);

		if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) {
			int avail_dsds, tot_dsds;
			cont_a64_entry_t lcont_pkt;
			cont_a64_entry_t *cont_pkt = NULL;
			uint32_t *cur_dsd;
			int index = 0, cont = 0;

			fx_iocb.req_dsdcnt =
			    cpu_to_le16(bsg_job->request_payload.sg_cnt);
			tot_dsds =
			    cpu_to_le32(bsg_job->request_payload.sg_cnt);
			cur_dsd = (uint32_t *)&fx_iocb.dseg_rq_address[0];
			avail_dsds = 1;
			for_each_sg(bsg_job->request_payload.sg_list, sg,
			    tot_dsds, index) {
				dma_addr_t sle_dma;

				/* Allocate additional continuation packets? */
				if (avail_dsds == 0) {
					/*
					 * Five DSDs are available in the Cont.
					 * Type 1 IOCB.
					 */
					memset(&lcont_pkt, 0,
					    REQUEST_ENTRY_SIZE);
					cont_pkt =
					    qlafx00_prep_cont_type1_iocb(
						sp->fcport->vha->req,
						&lcont_pkt);
					cur_dsd = (uint32_t *)
					    lcont_pkt.dseg_0_address;
					avail_dsds = 5;
					cont = 1;
					entry_cnt++;
				}

				sle_dma = sg_dma_address(sg);
				*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
				*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
				*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
				avail_dsds--;

				if (avail_dsds == 0 && cont == 1) {
					cont = 0;
					memcpy_toio(
					    (void __iomem *)cont_pkt,
					    &lcont_pkt, REQUEST_ENTRY_SIZE);
					ql_dump_buffer(
					    ql_dbg_user + ql_dbg_verbose,
					    sp->fcport->vha, 0x3042,
					    (uint8_t *)&lcont_pkt,
					    REQUEST_ENTRY_SIZE);
				}
			}
			if (avail_dsds != 0 && cont == 1) {
				memcpy_toio((void __iomem *)cont_pkt,
				    &lcont_pkt, REQUEST_ENTRY_SIZE);
				ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
				    sp->fcport->vha, 0x3043,
				    (uint8_t *)&lcont_pkt, REQUEST_ENTRY_SIZE);
			}
		}

		if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) {
			int avail_dsds, tot_dsds;
			cont_a64_entry_t lcont_pkt;
			cont_a64_entry_t *cont_pkt = NULL;
			uint32_t *cur_dsd;
			int index = 0, cont = 0;

			fx_iocb.rsp_dsdcnt =
			    cpu_to_le16(bsg_job->reply_payload.sg_cnt);
			tot_dsds = cpu_to_le32(bsg_job->reply_payload.sg_cnt);
			cur_dsd = (uint32_t *)&fx_iocb.dseg_rsp_address[0];
			avail_dsds = 1;

			for_each_sg(bsg_job->reply_payload.sg_list, sg,
			    tot_dsds, index) {
				dma_addr_t sle_dma;

				/* Allocate additional continuation packets? */
				if (avail_dsds == 0) {
					/*
					 * Five DSDs are available in the Cont.
					 * Type 1 IOCB.
					 */
					memset(&lcont_pkt, 0,
					    REQUEST_ENTRY_SIZE);
					cont_pkt =
					    qlafx00_prep_cont_type1_iocb(
						sp->fcport->vha->req,
						&lcont_pkt);
					cur_dsd = (uint32_t *)
					    lcont_pkt.dseg_0_address;
					avail_dsds = 5;
					cont = 1;
					entry_cnt++;
				}

				sle_dma = sg_dma_address(sg);
				*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
				*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
				*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
				avail_dsds--;

				if (avail_dsds == 0 && cont == 1) {
					cont = 0;
					memcpy_toio((void __iomem *)cont_pkt,
					    &lcont_pkt,
					    REQUEST_ENTRY_SIZE);
					ql_dump_buffer(
					    ql_dbg_user + ql_dbg_verbose,
					    sp->fcport->vha, 0x3045,
					    (uint8_t *)&lcont_pkt,
					    REQUEST_ENTRY_SIZE);
				}
			}
			if (avail_dsds != 0 && cont == 1) {
				memcpy_toio((void __iomem *)cont_pkt,
				    &lcont_pkt, REQUEST_ENTRY_SIZE);
				ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
				    sp->fcport->vha, 0x3046,
				    (uint8_t *)&lcont_pkt, REQUEST_ENTRY_SIZE);
			}
		}

		if (piocb_rqst->flags & SRB_FXDISC_REQ_DWRD_VALID)
			fx_iocb.dataword = cpu_to_le32(piocb_rqst->dataword);
		fx_iocb.flags = piocb_rqst->flags;
		fx_iocb.entry_count = entry_cnt;
	}

	ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
	    sp->fcport->vha, 0x3047,
	    (uint8_t *)&fx_iocb, sizeof(struct fxdisc_entry_fx00));

	memcpy((void __iomem *)pfxiocb, &fx_iocb,
	    sizeof(struct fxdisc_entry_fx00));
	wmb();
}