/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2013 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/ratelimit.h>
#include <linux/vmalloc.h>
#include <scsi/scsi_tcq.h>
#include <linux/utsname.h>


/* QLAFX00 specific Mailbox implementation functions */

/*
 * qlafx00_mailbox_command
 *	Issues a mailbox command and waits for completion.
 *
 * Input:
 *	ha = adapter block pointer.
 *	mcp = driver internal mbx struct pointer.
 *
 * Output:
 *	mb[MAX_MAILBOX_REGISTER_COUNT] = returned mailbox data.
 *
 * Returns:
 *	0 : QLA_SUCCESS = cmd performed successfully
 *	1 : QLA_FUNCTION_FAILED (error encountered)
 *	6 : QLA_FUNCTION_TIMEOUT (timeout condition encountered)
 *
 * Context:
 *	Kernel context.
 */
static int
qlafx00_mailbox_command(scsi_qla_host_t *vha, struct mbx_cmd_32 *mcp)
{
	int rval;
	unsigned long flags = 0;
	device_reg_t __iomem *reg;
	uint8_t abort_active;
	uint8_t io_lock_on;
	uint16_t command = 0;
	uint32_t *iptr;
	uint32_t __iomem *optr;
	uint32_t cnt;
	uint32_t mboxes;
	unsigned long wait_time;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	if (ha->pdev->error_state > pci_channel_io_frozen) {
		ql_log(ql_log_warn, vha, 0x115c,
		    "error_state is greater than pci_channel_io_frozen, "
		    "exiting.\n");
		return QLA_FUNCTION_TIMEOUT;
	}

	if (vha->device_flags & DFLG_DEV_FAILED) {
		ql_log(ql_log_warn, vha, 0x115f,
		    "Device in failed state, exiting.\n");
		return QLA_FUNCTION_TIMEOUT;
	}

	reg = ha->iobase;
	io_lock_on = base_vha->flags.init_done;

	rval = QLA_SUCCESS;
	abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);

	if (ha->flags.pci_channel_io_perm_failure) {
		ql_log(ql_log_warn, vha, 0x1175,
		    "Perm failure on EEH timeout MBX, exiting.\n");
		return QLA_FUNCTION_TIMEOUT;
	}

	if (ha->flags.isp82xx_fw_hung) {
		/* Setting Link-Down error */
		mcp->mb[0] = MBS_LINK_DOWN_ERROR;
		ql_log(ql_log_warn, vha, 0x1176,
		    "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
		rval = QLA_FUNCTION_FAILED;
		goto premature_exit;
	}

	/*
	 * Wait for active mailbox commands to finish by waiting at most tov
	 * seconds. This is to serialize actual issuing of mailbox cmds during
	 * non ISP abort time.
	 */
	if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) {
		/* Timeout occurred. Return error. */
		ql_log(ql_log_warn, vha, 0x1177,
		    "Cmd access timeout, cmd=0x%x, Exiting.\n",
		    mcp->mb[0]);
		return QLA_FUNCTION_TIMEOUT;
	}

	ha->flags.mbox_busy = 1;
	/* Save mailbox command for debug */
	ha->mcp32 = mcp;

	ql_dbg(ql_dbg_mbx, vha, 0x1178,
	    "Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]);

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Load mailbox registers. */
	optr = (uint32_t __iomem *)&reg->ispfx00.mailbox0;

	iptr = mcp->mb;
	command = mcp->mb[0];
	mboxes = mcp->out_mb;

	for (cnt = 0; cnt < ha->mbx_count; cnt++) {
		if (mboxes & BIT_0)
			WRT_REG_DWORD(optr, *iptr);

		mboxes >>= 1;
		optr++;
		iptr++;
	}

	/* Issue set host interrupt command to send cmd out. */
	ha->flags.mbox_int = 0;
	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);

	ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1172,
	    (uint8_t *)mcp->mb, 16);
	ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1173,
	    ((uint8_t *)mcp->mb + 0x10), 16);
	ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1174,
	    ((uint8_t *)mcp->mb + 0x20), 8);

	/* Unlock mbx registers and wait for interrupt */
	ql_dbg(ql_dbg_mbx, vha, 0x1179,
	    "Going to unlock irq & waiting for interrupts. "
	    "jiffies=%lx.\n", jiffies);

	/* Wait for mbx cmd completion until timeout */
	if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) {
		set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);

		QLAFX00_SET_HST_INTR(ha, ha->mbx_intr_code);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		wait_for_completion_timeout(&ha->mbx_intr_comp, mcp->tov * HZ);
	} else {
		ql_dbg(ql_dbg_mbx, vha, 0x112c,
		    "Cmd=%x Polling Mode.\n", command);

		QLAFX00_SET_HST_INTR(ha, ha->mbx_intr_code);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */
		while (!ha->flags.mbox_int) {
			if (time_after(jiffies, wait_time))
				break;

			/* Check for pending interrupts. */
			qla2x00_poll(ha->rsp_q_map[0]);

			if (!ha->flags.mbox_int &&
			    !(IS_QLA2200(ha) &&
			    command == MBC_LOAD_RISC_RAM_EXTENDED))
				usleep_range(10000, 11000);
		} /* while */
		ql_dbg(ql_dbg_mbx, vha, 0x112d,
		    "Waited %d sec.\n",
		    (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ));
	}

	/* Check whether we timed out */
	if (ha->flags.mbox_int) {
		uint32_t *iptr2;

		ql_dbg(ql_dbg_mbx, vha, 0x112e,
		    "Cmd=%x completed.\n", command);

		/* Got interrupt. Clear the flag. */
		ha->flags.mbox_int = 0;
		clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);

		if (ha->mailbox_out32[0] != MBS_COMMAND_COMPLETE)
			rval = QLA_FUNCTION_FAILED;

		/* Load return mailbox registers. */
		iptr2 = mcp->mb;
		iptr = (uint32_t *)&ha->mailbox_out32[0];
		mboxes = mcp->in_mb;
		for (cnt = 0; cnt < ha->mbx_count; cnt++) {
			if (mboxes & BIT_0)
				*iptr2 = *iptr;

			mboxes >>= 1;
			iptr2++;
			iptr++;
		}
	} else {
		rval = QLA_FUNCTION_TIMEOUT;
	}

	ha->flags.mbox_busy = 0;

	/* Clean up */
	ha->mcp32 = NULL;

	if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) {
		ql_dbg(ql_dbg_mbx, vha, 0x113a,
		    "checking for additional resp interrupt.\n");

		/* polling mode for non isp_abort commands. */
		qla2x00_poll(ha->rsp_q_map[0]);
	}

	if (rval == QLA_FUNCTION_TIMEOUT &&
	    mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) {
		if (!io_lock_on || (mcp->flags & IOCTL_CMD) ||
		    ha->flags.eeh_busy) {
			/* not in dpc. schedule it for dpc to take over. */
			ql_dbg(ql_dbg_mbx, vha, 0x115d,
			    "Timeout, schedule isp_abort_needed.\n");

			if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
			    !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
			    !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {

				ql_log(ql_log_info, base_vha, 0x115e,
				    "Mailbox cmd timeout occurred, cmd=0x%x, "
				    "mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP "
				    "abort.\n", command, mcp->mb[0],
				    ha->flags.eeh_busy);
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
			}
		} else if (!abort_active) {
			/* call abort directly since we are in the DPC thread */
			ql_dbg(ql_dbg_mbx, vha, 0x1160,
			    "Timeout, calling abort_isp.\n");

			if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
			    !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
			    !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {

				ql_log(ql_log_info, base_vha, 0x1161,
				    "Mailbox cmd timeout occurred, cmd=0x%x, "
				    "mb[0]=0x%x. Scheduling ISP abort ",
				    command, mcp->mb[0]);

				set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
				clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				if (ha->isp_ops->abort_isp(vha)) {
					/* Failed. retry later. */
					set_bit(ISP_ABORT_NEEDED,
					    &vha->dpc_flags);
				}
				clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
				ql_dbg(ql_dbg_mbx, vha, 0x1162,
				    "Finished abort_isp.\n");
			}
		}
	}

premature_exit:
	/* Allow next mbx cmd to come in. */
	complete(&ha->mbx_cmd_comp);

	if (rval) {
		ql_log(ql_log_warn, base_vha, 0x1163,
		    "**** Failed mbx[0]=%x, mb[1]=%x, mb[2]=%x, "
		    "mb[3]=%x, cmd=%x ****.\n",
		    mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3], command);
	} else {
		ql_dbg(ql_dbg_mbx, base_vha, 0x1164, "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qlafx00_driver_shutdown
 *	Indicate a driver shutdown to firmware.
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Returns:
 *	local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qlafx00_driver_shutdown(scsi_qla_host_t *vha, int tmo)
{
	int rval;
	struct mbx_cmd_32 mc;
	struct mbx_cmd_32 *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1166,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_MR_DRV_SHUTDOWN;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_0;
	if (tmo)
		mcp->tov = tmo;
	else
		mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qlafx00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1167,
		    "Failed=%x.\n", rval);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1168,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qlafx00_get_firmware_state
 *	Get adapter firmware state.
 *
 * Input:
 *	ha = adapter block pointer.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qlafx00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
static int
qlafx00_get_firmware_state(scsi_qla_host_t *vha, uint32_t *states)
{
	int rval;
	struct mbx_cmd_32 mc;
	struct mbx_cmd_32 *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1169,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qlafx00_mailbox_command(vha, mcp);

	/* Return firmware states. */
	states[0] = mcp->mb[1];

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x116a,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x116b,
		    "Done %s.\n", __func__);
	}
	return rval;
}

/*
 * qlafx00_init_firmware
 *	Initialize adapter firmware.
 *
 * Input:
 *	ha = adapter block pointer.
 *	dptr = Initialization control block pointer.
 *	size = size of initialization control block.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qlafx00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qlafx00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
{
	int rval;
	struct mbx_cmd_32 mc;
	struct mbx_cmd_32 *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x116c,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_INITIALIZE_FIRMWARE;

	mcp->mb[1] = 0;
	mcp->mb[2] = MSD(ha->init_cb_dma);
	mcp->mb[3] = LSD(ha->init_cb_dma);

	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_0;
	mcp->buf_size = size;
	mcp->flags = MBX_DMA_OUT;
	mcp->tov = MBX_TOV_SECONDS;
	rval = qlafx00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x116d,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x116e,
		    "Done %s.\n", __func__);
	}
	return rval;
}

/*
 * qlafx00_mbx_reg_test
 */
static int
qlafx00_mbx_reg_test(scsi_qla_host_t *vha)
{
	int rval;
	struct mbx_cmd_32 mc;
	struct mbx_cmd_32 *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x116f,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST;
	mcp->mb[1] = 0xAAAA;
	mcp->mb[2] = 0x5555;
	mcp->mb[3] = 0xAA55;
	mcp->mb[4] = 0x55AA;
	mcp->mb[5] = 0xA5A5;
	mcp->mb[6] = 0x5A5A;
	mcp->mb[7] = 0x2525;
	mcp->mb[8] = 0xBBBB;
	mcp->mb[9] = 0x6666;
	mcp->mb[10] = 0xBB66;
	mcp->mb[11] = 0x66BB;
	mcp->mb[12] = 0xB6B6;
	mcp->mb[13] = 0x6B6B;
	mcp->mb[14] = 0x3636;
	mcp->mb[15] = 0xCCCC;

	mcp->out_mb = MBX_15|MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|
			MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_15|MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|
			MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->buf_size = 0;
	mcp->flags = MBX_DMA_OUT;
	mcp->tov = MBX_TOV_SECONDS;
	rval = qlafx00_mailbox_command(vha, mcp);
	if (rval == QLA_SUCCESS) {
		if (mcp->mb[17] != 0xAAAA || mcp->mb[18] != 0x5555 ||
		    mcp->mb[19] != 0xAA55 || mcp->mb[20] != 0x55AA)
			rval = QLA_FUNCTION_FAILED;
		if (mcp->mb[21] != 0xA5A5 || mcp->mb[22] != 0x5A5A ||
		    mcp->mb[23] != 0x2525 || mcp->mb[24] != 0xBBBB)
			rval = QLA_FUNCTION_FAILED;
		if (mcp->mb[25] != 0x6666 || mcp->mb[26] != 0xBB66 ||
		    mcp->mb[27] != 0x66BB || mcp->mb[28] != 0xB6B6)
			rval = QLA_FUNCTION_FAILED;
		if (mcp->mb[29] != 0x6B6B || mcp->mb[30] != 0x3636 ||
		    mcp->mb[31] != 0xCCCC)
			rval = QLA_FUNCTION_FAILED;
	}

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1170,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1171,
		    "Done %s.\n", __func__);
	}
	return rval;
}

/**
 * qlafx00_pci_config() - Setup ISPFx00 PCI configuration registers.
 * @vha: HA context
 *
 * Returns 0 on success.
494 */ 495 int 496 qlafx00_pci_config(scsi_qla_host_t *vha) 497 { 498 uint16_t w; 499 struct qla_hw_data *ha = vha->hw; 500 501 pci_set_master(ha->pdev); 502 pci_try_set_mwi(ha->pdev); 503 504 pci_read_config_word(ha->pdev, PCI_COMMAND, &w); 505 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR); 506 w &= ~PCI_COMMAND_INTX_DISABLE; 507 pci_write_config_word(ha->pdev, PCI_COMMAND, w); 508 509 /* PCIe -- adjust Maximum Read Request Size (2048). */ 510 if (pci_find_capability(ha->pdev, PCI_CAP_ID_EXP)) 511 pcie_set_readrq(ha->pdev, 2048); 512 513 ha->chip_revision = ha->pdev->revision; 514 515 return QLA_SUCCESS; 516 } 517 518 /** 519 * qlafx00_warm_reset() - Perform warm reset of iSA(CPUs being reset on SOC). 520 * @ha: HA context 521 * 522 */ 523 static inline void 524 qlafx00_soc_cpu_reset(scsi_qla_host_t *vha) 525 { 526 unsigned long flags = 0; 527 struct qla_hw_data *ha = vha->hw; 528 int i, core; 529 uint32_t cnt; 530 531 /* Set all 4 cores in reset */ 532 for (i = 0; i < 4; i++) { 533 QLAFX00_SET_HBA_SOC_REG(ha, 534 (SOC_SW_RST_CONTROL_REG_CORE0 + 8*i), (0xF01)); 535 } 536 537 /* Set all 4 core Clock gating control */ 538 for (i = 0; i < 4; i++) { 539 QLAFX00_SET_HBA_SOC_REG(ha, 540 (SOC_SW_RST_CONTROL_REG_CORE0 + 4 + 8*i), (0x01010101)); 541 } 542 543 /* Reset all units in Fabric */ 544 QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_RST_CONTROL_REG, (0x11F0101)); 545 546 /* Reset all interrupt control registers */ 547 for (i = 0; i < 115; i++) { 548 QLAFX00_SET_HBA_SOC_REG(ha, 549 (SOC_INTERRUPT_SOURCE_I_CONTROL_REG + 4*i), (0x0)); 550 } 551 552 /* Reset Timers control registers. per core */ 553 for (core = 0; core < 4; core++) 554 for (i = 0; i < 8; i++) 555 QLAFX00_SET_HBA_SOC_REG(ha, 556 (SOC_CORE_TIMER_REG + 0x100*core + 4*i), (0x0)); 557 558 /* Reset per core IRQ ack register */ 559 for (core = 0; core < 4; core++) 560 QLAFX00_SET_HBA_SOC_REG(ha, 561 (SOC_IRQ_ACK_REG + 0x100*core), (0x3FF)); 562 563 /* Set Fabric control and config to defaults */ 564 QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_CONTROL_REG, (0x2)); 565 QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_CONFIG_REG, (0x3)); 566 567 spin_lock_irqsave(&ha->hardware_lock, flags); 568 569 /* Kick in Fabric units */ 570 QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_RST_CONTROL_REG, (0x0)); 571 572 /* Kick in Core0 to start boot process */ 573 QLAFX00_SET_HBA_SOC_REG(ha, SOC_SW_RST_CONTROL_REG_CORE0, (0xF00)); 574 575 /* Wait 10secs for soft-reset to complete. */ 576 for (cnt = 10; cnt; cnt--) { 577 msleep(1000); 578 barrier(); 579 } 580 spin_unlock_irqrestore(&ha->hardware_lock, flags); 581 } 582 583 /** 584 * qlafx00_soft_reset() - Soft Reset ISPFx00. 585 * @ha: HA context 586 * 587 * Returns 0 on success. 588 */ 589 void 590 qlafx00_soft_reset(scsi_qla_host_t *vha) 591 { 592 struct qla_hw_data *ha = vha->hw; 593 594 if (unlikely(pci_channel_offline(ha->pdev) && 595 ha->flags.pci_channel_io_perm_failure)) 596 return; 597 598 ha->isp_ops->disable_intrs(ha); 599 qlafx00_soc_cpu_reset(vha); 600 ha->isp_ops->enable_intrs(ha); 601 } 602 603 /** 604 * qlafx00_chip_diag() - Test ISPFx00 for proper operation. 605 * @ha: HA context 606 * 607 * Returns 0 on success. 
608 */ 609 int 610 qlafx00_chip_diag(scsi_qla_host_t *vha) 611 { 612 int rval = 0; 613 struct qla_hw_data *ha = vha->hw; 614 struct req_que *req = ha->req_q_map[0]; 615 616 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length; 617 618 rval = qlafx00_mbx_reg_test(vha); 619 if (rval) { 620 ql_log(ql_log_warn, vha, 0x1165, 621 "Failed mailbox send register test\n"); 622 } else { 623 /* Flag a successful rval */ 624 rval = QLA_SUCCESS; 625 } 626 return rval; 627 } 628 629 void 630 qlafx00_config_rings(struct scsi_qla_host *vha) 631 { 632 struct qla_hw_data *ha = vha->hw; 633 struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00; 634 struct init_cb_fx *icb; 635 struct req_que *req = ha->req_q_map[0]; 636 struct rsp_que *rsp = ha->rsp_q_map[0]; 637 638 /* Setup ring parameters in initialization control block. */ 639 icb = (struct init_cb_fx *)ha->init_cb; 640 icb->request_q_outpointer = __constant_cpu_to_le16(0); 641 icb->response_q_inpointer = __constant_cpu_to_le16(0); 642 icb->request_q_length = cpu_to_le16(req->length); 643 icb->response_q_length = cpu_to_le16(rsp->length); 644 icb->request_q_address[0] = cpu_to_le32(LSD(req->dma)); 645 icb->request_q_address[1] = cpu_to_le32(MSD(req->dma)); 646 icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma)); 647 icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma)); 648 649 WRT_REG_DWORD(®->req_q_in, 0); 650 WRT_REG_DWORD(®->req_q_out, 0); 651 652 WRT_REG_DWORD(®->rsp_q_in, 0); 653 WRT_REG_DWORD(®->rsp_q_out, 0); 654 655 /* PCI posting */ 656 RD_REG_DWORD(®->rsp_q_out); 657 } 658 659 char * 660 qlafx00_pci_info_str(struct scsi_qla_host *vha, char *str) 661 { 662 struct qla_hw_data *ha = vha->hw; 663 int pcie_reg; 664 665 pcie_reg = pci_find_capability(ha->pdev, PCI_CAP_ID_EXP); 666 if (pcie_reg) { 667 strcpy(str, "PCIe iSA"); 668 return str; 669 } 670 return str; 671 } 672 673 char * 674 qlafx00_fw_version_str(struct scsi_qla_host *vha, char *str) 675 { 676 struct qla_hw_data *ha = vha->hw; 677 678 sprintf(str, "%s", ha->mr.fw_version); 679 return str; 680 } 681 682 void 683 qlafx00_enable_intrs(struct qla_hw_data *ha) 684 { 685 unsigned long flags = 0; 686 687 spin_lock_irqsave(&ha->hardware_lock, flags); 688 ha->interrupts_on = 1; 689 QLAFX00_ENABLE_ICNTRL_REG(ha); 690 spin_unlock_irqrestore(&ha->hardware_lock, flags); 691 } 692 693 void 694 qlafx00_disable_intrs(struct qla_hw_data *ha) 695 { 696 unsigned long flags = 0; 697 698 spin_lock_irqsave(&ha->hardware_lock, flags); 699 ha->interrupts_on = 0; 700 QLAFX00_DISABLE_ICNTRL_REG(ha); 701 spin_unlock_irqrestore(&ha->hardware_lock, flags); 702 } 703 704 static void 705 qlafx00_tmf_iocb_timeout(void *data) 706 { 707 srb_t *sp = (srb_t *)data; 708 struct srb_iocb *tmf = &sp->u.iocb_cmd; 709 710 tmf->u.tmf.comp_status = cpu_to_le16((uint16_t)CS_TIMEOUT); 711 complete(&tmf->u.tmf.comp); 712 } 713 714 static void 715 qlafx00_tmf_sp_done(void *data, void *ptr, int res) 716 { 717 srb_t *sp = (srb_t *)ptr; 718 struct srb_iocb *tmf = &sp->u.iocb_cmd; 719 720 complete(&tmf->u.tmf.comp); 721 } 722 723 static int 724 qlafx00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, 725 uint32_t lun, uint32_t tag) 726 { 727 scsi_qla_host_t *vha = fcport->vha; 728 struct srb_iocb *tm_iocb; 729 srb_t *sp; 730 int rval = QLA_FUNCTION_FAILED; 731 732 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); 733 if (!sp) 734 goto done; 735 736 tm_iocb = &sp->u.iocb_cmd; 737 sp->type = SRB_TM_CMD; 738 sp->name = "tmf"; 739 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)); 740 tm_iocb->u.tmf.flags = flags; 741 
	tm_iocb->u.tmf.lun = lun;
	tm_iocb->u.tmf.data = tag;
	sp->done = qlafx00_tmf_sp_done;
	tm_iocb->timeout = qlafx00_tmf_iocb_timeout;
	init_completion(&tm_iocb->u.tmf.comp);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	ql_dbg(ql_dbg_async, vha, 0x507b,
	    "Task management command issued target_id=%x\n",
	    fcport->tgt_id);

	wait_for_completion(&tm_iocb->u.tmf.comp);

	rval = tm_iocb->u.tmf.comp_status == CS_COMPLETE ?
	    QLA_SUCCESS : QLA_FUNCTION_FAILED;

done_free_sp:
	sp->free(vha, sp);
done:
	return rval;
}

int
qlafx00_abort_target(fc_port_t *fcport, unsigned int l, int tag)
{
	return qlafx00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag);
}

int
qlafx00_lun_reset(fc_port_t *fcport, unsigned int l, int tag)
{
	return qlafx00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag);
}

int
qlafx00_loop_reset(scsi_qla_host_t *vha)
{
	int ret;
	struct fc_port *fcport;
	struct qla_hw_data *ha = vha->hw;

	if (ql2xtargetreset) {
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			if (fcport->port_type != FCT_TARGET)
				continue;

			ret = ha->isp_ops->target_reset(fcport, 0, 0);
			if (ret != QLA_SUCCESS) {
				ql_dbg(ql_dbg_taskm, vha, 0x803d,
				    "Bus Reset failed: Reset=%d "
				    "d_id=%x.\n", ret, fcport->d_id.b24);
			}
		}
	}
	return QLA_SUCCESS;
}

int
qlafx00_iospace_config(struct qla_hw_data *ha)
{
	if (pci_request_selected_regions(ha->pdev, ha->bars,
	    QLA2XXX_DRIVER_NAME)) {
		ql_log_pci(ql_log_fatal, ha->pdev, 0x014e,
		    "Failed to reserve PIO/MMIO regions (%s), aborting.\n",
		    pci_name(ha->pdev));
		goto iospace_error_exit;
	}

	/* Use MMIO operations for all accesses. */
	if (!(pci_resource_flags(ha->pdev, 0) & IORESOURCE_MEM)) {
		ql_log_pci(ql_log_warn, ha->pdev, 0x014f,
		    "Invalid pci I/O region size (%s).\n",
		    pci_name(ha->pdev));
		goto iospace_error_exit;
	}
	if (pci_resource_len(ha->pdev, 0) < BAR0_LEN_FX00) {
		ql_log_pci(ql_log_warn, ha->pdev, 0x0127,
		    "Invalid PCI mem BAR0 region size (%s), aborting\n",
		    pci_name(ha->pdev));
		goto iospace_error_exit;
	}

	ha->cregbase =
	    ioremap_nocache(pci_resource_start(ha->pdev, 0), BAR0_LEN_FX00);
	if (!ha->cregbase) {
		ql_log_pci(ql_log_fatal, ha->pdev, 0x0128,
		    "cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev));
		goto iospace_error_exit;
	}

	if (!(pci_resource_flags(ha->pdev, 2) & IORESOURCE_MEM)) {
		ql_log_pci(ql_log_warn, ha->pdev, 0x0129,
		    "region #2 not an MMIO resource (%s), aborting\n",
		    pci_name(ha->pdev));
		goto iospace_error_exit;
	}
	if (pci_resource_len(ha->pdev, 2) < BAR2_LEN_FX00) {
		ql_log_pci(ql_log_warn, ha->pdev, 0x012a,
		    "Invalid PCI mem BAR2 region size (%s), aborting\n",
		    pci_name(ha->pdev));
		goto iospace_error_exit;
	}

	ha->iobase =
	    ioremap_nocache(pci_resource_start(ha->pdev, 2), BAR2_LEN_FX00);
	if (!ha->iobase) {
		ql_log_pci(ql_log_fatal, ha->pdev, 0x012b,
		    "cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev));
		goto iospace_error_exit;
	}

	/* Determine queue resources */
	ha->max_req_queues = ha->max_rsp_queues = 1;

	ql_log_pci(ql_log_info, ha->pdev, 0x012c,
	    "Bars 0x%x, iobase0 0x%p, iobase2 0x%p\n",
	    ha->bars, ha->cregbase, ha->iobase);

	return 0;

iospace_error_exit:
	return -ENOMEM;
}

static void
qlafx00_save_queue_ptrs(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];
	struct rsp_que *rsp = ha->rsp_q_map[0];

	req->length_fx00 = req->length;
	req->ring_fx00 = req->ring;
	req->dma_fx00 = req->dma;

	rsp->length_fx00 = rsp->length;
	rsp->ring_fx00 = rsp->ring;
	rsp->dma_fx00 = rsp->dma;

	ql_dbg(ql_dbg_init, vha, 0x012d,
	    "req: %p, ring_fx00: %p, length_fx00: 0x%x,"
	    "req->dma_fx00: 0x%llx\n", req, req->ring_fx00,
	    req->length_fx00, (u64)req->dma_fx00);

	ql_dbg(ql_dbg_init, vha, 0x012e,
	    "rsp: %p, ring_fx00: %p, length_fx00: 0x%x,"
	    "rsp->dma_fx00: 0x%llx\n", rsp, rsp->ring_fx00,
	    rsp->length_fx00, (u64)rsp->dma_fx00);
}

static int
qlafx00_config_queues(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];
	struct rsp_que *rsp = ha->rsp_q_map[0];
	dma_addr_t bar2_hdl = pci_resource_start(ha->pdev, 2);

	/*
	 * The request/response rings live in BAR2 MMIO space; derive the
	 * kernel virtual and bus addresses from the firmware-supplied
	 * queue offsets.
	 */
	req->length = ha->req_que_len;
	req->ring = (void *)ha->iobase + ha->req_que_off;
	req->dma = bar2_hdl + ha->req_que_off;
	if ((!req->ring) || (req->length == 0)) {
		ql_log_pci(ql_log_info, ha->pdev, 0x012f,
		    "Unable to allocate memory for req_ring\n");
		return QLA_FUNCTION_FAILED;
	}

	ql_dbg(ql_dbg_init, vha, 0x0130,
	    "req: %p req_ring pointer %p req len 0x%x "
	    "req off 0x%x, req->dma: 0x%llx\n",
	    req, req->ring, req->length,
	    ha->req_que_off, (u64)req->dma);

	rsp->length = ha->rsp_que_len;
	rsp->ring = (void *)ha->iobase + ha->rsp_que_off;
	rsp->dma = bar2_hdl + ha->rsp_que_off;
	if ((!rsp->ring) || (rsp->length == 0)) {
		ql_log_pci(ql_log_info, ha->pdev, 0x0131,
		    "Unable to allocate memory for rsp_ring\n");
		return QLA_FUNCTION_FAILED;
	}

	ql_dbg(ql_dbg_init, vha, 0x0132,
	    "rsp: %p rsp_ring pointer %p rsp len 0x%x "
	    "rsp off 0x%x, rsp->dma: 0x%llx\n",
	    rsp, rsp->ring, rsp->length,
	    ha->rsp_que_off, (u64)rsp->dma);

	return QLA_SUCCESS;
}

static int
qlafx00_init_fw_ready(scsi_qla_host_t *vha)
{
	int rval = 0;
	unsigned long wtime;
	uint16_t wait_time;	/* Wait time */
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
	uint32_t aenmbx, aenmbx7 = 0;
	uint32_t pseudo_aen;
	uint32_t state[5];
	bool done = false;

	/* 30 seconds wait - Adjust if required */
	wait_time = 30;

	pseudo_aen = RD_REG_DWORD(&reg->pseudoaen);
	if (pseudo_aen == 1) {
		aenmbx7 = RD_REG_DWORD(&reg->initval7);
		ha->mbx_intr_code = MSW(aenmbx7);
		ha->rqstq_intr_code = LSW(aenmbx7);
		rval = qlafx00_driver_shutdown(vha, 10);
		if (rval != QLA_SUCCESS)
			qlafx00_soft_reset(vha);
	}

	/* wait time before firmware ready */
	wtime = jiffies + (wait_time * HZ);
	do {
		aenmbx = RD_REG_DWORD(&reg->aenmailbox0);
		barrier();
		ql_dbg(ql_dbg_mbx, vha, 0x0133,
		    "aenmbx: 0x%x\n", aenmbx);

		switch (aenmbx) {
		case MBA_FW_NOT_STARTED:
		case MBA_FW_STARTING:
			break;

		case MBA_SYSTEM_ERR:
		case MBA_REQ_TRANSFER_ERR:
		case MBA_RSP_TRANSFER_ERR:
		case MBA_FW_INIT_FAILURE:
			qlafx00_soft_reset(vha);
			break;

		case MBA_FW_RESTART_CMPLT:
			/* Set the mbx and rqstq intr code */
			aenmbx7 = RD_REG_DWORD(&reg->aenmailbox7);
			ha->mbx_intr_code = MSW(aenmbx7);
			ha->rqstq_intr_code = LSW(aenmbx7);
			ha->req_que_off = RD_REG_DWORD(&reg->aenmailbox1);
			ha->rsp_que_off = RD_REG_DWORD(&reg->aenmailbox3);
			ha->req_que_len = RD_REG_DWORD(&reg->aenmailbox5);
			ha->rsp_que_len = RD_REG_DWORD(&reg->aenmailbox6);
			WRT_REG_DWORD(&reg->aenmailbox0, 0);
			RD_REG_DWORD_RELAXED(&reg->aenmailbox0);
			ql_dbg(ql_dbg_init, vha, 0x0134,
			    "f/w returned mbx_intr_code: 0x%x, "
			    "rqstq_intr_code: 0x%x\n",
			    ha->mbx_intr_code, ha->rqstq_intr_code);
			QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);
			rval = QLA_SUCCESS;
			done = true;
			break;

		default:
			/* The fw is apparently not ready. In order to
			 * continue, we might need to issue a Mbox cmd, but
			 * the problem is that the DoorBell vector values
			 * that come with the 8060 AEN are most likely gone
			 * by now (and thus no bell would be rung on the fw
			 * side when the mbox cmd is issued). We therefore
			 * have to grab the 8060 AEN shadow regs (filled in
			 * by FW when the last 8060 AEN was being posted).
			 * Do the following to determine what is needed in
			 * order to get the FW ready:
			 * 1. reload the 8060 AEN values from the shadow regs
			 * 2. clear int status to get rid of possible pending
			 *    interrupts
			 * 3. issue Get FW State Mbox cmd to determine fw state
			 *    Set the mbx and rqstq intr code from Shadow Regs
			 */
			aenmbx7 = RD_REG_DWORD(&reg->initval7);
			ha->mbx_intr_code = MSW(aenmbx7);
			ha->rqstq_intr_code = LSW(aenmbx7);
			ha->req_que_off = RD_REG_DWORD(&reg->initval1);
			ha->rsp_que_off = RD_REG_DWORD(&reg->initval3);
			ha->req_que_len = RD_REG_DWORD(&reg->initval5);
			ha->rsp_que_len = RD_REG_DWORD(&reg->initval6);
			ql_dbg(ql_dbg_init, vha, 0x0135,
			    "f/w returned mbx_intr_code: 0x%x, "
			    "rqstq_intr_code: 0x%x\n",
			    ha->mbx_intr_code, ha->rqstq_intr_code);
			QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);

			/* Get the FW state */
			rval = qlafx00_get_firmware_state(vha, state);
			if (rval != QLA_SUCCESS) {
				/* Retry if timer has not expired */
				break;
			}

			if (state[0] == FSTATE_FX00_CONFIG_WAIT) {
				/* Firmware is waiting to be
				 * initialized by driver
				 */
				rval = QLA_SUCCESS;
				done = true;
				break;
			}

			/* Issue driver shutdown and wait until f/w recovers.
			 * Driver should continue to poll until 8060 AEN is
			 * received indicating firmware recovery.
			 */
			ql_dbg(ql_dbg_init, vha, 0x0136,
			    "Sending Driver shutdown fw_state 0x%x\n",
			    state[0]);

			rval = qlafx00_driver_shutdown(vha, 10);
			if (rval != QLA_SUCCESS) {
				rval = QLA_FUNCTION_FAILED;
				break;
			}
			msleep(500);

			wtime = jiffies + (wait_time * HZ);
			break;
		}

		if (!done) {
			if (time_after_eq(jiffies, wtime)) {
				ql_dbg(ql_dbg_init, vha, 0x0137,
				    "Init f/w failed: aen[7]: 0x%x\n",
				    RD_REG_DWORD(&reg->aenmailbox7));
				rval = QLA_FUNCTION_FAILED;
				done = true;
				break;
			}
			/* Delay for a while */
			msleep(500);
		}
	} while (!done);

	if (rval)
		ql_dbg(ql_dbg_init, vha, 0x0138,
		    "%s **** FAILED ****.\n", __func__);
	else
		ql_dbg(ql_dbg_init, vha, 0x0139,
		    "%s **** SUCCESS ****.\n", __func__);

	return rval;
}

/*
 * qlafx00_fw_ready() - Waits for firmware ready.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
int
qlafx00_fw_ready(scsi_qla_host_t *vha)
{
	int rval;
	unsigned long wtime;
	uint16_t wait_time;	/* Wait time if loop is coming ready */
	uint32_t state[5];

	rval = QLA_SUCCESS;

	wait_time = 10;

	/* wait time before firmware ready */
	wtime = jiffies + (wait_time * HZ);

	/* Wait for ISP to finish init */
	if (!vha->flags.init_done)
		ql_dbg(ql_dbg_init, vha, 0x013a,
		    "Waiting for init to complete...\n");

	do {
		rval = qlafx00_get_firmware_state(vha, state);

		if (rval == QLA_SUCCESS) {
			if (state[0] == FSTATE_FX00_INITIALIZED) {
				ql_dbg(ql_dbg_init, vha, 0x013b,
				    "fw_state=%x\n", state[0]);
				rval = QLA_SUCCESS;
				break;
			}
		}
		rval = QLA_FUNCTION_FAILED;

		if (time_after_eq(jiffies, wtime))
			break;

		/* Delay for a while */
		msleep(500);

		ql_dbg(ql_dbg_init, vha, 0x013c,
		    "fw_state=%x curr time=%lx.\n", state[0], jiffies);
	} while (1);

	if (rval)
		ql_dbg(ql_dbg_init, vha, 0x013d,
		    "Firmware ready **** FAILED ****.\n");
	else
		ql_dbg(ql_dbg_init, vha, 0x013e,
		    "Firmware ready **** SUCCESS ****.\n");

	return rval;
}

static int
qlafx00_find_all_targets(scsi_qla_host_t *vha,
	struct list_head *new_fcports)
{
	int rval;
	uint16_t tgt_id;
	fc_port_t *fcport, *new_fcport;
	int found;
	struct qla_hw_data *ha = vha->hw;

	rval = QLA_SUCCESS;

	if (!test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))
		return QLA_FUNCTION_FAILED;

	if ((atomic_read(&vha->loop_down_timer) ||
	     STATE_TRANSITION(vha))) {
		atomic_set(&vha->loop_down_timer, 0);
		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		return QLA_FUNCTION_FAILED;
	}

	ql_dbg(ql_dbg_disc + ql_dbg_init, vha, 0x2088,
	    "Listing Target bit map...\n");
	ql_dump_buffer(ql_dbg_disc + ql_dbg_init, vha,
	    0x2089, (uint8_t *)ha->gid_list, 32);

	/* Allocate temporary rmtport for any new rmtports discovered. */
	new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (new_fcport == NULL)
		return QLA_MEMORY_ALLOC_FAILED;

	for_each_set_bit(tgt_id, (void *)ha->gid_list,
	    QLAFX00_TGT_NODE_LIST_SIZE) {

		/* Send get target node info */
		new_fcport->tgt_id = tgt_id;
		rval = qlafx00_fx_disc(vha, new_fcport,
		    FXDISC_GET_TGT_NODE_INFO);
		if (rval != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x208a,
			    "Target info scan failed -- assuming zero-entry "
			    "result...\n");
			continue;
		}

		/* Locate matching device in database. */
		found = 0;
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			if (memcmp(new_fcport->port_name,
			    fcport->port_name, WWN_SIZE))
				continue;

			found++;

			/*
			 * If tgt_id is same and state FCS_ONLINE, nothing
			 * changed.
			 */
			if (fcport->tgt_id == new_fcport->tgt_id &&
			    atomic_read(&fcport->state) == FCS_ONLINE)
				break;

			/*
			 * Tgt ID changed or device was marked to be updated.
			 */
			ql_dbg(ql_dbg_disc + ql_dbg_init, vha, 0x208b,
			    "TGT-ID Change(%s): Present tgt id: "
			    "0x%x state: 0x%x "
			    "wwnn = %llx wwpn = %llx.\n",
			    __func__, fcport->tgt_id,
			    atomic_read(&fcport->state),
			    (unsigned long long)wwn_to_u64(fcport->node_name),
			    (unsigned long long)wwn_to_u64(fcport->port_name));

			ql_log(ql_log_info, vha, 0x208c,
			    "TGT-ID Announce(%s): Discovered tgt "
			    "id 0x%x wwnn = %llx "
			    "wwpn = %llx.\n", __func__, new_fcport->tgt_id,
			    (unsigned long long)
			    wwn_to_u64(new_fcport->node_name),
			    (unsigned long long)
			    wwn_to_u64(new_fcport->port_name));

			if (atomic_read(&fcport->state) != FCS_ONLINE) {
				fcport->old_tgt_id = fcport->tgt_id;
				fcport->tgt_id = new_fcport->tgt_id;
				ql_log(ql_log_info, vha, 0x208d,
				    "TGT-ID: New fcport Added: %p\n", fcport);
				qla2x00_update_fcport(vha, fcport);
			} else {
				ql_log(ql_log_info, vha, 0x208e,
				    " Existing TGT-ID %x did not get "
				    " offline event from firmware.\n",
				    fcport->old_tgt_id);
				qla2x00_mark_device_lost(vha, fcport, 0, 0);
				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
				kfree(new_fcport);
				return rval;
			}
			break;
		}

		if (found)
			continue;

		/* If device was not in our fcports list, then add it. */
		list_add_tail(&new_fcport->list, new_fcports);

		/* Allocate a new replacement fcport. */
		new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
		if (new_fcport == NULL)
			return QLA_MEMORY_ALLOC_FAILED;
	}

	kfree(new_fcport);
	return rval;
}

/*
 * qlafx00_configure_all_targets
 *	Setup target devices with node ID's.
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Returns:
 *	0 = success.
 *	BIT_0 = error
 */
static int
qlafx00_configure_all_targets(scsi_qla_host_t *vha)
{
	int rval;
	fc_port_t *fcport, *rmptemp;
	LIST_HEAD(new_fcports);

	rval = qlafx00_fx_disc(vha, &vha->hw->mr.fcport,
	    FXDISC_GET_TGT_NODE_LIST);
	if (rval != QLA_SUCCESS) {
		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		return rval;
	}

	rval = qlafx00_find_all_targets(vha, &new_fcports);
	if (rval != QLA_SUCCESS) {
		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		return rval;
	}

	/*
	 * Delete all previous devices marked lost.
	 */
	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
			break;

		if (atomic_read(&fcport->state) == FCS_DEVICE_LOST) {
			if (fcport->port_type != FCT_INITIATOR)
				qla2x00_mark_device_lost(vha, fcport, 0, 0);
		}
	}

	/*
	 * Add the new devices to our devices list.
	 */
	list_for_each_entry_safe(fcport, rmptemp, &new_fcports, list) {
		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
			break;

		qla2x00_update_fcport(vha, fcport);
		list_move_tail(&fcport->list, &vha->vp_fcports);
		ql_log(ql_log_info, vha, 0x208f,
		    "Attach new target id 0x%x wwnn = %llx "
		    "wwpn = %llx.\n",
		    fcport->tgt_id,
		    (unsigned long long)wwn_to_u64(fcport->node_name),
		    (unsigned long long)wwn_to_u64(fcport->port_name));
	}

	/* Free all new device structures not processed. */
	list_for_each_entry_safe(fcport, rmptemp, &new_fcports, list) {
		list_del(&fcport->list);
		kfree(fcport);
	}

	return rval;
}

/*
 * qlafx00_configure_devices
 *	Updates Fibre Channel Device Database with what is actually on loop.
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Returns:
 *	0 = success.
 *	1 = error.
 *	2 = database was full and device was not configured.
 */
int
qlafx00_configure_devices(scsi_qla_host_t *vha)
{
	int rval;
	unsigned long flags, save_flags;

	rval = QLA_SUCCESS;

	save_flags = flags = vha->dpc_flags;

	ql_dbg(ql_dbg_disc, vha, 0x2090,
	    "Configure devices -- dpc flags =0x%lx\n", flags);

	rval = qlafx00_configure_all_targets(vha);

	if (rval == QLA_SUCCESS) {
		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
			rval = QLA_FUNCTION_FAILED;
		} else {
			atomic_set(&vha->loop_state, LOOP_READY);
			ql_log(ql_log_info, vha, 0x2091,
			    "Device Ready\n");
		}
	}

	if (rval) {
		ql_dbg(ql_dbg_disc, vha, 0x2092,
		    "%s *** FAILED ***.\n", __func__);
	} else {
		ql_dbg(ql_dbg_disc, vha, 0x2093,
		    "%s: exiting normally.\n", __func__);
	}
	return rval;
}

static void
qlafx00_abort_isp_cleanup(scsi_qla_host_t *vha, bool critemp)
{
	struct qla_hw_data *ha = vha->hw;
	fc_port_t *fcport;

	vha->flags.online = 0;
	ha->mr.fw_hbt_en = 0;

	if (!critemp) {
		ha->flags.chip_reset_done = 0;
		clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		vha->qla_stats.total_isp_aborts++;
		ql_log(ql_log_info, vha, 0x013f,
		    "Performing ISP error recovery - ha = %p.\n", ha);
		ha->isp_ops->reset_chip(vha);
	}

	if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
		atomic_set(&vha->loop_state, LOOP_DOWN);
		atomic_set(&vha->loop_down_timer,
		    QLAFX00_LOOP_DOWN_TIME);
	} else {
		if (!atomic_read(&vha->loop_down_timer))
			atomic_set(&vha->loop_down_timer,
			    QLAFX00_LOOP_DOWN_TIME);
	}

	/* Clear all async request states across all VPs. */
	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		fcport->flags = 0;
		if (atomic_read(&fcport->state) == FCS_ONLINE)
			qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
	}

	if (!ha->flags.eeh_busy) {
		if (critemp) {
			qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
		} else {
			/* Requeue all commands in outstanding command list. */
			qla2x00_abort_all_cmds(vha, DID_RESET << 16);
		}
	}

	qla2x00_free_irqs(vha);
	if (critemp)
		set_bit(FX00_CRITEMP_RECOVERY, &vha->dpc_flags);
	else
		set_bit(FX00_RESET_RECOVERY, &vha->dpc_flags);

	/* Clear the Interrupts */
	QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);

	ql_log(ql_log_info, vha, 0x0140,
	    "%s Done - ha=%p.\n", __func__, ha);
}

/**
 * qlafx00_init_response_q_entries() - Initializes response queue entries.
 * @rsp: response queue
 *
 * Beginning of request ring has initialization control block already built
 * by nvram config routine.
 *
 * Returns 0 on success.
 */
void
qlafx00_init_response_q_entries(struct rsp_que *rsp)
{
	uint16_t cnt;
	response_t *pkt;

	rsp->ring_ptr = rsp->ring;
	rsp->ring_index = 0;
	rsp->status_srb = NULL;
	pkt = rsp->ring_ptr;
	for (cnt = 0; cnt < rsp->length; cnt++) {
		pkt->signature = RESPONSE_PROCESSED;
		WRT_REG_DWORD((void __iomem *)&pkt->signature,
		    RESPONSE_PROCESSED);
		pkt++;
	}
}

int
qlafx00_rescan_isp(scsi_qla_host_t *vha)
{
	uint32_t status = QLA_FUNCTION_FAILED;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
	uint32_t aenmbx7;

	qla2x00_request_irqs(ha, ha->rsp_q_map[0]);

	aenmbx7 = RD_REG_DWORD(&reg->aenmailbox7);
	ha->mbx_intr_code = MSW(aenmbx7);
	ha->rqstq_intr_code = LSW(aenmbx7);
	ha->req_que_off = RD_REG_DWORD(&reg->aenmailbox1);
	ha->rsp_que_off = RD_REG_DWORD(&reg->aenmailbox3);
	ha->req_que_len = RD_REG_DWORD(&reg->aenmailbox5);
	ha->rsp_que_len = RD_REG_DWORD(&reg->aenmailbox6);

	ql_dbg(ql_dbg_disc, vha, 0x2094,
	    "fw returned mbx_intr_code: 0x%x, rqstq_intr_code: 0x%x "
	    " Req que offset 0x%x Rsp que offset 0x%x\n",
	    ha->mbx_intr_code, ha->rqstq_intr_code,
	    ha->req_que_off, ha->rsp_que_off);

	/* Clear the Interrupts */
	QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);

	status = qla2x00_init_rings(vha);
	if (!status) {
		vha->flags.online = 1;

		/* if no cable then assume it's good */
		if ((vha->device_flags & DFLG_NO_CABLE))
			status = 0;
		/* Register system information */
		if (qlafx00_fx_disc(vha,
		    &vha->hw->mr.fcport, FXDISC_REG_HOST_INFO))
			ql_dbg(ql_dbg_disc, vha, 0x2095,
			    "failed to register host info\n");
	}
	scsi_unblock_requests(vha->host);
	return status;
}

void
qlafx00_timer_routine(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	uint32_t fw_heart_beat;
	uint32_t aenmbx0;
	struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
	uint32_t tempc;

	/* Check firmware health */
	if (ha->mr.fw_hbt_cnt)
		ha->mr.fw_hbt_cnt--;
	else {
		if ((!ha->flags.mr_reset_hdlr_active) &&
		    (!test_bit(UNLOADING, &vha->dpc_flags)) &&
		    (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) &&
		    (ha->mr.fw_hbt_en)) {
			fw_heart_beat = RD_REG_DWORD(&reg->fwheartbeat);
			if (fw_heart_beat != ha->mr.old_fw_hbt_cnt) {
				ha->mr.old_fw_hbt_cnt = fw_heart_beat;
				ha->mr.fw_hbt_miss_cnt = 0;
			} else {
				ha->mr.fw_hbt_miss_cnt++;
				if (ha->mr.fw_hbt_miss_cnt ==
				    QLAFX00_HEARTBEAT_MISS_CNT) {
					set_bit(ISP_ABORT_NEEDED,
					    &vha->dpc_flags);
					qla2xxx_wake_dpc(vha);
					ha->mr.fw_hbt_miss_cnt = 0;
				}
			}
		}
		ha->mr.fw_hbt_cnt = QLAFX00_HEARTBEAT_INTERVAL;
	}

	if (test_bit(FX00_RESET_RECOVERY, &vha->dpc_flags)) {
		/* Reset recovery to be performed in timer routine */
		aenmbx0 = RD_REG_DWORD(&reg->aenmailbox0);
		if (ha->mr.fw_reset_timer_exp) {
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			qla2xxx_wake_dpc(vha);
			ha->mr.fw_reset_timer_exp = 0;
		} else if (aenmbx0 == MBA_FW_RESTART_CMPLT) {
			/* Wake up DPC to rescan the targets */
			set_bit(FX00_TARGET_SCAN, &vha->dpc_flags);
			clear_bit(FX00_RESET_RECOVERY, &vha->dpc_flags);
			qla2xxx_wake_dpc(vha);
			ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL;
		} else if ((aenmbx0 == MBA_FW_STARTING) &&
		    (!ha->mr.fw_hbt_en)) {
			ha->mr.fw_hbt_en = 1;
		} else if (!ha->mr.fw_reset_timer_tick) {
			if (aenmbx0 == ha->mr.old_aenmbx0_state)
				ha->mr.fw_reset_timer_exp = 1;
			ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL;
		} else if (aenmbx0 == 0xFFFFFFFF) {
			uint32_t data0, data1;

			data0 = QLAFX00_RD_REG(ha,
			    QLAFX00_BAR1_BASE_ADDR_REG);
			data1 = QLAFX00_RD_REG(ha,
			    QLAFX00_PEX0_WIN0_BASE_ADDR_REG);

			data0 &= 0xffff0000;
			data1 &= 0x0000ffff;

			QLAFX00_WR_REG(ha,
			    QLAFX00_PEX0_WIN0_BASE_ADDR_REG,
			    (data0 | data1));
		} else if ((aenmbx0 & 0xFF00) == MBA_FW_POLL_STATE) {
			ha->mr.fw_reset_timer_tick =
			    QLAFX00_MAX_RESET_INTERVAL;
		} else if (aenmbx0 == MBA_FW_RESET_FCT) {
			ha->mr.fw_reset_timer_tick =
			    QLAFX00_MAX_RESET_INTERVAL;
		}
		ha->mr.old_aenmbx0_state = aenmbx0;
		ha->mr.fw_reset_timer_tick--;
	}
	if (test_bit(FX00_CRITEMP_RECOVERY, &vha->dpc_flags)) {
		/*
		 * Critical temperature recovery to be
		 * performed in timer routine
		 */
		if (ha->mr.fw_critemp_timer_tick == 0) {
			tempc = QLAFX00_GET_TEMPERATURE(ha);
			ql_dbg(ql_dbg_timer, vha, 0x6012,
			    "ISPFx00(%s): Critical temp timer, "
			    "current SOC temperature: %d\n",
			    __func__, tempc);
			if (tempc < ha->mr.critical_temperature) {
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				clear_bit(FX00_CRITEMP_RECOVERY,
				    &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
			}
			ha->mr.fw_critemp_timer_tick =
			    QLAFX00_CRITEMP_INTERVAL;
		} else {
			ha->mr.fw_critemp_timer_tick--;
		}
	}
}

/*
 * qlafx00_reset_initialize
 *	Re-initialize after an iSA device reset.
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Returns:
 *	0 = success
 */
int
qlafx00_reset_initialize(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;

	if (vha->device_flags & DFLG_DEV_FAILED) {
		ql_dbg(ql_dbg_init, vha, 0x0142,
		    "Device in failed state\n");
		return QLA_SUCCESS;
	}

	ha->flags.mr_reset_hdlr_active = 1;

	if (vha->flags.online) {
		scsi_block_requests(vha->host);
		qlafx00_abort_isp_cleanup(vha, false);
	}

	ql_log(ql_log_info, vha, 0x0143,
	    "(%s): succeeded.\n", __func__);
	ha->flags.mr_reset_hdlr_active = 0;
	return QLA_SUCCESS;
}

/*
 * qlafx00_abort_isp
 *	Resets ISP and aborts all outstanding commands.
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Returns:
 *	0 = success
 */
int
qlafx00_abort_isp(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;

	if (vha->flags.online) {
		if (unlikely(pci_channel_offline(ha->pdev) &&
		    ha->flags.pci_channel_io_perm_failure)) {
			clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
			return QLA_SUCCESS;
		}

		scsi_block_requests(vha->host);
		qlafx00_abort_isp_cleanup(vha, false);
	} else {
		scsi_block_requests(vha->host);
		clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		vha->qla_stats.total_isp_aborts++;
		ha->isp_ops->reset_chip(vha);
		set_bit(FX00_RESET_RECOVERY, &vha->dpc_flags);
		/* Clear the Interrupts */
		QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);
	}

	ql_log(ql_log_info, vha, 0x0145,
	    "(%s): succeeded.\n", __func__);

	return QLA_SUCCESS;
}

static inline fc_port_t*
qlafx00_get_fcport(struct scsi_qla_host *vha, int tgt_id)
{
	fc_port_t *fcport;

	/* Check for matching device in remote port list. */
	fcport = NULL;
	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (fcport->tgt_id == tgt_id) {
			ql_dbg(ql_dbg_async, vha, 0x5072,
			    "Matching fcport(%p) found with TGT-ID: 0x%x "
			    "and Remote TGT_ID: 0x%x\n",
			    fcport, fcport->tgt_id, tgt_id);
			break;
		}
	}
	return fcport;
}

static void
qlafx00_tgt_detach(struct scsi_qla_host *vha, int tgt_id)
{
	fc_port_t *fcport;

	ql_log(ql_log_info, vha, 0x5073,
	    "Detach TGT-ID: 0x%x\n", tgt_id);

	fcport = qlafx00_get_fcport(vha, tgt_id);
	if (!fcport)
		return;

	qla2x00_mark_device_lost(vha, fcport, 0, 0);
}

int
qlafx00_process_aen(struct scsi_qla_host *vha, struct qla_work_evt *evt)
{
	int rval = 0;
	uint32_t aen_code, aen_data;

	aen_code = FCH_EVT_VENDOR_UNIQUE;
	aen_data = evt->u.aenfx.evtcode;

	switch (evt->u.aenfx.evtcode) {
	case QLAFX00_MBA_PORT_UPDATE:	/* Port database update */
		if (evt->u.aenfx.mbx[1] == 0) {
			if (evt->u.aenfx.mbx[2] == 1) {
				if (!vha->flags.fw_tgt_reported)
					vha->flags.fw_tgt_reported = 1;
				atomic_set(&vha->loop_down_timer, 0);
				atomic_set(&vha->loop_state, LOOP_UP);
				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
			} else if (evt->u.aenfx.mbx[2] == 2) {
				qlafx00_tgt_detach(vha, evt->u.aenfx.mbx[3]);
			}
		} else if (evt->u.aenfx.mbx[1] == 0xffff) {
			if (evt->u.aenfx.mbx[2] == 1) {
				if (!vha->flags.fw_tgt_reported)
					vha->flags.fw_tgt_reported = 1;
				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
			} else if (evt->u.aenfx.mbx[2] == 2) {
				vha->device_flags |= DFLG_NO_CABLE;
				qla2x00_mark_all_devices_lost(vha, 1);
			}
		}
		break;
	case QLAFX00_MBA_LINK_UP:
		aen_code = FCH_EVT_LINKUP;
		aen_data = 0;
		break;
	case QLAFX00_MBA_LINK_DOWN:
		aen_code = FCH_EVT_LINKDOWN;
		aen_data = 0;
		break;
	case QLAFX00_MBA_TEMP_CRIT:	/* Critical temperature event */
		ql_log(ql_log_info, vha, 0x5082,
		    "Process critical temperature event "
		    "aenmb[0]: %x\n",
		    evt->u.aenfx.evtcode);
		scsi_block_requests(vha->host);
		qlafx00_abort_isp_cleanup(vha, true);
		scsi_unblock_requests(vha->host);
		break;
	}

	fc_host_post_event(vha->host, fc_get_event_number(),
	    aen_code, aen_data);

	return rval;
}
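/*
 * qlafx00_update_host_attr
 *	Update FC host attributes (WWNN/WWPN, topology, loop state and
 *	link data rate) from the port information returned by firmware.
 */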
static void
qlafx00_update_host_attr(scsi_qla_host_t *vha, struct port_info_data *pinfo)
{
	u64 port_name = 0, node_name = 0;

	port_name = (unsigned long long)wwn_to_u64(pinfo->port_name);
	node_name = (unsigned long long)wwn_to_u64(pinfo->node_name);

	fc_host_node_name(vha->host) = node_name;
	fc_host_port_name(vha->host) = port_name;
	if (!pinfo->port_type)
		vha->hw->current_topology = ISP_CFG_F;
	if (pinfo->link_status == QLAFX00_LINK_STATUS_UP)
		atomic_set(&vha->loop_state, LOOP_READY);
	else if (pinfo->link_status == QLAFX00_LINK_STATUS_DOWN)
		atomic_set(&vha->loop_state, LOOP_DOWN);
	vha->hw->link_data_rate = (uint16_t)pinfo->link_config;
}

static void
qla2x00_fxdisc_iocb_timeout(void *data)
{
	srb_t *sp = (srb_t *)data;
	struct srb_iocb *lio = &sp->u.iocb_cmd;

	complete(&lio->u.fxiocb.fxiocb_comp);
}

static void
qla2x00_fxdisc_sp_done(void *data, void *ptr, int res)
{
	srb_t *sp = (srb_t *)ptr;
	struct srb_iocb *lio = &sp->u.iocb_cmd;

	complete(&lio->u.fxiocb.fxiocb_comp);
}

int
qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t fx_type)
{
	srb_t *sp;
	struct srb_iocb *fdisc;
	int rval = QLA_FUNCTION_FAILED;
	struct qla_hw_data *ha = vha->hw;
	struct host_system_info *phost_info;
	struct register_host_info *preg_hsi;
	struct new_utsname *p_sysid = NULL;
	struct timeval tv;

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	fdisc = &sp->u.iocb_cmd;
	switch (fx_type) {
	case FXDISC_GET_CONFIG_INFO:
		fdisc->u.fxiocb.flags =
		    SRB_FXDISC_RESP_DMA_VALID;
		fdisc->u.fxiocb.rsp_len = sizeof(struct config_info_data);
		break;
	case FXDISC_GET_PORT_INFO:
		fdisc->u.fxiocb.flags =
		    SRB_FXDISC_RESP_DMA_VALID | SRB_FXDISC_REQ_DWRD_VALID;
		fdisc->u.fxiocb.rsp_len = QLAFX00_PORT_DATA_INFO;
		fdisc->u.fxiocb.req_data = cpu_to_le32(fcport->port_id);
		break;
	case FXDISC_GET_TGT_NODE_INFO:
		fdisc->u.fxiocb.flags =
		    SRB_FXDISC_RESP_DMA_VALID | SRB_FXDISC_REQ_DWRD_VALID;
		fdisc->u.fxiocb.rsp_len = QLAFX00_TGT_NODE_INFO;
		fdisc->u.fxiocb.req_data = cpu_to_le32(fcport->tgt_id);
		break;
	case FXDISC_GET_TGT_NODE_LIST:
		fdisc->u.fxiocb.flags =
		    SRB_FXDISC_RESP_DMA_VALID | SRB_FXDISC_REQ_DWRD_VALID;
		fdisc->u.fxiocb.rsp_len = QLAFX00_TGT_NODE_LIST_SIZE;
		break;
	case FXDISC_REG_HOST_INFO:
		fdisc->u.fxiocb.flags = SRB_FXDISC_REQ_DMA_VALID;
		fdisc->u.fxiocb.req_len = sizeof(struct register_host_info);
		p_sysid = utsname();
		if (!p_sysid) {
			ql_log(ql_log_warn, vha, 0x303c,
			    "Not able to get the system information\n");
			goto done_free_sp;
		}
		break;
	default:
		break;
	}

	if (fdisc->u.fxiocb.flags & SRB_FXDISC_REQ_DMA_VALID) {
		fdisc->u.fxiocb.req_addr = dma_alloc_coherent(&ha->pdev->dev,
		    fdisc->u.fxiocb.req_len,
		    &fdisc->u.fxiocb.req_dma_handle, GFP_KERNEL);
		if (!fdisc->u.fxiocb.req_addr)
			goto done_free_sp;

		if (fx_type == FXDISC_REG_HOST_INFO) {
			preg_hsi = (struct register_host_info *)
			    fdisc->u.fxiocb.req_addr;
			phost_info = &preg_hsi->hsi;
			memset(preg_hsi, 0, sizeof(struct register_host_info));
			phost_info->os_type = OS_TYPE_LINUX;
			strncpy(phost_info->sysname,
			    p_sysid->sysname, SYSNAME_LENGTH);
			strncpy(phost_info->nodename,
			    p_sysid->nodename, NODENAME_LENGTH);
			strncpy(phost_info->release,
			    p_sysid->release, RELEASE_LENGTH);
			strncpy(phost_info->version,
			    p_sysid->version, VERSION_LENGTH);
			strncpy(phost_info->machine,
			    p_sysid->machine, MACHINE_LENGTH);
			strncpy(phost_info->domainname,
			    p_sysid->domainname, DOMNAME_LENGTH);
			strncpy(phost_info->hostdriver,
			    QLA2XXX_VERSION, VERSION_LENGTH);
			do_gettimeofday(&tv);
			preg_hsi->utc = (uint64_t)tv.tv_sec;
			ql_dbg(ql_dbg_init, vha, 0x0149,
			    "ISP%04X: Host registration with firmware\n",
			    ha->pdev->device);
			ql_dbg(ql_dbg_init, vha, 0x014a,
			    "os_type = '%d', sysname = '%s', nodename = '%s'\n",
			    phost_info->os_type,
			    phost_info->sysname,
			    phost_info->nodename);
			ql_dbg(ql_dbg_init, vha, 0x014b,
			    "release = '%s', version = '%s'\n",
			    phost_info->release,
			    phost_info->version);
			ql_dbg(ql_dbg_init, vha, 0x014c,
			    "machine = '%s' "
			    "domainname = '%s', hostdriver = '%s'\n",
			    phost_info->machine,
			    phost_info->domainname,
			    phost_info->hostdriver);
			ql_dump_buffer(ql_dbg_init + ql_dbg_disc, vha, 0x014d,
			    (uint8_t *)phost_info,
			    sizeof(struct host_system_info));
		}
	}

	if (fdisc->u.fxiocb.flags & SRB_FXDISC_RESP_DMA_VALID) {
		fdisc->u.fxiocb.rsp_addr = dma_alloc_coherent(&ha->pdev->dev,
		    fdisc->u.fxiocb.rsp_len,
		    &fdisc->u.fxiocb.rsp_dma_handle, GFP_KERNEL);
		if (!fdisc->u.fxiocb.rsp_addr)
			goto done_unmap_req;
	}

	sp->type = SRB_FXIOCB_DCMD;
	sp->name = "fxdisc";
	qla2x00_init_timer(sp, FXDISC_TIMEOUT);
	fdisc->timeout = qla2x00_fxdisc_iocb_timeout;
	fdisc->u.fxiocb.req_func_type = cpu_to_le16(fx_type);
	sp->done = qla2x00_fxdisc_sp_done;

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_unmap_dma;

	wait_for_completion(&fdisc->u.fxiocb.fxiocb_comp);

	if (fx_type == FXDISC_GET_CONFIG_INFO) {
		struct config_info_data *pinfo =
		    (struct config_info_data *) fdisc->u.fxiocb.rsp_addr;
		memcpy(&vha->hw->mr.product_name, pinfo->product_name,
		    sizeof(vha->hw->mr.product_name));
		memcpy(&vha->hw->mr.symbolic_name, pinfo->symbolic_name,
		    sizeof(vha->hw->mr.symbolic_name));
		memcpy(&vha->hw->mr.serial_num, pinfo->serial_num,
		    sizeof(vha->hw->mr.serial_num));
		memcpy(&vha->hw->mr.hw_version, pinfo->hw_version,
		    sizeof(vha->hw->mr.hw_version));
		memcpy(&vha->hw->mr.fw_version, pinfo->fw_version,
		    sizeof(vha->hw->mr.fw_version));
		strim(vha->hw->mr.fw_version);
		memcpy(&vha->hw->mr.uboot_version, pinfo->uboot_version,
		    sizeof(vha->hw->mr.uboot_version));
		memcpy(&vha->hw->mr.fru_serial_num, pinfo->fru_serial_num,
		    sizeof(vha->hw->mr.fru_serial_num));
		vha->hw->mr.critical_temperature =
		    (pinfo->nominal_temp_value) ?
		    pinfo->nominal_temp_value : QLAFX00_CRITEMP_THRSHLD;
		ha->mr.extended_io_enabled = (pinfo->enabled_capabilities &
		    QLAFX00_EXTENDED_IO_EN_MASK) != 0;
	} else if (fx_type == FXDISC_GET_PORT_INFO) {
		struct port_info_data *pinfo =
		    (struct port_info_data *) fdisc->u.fxiocb.rsp_addr;
		memcpy(vha->node_name, pinfo->node_name, WWN_SIZE);
		memcpy(vha->port_name, pinfo->port_name, WWN_SIZE);
		vha->d_id.b.domain = pinfo->port_id[0];
		vha->d_id.b.area = pinfo->port_id[1];
		vha->d_id.b.al_pa = pinfo->port_id[2];
		qlafx00_update_host_attr(vha, pinfo);
		ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0141,
		    (uint8_t *)pinfo, 16);
	} else if (fx_type == FXDISC_GET_TGT_NODE_INFO) {
		struct qlafx00_tgt_node_info *pinfo =
		    (struct qlafx00_tgt_node_info *) fdisc->u.fxiocb.rsp_addr;
		memcpy(fcport->node_name, pinfo->tgt_node_wwnn, WWN_SIZE);
		memcpy(fcport->port_name, pinfo->tgt_node_wwpn, WWN_SIZE);
		fcport->port_type = FCT_TARGET;
		ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0144,
		    (uint8_t *)pinfo, 16);
	} else if (fx_type == FXDISC_GET_TGT_NODE_LIST) {
		struct qlafx00_tgt_node_info *pinfo =
		    (struct qlafx00_tgt_node_info *) fdisc->u.fxiocb.rsp_addr;
		ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0146,
		    (uint8_t *)pinfo, 16);
		memcpy(vha->hw->gid_list, pinfo, QLAFX00_TGT_NODE_LIST_SIZE);
	}
	rval = le32_to_cpu(fdisc->u.fxiocb.result);

done_unmap_dma:
	if (fdisc->u.fxiocb.rsp_addr)
		dma_free_coherent(&ha->pdev->dev, fdisc->u.fxiocb.rsp_len,
		    fdisc->u.fxiocb.rsp_addr, fdisc->u.fxiocb.rsp_dma_handle);

done_unmap_req:
	if (fdisc->u.fxiocb.req_addr)
		dma_free_coherent(&ha->pdev->dev, fdisc->u.fxiocb.req_len,
		    fdisc->u.fxiocb.req_addr, fdisc->u.fxiocb.req_dma_handle);
done_free_sp:
	sp->free(vha, sp);
done:
	return rval;
}

static void
qlafx00_abort_iocb_timeout(void *data)
{
	srb_t *sp = (srb_t *)data;
	struct srb_iocb *abt = &sp->u.iocb_cmd;

	abt->u.abt.comp_status = cpu_to_le16((uint16_t)CS_TIMEOUT);
	complete(&abt->u.abt.comp);
}

static void
qlafx00_abort_sp_done(void *data, void *ptr, int res)
{
	srb_t *sp = (srb_t *)ptr;
	struct srb_iocb *abt = &sp->u.iocb_cmd;

	complete(&abt->u.abt.comp);
}

static int
qlafx00_async_abt_cmd(srb_t *cmd_sp)
{
	scsi_qla_host_t *vha = cmd_sp->fcport->vha;
	fc_port_t *fcport = cmd_sp->fcport;
	struct srb_iocb *abt_iocb;
	srb_t *sp;
	int rval = QLA_FUNCTION_FAILED;

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	abt_iocb = &sp->u.iocb_cmd;
	sp->type = SRB_ABT_CMD;
	sp->name = "abort";
	qla2x00_init_timer(sp, FXDISC_TIMEOUT);
	abt_iocb->u.abt.cmd_hndl = cmd_sp->handle;
	sp->done = qlafx00_abort_sp_done;
	abt_iocb->timeout = qlafx00_abort_iocb_timeout;
	init_completion(&abt_iocb->u.abt.comp);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	ql_dbg(ql_dbg_async, vha, 0x507c,
	    "Abort command issued - hdl=%x, target_id=%x\n",
	    cmd_sp->handle, fcport->tgt_id);

	wait_for_completion(&abt_iocb->u.abt.comp);

	rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ?
static void
qlafx00_abort_iocb_timeout(void *data)
{
	srb_t *sp = (srb_t *)data;
	struct srb_iocb *abt = &sp->u.iocb_cmd;

	abt->u.abt.comp_status = cpu_to_le16((uint16_t)CS_TIMEOUT);
	complete(&abt->u.abt.comp);
}

/* Completion callback for an abort SRB: wake qlafx00_async_abt_cmd(). */
static void
qlafx00_abort_sp_done(void *data, void *ptr, int res)
{
	srb_t *sp = (srb_t *)ptr;
	struct srb_iocb *abt = &sp->u.iocb_cmd;

	complete(&abt->u.abt.comp);
}

/*
 * Issue an abort IOCB for the command referenced by @cmd_sp and wait
 * synchronously for it to complete.
 */
static int
qlafx00_async_abt_cmd(srb_t *cmd_sp)
{
	scsi_qla_host_t *vha = cmd_sp->fcport->vha;
	fc_port_t *fcport = cmd_sp->fcport;
	struct srb_iocb *abt_iocb;
	srb_t *sp;
	int rval = QLA_FUNCTION_FAILED;

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	abt_iocb = &sp->u.iocb_cmd;
	sp->type = SRB_ABT_CMD;
	sp->name = "abort";
	qla2x00_init_timer(sp, FXDISC_TIMEOUT);
	abt_iocb->u.abt.cmd_hndl = cmd_sp->handle;
	sp->done = qlafx00_abort_sp_done;
	abt_iocb->timeout = qlafx00_abort_iocb_timeout;
	init_completion(&abt_iocb->u.abt.comp);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	ql_dbg(ql_dbg_async, vha, 0x507c,
	    "Abort command issued - hdl=%x, target_id=%x\n",
	    cmd_sp->handle, fcport->tgt_id);

	wait_for_completion(&abt_iocb->u.abt.comp);

	rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ?
	    QLA_SUCCESS : QLA_FUNCTION_FAILED;

done_free_sp:
	sp->free(vha, sp);
done:
	return rval;
}
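
/*
 * qlafx00_abort_command - abort a command that is still outstanding.
 * @sp: SRB of the command to abort
 *
 * Returns QLA_SUCCESS if the abort IOCB completed, QLA_FUNCTION_FAILED
 * otherwise (including when the command is no longer outstanding).
 */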
int
qlafx00_abort_command(srb_t *sp)
{
	unsigned long flags = 0;
	uint32_t handle;
	fc_port_t *fcport = sp->fcport;
	struct scsi_qla_host *vha = fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = vha->req;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (handle = 1; handle < DEFAULT_OUTSTANDING_COMMANDS; handle++) {
		if (req->outstanding_cmds[handle] == sp)
			break;
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	if (handle == DEFAULT_OUTSTANDING_COMMANDS) {
		/* Command not found. */
		return QLA_FUNCTION_FAILED;
	}
	return qlafx00_async_abt_cmd(sp);
}

/*
 * qlafx00_initialize_adapter
 *	Initialize board.
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Returns:
 *	0 = success
 */
int
qlafx00_initialize_adapter(scsi_qla_host_t *vha)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	uint32_t tempc;

	/* Clear adapter flags. */
	vha->flags.online = 0;
	ha->flags.chip_reset_done = 0;
	vha->flags.reset_active = 0;
	ha->flags.pci_channel_io_perm_failure = 0;
	ha->flags.eeh_busy = 0;
	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
	atomic_set(&vha->loop_state, LOOP_DOWN);
	vha->device_flags = DFLG_NO_CABLE;
	vha->dpc_flags = 0;
	vha->flags.management_server_logged_in = 0;
	vha->marker_needed = 0;
	ha->isp_abort_cnt = 0;
	ha->beacon_blink_led = 0;

	set_bit(0, ha->req_qid_map);
	set_bit(0, ha->rsp_qid_map);

	ql_dbg(ql_dbg_init, vha, 0x0147,
	    "Configuring PCI space...\n");

	rval = ha->isp_ops->pci_config(vha);
	if (rval) {
		ql_log(ql_log_warn, vha, 0x0148,
		    "Unable to configure PCI space.\n");
		return rval;
	}

	rval = qlafx00_init_fw_ready(vha);
	if (rval != QLA_SUCCESS)
		return rval;

	qlafx00_save_queue_ptrs(vha);

	rval = qlafx00_config_queues(vha);
	if (rval != QLA_SUCCESS)
		return rval;

	/*
	 * Allocate the array of outstanding commands
	 * now that we know the firmware resources.
	 */
	rval = qla2x00_alloc_outstanding_cmds(ha, vha->req);
	if (rval != QLA_SUCCESS)
		return rval;

	rval = qla2x00_init_rings(vha);
	ha->flags.chip_reset_done = 1;

	tempc = QLAFX00_GET_TEMPERATURE(ha);
	ql_dbg(ql_dbg_init, vha, 0x0152,
	    "ISPFx00(%s): Critical temp timer, current SOC temperature: 0x%x\n",
	    __func__, tempc);

	return rval;
}

/* sysfs helper: report the current firmware state, or -1 on failure. */
uint32_t
qlafx00_fw_state_show(struct device *dev, struct device_attribute *attr,
		      char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	int rval = QLA_FUNCTION_FAILED;
	uint32_t state[1];

	if (qla2x00_reset_active(vha)) {
		ql_log(ql_log_warn, vha, 0x70ce,
		    "ISP reset active.\n");
	} else if (!vha->hw->flags.eeh_busy) {
		rval = qlafx00_get_firmware_state(vha, state);
	}
	if (rval != QLA_SUCCESS)
		memset(state, -1, sizeof(state));

	return state[0];
}

/* Map the adapter's link rate onto the FC transport port-speed values. */
void
qlafx00_get_host_speed(struct Scsi_Host *shost)
{
	struct qla_hw_data *ha = ((struct scsi_qla_host *)
	    (shost_priv(shost)))->hw;
	u32 speed = FC_PORTSPEED_UNKNOWN;

	switch (ha->link_data_rate) {
	case QLAFX00_PORT_SPEED_2G:
		speed = FC_PORTSPEED_2GBIT;
		break;
	case QLAFX00_PORT_SPEED_4G:
		speed = FC_PORTSPEED_4GBIT;
		break;
	case QLAFX00_PORT_SPEED_8G:
		speed = FC_PORTSPEED_8GBIT;
		break;
	case QLAFX00_PORT_SPEED_10G:
		speed = FC_PORTSPEED_10GBIT;
		break;
	}
	fc_host_speed(shost) = speed;
}

/* QLAFX00 specific ISR implementation functions */
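
/*
 * qlafx00_handle_sense - copy firmware sense data into the midlayer
 * sense buffer.
 * @sp: SRB the sense data belongs to
 * @sense_data: sense bytes carried in this status IOCB
 * @par_sense_len: number of sense bytes available in this IOCB
 * @sense_len: total sense length reported by the firmware
 * @rsp: response queue; the SRB is parked here when continuation
 *	entries carry the remaining sense bytes
 * @res: SCSI result to post once all sense data has arrived
 */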
static inline void
qlafx00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
		     uint32_t sense_len, struct rsp_que *rsp, int res)
{
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct scsi_cmnd *cp = GET_CMD_SP(sp);
	uint32_t track_sense_len;

	SET_FW_SENSE_LEN(sp, sense_len);

	if (sense_len >= SCSI_SENSE_BUFFERSIZE)
		sense_len = SCSI_SENSE_BUFFERSIZE;

	SET_CMD_SENSE_LEN(sp, sense_len);
	SET_CMD_SENSE_PTR(sp, cp->sense_buffer);
	track_sense_len = sense_len;

	if (sense_len > par_sense_len)
		sense_len = par_sense_len;

	memcpy(cp->sense_buffer, sense_data, sense_len);

	SET_FW_SENSE_LEN(sp, GET_FW_SENSE_LEN(sp) - sense_len);

	SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len);
	track_sense_len -= sense_len;
	SET_CMD_SENSE_LEN(sp, track_sense_len);

	ql_dbg(ql_dbg_io, vha, 0x304d,
	    "sense_len=0x%x par_sense_len=0x%x track_sense_len=0x%x.\n",
	    sense_len, par_sense_len, track_sense_len);

	/* More sense bytes will arrive in status continuation entries. */
	if (GET_FW_SENSE_LEN(sp) > 0) {
		rsp->status_srb = sp;
		cp->result = res;
	}

	if (sense_len) {
		ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3039,
		    "Check condition Sense data, nexus%ld:%d:%d cmd=%p.\n",
		    sp->fcport->vha->host_no, cp->device->id, cp->device->lun,
		    cp);
		ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x3049,
		    cp->sense_buffer, sense_len);
	}
}
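
/*
 * qlafx00_tm_iocb_entry - complete a task-management SRB from its
 * status IOCB, normalizing anything other than a clean completion to
 * CS_INCOMPLETE.
 */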
static void
qlafx00_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
		      struct tsk_mgmt_entry_fx00 *pkt, srb_t *sp,
		      __le16 sstatus, __le16 cpstatus)
{
	struct srb_iocb *tmf;

	tmf = &sp->u.iocb_cmd;
	if (cpstatus != cpu_to_le16((uint16_t)CS_COMPLETE) ||
	    (sstatus & cpu_to_le16((uint16_t)SS_RESPONSE_INFO_LEN_VALID)))
		cpstatus = cpu_to_le16((uint16_t)CS_INCOMPLETE);
	tmf->u.tmf.comp_status = cpstatus;
	sp->done(vha, sp, 0);
}

/* Complete an abort SRB with the status reported by the firmware. */
static void
qlafx00_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
			 struct abort_iocb_entry_fx00 *pkt)
{
	const char func[] = "ABT_IOCB";
	srb_t *sp;
	struct srb_iocb *abt;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	abt = &sp->u.iocb_cmd;
	abt->u.abt.comp_status = pkt->tgt_id_sts;
	sp->done(vha, sp, 0);
}

/*
 * qlafx00_ioctl_iosb_entry - complete a driver-internal FXIOCB or a
 * BSG pass-through request from its IOSB entry.
 */
static void
qlafx00_ioctl_iosb_entry(scsi_qla_host_t *vha, struct req_que *req,
			 struct ioctl_iocb_entry_fx00 *pkt)
{
	const char func[] = "IOSB_IOCB";
	srb_t *sp;
	struct fc_bsg_job *bsg_job;
	struct srb_iocb *iocb_job;
	int res = 0;
	struct qla_mt_iocb_rsp_fx00 fstatus;
	uint8_t *fw_sts_ptr;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	if (sp->type == SRB_FXIOCB_DCMD) {
		iocb_job = &sp->u.iocb_cmd;
		iocb_job->u.fxiocb.seq_number = pkt->seq_no;
		iocb_job->u.fxiocb.fw_flags = pkt->fw_iotcl_flags;
		iocb_job->u.fxiocb.result = pkt->status;
		if (iocb_job->u.fxiocb.flags & SRB_FXDISC_RSP_DWRD_VALID)
			iocb_job->u.fxiocb.req_data = pkt->dataword_r;
	} else {
		bsg_job = sp->u.bsg_job;

		memset(&fstatus, 0, sizeof(struct qla_mt_iocb_rsp_fx00));

		fstatus.reserved_1 = pkt->reserved_0;
		fstatus.func_type = pkt->comp_func_num;
		fstatus.ioctl_flags = pkt->fw_iotcl_flags;
		fstatus.ioctl_data = pkt->dataword_r;
		fstatus.adapid = pkt->adapid;
		fstatus.adapid_hi = pkt->adapid_hi;
		fstatus.reserved_2 = pkt->reserved_1;
		fstatus.res_count = pkt->residuallen;
		fstatus.status = pkt->status;
		fstatus.seq_number = pkt->seq_no;
		memcpy(fstatus.reserved_3,
		    pkt->reserved_2, 20 * sizeof(uint8_t));

		fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
		    sizeof(struct fc_bsg_reply);

		memcpy(fw_sts_ptr, (uint8_t *)&fstatus,
		    sizeof(struct qla_mt_iocb_rsp_fx00));
		bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
		    sizeof(struct qla_mt_iocb_rsp_fx00) + sizeof(uint8_t);

		ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
		    sp->fcport->vha, 0x5080,
		    (uint8_t *)pkt, sizeof(struct ioctl_iocb_entry_fx00));

		ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
		    sp->fcport->vha, 0x5074,
		    (uint8_t *)fw_sts_ptr, sizeof(struct qla_mt_iocb_rsp_fx00));

		res = bsg_job->reply->result = DID_OK << 16;
		bsg_job->reply->reply_payload_rcv_len =
		    bsg_job->reply_payload.payload_len;
	}
	sp->done(vha, sp, res);
}

/**
 * qlafx00_status_entry() - Process a Status IOCB entry.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 * @pkt: Entry pointer
 */
static void
qlafx00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
{
	srb_t *sp;
	fc_port_t *fcport;
	struct scsi_cmnd *cp;
	struct sts_entry_fx00 *sts;
	__le16 comp_status;
	__le16 scsi_status;
	uint16_t ox_id;
	__le16 lscsi_status;
	int32_t resid;
	uint32_t sense_len, par_sense_len, rsp_info_len, resid_len,
	    fw_resid_len;
	uint8_t *rsp_info = NULL, *sense_data = NULL;
	struct qla_hw_data *ha = vha->hw;
	uint32_t hindex, handle;
	uint16_t que;
	struct req_que *req;
	int logit = 1;
	int res = 0;

	sts = (struct sts_entry_fx00 *) pkt;

	comp_status = sts->comp_status;
	scsi_status = sts->scsi_status & cpu_to_le16((uint16_t)SS_MASK);
	hindex = sts->handle;
	handle = LSW(hindex);
	que = MSW(hindex);
	req = ha->req_q_map[que];

	/* Validate handle. */
	if (handle < req->num_outstanding_cmds)
		sp = req->outstanding_cmds[handle];
	else
		sp = NULL;

	if (sp == NULL) {
		ql_dbg(ql_dbg_io, vha, 0x3034,
		    "Invalid status handle (0x%x).\n", handle);

		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		qla2xxx_wake_dpc(vha);
		return;
	}

	if (sp->type == SRB_TM_CMD) {
		req->outstanding_cmds[handle] = NULL;
		qlafx00_tm_iocb_entry(vha, req, pkt, sp,
		    scsi_status, comp_status);
		return;
	}

	/* Fast path completion. */
	if (comp_status == CS_COMPLETE && scsi_status == 0) {
		qla2x00_do_host_ramp_up(vha);
		qla2x00_process_completed_request(vha, req, handle);
		return;
	}

	req->outstanding_cmds[handle] = NULL;
	cp = GET_CMD_SP(sp);
	if (cp == NULL) {
		ql_dbg(ql_dbg_io, vha, 0x3048,
		    "Command already returned (0x%x/%p).\n",
		    handle, sp);

		return;
	}

	lscsi_status = scsi_status & cpu_to_le16((uint16_t)STATUS_MASK);

	fcport = sp->fcport;

	ox_id = 0;
	sense_len = par_sense_len = rsp_info_len = resid_len =
	    fw_resid_len = 0;
	if (scsi_status & cpu_to_le16((uint16_t)SS_SENSE_LEN_VALID))
		sense_len = sts->sense_len;
	if (scsi_status & cpu_to_le16(((uint16_t)SS_RESIDUAL_UNDER
	    | (uint16_t)SS_RESIDUAL_OVER)))
		resid_len = le32_to_cpu(sts->residual_len);
	if (comp_status == cpu_to_le16((uint16_t)CS_DATA_UNDERRUN))
		fw_resid_len = le32_to_cpu(sts->residual_len);
	rsp_info = sense_data = sts->data;
	par_sense_len = sizeof(sts->data);

	/* Check for overrun. */
	if (comp_status == CS_COMPLETE &&
	    scsi_status & cpu_to_le16((uint16_t)SS_RESIDUAL_OVER))
		comp_status = cpu_to_le16((uint16_t)CS_DATA_OVERRUN);

	/*
	 * Based on Host and scsi status generate status code for Linux
	 */
	switch (le16_to_cpu(comp_status)) {
	case CS_COMPLETE:
	case CS_QUEUE_FULL:
		if (scsi_status == 0) {
			res = DID_OK << 16;
			break;
		}
		if (scsi_status & cpu_to_le16(((uint16_t)SS_RESIDUAL_UNDER
		    | (uint16_t)SS_RESIDUAL_OVER))) {
			resid = resid_len;
			scsi_set_resid(cp, resid);

			if (!lscsi_status &&
			    ((unsigned)(scsi_bufflen(cp) - resid) <
			     cp->underflow)) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x3050,
				    "Mid-layer underflow "
				    "detected (0x%x of 0x%x bytes).\n",
				    resid, scsi_bufflen(cp));

				res = DID_ERROR << 16;
				break;
			}
		}
		res = DID_OK << 16 | le16_to_cpu(lscsi_status);

		if (lscsi_status ==
		    cpu_to_le16((uint16_t)SAM_STAT_TASK_SET_FULL)) {
			ql_dbg(ql_dbg_io, fcport->vha, 0x3051,
			    "QUEUE FULL detected.\n");
			break;
		}
		logit = 0;
		if (lscsi_status != cpu_to_le16((uint16_t)SS_CHECK_CONDITION))
			break;

		memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
		if (!(scsi_status & cpu_to_le16((uint16_t)SS_SENSE_LEN_VALID)))
			break;

		qlafx00_handle_sense(sp, sense_data, par_sense_len, sense_len,
		    rsp, res);
		break;

	case CS_DATA_UNDERRUN:
		/* Use F/W calculated residual length. */
		if (IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha))
			resid = fw_resid_len;
		else
			resid = resid_len;
		scsi_set_resid(cp, resid);
		if (scsi_status & cpu_to_le16((uint16_t)SS_RESIDUAL_UNDER)) {
			if ((IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha))
			    && fw_resid_len != resid_len) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x3052,
				    "Dropped frame(s) detected "
				    "(0x%x of 0x%x bytes).\n",
				    resid, scsi_bufflen(cp));

				res = DID_ERROR << 16 |
				    le16_to_cpu(lscsi_status);
				goto check_scsi_status;
			}

			if (!lscsi_status &&
			    ((unsigned)(scsi_bufflen(cp) - resid) <
			     cp->underflow)) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x3053,
				    "Mid-layer underflow "
				    "detected (0x%x of 0x%x bytes, "
				    "cp->underflow: 0x%x).\n",
				    resid, scsi_bufflen(cp), cp->underflow);

				res = DID_ERROR << 16;
				break;
			}
		} else if (lscsi_status !=
		    cpu_to_le16((uint16_t)SAM_STAT_TASK_SET_FULL) &&
		    lscsi_status != cpu_to_le16((uint16_t)SAM_STAT_BUSY)) {
			/*
			 * scsi status of task set and busy are considered
			 * to be task not completed.
			 */
			ql_dbg(ql_dbg_io, fcport->vha, 0x3054,
			    "Dropped frame(s) detected (0x%x "
			    "of 0x%x bytes).\n", resid,
			    scsi_bufflen(cp));

			res = DID_ERROR << 16 | le16_to_cpu(lscsi_status);
			goto check_scsi_status;
		} else {
			ql_dbg(ql_dbg_io, fcport->vha, 0x3055,
			    "scsi_status: 0x%x, lscsi_status: 0x%x\n",
			    scsi_status, lscsi_status);
		}

		res = DID_OK << 16 | le16_to_cpu(lscsi_status);
		logit = 0;

check_scsi_status:
		/*
		 * Check to see if SCSI Status is non zero. If so report SCSI
		 * Status.
		 */
		if (lscsi_status != 0) {
			if (lscsi_status ==
			    cpu_to_le16((uint16_t)SAM_STAT_TASK_SET_FULL)) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x3056,
				    "QUEUE FULL detected.\n");
				logit = 1;
				break;
			}
			if (lscsi_status !=
			    cpu_to_le16((uint16_t)SS_CHECK_CONDITION))
				break;

			memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
			if (!(scsi_status &
			    cpu_to_le16((uint16_t)SS_SENSE_LEN_VALID)))
				break;

			qlafx00_handle_sense(sp, sense_data, par_sense_len,
			    sense_len, rsp, res);
		}
		break;

	case CS_PORT_LOGGED_OUT:
	case CS_PORT_CONFIG_CHG:
	case CS_PORT_BUSY:
	case CS_INCOMPLETE:
	case CS_PORT_UNAVAILABLE:
	case CS_TIMEOUT:
	case CS_RESET:

		/*
		 * We are going to have the fc class block the rport
		 * while we try to recover so instruct the mid layer
		 * to requeue until the class decides how to handle this.
		 */
		res = DID_TRANSPORT_DISRUPTED << 16;

		ql_dbg(ql_dbg_io, fcport->vha, 0x3057,
		    "Port down status: port-state=0x%x.\n",
		    atomic_read(&fcport->state));

		if (atomic_read(&fcport->state) == FCS_ONLINE)
			qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
		break;

	case CS_ABORTED:
		res = DID_RESET << 16;
		break;

	default:
		res = DID_ERROR << 16;
		break;
	}

	if (logit)
		ql_dbg(ql_dbg_io, fcport->vha, 0x3058,
		    "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%d "
		    "tgt_id: 0x%x lscsi_status: 0x%x cdb=%10phN len=0x%x "
		    "rsp_info=0x%x resid=0x%x fw_resid=0x%x sense_len=0x%x, "
		    "par_sense_len=0x%x, rsp_info_len=0x%x\n",
		    comp_status, scsi_status, res, vha->host_no,
		    cp->device->id, cp->device->lun, fcport->tgt_id,
		    lscsi_status, cp->cmnd, scsi_bufflen(cp),
		    rsp_info_len, resid_len, fw_resid_len, sense_len,
		    par_sense_len, rsp_info_len);

	if (!res)
		qla2x00_do_host_ramp_up(vha);

	/* Hold the command if sense data continuations are still pending. */
	if (rsp->status_srb == NULL)
		sp->done(ha, sp, res);
}

/**
 * qlafx00_status_cont_entry() - Process a Status Continuations entry.
 * @rsp: response queue
 * @pkt: Entry pointer
 *
 * Extended sense data.
 */
static void
qlafx00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
{
	uint8_t sense_sz = 0;
	struct qla_hw_data *ha = rsp->hw;
	struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
	srb_t *sp = rsp->status_srb;
	struct scsi_cmnd *cp;
	uint32_t sense_len;
	uint8_t *sense_ptr;

	if (!sp) {
		ql_dbg(ql_dbg_io, vha, 0x3037,
		    "no SP, sp = %p\n", sp);
		return;
	}

	if (!GET_FW_SENSE_LEN(sp)) {
		ql_dbg(ql_dbg_io, vha, 0x304b,
		    "no fw sense data, sp = %p\n", sp);
		return;
	}
	cp = GET_CMD_SP(sp);
	if (cp == NULL) {
		ql_log(ql_log_warn, vha, 0x303b,
		    "cmd is NULL: already returned to OS (sp=%p).\n", sp);

		rsp->status_srb = NULL;
		return;
	}

	if (!GET_CMD_SENSE_LEN(sp)) {
		ql_dbg(ql_dbg_io, vha, 0x304c,
		    "no sense data, sp = %p\n", sp);
	} else {
		sense_len = GET_CMD_SENSE_LEN(sp);
		sense_ptr = GET_CMD_SENSE_PTR(sp);
		ql_dbg(ql_dbg_io, vha, 0x304f,
		    "sp=%p sense_len=0x%x sense_ptr=%p.\n",
		    sp, sense_len, sense_ptr);

		if (sense_len > sizeof(pkt->data))
			sense_sz = sizeof(pkt->data);
		else
			sense_sz = sense_len;

		/* Move sense data. */
		ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x304e,
		    (uint8_t *)pkt, sizeof(sts_cont_entry_t));
		memcpy(sense_ptr, pkt->data, sense_sz);
		ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x304a,
		    sense_ptr, sense_sz);

		sense_len -= sense_sz;
		sense_ptr += sense_sz;

		SET_CMD_SENSE_PTR(sp, sense_ptr);
		SET_CMD_SENSE_LEN(sp, sense_len);
	}
	sense_len = GET_FW_SENSE_LEN(sp);
	sense_len = (sense_len > sizeof(pkt->data)) ?
	    (sense_len - sizeof(pkt->data)) : 0;
	SET_FW_SENSE_LEN(sp, sense_len);

	/* Place command on done queue. */
	if (sense_len == 0) {
		rsp->status_srb = NULL;
		sp->done(ha, sp, cp->result);
	}
}

/**
 * qlafx00_multistatus_entry() - Process Multi response queue entries.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 * @pkt: Entry pointer
 */
static void
qlafx00_multistatus_entry(struct scsi_qla_host *vha,
			  struct rsp_que *rsp, void *pkt)
{
	srb_t *sp;
	struct multi_sts_entry_fx00 *stsmfx;
	struct qla_hw_data *ha = vha->hw;
	uint32_t handle, hindex, handle_count, i;
	uint16_t que;
	struct req_que *req;
	__le32 *handle_ptr;

	stsmfx = (struct multi_sts_entry_fx00 *) pkt;

	handle_count = stsmfx->handle_count;

	if (handle_count > MAX_HANDLE_COUNT) {
		ql_dbg(ql_dbg_io, vha, 0x3035,
		    "Invalid handle count (0x%x).\n", handle_count);
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		qla2xxx_wake_dpc(vha);
		return;
	}

	handle_ptr = &stsmfx->handles[0];

	for (i = 0; i < handle_count; i++) {
		hindex = le32_to_cpu(*handle_ptr);
		handle = LSW(hindex);
		que = MSW(hindex);
		req = ha->req_q_map[que];

		/* Validate handle. */
		if (handle < req->num_outstanding_cmds)
			sp = req->outstanding_cmds[handle];
		else
			sp = NULL;

		if (sp == NULL) {
			ql_dbg(ql_dbg_io, vha, 0x3044,
			    "Invalid status handle (0x%x).\n", handle);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			qla2xxx_wake_dpc(vha);
			return;
		}
		qla2x00_process_completed_request(vha, req, handle);
		handle_ptr++;
	}
}

/**
 * qlafx00_error_entry() - Process an error entry.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 * @pkt: Entry pointer
 * @estatus: entry status from the IOCB
 * @etype: entry type from the IOCB
 */
static void
qlafx00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp,
		    struct sts_entry_fx00 *pkt, uint8_t estatus, uint8_t etype)
{
	srb_t *sp;
	struct qla_hw_data *ha = vha->hw;
	const char func[] = "ERROR-IOCB";
	uint16_t que = MSW(pkt->handle);
	struct req_que *req = NULL;
	int res = DID_ERROR << 16;

	ql_dbg(ql_dbg_async, vha, 0x507f,
	    "type of error status in response: 0x%x\n", estatus);

	req = ha->req_q_map[que];

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (sp) {
		sp->done(ha, sp, res);
		return;
	}

	set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	qla2xxx_wake_dpc(vha);
}

/**
 * qlafx00_process_response_queue() - Process response queue entries.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 */
static void
qlafx00_process_response_queue(struct scsi_qla_host *vha,
			       struct rsp_que *rsp)
{
	struct sts_entry_fx00 *pkt;
	response_t *lptr;

	while (RD_REG_DWORD((void __iomem *)&(rsp->ring_ptr->signature)) !=
	    RESPONSE_PROCESSED) {
		lptr = rsp->ring_ptr;
		memcpy_fromio(rsp->rsp_pkt, (void __iomem *)lptr,
		    sizeof(rsp->rsp_pkt));
		pkt = (struct sts_entry_fx00 *)rsp->rsp_pkt;

		rsp->ring_index++;
		if (rsp->ring_index == rsp->length) {
			rsp->ring_index = 0;
			rsp->ring_ptr = rsp->ring;
		} else {
			rsp->ring_ptr++;
		}

		if (pkt->entry_status != 0 &&
		    pkt->entry_type != IOCTL_IOSB_TYPE_FX00) {
			qlafx00_error_entry(vha, rsp,
			    (struct sts_entry_fx00 *)pkt, pkt->entry_status,
			    pkt->entry_type);
			goto next_iter;
		}

		switch (pkt->entry_type) {
		case STATUS_TYPE_FX00:
			qlafx00_status_entry(vha, rsp, pkt);
			break;

		case STATUS_CONT_TYPE_FX00:
			qlafx00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
			break;

		case MULTI_STATUS_TYPE_FX00:
			qlafx00_multistatus_entry(vha, rsp, pkt);
			break;

		case ABORT_IOCB_TYPE_FX00:
			qlafx00_abort_iocb_entry(vha, rsp->req,
			    (struct abort_iocb_entry_fx00 *)pkt);
			break;

		case IOCTL_IOSB_TYPE_FX00:
			qlafx00_ioctl_iosb_entry(vha, rsp->req,
			    (struct ioctl_iocb_entry_fx00 *)pkt);
			break;
		default:
			/* Type Not Supported. */
			ql_dbg(ql_dbg_async, vha, 0x5081,
			    "Received unknown response pkt type %x "
			    "entry status=%x.\n",
			    pkt->entry_type, pkt->entry_status);
			break;
		}
next_iter:
		WRT_REG_DWORD((void __iomem *)&lptr->signature,
		    RESPONSE_PROCESSED);
		wmb();
	}

	/* Adjust ring index */
	WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
}

/**
 * qlafx00_async_event() - Process asynchronous events.
 * @vha: SCSI driver HA context
 */
static void
qlafx00_async_event(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_fx00 __iomem *reg;
	int data_size = 1;

	reg = &ha->iobase->ispfx00;
	/* Decode the asynchronous event mailbox. */
	switch (ha->aenmb[0]) {
	case QLAFX00_MBA_SYSTEM_ERR:		/* System Error */
		ql_log(ql_log_warn, vha, 0x5079,
		    "ISP System Error - mbx1=%x\n", ha->aenmb[0]);
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case QLAFX00_MBA_SHUTDOWN_RQSTD:	/* Shutdown requested */
		ql_dbg(ql_dbg_async, vha, 0x5076,
		    "Asynchronous FW shutdown requested.\n");
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		qla2xxx_wake_dpc(vha);
		break;

	case QLAFX00_MBA_PORT_UPDATE:		/* Port database update */
		ha->aenmb[1] = RD_REG_WORD(&reg->aenmailbox1);
		ha->aenmb[2] = RD_REG_WORD(&reg->aenmailbox2);
		ha->aenmb[3] = RD_REG_WORD(&reg->aenmailbox3);
		ql_dbg(ql_dbg_async, vha, 0x5077,
		    "Asynchronous port Update received "
		    "aenmb[0]: %x, aenmb[1]: %x, aenmb[2]: %x, aenmb[3]: %x\n",
		    ha->aenmb[0], ha->aenmb[1], ha->aenmb[2], ha->aenmb[3]);
		data_size = 4;
		break;

	case QLAFX00_MBA_TEMP_OVER:		/* Over temperature event */
		ql_log(ql_log_info, vha, 0x5085,
		    "Asynchronous over temperature event received "
		    "aenmb[0]: %x\n",
		    ha->aenmb[0]);
		break;

	case QLAFX00_MBA_TEMP_NORM:		/* Normal temperature event */
		ql_log(ql_log_info, vha, 0x5086,
		    "Asynchronous normal temperature event received "
		    "aenmb[0]: %x\n",
		    ha->aenmb[0]);
		break;

	case QLAFX00_MBA_TEMP_CRIT:		/* Critical temperature event */
		ql_log(ql_log_info, vha, 0x5083,
		    "Asynchronous critical temperature event received "
		    "aenmb[0]: %x\n",
		    ha->aenmb[0]);
		break;

	default:
		ha->aenmb[1] = RD_REG_WORD(&reg->aenmailbox1);
		ha->aenmb[2] = RD_REG_WORD(&reg->aenmailbox2);
		ha->aenmb[3] = RD_REG_WORD(&reg->aenmailbox3);
		ha->aenmb[4] = RD_REG_WORD(&reg->aenmailbox4);
		ha->aenmb[5] = RD_REG_WORD(&reg->aenmailbox5);
		ha->aenmb[6] = RD_REG_WORD(&reg->aenmailbox6);
		ha->aenmb[7] = RD_REG_WORD(&reg->aenmailbox7);
		ql_dbg(ql_dbg_async, vha, 0x5078,
		    "AEN:%04x %04x %04x %04x :%04x %04x %04x %04x\n",
		    ha->aenmb[0], ha->aenmb[1], ha->aenmb[2], ha->aenmb[3],
		    ha->aenmb[4], ha->aenmb[5], ha->aenmb[6], ha->aenmb[7]);
		break;
	}
	qlafx00_post_aenfx_work(vha, ha->aenmb[0],
	    (uint32_t *)ha->aenmb, data_size);
}

/**
 * qlafx00_mbx_completion() - Process mailbox command completions.
 * @vha: SCSI driver HA context
 * @mb0: first status word, read from mailbox register 16
 */
static void
qlafx00_mbx_completion(scsi_qla_host_t *vha, uint32_t mb0)
{
	uint16_t cnt;
	uint16_t __iomem *wptr;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;

	if (!ha->mcp32)
		ql_dbg(ql_dbg_async, vha, 0x507e, "MBX pointer ERROR.\n");

	/* Load return mailbox registers. */
	ha->flags.mbox_int = 1;
	ha->mailbox_out32[0] = mb0;
	wptr = (uint16_t __iomem *)&reg->mailbox17;

	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
		ha->mailbox_out32[cnt] = RD_REG_WORD(wptr);
		wptr++;
	}
}

/**
 * qlafx00_intr_handler() - Process interrupts for the ISPFX00.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qlafx00_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	struct device_reg_fx00 __iomem *reg;
	int status;
	unsigned long iter;
	uint32_t stat;
	uint32_t mb[8];
	struct rsp_que *rsp;
	unsigned long flags;
	uint32_t clr_intr = 0;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x507d,
		    "%s: NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}

	ha = rsp->hw;
	reg = &ha->iobase->ispfx00;
	status = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return IRQ_HANDLED;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	for (iter = 50; iter--; clr_intr = 0) {
		stat = QLAFX00_RD_INTR_REG(ha);
		if ((stat & QLAFX00_HST_INT_STS_BITS) == 0)
			break;

		switch (stat & QLAFX00_HST_INT_STS_BITS) {
		case QLAFX00_INTR_MB_CMPLT:
		case QLAFX00_INTR_MB_RSP_CMPLT:
		case QLAFX00_INTR_MB_ASYNC_CMPLT:
		case QLAFX00_INTR_ALL_CMPLT:
			mb[0] = RD_REG_WORD(&reg->mailbox16);
			qlafx00_mbx_completion(vha, mb[0]);
			status |= MBX_INTERRUPT;
			clr_intr |= QLAFX00_INTR_MB_CMPLT;
			break;
		case QLAFX00_INTR_ASYNC_CMPLT:
		case QLAFX00_INTR_RSP_ASYNC_CMPLT:
			ha->aenmb[0] = RD_REG_WORD(&reg->aenmailbox0);
			qlafx00_async_event(vha);
			clr_intr |= QLAFX00_INTR_ASYNC_CMPLT;
			break;
		case QLAFX00_INTR_RSP_CMPLT:
			qlafx00_process_response_queue(vha, rsp);
			clr_intr |= QLAFX00_INTR_RSP_CMPLT;
			break;
		default:
			ql_dbg(ql_dbg_async, vha, 0x507a,
			    "Unrecognized interrupt type (%d).\n", stat);
			break;
		}
		QLAFX00_CLR_INTR_REG(ha, clr_intr);
		QLAFX00_RD_INTR_REG(ha);
	}

	qla2x00_handle_mbx_completion(ha, status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return IRQ_HANDLED;
}

/* QLAFX00 specific IOCB implementation functions */
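
/*
 * qlafx00_prep_cont_type1_iocb - advance the request ring and prepare
 * a Continuation Type 1 IOCB.
 * @req: request queue
 * @lcont_pkt: local (host memory) copy of the continuation packet
 *
 * Returns a pointer to the ring slot the caller must copy @lcont_pkt
 * into once it has been filled in.
 */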
static inline cont_a64_entry_t *
qlafx00_prep_cont_type1_iocb(struct req_que *req,
			     cont_a64_entry_t *lcont_pkt)
{
	cont_a64_entry_t *cont_pkt;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	lcont_pkt->entry_type = CONTINUE_A64_TYPE_FX00;

	return cont_pkt;
}
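
/*
 * qlafx00_build_scsi_iocbs - build the data-segment descriptors for a
 * SCSI command, spilling into Continuation Type 1 IOCBs as needed.
 * @sp: SRB being queued
 * @cmd_pkt: command packet slot on the request ring
 * @tot_dsds: total number of data segments
 * @lcmd_pkt: local (host memory) copy of the command packet
 */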
static inline void
qlafx00_build_scsi_iocbs(srb_t *sp, struct cmd_type_7_fx00 *cmd_pkt,
			 uint16_t tot_dsds, struct cmd_type_7_fx00 *lcmd_pkt)
{
	uint16_t avail_dsds;
	__le32 *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i, cont;
	struct req_que *req;
	cont_a64_entry_t lcont_pkt;
	cont_a64_entry_t *cont_pkt;

	vha = sp->fcport->vha;
	req = vha->req;

	cmd = GET_CMD_SP(sp);
	cont = 0;
	cont_pkt = NULL;

	/* Update entry type to indicate Command Type 7 IOCB */
	lcmd_pkt->entry_type = FX00_COMMAND_TYPE_7;

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		lcmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		lcmd_pkt->cntrl_flags = TMF_WRITE_DATA;
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		lcmd_pkt->cntrl_flags = TMF_READ_DATA;
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
	}

	/* One DSD is available in the Command Type 7 IOCB */
	avail_dsds = 1;
	cur_dsd = (__le32 *)&lcmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t sle_dma;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			memset(&lcont_pkt, 0, REQUEST_ENTRY_SIZE);
			cont_pkt =
			    qlafx00_prep_cont_type1_iocb(req, &lcont_pkt);
			cur_dsd = (__le32 *)lcont_pkt.dseg_0_address;
			avail_dsds = 5;
			cont = 1;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
		if (avail_dsds == 0 && cont == 1) {
			cont = 0;
			memcpy_toio((void __iomem *)cont_pkt, &lcont_pkt,
			    REQUEST_ENTRY_SIZE);
		}
	}
	/* Flush a partially filled continuation packet. */
	if (avail_dsds != 0 && cont == 1) {
		memcpy_toio((void __iomem *)cont_pkt, &lcont_pkt,
		    REQUEST_ENTRY_SIZE);
	}
}

/**
 * qlafx00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qlafx00_start_scsi(srb_t *sp)
{
	int ret, nseg;
	unsigned long flags;
	uint32_t index;
	uint32_t handle;
	uint16_t cnt;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct cmd_type_7_fx00 *cmd_pkt;
	struct cmd_type_7_fx00 lcmd_pkt;
	struct scsi_lun llun;
	char tag[2];

	/* Setup device pointers. */
	ret = 0;

	rsp = ha->rsp_q_map[0];
	req = vha->req;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Forcing marker needed for now */
	vha->marker_needed = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_DWORD_RELAXED(req->req_q_out);

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_type_7_fx00 *)req->ring_ptr;

	memset(&lcmd_pkt, 0, REQUEST_ENTRY_SIZE);

	lcmd_pkt.handle = MAKE_HANDLE(req->id, sp->handle);
	lcmd_pkt.handle_hi = 0;
	lcmd_pkt.dseg_count = cpu_to_le16(tot_dsds);
	lcmd_pkt.tgt_idx = cpu_to_le16(sp->fcport->tgt_id);

	int_to_scsilun(cmd->device->lun, &llun);
	host_to_adap((uint8_t *)&llun, (uint8_t *)&lcmd_pkt.lun,
	    sizeof(lcmd_pkt.lun));

	/* Update tagged queuing modifier -- default is TSK_SIMPLE (0). */
	if (scsi_populate_tag_msg(cmd, tag)) {
		switch (tag[0]) {
		case HEAD_OF_QUEUE_TAG:
			lcmd_pkt.task = TSK_HEAD_OF_QUEUE;
			break;
		case ORDERED_QUEUE_TAG:
			lcmd_pkt.task = TSK_ORDERED;
			break;
		}
	}

	/* Load SCSI command packet. */
	host_to_adap(cmd->cmnd, lcmd_pkt.fcp_cdb, sizeof(lcmd_pkt.fcp_cdb));
	lcmd_pkt.byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	qlafx00_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, &lcmd_pkt);

	/* Set total data segment count. */
	lcmd_pkt.entry_count = (uint8_t)req_cnt;

	/* Specify response queue number where completion should happen */
	lcmd_pkt.entry_status = (uint8_t) rsp->id;

	ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302e,
	    (uint8_t *)cmd->cmnd, cmd->cmd_len);
	ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x3032,
	    (uint8_t *)&lcmd_pkt, REQUEST_ENTRY_SIZE);

	memcpy_toio((void __iomem *)cmd_pkt, &lcmd_pkt, REQUEST_ENTRY_SIZE);
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);
	QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_SUCCESS;

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_FUNCTION_FAILED;
}
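
/*
 * qlafx00_tm_iocb - build a task-management IOCB in host memory and
 * copy it into the request ring slot at @ptm_iocb.
 */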
void
qlafx00_tm_iocb(srb_t *sp, struct tsk_mgmt_entry_fx00 *ptm_iocb)
{
	struct srb_iocb *fxio = &sp->u.iocb_cmd;
	scsi_qla_host_t *vha = sp->fcport->vha;
	struct req_que *req = vha->req;
	struct tsk_mgmt_entry_fx00 tm_iocb;
	struct scsi_lun llun;

	memset(&tm_iocb, 0, sizeof(struct tsk_mgmt_entry_fx00));
	tm_iocb.entry_type = TSK_MGMT_IOCB_TYPE_FX00;
	tm_iocb.entry_count = 1;
	tm_iocb.handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
	tm_iocb.handle_hi = 0;
	tm_iocb.timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);
	tm_iocb.tgt_id = cpu_to_le16(sp->fcport->tgt_id);
	tm_iocb.control_flags = cpu_to_le32(fxio->u.tmf.flags);
	if (tm_iocb.control_flags == cpu_to_le32((uint32_t)TCF_LUN_RESET)) {
		int_to_scsilun(fxio->u.tmf.lun, &llun);
		host_to_adap((uint8_t *)&llun, (uint8_t *)&tm_iocb.lun,
		    sizeof(struct scsi_lun));
	}

	memcpy((void *)ptm_iocb, &tm_iocb,
	    sizeof(struct tsk_mgmt_entry_fx00));
	wmb();
}

/*
 * qlafx00_abort_iocb - build an abort IOCB in host memory and copy it
 * into the request ring slot at @pabt_iocb.
 */
void
qlafx00_abort_iocb(srb_t *sp, struct abort_iocb_entry_fx00 *pabt_iocb)
{
	struct srb_iocb *fxio = &sp->u.iocb_cmd;
	scsi_qla_host_t *vha = sp->fcport->vha;
	struct req_que *req = vha->req;
	struct abort_iocb_entry_fx00 abt_iocb;

	memset(&abt_iocb, 0, sizeof(struct abort_iocb_entry_fx00));
	abt_iocb.entry_type = ABORT_IOCB_TYPE_FX00;
	abt_iocb.entry_count = 1;
	abt_iocb.handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
	abt_iocb.abort_handle =
	    cpu_to_le32(MAKE_HANDLE(req->id, fxio->u.abt.cmd_hndl));
	abt_iocb.tgt_id_sts = cpu_to_le16(sp->fcport->tgt_id);
	abt_iocb.req_que_no = cpu_to_le16(req->id);

	memcpy((void *)pabt_iocb, &abt_iocb,
	    sizeof(struct abort_iocb_entry_fx00));
	wmb();
}
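
/*
 * qlafx00_fxdisc_iocb - build an FXDISC IOCB, either from a
 * driver-internal FXIOCB command or from a BSG pass-through request,
 * and copy it into the request ring slot at @pfxiocb.
 */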
void
qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb)
{
	struct srb_iocb *fxio = &sp->u.iocb_cmd;
	struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
	struct fc_bsg_job *bsg_job;
	struct fxdisc_entry_fx00 fx_iocb;
	uint8_t entry_cnt = 1;

	memset(&fx_iocb, 0, sizeof(struct fxdisc_entry_fx00));
	fx_iocb.entry_type = FX00_IOCB_TYPE;
	fx_iocb.handle = cpu_to_le32(sp->handle);
	fx_iocb.entry_count = entry_cnt;

	if (sp->type == SRB_FXIOCB_DCMD) {
		fx_iocb.func_num =
		    sp->u.iocb_cmd.u.fxiocb.req_func_type;
		fx_iocb.adapid = fxio->u.fxiocb.adapter_id;
		fx_iocb.adapid_hi = fxio->u.fxiocb.adapter_id_hi;
		fx_iocb.reserved_0 = fxio->u.fxiocb.reserved_0;
		fx_iocb.reserved_1 = fxio->u.fxiocb.reserved_1;
		fx_iocb.dataword_extra = fxio->u.fxiocb.req_data_extra;

		if (fxio->u.fxiocb.flags & SRB_FXDISC_REQ_DMA_VALID) {
			fx_iocb.req_dsdcnt = cpu_to_le16(1);
			fx_iocb.req_xfrcnt =
			    cpu_to_le16(fxio->u.fxiocb.req_len);
			fx_iocb.dseg_rq_address[0] =
			    cpu_to_le32(LSD(fxio->u.fxiocb.req_dma_handle));
			fx_iocb.dseg_rq_address[1] =
			    cpu_to_le32(MSD(fxio->u.fxiocb.req_dma_handle));
			fx_iocb.dseg_rq_len =
			    cpu_to_le32(fxio->u.fxiocb.req_len);
		}

		if (fxio->u.fxiocb.flags & SRB_FXDISC_RESP_DMA_VALID) {
			fx_iocb.rsp_dsdcnt = cpu_to_le16(1);
			fx_iocb.rsp_xfrcnt =
			    cpu_to_le16(fxio->u.fxiocb.rsp_len);
			fx_iocb.dseg_rsp_address[0] =
			    cpu_to_le32(LSD(fxio->u.fxiocb.rsp_dma_handle));
			fx_iocb.dseg_rsp_address[1] =
			    cpu_to_le32(MSD(fxio->u.fxiocb.rsp_dma_handle));
			fx_iocb.dseg_rsp_len =
			    cpu_to_le32(fxio->u.fxiocb.rsp_len);
		}

		if (fxio->u.fxiocb.flags & SRB_FXDISC_REQ_DWRD_VALID)
			fx_iocb.dataword = fxio->u.fxiocb.req_data;
		fx_iocb.flags = fxio->u.fxiocb.flags;
	} else {
		struct scatterlist *sg;

		bsg_job = sp->u.bsg_job;
		piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
		    &bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];

		fx_iocb.func_num = piocb_rqst->func_type;
		fx_iocb.adapid = piocb_rqst->adapid;
		fx_iocb.adapid_hi = piocb_rqst->adapid_hi;
		fx_iocb.reserved_0 = piocb_rqst->reserved_0;
		fx_iocb.reserved_1 = piocb_rqst->reserved_1;
		fx_iocb.dataword_extra = piocb_rqst->dataword_extra;
		fx_iocb.dataword = piocb_rqst->dataword;
		fx_iocb.req_xfrcnt = piocb_rqst->req_len;
		fx_iocb.rsp_xfrcnt = piocb_rqst->rsp_len;

		if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) {
			int avail_dsds, tot_dsds;
			cont_a64_entry_t lcont_pkt;
			cont_a64_entry_t *cont_pkt = NULL;
			__le32 *cur_dsd;
			int index = 0, cont = 0;

			fx_iocb.req_dsdcnt =
			    cpu_to_le16(bsg_job->request_payload.sg_cnt);
			tot_dsds =
			    bsg_job->request_payload.sg_cnt;
			cur_dsd = (__le32 *)&fx_iocb.dseg_rq_address[0];
			avail_dsds = 1;
			for_each_sg(bsg_job->request_payload.sg_list, sg,
			    tot_dsds, index) {
				dma_addr_t sle_dma;

				/* Allocate additional continuation packets? */
				if (avail_dsds == 0) {
					/*
					 * Five DSDs are available in the Cont.
					 * Type 1 IOCB.
					 */
					memset(&lcont_pkt, 0,
					    REQUEST_ENTRY_SIZE);
					cont_pkt =
					    qlafx00_prep_cont_type1_iocb(
						sp->fcport->vha->req,
						&lcont_pkt);
					cur_dsd = (__le32 *)
					    lcont_pkt.dseg_0_address;
					avail_dsds = 5;
					cont = 1;
					entry_cnt++;
				}

				sle_dma = sg_dma_address(sg);
				*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
				*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
				*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
				avail_dsds--;

				if (avail_dsds == 0 && cont == 1) {
					cont = 0;
					memcpy_toio(
					    (void __iomem *)cont_pkt,
					    &lcont_pkt, REQUEST_ENTRY_SIZE);
					ql_dump_buffer(
					    ql_dbg_user + ql_dbg_verbose,
					    sp->fcport->vha, 0x3042,
					    (uint8_t *)&lcont_pkt,
					    REQUEST_ENTRY_SIZE);
				}
			}
			if (avail_dsds != 0 && cont == 1) {
				memcpy_toio((void __iomem *)cont_pkt,
				    &lcont_pkt, REQUEST_ENTRY_SIZE);
				ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
				    sp->fcport->vha, 0x3043,
				    (uint8_t *)&lcont_pkt, REQUEST_ENTRY_SIZE);
			}
		}

		if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) {
			int avail_dsds, tot_dsds;
			cont_a64_entry_t lcont_pkt;
			cont_a64_entry_t *cont_pkt = NULL;
			__le32 *cur_dsd;
			int index = 0, cont = 0;

			fx_iocb.rsp_dsdcnt =
			    cpu_to_le16(bsg_job->reply_payload.sg_cnt);
			tot_dsds = bsg_job->reply_payload.sg_cnt;
			cur_dsd = (__le32 *)&fx_iocb.dseg_rsp_address[0];
			avail_dsds = 1;

			for_each_sg(bsg_job->reply_payload.sg_list, sg,
			    tot_dsds, index) {
				dma_addr_t sle_dma;

				/* Allocate additional continuation packets? */
				if (avail_dsds == 0) {
					/*
					 * Five DSDs are available in the Cont.
					 * Type 1 IOCB.
					 */
					memset(&lcont_pkt, 0,
					    REQUEST_ENTRY_SIZE);
					cont_pkt =
					    qlafx00_prep_cont_type1_iocb(
						sp->fcport->vha->req,
						&lcont_pkt);
					cur_dsd = (__le32 *)
					    lcont_pkt.dseg_0_address;
					avail_dsds = 5;
					cont = 1;
					entry_cnt++;
				}

				sle_dma = sg_dma_address(sg);
				*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
				*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
				*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
				avail_dsds--;

				if (avail_dsds == 0 && cont == 1) {
					cont = 0;
					memcpy_toio((void __iomem *)cont_pkt,
					    &lcont_pkt,
					    REQUEST_ENTRY_SIZE);
					ql_dump_buffer(
					    ql_dbg_user + ql_dbg_verbose,
					    sp->fcport->vha, 0x3045,
					    (uint8_t *)&lcont_pkt,
					    REQUEST_ENTRY_SIZE);
				}
			}
			if (avail_dsds != 0 && cont == 1) {
				memcpy_toio((void __iomem *)cont_pkt,
				    &lcont_pkt, REQUEST_ENTRY_SIZE);
				ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
				    sp->fcport->vha, 0x3046,
				    (uint8_t *)&lcont_pkt, REQUEST_ENTRY_SIZE);
			}
		}

		if (piocb_rqst->flags & SRB_FXDISC_REQ_DWRD_VALID)
			fx_iocb.dataword = piocb_rqst->dataword;
		fx_iocb.flags = piocb_rqst->flags;
		fx_iocb.entry_count = entry_cnt;
	}

	ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
	    sp->fcport->vha, 0x3047,
	    (uint8_t *)&fx_iocb, sizeof(struct fxdisc_entry_fx00));

	memcpy((void *)pfxiocb, &fx_iocb,
	    sizeof(struct fxdisc_entry_fx00));
	wmb();
}