/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include <linux/delay.h>
#include <linux/ktime.h>
#include <linux/pci.h>
#include <linux/ratelimit.h>
#include <linux/vmalloc.h>
#include <linux/bsg-lib.h>
#include <scsi/scsi_tcq.h>
#include <linux/utsname.h>


/* QLAFX00 specific Mailbox implementation functions */

/*
 * qlafx00_mailbox_command
 *	Issues a mailbox command and waits for completion.
 *
 * Input:
 *	ha = adapter block pointer.
 *	mcp = driver internal mbx struct pointer.
 *
 * Output:
 *	mb[MAX_MAILBOX_REGISTER_COUNT] = returned mailbox data.
 *
 * Returns:
 *	0 : QLA_SUCCESS = cmd performed successfully
 *	1 : QLA_FUNCTION_FAILED (error encountered)
 *	6 : QLA_FUNCTION_TIMEOUT (timeout condition encountered)
 *
 * Context:
 *	Kernel context.
 */
static int
qlafx00_mailbox_command(scsi_qla_host_t *vha, struct mbx_cmd_32 *mcp)
{
	int rval;
	unsigned long flags = 0;
	device_reg_t *reg;
	uint8_t abort_active;
	uint8_t io_lock_on;
	uint16_t command = 0;
	uint32_t *iptr;
	uint32_t __iomem *optr;
	uint32_t cnt;
	uint32_t mboxes;
	unsigned long wait_time;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	if (ha->pdev->error_state > pci_channel_io_frozen) {
		ql_log(ql_log_warn, vha, 0x115c,
		    "error_state is greater than pci_channel_io_frozen, "
		    "exiting.\n");
		return QLA_FUNCTION_TIMEOUT;
	}

	if (vha->device_flags & DFLG_DEV_FAILED) {
		ql_log(ql_log_warn, vha, 0x115f,
		    "Device in failed state, exiting.\n");
		return QLA_FUNCTION_TIMEOUT;
	}

	reg = ha->iobase;
	io_lock_on = base_vha->flags.init_done;

	rval = QLA_SUCCESS;
	abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);

	if (ha->flags.pci_channel_io_perm_failure) {
		ql_log(ql_log_warn, vha, 0x1175,
		    "Perm failure on EEH timeout MBX, exiting.\n");
		return QLA_FUNCTION_TIMEOUT;
	}

	if (ha->flags.isp82xx_fw_hung) {
		/* Setting Link-Down error */
		mcp->mb[0] = MBS_LINK_DOWN_ERROR;
		ql_log(ql_log_warn, vha, 0x1176,
		    "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
		rval = QLA_FUNCTION_FAILED;
		goto premature_exit;
	}

	/*
	 * Wait for active mailbox commands to finish by waiting at most tov
	 * seconds. This is to serialize actual issuing of mailbox cmds during
	 * non ISP abort time.
	 */
	if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) {
		/* Timeout occurred. Return error. */
		ql_log(ql_log_warn, vha, 0x1177,
		    "Cmd access timeout, cmd=0x%x, Exiting.\n",
		    mcp->mb[0]);
		return QLA_FUNCTION_TIMEOUT;
	}

	ha->flags.mbox_busy = 1;
	/* Save mailbox command for debug */
	ha->mcp32 = mcp;

	ql_dbg(ql_dbg_mbx, vha, 0x1178,
	    "Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]);

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Load mailbox registers. */
	optr = (uint32_t __iomem *)&reg->ispfx00.mailbox0;

	iptr = mcp->mb;
	command = mcp->mb[0];
	mboxes = mcp->out_mb;

	for (cnt = 0; cnt < ha->mbx_count; cnt++) {
		if (mboxes & BIT_0)
			WRT_REG_DWORD(optr, *iptr);

		mboxes >>= 1;
		optr++;
		iptr++;
	}

	/* Issue set host interrupt command to send cmd out. */
	ha->flags.mbox_int = 0;
	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);

	ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1172,
	    (uint8_t *)mcp->mb, 16);
	ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1173,
	    ((uint8_t *)mcp->mb + 0x10), 16);
	ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1174,
	    ((uint8_t *)mcp->mb + 0x20), 8);

	/* Unlock mbx registers and wait for interrupt */
	ql_dbg(ql_dbg_mbx, vha, 0x1179,
	    "Going to unlock irq & waiting for interrupts. "
	    "jiffies=%lx.\n", jiffies);

	/* Wait for mbx cmd completion until timeout */
	if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) {
		set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);

		QLAFX00_SET_HST_INTR(ha, ha->mbx_intr_code);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		wait_for_completion_timeout(&ha->mbx_intr_comp, mcp->tov * HZ);
	} else {
		ql_dbg(ql_dbg_mbx, vha, 0x112c,
		    "Cmd=%x Polling Mode.\n", command);

		QLAFX00_SET_HST_INTR(ha, ha->mbx_intr_code);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */
		while (!ha->flags.mbox_int) {
			if (time_after(jiffies, wait_time))
				break;

			/* Check for pending interrupts. */
			qla2x00_poll(ha->rsp_q_map[0]);

			if (!ha->flags.mbox_int &&
			    !(IS_QLA2200(ha) &&
			    command == MBC_LOAD_RISC_RAM_EXTENDED))
				usleep_range(10000, 11000);
		} /* while */
		ql_dbg(ql_dbg_mbx, vha, 0x112d,
		    "Waited %d sec.\n",
		    (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ));
	}

	/* Check whether we timed out */
	if (ha->flags.mbox_int) {
		uint32_t *iptr2;

		ql_dbg(ql_dbg_mbx, vha, 0x112e,
		    "Cmd=%x completed.\n", command);

		/* Got interrupt. Clear the flag. */
		ha->flags.mbox_int = 0;
		clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);

		if (ha->mailbox_out32[0] != MBS_COMMAND_COMPLETE)
			rval = QLA_FUNCTION_FAILED;

		/* Load return mailbox registers. */
		iptr2 = mcp->mb;
		iptr = (uint32_t *)&ha->mailbox_out32[0];
		mboxes = mcp->in_mb;
		for (cnt = 0; cnt < ha->mbx_count; cnt++) {
			if (mboxes & BIT_0)
				*iptr2 = *iptr;

			mboxes >>= 1;
			iptr2++;
			iptr++;
		}
	} else {
		rval = QLA_FUNCTION_TIMEOUT;
	}

	ha->flags.mbox_busy = 0;

	/* Clean up */
	ha->mcp32 = NULL;

	if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) {
		ql_dbg(ql_dbg_mbx, vha, 0x113a,
		    "checking for additional resp interrupt.\n");

		/* polling mode for non isp_abort commands. */
		qla2x00_poll(ha->rsp_q_map[0]);
	}

	if (rval == QLA_FUNCTION_TIMEOUT &&
	    mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) {
		if (!io_lock_on || (mcp->flags & IOCTL_CMD) ||
		    ha->flags.eeh_busy) {
			/* not in dpc. schedule it for dpc to take over. */
			ql_dbg(ql_dbg_mbx, vha, 0x115d,
			    "Timeout, schedule isp_abort_needed.\n");

			if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
			    !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
			    !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {

				ql_log(ql_log_info, base_vha, 0x115e,
				    "Mailbox cmd timeout occurred, cmd=0x%x, "
				    "mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP "
				    "abort.\n", command, mcp->mb[0],
				    ha->flags.eeh_busy);
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
			}
		} else if (!abort_active) {
			/* call abort directly since we are in the DPC thread */
			ql_dbg(ql_dbg_mbx, vha, 0x1160,
			    "Timeout, calling abort_isp.\n");

			if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
			    !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
			    !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {

				ql_log(ql_log_info, base_vha, 0x1161,
				    "Mailbox cmd timeout occurred, cmd=0x%x, "
				    "mb[0]=0x%x. Scheduling ISP abort.\n",
				    command, mcp->mb[0]);

				set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
				clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				if (ha->isp_ops->abort_isp(vha)) {
					/* Failed. retry later. */
					set_bit(ISP_ABORT_NEEDED,
					    &vha->dpc_flags);
				}
				clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
				ql_dbg(ql_dbg_mbx, vha, 0x1162,
				    "Finished abort_isp.\n");
			}
		}
	}

premature_exit:
	/* Allow next mbx cmd to come in. */
	complete(&ha->mbx_cmd_comp);

	if (rval) {
		ql_log(ql_log_warn, base_vha, 0x1163,
		    "**** Failed mbx[0]=%x, mb[1]=%x, mb[2]=%x, "
		    "mb[3]=%x, cmd=%x ****.\n",
		    mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3], command);
	} else {
		ql_dbg(ql_dbg_mbx, base_vha, 0x1164, "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qlafx00_driver_shutdown
 *	Indicate a driver shutdown to firmware.
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Returns:
 *	local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qlafx00_driver_shutdown(scsi_qla_host_t *vha, int tmo)
{
	int rval;
	struct mbx_cmd_32 mc;
	struct mbx_cmd_32 *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1166,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_MR_DRV_SHUTDOWN;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_0;
	if (tmo)
		mcp->tov = tmo;
	else
		mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qlafx00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1167,
		    "Failed=%x.\n", rval);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1168,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qlafx00_get_firmware_state
 *	Get adapter firmware state.
 *
 * Input:
 *	ha = adapter block pointer.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qlafx00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
static int
qlafx00_get_firmware_state(scsi_qla_host_t *vha, uint32_t *states)
{
	int rval;
	struct mbx_cmd_32 mc;
	struct mbx_cmd_32 *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1169,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qlafx00_mailbox_command(vha, mcp);

	/* Return firmware states. */
	states[0] = mcp->mb[1];

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x116a,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x116b,
		    "Done %s.\n", __func__);
	}
	return rval;
}
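
/*
 * NOTE (illustrative, not part of the original driver): every wrapper in
 * this file drives qlafx00_mailbox_command() through the same recipe --
 * fill mcp->mb[], mark which registers are written with the out_mb bitmap
 * and which are read back with in_mb, set the timeout, then call. A
 * minimal sketch, using only fields and constants defined in this driver:
 *
 *	struct mbx_cmd_32 mc, *mcp = &mc;
 *
 *	mcp->mb[0] = MBC_GET_FIRMWARE_STATE;	// command code in mb[0]
 *	mcp->out_mb = MBX_0;			// only mb[0] is sent
 *	mcp->in_mb = MBX_1|MBX_0;		// mb[0]/mb[1] copied back
 *	mcp->tov = MBX_TOV_SECONDS;		// completion timeout (secs)
 *	mcp->flags = 0;
 *	rval = qlafx00_mailbox_command(vha, mcp);
 *	// on success mcp->mb[1] holds the data returned by firmware
 */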
/*
 * qlafx00_init_firmware
 *	Initialize adapter firmware.
 *
 * Input:
 *	ha = adapter block pointer.
 *	dptr = Initialization control block pointer.
 *	size = size of initialization control block.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qlafx00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qlafx00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
{
	int rval;
	struct mbx_cmd_32 mc;
	struct mbx_cmd_32 *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x116c,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_INITIALIZE_FIRMWARE;

	mcp->mb[1] = 0;
	mcp->mb[2] = MSD(ha->init_cb_dma);
	mcp->mb[3] = LSD(ha->init_cb_dma);

	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_0;
	mcp->buf_size = size;
	mcp->flags = MBX_DMA_OUT;
	mcp->tov = MBX_TOV_SECONDS;
	rval = qlafx00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x116d,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x116e,
		    "Done %s.\n", __func__);
	}
	return rval;
}

/*
 * qlafx00_mbx_reg_test
 */
static int
qlafx00_mbx_reg_test(scsi_qla_host_t *vha)
{
	int rval;
	struct mbx_cmd_32 mc;
	struct mbx_cmd_32 *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x116f,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST;
	mcp->mb[1] = 0xAAAA;
	mcp->mb[2] = 0x5555;
	mcp->mb[3] = 0xAA55;
	mcp->mb[4] = 0x55AA;
	mcp->mb[5] = 0xA5A5;
	mcp->mb[6] = 0x5A5A;
	mcp->mb[7] = 0x2525;
	mcp->mb[8] = 0xBBBB;
	mcp->mb[9] = 0x6666;
	mcp->mb[10] = 0xBB66;
	mcp->mb[11] = 0x66BB;
	mcp->mb[12] = 0xB6B6;
	mcp->mb[13] = 0x6B6B;
	mcp->mb[14] = 0x3636;
	mcp->mb[15] = 0xCCCC;

	mcp->out_mb = MBX_15|MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|
	    MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_15|MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|
	    MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->buf_size = 0;
	mcp->flags = MBX_DMA_OUT;
	mcp->tov = MBX_TOV_SECONDS;
	rval = qlafx00_mailbox_command(vha, mcp);
	if (rval == QLA_SUCCESS) {
		if (mcp->mb[17] != 0xAAAA || mcp->mb[18] != 0x5555 ||
		    mcp->mb[19] != 0xAA55 || mcp->mb[20] != 0x55AA)
			rval = QLA_FUNCTION_FAILED;
		if (mcp->mb[21] != 0xA5A5 || mcp->mb[22] != 0x5A5A ||
		    mcp->mb[23] != 0x2525 || mcp->mb[24] != 0xBBBB)
			rval = QLA_FUNCTION_FAILED;
		if (mcp->mb[25] != 0x6666 || mcp->mb[26] != 0xBB66 ||
		    mcp->mb[27] != 0x66BB || mcp->mb[28] != 0xB6B6)
			rval = QLA_FUNCTION_FAILED;
		if (mcp->mb[29] != 0x6B6B || mcp->mb[30] != 0x3636 ||
		    mcp->mb[31] != 0xCCCC)
			rval = QLA_FUNCTION_FAILED;
	}

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1170,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1171,
		    "Done %s.\n", __func__);
	}
	return rval;
}
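
/*
 * NOTE (illustrative): the register test above writes its patterns to
 * mb[1]..mb[15] but verifies the firmware's echo at mb[17]..mb[31], i.e.
 * the ISPFx00 returns the sixteen test words at a +16 register offset
 * rather than in place. Observed mapping, per the checks above:
 *
 *	out mb[1]  = 0xAAAA  ->  echoed in mb[17]
 *	out mb[2]  = 0x5555  ->  echoed in mb[18]
 *	...
 *	out mb[15] = 0xCCCC  ->  echoed in mb[31]
 */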
/**
 * qlafx00_pci_config() - Setup ISPFx00 PCI configuration registers.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
int
qlafx00_pci_config(scsi_qla_host_t *vha)
{
	uint16_t w;
	struct qla_hw_data *ha = vha->hw;

	pci_set_master(ha->pdev);
	pci_try_set_mwi(ha->pdev);

	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
	w &= ~PCI_COMMAND_INTX_DISABLE;
	pci_write_config_word(ha->pdev, PCI_COMMAND, w);

	/* PCIe -- adjust Maximum Read Request Size (2048). */
	if (pci_is_pcie(ha->pdev))
		pcie_set_readrq(ha->pdev, 2048);

	ha->chip_revision = ha->pdev->revision;

	return QLA_SUCCESS;
}

/**
 * qlafx00_soc_cpu_reset() - Perform a warm reset of the iSA (the CPUs on
 *			     the SOC are reset).
 * @vha: HA context
 */
static inline void
qlafx00_soc_cpu_reset(scsi_qla_host_t *vha)
{
	unsigned long flags = 0;
	struct qla_hw_data *ha = vha->hw;
	int i, core;
	uint32_t cnt;
	uint32_t reg_val;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	QLAFX00_SET_HBA_SOC_REG(ha, 0x80004, 0);
	QLAFX00_SET_HBA_SOC_REG(ha, 0x82004, 0);

	/* stop the XOR DMA engines */
	QLAFX00_SET_HBA_SOC_REG(ha, 0x60920, 0x02);
	QLAFX00_SET_HBA_SOC_REG(ha, 0x60924, 0x02);
	QLAFX00_SET_HBA_SOC_REG(ha, 0xf0920, 0x02);
	QLAFX00_SET_HBA_SOC_REG(ha, 0xf0924, 0x02);

	/* stop the IDMA engines */
	reg_val = QLAFX00_GET_HBA_SOC_REG(ha, 0x60840);
	reg_val &= ~(1<<12);
	QLAFX00_SET_HBA_SOC_REG(ha, 0x60840, reg_val);

	reg_val = QLAFX00_GET_HBA_SOC_REG(ha, 0x60844);
	reg_val &= ~(1<<12);
	QLAFX00_SET_HBA_SOC_REG(ha, 0x60844, reg_val);

	reg_val = QLAFX00_GET_HBA_SOC_REG(ha, 0x60848);
	reg_val &= ~(1<<12);
	QLAFX00_SET_HBA_SOC_REG(ha, 0x60848, reg_val);

	reg_val = QLAFX00_GET_HBA_SOC_REG(ha, 0x6084C);
	reg_val &= ~(1<<12);
	QLAFX00_SET_HBA_SOC_REG(ha, 0x6084C, reg_val);

	for (i = 0; i < 100000; i++) {
		if ((QLAFX00_GET_HBA_SOC_REG(ha, 0xd0000) & 0x10000000) == 0 &&
		    (QLAFX00_GET_HBA_SOC_REG(ha, 0x10600) & 0x1) == 0)
			break;
		udelay(100);
	}

	/* Set all 4 cores in reset */
	for (i = 0; i < 4; i++) {
		QLAFX00_SET_HBA_SOC_REG(ha,
		    (SOC_SW_RST_CONTROL_REG_CORE0 + 8*i), (0xF01));
		QLAFX00_SET_HBA_SOC_REG(ha,
		    (SOC_SW_RST_CONTROL_REG_CORE0 + 4 + 8*i), (0x01010101));
	}

	/* Reset all units in Fabric */
	QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_RST_CONTROL_REG, (0x011f0101));

	QLAFX00_SET_HBA_SOC_REG(ha, 0x10610, 1);
	QLAFX00_SET_HBA_SOC_REG(ha, 0x10600, 0);

	/* Set all 4 core Memory Power Down Registers */
	for (i = 0; i < 5; i++) {
		QLAFX00_SET_HBA_SOC_REG(ha,
		    (SOC_PWR_MANAGEMENT_PWR_DOWN_REG + 4*i), (0x0));
	}

	/* Reset all interrupt control registers */
	for (i = 0; i < 115; i++) {
		QLAFX00_SET_HBA_SOC_REG(ha,
		    (SOC_INTERRUPT_SOURCE_I_CONTROL_REG + 4*i), (0x0));
	}
	/* Reset Timers control registers, per core */
	for (core = 0; core < 4; core++)
		for (i = 0; i < 8; i++)
			QLAFX00_SET_HBA_SOC_REG(ha,
			    (SOC_CORE_TIMER_REG + 0x100*core + 4*i), (0x0));

	/* Reset per core IRQ ack register */
	for (core = 0; core < 4; core++)
		QLAFX00_SET_HBA_SOC_REG(ha,
		    (SOC_IRQ_ACK_REG + 0x100*core), (0x3FF));

	/* Set Fabric control and config to defaults */
	QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_CONTROL_REG, (0x2));
	QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_CONFIG_REG, (0x3));

	/* Kick in Fabric units */
	QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_RST_CONTROL_REG, (0x0));

	/* Kick in Core0 to start boot process */
	QLAFX00_SET_HBA_SOC_REG(ha, SOC_SW_RST_CONTROL_REG_CORE0, (0xF00));

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	/* Wait 10secs for soft-reset to complete. */
	for (cnt = 10; cnt; cnt--) {
		msleep(1000);
		barrier();
	}
}

/**
 * qlafx00_soft_reset() - Soft Reset ISPFx00.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
void
qlafx00_soft_reset(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;

	if (unlikely(pci_channel_offline(ha->pdev) &&
	    ha->flags.pci_channel_io_perm_failure))
		return;

	ha->isp_ops->disable_intrs(ha);
	qlafx00_soc_cpu_reset(vha);
}

/**
 * qlafx00_chip_diag() - Test ISPFx00 for proper operation.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
int
qlafx00_chip_diag(scsi_qla_host_t *vha)
{
	int rval = 0;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];

	ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length;

	rval = qlafx00_mbx_reg_test(vha);
	if (rval) {
		ql_log(ql_log_warn, vha, 0x1165,
		    "Mailbox register test failed.\n");
	} else {
		/* Flag a successful rval */
		rval = QLA_SUCCESS;
	}
	return rval;
}

void
qlafx00_config_rings(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;

	WRT_REG_DWORD(&reg->req_q_in, 0);
	WRT_REG_DWORD(&reg->req_q_out, 0);

	WRT_REG_DWORD(&reg->rsp_q_in, 0);
	WRT_REG_DWORD(&reg->rsp_q_out, 0);

	/* PCI posting */
	RD_REG_DWORD(&reg->rsp_q_out);
}

char *
qlafx00_pci_info_str(struct scsi_qla_host *vha, char *str)
{
	struct qla_hw_data *ha = vha->hw;

	if (pci_is_pcie(ha->pdev)) {
		strcpy(str, "PCIe iSA");
		return str;
	}
	return str;
}

char *
qlafx00_fw_version_str(struct scsi_qla_host *vha, char *str, size_t size)
{
	struct qla_hw_data *ha = vha->hw;

	snprintf(str, size, "%s", ha->mr.fw_version);
	return str;
}

void
qlafx00_enable_intrs(struct qla_hw_data *ha)
{
	unsigned long flags = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->interrupts_on = 1;
	QLAFX00_ENABLE_ICNTRL_REG(ha);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

void
qlafx00_disable_intrs(struct qla_hw_data *ha)
{
	unsigned long flags = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->interrupts_on = 0;
	QLAFX00_DISABLE_ICNTRL_REG(ha);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

int
qlafx00_abort_target(fc_port_t *fcport, uint64_t l, int tag)
{
	return qla2x00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag);
}
int
qlafx00_lun_reset(fc_port_t *fcport, uint64_t l, int tag)
{
	return qla2x00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag);
}

int
qlafx00_loop_reset(scsi_qla_host_t *vha)
{
	int ret;
	struct fc_port *fcport;
	struct qla_hw_data *ha = vha->hw;

	if (ql2xtargetreset) {
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			if (fcport->port_type != FCT_TARGET)
				continue;

			ret = ha->isp_ops->target_reset(fcport, 0, 0);
			if (ret != QLA_SUCCESS) {
				ql_dbg(ql_dbg_taskm, vha, 0x803d,
				    "Bus Reset failed: Reset=%d "
				    "d_id=%x.\n", ret, fcport->d_id.b24);
			}
		}
	}
	return QLA_SUCCESS;
}

int
qlafx00_iospace_config(struct qla_hw_data *ha)
{
	if (pci_request_selected_regions(ha->pdev, ha->bars,
	    QLA2XXX_DRIVER_NAME)) {
		ql_log_pci(ql_log_fatal, ha->pdev, 0x014e,
		    "Failed to reserve PIO/MMIO regions (%s), aborting.\n",
		    pci_name(ha->pdev));
		goto iospace_error_exit;
	}

	/* Use MMIO operations for all accesses. */
	if (!(pci_resource_flags(ha->pdev, 0) & IORESOURCE_MEM)) {
		ql_log_pci(ql_log_warn, ha->pdev, 0x014f,
		    "Invalid pci I/O region size (%s).\n",
		    pci_name(ha->pdev));
		goto iospace_error_exit;
	}
	if (pci_resource_len(ha->pdev, 0) < BAR0_LEN_FX00) {
		ql_log_pci(ql_log_warn, ha->pdev, 0x0127,
		    "Invalid PCI mem BAR0 region size (%s), aborting\n",
		    pci_name(ha->pdev));
		goto iospace_error_exit;
	}

	ha->cregbase =
	    ioremap_nocache(pci_resource_start(ha->pdev, 0), BAR0_LEN_FX00);
	if (!ha->cregbase) {
		ql_log_pci(ql_log_fatal, ha->pdev, 0x0128,
		    "cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev));
		goto iospace_error_exit;
	}

	if (!(pci_resource_flags(ha->pdev, 2) & IORESOURCE_MEM)) {
		ql_log_pci(ql_log_warn, ha->pdev, 0x0129,
		    "region #2 not an MMIO resource (%s), aborting\n",
		    pci_name(ha->pdev));
		goto iospace_error_exit;
	}
	if (pci_resource_len(ha->pdev, 2) < BAR2_LEN_FX00) {
		ql_log_pci(ql_log_warn, ha->pdev, 0x012a,
		    "Invalid PCI mem BAR2 region size (%s), aborting\n",
		    pci_name(ha->pdev));
		goto iospace_error_exit;
	}

	ha->iobase =
	    ioremap_nocache(pci_resource_start(ha->pdev, 2), BAR2_LEN_FX00);
	if (!ha->iobase) {
		ql_log_pci(ql_log_fatal, ha->pdev, 0x012b,
		    "cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev));
		goto iospace_error_exit;
	}

	/* Determine queue resources */
	ha->max_req_queues = ha->max_rsp_queues = 1;

	ql_log_pci(ql_log_info, ha->pdev, 0x012c,
	    "Bars 0x%x, iobase0 0x%p, iobase2 0x%p\n",
	    ha->bars, ha->cregbase, ha->iobase);

	return 0;

iospace_error_exit:
	return -ENOMEM;
}

static void
qlafx00_save_queue_ptrs(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];
	struct rsp_que *rsp = ha->rsp_q_map[0];

	req->length_fx00 = req->length;
	req->ring_fx00 = req->ring;
	req->dma_fx00 = req->dma;

	rsp->length_fx00 = rsp->length;
	rsp->ring_fx00 = rsp->ring;
	rsp->dma_fx00 = rsp->dma;

	ql_dbg(ql_dbg_init, vha, 0x012d,
	    "req: %p, ring_fx00: %p, length_fx00: 0x%x, "
	    "req->dma_fx00: 0x%llx\n", req, req->ring_fx00,
	    req->length_fx00, (u64)req->dma_fx00);

	ql_dbg(ql_dbg_init, vha, 0x012e,
	    "rsp: %p, ring_fx00: %p, length_fx00: 0x%x, "
	    "rsp->dma_fx00: 0x%llx\n", rsp, rsp->ring_fx00,
	    rsp->length_fx00, (u64)rsp->dma_fx00);
}
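
/*
 * NOTE (illustrative): unlike other ISPs, whose request/response rings
 * live in host memory allocated for DMA, the ISPFx00 exposes its rings
 * inside BAR2. qlafx00_config_queues() below therefore derives both the
 * CPU pointer and the bus address of each ring from offsets the firmware
 * reports via the AEN mailboxes:
 *
 *	req->ring = (void __force *)ha->iobase + ha->req_que_off;
 *	req->dma  = pci_resource_start(ha->pdev, 2) + ha->req_que_off;
 *
 * qlafx00_save_queue_ptrs() above stashes the original host-memory ring
 * pointers (the *_fx00 fields) so they can be freed correctly at teardown.
 */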
static int
qlafx00_config_queues(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];
	struct rsp_que *rsp = ha->rsp_q_map[0];
	dma_addr_t bar2_hdl = pci_resource_start(ha->pdev, 2);

	req->length = ha->req_que_len;
	req->ring = (void __force *)ha->iobase + ha->req_que_off;
	req->dma = bar2_hdl + ha->req_que_off;
	if ((!req->ring) || (req->length == 0)) {
		ql_log_pci(ql_log_info, ha->pdev, 0x012f,
		    "Unable to allocate memory for req_ring\n");
		return QLA_FUNCTION_FAILED;
	}

	ql_dbg(ql_dbg_init, vha, 0x0130,
	    "req: %p req_ring pointer %p req len 0x%x "
	    "req off 0x%x, req->dma: 0x%llx\n",
	    req, req->ring, req->length,
	    ha->req_que_off, (u64)req->dma);

	rsp->length = ha->rsp_que_len;
	rsp->ring = (void __force *)ha->iobase + ha->rsp_que_off;
	rsp->dma = bar2_hdl + ha->rsp_que_off;
	if ((!rsp->ring) || (rsp->length == 0)) {
		ql_log_pci(ql_log_info, ha->pdev, 0x0131,
		    "Unable to allocate memory for rsp_ring\n");
		return QLA_FUNCTION_FAILED;
	}

	ql_dbg(ql_dbg_init, vha, 0x0132,
	    "rsp: %p rsp_ring pointer %p rsp len 0x%x "
	    "rsp off 0x%x, rsp->dma: 0x%llx\n",
	    rsp, rsp->ring, rsp->length,
	    ha->rsp_que_off, (u64)rsp->dma);

	return QLA_SUCCESS;
}

static int
qlafx00_init_fw_ready(scsi_qla_host_t *vha)
{
	int rval = 0;
	unsigned long wtime;
	uint16_t wait_time;	/* Wait time */
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
	uint32_t aenmbx, aenmbx7 = 0;
	uint32_t pseudo_aen;
	uint32_t state[5];
	bool done = false;

	/* 30 seconds wait - Adjust if required */
	wait_time = 30;

	pseudo_aen = RD_REG_DWORD(&reg->pseudoaen);
	if (pseudo_aen == 1) {
		aenmbx7 = RD_REG_DWORD(&reg->initval7);
		ha->mbx_intr_code = MSW(aenmbx7);
		ha->rqstq_intr_code = LSW(aenmbx7);
		rval = qlafx00_driver_shutdown(vha, 10);
		if (rval != QLA_SUCCESS)
			qlafx00_soft_reset(vha);
	}

	/* wait time before firmware ready */
	wtime = jiffies + (wait_time * HZ);
	do {
		aenmbx = RD_REG_DWORD(&reg->aenmailbox0);
		barrier();
		ql_dbg(ql_dbg_mbx, vha, 0x0133,
		    "aenmbx: 0x%x\n", aenmbx);

		switch (aenmbx) {
		case MBA_FW_NOT_STARTED:
		case MBA_FW_STARTING:
			break;

		case MBA_SYSTEM_ERR:
		case MBA_REQ_TRANSFER_ERR:
		case MBA_RSP_TRANSFER_ERR:
		case MBA_FW_INIT_FAILURE:
			qlafx00_soft_reset(vha);
			break;

		case MBA_FW_RESTART_CMPLT:
			/* Set the mbx and rqstq intr code */
			aenmbx7 = RD_REG_DWORD(&reg->aenmailbox7);
			ha->mbx_intr_code = MSW(aenmbx7);
			ha->rqstq_intr_code = LSW(aenmbx7);
			ha->req_que_off = RD_REG_DWORD(&reg->aenmailbox1);
			ha->rsp_que_off = RD_REG_DWORD(&reg->aenmailbox3);
			ha->req_que_len = RD_REG_DWORD(&reg->aenmailbox5);
			ha->rsp_que_len = RD_REG_DWORD(&reg->aenmailbox6);
			WRT_REG_DWORD(&reg->aenmailbox0, 0);
			RD_REG_DWORD_RELAXED(&reg->aenmailbox0);
			ql_dbg(ql_dbg_init, vha, 0x0134,
			    "f/w returned mbx_intr_code: 0x%x, "
			    "rqstq_intr_code: 0x%x\n",
			    ha->mbx_intr_code, ha->rqstq_intr_code);
			QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);
			rval = QLA_SUCCESS;
			done = true;
			break;

		default:
			if ((aenmbx & 0xFF00) == MBA_FW_INIT_INPROGRESS)
				break;
			/* If fw is apparently not ready, in order to continue
			 * we might need to issue a Mbox cmd, but the problem
			 * is that the DoorBell vector values that come with
			 * the 8060 AEN are most likely gone by now (and thus
			 * no bell would be rung on the fw side when the mbox
			 * cmd is issued). We therefore have to grab the 8060
			 * AEN shadow regs (filled in by FW when the last 8060
			 * AEN was being posted).
			 * Do the following to determine what is needed in
			 * order to get the FW ready:
			 * 1. reload the 8060 AEN values from the shadow regs
			 * 2. clear int status to get rid of possible pending
			 *    interrupts
			 * 3. issue Get FW State Mbox cmd to determine fw state
			 * Set the mbx and rqstq intr code from Shadow Regs
			 */
			aenmbx7 = RD_REG_DWORD(&reg->initval7);
			ha->mbx_intr_code = MSW(aenmbx7);
			ha->rqstq_intr_code = LSW(aenmbx7);
			ha->req_que_off = RD_REG_DWORD(&reg->initval1);
			ha->rsp_que_off = RD_REG_DWORD(&reg->initval3);
			ha->req_que_len = RD_REG_DWORD(&reg->initval5);
			ha->rsp_que_len = RD_REG_DWORD(&reg->initval6);
			ql_dbg(ql_dbg_init, vha, 0x0135,
			    "f/w returned mbx_intr_code: 0x%x, "
			    "rqstq_intr_code: 0x%x\n",
			    ha->mbx_intr_code, ha->rqstq_intr_code);
			QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);

			/* Get the FW state */
			rval = qlafx00_get_firmware_state(vha, state);
			if (rval != QLA_SUCCESS) {
				/* Retry if timer has not expired */
				break;
			}

			if (state[0] == FSTATE_FX00_CONFIG_WAIT) {
				/* Firmware is waiting to be
				 * initialized by driver
				 */
				rval = QLA_SUCCESS;
				done = true;
				break;
			}

			/* Issue driver shutdown and wait until f/w recovers.
			 * Driver should continue to poll until 8060 AEN is
			 * received indicating firmware recovery.
			 */
			ql_dbg(ql_dbg_init, vha, 0x0136,
			    "Sending Driver shutdown fw_state 0x%x\n",
			    state[0]);

			rval = qlafx00_driver_shutdown(vha, 10);
			if (rval != QLA_SUCCESS) {
				rval = QLA_FUNCTION_FAILED;
				break;
			}
			msleep(500);

			wtime = jiffies + (wait_time * HZ);
			break;
		}

		if (!done) {
			if (time_after_eq(jiffies, wtime)) {
				ql_dbg(ql_dbg_init, vha, 0x0137,
				    "Init f/w failed: aen[7]: 0x%x\n",
				    RD_REG_DWORD(&reg->aenmailbox7));
				rval = QLA_FUNCTION_FAILED;
				done = true;
				break;
			}
			/* Delay for a while */
			msleep(500);
		}
	} while (!done);

	if (rval)
		ql_dbg(ql_dbg_init, vha, 0x0138,
		    "%s **** FAILED ****.\n", __func__);
	else
		ql_dbg(ql_dbg_init, vha, 0x0139,
		    "%s **** SUCCESS ****.\n", __func__);

	return rval;
}
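
/*
 * NOTE (illustrative): qlafx00_init_fw_ready() above is effectively a
 * polled state machine on AEN mailbox 0, summarized as:
 *
 *	MBA_FW_NOT_STARTED / MBA_FW_STARTING	-> keep polling
 *	MBA_SYSTEM_ERR / *_TRANSFER_ERR /
 *	MBA_FW_INIT_FAILURE			-> soft reset, keep polling
 *	MBA_FW_RESTART_CMPLT			-> latch queue offsets, done
 *	MBA_FW_INIT_INPROGRESS (high byte)	-> keep polling
 *	anything else				-> reload shadow regs, query
 *						   FW state, possibly request
 *						   a driver shutdown and retry
 *
 * The 30-second deadline (wtime) is re-armed after each shutdown request.
 */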
/*
 * qlafx00_fw_ready() - Waits for firmware ready.
 * @ha: HA context
 *
 * Returns 0 on success.
 */
int
qlafx00_fw_ready(scsi_qla_host_t *vha)
{
	int rval;
	unsigned long wtime;
	uint16_t wait_time;	/* Wait time if loop is coming ready */
	uint32_t state[5];

	rval = QLA_SUCCESS;

	wait_time = 10;

	/* wait time before firmware ready */
	wtime = jiffies + (wait_time * HZ);

	/* Wait for ISP to finish init */
	if (!vha->flags.init_done)
		ql_dbg(ql_dbg_init, vha, 0x013a,
		    "Waiting for init to complete...\n");

	do {
		rval = qlafx00_get_firmware_state(vha, state);

		if (rval == QLA_SUCCESS) {
			if (state[0] == FSTATE_FX00_INITIALIZED) {
				ql_dbg(ql_dbg_init, vha, 0x013b,
				    "fw_state=%x\n", state[0]);
				rval = QLA_SUCCESS;
				break;
			}
		}
		rval = QLA_FUNCTION_FAILED;

		if (time_after_eq(jiffies, wtime))
			break;

		/* Delay for a while */
		msleep(500);

		ql_dbg(ql_dbg_init, vha, 0x013c,
		    "fw_state=%x curr time=%lx.\n", state[0], jiffies);
	} while (1);

	if (rval)
		ql_dbg(ql_dbg_init, vha, 0x013d,
		    "Firmware ready **** FAILED ****.\n");
	else
		ql_dbg(ql_dbg_init, vha, 0x013e,
		    "Firmware ready **** SUCCESS ****.\n");

	return rval;
}

static int
qlafx00_find_all_targets(scsi_qla_host_t *vha,
	struct list_head *new_fcports)
{
	int rval;
	uint16_t tgt_id;
	fc_port_t *fcport, *new_fcport;
	int found;
	struct qla_hw_data *ha = vha->hw;

	rval = QLA_SUCCESS;

	if (!test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))
		return QLA_FUNCTION_FAILED;

	if ((atomic_read(&vha->loop_down_timer) ||
	    STATE_TRANSITION(vha))) {
		atomic_set(&vha->loop_down_timer, 0);
		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		return QLA_FUNCTION_FAILED;
	}

	ql_dbg(ql_dbg_disc + ql_dbg_init, vha, 0x2088,
	    "Listing Target bit map...\n");
	ql_dump_buffer(ql_dbg_disc + ql_dbg_init, vha,
	    0x2089, (uint8_t *)ha->gid_list, 32);

	/* Allocate temporary rmtport for any new rmtports discovered. */
	new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (new_fcport == NULL)
		return QLA_MEMORY_ALLOC_FAILED;

	for_each_set_bit(tgt_id, (void *)ha->gid_list,
	    QLAFX00_TGT_NODE_LIST_SIZE) {

		/* Send get target node info */
		new_fcport->tgt_id = tgt_id;
		rval = qlafx00_fx_disc(vha, new_fcport,
		    FXDISC_GET_TGT_NODE_INFO);
		if (rval != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x208a,
			    "Target info scan failed -- assuming zero-entry "
			    "result...\n");
			continue;
		}

		/* Locate matching device in database. */
		found = 0;
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			if (memcmp(new_fcport->port_name,
			    fcport->port_name, WWN_SIZE))
				continue;

			found++;

			/*
			 * If tgt_id is same and state FCS_ONLINE, nothing
			 * changed.
			 */
			if (fcport->tgt_id == new_fcport->tgt_id &&
			    atomic_read(&fcport->state) == FCS_ONLINE)
				break;
			/*
			 * Tgt ID changed or device was marked to be updated.
			 */
			ql_dbg(ql_dbg_disc + ql_dbg_init, vha, 0x208b,
			    "TGT-ID Change(%s): Present tgt id: "
			    "0x%x state: 0x%x "
			    "wwnn = %llx wwpn = %llx.\n",
			    __func__, fcport->tgt_id,
			    atomic_read(&fcport->state),
			    (unsigned long long)wwn_to_u64(fcport->node_name),
			    (unsigned long long)wwn_to_u64(fcport->port_name));

			ql_log(ql_log_info, vha, 0x208c,
			    "TGT-ID Announce(%s): Discovered tgt "
			    "id 0x%x wwnn = %llx "
			    "wwpn = %llx.\n", __func__, new_fcport->tgt_id,
			    (unsigned long long)
			    wwn_to_u64(new_fcport->node_name),
			    (unsigned long long)
			    wwn_to_u64(new_fcport->port_name));

			if (atomic_read(&fcport->state) != FCS_ONLINE) {
				fcport->old_tgt_id = fcport->tgt_id;
				fcport->tgt_id = new_fcport->tgt_id;
				ql_log(ql_log_info, vha, 0x208d,
				    "TGT-ID: New fcport Added: %p\n", fcport);
				qla2x00_update_fcport(vha, fcport);
			} else {
				ql_log(ql_log_info, vha, 0x208e,
				    "Existing TGT-ID %x did not get "
				    "offline event from firmware.\n",
				    fcport->old_tgt_id);
				qla2x00_mark_device_lost(vha, fcport, 0, 0);
				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
				kfree(new_fcport);
				return rval;
			}
			break;
		}

		if (found)
			continue;

		/* If device was not in our fcports list, then add it. */
		list_add_tail(&new_fcport->list, new_fcports);

		/* Allocate a new replacement fcport. */
		new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
		if (new_fcport == NULL)
			return QLA_MEMORY_ALLOC_FAILED;
	}

	kfree(new_fcport);
	return rval;
}

/*
 * qlafx00_configure_all_targets
 *	Setup target devices with node ID's.
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Returns:
 *	0 = success.
 *	BIT_0 = error
 */
static int
qlafx00_configure_all_targets(scsi_qla_host_t *vha)
{
	int rval;
	fc_port_t *fcport, *rmptemp;
	LIST_HEAD(new_fcports);

	rval = qlafx00_fx_disc(vha, &vha->hw->mr.fcport,
	    FXDISC_GET_TGT_NODE_LIST);
	if (rval != QLA_SUCCESS) {
		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		return rval;
	}

	rval = qlafx00_find_all_targets(vha, &new_fcports);
	if (rval != QLA_SUCCESS) {
		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		return rval;
	}

	/*
	 * Delete all previous devices marked lost.
	 */
	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
			break;

		if (atomic_read(&fcport->state) == FCS_DEVICE_LOST) {
			if (fcport->port_type != FCT_INITIATOR)
				qla2x00_mark_device_lost(vha, fcport, 0, 0);
		}
	}

	/*
	 * Add the new devices to our devices list.
	 */
	list_for_each_entry_safe(fcport, rmptemp, &new_fcports, list) {
		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
			break;

		qla2x00_update_fcport(vha, fcport);
		list_move_tail(&fcport->list, &vha->vp_fcports);
		ql_log(ql_log_info, vha, 0x208f,
		    "Attach new target id 0x%x wwnn = %llx "
		    "wwpn = %llx.\n",
		    fcport->tgt_id,
		    (unsigned long long)wwn_to_u64(fcport->node_name),
		    (unsigned long long)wwn_to_u64(fcport->port_name));
	}
	/* Free all new device structures not processed. */
	list_for_each_entry_safe(fcport, rmptemp, &new_fcports, list) {
		list_del(&fcport->list);
		kfree(fcport);
	}

	return rval;
}

/*
 * qlafx00_configure_devices
 *	Updates Fibre Channel Device Database with what is actually on loop.
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Returns:
 *	0 = success.
 *	1 = error.
 *	2 = database was full and device was not configured.
 */
int
qlafx00_configure_devices(scsi_qla_host_t *vha)
{
	int rval;
	unsigned long flags;

	rval = QLA_SUCCESS;

	flags = vha->dpc_flags;

	ql_dbg(ql_dbg_disc, vha, 0x2090,
	    "Configure devices -- dpc flags =0x%lx\n", flags);

	rval = qlafx00_configure_all_targets(vha);

	if (rval == QLA_SUCCESS) {
		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
			rval = QLA_FUNCTION_FAILED;
		} else {
			atomic_set(&vha->loop_state, LOOP_READY);
			ql_log(ql_log_info, vha, 0x2091,
			    "Device Ready\n");
		}
	}

	if (rval) {
		ql_dbg(ql_dbg_disc, vha, 0x2092,
		    "%s *** FAILED ***.\n", __func__);
	} else {
		ql_dbg(ql_dbg_disc, vha, 0x2093,
		    "%s: exiting normally.\n", __func__);
	}
	return rval;
}

static void
qlafx00_abort_isp_cleanup(scsi_qla_host_t *vha, bool critemp)
{
	struct qla_hw_data *ha = vha->hw;
	fc_port_t *fcport;

	vha->flags.online = 0;
	ha->mr.fw_hbt_en = 0;

	if (!critemp) {
		ha->flags.chip_reset_done = 0;
		clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		vha->qla_stats.total_isp_aborts++;
		ql_log(ql_log_info, vha, 0x013f,
		    "Performing ISP error recovery - ha = %p.\n", ha);
		ha->isp_ops->reset_chip(vha);
	}

	if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
		atomic_set(&vha->loop_state, LOOP_DOWN);
		atomic_set(&vha->loop_down_timer,
		    QLAFX00_LOOP_DOWN_TIME);
	} else {
		if (!atomic_read(&vha->loop_down_timer))
			atomic_set(&vha->loop_down_timer,
			    QLAFX00_LOOP_DOWN_TIME);
	}

	/* Clear all async request states across all VPs. */
	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		fcport->flags = 0;
		if (atomic_read(&fcport->state) == FCS_ONLINE)
			qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
	}

	if (!ha->flags.eeh_busy) {
		if (critemp) {
			qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
		} else {
			/* Requeue all commands in outstanding command list. */
			qla2x00_abort_all_cmds(vha, DID_RESET << 16);
		}
	}

	qla2x00_free_irqs(vha);
	if (critemp)
		set_bit(FX00_CRITEMP_RECOVERY, &vha->dpc_flags);
	else
		set_bit(FX00_RESET_RECOVERY, &vha->dpc_flags);

	/* Clear the Interrupts */
	QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);

	ql_log(ql_log_info, vha, 0x0140,
	    "%s Done - ha=%p.\n", __func__, ha);
}
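
/*
 * NOTE (illustrative): pre-stamping every response-queue entry with
 * RESPONSE_PROCESSED, as done below, lets the ISR tell new firmware
 * entries from already-consumed ones without a separate valid bit: the
 * firmware overwrites the signature when it posts an entry, and the
 * driver restores it once the entry has been handled.
 */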
/**
 * qlafx00_init_response_q_entries() - Initializes response queue entries.
 * @rsp: response queue
 *
 * Beginning of request ring has initialization control block already built
 * by nvram config routine.
 */
void
qlafx00_init_response_q_entries(struct rsp_que *rsp)
{
	uint16_t cnt;
	response_t *pkt;

	rsp->ring_ptr = rsp->ring;
	rsp->ring_index = 0;
	rsp->status_srb = NULL;
	pkt = rsp->ring_ptr;
	for (cnt = 0; cnt < rsp->length; cnt++) {
		pkt->signature = RESPONSE_PROCESSED;
		WRT_REG_DWORD((void __force __iomem *)&pkt->signature,
		    RESPONSE_PROCESSED);
		pkt++;
	}
}

int
qlafx00_rescan_isp(scsi_qla_host_t *vha)
{
	uint32_t status = QLA_FUNCTION_FAILED;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
	uint32_t aenmbx7;

	qla2x00_request_irqs(ha, ha->rsp_q_map[0]);

	aenmbx7 = RD_REG_DWORD(&reg->aenmailbox7);
	ha->mbx_intr_code = MSW(aenmbx7);
	ha->rqstq_intr_code = LSW(aenmbx7);
	ha->req_que_off = RD_REG_DWORD(&reg->aenmailbox1);
	ha->rsp_que_off = RD_REG_DWORD(&reg->aenmailbox3);
	ha->req_que_len = RD_REG_DWORD(&reg->aenmailbox5);
	ha->rsp_que_len = RD_REG_DWORD(&reg->aenmailbox6);

	ql_dbg(ql_dbg_disc, vha, 0x2094,
	    "fw returned mbx_intr_code: 0x%x, rqstq_intr_code: 0x%x "
	    "Req que offset 0x%x Rsp que offset 0x%x\n",
	    ha->mbx_intr_code, ha->rqstq_intr_code,
	    ha->req_que_off, ha->rsp_que_off);

	/* Clear the Interrupts */
	QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);

	status = qla2x00_init_rings(vha);
	if (!status) {
		vha->flags.online = 1;

		/* if no cable then assume it's good */
		if ((vha->device_flags & DFLG_NO_CABLE))
			status = 0;
		/* Register system information */
		if (qlafx00_fx_disc(vha,
		    &vha->hw->mr.fcport, FXDISC_REG_HOST_INFO))
			ql_dbg(ql_dbg_disc, vha, 0x2095,
			    "failed to register host info\n");
	}
	scsi_unblock_requests(vha->host);
	return status;
}

void
qlafx00_timer_routine(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	uint32_t fw_heart_beat;
	uint32_t aenmbx0;
	struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
	uint32_t tempc;

	/* Check firmware health */
	if (ha->mr.fw_hbt_cnt)
		ha->mr.fw_hbt_cnt--;
	else {
		if ((!ha->flags.mr_reset_hdlr_active) &&
		    (!test_bit(UNLOADING, &vha->dpc_flags)) &&
		    (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) &&
		    (ha->mr.fw_hbt_en)) {
			fw_heart_beat = RD_REG_DWORD(&reg->fwheartbeat);
			if (fw_heart_beat != ha->mr.old_fw_hbt_cnt) {
				ha->mr.old_fw_hbt_cnt = fw_heart_beat;
				ha->mr.fw_hbt_miss_cnt = 0;
			} else {
				ha->mr.fw_hbt_miss_cnt++;
				if (ha->mr.fw_hbt_miss_cnt ==
				    QLAFX00_HEARTBEAT_MISS_CNT) {
					set_bit(ISP_ABORT_NEEDED,
					    &vha->dpc_flags);
					qla2xxx_wake_dpc(vha);
					ha->mr.fw_hbt_miss_cnt = 0;
				}
			}
		}
		ha->mr.fw_hbt_cnt = QLAFX00_HEARTBEAT_INTERVAL;
	}

	if (test_bit(FX00_RESET_RECOVERY, &vha->dpc_flags)) {
		/* Reset recovery to be performed in timer routine */
		aenmbx0 = RD_REG_DWORD(&reg->aenmailbox0);
		if (ha->mr.fw_reset_timer_exp) {
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			qla2xxx_wake_dpc(vha);
			ha->mr.fw_reset_timer_exp = 0;
		} else if (aenmbx0 == MBA_FW_RESTART_CMPLT) {
			/* Wake up DPC to rescan the targets */
			set_bit(FX00_TARGET_SCAN, &vha->dpc_flags);
			clear_bit(FX00_RESET_RECOVERY, &vha->dpc_flags);
			qla2xxx_wake_dpc(vha);
			ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL;
		} else if ((aenmbx0 == MBA_FW_STARTING) &&
		    (!ha->mr.fw_hbt_en)) {
			ha->mr.fw_hbt_en = 1;
		} else if (!ha->mr.fw_reset_timer_tick) {
			if (aenmbx0 == ha->mr.old_aenmbx0_state)
				ha->mr.fw_reset_timer_exp = 1;
			ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL;
		} else if (aenmbx0 == 0xFFFFFFFF) {
			uint32_t data0, data1;

			data0 = QLAFX00_RD_REG(ha,
			    QLAFX00_BAR1_BASE_ADDR_REG);
			data1 = QLAFX00_RD_REG(ha,
			    QLAFX00_PEX0_WIN0_BASE_ADDR_REG);

			data0 &= 0xffff0000;
			data1 &= 0x0000ffff;

			QLAFX00_WR_REG(ha,
			    QLAFX00_PEX0_WIN0_BASE_ADDR_REG,
			    (data0 | data1));
		} else if ((aenmbx0 & 0xFF00) == MBA_FW_POLL_STATE) {
			ha->mr.fw_reset_timer_tick =
			    QLAFX00_MAX_RESET_INTERVAL;
		} else if (aenmbx0 == MBA_FW_RESET_FCT) {
			ha->mr.fw_reset_timer_tick =
			    QLAFX00_MAX_RESET_INTERVAL;
		}
		if (ha->mr.old_aenmbx0_state != aenmbx0) {
			ha->mr.old_aenmbx0_state = aenmbx0;
			ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL;
		}
		ha->mr.fw_reset_timer_tick--;
	}
	if (test_bit(FX00_CRITEMP_RECOVERY, &vha->dpc_flags)) {
		/*
		 * Critical temperature recovery to be
		 * performed in timer routine
		 */
		if (ha->mr.fw_critemp_timer_tick == 0) {
			tempc = QLAFX00_GET_TEMPERATURE(ha);
			ql_dbg(ql_dbg_timer, vha, 0x6012,
			    "ISPFx00(%s): Critical temp timer, "
			    "current SOC temperature: %d\n",
			    __func__, tempc);
			if (tempc < ha->mr.critical_temperature) {
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				clear_bit(FX00_CRITEMP_RECOVERY,
				    &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
			}
			ha->mr.fw_critemp_timer_tick =
			    QLAFX00_CRITEMP_INTERVAL;
		} else {
			ha->mr.fw_critemp_timer_tick--;
		}
	}
	if (ha->mr.host_info_resend) {
		/*
		 * Incomplete host info might have been sent to firmware
		 * during system boot - the info should be resent.
		 */
		if (ha->mr.hinfo_resend_timer_tick == 0) {
			ha->mr.host_info_resend = false;
			set_bit(FX00_HOST_INFO_RESEND, &vha->dpc_flags);
			ha->mr.hinfo_resend_timer_tick =
			    QLAFX00_HINFO_RESEND_INTERVAL;
			qla2xxx_wake_dpc(vha);
		} else {
			ha->mr.hinfo_resend_timer_tick--;
		}
	}
}

/*
 * qlafx00_reset_initialize
 *	Re-initialize after an iSA device reset.
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Returns:
 *	0 = success
 */
int
qlafx00_reset_initialize(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;

	if (vha->device_flags & DFLG_DEV_FAILED) {
		ql_dbg(ql_dbg_init, vha, 0x0142,
		    "Device in failed state\n");
		return QLA_SUCCESS;
	}

	ha->flags.mr_reset_hdlr_active = 1;

	if (vha->flags.online) {
		scsi_block_requests(vha->host);
		qlafx00_abort_isp_cleanup(vha, false);
	}

	ql_log(ql_log_info, vha, 0x0143,
	    "(%s): succeeded.\n", __func__);
	ha->flags.mr_reset_hdlr_active = 0;
	return QLA_SUCCESS;
}
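
/*
 * NOTE (illustrative): qlafx00_reset_initialize() above handles a reset
 * initiated by the iSA device itself (mr_reset_hdlr_active suppresses the
 * heartbeat check in the timer routine while it runs), whereas
 * qlafx00_abort_isp() below is the driver-initiated path taken by the DPC
 * thread when ISP_ABORT_NEEDED is set, e.g. after a mailbox timeout or a
 * run of missed firmware heartbeats.
 */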
/*
 * qlafx00_abort_isp
 *	Resets ISP and aborts all outstanding commands.
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Returns:
 *	0 = success
 */
int
qlafx00_abort_isp(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;

	if (vha->flags.online) {
		if (unlikely(pci_channel_offline(ha->pdev) &&
		    ha->flags.pci_channel_io_perm_failure)) {
			clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
			return QLA_SUCCESS;
		}

		scsi_block_requests(vha->host);
		qlafx00_abort_isp_cleanup(vha, false);
	} else {
		scsi_block_requests(vha->host);
		clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		vha->qla_stats.total_isp_aborts++;
		ha->isp_ops->reset_chip(vha);
		set_bit(FX00_RESET_RECOVERY, &vha->dpc_flags);
		/* Clear the Interrupts */
		QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);
	}

	ql_log(ql_log_info, vha, 0x0145,
	    "(%s): succeeded.\n", __func__);

	return QLA_SUCCESS;
}

static inline fc_port_t*
qlafx00_get_fcport(struct scsi_qla_host *vha, int tgt_id)
{
	fc_port_t *fcport;

	/* Check for matching device in remote port list. */
	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (fcport->tgt_id == tgt_id) {
			ql_dbg(ql_dbg_async, vha, 0x5072,
			    "Matching fcport(%p) found with TGT-ID: 0x%x "
			    "and Remote TGT_ID: 0x%x\n",
			    fcport, fcport->tgt_id, tgt_id);
			return fcport;
		}
	}
	return NULL;
}

static void
qlafx00_tgt_detach(struct scsi_qla_host *vha, int tgt_id)
{
	fc_port_t *fcport;

	ql_log(ql_log_info, vha, 0x5073,
	    "Detach TGT-ID: 0x%x\n", tgt_id);

	fcport = qlafx00_get_fcport(vha, tgt_id);
	if (!fcport)
		return;

	qla2x00_mark_device_lost(vha, fcport, 0, 0);
}

int
qlafx00_process_aen(struct scsi_qla_host *vha, struct qla_work_evt *evt)
{
	int rval = 0;
	uint32_t aen_code, aen_data;

	aen_code = FCH_EVT_VENDOR_UNIQUE;
	aen_data = evt->u.aenfx.evtcode;

	switch (evt->u.aenfx.evtcode) {
	case QLAFX00_MBA_PORT_UPDATE:		/* Port database update */
		if (evt->u.aenfx.mbx[1] == 0) {
			if (evt->u.aenfx.mbx[2] == 1) {
				if (!vha->flags.fw_tgt_reported)
					vha->flags.fw_tgt_reported = 1;
				atomic_set(&vha->loop_down_timer, 0);
				atomic_set(&vha->loop_state, LOOP_UP);
				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
			} else if (evt->u.aenfx.mbx[2] == 2) {
				qlafx00_tgt_detach(vha, evt->u.aenfx.mbx[3]);
			}
		} else if (evt->u.aenfx.mbx[1] == 0xffff) {
			if (evt->u.aenfx.mbx[2] == 1) {
				if (!vha->flags.fw_tgt_reported)
					vha->flags.fw_tgt_reported = 1;
				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
			} else if (evt->u.aenfx.mbx[2] == 2) {
				vha->device_flags |= DFLG_NO_CABLE;
				qla2x00_mark_all_devices_lost(vha, 1);
			}
		}
		break;
	case QLAFX00_MBA_LINK_UP:
		aen_code = FCH_EVT_LINKUP;
		aen_data = 0;
		break;
	case QLAFX00_MBA_LINK_DOWN:
		aen_code = FCH_EVT_LINKDOWN;
		aen_data = 0;
		break;
	case QLAFX00_MBA_TEMP_CRIT:	/* Critical temperature event */
		ql_log(ql_log_info, vha, 0x5082,
		    "Process critical temperature event "
		    "aenmb[0]: %x\n",
		    evt->u.aenfx.evtcode);
		scsi_block_requests(vha->host);
		qlafx00_abort_isp_cleanup(vha, true);
		scsi_unblock_requests(vha->host);
		break;
	}

	fc_host_post_event(vha->host, fc_get_event_number(),
	    aen_code, aen_data);

	return rval;
}
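
/*
 * NOTE (illustrative): qlafx00_process_aen() above folds the firmware
 * event codes into FC transport events roughly as follows:
 *
 *	QLAFX00_MBA_PORT_UPDATE	-> loop resync / target detach, reported
 *				   to the transport as FCH_EVT_VENDOR_UNIQUE
 *	QLAFX00_MBA_LINK_UP	-> FCH_EVT_LINKUP
 *	QLAFX00_MBA_LINK_DOWN	-> FCH_EVT_LINKDOWN
 *	QLAFX00_MBA_TEMP_CRIT	-> critical-temperature cleanup, reported
 *				   as FCH_EVT_VENDOR_UNIQUE
 *
 * Every path ends in fc_host_post_event() so user space sees the event.
 */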
static void
qlafx00_update_host_attr(scsi_qla_host_t *vha, struct port_info_data *pinfo)
{
	u64 port_name = 0, node_name = 0;

	port_name = (unsigned long long)wwn_to_u64(pinfo->port_name);
	node_name = (unsigned long long)wwn_to_u64(pinfo->node_name);

	fc_host_node_name(vha->host) = node_name;
	fc_host_port_name(vha->host) = port_name;
	if (!pinfo->port_type)
		vha->hw->current_topology = ISP_CFG_F;
	if (pinfo->link_status == QLAFX00_LINK_STATUS_UP)
		atomic_set(&vha->loop_state, LOOP_READY);
	else if (pinfo->link_status == QLAFX00_LINK_STATUS_DOWN)
		atomic_set(&vha->loop_state, LOOP_DOWN);
	vha->hw->link_data_rate = (uint16_t)pinfo->link_config;
}

static void
qla2x00_fxdisc_iocb_timeout(void *data)
{
	srb_t *sp = data;
	struct srb_iocb *lio = &sp->u.iocb_cmd;

	complete(&lio->u.fxiocb.fxiocb_comp);
}

static void
qla2x00_fxdisc_sp_done(void *ptr, int res)
{
	srb_t *sp = ptr;
	struct srb_iocb *lio = &sp->u.iocb_cmd;

	complete(&lio->u.fxiocb.fxiocb_comp);
}

int
qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t fx_type)
{
	srb_t *sp;
	struct srb_iocb *fdisc;
	int rval = QLA_FUNCTION_FAILED;
	struct qla_hw_data *ha = vha->hw;
	struct host_system_info *phost_info;
	struct register_host_info *preg_hsi;
	struct new_utsname *p_sysid = NULL;

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	sp->type = SRB_FXIOCB_DCMD;
	sp->name = "fxdisc";

	fdisc = &sp->u.iocb_cmd;
	fdisc->timeout = qla2x00_fxdisc_iocb_timeout;
	qla2x00_init_timer(sp, FXDISC_TIMEOUT);

	switch (fx_type) {
	case FXDISC_GET_CONFIG_INFO:
		fdisc->u.fxiocb.flags =
		    SRB_FXDISC_RESP_DMA_VALID;
		fdisc->u.fxiocb.rsp_len = sizeof(struct config_info_data);
		break;
	case FXDISC_GET_PORT_INFO:
		fdisc->u.fxiocb.flags =
		    SRB_FXDISC_RESP_DMA_VALID | SRB_FXDISC_REQ_DWRD_VALID;
		fdisc->u.fxiocb.rsp_len = QLAFX00_PORT_DATA_INFO;
		fdisc->u.fxiocb.req_data = cpu_to_le32(fcport->port_id);
		break;
	case FXDISC_GET_TGT_NODE_INFO:
		fdisc->u.fxiocb.flags =
		    SRB_FXDISC_RESP_DMA_VALID | SRB_FXDISC_REQ_DWRD_VALID;
		fdisc->u.fxiocb.rsp_len = QLAFX00_TGT_NODE_INFO;
		fdisc->u.fxiocb.req_data = cpu_to_le32(fcport->tgt_id);
		break;
	case FXDISC_GET_TGT_NODE_LIST:
		fdisc->u.fxiocb.flags =
		    SRB_FXDISC_RESP_DMA_VALID | SRB_FXDISC_REQ_DWRD_VALID;
		fdisc->u.fxiocb.rsp_len = QLAFX00_TGT_NODE_LIST_SIZE;
		break;
	case FXDISC_REG_HOST_INFO:
		fdisc->u.fxiocb.flags = SRB_FXDISC_REQ_DMA_VALID;
		fdisc->u.fxiocb.req_len = sizeof(struct register_host_info);
		p_sysid = utsname();
		if (!p_sysid) {
			ql_log(ql_log_warn, vha, 0x303c,
			    "Not able to get the system information\n");
			goto done_free_sp;
		}
		break;
	case FXDISC_ABORT_IOCTL:
	default:
		break;
	}

	if (fdisc->u.fxiocb.flags & SRB_FXDISC_REQ_DMA_VALID) {
		fdisc->u.fxiocb.req_addr = dma_alloc_coherent(&ha->pdev->dev,
		    fdisc->u.fxiocb.req_len,
		    &fdisc->u.fxiocb.req_dma_handle, GFP_KERNEL);
		if (!fdisc->u.fxiocb.req_addr)
			goto done_free_sp;

		if (fx_type == FXDISC_REG_HOST_INFO) {
			preg_hsi = (struct register_host_info *)
			    fdisc->u.fxiocb.req_addr;
			phost_info = &preg_hsi->hsi;
			memset(preg_hsi, 0, sizeof(struct register_host_info));
			phost_info->os_type = OS_TYPE_LINUX;
			strncpy(phost_info->sysname,
			    p_sysid->sysname, SYSNAME_LENGTH);
			strncpy(phost_info->nodename,
			    p_sysid->nodename, NODENAME_LENGTH);
			if (!strcmp(phost_info->nodename, "(none)"))
				ha->mr.host_info_resend = true;
			strncpy(phost_info->release,
			    p_sysid->release, RELEASE_LENGTH);
			strncpy(phost_info->version,
			    p_sysid->version, VERSION_LENGTH);
			strncpy(phost_info->machine,
			    p_sysid->machine, MACHINE_LENGTH);
			strncpy(phost_info->domainname,
			    p_sysid->domainname, DOMNAME_LENGTH);
			strncpy(phost_info->hostdriver,
			    QLA2XXX_VERSION, VERSION_LENGTH);
			preg_hsi->utc = (uint64_t)ktime_get_real_seconds();
			ql_dbg(ql_dbg_init, vha, 0x0149,
			    "ISP%04X: Host registration with firmware\n",
			    ha->pdev->device);
			ql_dbg(ql_dbg_init, vha, 0x014a,
			    "os_type = '%d', sysname = '%s', nodename = '%s'\n",
			    phost_info->os_type,
			    phost_info->sysname,
			    phost_info->nodename);
			ql_dbg(ql_dbg_init, vha, 0x014b,
			    "release = '%s', version = '%s'\n",
			    phost_info->release,
			    phost_info->version);
			ql_dbg(ql_dbg_init, vha, 0x014c,
			    "machine = '%s' "
			    "domainname = '%s', hostdriver = '%s'\n",
			    phost_info->machine,
			    phost_info->domainname,
			    phost_info->hostdriver);
			ql_dump_buffer(ql_dbg_init + ql_dbg_disc, vha, 0x014d,
			    (uint8_t *)phost_info,
			    sizeof(struct host_system_info));
		}
	}

	if (fdisc->u.fxiocb.flags & SRB_FXDISC_RESP_DMA_VALID) {
		fdisc->u.fxiocb.rsp_addr = dma_alloc_coherent(&ha->pdev->dev,
		    fdisc->u.fxiocb.rsp_len,
		    &fdisc->u.fxiocb.rsp_dma_handle, GFP_KERNEL);
		if (!fdisc->u.fxiocb.rsp_addr)
			goto done_unmap_req;
	}

	fdisc->u.fxiocb.req_func_type = cpu_to_le16(fx_type);
	sp->done = qla2x00_fxdisc_sp_done;

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_unmap_dma;

	wait_for_completion(&fdisc->u.fxiocb.fxiocb_comp);

	if (fx_type == FXDISC_GET_CONFIG_INFO) {
		struct config_info_data *pinfo =
		    (struct config_info_data *) fdisc->u.fxiocb.rsp_addr;
		strcpy(vha->hw->model_number, pinfo->model_num);
		strcpy(vha->hw->model_desc, pinfo->model_description);
		memcpy(&vha->hw->mr.symbolic_name, pinfo->symbolic_name,
		    sizeof(vha->hw->mr.symbolic_name));
		memcpy(&vha->hw->mr.serial_num, pinfo->serial_num,
		    sizeof(vha->hw->mr.serial_num));
		memcpy(&vha->hw->mr.hw_version, pinfo->hw_version,
		    sizeof(vha->hw->mr.hw_version));
		memcpy(&vha->hw->mr.fw_version, pinfo->fw_version,
		    sizeof(vha->hw->mr.fw_version));
		strim(vha->hw->mr.fw_version);
		memcpy(&vha->hw->mr.uboot_version, pinfo->uboot_version,
		    sizeof(vha->hw->mr.uboot_version));
		memcpy(&vha->hw->mr.fru_serial_num, pinfo->fru_serial_num,
		    sizeof(vha->hw->mr.fru_serial_num));
		vha->hw->mr.critical_temperature =
		    (pinfo->nominal_temp_value) ?
		    pinfo->nominal_temp_value : QLAFX00_CRITEMP_THRSHLD;
		ha->mr.extended_io_enabled = (pinfo->enabled_capabilities &
		    QLAFX00_EXTENDED_IO_EN_MASK) != 0;
	} else if (fx_type == FXDISC_GET_PORT_INFO) {
		struct port_info_data *pinfo =
		    (struct port_info_data *) fdisc->u.fxiocb.rsp_addr;
		memcpy(vha->node_name, pinfo->node_name, WWN_SIZE);
		memcpy(vha->port_name, pinfo->port_name, WWN_SIZE);
		vha->d_id.b.domain = pinfo->port_id[0];
		vha->d_id.b.area = pinfo->port_id[1];
		vha->d_id.b.al_pa = pinfo->port_id[2];
		qlafx00_update_host_attr(vha, pinfo);
		ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0141,
		    (uint8_t *)pinfo, 16);
	} else if (fx_type == FXDISC_GET_TGT_NODE_INFO) {
		struct qlafx00_tgt_node_info *pinfo =
		    (struct qlafx00_tgt_node_info *) fdisc->u.fxiocb.rsp_addr;
		memcpy(fcport->node_name, pinfo->tgt_node_wwnn, WWN_SIZE);
		memcpy(fcport->port_name, pinfo->tgt_node_wwpn, WWN_SIZE);
		fcport->port_type = FCT_TARGET;
		ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0144,
		    (uint8_t *)pinfo, 16);
	} else if (fx_type == FXDISC_GET_TGT_NODE_LIST) {
		struct qlafx00_tgt_node_info *pinfo =
		    (struct qlafx00_tgt_node_info *) fdisc->u.fxiocb.rsp_addr;
		ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0146,
		    (uint8_t *)pinfo, 16);
		memcpy(vha->hw->gid_list, pinfo, QLAFX00_TGT_NODE_LIST_SIZE);
	} else if (fx_type == FXDISC_ABORT_IOCTL)
		fdisc->u.fxiocb.result =
		    (fdisc->u.fxiocb.result ==
		    cpu_to_le32(QLAFX00_IOCTL_ICOB_ABORT_SUCCESS)) ?
		    cpu_to_le32(QLA_SUCCESS) : cpu_to_le32(QLA_FUNCTION_FAILED);

	rval = le32_to_cpu(fdisc->u.fxiocb.result);

done_unmap_dma:
	if (fdisc->u.fxiocb.rsp_addr)
		dma_free_coherent(&ha->pdev->dev, fdisc->u.fxiocb.rsp_len,
		    fdisc->u.fxiocb.rsp_addr, fdisc->u.fxiocb.rsp_dma_handle);

done_unmap_req:
	if (fdisc->u.fxiocb.req_addr)
		dma_free_coherent(&ha->pdev->dev, fdisc->u.fxiocb.req_len,
		    fdisc->u.fxiocb.req_addr, fdisc->u.fxiocb.req_dma_handle);
done_free_sp:
	sp->free(sp);
done:
	return rval;
}
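
/*
 * NOTE (illustrative): qlafx00_fx_disc() is the single entry point for
 * the FX00 discovery/management IOCBs; callers pick the operation with
 * fx_type and the routine handles DMA buffer setup and completion
 * internally, e.g. (as used elsewhere in this file):
 *
 *	rval = qlafx00_fx_disc(vha, &vha->hw->mr.fcport,
 *	    FXDISC_REG_HOST_INFO);		// register host info with fw
 *	rval = qlafx00_fx_disc(vha, new_fcport,
 *	    FXDISC_GET_TGT_NODE_INFO);		// per-target node query
 */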
*/ 2027 vha->flags.online = 0; 2028 ha->flags.chip_reset_done = 0; 2029 vha->flags.reset_active = 0; 2030 ha->flags.pci_channel_io_perm_failure = 0; 2031 ha->flags.eeh_busy = 0; 2032 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 2033 atomic_set(&vha->loop_state, LOOP_DOWN); 2034 vha->device_flags = DFLG_NO_CABLE; 2035 vha->dpc_flags = 0; 2036 vha->flags.management_server_logged_in = 0; 2037 ha->isp_abort_cnt = 0; 2038 ha->beacon_blink_led = 0; 2039 2040 set_bit(0, ha->req_qid_map); 2041 set_bit(0, ha->rsp_qid_map); 2042 2043 ql_dbg(ql_dbg_init, vha, 0x0147, 2044 "Configuring PCI space...\n"); 2045 2046 rval = ha->isp_ops->pci_config(vha); 2047 if (rval) { 2048 ql_log(ql_log_warn, vha, 0x0148, 2049 "Unable to configure PCI space.\n"); 2050 return rval; 2051 } 2052 2053 rval = qlafx00_init_fw_ready(vha); 2054 if (rval != QLA_SUCCESS) 2055 return rval; 2056 2057 qlafx00_save_queue_ptrs(vha); 2058 2059 rval = qlafx00_config_queues(vha); 2060 if (rval != QLA_SUCCESS) 2061 return rval; 2062 2063 /* 2064 * Allocate the array of outstanding commands 2065 * now that we know the firmware resources. 2066 */ 2067 rval = qla2x00_alloc_outstanding_cmds(ha, vha->req); 2068 if (rval != QLA_SUCCESS) 2069 return rval; 2070 2071 rval = qla2x00_init_rings(vha); 2072 ha->flags.chip_reset_done = 1; 2073 2074 tempc = QLAFX00_GET_TEMPERATURE(ha); 2075 ql_dbg(ql_dbg_init, vha, 0x0152, 2076 "ISPFx00(%s): Critical temp timer, current SOC temperature: 0x%x\n", 2077 __func__, tempc); 2078 2079 return rval; 2080 } 2081 2082 uint32_t 2083 qlafx00_fw_state_show(struct device *dev, struct device_attribute *attr, 2084 char *buf) 2085 { 2086 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 2087 int rval = QLA_FUNCTION_FAILED; 2088 uint32_t state[1]; 2089 2090 if (qla2x00_reset_active(vha)) 2091 ql_log(ql_log_warn, vha, 0x70ce, 2092 "ISP reset active.\n"); 2093 else if (!vha->hw->flags.eeh_busy) { 2094 rval = qlafx00_get_firmware_state(vha, state); 2095 } 2096 if (rval != QLA_SUCCESS) 2097 memset(state, -1, sizeof(state)); 2098 2099 return state[0]; 2100 } 2101 2102 void 2103 qlafx00_get_host_speed(struct Scsi_Host *shost) 2104 { 2105 struct qla_hw_data *ha = ((struct scsi_qla_host *) 2106 (shost_priv(shost)))->hw; 2107 u32 speed = FC_PORTSPEED_UNKNOWN; 2108 2109 switch (ha->link_data_rate) { 2110 case QLAFX00_PORT_SPEED_2G: 2111 speed = FC_PORTSPEED_2GBIT; 2112 break; 2113 case QLAFX00_PORT_SPEED_4G: 2114 speed = FC_PORTSPEED_4GBIT; 2115 break; 2116 case QLAFX00_PORT_SPEED_8G: 2117 speed = FC_PORTSPEED_8GBIT; 2118 break; 2119 case QLAFX00_PORT_SPEED_10G: 2120 speed = FC_PORTSPEED_10GBIT; 2121 break; 2122 } 2123 fc_host_speed(shost) = speed; 2124 } 2125 2126 /** QLAFX00 specific ISR implementation functions */ 2127 2128 static inline void 2129 qlafx00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len, 2130 uint32_t sense_len, struct rsp_que *rsp, int res) 2131 { 2132 struct scsi_qla_host *vha = sp->vha; 2133 struct scsi_cmnd *cp = GET_CMD_SP(sp); 2134 uint32_t track_sense_len; 2135 2136 SET_FW_SENSE_LEN(sp, sense_len); 2137 2138 if (sense_len >= SCSI_SENSE_BUFFERSIZE) 2139 sense_len = SCSI_SENSE_BUFFERSIZE; 2140 2141 SET_CMD_SENSE_LEN(sp, sense_len); 2142 SET_CMD_SENSE_PTR(sp, cp->sense_buffer); 2143 track_sense_len = sense_len; 2144 2145 if (sense_len > par_sense_len) 2146 sense_len = par_sense_len; 2147 2148 memcpy(cp->sense_buffer, sense_data, sense_len); 2149 2150 SET_FW_SENSE_LEN(sp, GET_FW_SENSE_LEN(sp) - sense_len); 2151 2152 SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len); 2153 
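    /*
     * Two byte counts are tracked independently from here on:
     * CMD_SENSE_LEN/CMD_SENSE_PTR follow the space still free in
     * cp->sense_buffer, while FW_SENSE_LEN follows the sense bytes the
     * firmware still owes us.  Worked example (values chosen for
     * illustration): the firmware reports 0x40 bytes but par_sense_len
     * is 0x20, so 0x20 bytes were copied above, FW_SENSE_LEN drops to
     * 0x20, and the remainder arrives through
     * qlafx00_status_cont_entry().
     */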
    track_sense_len -= sense_len;
2154     SET_CMD_SENSE_LEN(sp, track_sense_len);
2155 
2156     ql_dbg(ql_dbg_io, vha, 0x304d,
2157         "sense_len=0x%x par_sense_len=0x%x track_sense_len=0x%x.\n",
2158         sense_len, par_sense_len, track_sense_len);
2159     if (GET_FW_SENSE_LEN(sp) > 0) {
2160         rsp->status_srb = sp;
2161         cp->result = res;
2162     }
2163 
2164     if (sense_len) {
2165         ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3039,
2166             "Check condition Sense data, nexus%ld:%d:%llu cmd=%p.\n",
2167             sp->vha->host_no, cp->device->id, cp->device->lun,
2168             cp);
2169         ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x3049,
2170             cp->sense_buffer, sense_len);
2171     }
2172 }
2173 
2174 static void
2175 qlafx00_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
2176     struct tsk_mgmt_entry_fx00 *pkt, srb_t *sp,
2177     __le16 sstatus, __le16 cpstatus)
2178 {
2179     struct srb_iocb *tmf;
2180 
2181     tmf = &sp->u.iocb_cmd;
2182     if (cpstatus != cpu_to_le16((uint16_t)CS_COMPLETE) ||
2183         (sstatus & cpu_to_le16((uint16_t)SS_RESPONSE_INFO_LEN_VALID)))
2184         cpstatus = cpu_to_le16((uint16_t)CS_INCOMPLETE);
2185     tmf->u.tmf.comp_status = cpstatus;
2186     sp->done(sp, 0);
2187 }
2188 
2189 static void
2190 qlafx00_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
2191     struct abort_iocb_entry_fx00 *pkt)
2192 {
2193     const char func[] = "ABT_IOCB";
2194     srb_t *sp;
2195     struct srb_iocb *abt;
2196 
2197     sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
2198     if (!sp)
2199         return;
2200 
2201     abt = &sp->u.iocb_cmd;
2202     abt->u.abt.comp_status = pkt->tgt_id_sts;
2203     sp->done(sp, 0);
2204 }
2205 
2206 static void
2207 qlafx00_ioctl_iosb_entry(scsi_qla_host_t *vha, struct req_que *req,
2208     struct ioctl_iocb_entry_fx00 *pkt)
2209 {
2210     const char func[] = "IOSB_IOCB";
2211     srb_t *sp;
2212     struct bsg_job *bsg_job;
2213     struct fc_bsg_reply *bsg_reply;
2214     struct srb_iocb *iocb_job;
2215     int res = 0;
2216     struct qla_mt_iocb_rsp_fx00 fstatus;
2217     uint8_t *fw_sts_ptr;
2218 
2219     sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
2220     if (!sp)
2221         return;
2222 
2223     if (sp->type == SRB_FXIOCB_DCMD) {
2224         iocb_job = &sp->u.iocb_cmd;
2225         iocb_job->u.fxiocb.seq_number = pkt->seq_no;
2226         iocb_job->u.fxiocb.fw_flags = pkt->fw_iotcl_flags;
2227         iocb_job->u.fxiocb.result = pkt->status;
2228         if (iocb_job->u.fxiocb.flags & SRB_FXDISC_RSP_DWRD_VALID)
2229             iocb_job->u.fxiocb.req_data =
2230                 pkt->dataword_r;
2231     } else {
2232         bsg_job = sp->u.bsg_job;
2233         bsg_reply = bsg_job->reply;
2234 
2235         memset(&fstatus, 0, sizeof(struct qla_mt_iocb_rsp_fx00));
2236 
2237         fstatus.reserved_1 = pkt->reserved_0;
2238         fstatus.func_type = pkt->comp_func_num;
2239         fstatus.ioctl_flags = pkt->fw_iotcl_flags;
2240         fstatus.ioctl_data = pkt->dataword_r;
2241         fstatus.adapid = pkt->adapid;
2242         fstatus.reserved_2 = pkt->dataword_r_extra;
2243         fstatus.res_count = pkt->residuallen;
2244         fstatus.status = pkt->status;
2245         fstatus.seq_number = pkt->seq_no;
2246         memcpy(fstatus.reserved_3,
2247             pkt->reserved_2, 20 * sizeof(uint8_t));
2248 
2249         fw_sts_ptr = bsg_job->reply + sizeof(struct fc_bsg_reply);
2250 
2251         memcpy(fw_sts_ptr, (uint8_t *)&fstatus,
2252             sizeof(struct qla_mt_iocb_rsp_fx00));
2253         bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
2254             sizeof(struct qla_mt_iocb_rsp_fx00) + sizeof(uint8_t);
2255 
2256         ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
2257             sp->fcport->vha, 0x5080,
2258             (uint8_t *)pkt, sizeof(struct ioctl_iocb_entry_fx00));
2259 
2260         ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
2261             sp->fcport->vha, 0x5074,
2262             (uint8_t *)fw_sts_ptr, sizeof(struct
qla_mt_iocb_rsp_fx00)); 2263 2264 res = bsg_reply->result = DID_OK << 16; 2265 bsg_reply->reply_payload_rcv_len = 2266 bsg_job->reply_payload.payload_len; 2267 } 2268 sp->done(sp, res); 2269 } 2270 2271 /** 2272 * qlafx00_status_entry() - Process a Status IOCB entry. 2273 * @vha: SCSI driver HA context 2274 * @rsp: response queue 2275 * @pkt: Entry pointer 2276 */ 2277 static void 2278 qlafx00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) 2279 { 2280 srb_t *sp; 2281 fc_port_t *fcport; 2282 struct scsi_cmnd *cp; 2283 struct sts_entry_fx00 *sts; 2284 __le16 comp_status; 2285 __le16 scsi_status; 2286 __le16 lscsi_status; 2287 int32_t resid; 2288 uint32_t sense_len, par_sense_len, rsp_info_len, resid_len, 2289 fw_resid_len; 2290 uint8_t *rsp_info = NULL, *sense_data = NULL; 2291 struct qla_hw_data *ha = vha->hw; 2292 uint32_t hindex, handle; 2293 uint16_t que; 2294 struct req_que *req; 2295 int logit = 1; 2296 int res = 0; 2297 2298 sts = (struct sts_entry_fx00 *) pkt; 2299 2300 comp_status = sts->comp_status; 2301 scsi_status = sts->scsi_status & cpu_to_le16((uint16_t)SS_MASK); 2302 hindex = sts->handle; 2303 handle = LSW(hindex); 2304 2305 que = MSW(hindex); 2306 req = ha->req_q_map[que]; 2307 2308 /* Validate handle. */ 2309 if (handle < req->num_outstanding_cmds) 2310 sp = req->outstanding_cmds[handle]; 2311 else 2312 sp = NULL; 2313 2314 if (sp == NULL) { 2315 ql_dbg(ql_dbg_io, vha, 0x3034, 2316 "Invalid status handle (0x%x).\n", handle); 2317 2318 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 2319 qla2xxx_wake_dpc(vha); 2320 return; 2321 } 2322 2323 if (sp->type == SRB_TM_CMD) { 2324 req->outstanding_cmds[handle] = NULL; 2325 qlafx00_tm_iocb_entry(vha, req, pkt, sp, 2326 scsi_status, comp_status); 2327 return; 2328 } 2329 2330 /* Fast path completion. */ 2331 if (comp_status == CS_COMPLETE && scsi_status == 0) { 2332 qla2x00_process_completed_request(vha, req, handle); 2333 return; 2334 } 2335 2336 req->outstanding_cmds[handle] = NULL; 2337 cp = GET_CMD_SP(sp); 2338 if (cp == NULL) { 2339 ql_dbg(ql_dbg_io, vha, 0x3048, 2340 "Command already returned (0x%x/%p).\n", 2341 handle, sp); 2342 2343 return; 2344 } 2345 2346 lscsi_status = scsi_status & cpu_to_le16((uint16_t)STATUS_MASK); 2347 2348 fcport = sp->fcport; 2349 2350 sense_len = par_sense_len = rsp_info_len = resid_len = 2351 fw_resid_len = 0; 2352 if (scsi_status & cpu_to_le16((uint16_t)SS_SENSE_LEN_VALID)) 2353 sense_len = sts->sense_len; 2354 if (scsi_status & cpu_to_le16(((uint16_t)SS_RESIDUAL_UNDER 2355 | (uint16_t)SS_RESIDUAL_OVER))) 2356 resid_len = le32_to_cpu(sts->residual_len); 2357 if (comp_status == cpu_to_le16((uint16_t)CS_DATA_UNDERRUN)) 2358 fw_resid_len = le32_to_cpu(sts->residual_len); 2359 rsp_info = sense_data = sts->data; 2360 par_sense_len = sizeof(sts->data); 2361 2362 /* Check for overrun. 
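 *
 * A target can report SS_RESIDUAL_OVER together with a clean transport
 * status; rewriting comp_status to CS_DATA_OVERRUN here lets the
 * switch below fold that case into the generic DID_ERROR handling.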
*/ 2363 if (comp_status == CS_COMPLETE && 2364 scsi_status & cpu_to_le16((uint16_t)SS_RESIDUAL_OVER)) 2365 comp_status = cpu_to_le16((uint16_t)CS_DATA_OVERRUN); 2366 2367 /* 2368 * Based on Host and scsi status generate status code for Linux 2369 */ 2370 switch (le16_to_cpu(comp_status)) { 2371 case CS_COMPLETE: 2372 case CS_QUEUE_FULL: 2373 if (scsi_status == 0) { 2374 res = DID_OK << 16; 2375 break; 2376 } 2377 if (scsi_status & cpu_to_le16(((uint16_t)SS_RESIDUAL_UNDER 2378 | (uint16_t)SS_RESIDUAL_OVER))) { 2379 resid = resid_len; 2380 scsi_set_resid(cp, resid); 2381 2382 if (!lscsi_status && 2383 ((unsigned)(scsi_bufflen(cp) - resid) < 2384 cp->underflow)) { 2385 ql_dbg(ql_dbg_io, fcport->vha, 0x3050, 2386 "Mid-layer underflow " 2387 "detected (0x%x of 0x%x bytes).\n", 2388 resid, scsi_bufflen(cp)); 2389 2390 res = DID_ERROR << 16; 2391 break; 2392 } 2393 } 2394 res = DID_OK << 16 | le16_to_cpu(lscsi_status); 2395 2396 if (lscsi_status == 2397 cpu_to_le16((uint16_t)SAM_STAT_TASK_SET_FULL)) { 2398 ql_dbg(ql_dbg_io, fcport->vha, 0x3051, 2399 "QUEUE FULL detected.\n"); 2400 break; 2401 } 2402 logit = 0; 2403 if (lscsi_status != cpu_to_le16((uint16_t)SS_CHECK_CONDITION)) 2404 break; 2405 2406 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); 2407 if (!(scsi_status & cpu_to_le16((uint16_t)SS_SENSE_LEN_VALID))) 2408 break; 2409 2410 qlafx00_handle_sense(sp, sense_data, par_sense_len, sense_len, 2411 rsp, res); 2412 break; 2413 2414 case CS_DATA_UNDERRUN: 2415 /* Use F/W calculated residual length. */ 2416 if (IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha)) 2417 resid = fw_resid_len; 2418 else 2419 resid = resid_len; 2420 scsi_set_resid(cp, resid); 2421 if (scsi_status & cpu_to_le16((uint16_t)SS_RESIDUAL_UNDER)) { 2422 if ((IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha)) 2423 && fw_resid_len != resid_len) { 2424 ql_dbg(ql_dbg_io, fcport->vha, 0x3052, 2425 "Dropped frame(s) detected " 2426 "(0x%x of 0x%x bytes).\n", 2427 resid, scsi_bufflen(cp)); 2428 2429 res = DID_ERROR << 16 | 2430 le16_to_cpu(lscsi_status); 2431 goto check_scsi_status; 2432 } 2433 2434 if (!lscsi_status && 2435 ((unsigned)(scsi_bufflen(cp) - resid) < 2436 cp->underflow)) { 2437 ql_dbg(ql_dbg_io, fcport->vha, 0x3053, 2438 "Mid-layer underflow " 2439 "detected (0x%x of 0x%x bytes, " 2440 "cp->underflow: 0x%x).\n", 2441 resid, scsi_bufflen(cp), cp->underflow); 2442 2443 res = DID_ERROR << 16; 2444 break; 2445 } 2446 } else if (lscsi_status != 2447 cpu_to_le16((uint16_t)SAM_STAT_TASK_SET_FULL) && 2448 lscsi_status != cpu_to_le16((uint16_t)SAM_STAT_BUSY)) { 2449 /* 2450 * scsi status of task set and busy are considered 2451 * to be task not completed. 2452 */ 2453 2454 ql_dbg(ql_dbg_io, fcport->vha, 0x3054, 2455 "Dropped frame(s) detected (0x%x " 2456 "of 0x%x bytes).\n", resid, 2457 scsi_bufflen(cp)); 2458 2459 res = DID_ERROR << 16 | le16_to_cpu(lscsi_status); 2460 goto check_scsi_status; 2461 } else { 2462 ql_dbg(ql_dbg_io, fcport->vha, 0x3055, 2463 "scsi_status: 0x%x, lscsi_status: 0x%x\n", 2464 scsi_status, lscsi_status); 2465 } 2466 2467 res = DID_OK << 16 | le16_to_cpu(lscsi_status); 2468 logit = 0; 2469 2470 check_scsi_status: 2471 /* 2472 * Check to see if SCSI Status is non zero. If so report SCSI 2473 * Status. 
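 * (For example, an underrun that also carries SAM_STAT_CHECK_CONDITION
 * still surfaces the check condition and its sense data below rather
 * than completing with a bare DID_ERROR.)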
2474 */ 2475 if (lscsi_status != 0) { 2476 if (lscsi_status == 2477 cpu_to_le16((uint16_t)SAM_STAT_TASK_SET_FULL)) { 2478 ql_dbg(ql_dbg_io, fcport->vha, 0x3056, 2479 "QUEUE FULL detected.\n"); 2480 logit = 1; 2481 break; 2482 } 2483 if (lscsi_status != 2484 cpu_to_le16((uint16_t)SS_CHECK_CONDITION)) 2485 break; 2486 2487 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); 2488 if (!(scsi_status & 2489 cpu_to_le16((uint16_t)SS_SENSE_LEN_VALID))) 2490 break; 2491 2492 qlafx00_handle_sense(sp, sense_data, par_sense_len, 2493 sense_len, rsp, res); 2494 } 2495 break; 2496 2497 case CS_PORT_LOGGED_OUT: 2498 case CS_PORT_CONFIG_CHG: 2499 case CS_PORT_BUSY: 2500 case CS_INCOMPLETE: 2501 case CS_PORT_UNAVAILABLE: 2502 case CS_TIMEOUT: 2503 case CS_RESET: 2504 2505 /* 2506 * We are going to have the fc class block the rport 2507 * while we try to recover so instruct the mid layer 2508 * to requeue until the class decides how to handle this. 2509 */ 2510 res = DID_TRANSPORT_DISRUPTED << 16; 2511 2512 ql_dbg(ql_dbg_io, fcport->vha, 0x3057, 2513 "Port down status: port-state=0x%x.\n", 2514 atomic_read(&fcport->state)); 2515 2516 if (atomic_read(&fcport->state) == FCS_ONLINE) 2517 qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1); 2518 break; 2519 2520 case CS_ABORTED: 2521 res = DID_RESET << 16; 2522 break; 2523 2524 default: 2525 res = DID_ERROR << 16; 2526 break; 2527 } 2528 2529 if (logit) 2530 ql_dbg(ql_dbg_io, fcport->vha, 0x3058, 2531 "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu " 2532 "tgt_id: 0x%x lscsi_status: 0x%x cdb=%10phN len=0x%x " 2533 "rsp_info=%p resid=0x%x fw_resid=0x%x sense_len=0x%x, " 2534 "par_sense_len=0x%x, rsp_info_len=0x%x\n", 2535 comp_status, scsi_status, res, vha->host_no, 2536 cp->device->id, cp->device->lun, fcport->tgt_id, 2537 lscsi_status, cp->cmnd, scsi_bufflen(cp), 2538 rsp_info, resid_len, fw_resid_len, sense_len, 2539 par_sense_len, rsp_info_len); 2540 2541 if (rsp->status_srb == NULL) 2542 sp->done(sp, res); 2543 } 2544 2545 /** 2546 * qlafx00_status_cont_entry() - Process a Status Continuations entry. 2547 * @rsp: response queue 2548 * @pkt: Entry pointer 2549 * 2550 * Extended sense data. 2551 */ 2552 static void 2553 qlafx00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt) 2554 { 2555 uint8_t sense_sz = 0; 2556 struct qla_hw_data *ha = rsp->hw; 2557 struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev); 2558 srb_t *sp = rsp->status_srb; 2559 struct scsi_cmnd *cp; 2560 uint32_t sense_len; 2561 uint8_t *sense_ptr; 2562 2563 if (!sp) { 2564 ql_dbg(ql_dbg_io, vha, 0x3037, 2565 "no SP, sp = %p\n", sp); 2566 return; 2567 } 2568 2569 if (!GET_FW_SENSE_LEN(sp)) { 2570 ql_dbg(ql_dbg_io, vha, 0x304b, 2571 "no fw sense data, sp = %p\n", sp); 2572 return; 2573 } 2574 cp = GET_CMD_SP(sp); 2575 if (cp == NULL) { 2576 ql_log(ql_log_warn, vha, 0x303b, 2577 "cmd is NULL: already returned to OS (sp=%p).\n", sp); 2578 2579 rsp->status_srb = NULL; 2580 return; 2581 } 2582 2583 if (!GET_CMD_SENSE_LEN(sp)) { 2584 ql_dbg(ql_dbg_io, vha, 0x304c, 2585 "no sense data, sp = %p\n", sp); 2586 } else { 2587 sense_len = GET_CMD_SENSE_LEN(sp); 2588 sense_ptr = GET_CMD_SENSE_PTR(sp); 2589 ql_dbg(ql_dbg_io, vha, 0x304f, 2590 "sp=%p sense_len=0x%x sense_ptr=%p.\n", 2591 sp, sense_len, sense_ptr); 2592 2593 if (sense_len > sizeof(pkt->data)) 2594 sense_sz = sizeof(pkt->data); 2595 else 2596 sense_sz = sense_len; 2597 2598 /* Move sense data. 
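 *
 * Each status-continuation entry carries at most sizeof(pkt->data)
 * sense bytes (60 for a 64-byte IOCB).  E.g. with 0x50 bytes still
 * owed by the firmware, this entry consumes 0x3c of them and
 * FW_SENSE_LEN drops to 0x14, so one more continuation entry is
 * expected before the command is finally completed.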
 */
2599         ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x304e,
2600             (uint8_t *)pkt, sizeof(sts_cont_entry_t));
2601         memcpy(sense_ptr, pkt->data, sense_sz);
2602         ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x304a,
2603             sense_ptr, sense_sz);
2604 
2605         sense_len -= sense_sz;
2606         sense_ptr += sense_sz;
2607 
2608         SET_CMD_SENSE_PTR(sp, sense_ptr);
2609         SET_CMD_SENSE_LEN(sp, sense_len);
2610     }
2611     sense_len = GET_FW_SENSE_LEN(sp);
2612     sense_len = (sense_len > sizeof(pkt->data)) ?
2613         (sense_len - sizeof(pkt->data)) : 0;
2614     SET_FW_SENSE_LEN(sp, sense_len);
2615 
2616     /* Place command on done queue. */
2617     if (sense_len == 0) {
2618         rsp->status_srb = NULL;
2619         sp->done(sp, cp->result);
2620     }
2621 }
2622 
2623 /**
2624  * qlafx00_multistatus_entry() - Process Multi response queue entries.
2625  * @vha: SCSI driver HA context
2626  * @rsp: response queue
2627  * @pkt: received packet
2628  */
2629 static void
2630 qlafx00_multistatus_entry(struct scsi_qla_host *vha,
2631     struct rsp_que *rsp, void *pkt)
2632 {
2633     srb_t *sp;
2634     struct multi_sts_entry_fx00 *stsmfx;
2635     struct qla_hw_data *ha = vha->hw;
2636     uint32_t handle, hindex, handle_count, i;
2637     uint16_t que;
2638     struct req_que *req;
2639     __le32 *handle_ptr;
2640 
2641     stsmfx = (struct multi_sts_entry_fx00 *) pkt;
2642 
2643     handle_count = stsmfx->handle_count;
2644 
2645     if (handle_count > MAX_HANDLE_COUNT) {
2646         ql_dbg(ql_dbg_io, vha, 0x3035,
2647             "Invalid handle count (0x%x).\n", handle_count);
2648         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2649         qla2xxx_wake_dpc(vha);
2650         return;
2651     }
2652 
2653     handle_ptr = &stsmfx->handles[0];
2654 
2655     for (i = 0; i < handle_count; i++) {
2656         hindex = le32_to_cpu(*handle_ptr);
2657         handle = LSW(hindex);
2658         que = MSW(hindex);
2659         req = ha->req_q_map[que];
2660 
2661         /* Validate handle. */
2662         if (handle < req->num_outstanding_cmds)
2663             sp = req->outstanding_cmds[handle];
2664         else
2665             sp = NULL;
2666 
2667         if (sp == NULL) {
2668             ql_dbg(ql_dbg_io, vha, 0x3044,
2669                 "Invalid status handle (0x%x).\n", handle);
2670             set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2671             qla2xxx_wake_dpc(vha);
2672             return;
2673         }
2674         qla2x00_process_completed_request(vha, req, handle);
2675         handle_ptr++;
2676     }
2677 }
2678 
2679 /**
2680  * qlafx00_error_entry() - Process an error entry.
2681  * @vha: SCSI driver HA context
2682  * @rsp: response queue
2683  * @pkt: Entry pointer
2684  * @estatus: error status reported in the IOCB
2685  * @etype: entry type of the failed IOCB
2686  */
2687 static void
2688 qlafx00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp,
2689     struct sts_entry_fx00 *pkt, uint8_t estatus, uint8_t etype)
2690 {
2691     srb_t *sp;
2692     struct qla_hw_data *ha = vha->hw;
2693     const char func[] = "ERROR-IOCB";
2694     uint16_t que = 0;
2695     struct req_que *req = NULL;
2696     int res = DID_ERROR << 16;
2697 
2698     ql_dbg(ql_dbg_async, vha, 0x507f,
2699         "type of error status in response: 0x%x\n", estatus);
2700 
2701     req = ha->req_q_map[que];
2702 
2703     sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
2704     if (sp) {
2705         sp->done(sp, res);
2706         return;
2707     }
2708 
2709     set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2710     qla2xxx_wake_dpc(vha);
2711 }
2712 
2713 /**
2714  * qlafx00_process_response_queue() - Process response queue entries.
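 *
 * Entries are copied out of the response ring with memcpy_fromio()
 * before decoding, so the per-type handlers below operate on a stable
 * host copy rather than on live adapter memory.
 *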
2715  * @vha: SCSI driver HA context
2716  * @rsp: response queue
2717  */
2718 static void
2719 qlafx00_process_response_queue(struct scsi_qla_host *vha,
2720     struct rsp_que *rsp)
2721 {
2722     struct sts_entry_fx00 *pkt;
2723     response_t *lptr;
2724     uint16_t lreq_q_in = 0;
2725     uint16_t lreq_q_out = 0;
2726 
2727     lreq_q_in = RD_REG_DWORD(rsp->rsp_q_in);
2728     lreq_q_out = rsp->ring_index;
2729 
2730     while (lreq_q_in != lreq_q_out) {
2731         lptr = rsp->ring_ptr;
2732         memcpy_fromio(rsp->rsp_pkt, (void __iomem *)lptr,
2733             sizeof(rsp->rsp_pkt));
2734         pkt = (struct sts_entry_fx00 *)rsp->rsp_pkt;
2735 
2736         rsp->ring_index++;
2737         lreq_q_out++;
2738         if (rsp->ring_index == rsp->length) {
2739             lreq_q_out = 0;
2740             rsp->ring_index = 0;
2741             rsp->ring_ptr = rsp->ring;
2742         } else {
2743             rsp->ring_ptr++;
2744         }
2745 
2746         if (pkt->entry_status != 0 &&
2747             pkt->entry_type != IOCTL_IOSB_TYPE_FX00) {
2748             qlafx00_error_entry(vha, rsp,
2749                 (struct sts_entry_fx00 *)pkt, pkt->entry_status,
2750                 pkt->entry_type);
2751             continue;
2752         }
2753 
2754         switch (pkt->entry_type) {
2755         case STATUS_TYPE_FX00:
2756             qlafx00_status_entry(vha, rsp, pkt);
2757             break;
2758 
2759         case STATUS_CONT_TYPE_FX00:
2760             qlafx00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
2761             break;
2762 
2763         case MULTI_STATUS_TYPE_FX00:
2764             qlafx00_multistatus_entry(vha, rsp, pkt);
2765             break;
2766 
2767         case ABORT_IOCB_TYPE_FX00:
2768             qlafx00_abort_iocb_entry(vha, rsp->req,
2769                 (struct abort_iocb_entry_fx00 *)pkt);
2770             break;
2771 
2772         case IOCTL_IOSB_TYPE_FX00:
2773             qlafx00_ioctl_iosb_entry(vha, rsp->req,
2774                 (struct ioctl_iocb_entry_fx00 *)pkt);
2775             break;
2776         default:
2777             /* Type Not Supported. */
2778             ql_dbg(ql_dbg_async, vha, 0x5081,
2779                 "Received unknown response pkt type %x "
2780                 "entry status=%x.\n",
2781                 pkt->entry_type, pkt->entry_status);
2782             break;
2783         }
2784     }
2785 
2786     /* Adjust ring index */
2787     WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
2788 }
2789 
2790 /**
2791  * qlafx00_async_event() - Process asynchronous events.
2792  * @vha: SCSI driver HA context
2793  */
2794 static void
2795 qlafx00_async_event(scsi_qla_host_t *vha)
2796 {
2797     struct qla_hw_data *ha = vha->hw;
2798     struct device_reg_fx00 __iomem *reg;
2799     int data_size = 1;
2800 
2801     reg = &ha->iobase->ispfx00;
2802     /* Decode the asynchronous event code reported in aenmb[0].
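     *
     * QLAFX00_MBA_PORT_UPDATE pulls three additional mailboxes and
     * posts a four-dword payload; unrecognized codes are logged with
     * all eight mailboxes, but only aenmb[0] is forwarded to the AEN
     * work handler.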
     */
2803     switch (ha->aenmb[0]) {
2804     case QLAFX00_MBA_SYSTEM_ERR:        /* System Error */
2805         ql_log(ql_log_warn, vha, 0x5079,
2806             "ISP System Error - mbx1=%x\n", ha->aenmb[0]);
2807         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2808         break;
2809 
2810     case QLAFX00_MBA_SHUTDOWN_RQSTD:    /* Shutdown requested */
2811         ql_dbg(ql_dbg_async, vha, 0x5076,
2812             "Asynchronous FW shutdown requested.\n");
2813         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2814         qla2xxx_wake_dpc(vha);
2815         break;
2816 
2817     case QLAFX00_MBA_PORT_UPDATE:       /* Port database update */
2818         ha->aenmb[1] = RD_REG_DWORD(&reg->aenmailbox1);
2819         ha->aenmb[2] = RD_REG_DWORD(&reg->aenmailbox2);
2820         ha->aenmb[3] = RD_REG_DWORD(&reg->aenmailbox3);
2821         ql_dbg(ql_dbg_async, vha, 0x5077,
2822             "Asynchronous port Update received "
2823             "aenmb[0]: %x, aenmb[1]: %x, aenmb[2]: %x, aenmb[3]: %x\n",
2824             ha->aenmb[0], ha->aenmb[1], ha->aenmb[2], ha->aenmb[3]);
2825         data_size = 4;
2826         break;
2827 
2828     case QLAFX00_MBA_TEMP_OVER:         /* Over temperature event */
2829         ql_log(ql_log_info, vha, 0x5085,
2830             "Asynchronous over temperature event received "
2831             "aenmb[0]: %x\n",
2832             ha->aenmb[0]);
2833         break;
2834 
2835     case QLAFX00_MBA_TEMP_NORM:         /* Normal temperature event */
2836         ql_log(ql_log_info, vha, 0x5086,
2837             "Asynchronous normal temperature event received "
2838             "aenmb[0]: %x\n",
2839             ha->aenmb[0]);
2840         break;
2841 
2842     case QLAFX00_MBA_TEMP_CRIT:         /* Critical temperature event */
2843         ql_log(ql_log_info, vha, 0x5083,
2844             "Asynchronous critical temperature event received "
2845             "aenmb[0]: %x\n",
2846             ha->aenmb[0]);
2847         break;
2848 
2849     default:
2850         ha->aenmb[1] = RD_REG_WORD(&reg->aenmailbox1);
2851         ha->aenmb[2] = RD_REG_WORD(&reg->aenmailbox2);
2852         ha->aenmb[3] = RD_REG_WORD(&reg->aenmailbox3);
2853         ha->aenmb[4] = RD_REG_WORD(&reg->aenmailbox4);
2854         ha->aenmb[5] = RD_REG_WORD(&reg->aenmailbox5);
2855         ha->aenmb[6] = RD_REG_WORD(&reg->aenmailbox6);
2856         ha->aenmb[7] = RD_REG_WORD(&reg->aenmailbox7);
2857         ql_dbg(ql_dbg_async, vha, 0x5078,
2858             "AEN:%04x %04x %04x %04x :%04x %04x %04x %04x\n",
2859             ha->aenmb[0], ha->aenmb[1], ha->aenmb[2], ha->aenmb[3],
2860             ha->aenmb[4], ha->aenmb[5], ha->aenmb[6], ha->aenmb[7]);
2861         break;
2862     }
2863     qlafx00_post_aenfx_work(vha, ha->aenmb[0],
2864         (uint32_t *)ha->aenmb, data_size);
2865 }
2866 
2867 /**
2868  * qlafx00_mbx_completion() - Process mailbox command completions.
2869  * @vha: SCSI driver HA context
2870  * @mb0: value of mailbox register 0 (read from mailbox16)
2871  */
2872 static void
2873 qlafx00_mbx_completion(scsi_qla_host_t *vha, uint32_t mb0)
2874 {
2875     uint16_t cnt;
2876     uint32_t __iomem *wptr;
2877     struct qla_hw_data *ha = vha->hw;
2878     struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
2879 
2880     if (!ha->mcp32)
2881         ql_dbg(ql_dbg_async, vha, 0x507e, "MBX pointer ERROR.\n");
2882 
2883     /* Load return mailbox registers. */
2884     ha->flags.mbox_int = 1;
2885     ha->mailbox_out32[0] = mb0;
2886     wptr = (uint32_t __iomem *)&reg->mailbox17;
2887 
2888     for (cnt = 1; cnt < ha->mbx_count; cnt++) {
2889         ha->mailbox_out32[cnt] = RD_REG_DWORD(wptr);
2890         wptr++;
2891     }
2892 }
2893 
2894 /**
2895  * qlafx00_intr_handler() - Process interrupts for the ISPFX00.
2896  * @irq: interrupt number
2897  * @dev_id: SCSI driver HA context
2898  *
2899  * Called by system whenever the host adapter generates an interrupt.
2900  *
2901  * Returns handled flag.
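 *
 * Each pass reads the aggregate interrupt status and services up to
 * three sources in order: mailbox completion, asynchronous event, and
 * response-queue completion, acknowledging whatever was handled via
 * QLAFX00_CLR_INTR_REG(); the loop is bounded at 50 passes.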
2902  */
2903 irqreturn_t
2904 qlafx00_intr_handler(int irq, void *dev_id)
2905 {
2906     scsi_qla_host_t *vha;
2907     struct qla_hw_data *ha;
2908     struct device_reg_fx00 __iomem *reg;
2909     int status;
2910     unsigned long iter;
2911     uint32_t stat;
2912     uint32_t mb[8];
2913     struct rsp_que *rsp;
2914     unsigned long flags;
2915     uint32_t clr_intr = 0;
2916     uint32_t intr_stat = 0;
2917 
2918     rsp = (struct rsp_que *) dev_id;
2919     if (!rsp) {
2920         ql_log(ql_log_info, NULL, 0x507d,
2921             "%s: NULL response queue pointer.\n", __func__);
2922         return IRQ_NONE;
2923     }
2924 
2925     ha = rsp->hw;
2926     reg = &ha->iobase->ispfx00;
2927     status = 0;
2928 
2929     if (unlikely(pci_channel_offline(ha->pdev)))
2930         return IRQ_HANDLED;
2931 
2932     spin_lock_irqsave(&ha->hardware_lock, flags);
2933     vha = pci_get_drvdata(ha->pdev);
2934     for (iter = 50; iter--; clr_intr = 0) {
2935         stat = QLAFX00_RD_INTR_REG(ha);
2936         if (qla2x00_check_reg32_for_disconnect(vha, stat))
2937             break;
2938         intr_stat = stat & QLAFX00_HST_INT_STS_BITS;
2939         if (!intr_stat)
2940             break;
2941 
2942         if (stat & QLAFX00_INTR_MB_CMPLT) {
2943             mb[0] = RD_REG_WORD(&reg->mailbox16);
2944             qlafx00_mbx_completion(vha, mb[0]);
2945             status |= MBX_INTERRUPT;
2946             clr_intr |= QLAFX00_INTR_MB_CMPLT;
2947         }
2948         if (intr_stat & QLAFX00_INTR_ASYNC_CMPLT) {
2949             ha->aenmb[0] = RD_REG_WORD(&reg->aenmailbox0);
2950             qlafx00_async_event(vha);
2951             clr_intr |= QLAFX00_INTR_ASYNC_CMPLT;
2952         }
2953         if (intr_stat & QLAFX00_INTR_RSP_CMPLT) {
2954             qlafx00_process_response_queue(vha, rsp);
2955             clr_intr |= QLAFX00_INTR_RSP_CMPLT;
2956         }
2957 
2958         QLAFX00_CLR_INTR_REG(ha, clr_intr);
2959         QLAFX00_RD_INTR_REG(ha);
2960     }
2961 
2962     qla2x00_handle_mbx_completion(ha, status);
2963     spin_unlock_irqrestore(&ha->hardware_lock, flags);
2964 
2965     return IRQ_HANDLED;
2966 }
2967 
2968 /** QLAFX00 specific IOCB implementation functions */
2969 
2970 static inline cont_a64_entry_t *
2971 qlafx00_prep_cont_type1_iocb(struct req_que *req,
2972     cont_a64_entry_t *lcont_pkt)
2973 {
2974     cont_a64_entry_t *cont_pkt;
2975 
2976     /* Adjust ring index. */
2977     req->ring_index++;
2978     if (req->ring_index == req->length) {
2979         req->ring_index = 0;
2980         req->ring_ptr = req->ring;
2981     } else {
2982         req->ring_ptr++;
2983     }
2984 
2985     cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
2986 
2987     /* Load packet defaults.
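     *
     * Only the entry type is preset here; the caller fills the DSD
     * slots in a stack-local copy and memcpy_toio()s the finished
     * entry over the ring slot reserved above.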
*/ 2988 lcont_pkt->entry_type = CONTINUE_A64_TYPE_FX00; 2989 2990 return cont_pkt; 2991 } 2992 2993 static inline void 2994 qlafx00_build_scsi_iocbs(srb_t *sp, struct cmd_type_7_fx00 *cmd_pkt, 2995 uint16_t tot_dsds, struct cmd_type_7_fx00 *lcmd_pkt) 2996 { 2997 uint16_t avail_dsds; 2998 __le32 *cur_dsd; 2999 scsi_qla_host_t *vha; 3000 struct scsi_cmnd *cmd; 3001 struct scatterlist *sg; 3002 int i, cont; 3003 struct req_que *req; 3004 cont_a64_entry_t lcont_pkt; 3005 cont_a64_entry_t *cont_pkt; 3006 3007 vha = sp->vha; 3008 req = vha->req; 3009 3010 cmd = GET_CMD_SP(sp); 3011 cont = 0; 3012 cont_pkt = NULL; 3013 3014 /* Update entry type to indicate Command Type 3 IOCB */ 3015 lcmd_pkt->entry_type = FX00_COMMAND_TYPE_7; 3016 3017 /* No data transfer */ 3018 if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) { 3019 lcmd_pkt->byte_count = cpu_to_le32(0); 3020 return; 3021 } 3022 3023 /* Set transfer direction */ 3024 if (cmd->sc_data_direction == DMA_TO_DEVICE) { 3025 lcmd_pkt->cntrl_flags = TMF_WRITE_DATA; 3026 vha->qla_stats.output_bytes += scsi_bufflen(cmd); 3027 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) { 3028 lcmd_pkt->cntrl_flags = TMF_READ_DATA; 3029 vha->qla_stats.input_bytes += scsi_bufflen(cmd); 3030 } 3031 3032 /* One DSD is available in the Command Type 3 IOCB */ 3033 avail_dsds = 1; 3034 cur_dsd = (__le32 *)&lcmd_pkt->dseg_0_address; 3035 3036 /* Load data segments */ 3037 scsi_for_each_sg(cmd, sg, tot_dsds, i) { 3038 dma_addr_t sle_dma; 3039 3040 /* Allocate additional continuation packets? */ 3041 if (avail_dsds == 0) { 3042 /* 3043 * Five DSDs are available in the Continuation 3044 * Type 1 IOCB. 3045 */ 3046 memset(&lcont_pkt, 0, REQUEST_ENTRY_SIZE); 3047 cont_pkt = 3048 qlafx00_prep_cont_type1_iocb(req, &lcont_pkt); 3049 cur_dsd = (__le32 *)lcont_pkt.dseg_0_address; 3050 avail_dsds = 5; 3051 cont = 1; 3052 } 3053 3054 sle_dma = sg_dma_address(sg); 3055 *cur_dsd++ = cpu_to_le32(LSD(sle_dma)); 3056 *cur_dsd++ = cpu_to_le32(MSD(sle_dma)); 3057 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg)); 3058 avail_dsds--; 3059 if (avail_dsds == 0 && cont == 1) { 3060 cont = 0; 3061 memcpy_toio((void __iomem *)cont_pkt, &lcont_pkt, 3062 REQUEST_ENTRY_SIZE); 3063 } 3064 3065 } 3066 if (avail_dsds != 0 && cont == 1) { 3067 memcpy_toio((void __iomem *)cont_pkt, &lcont_pkt, 3068 REQUEST_ENTRY_SIZE); 3069 } 3070 } 3071 3072 /** 3073 * qlafx00_start_scsi() - Send a SCSI command to the ISP 3074 * @sp: command to send to the ISP 3075 * 3076 * Returns non-zero if a failure occurred, else zero. 3077 */ 3078 int 3079 qlafx00_start_scsi(srb_t *sp) 3080 { 3081 int nseg; 3082 unsigned long flags; 3083 uint32_t index; 3084 uint32_t handle; 3085 uint16_t cnt; 3086 uint16_t req_cnt; 3087 uint16_t tot_dsds; 3088 struct req_que *req = NULL; 3089 struct rsp_que *rsp = NULL; 3090 struct scsi_cmnd *cmd = GET_CMD_SP(sp); 3091 struct scsi_qla_host *vha = sp->vha; 3092 struct qla_hw_data *ha = vha->hw; 3093 struct cmd_type_7_fx00 *cmd_pkt; 3094 struct cmd_type_7_fx00 lcmd_pkt; 3095 struct scsi_lun llun; 3096 3097 /* Setup device pointers. */ 3098 rsp = ha->rsp_q_map[0]; 3099 req = vha->req; 3100 3101 /* So we know we haven't pci_map'ed anything yet */ 3102 tot_dsds = 0; 3103 3104 /* Acquire ring specific lock */ 3105 spin_lock_irqsave(&ha->hardware_lock, flags); 3106 3107 /* Check for room in outstanding command list. 
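 *
 * The scan below probes num_outstanding_cmds slots starting just after
 * the last handle issued, wrapping to 1 rather than 0 (handle 0 is
 * reserved as the invalid handle); e.g. with current_outstanding_cmd
 * == 5 the probe order is 6, 7, ..., wrapping back to 1.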
*/ 3108 handle = req->current_outstanding_cmd; 3109 for (index = 1; index < req->num_outstanding_cmds; index++) { 3110 handle++; 3111 if (handle == req->num_outstanding_cmds) 3112 handle = 1; 3113 if (!req->outstanding_cmds[handle]) 3114 break; 3115 } 3116 if (index == req->num_outstanding_cmds) 3117 goto queuing_error; 3118 3119 /* Map the sg table so we have an accurate count of sg entries needed */ 3120 if (scsi_sg_count(cmd)) { 3121 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd), 3122 scsi_sg_count(cmd), cmd->sc_data_direction); 3123 if (unlikely(!nseg)) 3124 goto queuing_error; 3125 } else 3126 nseg = 0; 3127 3128 tot_dsds = nseg; 3129 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds); 3130 if (req->cnt < (req_cnt + 2)) { 3131 cnt = RD_REG_DWORD_RELAXED(req->req_q_out); 3132 3133 if (req->ring_index < cnt) 3134 req->cnt = cnt - req->ring_index; 3135 else 3136 req->cnt = req->length - 3137 (req->ring_index - cnt); 3138 if (req->cnt < (req_cnt + 2)) 3139 goto queuing_error; 3140 } 3141 3142 /* Build command packet. */ 3143 req->current_outstanding_cmd = handle; 3144 req->outstanding_cmds[handle] = sp; 3145 sp->handle = handle; 3146 cmd->host_scribble = (unsigned char *)(unsigned long)handle; 3147 req->cnt -= req_cnt; 3148 3149 cmd_pkt = (struct cmd_type_7_fx00 *)req->ring_ptr; 3150 3151 memset(&lcmd_pkt, 0, REQUEST_ENTRY_SIZE); 3152 3153 lcmd_pkt.handle = MAKE_HANDLE(req->id, sp->handle); 3154 lcmd_pkt.reserved_0 = 0; 3155 lcmd_pkt.port_path_ctrl = 0; 3156 lcmd_pkt.reserved_1 = 0; 3157 lcmd_pkt.dseg_count = cpu_to_le16(tot_dsds); 3158 lcmd_pkt.tgt_idx = cpu_to_le16(sp->fcport->tgt_id); 3159 3160 int_to_scsilun(cmd->device->lun, &llun); 3161 host_to_adap((uint8_t *)&llun, (uint8_t *)&lcmd_pkt.lun, 3162 sizeof(lcmd_pkt.lun)); 3163 3164 /* Load SCSI command packet. */ 3165 host_to_adap(cmd->cmnd, lcmd_pkt.fcp_cdb, sizeof(lcmd_pkt.fcp_cdb)); 3166 lcmd_pkt.byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd)); 3167 3168 /* Build IOCB segments */ 3169 qlafx00_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, &lcmd_pkt); 3170 3171 /* Set total data segment count. */ 3172 lcmd_pkt.entry_count = (uint8_t)req_cnt; 3173 3174 /* Specify response queue number where completion should happen */ 3175 lcmd_pkt.entry_status = (uint8_t) rsp->id; 3176 3177 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302e, 3178 (uint8_t *)cmd->cmnd, cmd->cmd_len); 3179 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x3032, 3180 (uint8_t *)&lcmd_pkt, REQUEST_ENTRY_SIZE); 3181 3182 memcpy_toio((void __iomem *)cmd_pkt, &lcmd_pkt, REQUEST_ENTRY_SIZE); 3183 wmb(); 3184 3185 /* Adjust ring index. */ 3186 req->ring_index++; 3187 if (req->ring_index == req->length) { 3188 req->ring_index = 0; 3189 req->ring_ptr = req->ring; 3190 } else 3191 req->ring_ptr++; 3192 3193 sp->flags |= SRB_DMA_VALID; 3194 3195 /* Set chip new ring index. 
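 *
 * Publishing ring_index through req_q_in and ringing the doorbell
 * (rqstq_intr_code) hands the freshly built IOCB to the ISPFx00
 * firmware; the wmb() above orders the IOCB stores ahead of it.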
*/ 3196 WRT_REG_DWORD(req->req_q_in, req->ring_index); 3197 QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code); 3198 3199 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3200 return QLA_SUCCESS; 3201 3202 queuing_error: 3203 if (tot_dsds) 3204 scsi_dma_unmap(cmd); 3205 3206 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3207 3208 return QLA_FUNCTION_FAILED; 3209 } 3210 3211 void 3212 qlafx00_tm_iocb(srb_t *sp, struct tsk_mgmt_entry_fx00 *ptm_iocb) 3213 { 3214 struct srb_iocb *fxio = &sp->u.iocb_cmd; 3215 scsi_qla_host_t *vha = sp->vha; 3216 struct req_que *req = vha->req; 3217 struct tsk_mgmt_entry_fx00 tm_iocb; 3218 struct scsi_lun llun; 3219 3220 memset(&tm_iocb, 0, sizeof(struct tsk_mgmt_entry_fx00)); 3221 tm_iocb.entry_type = TSK_MGMT_IOCB_TYPE_FX00; 3222 tm_iocb.entry_count = 1; 3223 tm_iocb.handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle)); 3224 tm_iocb.reserved_0 = 0; 3225 tm_iocb.tgt_id = cpu_to_le16(sp->fcport->tgt_id); 3226 tm_iocb.control_flags = cpu_to_le32(fxio->u.tmf.flags); 3227 if (tm_iocb.control_flags == cpu_to_le32((uint32_t)TCF_LUN_RESET)) { 3228 int_to_scsilun(fxio->u.tmf.lun, &llun); 3229 host_to_adap((uint8_t *)&llun, (uint8_t *)&tm_iocb.lun, 3230 sizeof(struct scsi_lun)); 3231 } 3232 3233 memcpy((void *)ptm_iocb, &tm_iocb, 3234 sizeof(struct tsk_mgmt_entry_fx00)); 3235 wmb(); 3236 } 3237 3238 void 3239 qlafx00_abort_iocb(srb_t *sp, struct abort_iocb_entry_fx00 *pabt_iocb) 3240 { 3241 struct srb_iocb *fxio = &sp->u.iocb_cmd; 3242 scsi_qla_host_t *vha = sp->vha; 3243 struct req_que *req = vha->req; 3244 struct abort_iocb_entry_fx00 abt_iocb; 3245 3246 memset(&abt_iocb, 0, sizeof(struct abort_iocb_entry_fx00)); 3247 abt_iocb.entry_type = ABORT_IOCB_TYPE_FX00; 3248 abt_iocb.entry_count = 1; 3249 abt_iocb.handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle)); 3250 abt_iocb.abort_handle = 3251 cpu_to_le32(MAKE_HANDLE(req->id, fxio->u.abt.cmd_hndl)); 3252 abt_iocb.tgt_id_sts = cpu_to_le16(sp->fcport->tgt_id); 3253 abt_iocb.req_que_no = cpu_to_le16(req->id); 3254 3255 memcpy((void *)pabt_iocb, &abt_iocb, 3256 sizeof(struct abort_iocb_entry_fx00)); 3257 wmb(); 3258 } 3259 3260 void 3261 qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb) 3262 { 3263 struct srb_iocb *fxio = &sp->u.iocb_cmd; 3264 struct qla_mt_iocb_rqst_fx00 *piocb_rqst; 3265 struct bsg_job *bsg_job; 3266 struct fc_bsg_request *bsg_request; 3267 struct fxdisc_entry_fx00 fx_iocb; 3268 uint8_t entry_cnt = 1; 3269 3270 memset(&fx_iocb, 0, sizeof(struct fxdisc_entry_fx00)); 3271 fx_iocb.entry_type = FX00_IOCB_TYPE; 3272 fx_iocb.handle = cpu_to_le32(sp->handle); 3273 fx_iocb.entry_count = entry_cnt; 3274 3275 if (sp->type == SRB_FXIOCB_DCMD) { 3276 fx_iocb.func_num = 3277 sp->u.iocb_cmd.u.fxiocb.req_func_type; 3278 fx_iocb.adapid = fxio->u.fxiocb.adapter_id; 3279 fx_iocb.adapid_hi = fxio->u.fxiocb.adapter_id_hi; 3280 fx_iocb.reserved_0 = fxio->u.fxiocb.reserved_0; 3281 fx_iocb.reserved_1 = fxio->u.fxiocb.reserved_1; 3282 fx_iocb.dataword_extra = fxio->u.fxiocb.req_data_extra; 3283 3284 if (fxio->u.fxiocb.flags & SRB_FXDISC_REQ_DMA_VALID) { 3285 fx_iocb.req_dsdcnt = cpu_to_le16(1); 3286 fx_iocb.req_xfrcnt = 3287 cpu_to_le16(fxio->u.fxiocb.req_len); 3288 fx_iocb.dseg_rq_address[0] = 3289 cpu_to_le32(LSD(fxio->u.fxiocb.req_dma_handle)); 3290 fx_iocb.dseg_rq_address[1] = 3291 cpu_to_le32(MSD(fxio->u.fxiocb.req_dma_handle)); 3292 fx_iocb.dseg_rq_len = 3293 cpu_to_le32(fxio->u.fxiocb.req_len); 3294 } 3295 3296 if (fxio->u.fxiocb.flags & SRB_FXDISC_RESP_DMA_VALID) { 3297 fx_iocb.rsp_dsdcnt = 
cpu_to_le16(1); 3298 fx_iocb.rsp_xfrcnt = 3299 cpu_to_le16(fxio->u.fxiocb.rsp_len); 3300 fx_iocb.dseg_rsp_address[0] = 3301 cpu_to_le32(LSD(fxio->u.fxiocb.rsp_dma_handle)); 3302 fx_iocb.dseg_rsp_address[1] = 3303 cpu_to_le32(MSD(fxio->u.fxiocb.rsp_dma_handle)); 3304 fx_iocb.dseg_rsp_len = 3305 cpu_to_le32(fxio->u.fxiocb.rsp_len); 3306 } 3307 3308 if (fxio->u.fxiocb.flags & SRB_FXDISC_REQ_DWRD_VALID) { 3309 fx_iocb.dataword = fxio->u.fxiocb.req_data; 3310 } 3311 fx_iocb.flags = fxio->u.fxiocb.flags; 3312 } else { 3313 struct scatterlist *sg; 3314 bsg_job = sp->u.bsg_job; 3315 bsg_request = bsg_job->request; 3316 piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *) 3317 &bsg_request->rqst_data.h_vendor.vendor_cmd[1]; 3318 3319 fx_iocb.func_num = piocb_rqst->func_type; 3320 fx_iocb.adapid = piocb_rqst->adapid; 3321 fx_iocb.adapid_hi = piocb_rqst->adapid_hi; 3322 fx_iocb.reserved_0 = piocb_rqst->reserved_0; 3323 fx_iocb.reserved_1 = piocb_rqst->reserved_1; 3324 fx_iocb.dataword_extra = piocb_rqst->dataword_extra; 3325 fx_iocb.dataword = piocb_rqst->dataword; 3326 fx_iocb.req_xfrcnt = piocb_rqst->req_len; 3327 fx_iocb.rsp_xfrcnt = piocb_rqst->rsp_len; 3328 3329 if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) { 3330 int avail_dsds, tot_dsds; 3331 cont_a64_entry_t lcont_pkt; 3332 cont_a64_entry_t *cont_pkt = NULL; 3333 __le32 *cur_dsd; 3334 int index = 0, cont = 0; 3335 3336 fx_iocb.req_dsdcnt = 3337 cpu_to_le16(bsg_job->request_payload.sg_cnt); 3338 tot_dsds = 3339 bsg_job->request_payload.sg_cnt; 3340 cur_dsd = (__le32 *)&fx_iocb.dseg_rq_address[0]; 3341 avail_dsds = 1; 3342 for_each_sg(bsg_job->request_payload.sg_list, sg, 3343 tot_dsds, index) { 3344 dma_addr_t sle_dma; 3345 3346 /* Allocate additional continuation packets? */ 3347 if (avail_dsds == 0) { 3348 /* 3349 * Five DSDs are available in the Cont. 3350 * Type 1 IOCB. 3351 */ 3352 memset(&lcont_pkt, 0, 3353 REQUEST_ENTRY_SIZE); 3354 cont_pkt = 3355 qlafx00_prep_cont_type1_iocb( 3356 sp->vha->req, &lcont_pkt); 3357 cur_dsd = (__le32 *) 3358 lcont_pkt.dseg_0_address; 3359 avail_dsds = 5; 3360 cont = 1; 3361 entry_cnt++; 3362 } 3363 3364 sle_dma = sg_dma_address(sg); 3365 *cur_dsd++ = cpu_to_le32(LSD(sle_dma)); 3366 *cur_dsd++ = cpu_to_le32(MSD(sle_dma)); 3367 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg)); 3368 avail_dsds--; 3369 3370 if (avail_dsds == 0 && cont == 1) { 3371 cont = 0; 3372 memcpy_toio( 3373 (void __iomem *)cont_pkt, 3374 &lcont_pkt, REQUEST_ENTRY_SIZE); 3375 ql_dump_buffer( 3376 ql_dbg_user + ql_dbg_verbose, 3377 sp->vha, 0x3042, 3378 (uint8_t *)&lcont_pkt, 3379 REQUEST_ENTRY_SIZE); 3380 } 3381 } 3382 if (avail_dsds != 0 && cont == 1) { 3383 memcpy_toio((void __iomem *)cont_pkt, 3384 &lcont_pkt, REQUEST_ENTRY_SIZE); 3385 ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, 3386 sp->vha, 0x3043, 3387 (uint8_t *)&lcont_pkt, REQUEST_ENTRY_SIZE); 3388 } 3389 } 3390 3391 if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) { 3392 int avail_dsds, tot_dsds; 3393 cont_a64_entry_t lcont_pkt; 3394 cont_a64_entry_t *cont_pkt = NULL; 3395 __le32 *cur_dsd; 3396 int index = 0, cont = 0; 3397 3398 fx_iocb.rsp_dsdcnt = 3399 cpu_to_le16(bsg_job->reply_payload.sg_cnt); 3400 tot_dsds = bsg_job->reply_payload.sg_cnt; 3401 cur_dsd = (__le32 *)&fx_iocb.dseg_rsp_address[0]; 3402 avail_dsds = 1; 3403 3404 for_each_sg(bsg_job->reply_payload.sg_list, sg, 3405 tot_dsds, index) { 3406 dma_addr_t sle_dma; 3407 3408 /* Allocate additional continuation packets? */ 3409 if (avail_dsds == 0) { 3410 /* 3411 * Five DSDs are available in the Cont. 3412 * Type 1 IOCB. 
3413 */ 3414 memset(&lcont_pkt, 0, 3415 REQUEST_ENTRY_SIZE); 3416 cont_pkt = 3417 qlafx00_prep_cont_type1_iocb( 3418 sp->vha->req, &lcont_pkt); 3419 cur_dsd = (__le32 *) 3420 lcont_pkt.dseg_0_address; 3421 avail_dsds = 5; 3422 cont = 1; 3423 entry_cnt++; 3424 } 3425 3426 sle_dma = sg_dma_address(sg); 3427 *cur_dsd++ = cpu_to_le32(LSD(sle_dma)); 3428 *cur_dsd++ = cpu_to_le32(MSD(sle_dma)); 3429 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg)); 3430 avail_dsds--; 3431 3432 if (avail_dsds == 0 && cont == 1) { 3433 cont = 0; 3434 memcpy_toio((void __iomem *)cont_pkt, 3435 &lcont_pkt, 3436 REQUEST_ENTRY_SIZE); 3437 ql_dump_buffer( 3438 ql_dbg_user + ql_dbg_verbose, 3439 sp->vha, 0x3045, 3440 (uint8_t *)&lcont_pkt, 3441 REQUEST_ENTRY_SIZE); 3442 } 3443 } 3444 if (avail_dsds != 0 && cont == 1) { 3445 memcpy_toio((void __iomem *)cont_pkt, 3446 &lcont_pkt, REQUEST_ENTRY_SIZE); 3447 ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, 3448 sp->vha, 0x3046, 3449 (uint8_t *)&lcont_pkt, REQUEST_ENTRY_SIZE); 3450 } 3451 } 3452 3453 if (piocb_rqst->flags & SRB_FXDISC_REQ_DWRD_VALID) 3454 fx_iocb.dataword = piocb_rqst->dataword; 3455 fx_iocb.flags = piocb_rqst->flags; 3456 fx_iocb.entry_count = entry_cnt; 3457 } 3458 3459 ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, 3460 sp->vha, 0x3047, 3461 (uint8_t *)&fx_iocb, sizeof(struct fxdisc_entry_fx00)); 3462 3463 memcpy_toio((void __iomem *)pfxiocb, &fx_iocb, 3464 sizeof(struct fxdisc_entry_fx00)); 3465 wmb(); 3466 } 3467
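
/*
 * A minimal, self-contained user-space sketch (not driver code) of the
 * IOCB handle packing this file relies on: the low word of a handle
 * indexes req->outstanding_cmds[], the high word selects the request
 * queue, mirroring what MAKE_HANDLE()/LSW()/MSW() do above.  The names
 * below are illustrative stand-ins, not the driver's macros.
 */
#if 0
#include <assert.h>
#include <stdint.h>

/* Pack a request-queue id and a command slot into one 32-bit handle. */
static uint32_t make_handle(uint16_t que, uint16_t idx)
{
    return ((uint32_t)que << 16) | idx;
}

int main(void)
{
    uint32_t handle = make_handle(2, 0x31);

    assert((uint16_t)(handle & 0xffff) == 0x31); /* LSW(): command slot */
    assert((uint16_t)(handle >> 16) == 2);       /* MSW(): request queue */
    return 0;
}
#endif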