/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2008 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_gbl.h"

#include <linux/delay.h>
#include <linux/vmalloc.h>

#include "qla_devtbl.h"

#ifdef CONFIG_SPARC
#include <asm/prom.h>
#endif

/*
 * QLogic ISP2x00 Hardware Support Function Prototypes.
 */
static int qla2x00_isp_firmware(scsi_qla_host_t *);
static int qla2x00_setup_chip(scsi_qla_host_t *);
static int qla2x00_init_rings(scsi_qla_host_t *);
static int qla2x00_fw_ready(scsi_qla_host_t *);
static int qla2x00_configure_hba(scsi_qla_host_t *);
static int qla2x00_configure_loop(scsi_qla_host_t *);
static int qla2x00_configure_local_loop(scsi_qla_host_t *);
static int qla2x00_configure_fabric(scsi_qla_host_t *);
static int qla2x00_find_all_fabric_devs(scsi_qla_host_t *, struct list_head *);
static int qla2x00_device_resync(scsi_qla_host_t *);
static int qla2x00_fabric_dev_login(scsi_qla_host_t *, fc_port_t *,
    uint16_t *);

static int qla2x00_restart_isp(scsi_qla_host_t *);

static int qla2x00_find_new_loop_id(scsi_qla_host_t *, fc_port_t *);

static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *);
static int qla84xx_init_chip(scsi_qla_host_t *);
static int qla25xx_init_queues(struct qla_hw_data *);

/* SRB Extensions ---------------------------------------------------------- */

static void
qla2x00_ctx_sp_timeout(unsigned long __data)
{
	srb_t *sp = (srb_t *)__data;
	struct srb_ctx *ctx;
	fc_port_t *fcport = sp->fcport;
	struct qla_hw_data *ha = fcport->vha->hw;
	struct req_que *req;
	unsigned long flags;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	req = ha->req_q_map[0];
	req->outstanding_cmds[sp->handle] = NULL;
	ctx = sp->ctx;
	ctx->timeout(sp);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	ctx->free(sp);
}

static void
qla2x00_ctx_sp_free(srb_t *sp)
{
	struct srb_ctx *ctx = sp->ctx;

	kfree(ctx);
	mempool_free(sp, sp->fcport->vha->hw->srb_mempool);
}

inline srb_t *
qla2x00_get_ctx_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size,
    unsigned long tmo)
{
	srb_t *sp;
	struct qla_hw_data *ha = vha->hw;
	struct srb_ctx *ctx;

	sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL);
	if (!sp)
		goto done;
	ctx = kzalloc(size, GFP_KERNEL);
	if (!ctx) {
		mempool_free(sp, ha->srb_mempool);
		goto done;
	}

	memset(sp, 0, sizeof(*sp));
	sp->fcport = fcport;
	sp->ctx = ctx;
	ctx->free = qla2x00_ctx_sp_free;

	init_timer(&ctx->timer);
	if (!tmo)
		goto done;
	ctx->timer.expires = jiffies + tmo * HZ;
	ctx->timer.data = (unsigned long)sp;
	ctx->timer.function = qla2x00_ctx_sp_timeout;
	add_timer(&ctx->timer);
done:
	return sp;
}

/* Asynchronous Login/Logout Routines -------------------------------------- */

#define ELS_TMO_2_RATOV(ha) ((ha)->r_a_tov / 10 * 2)

static void
qla2x00_async_logio_timeout(srb_t *sp)
{
	fc_port_t *fcport = sp->fcport;
	struct srb_logio *lio = sp->ctx;

	DEBUG2(printk(KERN_WARNING
	    "scsi(%ld:%x): Async-%s timeout.\n",
	    fcport->vha->host_no, sp->handle,
	    lio->ctx.type == SRB_LOGIN_CMD ? "login": "logout"));

	if (lio->ctx.type == SRB_LOGIN_CMD)
		qla2x00_post_async_logout_work(fcport->vha, fcport, NULL);
}

int
qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
    uint16_t *data)
{
	struct qla_hw_data *ha = vha->hw;
	srb_t *sp;
	struct srb_logio *lio;
	int rval;

	rval = QLA_FUNCTION_FAILED;
	sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_logio),
	    ELS_TMO_2_RATOV(ha) + 2);
	if (!sp)
		goto done;

	lio = sp->ctx;
	lio->ctx.type = SRB_LOGIN_CMD;
	lio->ctx.timeout = qla2x00_async_logio_timeout;
	lio->flags |= SRB_LOGIN_COND_PLOGI;
	if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
		lio->flags |= SRB_LOGIN_RETRIED;
	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	DEBUG2(printk(KERN_DEBUG
	    "scsi(%ld:%x): Async-login - loop-id=%x portid=%02x%02x%02x "
	    "retries=%d.\n", fcport->vha->host_no, sp->handle, fcport->loop_id,
	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
	    fcport->login_retry));
	return rval;

done_free_sp:
	del_timer_sync(&lio->ctx.timer);
	lio->ctx.free(sp);
done:
	return rval;
}

int
qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	struct qla_hw_data *ha = vha->hw;
	srb_t *sp;
	struct srb_logio *lio;
	int rval;

	rval = QLA_FUNCTION_FAILED;
	sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_logio),
	    ELS_TMO_2_RATOV(ha) + 2);
	if (!sp)
		goto done;

	lio = sp->ctx;
	lio->ctx.type = SRB_LOGOUT_CMD;
	lio->ctx.timeout = qla2x00_async_logio_timeout;
	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	DEBUG2(printk(KERN_DEBUG
	    "scsi(%ld:%x): Async-logout - loop-id=%x portid=%02x%02x%02x.\n",
	    fcport->vha->host_no, sp->handle, fcport->loop_id,
	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa));
	return rval;

done_free_sp:
	del_timer_sync(&lio->ctx.timer);
	lio->ctx.free(sp);
done:
	return rval;
}

int
qla2x00_async_login_done(struct scsi_qla_host *vha, fc_port_t *fcport,
    uint16_t *data)
{
	int rval;
	uint8_t opts = 0;

	switch (data[0]) {
	case MBS_COMMAND_COMPLETE:
		if (fcport->flags & FCF_FCP2_DEVICE)
			opts |= BIT_1;
		rval = qla2x00_get_port_database(vha, fcport, opts);
		if (rval != QLA_SUCCESS)
			qla2x00_mark_device_lost(vha, fcport, 1, 0);
		else
			qla2x00_update_fcport(vha, fcport);
		break;
	case MBS_COMMAND_ERROR:
		if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		else
			qla2x00_mark_device_lost(vha, fcport, 1, 0);
		break;
	case MBS_PORT_ID_USED:
		fcport->loop_id = data[1];
		qla2x00_post_async_login_work(vha, fcport, NULL);
		break;
	case MBS_LOOP_ID_USED:
		fcport->loop_id++;
		rval = qla2x00_find_new_loop_id(vha, fcport);
		if (rval != QLA_SUCCESS) {
			qla2x00_mark_device_lost(vha, fcport, 1, 0);
			break;
		}
		qla2x00_post_async_login_work(vha, fcport, NULL);
		break;
	}
	return QLA_SUCCESS;
}

int
qla2x00_async_logout_done(struct scsi_qla_host *vha, fc_port_t *fcport,
    uint16_t *data)
{
	qla2x00_mark_device_lost(vha, fcport, 1, 0);
	return QLA_SUCCESS;
}

/****************************************************************************/
/*                QLogic ISP2x00 Hardware Support Functions.                */
/****************************************************************************/

/*
 * qla2x00_initialize_adapter
 *      Initialize board.
 *
 * Input:
 *      ha = adapter block pointer.
 *
 * Returns:
 *      0 = success
 */
int
qla2x00_initialize_adapter(scsi_qla_host_t *vha)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];

	/* Clear adapter flags. */
	vha->flags.online = 0;
	ha->flags.chip_reset_done = 0;
	vha->flags.reset_active = 0;
	ha->flags.pci_channel_io_perm_failure = 0;
	ha->flags.eeh_busy = 0;
	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
	atomic_set(&vha->loop_state, LOOP_DOWN);
	vha->device_flags = DFLG_NO_CABLE;
	vha->dpc_flags = 0;
	vha->flags.management_server_logged_in = 0;
	vha->marker_needed = 0;
	ha->isp_abort_cnt = 0;
	ha->beacon_blink_led = 0;

	set_bit(0, ha->req_qid_map);
	set_bit(0, ha->rsp_qid_map);

	qla_printk(KERN_INFO, ha, "Configuring PCI space...\n");
	rval = ha->isp_ops->pci_config(vha);
	if (rval) {
		DEBUG2(printk("scsi(%ld): Unable to configure PCI space.\n",
		    vha->host_no));
		return (rval);
	}

	ha->isp_ops->reset_chip(vha);

	rval = qla2xxx_get_flash_info(vha);
	if (rval) {
		DEBUG2(printk("scsi(%ld): Unable to validate FLASH data.\n",
		    vha->host_no));
		return (rval);
	}

	ha->isp_ops->get_flash_version(vha, req->ring);

	qla_printk(KERN_INFO, ha, "Configure NVRAM parameters...\n");

	ha->isp_ops->nvram_config(vha);

	if (ha->flags.disable_serdes) {
		/* Mask HBA via NVRAM settings? */
		qla_printk(KERN_INFO, ha, "Masking HBA WWPN "
		    "%02x%02x%02x%02x%02x%02x%02x%02x (via NVRAM).\n",
		    vha->port_name[0], vha->port_name[1],
		    vha->port_name[2], vha->port_name[3],
		    vha->port_name[4], vha->port_name[5],
		    vha->port_name[6], vha->port_name[7]);
		return QLA_FUNCTION_FAILED;
	}

	qla_printk(KERN_INFO, ha, "Verifying loaded RISC code...\n");

	if (qla2x00_isp_firmware(vha) != QLA_SUCCESS) {
		rval = ha->isp_ops->chip_diag(vha);
		if (rval)
			return (rval);
		rval = qla2x00_setup_chip(vha);
		if (rval)
			return (rval);
	}
	if (IS_QLA84XX(ha)) {
		ha->cs84xx = qla84xx_get_chip(vha);
		if (!ha->cs84xx) {
			qla_printk(KERN_ERR, ha,
			    "Unable to configure ISP84XX.\n");
			return QLA_FUNCTION_FAILED;
		}
	}
	rval = qla2x00_init_rings(vha);
	ha->flags.chip_reset_done = 1;

	return (rval);
}

/**
 * qla2100_pci_config() - Setup ISP21xx PCI configuration registers.
 * @ha: HA context
 *
 * Returns 0 on success.
 */
int
qla2100_pci_config(scsi_qla_host_t *vha)
{
	uint16_t w;
	unsigned long flags;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	pci_set_master(ha->pdev);
	pci_try_set_mwi(ha->pdev);

	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
	pci_write_config_word(ha->pdev, PCI_COMMAND, w);

	pci_disable_rom(ha->pdev);

	/* Get PCI bus information. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->pci_attr = RD_REG_WORD(&reg->ctrl_status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;
}
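
/*
 * The ISP23xx/24xx/25xx PCI setup routines below follow the same basic
 * pattern as qla2100_pci_config() above: enable bus mastering and MWI,
 * turn on PARITY/SERR reporting, disable the expansion ROM and, for the
 * ISP23xx/24xx parts, latch the ctrl_status register into ha->pci_attr.
 */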

/**
 * qla2300_pci_config() - Setup ISP23xx PCI configuration registers.
 * @ha: HA context
 *
 * Returns 0 on success.
 */
int
qla2300_pci_config(scsi_qla_host_t *vha)
{
	uint16_t w;
	unsigned long flags = 0;
	uint32_t cnt;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	pci_set_master(ha->pdev);
	pci_try_set_mwi(ha->pdev);

	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);

	if (IS_QLA2322(ha) || IS_QLA6322(ha))
		w &= ~PCI_COMMAND_INTX_DISABLE;
	pci_write_config_word(ha->pdev, PCI_COMMAND, w);

	/*
	 * If this is a 2300 card and not 2312, reset the
	 * COMMAND_INVALIDATE due to a bug in the 2300. Unfortunately,
	 * the 2310 also reports itself as a 2300 so we need to get the
	 * fb revision level -- a 6 indicates it really is a 2300 and
	 * not a 2310.
	 */
	if (IS_QLA2300(ha)) {
		spin_lock_irqsave(&ha->hardware_lock, flags);

		/* Pause RISC. */
		WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
		for (cnt = 0; cnt < 30000; cnt++) {
			if ((RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) != 0)
				break;

			udelay(10);
		}

		/* Select FPM registers. */
		WRT_REG_WORD(&reg->ctrl_status, 0x20);
		RD_REG_WORD(&reg->ctrl_status);

		/* Get the fb rev level */
		ha->fb_rev = RD_FB_CMD_REG(ha, reg);

		if (ha->fb_rev == FPM_2300)
			pci_clear_mwi(ha->pdev);

		/* Deselect FPM registers. */
		WRT_REG_WORD(&reg->ctrl_status, 0x0);
		RD_REG_WORD(&reg->ctrl_status);

		/* Release RISC module. */
		WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
		for (cnt = 0; cnt < 30000; cnt++) {
			if ((RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0)
				break;

			udelay(10);
		}

		spin_unlock_irqrestore(&ha->hardware_lock, flags);
	}

	pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);

	pci_disable_rom(ha->pdev);

	/* Get PCI bus information. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->pci_attr = RD_REG_WORD(&reg->ctrl_status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;
}

/**
 * qla24xx_pci_config() - Setup ISP24xx PCI configuration registers.
 * @ha: HA context
 *
 * Returns 0 on success.
 */
int
qla24xx_pci_config(scsi_qla_host_t *vha)
{
	uint16_t w;
	unsigned long flags = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	pci_set_master(ha->pdev);
	pci_try_set_mwi(ha->pdev);

	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
	w &= ~PCI_COMMAND_INTX_DISABLE;
	pci_write_config_word(ha->pdev, PCI_COMMAND, w);

	pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);

	/* PCI-X -- adjust Maximum Memory Read Byte Count (2048). */
	if (pci_find_capability(ha->pdev, PCI_CAP_ID_PCIX))
		pcix_set_mmrbc(ha->pdev, 2048);

	/* PCIe -- adjust Maximum Read Request Size (2048). */
	if (pci_find_capability(ha->pdev, PCI_CAP_ID_EXP))
		pcie_set_readrq(ha->pdev, 2048);

	pci_disable_rom(ha->pdev);

	ha->chip_revision = ha->pdev->revision;

	/* Get PCI bus information. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->pci_attr = RD_REG_DWORD(&reg->ctrl_status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;
}
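
/*
 * ISP25xx parts are PCIe devices, so the routine below skips the PCI-X
 * MMRBC and latency-timer handling used for earlier chips and only bumps
 * the PCIe maximum read request size before disabling the expansion ROM.
 */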

/**
 * qla25xx_pci_config() - Setup ISP25xx PCI configuration registers.
 * @ha: HA context
 *
 * Returns 0 on success.
 */
int
qla25xx_pci_config(scsi_qla_host_t *vha)
{
	uint16_t w;
	struct qla_hw_data *ha = vha->hw;

	pci_set_master(ha->pdev);
	pci_try_set_mwi(ha->pdev);

	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
	w &= ~PCI_COMMAND_INTX_DISABLE;
	pci_write_config_word(ha->pdev, PCI_COMMAND, w);

	/* PCIe -- adjust Maximum Read Request Size (2048). */
	if (pci_find_capability(ha->pdev, PCI_CAP_ID_EXP))
		pcie_set_readrq(ha->pdev, 2048);

	pci_disable_rom(ha->pdev);

	ha->chip_revision = ha->pdev->revision;

	return QLA_SUCCESS;
}

/**
 * qla2x00_isp_firmware() - Choose firmware image.
 * @ha: HA context
 *
 * Returns 0 on success.
 */
static int
qla2x00_isp_firmware(scsi_qla_host_t *vha)
{
	int rval;
	uint16_t loop_id, topo, sw_cap;
	uint8_t domain, area, al_pa;
	struct qla_hw_data *ha = vha->hw;

	/* Assume loading risc code */
	rval = QLA_FUNCTION_FAILED;

	if (ha->flags.disable_risc_code_load) {
		DEBUG2(printk("scsi(%ld): RISC CODE NOT loaded\n",
		    vha->host_no));
		qla_printk(KERN_INFO, ha, "RISC CODE NOT loaded\n");

		/* Verify checksum of loaded RISC code. */
		rval = qla2x00_verify_checksum(vha, ha->fw_srisc_address);
		if (rval == QLA_SUCCESS) {
			/* And, verify we are not in ROM code. */
			rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
			    &area, &domain, &topo, &sw_cap);
		}
	}

	if (rval) {
		DEBUG2_3(printk("scsi(%ld): **** Load RISC code ****\n",
		    vha->host_no));
	}

	return (rval);
}

/**
 * qla2x00_reset_chip() - Reset ISP chip.
 * @ha: HA context
 *
 * Returns 0 on success.
 */
void
qla2x00_reset_chip(scsi_qla_host_t *vha)
{
	unsigned long flags = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	uint32_t cnt;
	uint16_t cmd;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return;

	ha->isp_ops->disable_intrs(ha);

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Turn off master enable */
	cmd = 0;
	pci_read_config_word(ha->pdev, PCI_COMMAND, &cmd);
	cmd &= ~PCI_COMMAND_MASTER;
	pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);

	if (!IS_QLA2100(ha)) {
		/* Pause RISC. */
		WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
		if (IS_QLA2200(ha) || IS_QLA2300(ha)) {
			for (cnt = 0; cnt < 30000; cnt++) {
				if ((RD_REG_WORD(&reg->hccr) &
				    HCCR_RISC_PAUSE) != 0)
					break;
				udelay(100);
			}
		} else {
			RD_REG_WORD(&reg->hccr);	/* PCI Posting. */
			udelay(10);
		}

		/* Select FPM registers. */
		WRT_REG_WORD(&reg->ctrl_status, 0x20);
		RD_REG_WORD(&reg->ctrl_status);		/* PCI Posting. */

		/* FPM Soft Reset. */
		WRT_REG_WORD(&reg->fpm_diag_config, 0x100);
		RD_REG_WORD(&reg->fpm_diag_config);	/* PCI Posting. */

		/* Toggle Fpm Reset. */
		if (!IS_QLA2200(ha)) {
			WRT_REG_WORD(&reg->fpm_diag_config, 0x0);
			RD_REG_WORD(&reg->fpm_diag_config); /* PCI Posting. */
		}

		/* Select frame buffer registers. */
		WRT_REG_WORD(&reg->ctrl_status, 0x10);
		RD_REG_WORD(&reg->ctrl_status);		/* PCI Posting. */

		/* Reset frame buffer FIFOs. */
		if (IS_QLA2200(ha)) {
			WRT_FB_CMD_REG(ha, reg, 0xa000);
			RD_FB_CMD_REG(ha, reg);		/* PCI Posting.
*/ 636 } else { 637 WRT_FB_CMD_REG(ha, reg, 0x00fc); 638 639 /* Read back fb_cmd until zero or 3 seconds max */ 640 for (cnt = 0; cnt < 3000; cnt++) { 641 if ((RD_FB_CMD_REG(ha, reg) & 0xff) == 0) 642 break; 643 udelay(100); 644 } 645 } 646 647 /* Select RISC module registers. */ 648 WRT_REG_WORD(®->ctrl_status, 0); 649 RD_REG_WORD(®->ctrl_status); /* PCI Posting. */ 650 651 /* Reset RISC processor. */ 652 WRT_REG_WORD(®->hccr, HCCR_RESET_RISC); 653 RD_REG_WORD(®->hccr); /* PCI Posting. */ 654 655 /* Release RISC processor. */ 656 WRT_REG_WORD(®->hccr, HCCR_RELEASE_RISC); 657 RD_REG_WORD(®->hccr); /* PCI Posting. */ 658 } 659 660 WRT_REG_WORD(®->hccr, HCCR_CLR_RISC_INT); 661 WRT_REG_WORD(®->hccr, HCCR_CLR_HOST_INT); 662 663 /* Reset ISP chip. */ 664 WRT_REG_WORD(®->ctrl_status, CSR_ISP_SOFT_RESET); 665 666 /* Wait for RISC to recover from reset. */ 667 if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) { 668 /* 669 * It is necessary to for a delay here since the card doesn't 670 * respond to PCI reads during a reset. On some architectures 671 * this will result in an MCA. 672 */ 673 udelay(20); 674 for (cnt = 30000; cnt; cnt--) { 675 if ((RD_REG_WORD(®->ctrl_status) & 676 CSR_ISP_SOFT_RESET) == 0) 677 break; 678 udelay(100); 679 } 680 } else 681 udelay(10); 682 683 /* Reset RISC processor. */ 684 WRT_REG_WORD(®->hccr, HCCR_RESET_RISC); 685 686 WRT_REG_WORD(®->semaphore, 0); 687 688 /* Release RISC processor. */ 689 WRT_REG_WORD(®->hccr, HCCR_RELEASE_RISC); 690 RD_REG_WORD(®->hccr); /* PCI Posting. */ 691 692 if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) { 693 for (cnt = 0; cnt < 30000; cnt++) { 694 if (RD_MAILBOX_REG(ha, reg, 0) != MBS_BUSY) 695 break; 696 697 udelay(100); 698 } 699 } else 700 udelay(100); 701 702 /* Turn on master enable */ 703 cmd |= PCI_COMMAND_MASTER; 704 pci_write_config_word(ha->pdev, PCI_COMMAND, cmd); 705 706 /* Disable RISC pause on FPM parity error. */ 707 if (!IS_QLA2100(ha)) { 708 WRT_REG_WORD(®->hccr, HCCR_DISABLE_PARITY_PAUSE); 709 RD_REG_WORD(®->hccr); /* PCI Posting. */ 710 } 711 712 spin_unlock_irqrestore(&ha->hardware_lock, flags); 713 } 714 715 /** 716 * qla24xx_reset_risc() - Perform full reset of ISP24xx RISC. 717 * @ha: HA context 718 * 719 * Returns 0 on success. 720 */ 721 static inline void 722 qla24xx_reset_risc(scsi_qla_host_t *vha) 723 { 724 unsigned long flags = 0; 725 struct qla_hw_data *ha = vha->hw; 726 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 727 uint32_t cnt, d2; 728 uint16_t wd; 729 730 spin_lock_irqsave(&ha->hardware_lock, flags); 731 732 /* Reset RISC. */ 733 WRT_REG_DWORD(®->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES); 734 for (cnt = 0; cnt < 30000; cnt++) { 735 if ((RD_REG_DWORD(®->ctrl_status) & CSRX_DMA_ACTIVE) == 0) 736 break; 737 738 udelay(10); 739 } 740 741 WRT_REG_DWORD(®->ctrl_status, 742 CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES); 743 pci_read_config_word(ha->pdev, PCI_COMMAND, &wd); 744 745 udelay(100); 746 /* Wait for firmware to complete NVRAM accesses. */ 747 d2 = (uint32_t) RD_REG_WORD(®->mailbox0); 748 for (cnt = 10000 ; cnt && d2; cnt--) { 749 udelay(5); 750 d2 = (uint32_t) RD_REG_WORD(®->mailbox0); 751 barrier(); 752 } 753 754 /* Wait for soft-reset to complete. 
*/ 755 d2 = RD_REG_DWORD(®->ctrl_status); 756 for (cnt = 6000000 ; cnt && (d2 & CSRX_ISP_SOFT_RESET); cnt--) { 757 udelay(5); 758 d2 = RD_REG_DWORD(®->ctrl_status); 759 barrier(); 760 } 761 762 WRT_REG_DWORD(®->hccr, HCCRX_SET_RISC_RESET); 763 RD_REG_DWORD(®->hccr); 764 765 WRT_REG_DWORD(®->hccr, HCCRX_REL_RISC_PAUSE); 766 RD_REG_DWORD(®->hccr); 767 768 WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_RESET); 769 RD_REG_DWORD(®->hccr); 770 771 d2 = (uint32_t) RD_REG_WORD(®->mailbox0); 772 for (cnt = 6000000 ; cnt && d2; cnt--) { 773 udelay(5); 774 d2 = (uint32_t) RD_REG_WORD(®->mailbox0); 775 barrier(); 776 } 777 778 spin_unlock_irqrestore(&ha->hardware_lock, flags); 779 780 if (IS_NOPOLLING_TYPE(ha)) 781 ha->isp_ops->enable_intrs(ha); 782 } 783 784 /** 785 * qla24xx_reset_chip() - Reset ISP24xx chip. 786 * @ha: HA context 787 * 788 * Returns 0 on success. 789 */ 790 void 791 qla24xx_reset_chip(scsi_qla_host_t *vha) 792 { 793 struct qla_hw_data *ha = vha->hw; 794 795 if (pci_channel_offline(ha->pdev) && 796 ha->flags.pci_channel_io_perm_failure) { 797 return; 798 } 799 800 ha->isp_ops->disable_intrs(ha); 801 802 /* Perform RISC reset. */ 803 qla24xx_reset_risc(vha); 804 } 805 806 /** 807 * qla2x00_chip_diag() - Test chip for proper operation. 808 * @ha: HA context 809 * 810 * Returns 0 on success. 811 */ 812 int 813 qla2x00_chip_diag(scsi_qla_host_t *vha) 814 { 815 int rval; 816 struct qla_hw_data *ha = vha->hw; 817 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 818 unsigned long flags = 0; 819 uint16_t data; 820 uint32_t cnt; 821 uint16_t mb[5]; 822 struct req_que *req = ha->req_q_map[0]; 823 824 /* Assume a failed state */ 825 rval = QLA_FUNCTION_FAILED; 826 827 DEBUG3(printk("scsi(%ld): Testing device at %lx.\n", 828 vha->host_no, (u_long)®->flash_address)); 829 830 spin_lock_irqsave(&ha->hardware_lock, flags); 831 832 /* Reset ISP chip. */ 833 WRT_REG_WORD(®->ctrl_status, CSR_ISP_SOFT_RESET); 834 835 /* 836 * We need to have a delay here since the card will not respond while 837 * in reset causing an MCA on some architectures. 838 */ 839 udelay(20); 840 data = qla2x00_debounce_register(®->ctrl_status); 841 for (cnt = 6000000 ; cnt && (data & CSR_ISP_SOFT_RESET); cnt--) { 842 udelay(5); 843 data = RD_REG_WORD(®->ctrl_status); 844 barrier(); 845 } 846 847 if (!cnt) 848 goto chip_diag_failed; 849 850 DEBUG3(printk("scsi(%ld): Reset register cleared by chip reset\n", 851 vha->host_no)); 852 853 /* Reset RISC processor. 
*/ 854 WRT_REG_WORD(®->hccr, HCCR_RESET_RISC); 855 WRT_REG_WORD(®->hccr, HCCR_RELEASE_RISC); 856 857 /* Workaround for QLA2312 PCI parity error */ 858 if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) { 859 data = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 0)); 860 for (cnt = 6000000; cnt && (data == MBS_BUSY); cnt--) { 861 udelay(5); 862 data = RD_MAILBOX_REG(ha, reg, 0); 863 barrier(); 864 } 865 } else 866 udelay(10); 867 868 if (!cnt) 869 goto chip_diag_failed; 870 871 /* Check product ID of chip */ 872 DEBUG3(printk("scsi(%ld): Checking product ID of chip\n", vha->host_no)); 873 874 mb[1] = RD_MAILBOX_REG(ha, reg, 1); 875 mb[2] = RD_MAILBOX_REG(ha, reg, 2); 876 mb[3] = RD_MAILBOX_REG(ha, reg, 3); 877 mb[4] = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 4)); 878 if (mb[1] != PROD_ID_1 || (mb[2] != PROD_ID_2 && mb[2] != PROD_ID_2a) || 879 mb[3] != PROD_ID_3) { 880 qla_printk(KERN_WARNING, ha, 881 "Wrong product ID = 0x%x,0x%x,0x%x\n", mb[1], mb[2], mb[3]); 882 883 goto chip_diag_failed; 884 } 885 ha->product_id[0] = mb[1]; 886 ha->product_id[1] = mb[2]; 887 ha->product_id[2] = mb[3]; 888 ha->product_id[3] = mb[4]; 889 890 /* Adjust fw RISC transfer size */ 891 if (req->length > 1024) 892 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * 1024; 893 else 894 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * 895 req->length; 896 897 if (IS_QLA2200(ha) && 898 RD_MAILBOX_REG(ha, reg, 7) == QLA2200A_RISC_ROM_VER) { 899 /* Limit firmware transfer size with a 2200A */ 900 DEBUG3(printk("scsi(%ld): Found QLA2200A chip.\n", 901 vha->host_no)); 902 903 ha->device_type |= DT_ISP2200A; 904 ha->fw_transfer_size = 128; 905 } 906 907 /* Wrap Incoming Mailboxes Test. */ 908 spin_unlock_irqrestore(&ha->hardware_lock, flags); 909 910 DEBUG3(printk("scsi(%ld): Checking mailboxes.\n", vha->host_no)); 911 rval = qla2x00_mbx_reg_test(vha); 912 if (rval) { 913 DEBUG(printk("scsi(%ld): Failed mailbox send register test\n", 914 vha->host_no)); 915 qla_printk(KERN_WARNING, ha, 916 "Failed mailbox send register test\n"); 917 } 918 else { 919 /* Flag a successful rval */ 920 rval = QLA_SUCCESS; 921 } 922 spin_lock_irqsave(&ha->hardware_lock, flags); 923 924 chip_diag_failed: 925 if (rval) 926 DEBUG2_3(printk("scsi(%ld): Chip diagnostics **** FAILED " 927 "****\n", vha->host_no)); 928 929 spin_unlock_irqrestore(&ha->hardware_lock, flags); 930 931 return (rval); 932 } 933 934 /** 935 * qla24xx_chip_diag() - Test ISP24xx for proper operation. 936 * @ha: HA context 937 * 938 * Returns 0 on success. 
939 */ 940 int 941 qla24xx_chip_diag(scsi_qla_host_t *vha) 942 { 943 int rval; 944 struct qla_hw_data *ha = vha->hw; 945 struct req_que *req = ha->req_q_map[0]; 946 947 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length; 948 949 rval = qla2x00_mbx_reg_test(vha); 950 if (rval) { 951 DEBUG(printk("scsi(%ld): Failed mailbox send register test\n", 952 vha->host_no)); 953 qla_printk(KERN_WARNING, ha, 954 "Failed mailbox send register test\n"); 955 } else { 956 /* Flag a successful rval */ 957 rval = QLA_SUCCESS; 958 } 959 960 return rval; 961 } 962 963 void 964 qla2x00_alloc_fw_dump(scsi_qla_host_t *vha) 965 { 966 int rval; 967 uint32_t dump_size, fixed_size, mem_size, req_q_size, rsp_q_size, 968 eft_size, fce_size, mq_size; 969 dma_addr_t tc_dma; 970 void *tc; 971 struct qla_hw_data *ha = vha->hw; 972 struct req_que *req = ha->req_q_map[0]; 973 struct rsp_que *rsp = ha->rsp_q_map[0]; 974 975 if (ha->fw_dump) { 976 qla_printk(KERN_WARNING, ha, 977 "Firmware dump previously allocated.\n"); 978 return; 979 } 980 981 ha->fw_dumped = 0; 982 fixed_size = mem_size = eft_size = fce_size = mq_size = 0; 983 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 984 fixed_size = sizeof(struct qla2100_fw_dump); 985 } else if (IS_QLA23XX(ha)) { 986 fixed_size = offsetof(struct qla2300_fw_dump, data_ram); 987 mem_size = (ha->fw_memory_size - 0x11000 + 1) * 988 sizeof(uint16_t); 989 } else if (IS_FWI2_CAPABLE(ha)) { 990 if (IS_QLA81XX(ha)) 991 fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem); 992 else if (IS_QLA25XX(ha)) 993 fixed_size = offsetof(struct qla25xx_fw_dump, ext_mem); 994 else 995 fixed_size = offsetof(struct qla24xx_fw_dump, ext_mem); 996 mem_size = (ha->fw_memory_size - 0x100000 + 1) * 997 sizeof(uint32_t); 998 if (ha->mqenable) 999 mq_size = sizeof(struct qla2xxx_mq_chain); 1000 /* Allocate memory for Fibre Channel Event Buffer. */ 1001 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)) 1002 goto try_eft; 1003 1004 tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma, 1005 GFP_KERNEL); 1006 if (!tc) { 1007 qla_printk(KERN_WARNING, ha, "Unable to allocate " 1008 "(%d KB) for FCE.\n", FCE_SIZE / 1024); 1009 goto try_eft; 1010 } 1011 1012 memset(tc, 0, FCE_SIZE); 1013 rval = qla2x00_enable_fce_trace(vha, tc_dma, FCE_NUM_BUFFERS, 1014 ha->fce_mb, &ha->fce_bufs); 1015 if (rval) { 1016 qla_printk(KERN_WARNING, ha, "Unable to initialize " 1017 "FCE (%d).\n", rval); 1018 dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc, 1019 tc_dma); 1020 ha->flags.fce_enabled = 0; 1021 goto try_eft; 1022 } 1023 1024 qla_printk(KERN_INFO, ha, "Allocated (%d KB) for FCE...\n", 1025 FCE_SIZE / 1024); 1026 1027 fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE; 1028 ha->flags.fce_enabled = 1; 1029 ha->fce_dma = tc_dma; 1030 ha->fce = tc; 1031 try_eft: 1032 /* Allocate memory for Extended Trace Buffer. 
*/ 1033 tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma, 1034 GFP_KERNEL); 1035 if (!tc) { 1036 qla_printk(KERN_WARNING, ha, "Unable to allocate " 1037 "(%d KB) for EFT.\n", EFT_SIZE / 1024); 1038 goto cont_alloc; 1039 } 1040 1041 memset(tc, 0, EFT_SIZE); 1042 rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS); 1043 if (rval) { 1044 qla_printk(KERN_WARNING, ha, "Unable to initialize " 1045 "EFT (%d).\n", rval); 1046 dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc, 1047 tc_dma); 1048 goto cont_alloc; 1049 } 1050 1051 qla_printk(KERN_INFO, ha, "Allocated (%d KB) for EFT...\n", 1052 EFT_SIZE / 1024); 1053 1054 eft_size = EFT_SIZE; 1055 ha->eft_dma = tc_dma; 1056 ha->eft = tc; 1057 } 1058 cont_alloc: 1059 req_q_size = req->length * sizeof(request_t); 1060 rsp_q_size = rsp->length * sizeof(response_t); 1061 1062 dump_size = offsetof(struct qla2xxx_fw_dump, isp); 1063 dump_size += fixed_size + mem_size + req_q_size + rsp_q_size + eft_size; 1064 ha->chain_offset = dump_size; 1065 dump_size += mq_size + fce_size; 1066 1067 ha->fw_dump = vmalloc(dump_size); 1068 if (!ha->fw_dump) { 1069 qla_printk(KERN_WARNING, ha, "Unable to allocate (%d KB) for " 1070 "firmware dump!!!\n", dump_size / 1024); 1071 1072 if (ha->eft) { 1073 dma_free_coherent(&ha->pdev->dev, eft_size, ha->eft, 1074 ha->eft_dma); 1075 ha->eft = NULL; 1076 ha->eft_dma = 0; 1077 } 1078 return; 1079 } 1080 qla_printk(KERN_INFO, ha, "Allocated (%d KB) for firmware dump...\n", 1081 dump_size / 1024); 1082 1083 ha->fw_dump_len = dump_size; 1084 ha->fw_dump->signature[0] = 'Q'; 1085 ha->fw_dump->signature[1] = 'L'; 1086 ha->fw_dump->signature[2] = 'G'; 1087 ha->fw_dump->signature[3] = 'C'; 1088 ha->fw_dump->version = __constant_htonl(1); 1089 1090 ha->fw_dump->fixed_size = htonl(fixed_size); 1091 ha->fw_dump->mem_size = htonl(mem_size); 1092 ha->fw_dump->req_q_size = htonl(req_q_size); 1093 ha->fw_dump->rsp_q_size = htonl(rsp_q_size); 1094 1095 ha->fw_dump->eft_size = htonl(eft_size); 1096 ha->fw_dump->eft_addr_l = htonl(LSD(ha->eft_dma)); 1097 ha->fw_dump->eft_addr_h = htonl(MSD(ha->eft_dma)); 1098 1099 ha->fw_dump->header_size = 1100 htonl(offsetof(struct qla2xxx_fw_dump, isp)); 1101 } 1102 1103 static int 1104 qla81xx_mpi_sync(scsi_qla_host_t *vha) 1105 { 1106 #define MPS_MASK 0xe0 1107 int rval; 1108 uint16_t dc; 1109 uint32_t dw; 1110 struct qla_hw_data *ha = vha->hw; 1111 1112 if (!IS_QLA81XX(vha->hw)) 1113 return QLA_SUCCESS; 1114 1115 rval = qla2x00_write_ram_word(vha, 0x7c00, 1); 1116 if (rval != QLA_SUCCESS) { 1117 DEBUG2(qla_printk(KERN_WARNING, ha, 1118 "Sync-MPI: Unable to acquire semaphore.\n")); 1119 goto done; 1120 } 1121 1122 pci_read_config_word(vha->hw->pdev, 0x54, &dc); 1123 rval = qla2x00_read_ram_word(vha, 0x7a15, &dw); 1124 if (rval != QLA_SUCCESS) { 1125 DEBUG2(qla_printk(KERN_WARNING, ha, 1126 "Sync-MPI: Unable to read sync.\n")); 1127 goto done_release; 1128 } 1129 1130 dc &= MPS_MASK; 1131 if (dc == (dw & MPS_MASK)) 1132 goto done_release; 1133 1134 dw &= ~MPS_MASK; 1135 dw |= dc; 1136 rval = qla2x00_write_ram_word(vha, 0x7a15, dw); 1137 if (rval != QLA_SUCCESS) { 1138 DEBUG2(qla_printk(KERN_WARNING, ha, 1139 "Sync-MPI: Unable to gain sync.\n")); 1140 } 1141 1142 done_release: 1143 rval = qla2x00_write_ram_word(vha, 0x7c00, 0); 1144 if (rval != QLA_SUCCESS) { 1145 DEBUG2(qla_printk(KERN_WARNING, ha, 1146 "Sync-MPI: Unable to release semaphore.\n")); 1147 } 1148 1149 done: 1150 return rval; 1151 } 1152 1153 /** 1154 * qla2x00_setup_chip() - Load and start RISC firmware. 
1155 * @ha: HA context 1156 * 1157 * Returns 0 on success. 1158 */ 1159 static int 1160 qla2x00_setup_chip(scsi_qla_host_t *vha) 1161 { 1162 int rval; 1163 uint32_t srisc_address = 0; 1164 struct qla_hw_data *ha = vha->hw; 1165 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1166 unsigned long flags; 1167 uint16_t fw_major_version; 1168 1169 if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) { 1170 /* Disable SRAM, Instruction RAM and GP RAM parity. */ 1171 spin_lock_irqsave(&ha->hardware_lock, flags); 1172 WRT_REG_WORD(®->hccr, (HCCR_ENABLE_PARITY + 0x0)); 1173 RD_REG_WORD(®->hccr); 1174 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1175 } 1176 1177 qla81xx_mpi_sync(vha); 1178 1179 /* Load firmware sequences */ 1180 rval = ha->isp_ops->load_risc(vha, &srisc_address); 1181 if (rval == QLA_SUCCESS) { 1182 DEBUG(printk("scsi(%ld): Verifying Checksum of loaded RISC " 1183 "code.\n", vha->host_no)); 1184 1185 rval = qla2x00_verify_checksum(vha, srisc_address); 1186 if (rval == QLA_SUCCESS) { 1187 /* Start firmware execution. */ 1188 DEBUG(printk("scsi(%ld): Checksum OK, start " 1189 "firmware.\n", vha->host_no)); 1190 1191 rval = qla2x00_execute_fw(vha, srisc_address); 1192 /* Retrieve firmware information. */ 1193 if (rval == QLA_SUCCESS) { 1194 fw_major_version = ha->fw_major_version; 1195 rval = qla2x00_get_fw_version(vha, 1196 &ha->fw_major_version, 1197 &ha->fw_minor_version, 1198 &ha->fw_subminor_version, 1199 &ha->fw_attributes, &ha->fw_memory_size, 1200 ha->mpi_version, &ha->mpi_capabilities, 1201 ha->phy_version); 1202 if (rval != QLA_SUCCESS) 1203 goto failed; 1204 ha->flags.npiv_supported = 0; 1205 if (IS_QLA2XXX_MIDTYPE(ha) && 1206 (ha->fw_attributes & BIT_2)) { 1207 ha->flags.npiv_supported = 1; 1208 if ((!ha->max_npiv_vports) || 1209 ((ha->max_npiv_vports + 1) % 1210 MIN_MULTI_ID_FABRIC)) 1211 ha->max_npiv_vports = 1212 MIN_MULTI_ID_FABRIC - 1; 1213 } 1214 qla2x00_get_resource_cnts(vha, NULL, 1215 &ha->fw_xcb_count, NULL, NULL, 1216 &ha->max_npiv_vports, NULL); 1217 1218 if (!fw_major_version && ql2xallocfwdump) 1219 qla2x00_alloc_fw_dump(vha); 1220 } 1221 } else { 1222 DEBUG2(printk(KERN_INFO 1223 "scsi(%ld): ISP Firmware failed checksum.\n", 1224 vha->host_no)); 1225 } 1226 } 1227 1228 if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) { 1229 /* Enable proper parity. */ 1230 spin_lock_irqsave(&ha->hardware_lock, flags); 1231 if (IS_QLA2300(ha)) 1232 /* SRAM parity */ 1233 WRT_REG_WORD(®->hccr, HCCR_ENABLE_PARITY + 0x1); 1234 else 1235 /* SRAM, Instruction RAM and GP RAM parity */ 1236 WRT_REG_WORD(®->hccr, HCCR_ENABLE_PARITY + 0x7); 1237 RD_REG_WORD(®->hccr); 1238 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1239 } 1240 1241 if (rval == QLA_SUCCESS && IS_FAC_REQUIRED(ha)) { 1242 uint32_t size; 1243 1244 rval = qla81xx_fac_get_sector_size(vha, &size); 1245 if (rval == QLA_SUCCESS) { 1246 ha->flags.fac_supported = 1; 1247 ha->fdt_block_size = size << 2; 1248 } else { 1249 qla_printk(KERN_ERR, ha, 1250 "Unsupported FAC firmware (%d.%02d.%02d).\n", 1251 ha->fw_major_version, ha->fw_minor_version, 1252 ha->fw_subminor_version); 1253 } 1254 } 1255 failed: 1256 if (rval) { 1257 DEBUG2_3(printk("scsi(%ld): Setup chip **** FAILED ****.\n", 1258 vha->host_no)); 1259 } 1260 1261 return (rval); 1262 } 1263 1264 /** 1265 * qla2x00_init_response_q_entries() - Initializes response queue entries. 1266 * @ha: HA context 1267 * 1268 * Beginning of request ring has initialization control block already built 1269 * by nvram config routine. 
1270 * 1271 * Returns 0 on success. 1272 */ 1273 void 1274 qla2x00_init_response_q_entries(struct rsp_que *rsp) 1275 { 1276 uint16_t cnt; 1277 response_t *pkt; 1278 1279 rsp->ring_ptr = rsp->ring; 1280 rsp->ring_index = 0; 1281 rsp->status_srb = NULL; 1282 pkt = rsp->ring_ptr; 1283 for (cnt = 0; cnt < rsp->length; cnt++) { 1284 pkt->signature = RESPONSE_PROCESSED; 1285 pkt++; 1286 } 1287 } 1288 1289 /** 1290 * qla2x00_update_fw_options() - Read and process firmware options. 1291 * @ha: HA context 1292 * 1293 * Returns 0 on success. 1294 */ 1295 void 1296 qla2x00_update_fw_options(scsi_qla_host_t *vha) 1297 { 1298 uint16_t swing, emphasis, tx_sens, rx_sens; 1299 struct qla_hw_data *ha = vha->hw; 1300 1301 memset(ha->fw_options, 0, sizeof(ha->fw_options)); 1302 qla2x00_get_fw_options(vha, ha->fw_options); 1303 1304 if (IS_QLA2100(ha) || IS_QLA2200(ha)) 1305 return; 1306 1307 /* Serial Link options. */ 1308 DEBUG3(printk("scsi(%ld): Serial link options:\n", 1309 vha->host_no)); 1310 DEBUG3(qla2x00_dump_buffer((uint8_t *)&ha->fw_seriallink_options, 1311 sizeof(ha->fw_seriallink_options))); 1312 1313 ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING; 1314 if (ha->fw_seriallink_options[3] & BIT_2) { 1315 ha->fw_options[1] |= FO1_SET_EMPHASIS_SWING; 1316 1317 /* 1G settings */ 1318 swing = ha->fw_seriallink_options[2] & (BIT_2 | BIT_1 | BIT_0); 1319 emphasis = (ha->fw_seriallink_options[2] & 1320 (BIT_4 | BIT_3)) >> 3; 1321 tx_sens = ha->fw_seriallink_options[0] & 1322 (BIT_3 | BIT_2 | BIT_1 | BIT_0); 1323 rx_sens = (ha->fw_seriallink_options[0] & 1324 (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4; 1325 ha->fw_options[10] = (emphasis << 14) | (swing << 8); 1326 if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) { 1327 if (rx_sens == 0x0) 1328 rx_sens = 0x3; 1329 ha->fw_options[10] |= (tx_sens << 4) | rx_sens; 1330 } else if (IS_QLA2322(ha) || IS_QLA6322(ha)) 1331 ha->fw_options[10] |= BIT_5 | 1332 ((rx_sens & (BIT_1 | BIT_0)) << 2) | 1333 (tx_sens & (BIT_1 | BIT_0)); 1334 1335 /* 2G settings */ 1336 swing = (ha->fw_seriallink_options[2] & 1337 (BIT_7 | BIT_6 | BIT_5)) >> 5; 1338 emphasis = ha->fw_seriallink_options[3] & (BIT_1 | BIT_0); 1339 tx_sens = ha->fw_seriallink_options[1] & 1340 (BIT_3 | BIT_2 | BIT_1 | BIT_0); 1341 rx_sens = (ha->fw_seriallink_options[1] & 1342 (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4; 1343 ha->fw_options[11] = (emphasis << 14) | (swing << 8); 1344 if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) { 1345 if (rx_sens == 0x0) 1346 rx_sens = 0x3; 1347 ha->fw_options[11] |= (tx_sens << 4) | rx_sens; 1348 } else if (IS_QLA2322(ha) || IS_QLA6322(ha)) 1349 ha->fw_options[11] |= BIT_5 | 1350 ((rx_sens & (BIT_1 | BIT_0)) << 2) | 1351 (tx_sens & (BIT_1 | BIT_0)); 1352 } 1353 1354 /* FCP2 options. */ 1355 /* Return command IOCBs without waiting for an ABTS to complete. */ 1356 ha->fw_options[3] |= BIT_13; 1357 1358 /* LED scheme. */ 1359 if (ha->flags.enable_led_scheme) 1360 ha->fw_options[2] |= BIT_12; 1361 1362 /* Detect ISP6312. */ 1363 if (IS_QLA6312(ha)) 1364 ha->fw_options[2] |= BIT_13; 1365 1366 /* Update firmware options. */ 1367 qla2x00_set_fw_options(vha, ha->fw_options); 1368 } 1369 1370 void 1371 qla24xx_update_fw_options(scsi_qla_host_t *vha) 1372 { 1373 int rval; 1374 struct qla_hw_data *ha = vha->hw; 1375 1376 /* Update Serial Link options. 
*/ 1377 if ((le16_to_cpu(ha->fw_seriallink_options24[0]) & BIT_0) == 0) 1378 return; 1379 1380 rval = qla2x00_set_serdes_params(vha, 1381 le16_to_cpu(ha->fw_seriallink_options24[1]), 1382 le16_to_cpu(ha->fw_seriallink_options24[2]), 1383 le16_to_cpu(ha->fw_seriallink_options24[3])); 1384 if (rval != QLA_SUCCESS) { 1385 qla_printk(KERN_WARNING, ha, 1386 "Unable to update Serial Link options (%x).\n", rval); 1387 } 1388 } 1389 1390 void 1391 qla2x00_config_rings(struct scsi_qla_host *vha) 1392 { 1393 struct qla_hw_data *ha = vha->hw; 1394 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1395 struct req_que *req = ha->req_q_map[0]; 1396 struct rsp_que *rsp = ha->rsp_q_map[0]; 1397 1398 /* Setup ring parameters in initialization control block. */ 1399 ha->init_cb->request_q_outpointer = __constant_cpu_to_le16(0); 1400 ha->init_cb->response_q_inpointer = __constant_cpu_to_le16(0); 1401 ha->init_cb->request_q_length = cpu_to_le16(req->length); 1402 ha->init_cb->response_q_length = cpu_to_le16(rsp->length); 1403 ha->init_cb->request_q_address[0] = cpu_to_le32(LSD(req->dma)); 1404 ha->init_cb->request_q_address[1] = cpu_to_le32(MSD(req->dma)); 1405 ha->init_cb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma)); 1406 ha->init_cb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma)); 1407 1408 WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), 0); 1409 WRT_REG_WORD(ISP_REQ_Q_OUT(ha, reg), 0); 1410 WRT_REG_WORD(ISP_RSP_Q_IN(ha, reg), 0); 1411 WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), 0); 1412 RD_REG_WORD(ISP_RSP_Q_OUT(ha, reg)); /* PCI Posting. */ 1413 } 1414 1415 void 1416 qla24xx_config_rings(struct scsi_qla_host *vha) 1417 { 1418 struct qla_hw_data *ha = vha->hw; 1419 device_reg_t __iomem *reg = ISP_QUE_REG(ha, 0); 1420 struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp; 1421 struct qla_msix_entry *msix; 1422 struct init_cb_24xx *icb; 1423 uint16_t rid = 0; 1424 struct req_que *req = ha->req_q_map[0]; 1425 struct rsp_que *rsp = ha->rsp_q_map[0]; 1426 1427 /* Setup ring parameters in initialization control block. 
*/ 1428 icb = (struct init_cb_24xx *)ha->init_cb; 1429 icb->request_q_outpointer = __constant_cpu_to_le16(0); 1430 icb->response_q_inpointer = __constant_cpu_to_le16(0); 1431 icb->request_q_length = cpu_to_le16(req->length); 1432 icb->response_q_length = cpu_to_le16(rsp->length); 1433 icb->request_q_address[0] = cpu_to_le32(LSD(req->dma)); 1434 icb->request_q_address[1] = cpu_to_le32(MSD(req->dma)); 1435 icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma)); 1436 icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma)); 1437 1438 if (ha->mqenable) { 1439 icb->qos = __constant_cpu_to_le16(QLA_DEFAULT_QUE_QOS); 1440 icb->rid = __constant_cpu_to_le16(rid); 1441 if (ha->flags.msix_enabled) { 1442 msix = &ha->msix_entries[1]; 1443 DEBUG2_17(printk(KERN_INFO 1444 "Registering vector 0x%x for base que\n", msix->entry)); 1445 icb->msix = cpu_to_le16(msix->entry); 1446 } 1447 /* Use alternate PCI bus number */ 1448 if (MSB(rid)) 1449 icb->firmware_options_2 |= 1450 __constant_cpu_to_le32(BIT_19); 1451 /* Use alternate PCI devfn */ 1452 if (LSB(rid)) 1453 icb->firmware_options_2 |= 1454 __constant_cpu_to_le32(BIT_18); 1455 1456 /* Use Disable MSIX Handshake mode for capable adapters */ 1457 if (IS_MSIX_NACK_CAPABLE(ha)) { 1458 icb->firmware_options_2 &= 1459 __constant_cpu_to_le32(~BIT_22); 1460 ha->flags.disable_msix_handshake = 1; 1461 qla_printk(KERN_INFO, ha, 1462 "MSIX Handshake Disable Mode turned on\n"); 1463 } else { 1464 icb->firmware_options_2 |= 1465 __constant_cpu_to_le32(BIT_22); 1466 } 1467 icb->firmware_options_2 |= __constant_cpu_to_le32(BIT_23); 1468 1469 WRT_REG_DWORD(®->isp25mq.req_q_in, 0); 1470 WRT_REG_DWORD(®->isp25mq.req_q_out, 0); 1471 WRT_REG_DWORD(®->isp25mq.rsp_q_in, 0); 1472 WRT_REG_DWORD(®->isp25mq.rsp_q_out, 0); 1473 } else { 1474 WRT_REG_DWORD(®->isp24.req_q_in, 0); 1475 WRT_REG_DWORD(®->isp24.req_q_out, 0); 1476 WRT_REG_DWORD(®->isp24.rsp_q_in, 0); 1477 WRT_REG_DWORD(®->isp24.rsp_q_out, 0); 1478 } 1479 /* PCI posting */ 1480 RD_REG_DWORD(&ioreg->hccr); 1481 } 1482 1483 /** 1484 * qla2x00_init_rings() - Initializes firmware. 1485 * @ha: HA context 1486 * 1487 * Beginning of request ring has initialization control block already built 1488 * by nvram config routine. 1489 * 1490 * Returns 0 on success. 1491 */ 1492 static int 1493 qla2x00_init_rings(scsi_qla_host_t *vha) 1494 { 1495 int rval; 1496 unsigned long flags = 0; 1497 int cnt, que; 1498 struct qla_hw_data *ha = vha->hw; 1499 struct req_que *req; 1500 struct rsp_que *rsp; 1501 struct scsi_qla_host *vp; 1502 struct mid_init_cb_24xx *mid_init_cb = 1503 (struct mid_init_cb_24xx *) ha->init_cb; 1504 1505 spin_lock_irqsave(&ha->hardware_lock, flags); 1506 1507 /* Clear outstanding commands array. */ 1508 for (que = 0; que < ha->max_req_queues; que++) { 1509 req = ha->req_q_map[que]; 1510 if (!req) 1511 continue; 1512 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) 1513 req->outstanding_cmds[cnt] = NULL; 1514 1515 req->current_outstanding_cmd = 1; 1516 1517 /* Initialize firmware. */ 1518 req->ring_ptr = req->ring; 1519 req->ring_index = 0; 1520 req->cnt = req->length; 1521 } 1522 1523 for (que = 0; que < ha->max_rsp_queues; que++) { 1524 rsp = ha->rsp_q_map[que]; 1525 if (!rsp) 1526 continue; 1527 /* Initialize response queue entries */ 1528 qla2x00_init_response_q_entries(rsp); 1529 } 1530 1531 /* Clear RSCN queue. 
*/ 1532 list_for_each_entry(vp, &ha->vp_list, list) { 1533 vp->rscn_in_ptr = 0; 1534 vp->rscn_out_ptr = 0; 1535 } 1536 ha->isp_ops->config_rings(vha); 1537 1538 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1539 1540 /* Update any ISP specific firmware options before initialization. */ 1541 ha->isp_ops->update_fw_options(vha); 1542 1543 DEBUG(printk("scsi(%ld): Issue init firmware.\n", vha->host_no)); 1544 1545 if (ha->flags.npiv_supported) { 1546 if (ha->operating_mode == LOOP) 1547 ha->max_npiv_vports = MIN_MULTI_ID_FABRIC - 1; 1548 mid_init_cb->count = cpu_to_le16(ha->max_npiv_vports); 1549 } 1550 1551 if (IS_FWI2_CAPABLE(ha)) { 1552 mid_init_cb->options = __constant_cpu_to_le16(BIT_1); 1553 mid_init_cb->init_cb.execution_throttle = 1554 cpu_to_le16(ha->fw_xcb_count); 1555 } 1556 1557 rval = qla2x00_init_firmware(vha, ha->init_cb_size); 1558 if (rval) { 1559 DEBUG2_3(printk("scsi(%ld): Init firmware **** FAILED ****.\n", 1560 vha->host_no)); 1561 } else { 1562 DEBUG3(printk("scsi(%ld): Init firmware -- success.\n", 1563 vha->host_no)); 1564 } 1565 1566 return (rval); 1567 } 1568 1569 /** 1570 * qla2x00_fw_ready() - Waits for firmware ready. 1571 * @ha: HA context 1572 * 1573 * Returns 0 on success. 1574 */ 1575 static int 1576 qla2x00_fw_ready(scsi_qla_host_t *vha) 1577 { 1578 int rval; 1579 unsigned long wtime, mtime, cs84xx_time; 1580 uint16_t min_wait; /* Minimum wait time if loop is down */ 1581 uint16_t wait_time; /* Wait time if loop is coming ready */ 1582 uint16_t state[5]; 1583 struct qla_hw_data *ha = vha->hw; 1584 1585 rval = QLA_SUCCESS; 1586 1587 /* 20 seconds for loop down. */ 1588 min_wait = 20; 1589 1590 /* 1591 * Firmware should take at most one RATOV to login, plus 5 seconds for 1592 * our own processing. 1593 */ 1594 if ((wait_time = (ha->retry_count*ha->login_timeout) + 5) < min_wait) { 1595 wait_time = min_wait; 1596 } 1597 1598 /* Min wait time if loop down */ 1599 mtime = jiffies + (min_wait * HZ); 1600 1601 /* wait time before firmware ready */ 1602 wtime = jiffies + (wait_time * HZ); 1603 1604 /* Wait for ISP to finish LIP */ 1605 if (!vha->flags.init_done) 1606 qla_printk(KERN_INFO, ha, "Waiting for LIP to complete...\n"); 1607 1608 DEBUG3(printk("scsi(%ld): Waiting for LIP to complete...\n", 1609 vha->host_no)); 1610 1611 do { 1612 rval = qla2x00_get_firmware_state(vha, state); 1613 if (rval == QLA_SUCCESS) { 1614 if (state[0] < FSTATE_LOSS_OF_SYNC) { 1615 vha->device_flags &= ~DFLG_NO_CABLE; 1616 } 1617 if (IS_QLA84XX(ha) && state[0] != FSTATE_READY) { 1618 DEBUG16(printk("scsi(%ld): fw_state=%x " 1619 "84xx=%x.\n", vha->host_no, state[0], 1620 state[2])); 1621 if ((state[2] & FSTATE_LOGGED_IN) && 1622 (state[2] & FSTATE_WAITING_FOR_VERIFY)) { 1623 DEBUG16(printk("scsi(%ld): Sending " 1624 "verify iocb.\n", vha->host_no)); 1625 1626 cs84xx_time = jiffies; 1627 rval = qla84xx_init_chip(vha); 1628 if (rval != QLA_SUCCESS) 1629 break; 1630 1631 /* Add time taken to initialize. */ 1632 cs84xx_time = jiffies - cs84xx_time; 1633 wtime += cs84xx_time; 1634 mtime += cs84xx_time; 1635 DEBUG16(printk("scsi(%ld): Increasing " 1636 "wait time by %ld. 
New time %ld\n", 1637 vha->host_no, cs84xx_time, wtime)); 1638 } 1639 } else if (state[0] == FSTATE_READY) { 1640 DEBUG(printk("scsi(%ld): F/W Ready - OK \n", 1641 vha->host_no)); 1642 1643 qla2x00_get_retry_cnt(vha, &ha->retry_count, 1644 &ha->login_timeout, &ha->r_a_tov); 1645 1646 rval = QLA_SUCCESS; 1647 break; 1648 } 1649 1650 rval = QLA_FUNCTION_FAILED; 1651 1652 if (atomic_read(&vha->loop_down_timer) && 1653 state[0] != FSTATE_READY) { 1654 /* Loop down. Timeout on min_wait for states 1655 * other than Wait for Login. 1656 */ 1657 if (time_after_eq(jiffies, mtime)) { 1658 qla_printk(KERN_INFO, ha, 1659 "Cable is unplugged...\n"); 1660 1661 vha->device_flags |= DFLG_NO_CABLE; 1662 break; 1663 } 1664 } 1665 } else { 1666 /* Mailbox cmd failed. Timeout on min_wait. */ 1667 if (time_after_eq(jiffies, mtime)) 1668 break; 1669 } 1670 1671 if (time_after_eq(jiffies, wtime)) 1672 break; 1673 1674 /* Delay for a while */ 1675 msleep(500); 1676 1677 DEBUG3(printk("scsi(%ld): fw_state=%x curr time=%lx.\n", 1678 vha->host_no, state[0], jiffies)); 1679 } while (1); 1680 1681 DEBUG(printk("scsi(%ld): fw_state=%x (%x, %x, %x, %x) curr time=%lx.\n", 1682 vha->host_no, state[0], state[1], state[2], state[3], state[4], 1683 jiffies)); 1684 1685 if (rval) { 1686 DEBUG2_3(printk("scsi(%ld): Firmware ready **** FAILED ****.\n", 1687 vha->host_no)); 1688 } 1689 1690 return (rval); 1691 } 1692 1693 /* 1694 * qla2x00_configure_hba 1695 * Setup adapter context. 1696 * 1697 * Input: 1698 * ha = adapter state pointer. 1699 * 1700 * Returns: 1701 * 0 = success 1702 * 1703 * Context: 1704 * Kernel context. 1705 */ 1706 static int 1707 qla2x00_configure_hba(scsi_qla_host_t *vha) 1708 { 1709 int rval; 1710 uint16_t loop_id; 1711 uint16_t topo; 1712 uint16_t sw_cap; 1713 uint8_t al_pa; 1714 uint8_t area; 1715 uint8_t domain; 1716 char connect_type[22]; 1717 struct qla_hw_data *ha = vha->hw; 1718 1719 /* Get host addresses. 
*/ 1720 rval = qla2x00_get_adapter_id(vha, 1721 &loop_id, &al_pa, &area, &domain, &topo, &sw_cap); 1722 if (rval != QLA_SUCCESS) { 1723 if (LOOP_TRANSITION(vha) || atomic_read(&ha->loop_down_timer) || 1724 (rval == QLA_COMMAND_ERROR && loop_id == 0x7)) { 1725 DEBUG2(printk("%s(%ld) Loop is in a transition state\n", 1726 __func__, vha->host_no)); 1727 } else { 1728 qla_printk(KERN_WARNING, ha, 1729 "ERROR -- Unable to get host loop ID.\n"); 1730 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1731 } 1732 return (rval); 1733 } 1734 1735 if (topo == 4) { 1736 qla_printk(KERN_INFO, ha, 1737 "Cannot get topology - retrying.\n"); 1738 return (QLA_FUNCTION_FAILED); 1739 } 1740 1741 vha->loop_id = loop_id; 1742 1743 /* initialize */ 1744 ha->min_external_loopid = SNS_FIRST_LOOP_ID; 1745 ha->operating_mode = LOOP; 1746 ha->switch_cap = 0; 1747 1748 switch (topo) { 1749 case 0: 1750 DEBUG3(printk("scsi(%ld): HBA in NL topology.\n", 1751 vha->host_no)); 1752 ha->current_topology = ISP_CFG_NL; 1753 strcpy(connect_type, "(Loop)"); 1754 break; 1755 1756 case 1: 1757 DEBUG3(printk("scsi(%ld): HBA in FL topology.\n", 1758 vha->host_no)); 1759 ha->switch_cap = sw_cap; 1760 ha->current_topology = ISP_CFG_FL; 1761 strcpy(connect_type, "(FL_Port)"); 1762 break; 1763 1764 case 2: 1765 DEBUG3(printk("scsi(%ld): HBA in N P2P topology.\n", 1766 vha->host_no)); 1767 ha->operating_mode = P2P; 1768 ha->current_topology = ISP_CFG_N; 1769 strcpy(connect_type, "(N_Port-to-N_Port)"); 1770 break; 1771 1772 case 3: 1773 DEBUG3(printk("scsi(%ld): HBA in F P2P topology.\n", 1774 vha->host_no)); 1775 ha->switch_cap = sw_cap; 1776 ha->operating_mode = P2P; 1777 ha->current_topology = ISP_CFG_F; 1778 strcpy(connect_type, "(F_Port)"); 1779 break; 1780 1781 default: 1782 DEBUG3(printk("scsi(%ld): HBA in unknown topology %x. " 1783 "Using NL.\n", 1784 vha->host_no, topo)); 1785 ha->current_topology = ISP_CFG_NL; 1786 strcpy(connect_type, "(Loop)"); 1787 break; 1788 } 1789 1790 /* Save Host port and loop ID. 
*/ 1791 /* byte order - Big Endian */ 1792 vha->d_id.b.domain = domain; 1793 vha->d_id.b.area = area; 1794 vha->d_id.b.al_pa = al_pa; 1795 1796 if (!vha->flags.init_done) 1797 qla_printk(KERN_INFO, ha, 1798 "Topology - %s, Host Loop address 0x%x\n", 1799 connect_type, vha->loop_id); 1800 1801 if (rval) { 1802 DEBUG2_3(printk("scsi(%ld): FAILED.\n", vha->host_no)); 1803 } else { 1804 DEBUG3(printk("scsi(%ld): exiting normally.\n", vha->host_no)); 1805 } 1806 1807 return(rval); 1808 } 1809 1810 static inline void 1811 qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len, 1812 char *def) 1813 { 1814 char *st, *en; 1815 uint16_t index; 1816 struct qla_hw_data *ha = vha->hw; 1817 int use_tbl = !IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) && 1818 !IS_QLA81XX(ha); 1819 1820 if (memcmp(model, BINZERO, len) != 0) { 1821 strncpy(ha->model_number, model, len); 1822 st = en = ha->model_number; 1823 en += len - 1; 1824 while (en > st) { 1825 if (*en != 0x20 && *en != 0x00) 1826 break; 1827 *en-- = '\0'; 1828 } 1829 1830 index = (ha->pdev->subsystem_device & 0xff); 1831 if (use_tbl && 1832 ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC && 1833 index < QLA_MODEL_NAMES) 1834 strncpy(ha->model_desc, 1835 qla2x00_model_name[index * 2 + 1], 1836 sizeof(ha->model_desc) - 1); 1837 } else { 1838 index = (ha->pdev->subsystem_device & 0xff); 1839 if (use_tbl && 1840 ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC && 1841 index < QLA_MODEL_NAMES) { 1842 strcpy(ha->model_number, 1843 qla2x00_model_name[index * 2]); 1844 strncpy(ha->model_desc, 1845 qla2x00_model_name[index * 2 + 1], 1846 sizeof(ha->model_desc) - 1); 1847 } else { 1848 strcpy(ha->model_number, def); 1849 } 1850 } 1851 if (IS_FWI2_CAPABLE(ha)) 1852 qla2xxx_get_vpd_field(vha, "\x82", ha->model_desc, 1853 sizeof(ha->model_desc)); 1854 } 1855 1856 /* On sparc systems, obtain port and node WWN from firmware 1857 * properties. 1858 */ 1859 static void qla2xxx_nvram_wwn_from_ofw(scsi_qla_host_t *vha, nvram_t *nv) 1860 { 1861 #ifdef CONFIG_SPARC 1862 struct qla_hw_data *ha = vha->hw; 1863 struct pci_dev *pdev = ha->pdev; 1864 struct device_node *dp = pci_device_to_OF_node(pdev); 1865 const u8 *val; 1866 int len; 1867 1868 val = of_get_property(dp, "port-wwn", &len); 1869 if (val && len >= WWN_SIZE) 1870 memcpy(nv->port_name, val, WWN_SIZE); 1871 1872 val = of_get_property(dp, "node-wwn", &len); 1873 if (val && len >= WWN_SIZE) 1874 memcpy(nv->node_name, val, WWN_SIZE); 1875 #endif 1876 } 1877 1878 /* 1879 * NVRAM configuration for ISP 2xxx 1880 * 1881 * Input: 1882 * ha = adapter block pointer. 1883 * 1884 * Output: 1885 * initialization control block in response_ring 1886 * host adapters parameters in host adapter block 1887 * 1888 * Returns: 1889 * 0 = success. 1890 */ 1891 int 1892 qla2x00_nvram_config(scsi_qla_host_t *vha) 1893 { 1894 int rval; 1895 uint8_t chksum = 0; 1896 uint16_t cnt; 1897 uint8_t *dptr1, *dptr2; 1898 struct qla_hw_data *ha = vha->hw; 1899 init_cb_t *icb = ha->init_cb; 1900 nvram_t *nv = ha->nvram; 1901 uint8_t *ptr = ha->nvram; 1902 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1903 1904 rval = QLA_SUCCESS; 1905 1906 /* Determine NVRAM starting address. */ 1907 ha->nvram_size = sizeof(nvram_t); 1908 ha->nvram_base = 0; 1909 if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha)) 1910 if ((RD_REG_WORD(®->ctrl_status) >> 14) == 1) 1911 ha->nvram_base = 0x80; 1912 1913 /* Get NVRAM data and calculate checksum. 
*/ 1914 ha->isp_ops->read_nvram(vha, ptr, ha->nvram_base, ha->nvram_size); 1915 for (cnt = 0, chksum = 0; cnt < ha->nvram_size; cnt++) 1916 chksum += *ptr++; 1917 1918 DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", vha->host_no)); 1919 DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size)); 1920 1921 /* Bad NVRAM data, set defaults parameters. */ 1922 if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || 1923 nv->id[2] != 'P' || nv->id[3] != ' ' || nv->nvram_version < 1) { 1924 /* Reset NVRAM data. */ 1925 qla_printk(KERN_WARNING, ha, "Inconsistent NVRAM detected: " 1926 "checksum=0x%x id=%c version=0x%x.\n", chksum, nv->id[0], 1927 nv->nvram_version); 1928 qla_printk(KERN_WARNING, ha, "Falling back to functioning (yet " 1929 "invalid -- WWPN) defaults.\n"); 1930 1931 /* 1932 * Set default initialization control block. 1933 */ 1934 memset(nv, 0, ha->nvram_size); 1935 nv->parameter_block_version = ICB_VERSION; 1936 1937 if (IS_QLA23XX(ha)) { 1938 nv->firmware_options[0] = BIT_2 | BIT_1; 1939 nv->firmware_options[1] = BIT_7 | BIT_5; 1940 nv->add_firmware_options[0] = BIT_5; 1941 nv->add_firmware_options[1] = BIT_5 | BIT_4; 1942 nv->frame_payload_size = __constant_cpu_to_le16(2048); 1943 nv->special_options[1] = BIT_7; 1944 } else if (IS_QLA2200(ha)) { 1945 nv->firmware_options[0] = BIT_2 | BIT_1; 1946 nv->firmware_options[1] = BIT_7 | BIT_5; 1947 nv->add_firmware_options[0] = BIT_5; 1948 nv->add_firmware_options[1] = BIT_5 | BIT_4; 1949 nv->frame_payload_size = __constant_cpu_to_le16(1024); 1950 } else if (IS_QLA2100(ha)) { 1951 nv->firmware_options[0] = BIT_3 | BIT_1; 1952 nv->firmware_options[1] = BIT_5; 1953 nv->frame_payload_size = __constant_cpu_to_le16(1024); 1954 } 1955 1956 nv->max_iocb_allocation = __constant_cpu_to_le16(256); 1957 nv->execution_throttle = __constant_cpu_to_le16(16); 1958 nv->retry_count = 8; 1959 nv->retry_delay = 1; 1960 1961 nv->port_name[0] = 33; 1962 nv->port_name[3] = 224; 1963 nv->port_name[4] = 139; 1964 1965 qla2xxx_nvram_wwn_from_ofw(vha, nv); 1966 1967 nv->login_timeout = 4; 1968 1969 /* 1970 * Set default host adapter parameters 1971 */ 1972 nv->host_p[1] = BIT_2; 1973 nv->reset_delay = 5; 1974 nv->port_down_retry_count = 8; 1975 nv->max_luns_per_target = __constant_cpu_to_le16(8); 1976 nv->link_down_timeout = 60; 1977 1978 rval = 1; 1979 } 1980 1981 #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2) 1982 /* 1983 * The SN2 does not provide BIOS emulation which means you can't change 1984 * potentially bogus BIOS settings. Force the use of default settings 1985 * for link rate and frame size. Hope that the rest of the settings 1986 * are valid. 1987 */ 1988 if (ia64_platform_is("sn2")) { 1989 nv->frame_payload_size = __constant_cpu_to_le16(2048); 1990 if (IS_QLA23XX(ha)) 1991 nv->special_options[1] = BIT_7; 1992 } 1993 #endif 1994 1995 /* Reset Initialization control block */ 1996 memset(icb, 0, ha->init_cb_size); 1997 1998 /* 1999 * Setup driver NVRAM options. 
2000 */ 2001 nv->firmware_options[0] |= (BIT_6 | BIT_1); 2002 nv->firmware_options[0] &= ~(BIT_5 | BIT_4); 2003 nv->firmware_options[1] |= (BIT_5 | BIT_0); 2004 nv->firmware_options[1] &= ~BIT_4; 2005 2006 if (IS_QLA23XX(ha)) { 2007 nv->firmware_options[0] |= BIT_2; 2008 nv->firmware_options[0] &= ~BIT_3; 2009 nv->add_firmware_options[1] |= BIT_5 | BIT_4; 2010 2011 if (IS_QLA2300(ha)) { 2012 if (ha->fb_rev == FPM_2310) { 2013 strcpy(ha->model_number, "QLA2310"); 2014 } else { 2015 strcpy(ha->model_number, "QLA2300"); 2016 } 2017 } else { 2018 qla2x00_set_model_info(vha, nv->model_number, 2019 sizeof(nv->model_number), "QLA23xx"); 2020 } 2021 } else if (IS_QLA2200(ha)) { 2022 nv->firmware_options[0] |= BIT_2; 2023 /* 2024 * 'Point-to-point preferred, else loop' is not a safe 2025 * connection mode setting. 2026 */ 2027 if ((nv->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) == 2028 (BIT_5 | BIT_4)) { 2029 /* Force 'loop preferred, else point-to-point'. */ 2030 nv->add_firmware_options[0] &= ~(BIT_6 | BIT_5 | BIT_4); 2031 nv->add_firmware_options[0] |= BIT_5; 2032 } 2033 strcpy(ha->model_number, "QLA22xx"); 2034 } else /*if (IS_QLA2100(ha))*/ { 2035 strcpy(ha->model_number, "QLA2100"); 2036 } 2037 2038 /* 2039 * Copy over NVRAM RISC parameter block to initialization control block. 2040 */ 2041 dptr1 = (uint8_t *)icb; 2042 dptr2 = (uint8_t *)&nv->parameter_block_version; 2043 cnt = (uint8_t *)&icb->request_q_outpointer - (uint8_t *)&icb->version; 2044 while (cnt--) 2045 *dptr1++ = *dptr2++; 2046 2047 /* Copy 2nd half. */ 2048 dptr1 = (uint8_t *)icb->add_firmware_options; 2049 cnt = (uint8_t *)icb->reserved_3 - (uint8_t *)icb->add_firmware_options; 2050 while (cnt--) 2051 *dptr1++ = *dptr2++; 2052 2053 /* Use alternate WWN? */ 2054 if (nv->host_p[1] & BIT_7) { 2055 memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE); 2056 memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE); 2057 } 2058 2059 /* Prepare nodename */ 2060 if ((icb->firmware_options[1] & BIT_6) == 0) { 2061 /* 2062 * Firmware will apply the following mask if the nodename was 2063 * not provided. 2064 */ 2065 memcpy(icb->node_name, icb->port_name, WWN_SIZE); 2066 icb->node_name[0] &= 0xF0; 2067 } 2068 2069 /* 2070 * Set host adapter parameters. 2071 */ 2072 if (nv->host_p[0] & BIT_7) 2073 ql2xextended_error_logging = 1; 2074 ha->flags.disable_risc_code_load = ((nv->host_p[0] & BIT_4) ? 1 : 0); 2075 /* Always load RISC code on non ISP2[12]00 chips. */ 2076 if (!IS_QLA2100(ha) && !IS_QLA2200(ha)) 2077 ha->flags.disable_risc_code_load = 0; 2078 ha->flags.enable_lip_reset = ((nv->host_p[1] & BIT_1) ? 1 : 0); 2079 ha->flags.enable_lip_full_login = ((nv->host_p[1] & BIT_2) ? 1 : 0); 2080 ha->flags.enable_target_reset = ((nv->host_p[1] & BIT_3) ? 1 : 0); 2081 ha->flags.enable_led_scheme = (nv->special_options[1] & BIT_4) ? 1 : 0; 2082 ha->flags.disable_serdes = 0; 2083 2084 ha->operating_mode = 2085 (icb->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) >> 4; 2086 2087 memcpy(ha->fw_seriallink_options, nv->seriallink_options, 2088 sizeof(ha->fw_seriallink_options)); 2089 2090 /* save HBA serial number */ 2091 ha->serial0 = icb->port_name[5]; 2092 ha->serial1 = icb->port_name[6]; 2093 ha->serial2 = icb->port_name[7]; 2094 memcpy(vha->node_name, icb->node_name, WWN_SIZE); 2095 memcpy(vha->port_name, icb->port_name, WWN_SIZE); 2096 2097 icb->execution_throttle = __constant_cpu_to_le16(0xFFFF); 2098 2099 ha->retry_count = nv->retry_count; 2100 2101 /* Set minimum login_timeout to 4 seconds. 
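 * Note: the NVRAM value is first raised to the ql2xlogintimeout module
 * parameter and then clamped to a floor of 4 seconds before being
 * copied into both the host state and the init control block.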
*/ 2102 if (nv->login_timeout < ql2xlogintimeout) 2103 nv->login_timeout = ql2xlogintimeout; 2104 if (nv->login_timeout < 4) 2105 nv->login_timeout = 4; 2106 ha->login_timeout = nv->login_timeout; 2107 icb->login_timeout = nv->login_timeout; 2108 2109 /* Set minimum RATOV to 100 tenths of a second. */ 2110 ha->r_a_tov = 100; 2111 2112 ha->loop_reset_delay = nv->reset_delay; 2113 2114 /* Link Down Timeout = 0: 2115 * 2116 * When Port Down timer expires we will start returning 2117 * I/O's to OS with "DID_NO_CONNECT". 2118 * 2119 * Link Down Timeout != 0: 2120 * 2121 * The driver waits for the link to come up after link down 2122 * before returning I/Os to OS with "DID_NO_CONNECT". 2123 */ 2124 if (nv->link_down_timeout == 0) { 2125 ha->loop_down_abort_time = 2126 (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT); 2127 } else { 2128 ha->link_down_timeout = nv->link_down_timeout; 2129 ha->loop_down_abort_time = 2130 (LOOP_DOWN_TIME - ha->link_down_timeout); 2131 } 2132 2133 /* 2134 * Need enough time to try and get the port back. 2135 */ 2136 ha->port_down_retry_count = nv->port_down_retry_count; 2137 if (qlport_down_retry) 2138 ha->port_down_retry_count = qlport_down_retry; 2139 /* Set login_retry_count */ 2140 ha->login_retry_count = nv->retry_count; 2141 if (ha->port_down_retry_count == nv->port_down_retry_count && 2142 ha->port_down_retry_count > 3) 2143 ha->login_retry_count = ha->port_down_retry_count; 2144 else if (ha->port_down_retry_count > (int)ha->login_retry_count) 2145 ha->login_retry_count = ha->port_down_retry_count; 2146 if (ql2xloginretrycount) 2147 ha->login_retry_count = ql2xloginretrycount; 2148 2149 icb->lun_enables = __constant_cpu_to_le16(0); 2150 icb->command_resource_count = 0; 2151 icb->immediate_notify_resource_count = 0; 2152 icb->timeout = __constant_cpu_to_le16(0); 2153 2154 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 2155 /* Enable RIO */ 2156 icb->firmware_options[0] &= ~BIT_3; 2157 icb->add_firmware_options[0] &= 2158 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0); 2159 icb->add_firmware_options[0] |= BIT_2; 2160 icb->response_accumulation_timer = 3; 2161 icb->interrupt_delay_timer = 5; 2162 2163 vha->flags.process_response_queue = 1; 2164 } else { 2165 /* Enable ZIO. */ 2166 if (!vha->flags.init_done) { 2167 ha->zio_mode = icb->add_firmware_options[0] & 2168 (BIT_3 | BIT_2 | BIT_1 | BIT_0); 2169 ha->zio_timer = icb->interrupt_delay_timer ? 2170 icb->interrupt_delay_timer: 2; 2171 } 2172 icb->add_firmware_options[0] &= 2173 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0); 2174 vha->flags.process_response_queue = 0; 2175 if (ha->zio_mode != QLA_ZIO_DISABLED) { 2176 ha->zio_mode = QLA_ZIO_MODE_6; 2177 2178 DEBUG2(printk("scsi(%ld): ZIO mode %d enabled; timer " 2179 "delay (%d us).\n", vha->host_no, ha->zio_mode, 2180 ha->zio_timer * 100)); 2181 qla_printk(KERN_INFO, ha, 2182 "ZIO mode %d enabled; timer delay (%d us).\n", 2183 ha->zio_mode, ha->zio_timer * 100); 2184 2185 icb->add_firmware_options[0] |= (uint8_t)ha->zio_mode; 2186 icb->interrupt_delay_timer = (uint8_t)ha->zio_timer; 2187 vha->flags.process_response_queue = 1; 2188 } 2189 } 2190 2191 if (rval) { 2192 DEBUG2_3(printk(KERN_WARNING 2193 "scsi(%ld): NVRAM configuration failed!\n", vha->host_no)); 2194 } 2195 return (rval); 2196 } 2197 2198 static void 2199 qla2x00_rport_del(void *data) 2200 { 2201 fc_port_t *fcport = data; 2202 struct fc_rport *rport; 2203 2204 spin_lock_irq(fcport->vha->host->host_lock); 2205 rport = fcport->drport ? 
fcport->drport: fcport->rport; 2206 fcport->drport = NULL; 2207 spin_unlock_irq(fcport->vha->host->host_lock); 2208 if (rport) 2209 fc_remote_port_delete(rport); 2210 } 2211 2212 /** 2213 * qla2x00_alloc_fcport() - Allocate a generic fcport. 2214 * @vha: HA context 2215 * @flags: allocation flags 2216 * 2217 * Returns a pointer to the allocated fcport, or NULL if none is available. 2218 */ 2219 static fc_port_t * 2220 qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags) 2221 { 2222 fc_port_t *fcport; 2223 2224 fcport = kzalloc(sizeof(fc_port_t), flags); 2225 if (!fcport) 2226 return NULL; 2227 2228 /* Setup fcport template structure. */ 2229 fcport->vha = vha; 2230 fcport->vp_idx = vha->vp_idx; 2231 fcport->port_type = FCT_UNKNOWN; 2232 fcport->loop_id = FC_NO_LOOP_ID; 2233 atomic_set(&fcport->state, FCS_UNCONFIGURED); 2234 fcport->supported_classes = FC_COS_UNSPECIFIED; 2235 2236 return fcport; 2237 } 2238 2239 /* 2240 * qla2x00_configure_loop 2241 * Updates Fibre Channel Device Database with what is actually on loop. 2242 * 2243 * Input: 2244 * ha = adapter block pointer. 2245 * 2246 * Returns: 2247 * 0 = success. 2248 * 1 = error. 2249 * 2 = database was full and device was not configured. 2250 */ 2251 static int 2252 qla2x00_configure_loop(scsi_qla_host_t *vha) 2253 { 2254 int rval; 2255 unsigned long flags, save_flags; 2256 struct qla_hw_data *ha = vha->hw; 2257 rval = QLA_SUCCESS; 2258 2259 /* Get Initiator ID */ 2260 if (test_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags)) { 2261 rval = qla2x00_configure_hba(vha); 2262 if (rval != QLA_SUCCESS) { 2263 DEBUG(printk("scsi(%ld): Unable to configure HBA.\n", 2264 vha->host_no)); 2265 return (rval); 2266 } 2267 } 2268 2269 save_flags = flags = vha->dpc_flags; 2270 DEBUG(printk("scsi(%ld): Configure loop -- dpc flags =0x%lx\n", 2271 vha->host_no, flags)); 2272 2273 /* 2274 * If we have both an RSCN and PORT UPDATE pending then handle them 2275 * both at the same time.
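 * Note: both bits are cleared from vha->dpc_flags up front; the local
 * snapshot in 'flags' drives the decisions below, and 'save_flags' is
 * used at the end to re-arm the dpc flags if a loop resync interrupts
 * the update.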
2276 */ 2277 clear_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 2278 clear_bit(RSCN_UPDATE, &vha->dpc_flags); 2279 2280 qla2x00_get_data_rate(vha); 2281 2282 /* Determine what we need to do */ 2283 if (ha->current_topology == ISP_CFG_FL && 2284 (test_bit(LOCAL_LOOP_UPDATE, &flags))) { 2285 2286 vha->flags.rscn_queue_overflow = 1; 2287 set_bit(RSCN_UPDATE, &flags); 2288 2289 } else if (ha->current_topology == ISP_CFG_F && 2290 (test_bit(LOCAL_LOOP_UPDATE, &flags))) { 2291 2292 vha->flags.rscn_queue_overflow = 1; 2293 set_bit(RSCN_UPDATE, &flags); 2294 clear_bit(LOCAL_LOOP_UPDATE, &flags); 2295 2296 } else if (ha->current_topology == ISP_CFG_N) { 2297 clear_bit(RSCN_UPDATE, &flags); 2298 2299 } else if (!vha->flags.online || 2300 (test_bit(ABORT_ISP_ACTIVE, &flags))) { 2301 2302 vha->flags.rscn_queue_overflow = 1; 2303 set_bit(RSCN_UPDATE, &flags); 2304 set_bit(LOCAL_LOOP_UPDATE, &flags); 2305 } 2306 2307 if (test_bit(LOCAL_LOOP_UPDATE, &flags)) { 2308 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) 2309 rval = QLA_FUNCTION_FAILED; 2310 else 2311 rval = qla2x00_configure_local_loop(vha); 2312 } 2313 2314 if (rval == QLA_SUCCESS && test_bit(RSCN_UPDATE, &flags)) { 2315 if (LOOP_TRANSITION(vha)) 2316 rval = QLA_FUNCTION_FAILED; 2317 else 2318 rval = qla2x00_configure_fabric(vha); 2319 } 2320 2321 if (rval == QLA_SUCCESS) { 2322 if (atomic_read(&vha->loop_down_timer) || 2323 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) { 2324 rval = QLA_FUNCTION_FAILED; 2325 } else { 2326 atomic_set(&vha->loop_state, LOOP_READY); 2327 2328 DEBUG(printk("scsi(%ld): LOOP READY\n", vha->host_no)); 2329 } 2330 } 2331 2332 if (rval) { 2333 DEBUG2_3(printk("%s(%ld): *** FAILED ***\n", 2334 __func__, vha->host_no)); 2335 } else { 2336 DEBUG3(printk("%s: exiting normally\n", __func__)); 2337 } 2338 2339 /* Restore state if a resync event occurred during processing */ 2340 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) { 2341 if (test_bit(LOCAL_LOOP_UPDATE, &save_flags)) 2342 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 2343 if (test_bit(RSCN_UPDATE, &save_flags)) { 2344 set_bit(RSCN_UPDATE, &vha->dpc_flags); 2345 vha->flags.rscn_queue_overflow = 1; 2346 } 2347 } 2348 2349 return (rval); 2350 } 2351 2352 2353 2354 /* 2355 * qla2x00_configure_local_loop 2356 * Updates Fibre Channel Device Database with local loop devices. 2357 * 2358 * Input: 2359 * ha = adapter block pointer. 2360 * 2361 * Returns: 2362 * 0 = success. 2363 */ 2364 static int 2365 qla2x00_configure_local_loop(scsi_qla_host_t *vha) 2366 { 2367 int rval, rval2; 2368 int found_devs; 2369 int found; 2370 fc_port_t *fcport, *new_fcport; 2371 2372 uint16_t index; 2373 uint16_t entries; 2374 char *id_iter; 2375 uint16_t loop_id; 2376 uint8_t domain, area, al_pa; 2377 struct qla_hw_data *ha = vha->hw; 2378 2379 found_devs = 0; 2380 new_fcport = NULL; 2381 entries = MAX_FIBRE_DEVICES; 2382 2383 DEBUG3(printk("scsi(%ld): Getting FCAL position map\n", vha->host_no)); 2384 DEBUG3(qla2x00_get_fcal_position_map(vha, NULL)); 2385 2386 /* Get list of logged in devices. */ 2387 memset(ha->gid_list, 0, GID_LIST_SIZE); 2388 rval = qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma, 2389 &entries); 2390 if (rval != QLA_SUCCESS) 2391 goto cleanup_allocation; 2392 2393 DEBUG3(printk("scsi(%ld): Entries in ID list (%d)\n", 2394 vha->host_no, entries)); 2395 DEBUG3(qla2x00_dump_buffer((uint8_t *)ha->gid_list, 2396 entries * sizeof(struct gid_list_info))); 2397 2398 /* Allocate temporary fcport for any new fcports discovered. 
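 * Note: the temporary fcport is reused across iterations -- whenever it
 * is consumed (added to vp_fcports) a fresh one is allocated in its
 * place, and the unused spare is freed at cleanup_allocation.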
*/ 2399 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); 2400 if (new_fcport == NULL) { 2401 rval = QLA_MEMORY_ALLOC_FAILED; 2402 goto cleanup_allocation; 2403 } 2404 new_fcport->flags &= ~FCF_FABRIC_DEVICE; 2405 2406 /* 2407 * Mark local devices that were present with FCF_DEVICE_LOST for now. 2408 */ 2409 list_for_each_entry(fcport, &vha->vp_fcports, list) { 2410 if (atomic_read(&fcport->state) == FCS_ONLINE && 2411 fcport->port_type != FCT_BROADCAST && 2412 (fcport->flags & FCF_FABRIC_DEVICE) == 0) { 2413 2414 DEBUG(printk("scsi(%ld): Marking port lost, " 2415 "loop_id=0x%04x\n", 2416 vha->host_no, fcport->loop_id)); 2417 2418 atomic_set(&fcport->state, FCS_DEVICE_LOST); 2419 } 2420 } 2421 2422 /* Add devices to port list. */ 2423 id_iter = (char *)ha->gid_list; 2424 for (index = 0; index < entries; index++) { 2425 domain = ((struct gid_list_info *)id_iter)->domain; 2426 area = ((struct gid_list_info *)id_iter)->area; 2427 al_pa = ((struct gid_list_info *)id_iter)->al_pa; 2428 if (IS_QLA2100(ha) || IS_QLA2200(ha)) 2429 loop_id = (uint16_t) 2430 ((struct gid_list_info *)id_iter)->loop_id_2100; 2431 else 2432 loop_id = le16_to_cpu( 2433 ((struct gid_list_info *)id_iter)->loop_id); 2434 id_iter += ha->gid_list_info_size; 2435 2436 /* Bypass reserved domain fields. */ 2437 if ((domain & 0xf0) == 0xf0) 2438 continue; 2439 2440 /* Bypass if not same domain and area of adapter. */ 2441 if (area && domain && 2442 (area != vha->d_id.b.area || domain != vha->d_id.b.domain)) 2443 continue; 2444 2445 /* Bypass invalid local loop ID. */ 2446 if (loop_id > LAST_LOCAL_LOOP_ID) 2447 continue; 2448 2449 /* Fill in member data. */ 2450 new_fcport->d_id.b.domain = domain; 2451 new_fcport->d_id.b.area = area; 2452 new_fcport->d_id.b.al_pa = al_pa; 2453 new_fcport->loop_id = loop_id; 2454 new_fcport->vp_idx = vha->vp_idx; 2455 rval2 = qla2x00_get_port_database(vha, new_fcport, 0); 2456 if (rval2 != QLA_SUCCESS) { 2457 DEBUG2(printk("scsi(%ld): Failed to retrieve fcport " 2458 "information -- get_port_database=%x, " 2459 "loop_id=0x%04x\n", 2460 vha->host_no, rval2, new_fcport->loop_id)); 2461 DEBUG2(printk("scsi(%ld): Scheduling resync...\n", 2462 vha->host_no)); 2463 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 2464 continue; 2465 } 2466 2467 /* Check for matching device in port list. */ 2468 found = 0; 2469 fcport = NULL; 2470 list_for_each_entry(fcport, &vha->vp_fcports, list) { 2471 if (memcmp(new_fcport->port_name, fcport->port_name, 2472 WWN_SIZE)) 2473 continue; 2474 2475 fcport->flags &= ~FCF_FABRIC_DEVICE; 2476 fcport->loop_id = new_fcport->loop_id; 2477 fcport->port_type = new_fcport->port_type; 2478 fcport->d_id.b24 = new_fcport->d_id.b24; 2479 memcpy(fcport->node_name, new_fcport->node_name, 2480 WWN_SIZE); 2481 2482 found++; 2483 break; 2484 } 2485 2486 if (!found) { 2487 /* New device, add to fcports list. */ 2488 if (vha->vp_idx) { 2489 new_fcport->vha = vha; 2490 new_fcport->vp_idx = vha->vp_idx; 2491 } 2492 list_add_tail(&new_fcport->list, &vha->vp_fcports); 2493 2494 /* Allocate a new replacement fcport. */ 2495 fcport = new_fcport; 2496 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); 2497 if (new_fcport == NULL) { 2498 rval = QLA_MEMORY_ALLOC_FAILED; 2499 goto cleanup_allocation; 2500 } 2501 new_fcport->flags &= ~FCF_FABRIC_DEVICE; 2502 } 2503 2504 /* Base iIDMA settings on HBA port speed. 
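 * Note: local-loop devices are simply assumed to run at the HBA's current
 * link rate; qla2x00_iidma_fcport() skips the adjustment when the adapter
 * is not iIDMA-capable or the port speed is unknown.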
*/ 2505 fcport->fp_speed = ha->link_data_rate; 2506 2507 qla2x00_update_fcport(vha, fcport); 2508 2509 found_devs++; 2510 } 2511 2512 cleanup_allocation: 2513 kfree(new_fcport); 2514 2515 if (rval != QLA_SUCCESS) { 2516 DEBUG2(printk("scsi(%ld): Configure local loop error exit: " 2517 "rval=%x\n", vha->host_no, rval)); 2518 } 2519 2520 return (rval); 2521 } 2522 2523 static void 2524 qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) 2525 { 2526 #define LS_UNKNOWN 2 2527 static char *link_speeds[] = { "1", "2", "?", "4", "8", "10" }; 2528 char *link_speed; 2529 int rval; 2530 uint16_t mb[4]; 2531 struct qla_hw_data *ha = vha->hw; 2532 2533 if (!IS_IIDMA_CAPABLE(ha)) 2534 return; 2535 2536 if (fcport->fp_speed == PORT_SPEED_UNKNOWN || 2537 fcport->fp_speed > ha->link_data_rate) 2538 return; 2539 2540 rval = qla2x00_set_idma_speed(vha, fcport->loop_id, fcport->fp_speed, 2541 mb); 2542 if (rval != QLA_SUCCESS) { 2543 DEBUG2(printk("scsi(%ld): Unable to adjust iIDMA " 2544 "%02x%02x%02x%02x%02x%02x%02x%02x -- %04x %x %04x %04x.\n", 2545 vha->host_no, fcport->port_name[0], fcport->port_name[1], 2546 fcport->port_name[2], fcport->port_name[3], 2547 fcport->port_name[4], fcport->port_name[5], 2548 fcport->port_name[6], fcport->port_name[7], rval, 2549 fcport->fp_speed, mb[0], mb[1])); 2550 } else { 2551 link_speed = link_speeds[LS_UNKNOWN]; 2552 if (fcport->fp_speed < 5) 2553 link_speed = link_speeds[fcport->fp_speed]; 2554 else if (fcport->fp_speed == 0x13) 2555 link_speed = link_speeds[5]; 2556 DEBUG2(qla_printk(KERN_INFO, ha, 2557 "iIDMA adjusted to %s GB/s on " 2558 "%02x%02x%02x%02x%02x%02x%02x%02x.\n", 2559 link_speed, fcport->port_name[0], 2560 fcport->port_name[1], fcport->port_name[2], 2561 fcport->port_name[3], fcport->port_name[4], 2562 fcport->port_name[5], fcport->port_name[6], 2563 fcport->port_name[7])); 2564 } 2565 } 2566 2567 static void 2568 qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport) 2569 { 2570 struct fc_rport_identifiers rport_ids; 2571 struct fc_rport *rport; 2572 struct qla_hw_data *ha = vha->hw; 2573 2574 qla2x00_rport_del(fcport); 2575 2576 rport_ids.node_name = wwn_to_u64(fcport->node_name); 2577 rport_ids.port_name = wwn_to_u64(fcport->port_name); 2578 rport_ids.port_id = fcport->d_id.b.domain << 16 | 2579 fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa; 2580 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN; 2581 fcport->rport = rport = fc_remote_port_add(vha->host, 0, &rport_ids); 2582 if (!rport) { 2583 qla_printk(KERN_WARNING, ha, 2584 "Unable to allocate fc remote port!\n"); 2585 return; 2586 } 2587 spin_lock_irq(fcport->vha->host->host_lock); 2588 *((fc_port_t **)rport->dd_data) = fcport; 2589 spin_unlock_irq(fcport->vha->host->host_lock); 2590 2591 rport->supported_classes = fcport->supported_classes; 2592 2593 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN; 2594 if (fcport->port_type == FCT_INITIATOR) 2595 rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR; 2596 if (fcport->port_type == FCT_TARGET) 2597 rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET; 2598 fc_remote_port_rolechg(rport, rport_ids.roles); 2599 } 2600 2601 /* 2602 * qla2x00_update_fcport 2603 * Updates device on list. 2604 * 2605 * Input: 2606 * ha = adapter block pointer. 2607 * fcport = port structure pointer. 2608 * 2609 * Return: 2610 * 0 - Success 2611 * BIT_0 - error 2612 * 2613 * Context: 2614 * Kernel context. 
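 * Note: registration with the FC transport (qla2x00_reg_remote_port) is
 * done last, after iIDMA has been applied and the port state has been
 * set to FCS_ONLINE.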
2615 */ 2616 void 2617 qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) 2618 { 2619 struct qla_hw_data *ha = vha->hw; 2620 2621 fcport->vha = vha; 2622 fcport->login_retry = 0; 2623 fcport->port_login_retry_count = ha->port_down_retry_count * 2624 PORT_RETRY_TIME; 2625 atomic_set(&fcport->port_down_timer, ha->port_down_retry_count * 2626 PORT_RETRY_TIME); 2627 fcport->flags &= ~FCF_LOGIN_NEEDED; 2628 2629 qla2x00_iidma_fcport(vha, fcport); 2630 2631 atomic_set(&fcport->state, FCS_ONLINE); 2632 2633 qla2x00_reg_remote_port(vha, fcport); 2634 } 2635 2636 /* 2637 * qla2x00_configure_fabric 2638 * Setup SNS devices with loop ID's. 2639 * 2640 * Input: 2641 * ha = adapter block pointer. 2642 * 2643 * Returns: 2644 * 0 = success. 2645 * BIT_0 = error 2646 */ 2647 static int 2648 qla2x00_configure_fabric(scsi_qla_host_t *vha) 2649 { 2650 int rval, rval2; 2651 fc_port_t *fcport, *fcptemp; 2652 uint16_t next_loopid; 2653 uint16_t mb[MAILBOX_REGISTER_COUNT]; 2654 uint16_t loop_id; 2655 LIST_HEAD(new_fcports); 2656 struct qla_hw_data *ha = vha->hw; 2657 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 2658 2659 /* If FL port exists, then SNS is present */ 2660 if (IS_FWI2_CAPABLE(ha)) 2661 loop_id = NPH_F_PORT; 2662 else 2663 loop_id = SNS_FL_PORT; 2664 rval = qla2x00_get_port_name(vha, loop_id, vha->fabric_node_name, 1); 2665 if (rval != QLA_SUCCESS) { 2666 DEBUG2(printk("scsi(%ld): MBC_GET_PORT_NAME Failed, No FL " 2667 "Port\n", vha->host_no)); 2668 2669 vha->device_flags &= ~SWITCH_FOUND; 2670 return (QLA_SUCCESS); 2671 } 2672 vha->device_flags |= SWITCH_FOUND; 2673 2674 /* Mark devices that need re-synchronization. */ 2675 rval2 = qla2x00_device_resync(vha); 2676 if (rval2 == QLA_RSCNS_HANDLED) { 2677 /* No point doing the scan, just continue. */ 2678 return (QLA_SUCCESS); 2679 } 2680 do { 2681 /* FDMI support. */ 2682 if (ql2xfdmienable && 2683 test_and_clear_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags)) 2684 qla2x00_fdmi_register(vha); 2685 2686 /* Ensure we are logged into the SNS. */ 2687 if (IS_FWI2_CAPABLE(ha)) 2688 loop_id = NPH_SNS; 2689 else 2690 loop_id = SIMPLE_NAME_SERVER; 2691 ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff, 2692 0xfc, mb, BIT_1 | BIT_0); 2693 if (mb[0] != MBS_COMMAND_COMPLETE) { 2694 DEBUG2(qla_printk(KERN_INFO, ha, 2695 "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x " 2696 "mb[2]=%x mb[6]=%x mb[7]=%x\n", loop_id, 2697 mb[0], mb[1], mb[2], mb[6], mb[7])); 2698 return (QLA_SUCCESS); 2699 } 2700 2701 if (test_and_clear_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags)) { 2702 if (qla2x00_rft_id(vha)) { 2703 /* EMPTY */ 2704 DEBUG2(printk("scsi(%ld): Register FC-4 " 2705 "TYPE failed.\n", vha->host_no)); 2706 } 2707 if (qla2x00_rff_id(vha)) { 2708 /* EMPTY */ 2709 DEBUG2(printk("scsi(%ld): Register FC-4 " 2710 "Features failed.\n", vha->host_no)); 2711 } 2712 if (qla2x00_rnn_id(vha)) { 2713 /* EMPTY */ 2714 DEBUG2(printk("scsi(%ld): Register Node Name " 2715 "failed.\n", vha->host_no)); 2716 } else if (qla2x00_rsnn_nn(vha)) { 2717 /* EMPTY */ 2718 DEBUG2(printk("scsi(%ld): Register Symbolic " 2719 "Node Name failed.\n", vha->host_no)); 2720 } 2721 } 2722 2723 rval = qla2x00_find_all_fabric_devs(vha, &new_fcports); 2724 if (rval != QLA_SUCCESS) 2725 break; 2726 2727 /* 2728 * Logout all previous fabric devices marked lost, except 2729 * FCP2 devices. 
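 * (FCP2 devices -- typically tape -- are left logged in, presumably so
 * their existing login can be reused when the device returns.)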
2730 */ 2731 list_for_each_entry(fcport, &vha->vp_fcports, list) { 2732 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) 2733 break; 2734 2735 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) 2736 continue; 2737 2738 if (atomic_read(&fcport->state) == FCS_DEVICE_LOST) { 2739 qla2x00_mark_device_lost(vha, fcport, 2740 ql2xplogiabsentdevice, 0); 2741 if (fcport->loop_id != FC_NO_LOOP_ID && 2742 (fcport->flags & FCF_FCP2_DEVICE) == 0 && 2743 fcport->port_type != FCT_INITIATOR && 2744 fcport->port_type != FCT_BROADCAST) { 2745 ha->isp_ops->fabric_logout(vha, 2746 fcport->loop_id, 2747 fcport->d_id.b.domain, 2748 fcport->d_id.b.area, 2749 fcport->d_id.b.al_pa); 2750 fcport->loop_id = FC_NO_LOOP_ID; 2751 } 2752 } 2753 } 2754 2755 /* Starting free loop ID. */ 2756 next_loopid = ha->min_external_loopid; 2757 2758 /* 2759 * Scan through our port list and login entries that need to be 2760 * logged in. 2761 */ 2762 list_for_each_entry(fcport, &vha->vp_fcports, list) { 2763 if (atomic_read(&vha->loop_down_timer) || 2764 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) 2765 break; 2766 2767 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 || 2768 (fcport->flags & FCF_LOGIN_NEEDED) == 0) 2769 continue; 2770 2771 if (fcport->loop_id == FC_NO_LOOP_ID) { 2772 fcport->loop_id = next_loopid; 2773 rval = qla2x00_find_new_loop_id( 2774 base_vha, fcport); 2775 if (rval != QLA_SUCCESS) { 2776 /* Ran out of IDs to use */ 2777 break; 2778 } 2779 } 2780 /* Login and update database */ 2781 qla2x00_fabric_dev_login(vha, fcport, &next_loopid); 2782 } 2783 2784 /* Exit if out of loop IDs. */ 2785 if (rval != QLA_SUCCESS) { 2786 break; 2787 } 2788 2789 /* 2790 * Login and add the new devices to our port list. 2791 */ 2792 list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) { 2793 if (atomic_read(&vha->loop_down_timer) || 2794 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) 2795 break; 2796 2797 /* Find a new loop ID to use. */ 2798 fcport->loop_id = next_loopid; 2799 rval = qla2x00_find_new_loop_id(base_vha, fcport); 2800 if (rval != QLA_SUCCESS) { 2801 /* Ran out of IDs to use */ 2802 break; 2803 } 2804 2805 /* Login and update database */ 2806 qla2x00_fabric_dev_login(vha, fcport, &next_loopid); 2807 2808 if (vha->vp_idx) { 2809 fcport->vha = vha; 2810 fcport->vp_idx = vha->vp_idx; 2811 } 2812 list_move_tail(&fcport->list, &vha->vp_fcports); 2813 } 2814 } while (0); 2815 2816 /* Free all new device structures not processed. */ 2817 list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) { 2818 list_del(&fcport->list); 2819 kfree(fcport); 2820 } 2821 2822 if (rval) { 2823 DEBUG2(printk("scsi(%ld): Configure fabric error exit: " 2824 "rval=%d\n", vha->host_no, rval)); 2825 } 2826 2827 return (rval); 2828 } 2829 2830 2831 /* 2832 * qla2x00_find_all_fabric_devs 2833 * 2834 * Input: 2835 * ha = adapter block pointer. 2836 * dev = database device entry pointer. 2837 * 2838 * Returns: 2839 * 0 = success. 2840 * 2841 * Context: 2842 * Kernel context. 2843 */ 2844 static int 2845 qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha, 2846 struct list_head *new_fcports) 2847 { 2848 int rval; 2849 uint16_t loop_id; 2850 fc_port_t *fcport, *new_fcport, *fcptemp; 2851 int found; 2852 2853 sw_info_t *swl; 2854 int swl_idx; 2855 int first_dev, last_dev; 2856 port_id_t wrap, nxt_d_id; 2857 struct qla_hw_data *ha = vha->hw; 2858 struct scsi_qla_host *vp, *base_vha = pci_get_drvdata(ha->pdev); 2859 struct scsi_qla_host *tvp; 2860 2861 rval = QLA_SUCCESS; 2862 2863 /* Try GID_PT to get device list, else GAN. 
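 * Note: GID_PT returns the whole port list in a single query; if it (or
 * the follow-up GPN_ID/GNN_ID lookups) fails, the code falls back to
 * walking the fabric one entry at a time with GA_NXT.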
*/ 2864 swl = kcalloc(MAX_FIBRE_DEVICES, sizeof(sw_info_t), GFP_KERNEL); 2865 if (!swl) { 2866 /*EMPTY*/ 2867 DEBUG2(printk("scsi(%ld): GID_PT allocations failed, fallback " 2868 "on GA_NXT\n", vha->host_no)); 2869 } else { 2870 if (qla2x00_gid_pt(vha, swl) != QLA_SUCCESS) { 2871 kfree(swl); 2872 swl = NULL; 2873 } else if (qla2x00_gpn_id(vha, swl) != QLA_SUCCESS) { 2874 kfree(swl); 2875 swl = NULL; 2876 } else if (qla2x00_gnn_id(vha, swl) != QLA_SUCCESS) { 2877 kfree(swl); 2878 swl = NULL; 2879 } else if (ql2xiidmaenable && 2880 qla2x00_gfpn_id(vha, swl) == QLA_SUCCESS) { 2881 qla2x00_gpsc(vha, swl); 2882 } 2883 } 2884 swl_idx = 0; 2885 2886 /* Allocate temporary fcport for any new fcports discovered. */ 2887 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); 2888 if (new_fcport == NULL) { 2889 kfree(swl); 2890 return (QLA_MEMORY_ALLOC_FAILED); 2891 } 2892 new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED); 2893 /* Set start port ID scan at adapter ID. */ 2894 first_dev = 1; 2895 last_dev = 0; 2896 2897 /* Starting free loop ID. */ 2898 loop_id = ha->min_external_loopid; 2899 for (; loop_id <= ha->max_loop_id; loop_id++) { 2900 if (qla2x00_is_reserved_id(vha, loop_id)) 2901 continue; 2902 2903 if (atomic_read(&vha->loop_down_timer) || LOOP_TRANSITION(vha)) 2904 break; 2905 2906 if (swl != NULL) { 2907 if (last_dev) { 2908 wrap.b24 = new_fcport->d_id.b24; 2909 } else { 2910 new_fcport->d_id.b24 = swl[swl_idx].d_id.b24; 2911 memcpy(new_fcport->node_name, 2912 swl[swl_idx].node_name, WWN_SIZE); 2913 memcpy(new_fcport->port_name, 2914 swl[swl_idx].port_name, WWN_SIZE); 2915 memcpy(new_fcport->fabric_port_name, 2916 swl[swl_idx].fabric_port_name, WWN_SIZE); 2917 new_fcport->fp_speed = swl[swl_idx].fp_speed; 2918 2919 if (swl[swl_idx].d_id.b.rsvd_1 != 0) { 2920 last_dev = 1; 2921 } 2922 swl_idx++; 2923 } 2924 } else { 2925 /* Send GA_NXT to the switch */ 2926 rval = qla2x00_ga_nxt(vha, new_fcport); 2927 if (rval != QLA_SUCCESS) { 2928 qla_printk(KERN_WARNING, ha, 2929 "SNS scan failed -- assuming zero-entry " 2930 "result...\n"); 2931 list_for_each_entry_safe(fcport, fcptemp, 2932 new_fcports, list) { 2933 list_del(&fcport->list); 2934 kfree(fcport); 2935 } 2936 rval = QLA_SUCCESS; 2937 break; 2938 } 2939 } 2940 2941 /* If wrap on switch device list, exit. */ 2942 if (first_dev) { 2943 wrap.b24 = new_fcport->d_id.b24; 2944 first_dev = 0; 2945 } else if (new_fcport->d_id.b24 == wrap.b24) { 2946 DEBUG2(printk("scsi(%ld): device wrap (%02x%02x%02x)\n", 2947 vha->host_no, new_fcport->d_id.b.domain, 2948 new_fcport->d_id.b.area, new_fcport->d_id.b.al_pa)); 2949 break; 2950 } 2951 2952 /* Bypass if same physical adapter. */ 2953 if (new_fcport->d_id.b24 == base_vha->d_id.b24) 2954 continue; 2955 2956 /* Bypass virtual ports of the same host. */ 2957 found = 0; 2958 if (ha->num_vhosts) { 2959 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { 2960 if (new_fcport->d_id.b24 == vp->d_id.b24) { 2961 found = 1; 2962 break; 2963 } 2964 } 2965 if (found) 2966 continue; 2967 } 2968 2969 /* Bypass if same domain and area of adapter. */ 2970 if (((new_fcport->d_id.b24 & 0xffff00) == 2971 (vha->d_id.b24 & 0xffff00)) && ha->current_topology == 2972 ISP_CFG_FL) 2973 continue; 2974 2975 /* Bypass reserved domain fields. */ 2976 if ((new_fcport->d_id.b.domain & 0xf0) == 0xf0) 2977 continue; 2978 2979 /* Locate matching device in database. 
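 * Note: matching is done purely on the port name (WWPN); a matching entry
 * then has its port ID and login requirements refreshed from the newly
 * discovered data below.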
*/ 2980 found = 0; 2981 list_for_each_entry(fcport, &vha->vp_fcports, list) { 2982 if (memcmp(new_fcport->port_name, fcport->port_name, 2983 WWN_SIZE)) 2984 continue; 2985 2986 found++; 2987 2988 /* Update port state. */ 2989 memcpy(fcport->fabric_port_name, 2990 new_fcport->fabric_port_name, WWN_SIZE); 2991 fcport->fp_speed = new_fcport->fp_speed; 2992 2993 /* 2994 * If address the same and state FCS_ONLINE, nothing 2995 * changed. 2996 */ 2997 if (fcport->d_id.b24 == new_fcport->d_id.b24 && 2998 atomic_read(&fcport->state) == FCS_ONLINE) { 2999 break; 3000 } 3001 3002 /* 3003 * If device was not a fabric device before. 3004 */ 3005 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) { 3006 fcport->d_id.b24 = new_fcport->d_id.b24; 3007 fcport->loop_id = FC_NO_LOOP_ID; 3008 fcport->flags |= (FCF_FABRIC_DEVICE | 3009 FCF_LOGIN_NEEDED); 3010 break; 3011 } 3012 3013 /* 3014 * Port ID changed or device was marked to be updated; 3015 * Log it out if still logged in and mark it for 3016 * relogin later. 3017 */ 3018 fcport->d_id.b24 = new_fcport->d_id.b24; 3019 fcport->flags |= FCF_LOGIN_NEEDED; 3020 if (fcport->loop_id != FC_NO_LOOP_ID && 3021 (fcport->flags & FCF_FCP2_DEVICE) == 0 && 3022 fcport->port_type != FCT_INITIATOR && 3023 fcport->port_type != FCT_BROADCAST) { 3024 ha->isp_ops->fabric_logout(vha, fcport->loop_id, 3025 fcport->d_id.b.domain, fcport->d_id.b.area, 3026 fcport->d_id.b.al_pa); 3027 fcport->loop_id = FC_NO_LOOP_ID; 3028 } 3029 3030 break; 3031 } 3032 3033 if (found) 3034 continue; 3035 /* If device was not in our fcports list, then add it. */ 3036 list_add_tail(&new_fcport->list, new_fcports); 3037 3038 /* Allocate a new replacement fcport. */ 3039 nxt_d_id.b24 = new_fcport->d_id.b24; 3040 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); 3041 if (new_fcport == NULL) { 3042 kfree(swl); 3043 return (QLA_MEMORY_ALLOC_FAILED); 3044 } 3045 new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED); 3046 new_fcport->d_id.b24 = nxt_d_id.b24; 3047 } 3048 3049 kfree(swl); 3050 kfree(new_fcport); 3051 3052 return (rval); 3053 } 3054 3055 /* 3056 * qla2x00_find_new_loop_id 3057 * Scan through our port list and find a new usable loop ID. 3058 * 3059 * Input: 3060 * ha: adapter state pointer. 3061 * dev: port structure pointer. 3062 * 3063 * Returns: 3064 * qla2x00 local function return status code. 3065 * 3066 * Context: 3067 * Kernel context. 3068 */ 3069 static int 3070 qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev) 3071 { 3072 int rval; 3073 int found; 3074 fc_port_t *fcport; 3075 uint16_t first_loop_id; 3076 struct qla_hw_data *ha = vha->hw; 3077 struct scsi_qla_host *vp; 3078 struct scsi_qla_host *tvp; 3079 3080 rval = QLA_SUCCESS; 3081 3082 /* Save starting loop ID. */ 3083 first_loop_id = dev->loop_id; 3084 3085 for (;;) { 3086 /* Skip loop ID if already used by adapter. */ 3087 if (dev->loop_id == vha->loop_id) 3088 dev->loop_id++; 3089 3090 /* Skip reserved loop IDs. */ 3091 while (qla2x00_is_reserved_id(vha, dev->loop_id)) 3092 dev->loop_id++; 3093 3094 /* Reset loop ID if passed the end. */ 3095 if (dev->loop_id > ha->max_loop_id) { 3096 /* first loop ID. */ 3097 dev->loop_id = ha->min_external_loopid; 3098 } 3099 3100 /* Check for loop ID being already in use. 
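 * Note: the candidate ID is checked against every fcport on every vport
 * of this physical adapter; the search wraps past max_loop_id and gives
 * up once it comes back around to the starting ID.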
*/ 3101 found = 0; 3102 fcport = NULL; 3103 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { 3104 list_for_each_entry(fcport, &vp->vp_fcports, list) { 3105 if (fcport->loop_id == dev->loop_id && 3106 fcport != dev) { 3107 /* ID possibly in use */ 3108 found++; 3109 break; 3110 } 3111 } 3112 if (found) 3113 break; 3114 } 3115 3116 /* If not in use then it is free to use. */ 3117 if (!found) { 3118 break; 3119 } 3120 3121 /* ID in use. Try next value. */ 3122 dev->loop_id++; 3123 3124 /* If wrap around. No free ID to use. */ 3125 if (dev->loop_id == first_loop_id) { 3126 dev->loop_id = FC_NO_LOOP_ID; 3127 rval = QLA_FUNCTION_FAILED; 3128 break; 3129 } 3130 } 3131 3132 return (rval); 3133 } 3134 3135 /* 3136 * qla2x00_device_resync 3137 * Marks devices in the database that needs resynchronization. 3138 * 3139 * Input: 3140 * ha = adapter block pointer. 3141 * 3142 * Context: 3143 * Kernel context. 3144 */ 3145 static int 3146 qla2x00_device_resync(scsi_qla_host_t *vha) 3147 { 3148 int rval; 3149 uint32_t mask; 3150 fc_port_t *fcport; 3151 uint32_t rscn_entry; 3152 uint8_t rscn_out_iter; 3153 uint8_t format; 3154 port_id_t d_id; 3155 3156 rval = QLA_RSCNS_HANDLED; 3157 3158 while (vha->rscn_out_ptr != vha->rscn_in_ptr || 3159 vha->flags.rscn_queue_overflow) { 3160 3161 rscn_entry = vha->rscn_queue[vha->rscn_out_ptr]; 3162 format = MSB(MSW(rscn_entry)); 3163 d_id.b.domain = LSB(MSW(rscn_entry)); 3164 d_id.b.area = MSB(LSW(rscn_entry)); 3165 d_id.b.al_pa = LSB(LSW(rscn_entry)); 3166 3167 DEBUG(printk("scsi(%ld): RSCN queue entry[%d] = " 3168 "[%02x/%02x%02x%02x].\n", 3169 vha->host_no, vha->rscn_out_ptr, format, d_id.b.domain, 3170 d_id.b.area, d_id.b.al_pa)); 3171 3172 vha->rscn_out_ptr++; 3173 if (vha->rscn_out_ptr == MAX_RSCN_COUNT) 3174 vha->rscn_out_ptr = 0; 3175 3176 /* Skip duplicate entries. */ 3177 for (rscn_out_iter = vha->rscn_out_ptr; 3178 !vha->flags.rscn_queue_overflow && 3179 rscn_out_iter != vha->rscn_in_ptr; 3180 rscn_out_iter = (rscn_out_iter == 3181 (MAX_RSCN_COUNT - 1)) ? 0: rscn_out_iter + 1) { 3182 3183 if (rscn_entry != vha->rscn_queue[rscn_out_iter]) 3184 break; 3185 3186 DEBUG(printk("scsi(%ld): Skipping duplicate RSCN queue " 3187 "entry found at [%d].\n", vha->host_no, 3188 rscn_out_iter)); 3189 3190 vha->rscn_out_ptr = rscn_out_iter; 3191 } 3192 3193 /* Queue overflow, set switch default case. */ 3194 if (vha->flags.rscn_queue_overflow) { 3195 DEBUG(printk("scsi(%ld): device_resync: rscn " 3196 "overflow.\n", vha->host_no)); 3197 3198 format = 3; 3199 vha->flags.rscn_queue_overflow = 0; 3200 } 3201 3202 switch (format) { 3203 case 0: 3204 mask = 0xffffff; 3205 break; 3206 case 1: 3207 mask = 0xffff00; 3208 break; 3209 case 2: 3210 mask = 0xff0000; 3211 break; 3212 default: 3213 mask = 0x0; 3214 d_id.b24 = 0; 3215 vha->rscn_out_ptr = vha->rscn_in_ptr; 3216 break; 3217 } 3218 3219 rval = QLA_SUCCESS; 3220 3221 list_for_each_entry(fcport, &vha->vp_fcports, list) { 3222 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 || 3223 (fcport->d_id.b24 & mask) != d_id.b24 || 3224 fcport->port_type == FCT_BROADCAST) 3225 continue; 3226 3227 if (atomic_read(&fcport->state) == FCS_ONLINE) { 3228 if (format != 3 || 3229 fcport->port_type != FCT_INITIATOR) { 3230 qla2x00_mark_device_lost(vha, fcport, 3231 0, 0); 3232 } 3233 } 3234 } 3235 } 3236 return (rval); 3237 } 3238 3239 /* 3240 * qla2x00_fabric_dev_login 3241 * Login fabric target device and update FC port database. 3242 * 3243 * Input: 3244 * ha: adapter state pointer. 3245 * fcport: port structure list pointer. 
3246 * next_loopid: contains value of a new loop ID that can be used 3247 * by the next login attempt. 3248 * 3249 * Returns: 3250 * qla2x00 local function return status code. 3251 * 3252 * Context: 3253 * Kernel context. 3254 */ 3255 static int 3256 qla2x00_fabric_dev_login(scsi_qla_host_t *vha, fc_port_t *fcport, 3257 uint16_t *next_loopid) 3258 { 3259 int rval; 3260 int retry; 3261 uint8_t opts; 3262 struct qla_hw_data *ha = vha->hw; 3263 3264 rval = QLA_SUCCESS; 3265 retry = 0; 3266 3267 if (IS_ALOGIO_CAPABLE(ha)) { 3268 rval = qla2x00_post_async_login_work(vha, fcport, NULL); 3269 if (!rval) 3270 return rval; 3271 } 3272 3273 rval = qla2x00_fabric_login(vha, fcport, next_loopid); 3274 if (rval == QLA_SUCCESS) { 3275 /* Send an ADISC to FCP2 devices.*/ 3276 opts = 0; 3277 if (fcport->flags & FCF_FCP2_DEVICE) 3278 opts |= BIT_1; 3279 rval = qla2x00_get_port_database(vha, fcport, opts); 3280 if (rval != QLA_SUCCESS) { 3281 ha->isp_ops->fabric_logout(vha, fcport->loop_id, 3282 fcport->d_id.b.domain, fcport->d_id.b.area, 3283 fcport->d_id.b.al_pa); 3284 qla2x00_mark_device_lost(vha, fcport, 1, 0); 3285 } else { 3286 qla2x00_update_fcport(vha, fcport); 3287 } 3288 } 3289 3290 return (rval); 3291 } 3292 3293 /* 3294 * qla2x00_fabric_login 3295 * Issue fabric login command. 3296 * 3297 * Input: 3298 * ha = adapter block pointer. 3299 * device = pointer to FC device type structure. 3300 * 3301 * Returns: 3302 * 0 - Login successfully 3303 * 1 - Login failed 3304 * 2 - Initiator device 3305 * 3 - Fatal error 3306 */ 3307 int 3308 qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport, 3309 uint16_t *next_loopid) 3310 { 3311 int rval; 3312 int retry; 3313 uint16_t tmp_loopid; 3314 uint16_t mb[MAILBOX_REGISTER_COUNT]; 3315 struct qla_hw_data *ha = vha->hw; 3316 3317 retry = 0; 3318 tmp_loopid = 0; 3319 3320 for (;;) { 3321 DEBUG(printk("scsi(%ld): Trying Fabric Login w/loop id 0x%04x " 3322 "for port %02x%02x%02x.\n", 3323 vha->host_no, fcport->loop_id, fcport->d_id.b.domain, 3324 fcport->d_id.b.area, fcport->d_id.b.al_pa)); 3325 3326 /* Login fcport on switch. */ 3327 ha->isp_ops->fabric_login(vha, fcport->loop_id, 3328 fcport->d_id.b.domain, fcport->d_id.b.area, 3329 fcport->d_id.b.al_pa, mb, BIT_0); 3330 if (mb[0] == MBS_PORT_ID_USED) { 3331 /* 3332 * Device has another loop ID. The firmware team 3333 * recommends the driver perform an implicit login with 3334 * the specified ID again. The ID we just used is save 3335 * here so we return with an ID that can be tried by 3336 * the next login. 3337 */ 3338 retry++; 3339 tmp_loopid = fcport->loop_id; 3340 fcport->loop_id = mb[1]; 3341 3342 DEBUG(printk("Fabric Login: port in use - next " 3343 "loop id=0x%04x, port Id=%02x%02x%02x.\n", 3344 fcport->loop_id, fcport->d_id.b.domain, 3345 fcport->d_id.b.area, fcport->d_id.b.al_pa)); 3346 3347 } else if (mb[0] == MBS_COMMAND_COMPLETE) { 3348 /* 3349 * Login succeeded. 3350 */ 3351 if (retry) { 3352 /* A retry occurred before. */ 3353 *next_loopid = tmp_loopid; 3354 } else { 3355 /* 3356 * No retry occurred before. Just increment the 3357 * ID value for next login. 
3358 */ 3359 *next_loopid = (fcport->loop_id + 1); 3360 } 3361 3362 if (mb[1] & BIT_0) { 3363 fcport->port_type = FCT_INITIATOR; 3364 } else { 3365 fcport->port_type = FCT_TARGET; 3366 if (mb[1] & BIT_1) { 3367 fcport->flags |= FCF_FCP2_DEVICE; 3368 } 3369 } 3370 3371 if (mb[10] & BIT_0) 3372 fcport->supported_classes |= FC_COS_CLASS2; 3373 if (mb[10] & BIT_1) 3374 fcport->supported_classes |= FC_COS_CLASS3; 3375 3376 rval = QLA_SUCCESS; 3377 break; 3378 } else if (mb[0] == MBS_LOOP_ID_USED) { 3379 /* 3380 * Loop ID already used, try next loop ID. 3381 */ 3382 fcport->loop_id++; 3383 rval = qla2x00_find_new_loop_id(vha, fcport); 3384 if (rval != QLA_SUCCESS) { 3385 /* Ran out of loop IDs to use */ 3386 break; 3387 } 3388 } else if (mb[0] == MBS_COMMAND_ERROR) { 3389 /* 3390 * Firmware possibly timed out during login. If NO 3391 * retries are left to do then the device is declared 3392 * dead. 3393 */ 3394 *next_loopid = fcport->loop_id; 3395 ha->isp_ops->fabric_logout(vha, fcport->loop_id, 3396 fcport->d_id.b.domain, fcport->d_id.b.area, 3397 fcport->d_id.b.al_pa); 3398 qla2x00_mark_device_lost(vha, fcport, 1, 0); 3399 3400 rval = 1; 3401 break; 3402 } else { 3403 /* 3404 * unrecoverable / not handled error 3405 */ 3406 DEBUG2(printk("%s(%ld): failed=%x port_id=%02x%02x%02x " 3407 "loop_id=%x jiffies=%lx.\n", 3408 __func__, vha->host_no, mb[0], 3409 fcport->d_id.b.domain, fcport->d_id.b.area, 3410 fcport->d_id.b.al_pa, fcport->loop_id, jiffies)); 3411 3412 *next_loopid = fcport->loop_id; 3413 ha->isp_ops->fabric_logout(vha, fcport->loop_id, 3414 fcport->d_id.b.domain, fcport->d_id.b.area, 3415 fcport->d_id.b.al_pa); 3416 fcport->loop_id = FC_NO_LOOP_ID; 3417 fcport->login_retry = 0; 3418 3419 rval = 3; 3420 break; 3421 } 3422 } 3423 3424 return (rval); 3425 } 3426 3427 /* 3428 * qla2x00_local_device_login 3429 * Issue local device login command. 3430 * 3431 * Input: 3432 * ha = adapter block pointer. 3433 * loop_id = loop id of device to login to. 3434 * 3435 * Returns (Where's the #define!!!!): 3436 * 0 - Login successfully 3437 * 1 - Login failed 3438 * 3 - Fatal error 3439 */ 3440 int 3441 qla2x00_local_device_login(scsi_qla_host_t *vha, fc_port_t *fcport) 3442 { 3443 int rval; 3444 uint16_t mb[MAILBOX_REGISTER_COUNT]; 3445 3446 memset(mb, 0, sizeof(mb)); 3447 rval = qla2x00_login_local_device(vha, fcport, mb, BIT_0); 3448 if (rval == QLA_SUCCESS) { 3449 /* Interrogate mailbox registers for any errors */ 3450 if (mb[0] == MBS_COMMAND_ERROR) 3451 rval = 1; 3452 else if (mb[0] == MBS_COMMAND_PARAMETER_ERROR) 3453 /* device not in PCB table */ 3454 rval = 3; 3455 } 3456 3457 return (rval); 3458 } 3459 3460 /* 3461 * qla2x00_loop_resync 3462 * Resync with fibre channel devices. 3463 * 3464 * Input: 3465 * ha = adapter block pointer. 3466 * 3467 * Returns: 3468 * 0 = success 3469 */ 3470 int 3471 qla2x00_loop_resync(scsi_qla_host_t *vha) 3472 { 3473 int rval = QLA_SUCCESS; 3474 uint32_t wait_time; 3475 struct req_que *req; 3476 struct rsp_que *rsp; 3477 3478 if (vha->hw->flags.cpu_affinity_enabled) 3479 req = vha->hw->req_q_map[0]; 3480 else 3481 req = vha->req; 3482 rsp = req->rsp; 3483 3484 atomic_set(&vha->loop_state, LOOP_UPDATE); 3485 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags); 3486 if (vha->flags.online) { 3487 if (!(rval = qla2x00_fw_ready(vha))) { 3488 /* Wait at most MAX_TARGET RSCNs for a stable link. */ 3489 wait_time = 256; 3490 do { 3491 atomic_set(&vha->loop_state, LOOP_UPDATE); 3492 3493 /* Issue a marker after FW becomes ready. 
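 * (The MK_SYNC_ALL marker is sent so the firmware resynchronizes its
 * command stream for all targets before new I/O is queued.)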
*/ 3494 qla2x00_marker(vha, req, rsp, 0, 0, 3495 MK_SYNC_ALL); 3496 vha->marker_needed = 0; 3497 3498 /* Remap devices on Loop. */ 3499 clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 3500 3501 qla2x00_configure_loop(vha); 3502 wait_time--; 3503 } while (!atomic_read(&vha->loop_down_timer) && 3504 !(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) 3505 && wait_time && (test_bit(LOOP_RESYNC_NEEDED, 3506 &vha->dpc_flags))); 3507 } 3508 } 3509 3510 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) 3511 return (QLA_FUNCTION_FAILED); 3512 3513 if (rval) 3514 DEBUG2_3(printk("%s(): **** FAILED ****\n", __func__)); 3515 3516 return (rval); 3517 } 3518 3519 void 3520 qla2x00_update_fcports(scsi_qla_host_t *base_vha) 3521 { 3522 fc_port_t *fcport; 3523 struct scsi_qla_host *tvp, *vha; 3524 3525 /* Go with deferred removal of rport references. */ 3526 list_for_each_entry_safe(vha, tvp, &base_vha->hw->vp_list, list) 3527 list_for_each_entry(fcport, &vha->vp_fcports, list) 3528 if (fcport && fcport->drport && 3529 atomic_read(&fcport->state) != FCS_UNCONFIGURED) 3530 qla2x00_rport_del(fcport); 3531 } 3532 3533 /* 3534 * qla2x00_abort_isp 3535 * Resets ISP and aborts all outstanding commands. 3536 * 3537 * Input: 3538 * ha = adapter block pointer. 3539 * 3540 * Returns: 3541 * 0 = success 3542 */ 3543 int 3544 qla2x00_abort_isp(scsi_qla_host_t *vha) 3545 { 3546 int rval; 3547 uint8_t status = 0; 3548 struct qla_hw_data *ha = vha->hw; 3549 struct scsi_qla_host *vp; 3550 struct scsi_qla_host *tvp; 3551 struct req_que *req = ha->req_q_map[0]; 3552 3553 if (vha->flags.online) { 3554 vha->flags.online = 0; 3555 ha->flags.chip_reset_done = 0; 3556 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 3557 ha->qla_stats.total_isp_aborts++; 3558 3559 qla_printk(KERN_INFO, ha, 3560 "Performing ISP error recovery - ha= %p.\n", ha); 3561 ha->isp_ops->reset_chip(vha); 3562 3563 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 3564 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 3565 atomic_set(&vha->loop_state, LOOP_DOWN); 3566 qla2x00_mark_all_devices_lost(vha, 0); 3567 } else { 3568 if (!atomic_read(&vha->loop_down_timer)) 3569 atomic_set(&vha->loop_down_timer, 3570 LOOP_DOWN_TIME); 3571 } 3572 3573 /* Requeue all commands in outstanding command list. */ 3574 qla2x00_abort_all_cmds(vha, DID_RESET << 16); 3575 3576 if (unlikely(pci_channel_offline(ha->pdev) && 3577 ha->flags.pci_channel_io_perm_failure)) { 3578 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags); 3579 status = 0; 3580 return status; 3581 } 3582 3583 ha->isp_ops->get_flash_version(vha, req->ring); 3584 3585 ha->isp_ops->nvram_config(vha); 3586 3587 if (!qla2x00_restart_isp(vha)) { 3588 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); 3589 3590 if (!atomic_read(&vha->loop_down_timer)) { 3591 /* 3592 * Issue marker command only when we are going 3593 * to start the I/O . 
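 * (Setting marker_needed defers the marker: the I/O path checks it and
 * issues the marker along with the first request it builds.)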
3594 */ 3595 vha->marker_needed = 1; 3596 } 3597 3598 vha->flags.online = 1; 3599 3600 ha->isp_ops->enable_intrs(ha); 3601 3602 ha->isp_abort_cnt = 0; 3603 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags); 3604 3605 if (IS_QLA81XX(ha)) 3606 qla2x00_get_fw_version(vha, 3607 &ha->fw_major_version, 3608 &ha->fw_minor_version, 3609 &ha->fw_subminor_version, 3610 &ha->fw_attributes, &ha->fw_memory_size, 3611 ha->mpi_version, &ha->mpi_capabilities, 3612 ha->phy_version); 3613 3614 if (ha->fce) { 3615 ha->flags.fce_enabled = 1; 3616 memset(ha->fce, 0, 3617 fce_calc_size(ha->fce_bufs)); 3618 rval = qla2x00_enable_fce_trace(vha, 3619 ha->fce_dma, ha->fce_bufs, ha->fce_mb, 3620 &ha->fce_bufs); 3621 if (rval) { 3622 qla_printk(KERN_WARNING, ha, 3623 "Unable to reinitialize FCE " 3624 "(%d).\n", rval); 3625 ha->flags.fce_enabled = 0; 3626 } 3627 } 3628 3629 if (ha->eft) { 3630 memset(ha->eft, 0, EFT_SIZE); 3631 rval = qla2x00_enable_eft_trace(vha, 3632 ha->eft_dma, EFT_NUM_BUFFERS); 3633 if (rval) { 3634 qla_printk(KERN_WARNING, ha, 3635 "Unable to reinitialize EFT " 3636 "(%d).\n", rval); 3637 } 3638 } 3639 } else { /* failed the ISP abort */ 3640 vha->flags.online = 1; 3641 if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) { 3642 if (ha->isp_abort_cnt == 0) { 3643 qla_printk(KERN_WARNING, ha, 3644 "ISP error recovery failed - " 3645 "board disabled\n"); 3646 /* 3647 * The next call disables the board 3648 * completely. 3649 */ 3650 ha->isp_ops->reset_adapter(vha); 3651 vha->flags.online = 0; 3652 clear_bit(ISP_ABORT_RETRY, 3653 &vha->dpc_flags); 3654 status = 0; 3655 } else { /* schedule another ISP abort */ 3656 ha->isp_abort_cnt--; 3657 DEBUG(printk("qla%ld: ISP abort - " 3658 "retry remaining %d\n", 3659 vha->host_no, ha->isp_abort_cnt)); 3660 status = 1; 3661 } 3662 } else { 3663 ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT; 3664 DEBUG(printk("qla2x00(%ld): ISP error recovery " 3665 "- retrying (%d) more times\n", 3666 vha->host_no, ha->isp_abort_cnt)); 3667 set_bit(ISP_ABORT_RETRY, &vha->dpc_flags); 3668 status = 1; 3669 } 3670 } 3671 3672 } 3673 3674 if (!status) { 3675 DEBUG(printk(KERN_INFO 3676 "qla2x00_abort_isp(%ld): succeeded.\n", 3677 vha->host_no)); 3678 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { 3679 if (vp->vp_idx) 3680 qla2x00_vp_abort_isp(vp); 3681 } 3682 } else { 3683 qla_printk(KERN_INFO, ha, 3684 "qla2x00_abort_isp: **** FAILED ****\n"); 3685 } 3686 3687 return(status); 3688 } 3689 3690 /* 3691 * qla2x00_restart_isp 3692 * restarts the ISP after a reset 3693 * 3694 * Input: 3695 * ha = adapter block pointer. 3696 * 3697 * Returns: 3698 * 0 = success 3699 */ 3700 static int 3701 qla2x00_restart_isp(scsi_qla_host_t *vha) 3702 { 3703 int status = 0; 3704 uint32_t wait_time; 3705 struct qla_hw_data *ha = vha->hw; 3706 struct req_que *req = ha->req_q_map[0]; 3707 struct rsp_que *rsp = ha->rsp_q_map[0]; 3708 3709 /* If firmware needs to be loaded */ 3710 if (qla2x00_isp_firmware(vha)) { 3711 vha->flags.online = 0; 3712 status = ha->isp_ops->chip_diag(vha); 3713 if (!status) 3714 status = qla2x00_setup_chip(vha); 3715 } 3716 3717 if (!status && !(status = qla2x00_init_rings(vha))) { 3718 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); 3719 ha->flags.chip_reset_done = 1; 3720 /* Initialize the queues in use */ 3721 qla25xx_init_queues(ha); 3722 3723 status = qla2x00_fw_ready(vha); 3724 if (!status) { 3725 DEBUG(printk("%s(): Start configure loop, " 3726 "status = %d\n", __func__, status)); 3727 3728 /* Issue a marker after FW becomes ready. 
*/ 3729 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL); 3730 3731 vha->flags.online = 1; 3732 /* Wait at most MAX_TARGET RSCNs for a stable link. */ 3733 wait_time = 256; 3734 do { 3735 clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 3736 qla2x00_configure_loop(vha); 3737 wait_time--; 3738 } while (!atomic_read(&vha->loop_down_timer) && 3739 !(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) 3740 && wait_time && (test_bit(LOOP_RESYNC_NEEDED, 3741 &vha->dpc_flags))); 3742 } 3743 3744 /* if no cable then assume it's good */ 3745 if ((vha->device_flags & DFLG_NO_CABLE)) 3746 status = 0; 3747 3748 DEBUG(printk("%s(): Configure loop done, status = 0x%x\n", 3749 __func__, 3750 status)); 3751 } 3752 return (status); 3753 } 3754 3755 static int 3756 qla25xx_init_queues(struct qla_hw_data *ha) 3757 { 3758 struct rsp_que *rsp = NULL; 3759 struct req_que *req = NULL; 3760 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 3761 int ret = -1; 3762 int i; 3763 3764 for (i = 1; i < ha->max_rsp_queues; i++) { 3765 rsp = ha->rsp_q_map[i]; 3766 if (rsp) { 3767 rsp->options &= ~BIT_0; 3768 ret = qla25xx_init_rsp_que(base_vha, rsp); 3769 if (ret != QLA_SUCCESS) 3770 DEBUG2_17(printk(KERN_WARNING 3771 "%s Rsp que:%d init failed\n", __func__, 3772 rsp->id)); 3773 else 3774 DEBUG2_17(printk(KERN_INFO 3775 "%s Rsp que:%d inited\n", __func__, 3776 rsp->id)); 3777 } 3778 } 3779 for (i = 1; i < ha->max_req_queues; i++) { 3780 req = ha->req_q_map[i]; 3781 if (req) { 3782 /* Clear outstanding commands array. */ 3783 req->options &= ~BIT_0; 3784 ret = qla25xx_init_req_que(base_vha, req); 3785 if (ret != QLA_SUCCESS) 3786 DEBUG2_17(printk(KERN_WARNING 3787 "%s Req que:%d init failed\n", __func__, 3788 req->id)); 3789 else 3790 DEBUG2_17(printk(KERN_WARNING 3791 "%s Req que:%d inited\n", __func__, 3792 req->id)); 3793 } 3794 } 3795 return ret; 3796 } 3797 3798 /* 3799 * qla2x00_reset_adapter 3800 * Reset adapter. 3801 * 3802 * Input: 3803 * ha = adapter block pointer. 3804 */ 3805 void 3806 qla2x00_reset_adapter(scsi_qla_host_t *vha) 3807 { 3808 unsigned long flags = 0; 3809 struct qla_hw_data *ha = vha->hw; 3810 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 3811 3812 vha->flags.online = 0; 3813 ha->isp_ops->disable_intrs(ha); 3814 3815 spin_lock_irqsave(&ha->hardware_lock, flags); 3816 WRT_REG_WORD(®->hccr, HCCR_RESET_RISC); 3817 RD_REG_WORD(®->hccr); /* PCI Posting. */ 3818 WRT_REG_WORD(®->hccr, HCCR_RELEASE_RISC); 3819 RD_REG_WORD(®->hccr); /* PCI Posting. */ 3820 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3821 } 3822 3823 void 3824 qla24xx_reset_adapter(scsi_qla_host_t *vha) 3825 { 3826 unsigned long flags = 0; 3827 struct qla_hw_data *ha = vha->hw; 3828 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 3829 3830 vha->flags.online = 0; 3831 ha->isp_ops->disable_intrs(ha); 3832 3833 spin_lock_irqsave(&ha->hardware_lock, flags); 3834 WRT_REG_DWORD(®->hccr, HCCRX_SET_RISC_RESET); 3835 RD_REG_DWORD(®->hccr); 3836 WRT_REG_DWORD(®->hccr, HCCRX_REL_RISC_PAUSE); 3837 RD_REG_DWORD(®->hccr); 3838 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3839 3840 if (IS_NOPOLLING_TYPE(ha)) 3841 ha->isp_ops->enable_intrs(ha); 3842 } 3843 3844 /* On sparc systems, obtain port and node WWN from firmware 3845 * properties. 
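 * (OpenFirmware exposes these as the "port-wwn" and "node-wwn" properties
 * of the adapter's PCI device node, read via of_get_property() below.)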
3846 */ 3847 static void qla24xx_nvram_wwn_from_ofw(scsi_qla_host_t *vha, 3848 struct nvram_24xx *nv) 3849 { 3850 #ifdef CONFIG_SPARC 3851 struct qla_hw_data *ha = vha->hw; 3852 struct pci_dev *pdev = ha->pdev; 3853 struct device_node *dp = pci_device_to_OF_node(pdev); 3854 const u8 *val; 3855 int len; 3856 3857 val = of_get_property(dp, "port-wwn", &len); 3858 if (val && len >= WWN_SIZE) 3859 memcpy(nv->port_name, val, WWN_SIZE); 3860 3861 val = of_get_property(dp, "node-wwn", &len); 3862 if (val && len >= WWN_SIZE) 3863 memcpy(nv->node_name, val, WWN_SIZE); 3864 #endif 3865 } 3866 3867 int 3868 qla24xx_nvram_config(scsi_qla_host_t *vha) 3869 { 3870 int rval; 3871 struct init_cb_24xx *icb; 3872 struct nvram_24xx *nv; 3873 uint32_t *dptr; 3874 uint8_t *dptr1, *dptr2; 3875 uint32_t chksum; 3876 uint16_t cnt; 3877 struct qla_hw_data *ha = vha->hw; 3878 3879 rval = QLA_SUCCESS; 3880 icb = (struct init_cb_24xx *)ha->init_cb; 3881 nv = ha->nvram; 3882 3883 /* Determine NVRAM starting address. */ 3884 if (ha->flags.port0) { 3885 ha->nvram_base = FA_NVRAM_FUNC0_ADDR; 3886 ha->vpd_base = FA_NVRAM_VPD0_ADDR; 3887 } else { 3888 ha->nvram_base = FA_NVRAM_FUNC1_ADDR; 3889 ha->vpd_base = FA_NVRAM_VPD1_ADDR; 3890 } 3891 ha->nvram_size = sizeof(struct nvram_24xx); 3892 ha->vpd_size = FA_NVRAM_VPD_SIZE; 3893 3894 /* Get VPD data into cache */ 3895 ha->vpd = ha->nvram + VPD_OFFSET; 3896 ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd, 3897 ha->nvram_base - FA_NVRAM_FUNC0_ADDR, FA_NVRAM_VPD_SIZE * 4); 3898 3899 /* Get NVRAM data into cache and calculate checksum. */ 3900 dptr = (uint32_t *)nv; 3901 ha->isp_ops->read_nvram(vha, (uint8_t *)dptr, ha->nvram_base, 3902 ha->nvram_size); 3903 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++) 3904 chksum += le32_to_cpu(*dptr++); 3905 3906 DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", vha->host_no)); 3907 DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size)); 3908 3909 /* Bad NVRAM data, set default parameters. */ 3910 if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P' 3911 || nv->id[3] != ' ' || 3912 nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) { 3913 /* Reset NVRAM data. */ 3914 qla_printk(KERN_WARNING, ha, "Inconsistent NVRAM detected: " 3915 "checksum=0x%x id=%c version=0x%x.\n", chksum, nv->id[0], 3916 le16_to_cpu(nv->nvram_version)); 3917 qla_printk(KERN_WARNING, ha, "Falling back to functioning (yet " 3918 "invalid -- WWPN) defaults.\n"); 3919 3920 /* 3921 * Set default initialization control block.
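 * Note: the default WWPN/WWNN below are built from the QLogic OUI
 * (00:e0:8b), with ha->port_no folded into the second byte so the two
 * functions of a dual-port HBA get distinct port names.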
3922 */ 3923 memset(nv, 0, ha->nvram_size); 3924 nv->nvram_version = __constant_cpu_to_le16(ICB_VERSION); 3925 nv->version = __constant_cpu_to_le16(ICB_VERSION); 3926 nv->frame_payload_size = __constant_cpu_to_le16(2048); 3927 nv->execution_throttle = __constant_cpu_to_le16(0xFFFF); 3928 nv->exchange_count = __constant_cpu_to_le16(0); 3929 nv->hard_address = __constant_cpu_to_le16(124); 3930 nv->port_name[0] = 0x21; 3931 nv->port_name[1] = 0x00 + ha->port_no; 3932 nv->port_name[2] = 0x00; 3933 nv->port_name[3] = 0xe0; 3934 nv->port_name[4] = 0x8b; 3935 nv->port_name[5] = 0x1c; 3936 nv->port_name[6] = 0x55; 3937 nv->port_name[7] = 0x86; 3938 nv->node_name[0] = 0x20; 3939 nv->node_name[1] = 0x00; 3940 nv->node_name[2] = 0x00; 3941 nv->node_name[3] = 0xe0; 3942 nv->node_name[4] = 0x8b; 3943 nv->node_name[5] = 0x1c; 3944 nv->node_name[6] = 0x55; 3945 nv->node_name[7] = 0x86; 3946 qla24xx_nvram_wwn_from_ofw(vha, nv); 3947 nv->login_retry_count = __constant_cpu_to_le16(8); 3948 nv->interrupt_delay_timer = __constant_cpu_to_le16(0); 3949 nv->login_timeout = __constant_cpu_to_le16(0); 3950 nv->firmware_options_1 = 3951 __constant_cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1); 3952 nv->firmware_options_2 = __constant_cpu_to_le32(2 << 4); 3953 nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12); 3954 nv->firmware_options_3 = __constant_cpu_to_le32(2 << 13); 3955 nv->host_p = __constant_cpu_to_le32(BIT_11|BIT_10); 3956 nv->efi_parameters = __constant_cpu_to_le32(0); 3957 nv->reset_delay = 5; 3958 nv->max_luns_per_target = __constant_cpu_to_le16(128); 3959 nv->port_down_retry_count = __constant_cpu_to_le16(30); 3960 nv->link_down_timeout = __constant_cpu_to_le16(30); 3961 3962 rval = 1; 3963 } 3964 3965 /* Reset Initialization control block */ 3966 memset(icb, 0, ha->init_cb_size); 3967 3968 /* Copy 1st segment. */ 3969 dptr1 = (uint8_t *)icb; 3970 dptr2 = (uint8_t *)&nv->version; 3971 cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version; 3972 while (cnt--) 3973 *dptr1++ = *dptr2++; 3974 3975 icb->login_retry_count = nv->login_retry_count; 3976 icb->link_down_on_nos = nv->link_down_on_nos; 3977 3978 /* Copy 2nd segment. */ 3979 dptr1 = (uint8_t *)&icb->interrupt_delay_timer; 3980 dptr2 = (uint8_t *)&nv->interrupt_delay_timer; 3981 cnt = (uint8_t *)&icb->reserved_3 - 3982 (uint8_t *)&icb->interrupt_delay_timer; 3983 while (cnt--) 3984 *dptr1++ = *dptr2++; 3985 3986 /* 3987 * Setup driver NVRAM options. 3988 */ 3989 qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name), 3990 "QLA2462"); 3991 3992 /* Use alternate WWN? */ 3993 if (nv->host_p & __constant_cpu_to_le32(BIT_15)) { 3994 memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE); 3995 memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE); 3996 } 3997 3998 /* Prepare nodename */ 3999 if ((icb->firmware_options_1 & __constant_cpu_to_le32(BIT_14)) == 0) { 4000 /* 4001 * Firmware will apply the following mask if the nodename was 4002 * not provided. 4003 */ 4004 memcpy(icb->node_name, icb->port_name, WWN_SIZE); 4005 icb->node_name[0] &= 0xF0; 4006 } 4007 4008 /* Set host adapter parameters. */ 4009 ha->flags.disable_risc_code_load = 0; 4010 ha->flags.enable_lip_reset = 0; 4011 ha->flags.enable_lip_full_login = 4012 le32_to_cpu(nv->host_p) & BIT_10 ? 1: 0; 4013 ha->flags.enable_target_reset = 4014 le32_to_cpu(nv->host_p) & BIT_11 ? 1: 0; 4015 ha->flags.enable_led_scheme = 0; 4016 ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 
1: 0; 4017 4018 ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) & 4019 (BIT_6 | BIT_5 | BIT_4)) >> 4; 4020 4021 memcpy(ha->fw_seriallink_options24, nv->seriallink_options, 4022 sizeof(ha->fw_seriallink_options24)); 4023 4024 /* save HBA serial number */ 4025 ha->serial0 = icb->port_name[5]; 4026 ha->serial1 = icb->port_name[6]; 4027 ha->serial2 = icb->port_name[7]; 4028 memcpy(vha->node_name, icb->node_name, WWN_SIZE); 4029 memcpy(vha->port_name, icb->port_name, WWN_SIZE); 4030 4031 icb->execution_throttle = __constant_cpu_to_le16(0xFFFF); 4032 4033 ha->retry_count = le16_to_cpu(nv->login_retry_count); 4034 4035 /* Set minimum login_timeout to 4 seconds. */ 4036 if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout) 4037 nv->login_timeout = cpu_to_le16(ql2xlogintimeout); 4038 if (le16_to_cpu(nv->login_timeout) < 4) 4039 nv->login_timeout = __constant_cpu_to_le16(4); 4040 ha->login_timeout = le16_to_cpu(nv->login_timeout); 4041 icb->login_timeout = nv->login_timeout; 4042 4043 /* Set minimum RATOV to 100 tenths of a second. */ 4044 ha->r_a_tov = 100; 4045 4046 ha->loop_reset_delay = nv->reset_delay; 4047 4048 /* Link Down Timeout = 0: 4049 * 4050 * When Port Down timer expires we will start returning 4051 * I/O's to OS with "DID_NO_CONNECT". 4052 * 4053 * Link Down Timeout != 0: 4054 * 4055 * The driver waits for the link to come up after link down 4056 * before returning I/Os to OS with "DID_NO_CONNECT". 4057 */ 4058 if (le16_to_cpu(nv->link_down_timeout) == 0) { 4059 ha->loop_down_abort_time = 4060 (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT); 4061 } else { 4062 ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout); 4063 ha->loop_down_abort_time = 4064 (LOOP_DOWN_TIME - ha->link_down_timeout); 4065 } 4066 4067 /* Need enough time to try and get the port back. */ 4068 ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count); 4069 if (qlport_down_retry) 4070 ha->port_down_retry_count = qlport_down_retry; 4071 4072 /* Set login_retry_count */ 4073 ha->login_retry_count = le16_to_cpu(nv->login_retry_count); 4074 if (ha->port_down_retry_count == 4075 le16_to_cpu(nv->port_down_retry_count) && 4076 ha->port_down_retry_count > 3) 4077 ha->login_retry_count = ha->port_down_retry_count; 4078 else if (ha->port_down_retry_count > (int)ha->login_retry_count) 4079 ha->login_retry_count = ha->port_down_retry_count; 4080 if (ql2xloginretrycount) 4081 ha->login_retry_count = ql2xloginretrycount; 4082 4083 /* Enable ZIO. */ 4084 if (!vha->flags.init_done) { 4085 ha->zio_mode = le32_to_cpu(icb->firmware_options_2) & 4086 (BIT_3 | BIT_2 | BIT_1 | BIT_0); 4087 ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ? 
4088 le16_to_cpu(icb->interrupt_delay_timer): 2; 4089 } 4090 icb->firmware_options_2 &= __constant_cpu_to_le32( 4091 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0)); 4092 vha->flags.process_response_queue = 0; 4093 if (ha->zio_mode != QLA_ZIO_DISABLED) { 4094 ha->zio_mode = QLA_ZIO_MODE_6; 4095 4096 DEBUG2(printk("scsi(%ld): ZIO mode %d enabled; timer delay " 4097 "(%d us).\n", vha->host_no, ha->zio_mode, 4098 ha->zio_timer * 100)); 4099 qla_printk(KERN_INFO, ha, 4100 "ZIO mode %d enabled; timer delay (%d us).\n", 4101 ha->zio_mode, ha->zio_timer * 100); 4102 4103 icb->firmware_options_2 |= cpu_to_le32( 4104 (uint32_t)ha->zio_mode); 4105 icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer); 4106 vha->flags.process_response_queue = 1; 4107 } 4108 4109 if (rval) { 4110 DEBUG2_3(printk(KERN_WARNING 4111 "scsi(%ld): NVRAM configuration failed!\n", vha->host_no)); 4112 } 4113 return (rval); 4114 } 4115 4116 static int 4117 qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr, 4118 uint32_t faddr) 4119 { 4120 int rval = QLA_SUCCESS; 4121 int segments, fragment; 4122 uint32_t *dcode, dlen; 4123 uint32_t risc_addr; 4124 uint32_t risc_size; 4125 uint32_t i; 4126 struct qla_hw_data *ha = vha->hw; 4127 struct req_que *req = ha->req_q_map[0]; 4128 4129 qla_printk(KERN_INFO, ha, 4130 "FW: Loading from flash (%x)...\n", faddr); 4131 4132 rval = QLA_SUCCESS; 4133 4134 segments = FA_RISC_CODE_SEGMENTS; 4135 dcode = (uint32_t *)req->ring; 4136 *srisc_addr = 0; 4137 4138 /* Validate firmware image by checking version. */ 4139 qla24xx_read_flash_data(vha, dcode, faddr + 4, 4); 4140 for (i = 0; i < 4; i++) 4141 dcode[i] = be32_to_cpu(dcode[i]); 4142 if ((dcode[0] == 0xffffffff && dcode[1] == 0xffffffff && 4143 dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) || 4144 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 && 4145 dcode[3] == 0)) { 4146 qla_printk(KERN_WARNING, ha, 4147 "Unable to verify integrity of flash firmware image!\n"); 4148 qla_printk(KERN_WARNING, ha, 4149 "Firmware data: %08x %08x %08x %08x!\n", dcode[0], 4150 dcode[1], dcode[2], dcode[3]); 4151 4152 return QLA_FUNCTION_FAILED; 4153 } 4154 4155 while (segments && rval == QLA_SUCCESS) { 4156 /* Read segment's load information. */ 4157 qla24xx_read_flash_data(vha, dcode, faddr, 4); 4158 4159 risc_addr = be32_to_cpu(dcode[2]); 4160 *srisc_addr = *srisc_addr == 0 ? risc_addr : *srisc_addr; 4161 risc_size = be32_to_cpu(dcode[3]); 4162 4163 fragment = 0; 4164 while (risc_size > 0 && rval == QLA_SUCCESS) { 4165 dlen = (uint32_t)(ha->fw_transfer_size >> 2); 4166 if (dlen > risc_size) 4167 dlen = risc_size; 4168 4169 DEBUG7(printk("scsi(%ld): Loading risc segment@ risc " 4170 "addr %x, number of dwords 0x%x, offset 0x%x.\n", 4171 vha->host_no, risc_addr, dlen, faddr)); 4172 4173 qla24xx_read_flash_data(vha, dcode, faddr, dlen); 4174 for (i = 0; i < dlen; i++) 4175 dcode[i] = swab32(dcode[i]); 4176 4177 rval = qla2x00_load_ram(vha, req->dma, risc_addr, 4178 dlen); 4179 if (rval) { 4180 DEBUG(printk("scsi(%ld):[ERROR] Failed to load " 4181 "segment %d of firmware\n", vha->host_no, 4182 fragment)); 4183 qla_printk(KERN_WARNING, ha, 4184 "[ERROR] Failed to load segment %d of " 4185 "firmware\n", fragment); 4186 break; 4187 } 4188 4189 faddr += dlen; 4190 risc_addr += dlen; 4191 risc_size -= dlen; 4192 fragment++; 4193 } 4194 4195 /* Next segment. 
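*
* Each flash segment begins with a small header whose third and fourth
* dwords give the RISC load address and the segment length in dwords.
* The segment data is read back in chunks of at most
* ha->fw_transfer_size/4 dwords, byte-swapped with swab32() into the
* request ring buffer, and pushed to the RISC with qla2x00_load_ram()
* before moving on.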
*/ 4196 segments--; 4197 } 4198 4199 return rval; 4200 } 4201 4202 #define QLA_FW_URL "ftp://ftp.qlogic.com/outgoing/linux/firmware/" 4203 4204 int 4205 qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr) 4206 { 4207 int rval; 4208 int i, fragment; 4209 uint16_t *wcode, *fwcode; 4210 uint32_t risc_addr, risc_size, fwclen, wlen, *seg; 4211 struct fw_blob *blob; 4212 struct qla_hw_data *ha = vha->hw; 4213 struct req_que *req = ha->req_q_map[0]; 4214 4215 /* Load firmware blob. */ 4216 blob = qla2x00_request_firmware(vha); 4217 if (!blob) { 4218 qla_printk(KERN_ERR, ha, "Firmware image unavailable.\n"); 4219 qla_printk(KERN_ERR, ha, "Firmware images can be retrieved " 4220 "from: " QLA_FW_URL ".\n"); 4221 return QLA_FUNCTION_FAILED; 4222 } 4223 4224 rval = QLA_SUCCESS; 4225 4226 wcode = (uint16_t *)req->ring; 4227 *srisc_addr = 0; 4228 fwcode = (uint16_t *)blob->fw->data; 4229 fwclen = 0; 4230 4231 /* Validate firmware image by checking version. */ 4232 if (blob->fw->size < 8 * sizeof(uint16_t)) { 4233 qla_printk(KERN_WARNING, ha, 4234 "Unable to verify integrity of firmware image (%Zd)!\n", 4235 blob->fw->size); 4236 goto fail_fw_integrity; 4237 } 4238 for (i = 0; i < 4; i++) 4239 wcode[i] = be16_to_cpu(fwcode[i + 4]); 4240 if ((wcode[0] == 0xffff && wcode[1] == 0xffff && wcode[2] == 0xffff && 4241 wcode[3] == 0xffff) || (wcode[0] == 0 && wcode[1] == 0 && 4242 wcode[2] == 0 && wcode[3] == 0)) { 4243 qla_printk(KERN_WARNING, ha, 4244 "Unable to verify integrity of firmware image!\n"); 4245 qla_printk(KERN_WARNING, ha, 4246 "Firmware data: %04x %04x %04x %04x!\n", wcode[0], 4247 wcode[1], wcode[2], wcode[3]); 4248 goto fail_fw_integrity; 4249 } 4250 4251 seg = blob->segs; 4252 while (*seg && rval == QLA_SUCCESS) { 4253 risc_addr = *seg; 4254 *srisc_addr = *srisc_addr == 0 ? *seg : *srisc_addr; 4255 risc_size = be16_to_cpu(fwcode[3]); 4256 4257 /* Validate firmware image size. */ 4258 fwclen += risc_size * sizeof(uint16_t); 4259 if (blob->fw->size < fwclen) { 4260 qla_printk(KERN_WARNING, ha, 4261 "Unable to verify integrity of firmware image " 4262 "(%Zd)!\n", blob->fw->size); 4263 goto fail_fw_integrity; 4264 } 4265 4266 fragment = 0; 4267 while (risc_size > 0 && rval == QLA_SUCCESS) { 4268 wlen = (uint16_t)(ha->fw_transfer_size >> 1); 4269 if (wlen > risc_size) 4270 wlen = risc_size; 4271 4272 DEBUG7(printk("scsi(%ld): Loading risc segment@ risc " 4273 "addr %x, number of words 0x%x.\n", vha->host_no, 4274 risc_addr, wlen)); 4275 4276 for (i = 0; i < wlen; i++) 4277 wcode[i] = swab16(fwcode[i]); 4278 4279 rval = qla2x00_load_ram(vha, req->dma, risc_addr, 4280 wlen); 4281 if (rval) { 4282 DEBUG(printk("scsi(%ld):[ERROR] Failed to load " 4283 "segment %d of firmware\n", vha->host_no, 4284 fragment)); 4285 qla_printk(KERN_WARNING, ha, 4286 "[ERROR] Failed to load segment %d of " 4287 "firmware\n", fragment); 4288 break; 4289 } 4290 4291 fwcode += wlen; 4292 risc_addr += wlen; 4293 risc_size -= wlen; 4294 fragment++; 4295 } 4296 4297 /* Next segment. */ 4298 seg++; 4299 } 4300 return rval; 4301 4302 fail_fw_integrity: 4303 return QLA_FUNCTION_FAILED; 4304 } 4305 4306 static int 4307 qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr) 4308 { 4309 int rval; 4310 int segments, fragment; 4311 uint32_t *dcode, dlen; 4312 uint32_t risc_addr; 4313 uint32_t risc_size; 4314 uint32_t i; 4315 struct fw_blob *blob; 4316 uint32_t *fwcode, fwclen; 4317 struct qla_hw_data *ha = vha->hw; 4318 struct req_que *req = ha->req_q_map[0]; 4319 4320 /* Load firmware blob. 
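*
* qla2x00_request_firmware() wraps the kernel's request_firmware()
* interface and returns the .bin image appropriate for this ISP type
* as a struct fw_blob.  For ISP24xx and later parts the image is
* dword-oriented, so the header and segment data below are consumed
* as big-endian 32-bit words.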
*/ 4321 blob = qla2x00_request_firmware(vha); 4322 if (!blob) { 4323 qla_printk(KERN_ERR, ha, "Firmware image unavailable.\n"); 4324 qla_printk(KERN_ERR, ha, "Firmware images can be retrieved " 4325 "from: " QLA_FW_URL ".\n"); 4326 4327 return QLA_FUNCTION_FAILED; 4328 } 4329 4330 qla_printk(KERN_INFO, ha, 4331 "FW: Loading via request-firmware...\n"); 4332 4333 rval = QLA_SUCCESS; 4334 4335 segments = FA_RISC_CODE_SEGMENTS; 4336 dcode = (uint32_t *)req->ring; 4337 *srisc_addr = 0; 4338 fwcode = (uint32_t *)blob->fw->data; 4339 fwclen = 0; 4340 4341 /* Validate firmware image by checking version. */ 4342 if (blob->fw->size < 8 * sizeof(uint32_t)) { 4343 qla_printk(KERN_WARNING, ha, 4344 "Unable to verify integrity of firmware image (%Zd)!\n", 4345 blob->fw->size); 4346 goto fail_fw_integrity; 4347 } 4348 for (i = 0; i < 4; i++) 4349 dcode[i] = be32_to_cpu(fwcode[i + 4]); 4350 if ((dcode[0] == 0xffffffff && dcode[1] == 0xffffffff && 4351 dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) || 4352 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 && 4353 dcode[3] == 0)) { 4354 qla_printk(KERN_WARNING, ha, 4355 "Unable to verify integrity of firmware image!\n"); 4356 qla_printk(KERN_WARNING, ha, 4357 "Firmware data: %08x %08x %08x %08x!\n", dcode[0], 4358 dcode[1], dcode[2], dcode[3]); 4359 goto fail_fw_integrity; 4360 } 4361 4362 while (segments && rval == QLA_SUCCESS) { 4363 risc_addr = be32_to_cpu(fwcode[2]); 4364 *srisc_addr = *srisc_addr == 0 ? risc_addr : *srisc_addr; 4365 risc_size = be32_to_cpu(fwcode[3]); 4366 4367 /* Validate firmware image size. */ 4368 fwclen += risc_size * sizeof(uint32_t); 4369 if (blob->fw->size < fwclen) { 4370 qla_printk(KERN_WARNING, ha, 4371 "Unable to verify integrity of firmware image " 4372 "(%Zd)!\n", blob->fw->size); 4373 4374 goto fail_fw_integrity; 4375 } 4376 4377 fragment = 0; 4378 while (risc_size > 0 && rval == QLA_SUCCESS) { 4379 dlen = (uint32_t)(ha->fw_transfer_size >> 2); 4380 if (dlen > risc_size) 4381 dlen = risc_size; 4382 4383 DEBUG7(printk("scsi(%ld): Loading risc segment@ risc " 4384 "addr %x, number of dwords 0x%x.\n", vha->host_no, 4385 risc_addr, dlen)); 4386 4387 for (i = 0; i < dlen; i++) 4388 dcode[i] = swab32(fwcode[i]); 4389 4390 rval = qla2x00_load_ram(vha, req->dma, risc_addr, 4391 dlen); 4392 if (rval) { 4393 DEBUG(printk("scsi(%ld):[ERROR] Failed to load " 4394 "segment %d of firmware\n", vha->host_no, 4395 fragment)); 4396 qla_printk(KERN_WARNING, ha, 4397 "[ERROR] Failed to load segment %d of " 4398 "firmware\n", fragment); 4399 break; 4400 } 4401 4402 fwcode += dlen; 4403 risc_addr += dlen; 4404 risc_size -= dlen; 4405 fragment++; 4406 } 4407 4408 /* Next segment. */ 4409 segments--; 4410 } 4411 return rval; 4412 4413 fail_fw_integrity: 4414 return QLA_FUNCTION_FAILED; 4415 } 4416 4417 int 4418 qla24xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr) 4419 { 4420 int rval; 4421 4422 if (ql2xfwloadbin == 1) 4423 return qla81xx_load_risc(vha, srisc_addr); 4424 4425 /* 4426 * FW Load priority: 4427 * 1) Firmware via request-firmware interface (.bin file). 4428 * 2) Firmware residing in flash. 
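*
* Note: ql2xfwloadbin == 1 (handled above) bypasses this ordering and
* defers to qla81xx_load_risc(), which prefers the flash image and
* falls back to the request-firmware blob.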
4429 */ 4430 rval = qla24xx_load_risc_blob(vha, srisc_addr); 4431 if (rval == QLA_SUCCESS) 4432 return rval; 4433 4434 return qla24xx_load_risc_flash(vha, srisc_addr, 4435 vha->hw->flt_region_fw); 4436 } 4437 4438 int 4439 qla81xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr) 4440 { 4441 int rval; 4442 struct qla_hw_data *ha = vha->hw; 4443 4444 if (ql2xfwloadbin == 2) 4445 goto try_blob_fw; 4446 4447 /* 4448 * FW Load priority: 4449 * 1) Firmware residing in flash. 4450 * 2) Firmware via request-firmware interface (.bin file). 4451 * 3) Golden-Firmware residing in flash -- limited operation. 4452 */ 4453 rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw); 4454 if (rval == QLA_SUCCESS) 4455 return rval; 4456 4457 try_blob_fw: 4458 rval = qla24xx_load_risc_blob(vha, srisc_addr); 4459 if (rval == QLA_SUCCESS || !ha->flt_region_gold_fw) 4460 return rval; 4461 4462 qla_printk(KERN_ERR, ha, 4463 "FW: Attempting to fallback to golden firmware...\n"); 4464 rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_gold_fw); 4465 if (rval != QLA_SUCCESS) 4466 return rval; 4467 4468 qla_printk(KERN_ERR, ha, 4469 "FW: Please update operational firmware...\n"); 4470 ha->flags.running_gold_fw = 1; 4471 4472 return rval; 4473 } 4474 4475 void 4476 qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha) 4477 { 4478 int ret, retries; 4479 struct qla_hw_data *ha = vha->hw; 4480 4481 if (ha->flags.pci_channel_io_perm_failure) 4482 return; 4483 if (!IS_FWI2_CAPABLE(ha)) 4484 return; 4485 if (!ha->fw_major_version) 4486 return; 4487 4488 ret = qla2x00_stop_firmware(vha); 4489 for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT && 4490 ret != QLA_INVALID_COMMAND && retries ; retries--) { 4491 ha->isp_ops->reset_chip(vha); 4492 if (ha->isp_ops->chip_diag(vha) != QLA_SUCCESS) 4493 continue; 4494 if (qla2x00_setup_chip(vha) != QLA_SUCCESS) 4495 continue; 4496 qla_printk(KERN_INFO, ha, 4497 "Attempting retry of stop-firmware command...\n"); 4498 ret = qla2x00_stop_firmware(vha); 4499 } 4500 } 4501 4502 int 4503 qla24xx_configure_vhba(scsi_qla_host_t *vha) 4504 { 4505 int rval = QLA_SUCCESS; 4506 uint16_t mb[MAILBOX_REGISTER_COUNT]; 4507 struct qla_hw_data *ha = vha->hw; 4508 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 4509 struct req_que *req; 4510 struct rsp_que *rsp; 4511 4512 if (!vha->vp_idx) 4513 return -EINVAL; 4514 4515 rval = qla2x00_fw_ready(base_vha); 4516 if (ha->flags.cpu_affinity_enabled) 4517 req = ha->req_q_map[0]; 4518 else 4519 req = vha->req; 4520 rsp = req->rsp; 4521 4522 if (rval == QLA_SUCCESS) { 4523 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); 4524 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL); 4525 } 4526 4527 vha->flags.management_server_logged_in = 0; 4528 4529 /* Login to SNS first */ 4530 ha->isp_ops->fabric_login(vha, NPH_SNS, 0xff, 0xff, 0xfc, mb, BIT_1); 4531 if (mb[0] != MBS_COMMAND_COMPLETE) { 4532 DEBUG15(qla_printk(KERN_INFO, ha, 4533 "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x " 4534 "mb[2]=%x mb[6]=%x mb[7]=%x\n", NPH_SNS, 4535 mb[0], mb[1], mb[2], mb[6], mb[7])); 4536 return (QLA_FUNCTION_FAILED); 4537 } 4538 4539 atomic_set(&vha->loop_down_timer, 0); 4540 atomic_set(&vha->loop_state, LOOP_UP); 4541 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 4542 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 4543 rval = qla2x00_loop_resync(base_vha); 4544 4545 return rval; 4546 } 4547 4548 /* 84XX Support **************************************************************/ 4549 4550 static LIST_HEAD(qla_cs84xx_list); 4551 static 
DEFINE_MUTEX(qla_cs84xx_mutex); 4552 4553 static struct qla_chip_state_84xx * 4554 qla84xx_get_chip(struct scsi_qla_host *vha) 4555 { 4556 struct qla_chip_state_84xx *cs84xx; 4557 struct qla_hw_data *ha = vha->hw; 4558 4559 mutex_lock(&qla_cs84xx_mutex); 4560 4561 /* Find any shared 84xx chip. */ 4562 list_for_each_entry(cs84xx, &qla_cs84xx_list, list) { 4563 if (cs84xx->bus == ha->pdev->bus) { 4564 kref_get(&cs84xx->kref); 4565 goto done; 4566 } 4567 } 4568 4569 cs84xx = kzalloc(sizeof(*cs84xx), GFP_KERNEL); 4570 if (!cs84xx) 4571 goto done; 4572 4573 kref_init(&cs84xx->kref); 4574 spin_lock_init(&cs84xx->access_lock); 4575 mutex_init(&cs84xx->fw_update_mutex); 4576 cs84xx->bus = ha->pdev->bus; 4577 4578 list_add_tail(&cs84xx->list, &qla_cs84xx_list); 4579 done: 4580 mutex_unlock(&qla_cs84xx_mutex); 4581 return cs84xx; 4582 } 4583 4584 static void 4585 __qla84xx_chip_release(struct kref *kref) 4586 { 4587 struct qla_chip_state_84xx *cs84xx = 4588 container_of(kref, struct qla_chip_state_84xx, kref); 4589 4590 mutex_lock(&qla_cs84xx_mutex); 4591 list_del(&cs84xx->list); 4592 mutex_unlock(&qla_cs84xx_mutex); 4593 kfree(cs84xx); 4594 } 4595 4596 void 4597 qla84xx_put_chip(struct scsi_qla_host *vha) 4598 { 4599 struct qla_hw_data *ha = vha->hw; 4600 if (ha->cs84xx) 4601 kref_put(&ha->cs84xx->kref, __qla84xx_chip_release); 4602 } 4603 4604 static int 4605 qla84xx_init_chip(scsi_qla_host_t *vha) 4606 { 4607 int rval; 4608 uint16_t status[2]; 4609 struct qla_hw_data *ha = vha->hw; 4610 4611 mutex_lock(&ha->cs84xx->fw_update_mutex); 4612 4613 rval = qla84xx_verify_chip(vha, status); 4614 4615 mutex_unlock(&ha->cs84xx->fw_update_mutex); 4616 4617 return rval != QLA_SUCCESS || status[0] ? QLA_FUNCTION_FAILED: 4618 QLA_SUCCESS; 4619 } 4620 4621 /* 81XX Support **************************************************************/ 4622 4623 int 4624 qla81xx_nvram_config(scsi_qla_host_t *vha) 4625 { 4626 int rval; 4627 struct init_cb_81xx *icb; 4628 struct nvram_81xx *nv; 4629 uint32_t *dptr; 4630 uint8_t *dptr1, *dptr2; 4631 uint32_t chksum; 4632 uint16_t cnt; 4633 struct qla_hw_data *ha = vha->hw; 4634 4635 rval = QLA_SUCCESS; 4636 icb = (struct init_cb_81xx *)ha->init_cb; 4637 nv = ha->nvram; 4638 4639 /* Determine NVRAM starting address. */ 4640 ha->nvram_size = sizeof(struct nvram_81xx); 4641 ha->vpd_size = FA_NVRAM_VPD_SIZE; 4642 4643 /* Get VPD data into cache */ 4644 ha->vpd = ha->nvram + VPD_OFFSET; 4645 ha->isp_ops->read_optrom(vha, ha->vpd, ha->flt_region_vpd << 2, 4646 ha->vpd_size); 4647 4648 /* Get NVRAM data into cache and calculate checksum. */ 4649 ha->isp_ops->read_optrom(vha, ha->nvram, ha->flt_region_nvram << 2, 4650 ha->nvram_size); 4651 dptr = (uint32_t *)nv; 4652 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++) 4653 chksum += le32_to_cpu(*dptr++); 4654 4655 DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", vha->host_no)); 4656 DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size)); 4657 4658 /* Bad NVRAM data, set defaults parameters. */ 4659 if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P' 4660 || nv->id[3] != ' ' || 4661 nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) { 4662 /* Reset NVRAM data. */ 4663 qla_printk(KERN_WARNING, ha, "Inconsistent NVRAM detected: " 4664 "checksum=0x%x id=%c version=0x%x.\n", chksum, nv->id[0], 4665 le16_to_cpu(nv->nvram_version)); 4666 qla_printk(KERN_WARNING, ha, "Falling back to functioning (yet " 4667 "invalid -- WWPN) defaults.\n"); 4668 4669 /* 4670 * Set default initialization control block. 
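*
* As in the ISP24xx path above, the defaults give an ICB_VERSION
* control block and a placeholder 00:e0:8b (QLogic OUI) WWPN/WWNN with
* ha->port_no folded into the port name.  The ISP81xx NVRAM also
* carries an enode (FCoE end-node) MAC address, seeded here with a
* placeholder 00:02:03:04:05:(06 + port_no) value.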
4671 */ 4672 memset(nv, 0, ha->nvram_size); 4673 nv->nvram_version = __constant_cpu_to_le16(ICB_VERSION); 4674 nv->version = __constant_cpu_to_le16(ICB_VERSION); 4675 nv->frame_payload_size = __constant_cpu_to_le16(2048); 4676 nv->execution_throttle = __constant_cpu_to_le16(0xFFFF); 4677 nv->exchange_count = __constant_cpu_to_le16(0); 4678 nv->port_name[0] = 0x21; 4679 nv->port_name[1] = 0x00 + ha->port_no; 4680 nv->port_name[2] = 0x00; 4681 nv->port_name[3] = 0xe0; 4682 nv->port_name[4] = 0x8b; 4683 nv->port_name[5] = 0x1c; 4684 nv->port_name[6] = 0x55; 4685 nv->port_name[7] = 0x86; 4686 nv->node_name[0] = 0x20; 4687 nv->node_name[1] = 0x00; 4688 nv->node_name[2] = 0x00; 4689 nv->node_name[3] = 0xe0; 4690 nv->node_name[4] = 0x8b; 4691 nv->node_name[5] = 0x1c; 4692 nv->node_name[6] = 0x55; 4693 nv->node_name[7] = 0x86; 4694 nv->login_retry_count = __constant_cpu_to_le16(8); 4695 nv->interrupt_delay_timer = __constant_cpu_to_le16(0); 4696 nv->login_timeout = __constant_cpu_to_le16(0); 4697 nv->firmware_options_1 = 4698 __constant_cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1); 4699 nv->firmware_options_2 = __constant_cpu_to_le32(2 << 4); 4700 nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12); 4701 nv->firmware_options_3 = __constant_cpu_to_le32(2 << 13); 4702 nv->host_p = __constant_cpu_to_le32(BIT_11|BIT_10); 4703 nv->efi_parameters = __constant_cpu_to_le32(0); 4704 nv->reset_delay = 5; 4705 nv->max_luns_per_target = __constant_cpu_to_le16(128); 4706 nv->port_down_retry_count = __constant_cpu_to_le16(30); 4707 nv->link_down_timeout = __constant_cpu_to_le16(30); 4708 nv->enode_mac[0] = 0x00; 4709 nv->enode_mac[1] = 0x02; 4710 nv->enode_mac[2] = 0x03; 4711 nv->enode_mac[3] = 0x04; 4712 nv->enode_mac[4] = 0x05; 4713 nv->enode_mac[5] = 0x06 + ha->port_no; 4714 4715 rval = 1; 4716 } 4717 4718 /* Reset Initialization control block */ 4719 memset(icb, 0, sizeof(struct init_cb_81xx)); 4720 4721 /* Copy 1st segment. */ 4722 dptr1 = (uint8_t *)icb; 4723 dptr2 = (uint8_t *)&nv->version; 4724 cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version; 4725 while (cnt--) 4726 *dptr1++ = *dptr2++; 4727 4728 icb->login_retry_count = nv->login_retry_count; 4729 4730 /* Copy 2nd segment. */ 4731 dptr1 = (uint8_t *)&icb->interrupt_delay_timer; 4732 dptr2 = (uint8_t *)&nv->interrupt_delay_timer; 4733 cnt = (uint8_t *)&icb->reserved_5 - 4734 (uint8_t *)&icb->interrupt_delay_timer; 4735 while (cnt--) 4736 *dptr1++ = *dptr2++; 4737 4738 memcpy(icb->enode_mac, nv->enode_mac, sizeof(icb->enode_mac)); 4739 /* Some boards (with valid NVRAMs) still have NULL enode_mac!! */ 4740 if (!memcmp(icb->enode_mac, "\0\0\0\0\0\0", sizeof(icb->enode_mac))) { 4741 icb->enode_mac[0] = 0x01; 4742 icb->enode_mac[1] = 0x02; 4743 icb->enode_mac[2] = 0x03; 4744 icb->enode_mac[3] = 0x04; 4745 icb->enode_mac[4] = 0x05; 4746 icb->enode_mac[5] = 0x06 + ha->port_no; 4747 } 4748 4749 /* Use extended-initialization control block. */ 4750 memcpy(ha->ex_init_cb, &nv->ex_version, sizeof(*ha->ex_init_cb)); 4751 4752 /* 4753 * Setup driver NVRAM options. 4754 */ 4755 qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name), 4756 "QLE81XX"); 4757 4758 /* Use alternate WWN? 
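*
* BIT_15 of host_p selects the alternate node/port names stored in
* NVRAM in place of the primary WWNs copied into the ICB above.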
*/ 4759 if (nv->host_p & __constant_cpu_to_le32(BIT_15)) { 4760 memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE); 4761 memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE); 4762 } 4763 4764 /* Prepare nodename */ 4765 if ((icb->firmware_options_1 & __constant_cpu_to_le32(BIT_14)) == 0) { 4766 /* 4767 * Firmware will apply the following mask if the nodename was 4768 * not provided. 4769 */ 4770 memcpy(icb->node_name, icb->port_name, WWN_SIZE); 4771 icb->node_name[0] &= 0xF0; 4772 } 4773 4774 /* Set host adapter parameters. */ 4775 ha->flags.disable_risc_code_load = 0; 4776 ha->flags.enable_lip_reset = 0; 4777 ha->flags.enable_lip_full_login = 4778 le32_to_cpu(nv->host_p) & BIT_10 ? 1: 0; 4779 ha->flags.enable_target_reset = 4780 le32_to_cpu(nv->host_p) & BIT_11 ? 1: 0; 4781 ha->flags.enable_led_scheme = 0; 4782 ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1: 0; 4783 4784 ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) & 4785 (BIT_6 | BIT_5 | BIT_4)) >> 4; 4786 4787 /* save HBA serial number */ 4788 ha->serial0 = icb->port_name[5]; 4789 ha->serial1 = icb->port_name[6]; 4790 ha->serial2 = icb->port_name[7]; 4791 memcpy(vha->node_name, icb->node_name, WWN_SIZE); 4792 memcpy(vha->port_name, icb->port_name, WWN_SIZE); 4793 4794 icb->execution_throttle = __constant_cpu_to_le16(0xFFFF); 4795 4796 ha->retry_count = le16_to_cpu(nv->login_retry_count); 4797 4798 /* Set minimum login_timeout to 4 seconds. */ 4799 if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout) 4800 nv->login_timeout = cpu_to_le16(ql2xlogintimeout); 4801 if (le16_to_cpu(nv->login_timeout) < 4) 4802 nv->login_timeout = __constant_cpu_to_le16(4); 4803 ha->login_timeout = le16_to_cpu(nv->login_timeout); 4804 icb->login_timeout = nv->login_timeout; 4805 4806 /* Set minimum RATOV to 100 tenths of a second. */ 4807 ha->r_a_tov = 100; 4808 4809 ha->loop_reset_delay = nv->reset_delay; 4810 4811 /* Link Down Timeout = 0: 4812 * 4813 * When Port Down timer expires we will start returning 4814 * I/O's to OS with "DID_NO_CONNECT". 4815 * 4816 * Link Down Timeout != 0: 4817 * 4818 * The driver waits for the link to come up after link down 4819 * before returning I/Os to OS with "DID_NO_CONNECT". 4820 */ 4821 if (le16_to_cpu(nv->link_down_timeout) == 0) { 4822 ha->loop_down_abort_time = 4823 (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT); 4824 } else { 4825 ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout); 4826 ha->loop_down_abort_time = 4827 (LOOP_DOWN_TIME - ha->link_down_timeout); 4828 } 4829 4830 /* Need enough time to try and get the port back. */ 4831 ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count); 4832 if (qlport_down_retry) 4833 ha->port_down_retry_count = qlport_down_retry; 4834 4835 /* Set login_retry_count */ 4836 ha->login_retry_count = le16_to_cpu(nv->login_retry_count); 4837 if (ha->port_down_retry_count == 4838 le16_to_cpu(nv->port_down_retry_count) && 4839 ha->port_down_retry_count > 3) 4840 ha->login_retry_count = ha->port_down_retry_count; 4841 else if (ha->port_down_retry_count > (int)ha->login_retry_count) 4842 ha->login_retry_count = ha->port_down_retry_count; 4843 if (ql2xloginretrycount) 4844 ha->login_retry_count = ql2xloginretrycount; 4845 4846 /* Enable ZIO. */ 4847 if (!vha->flags.init_done) { 4848 ha->zio_mode = le32_to_cpu(icb->firmware_options_2) & 4849 (BIT_3 | BIT_2 | BIT_1 | BIT_0); 4850 ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ? 
4851 le16_to_cpu(icb->interrupt_delay_timer): 2; 4852 } 4853 icb->firmware_options_2 &= __constant_cpu_to_le32( 4854 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0)); 4855 vha->flags.process_response_queue = 0; 4856 if (ha->zio_mode != QLA_ZIO_DISABLED) { 4857 ha->zio_mode = QLA_ZIO_MODE_6; 4858 4859 DEBUG2(printk("scsi(%ld): ZIO mode %d enabled; timer delay " 4860 "(%d us).\n", vha->host_no, ha->zio_mode, 4861 ha->zio_timer * 100)); 4862 qla_printk(KERN_INFO, ha, 4863 "ZIO mode %d enabled; timer delay (%d us).\n", 4864 ha->zio_mode, ha->zio_timer * 100); 4865 4866 icb->firmware_options_2 |= cpu_to_le32( 4867 (uint32_t)ha->zio_mode); 4868 icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer); 4869 vha->flags.process_response_queue = 1; 4870 } 4871 4872 if (rval) { 4873 DEBUG2_3(printk(KERN_WARNING 4874 "scsi(%ld): NVRAM configuration failed!\n", vha->host_no)); 4875 } 4876 return (rval); 4877 } 4878 4879 void 4880 qla81xx_update_fw_options(scsi_qla_host_t *ha) 4881 { 4882 } 4883
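/*
 * Note: qla81xx_update_fw_options() above is deliberately left empty;
 * presumably the ISP81xx needs no post-load firmware-option or
 * serial-link fixups beyond what qla81xx_nvram_config() already
 * programs into the initialization control block.
 */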