/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2008 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_gbl.h"

#include <linux/delay.h>
#include <linux/vmalloc.h>

#include "qla_devtbl.h"

#ifdef CONFIG_SPARC
#include <asm/prom.h>
#endif

/*
 * QLogic ISP2x00 Hardware Support Function Prototypes.
 */
static int qla2x00_isp_firmware(scsi_qla_host_t *);
static int qla2x00_setup_chip(scsi_qla_host_t *);
static int qla2x00_init_rings(scsi_qla_host_t *);
static int qla2x00_fw_ready(scsi_qla_host_t *);
static int qla2x00_configure_hba(scsi_qla_host_t *);
static int qla2x00_configure_loop(scsi_qla_host_t *);
static int qla2x00_configure_local_loop(scsi_qla_host_t *);
static int qla2x00_configure_fabric(scsi_qla_host_t *);
static int qla2x00_find_all_fabric_devs(scsi_qla_host_t *, struct list_head *);
static int qla2x00_device_resync(scsi_qla_host_t *);
static int qla2x00_fabric_dev_login(scsi_qla_host_t *, fc_port_t *,
    uint16_t *);

static int qla2x00_restart_isp(scsi_qla_host_t *);

static int qla2x00_find_new_loop_id(scsi_qla_host_t *, fc_port_t *);

static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *);
static int qla84xx_init_chip(scsi_qla_host_t *);
static int qla25xx_init_queues(struct qla_hw_data *);

/****************************************************************************/
/*                QLogic ISP2x00 Hardware Support Functions.                */
/****************************************************************************/

/*
 * qla2x00_initialize_adapter
 *      Initialize board.
 *
 * Input:
 *      ha = adapter block pointer.
 *
 * Returns:
 *      0 = success
 */
int
qla2x00_initialize_adapter(scsi_qla_host_t *vha)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];

	/* Clear adapter flags. */
	vha->flags.online = 0;
	ha->flags.chip_reset_done = 0;
	vha->flags.reset_active = 0;
	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
	atomic_set(&vha->loop_state, LOOP_DOWN);
	vha->device_flags = DFLG_NO_CABLE;
	vha->dpc_flags = 0;
	vha->flags.management_server_logged_in = 0;
	vha->marker_needed = 0;
	ha->isp_abort_cnt = 0;
	ha->beacon_blink_led = 0;
	set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);

	set_bit(0, ha->req_qid_map);
	set_bit(0, ha->rsp_qid_map);

	qla_printk(KERN_INFO, ha, "Configuring PCI space...\n");
	rval = ha->isp_ops->pci_config(vha);
	if (rval) {
		DEBUG2(printk("scsi(%ld): Unable to configure PCI space.\n",
		    vha->host_no));
		return (rval);
	}

	ha->isp_ops->reset_chip(vha);

	rval = qla2xxx_get_flash_info(vha);
	if (rval) {
		DEBUG2(printk("scsi(%ld): Unable to validate FLASH data.\n",
		    vha->host_no));
		return (rval);
	}

	ha->isp_ops->get_flash_version(vha, req->ring);

	qla_printk(KERN_INFO, ha, "Configure NVRAM parameters...\n");

	ha->isp_ops->nvram_config(vha);

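	/*
	 * Note: if nvram_config() above flagged this port as masked
	 * (disable_serdes), initialization is aborted right here with
	 * QLA_FUNCTION_FAILED so the masked HBA is never brought online.
	 */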
	if (ha->flags.disable_serdes) {
		/* Mask HBA via NVRAM settings? */
		qla_printk(KERN_INFO, ha, "Masking HBA WWPN "
		    "%02x%02x%02x%02x%02x%02x%02x%02x (via NVRAM).\n",
		    vha->port_name[0], vha->port_name[1],
		    vha->port_name[2], vha->port_name[3],
		    vha->port_name[4], vha->port_name[5],
		    vha->port_name[6], vha->port_name[7]);
		return QLA_FUNCTION_FAILED;
	}

	qla_printk(KERN_INFO, ha, "Verifying loaded RISC code...\n");

	if (qla2x00_isp_firmware(vha) != QLA_SUCCESS) {
		rval = ha->isp_ops->chip_diag(vha);
		if (rval)
			return (rval);
		rval = qla2x00_setup_chip(vha);
		if (rval)
			return (rval);
	}
	if (IS_QLA84XX(ha)) {
		ha->cs84xx = qla84xx_get_chip(vha);
		if (!ha->cs84xx) {
			qla_printk(KERN_ERR, ha,
			    "Unable to configure ISP84XX.\n");
			return QLA_FUNCTION_FAILED;
		}
	}
	rval = qla2x00_init_rings(vha);
	ha->flags.chip_reset_done = 1;

	return (rval);
}

/**
 * qla2100_pci_config() - Setup ISP21xx PCI configuration registers.
 * @ha: HA context
 *
 * Returns 0 on success.
 */
int
qla2100_pci_config(scsi_qla_host_t *vha)
{
	uint16_t w;
	unsigned long flags;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	pci_set_master(ha->pdev);
	pci_try_set_mwi(ha->pdev);

	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
	pci_write_config_word(ha->pdev, PCI_COMMAND, w);

	pci_disable_rom(ha->pdev);

	/* Get PCI bus information. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->pci_attr = RD_REG_WORD(&reg->ctrl_status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;
}

/**
 * qla2300_pci_config() - Setup ISP23xx PCI configuration registers.
 * @ha: HA context
 *
 * Returns 0 on success.
 */
int
qla2300_pci_config(scsi_qla_host_t *vha)
{
	uint16_t w;
	unsigned long flags = 0;
	uint32_t cnt;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	pci_set_master(ha->pdev);
	pci_try_set_mwi(ha->pdev);

	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);

	if (IS_QLA2322(ha) || IS_QLA6322(ha))
		w &= ~PCI_COMMAND_INTX_DISABLE;
	pci_write_config_word(ha->pdev, PCI_COMMAND, w);

	/*
	 * If this is a 2300 card and not 2312, reset the
	 * COMMAND_INVALIDATE due to a bug in the 2300. Unfortunately,
	 * the 2310 also reports itself as a 2300 so we need to get the
	 * fb revision level -- a 6 indicates it really is a 2300 and
	 * not a 2310.
	 */
	if (IS_QLA2300(ha)) {
		spin_lock_irqsave(&ha->hardware_lock, flags);

		/* Pause RISC. */
		WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
		for (cnt = 0; cnt < 30000; cnt++) {
			if ((RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) != 0)
				break;

			udelay(10);
		}

		/* Select FPM registers. */
		WRT_REG_WORD(&reg->ctrl_status, 0x20);
		RD_REG_WORD(&reg->ctrl_status);

		/* Get the fb rev level */
		ha->fb_rev = RD_FB_CMD_REG(ha, reg);

		if (ha->fb_rev == FPM_2300)
			pci_clear_mwi(ha->pdev);

		/* Deselect FPM registers. */
		WRT_REG_WORD(&reg->ctrl_status, 0x0);
		RD_REG_WORD(&reg->ctrl_status);

		/* Release RISC module. */
		WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
		for (cnt = 0; cnt < 30000; cnt++) {
			if ((RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0)
				break;

			udelay(10);
		}

		spin_unlock_irqrestore(&ha->hardware_lock, flags);
	}

	pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);

	pci_disable_rom(ha->pdev);

	/* Get PCI bus information. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->pci_attr = RD_REG_WORD(&reg->ctrl_status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;
}

/**
 * qla24xx_pci_config() - Setup ISP24xx PCI configuration registers.
 * @ha: HA context
 *
 * Returns 0 on success.
 */
int
qla24xx_pci_config(scsi_qla_host_t *vha)
{
	uint16_t w;
	unsigned long flags = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	pci_set_master(ha->pdev);
	pci_try_set_mwi(ha->pdev);

	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
	w &= ~PCI_COMMAND_INTX_DISABLE;
	pci_write_config_word(ha->pdev, PCI_COMMAND, w);

	pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);

	/* PCI-X -- adjust Maximum Memory Read Byte Count (2048). */
	if (pci_find_capability(ha->pdev, PCI_CAP_ID_PCIX))
		pcix_set_mmrbc(ha->pdev, 2048);

	/* PCIe -- adjust Maximum Read Request Size (2048). */
	if (pci_find_capability(ha->pdev, PCI_CAP_ID_EXP))
		pcie_set_readrq(ha->pdev, 2048);

	pci_disable_rom(ha->pdev);

	ha->chip_revision = ha->pdev->revision;

	/* Get PCI bus information. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->pci_attr = RD_REG_DWORD(&reg->ctrl_status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;
}

/**
 * qla25xx_pci_config() - Setup ISP25xx PCI configuration registers.
 * @ha: HA context
 *
 * Returns 0 on success.
 */
int
qla25xx_pci_config(scsi_qla_host_t *vha)
{
	uint16_t w;
	struct qla_hw_data *ha = vha->hw;

	pci_set_master(ha->pdev);
	pci_try_set_mwi(ha->pdev);

	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
	w &= ~PCI_COMMAND_INTX_DISABLE;
	pci_write_config_word(ha->pdev, PCI_COMMAND, w);

	/* PCIe -- adjust Maximum Read Request Size (2048). */
	if (pci_find_capability(ha->pdev, PCI_CAP_ID_EXP))
		pcie_set_readrq(ha->pdev, 2048);

	pci_disable_rom(ha->pdev);

	ha->chip_revision = ha->pdev->revision;

	return QLA_SUCCESS;
}

/**
 * qla2x00_isp_firmware() - Choose firmware image.
 * @ha: HA context
 *
 * Returns 0 on success.
 */
static int
qla2x00_isp_firmware(scsi_qla_host_t *vha)
{
	int rval;
	uint16_t loop_id, topo, sw_cap;
	uint8_t domain, area, al_pa;
	struct qla_hw_data *ha = vha->hw;

	/* Assume loading risc code */
	rval = QLA_FUNCTION_FAILED;

	if (ha->flags.disable_risc_code_load) {
		DEBUG2(printk("scsi(%ld): RISC CODE NOT loaded\n",
		    vha->host_no));
		qla_printk(KERN_INFO, ha, "RISC CODE NOT loaded\n");

		/* Verify checksum of loaded RISC code. */
		rval = qla2x00_verify_checksum(vha, ha->fw_srisc_address);
		if (rval == QLA_SUCCESS) {
			/* And, verify we are not in ROM code. */
			rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
			    &area, &domain, &topo, &sw_cap);
		}
	}

	if (rval) {
		DEBUG2_3(printk("scsi(%ld): **** Load RISC code ****\n",
		    vha->host_no));
	}

	return (rval);
}

/**
 * qla2x00_reset_chip() - Reset ISP chip.
 * @ha: HA context
 *
 * Returns 0 on success.
 */
void
qla2x00_reset_chip(scsi_qla_host_t *vha)
{
	unsigned long flags = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	uint32_t cnt;
	uint16_t cmd;

	ha->isp_ops->disable_intrs(ha);

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Turn off master enable */
	cmd = 0;
	pci_read_config_word(ha->pdev, PCI_COMMAND, &cmd);
	cmd &= ~PCI_COMMAND_MASTER;
	pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);

	if (!IS_QLA2100(ha)) {
		/* Pause RISC. */
		WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
		if (IS_QLA2200(ha) || IS_QLA2300(ha)) {
			for (cnt = 0; cnt < 30000; cnt++) {
				if ((RD_REG_WORD(&reg->hccr) &
				    HCCR_RISC_PAUSE) != 0)
					break;
				udelay(100);
			}
		} else {
			RD_REG_WORD(&reg->hccr);	/* PCI Posting. */
			udelay(10);
		}

		/* Select FPM registers. */
		WRT_REG_WORD(&reg->ctrl_status, 0x20);
		RD_REG_WORD(&reg->ctrl_status);		/* PCI Posting. */

		/* FPM Soft Reset. */
		WRT_REG_WORD(&reg->fpm_diag_config, 0x100);
		RD_REG_WORD(&reg->fpm_diag_config);	/* PCI Posting. */

		/* Toggle Fpm Reset. */
		if (!IS_QLA2200(ha)) {
			WRT_REG_WORD(&reg->fpm_diag_config, 0x0);
			RD_REG_WORD(&reg->fpm_diag_config); /* PCI Posting. */
		}

		/* Select frame buffer registers. */
		WRT_REG_WORD(&reg->ctrl_status, 0x10);
		RD_REG_WORD(&reg->ctrl_status);		/* PCI Posting. */

		/* Reset frame buffer FIFOs. */
		if (IS_QLA2200(ha)) {
			WRT_FB_CMD_REG(ha, reg, 0xa000);
			RD_FB_CMD_REG(ha, reg);		/* PCI Posting. */
		} else {
			WRT_FB_CMD_REG(ha, reg, 0x00fc);

			/* Read back fb_cmd until zero or 3 seconds max */
			for (cnt = 0; cnt < 3000; cnt++) {
				if ((RD_FB_CMD_REG(ha, reg) & 0xff) == 0)
					break;
				udelay(100);
			}
		}

		/* Select RISC module registers. */
		WRT_REG_WORD(&reg->ctrl_status, 0);
		RD_REG_WORD(&reg->ctrl_status);		/* PCI Posting. */

		/* Reset RISC processor. */
		WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
		RD_REG_WORD(&reg->hccr);		/* PCI Posting. */

		/* Release RISC processor. */
		WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
		RD_REG_WORD(&reg->hccr);		/* PCI Posting. */
	}

	WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
	WRT_REG_WORD(&reg->hccr, HCCR_CLR_HOST_INT);

	/* Reset ISP chip. */
	WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);

	/* Wait for RISC to recover from reset. */
	if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
		/*
		 * It is necessary to have a delay here since the card doesn't
		 * respond to PCI reads during a reset. On some architectures
		 * this will result in an MCA.
		 */
		udelay(20);
		for (cnt = 30000; cnt; cnt--) {
			if ((RD_REG_WORD(&reg->ctrl_status) &
			    CSR_ISP_SOFT_RESET) == 0)
				break;
			udelay(100);
		}
	} else
		udelay(10);

	/* Reset RISC processor. */
	WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);

	WRT_REG_WORD(&reg->semaphore, 0);

	/* Release RISC processor. */
	WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
	RD_REG_WORD(&reg->hccr);		/* PCI Posting. */

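	/*
	 * On ISP2100/2200/2300 parts, poll mailbox register 0 until it leaves
	 * the BUSY state, indicating the RISC has finished handling the
	 * reset; later ISPs only need the short fixed delay below.
	 */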
	if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
		for (cnt = 0; cnt < 30000; cnt++) {
			if (RD_MAILBOX_REG(ha, reg, 0) != MBS_BUSY)
				break;

			udelay(100);
		}
	} else
		udelay(100);

	/* Turn on master enable */
	cmd |= PCI_COMMAND_MASTER;
	pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);

	/* Disable RISC pause on FPM parity error. */
	if (!IS_QLA2100(ha)) {
		WRT_REG_WORD(&reg->hccr, HCCR_DISABLE_PARITY_PAUSE);
		RD_REG_WORD(&reg->hccr);		/* PCI Posting. */
	}

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

/**
 * qla24xx_reset_risc() - Perform full reset of ISP24xx RISC.
 * @ha: HA context
 *
 * Returns 0 on success.
 */
static inline void
qla24xx_reset_risc(scsi_qla_host_t *vha)
{
	unsigned long flags = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
	uint32_t cnt, d2;
	uint16_t wd;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Reset RISC. */
	WRT_REG_DWORD(&reg->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
	for (cnt = 0; cnt < 30000; cnt++) {
		if ((RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE) == 0)
			break;

		udelay(10);
	}

	WRT_REG_DWORD(&reg->ctrl_status,
	    CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
	pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);

	udelay(100);
	/* Wait for firmware to complete NVRAM accesses. */
	d2 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
	for (cnt = 10000 ; cnt && d2; cnt--) {
		udelay(5);
		d2 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
		barrier();
	}

	/* Wait for soft-reset to complete. */
	d2 = RD_REG_DWORD(&reg->ctrl_status);
	for (cnt = 6000000 ; cnt && (d2 & CSRX_ISP_SOFT_RESET); cnt--) {
		udelay(5);
		d2 = RD_REG_DWORD(&reg->ctrl_status);
		barrier();
	}

	WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
	RD_REG_DWORD(&reg->hccr);

	WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
	RD_REG_DWORD(&reg->hccr);

	WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET);
	RD_REG_DWORD(&reg->hccr);

	d2 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
	for (cnt = 6000000 ; cnt && d2; cnt--) {
		udelay(5);
		d2 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
		barrier();
	}

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (IS_NOPOLLING_TYPE(ha))
		ha->isp_ops->enable_intrs(ha);
}

/**
 * qla24xx_reset_chip() - Reset ISP24xx chip.
 * @ha: HA context
 *
 * Returns 0 on success.
 */
void
qla24xx_reset_chip(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	ha->isp_ops->disable_intrs(ha);

	/* Perform RISC reset. */
	qla24xx_reset_risc(vha);
}

/**
 * qla2x00_chip_diag() - Test chip for proper operation.
 * @ha: HA context
 *
 * Returns 0 on success.
 */
int
qla2x00_chip_diag(scsi_qla_host_t *vha)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	unsigned long flags = 0;
	uint16_t data;
	uint32_t cnt;
	uint16_t mb[5];
	struct req_que *req = ha->req_q_map[0];

	/* Assume a failed state */
	rval = QLA_FUNCTION_FAILED;

	DEBUG3(printk("scsi(%ld): Testing device at %lx.\n",
	    vha->host_no, (u_long)&reg->flash_address));

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Reset ISP chip. */
	WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);

	/*
	 * We need to have a delay here since the card will not respond while
	 * in reset causing an MCA on some architectures.
	 */
	udelay(20);
	data = qla2x00_debounce_register(&reg->ctrl_status);
	for (cnt = 6000000 ; cnt && (data & CSR_ISP_SOFT_RESET); cnt--) {
		udelay(5);
		data = RD_REG_WORD(&reg->ctrl_status);
		barrier();
	}

	if (!cnt)
		goto chip_diag_failed;

	DEBUG3(printk("scsi(%ld): Reset register cleared by chip reset\n",
	    ha->host_no));

	/* Reset RISC processor. */
	WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
	WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);

	/* Workaround for QLA2312 PCI parity error */
	if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
		data = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 0));
		for (cnt = 6000000; cnt && (data == MBS_BUSY); cnt--) {
			udelay(5);
			data = RD_MAILBOX_REG(ha, reg, 0);
			barrier();
		}
	} else
		udelay(10);

	if (!cnt)
		goto chip_diag_failed;

	/* Check product ID of chip */
	DEBUG3(printk("scsi(%ld): Checking product ID of chip\n", ha->host_no));

	mb[1] = RD_MAILBOX_REG(ha, reg, 1);
	mb[2] = RD_MAILBOX_REG(ha, reg, 2);
	mb[3] = RD_MAILBOX_REG(ha, reg, 3);
	mb[4] = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 4));
	if (mb[1] != PROD_ID_1 || (mb[2] != PROD_ID_2 && mb[2] != PROD_ID_2a) ||
	    mb[3] != PROD_ID_3) {
		qla_printk(KERN_WARNING, ha,
		    "Wrong product ID = 0x%x,0x%x,0x%x\n", mb[1], mb[2], mb[3]);

		goto chip_diag_failed;
	}
	ha->product_id[0] = mb[1];
	ha->product_id[1] = mb[2];
	ha->product_id[2] = mb[3];
	ha->product_id[3] = mb[4];

	/* Adjust fw RISC transfer size */
	if (req->length > 1024)
		ha->fw_transfer_size = REQUEST_ENTRY_SIZE * 1024;
	else
		ha->fw_transfer_size = REQUEST_ENTRY_SIZE *
		    req->length;

	if (IS_QLA2200(ha) &&
	    RD_MAILBOX_REG(ha, reg, 7) == QLA2200A_RISC_ROM_VER) {
		/* Limit firmware transfer size with a 2200A */
		DEBUG3(printk("scsi(%ld): Found QLA2200A chip.\n",
		    vha->host_no));

		ha->device_type |= DT_ISP2200A;
		ha->fw_transfer_size = 128;
	}

	/* Wrap Incoming Mailboxes Test. */
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	DEBUG3(printk("scsi(%ld): Checking mailboxes.\n", vha->host_no));
	rval = qla2x00_mbx_reg_test(vha);
	if (rval) {
		DEBUG(printk("scsi(%ld): Failed mailbox send register test\n",
		    vha->host_no));
		qla_printk(KERN_WARNING, ha,
		    "Failed mailbox send register test\n");
	}
	else {
		/* Flag a successful rval */
		rval = QLA_SUCCESS;
	}
	spin_lock_irqsave(&ha->hardware_lock, flags);

chip_diag_failed:
	if (rval)
		DEBUG2_3(printk("scsi(%ld): Chip diagnostics **** FAILED "
		    "****\n", vha->host_no));

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (rval);
}

/**
 * qla24xx_chip_diag() - Test ISP24xx for proper operation.
 * @ha: HA context
 *
 * Returns 0 on success.
 */
int
qla24xx_chip_diag(scsi_qla_host_t *vha)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];

	/* Perform RISC reset. */
	qla24xx_reset_risc(vha);

	ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length;

	rval = qla2x00_mbx_reg_test(vha);
	if (rval) {
		DEBUG(printk("scsi(%ld): Failed mailbox send register test\n",
		    vha->host_no));
		qla_printk(KERN_WARNING, ha,
		    "Failed mailbox send register test\n");
	} else {
		/* Flag a successful rval */
		rval = QLA_SUCCESS;
	}

	return rval;
}

void
qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
{
	int rval;
	uint32_t dump_size, fixed_size, mem_size, req_q_size, rsp_q_size,
	    eft_size, fce_size, mq_size;
	dma_addr_t tc_dma;
	void *tc;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];
	struct rsp_que *rsp = ha->rsp_q_map[0];

	if (ha->fw_dump) {
		qla_printk(KERN_WARNING, ha,
		    "Firmware dump previously allocated.\n");
		return;
	}

	ha->fw_dumped = 0;
	fixed_size = mem_size = eft_size = fce_size = mq_size = 0;
	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
		fixed_size = sizeof(struct qla2100_fw_dump);
	} else if (IS_QLA23XX(ha)) {
		fixed_size = offsetof(struct qla2300_fw_dump, data_ram);
		mem_size = (ha->fw_memory_size - 0x11000 + 1) *
		    sizeof(uint16_t);
	} else if (IS_FWI2_CAPABLE(ha)) {
		if (IS_QLA81XX(ha))
			fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem);
		else if (IS_QLA25XX(ha))
			fixed_size = offsetof(struct qla25xx_fw_dump, ext_mem);
		else
			fixed_size = offsetof(struct qla24xx_fw_dump, ext_mem);
		mem_size = (ha->fw_memory_size - 0x100000 + 1) *
		    sizeof(uint32_t);
		if (ha->mqenable)
			mq_size = sizeof(struct qla2xxx_mq_chain);

		/* Allocate memory for Fibre Channel Event Buffer. */
		if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))
			goto try_eft;

		tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,
		    GFP_KERNEL);
		if (!tc) {
			qla_printk(KERN_WARNING, ha, "Unable to allocate "
			    "(%d KB) for FCE.\n", FCE_SIZE / 1024);
			goto try_eft;
		}

		memset(tc, 0, FCE_SIZE);
		rval = qla2x00_enable_fce_trace(vha, tc_dma, FCE_NUM_BUFFERS,
		    ha->fce_mb, &ha->fce_bufs);
		if (rval) {
			qla_printk(KERN_WARNING, ha, "Unable to initialize "
			    "FCE (%d).\n", rval);
			dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc,
			    tc_dma);
			ha->flags.fce_enabled = 0;
			goto try_eft;
		}

		qla_printk(KERN_INFO, ha, "Allocated (%d KB) for FCE...\n",
		    FCE_SIZE / 1024);

		fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;
		ha->flags.fce_enabled = 1;
		ha->fce_dma = tc_dma;
		ha->fce = tc;
try_eft:
		/* Allocate memory for Extended Trace Buffer. */
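		/*
		 * Note: like the FCE buffer above, the EFT buffer is optional;
		 * if allocation or firmware enablement fails, the code falls
		 * through to cont_alloc and sizes the firmware dump without it.
		 */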
		tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
		    GFP_KERNEL);
		if (!tc) {
			qla_printk(KERN_WARNING, ha, "Unable to allocate "
			    "(%d KB) for EFT.\n", EFT_SIZE / 1024);
			goto cont_alloc;
		}

		memset(tc, 0, EFT_SIZE);
		rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS);
		if (rval) {
			qla_printk(KERN_WARNING, ha, "Unable to initialize "
			    "EFT (%d).\n", rval);
			dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc,
			    tc_dma);
			goto cont_alloc;
		}

		qla_printk(KERN_INFO, ha, "Allocated (%d KB) for EFT...\n",
		    EFT_SIZE / 1024);

		eft_size = EFT_SIZE;
		ha->eft_dma = tc_dma;
		ha->eft = tc;
	}
cont_alloc:
	req_q_size = req->length * sizeof(request_t);
	rsp_q_size = rsp->length * sizeof(response_t);

	dump_size = offsetof(struct qla2xxx_fw_dump, isp);
	dump_size += fixed_size + mem_size + req_q_size + rsp_q_size +
	    eft_size;
	ha->chain_offset = dump_size;
	dump_size += mq_size + fce_size;

	ha->fw_dump = vmalloc(dump_size);
	if (!ha->fw_dump) {
		qla_printk(KERN_WARNING, ha, "Unable to allocate (%d KB) for "
		    "firmware dump!!!\n", dump_size / 1024);

		if (ha->eft) {
			dma_free_coherent(&ha->pdev->dev, eft_size, ha->eft,
			    ha->eft_dma);
			ha->eft = NULL;
			ha->eft_dma = 0;
		}
		return;
	}
	qla_printk(KERN_INFO, ha, "Allocated (%d KB) for firmware dump...\n",
	    dump_size / 1024);

	ha->fw_dump_len = dump_size;
	ha->fw_dump->signature[0] = 'Q';
	ha->fw_dump->signature[1] = 'L';
	ha->fw_dump->signature[2] = 'G';
	ha->fw_dump->signature[3] = 'C';
	ha->fw_dump->version = __constant_htonl(1);

	ha->fw_dump->fixed_size = htonl(fixed_size);
	ha->fw_dump->mem_size = htonl(mem_size);
	ha->fw_dump->req_q_size = htonl(req_q_size);
	ha->fw_dump->rsp_q_size = htonl(rsp_q_size);

	ha->fw_dump->eft_size = htonl(eft_size);
	ha->fw_dump->eft_addr_l = htonl(LSD(ha->eft_dma));
	ha->fw_dump->eft_addr_h = htonl(MSD(ha->eft_dma));

	ha->fw_dump->header_size =
	    htonl(offsetof(struct qla2xxx_fw_dump, isp));
}

/**
 * qla2x00_setup_chip() - Load and start RISC firmware.
 * @ha: HA context
 *
 * Returns 0 on success.
 */
static int
qla2x00_setup_chip(scsi_qla_host_t *vha)
{
	int rval;
	uint32_t srisc_address = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	unsigned long flags;
	uint16_t fw_major_version;

	if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
		/* Disable SRAM, Instruction RAM and GP RAM parity. */
		spin_lock_irqsave(&ha->hardware_lock, flags);
		WRT_REG_WORD(&reg->hccr, (HCCR_ENABLE_PARITY + 0x0));
		RD_REG_WORD(&reg->hccr);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
	}

	/* Load firmware sequences */
	rval = ha->isp_ops->load_risc(vha, &srisc_address);
	if (rval == QLA_SUCCESS) {
		DEBUG(printk("scsi(%ld): Verifying Checksum of loaded RISC "
		    "code.\n", vha->host_no));

		rval = qla2x00_verify_checksum(vha, srisc_address);
		if (rval == QLA_SUCCESS) {
			/* Start firmware execution. */
			DEBUG(printk("scsi(%ld): Checksum OK, start "
			    "firmware.\n", vha->host_no));

			rval = qla2x00_execute_fw(vha, srisc_address);
			/* Retrieve firmware information. */
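			/*
			 * Note: once the firmware is running, its version and
			 * attributes are queried; NPIV support is keyed off
			 * BIT_2 of fw_attributes and max_npiv_vports is
			 * normalized to a MIN_MULTI_ID_FABRIC boundary below.
			 */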
			if (rval == QLA_SUCCESS) {
				fw_major_version = ha->fw_major_version;
				qla2x00_get_fw_version(vha,
				    &ha->fw_major_version,
				    &ha->fw_minor_version,
				    &ha->fw_subminor_version,
				    &ha->fw_attributes, &ha->fw_memory_size,
				    ha->mpi_version, &ha->mpi_capabilities,
				    ha->phy_version);
				ha->flags.npiv_supported = 0;
				if (IS_QLA2XXX_MIDTYPE(ha) &&
				    (ha->fw_attributes & BIT_2)) {
					ha->flags.npiv_supported = 1;
					if ((!ha->max_npiv_vports) ||
					    ((ha->max_npiv_vports + 1) %
					    MIN_MULTI_ID_FABRIC))
						ha->max_npiv_vports =
						    MIN_MULTI_ID_FABRIC - 1;
				}
				qla2x00_get_resource_cnts(vha, NULL,
				    &ha->fw_xcb_count, NULL, NULL,
				    &ha->max_npiv_vports);

				if (!fw_major_version && ql2xallocfwdump)
					qla2x00_alloc_fw_dump(vha);
			}
		} else {
			DEBUG2(printk(KERN_INFO
			    "scsi(%ld): ISP Firmware failed checksum.\n",
			    vha->host_no));
		}
	}

	if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
		/* Enable proper parity. */
		spin_lock_irqsave(&ha->hardware_lock, flags);
		if (IS_QLA2300(ha))
			/* SRAM parity */
			WRT_REG_WORD(&reg->hccr, HCCR_ENABLE_PARITY + 0x1);
		else
			/* SRAM, Instruction RAM and GP RAM parity */
			WRT_REG_WORD(&reg->hccr, HCCR_ENABLE_PARITY + 0x7);
		RD_REG_WORD(&reg->hccr);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
	}

	if (rval == QLA_SUCCESS && IS_FAC_REQUIRED(ha)) {
		uint32_t size;

		rval = qla81xx_fac_get_sector_size(vha, &size);
		if (rval == QLA_SUCCESS) {
			ha->flags.fac_supported = 1;
			ha->fdt_block_size = size << 2;
		} else {
			qla_printk(KERN_ERR, ha,
			    "Unsupported FAC firmware (%d.%02d.%02d).\n",
			    ha->fw_major_version, ha->fw_minor_version,
			    ha->fw_subminor_version);
		}
	}

	if (rval) {
		DEBUG2_3(printk("scsi(%ld): Setup chip **** FAILED ****.\n",
		    vha->host_no));
	}

	return (rval);
}

/**
 * qla2x00_init_response_q_entries() - Initializes response queue entries.
 * @ha: HA context
 *
 * Beginning of request ring has initialization control block already built
 * by nvram config routine.
 *
 * Returns 0 on success.
 */
void
qla2x00_init_response_q_entries(struct rsp_que *rsp)
{
	uint16_t cnt;
	response_t *pkt;

	pkt = rsp->ring_ptr;
	for (cnt = 0; cnt < rsp->length; cnt++) {
		pkt->signature = RESPONSE_PROCESSED;
		pkt++;
	}

}

/**
 * qla2x00_update_fw_options() - Read and process firmware options.
 * @ha: HA context
 *
 * Returns 0 on success.
 */
void
qla2x00_update_fw_options(scsi_qla_host_t *vha)
{
	uint16_t swing, emphasis, tx_sens, rx_sens;
	struct qla_hw_data *ha = vha->hw;

	memset(ha->fw_options, 0, sizeof(ha->fw_options));
	qla2x00_get_fw_options(vha, ha->fw_options);

	if (IS_QLA2100(ha) || IS_QLA2200(ha))
		return;

	/* Serial Link options. */
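	/*
	 * The NVRAM serial-link options carry 1G and 2G swing/emphasis and
	 * tx/rx sensitivity values; when BIT_2 of options byte 3 is set they
	 * are repacked into fw_options[10] and fw_options[11] below.
	 */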
	DEBUG3(printk("scsi(%ld): Serial link options:\n",
	    vha->host_no));
	DEBUG3(qla2x00_dump_buffer((uint8_t *)&ha->fw_seriallink_options,
	    sizeof(ha->fw_seriallink_options)));

	ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING;
	if (ha->fw_seriallink_options[3] & BIT_2) {
		ha->fw_options[1] |= FO1_SET_EMPHASIS_SWING;

		/* 1G settings */
		swing = ha->fw_seriallink_options[2] & (BIT_2 | BIT_1 | BIT_0);
		emphasis = (ha->fw_seriallink_options[2] &
		    (BIT_4 | BIT_3)) >> 3;
		tx_sens = ha->fw_seriallink_options[0] &
		    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
		rx_sens = (ha->fw_seriallink_options[0] &
		    (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4;
		ha->fw_options[10] = (emphasis << 14) | (swing << 8);
		if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
			if (rx_sens == 0x0)
				rx_sens = 0x3;
			ha->fw_options[10] |= (tx_sens << 4) | rx_sens;
		} else if (IS_QLA2322(ha) || IS_QLA6322(ha))
			ha->fw_options[10] |= BIT_5 |
			    ((rx_sens & (BIT_1 | BIT_0)) << 2) |
			    (tx_sens & (BIT_1 | BIT_0));

		/* 2G settings */
		swing = (ha->fw_seriallink_options[2] &
		    (BIT_7 | BIT_6 | BIT_5)) >> 5;
		emphasis = ha->fw_seriallink_options[3] & (BIT_1 | BIT_0);
		tx_sens = ha->fw_seriallink_options[1] &
		    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
		rx_sens = (ha->fw_seriallink_options[1] &
		    (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4;
		ha->fw_options[11] = (emphasis << 14) | (swing << 8);
		if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
			if (rx_sens == 0x0)
				rx_sens = 0x3;
			ha->fw_options[11] |= (tx_sens << 4) | rx_sens;
		} else if (IS_QLA2322(ha) || IS_QLA6322(ha))
			ha->fw_options[11] |= BIT_5 |
			    ((rx_sens & (BIT_1 | BIT_0)) << 2) |
			    (tx_sens & (BIT_1 | BIT_0));
	}

	/* FCP2 options. */
	/* Return command IOCBs without waiting for an ABTS to complete. */
	ha->fw_options[3] |= BIT_13;

	/* LED scheme. */
	if (ha->flags.enable_led_scheme)
		ha->fw_options[2] |= BIT_12;

	/* Detect ISP6312. */
	if (IS_QLA6312(ha))
		ha->fw_options[2] |= BIT_13;

	/* Update firmware options. */
	qla2x00_set_fw_options(vha, ha->fw_options);
}

void
qla24xx_update_fw_options(scsi_qla_host_t *vha)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;

	/* Update Serial Link options. */
	if ((le16_to_cpu(ha->fw_seriallink_options24[0]) & BIT_0) == 0)
		return;

	rval = qla2x00_set_serdes_params(vha,
	    le16_to_cpu(ha->fw_seriallink_options24[1]),
	    le16_to_cpu(ha->fw_seriallink_options24[2]),
	    le16_to_cpu(ha->fw_seriallink_options24[3]));
	if (rval != QLA_SUCCESS) {
		qla_printk(KERN_WARNING, ha,
		    "Unable to update Serial Link options (%x).\n", rval);
	}
}

void
qla2x00_config_rings(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	struct req_que *req = ha->req_q_map[0];
	struct rsp_que *rsp = ha->rsp_q_map[0];

	/* Setup ring parameters in initialization control block. */
	ha->init_cb->request_q_outpointer = __constant_cpu_to_le16(0);
	ha->init_cb->response_q_inpointer = __constant_cpu_to_le16(0);
	ha->init_cb->request_q_length = cpu_to_le16(req->length);
	ha->init_cb->response_q_length = cpu_to_le16(rsp->length);
	ha->init_cb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
	ha->init_cb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
	ha->init_cb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
	ha->init_cb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));

	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), 0);
	WRT_REG_WORD(ISP_REQ_Q_OUT(ha, reg), 0);
	WRT_REG_WORD(ISP_RSP_Q_IN(ha, reg), 0);
	WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), 0);
	RD_REG_WORD(ISP_RSP_Q_OUT(ha, reg));		/* PCI Posting. */
}

void
qla24xx_config_rings(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t __iomem *reg = ISP_QUE_REG(ha, 0);
	struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
	struct qla_msix_entry *msix;
	struct init_cb_24xx *icb;
	uint16_t rid = 0;
	struct req_que *req = ha->req_q_map[0];
	struct rsp_que *rsp = ha->rsp_q_map[0];

	/* Setup ring parameters in initialization control block. */
	icb = (struct init_cb_24xx *)ha->init_cb;
	icb->request_q_outpointer = __constant_cpu_to_le16(0);
	icb->response_q_inpointer = __constant_cpu_to_le16(0);
	icb->request_q_length = cpu_to_le16(req->length);
	icb->response_q_length = cpu_to_le16(rsp->length);
	icb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
	icb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
	icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
	icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));

	if (ha->mqenable) {
		icb->qos = __constant_cpu_to_le16(QLA_DEFAULT_QUE_QOS);
		icb->rid = __constant_cpu_to_le16(rid);
		if (ha->flags.msix_enabled) {
			msix = &ha->msix_entries[1];
			DEBUG2_17(printk(KERN_INFO
			    "Registering vector 0x%x for base que\n",
			    msix->entry));
			icb->msix = cpu_to_le16(msix->entry);
		}
		/* Use alternate PCI bus number */
		if (MSB(rid))
			icb->firmware_options_2 |=
			    __constant_cpu_to_le32(BIT_19);
		/* Use alternate PCI devfn */
		if (LSB(rid))
			icb->firmware_options_2 |=
			    __constant_cpu_to_le32(BIT_18);

		icb->firmware_options_2 &= __constant_cpu_to_le32(~BIT_22);
		icb->firmware_options_2 |= __constant_cpu_to_le32(BIT_23);

		WRT_REG_DWORD(&reg->isp25mq.req_q_in, 0);
		WRT_REG_DWORD(&reg->isp25mq.req_q_out, 0);
		WRT_REG_DWORD(&reg->isp25mq.rsp_q_in, 0);
		WRT_REG_DWORD(&reg->isp25mq.rsp_q_out, 0);
	} else {
		WRT_REG_DWORD(&reg->isp24.req_q_in, 0);
		WRT_REG_DWORD(&reg->isp24.req_q_out, 0);
		WRT_REG_DWORD(&reg->isp24.rsp_q_in, 0);
		WRT_REG_DWORD(&reg->isp24.rsp_q_out, 0);
	}
	/* PCI posting */
	RD_REG_DWORD(&ioreg->hccr);
}

/**
 * qla2x00_init_rings() - Initializes firmware.
 * @ha: HA context
 *
 * Beginning of request ring has initialization control block already built
 * by nvram config routine.
 *
 * Returns 0 on success.
 */
static int
qla2x00_init_rings(scsi_qla_host_t *vha)
{
	int rval;
	unsigned long flags = 0;
	int cnt, que;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req;
	struct rsp_que *rsp;
	struct scsi_qla_host *vp;
	struct mid_init_cb_24xx *mid_init_cb =
	    (struct mid_init_cb_24xx *) ha->init_cb;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Clear outstanding commands array. */
	for (que = 0; que < ha->max_queues; que++) {
		req = ha->req_q_map[que];
		if (!req)
			continue;
		for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS; cnt++)
			req->outstanding_cmds[cnt] = NULL;

		req->current_outstanding_cmd = 0;

		/* Initialize firmware. */
		req->ring_ptr = req->ring;
		req->ring_index = 0;
		req->cnt = req->length;
	}

	for (que = 0; que < ha->max_queues; que++) {
		rsp = ha->rsp_q_map[que];
		if (!rsp)
			continue;
		rsp->ring_ptr = rsp->ring;
		rsp->ring_index = 0;

		/* Initialize response queue entries */
		qla2x00_init_response_q_entries(rsp);
	}

	/* Clear RSCN queue. */
	list_for_each_entry(vp, &ha->vp_list, list) {
		vp->rscn_in_ptr = 0;
		vp->rscn_out_ptr = 0;
	}
	ha->isp_ops->config_rings(vha);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	/* Update any ISP specific firmware options before initialization. */
	ha->isp_ops->update_fw_options(vha);

	DEBUG(printk("scsi(%ld): Issue init firmware.\n", vha->host_no));

	if (ha->flags.npiv_supported) {
		if (ha->operating_mode == LOOP)
			ha->max_npiv_vports = MIN_MULTI_ID_FABRIC - 1;
		mid_init_cb->count = cpu_to_le16(ha->max_npiv_vports);
	}

	if (IS_FWI2_CAPABLE(ha)) {
		mid_init_cb->options = __constant_cpu_to_le16(BIT_1);
		mid_init_cb->init_cb.execution_throttle =
		    cpu_to_le16(ha->fw_xcb_count);
	}

	rval = qla2x00_init_firmware(vha, ha->init_cb_size);
	if (rval) {
		DEBUG2_3(printk("scsi(%ld): Init firmware **** FAILED ****.\n",
		    vha->host_no));
	} else {
		DEBUG3(printk("scsi(%ld): Init firmware -- success.\n",
		    vha->host_no));
	}

	return (rval);
}

/**
 * qla2x00_fw_ready() - Waits for firmware ready.
 * @ha: HA context
 *
 * Returns 0 on success.
 */
static int
qla2x00_fw_ready(scsi_qla_host_t *vha)
{
	int rval;
	unsigned long wtime, mtime, cs84xx_time;
	uint16_t min_wait;	/* Minimum wait time if loop is down */
	uint16_t wait_time;	/* Wait time if loop is coming ready */
	uint16_t state[3];
	struct qla_hw_data *ha = vha->hw;

	rval = QLA_SUCCESS;

	/* 20 seconds for loop down. */
	min_wait = 20;

	/*
	 * Firmware should take at most one RATOV to login, plus 5 seconds for
	 * our own processing.
	 */
	if ((wait_time = (ha->retry_count*ha->login_timeout) + 5) < min_wait) {
		wait_time = min_wait;
	}

	/* Min wait time if loop down */
	mtime = jiffies + (min_wait * HZ);

	/* wait time before firmware ready */
	wtime = jiffies + (wait_time * HZ);

	/* Wait for ISP to finish LIP */
	if (!vha->flags.init_done)
		qla_printk(KERN_INFO, ha, "Waiting for LIP to complete...\n");

	DEBUG3(printk("scsi(%ld): Waiting for LIP to complete...\n",
	    vha->host_no));

	do {
		rval = qla2x00_get_firmware_state(vha, state);
		if (rval == QLA_SUCCESS) {
			if (state[0] < FSTATE_LOSS_OF_SYNC) {
				vha->device_flags &= ~DFLG_NO_CABLE;
			}
			if (IS_QLA84XX(ha) && state[0] != FSTATE_READY) {
				DEBUG16(printk("scsi(%ld): fw_state=%x "
				    "84xx=%x.\n", vha->host_no, state[0],
				    state[2]));
				if ((state[2] & FSTATE_LOGGED_IN) &&
				    (state[2] & FSTATE_WAITING_FOR_VERIFY)) {
					DEBUG16(printk("scsi(%ld): Sending "
					    "verify iocb.\n", vha->host_no));

					cs84xx_time = jiffies;
					rval = qla84xx_init_chip(vha);
					if (rval != QLA_SUCCESS)
						break;

					/* Add time taken to initialize. */
					cs84xx_time = jiffies - cs84xx_time;
					wtime += cs84xx_time;
					mtime += cs84xx_time;
					DEBUG16(printk("scsi(%ld): Increasing "
					    "wait time by %ld. New time %ld\n",
					    vha->host_no, cs84xx_time, wtime));
				}
			} else if (state[0] == FSTATE_READY) {
				DEBUG(printk("scsi(%ld): F/W Ready - OK \n",
				    vha->host_no));

				qla2x00_get_retry_cnt(vha, &ha->retry_count,
				    &ha->login_timeout, &ha->r_a_tov);

				rval = QLA_SUCCESS;
				break;
			}

			rval = QLA_FUNCTION_FAILED;

			if (atomic_read(&vha->loop_down_timer) &&
			    state[0] != FSTATE_READY) {
				/* Loop down. Timeout on min_wait for states
				 * other than Wait for Login.
				 */
				if (time_after_eq(jiffies, mtime)) {
					qla_printk(KERN_INFO, ha,
					    "Cable is unplugged...\n");

					vha->device_flags |= DFLG_NO_CABLE;
					break;
				}
			}
		} else {
			/* Mailbox cmd failed. Timeout on min_wait. */
			if (time_after_eq(jiffies, mtime))
				break;
		}

		if (time_after_eq(jiffies, wtime))
			break;

		/* Delay for a while */
		msleep(500);

		DEBUG3(printk("scsi(%ld): fw_state=%x curr time=%lx.\n",
		    vha->host_no, state[0], jiffies));
	} while (1);

	DEBUG(printk("scsi(%ld): fw_state=%x curr time=%lx.\n",
	    vha->host_no, state[0], jiffies));

	if (rval) {
		DEBUG2_3(printk("scsi(%ld): Firmware ready **** FAILED ****.\n",
		    vha->host_no));
	}

	return (rval);
}

/*
 * qla2x00_configure_hba
 *      Setup adapter context.
 *
 * Input:
 *      ha = adapter state pointer.
 *
 * Returns:
 *      0 = success
 *
 * Context:
 *      Kernel context.
 */
static int
qla2x00_configure_hba(scsi_qla_host_t *vha)
{
	int rval;
	uint16_t loop_id;
	uint16_t topo;
	uint16_t sw_cap;
	uint8_t al_pa;
	uint8_t area;
	uint8_t domain;
	char connect_type[22];
	struct qla_hw_data *ha = vha->hw;

	/* Get host addresses. */
	rval = qla2x00_get_adapter_id(vha,
	    &loop_id, &al_pa, &area, &domain, &topo, &sw_cap);
	if (rval != QLA_SUCCESS) {
		if (LOOP_TRANSITION(vha) || atomic_read(&ha->loop_down_timer) ||
		    (rval == QLA_COMMAND_ERROR && loop_id == 0x7)) {
			DEBUG2(printk("%s(%ld) Loop is in a transition state\n",
			    __func__, vha->host_no));
		} else {
			qla_printk(KERN_WARNING, ha,
			    "ERROR -- Unable to get host loop ID.\n");
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		}
		return (rval);
	}

	if (topo == 4) {
		qla_printk(KERN_INFO, ha,
		    "Cannot get topology - retrying.\n");
		return (QLA_FUNCTION_FAILED);
	}

	vha->loop_id = loop_id;

	/* initialize */
	ha->min_external_loopid = SNS_FIRST_LOOP_ID;
	ha->operating_mode = LOOP;
	ha->switch_cap = 0;

	switch (topo) {
	case 0:
		DEBUG3(printk("scsi(%ld): HBA in NL topology.\n",
		    vha->host_no));
		ha->current_topology = ISP_CFG_NL;
		strcpy(connect_type, "(Loop)");
		break;

	case 1:
		DEBUG3(printk("scsi(%ld): HBA in FL topology.\n",
		    vha->host_no));
		ha->switch_cap = sw_cap;
		ha->current_topology = ISP_CFG_FL;
		strcpy(connect_type, "(FL_Port)");
		break;

	case 2:
		DEBUG3(printk("scsi(%ld): HBA in N P2P topology.\n",
		    vha->host_no));
		ha->operating_mode = P2P;
		ha->current_topology = ISP_CFG_N;
		strcpy(connect_type, "(N_Port-to-N_Port)");
		break;

	case 3:
		DEBUG3(printk("scsi(%ld): HBA in F P2P topology.\n",
		    vha->host_no));
		ha->switch_cap = sw_cap;
		ha->operating_mode = P2P;
		ha->current_topology = ISP_CFG_F;
		strcpy(connect_type, "(F_Port)");
		break;

	default:
		DEBUG3(printk("scsi(%ld): HBA in unknown topology %x. "
		    "Using NL.\n",
		    vha->host_no, topo));
		ha->current_topology = ISP_CFG_NL;
		strcpy(connect_type, "(Loop)");
		break;
	}

	/* Save Host port and loop ID. */
	/* byte order - Big Endian */
	vha->d_id.b.domain = domain;
	vha->d_id.b.area = area;
	vha->d_id.b.al_pa = al_pa;

	if (!vha->flags.init_done)
		qla_printk(KERN_INFO, ha,
		    "Topology - %s, Host Loop address 0x%x\n",
		    connect_type, vha->loop_id);

	if (rval) {
		DEBUG2_3(printk("scsi(%ld): FAILED.\n", vha->host_no));
	} else {
		DEBUG3(printk("scsi(%ld): exiting normally.\n", vha->host_no));
	}

	return(rval);
}

static inline void
qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
	char *def)
{
	char *st, *en;
	uint16_t index;
	struct qla_hw_data *ha = vha->hw;

	if (memcmp(model, BINZERO, len) != 0) {
		strncpy(ha->model_number, model, len);
		st = en = ha->model_number;
		en += len - 1;
		while (en > st) {
			if (*en != 0x20 && *en != 0x00)
				break;
			*en-- = '\0';
		}

		index = (ha->pdev->subsystem_device & 0xff);
		if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
		    index < QLA_MODEL_NAMES)
			strncpy(ha->model_desc,
			    qla2x00_model_name[index * 2 + 1],
			    sizeof(ha->model_desc) - 1);
	} else {
		index = (ha->pdev->subsystem_device & 0xff);
		if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
		    index < QLA_MODEL_NAMES) {
			strcpy(ha->model_number,
			    qla2x00_model_name[index * 2]);
			strncpy(ha->model_desc,
			    qla2x00_model_name[index * 2 + 1],
			    sizeof(ha->model_desc) - 1);
		} else {
			strcpy(ha->model_number, def);
		}
	}
	if (IS_FWI2_CAPABLE(ha))
		qla2xxx_get_vpd_field(vha, "\x82", ha->model_desc,
		    sizeof(ha->model_desc));
}

/* On sparc systems, obtain port and node WWN from firmware
 * properties.
 */
static void qla2xxx_nvram_wwn_from_ofw(scsi_qla_host_t *vha, nvram_t *nv)
{
#ifdef CONFIG_SPARC
	struct qla_hw_data *ha = vha->hw;
	struct pci_dev *pdev = ha->pdev;
	struct device_node *dp = pci_device_to_OF_node(pdev);
	const u8 *val;
	int len;

	val = of_get_property(dp, "port-wwn", &len);
	if (val && len >= WWN_SIZE)
		memcpy(nv->port_name, val, WWN_SIZE);

	val = of_get_property(dp, "node-wwn", &len);
	if (val && len >= WWN_SIZE)
		memcpy(nv->node_name, val, WWN_SIZE);
#endif
}

/*
 * NVRAM configuration for ISP 2xxx
 *
 * Input:
 *      ha = adapter block pointer.
 *
 * Output:
 *      initialization control block in response_ring
 *      host adapters parameters in host adapter block
 *
 * Returns:
 *      0 = success.
 */
int
qla2x00_nvram_config(scsi_qla_host_t *vha)
{
	int rval;
	uint8_t chksum = 0;
	uint16_t cnt;
	uint8_t *dptr1, *dptr2;
	struct qla_hw_data *ha = vha->hw;
	init_cb_t *icb = ha->init_cb;
	nvram_t *nv = ha->nvram;
	uint8_t *ptr = ha->nvram;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	rval = QLA_SUCCESS;

	/* Determine NVRAM starting address. */
	ha->nvram_size = sizeof(nvram_t);
	ha->nvram_base = 0;
	if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha))
		if ((RD_REG_WORD(&reg->ctrl_status) >> 14) == 1)
			ha->nvram_base = 0x80;

	/* Get NVRAM data and calculate checksum. */
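	/*
	 * The checksum is the 8-bit sum of every byte in the NVRAM image; it
	 * must come out to zero and the data must carry the "ISP " signature
	 * with a non-zero version, otherwise safe defaults are forced below.
	 */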
	ha->isp_ops->read_nvram(vha, ptr, ha->nvram_base, ha->nvram_size);
	for (cnt = 0, chksum = 0; cnt < ha->nvram_size; cnt++)
		chksum += *ptr++;

	DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", vha->host_no));
	DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size));

	/* Bad NVRAM data, set default parameters. */
	if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' ||
	    nv->id[2] != 'P' || nv->id[3] != ' ' || nv->nvram_version < 1) {
		/* Reset NVRAM data. */
		qla_printk(KERN_WARNING, ha, "Inconsistent NVRAM detected: "
		    "checksum=0x%x id=%c version=0x%x.\n", chksum, nv->id[0],
		    nv->nvram_version);
		qla_printk(KERN_WARNING, ha, "Falling back to functioning (yet "
		    "invalid -- WWPN) defaults.\n");

		/*
		 * Set default initialization control block.
		 */
		memset(nv, 0, ha->nvram_size);
		nv->parameter_block_version = ICB_VERSION;

		if (IS_QLA23XX(ha)) {
			nv->firmware_options[0] = BIT_2 | BIT_1;
			nv->firmware_options[1] = BIT_7 | BIT_5;
			nv->add_firmware_options[0] = BIT_5;
			nv->add_firmware_options[1] = BIT_5 | BIT_4;
			nv->frame_payload_size = __constant_cpu_to_le16(2048);
			nv->special_options[1] = BIT_7;
		} else if (IS_QLA2200(ha)) {
			nv->firmware_options[0] = BIT_2 | BIT_1;
			nv->firmware_options[1] = BIT_7 | BIT_5;
			nv->add_firmware_options[0] = BIT_5;
			nv->add_firmware_options[1] = BIT_5 | BIT_4;
			nv->frame_payload_size = __constant_cpu_to_le16(1024);
		} else if (IS_QLA2100(ha)) {
			nv->firmware_options[0] = BIT_3 | BIT_1;
			nv->firmware_options[1] = BIT_5;
			nv->frame_payload_size = __constant_cpu_to_le16(1024);
		}

		nv->max_iocb_allocation = __constant_cpu_to_le16(256);
		nv->execution_throttle = __constant_cpu_to_le16(16);
		nv->retry_count = 8;
		nv->retry_delay = 1;

		nv->port_name[0] = 33;
		nv->port_name[3] = 224;
		nv->port_name[4] = 139;

		qla2xxx_nvram_wwn_from_ofw(vha, nv);

		nv->login_timeout = 4;

		/*
		 * Set default host adapter parameters
		 */
		nv->host_p[1] = BIT_2;
		nv->reset_delay = 5;
		nv->port_down_retry_count = 8;
		nv->max_luns_per_target = __constant_cpu_to_le16(8);
		nv->link_down_timeout = 60;

		rval = 1;
	}

#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
	/*
	 * The SN2 does not provide BIOS emulation which means you can't change
	 * potentially bogus BIOS settings. Force the use of default settings
	 * for link rate and frame size. Hope that the rest of the settings
	 * are valid.
	 */
	if (ia64_platform_is("sn2")) {
		nv->frame_payload_size = __constant_cpu_to_le16(2048);
		if (IS_QLA23XX(ha))
			nv->special_options[1] = BIT_7;
	}
#endif

	/* Reset Initialization control block */
	memset(icb, 0, ha->init_cb_size);

	/*
	 * Setup driver NVRAM options.
	 */
	nv->firmware_options[0] |= (BIT_6 | BIT_1);
	nv->firmware_options[0] &= ~(BIT_5 | BIT_4);
	nv->firmware_options[1] |= (BIT_5 | BIT_0);
	nv->firmware_options[1] &= ~BIT_4;

	if (IS_QLA23XX(ha)) {
		nv->firmware_options[0] |= BIT_2;
		nv->firmware_options[0] &= ~BIT_3;
		nv->add_firmware_options[1] |= BIT_5 | BIT_4;

		if (IS_QLA2300(ha)) {
			if (ha->fb_rev == FPM_2310) {
				strcpy(ha->model_number, "QLA2310");
			} else {
				strcpy(ha->model_number, "QLA2300");
			}
		} else {
			qla2x00_set_model_info(vha, nv->model_number,
			    sizeof(nv->model_number), "QLA23xx");
		}
	} else if (IS_QLA2200(ha)) {
		nv->firmware_options[0] |= BIT_2;
		/*
		 * 'Point-to-point preferred, else loop' is not a safe
		 * connection mode setting.
		 */
		if ((nv->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) ==
		    (BIT_5 | BIT_4)) {
			/* Force 'loop preferred, else point-to-point'. */
			nv->add_firmware_options[0] &= ~(BIT_6 | BIT_5 | BIT_4);
			nv->add_firmware_options[0] |= BIT_5;
		}
		strcpy(ha->model_number, "QLA22xx");
	} else /*if (IS_QLA2100(ha))*/ {
		strcpy(ha->model_number, "QLA2100");
	}

	/*
	 * Copy over NVRAM RISC parameter block to initialization control block.
	 */
	dptr1 = (uint8_t *)icb;
	dptr2 = (uint8_t *)&nv->parameter_block_version;
	cnt = (uint8_t *)&icb->request_q_outpointer - (uint8_t *)&icb->version;
	while (cnt--)
		*dptr1++ = *dptr2++;

	/* Copy 2nd half. */
	dptr1 = (uint8_t *)icb->add_firmware_options;
	cnt = (uint8_t *)icb->reserved_3 - (uint8_t *)icb->add_firmware_options;
	while (cnt--)
		*dptr1++ = *dptr2++;

	/* Use alternate WWN? */
	if (nv->host_p[1] & BIT_7) {
		memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
		memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
	}

	/* Prepare nodename */
	if ((icb->firmware_options[1] & BIT_6) == 0) {
		/*
		 * Firmware will apply the following mask if the nodename was
		 * not provided.
		 */
		memcpy(icb->node_name, icb->port_name, WWN_SIZE);
		icb->node_name[0] &= 0xF0;
	}

	/*
	 * Set host adapter parameters.
	 */
	if (nv->host_p[0] & BIT_7)
		ql2xextended_error_logging = 1;
	ha->flags.disable_risc_code_load = ((nv->host_p[0] & BIT_4) ? 1 : 0);
	/* Always load RISC code on non ISP2[12]00 chips. */
	if (!IS_QLA2100(ha) && !IS_QLA2200(ha))
		ha->flags.disable_risc_code_load = 0;
	ha->flags.enable_lip_reset = ((nv->host_p[1] & BIT_1) ? 1 : 0);
	ha->flags.enable_lip_full_login = ((nv->host_p[1] & BIT_2) ? 1 : 0);
	ha->flags.enable_target_reset = ((nv->host_p[1] & BIT_3) ? 1 : 0);
	ha->flags.enable_led_scheme = (nv->special_options[1] & BIT_4) ? 1 : 0;
	ha->flags.disable_serdes = 0;

	ha->operating_mode =
	    (icb->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) >> 4;

	memcpy(ha->fw_seriallink_options, nv->seriallink_options,
	    sizeof(ha->fw_seriallink_options));

	/* save HBA serial number */
	ha->serial0 = icb->port_name[5];
	ha->serial1 = icb->port_name[6];
	ha->serial2 = icb->port_name[7];
	memcpy(vha->node_name, icb->node_name, WWN_SIZE);
	memcpy(vha->port_name, icb->port_name, WWN_SIZE);

	icb->execution_throttle = __constant_cpu_to_le16(0xFFFF);

	ha->retry_count = nv->retry_count;

	/* Set minimum login_timeout to 4 seconds. */
	if (nv->login_timeout < ql2xlogintimeout)
		nv->login_timeout = ql2xlogintimeout;
	if (nv->login_timeout < 4)
		nv->login_timeout = 4;
	ha->login_timeout = nv->login_timeout;
	icb->login_timeout = nv->login_timeout;

	/* Set minimum RATOV to 100 tenths of a second. */
	ha->r_a_tov = 100;

	ha->loop_reset_delay = nv->reset_delay;

	/* Link Down Timeout = 0:
	 *
	 *	When Port Down timer expires we will start returning
	 *	I/O's to OS with "DID_NO_CONNECT".
	 *
	 * Link Down Timeout != 0:
	 *
	 *	The driver waits for the link to come up after link down
	 *	before returning I/Os to OS with "DID_NO_CONNECT".
	 */
	if (nv->link_down_timeout == 0) {
		ha->loop_down_abort_time =
		    (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
	} else {
		ha->link_down_timeout = nv->link_down_timeout;
		ha->loop_down_abort_time =
		    (LOOP_DOWN_TIME - ha->link_down_timeout);
	}

	/*
	 * Need enough time to try and get the port back.
	 */
	ha->port_down_retry_count = nv->port_down_retry_count;
	if (qlport_down_retry)
		ha->port_down_retry_count = qlport_down_retry;
	/* Set login_retry_count */
	ha->login_retry_count = nv->retry_count;
	if (ha->port_down_retry_count == nv->port_down_retry_count &&
	    ha->port_down_retry_count > 3)
		ha->login_retry_count = ha->port_down_retry_count;
	else if (ha->port_down_retry_count > (int)ha->login_retry_count)
		ha->login_retry_count = ha->port_down_retry_count;
	if (ql2xloginretrycount)
		ha->login_retry_count = ql2xloginretrycount;

	icb->lun_enables = __constant_cpu_to_le16(0);
	icb->command_resource_count = 0;
	icb->immediate_notify_resource_count = 0;
	icb->timeout = __constant_cpu_to_le16(0);

	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
		/* Enable RIO */
		icb->firmware_options[0] &= ~BIT_3;
		icb->add_firmware_options[0] &=
		    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
		icb->add_firmware_options[0] |= BIT_2;
		icb->response_accumulation_timer = 3;
		icb->interrupt_delay_timer = 5;

		vha->flags.process_response_queue = 1;
	} else {
		/* Enable ZIO. */
		if (!vha->flags.init_done) {
			ha->zio_mode = icb->add_firmware_options[0] &
			    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
			ha->zio_timer = icb->interrupt_delay_timer ?
			    icb->interrupt_delay_timer : 2;
		}
		icb->add_firmware_options[0] &=
		    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
		vha->flags.process_response_queue = 0;
		if (ha->zio_mode != QLA_ZIO_DISABLED) {
			ha->zio_mode = QLA_ZIO_MODE_6;

			DEBUG2(printk("scsi(%ld): ZIO mode %d enabled; timer "
			    "delay (%d us).\n", vha->host_no, ha->zio_mode,
			    ha->zio_timer * 100));
			qla_printk(KERN_INFO, ha,
			    "ZIO mode %d enabled; timer delay (%d us).\n",
			    ha->zio_mode, ha->zio_timer * 100);

			icb->add_firmware_options[0] |= (uint8_t)ha->zio_mode;
			icb->interrupt_delay_timer = (uint8_t)ha->zio_timer;
			vha->flags.process_response_queue = 1;
		}
	}

	if (rval) {
		DEBUG2_3(printk(KERN_WARNING
		    "scsi(%ld): NVRAM configuration failed!\n", vha->host_no));
	}
	return (rval);
}

static void
qla2x00_rport_del(void *data)
{
	fc_port_t *fcport = data;
	struct fc_rport *rport;

	spin_lock_irq(fcport->vha->host->host_lock);
	rport = fcport->drport;
	fcport->drport = NULL;
	spin_unlock_irq(fcport->vha->host->host_lock);
	if (rport)
		fc_remote_port_delete(rport);
}

/**
 * qla2x00_alloc_fcport() - Allocate a generic fcport.
 * @ha: HA context
 * @flags: allocation flags
 *
 * Returns a pointer to the allocated fcport, or NULL, if none available.
 */
static fc_port_t *
qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
{
	fc_port_t *fcport;

	fcport = kzalloc(sizeof(fc_port_t), flags);
	if (!fcport)
		return NULL;

	/* Setup fcport template structure. */
	fcport->vha = vha;
	fcport->vp_idx = vha->vp_idx;
	fcport->port_type = FCT_UNKNOWN;
	fcport->loop_id = FC_NO_LOOP_ID;
	atomic_set(&fcport->state, FCS_UNCONFIGURED);
	fcport->supported_classes = FC_COS_UNSPECIFIED;

	return fcport;
}

/*
 * qla2x00_configure_loop
 *      Updates Fibre Channel Device Database with what is actually on loop.
 *
 * Input:
 *      ha = adapter block pointer.
 *
 * Returns:
 *      0 = success.
 *      1 = error.
 *      2 = database was full and device was not configured.
 */
static int
qla2x00_configure_loop(scsi_qla_host_t *vha)
{
	int rval;
	unsigned long flags, save_flags;
	struct qla_hw_data *ha = vha->hw;
	rval = QLA_SUCCESS;

	/* Get Initiator ID */
	if (test_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags)) {
		rval = qla2x00_configure_hba(vha);
		if (rval != QLA_SUCCESS) {
			DEBUG(printk("scsi(%ld): Unable to configure HBA.\n",
			    vha->host_no));
			return (rval);
		}
	}

	save_flags = flags = vha->dpc_flags;
	DEBUG(printk("scsi(%ld): Configure loop -- dpc flags =0x%lx\n",
	    vha->host_no, flags));

	/*
	 * If we have both an RSCN and PORT UPDATE pending then handle them
	 * both at the same time.
	 */
1999 */ 2000 clear_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 2001 clear_bit(RSCN_UPDATE, &vha->dpc_flags); 2002 2003 /* Determine what we need to do */ 2004 if (ha->current_topology == ISP_CFG_FL && 2005 (test_bit(LOCAL_LOOP_UPDATE, &flags))) { 2006 2007 vha->flags.rscn_queue_overflow = 1; 2008 set_bit(RSCN_UPDATE, &flags); 2009 2010 } else if (ha->current_topology == ISP_CFG_F && 2011 (test_bit(LOCAL_LOOP_UPDATE, &flags))) { 2012 2013 vha->flags.rscn_queue_overflow = 1; 2014 set_bit(RSCN_UPDATE, &flags); 2015 clear_bit(LOCAL_LOOP_UPDATE, &flags); 2016 2017 } else if (ha->current_topology == ISP_CFG_N) { 2018 clear_bit(RSCN_UPDATE, &flags); 2019 2020 } else if (!vha->flags.online || 2021 (test_bit(ABORT_ISP_ACTIVE, &flags))) { 2022 2023 vha->flags.rscn_queue_overflow = 1; 2024 set_bit(RSCN_UPDATE, &flags); 2025 set_bit(LOCAL_LOOP_UPDATE, &flags); 2026 } 2027 2028 if (test_bit(LOCAL_LOOP_UPDATE, &flags)) { 2029 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) 2030 rval = QLA_FUNCTION_FAILED; 2031 else 2032 rval = qla2x00_configure_local_loop(vha); 2033 } 2034 2035 if (rval == QLA_SUCCESS && test_bit(RSCN_UPDATE, &flags)) { 2036 if (LOOP_TRANSITION(vha)) 2037 rval = QLA_FUNCTION_FAILED; 2038 else 2039 rval = qla2x00_configure_fabric(vha); 2040 } 2041 2042 if (rval == QLA_SUCCESS) { 2043 if (atomic_read(&vha->loop_down_timer) || 2044 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) { 2045 rval = QLA_FUNCTION_FAILED; 2046 } else { 2047 atomic_set(&vha->loop_state, LOOP_READY); 2048 2049 DEBUG(printk("scsi(%ld): LOOP READY\n", vha->host_no)); 2050 } 2051 } 2052 2053 if (rval) { 2054 DEBUG2_3(printk("%s(%ld): *** FAILED ***\n", 2055 __func__, vha->host_no)); 2056 } else { 2057 DEBUG3(printk("%s: exiting normally\n", __func__)); 2058 } 2059 2060 /* Restore state if a resync event occurred during processing */ 2061 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) { 2062 if (test_bit(LOCAL_LOOP_UPDATE, &save_flags)) 2063 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 2064 if (test_bit(RSCN_UPDATE, &save_flags)) 2065 set_bit(RSCN_UPDATE, &vha->dpc_flags); 2066 } 2067 2068 return (rval); 2069 } 2070 2071 2072 2073 /* 2074 * qla2x00_configure_local_loop 2075 * Updates Fibre Channel Device Database with local loop devices. 2076 * 2077 * Input: 2078 * ha = adapter block pointer. 2079 * 2080 * Returns: 2081 * 0 = success. 2082 */ 2083 static int 2084 qla2x00_configure_local_loop(scsi_qla_host_t *vha) 2085 { 2086 int rval, rval2; 2087 int found_devs; 2088 int found; 2089 fc_port_t *fcport, *new_fcport; 2090 2091 uint16_t index; 2092 uint16_t entries; 2093 char *id_iter; 2094 uint16_t loop_id; 2095 uint8_t domain, area, al_pa; 2096 struct qla_hw_data *ha = vha->hw; 2097 2098 found_devs = 0; 2099 new_fcport = NULL; 2100 entries = MAX_FIBRE_DEVICES; 2101 2102 DEBUG3(printk("scsi(%ld): Getting FCAL position map\n", vha->host_no)); 2103 DEBUG3(qla2x00_get_fcal_position_map(vha, NULL)); 2104 2105 /* Get list of logged in devices. */ 2106 memset(ha->gid_list, 0, GID_LIST_SIZE); 2107 rval = qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma, 2108 &entries); 2109 if (rval != QLA_SUCCESS) 2110 goto cleanup_allocation; 2111 2112 DEBUG3(printk("scsi(%ld): Entries in ID list (%d)\n", 2113 ha->host_no, entries)); 2114 DEBUG3(qla2x00_dump_buffer((uint8_t *)ha->gid_list, 2115 entries * sizeof(struct gid_list_info))); 2116 2117 /* Allocate temporary fcport for any new fcports discovered. 
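 */

/*
 * Illustrative sketch, not driver code: the decision just made by
 * qla2x00_configure_loop(), reduced to a pure function.  Given the
 * detected topology and the pending update bits it returns which scans
 * to run; on an FL port a local-loop change also forces a fabric scan,
 * on a pure fabric (F) port there is no local loop to walk, and an
 * N-port connection has no switch to query.  The enum and flag names
 * are simplified stand-ins for the driver's ISP_CFG_ values and
 * dpc_flags bits.
 */
#define SKETCH_DO_LOCAL_LOOP	0x1u
#define SKETCH_DO_FABRIC	0x2u

enum sketch_topology {
	SKETCH_TOPO_LOOP,	/* private loop */
	SKETCH_TOPO_FL,		/* loop attached to a switch */
	SKETCH_TOPO_F,		/* point-to-point to a switch */
	SKETCH_TOPO_N		/* point-to-point, no switch */
};

static unsigned int sketch_loop_actions(enum sketch_topology topo,
    int local_update, int rscn_update, int online)
{
	unsigned int act = 0;

	if (local_update)
		act |= SKETCH_DO_LOCAL_LOOP;
	if (rscn_update)
		act |= SKETCH_DO_FABRIC;

	if (topo == SKETCH_TOPO_FL && local_update)
		act |= SKETCH_DO_FABRIC;
	else if (topo == SKETCH_TOPO_F && local_update)
		act = SKETCH_DO_FABRIC;
	else if (topo == SKETCH_TOPO_N)
		act &= ~SKETCH_DO_FABRIC;
	else if (!online)
		act = SKETCH_DO_LOCAL_LOOP | SKETCH_DO_FABRIC;

	return act;
}

/*
 * The scratch entry becomes a real fcport when a new device is found.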
*/ 2118 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); 2119 if (new_fcport == NULL) { 2120 rval = QLA_MEMORY_ALLOC_FAILED; 2121 goto cleanup_allocation; 2122 } 2123 new_fcport->flags &= ~FCF_FABRIC_DEVICE; 2124 2125 /* 2126 * Mark local devices that were present with FCF_DEVICE_LOST for now. 2127 */ 2128 list_for_each_entry(fcport, &vha->vp_fcports, list) { 2129 if (atomic_read(&fcport->state) == FCS_ONLINE && 2130 fcport->port_type != FCT_BROADCAST && 2131 (fcport->flags & FCF_FABRIC_DEVICE) == 0) { 2132 2133 DEBUG(printk("scsi(%ld): Marking port lost, " 2134 "loop_id=0x%04x\n", 2135 vha->host_no, fcport->loop_id)); 2136 2137 atomic_set(&fcport->state, FCS_DEVICE_LOST); 2138 } 2139 } 2140 2141 /* Add devices to port list. */ 2142 id_iter = (char *)ha->gid_list; 2143 for (index = 0; index < entries; index++) { 2144 domain = ((struct gid_list_info *)id_iter)->domain; 2145 area = ((struct gid_list_info *)id_iter)->area; 2146 al_pa = ((struct gid_list_info *)id_iter)->al_pa; 2147 if (IS_QLA2100(ha) || IS_QLA2200(ha)) 2148 loop_id = (uint16_t) 2149 ((struct gid_list_info *)id_iter)->loop_id_2100; 2150 else 2151 loop_id = le16_to_cpu( 2152 ((struct gid_list_info *)id_iter)->loop_id); 2153 id_iter += ha->gid_list_info_size; 2154 2155 /* Bypass reserved domain fields. */ 2156 if ((domain & 0xf0) == 0xf0) 2157 continue; 2158 2159 /* Bypass if not same domain and area of adapter. */ 2160 if (area && domain && 2161 (area != vha->d_id.b.area || domain != vha->d_id.b.domain)) 2162 continue; 2163 2164 /* Bypass invalid local loop ID. */ 2165 if (loop_id > LAST_LOCAL_LOOP_ID) 2166 continue; 2167 2168 /* Fill in member data. */ 2169 new_fcport->d_id.b.domain = domain; 2170 new_fcport->d_id.b.area = area; 2171 new_fcport->d_id.b.al_pa = al_pa; 2172 new_fcport->loop_id = loop_id; 2173 new_fcport->vp_idx = vha->vp_idx; 2174 rval2 = qla2x00_get_port_database(vha, new_fcport, 0); 2175 if (rval2 != QLA_SUCCESS) { 2176 DEBUG2(printk("scsi(%ld): Failed to retrieve fcport " 2177 "information -- get_port_database=%x, " 2178 "loop_id=0x%04x\n", 2179 vha->host_no, rval2, new_fcport->loop_id)); 2180 DEBUG2(printk("scsi(%ld): Scheduling resync...\n", 2181 vha->host_no)); 2182 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 2183 continue; 2184 } 2185 2186 /* Check for matching device in port list. */ 2187 found = 0; 2188 fcport = NULL; 2189 list_for_each_entry(fcport, &vha->vp_fcports, list) { 2190 if (memcmp(new_fcport->port_name, fcport->port_name, 2191 WWN_SIZE)) 2192 continue; 2193 2194 fcport->flags &= ~FCF_FABRIC_DEVICE; 2195 fcport->loop_id = new_fcport->loop_id; 2196 fcport->port_type = new_fcport->port_type; 2197 fcport->d_id.b24 = new_fcport->d_id.b24; 2198 memcpy(fcport->node_name, new_fcport->node_name, 2199 WWN_SIZE); 2200 2201 found++; 2202 break; 2203 } 2204 2205 if (!found) { 2206 /* New device, add to fcports list. */ 2207 if (vha->vp_idx) { 2208 new_fcport->vha = vha; 2209 new_fcport->vp_idx = vha->vp_idx; 2210 } 2211 list_add_tail(&new_fcport->list, &vha->vp_fcports); 2212 2213 /* Allocate a new replacement fcport. */ 2214 fcport = new_fcport; 2215 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); 2216 if (new_fcport == NULL) { 2217 rval = QLA_MEMORY_ALLOC_FAILED; 2218 goto cleanup_allocation; 2219 } 2220 new_fcport->flags &= ~FCF_FABRIC_DEVICE; 2221 } 2222 2223 /* Base iIDMA settings on HBA port speed. 
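 */

/*
 * Illustrative sketch, not driver code: the three bypass checks the
 * loop above applies to every Get-ID-List entry, folded into one
 * predicate.  Domains 0xF0-0xFF are reserved, entries outside the
 * adapter's own domain and area are not on this local loop, and loop
 * IDs past the local range belong to the fabric.  The struct and the
 * last_local_id bound are simplified stand-ins for the driver's
 * gid_list_info and LAST_LOCAL_LOOP_ID.
 */
struct sketch_gid_entry {
	unsigned char domain, area, al_pa;
	unsigned int loop_id;
};

static int sketch_skip_gid_entry(const struct sketch_gid_entry *e,
    unsigned char my_domain, unsigned char my_area,
    unsigned int last_local_id)
{
	if ((e->domain & 0xf0) == 0xf0)
		return 1;			/* reserved domain */
	if (e->area && e->domain &&
	    (e->area != my_area || e->domain != my_domain))
		return 1;			/* different loop segment */
	if (e->loop_id > last_local_id)
		return 1;			/* fabric-assigned ID */
	return 0;				/* keep this entry */
}

/*
 * Default the port speed to the current link rate.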
*/ 2224 fcport->fp_speed = ha->link_data_rate; 2225 2226 qla2x00_update_fcport(vha, fcport); 2227 2228 found_devs++; 2229 } 2230 2231 cleanup_allocation: 2232 kfree(new_fcport); 2233 2234 if (rval != QLA_SUCCESS) { 2235 DEBUG2(printk("scsi(%ld): Configure local loop error exit: " 2236 "rval=%x\n", vha->host_no, rval)); 2237 } 2238 2239 return (rval); 2240 } 2241 2242 static void 2243 qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) 2244 { 2245 #define LS_UNKNOWN 2 2246 static char *link_speeds[5] = { "1", "2", "?", "4", "8" }; 2247 int rval; 2248 uint16_t mb[6]; 2249 struct qla_hw_data *ha = vha->hw; 2250 2251 if (!IS_IIDMA_CAPABLE(ha)) 2252 return; 2253 2254 if (fcport->fp_speed == PORT_SPEED_UNKNOWN || 2255 fcport->fp_speed > ha->link_data_rate) 2256 return; 2257 2258 rval = qla2x00_set_idma_speed(vha, fcport->loop_id, fcport->fp_speed, 2259 mb); 2260 if (rval != QLA_SUCCESS) { 2261 DEBUG2(printk("scsi(%ld): Unable to adjust iIDMA " 2262 "%02x%02x%02x%02x%02x%02x%02x%02x -- %04x %x %04x %04x.\n", 2263 vha->host_no, fcport->port_name[0], fcport->port_name[1], 2264 fcport->port_name[2], fcport->port_name[3], 2265 fcport->port_name[4], fcport->port_name[5], 2266 fcport->port_name[6], fcport->port_name[7], rval, 2267 fcport->fp_speed, mb[0], mb[1])); 2268 } else { 2269 DEBUG2(qla_printk(KERN_INFO, ha, 2270 "iIDMA adjusted to %s GB/s on " 2271 "%02x%02x%02x%02x%02x%02x%02x%02x.\n", 2272 link_speeds[fcport->fp_speed], fcport->port_name[0], 2273 fcport->port_name[1], fcport->port_name[2], 2274 fcport->port_name[3], fcport->port_name[4], 2275 fcport->port_name[5], fcport->port_name[6], 2276 fcport->port_name[7])); 2277 } 2278 } 2279 2280 static void 2281 qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport) 2282 { 2283 struct fc_rport_identifiers rport_ids; 2284 struct fc_rport *rport; 2285 struct qla_hw_data *ha = vha->hw; 2286 2287 if (fcport->drport) 2288 qla2x00_rport_del(fcport); 2289 2290 rport_ids.node_name = wwn_to_u64(fcport->node_name); 2291 rport_ids.port_name = wwn_to_u64(fcport->port_name); 2292 rport_ids.port_id = fcport->d_id.b.domain << 16 | 2293 fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa; 2294 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN; 2295 fcport->rport = rport = fc_remote_port_add(vha->host, 0, &rport_ids); 2296 if (!rport) { 2297 qla_printk(KERN_WARNING, ha, 2298 "Unable to allocate fc remote port!\n"); 2299 return; 2300 } 2301 spin_lock_irq(fcport->vha->host->host_lock); 2302 *((fc_port_t **)rport->dd_data) = fcport; 2303 spin_unlock_irq(fcport->vha->host->host_lock); 2304 2305 rport->supported_classes = fcport->supported_classes; 2306 2307 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN; 2308 if (fcport->port_type == FCT_INITIATOR) 2309 rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR; 2310 if (fcport->port_type == FCT_TARGET) 2311 rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET; 2312 fc_remote_port_rolechg(rport, rport_ids.roles); 2313 } 2314 2315 /* 2316 * qla2x00_update_fcport 2317 * Updates device on list. 2318 * 2319 * Input: 2320 * ha = adapter block pointer. 2321 * fcport = port structure pointer. 2322 * 2323 * Return: 2324 * 0 - Success 2325 * BIT_0 - error 2326 * 2327 * Context: 2328 * Kernel context. 
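 */

/*
 * Illustrative sketch, not driver code: the 24-bit FC port ID built in
 * qla2x00_reg_remote_port() above is simply domain, area and AL_PA
 * packed into one word, matching the driver's d_id.b24 and per-byte
 * d_id.b views of the same address.
 */
static unsigned int sketch_pack_d_id(unsigned char domain,
    unsigned char area, unsigned char al_pa)
{
	return ((unsigned int)domain << 16) |
	       ((unsigned int)area << 8) | al_pa;
}

static void sketch_unpack_d_id(unsigned int b24, unsigned char *domain,
    unsigned char *area, unsigned char *al_pa)
{
	*domain = (b24 >> 16) & 0xff;
	*area = (b24 >> 8) & 0xff;
	*al_pa = b24 & 0xff;
}

/*
 * The update path below also registers the port with the FC transport.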
2329 */ 2330 void 2331 qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) 2332 { 2333 struct qla_hw_data *ha = vha->hw; 2334 2335 fcport->vha = vha; 2336 fcport->login_retry = 0; 2337 fcport->port_login_retry_count = ha->port_down_retry_count * 2338 PORT_RETRY_TIME; 2339 atomic_set(&fcport->port_down_timer, ha->port_down_retry_count * 2340 PORT_RETRY_TIME); 2341 fcport->flags &= ~FCF_LOGIN_NEEDED; 2342 2343 qla2x00_iidma_fcport(vha, fcport); 2344 2345 atomic_set(&fcport->state, FCS_ONLINE); 2346 2347 qla2x00_reg_remote_port(vha, fcport); 2348 } 2349 2350 /* 2351 * qla2x00_configure_fabric 2352 * Setup SNS devices with loop ID's. 2353 * 2354 * Input: 2355 * ha = adapter block pointer. 2356 * 2357 * Returns: 2358 * 0 = success. 2359 * BIT_0 = error 2360 */ 2361 static int 2362 qla2x00_configure_fabric(scsi_qla_host_t *vha) 2363 { 2364 int rval, rval2; 2365 fc_port_t *fcport, *fcptemp; 2366 uint16_t next_loopid; 2367 uint16_t mb[MAILBOX_REGISTER_COUNT]; 2368 uint16_t loop_id; 2369 LIST_HEAD(new_fcports); 2370 struct qla_hw_data *ha = vha->hw; 2371 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 2372 2373 /* If FL port exists, then SNS is present */ 2374 if (IS_FWI2_CAPABLE(ha)) 2375 loop_id = NPH_F_PORT; 2376 else 2377 loop_id = SNS_FL_PORT; 2378 rval = qla2x00_get_port_name(vha, loop_id, vha->fabric_node_name, 1); 2379 if (rval != QLA_SUCCESS) { 2380 DEBUG2(printk("scsi(%ld): MBC_GET_PORT_NAME Failed, No FL " 2381 "Port\n", vha->host_no)); 2382 2383 vha->device_flags &= ~SWITCH_FOUND; 2384 return (QLA_SUCCESS); 2385 } 2386 vha->device_flags |= SWITCH_FOUND; 2387 2388 /* Mark devices that need re-synchronization. */ 2389 rval2 = qla2x00_device_resync(vha); 2390 if (rval2 == QLA_RSCNS_HANDLED) { 2391 /* No point doing the scan, just continue. */ 2392 return (QLA_SUCCESS); 2393 } 2394 do { 2395 /* FDMI support. */ 2396 if (ql2xfdmienable && 2397 test_and_clear_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags)) 2398 qla2x00_fdmi_register(vha); 2399 2400 /* Ensure we are logged into the SNS. */ 2401 if (IS_FWI2_CAPABLE(ha)) 2402 loop_id = NPH_SNS; 2403 else 2404 loop_id = SIMPLE_NAME_SERVER; 2405 ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff, 2406 0xfc, mb, BIT_1 | BIT_0); 2407 if (mb[0] != MBS_COMMAND_COMPLETE) { 2408 DEBUG2(qla_printk(KERN_INFO, ha, 2409 "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x " 2410 "mb[2]=%x mb[6]=%x mb[7]=%x\n", loop_id, 2411 mb[0], mb[1], mb[2], mb[6], mb[7])); 2412 return (QLA_SUCCESS); 2413 } 2414 2415 if (test_and_clear_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags)) { 2416 if (qla2x00_rft_id(vha)) { 2417 /* EMPTY */ 2418 DEBUG2(printk("scsi(%ld): Register FC-4 " 2419 "TYPE failed.\n", vha->host_no)); 2420 } 2421 if (qla2x00_rff_id(vha)) { 2422 /* EMPTY */ 2423 DEBUG2(printk("scsi(%ld): Register FC-4 " 2424 "Features failed.\n", vha->host_no)); 2425 } 2426 if (qla2x00_rnn_id(vha)) { 2427 /* EMPTY */ 2428 DEBUG2(printk("scsi(%ld): Register Node Name " 2429 "failed.\n", vha->host_no)); 2430 } else if (qla2x00_rsnn_nn(vha)) { 2431 /* EMPTY */ 2432 DEBUG2(printk("scsi(%ld): Register Symbolic " 2433 "Node Name failed.\n", vha->host_no)); 2434 } 2435 } 2436 2437 rval = qla2x00_find_all_fabric_devs(vha, &new_fcports); 2438 if (rval != QLA_SUCCESS) 2439 break; 2440 2441 /* 2442 * Logout all previous fabric devices marked lost, except 2443 * tape devices. 
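 */

/*
 * Illustrative sketch, not driver code: the test used by the loop
 * below to decide whether a lost fabric port should be explicitly
 * logged out.  Ports without a firmware login, tape (FCP-2) devices,
 * initiators and the broadcast entry are left alone.  The struct and
 * the SKETCH_ names are stand-ins for fc_port_t and the driver's
 * FCF_ and FCT_ flags.
 */
#define SKETCH_F_FABRIC		0x1u
#define SKETCH_F_TAPE		0x2u
#define SKETCH_T_INITIATOR	1
#define SKETCH_T_BROADCAST	2
#define SKETCH_NO_LOOP_ID	0xffffu

struct sketch_lost_port {
	unsigned int flags;
	unsigned int loop_id;
	int port_type;
};

static int sketch_should_fabric_logout(const struct sketch_lost_port *p)
{
	return (p->flags & SKETCH_F_FABRIC) &&
	    p->loop_id != SKETCH_NO_LOOP_ID &&
	    !(p->flags & SKETCH_F_TAPE) &&
	    p->port_type != SKETCH_T_INITIATOR &&
	    p->port_type != SKETCH_T_BROADCAST;
}

/*
 * Walk the port list and log out entries still marked lost.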
2444 */ 2445 list_for_each_entry(fcport, &vha->vp_fcports, list) { 2446 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) 2447 break; 2448 2449 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) 2450 continue; 2451 2452 if (atomic_read(&fcport->state) == FCS_DEVICE_LOST) { 2453 qla2x00_mark_device_lost(vha, fcport, 2454 ql2xplogiabsentdevice, 0); 2455 if (fcport->loop_id != FC_NO_LOOP_ID && 2456 (fcport->flags & FCF_TAPE_PRESENT) == 0 && 2457 fcport->port_type != FCT_INITIATOR && 2458 fcport->port_type != FCT_BROADCAST) { 2459 ha->isp_ops->fabric_logout(vha, 2460 fcport->loop_id, 2461 fcport->d_id.b.domain, 2462 fcport->d_id.b.area, 2463 fcport->d_id.b.al_pa); 2464 fcport->loop_id = FC_NO_LOOP_ID; 2465 } 2466 } 2467 } 2468 2469 /* Starting free loop ID. */ 2470 next_loopid = ha->min_external_loopid; 2471 2472 /* 2473 * Scan through our port list and login entries that need to be 2474 * logged in. 2475 */ 2476 list_for_each_entry(fcport, &vha->vp_fcports, list) { 2477 if (atomic_read(&vha->loop_down_timer) || 2478 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) 2479 break; 2480 2481 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 || 2482 (fcport->flags & FCF_LOGIN_NEEDED) == 0) 2483 continue; 2484 2485 if (fcport->loop_id == FC_NO_LOOP_ID) { 2486 fcport->loop_id = next_loopid; 2487 rval = qla2x00_find_new_loop_id( 2488 base_vha, fcport); 2489 if (rval != QLA_SUCCESS) { 2490 /* Ran out of IDs to use */ 2491 break; 2492 } 2493 } 2494 /* Login and update database */ 2495 qla2x00_fabric_dev_login(vha, fcport, &next_loopid); 2496 } 2497 2498 /* Exit if out of loop IDs. */ 2499 if (rval != QLA_SUCCESS) { 2500 break; 2501 } 2502 2503 /* 2504 * Login and add the new devices to our port list. 2505 */ 2506 list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) { 2507 if (atomic_read(&vha->loop_down_timer) || 2508 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) 2509 break; 2510 2511 /* Find a new loop ID to use. */ 2512 fcport->loop_id = next_loopid; 2513 rval = qla2x00_find_new_loop_id(base_vha, fcport); 2514 if (rval != QLA_SUCCESS) { 2515 /* Ran out of IDs to use */ 2516 break; 2517 } 2518 2519 /* Login and update database */ 2520 qla2x00_fabric_dev_login(vha, fcport, &next_loopid); 2521 2522 if (vha->vp_idx) { 2523 fcport->vha = vha; 2524 fcport->vp_idx = vha->vp_idx; 2525 } 2526 list_move_tail(&fcport->list, &vha->vp_fcports); 2527 } 2528 } while (0); 2529 2530 /* Free all new device structures not processed. */ 2531 list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) { 2532 list_del(&fcport->list); 2533 kfree(fcport); 2534 } 2535 2536 if (rval) { 2537 DEBUG2(printk("scsi(%ld): Configure fabric error exit: " 2538 "rval=%d\n", vha->host_no, rval)); 2539 } 2540 2541 return (rval); 2542 } 2543 2544 2545 /* 2546 * qla2x00_find_all_fabric_devs 2547 * 2548 * Input: 2549 * ha = adapter block pointer. 2550 * dev = database device entry pointer. 2551 * 2552 * Returns: 2553 * 0 = success. 2554 * 2555 * Context: 2556 * Kernel context. 2557 */ 2558 static int 2559 qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha, 2560 struct list_head *new_fcports) 2561 { 2562 int rval; 2563 uint16_t loop_id; 2564 fc_port_t *fcport, *new_fcport, *fcptemp; 2565 int found; 2566 2567 sw_info_t *swl; 2568 int swl_idx; 2569 int first_dev, last_dev; 2570 port_id_t wrap, nxt_d_id; 2571 struct qla_hw_data *ha = vha->hw; 2572 struct scsi_qla_host *vp, *base_vha = pci_get_drvdata(ha->pdev); 2573 struct scsi_qla_host *tvp; 2574 2575 rval = QLA_SUCCESS; 2576 2577 /* Try GID_PT to get device list, else GAN. 
*/ 2578 swl = kcalloc(MAX_FIBRE_DEVICES, sizeof(sw_info_t), GFP_KERNEL); 2579 if (!swl) { 2580 /*EMPTY*/ 2581 DEBUG2(printk("scsi(%ld): GID_PT allocations failed, fallback " 2582 "on GA_NXT\n", vha->host_no)); 2583 } else { 2584 if (qla2x00_gid_pt(vha, swl) != QLA_SUCCESS) { 2585 kfree(swl); 2586 swl = NULL; 2587 } else if (qla2x00_gpn_id(vha, swl) != QLA_SUCCESS) { 2588 kfree(swl); 2589 swl = NULL; 2590 } else if (qla2x00_gnn_id(vha, swl) != QLA_SUCCESS) { 2591 kfree(swl); 2592 swl = NULL; 2593 } else if (ql2xiidmaenable && 2594 qla2x00_gfpn_id(vha, swl) == QLA_SUCCESS) { 2595 qla2x00_gpsc(vha, swl); 2596 } 2597 } 2598 swl_idx = 0; 2599 2600 /* Allocate temporary fcport for any new fcports discovered. */ 2601 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); 2602 if (new_fcport == NULL) { 2603 kfree(swl); 2604 return (QLA_MEMORY_ALLOC_FAILED); 2605 } 2606 new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED); 2607 /* Set start port ID scan at adapter ID. */ 2608 first_dev = 1; 2609 last_dev = 0; 2610 2611 /* Starting free loop ID. */ 2612 loop_id = ha->min_external_loopid; 2613 for (; loop_id <= ha->max_loop_id; loop_id++) { 2614 if (qla2x00_is_reserved_id(vha, loop_id)) 2615 continue; 2616 2617 if (atomic_read(&vha->loop_down_timer) || LOOP_TRANSITION(vha)) 2618 break; 2619 2620 if (swl != NULL) { 2621 if (last_dev) { 2622 wrap.b24 = new_fcport->d_id.b24; 2623 } else { 2624 new_fcport->d_id.b24 = swl[swl_idx].d_id.b24; 2625 memcpy(new_fcport->node_name, 2626 swl[swl_idx].node_name, WWN_SIZE); 2627 memcpy(new_fcport->port_name, 2628 swl[swl_idx].port_name, WWN_SIZE); 2629 memcpy(new_fcport->fabric_port_name, 2630 swl[swl_idx].fabric_port_name, WWN_SIZE); 2631 new_fcport->fp_speed = swl[swl_idx].fp_speed; 2632 2633 if (swl[swl_idx].d_id.b.rsvd_1 != 0) { 2634 last_dev = 1; 2635 } 2636 swl_idx++; 2637 } 2638 } else { 2639 /* Send GA_NXT to the switch */ 2640 rval = qla2x00_ga_nxt(vha, new_fcport); 2641 if (rval != QLA_SUCCESS) { 2642 qla_printk(KERN_WARNING, ha, 2643 "SNS scan failed -- assuming zero-entry " 2644 "result...\n"); 2645 list_for_each_entry_safe(fcport, fcptemp, 2646 new_fcports, list) { 2647 list_del(&fcport->list); 2648 kfree(fcport); 2649 } 2650 rval = QLA_SUCCESS; 2651 break; 2652 } 2653 } 2654 2655 /* If wrap on switch device list, exit. */ 2656 if (first_dev) { 2657 wrap.b24 = new_fcport->d_id.b24; 2658 first_dev = 0; 2659 } else if (new_fcport->d_id.b24 == wrap.b24) { 2660 DEBUG2(printk("scsi(%ld): device wrap (%02x%02x%02x)\n", 2661 vha->host_no, new_fcport->d_id.b.domain, 2662 new_fcport->d_id.b.area, new_fcport->d_id.b.al_pa)); 2663 break; 2664 } 2665 2666 /* Bypass if same physical adapter. */ 2667 if (new_fcport->d_id.b24 == base_vha->d_id.b24) 2668 continue; 2669 2670 /* Bypass virtual ports of the same host. */ 2671 found = 0; 2672 if (ha->num_vhosts) { 2673 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { 2674 if (new_fcport->d_id.b24 == vp->d_id.b24) { 2675 found = 1; 2676 break; 2677 } 2678 } 2679 if (found) 2680 continue; 2681 } 2682 2683 /* Bypass if same domain and area of adapter. */ 2684 if (((new_fcport->d_id.b24 & 0xffff00) == 2685 (vha->d_id.b24 & 0xffff00)) && ha->current_topology == 2686 ISP_CFG_FL) 2687 continue; 2688 2689 /* Bypass reserved domain fields. */ 2690 if ((new_fcport->d_id.b.domain & 0xf0) == 0xf0) 2691 continue; 2692 2693 /* Locate matching device in database. 
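 */

/*
 * Illustrative sketch, not driver code: the termination test used by
 * the fabric scan above.  The switch database is walked one entry at
 * a time, so the scan remembers the first 24-bit port ID it saw and
 * stops once that ID comes around again.
 */
static int sketch_scan_wrapped(unsigned int cur_b24,
    unsigned int *first_b24, int *have_first)
{
	if (!*have_first) {
		*have_first = 1;
		*first_b24 = cur_b24;	/* remember where the walk started */
		return 0;
	}
	return cur_b24 == *first_b24;	/* wrapped: stop the scan */
}

/*
 * Look for an existing database entry with the same WWPN.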
*/ 2694 found = 0; 2695 list_for_each_entry(fcport, &vha->vp_fcports, list) { 2696 if (memcmp(new_fcport->port_name, fcport->port_name, 2697 WWN_SIZE)) 2698 continue; 2699 2700 found++; 2701 2702 /* Update port state. */ 2703 memcpy(fcport->fabric_port_name, 2704 new_fcport->fabric_port_name, WWN_SIZE); 2705 fcport->fp_speed = new_fcport->fp_speed; 2706 2707 /* 2708 * If address the same and state FCS_ONLINE, nothing 2709 * changed. 2710 */ 2711 if (fcport->d_id.b24 == new_fcport->d_id.b24 && 2712 atomic_read(&fcport->state) == FCS_ONLINE) { 2713 break; 2714 } 2715 2716 /* 2717 * If device was not a fabric device before. 2718 */ 2719 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) { 2720 fcport->d_id.b24 = new_fcport->d_id.b24; 2721 fcport->loop_id = FC_NO_LOOP_ID; 2722 fcport->flags |= (FCF_FABRIC_DEVICE | 2723 FCF_LOGIN_NEEDED); 2724 break; 2725 } 2726 2727 /* 2728 * Port ID changed or device was marked to be updated; 2729 * Log it out if still logged in and mark it for 2730 * relogin later. 2731 */ 2732 fcport->d_id.b24 = new_fcport->d_id.b24; 2733 fcport->flags |= FCF_LOGIN_NEEDED; 2734 if (fcport->loop_id != FC_NO_LOOP_ID && 2735 (fcport->flags & FCF_TAPE_PRESENT) == 0 && 2736 fcport->port_type != FCT_INITIATOR && 2737 fcport->port_type != FCT_BROADCAST) { 2738 ha->isp_ops->fabric_logout(vha, fcport->loop_id, 2739 fcport->d_id.b.domain, fcport->d_id.b.area, 2740 fcport->d_id.b.al_pa); 2741 fcport->loop_id = FC_NO_LOOP_ID; 2742 } 2743 2744 break; 2745 } 2746 2747 if (found) 2748 continue; 2749 /* If device was not in our fcports list, then add it. */ 2750 list_add_tail(&new_fcport->list, new_fcports); 2751 2752 /* Allocate a new replacement fcport. */ 2753 nxt_d_id.b24 = new_fcport->d_id.b24; 2754 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); 2755 if (new_fcport == NULL) { 2756 kfree(swl); 2757 return (QLA_MEMORY_ALLOC_FAILED); 2758 } 2759 new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED); 2760 new_fcport->d_id.b24 = nxt_d_id.b24; 2761 } 2762 2763 kfree(swl); 2764 kfree(new_fcport); 2765 2766 return (rval); 2767 } 2768 2769 /* 2770 * qla2x00_find_new_loop_id 2771 * Scan through our port list and find a new usable loop ID. 2772 * 2773 * Input: 2774 * ha: adapter state pointer. 2775 * dev: port structure pointer. 2776 * 2777 * Returns: 2778 * qla2x00 local function return status code. 2779 * 2780 * Context: 2781 * Kernel context. 2782 */ 2783 static int 2784 qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev) 2785 { 2786 int rval; 2787 int found; 2788 fc_port_t *fcport; 2789 uint16_t first_loop_id; 2790 struct qla_hw_data *ha = vha->hw; 2791 struct scsi_qla_host *vp; 2792 struct scsi_qla_host *tvp; 2793 2794 rval = QLA_SUCCESS; 2795 2796 /* Save starting loop ID. */ 2797 first_loop_id = dev->loop_id; 2798 2799 for (;;) { 2800 /* Skip loop ID if already used by adapter. */ 2801 if (dev->loop_id == vha->loop_id) 2802 dev->loop_id++; 2803 2804 /* Skip reserved loop IDs. */ 2805 while (qla2x00_is_reserved_id(vha, dev->loop_id)) 2806 dev->loop_id++; 2807 2808 /* Reset loop ID if passed the end. */ 2809 if (dev->loop_id > ha->max_loop_id) { 2810 /* first loop ID. */ 2811 dev->loop_id = ha->min_external_loopid; 2812 } 2813 2814 /* Check for loop ID being already in use. 
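 */

/*
 * Illustrative sketch, not driver code: the search performed by
 * qla2x00_find_new_loop_id(), restructured as a bounded scan.  Every
 * candidate in [min_id, max_id] is visited exactly once starting from
 * the caller's hint, reserved IDs are skipped, and the search fails
 * only when every usable ID is taken.  The two predicates stand in
 * for the driver's reserved-ID and in-use checks; the sketch assumes
 * min_id <= start <= max_id.
 */
static int sketch_find_free_id(unsigned int start, unsigned int min_id,
    unsigned int max_id, int (*is_reserved)(unsigned int),
    int (*is_in_use)(unsigned int), unsigned int *out)
{
	unsigned int span = max_id - min_id + 1;
	unsigned int i, id;

	for (i = 0; i < span; i++) {
		id = start + i;
		if (id > max_id)			/* wrap past the end */
			id = min_id + (id - max_id - 1);
		if (is_reserved(id) || is_in_use(id))
			continue;
		*out = id;
		return 0;				/* free ID found */
	}
	return -1;					/* every ID is in use */
}

/*
 * Scan every virtual port's list for a collision on this ID.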
*/ 2815 found = 0; 2816 fcport = NULL; 2817 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { 2818 list_for_each_entry(fcport, &vp->vp_fcports, list) { 2819 if (fcport->loop_id == dev->loop_id && 2820 fcport != dev) { 2821 /* ID possibly in use */ 2822 found++; 2823 break; 2824 } 2825 } 2826 if (found) 2827 break; 2828 } 2829 2830 /* If not in use then it is free to use. */ 2831 if (!found) { 2832 break; 2833 } 2834 2835 /* ID in use. Try next value. */ 2836 dev->loop_id++; 2837 2838 /* If wrap around. No free ID to use. */ 2839 if (dev->loop_id == first_loop_id) { 2840 dev->loop_id = FC_NO_LOOP_ID; 2841 rval = QLA_FUNCTION_FAILED; 2842 break; 2843 } 2844 } 2845 2846 return (rval); 2847 } 2848 2849 /* 2850 * qla2x00_device_resync 2851 * Marks devices in the database that needs resynchronization. 2852 * 2853 * Input: 2854 * ha = adapter block pointer. 2855 * 2856 * Context: 2857 * Kernel context. 2858 */ 2859 static int 2860 qla2x00_device_resync(scsi_qla_host_t *vha) 2861 { 2862 int rval; 2863 uint32_t mask; 2864 fc_port_t *fcport; 2865 uint32_t rscn_entry; 2866 uint8_t rscn_out_iter; 2867 uint8_t format; 2868 port_id_t d_id; 2869 2870 rval = QLA_RSCNS_HANDLED; 2871 2872 while (vha->rscn_out_ptr != vha->rscn_in_ptr || 2873 vha->flags.rscn_queue_overflow) { 2874 2875 rscn_entry = vha->rscn_queue[vha->rscn_out_ptr]; 2876 format = MSB(MSW(rscn_entry)); 2877 d_id.b.domain = LSB(MSW(rscn_entry)); 2878 d_id.b.area = MSB(LSW(rscn_entry)); 2879 d_id.b.al_pa = LSB(LSW(rscn_entry)); 2880 2881 DEBUG(printk("scsi(%ld): RSCN queue entry[%d] = " 2882 "[%02x/%02x%02x%02x].\n", 2883 vha->host_no, vha->rscn_out_ptr, format, d_id.b.domain, 2884 d_id.b.area, d_id.b.al_pa)); 2885 2886 vha->rscn_out_ptr++; 2887 if (vha->rscn_out_ptr == MAX_RSCN_COUNT) 2888 vha->rscn_out_ptr = 0; 2889 2890 /* Skip duplicate entries. */ 2891 for (rscn_out_iter = vha->rscn_out_ptr; 2892 !vha->flags.rscn_queue_overflow && 2893 rscn_out_iter != vha->rscn_in_ptr; 2894 rscn_out_iter = (rscn_out_iter == 2895 (MAX_RSCN_COUNT - 1)) ? 0: rscn_out_iter + 1) { 2896 2897 if (rscn_entry != vha->rscn_queue[rscn_out_iter]) 2898 break; 2899 2900 DEBUG(printk("scsi(%ld): Skipping duplicate RSCN queue " 2901 "entry found at [%d].\n", vha->host_no, 2902 rscn_out_iter)); 2903 2904 vha->rscn_out_ptr = rscn_out_iter; 2905 } 2906 2907 /* Queue overflow, set switch default case. */ 2908 if (vha->flags.rscn_queue_overflow) { 2909 DEBUG(printk("scsi(%ld): device_resync: rscn " 2910 "overflow.\n", vha->host_no)); 2911 2912 format = 3; 2913 vha->flags.rscn_queue_overflow = 0; 2914 } 2915 2916 switch (format) { 2917 case 0: 2918 mask = 0xffffff; 2919 break; 2920 case 1: 2921 mask = 0xffff00; 2922 break; 2923 case 2: 2924 mask = 0xff0000; 2925 break; 2926 default: 2927 mask = 0x0; 2928 d_id.b24 = 0; 2929 vha->rscn_out_ptr = vha->rscn_in_ptr; 2930 break; 2931 } 2932 2933 rval = QLA_SUCCESS; 2934 2935 list_for_each_entry(fcport, &vha->vp_fcports, list) { 2936 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 || 2937 (fcport->d_id.b24 & mask) != d_id.b24 || 2938 fcport->port_type == FCT_BROADCAST) 2939 continue; 2940 2941 if (atomic_read(&fcport->state) == FCS_ONLINE) { 2942 if (format != 3 || 2943 fcport->port_type != FCT_INITIATOR) { 2944 qla2x00_mark_device_lost(vha, fcport, 2945 0, 0); 2946 } 2947 } 2948 } 2949 } 2950 return (rval); 2951 } 2952 2953 /* 2954 * qla2x00_fabric_dev_login 2955 * Login fabric target device and update FC port database. 2956 * 2957 * Input: 2958 * ha: adapter state pointer. 2959 * fcport: port structure list pointer. 
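 */

/*
 * Illustrative sketch, not driver code: the RSCN "format" field
 * handled in qla2x00_device_resync() above selects how much of the
 * 24-bit address an event applies to.  Format 0 names a single port,
 * 1 an area, 2 a whole domain, and anything else is treated as a
 * fabric-wide event.  A port is affected when (port_id & mask) equals
 * the event address, which is why the fabric-wide case uses an empty
 * mask and a zero address.
 */
static unsigned int sketch_rscn_mask(unsigned int format)
{
	switch (format) {
	case 0:
		return 0xffffff;	/* exact port address */
	case 1:
		return 0xffff00;	/* domain + area */
	case 2:
		return 0xff0000;	/* domain only */
	default:
		return 0x000000;	/* fabric-wide: match everything */
	}
}

/*
 * Input (continued):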
2960 * next_loopid: contains value of a new loop ID that can be used 2961 * by the next login attempt. 2962 * 2963 * Returns: 2964 * qla2x00 local function return status code. 2965 * 2966 * Context: 2967 * Kernel context. 2968 */ 2969 static int 2970 qla2x00_fabric_dev_login(scsi_qla_host_t *vha, fc_port_t *fcport, 2971 uint16_t *next_loopid) 2972 { 2973 int rval; 2974 int retry; 2975 uint8_t opts; 2976 struct qla_hw_data *ha = vha->hw; 2977 2978 rval = QLA_SUCCESS; 2979 retry = 0; 2980 2981 rval = qla2x00_fabric_login(vha, fcport, next_loopid); 2982 if (rval == QLA_SUCCESS) { 2983 /* Send an ADISC to tape devices.*/ 2984 opts = 0; 2985 if (fcport->flags & FCF_TAPE_PRESENT) 2986 opts |= BIT_1; 2987 rval = qla2x00_get_port_database(vha, fcport, opts); 2988 if (rval != QLA_SUCCESS) { 2989 ha->isp_ops->fabric_logout(vha, fcport->loop_id, 2990 fcport->d_id.b.domain, fcport->d_id.b.area, 2991 fcport->d_id.b.al_pa); 2992 qla2x00_mark_device_lost(vha, fcport, 1, 0); 2993 } else { 2994 qla2x00_update_fcport(vha, fcport); 2995 } 2996 } 2997 2998 return (rval); 2999 } 3000 3001 /* 3002 * qla2x00_fabric_login 3003 * Issue fabric login command. 3004 * 3005 * Input: 3006 * ha = adapter block pointer. 3007 * device = pointer to FC device type structure. 3008 * 3009 * Returns: 3010 * 0 - Login successfully 3011 * 1 - Login failed 3012 * 2 - Initiator device 3013 * 3 - Fatal error 3014 */ 3015 int 3016 qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport, 3017 uint16_t *next_loopid) 3018 { 3019 int rval; 3020 int retry; 3021 uint16_t tmp_loopid; 3022 uint16_t mb[MAILBOX_REGISTER_COUNT]; 3023 struct qla_hw_data *ha = vha->hw; 3024 3025 retry = 0; 3026 tmp_loopid = 0; 3027 3028 for (;;) { 3029 DEBUG(printk("scsi(%ld): Trying Fabric Login w/loop id 0x%04x " 3030 "for port %02x%02x%02x.\n", 3031 vha->host_no, fcport->loop_id, fcport->d_id.b.domain, 3032 fcport->d_id.b.area, fcport->d_id.b.al_pa)); 3033 3034 /* Login fcport on switch. */ 3035 ha->isp_ops->fabric_login(vha, fcport->loop_id, 3036 fcport->d_id.b.domain, fcport->d_id.b.area, 3037 fcport->d_id.b.al_pa, mb, BIT_0); 3038 if (mb[0] == MBS_PORT_ID_USED) { 3039 /* 3040 * Device has another loop ID. The firmware team 3041 * recommends the driver perform an implicit login with 3042 * the specified ID again. The ID we just used is save 3043 * here so we return with an ID that can be tried by 3044 * the next login. 3045 */ 3046 retry++; 3047 tmp_loopid = fcport->loop_id; 3048 fcport->loop_id = mb[1]; 3049 3050 DEBUG(printk("Fabric Login: port in use - next " 3051 "loop id=0x%04x, port Id=%02x%02x%02x.\n", 3052 fcport->loop_id, fcport->d_id.b.domain, 3053 fcport->d_id.b.area, fcport->d_id.b.al_pa)); 3054 3055 } else if (mb[0] == MBS_COMMAND_COMPLETE) { 3056 /* 3057 * Login succeeded. 3058 */ 3059 if (retry) { 3060 /* A retry occurred before. */ 3061 *next_loopid = tmp_loopid; 3062 } else { 3063 /* 3064 * No retry occurred before. Just increment the 3065 * ID value for next login. 3066 */ 3067 *next_loopid = (fcport->loop_id + 1); 3068 } 3069 3070 if (mb[1] & BIT_0) { 3071 fcport->port_type = FCT_INITIATOR; 3072 } else { 3073 fcport->port_type = FCT_TARGET; 3074 if (mb[1] & BIT_1) { 3075 fcport->flags |= FCF_TAPE_PRESENT; 3076 } 3077 } 3078 3079 if (mb[10] & BIT_0) 3080 fcport->supported_classes |= FC_COS_CLASS2; 3081 if (mb[10] & BIT_1) 3082 fcport->supported_classes |= FC_COS_CLASS3; 3083 3084 rval = QLA_SUCCESS; 3085 break; 3086 } else if (mb[0] == MBS_LOOP_ID_USED) { 3087 /* 3088 * Loop ID already used, try next loop ID. 
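 */

/*
 * Illustrative sketch, not driver code: how a successful fabric login
 * above is decoded.  mb[1] bit 0 marks the remote port as an
 * initiator rather than a target, mb[1] bit 1 flags an FCP-2 (tape
 * class) target, and mb[10] bits 0 and 1 advertise class 2 and
 * class 3 service.  The struct stands in for the fcport fields the
 * driver fills from the mailbox registers.
 */
struct sketch_login_info {
	int is_initiator;
	int is_tape;
	int class2;
	int class3;
};

static void sketch_decode_login(unsigned int mb1, unsigned int mb10,
    struct sketch_login_info *out)
{
	out->is_initiator = (mb1 & 0x1) != 0;
	out->is_tape = !out->is_initiator && (mb1 & 0x2) != 0;
	out->class2 = (mb10 & 0x1) != 0;
	out->class3 = (mb10 & 0x2) != 0;
}

/*
 * Bump the loop ID and retry the login with a fresh value.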
3089 */ 3090 fcport->loop_id++; 3091 rval = qla2x00_find_new_loop_id(vha, fcport); 3092 if (rval != QLA_SUCCESS) { 3093 /* Ran out of loop IDs to use */ 3094 break; 3095 } 3096 } else if (mb[0] == MBS_COMMAND_ERROR) { 3097 /* 3098 * Firmware possibly timed out during login. If NO 3099 * retries are left to do then the device is declared 3100 * dead. 3101 */ 3102 *next_loopid = fcport->loop_id; 3103 ha->isp_ops->fabric_logout(vha, fcport->loop_id, 3104 fcport->d_id.b.domain, fcport->d_id.b.area, 3105 fcport->d_id.b.al_pa); 3106 qla2x00_mark_device_lost(vha, fcport, 1, 0); 3107 3108 rval = 1; 3109 break; 3110 } else { 3111 /* 3112 * unrecoverable / not handled error 3113 */ 3114 DEBUG2(printk("%s(%ld): failed=%x port_id=%02x%02x%02x " 3115 "loop_id=%x jiffies=%lx.\n", 3116 __func__, vha->host_no, mb[0], 3117 fcport->d_id.b.domain, fcport->d_id.b.area, 3118 fcport->d_id.b.al_pa, fcport->loop_id, jiffies)); 3119 3120 *next_loopid = fcport->loop_id; 3121 ha->isp_ops->fabric_logout(vha, fcport->loop_id, 3122 fcport->d_id.b.domain, fcport->d_id.b.area, 3123 fcport->d_id.b.al_pa); 3124 fcport->loop_id = FC_NO_LOOP_ID; 3125 fcport->login_retry = 0; 3126 3127 rval = 3; 3128 break; 3129 } 3130 } 3131 3132 return (rval); 3133 } 3134 3135 /* 3136 * qla2x00_local_device_login 3137 * Issue local device login command. 3138 * 3139 * Input: 3140 * ha = adapter block pointer. 3141 * loop_id = loop id of device to login to. 3142 * 3143 * Returns (Where's the #define!!!!): 3144 * 0 - Login successfully 3145 * 1 - Login failed 3146 * 3 - Fatal error 3147 */ 3148 int 3149 qla2x00_local_device_login(scsi_qla_host_t *vha, fc_port_t *fcport) 3150 { 3151 int rval; 3152 uint16_t mb[MAILBOX_REGISTER_COUNT]; 3153 3154 memset(mb, 0, sizeof(mb)); 3155 rval = qla2x00_login_local_device(vha, fcport, mb, BIT_0); 3156 if (rval == QLA_SUCCESS) { 3157 /* Interrogate mailbox registers for any errors */ 3158 if (mb[0] == MBS_COMMAND_ERROR) 3159 rval = 1; 3160 else if (mb[0] == MBS_COMMAND_PARAMETER_ERROR) 3161 /* device not in PCB table */ 3162 rval = 3; 3163 } 3164 3165 return (rval); 3166 } 3167 3168 /* 3169 * qla2x00_loop_resync 3170 * Resync with fibre channel devices. 3171 * 3172 * Input: 3173 * ha = adapter block pointer. 3174 * 3175 * Returns: 3176 * 0 = success 3177 */ 3178 int 3179 qla2x00_loop_resync(scsi_qla_host_t *vha) 3180 { 3181 int rval = QLA_SUCCESS; 3182 uint32_t wait_time; 3183 struct qla_hw_data *ha = vha->hw; 3184 struct req_que *req = ha->req_q_map[vha->req_ques[0]]; 3185 struct rsp_que *rsp = req->rsp; 3186 3187 atomic_set(&vha->loop_state, LOOP_UPDATE); 3188 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags); 3189 if (vha->flags.online) { 3190 if (!(rval = qla2x00_fw_ready(vha))) { 3191 /* Wait at most MAX_TARGET RSCNs for a stable link. */ 3192 wait_time = 256; 3193 do { 3194 atomic_set(&vha->loop_state, LOOP_UPDATE); 3195 3196 /* Issue a marker after FW becomes ready. */ 3197 qla2x00_marker(vha, req, rsp, 0, 0, 3198 MK_SYNC_ALL); 3199 vha->marker_needed = 0; 3200 3201 /* Remap devices on Loop. 
*/ 3202 clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 3203 3204 qla2x00_configure_loop(vha); 3205 wait_time--; 3206 } while (!atomic_read(&vha->loop_down_timer) && 3207 !(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) 3208 && wait_time && (test_bit(LOOP_RESYNC_NEEDED, 3209 &vha->dpc_flags))); 3210 } 3211 } 3212 3213 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) 3214 return (QLA_FUNCTION_FAILED); 3215 3216 if (rval) 3217 DEBUG2_3(printk("%s(): **** FAILED ****\n", __func__)); 3218 3219 return (rval); 3220 } 3221 3222 void 3223 qla2x00_update_fcports(scsi_qla_host_t *vha) 3224 { 3225 fc_port_t *fcport; 3226 3227 /* Go with deferred removal of rport references. */ 3228 list_for_each_entry(fcport, &vha->vp_fcports, list) 3229 if (fcport && fcport->drport && 3230 atomic_read(&fcport->state) != FCS_UNCONFIGURED) 3231 qla2x00_rport_del(fcport); 3232 } 3233 3234 /* 3235 * qla2x00_abort_isp 3236 * Resets ISP and aborts all outstanding commands. 3237 * 3238 * Input: 3239 * ha = adapter block pointer. 3240 * 3241 * Returns: 3242 * 0 = success 3243 */ 3244 int 3245 qla2x00_abort_isp(scsi_qla_host_t *vha) 3246 { 3247 int rval; 3248 uint8_t status = 0; 3249 struct qla_hw_data *ha = vha->hw; 3250 struct scsi_qla_host *vp; 3251 struct scsi_qla_host *tvp; 3252 struct req_que *req = ha->req_q_map[0]; 3253 3254 if (vha->flags.online) { 3255 vha->flags.online = 0; 3256 ha->flags.chip_reset_done = 0; 3257 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 3258 ha->qla_stats.total_isp_aborts++; 3259 3260 qla_printk(KERN_INFO, ha, 3261 "Performing ISP error recovery - ha= %p.\n", ha); 3262 ha->isp_ops->reset_chip(vha); 3263 3264 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 3265 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 3266 atomic_set(&vha->loop_state, LOOP_DOWN); 3267 qla2x00_mark_all_devices_lost(vha, 0); 3268 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) 3269 qla2x00_mark_all_devices_lost(vp, 0); 3270 } else { 3271 if (!atomic_read(&vha->loop_down_timer)) 3272 atomic_set(&vha->loop_down_timer, 3273 LOOP_DOWN_TIME); 3274 } 3275 3276 /* Requeue all commands in outstanding command list. */ 3277 qla2x00_abort_all_cmds(vha, DID_RESET << 16); 3278 3279 ha->isp_ops->get_flash_version(vha, req->ring); 3280 3281 ha->isp_ops->nvram_config(vha); 3282 3283 if (!qla2x00_restart_isp(vha)) { 3284 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); 3285 3286 if (!atomic_read(&vha->loop_down_timer)) { 3287 /* 3288 * Issue marker command only when we are going 3289 * to start the I/O . 
3290 */ 3291 vha->marker_needed = 1; 3292 } 3293 3294 vha->flags.online = 1; 3295 3296 ha->isp_ops->enable_intrs(ha); 3297 3298 ha->isp_abort_cnt = 0; 3299 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags); 3300 3301 if (ha->fce) { 3302 ha->flags.fce_enabled = 1; 3303 memset(ha->fce, 0, 3304 fce_calc_size(ha->fce_bufs)); 3305 rval = qla2x00_enable_fce_trace(vha, 3306 ha->fce_dma, ha->fce_bufs, ha->fce_mb, 3307 &ha->fce_bufs); 3308 if (rval) { 3309 qla_printk(KERN_WARNING, ha, 3310 "Unable to reinitialize FCE " 3311 "(%d).\n", rval); 3312 ha->flags.fce_enabled = 0; 3313 } 3314 } 3315 3316 if (ha->eft) { 3317 memset(ha->eft, 0, EFT_SIZE); 3318 rval = qla2x00_enable_eft_trace(vha, 3319 ha->eft_dma, EFT_NUM_BUFFERS); 3320 if (rval) { 3321 qla_printk(KERN_WARNING, ha, 3322 "Unable to reinitialize EFT " 3323 "(%d).\n", rval); 3324 } 3325 } 3326 } else { /* failed the ISP abort */ 3327 vha->flags.online = 1; 3328 if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) { 3329 if (ha->isp_abort_cnt == 0) { 3330 qla_printk(KERN_WARNING, ha, 3331 "ISP error recovery failed - " 3332 "board disabled\n"); 3333 /* 3334 * The next call disables the board 3335 * completely. 3336 */ 3337 ha->isp_ops->reset_adapter(vha); 3338 vha->flags.online = 0; 3339 clear_bit(ISP_ABORT_RETRY, 3340 &vha->dpc_flags); 3341 status = 0; 3342 } else { /* schedule another ISP abort */ 3343 ha->isp_abort_cnt--; 3344 DEBUG(printk("qla%ld: ISP abort - " 3345 "retry remaining %d\n", 3346 vha->host_no, ha->isp_abort_cnt)); 3347 status = 1; 3348 } 3349 } else { 3350 ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT; 3351 DEBUG(printk("qla2x00(%ld): ISP error recovery " 3352 "- retrying (%d) more times\n", 3353 vha->host_no, ha->isp_abort_cnt)); 3354 set_bit(ISP_ABORT_RETRY, &vha->dpc_flags); 3355 status = 1; 3356 } 3357 } 3358 3359 } 3360 3361 if (!status) { 3362 DEBUG(printk(KERN_INFO 3363 "qla2x00_abort_isp(%ld): succeeded.\n", 3364 vha->host_no)); 3365 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { 3366 if (vp->vp_idx) 3367 qla2x00_vp_abort_isp(vp); 3368 } 3369 } else { 3370 qla_printk(KERN_INFO, ha, 3371 "qla2x00_abort_isp: **** FAILED ****\n"); 3372 } 3373 3374 return(status); 3375 } 3376 3377 /* 3378 * qla2x00_restart_isp 3379 * restarts the ISP after a reset 3380 * 3381 * Input: 3382 * ha = adapter block pointer. 3383 * 3384 * Returns: 3385 * 0 = success 3386 */ 3387 static int 3388 qla2x00_restart_isp(scsi_qla_host_t *vha) 3389 { 3390 int status = 0; 3391 uint32_t wait_time; 3392 struct qla_hw_data *ha = vha->hw; 3393 struct req_que *req = ha->req_q_map[0]; 3394 struct rsp_que *rsp = ha->rsp_q_map[0]; 3395 3396 /* If firmware needs to be loaded */ 3397 if (qla2x00_isp_firmware(vha)) { 3398 vha->flags.online = 0; 3399 status = ha->isp_ops->chip_diag(vha); 3400 if (!status) 3401 status = qla2x00_setup_chip(vha); 3402 } 3403 3404 if (!status && !(status = qla2x00_init_rings(vha))) { 3405 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); 3406 ha->flags.chip_reset_done = 1; 3407 /* Initialize the queues in use */ 3408 qla25xx_init_queues(ha); 3409 3410 status = qla2x00_fw_ready(vha); 3411 if (!status) { 3412 DEBUG(printk("%s(): Start configure loop, " 3413 "status = %d\n", __func__, status)); 3414 3415 /* Issue a marker after FW becomes ready. */ 3416 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL); 3417 3418 vha->flags.online = 1; 3419 /* Wait at most MAX_TARGET RSCNs for a stable link. 
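 */

/*
 * Illustrative sketch, not driver code: the retry accounting used by
 * qla2x00_abort_isp() above when recovery fails.  The first failure
 * arms a retry counter, each later failure consumes one retry, and
 * when the counter is exhausted the board is disabled instead of
 * being retried forever.  SKETCH_MAX_ABORT_RETRIES is a stand-in for
 * the driver's MAX_RETRIES_OF_ISP_ABORT.
 */
#define SKETCH_MAX_ABORT_RETRIES	3

enum sketch_abort_action {
	SKETCH_RETRY_ABORT,
	SKETCH_DISABLE_BOARD
};

static enum sketch_abort_action sketch_abort_failed(int *retry_armed,
    unsigned int *retries_left)
{
	if (!*retry_armed) {
		*retry_armed = 1;			/* first failure */
		*retries_left = SKETCH_MAX_ABORT_RETRIES;
		return SKETCH_RETRY_ABORT;
	}
	if (*retries_left == 0)
		return SKETCH_DISABLE_BOARD;		/* give up */
	(*retries_left)--;
	return SKETCH_RETRY_ABORT;
}

/*
 * Bounded retries: give the link time to settle.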
*/ 3420 wait_time = 256; 3421 do { 3422 clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 3423 qla2x00_configure_loop(vha); 3424 wait_time--; 3425 } while (!atomic_read(&vha->loop_down_timer) && 3426 !(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) 3427 && wait_time && (test_bit(LOOP_RESYNC_NEEDED, 3428 &vha->dpc_flags))); 3429 } 3430 3431 /* if no cable then assume it's good */ 3432 if ((vha->device_flags & DFLG_NO_CABLE)) 3433 status = 0; 3434 3435 DEBUG(printk("%s(): Configure loop done, status = 0x%x\n", 3436 __func__, 3437 status)); 3438 } 3439 return (status); 3440 } 3441 3442 static int 3443 qla25xx_init_queues(struct qla_hw_data *ha) 3444 { 3445 struct rsp_que *rsp = NULL; 3446 struct req_que *req = NULL; 3447 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 3448 int ret = -1; 3449 int i; 3450 3451 for (i = 1; i < ha->max_queues; i++) { 3452 rsp = ha->rsp_q_map[i]; 3453 if (rsp) { 3454 rsp->options &= ~BIT_0; 3455 ret = qla25xx_init_rsp_que(base_vha, rsp); 3456 if (ret != QLA_SUCCESS) 3457 DEBUG2_17(printk(KERN_WARNING 3458 "%s Rsp que:%d init failed\n", __func__, 3459 rsp->id)); 3460 else 3461 DEBUG2_17(printk(KERN_INFO 3462 "%s Rsp que:%d inited\n", __func__, 3463 rsp->id)); 3464 } 3465 req = ha->req_q_map[i]; 3466 if (req) { 3467 /* Clear outstanding commands array. */ 3468 req->options &= ~BIT_0; 3469 ret = qla25xx_init_req_que(base_vha, req); 3470 if (ret != QLA_SUCCESS) 3471 DEBUG2_17(printk(KERN_WARNING 3472 "%s Req que:%d init failed\n", __func__, 3473 req->id)); 3474 else 3475 DEBUG2_17(printk(KERN_WARNING 3476 "%s Req que:%d inited\n", __func__, 3477 req->id)); 3478 } 3479 } 3480 return ret; 3481 } 3482 3483 /* 3484 * qla2x00_reset_adapter 3485 * Reset adapter. 3486 * 3487 * Input: 3488 * ha = adapter block pointer. 3489 */ 3490 void 3491 qla2x00_reset_adapter(scsi_qla_host_t *vha) 3492 { 3493 unsigned long flags = 0; 3494 struct qla_hw_data *ha = vha->hw; 3495 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 3496 3497 vha->flags.online = 0; 3498 ha->isp_ops->disable_intrs(ha); 3499 3500 spin_lock_irqsave(&ha->hardware_lock, flags); 3501 WRT_REG_WORD(®->hccr, HCCR_RESET_RISC); 3502 RD_REG_WORD(®->hccr); /* PCI Posting. */ 3503 WRT_REG_WORD(®->hccr, HCCR_RELEASE_RISC); 3504 RD_REG_WORD(®->hccr); /* PCI Posting. */ 3505 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3506 } 3507 3508 void 3509 qla24xx_reset_adapter(scsi_qla_host_t *vha) 3510 { 3511 unsigned long flags = 0; 3512 struct qla_hw_data *ha = vha->hw; 3513 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 3514 3515 vha->flags.online = 0; 3516 ha->isp_ops->disable_intrs(ha); 3517 3518 spin_lock_irqsave(&ha->hardware_lock, flags); 3519 WRT_REG_DWORD(®->hccr, HCCRX_SET_RISC_RESET); 3520 RD_REG_DWORD(®->hccr); 3521 WRT_REG_DWORD(®->hccr, HCCRX_REL_RISC_PAUSE); 3522 RD_REG_DWORD(®->hccr); 3523 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3524 3525 if (IS_NOPOLLING_TYPE(ha)) 3526 ha->isp_ops->enable_intrs(ha); 3527 } 3528 3529 /* On sparc systems, obtain port and node WWN from firmware 3530 * properties. 
3531 */ 3532 static void qla24xx_nvram_wwn_from_ofw(scsi_qla_host_t *vha, 3533 struct nvram_24xx *nv) 3534 { 3535 #ifdef CONFIG_SPARC 3536 struct qla_hw_data *ha = vha->hw; 3537 struct pci_dev *pdev = ha->pdev; 3538 struct device_node *dp = pci_device_to_OF_node(pdev); 3539 const u8 *val; 3540 int len; 3541 3542 val = of_get_property(dp, "port-wwn", &len); 3543 if (val && len >= WWN_SIZE) 3544 memcpy(nv->port_name, val, WWN_SIZE); 3545 3546 val = of_get_property(dp, "node-wwn", &len); 3547 if (val && len >= WWN_SIZE) 3548 memcpy(nv->node_name, val, WWN_SIZE); 3549 #endif 3550 } 3551 3552 int 3553 qla24xx_nvram_config(scsi_qla_host_t *vha) 3554 { 3555 int rval; 3556 struct init_cb_24xx *icb; 3557 struct nvram_24xx *nv; 3558 uint32_t *dptr; 3559 uint8_t *dptr1, *dptr2; 3560 uint32_t chksum; 3561 uint16_t cnt; 3562 struct qla_hw_data *ha = vha->hw; 3563 3564 rval = QLA_SUCCESS; 3565 icb = (struct init_cb_24xx *)ha->init_cb; 3566 nv = ha->nvram; 3567 3568 /* Determine NVRAM starting address. */ 3569 ha->nvram_size = sizeof(struct nvram_24xx); 3570 ha->nvram_base = FA_NVRAM_FUNC0_ADDR; 3571 ha->vpd_size = FA_NVRAM_VPD_SIZE; 3572 ha->vpd_base = FA_NVRAM_VPD0_ADDR; 3573 if (PCI_FUNC(ha->pdev->devfn)) { 3574 ha->nvram_base = FA_NVRAM_FUNC1_ADDR; 3575 ha->vpd_base = FA_NVRAM_VPD1_ADDR; 3576 } 3577 3578 /* Get VPD data into cache */ 3579 ha->vpd = ha->nvram + VPD_OFFSET; 3580 ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd, 3581 ha->nvram_base - FA_NVRAM_FUNC0_ADDR, FA_NVRAM_VPD_SIZE * 4); 3582 3583 /* Get NVRAM data into cache and calculate checksum. */ 3584 dptr = (uint32_t *)nv; 3585 ha->isp_ops->read_nvram(vha, (uint8_t *)dptr, ha->nvram_base, 3586 ha->nvram_size); 3587 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++) 3588 chksum += le32_to_cpu(*dptr++); 3589 3590 DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", ha->host_no)); 3591 DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size)); 3592 3593 /* Bad NVRAM data, set defaults parameters. */ 3594 if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P' 3595 || nv->id[3] != ' ' || 3596 nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) { 3597 /* Reset NVRAM data. */ 3598 qla_printk(KERN_WARNING, ha, "Inconsistent NVRAM detected: " 3599 "checksum=0x%x id=%c version=0x%x.\n", chksum, nv->id[0], 3600 le16_to_cpu(nv->nvram_version)); 3601 qla_printk(KERN_WARNING, ha, "Falling back to functioning (yet " 3602 "invalid -- WWPN) defaults.\n"); 3603 3604 /* 3605 * Set default initialization control block. 
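 */

/*
 * Illustrative sketch, not driver code: the NVRAM validity test used
 * above.  The image is summed as little-endian 32-bit words and a
 * good image sums to zero (the stored checksum word is chosen to make
 * that true); the first four bytes must also spell "ISP ".  The
 * driver additionally checks a version field, which this sketch
 * leaves out, and the byte-wise load below stands in for
 * le32_to_cpu().
 */
static unsigned int sketch_le32(const unsigned char *p)
{
	return (unsigned int)p[0] | ((unsigned int)p[1] << 8) |
	       ((unsigned int)p[2] << 16) | ((unsigned int)p[3] << 24);
}

static int sketch_nvram_valid(const unsigned char *img, unsigned int len)
{
	unsigned int sum = 0, i;

	if (len < 4)
		return 0;			/* too short to carry an ID */

	for (i = 0; i + 4 <= len; i += 4)
		sum += sketch_le32(img + i);	/* wraps modulo 2^32 */

	return sum == 0 &&
	    img[0] == 'I' && img[1] == 'S' && img[2] == 'P' && img[3] == ' ';
}

/*
 * Start from a zeroed image and fill in safe defaults.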
3606 */ 3607 memset(nv, 0, ha->nvram_size); 3608 nv->nvram_version = __constant_cpu_to_le16(ICB_VERSION); 3609 nv->version = __constant_cpu_to_le16(ICB_VERSION); 3610 nv->frame_payload_size = __constant_cpu_to_le16(2048); 3611 nv->execution_throttle = __constant_cpu_to_le16(0xFFFF); 3612 nv->exchange_count = __constant_cpu_to_le16(0); 3613 nv->hard_address = __constant_cpu_to_le16(124); 3614 nv->port_name[0] = 0x21; 3615 nv->port_name[1] = 0x00 + PCI_FUNC(ha->pdev->devfn); 3616 nv->port_name[2] = 0x00; 3617 nv->port_name[3] = 0xe0; 3618 nv->port_name[4] = 0x8b; 3619 nv->port_name[5] = 0x1c; 3620 nv->port_name[6] = 0x55; 3621 nv->port_name[7] = 0x86; 3622 nv->node_name[0] = 0x20; 3623 nv->node_name[1] = 0x00; 3624 nv->node_name[2] = 0x00; 3625 nv->node_name[3] = 0xe0; 3626 nv->node_name[4] = 0x8b; 3627 nv->node_name[5] = 0x1c; 3628 nv->node_name[6] = 0x55; 3629 nv->node_name[7] = 0x86; 3630 qla24xx_nvram_wwn_from_ofw(vha, nv); 3631 nv->login_retry_count = __constant_cpu_to_le16(8); 3632 nv->interrupt_delay_timer = __constant_cpu_to_le16(0); 3633 nv->login_timeout = __constant_cpu_to_le16(0); 3634 nv->firmware_options_1 = 3635 __constant_cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1); 3636 nv->firmware_options_2 = __constant_cpu_to_le32(2 << 4); 3637 nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12); 3638 nv->firmware_options_3 = __constant_cpu_to_le32(2 << 13); 3639 nv->host_p = __constant_cpu_to_le32(BIT_11|BIT_10); 3640 nv->efi_parameters = __constant_cpu_to_le32(0); 3641 nv->reset_delay = 5; 3642 nv->max_luns_per_target = __constant_cpu_to_le16(128); 3643 nv->port_down_retry_count = __constant_cpu_to_le16(30); 3644 nv->link_down_timeout = __constant_cpu_to_le16(30); 3645 3646 rval = 1; 3647 } 3648 3649 /* Reset Initialization control block */ 3650 memset(icb, 0, ha->init_cb_size); 3651 3652 /* Copy 1st segment. */ 3653 dptr1 = (uint8_t *)icb; 3654 dptr2 = (uint8_t *)&nv->version; 3655 cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version; 3656 while (cnt--) 3657 *dptr1++ = *dptr2++; 3658 3659 icb->login_retry_count = nv->login_retry_count; 3660 icb->link_down_on_nos = nv->link_down_on_nos; 3661 3662 /* Copy 2nd segment. */ 3663 dptr1 = (uint8_t *)&icb->interrupt_delay_timer; 3664 dptr2 = (uint8_t *)&nv->interrupt_delay_timer; 3665 cnt = (uint8_t *)&icb->reserved_3 - 3666 (uint8_t *)&icb->interrupt_delay_timer; 3667 while (cnt--) 3668 *dptr1++ = *dptr2++; 3669 3670 /* 3671 * Setup driver NVRAM options. 3672 */ 3673 qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name), 3674 "QLA2462"); 3675 3676 /* Use alternate WWN? */ 3677 if (nv->host_p & __constant_cpu_to_le32(BIT_15)) { 3678 memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE); 3679 memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE); 3680 } 3681 3682 /* Prepare nodename */ 3683 if ((icb->firmware_options_1 & __constant_cpu_to_le32(BIT_14)) == 0) { 3684 /* 3685 * Firmware will apply the following mask if the nodename was 3686 * not provided. 3687 */ 3688 memcpy(icb->node_name, icb->port_name, WWN_SIZE); 3689 icb->node_name[0] &= 0xF0; 3690 } 3691 3692 /* Set host adapter parameters. */ 3693 ha->flags.disable_risc_code_load = 0; 3694 ha->flags.enable_lip_reset = 0; 3695 ha->flags.enable_lip_full_login = 3696 le32_to_cpu(nv->host_p) & BIT_10 ? 1: 0; 3697 ha->flags.enable_target_reset = 3698 le32_to_cpu(nv->host_p) & BIT_11 ? 1: 0; 3699 ha->flags.enable_led_scheme = 0; 3700 ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 
1: 0; 3701 3702 ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) & 3703 (BIT_6 | BIT_5 | BIT_4)) >> 4; 3704 3705 memcpy(ha->fw_seriallink_options24, nv->seriallink_options, 3706 sizeof(ha->fw_seriallink_options24)); 3707 3708 /* save HBA serial number */ 3709 ha->serial0 = icb->port_name[5]; 3710 ha->serial1 = icb->port_name[6]; 3711 ha->serial2 = icb->port_name[7]; 3712 memcpy(vha->node_name, icb->node_name, WWN_SIZE); 3713 memcpy(vha->port_name, icb->port_name, WWN_SIZE); 3714 3715 icb->execution_throttle = __constant_cpu_to_le16(0xFFFF); 3716 3717 ha->retry_count = le16_to_cpu(nv->login_retry_count); 3718 3719 /* Set minimum login_timeout to 4 seconds. */ 3720 if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout) 3721 nv->login_timeout = cpu_to_le16(ql2xlogintimeout); 3722 if (le16_to_cpu(nv->login_timeout) < 4) 3723 nv->login_timeout = __constant_cpu_to_le16(4); 3724 ha->login_timeout = le16_to_cpu(nv->login_timeout); 3725 icb->login_timeout = nv->login_timeout; 3726 3727 /* Set minimum RATOV to 100 tenths of a second. */ 3728 ha->r_a_tov = 100; 3729 3730 ha->loop_reset_delay = nv->reset_delay; 3731 3732 /* Link Down Timeout = 0: 3733 * 3734 * When Port Down timer expires we will start returning 3735 * I/O's to OS with "DID_NO_CONNECT". 3736 * 3737 * Link Down Timeout != 0: 3738 * 3739 * The driver waits for the link to come up after link down 3740 * before returning I/Os to OS with "DID_NO_CONNECT". 3741 */ 3742 if (le16_to_cpu(nv->link_down_timeout) == 0) { 3743 ha->loop_down_abort_time = 3744 (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT); 3745 } else { 3746 ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout); 3747 ha->loop_down_abort_time = 3748 (LOOP_DOWN_TIME - ha->link_down_timeout); 3749 } 3750 3751 /* Need enough time to try and get the port back. */ 3752 ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count); 3753 if (qlport_down_retry) 3754 ha->port_down_retry_count = qlport_down_retry; 3755 3756 /* Set login_retry_count */ 3757 ha->login_retry_count = le16_to_cpu(nv->login_retry_count); 3758 if (ha->port_down_retry_count == 3759 le16_to_cpu(nv->port_down_retry_count) && 3760 ha->port_down_retry_count > 3) 3761 ha->login_retry_count = ha->port_down_retry_count; 3762 else if (ha->port_down_retry_count > (int)ha->login_retry_count) 3763 ha->login_retry_count = ha->port_down_retry_count; 3764 if (ql2xloginretrycount) 3765 ha->login_retry_count = ql2xloginretrycount; 3766 3767 /* Enable ZIO. */ 3768 if (!vha->flags.init_done) { 3769 ha->zio_mode = le32_to_cpu(icb->firmware_options_2) & 3770 (BIT_3 | BIT_2 | BIT_1 | BIT_0); 3771 ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ? 
3772 le16_to_cpu(icb->interrupt_delay_timer): 2; 3773 } 3774 icb->firmware_options_2 &= __constant_cpu_to_le32( 3775 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0)); 3776 vha->flags.process_response_queue = 0; 3777 if (ha->zio_mode != QLA_ZIO_DISABLED) { 3778 ha->zio_mode = QLA_ZIO_MODE_6; 3779 3780 DEBUG2(printk("scsi(%ld): ZIO mode %d enabled; timer delay " 3781 "(%d us).\n", vha->host_no, ha->zio_mode, 3782 ha->zio_timer * 100)); 3783 qla_printk(KERN_INFO, ha, 3784 "ZIO mode %d enabled; timer delay (%d us).\n", 3785 ha->zio_mode, ha->zio_timer * 100); 3786 3787 icb->firmware_options_2 |= cpu_to_le32( 3788 (uint32_t)ha->zio_mode); 3789 icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer); 3790 vha->flags.process_response_queue = 1; 3791 } 3792 3793 if (rval) { 3794 DEBUG2_3(printk(KERN_WARNING 3795 "scsi(%ld): NVRAM configuration failed!\n", vha->host_no)); 3796 } 3797 return (rval); 3798 } 3799 3800 static int 3801 qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr) 3802 { 3803 int rval = QLA_SUCCESS; 3804 int segments, fragment; 3805 uint32_t faddr; 3806 uint32_t *dcode, dlen; 3807 uint32_t risc_addr; 3808 uint32_t risc_size; 3809 uint32_t i; 3810 struct qla_hw_data *ha = vha->hw; 3811 struct req_que *req = ha->req_q_map[0]; 3812 3813 qla_printk(KERN_INFO, ha, 3814 "FW: Loading from flash (%x)...\n", ha->flt_region_fw); 3815 3816 rval = QLA_SUCCESS; 3817 3818 segments = FA_RISC_CODE_SEGMENTS; 3819 faddr = ha->flt_region_fw; 3820 dcode = (uint32_t *)req->ring; 3821 *srisc_addr = 0; 3822 3823 /* Validate firmware image by checking version. */ 3824 qla24xx_read_flash_data(vha, dcode, faddr + 4, 4); 3825 for (i = 0; i < 4; i++) 3826 dcode[i] = be32_to_cpu(dcode[i]); 3827 if ((dcode[0] == 0xffffffff && dcode[1] == 0xffffffff && 3828 dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) || 3829 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 && 3830 dcode[3] == 0)) { 3831 qla_printk(KERN_WARNING, ha, 3832 "Unable to verify integrity of flash firmware image!\n"); 3833 qla_printk(KERN_WARNING, ha, 3834 "Firmware data: %08x %08x %08x %08x!\n", dcode[0], 3835 dcode[1], dcode[2], dcode[3]); 3836 3837 return QLA_FUNCTION_FAILED; 3838 } 3839 3840 while (segments && rval == QLA_SUCCESS) { 3841 /* Read segment's load information. */ 3842 qla24xx_read_flash_data(vha, dcode, faddr, 4); 3843 3844 risc_addr = be32_to_cpu(dcode[2]); 3845 *srisc_addr = *srisc_addr == 0 ? risc_addr : *srisc_addr; 3846 risc_size = be32_to_cpu(dcode[3]); 3847 3848 fragment = 0; 3849 while (risc_size > 0 && rval == QLA_SUCCESS) { 3850 dlen = (uint32_t)(ha->fw_transfer_size >> 2); 3851 if (dlen > risc_size) 3852 dlen = risc_size; 3853 3854 DEBUG7(printk("scsi(%ld): Loading risc segment@ risc " 3855 "addr %x, number of dwords 0x%x, offset 0x%x.\n", 3856 vha->host_no, risc_addr, dlen, faddr)); 3857 3858 qla24xx_read_flash_data(vha, dcode, faddr, dlen); 3859 for (i = 0; i < dlen; i++) 3860 dcode[i] = swab32(dcode[i]); 3861 3862 rval = qla2x00_load_ram(vha, req->dma, risc_addr, 3863 dlen); 3864 if (rval) { 3865 DEBUG(printk("scsi(%ld):[ERROR] Failed to load " 3866 "segment %d of firmware\n", vha->host_no, 3867 fragment)); 3868 qla_printk(KERN_WARNING, ha, 3869 "[ERROR] Failed to load segment %d of " 3870 "firmware\n", fragment); 3871 break; 3872 } 3873 3874 faddr += dlen; 3875 risc_addr += dlen; 3876 risc_size -= dlen; 3877 fragment++; 3878 } 3879 3880 /* Next segment. 
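 */

/*
 * Illustrative sketch, not driver code: the chunking loop above.  A
 * segment of risc_size dwords is pushed to the RISC in pieces no
 * larger than the adapter's transfer buffer, with the flash offset
 * and the RISC load address advancing in step.  The copy callback is
 * a stand-in for the read-flash plus load-RAM mailbox pair.
 */
static int sketch_load_segment(unsigned int faddr, unsigned int risc_addr,
    unsigned int risc_size, unsigned int max_chunk,
    int (*copy_chunk)(unsigned int src, unsigned int dst, unsigned int len))
{
	unsigned int dlen;

	while (risc_size > 0) {
		dlen = risc_size < max_chunk ? risc_size : max_chunk;
		if (copy_chunk(faddr, risc_addr, dlen))
			return -1;	/* propagate the failure */
		faddr += dlen;		/* next piece of the image */
		risc_addr += dlen;	/* next destination in RISC RAM */
		risc_size -= dlen;
	}
	return 0;
}

/*
 * Advance to the next firmware segment.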
*/ 3881 segments--; 3882 } 3883 3884 return rval; 3885 } 3886 3887 #define QLA_FW_URL "ftp://ftp.qlogic.com/outgoing/linux/firmware/" 3888 3889 int 3890 qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr) 3891 { 3892 int rval; 3893 int i, fragment; 3894 uint16_t *wcode, *fwcode; 3895 uint32_t risc_addr, risc_size, fwclen, wlen, *seg; 3896 struct fw_blob *blob; 3897 struct qla_hw_data *ha = vha->hw; 3898 struct req_que *req = ha->req_q_map[0]; 3899 3900 /* Load firmware blob. */ 3901 blob = qla2x00_request_firmware(vha); 3902 if (!blob) { 3903 qla_printk(KERN_ERR, ha, "Firmware image unavailable.\n"); 3904 qla_printk(KERN_ERR, ha, "Firmware images can be retrieved " 3905 "from: " QLA_FW_URL ".\n"); 3906 return QLA_FUNCTION_FAILED; 3907 } 3908 3909 rval = QLA_SUCCESS; 3910 3911 wcode = (uint16_t *)req->ring; 3912 *srisc_addr = 0; 3913 fwcode = (uint16_t *)blob->fw->data; 3914 fwclen = 0; 3915 3916 /* Validate firmware image by checking version. */ 3917 if (blob->fw->size < 8 * sizeof(uint16_t)) { 3918 qla_printk(KERN_WARNING, ha, 3919 "Unable to verify integrity of firmware image (%Zd)!\n", 3920 blob->fw->size); 3921 goto fail_fw_integrity; 3922 } 3923 for (i = 0; i < 4; i++) 3924 wcode[i] = be16_to_cpu(fwcode[i + 4]); 3925 if ((wcode[0] == 0xffff && wcode[1] == 0xffff && wcode[2] == 0xffff && 3926 wcode[3] == 0xffff) || (wcode[0] == 0 && wcode[1] == 0 && 3927 wcode[2] == 0 && wcode[3] == 0)) { 3928 qla_printk(KERN_WARNING, ha, 3929 "Unable to verify integrity of firmware image!\n"); 3930 qla_printk(KERN_WARNING, ha, 3931 "Firmware data: %04x %04x %04x %04x!\n", wcode[0], 3932 wcode[1], wcode[2], wcode[3]); 3933 goto fail_fw_integrity; 3934 } 3935 3936 seg = blob->segs; 3937 while (*seg && rval == QLA_SUCCESS) { 3938 risc_addr = *seg; 3939 *srisc_addr = *srisc_addr == 0 ? *seg : *srisc_addr; 3940 risc_size = be16_to_cpu(fwcode[3]); 3941 3942 /* Validate firmware image size. */ 3943 fwclen += risc_size * sizeof(uint16_t); 3944 if (blob->fw->size < fwclen) { 3945 qla_printk(KERN_WARNING, ha, 3946 "Unable to verify integrity of firmware image " 3947 "(%Zd)!\n", blob->fw->size); 3948 goto fail_fw_integrity; 3949 } 3950 3951 fragment = 0; 3952 while (risc_size > 0 && rval == QLA_SUCCESS) { 3953 wlen = (uint16_t)(ha->fw_transfer_size >> 1); 3954 if (wlen > risc_size) 3955 wlen = risc_size; 3956 3957 DEBUG7(printk("scsi(%ld): Loading risc segment@ risc " 3958 "addr %x, number of words 0x%x.\n", vha->host_no, 3959 risc_addr, wlen)); 3960 3961 for (i = 0; i < wlen; i++) 3962 wcode[i] = swab16(fwcode[i]); 3963 3964 rval = qla2x00_load_ram(vha, req->dma, risc_addr, 3965 wlen); 3966 if (rval) { 3967 DEBUG(printk("scsi(%ld):[ERROR] Failed to load " 3968 "segment %d of firmware\n", vha->host_no, 3969 fragment)); 3970 qla_printk(KERN_WARNING, ha, 3971 "[ERROR] Failed to load segment %d of " 3972 "firmware\n", fragment); 3973 break; 3974 } 3975 3976 fwcode += wlen; 3977 risc_addr += wlen; 3978 risc_size -= wlen; 3979 fragment++; 3980 } 3981 3982 /* Next segment. */ 3983 seg++; 3984 } 3985 return rval; 3986 3987 fail_fw_integrity: 3988 return QLA_FUNCTION_FAILED; 3989 } 3990 3991 static int 3992 qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr) 3993 { 3994 int rval; 3995 int segments, fragment; 3996 uint32_t *dcode, dlen; 3997 uint32_t risc_addr; 3998 uint32_t risc_size; 3999 uint32_t i; 4000 struct fw_blob *blob; 4001 uint32_t *fwcode, fwclen; 4002 struct qla_hw_data *ha = vha->hw; 4003 struct req_que *req = ha->req_q_map[0]; 4004 4005 /* Load firmware blob. 
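 */

/*
 * Illustrative sketch, not driver code: the sanity test applied to a
 * firmware image before loading it.  Blank or erased images read back
 * as all-ones or all-zero words, so the four words sampled from the
 * image header are rejected if they are uniform; anything else is
 * assumed to be a real image and is verified further by the load
 * itself.
 */
static int sketch_fw_words_plausible(const unsigned short w[4])
{
	int all_ones = 1, all_zero = 1, i;

	for (i = 0; i < 4; i++) {
		if (w[i] != 0xffff)
			all_ones = 0;
		if (w[i] != 0)
			all_zero = 0;
	}
	return !all_ones && !all_zero;	/* reject erased or blank data */
}

/*
 * Ask the kernel firmware loader for the image.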
*/ 4006 blob = qla2x00_request_firmware(vha); 4007 if (!blob) { 4008 qla_printk(KERN_ERR, ha, "Firmware image unavailable.\n"); 4009 qla_printk(KERN_ERR, ha, "Firmware images can be retrieved " 4010 "from: " QLA_FW_URL ".\n"); 4011 4012 return QLA_FUNCTION_FAILED; 4013 } 4014 4015 qla_printk(KERN_INFO, ha, 4016 "FW: Loading via request-firmware...\n"); 4017 4018 rval = QLA_SUCCESS; 4019 4020 segments = FA_RISC_CODE_SEGMENTS; 4021 dcode = (uint32_t *)req->ring; 4022 *srisc_addr = 0; 4023 fwcode = (uint32_t *)blob->fw->data; 4024 fwclen = 0; 4025 4026 /* Validate firmware image by checking version. */ 4027 if (blob->fw->size < 8 * sizeof(uint32_t)) { 4028 qla_printk(KERN_WARNING, ha, 4029 "Unable to verify integrity of firmware image (%Zd)!\n", 4030 blob->fw->size); 4031 goto fail_fw_integrity; 4032 } 4033 for (i = 0; i < 4; i++) 4034 dcode[i] = be32_to_cpu(fwcode[i + 4]); 4035 if ((dcode[0] == 0xffffffff && dcode[1] == 0xffffffff && 4036 dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) || 4037 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 && 4038 dcode[3] == 0)) { 4039 qla_printk(KERN_WARNING, ha, 4040 "Unable to verify integrity of firmware image!\n"); 4041 qla_printk(KERN_WARNING, ha, 4042 "Firmware data: %08x %08x %08x %08x!\n", dcode[0], 4043 dcode[1], dcode[2], dcode[3]); 4044 goto fail_fw_integrity; 4045 } 4046 4047 while (segments && rval == QLA_SUCCESS) { 4048 risc_addr = be32_to_cpu(fwcode[2]); 4049 *srisc_addr = *srisc_addr == 0 ? risc_addr : *srisc_addr; 4050 risc_size = be32_to_cpu(fwcode[3]); 4051 4052 /* Validate firmware image size. */ 4053 fwclen += risc_size * sizeof(uint32_t); 4054 if (blob->fw->size < fwclen) { 4055 qla_printk(KERN_WARNING, ha, 4056 "Unable to verify integrity of firmware image " 4057 "(%Zd)!\n", blob->fw->size); 4058 4059 goto fail_fw_integrity; 4060 } 4061 4062 fragment = 0; 4063 while (risc_size > 0 && rval == QLA_SUCCESS) { 4064 dlen = (uint32_t)(ha->fw_transfer_size >> 2); 4065 if (dlen > risc_size) 4066 dlen = risc_size; 4067 4068 DEBUG7(printk("scsi(%ld): Loading risc segment@ risc " 4069 "addr %x, number of dwords 0x%x.\n", vha->host_no, 4070 risc_addr, dlen)); 4071 4072 for (i = 0; i < dlen; i++) 4073 dcode[i] = swab32(fwcode[i]); 4074 4075 rval = qla2x00_load_ram(vha, req->dma, risc_addr, 4076 dlen); 4077 if (rval) { 4078 DEBUG(printk("scsi(%ld):[ERROR] Failed to load " 4079 "segment %d of firmware\n", vha->host_no, 4080 fragment)); 4081 qla_printk(KERN_WARNING, ha, 4082 "[ERROR] Failed to load segment %d of " 4083 "firmware\n", fragment); 4084 break; 4085 } 4086 4087 fwcode += dlen; 4088 risc_addr += dlen; 4089 risc_size -= dlen; 4090 fragment++; 4091 } 4092 4093 /* Next segment. */ 4094 segments--; 4095 } 4096 return rval; 4097 4098 fail_fw_integrity: 4099 return QLA_FUNCTION_FAILED; 4100 } 4101 4102 int 4103 qla24xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr) 4104 { 4105 int rval; 4106 4107 /* 4108 * FW Load priority: 4109 * 1) Firmware via request-firmware interface (.bin file). 4110 * 2) Firmware residing in flash. 4111 */ 4112 rval = qla24xx_load_risc_blob(vha, srisc_addr); 4113 if (rval == QLA_SUCCESS) 4114 return rval; 4115 4116 return qla24xx_load_risc_flash(vha, srisc_addr); 4117 } 4118 4119 int 4120 qla81xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr) 4121 { 4122 int rval; 4123 4124 /* 4125 * FW Load priority: 4126 * 1) Firmware residing in flash. 4127 * 2) Firmware via request-firmware interface (.bin file). 
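 * Note that this ordering is the reverse of qla24xx_load_risc() above.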
4128 */ 4129 rval = qla24xx_load_risc_flash(vha, srisc_addr); 4130 if (rval == QLA_SUCCESS) 4131 return rval; 4132 4133 return qla24xx_load_risc_blob(vha, srisc_addr); 4134 } 4135 4136 void 4137 qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha) 4138 { 4139 int ret, retries; 4140 struct qla_hw_data *ha = vha->hw; 4141 4142 if (!IS_FWI2_CAPABLE(ha)) 4143 return; 4144 if (!ha->fw_major_version) 4145 return; 4146 4147 ret = qla2x00_stop_firmware(vha); 4148 for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT && 4149 retries ; retries--) { 4150 ha->isp_ops->reset_chip(vha); 4151 if (ha->isp_ops->chip_diag(vha) != QLA_SUCCESS) 4152 continue; 4153 if (qla2x00_setup_chip(vha) != QLA_SUCCESS) 4154 continue; 4155 qla_printk(KERN_INFO, ha, 4156 "Attempting retry of stop-firmware command...\n"); 4157 ret = qla2x00_stop_firmware(vha); 4158 } 4159 } 4160 4161 int 4162 qla24xx_configure_vhba(scsi_qla_host_t *vha) 4163 { 4164 int rval = QLA_SUCCESS; 4165 uint16_t mb[MAILBOX_REGISTER_COUNT]; 4166 struct qla_hw_data *ha = vha->hw; 4167 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 4168 struct req_que *req = ha->req_q_map[vha->req_ques[0]]; 4169 struct rsp_que *rsp = req->rsp; 4170 4171 if (!vha->vp_idx) 4172 return -EINVAL; 4173 4174 rval = qla2x00_fw_ready(base_vha); 4175 if (rval == QLA_SUCCESS) { 4176 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); 4177 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL); 4178 } 4179 4180 vha->flags.management_server_logged_in = 0; 4181 4182 /* Login to SNS first */ 4183 ha->isp_ops->fabric_login(vha, NPH_SNS, 0xff, 0xff, 0xfc, mb, BIT_1); 4184 if (mb[0] != MBS_COMMAND_COMPLETE) { 4185 DEBUG15(qla_printk(KERN_INFO, ha, 4186 "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x " 4187 "mb[2]=%x mb[6]=%x mb[7]=%x\n", NPH_SNS, 4188 mb[0], mb[1], mb[2], mb[6], mb[7])); 4189 return (QLA_FUNCTION_FAILED); 4190 } 4191 4192 atomic_set(&vha->loop_down_timer, 0); 4193 atomic_set(&vha->loop_state, LOOP_UP); 4194 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 4195 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 4196 rval = qla2x00_loop_resync(base_vha); 4197 4198 return rval; 4199 } 4200 4201 /* 84XX Support **************************************************************/ 4202 4203 static LIST_HEAD(qla_cs84xx_list); 4204 static DEFINE_MUTEX(qla_cs84xx_mutex); 4205 4206 static struct qla_chip_state_84xx * 4207 qla84xx_get_chip(struct scsi_qla_host *vha) 4208 { 4209 struct qla_chip_state_84xx *cs84xx; 4210 struct qla_hw_data *ha = vha->hw; 4211 4212 mutex_lock(&qla_cs84xx_mutex); 4213 4214 /* Find any shared 84xx chip. 
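   Functions on the same PCI bus share a single qla_chip_state_84xx, so reuse an existing entry (taking a reference) before allocating a new one.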
 */
4215     list_for_each_entry(cs84xx, &qla_cs84xx_list, list) {
4216         if (cs84xx->bus == ha->pdev->bus) {
4217             kref_get(&cs84xx->kref);
4218             goto done;
4219         }
4220     }
4221 
4222     cs84xx = kzalloc(sizeof(*cs84xx), GFP_KERNEL);
4223     if (!cs84xx)
4224         goto done;
4225 
4226     kref_init(&cs84xx->kref);
4227     spin_lock_init(&cs84xx->access_lock);
4228     mutex_init(&cs84xx->fw_update_mutex);
4229     cs84xx->bus = ha->pdev->bus;
4230 
4231     list_add_tail(&cs84xx->list, &qla_cs84xx_list);
4232 done:
4233     mutex_unlock(&qla_cs84xx_mutex);
4234     return cs84xx;
4235 }
4236 
4237 static void
4238 __qla84xx_chip_release(struct kref *kref)
4239 {
4240     struct qla_chip_state_84xx *cs84xx =
4241         container_of(kref, struct qla_chip_state_84xx, kref);
4242 
4243     mutex_lock(&qla_cs84xx_mutex);
4244     list_del(&cs84xx->list);
4245     mutex_unlock(&qla_cs84xx_mutex);
4246     kfree(cs84xx);
4247 }
4248 
4249 void
4250 qla84xx_put_chip(struct scsi_qla_host *vha)
4251 {
4252     struct qla_hw_data *ha = vha->hw;
4253     if (ha->cs84xx)
4254         kref_put(&ha->cs84xx->kref, __qla84xx_chip_release);
4255 }
4256 
4257 static int
4258 qla84xx_init_chip(scsi_qla_host_t *vha)
4259 {
4260     int rval;
4261     uint16_t status[2];
4262     struct qla_hw_data *ha = vha->hw;
4263 
4264     mutex_lock(&ha->cs84xx->fw_update_mutex);
4265 
4266     rval = qla84xx_verify_chip(vha, status);
4267 
4268     mutex_unlock(&ha->cs84xx->fw_update_mutex);
4269 
4270     return rval != QLA_SUCCESS || status[0] ? QLA_FUNCTION_FAILED:
4271         QLA_SUCCESS;
4272 }
4273 
4274 /* 81XX Support **************************************************************/
4275 
4276 int
4277 qla81xx_nvram_config(scsi_qla_host_t *vha)
4278 {
4279     int rval;
4280     struct init_cb_81xx *icb;
4281     struct nvram_81xx *nv;
4282     uint32_t *dptr;
4283     uint8_t *dptr1, *dptr2;
4284     uint32_t chksum;
4285     uint16_t cnt;
4286     struct qla_hw_data *ha = vha->hw;
4287 
4288     rval = QLA_SUCCESS;
4289     icb = (struct init_cb_81xx *)ha->init_cb;
4290     nv = ha->nvram;
4291 
4292     /* Determine NVRAM starting address. */
4293     ha->nvram_size = sizeof(struct nvram_81xx);
4294     ha->vpd_size = FA_NVRAM_VPD_SIZE;
4295 
4296     /* Get VPD data into cache */
4297     ha->vpd = ha->nvram + VPD_OFFSET;
4298     ha->isp_ops->read_optrom(vha, ha->vpd, ha->flt_region_vpd << 2,
4299         ha->vpd_size);
4300 
4301     /* Get NVRAM data into cache and calculate checksum. */
4302     ha->isp_ops->read_optrom(vha, ha->nvram, ha->flt_region_nvram << 2,
4303         ha->nvram_size);
4304     dptr = (uint32_t *)nv;
4305     for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++)
4306         chksum += le32_to_cpu(*dptr++);
4307 
4308     DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", vha->host_no));
4309     DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size));
4310 
4311     /* Bad NVRAM data, set default parameters. */
4312     if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P'
4313         || nv->id[3] != ' ' ||
4314         nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) {
4315         /* Reset NVRAM data. */
4316         qla_printk(KERN_WARNING, ha, "Inconsistent NVRAM detected: "
4317             "checksum=0x%x id=%c version=0x%x.\n", chksum, nv->id[0],
4318             le16_to_cpu(nv->nvram_version));
4319         qla_printk(KERN_WARNING, ha, "Falling back to functioning (yet "
4320             "invalid -- WWPN) defaults.\n");
4321 
4322         /*
4323          * Set default initialization control block.
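         * The NVRAM image failed validation, so fill nv with safe defaults
         * (including the placeholder WWPN warned about above) and flag the
         * fallback through rval.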
4324 */ 4325 memset(nv, 0, ha->nvram_size); 4326 nv->nvram_version = __constant_cpu_to_le16(ICB_VERSION); 4327 nv->version = __constant_cpu_to_le16(ICB_VERSION); 4328 nv->frame_payload_size = __constant_cpu_to_le16(2048); 4329 nv->execution_throttle = __constant_cpu_to_le16(0xFFFF); 4330 nv->exchange_count = __constant_cpu_to_le16(0); 4331 nv->port_name[0] = 0x21; 4332 nv->port_name[1] = 0x00 + PCI_FUNC(ha->pdev->devfn); 4333 nv->port_name[2] = 0x00; 4334 nv->port_name[3] = 0xe0; 4335 nv->port_name[4] = 0x8b; 4336 nv->port_name[5] = 0x1c; 4337 nv->port_name[6] = 0x55; 4338 nv->port_name[7] = 0x86; 4339 nv->node_name[0] = 0x20; 4340 nv->node_name[1] = 0x00; 4341 nv->node_name[2] = 0x00; 4342 nv->node_name[3] = 0xe0; 4343 nv->node_name[4] = 0x8b; 4344 nv->node_name[5] = 0x1c; 4345 nv->node_name[6] = 0x55; 4346 nv->node_name[7] = 0x86; 4347 nv->login_retry_count = __constant_cpu_to_le16(8); 4348 nv->interrupt_delay_timer = __constant_cpu_to_le16(0); 4349 nv->login_timeout = __constant_cpu_to_le16(0); 4350 nv->firmware_options_1 = 4351 __constant_cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1); 4352 nv->firmware_options_2 = __constant_cpu_to_le32(2 << 4); 4353 nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12); 4354 nv->firmware_options_3 = __constant_cpu_to_le32(2 << 13); 4355 nv->host_p = __constant_cpu_to_le32(BIT_11|BIT_10); 4356 nv->efi_parameters = __constant_cpu_to_le32(0); 4357 nv->reset_delay = 5; 4358 nv->max_luns_per_target = __constant_cpu_to_le16(128); 4359 nv->port_down_retry_count = __constant_cpu_to_le16(30); 4360 nv->link_down_timeout = __constant_cpu_to_le16(30); 4361 nv->enode_mac[0] = 0x01; 4362 nv->enode_mac[1] = 0x02; 4363 nv->enode_mac[2] = 0x03; 4364 nv->enode_mac[3] = 0x04; 4365 nv->enode_mac[4] = 0x05; 4366 nv->enode_mac[5] = 0x06 + PCI_FUNC(ha->pdev->devfn); 4367 4368 rval = 1; 4369 } 4370 4371 /* Reset Initialization control block */ 4372 memset(icb, 0, sizeof(struct init_cb_81xx)); 4373 4374 /* Copy 1st segment. */ 4375 dptr1 = (uint8_t *)icb; 4376 dptr2 = (uint8_t *)&nv->version; 4377 cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version; 4378 while (cnt--) 4379 *dptr1++ = *dptr2++; 4380 4381 icb->login_retry_count = nv->login_retry_count; 4382 4383 /* Copy 2nd segment. */ 4384 dptr1 = (uint8_t *)&icb->interrupt_delay_timer; 4385 dptr2 = (uint8_t *)&nv->interrupt_delay_timer; 4386 cnt = (uint8_t *)&icb->reserved_5 - 4387 (uint8_t *)&icb->interrupt_delay_timer; 4388 while (cnt--) 4389 *dptr1++ = *dptr2++; 4390 4391 memcpy(icb->enode_mac, nv->enode_mac, sizeof(icb->enode_mac)); 4392 /* Some boards (with valid NVRAMs) still have NULL enode_mac!! */ 4393 if (!memcmp(icb->enode_mac, "\0\0\0\0\0\0", sizeof(icb->enode_mac))) { 4394 icb->enode_mac[0] = 0x01; 4395 icb->enode_mac[1] = 0x02; 4396 icb->enode_mac[2] = 0x03; 4397 icb->enode_mac[3] = 0x04; 4398 icb->enode_mac[4] = 0x05; 4399 icb->enode_mac[5] = 0x06 + PCI_FUNC(ha->pdev->devfn); 4400 } 4401 4402 /* Use extended-initialization control block. */ 4403 memcpy(ha->ex_init_cb, &nv->ex_version, sizeof(*ha->ex_init_cb)); 4404 4405 /* 4406 * Setup driver NVRAM options. 4407 */ 4408 qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name), 4409 "QLE81XX"); 4410 4411 /* Use alternate WWN? 
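   BIT_15 in host_p selects the alternate node and port names stored in NVRAM.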
*/ 4412 if (nv->host_p & __constant_cpu_to_le32(BIT_15)) { 4413 memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE); 4414 memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE); 4415 } 4416 4417 /* Prepare nodename */ 4418 if ((icb->firmware_options_1 & __constant_cpu_to_le32(BIT_14)) == 0) { 4419 /* 4420 * Firmware will apply the following mask if the nodename was 4421 * not provided. 4422 */ 4423 memcpy(icb->node_name, icb->port_name, WWN_SIZE); 4424 icb->node_name[0] &= 0xF0; 4425 } 4426 4427 /* Set host adapter parameters. */ 4428 ha->flags.disable_risc_code_load = 0; 4429 ha->flags.enable_lip_reset = 0; 4430 ha->flags.enable_lip_full_login = 4431 le32_to_cpu(nv->host_p) & BIT_10 ? 1: 0; 4432 ha->flags.enable_target_reset = 4433 le32_to_cpu(nv->host_p) & BIT_11 ? 1: 0; 4434 ha->flags.enable_led_scheme = 0; 4435 ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1: 0; 4436 4437 ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) & 4438 (BIT_6 | BIT_5 | BIT_4)) >> 4; 4439 4440 /* save HBA serial number */ 4441 ha->serial0 = icb->port_name[5]; 4442 ha->serial1 = icb->port_name[6]; 4443 ha->serial2 = icb->port_name[7]; 4444 memcpy(vha->node_name, icb->node_name, WWN_SIZE); 4445 memcpy(vha->port_name, icb->port_name, WWN_SIZE); 4446 4447 icb->execution_throttle = __constant_cpu_to_le16(0xFFFF); 4448 4449 ha->retry_count = le16_to_cpu(nv->login_retry_count); 4450 4451 /* Set minimum login_timeout to 4 seconds. */ 4452 if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout) 4453 nv->login_timeout = cpu_to_le16(ql2xlogintimeout); 4454 if (le16_to_cpu(nv->login_timeout) < 4) 4455 nv->login_timeout = __constant_cpu_to_le16(4); 4456 ha->login_timeout = le16_to_cpu(nv->login_timeout); 4457 icb->login_timeout = nv->login_timeout; 4458 4459 /* Set minimum RATOV to 100 tenths of a second. */ 4460 ha->r_a_tov = 100; 4461 4462 ha->loop_reset_delay = nv->reset_delay; 4463 4464 /* Link Down Timeout = 0: 4465 * 4466 * When Port Down timer expires we will start returning 4467 * I/O's to OS with "DID_NO_CONNECT". 4468 * 4469 * Link Down Timeout != 0: 4470 * 4471 * The driver waits for the link to come up after link down 4472 * before returning I/Os to OS with "DID_NO_CONNECT". 4473 */ 4474 if (le16_to_cpu(nv->link_down_timeout) == 0) { 4475 ha->loop_down_abort_time = 4476 (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT); 4477 } else { 4478 ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout); 4479 ha->loop_down_abort_time = 4480 (LOOP_DOWN_TIME - ha->link_down_timeout); 4481 } 4482 4483 /* Need enough time to try and get the port back. */ 4484 ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count); 4485 if (qlport_down_retry) 4486 ha->port_down_retry_count = qlport_down_retry; 4487 4488 /* Set login_retry_count */ 4489 ha->login_retry_count = le16_to_cpu(nv->login_retry_count); 4490 if (ha->port_down_retry_count == 4491 le16_to_cpu(nv->port_down_retry_count) && 4492 ha->port_down_retry_count > 3) 4493 ha->login_retry_count = ha->port_down_retry_count; 4494 else if (ha->port_down_retry_count > (int)ha->login_retry_count) 4495 ha->login_retry_count = ha->port_down_retry_count; 4496 if (ql2xloginretrycount) 4497 ha->login_retry_count = ql2xloginretrycount; 4498 4499 /* Enable ZIO. */ 4500 if (!vha->flags.init_done) { 4501 ha->zio_mode = le32_to_cpu(icb->firmware_options_2) & 4502 (BIT_3 | BIT_2 | BIT_1 | BIT_0); 4503 ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ? 
4504 le16_to_cpu(icb->interrupt_delay_timer): 2; 4505 } 4506 icb->firmware_options_2 &= __constant_cpu_to_le32( 4507 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0)); 4508 vha->flags.process_response_queue = 0; 4509 if (ha->zio_mode != QLA_ZIO_DISABLED) { 4510 ha->zio_mode = QLA_ZIO_MODE_6; 4511 4512 DEBUG2(printk("scsi(%ld): ZIO mode %d enabled; timer delay " 4513 "(%d us).\n", vha->host_no, ha->zio_mode, 4514 ha->zio_timer * 100)); 4515 qla_printk(KERN_INFO, ha, 4516 "ZIO mode %d enabled; timer delay (%d us).\n", 4517 ha->zio_mode, ha->zio_timer * 100); 4518 4519 icb->firmware_options_2 |= cpu_to_le32( 4520 (uint32_t)ha->zio_mode); 4521 icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer); 4522 vha->flags.process_response_queue = 1; 4523 } 4524 4525 if (rval) { 4526 DEBUG2_3(printk(KERN_WARNING 4527 "scsi(%ld): NVRAM configuration failed!\n", vha->host_no)); 4528 } 4529 return (rval); 4530 } 4531 4532 void 4533 qla81xx_update_fw_options(scsi_qla_host_t *ha) 4534 { 4535 } 4536