/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2008 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_gbl.h"

#include <linux/delay.h>
#include <linux/vmalloc.h>

#include "qla_devtbl.h"

#ifdef CONFIG_SPARC
#include <asm/prom.h>
#endif

/*
 * QLogic ISP2x00 Hardware Support Function Prototypes.
 */
static int qla2x00_isp_firmware(scsi_qla_host_t *);
static void qla2x00_resize_request_q(scsi_qla_host_t *);
static int qla2x00_setup_chip(scsi_qla_host_t *);
static int qla2x00_init_rings(scsi_qla_host_t *);
static int qla2x00_fw_ready(scsi_qla_host_t *);
static int qla2x00_configure_hba(scsi_qla_host_t *);
static int qla2x00_configure_loop(scsi_qla_host_t *);
static int qla2x00_configure_local_loop(scsi_qla_host_t *);
static int qla2x00_configure_fabric(scsi_qla_host_t *);
static int qla2x00_find_all_fabric_devs(scsi_qla_host_t *, struct list_head *);
static int qla2x00_device_resync(scsi_qla_host_t *);
static int qla2x00_fabric_dev_login(scsi_qla_host_t *, fc_port_t *,
    uint16_t *);

static int qla2x00_restart_isp(scsi_qla_host_t *);

static int qla2x00_find_new_loop_id(scsi_qla_host_t *, fc_port_t *);

static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *);
static int qla84xx_init_chip(scsi_qla_host_t *);
static int qla25xx_init_queues(struct qla_hw_data *);

/****************************************************************************/
/*                QLogic ISP2x00 Hardware Support Functions.               */
/****************************************************************************/

/*
 * qla2x00_initialize_adapter
 *      Initialize board.
 *
 * Input:
 *      ha = adapter block pointer.
 *
 * Returns:
 *      0 = success
 */
int
qla2x00_initialize_adapter(scsi_qla_host_t *vha)
{
	int	rval;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];

	/* Clear adapter flags. */
	vha->flags.online = 0;
	vha->flags.reset_active = 0;
	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
	atomic_set(&vha->loop_state, LOOP_DOWN);
	vha->device_flags = DFLG_NO_CABLE;
	vha->dpc_flags = 0;
	vha->flags.management_server_logged_in = 0;
	vha->marker_needed = 0;
	ha->mbx_flags = 0;
	ha->isp_abort_cnt = 0;
	ha->beacon_blink_led = 0;
	set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);

	set_bit(0, ha->req_qid_map);
	set_bit(0, ha->rsp_qid_map);

	qla_printk(KERN_INFO, ha, "Configuring PCI space...\n");
	rval = ha->isp_ops->pci_config(vha);
	if (rval) {
		DEBUG2(printk("scsi(%ld): Unable to configure PCI space.\n",
		    vha->host_no));
		return (rval);
	}

	ha->isp_ops->reset_chip(vha);

	rval = qla2xxx_get_flash_info(vha);
	if (rval) {
		DEBUG2(printk("scsi(%ld): Unable to validate FLASH data.\n",
		    vha->host_no));
		return (rval);
	}

	ha->isp_ops->get_flash_version(vha, req->ring);

	qla_printk(KERN_INFO, ha, "Configure NVRAM parameters...\n");

	ha->isp_ops->nvram_config(vha);

	if (ha->flags.disable_serdes) {
		/* Mask HBA via NVRAM settings? */
		qla_printk(KERN_INFO, ha, "Masking HBA WWPN "
		    "%02x%02x%02x%02x%02x%02x%02x%02x (via NVRAM).\n",
		    vha->port_name[0], vha->port_name[1],
		    vha->port_name[2], vha->port_name[3],
		    vha->port_name[4], vha->port_name[5],
		    vha->port_name[6], vha->port_name[7]);
		return QLA_FUNCTION_FAILED;
	}

	qla_printk(KERN_INFO, ha, "Verifying loaded RISC code...\n");

	if (qla2x00_isp_firmware(vha) != QLA_SUCCESS) {
		rval = ha->isp_ops->chip_diag(vha);
		if (rval)
			return (rval);
		rval = qla2x00_setup_chip(vha);
		if (rval)
			return (rval);
	}
	if (IS_QLA84XX(ha)) {
		ha->cs84xx = qla84xx_get_chip(vha);
		if (!ha->cs84xx) {
			qla_printk(KERN_ERR, ha,
			    "Unable to configure ISP84XX.\n");
			return QLA_FUNCTION_FAILED;
		}
	}
	rval = qla2x00_init_rings(vha);

	return (rval);
}

/**
 * qla2100_pci_config() - Setup ISP21xx PCI configuration registers.
 * @ha: HA context
 *
 * Returns 0 on success.
 */
int
qla2100_pci_config(scsi_qla_host_t *vha)
{
	uint16_t w;
	unsigned long flags;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	pci_set_master(ha->pdev);
	pci_try_set_mwi(ha->pdev);

	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
	pci_write_config_word(ha->pdev, PCI_COMMAND, w);

	pci_disable_rom(ha->pdev);

	/* Get PCI bus information. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->pci_attr = RD_REG_WORD(&reg->ctrl_status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;
}

/**
 * qla2300_pci_config() - Setup ISP23xx PCI configuration registers.
 * @ha: HA context
 *
 * Returns 0 on success.
 */
int
qla2300_pci_config(scsi_qla_host_t *vha)
{
	uint16_t	w;
	unsigned long	flags = 0;
	uint32_t	cnt;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	pci_set_master(ha->pdev);
	pci_try_set_mwi(ha->pdev);

	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);

	if (IS_QLA2322(ha) || IS_QLA6322(ha))
		w &= ~PCI_COMMAND_INTX_DISABLE;
	pci_write_config_word(ha->pdev, PCI_COMMAND, w);

	/*
	 * If this is a 2300 card and not 2312, reset the
	 * COMMAND_INVALIDATE due to a bug in the 2300. Unfortunately,
	 * the 2310 also reports itself as a 2300 so we need to get the
	 * fb revision level -- a 6 indicates it really is a 2300 and
	 * not a 2310.
	 */
	if (IS_QLA2300(ha)) {
		spin_lock_irqsave(&ha->hardware_lock, flags);

		/* Pause RISC. */
		WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
		for (cnt = 0; cnt < 30000; cnt++) {
			if ((RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) != 0)
				break;

			udelay(10);
		}

		/* Select FPM registers. */
		WRT_REG_WORD(&reg->ctrl_status, 0x20);
		RD_REG_WORD(&reg->ctrl_status);

		/* Get the fb rev level */
		ha->fb_rev = RD_FB_CMD_REG(ha, reg);

		if (ha->fb_rev == FPM_2300)
			pci_clear_mwi(ha->pdev);

		/* Deselect FPM registers. */
		WRT_REG_WORD(&reg->ctrl_status, 0x0);
		RD_REG_WORD(&reg->ctrl_status);

		/* Release RISC module. */
		WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
		for (cnt = 0; cnt < 30000; cnt++) {
			if ((RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0)
				break;

			udelay(10);
		}

		spin_unlock_irqrestore(&ha->hardware_lock, flags);
	}

	pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);

	pci_disable_rom(ha->pdev);

	/* Get PCI bus information. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->pci_attr = RD_REG_WORD(&reg->ctrl_status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;
}

/**
 * qla24xx_pci_config() - Setup ISP24xx PCI configuration registers.
 * @ha: HA context
 *
 * Returns 0 on success.
 */
int
qla24xx_pci_config(scsi_qla_host_t *vha)
{
	uint16_t w;
	unsigned long flags = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	pci_set_master(ha->pdev);
	pci_try_set_mwi(ha->pdev);

	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
	w &= ~PCI_COMMAND_INTX_DISABLE;
	pci_write_config_word(ha->pdev, PCI_COMMAND, w);

	pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);

	/* PCI-X -- adjust Maximum Memory Read Byte Count (2048). */
	if (pci_find_capability(ha->pdev, PCI_CAP_ID_PCIX))
		pcix_set_mmrbc(ha->pdev, 2048);

	/* PCIe -- adjust Maximum Read Request Size (2048). */
	if (pci_find_capability(ha->pdev, PCI_CAP_ID_EXP))
		pcie_set_readrq(ha->pdev, 2048);

	pci_disable_rom(ha->pdev);

	ha->chip_revision = ha->pdev->revision;

	/* Get PCI bus information. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->pci_attr = RD_REG_DWORD(&reg->ctrl_status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;
}

/**
 * qla25xx_pci_config() - Setup ISP25xx PCI configuration registers.
 * @ha: HA context
 *
 * Returns 0 on success.
 */
int
qla25xx_pci_config(scsi_qla_host_t *vha)
{
	uint16_t w;
	struct qla_hw_data *ha = vha->hw;

	pci_set_master(ha->pdev);
	pci_try_set_mwi(ha->pdev);

	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
	w &= ~PCI_COMMAND_INTX_DISABLE;
	pci_write_config_word(ha->pdev, PCI_COMMAND, w);

	/* PCIe -- adjust Maximum Read Request Size (2048). */
	if (pci_find_capability(ha->pdev, PCI_CAP_ID_EXP))
		pcie_set_readrq(ha->pdev, 2048);

	pci_disable_rom(ha->pdev);

	ha->chip_revision = ha->pdev->revision;

	return QLA_SUCCESS;
}

/**
 * qla2x00_isp_firmware() - Choose firmware image.
 * @ha: HA context
 *
 * Returns 0 on success.
 */
static int
qla2x00_isp_firmware(scsi_qla_host_t *vha)
{
	int  rval;
	uint16_t loop_id, topo, sw_cap;
	uint8_t domain, area, al_pa;
	struct qla_hw_data *ha = vha->hw;

	/* Assume loading risc code */
	rval = QLA_FUNCTION_FAILED;

	if (ha->flags.disable_risc_code_load) {
		DEBUG2(printk("scsi(%ld): RISC CODE NOT loaded\n",
		    vha->host_no));
		qla_printk(KERN_INFO, ha, "RISC CODE NOT loaded\n");

		/* Verify checksum of loaded RISC code. */
		rval = qla2x00_verify_checksum(vha, ha->fw_srisc_address);
		if (rval == QLA_SUCCESS) {
			/* And, verify we are not in ROM code. */
			rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
			    &area, &domain, &topo, &sw_cap);
		}
	}

	if (rval) {
		DEBUG2_3(printk("scsi(%ld): **** Load RISC code ****\n",
		    vha->host_no));
	}

	return (rval);
}

/**
 * qla2x00_reset_chip() - Reset ISP chip.
 * @ha: HA context
 *
 * Returns 0 on success.
 */
void
qla2x00_reset_chip(scsi_qla_host_t *vha)
{
	unsigned long	flags = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	uint32_t	cnt;
	uint16_t	cmd;

	ha->isp_ops->disable_intrs(ha);

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Turn off master enable */
	cmd = 0;
	pci_read_config_word(ha->pdev, PCI_COMMAND, &cmd);
	cmd &= ~PCI_COMMAND_MASTER;
	pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);

	if (!IS_QLA2100(ha)) {
		/* Pause RISC. */
		WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
		if (IS_QLA2200(ha) || IS_QLA2300(ha)) {
			for (cnt = 0; cnt < 30000; cnt++) {
				if ((RD_REG_WORD(&reg->hccr) &
				    HCCR_RISC_PAUSE) != 0)
					break;
				udelay(100);
			}
		} else {
			RD_REG_WORD(&reg->hccr);	/* PCI Posting. */
			udelay(10);
		}

		/* Select FPM registers. */
		WRT_REG_WORD(&reg->ctrl_status, 0x20);
		RD_REG_WORD(&reg->ctrl_status);		/* PCI Posting. */

		/* FPM Soft Reset. */
		WRT_REG_WORD(&reg->fpm_diag_config, 0x100);
		RD_REG_WORD(&reg->fpm_diag_config);	/* PCI Posting. */

		/* Toggle Fpm Reset. */
		if (!IS_QLA2200(ha)) {
			WRT_REG_WORD(&reg->fpm_diag_config, 0x0);
			RD_REG_WORD(&reg->fpm_diag_config); /* PCI Posting. */
		}

		/* Select frame buffer registers. */
		WRT_REG_WORD(&reg->ctrl_status, 0x10);
		RD_REG_WORD(&reg->ctrl_status);		/* PCI Posting. */

		/* Reset frame buffer FIFOs. */
		if (IS_QLA2200(ha)) {
			WRT_FB_CMD_REG(ha, reg, 0xa000);
			RD_FB_CMD_REG(ha, reg);		/* PCI Posting. */
		} else {
			WRT_FB_CMD_REG(ha, reg, 0x00fc);

			/* Read back fb_cmd until zero or 3 seconds max */
			for (cnt = 0; cnt < 3000; cnt++) {
				if ((RD_FB_CMD_REG(ha, reg) & 0xff) == 0)
					break;
				udelay(100);
			}
		}

		/* Select RISC module registers. */
		WRT_REG_WORD(&reg->ctrl_status, 0);
		RD_REG_WORD(&reg->ctrl_status);		/* PCI Posting. */

		/* Reset RISC processor. */
		WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
		RD_REG_WORD(&reg->hccr);		/* PCI Posting. */

		/* Release RISC processor. */
		WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
		RD_REG_WORD(&reg->hccr);		/* PCI Posting. */
	}

	WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
	WRT_REG_WORD(&reg->hccr, HCCR_CLR_HOST_INT);

	/* Reset ISP chip. */
	WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);

	/* Wait for RISC to recover from reset. */
	if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
		/*
		 * It is necessary to wait here since the card does not
		 * respond to PCI reads during a reset. On some architectures
		 * this will result in an MCA.
		 */
		udelay(20);
		for (cnt = 30000; cnt; cnt--) {
			if ((RD_REG_WORD(&reg->ctrl_status) &
			    CSR_ISP_SOFT_RESET) == 0)
				break;
			udelay(100);
		}
	} else
		udelay(10);

	/* Reset RISC processor. */
	WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);

	WRT_REG_WORD(&reg->semaphore, 0);

	/* Release RISC processor. */
	WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
	RD_REG_WORD(&reg->hccr);		/* PCI Posting. */

	if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
		for (cnt = 0; cnt < 30000; cnt++) {
			if (RD_MAILBOX_REG(ha, reg, 0) != MBS_BUSY)
				break;

			udelay(100);
		}
	} else
		udelay(100);

	/* Turn on master enable */
	cmd |= PCI_COMMAND_MASTER;
	pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);

	/* Disable RISC pause on FPM parity error. */
	if (!IS_QLA2100(ha)) {
		WRT_REG_WORD(&reg->hccr, HCCR_DISABLE_PARITY_PAUSE);
		RD_REG_WORD(&reg->hccr);	/* PCI Posting. */
	}

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

/**
 * qla24xx_reset_risc() - Perform full reset of ISP24xx RISC.
 * @ha: HA context
 *
 * Returns 0 on success.
 */
static inline void
qla24xx_reset_risc(scsi_qla_host_t *vha)
{
	int hw_evt = 0;
	unsigned long flags = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
	uint32_t cnt, d2;
	uint16_t wd;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Reset RISC. */
	WRT_REG_DWORD(&reg->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
	for (cnt = 0; cnt < 30000; cnt++) {
		if ((RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE) == 0)
			break;

		udelay(10);
	}

	WRT_REG_DWORD(&reg->ctrl_status,
	    CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
	pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);

	udelay(100);
	/* Wait for firmware to complete NVRAM accesses. */
	d2 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
	for (cnt = 10000 ; cnt && d2; cnt--) {
		udelay(5);
		d2 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
		barrier();
	}
	if (cnt == 0)
		hw_evt = 1;

	/* Wait for soft-reset to complete. */
	d2 = RD_REG_DWORD(&reg->ctrl_status);
	for (cnt = 6000000 ; cnt && (d2 & CSRX_ISP_SOFT_RESET); cnt--) {
		udelay(5);
		d2 = RD_REG_DWORD(&reg->ctrl_status);
		barrier();
	}

	WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
	RD_REG_DWORD(&reg->hccr);

	WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
	RD_REG_DWORD(&reg->hccr);

	WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET);
	RD_REG_DWORD(&reg->hccr);

	d2 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
	for (cnt = 6000000 ; cnt && d2; cnt--) {
		udelay(5);
		d2 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
		barrier();
	}

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (IS_NOPOLLING_TYPE(ha))
		ha->isp_ops->enable_intrs(ha);
}

/**
 * qla24xx_reset_chip() - Reset ISP24xx chip.
 * @ha: HA context
 *
 * Returns 0 on success.
 */
void
qla24xx_reset_chip(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;

	ha->isp_ops->disable_intrs(ha);

	/* Perform RISC reset. */
	qla24xx_reset_risc(vha);
}

/**
 * qla2x00_chip_diag() - Test chip for proper operation.
 * @ha: HA context
 *
 * Returns 0 on success.
 */
int
qla2x00_chip_diag(scsi_qla_host_t *vha)
{
	int		rval;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	unsigned long	flags = 0;
	uint16_t	data;
	uint32_t	cnt;
	uint16_t	mb[5];
	struct req_que *req = ha->req_q_map[0];

	/* Assume a failed state */
	rval = QLA_FUNCTION_FAILED;

	DEBUG3(printk("scsi(%ld): Testing device at %lx.\n",
	    vha->host_no, (u_long)&reg->flash_address));

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Reset ISP chip. */
	WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);

	/*
	 * We need to have a delay here since the card will not respond while
	 * in reset causing an MCA on some architectures.
	 */
	udelay(20);
	data = qla2x00_debounce_register(&reg->ctrl_status);
	for (cnt = 6000000 ; cnt && (data & CSR_ISP_SOFT_RESET); cnt--) {
		udelay(5);
		data = RD_REG_WORD(&reg->ctrl_status);
		barrier();
	}

	if (!cnt)
		goto chip_diag_failed;

	DEBUG3(printk("scsi(%ld): Reset register cleared by chip reset\n",
	    vha->host_no));

	/* Reset RISC processor. */
	WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
	WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);

	/* Workaround for QLA2312 PCI parity error */
	if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
		data = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 0));
		for (cnt = 6000000; cnt && (data == MBS_BUSY); cnt--) {
			udelay(5);
			data = RD_MAILBOX_REG(ha, reg, 0);
			barrier();
		}
	} else
		udelay(10);

	if (!cnt)
		goto chip_diag_failed;

	/* Check product ID of chip */
	DEBUG3(printk("scsi(%ld): Checking product ID of chip\n",
	    vha->host_no));

	mb[1] = RD_MAILBOX_REG(ha, reg, 1);
	mb[2] = RD_MAILBOX_REG(ha, reg, 2);
	mb[3] = RD_MAILBOX_REG(ha, reg, 3);
	mb[4] = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 4));
	if (mb[1] != PROD_ID_1 || (mb[2] != PROD_ID_2 && mb[2] != PROD_ID_2a) ||
	    mb[3] != PROD_ID_3) {
		qla_printk(KERN_WARNING, ha,
		    "Wrong product ID = 0x%x,0x%x,0x%x\n", mb[1], mb[2], mb[3]);

		goto chip_diag_failed;
	}
	ha->product_id[0] = mb[1];
	ha->product_id[1] = mb[2];
	ha->product_id[2] = mb[3];
	ha->product_id[3] = mb[4];

	/* Adjust fw RISC transfer size */
	if (req->length > 1024)
		ha->fw_transfer_size = REQUEST_ENTRY_SIZE * 1024;
	else
		ha->fw_transfer_size = REQUEST_ENTRY_SIZE *
		    req->length;

	if (IS_QLA2200(ha) &&
	    RD_MAILBOX_REG(ha, reg, 7) == QLA2200A_RISC_ROM_VER) {
		/* Limit firmware transfer size with a 2200A */
		DEBUG3(printk("scsi(%ld): Found QLA2200A chip.\n",
		    vha->host_no));

		ha->device_type |= DT_ISP2200A;
		ha->fw_transfer_size = 128;
	}

	/* Wrap Incoming Mailboxes Test. */
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	DEBUG3(printk("scsi(%ld): Checking mailboxes.\n", vha->host_no));
	rval = qla2x00_mbx_reg_test(vha);
	if (rval) {
		DEBUG(printk("scsi(%ld): Failed mailbox send register test\n",
		    vha->host_no));
		qla_printk(KERN_WARNING, ha,
		    "Failed mailbox send register test\n");
	} else {
		/* Flag a successful rval */
		rval = QLA_SUCCESS;
	}
	spin_lock_irqsave(&ha->hardware_lock, flags);

chip_diag_failed:
	if (rval)
		DEBUG2_3(printk("scsi(%ld): Chip diagnostics **** FAILED "
		    "****\n", vha->host_no));

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (rval);
}

/**
 * qla24xx_chip_diag() - Test ISP24xx for proper operation.
 * @ha: HA context
 *
 * Returns 0 on success.
 */
int
qla24xx_chip_diag(scsi_qla_host_t *vha)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];

	/* Perform RISC reset. */
	qla24xx_reset_risc(vha);

	ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length;

	rval = qla2x00_mbx_reg_test(vha);
	if (rval) {
		DEBUG(printk("scsi(%ld): Failed mailbox send register test\n",
		    vha->host_no));
		qla_printk(KERN_WARNING, ha,
		    "Failed mailbox send register test\n");
	} else {
		/* Flag a successful rval */
		rval = QLA_SUCCESS;
	}

	return rval;
}

void
qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
{
	int rval;
	uint32_t dump_size, fixed_size, mem_size, req_q_size, rsp_q_size,
	    eft_size, fce_size, mq_size;
	dma_addr_t tc_dma;
	void *tc;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];
	struct rsp_que *rsp = ha->rsp_q_map[0];

	if (ha->fw_dump) {
		qla_printk(KERN_WARNING, ha,
		    "Firmware dump previously allocated.\n");
		return;
	}

	ha->fw_dumped = 0;
	fixed_size = mem_size = eft_size = fce_size = mq_size = 0;
	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
		fixed_size = sizeof(struct qla2100_fw_dump);
	} else if (IS_QLA23XX(ha)) {
		fixed_size = offsetof(struct qla2300_fw_dump, data_ram);
		mem_size = (ha->fw_memory_size - 0x11000 + 1) *
		    sizeof(uint16_t);
	} else if (IS_FWI2_CAPABLE(ha)) {
		if (IS_QLA81XX(ha))
			fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem);
		else if (IS_QLA25XX(ha))
			fixed_size = offsetof(struct qla25xx_fw_dump, ext_mem);
		else
			fixed_size = offsetof(struct qla24xx_fw_dump, ext_mem);
		mem_size = (ha->fw_memory_size - 0x100000 + 1) *
		    sizeof(uint32_t);
		if (ha->mqenable)
			mq_size = sizeof(struct qla2xxx_mq_chain);

		/* Allocate memory for Fibre Channel Event Buffer. */
		if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))
			goto try_eft;

		tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,
		    GFP_KERNEL);
		if (!tc) {
			qla_printk(KERN_WARNING, ha, "Unable to allocate "
			    "(%d KB) for FCE.\n", FCE_SIZE / 1024);
			goto try_eft;
		}

		memset(tc, 0, FCE_SIZE);
		rval = qla2x00_enable_fce_trace(vha, tc_dma, FCE_NUM_BUFFERS,
		    ha->fce_mb, &ha->fce_bufs);
		if (rval) {
			qla_printk(KERN_WARNING, ha, "Unable to initialize "
			    "FCE (%d).\n", rval);
			dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc,
			    tc_dma);
			ha->flags.fce_enabled = 0;
			goto try_eft;
		}

		qla_printk(KERN_INFO, ha, "Allocated (%d KB) for FCE...\n",
		    FCE_SIZE / 1024);

		fce_size = sizeof(struct qla2xxx_fce_chain) + EFT_SIZE;
		ha->flags.fce_enabled = 1;
		ha->fce_dma = tc_dma;
		ha->fce = tc;
try_eft:
		/* Allocate memory for Extended Trace Buffer. */
		tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
		    GFP_KERNEL);
		if (!tc) {
			qla_printk(KERN_WARNING, ha, "Unable to allocate "
			    "(%d KB) for EFT.\n", EFT_SIZE / 1024);
			goto cont_alloc;
		}

		memset(tc, 0, EFT_SIZE);
		rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS);
		if (rval) {
			qla_printk(KERN_WARNING, ha, "Unable to initialize "
			    "EFT (%d).\n", rval);
			dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc,
			    tc_dma);
			goto cont_alloc;
		}

		qla_printk(KERN_INFO, ha, "Allocated (%d KB) for EFT...\n",
		    EFT_SIZE / 1024);

		eft_size = EFT_SIZE;
		ha->eft_dma = tc_dma;
		ha->eft = tc;
	}
cont_alloc:
	req_q_size = req->length * sizeof(request_t);
	rsp_q_size = rsp->length * sizeof(response_t);

	dump_size = offsetof(struct qla2xxx_fw_dump, isp);
	dump_size += fixed_size + mem_size + req_q_size + rsp_q_size +
	    eft_size;
	ha->chain_offset = dump_size;
	dump_size += mq_size + fce_size;

	ha->fw_dump = vmalloc(dump_size);
	if (!ha->fw_dump) {
		qla_printk(KERN_WARNING, ha, "Unable to allocate (%d KB) for "
		    "firmware dump!!!\n", dump_size / 1024);

		if (ha->eft) {
			dma_free_coherent(&ha->pdev->dev, eft_size, ha->eft,
			    ha->eft_dma);
			ha->eft = NULL;
			ha->eft_dma = 0;
		}
		return;
	}
	qla_printk(KERN_INFO, ha, "Allocated (%d KB) for firmware dump...\n",
	    dump_size / 1024);

	ha->fw_dump_len = dump_size;
	ha->fw_dump->signature[0] = 'Q';
	ha->fw_dump->signature[1] = 'L';
	ha->fw_dump->signature[2] = 'G';
	ha->fw_dump->signature[3] = 'C';
	ha->fw_dump->version = __constant_htonl(1);

	ha->fw_dump->fixed_size = htonl(fixed_size);
	ha->fw_dump->mem_size = htonl(mem_size);
	ha->fw_dump->req_q_size = htonl(req_q_size);
	ha->fw_dump->rsp_q_size = htonl(rsp_q_size);

	ha->fw_dump->eft_size = htonl(eft_size);
	ha->fw_dump->eft_addr_l = htonl(LSD(ha->eft_dma));
	ha->fw_dump->eft_addr_h = htonl(MSD(ha->eft_dma));

	ha->fw_dump->header_size =
	    htonl(offsetof(struct qla2xxx_fw_dump, isp));
}

/**
 * qla2x00_resize_request_q() - Resize request queue given available ISP memory.
 * @ha: HA context
 *
 * Returns 0 on success.
 */
static void
qla2x00_resize_request_q(scsi_qla_host_t *vha)
{
	int rval;
	uint16_t fw_iocb_cnt = 0;
	uint16_t request_q_length = REQUEST_ENTRY_CNT_2XXX_EXT_MEM;
	dma_addr_t request_dma;
	request_t *request_ring;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];

	/* Valid only on recent ISPs. */
	if (IS_QLA2100(ha) || IS_QLA2200(ha))
		return;

	/* Retrieve IOCB counts available to the firmware. */
	rval = qla2x00_get_resource_cnts(vha, NULL, NULL, NULL, &fw_iocb_cnt,
	    &ha->max_npiv_vports);
	if (rval)
		return;
	/* No point in continuing if current settings are sufficient. */
	if (fw_iocb_cnt < 1024)
		return;
	if (req->length >= request_q_length)
		return;

	/* Attempt to claim larger area for request queue. */
	request_ring = dma_alloc_coherent(&ha->pdev->dev,
	    (request_q_length + 1) * sizeof(request_t), &request_dma,
	    GFP_KERNEL);
	if (request_ring == NULL)
		return;

	/* Resize successful, report extensions. */
	qla_printk(KERN_INFO, ha, "Extended memory detected (%d KB)...\n",
	    (ha->fw_memory_size + 1) / 1024);
	qla_printk(KERN_INFO, ha, "Resizing request queue depth "
	    "(%d -> %d)...\n", req->length, request_q_length);

	/* Clear old allocations. */
	dma_free_coherent(&ha->pdev->dev,
	    (req->length + 1) * sizeof(request_t), req->ring,
	    req->dma);

	/* Begin using larger queue. */
	req->length = request_q_length;
	req->ring = request_ring;
	req->dma = request_dma;
}

/**
 * qla2x00_setup_chip() - Load and start RISC firmware.
 * @ha: HA context
 *
 * Returns 0 on success.
 */
static int
qla2x00_setup_chip(scsi_qla_host_t *vha)
{
	int rval;
	uint32_t srisc_address = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	unsigned long flags;

	if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
		/* Disable SRAM, Instruction RAM and GP RAM parity.  */
		spin_lock_irqsave(&ha->hardware_lock, flags);
		WRT_REG_WORD(&reg->hccr, (HCCR_ENABLE_PARITY + 0x0));
		RD_REG_WORD(&reg->hccr);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
	}

	/* Load firmware sequences */
	rval = ha->isp_ops->load_risc(vha, &srisc_address);
	if (rval == QLA_SUCCESS) {
		DEBUG(printk("scsi(%ld): Verifying Checksum of loaded RISC "
		    "code.\n", vha->host_no));

		rval = qla2x00_verify_checksum(vha, srisc_address);
		if (rval == QLA_SUCCESS) {
			/* Start firmware execution. */
			DEBUG(printk("scsi(%ld): Checksum OK, start "
			    "firmware.\n", vha->host_no));

			rval = qla2x00_execute_fw(vha, srisc_address);
			/* Retrieve firmware information. */
			if (rval == QLA_SUCCESS && ha->fw_major_version == 0) {
				qla2x00_get_fw_version(vha,
				    &ha->fw_major_version,
				    &ha->fw_minor_version,
				    &ha->fw_subminor_version,
				    &ha->fw_attributes, &ha->fw_memory_size,
				    ha->mpi_version, &ha->mpi_capabilities);
				ha->flags.npiv_supported = 0;
				if (IS_QLA2XXX_MIDTYPE(ha) &&
				    (ha->fw_attributes & BIT_2)) {
					ha->flags.npiv_supported = 1;
					if ((!ha->max_npiv_vports) ||
					    ((ha->max_npiv_vports + 1) %
					    MIN_MULTI_ID_FABRIC))
						ha->max_npiv_vports =
						    MIN_MULTI_ID_FABRIC - 1;
				}
				qla2x00_resize_request_q(vha);

				if (ql2xallocfwdump)
					qla2x00_alloc_fw_dump(vha);
			}
		} else {
			DEBUG2(printk(KERN_INFO
			    "scsi(%ld): ISP Firmware failed checksum.\n",
			    vha->host_no));
		}
	}

	if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
		/* Enable proper parity. */
		spin_lock_irqsave(&ha->hardware_lock, flags);
		if (IS_QLA2300(ha))
			/* SRAM parity */
			WRT_REG_WORD(&reg->hccr, HCCR_ENABLE_PARITY + 0x1);
		else
			/* SRAM, Instruction RAM and GP RAM parity */
			WRT_REG_WORD(&reg->hccr, HCCR_ENABLE_PARITY + 0x7);
		RD_REG_WORD(&reg->hccr);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
	}

	if (rval) {
		DEBUG2_3(printk("scsi(%ld): Setup chip **** FAILED ****.\n",
		    vha->host_no));
	}

	return (rval);
}

/**
 * qla2x00_init_response_q_entries() - Initializes response queue entries.
 * @ha: HA context
 *
 * Beginning of request ring has initialization control block already built
 * by nvram config routine.
 *
 * Returns 0 on success.
 */
void
qla2x00_init_response_q_entries(struct rsp_que *rsp)
{
	uint16_t cnt;
	response_t *pkt;

	pkt = rsp->ring_ptr;
	for (cnt = 0; cnt < rsp->length; cnt++) {
		pkt->signature = RESPONSE_PROCESSED;
		pkt++;
	}
}

/**
 * qla2x00_update_fw_options() - Read and process firmware options.
 * @ha: HA context
 *
 * Returns 0 on success.
 */
void
qla2x00_update_fw_options(scsi_qla_host_t *vha)
{
	uint16_t swing, emphasis, tx_sens, rx_sens;
	struct qla_hw_data *ha = vha->hw;

	memset(ha->fw_options, 0, sizeof(ha->fw_options));
	qla2x00_get_fw_options(vha, ha->fw_options);

	if (IS_QLA2100(ha) || IS_QLA2200(ha))
		return;

	/* Serial Link options. */
	DEBUG3(printk("scsi(%ld): Serial link options:\n",
	    vha->host_no));
	DEBUG3(qla2x00_dump_buffer((uint8_t *)&ha->fw_seriallink_options,
	    sizeof(ha->fw_seriallink_options)));

	ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING;
	if (ha->fw_seriallink_options[3] & BIT_2) {
		ha->fw_options[1] |= FO1_SET_EMPHASIS_SWING;

		/*  1G settings */
		swing = ha->fw_seriallink_options[2] & (BIT_2 | BIT_1 | BIT_0);
		emphasis = (ha->fw_seriallink_options[2] &
		    (BIT_4 | BIT_3)) >> 3;
		tx_sens = ha->fw_seriallink_options[0] &
		    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
		rx_sens = (ha->fw_seriallink_options[0] &
		    (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4;
		ha->fw_options[10] = (emphasis << 14) | (swing << 8);
		if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
			if (rx_sens == 0x0)
				rx_sens = 0x3;
			ha->fw_options[10] |= (tx_sens << 4) | rx_sens;
		} else if (IS_QLA2322(ha) || IS_QLA6322(ha))
			ha->fw_options[10] |= BIT_5 |
			    ((rx_sens & (BIT_1 | BIT_0)) << 2) |
			    (tx_sens & (BIT_1 | BIT_0));

		/*  2G settings */
		swing = (ha->fw_seriallink_options[2] &
		    (BIT_7 | BIT_6 | BIT_5)) >> 5;
		emphasis = ha->fw_seriallink_options[3] & (BIT_1 | BIT_0);
		tx_sens = ha->fw_seriallink_options[1] &
		    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
		rx_sens = (ha->fw_seriallink_options[1] &
		    (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4;
		ha->fw_options[11] = (emphasis << 14) | (swing << 8);
		if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
			if (rx_sens == 0x0)
				rx_sens = 0x3;
			ha->fw_options[11] |= (tx_sens << 4) | rx_sens;
		} else if (IS_QLA2322(ha) || IS_QLA6322(ha))
			ha->fw_options[11] |= BIT_5 |
			    ((rx_sens & (BIT_1 | BIT_0)) << 2) |
			    (tx_sens & (BIT_1 | BIT_0));
	}

	/* FCP2 options. */
	/* Return command IOCBs without waiting for an ABTS to complete. */
	ha->fw_options[3] |= BIT_13;

	/* LED scheme. */
	if (ha->flags.enable_led_scheme)
		ha->fw_options[2] |= BIT_12;

	/* Detect ISP6312. */
	if (IS_QLA6312(ha))
		ha->fw_options[2] |= BIT_13;

	/* Update firmware options. */
	qla2x00_set_fw_options(vha, ha->fw_options);
}

void
qla24xx_update_fw_options(scsi_qla_host_t *vha)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;

	/* Update Serial Link options. */
	if ((le16_to_cpu(ha->fw_seriallink_options24[0]) & BIT_0) == 0)
		return;

	rval = qla2x00_set_serdes_params(vha,
	    le16_to_cpu(ha->fw_seriallink_options24[1]),
	    le16_to_cpu(ha->fw_seriallink_options24[2]),
	    le16_to_cpu(ha->fw_seriallink_options24[3]));
	if (rval != QLA_SUCCESS) {
		qla_printk(KERN_WARNING, ha,
		    "Unable to update Serial Link options (%x).\n", rval);
	}
}

void
qla2x00_config_rings(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	struct req_que *req = ha->req_q_map[0];
	struct rsp_que *rsp = ha->rsp_q_map[0];

	/* Setup ring parameters in initialization control block. */
	ha->init_cb->request_q_outpointer = __constant_cpu_to_le16(0);
	ha->init_cb->response_q_inpointer = __constant_cpu_to_le16(0);
	ha->init_cb->request_q_length = cpu_to_le16(req->length);
	ha->init_cb->response_q_length = cpu_to_le16(rsp->length);
	ha->init_cb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
	ha->init_cb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
	ha->init_cb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
	ha->init_cb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));

	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), 0);
	WRT_REG_WORD(ISP_REQ_Q_OUT(ha, reg), 0);
	WRT_REG_WORD(ISP_RSP_Q_IN(ha, reg), 0);
	WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), 0);
	RD_REG_WORD(ISP_RSP_Q_OUT(ha, reg));	/* PCI Posting. */
}

void
qla24xx_config_rings(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t __iomem *reg = ISP_QUE_REG(ha, 0);
	struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
	struct qla_msix_entry *msix;
	struct init_cb_24xx *icb;
	uint16_t rid = 0;
	struct req_que *req = ha->req_q_map[0];
	struct rsp_que *rsp = ha->rsp_q_map[0];

	/* Setup ring parameters in initialization control block. */
	icb = (struct init_cb_24xx *)ha->init_cb;
	icb->request_q_outpointer = __constant_cpu_to_le16(0);
	icb->response_q_inpointer = __constant_cpu_to_le16(0);
	icb->request_q_length = cpu_to_le16(req->length);
	icb->response_q_length = cpu_to_le16(rsp->length);
	icb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
	icb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
	icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
	icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));

	if (ha->mqenable) {
		icb->qos = __constant_cpu_to_le16(QLA_DEFAULT_QUE_QOS);
		icb->rid = __constant_cpu_to_le16(rid);
		if (ha->flags.msix_enabled) {
			msix = &ha->msix_entries[1];
			DEBUG2_17(printk(KERN_INFO
			    "Registering vector 0x%x for base que\n",
			    msix->entry));
			icb->msix = cpu_to_le16(msix->entry);
		}
		/* Use alternate PCI bus number */
		if (MSB(rid))
			icb->firmware_options_2 |=
			    __constant_cpu_to_le32(BIT_19);
		/* Use alternate PCI devfn */
		if (LSB(rid))
			icb->firmware_options_2 |=
			    __constant_cpu_to_le32(BIT_18);

		icb->firmware_options_2 &= __constant_cpu_to_le32(~BIT_22);
		icb->firmware_options_2 |= __constant_cpu_to_le32(BIT_23);

		WRT_REG_DWORD(&reg->isp25mq.req_q_in, 0);
		WRT_REG_DWORD(&reg->isp25mq.req_q_out, 0);
		WRT_REG_DWORD(&reg->isp25mq.rsp_q_in, 0);
		WRT_REG_DWORD(&reg->isp25mq.rsp_q_out, 0);
	} else {
		WRT_REG_DWORD(&reg->isp24.req_q_in, 0);
		WRT_REG_DWORD(&reg->isp24.req_q_out, 0);
		WRT_REG_DWORD(&reg->isp24.rsp_q_in, 0);
		WRT_REG_DWORD(&reg->isp24.rsp_q_out, 0);
	}
	/* PCI posting */
	RD_REG_DWORD(&ioreg->hccr);
}

/**
 * qla2x00_init_rings() - Initializes firmware.
 * @ha: HA context
 *
 * Beginning of request ring has initialization control block already built
 * by nvram config routine.
 *
 * Returns 0 on success.
 */
static int
qla2x00_init_rings(scsi_qla_host_t *vha)
{
	int	rval;
	unsigned long flags = 0;
	int cnt, que;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req;
	struct rsp_que *rsp;
	struct scsi_qla_host *vp;
	struct mid_init_cb_24xx *mid_init_cb =
	    (struct mid_init_cb_24xx *) ha->init_cb;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Clear outstanding commands array. */
	for (que = 0; que < ha->max_queues; que++) {
		req = ha->req_q_map[que];
		if (!req)
			continue;
		for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS; cnt++)
			req->outstanding_cmds[cnt] = NULL;

		req->current_outstanding_cmd = 0;

		/* Initialize firmware. */
		req->ring_ptr  = req->ring;
		req->ring_index    = 0;
		req->cnt      = req->length;
	}

	for (que = 0; que < ha->max_queues; que++) {
		rsp = ha->rsp_q_map[que];
		if (!rsp)
			continue;
		rsp->ring_ptr = rsp->ring;
		rsp->ring_index    = 0;

		/* Initialize response queue entries */
		qla2x00_init_response_q_entries(rsp);
	}

	/* Clear RSCN queue. */
	list_for_each_entry(vp, &ha->vp_list, list) {
		vp->rscn_in_ptr = 0;
		vp->rscn_out_ptr = 0;
	}
	ha->isp_ops->config_rings(vha);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	/* Update any ISP specific firmware options before initialization. */
	ha->isp_ops->update_fw_options(vha);

	DEBUG(printk("scsi(%ld): Issue init firmware.\n", vha->host_no));

	if (ha->flags.npiv_supported) {
		if (ha->operating_mode == LOOP)
			ha->max_npiv_vports = MIN_MULTI_ID_FABRIC - 1;
		mid_init_cb->count = cpu_to_le16(ha->max_npiv_vports);
	}

	mid_init_cb->options = __constant_cpu_to_le16(BIT_1);

	rval = qla2x00_init_firmware(vha, ha->init_cb_size);
	if (rval) {
		DEBUG2_3(printk("scsi(%ld): Init firmware **** FAILED ****.\n",
		    vha->host_no));
	} else {
		DEBUG3(printk("scsi(%ld): Init firmware -- success.\n",
		    vha->host_no));
	}

	return (rval);
}

/**
 * qla2x00_fw_ready() - Waits for firmware ready.
 * @ha: HA context
 *
 * Returns 0 on success.
 */
static int
qla2x00_fw_ready(scsi_qla_host_t *vha)
{
	int		rval;
	unsigned long	wtime, mtime, cs84xx_time;
	uint16_t	min_wait;	/* Minimum wait time if loop is down */
	uint16_t	wait_time;	/* Wait time if loop is coming ready */
	uint16_t	state[3];
	struct qla_hw_data *ha = vha->hw;

	rval = QLA_SUCCESS;

	/* 20 seconds for loop down. */
	min_wait = 20;

	/*
	 * Firmware should take at most one RATOV to login, plus 5 seconds for
	 * our own processing.
	 */
	if ((wait_time = (ha->retry_count*ha->login_timeout) + 5) < min_wait) {
		wait_time = min_wait;
	}

	/* Min wait time if loop down */
	mtime = jiffies + (min_wait * HZ);

	/* wait time before firmware ready */
	wtime = jiffies + (wait_time * HZ);

	/* Wait for ISP to finish LIP */
	if (!vha->flags.init_done)
		qla_printk(KERN_INFO, ha, "Waiting for LIP to complete...\n");

	DEBUG3(printk("scsi(%ld): Waiting for LIP to complete...\n",
	    vha->host_no));

	do {
		rval = qla2x00_get_firmware_state(vha, state);
		if (rval == QLA_SUCCESS) {
			if (state[0] < FSTATE_LOSS_OF_SYNC) {
				vha->device_flags &= ~DFLG_NO_CABLE;
			}
			if (IS_QLA84XX(ha) && state[0] != FSTATE_READY) {
				DEBUG16(printk("scsi(%ld): fw_state=%x "
				    "84xx=%x.\n", vha->host_no, state[0],
				    state[2]));
				if ((state[2] & FSTATE_LOGGED_IN) &&
				    (state[2] & FSTATE_WAITING_FOR_VERIFY)) {
					DEBUG16(printk("scsi(%ld): Sending "
					    "verify iocb.\n", vha->host_no));

					cs84xx_time = jiffies;
					rval = qla84xx_init_chip(vha);
					if (rval != QLA_SUCCESS)
						break;

					/* Add time taken to initialize. */
					cs84xx_time = jiffies - cs84xx_time;
					wtime += cs84xx_time;
					mtime += cs84xx_time;
					DEBUG16(printk("scsi(%ld): Increasing "
					    "wait time by %ld. New time %ld\n",
					    vha->host_no, cs84xx_time, wtime));
				}
			} else if (state[0] == FSTATE_READY) {
				DEBUG(printk("scsi(%ld): F/W Ready - OK \n",
				    vha->host_no));

				qla2x00_get_retry_cnt(vha, &ha->retry_count,
				    &ha->login_timeout, &ha->r_a_tov);

				rval = QLA_SUCCESS;
				break;
			}

			rval = QLA_FUNCTION_FAILED;

			if (atomic_read(&vha->loop_down_timer) &&
			    state[0] != FSTATE_READY) {
				/* Loop down. Timeout on min_wait for states
				 * other than Wait for Login.
				 */
				if (time_after_eq(jiffies, mtime)) {
					qla_printk(KERN_INFO, ha,
					    "Cable is unplugged...\n");

					vha->device_flags |= DFLG_NO_CABLE;
					break;
				}
			}
		} else {
			/* Mailbox cmd failed. Timeout on min_wait. */
			if (time_after_eq(jiffies, mtime))
				break;
		}

		if (time_after_eq(jiffies, wtime))
			break;

		/* Delay for a while */
		msleep(500);

		DEBUG3(printk("scsi(%ld): fw_state=%x curr time=%lx.\n",
		    vha->host_no, state[0], jiffies));
	} while (1);

	DEBUG(printk("scsi(%ld): fw_state=%x curr time=%lx.\n",
	    vha->host_no, state[0], jiffies));

	if (rval) {
		DEBUG2_3(printk("scsi(%ld): Firmware ready **** FAILED ****.\n",
		    vha->host_no));
	}

	return (rval);
}

/*
 * qla2x00_configure_hba
 *      Setup adapter context.
 *
 * Input:
 *      ha = adapter state pointer.
 *
 * Returns:
 *      0 = success
 *
 * Context:
 *      Kernel context.
 */
static int
qla2x00_configure_hba(scsi_qla_host_t *vha)
{
	int		rval;
	uint16_t	loop_id;
	uint16_t	topo;
	uint16_t	sw_cap;
	uint8_t		al_pa;
	uint8_t		area;
	uint8_t		domain;
	char		connect_type[22];
	struct qla_hw_data *ha = vha->hw;

	/* Get host addresses. */
	rval = qla2x00_get_adapter_id(vha,
	    &loop_id, &al_pa, &area, &domain, &topo, &sw_cap);
	if (rval != QLA_SUCCESS) {
		if (LOOP_TRANSITION(vha) || atomic_read(&ha->loop_down_timer) ||
		    (rval == QLA_COMMAND_ERROR && loop_id == 0x7)) {
			DEBUG2(printk("%s(%ld) Loop is in a transition state\n",
			    __func__, vha->host_no));
		} else {
			qla_printk(KERN_WARNING, ha,
			    "ERROR -- Unable to get host loop ID.\n");
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		}
		return (rval);
	}

	if (topo == 4) {
		qla_printk(KERN_INFO, ha,
		    "Cannot get topology - retrying.\n");
		return (QLA_FUNCTION_FAILED);
	}

	vha->loop_id = loop_id;

	/* initialize */
	ha->min_external_loopid = SNS_FIRST_LOOP_ID;
	ha->operating_mode = LOOP;
	ha->switch_cap = 0;

	switch (topo) {
	case 0:
		DEBUG3(printk("scsi(%ld): HBA in NL topology.\n",
		    vha->host_no));
		ha->current_topology = ISP_CFG_NL;
		strcpy(connect_type, "(Loop)");
		break;

	case 1:
		DEBUG3(printk("scsi(%ld): HBA in FL topology.\n",
		    vha->host_no));
		ha->switch_cap = sw_cap;
		ha->current_topology = ISP_CFG_FL;
		strcpy(connect_type, "(FL_Port)");
		break;

	case 2:
		DEBUG3(printk("scsi(%ld): HBA in N P2P topology.\n",
		    vha->host_no));
		ha->operating_mode = P2P;
		ha->current_topology = ISP_CFG_N;
		strcpy(connect_type, "(N_Port-to-N_Port)");
		break;

	case 3:
		DEBUG3(printk("scsi(%ld): HBA in F P2P topology.\n",
		    vha->host_no));
		ha->switch_cap = sw_cap;
		ha->operating_mode = P2P;
		ha->current_topology = ISP_CFG_F;
		strcpy(connect_type, "(F_Port)");
		break;

	default:
		DEBUG3(printk("scsi(%ld): HBA in unknown topology %x. "
		    "Using NL.\n",
		    vha->host_no, topo));
		ha->current_topology = ISP_CFG_NL;
		strcpy(connect_type, "(Loop)");
		break;
	}

	/* Save Host port and loop ID. */
	/* byte order - Big Endian */
	vha->d_id.b.domain = domain;
	vha->d_id.b.area = area;
	vha->d_id.b.al_pa = al_pa;

	if (!vha->flags.init_done)
		qla_printk(KERN_INFO, ha,
		    "Topology - %s, Host Loop address 0x%x\n",
		    connect_type, vha->loop_id);

	if (rval) {
		DEBUG2_3(printk("scsi(%ld): FAILED.\n", vha->host_no));
	} else {
		DEBUG3(printk("scsi(%ld): exiting normally.\n", vha->host_no));
	}

	return(rval);
}

static inline void
qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
	char *def)
{
	char *st, *en;
	uint16_t index;
	struct qla_hw_data *ha = vha->hw;

	if (memcmp(model, BINZERO, len) != 0) {
		strncpy(ha->model_number, model, len);
		st = en = ha->model_number;
		en += len - 1;
		while (en > st) {
			if (*en != 0x20 && *en != 0x00)
				break;
			*en-- = '\0';
		}

		index = (ha->pdev->subsystem_device & 0xff);
		if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
		    index < QLA_MODEL_NAMES)
			strncpy(ha->model_desc,
			    qla2x00_model_name[index * 2 + 1],
			    sizeof(ha->model_desc) - 1);
	} else {
		index = (ha->pdev->subsystem_device & 0xff);
		if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
		    index < QLA_MODEL_NAMES) {
			strcpy(ha->model_number,
			    qla2x00_model_name[index * 2]);
			strncpy(ha->model_desc,
			    qla2x00_model_name[index * 2 + 1],
			    sizeof(ha->model_desc) - 1);
		} else {
			strcpy(ha->model_number, def);
		}
	}
	if (IS_FWI2_CAPABLE(ha))
		qla2xxx_get_vpd_field(vha, "\x82", ha->model_desc,
		    sizeof(ha->model_desc));
}

/* On sparc systems, obtain port and node WWN from firmware
 * properties.
 */
static void qla2xxx_nvram_wwn_from_ofw(scsi_qla_host_t *vha, nvram_t *nv)
{
#ifdef CONFIG_SPARC
	struct qla_hw_data *ha = vha->hw;
	struct pci_dev *pdev = ha->pdev;
	struct device_node *dp = pci_device_to_OF_node(pdev);
	const u8 *val;
	int len;

	val = of_get_property(dp, "port-wwn", &len);
	if (val && len >= WWN_SIZE)
		memcpy(nv->port_name, val, WWN_SIZE);

	val = of_get_property(dp, "node-wwn", &len);
	if (val && len >= WWN_SIZE)
		memcpy(nv->node_name, val, WWN_SIZE);
#endif
}

/*
 * NVRAM configuration for ISP 2xxx
 *
 * Input:
 *      ha                = adapter block pointer.
 *
 * Output:
 *      initialization control block in response_ring
 *      host adapters parameters in host adapter block
 *
 * Returns:
 *      0 = success.
 */
int
qla2x00_nvram_config(scsi_qla_host_t *vha)
{
	int		rval;
	uint8_t		chksum = 0;
	uint16_t	cnt;
	uint8_t		*dptr1, *dptr2;
	struct qla_hw_data *ha = vha->hw;
	init_cb_t	*icb = ha->init_cb;
	nvram_t		*nv = ha->nvram;
	uint8_t		*ptr = ha->nvram;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	rval = QLA_SUCCESS;

	/* Determine NVRAM starting address. */
	ha->nvram_size = sizeof(nvram_t);
	ha->nvram_base = 0;
	if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha))
		if ((RD_REG_WORD(&reg->ctrl_status) >> 14) == 1)
			ha->nvram_base = 0x80;

	/* Get NVRAM data and calculate checksum. */
	ha->isp_ops->read_nvram(vha, ptr, ha->nvram_base, ha->nvram_size);
	for (cnt = 0, chksum = 0; cnt < ha->nvram_size; cnt++)
		chksum += *ptr++;

	DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", vha->host_no));
	DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size));

	/* Bad NVRAM data, set defaults parameters. */
	if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' ||
	    nv->id[2] != 'P' || nv->id[3] != ' ' || nv->nvram_version < 1) {
		/* Reset NVRAM data. */
		qla_printk(KERN_WARNING, ha, "Inconsistent NVRAM detected: "
		    "checksum=0x%x id=%c version=0x%x.\n", chksum, nv->id[0],
		    nv->nvram_version);
		qla_printk(KERN_WARNING, ha, "Falling back to functioning (yet "
		    "invalid -- WWPN) defaults.\n");

		/*
		 * Set default initialization control block.
		 */
		memset(nv, 0, ha->nvram_size);
		nv->parameter_block_version = ICB_VERSION;

		if (IS_QLA23XX(ha)) {
			nv->firmware_options[0] = BIT_2 | BIT_1;
			nv->firmware_options[1] = BIT_7 | BIT_5;
			nv->add_firmware_options[0] = BIT_5;
			nv->add_firmware_options[1] = BIT_5 | BIT_4;
			nv->frame_payload_size = __constant_cpu_to_le16(2048);
			nv->special_options[1] = BIT_7;
		} else if (IS_QLA2200(ha)) {
			nv->firmware_options[0] = BIT_2 | BIT_1;
			nv->firmware_options[1] = BIT_7 | BIT_5;
			nv->add_firmware_options[0] = BIT_5;
			nv->add_firmware_options[1] = BIT_5 | BIT_4;
			nv->frame_payload_size = __constant_cpu_to_le16(1024);
		} else if (IS_QLA2100(ha)) {
			nv->firmware_options[0] = BIT_3 | BIT_1;
			nv->firmware_options[1] = BIT_5;
			nv->frame_payload_size = __constant_cpu_to_le16(1024);
		}

		nv->max_iocb_allocation = __constant_cpu_to_le16(256);
		nv->execution_throttle = __constant_cpu_to_le16(16);
		nv->retry_count = 8;
		nv->retry_delay = 1;

		nv->port_name[0] = 33;
		nv->port_name[3] = 224;
		nv->port_name[4] = 139;

		qla2xxx_nvram_wwn_from_ofw(vha, nv);

		nv->login_timeout = 4;

		/*
		 * Set default host adapter parameters
		 */
		nv->host_p[1] = BIT_2;
		nv->reset_delay = 5;
		nv->port_down_retry_count = 8;
		nv->max_luns_per_target = __constant_cpu_to_le16(8);
		nv->link_down_timeout = 60;

		rval = 1;
	}

#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
	/*
	 * The SN2 does not provide BIOS emulation which means you can't change
	 * potentially bogus BIOS settings. Force the use of default settings
	 * for link rate and frame size. Hope that the rest of the settings
	 * are valid.
	 */
	if (ia64_platform_is("sn2")) {
		nv->frame_payload_size = __constant_cpu_to_le16(2048);
		if (IS_QLA23XX(ha))
			nv->special_options[1] = BIT_7;
	}
#endif

	/* Reset Initialization control block */
	memset(icb, 0, ha->init_cb_size);

	/*
	 * Setup driver NVRAM options.
	 */
	nv->firmware_options[0] |= (BIT_6 | BIT_1);
	nv->firmware_options[0] &= ~(BIT_5 | BIT_4);
	nv->firmware_options[1] |= (BIT_5 | BIT_0);
	nv->firmware_options[1] &= ~BIT_4;

	if (IS_QLA23XX(ha)) {
		nv->firmware_options[0] |= BIT_2;
		nv->firmware_options[0] &= ~BIT_3;
		nv->add_firmware_options[1] |= BIT_5 | BIT_4;

		if (IS_QLA2300(ha)) {
			if (ha->fb_rev == FPM_2310) {
				strcpy(ha->model_number, "QLA2310");
			} else {
				strcpy(ha->model_number, "QLA2300");
			}
		} else {
			qla2x00_set_model_info(vha, nv->model_number,
			    sizeof(nv->model_number), "QLA23xx");
		}
	} else if (IS_QLA2200(ha)) {
		nv->firmware_options[0] |= BIT_2;
		/*
		 * 'Point-to-point preferred, else loop' is not a safe
		 * connection mode setting.
		 */
		if ((nv->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) ==
		    (BIT_5 | BIT_4)) {
			/* Force 'loop preferred, else point-to-point'. */
			nv->add_firmware_options[0] &= ~(BIT_6 | BIT_5 | BIT_4);
			nv->add_firmware_options[0] |= BIT_5;
		}
		strcpy(ha->model_number, "QLA22xx");
	} else /*if (IS_QLA2100(ha))*/ {
		strcpy(ha->model_number, "QLA2100");
	}

	/*
	 * Copy over NVRAM RISC parameter block to initialization control block.
	 */
	dptr1 = (uint8_t *)icb;
	dptr2 = (uint8_t *)&nv->parameter_block_version;
	cnt = (uint8_t *)&icb->request_q_outpointer - (uint8_t *)&icb->version;
	while (cnt--)
		*dptr1++ = *dptr2++;

	/* Copy 2nd half. */
	dptr1 = (uint8_t *)icb->add_firmware_options;
	cnt = (uint8_t *)icb->reserved_3 - (uint8_t *)icb->add_firmware_options;
	while (cnt--)
		*dptr1++ = *dptr2++;

	/* Use alternate WWN? */
	if (nv->host_p[1] & BIT_7) {
		memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
		memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
	}

	/* Prepare nodename */
	if ((icb->firmware_options[1] & BIT_6) == 0) {
		/*
		 * Firmware will apply the following mask if the nodename was
		 * not provided.
		 */
		memcpy(icb->node_name, icb->port_name, WWN_SIZE);
		icb->node_name[0] &= 0xF0;
	}

	/*
	 * Set host adapter parameters.
	 */
	if (nv->host_p[0] & BIT_7)
		ql2xextended_error_logging = 1;
	ha->flags.disable_risc_code_load = ((nv->host_p[0] & BIT_4) ? 1 : 0);
	/* Always load RISC code on non ISP2[12]00 chips. */
	if (!IS_QLA2100(ha) && !IS_QLA2200(ha))
		ha->flags.disable_risc_code_load = 0;
	ha->flags.enable_lip_reset = ((nv->host_p[1] & BIT_1) ? 1 : 0);
	ha->flags.enable_lip_full_login = ((nv->host_p[1] & BIT_2) ? 1 : 0);
	ha->flags.enable_target_reset = ((nv->host_p[1] & BIT_3) ? 1 : 0);
	ha->flags.enable_led_scheme = (nv->special_options[1] & BIT_4) ? 1 : 0;
	ha->flags.disable_serdes = 0;

	ha->operating_mode =
	    (icb->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) >> 4;

	memcpy(ha->fw_seriallink_options, nv->seriallink_options,
	    sizeof(ha->fw_seriallink_options));

	/* save HBA serial number */
	ha->serial0 = icb->port_name[5];
	ha->serial1 = icb->port_name[6];
	ha->serial2 = icb->port_name[7];
	memcpy(vha->node_name, icb->node_name, WWN_SIZE);
	memcpy(vha->port_name, icb->port_name, WWN_SIZE);

	icb->execution_throttle = __constant_cpu_to_le16(0xFFFF);

	ha->retry_count = nv->retry_count;

	/* Set minimum login_timeout to 4 seconds. */
	if (nv->login_timeout < ql2xlogintimeout)
		nv->login_timeout = ql2xlogintimeout;
	if (nv->login_timeout < 4)
		nv->login_timeout = 4;
	ha->login_timeout = nv->login_timeout;
	icb->login_timeout = nv->login_timeout;

	/* Set minimum RATOV to 100 tenths of a second. */
	ha->r_a_tov = 100;

	ha->loop_reset_delay = nv->reset_delay;

	/* Link Down Timeout = 0:
	 *
	 *	When Port Down timer expires we will start returning
	 *	I/O's to OS with "DID_NO_CONNECT".
	 *
	 * Link Down Timeout != 0:
	 *
	 *	The driver waits for the link to come up after link down
	 *	before returning I/Os to OS with "DID_NO_CONNECT".
	 */
	if (nv->link_down_timeout == 0) {
		ha->loop_down_abort_time =
		    (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
	} else {
		ha->link_down_timeout = nv->link_down_timeout;
		ha->loop_down_abort_time =
		    (LOOP_DOWN_TIME - ha->link_down_timeout);
	}

	/*
	 * Need enough time to try and get the port back.
	 */
	ha->port_down_retry_count = nv->port_down_retry_count;
	if (qlport_down_retry)
		ha->port_down_retry_count = qlport_down_retry;
	/* Set login_retry_count */
	ha->login_retry_count = nv->retry_count;
	if (ha->port_down_retry_count == nv->port_down_retry_count &&
	    ha->port_down_retry_count > 3)
		ha->login_retry_count = ha->port_down_retry_count;
	else if (ha->port_down_retry_count > (int)ha->login_retry_count)
		ha->login_retry_count = ha->port_down_retry_count;
	if (ql2xloginretrycount)
		ha->login_retry_count = ql2xloginretrycount;

	icb->lun_enables = __constant_cpu_to_le16(0);
	icb->command_resource_count = 0;
	icb->immediate_notify_resource_count = 0;
	icb->timeout = __constant_cpu_to_le16(0);

	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
		/* Enable RIO */
		icb->firmware_options[0] &= ~BIT_3;
		icb->add_firmware_options[0] &=
		    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
		icb->add_firmware_options[0] |= BIT_2;
		icb->response_accumulation_timer = 3;
		icb->interrupt_delay_timer = 5;

		vha->flags.process_response_queue = 1;
	} else {
		/* Enable ZIO. */
		if (!vha->flags.init_done) {
			ha->zio_mode = icb->add_firmware_options[0] &
			    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
			ha->zio_timer = icb->interrupt_delay_timer ?
			    icb->interrupt_delay_timer : 2;
1928 icb->interrupt_delay_timer: 2; 1929 } 1930 icb->add_firmware_options[0] &= 1931 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0); 1932 vha->flags.process_response_queue = 0; 1933 if (ha->zio_mode != QLA_ZIO_DISABLED) { 1934 ha->zio_mode = QLA_ZIO_MODE_6; 1935 1936 DEBUG2(printk("scsi(%ld): ZIO mode %d enabled; timer " 1937 "delay (%d us).\n", vha->host_no, ha->zio_mode, 1938 ha->zio_timer * 100)); 1939 qla_printk(KERN_INFO, ha, 1940 "ZIO mode %d enabled; timer delay (%d us).\n", 1941 ha->zio_mode, ha->zio_timer * 100); 1942 1943 icb->add_firmware_options[0] |= (uint8_t)ha->zio_mode; 1944 icb->interrupt_delay_timer = (uint8_t)ha->zio_timer; 1945 vha->flags.process_response_queue = 1; 1946 } 1947 } 1948 1949 if (rval) { 1950 DEBUG2_3(printk(KERN_WARNING 1951 "scsi(%ld): NVRAM configuration failed!\n", vha->host_no)); 1952 } 1953 return (rval); 1954 } 1955 1956 static void 1957 qla2x00_rport_del(void *data) 1958 { 1959 fc_port_t *fcport = data; 1960 struct fc_rport *rport; 1961 1962 spin_lock_irq(fcport->vha->host->host_lock); 1963 rport = fcport->drport; 1964 fcport->drport = NULL; 1965 spin_unlock_irq(fcport->vha->host->host_lock); 1966 if (rport) 1967 fc_remote_port_delete(rport); 1968 } 1969 1970 /** 1971 * qla2x00_alloc_fcport() - Allocate a generic fcport. 1972 * @ha: HA context 1973 * @flags: allocation flags 1974 * 1975 * Returns a pointer to the allocated fcport, or NULL, if none available. 1976 */ 1977 static fc_port_t * 1978 qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags) 1979 { 1980 fc_port_t *fcport; 1981 1982 fcport = kzalloc(sizeof(fc_port_t), flags); 1983 if (!fcport) 1984 return NULL; 1985 1986 /* Setup fcport template structure. */ 1987 fcport->vha = vha; 1988 fcport->vp_idx = vha->vp_idx; 1989 fcport->port_type = FCT_UNKNOWN; 1990 fcport->loop_id = FC_NO_LOOP_ID; 1991 atomic_set(&fcport->state, FCS_UNCONFIGURED); 1992 fcport->flags = FCF_RLC_SUPPORT; 1993 fcport->supported_classes = FC_COS_UNSPECIFIED; 1994 1995 return fcport; 1996 } 1997 1998 /* 1999 * qla2x00_configure_loop 2000 * Updates Fibre Channel Device Database with what is actually on loop. 2001 * 2002 * Input: 2003 * ha = adapter block pointer. 2004 * 2005 * Returns: 2006 * 0 = success. 2007 * 1 = error. 2008 * 2 = database was full and device was not configured. 2009 */ 2010 static int 2011 qla2x00_configure_loop(scsi_qla_host_t *vha) 2012 { 2013 int rval; 2014 unsigned long flags, save_flags; 2015 struct qla_hw_data *ha = vha->hw; 2016 rval = QLA_SUCCESS; 2017 2018 /* Get Initiator ID */ 2019 if (test_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags)) { 2020 rval = qla2x00_configure_hba(vha); 2021 if (rval != QLA_SUCCESS) { 2022 DEBUG(printk("scsi(%ld): Unable to configure HBA.\n", 2023 vha->host_no)); 2024 return (rval); 2025 } 2026 } 2027 2028 save_flags = flags = vha->dpc_flags; 2029 DEBUG(printk("scsi(%ld): Configure loop -- dpc flags =0x%lx\n", 2030 vha->host_no, flags)); 2031 2032 /* 2033 * If we have both an RSCN and PORT UPDATE pending then handle them 2034 * both at the same time. 
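 *
 * The dpc flags were snapshotted into 'flags'/'save_flags' above; the live
 * bits are cleared below so that a new RSCN or port-update event arriving
 * while this scan runs sets them again and schedules another pass.  The
 * topology checks that follow then decide, on the snapshot, whether a
 * local loop scan, a fabric (RSCN driven) scan, or both are required.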
2035 */ 2036 clear_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 2037 clear_bit(RSCN_UPDATE, &vha->dpc_flags); 2038 2039 /* Determine what we need to do */ 2040 if (ha->current_topology == ISP_CFG_FL && 2041 (test_bit(LOCAL_LOOP_UPDATE, &flags))) { 2042 2043 vha->flags.rscn_queue_overflow = 1; 2044 set_bit(RSCN_UPDATE, &flags); 2045 2046 } else if (ha->current_topology == ISP_CFG_F && 2047 (test_bit(LOCAL_LOOP_UPDATE, &flags))) { 2048 2049 vha->flags.rscn_queue_overflow = 1; 2050 set_bit(RSCN_UPDATE, &flags); 2051 clear_bit(LOCAL_LOOP_UPDATE, &flags); 2052 2053 } else if (ha->current_topology == ISP_CFG_N) { 2054 clear_bit(RSCN_UPDATE, &flags); 2055 2056 } else if (!vha->flags.online || 2057 (test_bit(ABORT_ISP_ACTIVE, &flags))) { 2058 2059 vha->flags.rscn_queue_overflow = 1; 2060 set_bit(RSCN_UPDATE, &flags); 2061 set_bit(LOCAL_LOOP_UPDATE, &flags); 2062 } 2063 2064 if (test_bit(LOCAL_LOOP_UPDATE, &flags)) { 2065 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) 2066 rval = QLA_FUNCTION_FAILED; 2067 else 2068 rval = qla2x00_configure_local_loop(vha); 2069 } 2070 2071 if (rval == QLA_SUCCESS && test_bit(RSCN_UPDATE, &flags)) { 2072 if (LOOP_TRANSITION(vha)) 2073 rval = QLA_FUNCTION_FAILED; 2074 else 2075 rval = qla2x00_configure_fabric(vha); 2076 } 2077 2078 if (rval == QLA_SUCCESS) { 2079 if (atomic_read(&vha->loop_down_timer) || 2080 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) { 2081 rval = QLA_FUNCTION_FAILED; 2082 } else { 2083 atomic_set(&vha->loop_state, LOOP_READY); 2084 2085 DEBUG(printk("scsi(%ld): LOOP READY\n", vha->host_no)); 2086 } 2087 } 2088 2089 if (rval) { 2090 DEBUG2_3(printk("%s(%ld): *** FAILED ***\n", 2091 __func__, vha->host_no)); 2092 } else { 2093 DEBUG3(printk("%s: exiting normally\n", __func__)); 2094 } 2095 2096 /* Restore state if a resync event occurred during processing */ 2097 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) { 2098 if (test_bit(LOCAL_LOOP_UPDATE, &save_flags)) 2099 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 2100 if (test_bit(RSCN_UPDATE, &save_flags)) 2101 set_bit(RSCN_UPDATE, &vha->dpc_flags); 2102 } 2103 2104 return (rval); 2105 } 2106 2107 2108 2109 /* 2110 * qla2x00_configure_local_loop 2111 * Updates Fibre Channel Device Database with local loop devices. 2112 * 2113 * Input: 2114 * ha = adapter block pointer. 2115 * 2116 * Returns: 2117 * 0 = success. 2118 */ 2119 static int 2120 qla2x00_configure_local_loop(scsi_qla_host_t *vha) 2121 { 2122 int rval, rval2; 2123 int found_devs; 2124 int found; 2125 fc_port_t *fcport, *new_fcport; 2126 2127 uint16_t index; 2128 uint16_t entries; 2129 char *id_iter; 2130 uint16_t loop_id; 2131 uint8_t domain, area, al_pa; 2132 struct qla_hw_data *ha = vha->hw; 2133 2134 found_devs = 0; 2135 new_fcport = NULL; 2136 entries = MAX_FIBRE_DEVICES; 2137 2138 DEBUG3(printk("scsi(%ld): Getting FCAL position map\n", vha->host_no)); 2139 DEBUG3(qla2x00_get_fcal_position_map(vha, NULL)); 2140 2141 /* Get list of logged in devices. */ 2142 memset(ha->gid_list, 0, GID_LIST_SIZE); 2143 rval = qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma, 2144 &entries); 2145 if (rval != QLA_SUCCESS) 2146 goto cleanup_allocation; 2147 2148 DEBUG3(printk("scsi(%ld): Entries in ID list (%d)\n", 2149 ha->host_no, entries)); 2150 DEBUG3(qla2x00_dump_buffer((uint8_t *)ha->gid_list, 2151 entries * sizeof(struct gid_list_info))); 2152 2153 /* Allocate temporary fcport for any new fcports discovered. 
*/ 2154 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); 2155 if (new_fcport == NULL) { 2156 rval = QLA_MEMORY_ALLOC_FAILED; 2157 goto cleanup_allocation; 2158 } 2159 new_fcport->flags &= ~FCF_FABRIC_DEVICE; 2160 2161 /* 2162 * Mark local devices that were present with FCF_DEVICE_LOST for now. 2163 */ 2164 list_for_each_entry(fcport, &vha->vp_fcports, list) { 2165 if (atomic_read(&fcport->state) == FCS_ONLINE && 2166 fcport->port_type != FCT_BROADCAST && 2167 (fcport->flags & FCF_FABRIC_DEVICE) == 0) { 2168 2169 DEBUG(printk("scsi(%ld): Marking port lost, " 2170 "loop_id=0x%04x\n", 2171 vha->host_no, fcport->loop_id)); 2172 2173 atomic_set(&fcport->state, FCS_DEVICE_LOST); 2174 fcport->flags &= ~FCF_FARP_DONE; 2175 } 2176 } 2177 2178 /* Add devices to port list. */ 2179 id_iter = (char *)ha->gid_list; 2180 for (index = 0; index < entries; index++) { 2181 domain = ((struct gid_list_info *)id_iter)->domain; 2182 area = ((struct gid_list_info *)id_iter)->area; 2183 al_pa = ((struct gid_list_info *)id_iter)->al_pa; 2184 if (IS_QLA2100(ha) || IS_QLA2200(ha)) 2185 loop_id = (uint16_t) 2186 ((struct gid_list_info *)id_iter)->loop_id_2100; 2187 else 2188 loop_id = le16_to_cpu( 2189 ((struct gid_list_info *)id_iter)->loop_id); 2190 id_iter += ha->gid_list_info_size; 2191 2192 /* Bypass reserved domain fields. */ 2193 if ((domain & 0xf0) == 0xf0) 2194 continue; 2195 2196 /* Bypass if not same domain and area of adapter. */ 2197 if (area && domain && 2198 (area != vha->d_id.b.area || domain != vha->d_id.b.domain)) 2199 continue; 2200 2201 /* Bypass invalid local loop ID. */ 2202 if (loop_id > LAST_LOCAL_LOOP_ID) 2203 continue; 2204 2205 /* Fill in member data. */ 2206 new_fcport->d_id.b.domain = domain; 2207 new_fcport->d_id.b.area = area; 2208 new_fcport->d_id.b.al_pa = al_pa; 2209 new_fcport->loop_id = loop_id; 2210 new_fcport->vp_idx = vha->vp_idx; 2211 rval2 = qla2x00_get_port_database(vha, new_fcport, 0); 2212 if (rval2 != QLA_SUCCESS) { 2213 DEBUG2(printk("scsi(%ld): Failed to retrieve fcport " 2214 "information -- get_port_database=%x, " 2215 "loop_id=0x%04x\n", 2216 vha->host_no, rval2, new_fcport->loop_id)); 2217 DEBUG2(printk("scsi(%ld): Scheduling resync...\n", 2218 vha->host_no)); 2219 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 2220 continue; 2221 } 2222 2223 /* Check for matching device in port list. */ 2224 found = 0; 2225 fcport = NULL; 2226 list_for_each_entry(fcport, &vha->vp_fcports, list) { 2227 if (memcmp(new_fcport->port_name, fcport->port_name, 2228 WWN_SIZE)) 2229 continue; 2230 2231 fcport->flags &= ~(FCF_FABRIC_DEVICE | 2232 FCF_PERSISTENT_BOUND); 2233 fcport->loop_id = new_fcport->loop_id; 2234 fcport->port_type = new_fcport->port_type; 2235 fcport->d_id.b24 = new_fcport->d_id.b24; 2236 memcpy(fcport->node_name, new_fcport->node_name, 2237 WWN_SIZE); 2238 2239 found++; 2240 break; 2241 } 2242 2243 if (!found) { 2244 /* New device, add to fcports list. */ 2245 new_fcport->flags &= ~FCF_PERSISTENT_BOUND; 2246 if (vha->vp_idx) { 2247 new_fcport->vha = vha; 2248 new_fcport->vp_idx = vha->vp_idx; 2249 } 2250 list_add_tail(&new_fcport->list, &vha->vp_fcports); 2251 2252 /* Allocate a new replacement fcport. */ 2253 fcport = new_fcport; 2254 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); 2255 if (new_fcport == NULL) { 2256 rval = QLA_MEMORY_ALLOC_FAILED; 2257 goto cleanup_allocation; 2258 } 2259 new_fcport->flags &= ~FCF_FABRIC_DEVICE; 2260 } 2261 2262 /* Base iIDMA settings on HBA port speed. 
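 * For local loop devices there is no name-server GPSC data to consult, so
 * each port is assumed to run at the HBA's own negotiated rate
 * (ha->link_data_rate); the actual iIDMA adjustment, if the chip supports
 * it, is performed by qla2x00_iidma_fcport() from within
 * qla2x00_update_fcport() below.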
*/ 2263 fcport->fp_speed = ha->link_data_rate; 2264 2265 qla2x00_update_fcport(vha, fcport); 2266 2267 found_devs++; 2268 } 2269 2270 cleanup_allocation: 2271 kfree(new_fcport); 2272 2273 if (rval != QLA_SUCCESS) { 2274 DEBUG2(printk("scsi(%ld): Configure local loop error exit: " 2275 "rval=%x\n", vha->host_no, rval)); 2276 } 2277 2278 if (found_devs) { 2279 vha->device_flags |= DFLG_LOCAL_DEVICES; 2280 vha->device_flags &= ~DFLG_RETRY_LOCAL_DEVICES; 2281 } 2282 2283 return (rval); 2284 } 2285 2286 static void 2287 qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) 2288 { 2289 #define LS_UNKNOWN 2 2290 static char *link_speeds[5] = { "1", "2", "?", "4", "8" }; 2291 int rval; 2292 uint16_t mb[6]; 2293 struct qla_hw_data *ha = vha->hw; 2294 2295 if (!IS_IIDMA_CAPABLE(ha)) 2296 return; 2297 2298 if (fcport->fp_speed == PORT_SPEED_UNKNOWN || 2299 fcport->fp_speed > ha->link_data_rate) 2300 return; 2301 2302 rval = qla2x00_set_idma_speed(vha, fcport->loop_id, fcport->fp_speed, 2303 mb); 2304 if (rval != QLA_SUCCESS) { 2305 DEBUG2(printk("scsi(%ld): Unable to adjust iIDMA " 2306 "%02x%02x%02x%02x%02x%02x%02x%02x -- %04x %x %04x %04x.\n", 2307 vha->host_no, fcport->port_name[0], fcport->port_name[1], 2308 fcport->port_name[2], fcport->port_name[3], 2309 fcport->port_name[4], fcport->port_name[5], 2310 fcport->port_name[6], fcport->port_name[7], rval, 2311 fcport->fp_speed, mb[0], mb[1])); 2312 } else { 2313 DEBUG2(qla_printk(KERN_INFO, ha, 2314 "iIDMA adjusted to %s GB/s on " 2315 "%02x%02x%02x%02x%02x%02x%02x%02x.\n", 2316 link_speeds[fcport->fp_speed], fcport->port_name[0], 2317 fcport->port_name[1], fcport->port_name[2], 2318 fcport->port_name[3], fcport->port_name[4], 2319 fcport->port_name[5], fcport->port_name[6], 2320 fcport->port_name[7])); 2321 } 2322 } 2323 2324 static void 2325 qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport) 2326 { 2327 struct fc_rport_identifiers rport_ids; 2328 struct fc_rport *rport; 2329 struct qla_hw_data *ha = vha->hw; 2330 2331 if (fcport->drport) 2332 qla2x00_rport_del(fcport); 2333 2334 rport_ids.node_name = wwn_to_u64(fcport->node_name); 2335 rport_ids.port_name = wwn_to_u64(fcport->port_name); 2336 rport_ids.port_id = fcport->d_id.b.domain << 16 | 2337 fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa; 2338 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN; 2339 fcport->rport = rport = fc_remote_port_add(vha->host, 0, &rport_ids); 2340 if (!rport) { 2341 qla_printk(KERN_WARNING, ha, 2342 "Unable to allocate fc remote port!\n"); 2343 return; 2344 } 2345 spin_lock_irq(fcport->vha->host->host_lock); 2346 *((fc_port_t **)rport->dd_data) = fcport; 2347 spin_unlock_irq(fcport->vha->host->host_lock); 2348 2349 rport->supported_classes = fcport->supported_classes; 2350 2351 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN; 2352 if (fcport->port_type == FCT_INITIATOR) 2353 rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR; 2354 if (fcport->port_type == FCT_TARGET) 2355 rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET; 2356 fc_remote_port_rolechg(rport, rport_ids.roles); 2357 } 2358 2359 /* 2360 * qla2x00_update_fcport 2361 * Updates device on list. 2362 * 2363 * Input: 2364 * ha = adapter block pointer. 2365 * fcport = port structure pointer. 2366 * 2367 * Return: 2368 * 0 - Success 2369 * BIT_0 - error 2370 * 2371 * Context: 2372 * Kernel context. 
2373 */ 2374 void 2375 qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) 2376 { 2377 struct qla_hw_data *ha = vha->hw; 2378 2379 fcport->vha = vha; 2380 fcport->login_retry = 0; 2381 fcport->port_login_retry_count = ha->port_down_retry_count * 2382 PORT_RETRY_TIME; 2383 atomic_set(&fcport->port_down_timer, ha->port_down_retry_count * 2384 PORT_RETRY_TIME); 2385 fcport->flags &= ~FCF_LOGIN_NEEDED; 2386 2387 qla2x00_iidma_fcport(vha, fcport); 2388 2389 atomic_set(&fcport->state, FCS_ONLINE); 2390 2391 qla2x00_reg_remote_port(vha, fcport); 2392 } 2393 2394 /* 2395 * qla2x00_configure_fabric 2396 * Setup SNS devices with loop ID's. 2397 * 2398 * Input: 2399 * ha = adapter block pointer. 2400 * 2401 * Returns: 2402 * 0 = success. 2403 * BIT_0 = error 2404 */ 2405 static int 2406 qla2x00_configure_fabric(scsi_qla_host_t *vha) 2407 { 2408 int rval, rval2; 2409 fc_port_t *fcport, *fcptemp; 2410 uint16_t next_loopid; 2411 uint16_t mb[MAILBOX_REGISTER_COUNT]; 2412 uint16_t loop_id; 2413 LIST_HEAD(new_fcports); 2414 struct qla_hw_data *ha = vha->hw; 2415 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 2416 2417 /* If FL port exists, then SNS is present */ 2418 if (IS_FWI2_CAPABLE(ha)) 2419 loop_id = NPH_F_PORT; 2420 else 2421 loop_id = SNS_FL_PORT; 2422 rval = qla2x00_get_port_name(vha, loop_id, vha->fabric_node_name, 1); 2423 if (rval != QLA_SUCCESS) { 2424 DEBUG2(printk("scsi(%ld): MBC_GET_PORT_NAME Failed, No FL " 2425 "Port\n", vha->host_no)); 2426 2427 vha->device_flags &= ~SWITCH_FOUND; 2428 return (QLA_SUCCESS); 2429 } 2430 vha->device_flags |= SWITCH_FOUND; 2431 2432 /* Mark devices that need re-synchronization. */ 2433 rval2 = qla2x00_device_resync(vha); 2434 if (rval2 == QLA_RSCNS_HANDLED) { 2435 /* No point doing the scan, just continue. */ 2436 return (QLA_SUCCESS); 2437 } 2438 do { 2439 /* FDMI support. */ 2440 if (ql2xfdmienable && 2441 test_and_clear_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags)) 2442 qla2x00_fdmi_register(vha); 2443 2444 /* Ensure we are logged into the SNS. */ 2445 if (IS_FWI2_CAPABLE(ha)) 2446 loop_id = NPH_SNS; 2447 else 2448 loop_id = SIMPLE_NAME_SERVER; 2449 ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff, 2450 0xfc, mb, BIT_1 | BIT_0); 2451 if (mb[0] != MBS_COMMAND_COMPLETE) { 2452 DEBUG2(qla_printk(KERN_INFO, ha, 2453 "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x " 2454 "mb[2]=%x mb[6]=%x mb[7]=%x\n", loop_id, 2455 mb[0], mb[1], mb[2], mb[6], mb[7])); 2456 return (QLA_SUCCESS); 2457 } 2458 2459 if (test_and_clear_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags)) { 2460 if (qla2x00_rft_id(vha)) { 2461 /* EMPTY */ 2462 DEBUG2(printk("scsi(%ld): Register FC-4 " 2463 "TYPE failed.\n", vha->host_no)); 2464 } 2465 if (qla2x00_rff_id(vha)) { 2466 /* EMPTY */ 2467 DEBUG2(printk("scsi(%ld): Register FC-4 " 2468 "Features failed.\n", vha->host_no)); 2469 } 2470 if (qla2x00_rnn_id(vha)) { 2471 /* EMPTY */ 2472 DEBUG2(printk("scsi(%ld): Register Node Name " 2473 "failed.\n", vha->host_no)); 2474 } else if (qla2x00_rsnn_nn(vha)) { 2475 /* EMPTY */ 2476 DEBUG2(printk("scsi(%ld): Register Symbolic " 2477 "Node Name failed.\n", vha->host_no)); 2478 } 2479 } 2480 2481 rval = qla2x00_find_all_fabric_devs(vha, &new_fcports); 2482 if (rval != QLA_SUCCESS) 2483 break; 2484 2485 /* 2486 * Logout all previous fabric devices marked lost, except 2487 * tape devices. 
2488 */ 2489 list_for_each_entry(fcport, &vha->vp_fcports, list) { 2490 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) 2491 break; 2492 2493 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) 2494 continue; 2495 2496 if (atomic_read(&fcport->state) == FCS_DEVICE_LOST) { 2497 qla2x00_mark_device_lost(vha, fcport, 2498 ql2xplogiabsentdevice, 0); 2499 if (fcport->loop_id != FC_NO_LOOP_ID && 2500 (fcport->flags & FCF_TAPE_PRESENT) == 0 && 2501 fcport->port_type != FCT_INITIATOR && 2502 fcport->port_type != FCT_BROADCAST) { 2503 ha->isp_ops->fabric_logout(vha, 2504 fcport->loop_id, 2505 fcport->d_id.b.domain, 2506 fcport->d_id.b.area, 2507 fcport->d_id.b.al_pa); 2508 fcport->loop_id = FC_NO_LOOP_ID; 2509 } 2510 } 2511 } 2512 2513 /* Starting free loop ID. */ 2514 next_loopid = ha->min_external_loopid; 2515 2516 /* 2517 * Scan through our port list and login entries that need to be 2518 * logged in. 2519 */ 2520 list_for_each_entry(fcport, &vha->vp_fcports, list) { 2521 if (atomic_read(&vha->loop_down_timer) || 2522 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) 2523 break; 2524 2525 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 || 2526 (fcport->flags & FCF_LOGIN_NEEDED) == 0) 2527 continue; 2528 2529 if (fcport->loop_id == FC_NO_LOOP_ID) { 2530 fcport->loop_id = next_loopid; 2531 rval = qla2x00_find_new_loop_id( 2532 base_vha, fcport); 2533 if (rval != QLA_SUCCESS) { 2534 /* Ran out of IDs to use */ 2535 break; 2536 } 2537 } 2538 /* Login and update database */ 2539 qla2x00_fabric_dev_login(vha, fcport, &next_loopid); 2540 } 2541 2542 /* Exit if out of loop IDs. */ 2543 if (rval != QLA_SUCCESS) { 2544 break; 2545 } 2546 2547 /* 2548 * Login and add the new devices to our port list. 2549 */ 2550 list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) { 2551 if (atomic_read(&vha->loop_down_timer) || 2552 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) 2553 break; 2554 2555 /* Find a new loop ID to use. */ 2556 fcport->loop_id = next_loopid; 2557 rval = qla2x00_find_new_loop_id(base_vha, fcport); 2558 if (rval != QLA_SUCCESS) { 2559 /* Ran out of IDs to use */ 2560 break; 2561 } 2562 2563 /* Login and update database */ 2564 qla2x00_fabric_dev_login(vha, fcport, &next_loopid); 2565 2566 if (vha->vp_idx) { 2567 fcport->vha = vha; 2568 fcport->vp_idx = vha->vp_idx; 2569 } 2570 list_move_tail(&fcport->list, &vha->vp_fcports); 2571 } 2572 } while (0); 2573 2574 /* Free all new device structures not processed. */ 2575 list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) { 2576 list_del(&fcport->list); 2577 kfree(fcport); 2578 } 2579 2580 if (rval) { 2581 DEBUG2(printk("scsi(%ld): Configure fabric error exit: " 2582 "rval=%d\n", vha->host_no, rval)); 2583 } 2584 2585 return (rval); 2586 } 2587 2588 2589 /* 2590 * qla2x00_find_all_fabric_devs 2591 * 2592 * Input: 2593 * ha = adapter block pointer. 2594 * dev = database device entry pointer. 2595 * 2596 * Returns: 2597 * 0 = success. 2598 * 2599 * Context: 2600 * Kernel context. 2601 */ 2602 static int 2603 qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha, 2604 struct list_head *new_fcports) 2605 { 2606 int rval; 2607 uint16_t loop_id; 2608 fc_port_t *fcport, *new_fcport, *fcptemp; 2609 int found; 2610 2611 sw_info_t *swl; 2612 int swl_idx; 2613 int first_dev, last_dev; 2614 port_id_t wrap, nxt_d_id; 2615 struct qla_hw_data *ha = vha->hw; 2616 struct scsi_qla_host *vp, *base_vha = pci_get_drvdata(ha->pdev); 2617 struct scsi_qla_host *tvp; 2618 2619 rval = QLA_SUCCESS; 2620 2621 /* Try GID_PT to get device list, else GAN. 
*/ 2622 swl = kcalloc(MAX_FIBRE_DEVICES, sizeof(sw_info_t), GFP_KERNEL); 2623 if (!swl) { 2624 /*EMPTY*/ 2625 DEBUG2(printk("scsi(%ld): GID_PT allocations failed, fallback " 2626 "on GA_NXT\n", vha->host_no)); 2627 } else { 2628 if (qla2x00_gid_pt(vha, swl) != QLA_SUCCESS) { 2629 kfree(swl); 2630 swl = NULL; 2631 } else if (qla2x00_gpn_id(vha, swl) != QLA_SUCCESS) { 2632 kfree(swl); 2633 swl = NULL; 2634 } else if (qla2x00_gnn_id(vha, swl) != QLA_SUCCESS) { 2635 kfree(swl); 2636 swl = NULL; 2637 } else if (ql2xiidmaenable && 2638 qla2x00_gfpn_id(vha, swl) == QLA_SUCCESS) { 2639 qla2x00_gpsc(vha, swl); 2640 } 2641 } 2642 swl_idx = 0; 2643 2644 /* Allocate temporary fcport for any new fcports discovered. */ 2645 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); 2646 if (new_fcport == NULL) { 2647 kfree(swl); 2648 return (QLA_MEMORY_ALLOC_FAILED); 2649 } 2650 new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED); 2651 /* Set start port ID scan at adapter ID. */ 2652 first_dev = 1; 2653 last_dev = 0; 2654 2655 /* Starting free loop ID. */ 2656 loop_id = ha->min_external_loopid; 2657 for (; loop_id <= ha->max_loop_id; loop_id++) { 2658 if (qla2x00_is_reserved_id(vha, loop_id)) 2659 continue; 2660 2661 if (atomic_read(&vha->loop_down_timer) || LOOP_TRANSITION(vha)) 2662 break; 2663 2664 if (swl != NULL) { 2665 if (last_dev) { 2666 wrap.b24 = new_fcport->d_id.b24; 2667 } else { 2668 new_fcport->d_id.b24 = swl[swl_idx].d_id.b24; 2669 memcpy(new_fcport->node_name, 2670 swl[swl_idx].node_name, WWN_SIZE); 2671 memcpy(new_fcport->port_name, 2672 swl[swl_idx].port_name, WWN_SIZE); 2673 memcpy(new_fcport->fabric_port_name, 2674 swl[swl_idx].fabric_port_name, WWN_SIZE); 2675 new_fcport->fp_speed = swl[swl_idx].fp_speed; 2676 2677 if (swl[swl_idx].d_id.b.rsvd_1 != 0) { 2678 last_dev = 1; 2679 } 2680 swl_idx++; 2681 } 2682 } else { 2683 /* Send GA_NXT to the switch */ 2684 rval = qla2x00_ga_nxt(vha, new_fcport); 2685 if (rval != QLA_SUCCESS) { 2686 qla_printk(KERN_WARNING, ha, 2687 "SNS scan failed -- assuming zero-entry " 2688 "result...\n"); 2689 list_for_each_entry_safe(fcport, fcptemp, 2690 new_fcports, list) { 2691 list_del(&fcport->list); 2692 kfree(fcport); 2693 } 2694 rval = QLA_SUCCESS; 2695 break; 2696 } 2697 } 2698 2699 /* If wrap on switch device list, exit. */ 2700 if (first_dev) { 2701 wrap.b24 = new_fcport->d_id.b24; 2702 first_dev = 0; 2703 } else if (new_fcport->d_id.b24 == wrap.b24) { 2704 DEBUG2(printk("scsi(%ld): device wrap (%02x%02x%02x)\n", 2705 vha->host_no, new_fcport->d_id.b.domain, 2706 new_fcport->d_id.b.area, new_fcport->d_id.b.al_pa)); 2707 break; 2708 } 2709 2710 /* Bypass if same physical adapter. */ 2711 if (new_fcport->d_id.b24 == base_vha->d_id.b24) 2712 continue; 2713 2714 /* Bypass virtual ports of the same host. */ 2715 found = 0; 2716 if (ha->num_vhosts) { 2717 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { 2718 if (new_fcport->d_id.b24 == vp->d_id.b24) { 2719 found = 1; 2720 break; 2721 } 2722 } 2723 if (found) 2724 continue; 2725 } 2726 2727 /* Bypass if same domain and area of adapter. */ 2728 if (((new_fcport->d_id.b24 & 0xffff00) == 2729 (vha->d_id.b24 & 0xffff00)) && ha->current_topology == 2730 ISP_CFG_FL) 2731 continue; 2732 2733 /* Bypass reserved domain fields. */ 2734 if ((new_fcport->d_id.b.domain & 0xf0) == 0xf0) 2735 continue; 2736 2737 /* Locate matching device in database. 
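 * Three outcomes for a port name that is already in the driver database:
 * same port ID and still FCS_ONLINE means nothing changed; an entry that
 * was previously non-fabric (local loop) adopts the new port ID, drops its
 * stale loop ID and is marked for a fabric login; otherwise the port ID
 * changed, so the old session is logged out where allowed and the port is
 * flagged FCF_LOGIN_NEEDED for a later relogin.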
*/ 2738 found = 0; 2739 list_for_each_entry(fcport, &vha->vp_fcports, list) { 2740 if (memcmp(new_fcport->port_name, fcport->port_name, 2741 WWN_SIZE)) 2742 continue; 2743 2744 found++; 2745 2746 /* Update port state. */ 2747 memcpy(fcport->fabric_port_name, 2748 new_fcport->fabric_port_name, WWN_SIZE); 2749 fcport->fp_speed = new_fcport->fp_speed; 2750 2751 /* 2752 * If address the same and state FCS_ONLINE, nothing 2753 * changed. 2754 */ 2755 if (fcport->d_id.b24 == new_fcport->d_id.b24 && 2756 atomic_read(&fcport->state) == FCS_ONLINE) { 2757 break; 2758 } 2759 2760 /* 2761 * If device was not a fabric device before. 2762 */ 2763 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) { 2764 fcport->d_id.b24 = new_fcport->d_id.b24; 2765 fcport->loop_id = FC_NO_LOOP_ID; 2766 fcport->flags |= (FCF_FABRIC_DEVICE | 2767 FCF_LOGIN_NEEDED); 2768 fcport->flags &= ~FCF_PERSISTENT_BOUND; 2769 break; 2770 } 2771 2772 /* 2773 * Port ID changed or device was marked to be updated; 2774 * Log it out if still logged in and mark it for 2775 * relogin later. 2776 */ 2777 fcport->d_id.b24 = new_fcport->d_id.b24; 2778 fcport->flags |= FCF_LOGIN_NEEDED; 2779 if (fcport->loop_id != FC_NO_LOOP_ID && 2780 (fcport->flags & FCF_TAPE_PRESENT) == 0 && 2781 fcport->port_type != FCT_INITIATOR && 2782 fcport->port_type != FCT_BROADCAST) { 2783 ha->isp_ops->fabric_logout(vha, fcport->loop_id, 2784 fcport->d_id.b.domain, fcport->d_id.b.area, 2785 fcport->d_id.b.al_pa); 2786 fcport->loop_id = FC_NO_LOOP_ID; 2787 } 2788 2789 break; 2790 } 2791 2792 if (found) 2793 continue; 2794 /* If device was not in our fcports list, then add it. */ 2795 list_add_tail(&new_fcport->list, new_fcports); 2796 2797 /* Allocate a new replacement fcport. */ 2798 nxt_d_id.b24 = new_fcport->d_id.b24; 2799 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); 2800 if (new_fcport == NULL) { 2801 kfree(swl); 2802 return (QLA_MEMORY_ALLOC_FAILED); 2803 } 2804 new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED); 2805 new_fcport->d_id.b24 = nxt_d_id.b24; 2806 } 2807 2808 kfree(swl); 2809 kfree(new_fcport); 2810 2811 if (!list_empty(new_fcports)) 2812 vha->device_flags |= DFLG_FABRIC_DEVICES; 2813 2814 return (rval); 2815 } 2816 2817 /* 2818 * qla2x00_find_new_loop_id 2819 * Scan through our port list and find a new usable loop ID. 2820 * 2821 * Input: 2822 * ha: adapter state pointer. 2823 * dev: port structure pointer. 2824 * 2825 * Returns: 2826 * qla2x00 local function return status code. 2827 * 2828 * Context: 2829 * Kernel context. 2830 */ 2831 static int 2832 qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev) 2833 { 2834 int rval; 2835 int found; 2836 fc_port_t *fcport; 2837 uint16_t first_loop_id; 2838 struct qla_hw_data *ha = vha->hw; 2839 struct scsi_qla_host *vp; 2840 struct scsi_qla_host *tvp; 2841 2842 rval = QLA_SUCCESS; 2843 2844 /* Save starting loop ID. */ 2845 first_loop_id = dev->loop_id; 2846 2847 for (;;) { 2848 /* Skip loop ID if already used by adapter. */ 2849 if (dev->loop_id == vha->loop_id) 2850 dev->loop_id++; 2851 2852 /* Skip reserved loop IDs. */ 2853 while (qla2x00_is_reserved_id(vha, dev->loop_id)) 2854 dev->loop_id++; 2855 2856 /* Reset loop ID if passed the end. */ 2857 if (dev->loop_id > ha->max_loop_id) { 2858 /* first loop ID. */ 2859 dev->loop_id = ha->min_external_loopid; 2860 } 2861 2862 /* Check for loop ID being already in use. 
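 * The search is a simple linear probe with wrap-around: starting from the
 * caller's current value, skip the adapter's own ID and any reserved IDs,
 * wrap from ha->max_loop_id back to ha->min_external_loopid, and accept
 * the first value that no fcport on any vport is using.  If the probe
 * comes back around to the starting ID without finding a free slot, the
 * ID space is exhausted and QLA_FUNCTION_FAILED is returned.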
*/ 2863 found = 0; 2864 fcport = NULL; 2865 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { 2866 list_for_each_entry(fcport, &vp->vp_fcports, list) { 2867 if (fcport->loop_id == dev->loop_id && 2868 fcport != dev) { 2869 /* ID possibly in use */ 2870 found++; 2871 break; 2872 } 2873 } 2874 if (found) 2875 break; 2876 } 2877 2878 /* If not in use then it is free to use. */ 2879 if (!found) { 2880 break; 2881 } 2882 2883 /* ID in use. Try next value. */ 2884 dev->loop_id++; 2885 2886 /* If wrap around. No free ID to use. */ 2887 if (dev->loop_id == first_loop_id) { 2888 dev->loop_id = FC_NO_LOOP_ID; 2889 rval = QLA_FUNCTION_FAILED; 2890 break; 2891 } 2892 } 2893 2894 return (rval); 2895 } 2896 2897 /* 2898 * qla2x00_device_resync 2899 * Marks devices in the database that needs resynchronization. 2900 * 2901 * Input: 2902 * ha = adapter block pointer. 2903 * 2904 * Context: 2905 * Kernel context. 2906 */ 2907 static int 2908 qla2x00_device_resync(scsi_qla_host_t *vha) 2909 { 2910 int rval; 2911 uint32_t mask; 2912 fc_port_t *fcport; 2913 uint32_t rscn_entry; 2914 uint8_t rscn_out_iter; 2915 uint8_t format; 2916 port_id_t d_id; 2917 2918 rval = QLA_RSCNS_HANDLED; 2919 2920 while (vha->rscn_out_ptr != vha->rscn_in_ptr || 2921 vha->flags.rscn_queue_overflow) { 2922 2923 rscn_entry = vha->rscn_queue[vha->rscn_out_ptr]; 2924 format = MSB(MSW(rscn_entry)); 2925 d_id.b.domain = LSB(MSW(rscn_entry)); 2926 d_id.b.area = MSB(LSW(rscn_entry)); 2927 d_id.b.al_pa = LSB(LSW(rscn_entry)); 2928 2929 DEBUG(printk("scsi(%ld): RSCN queue entry[%d] = " 2930 "[%02x/%02x%02x%02x].\n", 2931 vha->host_no, vha->rscn_out_ptr, format, d_id.b.domain, 2932 d_id.b.area, d_id.b.al_pa)); 2933 2934 vha->rscn_out_ptr++; 2935 if (vha->rscn_out_ptr == MAX_RSCN_COUNT) 2936 vha->rscn_out_ptr = 0; 2937 2938 /* Skip duplicate entries. */ 2939 for (rscn_out_iter = vha->rscn_out_ptr; 2940 !vha->flags.rscn_queue_overflow && 2941 rscn_out_iter != vha->rscn_in_ptr; 2942 rscn_out_iter = (rscn_out_iter == 2943 (MAX_RSCN_COUNT - 1)) ? 0: rscn_out_iter + 1) { 2944 2945 if (rscn_entry != vha->rscn_queue[rscn_out_iter]) 2946 break; 2947 2948 DEBUG(printk("scsi(%ld): Skipping duplicate RSCN queue " 2949 "entry found at [%d].\n", vha->host_no, 2950 rscn_out_iter)); 2951 2952 vha->rscn_out_ptr = rscn_out_iter; 2953 } 2954 2955 /* Queue overflow, set switch default case. */ 2956 if (vha->flags.rscn_queue_overflow) { 2957 DEBUG(printk("scsi(%ld): device_resync: rscn " 2958 "overflow.\n", vha->host_no)); 2959 2960 format = 3; 2961 vha->flags.rscn_queue_overflow = 0; 2962 } 2963 2964 switch (format) { 2965 case 0: 2966 mask = 0xffffff; 2967 break; 2968 case 1: 2969 mask = 0xffff00; 2970 break; 2971 case 2: 2972 mask = 0xff0000; 2973 break; 2974 default: 2975 mask = 0x0; 2976 d_id.b24 = 0; 2977 vha->rscn_out_ptr = vha->rscn_in_ptr; 2978 break; 2979 } 2980 2981 rval = QLA_SUCCESS; 2982 2983 list_for_each_entry(fcport, &vha->vp_fcports, list) { 2984 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 || 2985 (fcport->d_id.b24 & mask) != d_id.b24 || 2986 fcport->port_type == FCT_BROADCAST) 2987 continue; 2988 2989 if (atomic_read(&fcport->state) == FCS_ONLINE) { 2990 if (format != 3 || 2991 fcport->port_type != FCT_INITIATOR) { 2992 qla2x00_mark_device_lost(vha, fcport, 2993 0, 0); 2994 } 2995 } 2996 fcport->flags &= ~FCF_FARP_DONE; 2997 } 2998 } 2999 return (rval); 3000 } 3001 3002 /* 3003 * qla2x00_fabric_dev_login 3004 * Login fabric target device and update FC port database. 3005 * 3006 * Input: 3007 * ha: adapter state pointer. 
3008 * fcport: port structure list pointer. 3009 * next_loopid: contains value of a new loop ID that can be used 3010 * by the next login attempt. 3011 * 3012 * Returns: 3013 * qla2x00 local function return status code. 3014 * 3015 * Context: 3016 * Kernel context. 3017 */ 3018 static int 3019 qla2x00_fabric_dev_login(scsi_qla_host_t *vha, fc_port_t *fcport, 3020 uint16_t *next_loopid) 3021 { 3022 int rval; 3023 int retry; 3024 uint8_t opts; 3025 struct qla_hw_data *ha = vha->hw; 3026 3027 rval = QLA_SUCCESS; 3028 retry = 0; 3029 3030 rval = qla2x00_fabric_login(vha, fcport, next_loopid); 3031 if (rval == QLA_SUCCESS) { 3032 /* Send an ADISC to tape devices.*/ 3033 opts = 0; 3034 if (fcport->flags & FCF_TAPE_PRESENT) 3035 opts |= BIT_1; 3036 rval = qla2x00_get_port_database(vha, fcport, opts); 3037 if (rval != QLA_SUCCESS) { 3038 ha->isp_ops->fabric_logout(vha, fcport->loop_id, 3039 fcport->d_id.b.domain, fcport->d_id.b.area, 3040 fcport->d_id.b.al_pa); 3041 qla2x00_mark_device_lost(vha, fcport, 1, 0); 3042 } else { 3043 qla2x00_update_fcport(vha, fcport); 3044 } 3045 } 3046 3047 return (rval); 3048 } 3049 3050 /* 3051 * qla2x00_fabric_login 3052 * Issue fabric login command. 3053 * 3054 * Input: 3055 * ha = adapter block pointer. 3056 * device = pointer to FC device type structure. 3057 * 3058 * Returns: 3059 * 0 - Login successfully 3060 * 1 - Login failed 3061 * 2 - Initiator device 3062 * 3 - Fatal error 3063 */ 3064 int 3065 qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport, 3066 uint16_t *next_loopid) 3067 { 3068 int rval; 3069 int retry; 3070 uint16_t tmp_loopid; 3071 uint16_t mb[MAILBOX_REGISTER_COUNT]; 3072 struct qla_hw_data *ha = vha->hw; 3073 3074 retry = 0; 3075 tmp_loopid = 0; 3076 3077 for (;;) { 3078 DEBUG(printk("scsi(%ld): Trying Fabric Login w/loop id 0x%04x " 3079 "for port %02x%02x%02x.\n", 3080 vha->host_no, fcport->loop_id, fcport->d_id.b.domain, 3081 fcport->d_id.b.area, fcport->d_id.b.al_pa)); 3082 3083 /* Login fcport on switch. */ 3084 ha->isp_ops->fabric_login(vha, fcport->loop_id, 3085 fcport->d_id.b.domain, fcport->d_id.b.area, 3086 fcport->d_id.b.al_pa, mb, BIT_0); 3087 if (mb[0] == MBS_PORT_ID_USED) { 3088 /* 3089 * Device has another loop ID. The firmware team 3090 * recommends the driver perform an implicit login with 3091 * the specified ID again. The ID we just used is saved 3092 * here so we return with an ID that can be tried by 3093 * the next login. 3094 */ 3095 retry++; 3096 tmp_loopid = fcport->loop_id; 3097 fcport->loop_id = mb[1]; 3098 3099 DEBUG(printk("Fabric Login: port in use - next " 3100 "loop id=0x%04x, port Id=%02x%02x%02x.\n", 3101 fcport->loop_id, fcport->d_id.b.domain, 3102 fcport->d_id.b.area, fcport->d_id.b.al_pa)); 3103 3104 } else if (mb[0] == MBS_COMMAND_COMPLETE) { 3105 /* 3106 * Login succeeded. 3107 */ 3108 if (retry) { 3109 /* A retry occurred before. */ 3110 *next_loopid = tmp_loopid; 3111 } else { 3112 /* 3113 * No retry occurred before. Just increment the 3114 * ID value for next login.
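 *
 * *next_loopid is only a hint for the caller's next allocation: after a
 * PORT_ID_USED retry the originally requested ID is handed back, otherwise
 * the next sequential value is offered.  The completion mailboxes are then
 * decoded below: mb[1] BIT_0 marks the remote port as an initiator rather
 * than a target, mb[1] BIT_1 flags a tape device, and mb[10] BIT_0/BIT_1
 * report class 2/class 3 service support.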
3115 */ 3116 *next_loopid = (fcport->loop_id + 1); 3117 } 3118 3119 if (mb[1] & BIT_0) { 3120 fcport->port_type = FCT_INITIATOR; 3121 } else { 3122 fcport->port_type = FCT_TARGET; 3123 if (mb[1] & BIT_1) { 3124 fcport->flags |= FCF_TAPE_PRESENT; 3125 } 3126 } 3127 3128 if (mb[10] & BIT_0) 3129 fcport->supported_classes |= FC_COS_CLASS2; 3130 if (mb[10] & BIT_1) 3131 fcport->supported_classes |= FC_COS_CLASS3; 3132 3133 rval = QLA_SUCCESS; 3134 break; 3135 } else if (mb[0] == MBS_LOOP_ID_USED) { 3136 /* 3137 * Loop ID already used, try next loop ID. 3138 */ 3139 fcport->loop_id++; 3140 rval = qla2x00_find_new_loop_id(vha, fcport); 3141 if (rval != QLA_SUCCESS) { 3142 /* Ran out of loop IDs to use */ 3143 break; 3144 } 3145 } else if (mb[0] == MBS_COMMAND_ERROR) { 3146 /* 3147 * Firmware possibly timed out during login. If NO 3148 * retries are left to do then the device is declared 3149 * dead. 3150 */ 3151 *next_loopid = fcport->loop_id; 3152 ha->isp_ops->fabric_logout(vha, fcport->loop_id, 3153 fcport->d_id.b.domain, fcport->d_id.b.area, 3154 fcport->d_id.b.al_pa); 3155 qla2x00_mark_device_lost(vha, fcport, 1, 0); 3156 3157 rval = 1; 3158 break; 3159 } else { 3160 /* 3161 * unrecoverable / not handled error 3162 */ 3163 DEBUG2(printk("%s(%ld): failed=%x port_id=%02x%02x%02x " 3164 "loop_id=%x jiffies=%lx.\n", 3165 __func__, vha->host_no, mb[0], 3166 fcport->d_id.b.domain, fcport->d_id.b.area, 3167 fcport->d_id.b.al_pa, fcport->loop_id, jiffies)); 3168 3169 *next_loopid = fcport->loop_id; 3170 ha->isp_ops->fabric_logout(vha, fcport->loop_id, 3171 fcport->d_id.b.domain, fcport->d_id.b.area, 3172 fcport->d_id.b.al_pa); 3173 fcport->loop_id = FC_NO_LOOP_ID; 3174 fcport->login_retry = 0; 3175 3176 rval = 3; 3177 break; 3178 } 3179 } 3180 3181 return (rval); 3182 } 3183 3184 /* 3185 * qla2x00_local_device_login 3186 * Issue local device login command. 3187 * 3188 * Input: 3189 * ha = adapter block pointer. 3190 * loop_id = loop id of device to login to. 3191 * 3192 * Returns (Where's the #define!!!!): 3193 * 0 - Login successfully 3194 * 1 - Login failed 3195 * 3 - Fatal error 3196 */ 3197 int 3198 qla2x00_local_device_login(scsi_qla_host_t *vha, fc_port_t *fcport) 3199 { 3200 int rval; 3201 uint16_t mb[MAILBOX_REGISTER_COUNT]; 3202 3203 memset(mb, 0, sizeof(mb)); 3204 rval = qla2x00_login_local_device(vha, fcport, mb, BIT_0); 3205 if (rval == QLA_SUCCESS) { 3206 /* Interrogate mailbox registers for any errors */ 3207 if (mb[0] == MBS_COMMAND_ERROR) 3208 rval = 1; 3209 else if (mb[0] == MBS_COMMAND_PARAMETER_ERROR) 3210 /* device not in PCB table */ 3211 rval = 3; 3212 } 3213 3214 return (rval); 3215 } 3216 3217 /* 3218 * qla2x00_loop_resync 3219 * Resync with fibre channel devices. 3220 * 3221 * Input: 3222 * ha = adapter block pointer. 3223 * 3224 * Returns: 3225 * 0 = success 3226 */ 3227 int 3228 qla2x00_loop_resync(scsi_qla_host_t *vha) 3229 { 3230 int rval = QLA_SUCCESS; 3231 uint32_t wait_time; 3232 struct qla_hw_data *ha = vha->hw; 3233 struct req_que *req = ha->req_q_map[vha->req_ques[0]]; 3234 struct rsp_que *rsp = req->rsp; 3235 3236 atomic_set(&vha->loop_state, LOOP_UPDATE); 3237 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags); 3238 if (vha->flags.online) { 3239 if (!(rval = qla2x00_fw_ready(vha))) { 3240 /* Wait at most MAX_TARGET RSCNs for a stable link. */ 3241 wait_time = 256; 3242 do { 3243 atomic_set(&vha->loop_state, LOOP_UPDATE); 3244 3245 /* Issue a marker after FW becomes ready. 
*/ 3246 qla2x00_marker(vha, req, rsp, 0, 0, 3247 MK_SYNC_ALL); 3248 vha->marker_needed = 0; 3249 3250 /* Remap devices on Loop. */ 3251 clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 3252 3253 qla2x00_configure_loop(vha); 3254 wait_time--; 3255 } while (!atomic_read(&vha->loop_down_timer) && 3256 !(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) 3257 && wait_time && (test_bit(LOOP_RESYNC_NEEDED, 3258 &vha->dpc_flags))); 3259 } 3260 } 3261 3262 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) 3263 return (QLA_FUNCTION_FAILED); 3264 3265 if (rval) 3266 DEBUG2_3(printk("%s(): **** FAILED ****\n", __func__)); 3267 3268 return (rval); 3269 } 3270 3271 void 3272 qla2x00_update_fcports(scsi_qla_host_t *vha) 3273 { 3274 fc_port_t *fcport; 3275 3276 /* Go with deferred removal of rport references. */ 3277 list_for_each_entry(fcport, &vha->vp_fcports, list) 3278 if (fcport && fcport->drport && 3279 atomic_read(&fcport->state) != FCS_UNCONFIGURED) 3280 qla2x00_rport_del(fcport); 3281 } 3282 3283 /* 3284 * qla2x00_abort_isp 3285 * Resets ISP and aborts all outstanding commands. 3286 * 3287 * Input: 3288 * ha = adapter block pointer. 3289 * 3290 * Returns: 3291 * 0 = success 3292 */ 3293 int 3294 qla2x00_abort_isp(scsi_qla_host_t *vha) 3295 { 3296 int rval; 3297 uint8_t status = 0; 3298 struct qla_hw_data *ha = vha->hw; 3299 struct scsi_qla_host *vp; 3300 struct scsi_qla_host *tvp; 3301 struct req_que *req = ha->req_q_map[0]; 3302 3303 if (vha->flags.online) { 3304 vha->flags.online = 0; 3305 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 3306 ha->qla_stats.total_isp_aborts++; 3307 3308 qla_printk(KERN_INFO, ha, 3309 "Performing ISP error recovery - ha= %p.\n", ha); 3310 ha->isp_ops->reset_chip(vha); 3311 3312 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 3313 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 3314 atomic_set(&vha->loop_state, LOOP_DOWN); 3315 qla2x00_mark_all_devices_lost(vha, 0); 3316 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) 3317 qla2x00_mark_all_devices_lost(vp, 0); 3318 } else { 3319 if (!atomic_read(&vha->loop_down_timer)) 3320 atomic_set(&vha->loop_down_timer, 3321 LOOP_DOWN_TIME); 3322 } 3323 3324 /* Requeue all commands in outstanding command list. */ 3325 qla2x00_abort_all_cmds(vha, DID_RESET << 16); 3326 3327 ha->isp_ops->get_flash_version(vha, req->ring); 3328 3329 ha->isp_ops->nvram_config(vha); 3330 3331 if (!qla2x00_restart_isp(vha)) { 3332 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); 3333 3334 if (!atomic_read(&vha->loop_down_timer)) { 3335 /* 3336 * Issue marker command only when we are going 3337 * to start the I/O . 
3338 */ 3339 vha->marker_needed = 1; 3340 } 3341 3342 vha->flags.online = 1; 3343 3344 ha->isp_ops->enable_intrs(ha); 3345 3346 ha->isp_abort_cnt = 0; 3347 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags); 3348 3349 if (ha->fce) { 3350 ha->flags.fce_enabled = 1; 3351 memset(ha->fce, 0, 3352 fce_calc_size(ha->fce_bufs)); 3353 rval = qla2x00_enable_fce_trace(vha, 3354 ha->fce_dma, ha->fce_bufs, ha->fce_mb, 3355 &ha->fce_bufs); 3356 if (rval) { 3357 qla_printk(KERN_WARNING, ha, 3358 "Unable to reinitialize FCE " 3359 "(%d).\n", rval); 3360 ha->flags.fce_enabled = 0; 3361 } 3362 } 3363 3364 if (ha->eft) { 3365 memset(ha->eft, 0, EFT_SIZE); 3366 rval = qla2x00_enable_eft_trace(vha, 3367 ha->eft_dma, EFT_NUM_BUFFERS); 3368 if (rval) { 3369 qla_printk(KERN_WARNING, ha, 3370 "Unable to reinitialize EFT " 3371 "(%d).\n", rval); 3372 } 3373 } 3374 } else { /* failed the ISP abort */ 3375 vha->flags.online = 1; 3376 if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) { 3377 if (ha->isp_abort_cnt == 0) { 3378 qla_printk(KERN_WARNING, ha, 3379 "ISP error recovery failed - " 3380 "board disabled\n"); 3381 /* 3382 * The next call disables the board 3383 * completely. 3384 */ 3385 ha->isp_ops->reset_adapter(vha); 3386 vha->flags.online = 0; 3387 clear_bit(ISP_ABORT_RETRY, 3388 &vha->dpc_flags); 3389 status = 0; 3390 } else { /* schedule another ISP abort */ 3391 ha->isp_abort_cnt--; 3392 DEBUG(printk("qla%ld: ISP abort - " 3393 "retry remaining %d\n", 3394 vha->host_no, ha->isp_abort_cnt)); 3395 status = 1; 3396 } 3397 } else { 3398 ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT; 3399 DEBUG(printk("qla2x00(%ld): ISP error recovery " 3400 "- retrying (%d) more times\n", 3401 vha->host_no, ha->isp_abort_cnt)); 3402 set_bit(ISP_ABORT_RETRY, &vha->dpc_flags); 3403 status = 1; 3404 } 3405 } 3406 3407 } 3408 3409 if (!status) { 3410 DEBUG(printk(KERN_INFO 3411 "qla2x00_abort_isp(%ld): succeeded.\n", 3412 vha->host_no)); 3413 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { 3414 if (vp->vp_idx) 3415 qla2x00_vp_abort_isp(vp); 3416 } 3417 } else { 3418 qla_printk(KERN_INFO, ha, 3419 "qla2x00_abort_isp: **** FAILED ****\n"); 3420 } 3421 3422 return(status); 3423 } 3424 3425 /* 3426 * qla2x00_restart_isp 3427 * restarts the ISP after a reset 3428 * 3429 * Input: 3430 * ha = adapter block pointer. 3431 * 3432 * Returns: 3433 * 0 = success 3434 */ 3435 static int 3436 qla2x00_restart_isp(scsi_qla_host_t *vha) 3437 { 3438 int status = 0; 3439 uint32_t wait_time; 3440 struct qla_hw_data *ha = vha->hw; 3441 struct req_que *req = ha->req_q_map[0]; 3442 struct rsp_que *rsp = ha->rsp_q_map[0]; 3443 3444 /* If firmware needs to be loaded */ 3445 if (qla2x00_isp_firmware(vha)) { 3446 vha->flags.online = 0; 3447 status = ha->isp_ops->chip_diag(vha); 3448 if (!status) 3449 status = qla2x00_setup_chip(vha); 3450 } 3451 3452 if (!status && !(status = qla2x00_init_rings(vha))) { 3453 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); 3454 /* Initialize the queues in use */ 3455 qla25xx_init_queues(ha); 3456 3457 status = qla2x00_fw_ready(vha); 3458 if (!status) { 3459 DEBUG(printk("%s(): Start configure loop, " 3460 "status = %d\n", __func__, status)); 3461 3462 /* Issue a marker after FW becomes ready. */ 3463 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL); 3464 3465 vha->flags.online = 1; 3466 /* Wait at most MAX_TARGET RSCNs for a stable link. 
*/ 3467 wait_time = 256; 3468 do { 3469 clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 3470 qla2x00_configure_loop(vha); 3471 wait_time--; 3472 } while (!atomic_read(&vha->loop_down_timer) && 3473 !(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) 3474 && wait_time && (test_bit(LOOP_RESYNC_NEEDED, 3475 &vha->dpc_flags))); 3476 } 3477 3478 /* if no cable then assume it's good */ 3479 if ((vha->device_flags & DFLG_NO_CABLE)) 3480 status = 0; 3481 3482 DEBUG(printk("%s(): Configure loop done, status = 0x%x\n", 3483 __func__, 3484 status)); 3485 } 3486 return (status); 3487 } 3488 3489 static int 3490 qla25xx_init_queues(struct qla_hw_data *ha) 3491 { 3492 struct rsp_que *rsp = NULL; 3493 struct req_que *req = NULL; 3494 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 3495 int ret = -1; 3496 int i; 3497 3498 for (i = 1; i < ha->max_queues; i++) { 3499 rsp = ha->rsp_q_map[i]; 3500 if (rsp) { 3501 rsp->options &= ~BIT_0; 3502 ret = qla25xx_init_rsp_que(base_vha, rsp); 3503 if (ret != QLA_SUCCESS) 3504 DEBUG2_17(printk(KERN_WARNING 3505 "%s Rsp que:%d init failed\n", __func__, 3506 rsp->id)); 3507 else 3508 DEBUG2_17(printk(KERN_INFO 3509 "%s Rsp que:%d inited\n", __func__, 3510 rsp->id)); 3511 } 3512 req = ha->req_q_map[i]; 3513 if (req) { 3514 /* Clear outstanding commands array. */ 3515 req->options &= ~BIT_0; 3516 ret = qla25xx_init_req_que(base_vha, req); 3517 if (ret != QLA_SUCCESS) 3518 DEBUG2_17(printk(KERN_WARNING 3519 "%s Req que:%d init failed\n", __func__, 3520 req->id)); 3521 else 3522 DEBUG2_17(printk(KERN_WARNING 3523 "%s Req que:%d inited\n", __func__, 3524 req->id)); 3525 } 3526 } 3527 return ret; 3528 } 3529 3530 /* 3531 * qla2x00_reset_adapter 3532 * Reset adapter. 3533 * 3534 * Input: 3535 * ha = adapter block pointer. 3536 */ 3537 void 3538 qla2x00_reset_adapter(scsi_qla_host_t *vha) 3539 { 3540 unsigned long flags = 0; 3541 struct qla_hw_data *ha = vha->hw; 3542 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 3543 3544 vha->flags.online = 0; 3545 ha->isp_ops->disable_intrs(ha); 3546 3547 spin_lock_irqsave(&ha->hardware_lock, flags); 3548 WRT_REG_WORD(®->hccr, HCCR_RESET_RISC); 3549 RD_REG_WORD(®->hccr); /* PCI Posting. */ 3550 WRT_REG_WORD(®->hccr, HCCR_RELEASE_RISC); 3551 RD_REG_WORD(®->hccr); /* PCI Posting. */ 3552 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3553 } 3554 3555 void 3556 qla24xx_reset_adapter(scsi_qla_host_t *vha) 3557 { 3558 unsigned long flags = 0; 3559 struct qla_hw_data *ha = vha->hw; 3560 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 3561 3562 vha->flags.online = 0; 3563 ha->isp_ops->disable_intrs(ha); 3564 3565 spin_lock_irqsave(&ha->hardware_lock, flags); 3566 WRT_REG_DWORD(®->hccr, HCCRX_SET_RISC_RESET); 3567 RD_REG_DWORD(®->hccr); 3568 WRT_REG_DWORD(®->hccr, HCCRX_REL_RISC_PAUSE); 3569 RD_REG_DWORD(®->hccr); 3570 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3571 3572 if (IS_NOPOLLING_TYPE(ha)) 3573 ha->isp_ops->enable_intrs(ha); 3574 } 3575 3576 /* On sparc systems, obtain port and node WWN from firmware 3577 * properties. 
3578 */ 3579 static void qla24xx_nvram_wwn_from_ofw(scsi_qla_host_t *vha, 3580 struct nvram_24xx *nv) 3581 { 3582 #ifdef CONFIG_SPARC 3583 struct qla_hw_data *ha = vha->hw; 3584 struct pci_dev *pdev = ha->pdev; 3585 struct device_node *dp = pci_device_to_OF_node(pdev); 3586 const u8 *val; 3587 int len; 3588 3589 val = of_get_property(dp, "port-wwn", &len); 3590 if (val && len >= WWN_SIZE) 3591 memcpy(nv->port_name, val, WWN_SIZE); 3592 3593 val = of_get_property(dp, "node-wwn", &len); 3594 if (val && len >= WWN_SIZE) 3595 memcpy(nv->node_name, val, WWN_SIZE); 3596 #endif 3597 } 3598 3599 int 3600 qla24xx_nvram_config(scsi_qla_host_t *vha) 3601 { 3602 int rval; 3603 struct init_cb_24xx *icb; 3604 struct nvram_24xx *nv; 3605 uint32_t *dptr; 3606 uint8_t *dptr1, *dptr2; 3607 uint32_t chksum; 3608 uint16_t cnt; 3609 struct qla_hw_data *ha = vha->hw; 3610 3611 rval = QLA_SUCCESS; 3612 icb = (struct init_cb_24xx *)ha->init_cb; 3613 nv = ha->nvram; 3614 3615 /* Determine NVRAM starting address. */ 3616 ha->nvram_size = sizeof(struct nvram_24xx); 3617 ha->nvram_base = FA_NVRAM_FUNC0_ADDR; 3618 ha->vpd_size = FA_NVRAM_VPD_SIZE; 3619 ha->vpd_base = FA_NVRAM_VPD0_ADDR; 3620 if (PCI_FUNC(ha->pdev->devfn)) { 3621 ha->nvram_base = FA_NVRAM_FUNC1_ADDR; 3622 ha->vpd_base = FA_NVRAM_VPD1_ADDR; 3623 } 3624 3625 /* Get VPD data into cache */ 3626 ha->vpd = ha->nvram + VPD_OFFSET; 3627 ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd, 3628 ha->nvram_base - FA_NVRAM_FUNC0_ADDR, FA_NVRAM_VPD_SIZE * 4); 3629 3630 /* Get NVRAM data into cache and calculate checksum. */ 3631 dptr = (uint32_t *)nv; 3632 ha->isp_ops->read_nvram(vha, (uint8_t *)dptr, ha->nvram_base, 3633 ha->nvram_size); 3634 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++) 3635 chksum += le32_to_cpu(*dptr++); 3636 3637 DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", ha->host_no)); 3638 DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size)); 3639 3640 /* Bad NVRAM data, set defaults parameters. */ 3641 if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P' 3642 || nv->id[3] != ' ' || 3643 nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) { 3644 /* Reset NVRAM data. */ 3645 qla_printk(KERN_WARNING, ha, "Inconsistent NVRAM detected: " 3646 "checksum=0x%x id=%c version=0x%x.\n", chksum, nv->id[0], 3647 le16_to_cpu(nv->nvram_version)); 3648 qla_printk(KERN_WARNING, ha, "Falling back to functioning (yet " 3649 "invalid -- WWPN) defaults.\n"); 3650 3651 /* 3652 * Set default initialization control block. 
3653 */ 3654 memset(nv, 0, ha->nvram_size); 3655 nv->nvram_version = __constant_cpu_to_le16(ICB_VERSION); 3656 nv->version = __constant_cpu_to_le16(ICB_VERSION); 3657 nv->frame_payload_size = __constant_cpu_to_le16(2048); 3658 nv->execution_throttle = __constant_cpu_to_le16(0xFFFF); 3659 nv->exchange_count = __constant_cpu_to_le16(0); 3660 nv->hard_address = __constant_cpu_to_le16(124); 3661 nv->port_name[0] = 0x21; 3662 nv->port_name[1] = 0x00 + PCI_FUNC(ha->pdev->devfn); 3663 nv->port_name[2] = 0x00; 3664 nv->port_name[3] = 0xe0; 3665 nv->port_name[4] = 0x8b; 3666 nv->port_name[5] = 0x1c; 3667 nv->port_name[6] = 0x55; 3668 nv->port_name[7] = 0x86; 3669 nv->node_name[0] = 0x20; 3670 nv->node_name[1] = 0x00; 3671 nv->node_name[2] = 0x00; 3672 nv->node_name[3] = 0xe0; 3673 nv->node_name[4] = 0x8b; 3674 nv->node_name[5] = 0x1c; 3675 nv->node_name[6] = 0x55; 3676 nv->node_name[7] = 0x86; 3677 qla24xx_nvram_wwn_from_ofw(vha, nv); 3678 nv->login_retry_count = __constant_cpu_to_le16(8); 3679 nv->interrupt_delay_timer = __constant_cpu_to_le16(0); 3680 nv->login_timeout = __constant_cpu_to_le16(0); 3681 nv->firmware_options_1 = 3682 __constant_cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1); 3683 nv->firmware_options_2 = __constant_cpu_to_le32(2 << 4); 3684 nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12); 3685 nv->firmware_options_3 = __constant_cpu_to_le32(2 << 13); 3686 nv->host_p = __constant_cpu_to_le32(BIT_11|BIT_10); 3687 nv->efi_parameters = __constant_cpu_to_le32(0); 3688 nv->reset_delay = 5; 3689 nv->max_luns_per_target = __constant_cpu_to_le16(128); 3690 nv->port_down_retry_count = __constant_cpu_to_le16(30); 3691 nv->link_down_timeout = __constant_cpu_to_le16(30); 3692 3693 rval = 1; 3694 } 3695 3696 /* Reset Initialization control block */ 3697 memset(icb, 0, ha->init_cb_size); 3698 3699 /* Copy 1st segment. */ 3700 dptr1 = (uint8_t *)icb; 3701 dptr2 = (uint8_t *)&nv->version; 3702 cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version; 3703 while (cnt--) 3704 *dptr1++ = *dptr2++; 3705 3706 icb->login_retry_count = nv->login_retry_count; 3707 icb->link_down_on_nos = nv->link_down_on_nos; 3708 3709 /* Copy 2nd segment. */ 3710 dptr1 = (uint8_t *)&icb->interrupt_delay_timer; 3711 dptr2 = (uint8_t *)&nv->interrupt_delay_timer; 3712 cnt = (uint8_t *)&icb->reserved_3 - 3713 (uint8_t *)&icb->interrupt_delay_timer; 3714 while (cnt--) 3715 *dptr1++ = *dptr2++; 3716 3717 /* 3718 * Setup driver NVRAM options. 3719 */ 3720 qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name), 3721 "QLA2462"); 3722 3723 /* Use alternate WWN? */ 3724 if (nv->host_p & __constant_cpu_to_le32(BIT_15)) { 3725 memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE); 3726 memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE); 3727 } 3728 3729 /* Prepare nodename */ 3730 if ((icb->firmware_options_1 & __constant_cpu_to_le32(BIT_14)) == 0) { 3731 /* 3732 * Firmware will apply the following mask if the nodename was 3733 * not provided. 3734 */ 3735 memcpy(icb->node_name, icb->port_name, WWN_SIZE); 3736 icb->node_name[0] &= 0xF0; 3737 } 3738 3739 /* Set host adapter parameters. */ 3740 ha->flags.disable_risc_code_load = 0; 3741 ha->flags.enable_lip_reset = 0; 3742 ha->flags.enable_lip_full_login = 3743 le32_to_cpu(nv->host_p) & BIT_10 ? 1: 0; 3744 ha->flags.enable_target_reset = 3745 le32_to_cpu(nv->host_p) & BIT_11 ? 1: 0; 3746 ha->flags.enable_led_scheme = 0; 3747 ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 
1: 0; 3748 3749 ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) & 3750 (BIT_6 | BIT_5 | BIT_4)) >> 4; 3751 3752 memcpy(ha->fw_seriallink_options24, nv->seriallink_options, 3753 sizeof(ha->fw_seriallink_options24)); 3754 3755 /* save HBA serial number */ 3756 ha->serial0 = icb->port_name[5]; 3757 ha->serial1 = icb->port_name[6]; 3758 ha->serial2 = icb->port_name[7]; 3759 memcpy(vha->node_name, icb->node_name, WWN_SIZE); 3760 memcpy(vha->port_name, icb->port_name, WWN_SIZE); 3761 3762 icb->execution_throttle = __constant_cpu_to_le16(0xFFFF); 3763 3764 ha->retry_count = le16_to_cpu(nv->login_retry_count); 3765 3766 /* Set minimum login_timeout to 4 seconds. */ 3767 if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout) 3768 nv->login_timeout = cpu_to_le16(ql2xlogintimeout); 3769 if (le16_to_cpu(nv->login_timeout) < 4) 3770 nv->login_timeout = __constant_cpu_to_le16(4); 3771 ha->login_timeout = le16_to_cpu(nv->login_timeout); 3772 icb->login_timeout = nv->login_timeout; 3773 3774 /* Set minimum RATOV to 100 tenths of a second. */ 3775 ha->r_a_tov = 100; 3776 3777 ha->loop_reset_delay = nv->reset_delay; 3778 3779 /* Link Down Timeout = 0: 3780 * 3781 * When Port Down timer expires we will start returning 3782 * I/O's to OS with "DID_NO_CONNECT". 3783 * 3784 * Link Down Timeout != 0: 3785 * 3786 * The driver waits for the link to come up after link down 3787 * before returning I/Os to OS with "DID_NO_CONNECT". 3788 */ 3789 if (le16_to_cpu(nv->link_down_timeout) == 0) { 3790 ha->loop_down_abort_time = 3791 (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT); 3792 } else { 3793 ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout); 3794 ha->loop_down_abort_time = 3795 (LOOP_DOWN_TIME - ha->link_down_timeout); 3796 } 3797 3798 /* Need enough time to try and get the port back. */ 3799 ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count); 3800 if (qlport_down_retry) 3801 ha->port_down_retry_count = qlport_down_retry; 3802 3803 /* Set login_retry_count */ 3804 ha->login_retry_count = le16_to_cpu(nv->login_retry_count); 3805 if (ha->port_down_retry_count == 3806 le16_to_cpu(nv->port_down_retry_count) && 3807 ha->port_down_retry_count > 3) 3808 ha->login_retry_count = ha->port_down_retry_count; 3809 else if (ha->port_down_retry_count > (int)ha->login_retry_count) 3810 ha->login_retry_count = ha->port_down_retry_count; 3811 if (ql2xloginretrycount) 3812 ha->login_retry_count = ql2xloginretrycount; 3813 3814 /* Enable ZIO. */ 3815 if (!vha->flags.init_done) { 3816 ha->zio_mode = le32_to_cpu(icb->firmware_options_2) & 3817 (BIT_3 | BIT_2 | BIT_1 | BIT_0); 3818 ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ? 
3819 le16_to_cpu(icb->interrupt_delay_timer): 2; 3820 } 3821 icb->firmware_options_2 &= __constant_cpu_to_le32( 3822 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0)); 3823 vha->flags.process_response_queue = 0; 3824 if (ha->zio_mode != QLA_ZIO_DISABLED) { 3825 ha->zio_mode = QLA_ZIO_MODE_6; 3826 3827 DEBUG2(printk("scsi(%ld): ZIO mode %d enabled; timer delay " 3828 "(%d us).\n", vha->host_no, ha->zio_mode, 3829 ha->zio_timer * 100)); 3830 qla_printk(KERN_INFO, ha, 3831 "ZIO mode %d enabled; timer delay (%d us).\n", 3832 ha->zio_mode, ha->zio_timer * 100); 3833 3834 icb->firmware_options_2 |= cpu_to_le32( 3835 (uint32_t)ha->zio_mode); 3836 icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer); 3837 vha->flags.process_response_queue = 1; 3838 } 3839 3840 if (rval) { 3841 DEBUG2_3(printk(KERN_WARNING 3842 "scsi(%ld): NVRAM configuration failed!\n", vha->host_no)); 3843 } 3844 return (rval); 3845 } 3846 3847 static int 3848 qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr) 3849 { 3850 int rval = QLA_SUCCESS; 3851 int segments, fragment; 3852 uint32_t faddr; 3853 uint32_t *dcode, dlen; 3854 uint32_t risc_addr; 3855 uint32_t risc_size; 3856 uint32_t i; 3857 struct qla_hw_data *ha = vha->hw; 3858 struct req_que *req = ha->req_q_map[0]; 3859 3860 qla_printk(KERN_INFO, ha, 3861 "FW: Loading from flash (%x)...\n", ha->flt_region_fw); 3862 3863 rval = QLA_SUCCESS; 3864 3865 segments = FA_RISC_CODE_SEGMENTS; 3866 faddr = ha->flt_region_fw; 3867 dcode = (uint32_t *)req->ring; 3868 *srisc_addr = 0; 3869 3870 /* Validate firmware image by checking version. */ 3871 qla24xx_read_flash_data(vha, dcode, faddr + 4, 4); 3872 for (i = 0; i < 4; i++) 3873 dcode[i] = be32_to_cpu(dcode[i]); 3874 if ((dcode[0] == 0xffffffff && dcode[1] == 0xffffffff && 3875 dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) || 3876 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 && 3877 dcode[3] == 0)) { 3878 qla_printk(KERN_WARNING, ha, 3879 "Unable to verify integrity of flash firmware image!\n"); 3880 qla_printk(KERN_WARNING, ha, 3881 "Firmware data: %08x %08x %08x %08x!\n", dcode[0], 3882 dcode[1], dcode[2], dcode[3]); 3883 3884 return QLA_FUNCTION_FAILED; 3885 } 3886 3887 while (segments && rval == QLA_SUCCESS) { 3888 /* Read segment's load information. */ 3889 qla24xx_read_flash_data(vha, dcode, faddr, 4); 3890 3891 risc_addr = be32_to_cpu(dcode[2]); 3892 *srisc_addr = *srisc_addr == 0 ? risc_addr : *srisc_addr; 3893 risc_size = be32_to_cpu(dcode[3]); 3894 3895 fragment = 0; 3896 while (risc_size > 0 && rval == QLA_SUCCESS) { 3897 dlen = (uint32_t)(ha->fw_transfer_size >> 2); 3898 if (dlen > risc_size) 3899 dlen = risc_size; 3900 3901 DEBUG7(printk("scsi(%ld): Loading risc segment@ risc " 3902 "addr %x, number of dwords 0x%x, offset 0x%x.\n", 3903 vha->host_no, risc_addr, dlen, faddr)); 3904 3905 qla24xx_read_flash_data(vha, dcode, faddr, dlen); 3906 for (i = 0; i < dlen; i++) 3907 dcode[i] = swab32(dcode[i]); 3908 3909 rval = qla2x00_load_ram(vha, req->dma, risc_addr, 3910 dlen); 3911 if (rval) { 3912 DEBUG(printk("scsi(%ld):[ERROR] Failed to load " 3913 "segment %d of firmware\n", vha->host_no, 3914 fragment)); 3915 qla_printk(KERN_WARNING, ha, 3916 "[ERROR] Failed to load segment %d of " 3917 "firmware\n", fragment); 3918 break; 3919 } 3920 3921 faddr += dlen; 3922 risc_addr += dlen; 3923 risc_size -= dlen; 3924 fragment++; 3925 } 3926 3927 /* Next segment. 
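 * Each flash segment begins with a four-dword big-endian header in which
 * dcode[2] is the RISC load address and dcode[3] the segment length in
 * dwords; the segment is then streamed to the chip in chunks of up to
 * ha->fw_transfer_size bytes via qla2x00_load_ram().  At this point faddr
 * already points past the data just written, i.e. at the next segment
 * header, and the count of FA_RISC_CODE_SEGMENTS is decremented below.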
static int
qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr)
{
	int rval = QLA_SUCCESS;
	int segments, fragment;
	uint32_t faddr;
	uint32_t *dcode, dlen;
	uint32_t risc_addr;
	uint32_t risc_size;
	uint32_t i;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];

	qla_printk(KERN_INFO, ha,
	    "FW: Loading from flash (%x)...\n", ha->flt_region_fw);

	rval = QLA_SUCCESS;

	segments = FA_RISC_CODE_SEGMENTS;
	faddr = ha->flt_region_fw;
	dcode = (uint32_t *)req->ring;
	*srisc_addr = 0;

	/* Validate firmware image by checking version. */
	qla24xx_read_flash_data(vha, dcode, faddr + 4, 4);
	for (i = 0; i < 4; i++)
		dcode[i] = be32_to_cpu(dcode[i]);
	if ((dcode[0] == 0xffffffff && dcode[1] == 0xffffffff &&
	    dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
	    (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
	    dcode[3] == 0)) {
		qla_printk(KERN_WARNING, ha,
		    "Unable to verify integrity of flash firmware image!\n");
		qla_printk(KERN_WARNING, ha,
		    "Firmware data: %08x %08x %08x %08x!\n", dcode[0],
		    dcode[1], dcode[2], dcode[3]);

		return QLA_FUNCTION_FAILED;
	}

	while (segments && rval == QLA_SUCCESS) {
		/* Read segment's load information. */
		qla24xx_read_flash_data(vha, dcode, faddr, 4);

		risc_addr = be32_to_cpu(dcode[2]);
		*srisc_addr = *srisc_addr == 0 ? risc_addr : *srisc_addr;
		risc_size = be32_to_cpu(dcode[3]);

		fragment = 0;
		while (risc_size > 0 && rval == QLA_SUCCESS) {
			dlen = (uint32_t)(ha->fw_transfer_size >> 2);
			if (dlen > risc_size)
				dlen = risc_size;

			DEBUG7(printk("scsi(%ld): Loading risc segment@ risc "
			    "addr %x, number of dwords 0x%x, offset 0x%x.\n",
			    vha->host_no, risc_addr, dlen, faddr));

			qla24xx_read_flash_data(vha, dcode, faddr, dlen);
			for (i = 0; i < dlen; i++)
				dcode[i] = swab32(dcode[i]);

			rval = qla2x00_load_ram(vha, req->dma, risc_addr,
			    dlen);
			if (rval) {
				DEBUG(printk("scsi(%ld):[ERROR] Failed to load "
				    "segment %d of firmware\n", vha->host_no,
				    fragment));
				qla_printk(KERN_WARNING, ha,
				    "[ERROR] Failed to load segment %d of "
				    "firmware\n", fragment);
				break;
			}

			faddr += dlen;
			risc_addr += dlen;
			risc_size -= dlen;
			fragment++;
		}

		/* Next segment. */
		segments--;
	}

	return rval;
}

#define QLA_FW_URL "ftp://ftp.qlogic.com/outgoing/linux/firmware/"

int
qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
{
	int rval;
	int i, fragment;
	uint16_t *wcode, *fwcode;
	uint32_t risc_addr, risc_size, fwclen, wlen, *seg;
	struct fw_blob *blob;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];

	/* Load firmware blob. */
	blob = qla2x00_request_firmware(vha);
	if (!blob) {
		qla_printk(KERN_ERR, ha, "Firmware image unavailable.\n");
		qla_printk(KERN_ERR, ha, "Firmware images can be retrieved "
		    "from: " QLA_FW_URL ".\n");
		return QLA_FUNCTION_FAILED;
	}

	rval = QLA_SUCCESS;

	wcode = (uint16_t *)req->ring;
	*srisc_addr = 0;
	fwcode = (uint16_t *)blob->fw->data;
	fwclen = 0;

	/* Validate firmware image by checking version. */
	if (blob->fw->size < 8 * sizeof(uint16_t)) {
		qla_printk(KERN_WARNING, ha,
		    "Unable to verify integrity of firmware image (%Zd)!\n",
		    blob->fw->size);
		goto fail_fw_integrity;
	}
	for (i = 0; i < 4; i++)
		wcode[i] = be16_to_cpu(fwcode[i + 4]);
	if ((wcode[0] == 0xffff && wcode[1] == 0xffff && wcode[2] == 0xffff &&
	    wcode[3] == 0xffff) || (wcode[0] == 0 && wcode[1] == 0 &&
	    wcode[2] == 0 && wcode[3] == 0)) {
		qla_printk(KERN_WARNING, ha,
		    "Unable to verify integrity of firmware image!\n");
		qla_printk(KERN_WARNING, ha,
		    "Firmware data: %04x %04x %04x %04x!\n", wcode[0],
		    wcode[1], wcode[2], wcode[3]);
		goto fail_fw_integrity;
	}

	seg = blob->segs;
	while (*seg && rval == QLA_SUCCESS) {
		risc_addr = *seg;
		*srisc_addr = *srisc_addr == 0 ? *seg : *srisc_addr;
		risc_size = be16_to_cpu(fwcode[3]);

		/* Validate firmware image size. */
		fwclen += risc_size * sizeof(uint16_t);
		if (blob->fw->size < fwclen) {
			qla_printk(KERN_WARNING, ha,
			    "Unable to verify integrity of firmware image "
			    "(%Zd)!\n", blob->fw->size);
			goto fail_fw_integrity;
		}

		fragment = 0;
		while (risc_size > 0 && rval == QLA_SUCCESS) {
			wlen = (uint16_t)(ha->fw_transfer_size >> 1);
			if (wlen > risc_size)
				wlen = risc_size;

			DEBUG7(printk("scsi(%ld): Loading risc segment@ risc "
			    "addr %x, number of words 0x%x.\n", vha->host_no,
			    risc_addr, wlen));

			for (i = 0; i < wlen; i++)
				wcode[i] = swab16(fwcode[i]);

			rval = qla2x00_load_ram(vha, req->dma, risc_addr,
			    wlen);
			if (rval) {
				DEBUG(printk("scsi(%ld):[ERROR] Failed to load "
				    "segment %d of firmware\n", vha->host_no,
				    fragment));
				qla_printk(KERN_WARNING, ha,
				    "[ERROR] Failed to load segment %d of "
				    "firmware\n", fragment);
				break;
			}

			fwcode += wlen;
			risc_addr += wlen;
			risc_size -= wlen;
			fragment++;
		}

		/* Next segment. */
		seg++;
	}
	return rval;

fail_fw_integrity:
	return QLA_FUNCTION_FAILED;
}
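
/**
 * qla24xx_load_risc_blob() - Load ISP24xx RISC firmware via the firmware loader.
 * @vha: HA context
 * @srisc_addr: returned starting RISC address of the loaded firmware
 *
 * Fetches the firmware image through qla2x00_request_firmware(), validates
 * its header, and writes each segment to RISC memory; this is one leg of the
 * FWI2 firmware load-priority scheme below.
 */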
static int
qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
{
	int rval;
	int segments, fragment;
	uint32_t *dcode, dlen;
	uint32_t risc_addr;
	uint32_t risc_size;
	uint32_t i;
	struct fw_blob *blob;
	uint32_t *fwcode, fwclen;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];

	/* Load firmware blob. */
	blob = qla2x00_request_firmware(vha);
	if (!blob) {
		qla_printk(KERN_ERR, ha, "Firmware image unavailable.\n");
		qla_printk(KERN_ERR, ha, "Firmware images can be retrieved "
		    "from: " QLA_FW_URL ".\n");

		return QLA_FUNCTION_FAILED;
	}

	qla_printk(KERN_INFO, ha,
	    "FW: Loading via request-firmware...\n");

	rval = QLA_SUCCESS;

	segments = FA_RISC_CODE_SEGMENTS;
	dcode = (uint32_t *)req->ring;
	*srisc_addr = 0;
	fwcode = (uint32_t *)blob->fw->data;
	fwclen = 0;

	/* Validate firmware image by checking version. */
	if (blob->fw->size < 8 * sizeof(uint32_t)) {
		qla_printk(KERN_WARNING, ha,
		    "Unable to verify integrity of firmware image (%Zd)!\n",
		    blob->fw->size);
		goto fail_fw_integrity;
	}
	for (i = 0; i < 4; i++)
		dcode[i] = be32_to_cpu(fwcode[i + 4]);
	if ((dcode[0] == 0xffffffff && dcode[1] == 0xffffffff &&
	    dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
	    (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
	    dcode[3] == 0)) {
		qla_printk(KERN_WARNING, ha,
		    "Unable to verify integrity of firmware image!\n");
		qla_printk(KERN_WARNING, ha,
		    "Firmware data: %08x %08x %08x %08x!\n", dcode[0],
		    dcode[1], dcode[2], dcode[3]);
		goto fail_fw_integrity;
	}

	while (segments && rval == QLA_SUCCESS) {
		risc_addr = be32_to_cpu(fwcode[2]);
		*srisc_addr = *srisc_addr == 0 ? risc_addr : *srisc_addr;
		risc_size = be32_to_cpu(fwcode[3]);

		/* Validate firmware image size. */
		fwclen += risc_size * sizeof(uint32_t);
		if (blob->fw->size < fwclen) {
			qla_printk(KERN_WARNING, ha,
			    "Unable to verify integrity of firmware image "
			    "(%Zd)!\n", blob->fw->size);

			goto fail_fw_integrity;
		}

		fragment = 0;
		while (risc_size > 0 && rval == QLA_SUCCESS) {
			dlen = (uint32_t)(ha->fw_transfer_size >> 2);
			if (dlen > risc_size)
				dlen = risc_size;

			DEBUG7(printk("scsi(%ld): Loading risc segment@ risc "
			    "addr %x, number of dwords 0x%x.\n", vha->host_no,
			    risc_addr, dlen));

			for (i = 0; i < dlen; i++)
				dcode[i] = swab32(fwcode[i]);

			rval = qla2x00_load_ram(vha, req->dma, risc_addr,
			    dlen);
			if (rval) {
				DEBUG(printk("scsi(%ld):[ERROR] Failed to load "
				    "segment %d of firmware\n", vha->host_no,
				    fragment));
				qla_printk(KERN_WARNING, ha,
				    "[ERROR] Failed to load segment %d of "
				    "firmware\n", fragment);
				break;
			}

			fwcode += dlen;
			risc_addr += dlen;
			risc_size -= dlen;
			fragment++;
		}

		/* Next segment. */
		segments--;
	}
	return rval;

fail_fw_integrity:
	return QLA_FUNCTION_FAILED;
}

int
qla24xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
{
	int rval;

	/*
	 * FW Load priority:
	 * 1) Firmware via request-firmware interface (.bin file).
	 * 2) Firmware residing in flash.
	 */
	rval = qla24xx_load_risc_blob(vha, srisc_addr);
	if (rval == QLA_SUCCESS)
		return rval;

	return qla24xx_load_risc_flash(vha, srisc_addr);
}
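
/**
 * qla81xx_load_risc() - Load ISP81xx RISC firmware.
 * @vha: HA context
 * @srisc_addr: returned starting RISC address of the loaded firmware
 *
 * Same as qla24xx_load_risc(), but with the load priority reversed: the
 * flash-resident image is tried first and the request-firmware blob is the
 * fallback.
 */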
int
qla81xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
{
	int rval;

	/*
	 * FW Load priority:
	 * 1) Firmware residing in flash.
	 * 2) Firmware via request-firmware interface (.bin file).
	 */
	rval = qla24xx_load_risc_flash(vha, srisc_addr);
	if (rval == QLA_SUCCESS)
		return rval;

	return qla24xx_load_risc_blob(vha, srisc_addr);
}

void
qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha)
{
	int ret, retries;
	struct qla_hw_data *ha = vha->hw;

	if (!IS_FWI2_CAPABLE(ha))
		return;
	if (!ha->fw_major_version)
		return;

	ret = qla2x00_stop_firmware(vha);
	for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT &&
	    retries; retries--) {
		ha->isp_ops->reset_chip(vha);
		if (ha->isp_ops->chip_diag(vha) != QLA_SUCCESS)
			continue;
		if (qla2x00_setup_chip(vha) != QLA_SUCCESS)
			continue;
		qla_printk(KERN_INFO, ha,
		    "Attempting retry of stop-firmware command...\n");
		ret = qla2x00_stop_firmware(vha);
	}
}

int
qla24xx_configure_vhba(scsi_qla_host_t *vha)
{
	int rval = QLA_SUCCESS;
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	struct req_que *req = ha->req_q_map[vha->req_ques[0]];
	struct rsp_que *rsp = req->rsp;

	if (!vha->vp_idx)
		return -EINVAL;

	rval = qla2x00_fw_ready(base_vha);
	if (rval == QLA_SUCCESS) {
		clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
		qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
	}

	vha->flags.management_server_logged_in = 0;

	/* Login to SNS first */
	ha->isp_ops->fabric_login(vha, NPH_SNS, 0xff, 0xff, 0xfc, mb, BIT_1);
	if (mb[0] != MBS_COMMAND_COMPLETE) {
		DEBUG15(qla_printk(KERN_INFO, ha,
		    "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x "
		    "mb[2]=%x mb[6]=%x mb[7]=%x\n", NPH_SNS,
		    mb[0], mb[1], mb[2], mb[6], mb[7]));
		return (QLA_FUNCTION_FAILED);
	}

	atomic_set(&vha->loop_down_timer, 0);
	atomic_set(&vha->loop_state, LOOP_UP);
	set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
	set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
	rval = qla2x00_loop_resync(base_vha);

	return rval;
}

/* 84XX Support **************************************************************/

static LIST_HEAD(qla_cs84xx_list);
static DEFINE_MUTEX(qla_cs84xx_mutex);
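
/*
 * Return the qla_chip_state_84xx shared by the functions on this PCI bus,
 * taking a reference; allocate and register a new one on first use.  Callers
 * drop the reference with qla84xx_put_chip().
 */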
static struct qla_chip_state_84xx *
qla84xx_get_chip(struct scsi_qla_host *vha)
{
	struct qla_chip_state_84xx *cs84xx;
	struct qla_hw_data *ha = vha->hw;

	mutex_lock(&qla_cs84xx_mutex);

	/* Find any shared 84xx chip. */
	list_for_each_entry(cs84xx, &qla_cs84xx_list, list) {
		if (cs84xx->bus == ha->pdev->bus) {
			kref_get(&cs84xx->kref);
			goto done;
		}
	}

	cs84xx = kzalloc(sizeof(*cs84xx), GFP_KERNEL);
	if (!cs84xx)
		goto done;

	kref_init(&cs84xx->kref);
	spin_lock_init(&cs84xx->access_lock);
	mutex_init(&cs84xx->fw_update_mutex);
	cs84xx->bus = ha->pdev->bus;

	list_add_tail(&cs84xx->list, &qla_cs84xx_list);
done:
	mutex_unlock(&qla_cs84xx_mutex);
	return cs84xx;
}

static void
__qla84xx_chip_release(struct kref *kref)
{
	struct qla_chip_state_84xx *cs84xx =
	    container_of(kref, struct qla_chip_state_84xx, kref);

	mutex_lock(&qla_cs84xx_mutex);
	list_del(&cs84xx->list);
	mutex_unlock(&qla_cs84xx_mutex);
	kfree(cs84xx);
}

void
qla84xx_put_chip(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;

	if (ha->cs84xx)
		kref_put(&ha->cs84xx->kref, __qla84xx_chip_release);
}

static int
qla84xx_init_chip(scsi_qla_host_t *vha)
{
	int rval;
	uint16_t status[2];
	struct qla_hw_data *ha = vha->hw;

	mutex_lock(&ha->cs84xx->fw_update_mutex);

	rval = qla84xx_verify_chip(vha, status);

	mutex_unlock(&ha->cs84xx->fw_update_mutex);

	return rval != QLA_SUCCESS || status[0] ? QLA_FUNCTION_FAILED :
	    QLA_SUCCESS;
}

/* 81XX Support **************************************************************/
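
/**
 * qla81xx_nvram_config() - Setup ISP81xx init-control block from NVRAM.
 * @vha: HA context
 *
 * Caches the VPD and NVRAM contents, verifies the NVRAM checksum and
 * signature, and builds the firmware initialization control block along with
 * the driver's NVRAM-derived parameters.
 *
 * Returns 0 on success, 1 if invalid NVRAM forced safe defaults to be used.
 */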
int
qla81xx_nvram_config(scsi_qla_host_t *vha)
{
	int rval;
	struct init_cb_81xx *icb;
	struct nvram_81xx *nv;
	uint32_t *dptr;
	uint8_t *dptr1, *dptr2;
	uint32_t chksum;
	uint16_t cnt;
	struct qla_hw_data *ha = vha->hw;

	rval = QLA_SUCCESS;
	icb = (struct init_cb_81xx *)ha->init_cb;
	nv = ha->nvram;

	/* Determine NVRAM starting address. */
	ha->nvram_size = sizeof(struct nvram_81xx);
	ha->nvram_base = FA_NVRAM_FUNC0_ADDR;
	ha->vpd_size = FA_NVRAM_VPD_SIZE;
	ha->vpd_base = FA_NVRAM_VPD0_ADDR;
	if (PCI_FUNC(ha->pdev->devfn) & 1) {
		ha->nvram_base = FA_NVRAM_FUNC1_ADDR;
		ha->vpd_base = FA_NVRAM_VPD1_ADDR;
	}

	/* Get VPD data into cache */
	ha->vpd = ha->nvram + VPD_OFFSET;
	ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd,
	    ha->nvram_base - FA_NVRAM_FUNC0_ADDR, FA_NVRAM_VPD_SIZE * 4);

	/* Get NVRAM data into cache and calculate checksum. */
	dptr = (uint32_t *)nv;
	ha->isp_ops->read_nvram(vha, (uint8_t *)dptr, ha->nvram_base,
	    ha->nvram_size);
	for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++)
		chksum += le32_to_cpu(*dptr++);

	DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", vha->host_no));
	DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size));

	/* Bad NVRAM data, set defaults parameters. */
	if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P'
	    || nv->id[3] != ' ' ||
	    nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) {
		/* Reset NVRAM data. */
		qla_printk(KERN_WARNING, ha, "Inconsistent NVRAM detected: "
		    "checksum=0x%x id=%c version=0x%x.\n", chksum, nv->id[0],
		    le16_to_cpu(nv->nvram_version));
		qla_printk(KERN_WARNING, ha, "Falling back to functioning (yet "
		    "invalid -- WWPN) defaults.\n");

		/*
		 * Set default initialization control block.
		 */
		memset(nv, 0, ha->nvram_size);
		nv->nvram_version = __constant_cpu_to_le16(ICB_VERSION);
		nv->version = __constant_cpu_to_le16(ICB_VERSION);
		nv->frame_payload_size = __constant_cpu_to_le16(2048);
		nv->execution_throttle = __constant_cpu_to_le16(0xFFFF);
		nv->exchange_count = __constant_cpu_to_le16(0);
		nv->port_name[0] = 0x21;
		nv->port_name[1] = 0x00 + PCI_FUNC(ha->pdev->devfn);
		nv->port_name[2] = 0x00;
		nv->port_name[3] = 0xe0;
		nv->port_name[4] = 0x8b;
		nv->port_name[5] = 0x1c;
		nv->port_name[6] = 0x55;
		nv->port_name[7] = 0x86;
		nv->node_name[0] = 0x20;
		nv->node_name[1] = 0x00;
		nv->node_name[2] = 0x00;
		nv->node_name[3] = 0xe0;
		nv->node_name[4] = 0x8b;
		nv->node_name[5] = 0x1c;
		nv->node_name[6] = 0x55;
		nv->node_name[7] = 0x86;
		nv->login_retry_count = __constant_cpu_to_le16(8);
		nv->interrupt_delay_timer = __constant_cpu_to_le16(0);
		nv->login_timeout = __constant_cpu_to_le16(0);
		nv->firmware_options_1 =
		    __constant_cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
		nv->firmware_options_2 = __constant_cpu_to_le32(2 << 4);
		nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12);
		nv->firmware_options_3 = __constant_cpu_to_le32(2 << 13);
		nv->host_p = __constant_cpu_to_le32(BIT_11|BIT_10);
		nv->efi_parameters = __constant_cpu_to_le32(0);
		nv->reset_delay = 5;
		nv->max_luns_per_target = __constant_cpu_to_le16(128);
		nv->port_down_retry_count = __constant_cpu_to_le16(30);
		nv->link_down_timeout = __constant_cpu_to_le16(30);
		nv->enode_mac[0] = 0x01;
		nv->enode_mac[1] = 0x02;
		nv->enode_mac[2] = 0x03;
		nv->enode_mac[3] = 0x04;
		nv->enode_mac[4] = 0x05;
		nv->enode_mac[5] = 0x06 + PCI_FUNC(ha->pdev->devfn);

		rval = 1;
	}

	/* Reset Initialization control block */
	memset(icb, 0, sizeof(struct init_cb_81xx));

	/* Copy 1st segment. */
	dptr1 = (uint8_t *)icb;
	dptr2 = (uint8_t *)&nv->version;
	cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version;
	while (cnt--)
		*dptr1++ = *dptr2++;

	icb->login_retry_count = nv->login_retry_count;

	/* Copy 2nd segment. */
	dptr1 = (uint8_t *)&icb->interrupt_delay_timer;
	dptr2 = (uint8_t *)&nv->interrupt_delay_timer;
	cnt = (uint8_t *)&icb->reserved_5 -
	    (uint8_t *)&icb->interrupt_delay_timer;
	while (cnt--)
		*dptr1++ = *dptr2++;

	memcpy(icb->enode_mac, nv->enode_mac, sizeof(icb->enode_mac));
	/* Some boards (with valid NVRAMs) still have NULL enode_mac!! */
	if (!memcmp(icb->enode_mac, "\0\0\0\0\0\0", sizeof(icb->enode_mac))) {
		icb->enode_mac[0] = 0x01;
		icb->enode_mac[1] = 0x02;
		icb->enode_mac[2] = 0x03;
		icb->enode_mac[3] = 0x04;
		icb->enode_mac[4] = 0x05;
		icb->enode_mac[5] = 0x06 + PCI_FUNC(ha->pdev->devfn);
	}

	/*
	 * Setup driver NVRAM options.
	 */
	qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
	    "QLE81XX");

	/* Use alternate WWN? */
	if (nv->host_p & __constant_cpu_to_le32(BIT_15)) {
		memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
		memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
	}
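
	/*
	 * When the node-name option (firmware_options_1 BIT_14) is clear, the
	 * firmware derives the node name from the port name using the mask
	 * below; mirror that derivation here so the driver's copy of the node
	 * name stays consistent with what the firmware will use.
	 */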
4472 */ 4473 memcpy(icb->node_name, icb->port_name, WWN_SIZE); 4474 icb->node_name[0] &= 0xF0; 4475 } 4476 4477 /* Set host adapter parameters. */ 4478 ha->flags.disable_risc_code_load = 0; 4479 ha->flags.enable_lip_reset = 0; 4480 ha->flags.enable_lip_full_login = 4481 le32_to_cpu(nv->host_p) & BIT_10 ? 1: 0; 4482 ha->flags.enable_target_reset = 4483 le32_to_cpu(nv->host_p) & BIT_11 ? 1: 0; 4484 ha->flags.enable_led_scheme = 0; 4485 ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1: 0; 4486 4487 ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) & 4488 (BIT_6 | BIT_5 | BIT_4)) >> 4; 4489 4490 /* save HBA serial number */ 4491 ha->serial0 = icb->port_name[5]; 4492 ha->serial1 = icb->port_name[6]; 4493 ha->serial2 = icb->port_name[7]; 4494 memcpy(vha->node_name, icb->node_name, WWN_SIZE); 4495 memcpy(vha->port_name, icb->port_name, WWN_SIZE); 4496 4497 icb->execution_throttle = __constant_cpu_to_le16(0xFFFF); 4498 4499 ha->retry_count = le16_to_cpu(nv->login_retry_count); 4500 4501 /* Set minimum login_timeout to 4 seconds. */ 4502 if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout) 4503 nv->login_timeout = cpu_to_le16(ql2xlogintimeout); 4504 if (le16_to_cpu(nv->login_timeout) < 4) 4505 nv->login_timeout = __constant_cpu_to_le16(4); 4506 ha->login_timeout = le16_to_cpu(nv->login_timeout); 4507 icb->login_timeout = nv->login_timeout; 4508 4509 /* Set minimum RATOV to 100 tenths of a second. */ 4510 ha->r_a_tov = 100; 4511 4512 ha->loop_reset_delay = nv->reset_delay; 4513 4514 /* Link Down Timeout = 0: 4515 * 4516 * When Port Down timer expires we will start returning 4517 * I/O's to OS with "DID_NO_CONNECT". 4518 * 4519 * Link Down Timeout != 0: 4520 * 4521 * The driver waits for the link to come up after link down 4522 * before returning I/Os to OS with "DID_NO_CONNECT". 4523 */ 4524 if (le16_to_cpu(nv->link_down_timeout) == 0) { 4525 ha->loop_down_abort_time = 4526 (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT); 4527 } else { 4528 ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout); 4529 ha->loop_down_abort_time = 4530 (LOOP_DOWN_TIME - ha->link_down_timeout); 4531 } 4532 4533 /* Need enough time to try and get the port back. */ 4534 ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count); 4535 if (qlport_down_retry) 4536 ha->port_down_retry_count = qlport_down_retry; 4537 4538 /* Set login_retry_count */ 4539 ha->login_retry_count = le16_to_cpu(nv->login_retry_count); 4540 if (ha->port_down_retry_count == 4541 le16_to_cpu(nv->port_down_retry_count) && 4542 ha->port_down_retry_count > 3) 4543 ha->login_retry_count = ha->port_down_retry_count; 4544 else if (ha->port_down_retry_count > (int)ha->login_retry_count) 4545 ha->login_retry_count = ha->port_down_retry_count; 4546 if (ql2xloginretrycount) 4547 ha->login_retry_count = ql2xloginretrycount; 4548 4549 /* Enable ZIO. */ 4550 if (!vha->flags.init_done) { 4551 ha->zio_mode = le32_to_cpu(icb->firmware_options_2) & 4552 (BIT_3 | BIT_2 | BIT_1 | BIT_0); 4553 ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ? 
	/* Enable ZIO. */
	if (!vha->flags.init_done) {
		ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
		    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
		ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
		    le16_to_cpu(icb->interrupt_delay_timer) : 2;
	}
	icb->firmware_options_2 &= __constant_cpu_to_le32(
	    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
	vha->flags.process_response_queue = 0;
	if (ha->zio_mode != QLA_ZIO_DISABLED) {
		ha->zio_mode = QLA_ZIO_MODE_6;

		DEBUG2(printk("scsi(%ld): ZIO mode %d enabled; timer delay "
		    "(%d us).\n", vha->host_no, ha->zio_mode,
		    ha->zio_timer * 100));
		qla_printk(KERN_INFO, ha,
		    "ZIO mode %d enabled; timer delay (%d us).\n",
		    ha->zio_mode, ha->zio_timer * 100);

		icb->firmware_options_2 |= cpu_to_le32(
		    (uint32_t)ha->zio_mode);
		icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
		vha->flags.process_response_queue = 1;
	}

	if (rval) {
		DEBUG2_3(printk(KERN_WARNING
		    "scsi(%ld): NVRAM configuration failed!\n", vha->host_no));
	}
	return (rval);
}

void
qla81xx_update_fw_options(scsi_qla_host_t *ha)
{
}