1 /* 2 * QLogic Fibre Channel HBA Driver 3 * Copyright (c) 2003-2008 QLogic Corporation 4 * 5 * See LICENSE.qla2xxx for copyright and licensing details. 6 */ 7 #include "qla_def.h" 8 9 #include <linux/moduleparam.h> 10 #include <linux/vmalloc.h> 11 #include <linux/delay.h> 12 #include <linux/kthread.h> 13 #include <linux/mutex.h> 14 15 #include <scsi/scsi_tcq.h> 16 #include <scsi/scsicam.h> 17 #include <scsi/scsi_transport.h> 18 #include <scsi/scsi_transport_fc.h> 19 20 /* 21 * Driver version 22 */ 23 char qla2x00_version_str[40]; 24 25 /* 26 * SRB allocation cache 27 */ 28 static struct kmem_cache *srb_cachep; 29 30 int ql2xlogintimeout = 20; 31 module_param(ql2xlogintimeout, int, S_IRUGO|S_IRUSR); 32 MODULE_PARM_DESC(ql2xlogintimeout, 33 "Login timeout value in seconds."); 34 35 int qlport_down_retry; 36 module_param(qlport_down_retry, int, S_IRUGO|S_IRUSR); 37 MODULE_PARM_DESC(qlport_down_retry, 38 "Maximum number of command retries to a port that returns " 39 "a PORT-DOWN status."); 40 41 int ql2xplogiabsentdevice; 42 module_param(ql2xplogiabsentdevice, int, S_IRUGO|S_IWUSR); 43 MODULE_PARM_DESC(ql2xplogiabsentdevice, 44 "Option to enable PLOGI to devices that are not present after " 45 "a Fabric scan. This is needed for several broken switches. " 46 "Default is 0 - no PLOGI. 1 - perform PLOGI."); 47 48 int ql2xloginretrycount = 0; 49 module_param(ql2xloginretrycount, int, S_IRUGO|S_IRUSR); 50 MODULE_PARM_DESC(ql2xloginretrycount, 51 "Specify an alternate value for the NVRAM login retry count."); 52 53 int ql2xallocfwdump = 1; 54 module_param(ql2xallocfwdump, int, S_IRUGO|S_IRUSR); 55 MODULE_PARM_DESC(ql2xallocfwdump, 56 "Option to enable allocation of memory for a firmware dump " 57 "during HBA initialization. Memory allocation requirements " 58 "vary by ISP type. Default is 1 - allocate memory."); 59 60 int ql2xextended_error_logging; 61 module_param(ql2xextended_error_logging, int, S_IRUGO|S_IWUSR); 62 MODULE_PARM_DESC(ql2xextended_error_logging, 63 "Option to enable extended error logging. " 64 "Default is 0 - no logging. 1 - log errors."); 65 66 static void qla2x00_free_device(scsi_qla_host_t *); 67 68 static void qla2x00_config_dma_addressing(scsi_qla_host_t *ha); 69 70 int ql2xfdmienable=1; 71 module_param(ql2xfdmienable, int, S_IRUGO|S_IRUSR); 72 MODULE_PARM_DESC(ql2xfdmienable, 73 "Enables FDMI registrations. " 74 "Default is 1 - perform FDMI. 0 - no FDMI."); 75 76 #define MAX_Q_DEPTH 32 77 static int ql2xmaxqdepth = MAX_Q_DEPTH; 78 module_param(ql2xmaxqdepth, int, S_IRUGO|S_IWUSR); 79 MODULE_PARM_DESC(ql2xmaxqdepth, 80 "Maximum queue depth to report for target devices."); 81 82 int ql2xqfullrampup = 120; 83 module_param(ql2xqfullrampup, int, S_IRUGO|S_IWUSR); 84 MODULE_PARM_DESC(ql2xqfullrampup, 85 "Number of seconds to wait to begin to ramp-up the queue " 86 "depth for a device after a queue-full condition has been " 87 "detected. Default is 120 seconds."); 88 89 int ql2xiidmaenable=1; 90 module_param(ql2xiidmaenable, int, S_IRUGO|S_IRUSR); 91 MODULE_PARM_DESC(ql2xiidmaenable, 92 "Enables iIDMA settings. " 93 "Default is 1 - perform iIDMA. 
0 - no iIDMA."); 94 95 96 /* 97 * SCSI host template entry points 98 */ 99 static int qla2xxx_slave_configure(struct scsi_device * device); 100 static int qla2xxx_slave_alloc(struct scsi_device *); 101 static int qla2xxx_scan_finished(struct Scsi_Host *, unsigned long time); 102 static void qla2xxx_scan_start(struct Scsi_Host *); 103 static void qla2xxx_slave_destroy(struct scsi_device *); 104 static int qla2x00_queuecommand(struct scsi_cmnd *cmd, 105 void (*fn)(struct scsi_cmnd *)); 106 static int qla24xx_queuecommand(struct scsi_cmnd *cmd, 107 void (*fn)(struct scsi_cmnd *)); 108 static int qla2xxx_eh_abort(struct scsi_cmnd *); 109 static int qla2xxx_eh_device_reset(struct scsi_cmnd *); 110 static int qla2xxx_eh_target_reset(struct scsi_cmnd *); 111 static int qla2xxx_eh_bus_reset(struct scsi_cmnd *); 112 static int qla2xxx_eh_host_reset(struct scsi_cmnd *); 113 114 static int qla2x00_change_queue_depth(struct scsi_device *, int); 115 static int qla2x00_change_queue_type(struct scsi_device *, int); 116 117 static struct scsi_host_template qla2x00_driver_template = { 118 .module = THIS_MODULE, 119 .name = QLA2XXX_DRIVER_NAME, 120 .queuecommand = qla2x00_queuecommand, 121 122 .eh_abort_handler = qla2xxx_eh_abort, 123 .eh_device_reset_handler = qla2xxx_eh_device_reset, 124 .eh_target_reset_handler = qla2xxx_eh_target_reset, 125 .eh_bus_reset_handler = qla2xxx_eh_bus_reset, 126 .eh_host_reset_handler = qla2xxx_eh_host_reset, 127 128 .slave_configure = qla2xxx_slave_configure, 129 130 .slave_alloc = qla2xxx_slave_alloc, 131 .slave_destroy = qla2xxx_slave_destroy, 132 .scan_finished = qla2xxx_scan_finished, 133 .scan_start = qla2xxx_scan_start, 134 .change_queue_depth = qla2x00_change_queue_depth, 135 .change_queue_type = qla2x00_change_queue_type, 136 .this_id = -1, 137 .cmd_per_lun = 3, 138 .use_clustering = ENABLE_CLUSTERING, 139 .sg_tablesize = SG_ALL, 140 141 /* 142 * The RISC allows for each command to transfer (2^32-1) bytes of data, 143 * which equates to 0x800000 sectors. 
144 */ 145 .max_sectors = 0xFFFF, 146 .shost_attrs = qla2x00_host_attrs, 147 }; 148 149 struct scsi_host_template qla24xx_driver_template = { 150 .module = THIS_MODULE, 151 .name = QLA2XXX_DRIVER_NAME, 152 .queuecommand = qla24xx_queuecommand, 153 154 .eh_abort_handler = qla2xxx_eh_abort, 155 .eh_device_reset_handler = qla2xxx_eh_device_reset, 156 .eh_target_reset_handler = qla2xxx_eh_target_reset, 157 .eh_bus_reset_handler = qla2xxx_eh_bus_reset, 158 .eh_host_reset_handler = qla2xxx_eh_host_reset, 159 160 .slave_configure = qla2xxx_slave_configure, 161 162 .slave_alloc = qla2xxx_slave_alloc, 163 .slave_destroy = qla2xxx_slave_destroy, 164 .scan_finished = qla2xxx_scan_finished, 165 .scan_start = qla2xxx_scan_start, 166 .change_queue_depth = qla2x00_change_queue_depth, 167 .change_queue_type = qla2x00_change_queue_type, 168 .this_id = -1, 169 .cmd_per_lun = 3, 170 .use_clustering = ENABLE_CLUSTERING, 171 .sg_tablesize = SG_ALL, 172 173 .max_sectors = 0xFFFF, 174 .shost_attrs = qla2x00_host_attrs, 175 }; 176 177 static struct scsi_transport_template *qla2xxx_transport_template = NULL; 178 struct scsi_transport_template *qla2xxx_transport_vport_template = NULL; 179 180 /* TODO Convert to inlines 181 * 182 * Timer routines 183 */ 184 185 __inline__ void 186 qla2x00_start_timer(scsi_qla_host_t *ha, void *func, unsigned long interval) 187 { 188 init_timer(&ha->timer); 189 ha->timer.expires = jiffies + interval * HZ; 190 ha->timer.data = (unsigned long)ha; 191 ha->timer.function = (void (*)(unsigned long))func; 192 add_timer(&ha->timer); 193 ha->timer_active = 1; 194 } 195 196 static inline void 197 qla2x00_restart_timer(scsi_qla_host_t *ha, unsigned long interval) 198 { 199 mod_timer(&ha->timer, jiffies + interval * HZ); 200 } 201 202 static __inline__ void 203 qla2x00_stop_timer(scsi_qla_host_t *ha) 204 { 205 del_timer_sync(&ha->timer); 206 ha->timer_active = 0; 207 } 208 209 static int qla2x00_do_dpc(void *data); 210 211 static void qla2x00_rst_aen(scsi_qla_host_t *); 212 213 static int qla2x00_mem_alloc(scsi_qla_host_t *); 214 static void qla2x00_mem_free(scsi_qla_host_t *ha); 215 static void qla2x00_sp_free_dma(scsi_qla_host_t *, srb_t *); 216 217 /* -------------------------------------------------------------------------- */ 218 219 static char * 220 qla2x00_pci_info_str(struct scsi_qla_host *ha, char *str) 221 { 222 static char *pci_bus_modes[] = { 223 "33", "66", "100", "133", 224 }; 225 uint16_t pci_bus; 226 227 strcpy(str, "PCI"); 228 pci_bus = (ha->pci_attr & (BIT_9 | BIT_10)) >> 9; 229 if (pci_bus) { 230 strcat(str, "-X ("); 231 strcat(str, pci_bus_modes[pci_bus]); 232 } else { 233 pci_bus = (ha->pci_attr & BIT_8) >> 8; 234 strcat(str, " ("); 235 strcat(str, pci_bus_modes[pci_bus]); 236 } 237 strcat(str, " MHz)"); 238 239 return (str); 240 } 241 242 static char * 243 qla24xx_pci_info_str(struct scsi_qla_host *ha, char *str) 244 { 245 static char *pci_bus_modes[] = { "33", "66", "100", "133", }; 246 uint32_t pci_bus; 247 int pcie_reg; 248 249 pcie_reg = pci_find_capability(ha->pdev, PCI_CAP_ID_EXP); 250 if (pcie_reg) { 251 char lwstr[6]; 252 uint16_t pcie_lstat, lspeed, lwidth; 253 254 pcie_reg += 0x12; 255 pci_read_config_word(ha->pdev, pcie_reg, &pcie_lstat); 256 lspeed = pcie_lstat & (BIT_0 | BIT_1 | BIT_2 | BIT_3); 257 lwidth = (pcie_lstat & 258 (BIT_4 | BIT_5 | BIT_6 | BIT_7 | BIT_8 | BIT_9)) >> 4; 259 260 strcpy(str, "PCIe ("); 261 if (lspeed == 1) 262 strcat(str, "2.5GT/s "); 263 else if (lspeed == 2) 264 strcat(str, "5.0GT/s "); 265 else 266 strcat(str, "<unknown> "); 267 
snprintf(lwstr, sizeof(lwstr), "x%d)", lwidth); 268 strcat(str, lwstr); 269 270 return str; 271 } 272 273 strcpy(str, "PCI"); 274 pci_bus = (ha->pci_attr & CSRX_PCIX_BUS_MODE_MASK) >> 8; 275 if (pci_bus == 0 || pci_bus == 8) { 276 strcat(str, " ("); 277 strcat(str, pci_bus_modes[pci_bus >> 3]); 278 } else { 279 strcat(str, "-X "); 280 if (pci_bus & BIT_2) 281 strcat(str, "Mode 2"); 282 else 283 strcat(str, "Mode 1"); 284 strcat(str, " ("); 285 strcat(str, pci_bus_modes[pci_bus & ~BIT_2]); 286 } 287 strcat(str, " MHz)"); 288 289 return str; 290 } 291 292 static char * 293 qla2x00_fw_version_str(struct scsi_qla_host *ha, char *str) 294 { 295 char un_str[10]; 296 297 sprintf(str, "%d.%02d.%02d ", ha->fw_major_version, 298 ha->fw_minor_version, 299 ha->fw_subminor_version); 300 301 if (ha->fw_attributes & BIT_9) { 302 strcat(str, "FLX"); 303 return (str); 304 } 305 306 switch (ha->fw_attributes & 0xFF) { 307 case 0x7: 308 strcat(str, "EF"); 309 break; 310 case 0x17: 311 strcat(str, "TP"); 312 break; 313 case 0x37: 314 strcat(str, "IP"); 315 break; 316 case 0x77: 317 strcat(str, "VI"); 318 break; 319 default: 320 sprintf(un_str, "(%x)", ha->fw_attributes); 321 strcat(str, un_str); 322 break; 323 } 324 if (ha->fw_attributes & 0x100) 325 strcat(str, "X"); 326 327 return (str); 328 } 329 330 static char * 331 qla24xx_fw_version_str(struct scsi_qla_host *ha, char *str) 332 { 333 sprintf(str, "%d.%02d.%02d ", ha->fw_major_version, 334 ha->fw_minor_version, 335 ha->fw_subminor_version); 336 337 if (ha->fw_attributes & BIT_0) 338 strcat(str, "[Class 2] "); 339 if (ha->fw_attributes & BIT_1) 340 strcat(str, "[IP] "); 341 if (ha->fw_attributes & BIT_2) 342 strcat(str, "[Multi-ID] "); 343 if (ha->fw_attributes & BIT_3) 344 strcat(str, "[SB-2] "); 345 if (ha->fw_attributes & BIT_4) 346 strcat(str, "[T10 CRC] "); 347 if (ha->fw_attributes & BIT_5) 348 strcat(str, "[VI] "); 349 if (ha->fw_attributes & BIT_10) 350 strcat(str, "[84XX] "); 351 if (ha->fw_attributes & BIT_13) 352 strcat(str, "[Experimental]"); 353 return str; 354 } 355 356 static inline srb_t * 357 qla2x00_get_new_sp(scsi_qla_host_t *ha, fc_port_t *fcport, 358 struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) 359 { 360 srb_t *sp; 361 362 sp = mempool_alloc(ha->srb_mempool, GFP_ATOMIC); 363 if (!sp) 364 return sp; 365 366 sp->ha = ha; 367 sp->fcport = fcport; 368 sp->cmd = cmd; 369 sp->flags = 0; 370 CMD_SP(cmd) = (void *)sp; 371 cmd->scsi_done = done; 372 373 return sp; 374 } 375 376 static int 377 qla2x00_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) 378 { 379 scsi_qla_host_t *ha = shost_priv(cmd->device->host); 380 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; 381 struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device)); 382 srb_t *sp; 383 int rval; 384 385 if (unlikely(pci_channel_offline(ha->pdev))) { 386 cmd->result = DID_REQUEUE << 16; 387 goto qc_fail_command; 388 } 389 390 rval = fc_remote_port_chkready(rport); 391 if (rval) { 392 cmd->result = rval; 393 goto qc_fail_command; 394 } 395 396 /* Close window on fcport/rport state-transitioning. 
*/ 397 if (fcport->drport) { 398 cmd->result = DID_IMM_RETRY << 16; 399 goto qc_fail_command; 400 } 401 402 if (atomic_read(&fcport->state) != FCS_ONLINE) { 403 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD || 404 atomic_read(&ha->loop_state) == LOOP_DEAD) { 405 cmd->result = DID_NO_CONNECT << 16; 406 goto qc_fail_command; 407 } 408 goto qc_host_busy; 409 } 410 411 spin_unlock_irq(ha->host->host_lock); 412 413 sp = qla2x00_get_new_sp(ha, fcport, cmd, done); 414 if (!sp) 415 goto qc_host_busy_lock; 416 417 rval = qla2x00_start_scsi(sp); 418 if (rval != QLA_SUCCESS) 419 goto qc_host_busy_free_sp; 420 421 spin_lock_irq(ha->host->host_lock); 422 423 return 0; 424 425 qc_host_busy_free_sp: 426 qla2x00_sp_free_dma(ha, sp); 427 mempool_free(sp, ha->srb_mempool); 428 429 qc_host_busy_lock: 430 spin_lock_irq(ha->host->host_lock); 431 432 qc_host_busy: 433 return SCSI_MLQUEUE_HOST_BUSY; 434 435 qc_fail_command: 436 done(cmd); 437 438 return 0; 439 } 440 441 442 static int 443 qla24xx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) 444 { 445 scsi_qla_host_t *ha = shost_priv(cmd->device->host); 446 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; 447 struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device)); 448 srb_t *sp; 449 int rval; 450 scsi_qla_host_t *pha = to_qla_parent(ha); 451 452 if (unlikely(pci_channel_offline(pha->pdev))) { 453 cmd->result = DID_REQUEUE << 16; 454 goto qc24_fail_command; 455 } 456 457 rval = fc_remote_port_chkready(rport); 458 if (rval) { 459 cmd->result = rval; 460 goto qc24_fail_command; 461 } 462 463 /* Close window on fcport/rport state-transitioning. */ 464 if (fcport->drport) { 465 cmd->result = DID_IMM_RETRY << 16; 466 goto qc24_fail_command; 467 } 468 469 if (atomic_read(&fcport->state) != FCS_ONLINE) { 470 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD || 471 atomic_read(&pha->loop_state) == LOOP_DEAD) { 472 cmd->result = DID_NO_CONNECT << 16; 473 goto qc24_fail_command; 474 } 475 goto qc24_host_busy; 476 } 477 478 spin_unlock_irq(ha->host->host_lock); 479 480 sp = qla2x00_get_new_sp(pha, fcport, cmd, done); 481 if (!sp) 482 goto qc24_host_busy_lock; 483 484 rval = qla24xx_start_scsi(sp); 485 if (rval != QLA_SUCCESS) 486 goto qc24_host_busy_free_sp; 487 488 spin_lock_irq(ha->host->host_lock); 489 490 return 0; 491 492 qc24_host_busy_free_sp: 493 qla2x00_sp_free_dma(pha, sp); 494 mempool_free(sp, pha->srb_mempool); 495 496 qc24_host_busy_lock: 497 spin_lock_irq(ha->host->host_lock); 498 499 qc24_host_busy: 500 return SCSI_MLQUEUE_HOST_BUSY; 501 502 qc24_fail_command: 503 done(cmd); 504 505 return 0; 506 } 507 508 509 /* 510 * qla2x00_eh_wait_on_command 511 * Waits for the command to be returned by the Firmware for some 512 * max time. 513 * 514 * Input: 515 * ha = actual ha whose done queue will contain the command 516 * returned by firmware. 517 * cmd = Scsi Command to wait on. 
518 * flag = Abort/Reset(Bus or Device Reset) 519 * 520 * Return: 521 * Not Found : 0 522 * Found : 1 523 */ 524 static int 525 qla2x00_eh_wait_on_command(scsi_qla_host_t *ha, struct scsi_cmnd *cmd) 526 { 527 #define ABORT_POLLING_PERIOD 1000 528 #define ABORT_WAIT_ITER ((10 * 1000) / (ABORT_POLLING_PERIOD)) 529 unsigned long wait_iter = ABORT_WAIT_ITER; 530 int ret = QLA_SUCCESS; 531 532 while (CMD_SP(cmd)) { 533 msleep(ABORT_POLLING_PERIOD); 534 535 if (!--wait_iter) 536 break; 537 } 538 if (CMD_SP(cmd)) 539 ret = QLA_FUNCTION_FAILED; 540 541 return ret; 542 } 543 544 /* 545 * qla2x00_wait_for_hba_online 546 * Wait till the HBA is online after going through 547 * <= MAX_RETRIES_OF_ISP_ABORT retries, or until 548 * the HBA is finally disabled, i.e. marked offline 549 * 550 * Input: 551 * ha - pointer to host adapter structure 552 * 553 * Note: 554 * Does context switching; release any SPIN_LOCK 555 * held before calling this routine. 556 * 557 * Return: 558 * Success (Adapter is online) : 0 559 * Failed (Adapter is offline/disabled) : 1 560 */ 561 int 562 qla2x00_wait_for_hba_online(scsi_qla_host_t *ha) 563 { 564 int return_status; 565 unsigned long wait_online; 566 scsi_qla_host_t *pha = to_qla_parent(ha); 567 568 wait_online = jiffies + (MAX_LOOP_TIMEOUT * HZ); 569 while (((test_bit(ISP_ABORT_NEEDED, &pha->dpc_flags)) || 570 test_bit(ABORT_ISP_ACTIVE, &pha->dpc_flags) || 571 test_bit(ISP_ABORT_RETRY, &pha->dpc_flags) || 572 pha->dpc_active) && time_before(jiffies, wait_online)) { 573 574 msleep(1000); 575 } 576 if (pha->flags.online) 577 return_status = QLA_SUCCESS; 578 else 579 return_status = QLA_FUNCTION_FAILED; 580 581 return (return_status); 582 } 583 584 /* 585 * qla2x00_wait_for_loop_ready 586 * Wait up to MAX_LOOP_TIMEOUT (5 min) for the loop 587 * to reach the LOOP_READY state. 588 * Input: 589 * ha - pointer to host adapter structure 590 * 591 * Note: 592 * Does context switching; release any SPIN_LOCK 593 * held before calling this routine.
594 * 595 * 596 * Return: 597 * Success (LOOP_READY) : 0 598 * Failed (LOOP_NOT_READY) : 1 599 */ 600 static inline int 601 qla2x00_wait_for_loop_ready(scsi_qla_host_t *ha) 602 { 603 int return_status = QLA_SUCCESS; 604 unsigned long loop_timeout; 605 scsi_qla_host_t *pha = to_qla_parent(ha); 606 607 /* wait for 5 min at the max for loop to be ready */ 608 loop_timeout = jiffies + (MAX_LOOP_TIMEOUT * HZ); 609 610 while ((!atomic_read(&pha->loop_down_timer) && 611 atomic_read(&pha->loop_state) == LOOP_DOWN) || 612 atomic_read(&pha->loop_state) != LOOP_READY) { 613 if (atomic_read(&pha->loop_state) == LOOP_DEAD) { 614 return_status = QLA_FUNCTION_FAILED; 615 break; 616 } 617 msleep(1000); 618 if (time_after_eq(jiffies, loop_timeout)) { 619 return_status = QLA_FUNCTION_FAILED; 620 break; 621 } 622 } 623 return (return_status); 624 } 625 626 void 627 qla2x00_abort_fcport_cmds(fc_port_t *fcport) 628 { 629 int cnt; 630 unsigned long flags; 631 srb_t *sp; 632 scsi_qla_host_t *ha = fcport->ha; 633 scsi_qla_host_t *pha = to_qla_parent(ha); 634 635 spin_lock_irqsave(&pha->hardware_lock, flags); 636 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) { 637 sp = pha->outstanding_cmds[cnt]; 638 if (!sp) 639 continue; 640 if (sp->fcport != fcport) 641 continue; 642 643 spin_unlock_irqrestore(&pha->hardware_lock, flags); 644 if (ha->isp_ops->abort_command(ha, sp)) { 645 DEBUG2(qla_printk(KERN_WARNING, ha, 646 "Abort failed -- %lx\n", sp->cmd->serial_number)); 647 } else { 648 if (qla2x00_eh_wait_on_command(ha, sp->cmd) != 649 QLA_SUCCESS) 650 DEBUG2(qla_printk(KERN_WARNING, ha, 651 "Abort failed while waiting -- %lx\n", 652 sp->cmd->serial_number)); 653 654 } 655 spin_lock_irqsave(&pha->hardware_lock, flags); 656 } 657 spin_unlock_irqrestore(&pha->hardware_lock, flags); 658 } 659 660 static void 661 qla2x00_block_error_handler(struct scsi_cmnd *cmnd) 662 { 663 struct Scsi_Host *shost = cmnd->device->host; 664 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); 665 unsigned long flags; 666 667 spin_lock_irqsave(shost->host_lock, flags); 668 while (rport->port_state == FC_PORTSTATE_BLOCKED) { 669 spin_unlock_irqrestore(shost->host_lock, flags); 670 msleep(1000); 671 spin_lock_irqsave(shost->host_lock, flags); 672 } 673 spin_unlock_irqrestore(shost->host_lock, flags); 674 return; 675 } 676 677 /************************************************************************** 678 * qla2xxx_eh_abort 679 * 680 * Description: 681 * The abort function will abort the specified command. 682 * 683 * Input: 684 * cmd = Linux SCSI command packet to be aborted. 685 * 686 * Returns: 687 * Either SUCCESS or FAILED. 688 * 689 * Note: 690 * Only return FAILED if command not returned by firmware. 691 **************************************************************************/ 692 static int 693 qla2xxx_eh_abort(struct scsi_cmnd *cmd) 694 { 695 scsi_qla_host_t *ha = shost_priv(cmd->device->host); 696 srb_t *sp; 697 int ret, i; 698 unsigned int id, lun; 699 unsigned long serial; 700 unsigned long flags; 701 int wait = 0; 702 scsi_qla_host_t *pha = to_qla_parent(ha); 703 704 qla2x00_block_error_handler(cmd); 705 706 if (!CMD_SP(cmd)) 707 return SUCCESS; 708 709 ret = SUCCESS; 710 711 id = cmd->device->id; 712 lun = cmd->device->lun; 713 serial = cmd->serial_number; 714 715 /* Check active list for the command.
*/ 716 spin_lock_irqsave(&pha->hardware_lock, flags); 717 for (i = 1; i < MAX_OUTSTANDING_COMMANDS; i++) { 718 sp = pha->outstanding_cmds[i]; 719 720 if (sp == NULL) 721 continue; 722 723 if (sp->cmd != cmd) 724 continue; 725 726 DEBUG2(printk("%s(%ld): aborting sp %p from RISC. pid=%ld.\n", 727 __func__, ha->host_no, sp, serial)); 728 729 spin_unlock_irqrestore(&pha->hardware_lock, flags); 730 if (ha->isp_ops->abort_command(ha, sp)) { 731 DEBUG2(printk("%s(%ld): abort_command " 732 "mbx failed.\n", __func__, ha->host_no)); 733 } else { 734 DEBUG3(printk("%s(%ld): abort_command " 735 "mbx success.\n", __func__, ha->host_no)); 736 wait = 1; 737 } 738 spin_lock_irqsave(&pha->hardware_lock, flags); 739 740 break; 741 } 742 spin_unlock_irqrestore(&pha->hardware_lock, flags); 743 744 /* Wait for the command to be returned. */ 745 if (wait) { 746 if (qla2x00_eh_wait_on_command(ha, cmd) != QLA_SUCCESS) { 747 qla_printk(KERN_ERR, ha, 748 "scsi(%ld:%d:%d): Abort handler timed out -- %lx " 749 "%x.\n", ha->host_no, id, lun, serial, ret); 750 ret = FAILED; 751 } 752 } 753 754 qla_printk(KERN_INFO, ha, 755 "scsi(%ld:%d:%d): Abort command issued -- %d %lx %x.\n", 756 ha->host_no, id, lun, wait, serial, ret); 757 758 return ret; 759 } 760 761 enum nexus_wait_type { 762 WAIT_HOST = 0, 763 WAIT_TARGET, 764 WAIT_LUN, 765 }; 766 767 static int 768 qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *ha, unsigned int t, 769 unsigned int l, enum nexus_wait_type type) 770 { 771 int cnt, match, status; 772 srb_t *sp; 773 unsigned long flags; 774 scsi_qla_host_t *pha = to_qla_parent(ha); 775 776 status = QLA_SUCCESS; 777 spin_lock_irqsave(&pha->hardware_lock, flags); 778 for (cnt = 1; status == QLA_SUCCESS && cnt < MAX_OUTSTANDING_COMMANDS; 779 cnt++) { 780 sp = pha->outstanding_cmds[cnt]; 781 if (!sp) 782 continue; 783 784 if (ha->vp_idx != sp->fcport->ha->vp_idx) 785 continue; 786 match = 0; 787 switch (type) { 788 case WAIT_HOST: 789 match = 1; 790 break; 791 case WAIT_TARGET: 792 match = sp->cmd->device->id == t; 793 break; 794 case WAIT_LUN: 795 match = (sp->cmd->device->id == t && 796 sp->cmd->device->lun == l); 797 break; 798 } 799 if (!match) 800 continue; 801 802 spin_unlock_irqrestore(&pha->hardware_lock, flags); 803 status = qla2x00_eh_wait_on_command(ha, sp->cmd); 804 spin_lock_irqsave(&pha->hardware_lock, flags); 805 } 806 spin_unlock_irqrestore(&pha->hardware_lock, flags); 807 808 return status; 809 } 810 811 static char *reset_errors[] = { 812 "HBA not online", 813 "HBA not ready", 814 "Task management failed", 815 "Waiting for command completions", 816 }; 817 818 static int 819 __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type, 820 struct scsi_cmnd *cmd, int (*do_reset)(struct fc_port *, unsigned int)) 821 { 822 scsi_qla_host_t *ha = shost_priv(cmd->device->host); 823 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; 824 int err; 825 826 qla2x00_block_error_handler(cmd); 827 828 if (!fcport) 829 return FAILED; 830 831 qla_printk(KERN_INFO, ha, "scsi(%ld:%d:%d): %s RESET ISSUED.\n", 832 ha->host_no, cmd->device->id, cmd->device->lun, name); 833 834 err = 0; 835 if (qla2x00_wait_for_hba_online(ha) != QLA_SUCCESS) 836 goto eh_reset_failed; 837 err = 1; 838 if (qla2x00_wait_for_loop_ready(ha) != QLA_SUCCESS) 839 goto eh_reset_failed; 840 err = 2; 841 if (do_reset(fcport, cmd->device->lun) != QLA_SUCCESS) 842 goto eh_reset_failed; 843 err = 3; 844 if (qla2x00_eh_wait_for_pending_commands(ha, cmd->device->id, 845 cmd->device->lun, type) != QLA_SUCCESS) 846 goto 
eh_reset_failed; 847 848 qla_printk(KERN_INFO, ha, "scsi(%ld:%d:%d): %s RESET SUCCEEDED.\n", 849 ha->host_no, cmd->device->id, cmd->device->lun, name); 850 851 return SUCCESS; 852 853 eh_reset_failed: 854 qla_printk(KERN_INFO, ha, "scsi(%ld:%d:%d): %s RESET FAILED: %s.\n", 855 ha->host_no, cmd->device->id, cmd->device->lun, name, 856 reset_errors[err]); 857 return FAILED; 858 } 859 860 static int 861 qla2xxx_eh_device_reset(struct scsi_cmnd *cmd) 862 { 863 scsi_qla_host_t *ha = shost_priv(cmd->device->host); 864 865 return __qla2xxx_eh_generic_reset("DEVICE", WAIT_LUN, cmd, 866 ha->isp_ops->lun_reset); 867 } 868 869 static int 870 qla2xxx_eh_target_reset(struct scsi_cmnd *cmd) 871 { 872 scsi_qla_host_t *ha = shost_priv(cmd->device->host); 873 874 return __qla2xxx_eh_generic_reset("TARGET", WAIT_TARGET, cmd, 875 ha->isp_ops->target_reset); 876 } 877 878 /************************************************************************** 879 * qla2xxx_eh_bus_reset 880 * 881 * Description: 882 * The bus reset function will reset the bus and abort any executing 883 * commands. 884 * 885 * Input: 886 * cmd = Linux SCSI command packet of the command that caused the 887 * bus reset. 888 * 889 * Returns: 890 * SUCCESS/FAILURE (defined as macro in scsi.h). 891 * 892 **************************************************************************/ 893 static int 894 qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd) 895 { 896 scsi_qla_host_t *ha = shost_priv(cmd->device->host); 897 scsi_qla_host_t *pha = to_qla_parent(ha); 898 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; 899 int ret = FAILED; 900 unsigned int id, lun; 901 unsigned long serial; 902 903 qla2x00_block_error_handler(cmd); 904 905 id = cmd->device->id; 906 lun = cmd->device->lun; 907 serial = cmd->serial_number; 908 909 if (!fcport) 910 return ret; 911 912 qla_printk(KERN_INFO, ha, 913 "scsi(%ld:%d:%d): LOOP RESET ISSUED.\n", ha->host_no, id, lun); 914 915 if (qla2x00_wait_for_hba_online(ha) != QLA_SUCCESS) { 916 DEBUG2(printk("%s failed: board disabled\n",__func__)); 917 goto eh_bus_reset_done; 918 } 919 920 if (qla2x00_wait_for_loop_ready(ha) == QLA_SUCCESS) { 921 if (qla2x00_loop_reset(ha) == QLA_SUCCESS) 922 ret = SUCCESS; 923 } 924 if (ret == FAILED) 925 goto eh_bus_reset_done; 926 927 /* Flush outstanding commands. */ 928 if (qla2x00_eh_wait_for_pending_commands(pha, 0, 0, WAIT_HOST) != 929 QLA_SUCCESS) 930 ret = FAILED; 931 932 eh_bus_reset_done: 933 qla_printk(KERN_INFO, ha, "%s: reset %s\n", __func__, 934 (ret == FAILED) ? "failed" : "succeeded"); 935 936 return ret; 937 } 938 939 /************************************************************************** 940 * qla2xxx_eh_host_reset 941 * 942 * Description: 943 * The reset function will reset the Adapter. 944 * 945 * Input: 946 * cmd = Linux SCSI command packet of the command that caused the 947 * adapter reset. 948 * 949 * Returns: 950 * Either SUCCESS or FAILED.
951 * 952 * Note: 953 **************************************************************************/ 954 static int 955 qla2xxx_eh_host_reset(struct scsi_cmnd *cmd) 956 { 957 scsi_qla_host_t *ha = shost_priv(cmd->device->host); 958 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; 959 int ret = FAILED; 960 unsigned int id, lun; 961 unsigned long serial; 962 scsi_qla_host_t *pha = to_qla_parent(ha); 963 964 qla2x00_block_error_handler(cmd); 965 966 id = cmd->device->id; 967 lun = cmd->device->lun; 968 serial = cmd->serial_number; 969 970 if (!fcport) 971 return ret; 972 973 qla_printk(KERN_INFO, ha, 974 "scsi(%ld:%d:%d): ADAPTER RESET ISSUED.\n", ha->host_no, id, lun); 975 976 if (qla2x00_wait_for_hba_online(ha) != QLA_SUCCESS) 977 goto eh_host_reset_lock; 978 979 /* 980 * FIXME: the dpc thread may be active and processing a 981 * loop_resync, so wait a while for it to 982 * complete before issuing the big hammer. Otherwise 983 * it may cause an I/O failure, as the big hammer marks the 984 * devices as lost, kicking off the port_down_timer, 985 * while the dpc is stuck waiting for the mailbox to complete. 986 */ 987 qla2x00_wait_for_loop_ready(ha); 988 set_bit(ABORT_ISP_ACTIVE, &pha->dpc_flags); 989 if (qla2x00_abort_isp(pha)) { 990 clear_bit(ABORT_ISP_ACTIVE, &pha->dpc_flags); 991 /* failed. schedule dpc to try */ 992 set_bit(ISP_ABORT_NEEDED, &pha->dpc_flags); 993 994 if (qla2x00_wait_for_hba_online(ha) != QLA_SUCCESS) 995 goto eh_host_reset_lock; 996 } 997 clear_bit(ABORT_ISP_ACTIVE, &pha->dpc_flags); 998 999 /* Wait for our command in done_queue to be returned to the OS. */ 1000 if (qla2x00_eh_wait_for_pending_commands(pha, 0, 0, WAIT_HOST) == 1001 QLA_SUCCESS) 1002 ret = SUCCESS; 1003 1004 if (ha->parent) 1005 qla2x00_vp_abort_isp(ha); 1006 1007 eh_host_reset_lock: 1008 qla_printk(KERN_INFO, ha, "%s: reset %s\n", __func__, 1009 (ret == FAILED) ? "failed" : "succeeded"); 1010 1011 return ret; 1012 } 1013 1014 /* 1015 * qla2x00_loop_reset 1016 * Issue loop reset. 1017 * 1018 * Input: 1019 * ha = adapter block pointer.
1020 * 1021 * Returns: 1022 * 0 = success 1023 */ 1024 int 1025 qla2x00_loop_reset(scsi_qla_host_t *ha) 1026 { 1027 int ret; 1028 struct fc_port *fcport; 1029 1030 if (ha->flags.enable_lip_full_login) { 1031 ret = qla2x00_full_login_lip(ha); 1032 if (ret != QLA_SUCCESS) { 1033 DEBUG2_3(printk("%s(%ld): bus_reset failed: " 1034 "full_login_lip=%d.\n", __func__, ha->host_no, 1035 ret)); 1036 } 1037 atomic_set(&ha->loop_state, LOOP_DOWN); 1038 atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME); 1039 qla2x00_mark_all_devices_lost(ha, 0); 1040 qla2x00_wait_for_loop_ready(ha); 1041 } 1042 1043 if (ha->flags.enable_lip_reset) { 1044 ret = qla2x00_lip_reset(ha); 1045 if (ret != QLA_SUCCESS) { 1046 DEBUG2_3(printk("%s(%ld): bus_reset failed: " 1047 "lip_reset=%d.\n", __func__, ha->host_no, ret)); 1048 } 1049 qla2x00_wait_for_loop_ready(ha); 1050 } 1051 1052 if (ha->flags.enable_target_reset) { 1053 list_for_each_entry(fcport, &ha->fcports, list) { 1054 if (fcport->port_type != FCT_TARGET) 1055 continue; 1056 1057 ret = ha->isp_ops->target_reset(fcport, 0); 1058 if (ret != QLA_SUCCESS) { 1059 DEBUG2_3(printk("%s(%ld): bus_reset failed: " 1060 "target_reset=%d d_id=%x.\n", __func__, 1061 ha->host_no, ret, fcport->d_id.b24)); 1062 } 1063 } 1064 } 1065 1066 /* Issue marker command only when we are going to start the I/O */ 1067 ha->marker_needed = 1; 1068 1069 return QLA_SUCCESS; 1070 } 1071 1072 void 1073 qla2x00_abort_all_cmds(scsi_qla_host_t *ha, int res) 1074 { 1075 int cnt; 1076 unsigned long flags; 1077 srb_t *sp; 1078 1079 spin_lock_irqsave(&ha->hardware_lock, flags); 1080 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) { 1081 sp = ha->outstanding_cmds[cnt]; 1082 if (sp) { 1083 ha->outstanding_cmds[cnt] = NULL; 1084 sp->cmd->result = res; 1085 qla2x00_sp_compl(ha, sp); 1086 } 1087 } 1088 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1089 } 1090 1091 static int 1092 qla2xxx_slave_alloc(struct scsi_device *sdev) 1093 { 1094 struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); 1095 1096 if (!rport || fc_remote_port_chkready(rport)) 1097 return -ENXIO; 1098 1099 sdev->hostdata = *(fc_port_t **)rport->dd_data; 1100 1101 return 0; 1102 } 1103 1104 static int 1105 qla2xxx_slave_configure(struct scsi_device *sdev) 1106 { 1107 scsi_qla_host_t *ha = shost_priv(sdev->host); 1108 struct fc_rport *rport = starget_to_rport(sdev->sdev_target); 1109 1110 if (sdev->tagged_supported) 1111 scsi_activate_tcq(sdev, ha->max_q_depth); 1112 else 1113 scsi_deactivate_tcq(sdev, ha->max_q_depth); 1114 1115 rport->dev_loss_tmo = ha->port_down_retry_count; 1116 1117 return 0; 1118 } 1119 1120 static void 1121 qla2xxx_slave_destroy(struct scsi_device *sdev) 1122 { 1123 sdev->hostdata = NULL; 1124 } 1125 1126 static int 1127 qla2x00_change_queue_depth(struct scsi_device *sdev, int qdepth) 1128 { 1129 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth); 1130 return sdev->queue_depth; 1131 } 1132 1133 static int 1134 qla2x00_change_queue_type(struct scsi_device *sdev, int tag_type) 1135 { 1136 if (sdev->tagged_supported) { 1137 scsi_set_tag_type(sdev, tag_type); 1138 if (tag_type) 1139 scsi_activate_tcq(sdev, sdev->queue_depth); 1140 else 1141 scsi_deactivate_tcq(sdev, sdev->queue_depth); 1142 } else 1143 tag_type = 0; 1144 1145 return tag_type; 1146 } 1147 1148 /** 1149 * qla2x00_config_dma_addressing() - Configure OS DMA addressing method. 1150 * @ha: HA context 1151 * 1152 * At exit, the @ha's flags.enable_64bit_addressing set to indicated 1153 * supported addressing method. 
1154 */ 1155 static void 1156 qla2x00_config_dma_addressing(scsi_qla_host_t *ha) 1157 { 1158 /* Assume a 32bit DMA mask. */ 1159 ha->flags.enable_64bit_addressing = 0; 1160 1161 if (!dma_set_mask(&ha->pdev->dev, DMA_64BIT_MASK)) { 1162 /* Any upper-dword bits set? */ 1163 if (MSD(dma_get_required_mask(&ha->pdev->dev)) && 1164 !pci_set_consistent_dma_mask(ha->pdev, DMA_64BIT_MASK)) { 1165 /* Ok, a 64bit DMA mask is applicable. */ 1166 ha->flags.enable_64bit_addressing = 1; 1167 ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64; 1168 ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64; 1169 return; 1170 } 1171 } 1172 1173 dma_set_mask(&ha->pdev->dev, DMA_32BIT_MASK); 1174 pci_set_consistent_dma_mask(ha->pdev, DMA_32BIT_MASK); 1175 } 1176 1177 static void 1178 qla2x00_enable_intrs(scsi_qla_host_t *ha) 1179 { 1180 unsigned long flags = 0; 1181 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1182 1183 spin_lock_irqsave(&ha->hardware_lock, flags); 1184 ha->interrupts_on = 1; 1185 /* enable risc and host interrupts */ 1186 WRT_REG_WORD(®->ictrl, ICR_EN_INT | ICR_EN_RISC); 1187 RD_REG_WORD(®->ictrl); 1188 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1189 1190 } 1191 1192 static void 1193 qla2x00_disable_intrs(scsi_qla_host_t *ha) 1194 { 1195 unsigned long flags = 0; 1196 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1197 1198 spin_lock_irqsave(&ha->hardware_lock, flags); 1199 ha->interrupts_on = 0; 1200 /* disable risc and host interrupts */ 1201 WRT_REG_WORD(®->ictrl, 0); 1202 RD_REG_WORD(®->ictrl); 1203 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1204 } 1205 1206 static void 1207 qla24xx_enable_intrs(scsi_qla_host_t *ha) 1208 { 1209 unsigned long flags = 0; 1210 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 1211 1212 spin_lock_irqsave(&ha->hardware_lock, flags); 1213 ha->interrupts_on = 1; 1214 WRT_REG_DWORD(®->ictrl, ICRX_EN_RISC_INT); 1215 RD_REG_DWORD(®->ictrl); 1216 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1217 } 1218 1219 static void 1220 qla24xx_disable_intrs(scsi_qla_host_t *ha) 1221 { 1222 unsigned long flags = 0; 1223 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 1224 1225 spin_lock_irqsave(&ha->hardware_lock, flags); 1226 ha->interrupts_on = 0; 1227 WRT_REG_DWORD(®->ictrl, 0); 1228 RD_REG_DWORD(®->ictrl); 1229 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1230 } 1231 1232 static struct isp_operations qla2100_isp_ops = { 1233 .pci_config = qla2100_pci_config, 1234 .reset_chip = qla2x00_reset_chip, 1235 .chip_diag = qla2x00_chip_diag, 1236 .config_rings = qla2x00_config_rings, 1237 .reset_adapter = qla2x00_reset_adapter, 1238 .nvram_config = qla2x00_nvram_config, 1239 .update_fw_options = qla2x00_update_fw_options, 1240 .load_risc = qla2x00_load_risc, 1241 .pci_info_str = qla2x00_pci_info_str, 1242 .fw_version_str = qla2x00_fw_version_str, 1243 .intr_handler = qla2100_intr_handler, 1244 .enable_intrs = qla2x00_enable_intrs, 1245 .disable_intrs = qla2x00_disable_intrs, 1246 .abort_command = qla2x00_abort_command, 1247 .target_reset = qla2x00_abort_target, 1248 .lun_reset = qla2x00_lun_reset, 1249 .fabric_login = qla2x00_login_fabric, 1250 .fabric_logout = qla2x00_fabric_logout, 1251 .calc_req_entries = qla2x00_calc_iocbs_32, 1252 .build_iocbs = qla2x00_build_scsi_iocbs_32, 1253 .prep_ms_iocb = qla2x00_prep_ms_iocb, 1254 .prep_ms_fdmi_iocb = qla2x00_prep_ms_fdmi_iocb, 1255 .read_nvram = qla2x00_read_nvram_data, 1256 .write_nvram = qla2x00_write_nvram_data, 1257 .fw_dump = qla2100_fw_dump, 1258 .beacon_on = NULL, 
1259 .beacon_off = NULL, 1260 .beacon_blink = NULL, 1261 .read_optrom = qla2x00_read_optrom_data, 1262 .write_optrom = qla2x00_write_optrom_data, 1263 .get_flash_version = qla2x00_get_flash_version, 1264 }; 1265 1266 static struct isp_operations qla2300_isp_ops = { 1267 .pci_config = qla2300_pci_config, 1268 .reset_chip = qla2x00_reset_chip, 1269 .chip_diag = qla2x00_chip_diag, 1270 .config_rings = qla2x00_config_rings, 1271 .reset_adapter = qla2x00_reset_adapter, 1272 .nvram_config = qla2x00_nvram_config, 1273 .update_fw_options = qla2x00_update_fw_options, 1274 .load_risc = qla2x00_load_risc, 1275 .pci_info_str = qla2x00_pci_info_str, 1276 .fw_version_str = qla2x00_fw_version_str, 1277 .intr_handler = qla2300_intr_handler, 1278 .enable_intrs = qla2x00_enable_intrs, 1279 .disable_intrs = qla2x00_disable_intrs, 1280 .abort_command = qla2x00_abort_command, 1281 .target_reset = qla2x00_abort_target, 1282 .lun_reset = qla2x00_lun_reset, 1283 .fabric_login = qla2x00_login_fabric, 1284 .fabric_logout = qla2x00_fabric_logout, 1285 .calc_req_entries = qla2x00_calc_iocbs_32, 1286 .build_iocbs = qla2x00_build_scsi_iocbs_32, 1287 .prep_ms_iocb = qla2x00_prep_ms_iocb, 1288 .prep_ms_fdmi_iocb = qla2x00_prep_ms_fdmi_iocb, 1289 .read_nvram = qla2x00_read_nvram_data, 1290 .write_nvram = qla2x00_write_nvram_data, 1291 .fw_dump = qla2300_fw_dump, 1292 .beacon_on = qla2x00_beacon_on, 1293 .beacon_off = qla2x00_beacon_off, 1294 .beacon_blink = qla2x00_beacon_blink, 1295 .read_optrom = qla2x00_read_optrom_data, 1296 .write_optrom = qla2x00_write_optrom_data, 1297 .get_flash_version = qla2x00_get_flash_version, 1298 }; 1299 1300 static struct isp_operations qla24xx_isp_ops = { 1301 .pci_config = qla24xx_pci_config, 1302 .reset_chip = qla24xx_reset_chip, 1303 .chip_diag = qla24xx_chip_diag, 1304 .config_rings = qla24xx_config_rings, 1305 .reset_adapter = qla24xx_reset_adapter, 1306 .nvram_config = qla24xx_nvram_config, 1307 .update_fw_options = qla24xx_update_fw_options, 1308 .load_risc = qla24xx_load_risc, 1309 .pci_info_str = qla24xx_pci_info_str, 1310 .fw_version_str = qla24xx_fw_version_str, 1311 .intr_handler = qla24xx_intr_handler, 1312 .enable_intrs = qla24xx_enable_intrs, 1313 .disable_intrs = qla24xx_disable_intrs, 1314 .abort_command = qla24xx_abort_command, 1315 .target_reset = qla24xx_abort_target, 1316 .lun_reset = qla24xx_lun_reset, 1317 .fabric_login = qla24xx_login_fabric, 1318 .fabric_logout = qla24xx_fabric_logout, 1319 .calc_req_entries = NULL, 1320 .build_iocbs = NULL, 1321 .prep_ms_iocb = qla24xx_prep_ms_iocb, 1322 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb, 1323 .read_nvram = qla24xx_read_nvram_data, 1324 .write_nvram = qla24xx_write_nvram_data, 1325 .fw_dump = qla24xx_fw_dump, 1326 .beacon_on = qla24xx_beacon_on, 1327 .beacon_off = qla24xx_beacon_off, 1328 .beacon_blink = qla24xx_beacon_blink, 1329 .read_optrom = qla24xx_read_optrom_data, 1330 .write_optrom = qla24xx_write_optrom_data, 1331 .get_flash_version = qla24xx_get_flash_version, 1332 }; 1333 1334 static struct isp_operations qla25xx_isp_ops = { 1335 .pci_config = qla25xx_pci_config, 1336 .reset_chip = qla24xx_reset_chip, 1337 .chip_diag = qla24xx_chip_diag, 1338 .config_rings = qla24xx_config_rings, 1339 .reset_adapter = qla24xx_reset_adapter, 1340 .nvram_config = qla24xx_nvram_config, 1341 .update_fw_options = qla24xx_update_fw_options, 1342 .load_risc = qla24xx_load_risc, 1343 .pci_info_str = qla24xx_pci_info_str, 1344 .fw_version_str = qla24xx_fw_version_str, 1345 .intr_handler = qla24xx_intr_handler, 1346 .enable_intrs = 
qla24xx_enable_intrs, 1347 .disable_intrs = qla24xx_disable_intrs, 1348 .abort_command = qla24xx_abort_command, 1349 .target_reset = qla24xx_abort_target, 1350 .lun_reset = qla24xx_lun_reset, 1351 .fabric_login = qla24xx_login_fabric, 1352 .fabric_logout = qla24xx_fabric_logout, 1353 .calc_req_entries = NULL, 1354 .build_iocbs = NULL, 1355 .prep_ms_iocb = qla24xx_prep_ms_iocb, 1356 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb, 1357 .read_nvram = qla25xx_read_nvram_data, 1358 .write_nvram = qla25xx_write_nvram_data, 1359 .fw_dump = qla25xx_fw_dump, 1360 .beacon_on = qla24xx_beacon_on, 1361 .beacon_off = qla24xx_beacon_off, 1362 .beacon_blink = qla24xx_beacon_blink, 1363 .read_optrom = qla25xx_read_optrom_data, 1364 .write_optrom = qla24xx_write_optrom_data, 1365 .get_flash_version = qla24xx_get_flash_version, 1366 }; 1367 1368 static inline void 1369 qla2x00_set_isp_flags(scsi_qla_host_t *ha) 1370 { 1371 ha->device_type = DT_EXTENDED_IDS; 1372 switch (ha->pdev->device) { 1373 case PCI_DEVICE_ID_QLOGIC_ISP2100: 1374 ha->device_type |= DT_ISP2100; 1375 ha->device_type &= ~DT_EXTENDED_IDS; 1376 ha->fw_srisc_address = RISC_START_ADDRESS_2100; 1377 break; 1378 case PCI_DEVICE_ID_QLOGIC_ISP2200: 1379 ha->device_type |= DT_ISP2200; 1380 ha->device_type &= ~DT_EXTENDED_IDS; 1381 ha->fw_srisc_address = RISC_START_ADDRESS_2100; 1382 break; 1383 case PCI_DEVICE_ID_QLOGIC_ISP2300: 1384 ha->device_type |= DT_ISP2300; 1385 ha->device_type |= DT_ZIO_SUPPORTED; 1386 ha->fw_srisc_address = RISC_START_ADDRESS_2300; 1387 break; 1388 case PCI_DEVICE_ID_QLOGIC_ISP2312: 1389 ha->device_type |= DT_ISP2312; 1390 ha->device_type |= DT_ZIO_SUPPORTED; 1391 ha->fw_srisc_address = RISC_START_ADDRESS_2300; 1392 break; 1393 case PCI_DEVICE_ID_QLOGIC_ISP2322: 1394 ha->device_type |= DT_ISP2322; 1395 ha->device_type |= DT_ZIO_SUPPORTED; 1396 if (ha->pdev->subsystem_vendor == 0x1028 && 1397 ha->pdev->subsystem_device == 0x0170) 1398 ha->device_type |= DT_OEM_001; 1399 ha->fw_srisc_address = RISC_START_ADDRESS_2300; 1400 break; 1401 case PCI_DEVICE_ID_QLOGIC_ISP6312: 1402 ha->device_type |= DT_ISP6312; 1403 ha->fw_srisc_address = RISC_START_ADDRESS_2300; 1404 break; 1405 case PCI_DEVICE_ID_QLOGIC_ISP6322: 1406 ha->device_type |= DT_ISP6322; 1407 ha->fw_srisc_address = RISC_START_ADDRESS_2300; 1408 break; 1409 case PCI_DEVICE_ID_QLOGIC_ISP2422: 1410 ha->device_type |= DT_ISP2422; 1411 ha->device_type |= DT_ZIO_SUPPORTED; 1412 ha->device_type |= DT_FWI2; 1413 ha->device_type |= DT_IIDMA; 1414 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 1415 break; 1416 case PCI_DEVICE_ID_QLOGIC_ISP2432: 1417 ha->device_type |= DT_ISP2432; 1418 ha->device_type |= DT_ZIO_SUPPORTED; 1419 ha->device_type |= DT_FWI2; 1420 ha->device_type |= DT_IIDMA; 1421 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 1422 break; 1423 case PCI_DEVICE_ID_QLOGIC_ISP8432: 1424 ha->device_type |= DT_ISP8432; 1425 ha->device_type |= DT_ZIO_SUPPORTED; 1426 ha->device_type |= DT_FWI2; 1427 ha->device_type |= DT_IIDMA; 1428 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 1429 break; 1430 case PCI_DEVICE_ID_QLOGIC_ISP5422: 1431 ha->device_type |= DT_ISP5422; 1432 ha->device_type |= DT_FWI2; 1433 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 1434 break; 1435 case PCI_DEVICE_ID_QLOGIC_ISP5432: 1436 ha->device_type |= DT_ISP5432; 1437 ha->device_type |= DT_FWI2; 1438 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 1439 break; 1440 case PCI_DEVICE_ID_QLOGIC_ISP2532: 1441 ha->device_type |= DT_ISP2532; 1442 ha->device_type |= DT_ZIO_SUPPORTED; 1443 ha->device_type |= 
DT_FWI2; 1444 ha->device_type |= DT_IIDMA; 1445 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 1446 break; 1447 } 1448 } 1449 1450 static int 1451 qla2x00_iospace_config(scsi_qla_host_t *ha) 1452 { 1453 resource_size_t pio; 1454 1455 if (pci_request_selected_regions(ha->pdev, ha->bars, 1456 QLA2XXX_DRIVER_NAME)) { 1457 qla_printk(KERN_WARNING, ha, 1458 "Failed to reserve PIO/MMIO regions (%s)\n", 1459 pci_name(ha->pdev)); 1460 1461 goto iospace_error_exit; 1462 } 1463 if (!(ha->bars & 1)) 1464 goto skip_pio; 1465 1466 /* We only need PIO for Flash operations on ISP2312 v2 chips. */ 1467 pio = pci_resource_start(ha->pdev, 0); 1468 if (pci_resource_flags(ha->pdev, 0) & IORESOURCE_IO) { 1469 if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) { 1470 qla_printk(KERN_WARNING, ha, 1471 "Invalid PCI I/O region size (%s)...\n", 1472 pci_name(ha->pdev)); 1473 pio = 0; 1474 } 1475 } else { 1476 qla_printk(KERN_WARNING, ha, 1477 "region #0 not a PIO resource (%s)...\n", 1478 pci_name(ha->pdev)); 1479 pio = 0; 1480 } 1481 ha->pio_address = pio; 1482 1483 skip_pio: 1484 /* Use MMIO operations for all accesses. */ 1485 if (!(pci_resource_flags(ha->pdev, 1) & IORESOURCE_MEM)) { 1486 qla_printk(KERN_ERR, ha, 1487 "region #1 not an MMIO resource (%s), aborting\n", 1488 pci_name(ha->pdev)); 1489 goto iospace_error_exit; 1490 } 1491 if (pci_resource_len(ha->pdev, 1) < MIN_IOBASE_LEN) { 1492 qla_printk(KERN_ERR, ha, 1493 "Invalid PCI mem region size (%s), aborting\n", 1494 pci_name(ha->pdev)); 1495 goto iospace_error_exit; 1496 } 1497 1498 ha->iobase = ioremap(pci_resource_start(ha->pdev, 1), MIN_IOBASE_LEN); 1499 if (!ha->iobase) { 1500 qla_printk(KERN_ERR, ha, 1501 "cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev)); 1502 1503 goto iospace_error_exit; 1504 } 1505 1506 return (0); 1507 1508 iospace_error_exit: 1509 return (-ENOMEM); 1510 } 1511 1512 static void 1513 qla2xxx_scan_start(struct Scsi_Host *shost) 1514 { 1515 scsi_qla_host_t *ha = shost_priv(shost); 1516 1517 set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags); 1518 set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags); 1519 set_bit(RSCN_UPDATE, &ha->dpc_flags); 1520 set_bit(NPIV_CONFIG_NEEDED, &ha->dpc_flags); 1521 } 1522 1523 static int 1524 qla2xxx_scan_finished(struct Scsi_Host *shost, unsigned long time) 1525 { 1526 scsi_qla_host_t *ha = shost_priv(shost); 1527 1528 if (!ha->host) 1529 return 1; 1530 if (time > ha->loop_reset_delay * HZ) 1531 return 1; 1532 1533 return atomic_read(&ha->loop_state) == LOOP_READY; 1534 } 1535 1536 /* 1537 * PCI driver interface 1538 */ 1539 static int __devinit 1540 qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) 1541 { 1542 int ret = -ENODEV; 1543 struct Scsi_Host *host; 1544 scsi_qla_host_t *ha; 1545 char pci_info[30]; 1546 char fw_str[30]; 1547 struct scsi_host_template *sht; 1548 int bars, mem_only = 0; 1549 1550 bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO); 1551 sht = &qla2x00_driver_template; 1552 if (pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2422 || 1553 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2432 || 1554 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8432 || 1555 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5422 || 1556 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5432 || 1557 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2532) { 1558 bars = pci_select_bars(pdev, IORESOURCE_MEM); 1559 sht = &qla24xx_driver_template; 1560 mem_only = 1; 1561 } 1562 1563 if (mem_only) { 1564 if (pci_enable_device_mem(pdev)) 1565 goto probe_out; 1566 } else { 1567 if (pci_enable_device(pdev)) 1568 goto probe_out; 1569 
} 1570 1571 if (pci_find_aer_capability(pdev)) 1572 if (pci_enable_pcie_error_reporting(pdev)) 1573 goto probe_out; 1574 1575 host = scsi_host_alloc(sht, sizeof(scsi_qla_host_t)); 1576 if (host == NULL) { 1577 printk(KERN_WARNING 1578 "qla2xxx: Couldn't allocate host from scsi layer!\n"); 1579 goto probe_disable_device; 1580 } 1581 1582 /* Clear our data area */ 1583 ha = shost_priv(host); 1584 memset(ha, 0, sizeof(scsi_qla_host_t)); 1585 1586 ha->pdev = pdev; 1587 ha->host = host; 1588 ha->host_no = host->host_no; 1589 sprintf(ha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, ha->host_no); 1590 ha->parent = NULL; 1591 ha->bars = bars; 1592 ha->mem_only = mem_only; 1593 spin_lock_init(&ha->hardware_lock); 1594 1595 /* Set ISP-type information. */ 1596 qla2x00_set_isp_flags(ha); 1597 1598 /* Configure PCI I/O space */ 1599 ret = qla2x00_iospace_config(ha); 1600 if (ret) 1601 goto probe_failed; 1602 1603 qla_printk(KERN_INFO, ha, 1604 "Found an ISP%04X, irq %d, iobase 0x%p\n", pdev->device, pdev->irq, 1605 ha->iobase); 1606 1607 ha->prev_topology = 0; 1608 ha->init_cb_size = sizeof(init_cb_t); 1609 ha->mgmt_svr_loop_id = MANAGEMENT_SERVER + ha->vp_idx; 1610 ha->link_data_rate = PORT_SPEED_UNKNOWN; 1611 ha->optrom_size = OPTROM_SIZE_2300; 1612 1613 ha->max_q_depth = MAX_Q_DEPTH; 1614 if (ql2xmaxqdepth != 0 && ql2xmaxqdepth <= 0xffffU) 1615 ha->max_q_depth = ql2xmaxqdepth; 1616 1617 /* Assign ISP specific operations. */ 1618 if (IS_QLA2100(ha)) { 1619 host->max_id = MAX_TARGETS_2100; 1620 ha->mbx_count = MAILBOX_REGISTER_COUNT_2100; 1621 ha->request_q_length = REQUEST_ENTRY_CNT_2100; 1622 ha->response_q_length = RESPONSE_ENTRY_CNT_2100; 1623 ha->last_loop_id = SNS_LAST_LOOP_ID_2100; 1624 host->sg_tablesize = 32; 1625 ha->gid_list_info_size = 4; 1626 ha->isp_ops = &qla2100_isp_ops; 1627 } else if (IS_QLA2200(ha)) { 1628 host->max_id = MAX_TARGETS_2200; 1629 ha->mbx_count = MAILBOX_REGISTER_COUNT; 1630 ha->request_q_length = REQUEST_ENTRY_CNT_2200; 1631 ha->response_q_length = RESPONSE_ENTRY_CNT_2100; 1632 ha->last_loop_id = SNS_LAST_LOOP_ID_2100; 1633 ha->gid_list_info_size = 4; 1634 ha->isp_ops = &qla2100_isp_ops; 1635 } else if (IS_QLA23XX(ha)) { 1636 host->max_id = MAX_TARGETS_2200; 1637 ha->mbx_count = MAILBOX_REGISTER_COUNT; 1638 ha->request_q_length = REQUEST_ENTRY_CNT_2200; 1639 ha->response_q_length = RESPONSE_ENTRY_CNT_2300; 1640 ha->last_loop_id = SNS_LAST_LOOP_ID_2300; 1641 ha->gid_list_info_size = 6; 1642 if (IS_QLA2322(ha) || IS_QLA6322(ha)) 1643 ha->optrom_size = OPTROM_SIZE_2322; 1644 ha->isp_ops = &qla2300_isp_ops; 1645 } else if (IS_QLA24XX_TYPE(ha)) { 1646 host->max_id = MAX_TARGETS_2200; 1647 ha->mbx_count = MAILBOX_REGISTER_COUNT; 1648 ha->request_q_length = REQUEST_ENTRY_CNT_24XX; 1649 ha->response_q_length = RESPONSE_ENTRY_CNT_2300; 1650 ha->last_loop_id = SNS_LAST_LOOP_ID_2300; 1651 ha->init_cb_size = sizeof(struct mid_init_cb_24xx); 1652 ha->mgmt_svr_loop_id = 10 + ha->vp_idx; 1653 ha->gid_list_info_size = 8; 1654 ha->optrom_size = OPTROM_SIZE_24XX; 1655 ha->isp_ops = &qla24xx_isp_ops; 1656 } else if (IS_QLA25XX(ha)) { 1657 host->max_id = MAX_TARGETS_2200; 1658 ha->mbx_count = MAILBOX_REGISTER_COUNT; 1659 ha->request_q_length = REQUEST_ENTRY_CNT_24XX; 1660 ha->response_q_length = RESPONSE_ENTRY_CNT_2300; 1661 ha->last_loop_id = SNS_LAST_LOOP_ID_2300; 1662 ha->init_cb_size = sizeof(struct mid_init_cb_24xx); 1663 ha->mgmt_svr_loop_id = 10 + ha->vp_idx; 1664 ha->gid_list_info_size = 8; 1665 ha->optrom_size = OPTROM_SIZE_25XX; 1666 ha->isp_ops = &qla25xx_isp_ops; 1667 } 1668 
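/* can_queue below is set larger than the request ring itself; when the ring has no free slots, qla2x00_start_scsi()/qla24xx_start_scsi() fail the request and queuecommand returns SCSI_MLQUEUE_HOST_BUSY so the midlayer retries the command later. */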
host->can_queue = ha->request_q_length + 128; 1669 1670 mutex_init(&ha->vport_lock); 1671 init_completion(&ha->mbx_cmd_comp); 1672 complete(&ha->mbx_cmd_comp); 1673 init_completion(&ha->mbx_intr_comp); 1674 1675 INIT_LIST_HEAD(&ha->list); 1676 INIT_LIST_HEAD(&ha->fcports); 1677 INIT_LIST_HEAD(&ha->vp_list); 1678 INIT_LIST_HEAD(&ha->work_list); 1679 1680 set_bit(0, (unsigned long *) ha->vp_idx_map); 1681 1682 qla2x00_config_dma_addressing(ha); 1683 if (qla2x00_mem_alloc(ha)) { 1684 qla_printk(KERN_WARNING, ha, 1685 "[ERROR] Failed to allocate memory for adapter\n"); 1686 1687 ret = -ENOMEM; 1688 goto probe_failed; 1689 } 1690 1691 if (qla2x00_initialize_adapter(ha)) { 1692 qla_printk(KERN_WARNING, ha, 1693 "Failed to initialize adapter\n"); 1694 1695 DEBUG2(printk("scsi(%ld): Failed to initialize adapter - " 1696 "Adapter flags %x.\n", 1697 ha->host_no, ha->device_flags)); 1698 1699 ret = -ENODEV; 1700 goto probe_failed; 1701 } 1702 1703 /* 1704 * Startup the kernel thread for this host adapter 1705 */ 1706 ha->dpc_thread = kthread_create(qla2x00_do_dpc, ha, 1707 "%s_dpc", ha->host_str); 1708 if (IS_ERR(ha->dpc_thread)) { 1709 qla_printk(KERN_WARNING, ha, 1710 "Unable to start DPC thread!\n"); 1711 ret = PTR_ERR(ha->dpc_thread); 1712 goto probe_failed; 1713 } 1714 1715 host->this_id = 255; 1716 host->cmd_per_lun = 3; 1717 host->unique_id = host->host_no; 1718 host->max_cmd_len = MAX_CMDSZ; 1719 host->max_channel = MAX_BUSES - 1; 1720 host->max_lun = MAX_LUNS; 1721 host->transportt = qla2xxx_transport_template; 1722 1723 ret = qla2x00_request_irqs(ha); 1724 if (ret) 1725 goto probe_failed; 1726 1727 /* Initialized the timer */ 1728 qla2x00_start_timer(ha, qla2x00_timer, WATCH_INTERVAL); 1729 1730 DEBUG2(printk("DEBUG: detect hba %ld at address = %p\n", 1731 ha->host_no, ha)); 1732 1733 pci_set_drvdata(pdev, ha); 1734 1735 ha->flags.init_done = 1; 1736 ha->flags.online = 1; 1737 1738 ret = scsi_add_host(host, &pdev->dev); 1739 if (ret) 1740 goto probe_failed; 1741 1742 ha->isp_ops->enable_intrs(ha); 1743 1744 scsi_scan_host(host); 1745 1746 qla2x00_alloc_sysfs_attr(ha); 1747 1748 qla2x00_init_host_attr(ha); 1749 1750 qla2x00_dfs_setup(ha); 1751 1752 qla_printk(KERN_INFO, ha, "\n" 1753 " QLogic Fibre Channel HBA Driver: %s\n" 1754 " QLogic %s - %s\n" 1755 " ISP%04X: %s @ %s hdma%c, host#=%ld, fw=%s\n", 1756 qla2x00_version_str, ha->model_number, 1757 ha->model_desc ? ha->model_desc: "", pdev->device, 1758 ha->isp_ops->pci_info_str(ha, pci_info), pci_name(pdev), 1759 ha->flags.enable_64bit_addressing ? 
'+': '-', ha->host_no, 1760 ha->isp_ops->fw_version_str(ha, fw_str)); 1761 1762 return 0; 1763 1764 probe_failed: 1765 qla2x00_free_device(ha); 1766 1767 scsi_host_put(host); 1768 1769 probe_disable_device: 1770 pci_disable_device(pdev); 1771 1772 probe_out: 1773 return ret; 1774 } 1775 1776 static void 1777 qla2x00_remove_one(struct pci_dev *pdev) 1778 { 1779 scsi_qla_host_t *ha, *vha, *temp; 1780 1781 ha = pci_get_drvdata(pdev); 1782 1783 list_for_each_entry_safe(vha, temp, &ha->vp_list, vp_list) 1784 fc_vport_terminate(vha->fc_vport); 1785 1786 set_bit(UNLOADING, &ha->dpc_flags); 1787 1788 qla2x00_dfs_remove(ha); 1789 1790 qla84xx_put_chip(ha); 1791 1792 qla2x00_free_sysfs_attr(ha); 1793 1794 fc_remove_host(ha->host); 1795 1796 scsi_remove_host(ha->host); 1797 1798 qla2x00_free_device(ha); 1799 1800 scsi_host_put(ha->host); 1801 1802 pci_disable_device(pdev); 1803 pci_set_drvdata(pdev, NULL); 1804 } 1805 1806 static void 1807 qla2x00_free_device(scsi_qla_host_t *ha) 1808 { 1809 qla2x00_abort_all_cmds(ha, DID_NO_CONNECT << 16); 1810 1811 /* Disable timer */ 1812 if (ha->timer_active) 1813 qla2x00_stop_timer(ha); 1814 1815 ha->flags.online = 0; 1816 1817 /* Kill the kernel thread for this host */ 1818 if (ha->dpc_thread) { 1819 struct task_struct *t = ha->dpc_thread; 1820 1821 /* 1822 * qla2xxx_wake_dpc checks for ->dpc_thread 1823 * so we need to zero it out. 1824 */ 1825 ha->dpc_thread = NULL; 1826 kthread_stop(t); 1827 } 1828 1829 if (ha->flags.fce_enabled) 1830 qla2x00_disable_fce_trace(ha, NULL, NULL); 1831 1832 if (ha->eft) 1833 qla2x00_disable_eft_trace(ha); 1834 1835 /* Stop currently executing firmware. */ 1836 qla2x00_try_to_stop_firmware(ha); 1837 1838 /* turn-off interrupts on the card */ 1839 if (ha->interrupts_on) 1840 ha->isp_ops->disable_intrs(ha); 1841 1842 qla2x00_mem_free(ha); 1843 1844 qla2x00_free_irqs(ha); 1845 1846 /* release io space registers */ 1847 if (ha->iobase) 1848 iounmap(ha->iobase); 1849 pci_release_selected_regions(ha->pdev, ha->bars); 1850 } 1851 1852 static inline void 1853 qla2x00_schedule_rport_del(struct scsi_qla_host *ha, fc_port_t *fcport, 1854 int defer) 1855 { 1856 struct fc_rport *rport; 1857 scsi_qla_host_t *pha = to_qla_parent(ha); 1858 1859 if (!fcport->rport) 1860 return; 1861 1862 rport = fcport->rport; 1863 if (defer) { 1864 spin_lock_irq(ha->host->host_lock); 1865 fcport->drport = rport; 1866 spin_unlock_irq(ha->host->host_lock); 1867 set_bit(FCPORT_UPDATE_NEEDED, &pha->dpc_flags); 1868 qla2xxx_wake_dpc(pha); 1869 } else 1870 fc_remote_port_delete(rport); 1871 } 1872 1873 /* 1874 * qla2x00_mark_device_lost Updates fcport state when device goes offline. 1875 * 1876 * Input: ha = adapter block pointer. fcport = port structure pointer. 1877 * 1878 * Return: None. 1879 * 1880 * Context: 1881 */ 1882 void qla2x00_mark_device_lost(scsi_qla_host_t *ha, fc_port_t *fcport, 1883 int do_login, int defer) 1884 { 1885 if (atomic_read(&fcport->state) == FCS_ONLINE && 1886 ha->vp_idx == fcport->vp_idx) 1887 qla2x00_schedule_rport_del(ha, fcport, defer); 1888 1889 /* 1890 * We may need to retry the login, so don't change the state of the 1891 * port but do the retries. 
1892 */ 1893 if (atomic_read(&fcport->state) != FCS_DEVICE_DEAD) 1894 atomic_set(&fcport->state, FCS_DEVICE_LOST); 1895 1896 if (!do_login) 1897 return; 1898 1899 if (fcport->login_retry == 0) { 1900 fcport->login_retry = ha->login_retry_count; 1901 set_bit(RELOGIN_NEEDED, &ha->dpc_flags); 1902 1903 DEBUG(printk("scsi(%ld): Port login retry: " 1904 "%02x%02x%02x%02x%02x%02x%02x%02x, " 1905 "id = 0x%04x retry cnt=%d\n", 1906 ha->host_no, 1907 fcport->port_name[0], 1908 fcport->port_name[1], 1909 fcport->port_name[2], 1910 fcport->port_name[3], 1911 fcport->port_name[4], 1912 fcport->port_name[5], 1913 fcport->port_name[6], 1914 fcport->port_name[7], 1915 fcport->loop_id, 1916 fcport->login_retry)); 1917 } 1918 } 1919 1920 /* 1921 * qla2x00_mark_all_devices_lost 1922 * Updates fcport state when device goes offline. 1923 * 1924 * Input: 1925 * ha = adapter block pointer. 1926 * fcport = port structure pointer. 1927 * 1928 * Return: 1929 * None. 1930 * 1931 * Context: 1932 */ 1933 void 1934 qla2x00_mark_all_devices_lost(scsi_qla_host_t *ha, int defer) 1935 { 1936 fc_port_t *fcport; 1937 scsi_qla_host_t *pha = to_qla_parent(ha); 1938 1939 list_for_each_entry(fcport, &pha->fcports, list) { 1940 if (ha->vp_idx != fcport->vp_idx) 1941 continue; 1942 /* 1943 * No point in marking the device as lost, if the device is 1944 * already DEAD. 1945 */ 1946 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD) 1947 continue; 1948 if (atomic_read(&fcport->state) == FCS_ONLINE) 1949 qla2x00_schedule_rport_del(ha, fcport, defer); 1950 atomic_set(&fcport->state, FCS_DEVICE_LOST); 1951 } 1952 } 1953 1954 /* 1955 * qla2x00_mem_alloc 1956 * Allocates adapter memory. 1957 * 1958 * Returns: 1959 * 0 = success. 1960 * !0 = failure. 1961 */ 1962 static int 1963 qla2x00_mem_alloc(scsi_qla_host_t *ha) 1964 { 1965 char name[16]; 1966 1967 ha->request_ring = dma_alloc_coherent(&ha->pdev->dev, 1968 (ha->request_q_length + 1) * sizeof(request_t), &ha->request_dma, 1969 GFP_KERNEL); 1970 if (!ha->request_ring) 1971 goto fail; 1972 1973 ha->response_ring = dma_alloc_coherent(&ha->pdev->dev, 1974 (ha->response_q_length + 1) * sizeof(response_t), 1975 &ha->response_dma, GFP_KERNEL); 1976 if (!ha->response_ring) 1977 goto fail_free_request_ring; 1978 1979 ha->gid_list = dma_alloc_coherent(&ha->pdev->dev, GID_LIST_SIZE, 1980 &ha->gid_list_dma, GFP_KERNEL); 1981 if (!ha->gid_list) 1982 goto fail_free_response_ring; 1983 1984 ha->init_cb = dma_alloc_coherent(&ha->pdev->dev, ha->init_cb_size, 1985 &ha->init_cb_dma, GFP_KERNEL); 1986 if (!ha->init_cb) 1987 goto fail_free_gid_list; 1988 1989 snprintf(name, sizeof(name), "%s_%ld", QLA2XXX_DRIVER_NAME, 1990 ha->host_no); 1991 ha->s_dma_pool = dma_pool_create(name, &ha->pdev->dev, 1992 DMA_POOL_SIZE, 8, 0); 1993 if (!ha->s_dma_pool) 1994 goto fail_free_init_cb; 1995 1996 ha->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep); 1997 if (!ha->srb_mempool) 1998 goto fail_free_s_dma_pool; 1999 2000 /* Get memory for cached NVRAM */ 2001 ha->nvram = kzalloc(MAX_NVRAM_SIZE, GFP_KERNEL); 2002 if (!ha->nvram) 2003 goto fail_free_srb_mempool; 2004 2005 /* Allocate memory for SNS commands */ 2006 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 2007 /* Get consistent memory allocated for SNS commands */ 2008 ha->sns_cmd = dma_alloc_coherent(&ha->pdev->dev, 2009 sizeof(struct sns_cmd_pkt), &ha->sns_cmd_dma, GFP_KERNEL); 2010 if (!ha->sns_cmd) 2011 goto fail_free_nvram; 2012 } else { 2013 /* Get consistent memory allocated for MS IOCB */ 2014 ha->ms_iocb = dma_pool_alloc(ha->s_dma_pool, 
GFP_KERNEL, 2015 &ha->ms_iocb_dma); 2016 if (!ha->ms_iocb) 2017 goto fail_free_nvram; 2018 2019 /* Get consistent memory allocated for CT SNS commands */ 2020 ha->ct_sns = dma_alloc_coherent(&ha->pdev->dev, 2021 sizeof(struct ct_sns_pkt), &ha->ct_sns_dma, GFP_KERNEL); 2022 if (!ha->ct_sns) 2023 goto fail_free_ms_iocb; 2024 } 2025 2026 return 0; 2027 2028 fail_free_ms_iocb: 2029 dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma); 2030 ha->ms_iocb = NULL; 2031 ha->ms_iocb_dma = 0; 2032 fail_free_nvram: 2033 kfree(ha->nvram); 2034 ha->nvram = NULL; 2035 fail_free_srb_mempool: 2036 mempool_destroy(ha->srb_mempool); 2037 ha->srb_mempool = NULL; 2038 fail_free_s_dma_pool: 2039 dma_pool_destroy(ha->s_dma_pool); 2040 ha->s_dma_pool = NULL; 2041 fail_free_init_cb: 2042 dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, ha->init_cb, 2043 ha->init_cb_dma); 2044 ha->init_cb = NULL; 2045 ha->init_cb_dma = 0; 2046 fail_free_gid_list: 2047 dma_free_coherent(&ha->pdev->dev, GID_LIST_SIZE, ha->gid_list, 2048 ha->gid_list_dma); 2049 ha->gid_list = NULL; 2050 ha->gid_list_dma = 0; 2051 fail_free_response_ring: 2052 dma_free_coherent(&ha->pdev->dev, (ha->response_q_length + 1) * 2053 sizeof(response_t), ha->response_ring, ha->response_dma); 2054 ha->response_ring = NULL; 2055 ha->response_dma = 0; 2056 fail_free_request_ring: 2057 dma_free_coherent(&ha->pdev->dev, (ha->request_q_length + 1) * 2058 sizeof(request_t), ha->request_ring, ha->request_dma); 2059 ha->request_ring = NULL; 2060 ha->request_dma = 0; 2061 fail: 2062 return -ENOMEM; 2063 } 2064 2065 /* 2066 * qla2x00_mem_free 2067 * Frees all adapter allocated memory. 2068 * 2069 * Input: 2070 * ha = adapter block pointer. 2071 */ 2072 static void 2073 qla2x00_mem_free(scsi_qla_host_t *ha) 2074 { 2075 struct list_head *fcpl, *fcptemp; 2076 fc_port_t *fcport; 2077 2078 if (ha->srb_mempool) 2079 mempool_destroy(ha->srb_mempool); 2080 2081 if (ha->fce) 2082 dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce, 2083 ha->fce_dma); 2084 2085 if (ha->fw_dump) { 2086 if (ha->eft) 2087 dma_free_coherent(&ha->pdev->dev, 2088 ntohl(ha->fw_dump->eft_size), ha->eft, ha->eft_dma); 2089 vfree(ha->fw_dump); 2090 } 2091 2092 if (ha->sns_cmd) 2093 dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt), 2094 ha->sns_cmd, ha->sns_cmd_dma); 2095 2096 if (ha->ct_sns) 2097 dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt), 2098 ha->ct_sns, ha->ct_sns_dma); 2099 2100 if (ha->sfp_data) 2101 dma_pool_free(ha->s_dma_pool, ha->sfp_data, ha->sfp_data_dma); 2102 2103 if (ha->ms_iocb) 2104 dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma); 2105 2106 if (ha->s_dma_pool) 2107 dma_pool_destroy(ha->s_dma_pool); 2108 2109 if (ha->init_cb) 2110 dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, 2111 ha->init_cb, ha->init_cb_dma); 2112 2113 if (ha->gid_list) 2114 dma_free_coherent(&ha->pdev->dev, GID_LIST_SIZE, ha->gid_list, 2115 ha->gid_list_dma); 2116 2117 if (ha->response_ring) 2118 dma_free_coherent(&ha->pdev->dev, 2119 (ha->response_q_length + 1) * sizeof(response_t), 2120 ha->response_ring, ha->response_dma); 2121 2122 if (ha->request_ring) 2123 dma_free_coherent(&ha->pdev->dev, 2124 (ha->request_q_length + 1) * sizeof(request_t), 2125 ha->request_ring, ha->request_dma); 2126 2127 ha->srb_mempool = NULL; 2128 ha->eft = NULL; 2129 ha->eft_dma = 0; 2130 ha->sns_cmd = NULL; 2131 ha->sns_cmd_dma = 0; 2132 ha->ct_sns = NULL; 2133 ha->ct_sns_dma = 0; 2134 ha->ms_iocb = NULL; 2135 ha->ms_iocb_dma = 0; 2136 ha->init_cb = NULL; 2137 ha->init_cb_dma = 0; 2138 
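	/*
	 * Clear the remaining pointers and DMA handles below so that nothing
	 * is left dangling once the allocations above have been released.
	 */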
	ha->s_dma_pool = NULL;

	ha->gid_list = NULL;
	ha->gid_list_dma = 0;

	ha->response_ring = NULL;
	ha->response_dma = 0;
	ha->request_ring = NULL;
	ha->request_dma = 0;

	list_for_each_safe(fcpl, fcptemp, &ha->fcports) {
		fcport = list_entry(fcpl, fc_port_t, list);

		/* fc ports */
		list_del_init(&fcport->list);
		kfree(fcport);
	}
	INIT_LIST_HEAD(&ha->fcports);

	ha->fw_dump = NULL;
	ha->fw_dumped = 0;
	ha->fw_dump_reading = 0;

	vfree(ha->optrom_buffer);
	kfree(ha->nvram);
}

static struct qla_work_evt *
qla2x00_alloc_work(struct scsi_qla_host *ha, enum qla_work_type type,
    int locked)
{
	struct qla_work_evt *e;

	e = kzalloc(sizeof(struct qla_work_evt), locked ? GFP_ATOMIC:
	    GFP_KERNEL);
	if (!e)
		return NULL;

	INIT_LIST_HEAD(&e->list);
	e->type = type;
	e->flags = QLA_EVT_FLAG_FREE;
	return e;
}

static int
qla2x00_post_work(struct scsi_qla_host *ha, struct qla_work_evt *e, int locked)
{
	unsigned long uninitialized_var(flags);
	scsi_qla_host_t *pha = to_qla_parent(ha);

	if (!locked)
		spin_lock_irqsave(&pha->hardware_lock, flags);
	list_add_tail(&e->list, &ha->work_list);
	qla2xxx_wake_dpc(ha);
	if (!locked)
		spin_unlock_irqrestore(&pha->hardware_lock, flags);
	return QLA_SUCCESS;
}

int
qla2x00_post_aen_work(struct scsi_qla_host *ha, enum fc_host_event_code code,
    u32 data)
{
	struct qla_work_evt *e;

	e = qla2x00_alloc_work(ha, QLA_EVT_AEN, 1);
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.aen.code = code;
	e->u.aen.data = data;
	return qla2x00_post_work(ha, e, 1);
}

int
qla2x00_post_hwe_work(struct scsi_qla_host *ha, uint16_t code, uint16_t d1,
    uint16_t d2, uint16_t d3)
{
	struct qla_work_evt *e;

	e = qla2x00_alloc_work(ha, QLA_EVT_HWE_LOG, 1);
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.hwe.code = code;
	e->u.hwe.d1 = d1;
	e->u.hwe.d2 = d2;
	e->u.hwe.d3 = d3;
	return qla2x00_post_work(ha, e, 1);
}

static void
qla2x00_do_work(struct scsi_qla_host *ha)
{
	struct qla_work_evt *e;
	scsi_qla_host_t *pha = to_qla_parent(ha);

	spin_lock_irq(&pha->hardware_lock);
	while (!list_empty(&ha->work_list)) {
		e = list_entry(ha->work_list.next, struct qla_work_evt, list);
		list_del_init(&e->list);
		spin_unlock_irq(&pha->hardware_lock);

		switch (e->type) {
		case QLA_EVT_AEN:
			fc_host_post_event(ha->host, fc_get_event_number(),
			    e->u.aen.code, e->u.aen.data);
			break;
		case QLA_EVT_HWE_LOG:
			qla2xxx_hw_event_log(ha, e->u.hwe.code, e->u.hwe.d1,
			    e->u.hwe.d2, e->u.hwe.d3);
			break;
		}
		if (e->flags & QLA_EVT_FLAG_FREE)
			kfree(e);
		spin_lock_irq(&pha->hardware_lock);
	}
	spin_unlock_irq(&pha->hardware_lock);
}

/**************************************************************************
* qla2x00_do_dpc
*   This kernel thread is a task that is scheduled by the interrupt handler
*   to perform the background processing for interrupts.
*
* Notes:
*   This task always runs in the context of a kernel thread.  It
*   is kicked off by the driver's detect code and starts up
*   one per adapter.  It immediately goes to sleep and waits for
*   some fibre event.  When either the interrupt handler or
*   the timer routine detects an event, it sets one of the task
*   bits and then wakes us up.
**************************************************************************/
static int
qla2x00_do_dpc(void *data)
{
	int		rval;
	scsi_qla_host_t *ha;
	fc_port_t	*fcport;
	uint8_t		status;
	uint16_t	next_loopid;
	struct scsi_qla_host *vha;
	int		i;


	ha = (scsi_qla_host_t *)data;

	set_user_nice(current, -20);

	while (!kthread_should_stop()) {
		DEBUG3(printk("qla2x00: DPC handler sleeping\n"));

		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
		__set_current_state(TASK_RUNNING);

		DEBUG3(printk("qla2x00: DPC handler waking up\n"));

		/* Initialization not yet finished. Don't do anything yet. */
		if (!ha->flags.init_done)
			continue;

		DEBUG3(printk("scsi(%ld): DPC handler\n", ha->host_no));

		ha->dpc_active = 1;

		if (ha->flags.mbox_busy) {
			ha->dpc_active = 0;
			continue;
		}

		qla2x00_do_work(ha);

		if (test_and_clear_bit(ISP_ABORT_NEEDED, &ha->dpc_flags)) {

			DEBUG(printk("scsi(%ld): dpc: sched "
			    "qla2x00_abort_isp ha = %p\n",
			    ha->host_no, ha));
			if (!(test_and_set_bit(ABORT_ISP_ACTIVE,
			    &ha->dpc_flags))) {

				if (qla2x00_abort_isp(ha)) {
					/* failed. retry later */
					set_bit(ISP_ABORT_NEEDED,
					    &ha->dpc_flags);
				}
				clear_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags);
			}

			for_each_mapped_vp_idx(ha, i) {
				list_for_each_entry(vha, &ha->vp_list,
				    vp_list) {
					if (i == vha->vp_idx) {
						set_bit(ISP_ABORT_NEEDED,
						    &vha->dpc_flags);
						break;
					}
				}
			}

			DEBUG(printk("scsi(%ld): dpc: qla2x00_abort_isp end\n",
			    ha->host_no));
		}

		if (test_bit(FCPORT_UPDATE_NEEDED, &ha->dpc_flags)) {
			qla2x00_update_fcports(ha);
			clear_bit(FCPORT_UPDATE_NEEDED, &ha->dpc_flags);
		}

		if (test_and_clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags) &&
		    (!(test_and_set_bit(RESET_ACTIVE, &ha->dpc_flags)))) {

			DEBUG(printk("scsi(%ld): qla2x00_reset_marker()\n",
			    ha->host_no));

			qla2x00_rst_aen(ha);
			clear_bit(RESET_ACTIVE, &ha->dpc_flags);
		}

		/* Retry each device up to login retry count */
		if ((test_and_clear_bit(RELOGIN_NEEDED, &ha->dpc_flags)) &&
		    !test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags) &&
		    atomic_read(&ha->loop_state) != LOOP_DOWN) {

			DEBUG(printk("scsi(%ld): qla2x00_port_login()\n",
			    ha->host_no));

			next_loopid = 0;
			list_for_each_entry(fcport, &ha->fcports, list) {
				/*
				 * If the port is not ONLINE then try to login
				 * to it if we haven't run out of retries.
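				 * Fabric-attached devices go through the
				 * fabric login path (with a fabric logout
				 * first for tape devices); local loop devices
				 * use a local device login.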
2371 */ 2372 if (atomic_read(&fcport->state) != FCS_ONLINE && 2373 fcport->login_retry) { 2374 2375 if (fcport->flags & FCF_FABRIC_DEVICE) { 2376 if (fcport->flags & 2377 FCF_TAPE_PRESENT) 2378 ha->isp_ops->fabric_logout( 2379 ha, fcport->loop_id, 2380 fcport->d_id.b.domain, 2381 fcport->d_id.b.area, 2382 fcport->d_id.b.al_pa); 2383 status = qla2x00_fabric_login( 2384 ha, fcport, &next_loopid); 2385 } else 2386 status = 2387 qla2x00_local_device_login( 2388 ha, fcport); 2389 2390 fcport->login_retry--; 2391 if (status == QLA_SUCCESS) { 2392 fcport->old_loop_id = fcport->loop_id; 2393 2394 DEBUG(printk("scsi(%ld): port login OK: logged in ID 0x%x\n", 2395 ha->host_no, fcport->loop_id)); 2396 2397 qla2x00_update_fcport(ha, 2398 fcport); 2399 } else if (status == 1) { 2400 set_bit(RELOGIN_NEEDED, &ha->dpc_flags); 2401 /* retry the login again */ 2402 DEBUG(printk("scsi(%ld): Retrying %d login again loop_id 0x%x\n", 2403 ha->host_no, 2404 fcport->login_retry, fcport->loop_id)); 2405 } else { 2406 fcport->login_retry = 0; 2407 } 2408 if (fcport->login_retry == 0 && status != QLA_SUCCESS) 2409 fcport->loop_id = FC_NO_LOOP_ID; 2410 } 2411 if (test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags)) 2412 break; 2413 } 2414 DEBUG(printk("scsi(%ld): qla2x00_port_login - end\n", 2415 ha->host_no)); 2416 } 2417 2418 if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags)) { 2419 2420 DEBUG(printk("scsi(%ld): qla2x00_loop_resync()\n", 2421 ha->host_no)); 2422 2423 if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, 2424 &ha->dpc_flags))) { 2425 2426 rval = qla2x00_loop_resync(ha); 2427 2428 clear_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags); 2429 } 2430 2431 DEBUG(printk("scsi(%ld): qla2x00_loop_resync - end\n", 2432 ha->host_no)); 2433 } 2434 2435 if (test_bit(NPIV_CONFIG_NEEDED, &ha->dpc_flags) && 2436 atomic_read(&ha->loop_state) == LOOP_READY) { 2437 clear_bit(NPIV_CONFIG_NEEDED, &ha->dpc_flags); 2438 qla2xxx_flash_npiv_conf(ha); 2439 } 2440 2441 if (!ha->interrupts_on) 2442 ha->isp_ops->enable_intrs(ha); 2443 2444 if (test_and_clear_bit(BEACON_BLINK_NEEDED, &ha->dpc_flags)) 2445 ha->isp_ops->beacon_blink(ha); 2446 2447 qla2x00_do_dpc_all_vps(ha); 2448 2449 ha->dpc_active = 0; 2450 } /* End of while(1) */ 2451 2452 DEBUG(printk("scsi(%ld): DPC handler exiting\n", ha->host_no)); 2453 2454 /* 2455 * Make sure that nobody tries to wake us up again. 2456 */ 2457 ha->dpc_active = 0; 2458 2459 return 0; 2460 } 2461 2462 void 2463 qla2xxx_wake_dpc(scsi_qla_host_t *ha) 2464 { 2465 struct task_struct *t = ha->dpc_thread; 2466 2467 if (!test_bit(UNLOADING, &ha->dpc_flags) && t) 2468 wake_up_process(t); 2469 } 2470 2471 /* 2472 * qla2x00_rst_aen 2473 * Processes asynchronous reset. 2474 * 2475 * Input: 2476 * ha = adapter block pointer. 2477 */ 2478 static void 2479 qla2x00_rst_aen(scsi_qla_host_t *ha) 2480 { 2481 if (ha->flags.online && !ha->flags.reset_active && 2482 !atomic_read(&ha->loop_down_timer) && 2483 !(test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags))) { 2484 do { 2485 clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags); 2486 2487 /* 2488 * Issue marker command only when we are going to start 2489 * the I/O. 
2490 */ 2491 ha->marker_needed = 1; 2492 } while (!atomic_read(&ha->loop_down_timer) && 2493 (test_bit(RESET_MARKER_NEEDED, &ha->dpc_flags))); 2494 } 2495 } 2496 2497 static void 2498 qla2x00_sp_free_dma(scsi_qla_host_t *ha, srb_t *sp) 2499 { 2500 struct scsi_cmnd *cmd = sp->cmd; 2501 2502 if (sp->flags & SRB_DMA_VALID) { 2503 scsi_dma_unmap(cmd); 2504 sp->flags &= ~SRB_DMA_VALID; 2505 } 2506 CMD_SP(cmd) = NULL; 2507 } 2508 2509 void 2510 qla2x00_sp_compl(scsi_qla_host_t *ha, srb_t *sp) 2511 { 2512 struct scsi_cmnd *cmd = sp->cmd; 2513 2514 qla2x00_sp_free_dma(ha, sp); 2515 2516 mempool_free(sp, ha->srb_mempool); 2517 2518 cmd->scsi_done(cmd); 2519 } 2520 2521 /************************************************************************** 2522 * qla2x00_timer 2523 * 2524 * Description: 2525 * One second timer 2526 * 2527 * Context: Interrupt 2528 ***************************************************************************/ 2529 void 2530 qla2x00_timer(scsi_qla_host_t *ha) 2531 { 2532 unsigned long cpu_flags = 0; 2533 fc_port_t *fcport; 2534 int start_dpc = 0; 2535 int index; 2536 srb_t *sp; 2537 int t; 2538 scsi_qla_host_t *pha = to_qla_parent(ha); 2539 2540 /* 2541 * Ports - Port down timer. 2542 * 2543 * Whenever, a port is in the LOST state we start decrementing its port 2544 * down timer every second until it reaches zero. Once it reaches zero 2545 * the port it marked DEAD. 2546 */ 2547 t = 0; 2548 list_for_each_entry(fcport, &ha->fcports, list) { 2549 if (fcport->port_type != FCT_TARGET) 2550 continue; 2551 2552 if (atomic_read(&fcport->state) == FCS_DEVICE_LOST) { 2553 2554 if (atomic_read(&fcport->port_down_timer) == 0) 2555 continue; 2556 2557 if (atomic_dec_and_test(&fcport->port_down_timer) != 0) 2558 atomic_set(&fcport->state, FCS_DEVICE_DEAD); 2559 2560 DEBUG(printk("scsi(%ld): fcport-%d - port retry count: " 2561 "%d remaining\n", 2562 ha->host_no, 2563 t, atomic_read(&fcport->port_down_timer))); 2564 } 2565 t++; 2566 } /* End of for fcport */ 2567 2568 2569 /* Loop down handler. */ 2570 if (atomic_read(&ha->loop_down_timer) > 0 && 2571 !(test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags)) && ha->flags.online) { 2572 2573 if (atomic_read(&ha->loop_down_timer) == 2574 ha->loop_down_abort_time) { 2575 2576 DEBUG(printk("scsi(%ld): Loop Down - aborting the " 2577 "queues before time expire\n", 2578 ha->host_no)); 2579 2580 if (!IS_QLA2100(ha) && ha->link_down_timeout) 2581 atomic_set(&ha->loop_state, LOOP_DEAD); 2582 2583 /* Schedule an ISP abort to return any tape commands. 
*/ 2584 /* NPIV - scan physical port only */ 2585 if (!ha->parent) { 2586 spin_lock_irqsave(&ha->hardware_lock, 2587 cpu_flags); 2588 for (index = 1; 2589 index < MAX_OUTSTANDING_COMMANDS; 2590 index++) { 2591 fc_port_t *sfcp; 2592 2593 sp = ha->outstanding_cmds[index]; 2594 if (!sp) 2595 continue; 2596 sfcp = sp->fcport; 2597 if (!(sfcp->flags & FCF_TAPE_PRESENT)) 2598 continue; 2599 2600 set_bit(ISP_ABORT_NEEDED, 2601 &ha->dpc_flags); 2602 break; 2603 } 2604 spin_unlock_irqrestore(&ha->hardware_lock, 2605 cpu_flags); 2606 } 2607 set_bit(ABORT_QUEUES_NEEDED, &ha->dpc_flags); 2608 start_dpc++; 2609 } 2610 2611 /* if the loop has been down for 4 minutes, reinit adapter */ 2612 if (atomic_dec_and_test(&ha->loop_down_timer) != 0) { 2613 DEBUG(printk("scsi(%ld): Loop down exceed 4 mins - " 2614 "restarting queues.\n", 2615 ha->host_no)); 2616 2617 set_bit(RESTART_QUEUES_NEEDED, &ha->dpc_flags); 2618 start_dpc++; 2619 2620 if (!(ha->device_flags & DFLG_NO_CABLE) && 2621 !ha->parent) { 2622 DEBUG(printk("scsi(%ld): Loop down - " 2623 "aborting ISP.\n", 2624 ha->host_no)); 2625 qla_printk(KERN_WARNING, ha, 2626 "Loop down - aborting ISP.\n"); 2627 2628 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 2629 } 2630 } 2631 DEBUG3(printk("scsi(%ld): Loop Down - seconds remaining %d\n", 2632 ha->host_no, 2633 atomic_read(&ha->loop_down_timer))); 2634 } 2635 2636 /* Check if beacon LED needs to be blinked */ 2637 if (ha->beacon_blink_led == 1) { 2638 set_bit(BEACON_BLINK_NEEDED, &ha->dpc_flags); 2639 start_dpc++; 2640 } 2641 2642 /* Process any deferred work. */ 2643 if (!list_empty(&ha->work_list)) 2644 start_dpc++; 2645 2646 /* Schedule the DPC routine if needed */ 2647 if ((test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags) || 2648 test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags) || 2649 test_bit(FCPORT_UPDATE_NEEDED, &ha->dpc_flags) || 2650 start_dpc || 2651 test_bit(RESET_MARKER_NEEDED, &ha->dpc_flags) || 2652 test_bit(BEACON_BLINK_NEEDED, &ha->dpc_flags) || 2653 test_bit(VP_DPC_NEEDED, &ha->dpc_flags) || 2654 test_bit(RELOGIN_NEEDED, &ha->dpc_flags))) 2655 qla2xxx_wake_dpc(pha); 2656 2657 qla2x00_restart_timer(ha, WATCH_INTERVAL); 2658 } 2659 2660 /* Firmware interface routines. 
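 * Firmware images are fetched on demand with request_firmware() and cached
 * in qla_fw_blobs[] until the module is unloaded.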
 */

#define FW_BLOBS	6
#define FW_ISP21XX	0
#define FW_ISP22XX	1
#define FW_ISP2300	2
#define FW_ISP2322	3
#define FW_ISP24XX	4
#define FW_ISP25XX	5

#define FW_FILE_ISP21XX	"ql2100_fw.bin"
#define FW_FILE_ISP22XX	"ql2200_fw.bin"
#define FW_FILE_ISP2300	"ql2300_fw.bin"
#define FW_FILE_ISP2322	"ql2322_fw.bin"
#define FW_FILE_ISP24XX	"ql2400_fw.bin"
#define FW_FILE_ISP25XX	"ql2500_fw.bin"

static DEFINE_MUTEX(qla_fw_lock);

static struct fw_blob qla_fw_blobs[FW_BLOBS] = {
	{ .name = FW_FILE_ISP21XX, .segs = { 0x1000, 0 }, },
	{ .name = FW_FILE_ISP22XX, .segs = { 0x1000, 0 }, },
	{ .name = FW_FILE_ISP2300, .segs = { 0x800, 0 }, },
	{ .name = FW_FILE_ISP2322, .segs = { 0x800, 0x1c000, 0x1e000, 0 }, },
	{ .name = FW_FILE_ISP24XX, },
	{ .name = FW_FILE_ISP25XX, },
};

struct fw_blob *
qla2x00_request_firmware(scsi_qla_host_t *ha)
{
	struct fw_blob *blob;

	blob = NULL;
	if (IS_QLA2100(ha)) {
		blob = &qla_fw_blobs[FW_ISP21XX];
	} else if (IS_QLA2200(ha)) {
		blob = &qla_fw_blobs[FW_ISP22XX];
	} else if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
		blob = &qla_fw_blobs[FW_ISP2300];
	} else if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
		blob = &qla_fw_blobs[FW_ISP2322];
	} else if (IS_QLA24XX_TYPE(ha)) {
		blob = &qla_fw_blobs[FW_ISP24XX];
	} else if (IS_QLA25XX(ha)) {
		blob = &qla_fw_blobs[FW_ISP25XX];
	}

	mutex_lock(&qla_fw_lock);
	if (blob->fw)
		goto out;

	if (request_firmware(&blob->fw, blob->name, &ha->pdev->dev)) {
		DEBUG2(printk("scsi(%ld): Failed to load firmware image "
		    "(%s).\n", ha->host_no, blob->name));
		blob->fw = NULL;
		blob = NULL;
		goto out;
	}

out:
	mutex_unlock(&qla_fw_lock);
	return blob;
}

static void
qla2x00_release_firmware(void)
{
	int idx;

	mutex_lock(&qla_fw_lock);
	for (idx = 0; idx < FW_BLOBS; idx++)
		if (qla_fw_blobs[idx].fw)
			release_firmware(qla_fw_blobs[idx].fw);
	mutex_unlock(&qla_fw_lock);
}

static pci_ers_result_t
qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	switch (state) {
	case pci_channel_io_normal:
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		pci_disable_device(pdev);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		qla2x00_remove_one(pdev);
		return PCI_ERS_RESULT_DISCONNECT;
	}
	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t
qla2xxx_pci_mmio_enabled(struct pci_dev *pdev)
{
	int risc_paused = 0;
	uint32_t stat;
	unsigned long flags;
	scsi_qla_host_t *ha = pci_get_drvdata(pdev);
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
		stat = RD_REG_DWORD(&reg->hccr);
		if (stat & HCCR_RISC_PAUSE)
			risc_paused = 1;
	} else if (IS_QLA23XX(ha)) {
		stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
		if (stat & HSR_RISC_PAUSED)
			risc_paused = 1;
	} else if (IS_FWI2_CAPABLE(ha)) {
		stat = RD_REG_DWORD(&reg24->host_status);
		if (stat & HSRX_RISC_PAUSED)
			risc_paused = 1;
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if
(risc_paused) { 2780 qla_printk(KERN_INFO, ha, "RISC paused -- mmio_enabled, " 2781 "Dumping firmware!\n"); 2782 ha->isp_ops->fw_dump(ha, 0); 2783 2784 return PCI_ERS_RESULT_NEED_RESET; 2785 } else 2786 return PCI_ERS_RESULT_RECOVERED; 2787 } 2788 2789 static pci_ers_result_t 2790 qla2xxx_pci_slot_reset(struct pci_dev *pdev) 2791 { 2792 pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT; 2793 scsi_qla_host_t *ha = pci_get_drvdata(pdev); 2794 int rc; 2795 2796 if (ha->mem_only) 2797 rc = pci_enable_device_mem(pdev); 2798 else 2799 rc = pci_enable_device(pdev); 2800 2801 if (rc) { 2802 qla_printk(KERN_WARNING, ha, 2803 "Can't re-enable PCI device after reset.\n"); 2804 2805 return ret; 2806 } 2807 pci_set_master(pdev); 2808 2809 if (ha->isp_ops->pci_config(ha)) 2810 return ret; 2811 2812 set_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags); 2813 if (qla2x00_abort_isp(ha)== QLA_SUCCESS) 2814 ret = PCI_ERS_RESULT_RECOVERED; 2815 clear_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags); 2816 2817 return ret; 2818 } 2819 2820 static void 2821 qla2xxx_pci_resume(struct pci_dev *pdev) 2822 { 2823 scsi_qla_host_t *ha = pci_get_drvdata(pdev); 2824 int ret; 2825 2826 ret = qla2x00_wait_for_hba_online(ha); 2827 if (ret != QLA_SUCCESS) { 2828 qla_printk(KERN_ERR, ha, 2829 "the device failed to resume I/O " 2830 "from slot/link_reset"); 2831 } 2832 pci_cleanup_aer_uncorrect_error_status(pdev); 2833 } 2834 2835 static struct pci_error_handlers qla2xxx_err_handler = { 2836 .error_detected = qla2xxx_pci_error_detected, 2837 .mmio_enabled = qla2xxx_pci_mmio_enabled, 2838 .slot_reset = qla2xxx_pci_slot_reset, 2839 .resume = qla2xxx_pci_resume, 2840 }; 2841 2842 static struct pci_device_id qla2xxx_pci_tbl[] = { 2843 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2100) }, 2844 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2200) }, 2845 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2300) }, 2846 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2312) }, 2847 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2322) }, 2848 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP6312) }, 2849 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP6322) }, 2850 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2422) }, 2851 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2432) }, 2852 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8432) }, 2853 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5422) }, 2854 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5432) }, 2855 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2532) }, 2856 { 0 }, 2857 }; 2858 MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl); 2859 2860 static struct pci_driver qla2xxx_pci_driver = { 2861 .name = QLA2XXX_DRIVER_NAME, 2862 .driver = { 2863 .owner = THIS_MODULE, 2864 }, 2865 .id_table = qla2xxx_pci_tbl, 2866 .probe = qla2x00_probe_one, 2867 .remove = qla2x00_remove_one, 2868 .err_handler = &qla2xxx_err_handler, 2869 }; 2870 2871 /** 2872 * qla2x00_module_init - Module initialization. 2873 **/ 2874 static int __init 2875 qla2x00_module_init(void) 2876 { 2877 int ret = 0; 2878 2879 /* Allocate cache for SRBs. */ 2880 srb_cachep = kmem_cache_create("qla2xxx_srbs", sizeof(srb_t), 0, 2881 SLAB_HWCACHE_ALIGN, NULL); 2882 if (srb_cachep == NULL) { 2883 printk(KERN_ERR 2884 "qla2xxx: Unable to allocate SRB cache...Failing load!\n"); 2885 return -ENOMEM; 2886 } 2887 2888 /* Derive version string. 
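	 * A "-debug" suffix is appended when ql2xextended_error_logging is
	 * set.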
*/ 2889 strcpy(qla2x00_version_str, QLA2XXX_VERSION); 2890 if (ql2xextended_error_logging) 2891 strcat(qla2x00_version_str, "-debug"); 2892 2893 qla2xxx_transport_template = 2894 fc_attach_transport(&qla2xxx_transport_functions); 2895 if (!qla2xxx_transport_template) { 2896 kmem_cache_destroy(srb_cachep); 2897 return -ENODEV; 2898 } 2899 qla2xxx_transport_vport_template = 2900 fc_attach_transport(&qla2xxx_transport_vport_functions); 2901 if (!qla2xxx_transport_vport_template) { 2902 kmem_cache_destroy(srb_cachep); 2903 fc_release_transport(qla2xxx_transport_template); 2904 return -ENODEV; 2905 } 2906 2907 printk(KERN_INFO "QLogic Fibre Channel HBA Driver: %s\n", 2908 qla2x00_version_str); 2909 ret = pci_register_driver(&qla2xxx_pci_driver); 2910 if (ret) { 2911 kmem_cache_destroy(srb_cachep); 2912 fc_release_transport(qla2xxx_transport_template); 2913 fc_release_transport(qla2xxx_transport_vport_template); 2914 } 2915 return ret; 2916 } 2917 2918 /** 2919 * qla2x00_module_exit - Module cleanup. 2920 **/ 2921 static void __exit 2922 qla2x00_module_exit(void) 2923 { 2924 pci_unregister_driver(&qla2xxx_pci_driver); 2925 qla2x00_release_firmware(); 2926 kmem_cache_destroy(srb_cachep); 2927 fc_release_transport(qla2xxx_transport_template); 2928 fc_release_transport(qla2xxx_transport_vport_template); 2929 } 2930 2931 module_init(qla2x00_module_init); 2932 module_exit(qla2x00_module_exit); 2933 2934 MODULE_AUTHOR("QLogic Corporation"); 2935 MODULE_DESCRIPTION("QLogic Fibre Channel HBA Driver"); 2936 MODULE_LICENSE("GPL"); 2937 MODULE_VERSION(QLA2XXX_VERSION); 2938 MODULE_FIRMWARE(FW_FILE_ISP21XX); 2939 MODULE_FIRMWARE(FW_FILE_ISP22XX); 2940 MODULE_FIRMWARE(FW_FILE_ISP2300); 2941 MODULE_FIRMWARE(FW_FILE_ISP2322); 2942 MODULE_FIRMWARE(FW_FILE_ISP24XX); 2943 MODULE_FIRMWARE(FW_FILE_ISP25XX); 2944
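
/*
 * Illustrative note (not part of the driver logic): extended error logging
 * can be requested when the module is loaded, e.g.
 *
 *	modprobe qla2xxx ql2xextended_error_logging=1
 *
 * in which case qla2x00_module_init() above also tags the reported driver
 * version string with a "-debug" suffix.
 */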