/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2008 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/mutex.h>

#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_fc.h>

/*
 * Driver version
 */
char qla2x00_version_str[40];

/*
 * SRB allocation cache
 */
static struct kmem_cache *srb_cachep;

int num_hosts;
int ql2xlogintimeout = 20;
module_param(ql2xlogintimeout, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xlogintimeout,
        "Login timeout value in seconds.");

int qlport_down_retry;
module_param(qlport_down_retry, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(qlport_down_retry,
        "Maximum number of command retries to a port that returns "
        "a PORT-DOWN status.");

int ql2xplogiabsentdevice;
module_param(ql2xplogiabsentdevice, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xplogiabsentdevice,
        "Option to enable PLOGI to devices that are not present after "
        "a Fabric scan. This is needed for several broken switches. "
        "Default is 0 - no PLOGI. 1 - perform PLOGI.");

int ql2xloginretrycount = 0;
module_param(ql2xloginretrycount, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xloginretrycount,
        "Specify an alternate value for the NVRAM login retry count.");

int ql2xallocfwdump = 1;
module_param(ql2xallocfwdump, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xallocfwdump,
        "Option to enable allocation of memory for a firmware dump "
        "during HBA initialization. Memory allocation requirements "
        "vary by ISP type. Default is 1 - allocate memory.");

int ql2xextended_error_logging;
module_param(ql2xextended_error_logging, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xextended_error_logging,
        "Option to enable extended error logging. "
        "Default is 0 - no logging. 1 - log errors.");

static void qla2x00_free_device(scsi_qla_host_t *);

static void qla2x00_config_dma_addressing(scsi_qla_host_t *ha);

int ql2xfdmienable = 1;
module_param(ql2xfdmienable, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xfdmienable,
        "Enables FDMI registrations. "
        "Default is 1 - perform FDMI. 0 - no FDMI.");

#define MAX_Q_DEPTH    32
static int ql2xmaxqdepth = MAX_Q_DEPTH;
module_param(ql2xmaxqdepth, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xmaxqdepth,
        "Maximum queue depth to report for target devices.");

int ql2xqfullrampup = 120;
module_param(ql2xqfullrampup, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xqfullrampup,
        "Number of seconds to wait to begin to ramp-up the queue "
        "depth for a device after a queue-full condition has been "
        "detected. 
Default is 120 seconds."); 89 90 /* 91 * SCSI host template entry points 92 */ 93 static int qla2xxx_slave_configure(struct scsi_device * device); 94 static int qla2xxx_slave_alloc(struct scsi_device *); 95 static int qla2xxx_scan_finished(struct Scsi_Host *, unsigned long time); 96 static void qla2xxx_scan_start(struct Scsi_Host *); 97 static void qla2xxx_slave_destroy(struct scsi_device *); 98 static int qla2x00_queuecommand(struct scsi_cmnd *cmd, 99 void (*fn)(struct scsi_cmnd *)); 100 static int qla24xx_queuecommand(struct scsi_cmnd *cmd, 101 void (*fn)(struct scsi_cmnd *)); 102 static int qla2xxx_eh_abort(struct scsi_cmnd *); 103 static int qla2xxx_eh_device_reset(struct scsi_cmnd *); 104 static int qla2xxx_eh_target_reset(struct scsi_cmnd *); 105 static int qla2xxx_eh_bus_reset(struct scsi_cmnd *); 106 static int qla2xxx_eh_host_reset(struct scsi_cmnd *); 107 108 static int qla2x00_change_queue_depth(struct scsi_device *, int); 109 static int qla2x00_change_queue_type(struct scsi_device *, int); 110 111 static struct scsi_host_template qla2x00_driver_template = { 112 .module = THIS_MODULE, 113 .name = QLA2XXX_DRIVER_NAME, 114 .queuecommand = qla2x00_queuecommand, 115 116 .eh_abort_handler = qla2xxx_eh_abort, 117 .eh_device_reset_handler = qla2xxx_eh_device_reset, 118 .eh_target_reset_handler = qla2xxx_eh_target_reset, 119 .eh_bus_reset_handler = qla2xxx_eh_bus_reset, 120 .eh_host_reset_handler = qla2xxx_eh_host_reset, 121 122 .slave_configure = qla2xxx_slave_configure, 123 124 .slave_alloc = qla2xxx_slave_alloc, 125 .slave_destroy = qla2xxx_slave_destroy, 126 .scan_finished = qla2xxx_scan_finished, 127 .scan_start = qla2xxx_scan_start, 128 .change_queue_depth = qla2x00_change_queue_depth, 129 .change_queue_type = qla2x00_change_queue_type, 130 .this_id = -1, 131 .cmd_per_lun = 3, 132 .use_clustering = ENABLE_CLUSTERING, 133 .sg_tablesize = SG_ALL, 134 135 /* 136 * The RISC allows for each command to transfer (2^32-1) bytes of data, 137 * which equates to 0x800000 sectors. 
138 */ 139 .max_sectors = 0xFFFF, 140 .shost_attrs = qla2x00_host_attrs, 141 }; 142 143 struct scsi_host_template qla24xx_driver_template = { 144 .module = THIS_MODULE, 145 .name = QLA2XXX_DRIVER_NAME, 146 .queuecommand = qla24xx_queuecommand, 147 148 .eh_abort_handler = qla2xxx_eh_abort, 149 .eh_device_reset_handler = qla2xxx_eh_device_reset, 150 .eh_target_reset_handler = qla2xxx_eh_target_reset, 151 .eh_bus_reset_handler = qla2xxx_eh_bus_reset, 152 .eh_host_reset_handler = qla2xxx_eh_host_reset, 153 154 .slave_configure = qla2xxx_slave_configure, 155 156 .slave_alloc = qla2xxx_slave_alloc, 157 .slave_destroy = qla2xxx_slave_destroy, 158 .scan_finished = qla2xxx_scan_finished, 159 .scan_start = qla2xxx_scan_start, 160 .change_queue_depth = qla2x00_change_queue_depth, 161 .change_queue_type = qla2x00_change_queue_type, 162 .this_id = -1, 163 .cmd_per_lun = 3, 164 .use_clustering = ENABLE_CLUSTERING, 165 .sg_tablesize = SG_ALL, 166 167 .max_sectors = 0xFFFF, 168 .shost_attrs = qla2x00_host_attrs, 169 }; 170 171 static struct scsi_transport_template *qla2xxx_transport_template = NULL; 172 struct scsi_transport_template *qla2xxx_transport_vport_template = NULL; 173 174 /* TODO Convert to inlines 175 * 176 * Timer routines 177 */ 178 179 __inline__ void 180 qla2x00_start_timer(scsi_qla_host_t *ha, void *func, unsigned long interval) 181 { 182 init_timer(&ha->timer); 183 ha->timer.expires = jiffies + interval * HZ; 184 ha->timer.data = (unsigned long)ha; 185 ha->timer.function = (void (*)(unsigned long))func; 186 add_timer(&ha->timer); 187 ha->timer_active = 1; 188 } 189 190 static inline void 191 qla2x00_restart_timer(scsi_qla_host_t *ha, unsigned long interval) 192 { 193 mod_timer(&ha->timer, jiffies + interval * HZ); 194 } 195 196 static __inline__ void 197 qla2x00_stop_timer(scsi_qla_host_t *ha) 198 { 199 del_timer_sync(&ha->timer); 200 ha->timer_active = 0; 201 } 202 203 static int qla2x00_do_dpc(void *data); 204 205 static void qla2x00_rst_aen(scsi_qla_host_t *); 206 207 static int qla2x00_mem_alloc(scsi_qla_host_t *); 208 static void qla2x00_mem_free(scsi_qla_host_t *ha); 209 static void qla2x00_sp_free_dma(scsi_qla_host_t *, srb_t *); 210 211 /* -------------------------------------------------------------------------- */ 212 213 static char * 214 qla2x00_pci_info_str(struct scsi_qla_host *ha, char *str) 215 { 216 static char *pci_bus_modes[] = { 217 "33", "66", "100", "133", 218 }; 219 uint16_t pci_bus; 220 221 strcpy(str, "PCI"); 222 pci_bus = (ha->pci_attr & (BIT_9 | BIT_10)) >> 9; 223 if (pci_bus) { 224 strcat(str, "-X ("); 225 strcat(str, pci_bus_modes[pci_bus]); 226 } else { 227 pci_bus = (ha->pci_attr & BIT_8) >> 8; 228 strcat(str, " ("); 229 strcat(str, pci_bus_modes[pci_bus]); 230 } 231 strcat(str, " MHz)"); 232 233 return (str); 234 } 235 236 static char * 237 qla24xx_pci_info_str(struct scsi_qla_host *ha, char *str) 238 { 239 static char *pci_bus_modes[] = { "33", "66", "100", "133", }; 240 uint32_t pci_bus; 241 int pcie_reg; 242 243 pcie_reg = pci_find_capability(ha->pdev, PCI_CAP_ID_EXP); 244 if (pcie_reg) { 245 char lwstr[6]; 246 uint16_t pcie_lstat, lspeed, lwidth; 247 248 pcie_reg += 0x12; 249 pci_read_config_word(ha->pdev, pcie_reg, &pcie_lstat); 250 lspeed = pcie_lstat & (BIT_0 | BIT_1 | BIT_2 | BIT_3); 251 lwidth = (pcie_lstat & 252 (BIT_4 | BIT_5 | BIT_6 | BIT_7 | BIT_8 | BIT_9)) >> 4; 253 254 strcpy(str, "PCIe ("); 255 if (lspeed == 1) 256 strcat(str, "2.5GT/s "); 257 else if (lspeed == 2) 258 strcat(str, "5.0GT/s "); 259 else 260 strcat(str, "<unknown> "); 261 
snprintf(lwstr, sizeof(lwstr), "x%d)", lwidth); 262 strcat(str, lwstr); 263 264 return str; 265 } 266 267 strcpy(str, "PCI"); 268 pci_bus = (ha->pci_attr & CSRX_PCIX_BUS_MODE_MASK) >> 8; 269 if (pci_bus == 0 || pci_bus == 8) { 270 strcat(str, " ("); 271 strcat(str, pci_bus_modes[pci_bus >> 3]); 272 } else { 273 strcat(str, "-X "); 274 if (pci_bus & BIT_2) 275 strcat(str, "Mode 2"); 276 else 277 strcat(str, "Mode 1"); 278 strcat(str, " ("); 279 strcat(str, pci_bus_modes[pci_bus & ~BIT_2]); 280 } 281 strcat(str, " MHz)"); 282 283 return str; 284 } 285 286 static char * 287 qla2x00_fw_version_str(struct scsi_qla_host *ha, char *str) 288 { 289 char un_str[10]; 290 291 sprintf(str, "%d.%02d.%02d ", ha->fw_major_version, 292 ha->fw_minor_version, 293 ha->fw_subminor_version); 294 295 if (ha->fw_attributes & BIT_9) { 296 strcat(str, "FLX"); 297 return (str); 298 } 299 300 switch (ha->fw_attributes & 0xFF) { 301 case 0x7: 302 strcat(str, "EF"); 303 break; 304 case 0x17: 305 strcat(str, "TP"); 306 break; 307 case 0x37: 308 strcat(str, "IP"); 309 break; 310 case 0x77: 311 strcat(str, "VI"); 312 break; 313 default: 314 sprintf(un_str, "(%x)", ha->fw_attributes); 315 strcat(str, un_str); 316 break; 317 } 318 if (ha->fw_attributes & 0x100) 319 strcat(str, "X"); 320 321 return (str); 322 } 323 324 static char * 325 qla24xx_fw_version_str(struct scsi_qla_host *ha, char *str) 326 { 327 sprintf(str, "%d.%02d.%02d ", ha->fw_major_version, 328 ha->fw_minor_version, 329 ha->fw_subminor_version); 330 331 if (ha->fw_attributes & BIT_0) 332 strcat(str, "[Class 2] "); 333 if (ha->fw_attributes & BIT_1) 334 strcat(str, "[IP] "); 335 if (ha->fw_attributes & BIT_2) 336 strcat(str, "[Multi-ID] "); 337 if (ha->fw_attributes & BIT_3) 338 strcat(str, "[SB-2] "); 339 if (ha->fw_attributes & BIT_4) 340 strcat(str, "[T10 CRC] "); 341 if (ha->fw_attributes & BIT_5) 342 strcat(str, "[VI] "); 343 if (ha->fw_attributes & BIT_10) 344 strcat(str, "[84XX] "); 345 if (ha->fw_attributes & BIT_13) 346 strcat(str, "[Experimental]"); 347 return str; 348 } 349 350 static inline srb_t * 351 qla2x00_get_new_sp(scsi_qla_host_t *ha, fc_port_t *fcport, 352 struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) 353 { 354 srb_t *sp; 355 356 sp = mempool_alloc(ha->srb_mempool, GFP_ATOMIC); 357 if (!sp) 358 return sp; 359 360 sp->ha = ha; 361 sp->fcport = fcport; 362 sp->cmd = cmd; 363 sp->flags = 0; 364 CMD_SP(cmd) = (void *)sp; 365 cmd->scsi_done = done; 366 367 return sp; 368 } 369 370 static int 371 qla2x00_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) 372 { 373 scsi_qla_host_t *ha = shost_priv(cmd->device->host); 374 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; 375 struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device)); 376 srb_t *sp; 377 int rval; 378 379 if (unlikely(pci_channel_offline(ha->pdev))) { 380 cmd->result = DID_REQUEUE << 16; 381 goto qc_fail_command; 382 } 383 384 rval = fc_remote_port_chkready(rport); 385 if (rval) { 386 cmd->result = rval; 387 goto qc_fail_command; 388 } 389 390 /* Close window on fcport/rport state-transitioning. 
*/ 391 if (!*(fc_port_t **)rport->dd_data) { 392 cmd->result = DID_IMM_RETRY << 16; 393 goto qc_fail_command; 394 } 395 396 if (atomic_read(&fcport->state) != FCS_ONLINE) { 397 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD || 398 atomic_read(&ha->loop_state) == LOOP_DEAD) { 399 cmd->result = DID_NO_CONNECT << 16; 400 goto qc_fail_command; 401 } 402 goto qc_host_busy; 403 } 404 405 spin_unlock_irq(ha->host->host_lock); 406 407 sp = qla2x00_get_new_sp(ha, fcport, cmd, done); 408 if (!sp) 409 goto qc_host_busy_lock; 410 411 rval = qla2x00_start_scsi(sp); 412 if (rval != QLA_SUCCESS) 413 goto qc_host_busy_free_sp; 414 415 spin_lock_irq(ha->host->host_lock); 416 417 return 0; 418 419 qc_host_busy_free_sp: 420 qla2x00_sp_free_dma(ha, sp); 421 mempool_free(sp, ha->srb_mempool); 422 423 qc_host_busy_lock: 424 spin_lock_irq(ha->host->host_lock); 425 426 qc_host_busy: 427 return SCSI_MLQUEUE_HOST_BUSY; 428 429 qc_fail_command: 430 done(cmd); 431 432 return 0; 433 } 434 435 436 static int 437 qla24xx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) 438 { 439 scsi_qla_host_t *ha = shost_priv(cmd->device->host); 440 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; 441 struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device)); 442 srb_t *sp; 443 int rval; 444 scsi_qla_host_t *pha = to_qla_parent(ha); 445 446 if (unlikely(pci_channel_offline(ha->pdev))) { 447 cmd->result = DID_REQUEUE << 16; 448 goto qc24_fail_command; 449 } 450 451 rval = fc_remote_port_chkready(rport); 452 if (rval) { 453 cmd->result = rval; 454 goto qc24_fail_command; 455 } 456 457 /* Close window on fcport/rport state-transitioning. */ 458 if (!*(fc_port_t **)rport->dd_data) { 459 cmd->result = DID_IMM_RETRY << 16; 460 goto qc24_fail_command; 461 } 462 463 if (atomic_read(&fcport->state) != FCS_ONLINE) { 464 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD || 465 atomic_read(&pha->loop_state) == LOOP_DEAD) { 466 cmd->result = DID_NO_CONNECT << 16; 467 goto qc24_fail_command; 468 } 469 goto qc24_host_busy; 470 } 471 472 spin_unlock_irq(ha->host->host_lock); 473 474 sp = qla2x00_get_new_sp(pha, fcport, cmd, done); 475 if (!sp) 476 goto qc24_host_busy_lock; 477 478 rval = qla24xx_start_scsi(sp); 479 if (rval != QLA_SUCCESS) 480 goto qc24_host_busy_free_sp; 481 482 spin_lock_irq(ha->host->host_lock); 483 484 return 0; 485 486 qc24_host_busy_free_sp: 487 qla2x00_sp_free_dma(pha, sp); 488 mempool_free(sp, pha->srb_mempool); 489 490 qc24_host_busy_lock: 491 spin_lock_irq(ha->host->host_lock); 492 493 qc24_host_busy: 494 return SCSI_MLQUEUE_HOST_BUSY; 495 496 qc24_fail_command: 497 done(cmd); 498 499 return 0; 500 } 501 502 503 /* 504 * qla2x00_eh_wait_on_command 505 * Waits for the command to be returned by the Firmware for some 506 * max time. 507 * 508 * Input: 509 * ha = actual ha whose done queue will contain the command 510 * returned by firmware. 511 * cmd = Scsi Command to wait on. 
 *    flag = Abort/Reset(Bus or Device Reset)
 *
 * Return:
 *    Not Found : 0
 *    Found : 1
 */
static int
qla2x00_eh_wait_on_command(scsi_qla_host_t *ha, struct scsi_cmnd *cmd)
{
#define ABORT_POLLING_PERIOD    1000
#define ABORT_WAIT_ITER        ((10 * 1000) / (ABORT_POLLING_PERIOD))
    unsigned long wait_iter = ABORT_WAIT_ITER;
    int ret = QLA_SUCCESS;

    while (CMD_SP(cmd)) {
        msleep(ABORT_POLLING_PERIOD);

        /* Give up only after ABORT_WAIT_ITER polling periods. */
        if (!--wait_iter)
            break;
    }
    if (CMD_SP(cmd))
        ret = QLA_FUNCTION_FAILED;

    return ret;
}

/*
 * qla2x00_wait_for_hba_online
 *    Wait until the HBA is online after going through
 *    <= MAX_RETRIES_OF_ISP_ABORT retries, or until the HBA
 *    is finally disabled, i.e. marked offline.
 *
 * Input:
 *     ha - pointer to host adapter structure
 *
 * Note:
 *    Does context switching - release any spin lock held
 *    before calling this routine.
 *
 * Return:
 *    Success (Adapter is online) : 0
 *    Failed (Adapter is offline/disabled) : 1
 */
int
qla2x00_wait_for_hba_online(scsi_qla_host_t *ha)
{
    int return_status;
    unsigned long wait_online;
    scsi_qla_host_t *pha = to_qla_parent(ha);

    wait_online = jiffies + (MAX_LOOP_TIMEOUT * HZ);
    while (((test_bit(ISP_ABORT_NEEDED, &pha->dpc_flags)) ||
        test_bit(ABORT_ISP_ACTIVE, &pha->dpc_flags) ||
        test_bit(ISP_ABORT_RETRY, &pha->dpc_flags) ||
        pha->dpc_active) && time_before(jiffies, wait_online)) {

        msleep(1000);
    }
    if (pha->flags.online)
        return_status = QLA_SUCCESS;
    else
        return_status = QLA_FUNCTION_FAILED;

    return (return_status);
}

/*
 * qla2x00_wait_for_loop_ready
 *    Wait up to MAX_LOOP_TIMEOUT (5 min) for the loop
 *    to be in the LOOP_READY state.
 * Input:
 *     ha - pointer to host adapter structure
 *
 * Note:
 *    Does context switching - release any spin lock held
 *    before calling this routine.
 *
 *
 * Return:
 *    Success (LOOP_READY) : 0
 *    Failed (LOOP_NOT_READY) : 1
 */
static inline int
qla2x00_wait_for_loop_ready(scsi_qla_host_t *ha)
{
    int return_status = QLA_SUCCESS;
    unsigned long loop_timeout;
    scsi_qla_host_t *pha = to_qla_parent(ha);

    /* Wait for 5 min, at the most, for the loop to be ready. */
    loop_timeout = jiffies + (MAX_LOOP_TIMEOUT * HZ);

    while ((!atomic_read(&pha->loop_down_timer) &&
        atomic_read(&pha->loop_state) == LOOP_DOWN) ||
        atomic_read(&pha->loop_state) != LOOP_READY) {
        if (atomic_read(&pha->loop_state) == LOOP_DEAD) {
            return_status = QLA_FUNCTION_FAILED;
            break;
        }
        msleep(1000);
        if (time_after_eq(jiffies, loop_timeout)) {
            return_status = QLA_FUNCTION_FAILED;
            break;
        }
    }
    return (return_status);
}

static void
qla2x00_block_error_handler(struct scsi_cmnd *cmnd)
{
    struct Scsi_Host *shost = cmnd->device->host;
    struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
    unsigned long flags;

    spin_lock_irqsave(shost->host_lock, flags);
    while (rport->port_state == FC_PORTSTATE_BLOCKED) {
        spin_unlock_irqrestore(shost->host_lock, flags);
        msleep(1000);
        spin_lock_irqsave(shost->host_lock, flags);
    }
    spin_unlock_irqrestore(shost->host_lock, flags);
    return;
}

/**************************************************************************
* qla2xxx_eh_abort
*
* Description:
*    The abort function will abort the specified command.
642 * 643 * Input: 644 * cmd = Linux SCSI command packet to be aborted. 645 * 646 * Returns: 647 * Either SUCCESS or FAILED. 648 * 649 * Note: 650 * Only return FAILED if command not returned by firmware. 651 **************************************************************************/ 652 static int 653 qla2xxx_eh_abort(struct scsi_cmnd *cmd) 654 { 655 scsi_qla_host_t *ha = shost_priv(cmd->device->host); 656 srb_t *sp; 657 int ret, i; 658 unsigned int id, lun; 659 unsigned long serial; 660 unsigned long flags; 661 int wait = 0; 662 scsi_qla_host_t *pha = to_qla_parent(ha); 663 664 qla2x00_block_error_handler(cmd); 665 666 if (!CMD_SP(cmd)) 667 return SUCCESS; 668 669 ret = SUCCESS; 670 671 id = cmd->device->id; 672 lun = cmd->device->lun; 673 serial = cmd->serial_number; 674 675 /* Check active list for command command. */ 676 spin_lock_irqsave(&pha->hardware_lock, flags); 677 for (i = 1; i < MAX_OUTSTANDING_COMMANDS; i++) { 678 sp = pha->outstanding_cmds[i]; 679 680 if (sp == NULL) 681 continue; 682 683 if (sp->cmd != cmd) 684 continue; 685 686 DEBUG2(printk("%s(%ld): aborting sp %p from RISC. pid=%ld.\n", 687 __func__, ha->host_no, sp, serial)); 688 689 spin_unlock_irqrestore(&pha->hardware_lock, flags); 690 if (ha->isp_ops->abort_command(ha, sp)) { 691 DEBUG2(printk("%s(%ld): abort_command " 692 "mbx failed.\n", __func__, ha->host_no)); 693 } else { 694 DEBUG3(printk("%s(%ld): abort_command " 695 "mbx success.\n", __func__, ha->host_no)); 696 wait = 1; 697 } 698 spin_lock_irqsave(&pha->hardware_lock, flags); 699 700 break; 701 } 702 spin_unlock_irqrestore(&pha->hardware_lock, flags); 703 704 /* Wait for the command to be returned. */ 705 if (wait) { 706 if (qla2x00_eh_wait_on_command(ha, cmd) != QLA_SUCCESS) { 707 qla_printk(KERN_ERR, ha, 708 "scsi(%ld:%d:%d): Abort handler timed out -- %lx " 709 "%x.\n", ha->host_no, id, lun, serial, ret); 710 ret = FAILED; 711 } 712 } 713 714 qla_printk(KERN_INFO, ha, 715 "scsi(%ld:%d:%d): Abort command issued -- %d %lx %x.\n", 716 ha->host_no, id, lun, wait, serial, ret); 717 718 return ret; 719 } 720 721 enum nexus_wait_type { 722 WAIT_HOST = 0, 723 WAIT_TARGET, 724 WAIT_LUN, 725 }; 726 727 static int 728 qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *ha, unsigned int t, 729 unsigned int l, enum nexus_wait_type type) 730 { 731 int cnt, match, status; 732 srb_t *sp; 733 unsigned long flags; 734 scsi_qla_host_t *pha = to_qla_parent(ha); 735 736 status = QLA_SUCCESS; 737 spin_lock_irqsave(&pha->hardware_lock, flags); 738 for (cnt = 1; status == QLA_SUCCESS && cnt < MAX_OUTSTANDING_COMMANDS; 739 cnt++) { 740 sp = pha->outstanding_cmds[cnt]; 741 if (!sp) 742 continue; 743 if (ha->vp_idx != sp->ha->vp_idx) 744 continue; 745 match = 0; 746 switch (type) { 747 case WAIT_HOST: 748 match = 1; 749 break; 750 case WAIT_TARGET: 751 match = sp->cmd->device->id == t; 752 break; 753 case WAIT_LUN: 754 match = (sp->cmd->device->id == t && 755 sp->cmd->device->lun == l); 756 break; 757 } 758 if (!match) 759 continue; 760 761 spin_unlock_irqrestore(&pha->hardware_lock, flags); 762 status = qla2x00_eh_wait_on_command(ha, sp->cmd); 763 spin_lock_irqsave(&pha->hardware_lock, flags); 764 } 765 spin_unlock_irqrestore(&pha->hardware_lock, flags); 766 767 return status; 768 } 769 770 static char *reset_errors[] = { 771 "HBA not online", 772 "HBA not ready", 773 "Task management failed", 774 "Waiting for command completions", 775 }; 776 777 static int 778 __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type, 779 struct scsi_cmnd *cmd, int (*do_reset)(struct 
fc_port *, unsigned int)) 780 { 781 scsi_qla_host_t *ha = shost_priv(cmd->device->host); 782 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; 783 int err; 784 785 qla2x00_block_error_handler(cmd); 786 787 if (!fcport) 788 return FAILED; 789 790 qla_printk(KERN_INFO, ha, "scsi(%ld:%d:%d): %s RESET ISSUED.\n", 791 ha->host_no, cmd->device->id, cmd->device->lun, name); 792 793 err = 0; 794 if (qla2x00_wait_for_hba_online(ha) != QLA_SUCCESS) 795 goto eh_reset_failed; 796 err = 1; 797 if (qla2x00_wait_for_loop_ready(ha) != QLA_SUCCESS) 798 goto eh_reset_failed; 799 err = 2; 800 if (do_reset(fcport, cmd->device->lun) != QLA_SUCCESS) 801 goto eh_reset_failed; 802 err = 3; 803 if (qla2x00_eh_wait_for_pending_commands(ha, cmd->device->id, 804 cmd->device->lun, type) != QLA_SUCCESS) 805 goto eh_reset_failed; 806 807 qla_printk(KERN_INFO, ha, "scsi(%ld:%d:%d): %s RESET SUCCEEDED.\n", 808 ha->host_no, cmd->device->id, cmd->device->lun, name); 809 810 return SUCCESS; 811 812 eh_reset_failed: 813 qla_printk(KERN_INFO, ha, "scsi(%ld:%d:%d): %s RESET FAILED: %s.\n", 814 ha->host_no, cmd->device->id, cmd->device->lun, name, 815 reset_errors[err]); 816 return FAILED; 817 } 818 819 static int 820 qla2xxx_eh_device_reset(struct scsi_cmnd *cmd) 821 { 822 scsi_qla_host_t *ha = shost_priv(cmd->device->host); 823 824 return __qla2xxx_eh_generic_reset("DEVICE", WAIT_LUN, cmd, 825 ha->isp_ops->lun_reset); 826 } 827 828 static int 829 qla2xxx_eh_target_reset(struct scsi_cmnd *cmd) 830 { 831 scsi_qla_host_t *ha = shost_priv(cmd->device->host); 832 833 return __qla2xxx_eh_generic_reset("TARGET", WAIT_TARGET, cmd, 834 ha->isp_ops->target_reset); 835 } 836 837 /************************************************************************** 838 * qla2xxx_eh_bus_reset 839 * 840 * Description: 841 * The bus reset function will reset the bus and abort any executing 842 * commands. 843 * 844 * Input: 845 * cmd = Linux SCSI command packet of the command that cause the 846 * bus reset. 847 * 848 * Returns: 849 * SUCCESS/FAILURE (defined as macro in scsi.h). 850 * 851 **************************************************************************/ 852 static int 853 qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd) 854 { 855 scsi_qla_host_t *ha = shost_priv(cmd->device->host); 856 scsi_qla_host_t *pha = to_qla_parent(ha); 857 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; 858 int ret = FAILED; 859 unsigned int id, lun; 860 unsigned long serial; 861 862 qla2x00_block_error_handler(cmd); 863 864 id = cmd->device->id; 865 lun = cmd->device->lun; 866 serial = cmd->serial_number; 867 868 if (!fcport) 869 return ret; 870 871 qla_printk(KERN_INFO, ha, 872 "scsi(%ld:%d:%d): LOOP RESET ISSUED.\n", ha->host_no, id, lun); 873 874 if (qla2x00_wait_for_hba_online(ha) != QLA_SUCCESS) { 875 DEBUG2(printk("%s failed:board disabled\n",__func__)); 876 goto eh_bus_reset_done; 877 } 878 879 if (qla2x00_wait_for_loop_ready(ha) == QLA_SUCCESS) { 880 if (qla2x00_loop_reset(ha) == QLA_SUCCESS) 881 ret = SUCCESS; 882 } 883 if (ret == FAILED) 884 goto eh_bus_reset_done; 885 886 /* Flush outstanding commands. */ 887 if (qla2x00_eh_wait_for_pending_commands(pha, 0, 0, WAIT_HOST) != 888 QLA_SUCCESS) 889 ret = FAILED; 890 891 eh_bus_reset_done: 892 qla_printk(KERN_INFO, ha, "%s: reset %s\n", __func__, 893 (ret == FAILED) ? 
"failed" : "succeded"); 894 895 return ret; 896 } 897 898 /************************************************************************** 899 * qla2xxx_eh_host_reset 900 * 901 * Description: 902 * The reset function will reset the Adapter. 903 * 904 * Input: 905 * cmd = Linux SCSI command packet of the command that cause the 906 * adapter reset. 907 * 908 * Returns: 909 * Either SUCCESS or FAILED. 910 * 911 * Note: 912 **************************************************************************/ 913 static int 914 qla2xxx_eh_host_reset(struct scsi_cmnd *cmd) 915 { 916 scsi_qla_host_t *ha = shost_priv(cmd->device->host); 917 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; 918 int ret = FAILED; 919 unsigned int id, lun; 920 unsigned long serial; 921 scsi_qla_host_t *pha = to_qla_parent(ha); 922 923 qla2x00_block_error_handler(cmd); 924 925 id = cmd->device->id; 926 lun = cmd->device->lun; 927 serial = cmd->serial_number; 928 929 if (!fcport) 930 return ret; 931 932 qla_printk(KERN_INFO, ha, 933 "scsi(%ld:%d:%d): ADAPTER RESET ISSUED.\n", ha->host_no, id, lun); 934 935 if (qla2x00_wait_for_hba_online(ha) != QLA_SUCCESS) 936 goto eh_host_reset_lock; 937 938 /* 939 * Fixme-may be dpc thread is active and processing 940 * loop_resync,so wait a while for it to 941 * be completed and then issue big hammer.Otherwise 942 * it may cause I/O failure as big hammer marks the 943 * devices as lost kicking of the port_down_timer 944 * while dpc is stuck for the mailbox to complete. 945 */ 946 qla2x00_wait_for_loop_ready(ha); 947 set_bit(ABORT_ISP_ACTIVE, &pha->dpc_flags); 948 if (qla2x00_abort_isp(pha)) { 949 clear_bit(ABORT_ISP_ACTIVE, &pha->dpc_flags); 950 /* failed. schedule dpc to try */ 951 set_bit(ISP_ABORT_NEEDED, &pha->dpc_flags); 952 953 if (qla2x00_wait_for_hba_online(ha) != QLA_SUCCESS) 954 goto eh_host_reset_lock; 955 } 956 clear_bit(ABORT_ISP_ACTIVE, &pha->dpc_flags); 957 958 /* Waiting for our command in done_queue to be returned to OS.*/ 959 if (qla2x00_eh_wait_for_pending_commands(pha, 0, 0, WAIT_HOST) == 960 QLA_SUCCESS) 961 ret = SUCCESS; 962 963 if (ha->parent) 964 qla2x00_vp_abort_isp(ha); 965 966 eh_host_reset_lock: 967 qla_printk(KERN_INFO, ha, "%s: reset %s\n", __func__, 968 (ret == FAILED) ? "failed" : "succeded"); 969 970 return ret; 971 } 972 973 /* 974 * qla2x00_loop_reset 975 * Issue loop reset. 976 * 977 * Input: 978 * ha = adapter block pointer. 
979 * 980 * Returns: 981 * 0 = success 982 */ 983 int 984 qla2x00_loop_reset(scsi_qla_host_t *ha) 985 { 986 int ret; 987 struct fc_port *fcport; 988 989 if (ha->flags.enable_lip_full_login) { 990 ret = qla2x00_full_login_lip(ha); 991 if (ret != QLA_SUCCESS) { 992 DEBUG2_3(printk("%s(%ld): bus_reset failed: " 993 "full_login_lip=%d.\n", __func__, ha->host_no, 994 ret)); 995 } 996 atomic_set(&ha->loop_state, LOOP_DOWN); 997 atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME); 998 qla2x00_mark_all_devices_lost(ha, 0); 999 qla2x00_wait_for_loop_ready(ha); 1000 } 1001 1002 if (ha->flags.enable_lip_reset) { 1003 ret = qla2x00_lip_reset(ha); 1004 if (ret != QLA_SUCCESS) { 1005 DEBUG2_3(printk("%s(%ld): bus_reset failed: " 1006 "lip_reset=%d.\n", __func__, ha->host_no, ret)); 1007 } 1008 qla2x00_wait_for_loop_ready(ha); 1009 } 1010 1011 if (ha->flags.enable_target_reset) { 1012 list_for_each_entry(fcport, &ha->fcports, list) { 1013 if (fcport->port_type != FCT_TARGET) 1014 continue; 1015 1016 ret = ha->isp_ops->target_reset(fcport, 0); 1017 if (ret != QLA_SUCCESS) { 1018 DEBUG2_3(printk("%s(%ld): bus_reset failed: " 1019 "target_reset=%d d_id=%x.\n", __func__, 1020 ha->host_no, ret, fcport->d_id.b24)); 1021 } 1022 } 1023 } 1024 1025 /* Issue marker command only when we are going to start the I/O */ 1026 ha->marker_needed = 1; 1027 1028 return QLA_SUCCESS; 1029 } 1030 1031 void 1032 qla2x00_abort_all_cmds(scsi_qla_host_t *ha, int res) 1033 { 1034 int cnt; 1035 unsigned long flags; 1036 srb_t *sp; 1037 1038 spin_lock_irqsave(&ha->hardware_lock, flags); 1039 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) { 1040 sp = ha->outstanding_cmds[cnt]; 1041 if (sp) { 1042 ha->outstanding_cmds[cnt] = NULL; 1043 sp->flags = 0; 1044 sp->cmd->result = res; 1045 sp->cmd->host_scribble = (unsigned char *)NULL; 1046 qla2x00_sp_compl(ha, sp); 1047 } 1048 } 1049 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1050 } 1051 1052 static int 1053 qla2xxx_slave_alloc(struct scsi_device *sdev) 1054 { 1055 struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); 1056 1057 if (!rport || fc_remote_port_chkready(rport)) 1058 return -ENXIO; 1059 1060 sdev->hostdata = *(fc_port_t **)rport->dd_data; 1061 1062 return 0; 1063 } 1064 1065 static int 1066 qla2xxx_slave_configure(struct scsi_device *sdev) 1067 { 1068 scsi_qla_host_t *ha = shost_priv(sdev->host); 1069 struct fc_rport *rport = starget_to_rport(sdev->sdev_target); 1070 1071 if (sdev->tagged_supported) 1072 scsi_activate_tcq(sdev, ha->max_q_depth); 1073 else 1074 scsi_deactivate_tcq(sdev, ha->max_q_depth); 1075 1076 rport->dev_loss_tmo = ha->port_down_retry_count + 5; 1077 1078 return 0; 1079 } 1080 1081 static void 1082 qla2xxx_slave_destroy(struct scsi_device *sdev) 1083 { 1084 sdev->hostdata = NULL; 1085 } 1086 1087 static int 1088 qla2x00_change_queue_depth(struct scsi_device *sdev, int qdepth) 1089 { 1090 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth); 1091 return sdev->queue_depth; 1092 } 1093 1094 static int 1095 qla2x00_change_queue_type(struct scsi_device *sdev, int tag_type) 1096 { 1097 if (sdev->tagged_supported) { 1098 scsi_set_tag_type(sdev, tag_type); 1099 if (tag_type) 1100 scsi_activate_tcq(sdev, sdev->queue_depth); 1101 else 1102 scsi_deactivate_tcq(sdev, sdev->queue_depth); 1103 } else 1104 tag_type = 0; 1105 1106 return tag_type; 1107 } 1108 1109 /** 1110 * qla2x00_config_dma_addressing() - Configure OS DMA addressing method. 
1111 * @ha: HA context 1112 * 1113 * At exit, the @ha's flags.enable_64bit_addressing set to indicated 1114 * supported addressing method. 1115 */ 1116 static void 1117 qla2x00_config_dma_addressing(scsi_qla_host_t *ha) 1118 { 1119 /* Assume a 32bit DMA mask. */ 1120 ha->flags.enable_64bit_addressing = 0; 1121 1122 if (!dma_set_mask(&ha->pdev->dev, DMA_64BIT_MASK)) { 1123 /* Any upper-dword bits set? */ 1124 if (MSD(dma_get_required_mask(&ha->pdev->dev)) && 1125 !pci_set_consistent_dma_mask(ha->pdev, DMA_64BIT_MASK)) { 1126 /* Ok, a 64bit DMA mask is applicable. */ 1127 ha->flags.enable_64bit_addressing = 1; 1128 ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64; 1129 ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64; 1130 return; 1131 } 1132 } 1133 1134 dma_set_mask(&ha->pdev->dev, DMA_32BIT_MASK); 1135 pci_set_consistent_dma_mask(ha->pdev, DMA_32BIT_MASK); 1136 } 1137 1138 static void 1139 qla2x00_enable_intrs(scsi_qla_host_t *ha) 1140 { 1141 unsigned long flags = 0; 1142 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1143 1144 spin_lock_irqsave(&ha->hardware_lock, flags); 1145 ha->interrupts_on = 1; 1146 /* enable risc and host interrupts */ 1147 WRT_REG_WORD(®->ictrl, ICR_EN_INT | ICR_EN_RISC); 1148 RD_REG_WORD(®->ictrl); 1149 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1150 1151 } 1152 1153 static void 1154 qla2x00_disable_intrs(scsi_qla_host_t *ha) 1155 { 1156 unsigned long flags = 0; 1157 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1158 1159 spin_lock_irqsave(&ha->hardware_lock, flags); 1160 ha->interrupts_on = 0; 1161 /* disable risc and host interrupts */ 1162 WRT_REG_WORD(®->ictrl, 0); 1163 RD_REG_WORD(®->ictrl); 1164 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1165 } 1166 1167 static void 1168 qla24xx_enable_intrs(scsi_qla_host_t *ha) 1169 { 1170 unsigned long flags = 0; 1171 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 1172 1173 spin_lock_irqsave(&ha->hardware_lock, flags); 1174 ha->interrupts_on = 1; 1175 WRT_REG_DWORD(®->ictrl, ICRX_EN_RISC_INT); 1176 RD_REG_DWORD(®->ictrl); 1177 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1178 } 1179 1180 static void 1181 qla24xx_disable_intrs(scsi_qla_host_t *ha) 1182 { 1183 unsigned long flags = 0; 1184 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 1185 1186 spin_lock_irqsave(&ha->hardware_lock, flags); 1187 ha->interrupts_on = 0; 1188 WRT_REG_DWORD(®->ictrl, 0); 1189 RD_REG_DWORD(®->ictrl); 1190 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1191 } 1192 1193 static struct isp_operations qla2100_isp_ops = { 1194 .pci_config = qla2100_pci_config, 1195 .reset_chip = qla2x00_reset_chip, 1196 .chip_diag = qla2x00_chip_diag, 1197 .config_rings = qla2x00_config_rings, 1198 .reset_adapter = qla2x00_reset_adapter, 1199 .nvram_config = qla2x00_nvram_config, 1200 .update_fw_options = qla2x00_update_fw_options, 1201 .load_risc = qla2x00_load_risc, 1202 .pci_info_str = qla2x00_pci_info_str, 1203 .fw_version_str = qla2x00_fw_version_str, 1204 .intr_handler = qla2100_intr_handler, 1205 .enable_intrs = qla2x00_enable_intrs, 1206 .disable_intrs = qla2x00_disable_intrs, 1207 .abort_command = qla2x00_abort_command, 1208 .target_reset = qla2x00_abort_target, 1209 .lun_reset = qla2x00_lun_reset, 1210 .fabric_login = qla2x00_login_fabric, 1211 .fabric_logout = qla2x00_fabric_logout, 1212 .calc_req_entries = qla2x00_calc_iocbs_32, 1213 .build_iocbs = qla2x00_build_scsi_iocbs_32, 1214 .prep_ms_iocb = qla2x00_prep_ms_iocb, 1215 .prep_ms_fdmi_iocb = qla2x00_prep_ms_fdmi_iocb, 1216 
.read_nvram = qla2x00_read_nvram_data, 1217 .write_nvram = qla2x00_write_nvram_data, 1218 .fw_dump = qla2100_fw_dump, 1219 .beacon_on = NULL, 1220 .beacon_off = NULL, 1221 .beacon_blink = NULL, 1222 .read_optrom = qla2x00_read_optrom_data, 1223 .write_optrom = qla2x00_write_optrom_data, 1224 .get_flash_version = qla2x00_get_flash_version, 1225 }; 1226 1227 static struct isp_operations qla2300_isp_ops = { 1228 .pci_config = qla2300_pci_config, 1229 .reset_chip = qla2x00_reset_chip, 1230 .chip_diag = qla2x00_chip_diag, 1231 .config_rings = qla2x00_config_rings, 1232 .reset_adapter = qla2x00_reset_adapter, 1233 .nvram_config = qla2x00_nvram_config, 1234 .update_fw_options = qla2x00_update_fw_options, 1235 .load_risc = qla2x00_load_risc, 1236 .pci_info_str = qla2x00_pci_info_str, 1237 .fw_version_str = qla2x00_fw_version_str, 1238 .intr_handler = qla2300_intr_handler, 1239 .enable_intrs = qla2x00_enable_intrs, 1240 .disable_intrs = qla2x00_disable_intrs, 1241 .abort_command = qla2x00_abort_command, 1242 .target_reset = qla2x00_abort_target, 1243 .lun_reset = qla2x00_lun_reset, 1244 .fabric_login = qla2x00_login_fabric, 1245 .fabric_logout = qla2x00_fabric_logout, 1246 .calc_req_entries = qla2x00_calc_iocbs_32, 1247 .build_iocbs = qla2x00_build_scsi_iocbs_32, 1248 .prep_ms_iocb = qla2x00_prep_ms_iocb, 1249 .prep_ms_fdmi_iocb = qla2x00_prep_ms_fdmi_iocb, 1250 .read_nvram = qla2x00_read_nvram_data, 1251 .write_nvram = qla2x00_write_nvram_data, 1252 .fw_dump = qla2300_fw_dump, 1253 .beacon_on = qla2x00_beacon_on, 1254 .beacon_off = qla2x00_beacon_off, 1255 .beacon_blink = qla2x00_beacon_blink, 1256 .read_optrom = qla2x00_read_optrom_data, 1257 .write_optrom = qla2x00_write_optrom_data, 1258 .get_flash_version = qla2x00_get_flash_version, 1259 }; 1260 1261 static struct isp_operations qla24xx_isp_ops = { 1262 .pci_config = qla24xx_pci_config, 1263 .reset_chip = qla24xx_reset_chip, 1264 .chip_diag = qla24xx_chip_diag, 1265 .config_rings = qla24xx_config_rings, 1266 .reset_adapter = qla24xx_reset_adapter, 1267 .nvram_config = qla24xx_nvram_config, 1268 .update_fw_options = qla24xx_update_fw_options, 1269 .load_risc = qla24xx_load_risc, 1270 .pci_info_str = qla24xx_pci_info_str, 1271 .fw_version_str = qla24xx_fw_version_str, 1272 .intr_handler = qla24xx_intr_handler, 1273 .enable_intrs = qla24xx_enable_intrs, 1274 .disable_intrs = qla24xx_disable_intrs, 1275 .abort_command = qla24xx_abort_command, 1276 .target_reset = qla24xx_abort_target, 1277 .lun_reset = qla24xx_lun_reset, 1278 .fabric_login = qla24xx_login_fabric, 1279 .fabric_logout = qla24xx_fabric_logout, 1280 .calc_req_entries = NULL, 1281 .build_iocbs = NULL, 1282 .prep_ms_iocb = qla24xx_prep_ms_iocb, 1283 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb, 1284 .read_nvram = qla24xx_read_nvram_data, 1285 .write_nvram = qla24xx_write_nvram_data, 1286 .fw_dump = qla24xx_fw_dump, 1287 .beacon_on = qla24xx_beacon_on, 1288 .beacon_off = qla24xx_beacon_off, 1289 .beacon_blink = qla24xx_beacon_blink, 1290 .read_optrom = qla24xx_read_optrom_data, 1291 .write_optrom = qla24xx_write_optrom_data, 1292 .get_flash_version = qla24xx_get_flash_version, 1293 }; 1294 1295 static struct isp_operations qla25xx_isp_ops = { 1296 .pci_config = qla25xx_pci_config, 1297 .reset_chip = qla24xx_reset_chip, 1298 .chip_diag = qla24xx_chip_diag, 1299 .config_rings = qla24xx_config_rings, 1300 .reset_adapter = qla24xx_reset_adapter, 1301 .nvram_config = qla24xx_nvram_config, 1302 .update_fw_options = qla24xx_update_fw_options, 1303 .load_risc = qla24xx_load_risc, 1304 
.pci_info_str = qla24xx_pci_info_str, 1305 .fw_version_str = qla24xx_fw_version_str, 1306 .intr_handler = qla24xx_intr_handler, 1307 .enable_intrs = qla24xx_enable_intrs, 1308 .disable_intrs = qla24xx_disable_intrs, 1309 .abort_command = qla24xx_abort_command, 1310 .target_reset = qla24xx_abort_target, 1311 .lun_reset = qla24xx_lun_reset, 1312 .fabric_login = qla24xx_login_fabric, 1313 .fabric_logout = qla24xx_fabric_logout, 1314 .calc_req_entries = NULL, 1315 .build_iocbs = NULL, 1316 .prep_ms_iocb = qla24xx_prep_ms_iocb, 1317 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb, 1318 .read_nvram = qla25xx_read_nvram_data, 1319 .write_nvram = qla25xx_write_nvram_data, 1320 .fw_dump = qla25xx_fw_dump, 1321 .beacon_on = qla24xx_beacon_on, 1322 .beacon_off = qla24xx_beacon_off, 1323 .beacon_blink = qla24xx_beacon_blink, 1324 .read_optrom = qla25xx_read_optrom_data, 1325 .write_optrom = qla24xx_write_optrom_data, 1326 .get_flash_version = qla24xx_get_flash_version, 1327 }; 1328 1329 static inline void 1330 qla2x00_set_isp_flags(scsi_qla_host_t *ha) 1331 { 1332 ha->device_type = DT_EXTENDED_IDS; 1333 switch (ha->pdev->device) { 1334 case PCI_DEVICE_ID_QLOGIC_ISP2100: 1335 ha->device_type |= DT_ISP2100; 1336 ha->device_type &= ~DT_EXTENDED_IDS; 1337 ha->fw_srisc_address = RISC_START_ADDRESS_2100; 1338 break; 1339 case PCI_DEVICE_ID_QLOGIC_ISP2200: 1340 ha->device_type |= DT_ISP2200; 1341 ha->device_type &= ~DT_EXTENDED_IDS; 1342 ha->fw_srisc_address = RISC_START_ADDRESS_2100; 1343 break; 1344 case PCI_DEVICE_ID_QLOGIC_ISP2300: 1345 ha->device_type |= DT_ISP2300; 1346 ha->device_type |= DT_ZIO_SUPPORTED; 1347 ha->fw_srisc_address = RISC_START_ADDRESS_2300; 1348 break; 1349 case PCI_DEVICE_ID_QLOGIC_ISP2312: 1350 ha->device_type |= DT_ISP2312; 1351 ha->device_type |= DT_ZIO_SUPPORTED; 1352 ha->fw_srisc_address = RISC_START_ADDRESS_2300; 1353 break; 1354 case PCI_DEVICE_ID_QLOGIC_ISP2322: 1355 ha->device_type |= DT_ISP2322; 1356 ha->device_type |= DT_ZIO_SUPPORTED; 1357 if (ha->pdev->subsystem_vendor == 0x1028 && 1358 ha->pdev->subsystem_device == 0x0170) 1359 ha->device_type |= DT_OEM_001; 1360 ha->fw_srisc_address = RISC_START_ADDRESS_2300; 1361 break; 1362 case PCI_DEVICE_ID_QLOGIC_ISP6312: 1363 ha->device_type |= DT_ISP6312; 1364 ha->fw_srisc_address = RISC_START_ADDRESS_2300; 1365 break; 1366 case PCI_DEVICE_ID_QLOGIC_ISP6322: 1367 ha->device_type |= DT_ISP6322; 1368 ha->fw_srisc_address = RISC_START_ADDRESS_2300; 1369 break; 1370 case PCI_DEVICE_ID_QLOGIC_ISP2422: 1371 ha->device_type |= DT_ISP2422; 1372 ha->device_type |= DT_ZIO_SUPPORTED; 1373 ha->device_type |= DT_FWI2; 1374 ha->device_type |= DT_IIDMA; 1375 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 1376 break; 1377 case PCI_DEVICE_ID_QLOGIC_ISP2432: 1378 ha->device_type |= DT_ISP2432; 1379 ha->device_type |= DT_ZIO_SUPPORTED; 1380 ha->device_type |= DT_FWI2; 1381 ha->device_type |= DT_IIDMA; 1382 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 1383 break; 1384 case PCI_DEVICE_ID_QLOGIC_ISP8432: 1385 ha->device_type |= DT_ISP8432; 1386 ha->device_type |= DT_ZIO_SUPPORTED; 1387 ha->device_type |= DT_FWI2; 1388 ha->device_type |= DT_IIDMA; 1389 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 1390 break; 1391 case PCI_DEVICE_ID_QLOGIC_ISP5422: 1392 ha->device_type |= DT_ISP5422; 1393 ha->device_type |= DT_FWI2; 1394 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 1395 break; 1396 case PCI_DEVICE_ID_QLOGIC_ISP5432: 1397 ha->device_type |= DT_ISP5432; 1398 ha->device_type |= DT_FWI2; 1399 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 1400 
break; 1401 case PCI_DEVICE_ID_QLOGIC_ISP2532: 1402 ha->device_type |= DT_ISP2532; 1403 ha->device_type |= DT_ZIO_SUPPORTED; 1404 ha->device_type |= DT_FWI2; 1405 ha->device_type |= DT_IIDMA; 1406 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 1407 break; 1408 } 1409 } 1410 1411 static int 1412 qla2x00_iospace_config(scsi_qla_host_t *ha) 1413 { 1414 resource_size_t pio; 1415 1416 if (pci_request_selected_regions(ha->pdev, ha->bars, 1417 QLA2XXX_DRIVER_NAME)) { 1418 qla_printk(KERN_WARNING, ha, 1419 "Failed to reserve PIO/MMIO regions (%s)\n", 1420 pci_name(ha->pdev)); 1421 1422 goto iospace_error_exit; 1423 } 1424 if (!(ha->bars & 1)) 1425 goto skip_pio; 1426 1427 /* We only need PIO for Flash operations on ISP2312 v2 chips. */ 1428 pio = pci_resource_start(ha->pdev, 0); 1429 if (pci_resource_flags(ha->pdev, 0) & IORESOURCE_IO) { 1430 if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) { 1431 qla_printk(KERN_WARNING, ha, 1432 "Invalid PCI I/O region size (%s)...\n", 1433 pci_name(ha->pdev)); 1434 pio = 0; 1435 } 1436 } else { 1437 qla_printk(KERN_WARNING, ha, 1438 "region #0 not a PIO resource (%s)...\n", 1439 pci_name(ha->pdev)); 1440 pio = 0; 1441 } 1442 ha->pio_address = pio; 1443 1444 skip_pio: 1445 /* Use MMIO operations for all accesses. */ 1446 if (!(pci_resource_flags(ha->pdev, 1) & IORESOURCE_MEM)) { 1447 qla_printk(KERN_ERR, ha, 1448 "region #1 not an MMIO resource (%s), aborting\n", 1449 pci_name(ha->pdev)); 1450 goto iospace_error_exit; 1451 } 1452 if (pci_resource_len(ha->pdev, 1) < MIN_IOBASE_LEN) { 1453 qla_printk(KERN_ERR, ha, 1454 "Invalid PCI mem region size (%s), aborting\n", 1455 pci_name(ha->pdev)); 1456 goto iospace_error_exit; 1457 } 1458 1459 ha->iobase = ioremap(pci_resource_start(ha->pdev, 1), MIN_IOBASE_LEN); 1460 if (!ha->iobase) { 1461 qla_printk(KERN_ERR, ha, 1462 "cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev)); 1463 1464 goto iospace_error_exit; 1465 } 1466 1467 return (0); 1468 1469 iospace_error_exit: 1470 return (-ENOMEM); 1471 } 1472 1473 static void 1474 qla2xxx_scan_start(struct Scsi_Host *shost) 1475 { 1476 scsi_qla_host_t *ha = shost_priv(shost); 1477 1478 set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags); 1479 set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags); 1480 set_bit(RSCN_UPDATE, &ha->dpc_flags); 1481 } 1482 1483 static int 1484 qla2xxx_scan_finished(struct Scsi_Host *shost, unsigned long time) 1485 { 1486 scsi_qla_host_t *ha = shost_priv(shost); 1487 1488 if (!ha->host) 1489 return 1; 1490 if (time > ha->loop_reset_delay * HZ) 1491 return 1; 1492 1493 return atomic_read(&ha->loop_state) == LOOP_READY; 1494 } 1495 1496 /* 1497 * PCI driver interface 1498 */ 1499 static int __devinit 1500 qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) 1501 { 1502 int ret = -ENODEV; 1503 struct Scsi_Host *host; 1504 scsi_qla_host_t *ha; 1505 char pci_info[30]; 1506 char fw_str[30]; 1507 struct scsi_host_template *sht; 1508 int bars, mem_only = 0; 1509 1510 bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO); 1511 sht = &qla2x00_driver_template; 1512 if (pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2422 || 1513 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2432 || 1514 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8432 || 1515 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5422 || 1516 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5432 || 1517 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2532) { 1518 bars = pci_select_bars(pdev, IORESOURCE_MEM); 1519 sht = &qla24xx_driver_template; 1520 mem_only = 1; 1521 } 1522 1523 if (mem_only) { 1524 if 
(pci_enable_device_mem(pdev)) 1525 goto probe_out; 1526 } else { 1527 if (pci_enable_device(pdev)) 1528 goto probe_out; 1529 } 1530 1531 if (pci_find_aer_capability(pdev)) 1532 if (pci_enable_pcie_error_reporting(pdev)) 1533 goto probe_out; 1534 1535 host = scsi_host_alloc(sht, sizeof(scsi_qla_host_t)); 1536 if (host == NULL) { 1537 printk(KERN_WARNING 1538 "qla2xxx: Couldn't allocate host from scsi layer!\n"); 1539 goto probe_disable_device; 1540 } 1541 1542 /* Clear our data area */ 1543 ha = shost_priv(host); 1544 memset(ha, 0, sizeof(scsi_qla_host_t)); 1545 1546 ha->pdev = pdev; 1547 ha->host = host; 1548 ha->host_no = host->host_no; 1549 sprintf(ha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, ha->host_no); 1550 ha->parent = NULL; 1551 ha->bars = bars; 1552 ha->mem_only = mem_only; 1553 spin_lock_init(&ha->hardware_lock); 1554 1555 /* Set ISP-type information. */ 1556 qla2x00_set_isp_flags(ha); 1557 1558 /* Configure PCI I/O space */ 1559 ret = qla2x00_iospace_config(ha); 1560 if (ret) 1561 goto probe_failed; 1562 1563 qla_printk(KERN_INFO, ha, 1564 "Found an ISP%04X, irq %d, iobase 0x%p\n", pdev->device, pdev->irq, 1565 ha->iobase); 1566 1567 ha->prev_topology = 0; 1568 ha->init_cb_size = sizeof(init_cb_t); 1569 ha->mgmt_svr_loop_id = MANAGEMENT_SERVER + ha->vp_idx; 1570 ha->link_data_rate = PORT_SPEED_UNKNOWN; 1571 ha->optrom_size = OPTROM_SIZE_2300; 1572 1573 ha->max_q_depth = MAX_Q_DEPTH; 1574 if (ql2xmaxqdepth != 0 && ql2xmaxqdepth <= 0xffffU) 1575 ha->max_q_depth = ql2xmaxqdepth; 1576 1577 /* Assign ISP specific operations. */ 1578 if (IS_QLA2100(ha)) { 1579 host->max_id = MAX_TARGETS_2100; 1580 ha->mbx_count = MAILBOX_REGISTER_COUNT_2100; 1581 ha->request_q_length = REQUEST_ENTRY_CNT_2100; 1582 ha->response_q_length = RESPONSE_ENTRY_CNT_2100; 1583 ha->last_loop_id = SNS_LAST_LOOP_ID_2100; 1584 host->sg_tablesize = 32; 1585 ha->gid_list_info_size = 4; 1586 ha->isp_ops = &qla2100_isp_ops; 1587 } else if (IS_QLA2200(ha)) { 1588 host->max_id = MAX_TARGETS_2200; 1589 ha->mbx_count = MAILBOX_REGISTER_COUNT; 1590 ha->request_q_length = REQUEST_ENTRY_CNT_2200; 1591 ha->response_q_length = RESPONSE_ENTRY_CNT_2100; 1592 ha->last_loop_id = SNS_LAST_LOOP_ID_2100; 1593 ha->gid_list_info_size = 4; 1594 ha->isp_ops = &qla2100_isp_ops; 1595 } else if (IS_QLA23XX(ha)) { 1596 host->max_id = MAX_TARGETS_2200; 1597 ha->mbx_count = MAILBOX_REGISTER_COUNT; 1598 ha->request_q_length = REQUEST_ENTRY_CNT_2200; 1599 ha->response_q_length = RESPONSE_ENTRY_CNT_2300; 1600 ha->last_loop_id = SNS_LAST_LOOP_ID_2300; 1601 ha->gid_list_info_size = 6; 1602 if (IS_QLA2322(ha) || IS_QLA6322(ha)) 1603 ha->optrom_size = OPTROM_SIZE_2322; 1604 ha->isp_ops = &qla2300_isp_ops; 1605 } else if (IS_QLA24XX_TYPE(ha)) { 1606 host->max_id = MAX_TARGETS_2200; 1607 ha->mbx_count = MAILBOX_REGISTER_COUNT; 1608 ha->request_q_length = REQUEST_ENTRY_CNT_24XX; 1609 ha->response_q_length = RESPONSE_ENTRY_CNT_2300; 1610 ha->last_loop_id = SNS_LAST_LOOP_ID_2300; 1611 ha->init_cb_size = sizeof(struct mid_init_cb_24xx); 1612 ha->mgmt_svr_loop_id = 10 + ha->vp_idx; 1613 ha->gid_list_info_size = 8; 1614 ha->optrom_size = OPTROM_SIZE_24XX; 1615 ha->isp_ops = &qla24xx_isp_ops; 1616 } else if (IS_QLA25XX(ha)) { 1617 host->max_id = MAX_TARGETS_2200; 1618 ha->mbx_count = MAILBOX_REGISTER_COUNT; 1619 ha->request_q_length = REQUEST_ENTRY_CNT_24XX; 1620 ha->response_q_length = RESPONSE_ENTRY_CNT_2300; 1621 ha->last_loop_id = SNS_LAST_LOOP_ID_2300; 1622 ha->init_cb_size = sizeof(struct mid_init_cb_24xx); 1623 ha->mgmt_svr_loop_id = 10 + ha->vp_idx; 
1624 ha->gid_list_info_size = 8; 1625 ha->optrom_size = OPTROM_SIZE_25XX; 1626 ha->isp_ops = &qla25xx_isp_ops; 1627 ha->hw_event_start = PCI_FUNC(pdev->devfn) ? 1628 FA_HW_EVENT1_ADDR: FA_HW_EVENT0_ADDR; 1629 } 1630 host->can_queue = ha->request_q_length + 128; 1631 1632 /* load the F/W, read paramaters, and init the H/W */ 1633 ha->instance = num_hosts; 1634 1635 mutex_init(&ha->vport_lock); 1636 init_completion(&ha->mbx_cmd_comp); 1637 complete(&ha->mbx_cmd_comp); 1638 init_completion(&ha->mbx_intr_comp); 1639 1640 INIT_LIST_HEAD(&ha->list); 1641 INIT_LIST_HEAD(&ha->fcports); 1642 INIT_LIST_HEAD(&ha->vp_list); 1643 INIT_LIST_HEAD(&ha->work_list); 1644 1645 set_bit(0, (unsigned long *) ha->vp_idx_map); 1646 1647 qla2x00_config_dma_addressing(ha); 1648 if (qla2x00_mem_alloc(ha)) { 1649 qla_printk(KERN_WARNING, ha, 1650 "[ERROR] Failed to allocate memory for adapter\n"); 1651 1652 ret = -ENOMEM; 1653 goto probe_failed; 1654 } 1655 1656 if (qla2x00_initialize_adapter(ha)) { 1657 qla_printk(KERN_WARNING, ha, 1658 "Failed to initialize adapter\n"); 1659 1660 DEBUG2(printk("scsi(%ld): Failed to initialize adapter - " 1661 "Adapter flags %x.\n", 1662 ha->host_no, ha->device_flags)); 1663 1664 ret = -ENODEV; 1665 goto probe_failed; 1666 } 1667 1668 /* 1669 * Startup the kernel thread for this host adapter 1670 */ 1671 ha->dpc_thread = kthread_create(qla2x00_do_dpc, ha, 1672 "%s_dpc", ha->host_str); 1673 if (IS_ERR(ha->dpc_thread)) { 1674 qla_printk(KERN_WARNING, ha, 1675 "Unable to start DPC thread!\n"); 1676 ret = PTR_ERR(ha->dpc_thread); 1677 goto probe_failed; 1678 } 1679 1680 host->this_id = 255; 1681 host->cmd_per_lun = 3; 1682 host->unique_id = ha->instance; 1683 host->max_cmd_len = MAX_CMDSZ; 1684 host->max_channel = MAX_BUSES - 1; 1685 host->max_lun = MAX_LUNS; 1686 host->transportt = qla2xxx_transport_template; 1687 1688 ret = qla2x00_request_irqs(ha); 1689 if (ret) 1690 goto probe_failed; 1691 1692 /* Initialized the timer */ 1693 qla2x00_start_timer(ha, qla2x00_timer, WATCH_INTERVAL); 1694 1695 DEBUG2(printk("DEBUG: detect hba %ld at address = %p\n", 1696 ha->host_no, ha)); 1697 1698 pci_set_drvdata(pdev, ha); 1699 1700 ha->flags.init_done = 1; 1701 ha->flags.online = 1; 1702 1703 num_hosts++; 1704 1705 ret = scsi_add_host(host, &pdev->dev); 1706 if (ret) 1707 goto probe_failed; 1708 1709 scsi_scan_host(host); 1710 1711 qla2x00_alloc_sysfs_attr(ha); 1712 1713 qla2x00_init_host_attr(ha); 1714 1715 qla2x00_dfs_setup(ha); 1716 1717 qla_printk(KERN_INFO, ha, "\n" 1718 " QLogic Fibre Channel HBA Driver: %s\n" 1719 " QLogic %s - %s\n" 1720 " ISP%04X: %s @ %s hdma%c, host#=%ld, fw=%s\n", 1721 qla2x00_version_str, ha->model_number, 1722 ha->model_desc ? ha->model_desc: "", pdev->device, 1723 ha->isp_ops->pci_info_str(ha, pci_info), pci_name(pdev), 1724 ha->flags.enable_64bit_addressing ? 
'+': '-', ha->host_no, 1725 ha->isp_ops->fw_version_str(ha, fw_str)); 1726 1727 return 0; 1728 1729 probe_failed: 1730 qla2x00_free_device(ha); 1731 1732 scsi_host_put(host); 1733 1734 probe_disable_device: 1735 pci_disable_device(pdev); 1736 1737 probe_out: 1738 return ret; 1739 } 1740 1741 static void 1742 qla2x00_remove_one(struct pci_dev *pdev) 1743 { 1744 scsi_qla_host_t *ha; 1745 1746 ha = pci_get_drvdata(pdev); 1747 1748 qla2x00_dfs_remove(ha); 1749 1750 qla84xx_put_chip(ha); 1751 1752 qla2x00_free_sysfs_attr(ha); 1753 1754 fc_remove_host(ha->host); 1755 1756 scsi_remove_host(ha->host); 1757 1758 qla2x00_free_device(ha); 1759 1760 scsi_host_put(ha->host); 1761 1762 pci_disable_device(pdev); 1763 pci_set_drvdata(pdev, NULL); 1764 } 1765 1766 static void 1767 qla2x00_free_device(scsi_qla_host_t *ha) 1768 { 1769 qla2x00_abort_all_cmds(ha, DID_NO_CONNECT << 16); 1770 1771 /* Disable timer */ 1772 if (ha->timer_active) 1773 qla2x00_stop_timer(ha); 1774 1775 ha->flags.online = 0; 1776 1777 /* Kill the kernel thread for this host */ 1778 if (ha->dpc_thread) { 1779 struct task_struct *t = ha->dpc_thread; 1780 1781 /* 1782 * qla2xxx_wake_dpc checks for ->dpc_thread 1783 * so we need to zero it out. 1784 */ 1785 ha->dpc_thread = NULL; 1786 kthread_stop(t); 1787 } 1788 1789 if (ha->flags.fce_enabled) 1790 qla2x00_disable_fce_trace(ha, NULL, NULL); 1791 1792 if (ha->eft) 1793 qla2x00_disable_eft_trace(ha); 1794 1795 /* Stop currently executing firmware. */ 1796 qla2x00_try_to_stop_firmware(ha); 1797 1798 /* turn-off interrupts on the card */ 1799 if (ha->interrupts_on) 1800 ha->isp_ops->disable_intrs(ha); 1801 1802 qla2x00_mem_free(ha); 1803 1804 qla2x00_free_irqs(ha); 1805 1806 /* release io space registers */ 1807 if (ha->iobase) 1808 iounmap(ha->iobase); 1809 pci_release_selected_regions(ha->pdev, ha->bars); 1810 } 1811 1812 static inline void 1813 qla2x00_schedule_rport_del(struct scsi_qla_host *ha, fc_port_t *fcport, 1814 int defer) 1815 { 1816 unsigned long flags; 1817 struct fc_rport *rport; 1818 1819 if (!fcport->rport) 1820 return; 1821 1822 rport = fcport->rport; 1823 if (defer) { 1824 spin_lock_irqsave(&fcport->rport_lock, flags); 1825 fcport->drport = rport; 1826 fcport->rport = NULL; 1827 *(fc_port_t **)rport->dd_data = NULL; 1828 spin_unlock_irqrestore(&fcport->rport_lock, flags); 1829 set_bit(FCPORT_UPDATE_NEEDED, &ha->dpc_flags); 1830 } else { 1831 spin_lock_irqsave(&fcport->rport_lock, flags); 1832 fcport->rport = NULL; 1833 *(fc_port_t **)rport->dd_data = NULL; 1834 spin_unlock_irqrestore(&fcport->rport_lock, flags); 1835 fc_remote_port_delete(rport); 1836 } 1837 } 1838 1839 /* 1840 * qla2x00_mark_device_lost Updates fcport state when device goes offline. 1841 * 1842 * Input: ha = adapter block pointer. fcport = port structure pointer. 1843 * 1844 * Return: None. 1845 * 1846 * Context: 1847 */ 1848 void qla2x00_mark_device_lost(scsi_qla_host_t *ha, fc_port_t *fcport, 1849 int do_login, int defer) 1850 { 1851 if (atomic_read(&fcport->state) == FCS_ONLINE && 1852 ha->vp_idx == fcport->vp_idx) 1853 qla2x00_schedule_rport_del(ha, fcport, defer); 1854 1855 /* 1856 * We may need to retry the login, so don't change the state of the 1857 * port but do the retries. 
1858 */ 1859 if (atomic_read(&fcport->state) != FCS_DEVICE_DEAD) 1860 atomic_set(&fcport->state, FCS_DEVICE_LOST); 1861 1862 if (!do_login) 1863 return; 1864 1865 if (fcport->login_retry == 0) { 1866 fcport->login_retry = ha->login_retry_count; 1867 set_bit(RELOGIN_NEEDED, &ha->dpc_flags); 1868 1869 DEBUG(printk("scsi(%ld): Port login retry: " 1870 "%02x%02x%02x%02x%02x%02x%02x%02x, " 1871 "id = 0x%04x retry cnt=%d\n", 1872 ha->host_no, 1873 fcport->port_name[0], 1874 fcport->port_name[1], 1875 fcport->port_name[2], 1876 fcport->port_name[3], 1877 fcport->port_name[4], 1878 fcport->port_name[5], 1879 fcport->port_name[6], 1880 fcport->port_name[7], 1881 fcport->loop_id, 1882 fcport->login_retry)); 1883 } 1884 } 1885 1886 /* 1887 * qla2x00_mark_all_devices_lost 1888 * Updates fcport state when device goes offline. 1889 * 1890 * Input: 1891 * ha = adapter block pointer. 1892 * fcport = port structure pointer. 1893 * 1894 * Return: 1895 * None. 1896 * 1897 * Context: 1898 */ 1899 void 1900 qla2x00_mark_all_devices_lost(scsi_qla_host_t *ha, int defer) 1901 { 1902 fc_port_t *fcport; 1903 scsi_qla_host_t *pha = to_qla_parent(ha); 1904 1905 list_for_each_entry(fcport, &pha->fcports, list) { 1906 if (ha->vp_idx != 0 && ha->vp_idx != fcport->vp_idx) 1907 continue; 1908 /* 1909 * No point in marking the device as lost, if the device is 1910 * already DEAD. 1911 */ 1912 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD) 1913 continue; 1914 if (atomic_read(&fcport->state) == FCS_ONLINE) { 1915 if (defer) 1916 qla2x00_schedule_rport_del(ha, fcport, defer); 1917 else if (ha->vp_idx == fcport->vp_idx) 1918 qla2x00_schedule_rport_del(ha, fcport, defer); 1919 } 1920 atomic_set(&fcport->state, FCS_DEVICE_LOST); 1921 } 1922 1923 if (defer) 1924 qla2xxx_wake_dpc(ha); 1925 } 1926 1927 /* 1928 * qla2x00_mem_alloc 1929 * Allocates adapter memory. 1930 * 1931 * Returns: 1932 * 0 = success. 1933 * !0 = failure. 
1934 */ 1935 static int 1936 qla2x00_mem_alloc(scsi_qla_host_t *ha) 1937 { 1938 char name[16]; 1939 1940 ha->request_ring = dma_alloc_coherent(&ha->pdev->dev, 1941 (ha->request_q_length + 1) * sizeof(request_t), &ha->request_dma, 1942 GFP_KERNEL); 1943 if (!ha->request_ring) 1944 goto fail; 1945 1946 ha->response_ring = dma_alloc_coherent(&ha->pdev->dev, 1947 (ha->response_q_length + 1) * sizeof(response_t), 1948 &ha->response_dma, GFP_KERNEL); 1949 if (!ha->response_ring) 1950 goto fail_free_request_ring; 1951 1952 ha->gid_list = dma_alloc_coherent(&ha->pdev->dev, GID_LIST_SIZE, 1953 &ha->gid_list_dma, GFP_KERNEL); 1954 if (!ha->gid_list) 1955 goto fail_free_response_ring; 1956 1957 ha->init_cb = dma_alloc_coherent(&ha->pdev->dev, ha->init_cb_size, 1958 &ha->init_cb_dma, GFP_KERNEL); 1959 if (!ha->init_cb) 1960 goto fail_free_gid_list; 1961 1962 snprintf(name, sizeof(name), "%s_%ld", QLA2XXX_DRIVER_NAME, 1963 ha->host_no); 1964 ha->s_dma_pool = dma_pool_create(name, &ha->pdev->dev, 1965 DMA_POOL_SIZE, 8, 0); 1966 if (!ha->s_dma_pool) 1967 goto fail_free_init_cb; 1968 1969 ha->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep); 1970 if (!ha->srb_mempool) 1971 goto fail_free_s_dma_pool; 1972 1973 /* Get memory for cached NVRAM */ 1974 ha->nvram = kzalloc(MAX_NVRAM_SIZE, GFP_KERNEL); 1975 if (!ha->nvram) 1976 goto fail_free_srb_mempool; 1977 1978 /* Allocate memory for SNS commands */ 1979 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 1980 /* Get consistent memory allocated for SNS commands */ 1981 ha->sns_cmd = dma_alloc_coherent(&ha->pdev->dev, 1982 sizeof(struct sns_cmd_pkt), &ha->sns_cmd_dma, GFP_KERNEL); 1983 if (!ha->sns_cmd) 1984 goto fail_free_nvram; 1985 } else { 1986 /* Get consistent memory allocated for MS IOCB */ 1987 ha->ms_iocb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, 1988 &ha->ms_iocb_dma); 1989 if (!ha->ms_iocb) 1990 goto fail_free_nvram; 1991 1992 /* Get consistent memory allocated for CT SNS commands */ 1993 ha->ct_sns = dma_alloc_coherent(&ha->pdev->dev, 1994 sizeof(struct ct_sns_pkt), &ha->ct_sns_dma, GFP_KERNEL); 1995 if (!ha->ct_sns) 1996 goto fail_free_ms_iocb; 1997 } 1998 1999 return 0; 2000 2001 fail_free_ms_iocb: 2002 dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma); 2003 ha->ms_iocb = NULL; 2004 ha->ms_iocb_dma = 0; 2005 fail_free_nvram: 2006 kfree(ha->nvram); 2007 ha->nvram = NULL; 2008 fail_free_srb_mempool: 2009 mempool_destroy(ha->srb_mempool); 2010 ha->srb_mempool = NULL; 2011 fail_free_s_dma_pool: 2012 dma_pool_destroy(ha->s_dma_pool); 2013 ha->s_dma_pool = NULL; 2014 fail_free_init_cb: 2015 dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, ha->init_cb, 2016 ha->init_cb_dma); 2017 ha->init_cb = NULL; 2018 ha->init_cb_dma = 0; 2019 fail_free_gid_list: 2020 dma_free_coherent(&ha->pdev->dev, GID_LIST_SIZE, ha->gid_list, 2021 ha->gid_list_dma); 2022 ha->gid_list = NULL; 2023 ha->gid_list_dma = 0; 2024 fail_free_response_ring: 2025 dma_free_coherent(&ha->pdev->dev, (ha->response_q_length + 1) * 2026 sizeof(response_t), ha->response_ring, ha->response_dma); 2027 ha->response_ring = NULL; 2028 ha->response_dma = 0; 2029 fail_free_request_ring: 2030 dma_free_coherent(&ha->pdev->dev, (ha->request_q_length + 1) * 2031 sizeof(request_t), ha->request_ring, ha->request_dma); 2032 ha->request_ring = NULL; 2033 ha->request_dma = 0; 2034 fail: 2035 return -ENOMEM; 2036 } 2037 2038 /* 2039 * qla2x00_mem_free 2040 * Frees all adapter allocated memory. 2041 * 2042 * Input: 2043 * ha = adapter block pointer. 
/*
 * qla2x00_mem_free
 *	Frees all adapter allocated memory.
 *
 * Input:
 *	ha = adapter block pointer.
 */
static void
qla2x00_mem_free(scsi_qla_host_t *ha)
{
	struct list_head *fcpl, *fcptemp;
	fc_port_t *fcport;

	if (ha->srb_mempool)
		mempool_destroy(ha->srb_mempool);

	if (ha->fce)
		dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce,
		    ha->fce_dma);

	if (ha->fw_dump) {
		if (ha->eft)
			dma_free_coherent(&ha->pdev->dev,
			    ntohl(ha->fw_dump->eft_size), ha->eft, ha->eft_dma);
		vfree(ha->fw_dump);
	}

	if (ha->sns_cmd)
		dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt),
		    ha->sns_cmd, ha->sns_cmd_dma);

	if (ha->ct_sns)
		dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt),
		    ha->ct_sns, ha->ct_sns_dma);

	if (ha->sfp_data)
		dma_pool_free(ha->s_dma_pool, ha->sfp_data, ha->sfp_data_dma);

	if (ha->ms_iocb)
		dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);

	if (ha->s_dma_pool)
		dma_pool_destroy(ha->s_dma_pool);

	if (ha->init_cb)
		dma_free_coherent(&ha->pdev->dev, ha->init_cb_size,
		    ha->init_cb, ha->init_cb_dma);

	if (ha->gid_list)
		dma_free_coherent(&ha->pdev->dev, GID_LIST_SIZE, ha->gid_list,
		    ha->gid_list_dma);

	if (ha->response_ring)
		dma_free_coherent(&ha->pdev->dev,
		    (ha->response_q_length + 1) * sizeof(response_t),
		    ha->response_ring, ha->response_dma);

	if (ha->request_ring)
		dma_free_coherent(&ha->pdev->dev,
		    (ha->request_q_length + 1) * sizeof(request_t),
		    ha->request_ring, ha->request_dma);

	ha->srb_mempool = NULL;
	ha->eft = NULL;
	ha->eft_dma = 0;
	ha->sns_cmd = NULL;
	ha->sns_cmd_dma = 0;
	ha->ct_sns = NULL;
	ha->ct_sns_dma = 0;
	ha->ms_iocb = NULL;
	ha->ms_iocb_dma = 0;
	ha->init_cb = NULL;
	ha->init_cb_dma = 0;

	ha->s_dma_pool = NULL;

	ha->gid_list = NULL;
	ha->gid_list_dma = 0;

	ha->response_ring = NULL;
	ha->response_dma = 0;
	ha->request_ring = NULL;
	ha->request_dma = 0;

	list_for_each_safe(fcpl, fcptemp, &ha->fcports) {
		fcport = list_entry(fcpl, fc_port_t, list);

		/* fc ports */
		list_del_init(&fcport->list);
		kfree(fcport);
	}
	INIT_LIST_HEAD(&ha->fcports);

	ha->fw_dump = NULL;
	ha->fw_dumped = 0;
	ha->fw_dump_reading = 0;

	vfree(ha->optrom_buffer);
	kfree(ha->nvram);
}
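/*
 * qla2x00_alloc_work
 *	Allocates and initializes a deferred work event of the given type.
 *	Uses GFP_ATOMIC when 'locked' is set (the caller is in a context
 *	that cannot sleep, typically with the hardware lock held),
 *	GFP_KERNEL otherwise.
 *
 * Returns:
 *	Pointer to the event on success, NULL on allocation failure.
 */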
static struct qla_work_evt *
qla2x00_alloc_work(struct scsi_qla_host *ha, enum qla_work_type type,
    int locked)
{
	struct qla_work_evt *e;

	e = kzalloc(sizeof(struct qla_work_evt), locked ? GFP_ATOMIC :
	    GFP_KERNEL);
	if (!e)
		return NULL;

	INIT_LIST_HEAD(&e->list);
	e->type = type;
	e->flags = QLA_EVT_FLAG_FREE;
	return e;
}

static int
qla2x00_post_work(struct scsi_qla_host *ha, struct qla_work_evt *e, int locked)
{
	unsigned long flags;
	scsi_qla_host_t *pha = to_qla_parent(ha);

	if (!locked)
		spin_lock_irqsave(&pha->hardware_lock, flags);
	list_add_tail(&e->list, &ha->work_list);
	qla2xxx_wake_dpc(ha);
	if (!locked)
		spin_unlock_irqrestore(&pha->hardware_lock, flags);
	return QLA_SUCCESS;
}

int
qla2x00_post_aen_work(struct scsi_qla_host *ha, enum fc_host_event_code code,
    u32 data)
{
	struct qla_work_evt *e;

	e = qla2x00_alloc_work(ha, QLA_EVT_AEN, 1);
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.aen.code = code;
	e->u.aen.data = data;
	return qla2x00_post_work(ha, e, 1);
}

int
qla2x00_post_hwe_work(struct scsi_qla_host *ha, uint16_t code, uint16_t d1,
    uint16_t d2, uint16_t d3)
{
	struct qla_work_evt *e;

	e = qla2x00_alloc_work(ha, QLA_EVT_HWE_LOG, 1);
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.hwe.code = code;
	e->u.hwe.d1 = d1;
	e->u.hwe.d2 = d2;
	e->u.hwe.d3 = d3;
	return qla2x00_post_work(ha, e, 1);
}
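/*
 * qla2x00_do_work
 *	Drains the adapter's deferred work list from DPC context, handling
 *	each queued event (AEN posting, hardware event logging) and freeing
 *	events flagged QLA_EVT_FLAG_FREE.
 *
 * Context:
 *	Kernel thread (DPC); drops and re-acquires the hardware lock around
 *	each event handler while walking the list.
 */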
static void
qla2x00_do_work(struct scsi_qla_host *ha)
{
	struct qla_work_evt *e;
	scsi_qla_host_t *pha = to_qla_parent(ha);

	spin_lock_irq(&pha->hardware_lock);
	while (!list_empty(&ha->work_list)) {
		e = list_entry(ha->work_list.next, struct qla_work_evt, list);
		list_del_init(&e->list);
		spin_unlock_irq(&pha->hardware_lock);

		switch (e->type) {
		case QLA_EVT_AEN:
			fc_host_post_event(ha->host, fc_get_event_number(),
			    e->u.aen.code, e->u.aen.data);
			break;
		case QLA_EVT_HWE_LOG:
			qla2xxx_hw_event_log(ha, e->u.hwe.code, e->u.hwe.d1,
			    e->u.hwe.d2, e->u.hwe.d3);
			break;
		}
		if (e->flags & QLA_EVT_FLAG_FREE)
			kfree(e);
		spin_lock_irq(&pha->hardware_lock);
	}
	spin_unlock_irq(&pha->hardware_lock);
}

/**************************************************************************
* qla2x00_do_dpc
*   This kernel thread is a task that is scheduled by the interrupt handler
*   to perform the background processing for interrupts.
*
* Notes:
* This task always runs in the context of a kernel thread.  It
* is kicked off by the driver's detect code and starts up
* one per adapter.  It immediately goes to sleep and waits for
* some fibre event.  When either the interrupt handler or
* the timer routine detects an event, it will set one of the task
* bits and then wake us up.
**************************************************************************/
static int
qla2x00_do_dpc(void *data)
{
	int		rval;
	scsi_qla_host_t *ha;
	fc_port_t	*fcport;
	uint8_t		status;
	uint16_t	next_loopid;
	struct scsi_qla_host *vha;
	int		i;

	ha = (scsi_qla_host_t *)data;

	set_user_nice(current, -20);

	while (!kthread_should_stop()) {
		DEBUG3(printk("qla2x00: DPC handler sleeping\n"));

		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
		__set_current_state(TASK_RUNNING);

		DEBUG3(printk("qla2x00: DPC handler waking up\n"));

		/* Initialization not yet finished. Don't do anything yet. */
		if (!ha->flags.init_done)
			continue;

		DEBUG3(printk("scsi(%ld): DPC handler\n", ha->host_no));

		ha->dpc_active = 1;

		if (ha->flags.mbox_busy) {
			ha->dpc_active = 0;
			continue;
		}

		qla2x00_do_work(ha);

		if (test_and_clear_bit(ISP_ABORT_NEEDED, &ha->dpc_flags)) {

			DEBUG(printk("scsi(%ld): dpc: sched "
			    "qla2x00_abort_isp ha = %p\n",
			    ha->host_no, ha));
			if (!(test_and_set_bit(ABORT_ISP_ACTIVE,
			    &ha->dpc_flags))) {

				if (qla2x00_abort_isp(ha)) {
					/* failed. retry later */
					set_bit(ISP_ABORT_NEEDED,
					    &ha->dpc_flags);
				}
				clear_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags);
			}

			for_each_mapped_vp_idx(ha, i) {
				list_for_each_entry(vha, &ha->vp_list,
				    vp_list) {
					if (i == vha->vp_idx) {
						set_bit(ISP_ABORT_NEEDED,
						    &vha->dpc_flags);
						break;
					}
				}
			}

			DEBUG(printk("scsi(%ld): dpc: qla2x00_abort_isp end\n",
			    ha->host_no));
		}

		if (test_and_clear_bit(FCPORT_UPDATE_NEEDED, &ha->dpc_flags))
			qla2x00_update_fcports(ha);

		if (test_and_clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags) &&
		    (!(test_and_set_bit(RESET_ACTIVE, &ha->dpc_flags)))) {

			DEBUG(printk("scsi(%ld): qla2x00_reset_marker()\n",
			    ha->host_no));

			qla2x00_rst_aen(ha);
			clear_bit(RESET_ACTIVE, &ha->dpc_flags);
		}

		/* Retry each device up to login retry count */
		if ((test_and_clear_bit(RELOGIN_NEEDED, &ha->dpc_flags)) &&
		    !test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags) &&
		    atomic_read(&ha->loop_state) != LOOP_DOWN) {

			DEBUG(printk("scsi(%ld): qla2x00_port_login()\n",
			    ha->host_no));

			next_loopid = 0;
			list_for_each_entry(fcport, &ha->fcports, list) {
				/*
				 * If the port is not ONLINE then try to login
				 * to it if we haven't run out of retries.
				 */
				if (atomic_read(&fcport->state) != FCS_ONLINE &&
				    fcport->login_retry) {

					if (fcport->flags & FCF_FABRIC_DEVICE) {
						if (fcport->flags &
						    FCF_TAPE_PRESENT)
							ha->isp_ops->fabric_logout(
							    ha, fcport->loop_id,
							    fcport->d_id.b.domain,
							    fcport->d_id.b.area,
							    fcport->d_id.b.al_pa);
						status = qla2x00_fabric_login(
						    ha, fcport, &next_loopid);
					} else
						status =
						    qla2x00_local_device_login(
							ha, fcport);

					fcport->login_retry--;
					if (status == QLA_SUCCESS) {
						fcport->old_loop_id = fcport->loop_id;

						DEBUG(printk("scsi(%ld): port login OK: logged in ID 0x%x\n",
						    ha->host_no, fcport->loop_id));

						qla2x00_update_fcport(ha,
						    fcport);
					} else if (status == 1) {
						set_bit(RELOGIN_NEEDED, &ha->dpc_flags);
						/* retry the login again */
						DEBUG(printk("scsi(%ld): Retrying %d login again loop_id 0x%x\n",
						    ha->host_no,
						    fcport->login_retry, fcport->loop_id));
					} else {
						fcport->login_retry = 0;
					}
					if (fcport->login_retry == 0 && status != QLA_SUCCESS)
						fcport->loop_id = FC_NO_LOOP_ID;
				}
				if (test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags))
					break;
			}
			DEBUG(printk("scsi(%ld): qla2x00_port_login - end\n",
			    ha->host_no));
		}

		if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags)) {

			DEBUG(printk("scsi(%ld): qla2x00_loop_resync()\n",
			    ha->host_no));

			if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE,
			    &ha->dpc_flags))) {

				rval = qla2x00_loop_resync(ha);

				clear_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags);
			}

			DEBUG(printk("scsi(%ld): qla2x00_loop_resync - end\n",
			    ha->host_no));
		}

		if (!ha->interrupts_on)
			ha->isp_ops->enable_intrs(ha);

		if (test_and_clear_bit(BEACON_BLINK_NEEDED, &ha->dpc_flags))
			ha->isp_ops->beacon_blink(ha);

		qla2x00_do_dpc_all_vps(ha);

		ha->dpc_active = 0;
	} /* End of while (!kthread_should_stop()) */

	DEBUG(printk("scsi(%ld): DPC handler exiting\n", ha->host_no));

	/*
	 * Make sure that nobody tries to wake us up again.
	 */
	ha->dpc_active = 0;

	return 0;
}

void
qla2xxx_wake_dpc(scsi_qla_host_t *ha)
{
	if (ha->dpc_thread)
		wake_up_process(ha->dpc_thread);
}

/*
 * qla2x00_rst_aen
 *	Processes asynchronous reset.
 *
 * Input:
 *	ha = adapter block pointer.
 */
static void
qla2x00_rst_aen(scsi_qla_host_t *ha)
{
	if (ha->flags.online && !ha->flags.reset_active &&
	    !atomic_read(&ha->loop_down_timer) &&
	    !(test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags))) {
		do {
			clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);

			/*
			 * Issue marker command only when we are going to start
			 * the I/O.
			 */
			ha->marker_needed = 1;
		} while (!atomic_read(&ha->loop_down_timer) &&
		    (test_bit(RESET_MARKER_NEEDED, &ha->dpc_flags)));
	}
}
static void
qla2x00_sp_free_dma(scsi_qla_host_t *ha, srb_t *sp)
{
	struct scsi_cmnd *cmd = sp->cmd;

	if (sp->flags & SRB_DMA_VALID) {
		scsi_dma_unmap(cmd);
		sp->flags &= ~SRB_DMA_VALID;
	}
	CMD_SP(cmd) = NULL;
}

void
qla2x00_sp_compl(scsi_qla_host_t *ha, srb_t *sp)
{
	struct scsi_cmnd *cmd = sp->cmd;

	qla2x00_sp_free_dma(ha, sp);

	mempool_free(sp, ha->srb_mempool);

	cmd->scsi_done(cmd);
}

/**************************************************************************
*   qla2x00_timer
*
* Description:
*   One second timer
*
* Context: Interrupt
***************************************************************************/
void
qla2x00_timer(scsi_qla_host_t *ha)
{
	unsigned long	cpu_flags = 0;
	fc_port_t	*fcport;
	int		start_dpc = 0;
	int		index;
	srb_t		*sp;
	int		t;
	scsi_qla_host_t *pha = to_qla_parent(ha);

	/*
	 * Ports - Port down timer.
	 *
	 * Whenever a port is in the LOST state we start decrementing its port
	 * down timer every second until it reaches zero.  Once it reaches zero
	 * the port is marked DEAD.
	 */
	t = 0;
	list_for_each_entry(fcport, &ha->fcports, list) {
		if (fcport->port_type != FCT_TARGET)
			continue;

		if (atomic_read(&fcport->state) == FCS_DEVICE_LOST) {

			if (atomic_read(&fcport->port_down_timer) == 0)
				continue;

			if (atomic_dec_and_test(&fcport->port_down_timer) != 0)
				atomic_set(&fcport->state, FCS_DEVICE_DEAD);

			DEBUG(printk("scsi(%ld): fcport-%d - port retry count: "
			    "%d remaining\n",
			    ha->host_no,
			    t, atomic_read(&fcport->port_down_timer)));
		}
		t++;
	} /* End of for fcport */


	/* Loop down handler. */
	if (atomic_read(&ha->loop_down_timer) > 0 &&
	    !(test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags)) && ha->flags.online) {

		if (atomic_read(&ha->loop_down_timer) ==
		    ha->loop_down_abort_time) {

			DEBUG(printk("scsi(%ld): Loop Down - aborting the "
			    "queues before time expire\n",
			    ha->host_no));

			if (!IS_QLA2100(ha) && ha->link_down_timeout)
				atomic_set(&ha->loop_state, LOOP_DEAD);

			/* Schedule an ISP abort to return any tape commands. */
			/* NPIV - scan physical port only */
			if (!ha->parent) {
				spin_lock_irqsave(&ha->hardware_lock,
				    cpu_flags);
				for (index = 1;
				    index < MAX_OUTSTANDING_COMMANDS;
				    index++) {
					fc_port_t *sfcp;

					sp = ha->outstanding_cmds[index];
					if (!sp)
						continue;
					sfcp = sp->fcport;
					if (!(sfcp->flags & FCF_TAPE_PRESENT))
						continue;

					set_bit(ISP_ABORT_NEEDED,
					    &ha->dpc_flags);
					break;
				}
				spin_unlock_irqrestore(&ha->hardware_lock,
				    cpu_flags);
			}
			set_bit(ABORT_QUEUES_NEEDED, &ha->dpc_flags);
			start_dpc++;
		}

		/* if the loop has been down for 4 minutes, reinit adapter */
		if (atomic_dec_and_test(&ha->loop_down_timer) != 0) {
			DEBUG(printk("scsi(%ld): Loop down exceed 4 mins - "
			    "restarting queues.\n",
			    ha->host_no));

			set_bit(RESTART_QUEUES_NEEDED, &ha->dpc_flags);
			start_dpc++;

			if (!(ha->device_flags & DFLG_NO_CABLE) &&
			    !ha->parent) {
				DEBUG(printk("scsi(%ld): Loop down - "
				    "aborting ISP.\n",
				    ha->host_no));
				qla_printk(KERN_WARNING, ha,
				    "Loop down - aborting ISP.\n");

				set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
			}
		}
		DEBUG3(printk("scsi(%ld): Loop Down - seconds remaining %d\n",
		    ha->host_no,
		    atomic_read(&ha->loop_down_timer)));
	}

	/* Check if beacon LED needs to be blinked */
	if (ha->beacon_blink_led == 1) {
		set_bit(BEACON_BLINK_NEEDED, &ha->dpc_flags);
		start_dpc++;
	}

	/* Process any deferred work. */
	if (!list_empty(&ha->work_list))
		start_dpc++;

	/* Schedule the DPC routine if needed */
	if ((test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags) ||
	    test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags) ||
	    test_bit(FCPORT_UPDATE_NEEDED, &ha->dpc_flags) ||
	    start_dpc ||
	    test_bit(RESET_MARKER_NEEDED, &ha->dpc_flags) ||
	    test_bit(BEACON_BLINK_NEEDED, &ha->dpc_flags) ||
	    test_bit(VP_DPC_NEEDED, &ha->dpc_flags) ||
	    test_bit(RELOGIN_NEEDED, &ha->dpc_flags)))
		qla2xxx_wake_dpc(pha);

	qla2x00_restart_timer(ha, WATCH_INTERVAL);
}

/* Firmware interface routines. */

#define FW_BLOBS	6
#define FW_ISP21XX	0
#define FW_ISP22XX	1
#define FW_ISP2300	2
#define FW_ISP2322	3
#define FW_ISP24XX	4
#define FW_ISP25XX	5

#define FW_FILE_ISP21XX	"ql2100_fw.bin"
#define FW_FILE_ISP22XX	"ql2200_fw.bin"
#define FW_FILE_ISP2300	"ql2300_fw.bin"
#define FW_FILE_ISP2322	"ql2322_fw.bin"
#define FW_FILE_ISP24XX	"ql2400_fw.bin"
#define FW_FILE_ISP25XX	"ql2500_fw.bin"

static DEFINE_MUTEX(qla_fw_lock);

static struct fw_blob qla_fw_blobs[FW_BLOBS] = {
	{ .name = FW_FILE_ISP21XX, .segs = { 0x1000, 0 }, },
	{ .name = FW_FILE_ISP22XX, .segs = { 0x1000, 0 }, },
	{ .name = FW_FILE_ISP2300, .segs = { 0x800, 0 }, },
	{ .name = FW_FILE_ISP2322, .segs = { 0x800, 0x1c000, 0x1e000, 0 }, },
	{ .name = FW_FILE_ISP24XX, },
	{ .name = FW_FILE_ISP25XX, },
};
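/*
 * qla2x00_request_firmware
 *	Returns the firmware blob matching the adapter's ISP type, loading
 *	the image via request_firmware() on first use.  Loaded images are
 *	cached in qla_fw_blobs[] and released at module exit.
 *
 * Returns:
 *	Pointer to the fw_blob on success, NULL if the ISP type is
 *	unrecognized or the image cannot be loaded.
 */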
struct fw_blob *
qla2x00_request_firmware(scsi_qla_host_t *ha)
{
	struct fw_blob *blob;

	blob = NULL;
	if (IS_QLA2100(ha)) {
		blob = &qla_fw_blobs[FW_ISP21XX];
	} else if (IS_QLA2200(ha)) {
		blob = &qla_fw_blobs[FW_ISP22XX];
	} else if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
		blob = &qla_fw_blobs[FW_ISP2300];
	} else if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
		blob = &qla_fw_blobs[FW_ISP2322];
	} else if (IS_QLA24XX_TYPE(ha)) {
		blob = &qla_fw_blobs[FW_ISP24XX];
	} else if (IS_QLA25XX(ha)) {
		blob = &qla_fw_blobs[FW_ISP25XX];
	}

	/* Unrecognized ISP type -- no firmware image to request. */
	if (!blob)
		return NULL;

	mutex_lock(&qla_fw_lock);
	if (blob->fw)
		goto out;

	if (request_firmware(&blob->fw, blob->name, &ha->pdev->dev)) {
		DEBUG2(printk("scsi(%ld): Failed to load firmware image "
		    "(%s).\n", ha->host_no, blob->name));
		blob->fw = NULL;
		blob = NULL;
		goto out;
	}

out:
	mutex_unlock(&qla_fw_lock);
	return blob;
}

static void
qla2x00_release_firmware(void)
{
	int idx;

	mutex_lock(&qla_fw_lock);
	for (idx = 0; idx < FW_BLOBS; idx++)
		if (qla_fw_blobs[idx].fw)
			release_firmware(qla_fw_blobs[idx].fw);
	mutex_unlock(&qla_fw_lock);
}
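/*
 * PCI error recovery (AER) callbacks.  Invoked by the PCI core when an
 * error is detected on the adapter's PCI channel: dump firmware state if
 * the RISC is paused, reset the ISP on slot reset, and resume I/O once
 * the HBA is back online.
 */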
static pci_ers_result_t
qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	switch (state) {
	case pci_channel_io_normal:
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		pci_disable_device(pdev);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		qla2x00_remove_one(pdev);
		return PCI_ERS_RESULT_DISCONNECT;
	}
	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t
qla2xxx_pci_mmio_enabled(struct pci_dev *pdev)
{
	int risc_paused = 0;
	uint32_t stat;
	unsigned long flags;
	scsi_qla_host_t *ha = pci_get_drvdata(pdev);
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
		stat = RD_REG_DWORD(&reg->hccr);
		if (stat & HCCR_RISC_PAUSE)
			risc_paused = 1;
	} else if (IS_QLA23XX(ha)) {
		stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
		if (stat & HSR_RISC_PAUSED)
			risc_paused = 1;
	} else if (IS_FWI2_CAPABLE(ha)) {
		stat = RD_REG_DWORD(&reg24->host_status);
		if (stat & HSRX_RISC_PAUSED)
			risc_paused = 1;
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (risc_paused) {
		qla_printk(KERN_INFO, ha, "RISC paused -- mmio_enabled, "
		    "Dumping firmware!\n");
		ha->isp_ops->fw_dump(ha, 0);

		return PCI_ERS_RESULT_NEED_RESET;
	} else
		return PCI_ERS_RESULT_RECOVERED;
}

static pci_ers_result_t
qla2xxx_pci_slot_reset(struct pci_dev *pdev)
{
	pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT;
	scsi_qla_host_t *ha = pci_get_drvdata(pdev);
	int rc;

	if (ha->mem_only)
		rc = pci_enable_device_mem(pdev);
	else
		rc = pci_enable_device(pdev);

	if (rc) {
		qla_printk(KERN_WARNING, ha,
		    "Can't re-enable PCI device after reset.\n");

		return ret;
	}
	pci_set_master(pdev);

	if (ha->isp_ops->pci_config(ha))
		return ret;

	set_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags);
	if (qla2x00_abort_isp(ha) == QLA_SUCCESS)
		ret = PCI_ERS_RESULT_RECOVERED;
	clear_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags);

	return ret;
}

static void
qla2xxx_pci_resume(struct pci_dev *pdev)
{
	scsi_qla_host_t *ha = pci_get_drvdata(pdev);
	int ret;

	ret = qla2x00_wait_for_hba_online(ha);
	if (ret != QLA_SUCCESS) {
		qla_printk(KERN_ERR, ha,
		    "the device failed to resume I/O "
		    "from slot/link_reset");
	}
	pci_cleanup_aer_uncorrect_error_status(pdev);
}

static struct pci_error_handlers qla2xxx_err_handler = {
	.error_detected = qla2xxx_pci_error_detected,
	.mmio_enabled = qla2xxx_pci_mmio_enabled,
	.slot_reset = qla2xxx_pci_slot_reset,
	.resume = qla2xxx_pci_resume,
};

static struct pci_device_id qla2xxx_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2100) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2200) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2300) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2312) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2322) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP6312) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP6322) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2422) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2432) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8432) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5422) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5432) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2532) },
	{ 0 },
};
MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl);

static struct pci_driver qla2xxx_pci_driver = {
	.name		= QLA2XXX_DRIVER_NAME,
	.driver		= {
		.owner		= THIS_MODULE,
	},
	.id_table	= qla2xxx_pci_tbl,
	.probe		= qla2x00_probe_one,
	.remove		= qla2x00_remove_one,
	.err_handler	= &qla2xxx_err_handler,
};

/**
 * qla2x00_module_init - Module initialization.
 **/
static int __init
qla2x00_module_init(void)
{
	int ret = 0;

	/* Allocate cache for SRBs. */
	srb_cachep = kmem_cache_create("qla2xxx_srbs", sizeof(srb_t), 0,
	    SLAB_HWCACHE_ALIGN, NULL);
	if (srb_cachep == NULL) {
		printk(KERN_ERR
		    "qla2xxx: Unable to allocate SRB cache...Failing load!\n");
		return -ENOMEM;
	}

	/* Derive version string. */
	strcpy(qla2x00_version_str, QLA2XXX_VERSION);
	if (ql2xextended_error_logging)
		strcat(qla2x00_version_str, "-debug");

	qla2xxx_transport_template =
	    fc_attach_transport(&qla2xxx_transport_functions);
	if (!qla2xxx_transport_template) {
		kmem_cache_destroy(srb_cachep);
		return -ENODEV;
	}
	qla2xxx_transport_vport_template =
	    fc_attach_transport(&qla2xxx_transport_vport_functions);
	if (!qla2xxx_transport_vport_template) {
		kmem_cache_destroy(srb_cachep);
		fc_release_transport(qla2xxx_transport_template);
		return -ENODEV;
	}

	printk(KERN_INFO "QLogic Fibre Channel HBA Driver: %s\n",
	    qla2x00_version_str);
	ret = pci_register_driver(&qla2xxx_pci_driver);
	if (ret) {
		kmem_cache_destroy(srb_cachep);
		fc_release_transport(qla2xxx_transport_template);
		fc_release_transport(qla2xxx_transport_vport_template);
	}
	return ret;
}

/**
 * qla2x00_module_exit - Module cleanup.
 **/
static void __exit
qla2x00_module_exit(void)
{
	pci_unregister_driver(&qla2xxx_pci_driver);
	qla2x00_release_firmware();
	kmem_cache_destroy(srb_cachep);
	fc_release_transport(qla2xxx_transport_template);
	fc_release_transport(qla2xxx_transport_vport_template);
}

module_init(qla2x00_module_init);
module_exit(qla2x00_module_exit);

MODULE_AUTHOR("QLogic Corporation");
MODULE_DESCRIPTION("QLogic Fibre Channel HBA Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(QLA2XXX_VERSION);
MODULE_FIRMWARE(FW_FILE_ISP21XX);
MODULE_FIRMWARE(FW_FILE_ISP22XX);
MODULE_FIRMWARE(FW_FILE_ISP2300);
MODULE_FIRMWARE(FW_FILE_ISP2322);
MODULE_FIRMWARE(FW_FILE_ISP24XX);
MODULE_FIRMWARE(FW_FILE_ISP25XX);