/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/kobject.h>
#include <linux/slab.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_fc.h>

#include "qla_target.h"

/*
 * Driver version
 */
char qla2x00_version_str[40];

static int apidev_major;

/*
 * SRB allocation cache
 */
static struct kmem_cache *srb_cachep;

/*
 * CT6 CTX allocation cache
 */
static struct kmem_cache *ctx_cachep;
/*
 * error level for logging
 */
int ql_errlev = ql_log_all;

static int ql2xenableclass2;
module_param(ql2xenableclass2, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xenableclass2,
		"Specify if Class 2 operations are supported from the very "
		"beginning. Default is 0 - class 2 not supported.");


int ql2xlogintimeout = 20;
module_param(ql2xlogintimeout, int, S_IRUGO);
MODULE_PARM_DESC(ql2xlogintimeout,
		"Login timeout value in seconds.");

int qlport_down_retry;
module_param(qlport_down_retry, int, S_IRUGO);
MODULE_PARM_DESC(qlport_down_retry,
		"Maximum number of command retries to a port that returns "
		"a PORT-DOWN status.");

int ql2xplogiabsentdevice;
module_param(ql2xplogiabsentdevice, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xplogiabsentdevice,
		"Option to enable PLOGI to devices that are not present after "
		"a Fabric scan. This is needed for several broken switches. "
		"Default is 0 - no PLOGI. 1 - perform PLOGI.");

int ql2xloginretrycount = 0;
module_param(ql2xloginretrycount, int, S_IRUGO);
MODULE_PARM_DESC(ql2xloginretrycount,
		"Specify an alternate value for the NVRAM login retry count.");

int ql2xallocfwdump = 1;
module_param(ql2xallocfwdump, int, S_IRUGO);
MODULE_PARM_DESC(ql2xallocfwdump,
		"Option to enable allocation of memory for a firmware dump "
		"during HBA initialization. Memory allocation requirements "
		"vary by ISP type. Default is 1 - allocate memory.");

int ql2xextended_error_logging;
module_param(ql2xextended_error_logging, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xextended_error_logging,
		"Option to enable extended error logging,\n"
		"\t\tDefault is 0 - no logging. 0x40000000 - Module Init & Probe.\n"
		"\t\t0x20000000 - Mailbox Cmnds. 0x10000000 - Device Discovery.\n"
		"\t\t0x08000000 - IO tracing. 0x04000000 - DPC Thread.\n"
		"\t\t0x02000000 - Async events. 0x01000000 - Timer routines.\n"
		"\t\t0x00800000 - User space. 0x00400000 - Task Management.\n"
		"\t\t0x00200000 - AER/EEH. 0x00100000 - Multi Q.\n"
		"\t\t0x00080000 - P3P Specific. 0x00040000 - Virtual Port.\n"
		"\t\t0x00020000 - Buffer Dump. 0x00010000 - Misc.\n"
		"\t\t0x00008000 - Verbose. 0x00004000 - Target.\n"
		"\t\t0x00002000 - Target Mgmt. 0x00001000 - Target TMF.\n"
		"\t\t0x7fffffff - For enabling all logs, can be too many logs.\n"
		"\t\t0x1e400000 - Preferred value for capturing essential "
		"debug information (equivalent to old "
		"ql2xextended_error_logging=1).\n"
		"\t\tDo LOGICAL OR of the value to enable more than one level");

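/*
 * Example usage (illustrative values taken from the description above):
 * the mask may be set at module load time or, since the parameter is
 * declared S_IWUSR, changed at runtime through sysfs:
 *
 *	modprobe qla2xxx ql2xextended_error_logging=0x1e400000
 *	echo 0x7fffffff > /sys/module/qla2xxx/parameters/ql2xextended_error_logging
 */
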
int ql2xshiftctondsd = 6;
module_param(ql2xshiftctondsd, int, S_IRUGO);
MODULE_PARM_DESC(ql2xshiftctondsd,
		"Set to control shifting of command type processing "
		"based on total number of SG elements.");

int ql2xfdmienable = 1;
module_param(ql2xfdmienable, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xfdmienable,
		"Enables FDMI registrations. "
		"0 - no FDMI. Default is 1 - perform FDMI.");

#define MAX_Q_DEPTH	32
static int ql2xmaxqdepth = MAX_Q_DEPTH;
module_param(ql2xmaxqdepth, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xmaxqdepth,
		"Maximum queue depth to set for each LUN. "
		"Default is 32.");

int ql2xenabledif = 2;
module_param(ql2xenabledif, int, S_IRUGO);
MODULE_PARM_DESC(ql2xenabledif,
		" Enable T10-CRC-DIF:\n"
		" Default is 2.\n"
		" 0 -- No DIF Support\n"
		" 1 -- Enable DIF for all types\n"
		" 2 -- Enable DIF for all types, except Type 0.\n");

int ql2xenablehba_err_chk = 2;
module_param(ql2xenablehba_err_chk, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xenablehba_err_chk,
		" Enable T10-CRC-DIF Error isolation by HBA:\n"
		" Default is 2.\n"
		" 0 -- Error isolation disabled\n"
		" 1 -- Error isolation enabled only for DIX Type 0\n"
		" 2 -- Error isolation enabled for all Types\n");

int ql2xiidmaenable = 1;
module_param(ql2xiidmaenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xiidmaenable,
		"Enables iIDMA settings. "
		"Default is 1 - perform iIDMA. 0 - no iIDMA.");

int ql2xmaxqueues = 1;
module_param(ql2xmaxqueues, int, S_IRUGO);
MODULE_PARM_DESC(ql2xmaxqueues,
		"Enables MQ settings. "
		"Default is 1 for single queue. Set it to number "
		"of queues in MQ mode.");

int ql2xmultique_tag;
module_param(ql2xmultique_tag, int, S_IRUGO);
MODULE_PARM_DESC(ql2xmultique_tag,
		"Enables CPU affinity settings for the driver. "
		"Default is 0 for no affinity of request and response IO. "
		"Set it to 1 to turn on the cpu affinity.");

int ql2xfwloadbin;
module_param(ql2xfwloadbin, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xfwloadbin,
		"Option to specify location from which to load ISP firmware:\n"
		" 2 -- load firmware via the request_firmware() (hotplug)\n"
		"      interface.\n"
		" 1 -- load firmware from flash.\n"
		" 0 -- use default semantics.\n");

int ql2xetsenable;
module_param(ql2xetsenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xetsenable,
		"Enables firmware ETS burst. "
		"Default is 0 - skip ETS enablement.");

int ql2xdbwr = 1;
module_param(ql2xdbwr, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xdbwr,
		"Option to specify scheme for request queue posting.\n"
		" 0 -- Regular doorbell.\n"
		" 1 -- CAMRAM doorbell (faster).\n");

int ql2xtargetreset = 1;
module_param(ql2xtargetreset, int, S_IRUGO);
MODULE_PARM_DESC(ql2xtargetreset,
		"Enable target reset. "
		"Default is 1 - use hw defaults.");

int ql2xgffidenable;
module_param(ql2xgffidenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xgffidenable,
		"Enables GFF_ID checks of port type. "
		"Default is 0 - Do not use GFF_ID information.");

int ql2xasynctmfenable;
module_param(ql2xasynctmfenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xasynctmfenable,
		"Enables issue of TM IOCBs asynchronously via IOCB mechanism. "
		"Default is 0 - Issue TM IOCBs via mailbox mechanism.");

int ql2xdontresethba;
module_param(ql2xdontresethba, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xdontresethba,
		"Option to specify reset behaviour.\n"
		" 0 (Default) -- Reset on failure.\n"
		" 1 -- Do not reset on failure.\n");

uint64_t ql2xmaxlun = MAX_LUNS;
module_param(ql2xmaxlun, ullong, S_IRUGO);
MODULE_PARM_DESC(ql2xmaxlun,
		"Defines the maximum LU number to register with the SCSI "
		"midlayer. Default is 65535.");

int ql2xmdcapmask = 0x1F;
module_param(ql2xmdcapmask, int, S_IRUGO);
MODULE_PARM_DESC(ql2xmdcapmask,
		"Set the Minidump driver capture mask level. "
		"Default is 0x1F - Can be set to 0x3, 0x7, 0xF, 0x1F, 0x7F.");

int ql2xmdenable = 1;
module_param(ql2xmdenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xmdenable,
		"Enable/disable MiniDump. "
		"0 - MiniDump disabled. "
		"1 (Default) - MiniDump enabled.");

int ql2xexlogins = 0;
module_param(ql2xexlogins, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xexlogins,
		"Number of extended Logins. "
		"0 (Default) - Disabled.");

int ql2xexchoffld = 0;
module_param(ql2xexchoffld, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xexchoffld,
		"Number of exchanges to offload. "
		"0 (Default) - Disabled.");

/*
 * SCSI host template entry points
 */
static int qla2xxx_slave_configure(struct scsi_device * device);
static int qla2xxx_slave_alloc(struct scsi_device *);
static int qla2xxx_scan_finished(struct Scsi_Host *, unsigned long time);
static void qla2xxx_scan_start(struct Scsi_Host *);
static void qla2xxx_slave_destroy(struct scsi_device *);
static int qla2xxx_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static int qla2xxx_eh_abort(struct scsi_cmnd *);
static int qla2xxx_eh_device_reset(struct scsi_cmnd *);
static int qla2xxx_eh_target_reset(struct scsi_cmnd *);
static int qla2xxx_eh_bus_reset(struct scsi_cmnd *);
static int qla2xxx_eh_host_reset(struct scsi_cmnd *);

static void qla2x00_clear_drv_active(struct qla_hw_data *);
static void qla2x00_free_device(scsi_qla_host_t *);
static void qla83xx_disable_laser(scsi_qla_host_t *vha);

struct scsi_host_template qla2xxx_driver_template = {
	.module = THIS_MODULE,
	.name = QLA2XXX_DRIVER_NAME,
	.queuecommand = qla2xxx_queuecommand,

	.eh_abort_handler = qla2xxx_eh_abort,
	.eh_device_reset_handler = qla2xxx_eh_device_reset,
	.eh_target_reset_handler = qla2xxx_eh_target_reset,
	.eh_bus_reset_handler = qla2xxx_eh_bus_reset,
	.eh_host_reset_handler = qla2xxx_eh_host_reset,

	.slave_configure = qla2xxx_slave_configure,

	.slave_alloc = qla2xxx_slave_alloc,
	.slave_destroy = qla2xxx_slave_destroy,
	.scan_finished = qla2xxx_scan_finished,
	.scan_start = qla2xxx_scan_start,
	.change_queue_depth = scsi_change_queue_depth,
	.this_id = -1,
	.cmd_per_lun = 3,
	.use_clustering = ENABLE_CLUSTERING,
	.sg_tablesize = SG_ALL,

	.max_sectors = 0xFFFF,
	.shost_attrs = qla2x00_host_attrs,

	.supported_mode = MODE_INITIATOR,
	.track_queue_depth = 1,
};

static struct scsi_transport_template *qla2xxx_transport_template = NULL;
struct scsi_transport_template *qla2xxx_transport_vport_template = NULL;

/* TODO Convert to inlines
 *
 * Timer routines
 */

__inline__ void
qla2x00_start_timer(scsi_qla_host_t *vha, void *func, unsigned long interval)
{
	init_timer(&vha->timer);
	vha->timer.expires = jiffies + interval * HZ;
	vha->timer.data = (unsigned long)vha;
	vha->timer.function = (void (*)(unsigned long))func;
	add_timer(&vha->timer);
	vha->timer_active = 1;
}

static inline void
qla2x00_restart_timer(scsi_qla_host_t *vha, unsigned long interval)
{
	/* Currently used for 82XX only. */
	if (vha->device_flags & DFLG_DEV_FAILED) {
		ql_dbg(ql_dbg_timer, vha, 0x600d,
		    "Device in a failed state, returning.\n");
		return;
	}

	mod_timer(&vha->timer, jiffies + interval * HZ);
}

static __inline__ void
qla2x00_stop_timer(scsi_qla_host_t *vha)
{
	del_timer_sync(&vha->timer);
	vha->timer_active = 0;
}

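/*
 * Sketch of the expected call pattern (function and interval names are
 * from elsewhere in this driver; the pairing, not the values, is the
 * point):
 *
 *	qla2x00_start_timer(vha, qla2x00_timer, WATCH_INTERVAL);
 *	...
 *	qla2x00_stop_timer(vha);
 */
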
static int qla2x00_do_dpc(void *data);

static void qla2x00_rst_aen(scsi_qla_host_t *);

static int qla2x00_mem_alloc(struct qla_hw_data *, uint16_t, uint16_t,
	struct req_que **, struct rsp_que **);
static void qla2x00_free_fw_dump(struct qla_hw_data *);
static void qla2x00_mem_free(struct qla_hw_data *);

/* -------------------------------------------------------------------------- */
static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req,
				struct rsp_que *rsp)
{
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
	ha->req_q_map = kzalloc(sizeof(struct req_que *) * ha->max_req_queues,
				GFP_KERNEL);
	if (!ha->req_q_map) {
		ql_log(ql_log_fatal, vha, 0x003b,
		    "Unable to allocate memory for request queue ptrs.\n");
		goto fail_req_map;
	}

	ha->rsp_q_map = kzalloc(sizeof(struct rsp_que *) * ha->max_rsp_queues,
				GFP_KERNEL);
	if (!ha->rsp_q_map) {
		ql_log(ql_log_fatal, vha, 0x003c,
		    "Unable to allocate memory for response queue ptrs.\n");
		goto fail_rsp_map;
	}
	/*
	 * Make sure we record at least the request and response queue zero in
	 * case we need to free them if part of the probe fails.
	 */
	ha->rsp_q_map[0] = rsp;
	ha->req_q_map[0] = req;
	set_bit(0, ha->rsp_qid_map);
	set_bit(0, ha->req_qid_map);
	return 1;

fail_rsp_map:
	kfree(ha->req_q_map);
	ha->req_q_map = NULL;
fail_req_map:
	return -ENOMEM;
}

static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req)
{
	if (IS_QLAFX00(ha)) {
		if (req && req->ring_fx00)
			dma_free_coherent(&ha->pdev->dev,
			    (req->length_fx00 + 1) * sizeof(request_t),
			    req->ring_fx00, req->dma_fx00);
	} else if (req && req->ring)
		dma_free_coherent(&ha->pdev->dev,
		    (req->length + 1) * sizeof(request_t),
		    req->ring, req->dma);

	if (req)
		kfree(req->outstanding_cmds);

	kfree(req);
	req = NULL;
}

static void qla2x00_free_rsp_que(struct qla_hw_data *ha, struct rsp_que *rsp)
{
	if (IS_QLAFX00(ha)) {
		if (rsp && rsp->ring)
			dma_free_coherent(&ha->pdev->dev,
			    (rsp->length_fx00 + 1) * sizeof(request_t),
			    rsp->ring_fx00, rsp->dma_fx00);
	} else if (rsp && rsp->ring) {
		dma_free_coherent(&ha->pdev->dev,
		    (rsp->length + 1) * sizeof(response_t),
		    rsp->ring, rsp->dma);
	}
	kfree(rsp);
	rsp = NULL;
}

static void qla2x00_free_queues(struct qla_hw_data *ha)
{
	struct req_que *req;
	struct rsp_que *rsp;
	int cnt;

	for (cnt = 0; cnt < ha->max_req_queues; cnt++) {
		if (!test_bit(cnt, ha->req_qid_map))
			continue;

		req = ha->req_q_map[cnt];
		qla2x00_free_req_que(ha, req);
	}
	kfree(ha->req_q_map);
	ha->req_q_map = NULL;

	for (cnt = 0; cnt < ha->max_rsp_queues; cnt++) {
		if (!test_bit(cnt, ha->rsp_qid_map))
			continue;

		rsp = ha->rsp_q_map[cnt];
		qla2x00_free_rsp_que(ha, rsp);
	}
	kfree(ha->rsp_q_map);
	ha->rsp_q_map = NULL;
}

static int qla25xx_setup_mode(struct scsi_qla_host *vha)
{
	uint16_t options = 0;
	int ques, req, ret;
	struct qla_hw_data *ha = vha->hw;

	if (!(ha->fw_attributes & BIT_6)) {
		ql_log(ql_log_warn, vha, 0x00d8,
		    "Firmware is not multi-queue capable.\n");
		goto fail;
	}
	if (ql2xmultique_tag) {
		/* create a request queue for IO */
		options |= BIT_7;
		req = qla25xx_create_req_que(ha, options, 0, 0, -1,
		    QLA_DEFAULT_QUE_QOS);
		if (!req) {
			ql_log(ql_log_warn, vha, 0x00e0,
			    "Failed to create request queue.\n");
			goto fail;
		}
		ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 1);
		vha->req = ha->req_q_map[req];
		options |= BIT_1;
		for (ques = 1; ques < ha->max_rsp_queues; ques++) {
			ret = qla25xx_create_rsp_que(ha, options, 0, 0, req);
			if (!ret) {
				ql_log(ql_log_warn, vha, 0x00e8,
				    "Failed to create response queue.\n");
				goto fail2;
			}
		}
		ha->flags.cpu_affinity_enabled = 1;
		ql_dbg(ql_dbg_multiq, vha, 0xc007,
		    "CPU affinity mode enabled, "
		    "no. of response queues:%d no. of request queues:%d.\n",
		    ha->max_rsp_queues, ha->max_req_queues);
		ql_dbg(ql_dbg_init, vha, 0x00e9,
		    "CPU affinity mode enabled, "
		    "no. of response queues:%d no. of request queues:%d.\n",
		    ha->max_rsp_queues, ha->max_req_queues);
	}
	return 0;
fail2:
	qla25xx_delete_queues(vha);
	destroy_workqueue(ha->wq);
	ha->wq = NULL;
	vha->req = ha->req_q_map[0];
fail:
	ha->mqenable = 0;
	kfree(ha->req_q_map);
	kfree(ha->rsp_q_map);
	ha->max_req_queues = ha->max_rsp_queues = 1;
	return 1;
}

static char *
qla2x00_pci_info_str(struct scsi_qla_host *vha, char *str)
{
	struct qla_hw_data *ha = vha->hw;
	static char *pci_bus_modes[] = {
		"33", "66", "100", "133",
	};
	uint16_t pci_bus;

	strcpy(str, "PCI");
	pci_bus = (ha->pci_attr & (BIT_9 | BIT_10)) >> 9;
	if (pci_bus) {
		strcat(str, "-X (");
		strcat(str, pci_bus_modes[pci_bus]);
	} else {
		pci_bus = (ha->pci_attr & BIT_8) >> 8;
		strcat(str, " (");
		strcat(str, pci_bus_modes[pci_bus]);
	}
	strcat(str, " MHz)");

	return (str);
}

static char *
qla24xx_pci_info_str(struct scsi_qla_host *vha, char *str)
{
	static char *pci_bus_modes[] = { "33", "66", "100", "133", };
	struct qla_hw_data *ha = vha->hw;
	uint32_t pci_bus;

	if (pci_is_pcie(ha->pdev)) {
		char lwstr[6];
		uint32_t lstat, lspeed, lwidth;

		pcie_capability_read_dword(ha->pdev, PCI_EXP_LNKCAP, &lstat);
		lspeed = lstat & PCI_EXP_LNKCAP_SLS;
		lwidth = (lstat & PCI_EXP_LNKCAP_MLW) >> 4;

		strcpy(str, "PCIe (");
		switch (lspeed) {
		case 1:
			strcat(str, "2.5GT/s ");
			break;
		case 2:
			strcat(str, "5.0GT/s ");
			break;
		case 3:
			strcat(str, "8.0GT/s ");
			break;
		default:
			strcat(str, "<unknown> ");
			break;
		}
		snprintf(lwstr, sizeof(lwstr), "x%d)", lwidth);
		strcat(str, lwstr);

		return str;
	}

	strcpy(str, "PCI");
	pci_bus = (ha->pci_attr & CSRX_PCIX_BUS_MODE_MASK) >> 8;
	if (pci_bus == 0 || pci_bus == 8) {
		strcat(str, " (");
		strcat(str, pci_bus_modes[pci_bus >> 3]);
	} else {
		strcat(str, "-X ");
		if (pci_bus & BIT_2)
			strcat(str, "Mode 2");
		else
			strcat(str, "Mode 1");
		strcat(str, " (");
		strcat(str, pci_bus_modes[pci_bus & ~BIT_2]);
	}
	strcat(str, " MHz)");

	return str;
}

static char *
qla2x00_fw_version_str(struct scsi_qla_host *vha, char *str, size_t size)
{
	char un_str[10];
	struct qla_hw_data *ha = vha->hw;

	snprintf(str, size, "%d.%02d.%02d ", ha->fw_major_version,
	    ha->fw_minor_version, ha->fw_subminor_version);

	if (ha->fw_attributes & BIT_9) {
		strcat(str, "FLX");
		return (str);
	}

	switch (ha->fw_attributes & 0xFF) {
	case 0x7:
		strcat(str, "EF");
		break;
	case 0x17:
		strcat(str, "TP");
		break;
	case 0x37:
		strcat(str, "IP");
		break;
	case 0x77:
		strcat(str, "VI");
		break;
	default:
		sprintf(un_str, "(%x)", ha->fw_attributes);
		strcat(str, un_str);
		break;
	}
	if (ha->fw_attributes & 0x100)
		strcat(str, "X");

	return (str);
}

static char *
qla24xx_fw_version_str(struct scsi_qla_host *vha, char *str, size_t size)
{
	struct qla_hw_data *ha = vha->hw;

	snprintf(str, size, "%d.%02d.%02d (%x)", ha->fw_major_version,
	    ha->fw_minor_version, ha->fw_subminor_version, ha->fw_attributes);
	return str;
}

void
qla2x00_sp_free_dma(void *vha, void *ptr)
{
	srb_t *sp = (srb_t *)ptr;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct qla_hw_data *ha = sp->fcport->vha->hw;
	void *ctx = GET_CMD_CTX_SP(sp);

	if (sp->flags & SRB_DMA_VALID) {
		scsi_dma_unmap(cmd);
		sp->flags &= ~SRB_DMA_VALID;
	}

	if (sp->flags & SRB_CRC_PROT_DMA_VALID) {
		dma_unmap_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
		    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
		sp->flags &= ~SRB_CRC_PROT_DMA_VALID;
	}

	if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
		/* List assured to be having elements */
		qla2x00_clean_dsd_pool(ha, sp, NULL);
		sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
	}

	if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
		dma_pool_free(ha->dl_dma_pool, ctx,
		    ((struct crc_context *)ctx)->crc_ctx_dma);
		sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
	}

	if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
		struct ct6_dsd *ctx1 = (struct ct6_dsd *)ctx;

		dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
		    ctx1->fcp_cmnd_dma);
		list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list);
		ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt;
		ha->gbl_dsd_avail += ctx1->dsd_use_cnt;
		mempool_free(ctx1, ha->ctx_mempool);
		ctx1 = NULL;
	}

	CMD_SP(cmd) = NULL;
	qla2x00_rel_sp(sp->fcport->vha, sp);
}

static void
qla2x00_sp_compl(void *data, void *ptr, int res)
{
	struct qla_hw_data *ha = (struct qla_hw_data *)data;
	srb_t *sp = (srb_t *)ptr;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);

	cmd->result = res;

	if (atomic_read(&sp->ref_count) == 0) {
		ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3015,
		    "SP reference-count to ZERO -- sp=%p cmd=%p.\n",
		    sp, GET_CMD_SP(sp));
		if (ql2xextended_error_logging & ql_dbg_io)
			WARN_ON(atomic_read(&sp->ref_count) == 0);
		return;
	}
	if (!atomic_dec_and_test(&sp->ref_count))
		return;

	qla2x00_sp_free_dma(ha, sp);
	cmd->scsi_done(cmd);
}

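/*
 * SRB reference counting in brief (descriptive note): queuecommand starts
 * each SCSI SRB with ref_count == 1; paths that must keep the SRB alive
 * across a lock drop (e.g. sp_get() in the abort handler) take an extra
 * reference, and qla2x00_sp_compl() frees the SRB and completes the
 * command only when the count drops to zero.
 */
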
688 */ 689 static int 690 qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) 691 { 692 scsi_qla_host_t *vha = shost_priv(host); 693 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; 694 struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device)); 695 struct qla_hw_data *ha = vha->hw; 696 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 697 srb_t *sp; 698 int rval; 699 700 if (ha->flags.eeh_busy) { 701 if (ha->flags.pci_channel_io_perm_failure) { 702 ql_dbg(ql_dbg_aer, vha, 0x9010, 703 "PCI Channel IO permanent failure, exiting " 704 "cmd=%p.\n", cmd); 705 cmd->result = DID_NO_CONNECT << 16; 706 } else { 707 ql_dbg(ql_dbg_aer, vha, 0x9011, 708 "EEH_Busy, Requeuing the cmd=%p.\n", cmd); 709 cmd->result = DID_REQUEUE << 16; 710 } 711 goto qc24_fail_command; 712 } 713 714 rval = fc_remote_port_chkready(rport); 715 if (rval) { 716 cmd->result = rval; 717 ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3003, 718 "fc_remote_port_chkready failed for cmd=%p, rval=0x%x.\n", 719 cmd, rval); 720 goto qc24_fail_command; 721 } 722 723 if (!vha->flags.difdix_supported && 724 scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) { 725 ql_dbg(ql_dbg_io, vha, 0x3004, 726 "DIF Cap not reg, fail DIF capable cmd's:%p.\n", 727 cmd); 728 cmd->result = DID_NO_CONNECT << 16; 729 goto qc24_fail_command; 730 } 731 732 if (!fcport) { 733 cmd->result = DID_NO_CONNECT << 16; 734 goto qc24_fail_command; 735 } 736 737 if (atomic_read(&fcport->state) != FCS_ONLINE) { 738 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD || 739 atomic_read(&base_vha->loop_state) == LOOP_DEAD) { 740 ql_dbg(ql_dbg_io, vha, 0x3005, 741 "Returning DNC, fcport_state=%d loop_state=%d.\n", 742 atomic_read(&fcport->state), 743 atomic_read(&base_vha->loop_state)); 744 cmd->result = DID_NO_CONNECT << 16; 745 goto qc24_fail_command; 746 } 747 goto qc24_target_busy; 748 } 749 750 /* 751 * Return target busy if we've received a non-zero retry_delay_timer 752 * in a FCP_RSP. 753 */ 754 if (fcport->retry_delay_timestamp == 0) { 755 /* retry delay not set */ 756 } else if (time_after(jiffies, fcport->retry_delay_timestamp)) 757 fcport->retry_delay_timestamp = 0; 758 else 759 goto qc24_target_busy; 760 761 sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC); 762 if (!sp) 763 goto qc24_host_busy; 764 765 sp->u.scmd.cmd = cmd; 766 sp->type = SRB_SCSI_CMD; 767 atomic_set(&sp->ref_count, 1); 768 CMD_SP(cmd) = (void *)sp; 769 sp->free = qla2x00_sp_free_dma; 770 sp->done = qla2x00_sp_compl; 771 772 rval = ha->isp_ops->start_scsi(sp); 773 if (rval != QLA_SUCCESS) { 774 ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3013, 775 "Start scsi failed rval=%d for cmd=%p.\n", rval, cmd); 776 goto qc24_host_busy_free_sp; 777 } 778 779 return 0; 780 781 qc24_host_busy_free_sp: 782 qla2x00_sp_free_dma(ha, sp); 783 784 qc24_host_busy: 785 return SCSI_MLQUEUE_HOST_BUSY; 786 787 qc24_target_busy: 788 return SCSI_MLQUEUE_TARGET_BUSY; 789 790 qc24_fail_command: 791 cmd->scsi_done(cmd); 792 793 return 0; 794 } 795 796 /* 797 * qla2x00_eh_wait_on_command 798 * Waits for the command to be returned by the Firmware for some 799 * max time. 800 * 801 * Input: 802 * cmd = Scsi Command to wait on. 
/*
 * qla2x00_eh_wait_on_command
 *    Waits for the command to be returned by the firmware for a
 *    maximum amount of time.
 *
 * Input:
 *    cmd = Scsi Command to wait on.
 *
 * Return:
 *    Not Found : 0
 *    Found : 1
 */
static int
qla2x00_eh_wait_on_command(struct scsi_cmnd *cmd)
{
#define ABORT_POLLING_PERIOD	1000
#define ABORT_WAIT_ITER		((2 * 1000) / (ABORT_POLLING_PERIOD))
	unsigned long wait_iter = ABORT_WAIT_ITER;
	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
	struct qla_hw_data *ha = vha->hw;
	int ret = QLA_SUCCESS;

	if (unlikely(pci_channel_offline(ha->pdev)) || ha->flags.eeh_busy) {
		ql_dbg(ql_dbg_taskm, vha, 0x8005,
		    "Return:eh_wait.\n");
		return ret;
	}

	while (CMD_SP(cmd) && wait_iter--) {
		msleep(ABORT_POLLING_PERIOD);
	}
	if (CMD_SP(cmd))
		ret = QLA_FUNCTION_FAILED;

	return ret;
}

/*
 * qla2x00_wait_for_hba_online
 *    Wait until the HBA is online, retrying through at most
 *    MAX_RETRIES_OF_ISP_ABORT ISP aborts, or until the HBA is
 *    disabled, i.e. marked offline.
 *
 * Input:
 *    ha - pointer to host adapter structure
 *
 * Note:
 *    Does context switching - release any spin lock held
 *    before calling this routine.
 *
 * Return:
 *    Success (Adapter is online) : 0
 *    Failed  (Adapter is offline/disabled) : 1
 */
int
qla2x00_wait_for_hba_online(scsi_qla_host_t *vha)
{
	int return_status;
	unsigned long wait_online;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	wait_online = jiffies + (MAX_LOOP_TIMEOUT * HZ);
	while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) ||
	    test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
	    ha->dpc_active) && time_before(jiffies, wait_online)) {

		msleep(1000);
	}
	if (base_vha->flags.online)
		return_status = QLA_SUCCESS;
	else
		return_status = QLA_FUNCTION_FAILED;

	return (return_status);
}

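/*
 * Typical caller pattern (sketch; the error-handler entry points later in
 * this file follow it): gate any reset or abort work on the adapter being
 * online, e.g.
 *
 *	if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS)
 *		return FAILED;
 */
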
/*
 * qla2x00_wait_for_hba_ready
 *    Wait until the HBA is ready before doing driver unload.
 *
 * Input:
 *    ha - pointer to host adapter structure
 *
 * Note:
 *    Does context switching - release any spin lock held
 *    before calling this routine.
 *
 */
static void
qla2x00_wait_for_hba_ready(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;

	while (((qla2x00_reset_active(vha)) || ha->dpc_active ||
	    ha->flags.mbox_busy) ||
	    test_bit(FX00_RESET_RECOVERY, &vha->dpc_flags) ||
	    test_bit(FX00_TARGET_SCAN, &vha->dpc_flags))
		msleep(1000);
}

int
qla2x00_wait_for_chip_reset(scsi_qla_host_t *vha)
{
	int return_status;
	unsigned long wait_reset;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	wait_reset = jiffies + (MAX_LOOP_TIMEOUT * HZ);
	while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) ||
	    test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
	    ha->dpc_active) && time_before(jiffies, wait_reset)) {

		msleep(1000);

		if (!test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags) &&
		    ha->flags.chip_reset_done)
			break;
	}
	if (ha->flags.chip_reset_done)
		return_status = QLA_SUCCESS;
	else
		return_status = QLA_FUNCTION_FAILED;

	return return_status;
}

static void
sp_get(struct srb *sp)
{
	atomic_inc(&sp->ref_count);
}

/**************************************************************************
* qla2xxx_eh_abort
*
* Description:
*    The abort function will abort the specified command.
*
* Input:
*    cmd = Linux SCSI command packet to be aborted.
*
* Returns:
*    Either SUCCESS or FAILED.
*
* Note:
*    Only return FAILED if command not returned by firmware.
**************************************************************************/
static int
qla2xxx_eh_abort(struct scsi_cmnd *cmd)
{
	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
	srb_t *sp;
	int ret;
	unsigned int id;
	uint64_t lun;
	unsigned long flags;
	int rval, wait = 0;
	struct qla_hw_data *ha = vha->hw;

	if (!CMD_SP(cmd))
		return SUCCESS;

	ret = fc_block_scsi_eh(cmd);
	if (ret != 0)
		return ret;
	ret = SUCCESS;

	id = cmd->device->id;
	lun = cmd->device->lun;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	sp = (srb_t *) CMD_SP(cmd);
	if (!sp) {
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		return SUCCESS;
	}

	ql_dbg(ql_dbg_taskm, vha, 0x8002,
	    "Aborting from RISC nexus=%ld:%d:%llu sp=%p cmd=%p handle=%x\n",
	    vha->host_no, id, lun, sp, cmd, sp->handle);

	/* Get a reference to the sp and drop the lock.*/
	sp_get(sp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	rval = ha->isp_ops->abort_command(sp);
	if (rval) {
		if (rval == QLA_FUNCTION_PARAMETER_ERROR)
			ret = SUCCESS;
		else
			ret = FAILED;

		ql_dbg(ql_dbg_taskm, vha, 0x8003,
		    "Abort command mbx failed cmd=%p, rval=%x.\n", cmd, rval);
	} else {
		ql_dbg(ql_dbg_taskm, vha, 0x8004,
		    "Abort command mbx success cmd=%p.\n", cmd);
		wait = 1;
	}

	spin_lock_irqsave(&ha->hardware_lock, flags);
	sp->done(ha, sp, 0);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	/* Did the command return during mailbox execution? */
	if (ret == FAILED && !CMD_SP(cmd))
		ret = SUCCESS;

	/* Wait for the command to be returned. */
	if (wait) {
		if (qla2x00_eh_wait_on_command(cmd) != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x8006,
			    "Abort handler timed out cmd=%p.\n", cmd);
			ret = FAILED;
		}
	}

	ql_log(ql_log_info, vha, 0x801c,
	    "Abort command issued nexus=%ld:%d:%llu -- %d %x.\n",
	    vha->host_no, id, lun, wait, ret);

	return ret;
}

int
qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
	uint64_t l, enum nexus_wait_type type)
{
	int cnt, match, status;
	unsigned long flags;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req;
	srb_t *sp;
	struct scsi_cmnd *cmd;

	status = QLA_SUCCESS;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	req = vha->req;
	for (cnt = 1; status == QLA_SUCCESS &&
	    cnt < req->num_outstanding_cmds; cnt++) {
		sp = req->outstanding_cmds[cnt];
		if (!sp)
			continue;
		if (sp->type != SRB_SCSI_CMD)
			continue;
		if (vha->vp_idx != sp->fcport->vha->vp_idx)
			continue;
		match = 0;
		cmd = GET_CMD_SP(sp);
		switch (type) {
		case WAIT_HOST:
			match = 1;
			break;
		case WAIT_TARGET:
			match = cmd->device->id == t;
			break;
		case WAIT_LUN:
			match = (cmd->device->id == t &&
			    cmd->device->lun == l);
			break;
		}
		if (!match)
			continue;

		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		status = qla2x00_eh_wait_on_command(cmd);
		spin_lock_irqsave(&ha->hardware_lock, flags);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return status;
}

static char *reset_errors[] = {
	"HBA not online",
	"HBA not ready",
	"Task management failed",
	"Waiting for command completions",
};

static int
__qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
    struct scsi_cmnd *cmd, int (*do_reset)(struct fc_port *, uint64_t, int))
{
	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
	fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
	int err;

	if (!fcport) {
		return FAILED;
	}

	err = fc_block_scsi_eh(cmd);
	if (err != 0)
		return err;

	ql_log(ql_log_info, vha, 0x8009,
	    "%s RESET ISSUED nexus=%ld:%d:%llu cmd=%p.\n", name, vha->host_no,
	    cmd->device->id, cmd->device->lun, cmd);

	err = 0;
	if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x800a,
		    "Wait for hba online failed for cmd=%p.\n", cmd);
		goto eh_reset_failed;
	}
	err = 2;
	if (do_reset(fcport, cmd->device->lun, cmd->request->cpu + 1)
	    != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x800c,
		    "do_reset failed for cmd=%p.\n", cmd);
		goto eh_reset_failed;
	}
	err = 3;
	if (qla2x00_eh_wait_for_pending_commands(vha, cmd->device->id,
	    cmd->device->lun, type) != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x800d,
		    "wait for pending cmds failed for cmd=%p.\n", cmd);
		goto eh_reset_failed;
	}

	ql_log(ql_log_info, vha, 0x800e,
	    "%s RESET SUCCEEDED nexus:%ld:%d:%llu cmd=%p.\n", name,
	    vha->host_no, cmd->device->id, cmd->device->lun, cmd);

	return SUCCESS;

eh_reset_failed:
	ql_log(ql_log_info, vha, 0x800f,
	    "%s RESET FAILED: %s nexus=%ld:%d:%llu cmd=%p.\n", name,
	    reset_errors[err], vha->host_no, cmd->device->id, cmd->device->lun,
	    cmd);
	return FAILED;
}

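/*
 * The device- and target-reset handlers below are thin wrappers around
 * __qla2xxx_eh_generic_reset(); only the nexus scope (WAIT_LUN vs
 * WAIT_TARGET) and the isp_ops reset callback differ. Note that the err
 * values assigned above index into reset_errors[], so a failure at the
 * hba-online check, for example, is reported as "HBA not online".
 */
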
static int
qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
{
	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
	struct qla_hw_data *ha = vha->hw;

	return __qla2xxx_eh_generic_reset("DEVICE", WAIT_LUN, cmd,
	    ha->isp_ops->lun_reset);
}

static int
qla2xxx_eh_target_reset(struct scsi_cmnd *cmd)
{
	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
	struct qla_hw_data *ha = vha->hw;

	return __qla2xxx_eh_generic_reset("TARGET", WAIT_TARGET, cmd,
	    ha->isp_ops->target_reset);
}

/**************************************************************************
* qla2xxx_eh_bus_reset
*
* Description:
*    The bus reset function will reset the bus and abort any executing
*    commands.
*
* Input:
*    cmd = Linux SCSI command packet of the command that caused the
*          bus reset.
*
* Returns:
*    SUCCESS/FAILED (defined as macros in scsi.h).
*
**************************************************************************/
static int
qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
{
	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
	fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
	int ret = FAILED;
	unsigned int id;
	uint64_t lun;

	id = cmd->device->id;
	lun = cmd->device->lun;

	if (!fcport) {
		return ret;
	}

	ret = fc_block_scsi_eh(cmd);
	if (ret != 0)
		return ret;
	ret = FAILED;

	ql_log(ql_log_info, vha, 0x8012,
	    "BUS RESET ISSUED nexus=%ld:%d:%llu.\n", vha->host_no, id, lun);

	if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
		ql_log(ql_log_fatal, vha, 0x8013,
		    "Wait for hba online failed board disabled.\n");
		goto eh_bus_reset_done;
	}

	if (qla2x00_loop_reset(vha) == QLA_SUCCESS)
		ret = SUCCESS;

	if (ret == FAILED)
		goto eh_bus_reset_done;

	/* Flush outstanding commands. */
	if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST) !=
	    QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x8014,
		    "Wait for pending commands failed.\n");
		ret = FAILED;
	}

eh_bus_reset_done:
	ql_log(ql_log_warn, vha, 0x802b,
	    "BUS RESET %s nexus=%ld:%d:%llu.\n",
	    (ret == FAILED) ? "FAILED" : "SUCCEEDED", vha->host_no, id, lun);

	return ret;
}

/**************************************************************************
* qla2xxx_eh_host_reset
*
* Description:
*    The reset function will reset the Adapter.
*
* Input:
*    cmd = Linux SCSI command packet of the command that caused the
*          adapter reset.
*
* Returns:
*    Either SUCCESS or FAILED.
*
* Note:
**************************************************************************/
static int
qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
{
	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
	struct qla_hw_data *ha = vha->hw;
	int ret = FAILED;
	unsigned int id;
	uint64_t lun;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	id = cmd->device->id;
	lun = cmd->device->lun;

	ql_log(ql_log_info, vha, 0x8018,
	    "ADAPTER RESET ISSUED nexus=%ld:%d:%llu.\n", vha->host_no, id, lun);

	/*
	 * No point in issuing another reset if one is active. Also do not
	 * attempt a reset if we are updating flash.
	 */
	if (qla2x00_reset_active(vha) || ha->optrom_state != QLA_SWAITING)
		goto eh_host_reset_lock;

	if (vha != base_vha) {
		if (qla2x00_vp_abort_isp(vha))
			goto eh_host_reset_lock;
	} else {
		if (IS_P3P_TYPE(vha->hw)) {
			if (!qla82xx_fcoe_ctx_reset(vha)) {
				/* Ctx reset success */
				ret = SUCCESS;
				goto eh_host_reset_lock;
			}
			/* fall thru if ctx reset failed */
		}
		if (ha->wq)
			flush_workqueue(ha->wq);

		set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
		if (ha->isp_ops->abort_isp(base_vha)) {
			clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
			/* failed. schedule dpc to try */
			set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);

			if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
				ql_log(ql_log_warn, vha, 0x802a,
				    "wait for hba online failed.\n");
				goto eh_host_reset_lock;
			}
		}
		clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
	}

	/* Waiting for command to be returned to OS.*/
	if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST) ==
	    QLA_SUCCESS)
		ret = SUCCESS;

eh_host_reset_lock:
	ql_log(ql_log_info, vha, 0x8017,
	    "ADAPTER RESET %s nexus=%ld:%d:%llu.\n",
	    (ret == FAILED) ? "FAILED" : "SUCCEEDED", vha->host_no, id, lun);

	return ret;
}

/*
* qla2x00_loop_reset
*      Issue loop reset.
*
* Input:
*      ha = adapter block pointer.
*
* Returns:
*      0 = success
*/
int
qla2x00_loop_reset(scsi_qla_host_t *vha)
{
	int ret;
	struct fc_port *fcport;
	struct qla_hw_data *ha = vha->hw;

	if (IS_QLAFX00(ha)) {
		return qlafx00_loop_reset(vha);
	}

	if (ql2xtargetreset == 1 && ha->flags.enable_target_reset) {
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			if (fcport->port_type != FCT_TARGET)
				continue;

			ret = ha->isp_ops->target_reset(fcport, 0, 0);
			if (ret != QLA_SUCCESS) {
				ql_dbg(ql_dbg_taskm, vha, 0x802c,
				    "Bus Reset failed: Reset=%d "
				    "d_id=%x.\n", ret, fcport->d_id.b24);
			}
		}
	}


	if (ha->flags.enable_lip_full_login && !IS_CNA_CAPABLE(ha)) {
		atomic_set(&vha->loop_state, LOOP_DOWN);
		atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
		qla2x00_mark_all_devices_lost(vha, 0);
		ret = qla2x00_full_login_lip(vha);
		if (ret != QLA_SUCCESS) {
			ql_dbg(ql_dbg_taskm, vha, 0x802d,
			    "full_login_lip=%d.\n", ret);
		}
	}

	if (ha->flags.enable_lip_reset) {
		ret = qla2x00_lip_reset(vha);
		if (ret != QLA_SUCCESS)
			ql_dbg(ql_dbg_taskm, vha, 0x802e,
			    "lip_reset failed (%d).\n", ret);
	}

	/* Issue marker command only when we are going to start the I/O */
	vha->marker_needed = 1;

	return QLA_SUCCESS;
}

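/*
 * Descriptive note: callers of qla2x00_abort_all_cmds() pass 'res' as the
 * SCSI result with which every outstanding command is completed (for
 * example DID_RESET << 16 from ISP-abort cleanup paths), so all in-flight
 * SRBs are returned to the midlayer with that status.
 */
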
void
qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
{
	int que, cnt;
	unsigned long flags;
	srb_t *sp;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req;

	qlt_host_reset_handler(ha);

	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (que = 0; que < ha->max_req_queues; que++) {
		req = ha->req_q_map[que];
		if (!req)
			continue;
		if (!req->outstanding_cmds)
			continue;
		for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
			sp = req->outstanding_cmds[cnt];
			if (sp) {
				req->outstanding_cmds[cnt] = NULL;
				sp->done(vha, sp, res);
			}
		}
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

static int
qla2xxx_slave_alloc(struct scsi_device *sdev)
{
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));

	if (!rport || fc_remote_port_chkready(rport))
		return -ENXIO;

	sdev->hostdata = *(fc_port_t **)rport->dd_data;

	return 0;
}

static int
qla2xxx_slave_configure(struct scsi_device *sdev)
{
	scsi_qla_host_t *vha = shost_priv(sdev->host);
	struct req_que *req = vha->req;

	if (IS_T10_PI_CAPABLE(vha->hw))
		blk_queue_update_dma_alignment(sdev->request_queue, 0x7);

	scsi_change_queue_depth(sdev, req->max_q_depth);
	return 0;
}

static void
qla2xxx_slave_destroy(struct scsi_device *sdev)
{
	sdev->hostdata = NULL;
}

/**
 * qla2x00_config_dma_addressing() - Configure OS DMA addressing method.
 * @ha: HA context
 *
 * At exit, @ha's flags.enable_64bit_addressing is set to indicate the
 * supported addressing method.
 */
static void
qla2x00_config_dma_addressing(struct qla_hw_data *ha)
{
	/* Assume a 32bit DMA mask. */
	ha->flags.enable_64bit_addressing = 0;

	if (!dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(64))) {
		/* Any upper-dword bits set? */
		if (MSD(dma_get_required_mask(&ha->pdev->dev)) &&
		    !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
			/* Ok, a 64bit DMA mask is applicable. */
			ha->flags.enable_64bit_addressing = 1;
			ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
			ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
			return;
		}
	}

	dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(32));
	pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(32));
}

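/*
 * Resulting policy, in brief (descriptive note): a 64-bit streaming mask
 * is tried first; only if the platform actually requires addresses above
 * the low dword and a 64-bit coherent mask is also available does the
 * driver switch to the 64-bit IOCB builders. Otherwise both masks fall
 * back to 32 bits.
 */
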
static void
qla2x00_enable_intrs(struct qla_hw_data *ha)
{
	unsigned long flags = 0;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->interrupts_on = 1;
	/* enable risc and host interrupts */
	WRT_REG_WORD(&reg->ictrl, ICR_EN_INT | ICR_EN_RISC);
	RD_REG_WORD(&reg->ictrl);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

}

static void
qla2x00_disable_intrs(struct qla_hw_data *ha)
{
	unsigned long flags = 0;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->interrupts_on = 0;
	/* disable risc and host interrupts */
	WRT_REG_WORD(&reg->ictrl, 0);
	RD_REG_WORD(&reg->ictrl);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

static void
qla24xx_enable_intrs(struct qla_hw_data *ha)
{
	unsigned long flags = 0;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->interrupts_on = 1;
	WRT_REG_DWORD(&reg->ictrl, ICRX_EN_RISC_INT);
	RD_REG_DWORD(&reg->ictrl);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

static void
qla24xx_disable_intrs(struct qla_hw_data *ha)
{
	unsigned long flags = 0;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	if (IS_NOPOLLING_TYPE(ha))
		return;
	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->interrupts_on = 0;
	WRT_REG_DWORD(&reg->ictrl, 0);
	RD_REG_DWORD(&reg->ictrl);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

static int
qla2x00_iospace_config(struct qla_hw_data *ha)
{
	resource_size_t pio;
	uint16_t msix;
	int cpus;

	if (pci_request_selected_regions(ha->pdev, ha->bars,
	    QLA2XXX_DRIVER_NAME)) {
		ql_log_pci(ql_log_fatal, ha->pdev, 0x0011,
		    "Failed to reserve PIO/MMIO regions (%s), aborting.\n",
		    pci_name(ha->pdev));
		goto iospace_error_exit;
	}
	if (!(ha->bars & 1))
		goto skip_pio;

	/* We only need PIO for Flash operations on ISP2312 v2 chips. */
	pio = pci_resource_start(ha->pdev, 0);
	if (pci_resource_flags(ha->pdev, 0) & IORESOURCE_IO) {
		if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) {
			ql_log_pci(ql_log_warn, ha->pdev, 0x0012,
			    "Invalid pci I/O region size (%s).\n",
			    pci_name(ha->pdev));
			pio = 0;
		}
	} else {
		ql_log_pci(ql_log_warn, ha->pdev, 0x0013,
		    "Region #0 not a PIO resource (%s).\n",
		    pci_name(ha->pdev));
		pio = 0;
	}
	ha->pio_address = pio;
	ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0014,
	    "PIO address=%llu.\n",
	    (unsigned long long)ha->pio_address);

skip_pio:
	/* Use MMIO operations for all accesses. */
	if (!(pci_resource_flags(ha->pdev, 1) & IORESOURCE_MEM)) {
		ql_log_pci(ql_log_fatal, ha->pdev, 0x0015,
		    "Region #1 not an MMIO resource (%s), aborting.\n",
		    pci_name(ha->pdev));
		goto iospace_error_exit;
	}
	if (pci_resource_len(ha->pdev, 1) < MIN_IOBASE_LEN) {
		ql_log_pci(ql_log_fatal, ha->pdev, 0x0016,
		    "Invalid PCI mem region size (%s), aborting.\n",
		    pci_name(ha->pdev));
		goto iospace_error_exit;
	}

	ha->iobase = ioremap(pci_resource_start(ha->pdev, 1), MIN_IOBASE_LEN);
	if (!ha->iobase) {
		ql_log_pci(ql_log_fatal, ha->pdev, 0x0017,
		    "Cannot remap MMIO (%s), aborting.\n",
		    pci_name(ha->pdev));
		goto iospace_error_exit;
	}

	/* Determine queue resources */
	ha->max_req_queues = ha->max_rsp_queues = 1;
	if ((ql2xmaxqueues <= 1 && !ql2xmultique_tag) ||
	    (ql2xmaxqueues > 1 && ql2xmultique_tag) ||
	    (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
		goto mqiobase_exit;

	ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3),
	    pci_resource_len(ha->pdev, 3));
	if (ha->mqiobase) {
		ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0018,
		    "MQIO Base=%p.\n", ha->mqiobase);
		/* Read MSIX vector size of the board */
		pci_read_config_word(ha->pdev, QLA_PCI_MSIX_CONTROL, &msix);
		ha->msix_count = msix;
		/* Max queues are bounded by available msix vectors */
		/* queue 0 uses two msix vectors */
		if (ql2xmultique_tag) {
			cpus = num_online_cpus();
			ha->max_rsp_queues = (ha->msix_count - 1 > cpus) ?
			    (cpus + 1) : (ha->msix_count - 1);
			ha->max_req_queues = 2;
		} else if (ql2xmaxqueues > 1) {
			ha->max_req_queues = ql2xmaxqueues > QLA_MQ_SIZE ?
			    QLA_MQ_SIZE : ql2xmaxqueues;
			ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc008,
			    "QoS mode set, max no of request queues:%d.\n",
			    ha->max_req_queues);
			ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0019,
			    "QoS mode set, max no of request queues:%d.\n",
			    ha->max_req_queues);
		}
		ql_log_pci(ql_log_info, ha->pdev, 0x001a,
		    "MSI-X vector count: %d.\n", msix);
	} else
		ql_log_pci(ql_log_info, ha->pdev, 0x001b,
		    "BAR 3 not enabled.\n");

mqiobase_exit:
	ha->msix_count = ha->max_rsp_queues + 1;
	ql_dbg_pci(ql_dbg_init, ha->pdev, 0x001c,
	    "MSIX Count:%d.\n", ha->msix_count);
	return (0);

iospace_error_exit:
	return (-ENOMEM);
}

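/*
 * Worked example (hypothetical numbers): with ql2xmultique_tag set, 8
 * online CPUs and an MSI-X control value of 10, max_rsp_queues becomes
 * min(cpus + 1, msix_count - 1) = 9 and max_req_queues = 2, giving a
 * final msix_count of max_rsp_queues + 1 = 10. Without MQ the defaults
 * of 1 stand and the final msix_count is 2.
 */
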
1595 QLA_MQ_SIZE : ql2xmaxqueues; 1596 ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc008, 1597 "QoS mode set, max no of request queues:%d.\n", 1598 ha->max_req_queues); 1599 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0019, 1600 "QoS mode set, max no of request queues:%d.\n", 1601 ha->max_req_queues); 1602 } 1603 ql_log_pci(ql_log_info, ha->pdev, 0x001a, 1604 "MSI-X vector count: %d.\n", msix); 1605 } else 1606 ql_log_pci(ql_log_info, ha->pdev, 0x001b, 1607 "BAR 3 not enabled.\n"); 1608 1609 mqiobase_exit: 1610 ha->msix_count = ha->max_rsp_queues + 1; 1611 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x001c, 1612 "MSIX Count:%d.\n", ha->msix_count); 1613 return (0); 1614 1615 iospace_error_exit: 1616 return (-ENOMEM); 1617 } 1618 1619 1620 static int 1621 qla83xx_iospace_config(struct qla_hw_data *ha) 1622 { 1623 uint16_t msix; 1624 int cpus; 1625 1626 if (pci_request_selected_regions(ha->pdev, ha->bars, 1627 QLA2XXX_DRIVER_NAME)) { 1628 ql_log_pci(ql_log_fatal, ha->pdev, 0x0117, 1629 "Failed to reserve PIO/MMIO regions (%s), aborting.\n", 1630 pci_name(ha->pdev)); 1631 1632 goto iospace_error_exit; 1633 } 1634 1635 /* Use MMIO operations for all accesses. */ 1636 if (!(pci_resource_flags(ha->pdev, 0) & IORESOURCE_MEM)) { 1637 ql_log_pci(ql_log_warn, ha->pdev, 0x0118, 1638 "Invalid pci I/O region size (%s).\n", 1639 pci_name(ha->pdev)); 1640 goto iospace_error_exit; 1641 } 1642 if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) { 1643 ql_log_pci(ql_log_warn, ha->pdev, 0x0119, 1644 "Invalid PCI mem region size (%s), aborting\n", 1645 pci_name(ha->pdev)); 1646 goto iospace_error_exit; 1647 } 1648 1649 ha->iobase = ioremap(pci_resource_start(ha->pdev, 0), MIN_IOBASE_LEN); 1650 if (!ha->iobase) { 1651 ql_log_pci(ql_log_fatal, ha->pdev, 0x011a, 1652 "Cannot remap MMIO (%s), aborting.\n", 1653 pci_name(ha->pdev)); 1654 goto iospace_error_exit; 1655 } 1656 1657 /* 64bit PCI BAR - BAR2 will correspoond to region 4 */ 1658 /* 83XX 26XX always use MQ type access for queues 1659 * - mbar 2, a.k.a region 4 */ 1660 ha->max_req_queues = ha->max_rsp_queues = 1; 1661 ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 4), 1662 pci_resource_len(ha->pdev, 4)); 1663 1664 if (!ha->mqiobase) { 1665 ql_log_pci(ql_log_fatal, ha->pdev, 0x011d, 1666 "BAR2/region4 not enabled\n"); 1667 goto mqiobase_exit; 1668 } 1669 1670 ha->msixbase = ioremap(pci_resource_start(ha->pdev, 2), 1671 pci_resource_len(ha->pdev, 2)); 1672 if (ha->msixbase) { 1673 /* Read MSIX vector size of the board */ 1674 pci_read_config_word(ha->pdev, 1675 QLA_83XX_PCI_MSIX_CONTROL, &msix); 1676 ha->msix_count = msix; 1677 /* Max queues are bounded by available msix vectors */ 1678 /* queue 0 uses two msix vectors */ 1679 if (ql2xmultique_tag) { 1680 cpus = num_online_cpus(); 1681 ha->max_rsp_queues = (ha->msix_count - 1 > cpus) ? 1682 (cpus + 1) : (ha->msix_count - 1); 1683 ha->max_req_queues = 2; 1684 } else if (ql2xmaxqueues > 1) { 1685 ha->max_req_queues = ql2xmaxqueues > QLA_MQ_SIZE ? 
1686 QLA_MQ_SIZE : ql2xmaxqueues; 1687 ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc00c, 1688 "QoS mode set, max no of request queues:%d.\n", 1689 ha->max_req_queues); 1690 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011b, 1691 "QoS mode set, max no of request queues:%d.\n", 1692 ha->max_req_queues); 1693 } 1694 ql_log_pci(ql_log_info, ha->pdev, 0x011c, 1695 "MSI-X vector count: %d.\n", msix); 1696 } else 1697 ql_log_pci(ql_log_info, ha->pdev, 0x011e, 1698 "BAR 1 not enabled.\n"); 1699 1700 mqiobase_exit: 1701 ha->msix_count = ha->max_rsp_queues + 1; 1702 1703 qlt_83xx_iospace_config(ha); 1704 1705 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011f, 1706 "MSIX Count:%d.\n", ha->msix_count); 1707 return 0; 1708 1709 iospace_error_exit: 1710 return -ENOMEM; 1711 } 1712 1713 static struct isp_operations qla2100_isp_ops = { 1714 .pci_config = qla2100_pci_config, 1715 .reset_chip = qla2x00_reset_chip, 1716 .chip_diag = qla2x00_chip_diag, 1717 .config_rings = qla2x00_config_rings, 1718 .reset_adapter = qla2x00_reset_adapter, 1719 .nvram_config = qla2x00_nvram_config, 1720 .update_fw_options = qla2x00_update_fw_options, 1721 .load_risc = qla2x00_load_risc, 1722 .pci_info_str = qla2x00_pci_info_str, 1723 .fw_version_str = qla2x00_fw_version_str, 1724 .intr_handler = qla2100_intr_handler, 1725 .enable_intrs = qla2x00_enable_intrs, 1726 .disable_intrs = qla2x00_disable_intrs, 1727 .abort_command = qla2x00_abort_command, 1728 .target_reset = qla2x00_abort_target, 1729 .lun_reset = qla2x00_lun_reset, 1730 .fabric_login = qla2x00_login_fabric, 1731 .fabric_logout = qla2x00_fabric_logout, 1732 .calc_req_entries = qla2x00_calc_iocbs_32, 1733 .build_iocbs = qla2x00_build_scsi_iocbs_32, 1734 .prep_ms_iocb = qla2x00_prep_ms_iocb, 1735 .prep_ms_fdmi_iocb = qla2x00_prep_ms_fdmi_iocb, 1736 .read_nvram = qla2x00_read_nvram_data, 1737 .write_nvram = qla2x00_write_nvram_data, 1738 .fw_dump = qla2100_fw_dump, 1739 .beacon_on = NULL, 1740 .beacon_off = NULL, 1741 .beacon_blink = NULL, 1742 .read_optrom = qla2x00_read_optrom_data, 1743 .write_optrom = qla2x00_write_optrom_data, 1744 .get_flash_version = qla2x00_get_flash_version, 1745 .start_scsi = qla2x00_start_scsi, 1746 .abort_isp = qla2x00_abort_isp, 1747 .iospace_config = qla2x00_iospace_config, 1748 .initialize_adapter = qla2x00_initialize_adapter, 1749 }; 1750 1751 static struct isp_operations qla2300_isp_ops = { 1752 .pci_config = qla2300_pci_config, 1753 .reset_chip = qla2x00_reset_chip, 1754 .chip_diag = qla2x00_chip_diag, 1755 .config_rings = qla2x00_config_rings, 1756 .reset_adapter = qla2x00_reset_adapter, 1757 .nvram_config = qla2x00_nvram_config, 1758 .update_fw_options = qla2x00_update_fw_options, 1759 .load_risc = qla2x00_load_risc, 1760 .pci_info_str = qla2x00_pci_info_str, 1761 .fw_version_str = qla2x00_fw_version_str, 1762 .intr_handler = qla2300_intr_handler, 1763 .enable_intrs = qla2x00_enable_intrs, 1764 .disable_intrs = qla2x00_disable_intrs, 1765 .abort_command = qla2x00_abort_command, 1766 .target_reset = qla2x00_abort_target, 1767 .lun_reset = qla2x00_lun_reset, 1768 .fabric_login = qla2x00_login_fabric, 1769 .fabric_logout = qla2x00_fabric_logout, 1770 .calc_req_entries = qla2x00_calc_iocbs_32, 1771 .build_iocbs = qla2x00_build_scsi_iocbs_32, 1772 .prep_ms_iocb = qla2x00_prep_ms_iocb, 1773 .prep_ms_fdmi_iocb = qla2x00_prep_ms_fdmi_iocb, 1774 .read_nvram = qla2x00_read_nvram_data, 1775 .write_nvram = qla2x00_write_nvram_data, 1776 .fw_dump = qla2300_fw_dump, 1777 .beacon_on = qla2x00_beacon_on, 1778 .beacon_off = qla2x00_beacon_off, 1779 .beacon_blink = 
qla2x00_beacon_blink, 1780 .read_optrom = qla2x00_read_optrom_data, 1781 .write_optrom = qla2x00_write_optrom_data, 1782 .get_flash_version = qla2x00_get_flash_version, 1783 .start_scsi = qla2x00_start_scsi, 1784 .abort_isp = qla2x00_abort_isp, 1785 .iospace_config = qla2x00_iospace_config, 1786 .initialize_adapter = qla2x00_initialize_adapter, 1787 }; 1788 1789 static struct isp_operations qla24xx_isp_ops = { 1790 .pci_config = qla24xx_pci_config, 1791 .reset_chip = qla24xx_reset_chip, 1792 .chip_diag = qla24xx_chip_diag, 1793 .config_rings = qla24xx_config_rings, 1794 .reset_adapter = qla24xx_reset_adapter, 1795 .nvram_config = qla24xx_nvram_config, 1796 .update_fw_options = qla24xx_update_fw_options, 1797 .load_risc = qla24xx_load_risc, 1798 .pci_info_str = qla24xx_pci_info_str, 1799 .fw_version_str = qla24xx_fw_version_str, 1800 .intr_handler = qla24xx_intr_handler, 1801 .enable_intrs = qla24xx_enable_intrs, 1802 .disable_intrs = qla24xx_disable_intrs, 1803 .abort_command = qla24xx_abort_command, 1804 .target_reset = qla24xx_abort_target, 1805 .lun_reset = qla24xx_lun_reset, 1806 .fabric_login = qla24xx_login_fabric, 1807 .fabric_logout = qla24xx_fabric_logout, 1808 .calc_req_entries = NULL, 1809 .build_iocbs = NULL, 1810 .prep_ms_iocb = qla24xx_prep_ms_iocb, 1811 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb, 1812 .read_nvram = qla24xx_read_nvram_data, 1813 .write_nvram = qla24xx_write_nvram_data, 1814 .fw_dump = qla24xx_fw_dump, 1815 .beacon_on = qla24xx_beacon_on, 1816 .beacon_off = qla24xx_beacon_off, 1817 .beacon_blink = qla24xx_beacon_blink, 1818 .read_optrom = qla24xx_read_optrom_data, 1819 .write_optrom = qla24xx_write_optrom_data, 1820 .get_flash_version = qla24xx_get_flash_version, 1821 .start_scsi = qla24xx_start_scsi, 1822 .abort_isp = qla2x00_abort_isp, 1823 .iospace_config = qla2x00_iospace_config, 1824 .initialize_adapter = qla2x00_initialize_adapter, 1825 }; 1826 1827 static struct isp_operations qla25xx_isp_ops = { 1828 .pci_config = qla25xx_pci_config, 1829 .reset_chip = qla24xx_reset_chip, 1830 .chip_diag = qla24xx_chip_diag, 1831 .config_rings = qla24xx_config_rings, 1832 .reset_adapter = qla24xx_reset_adapter, 1833 .nvram_config = qla24xx_nvram_config, 1834 .update_fw_options = qla24xx_update_fw_options, 1835 .load_risc = qla24xx_load_risc, 1836 .pci_info_str = qla24xx_pci_info_str, 1837 .fw_version_str = qla24xx_fw_version_str, 1838 .intr_handler = qla24xx_intr_handler, 1839 .enable_intrs = qla24xx_enable_intrs, 1840 .disable_intrs = qla24xx_disable_intrs, 1841 .abort_command = qla24xx_abort_command, 1842 .target_reset = qla24xx_abort_target, 1843 .lun_reset = qla24xx_lun_reset, 1844 .fabric_login = qla24xx_login_fabric, 1845 .fabric_logout = qla24xx_fabric_logout, 1846 .calc_req_entries = NULL, 1847 .build_iocbs = NULL, 1848 .prep_ms_iocb = qla24xx_prep_ms_iocb, 1849 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb, 1850 .read_nvram = qla25xx_read_nvram_data, 1851 .write_nvram = qla25xx_write_nvram_data, 1852 .fw_dump = qla25xx_fw_dump, 1853 .beacon_on = qla24xx_beacon_on, 1854 .beacon_off = qla24xx_beacon_off, 1855 .beacon_blink = qla24xx_beacon_blink, 1856 .read_optrom = qla25xx_read_optrom_data, 1857 .write_optrom = qla24xx_write_optrom_data, 1858 .get_flash_version = qla24xx_get_flash_version, 1859 .start_scsi = qla24xx_dif_start_scsi, 1860 .abort_isp = qla2x00_abort_isp, 1861 .iospace_config = qla2x00_iospace_config, 1862 .initialize_adapter = qla2x00_initialize_adapter, 1863 }; 1864 1865 static struct isp_operations qla81xx_isp_ops = { 1866 .pci_config = 
qla25xx_pci_config, 1867 .reset_chip = qla24xx_reset_chip, 1868 .chip_diag = qla24xx_chip_diag, 1869 .config_rings = qla24xx_config_rings, 1870 .reset_adapter = qla24xx_reset_adapter, 1871 .nvram_config = qla81xx_nvram_config, 1872 .update_fw_options = qla81xx_update_fw_options, 1873 .load_risc = qla81xx_load_risc, 1874 .pci_info_str = qla24xx_pci_info_str, 1875 .fw_version_str = qla24xx_fw_version_str, 1876 .intr_handler = qla24xx_intr_handler, 1877 .enable_intrs = qla24xx_enable_intrs, 1878 .disable_intrs = qla24xx_disable_intrs, 1879 .abort_command = qla24xx_abort_command, 1880 .target_reset = qla24xx_abort_target, 1881 .lun_reset = qla24xx_lun_reset, 1882 .fabric_login = qla24xx_login_fabric, 1883 .fabric_logout = qla24xx_fabric_logout, 1884 .calc_req_entries = NULL, 1885 .build_iocbs = NULL, 1886 .prep_ms_iocb = qla24xx_prep_ms_iocb, 1887 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb, 1888 .read_nvram = NULL, 1889 .write_nvram = NULL, 1890 .fw_dump = qla81xx_fw_dump, 1891 .beacon_on = qla24xx_beacon_on, 1892 .beacon_off = qla24xx_beacon_off, 1893 .beacon_blink = qla83xx_beacon_blink, 1894 .read_optrom = qla25xx_read_optrom_data, 1895 .write_optrom = qla24xx_write_optrom_data, 1896 .get_flash_version = qla24xx_get_flash_version, 1897 .start_scsi = qla24xx_dif_start_scsi, 1898 .abort_isp = qla2x00_abort_isp, 1899 .iospace_config = qla2x00_iospace_config, 1900 .initialize_adapter = qla2x00_initialize_adapter, 1901 }; 1902 1903 static struct isp_operations qla82xx_isp_ops = { 1904 .pci_config = qla82xx_pci_config, 1905 .reset_chip = qla82xx_reset_chip, 1906 .chip_diag = qla24xx_chip_diag, 1907 .config_rings = qla82xx_config_rings, 1908 .reset_adapter = qla24xx_reset_adapter, 1909 .nvram_config = qla81xx_nvram_config, 1910 .update_fw_options = qla24xx_update_fw_options, 1911 .load_risc = qla82xx_load_risc, 1912 .pci_info_str = qla24xx_pci_info_str, 1913 .fw_version_str = qla24xx_fw_version_str, 1914 .intr_handler = qla82xx_intr_handler, 1915 .enable_intrs = qla82xx_enable_intrs, 1916 .disable_intrs = qla82xx_disable_intrs, 1917 .abort_command = qla24xx_abort_command, 1918 .target_reset = qla24xx_abort_target, 1919 .lun_reset = qla24xx_lun_reset, 1920 .fabric_login = qla24xx_login_fabric, 1921 .fabric_logout = qla24xx_fabric_logout, 1922 .calc_req_entries = NULL, 1923 .build_iocbs = NULL, 1924 .prep_ms_iocb = qla24xx_prep_ms_iocb, 1925 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb, 1926 .read_nvram = qla24xx_read_nvram_data, 1927 .write_nvram = qla24xx_write_nvram_data, 1928 .fw_dump = qla82xx_fw_dump, 1929 .beacon_on = qla82xx_beacon_on, 1930 .beacon_off = qla82xx_beacon_off, 1931 .beacon_blink = NULL, 1932 .read_optrom = qla82xx_read_optrom_data, 1933 .write_optrom = qla82xx_write_optrom_data, 1934 .get_flash_version = qla82xx_get_flash_version, 1935 .start_scsi = qla82xx_start_scsi, 1936 .abort_isp = qla82xx_abort_isp, 1937 .iospace_config = qla82xx_iospace_config, 1938 .initialize_adapter = qla2x00_initialize_adapter, 1939 }; 1940 1941 static struct isp_operations qla8044_isp_ops = { 1942 .pci_config = qla82xx_pci_config, 1943 .reset_chip = qla82xx_reset_chip, 1944 .chip_diag = qla24xx_chip_diag, 1945 .config_rings = qla82xx_config_rings, 1946 .reset_adapter = qla24xx_reset_adapter, 1947 .nvram_config = qla81xx_nvram_config, 1948 .update_fw_options = qla24xx_update_fw_options, 1949 .load_risc = qla82xx_load_risc, 1950 .pci_info_str = qla24xx_pci_info_str, 1951 .fw_version_str = qla24xx_fw_version_str, 1952 .intr_handler = qla8044_intr_handler, 1953 .enable_intrs = 
static struct isp_operations qla8044_isp_ops = {
	.pci_config = qla82xx_pci_config,
	.reset_chip = qla82xx_reset_chip,
	.chip_diag = qla24xx_chip_diag,
	.config_rings = qla82xx_config_rings,
	.reset_adapter = qla24xx_reset_adapter,
	.nvram_config = qla81xx_nvram_config,
	.update_fw_options = qla24xx_update_fw_options,
	.load_risc = qla82xx_load_risc,
	.pci_info_str = qla24xx_pci_info_str,
	.fw_version_str = qla24xx_fw_version_str,
	.intr_handler = qla8044_intr_handler,
	.enable_intrs = qla82xx_enable_intrs,
	.disable_intrs = qla82xx_disable_intrs,
	.abort_command = qla24xx_abort_command,
	.target_reset = qla24xx_abort_target,
	.lun_reset = qla24xx_lun_reset,
	.fabric_login = qla24xx_login_fabric,
	.fabric_logout = qla24xx_fabric_logout,
	.calc_req_entries = NULL,
	.build_iocbs = NULL,
	.prep_ms_iocb = qla24xx_prep_ms_iocb,
	.prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
	.read_nvram = NULL,
	.write_nvram = NULL,
	.fw_dump = qla8044_fw_dump,
	.beacon_on = qla82xx_beacon_on,
	.beacon_off = qla82xx_beacon_off,
	.beacon_blink = NULL,
	.read_optrom = qla8044_read_optrom_data,
	.write_optrom = qla8044_write_optrom_data,
	.get_flash_version = qla82xx_get_flash_version,
	.start_scsi = qla82xx_start_scsi,
	.abort_isp = qla8044_abort_isp,
	.iospace_config = qla82xx_iospace_config,
	.initialize_adapter = qla2x00_initialize_adapter,
};

static struct isp_operations qla83xx_isp_ops = {
	.pci_config = qla25xx_pci_config,
	.reset_chip = qla24xx_reset_chip,
	.chip_diag = qla24xx_chip_diag,
	.config_rings = qla24xx_config_rings,
	.reset_adapter = qla24xx_reset_adapter,
	.nvram_config = qla81xx_nvram_config,
	.update_fw_options = qla81xx_update_fw_options,
	.load_risc = qla81xx_load_risc,
	.pci_info_str = qla24xx_pci_info_str,
	.fw_version_str = qla24xx_fw_version_str,
	.intr_handler = qla24xx_intr_handler,
	.enable_intrs = qla24xx_enable_intrs,
	.disable_intrs = qla24xx_disable_intrs,
	.abort_command = qla24xx_abort_command,
	.target_reset = qla24xx_abort_target,
	.lun_reset = qla24xx_lun_reset,
	.fabric_login = qla24xx_login_fabric,
	.fabric_logout = qla24xx_fabric_logout,
	.calc_req_entries = NULL,
	.build_iocbs = NULL,
	.prep_ms_iocb = qla24xx_prep_ms_iocb,
	.prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
	.read_nvram = NULL,
	.write_nvram = NULL,
	.fw_dump = qla83xx_fw_dump,
	.beacon_on = qla24xx_beacon_on,
	.beacon_off = qla24xx_beacon_off,
	.beacon_blink = qla83xx_beacon_blink,
	.read_optrom = qla25xx_read_optrom_data,
	.write_optrom = qla24xx_write_optrom_data,
	.get_flash_version = qla24xx_get_flash_version,
	.start_scsi = qla24xx_dif_start_scsi,
	.abort_isp = qla2x00_abort_isp,
	.iospace_config = qla83xx_iospace_config,
	.initialize_adapter = qla2x00_initialize_adapter,
};
static struct isp_operations qlafx00_isp_ops = {
	.pci_config = qlafx00_pci_config,
	.reset_chip = qlafx00_soft_reset,
	.chip_diag = qlafx00_chip_diag,
	.config_rings = qlafx00_config_rings,
	.reset_adapter = qlafx00_soft_reset,
	.nvram_config = NULL,
	.update_fw_options = NULL,
	.load_risc = NULL,
	.pci_info_str = qlafx00_pci_info_str,
	.fw_version_str = qlafx00_fw_version_str,
	.intr_handler = qlafx00_intr_handler,
	.enable_intrs = qlafx00_enable_intrs,
	.disable_intrs = qlafx00_disable_intrs,
	.abort_command = qla24xx_async_abort_command,
	.target_reset = qlafx00_abort_target,
	.lun_reset = qlafx00_lun_reset,
	.fabric_login = NULL,
	.fabric_logout = NULL,
	.calc_req_entries = NULL,
	.build_iocbs = NULL,
	.prep_ms_iocb = qla24xx_prep_ms_iocb,
	.prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
	.read_nvram = qla24xx_read_nvram_data,
	.write_nvram = qla24xx_write_nvram_data,
	.fw_dump = NULL,
	.beacon_on = qla24xx_beacon_on,
	.beacon_off = qla24xx_beacon_off,
	.beacon_blink = NULL,
	.read_optrom = qla24xx_read_optrom_data,
	.write_optrom = qla24xx_write_optrom_data,
	.get_flash_version = qla24xx_get_flash_version,
	.start_scsi = qlafx00_start_scsi,
	.abort_isp = qlafx00_abort_isp,
	.iospace_config = qlafx00_iospace_config,
	.initialize_adapter = qlafx00_initialize_adapter,
};

static struct isp_operations qla27xx_isp_ops = {
	.pci_config = qla25xx_pci_config,
	.reset_chip = qla24xx_reset_chip,
	.chip_diag = qla24xx_chip_diag,
	.config_rings = qla24xx_config_rings,
	.reset_adapter = qla24xx_reset_adapter,
	.nvram_config = qla81xx_nvram_config,
	.update_fw_options = qla81xx_update_fw_options,
	.load_risc = qla81xx_load_risc,
	.pci_info_str = qla24xx_pci_info_str,
	.fw_version_str = qla24xx_fw_version_str,
	.intr_handler = qla24xx_intr_handler,
	.enable_intrs = qla24xx_enable_intrs,
	.disable_intrs = qla24xx_disable_intrs,
	.abort_command = qla24xx_abort_command,
	.target_reset = qla24xx_abort_target,
	.lun_reset = qla24xx_lun_reset,
	.fabric_login = qla24xx_login_fabric,
	.fabric_logout = qla24xx_fabric_logout,
	.calc_req_entries = NULL,
	.build_iocbs = NULL,
	.prep_ms_iocb = qla24xx_prep_ms_iocb,
	.prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
	.read_nvram = NULL,
	.write_nvram = NULL,
	.fw_dump = qla27xx_fwdump,
	.beacon_on = qla24xx_beacon_on,
	.beacon_off = qla24xx_beacon_off,
	.beacon_blink = qla83xx_beacon_blink,
	.read_optrom = qla25xx_read_optrom_data,
	.write_optrom = qla24xx_write_optrom_data,
	.get_flash_version = qla24xx_get_flash_version,
	.start_scsi = qla24xx_dif_start_scsi,
	.abort_isp = qla2x00_abort_isp,
	.iospace_config = qla83xx_iospace_config,
	.initialize_adapter = qla2x00_initialize_adapter,
};
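/*
 * Each ISP family above supplies one isp_operations table; the rest of
 * the driver dispatches only through ha->isp_ops, which keeps generation
 * differences out of the common code paths.  A hypothetical caller
 * sketch (names illustrative, not from this file):
 *
 *	ha->isp_ops->reset_chip(vha);
 *	if (ha->isp_ops->load_risc)
 *		ha->isp_ops->load_risc(vha, &srisc_address);
 *
 * NULL entries (e.g. .load_risc on ISPFX00) mark operations a family
 * does not implement, so callers must check, or know the family, before
 * dispatching.
 */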
static inline void
qla2x00_set_isp_flags(struct qla_hw_data *ha)
{
	ha->device_type = DT_EXTENDED_IDS;
	switch (ha->pdev->device) {
	case PCI_DEVICE_ID_QLOGIC_ISP2100:
		ha->device_type |= DT_ISP2100;
		ha->device_type &= ~DT_EXTENDED_IDS;
		ha->fw_srisc_address = RISC_START_ADDRESS_2100;
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP2200:
		ha->device_type |= DT_ISP2200;
		ha->device_type &= ~DT_EXTENDED_IDS;
		ha->fw_srisc_address = RISC_START_ADDRESS_2100;
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP2300:
		ha->device_type |= DT_ISP2300;
		ha->device_type |= DT_ZIO_SUPPORTED;
		ha->fw_srisc_address = RISC_START_ADDRESS_2300;
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP2312:
		ha->device_type |= DT_ISP2312;
		ha->device_type |= DT_ZIO_SUPPORTED;
		ha->fw_srisc_address = RISC_START_ADDRESS_2300;
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP2322:
		ha->device_type |= DT_ISP2322;
		ha->device_type |= DT_ZIO_SUPPORTED;
		if (ha->pdev->subsystem_vendor == 0x1028 &&
		    ha->pdev->subsystem_device == 0x0170)
			ha->device_type |= DT_OEM_001;
		ha->fw_srisc_address = RISC_START_ADDRESS_2300;
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP6312:
		ha->device_type |= DT_ISP6312;
		ha->fw_srisc_address = RISC_START_ADDRESS_2300;
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP6322:
		ha->device_type |= DT_ISP6322;
		ha->fw_srisc_address = RISC_START_ADDRESS_2300;
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP2422:
		ha->device_type |= DT_ISP2422;
		ha->device_type |= DT_ZIO_SUPPORTED;
		ha->device_type |= DT_FWI2;
		ha->device_type |= DT_IIDMA;
		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP2432:
		ha->device_type |= DT_ISP2432;
		ha->device_type |= DT_ZIO_SUPPORTED;
		ha->device_type |= DT_FWI2;
		ha->device_type |= DT_IIDMA;
		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP8432:
		ha->device_type |= DT_ISP8432;
		ha->device_type |= DT_ZIO_SUPPORTED;
		ha->device_type |= DT_FWI2;
		ha->device_type |= DT_IIDMA;
		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP5422:
		ha->device_type |= DT_ISP5422;
		ha->device_type |= DT_FWI2;
		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP5432:
		ha->device_type |= DT_ISP5432;
		ha->device_type |= DT_FWI2;
		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP2532:
		ha->device_type |= DT_ISP2532;
		ha->device_type |= DT_ZIO_SUPPORTED;
		ha->device_type |= DT_FWI2;
		ha->device_type |= DT_IIDMA;
		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP8001:
		ha->device_type |= DT_ISP8001;
		ha->device_type |= DT_ZIO_SUPPORTED;
		ha->device_type |= DT_FWI2;
		ha->device_type |= DT_IIDMA;
		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP8021:
		ha->device_type |= DT_ISP8021;
		ha->device_type |= DT_ZIO_SUPPORTED;
		ha->device_type |= DT_FWI2;
		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
		/* Initialize 82XX ISP flags */
		qla82xx_init_flags(ha);
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP8044:
		ha->device_type |= DT_ISP8044;
		ha->device_type |= DT_ZIO_SUPPORTED;
		ha->device_type |= DT_FWI2;
		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
		/* Initialize 82XX ISP flags */
		qla82xx_init_flags(ha);
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP2031:
		ha->device_type |= DT_ISP2031;
		ha->device_type |= DT_ZIO_SUPPORTED;
		ha->device_type |= DT_FWI2;
		ha->device_type |= DT_IIDMA;
		ha->device_type |= DT_T10_PI;
		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP8031:
		ha->device_type |= DT_ISP8031;
		ha->device_type |= DT_ZIO_SUPPORTED;
		ha->device_type |= DT_FWI2;
		ha->device_type |= DT_IIDMA;
		ha->device_type |= DT_T10_PI;
		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
		break;
	case PCI_DEVICE_ID_QLOGIC_ISPF001:
		ha->device_type |= DT_ISPFX00;
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP2071:
		ha->device_type |= DT_ISP2071;
		ha->device_type |= DT_ZIO_SUPPORTED;
		ha->device_type |= DT_FWI2;
		ha->device_type |= DT_IIDMA;
		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP2271:
		ha->device_type |= DT_ISP2271;
		ha->device_type |= DT_ZIO_SUPPORTED;
		ha->device_type |= DT_FWI2;
		ha->device_type |= DT_IIDMA;
		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP2261:
		ha->device_type |= DT_ISP2261;
		ha->device_type |= DT_ZIO_SUPPORTED;
		ha->device_type |= DT_FWI2;
		ha->device_type |= DT_IIDMA;
		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
		break;
	}
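	/*
	 * Derive the physical port number.  For P3P (ISP82xx) parts it
	 * comes from the function number; otherwise it is inferred from
	 * the PCI interrupt pin register, which multi-port HBAs wire per
	 * port.  The exact decode below (minus one vs. inverted low bit)
	 * is per-family behavior inferred from the code, not from vendor
	 * documentation.
	 */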
	if (IS_QLA82XX(ha))
		ha->port_no = ha->portnum & 1;
	else {
		/* Get adapter physical port no from interrupt pin register. */
		pci_read_config_byte(ha->pdev, PCI_INTERRUPT_PIN, &ha->port_no);
		if (IS_QLA27XX(ha))
			ha->port_no--;
		else
			ha->port_no = !(ha->port_no & 1);
	}

	ql_dbg_pci(ql_dbg_init, ha->pdev, 0x000b,
	    "device_type=0x%x port=%d fw_srisc_address=0x%x.\n",
	    ha->device_type, ha->port_no, ha->fw_srisc_address);
}

static void
qla2xxx_scan_start(struct Scsi_Host *shost)
{
	scsi_qla_host_t *vha = shost_priv(shost);

	if (vha->hw->flags.running_gold_fw)
		return;

	set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
	set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
	set_bit(RSCN_UPDATE, &vha->dpc_flags);
	set_bit(NPIV_CONFIG_NEEDED, &vha->dpc_flags);
}

static int
qla2xxx_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	scsi_qla_host_t *vha = shost_priv(shost);

	if (!vha->host)
		return 1;
	if (time > vha->hw->loop_reset_delay * HZ)
		return 1;

	return atomic_read(&vha->loop_state) == LOOP_READY;
}
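/*
 * scan_start/scan_finished implement the SCSI midlayer's asynchronous
 * scan contract: scan_start() kicks the DPC thread via the dpc_flags
 * bits and returns immediately, and the midlayer then polls
 * scan_finished() until it returns nonzero - here, once the loop is
 * ready or loop_reset_delay seconds (in jiffies) have elapsed.
 */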
/*
 * PCI driver interface
 */
static int
qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int ret = -ENODEV;
	struct Scsi_Host *host;
	scsi_qla_host_t *base_vha = NULL;
	struct qla_hw_data *ha;
	char pci_info[30];
	char fw_str[30], wq_name[30];
	struct scsi_host_template *sht;
	int bars, mem_only = 0;
	uint16_t req_length = 0, rsp_length = 0;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;

	bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
	sht = &qla2xxx_driver_template;
	if (pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2422 ||
	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2432 ||
	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8432 ||
	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5422 ||
	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5432 ||
	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2532 ||
	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8001 ||
	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8021 ||
	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2031 ||
	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8031 ||
	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISPF001 ||
	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8044 ||
	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2071 ||
	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2271 ||
	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2261) {
		bars = pci_select_bars(pdev, IORESOURCE_MEM);
		mem_only = 1;
		ql_dbg_pci(ql_dbg_init, pdev, 0x0007,
		    "Mem only adapter.\n");
	}
	ql_dbg_pci(ql_dbg_init, pdev, 0x0008,
	    "Bars=%d.\n", bars);

	if (mem_only) {
		if (pci_enable_device_mem(pdev))
			goto probe_out;
	} else {
		if (pci_enable_device(pdev))
			goto probe_out;
	}

	/* This may fail but that's ok */
	pci_enable_pcie_error_reporting(pdev);

	ha = kzalloc(sizeof(struct qla_hw_data), GFP_KERNEL);
	if (!ha) {
		ql_log_pci(ql_log_fatal, pdev, 0x0009,
		    "Unable to allocate memory for ha.\n");
		goto probe_out;
	}
	ql_dbg_pci(ql_dbg_init, pdev, 0x000a,
	    "Memory allocated for ha=%p.\n", ha);
	ha->pdev = pdev;
	ha->tgt.enable_class_2 = ql2xenableclass2;
	INIT_LIST_HEAD(&ha->tgt.q_full_list);
	spin_lock_init(&ha->tgt.q_full_lock);
	spin_lock_init(&ha->tgt.sess_lock);
	spin_lock_init(&ha->tgt.atio_lock);

	/* Clear our data area */
	ha->bars = bars;
	ha->mem_only = mem_only;
	spin_lock_init(&ha->hardware_lock);
	spin_lock_init(&ha->vport_slock);
	mutex_init(&ha->selflogin_lock);
	mutex_init(&ha->optrom_mutex);

	/* Set ISP-type information. */
	qla2x00_set_isp_flags(ha);

	/* Set EEH reset type to fundamental if required by hba */
	if (IS_QLA24XX(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha) ||
	    IS_QLA83XX(ha) || IS_QLA27XX(ha))
		pdev->needs_freset = 1;

	ha->prev_topology = 0;
	ha->init_cb_size = sizeof(init_cb_t);
	ha->link_data_rate = PORT_SPEED_UNKNOWN;
	ha->optrom_size = OPTROM_SIZE_2300;

	/* Assign ISP specific operations. */
	if (IS_QLA2100(ha)) {
		ha->max_fibre_devices = MAX_FIBRE_DEVICES_2100;
		ha->mbx_count = MAILBOX_REGISTER_COUNT_2100;
		req_length = REQUEST_ENTRY_CNT_2100;
		rsp_length = RESPONSE_ENTRY_CNT_2100;
		ha->max_loop_id = SNS_LAST_LOOP_ID_2100;
		ha->gid_list_info_size = 4;
		ha->flash_conf_off = ~0;
		ha->flash_data_off = ~0;
		ha->nvram_conf_off = ~0;
		ha->nvram_data_off = ~0;
		ha->isp_ops = &qla2100_isp_ops;
	} else if (IS_QLA2200(ha)) {
		ha->max_fibre_devices = MAX_FIBRE_DEVICES_2100;
		ha->mbx_count = MAILBOX_REGISTER_COUNT_2200;
		req_length = REQUEST_ENTRY_CNT_2200;
		rsp_length = RESPONSE_ENTRY_CNT_2100;
		ha->max_loop_id = SNS_LAST_LOOP_ID_2100;
		ha->gid_list_info_size = 4;
		ha->flash_conf_off = ~0;
		ha->flash_data_off = ~0;
		ha->nvram_conf_off = ~0;
		ha->nvram_data_off = ~0;
		ha->isp_ops = &qla2100_isp_ops;
	} else if (IS_QLA23XX(ha)) {
		ha->max_fibre_devices = MAX_FIBRE_DEVICES_2100;
		ha->mbx_count = MAILBOX_REGISTER_COUNT;
		req_length = REQUEST_ENTRY_CNT_2200;
		rsp_length = RESPONSE_ENTRY_CNT_2300;
		ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
		ha->gid_list_info_size = 6;
		if (IS_QLA2322(ha) || IS_QLA6322(ha))
			ha->optrom_size = OPTROM_SIZE_2322;
		ha->flash_conf_off = ~0;
		ha->flash_data_off = ~0;
		ha->nvram_conf_off = ~0;
		ha->nvram_data_off = ~0;
		ha->isp_ops = &qla2300_isp_ops;
	} else if (IS_QLA24XX_TYPE(ha)) {
		ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
		ha->mbx_count = MAILBOX_REGISTER_COUNT;
		req_length = REQUEST_ENTRY_CNT_24XX;
		rsp_length = RESPONSE_ENTRY_CNT_2300;
		ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
		ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
		ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
		ha->gid_list_info_size = 8;
		ha->optrom_size = OPTROM_SIZE_24XX;
		ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA24XX;
		ha->isp_ops = &qla24xx_isp_ops;
		ha->flash_conf_off = FARX_ACCESS_FLASH_CONF;
		ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
		ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
		ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
	} else if (IS_QLA25XX(ha)) {
		ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
		ha->mbx_count = MAILBOX_REGISTER_COUNT;
		req_length = REQUEST_ENTRY_CNT_24XX;
		rsp_length = RESPONSE_ENTRY_CNT_2300;
		ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
		ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
		ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
		ha->gid_list_info_size = 8;
		ha->optrom_size = OPTROM_SIZE_25XX;
		ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
		ha->isp_ops = &qla25xx_isp_ops;
		ha->flash_conf_off = FARX_ACCESS_FLASH_CONF;
		ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
		ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
		ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
	} else if (IS_QLA81XX(ha)) {
		ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
		ha->mbx_count = MAILBOX_REGISTER_COUNT;
		req_length = REQUEST_ENTRY_CNT_24XX;
		rsp_length = RESPONSE_ENTRY_CNT_2300;
		ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
		ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
		ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
		ha->gid_list_info_size = 8;
		ha->optrom_size = OPTROM_SIZE_81XX;
		ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
		ha->isp_ops = &qla81xx_isp_ops;
		ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX;
		ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX;
		ha->nvram_conf_off = ~0;
		ha->nvram_data_off = ~0;
	} else if (IS_QLA82XX(ha)) {
		ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
		ha->mbx_count = MAILBOX_REGISTER_COUNT;
		req_length = REQUEST_ENTRY_CNT_82XX;
		rsp_length = RESPONSE_ENTRY_CNT_82XX;
		ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
		ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
		ha->gid_list_info_size = 8;
		ha->optrom_size = OPTROM_SIZE_82XX;
		ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
		ha->isp_ops = &qla82xx_isp_ops;
		ha->flash_conf_off = FARX_ACCESS_FLASH_CONF;
		ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
		ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
		ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
	} else if (IS_QLA8044(ha)) {
		ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
		ha->mbx_count = MAILBOX_REGISTER_COUNT;
		req_length = REQUEST_ENTRY_CNT_82XX;
		rsp_length = RESPONSE_ENTRY_CNT_82XX;
		ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
		ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
		ha->gid_list_info_size = 8;
		ha->optrom_size = OPTROM_SIZE_83XX;
		ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
		ha->isp_ops = &qla8044_isp_ops;
		ha->flash_conf_off = FARX_ACCESS_FLASH_CONF;
		ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
		ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
		ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
	} else if (IS_QLA83XX(ha)) {
		ha->portnum = PCI_FUNC(ha->pdev->devfn);
		ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
		ha->mbx_count = MAILBOX_REGISTER_COUNT;
		req_length = REQUEST_ENTRY_CNT_83XX;
		rsp_length = RESPONSE_ENTRY_CNT_83XX;
		ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
		ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
		ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
		ha->gid_list_info_size = 8;
		ha->optrom_size = OPTROM_SIZE_83XX;
		ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
		ha->isp_ops = &qla83xx_isp_ops;
		ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX;
		ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX;
		ha->nvram_conf_off = ~0;
		ha->nvram_data_off = ~0;
	} else if (IS_QLAFX00(ha)) {
		ha->max_fibre_devices = MAX_FIBRE_DEVICES_FX00;
		ha->mbx_count = MAILBOX_REGISTER_COUNT_FX00;
		ha->aen_mbx_count = AEN_MAILBOX_REGISTER_COUNT_FX00;
		req_length = REQUEST_ENTRY_CNT_FX00;
		rsp_length = RESPONSE_ENTRY_CNT_FX00;
		ha->isp_ops = &qlafx00_isp_ops;
		ha->port_down_retry_count = 30; /* default value */
		ha->mr.fw_hbt_cnt = QLAFX00_HEARTBEAT_INTERVAL;
		ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL;
		ha->mr.fw_critemp_timer_tick = QLAFX00_CRITEMP_INTERVAL;
		ha->mr.fw_hbt_en = 1;
		ha->mr.host_info_resend = false;
		ha->mr.hinfo_resend_timer_tick = QLAFX00_HINFO_RESEND_INTERVAL;
	} else if (IS_QLA27XX(ha)) {
		ha->portnum = PCI_FUNC(ha->pdev->devfn);
		ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
		ha->mbx_count = MAILBOX_REGISTER_COUNT;
		req_length = REQUEST_ENTRY_CNT_83XX;
		rsp_length = RESPONSE_ENTRY_CNT_83XX;
		ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
		ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
		ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
		ha->gid_list_info_size = 8;
		ha->optrom_size = OPTROM_SIZE_83XX;
		ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
		ha->isp_ops = &qla27xx_isp_ops;
		ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX;
		ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX;
		ha->nvram_conf_off = ~0;
		ha->nvram_data_off = ~0;
	}

	ql_dbg_pci(ql_dbg_init, pdev, 0x001e,
	    "mbx_count=%d, req_length=%d, "
	    "rsp_length=%d, max_loop_id=%d, init_cb_size=%d, "
	    "gid_list_info_size=%d, optrom_size=%d, nvram_npiv_size=%d, "
	    "max_fibre_devices=%d.\n",
	    ha->mbx_count, req_length, rsp_length, ha->max_loop_id,
	    ha->init_cb_size, ha->gid_list_info_size, ha->optrom_size,
	    ha->nvram_npiv_size, ha->max_fibre_devices);
	ql_dbg_pci(ql_dbg_init, pdev, 0x001f,
	    "isp_ops=%p, flash_conf_off=%d, "
	    "flash_data_off=%d, nvram_conf_off=%d, nvram_data_off=%d.\n",
	    ha->isp_ops, ha->flash_conf_off, ha->flash_data_off,
	    ha->nvram_conf_off, ha->nvram_data_off);

	/* Configure PCI I/O space */
	ret = ha->isp_ops->iospace_config(ha);
	if (ret)
		goto iospace_config_failed;

	ql_log_pci(ql_log_info, pdev, 0x001d,
	    "Found an ISP%04X irq %d iobase 0x%p.\n",
	    pdev->device, pdev->irq, ha->iobase);
	mutex_init(&ha->vport_lock);
	init_completion(&ha->mbx_cmd_comp);
	complete(&ha->mbx_cmd_comp);
	init_completion(&ha->mbx_intr_comp);
	init_completion(&ha->dcbx_comp);
	init_completion(&ha->lb_portup_comp);

	set_bit(0, (unsigned long *) ha->vp_idx_map);

	qla2x00_config_dma_addressing(ha);
	ql_dbg_pci(ql_dbg_init, pdev, 0x0020,
	    "64 Bit addressing is %s.\n",
	    ha->flags.enable_64bit_addressing ? "enable" :
	    "disable");
	ret = qla2x00_mem_alloc(ha, req_length, rsp_length, &req, &rsp);
	if (ret) {
		ql_log_pci(ql_log_fatal, pdev, 0x0031,
		    "Failed to allocate memory for adapter, aborting.\n");

		goto probe_hw_failed;
	}

	req->max_q_depth = MAX_Q_DEPTH;
	if (ql2xmaxqdepth != 0 && ql2xmaxqdepth <= 0xffffU)
		req->max_q_depth = ql2xmaxqdepth;

	base_vha = qla2x00_create_host(sht, ha);
	if (!base_vha) {
		ret = -ENOMEM;
		qla2x00_mem_free(ha);
		qla2x00_free_req_que(ha, req);
		qla2x00_free_rsp_que(ha, rsp);
		goto probe_hw_failed;
	}

	pci_set_drvdata(pdev, base_vha);
	set_bit(PFLG_DRIVER_PROBING, &base_vha->pci_flags);

	host = base_vha->host;
	base_vha->req = req;
	if (IS_QLA2XXX_MIDTYPE(ha))
		base_vha->mgmt_svr_loop_id = 10 + base_vha->vp_idx;
	else
		base_vha->mgmt_svr_loop_id = MANAGEMENT_SERVER +
		    base_vha->vp_idx;
	/* Setup fcport template structure. */
	ha->mr.fcport.vha = base_vha;
	ha->mr.fcport.port_type = FCT_UNKNOWN;
	ha->mr.fcport.loop_id = FC_NO_LOOP_ID;
	qla2x00_set_fcport_state(&ha->mr.fcport, FCS_UNCONFIGURED);
	ha->mr.fcport.supported_classes = FC_COS_UNSPECIFIED;
	ha->mr.fcport.scan_state = 1;

	/* Set the SG table size based on ISP type */
	if (!IS_FWI2_CAPABLE(ha)) {
		if (IS_QLA2100(ha))
			host->sg_tablesize = 32;
	} else {
		if (!IS_QLA82XX(ha))
			host->sg_tablesize = QLA_SG_ALL;
	}
	host->max_id = ha->max_fibre_devices;
	host->cmd_per_lun = 3;
	host->unique_id = host->host_no;
	if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)
		host->max_cmd_len = 32;
	else
		host->max_cmd_len = MAX_CMDSZ;
	host->max_channel = MAX_BUSES - 1;
	/* Older HBAs support only 16-bit LUNs */
	if (!IS_QLAFX00(ha) && !IS_FWI2_CAPABLE(ha) &&
	    ql2xmaxlun > 0xffff)
		host->max_lun = 0xffff;
	else
		host->max_lun = ql2xmaxlun;
	host->transportt = qla2xxx_transport_template;
	sht->vendor_id = (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC);

	ql_dbg(ql_dbg_init, base_vha, 0x0033,
	    "max_id=%d this_id=%d "
	    "cmd_per_lun=%d unique_id=%d max_cmd_len=%d max_channel=%d "
	    "max_lun=%llu transportt=%p, vendor_id=%llu.\n", host->max_id,
	    host->this_id, host->cmd_per_lun, host->unique_id,
	    host->max_cmd_len, host->max_channel, host->max_lun,
	    host->transportt, sht->vendor_id);

que_init:
	/* Alloc arrays of request and response ring ptrs */
	if (!qla2x00_alloc_queues(ha, req, rsp)) {
		ql_log(ql_log_fatal, base_vha, 0x003d,
		    "Failed to allocate memory for queue pointers..."
		    "aborting.\n");
		goto probe_init_failed;
	}

	qlt_probe_one_stage1(base_vha, ha);

	/* Set up the irqs */
	ret = qla2x00_request_irqs(ha, rsp);
	if (ret)
		goto probe_init_failed;

	pci_save_state(pdev);

	/* Assign back pointers */
	rsp->req = req;
	req->rsp = rsp;

	if (IS_QLAFX00(ha)) {
		ha->rsp_q_map[0] = rsp;
		ha->req_q_map[0] = req;
		set_bit(0, ha->req_qid_map);
		set_bit(0, ha->rsp_qid_map);
	}
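	/*
	 * The ring in/out pointer registers live at different offsets per
	 * family, so the generic ring code is pointed at the right register
	 * window here: the FWI2 defaults first, then the multiqueue
	 * (isp25mq), ISPFX00 and P3P variants override them.  Everything
	 * downstream touches the rings only through req->req_q_in/out and
	 * rsp->rsp_q_in/out.
	 */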
	/* FWI2-capable only. */
	req->req_q_in = &ha->iobase->isp24.req_q_in;
	req->req_q_out = &ha->iobase->isp24.req_q_out;
	rsp->rsp_q_in = &ha->iobase->isp24.rsp_q_in;
	rsp->rsp_q_out = &ha->iobase->isp24.rsp_q_out;
	if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
		req->req_q_in = &ha->mqiobase->isp25mq.req_q_in;
		req->req_q_out = &ha->mqiobase->isp25mq.req_q_out;
		rsp->rsp_q_in = &ha->mqiobase->isp25mq.rsp_q_in;
		rsp->rsp_q_out = &ha->mqiobase->isp25mq.rsp_q_out;
	}

	if (IS_QLAFX00(ha)) {
		req->req_q_in = &ha->iobase->ispfx00.req_q_in;
		req->req_q_out = &ha->iobase->ispfx00.req_q_out;
		rsp->rsp_q_in = &ha->iobase->ispfx00.rsp_q_in;
		rsp->rsp_q_out = &ha->iobase->ispfx00.rsp_q_out;
	}

	if (IS_P3P_TYPE(ha)) {
		req->req_q_out = &ha->iobase->isp82.req_q_out[0];
		rsp->rsp_q_in = &ha->iobase->isp82.rsp_q_in[0];
		rsp->rsp_q_out = &ha->iobase->isp82.rsp_q_out[0];
	}

	ql_dbg(ql_dbg_multiq, base_vha, 0xc009,
	    "rsp_q_map=%p req_q_map=%p rsp->req=%p req->rsp=%p.\n",
	    ha->rsp_q_map, ha->req_q_map, rsp->req, req->rsp);
	ql_dbg(ql_dbg_multiq, base_vha, 0xc00a,
	    "req->req_q_in=%p req->req_q_out=%p "
	    "rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n",
	    req->req_q_in, req->req_q_out,
	    rsp->rsp_q_in, rsp->rsp_q_out);
	ql_dbg(ql_dbg_init, base_vha, 0x003e,
	    "rsp_q_map=%p req_q_map=%p rsp->req=%p req->rsp=%p.\n",
	    ha->rsp_q_map, ha->req_q_map, rsp->req, req->rsp);
	ql_dbg(ql_dbg_init, base_vha, 0x003f,
	    "req->req_q_in=%p req->req_q_out=%p rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n",
	    req->req_q_in, req->req_q_out, rsp->rsp_q_in, rsp->rsp_q_out);

	if (ha->isp_ops->initialize_adapter(base_vha)) {
		ql_log(ql_log_fatal, base_vha, 0x00d6,
		    "Failed to initialize adapter - Adapter flags %x.\n",
		    base_vha->device_flags);

		if (IS_QLA82XX(ha)) {
			qla82xx_idc_lock(ha);
			qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
			    QLA8XXX_DEV_FAILED);
			qla82xx_idc_unlock(ha);
			ql_log(ql_log_fatal, base_vha, 0x00d7,
			    "HW State: FAILED.\n");
		} else if (IS_QLA8044(ha)) {
			qla8044_idc_lock(ha);
			qla8044_wr_direct(base_vha,
			    QLA8044_CRB_DEV_STATE_INDEX,
			    QLA8XXX_DEV_FAILED);
			qla8044_idc_unlock(ha);
			ql_log(ql_log_fatal, base_vha, 0x0150,
			    "HW State: FAILED.\n");
		}

		ret = -ENODEV;
		goto probe_failed;
	}

	if (IS_QLAFX00(ha))
		host->can_queue = QLAFX00_MAX_CANQUEUE;
	else
		host->can_queue = req->num_outstanding_cmds - 10;

	ql_dbg(ql_dbg_init, base_vha, 0x0032,
	    "can_queue=%d, req=%p, mgmt_svr_loop_id=%d, sg_tablesize=%d.\n",
	    host->can_queue, base_vha->req,
	    base_vha->mgmt_svr_loop_id, host->sg_tablesize);

	if (ha->mqenable) {
		if (qla25xx_setup_mode(base_vha)) {
			ql_log(ql_log_warn, base_vha, 0x00ec,
			    "Failed to create queues, falling back to single queue mode.\n");
			goto que_init;
		}
	}

	if (ha->flags.running_gold_fw)
		goto skip_dpc;

	/*
	 * Startup the kernel thread for this host adapter
	 */
	ha->dpc_thread = kthread_create(qla2x00_do_dpc, ha,
	    "%s_dpc", base_vha->host_str);
	if (IS_ERR(ha->dpc_thread)) {
		ql_log(ql_log_fatal, base_vha, 0x00ed,
		    "Failed to start DPC thread.\n");
		ret = PTR_ERR(ha->dpc_thread);
		goto probe_failed;
	}
	ql_dbg(ql_dbg_init, base_vha, 0x00ee,
	    "DPC thread started successfully.\n");
	/*
	 * If we're not coming up in initiator mode, we might sit for
	 * a while without waking up the dpc thread, which leads to a
	 * stuck process warning.  So just kick the dpc once here and
	 * let the kthread start (and go back to sleep in qla2x00_do_dpc).
	 */
	qla2xxx_wake_dpc(base_vha);

	INIT_WORK(&ha->board_disable, qla2x00_disable_board_on_pci_error);

	if (IS_QLA8031(ha) || IS_MCTP_CAPABLE(ha)) {
		sprintf(wq_name, "qla2xxx_%lu_dpc_lp_wq", base_vha->host_no);
		ha->dpc_lp_wq = create_singlethread_workqueue(wq_name);
		INIT_WORK(&ha->idc_aen, qla83xx_service_idc_aen);

		sprintf(wq_name, "qla2xxx_%lu_dpc_hp_wq", base_vha->host_no);
		ha->dpc_hp_wq = create_singlethread_workqueue(wq_name);
		INIT_WORK(&ha->nic_core_reset, qla83xx_nic_core_reset_work);
		INIT_WORK(&ha->idc_state_handler,
		    qla83xx_idc_state_handler_work);
		INIT_WORK(&ha->nic_core_unrecoverable,
		    qla83xx_nic_core_unrecoverable_work);
	}

skip_dpc:
	list_add_tail(&base_vha->list, &ha->vp_list);
	base_vha->host->irq = ha->pdev->irq;

	/* Initialize the timer */
	qla2x00_start_timer(base_vha, qla2x00_timer, WATCH_INTERVAL);
	ql_dbg(ql_dbg_init, base_vha, 0x00ef,
	    "Started qla2x00_timer with interval=%d.\n", WATCH_INTERVAL);
	ql_dbg(ql_dbg_init, base_vha, 0x00f0,
	    "Detected hba at address=%p.\n", ha);

	if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
		if (ha->fw_attributes & BIT_4) {
			int prot = 0, guard;

			base_vha->flags.difdix_supported = 1;
			ql_dbg(ql_dbg_init, base_vha, 0x00f1,
			    "Registering for DIF/DIX type 1 and 3 protection.\n");
			if (ql2xenabledif == 1)
				prot = SHOST_DIX_TYPE0_PROTECTION;
			scsi_host_set_prot(host,
			    prot | SHOST_DIF_TYPE1_PROTECTION
			    | SHOST_DIF_TYPE2_PROTECTION
			    | SHOST_DIF_TYPE3_PROTECTION
			    | SHOST_DIX_TYPE1_PROTECTION
			    | SHOST_DIX_TYPE2_PROTECTION
			    | SHOST_DIX_TYPE3_PROTECTION);

			guard = SHOST_DIX_GUARD_CRC;

			if (IS_PI_IPGUARD_CAPABLE(ha) &&
			    (ql2xenabledif > 1 || IS_PI_DIFB_DIX0_CAPABLE(ha)))
				guard |= SHOST_DIX_GUARD_IP;

			scsi_host_set_guard(host, guard);
		} else
			base_vha->flags.difdix_supported = 0;
	}

	ha->isp_ops->enable_intrs(ha);
	if (IS_QLAFX00(ha)) {
		ret = qlafx00_fx_disc(base_vha,
		    &base_vha->hw->mr.fcport, FXDISC_GET_CONFIG_INFO);
		host->sg_tablesize = (ha->mr.extended_io_enabled) ?
		    QLA_SG_ALL : 128;
	}

	ret = scsi_add_host(host, &pdev->dev);
	if (ret)
		goto probe_failed;

	base_vha->flags.init_done = 1;
	base_vha->flags.online = 1;
	ha->prev_minidump_failed = 0;

	ql_dbg(ql_dbg_init, base_vha, 0x00f2,
	    "Init done and hba is online.\n");

	if (qla_ini_mode_enabled(base_vha))
		scsi_scan_host(host);
	else
		ql_dbg(ql_dbg_init, base_vha, 0x0122,
		    "skipping scsi_scan_host() for non-initiator port\n");

	qla2x00_alloc_sysfs_attr(base_vha);

	if (IS_QLAFX00(ha)) {
		ret = qlafx00_fx_disc(base_vha,
		    &base_vha->hw->mr.fcport, FXDISC_GET_PORT_INFO);

		/* Register system information */
		ret = qlafx00_fx_disc(base_vha,
		    &base_vha->hw->mr.fcport, FXDISC_REG_HOST_INFO);
	}

	qla2x00_init_host_attr(base_vha);

	qla2x00_dfs_setup(base_vha);

	ql_log(ql_log_info, base_vha, 0x00fb,
	    "QLogic %s - %s.\n", ha->model_number, ha->model_desc);
	ql_log(ql_log_info, base_vha, 0x00fc,
	    "ISP%04X: %s @ %s hdma%c host#=%ld fw=%s.\n",
	    pdev->device, ha->isp_ops->pci_info_str(base_vha, pci_info),
	    pci_name(pdev), ha->flags.enable_64bit_addressing ? '+' : '-',
	    base_vha->host_no,
	    ha->isp_ops->fw_version_str(base_vha, fw_str, sizeof(fw_str)));

	qlt_add_target(ha, base_vha);

	clear_bit(PFLG_DRIVER_PROBING, &base_vha->pci_flags);
	return 0;

probe_init_failed:
	qla2x00_free_req_que(ha, req);
	ha->req_q_map[0] = NULL;
	clear_bit(0, ha->req_qid_map);
	qla2x00_free_rsp_que(ha, rsp);
	ha->rsp_q_map[0] = NULL;
	clear_bit(0, ha->rsp_qid_map);
	ha->max_req_queues = ha->max_rsp_queues = 0;

probe_failed:
	if (base_vha->timer_active)
		qla2x00_stop_timer(base_vha);
	base_vha->flags.online = 0;
	if (ha->dpc_thread) {
		struct task_struct *t = ha->dpc_thread;

		ha->dpc_thread = NULL;
		kthread_stop(t);
	}

	qla2x00_free_device(base_vha);

	scsi_host_put(base_vha->host);

probe_hw_failed:
	qla2x00_clear_drv_active(ha);

iospace_config_failed:
	if (IS_P3P_TYPE(ha)) {
		/* Only unmap what was actually mapped. */
		if (ha->nx_pcibase)
			iounmap((device_reg_t *)ha->nx_pcibase);
		if (!ql2xdbwr)
			iounmap((device_reg_t *)ha->nxdb_wr_ptr);
	} else {
		if (ha->iobase)
			iounmap(ha->iobase);
		if (ha->cregbase)
			iounmap(ha->cregbase);
	}
	pci_release_selected_regions(ha->pdev, ha->bars);
	kfree(ha);
	ha = NULL;

probe_out:
	pci_disable_device(pdev);
	return ret;
}

static void
qla2x00_shutdown(struct pci_dev *pdev)
{
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;

	if (!atomic_read(&pdev->enable_cnt))
		return;

	vha = pci_get_drvdata(pdev);
	ha = vha->hw;

	/* Notify ISPFX00 firmware */
	if (IS_QLAFX00(ha))
		qlafx00_driver_shutdown(vha, 20);

	/* Turn-off FCE trace */
	if (ha->flags.fce_enabled) {
		qla2x00_disable_fce_trace(vha, NULL, NULL);
		ha->flags.fce_enabled = 0;
	}

	/* Turn-off EFT trace */
	if (ha->eft)
		qla2x00_disable_eft_trace(vha);

	/* Stop currently executing firmware. */
	qla2x00_try_to_stop_firmware(vha);

	/* Turn adapter off line */
	vha->flags.online = 0;

	/* turn-off interrupts on the card */
	if (ha->interrupts_on) {
		vha->flags.init_done = 0;
		ha->isp_ops->disable_intrs(ha);
	}

	qla2x00_free_irqs(vha);

	qla2x00_free_fw_dump(ha);

	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
}
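/*
 * The helpers that follow (qla2x00_delete_all_vps,
 * qla2x00_destroy_deferred_work, qla2x00_unmap_iobases,
 * qla2x00_clear_drv_active) factor out teardown steps that
 * qla2x00_remove_one shares with the probe-failure and PCI error
 * handling paths.
 */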
/* Deletes all the virtual ports for a given ha */
static void
qla2x00_delete_all_vps(struct qla_hw_data *ha, scsi_qla_host_t *base_vha)
{
	scsi_qla_host_t *vha;
	unsigned long flags;

	mutex_lock(&ha->vport_lock);
	while (ha->cur_vport_count) {
		spin_lock_irqsave(&ha->vport_slock, flags);

		BUG_ON(base_vha->list.next == &ha->vp_list);
		/* This assumes first entry in ha->vp_list is always base vha */
		vha = list_first_entry(&base_vha->list, scsi_qla_host_t, list);
		scsi_host_get(vha->host);

		spin_unlock_irqrestore(&ha->vport_slock, flags);
		mutex_unlock(&ha->vport_lock);

		fc_vport_terminate(vha->fc_vport);
		scsi_host_put(vha->host);

		mutex_lock(&ha->vport_lock);
	}
	mutex_unlock(&ha->vport_lock);
}

/* Stops all deferred work threads */
static void
qla2x00_destroy_deferred_work(struct qla_hw_data *ha)
{
	/* Flush the work queue and remove it */
	if (ha->wq) {
		flush_workqueue(ha->wq);
		destroy_workqueue(ha->wq);
		ha->wq = NULL;
	}

	/* Cancel all work and destroy DPC workqueues */
	if (ha->dpc_lp_wq) {
		cancel_work_sync(&ha->idc_aen);
		destroy_workqueue(ha->dpc_lp_wq);
		ha->dpc_lp_wq = NULL;
	}

	if (ha->dpc_hp_wq) {
		cancel_work_sync(&ha->nic_core_reset);
		cancel_work_sync(&ha->idc_state_handler);
		cancel_work_sync(&ha->nic_core_unrecoverable);
		destroy_workqueue(ha->dpc_hp_wq);
		ha->dpc_hp_wq = NULL;
	}

	/* Kill the kernel thread for this host */
	if (ha->dpc_thread) {
		struct task_struct *t = ha->dpc_thread;

		/*
		 * qla2xxx_wake_dpc checks for ->dpc_thread
		 * so we need to zero it out.
		 */
		ha->dpc_thread = NULL;
		kthread_stop(t);
	}
}

static void
qla2x00_unmap_iobases(struct qla_hw_data *ha)
{
	if (IS_QLA82XX(ha)) {
		iounmap((device_reg_t *)ha->nx_pcibase);
		if (!ql2xdbwr)
			iounmap((device_reg_t *)ha->nxdb_wr_ptr);
	} else {
		if (ha->iobase)
			iounmap(ha->iobase);

		if (ha->cregbase)
			iounmap(ha->cregbase);

		if (ha->mqiobase)
			iounmap(ha->mqiobase);

		if ((IS_QLA83XX(ha) || IS_QLA27XX(ha)) && ha->msixbase)
			iounmap(ha->msixbase);
	}
}

static void
qla2x00_clear_drv_active(struct qla_hw_data *ha)
{
	if (IS_QLA8044(ha)) {
		qla8044_idc_lock(ha);
		qla8044_clear_drv_active(ha);
		qla8044_idc_unlock(ha);
	} else if (IS_QLA82XX(ha)) {
		qla82xx_idc_lock(ha);
		qla82xx_clear_drv_active(ha);
		qla82xx_idc_unlock(ha);
	}
}
static void
qla2x00_remove_one(struct pci_dev *pdev)
{
	scsi_qla_host_t *base_vha;
	struct qla_hw_data *ha;

	base_vha = pci_get_drvdata(pdev);
	ha = base_vha->hw;

	/*
	 * Indicate device removal to prevent future board_disable and wait
	 * until any pending board_disable has completed.
	 */
	set_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags);
	cancel_work_sync(&ha->board_disable);

	/*
	 * If the PCI device is disabled then there was a PCI-disconnect and
	 * qla2x00_disable_board_on_pci_error has taken care of most of the
	 * resources.
	 */
	if (!atomic_read(&pdev->enable_cnt)) {
		scsi_host_put(base_vha->host);
		kfree(ha);
		pci_set_drvdata(pdev, NULL);
		return;
	}

	qla2x00_wait_for_hba_ready(base_vha);

	set_bit(UNLOADING, &base_vha->dpc_flags);

	if (IS_QLAFX00(ha))
		qlafx00_driver_shutdown(base_vha, 20);

	qla2x00_delete_all_vps(ha, base_vha);

	if (IS_QLA8031(ha)) {
		ql_dbg(ql_dbg_p3p, base_vha, 0xb07e,
		    "Clearing fcoe driver presence.\n");
		if (qla83xx_clear_drv_presence(base_vha) != QLA_SUCCESS)
			ql_dbg(ql_dbg_p3p, base_vha, 0xb079,
			    "Error while clearing DRV-Presence.\n");
	}

	qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16);

	qla2x00_dfs_remove(base_vha);

	qla84xx_put_chip(base_vha);

	/* Laser should be disabled only for ISP2031 */
	if (IS_QLA2031(ha))
		qla83xx_disable_laser(base_vha);

	/* Disable timer */
	if (base_vha->timer_active)
		qla2x00_stop_timer(base_vha);

	base_vha->flags.online = 0;

	/* free DMA memory */
	if (ha->exlogin_buf)
		qla2x00_free_exlogin_buffer(ha);

	/* free DMA memory */
	if (ha->exchoffld_buf)
		qla2x00_free_exchoffld_buffer(ha);

	qla2x00_destroy_deferred_work(ha);

	qlt_remove_target(ha, base_vha);

	qla2x00_free_sysfs_attr(base_vha, true);

	fc_remove_host(base_vha->host);

	scsi_remove_host(base_vha->host);

	qla2x00_free_device(base_vha);

	qla2x00_clear_drv_active(ha);

	scsi_host_put(base_vha->host);

	qla2x00_unmap_iobases(ha);

	pci_release_selected_regions(ha->pdev, ha->bars);
	kfree(ha);
	ha = NULL;

	pci_disable_pcie_error_reporting(pdev);

	pci_disable_device(pdev);
}
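/*
 * qla2x00_free_device() is the shared teardown used both by remove_one
 * above and by the probe failure path: it aborts outstanding commands,
 * stops the timer, deletes the extra queues, quiesces firmware and
 * interrupts, and only then releases fcports, memory and queue
 * structures.
 */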
static void
qla2x00_free_device(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;

	qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);

	/* Disable timer */
	if (vha->timer_active)
		qla2x00_stop_timer(vha);

	qla25xx_delete_queues(vha);

	if (ha->flags.fce_enabled)
		qla2x00_disable_fce_trace(vha, NULL, NULL);

	if (ha->eft)
		qla2x00_disable_eft_trace(vha);

	/* Stop currently executing firmware. */
	qla2x00_try_to_stop_firmware(vha);

	vha->flags.online = 0;

	/* turn-off interrupts on the card */
	if (ha->interrupts_on) {
		vha->flags.init_done = 0;
		ha->isp_ops->disable_intrs(ha);
	}

	qla2x00_free_irqs(vha);

	qla2x00_free_fcports(vha);

	qla2x00_mem_free(ha);

	qla82xx_md_free(vha);

	qla2x00_free_queues(ha);
}

void qla2x00_free_fcports(struct scsi_qla_host *vha)
{
	fc_port_t *fcport, *tfcport;

	list_for_each_entry_safe(fcport, tfcport, &vha->vp_fcports, list) {
		list_del(&fcport->list);
		qla2x00_clear_loop_id(fcport);
		kfree(fcport);
		fcport = NULL;
	}
}

static inline void
qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport,
    int defer)
{
	struct fc_rport *rport;
	scsi_qla_host_t *base_vha;
	unsigned long flags;

	if (!fcport->rport)
		return;

	rport = fcport->rport;
	if (defer) {
		base_vha = pci_get_drvdata(vha->hw->pdev);
		spin_lock_irqsave(vha->host->host_lock, flags);
		fcport->drport = rport;
		spin_unlock_irqrestore(vha->host->host_lock, flags);
		qlt_do_generation_tick(vha, &base_vha->total_fcport_update_gen);
		set_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
		qla2xxx_wake_dpc(base_vha);
	} else {
		int now;

		if (rport)
			fc_remote_port_delete(rport);
		qlt_do_generation_tick(vha, &now);
		qlt_fc_port_deleted(vha, fcport, now);
	}
}

/*
 * qla2x00_mark_device_lost Updates fcport state when device goes offline.
 *
 * Input:
 *	ha = adapter block pointer.
 *	fcport = port structure pointer.
 *
 * Return:
 *	None.
 *
 * Context:
 */
void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport,
    int do_login, int defer)
{
	if (IS_QLAFX00(vha->hw)) {
		qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
		qla2x00_schedule_rport_del(vha, fcport, defer);
		return;
	}

	if (atomic_read(&fcport->state) == FCS_ONLINE &&
	    vha->vp_idx == fcport->vha->vp_idx) {
		qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
		qla2x00_schedule_rport_del(vha, fcport, defer);
	}
	/*
	 * We may need to retry the login, so don't change the state of the
	 * port but do the retries.
	 */
	if (atomic_read(&fcport->state) != FCS_DEVICE_DEAD)
		qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);

	if (!do_login)
		return;

	set_bit(RELOGIN_NEEDED, &vha->dpc_flags);

	if (fcport->login_retry == 0) {
		fcport->login_retry = vha->hw->login_retry_count;

		ql_dbg(ql_dbg_disc, vha, 0x2067,
		    "Port login retry %8phN, id = 0x%04x retry cnt=%d.\n",
		    fcport->port_name, fcport->loop_id, fcport->login_retry);
	}
}
/*
 * qla2x00_mark_all_devices_lost
 *	Updates fcport state when device goes offline.
 *
 * Input:
 *	ha = adapter block pointer.
 *	fcport = port structure pointer.
 *
 * Return:
 *	None.
 *
 * Context:
 */
void
qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer)
{
	fc_port_t *fcport;

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (vha->vp_idx != 0 && vha->vp_idx != fcport->vha->vp_idx)
			continue;

		/*
		 * No point in marking the device as lost, if the device is
		 * already DEAD.
		 */
		if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD)
			continue;
		if (atomic_read(&fcport->state) == FCS_ONLINE) {
			qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
			if (defer)
				qla2x00_schedule_rport_del(vha, fcport, defer);
			else if (vha->vp_idx == fcport->vha->vp_idx)
				qla2x00_schedule_rport_del(vha, fcport, defer);
		}
	}
}
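/*
 * qla2x00_mem_alloc() below uses the classic kernel goto-unwind error
 * style: each allocation gets a matching fail_* label, and the labels
 * are ordered so that a failure at step N falls through and releases
 * steps N-1..1 in reverse.  A minimal sketch of the shape (names
 * illustrative, not from this file):
 *
 *	a = alloc_a();
 *	if (!a)
 *		goto fail;
 *	b = alloc_b();
 *	if (!b)
 *		goto fail_free_a;
 *	return 0;
 * fail_free_a:
 *	free_a(a);
 * fail:
 *	return -ENOMEM;
 */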
/*
 * qla2x00_mem_alloc
 *	Allocates adapter memory.
 *
 * Returns:
 *	0  = success.
 *	!0 = failure.
 */
static int
qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
    struct req_que **req, struct rsp_que **rsp)
{
	char name[16];

	ha->init_cb = dma_alloc_coherent(&ha->pdev->dev, ha->init_cb_size,
	    &ha->init_cb_dma, GFP_KERNEL);
	if (!ha->init_cb)
		goto fail;

	if (qlt_mem_alloc(ha) < 0)
		goto fail_free_init_cb;

	ha->gid_list = dma_alloc_coherent(&ha->pdev->dev,
	    qla2x00_gid_list_size(ha), &ha->gid_list_dma, GFP_KERNEL);
	if (!ha->gid_list)
		goto fail_free_tgt_mem;

	ha->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep);
	if (!ha->srb_mempool)
		goto fail_free_gid_list;

	if (IS_P3P_TYPE(ha)) {
		/* Allocate cache for CT6 Ctx. */
		if (!ctx_cachep) {
			ctx_cachep = kmem_cache_create("qla2xxx_ctx",
			    sizeof(struct ct6_dsd), 0,
			    SLAB_HWCACHE_ALIGN, NULL);
			if (!ctx_cachep)
				goto fail_free_gid_list;
		}
		ha->ctx_mempool = mempool_create_slab_pool(SRB_MIN_REQ,
		    ctx_cachep);
		if (!ha->ctx_mempool)
			goto fail_free_srb_mempool;
		ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0021,
		    "ctx_cachep=%p ctx_mempool=%p.\n",
		    ctx_cachep, ha->ctx_mempool);
	}

	/* Get memory for cached NVRAM */
	ha->nvram = kzalloc(MAX_NVRAM_SIZE, GFP_KERNEL);
	if (!ha->nvram)
		goto fail_free_ctx_mempool;

	snprintf(name, sizeof(name), "%s_%d", QLA2XXX_DRIVER_NAME,
	    ha->pdev->device);
	ha->s_dma_pool = dma_pool_create(name, &ha->pdev->dev,
	    DMA_POOL_SIZE, 8, 0);
	if (!ha->s_dma_pool)
		goto fail_free_nvram;

	ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0022,
	    "init_cb=%p gid_list=%p, srb_mempool=%p s_dma_pool=%p.\n",
	    ha->init_cb, ha->gid_list, ha->srb_mempool, ha->s_dma_pool);

	if (IS_P3P_TYPE(ha) || ql2xenabledif) {
		ha->dl_dma_pool = dma_pool_create(name, &ha->pdev->dev,
		    DSD_LIST_DMA_POOL_SIZE, 8, 0);
		if (!ha->dl_dma_pool) {
			ql_log_pci(ql_log_fatal, ha->pdev, 0x0023,
			    "Failed to allocate memory for dl_dma_pool.\n");
			goto fail_s_dma_pool;
		}

		ha->fcp_cmnd_dma_pool = dma_pool_create(name, &ha->pdev->dev,
		    FCP_CMND_DMA_POOL_SIZE, 8, 0);
		if (!ha->fcp_cmnd_dma_pool) {
			ql_log_pci(ql_log_fatal, ha->pdev, 0x0024,
			    "Failed to allocate memory for fcp_cmnd_dma_pool.\n");
			goto fail_dl_dma_pool;
		}
		ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0025,
		    "dl_dma_pool=%p fcp_cmnd_dma_pool=%p.\n",
		    ha->dl_dma_pool, ha->fcp_cmnd_dma_pool);
	}

	/* Allocate memory for SNS commands */
	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
		/* Get consistent memory allocated for SNS commands */
		ha->sns_cmd = dma_alloc_coherent(&ha->pdev->dev,
		    sizeof(struct sns_cmd_pkt), &ha->sns_cmd_dma, GFP_KERNEL);
		if (!ha->sns_cmd)
			goto fail_dma_pool;
		ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0026,
		    "sns_cmd: %p.\n", ha->sns_cmd);
	} else {
		/* Get consistent memory allocated for MS IOCB */
		ha->ms_iocb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
		    &ha->ms_iocb_dma);
		if (!ha->ms_iocb)
			goto fail_dma_pool;
		/* Get consistent memory allocated for CT SNS commands */
		ha->ct_sns = dma_alloc_coherent(&ha->pdev->dev,
		    sizeof(struct ct_sns_pkt), &ha->ct_sns_dma, GFP_KERNEL);
		if (!ha->ct_sns)
			goto fail_free_ms_iocb;
		ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0027,
		    "ms_iocb=%p ct_sns=%p.\n",
		    ha->ms_iocb, ha->ct_sns);
	}

	/* Allocate memory for request ring */
	*req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
	if (!*req) {
		ql_log_pci(ql_log_fatal, ha->pdev, 0x0028,
		    "Failed to allocate memory for req.\n");
		goto fail_req;
	}
	(*req)->length = req_len;
	(*req)->ring = dma_alloc_coherent(&ha->pdev->dev,
	    ((*req)->length + 1) * sizeof(request_t),
	    &(*req)->dma, GFP_KERNEL);
	if (!(*req)->ring) {
		ql_log_pci(ql_log_fatal, ha->pdev, 0x0029,
		    "Failed to allocate memory for req_ring.\n");
		goto fail_req_ring;
	}
	/* Allocate memory for response ring */
	*rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
	if (!*rsp) {
		ql_log_pci(ql_log_fatal, ha->pdev, 0x002a,
		    "Failed to allocate memory for rsp.\n");
		goto fail_rsp;
	}
	(*rsp)->hw = ha;
	(*rsp)->length = rsp_len;
	(*rsp)->ring = dma_alloc_coherent(&ha->pdev->dev,
	    ((*rsp)->length + 1) * sizeof(response_t),
	    &(*rsp)->dma, GFP_KERNEL);
	if (!(*rsp)->ring) {
		ql_log_pci(ql_log_fatal, ha->pdev, 0x002b,
		    "Failed to allocate memory for rsp_ring.\n");
		goto fail_rsp_ring;
	}
	(*req)->rsp = *rsp;
	(*rsp)->req = *req;
	ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002c,
	    "req=%p req->length=%d req->ring=%p rsp=%p "
	    "rsp->length=%d rsp->ring=%p.\n",
	    *req, (*req)->length, (*req)->ring, *rsp, (*rsp)->length,
	    (*rsp)->ring);
	/* Allocate memory for NVRAM data for vports */
	if (ha->nvram_npiv_size) {
		ha->npiv_info = kzalloc(sizeof(struct qla_npiv_entry) *
		    ha->nvram_npiv_size, GFP_KERNEL);
		if (!ha->npiv_info) {
			ql_log_pci(ql_log_fatal, ha->pdev, 0x002d,
			    "Failed to allocate memory for npiv_info.\n");
			goto fail_npiv_info;
		}
	} else
		ha->npiv_info = NULL;

	/* Get consistent memory allocated for EX-INIT-CB. */
	if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha)) {
		ha->ex_init_cb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
		    &ha->ex_init_cb_dma);
		if (!ha->ex_init_cb)
			goto fail_ex_init_cb;
		ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002e,
		    "ex_init_cb=%p.\n", ha->ex_init_cb);
	}

	INIT_LIST_HEAD(&ha->gbl_dsd_list);
	/* Get consistent memory allocated for Async Port-Database. */
	if (!IS_FWI2_CAPABLE(ha)) {
		ha->async_pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
		    &ha->async_pd_dma);
		if (!ha->async_pd)
			goto fail_async_pd;
		ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002f,
		    "async_pd=%p.\n", ha->async_pd);
	}

	INIT_LIST_HEAD(&ha->vp_list);

	/* Allocate memory for our loop_id bitmap */
	ha->loop_id_map = kzalloc(BITS_TO_LONGS(LOOPID_MAP_SIZE) * sizeof(long),
	    GFP_KERNEL);
	if (!ha->loop_id_map)
		goto fail_async_pd;
	else {
		qla2x00_set_reserved_loop_ids(ha);
		ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0123,
		    "loop_id_map=%p.\n", ha->loop_id_map);
	}

	return 0;

fail_async_pd:
	dma_pool_free(ha->s_dma_pool, ha->ex_init_cb, ha->ex_init_cb_dma);
fail_ex_init_cb:
	kfree(ha->npiv_info);
fail_npiv_info:
	dma_free_coherent(&ha->pdev->dev, ((*rsp)->length + 1) *
	    sizeof(response_t), (*rsp)->ring, (*rsp)->dma);
	(*rsp)->ring = NULL;
	(*rsp)->dma = 0;
fail_rsp_ring:
	kfree(*rsp);
fail_rsp:
	dma_free_coherent(&ha->pdev->dev, ((*req)->length + 1) *
	    sizeof(request_t), (*req)->ring, (*req)->dma);
	(*req)->ring = NULL;
	(*req)->dma = 0;
fail_req_ring:
	kfree(*req);
fail_req:
	dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt),
	    ha->ct_sns, ha->ct_sns_dma);
	ha->ct_sns = NULL;
	ha->ct_sns_dma = 0;
fail_free_ms_iocb:
	dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
	ha->ms_iocb = NULL;
	ha->ms_iocb_dma = 0;
fail_dma_pool:
	if (IS_QLA82XX(ha) || ql2xenabledif) {
		dma_pool_destroy(ha->fcp_cmnd_dma_pool);
		ha->fcp_cmnd_dma_pool = NULL;
	}
fail_dl_dma_pool:
	if (IS_QLA82XX(ha) || ql2xenabledif) {
		dma_pool_destroy(ha->dl_dma_pool);
		ha->dl_dma_pool = NULL;
	}
fail_s_dma_pool:
	dma_pool_destroy(ha->s_dma_pool);
	ha->s_dma_pool = NULL;
fail_free_nvram:
	kfree(ha->nvram);
	ha->nvram = NULL;
fail_free_ctx_mempool:
	mempool_destroy(ha->ctx_mempool);
	ha->ctx_mempool = NULL;
fail_free_srb_mempool:
	mempool_destroy(ha->srb_mempool);
	ha->srb_mempool = NULL;
fail_free_gid_list:
	dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
	    ha->gid_list,
	    ha->gid_list_dma);
	ha->gid_list = NULL;
	ha->gid_list_dma = 0;
fail_free_tgt_mem:
	qlt_mem_free(ha);
fail_free_init_cb:
	dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, ha->init_cb,
	    ha->init_cb_dma);
	ha->init_cb = NULL;
	ha->init_cb_dma = 0;
fail:
	ql_log(ql_log_fatal, NULL, 0x0030,
	    "Memory allocation failure.\n");
	return -ENOMEM;
}
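/*
 * The two *_buffer helpers below follow the same negotiate-then-allocate
 * pattern: query the firmware for the per-entry size and the maximum
 * count it supports, clamp the module parameter against that maximum,
 * allocate one coherent DMA buffer, and hand its address back to the
 * firmware.  If the final configuration step fails, the buffer is freed
 * again and the feature is simply left unconfigured.
 */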
int
qla2x00_set_exlogins_buffer(scsi_qla_host_t *vha)
{
	int rval;
	uint16_t size, max_cnt, temp;
	struct qla_hw_data *ha = vha->hw;

	/* Return if we don't need to allocate any extended logins */
	if (!ql2xexlogins)
		return QLA_SUCCESS;

	ql_log(ql_log_info, vha, 0xd021, "EXLOGIN count: %d.\n", ql2xexlogins);
	max_cnt = 0;
	rval = qla_get_exlogin_status(vha, &size, &max_cnt);
	if (rval != QLA_SUCCESS) {
		ql_log_pci(ql_log_fatal, ha->pdev, 0xd029,
		    "Failed to get exlogin status.\n");
		return rval;
	}

	temp = (ql2xexlogins > max_cnt) ? max_cnt : ql2xexlogins;
	ha->exlogin_size = (size * temp);
	ql_log(ql_log_info, vha, 0xd024,
	    "EXLOGIN: max_logins=%d, portdb=0x%x, total=%d.\n",
	    max_cnt, size, temp);

	ql_log(ql_log_info, vha, 0xd025, "EXLOGIN: requested size=0x%x\n",
	    ha->exlogin_size);

	/* Get consistent memory for extended logins */
	ha->exlogin_buf = dma_alloc_coherent(&ha->pdev->dev,
	    ha->exlogin_size, &ha->exlogin_buf_dma, GFP_KERNEL);
	if (!ha->exlogin_buf) {
		ql_log_pci(ql_log_fatal, ha->pdev, 0xd02a,
		    "Failed to allocate memory for exlogin_buf_dma.\n");
		return -ENOMEM;
	}

	/* Now configure the dma buffer */
	rval = qla_set_exlogin_mem_cfg(vha, ha->exlogin_buf_dma);
	if (rval) {
		ql_log(ql_log_fatal, vha, 0x00cf,
		    "Setup extended login buffer ****FAILED****.\n");
		qla2x00_free_exlogin_buffer(ha);
	}

	return rval;
}

/*
 * qla2x00_free_exlogin_buffer
 *
 * Input:
 *	ha = adapter block pointer
 */
void
qla2x00_free_exlogin_buffer(struct qla_hw_data *ha)
{
	if (ha->exlogin_buf) {
		dma_free_coherent(&ha->pdev->dev, ha->exlogin_size,
		    ha->exlogin_buf, ha->exlogin_buf_dma);
		ha->exlogin_buf = NULL;
		ha->exlogin_size = 0;
	}
}

int
qla2x00_set_exchoffld_buffer(scsi_qla_host_t *vha)
{
	int rval;
	uint16_t size, max_cnt, temp;
	struct qla_hw_data *ha = vha->hw;

	/* Return if we don't need to allocate any exchange offload buffers */
	if (!ql2xexchoffld)
		return QLA_SUCCESS;

	ql_log(ql_log_info, vha, 0xd014,
	    "Exchange offload count: %d.\n", ql2xexchoffld);

	max_cnt = 0;
	rval = qla_get_exchoffld_status(vha, &size, &max_cnt);
	if (rval != QLA_SUCCESS) {
		ql_log_pci(ql_log_fatal, ha->pdev, 0xd012,
		    "Failed to get exchange offload status.\n");
		return rval;
	}

	temp = (ql2xexchoffld > max_cnt) ? max_cnt : ql2xexchoffld;
	ha->exchoffld_size = (size * temp);
	ql_log(ql_log_info, vha, 0xd016,
	    "Exchange offload: max_count=%d, buffers=0x%x, total=%d.\n",
	    max_cnt, size, temp);

	ql_log(ql_log_info, vha, 0xd017,
	    "Exchange Buffers requested size = 0x%x\n", ha->exchoffld_size);

	/* Get consistent memory for exchange offload */
	ha->exchoffld_buf = dma_alloc_coherent(&ha->pdev->dev,
	    ha->exchoffld_size, &ha->exchoffld_buf_dma, GFP_KERNEL);
	if (!ha->exchoffld_buf) {
		ql_log_pci(ql_log_fatal, ha->pdev, 0xd013,
		    "Failed to allocate memory for exchoffld_buf_dma.\n");
		return -ENOMEM;
	}

	/* Now configure the dma buffer */
	rval = qla_set_exchoffld_mem_cfg(vha, ha->exchoffld_buf_dma);
	if (rval) {
		ql_log(ql_log_fatal, vha, 0xd02e,
		    "Setup exchange offload buffer ****FAILED****.\n");
		qla2x00_free_exchoffld_buffer(ha);
	}

	return rval;
}

/*
 * qla2x00_free_exchoffld_buffer
 *
 * Input:
 *	ha = adapter block pointer
 */
void
qla2x00_free_exchoffld_buffer(struct qla_hw_data *ha)
{
	if (ha->exchoffld_buf) {
		dma_free_coherent(&ha->pdev->dev, ha->exchoffld_size,
		    ha->exchoffld_buf, ha->exchoffld_buf_dma);
		ha->exchoffld_buf = NULL;
		ha->exchoffld_size = 0;
	}
}
3756 * 3757 * Input: 3758 * ha = adapter block pointer 3759 */ 3760 static void 3761 qla2x00_free_fw_dump(struct qla_hw_data *ha) 3762 { 3763 if (ha->fce) 3764 dma_free_coherent(&ha->pdev->dev, 3765 FCE_SIZE, ha->fce, ha->fce_dma); 3766 3767 if (ha->eft) 3768 dma_free_coherent(&ha->pdev->dev, 3769 EFT_SIZE, ha->eft, ha->eft_dma); 3770 3771 if (ha->fw_dump) 3772 vfree(ha->fw_dump); 3773 if (ha->fw_dump_template) 3774 vfree(ha->fw_dump_template); 3775 3776 ha->fce = NULL; 3777 ha->fce_dma = 0; 3778 ha->eft = NULL; 3779 ha->eft_dma = 0; 3780 ha->fw_dumped = 0; 3781 ha->fw_dump_cap_flags = 0; 3782 ha->fw_dump_reading = 0; 3783 ha->fw_dump = NULL; 3784 ha->fw_dump_len = 0; 3785 ha->fw_dump_template = NULL; 3786 ha->fw_dump_template_len = 0; 3787 } 3788 3789 /* 3790 * qla2x00_mem_free 3791 * Frees all adapter allocated memory. 3792 * 3793 * Input: 3794 * ha = adapter block pointer. 3795 */ 3796 static void 3797 qla2x00_mem_free(struct qla_hw_data *ha) 3798 { 3799 qla2x00_free_fw_dump(ha); 3800 3801 if (ha->mctp_dump) 3802 dma_free_coherent(&ha->pdev->dev, MCTP_DUMP_SIZE, ha->mctp_dump, 3803 ha->mctp_dump_dma); 3804 3805 if (ha->srb_mempool) 3806 mempool_destroy(ha->srb_mempool); 3807 3808 if (ha->dcbx_tlv) 3809 dma_free_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE, 3810 ha->dcbx_tlv, ha->dcbx_tlv_dma); 3811 3812 if (ha->xgmac_data) 3813 dma_free_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE, 3814 ha->xgmac_data, ha->xgmac_data_dma); 3815 3816 if (ha->sns_cmd) 3817 dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt), 3818 ha->sns_cmd, ha->sns_cmd_dma); 3819 3820 if (ha->ct_sns) 3821 dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt), 3822 ha->ct_sns, ha->ct_sns_dma); 3823 3824 if (ha->sfp_data) 3825 dma_pool_free(ha->s_dma_pool, ha->sfp_data, ha->sfp_data_dma); 3826 3827 if (ha->ms_iocb) 3828 dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma); 3829 3830 if (ha->ex_init_cb) 3831 dma_pool_free(ha->s_dma_pool, 3832 ha->ex_init_cb, ha->ex_init_cb_dma); 3833 3834 if (ha->async_pd) 3835 dma_pool_free(ha->s_dma_pool, ha->async_pd, ha->async_pd_dma); 3836 3837 if (ha->s_dma_pool) 3838 dma_pool_destroy(ha->s_dma_pool); 3839 3840 if (ha->gid_list) 3841 dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha), 3842 ha->gid_list, ha->gid_list_dma); 3843 3844 if (IS_QLA82XX(ha)) { 3845 if (!list_empty(&ha->gbl_dsd_list)) { 3846 struct dsd_dma *dsd_ptr, *tdsd_ptr; 3847 3848 /* clean up allocated prev pool */ 3849 list_for_each_entry_safe(dsd_ptr, 3850 tdsd_ptr, &ha->gbl_dsd_list, list) { 3851 dma_pool_free(ha->dl_dma_pool, 3852 dsd_ptr->dsd_addr, dsd_ptr->dsd_list_dma); 3853 list_del(&dsd_ptr->list); 3854 kfree(dsd_ptr); 3855 } 3856 } 3857 } 3858 3859 if (ha->dl_dma_pool) 3860 dma_pool_destroy(ha->dl_dma_pool); 3861 3862 if (ha->fcp_cmnd_dma_pool) 3863 dma_pool_destroy(ha->fcp_cmnd_dma_pool); 3864 3865 if (ha->ctx_mempool) 3866 mempool_destroy(ha->ctx_mempool); 3867 3868 qlt_mem_free(ha); 3869 3870 if (ha->init_cb) 3871 dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, 3872 ha->init_cb, ha->init_cb_dma); 3873 vfree(ha->optrom_buffer); 3874 kfree(ha->nvram); 3875 kfree(ha->npiv_info); 3876 kfree(ha->swl); 3877 kfree(ha->loop_id_map); 3878 3879 ha->srb_mempool = NULL; 3880 ha->ctx_mempool = NULL; 3881 ha->sns_cmd = NULL; 3882 ha->sns_cmd_dma = 0; 3883 ha->ct_sns = NULL; 3884 ha->ct_sns_dma = 0; 3885 ha->ms_iocb = NULL; 3886 ha->ms_iocb_dma = 0; 3887 ha->init_cb = NULL; 3888 ha->init_cb_dma = 0; 3889 ha->ex_init_cb = NULL; 3890 ha->ex_init_cb_dma = 0; 3891 ha->async_pd = NULL; 3892 
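/*
 * Editor's note -- illustrative sketch only, not driver code: the
 * teardown in qla2x00_mem_free() consistently pairs each allocation
 * with its matching release primitive and then poisons the handle, so
 * a repeated call is harmless.  With a hypothetical example_buf member
 * the pattern would read:
 *
 *	if (ha->example_buf)
 *		dma_free_coherent(&ha->pdev->dev, EXAMPLE_SIZE,
 *		    ha->example_buf, ha->example_buf_dma);
 *	ha->example_buf = NULL;
 *	ha->example_buf_dma = 0;
 */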
ha->async_pd_dma = 0; 3893 3894 ha->s_dma_pool = NULL; 3895 ha->dl_dma_pool = NULL; 3896 ha->fcp_cmnd_dma_pool = NULL; 3897 3898 ha->gid_list = NULL; 3899 ha->gid_list_dma = 0; 3900 3901 ha->tgt.atio_ring = NULL; 3902 ha->tgt.atio_dma = 0; 3903 ha->tgt.tgt_vp_map = NULL; 3904 } 3905 3906 struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht, 3907 struct qla_hw_data *ha) 3908 { 3909 struct Scsi_Host *host; 3910 struct scsi_qla_host *vha = NULL; 3911 3912 host = scsi_host_alloc(sht, sizeof(scsi_qla_host_t)); 3913 if (host == NULL) { 3914 ql_log_pci(ql_log_fatal, ha->pdev, 0x0107, 3915 "Failed to allocate host from the scsi layer, aborting.\n"); 3916 goto fail; 3917 } 3918 3919 /* Clear our data area */ 3920 vha = shost_priv(host); 3921 memset(vha, 0, sizeof(scsi_qla_host_t)); 3922 3923 vha->host = host; 3924 vha->host_no = host->host_no; 3925 vha->hw = ha; 3926 3927 INIT_LIST_HEAD(&vha->vp_fcports); 3928 INIT_LIST_HEAD(&vha->work_list); 3929 INIT_LIST_HEAD(&vha->list); 3930 INIT_LIST_HEAD(&vha->qla_cmd_list); 3931 INIT_LIST_HEAD(&vha->qla_sess_op_cmd_list); 3932 INIT_LIST_HEAD(&vha->logo_list); 3933 INIT_LIST_HEAD(&vha->plogi_ack_list); 3934 3935 spin_lock_init(&vha->work_lock); 3936 spin_lock_init(&vha->cmd_list_lock); 3937 3938 sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no); 3939 ql_dbg(ql_dbg_init, vha, 0x0041, 3940 "Allocated the host=%p hw=%p vha=%p dev_name=%s", 3941 vha->host, vha->hw, vha, 3942 dev_name(&(ha->pdev->dev))); 3943 3944 return vha; 3945 3946 fail: 3947 return vha; 3948 } 3949 3950 static struct qla_work_evt * 3951 qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type) 3952 { 3953 struct qla_work_evt *e; 3954 uint8_t bail; 3955 3956 QLA_VHA_MARK_BUSY(vha, bail); 3957 if (bail) 3958 return NULL; 3959 3960 e = kzalloc(sizeof(struct qla_work_evt), GFP_ATOMIC); 3961 if (!e) { 3962 QLA_VHA_MARK_NOT_BUSY(vha); 3963 return NULL; 3964 } 3965 3966 INIT_LIST_HEAD(&e->list); 3967 e->type = type; 3968 e->flags = QLA_EVT_FLAG_FREE; 3969 return e; 3970 } 3971 3972 static int 3973 qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e) 3974 { 3975 unsigned long flags; 3976 3977 spin_lock_irqsave(&vha->work_lock, flags); 3978 list_add_tail(&e->list, &vha->work_list); 3979 spin_unlock_irqrestore(&vha->work_lock, flags); 3980 qla2xxx_wake_dpc(vha); 3981 3982 return QLA_SUCCESS; 3983 } 3984 3985 int 3986 qla2x00_post_aen_work(struct scsi_qla_host *vha, enum fc_host_event_code code, 3987 u32 data) 3988 { 3989 struct qla_work_evt *e; 3990 3991 e = qla2x00_alloc_work(vha, QLA_EVT_AEN); 3992 if (!e) 3993 return QLA_FUNCTION_FAILED; 3994 3995 e->u.aen.code = code; 3996 e->u.aen.data = data; 3997 return qla2x00_post_work(vha, e); 3998 } 3999 4000 int 4001 qla2x00_post_idc_ack_work(struct scsi_qla_host *vha, uint16_t *mb) 4002 { 4003 struct qla_work_evt *e; 4004 4005 e = qla2x00_alloc_work(vha, QLA_EVT_IDC_ACK); 4006 if (!e) 4007 return QLA_FUNCTION_FAILED; 4008 4009 memcpy(e->u.idc_ack.mb, mb, QLA_IDC_ACK_REGS * sizeof(uint16_t)); 4010 return qla2x00_post_work(vha, e); 4011 } 4012 4013 #define qla2x00_post_async_work(name, type) \ 4014 int qla2x00_post_async_##name##_work( \ 4015 struct scsi_qla_host *vha, \ 4016 fc_port_t *fcport, uint16_t *data) \ 4017 { \ 4018 struct qla_work_evt *e; \ 4019 \ 4020 e = qla2x00_alloc_work(vha, type); \ 4021 if (!e) \ 4022 return QLA_FUNCTION_FAILED; \ 4023 \ 4024 e->u.logio.fcport = fcport; \ 4025 if (data) { \ 4026 e->u.logio.data[0] = data[0]; \ 4027 e->u.logio.data[1] = data[1]; \ 4028 } \ 
4029 return qla2x00_post_work(vha, e); \ 4030 } 4031 4032 qla2x00_post_async_work(login, QLA_EVT_ASYNC_LOGIN); 4033 qla2x00_post_async_work(login_done, QLA_EVT_ASYNC_LOGIN_DONE); 4034 qla2x00_post_async_work(logout, QLA_EVT_ASYNC_LOGOUT); 4035 qla2x00_post_async_work(logout_done, QLA_EVT_ASYNC_LOGOUT_DONE); 4036 qla2x00_post_async_work(adisc, QLA_EVT_ASYNC_ADISC); 4037 qla2x00_post_async_work(adisc_done, QLA_EVT_ASYNC_ADISC_DONE); 4038 4039 int 4040 qla2x00_post_uevent_work(struct scsi_qla_host *vha, u32 code) 4041 { 4042 struct qla_work_evt *e; 4043 4044 e = qla2x00_alloc_work(vha, QLA_EVT_UEVENT); 4045 if (!e) 4046 return QLA_FUNCTION_FAILED; 4047 4048 e->u.uevent.code = code; 4049 return qla2x00_post_work(vha, e); 4050 } 4051 4052 static void 4053 qla2x00_uevent_emit(struct scsi_qla_host *vha, u32 code) 4054 { 4055 char event_string[40] = ""; 4056 char *envp[] = { event_string, NULL }; 4057 4058 switch (code) { 4059 case QLA_UEVENT_CODE_FW_DUMP: 4060 snprintf(event_string, sizeof(event_string), "FW_DUMP=%ld", 4061 vha->host_no); 4062 break; 4063 default: 4064 /* leave event_string empty for unknown codes */ 4065 break; 4066 } 4067 kobject_uevent_env(&vha->hw->pdev->dev.kobj, KOBJ_CHANGE, envp); 4068 } 4069 4070 int 4071 qlafx00_post_aenfx_work(struct scsi_qla_host *vha, uint32_t evtcode, 4072 uint32_t *data, int cnt) 4073 { 4074 struct qla_work_evt *e; 4075 4076 e = qla2x00_alloc_work(vha, QLA_EVT_AENFX); 4077 if (!e) 4078 return QLA_FUNCTION_FAILED; 4079 4080 e->u.aenfx.evtcode = evtcode; 4081 e->u.aenfx.count = cnt; 4082 memcpy(e->u.aenfx.mbx, data, sizeof(*data) * cnt); 4083 return qla2x00_post_work(vha, e); 4084 } 4085 4086 void 4087 qla2x00_do_work(struct scsi_qla_host *vha) 4088 { 4089 struct qla_work_evt *e, *tmp; 4090 unsigned long flags; 4091 LIST_HEAD(work); 4092 4093 spin_lock_irqsave(&vha->work_lock, flags); 4094 list_splice_init(&vha->work_list, &work); 4095 spin_unlock_irqrestore(&vha->work_lock, flags); 4096 4097 list_for_each_entry_safe(e, tmp, &work, list) { 4098 list_del_init(&e->list); 4099 4100 switch (e->type) { 4101 case QLA_EVT_AEN: 4102 fc_host_post_event(vha->host, fc_get_event_number(), 4103 e->u.aen.code, e->u.aen.data); 4104 break; 4105 case QLA_EVT_IDC_ACK: 4106 qla81xx_idc_ack(vha, e->u.idc_ack.mb); 4107 break; 4108 case QLA_EVT_ASYNC_LOGIN: 4109 qla2x00_async_login(vha, e->u.logio.fcport, 4110 e->u.logio.data); 4111 break; 4112 case QLA_EVT_ASYNC_LOGIN_DONE: 4113 qla2x00_async_login_done(vha, e->u.logio.fcport, 4114 e->u.logio.data); 4115 break; 4116 case QLA_EVT_ASYNC_LOGOUT: 4117 qla2x00_async_logout(vha, e->u.logio.fcport); 4118 break; 4119 case QLA_EVT_ASYNC_LOGOUT_DONE: 4120 qla2x00_async_logout_done(vha, e->u.logio.fcport, 4121 e->u.logio.data); 4122 break; 4123 case QLA_EVT_ASYNC_ADISC: 4124 qla2x00_async_adisc(vha, e->u.logio.fcport, 4125 e->u.logio.data); 4126 break; 4127 case QLA_EVT_ASYNC_ADISC_DONE: 4128 qla2x00_async_adisc_done(vha, e->u.logio.fcport, 4129 e->u.logio.data); 4130 break; 4131 case QLA_EVT_UEVENT: 4132 qla2x00_uevent_emit(vha, e->u.uevent.code); 4133 break; 4134 case QLA_EVT_AENFX: 4135 qlafx00_process_aen(vha, e); 4136 break; 4137 } 4138 if (e->flags & QLA_EVT_FLAG_FREE) 4139 kfree(e); 4140 4141 /* For each work completed decrement vha ref count */ 4142 QLA_VHA_MARK_NOT_BUSY(vha); 4143 } 4144 } 4145 4146 /* Relogins all the fcports of a vport 4147 * Context: dpc thread 4148 */ 4149 void qla2x00_relogin(struct scsi_qla_host *vha) 4150 { 4151 fc_port_t *fcport; 4152 int status; 4153 uint16_t next_loopid = 0; 4154 struct qla_hw_data *ha = vha->hw; 4155 uint16_t
data[2]; 4156 4157 list_for_each_entry(fcport, &vha->vp_fcports, list) { 4158 /* 4159 * If the port is not ONLINE then try to log in 4160 * to it if we haven't run out of retries. 4161 */ 4162 if (atomic_read(&fcport->state) != FCS_ONLINE && 4163 fcport->login_retry && !(fcport->flags & FCF_ASYNC_SENT)) { 4164 fcport->login_retry--; 4165 if (fcport->flags & FCF_FABRIC_DEVICE) { 4166 if (fcport->flags & FCF_FCP2_DEVICE) 4167 ha->isp_ops->fabric_logout(vha, 4168 fcport->loop_id, 4169 fcport->d_id.b.domain, 4170 fcport->d_id.b.area, 4171 fcport->d_id.b.al_pa); 4172 4173 if (fcport->loop_id == FC_NO_LOOP_ID) { 4174 fcport->loop_id = next_loopid = 4175 ha->min_external_loopid; 4176 status = qla2x00_find_new_loop_id( 4177 vha, fcport); 4178 if (status != QLA_SUCCESS) { 4179 /* Ran out of IDs to use */ 4180 break; 4181 } 4182 } 4183 4184 if (IS_ALOGIO_CAPABLE(ha)) { 4185 fcport->flags |= FCF_ASYNC_SENT; 4186 data[0] = 0; 4187 data[1] = QLA_LOGIO_LOGIN_RETRIED; 4188 status = qla2x00_post_async_login_work( 4189 vha, fcport, data); 4190 if (status == QLA_SUCCESS) 4191 continue; 4192 /* Attempt a retry. */ 4193 status = 1; 4194 } else { 4195 status = qla2x00_fabric_login(vha, 4196 fcport, &next_loopid); 4197 if (status == QLA_SUCCESS) { 4198 int status2; 4199 uint8_t opts; 4200 4201 opts = 0; 4202 if (fcport->flags & 4203 FCF_FCP2_DEVICE) 4204 opts |= BIT_1; 4205 status2 = 4206 qla2x00_get_port_database( 4207 vha, fcport, opts); 4208 if (status2 != QLA_SUCCESS) 4209 status = 1; 4210 } 4211 } 4212 } else 4213 status = qla2x00_local_device_login(vha, 4214 fcport); 4215 4216 if (status == QLA_SUCCESS) { 4217 fcport->old_loop_id = fcport->loop_id; 4218 4219 ql_dbg(ql_dbg_disc, vha, 0x2003, 4220 "Port login OK: logged in ID 0x%x.\n", 4221 fcport->loop_id); 4222 4223 qla2x00_update_fcport(vha, fcport); 4224 4225 } else if (status == 1) { 4226 set_bit(RELOGIN_NEEDED, &vha->dpc_flags); 4227 /* retry the login again */ 4228 ql_dbg(ql_dbg_disc, vha, 0x2007, 4229 "Retrying login: %d retries left, loop_id 0x%x.\n", 4230 fcport->login_retry, fcport->loop_id); 4231 } else { 4232 fcport->login_retry = 0; 4233 } 4234 4235 if (fcport->login_retry == 0 && status != QLA_SUCCESS) 4236 qla2x00_clear_loop_id(fcport); 4237 } 4238 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) 4239 break; 4240 } 4241 } 4242 4243 /* Schedule work on any of the dpc-workqueues */ 4244 void 4245 qla83xx_schedule_work(scsi_qla_host_t *base_vha, int work_code) 4246 { 4247 struct qla_hw_data *ha = base_vha->hw; 4248 4249 switch (work_code) { 4250 case MBA_IDC_AEN: /* 0x8200 */ 4251 if (ha->dpc_lp_wq) 4252 queue_work(ha->dpc_lp_wq, &ha->idc_aen); 4253 break; 4254 4255 case QLA83XX_NIC_CORE_RESET: /* 0x1 */ 4256 if (!ha->flags.nic_core_reset_hdlr_active) { 4257 if (ha->dpc_hp_wq) 4258 queue_work(ha->dpc_hp_wq, &ha->nic_core_reset); 4259 } else 4260 ql_dbg(ql_dbg_p3p, base_vha, 0xb05e, 4261 "NIC Core reset is already active. 
Skip " 4262 "scheduling it again.\n"); 4263 break; 4264 case QLA83XX_IDC_STATE_HANDLER: /* 0x2 */ 4265 if (ha->dpc_hp_wq) 4266 queue_work(ha->dpc_hp_wq, &ha->idc_state_handler); 4267 break; 4268 case QLA83XX_NIC_CORE_UNRECOVERABLE: /* 0x3 */ 4269 if (ha->dpc_hp_wq) 4270 queue_work(ha->dpc_hp_wq, &ha->nic_core_unrecoverable); 4271 break; 4272 default: 4273 ql_log(ql_log_warn, base_vha, 0xb05f, 4274 "Unknown work-code=0x%x.\n", work_code); 4275 } 4276 4277 return; 4278 } 4279 4280 /* Work: Perform NIC Core Unrecoverable state handling */ 4281 void 4282 qla83xx_nic_core_unrecoverable_work(struct work_struct *work) 4283 { 4284 struct qla_hw_data *ha = 4285 container_of(work, struct qla_hw_data, nic_core_unrecoverable); 4286 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 4287 uint32_t dev_state = 0; 4288 4289 qla83xx_idc_lock(base_vha, 0); 4290 qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state); 4291 qla83xx_reset_ownership(base_vha); 4292 if (ha->flags.nic_core_reset_owner) { 4293 ha->flags.nic_core_reset_owner = 0; 4294 qla83xx_wr_reg(base_vha, QLA83XX_IDC_DEV_STATE, 4295 QLA8XXX_DEV_FAILED); 4296 ql_log(ql_log_info, base_vha, 0xb060, "HW State: FAILED.\n"); 4297 qla83xx_schedule_work(base_vha, QLA83XX_IDC_STATE_HANDLER); 4298 } 4299 qla83xx_idc_unlock(base_vha, 0); 4300 } 4301 4302 /* Work: Execute IDC state handler */ 4303 void 4304 qla83xx_idc_state_handler_work(struct work_struct *work) 4305 { 4306 struct qla_hw_data *ha = 4307 container_of(work, struct qla_hw_data, idc_state_handler); 4308 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 4309 uint32_t dev_state = 0; 4310 4311 qla83xx_idc_lock(base_vha, 0); 4312 qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state); 4313 if (dev_state == QLA8XXX_DEV_FAILED || 4314 dev_state == QLA8XXX_DEV_NEED_QUIESCENT) 4315 qla83xx_idc_state_handler(base_vha); 4316 qla83xx_idc_unlock(base_vha, 0); 4317 } 4318 4319 static int 4320 qla83xx_check_nic_core_fw_alive(scsi_qla_host_t *base_vha) 4321 { 4322 int rval = QLA_SUCCESS; 4323 unsigned long heart_beat_wait = jiffies + (1 * HZ); 4324 uint32_t heart_beat_counter1, heart_beat_counter2; 4325 4326 do { 4327 if (time_after(jiffies, heart_beat_wait)) { 4328 ql_dbg(ql_dbg_p3p, base_vha, 0xb07c, 4329 "Nic Core f/w is not alive.\n"); 4330 rval = QLA_FUNCTION_FAILED; 4331 break; 4332 } 4333 4334 qla83xx_idc_lock(base_vha, 0); 4335 qla83xx_rd_reg(base_vha, QLA83XX_FW_HEARTBEAT, 4336 &heart_beat_counter1); 4337 qla83xx_idc_unlock(base_vha, 0); 4338 msleep(100); 4339 qla83xx_idc_lock(base_vha, 0); 4340 qla83xx_rd_reg(base_vha, QLA83XX_FW_HEARTBEAT, 4341 &heart_beat_counter2); 4342 qla83xx_idc_unlock(base_vha, 0); 4343 } while (heart_beat_counter1 == heart_beat_counter2); 4344 4345 return rval; 4346 } 4347 4348 /* Work: Perform NIC Core Reset handling */ 4349 void 4350 qla83xx_nic_core_reset_work(struct work_struct *work) 4351 { 4352 struct qla_hw_data *ha = 4353 container_of(work, struct qla_hw_data, nic_core_reset); 4354 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 4355 uint32_t dev_state = 0; 4356 4357 if (IS_QLA2031(ha)) { 4358 if (qla2xxx_mctp_dump(base_vha) != QLA_SUCCESS) 4359 ql_log(ql_log_warn, base_vha, 0xb081, 4360 "Failed to dump mctp\n"); 4361 return; 4362 } 4363 4364 if (!ha->flags.nic_core_reset_hdlr_active) { 4365 if (qla83xx_check_nic_core_fw_alive(base_vha) == QLA_SUCCESS) { 4366 qla83xx_idc_lock(base_vha, 0); 4367 qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, 4368 &dev_state); 4369 qla83xx_idc_unlock(base_vha, 0); 4370 if (dev_state != 
QLA8XXX_DEV_NEED_RESET) { 4371 ql_dbg(ql_dbg_p3p, base_vha, 0xb07a, 4372 "Nic Core f/w is alive.\n"); 4373 return; 4374 } 4375 } 4376 4377 ha->flags.nic_core_reset_hdlr_active = 1; 4378 if (qla83xx_nic_core_reset(base_vha)) { 4379 /* NIC Core reset failed. */ 4380 ql_dbg(ql_dbg_p3p, base_vha, 0xb061, 4381 "NIC Core reset failed.\n"); 4382 } 4383 ha->flags.nic_core_reset_hdlr_active = 0; 4384 } 4385 } 4386 4387 /* Work: Handle 8200 IDC aens */ 4388 void 4389 qla83xx_service_idc_aen(struct work_struct *work) 4390 { 4391 struct qla_hw_data *ha = 4392 container_of(work, struct qla_hw_data, idc_aen); 4393 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 4394 uint32_t dev_state, idc_control; 4395 4396 qla83xx_idc_lock(base_vha, 0); 4397 qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state); 4398 qla83xx_rd_reg(base_vha, QLA83XX_IDC_CONTROL, &idc_control); 4399 qla83xx_idc_unlock(base_vha, 0); 4400 if (dev_state == QLA8XXX_DEV_NEED_RESET) { 4401 if (idc_control & QLA83XX_IDC_GRACEFUL_RESET) { 4402 ql_dbg(ql_dbg_p3p, base_vha, 0xb062, 4403 "Application requested NIC Core Reset.\n"); 4404 qla83xx_schedule_work(base_vha, QLA83XX_NIC_CORE_RESET); 4405 } else if (qla83xx_check_nic_core_fw_alive(base_vha) == 4406 QLA_SUCCESS) { 4407 ql_dbg(ql_dbg_p3p, base_vha, 0xb07b, 4408 "Other protocol driver requested NIC Core Reset.\n"); 4409 qla83xx_schedule_work(base_vha, QLA83XX_NIC_CORE_RESET); 4410 } 4411 } else if (dev_state == QLA8XXX_DEV_FAILED || 4412 dev_state == QLA8XXX_DEV_NEED_QUIESCENT) { 4413 qla83xx_schedule_work(base_vha, QLA83XX_IDC_STATE_HANDLER); 4414 } 4415 } 4416 4417 static void 4418 qla83xx_wait_logic(void) 4419 { 4420 int i; 4421 4422 /* Yield CPU */ 4423 if (!in_interrupt()) { 4424 /* 4425 * Wait about 100ms before retrying. 4426 * This controls the number of retries for a single 4427 * lock operation. 
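 *
 * Editor's note (sketch, assumed timings): with roughly a 100ms sleep
 * per retry, a caller that polls until QLA83XX_MAX_LOCK_RECOVERY_WAIT
 * expires gets on the order of (timeout / 100ms) attempts, e.g.
 *
 *	do {
 *		qla83xx_wait_logic();
 *	} while (lock_still_held() && !timed_out());
 *
 * where lock_still_held() and timed_out() are hypothetical stand-ins
 * for the register reads and jiffies checks done by the callers below.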
*/ 4429 msleep(100); 4430 schedule(); 4431 } else { 4432 for (i = 0; i < 20; i++) 4433 cpu_relax(); /* This is a nop instruction on i386 */ 4434 } 4435 } 4436 4437 static int 4438 qla83xx_force_lock_recovery(scsi_qla_host_t *base_vha) 4439 { 4440 int rval; 4441 uint32_t data; 4442 uint32_t idc_lck_rcvry_stage_mask = 0x3; 4443 uint32_t idc_lck_rcvry_owner_mask = 0x3c; 4444 struct qla_hw_data *ha = base_vha->hw; 4445 ql_dbg(ql_dbg_p3p, base_vha, 0xb086, 4446 "Trying forced recovery of the IDC lock.\n"); 4447 4448 rval = qla83xx_rd_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY, &data); 4449 if (rval) 4450 return rval; 4451 4452 if ((data & idc_lck_rcvry_stage_mask) > 0) { 4453 return QLA_SUCCESS; 4454 } else { 4455 data = (IDC_LOCK_RECOVERY_STAGE1) | (ha->portnum << 2); 4456 rval = qla83xx_wr_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY, 4457 data); 4458 if (rval) 4459 return rval; 4460 4461 msleep(200); 4462 4463 rval = qla83xx_rd_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY, 4464 &data); 4465 if (rval) 4466 return rval; 4467 4468 if (((data & idc_lck_rcvry_owner_mask) >> 2) == ha->portnum) { 4469 data &= (IDC_LOCK_RECOVERY_STAGE2 | 4470 ~(idc_lck_rcvry_stage_mask)); 4471 rval = qla83xx_wr_reg(base_vha, 4472 QLA83XX_IDC_LOCK_RECOVERY, data); 4473 if (rval) 4474 return rval; 4475 4476 /* Forcefully perform IDC UnLock */ 4477 rval = qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_UNLOCK, 4478 &data); 4479 if (rval) 4480 return rval; 4481 /* Clear lock-id by setting 0xff */ 4482 rval = qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID, 4483 0xff); 4484 if (rval) 4485 return rval; 4486 /* Clear lock-recovery by setting 0x0 */ 4487 rval = qla83xx_wr_reg(base_vha, 4488 QLA83XX_IDC_LOCK_RECOVERY, 0x0); 4489 if (rval) 4490 return rval; 4491 } else 4492 return QLA_SUCCESS; 4493 } 4494 4495 return rval; 4496 } 4497 4498 static int 4499 qla83xx_idc_lock_recovery(scsi_qla_host_t *base_vha) 4500 { 4501 int rval = QLA_SUCCESS; 4502 uint32_t o_drv_lockid, n_drv_lockid; 4503 unsigned long lock_recovery_timeout; 4504 4505 lock_recovery_timeout = jiffies + QLA83XX_MAX_LOCK_RECOVERY_WAIT; 4506 retry_lockid: 4507 rval = qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, &o_drv_lockid); 4508 if (rval) 4509 goto exit; 4510 4511 /* MAX wait time before forcing IDC Lock recovery = 2 secs */ 4512 if (time_after_eq(jiffies, lock_recovery_timeout)) { 4513 if (qla83xx_force_lock_recovery(base_vha) == QLA_SUCCESS) 4514 return QLA_SUCCESS; 4515 else 4516 return QLA_FUNCTION_FAILED; 4517 } 4518 4519 rval = qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, &n_drv_lockid); 4520 if (rval) 4521 goto exit; 4522 4523 if (o_drv_lockid == n_drv_lockid) { 4524 qla83xx_wait_logic(); 4525 goto retry_lockid; 4526 } else 4527 return QLA_SUCCESS; 4528 4529 exit: 4530 return rval; 4531 } 4532 4533 void 4534 qla83xx_idc_lock(scsi_qla_host_t *base_vha, uint16_t requester_id) 4535 { 4536 uint16_t options = (requester_id << 15) | BIT_6; 4537 uint32_t data; 4538 uint32_t lock_owner; 4539 struct qla_hw_data *ha = base_vha->hw; 4540 4541 /* IDC-lock implementation using driver-lock/lock-id remote registers */ 4542 retry_lock: 4543 if (qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCK, &data) 4544 == QLA_SUCCESS) { 4545 if (data) { 4546 /* Setting lock-id to our function-number */ 4547 qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID, 4548 ha->portnum); 4549 } else { 4550 qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, 4551 &lock_owner); 4552 ql_dbg(ql_dbg_p3p, base_vha, 0xb063, 4553 "Failed to acquire IDC lock, acquired by %d, " 4554 "retrying...\n", lock_owner); 4555 4556 /* 
Retry/Perform IDC-Lock recovery */ 4557 if (qla83xx_idc_lock_recovery(base_vha) 4558 == QLA_SUCCESS) { 4559 qla83xx_wait_logic(); 4560 goto retry_lock; 4561 } else 4562 ql_log(ql_log_warn, base_vha, 0xb075, 4563 "IDC Lock recovery FAILED.\n"); 4564 } 4565 4566 } 4567 4568 return; 4569 4570 /* XXX: IDC-lock implementation using access-control mbx */ 4571 retry_lock2: 4572 if (qla83xx_access_control(base_vha, options, 0, 0, NULL)) { 4573 ql_dbg(ql_dbg_p3p, base_vha, 0xb072, 4574 "Failed to acquire IDC lock, retrying...\n"); 4575 /* Retry/Perform IDC-Lock recovery */ 4576 if (qla83xx_idc_lock_recovery(base_vha) == QLA_SUCCESS) { 4577 qla83xx_wait_logic(); 4578 goto retry_lock2; 4579 } else 4580 ql_log(ql_log_warn, base_vha, 0xb076, 4581 "IDC Lock recovery FAILED.\n"); 4582 } 4583 4584 return; 4585 } 4586 4587 void 4588 qla83xx_idc_unlock(scsi_qla_host_t *base_vha, uint16_t requester_id) 4589 { 4590 #if 0 4591 uint16_t options = (requester_id << 15) | BIT_7; 4592 #endif 4593 uint16_t retry; 4594 uint32_t data; 4595 struct qla_hw_data *ha = base_vha->hw; 4596 4597 /* IDC-unlock implementation using driver-unlock/lock-id 4598 * remote registers 4599 */ 4600 retry = 0; 4601 retry_unlock: 4602 if (qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, &data) 4603 == QLA_SUCCESS) { 4604 if (data == ha->portnum) { 4605 qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_UNLOCK, &data); 4606 /* Clearing lock-id by setting 0xff */ 4607 qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID, 0xff); 4608 } else if (retry < 10) { 4609 /* SV: XXX: IDC unlock retrying needed here? */ 4610 4611 /* Retry for IDC-unlock */ 4612 qla83xx_wait_logic(); 4613 retry++; 4614 ql_dbg(ql_dbg_p3p, base_vha, 0xb064, 4615 "Failed to release IDC lock, retrying=%d\n", retry); 4616 goto retry_unlock; 4617 } 4618 } else if (retry < 10) { 4619 /* Retry for IDC-unlock */ 4620 qla83xx_wait_logic(); 4621 retry++; 4622 ql_dbg(ql_dbg_p3p, base_vha, 0xb065, 4623 "Failed to read drv-lockid, retrying=%d\n", retry); 4624 goto retry_unlock; 4625 } 4626 4627 return; 4628 4629 #if 0 4630 /* XXX: IDC-unlock implementation using access-control mbx */ 4631 retry = 0; 4632 retry_unlock2: 4633 if (qla83xx_access_control(base_vha, options, 0, 0, NULL)) { 4634 if (retry < 10) { 4635 /* Retry for IDC-unlock */ 4636 qla83xx_wait_logic(); 4637 retry++; 4638 ql_dbg(ql_dbg_p3p, base_vha, 0xb066, 4639 "Failed to release IDC lock, retrying=%d\n", retry); 4640 goto retry_unlock2; 4641 } 4642 } 4643 4644 return; 4645 #endif 4646 } 4647 4648 int 4649 __qla83xx_set_drv_presence(scsi_qla_host_t *vha) 4650 { 4651 int rval = QLA_SUCCESS; 4652 struct qla_hw_data *ha = vha->hw; 4653 uint32_t drv_presence; 4654 4655 rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence); 4656 if (rval == QLA_SUCCESS) { 4657 drv_presence |= (1 << ha->portnum); 4658 rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRV_PRESENCE, 4659 drv_presence); 4660 } 4661 4662 return rval; 4663 } 4664 4665 int 4666 qla83xx_set_drv_presence(scsi_qla_host_t *vha) 4667 { 4668 int rval = QLA_SUCCESS; 4669 4670 qla83xx_idc_lock(vha, 0); 4671 rval = __qla83xx_set_drv_presence(vha); 4672 qla83xx_idc_unlock(vha, 0); 4673 4674 return rval; 4675 } 4676 4677 int 4678 __qla83xx_clear_drv_presence(scsi_qla_host_t *vha) 4679 { 4680 int rval = QLA_SUCCESS; 4681 struct qla_hw_data *ha = vha->hw; 4682 uint32_t drv_presence; 4683 4684 rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence); 4685 if (rval == QLA_SUCCESS) { 4686 drv_presence &= ~(1 << ha->portnum); 4687 rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRV_PRESENCE,
4688 drv_presence); 4689 } 4690 4691 return rval; 4692 } 4693 4694 int 4695 qla83xx_clear_drv_presence(scsi_qla_host_t *vha) 4696 { 4697 int rval = QLA_SUCCESS; 4698 4699 qla83xx_idc_lock(vha, 0); 4700 rval = __qla83xx_clear_drv_presence(vha); 4701 qla83xx_idc_unlock(vha, 0); 4702 4703 return rval; 4704 } 4705 4706 static void 4707 qla83xx_need_reset_handler(scsi_qla_host_t *vha) 4708 { 4709 struct qla_hw_data *ha = vha->hw; 4710 uint32_t drv_ack, drv_presence; 4711 unsigned long ack_timeout; 4712 4713 /* Wait for IDC ACK from all functions (DRV-ACK == DRV-PRESENCE) */ 4714 ack_timeout = jiffies + (ha->fcoe_reset_timeout * HZ); 4715 while (1) { 4716 qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack); 4717 qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence); 4718 if ((drv_ack & drv_presence) == drv_presence) 4719 break; 4720 4721 if (time_after_eq(jiffies, ack_timeout)) { 4722 ql_log(ql_log_warn, vha, 0xb067, 4723 "RESET ACK TIMEOUT! drv_presence=0x%x " 4724 "drv_ack=0x%x\n", drv_presence, drv_ack); 4725 /* 4726 * The function(s) which did not ack in time are forced 4727 * to withdraw any further participation in the IDC 4728 * reset. 4729 */ 4730 if (drv_ack != drv_presence) 4731 qla83xx_wr_reg(vha, QLA83XX_IDC_DRV_PRESENCE, 4732 drv_ack); 4733 break; 4734 } 4735 4736 qla83xx_idc_unlock(vha, 0); 4737 msleep(1000); 4738 qla83xx_idc_lock(vha, 0); 4739 } 4740 4741 qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_COLD); 4742 ql_log(ql_log_info, vha, 0xb068, "HW State: COLD/RE-INIT.\n"); 4743 } 4744 4745 static int 4746 qla83xx_device_bootstrap(scsi_qla_host_t *vha) 4747 { 4748 int rval = QLA_SUCCESS; 4749 uint32_t idc_control; 4750 4751 qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_INITIALIZING); 4752 ql_log(ql_log_info, vha, 0xb069, "HW State: INITIALIZING.\n"); 4753 4754 /* Clearing IDC-Control Graceful-Reset Bit before resetting f/w */ 4755 __qla83xx_get_idc_control(vha, &idc_control); 4756 idc_control &= ~QLA83XX_IDC_GRACEFUL_RESET; 4757 __qla83xx_set_idc_control(vha, 0); 4758 4759 qla83xx_idc_unlock(vha, 0); 4760 rval = qla83xx_restart_nic_firmware(vha); 4761 qla83xx_idc_lock(vha, 0); 4762 4763 if (rval != QLA_SUCCESS) { 4764 ql_log(ql_log_fatal, vha, 0xb06a, 4765 "Failed to restart NIC f/w.\n"); 4766 qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_FAILED); 4767 ql_log(ql_log_info, vha, 0xb06b, "HW State: FAILED.\n"); 4768 } else { 4769 ql_dbg(ql_dbg_p3p, vha, 0xb06c, 4770 "Success in restarting nic f/w.\n"); 4771 qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_READY); 4772 ql_log(ql_log_info, vha, 0xb06d, "HW State: READY.\n"); 4773 } 4774 4775 return rval; 4776 } 4777 4778 /* Assumes idc_lock always held on entry */ 4779 int 4780 qla83xx_idc_state_handler(scsi_qla_host_t *base_vha) 4781 { 4782 struct qla_hw_data *ha = base_vha->hw; 4783 int rval = QLA_SUCCESS; 4784 unsigned long dev_init_timeout; 4785 uint32_t dev_state; 4786 4787 /* Wait for MAX-INIT-TIMEOUT for the device to go ready */ 4788 dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout * HZ); 4789 4790 while (1) { 4791 4792 if (time_after_eq(jiffies, dev_init_timeout)) { 4793 ql_log(ql_log_warn, base_vha, 0xb06e, 4794 "Initialization TIMEOUT!\n"); 4795 /* Init timeout. Disable further NIC Core 4796 * communication. 
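 * (Editor's note: the deadline above uses the standard jiffies idiom;
 * a minimal sketch, assuming fcoe_dev_init_timeout counts seconds:
 *
 *	unsigned long deadline = jiffies + ha->fcoe_dev_init_timeout * HZ;
 *	if (time_after_eq(jiffies, deadline))
 *		mark_device_failed();	// hypothetical helper
 *
 * qla83xx_need_reset_handler() above applies the same pattern with
 * fcoe_reset_timeout.)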
4797 */ 4798 qla83xx_wr_reg(base_vha, QLA83XX_IDC_DEV_STATE, 4799 QLA8XXX_DEV_FAILED); 4800 ql_log(ql_log_info, base_vha, 0xb06f, 4801 "HW State: FAILED.\n"); 4802 } 4803 4804 qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state); 4805 switch (dev_state) { 4806 case QLA8XXX_DEV_READY: 4807 if (ha->flags.nic_core_reset_owner) 4808 qla83xx_idc_audit(base_vha, 4809 IDC_AUDIT_COMPLETION); 4810 ha->flags.nic_core_reset_owner = 0; 4811 ql_dbg(ql_dbg_p3p, base_vha, 0xb070, 4812 "Reset_owner reset by 0x%x.\n", 4813 ha->portnum); 4814 goto exit; 4815 case QLA8XXX_DEV_COLD: 4816 if (ha->flags.nic_core_reset_owner) 4817 rval = qla83xx_device_bootstrap(base_vha); 4818 else { 4819 /* Wait for AEN to change device-state */ 4820 qla83xx_idc_unlock(base_vha, 0); 4821 msleep(1000); 4822 qla83xx_idc_lock(base_vha, 0); 4823 } 4824 break; 4825 case QLA8XXX_DEV_INITIALIZING: 4826 /* Wait for AEN to change device-state */ 4827 qla83xx_idc_unlock(base_vha, 0); 4828 msleep(1000); 4829 qla83xx_idc_lock(base_vha, 0); 4830 break; 4831 case QLA8XXX_DEV_NEED_RESET: 4832 if (!ql2xdontresethba && ha->flags.nic_core_reset_owner) 4833 qla83xx_need_reset_handler(base_vha); 4834 else { 4835 /* Wait for AEN to change device-state */ 4836 qla83xx_idc_unlock(base_vha, 0); 4837 msleep(1000); 4838 qla83xx_idc_lock(base_vha, 0); 4839 } 4840 /* reset timeout value after need reset handler */ 4841 dev_init_timeout = jiffies + 4842 (ha->fcoe_dev_init_timeout * HZ); 4843 break; 4844 case QLA8XXX_DEV_NEED_QUIESCENT: 4845 /* XXX: DEBUG for now */ 4846 qla83xx_idc_unlock(base_vha, 0); 4847 msleep(1000); 4848 qla83xx_idc_lock(base_vha, 0); 4849 break; 4850 case QLA8XXX_DEV_QUIESCENT: 4851 /* XXX: DEBUG for now */ 4852 if (ha->flags.quiesce_owner) 4853 goto exit; 4854 4855 qla83xx_idc_unlock(base_vha, 0); 4856 msleep(1000); 4857 qla83xx_idc_lock(base_vha, 0); 4858 dev_init_timeout = jiffies + 4859 (ha->fcoe_dev_init_timeout * HZ); 4860 break; 4861 case QLA8XXX_DEV_FAILED: 4862 if (ha->flags.nic_core_reset_owner) 4863 qla83xx_idc_audit(base_vha, 4864 IDC_AUDIT_COMPLETION); 4865 ha->flags.nic_core_reset_owner = 0; 4866 __qla83xx_clear_drv_presence(base_vha); 4867 qla83xx_idc_unlock(base_vha, 0); 4868 qla8xxx_dev_failed_handler(base_vha); 4869 rval = QLA_FUNCTION_FAILED; 4870 qla83xx_idc_lock(base_vha, 0); 4871 goto exit; 4872 case QLA8XXX_BAD_VALUE: 4873 qla83xx_idc_unlock(base_vha, 0); 4874 msleep(1000); 4875 qla83xx_idc_lock(base_vha, 0); 4876 break; 4877 default: 4878 ql_log(ql_log_warn, base_vha, 0xb071, 4879 "Unknown Device State: %x.\n", dev_state); 4880 qla83xx_idc_unlock(base_vha, 0); 4881 qla8xxx_dev_failed_handler(base_vha); 4882 rval = QLA_FUNCTION_FAILED; 4883 qla83xx_idc_lock(base_vha, 0); 4884 goto exit; 4885 } 4886 } 4887 4888 exit: 4889 return rval; 4890 } 4891 4892 void 4893 qla2x00_disable_board_on_pci_error(struct work_struct *work) 4894 { 4895 struct qla_hw_data *ha = container_of(work, struct qla_hw_data, 4896 board_disable); 4897 struct pci_dev *pdev = ha->pdev; 4898 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 4899 4900 ql_log(ql_log_warn, base_vha, 0x015b, 4901 "Disabling adapter.\n"); 4902 4903 set_bit(UNLOADING, &base_vha->dpc_flags); 4904 4905 qla2x00_delete_all_vps(ha, base_vha); 4906 4907 qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16); 4908 4909 qla2x00_dfs_remove(base_vha); 4910 4911 qla84xx_put_chip(base_vha); 4912 4913 if (base_vha->timer_active) 4914 qla2x00_stop_timer(base_vha); 4915 4916 base_vha->flags.online = 0; 4917 4918 qla2x00_destroy_deferred_work(ha); 4919 4920 /* 4921 * Do 
not try to stop beacon blink as it will issue a mailbox 4922 * command. 4923 */ 4924 qla2x00_free_sysfs_attr(base_vha, false); 4925 4926 fc_remove_host(base_vha->host); 4927 4928 scsi_remove_host(base_vha->host); 4929 4930 base_vha->flags.init_done = 0; 4931 qla25xx_delete_queues(base_vha); 4932 qla2x00_free_irqs(base_vha); 4933 qla2x00_free_fcports(base_vha); 4934 qla2x00_mem_free(ha); 4935 qla82xx_md_free(base_vha); 4936 qla2x00_free_queues(ha); 4937 4938 qla2x00_unmap_iobases(ha); 4939 4940 pci_release_selected_regions(ha->pdev, ha->bars); 4941 pci_disable_pcie_error_reporting(pdev); 4942 pci_disable_device(pdev); 4943 4944 /* 4945 * Let qla2x00_remove_one cleanup qla_hw_data on device removal. 4946 */ 4947 } 4948 4949 /************************************************************************** 4950 * qla2x00_do_dpc 4951 * This kernel thread is a task that is scheduled by the interrupt handler 4952 * to perform the background processing for interrupts. 4953 * 4954 * Notes: 4955 * This task always runs in the context of a kernel thread. It 4956 * is kicked off by the driver's detect code and starts up 4957 * one per adapter. It immediately goes to sleep and waits for 4958 * some fibre event. When either the interrupt handler or 4959 * the timer routine detects an event, it will set one of the task 4960 * bits and then wake us up. 4961 **************************************************************************/ 4962 static int 4963 qla2x00_do_dpc(void *data) 4964 { 4965 scsi_qla_host_t *base_vha; 4966 struct qla_hw_data *ha; 4967 4968 ha = (struct qla_hw_data *)data; 4969 base_vha = pci_get_drvdata(ha->pdev); 4970 4971 set_user_nice(current, MIN_NICE); 4972 4973 set_current_state(TASK_INTERRUPTIBLE); 4974 while (!kthread_should_stop()) { 4975 ql_dbg(ql_dbg_dpc, base_vha, 0x4000, 4976 "DPC handler sleeping.\n"); 4977 4978 schedule(); 4979 4980 if (!base_vha->flags.init_done || ha->flags.mbox_busy) 4981 goto end_loop; 4982 4983 if (ha->flags.eeh_busy) { 4984 ql_dbg(ql_dbg_dpc, base_vha, 0x4003, 4985 "eeh_busy=%d.\n", ha->flags.eeh_busy); 4986 goto end_loop; 4987 } 4988 4989 ha->dpc_active = 1; 4990 4991 ql_dbg(ql_dbg_dpc + ql_dbg_verbose, base_vha, 0x4001, 4992 "DPC handler waking up, dpc_flags=0x%lx.\n", 4993 base_vha->dpc_flags); 4994 4995 qla2x00_do_work(base_vha); 4996 4997 if (IS_P3P_TYPE(ha)) { 4998 if (IS_QLA8044(ha)) { 4999 if (test_and_clear_bit(ISP_UNRECOVERABLE, 5000 &base_vha->dpc_flags)) { 5001 qla8044_idc_lock(ha); 5002 qla8044_wr_direct(base_vha, 5003 QLA8044_CRB_DEV_STATE_INDEX, 5004 QLA8XXX_DEV_FAILED); 5005 qla8044_idc_unlock(ha); 5006 ql_log(ql_log_info, base_vha, 0x4004, 5007 "HW State: FAILED.\n"); 5008 qla8044_device_state_handler(base_vha); 5009 continue; 5010 } 5011 5012 } else { 5013 if (test_and_clear_bit(ISP_UNRECOVERABLE, 5014 &base_vha->dpc_flags)) { 5015 qla82xx_idc_lock(ha); 5016 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 5017 QLA8XXX_DEV_FAILED); 5018 qla82xx_idc_unlock(ha); 5019 ql_log(ql_log_info, base_vha, 0x0151, 5020 "HW State: FAILED.\n"); 5021 qla82xx_device_state_handler(base_vha); 5022 continue; 5023 } 5024 } 5025 5026 if (test_and_clear_bit(FCOE_CTX_RESET_NEEDED, 5027 &base_vha->dpc_flags)) { 5028 5029 ql_dbg(ql_dbg_dpc, base_vha, 0x4005, 5030 "FCoE context reset scheduled.\n"); 5031 if (!(test_and_set_bit(ABORT_ISP_ACTIVE, 5032 &base_vha->dpc_flags))) { 5033 if (qla82xx_fcoe_ctx_reset(base_vha)) { 5034 /* FCoE-ctx reset failed. 
5035 * Escalate to chip-reset 5036 */ 5037 set_bit(ISP_ABORT_NEEDED, 5038 &base_vha->dpc_flags); 5039 } 5040 clear_bit(ABORT_ISP_ACTIVE, 5041 &base_vha->dpc_flags); 5042 } 5043 5044 ql_dbg(ql_dbg_dpc, base_vha, 0x4006, 5045 "FCoE context reset end.\n"); 5046 } 5047 } else if (IS_QLAFX00(ha)) { 5048 if (test_and_clear_bit(ISP_UNRECOVERABLE, 5049 &base_vha->dpc_flags)) { 5050 ql_dbg(ql_dbg_dpc, base_vha, 0x4020, 5051 "Firmware Reset Recovery\n"); 5052 if (qlafx00_reset_initialize(base_vha)) { 5053 /* Failed. Abort isp later. */ 5054 if (!test_bit(UNLOADING, 5055 &base_vha->dpc_flags)) { 5056 set_bit(ISP_UNRECOVERABLE, 5057 &base_vha->dpc_flags); 5058 ql_dbg(ql_dbg_dpc, base_vha, 5059 0x4021, 5060 "Reset Recovery Failed\n"); 5061 } 5062 } 5063 } 5064 5065 if (test_and_clear_bit(FX00_TARGET_SCAN, 5066 &base_vha->dpc_flags)) { 5067 ql_dbg(ql_dbg_dpc, base_vha, 0x4022, 5068 "ISPFx00 Target Scan scheduled\n"); 5069 if (qlafx00_rescan_isp(base_vha)) { 5070 if (!test_bit(UNLOADING, 5071 &base_vha->dpc_flags)) 5072 set_bit(ISP_UNRECOVERABLE, 5073 &base_vha->dpc_flags); 5074 ql_dbg(ql_dbg_dpc, base_vha, 0x401e, 5075 "ISPFx00 Target Scan Failed\n"); 5076 } 5077 ql_dbg(ql_dbg_dpc, base_vha, 0x401f, 5078 "ISPFx00 Target Scan End\n"); 5079 } 5080 if (test_and_clear_bit(FX00_HOST_INFO_RESEND, 5081 &base_vha->dpc_flags)) { 5082 ql_dbg(ql_dbg_dpc, base_vha, 0x4023, 5083 "ISPFx00 Host Info resend scheduled\n"); 5084 qlafx00_fx_disc(base_vha, 5085 &base_vha->hw->mr.fcport, 5086 FXDISC_REG_HOST_INFO); 5087 } 5088 } 5089 5090 if (test_and_clear_bit(ISP_ABORT_NEEDED, 5091 &base_vha->dpc_flags)) { 5092 5093 ql_dbg(ql_dbg_dpc, base_vha, 0x4007, 5094 "ISP abort scheduled.\n"); 5095 if (!(test_and_set_bit(ABORT_ISP_ACTIVE, 5096 &base_vha->dpc_flags))) { 5097 5098 if (ha->isp_ops->abort_isp(base_vha)) { 5099 /* failed. 
retry later */ 5100 set_bit(ISP_ABORT_NEEDED, 5101 &base_vha->dpc_flags); 5102 } 5103 clear_bit(ABORT_ISP_ACTIVE, 5104 &base_vha->dpc_flags); 5105 } 5106 5107 ql_dbg(ql_dbg_dpc, base_vha, 0x4008, 5108 "ISP abort end.\n"); 5109 } 5110 5111 if (test_and_clear_bit(FCPORT_UPDATE_NEEDED, 5112 &base_vha->dpc_flags)) { 5113 qla2x00_update_fcports(base_vha); 5114 } 5115 5116 if (test_bit(SCR_PENDING, &base_vha->dpc_flags)) { 5117 int ret; 5118 ret = qla2x00_send_change_request(base_vha, 0x3, 0); 5119 if (ret != QLA_SUCCESS) 5120 ql_log(ql_log_warn, base_vha, 0x121, 5121 "Failed to enable receiving of RSCN " 5122 "requests: 0x%x.\n", ret); 5123 clear_bit(SCR_PENDING, &base_vha->dpc_flags); 5124 } 5125 5126 if (IS_QLAFX00(ha)) 5127 goto loop_resync_check; 5128 5129 if (test_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags)) { 5130 ql_dbg(ql_dbg_dpc, base_vha, 0x4009, 5131 "Quiescence mode scheduled.\n"); 5132 if (IS_P3P_TYPE(ha)) { 5133 if (IS_QLA82XX(ha)) 5134 qla82xx_device_state_handler(base_vha); 5135 if (IS_QLA8044(ha)) 5136 qla8044_device_state_handler(base_vha); 5137 clear_bit(ISP_QUIESCE_NEEDED, 5138 &base_vha->dpc_flags); 5139 if (!ha->flags.quiesce_owner) { 5140 qla2x00_perform_loop_resync(base_vha); 5141 if (IS_QLA82XX(ha)) { 5142 qla82xx_idc_lock(ha); 5143 qla82xx_clear_qsnt_ready( 5144 base_vha); 5145 qla82xx_idc_unlock(ha); 5146 } else if (IS_QLA8044(ha)) { 5147 qla8044_idc_lock(ha); 5148 qla8044_clear_qsnt_ready( 5149 base_vha); 5150 qla8044_idc_unlock(ha); 5151 } 5152 } 5153 } else { 5154 clear_bit(ISP_QUIESCE_NEEDED, 5155 &base_vha->dpc_flags); 5156 qla2x00_quiesce_io(base_vha); 5157 } 5158 ql_dbg(ql_dbg_dpc, base_vha, 0x400a, 5159 "Quiescence mode end.\n"); 5160 } 5161 5162 if (test_and_clear_bit(RESET_MARKER_NEEDED, 5163 &base_vha->dpc_flags) && 5164 (!(test_and_set_bit(RESET_ACTIVE, &base_vha->dpc_flags)))) { 5165 5166 ql_dbg(ql_dbg_dpc, base_vha, 0x400b, 5167 "Reset marker scheduled.\n"); 5168 qla2x00_rst_aen(base_vha); 5169 clear_bit(RESET_ACTIVE, &base_vha->dpc_flags); 5170 ql_dbg(ql_dbg_dpc, base_vha, 0x400c, 5171 "Reset marker end.\n"); 5172 } 5173 5174 /* Retry each device up to login retry count */ 5175 if ((test_and_clear_bit(RELOGIN_NEEDED, 5176 &base_vha->dpc_flags)) && 5177 !test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags) && 5178 atomic_read(&base_vha->loop_state) != LOOP_DOWN) { 5179 5180 ql_dbg(ql_dbg_dpc, base_vha, 0x400d, 5181 "Relogin scheduled.\n"); 5182 qla2x00_relogin(base_vha); 5183 ql_dbg(ql_dbg_dpc, base_vha, 0x400e, 5184 "Relogin end.\n"); 5185 } 5186 loop_resync_check: 5187 if (test_and_clear_bit(LOOP_RESYNC_NEEDED, 5188 &base_vha->dpc_flags)) { 5189 5190 ql_dbg(ql_dbg_dpc, base_vha, 0x400f, 5191 "Loop resync scheduled.\n"); 5192 5193 if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, 5194 &base_vha->dpc_flags))) { 5195 5196 qla2x00_loop_resync(base_vha); 5197 5198 clear_bit(LOOP_RESYNC_ACTIVE, 5199 &base_vha->dpc_flags); 5200 } 5201 5202 ql_dbg(ql_dbg_dpc, base_vha, 0x4010, 5203 "Loop resync end.\n"); 5204 } 5205 5206 if (IS_QLAFX00(ha)) 5207 goto intr_on_check; 5208 5209 if (test_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags) && 5210 atomic_read(&base_vha->loop_state) == LOOP_READY) { 5211 clear_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags); 5212 qla2xxx_flash_npiv_conf(base_vha); 5213 } 5214 5215 intr_on_check: 5216 if (!ha->interrupts_on) 5217 ha->isp_ops->enable_intrs(ha); 5218 5219 if (test_and_clear_bit(BEACON_BLINK_NEEDED, 5220 &base_vha->dpc_flags)) { 5221 if (ha->beacon_blink_led == 1) 5222 ha->isp_ops->beacon_blink(base_vha); 5223 } 5224 5225 if 
(!IS_QLAFX00(ha)) 5226 qla2x00_do_dpc_all_vps(base_vha); 5227 5228 ha->dpc_active = 0; 5229 end_loop: 5230 set_current_state(TASK_INTERRUPTIBLE); 5231 } /* End of while(1) */ 5232 __set_current_state(TASK_RUNNING); 5233 5234 ql_dbg(ql_dbg_dpc, base_vha, 0x4011, 5235 "DPC handler exiting.\n"); 5236 5237 /* 5238 * Make sure that nobody tries to wake us up again. 5239 */ 5240 ha->dpc_active = 0; 5241 5242 /* Cleanup any residual CTX SRBs. */ 5243 qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16); 5244 5245 return 0; 5246 } 5247 5248 void 5249 qla2xxx_wake_dpc(struct scsi_qla_host *vha) 5250 { 5251 struct qla_hw_data *ha = vha->hw; 5252 struct task_struct *t = ha->dpc_thread; 5253 5254 if (!test_bit(UNLOADING, &vha->dpc_flags) && t) 5255 wake_up_process(t); 5256 } 5257 5258 /* 5259 * qla2x00_rst_aen 5260 * Processes asynchronous reset. 5261 * 5262 * Input: 5263 * ha = adapter block pointer. 5264 */ 5265 static void 5266 qla2x00_rst_aen(scsi_qla_host_t *vha) 5267 { 5268 if (vha->flags.online && !vha->flags.reset_active && 5269 !atomic_read(&vha->loop_down_timer) && 5270 !(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))) { 5271 do { 5272 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); 5273 5274 /* 5275 * Issue marker command only when we are going to start 5276 * the I/O. 5277 */ 5278 vha->marker_needed = 1; 5279 } while (!atomic_read(&vha->loop_down_timer) && 5280 (test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags))); 5281 } 5282 } 5283 5284 /************************************************************************** 5285 * qla2x00_timer 5286 * 5287 * Description: 5288 * One second timer 5289 * 5290 * Context: Interrupt 5291 ***************************************************************************/ 5292 void 5293 qla2x00_timer(scsi_qla_host_t *vha) 5294 { 5295 unsigned long cpu_flags = 0; 5296 int start_dpc = 0; 5297 int index; 5298 srb_t *sp; 5299 uint16_t w; 5300 struct qla_hw_data *ha = vha->hw; 5301 struct req_que *req; 5302 5303 if (ha->flags.eeh_busy) { 5304 ql_dbg(ql_dbg_timer, vha, 0x6000, 5305 "EEH = %d, restarting timer.\n", 5306 ha->flags.eeh_busy); 5307 qla2x00_restart_timer(vha, WATCH_INTERVAL); 5308 return; 5309 } 5310 5311 /* 5312 * Hardware read to raise pending EEH errors during mailbox waits. If 5313 * the read returns -1 then disable the board. 5314 */ 5315 if (!pci_channel_offline(ha->pdev)) { 5316 pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w); 5317 qla2x00_check_reg16_for_disconnect(vha, w); 5318 } 5319 5320 /* Make sure qla82xx_watchdog is run only for physical port */ 5321 if (!vha->vp_idx && IS_P3P_TYPE(ha)) { 5322 if (test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) 5323 start_dpc++; 5324 if (IS_QLA82XX(ha)) 5325 qla82xx_watchdog(vha); 5326 else if (IS_QLA8044(ha)) 5327 qla8044_watchdog(vha); 5328 } 5329 5330 if (!vha->vp_idx && IS_QLAFX00(ha)) 5331 qlafx00_timer_routine(vha); 5332 5333 /* Loop down handler. */ 5334 if (atomic_read(&vha->loop_down_timer) > 0 && 5335 !(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) && 5336 !(test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags)) 5337 && vha->flags.online) { 5338 5339 if (atomic_read(&vha->loop_down_timer) == 5340 vha->loop_down_abort_time) { 5341 5342 ql_log(ql_log_info, vha, 0x6008, 5343 "Loop down - aborting the queues before time expires.\n"); 5344 5345 if (!IS_QLA2100(ha) && vha->link_down_timeout) 5346 atomic_set(&vha->loop_state, LOOP_DEAD); 5347 5348 /* 5349 * Schedule an ISP abort to return any FCP2-device 5350 * commands. 
5351 */ 5352 /* NPIV - scan physical port only */ 5353 if (!vha->vp_idx) { 5354 spin_lock_irqsave(&ha->hardware_lock, 5355 cpu_flags); 5356 req = ha->req_q_map[0]; 5357 for (index = 1; 5358 index < req->num_outstanding_cmds; 5359 index++) { 5360 fc_port_t *sfcp; 5361 5362 sp = req->outstanding_cmds[index]; 5363 if (!sp) 5364 continue; 5365 if (sp->type != SRB_SCSI_CMD) 5366 continue; 5367 sfcp = sp->fcport; 5368 if (!(sfcp->flags & FCF_FCP2_DEVICE)) 5369 continue; 5370 5371 if (IS_QLA82XX(ha)) 5372 set_bit(FCOE_CTX_RESET_NEEDED, 5373 &vha->dpc_flags); 5374 else 5375 set_bit(ISP_ABORT_NEEDED, 5376 &vha->dpc_flags); 5377 break; 5378 } 5379 spin_unlock_irqrestore(&ha->hardware_lock, 5380 cpu_flags); 5381 } 5382 start_dpc++; 5383 } 5384 5385 /* if the loop has been down for 4 minutes, reinit adapter */ 5386 if (atomic_dec_and_test(&vha->loop_down_timer) != 0) { 5387 if (!(vha->device_flags & DFLG_NO_CABLE)) { 5388 ql_log(ql_log_warn, vha, 0x6009, 5389 "Loop down - aborting ISP.\n"); 5390 5391 if (IS_QLA82XX(ha)) 5392 set_bit(FCOE_CTX_RESET_NEEDED, 5393 &vha->dpc_flags); 5394 else 5395 set_bit(ISP_ABORT_NEEDED, 5396 &vha->dpc_flags); 5397 } 5398 } 5399 ql_dbg(ql_dbg_timer, vha, 0x600a, 5400 "Loop down - seconds remaining %d.\n", 5401 atomic_read(&vha->loop_down_timer)); 5402 } 5403 /* Check if beacon LED needs to be blinked for physical host only */ 5404 if (!vha->vp_idx && (ha->beacon_blink_led == 1)) { 5405 /* There is no beacon_blink function for ISP82xx */ 5406 if (!IS_P3P_TYPE(ha)) { 5407 set_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags); 5408 start_dpc++; 5409 } 5410 } 5411 5412 /* Process any deferred work. */ 5413 if (!list_empty(&vha->work_list)) 5414 start_dpc++; 5415 5416 /* Schedule the DPC routine if needed */ 5417 if ((test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) || 5418 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) || 5419 test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags) || 5420 start_dpc || 5421 test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) || 5422 test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags) || 5423 test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags) || 5424 test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) || 5425 test_bit(VP_DPC_NEEDED, &vha->dpc_flags) || 5426 test_bit(RELOGIN_NEEDED, &vha->dpc_flags))) { 5427 ql_dbg(ql_dbg_timer, vha, 0x600b, 5428 "isp_abort_needed=%d loop_resync_needed=%d " 5429 "fcport_update_needed=%d start_dpc=%d " 5430 "reset_marker_needed=%d", 5431 test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags), 5432 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags), 5433 test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags), 5434 start_dpc, 5435 test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags)); 5436 ql_dbg(ql_dbg_timer, vha, 0x600c, 5437 "beacon_blink_needed=%d isp_unrecoverable=%d " 5438 "fcoe_ctx_reset_needed=%d vp_dpc_needed=%d " 5439 "relogin_needed=%d.\n", 5440 test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags), 5441 test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags), 5442 test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags), 5443 test_bit(VP_DPC_NEEDED, &vha->dpc_flags), 5444 test_bit(RELOGIN_NEEDED, &vha->dpc_flags)); 5445 qla2xxx_wake_dpc(vha); 5446 } 5447 5448 qla2x00_restart_timer(vha, WATCH_INTERVAL); 5449 } 5450 5451 /* Firmware interface routines. 
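 *
 * Editor's note: the table below maps each ISP family to a firmware
 * image name fetched through the request_firmware() interface.  A
 * minimal usage sketch (error handling elided, not a verbatim caller):
 *
 *	struct fw_blob *blob = qla2x00_request_firmware(vha);
 *	if (blob && blob->fw) {
 *		const uint8_t *image = blob->fw->data;
 *		size_t len = blob->fw->size;
 *		(hand image/len to the ISP load routine)
 *	}
 *
 * Loaded blobs are cached under qla_fw_lock and released once, at
 * module teardown, by qla2x00_release_firmware().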
*/ 5452 5453 #define FW_BLOBS 11 5454 #define FW_ISP21XX 0 5455 #define FW_ISP22XX 1 5456 #define FW_ISP2300 2 5457 #define FW_ISP2322 3 5458 #define FW_ISP24XX 4 5459 #define FW_ISP25XX 5 5460 #define FW_ISP81XX 6 5461 #define FW_ISP82XX 7 5462 #define FW_ISP2031 8 5463 #define FW_ISP8031 9 5464 #define FW_ISP27XX 10 5465 5466 #define FW_FILE_ISP21XX "ql2100_fw.bin" 5467 #define FW_FILE_ISP22XX "ql2200_fw.bin" 5468 #define FW_FILE_ISP2300 "ql2300_fw.bin" 5469 #define FW_FILE_ISP2322 "ql2322_fw.bin" 5470 #define FW_FILE_ISP24XX "ql2400_fw.bin" 5471 #define FW_FILE_ISP25XX "ql2500_fw.bin" 5472 #define FW_FILE_ISP81XX "ql8100_fw.bin" 5473 #define FW_FILE_ISP82XX "ql8200_fw.bin" 5474 #define FW_FILE_ISP2031 "ql2600_fw.bin" 5475 #define FW_FILE_ISP8031 "ql8300_fw.bin" 5476 #define FW_FILE_ISP27XX "ql2700_fw.bin" 5477 5478 5479 static DEFINE_MUTEX(qla_fw_lock); 5480 5481 static struct fw_blob qla_fw_blobs[FW_BLOBS] = { 5482 { .name = FW_FILE_ISP21XX, .segs = { 0x1000, 0 }, }, 5483 { .name = FW_FILE_ISP22XX, .segs = { 0x1000, 0 }, }, 5484 { .name = FW_FILE_ISP2300, .segs = { 0x800, 0 }, }, 5485 { .name = FW_FILE_ISP2322, .segs = { 0x800, 0x1c000, 0x1e000, 0 }, }, 5486 { .name = FW_FILE_ISP24XX, }, 5487 { .name = FW_FILE_ISP25XX, }, 5488 { .name = FW_FILE_ISP81XX, }, 5489 { .name = FW_FILE_ISP82XX, }, 5490 { .name = FW_FILE_ISP2031, }, 5491 { .name = FW_FILE_ISP8031, }, 5492 { .name = FW_FILE_ISP27XX, }, 5493 }; 5494 5495 struct fw_blob * 5496 qla2x00_request_firmware(scsi_qla_host_t *vha) 5497 { 5498 struct qla_hw_data *ha = vha->hw; 5499 struct fw_blob *blob; 5500 5501 if (IS_QLA2100(ha)) { 5502 blob = &qla_fw_blobs[FW_ISP21XX]; 5503 } else if (IS_QLA2200(ha)) { 5504 blob = &qla_fw_blobs[FW_ISP22XX]; 5505 } else if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) { 5506 blob = &qla_fw_blobs[FW_ISP2300]; 5507 } else if (IS_QLA2322(ha) || IS_QLA6322(ha)) { 5508 blob = &qla_fw_blobs[FW_ISP2322]; 5509 } else if (IS_QLA24XX_TYPE(ha)) { 5510 blob = &qla_fw_blobs[FW_ISP24XX]; 5511 } else if (IS_QLA25XX(ha)) { 5512 blob = &qla_fw_blobs[FW_ISP25XX]; 5513 } else if (IS_QLA81XX(ha)) { 5514 blob = &qla_fw_blobs[FW_ISP81XX]; 5515 } else if (IS_QLA82XX(ha)) { 5516 blob = &qla_fw_blobs[FW_ISP82XX]; 5517 } else if (IS_QLA2031(ha)) { 5518 blob = &qla_fw_blobs[FW_ISP2031]; 5519 } else if (IS_QLA8031(ha)) { 5520 blob = &qla_fw_blobs[FW_ISP8031]; 5521 } else if (IS_QLA27XX(ha)) { 5522 blob = &qla_fw_blobs[FW_ISP27XX]; 5523 } else { 5524 return NULL; 5525 } 5526 5527 mutex_lock(&qla_fw_lock); 5528 if (blob->fw) 5529 goto out; 5530 5531 if (request_firmware(&blob->fw, blob->name, &ha->pdev->dev)) { 5532 ql_log(ql_log_warn, vha, 0x0063, 5533 "Failed to load firmware image (%s).\n", blob->name); 5534 blob->fw = NULL; 5535 blob = NULL; 5536 goto out; 5537 } 5538 5539 out: 5540 mutex_unlock(&qla_fw_lock); 5541 return blob; 5542 } 5543 5544 static void 5545 qla2x00_release_firmware(void) 5546 { 5547 int idx; 5548 5549 mutex_lock(&qla_fw_lock); 5550 for (idx = 0; idx < FW_BLOBS; idx++) 5551 release_firmware(qla_fw_blobs[idx].fw); 5552 mutex_unlock(&qla_fw_lock); 5553 } 5554 5555 static pci_ers_result_t 5556 qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) 5557 { 5558 scsi_qla_host_t *vha = pci_get_drvdata(pdev); 5559 struct qla_hw_data *ha = vha->hw; 5560 5561 ql_dbg(ql_dbg_aer, vha, 0x9000, 5562 "PCI error detected, state %x.\n", state); 5563 5564 switch (state) { 5565 case pci_channel_io_normal: 5566 ha->flags.eeh_busy = 0; 5567 return PCI_ERS_RESULT_CAN_RECOVER; 5568 case 
pci_channel_io_frozen: 5569 ha->flags.eeh_busy = 1; 5570 /* For ISP82XX complete any pending mailbox cmd */ 5571 if (IS_QLA82XX(ha)) { 5572 ha->flags.isp82xx_fw_hung = 1; 5573 ql_dbg(ql_dbg_aer, vha, 0x9001, "Pci channel io frozen\n"); 5574 qla82xx_clear_pending_mbx(vha); 5575 } 5576 qla2x00_free_irqs(vha); 5577 pci_disable_device(pdev); 5578 /* Return back all IOs */ 5579 qla2x00_abort_all_cmds(vha, DID_RESET << 16); 5580 return PCI_ERS_RESULT_NEED_RESET; 5581 case pci_channel_io_perm_failure: 5582 ha->flags.pci_channel_io_perm_failure = 1; 5583 qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16); 5584 return PCI_ERS_RESULT_DISCONNECT; 5585 } 5586 return PCI_ERS_RESULT_NEED_RESET; 5587 } 5588 5589 static pci_ers_result_t 5590 qla2xxx_pci_mmio_enabled(struct pci_dev *pdev) 5591 { 5592 int risc_paused = 0; 5593 uint32_t stat; 5594 unsigned long flags; 5595 scsi_qla_host_t *base_vha = pci_get_drvdata(pdev); 5596 struct qla_hw_data *ha = base_vha->hw; 5597 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 5598 struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24; 5599 5600 if (IS_QLA82XX(ha)) 5601 return PCI_ERS_RESULT_RECOVERED; 5602 5603 spin_lock_irqsave(&ha->hardware_lock, flags); 5604 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 5605 stat = RD_REG_DWORD(&reg->hccr); 5606 if (stat & HCCR_RISC_PAUSE) 5607 risc_paused = 1; 5608 } else if (IS_QLA23XX(ha)) { 5609 stat = RD_REG_DWORD(&reg->u.isp2300.host_status); 5610 if (stat & HSR_RISC_PAUSED) 5611 risc_paused = 1; 5612 } else if (IS_FWI2_CAPABLE(ha)) { 5613 stat = RD_REG_DWORD(&reg24->host_status); 5614 if (stat & HSRX_RISC_PAUSED) 5615 risc_paused = 1; 5616 } 5617 spin_unlock_irqrestore(&ha->hardware_lock, flags); 5618 5619 if (risc_paused) { 5620 ql_log(ql_log_info, base_vha, 0x9003, 5621 "RISC paused -- mmio_enabled, Dumping firmware.\n"); 5622 ha->isp_ops->fw_dump(base_vha, 0); 5623 5624 return PCI_ERS_RESULT_NEED_RESET; 5625 } else 5626 return PCI_ERS_RESULT_RECOVERED; 5627 } 5628 5629 static uint32_t 5630 qla82xx_error_recovery(scsi_qla_host_t *base_vha) 5631 { 5632 uint32_t rval = QLA_FUNCTION_FAILED; 5633 uint32_t drv_active = 0; 5634 struct qla_hw_data *ha = base_vha->hw; 5635 int fn; 5636 struct pci_dev *other_pdev = NULL; 5637 5638 ql_dbg(ql_dbg_aer, base_vha, 0x9006, 5639 "Entered %s.\n", __func__); 5640 5641 set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); 5642 5643 if (base_vha->flags.online) { 5644 /* Abort all outstanding commands, 5645 * so they can be requeued later */ 5646 qla2x00_abort_isp_cleanup(base_vha); 5647 } 5648 5649 5650 fn = PCI_FUNC(ha->pdev->devfn); 5651 while (fn > 0) { 5652 fn--; 5653 ql_dbg(ql_dbg_aer, base_vha, 0x9007, 5654 "Finding pci device at function = 0x%x.\n", fn); 5655 other_pdev = 5656 pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus), 5657 ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn), 5658 fn)); 5659 5660 if (!other_pdev) 5661 continue; 5662 if (atomic_read(&other_pdev->enable_cnt)) { 5663 ql_dbg(ql_dbg_aer, base_vha, 0x9008, 5664 "Found PCI func available and enabled at 0x%x.\n", 5665 fn); 5666 pci_dev_put(other_pdev); 5667 break; 5668 } 5669 pci_dev_put(other_pdev); 5670 } 5671 5672 if (!fn) { 5673 /* Reset owner */ 5674 ql_dbg(ql_dbg_aer, base_vha, 0x9009, 5675 "This devfn is reset owner = 0x%x.\n", 5676 ha->pdev->devfn); 5677 qla82xx_idc_lock(ha); 5678 5679 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 5680 QLA8XXX_DEV_INITIALIZING); 5681 5682 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION, 5683 QLA82XX_IDC_VERSION); 5684 5685 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
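/*
 * Editor's note (sketch under stated assumptions): DRV_ACTIVE is read
 * here as a bitmask of PCI functions that still have an active driver;
 * zero means another function has already completed the reset.  A
 * hypothetical per-function test might look like
 *
 *	if (drv_active & (1 << (fn * 4)))
 *		function_is_active();	// exact bit spacing is
 *					// hardware-specific (assumption)
 *
 * but the code below relies only on the "any bit set" property.
 */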
5686 ql_dbg(ql_dbg_aer, base_vha, 0x900a, 5687 "drv_active = 0x%x.\n", drv_active); 5688 5689 qla82xx_idc_unlock(ha); 5690 /* Reset if the device is not already reset; 5691 * drv_active would be 0 if a reset has already been done 5692 */ 5693 if (drv_active) 5694 rval = qla82xx_start_firmware(base_vha); 5695 else 5696 rval = QLA_SUCCESS; 5697 qla82xx_idc_lock(ha); 5698 5699 if (rval != QLA_SUCCESS) { 5700 ql_log(ql_log_info, base_vha, 0x900b, 5701 "HW State: FAILED.\n"); 5702 qla82xx_clear_drv_active(ha); 5703 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 5704 QLA8XXX_DEV_FAILED); 5705 } else { 5706 ql_log(ql_log_info, base_vha, 0x900c, 5707 "HW State: READY.\n"); 5708 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 5709 QLA8XXX_DEV_READY); 5710 qla82xx_idc_unlock(ha); 5711 ha->flags.isp82xx_fw_hung = 0; 5712 rval = qla82xx_restart_isp(base_vha); 5713 qla82xx_idc_lock(ha); 5714 /* Clear driver state register */ 5715 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, 0); 5716 qla82xx_set_drv_active(base_vha); 5717 } 5718 qla82xx_idc_unlock(ha); 5719 } else { 5720 ql_dbg(ql_dbg_aer, base_vha, 0x900d, 5721 "This devfn is not reset owner = 0x%x.\n", 5722 ha->pdev->devfn); 5723 if ((qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE) == 5724 QLA8XXX_DEV_READY)) { 5725 ha->flags.isp82xx_fw_hung = 0; 5726 rval = qla82xx_restart_isp(base_vha); 5727 qla82xx_idc_lock(ha); 5728 qla82xx_set_drv_active(base_vha); 5729 qla82xx_idc_unlock(ha); 5730 } 5731 } 5732 clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); 5733 5734 return rval; 5735 } 5736 5737 static pci_ers_result_t 5738 qla2xxx_pci_slot_reset(struct pci_dev *pdev) 5739 { 5740 pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT; 5741 scsi_qla_host_t *base_vha = pci_get_drvdata(pdev); 5742 struct qla_hw_data *ha = base_vha->hw; 5743 struct rsp_que *rsp; 5744 int rc, retries = 10; 5745 5746 ql_dbg(ql_dbg_aer, base_vha, 0x9004, 5747 "Slot Reset.\n"); 5748 5749 /* Workaround: the qla2xxx driver, which accesses hardware early in 5750 * recovery, needs the error state to be pci_channel_io_normal; 5751 * otherwise mailbox commands time out. 
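 * (Editor's note: while error_state is anything other than
 * pci_channel_io_normal, pci_channel_offline() reports the device as
 * offline, e.g.
 *
 *	if (pci_channel_offline(pdev))
 *		return;		// early register access skipped
 *
 * so the state is forced back to normal before the reinitialization
 * below touches hardware.)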
5752 */ 5753 pdev->error_state = pci_channel_io_normal; 5754 5755 pci_restore_state(pdev); 5756 5757 /* pci_restore_state() clears the saved_state flag of the device 5758 * save restored state which resets saved_state flag 5759 */ 5760 pci_save_state(pdev); 5761 5762 if (ha->mem_only) 5763 rc = pci_enable_device_mem(pdev); 5764 else 5765 rc = pci_enable_device(pdev); 5766 5767 if (rc) { 5768 ql_log(ql_log_warn, base_vha, 0x9005, 5769 "Can't re-enable PCI device after reset.\n"); 5770 goto exit_slot_reset; 5771 } 5772 5773 rsp = ha->rsp_q_map[0]; 5774 if (qla2x00_request_irqs(ha, rsp)) 5775 goto exit_slot_reset; 5776 5777 if (ha->isp_ops->pci_config(base_vha)) 5778 goto exit_slot_reset; 5779 5780 if (IS_QLA82XX(ha)) { 5781 if (qla82xx_error_recovery(base_vha) == QLA_SUCCESS) { 5782 ret = PCI_ERS_RESULT_RECOVERED; 5783 goto exit_slot_reset; 5784 } else 5785 goto exit_slot_reset; 5786 } 5787 5788 while (ha->flags.mbox_busy && retries--) 5789 msleep(1000); 5790 5791 set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); 5792 if (ha->isp_ops->abort_isp(base_vha) == QLA_SUCCESS) 5793 ret = PCI_ERS_RESULT_RECOVERED; 5794 clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); 5795 5796 5797 exit_slot_reset: 5798 ql_dbg(ql_dbg_aer, base_vha, 0x900e, 5799 "slot_reset return %x.\n", ret); 5800 5801 return ret; 5802 } 5803 5804 static void 5805 qla2xxx_pci_resume(struct pci_dev *pdev) 5806 { 5807 scsi_qla_host_t *base_vha = pci_get_drvdata(pdev); 5808 struct qla_hw_data *ha = base_vha->hw; 5809 int ret; 5810 5811 ql_dbg(ql_dbg_aer, base_vha, 0x900f, 5812 "pci_resume.\n"); 5813 5814 ret = qla2x00_wait_for_hba_online(base_vha); 5815 if (ret != QLA_SUCCESS) { 5816 ql_log(ql_log_fatal, base_vha, 0x9002, 5817 "The device failed to resume I/O from slot/link_reset.\n"); 5818 } 5819 5820 pci_cleanup_aer_uncorrect_error_status(pdev); 5821 5822 ha->flags.eeh_busy = 0; 5823 } 5824 5825 static void 5826 qla83xx_disable_laser(scsi_qla_host_t *vha) 5827 { 5828 uint32_t reg, data, fn; 5829 struct qla_hw_data *ha = vha->hw; 5830 struct device_reg_24xx __iomem *isp_reg = &ha->iobase->isp24; 5831 5832 /* pci func #/port # */ 5833 ql_dbg(ql_dbg_init, vha, 0x004b, 5834 "Disabling Laser for hba: %p\n", vha); 5835 5836 fn = (RD_REG_DWORD(&isp_reg->ctrl_status) & 5837 (BIT_15|BIT_14|BIT_13|BIT_12)); 5838 5839 fn = (fn >> 12); 5840 5841 if (fn & 1) 5842 reg = PORT_1_2031; 5843 else 5844 reg = PORT_0_2031; 5845 5846 data = LASER_OFF_2031; 5847 5848 qla83xx_wr_reg(vha, reg, data); 5849 } 5850 5851 static const struct pci_error_handlers qla2xxx_err_handler = { 5852 .error_detected = qla2xxx_pci_error_detected, 5853 .mmio_enabled = qla2xxx_pci_mmio_enabled, 5854 .slot_reset = qla2xxx_pci_slot_reset, 5855 .resume = qla2xxx_pci_resume, 5856 }; 5857 5858 static struct pci_device_id qla2xxx_pci_tbl[] = { 5859 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2100) }, 5860 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2200) }, 5861 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2300) }, 5862 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2312) }, 5863 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2322) }, 5864 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP6312) }, 5865 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP6322) }, 5866 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2422) }, 5867 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2432) }, 5868 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8432) }, 5869 { 
static const struct pci_error_handlers qla2xxx_err_handler = {
	.error_detected = qla2xxx_pci_error_detected,
	.mmio_enabled = qla2xxx_pci_mmio_enabled,
	.slot_reset = qla2xxx_pci_slot_reset,
	.resume = qla2xxx_pci_resume,
};

static struct pci_device_id qla2xxx_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2100) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2200) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2300) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2312) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2322) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP6312) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP6322) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2422) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2432) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8432) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5422) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5432) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2532) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2031) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8001) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8021) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8031) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISPF001) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8044) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2071) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2271) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2261) },
	{ 0 },
};
MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl);

static struct pci_driver qla2xxx_pci_driver = {
	.name		= QLA2XXX_DRIVER_NAME,
	.driver		= {
		.owner		= THIS_MODULE,
	},
	.id_table	= qla2xxx_pci_tbl,
	.probe		= qla2x00_probe_one,
	.remove		= qla2x00_remove_one,
	.shutdown	= qla2x00_shutdown,
	.err_handler	= &qla2xxx_err_handler,
};

static const struct file_operations apidev_fops = {
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
};
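
/*
 * Module-level setup.  The SRB cache, target-mode state (qlt_init()),
 * the FC transport templates and the api char device must all be in
 * place before the PCI driver is registered, since qla2x00_probe_one()
 * can run as soon as pci_register_driver() returns.
 */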

/**
 * qla2x00_module_init - Module initialization.
 */
static int __init
qla2x00_module_init(void)
{
	int ret = 0;

	/* Allocate cache for SRBs. */
	srb_cachep = kmem_cache_create("qla2xxx_srbs", sizeof(srb_t), 0,
	    SLAB_HWCACHE_ALIGN, NULL);
	if (srb_cachep == NULL) {
		ql_log(ql_log_fatal, NULL, 0x0001,
		    "Unable to allocate SRB cache...Failing load!.\n");
		return -ENOMEM;
	}

	/* Initialize target kmem_cache and mem_pools */
	ret = qlt_init();
	if (ret < 0) {
		kmem_cache_destroy(srb_cachep);
		return ret;
	} else if (ret > 0) {
		/*
		 * If initiator mode is explicitly disabled by qlt_init(),
		 * prevent scsi_transport_fc.c:fc_scsi_scan_rport() from
		 * performing scsi_scan_target() during LOOP UP event.
		 */
		qla2xxx_transport_functions.disable_target_scan = 1;
		qla2xxx_transport_vport_functions.disable_target_scan = 1;
	}

	/* Derive version string. */
	strcpy(qla2x00_version_str, QLA2XXX_VERSION);
	if (ql2xextended_error_logging)
		strcat(qla2x00_version_str, "-debug");

	qla2xxx_transport_template =
	    fc_attach_transport(&qla2xxx_transport_functions);
	if (!qla2xxx_transport_template) {
		kmem_cache_destroy(srb_cachep);
		ql_log(ql_log_fatal, NULL, 0x0002,
		    "fc_attach_transport failed...Failing load!.\n");
		qlt_exit();
		return -ENODEV;
	}

	apidev_major = register_chrdev(0, QLA2XXX_APIDEV, &apidev_fops);
	if (apidev_major < 0) {
		ql_log(ql_log_fatal, NULL, 0x0003,
		    "Unable to register char device %s.\n", QLA2XXX_APIDEV);
	}

	qla2xxx_transport_vport_template =
	    fc_attach_transport(&qla2xxx_transport_vport_functions);
	if (!qla2xxx_transport_vport_template) {
		kmem_cache_destroy(srb_cachep);
		qlt_exit();
		fc_release_transport(qla2xxx_transport_template);
		ql_log(ql_log_fatal, NULL, 0x0004,
		    "fc_attach_transport vport failed...Failing load!.\n");
		return -ENODEV;
	}
	ql_log(ql_log_info, NULL, 0x0005,
	    "QLogic Fibre Channel HBA Driver: %s.\n",
	    qla2x00_version_str);
	ret = pci_register_driver(&qla2xxx_pci_driver);
	if (ret) {
		kmem_cache_destroy(srb_cachep);
		qlt_exit();
		fc_release_transport(qla2xxx_transport_template);
		fc_release_transport(qla2xxx_transport_vport_template);
		ql_log(ql_log_fatal, NULL, 0x0006,
		    "pci_register_driver failed...ret=%d Failing load!.\n",
		    ret);
	}
	return ret;
}

/**
 * qla2x00_module_exit - Module cleanup.
 */
static void __exit
qla2x00_module_exit(void)
{
	unregister_chrdev(apidev_major, QLA2XXX_APIDEV);
	pci_unregister_driver(&qla2xxx_pci_driver);
	qla2x00_release_firmware();
	kmem_cache_destroy(srb_cachep);
	qlt_exit();
	if (ctx_cachep)
		kmem_cache_destroy(ctx_cachep);
	fc_release_transport(qla2xxx_transport_template);
	fc_release_transport(qla2xxx_transport_vport_template);
}

module_init(qla2x00_module_init);
module_exit(qla2x00_module_exit);

MODULE_AUTHOR("QLogic Corporation");
MODULE_DESCRIPTION("QLogic Fibre Channel HBA Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(QLA2XXX_VERSION);
MODULE_FIRMWARE(FW_FILE_ISP21XX);
MODULE_FIRMWARE(FW_FILE_ISP22XX);
MODULE_FIRMWARE(FW_FILE_ISP2300);
MODULE_FIRMWARE(FW_FILE_ISP2322);
MODULE_FIRMWARE(FW_FILE_ISP24XX);
MODULE_FIRMWARE(FW_FILE_ISP25XX);
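
/*
 * Usage note: loading the module with any non-zero
 * ql2xextended_error_logging value also tags the advertised driver
 * version with a "-debug" suffix (see qla2x00_module_init()), e.g.:
 *
 *	# modprobe qla2xxx ql2xextended_error_logging=0x7fffffff
 *
 * The parameter is interpreted as a log-enable bitmask; the value shown
 * above is only an illustration.
 */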