1 /* 2 * QLogic Fibre Channel HBA Driver 3 * Copyright (c) 2003-2014 QLogic Corporation 4 * 5 * See LICENSE.qla2xxx for copyright and licensing details. 6 */ 7 #include "qla_def.h" 8 9 #include <linux/moduleparam.h> 10 #include <linux/vmalloc.h> 11 #include <linux/delay.h> 12 #include <linux/kthread.h> 13 #include <linux/mutex.h> 14 #include <linux/kobject.h> 15 #include <linux/slab.h> 16 #include <linux/blk-mq-pci.h> 17 #include <scsi/scsi_tcq.h> 18 #include <scsi/scsicam.h> 19 #include <scsi/scsi_transport.h> 20 #include <scsi/scsi_transport_fc.h> 21 22 #include "qla_target.h" 23 24 /* 25 * Driver version 26 */ 27 char qla2x00_version_str[40]; 28 29 static int apidev_major; 30 31 /* 32 * SRB allocation cache 33 */ 34 struct kmem_cache *srb_cachep; 35 36 /* 37 * CT6 CTX allocation cache 38 */ 39 static struct kmem_cache *ctx_cachep; 40 /* 41 * error level for logging 42 */ 43 int ql_errlev = ql_log_all; 44 45 static int ql2xenableclass2; 46 module_param(ql2xenableclass2, int, S_IRUGO|S_IRUSR); 47 MODULE_PARM_DESC(ql2xenableclass2, 48 "Specify if Class 2 operations are supported from the very " 49 "beginning. Default is 0 - class 2 not supported."); 50 51 52 int ql2xlogintimeout = 20; 53 module_param(ql2xlogintimeout, int, S_IRUGO); 54 MODULE_PARM_DESC(ql2xlogintimeout, 55 "Login timeout value in seconds."); 56 57 int qlport_down_retry; 58 module_param(qlport_down_retry, int, S_IRUGO); 59 MODULE_PARM_DESC(qlport_down_retry, 60 "Maximum number of command retries to a port that returns " 61 "a PORT-DOWN status."); 62 63 int ql2xplogiabsentdevice; 64 module_param(ql2xplogiabsentdevice, int, S_IRUGO|S_IWUSR); 65 MODULE_PARM_DESC(ql2xplogiabsentdevice, 66 "Option to enable PLOGI to devices that are not present after " 67 "a Fabric scan. This is needed for several broken switches. " 68 "Default is 0 - no PLOGI. 1 - perfom PLOGI."); 69 70 int ql2xloginretrycount = 0; 71 module_param(ql2xloginretrycount, int, S_IRUGO); 72 MODULE_PARM_DESC(ql2xloginretrycount, 73 "Specify an alternate value for the NVRAM login retry count."); 74 75 int ql2xallocfwdump = 1; 76 module_param(ql2xallocfwdump, int, S_IRUGO); 77 MODULE_PARM_DESC(ql2xallocfwdump, 78 "Option to enable allocation of memory for a firmware dump " 79 "during HBA initialization. Memory allocation requirements " 80 "vary by ISP type. Default is 1 - allocate memory."); 81 82 int ql2xextended_error_logging; 83 module_param(ql2xextended_error_logging, int, S_IRUGO|S_IWUSR); 84 module_param_named(logging, ql2xextended_error_logging, int, S_IRUGO|S_IWUSR); 85 MODULE_PARM_DESC(ql2xextended_error_logging, 86 "Option to enable extended error logging,\n" 87 "\t\tDefault is 0 - no logging. 0x40000000 - Module Init & Probe.\n" 88 "\t\t0x20000000 - Mailbox Cmnds. 0x10000000 - Device Discovery.\n" 89 "\t\t0x08000000 - IO tracing. 0x04000000 - DPC Thread.\n" 90 "\t\t0x02000000 - Async events. 0x01000000 - Timer routines.\n" 91 "\t\t0x00800000 - User space. 0x00400000 - Task Management.\n" 92 "\t\t0x00200000 - AER/EEH. 0x00100000 - Multi Q.\n" 93 "\t\t0x00080000 - P3P Specific. 0x00040000 - Virtual Port.\n" 94 "\t\t0x00020000 - Buffer Dump. 0x00010000 - Misc.\n" 95 "\t\t0x00008000 - Verbose. 0x00004000 - Target.\n" 96 "\t\t0x00002000 - Target Mgmt. 0x00001000 - Target TMF.\n" 97 "\t\t0x7fffffff - For enabling all logs, can be too many logs.\n" 98 "\t\t0x1e400000 - Preferred value for capturing essential " 99 "debug information (equivalent to old " 100 "ql2xextended_error_logging=1).\n" 101 "\t\tDo LOGICAL OR of the value to enable more than one level"); 102 103 int ql2xshiftctondsd = 6; 104 module_param(ql2xshiftctondsd, int, S_IRUGO); 105 MODULE_PARM_DESC(ql2xshiftctondsd, 106 "Set to control shifting of command type processing " 107 "based on total number of SG elements."); 108 109 int ql2xfdmienable=1; 110 module_param(ql2xfdmienable, int, S_IRUGO|S_IWUSR); 111 module_param_named(fdmi, ql2xfdmienable, int, S_IRUGO|S_IWUSR); 112 MODULE_PARM_DESC(ql2xfdmienable, 113 "Enables FDMI registrations. " 114 "0 - no FDMI. Default is 1 - perform FDMI."); 115 116 #define MAX_Q_DEPTH 32 117 static int ql2xmaxqdepth = MAX_Q_DEPTH; 118 module_param(ql2xmaxqdepth, int, S_IRUGO|S_IWUSR); 119 MODULE_PARM_DESC(ql2xmaxqdepth, 120 "Maximum queue depth to set for each LUN. " 121 "Default is 32."); 122 123 int ql2xenabledif = 2; 124 module_param(ql2xenabledif, int, S_IRUGO); 125 MODULE_PARM_DESC(ql2xenabledif, 126 " Enable T10-CRC-DIF:\n" 127 " Default is 2.\n" 128 " 0 -- No DIF Support\n" 129 " 1 -- Enable DIF for all types\n" 130 " 2 -- Enable DIF for all types, except Type 0.\n"); 131 132 int ql2xenablehba_err_chk = 2; 133 module_param(ql2xenablehba_err_chk, int, S_IRUGO|S_IWUSR); 134 MODULE_PARM_DESC(ql2xenablehba_err_chk, 135 " Enable T10-CRC-DIF Error isolation by HBA:\n" 136 " Default is 2.\n" 137 " 0 -- Error isolation disabled\n" 138 " 1 -- Error isolation enabled only for DIX Type 0\n" 139 " 2 -- Error isolation enabled for all Types\n"); 140 141 int ql2xiidmaenable=1; 142 module_param(ql2xiidmaenable, int, S_IRUGO); 143 MODULE_PARM_DESC(ql2xiidmaenable, 144 "Enables iIDMA settings " 145 "Default is 1 - perform iIDMA. 0 - no iIDMA."); 146 147 int ql2xmqsupport = 1; 148 module_param(ql2xmqsupport, int, S_IRUGO); 149 MODULE_PARM_DESC(ql2xmqsupport, 150 "Enable on demand multiple queue pairs support " 151 "Default is 1 for supported. " 152 "Set it to 0 to turn off mq qpair support."); 153 154 int ql2xfwloadbin; 155 module_param(ql2xfwloadbin, int, S_IRUGO|S_IWUSR); 156 module_param_named(fwload, ql2xfwloadbin, int, S_IRUGO|S_IWUSR); 157 MODULE_PARM_DESC(ql2xfwloadbin, 158 "Option to specify location from which to load ISP firmware:.\n" 159 " 2 -- load firmware via the request_firmware() (hotplug).\n" 160 " interface.\n" 161 " 1 -- load firmware from flash.\n" 162 " 0 -- use default semantics.\n"); 163 164 int ql2xetsenable; 165 module_param(ql2xetsenable, int, S_IRUGO); 166 MODULE_PARM_DESC(ql2xetsenable, 167 "Enables firmware ETS burst." 168 "Default is 0 - skip ETS enablement."); 169 170 int ql2xdbwr = 1; 171 module_param(ql2xdbwr, int, S_IRUGO|S_IWUSR); 172 MODULE_PARM_DESC(ql2xdbwr, 173 "Option to specify scheme for request queue posting.\n" 174 " 0 -- Regular doorbell.\n" 175 " 1 -- CAMRAM doorbell (faster).\n"); 176 177 int ql2xtargetreset = 1; 178 module_param(ql2xtargetreset, int, S_IRUGO); 179 MODULE_PARM_DESC(ql2xtargetreset, 180 "Enable target reset." 181 "Default is 1 - use hw defaults."); 182 183 int ql2xgffidenable; 184 module_param(ql2xgffidenable, int, S_IRUGO); 185 MODULE_PARM_DESC(ql2xgffidenable, 186 "Enables GFF_ID checks of port type. " 187 "Default is 0 - Do not use GFF_ID information."); 188 189 int ql2xasynctmfenable; 190 module_param(ql2xasynctmfenable, int, S_IRUGO); 191 MODULE_PARM_DESC(ql2xasynctmfenable, 192 "Enables issue of TM IOCBs asynchronously via IOCB mechanism" 193 "Default is 0 - Issue TM IOCBs via mailbox mechanism."); 194 195 int ql2xdontresethba; 196 module_param(ql2xdontresethba, int, S_IRUGO|S_IWUSR); 197 MODULE_PARM_DESC(ql2xdontresethba, 198 "Option to specify reset behaviour.\n" 199 " 0 (Default) -- Reset on failure.\n" 200 " 1 -- Do not reset on failure.\n"); 201 202 uint64_t ql2xmaxlun = MAX_LUNS; 203 module_param(ql2xmaxlun, ullong, S_IRUGO); 204 MODULE_PARM_DESC(ql2xmaxlun, 205 "Defines the maximum LU number to register with the SCSI " 206 "midlayer. Default is 65535."); 207 208 int ql2xmdcapmask = 0x1F; 209 module_param(ql2xmdcapmask, int, S_IRUGO); 210 MODULE_PARM_DESC(ql2xmdcapmask, 211 "Set the Minidump driver capture mask level. " 212 "Default is 0x1F - Can be set to 0x3, 0x7, 0xF, 0x1F, 0x7F."); 213 214 int ql2xmdenable = 1; 215 module_param(ql2xmdenable, int, S_IRUGO); 216 MODULE_PARM_DESC(ql2xmdenable, 217 "Enable/disable MiniDump. " 218 "0 - MiniDump disabled. " 219 "1 (Default) - MiniDump enabled."); 220 221 int ql2xexlogins = 0; 222 module_param(ql2xexlogins, uint, S_IRUGO|S_IWUSR); 223 MODULE_PARM_DESC(ql2xexlogins, 224 "Number of extended Logins. " 225 "0 (Default)- Disabled."); 226 227 int ql2xexchoffld = 0; 228 module_param(ql2xexchoffld, uint, S_IRUGO|S_IWUSR); 229 MODULE_PARM_DESC(ql2xexchoffld, 230 "Number of exchanges to offload. " 231 "0 (Default)- Disabled."); 232 233 int ql2xfwholdabts = 0; 234 module_param(ql2xfwholdabts, int, S_IRUGO); 235 MODULE_PARM_DESC(ql2xfwholdabts, 236 "Allow FW to hold status IOCB until ABTS rsp received. " 237 "0 (Default) Do not set fw option. " 238 "1 - Set fw option to hold ABTS."); 239 240 int ql2xmvasynctoatio = 1; 241 module_param(ql2xmvasynctoatio, int, S_IRUGO|S_IWUSR); 242 MODULE_PARM_DESC(ql2xmvasynctoatio, 243 "Move PUREX, ABTS RX and RIDA IOCBs to ATIOQ" 244 "0 (Default). Do not move IOCBs" 245 "1 - Move IOCBs."); 246 247 /* 248 * SCSI host template entry points 249 */ 250 static int qla2xxx_slave_configure(struct scsi_device * device); 251 static int qla2xxx_slave_alloc(struct scsi_device *); 252 static int qla2xxx_scan_finished(struct Scsi_Host *, unsigned long time); 253 static void qla2xxx_scan_start(struct Scsi_Host *); 254 static void qla2xxx_slave_destroy(struct scsi_device *); 255 static int qla2xxx_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmd); 256 static int qla2xxx_eh_abort(struct scsi_cmnd *); 257 static int qla2xxx_eh_device_reset(struct scsi_cmnd *); 258 static int qla2xxx_eh_target_reset(struct scsi_cmnd *); 259 static int qla2xxx_eh_bus_reset(struct scsi_cmnd *); 260 static int qla2xxx_eh_host_reset(struct scsi_cmnd *); 261 262 static void qla2x00_clear_drv_active(struct qla_hw_data *); 263 static void qla2x00_free_device(scsi_qla_host_t *); 264 static void qla83xx_disable_laser(scsi_qla_host_t *vha); 265 static int qla2xxx_map_queues(struct Scsi_Host *shost); 266 267 struct scsi_host_template qla2xxx_driver_template = { 268 .module = THIS_MODULE, 269 .name = QLA2XXX_DRIVER_NAME, 270 .queuecommand = qla2xxx_queuecommand, 271 272 .eh_timed_out = fc_eh_timed_out, 273 .eh_abort_handler = qla2xxx_eh_abort, 274 .eh_device_reset_handler = qla2xxx_eh_device_reset, 275 .eh_target_reset_handler = qla2xxx_eh_target_reset, 276 .eh_bus_reset_handler = qla2xxx_eh_bus_reset, 277 .eh_host_reset_handler = qla2xxx_eh_host_reset, 278 279 .slave_configure = qla2xxx_slave_configure, 280 281 .slave_alloc = qla2xxx_slave_alloc, 282 .slave_destroy = qla2xxx_slave_destroy, 283 .scan_finished = qla2xxx_scan_finished, 284 .scan_start = qla2xxx_scan_start, 285 .change_queue_depth = scsi_change_queue_depth, 286 .map_queues = qla2xxx_map_queues, 287 .this_id = -1, 288 .cmd_per_lun = 3, 289 .use_clustering = ENABLE_CLUSTERING, 290 .sg_tablesize = SG_ALL, 291 292 .max_sectors = 0xFFFF, 293 .shost_attrs = qla2x00_host_attrs, 294 295 .supported_mode = MODE_INITIATOR, 296 .track_queue_depth = 1, 297 }; 298 299 static struct scsi_transport_template *qla2xxx_transport_template = NULL; 300 struct scsi_transport_template *qla2xxx_transport_vport_template = NULL; 301 302 /* TODO Convert to inlines 303 * 304 * Timer routines 305 */ 306 307 __inline__ void 308 qla2x00_start_timer(scsi_qla_host_t *vha, void *func, unsigned long interval) 309 { 310 init_timer(&vha->timer); 311 vha->timer.expires = jiffies + interval * HZ; 312 vha->timer.data = (unsigned long)vha; 313 vha->timer.function = (void (*)(unsigned long))func; 314 add_timer(&vha->timer); 315 vha->timer_active = 1; 316 } 317 318 static inline void 319 qla2x00_restart_timer(scsi_qla_host_t *vha, unsigned long interval) 320 { 321 /* Currently used for 82XX only. */ 322 if (vha->device_flags & DFLG_DEV_FAILED) { 323 ql_dbg(ql_dbg_timer, vha, 0x600d, 324 "Device in a failed state, returning.\n"); 325 return; 326 } 327 328 mod_timer(&vha->timer, jiffies + interval * HZ); 329 } 330 331 static __inline__ void 332 qla2x00_stop_timer(scsi_qla_host_t *vha) 333 { 334 del_timer_sync(&vha->timer); 335 vha->timer_active = 0; 336 } 337 338 static int qla2x00_do_dpc(void *data); 339 340 static void qla2x00_rst_aen(scsi_qla_host_t *); 341 342 static int qla2x00_mem_alloc(struct qla_hw_data *, uint16_t, uint16_t, 343 struct req_que **, struct rsp_que **); 344 static void qla2x00_free_fw_dump(struct qla_hw_data *); 345 static void qla2x00_mem_free(struct qla_hw_data *); 346 int qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd, 347 struct qla_qpair *qpair); 348 349 /* -------------------------------------------------------------------------- */ 350 static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req, 351 struct rsp_que *rsp) 352 { 353 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 354 ha->req_q_map = kzalloc(sizeof(struct req_que *) * ha->max_req_queues, 355 GFP_KERNEL); 356 if (!ha->req_q_map) { 357 ql_log(ql_log_fatal, vha, 0x003b, 358 "Unable to allocate memory for request queue ptrs.\n"); 359 goto fail_req_map; 360 } 361 362 ha->rsp_q_map = kzalloc(sizeof(struct rsp_que *) * ha->max_rsp_queues, 363 GFP_KERNEL); 364 if (!ha->rsp_q_map) { 365 ql_log(ql_log_fatal, vha, 0x003c, 366 "Unable to allocate memory for response queue ptrs.\n"); 367 goto fail_rsp_map; 368 } 369 370 if (ql2xmqsupport && ha->max_qpairs) { 371 ha->queue_pair_map = kcalloc(ha->max_qpairs, sizeof(struct qla_qpair *), 372 GFP_KERNEL); 373 if (!ha->queue_pair_map) { 374 ql_log(ql_log_fatal, vha, 0x0180, 375 "Unable to allocate memory for queue pair ptrs.\n"); 376 goto fail_qpair_map; 377 } 378 ha->base_qpair = kzalloc(sizeof(struct qla_qpair), GFP_KERNEL); 379 if (ha->base_qpair == NULL) { 380 ql_log(ql_log_warn, vha, 0x0182, 381 "Failed to allocate base queue pair memory.\n"); 382 goto fail_base_qpair; 383 } 384 ha->base_qpair->req = req; 385 ha->base_qpair->rsp = rsp; 386 } 387 388 /* 389 * Make sure we record at least the request and response queue zero in 390 * case we need to free them if part of the probe fails. 391 */ 392 ha->rsp_q_map[0] = rsp; 393 ha->req_q_map[0] = req; 394 set_bit(0, ha->rsp_qid_map); 395 set_bit(0, ha->req_qid_map); 396 return 1; 397 398 fail_base_qpair: 399 kfree(ha->queue_pair_map); 400 fail_qpair_map: 401 kfree(ha->rsp_q_map); 402 ha->rsp_q_map = NULL; 403 fail_rsp_map: 404 kfree(ha->req_q_map); 405 ha->req_q_map = NULL; 406 fail_req_map: 407 return -ENOMEM; 408 } 409 410 static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req) 411 { 412 if (IS_QLAFX00(ha)) { 413 if (req && req->ring_fx00) 414 dma_free_coherent(&ha->pdev->dev, 415 (req->length_fx00 + 1) * sizeof(request_t), 416 req->ring_fx00, req->dma_fx00); 417 } else if (req && req->ring) 418 dma_free_coherent(&ha->pdev->dev, 419 (req->length + 1) * sizeof(request_t), 420 req->ring, req->dma); 421 422 if (req) 423 kfree(req->outstanding_cmds); 424 425 kfree(req); 426 } 427 428 static void qla2x00_free_rsp_que(struct qla_hw_data *ha, struct rsp_que *rsp) 429 { 430 if (IS_QLAFX00(ha)) { 431 if (rsp && rsp->ring) 432 dma_free_coherent(&ha->pdev->dev, 433 (rsp->length_fx00 + 1) * sizeof(request_t), 434 rsp->ring_fx00, rsp->dma_fx00); 435 } else if (rsp && rsp->ring) { 436 dma_free_coherent(&ha->pdev->dev, 437 (rsp->length + 1) * sizeof(response_t), 438 rsp->ring, rsp->dma); 439 } 440 kfree(rsp); 441 } 442 443 static void qla2x00_free_queues(struct qla_hw_data *ha) 444 { 445 struct req_que *req; 446 struct rsp_que *rsp; 447 int cnt; 448 unsigned long flags; 449 450 spin_lock_irqsave(&ha->hardware_lock, flags); 451 for (cnt = 0; cnt < ha->max_req_queues; cnt++) { 452 if (!test_bit(cnt, ha->req_qid_map)) 453 continue; 454 455 req = ha->req_q_map[cnt]; 456 clear_bit(cnt, ha->req_qid_map); 457 ha->req_q_map[cnt] = NULL; 458 459 spin_unlock_irqrestore(&ha->hardware_lock, flags); 460 qla2x00_free_req_que(ha, req); 461 spin_lock_irqsave(&ha->hardware_lock, flags); 462 } 463 spin_unlock_irqrestore(&ha->hardware_lock, flags); 464 465 kfree(ha->req_q_map); 466 ha->req_q_map = NULL; 467 468 469 spin_lock_irqsave(&ha->hardware_lock, flags); 470 for (cnt = 0; cnt < ha->max_rsp_queues; cnt++) { 471 if (!test_bit(cnt, ha->rsp_qid_map)) 472 continue; 473 474 rsp = ha->rsp_q_map[cnt]; 475 clear_bit(cnt, ha->rsp_qid_map); 476 ha->rsp_q_map[cnt] = NULL; 477 spin_unlock_irqrestore(&ha->hardware_lock, flags); 478 qla2x00_free_rsp_que(ha, rsp); 479 spin_lock_irqsave(&ha->hardware_lock, flags); 480 } 481 spin_unlock_irqrestore(&ha->hardware_lock, flags); 482 483 kfree(ha->rsp_q_map); 484 ha->rsp_q_map = NULL; 485 } 486 487 static char * 488 qla2x00_pci_info_str(struct scsi_qla_host *vha, char *str) 489 { 490 struct qla_hw_data *ha = vha->hw; 491 static char *pci_bus_modes[] = { 492 "33", "66", "100", "133", 493 }; 494 uint16_t pci_bus; 495 496 strcpy(str, "PCI"); 497 pci_bus = (ha->pci_attr & (BIT_9 | BIT_10)) >> 9; 498 if (pci_bus) { 499 strcat(str, "-X ("); 500 strcat(str, pci_bus_modes[pci_bus]); 501 } else { 502 pci_bus = (ha->pci_attr & BIT_8) >> 8; 503 strcat(str, " ("); 504 strcat(str, pci_bus_modes[pci_bus]); 505 } 506 strcat(str, " MHz)"); 507 508 return (str); 509 } 510 511 static char * 512 qla24xx_pci_info_str(struct scsi_qla_host *vha, char *str) 513 { 514 static char *pci_bus_modes[] = { "33", "66", "100", "133", }; 515 struct qla_hw_data *ha = vha->hw; 516 uint32_t pci_bus; 517 518 if (pci_is_pcie(ha->pdev)) { 519 char lwstr[6]; 520 uint32_t lstat, lspeed, lwidth; 521 522 pcie_capability_read_dword(ha->pdev, PCI_EXP_LNKCAP, &lstat); 523 lspeed = lstat & PCI_EXP_LNKCAP_SLS; 524 lwidth = (lstat & PCI_EXP_LNKCAP_MLW) >> 4; 525 526 strcpy(str, "PCIe ("); 527 switch (lspeed) { 528 case 1: 529 strcat(str, "2.5GT/s "); 530 break; 531 case 2: 532 strcat(str, "5.0GT/s "); 533 break; 534 case 3: 535 strcat(str, "8.0GT/s "); 536 break; 537 default: 538 strcat(str, "<unknown> "); 539 break; 540 } 541 snprintf(lwstr, sizeof(lwstr), "x%d)", lwidth); 542 strcat(str, lwstr); 543 544 return str; 545 } 546 547 strcpy(str, "PCI"); 548 pci_bus = (ha->pci_attr & CSRX_PCIX_BUS_MODE_MASK) >> 8; 549 if (pci_bus == 0 || pci_bus == 8) { 550 strcat(str, " ("); 551 strcat(str, pci_bus_modes[pci_bus >> 3]); 552 } else { 553 strcat(str, "-X "); 554 if (pci_bus & BIT_2) 555 strcat(str, "Mode 2"); 556 else 557 strcat(str, "Mode 1"); 558 strcat(str, " ("); 559 strcat(str, pci_bus_modes[pci_bus & ~BIT_2]); 560 } 561 strcat(str, " MHz)"); 562 563 return str; 564 } 565 566 static char * 567 qla2x00_fw_version_str(struct scsi_qla_host *vha, char *str, size_t size) 568 { 569 char un_str[10]; 570 struct qla_hw_data *ha = vha->hw; 571 572 snprintf(str, size, "%d.%02d.%02d ", ha->fw_major_version, 573 ha->fw_minor_version, ha->fw_subminor_version); 574 575 if (ha->fw_attributes & BIT_9) { 576 strcat(str, "FLX"); 577 return (str); 578 } 579 580 switch (ha->fw_attributes & 0xFF) { 581 case 0x7: 582 strcat(str, "EF"); 583 break; 584 case 0x17: 585 strcat(str, "TP"); 586 break; 587 case 0x37: 588 strcat(str, "IP"); 589 break; 590 case 0x77: 591 strcat(str, "VI"); 592 break; 593 default: 594 sprintf(un_str, "(%x)", ha->fw_attributes); 595 strcat(str, un_str); 596 break; 597 } 598 if (ha->fw_attributes & 0x100) 599 strcat(str, "X"); 600 601 return (str); 602 } 603 604 static char * 605 qla24xx_fw_version_str(struct scsi_qla_host *vha, char *str, size_t size) 606 { 607 struct qla_hw_data *ha = vha->hw; 608 609 snprintf(str, size, "%d.%02d.%02d (%x)", ha->fw_major_version, 610 ha->fw_minor_version, ha->fw_subminor_version, ha->fw_attributes); 611 return str; 612 } 613 614 void 615 qla2x00_sp_free_dma(void *ptr) 616 { 617 srb_t *sp = ptr; 618 struct qla_hw_data *ha = sp->vha->hw; 619 struct scsi_cmnd *cmd = GET_CMD_SP(sp); 620 void *ctx = GET_CMD_CTX_SP(sp); 621 622 if (sp->flags & SRB_DMA_VALID) { 623 scsi_dma_unmap(cmd); 624 sp->flags &= ~SRB_DMA_VALID; 625 } 626 627 if (sp->flags & SRB_CRC_PROT_DMA_VALID) { 628 dma_unmap_sg(&ha->pdev->dev, scsi_prot_sglist(cmd), 629 scsi_prot_sg_count(cmd), cmd->sc_data_direction); 630 sp->flags &= ~SRB_CRC_PROT_DMA_VALID; 631 } 632 633 if (sp->flags & SRB_CRC_CTX_DSD_VALID) { 634 /* List assured to be having elements */ 635 qla2x00_clean_dsd_pool(ha, sp, NULL); 636 sp->flags &= ~SRB_CRC_CTX_DSD_VALID; 637 } 638 639 if (sp->flags & SRB_CRC_CTX_DMA_VALID) { 640 dma_pool_free(ha->dl_dma_pool, ctx, 641 ((struct crc_context *)ctx)->crc_ctx_dma); 642 sp->flags &= ~SRB_CRC_CTX_DMA_VALID; 643 } 644 645 if (sp->flags & SRB_FCP_CMND_DMA_VALID) { 646 struct ct6_dsd *ctx1 = (struct ct6_dsd *)ctx; 647 648 dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd, 649 ctx1->fcp_cmnd_dma); 650 list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list); 651 ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt; 652 ha->gbl_dsd_avail += ctx1->dsd_use_cnt; 653 mempool_free(ctx1, ha->ctx_mempool); 654 } 655 656 CMD_SP(cmd) = NULL; 657 qla2x00_rel_sp(sp); 658 } 659 660 void 661 qla2x00_sp_compl(void *ptr, int res) 662 { 663 srb_t *sp = ptr; 664 struct scsi_cmnd *cmd = GET_CMD_SP(sp); 665 666 cmd->result = res; 667 668 if (atomic_read(&sp->ref_count) == 0) { 669 ql_dbg(ql_dbg_io, sp->vha, 0x3015, 670 "SP reference-count to ZERO -- sp=%p cmd=%p.\n", 671 sp, GET_CMD_SP(sp)); 672 if (ql2xextended_error_logging & ql_dbg_io) 673 WARN_ON(atomic_read(&sp->ref_count) == 0); 674 return; 675 } 676 if (!atomic_dec_and_test(&sp->ref_count)) 677 return; 678 679 qla2x00_sp_free_dma(sp); 680 cmd->scsi_done(cmd); 681 } 682 683 void 684 qla2xxx_qpair_sp_free_dma(void *ptr) 685 { 686 srb_t *sp = (srb_t *)ptr; 687 struct scsi_cmnd *cmd = GET_CMD_SP(sp); 688 struct qla_hw_data *ha = sp->fcport->vha->hw; 689 void *ctx = GET_CMD_CTX_SP(sp); 690 691 if (sp->flags & SRB_DMA_VALID) { 692 scsi_dma_unmap(cmd); 693 sp->flags &= ~SRB_DMA_VALID; 694 } 695 696 if (sp->flags & SRB_CRC_PROT_DMA_VALID) { 697 dma_unmap_sg(&ha->pdev->dev, scsi_prot_sglist(cmd), 698 scsi_prot_sg_count(cmd), cmd->sc_data_direction); 699 sp->flags &= ~SRB_CRC_PROT_DMA_VALID; 700 } 701 702 if (sp->flags & SRB_CRC_CTX_DSD_VALID) { 703 /* List assured to be having elements */ 704 qla2x00_clean_dsd_pool(ha, sp, NULL); 705 sp->flags &= ~SRB_CRC_CTX_DSD_VALID; 706 } 707 708 if (sp->flags & SRB_CRC_CTX_DMA_VALID) { 709 dma_pool_free(ha->dl_dma_pool, ctx, 710 ((struct crc_context *)ctx)->crc_ctx_dma); 711 sp->flags &= ~SRB_CRC_CTX_DMA_VALID; 712 } 713 714 if (sp->flags & SRB_FCP_CMND_DMA_VALID) { 715 struct ct6_dsd *ctx1 = (struct ct6_dsd *)ctx; 716 717 dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd, 718 ctx1->fcp_cmnd_dma); 719 list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list); 720 ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt; 721 ha->gbl_dsd_avail += ctx1->dsd_use_cnt; 722 mempool_free(ctx1, ha->ctx_mempool); 723 } 724 725 CMD_SP(cmd) = NULL; 726 qla2xxx_rel_qpair_sp(sp->qpair, sp); 727 } 728 729 void 730 qla2xxx_qpair_sp_compl(void *ptr, int res) 731 { 732 srb_t *sp = ptr; 733 struct scsi_cmnd *cmd = GET_CMD_SP(sp); 734 735 cmd->result = res; 736 737 if (atomic_read(&sp->ref_count) == 0) { 738 ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3079, 739 "SP reference-count to ZERO -- sp=%p cmd=%p.\n", 740 sp, GET_CMD_SP(sp)); 741 if (ql2xextended_error_logging & ql_dbg_io) 742 WARN_ON(atomic_read(&sp->ref_count) == 0); 743 return; 744 } 745 if (!atomic_dec_and_test(&sp->ref_count)) 746 return; 747 748 qla2xxx_qpair_sp_free_dma(sp); 749 cmd->scsi_done(cmd); 750 } 751 752 /* If we are SP1 here, we need to still take and release the host_lock as SP1 753 * does not have the changes necessary to avoid taking host->host_lock. 754 */ 755 static int 756 qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) 757 { 758 scsi_qla_host_t *vha = shost_priv(host); 759 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; 760 struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device)); 761 struct qla_hw_data *ha = vha->hw; 762 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 763 srb_t *sp; 764 int rval; 765 struct qla_qpair *qpair = NULL; 766 uint32_t tag; 767 uint16_t hwq; 768 769 if (unlikely(test_bit(UNLOADING, &base_vha->dpc_flags))) { 770 cmd->result = DID_NO_CONNECT << 16; 771 goto qc24_fail_command; 772 } 773 774 if (ha->mqenable) { 775 if (shost_use_blk_mq(vha->host)) { 776 tag = blk_mq_unique_tag(cmd->request); 777 hwq = blk_mq_unique_tag_to_hwq(tag); 778 qpair = ha->queue_pair_map[hwq]; 779 } else if (vha->vp_idx && vha->qpair) { 780 qpair = vha->qpair; 781 } 782 783 if (qpair) 784 return qla2xxx_mqueuecommand(host, cmd, qpair); 785 } 786 787 if (ha->flags.eeh_busy) { 788 if (ha->flags.pci_channel_io_perm_failure) { 789 ql_dbg(ql_dbg_aer, vha, 0x9010, 790 "PCI Channel IO permanent failure, exiting " 791 "cmd=%p.\n", cmd); 792 cmd->result = DID_NO_CONNECT << 16; 793 } else { 794 ql_dbg(ql_dbg_aer, vha, 0x9011, 795 "EEH_Busy, Requeuing the cmd=%p.\n", cmd); 796 cmd->result = DID_REQUEUE << 16; 797 } 798 goto qc24_fail_command; 799 } 800 801 rval = fc_remote_port_chkready(rport); 802 if (rval) { 803 cmd->result = rval; 804 ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3003, 805 "fc_remote_port_chkready failed for cmd=%p, rval=0x%x.\n", 806 cmd, rval); 807 goto qc24_fail_command; 808 } 809 810 if (!vha->flags.difdix_supported && 811 scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) { 812 ql_dbg(ql_dbg_io, vha, 0x3004, 813 "DIF Cap not reg, fail DIF capable cmd's:%p.\n", 814 cmd); 815 cmd->result = DID_NO_CONNECT << 16; 816 goto qc24_fail_command; 817 } 818 819 if (!fcport) { 820 cmd->result = DID_NO_CONNECT << 16; 821 goto qc24_fail_command; 822 } 823 824 if (atomic_read(&fcport->state) != FCS_ONLINE) { 825 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD || 826 atomic_read(&base_vha->loop_state) == LOOP_DEAD) { 827 ql_dbg(ql_dbg_io, vha, 0x3005, 828 "Returning DNC, fcport_state=%d loop_state=%d.\n", 829 atomic_read(&fcport->state), 830 atomic_read(&base_vha->loop_state)); 831 cmd->result = DID_NO_CONNECT << 16; 832 goto qc24_fail_command; 833 } 834 goto qc24_target_busy; 835 } 836 837 /* 838 * Return target busy if we've received a non-zero retry_delay_timer 839 * in a FCP_RSP. 840 */ 841 if (fcport->retry_delay_timestamp == 0) { 842 /* retry delay not set */ 843 } else if (time_after(jiffies, fcport->retry_delay_timestamp)) 844 fcport->retry_delay_timestamp = 0; 845 else 846 goto qc24_target_busy; 847 848 sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC); 849 if (!sp) 850 goto qc24_host_busy; 851 852 sp->u.scmd.cmd = cmd; 853 sp->type = SRB_SCSI_CMD; 854 atomic_set(&sp->ref_count, 1); 855 CMD_SP(cmd) = (void *)sp; 856 sp->free = qla2x00_sp_free_dma; 857 sp->done = qla2x00_sp_compl; 858 859 rval = ha->isp_ops->start_scsi(sp); 860 if (rval != QLA_SUCCESS) { 861 ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3013, 862 "Start scsi failed rval=%d for cmd=%p.\n", rval, cmd); 863 goto qc24_host_busy_free_sp; 864 } 865 866 return 0; 867 868 qc24_host_busy_free_sp: 869 qla2x00_sp_free_dma(sp); 870 871 qc24_host_busy: 872 return SCSI_MLQUEUE_HOST_BUSY; 873 874 qc24_target_busy: 875 return SCSI_MLQUEUE_TARGET_BUSY; 876 877 qc24_fail_command: 878 cmd->scsi_done(cmd); 879 880 return 0; 881 } 882 883 /* For MQ supported I/O */ 884 int 885 qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd, 886 struct qla_qpair *qpair) 887 { 888 scsi_qla_host_t *vha = shost_priv(host); 889 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; 890 struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device)); 891 struct qla_hw_data *ha = vha->hw; 892 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 893 srb_t *sp; 894 int rval; 895 896 rval = fc_remote_port_chkready(rport); 897 if (rval) { 898 cmd->result = rval; 899 ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3076, 900 "fc_remote_port_chkready failed for cmd=%p, rval=0x%x.\n", 901 cmd, rval); 902 goto qc24_fail_command; 903 } 904 905 if (!fcport) { 906 cmd->result = DID_NO_CONNECT << 16; 907 goto qc24_fail_command; 908 } 909 910 if (atomic_read(&fcport->state) != FCS_ONLINE) { 911 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD || 912 atomic_read(&base_vha->loop_state) == LOOP_DEAD) { 913 ql_dbg(ql_dbg_io, vha, 0x3077, 914 "Returning DNC, fcport_state=%d loop_state=%d.\n", 915 atomic_read(&fcport->state), 916 atomic_read(&base_vha->loop_state)); 917 cmd->result = DID_NO_CONNECT << 16; 918 goto qc24_fail_command; 919 } 920 goto qc24_target_busy; 921 } 922 923 /* 924 * Return target busy if we've received a non-zero retry_delay_timer 925 * in a FCP_RSP. 926 */ 927 if (fcport->retry_delay_timestamp == 0) { 928 /* retry delay not set */ 929 } else if (time_after(jiffies, fcport->retry_delay_timestamp)) 930 fcport->retry_delay_timestamp = 0; 931 else 932 goto qc24_target_busy; 933 934 sp = qla2xxx_get_qpair_sp(qpair, fcport, GFP_ATOMIC); 935 if (!sp) 936 goto qc24_host_busy; 937 938 sp->u.scmd.cmd = cmd; 939 sp->type = SRB_SCSI_CMD; 940 atomic_set(&sp->ref_count, 1); 941 CMD_SP(cmd) = (void *)sp; 942 sp->free = qla2xxx_qpair_sp_free_dma; 943 sp->done = qla2xxx_qpair_sp_compl; 944 sp->qpair = qpair; 945 946 rval = ha->isp_ops->start_scsi_mq(sp); 947 if (rval != QLA_SUCCESS) { 948 ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3078, 949 "Start scsi failed rval=%d for cmd=%p.\n", rval, cmd); 950 if (rval == QLA_INTERFACE_ERROR) 951 goto qc24_fail_command; 952 goto qc24_host_busy_free_sp; 953 } 954 955 return 0; 956 957 qc24_host_busy_free_sp: 958 qla2xxx_qpair_sp_free_dma(sp); 959 960 qc24_host_busy: 961 return SCSI_MLQUEUE_HOST_BUSY; 962 963 qc24_target_busy: 964 return SCSI_MLQUEUE_TARGET_BUSY; 965 966 qc24_fail_command: 967 cmd->scsi_done(cmd); 968 969 return 0; 970 } 971 972 /* 973 * qla2x00_eh_wait_on_command 974 * Waits for the command to be returned by the Firmware for some 975 * max time. 976 * 977 * Input: 978 * cmd = Scsi Command to wait on. 979 * 980 * Return: 981 * Not Found : 0 982 * Found : 1 983 */ 984 static int 985 qla2x00_eh_wait_on_command(struct scsi_cmnd *cmd) 986 { 987 #define ABORT_POLLING_PERIOD 1000 988 #define ABORT_WAIT_ITER ((2 * 1000) / (ABORT_POLLING_PERIOD)) 989 unsigned long wait_iter = ABORT_WAIT_ITER; 990 scsi_qla_host_t *vha = shost_priv(cmd->device->host); 991 struct qla_hw_data *ha = vha->hw; 992 int ret = QLA_SUCCESS; 993 994 if (unlikely(pci_channel_offline(ha->pdev)) || ha->flags.eeh_busy) { 995 ql_dbg(ql_dbg_taskm, vha, 0x8005, 996 "Return:eh_wait.\n"); 997 return ret; 998 } 999 1000 while (CMD_SP(cmd) && wait_iter--) { 1001 msleep(ABORT_POLLING_PERIOD); 1002 } 1003 if (CMD_SP(cmd)) 1004 ret = QLA_FUNCTION_FAILED; 1005 1006 return ret; 1007 } 1008 1009 /* 1010 * qla2x00_wait_for_hba_online 1011 * Wait till the HBA is online after going through 1012 * <= MAX_RETRIES_OF_ISP_ABORT or 1013 * finally HBA is disabled ie marked offline 1014 * 1015 * Input: 1016 * ha - pointer to host adapter structure 1017 * 1018 * Note: 1019 * Does context switching-Release SPIN_LOCK 1020 * (if any) before calling this routine. 1021 * 1022 * Return: 1023 * Success (Adapter is online) : 0 1024 * Failed (Adapter is offline/disabled) : 1 1025 */ 1026 int 1027 qla2x00_wait_for_hba_online(scsi_qla_host_t *vha) 1028 { 1029 int return_status; 1030 unsigned long wait_online; 1031 struct qla_hw_data *ha = vha->hw; 1032 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 1033 1034 wait_online = jiffies + (MAX_LOOP_TIMEOUT * HZ); 1035 while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) || 1036 test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) || 1037 test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) || 1038 ha->dpc_active) && time_before(jiffies, wait_online)) { 1039 1040 msleep(1000); 1041 } 1042 if (base_vha->flags.online) 1043 return_status = QLA_SUCCESS; 1044 else 1045 return_status = QLA_FUNCTION_FAILED; 1046 1047 return (return_status); 1048 } 1049 1050 static inline int test_fcport_count(scsi_qla_host_t *vha) 1051 { 1052 struct qla_hw_data *ha = vha->hw; 1053 unsigned long flags; 1054 int res; 1055 1056 spin_lock_irqsave(&ha->tgt.sess_lock, flags); 1057 ql_dbg(ql_dbg_init, vha, 0xffff, 1058 "tgt %p, fcport_count=%d\n", 1059 vha, vha->fcport_count); 1060 res = (vha->fcport_count == 0); 1061 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 1062 1063 return res; 1064 } 1065 1066 /* 1067 * qla2x00_wait_for_sess_deletion can only be called from remove_one. 1068 * it has dependency on UNLOADING flag to stop device discovery 1069 */ 1070 static void 1071 qla2x00_wait_for_sess_deletion(scsi_qla_host_t *vha) 1072 { 1073 qla2x00_mark_all_devices_lost(vha, 0); 1074 1075 wait_event(vha->fcport_waitQ, test_fcport_count(vha)); 1076 } 1077 1078 /* 1079 * qla2x00_wait_for_hba_ready 1080 * Wait till the HBA is ready before doing driver unload 1081 * 1082 * Input: 1083 * ha - pointer to host adapter structure 1084 * 1085 * Note: 1086 * Does context switching-Release SPIN_LOCK 1087 * (if any) before calling this routine. 1088 * 1089 */ 1090 static void 1091 qla2x00_wait_for_hba_ready(scsi_qla_host_t *vha) 1092 { 1093 struct qla_hw_data *ha = vha->hw; 1094 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 1095 1096 while ((qla2x00_reset_active(vha) || ha->dpc_active || 1097 ha->flags.mbox_busy) || 1098 test_bit(FX00_RESET_RECOVERY, &vha->dpc_flags) || 1099 test_bit(FX00_TARGET_SCAN, &vha->dpc_flags)) { 1100 if (test_bit(UNLOADING, &base_vha->dpc_flags)) 1101 break; 1102 msleep(1000); 1103 } 1104 } 1105 1106 int 1107 qla2x00_wait_for_chip_reset(scsi_qla_host_t *vha) 1108 { 1109 int return_status; 1110 unsigned long wait_reset; 1111 struct qla_hw_data *ha = vha->hw; 1112 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 1113 1114 wait_reset = jiffies + (MAX_LOOP_TIMEOUT * HZ); 1115 while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) || 1116 test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) || 1117 test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) || 1118 ha->dpc_active) && time_before(jiffies, wait_reset)) { 1119 1120 msleep(1000); 1121 1122 if (!test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags) && 1123 ha->flags.chip_reset_done) 1124 break; 1125 } 1126 if (ha->flags.chip_reset_done) 1127 return_status = QLA_SUCCESS; 1128 else 1129 return_status = QLA_FUNCTION_FAILED; 1130 1131 return return_status; 1132 } 1133 1134 static void 1135 sp_get(struct srb *sp) 1136 { 1137 atomic_inc(&sp->ref_count); 1138 } 1139 1140 #define ISP_REG_DISCONNECT 0xffffffffU 1141 /************************************************************************** 1142 * qla2x00_isp_reg_stat 1143 * 1144 * Description: 1145 * Read the host status register of ISP before aborting the command. 1146 * 1147 * Input: 1148 * ha = pointer to host adapter structure. 1149 * 1150 * 1151 * Returns: 1152 * Either true or false. 1153 * 1154 * Note: Return true if there is register disconnect. 1155 **************************************************************************/ 1156 static inline 1157 uint32_t qla2x00_isp_reg_stat(struct qla_hw_data *ha) 1158 { 1159 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 1160 struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82; 1161 1162 if (IS_P3P_TYPE(ha)) 1163 return ((RD_REG_DWORD(®82->host_int)) == ISP_REG_DISCONNECT); 1164 else 1165 return ((RD_REG_DWORD(®->host_status)) == 1166 ISP_REG_DISCONNECT); 1167 } 1168 1169 /************************************************************************** 1170 * qla2xxx_eh_abort 1171 * 1172 * Description: 1173 * The abort function will abort the specified command. 1174 * 1175 * Input: 1176 * cmd = Linux SCSI command packet to be aborted. 1177 * 1178 * Returns: 1179 * Either SUCCESS or FAILED. 1180 * 1181 * Note: 1182 * Only return FAILED if command not returned by firmware. 1183 **************************************************************************/ 1184 static int 1185 qla2xxx_eh_abort(struct scsi_cmnd *cmd) 1186 { 1187 scsi_qla_host_t *vha = shost_priv(cmd->device->host); 1188 srb_t *sp; 1189 int ret; 1190 unsigned int id; 1191 uint64_t lun; 1192 unsigned long flags; 1193 int rval, wait = 0; 1194 struct qla_hw_data *ha = vha->hw; 1195 1196 if (qla2x00_isp_reg_stat(ha)) { 1197 ql_log(ql_log_info, vha, 0x8042, 1198 "PCI/Register disconnect, exiting.\n"); 1199 return FAILED; 1200 } 1201 if (!CMD_SP(cmd)) 1202 return SUCCESS; 1203 1204 ret = fc_block_scsi_eh(cmd); 1205 if (ret != 0) 1206 return ret; 1207 ret = SUCCESS; 1208 1209 id = cmd->device->id; 1210 lun = cmd->device->lun; 1211 1212 spin_lock_irqsave(&ha->hardware_lock, flags); 1213 sp = (srb_t *) CMD_SP(cmd); 1214 if (!sp) { 1215 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1216 return SUCCESS; 1217 } 1218 1219 ql_dbg(ql_dbg_taskm, vha, 0x8002, 1220 "Aborting from RISC nexus=%ld:%d:%llu sp=%p cmd=%p handle=%x\n", 1221 vha->host_no, id, lun, sp, cmd, sp->handle); 1222 1223 /* Get a reference to the sp and drop the lock.*/ 1224 sp_get(sp); 1225 1226 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1227 rval = ha->isp_ops->abort_command(sp); 1228 if (rval) { 1229 if (rval == QLA_FUNCTION_PARAMETER_ERROR) 1230 ret = SUCCESS; 1231 else 1232 ret = FAILED; 1233 1234 ql_dbg(ql_dbg_taskm, vha, 0x8003, 1235 "Abort command mbx failed cmd=%p, rval=%x.\n", cmd, rval); 1236 } else { 1237 ql_dbg(ql_dbg_taskm, vha, 0x8004, 1238 "Abort command mbx success cmd=%p.\n", cmd); 1239 wait = 1; 1240 } 1241 1242 spin_lock_irqsave(&ha->hardware_lock, flags); 1243 sp->done(sp, 0); 1244 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1245 1246 /* Did the command return during mailbox execution? */ 1247 if (ret == FAILED && !CMD_SP(cmd)) 1248 ret = SUCCESS; 1249 1250 /* Wait for the command to be returned. */ 1251 if (wait) { 1252 if (qla2x00_eh_wait_on_command(cmd) != QLA_SUCCESS) { 1253 ql_log(ql_log_warn, vha, 0x8006, 1254 "Abort handler timed out cmd=%p.\n", cmd); 1255 ret = FAILED; 1256 } 1257 } 1258 1259 ql_log(ql_log_info, vha, 0x801c, 1260 "Abort command issued nexus=%ld:%d:%llu -- %d %x.\n", 1261 vha->host_no, id, lun, wait, ret); 1262 1263 return ret; 1264 } 1265 1266 int 1267 qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t, 1268 uint64_t l, enum nexus_wait_type type) 1269 { 1270 int cnt, match, status; 1271 unsigned long flags; 1272 struct qla_hw_data *ha = vha->hw; 1273 struct req_que *req; 1274 srb_t *sp; 1275 struct scsi_cmnd *cmd; 1276 1277 status = QLA_SUCCESS; 1278 1279 spin_lock_irqsave(&ha->hardware_lock, flags); 1280 req = vha->req; 1281 for (cnt = 1; status == QLA_SUCCESS && 1282 cnt < req->num_outstanding_cmds; cnt++) { 1283 sp = req->outstanding_cmds[cnt]; 1284 if (!sp) 1285 continue; 1286 if (sp->type != SRB_SCSI_CMD) 1287 continue; 1288 if (vha->vp_idx != sp->vha->vp_idx) 1289 continue; 1290 match = 0; 1291 cmd = GET_CMD_SP(sp); 1292 switch (type) { 1293 case WAIT_HOST: 1294 match = 1; 1295 break; 1296 case WAIT_TARGET: 1297 match = cmd->device->id == t; 1298 break; 1299 case WAIT_LUN: 1300 match = (cmd->device->id == t && 1301 cmd->device->lun == l); 1302 break; 1303 } 1304 if (!match) 1305 continue; 1306 1307 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1308 status = qla2x00_eh_wait_on_command(cmd); 1309 spin_lock_irqsave(&ha->hardware_lock, flags); 1310 } 1311 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1312 1313 return status; 1314 } 1315 1316 static char *reset_errors[] = { 1317 "HBA not online", 1318 "HBA not ready", 1319 "Task management failed", 1320 "Waiting for command completions", 1321 }; 1322 1323 static int 1324 __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type, 1325 struct scsi_cmnd *cmd, int (*do_reset)(struct fc_port *, uint64_t, int)) 1326 { 1327 scsi_qla_host_t *vha = shost_priv(cmd->device->host); 1328 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; 1329 int err; 1330 1331 if (!fcport) { 1332 return FAILED; 1333 } 1334 1335 err = fc_block_scsi_eh(cmd); 1336 if (err != 0) 1337 return err; 1338 1339 ql_log(ql_log_info, vha, 0x8009, 1340 "%s RESET ISSUED nexus=%ld:%d:%llu cmd=%p.\n", name, vha->host_no, 1341 cmd->device->id, cmd->device->lun, cmd); 1342 1343 err = 0; 1344 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) { 1345 ql_log(ql_log_warn, vha, 0x800a, 1346 "Wait for hba online failed for cmd=%p.\n", cmd); 1347 goto eh_reset_failed; 1348 } 1349 err = 2; 1350 if (do_reset(fcport, cmd->device->lun, cmd->request->cpu + 1) 1351 != QLA_SUCCESS) { 1352 ql_log(ql_log_warn, vha, 0x800c, 1353 "do_reset failed for cmd=%p.\n", cmd); 1354 goto eh_reset_failed; 1355 } 1356 err = 3; 1357 if (qla2x00_eh_wait_for_pending_commands(vha, cmd->device->id, 1358 cmd->device->lun, type) != QLA_SUCCESS) { 1359 ql_log(ql_log_warn, vha, 0x800d, 1360 "wait for pending cmds failed for cmd=%p.\n", cmd); 1361 goto eh_reset_failed; 1362 } 1363 1364 ql_log(ql_log_info, vha, 0x800e, 1365 "%s RESET SUCCEEDED nexus:%ld:%d:%llu cmd=%p.\n", name, 1366 vha->host_no, cmd->device->id, cmd->device->lun, cmd); 1367 1368 return SUCCESS; 1369 1370 eh_reset_failed: 1371 ql_log(ql_log_info, vha, 0x800f, 1372 "%s RESET FAILED: %s nexus=%ld:%d:%llu cmd=%p.\n", name, 1373 reset_errors[err], vha->host_no, cmd->device->id, cmd->device->lun, 1374 cmd); 1375 return FAILED; 1376 } 1377 1378 static int 1379 qla2xxx_eh_device_reset(struct scsi_cmnd *cmd) 1380 { 1381 scsi_qla_host_t *vha = shost_priv(cmd->device->host); 1382 struct qla_hw_data *ha = vha->hw; 1383 1384 if (qla2x00_isp_reg_stat(ha)) { 1385 ql_log(ql_log_info, vha, 0x803e, 1386 "PCI/Register disconnect, exiting.\n"); 1387 return FAILED; 1388 } 1389 1390 return __qla2xxx_eh_generic_reset("DEVICE", WAIT_LUN, cmd, 1391 ha->isp_ops->lun_reset); 1392 } 1393 1394 static int 1395 qla2xxx_eh_target_reset(struct scsi_cmnd *cmd) 1396 { 1397 scsi_qla_host_t *vha = shost_priv(cmd->device->host); 1398 struct qla_hw_data *ha = vha->hw; 1399 1400 if (qla2x00_isp_reg_stat(ha)) { 1401 ql_log(ql_log_info, vha, 0x803f, 1402 "PCI/Register disconnect, exiting.\n"); 1403 return FAILED; 1404 } 1405 1406 return __qla2xxx_eh_generic_reset("TARGET", WAIT_TARGET, cmd, 1407 ha->isp_ops->target_reset); 1408 } 1409 1410 /************************************************************************** 1411 * qla2xxx_eh_bus_reset 1412 * 1413 * Description: 1414 * The bus reset function will reset the bus and abort any executing 1415 * commands. 1416 * 1417 * Input: 1418 * cmd = Linux SCSI command packet of the command that cause the 1419 * bus reset. 1420 * 1421 * Returns: 1422 * SUCCESS/FAILURE (defined as macro in scsi.h). 1423 * 1424 **************************************************************************/ 1425 static int 1426 qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd) 1427 { 1428 scsi_qla_host_t *vha = shost_priv(cmd->device->host); 1429 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; 1430 int ret = FAILED; 1431 unsigned int id; 1432 uint64_t lun; 1433 struct qla_hw_data *ha = vha->hw; 1434 1435 if (qla2x00_isp_reg_stat(ha)) { 1436 ql_log(ql_log_info, vha, 0x8040, 1437 "PCI/Register disconnect, exiting.\n"); 1438 return FAILED; 1439 } 1440 1441 id = cmd->device->id; 1442 lun = cmd->device->lun; 1443 1444 if (!fcport) { 1445 return ret; 1446 } 1447 1448 ret = fc_block_scsi_eh(cmd); 1449 if (ret != 0) 1450 return ret; 1451 ret = FAILED; 1452 1453 ql_log(ql_log_info, vha, 0x8012, 1454 "BUS RESET ISSUED nexus=%ld:%d:%llu.\n", vha->host_no, id, lun); 1455 1456 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) { 1457 ql_log(ql_log_fatal, vha, 0x8013, 1458 "Wait for hba online failed board disabled.\n"); 1459 goto eh_bus_reset_done; 1460 } 1461 1462 if (qla2x00_loop_reset(vha) == QLA_SUCCESS) 1463 ret = SUCCESS; 1464 1465 if (ret == FAILED) 1466 goto eh_bus_reset_done; 1467 1468 /* Flush outstanding commands. */ 1469 if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST) != 1470 QLA_SUCCESS) { 1471 ql_log(ql_log_warn, vha, 0x8014, 1472 "Wait for pending commands failed.\n"); 1473 ret = FAILED; 1474 } 1475 1476 eh_bus_reset_done: 1477 ql_log(ql_log_warn, vha, 0x802b, 1478 "BUS RESET %s nexus=%ld:%d:%llu.\n", 1479 (ret == FAILED) ? "FAILED" : "SUCCEEDED", vha->host_no, id, lun); 1480 1481 return ret; 1482 } 1483 1484 /************************************************************************** 1485 * qla2xxx_eh_host_reset 1486 * 1487 * Description: 1488 * The reset function will reset the Adapter. 1489 * 1490 * Input: 1491 * cmd = Linux SCSI command packet of the command that cause the 1492 * adapter reset. 1493 * 1494 * Returns: 1495 * Either SUCCESS or FAILED. 1496 * 1497 * Note: 1498 **************************************************************************/ 1499 static int 1500 qla2xxx_eh_host_reset(struct scsi_cmnd *cmd) 1501 { 1502 scsi_qla_host_t *vha = shost_priv(cmd->device->host); 1503 struct qla_hw_data *ha = vha->hw; 1504 int ret = FAILED; 1505 unsigned int id; 1506 uint64_t lun; 1507 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 1508 1509 if (qla2x00_isp_reg_stat(ha)) { 1510 ql_log(ql_log_info, vha, 0x8041, 1511 "PCI/Register disconnect, exiting.\n"); 1512 schedule_work(&ha->board_disable); 1513 return SUCCESS; 1514 } 1515 1516 id = cmd->device->id; 1517 lun = cmd->device->lun; 1518 1519 ql_log(ql_log_info, vha, 0x8018, 1520 "ADAPTER RESET ISSUED nexus=%ld:%d:%llu.\n", vha->host_no, id, lun); 1521 1522 /* 1523 * No point in issuing another reset if one is active. Also do not 1524 * attempt a reset if we are updating flash. 1525 */ 1526 if (qla2x00_reset_active(vha) || ha->optrom_state != QLA_SWAITING) 1527 goto eh_host_reset_lock; 1528 1529 if (vha != base_vha) { 1530 if (qla2x00_vp_abort_isp(vha)) 1531 goto eh_host_reset_lock; 1532 } else { 1533 if (IS_P3P_TYPE(vha->hw)) { 1534 if (!qla82xx_fcoe_ctx_reset(vha)) { 1535 /* Ctx reset success */ 1536 ret = SUCCESS; 1537 goto eh_host_reset_lock; 1538 } 1539 /* fall thru if ctx reset failed */ 1540 } 1541 if (ha->wq) 1542 flush_workqueue(ha->wq); 1543 1544 set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); 1545 if (ha->isp_ops->abort_isp(base_vha)) { 1546 clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); 1547 /* failed. schedule dpc to try */ 1548 set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags); 1549 1550 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) { 1551 ql_log(ql_log_warn, vha, 0x802a, 1552 "wait for hba online failed.\n"); 1553 goto eh_host_reset_lock; 1554 } 1555 } 1556 clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); 1557 } 1558 1559 /* Waiting for command to be returned to OS.*/ 1560 if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST) == 1561 QLA_SUCCESS) 1562 ret = SUCCESS; 1563 1564 eh_host_reset_lock: 1565 ql_log(ql_log_info, vha, 0x8017, 1566 "ADAPTER RESET %s nexus=%ld:%d:%llu.\n", 1567 (ret == FAILED) ? "FAILED" : "SUCCEEDED", vha->host_no, id, lun); 1568 1569 return ret; 1570 } 1571 1572 /* 1573 * qla2x00_loop_reset 1574 * Issue loop reset. 1575 * 1576 * Input: 1577 * ha = adapter block pointer. 1578 * 1579 * Returns: 1580 * 0 = success 1581 */ 1582 int 1583 qla2x00_loop_reset(scsi_qla_host_t *vha) 1584 { 1585 int ret; 1586 struct fc_port *fcport; 1587 struct qla_hw_data *ha = vha->hw; 1588 1589 if (IS_QLAFX00(ha)) { 1590 return qlafx00_loop_reset(vha); 1591 } 1592 1593 if (ql2xtargetreset == 1 && ha->flags.enable_target_reset) { 1594 list_for_each_entry(fcport, &vha->vp_fcports, list) { 1595 if (fcport->port_type != FCT_TARGET) 1596 continue; 1597 1598 ret = ha->isp_ops->target_reset(fcport, 0, 0); 1599 if (ret != QLA_SUCCESS) { 1600 ql_dbg(ql_dbg_taskm, vha, 0x802c, 1601 "Bus Reset failed: Reset=%d " 1602 "d_id=%x.\n", ret, fcport->d_id.b24); 1603 } 1604 } 1605 } 1606 1607 1608 if (ha->flags.enable_lip_full_login && !IS_CNA_CAPABLE(ha)) { 1609 atomic_set(&vha->loop_state, LOOP_DOWN); 1610 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 1611 qla2x00_mark_all_devices_lost(vha, 0); 1612 ret = qla2x00_full_login_lip(vha); 1613 if (ret != QLA_SUCCESS) { 1614 ql_dbg(ql_dbg_taskm, vha, 0x802d, 1615 "full_login_lip=%d.\n", ret); 1616 } 1617 } 1618 1619 if (ha->flags.enable_lip_reset) { 1620 ret = qla2x00_lip_reset(vha); 1621 if (ret != QLA_SUCCESS) 1622 ql_dbg(ql_dbg_taskm, vha, 0x802e, 1623 "lip_reset failed (%d).\n", ret); 1624 } 1625 1626 /* Issue marker command only when we are going to start the I/O */ 1627 vha->marker_needed = 1; 1628 1629 return QLA_SUCCESS; 1630 } 1631 1632 void 1633 qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res) 1634 { 1635 int que, cnt; 1636 unsigned long flags; 1637 srb_t *sp; 1638 struct qla_hw_data *ha = vha->hw; 1639 struct req_que *req; 1640 1641 qlt_host_reset_handler(ha); 1642 1643 spin_lock_irqsave(&ha->hardware_lock, flags); 1644 for (que = 0; que < ha->max_req_queues; que++) { 1645 req = ha->req_q_map[que]; 1646 if (!req) 1647 continue; 1648 if (!req->outstanding_cmds) 1649 continue; 1650 for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) { 1651 sp = req->outstanding_cmds[cnt]; 1652 if (sp) { 1653 /* Don't abort commands in adapter during EEH 1654 * recovery as it's not accessible/responding. 1655 */ 1656 if (GET_CMD_SP(sp) && !ha->flags.eeh_busy && 1657 (sp->type == SRB_SCSI_CMD)) { 1658 /* Get a reference to the sp and drop the lock. 1659 * The reference ensures this sp->done() call 1660 * - and not the call in qla2xxx_eh_abort() - 1661 * ends the SCSI command (with result 'res'). 1662 */ 1663 sp_get(sp); 1664 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1665 qla2xxx_eh_abort(GET_CMD_SP(sp)); 1666 spin_lock_irqsave(&ha->hardware_lock, flags); 1667 } 1668 req->outstanding_cmds[cnt] = NULL; 1669 sp->done(sp, res); 1670 } 1671 } 1672 } 1673 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1674 } 1675 1676 static int 1677 qla2xxx_slave_alloc(struct scsi_device *sdev) 1678 { 1679 struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); 1680 1681 if (!rport || fc_remote_port_chkready(rport)) 1682 return -ENXIO; 1683 1684 sdev->hostdata = *(fc_port_t **)rport->dd_data; 1685 1686 return 0; 1687 } 1688 1689 static int 1690 qla2xxx_slave_configure(struct scsi_device *sdev) 1691 { 1692 scsi_qla_host_t *vha = shost_priv(sdev->host); 1693 struct req_que *req = vha->req; 1694 1695 if (IS_T10_PI_CAPABLE(vha->hw)) 1696 blk_queue_update_dma_alignment(sdev->request_queue, 0x7); 1697 1698 scsi_change_queue_depth(sdev, req->max_q_depth); 1699 return 0; 1700 } 1701 1702 static void 1703 qla2xxx_slave_destroy(struct scsi_device *sdev) 1704 { 1705 sdev->hostdata = NULL; 1706 } 1707 1708 /** 1709 * qla2x00_config_dma_addressing() - Configure OS DMA addressing method. 1710 * @ha: HA context 1711 * 1712 * At exit, the @ha's flags.enable_64bit_addressing set to indicated 1713 * supported addressing method. 1714 */ 1715 static void 1716 qla2x00_config_dma_addressing(struct qla_hw_data *ha) 1717 { 1718 /* Assume a 32bit DMA mask. */ 1719 ha->flags.enable_64bit_addressing = 0; 1720 1721 if (!dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(64))) { 1722 /* Any upper-dword bits set? */ 1723 if (MSD(dma_get_required_mask(&ha->pdev->dev)) && 1724 !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) { 1725 /* Ok, a 64bit DMA mask is applicable. */ 1726 ha->flags.enable_64bit_addressing = 1; 1727 ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64; 1728 ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64; 1729 return; 1730 } 1731 } 1732 1733 dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(32)); 1734 pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(32)); 1735 } 1736 1737 static void 1738 qla2x00_enable_intrs(struct qla_hw_data *ha) 1739 { 1740 unsigned long flags = 0; 1741 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1742 1743 spin_lock_irqsave(&ha->hardware_lock, flags); 1744 ha->interrupts_on = 1; 1745 /* enable risc and host interrupts */ 1746 WRT_REG_WORD(®->ictrl, ICR_EN_INT | ICR_EN_RISC); 1747 RD_REG_WORD(®->ictrl); 1748 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1749 1750 } 1751 1752 static void 1753 qla2x00_disable_intrs(struct qla_hw_data *ha) 1754 { 1755 unsigned long flags = 0; 1756 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1757 1758 spin_lock_irqsave(&ha->hardware_lock, flags); 1759 ha->interrupts_on = 0; 1760 /* disable risc and host interrupts */ 1761 WRT_REG_WORD(®->ictrl, 0); 1762 RD_REG_WORD(®->ictrl); 1763 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1764 } 1765 1766 static void 1767 qla24xx_enable_intrs(struct qla_hw_data *ha) 1768 { 1769 unsigned long flags = 0; 1770 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 1771 1772 spin_lock_irqsave(&ha->hardware_lock, flags); 1773 ha->interrupts_on = 1; 1774 WRT_REG_DWORD(®->ictrl, ICRX_EN_RISC_INT); 1775 RD_REG_DWORD(®->ictrl); 1776 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1777 } 1778 1779 static void 1780 qla24xx_disable_intrs(struct qla_hw_data *ha) 1781 { 1782 unsigned long flags = 0; 1783 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 1784 1785 if (IS_NOPOLLING_TYPE(ha)) 1786 return; 1787 spin_lock_irqsave(&ha->hardware_lock, flags); 1788 ha->interrupts_on = 0; 1789 WRT_REG_DWORD(®->ictrl, 0); 1790 RD_REG_DWORD(®->ictrl); 1791 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1792 } 1793 1794 static int 1795 qla2x00_iospace_config(struct qla_hw_data *ha) 1796 { 1797 resource_size_t pio; 1798 uint16_t msix; 1799 1800 if (pci_request_selected_regions(ha->pdev, ha->bars, 1801 QLA2XXX_DRIVER_NAME)) { 1802 ql_log_pci(ql_log_fatal, ha->pdev, 0x0011, 1803 "Failed to reserve PIO/MMIO regions (%s), aborting.\n", 1804 pci_name(ha->pdev)); 1805 goto iospace_error_exit; 1806 } 1807 if (!(ha->bars & 1)) 1808 goto skip_pio; 1809 1810 /* We only need PIO for Flash operations on ISP2312 v2 chips. */ 1811 pio = pci_resource_start(ha->pdev, 0); 1812 if (pci_resource_flags(ha->pdev, 0) & IORESOURCE_IO) { 1813 if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) { 1814 ql_log_pci(ql_log_warn, ha->pdev, 0x0012, 1815 "Invalid pci I/O region size (%s).\n", 1816 pci_name(ha->pdev)); 1817 pio = 0; 1818 } 1819 } else { 1820 ql_log_pci(ql_log_warn, ha->pdev, 0x0013, 1821 "Region #0 no a PIO resource (%s).\n", 1822 pci_name(ha->pdev)); 1823 pio = 0; 1824 } 1825 ha->pio_address = pio; 1826 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0014, 1827 "PIO address=%llu.\n", 1828 (unsigned long long)ha->pio_address); 1829 1830 skip_pio: 1831 /* Use MMIO operations for all accesses. */ 1832 if (!(pci_resource_flags(ha->pdev, 1) & IORESOURCE_MEM)) { 1833 ql_log_pci(ql_log_fatal, ha->pdev, 0x0015, 1834 "Region #1 not an MMIO resource (%s), aborting.\n", 1835 pci_name(ha->pdev)); 1836 goto iospace_error_exit; 1837 } 1838 if (pci_resource_len(ha->pdev, 1) < MIN_IOBASE_LEN) { 1839 ql_log_pci(ql_log_fatal, ha->pdev, 0x0016, 1840 "Invalid PCI mem region size (%s), aborting.\n", 1841 pci_name(ha->pdev)); 1842 goto iospace_error_exit; 1843 } 1844 1845 ha->iobase = ioremap(pci_resource_start(ha->pdev, 1), MIN_IOBASE_LEN); 1846 if (!ha->iobase) { 1847 ql_log_pci(ql_log_fatal, ha->pdev, 0x0017, 1848 "Cannot remap MMIO (%s), aborting.\n", 1849 pci_name(ha->pdev)); 1850 goto iospace_error_exit; 1851 } 1852 1853 /* Determine queue resources */ 1854 ha->max_req_queues = ha->max_rsp_queues = 1; 1855 ha->msix_count = QLA_BASE_VECTORS; 1856 if (!ql2xmqsupport || (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))) 1857 goto mqiobase_exit; 1858 1859 ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3), 1860 pci_resource_len(ha->pdev, 3)); 1861 if (ha->mqiobase) { 1862 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0018, 1863 "MQIO Base=%p.\n", ha->mqiobase); 1864 /* Read MSIX vector size of the board */ 1865 pci_read_config_word(ha->pdev, QLA_PCI_MSIX_CONTROL, &msix); 1866 ha->msix_count = msix + 1; 1867 /* Max queues are bounded by available msix vectors */ 1868 /* MB interrupt uses 1 vector */ 1869 ha->max_req_queues = ha->msix_count - 1; 1870 ha->max_rsp_queues = ha->max_req_queues; 1871 /* Queue pairs is the max value minus the base queue pair */ 1872 ha->max_qpairs = ha->max_rsp_queues - 1; 1873 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0188, 1874 "Max no of queues pairs: %d.\n", ha->max_qpairs); 1875 1876 ql_log_pci(ql_log_info, ha->pdev, 0x001a, 1877 "MSI-X vector count: %d.\n", ha->msix_count); 1878 } else 1879 ql_log_pci(ql_log_info, ha->pdev, 0x001b, 1880 "BAR 3 not enabled.\n"); 1881 1882 mqiobase_exit: 1883 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x001c, 1884 "MSIX Count: %d.\n", ha->msix_count); 1885 return (0); 1886 1887 iospace_error_exit: 1888 return (-ENOMEM); 1889 } 1890 1891 1892 static int 1893 qla83xx_iospace_config(struct qla_hw_data *ha) 1894 { 1895 uint16_t msix; 1896 1897 if (pci_request_selected_regions(ha->pdev, ha->bars, 1898 QLA2XXX_DRIVER_NAME)) { 1899 ql_log_pci(ql_log_fatal, ha->pdev, 0x0117, 1900 "Failed to reserve PIO/MMIO regions (%s), aborting.\n", 1901 pci_name(ha->pdev)); 1902 1903 goto iospace_error_exit; 1904 } 1905 1906 /* Use MMIO operations for all accesses. */ 1907 if (!(pci_resource_flags(ha->pdev, 0) & IORESOURCE_MEM)) { 1908 ql_log_pci(ql_log_warn, ha->pdev, 0x0118, 1909 "Invalid pci I/O region size (%s).\n", 1910 pci_name(ha->pdev)); 1911 goto iospace_error_exit; 1912 } 1913 if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) { 1914 ql_log_pci(ql_log_warn, ha->pdev, 0x0119, 1915 "Invalid PCI mem region size (%s), aborting\n", 1916 pci_name(ha->pdev)); 1917 goto iospace_error_exit; 1918 } 1919 1920 ha->iobase = ioremap(pci_resource_start(ha->pdev, 0), MIN_IOBASE_LEN); 1921 if (!ha->iobase) { 1922 ql_log_pci(ql_log_fatal, ha->pdev, 0x011a, 1923 "Cannot remap MMIO (%s), aborting.\n", 1924 pci_name(ha->pdev)); 1925 goto iospace_error_exit; 1926 } 1927 1928 /* 64bit PCI BAR - BAR2 will correspoond to region 4 */ 1929 /* 83XX 26XX always use MQ type access for queues 1930 * - mbar 2, a.k.a region 4 */ 1931 ha->max_req_queues = ha->max_rsp_queues = 1; 1932 ha->msix_count = QLA_BASE_VECTORS; 1933 ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 4), 1934 pci_resource_len(ha->pdev, 4)); 1935 1936 if (!ha->mqiobase) { 1937 ql_log_pci(ql_log_fatal, ha->pdev, 0x011d, 1938 "BAR2/region4 not enabled\n"); 1939 goto mqiobase_exit; 1940 } 1941 1942 ha->msixbase = ioremap(pci_resource_start(ha->pdev, 2), 1943 pci_resource_len(ha->pdev, 2)); 1944 if (ha->msixbase) { 1945 /* Read MSIX vector size of the board */ 1946 pci_read_config_word(ha->pdev, 1947 QLA_83XX_PCI_MSIX_CONTROL, &msix); 1948 ha->msix_count = msix + 1; 1949 /* 1950 * By default, driver uses at least two msix vectors 1951 * (default & rspq) 1952 */ 1953 if (ql2xmqsupport) { 1954 /* MB interrupt uses 1 vector */ 1955 ha->max_req_queues = ha->msix_count - 1; 1956 1957 /* ATIOQ needs 1 vector. That's 1 less QPair */ 1958 if (QLA_TGT_MODE_ENABLED()) 1959 ha->max_req_queues--; 1960 1961 ha->max_rsp_queues = ha->max_req_queues; 1962 1963 /* Queue pairs is the max value minus 1964 * the base queue pair */ 1965 ha->max_qpairs = ha->max_req_queues - 1; 1966 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0190, 1967 "Max no of queues pairs: %d.\n", ha->max_qpairs); 1968 } 1969 ql_log_pci(ql_log_info, ha->pdev, 0x011c, 1970 "MSI-X vector count: %d.\n", ha->msix_count); 1971 } else 1972 ql_log_pci(ql_log_info, ha->pdev, 0x011e, 1973 "BAR 1 not enabled.\n"); 1974 1975 mqiobase_exit: 1976 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011f, 1977 "MSIX Count: %d.\n", ha->msix_count); 1978 return 0; 1979 1980 iospace_error_exit: 1981 return -ENOMEM; 1982 } 1983 1984 static struct isp_operations qla2100_isp_ops = { 1985 .pci_config = qla2100_pci_config, 1986 .reset_chip = qla2x00_reset_chip, 1987 .chip_diag = qla2x00_chip_diag, 1988 .config_rings = qla2x00_config_rings, 1989 .reset_adapter = qla2x00_reset_adapter, 1990 .nvram_config = qla2x00_nvram_config, 1991 .update_fw_options = qla2x00_update_fw_options, 1992 .load_risc = qla2x00_load_risc, 1993 .pci_info_str = qla2x00_pci_info_str, 1994 .fw_version_str = qla2x00_fw_version_str, 1995 .intr_handler = qla2100_intr_handler, 1996 .enable_intrs = qla2x00_enable_intrs, 1997 .disable_intrs = qla2x00_disable_intrs, 1998 .abort_command = qla2x00_abort_command, 1999 .target_reset = qla2x00_abort_target, 2000 .lun_reset = qla2x00_lun_reset, 2001 .fabric_login = qla2x00_login_fabric, 2002 .fabric_logout = qla2x00_fabric_logout, 2003 .calc_req_entries = qla2x00_calc_iocbs_32, 2004 .build_iocbs = qla2x00_build_scsi_iocbs_32, 2005 .prep_ms_iocb = qla2x00_prep_ms_iocb, 2006 .prep_ms_fdmi_iocb = qla2x00_prep_ms_fdmi_iocb, 2007 .read_nvram = qla2x00_read_nvram_data, 2008 .write_nvram = qla2x00_write_nvram_data, 2009 .fw_dump = qla2100_fw_dump, 2010 .beacon_on = NULL, 2011 .beacon_off = NULL, 2012 .beacon_blink = NULL, 2013 .read_optrom = qla2x00_read_optrom_data, 2014 .write_optrom = qla2x00_write_optrom_data, 2015 .get_flash_version = qla2x00_get_flash_version, 2016 .start_scsi = qla2x00_start_scsi, 2017 .start_scsi_mq = NULL, 2018 .abort_isp = qla2x00_abort_isp, 2019 .iospace_config = qla2x00_iospace_config, 2020 .initialize_adapter = qla2x00_initialize_adapter, 2021 }; 2022 2023 static struct isp_operations qla2300_isp_ops = { 2024 .pci_config = qla2300_pci_config, 2025 .reset_chip = qla2x00_reset_chip, 2026 .chip_diag = qla2x00_chip_diag, 2027 .config_rings = qla2x00_config_rings, 2028 .reset_adapter = qla2x00_reset_adapter, 2029 .nvram_config = qla2x00_nvram_config, 2030 .update_fw_options = qla2x00_update_fw_options, 2031 .load_risc = qla2x00_load_risc, 2032 .pci_info_str = qla2x00_pci_info_str, 2033 .fw_version_str = qla2x00_fw_version_str, 2034 .intr_handler = qla2300_intr_handler, 2035 .enable_intrs = qla2x00_enable_intrs, 2036 .disable_intrs = qla2x00_disable_intrs, 2037 .abort_command = qla2x00_abort_command, 2038 .target_reset = qla2x00_abort_target, 2039 .lun_reset = qla2x00_lun_reset, 2040 .fabric_login = qla2x00_login_fabric, 2041 .fabric_logout = qla2x00_fabric_logout, 2042 .calc_req_entries = qla2x00_calc_iocbs_32, 2043 .build_iocbs = qla2x00_build_scsi_iocbs_32, 2044 .prep_ms_iocb = qla2x00_prep_ms_iocb, 2045 .prep_ms_fdmi_iocb = qla2x00_prep_ms_fdmi_iocb, 2046 .read_nvram = qla2x00_read_nvram_data, 2047 .write_nvram = qla2x00_write_nvram_data, 2048 .fw_dump = qla2300_fw_dump, 2049 .beacon_on = qla2x00_beacon_on, 2050 .beacon_off = qla2x00_beacon_off, 2051 .beacon_blink = qla2x00_beacon_blink, 2052 .read_optrom = qla2x00_read_optrom_data, 2053 .write_optrom = qla2x00_write_optrom_data, 2054 .get_flash_version = qla2x00_get_flash_version, 2055 .start_scsi = qla2x00_start_scsi, 2056 .start_scsi_mq = NULL, 2057 .abort_isp = qla2x00_abort_isp, 2058 .iospace_config = qla2x00_iospace_config, 2059 .initialize_adapter = qla2x00_initialize_adapter, 2060 }; 2061 2062 static struct isp_operations qla24xx_isp_ops = { 2063 .pci_config = qla24xx_pci_config, 2064 .reset_chip = qla24xx_reset_chip, 2065 .chip_diag = qla24xx_chip_diag, 2066 .config_rings = qla24xx_config_rings, 2067 .reset_adapter = qla24xx_reset_adapter, 2068 .nvram_config = qla24xx_nvram_config, 2069 .update_fw_options = qla24xx_update_fw_options, 2070 .load_risc = qla24xx_load_risc, 2071 .pci_info_str = qla24xx_pci_info_str, 2072 .fw_version_str = qla24xx_fw_version_str, 2073 .intr_handler = qla24xx_intr_handler, 2074 .enable_intrs = qla24xx_enable_intrs, 2075 .disable_intrs = qla24xx_disable_intrs, 2076 .abort_command = qla24xx_abort_command, 2077 .target_reset = qla24xx_abort_target, 2078 .lun_reset = qla24xx_lun_reset, 2079 .fabric_login = qla24xx_login_fabric, 2080 .fabric_logout = qla24xx_fabric_logout, 2081 .calc_req_entries = NULL, 2082 .build_iocbs = NULL, 2083 .prep_ms_iocb = qla24xx_prep_ms_iocb, 2084 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb, 2085 .read_nvram = qla24xx_read_nvram_data, 2086 .write_nvram = qla24xx_write_nvram_data, 2087 .fw_dump = qla24xx_fw_dump, 2088 .beacon_on = qla24xx_beacon_on, 2089 .beacon_off = qla24xx_beacon_off, 2090 .beacon_blink = qla24xx_beacon_blink, 2091 .read_optrom = qla24xx_read_optrom_data, 2092 .write_optrom = qla24xx_write_optrom_data, 2093 .get_flash_version = qla24xx_get_flash_version, 2094 .start_scsi = qla24xx_start_scsi, 2095 .start_scsi_mq = NULL, 2096 .abort_isp = qla2x00_abort_isp, 2097 .iospace_config = qla2x00_iospace_config, 2098 .initialize_adapter = qla2x00_initialize_adapter, 2099 }; 2100 2101 static struct isp_operations qla25xx_isp_ops = { 2102 .pci_config = qla25xx_pci_config, 2103 .reset_chip = qla24xx_reset_chip, 2104 .chip_diag = qla24xx_chip_diag, 2105 .config_rings = qla24xx_config_rings, 2106 .reset_adapter = qla24xx_reset_adapter, 2107 .nvram_config = qla24xx_nvram_config, 2108 .update_fw_options = qla24xx_update_fw_options, 2109 .load_risc = qla24xx_load_risc, 2110 .pci_info_str = qla24xx_pci_info_str, 2111 .fw_version_str = qla24xx_fw_version_str, 2112 .intr_handler = qla24xx_intr_handler, 2113 .enable_intrs = qla24xx_enable_intrs, 2114 .disable_intrs = qla24xx_disable_intrs, 2115 .abort_command = qla24xx_abort_command, 2116 .target_reset = qla24xx_abort_target, 2117 .lun_reset = qla24xx_lun_reset, 2118 .fabric_login = qla24xx_login_fabric, 2119 .fabric_logout = qla24xx_fabric_logout, 2120 .calc_req_entries = NULL, 2121 .build_iocbs = NULL, 2122 .prep_ms_iocb = qla24xx_prep_ms_iocb, 2123 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb, 2124 .read_nvram = qla25xx_read_nvram_data, 2125 .write_nvram = qla25xx_write_nvram_data, 2126 .fw_dump = qla25xx_fw_dump, 2127 .beacon_on = qla24xx_beacon_on, 2128 .beacon_off = qla24xx_beacon_off, 2129 .beacon_blink = qla24xx_beacon_blink, 2130 .read_optrom = qla25xx_read_optrom_data, 2131 .write_optrom = qla24xx_write_optrom_data, 2132 .get_flash_version = qla24xx_get_flash_version, 2133 .start_scsi = qla24xx_dif_start_scsi, 2134 .start_scsi_mq = qla2xxx_dif_start_scsi_mq, 2135 .abort_isp = qla2x00_abort_isp, 2136 .iospace_config = qla2x00_iospace_config, 2137 .initialize_adapter = qla2x00_initialize_adapter, 2138 }; 2139 2140 static struct isp_operations qla81xx_isp_ops = { 2141 .pci_config = qla25xx_pci_config, 2142 .reset_chip = qla24xx_reset_chip, 2143 .chip_diag = qla24xx_chip_diag, 2144 .config_rings = qla24xx_config_rings, 2145 .reset_adapter = qla24xx_reset_adapter, 2146 .nvram_config = qla81xx_nvram_config, 2147 .update_fw_options = qla81xx_update_fw_options, 2148 .load_risc = qla81xx_load_risc, 2149 .pci_info_str = qla24xx_pci_info_str, 2150 .fw_version_str = qla24xx_fw_version_str, 2151 .intr_handler = qla24xx_intr_handler, 2152 .enable_intrs = qla24xx_enable_intrs, 2153 .disable_intrs = qla24xx_disable_intrs, 2154 .abort_command = qla24xx_abort_command, 2155 .target_reset = qla24xx_abort_target, 2156 .lun_reset = qla24xx_lun_reset, 2157 .fabric_login = qla24xx_login_fabric, 2158 .fabric_logout = qla24xx_fabric_logout, 2159 .calc_req_entries = NULL, 2160 .build_iocbs = NULL, 2161 .prep_ms_iocb = qla24xx_prep_ms_iocb, 2162 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb, 2163 .read_nvram = NULL, 2164 .write_nvram = NULL, 2165 .fw_dump = qla81xx_fw_dump, 2166 .beacon_on = qla24xx_beacon_on, 2167 .beacon_off = qla24xx_beacon_off, 2168 .beacon_blink = qla83xx_beacon_blink, 2169 .read_optrom = qla25xx_read_optrom_data, 2170 .write_optrom = qla24xx_write_optrom_data, 2171 .get_flash_version = qla24xx_get_flash_version, 2172 .start_scsi = qla24xx_dif_start_scsi, 2173 .start_scsi_mq = qla2xxx_dif_start_scsi_mq, 2174 .abort_isp = qla2x00_abort_isp, 2175 .iospace_config = qla2x00_iospace_config, 2176 .initialize_adapter = qla2x00_initialize_adapter, 2177 }; 2178 2179 static struct isp_operations qla82xx_isp_ops = { 2180 .pci_config = qla82xx_pci_config, 2181 .reset_chip = qla82xx_reset_chip, 2182 .chip_diag = qla24xx_chip_diag, 2183 .config_rings = qla82xx_config_rings, 2184 .reset_adapter = qla24xx_reset_adapter, 2185 .nvram_config = qla81xx_nvram_config, 2186 .update_fw_options = qla24xx_update_fw_options, 2187 .load_risc = qla82xx_load_risc, 2188 .pci_info_str = qla24xx_pci_info_str, 2189 .fw_version_str = qla24xx_fw_version_str, 2190 .intr_handler = qla82xx_intr_handler, 2191 .enable_intrs = qla82xx_enable_intrs, 2192 .disable_intrs = qla82xx_disable_intrs, 2193 .abort_command = qla24xx_abort_command, 2194 .target_reset = qla24xx_abort_target, 2195 .lun_reset = qla24xx_lun_reset, 2196 .fabric_login = qla24xx_login_fabric, 2197 .fabric_logout = qla24xx_fabric_logout, 2198 .calc_req_entries = NULL, 2199 .build_iocbs = NULL, 2200 .prep_ms_iocb = qla24xx_prep_ms_iocb, 2201 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb, 2202 .read_nvram = qla24xx_read_nvram_data, 2203 .write_nvram = qla24xx_write_nvram_data, 2204 .fw_dump = qla82xx_fw_dump, 2205 .beacon_on = qla82xx_beacon_on, 2206 .beacon_off = qla82xx_beacon_off, 2207 .beacon_blink = NULL, 2208 .read_optrom = qla82xx_read_optrom_data, 2209 .write_optrom = qla82xx_write_optrom_data, 2210 .get_flash_version = qla82xx_get_flash_version, 2211 .start_scsi = qla82xx_start_scsi, 2212 .start_scsi_mq = NULL, 2213 .abort_isp = qla82xx_abort_isp, 2214 .iospace_config = qla82xx_iospace_config, 2215 .initialize_adapter = qla2x00_initialize_adapter, 2216 }; 2217 2218 static struct isp_operations qla8044_isp_ops = { 2219 .pci_config = qla82xx_pci_config, 2220 .reset_chip = qla82xx_reset_chip, 2221 .chip_diag = qla24xx_chip_diag, 2222 .config_rings = qla82xx_config_rings, 2223 .reset_adapter = qla24xx_reset_adapter, 2224 .nvram_config = qla81xx_nvram_config, 2225 .update_fw_options = qla24xx_update_fw_options, 2226 .load_risc = qla82xx_load_risc, 2227 .pci_info_str = qla24xx_pci_info_str, 2228 .fw_version_str = qla24xx_fw_version_str, 2229 .intr_handler = qla8044_intr_handler, 2230 .enable_intrs = qla82xx_enable_intrs, 2231 .disable_intrs = qla82xx_disable_intrs, 2232 .abort_command = qla24xx_abort_command, 2233 .target_reset = qla24xx_abort_target, 2234 .lun_reset = qla24xx_lun_reset, 2235 .fabric_login = qla24xx_login_fabric, 2236 .fabric_logout = qla24xx_fabric_logout, 2237 .calc_req_entries = NULL, 2238 .build_iocbs = NULL, 2239 .prep_ms_iocb = qla24xx_prep_ms_iocb, 2240 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb, 2241 .read_nvram = NULL, 2242 .write_nvram = NULL, 2243 .fw_dump = qla8044_fw_dump, 2244 .beacon_on = qla82xx_beacon_on, 2245 .beacon_off = qla82xx_beacon_off, 2246 .beacon_blink = NULL, 2247 .read_optrom = qla8044_read_optrom_data, 2248 .write_optrom = qla8044_write_optrom_data, 2249 .get_flash_version = qla82xx_get_flash_version, 2250 .start_scsi = qla82xx_start_scsi, 2251 .start_scsi_mq = NULL, 2252 .abort_isp = qla8044_abort_isp, 2253 .iospace_config = qla82xx_iospace_config, 2254 .initialize_adapter = qla2x00_initialize_adapter, 2255 }; 2256 2257 static struct isp_operations qla83xx_isp_ops = { 2258 .pci_config = qla25xx_pci_config, 2259 .reset_chip = qla24xx_reset_chip, 2260 .chip_diag = qla24xx_chip_diag, 2261 .config_rings = qla24xx_config_rings, 2262 .reset_adapter = qla24xx_reset_adapter, 2263 .nvram_config = qla81xx_nvram_config, 2264 .update_fw_options = qla81xx_update_fw_options, 2265 .load_risc = qla81xx_load_risc, 2266 .pci_info_str = qla24xx_pci_info_str, 2267 .fw_version_str = qla24xx_fw_version_str, 2268 .intr_handler = qla24xx_intr_handler, 2269 .enable_intrs = qla24xx_enable_intrs, 2270 .disable_intrs = qla24xx_disable_intrs, 2271 .abort_command = qla24xx_abort_command, 2272 .target_reset = qla24xx_abort_target, 2273 .lun_reset = qla24xx_lun_reset, 2274 .fabric_login = qla24xx_login_fabric, 2275 .fabric_logout = qla24xx_fabric_logout, 2276 .calc_req_entries = NULL, 2277 .build_iocbs = NULL, 2278 .prep_ms_iocb = qla24xx_prep_ms_iocb, 2279 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb, 2280 .read_nvram = NULL, 2281 .write_nvram = NULL, 2282 .fw_dump = qla83xx_fw_dump, 2283 .beacon_on = qla24xx_beacon_on, 2284 .beacon_off = qla24xx_beacon_off, 2285 .beacon_blink = qla83xx_beacon_blink, 2286 .read_optrom = qla25xx_read_optrom_data, 2287 .write_optrom = qla24xx_write_optrom_data, 2288 .get_flash_version = qla24xx_get_flash_version, 2289 .start_scsi = qla24xx_dif_start_scsi, 2290 .start_scsi_mq = qla2xxx_dif_start_scsi_mq, 2291 .abort_isp = qla2x00_abort_isp, 2292 .iospace_config = qla83xx_iospace_config, 2293 .initialize_adapter = qla2x00_initialize_adapter, 2294 }; 2295 2296 static struct isp_operations qlafx00_isp_ops = { 2297 .pci_config = qlafx00_pci_config, 2298 .reset_chip = qlafx00_soft_reset, 2299 .chip_diag = qlafx00_chip_diag, 2300 .config_rings = qlafx00_config_rings, 2301 .reset_adapter = qlafx00_soft_reset, 2302 .nvram_config = NULL, 2303 .update_fw_options = NULL, 2304 .load_risc = NULL, 2305 .pci_info_str = qlafx00_pci_info_str, 2306 .fw_version_str = qlafx00_fw_version_str, 2307 .intr_handler = qlafx00_intr_handler, 2308 .enable_intrs = qlafx00_enable_intrs, 2309 .disable_intrs = qlafx00_disable_intrs, 2310 .abort_command = qla24xx_async_abort_command, 2311 .target_reset = qlafx00_abort_target, 2312 .lun_reset = qlafx00_lun_reset, 2313 .fabric_login = NULL, 2314 .fabric_logout = NULL, 2315 .calc_req_entries = NULL, 2316 .build_iocbs = NULL, 2317 .prep_ms_iocb = qla24xx_prep_ms_iocb, 2318 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb, 2319 .read_nvram = qla24xx_read_nvram_data, 2320 .write_nvram = qla24xx_write_nvram_data, 2321 .fw_dump = NULL, 2322 .beacon_on = qla24xx_beacon_on, 2323 .beacon_off = qla24xx_beacon_off, 2324 .beacon_blink = NULL, 2325 .read_optrom = qla24xx_read_optrom_data, 2326 .write_optrom = qla24xx_write_optrom_data, 2327 .get_flash_version = qla24xx_get_flash_version, 2328 .start_scsi = qlafx00_start_scsi, 2329 .start_scsi_mq = NULL, 2330 .abort_isp = qlafx00_abort_isp, 2331 .iospace_config = qlafx00_iospace_config, 2332 .initialize_adapter = qlafx00_initialize_adapter, 2333 }; 2334 2335 static struct isp_operations qla27xx_isp_ops = { 2336 .pci_config = qla25xx_pci_config, 2337 .reset_chip = qla24xx_reset_chip, 2338 .chip_diag = qla24xx_chip_diag, 2339 .config_rings = qla24xx_config_rings, 2340 .reset_adapter = qla24xx_reset_adapter, 2341 .nvram_config = qla81xx_nvram_config, 2342 .update_fw_options = qla81xx_update_fw_options, 2343 .load_risc = qla81xx_load_risc, 2344 .pci_info_str = qla24xx_pci_info_str, 2345 .fw_version_str = qla24xx_fw_version_str, 2346 .intr_handler = qla24xx_intr_handler, 2347 .enable_intrs = qla24xx_enable_intrs, 2348 .disable_intrs = qla24xx_disable_intrs, 2349 .abort_command = qla24xx_abort_command, 2350 .target_reset = qla24xx_abort_target, 2351 .lun_reset = qla24xx_lun_reset, 2352 .fabric_login = qla24xx_login_fabric, 2353 .fabric_logout = qla24xx_fabric_logout, 2354 .calc_req_entries = NULL, 2355 .build_iocbs = NULL, 2356 .prep_ms_iocb = qla24xx_prep_ms_iocb, 2357 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb, 2358 .read_nvram = NULL, 2359 .write_nvram = NULL, 2360 .fw_dump = qla27xx_fwdump, 2361 .beacon_on = qla24xx_beacon_on, 2362 .beacon_off = qla24xx_beacon_off, 2363 .beacon_blink = qla83xx_beacon_blink, 2364 .read_optrom = qla25xx_read_optrom_data, 2365 .write_optrom = qla24xx_write_optrom_data, 2366 .get_flash_version = qla24xx_get_flash_version, 2367 .start_scsi = qla24xx_dif_start_scsi, 2368 .start_scsi_mq = qla2xxx_dif_start_scsi_mq, 2369 .abort_isp = qla2x00_abort_isp, 2370 .iospace_config = qla83xx_iospace_config, 2371 .initialize_adapter = qla2x00_initialize_adapter, 2372 }; 2373 2374 static inline void 2375 qla2x00_set_isp_flags(struct qla_hw_data *ha) 2376 { 2377 ha->device_type = DT_EXTENDED_IDS; 2378 switch (ha->pdev->device) { 2379 case PCI_DEVICE_ID_QLOGIC_ISP2100: 2380 ha->isp_type |= DT_ISP2100; 2381 ha->device_type &= ~DT_EXTENDED_IDS; 2382 ha->fw_srisc_address = RISC_START_ADDRESS_2100; 2383 break; 2384 case PCI_DEVICE_ID_QLOGIC_ISP2200: 2385 ha->isp_type |= DT_ISP2200; 2386 ha->device_type &= ~DT_EXTENDED_IDS; 2387 ha->fw_srisc_address = RISC_START_ADDRESS_2100; 2388 break; 2389 case PCI_DEVICE_ID_QLOGIC_ISP2300: 2390 ha->isp_type |= DT_ISP2300; 2391 ha->device_type |= DT_ZIO_SUPPORTED; 2392 ha->fw_srisc_address = RISC_START_ADDRESS_2300; 2393 break; 2394 case PCI_DEVICE_ID_QLOGIC_ISP2312: 2395 ha->isp_type |= DT_ISP2312; 2396 ha->device_type |= DT_ZIO_SUPPORTED; 2397 ha->fw_srisc_address = RISC_START_ADDRESS_2300; 2398 break; 2399 case PCI_DEVICE_ID_QLOGIC_ISP2322: 2400 ha->isp_type |= DT_ISP2322; 2401 ha->device_type |= DT_ZIO_SUPPORTED; 2402 if (ha->pdev->subsystem_vendor == 0x1028 && 2403 ha->pdev->subsystem_device == 0x0170) 2404 ha->device_type |= DT_OEM_001; 2405 ha->fw_srisc_address = RISC_START_ADDRESS_2300; 2406 break; 2407 case PCI_DEVICE_ID_QLOGIC_ISP6312: 2408 ha->isp_type |= DT_ISP6312; 2409 ha->fw_srisc_address = RISC_START_ADDRESS_2300; 2410 break; 2411 case PCI_DEVICE_ID_QLOGIC_ISP6322: 2412 ha->isp_type |= DT_ISP6322; 2413 ha->fw_srisc_address = RISC_START_ADDRESS_2300; 2414 break; 2415 case PCI_DEVICE_ID_QLOGIC_ISP2422: 2416 ha->isp_type |= DT_ISP2422; 2417 ha->device_type |= DT_ZIO_SUPPORTED; 2418 ha->device_type |= DT_FWI2; 2419 ha->device_type |= DT_IIDMA; 2420 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2421 break; 2422 case PCI_DEVICE_ID_QLOGIC_ISP2432: 2423 ha->isp_type |= DT_ISP2432; 2424 ha->device_type |= DT_ZIO_SUPPORTED; 2425 ha->device_type |= DT_FWI2; 2426 ha->device_type |= DT_IIDMA; 2427 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2428 break; 2429 case PCI_DEVICE_ID_QLOGIC_ISP8432: 2430 ha->isp_type |= DT_ISP8432; 2431 ha->device_type |= DT_ZIO_SUPPORTED; 2432 ha->device_type |= DT_FWI2; 2433 ha->device_type |= DT_IIDMA; 2434 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2435 break; 2436 case PCI_DEVICE_ID_QLOGIC_ISP5422: 2437 ha->isp_type |= DT_ISP5422; 2438 ha->device_type |= DT_FWI2; 2439 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2440 break; 2441 case PCI_DEVICE_ID_QLOGIC_ISP5432: 2442 ha->isp_type |= DT_ISP5432; 2443 ha->device_type |= DT_FWI2; 2444 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2445 break; 2446 case PCI_DEVICE_ID_QLOGIC_ISP2532: 2447 ha->isp_type |= DT_ISP2532; 2448 ha->device_type |= DT_ZIO_SUPPORTED; 2449 ha->device_type |= DT_FWI2; 2450 ha->device_type |= DT_IIDMA; 2451 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2452 break; 2453 case PCI_DEVICE_ID_QLOGIC_ISP8001: 2454 ha->isp_type |= DT_ISP8001; 2455 ha->device_type |= DT_ZIO_SUPPORTED; 2456 ha->device_type |= DT_FWI2; 2457 ha->device_type |= DT_IIDMA; 2458 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2459 break; 2460 case PCI_DEVICE_ID_QLOGIC_ISP8021: 2461 ha->isp_type |= DT_ISP8021; 2462 ha->device_type |= DT_ZIO_SUPPORTED; 2463 ha->device_type |= DT_FWI2; 2464 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2465 /* Initialize 82XX ISP flags */ 2466 qla82xx_init_flags(ha); 2467 break; 2468 case PCI_DEVICE_ID_QLOGIC_ISP8044: 2469 ha->isp_type |= DT_ISP8044; 2470 ha->device_type |= DT_ZIO_SUPPORTED; 2471 ha->device_type |= DT_FWI2; 2472 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2473 /* Initialize 82XX ISP flags */ 2474 qla82xx_init_flags(ha); 2475 break; 2476 case PCI_DEVICE_ID_QLOGIC_ISP2031: 2477 ha->isp_type |= DT_ISP2031; 2478 ha->device_type |= DT_ZIO_SUPPORTED; 2479 ha->device_type |= DT_FWI2; 2480 ha->device_type |= DT_IIDMA; 2481 ha->device_type |= DT_T10_PI; 2482 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2483 break; 2484 case PCI_DEVICE_ID_QLOGIC_ISP8031: 2485 ha->isp_type |= DT_ISP8031; 2486 ha->device_type |= DT_ZIO_SUPPORTED; 2487 ha->device_type |= DT_FWI2; 2488 ha->device_type |= DT_IIDMA; 2489 ha->device_type |= DT_T10_PI; 2490 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2491 break; 2492 case PCI_DEVICE_ID_QLOGIC_ISPF001: 2493 ha->isp_type |= DT_ISPFX00; 2494 break; 2495 case PCI_DEVICE_ID_QLOGIC_ISP2071: 2496 ha->isp_type |= DT_ISP2071; 2497 ha->device_type |= DT_ZIO_SUPPORTED; 2498 ha->device_type |= DT_FWI2; 2499 ha->device_type |= DT_IIDMA; 2500 ha->device_type |= DT_T10_PI; 2501 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2502 break; 2503 case PCI_DEVICE_ID_QLOGIC_ISP2271: 2504 ha->isp_type |= DT_ISP2271; 2505 ha->device_type |= DT_ZIO_SUPPORTED; 2506 ha->device_type |= DT_FWI2; 2507 ha->device_type |= DT_IIDMA; 2508 ha->device_type |= DT_T10_PI; 2509 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2510 break; 2511 case PCI_DEVICE_ID_QLOGIC_ISP2261: 2512 ha->isp_type |= DT_ISP2261; 2513 ha->device_type |= DT_ZIO_SUPPORTED; 2514 ha->device_type |= DT_FWI2; 2515 ha->device_type |= DT_IIDMA; 2516 ha->device_type |= DT_T10_PI; 2517 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2518 break; 2519 } 2520 2521 if (IS_QLA82XX(ha)) 2522 ha->port_no = ha->portnum & 1; 2523 else { 2524 /* Get adapter physical port no from interrupt pin register. */ 2525 pci_read_config_byte(ha->pdev, PCI_INTERRUPT_PIN, &ha->port_no); 2526 if (IS_QLA27XX(ha)) 2527 ha->port_no--; 2528 else 2529 ha->port_no = !(ha->port_no & 1); 2530 } 2531 2532 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x000b, 2533 "device_type=0x%x port=%d fw_srisc_address=0x%x.\n", 2534 ha->device_type, ha->port_no, ha->fw_srisc_address); 2535 } 2536 2537 static void 2538 qla2xxx_scan_start(struct Scsi_Host *shost) 2539 { 2540 scsi_qla_host_t *vha = shost_priv(shost); 2541 2542 if (vha->hw->flags.running_gold_fw) 2543 return; 2544 2545 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 2546 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 2547 set_bit(RSCN_UPDATE, &vha->dpc_flags); 2548 set_bit(NPIV_CONFIG_NEEDED, &vha->dpc_flags); 2549 } 2550 2551 static int 2552 qla2xxx_scan_finished(struct Scsi_Host *shost, unsigned long time) 2553 { 2554 scsi_qla_host_t *vha = shost_priv(shost); 2555 2556 if (test_bit(UNLOADING, &vha->dpc_flags)) 2557 return 1; 2558 if (!vha->host) 2559 return 1; 2560 if (time > vha->hw->loop_reset_delay * HZ) 2561 return 1; 2562 2563 return atomic_read(&vha->loop_state) == LOOP_READY; 2564 } 2565 2566 static void qla2x00_iocb_work_fn(struct work_struct *work) 2567 { 2568 struct scsi_qla_host *vha = container_of(work, 2569 struct scsi_qla_host, iocb_work); 2570 int cnt = 0; 2571 2572 while (!list_empty(&vha->work_list)) { 2573 qla2x00_do_work(vha); 2574 cnt++; 2575 if (cnt > 10) 2576 break; 2577 } 2578 } 2579 2580 /* 2581 * PCI driver interface 2582 */ 2583 static int 2584 qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) 2585 { 2586 int ret = -ENODEV; 2587 struct Scsi_Host *host; 2588 scsi_qla_host_t *base_vha = NULL; 2589 struct qla_hw_data *ha; 2590 char pci_info[30]; 2591 char fw_str[30], wq_name[30]; 2592 struct scsi_host_template *sht; 2593 int bars, mem_only = 0; 2594 uint16_t req_length = 0, rsp_length = 0; 2595 struct req_que *req = NULL; 2596 struct rsp_que *rsp = NULL; 2597 int i; 2598 2599 bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO); 2600 sht = &qla2xxx_driver_template; 2601 if (pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2422 || 2602 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2432 || 2603 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8432 || 2604 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5422 || 2605 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5432 || 2606 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2532 || 2607 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8001 || 2608 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8021 || 2609 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2031 || 2610 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8031 || 2611 pdev->device == PCI_DEVICE_ID_QLOGIC_ISPF001 || 2612 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8044 || 2613 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2071 || 2614 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2271 || 2615 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2261) { 2616 bars = pci_select_bars(pdev, IORESOURCE_MEM); 2617 mem_only = 1; 2618 ql_dbg_pci(ql_dbg_init, pdev, 0x0007, 2619 "Mem only adapter.\n"); 2620 } 2621 ql_dbg_pci(ql_dbg_init, pdev, 0x0008, 2622 "Bars=%d.\n", bars); 2623 2624 if (mem_only) { 2625 if (pci_enable_device_mem(pdev)) 2626 goto probe_out; 2627 } else { 2628 if (pci_enable_device(pdev)) 2629 goto probe_out; 2630 } 2631 2632 /* This may fail but that's ok */ 2633 pci_enable_pcie_error_reporting(pdev); 2634 2635 ha = kzalloc(sizeof(struct qla_hw_data), GFP_KERNEL); 2636 if (!ha) { 2637 ql_log_pci(ql_log_fatal, pdev, 0x0009, 2638 "Unable to allocate memory for ha.\n"); 2639 goto probe_out; 2640 } 2641 ql_dbg_pci(ql_dbg_init, pdev, 0x000a, 2642 "Memory allocated for ha=%p.\n", ha); 2643 ha->pdev = pdev; 2644 ha->tgt.enable_class_2 = ql2xenableclass2; 2645 INIT_LIST_HEAD(&ha->tgt.q_full_list); 2646 spin_lock_init(&ha->tgt.q_full_lock); 2647 spin_lock_init(&ha->tgt.sess_lock); 2648 spin_lock_init(&ha->tgt.atio_lock); 2649 2650 2651 /* Clear our data area */ 2652 ha->bars = bars; 2653 ha->mem_only = mem_only; 2654 spin_lock_init(&ha->hardware_lock); 2655 spin_lock_init(&ha->vport_slock); 2656 mutex_init(&ha->selflogin_lock); 2657 mutex_init(&ha->optrom_mutex); 2658 2659 /* Set ISP-type information. */ 2660 qla2x00_set_isp_flags(ha); 2661 2662 /* Set EEH reset type to fundamental if required by hba */ 2663 if (IS_QLA24XX(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha) || 2664 IS_QLA83XX(ha) || IS_QLA27XX(ha)) 2665 pdev->needs_freset = 1; 2666 2667 ha->prev_topology = 0; 2668 ha->init_cb_size = sizeof(init_cb_t); 2669 ha->link_data_rate = PORT_SPEED_UNKNOWN; 2670 ha->optrom_size = OPTROM_SIZE_2300; 2671 2672 /* Assign ISP specific operations. */ 2673 if (IS_QLA2100(ha)) { 2674 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2100; 2675 ha->mbx_count = MAILBOX_REGISTER_COUNT_2100; 2676 req_length = REQUEST_ENTRY_CNT_2100; 2677 rsp_length = RESPONSE_ENTRY_CNT_2100; 2678 ha->max_loop_id = SNS_LAST_LOOP_ID_2100; 2679 ha->gid_list_info_size = 4; 2680 ha->flash_conf_off = ~0; 2681 ha->flash_data_off = ~0; 2682 ha->nvram_conf_off = ~0; 2683 ha->nvram_data_off = ~0; 2684 ha->isp_ops = &qla2100_isp_ops; 2685 } else if (IS_QLA2200(ha)) { 2686 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2100; 2687 ha->mbx_count = MAILBOX_REGISTER_COUNT_2200; 2688 req_length = REQUEST_ENTRY_CNT_2200; 2689 rsp_length = RESPONSE_ENTRY_CNT_2100; 2690 ha->max_loop_id = SNS_LAST_LOOP_ID_2100; 2691 ha->gid_list_info_size = 4; 2692 ha->flash_conf_off = ~0; 2693 ha->flash_data_off = ~0; 2694 ha->nvram_conf_off = ~0; 2695 ha->nvram_data_off = ~0; 2696 ha->isp_ops = &qla2100_isp_ops; 2697 } else if (IS_QLA23XX(ha)) { 2698 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2100; 2699 ha->mbx_count = MAILBOX_REGISTER_COUNT; 2700 req_length = REQUEST_ENTRY_CNT_2200; 2701 rsp_length = RESPONSE_ENTRY_CNT_2300; 2702 ha->max_loop_id = SNS_LAST_LOOP_ID_2300; 2703 ha->gid_list_info_size = 6; 2704 if (IS_QLA2322(ha) || IS_QLA6322(ha)) 2705 ha->optrom_size = OPTROM_SIZE_2322; 2706 ha->flash_conf_off = ~0; 2707 ha->flash_data_off = ~0; 2708 ha->nvram_conf_off = ~0; 2709 ha->nvram_data_off = ~0; 2710 ha->isp_ops = &qla2300_isp_ops; 2711 } else if (IS_QLA24XX_TYPE(ha)) { 2712 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; 2713 ha->mbx_count = MAILBOX_REGISTER_COUNT; 2714 req_length = REQUEST_ENTRY_CNT_24XX; 2715 rsp_length = RESPONSE_ENTRY_CNT_2300; 2716 ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; 2717 ha->max_loop_id = SNS_LAST_LOOP_ID_2300; 2718 ha->init_cb_size = sizeof(struct mid_init_cb_24xx); 2719 ha->gid_list_info_size = 8; 2720 ha->optrom_size = OPTROM_SIZE_24XX; 2721 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA24XX; 2722 ha->isp_ops = &qla24xx_isp_ops; 2723 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF; 2724 ha->flash_data_off = FARX_ACCESS_FLASH_DATA; 2725 ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF; 2726 ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA; 2727 } else if (IS_QLA25XX(ha)) { 2728 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; 2729 ha->mbx_count = MAILBOX_REGISTER_COUNT; 2730 req_length = REQUEST_ENTRY_CNT_24XX; 2731 rsp_length = RESPONSE_ENTRY_CNT_2300; 2732 ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; 2733 ha->max_loop_id = SNS_LAST_LOOP_ID_2300; 2734 ha->init_cb_size = sizeof(struct mid_init_cb_24xx); 2735 ha->gid_list_info_size = 8; 2736 ha->optrom_size = OPTROM_SIZE_25XX; 2737 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX; 2738 ha->isp_ops = &qla25xx_isp_ops; 2739 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF; 2740 ha->flash_data_off = FARX_ACCESS_FLASH_DATA; 2741 ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF; 2742 ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA; 2743 } else if (IS_QLA81XX(ha)) { 2744 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; 2745 ha->mbx_count = MAILBOX_REGISTER_COUNT; 2746 req_length = REQUEST_ENTRY_CNT_24XX; 2747 rsp_length = RESPONSE_ENTRY_CNT_2300; 2748 ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; 2749 ha->max_loop_id = SNS_LAST_LOOP_ID_2300; 2750 ha->init_cb_size = sizeof(struct mid_init_cb_81xx); 2751 ha->gid_list_info_size = 8; 2752 ha->optrom_size = OPTROM_SIZE_81XX; 2753 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX; 2754 ha->isp_ops = &qla81xx_isp_ops; 2755 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX; 2756 ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX; 2757 ha->nvram_conf_off = ~0; 2758 ha->nvram_data_off = ~0; 2759 } else if (IS_QLA82XX(ha)) { 2760 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; 2761 ha->mbx_count = MAILBOX_REGISTER_COUNT; 2762 req_length = REQUEST_ENTRY_CNT_82XX; 2763 rsp_length = RESPONSE_ENTRY_CNT_82XX; 2764 ha->max_loop_id = SNS_LAST_LOOP_ID_2300; 2765 ha->init_cb_size = sizeof(struct mid_init_cb_81xx); 2766 ha->gid_list_info_size = 8; 2767 ha->optrom_size = OPTROM_SIZE_82XX; 2768 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX; 2769 ha->isp_ops = &qla82xx_isp_ops; 2770 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF; 2771 ha->flash_data_off = FARX_ACCESS_FLASH_DATA; 2772 ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF; 2773 ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA; 2774 } else if (IS_QLA8044(ha)) { 2775 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; 2776 ha->mbx_count = MAILBOX_REGISTER_COUNT; 2777 req_length = REQUEST_ENTRY_CNT_82XX; 2778 rsp_length = RESPONSE_ENTRY_CNT_82XX; 2779 ha->max_loop_id = SNS_LAST_LOOP_ID_2300; 2780 ha->init_cb_size = sizeof(struct mid_init_cb_81xx); 2781 ha->gid_list_info_size = 8; 2782 ha->optrom_size = OPTROM_SIZE_83XX; 2783 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX; 2784 ha->isp_ops = &qla8044_isp_ops; 2785 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF; 2786 ha->flash_data_off = FARX_ACCESS_FLASH_DATA; 2787 ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF; 2788 ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA; 2789 } else if (IS_QLA83XX(ha)) { 2790 ha->portnum = PCI_FUNC(ha->pdev->devfn); 2791 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; 2792 ha->mbx_count = MAILBOX_REGISTER_COUNT; 2793 req_length = REQUEST_ENTRY_CNT_83XX; 2794 rsp_length = RESPONSE_ENTRY_CNT_83XX; 2795 ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; 2796 ha->max_loop_id = SNS_LAST_LOOP_ID_2300; 2797 ha->init_cb_size = sizeof(struct mid_init_cb_81xx); 2798 ha->gid_list_info_size = 8; 2799 ha->optrom_size = OPTROM_SIZE_83XX; 2800 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX; 2801 ha->isp_ops = &qla83xx_isp_ops; 2802 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX; 2803 ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX; 2804 ha->nvram_conf_off = ~0; 2805 ha->nvram_data_off = ~0; 2806 } else if (IS_QLAFX00(ha)) { 2807 ha->max_fibre_devices = MAX_FIBRE_DEVICES_FX00; 2808 ha->mbx_count = MAILBOX_REGISTER_COUNT_FX00; 2809 ha->aen_mbx_count = AEN_MAILBOX_REGISTER_COUNT_FX00; 2810 req_length = REQUEST_ENTRY_CNT_FX00; 2811 rsp_length = RESPONSE_ENTRY_CNT_FX00; 2812 ha->isp_ops = &qlafx00_isp_ops; 2813 ha->port_down_retry_count = 30; /* default value */ 2814 ha->mr.fw_hbt_cnt = QLAFX00_HEARTBEAT_INTERVAL; 2815 ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL; 2816 ha->mr.fw_critemp_timer_tick = QLAFX00_CRITEMP_INTERVAL; 2817 ha->mr.fw_hbt_en = 1; 2818 ha->mr.host_info_resend = false; 2819 ha->mr.hinfo_resend_timer_tick = QLAFX00_HINFO_RESEND_INTERVAL; 2820 } else if (IS_QLA27XX(ha)) { 2821 ha->portnum = PCI_FUNC(ha->pdev->devfn); 2822 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; 2823 ha->mbx_count = MAILBOX_REGISTER_COUNT; 2824 req_length = REQUEST_ENTRY_CNT_83XX; 2825 rsp_length = RESPONSE_ENTRY_CNT_83XX; 2826 ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; 2827 ha->max_loop_id = SNS_LAST_LOOP_ID_2300; 2828 ha->init_cb_size = sizeof(struct mid_init_cb_81xx); 2829 ha->gid_list_info_size = 8; 2830 ha->optrom_size = OPTROM_SIZE_83XX; 2831 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX; 2832 ha->isp_ops = &qla27xx_isp_ops; 2833 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX; 2834 ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX; 2835 ha->nvram_conf_off = ~0; 2836 ha->nvram_data_off = ~0; 2837 } 2838 2839 ql_dbg_pci(ql_dbg_init, pdev, 0x001e, 2840 "mbx_count=%d, req_length=%d, " 2841 "rsp_length=%d, max_loop_id=%d, init_cb_size=%d, " 2842 "gid_list_info_size=%d, optrom_size=%d, nvram_npiv_size=%d, " 2843 "max_fibre_devices=%d.\n", 2844 ha->mbx_count, req_length, rsp_length, ha->max_loop_id, 2845 ha->init_cb_size, ha->gid_list_info_size, ha->optrom_size, 2846 ha->nvram_npiv_size, ha->max_fibre_devices); 2847 ql_dbg_pci(ql_dbg_init, pdev, 0x001f, 2848 "isp_ops=%p, flash_conf_off=%d, " 2849 "flash_data_off=%d, nvram_conf_off=%d, nvram_data_off=%d.\n", 2850 ha->isp_ops, ha->flash_conf_off, ha->flash_data_off, 2851 ha->nvram_conf_off, ha->nvram_data_off); 2852 2853 /* Configure PCI I/O space */ 2854 ret = ha->isp_ops->iospace_config(ha); 2855 if (ret) 2856 goto iospace_config_failed; 2857 2858 ql_log_pci(ql_log_info, pdev, 0x001d, 2859 "Found an ISP%04X irq %d iobase 0x%p.\n", 2860 pdev->device, pdev->irq, ha->iobase); 2861 mutex_init(&ha->vport_lock); 2862 mutex_init(&ha->mq_lock); 2863 init_completion(&ha->mbx_cmd_comp); 2864 complete(&ha->mbx_cmd_comp); 2865 init_completion(&ha->mbx_intr_comp); 2866 init_completion(&ha->dcbx_comp); 2867 init_completion(&ha->lb_portup_comp); 2868 2869 set_bit(0, (unsigned long *) ha->vp_idx_map); 2870 2871 qla2x00_config_dma_addressing(ha); 2872 ql_dbg_pci(ql_dbg_init, pdev, 0x0020, 2873 "64 Bit addressing is %s.\n", 2874 ha->flags.enable_64bit_addressing ? "enable" : 2875 "disable"); 2876 ret = qla2x00_mem_alloc(ha, req_length, rsp_length, &req, &rsp); 2877 if (ret) { 2878 ql_log_pci(ql_log_fatal, pdev, 0x0031, 2879 "Failed to allocate memory for adapter, aborting.\n"); 2880 2881 goto probe_hw_failed; 2882 } 2883 2884 req->max_q_depth = MAX_Q_DEPTH; 2885 if (ql2xmaxqdepth != 0 && ql2xmaxqdepth <= 0xffffU) 2886 req->max_q_depth = ql2xmaxqdepth; 2887 2888 2889 base_vha = qla2x00_create_host(sht, ha); 2890 if (!base_vha) { 2891 ret = -ENOMEM; 2892 qla2x00_mem_free(ha); 2893 qla2x00_free_req_que(ha, req); 2894 qla2x00_free_rsp_que(ha, rsp); 2895 goto probe_hw_failed; 2896 } 2897 2898 pci_set_drvdata(pdev, base_vha); 2899 set_bit(PFLG_DRIVER_PROBING, &base_vha->pci_flags); 2900 2901 host = base_vha->host; 2902 base_vha->req = req; 2903 if (IS_QLA2XXX_MIDTYPE(ha)) 2904 base_vha->mgmt_svr_loop_id = 10 + base_vha->vp_idx; 2905 else 2906 base_vha->mgmt_svr_loop_id = MANAGEMENT_SERVER + 2907 base_vha->vp_idx; 2908 2909 /* Setup fcport template structure. */ 2910 ha->mr.fcport.vha = base_vha; 2911 ha->mr.fcport.port_type = FCT_UNKNOWN; 2912 ha->mr.fcport.loop_id = FC_NO_LOOP_ID; 2913 qla2x00_set_fcport_state(&ha->mr.fcport, FCS_UNCONFIGURED); 2914 ha->mr.fcport.supported_classes = FC_COS_UNSPECIFIED; 2915 ha->mr.fcport.scan_state = 1; 2916 2917 /* Set the SG table size based on ISP type */ 2918 if (!IS_FWI2_CAPABLE(ha)) { 2919 if (IS_QLA2100(ha)) 2920 host->sg_tablesize = 32; 2921 } else { 2922 if (!IS_QLA82XX(ha)) 2923 host->sg_tablesize = QLA_SG_ALL; 2924 } 2925 host->max_id = ha->max_fibre_devices; 2926 host->cmd_per_lun = 3; 2927 host->unique_id = host->host_no; 2928 if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) 2929 host->max_cmd_len = 32; 2930 else 2931 host->max_cmd_len = MAX_CMDSZ; 2932 host->max_channel = MAX_BUSES - 1; 2933 /* Older HBAs support only 16-bit LUNs */ 2934 if (!IS_QLAFX00(ha) && !IS_FWI2_CAPABLE(ha) && 2935 ql2xmaxlun > 0xffff) 2936 host->max_lun = 0xffff; 2937 else 2938 host->max_lun = ql2xmaxlun; 2939 host->transportt = qla2xxx_transport_template; 2940 sht->vendor_id = (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC); 2941 2942 ql_dbg(ql_dbg_init, base_vha, 0x0033, 2943 "max_id=%d this_id=%d " 2944 "cmd_per_len=%d unique_id=%d max_cmd_len=%d max_channel=%d " 2945 "max_lun=%llu transportt=%p, vendor_id=%llu.\n", host->max_id, 2946 host->this_id, host->cmd_per_lun, host->unique_id, 2947 host->max_cmd_len, host->max_channel, host->max_lun, 2948 host->transportt, sht->vendor_id); 2949 2950 /* Set up the irqs */ 2951 ret = qla2x00_request_irqs(ha, rsp); 2952 if (ret) 2953 goto probe_init_failed; 2954 2955 /* Alloc arrays of request and response ring ptrs */ 2956 if (!qla2x00_alloc_queues(ha, req, rsp)) { 2957 ql_log(ql_log_fatal, base_vha, 0x003d, 2958 "Failed to allocate memory for queue pointers..." 2959 "aborting.\n"); 2960 goto probe_init_failed; 2961 } 2962 2963 if (ha->mqenable && shost_use_blk_mq(host)) { 2964 /* number of hardware queues supported by blk/scsi-mq*/ 2965 host->nr_hw_queues = ha->max_qpairs; 2966 2967 ql_dbg(ql_dbg_init, base_vha, 0x0192, 2968 "blk/scsi-mq enabled, HW queues = %d.\n", host->nr_hw_queues); 2969 } else 2970 ql_dbg(ql_dbg_init, base_vha, 0x0193, 2971 "blk/scsi-mq disabled.\n"); 2972 2973 qlt_probe_one_stage1(base_vha, ha); 2974 2975 pci_save_state(pdev); 2976 2977 /* Assign back pointers */ 2978 rsp->req = req; 2979 req->rsp = rsp; 2980 2981 if (IS_QLAFX00(ha)) { 2982 ha->rsp_q_map[0] = rsp; 2983 ha->req_q_map[0] = req; 2984 set_bit(0, ha->req_qid_map); 2985 set_bit(0, ha->rsp_qid_map); 2986 } 2987 2988 /* FWI2-capable only. */ 2989 req->req_q_in = &ha->iobase->isp24.req_q_in; 2990 req->req_q_out = &ha->iobase->isp24.req_q_out; 2991 rsp->rsp_q_in = &ha->iobase->isp24.rsp_q_in; 2992 rsp->rsp_q_out = &ha->iobase->isp24.rsp_q_out; 2993 if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) { 2994 req->req_q_in = &ha->mqiobase->isp25mq.req_q_in; 2995 req->req_q_out = &ha->mqiobase->isp25mq.req_q_out; 2996 rsp->rsp_q_in = &ha->mqiobase->isp25mq.rsp_q_in; 2997 rsp->rsp_q_out = &ha->mqiobase->isp25mq.rsp_q_out; 2998 } 2999 3000 if (IS_QLAFX00(ha)) { 3001 req->req_q_in = &ha->iobase->ispfx00.req_q_in; 3002 req->req_q_out = &ha->iobase->ispfx00.req_q_out; 3003 rsp->rsp_q_in = &ha->iobase->ispfx00.rsp_q_in; 3004 rsp->rsp_q_out = &ha->iobase->ispfx00.rsp_q_out; 3005 } 3006 3007 if (IS_P3P_TYPE(ha)) { 3008 req->req_q_out = &ha->iobase->isp82.req_q_out[0]; 3009 rsp->rsp_q_in = &ha->iobase->isp82.rsp_q_in[0]; 3010 rsp->rsp_q_out = &ha->iobase->isp82.rsp_q_out[0]; 3011 } 3012 3013 ql_dbg(ql_dbg_multiq, base_vha, 0xc009, 3014 "rsp_q_map=%p req_q_map=%p rsp->req=%p req->rsp=%p.\n", 3015 ha->rsp_q_map, ha->req_q_map, rsp->req, req->rsp); 3016 ql_dbg(ql_dbg_multiq, base_vha, 0xc00a, 3017 "req->req_q_in=%p req->req_q_out=%p " 3018 "rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n", 3019 req->req_q_in, req->req_q_out, 3020 rsp->rsp_q_in, rsp->rsp_q_out); 3021 ql_dbg(ql_dbg_init, base_vha, 0x003e, 3022 "rsp_q_map=%p req_q_map=%p rsp->req=%p req->rsp=%p.\n", 3023 ha->rsp_q_map, ha->req_q_map, rsp->req, req->rsp); 3024 ql_dbg(ql_dbg_init, base_vha, 0x003f, 3025 "req->req_q_in=%p req->req_q_out=%p rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n", 3026 req->req_q_in, req->req_q_out, rsp->rsp_q_in, rsp->rsp_q_out); 3027 3028 if (ha->isp_ops->initialize_adapter(base_vha)) { 3029 ql_log(ql_log_fatal, base_vha, 0x00d6, 3030 "Failed to initialize adapter - Adapter flags %x.\n", 3031 base_vha->device_flags); 3032 3033 if (IS_QLA82XX(ha)) { 3034 qla82xx_idc_lock(ha); 3035 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 3036 QLA8XXX_DEV_FAILED); 3037 qla82xx_idc_unlock(ha); 3038 ql_log(ql_log_fatal, base_vha, 0x00d7, 3039 "HW State: FAILED.\n"); 3040 } else if (IS_QLA8044(ha)) { 3041 qla8044_idc_lock(ha); 3042 qla8044_wr_direct(base_vha, 3043 QLA8044_CRB_DEV_STATE_INDEX, 3044 QLA8XXX_DEV_FAILED); 3045 qla8044_idc_unlock(ha); 3046 ql_log(ql_log_fatal, base_vha, 0x0150, 3047 "HW State: FAILED.\n"); 3048 } 3049 3050 ret = -ENODEV; 3051 goto probe_failed; 3052 } 3053 3054 if (IS_QLAFX00(ha)) 3055 host->can_queue = QLAFX00_MAX_CANQUEUE; 3056 else 3057 host->can_queue = req->num_outstanding_cmds - 10; 3058 3059 ql_dbg(ql_dbg_init, base_vha, 0x0032, 3060 "can_queue=%d, req=%p, mgmt_svr_loop_id=%d, sg_tablesize=%d.\n", 3061 host->can_queue, base_vha->req, 3062 base_vha->mgmt_svr_loop_id, host->sg_tablesize); 3063 3064 if (ha->mqenable && qla_ini_mode_enabled(base_vha)) { 3065 ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 1); 3066 /* Create start of day qpairs for Block MQ */ 3067 if (shost_use_blk_mq(host)) { 3068 for (i = 0; i < ha->max_qpairs; i++) 3069 qla2xxx_create_qpair(base_vha, 5, 0); 3070 } 3071 } 3072 3073 if (ha->flags.running_gold_fw) 3074 goto skip_dpc; 3075 3076 /* 3077 * Startup the kernel thread for this host adapter 3078 */ 3079 ha->dpc_thread = kthread_create(qla2x00_do_dpc, ha, 3080 "%s_dpc", base_vha->host_str); 3081 if (IS_ERR(ha->dpc_thread)) { 3082 ql_log(ql_log_fatal, base_vha, 0x00ed, 3083 "Failed to start DPC thread.\n"); 3084 ret = PTR_ERR(ha->dpc_thread); 3085 goto probe_failed; 3086 } 3087 ql_dbg(ql_dbg_init, base_vha, 0x00ee, 3088 "DPC thread started successfully.\n"); 3089 3090 /* 3091 * If we're not coming up in initiator mode, we might sit for 3092 * a while without waking up the dpc thread, which leads to a 3093 * stuck process warning. So just kick the dpc once here and 3094 * let the kthread start (and go back to sleep in qla2x00_do_dpc). 3095 */ 3096 qla2xxx_wake_dpc(base_vha); 3097 3098 INIT_WORK(&base_vha->iocb_work, qla2x00_iocb_work_fn); 3099 INIT_WORK(&ha->board_disable, qla2x00_disable_board_on_pci_error); 3100 3101 if (IS_QLA8031(ha) || IS_MCTP_CAPABLE(ha)) { 3102 sprintf(wq_name, "qla2xxx_%lu_dpc_lp_wq", base_vha->host_no); 3103 ha->dpc_lp_wq = create_singlethread_workqueue(wq_name); 3104 INIT_WORK(&ha->idc_aen, qla83xx_service_idc_aen); 3105 3106 sprintf(wq_name, "qla2xxx_%lu_dpc_hp_wq", base_vha->host_no); 3107 ha->dpc_hp_wq = create_singlethread_workqueue(wq_name); 3108 INIT_WORK(&ha->nic_core_reset, qla83xx_nic_core_reset_work); 3109 INIT_WORK(&ha->idc_state_handler, 3110 qla83xx_idc_state_handler_work); 3111 INIT_WORK(&ha->nic_core_unrecoverable, 3112 qla83xx_nic_core_unrecoverable_work); 3113 } 3114 3115 skip_dpc: 3116 list_add_tail(&base_vha->list, &ha->vp_list); 3117 base_vha->host->irq = ha->pdev->irq; 3118 3119 /* Initialized the timer */ 3120 qla2x00_start_timer(base_vha, qla2x00_timer, WATCH_INTERVAL); 3121 ql_dbg(ql_dbg_init, base_vha, 0x00ef, 3122 "Started qla2x00_timer with " 3123 "interval=%d.\n", WATCH_INTERVAL); 3124 ql_dbg(ql_dbg_init, base_vha, 0x00f0, 3125 "Detected hba at address=%p.\n", 3126 ha); 3127 3128 if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) { 3129 if (ha->fw_attributes & BIT_4) { 3130 int prot = 0, guard; 3131 base_vha->flags.difdix_supported = 1; 3132 ql_dbg(ql_dbg_init, base_vha, 0x00f1, 3133 "Registering for DIF/DIX type 1 and 3 protection.\n"); 3134 if (ql2xenabledif == 1) 3135 prot = SHOST_DIX_TYPE0_PROTECTION; 3136 scsi_host_set_prot(host, 3137 prot | SHOST_DIF_TYPE1_PROTECTION 3138 | SHOST_DIF_TYPE2_PROTECTION 3139 | SHOST_DIF_TYPE3_PROTECTION 3140 | SHOST_DIX_TYPE1_PROTECTION 3141 | SHOST_DIX_TYPE2_PROTECTION 3142 | SHOST_DIX_TYPE3_PROTECTION); 3143 3144 guard = SHOST_DIX_GUARD_CRC; 3145 3146 if (IS_PI_IPGUARD_CAPABLE(ha) && 3147 (ql2xenabledif > 1 || IS_PI_DIFB_DIX0_CAPABLE(ha))) 3148 guard |= SHOST_DIX_GUARD_IP; 3149 3150 scsi_host_set_guard(host, guard); 3151 } else 3152 base_vha->flags.difdix_supported = 0; 3153 } 3154 3155 ha->isp_ops->enable_intrs(ha); 3156 3157 if (IS_QLAFX00(ha)) { 3158 ret = qlafx00_fx_disc(base_vha, 3159 &base_vha->hw->mr.fcport, FXDISC_GET_CONFIG_INFO); 3160 host->sg_tablesize = (ha->mr.extended_io_enabled) ? 3161 QLA_SG_ALL : 128; 3162 } 3163 3164 ret = scsi_add_host(host, &pdev->dev); 3165 if (ret) 3166 goto probe_failed; 3167 3168 base_vha->flags.init_done = 1; 3169 base_vha->flags.online = 1; 3170 ha->prev_minidump_failed = 0; 3171 3172 ql_dbg(ql_dbg_init, base_vha, 0x00f2, 3173 "Init done and hba is online.\n"); 3174 3175 if (qla_ini_mode_enabled(base_vha) || 3176 qla_dual_mode_enabled(base_vha)) 3177 scsi_scan_host(host); 3178 else 3179 ql_dbg(ql_dbg_init, base_vha, 0x0122, 3180 "skipping scsi_scan_host() for non-initiator port\n"); 3181 3182 qla2x00_alloc_sysfs_attr(base_vha); 3183 3184 if (IS_QLAFX00(ha)) { 3185 ret = qlafx00_fx_disc(base_vha, 3186 &base_vha->hw->mr.fcport, FXDISC_GET_PORT_INFO); 3187 3188 /* Register system information */ 3189 ret = qlafx00_fx_disc(base_vha, 3190 &base_vha->hw->mr.fcport, FXDISC_REG_HOST_INFO); 3191 } 3192 3193 qla2x00_init_host_attr(base_vha); 3194 3195 qla2x00_dfs_setup(base_vha); 3196 3197 ql_log(ql_log_info, base_vha, 0x00fb, 3198 "QLogic %s - %s.\n", ha->model_number, ha->model_desc); 3199 ql_log(ql_log_info, base_vha, 0x00fc, 3200 "ISP%04X: %s @ %s hdma%c host#=%ld fw=%s.\n", 3201 pdev->device, ha->isp_ops->pci_info_str(base_vha, pci_info), 3202 pci_name(pdev), ha->flags.enable_64bit_addressing ? '+' : '-', 3203 base_vha->host_no, 3204 ha->isp_ops->fw_version_str(base_vha, fw_str, sizeof(fw_str))); 3205 3206 qlt_add_target(ha, base_vha); 3207 3208 clear_bit(PFLG_DRIVER_PROBING, &base_vha->pci_flags); 3209 3210 if (test_bit(UNLOADING, &base_vha->dpc_flags)) 3211 return -ENODEV; 3212 3213 return 0; 3214 3215 probe_init_failed: 3216 qla2x00_free_req_que(ha, req); 3217 ha->req_q_map[0] = NULL; 3218 clear_bit(0, ha->req_qid_map); 3219 qla2x00_free_rsp_que(ha, rsp); 3220 ha->rsp_q_map[0] = NULL; 3221 clear_bit(0, ha->rsp_qid_map); 3222 ha->max_req_queues = ha->max_rsp_queues = 0; 3223 3224 probe_failed: 3225 if (base_vha->timer_active) 3226 qla2x00_stop_timer(base_vha); 3227 base_vha->flags.online = 0; 3228 if (ha->dpc_thread) { 3229 struct task_struct *t = ha->dpc_thread; 3230 3231 ha->dpc_thread = NULL; 3232 kthread_stop(t); 3233 } 3234 3235 qla2x00_free_device(base_vha); 3236 3237 scsi_host_put(base_vha->host); 3238 3239 probe_hw_failed: 3240 qla2x00_clear_drv_active(ha); 3241 3242 iospace_config_failed: 3243 if (IS_P3P_TYPE(ha)) { 3244 if (!ha->nx_pcibase) 3245 iounmap((device_reg_t *)ha->nx_pcibase); 3246 if (!ql2xdbwr) 3247 iounmap((device_reg_t *)ha->nxdb_wr_ptr); 3248 } else { 3249 if (ha->iobase) 3250 iounmap(ha->iobase); 3251 if (ha->cregbase) 3252 iounmap(ha->cregbase); 3253 } 3254 pci_release_selected_regions(ha->pdev, ha->bars); 3255 kfree(ha); 3256 3257 probe_out: 3258 pci_disable_device(pdev); 3259 return ret; 3260 } 3261 3262 static void 3263 qla2x00_shutdown(struct pci_dev *pdev) 3264 { 3265 scsi_qla_host_t *vha; 3266 struct qla_hw_data *ha; 3267 3268 if (!atomic_read(&pdev->enable_cnt)) 3269 return; 3270 3271 vha = pci_get_drvdata(pdev); 3272 ha = vha->hw; 3273 3274 /* Notify ISPFX00 firmware */ 3275 if (IS_QLAFX00(ha)) 3276 qlafx00_driver_shutdown(vha, 20); 3277 3278 /* Turn-off FCE trace */ 3279 if (ha->flags.fce_enabled) { 3280 qla2x00_disable_fce_trace(vha, NULL, NULL); 3281 ha->flags.fce_enabled = 0; 3282 } 3283 3284 /* Turn-off EFT trace */ 3285 if (ha->eft) 3286 qla2x00_disable_eft_trace(vha); 3287 3288 /* Stop currently executing firmware. */ 3289 qla2x00_try_to_stop_firmware(vha); 3290 3291 /* Turn adapter off line */ 3292 vha->flags.online = 0; 3293 3294 /* turn-off interrupts on the card */ 3295 if (ha->interrupts_on) { 3296 vha->flags.init_done = 0; 3297 ha->isp_ops->disable_intrs(ha); 3298 } 3299 3300 qla2x00_free_irqs(vha); 3301 3302 qla2x00_free_fw_dump(ha); 3303 3304 pci_disable_pcie_error_reporting(pdev); 3305 pci_disable_device(pdev); 3306 } 3307 3308 /* Deletes all the virtual ports for a given ha */ 3309 static void 3310 qla2x00_delete_all_vps(struct qla_hw_data *ha, scsi_qla_host_t *base_vha) 3311 { 3312 scsi_qla_host_t *vha; 3313 unsigned long flags; 3314 3315 mutex_lock(&ha->vport_lock); 3316 while (ha->cur_vport_count) { 3317 spin_lock_irqsave(&ha->vport_slock, flags); 3318 3319 BUG_ON(base_vha->list.next == &ha->vp_list); 3320 /* This assumes first entry in ha->vp_list is always base vha */ 3321 vha = list_first_entry(&base_vha->list, scsi_qla_host_t, list); 3322 scsi_host_get(vha->host); 3323 3324 spin_unlock_irqrestore(&ha->vport_slock, flags); 3325 mutex_unlock(&ha->vport_lock); 3326 3327 fc_vport_terminate(vha->fc_vport); 3328 scsi_host_put(vha->host); 3329 3330 mutex_lock(&ha->vport_lock); 3331 } 3332 mutex_unlock(&ha->vport_lock); 3333 } 3334 3335 /* Stops all deferred work threads */ 3336 static void 3337 qla2x00_destroy_deferred_work(struct qla_hw_data *ha) 3338 { 3339 /* Cancel all work and destroy DPC workqueues */ 3340 if (ha->dpc_lp_wq) { 3341 cancel_work_sync(&ha->idc_aen); 3342 destroy_workqueue(ha->dpc_lp_wq); 3343 ha->dpc_lp_wq = NULL; 3344 } 3345 3346 if (ha->dpc_hp_wq) { 3347 cancel_work_sync(&ha->nic_core_reset); 3348 cancel_work_sync(&ha->idc_state_handler); 3349 cancel_work_sync(&ha->nic_core_unrecoverable); 3350 destroy_workqueue(ha->dpc_hp_wq); 3351 ha->dpc_hp_wq = NULL; 3352 } 3353 3354 /* Kill the kernel thread for this host */ 3355 if (ha->dpc_thread) { 3356 struct task_struct *t = ha->dpc_thread; 3357 3358 /* 3359 * qla2xxx_wake_dpc checks for ->dpc_thread 3360 * so we need to zero it out. 3361 */ 3362 ha->dpc_thread = NULL; 3363 kthread_stop(t); 3364 } 3365 } 3366 3367 static void 3368 qla2x00_unmap_iobases(struct qla_hw_data *ha) 3369 { 3370 if (IS_QLA82XX(ha)) { 3371 3372 iounmap((device_reg_t *)ha->nx_pcibase); 3373 if (!ql2xdbwr) 3374 iounmap((device_reg_t *)ha->nxdb_wr_ptr); 3375 } else { 3376 if (ha->iobase) 3377 iounmap(ha->iobase); 3378 3379 if (ha->cregbase) 3380 iounmap(ha->cregbase); 3381 3382 if (ha->mqiobase) 3383 iounmap(ha->mqiobase); 3384 3385 if ((IS_QLA83XX(ha) || IS_QLA27XX(ha)) && ha->msixbase) 3386 iounmap(ha->msixbase); 3387 } 3388 } 3389 3390 static void 3391 qla2x00_clear_drv_active(struct qla_hw_data *ha) 3392 { 3393 if (IS_QLA8044(ha)) { 3394 qla8044_idc_lock(ha); 3395 qla8044_clear_drv_active(ha); 3396 qla8044_idc_unlock(ha); 3397 } else if (IS_QLA82XX(ha)) { 3398 qla82xx_idc_lock(ha); 3399 qla82xx_clear_drv_active(ha); 3400 qla82xx_idc_unlock(ha); 3401 } 3402 } 3403 3404 static void 3405 qla2x00_remove_one(struct pci_dev *pdev) 3406 { 3407 scsi_qla_host_t *base_vha; 3408 struct qla_hw_data *ha; 3409 3410 base_vha = pci_get_drvdata(pdev); 3411 ha = base_vha->hw; 3412 3413 /* Indicate device removal to prevent future board_disable and wait 3414 * until any pending board_disable has completed. */ 3415 set_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags); 3416 cancel_work_sync(&ha->board_disable); 3417 3418 /* 3419 * If the PCI device is disabled then there was a PCI-disconnect and 3420 * qla2x00_disable_board_on_pci_error has taken care of most of the 3421 * resources. 3422 */ 3423 if (!atomic_read(&pdev->enable_cnt)) { 3424 dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size, 3425 base_vha->gnl.l, base_vha->gnl.ldma); 3426 3427 scsi_host_put(base_vha->host); 3428 kfree(ha); 3429 pci_set_drvdata(pdev, NULL); 3430 return; 3431 } 3432 qla2x00_wait_for_hba_ready(base_vha); 3433 3434 /* 3435 * if UNLOAD flag is already set, then continue unload, 3436 * where it was set first. 3437 */ 3438 if (test_bit(UNLOADING, &base_vha->dpc_flags)) 3439 return; 3440 3441 set_bit(UNLOADING, &base_vha->dpc_flags); 3442 dma_free_coherent(&ha->pdev->dev, 3443 base_vha->gnl.size, base_vha->gnl.l, base_vha->gnl.ldma); 3444 3445 if (IS_QLAFX00(ha)) 3446 qlafx00_driver_shutdown(base_vha, 20); 3447 3448 qla2x00_delete_all_vps(ha, base_vha); 3449 3450 if (IS_QLA8031(ha)) { 3451 ql_dbg(ql_dbg_p3p, base_vha, 0xb07e, 3452 "Clearing fcoe driver presence.\n"); 3453 if (qla83xx_clear_drv_presence(base_vha) != QLA_SUCCESS) 3454 ql_dbg(ql_dbg_p3p, base_vha, 0xb079, 3455 "Error while clearing DRV-Presence.\n"); 3456 } 3457 3458 qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16); 3459 3460 qla2x00_dfs_remove(base_vha); 3461 3462 qla84xx_put_chip(base_vha); 3463 3464 /* Laser should be disabled only for ISP2031 */ 3465 if (IS_QLA2031(ha)) 3466 qla83xx_disable_laser(base_vha); 3467 3468 /* Disable timer */ 3469 if (base_vha->timer_active) 3470 qla2x00_stop_timer(base_vha); 3471 3472 base_vha->flags.online = 0; 3473 3474 /* free DMA memory */ 3475 if (ha->exlogin_buf) 3476 qla2x00_free_exlogin_buffer(ha); 3477 3478 /* free DMA memory */ 3479 if (ha->exchoffld_buf) 3480 qla2x00_free_exchoffld_buffer(ha); 3481 3482 qla2x00_destroy_deferred_work(ha); 3483 3484 qlt_remove_target(ha, base_vha); 3485 3486 qla2x00_free_sysfs_attr(base_vha, true); 3487 3488 fc_remove_host(base_vha->host); 3489 qlt_remove_target_resources(ha); 3490 3491 scsi_remove_host(base_vha->host); 3492 3493 qla2x00_free_device(base_vha); 3494 3495 qla2x00_clear_drv_active(ha); 3496 3497 scsi_host_put(base_vha->host); 3498 3499 qla2x00_unmap_iobases(ha); 3500 3501 pci_release_selected_regions(ha->pdev, ha->bars); 3502 kfree(ha); 3503 3504 pci_disable_pcie_error_reporting(pdev); 3505 3506 pci_disable_device(pdev); 3507 } 3508 3509 static void 3510 qla2x00_free_device(scsi_qla_host_t *vha) 3511 { 3512 struct qla_hw_data *ha = vha->hw; 3513 3514 qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16); 3515 3516 /* Disable timer */ 3517 if (vha->timer_active) 3518 qla2x00_stop_timer(vha); 3519 3520 qla25xx_delete_queues(vha); 3521 3522 if (ha->flags.fce_enabled) 3523 qla2x00_disable_fce_trace(vha, NULL, NULL); 3524 3525 if (ha->eft) 3526 qla2x00_disable_eft_trace(vha); 3527 3528 /* Stop currently executing firmware. */ 3529 qla2x00_try_to_stop_firmware(vha); 3530 3531 vha->flags.online = 0; 3532 3533 /* turn-off interrupts on the card */ 3534 if (ha->interrupts_on) { 3535 vha->flags.init_done = 0; 3536 ha->isp_ops->disable_intrs(ha); 3537 } 3538 3539 qla2x00_free_fcports(vha); 3540 3541 qla2x00_free_irqs(vha); 3542 3543 /* Flush the work queue and remove it */ 3544 if (ha->wq) { 3545 flush_workqueue(ha->wq); 3546 destroy_workqueue(ha->wq); 3547 ha->wq = NULL; 3548 } 3549 3550 3551 qla2x00_mem_free(ha); 3552 3553 qla82xx_md_free(vha); 3554 3555 qla2x00_free_queues(ha); 3556 } 3557 3558 void qla2x00_free_fcports(struct scsi_qla_host *vha) 3559 { 3560 fc_port_t *fcport, *tfcport; 3561 3562 list_for_each_entry_safe(fcport, tfcport, &vha->vp_fcports, list) { 3563 list_del(&fcport->list); 3564 qla2x00_clear_loop_id(fcport); 3565 kfree(fcport); 3566 } 3567 } 3568 3569 static inline void 3570 qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport, 3571 int defer) 3572 { 3573 struct fc_rport *rport; 3574 scsi_qla_host_t *base_vha; 3575 unsigned long flags; 3576 3577 if (!fcport->rport) 3578 return; 3579 3580 rport = fcport->rport; 3581 if (defer) { 3582 base_vha = pci_get_drvdata(vha->hw->pdev); 3583 spin_lock_irqsave(vha->host->host_lock, flags); 3584 fcport->drport = rport; 3585 spin_unlock_irqrestore(vha->host->host_lock, flags); 3586 qlt_do_generation_tick(vha, &base_vha->total_fcport_update_gen); 3587 set_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags); 3588 qla2xxx_wake_dpc(base_vha); 3589 } else { 3590 int now; 3591 if (rport) { 3592 ql_dbg(ql_dbg_disc, fcport->vha, 0xffff, 3593 "%s %8phN. rport %p roles %x \n", 3594 __func__, fcport->port_name, rport, 3595 rport->roles); 3596 fc_remote_port_delete(rport); 3597 } 3598 qlt_do_generation_tick(vha, &now); 3599 } 3600 } 3601 3602 /* 3603 * qla2x00_mark_device_lost Updates fcport state when device goes offline. 3604 * 3605 * Input: ha = adapter block pointer. fcport = port structure pointer. 3606 * 3607 * Return: None. 3608 * 3609 * Context: 3610 */ 3611 void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport, 3612 int do_login, int defer) 3613 { 3614 if (IS_QLAFX00(vha->hw)) { 3615 qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST); 3616 qla2x00_schedule_rport_del(vha, fcport, defer); 3617 return; 3618 } 3619 3620 if (atomic_read(&fcport->state) == FCS_ONLINE && 3621 vha->vp_idx == fcport->vha->vp_idx) { 3622 qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST); 3623 qla2x00_schedule_rport_del(vha, fcport, defer); 3624 } 3625 /* 3626 * We may need to retry the login, so don't change the state of the 3627 * port but do the retries. 3628 */ 3629 if (atomic_read(&fcport->state) != FCS_DEVICE_DEAD) 3630 qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST); 3631 3632 if (!do_login) 3633 return; 3634 3635 set_bit(RELOGIN_NEEDED, &vha->dpc_flags); 3636 3637 if (fcport->login_retry == 0) { 3638 fcport->login_retry = vha->hw->login_retry_count; 3639 3640 ql_dbg(ql_dbg_disc, vha, 0x2067, 3641 "Port login retry %8phN, lid 0x%04x retry cnt=%d.\n", 3642 fcport->port_name, fcport->loop_id, fcport->login_retry); 3643 } 3644 } 3645 3646 /* 3647 * qla2x00_mark_all_devices_lost 3648 * Updates fcport state when device goes offline. 3649 * 3650 * Input: 3651 * ha = adapter block pointer. 3652 * fcport = port structure pointer. 3653 * 3654 * Return: 3655 * None. 3656 * 3657 * Context: 3658 */ 3659 void 3660 qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer) 3661 { 3662 fc_port_t *fcport; 3663 3664 ql_dbg(ql_dbg_disc, vha, 0xffff, 3665 "Mark all dev lost\n"); 3666 3667 list_for_each_entry(fcport, &vha->vp_fcports, list) { 3668 fcport->scan_state = 0; 3669 qlt_schedule_sess_for_deletion_lock(fcport); 3670 3671 if (vha->vp_idx != 0 && vha->vp_idx != fcport->vha->vp_idx) 3672 continue; 3673 3674 /* 3675 * No point in marking the device as lost, if the device is 3676 * already DEAD. 3677 */ 3678 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD) 3679 continue; 3680 if (atomic_read(&fcport->state) == FCS_ONLINE) { 3681 qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST); 3682 if (defer) 3683 qla2x00_schedule_rport_del(vha, fcport, defer); 3684 else if (vha->vp_idx == fcport->vha->vp_idx) 3685 qla2x00_schedule_rport_del(vha, fcport, defer); 3686 } 3687 } 3688 } 3689 3690 /* 3691 * qla2x00_mem_alloc 3692 * Allocates adapter memory. 3693 * 3694 * Returns: 3695 * 0 = success. 3696 * !0 = failure. 3697 */ 3698 static int 3699 qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len, 3700 struct req_que **req, struct rsp_que **rsp) 3701 { 3702 char name[16]; 3703 3704 ha->init_cb = dma_alloc_coherent(&ha->pdev->dev, ha->init_cb_size, 3705 &ha->init_cb_dma, GFP_KERNEL); 3706 if (!ha->init_cb) 3707 goto fail; 3708 3709 if (qlt_mem_alloc(ha) < 0) 3710 goto fail_free_init_cb; 3711 3712 ha->gid_list = dma_alloc_coherent(&ha->pdev->dev, 3713 qla2x00_gid_list_size(ha), &ha->gid_list_dma, GFP_KERNEL); 3714 if (!ha->gid_list) 3715 goto fail_free_tgt_mem; 3716 3717 ha->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep); 3718 if (!ha->srb_mempool) 3719 goto fail_free_gid_list; 3720 3721 if (IS_P3P_TYPE(ha)) { 3722 /* Allocate cache for CT6 Ctx. */ 3723 if (!ctx_cachep) { 3724 ctx_cachep = kmem_cache_create("qla2xxx_ctx", 3725 sizeof(struct ct6_dsd), 0, 3726 SLAB_HWCACHE_ALIGN, NULL); 3727 if (!ctx_cachep) 3728 goto fail_free_srb_mempool; 3729 } 3730 ha->ctx_mempool = mempool_create_slab_pool(SRB_MIN_REQ, 3731 ctx_cachep); 3732 if (!ha->ctx_mempool) 3733 goto fail_free_srb_mempool; 3734 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0021, 3735 "ctx_cachep=%p ctx_mempool=%p.\n", 3736 ctx_cachep, ha->ctx_mempool); 3737 } 3738 3739 /* Get memory for cached NVRAM */ 3740 ha->nvram = kzalloc(MAX_NVRAM_SIZE, GFP_KERNEL); 3741 if (!ha->nvram) 3742 goto fail_free_ctx_mempool; 3743 3744 snprintf(name, sizeof(name), "%s_%d", QLA2XXX_DRIVER_NAME, 3745 ha->pdev->device); 3746 ha->s_dma_pool = dma_pool_create(name, &ha->pdev->dev, 3747 DMA_POOL_SIZE, 8, 0); 3748 if (!ha->s_dma_pool) 3749 goto fail_free_nvram; 3750 3751 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0022, 3752 "init_cb=%p gid_list=%p, srb_mempool=%p s_dma_pool=%p.\n", 3753 ha->init_cb, ha->gid_list, ha->srb_mempool, ha->s_dma_pool); 3754 3755 if (IS_P3P_TYPE(ha) || ql2xenabledif) { 3756 ha->dl_dma_pool = dma_pool_create(name, &ha->pdev->dev, 3757 DSD_LIST_DMA_POOL_SIZE, 8, 0); 3758 if (!ha->dl_dma_pool) { 3759 ql_log_pci(ql_log_fatal, ha->pdev, 0x0023, 3760 "Failed to allocate memory for dl_dma_pool.\n"); 3761 goto fail_s_dma_pool; 3762 } 3763 3764 ha->fcp_cmnd_dma_pool = dma_pool_create(name, &ha->pdev->dev, 3765 FCP_CMND_DMA_POOL_SIZE, 8, 0); 3766 if (!ha->fcp_cmnd_dma_pool) { 3767 ql_log_pci(ql_log_fatal, ha->pdev, 0x0024, 3768 "Failed to allocate memory for fcp_cmnd_dma_pool.\n"); 3769 goto fail_dl_dma_pool; 3770 } 3771 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0025, 3772 "dl_dma_pool=%p fcp_cmnd_dma_pool=%p.\n", 3773 ha->dl_dma_pool, ha->fcp_cmnd_dma_pool); 3774 } 3775 3776 /* Allocate memory for SNS commands */ 3777 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 3778 /* Get consistent memory allocated for SNS commands */ 3779 ha->sns_cmd = dma_alloc_coherent(&ha->pdev->dev, 3780 sizeof(struct sns_cmd_pkt), &ha->sns_cmd_dma, GFP_KERNEL); 3781 if (!ha->sns_cmd) 3782 goto fail_dma_pool; 3783 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0026, 3784 "sns_cmd: %p.\n", ha->sns_cmd); 3785 } else { 3786 /* Get consistent memory allocated for MS IOCB */ 3787 ha->ms_iocb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, 3788 &ha->ms_iocb_dma); 3789 if (!ha->ms_iocb) 3790 goto fail_dma_pool; 3791 /* Get consistent memory allocated for CT SNS commands */ 3792 ha->ct_sns = dma_alloc_coherent(&ha->pdev->dev, 3793 sizeof(struct ct_sns_pkt), &ha->ct_sns_dma, GFP_KERNEL); 3794 if (!ha->ct_sns) 3795 goto fail_free_ms_iocb; 3796 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0027, 3797 "ms_iocb=%p ct_sns=%p.\n", 3798 ha->ms_iocb, ha->ct_sns); 3799 } 3800 3801 /* Allocate memory for request ring */ 3802 *req = kzalloc(sizeof(struct req_que), GFP_KERNEL); 3803 if (!*req) { 3804 ql_log_pci(ql_log_fatal, ha->pdev, 0x0028, 3805 "Failed to allocate memory for req.\n"); 3806 goto fail_req; 3807 } 3808 (*req)->length = req_len; 3809 (*req)->ring = dma_alloc_coherent(&ha->pdev->dev, 3810 ((*req)->length + 1) * sizeof(request_t), 3811 &(*req)->dma, GFP_KERNEL); 3812 if (!(*req)->ring) { 3813 ql_log_pci(ql_log_fatal, ha->pdev, 0x0029, 3814 "Failed to allocate memory for req_ring.\n"); 3815 goto fail_req_ring; 3816 } 3817 /* Allocate memory for response ring */ 3818 *rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL); 3819 if (!*rsp) { 3820 ql_log_pci(ql_log_fatal, ha->pdev, 0x002a, 3821 "Failed to allocate memory for rsp.\n"); 3822 goto fail_rsp; 3823 } 3824 (*rsp)->hw = ha; 3825 (*rsp)->length = rsp_len; 3826 (*rsp)->ring = dma_alloc_coherent(&ha->pdev->dev, 3827 ((*rsp)->length + 1) * sizeof(response_t), 3828 &(*rsp)->dma, GFP_KERNEL); 3829 if (!(*rsp)->ring) { 3830 ql_log_pci(ql_log_fatal, ha->pdev, 0x002b, 3831 "Failed to allocate memory for rsp_ring.\n"); 3832 goto fail_rsp_ring; 3833 } 3834 (*req)->rsp = *rsp; 3835 (*rsp)->req = *req; 3836 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002c, 3837 "req=%p req->length=%d req->ring=%p rsp=%p " 3838 "rsp->length=%d rsp->ring=%p.\n", 3839 *req, (*req)->length, (*req)->ring, *rsp, (*rsp)->length, 3840 (*rsp)->ring); 3841 /* Allocate memory for NVRAM data for vports */ 3842 if (ha->nvram_npiv_size) { 3843 ha->npiv_info = kzalloc(sizeof(struct qla_npiv_entry) * 3844 ha->nvram_npiv_size, GFP_KERNEL); 3845 if (!ha->npiv_info) { 3846 ql_log_pci(ql_log_fatal, ha->pdev, 0x002d, 3847 "Failed to allocate memory for npiv_info.\n"); 3848 goto fail_npiv_info; 3849 } 3850 } else 3851 ha->npiv_info = NULL; 3852 3853 /* Get consistent memory allocated for EX-INIT-CB. */ 3854 if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha)) { 3855 ha->ex_init_cb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, 3856 &ha->ex_init_cb_dma); 3857 if (!ha->ex_init_cb) 3858 goto fail_ex_init_cb; 3859 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002e, 3860 "ex_init_cb=%p.\n", ha->ex_init_cb); 3861 } 3862 3863 INIT_LIST_HEAD(&ha->gbl_dsd_list); 3864 3865 /* Get consistent memory allocated for Async Port-Database. */ 3866 if (!IS_FWI2_CAPABLE(ha)) { 3867 ha->async_pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, 3868 &ha->async_pd_dma); 3869 if (!ha->async_pd) 3870 goto fail_async_pd; 3871 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002f, 3872 "async_pd=%p.\n", ha->async_pd); 3873 } 3874 3875 INIT_LIST_HEAD(&ha->vp_list); 3876 3877 /* Allocate memory for our loop_id bitmap */ 3878 ha->loop_id_map = kzalloc(BITS_TO_LONGS(LOOPID_MAP_SIZE) * sizeof(long), 3879 GFP_KERNEL); 3880 if (!ha->loop_id_map) 3881 goto fail_loop_id_map; 3882 else { 3883 qla2x00_set_reserved_loop_ids(ha); 3884 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0123, 3885 "loop_id_map=%p.\n", ha->loop_id_map); 3886 } 3887 3888 return 0; 3889 3890 fail_loop_id_map: 3891 dma_pool_free(ha->s_dma_pool, ha->async_pd, ha->async_pd_dma); 3892 fail_async_pd: 3893 dma_pool_free(ha->s_dma_pool, ha->ex_init_cb, ha->ex_init_cb_dma); 3894 fail_ex_init_cb: 3895 kfree(ha->npiv_info); 3896 fail_npiv_info: 3897 dma_free_coherent(&ha->pdev->dev, ((*rsp)->length + 1) * 3898 sizeof(response_t), (*rsp)->ring, (*rsp)->dma); 3899 (*rsp)->ring = NULL; 3900 (*rsp)->dma = 0; 3901 fail_rsp_ring: 3902 kfree(*rsp); 3903 fail_rsp: 3904 dma_free_coherent(&ha->pdev->dev, ((*req)->length + 1) * 3905 sizeof(request_t), (*req)->ring, (*req)->dma); 3906 (*req)->ring = NULL; 3907 (*req)->dma = 0; 3908 fail_req_ring: 3909 kfree(*req); 3910 fail_req: 3911 dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt), 3912 ha->ct_sns, ha->ct_sns_dma); 3913 ha->ct_sns = NULL; 3914 ha->ct_sns_dma = 0; 3915 fail_free_ms_iocb: 3916 dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma); 3917 ha->ms_iocb = NULL; 3918 ha->ms_iocb_dma = 0; 3919 3920 if (ha->sns_cmd) 3921 dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt), 3922 ha->sns_cmd, ha->sns_cmd_dma); 3923 fail_dma_pool: 3924 if (IS_QLA82XX(ha) || ql2xenabledif) { 3925 dma_pool_destroy(ha->fcp_cmnd_dma_pool); 3926 ha->fcp_cmnd_dma_pool = NULL; 3927 } 3928 fail_dl_dma_pool: 3929 if (IS_QLA82XX(ha) || ql2xenabledif) { 3930 dma_pool_destroy(ha->dl_dma_pool); 3931 ha->dl_dma_pool = NULL; 3932 } 3933 fail_s_dma_pool: 3934 dma_pool_destroy(ha->s_dma_pool); 3935 ha->s_dma_pool = NULL; 3936 fail_free_nvram: 3937 kfree(ha->nvram); 3938 ha->nvram = NULL; 3939 fail_free_ctx_mempool: 3940 if (ha->ctx_mempool) 3941 mempool_destroy(ha->ctx_mempool); 3942 ha->ctx_mempool = NULL; 3943 fail_free_srb_mempool: 3944 if (ha->srb_mempool) 3945 mempool_destroy(ha->srb_mempool); 3946 ha->srb_mempool = NULL; 3947 fail_free_gid_list: 3948 dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha), 3949 ha->gid_list, 3950 ha->gid_list_dma); 3951 ha->gid_list = NULL; 3952 ha->gid_list_dma = 0; 3953 fail_free_tgt_mem: 3954 qlt_mem_free(ha); 3955 fail_free_init_cb: 3956 dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, ha->init_cb, 3957 ha->init_cb_dma); 3958 ha->init_cb = NULL; 3959 ha->init_cb_dma = 0; 3960 fail: 3961 ql_log(ql_log_fatal, NULL, 0x0030, 3962 "Memory allocation failure.\n"); 3963 return -ENOMEM; 3964 } 3965 3966 int 3967 qla2x00_set_exlogins_buffer(scsi_qla_host_t *vha) 3968 { 3969 int rval; 3970 uint16_t size, max_cnt, temp; 3971 struct qla_hw_data *ha = vha->hw; 3972 3973 /* Return if we don't need to alloacate any extended logins */ 3974 if (!ql2xexlogins) 3975 return QLA_SUCCESS; 3976 3977 ql_log(ql_log_info, vha, 0xd021, "EXLOGIN count: %d.\n", ql2xexlogins); 3978 max_cnt = 0; 3979 rval = qla_get_exlogin_status(vha, &size, &max_cnt); 3980 if (rval != QLA_SUCCESS) { 3981 ql_log_pci(ql_log_fatal, ha->pdev, 0xd029, 3982 "Failed to get exlogin status.\n"); 3983 return rval; 3984 } 3985 3986 temp = (ql2xexlogins > max_cnt) ? max_cnt : ql2xexlogins; 3987 ha->exlogin_size = (size * temp); 3988 ql_log(ql_log_info, vha, 0xd024, 3989 "EXLOGIN: max_logins=%d, portdb=0x%x, total=%d.\n", 3990 max_cnt, size, temp); 3991 3992 ql_log(ql_log_info, vha, 0xd025, "EXLOGIN: requested size=0x%x\n", 3993 ha->exlogin_size); 3994 3995 /* Get consistent memory for extended logins */ 3996 ha->exlogin_buf = dma_alloc_coherent(&ha->pdev->dev, 3997 ha->exlogin_size, &ha->exlogin_buf_dma, GFP_KERNEL); 3998 if (!ha->exlogin_buf) { 3999 ql_log_pci(ql_log_fatal, ha->pdev, 0xd02a, 4000 "Failed to allocate memory for exlogin_buf_dma.\n"); 4001 return -ENOMEM; 4002 } 4003 4004 /* Now configure the dma buffer */ 4005 rval = qla_set_exlogin_mem_cfg(vha, ha->exlogin_buf_dma); 4006 if (rval) { 4007 ql_log(ql_log_fatal, vha, 0x00cf, 4008 "Setup extended login buffer ****FAILED****.\n"); 4009 qla2x00_free_exlogin_buffer(ha); 4010 } 4011 4012 return rval; 4013 } 4014 4015 /* 4016 * qla2x00_free_exlogin_buffer 4017 * 4018 * Input: 4019 * ha = adapter block pointer 4020 */ 4021 void 4022 qla2x00_free_exlogin_buffer(struct qla_hw_data *ha) 4023 { 4024 if (ha->exlogin_buf) { 4025 dma_free_coherent(&ha->pdev->dev, ha->exlogin_size, 4026 ha->exlogin_buf, ha->exlogin_buf_dma); 4027 ha->exlogin_buf = NULL; 4028 ha->exlogin_size = 0; 4029 } 4030 } 4031 4032 int 4033 qla2x00_set_exchoffld_buffer(scsi_qla_host_t *vha) 4034 { 4035 int rval; 4036 uint16_t size, max_cnt, temp; 4037 struct qla_hw_data *ha = vha->hw; 4038 4039 /* Return if we don't need to alloacate any extended logins */ 4040 if (!ql2xexchoffld) 4041 return QLA_SUCCESS; 4042 4043 ql_log(ql_log_info, vha, 0xd014, 4044 "Exchange offload count: %d.\n", ql2xexlogins); 4045 4046 max_cnt = 0; 4047 rval = qla_get_exchoffld_status(vha, &size, &max_cnt); 4048 if (rval != QLA_SUCCESS) { 4049 ql_log_pci(ql_log_fatal, ha->pdev, 0xd012, 4050 "Failed to get exlogin status.\n"); 4051 return rval; 4052 } 4053 4054 temp = (ql2xexchoffld > max_cnt) ? max_cnt : ql2xexchoffld; 4055 ha->exchoffld_size = (size * temp); 4056 ql_log(ql_log_info, vha, 0xd016, 4057 "Exchange offload: max_count=%d, buffers=0x%x, total=%d.\n", 4058 max_cnt, size, temp); 4059 4060 ql_log(ql_log_info, vha, 0xd017, 4061 "Exchange Buffers requested size = 0x%x\n", ha->exchoffld_size); 4062 4063 /* Get consistent memory for extended logins */ 4064 ha->exchoffld_buf = dma_alloc_coherent(&ha->pdev->dev, 4065 ha->exchoffld_size, &ha->exchoffld_buf_dma, GFP_KERNEL); 4066 if (!ha->exchoffld_buf) { 4067 ql_log_pci(ql_log_fatal, ha->pdev, 0xd013, 4068 "Failed to allocate memory for exchoffld_buf_dma.\n"); 4069 return -ENOMEM; 4070 } 4071 4072 /* Now configure the dma buffer */ 4073 rval = qla_set_exchoffld_mem_cfg(vha, ha->exchoffld_buf_dma); 4074 if (rval) { 4075 ql_log(ql_log_fatal, vha, 0xd02e, 4076 "Setup exchange offload buffer ****FAILED****.\n"); 4077 qla2x00_free_exchoffld_buffer(ha); 4078 } 4079 4080 return rval; 4081 } 4082 4083 /* 4084 * qla2x00_free_exchoffld_buffer 4085 * 4086 * Input: 4087 * ha = adapter block pointer 4088 */ 4089 void 4090 qla2x00_free_exchoffld_buffer(struct qla_hw_data *ha) 4091 { 4092 if (ha->exchoffld_buf) { 4093 dma_free_coherent(&ha->pdev->dev, ha->exchoffld_size, 4094 ha->exchoffld_buf, ha->exchoffld_buf_dma); 4095 ha->exchoffld_buf = NULL; 4096 ha->exchoffld_size = 0; 4097 } 4098 } 4099 4100 /* 4101 * qla2x00_free_fw_dump 4102 * Frees fw dump stuff. 4103 * 4104 * Input: 4105 * ha = adapter block pointer 4106 */ 4107 static void 4108 qla2x00_free_fw_dump(struct qla_hw_data *ha) 4109 { 4110 if (ha->fce) 4111 dma_free_coherent(&ha->pdev->dev, 4112 FCE_SIZE, ha->fce, ha->fce_dma); 4113 4114 if (ha->eft) 4115 dma_free_coherent(&ha->pdev->dev, 4116 EFT_SIZE, ha->eft, ha->eft_dma); 4117 4118 if (ha->fw_dump) 4119 vfree(ha->fw_dump); 4120 if (ha->fw_dump_template) 4121 vfree(ha->fw_dump_template); 4122 4123 ha->fce = NULL; 4124 ha->fce_dma = 0; 4125 ha->eft = NULL; 4126 ha->eft_dma = 0; 4127 ha->fw_dumped = 0; 4128 ha->fw_dump_cap_flags = 0; 4129 ha->fw_dump_reading = 0; 4130 ha->fw_dump = NULL; 4131 ha->fw_dump_len = 0; 4132 ha->fw_dump_template = NULL; 4133 ha->fw_dump_template_len = 0; 4134 } 4135 4136 /* 4137 * qla2x00_mem_free 4138 * Frees all adapter allocated memory. 4139 * 4140 * Input: 4141 * ha = adapter block pointer. 4142 */ 4143 static void 4144 qla2x00_mem_free(struct qla_hw_data *ha) 4145 { 4146 qla2x00_free_fw_dump(ha); 4147 4148 if (ha->mctp_dump) 4149 dma_free_coherent(&ha->pdev->dev, MCTP_DUMP_SIZE, ha->mctp_dump, 4150 ha->mctp_dump_dma); 4151 4152 if (ha->srb_mempool) 4153 mempool_destroy(ha->srb_mempool); 4154 4155 if (ha->dcbx_tlv) 4156 dma_free_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE, 4157 ha->dcbx_tlv, ha->dcbx_tlv_dma); 4158 4159 if (ha->xgmac_data) 4160 dma_free_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE, 4161 ha->xgmac_data, ha->xgmac_data_dma); 4162 4163 if (ha->sns_cmd) 4164 dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt), 4165 ha->sns_cmd, ha->sns_cmd_dma); 4166 4167 if (ha->ct_sns) 4168 dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt), 4169 ha->ct_sns, ha->ct_sns_dma); 4170 4171 if (ha->sfp_data) 4172 dma_pool_free(ha->s_dma_pool, ha->sfp_data, ha->sfp_data_dma); 4173 4174 if (ha->ms_iocb) 4175 dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma); 4176 4177 if (ha->ex_init_cb) 4178 dma_pool_free(ha->s_dma_pool, 4179 ha->ex_init_cb, ha->ex_init_cb_dma); 4180 4181 if (ha->async_pd) 4182 dma_pool_free(ha->s_dma_pool, ha->async_pd, ha->async_pd_dma); 4183 4184 if (ha->s_dma_pool) 4185 dma_pool_destroy(ha->s_dma_pool); 4186 4187 if (ha->gid_list) 4188 dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha), 4189 ha->gid_list, ha->gid_list_dma); 4190 4191 if (IS_QLA82XX(ha)) { 4192 if (!list_empty(&ha->gbl_dsd_list)) { 4193 struct dsd_dma *dsd_ptr, *tdsd_ptr; 4194 4195 /* clean up allocated prev pool */ 4196 list_for_each_entry_safe(dsd_ptr, 4197 tdsd_ptr, &ha->gbl_dsd_list, list) { 4198 dma_pool_free(ha->dl_dma_pool, 4199 dsd_ptr->dsd_addr, dsd_ptr->dsd_list_dma); 4200 list_del(&dsd_ptr->list); 4201 kfree(dsd_ptr); 4202 } 4203 } 4204 } 4205 4206 if (ha->dl_dma_pool) 4207 dma_pool_destroy(ha->dl_dma_pool); 4208 4209 if (ha->fcp_cmnd_dma_pool) 4210 dma_pool_destroy(ha->fcp_cmnd_dma_pool); 4211 4212 if (ha->ctx_mempool) 4213 mempool_destroy(ha->ctx_mempool); 4214 4215 qlt_mem_free(ha); 4216 4217 if (ha->init_cb) 4218 dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, 4219 ha->init_cb, ha->init_cb_dma); 4220 vfree(ha->optrom_buffer); 4221 kfree(ha->nvram); 4222 kfree(ha->npiv_info); 4223 kfree(ha->swl); 4224 kfree(ha->loop_id_map); 4225 4226 ha->srb_mempool = NULL; 4227 ha->ctx_mempool = NULL; 4228 ha->sns_cmd = NULL; 4229 ha->sns_cmd_dma = 0; 4230 ha->ct_sns = NULL; 4231 ha->ct_sns_dma = 0; 4232 ha->ms_iocb = NULL; 4233 ha->ms_iocb_dma = 0; 4234 ha->init_cb = NULL; 4235 ha->init_cb_dma = 0; 4236 ha->ex_init_cb = NULL; 4237 ha->ex_init_cb_dma = 0; 4238 ha->async_pd = NULL; 4239 ha->async_pd_dma = 0; 4240 4241 ha->s_dma_pool = NULL; 4242 ha->dl_dma_pool = NULL; 4243 ha->fcp_cmnd_dma_pool = NULL; 4244 4245 ha->gid_list = NULL; 4246 ha->gid_list_dma = 0; 4247 4248 ha->tgt.atio_ring = NULL; 4249 ha->tgt.atio_dma = 0; 4250 ha->tgt.tgt_vp_map = NULL; 4251 } 4252 4253 struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht, 4254 struct qla_hw_data *ha) 4255 { 4256 struct Scsi_Host *host; 4257 struct scsi_qla_host *vha = NULL; 4258 4259 host = scsi_host_alloc(sht, sizeof(scsi_qla_host_t)); 4260 if (!host) { 4261 ql_log_pci(ql_log_fatal, ha->pdev, 0x0107, 4262 "Failed to allocate host from the scsi layer, aborting.\n"); 4263 return NULL; 4264 } 4265 4266 /* Clear our data area */ 4267 vha = shost_priv(host); 4268 memset(vha, 0, sizeof(scsi_qla_host_t)); 4269 4270 vha->host = host; 4271 vha->host_no = host->host_no; 4272 vha->hw = ha; 4273 4274 INIT_LIST_HEAD(&vha->vp_fcports); 4275 INIT_LIST_HEAD(&vha->work_list); 4276 INIT_LIST_HEAD(&vha->list); 4277 INIT_LIST_HEAD(&vha->qla_cmd_list); 4278 INIT_LIST_HEAD(&vha->qla_sess_op_cmd_list); 4279 INIT_LIST_HEAD(&vha->logo_list); 4280 INIT_LIST_HEAD(&vha->plogi_ack_list); 4281 INIT_LIST_HEAD(&vha->qp_list); 4282 INIT_LIST_HEAD(&vha->gnl.fcports); 4283 4284 spin_lock_init(&vha->work_lock); 4285 spin_lock_init(&vha->cmd_list_lock); 4286 init_waitqueue_head(&vha->fcport_waitQ); 4287 init_waitqueue_head(&vha->vref_waitq); 4288 4289 vha->gnl.size = sizeof(struct get_name_list_extended) * 4290 (ha->max_loop_id + 1); 4291 vha->gnl.l = dma_alloc_coherent(&ha->pdev->dev, 4292 vha->gnl.size, &vha->gnl.ldma, GFP_KERNEL); 4293 if (!vha->gnl.l) { 4294 ql_log(ql_log_fatal, vha, 0xffff, 4295 "Alloc failed for name list.\n"); 4296 scsi_remove_host(vha->host); 4297 return NULL; 4298 } 4299 4300 sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no); 4301 ql_dbg(ql_dbg_init, vha, 0x0041, 4302 "Allocated the host=%p hw=%p vha=%p dev_name=%s", 4303 vha->host, vha->hw, vha, 4304 dev_name(&(ha->pdev->dev))); 4305 4306 return vha; 4307 } 4308 4309 struct qla_work_evt * 4310 qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type) 4311 { 4312 struct qla_work_evt *e; 4313 uint8_t bail; 4314 4315 QLA_VHA_MARK_BUSY(vha, bail); 4316 if (bail) 4317 return NULL; 4318 4319 e = kzalloc(sizeof(struct qla_work_evt), GFP_ATOMIC); 4320 if (!e) { 4321 QLA_VHA_MARK_NOT_BUSY(vha); 4322 return NULL; 4323 } 4324 4325 INIT_LIST_HEAD(&e->list); 4326 e->type = type; 4327 e->flags = QLA_EVT_FLAG_FREE; 4328 return e; 4329 } 4330 4331 int 4332 qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e) 4333 { 4334 unsigned long flags; 4335 4336 spin_lock_irqsave(&vha->work_lock, flags); 4337 list_add_tail(&e->list, &vha->work_list); 4338 spin_unlock_irqrestore(&vha->work_lock, flags); 4339 4340 if (QLA_EARLY_LINKUP(vha->hw)) 4341 schedule_work(&vha->iocb_work); 4342 else 4343 qla2xxx_wake_dpc(vha); 4344 4345 return QLA_SUCCESS; 4346 } 4347 4348 int 4349 qla2x00_post_aen_work(struct scsi_qla_host *vha, enum fc_host_event_code code, 4350 u32 data) 4351 { 4352 struct qla_work_evt *e; 4353 4354 e = qla2x00_alloc_work(vha, QLA_EVT_AEN); 4355 if (!e) 4356 return QLA_FUNCTION_FAILED; 4357 4358 e->u.aen.code = code; 4359 e->u.aen.data = data; 4360 return qla2x00_post_work(vha, e); 4361 } 4362 4363 int 4364 qla2x00_post_idc_ack_work(struct scsi_qla_host *vha, uint16_t *mb) 4365 { 4366 struct qla_work_evt *e; 4367 4368 e = qla2x00_alloc_work(vha, QLA_EVT_IDC_ACK); 4369 if (!e) 4370 return QLA_FUNCTION_FAILED; 4371 4372 memcpy(e->u.idc_ack.mb, mb, QLA_IDC_ACK_REGS * sizeof(uint16_t)); 4373 return qla2x00_post_work(vha, e); 4374 } 4375 4376 #define qla2x00_post_async_work(name, type) \ 4377 int qla2x00_post_async_##name##_work( \ 4378 struct scsi_qla_host *vha, \ 4379 fc_port_t *fcport, uint16_t *data) \ 4380 { \ 4381 struct qla_work_evt *e; \ 4382 \ 4383 e = qla2x00_alloc_work(vha, type); \ 4384 if (!e) \ 4385 return QLA_FUNCTION_FAILED; \ 4386 \ 4387 e->u.logio.fcport = fcport; \ 4388 if (data) { \ 4389 e->u.logio.data[0] = data[0]; \ 4390 e->u.logio.data[1] = data[1]; \ 4391 } \ 4392 return qla2x00_post_work(vha, e); \ 4393 } 4394 4395 qla2x00_post_async_work(login, QLA_EVT_ASYNC_LOGIN); 4396 qla2x00_post_async_work(logout, QLA_EVT_ASYNC_LOGOUT); 4397 qla2x00_post_async_work(logout_done, QLA_EVT_ASYNC_LOGOUT_DONE); 4398 qla2x00_post_async_work(adisc, QLA_EVT_ASYNC_ADISC); 4399 qla2x00_post_async_work(adisc_done, QLA_EVT_ASYNC_ADISC_DONE); 4400 4401 int 4402 qla2x00_post_uevent_work(struct scsi_qla_host *vha, u32 code) 4403 { 4404 struct qla_work_evt *e; 4405 4406 e = qla2x00_alloc_work(vha, QLA_EVT_UEVENT); 4407 if (!e) 4408 return QLA_FUNCTION_FAILED; 4409 4410 e->u.uevent.code = code; 4411 return qla2x00_post_work(vha, e); 4412 } 4413 4414 static void 4415 qla2x00_uevent_emit(struct scsi_qla_host *vha, u32 code) 4416 { 4417 char event_string[40]; 4418 char *envp[] = { event_string, NULL }; 4419 4420 switch (code) { 4421 case QLA_UEVENT_CODE_FW_DUMP: 4422 snprintf(event_string, sizeof(event_string), "FW_DUMP=%ld", 4423 vha->host_no); 4424 break; 4425 default: 4426 /* do nothing */ 4427 break; 4428 } 4429 kobject_uevent_env(&vha->hw->pdev->dev.kobj, KOBJ_CHANGE, envp); 4430 } 4431 4432 int 4433 qlafx00_post_aenfx_work(struct scsi_qla_host *vha, uint32_t evtcode, 4434 uint32_t *data, int cnt) 4435 { 4436 struct qla_work_evt *e; 4437 4438 e = qla2x00_alloc_work(vha, QLA_EVT_AENFX); 4439 if (!e) 4440 return QLA_FUNCTION_FAILED; 4441 4442 e->u.aenfx.evtcode = evtcode; 4443 e->u.aenfx.count = cnt; 4444 memcpy(e->u.aenfx.mbx, data, sizeof(*data) * cnt); 4445 return qla2x00_post_work(vha, e); 4446 } 4447 4448 int qla24xx_post_upd_fcport_work(struct scsi_qla_host *vha, fc_port_t *fcport) 4449 { 4450 struct qla_work_evt *e; 4451 4452 e = qla2x00_alloc_work(vha, QLA_EVT_UPD_FCPORT); 4453 if (!e) 4454 return QLA_FUNCTION_FAILED; 4455 4456 e->u.fcport.fcport = fcport; 4457 return qla2x00_post_work(vha, e); 4458 } 4459 4460 static 4461 void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e) 4462 { 4463 unsigned long flags; 4464 fc_port_t *fcport = NULL; 4465 struct qlt_plogi_ack_t *pla = 4466 (struct qlt_plogi_ack_t *)e->u.new_sess.pla; 4467 4468 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); 4469 fcport = qla2x00_find_fcport_by_wwpn(vha, e->u.new_sess.port_name, 1); 4470 if (fcport) { 4471 fcport->d_id = e->u.new_sess.id; 4472 if (pla) { 4473 fcport->fw_login_state = DSC_LS_PLOGI_PEND; 4474 qlt_plogi_ack_link(vha, pla, fcport, QLT_PLOGI_LINK_SAME_WWN); 4475 /* we took an extra ref_count to prevent PLOGI ACK when 4476 * fcport/sess has not been created. 4477 */ 4478 pla->ref_count--; 4479 } 4480 } else { 4481 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); 4482 if (fcport) { 4483 fcport->d_id = e->u.new_sess.id; 4484 fcport->scan_state = QLA_FCPORT_FOUND; 4485 fcport->flags |= FCF_FABRIC_DEVICE; 4486 fcport->fw_login_state = DSC_LS_PLOGI_PEND; 4487 4488 memcpy(fcport->port_name, e->u.new_sess.port_name, 4489 WWN_SIZE); 4490 list_add_tail(&fcport->list, &vha->vp_fcports); 4491 4492 if (pla) { 4493 qlt_plogi_ack_link(vha, pla, fcport, 4494 QLT_PLOGI_LINK_SAME_WWN); 4495 pla->ref_count--; 4496 } 4497 } 4498 } 4499 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 4500 4501 if (fcport) { 4502 if (pla) 4503 qlt_plogi_ack_unref(vha, pla); 4504 else 4505 qla24xx_async_gnl(vha, fcport); 4506 } 4507 } 4508 4509 void 4510 qla2x00_do_work(struct scsi_qla_host *vha) 4511 { 4512 struct qla_work_evt *e, *tmp; 4513 unsigned long flags; 4514 LIST_HEAD(work); 4515 4516 spin_lock_irqsave(&vha->work_lock, flags); 4517 list_splice_init(&vha->work_list, &work); 4518 spin_unlock_irqrestore(&vha->work_lock, flags); 4519 4520 list_for_each_entry_safe(e, tmp, &work, list) { 4521 list_del_init(&e->list); 4522 4523 switch (e->type) { 4524 case QLA_EVT_AEN: 4525 fc_host_post_event(vha->host, fc_get_event_number(), 4526 e->u.aen.code, e->u.aen.data); 4527 break; 4528 case QLA_EVT_IDC_ACK: 4529 qla81xx_idc_ack(vha, e->u.idc_ack.mb); 4530 break; 4531 case QLA_EVT_ASYNC_LOGIN: 4532 qla2x00_async_login(vha, e->u.logio.fcport, 4533 e->u.logio.data); 4534 break; 4535 case QLA_EVT_ASYNC_LOGOUT: 4536 qla2x00_async_logout(vha, e->u.logio.fcport); 4537 break; 4538 case QLA_EVT_ASYNC_LOGOUT_DONE: 4539 qla2x00_async_logout_done(vha, e->u.logio.fcport, 4540 e->u.logio.data); 4541 break; 4542 case QLA_EVT_ASYNC_ADISC: 4543 qla2x00_async_adisc(vha, e->u.logio.fcport, 4544 e->u.logio.data); 4545 break; 4546 case QLA_EVT_ASYNC_ADISC_DONE: 4547 qla2x00_async_adisc_done(vha, e->u.logio.fcport, 4548 e->u.logio.data); 4549 break; 4550 case QLA_EVT_UEVENT: 4551 qla2x00_uevent_emit(vha, e->u.uevent.code); 4552 break; 4553 case QLA_EVT_AENFX: 4554 qlafx00_process_aen(vha, e); 4555 break; 4556 case QLA_EVT_GIDPN: 4557 qla24xx_async_gidpn(vha, e->u.fcport.fcport); 4558 break; 4559 case QLA_EVT_GPNID: 4560 qla24xx_async_gpnid(vha, &e->u.gpnid.id); 4561 break; 4562 case QLA_EVT_GPNID_DONE: 4563 qla24xx_async_gpnid_done(vha, e->u.iosb.sp); 4564 break; 4565 case QLA_EVT_NEW_SESS: 4566 qla24xx_create_new_sess(vha, e); 4567 break; 4568 case QLA_EVT_GPDB: 4569 qla24xx_async_gpdb(vha, e->u.fcport.fcport, 4570 e->u.fcport.opt); 4571 break; 4572 case QLA_EVT_GPSC: 4573 qla24xx_async_gpsc(vha, e->u.fcport.fcport); 4574 break; 4575 case QLA_EVT_UPD_FCPORT: 4576 qla2x00_update_fcport(vha, e->u.fcport.fcport); 4577 break; 4578 case QLA_EVT_GNL: 4579 qla24xx_async_gnl(vha, e->u.fcport.fcport); 4580 break; 4581 case QLA_EVT_NACK: 4582 qla24xx_do_nack_work(vha, e); 4583 break; 4584 } 4585 if (e->flags & QLA_EVT_FLAG_FREE) 4586 kfree(e); 4587 4588 /* For each work completed decrement vha ref count */ 4589 QLA_VHA_MARK_NOT_BUSY(vha); 4590 } 4591 } 4592 4593 /* Relogins all the fcports of a vport 4594 * Context: dpc thread 4595 */ 4596 void qla2x00_relogin(struct scsi_qla_host *vha) 4597 { 4598 fc_port_t *fcport; 4599 int status; 4600 struct event_arg ea; 4601 4602 list_for_each_entry(fcport, &vha->vp_fcports, list) { 4603 /* 4604 * If the port is not ONLINE then try to login 4605 * to it if we haven't run out of retries. 4606 */ 4607 if (atomic_read(&fcport->state) != FCS_ONLINE && 4608 fcport->login_retry && !(fcport->flags & FCF_ASYNC_SENT)) { 4609 fcport->login_retry--; 4610 if (fcport->flags & FCF_FABRIC_DEVICE) { 4611 ql_dbg(ql_dbg_disc, fcport->vha, 0xffff, 4612 "%s %8phC DS %d LS %d\n", __func__, 4613 fcport->port_name, fcport->disc_state, 4614 fcport->fw_login_state); 4615 memset(&ea, 0, sizeof(ea)); 4616 ea.event = FCME_RELOGIN; 4617 ea.fcport = fcport; 4618 qla2x00_fcport_event_handler(vha, &ea); 4619 } else { 4620 status = qla2x00_local_device_login(vha, 4621 fcport); 4622 if (status == QLA_SUCCESS) { 4623 fcport->old_loop_id = fcport->loop_id; 4624 ql_dbg(ql_dbg_disc, vha, 0x2003, 4625 "Port login OK: logged in ID 0x%x.\n", 4626 fcport->loop_id); 4627 qla2x00_update_fcport(vha, fcport); 4628 } else if (status == 1) { 4629 set_bit(RELOGIN_NEEDED, &vha->dpc_flags); 4630 /* retry the login again */ 4631 ql_dbg(ql_dbg_disc, vha, 0x2007, 4632 "Retrying %d login again loop_id 0x%x.\n", 4633 fcport->login_retry, 4634 fcport->loop_id); 4635 } else { 4636 fcport->login_retry = 0; 4637 } 4638 4639 if (fcport->login_retry == 0 && 4640 status != QLA_SUCCESS) 4641 qla2x00_clear_loop_id(fcport); 4642 } 4643 } 4644 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) 4645 break; 4646 } 4647 } 4648 4649 /* Schedule work on any of the dpc-workqueues */ 4650 void 4651 qla83xx_schedule_work(scsi_qla_host_t *base_vha, int work_code) 4652 { 4653 struct qla_hw_data *ha = base_vha->hw; 4654 4655 switch (work_code) { 4656 case MBA_IDC_AEN: /* 0x8200 */ 4657 if (ha->dpc_lp_wq) 4658 queue_work(ha->dpc_lp_wq, &ha->idc_aen); 4659 break; 4660 4661 case QLA83XX_NIC_CORE_RESET: /* 0x1 */ 4662 if (!ha->flags.nic_core_reset_hdlr_active) { 4663 if (ha->dpc_hp_wq) 4664 queue_work(ha->dpc_hp_wq, &ha->nic_core_reset); 4665 } else 4666 ql_dbg(ql_dbg_p3p, base_vha, 0xb05e, 4667 "NIC Core reset is already active. Skip " 4668 "scheduling it again.\n"); 4669 break; 4670 case QLA83XX_IDC_STATE_HANDLER: /* 0x2 */ 4671 if (ha->dpc_hp_wq) 4672 queue_work(ha->dpc_hp_wq, &ha->idc_state_handler); 4673 break; 4674 case QLA83XX_NIC_CORE_UNRECOVERABLE: /* 0x3 */ 4675 if (ha->dpc_hp_wq) 4676 queue_work(ha->dpc_hp_wq, &ha->nic_core_unrecoverable); 4677 break; 4678 default: 4679 ql_log(ql_log_warn, base_vha, 0xb05f, 4680 "Unknown work-code=0x%x.\n", work_code); 4681 } 4682 4683 return; 4684 } 4685 4686 /* Work: Perform NIC Core Unrecoverable state handling */ 4687 void 4688 qla83xx_nic_core_unrecoverable_work(struct work_struct *work) 4689 { 4690 struct qla_hw_data *ha = 4691 container_of(work, struct qla_hw_data, nic_core_unrecoverable); 4692 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 4693 uint32_t dev_state = 0; 4694 4695 qla83xx_idc_lock(base_vha, 0); 4696 qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state); 4697 qla83xx_reset_ownership(base_vha); 4698 if (ha->flags.nic_core_reset_owner) { 4699 ha->flags.nic_core_reset_owner = 0; 4700 qla83xx_wr_reg(base_vha, QLA83XX_IDC_DEV_STATE, 4701 QLA8XXX_DEV_FAILED); 4702 ql_log(ql_log_info, base_vha, 0xb060, "HW State: FAILED.\n"); 4703 qla83xx_schedule_work(base_vha, QLA83XX_IDC_STATE_HANDLER); 4704 } 4705 qla83xx_idc_unlock(base_vha, 0); 4706 } 4707 4708 /* Work: Execute IDC state handler */ 4709 void 4710 qla83xx_idc_state_handler_work(struct work_struct *work) 4711 { 4712 struct qla_hw_data *ha = 4713 container_of(work, struct qla_hw_data, idc_state_handler); 4714 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 4715 uint32_t dev_state = 0; 4716 4717 qla83xx_idc_lock(base_vha, 0); 4718 qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state); 4719 if (dev_state == QLA8XXX_DEV_FAILED || 4720 dev_state == QLA8XXX_DEV_NEED_QUIESCENT) 4721 qla83xx_idc_state_handler(base_vha); 4722 qla83xx_idc_unlock(base_vha, 0); 4723 } 4724 4725 static int 4726 qla83xx_check_nic_core_fw_alive(scsi_qla_host_t *base_vha) 4727 { 4728 int rval = QLA_SUCCESS; 4729 unsigned long heart_beat_wait = jiffies + (1 * HZ); 4730 uint32_t heart_beat_counter1, heart_beat_counter2; 4731 4732 do { 4733 if (time_after(jiffies, heart_beat_wait)) { 4734 ql_dbg(ql_dbg_p3p, base_vha, 0xb07c, 4735 "Nic Core f/w is not alive.\n"); 4736 rval = QLA_FUNCTION_FAILED; 4737 break; 4738 } 4739 4740 qla83xx_idc_lock(base_vha, 0); 4741 qla83xx_rd_reg(base_vha, QLA83XX_FW_HEARTBEAT, 4742 &heart_beat_counter1); 4743 qla83xx_idc_unlock(base_vha, 0); 4744 msleep(100); 4745 qla83xx_idc_lock(base_vha, 0); 4746 qla83xx_rd_reg(base_vha, QLA83XX_FW_HEARTBEAT, 4747 &heart_beat_counter2); 4748 qla83xx_idc_unlock(base_vha, 0); 4749 } while (heart_beat_counter1 == heart_beat_counter2); 4750 4751 return rval; 4752 } 4753 4754 /* Work: Perform NIC Core Reset handling */ 4755 void 4756 qla83xx_nic_core_reset_work(struct work_struct *work) 4757 { 4758 struct qla_hw_data *ha = 4759 container_of(work, struct qla_hw_data, nic_core_reset); 4760 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 4761 uint32_t dev_state = 0; 4762 4763 if (IS_QLA2031(ha)) { 4764 if (qla2xxx_mctp_dump(base_vha) != QLA_SUCCESS) 4765 ql_log(ql_log_warn, base_vha, 0xb081, 4766 "Failed to dump mctp\n"); 4767 return; 4768 } 4769 4770 if (!ha->flags.nic_core_reset_hdlr_active) { 4771 if (qla83xx_check_nic_core_fw_alive(base_vha) == QLA_SUCCESS) { 4772 qla83xx_idc_lock(base_vha, 0); 4773 qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, 4774 &dev_state); 4775 qla83xx_idc_unlock(base_vha, 0); 4776 if (dev_state != QLA8XXX_DEV_NEED_RESET) { 4777 ql_dbg(ql_dbg_p3p, base_vha, 0xb07a, 4778 "Nic Core f/w is alive.\n"); 4779 return; 4780 } 4781 } 4782 4783 ha->flags.nic_core_reset_hdlr_active = 1; 4784 if (qla83xx_nic_core_reset(base_vha)) { 4785 /* NIC Core reset failed. */ 4786 ql_dbg(ql_dbg_p3p, base_vha, 0xb061, 4787 "NIC Core reset failed.\n"); 4788 } 4789 ha->flags.nic_core_reset_hdlr_active = 0; 4790 } 4791 } 4792 4793 /* Work: Handle 8200 IDC aens */ 4794 void 4795 qla83xx_service_idc_aen(struct work_struct *work) 4796 { 4797 struct qla_hw_data *ha = 4798 container_of(work, struct qla_hw_data, idc_aen); 4799 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 4800 uint32_t dev_state, idc_control; 4801 4802 qla83xx_idc_lock(base_vha, 0); 4803 qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state); 4804 qla83xx_rd_reg(base_vha, QLA83XX_IDC_CONTROL, &idc_control); 4805 qla83xx_idc_unlock(base_vha, 0); 4806 if (dev_state == QLA8XXX_DEV_NEED_RESET) { 4807 if (idc_control & QLA83XX_IDC_GRACEFUL_RESET) { 4808 ql_dbg(ql_dbg_p3p, base_vha, 0xb062, 4809 "Application requested NIC Core Reset.\n"); 4810 qla83xx_schedule_work(base_vha, QLA83XX_NIC_CORE_RESET); 4811 } else if (qla83xx_check_nic_core_fw_alive(base_vha) == 4812 QLA_SUCCESS) { 4813 ql_dbg(ql_dbg_p3p, base_vha, 0xb07b, 4814 "Other protocol driver requested NIC Core Reset.\n"); 4815 qla83xx_schedule_work(base_vha, QLA83XX_NIC_CORE_RESET); 4816 } 4817 } else if (dev_state == QLA8XXX_DEV_FAILED || 4818 dev_state == QLA8XXX_DEV_NEED_QUIESCENT) { 4819 qla83xx_schedule_work(base_vha, QLA83XX_IDC_STATE_HANDLER); 4820 } 4821 } 4822 4823 static void 4824 qla83xx_wait_logic(void) 4825 { 4826 int i; 4827 4828 /* Yield CPU */ 4829 if (!in_interrupt()) { 4830 /* 4831 * Wait about 200ms before retrying again. 4832 * This controls the number of retries for single 4833 * lock operation. 4834 */ 4835 msleep(100); 4836 schedule(); 4837 } else { 4838 for (i = 0; i < 20; i++) 4839 cpu_relax(); /* This a nop instr on i386 */ 4840 } 4841 } 4842 4843 static int 4844 qla83xx_force_lock_recovery(scsi_qla_host_t *base_vha) 4845 { 4846 int rval; 4847 uint32_t data; 4848 uint32_t idc_lck_rcvry_stage_mask = 0x3; 4849 uint32_t idc_lck_rcvry_owner_mask = 0x3c; 4850 struct qla_hw_data *ha = base_vha->hw; 4851 ql_dbg(ql_dbg_p3p, base_vha, 0xb086, 4852 "Trying force recovery of the IDC lock.\n"); 4853 4854 rval = qla83xx_rd_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY, &data); 4855 if (rval) 4856 return rval; 4857 4858 if ((data & idc_lck_rcvry_stage_mask) > 0) { 4859 return QLA_SUCCESS; 4860 } else { 4861 data = (IDC_LOCK_RECOVERY_STAGE1) | (ha->portnum << 2); 4862 rval = qla83xx_wr_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY, 4863 data); 4864 if (rval) 4865 return rval; 4866 4867 msleep(200); 4868 4869 rval = qla83xx_rd_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY, 4870 &data); 4871 if (rval) 4872 return rval; 4873 4874 if (((data & idc_lck_rcvry_owner_mask) >> 2) == ha->portnum) { 4875 data &= (IDC_LOCK_RECOVERY_STAGE2 | 4876 ~(idc_lck_rcvry_stage_mask)); 4877 rval = qla83xx_wr_reg(base_vha, 4878 QLA83XX_IDC_LOCK_RECOVERY, data); 4879 if (rval) 4880 return rval; 4881 4882 /* Forcefully perform IDC UnLock */ 4883 rval = qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_UNLOCK, 4884 &data); 4885 if (rval) 4886 return rval; 4887 /* Clear lock-id by setting 0xff */ 4888 rval = qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID, 4889 0xff); 4890 if (rval) 4891 return rval; 4892 /* Clear lock-recovery by setting 0x0 */ 4893 rval = qla83xx_wr_reg(base_vha, 4894 QLA83XX_IDC_LOCK_RECOVERY, 0x0); 4895 if (rval) 4896 return rval; 4897 } else 4898 return QLA_SUCCESS; 4899 } 4900 4901 return rval; 4902 } 4903 4904 static int 4905 qla83xx_idc_lock_recovery(scsi_qla_host_t *base_vha) 4906 { 4907 int rval = QLA_SUCCESS; 4908 uint32_t o_drv_lockid, n_drv_lockid; 4909 unsigned long lock_recovery_timeout; 4910 4911 lock_recovery_timeout = jiffies + QLA83XX_MAX_LOCK_RECOVERY_WAIT; 4912 retry_lockid: 4913 rval = qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, &o_drv_lockid); 4914 if (rval) 4915 goto exit; 4916 4917 /* MAX wait time before forcing IDC Lock recovery = 2 secs */ 4918 if (time_after_eq(jiffies, lock_recovery_timeout)) { 4919 if (qla83xx_force_lock_recovery(base_vha) == QLA_SUCCESS) 4920 return QLA_SUCCESS; 4921 else 4922 return QLA_FUNCTION_FAILED; 4923 } 4924 4925 rval = qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, &n_drv_lockid); 4926 if (rval) 4927 goto exit; 4928 4929 if (o_drv_lockid == n_drv_lockid) { 4930 qla83xx_wait_logic(); 4931 goto retry_lockid; 4932 } else 4933 return QLA_SUCCESS; 4934 4935 exit: 4936 return rval; 4937 } 4938 4939 void 4940 qla83xx_idc_lock(scsi_qla_host_t *base_vha, uint16_t requester_id) 4941 { 4942 uint16_t options = (requester_id << 15) | BIT_6; 4943 uint32_t data; 4944 uint32_t lock_owner; 4945 struct qla_hw_data *ha = base_vha->hw; 4946 4947 /* IDC-lock implementation using driver-lock/lock-id remote registers */ 4948 retry_lock: 4949 if (qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCK, &data) 4950 == QLA_SUCCESS) { 4951 if (data) { 4952 /* Setting lock-id to our function-number */ 4953 qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID, 4954 ha->portnum); 4955 } else { 4956 qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, 4957 &lock_owner); 4958 ql_dbg(ql_dbg_p3p, base_vha, 0xb063, 4959 "Failed to acquire IDC lock, acquired by %d, " 4960 "retrying...\n", lock_owner); 4961 4962 /* Retry/Perform IDC-Lock recovery */ 4963 if (qla83xx_idc_lock_recovery(base_vha) 4964 == QLA_SUCCESS) { 4965 qla83xx_wait_logic(); 4966 goto retry_lock; 4967 } else 4968 ql_log(ql_log_warn, base_vha, 0xb075, 4969 "IDC Lock recovery FAILED.\n"); 4970 } 4971 4972 } 4973 4974 return; 4975 4976 /* XXX: IDC-lock implementation using access-control mbx */ 4977 retry_lock2: 4978 if (qla83xx_access_control(base_vha, options, 0, 0, NULL)) { 4979 ql_dbg(ql_dbg_p3p, base_vha, 0xb072, 4980 "Failed to acquire IDC lock. retrying...\n"); 4981 /* Retry/Perform IDC-Lock recovery */ 4982 if (qla83xx_idc_lock_recovery(base_vha) == QLA_SUCCESS) { 4983 qla83xx_wait_logic(); 4984 goto retry_lock2; 4985 } else 4986 ql_log(ql_log_warn, base_vha, 0xb076, 4987 "IDC Lock recovery FAILED.\n"); 4988 } 4989 4990 return; 4991 } 4992 4993 void 4994 qla83xx_idc_unlock(scsi_qla_host_t *base_vha, uint16_t requester_id) 4995 { 4996 #if 0 4997 uint16_t options = (requester_id << 15) | BIT_7; 4998 #endif 4999 uint16_t retry; 5000 uint32_t data; 5001 struct qla_hw_data *ha = base_vha->hw; 5002 5003 /* IDC-unlock implementation using driver-unlock/lock-id 5004 * remote registers 5005 */ 5006 retry = 0; 5007 retry_unlock: 5008 if (qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, &data) 5009 == QLA_SUCCESS) { 5010 if (data == ha->portnum) { 5011 qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_UNLOCK, &data); 5012 /* Clearing lock-id by setting 0xff */ 5013 qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID, 0xff); 5014 } else if (retry < 10) { 5015 /* SV: XXX: IDC unlock retrying needed here? */ 5016 5017 /* Retry for IDC-unlock */ 5018 qla83xx_wait_logic(); 5019 retry++; 5020 ql_dbg(ql_dbg_p3p, base_vha, 0xb064, 5021 "Failed to release IDC lock, retrying=%d\n", retry); 5022 goto retry_unlock; 5023 } 5024 } else if (retry < 10) { 5025 /* Retry for IDC-unlock */ 5026 qla83xx_wait_logic(); 5027 retry++; 5028 ql_dbg(ql_dbg_p3p, base_vha, 0xb065, 5029 "Failed to read drv-lockid, retrying=%d\n", retry); 5030 goto retry_unlock; 5031 } 5032 5033 return; 5034 5035 #if 0 5036 /* XXX: IDC-unlock implementation using access-control mbx */ 5037 retry = 0; 5038 retry_unlock2: 5039 if (qla83xx_access_control(base_vha, options, 0, 0, NULL)) { 5040 if (retry < 10) { 5041 /* Retry for IDC-unlock */ 5042 qla83xx_wait_logic(); 5043 retry++; 5044 ql_dbg(ql_dbg_p3p, base_vha, 0xb066, 5045 "Failed to release IDC lock, retrying=%d\n", retry); 5046 goto retry_unlock2; 5047 } 5048 } 5049 5050 return; 5051 #endif 5052 } 5053 5054 int 5055 __qla83xx_set_drv_presence(scsi_qla_host_t *vha) 5056 { 5057 int rval = QLA_SUCCESS; 5058 struct qla_hw_data *ha = vha->hw; 5059 uint32_t drv_presence; 5060 5061 rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence); 5062 if (rval == QLA_SUCCESS) { 5063 drv_presence |= (1 << ha->portnum); 5064 rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRV_PRESENCE, 5065 drv_presence); 5066 } 5067 5068 return rval; 5069 } 5070 5071 int 5072 qla83xx_set_drv_presence(scsi_qla_host_t *vha) 5073 { 5074 int rval = QLA_SUCCESS; 5075 5076 qla83xx_idc_lock(vha, 0); 5077 rval = __qla83xx_set_drv_presence(vha); 5078 qla83xx_idc_unlock(vha, 0); 5079 5080 return rval; 5081 } 5082 5083 int 5084 __qla83xx_clear_drv_presence(scsi_qla_host_t *vha) 5085 { 5086 int rval = QLA_SUCCESS; 5087 struct qla_hw_data *ha = vha->hw; 5088 uint32_t drv_presence; 5089 5090 rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence); 5091 if (rval == QLA_SUCCESS) { 5092 drv_presence &= ~(1 << ha->portnum); 5093 rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRV_PRESENCE, 5094 drv_presence); 5095 } 5096 5097 return rval; 5098 } 5099 5100 int 5101 qla83xx_clear_drv_presence(scsi_qla_host_t *vha) 5102 { 5103 int rval = QLA_SUCCESS; 5104 5105 qla83xx_idc_lock(vha, 0); 5106 rval = __qla83xx_clear_drv_presence(vha); 5107 qla83xx_idc_unlock(vha, 0); 5108 5109 return rval; 5110 } 5111 5112 static void 5113 qla83xx_need_reset_handler(scsi_qla_host_t *vha) 5114 { 5115 struct qla_hw_data *ha = vha->hw; 5116 uint32_t drv_ack, drv_presence; 5117 unsigned long ack_timeout; 5118 5119 /* Wait for IDC ACK from all functions (DRV-ACK == DRV-PRESENCE) */ 5120 ack_timeout = jiffies + (ha->fcoe_reset_timeout * HZ); 5121 while (1) { 5122 qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack); 5123 qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence); 5124 if ((drv_ack & drv_presence) == drv_presence) 5125 break; 5126 5127 if (time_after_eq(jiffies, ack_timeout)) { 5128 ql_log(ql_log_warn, vha, 0xb067, 5129 "RESET ACK TIMEOUT! drv_presence=0x%x " 5130 "drv_ack=0x%x\n", drv_presence, drv_ack); 5131 /* 5132 * The function(s) which did not ack in time are forced 5133 * to withdraw any further participation in the IDC 5134 * reset. 5135 */ 5136 if (drv_ack != drv_presence) 5137 qla83xx_wr_reg(vha, QLA83XX_IDC_DRV_PRESENCE, 5138 drv_ack); 5139 break; 5140 } 5141 5142 qla83xx_idc_unlock(vha, 0); 5143 msleep(1000); 5144 qla83xx_idc_lock(vha, 0); 5145 } 5146 5147 qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_COLD); 5148 ql_log(ql_log_info, vha, 0xb068, "HW State: COLD/RE-INIT.\n"); 5149 } 5150 5151 static int 5152 qla83xx_device_bootstrap(scsi_qla_host_t *vha) 5153 { 5154 int rval = QLA_SUCCESS; 5155 uint32_t idc_control; 5156 5157 qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_INITIALIZING); 5158 ql_log(ql_log_info, vha, 0xb069, "HW State: INITIALIZING.\n"); 5159 5160 /* Clearing IDC-Control Graceful-Reset Bit before resetting f/w */ 5161 __qla83xx_get_idc_control(vha, &idc_control); 5162 idc_control &= ~QLA83XX_IDC_GRACEFUL_RESET; 5163 __qla83xx_set_idc_control(vha, 0); 5164 5165 qla83xx_idc_unlock(vha, 0); 5166 rval = qla83xx_restart_nic_firmware(vha); 5167 qla83xx_idc_lock(vha, 0); 5168 5169 if (rval != QLA_SUCCESS) { 5170 ql_log(ql_log_fatal, vha, 0xb06a, 5171 "Failed to restart NIC f/w.\n"); 5172 qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_FAILED); 5173 ql_log(ql_log_info, vha, 0xb06b, "HW State: FAILED.\n"); 5174 } else { 5175 ql_dbg(ql_dbg_p3p, vha, 0xb06c, 5176 "Success in restarting nic f/w.\n"); 5177 qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_READY); 5178 ql_log(ql_log_info, vha, 0xb06d, "HW State: READY.\n"); 5179 } 5180 5181 return rval; 5182 } 5183 5184 /* Assumes idc_lock always held on entry */ 5185 int 5186 qla83xx_idc_state_handler(scsi_qla_host_t *base_vha) 5187 { 5188 struct qla_hw_data *ha = base_vha->hw; 5189 int rval = QLA_SUCCESS; 5190 unsigned long dev_init_timeout; 5191 uint32_t dev_state; 5192 5193 /* Wait for MAX-INIT-TIMEOUT for the device to go ready */ 5194 dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout * HZ); 5195 5196 while (1) { 5197 5198 if (time_after_eq(jiffies, dev_init_timeout)) { 5199 ql_log(ql_log_warn, base_vha, 0xb06e, 5200 "Initialization TIMEOUT!\n"); 5201 /* Init timeout. Disable further NIC Core 5202 * communication. 5203 */ 5204 qla83xx_wr_reg(base_vha, QLA83XX_IDC_DEV_STATE, 5205 QLA8XXX_DEV_FAILED); 5206 ql_log(ql_log_info, base_vha, 0xb06f, 5207 "HW State: FAILED.\n"); 5208 } 5209 5210 qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state); 5211 switch (dev_state) { 5212 case QLA8XXX_DEV_READY: 5213 if (ha->flags.nic_core_reset_owner) 5214 qla83xx_idc_audit(base_vha, 5215 IDC_AUDIT_COMPLETION); 5216 ha->flags.nic_core_reset_owner = 0; 5217 ql_dbg(ql_dbg_p3p, base_vha, 0xb070, 5218 "Reset_owner reset by 0x%x.\n", 5219 ha->portnum); 5220 goto exit; 5221 case QLA8XXX_DEV_COLD: 5222 if (ha->flags.nic_core_reset_owner) 5223 rval = qla83xx_device_bootstrap(base_vha); 5224 else { 5225 /* Wait for AEN to change device-state */ 5226 qla83xx_idc_unlock(base_vha, 0); 5227 msleep(1000); 5228 qla83xx_idc_lock(base_vha, 0); 5229 } 5230 break; 5231 case QLA8XXX_DEV_INITIALIZING: 5232 /* Wait for AEN to change device-state */ 5233 qla83xx_idc_unlock(base_vha, 0); 5234 msleep(1000); 5235 qla83xx_idc_lock(base_vha, 0); 5236 break; 5237 case QLA8XXX_DEV_NEED_RESET: 5238 if (!ql2xdontresethba && ha->flags.nic_core_reset_owner) 5239 qla83xx_need_reset_handler(base_vha); 5240 else { 5241 /* Wait for AEN to change device-state */ 5242 qla83xx_idc_unlock(base_vha, 0); 5243 msleep(1000); 5244 qla83xx_idc_lock(base_vha, 0); 5245 } 5246 /* reset timeout value after need reset handler */ 5247 dev_init_timeout = jiffies + 5248 (ha->fcoe_dev_init_timeout * HZ); 5249 break; 5250 case QLA8XXX_DEV_NEED_QUIESCENT: 5251 /* XXX: DEBUG for now */ 5252 qla83xx_idc_unlock(base_vha, 0); 5253 msleep(1000); 5254 qla83xx_idc_lock(base_vha, 0); 5255 break; 5256 case QLA8XXX_DEV_QUIESCENT: 5257 /* XXX: DEBUG for now */ 5258 if (ha->flags.quiesce_owner) 5259 goto exit; 5260 5261 qla83xx_idc_unlock(base_vha, 0); 5262 msleep(1000); 5263 qla83xx_idc_lock(base_vha, 0); 5264 dev_init_timeout = jiffies + 5265 (ha->fcoe_dev_init_timeout * HZ); 5266 break; 5267 case QLA8XXX_DEV_FAILED: 5268 if (ha->flags.nic_core_reset_owner) 5269 qla83xx_idc_audit(base_vha, 5270 IDC_AUDIT_COMPLETION); 5271 ha->flags.nic_core_reset_owner = 0; 5272 __qla83xx_clear_drv_presence(base_vha); 5273 qla83xx_idc_unlock(base_vha, 0); 5274 qla8xxx_dev_failed_handler(base_vha); 5275 rval = QLA_FUNCTION_FAILED; 5276 qla83xx_idc_lock(base_vha, 0); 5277 goto exit; 5278 case QLA8XXX_BAD_VALUE: 5279 qla83xx_idc_unlock(base_vha, 0); 5280 msleep(1000); 5281 qla83xx_idc_lock(base_vha, 0); 5282 break; 5283 default: 5284 ql_log(ql_log_warn, base_vha, 0xb071, 5285 "Unknown Device State: %x.\n", dev_state); 5286 qla83xx_idc_unlock(base_vha, 0); 5287 qla8xxx_dev_failed_handler(base_vha); 5288 rval = QLA_FUNCTION_FAILED; 5289 qla83xx_idc_lock(base_vha, 0); 5290 goto exit; 5291 } 5292 } 5293 5294 exit: 5295 return rval; 5296 } 5297 5298 void 5299 qla2x00_disable_board_on_pci_error(struct work_struct *work) 5300 { 5301 struct qla_hw_data *ha = container_of(work, struct qla_hw_data, 5302 board_disable); 5303 struct pci_dev *pdev = ha->pdev; 5304 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 5305 5306 /* 5307 * if UNLOAD flag is already set, then continue unload, 5308 * where it was set first. 5309 */ 5310 if (test_bit(UNLOADING, &base_vha->dpc_flags)) 5311 return; 5312 5313 ql_log(ql_log_warn, base_vha, 0x015b, 5314 "Disabling adapter.\n"); 5315 5316 qla2x00_wait_for_sess_deletion(base_vha); 5317 5318 set_bit(UNLOADING, &base_vha->dpc_flags); 5319 5320 qla2x00_delete_all_vps(ha, base_vha); 5321 5322 qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16); 5323 5324 qla2x00_dfs_remove(base_vha); 5325 5326 qla84xx_put_chip(base_vha); 5327 5328 if (base_vha->timer_active) 5329 qla2x00_stop_timer(base_vha); 5330 5331 base_vha->flags.online = 0; 5332 5333 qla2x00_destroy_deferred_work(ha); 5334 5335 /* 5336 * Do not try to stop beacon blink as it will issue a mailbox 5337 * command. 5338 */ 5339 qla2x00_free_sysfs_attr(base_vha, false); 5340 5341 fc_remove_host(base_vha->host); 5342 5343 scsi_remove_host(base_vha->host); 5344 5345 base_vha->flags.init_done = 0; 5346 qla25xx_delete_queues(base_vha); 5347 qla2x00_free_fcports(base_vha); 5348 qla2x00_free_irqs(base_vha); 5349 qla2x00_mem_free(ha); 5350 qla82xx_md_free(base_vha); 5351 qla2x00_free_queues(ha); 5352 5353 qla2x00_unmap_iobases(ha); 5354 5355 pci_release_selected_regions(ha->pdev, ha->bars); 5356 pci_disable_pcie_error_reporting(pdev); 5357 pci_disable_device(pdev); 5358 5359 /* 5360 * Let qla2x00_remove_one cleanup qla_hw_data on device removal. 5361 */ 5362 } 5363 5364 /************************************************************************** 5365 * qla2x00_do_dpc 5366 * This kernel thread is a task that is schedule by the interrupt handler 5367 * to perform the background processing for interrupts. 5368 * 5369 * Notes: 5370 * This task always run in the context of a kernel thread. It 5371 * is kick-off by the driver's detect code and starts up 5372 * up one per adapter. It immediately goes to sleep and waits for 5373 * some fibre event. When either the interrupt handler or 5374 * the timer routine detects a event it will one of the task 5375 * bits then wake us up. 5376 **************************************************************************/ 5377 static int 5378 qla2x00_do_dpc(void *data) 5379 { 5380 scsi_qla_host_t *base_vha; 5381 struct qla_hw_data *ha; 5382 uint32_t online; 5383 struct qla_qpair *qpair; 5384 5385 ha = (struct qla_hw_data *)data; 5386 base_vha = pci_get_drvdata(ha->pdev); 5387 5388 set_user_nice(current, MIN_NICE); 5389 5390 set_current_state(TASK_INTERRUPTIBLE); 5391 while (!kthread_should_stop()) { 5392 ql_dbg(ql_dbg_dpc, base_vha, 0x4000, 5393 "DPC handler sleeping.\n"); 5394 5395 schedule(); 5396 5397 if (!base_vha->flags.init_done || ha->flags.mbox_busy) 5398 goto end_loop; 5399 5400 if (ha->flags.eeh_busy) { 5401 ql_dbg(ql_dbg_dpc, base_vha, 0x4003, 5402 "eeh_busy=%d.\n", ha->flags.eeh_busy); 5403 goto end_loop; 5404 } 5405 5406 ha->dpc_active = 1; 5407 5408 ql_dbg(ql_dbg_dpc + ql_dbg_verbose, base_vha, 0x4001, 5409 "DPC handler waking up, dpc_flags=0x%lx.\n", 5410 base_vha->dpc_flags); 5411 5412 if (test_bit(UNLOADING, &base_vha->dpc_flags)) 5413 break; 5414 5415 qla2x00_do_work(base_vha); 5416 5417 if (IS_P3P_TYPE(ha)) { 5418 if (IS_QLA8044(ha)) { 5419 if (test_and_clear_bit(ISP_UNRECOVERABLE, 5420 &base_vha->dpc_flags)) { 5421 qla8044_idc_lock(ha); 5422 qla8044_wr_direct(base_vha, 5423 QLA8044_CRB_DEV_STATE_INDEX, 5424 QLA8XXX_DEV_FAILED); 5425 qla8044_idc_unlock(ha); 5426 ql_log(ql_log_info, base_vha, 0x4004, 5427 "HW State: FAILED.\n"); 5428 qla8044_device_state_handler(base_vha); 5429 continue; 5430 } 5431 5432 } else { 5433 if (test_and_clear_bit(ISP_UNRECOVERABLE, 5434 &base_vha->dpc_flags)) { 5435 qla82xx_idc_lock(ha); 5436 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 5437 QLA8XXX_DEV_FAILED); 5438 qla82xx_idc_unlock(ha); 5439 ql_log(ql_log_info, base_vha, 0x0151, 5440 "HW State: FAILED.\n"); 5441 qla82xx_device_state_handler(base_vha); 5442 continue; 5443 } 5444 } 5445 5446 if (test_and_clear_bit(FCOE_CTX_RESET_NEEDED, 5447 &base_vha->dpc_flags)) { 5448 5449 ql_dbg(ql_dbg_dpc, base_vha, 0x4005, 5450 "FCoE context reset scheduled.\n"); 5451 if (!(test_and_set_bit(ABORT_ISP_ACTIVE, 5452 &base_vha->dpc_flags))) { 5453 if (qla82xx_fcoe_ctx_reset(base_vha)) { 5454 /* FCoE-ctx reset failed. 5455 * Escalate to chip-reset 5456 */ 5457 set_bit(ISP_ABORT_NEEDED, 5458 &base_vha->dpc_flags); 5459 } 5460 clear_bit(ABORT_ISP_ACTIVE, 5461 &base_vha->dpc_flags); 5462 } 5463 5464 ql_dbg(ql_dbg_dpc, base_vha, 0x4006, 5465 "FCoE context reset end.\n"); 5466 } 5467 } else if (IS_QLAFX00(ha)) { 5468 if (test_and_clear_bit(ISP_UNRECOVERABLE, 5469 &base_vha->dpc_flags)) { 5470 ql_dbg(ql_dbg_dpc, base_vha, 0x4020, 5471 "Firmware Reset Recovery\n"); 5472 if (qlafx00_reset_initialize(base_vha)) { 5473 /* Failed. Abort isp later. */ 5474 if (!test_bit(UNLOADING, 5475 &base_vha->dpc_flags)) { 5476 set_bit(ISP_UNRECOVERABLE, 5477 &base_vha->dpc_flags); 5478 ql_dbg(ql_dbg_dpc, base_vha, 5479 0x4021, 5480 "Reset Recovery Failed\n"); 5481 } 5482 } 5483 } 5484 5485 if (test_and_clear_bit(FX00_TARGET_SCAN, 5486 &base_vha->dpc_flags)) { 5487 ql_dbg(ql_dbg_dpc, base_vha, 0x4022, 5488 "ISPFx00 Target Scan scheduled\n"); 5489 if (qlafx00_rescan_isp(base_vha)) { 5490 if (!test_bit(UNLOADING, 5491 &base_vha->dpc_flags)) 5492 set_bit(ISP_UNRECOVERABLE, 5493 &base_vha->dpc_flags); 5494 ql_dbg(ql_dbg_dpc, base_vha, 0x401e, 5495 "ISPFx00 Target Scan Failed\n"); 5496 } 5497 ql_dbg(ql_dbg_dpc, base_vha, 0x401f, 5498 "ISPFx00 Target Scan End\n"); 5499 } 5500 if (test_and_clear_bit(FX00_HOST_INFO_RESEND, 5501 &base_vha->dpc_flags)) { 5502 ql_dbg(ql_dbg_dpc, base_vha, 0x4023, 5503 "ISPFx00 Host Info resend scheduled\n"); 5504 qlafx00_fx_disc(base_vha, 5505 &base_vha->hw->mr.fcport, 5506 FXDISC_REG_HOST_INFO); 5507 } 5508 } 5509 5510 if (test_and_clear_bit(ISP_ABORT_NEEDED, 5511 &base_vha->dpc_flags)) { 5512 5513 ql_dbg(ql_dbg_dpc, base_vha, 0x4007, 5514 "ISP abort scheduled.\n"); 5515 if (!(test_and_set_bit(ABORT_ISP_ACTIVE, 5516 &base_vha->dpc_flags))) { 5517 5518 if (ha->isp_ops->abort_isp(base_vha)) { 5519 /* failed. retry later */ 5520 set_bit(ISP_ABORT_NEEDED, 5521 &base_vha->dpc_flags); 5522 } 5523 clear_bit(ABORT_ISP_ACTIVE, 5524 &base_vha->dpc_flags); 5525 } 5526 5527 ql_dbg(ql_dbg_dpc, base_vha, 0x4008, 5528 "ISP abort end.\n"); 5529 } 5530 5531 if (test_and_clear_bit(FCPORT_UPDATE_NEEDED, 5532 &base_vha->dpc_flags)) { 5533 qla2x00_update_fcports(base_vha); 5534 } 5535 5536 if (IS_QLAFX00(ha)) 5537 goto loop_resync_check; 5538 5539 if (test_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags)) { 5540 ql_dbg(ql_dbg_dpc, base_vha, 0x4009, 5541 "Quiescence mode scheduled.\n"); 5542 if (IS_P3P_TYPE(ha)) { 5543 if (IS_QLA82XX(ha)) 5544 qla82xx_device_state_handler(base_vha); 5545 if (IS_QLA8044(ha)) 5546 qla8044_device_state_handler(base_vha); 5547 clear_bit(ISP_QUIESCE_NEEDED, 5548 &base_vha->dpc_flags); 5549 if (!ha->flags.quiesce_owner) { 5550 qla2x00_perform_loop_resync(base_vha); 5551 if (IS_QLA82XX(ha)) { 5552 qla82xx_idc_lock(ha); 5553 qla82xx_clear_qsnt_ready( 5554 base_vha); 5555 qla82xx_idc_unlock(ha); 5556 } else if (IS_QLA8044(ha)) { 5557 qla8044_idc_lock(ha); 5558 qla8044_clear_qsnt_ready( 5559 base_vha); 5560 qla8044_idc_unlock(ha); 5561 } 5562 } 5563 } else { 5564 clear_bit(ISP_QUIESCE_NEEDED, 5565 &base_vha->dpc_flags); 5566 qla2x00_quiesce_io(base_vha); 5567 } 5568 ql_dbg(ql_dbg_dpc, base_vha, 0x400a, 5569 "Quiescence mode end.\n"); 5570 } 5571 5572 if (test_and_clear_bit(RESET_MARKER_NEEDED, 5573 &base_vha->dpc_flags) && 5574 (!(test_and_set_bit(RESET_ACTIVE, &base_vha->dpc_flags)))) { 5575 5576 ql_dbg(ql_dbg_dpc, base_vha, 0x400b, 5577 "Reset marker scheduled.\n"); 5578 qla2x00_rst_aen(base_vha); 5579 clear_bit(RESET_ACTIVE, &base_vha->dpc_flags); 5580 ql_dbg(ql_dbg_dpc, base_vha, 0x400c, 5581 "Reset marker end.\n"); 5582 } 5583 5584 /* Retry each device up to login retry count */ 5585 if ((test_and_clear_bit(RELOGIN_NEEDED, 5586 &base_vha->dpc_flags)) && 5587 !test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags) && 5588 atomic_read(&base_vha->loop_state) != LOOP_DOWN) { 5589 5590 ql_dbg(ql_dbg_dpc, base_vha, 0x400d, 5591 "Relogin scheduled.\n"); 5592 qla2x00_relogin(base_vha); 5593 ql_dbg(ql_dbg_dpc, base_vha, 0x400e, 5594 "Relogin end.\n"); 5595 } 5596 loop_resync_check: 5597 if (test_and_clear_bit(LOOP_RESYNC_NEEDED, 5598 &base_vha->dpc_flags)) { 5599 5600 ql_dbg(ql_dbg_dpc, base_vha, 0x400f, 5601 "Loop resync scheduled.\n"); 5602 5603 if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, 5604 &base_vha->dpc_flags))) { 5605 5606 qla2x00_loop_resync(base_vha); 5607 5608 clear_bit(LOOP_RESYNC_ACTIVE, 5609 &base_vha->dpc_flags); 5610 } 5611 5612 ql_dbg(ql_dbg_dpc, base_vha, 0x4010, 5613 "Loop resync end.\n"); 5614 } 5615 5616 if (IS_QLAFX00(ha)) 5617 goto intr_on_check; 5618 5619 if (test_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags) && 5620 atomic_read(&base_vha->loop_state) == LOOP_READY) { 5621 clear_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags); 5622 qla2xxx_flash_npiv_conf(base_vha); 5623 } 5624 5625 intr_on_check: 5626 if (!ha->interrupts_on) 5627 ha->isp_ops->enable_intrs(ha); 5628 5629 if (test_and_clear_bit(BEACON_BLINK_NEEDED, 5630 &base_vha->dpc_flags)) { 5631 if (ha->beacon_blink_led == 1) 5632 ha->isp_ops->beacon_blink(base_vha); 5633 } 5634 5635 /* qpair online check */ 5636 if (test_and_clear_bit(QPAIR_ONLINE_CHECK_NEEDED, 5637 &base_vha->dpc_flags)) { 5638 if (ha->flags.eeh_busy || 5639 ha->flags.pci_channel_io_perm_failure) 5640 online = 0; 5641 else 5642 online = 1; 5643 5644 mutex_lock(&ha->mq_lock); 5645 list_for_each_entry(qpair, &base_vha->qp_list, 5646 qp_list_elem) 5647 qpair->online = online; 5648 mutex_unlock(&ha->mq_lock); 5649 } 5650 5651 if (!IS_QLAFX00(ha)) 5652 qla2x00_do_dpc_all_vps(base_vha); 5653 5654 ha->dpc_active = 0; 5655 end_loop: 5656 set_current_state(TASK_INTERRUPTIBLE); 5657 } /* End of while(1) */ 5658 __set_current_state(TASK_RUNNING); 5659 5660 ql_dbg(ql_dbg_dpc, base_vha, 0x4011, 5661 "DPC handler exiting.\n"); 5662 5663 /* 5664 * Make sure that nobody tries to wake us up again. 5665 */ 5666 ha->dpc_active = 0; 5667 5668 /* Cleanup any residual CTX SRBs. */ 5669 qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16); 5670 5671 return 0; 5672 } 5673 5674 void 5675 qla2xxx_wake_dpc(struct scsi_qla_host *vha) 5676 { 5677 struct qla_hw_data *ha = vha->hw; 5678 struct task_struct *t = ha->dpc_thread; 5679 5680 if (!test_bit(UNLOADING, &vha->dpc_flags) && t) 5681 wake_up_process(t); 5682 } 5683 5684 /* 5685 * qla2x00_rst_aen 5686 * Processes asynchronous reset. 5687 * 5688 * Input: 5689 * ha = adapter block pointer. 5690 */ 5691 static void 5692 qla2x00_rst_aen(scsi_qla_host_t *vha) 5693 { 5694 if (vha->flags.online && !vha->flags.reset_active && 5695 !atomic_read(&vha->loop_down_timer) && 5696 !(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))) { 5697 do { 5698 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); 5699 5700 /* 5701 * Issue marker command only when we are going to start 5702 * the I/O. 5703 */ 5704 vha->marker_needed = 1; 5705 } while (!atomic_read(&vha->loop_down_timer) && 5706 (test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags))); 5707 } 5708 } 5709 5710 /************************************************************************** 5711 * qla2x00_timer 5712 * 5713 * Description: 5714 * One second timer 5715 * 5716 * Context: Interrupt 5717 ***************************************************************************/ 5718 void 5719 qla2x00_timer(scsi_qla_host_t *vha) 5720 { 5721 unsigned long cpu_flags = 0; 5722 int start_dpc = 0; 5723 int index; 5724 srb_t *sp; 5725 uint16_t w; 5726 struct qla_hw_data *ha = vha->hw; 5727 struct req_que *req; 5728 5729 if (ha->flags.eeh_busy) { 5730 ql_dbg(ql_dbg_timer, vha, 0x6000, 5731 "EEH = %d, restarting timer.\n", 5732 ha->flags.eeh_busy); 5733 qla2x00_restart_timer(vha, WATCH_INTERVAL); 5734 return; 5735 } 5736 5737 /* 5738 * Hardware read to raise pending EEH errors during mailbox waits. If 5739 * the read returns -1 then disable the board. 5740 */ 5741 if (!pci_channel_offline(ha->pdev)) { 5742 pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w); 5743 qla2x00_check_reg16_for_disconnect(vha, w); 5744 } 5745 5746 /* Make sure qla82xx_watchdog is run only for physical port */ 5747 if (!vha->vp_idx && IS_P3P_TYPE(ha)) { 5748 if (test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) 5749 start_dpc++; 5750 if (IS_QLA82XX(ha)) 5751 qla82xx_watchdog(vha); 5752 else if (IS_QLA8044(ha)) 5753 qla8044_watchdog(vha); 5754 } 5755 5756 if (!vha->vp_idx && IS_QLAFX00(ha)) 5757 qlafx00_timer_routine(vha); 5758 5759 /* Loop down handler. */ 5760 if (atomic_read(&vha->loop_down_timer) > 0 && 5761 !(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) && 5762 !(test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags)) 5763 && vha->flags.online) { 5764 5765 if (atomic_read(&vha->loop_down_timer) == 5766 vha->loop_down_abort_time) { 5767 5768 ql_log(ql_log_info, vha, 0x6008, 5769 "Loop down - aborting the queues before time expires.\n"); 5770 5771 if (!IS_QLA2100(ha) && vha->link_down_timeout) 5772 atomic_set(&vha->loop_state, LOOP_DEAD); 5773 5774 /* 5775 * Schedule an ISP abort to return any FCP2-device 5776 * commands. 5777 */ 5778 /* NPIV - scan physical port only */ 5779 if (!vha->vp_idx) { 5780 spin_lock_irqsave(&ha->hardware_lock, 5781 cpu_flags); 5782 req = ha->req_q_map[0]; 5783 for (index = 1; 5784 index < req->num_outstanding_cmds; 5785 index++) { 5786 fc_port_t *sfcp; 5787 5788 sp = req->outstanding_cmds[index]; 5789 if (!sp) 5790 continue; 5791 if (sp->type != SRB_SCSI_CMD) 5792 continue; 5793 sfcp = sp->fcport; 5794 if (!(sfcp->flags & FCF_FCP2_DEVICE)) 5795 continue; 5796 5797 if (IS_QLA82XX(ha)) 5798 set_bit(FCOE_CTX_RESET_NEEDED, 5799 &vha->dpc_flags); 5800 else 5801 set_bit(ISP_ABORT_NEEDED, 5802 &vha->dpc_flags); 5803 break; 5804 } 5805 spin_unlock_irqrestore(&ha->hardware_lock, 5806 cpu_flags); 5807 } 5808 start_dpc++; 5809 } 5810 5811 /* if the loop has been down for 4 minutes, reinit adapter */ 5812 if (atomic_dec_and_test(&vha->loop_down_timer) != 0) { 5813 if (!(vha->device_flags & DFLG_NO_CABLE)) { 5814 ql_log(ql_log_warn, vha, 0x6009, 5815 "Loop down - aborting ISP.\n"); 5816 5817 if (IS_QLA82XX(ha)) 5818 set_bit(FCOE_CTX_RESET_NEEDED, 5819 &vha->dpc_flags); 5820 else 5821 set_bit(ISP_ABORT_NEEDED, 5822 &vha->dpc_flags); 5823 } 5824 } 5825 ql_dbg(ql_dbg_timer, vha, 0x600a, 5826 "Loop down - seconds remaining %d.\n", 5827 atomic_read(&vha->loop_down_timer)); 5828 } 5829 /* Check if beacon LED needs to be blinked for physical host only */ 5830 if (!vha->vp_idx && (ha->beacon_blink_led == 1)) { 5831 /* There is no beacon_blink function for ISP82xx */ 5832 if (!IS_P3P_TYPE(ha)) { 5833 set_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags); 5834 start_dpc++; 5835 } 5836 } 5837 5838 /* Process any deferred work. */ 5839 if (!list_empty(&vha->work_list)) 5840 start_dpc++; 5841 5842 /* Schedule the DPC routine if needed */ 5843 if ((test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) || 5844 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) || 5845 test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags) || 5846 start_dpc || 5847 test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) || 5848 test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags) || 5849 test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags) || 5850 test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) || 5851 test_bit(VP_DPC_NEEDED, &vha->dpc_flags) || 5852 test_bit(RELOGIN_NEEDED, &vha->dpc_flags))) { 5853 ql_dbg(ql_dbg_timer, vha, 0x600b, 5854 "isp_abort_needed=%d loop_resync_needed=%d " 5855 "fcport_update_needed=%d start_dpc=%d " 5856 "reset_marker_needed=%d", 5857 test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags), 5858 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags), 5859 test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags), 5860 start_dpc, 5861 test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags)); 5862 ql_dbg(ql_dbg_timer, vha, 0x600c, 5863 "beacon_blink_needed=%d isp_unrecoverable=%d " 5864 "fcoe_ctx_reset_needed=%d vp_dpc_needed=%d " 5865 "relogin_needed=%d.\n", 5866 test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags), 5867 test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags), 5868 test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags), 5869 test_bit(VP_DPC_NEEDED, &vha->dpc_flags), 5870 test_bit(RELOGIN_NEEDED, &vha->dpc_flags)); 5871 qla2xxx_wake_dpc(vha); 5872 } 5873 5874 qla2x00_restart_timer(vha, WATCH_INTERVAL); 5875 } 5876 5877 /* Firmware interface routines. */ 5878 5879 #define FW_BLOBS 11 5880 #define FW_ISP21XX 0 5881 #define FW_ISP22XX 1 5882 #define FW_ISP2300 2 5883 #define FW_ISP2322 3 5884 #define FW_ISP24XX 4 5885 #define FW_ISP25XX 5 5886 #define FW_ISP81XX 6 5887 #define FW_ISP82XX 7 5888 #define FW_ISP2031 8 5889 #define FW_ISP8031 9 5890 #define FW_ISP27XX 10 5891 5892 #define FW_FILE_ISP21XX "ql2100_fw.bin" 5893 #define FW_FILE_ISP22XX "ql2200_fw.bin" 5894 #define FW_FILE_ISP2300 "ql2300_fw.bin" 5895 #define FW_FILE_ISP2322 "ql2322_fw.bin" 5896 #define FW_FILE_ISP24XX "ql2400_fw.bin" 5897 #define FW_FILE_ISP25XX "ql2500_fw.bin" 5898 #define FW_FILE_ISP81XX "ql8100_fw.bin" 5899 #define FW_FILE_ISP82XX "ql8200_fw.bin" 5900 #define FW_FILE_ISP2031 "ql2600_fw.bin" 5901 #define FW_FILE_ISP8031 "ql8300_fw.bin" 5902 #define FW_FILE_ISP27XX "ql2700_fw.bin" 5903 5904 5905 static DEFINE_MUTEX(qla_fw_lock); 5906 5907 static struct fw_blob qla_fw_blobs[FW_BLOBS] = { 5908 { .name = FW_FILE_ISP21XX, .segs = { 0x1000, 0 }, }, 5909 { .name = FW_FILE_ISP22XX, .segs = { 0x1000, 0 }, }, 5910 { .name = FW_FILE_ISP2300, .segs = { 0x800, 0 }, }, 5911 { .name = FW_FILE_ISP2322, .segs = { 0x800, 0x1c000, 0x1e000, 0 }, }, 5912 { .name = FW_FILE_ISP24XX, }, 5913 { .name = FW_FILE_ISP25XX, }, 5914 { .name = FW_FILE_ISP81XX, }, 5915 { .name = FW_FILE_ISP82XX, }, 5916 { .name = FW_FILE_ISP2031, }, 5917 { .name = FW_FILE_ISP8031, }, 5918 { .name = FW_FILE_ISP27XX, }, 5919 }; 5920 5921 struct fw_blob * 5922 qla2x00_request_firmware(scsi_qla_host_t *vha) 5923 { 5924 struct qla_hw_data *ha = vha->hw; 5925 struct fw_blob *blob; 5926 5927 if (IS_QLA2100(ha)) { 5928 blob = &qla_fw_blobs[FW_ISP21XX]; 5929 } else if (IS_QLA2200(ha)) { 5930 blob = &qla_fw_blobs[FW_ISP22XX]; 5931 } else if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) { 5932 blob = &qla_fw_blobs[FW_ISP2300]; 5933 } else if (IS_QLA2322(ha) || IS_QLA6322(ha)) { 5934 blob = &qla_fw_blobs[FW_ISP2322]; 5935 } else if (IS_QLA24XX_TYPE(ha)) { 5936 blob = &qla_fw_blobs[FW_ISP24XX]; 5937 } else if (IS_QLA25XX(ha)) { 5938 blob = &qla_fw_blobs[FW_ISP25XX]; 5939 } else if (IS_QLA81XX(ha)) { 5940 blob = &qla_fw_blobs[FW_ISP81XX]; 5941 } else if (IS_QLA82XX(ha)) { 5942 blob = &qla_fw_blobs[FW_ISP82XX]; 5943 } else if (IS_QLA2031(ha)) { 5944 blob = &qla_fw_blobs[FW_ISP2031]; 5945 } else if (IS_QLA8031(ha)) { 5946 blob = &qla_fw_blobs[FW_ISP8031]; 5947 } else if (IS_QLA27XX(ha)) { 5948 blob = &qla_fw_blobs[FW_ISP27XX]; 5949 } else { 5950 return NULL; 5951 } 5952 5953 mutex_lock(&qla_fw_lock); 5954 if (blob->fw) 5955 goto out; 5956 5957 if (request_firmware(&blob->fw, blob->name, &ha->pdev->dev)) { 5958 ql_log(ql_log_warn, vha, 0x0063, 5959 "Failed to load firmware image (%s).\n", blob->name); 5960 blob->fw = NULL; 5961 blob = NULL; 5962 goto out; 5963 } 5964 5965 out: 5966 mutex_unlock(&qla_fw_lock); 5967 return blob; 5968 } 5969 5970 static void 5971 qla2x00_release_firmware(void) 5972 { 5973 int idx; 5974 5975 mutex_lock(&qla_fw_lock); 5976 for (idx = 0; idx < FW_BLOBS; idx++) 5977 release_firmware(qla_fw_blobs[idx].fw); 5978 mutex_unlock(&qla_fw_lock); 5979 } 5980 5981 static pci_ers_result_t 5982 qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) 5983 { 5984 scsi_qla_host_t *vha = pci_get_drvdata(pdev); 5985 struct qla_hw_data *ha = vha->hw; 5986 5987 ql_dbg(ql_dbg_aer, vha, 0x9000, 5988 "PCI error detected, state %x.\n", state); 5989 5990 switch (state) { 5991 case pci_channel_io_normal: 5992 ha->flags.eeh_busy = 0; 5993 if (ql2xmqsupport) { 5994 set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags); 5995 qla2xxx_wake_dpc(vha); 5996 } 5997 return PCI_ERS_RESULT_CAN_RECOVER; 5998 case pci_channel_io_frozen: 5999 ha->flags.eeh_busy = 1; 6000 /* For ISP82XX complete any pending mailbox cmd */ 6001 if (IS_QLA82XX(ha)) { 6002 ha->flags.isp82xx_fw_hung = 1; 6003 ql_dbg(ql_dbg_aer, vha, 0x9001, "Pci channel io frozen\n"); 6004 qla82xx_clear_pending_mbx(vha); 6005 } 6006 qla2x00_free_irqs(vha); 6007 pci_disable_device(pdev); 6008 /* Return back all IOs */ 6009 qla2x00_abort_all_cmds(vha, DID_RESET << 16); 6010 if (ql2xmqsupport) { 6011 set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags); 6012 qla2xxx_wake_dpc(vha); 6013 } 6014 return PCI_ERS_RESULT_NEED_RESET; 6015 case pci_channel_io_perm_failure: 6016 ha->flags.pci_channel_io_perm_failure = 1; 6017 qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16); 6018 if (ql2xmqsupport) { 6019 set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags); 6020 qla2xxx_wake_dpc(vha); 6021 } 6022 return PCI_ERS_RESULT_DISCONNECT; 6023 } 6024 return PCI_ERS_RESULT_NEED_RESET; 6025 } 6026 6027 static pci_ers_result_t 6028 qla2xxx_pci_mmio_enabled(struct pci_dev *pdev) 6029 { 6030 int risc_paused = 0; 6031 uint32_t stat; 6032 unsigned long flags; 6033 scsi_qla_host_t *base_vha = pci_get_drvdata(pdev); 6034 struct qla_hw_data *ha = base_vha->hw; 6035 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 6036 struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24; 6037 6038 if (IS_QLA82XX(ha)) 6039 return PCI_ERS_RESULT_RECOVERED; 6040 6041 spin_lock_irqsave(&ha->hardware_lock, flags); 6042 if (IS_QLA2100(ha) || IS_QLA2200(ha)){ 6043 stat = RD_REG_DWORD(®->hccr); 6044 if (stat & HCCR_RISC_PAUSE) 6045 risc_paused = 1; 6046 } else if (IS_QLA23XX(ha)) { 6047 stat = RD_REG_DWORD(®->u.isp2300.host_status); 6048 if (stat & HSR_RISC_PAUSED) 6049 risc_paused = 1; 6050 } else if (IS_FWI2_CAPABLE(ha)) { 6051 stat = RD_REG_DWORD(®24->host_status); 6052 if (stat & HSRX_RISC_PAUSED) 6053 risc_paused = 1; 6054 } 6055 spin_unlock_irqrestore(&ha->hardware_lock, flags); 6056 6057 if (risc_paused) { 6058 ql_log(ql_log_info, base_vha, 0x9003, 6059 "RISC paused -- mmio_enabled, Dumping firmware.\n"); 6060 ha->isp_ops->fw_dump(base_vha, 0); 6061 6062 return PCI_ERS_RESULT_NEED_RESET; 6063 } else 6064 return PCI_ERS_RESULT_RECOVERED; 6065 } 6066 6067 static uint32_t 6068 qla82xx_error_recovery(scsi_qla_host_t *base_vha) 6069 { 6070 uint32_t rval = QLA_FUNCTION_FAILED; 6071 uint32_t drv_active = 0; 6072 struct qla_hw_data *ha = base_vha->hw; 6073 int fn; 6074 struct pci_dev *other_pdev = NULL; 6075 6076 ql_dbg(ql_dbg_aer, base_vha, 0x9006, 6077 "Entered %s.\n", __func__); 6078 6079 set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); 6080 6081 if (base_vha->flags.online) { 6082 /* Abort all outstanding commands, 6083 * so as to be requeued later */ 6084 qla2x00_abort_isp_cleanup(base_vha); 6085 } 6086 6087 6088 fn = PCI_FUNC(ha->pdev->devfn); 6089 while (fn > 0) { 6090 fn--; 6091 ql_dbg(ql_dbg_aer, base_vha, 0x9007, 6092 "Finding pci device at function = 0x%x.\n", fn); 6093 other_pdev = 6094 pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus), 6095 ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn), 6096 fn)); 6097 6098 if (!other_pdev) 6099 continue; 6100 if (atomic_read(&other_pdev->enable_cnt)) { 6101 ql_dbg(ql_dbg_aer, base_vha, 0x9008, 6102 "Found PCI func available and enable at 0x%x.\n", 6103 fn); 6104 pci_dev_put(other_pdev); 6105 break; 6106 } 6107 pci_dev_put(other_pdev); 6108 } 6109 6110 if (!fn) { 6111 /* Reset owner */ 6112 ql_dbg(ql_dbg_aer, base_vha, 0x9009, 6113 "This devfn is reset owner = 0x%x.\n", 6114 ha->pdev->devfn); 6115 qla82xx_idc_lock(ha); 6116 6117 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 6118 QLA8XXX_DEV_INITIALIZING); 6119 6120 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION, 6121 QLA82XX_IDC_VERSION); 6122 6123 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); 6124 ql_dbg(ql_dbg_aer, base_vha, 0x900a, 6125 "drv_active = 0x%x.\n", drv_active); 6126 6127 qla82xx_idc_unlock(ha); 6128 /* Reset if device is not already reset 6129 * drv_active would be 0 if a reset has already been done 6130 */ 6131 if (drv_active) 6132 rval = qla82xx_start_firmware(base_vha); 6133 else 6134 rval = QLA_SUCCESS; 6135 qla82xx_idc_lock(ha); 6136 6137 if (rval != QLA_SUCCESS) { 6138 ql_log(ql_log_info, base_vha, 0x900b, 6139 "HW State: FAILED.\n"); 6140 qla82xx_clear_drv_active(ha); 6141 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 6142 QLA8XXX_DEV_FAILED); 6143 } else { 6144 ql_log(ql_log_info, base_vha, 0x900c, 6145 "HW State: READY.\n"); 6146 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 6147 QLA8XXX_DEV_READY); 6148 qla82xx_idc_unlock(ha); 6149 ha->flags.isp82xx_fw_hung = 0; 6150 rval = qla82xx_restart_isp(base_vha); 6151 qla82xx_idc_lock(ha); 6152 /* Clear driver state register */ 6153 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, 0); 6154 qla82xx_set_drv_active(base_vha); 6155 } 6156 qla82xx_idc_unlock(ha); 6157 } else { 6158 ql_dbg(ql_dbg_aer, base_vha, 0x900d, 6159 "This devfn is not reset owner = 0x%x.\n", 6160 ha->pdev->devfn); 6161 if ((qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE) == 6162 QLA8XXX_DEV_READY)) { 6163 ha->flags.isp82xx_fw_hung = 0; 6164 rval = qla82xx_restart_isp(base_vha); 6165 qla82xx_idc_lock(ha); 6166 qla82xx_set_drv_active(base_vha); 6167 qla82xx_idc_unlock(ha); 6168 } 6169 } 6170 clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); 6171 6172 return rval; 6173 } 6174 6175 static pci_ers_result_t 6176 qla2xxx_pci_slot_reset(struct pci_dev *pdev) 6177 { 6178 pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT; 6179 scsi_qla_host_t *base_vha = pci_get_drvdata(pdev); 6180 struct qla_hw_data *ha = base_vha->hw; 6181 struct rsp_que *rsp; 6182 int rc, retries = 10; 6183 6184 ql_dbg(ql_dbg_aer, base_vha, 0x9004, 6185 "Slot Reset.\n"); 6186 6187 /* Workaround: qla2xxx driver which access hardware earlier 6188 * needs error state to be pci_channel_io_online. 6189 * Otherwise mailbox command timesout. 6190 */ 6191 pdev->error_state = pci_channel_io_normal; 6192 6193 pci_restore_state(pdev); 6194 6195 /* pci_restore_state() clears the saved_state flag of the device 6196 * save restored state which resets saved_state flag 6197 */ 6198 pci_save_state(pdev); 6199 6200 if (ha->mem_only) 6201 rc = pci_enable_device_mem(pdev); 6202 else 6203 rc = pci_enable_device(pdev); 6204 6205 if (rc) { 6206 ql_log(ql_log_warn, base_vha, 0x9005, 6207 "Can't re-enable PCI device after reset.\n"); 6208 goto exit_slot_reset; 6209 } 6210 6211 rsp = ha->rsp_q_map[0]; 6212 if (qla2x00_request_irqs(ha, rsp)) 6213 goto exit_slot_reset; 6214 6215 if (ha->isp_ops->pci_config(base_vha)) 6216 goto exit_slot_reset; 6217 6218 if (IS_QLA82XX(ha)) { 6219 if (qla82xx_error_recovery(base_vha) == QLA_SUCCESS) { 6220 ret = PCI_ERS_RESULT_RECOVERED; 6221 goto exit_slot_reset; 6222 } else 6223 goto exit_slot_reset; 6224 } 6225 6226 while (ha->flags.mbox_busy && retries--) 6227 msleep(1000); 6228 6229 set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); 6230 if (ha->isp_ops->abort_isp(base_vha) == QLA_SUCCESS) 6231 ret = PCI_ERS_RESULT_RECOVERED; 6232 clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); 6233 6234 6235 exit_slot_reset: 6236 ql_dbg(ql_dbg_aer, base_vha, 0x900e, 6237 "slot_reset return %x.\n", ret); 6238 6239 return ret; 6240 } 6241 6242 static void 6243 qla2xxx_pci_resume(struct pci_dev *pdev) 6244 { 6245 scsi_qla_host_t *base_vha = pci_get_drvdata(pdev); 6246 struct qla_hw_data *ha = base_vha->hw; 6247 int ret; 6248 6249 ql_dbg(ql_dbg_aer, base_vha, 0x900f, 6250 "pci_resume.\n"); 6251 6252 ret = qla2x00_wait_for_hba_online(base_vha); 6253 if (ret != QLA_SUCCESS) { 6254 ql_log(ql_log_fatal, base_vha, 0x9002, 6255 "The device failed to resume I/O from slot/link_reset.\n"); 6256 } 6257 6258 pci_cleanup_aer_uncorrect_error_status(pdev); 6259 6260 ha->flags.eeh_busy = 0; 6261 } 6262 6263 static void 6264 qla83xx_disable_laser(scsi_qla_host_t *vha) 6265 { 6266 uint32_t reg, data, fn; 6267 struct qla_hw_data *ha = vha->hw; 6268 struct device_reg_24xx __iomem *isp_reg = &ha->iobase->isp24; 6269 6270 /* pci func #/port # */ 6271 ql_dbg(ql_dbg_init, vha, 0x004b, 6272 "Disabling Laser for hba: %p\n", vha); 6273 6274 fn = (RD_REG_DWORD(&isp_reg->ctrl_status) & 6275 (BIT_15|BIT_14|BIT_13|BIT_12)); 6276 6277 fn = (fn >> 12); 6278 6279 if (fn & 1) 6280 reg = PORT_1_2031; 6281 else 6282 reg = PORT_0_2031; 6283 6284 data = LASER_OFF_2031; 6285 6286 qla83xx_wr_reg(vha, reg, data); 6287 } 6288 6289 static int qla2xxx_map_queues(struct Scsi_Host *shost) 6290 { 6291 scsi_qla_host_t *vha = (scsi_qla_host_t *)shost->hostdata; 6292 6293 return blk_mq_pci_map_queues(&shost->tag_set, vha->hw->pdev); 6294 } 6295 6296 static const struct pci_error_handlers qla2xxx_err_handler = { 6297 .error_detected = qla2xxx_pci_error_detected, 6298 .mmio_enabled = qla2xxx_pci_mmio_enabled, 6299 .slot_reset = qla2xxx_pci_slot_reset, 6300 .resume = qla2xxx_pci_resume, 6301 }; 6302 6303 static struct pci_device_id qla2xxx_pci_tbl[] = { 6304 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2100) }, 6305 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2200) }, 6306 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2300) }, 6307 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2312) }, 6308 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2322) }, 6309 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP6312) }, 6310 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP6322) }, 6311 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2422) }, 6312 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2432) }, 6313 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8432) }, 6314 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5422) }, 6315 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5432) }, 6316 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2532) }, 6317 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2031) }, 6318 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8001) }, 6319 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8021) }, 6320 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8031) }, 6321 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISPF001) }, 6322 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8044) }, 6323 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2071) }, 6324 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2271) }, 6325 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2261) }, 6326 { 0 }, 6327 }; 6328 MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl); 6329 6330 static struct pci_driver qla2xxx_pci_driver = { 6331 .name = QLA2XXX_DRIVER_NAME, 6332 .driver = { 6333 .owner = THIS_MODULE, 6334 }, 6335 .id_table = qla2xxx_pci_tbl, 6336 .probe = qla2x00_probe_one, 6337 .remove = qla2x00_remove_one, 6338 .shutdown = qla2x00_shutdown, 6339 .err_handler = &qla2xxx_err_handler, 6340 }; 6341 6342 static const struct file_operations apidev_fops = { 6343 .owner = THIS_MODULE, 6344 .llseek = noop_llseek, 6345 }; 6346 6347 /** 6348 * qla2x00_module_init - Module initialization. 6349 **/ 6350 static int __init 6351 qla2x00_module_init(void) 6352 { 6353 int ret = 0; 6354 6355 /* Allocate cache for SRBs. */ 6356 srb_cachep = kmem_cache_create("qla2xxx_srbs", sizeof(srb_t), 0, 6357 SLAB_HWCACHE_ALIGN, NULL); 6358 if (srb_cachep == NULL) { 6359 ql_log(ql_log_fatal, NULL, 0x0001, 6360 "Unable to allocate SRB cache...Failing load!.\n"); 6361 return -ENOMEM; 6362 } 6363 6364 /* Initialize target kmem_cache and mem_pools */ 6365 ret = qlt_init(); 6366 if (ret < 0) { 6367 kmem_cache_destroy(srb_cachep); 6368 return ret; 6369 } else if (ret > 0) { 6370 /* 6371 * If initiator mode is explictly disabled by qlt_init(), 6372 * prevent scsi_transport_fc.c:fc_scsi_scan_rport() from 6373 * performing scsi_scan_target() during LOOP UP event. 6374 */ 6375 qla2xxx_transport_functions.disable_target_scan = 1; 6376 qla2xxx_transport_vport_functions.disable_target_scan = 1; 6377 } 6378 6379 /* Derive version string. */ 6380 strcpy(qla2x00_version_str, QLA2XXX_VERSION); 6381 if (ql2xextended_error_logging) 6382 strcat(qla2x00_version_str, "-debug"); 6383 6384 qla2xxx_transport_template = 6385 fc_attach_transport(&qla2xxx_transport_functions); 6386 if (!qla2xxx_transport_template) { 6387 kmem_cache_destroy(srb_cachep); 6388 ql_log(ql_log_fatal, NULL, 0x0002, 6389 "fc_attach_transport failed...Failing load!.\n"); 6390 qlt_exit(); 6391 return -ENODEV; 6392 } 6393 6394 apidev_major = register_chrdev(0, QLA2XXX_APIDEV, &apidev_fops); 6395 if (apidev_major < 0) { 6396 ql_log(ql_log_fatal, NULL, 0x0003, 6397 "Unable to register char device %s.\n", QLA2XXX_APIDEV); 6398 } 6399 6400 qla2xxx_transport_vport_template = 6401 fc_attach_transport(&qla2xxx_transport_vport_functions); 6402 if (!qla2xxx_transport_vport_template) { 6403 kmem_cache_destroy(srb_cachep); 6404 qlt_exit(); 6405 fc_release_transport(qla2xxx_transport_template); 6406 ql_log(ql_log_fatal, NULL, 0x0004, 6407 "fc_attach_transport vport failed...Failing load!.\n"); 6408 return -ENODEV; 6409 } 6410 ql_log(ql_log_info, NULL, 0x0005, 6411 "QLogic Fibre Channel HBA Driver: %s.\n", 6412 qla2x00_version_str); 6413 ret = pci_register_driver(&qla2xxx_pci_driver); 6414 if (ret) { 6415 kmem_cache_destroy(srb_cachep); 6416 qlt_exit(); 6417 fc_release_transport(qla2xxx_transport_template); 6418 fc_release_transport(qla2xxx_transport_vport_template); 6419 ql_log(ql_log_fatal, NULL, 0x0006, 6420 "pci_register_driver failed...ret=%d Failing load!.\n", 6421 ret); 6422 } 6423 return ret; 6424 } 6425 6426 /** 6427 * qla2x00_module_exit - Module cleanup. 6428 **/ 6429 static void __exit 6430 qla2x00_module_exit(void) 6431 { 6432 unregister_chrdev(apidev_major, QLA2XXX_APIDEV); 6433 pci_unregister_driver(&qla2xxx_pci_driver); 6434 qla2x00_release_firmware(); 6435 kmem_cache_destroy(srb_cachep); 6436 qlt_exit(); 6437 if (ctx_cachep) 6438 kmem_cache_destroy(ctx_cachep); 6439 fc_release_transport(qla2xxx_transport_template); 6440 fc_release_transport(qla2xxx_transport_vport_template); 6441 } 6442 6443 module_init(qla2x00_module_init); 6444 module_exit(qla2x00_module_exit); 6445 6446 MODULE_AUTHOR("QLogic Corporation"); 6447 MODULE_DESCRIPTION("QLogic Fibre Channel HBA Driver"); 6448 MODULE_LICENSE("GPL"); 6449 MODULE_VERSION(QLA2XXX_VERSION); 6450 MODULE_FIRMWARE(FW_FILE_ISP21XX); 6451 MODULE_FIRMWARE(FW_FILE_ISP22XX); 6452 MODULE_FIRMWARE(FW_FILE_ISP2300); 6453 MODULE_FIRMWARE(FW_FILE_ISP2322); 6454 MODULE_FIRMWARE(FW_FILE_ISP24XX); 6455 MODULE_FIRMWARE(FW_FILE_ISP25XX); 6456