/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/kobject.h>
#include <linux/slab.h>
#include <linux/blk-mq-pci.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_fc.h>

#include "qla_target.h"

/*
 * Driver version
 */
char qla2x00_version_str[40];

static int apidev_major;

/*
 * SRB allocation cache
 */
struct kmem_cache *srb_cachep;

/*
 * CT6 CTX allocation cache
 */
static struct kmem_cache *ctx_cachep;

/*
 * error level for logging
 */
int ql_errlev = ql_log_all;

static int ql2xenableclass2;
module_param(ql2xenableclass2, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xenableclass2,
	"Specify if Class 2 operations are supported from the very "
	"beginning. Default is 0 - class 2 not supported.");

int ql2xlogintimeout = 20;
module_param(ql2xlogintimeout, int, S_IRUGO);
MODULE_PARM_DESC(ql2xlogintimeout,
	"Login timeout value in seconds.");

int qlport_down_retry;
module_param(qlport_down_retry, int, S_IRUGO);
MODULE_PARM_DESC(qlport_down_retry,
	"Maximum number of command retries to a port that returns "
	"a PORT-DOWN status.");

int ql2xplogiabsentdevice;
module_param(ql2xplogiabsentdevice, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xplogiabsentdevice,
	"Option to enable PLOGI to devices that are not present after "
	"a Fabric scan. This is needed for several broken switches. "
	"Default is 0 - no PLOGI. 1 - perform PLOGI.");

int ql2xloginretrycount = 0;
module_param(ql2xloginretrycount, int, S_IRUGO);
MODULE_PARM_DESC(ql2xloginretrycount,
	"Specify an alternate value for the NVRAM login retry count.");

int ql2xallocfwdump = 1;
module_param(ql2xallocfwdump, int, S_IRUGO);
MODULE_PARM_DESC(ql2xallocfwdump,
	"Option to enable allocation of memory for a firmware dump "
	"during HBA initialization. Memory allocation requirements "
	"vary by ISP type. Default is 1 - allocate memory.");

int ql2xextended_error_logging;
module_param(ql2xextended_error_logging, int, S_IRUGO|S_IWUSR);
module_param_named(logging, ql2xextended_error_logging, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xextended_error_logging,
	"Option to enable extended error logging,\n"
	"\t\tDefault is 0 - no logging. 0x40000000 - Module Init & Probe.\n"
	"\t\t0x20000000 - Mailbox Cmnds. 0x10000000 - Device Discovery.\n"
	"\t\t0x08000000 - IO tracing.    0x04000000 - DPC Thread.\n"
	"\t\t0x02000000 - Async events.  0x01000000 - Timer routines.\n"
	"\t\t0x00800000 - User space.    0x00400000 - Task Management.\n"
	"\t\t0x00200000 - AER/EEH.       0x00100000 - Multi Q.\n"
	"\t\t0x00080000 - P3P Specific.  0x00040000 - Virtual Port.\n"
	"\t\t0x00020000 - Buffer Dump.   0x00010000 - Misc.\n"
	"\t\t0x00008000 - Verbose.       0x00004000 - Target.\n"
	"\t\t0x00002000 - Target Mgmt.   0x00001000 - Target TMF.\n"
	"\t\t0x7fffffff - For enabling all logs, can be too many logs.\n"
	"\t\t0x1e400000 - Preferred value for capturing essential "
	"debug information (equivalent to old "
	"ql2xextended_error_logging=1).\n"
	"\t\tDo LOGICAL OR of the value to enable more than one level");
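
/*
 * Example (illustrative, not from the original source): because the mask
 * bits are ORed together, several areas can be enabled at once, e.g. at
 * load time with "modprobe qla2xxx ql2xextended_error_logging=0x1e400000"
 * or, since the parameter is writable (S_IWUSR), at runtime through
 * /sys/module/qla2xxx/parameters/ql2xextended_error_logging.
 */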

int ql2xshiftctondsd = 6;
module_param(ql2xshiftctondsd, int, S_IRUGO);
MODULE_PARM_DESC(ql2xshiftctondsd,
	"Set to control shifting of command type processing "
	"based on total number of SG elements.");

int ql2xfdmienable = 1;
module_param(ql2xfdmienable, int, S_IRUGO|S_IWUSR);
module_param_named(fdmi, ql2xfdmienable, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xfdmienable,
	"Enables FDMI registrations. "
	"0 - no FDMI. Default is 1 - perform FDMI.");

#define MAX_Q_DEPTH	32
static int ql2xmaxqdepth = MAX_Q_DEPTH;
module_param(ql2xmaxqdepth, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xmaxqdepth,
	"Maximum queue depth to set for each LUN. "
	"Default is 32.");

int ql2xenabledif = 2;
module_param(ql2xenabledif, int, S_IRUGO);
MODULE_PARM_DESC(ql2xenabledif,
	" Enable T10-CRC-DIF:\n"
	" Default is 2.\n"
	"  0 -- No DIF Support\n"
	"  1 -- Enable DIF for all types\n"
	"  2 -- Enable DIF for all types, except Type 0.\n");

int ql2xenablehba_err_chk = 2;
module_param(ql2xenablehba_err_chk, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xenablehba_err_chk,
	" Enable T10-CRC-DIF Error isolation by HBA:\n"
	" Default is 2.\n"
	"  0 -- Error isolation disabled\n"
	"  1 -- Error isolation enabled only for DIX Type 0\n"
	"  2 -- Error isolation enabled for all Types\n");

int ql2xiidmaenable = 1;
module_param(ql2xiidmaenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xiidmaenable,
	"Enables iIDMA settings. "
	"Default is 1 - perform iIDMA. 0 - no iIDMA.");

int ql2xmqsupport = 1;
module_param(ql2xmqsupport, int, S_IRUGO);
MODULE_PARM_DESC(ql2xmqsupport,
	"Enable on demand multiple queue pairs support. "
	"Default is 1 for supported. "
	"Set it to 0 to turn off mq qpair support.");

int ql2xfwloadbin;
module_param(ql2xfwloadbin, int, S_IRUGO|S_IWUSR);
module_param_named(fwload, ql2xfwloadbin, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xfwloadbin,
	"Option to specify location from which to load ISP firmware:\n"
	" 2 -- load firmware via the request_firmware() (hotplug)\n"
	"      interface.\n"
	" 1 -- load firmware from flash.\n"
	" 0 -- use default semantics.\n");

int ql2xetsenable;
module_param(ql2xetsenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xetsenable,
	"Enables firmware ETS burst. "
	"Default is 0 - skip ETS enablement.");

int ql2xdbwr = 1;
module_param(ql2xdbwr, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xdbwr,
	"Option to specify scheme for request queue posting.\n"
	" 0 -- Regular doorbell.\n"
	" 1 -- CAMRAM doorbell (faster).\n");

int ql2xtargetreset = 1;
module_param(ql2xtargetreset, int, S_IRUGO);
MODULE_PARM_DESC(ql2xtargetreset,
	"Enable target reset. "
	"Default is 1 - use hw defaults.");

int ql2xgffidenable;
module_param(ql2xgffidenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xgffidenable,
	"Enables GFF_ID checks of port type. "
	"Default is 0 - Do not use GFF_ID information.");
" 187 "Default is 0 - Do not use GFF_ID information."); 188 189 int ql2xasynctmfenable; 190 module_param(ql2xasynctmfenable, int, S_IRUGO); 191 MODULE_PARM_DESC(ql2xasynctmfenable, 192 "Enables issue of TM IOCBs asynchronously via IOCB mechanism" 193 "Default is 0 - Issue TM IOCBs via mailbox mechanism."); 194 195 int ql2xdontresethba; 196 module_param(ql2xdontresethba, int, S_IRUGO|S_IWUSR); 197 MODULE_PARM_DESC(ql2xdontresethba, 198 "Option to specify reset behaviour.\n" 199 " 0 (Default) -- Reset on failure.\n" 200 " 1 -- Do not reset on failure.\n"); 201 202 uint64_t ql2xmaxlun = MAX_LUNS; 203 module_param(ql2xmaxlun, ullong, S_IRUGO); 204 MODULE_PARM_DESC(ql2xmaxlun, 205 "Defines the maximum LU number to register with the SCSI " 206 "midlayer. Default is 65535."); 207 208 int ql2xmdcapmask = 0x1F; 209 module_param(ql2xmdcapmask, int, S_IRUGO); 210 MODULE_PARM_DESC(ql2xmdcapmask, 211 "Set the Minidump driver capture mask level. " 212 "Default is 0x1F - Can be set to 0x3, 0x7, 0xF, 0x1F, 0x7F."); 213 214 int ql2xmdenable = 1; 215 module_param(ql2xmdenable, int, S_IRUGO); 216 MODULE_PARM_DESC(ql2xmdenable, 217 "Enable/disable MiniDump. " 218 "0 - MiniDump disabled. " 219 "1 (Default) - MiniDump enabled."); 220 221 int ql2xexlogins = 0; 222 module_param(ql2xexlogins, uint, S_IRUGO|S_IWUSR); 223 MODULE_PARM_DESC(ql2xexlogins, 224 "Number of extended Logins. " 225 "0 (Default)- Disabled."); 226 227 int ql2xexchoffld = 0; 228 module_param(ql2xexchoffld, uint, S_IRUGO|S_IWUSR); 229 MODULE_PARM_DESC(ql2xexchoffld, 230 "Number of exchanges to offload. " 231 "0 (Default)- Disabled."); 232 233 int ql2xfwholdabts = 0; 234 module_param(ql2xfwholdabts, int, S_IRUGO); 235 MODULE_PARM_DESC(ql2xfwholdabts, 236 "Allow FW to hold status IOCB until ABTS rsp received. " 237 "0 (Default) Do not set fw option. " 238 "1 - Set fw option to hold ABTS."); 239 240 int ql2xmvasynctoatio = 1; 241 module_param(ql2xmvasynctoatio, int, S_IRUGO|S_IWUSR); 242 MODULE_PARM_DESC(ql2xmvasynctoatio, 243 "Move PUREX, ABTS RX and RIDA IOCBs to ATIOQ" 244 "0 (Default). 
Do not move IOCBs" 245 "1 - Move IOCBs."); 246 247 /* 248 * SCSI host template entry points 249 */ 250 static int qla2xxx_slave_configure(struct scsi_device * device); 251 static int qla2xxx_slave_alloc(struct scsi_device *); 252 static int qla2xxx_scan_finished(struct Scsi_Host *, unsigned long time); 253 static void qla2xxx_scan_start(struct Scsi_Host *); 254 static void qla2xxx_slave_destroy(struct scsi_device *); 255 static int qla2xxx_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmd); 256 static int qla2xxx_eh_abort(struct scsi_cmnd *); 257 static int qla2xxx_eh_device_reset(struct scsi_cmnd *); 258 static int qla2xxx_eh_target_reset(struct scsi_cmnd *); 259 static int qla2xxx_eh_bus_reset(struct scsi_cmnd *); 260 static int qla2xxx_eh_host_reset(struct scsi_cmnd *); 261 262 static void qla2x00_clear_drv_active(struct qla_hw_data *); 263 static void qla2x00_free_device(scsi_qla_host_t *); 264 static void qla83xx_disable_laser(scsi_qla_host_t *vha); 265 static int qla2xxx_map_queues(struct Scsi_Host *shost); 266 267 struct scsi_host_template qla2xxx_driver_template = { 268 .module = THIS_MODULE, 269 .name = QLA2XXX_DRIVER_NAME, 270 .queuecommand = qla2xxx_queuecommand, 271 272 .eh_timed_out = fc_eh_timed_out, 273 .eh_abort_handler = qla2xxx_eh_abort, 274 .eh_device_reset_handler = qla2xxx_eh_device_reset, 275 .eh_target_reset_handler = qla2xxx_eh_target_reset, 276 .eh_bus_reset_handler = qla2xxx_eh_bus_reset, 277 .eh_host_reset_handler = qla2xxx_eh_host_reset, 278 279 .slave_configure = qla2xxx_slave_configure, 280 281 .slave_alloc = qla2xxx_slave_alloc, 282 .slave_destroy = qla2xxx_slave_destroy, 283 .scan_finished = qla2xxx_scan_finished, 284 .scan_start = qla2xxx_scan_start, 285 .change_queue_depth = scsi_change_queue_depth, 286 .map_queues = qla2xxx_map_queues, 287 .this_id = -1, 288 .cmd_per_lun = 3, 289 .use_clustering = ENABLE_CLUSTERING, 290 .sg_tablesize = SG_ALL, 291 292 .max_sectors = 0xFFFF, 293 .shost_attrs = qla2x00_host_attrs, 294 295 .supported_mode = MODE_INITIATOR, 296 .track_queue_depth = 1, 297 }; 298 299 static struct scsi_transport_template *qla2xxx_transport_template = NULL; 300 struct scsi_transport_template *qla2xxx_transport_vport_template = NULL; 301 302 /* TODO Convert to inlines 303 * 304 * Timer routines 305 */ 306 307 __inline__ void 308 qla2x00_start_timer(scsi_qla_host_t *vha, void *func, unsigned long interval) 309 { 310 init_timer(&vha->timer); 311 vha->timer.expires = jiffies + interval * HZ; 312 vha->timer.data = (unsigned long)vha; 313 vha->timer.function = (void (*)(unsigned long))func; 314 add_timer(&vha->timer); 315 vha->timer_active = 1; 316 } 317 318 static inline void 319 qla2x00_restart_timer(scsi_qla_host_t *vha, unsigned long interval) 320 { 321 /* Currently used for 82XX only. 

static inline void
qla2x00_restart_timer(scsi_qla_host_t *vha, unsigned long interval)
{
	/* Currently used for 82XX only. */
	if (vha->device_flags & DFLG_DEV_FAILED) {
		ql_dbg(ql_dbg_timer, vha, 0x600d,
		    "Device in a failed state, returning.\n");
		return;
	}

	mod_timer(&vha->timer, jiffies + interval * HZ);
}

static __inline__ void
qla2x00_stop_timer(scsi_qla_host_t *vha)
{
	del_timer_sync(&vha->timer);
	vha->timer_active = 0;
}

static int qla2x00_do_dpc(void *data);

static void qla2x00_rst_aen(scsi_qla_host_t *);

static int qla2x00_mem_alloc(struct qla_hw_data *, uint16_t, uint16_t,
	struct req_que **, struct rsp_que **);
static void qla2x00_free_fw_dump(struct qla_hw_data *);
static void qla2x00_mem_free(struct qla_hw_data *);
int qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
	struct qla_qpair *qpair);

/* -------------------------------------------------------------------------- */
static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req,
	struct rsp_que *rsp)
{
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

	ha->req_q_map = kzalloc(sizeof(struct req_que *) * ha->max_req_queues,
	    GFP_KERNEL);
	if (!ha->req_q_map) {
		ql_log(ql_log_fatal, vha, 0x003b,
		    "Unable to allocate memory for request queue ptrs.\n");
		goto fail_req_map;
	}

	ha->rsp_q_map = kzalloc(sizeof(struct rsp_que *) * ha->max_rsp_queues,
	    GFP_KERNEL);
	if (!ha->rsp_q_map) {
		ql_log(ql_log_fatal, vha, 0x003c,
		    "Unable to allocate memory for response queue ptrs.\n");
		goto fail_rsp_map;
	}

	if (ql2xmqsupport && ha->max_qpairs) {
		ha->queue_pair_map = kcalloc(ha->max_qpairs,
		    sizeof(struct qla_qpair *), GFP_KERNEL);
		if (!ha->queue_pair_map) {
			ql_log(ql_log_fatal, vha, 0x0180,
			    "Unable to allocate memory for queue pair ptrs.\n");
			goto fail_qpair_map;
		}
		ha->base_qpair = kzalloc(sizeof(struct qla_qpair), GFP_KERNEL);
		if (ha->base_qpair == NULL) {
			ql_log(ql_log_warn, vha, 0x0182,
			    "Failed to allocate base queue pair memory.\n");
			goto fail_base_qpair;
		}
		ha->base_qpair->req = req;
		ha->base_qpair->rsp = rsp;
	}

	/*
	 * Make sure we record at least the request and response queue zero in
	 * case we need to free them if part of the probe fails.
	 */
	ha->rsp_q_map[0] = rsp;
	ha->req_q_map[0] = req;
	set_bit(0, ha->rsp_qid_map);
	set_bit(0, ha->req_qid_map);
	return 1;

fail_base_qpair:
	kfree(ha->queue_pair_map);
fail_qpair_map:
	kfree(ha->rsp_q_map);
	ha->rsp_q_map = NULL;
fail_rsp_map:
	kfree(ha->req_q_map);
	ha->req_q_map = NULL;
fail_req_map:
	return -ENOMEM;
}
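
/*
 * The fail_* labels above unwind in reverse order of allocation, so each
 * label frees only what was successfully allocated before the failing step;
 * a partial failure neither leaks nor double-frees.
 */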
391 */ 392 ha->rsp_q_map[0] = rsp; 393 ha->req_q_map[0] = req; 394 set_bit(0, ha->rsp_qid_map); 395 set_bit(0, ha->req_qid_map); 396 return 1; 397 398 fail_base_qpair: 399 kfree(ha->queue_pair_map); 400 fail_qpair_map: 401 kfree(ha->rsp_q_map); 402 ha->rsp_q_map = NULL; 403 fail_rsp_map: 404 kfree(ha->req_q_map); 405 ha->req_q_map = NULL; 406 fail_req_map: 407 return -ENOMEM; 408 } 409 410 static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req) 411 { 412 if (IS_QLAFX00(ha)) { 413 if (req && req->ring_fx00) 414 dma_free_coherent(&ha->pdev->dev, 415 (req->length_fx00 + 1) * sizeof(request_t), 416 req->ring_fx00, req->dma_fx00); 417 } else if (req && req->ring) 418 dma_free_coherent(&ha->pdev->dev, 419 (req->length + 1) * sizeof(request_t), 420 req->ring, req->dma); 421 422 if (req) 423 kfree(req->outstanding_cmds); 424 425 kfree(req); 426 } 427 428 static void qla2x00_free_rsp_que(struct qla_hw_data *ha, struct rsp_que *rsp) 429 { 430 if (IS_QLAFX00(ha)) { 431 if (rsp && rsp->ring) 432 dma_free_coherent(&ha->pdev->dev, 433 (rsp->length_fx00 + 1) * sizeof(request_t), 434 rsp->ring_fx00, rsp->dma_fx00); 435 } else if (rsp && rsp->ring) { 436 dma_free_coherent(&ha->pdev->dev, 437 (rsp->length + 1) * sizeof(response_t), 438 rsp->ring, rsp->dma); 439 } 440 kfree(rsp); 441 } 442 443 static void qla2x00_free_queues(struct qla_hw_data *ha) 444 { 445 struct req_que *req; 446 struct rsp_que *rsp; 447 int cnt; 448 unsigned long flags; 449 450 spin_lock_irqsave(&ha->hardware_lock, flags); 451 for (cnt = 0; cnt < ha->max_req_queues; cnt++) { 452 if (!test_bit(cnt, ha->req_qid_map)) 453 continue; 454 455 req = ha->req_q_map[cnt]; 456 clear_bit(cnt, ha->req_qid_map); 457 ha->req_q_map[cnt] = NULL; 458 459 spin_unlock_irqrestore(&ha->hardware_lock, flags); 460 qla2x00_free_req_que(ha, req); 461 spin_lock_irqsave(&ha->hardware_lock, flags); 462 } 463 spin_unlock_irqrestore(&ha->hardware_lock, flags); 464 465 kfree(ha->req_q_map); 466 ha->req_q_map = NULL; 467 468 469 spin_lock_irqsave(&ha->hardware_lock, flags); 470 for (cnt = 0; cnt < ha->max_rsp_queues; cnt++) { 471 if (!test_bit(cnt, ha->rsp_qid_map)) 472 continue; 473 474 rsp = ha->rsp_q_map[cnt]; 475 clear_bit(cnt, ha->rsp_qid_map); 476 ha->rsp_q_map[cnt] = NULL; 477 spin_unlock_irqrestore(&ha->hardware_lock, flags); 478 qla2x00_free_rsp_que(ha, rsp); 479 spin_lock_irqsave(&ha->hardware_lock, flags); 480 } 481 spin_unlock_irqrestore(&ha->hardware_lock, flags); 482 483 kfree(ha->rsp_q_map); 484 ha->rsp_q_map = NULL; 485 } 486 487 static char * 488 qla2x00_pci_info_str(struct scsi_qla_host *vha, char *str) 489 { 490 struct qla_hw_data *ha = vha->hw; 491 static char *pci_bus_modes[] = { 492 "33", "66", "100", "133", 493 }; 494 uint16_t pci_bus; 495 496 strcpy(str, "PCI"); 497 pci_bus = (ha->pci_attr & (BIT_9 | BIT_10)) >> 9; 498 if (pci_bus) { 499 strcat(str, "-X ("); 500 strcat(str, pci_bus_modes[pci_bus]); 501 } else { 502 pci_bus = (ha->pci_attr & BIT_8) >> 8; 503 strcat(str, " ("); 504 strcat(str, pci_bus_modes[pci_bus]); 505 } 506 strcat(str, " MHz)"); 507 508 return (str); 509 } 510 511 static char * 512 qla24xx_pci_info_str(struct scsi_qla_host *vha, char *str) 513 { 514 static char *pci_bus_modes[] = { "33", "66", "100", "133", }; 515 struct qla_hw_data *ha = vha->hw; 516 uint32_t pci_bus; 517 518 if (pci_is_pcie(ha->pdev)) { 519 char lwstr[6]; 520 uint32_t lstat, lspeed, lwidth; 521 522 pcie_capability_read_dword(ha->pdev, PCI_EXP_LNKCAP, &lstat); 523 lspeed = lstat & PCI_EXP_LNKCAP_SLS; 524 lwidth = (lstat & 

		strcpy(str, "PCIe (");
		switch (lspeed) {
		case 1:
			strcat(str, "2.5GT/s ");
			break;
		case 2:
			strcat(str, "5.0GT/s ");
			break;
		case 3:
			strcat(str, "8.0GT/s ");
			break;
		default:
			strcat(str, "<unknown> ");
			break;
		}
		snprintf(lwstr, sizeof(lwstr), "x%d)", lwidth);
		strcat(str, lwstr);

		return str;
	}

	strcpy(str, "PCI");
	pci_bus = (ha->pci_attr & CSRX_PCIX_BUS_MODE_MASK) >> 8;
	if (pci_bus == 0 || pci_bus == 8) {
		strcat(str, " (");
		strcat(str, pci_bus_modes[pci_bus >> 3]);
	} else {
		strcat(str, "-X ");
		if (pci_bus & BIT_2)
			strcat(str, "Mode 2");
		else
			strcat(str, "Mode 1");
		strcat(str, " (");
		strcat(str, pci_bus_modes[pci_bus & ~BIT_2]);
	}
	strcat(str, " MHz)");

	return str;
}

static char *
qla2x00_fw_version_str(struct scsi_qla_host *vha, char *str, size_t size)
{
	char un_str[10];
	struct qla_hw_data *ha = vha->hw;

	snprintf(str, size, "%d.%02d.%02d ", ha->fw_major_version,
	    ha->fw_minor_version, ha->fw_subminor_version);

	if (ha->fw_attributes & BIT_9) {
		strcat(str, "FLX");
		return (str);
	}

	switch (ha->fw_attributes & 0xFF) {
	case 0x7:
		strcat(str, "EF");
		break;
	case 0x17:
		strcat(str, "TP");
		break;
	case 0x37:
		strcat(str, "IP");
		break;
	case 0x77:
		strcat(str, "VI");
		break;
	default:
		sprintf(un_str, "(%x)", ha->fw_attributes);
		strcat(str, un_str);
		break;
	}
	if (ha->fw_attributes & 0x100)
		strcat(str, "X");

	return (str);
}

static char *
qla24xx_fw_version_str(struct scsi_qla_host *vha, char *str, size_t size)
{
	struct qla_hw_data *ha = vha->hw;

	snprintf(str, size, "%d.%02d.%02d (%x)", ha->fw_major_version,
	    ha->fw_minor_version, ha->fw_subminor_version, ha->fw_attributes);
	return str;
}

void
qla2x00_sp_free_dma(void *ptr)
{
	srb_t *sp = ptr;
	struct qla_hw_data *ha = sp->vha->hw;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	void *ctx = GET_CMD_CTX_SP(sp);

	if (sp->flags & SRB_DMA_VALID) {
		scsi_dma_unmap(cmd);
		sp->flags &= ~SRB_DMA_VALID;
	}

	if (sp->flags & SRB_CRC_PROT_DMA_VALID) {
		dma_unmap_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
		    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
		sp->flags &= ~SRB_CRC_PROT_DMA_VALID;
	}

	if (!ctx)
		goto end;

	if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
		/* The list is guaranteed to be non-empty here. */
		qla2x00_clean_dsd_pool(ha, ctx);
		sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
	}

	if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
		struct crc_context *ctx0 = ctx;

		dma_pool_free(ha->dl_dma_pool, ctx0, ctx0->crc_ctx_dma);
		sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
	}

	if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
		struct ct6_dsd *ctx1 = ctx;

		dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
		    ctx1->fcp_cmnd_dma);
		list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list);
		ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt;
		ha->gbl_dsd_avail += ctx1->dsd_use_cnt;
		mempool_free(ctx1, ha->ctx_mempool);
	}

end:
	CMD_SP(cmd) = NULL;
	qla2x00_rel_sp(sp);
}

void
qla2x00_sp_compl(void *ptr, int res)
{
	srb_t *sp = ptr;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);

	cmd->result = res;

	if (atomic_read(&sp->ref_count) == 0) {
		ql_dbg(ql_dbg_io, sp->vha, 0x3015,
		    "SP reference-count to ZERO -- sp=%p cmd=%p.\n",
		    sp, GET_CMD_SP(sp));
		if (ql2xextended_error_logging & ql_dbg_io)
			WARN_ON(atomic_read(&sp->ref_count) == 0);
		return;
	}
	if (!atomic_dec_and_test(&sp->ref_count))
		return;

	qla2x00_sp_free_dma(sp);
	cmd->scsi_done(cmd);
}

void
qla2xxx_qpair_sp_free_dma(void *ptr)
{
	srb_t *sp = (srb_t *)ptr;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct qla_hw_data *ha = sp->fcport->vha->hw;
	void *ctx = GET_CMD_CTX_SP(sp);

	if (sp->flags & SRB_DMA_VALID) {
		scsi_dma_unmap(cmd);
		sp->flags &= ~SRB_DMA_VALID;
	}

	if (sp->flags & SRB_CRC_PROT_DMA_VALID) {
		dma_unmap_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
		    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
		sp->flags &= ~SRB_CRC_PROT_DMA_VALID;
	}

	if (!ctx)
		goto end;

	if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
		/* The list is guaranteed to be non-empty here. */
		qla2x00_clean_dsd_pool(ha, ctx);
		sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
	}

	if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
		struct crc_context *ctx0 = ctx;

		dma_pool_free(ha->dl_dma_pool, ctx, ctx0->crc_ctx_dma);
		sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
	}

	if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
		struct ct6_dsd *ctx1 = ctx;

		dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
		    ctx1->fcp_cmnd_dma);
		list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list);
		ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt;
		ha->gbl_dsd_avail += ctx1->dsd_use_cnt;
		mempool_free(ctx1, ha->ctx_mempool);
	}
end:
	CMD_SP(cmd) = NULL;
	qla2xxx_rel_qpair_sp(sp->qpair, sp);
}

void
qla2xxx_qpair_sp_compl(void *ptr, int res)
{
	srb_t *sp = ptr;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);

	cmd->result = res;

	if (atomic_read(&sp->ref_count) == 0) {
		ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3079,
		    "SP reference-count to ZERO -- sp=%p cmd=%p.\n",
		    sp, GET_CMD_SP(sp));
		if (ql2xextended_error_logging & ql_dbg_io)
			WARN_ON(atomic_read(&sp->ref_count) == 0);
		return;
	}
	if (!atomic_dec_and_test(&sp->ref_count))
		return;

	qla2xxx_qpair_sp_free_dma(sp);
	cmd->scsi_done(cmd);
}
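
/*
 * Reference-count protocol (summary inferred from the completion paths
 * above and from qla2xxx_eh_abort below): an srb starts life with
 * ref_count == 1; the completion path drops that reference and frees the
 * DMA mappings only when the count reaches zero, while the abort path takes
 * an extra reference via sp_get() so the srb cannot be freed underneath the
 * abort mailbox command.
 */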
762 */ 763 static int 764 qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) 765 { 766 scsi_qla_host_t *vha = shost_priv(host); 767 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; 768 struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device)); 769 struct qla_hw_data *ha = vha->hw; 770 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 771 srb_t *sp; 772 int rval; 773 struct qla_qpair *qpair = NULL; 774 uint32_t tag; 775 uint16_t hwq; 776 777 if (unlikely(test_bit(UNLOADING, &base_vha->dpc_flags))) { 778 cmd->result = DID_NO_CONNECT << 16; 779 goto qc24_fail_command; 780 } 781 782 if (ha->mqenable) { 783 if (shost_use_blk_mq(vha->host)) { 784 tag = blk_mq_unique_tag(cmd->request); 785 hwq = blk_mq_unique_tag_to_hwq(tag); 786 qpair = ha->queue_pair_map[hwq]; 787 } else if (vha->vp_idx && vha->qpair) { 788 qpair = vha->qpair; 789 } 790 791 if (qpair) 792 return qla2xxx_mqueuecommand(host, cmd, qpair); 793 } 794 795 if (ha->flags.eeh_busy) { 796 if (ha->flags.pci_channel_io_perm_failure) { 797 ql_dbg(ql_dbg_aer, vha, 0x9010, 798 "PCI Channel IO permanent failure, exiting " 799 "cmd=%p.\n", cmd); 800 cmd->result = DID_NO_CONNECT << 16; 801 } else { 802 ql_dbg(ql_dbg_aer, vha, 0x9011, 803 "EEH_Busy, Requeuing the cmd=%p.\n", cmd); 804 cmd->result = DID_REQUEUE << 16; 805 } 806 goto qc24_fail_command; 807 } 808 809 rval = fc_remote_port_chkready(rport); 810 if (rval) { 811 cmd->result = rval; 812 ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3003, 813 "fc_remote_port_chkready failed for cmd=%p, rval=0x%x.\n", 814 cmd, rval); 815 goto qc24_fail_command; 816 } 817 818 if (!vha->flags.difdix_supported && 819 scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) { 820 ql_dbg(ql_dbg_io, vha, 0x3004, 821 "DIF Cap not reg, fail DIF capable cmd's:%p.\n", 822 cmd); 823 cmd->result = DID_NO_CONNECT << 16; 824 goto qc24_fail_command; 825 } 826 827 if (!fcport) { 828 cmd->result = DID_NO_CONNECT << 16; 829 goto qc24_fail_command; 830 } 831 832 if (atomic_read(&fcport->state) != FCS_ONLINE) { 833 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD || 834 atomic_read(&base_vha->loop_state) == LOOP_DEAD) { 835 ql_dbg(ql_dbg_io, vha, 0x3005, 836 "Returning DNC, fcport_state=%d loop_state=%d.\n", 837 atomic_read(&fcport->state), 838 atomic_read(&base_vha->loop_state)); 839 cmd->result = DID_NO_CONNECT << 16; 840 goto qc24_fail_command; 841 } 842 goto qc24_target_busy; 843 } 844 845 /* 846 * Return target busy if we've received a non-zero retry_delay_timer 847 * in a FCP_RSP. 
848 */ 849 if (fcport->retry_delay_timestamp == 0) { 850 /* retry delay not set */ 851 } else if (time_after(jiffies, fcport->retry_delay_timestamp)) 852 fcport->retry_delay_timestamp = 0; 853 else 854 goto qc24_target_busy; 855 856 sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC); 857 if (!sp) 858 goto qc24_host_busy; 859 860 sp->u.scmd.cmd = cmd; 861 sp->type = SRB_SCSI_CMD; 862 atomic_set(&sp->ref_count, 1); 863 CMD_SP(cmd) = (void *)sp; 864 sp->free = qla2x00_sp_free_dma; 865 sp->done = qla2x00_sp_compl; 866 867 rval = ha->isp_ops->start_scsi(sp); 868 if (rval != QLA_SUCCESS) { 869 ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3013, 870 "Start scsi failed rval=%d for cmd=%p.\n", rval, cmd); 871 goto qc24_host_busy_free_sp; 872 } 873 874 return 0; 875 876 qc24_host_busy_free_sp: 877 qla2x00_sp_free_dma(sp); 878 879 qc24_host_busy: 880 return SCSI_MLQUEUE_HOST_BUSY; 881 882 qc24_target_busy: 883 return SCSI_MLQUEUE_TARGET_BUSY; 884 885 qc24_fail_command: 886 cmd->scsi_done(cmd); 887 888 return 0; 889 } 890 891 /* For MQ supported I/O */ 892 int 893 qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd, 894 struct qla_qpair *qpair) 895 { 896 scsi_qla_host_t *vha = shost_priv(host); 897 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; 898 struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device)); 899 struct qla_hw_data *ha = vha->hw; 900 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 901 srb_t *sp; 902 int rval; 903 904 rval = fc_remote_port_chkready(rport); 905 if (rval) { 906 cmd->result = rval; 907 ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3076, 908 "fc_remote_port_chkready failed for cmd=%p, rval=0x%x.\n", 909 cmd, rval); 910 goto qc24_fail_command; 911 } 912 913 if (!fcport) { 914 cmd->result = DID_NO_CONNECT << 16; 915 goto qc24_fail_command; 916 } 917 918 if (atomic_read(&fcport->state) != FCS_ONLINE) { 919 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD || 920 atomic_read(&base_vha->loop_state) == LOOP_DEAD) { 921 ql_dbg(ql_dbg_io, vha, 0x3077, 922 "Returning DNC, fcport_state=%d loop_state=%d.\n", 923 atomic_read(&fcport->state), 924 atomic_read(&base_vha->loop_state)); 925 cmd->result = DID_NO_CONNECT << 16; 926 goto qc24_fail_command; 927 } 928 goto qc24_target_busy; 929 } 930 931 /* 932 * Return target busy if we've received a non-zero retry_delay_timer 933 * in a FCP_RSP. 
934 */ 935 if (fcport->retry_delay_timestamp == 0) { 936 /* retry delay not set */ 937 } else if (time_after(jiffies, fcport->retry_delay_timestamp)) 938 fcport->retry_delay_timestamp = 0; 939 else 940 goto qc24_target_busy; 941 942 sp = qla2xxx_get_qpair_sp(qpair, fcport, GFP_ATOMIC); 943 if (!sp) 944 goto qc24_host_busy; 945 946 sp->u.scmd.cmd = cmd; 947 sp->type = SRB_SCSI_CMD; 948 atomic_set(&sp->ref_count, 1); 949 CMD_SP(cmd) = (void *)sp; 950 sp->free = qla2xxx_qpair_sp_free_dma; 951 sp->done = qla2xxx_qpair_sp_compl; 952 sp->qpair = qpair; 953 954 rval = ha->isp_ops->start_scsi_mq(sp); 955 if (rval != QLA_SUCCESS) { 956 ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3078, 957 "Start scsi failed rval=%d for cmd=%p.\n", rval, cmd); 958 if (rval == QLA_INTERFACE_ERROR) 959 goto qc24_fail_command; 960 goto qc24_host_busy_free_sp; 961 } 962 963 return 0; 964 965 qc24_host_busy_free_sp: 966 qla2xxx_qpair_sp_free_dma(sp); 967 968 qc24_host_busy: 969 return SCSI_MLQUEUE_HOST_BUSY; 970 971 qc24_target_busy: 972 return SCSI_MLQUEUE_TARGET_BUSY; 973 974 qc24_fail_command: 975 cmd->scsi_done(cmd); 976 977 return 0; 978 } 979 980 /* 981 * qla2x00_eh_wait_on_command 982 * Waits for the command to be returned by the Firmware for some 983 * max time. 984 * 985 * Input: 986 * cmd = Scsi Command to wait on. 987 * 988 * Return: 989 * Not Found : 0 990 * Found : 1 991 */ 992 static int 993 qla2x00_eh_wait_on_command(struct scsi_cmnd *cmd) 994 { 995 #define ABORT_POLLING_PERIOD 1000 996 #define ABORT_WAIT_ITER ((2 * 1000) / (ABORT_POLLING_PERIOD)) 997 unsigned long wait_iter = ABORT_WAIT_ITER; 998 scsi_qla_host_t *vha = shost_priv(cmd->device->host); 999 struct qla_hw_data *ha = vha->hw; 1000 int ret = QLA_SUCCESS; 1001 1002 if (unlikely(pci_channel_offline(ha->pdev)) || ha->flags.eeh_busy) { 1003 ql_dbg(ql_dbg_taskm, vha, 0x8005, 1004 "Return:eh_wait.\n"); 1005 return ret; 1006 } 1007 1008 while (CMD_SP(cmd) && wait_iter--) { 1009 msleep(ABORT_POLLING_PERIOD); 1010 } 1011 if (CMD_SP(cmd)) 1012 ret = QLA_FUNCTION_FAILED; 1013 1014 return ret; 1015 } 1016 1017 /* 1018 * qla2x00_wait_for_hba_online 1019 * Wait till the HBA is online after going through 1020 * <= MAX_RETRIES_OF_ISP_ABORT or 1021 * finally HBA is disabled ie marked offline 1022 * 1023 * Input: 1024 * ha - pointer to host adapter structure 1025 * 1026 * Note: 1027 * Does context switching-Release SPIN_LOCK 1028 * (if any) before calling this routine. 
1029 * 1030 * Return: 1031 * Success (Adapter is online) : 0 1032 * Failed (Adapter is offline/disabled) : 1 1033 */ 1034 int 1035 qla2x00_wait_for_hba_online(scsi_qla_host_t *vha) 1036 { 1037 int return_status; 1038 unsigned long wait_online; 1039 struct qla_hw_data *ha = vha->hw; 1040 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 1041 1042 wait_online = jiffies + (MAX_LOOP_TIMEOUT * HZ); 1043 while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) || 1044 test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) || 1045 test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) || 1046 ha->dpc_active) && time_before(jiffies, wait_online)) { 1047 1048 msleep(1000); 1049 } 1050 if (base_vha->flags.online) 1051 return_status = QLA_SUCCESS; 1052 else 1053 return_status = QLA_FUNCTION_FAILED; 1054 1055 return (return_status); 1056 } 1057 1058 static inline int test_fcport_count(scsi_qla_host_t *vha) 1059 { 1060 struct qla_hw_data *ha = vha->hw; 1061 unsigned long flags; 1062 int res; 1063 1064 spin_lock_irqsave(&ha->tgt.sess_lock, flags); 1065 ql_dbg(ql_dbg_init, vha, 0xffff, 1066 "tgt %p, fcport_count=%d\n", 1067 vha, vha->fcport_count); 1068 res = (vha->fcport_count == 0); 1069 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 1070 1071 return res; 1072 } 1073 1074 /* 1075 * qla2x00_wait_for_sess_deletion can only be called from remove_one. 1076 * it has dependency on UNLOADING flag to stop device discovery 1077 */ 1078 static void 1079 qla2x00_wait_for_sess_deletion(scsi_qla_host_t *vha) 1080 { 1081 qla2x00_mark_all_devices_lost(vha, 0); 1082 1083 wait_event(vha->fcport_waitQ, test_fcport_count(vha)); 1084 } 1085 1086 /* 1087 * qla2x00_wait_for_hba_ready 1088 * Wait till the HBA is ready before doing driver unload 1089 * 1090 * Input: 1091 * ha - pointer to host adapter structure 1092 * 1093 * Note: 1094 * Does context switching-Release SPIN_LOCK 1095 * (if any) before calling this routine. 
1096 * 1097 */ 1098 static void 1099 qla2x00_wait_for_hba_ready(scsi_qla_host_t *vha) 1100 { 1101 struct qla_hw_data *ha = vha->hw; 1102 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 1103 1104 while ((qla2x00_reset_active(vha) || ha->dpc_active || 1105 ha->flags.mbox_busy) || 1106 test_bit(FX00_RESET_RECOVERY, &vha->dpc_flags) || 1107 test_bit(FX00_TARGET_SCAN, &vha->dpc_flags)) { 1108 if (test_bit(UNLOADING, &base_vha->dpc_flags)) 1109 break; 1110 msleep(1000); 1111 } 1112 } 1113 1114 int 1115 qla2x00_wait_for_chip_reset(scsi_qla_host_t *vha) 1116 { 1117 int return_status; 1118 unsigned long wait_reset; 1119 struct qla_hw_data *ha = vha->hw; 1120 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 1121 1122 wait_reset = jiffies + (MAX_LOOP_TIMEOUT * HZ); 1123 while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) || 1124 test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) || 1125 test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) || 1126 ha->dpc_active) && time_before(jiffies, wait_reset)) { 1127 1128 msleep(1000); 1129 1130 if (!test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags) && 1131 ha->flags.chip_reset_done) 1132 break; 1133 } 1134 if (ha->flags.chip_reset_done) 1135 return_status = QLA_SUCCESS; 1136 else 1137 return_status = QLA_FUNCTION_FAILED; 1138 1139 return return_status; 1140 } 1141 1142 static void 1143 sp_get(struct srb *sp) 1144 { 1145 atomic_inc(&sp->ref_count); 1146 } 1147 1148 #define ISP_REG_DISCONNECT 0xffffffffU 1149 /************************************************************************** 1150 * qla2x00_isp_reg_stat 1151 * 1152 * Description: 1153 * Read the host status register of ISP before aborting the command. 1154 * 1155 * Input: 1156 * ha = pointer to host adapter structure. 1157 * 1158 * 1159 * Returns: 1160 * Either true or false. 1161 * 1162 * Note: Return true if there is register disconnect. 1163 **************************************************************************/ 1164 static inline 1165 uint32_t qla2x00_isp_reg_stat(struct qla_hw_data *ha) 1166 { 1167 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 1168 struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82; 1169 1170 if (IS_P3P_TYPE(ha)) 1171 return ((RD_REG_DWORD(®82->host_int)) == ISP_REG_DISCONNECT); 1172 else 1173 return ((RD_REG_DWORD(®->host_status)) == 1174 ISP_REG_DISCONNECT); 1175 } 1176 1177 /************************************************************************** 1178 * qla2xxx_eh_abort 1179 * 1180 * Description: 1181 * The abort function will abort the specified command. 1182 * 1183 * Input: 1184 * cmd = Linux SCSI command packet to be aborted. 1185 * 1186 * Returns: 1187 * Either SUCCESS or FAILED. 1188 * 1189 * Note: 1190 * Only return FAILED if command not returned by firmware. 
1191 **************************************************************************/ 1192 static int 1193 qla2xxx_eh_abort(struct scsi_cmnd *cmd) 1194 { 1195 scsi_qla_host_t *vha = shost_priv(cmd->device->host); 1196 srb_t *sp; 1197 int ret; 1198 unsigned int id; 1199 uint64_t lun; 1200 unsigned long flags; 1201 int rval, wait = 0; 1202 struct qla_hw_data *ha = vha->hw; 1203 1204 if (qla2x00_isp_reg_stat(ha)) { 1205 ql_log(ql_log_info, vha, 0x8042, 1206 "PCI/Register disconnect, exiting.\n"); 1207 return FAILED; 1208 } 1209 if (!CMD_SP(cmd)) 1210 return SUCCESS; 1211 1212 ret = fc_block_scsi_eh(cmd); 1213 if (ret != 0) 1214 return ret; 1215 ret = SUCCESS; 1216 1217 id = cmd->device->id; 1218 lun = cmd->device->lun; 1219 1220 spin_lock_irqsave(&ha->hardware_lock, flags); 1221 sp = (srb_t *) CMD_SP(cmd); 1222 if (!sp) { 1223 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1224 return SUCCESS; 1225 } 1226 1227 ql_dbg(ql_dbg_taskm, vha, 0x8002, 1228 "Aborting from RISC nexus=%ld:%d:%llu sp=%p cmd=%p handle=%x\n", 1229 vha->host_no, id, lun, sp, cmd, sp->handle); 1230 1231 /* Get a reference to the sp and drop the lock.*/ 1232 sp_get(sp); 1233 1234 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1235 rval = ha->isp_ops->abort_command(sp); 1236 if (rval) { 1237 if (rval == QLA_FUNCTION_PARAMETER_ERROR) 1238 ret = SUCCESS; 1239 else 1240 ret = FAILED; 1241 1242 ql_dbg(ql_dbg_taskm, vha, 0x8003, 1243 "Abort command mbx failed cmd=%p, rval=%x.\n", cmd, rval); 1244 } else { 1245 ql_dbg(ql_dbg_taskm, vha, 0x8004, 1246 "Abort command mbx success cmd=%p.\n", cmd); 1247 wait = 1; 1248 } 1249 1250 spin_lock_irqsave(&ha->hardware_lock, flags); 1251 sp->done(sp, 0); 1252 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1253 1254 /* Did the command return during mailbox execution? */ 1255 if (ret == FAILED && !CMD_SP(cmd)) 1256 ret = SUCCESS; 1257 1258 /* Wait for the command to be returned. 

int
qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
	uint64_t l, enum nexus_wait_type type)
{
	int cnt, match, status;
	unsigned long flags;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req;
	srb_t *sp;
	struct scsi_cmnd *cmd;

	status = QLA_SUCCESS;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	req = vha->req;
	for (cnt = 1; status == QLA_SUCCESS &&
	    cnt < req->num_outstanding_cmds; cnt++) {
		sp = req->outstanding_cmds[cnt];
		if (!sp)
			continue;
		if (sp->type != SRB_SCSI_CMD)
			continue;
		if (vha->vp_idx != sp->vha->vp_idx)
			continue;
		match = 0;
		cmd = GET_CMD_SP(sp);
		switch (type) {
		case WAIT_HOST:
			match = 1;
			break;
		case WAIT_TARGET:
			match = cmd->device->id == t;
			break;
		case WAIT_LUN:
			match = (cmd->device->id == t &&
			    cmd->device->lun == l);
			break;
		}
		if (!match)
			continue;

		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		status = qla2x00_eh_wait_on_command(cmd);
		spin_lock_irqsave(&ha->hardware_lock, flags);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return status;
}

static char *reset_errors[] = {
	"HBA not online",
	"HBA not ready",
	"Task management failed",
	"Waiting for command completions",
};

static int
__qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
    struct scsi_cmnd *cmd, int (*do_reset)(struct fc_port *, uint64_t, int))
{
	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
	fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
	int err;

	if (!fcport) {
		return FAILED;
	}

	err = fc_block_scsi_eh(cmd);
	if (err != 0)
		return err;

	ql_log(ql_log_info, vha, 0x8009,
	    "%s RESET ISSUED nexus=%ld:%d:%llu cmd=%p.\n", name, vha->host_no,
	    cmd->device->id, cmd->device->lun, cmd);

	err = 0;
	if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x800a,
		    "Wait for hba online failed for cmd=%p.\n", cmd);
		goto eh_reset_failed;
	}
	err = 2;
	if (do_reset(fcport, cmd->device->lun, cmd->request->cpu + 1)
	    != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x800c,
		    "do_reset failed for cmd=%p.\n", cmd);
		goto eh_reset_failed;
	}
	err = 3;
	if (qla2x00_eh_wait_for_pending_commands(vha, cmd->device->id,
	    cmd->device->lun, type) != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x800d,
		    "wait for pending cmds failed for cmd=%p.\n", cmd);
		goto eh_reset_failed;
	}

	ql_log(ql_log_info, vha, 0x800e,
	    "%s RESET SUCCEEDED nexus:%ld:%d:%llu cmd=%p.\n", name,
	    vha->host_no, cmd->device->id, cmd->device->lun, cmd);

	return SUCCESS;

eh_reset_failed:
	ql_log(ql_log_info, vha, 0x800f,
	    "%s RESET FAILED: %s nexus=%ld:%d:%llu cmd=%p.\n", name,
	    reset_errors[err], vha->host_no, cmd->device->id,
	    cmd->device->lun, cmd);
	return FAILED;
}
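
/*
 * err doubles as an index into reset_errors[] above: 0 = "HBA not online",
 * 2 = "Task management failed", 3 = "Waiting for command completions".
 * Index 1 ("HBA not ready") is not set on any path in this function.
 */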

static int
qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
{
	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
	struct qla_hw_data *ha = vha->hw;

	if (qla2x00_isp_reg_stat(ha)) {
		ql_log(ql_log_info, vha, 0x803e,
		    "PCI/Register disconnect, exiting.\n");
		return FAILED;
	}

	return __qla2xxx_eh_generic_reset("DEVICE", WAIT_LUN, cmd,
	    ha->isp_ops->lun_reset);
}

static int
qla2xxx_eh_target_reset(struct scsi_cmnd *cmd)
{
	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
	struct qla_hw_data *ha = vha->hw;

	if (qla2x00_isp_reg_stat(ha)) {
		ql_log(ql_log_info, vha, 0x803f,
		    "PCI/Register disconnect, exiting.\n");
		return FAILED;
	}

	return __qla2xxx_eh_generic_reset("TARGET", WAIT_TARGET, cmd,
	    ha->isp_ops->target_reset);
}

/**************************************************************************
 * qla2xxx_eh_bus_reset
 *
 * Description:
 *    The bus reset function will reset the bus and abort any executing
 *    commands.
 *
 * Input:
 *    cmd = Linux SCSI command packet of the command that caused the
 *          bus reset.
 *
 * Returns:
 *    SUCCESS/FAILURE (defined as macro in scsi.h).
 *
 **************************************************************************/
static int
qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
{
	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
	fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
	int ret = FAILED;
	unsigned int id;
	uint64_t lun;
	struct qla_hw_data *ha = vha->hw;

	if (qla2x00_isp_reg_stat(ha)) {
		ql_log(ql_log_info, vha, 0x8040,
		    "PCI/Register disconnect, exiting.\n");
		return FAILED;
	}

	id = cmd->device->id;
	lun = cmd->device->lun;

	if (!fcport) {
		return ret;
	}

	ret = fc_block_scsi_eh(cmd);
	if (ret != 0)
		return ret;
	ret = FAILED;

	ql_log(ql_log_info, vha, 0x8012,
	    "BUS RESET ISSUED nexus=%ld:%d:%llu.\n", vha->host_no, id, lun);

	if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
		ql_log(ql_log_fatal, vha, 0x8013,
		    "Wait for hba online failed board disabled.\n");
		goto eh_bus_reset_done;
	}

	if (qla2x00_loop_reset(vha) == QLA_SUCCESS)
		ret = SUCCESS;

	if (ret == FAILED)
		goto eh_bus_reset_done;

	/* Flush outstanding commands. */
	if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST) !=
	    QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x8014,
		    "Wait for pending commands failed.\n");
		ret = FAILED;
	}

eh_bus_reset_done:
	ql_log(ql_log_warn, vha, 0x802b,
	    "BUS RESET %s nexus=%ld:%d:%llu.\n",
	    (ret == FAILED) ? "FAILED" : "SUCCEEDED", vha->host_no, id, lun);

	return ret;
}

/**************************************************************************
 * qla2xxx_eh_host_reset
 *
 * Description:
 *    The reset function will reset the Adapter.
 *
 * Input:
 *    cmd = Linux SCSI command packet of the command that caused the
 *          adapter reset.
 *
 * Returns:
 *    Either SUCCESS or FAILED.
 *
 * Note:
 **************************************************************************/
1504 * 1505 * Note: 1506 **************************************************************************/ 1507 static int 1508 qla2xxx_eh_host_reset(struct scsi_cmnd *cmd) 1509 { 1510 scsi_qla_host_t *vha = shost_priv(cmd->device->host); 1511 struct qla_hw_data *ha = vha->hw; 1512 int ret = FAILED; 1513 unsigned int id; 1514 uint64_t lun; 1515 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 1516 1517 if (qla2x00_isp_reg_stat(ha)) { 1518 ql_log(ql_log_info, vha, 0x8041, 1519 "PCI/Register disconnect, exiting.\n"); 1520 schedule_work(&ha->board_disable); 1521 return SUCCESS; 1522 } 1523 1524 id = cmd->device->id; 1525 lun = cmd->device->lun; 1526 1527 ql_log(ql_log_info, vha, 0x8018, 1528 "ADAPTER RESET ISSUED nexus=%ld:%d:%llu.\n", vha->host_no, id, lun); 1529 1530 /* 1531 * No point in issuing another reset if one is active. Also do not 1532 * attempt a reset if we are updating flash. 1533 */ 1534 if (qla2x00_reset_active(vha) || ha->optrom_state != QLA_SWAITING) 1535 goto eh_host_reset_lock; 1536 1537 if (vha != base_vha) { 1538 if (qla2x00_vp_abort_isp(vha)) 1539 goto eh_host_reset_lock; 1540 } else { 1541 if (IS_P3P_TYPE(vha->hw)) { 1542 if (!qla82xx_fcoe_ctx_reset(vha)) { 1543 /* Ctx reset success */ 1544 ret = SUCCESS; 1545 goto eh_host_reset_lock; 1546 } 1547 /* fall thru if ctx reset failed */ 1548 } 1549 if (ha->wq) 1550 flush_workqueue(ha->wq); 1551 1552 set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); 1553 if (ha->isp_ops->abort_isp(base_vha)) { 1554 clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); 1555 /* failed. schedule dpc to try */ 1556 set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags); 1557 1558 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) { 1559 ql_log(ql_log_warn, vha, 0x802a, 1560 "wait for hba online failed.\n"); 1561 goto eh_host_reset_lock; 1562 } 1563 } 1564 clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); 1565 } 1566 1567 /* Waiting for command to be returned to OS.*/ 1568 if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST) == 1569 QLA_SUCCESS) 1570 ret = SUCCESS; 1571 1572 eh_host_reset_lock: 1573 ql_log(ql_log_info, vha, 0x8017, 1574 "ADAPTER RESET %s nexus=%ld:%d:%llu.\n", 1575 (ret == FAILED) ? "FAILED" : "SUCCEEDED", vha->host_no, id, lun); 1576 1577 return ret; 1578 } 1579 1580 /* 1581 * qla2x00_loop_reset 1582 * Issue loop reset. 1583 * 1584 * Input: 1585 * ha = adapter block pointer. 
1586 * 1587 * Returns: 1588 * 0 = success 1589 */ 1590 int 1591 qla2x00_loop_reset(scsi_qla_host_t *vha) 1592 { 1593 int ret; 1594 struct fc_port *fcport; 1595 struct qla_hw_data *ha = vha->hw; 1596 1597 if (IS_QLAFX00(ha)) { 1598 return qlafx00_loop_reset(vha); 1599 } 1600 1601 if (ql2xtargetreset == 1 && ha->flags.enable_target_reset) { 1602 list_for_each_entry(fcport, &vha->vp_fcports, list) { 1603 if (fcport->port_type != FCT_TARGET) 1604 continue; 1605 1606 ret = ha->isp_ops->target_reset(fcport, 0, 0); 1607 if (ret != QLA_SUCCESS) { 1608 ql_dbg(ql_dbg_taskm, vha, 0x802c, 1609 "Bus Reset failed: Reset=%d " 1610 "d_id=%x.\n", ret, fcport->d_id.b24); 1611 } 1612 } 1613 } 1614 1615 1616 if (ha->flags.enable_lip_full_login && !IS_CNA_CAPABLE(ha)) { 1617 atomic_set(&vha->loop_state, LOOP_DOWN); 1618 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 1619 qla2x00_mark_all_devices_lost(vha, 0); 1620 ret = qla2x00_full_login_lip(vha); 1621 if (ret != QLA_SUCCESS) { 1622 ql_dbg(ql_dbg_taskm, vha, 0x802d, 1623 "full_login_lip=%d.\n", ret); 1624 } 1625 } 1626 1627 if (ha->flags.enable_lip_reset) { 1628 ret = qla2x00_lip_reset(vha); 1629 if (ret != QLA_SUCCESS) 1630 ql_dbg(ql_dbg_taskm, vha, 0x802e, 1631 "lip_reset failed (%d).\n", ret); 1632 } 1633 1634 /* Issue marker command only when we are going to start the I/O */ 1635 vha->marker_needed = 1; 1636 1637 return QLA_SUCCESS; 1638 } 1639 1640 void 1641 qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res) 1642 { 1643 int que, cnt, status; 1644 unsigned long flags; 1645 srb_t *sp; 1646 struct qla_hw_data *ha = vha->hw; 1647 struct req_que *req; 1648 1649 qlt_host_reset_handler(ha); 1650 1651 spin_lock_irqsave(&ha->hardware_lock, flags); 1652 for (que = 0; que < ha->max_req_queues; que++) { 1653 req = ha->req_q_map[que]; 1654 if (!req) 1655 continue; 1656 if (!req->outstanding_cmds) 1657 continue; 1658 for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) { 1659 sp = req->outstanding_cmds[cnt]; 1660 if (sp) { 1661 /* Don't abort commands in adapter during EEH 1662 * recovery as it's not accessible/responding. 1663 */ 1664 if (GET_CMD_SP(sp) && !ha->flags.eeh_busy && 1665 (sp->type == SRB_SCSI_CMD)) { 1666 /* Get a reference to the sp and drop the lock. 1667 * The reference ensures this sp->done() call 1668 * - and not the call in qla2xxx_eh_abort() - 1669 * ends the SCSI command (with result 'res'). 
1670 */ 1671 sp_get(sp); 1672 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1673 status = qla2xxx_eh_abort(GET_CMD_SP(sp)); 1674 spin_lock_irqsave(&ha->hardware_lock, flags); 1675 /* Get rid of extra reference if immediate exit 1676 * from ql2xxx_eh_abort */ 1677 if (status == FAILED && (qla2x00_isp_reg_stat(ha))) 1678 atomic_dec(&sp->ref_count); 1679 } 1680 req->outstanding_cmds[cnt] = NULL; 1681 sp->done(sp, res); 1682 } 1683 } 1684 } 1685 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1686 } 1687 1688 static int 1689 qla2xxx_slave_alloc(struct scsi_device *sdev) 1690 { 1691 struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); 1692 1693 if (!rport || fc_remote_port_chkready(rport)) 1694 return -ENXIO; 1695 1696 sdev->hostdata = *(fc_port_t **)rport->dd_data; 1697 1698 return 0; 1699 } 1700 1701 static int 1702 qla2xxx_slave_configure(struct scsi_device *sdev) 1703 { 1704 scsi_qla_host_t *vha = shost_priv(sdev->host); 1705 struct req_que *req = vha->req; 1706 1707 if (IS_T10_PI_CAPABLE(vha->hw)) 1708 blk_queue_update_dma_alignment(sdev->request_queue, 0x7); 1709 1710 scsi_change_queue_depth(sdev, req->max_q_depth); 1711 return 0; 1712 } 1713 1714 static void 1715 qla2xxx_slave_destroy(struct scsi_device *sdev) 1716 { 1717 sdev->hostdata = NULL; 1718 } 1719 1720 /** 1721 * qla2x00_config_dma_addressing() - Configure OS DMA addressing method. 1722 * @ha: HA context 1723 * 1724 * At exit, the @ha's flags.enable_64bit_addressing set to indicated 1725 * supported addressing method. 1726 */ 1727 static void 1728 qla2x00_config_dma_addressing(struct qla_hw_data *ha) 1729 { 1730 /* Assume a 32bit DMA mask. */ 1731 ha->flags.enable_64bit_addressing = 0; 1732 1733 if (!dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(64))) { 1734 /* Any upper-dword bits set? */ 1735 if (MSD(dma_get_required_mask(&ha->pdev->dev)) && 1736 !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) { 1737 /* Ok, a 64bit DMA mask is applicable. 

static void
qla2x00_enable_intrs(struct qla_hw_data *ha)
{
	unsigned long flags = 0;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->interrupts_on = 1;
	/* enable risc and host interrupts */
	WRT_REG_WORD(&reg->ictrl, ICR_EN_INT | ICR_EN_RISC);
	RD_REG_WORD(&reg->ictrl);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

static void
qla2x00_disable_intrs(struct qla_hw_data *ha)
{
	unsigned long flags = 0;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->interrupts_on = 0;
	/* disable risc and host interrupts */
	WRT_REG_WORD(&reg->ictrl, 0);
	RD_REG_WORD(&reg->ictrl);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

static void
qla24xx_enable_intrs(struct qla_hw_data *ha)
{
	unsigned long flags = 0;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->interrupts_on = 1;
	WRT_REG_DWORD(&reg->ictrl, ICRX_EN_RISC_INT);
	RD_REG_DWORD(&reg->ictrl);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

static void
qla24xx_disable_intrs(struct qla_hw_data *ha)
{
	unsigned long flags = 0;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	if (IS_NOPOLLING_TYPE(ha))
		return;
	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->interrupts_on = 0;
	WRT_REG_DWORD(&reg->ictrl, 0);
	RD_REG_DWORD(&reg->ictrl);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

static int
qla2x00_iospace_config(struct qla_hw_data *ha)
{
	resource_size_t pio;
	uint16_t msix;

	if (pci_request_selected_regions(ha->pdev, ha->bars,
	    QLA2XXX_DRIVER_NAME)) {
		ql_log_pci(ql_log_fatal, ha->pdev, 0x0011,
		    "Failed to reserve PIO/MMIO regions (%s), aborting.\n",
		    pci_name(ha->pdev));
		goto iospace_error_exit;
	}
	if (!(ha->bars & 1))
		goto skip_pio;

	/* We only need PIO for Flash operations on ISP2312 v2 chips. */
	pio = pci_resource_start(ha->pdev, 0);
	if (pci_resource_flags(ha->pdev, 0) & IORESOURCE_IO) {
		if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) {
			ql_log_pci(ql_log_warn, ha->pdev, 0x0012,
			    "Invalid pci I/O region size (%s).\n",
			    pci_name(ha->pdev));
			pio = 0;
		}
	} else {
		ql_log_pci(ql_log_warn, ha->pdev, 0x0013,
		    "Region #0 not a PIO resource (%s).\n",
		    pci_name(ha->pdev));
		pio = 0;
	}
	ha->pio_address = pio;
	ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0014,
	    "PIO address=%llu.\n",
	    (unsigned long long)ha->pio_address);

skip_pio:
	/* Use MMIO operations for all accesses. */
	if (!(pci_resource_flags(ha->pdev, 1) & IORESOURCE_MEM)) {
		ql_log_pci(ql_log_fatal, ha->pdev, 0x0015,
		    "Region #1 not an MMIO resource (%s), aborting.\n",
		    pci_name(ha->pdev));
		goto iospace_error_exit;
	}
	if (pci_resource_len(ha->pdev, 1) < MIN_IOBASE_LEN) {
		ql_log_pci(ql_log_fatal, ha->pdev, 0x0016,
		    "Invalid PCI mem region size (%s), aborting.\n",
		    pci_name(ha->pdev));
		goto iospace_error_exit;
	}

	ha->iobase = ioremap(pci_resource_start(ha->pdev, 1), MIN_IOBASE_LEN);
	if (!ha->iobase) {
		ql_log_pci(ql_log_fatal, ha->pdev, 0x0017,
		    "Cannot remap MMIO (%s), aborting.\n",
		    pci_name(ha->pdev));
		goto iospace_error_exit;
	}

	/* Determine queue resources */
	ha->max_req_queues = ha->max_rsp_queues = 1;
	ha->msix_count = QLA_BASE_VECTORS;
	if (!ql2xmqsupport || (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
		goto mqiobase_exit;

	ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3),
	    pci_resource_len(ha->pdev, 3));
	if (ha->mqiobase) {
		ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0018,
		    "MQIO Base=%p.\n", ha->mqiobase);
		/* Read MSIX vector size of the board */
		pci_read_config_word(ha->pdev, QLA_PCI_MSIX_CONTROL, &msix);
		ha->msix_count = msix + 1;
		/* Max queues are bounded by available msix vectors */
		/* MB interrupt uses 1 vector */
		ha->max_req_queues = ha->msix_count - 1;
		ha->max_rsp_queues = ha->max_req_queues;
		/* Queue pairs is the max value minus the base queue pair */
		ha->max_qpairs = ha->max_rsp_queues - 1;
		ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0188,
		    "Max no of queue pairs: %d.\n", ha->max_qpairs);

		ql_log_pci(ql_log_info, ha->pdev, 0x001a,
		    "MSI-X vector count: %d.\n", ha->msix_count);
	} else
		ql_log_pci(ql_log_info, ha->pdev, 0x001b,
		    "BAR 3 not enabled.\n");

mqiobase_exit:
	ql_dbg_pci(ql_dbg_init, ha->pdev, 0x001c,
	    "MSIX Count: %d.\n", ha->msix_count);
	return (0);

iospace_error_exit:
	return (-ENOMEM);
}
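
/*
 * Vector accounting above (assumed behavior of the MSI-X capability): the
 * control word encodes the table size minus one, so msix_count = msix + 1.
 * One vector is reserved for mailbox completions, leaving msix_count - 1
 * request/response queues, and the base queue pair is subtracted again when
 * sizing max_qpairs.
 */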

static int
qla83xx_iospace_config(struct qla_hw_data *ha)
{
	uint16_t msix;

	if (pci_request_selected_regions(ha->pdev, ha->bars,
	    QLA2XXX_DRIVER_NAME)) {
		ql_log_pci(ql_log_fatal, ha->pdev, 0x0117,
		    "Failed to reserve PIO/MMIO regions (%s), aborting.\n",
		    pci_name(ha->pdev));

		goto iospace_error_exit;
	}

	/* Use MMIO operations for all accesses. */
	if (!(pci_resource_flags(ha->pdev, 0) & IORESOURCE_MEM)) {
		ql_log_pci(ql_log_warn, ha->pdev, 0x0118,
		    "Invalid pci I/O region size (%s).\n",
		    pci_name(ha->pdev));
		goto iospace_error_exit;
	}
	if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) {
		ql_log_pci(ql_log_warn, ha->pdev, 0x0119,
		    "Invalid PCI mem region size (%s), aborting\n",
		    pci_name(ha->pdev));
		goto iospace_error_exit;
	}

	ha->iobase = ioremap(pci_resource_start(ha->pdev, 0), MIN_IOBASE_LEN);
	if (!ha->iobase) {
		ql_log_pci(ql_log_fatal, ha->pdev, 0x011a,
		    "Cannot remap MMIO (%s), aborting.\n",
		    pci_name(ha->pdev));
		goto iospace_error_exit;
	}

	/* 64bit PCI BAR - BAR2 will correspond to region 4 */
	/* 83XX 26XX always use MQ type access for queues
	 * - mbar 2, a.k.a region 4 */
	ha->max_req_queues = ha->max_rsp_queues = 1;
	ha->msix_count = QLA_BASE_VECTORS;
	ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 4),
	    pci_resource_len(ha->pdev, 4));

	if (!ha->mqiobase) {
		ql_log_pci(ql_log_fatal, ha->pdev, 0x011d,
		    "BAR2/region4 not enabled\n");
		goto mqiobase_exit;
	}

	ha->msixbase = ioremap(pci_resource_start(ha->pdev, 2),
	    pci_resource_len(ha->pdev, 2));
	if (ha->msixbase) {
		/* Read MSIX vector size of the board */
		pci_read_config_word(ha->pdev,
		    QLA_83XX_PCI_MSIX_CONTROL, &msix);
		ha->msix_count = msix + 1;
		/*
		 * By default, driver uses at least two msix vectors
		 * (default & rspq)
		 */
		if (ql2xmqsupport) {
			/* MB interrupt uses 1 vector */
			ha->max_req_queues = ha->msix_count - 1;

			/* ATIOQ needs 1 vector. That's 1 less QPair */
			if (QLA_TGT_MODE_ENABLED())
				ha->max_req_queues--;

			ha->max_rsp_queues = ha->max_req_queues;

			/* Queue pairs is the max value minus
			 * the base queue pair */
			ha->max_qpairs = ha->max_req_queues - 1;
			ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0190,
			    "Max no of queue pairs: %d.\n", ha->max_qpairs);
		}
		ql_log_pci(ql_log_info, ha->pdev, 0x011c,
		    "MSI-X vector count: %d.\n", ha->msix_count);
	} else
		ql_log_pci(ql_log_info, ha->pdev, 0x011e,
		    "BAR 1 not enabled.\n");

mqiobase_exit:
	ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011f,
	    "MSIX Count: %d.\n", ha->msix_count);
	return 0;

iospace_error_exit:
	return -ENOMEM;
}

static struct isp_operations qla2100_isp_ops = {
	.pci_config = qla2100_pci_config,
	.reset_chip = qla2x00_reset_chip,
	.chip_diag = qla2x00_chip_diag,
	.config_rings = qla2x00_config_rings,
	.reset_adapter = qla2x00_reset_adapter,
	.nvram_config = qla2x00_nvram_config,
	.update_fw_options = qla2x00_update_fw_options,
	.load_risc = qla2x00_load_risc,
	.pci_info_str = qla2x00_pci_info_str,
	.fw_version_str = qla2x00_fw_version_str,
	.intr_handler = qla2100_intr_handler,
	.enable_intrs = qla2x00_enable_intrs,
	.disable_intrs = qla2x00_disable_intrs,
	.abort_command = qla2x00_abort_command,
	.target_reset = qla2x00_abort_target,
	.lun_reset = qla2x00_lun_reset,
	.fabric_login = qla2x00_login_fabric,
	.fabric_logout = qla2x00_fabric_logout,
	.calc_req_entries = qla2x00_calc_iocbs_32,
	.build_iocbs = qla2x00_build_scsi_iocbs_32,
	.prep_ms_iocb = qla2x00_prep_ms_iocb,
	.prep_ms_fdmi_iocb = qla2x00_prep_ms_fdmi_iocb,
	.read_nvram = qla2x00_read_nvram_data,
	.write_nvram = qla2x00_write_nvram_data,
	.fw_dump = qla2100_fw_dump,
	.beacon_on = NULL,
	.beacon_off = NULL,
	.beacon_blink = NULL,
	.read_optrom = qla2x00_read_optrom_data,
	.write_optrom = qla2x00_write_optrom_data,
	.get_flash_version = qla2x00_get_flash_version,
	.start_scsi = qla2x00_start_scsi,
	.start_scsi_mq = NULL,
	.abort_isp = qla2x00_abort_isp,
	.iospace_config = qla2x00_iospace_config,
	.initialize_adapter = qla2x00_initialize_adapter,
};
static struct isp_operations qla2100_isp_ops = {
	.pci_config = qla2100_pci_config,
	.reset_chip = qla2x00_reset_chip,
	.chip_diag = qla2x00_chip_diag,
	.config_rings = qla2x00_config_rings,
	.reset_adapter = qla2x00_reset_adapter,
	.nvram_config = qla2x00_nvram_config,
	.update_fw_options = qla2x00_update_fw_options,
	.load_risc = qla2x00_load_risc,
	.pci_info_str = qla2x00_pci_info_str,
	.fw_version_str = qla2x00_fw_version_str,
	.intr_handler = qla2100_intr_handler,
	.enable_intrs = qla2x00_enable_intrs,
	.disable_intrs = qla2x00_disable_intrs,
	.abort_command = qla2x00_abort_command,
	.target_reset = qla2x00_abort_target,
	.lun_reset = qla2x00_lun_reset,
	.fabric_login = qla2x00_login_fabric,
	.fabric_logout = qla2x00_fabric_logout,
	.calc_req_entries = qla2x00_calc_iocbs_32,
	.build_iocbs = qla2x00_build_scsi_iocbs_32,
	.prep_ms_iocb = qla2x00_prep_ms_iocb,
	.prep_ms_fdmi_iocb = qla2x00_prep_ms_fdmi_iocb,
	.read_nvram = qla2x00_read_nvram_data,
	.write_nvram = qla2x00_write_nvram_data,
	.fw_dump = qla2100_fw_dump,
	.beacon_on = NULL,
	.beacon_off = NULL,
	.beacon_blink = NULL,
	.read_optrom = qla2x00_read_optrom_data,
	.write_optrom = qla2x00_write_optrom_data,
	.get_flash_version = qla2x00_get_flash_version,
	.start_scsi = qla2x00_start_scsi,
	.start_scsi_mq = NULL,
	.abort_isp = qla2x00_abort_isp,
	.iospace_config = qla2x00_iospace_config,
	.initialize_adapter = qla2x00_initialize_adapter,
};

static struct isp_operations qla2300_isp_ops = {
	.pci_config = qla2300_pci_config,
	.reset_chip = qla2x00_reset_chip,
	.chip_diag = qla2x00_chip_diag,
	.config_rings = qla2x00_config_rings,
	.reset_adapter = qla2x00_reset_adapter,
	.nvram_config = qla2x00_nvram_config,
	.update_fw_options = qla2x00_update_fw_options,
	.load_risc = qla2x00_load_risc,
	.pci_info_str = qla2x00_pci_info_str,
	.fw_version_str = qla2x00_fw_version_str,
	.intr_handler = qla2300_intr_handler,
	.enable_intrs = qla2x00_enable_intrs,
	.disable_intrs = qla2x00_disable_intrs,
	.abort_command = qla2x00_abort_command,
	.target_reset = qla2x00_abort_target,
	.lun_reset = qla2x00_lun_reset,
	.fabric_login = qla2x00_login_fabric,
	.fabric_logout = qla2x00_fabric_logout,
	.calc_req_entries = qla2x00_calc_iocbs_32,
	.build_iocbs = qla2x00_build_scsi_iocbs_32,
	.prep_ms_iocb = qla2x00_prep_ms_iocb,
	.prep_ms_fdmi_iocb = qla2x00_prep_ms_fdmi_iocb,
	.read_nvram = qla2x00_read_nvram_data,
	.write_nvram = qla2x00_write_nvram_data,
	.fw_dump = qla2300_fw_dump,
	.beacon_on = qla2x00_beacon_on,
	.beacon_off = qla2x00_beacon_off,
	.beacon_blink = qla2x00_beacon_blink,
	.read_optrom = qla2x00_read_optrom_data,
	.write_optrom = qla2x00_write_optrom_data,
	.get_flash_version = qla2x00_get_flash_version,
	.start_scsi = qla2x00_start_scsi,
	.start_scsi_mq = NULL,
	.abort_isp = qla2x00_abort_isp,
	.iospace_config = qla2x00_iospace_config,
	.initialize_adapter = qla2x00_initialize_adapter,
};

static struct isp_operations qla24xx_isp_ops = {
	.pci_config = qla24xx_pci_config,
	.reset_chip = qla24xx_reset_chip,
	.chip_diag = qla24xx_chip_diag,
	.config_rings = qla24xx_config_rings,
	.reset_adapter = qla24xx_reset_adapter,
	.nvram_config = qla24xx_nvram_config,
	.update_fw_options = qla24xx_update_fw_options,
	.load_risc = qla24xx_load_risc,
	.pci_info_str = qla24xx_pci_info_str,
	.fw_version_str = qla24xx_fw_version_str,
	.intr_handler = qla24xx_intr_handler,
	.enable_intrs = qla24xx_enable_intrs,
	.disable_intrs = qla24xx_disable_intrs,
	.abort_command = qla24xx_abort_command,
	.target_reset = qla24xx_abort_target,
	.lun_reset = qla24xx_lun_reset,
	.fabric_login = qla24xx_login_fabric,
	.fabric_logout = qla24xx_fabric_logout,
	.calc_req_entries = NULL,
	.build_iocbs = NULL,
	.prep_ms_iocb = qla24xx_prep_ms_iocb,
	.prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
	.read_nvram = qla24xx_read_nvram_data,
	.write_nvram = qla24xx_write_nvram_data,
	.fw_dump = qla24xx_fw_dump,
	.beacon_on = qla24xx_beacon_on,
	.beacon_off = qla24xx_beacon_off,
	.beacon_blink = qla24xx_beacon_blink,
	.read_optrom = qla24xx_read_optrom_data,
	.write_optrom = qla24xx_write_optrom_data,
	.get_flash_version = qla24xx_get_flash_version,
	.start_scsi = qla24xx_start_scsi,
	.start_scsi_mq = NULL,
	.abort_isp = qla2x00_abort_isp,
	.iospace_config = qla2x00_iospace_config,
	.initialize_adapter = qla2x00_initialize_adapter,
};

static struct isp_operations qla25xx_isp_ops = {
	.pci_config = qla25xx_pci_config,
	.reset_chip = qla24xx_reset_chip,
	.chip_diag = qla24xx_chip_diag,
	.config_rings = qla24xx_config_rings,
	.reset_adapter = qla24xx_reset_adapter,
	.nvram_config = qla24xx_nvram_config,
	.update_fw_options = qla24xx_update_fw_options,
	.load_risc = qla24xx_load_risc,
	.pci_info_str = qla24xx_pci_info_str,
	.fw_version_str = qla24xx_fw_version_str,
	.intr_handler = qla24xx_intr_handler,
	.enable_intrs = qla24xx_enable_intrs,
	.disable_intrs = qla24xx_disable_intrs,
	.abort_command = qla24xx_abort_command,
	.target_reset = qla24xx_abort_target,
	.lun_reset = qla24xx_lun_reset,
	.fabric_login = qla24xx_login_fabric,
	.fabric_logout = qla24xx_fabric_logout,
	.calc_req_entries = NULL,
	.build_iocbs = NULL,
	.prep_ms_iocb = qla24xx_prep_ms_iocb,
	.prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
	.read_nvram = qla25xx_read_nvram_data,
	.write_nvram = qla25xx_write_nvram_data,
	.fw_dump = qla25xx_fw_dump,
	.beacon_on = qla24xx_beacon_on,
	.beacon_off = qla24xx_beacon_off,
	.beacon_blink = qla24xx_beacon_blink,
	.read_optrom = qla25xx_read_optrom_data,
	.write_optrom = qla24xx_write_optrom_data,
	.get_flash_version = qla24xx_get_flash_version,
	.start_scsi = qla24xx_dif_start_scsi,
	.start_scsi_mq = qla2xxx_dif_start_scsi_mq,
	.abort_isp = qla2x00_abort_isp,
	.iospace_config = qla2x00_iospace_config,
	.initialize_adapter = qla2x00_initialize_adapter,
};

static struct isp_operations qla81xx_isp_ops = {
	.pci_config = qla25xx_pci_config,
	.reset_chip = qla24xx_reset_chip,
	.chip_diag = qla24xx_chip_diag,
	.config_rings = qla24xx_config_rings,
	.reset_adapter = qla24xx_reset_adapter,
	.nvram_config = qla81xx_nvram_config,
	.update_fw_options = qla81xx_update_fw_options,
	.load_risc = qla81xx_load_risc,
	.pci_info_str = qla24xx_pci_info_str,
	.fw_version_str = qla24xx_fw_version_str,
	.intr_handler = qla24xx_intr_handler,
	.enable_intrs = qla24xx_enable_intrs,
	.disable_intrs = qla24xx_disable_intrs,
	.abort_command = qla24xx_abort_command,
	.target_reset = qla24xx_abort_target,
	.lun_reset = qla24xx_lun_reset,
	.fabric_login = qla24xx_login_fabric,
	.fabric_logout = qla24xx_fabric_logout,
	.calc_req_entries = NULL,
	.build_iocbs = NULL,
	.prep_ms_iocb = qla24xx_prep_ms_iocb,
	.prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
	.read_nvram = NULL,
	.write_nvram = NULL,
	.fw_dump = qla81xx_fw_dump,
	.beacon_on = qla24xx_beacon_on,
	.beacon_off = qla24xx_beacon_off,
	.beacon_blink = qla83xx_beacon_blink,
	.read_optrom = qla25xx_read_optrom_data,
	.write_optrom = qla24xx_write_optrom_data,
	.get_flash_version = qla24xx_get_flash_version,
	.start_scsi = qla24xx_dif_start_scsi,
	.start_scsi_mq = qla2xxx_dif_start_scsi_mq,
	.abort_isp = qla2x00_abort_isp,
	.iospace_config = qla2x00_iospace_config,
	.initialize_adapter = qla2x00_initialize_adapter,
};
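
/*
 * The P3P parts (ISP82xx and ISP8044) sit behind a different register
 * model, so they carry their own PCI config, reset, interrupt and flash
 * handlers while reusing the common FWI-2 command helpers.
 */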
static struct isp_operations qla82xx_isp_ops = {
	.pci_config = qla82xx_pci_config,
	.reset_chip = qla82xx_reset_chip,
	.chip_diag = qla24xx_chip_diag,
	.config_rings = qla82xx_config_rings,
	.reset_adapter = qla24xx_reset_adapter,
	.nvram_config = qla81xx_nvram_config,
	.update_fw_options = qla24xx_update_fw_options,
	.load_risc = qla82xx_load_risc,
	.pci_info_str = qla24xx_pci_info_str,
	.fw_version_str = qla24xx_fw_version_str,
	.intr_handler = qla82xx_intr_handler,
	.enable_intrs = qla82xx_enable_intrs,
	.disable_intrs = qla82xx_disable_intrs,
	.abort_command = qla24xx_abort_command,
	.target_reset = qla24xx_abort_target,
	.lun_reset = qla24xx_lun_reset,
	.fabric_login = qla24xx_login_fabric,
	.fabric_logout = qla24xx_fabric_logout,
	.calc_req_entries = NULL,
	.build_iocbs = NULL,
	.prep_ms_iocb = qla24xx_prep_ms_iocb,
	.prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
	.read_nvram = qla24xx_read_nvram_data,
	.write_nvram = qla24xx_write_nvram_data,
	.fw_dump = qla82xx_fw_dump,
	.beacon_on = qla82xx_beacon_on,
	.beacon_off = qla82xx_beacon_off,
	.beacon_blink = NULL,
	.read_optrom = qla82xx_read_optrom_data,
	.write_optrom = qla82xx_write_optrom_data,
	.get_flash_version = qla82xx_get_flash_version,
	.start_scsi = qla82xx_start_scsi,
	.start_scsi_mq = NULL,
	.abort_isp = qla82xx_abort_isp,
	.iospace_config = qla82xx_iospace_config,
	.initialize_adapter = qla2x00_initialize_adapter,
};

static struct isp_operations qla8044_isp_ops = {
	.pci_config = qla82xx_pci_config,
	.reset_chip = qla82xx_reset_chip,
	.chip_diag = qla24xx_chip_diag,
	.config_rings = qla82xx_config_rings,
	.reset_adapter = qla24xx_reset_adapter,
	.nvram_config = qla81xx_nvram_config,
	.update_fw_options = qla24xx_update_fw_options,
	.load_risc = qla82xx_load_risc,
	.pci_info_str = qla24xx_pci_info_str,
	.fw_version_str = qla24xx_fw_version_str,
	.intr_handler = qla8044_intr_handler,
	.enable_intrs = qla82xx_enable_intrs,
	.disable_intrs = qla82xx_disable_intrs,
	.abort_command = qla24xx_abort_command,
	.target_reset = qla24xx_abort_target,
	.lun_reset = qla24xx_lun_reset,
	.fabric_login = qla24xx_login_fabric,
	.fabric_logout = qla24xx_fabric_logout,
	.calc_req_entries = NULL,
	.build_iocbs = NULL,
	.prep_ms_iocb = qla24xx_prep_ms_iocb,
	.prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
	.read_nvram = NULL,
	.write_nvram = NULL,
	.fw_dump = qla8044_fw_dump,
	.beacon_on = qla82xx_beacon_on,
	.beacon_off = qla82xx_beacon_off,
	.beacon_blink = NULL,
	.read_optrom = qla8044_read_optrom_data,
	.write_optrom = qla8044_write_optrom_data,
	.get_flash_version = qla82xx_get_flash_version,
	.start_scsi = qla82xx_start_scsi,
	.start_scsi_mq = NULL,
	.abort_isp = qla8044_abort_isp,
	.iospace_config = qla82xx_iospace_config,
	.initialize_adapter = qla2x00_initialize_adapter,
};

static struct isp_operations qla83xx_isp_ops = {
	.pci_config = qla25xx_pci_config,
	.reset_chip = qla24xx_reset_chip,
	.chip_diag = qla24xx_chip_diag,
	.config_rings = qla24xx_config_rings,
	.reset_adapter = qla24xx_reset_adapter,
	.nvram_config = qla81xx_nvram_config,
	.update_fw_options = qla81xx_update_fw_options,
	.load_risc = qla81xx_load_risc,
	.pci_info_str = qla24xx_pci_info_str,
	.fw_version_str = qla24xx_fw_version_str,
	.intr_handler = qla24xx_intr_handler,
	.enable_intrs = qla24xx_enable_intrs,
	.disable_intrs = qla24xx_disable_intrs,
	.abort_command = qla24xx_abort_command,
	.target_reset = qla24xx_abort_target,
	.lun_reset = qla24xx_lun_reset,
	.fabric_login = qla24xx_login_fabric,
	.fabric_logout = qla24xx_fabric_logout,
	.calc_req_entries = NULL,
	.build_iocbs = NULL,
	.prep_ms_iocb = qla24xx_prep_ms_iocb,
	.prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
	.read_nvram = NULL,
	.write_nvram = NULL,
	.fw_dump = qla83xx_fw_dump,
	.beacon_on = qla24xx_beacon_on,
	.beacon_off = qla24xx_beacon_off,
	.beacon_blink = qla83xx_beacon_blink,
	.read_optrom = qla25xx_read_optrom_data,
	.write_optrom = qla24xx_write_optrom_data,
	.get_flash_version = qla24xx_get_flash_version,
	.start_scsi = qla24xx_dif_start_scsi,
	.start_scsi_mq = qla2xxx_dif_start_scsi_mq,
	.abort_isp = qla2x00_abort_isp,
	.iospace_config = qla83xx_iospace_config,
	.initialize_adapter = qla2x00_initialize_adapter,
};

static struct isp_operations qlafx00_isp_ops = {
	.pci_config = qlafx00_pci_config,
	.reset_chip = qlafx00_soft_reset,
	.chip_diag = qlafx00_chip_diag,
	.config_rings = qlafx00_config_rings,
	.reset_adapter = qlafx00_soft_reset,
	.nvram_config = NULL,
	.update_fw_options = NULL,
	.load_risc = NULL,
	.pci_info_str = qlafx00_pci_info_str,
	.fw_version_str = qlafx00_fw_version_str,
	.intr_handler = qlafx00_intr_handler,
	.enable_intrs = qlafx00_enable_intrs,
	.disable_intrs = qlafx00_disable_intrs,
	.abort_command = qla24xx_async_abort_command,
	.target_reset = qlafx00_abort_target,
	.lun_reset = qlafx00_lun_reset,
	.fabric_login = NULL,
	.fabric_logout = NULL,
	.calc_req_entries = NULL,
	.build_iocbs = NULL,
	.prep_ms_iocb = qla24xx_prep_ms_iocb,
	.prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
	.read_nvram = qla24xx_read_nvram_data,
	.write_nvram = qla24xx_write_nvram_data,
	.fw_dump = NULL,
	.beacon_on = qla24xx_beacon_on,
	.beacon_off = qla24xx_beacon_off,
	.beacon_blink = NULL,
	.read_optrom = qla24xx_read_optrom_data,
	.write_optrom = qla24xx_write_optrom_data,
	.get_flash_version = qla24xx_get_flash_version,
	.start_scsi = qlafx00_start_scsi,
	.start_scsi_mq = NULL,
	.abort_isp = qlafx00_abort_isp,
	.iospace_config = qlafx00_iospace_config,
	.initialize_adapter = qlafx00_initialize_adapter,
};

static struct isp_operations qla27xx_isp_ops = {
	.pci_config = qla25xx_pci_config,
	.reset_chip = qla24xx_reset_chip,
	.chip_diag = qla24xx_chip_diag,
	.config_rings = qla24xx_config_rings,
	.reset_adapter = qla24xx_reset_adapter,
	.nvram_config = qla81xx_nvram_config,
	.update_fw_options = qla81xx_update_fw_options,
	.load_risc = qla81xx_load_risc,
	.pci_info_str = qla24xx_pci_info_str,
	.fw_version_str = qla24xx_fw_version_str,
	.intr_handler = qla24xx_intr_handler,
	.enable_intrs = qla24xx_enable_intrs,
	.disable_intrs = qla24xx_disable_intrs,
	.abort_command = qla24xx_abort_command,
	.target_reset = qla24xx_abort_target,
	.lun_reset = qla24xx_lun_reset,
	.fabric_login = qla24xx_login_fabric,
	.fabric_logout = qla24xx_fabric_logout,
	.calc_req_entries = NULL,
	.build_iocbs = NULL,
	.prep_ms_iocb = qla24xx_prep_ms_iocb,
	.prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
	.read_nvram = NULL,
	.write_nvram = NULL,
	.fw_dump = qla27xx_fwdump,
	.beacon_on = qla24xx_beacon_on,
	.beacon_off = qla24xx_beacon_off,
	.beacon_blink = qla83xx_beacon_blink,
	.read_optrom = qla25xx_read_optrom_data,
	.write_optrom = qla24xx_write_optrom_data,
	.get_flash_version = qla24xx_get_flash_version,
	.start_scsi = qla24xx_dif_start_scsi,
	.start_scsi_mq = qla2xxx_dif_start_scsi_mq,
	.abort_isp = qla2x00_abort_isp,
	.iospace_config = qla83xx_iospace_config,
	.initialize_adapter = qla2x00_initialize_adapter,
};
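
/*
 * Derive the ISP family, feature flags (ZIO, FWI-2, iIDMA, T10-PI) and the
 * RISC firmware load address from the PCI device ID, then work out the
 * physical port number of this PCI function.
 */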
static inline void
qla2x00_set_isp_flags(struct qla_hw_data *ha)
{
	ha->device_type = DT_EXTENDED_IDS;
	switch (ha->pdev->device) {
	case PCI_DEVICE_ID_QLOGIC_ISP2100:
		ha->isp_type |= DT_ISP2100;
		ha->device_type &= ~DT_EXTENDED_IDS;
		ha->fw_srisc_address = RISC_START_ADDRESS_2100;
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP2200:
		ha->isp_type |= DT_ISP2200;
		ha->device_type &= ~DT_EXTENDED_IDS;
		ha->fw_srisc_address = RISC_START_ADDRESS_2100;
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP2300:
		ha->isp_type |= DT_ISP2300;
		ha->device_type |= DT_ZIO_SUPPORTED;
		ha->fw_srisc_address = RISC_START_ADDRESS_2300;
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP2312:
		ha->isp_type |= DT_ISP2312;
		ha->device_type |= DT_ZIO_SUPPORTED;
		ha->fw_srisc_address = RISC_START_ADDRESS_2300;
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP2322:
		ha->isp_type |= DT_ISP2322;
		ha->device_type |= DT_ZIO_SUPPORTED;
		if (ha->pdev->subsystem_vendor == 0x1028 &&
		    ha->pdev->subsystem_device == 0x0170)
			ha->device_type |= DT_OEM_001;
		ha->fw_srisc_address = RISC_START_ADDRESS_2300;
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP6312:
		ha->isp_type |= DT_ISP6312;
		ha->fw_srisc_address = RISC_START_ADDRESS_2300;
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP6322:
		ha->isp_type |= DT_ISP6322;
		ha->fw_srisc_address = RISC_START_ADDRESS_2300;
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP2422:
		ha->isp_type |= DT_ISP2422;
		ha->device_type |= DT_ZIO_SUPPORTED;
		ha->device_type |= DT_FWI2;
		ha->device_type |= DT_IIDMA;
		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP2432:
		ha->isp_type |= DT_ISP2432;
		ha->device_type |= DT_ZIO_SUPPORTED;
		ha->device_type |= DT_FWI2;
		ha->device_type |= DT_IIDMA;
		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP8432:
		ha->isp_type |= DT_ISP8432;
		ha->device_type |= DT_ZIO_SUPPORTED;
		ha->device_type |= DT_FWI2;
		ha->device_type |= DT_IIDMA;
		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP5422:
		ha->isp_type |= DT_ISP5422;
		ha->device_type |= DT_FWI2;
		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP5432:
		ha->isp_type |= DT_ISP5432;
		ha->device_type |= DT_FWI2;
		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP2532:
		ha->isp_type |= DT_ISP2532;
		ha->device_type |= DT_ZIO_SUPPORTED;
		ha->device_type |= DT_FWI2;
		ha->device_type |= DT_IIDMA;
		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP8001:
		ha->isp_type |= DT_ISP8001;
		ha->device_type |= DT_ZIO_SUPPORTED;
		ha->device_type |= DT_FWI2;
		ha->device_type |= DT_IIDMA;
		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP8021:
		ha->isp_type |= DT_ISP8021;
		ha->device_type |= DT_ZIO_SUPPORTED;
		ha->device_type |= DT_FWI2;
		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
		/* Initialize 82XX ISP flags */
		qla82xx_init_flags(ha);
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP8044:
		ha->isp_type |= DT_ISP8044;
		ha->device_type |= DT_ZIO_SUPPORTED;
		ha->device_type |= DT_FWI2;
		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
		/* Initialize 82XX ISP flags */
		qla82xx_init_flags(ha);
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP2031:
		ha->isp_type |= DT_ISP2031;
		ha->device_type |= DT_ZIO_SUPPORTED;
		ha->device_type |= DT_FWI2;
		ha->device_type |= DT_IIDMA;
		ha->device_type |= DT_T10_PI;
		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP8031:
		ha->isp_type |= DT_ISP8031;
		ha->device_type |= DT_ZIO_SUPPORTED;
		ha->device_type |= DT_FWI2;
		ha->device_type |= DT_IIDMA;
		ha->device_type |= DT_T10_PI;
		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
		break;
	case PCI_DEVICE_ID_QLOGIC_ISPF001:
		ha->isp_type |= DT_ISPFX00;
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP2071:
		ha->isp_type |= DT_ISP2071;
		ha->device_type |= DT_ZIO_SUPPORTED;
		ha->device_type |= DT_FWI2;
		ha->device_type |= DT_IIDMA;
		ha->device_type |= DT_T10_PI;
		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP2271:
		ha->isp_type |= DT_ISP2271;
		ha->device_type |= DT_ZIO_SUPPORTED;
		ha->device_type |= DT_FWI2;
		ha->device_type |= DT_IIDMA;
		ha->device_type |= DT_T10_PI;
		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP2261:
		ha->isp_type |= DT_ISP2261;
		ha->device_type |= DT_ZIO_SUPPORTED;
		ha->device_type |= DT_FWI2;
		ha->device_type |= DT_IIDMA;
		ha->device_type |= DT_T10_PI;
		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
		break;
	}

	if (IS_QLA82XX(ha))
		ha->port_no = ha->portnum & 1;
	else {
		/* Get adapter physical port no from interrupt pin register. */
		pci_read_config_byte(ha->pdev, PCI_INTERRUPT_PIN, &ha->port_no);
		if (IS_QLA27XX(ha))
			ha->port_no--;
		else
			ha->port_no = !(ha->port_no & 1);
	}

	ql_dbg_pci(ql_dbg_init, ha->pdev, 0x000b,
	    "device_type=0x%x port=%d fw_srisc_address=0x%x.\n",
	    ha->device_type, ha->port_no, ha->fw_srisc_address);
}

static void
qla2xxx_scan_start(struct Scsi_Host *shost)
{
	scsi_qla_host_t *vha = shost_priv(shost);

	if (vha->hw->flags.running_gold_fw)
		return;

	set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
	set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
	set_bit(RSCN_UPDATE, &vha->dpc_flags);
	set_bit(NPIV_CONFIG_NEEDED, &vha->dpc_flags);
}

static int
qla2xxx_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	scsi_qla_host_t *vha = shost_priv(shost);

	if (test_bit(UNLOADING, &vha->dpc_flags))
		return 1;
	if (!vha->host)
		return 1;
	if (time > vha->hw->loop_reset_delay * HZ)
		return 1;

	return atomic_read(&vha->loop_state) == LOOP_READY;
}

static void qla2x00_iocb_work_fn(struct work_struct *work)
{
	struct scsi_qla_host *vha = container_of(work,
	    struct scsi_qla_host, iocb_work);
	int cnt = 0;

	while (!list_empty(&vha->work_list)) {
		qla2x00_do_work(vha);
		cnt++;
		if (cnt > 10)
			break;
	}
}

/*
 * PCI driver interface
 */
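/*
 * qla2x00_probe_one() brings a newly discovered HBA online: enable the PCI
 * device, size the ISP-specific parameters, map the register windows,
 * allocate the request/response rings and host structures, initialize the
 * adapter and finally register the SCSI host with the midlayer.
 */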
static int
qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int ret = -ENODEV;
	struct Scsi_Host *host;
	scsi_qla_host_t *base_vha = NULL;
	struct qla_hw_data *ha;
	char pci_info[30];
	char fw_str[30], wq_name[30];
	struct scsi_host_template *sht;
	int bars, mem_only = 0;
	uint16_t req_length = 0, rsp_length = 0;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	int i;

	bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
	sht = &qla2xxx_driver_template;
	if (pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2422 ||
	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2432 ||
	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8432 ||
	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5422 ||
	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5432 ||
	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2532 ||
	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8001 ||
	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8021 ||
	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2031 ||
	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8031 ||
	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISPF001 ||
	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8044 ||
	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2071 ||
	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2271 ||
	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2261) {
		bars = pci_select_bars(pdev, IORESOURCE_MEM);
		mem_only = 1;
		ql_dbg_pci(ql_dbg_init, pdev, 0x0007,
		    "Mem only adapter.\n");
	}
	ql_dbg_pci(ql_dbg_init, pdev, 0x0008,
	    "Bars=%d.\n", bars);

	if (mem_only) {
		if (pci_enable_device_mem(pdev))
			return ret;
	} else {
		if (pci_enable_device(pdev))
			return ret;
	}

	/* This may fail but that's ok */
	pci_enable_pcie_error_reporting(pdev);

	ha = kzalloc(sizeof(struct qla_hw_data), GFP_KERNEL);
	if (!ha) {
		ql_log_pci(ql_log_fatal, pdev, 0x0009,
		    "Unable to allocate memory for ha.\n");
		goto disable_device;
	}
	ql_dbg_pci(ql_dbg_init, pdev, 0x000a,
	    "Memory allocated for ha=%p.\n", ha);
	ha->pdev = pdev;
	ha->tgt.enable_class_2 = ql2xenableclass2;
	INIT_LIST_HEAD(&ha->tgt.q_full_list);
	spin_lock_init(&ha->tgt.q_full_lock);
	spin_lock_init(&ha->tgt.sess_lock);
	spin_lock_init(&ha->tgt.atio_lock);

	/* Clear our data area */
	ha->bars = bars;
	ha->mem_only = mem_only;
	spin_lock_init(&ha->hardware_lock);
	spin_lock_init(&ha->vport_slock);
	mutex_init(&ha->selflogin_lock);
	mutex_init(&ha->optrom_mutex);

	/* Set ISP-type information. */
	qla2x00_set_isp_flags(ha);

	/* Set EEH reset type to fundamental if required by hba */
	if (IS_QLA24XX(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha) ||
	    IS_QLA83XX(ha) || IS_QLA27XX(ha))
		pdev->needs_freset = 1;

	ha->prev_topology = 0;
	ha->init_cb_size = sizeof(init_cb_t);
	ha->link_data_rate = PORT_SPEED_UNKNOWN;
	ha->optrom_size = OPTROM_SIZE_2300;

	/* Assign ISP specific operations. */
	if (IS_QLA2100(ha)) {
		ha->max_fibre_devices = MAX_FIBRE_DEVICES_2100;
		ha->mbx_count = MAILBOX_REGISTER_COUNT_2100;
		req_length = REQUEST_ENTRY_CNT_2100;
		rsp_length = RESPONSE_ENTRY_CNT_2100;
		ha->max_loop_id = SNS_LAST_LOOP_ID_2100;
		ha->gid_list_info_size = 4;
		ha->flash_conf_off = ~0;
		ha->flash_data_off = ~0;
		ha->nvram_conf_off = ~0;
		ha->nvram_data_off = ~0;
		ha->isp_ops = &qla2100_isp_ops;
	} else if (IS_QLA2200(ha)) {
		ha->max_fibre_devices = MAX_FIBRE_DEVICES_2100;
		ha->mbx_count = MAILBOX_REGISTER_COUNT_2200;
		req_length = REQUEST_ENTRY_CNT_2200;
		rsp_length = RESPONSE_ENTRY_CNT_2100;
		ha->max_loop_id = SNS_LAST_LOOP_ID_2100;
		ha->gid_list_info_size = 4;
		ha->flash_conf_off = ~0;
		ha->flash_data_off = ~0;
		ha->nvram_conf_off = ~0;
		ha->nvram_data_off = ~0;
		ha->isp_ops = &qla2100_isp_ops;
	} else if (IS_QLA23XX(ha)) {
		ha->max_fibre_devices = MAX_FIBRE_DEVICES_2100;
		ha->mbx_count = MAILBOX_REGISTER_COUNT;
		req_length = REQUEST_ENTRY_CNT_2200;
		rsp_length = RESPONSE_ENTRY_CNT_2300;
		ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
		ha->gid_list_info_size = 6;
		if (IS_QLA2322(ha) || IS_QLA6322(ha))
			ha->optrom_size = OPTROM_SIZE_2322;
		ha->flash_conf_off = ~0;
		ha->flash_data_off = ~0;
		ha->nvram_conf_off = ~0;
		ha->nvram_data_off = ~0;
		ha->isp_ops = &qla2300_isp_ops;
	} else if (IS_QLA24XX_TYPE(ha)) {
		ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
		ha->mbx_count = MAILBOX_REGISTER_COUNT;
		req_length = REQUEST_ENTRY_CNT_24XX;
		rsp_length = RESPONSE_ENTRY_CNT_2300;
		ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
		ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
		ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
		ha->gid_list_info_size = 8;
		ha->optrom_size = OPTROM_SIZE_24XX;
		ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA24XX;
		ha->isp_ops = &qla24xx_isp_ops;
		ha->flash_conf_off = FARX_ACCESS_FLASH_CONF;
		ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
		ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
		ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
	} else if (IS_QLA25XX(ha)) {
		ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
		ha->mbx_count = MAILBOX_REGISTER_COUNT;
		req_length = REQUEST_ENTRY_CNT_24XX;
		rsp_length = RESPONSE_ENTRY_CNT_2300;
		ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
		ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
		ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
		ha->gid_list_info_size = 8;
		ha->optrom_size = OPTROM_SIZE_25XX;
		ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
		ha->isp_ops = &qla25xx_isp_ops;
		ha->flash_conf_off = FARX_ACCESS_FLASH_CONF;
		ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
		ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
		ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
	} else if (IS_QLA81XX(ha)) {
		ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
		ha->mbx_count = MAILBOX_REGISTER_COUNT;
		req_length = REQUEST_ENTRY_CNT_24XX;
		rsp_length = RESPONSE_ENTRY_CNT_2300;
		ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
		ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
		ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
		ha->gid_list_info_size = 8;
		ha->optrom_size = OPTROM_SIZE_81XX;
		ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
		ha->isp_ops = &qla81xx_isp_ops;
		ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX;
		ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX;
		ha->nvram_conf_off = ~0;
		ha->nvram_data_off = ~0;
	} else if (IS_QLA82XX(ha)) {
		ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
		ha->mbx_count = MAILBOX_REGISTER_COUNT;
		req_length = REQUEST_ENTRY_CNT_82XX;
		rsp_length = RESPONSE_ENTRY_CNT_82XX;
		ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
		ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
		ha->gid_list_info_size = 8;
		ha->optrom_size = OPTROM_SIZE_82XX;
		ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
		ha->isp_ops = &qla82xx_isp_ops;
		ha->flash_conf_off = FARX_ACCESS_FLASH_CONF;
		ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
		ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
		ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
	} else if (IS_QLA8044(ha)) {
		ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
		ha->mbx_count = MAILBOX_REGISTER_COUNT;
		req_length = REQUEST_ENTRY_CNT_82XX;
		rsp_length = RESPONSE_ENTRY_CNT_82XX;
		ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
		ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
		ha->gid_list_info_size = 8;
		ha->optrom_size = OPTROM_SIZE_83XX;
		ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
		ha->isp_ops = &qla8044_isp_ops;
		ha->flash_conf_off = FARX_ACCESS_FLASH_CONF;
		ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
		ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
		ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
	} else if (IS_QLA83XX(ha)) {
		ha->portnum = PCI_FUNC(ha->pdev->devfn);
		ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
		ha->mbx_count = MAILBOX_REGISTER_COUNT;
		req_length = REQUEST_ENTRY_CNT_83XX;
		rsp_length = RESPONSE_ENTRY_CNT_83XX;
		ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
		ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
		ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
		ha->gid_list_info_size = 8;
		ha->optrom_size = OPTROM_SIZE_83XX;
		ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
		ha->isp_ops = &qla83xx_isp_ops;
		ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX;
		ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX;
		ha->nvram_conf_off = ~0;
		ha->nvram_data_off = ~0;
	} else if (IS_QLAFX00(ha)) {
		ha->max_fibre_devices = MAX_FIBRE_DEVICES_FX00;
		ha->mbx_count = MAILBOX_REGISTER_COUNT_FX00;
		ha->aen_mbx_count = AEN_MAILBOX_REGISTER_COUNT_FX00;
		req_length = REQUEST_ENTRY_CNT_FX00;
		rsp_length = RESPONSE_ENTRY_CNT_FX00;
		ha->isp_ops = &qlafx00_isp_ops;
		ha->port_down_retry_count = 30; /* default value */
		ha->mr.fw_hbt_cnt = QLAFX00_HEARTBEAT_INTERVAL;
		ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL;
		ha->mr.fw_critemp_timer_tick = QLAFX00_CRITEMP_INTERVAL;
		ha->mr.fw_hbt_en = 1;
		ha->mr.host_info_resend = false;
		ha->mr.hinfo_resend_timer_tick = QLAFX00_HINFO_RESEND_INTERVAL;
	} else if (IS_QLA27XX(ha)) {
		ha->portnum = PCI_FUNC(ha->pdev->devfn);
		ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
		ha->mbx_count = MAILBOX_REGISTER_COUNT;
		req_length = REQUEST_ENTRY_CNT_83XX;
		rsp_length = RESPONSE_ENTRY_CNT_83XX;
		ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
		ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
		ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
		ha->gid_list_info_size = 8;
		ha->optrom_size = OPTROM_SIZE_83XX;
		ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
		ha->isp_ops = &qla27xx_isp_ops;
		ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX;
		ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX;
		ha->nvram_conf_off = ~0;
		ha->nvram_data_off = ~0;
	}

	ql_dbg_pci(ql_dbg_init, pdev, 0x001e,
	    "mbx_count=%d, req_length=%d, "
	    "rsp_length=%d, max_loop_id=%d, init_cb_size=%d, "
	    "gid_list_info_size=%d, optrom_size=%d, nvram_npiv_size=%d, "
	    "max_fibre_devices=%d.\n",
	    ha->mbx_count, req_length, rsp_length, ha->max_loop_id,
	    ha->init_cb_size, ha->gid_list_info_size, ha->optrom_size,
	    ha->nvram_npiv_size, ha->max_fibre_devices);
	ql_dbg_pci(ql_dbg_init, pdev, 0x001f,
	    "isp_ops=%p, flash_conf_off=%d, "
	    "flash_data_off=%d, nvram_conf_off=%d, nvram_data_off=%d.\n",
	    ha->isp_ops, ha->flash_conf_off, ha->flash_data_off,
	    ha->nvram_conf_off, ha->nvram_data_off);

	/* Configure PCI I/O space */
	ret = ha->isp_ops->iospace_config(ha);
	if (ret)
		goto iospace_config_failed;

	ql_log_pci(ql_log_info, pdev, 0x001d,
	    "Found an ISP%04X irq %d iobase 0x%p.\n",
	    pdev->device, pdev->irq, ha->iobase);
	mutex_init(&ha->vport_lock);
	mutex_init(&ha->mq_lock);
	init_completion(&ha->mbx_cmd_comp);
	complete(&ha->mbx_cmd_comp);
	init_completion(&ha->mbx_intr_comp);
	init_completion(&ha->dcbx_comp);
	init_completion(&ha->lb_portup_comp);

	set_bit(0, (unsigned long *) ha->vp_idx_map);

	qla2x00_config_dma_addressing(ha);
	ql_dbg_pci(ql_dbg_init, pdev, 0x0020,
	    "64 Bit addressing is %s.\n",
	    ha->flags.enable_64bit_addressing ? "enabled" : "disabled");
	ret = qla2x00_mem_alloc(ha, req_length, rsp_length, &req, &rsp);
	if (ret) {
		ql_log_pci(ql_log_fatal, pdev, 0x0031,
		    "Failed to allocate memory for adapter, aborting.\n");

		goto probe_hw_failed;
	}

	req->max_q_depth = MAX_Q_DEPTH;
	if (ql2xmaxqdepth != 0 && ql2xmaxqdepth <= 0xffffU)
		req->max_q_depth = ql2xmaxqdepth;

	base_vha = qla2x00_create_host(sht, ha);
	if (!base_vha) {
		ret = -ENOMEM;
		qla2x00_mem_free(ha);
		qla2x00_free_req_que(ha, req);
		qla2x00_free_rsp_que(ha, rsp);
		goto probe_hw_failed;
	}

	pci_set_drvdata(pdev, base_vha);
	set_bit(PFLG_DRIVER_PROBING, &base_vha->pci_flags);

	host = base_vha->host;
	base_vha->req = req;
	if (IS_QLA2XXX_MIDTYPE(ha))
		base_vha->mgmt_svr_loop_id = 10 + base_vha->vp_idx;
	else
		base_vha->mgmt_svr_loop_id = MANAGEMENT_SERVER +
		    base_vha->vp_idx;

	/* Setup fcport template structure. */
	ha->mr.fcport.vha = base_vha;
	ha->mr.fcport.port_type = FCT_UNKNOWN;
	ha->mr.fcport.loop_id = FC_NO_LOOP_ID;
	qla2x00_set_fcport_state(&ha->mr.fcport, FCS_UNCONFIGURED);
	ha->mr.fcport.supported_classes = FC_COS_UNSPECIFIED;
	ha->mr.fcport.scan_state = 1;

	/* Set the SG table size based on ISP type */
	if (!IS_FWI2_CAPABLE(ha)) {
		if (IS_QLA2100(ha))
			host->sg_tablesize = 32;
	} else {
		if (!IS_QLA82XX(ha))
			host->sg_tablesize = QLA_SG_ALL;
	}
	host->max_id = ha->max_fibre_devices;
	host->cmd_per_lun = 3;
	host->unique_id = host->host_no;
	if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)
		host->max_cmd_len = 32;
	else
		host->max_cmd_len = MAX_CMDSZ;
	host->max_channel = MAX_BUSES - 1;
	/* Older HBAs support only 16-bit LUNs */
	if (!IS_QLAFX00(ha) && !IS_FWI2_CAPABLE(ha) &&
	    ql2xmaxlun > 0xffff)
		host->max_lun = 0xffff;
	else
		host->max_lun = ql2xmaxlun;
	host->transportt = qla2xxx_transport_template;
	sht->vendor_id = (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC);

	ql_dbg(ql_dbg_init, base_vha, 0x0033,
	    "max_id=%d this_id=%d "
	    "cmd_per_lun=%d unique_id=%d max_cmd_len=%d max_channel=%d "
	    "max_lun=%llu transportt=%p, vendor_id=%llu.\n", host->max_id,
	    host->this_id, host->cmd_per_lun, host->unique_id,
	    host->max_cmd_len, host->max_channel, host->max_lun,
	    host->transportt, sht->vendor_id);

	/* Set up the irqs */
	ret = qla2x00_request_irqs(ha, rsp);
	if (ret)
		goto probe_init_failed;

	/* Alloc arrays of request and response ring ptrs */
	if (!qla2x00_alloc_queues(ha, req, rsp)) {
		ql_log(ql_log_fatal, base_vha, 0x003d,
		    "Failed to allocate memory for queue pointers, aborting.\n");
		goto probe_init_failed;
	}

	if (ha->mqenable && shost_use_blk_mq(host)) {
		/* number of hardware queues supported by blk/scsi-mq */
		host->nr_hw_queues = ha->max_qpairs;

		ql_dbg(ql_dbg_init, base_vha, 0x0192,
		    "blk/scsi-mq enabled, HW queues = %d.\n", host->nr_hw_queues);
	} else
		ql_dbg(ql_dbg_init, base_vha, 0x0193,
		    "blk/scsi-mq disabled.\n");

	qlt_probe_one_stage1(base_vha, ha);

	pci_save_state(pdev);

	/* Assign back pointers */
	rsp->req = req;
	req->rsp = rsp;

	if (IS_QLAFX00(ha)) {
		ha->rsp_q_map[0] = rsp;
		ha->req_q_map[0] = req;
		set_bit(0, ha->req_qid_map);
		set_bit(0, ha->rsp_qid_map);
	}

	/*
	 * Default to the FWI2 queue register layout; remapped below for
	 * MQ-capable, FX00 and P3P adapters.
	 */
	req->req_q_in = &ha->iobase->isp24.req_q_in;
	req->req_q_out = &ha->iobase->isp24.req_q_out;
	rsp->rsp_q_in = &ha->iobase->isp24.rsp_q_in;
	rsp->rsp_q_out = &ha->iobase->isp24.rsp_q_out;
	if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
		req->req_q_in = &ha->mqiobase->isp25mq.req_q_in;
		req->req_q_out = &ha->mqiobase->isp25mq.req_q_out;
		rsp->rsp_q_in = &ha->mqiobase->isp25mq.rsp_q_in;
		rsp->rsp_q_out = &ha->mqiobase->isp25mq.rsp_q_out;
	}

	if (IS_QLAFX00(ha)) {
		req->req_q_in = &ha->iobase->ispfx00.req_q_in;
		req->req_q_out = &ha->iobase->ispfx00.req_q_out;
		rsp->rsp_q_in = &ha->iobase->ispfx00.rsp_q_in;
		rsp->rsp_q_out = &ha->iobase->ispfx00.rsp_q_out;
	}

	if (IS_P3P_TYPE(ha)) {
		req->req_q_out = &ha->iobase->isp82.req_q_out[0];
		rsp->rsp_q_in = &ha->iobase->isp82.rsp_q_in[0];
		rsp->rsp_q_out = &ha->iobase->isp82.rsp_q_out[0];
	}

	ql_dbg(ql_dbg_multiq, base_vha, 0xc009,
	    "rsp_q_map=%p req_q_map=%p rsp->req=%p req->rsp=%p.\n",
	    ha->rsp_q_map, ha->req_q_map, rsp->req, req->rsp);
	ql_dbg(ql_dbg_multiq, base_vha, 0xc00a,
	    "req->req_q_in=%p req->req_q_out=%p "
	    "rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n",
	    req->req_q_in, req->req_q_out,
	    rsp->rsp_q_in, rsp->rsp_q_out);
	ql_dbg(ql_dbg_init, base_vha, 0x003e,
	    "rsp_q_map=%p req_q_map=%p rsp->req=%p req->rsp=%p.\n",
	    ha->rsp_q_map, ha->req_q_map, rsp->req, req->rsp);
	ql_dbg(ql_dbg_init, base_vha, 0x003f,
	    "req->req_q_in=%p req->req_q_out=%p rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n",
	    req->req_q_in, req->req_q_out, rsp->rsp_q_in, rsp->rsp_q_out);

	if (ha->isp_ops->initialize_adapter(base_vha)) {
		ql_log(ql_log_fatal, base_vha, 0x00d6,
		    "Failed to initialize adapter - Adapter flags %x.\n",
		    base_vha->device_flags);

		if (IS_QLA82XX(ha)) {
			qla82xx_idc_lock(ha);
			qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
			    QLA8XXX_DEV_FAILED);
			qla82xx_idc_unlock(ha);
			ql_log(ql_log_fatal, base_vha, 0x00d7,
			    "HW State: FAILED.\n");
		} else if (IS_QLA8044(ha)) {
			qla8044_idc_lock(ha);
			qla8044_wr_direct(base_vha,
			    QLA8044_CRB_DEV_STATE_INDEX,
			    QLA8XXX_DEV_FAILED);
			qla8044_idc_unlock(ha);
			ql_log(ql_log_fatal, base_vha, 0x0150,
			    "HW State: FAILED.\n");
		}

		ret = -ENODEV;
		goto probe_failed;
	}

	if (IS_QLAFX00(ha))
		host->can_queue = QLAFX00_MAX_CANQUEUE;
	else
		host->can_queue = req->num_outstanding_cmds - 10;

	ql_dbg(ql_dbg_init, base_vha, 0x0032,
	    "can_queue=%d, req=%p, mgmt_svr_loop_id=%d, sg_tablesize=%d.\n",
	    host->can_queue, base_vha->req,
	    base_vha->mgmt_svr_loop_id, host->sg_tablesize);

	if (ha->mqenable && qla_ini_mode_enabled(base_vha)) {
		ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 1);
		/* Create start of day qpairs for Block MQ */
		if (shost_use_blk_mq(host)) {
			for (i = 0; i < ha->max_qpairs; i++)
				qla2xxx_create_qpair(base_vha, 5, 0);
		}
	}

	if (ha->flags.running_gold_fw)
		goto skip_dpc;

	/*
	 * Startup the kernel thread for this host adapter
	 */
	ha->dpc_thread = kthread_create(qla2x00_do_dpc, ha,
	    "%s_dpc", base_vha->host_str);
	if (IS_ERR(ha->dpc_thread)) {
		ql_log(ql_log_fatal, base_vha, 0x00ed,
		    "Failed to start DPC thread.\n");
		ret = PTR_ERR(ha->dpc_thread);
		goto probe_failed;
	}
	ql_dbg(ql_dbg_init, base_vha, 0x00ee,
	    "DPC thread started successfully.\n");

	/*
	 * If we're not coming up in initiator mode, we might sit for
	 * a while without waking up the dpc thread, which leads to a
	 * stuck process warning.  So just kick the dpc once here and
	 * let the kthread start (and go back to sleep in qla2x00_do_dpc).
	 */
	qla2xxx_wake_dpc(base_vha);

	INIT_WORK(&base_vha->iocb_work, qla2x00_iocb_work_fn);
	INIT_WORK(&ha->board_disable, qla2x00_disable_board_on_pci_error);

	if (IS_QLA8031(ha) || IS_MCTP_CAPABLE(ha)) {
		sprintf(wq_name, "qla2xxx_%lu_dpc_lp_wq", base_vha->host_no);
		ha->dpc_lp_wq = create_singlethread_workqueue(wq_name);
		INIT_WORK(&ha->idc_aen, qla83xx_service_idc_aen);

		sprintf(wq_name, "qla2xxx_%lu_dpc_hp_wq", base_vha->host_no);
		ha->dpc_hp_wq = create_singlethread_workqueue(wq_name);
		INIT_WORK(&ha->nic_core_reset, qla83xx_nic_core_reset_work);
		INIT_WORK(&ha->idc_state_handler,
		    qla83xx_idc_state_handler_work);
		INIT_WORK(&ha->nic_core_unrecoverable,
		    qla83xx_nic_core_unrecoverable_work);
	}

skip_dpc:
	list_add_tail(&base_vha->list, &ha->vp_list);
	base_vha->host->irq = ha->pdev->irq;

	/* Initialize the timer. */
	qla2x00_start_timer(base_vha, qla2x00_timer, WATCH_INTERVAL);
	ql_dbg(ql_dbg_init, base_vha, 0x00ef,
	    "Started qla2x00_timer with interval=%d.\n", WATCH_INTERVAL);
	ql_dbg(ql_dbg_init, base_vha, 0x00f0,
	    "Detected hba at address=%p.\n", ha);

	if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
		if (ha->fw_attributes & BIT_4) {
			int prot = 0, guard;

			base_vha->flags.difdix_supported = 1;
			ql_dbg(ql_dbg_init, base_vha, 0x00f1,
			    "Registering for DIF/DIX type 1 and 3 protection.\n");
			if (ql2xenabledif == 1)
				prot = SHOST_DIX_TYPE0_PROTECTION;
			scsi_host_set_prot(host,
			    prot | SHOST_DIF_TYPE1_PROTECTION
			    | SHOST_DIF_TYPE2_PROTECTION
			    | SHOST_DIF_TYPE3_PROTECTION
			    | SHOST_DIX_TYPE1_PROTECTION
			    | SHOST_DIX_TYPE2_PROTECTION
			    | SHOST_DIX_TYPE3_PROTECTION);

			guard = SHOST_DIX_GUARD_CRC;

			if (IS_PI_IPGUARD_CAPABLE(ha) &&
			    (ql2xenabledif > 1 || IS_PI_DIFB_DIX0_CAPABLE(ha)))
				guard |= SHOST_DIX_GUARD_IP;

			scsi_host_set_guard(host, guard);
		} else
			base_vha->flags.difdix_supported = 0;
	}

	ha->isp_ops->enable_intrs(ha);

	if (IS_QLAFX00(ha)) {
		ret = qlafx00_fx_disc(base_vha,
		    &base_vha->hw->mr.fcport, FXDISC_GET_CONFIG_INFO);
		host->sg_tablesize = (ha->mr.extended_io_enabled) ?
		    QLA_SG_ALL : 128;
	}

	ret = scsi_add_host(host, &pdev->dev);
	if (ret)
		goto probe_failed;

	base_vha->flags.init_done = 1;
	base_vha->flags.online = 1;
	ha->prev_minidump_failed = 0;

	ql_dbg(ql_dbg_init, base_vha, 0x00f2,
	    "Init done and hba is online.\n");

	if (qla_ini_mode_enabled(base_vha) ||
	    qla_dual_mode_enabled(base_vha))
		scsi_scan_host(host);
	else
		ql_dbg(ql_dbg_init, base_vha, 0x0122,
		    "skipping scsi_scan_host() for non-initiator port\n");

	qla2x00_alloc_sysfs_attr(base_vha);

	if (IS_QLAFX00(ha)) {
		ret = qlafx00_fx_disc(base_vha,
		    &base_vha->hw->mr.fcport, FXDISC_GET_PORT_INFO);

		/* Register system information */
		ret = qlafx00_fx_disc(base_vha,
		    &base_vha->hw->mr.fcport, FXDISC_REG_HOST_INFO);
	}

	qla2x00_init_host_attr(base_vha);

	qla2x00_dfs_setup(base_vha);

	ql_log(ql_log_info, base_vha, 0x00fb,
	    "QLogic %s - %s.\n", ha->model_number, ha->model_desc);
	ql_log(ql_log_info, base_vha, 0x00fc,
	    "ISP%04X: %s @ %s hdma%c host#=%ld fw=%s.\n",
	    pdev->device, ha->isp_ops->pci_info_str(base_vha, pci_info),
	    pci_name(pdev), ha->flags.enable_64bit_addressing ? '+' : '-',
	    base_vha->host_no,
	    ha->isp_ops->fw_version_str(base_vha, fw_str, sizeof(fw_str)));

	qlt_add_target(ha, base_vha);

	clear_bit(PFLG_DRIVER_PROBING, &base_vha->pci_flags);

	if (test_bit(UNLOADING, &base_vha->dpc_flags))
		return -ENODEV;

	return 0;

probe_init_failed:
	qla2x00_free_req_que(ha, req);
	ha->req_q_map[0] = NULL;
	clear_bit(0, ha->req_qid_map);
	qla2x00_free_rsp_que(ha, rsp);
	ha->rsp_q_map[0] = NULL;
	clear_bit(0, ha->rsp_qid_map);
	ha->max_req_queues = ha->max_rsp_queues = 0;

probe_failed:
	if (base_vha->timer_active)
		qla2x00_stop_timer(base_vha);
	base_vha->flags.online = 0;
	if (ha->dpc_thread) {
		struct task_struct *t = ha->dpc_thread;

		ha->dpc_thread = NULL;
		kthread_stop(t);
	}

	qla2x00_free_device(base_vha);

	scsi_host_put(base_vha->host);

probe_hw_failed:
	qla2x00_clear_drv_active(ha);

iospace_config_failed:
	if (IS_P3P_TYPE(ha)) {
		if (ha->nx_pcibase)
			iounmap((device_reg_t *)ha->nx_pcibase);
		if (!ql2xdbwr)
			iounmap((device_reg_t *)ha->nxdb_wr_ptr);
	} else {
		if (ha->iobase)
			iounmap(ha->iobase);
		if (ha->cregbase)
			iounmap(ha->cregbase);
	}
	pci_release_selected_regions(ha->pdev, ha->bars);
	kfree(ha);

disable_device:
	pci_disable_device(pdev);
	return ret;
}
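
/*
 * qla2x00_shutdown() quiesces the adapter on system shutdown: notify FX00
 * firmware, turn off the FCE/EFT traces, stop the firmware, mask interrupts
 * and disable the PCI device.
 */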
static void
qla2x00_shutdown(struct pci_dev *pdev)
{
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;

	if (!atomic_read(&pdev->enable_cnt))
		return;

	vha = pci_get_drvdata(pdev);
	ha = vha->hw;

	/* Notify ISPFX00 firmware */
	if (IS_QLAFX00(ha))
		qlafx00_driver_shutdown(vha, 20);

	/* Turn-off FCE trace */
	if (ha->flags.fce_enabled) {
		qla2x00_disable_fce_trace(vha, NULL, NULL);
		ha->flags.fce_enabled = 0;
	}

	/* Turn-off EFT trace */
	if (ha->eft)
		qla2x00_disable_eft_trace(vha);

	/* Stop currently executing firmware. */
	qla2x00_try_to_stop_firmware(vha);

	/* Turn adapter off line */
	vha->flags.online = 0;

	/* turn-off interrupts on the card */
	if (ha->interrupts_on) {
		vha->flags.init_done = 0;
		ha->isp_ops->disable_intrs(ha);
	}

	qla2x00_free_irqs(vha);

	qla2x00_free_fw_dump(ha);

	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
}

/* Deletes all the virtual ports for a given ha */
static void
qla2x00_delete_all_vps(struct qla_hw_data *ha, scsi_qla_host_t *base_vha)
{
	scsi_qla_host_t *vha;
	unsigned long flags;

	mutex_lock(&ha->vport_lock);
	while (ha->cur_vport_count) {
		spin_lock_irqsave(&ha->vport_slock, flags);

		BUG_ON(base_vha->list.next == &ha->vp_list);
		/* This assumes first entry in ha->vp_list is always base vha */
		vha = list_first_entry(&base_vha->list, scsi_qla_host_t, list);
		scsi_host_get(vha->host);

		spin_unlock_irqrestore(&ha->vport_slock, flags);
		mutex_unlock(&ha->vport_lock);

		fc_vport_terminate(vha->fc_vport);
		scsi_host_put(vha->host);

		mutex_lock(&ha->vport_lock);
	}
	mutex_unlock(&ha->vport_lock);
}

/* Stops all deferred work threads */
static void
qla2x00_destroy_deferred_work(struct qla_hw_data *ha)
{
	/* Cancel all work and destroy DPC workqueues */
	if (ha->dpc_lp_wq) {
		cancel_work_sync(&ha->idc_aen);
		destroy_workqueue(ha->dpc_lp_wq);
		ha->dpc_lp_wq = NULL;
	}

	if (ha->dpc_hp_wq) {
		cancel_work_sync(&ha->nic_core_reset);
		cancel_work_sync(&ha->idc_state_handler);
		cancel_work_sync(&ha->nic_core_unrecoverable);
		destroy_workqueue(ha->dpc_hp_wq);
		ha->dpc_hp_wq = NULL;
	}

	/* Kill the kernel thread for this host */
	if (ha->dpc_thread) {
		struct task_struct *t = ha->dpc_thread;

		/*
		 * qla2xxx_wake_dpc checks for ->dpc_thread
		 * so we need to zero it out.
		 */
		ha->dpc_thread = NULL;
		kthread_stop(t);
	}
}

static void
qla2x00_unmap_iobases(struct qla_hw_data *ha)
{
	if (IS_QLA82XX(ha)) {
		iounmap((device_reg_t *)ha->nx_pcibase);
		if (!ql2xdbwr)
			iounmap((device_reg_t *)ha->nxdb_wr_ptr);
	} else {
		if (ha->iobase)
			iounmap(ha->iobase);

		if (ha->cregbase)
			iounmap(ha->cregbase);

		if (ha->mqiobase)
			iounmap(ha->mqiobase);

		if ((IS_QLA83XX(ha) || IS_QLA27XX(ha)) && ha->msixbase)
			iounmap(ha->msixbase);
	}
}

static void
qla2x00_clear_drv_active(struct qla_hw_data *ha)
{
	if (IS_QLA8044(ha)) {
		qla8044_idc_lock(ha);
		qla8044_clear_drv_active(ha);
		qla8044_idc_unlock(ha);
	} else if (IS_QLA82XX(ha)) {
		qla82xx_idc_lock(ha);
		qla82xx_clear_drv_active(ha);
		qla82xx_idc_unlock(ha);
	}
}
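
/*
 * qla2x00_remove_one() is the inverse of probe: it tears down virtual
 * ports, deferred work, sysfs nodes and the SCSI host, then frees the
 * device and releases the PCI resources.
 */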
static void
qla2x00_remove_one(struct pci_dev *pdev)
{
	scsi_qla_host_t *base_vha;
	struct qla_hw_data *ha;

	base_vha = pci_get_drvdata(pdev);
	ha = base_vha->hw;

	/*
	 * Indicate device removal to prevent future board_disable and wait
	 * until any pending board_disable has completed.
	 */
	set_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags);
	cancel_work_sync(&ha->board_disable);

	/*
	 * If the PCI device is disabled then there was a PCI-disconnect and
	 * qla2x00_disable_board_on_pci_error has taken care of most of the
	 * resources.
	 */
	if (!atomic_read(&pdev->enable_cnt)) {
		dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size,
		    base_vha->gnl.l, base_vha->gnl.ldma);

		scsi_host_put(base_vha->host);
		kfree(ha);
		pci_set_drvdata(pdev, NULL);
		return;
	}
	qla2x00_wait_for_hba_ready(base_vha);

	/*
	 * If the UNLOADING flag is already set, then continue the unload
	 * from where it was first set.
	 */
	if (test_bit(UNLOADING, &base_vha->dpc_flags))
		return;

	set_bit(UNLOADING, &base_vha->dpc_flags);
	dma_free_coherent(&ha->pdev->dev,
	    base_vha->gnl.size, base_vha->gnl.l, base_vha->gnl.ldma);

	if (IS_QLAFX00(ha))
		qlafx00_driver_shutdown(base_vha, 20);

	qla2x00_delete_all_vps(ha, base_vha);

	if (IS_QLA8031(ha)) {
		ql_dbg(ql_dbg_p3p, base_vha, 0xb07e,
		    "Clearing fcoe driver presence.\n");
		if (qla83xx_clear_drv_presence(base_vha) != QLA_SUCCESS)
			ql_dbg(ql_dbg_p3p, base_vha, 0xb079,
			    "Error while clearing DRV-Presence.\n");
	}

	qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16);

	qla2x00_dfs_remove(base_vha);

	qla84xx_put_chip(base_vha);

	/* Laser should be disabled only for ISP2031 */
	if (IS_QLA2031(ha))
		qla83xx_disable_laser(base_vha);

	/* Disable timer */
	if (base_vha->timer_active)
		qla2x00_stop_timer(base_vha);

	base_vha->flags.online = 0;

	/* free DMA memory */
	if (ha->exlogin_buf)
		qla2x00_free_exlogin_buffer(ha);

	/* free DMA memory */
	if (ha->exchoffld_buf)
		qla2x00_free_exchoffld_buffer(ha);

	qla2x00_destroy_deferred_work(ha);

	qlt_remove_target(ha, base_vha);

	qla2x00_free_sysfs_attr(base_vha, true);

	fc_remove_host(base_vha->host);
	qlt_remove_target_resources(ha);

	scsi_remove_host(base_vha->host);

	qla2x00_free_device(base_vha);

	qla2x00_clear_drv_active(ha);

	scsi_host_put(base_vha->host);

	qla2x00_unmap_iobases(ha);

	pci_release_selected_regions(ha->pdev, ha->bars);
	kfree(ha);

	pci_disable_pcie_error_reporting(pdev);

	pci_disable_device(pdev);
}

static void
qla2x00_free_device(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;

	qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);

	/* Disable timer */
	if (vha->timer_active)
		qla2x00_stop_timer(vha);

	qla25xx_delete_queues(vha);

	if (ha->flags.fce_enabled)
		qla2x00_disable_fce_trace(vha, NULL, NULL);

	if (ha->eft)
		qla2x00_disable_eft_trace(vha);

	/* Stop currently executing firmware. */
	qla2x00_try_to_stop_firmware(vha);

	vha->flags.online = 0;

	/* turn-off interrupts on the card */
	if (ha->interrupts_on) {
		vha->flags.init_done = 0;
		ha->isp_ops->disable_intrs(ha);
	}

	qla2x00_free_fcports(vha);

	qla2x00_free_irqs(vha);

	/* Flush the work queue and remove it */
	if (ha->wq) {
		flush_workqueue(ha->wq);
		destroy_workqueue(ha->wq);
		ha->wq = NULL;
	}

	qla2x00_mem_free(ha);

	qla82xx_md_free(vha);

	qla2x00_free_queues(ha);
}

void qla2x00_free_fcports(struct scsi_qla_host *vha)
{
	fc_port_t *fcport, *tfcport;

	list_for_each_entry_safe(fcport, tfcport, &vha->vp_fcports, list) {
		list_del(&fcport->list);
		qla2x00_clear_loop_id(fcport);
		kfree(fcport);
	}
}

static inline void
qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport,
    int defer)
{
	struct fc_rport *rport;
	scsi_qla_host_t *base_vha;
	unsigned long flags;

	if (!fcport->rport)
		return;

	rport = fcport->rport;
	if (defer) {
		base_vha = pci_get_drvdata(vha->hw->pdev);
		spin_lock_irqsave(vha->host->host_lock, flags);
		fcport->drport = rport;
		spin_unlock_irqrestore(vha->host->host_lock, flags);
		qlt_do_generation_tick(vha, &base_vha->total_fcport_update_gen);
		set_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
		qla2xxx_wake_dpc(base_vha);
	} else {
		int now;

		if (rport) {
			ql_dbg(ql_dbg_disc, fcport->vha, 0xffff,
			    "%s %8phN. rport %p roles %x\n",
			    __func__, fcport->port_name, rport,
			    rport->roles);
			fc_remote_port_delete(rport);
		}
		qlt_do_generation_tick(vha, &now);
	}
}

/*
 * qla2x00_mark_device_lost
 *	Updates fcport state when device goes offline.
 *
 * Input:
 *	vha = adapter block pointer.
 *	fcport = port structure pointer.
 *
 * Return:
 *	None.
 *
 * Context:
 */
void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport,
    int do_login, int defer)
{
	if (IS_QLAFX00(vha->hw)) {
		qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
		qla2x00_schedule_rport_del(vha, fcport, defer);
		return;
	}

	if (atomic_read(&fcport->state) == FCS_ONLINE &&
	    vha->vp_idx == fcport->vha->vp_idx) {
		qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
		qla2x00_schedule_rport_del(vha, fcport, defer);
	}
	/*
	 * We may need to retry the login, so don't change the state of the
	 * port but do the retries.
	 */
	if (atomic_read(&fcport->state) != FCS_DEVICE_DEAD)
		qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);

	if (!do_login)
		return;

	set_bit(RELOGIN_NEEDED, &vha->dpc_flags);

	if (fcport->login_retry == 0) {
		fcport->login_retry = vha->hw->login_retry_count;

		ql_dbg(ql_dbg_disc, vha, 0x2067,
		    "Port login retry %8phN, lid 0x%04x retry cnt=%d.\n",
		    fcport->port_name, fcport->loop_id, fcport->login_retry);
	}
}

/*
 * qla2x00_mark_all_devices_lost
 *	Updates fcport state when device goes offline.
 *
 * Input:
 *	vha = adapter block pointer.
 *	defer = use deferred rport removal.
 *
 * Return:
 *	None.
 *
 * Context:
 */
void
qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer)
{
	fc_port_t *fcport;

	ql_dbg(ql_dbg_disc, vha, 0xffff,
	    "Mark all dev lost\n");

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		fcport->scan_state = 0;
		qlt_schedule_sess_for_deletion_lock(fcport);

		if (vha->vp_idx != 0 && vha->vp_idx != fcport->vha->vp_idx)
			continue;

		/*
		 * No point in marking the device as lost, if the device is
		 * already DEAD.
		 */
		if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD)
			continue;
		if (atomic_read(&fcport->state) == FCS_ONLINE) {
			qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
			if (defer)
				qla2x00_schedule_rport_del(vha, fcport, defer);
			else if (vha->vp_idx == fcport->vha->vp_idx)
				qla2x00_schedule_rport_del(vha, fcport, defer);
		}
	}
}

/*
 * qla2x00_mem_alloc
 *	Allocates adapter memory.
 *
 * Returns:
 *	0  = success.
 *	!0 = failure.
 */
static int
qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
    struct req_que **req, struct rsp_que **rsp)
{
	char name[16];

	ha->init_cb = dma_alloc_coherent(&ha->pdev->dev, ha->init_cb_size,
	    &ha->init_cb_dma, GFP_KERNEL);
	if (!ha->init_cb)
		goto fail;

	if (qlt_mem_alloc(ha) < 0)
		goto fail_free_init_cb;

	ha->gid_list = dma_alloc_coherent(&ha->pdev->dev,
	    qla2x00_gid_list_size(ha), &ha->gid_list_dma, GFP_KERNEL);
	if (!ha->gid_list)
		goto fail_free_tgt_mem;

	ha->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep);
	if (!ha->srb_mempool)
		goto fail_free_gid_list;

	if (IS_P3P_TYPE(ha)) {
		/* Allocate cache for CT6 Ctx. */

	/* Get memory for cached NVRAM */
	ha->nvram = kzalloc(MAX_NVRAM_SIZE, GFP_KERNEL);
	if (!ha->nvram)
		goto fail_free_ctx_mempool;

	snprintf(name, sizeof(name), "%s_%d", QLA2XXX_DRIVER_NAME,
	    ha->pdev->device);
	ha->s_dma_pool = dma_pool_create(name, &ha->pdev->dev,
	    DMA_POOL_SIZE, 8, 0);
	if (!ha->s_dma_pool)
		goto fail_free_nvram;

	ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0022,
	    "init_cb=%p gid_list=%p, srb_mempool=%p s_dma_pool=%p.\n",
	    ha->init_cb, ha->gid_list, ha->srb_mempool, ha->s_dma_pool);

	if (IS_P3P_TYPE(ha) || ql2xenabledif) {
		ha->dl_dma_pool = dma_pool_create(name, &ha->pdev->dev,
		    DSD_LIST_DMA_POOL_SIZE, 8, 0);
		if (!ha->dl_dma_pool) {
			ql_log_pci(ql_log_fatal, ha->pdev, 0x0023,
			    "Failed to allocate memory for dl_dma_pool.\n");
			goto fail_s_dma_pool;
		}

		ha->fcp_cmnd_dma_pool = dma_pool_create(name, &ha->pdev->dev,
		    FCP_CMND_DMA_POOL_SIZE, 8, 0);
		if (!ha->fcp_cmnd_dma_pool) {
			ql_log_pci(ql_log_fatal, ha->pdev, 0x0024,
			    "Failed to allocate memory for fcp_cmnd_dma_pool.\n");
			goto fail_dl_dma_pool;
		}
		ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0025,
		    "dl_dma_pool=%p fcp_cmnd_dma_pool=%p.\n",
		    ha->dl_dma_pool, ha->fcp_cmnd_dma_pool);
	}

	/* Allocate memory for SNS commands */
	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
		/* Get consistent memory allocated for SNS commands */
		ha->sns_cmd = dma_alloc_coherent(&ha->pdev->dev,
		    sizeof(struct sns_cmd_pkt), &ha->sns_cmd_dma, GFP_KERNEL);
		if (!ha->sns_cmd)
			goto fail_dma_pool;
		ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0026,
		    "sns_cmd: %p.\n", ha->sns_cmd);
	} else {
		/* Get consistent memory allocated for MS IOCB */
		ha->ms_iocb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
			&ha->ms_iocb_dma);
		if (!ha->ms_iocb)
			goto fail_dma_pool;
		/* Get consistent memory allocated for CT SNS commands */
		ha->ct_sns = dma_alloc_coherent(&ha->pdev->dev,
		    sizeof(struct ct_sns_pkt), &ha->ct_sns_dma, GFP_KERNEL);
		if (!ha->ct_sns)
			goto fail_free_ms_iocb;
		ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0027,
		    "ms_iocb=%p ct_sns=%p.\n",
		    ha->ms_iocb, ha->ct_sns);
	}

	/* Allocate memory for request ring */
	*req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
	if (!*req) {
		ql_log_pci(ql_log_fatal, ha->pdev, 0x0028,
		    "Failed to allocate memory for req.\n");
		goto fail_req;
	}
	(*req)->length = req_len;
	(*req)->ring = dma_alloc_coherent(&ha->pdev->dev,
		((*req)->length + 1) * sizeof(request_t),
		&(*req)->dma, GFP_KERNEL);
	if (!(*req)->ring) {
		ql_log_pci(ql_log_fatal, ha->pdev, 0x0029,
		    "Failed to allocate memory for req_ring.\n");
		goto fail_req_ring;
	}
	/* Allocate memory for response ring */
	*rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
	if (!*rsp) {
		ql_log_pci(ql_log_fatal, ha->pdev, 0x002a,
		    "Failed to allocate memory for rsp.\n");
		goto fail_rsp;
	}
	(*rsp)->hw = ha;
	(*rsp)->length = rsp_len;
	(*rsp)->ring = dma_alloc_coherent(&ha->pdev->dev,
		((*rsp)->length + 1) * sizeof(response_t),
		&(*rsp)->dma, GFP_KERNEL);
	if (!(*rsp)->ring) {
		ql_log_pci(ql_log_fatal, ha->pdev, 0x002b,
		    "Failed to allocate memory for rsp_ring.\n");
		goto fail_rsp_ring;
	}
	(*req)->rsp = *rsp;
	(*rsp)->req = *req;
	ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002c,
	    "req=%p req->length=%d req->ring=%p rsp=%p "
	    "rsp->length=%d rsp->ring=%p.\n",
	    *req, (*req)->length, (*req)->ring, *rsp, (*rsp)->length,
	    (*rsp)->ring);
	/* Allocate memory for NVRAM data for vports */
	if (ha->nvram_npiv_size) {
		ha->npiv_info = kcalloc(ha->nvram_npiv_size,
		    sizeof(struct qla_npiv_entry), GFP_KERNEL);
		if (!ha->npiv_info) {
			ql_log_pci(ql_log_fatal, ha->pdev, 0x002d,
			    "Failed to allocate memory for npiv_info.\n");
			goto fail_npiv_info;
		}
	} else
		ha->npiv_info = NULL;

	/* Get consistent memory allocated for EX-INIT-CB. */
	if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha)) {
		ha->ex_init_cb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
		    &ha->ex_init_cb_dma);
		if (!ha->ex_init_cb)
			goto fail_ex_init_cb;
		ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002e,
		    "ex_init_cb=%p.\n", ha->ex_init_cb);
	}

	INIT_LIST_HEAD(&ha->gbl_dsd_list);

	/* Get consistent memory allocated for Async Port-Database. */
	if (!IS_FWI2_CAPABLE(ha)) {
		ha->async_pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
			&ha->async_pd_dma);
		if (!ha->async_pd)
			goto fail_async_pd;
		ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002f,
		    "async_pd=%p.\n", ha->async_pd);
	}

	INIT_LIST_HEAD(&ha->vp_list);

	/* Allocate memory for our loop_id bitmap */
	ha->loop_id_map = kcalloc(BITS_TO_LONGS(LOOPID_MAP_SIZE),
	    sizeof(long), GFP_KERNEL);
	if (!ha->loop_id_map)
		goto fail_loop_id_map;

	qla2x00_set_reserved_loop_ids(ha);
	ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0123,
	    "loop_id_map=%p.\n", ha->loop_id_map);

	return 0;

fail_loop_id_map:
	dma_pool_free(ha->s_dma_pool, ha->async_pd, ha->async_pd_dma);
fail_async_pd:
	dma_pool_free(ha->s_dma_pool, ha->ex_init_cb, ha->ex_init_cb_dma);
fail_ex_init_cb:
	kfree(ha->npiv_info);
fail_npiv_info:
	dma_free_coherent(&ha->pdev->dev, ((*rsp)->length + 1) *
		sizeof(response_t), (*rsp)->ring, (*rsp)->dma);
	(*rsp)->ring = NULL;
	(*rsp)->dma = 0;
fail_rsp_ring:
	kfree(*rsp);
fail_rsp:
	dma_free_coherent(&ha->pdev->dev, ((*req)->length + 1) *
		sizeof(request_t), (*req)->ring, (*req)->dma);
	(*req)->ring = NULL;
	(*req)->dma = 0;
fail_req_ring:
	kfree(*req);
fail_req:
	dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt),
	    ha->ct_sns, ha->ct_sns_dma);
	ha->ct_sns = NULL;
	ha->ct_sns_dma = 0;
fail_free_ms_iocb:
	dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
	ha->ms_iocb = NULL;
	ha->ms_iocb_dma = 0;

	if (ha->sns_cmd)
		dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt),
		    ha->sns_cmd, ha->sns_cmd_dma);
fail_dma_pool:
	if (IS_QLA82XX(ha) || ql2xenabledif) {
		dma_pool_destroy(ha->fcp_cmnd_dma_pool);
		ha->fcp_cmnd_dma_pool = NULL;
	}
fail_dl_dma_pool:
	if (IS_QLA82XX(ha) || ql2xenabledif) {
		dma_pool_destroy(ha->dl_dma_pool);
		ha->dl_dma_pool = NULL;
	}
fail_s_dma_pool:
	dma_pool_destroy(ha->s_dma_pool);
	ha->s_dma_pool = NULL;
fail_free_nvram:
	kfree(ha->nvram);
	ha->nvram = NULL;
fail_free_ctx_mempool:
	if (ha->ctx_mempool)
		mempool_destroy(ha->ctx_mempool);
	ha->ctx_mempool = NULL;
fail_free_srb_mempool:
	if (ha->srb_mempool)
		mempool_destroy(ha->srb_mempool);
	ha->srb_mempool = NULL;
fail_free_gid_list:
	dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
	    ha->gid_list,
	    ha->gid_list_dma);
	ha->gid_list = NULL;
	ha->gid_list_dma = 0;
fail_free_tgt_mem:
	qlt_mem_free(ha);
fail_free_init_cb:
	dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, ha->init_cb,
	    ha->init_cb_dma);
	ha->init_cb = NULL;
	ha->init_cb_dma = 0;
fail:
	ql_log(ql_log_fatal, NULL, 0x0030,
	    "Memory allocation failure.\n");
	return -ENOMEM;
}
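
/*
 * qla2x00_set_exlogins_buffer
 *	Allocates the extended-login DMA buffer and registers it with the
 *	firmware, honoring the ql2xexlogins module parameter.
 *
 * Input:
 *	vha = adapter block pointer.
 *
 * Returns:
 *	QLA_SUCCESS when the buffer is configured (or when ql2xexlogins
 *	is 0); a QLA_* error or -ENOMEM otherwise.
 */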
int
qla2x00_set_exlogins_buffer(scsi_qla_host_t *vha)
{
	int rval;
	uint16_t size, max_cnt, temp;
	struct qla_hw_data *ha = vha->hw;

	/* Return if we don't need to allocate any extended logins */
	if (!ql2xexlogins)
		return QLA_SUCCESS;

	ql_log(ql_log_info, vha, 0xd021, "EXLOGIN count: %d.\n", ql2xexlogins);
	max_cnt = 0;
	rval = qla_get_exlogin_status(vha, &size, &max_cnt);
	if (rval != QLA_SUCCESS) {
		ql_log_pci(ql_log_fatal, ha->pdev, 0xd029,
		    "Failed to get exlogin status.\n");
		return rval;
	}

	temp = (ql2xexlogins > max_cnt) ? max_cnt : ql2xexlogins;
	ha->exlogin_size = (size * temp);
	ql_log(ql_log_info, vha, 0xd024,
	    "EXLOGIN: max_logins=%d, portdb=0x%x, total=%d.\n",
	    max_cnt, size, temp);

	ql_log(ql_log_info, vha, 0xd025, "EXLOGIN: requested size=0x%x\n",
	    ha->exlogin_size);

	/* Get consistent memory for extended logins */
	ha->exlogin_buf = dma_alloc_coherent(&ha->pdev->dev,
	    ha->exlogin_size, &ha->exlogin_buf_dma, GFP_KERNEL);
	if (!ha->exlogin_buf) {
		ql_log_pci(ql_log_fatal, ha->pdev, 0xd02a,
		    "Failed to allocate memory for exlogin_buf_dma.\n");
		return -ENOMEM;
	}

	/* Now configure the dma buffer */
	rval = qla_set_exlogin_mem_cfg(vha, ha->exlogin_buf_dma);
	if (rval) {
		ql_log(ql_log_fatal, vha, 0x00cf,
		    "Setup extended login buffer ****FAILED****.\n");
		qla2x00_free_exlogin_buffer(ha);
	}

	return rval;
}

/*
 * qla2x00_free_exlogin_buffer
 *
 * Input:
 *	ha = adapter block pointer
 */
void
qla2x00_free_exlogin_buffer(struct qla_hw_data *ha)
{
	if (ha->exlogin_buf) {
		dma_free_coherent(&ha->pdev->dev, ha->exlogin_size,
		    ha->exlogin_buf, ha->exlogin_buf_dma);
		ha->exlogin_buf = NULL;
		ha->exlogin_size = 0;
	}
}
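
/*
 * qla2x00_set_exchoffld_buffer
 *	Allocates the exchange-offload DMA buffer and registers it with
 *	the firmware, honoring the ql2xexchoffld module parameter.
 *
 * Input:
 *	vha = adapter block pointer.
 *
 * Returns:
 *	QLA_SUCCESS when the buffer is configured (or when ql2xexchoffld
 *	is 0); a QLA_* error or -ENOMEM otherwise.
 */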
int
qla2x00_set_exchoffld_buffer(scsi_qla_host_t *vha)
{
	int rval;
	uint16_t size, max_cnt, temp;
	struct qla_hw_data *ha = vha->hw;

	/* Return if we don't need to allocate any exchange offload buffers */
	if (!ql2xexchoffld)
		return QLA_SUCCESS;

	ql_log(ql_log_info, vha, 0xd014,
	    "Exchange offload count: %d.\n", ql2xexchoffld);

	max_cnt = 0;
	rval = qla_get_exchoffld_status(vha, &size, &max_cnt);
	if (rval != QLA_SUCCESS) {
		ql_log_pci(ql_log_fatal, ha->pdev, 0xd012,
		    "Failed to get exchange offload status.\n");
		return rval;
	}

	temp = (ql2xexchoffld > max_cnt) ? max_cnt : ql2xexchoffld;
	ha->exchoffld_size = (size * temp);
	ql_log(ql_log_info, vha, 0xd016,
	    "Exchange offload: max_count=%d, buffers=0x%x, total=%d.\n",
	    max_cnt, size, temp);

	ql_log(ql_log_info, vha, 0xd017,
	    "Exchange Buffers requested size = 0x%x\n", ha->exchoffld_size);

	/* Get consistent memory for exchange offload */
	ha->exchoffld_buf = dma_alloc_coherent(&ha->pdev->dev,
	    ha->exchoffld_size, &ha->exchoffld_buf_dma, GFP_KERNEL);
	if (!ha->exchoffld_buf) {
		ql_log_pci(ql_log_fatal, ha->pdev, 0xd013,
		    "Failed to allocate memory for exchoffld_buf_dma.\n");
		return -ENOMEM;
	}

	/* Now configure the dma buffer */
	rval = qla_set_exchoffld_mem_cfg(vha, ha->exchoffld_buf_dma);
	if (rval) {
		ql_log(ql_log_fatal, vha, 0xd02e,
		    "Setup exchange offload buffer ****FAILED****.\n");
		qla2x00_free_exchoffld_buffer(ha);
	}

	return rval;
}

/*
 * qla2x00_free_exchoffld_buffer
 *
 * Input:
 *	ha = adapter block pointer
 */
void
qla2x00_free_exchoffld_buffer(struct qla_hw_data *ha)
{
	if (ha->exchoffld_buf) {
		dma_free_coherent(&ha->pdev->dev, ha->exchoffld_size,
		    ha->exchoffld_buf, ha->exchoffld_buf_dma);
		ha->exchoffld_buf = NULL;
		ha->exchoffld_size = 0;
	}
}

/*
 * qla2x00_free_fw_dump
 *	Frees the firmware dump, dump template, FCE and EFT buffers.
 *
 * Input:
 *	ha = adapter block pointer
 */
static void
qla2x00_free_fw_dump(struct qla_hw_data *ha)
{
	if (ha->fce)
		dma_free_coherent(&ha->pdev->dev,
		    FCE_SIZE, ha->fce, ha->fce_dma);

	if (ha->eft)
		dma_free_coherent(&ha->pdev->dev,
		    EFT_SIZE, ha->eft, ha->eft_dma);

	vfree(ha->fw_dump);
	vfree(ha->fw_dump_template);

	ha->fce = NULL;
	ha->fce_dma = 0;
	ha->eft = NULL;
	ha->eft_dma = 0;
	ha->fw_dumped = 0;
	ha->fw_dump_cap_flags = 0;
	ha->fw_dump_reading = 0;
	ha->fw_dump = NULL;
	ha->fw_dump_len = 0;
	ha->fw_dump_template = NULL;
	ha->fw_dump_template_len = 0;
}

/*
 * qla2x00_mem_free
 *	Frees all adapter allocated memory.
 *
 * Input:
 *	ha = adapter block pointer.
 */
static void
qla2x00_mem_free(struct qla_hw_data *ha)
{
	qla2x00_free_fw_dump(ha);

	if (ha->mctp_dump)
		dma_free_coherent(&ha->pdev->dev, MCTP_DUMP_SIZE, ha->mctp_dump,
		    ha->mctp_dump_dma);

	if (ha->srb_mempool)
		mempool_destroy(ha->srb_mempool);

	if (ha->dcbx_tlv)
		dma_free_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
		    ha->dcbx_tlv, ha->dcbx_tlv_dma);

	if (ha->xgmac_data)
		dma_free_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
		    ha->xgmac_data, ha->xgmac_data_dma);

	if (ha->sns_cmd)
		dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt),
		    ha->sns_cmd, ha->sns_cmd_dma);

	if (ha->ct_sns)
		dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt),
		    ha->ct_sns, ha->ct_sns_dma);

	if (ha->sfp_data)
		dma_pool_free(ha->s_dma_pool, ha->sfp_data, ha->sfp_data_dma);

	if (ha->ms_iocb)
		dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);

	if (ha->ex_init_cb)
		dma_pool_free(ha->s_dma_pool,
		    ha->ex_init_cb, ha->ex_init_cb_dma);

	if (ha->async_pd)
		dma_pool_free(ha->s_dma_pool, ha->async_pd, ha->async_pd_dma);

	if (ha->s_dma_pool)
		dma_pool_destroy(ha->s_dma_pool);

	if (ha->gid_list)
		dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
		    ha->gid_list, ha->gid_list_dma);

	if (IS_QLA82XX(ha)) {
		if (!list_empty(&ha->gbl_dsd_list)) {
			struct dsd_dma *dsd_ptr, *tdsd_ptr;

			/* clean up allocated prev pool */
			list_for_each_entry_safe(dsd_ptr,
			    tdsd_ptr, &ha->gbl_dsd_list, list) {
				dma_pool_free(ha->dl_dma_pool,
				    dsd_ptr->dsd_addr, dsd_ptr->dsd_list_dma);
				list_del(&dsd_ptr->list);
				kfree(dsd_ptr);
			}
		}
	}

	if (ha->dl_dma_pool)
		dma_pool_destroy(ha->dl_dma_pool);

	if (ha->fcp_cmnd_dma_pool)
		dma_pool_destroy(ha->fcp_cmnd_dma_pool);

	if (ha->ctx_mempool)
		mempool_destroy(ha->ctx_mempool);

	qlt_mem_free(ha);

	if (ha->init_cb)
		dma_free_coherent(&ha->pdev->dev, ha->init_cb_size,
		    ha->init_cb, ha->init_cb_dma);
	vfree(ha->optrom_buffer);
	kfree(ha->nvram);
	kfree(ha->npiv_info);
	kfree(ha->swl);
	kfree(ha->loop_id_map);

	ha->srb_mempool = NULL;
	ha->ctx_mempool = NULL;
	ha->sns_cmd = NULL;
	ha->sns_cmd_dma = 0;
	ha->ct_sns = NULL;
	ha->ct_sns_dma = 0;
	ha->ms_iocb = NULL;
	ha->ms_iocb_dma = 0;
	ha->init_cb = NULL;
	ha->init_cb_dma = 0;
	ha->ex_init_cb = NULL;
	ha->ex_init_cb_dma = 0;
	ha->async_pd = NULL;
	ha->async_pd_dma = 0;

	ha->s_dma_pool = NULL;
	ha->dl_dma_pool = NULL;
	ha->fcp_cmnd_dma_pool = NULL;

	ha->gid_list = NULL;
	ha->gid_list_dma = 0;

	ha->tgt.atio_ring = NULL;
	ha->tgt.atio_dma = 0;
	ha->tgt.tgt_vp_map = NULL;
}

struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
	struct qla_hw_data *ha)
{
	struct Scsi_Host *host;
	struct scsi_qla_host *vha = NULL;

	host = scsi_host_alloc(sht, sizeof(scsi_qla_host_t));
	if (!host) {
		ql_log_pci(ql_log_fatal, ha->pdev, 0x0107,
		    "Failed to allocate host from the scsi layer, aborting.\n");
		return NULL;
	}

	/* Clear our data area */
	vha = shost_priv(host);
	memset(vha, 0, sizeof(scsi_qla_host_t));

	vha->host = host;
	vha->host_no = host->host_no;
	vha->hw = ha;

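	/*
	 * Initialize the per-host lists and locks up front so that
	 * discovery and work-event posting find them ready.
	 */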
	INIT_LIST_HEAD(&vha->vp_fcports);
	INIT_LIST_HEAD(&vha->work_list);
	INIT_LIST_HEAD(&vha->list);
	INIT_LIST_HEAD(&vha->qla_cmd_list);
	INIT_LIST_HEAD(&vha->qla_sess_op_cmd_list);
	INIT_LIST_HEAD(&vha->logo_list);
	INIT_LIST_HEAD(&vha->plogi_ack_list);
	INIT_LIST_HEAD(&vha->qp_list);
	INIT_LIST_HEAD(&vha->gnl.fcports);

	spin_lock_init(&vha->work_lock);
	spin_lock_init(&vha->cmd_list_lock);
	init_waitqueue_head(&vha->fcport_waitQ);
	init_waitqueue_head(&vha->vref_waitq);

	vha->gnl.size = sizeof(struct get_name_list_extended) *
	    (ha->max_loop_id + 1);
	vha->gnl.l = dma_alloc_coherent(&ha->pdev->dev,
	    vha->gnl.size, &vha->gnl.ldma, GFP_KERNEL);
	if (!vha->gnl.l) {
		ql_log(ql_log_fatal, vha, 0xffff,
		    "Alloc failed for name list.\n");
		scsi_host_put(vha->host);
		return NULL;
	}

	snprintf(vha->host_str, sizeof(vha->host_str), "%s_%ld",
	    QLA2XXX_DRIVER_NAME, vha->host_no);
	ql_dbg(ql_dbg_init, vha, 0x0041,
	    "Allocated the host=%p hw=%p vha=%p dev_name=%s",
	    vha->host, vha->hw, vha,
	    dev_name(&(ha->pdev->dev)));

	return vha;
}

struct qla_work_evt *
qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type)
{
	struct qla_work_evt *e;
	uint8_t bail;

	QLA_VHA_MARK_BUSY(vha, bail);
	if (bail)
		return NULL;

	e = kzalloc(sizeof(struct qla_work_evt), GFP_ATOMIC);
	if (!e) {
		QLA_VHA_MARK_NOT_BUSY(vha);
		return NULL;
	}

	INIT_LIST_HEAD(&e->list);
	e->type = type;
	e->flags = QLA_EVT_FLAG_FREE;
	return e;
}

int
qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
{
	unsigned long flags;

	spin_lock_irqsave(&vha->work_lock, flags);
	list_add_tail(&e->list, &vha->work_list);
	spin_unlock_irqrestore(&vha->work_lock, flags);

	if (QLA_EARLY_LINKUP(vha->hw))
		schedule_work(&vha->iocb_work);
	else
		qla2xxx_wake_dpc(vha);

	return QLA_SUCCESS;
}

int
qla2x00_post_aen_work(struct scsi_qla_host *vha, enum fc_host_event_code code,
    u32 data)
{
	struct qla_work_evt *e;

	e = qla2x00_alloc_work(vha, QLA_EVT_AEN);
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.aen.code = code;
	e->u.aen.data = data;
	return qla2x00_post_work(vha, e);
}

int
qla2x00_post_idc_ack_work(struct scsi_qla_host *vha, uint16_t *mb)
{
	struct qla_work_evt *e;

	e = qla2x00_alloc_work(vha, QLA_EVT_IDC_ACK);
	if (!e)
		return QLA_FUNCTION_FAILED;

	memcpy(e->u.idc_ack.mb, mb, QLA_IDC_ACK_REGS * sizeof(uint16_t));
	return qla2x00_post_work(vha, e);
}

#define qla2x00_post_async_work(name, type)	\
int qla2x00_post_async_##name##_work(		\
    struct scsi_qla_host *vha,			\
    fc_port_t *fcport, uint16_t *data)		\
{						\
	struct qla_work_evt *e;			\
						\
	e = qla2x00_alloc_work(vha, type);	\
	if (!e)					\
		return QLA_FUNCTION_FAILED;	\
						\
	e->u.logio.fcport = fcport;		\
	if (data) {				\
		e->u.logio.data[0] = data[0];	\
		e->u.logio.data[1] = data[1];	\
	}					\
	return qla2x00_post_work(vha, e);	\
}
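
/*
 * Each invocation below expands to a helper of the form
 *
 *	int qla2x00_post_async_<name>_work(struct scsi_qla_host *vha,
 *	    fc_port_t *fcport, uint16_t *data);
 *
 * which queues the corresponding QLA_EVT_ASYNC_* event, carrying the
 * fcport pointer and optional event data, for the DPC thread.
 */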
qla2x00_post_async_work(login, QLA_EVT_ASYNC_LOGIN);
qla2x00_post_async_work(logout, QLA_EVT_ASYNC_LOGOUT);
qla2x00_post_async_work(logout_done, QLA_EVT_ASYNC_LOGOUT_DONE);
qla2x00_post_async_work(adisc, QLA_EVT_ASYNC_ADISC);
qla2x00_post_async_work(adisc_done, QLA_EVT_ASYNC_ADISC_DONE);

int
qla2x00_post_uevent_work(struct scsi_qla_host *vha, u32 code)
{
	struct qla_work_evt *e;

	e = qla2x00_alloc_work(vha, QLA_EVT_UEVENT);
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.uevent.code = code;
	return qla2x00_post_work(vha, e);
}

static void
qla2x00_uevent_emit(struct scsi_qla_host *vha, u32 code)
{
	char event_string[40] = "";
	char *envp[] = { event_string, NULL };

	switch (code) {
	case QLA_UEVENT_CODE_FW_DUMP:
		snprintf(event_string, sizeof(event_string), "FW_DUMP=%ld",
		    vha->host_no);
		break;
	default:
		/* do nothing */
		break;
	}
	kobject_uevent_env(&vha->hw->pdev->dev.kobj, KOBJ_CHANGE, envp);
}

int
qlafx00_post_aenfx_work(struct scsi_qla_host *vha, uint32_t evtcode,
    uint32_t *data, int cnt)
{
	struct qla_work_evt *e;

	e = qla2x00_alloc_work(vha, QLA_EVT_AENFX);
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.aenfx.evtcode = evtcode;
	e->u.aenfx.count = cnt;
	memcpy(e->u.aenfx.mbx, data, sizeof(*data) * cnt);
	return qla2x00_post_work(vha, e);
}

int qla24xx_post_upd_fcport_work(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	struct qla_work_evt *e;

	e = qla2x00_alloc_work(vha, QLA_EVT_UPD_FCPORT);
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.fcport.fcport = fcport;
	return qla2x00_post_work(vha, e);
}
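
/*
 * qla24xx_create_new_sess
 *	Find or allocate the fcport for a newly arrived session and link
 *	any pending PLOGI ACK to it under the target-mode session lock.
 */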
static
void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
{
	unsigned long flags;
	fc_port_t *fcport = NULL;
	struct qlt_plogi_ack_t *pla =
	    (struct qlt_plogi_ack_t *)e->u.new_sess.pla;

	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	fcport = qla2x00_find_fcport_by_wwpn(vha, e->u.new_sess.port_name, 1);
	if (fcport) {
		fcport->d_id = e->u.new_sess.id;
		if (pla) {
			fcport->fw_login_state = DSC_LS_PLOGI_PEND;
			qlt_plogi_ack_link(vha, pla, fcport,
			    QLT_PLOGI_LINK_SAME_WWN);
			/*
			 * We took an extra ref_count to prevent the PLOGI
			 * ACK while the fcport/sess had not yet been created.
			 */
			pla->ref_count--;
		}
	} else {
		fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
		if (fcport) {
			fcport->d_id = e->u.new_sess.id;
			fcport->scan_state = QLA_FCPORT_FOUND;
			fcport->flags |= FCF_FABRIC_DEVICE;
			fcport->fw_login_state = DSC_LS_PLOGI_PEND;

			memcpy(fcport->port_name, e->u.new_sess.port_name,
			    WWN_SIZE);
			list_add_tail(&fcport->list, &vha->vp_fcports);

			if (pla) {
				qlt_plogi_ack_link(vha, pla, fcport,
				    QLT_PLOGI_LINK_SAME_WWN);
				pla->ref_count--;
			}
		}
	}
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

	if (fcport) {
		if (pla)
			qlt_plogi_ack_unref(vha, pla);
		else
			qla24xx_async_gnl(vha, fcport);
	}
}

void
qla2x00_do_work(struct scsi_qla_host *vha)
{
	struct qla_work_evt *e, *tmp;
	unsigned long flags;
	LIST_HEAD(work);

	spin_lock_irqsave(&vha->work_lock, flags);
	list_splice_init(&vha->work_list, &work);
	spin_unlock_irqrestore(&vha->work_lock, flags);

	list_for_each_entry_safe(e, tmp, &work, list) {
		list_del_init(&e->list);

		switch (e->type) {
		case QLA_EVT_AEN:
			fc_host_post_event(vha->host, fc_get_event_number(),
			    e->u.aen.code, e->u.aen.data);
			break;
		case QLA_EVT_IDC_ACK:
			qla81xx_idc_ack(vha, e->u.idc_ack.mb);
			break;
		case QLA_EVT_ASYNC_LOGIN:
			qla2x00_async_login(vha, e->u.logio.fcport,
			    e->u.logio.data);
			break;
		case QLA_EVT_ASYNC_LOGOUT:
			qla2x00_async_logout(vha, e->u.logio.fcport);
			break;
		case QLA_EVT_ASYNC_LOGOUT_DONE:
			qla2x00_async_logout_done(vha, e->u.logio.fcport,
			    e->u.logio.data);
			break;
		case QLA_EVT_ASYNC_ADISC:
			qla2x00_async_adisc(vha, e->u.logio.fcport,
			    e->u.logio.data);
			break;
		case QLA_EVT_ASYNC_ADISC_DONE:
			qla2x00_async_adisc_done(vha, e->u.logio.fcport,
			    e->u.logio.data);
			break;
		case QLA_EVT_UEVENT:
			qla2x00_uevent_emit(vha, e->u.uevent.code);
			break;
		case QLA_EVT_AENFX:
			qlafx00_process_aen(vha, e);
			break;
		case QLA_EVT_GIDPN:
			qla24xx_async_gidpn(vha, e->u.fcport.fcport);
			break;
		case QLA_EVT_GPNID:
			qla24xx_async_gpnid(vha, &e->u.gpnid.id);
			break;
		case QLA_EVT_GPNID_DONE:
			qla24xx_async_gpnid_done(vha, e->u.iosb.sp);
			break;
		case QLA_EVT_NEW_SESS:
			qla24xx_create_new_sess(vha, e);
			break;
		case QLA_EVT_GPDB:
			qla24xx_async_gpdb(vha, e->u.fcport.fcport,
			    e->u.fcport.opt);
			break;
		case QLA_EVT_GPSC:
			qla24xx_async_gpsc(vha, e->u.fcport.fcport);
			break;
		case QLA_EVT_UPD_FCPORT:
			qla2x00_update_fcport(vha, e->u.fcport.fcport);
			break;
		case QLA_EVT_GNL:
			qla24xx_async_gnl(vha, e->u.fcport.fcport);
			break;
		case QLA_EVT_NACK:
			qla24xx_do_nack_work(vha, e);
			break;
		}
		if (e->flags & QLA_EVT_FLAG_FREE)
			kfree(e);

		/* For each work completed decrement vha ref count */
		QLA_VHA_MARK_NOT_BUSY(vha);
	}
}

/*
 * Relogin all the fcports of a vport.
 * Context: dpc thread
 */
void qla2x00_relogin(struct scsi_qla_host *vha)
{
	fc_port_t *fcport;
	int status;
	struct event_arg ea;

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		/*
		 * If the port is not ONLINE then try to login
		 * to it if we haven't run out of retries.
		 */
		if (atomic_read(&fcport->state) != FCS_ONLINE &&
		    fcport->login_retry && !(fcport->flags & FCF_ASYNC_SENT)) {
			fcport->login_retry--;
			if (fcport->flags & FCF_FABRIC_DEVICE) {
				ql_dbg(ql_dbg_disc, fcport->vha, 0xffff,
				    "%s %8phC DS %d LS %d\n", __func__,
				    fcport->port_name, fcport->disc_state,
				    fcport->fw_login_state);
				memset(&ea, 0, sizeof(ea));
				ea.event = FCME_RELOGIN;
				ea.fcport = fcport;
				qla2x00_fcport_event_handler(vha, &ea);
			} else {
				status = qla2x00_local_device_login(vha,
				    fcport);
				if (status == QLA_SUCCESS) {
					fcport->old_loop_id = fcport->loop_id;
					ql_dbg(ql_dbg_disc, vha, 0x2003,
					    "Port login OK: logged in ID 0x%x.\n",
					    fcport->loop_id);
					qla2x00_update_fcport(vha, fcport);
				} else if (status == 1) {
					set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
					/* retry the login again */
					ql_dbg(ql_dbg_disc, vha, 0x2007,
					    "Retrying %d login again loop_id 0x%x.\n",
					    fcport->login_retry,
					    fcport->loop_id);
				} else {
					fcport->login_retry = 0;
				}

				if (fcport->login_retry == 0 &&
				    status != QLA_SUCCESS)
					qla2x00_clear_loop_id(fcport);
			}
		}
		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
			break;
	}
}

/* Schedule work on any of the dpc-workqueues */
void
qla83xx_schedule_work(scsi_qla_host_t *base_vha, int work_code)
{
	struct qla_hw_data *ha = base_vha->hw;

	switch (work_code) {
	case MBA_IDC_AEN: /* 0x8200 */
		if (ha->dpc_lp_wq)
			queue_work(ha->dpc_lp_wq, &ha->idc_aen);
		break;

	case QLA83XX_NIC_CORE_RESET: /* 0x1 */
		if (!ha->flags.nic_core_reset_hdlr_active) {
			if (ha->dpc_hp_wq)
				queue_work(ha->dpc_hp_wq, &ha->nic_core_reset);
		} else
			ql_dbg(ql_dbg_p3p, base_vha, 0xb05e,
			    "NIC Core reset is already active. Skip "
			    "scheduling it again.\n");
		break;
	case QLA83XX_IDC_STATE_HANDLER: /* 0x2 */
		if (ha->dpc_hp_wq)
			queue_work(ha->dpc_hp_wq, &ha->idc_state_handler);
		break;
	case QLA83XX_NIC_CORE_UNRECOVERABLE: /* 0x3 */
		if (ha->dpc_hp_wq)
			queue_work(ha->dpc_hp_wq, &ha->nic_core_unrecoverable);
		break;
	default:
		ql_log(ql_log_warn, base_vha, 0xb05f,
		    "Unknown work-code=0x%x.\n", work_code);
	}

	return;
}
Skip " 4680 "scheduling it again.\n"); 4681 break; 4682 case QLA83XX_IDC_STATE_HANDLER: /* 0x2 */ 4683 if (ha->dpc_hp_wq) 4684 queue_work(ha->dpc_hp_wq, &ha->idc_state_handler); 4685 break; 4686 case QLA83XX_NIC_CORE_UNRECOVERABLE: /* 0x3 */ 4687 if (ha->dpc_hp_wq) 4688 queue_work(ha->dpc_hp_wq, &ha->nic_core_unrecoverable); 4689 break; 4690 default: 4691 ql_log(ql_log_warn, base_vha, 0xb05f, 4692 "Unknown work-code=0x%x.\n", work_code); 4693 } 4694 4695 return; 4696 } 4697 4698 /* Work: Perform NIC Core Unrecoverable state handling */ 4699 void 4700 qla83xx_nic_core_unrecoverable_work(struct work_struct *work) 4701 { 4702 struct qla_hw_data *ha = 4703 container_of(work, struct qla_hw_data, nic_core_unrecoverable); 4704 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 4705 uint32_t dev_state = 0; 4706 4707 qla83xx_idc_lock(base_vha, 0); 4708 qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state); 4709 qla83xx_reset_ownership(base_vha); 4710 if (ha->flags.nic_core_reset_owner) { 4711 ha->flags.nic_core_reset_owner = 0; 4712 qla83xx_wr_reg(base_vha, QLA83XX_IDC_DEV_STATE, 4713 QLA8XXX_DEV_FAILED); 4714 ql_log(ql_log_info, base_vha, 0xb060, "HW State: FAILED.\n"); 4715 qla83xx_schedule_work(base_vha, QLA83XX_IDC_STATE_HANDLER); 4716 } 4717 qla83xx_idc_unlock(base_vha, 0); 4718 } 4719 4720 /* Work: Execute IDC state handler */ 4721 void 4722 qla83xx_idc_state_handler_work(struct work_struct *work) 4723 { 4724 struct qla_hw_data *ha = 4725 container_of(work, struct qla_hw_data, idc_state_handler); 4726 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 4727 uint32_t dev_state = 0; 4728 4729 qla83xx_idc_lock(base_vha, 0); 4730 qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state); 4731 if (dev_state == QLA8XXX_DEV_FAILED || 4732 dev_state == QLA8XXX_DEV_NEED_QUIESCENT) 4733 qla83xx_idc_state_handler(base_vha); 4734 qla83xx_idc_unlock(base_vha, 0); 4735 } 4736 4737 static int 4738 qla83xx_check_nic_core_fw_alive(scsi_qla_host_t *base_vha) 4739 { 4740 int rval = QLA_SUCCESS; 4741 unsigned long heart_beat_wait = jiffies + (1 * HZ); 4742 uint32_t heart_beat_counter1, heart_beat_counter2; 4743 4744 do { 4745 if (time_after(jiffies, heart_beat_wait)) { 4746 ql_dbg(ql_dbg_p3p, base_vha, 0xb07c, 4747 "Nic Core f/w is not alive.\n"); 4748 rval = QLA_FUNCTION_FAILED; 4749 break; 4750 } 4751 4752 qla83xx_idc_lock(base_vha, 0); 4753 qla83xx_rd_reg(base_vha, QLA83XX_FW_HEARTBEAT, 4754 &heart_beat_counter1); 4755 qla83xx_idc_unlock(base_vha, 0); 4756 msleep(100); 4757 qla83xx_idc_lock(base_vha, 0); 4758 qla83xx_rd_reg(base_vha, QLA83XX_FW_HEARTBEAT, 4759 &heart_beat_counter2); 4760 qla83xx_idc_unlock(base_vha, 0); 4761 } while (heart_beat_counter1 == heart_beat_counter2); 4762 4763 return rval; 4764 } 4765 4766 /* Work: Perform NIC Core Reset handling */ 4767 void 4768 qla83xx_nic_core_reset_work(struct work_struct *work) 4769 { 4770 struct qla_hw_data *ha = 4771 container_of(work, struct qla_hw_data, nic_core_reset); 4772 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 4773 uint32_t dev_state = 0; 4774 4775 if (IS_QLA2031(ha)) { 4776 if (qla2xxx_mctp_dump(base_vha) != QLA_SUCCESS) 4777 ql_log(ql_log_warn, base_vha, 0xb081, 4778 "Failed to dump mctp\n"); 4779 return; 4780 } 4781 4782 if (!ha->flags.nic_core_reset_hdlr_active) { 4783 if (qla83xx_check_nic_core_fw_alive(base_vha) == QLA_SUCCESS) { 4784 qla83xx_idc_lock(base_vha, 0); 4785 qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, 4786 &dev_state); 4787 qla83xx_idc_unlock(base_vha, 0); 4788 if (dev_state != 

/* Work: Perform NIC Core Reset handling */
void
qla83xx_nic_core_reset_work(struct work_struct *work)
{
	struct qla_hw_data *ha =
	    container_of(work, struct qla_hw_data, nic_core_reset);
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
	uint32_t dev_state = 0;

	if (IS_QLA2031(ha)) {
		if (qla2xxx_mctp_dump(base_vha) != QLA_SUCCESS)
			ql_log(ql_log_warn, base_vha, 0xb081,
			    "Failed to dump mctp\n");
		return;
	}

	if (!ha->flags.nic_core_reset_hdlr_active) {
		if (qla83xx_check_nic_core_fw_alive(base_vha) == QLA_SUCCESS) {
			qla83xx_idc_lock(base_vha, 0);
			qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE,
			    &dev_state);
			qla83xx_idc_unlock(base_vha, 0);
			if (dev_state != QLA8XXX_DEV_NEED_RESET) {
				ql_dbg(ql_dbg_p3p, base_vha, 0xb07a,
				    "Nic Core f/w is alive.\n");
				return;
			}
		}

		ha->flags.nic_core_reset_hdlr_active = 1;
		if (qla83xx_nic_core_reset(base_vha)) {
			/* NIC Core reset failed. */
			ql_dbg(ql_dbg_p3p, base_vha, 0xb061,
			    "NIC Core reset failed.\n");
		}
		ha->flags.nic_core_reset_hdlr_active = 0;
	}
}

/* Work: Handle 8200 IDC aens */
void
qla83xx_service_idc_aen(struct work_struct *work)
{
	struct qla_hw_data *ha =
	    container_of(work, struct qla_hw_data, idc_aen);
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
	uint32_t dev_state, idc_control;

	qla83xx_idc_lock(base_vha, 0);
	qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state);
	qla83xx_rd_reg(base_vha, QLA83XX_IDC_CONTROL, &idc_control);
	qla83xx_idc_unlock(base_vha, 0);
	if (dev_state == QLA8XXX_DEV_NEED_RESET) {
		if (idc_control & QLA83XX_IDC_GRACEFUL_RESET) {
			ql_dbg(ql_dbg_p3p, base_vha, 0xb062,
			    "Application requested NIC Core Reset.\n");
			qla83xx_schedule_work(base_vha, QLA83XX_NIC_CORE_RESET);
		} else if (qla83xx_check_nic_core_fw_alive(base_vha) ==
		    QLA_SUCCESS) {
			ql_dbg(ql_dbg_p3p, base_vha, 0xb07b,
			    "Other protocol driver requested NIC Core Reset.\n");
			qla83xx_schedule_work(base_vha, QLA83XX_NIC_CORE_RESET);
		}
	} else if (dev_state == QLA8XXX_DEV_FAILED ||
	    dev_state == QLA8XXX_DEV_NEED_QUIESCENT) {
		qla83xx_schedule_work(base_vha, QLA83XX_IDC_STATE_HANDLER);
	}
}
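
/*
 * qla83xx_wait_logic
 *	Back-off helper used between IDC lock/unlock retries: sleep when
 *	the context allows it, otherwise spin briefly.
 */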
4846 */ 4847 msleep(100); 4848 schedule(); 4849 } else { 4850 for (i = 0; i < 20; i++) 4851 cpu_relax(); /* This a nop instr on i386 */ 4852 } 4853 } 4854 4855 static int 4856 qla83xx_force_lock_recovery(scsi_qla_host_t *base_vha) 4857 { 4858 int rval; 4859 uint32_t data; 4860 uint32_t idc_lck_rcvry_stage_mask = 0x3; 4861 uint32_t idc_lck_rcvry_owner_mask = 0x3c; 4862 struct qla_hw_data *ha = base_vha->hw; 4863 ql_dbg(ql_dbg_p3p, base_vha, 0xb086, 4864 "Trying force recovery of the IDC lock.\n"); 4865 4866 rval = qla83xx_rd_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY, &data); 4867 if (rval) 4868 return rval; 4869 4870 if ((data & idc_lck_rcvry_stage_mask) > 0) { 4871 return QLA_SUCCESS; 4872 } else { 4873 data = (IDC_LOCK_RECOVERY_STAGE1) | (ha->portnum << 2); 4874 rval = qla83xx_wr_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY, 4875 data); 4876 if (rval) 4877 return rval; 4878 4879 msleep(200); 4880 4881 rval = qla83xx_rd_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY, 4882 &data); 4883 if (rval) 4884 return rval; 4885 4886 if (((data & idc_lck_rcvry_owner_mask) >> 2) == ha->portnum) { 4887 data &= (IDC_LOCK_RECOVERY_STAGE2 | 4888 ~(idc_lck_rcvry_stage_mask)); 4889 rval = qla83xx_wr_reg(base_vha, 4890 QLA83XX_IDC_LOCK_RECOVERY, data); 4891 if (rval) 4892 return rval; 4893 4894 /* Forcefully perform IDC UnLock */ 4895 rval = qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_UNLOCK, 4896 &data); 4897 if (rval) 4898 return rval; 4899 /* Clear lock-id by setting 0xff */ 4900 rval = qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID, 4901 0xff); 4902 if (rval) 4903 return rval; 4904 /* Clear lock-recovery by setting 0x0 */ 4905 rval = qla83xx_wr_reg(base_vha, 4906 QLA83XX_IDC_LOCK_RECOVERY, 0x0); 4907 if (rval) 4908 return rval; 4909 } else 4910 return QLA_SUCCESS; 4911 } 4912 4913 return rval; 4914 } 4915 4916 static int 4917 qla83xx_idc_lock_recovery(scsi_qla_host_t *base_vha) 4918 { 4919 int rval = QLA_SUCCESS; 4920 uint32_t o_drv_lockid, n_drv_lockid; 4921 unsigned long lock_recovery_timeout; 4922 4923 lock_recovery_timeout = jiffies + QLA83XX_MAX_LOCK_RECOVERY_WAIT; 4924 retry_lockid: 4925 rval = qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, &o_drv_lockid); 4926 if (rval) 4927 goto exit; 4928 4929 /* MAX wait time before forcing IDC Lock recovery = 2 secs */ 4930 if (time_after_eq(jiffies, lock_recovery_timeout)) { 4931 if (qla83xx_force_lock_recovery(base_vha) == QLA_SUCCESS) 4932 return QLA_SUCCESS; 4933 else 4934 return QLA_FUNCTION_FAILED; 4935 } 4936 4937 rval = qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, &n_drv_lockid); 4938 if (rval) 4939 goto exit; 4940 4941 if (o_drv_lockid == n_drv_lockid) { 4942 qla83xx_wait_logic(); 4943 goto retry_lockid; 4944 } else 4945 return QLA_SUCCESS; 4946 4947 exit: 4948 return rval; 4949 } 4950 4951 void 4952 qla83xx_idc_lock(scsi_qla_host_t *base_vha, uint16_t requester_id) 4953 { 4954 uint16_t options = (requester_id << 15) | BIT_6; 4955 uint32_t data; 4956 uint32_t lock_owner; 4957 struct qla_hw_data *ha = base_vha->hw; 4958 4959 /* IDC-lock implementation using driver-lock/lock-id remote registers */ 4960 retry_lock: 4961 if (qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCK, &data) 4962 == QLA_SUCCESS) { 4963 if (data) { 4964 /* Setting lock-id to our function-number */ 4965 qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID, 4966 ha->portnum); 4967 } else { 4968 qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, 4969 &lock_owner); 4970 ql_dbg(ql_dbg_p3p, base_vha, 0xb063, 4971 "Failed to acquire IDC lock, acquired by %d, " 4972 "retrying...\n", lock_owner); 4973 4974 /* 
void
qla83xx_idc_lock(scsi_qla_host_t *base_vha, uint16_t requester_id)
{
	uint32_t data;
	uint32_t lock_owner;
	struct qla_hw_data *ha = base_vha->hw;

	/* IDC-lock implementation using driver-lock/lock-id remote registers */
retry_lock:
	if (qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCK, &data)
	    == QLA_SUCCESS) {
		if (data) {
			/* Setting lock-id to our function-number */
			qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID,
			    ha->portnum);
		} else {
			qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID,
			    &lock_owner);
			ql_dbg(ql_dbg_p3p, base_vha, 0xb063,
			    "Failed to acquire IDC lock, acquired by %d, "
			    "retrying...\n", lock_owner);

			/* Retry/Perform IDC-Lock recovery */
			if (qla83xx_idc_lock_recovery(base_vha)
			    == QLA_SUCCESS) {
				qla83xx_wait_logic();
				goto retry_lock;
			} else
				ql_log(ql_log_warn, base_vha, 0xb075,
				    "IDC Lock recovery FAILED.\n");
		}

	}

	return;

#if 0
	/* XXX: IDC-lock implementation using access-control mbx */
	uint16_t options = (requester_id << 15) | BIT_6;
retry_lock2:
	if (qla83xx_access_control(base_vha, options, 0, 0, NULL)) {
		ql_dbg(ql_dbg_p3p, base_vha, 0xb072,
		    "Failed to acquire IDC lock. retrying...\n");
		/* Retry/Perform IDC-Lock recovery */
		if (qla83xx_idc_lock_recovery(base_vha) == QLA_SUCCESS) {
			qla83xx_wait_logic();
			goto retry_lock2;
		} else
			ql_log(ql_log_warn, base_vha, 0xb076,
			    "IDC Lock recovery FAILED.\n");
	}

	return;
#endif
}

void
qla83xx_idc_unlock(scsi_qla_host_t *base_vha, uint16_t requester_id)
{
#if 0
	uint16_t options = (requester_id << 15) | BIT_7;
#endif
	uint16_t retry;
	uint32_t data;
	struct qla_hw_data *ha = base_vha->hw;

	/* IDC-unlock implementation using driver-unlock/lock-id
	 * remote registers
	 */
	retry = 0;
retry_unlock:
	if (qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, &data)
	    == QLA_SUCCESS) {
		if (data == ha->portnum) {
			qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_UNLOCK, &data);
			/* Clearing lock-id by setting 0xff */
			qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID, 0xff);
		} else if (retry < 10) {
			/* SV: XXX: IDC unlock retrying needed here? */

			/* Retry for IDC-unlock */
			qla83xx_wait_logic();
			retry++;
			ql_dbg(ql_dbg_p3p, base_vha, 0xb064,
			    "Failed to release IDC lock, retrying=%d\n", retry);
			goto retry_unlock;
		}
	} else if (retry < 10) {
		/* Retry for IDC-unlock */
		qla83xx_wait_logic();
		retry++;
		ql_dbg(ql_dbg_p3p, base_vha, 0xb065,
		    "Failed to read drv-lockid, retrying=%d\n", retry);
		goto retry_unlock;
	}

	return;

#if 0
	/* XXX: IDC-unlock implementation using access-control mbx */
	retry = 0;
retry_unlock2:
	if (qla83xx_access_control(base_vha, options, 0, 0, NULL)) {
		if (retry < 10) {
			/* Retry for IDC-unlock */
			qla83xx_wait_logic();
			retry++;
			ql_dbg(ql_dbg_p3p, base_vha, 0xb066,
			    "Failed to release IDC lock, retrying=%d\n", retry);
			goto retry_unlock2;
		}
	}

	return;
#endif
}
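
/*
 * __qla83xx_set_drv_presence
 *	Set this function's bit in the IDC DRV-PRESENCE register.  The
 *	caller must hold the IDC lock; qla83xx_set_drv_presence() below
 *	is the locked wrapper.
 */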
int
__qla83xx_set_drv_presence(scsi_qla_host_t *vha)
{
	int rval = QLA_SUCCESS;
	struct qla_hw_data *ha = vha->hw;
	uint32_t drv_presence;

	rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
	if (rval == QLA_SUCCESS) {
		drv_presence |= (1 << ha->portnum);
		rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRV_PRESENCE,
		    drv_presence);
	}

	return rval;
}

int
qla83xx_set_drv_presence(scsi_qla_host_t *vha)
{
	int rval = QLA_SUCCESS;

	qla83xx_idc_lock(vha, 0);
	rval = __qla83xx_set_drv_presence(vha);
	qla83xx_idc_unlock(vha, 0);

	return rval;
}

int
__qla83xx_clear_drv_presence(scsi_qla_host_t *vha)
{
	int rval = QLA_SUCCESS;
	struct qla_hw_data *ha = vha->hw;
	uint32_t drv_presence;

	rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
	if (rval == QLA_SUCCESS) {
		drv_presence &= ~(1 << ha->portnum);
		rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRV_PRESENCE,
		    drv_presence);
	}

	return rval;
}

int
qla83xx_clear_drv_presence(scsi_qla_host_t *vha)
{
	int rval = QLA_SUCCESS;

	qla83xx_idc_lock(vha, 0);
	rval = __qla83xx_clear_drv_presence(vha);
	qla83xx_idc_unlock(vha, 0);

	return rval;
}

static void
qla83xx_need_reset_handler(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	uint32_t drv_ack, drv_presence;
	unsigned long ack_timeout;

	/* Wait for IDC ACK from all functions (DRV-ACK == DRV-PRESENCE) */
	ack_timeout = jiffies + (ha->fcoe_reset_timeout * HZ);
	while (1) {
		qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
		qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
		if ((drv_ack & drv_presence) == drv_presence)
			break;

		if (time_after_eq(jiffies, ack_timeout)) {
			ql_log(ql_log_warn, vha, 0xb067,
			    "RESET ACK TIMEOUT! drv_presence=0x%x "
			    "drv_ack=0x%x\n", drv_presence, drv_ack);
			/*
			 * The function(s) which did not ack in time are forced
			 * to withdraw any further participation in the IDC
			 * reset.
			 */
			if (drv_ack != drv_presence)
				qla83xx_wr_reg(vha, QLA83XX_IDC_DRV_PRESENCE,
				    drv_ack);
			break;
		}

		qla83xx_idc_unlock(vha, 0);
		msleep(1000);
		qla83xx_idc_lock(vha, 0);
	}

	qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_COLD);
	ql_log(ql_log_info, vha, 0xb068, "HW State: COLD/RE-INIT.\n");
}

static int
qla83xx_device_bootstrap(scsi_qla_host_t *vha)
{
	int rval = QLA_SUCCESS;
	uint32_t idc_control;

	qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_INITIALIZING);
	ql_log(ql_log_info, vha, 0xb069, "HW State: INITIALIZING.\n");

	/* Clearing IDC-Control Graceful-Reset Bit before resetting f/w */
	__qla83xx_get_idc_control(vha, &idc_control);
	idc_control &= ~QLA83XX_IDC_GRACEFUL_RESET;
	__qla83xx_set_idc_control(vha, 0);

	qla83xx_idc_unlock(vha, 0);
	rval = qla83xx_restart_nic_firmware(vha);
	qla83xx_idc_lock(vha, 0);

	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_fatal, vha, 0xb06a,
		    "Failed to restart NIC f/w.\n");
		qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_FAILED);
		ql_log(ql_log_info, vha, 0xb06b, "HW State: FAILED.\n");
	} else {
		ql_dbg(ql_dbg_p3p, vha, 0xb06c,
		    "Success in restarting nic f/w.\n");
		qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_READY);
		ql_log(ql_log_info, vha, 0xb06d, "HW State: READY.\n");
	}

	return rval;
}

/* Assumes idc_lock always held on entry */
int
qla83xx_idc_state_handler(scsi_qla_host_t *base_vha)
{
	struct qla_hw_data *ha = base_vha->hw;
	int rval = QLA_SUCCESS;
	unsigned long dev_init_timeout;
	uint32_t dev_state;

	/* Wait for MAX-INIT-TIMEOUT for the device to go ready */
	dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout * HZ);

	while (1) {

		if (time_after_eq(jiffies, dev_init_timeout)) {
			ql_log(ql_log_warn, base_vha, 0xb06e,
			    "Initialization TIMEOUT!\n");
			/*
			 * Init timeout.  Disable further NIC Core
			 * communication.
			 */
			qla83xx_wr_reg(base_vha, QLA83XX_IDC_DEV_STATE,
			    QLA8XXX_DEV_FAILED);
			ql_log(ql_log_info, base_vha, 0xb06f,
			    "HW State: FAILED.\n");
		}

		qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state);
		switch (dev_state) {
		case QLA8XXX_DEV_READY:
			if (ha->flags.nic_core_reset_owner)
				qla83xx_idc_audit(base_vha,
				    IDC_AUDIT_COMPLETION);
			ha->flags.nic_core_reset_owner = 0;
			ql_dbg(ql_dbg_p3p, base_vha, 0xb070,
			    "Reset_owner reset by 0x%x.\n",
			    ha->portnum);
			goto exit;
		case QLA8XXX_DEV_COLD:
			if (ha->flags.nic_core_reset_owner)
				rval = qla83xx_device_bootstrap(base_vha);
			else {
				/* Wait for AEN to change device-state */
				qla83xx_idc_unlock(base_vha, 0);
				msleep(1000);
				qla83xx_idc_lock(base_vha, 0);
			}
			break;
		case QLA8XXX_DEV_INITIALIZING:
			/* Wait for AEN to change device-state */
			qla83xx_idc_unlock(base_vha, 0);
			msleep(1000);
			qla83xx_idc_lock(base_vha, 0);
			break;
		case QLA8XXX_DEV_NEED_RESET:
			if (!ql2xdontresethba && ha->flags.nic_core_reset_owner)
				qla83xx_need_reset_handler(base_vha);
			else {
				/* Wait for AEN to change device-state */
				qla83xx_idc_unlock(base_vha, 0);
				msleep(1000);
				qla83xx_idc_lock(base_vha, 0);
			}
			/* reset timeout value after need reset handler */
			dev_init_timeout = jiffies +
			    (ha->fcoe_dev_init_timeout * HZ);
			break;
		case QLA8XXX_DEV_NEED_QUIESCENT:
			/* XXX: DEBUG for now */
			qla83xx_idc_unlock(base_vha, 0);
			msleep(1000);
			qla83xx_idc_lock(base_vha, 0);
			break;
		case QLA8XXX_DEV_QUIESCENT:
			/* XXX: DEBUG for now */
			if (ha->flags.quiesce_owner)
				goto exit;

			qla83xx_idc_unlock(base_vha, 0);
			msleep(1000);
			qla83xx_idc_lock(base_vha, 0);
			dev_init_timeout = jiffies +
			    (ha->fcoe_dev_init_timeout * HZ);
			break;
		case QLA8XXX_DEV_FAILED:
			if (ha->flags.nic_core_reset_owner)
				qla83xx_idc_audit(base_vha,
				    IDC_AUDIT_COMPLETION);
			ha->flags.nic_core_reset_owner = 0;
			__qla83xx_clear_drv_presence(base_vha);
			qla83xx_idc_unlock(base_vha, 0);
			qla8xxx_dev_failed_handler(base_vha);
			rval = QLA_FUNCTION_FAILED;
			qla83xx_idc_lock(base_vha, 0);
			goto exit;
		case QLA8XXX_BAD_VALUE:
			qla83xx_idc_unlock(base_vha, 0);
			msleep(1000);
			qla83xx_idc_lock(base_vha, 0);
			break;
		default:
			ql_log(ql_log_warn, base_vha, 0xb071,
			    "Unknown Device State: %x.\n", dev_state);
			qla83xx_idc_unlock(base_vha, 0);
			qla8xxx_dev_failed_handler(base_vha);
			rval = QLA_FUNCTION_FAILED;
			qla83xx_idc_lock(base_vha, 0);
			goto exit;
		}
	}

exit:
	return rval;
}
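
/*
 * qla2x00_disable_board_on_pci_error
 *	Work handler that tears the adapter down after an unrecoverable
 *	PCI error; the final qla_hw_data cleanup is left to
 *	qla2x00_remove_one() at device-removal time.
 */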
5321 */ 5322 if (test_bit(UNLOADING, &base_vha->dpc_flags)) 5323 return; 5324 5325 ql_log(ql_log_warn, base_vha, 0x015b, 5326 "Disabling adapter.\n"); 5327 5328 qla2x00_wait_for_sess_deletion(base_vha); 5329 5330 set_bit(UNLOADING, &base_vha->dpc_flags); 5331 5332 qla2x00_delete_all_vps(ha, base_vha); 5333 5334 qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16); 5335 5336 qla2x00_dfs_remove(base_vha); 5337 5338 qla84xx_put_chip(base_vha); 5339 5340 if (base_vha->timer_active) 5341 qla2x00_stop_timer(base_vha); 5342 5343 base_vha->flags.online = 0; 5344 5345 qla2x00_destroy_deferred_work(ha); 5346 5347 /* 5348 * Do not try to stop beacon blink as it will issue a mailbox 5349 * command. 5350 */ 5351 qla2x00_free_sysfs_attr(base_vha, false); 5352 5353 fc_remove_host(base_vha->host); 5354 5355 scsi_remove_host(base_vha->host); 5356 5357 base_vha->flags.init_done = 0; 5358 qla25xx_delete_queues(base_vha); 5359 qla2x00_free_fcports(base_vha); 5360 qla2x00_free_irqs(base_vha); 5361 qla2x00_mem_free(ha); 5362 qla82xx_md_free(base_vha); 5363 qla2x00_free_queues(ha); 5364 5365 qla2x00_unmap_iobases(ha); 5366 5367 pci_release_selected_regions(ha->pdev, ha->bars); 5368 pci_disable_pcie_error_reporting(pdev); 5369 pci_disable_device(pdev); 5370 5371 /* 5372 * Let qla2x00_remove_one cleanup qla_hw_data on device removal. 5373 */ 5374 } 5375 5376 /************************************************************************** 5377 * qla2x00_do_dpc 5378 * This kernel thread is a task that is schedule by the interrupt handler 5379 * to perform the background processing for interrupts. 5380 * 5381 * Notes: 5382 * This task always run in the context of a kernel thread. It 5383 * is kick-off by the driver's detect code and starts up 5384 * up one per adapter. It immediately goes to sleep and waits for 5385 * some fibre event. When either the interrupt handler or 5386 * the timer routine detects a event it will one of the task 5387 * bits then wake us up. 
static int
qla2x00_do_dpc(void *data)
{
	scsi_qla_host_t *base_vha;
	struct qla_hw_data *ha;
	uint32_t online;
	struct qla_qpair *qpair;

	ha = (struct qla_hw_data *)data;
	base_vha = pci_get_drvdata(ha->pdev);

	set_user_nice(current, MIN_NICE);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		ql_dbg(ql_dbg_dpc, base_vha, 0x4000,
		    "DPC handler sleeping.\n");

		schedule();

		if (!base_vha->flags.init_done || ha->flags.mbox_busy)
			goto end_loop;

		if (ha->flags.eeh_busy) {
			ql_dbg(ql_dbg_dpc, base_vha, 0x4003,
			    "eeh_busy=%d.\n", ha->flags.eeh_busy);
			goto end_loop;
		}

		ha->dpc_active = 1;

		ql_dbg(ql_dbg_dpc + ql_dbg_verbose, base_vha, 0x4001,
		    "DPC handler waking up, dpc_flags=0x%lx.\n",
		    base_vha->dpc_flags);

		if (test_bit(UNLOADING, &base_vha->dpc_flags))
			break;

		qla2x00_do_work(base_vha);

		if (IS_P3P_TYPE(ha)) {
			if (IS_QLA8044(ha)) {
				if (test_and_clear_bit(ISP_UNRECOVERABLE,
				    &base_vha->dpc_flags)) {
					qla8044_idc_lock(ha);
					qla8044_wr_direct(base_vha,
					    QLA8044_CRB_DEV_STATE_INDEX,
					    QLA8XXX_DEV_FAILED);
					qla8044_idc_unlock(ha);
					ql_log(ql_log_info, base_vha, 0x4004,
					    "HW State: FAILED.\n");
					qla8044_device_state_handler(base_vha);
					continue;
				}

			} else {
				if (test_and_clear_bit(ISP_UNRECOVERABLE,
				    &base_vha->dpc_flags)) {
					qla82xx_idc_lock(ha);
					qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
					    QLA8XXX_DEV_FAILED);
					qla82xx_idc_unlock(ha);
					ql_log(ql_log_info, base_vha, 0x0151,
					    "HW State: FAILED.\n");
					qla82xx_device_state_handler(base_vha);
					continue;
				}
			}

			if (test_and_clear_bit(FCOE_CTX_RESET_NEEDED,
			    &base_vha->dpc_flags)) {

				ql_dbg(ql_dbg_dpc, base_vha, 0x4005,
				    "FCoE context reset scheduled.\n");
				if (!(test_and_set_bit(ABORT_ISP_ACTIVE,
				    &base_vha->dpc_flags))) {
					if (qla82xx_fcoe_ctx_reset(base_vha)) {
						/* FCoE-ctx reset failed.
						 * Escalate to chip-reset
						 */
						set_bit(ISP_ABORT_NEEDED,
						    &base_vha->dpc_flags);
					}
					clear_bit(ABORT_ISP_ACTIVE,
					    &base_vha->dpc_flags);
				}

				ql_dbg(ql_dbg_dpc, base_vha, 0x4006,
				    "FCoE context reset end.\n");
			}
		} else if (IS_QLAFX00(ha)) {
			if (test_and_clear_bit(ISP_UNRECOVERABLE,
			    &base_vha->dpc_flags)) {
				ql_dbg(ql_dbg_dpc, base_vha, 0x4020,
				    "Firmware Reset Recovery\n");
				if (qlafx00_reset_initialize(base_vha)) {
					/* Failed. Abort isp later. */
					if (!test_bit(UNLOADING,
					    &base_vha->dpc_flags)) {
						set_bit(ISP_UNRECOVERABLE,
						    &base_vha->dpc_flags);
						ql_dbg(ql_dbg_dpc, base_vha,
						    0x4021,
						    "Reset Recovery Failed\n");
					}
				}
			}

			if (test_and_clear_bit(FX00_TARGET_SCAN,
			    &base_vha->dpc_flags)) {
				ql_dbg(ql_dbg_dpc, base_vha, 0x4022,
				    "ISPFx00 Target Scan scheduled\n");
				if (qlafx00_rescan_isp(base_vha)) {
					if (!test_bit(UNLOADING,
					    &base_vha->dpc_flags))
						set_bit(ISP_UNRECOVERABLE,
						    &base_vha->dpc_flags);
					ql_dbg(ql_dbg_dpc, base_vha, 0x401e,
					    "ISPFx00 Target Scan Failed\n");
				}
				ql_dbg(ql_dbg_dpc, base_vha, 0x401f,
				    "ISPFx00 Target Scan End\n");
			}
			if (test_and_clear_bit(FX00_HOST_INFO_RESEND,
			    &base_vha->dpc_flags)) {
				ql_dbg(ql_dbg_dpc, base_vha, 0x4023,
				    "ISPFx00 Host Info resend scheduled\n");
				qlafx00_fx_disc(base_vha,
				    &base_vha->hw->mr.fcport,
				    FXDISC_REG_HOST_INFO);
			}
		}

		if (test_and_clear_bit(ISP_ABORT_NEEDED,
		    &base_vha->dpc_flags)) {

			ql_dbg(ql_dbg_dpc, base_vha, 0x4007,
			    "ISP abort scheduled.\n");
			if (!(test_and_set_bit(ABORT_ISP_ACTIVE,
			    &base_vha->dpc_flags))) {

				if (ha->isp_ops->abort_isp(base_vha)) {
					/* failed. retry later */
					set_bit(ISP_ABORT_NEEDED,
					    &base_vha->dpc_flags);
				}
				clear_bit(ABORT_ISP_ACTIVE,
				    &base_vha->dpc_flags);
			}

			ql_dbg(ql_dbg_dpc, base_vha, 0x4008,
			    "ISP abort end.\n");
		}

		if (test_and_clear_bit(FCPORT_UPDATE_NEEDED,
		    &base_vha->dpc_flags)) {
			qla2x00_update_fcports(base_vha);
		}

		if (IS_QLAFX00(ha))
			goto loop_resync_check;

		if (test_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags)) {
			ql_dbg(ql_dbg_dpc, base_vha, 0x4009,
			    "Quiescence mode scheduled.\n");
			if (IS_P3P_TYPE(ha)) {
				if (IS_QLA82XX(ha))
					qla82xx_device_state_handler(base_vha);
				if (IS_QLA8044(ha))
					qla8044_device_state_handler(base_vha);
				clear_bit(ISP_QUIESCE_NEEDED,
				    &base_vha->dpc_flags);
				if (!ha->flags.quiesce_owner) {
					qla2x00_perform_loop_resync(base_vha);
					if (IS_QLA82XX(ha)) {
						qla82xx_idc_lock(ha);
						qla82xx_clear_qsnt_ready(
						    base_vha);
						qla82xx_idc_unlock(ha);
					} else if (IS_QLA8044(ha)) {
						qla8044_idc_lock(ha);
						qla8044_clear_qsnt_ready(
						    base_vha);
						qla8044_idc_unlock(ha);
					}
				}
			} else {
				clear_bit(ISP_QUIESCE_NEEDED,
				    &base_vha->dpc_flags);
				qla2x00_quiesce_io(base_vha);
			}
			ql_dbg(ql_dbg_dpc, base_vha, 0x400a,
			    "Quiescence mode end.\n");
		}

		if (test_and_clear_bit(RESET_MARKER_NEEDED,
		    &base_vha->dpc_flags) &&
		    (!(test_and_set_bit(RESET_ACTIVE, &base_vha->dpc_flags)))) {

			ql_dbg(ql_dbg_dpc, base_vha, 0x400b,
			    "Reset marker scheduled.\n");
			qla2x00_rst_aen(base_vha);
			clear_bit(RESET_ACTIVE, &base_vha->dpc_flags);
			ql_dbg(ql_dbg_dpc, base_vha, 0x400c,
			    "Reset marker end.\n");
		}

		/* Retry each device up to login retry count */
		if ((test_and_clear_bit(RELOGIN_NEEDED,
		    &base_vha->dpc_flags)) &&
		    !test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags) &&
		    atomic_read(&base_vha->loop_state) != LOOP_DOWN) {

			ql_dbg(ql_dbg_dpc, base_vha, 0x400d,
			    "Relogin scheduled.\n");
			qla2x00_relogin(base_vha);
			ql_dbg(ql_dbg_dpc, base_vha, 0x400e,
			    "Relogin end.\n");
		}
loop_resync_check:
		if (test_and_clear_bit(LOOP_RESYNC_NEEDED,
		    &base_vha->dpc_flags)) {

			ql_dbg(ql_dbg_dpc, base_vha, 0x400f,
			    "Loop resync scheduled.\n");

			if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE,
			    &base_vha->dpc_flags))) {

				qla2x00_loop_resync(base_vha);

				clear_bit(LOOP_RESYNC_ACTIVE,
				    &base_vha->dpc_flags);
			}

			ql_dbg(ql_dbg_dpc, base_vha, 0x4010,
			    "Loop resync end.\n");
		}

		if (IS_QLAFX00(ha))
			goto intr_on_check;

		if (test_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags) &&
		    atomic_read(&base_vha->loop_state) == LOOP_READY) {
			clear_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags);
			qla2xxx_flash_npiv_conf(base_vha);
		}

intr_on_check:
		if (!ha->interrupts_on)
			ha->isp_ops->enable_intrs(ha);

		if (test_and_clear_bit(BEACON_BLINK_NEEDED,
		    &base_vha->dpc_flags)) {
			if (ha->beacon_blink_led == 1)
				ha->isp_ops->beacon_blink(base_vha);
		}

		/* qpair online check */
		if (test_and_clear_bit(QPAIR_ONLINE_CHECK_NEEDED,
		    &base_vha->dpc_flags)) {
			if (ha->flags.eeh_busy ||
			    ha->flags.pci_channel_io_perm_failure)
				online = 0;
			else
				online = 1;

			mutex_lock(&ha->mq_lock);
			list_for_each_entry(qpair, &base_vha->qp_list,
			    qp_list_elem)
				qpair->online = online;
			mutex_unlock(&ha->mq_lock);
		}

		if (!IS_QLAFX00(ha))
			qla2x00_do_dpc_all_vps(base_vha);

		ha->dpc_active = 0;
end_loop:
		set_current_state(TASK_INTERRUPTIBLE);
	} /* End of while (!kthread_should_stop()) */
	__set_current_state(TASK_RUNNING);

	ql_dbg(ql_dbg_dpc, base_vha, 0x4011,
	    "DPC handler exiting.\n");

	/*
	 * Make sure that nobody tries to wake us up again.
	 */
	ha->dpc_active = 0;

	/* Cleanup any residual CTX SRBs. */
	qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16);

	return 0;
}
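
/*
 * qla2xxx_wake_dpc
 *	Wake the DPC thread for this adapter, unless the host is in the
 *	middle of unloading.
 */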
5715 */ 5716 vha->marker_needed = 1; 5717 } while (!atomic_read(&vha->loop_down_timer) && 5718 (test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags))); 5719 } 5720 } 5721 5722 /************************************************************************** 5723 * qla2x00_timer 5724 * 5725 * Description: 5726 * One second timer 5727 * 5728 * Context: Interrupt 5729 ***************************************************************************/ 5730 void 5731 qla2x00_timer(scsi_qla_host_t *vha) 5732 { 5733 unsigned long cpu_flags = 0; 5734 int start_dpc = 0; 5735 int index; 5736 srb_t *sp; 5737 uint16_t w; 5738 struct qla_hw_data *ha = vha->hw; 5739 struct req_que *req; 5740 5741 if (ha->flags.eeh_busy) { 5742 ql_dbg(ql_dbg_timer, vha, 0x6000, 5743 "EEH = %d, restarting timer.\n", 5744 ha->flags.eeh_busy); 5745 qla2x00_restart_timer(vha, WATCH_INTERVAL); 5746 return; 5747 } 5748 5749 /* 5750 * Hardware read to raise pending EEH errors during mailbox waits. If 5751 * the read returns -1 then disable the board. 5752 */ 5753 if (!pci_channel_offline(ha->pdev)) { 5754 pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w); 5755 qla2x00_check_reg16_for_disconnect(vha, w); 5756 } 5757 5758 /* Make sure qla82xx_watchdog is run only for physical port */ 5759 if (!vha->vp_idx && IS_P3P_TYPE(ha)) { 5760 if (test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) 5761 start_dpc++; 5762 if (IS_QLA82XX(ha)) 5763 qla82xx_watchdog(vha); 5764 else if (IS_QLA8044(ha)) 5765 qla8044_watchdog(vha); 5766 } 5767 5768 if (!vha->vp_idx && IS_QLAFX00(ha)) 5769 qlafx00_timer_routine(vha); 5770 5771 /* Loop down handler. */ 5772 if (atomic_read(&vha->loop_down_timer) > 0 && 5773 !(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) && 5774 !(test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags)) 5775 && vha->flags.online) { 5776 5777 if (atomic_read(&vha->loop_down_timer) == 5778 vha->loop_down_abort_time) { 5779 5780 ql_log(ql_log_info, vha, 0x6008, 5781 "Loop down - aborting the queues before time expires.\n"); 5782 5783 if (!IS_QLA2100(ha) && vha->link_down_timeout) 5784 atomic_set(&vha->loop_state, LOOP_DEAD); 5785 5786 /* 5787 * Schedule an ISP abort to return any FCP2-device 5788 * commands. 
5789 */ 5790 /* NPIV - scan physical port only */ 5791 if (!vha->vp_idx) { 5792 spin_lock_irqsave(&ha->hardware_lock, 5793 cpu_flags); 5794 req = ha->req_q_map[0]; 5795 for (index = 1; 5796 index < req->num_outstanding_cmds; 5797 index++) { 5798 fc_port_t *sfcp; 5799 5800 sp = req->outstanding_cmds[index]; 5801 if (!sp) 5802 continue; 5803 if (sp->type != SRB_SCSI_CMD) 5804 continue; 5805 sfcp = sp->fcport; 5806 if (!(sfcp->flags & FCF_FCP2_DEVICE)) 5807 continue; 5808 5809 if (IS_QLA82XX(ha)) 5810 set_bit(FCOE_CTX_RESET_NEEDED, 5811 &vha->dpc_flags); 5812 else 5813 set_bit(ISP_ABORT_NEEDED, 5814 &vha->dpc_flags); 5815 break; 5816 } 5817 spin_unlock_irqrestore(&ha->hardware_lock, 5818 cpu_flags); 5819 } 5820 start_dpc++; 5821 } 5822 5823 /* if the loop has been down for 4 minutes, reinit adapter */ 5824 if (atomic_dec_and_test(&vha->loop_down_timer) != 0) { 5825 if (!(vha->device_flags & DFLG_NO_CABLE)) { 5826 ql_log(ql_log_warn, vha, 0x6009, 5827 "Loop down - aborting ISP.\n"); 5828 5829 if (IS_QLA82XX(ha)) 5830 set_bit(FCOE_CTX_RESET_NEEDED, 5831 &vha->dpc_flags); 5832 else 5833 set_bit(ISP_ABORT_NEEDED, 5834 &vha->dpc_flags); 5835 } 5836 } 5837 ql_dbg(ql_dbg_timer, vha, 0x600a, 5838 "Loop down - seconds remaining %d.\n", 5839 atomic_read(&vha->loop_down_timer)); 5840 } 5841 /* Check if beacon LED needs to be blinked for physical host only */ 5842 if (!vha->vp_idx && (ha->beacon_blink_led == 1)) { 5843 /* There is no beacon_blink function for ISP82xx */ 5844 if (!IS_P3P_TYPE(ha)) { 5845 set_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags); 5846 start_dpc++; 5847 } 5848 } 5849 5850 /* Process any deferred work. */ 5851 if (!list_empty(&vha->work_list)) 5852 start_dpc++; 5853 5854 /* Schedule the DPC routine if needed */ 5855 if ((test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) || 5856 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) || 5857 test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags) || 5858 start_dpc || 5859 test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) || 5860 test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags) || 5861 test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags) || 5862 test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) || 5863 test_bit(VP_DPC_NEEDED, &vha->dpc_flags) || 5864 test_bit(RELOGIN_NEEDED, &vha->dpc_flags))) { 5865 ql_dbg(ql_dbg_timer, vha, 0x600b, 5866 "isp_abort_needed=%d loop_resync_needed=%d " 5867 "fcport_update_needed=%d start_dpc=%d " 5868 "reset_marker_needed=%d", 5869 test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags), 5870 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags), 5871 test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags), 5872 start_dpc, 5873 test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags)); 5874 ql_dbg(ql_dbg_timer, vha, 0x600c, 5875 "beacon_blink_needed=%d isp_unrecoverable=%d " 5876 "fcoe_ctx_reset_needed=%d vp_dpc_needed=%d " 5877 "relogin_needed=%d.\n", 5878 test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags), 5879 test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags), 5880 test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags), 5881 test_bit(VP_DPC_NEEDED, &vha->dpc_flags), 5882 test_bit(RELOGIN_NEEDED, &vha->dpc_flags)); 5883 qla2xxx_wake_dpc(vha); 5884 } 5885 5886 qla2x00_restart_timer(vha, WATCH_INTERVAL); 5887 } 5888 5889 /* Firmware interface routines. 

#define FW_BLOBS	11
#define FW_ISP21XX	0
#define FW_ISP22XX	1
#define FW_ISP2300	2
#define FW_ISP2322	3
#define FW_ISP24XX	4
#define FW_ISP25XX	5
#define FW_ISP81XX	6
#define FW_ISP82XX	7
#define FW_ISP2031	8
#define FW_ISP8031	9
#define FW_ISP27XX	10

#define FW_FILE_ISP21XX	"ql2100_fw.bin"
#define FW_FILE_ISP22XX	"ql2200_fw.bin"
#define FW_FILE_ISP2300	"ql2300_fw.bin"
#define FW_FILE_ISP2322	"ql2322_fw.bin"
#define FW_FILE_ISP24XX	"ql2400_fw.bin"
#define FW_FILE_ISP25XX	"ql2500_fw.bin"
#define FW_FILE_ISP81XX	"ql8100_fw.bin"
#define FW_FILE_ISP82XX	"ql8200_fw.bin"
#define FW_FILE_ISP2031	"ql2600_fw.bin"
#define FW_FILE_ISP8031	"ql8300_fw.bin"
#define FW_FILE_ISP27XX	"ql2700_fw.bin"


static DEFINE_MUTEX(qla_fw_lock);

static struct fw_blob qla_fw_blobs[FW_BLOBS] = {
	{ .name = FW_FILE_ISP21XX, .segs = { 0x1000, 0 }, },
	{ .name = FW_FILE_ISP22XX, .segs = { 0x1000, 0 }, },
	{ .name = FW_FILE_ISP2300, .segs = { 0x800, 0 }, },
	{ .name = FW_FILE_ISP2322, .segs = { 0x800, 0x1c000, 0x1e000, 0 }, },
	{ .name = FW_FILE_ISP24XX, },
	{ .name = FW_FILE_ISP25XX, },
	{ .name = FW_FILE_ISP81XX, },
	{ .name = FW_FILE_ISP82XX, },
	{ .name = FW_FILE_ISP2031, },
	{ .name = FW_FILE_ISP8031, },
	{ .name = FW_FILE_ISP27XX, },
};

struct fw_blob *
qla2x00_request_firmware(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct fw_blob *blob;

	if (IS_QLA2100(ha)) {
		blob = &qla_fw_blobs[FW_ISP21XX];
	} else if (IS_QLA2200(ha)) {
		blob = &qla_fw_blobs[FW_ISP22XX];
	} else if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
		blob = &qla_fw_blobs[FW_ISP2300];
	} else if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
		blob = &qla_fw_blobs[FW_ISP2322];
	} else if (IS_QLA24XX_TYPE(ha)) {
		blob = &qla_fw_blobs[FW_ISP24XX];
	} else if (IS_QLA25XX(ha)) {
		blob = &qla_fw_blobs[FW_ISP25XX];
	} else if (IS_QLA81XX(ha)) {
		blob = &qla_fw_blobs[FW_ISP81XX];
	} else if (IS_QLA82XX(ha)) {
		blob = &qla_fw_blobs[FW_ISP82XX];
	} else if (IS_QLA2031(ha)) {
		blob = &qla_fw_blobs[FW_ISP2031];
	} else if (IS_QLA8031(ha)) {
		blob = &qla_fw_blobs[FW_ISP8031];
	} else if (IS_QLA27XX(ha)) {
		blob = &qla_fw_blobs[FW_ISP27XX];
	} else {
		return NULL;
	}

	mutex_lock(&qla_fw_lock);
	if (blob->fw)
		goto out;

	if (request_firmware(&blob->fw, blob->name, &ha->pdev->dev)) {
		ql_log(ql_log_warn, vha, 0x0063,
		    "Failed to load firmware image (%s).\n", blob->name);
		blob->fw = NULL;
		blob = NULL;
		goto out;
	}

out:
	mutex_unlock(&qla_fw_lock);
	return blob;
}

static void
qla2x00_release_firmware(void)
{
	int idx;

	mutex_lock(&qla_fw_lock);
	for (idx = 0; idx < FW_BLOBS; idx++)
		release_firmware(qla_fw_blobs[idx].fw);
	mutex_unlock(&qla_fw_lock);
}
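/*
 * PCI error (EEH/AER) recovery callbacks.  The PCI core invokes them in
 * sequence - error_detected(), optionally mmio_enabled(), slot_reset(),
 * then resume() - to quiesce, reset and restart the HBA after a channel
 * error.
 */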
static pci_ers_result_t
qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	scsi_qla_host_t *vha = pci_get_drvdata(pdev);
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_aer, vha, 0x9000,
	    "PCI error detected, state %x.\n", state);

	switch (state) {
	case pci_channel_io_normal:
		ha->flags.eeh_busy = 0;
		if (ql2xmqsupport) {
			set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags);
			qla2xxx_wake_dpc(vha);
		}
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		ha->flags.eeh_busy = 1;
		/* For ISP82XX complete any pending mailbox cmd */
		if (IS_QLA82XX(ha)) {
			ha->flags.isp82xx_fw_hung = 1;
			ql_dbg(ql_dbg_aer, vha, 0x9001,
			    "PCI channel I/O frozen\n");
			qla82xx_clear_pending_mbx(vha);
		}
		qla2x00_free_irqs(vha);
		pci_disable_device(pdev);
		/* Return back all IOs */
		qla2x00_abort_all_cmds(vha, DID_RESET << 16);
		if (ql2xmqsupport) {
			set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags);
			qla2xxx_wake_dpc(vha);
		}
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		ha->flags.pci_channel_io_perm_failure = 1;
		qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
		if (ql2xmqsupport) {
			set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags);
			qla2xxx_wake_dpc(vha);
		}
		return PCI_ERS_RESULT_DISCONNECT;
	}
	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t
qla2xxx_pci_mmio_enabled(struct pci_dev *pdev)
{
	int risc_paused = 0;
	uint32_t stat;
	unsigned long flags;
	scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
	struct qla_hw_data *ha = base_vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;

	if (IS_QLA82XX(ha))
		return PCI_ERS_RESULT_RECOVERED;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
		stat = RD_REG_DWORD(&reg->hccr);
		if (stat & HCCR_RISC_PAUSE)
			risc_paused = 1;
	} else if (IS_QLA23XX(ha)) {
		stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
		if (stat & HSR_RISC_PAUSED)
			risc_paused = 1;
	} else if (IS_FWI2_CAPABLE(ha)) {
		stat = RD_REG_DWORD(&reg24->host_status);
		if (stat & HSRX_RISC_PAUSED)
			risc_paused = 1;
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (risc_paused) {
		ql_log(ql_log_info, base_vha, 0x9003,
		    "RISC paused -- mmio_enabled, Dumping firmware.\n");
		ha->isp_ops->fw_dump(base_vha, 0);

		return PCI_ERS_RESULT_NEED_RESET;
	} else
		return PCI_ERS_RESULT_RECOVERED;
}

static uint32_t
qla82xx_error_recovery(scsi_qla_host_t *base_vha)
{
	uint32_t rval = QLA_FUNCTION_FAILED;
	uint32_t drv_active = 0;
	struct qla_hw_data *ha = base_vha->hw;
	int fn;
	struct pci_dev *other_pdev = NULL;

	ql_dbg(ql_dbg_aer, base_vha, 0x9006,
	    "Entered %s.\n", __func__);

	set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);

	if (base_vha->flags.online) {
		/* Abort all outstanding commands,
		 * so as to be requeued later */
		qla2x00_abort_isp_cleanup(base_vha);
	}


	fn = PCI_FUNC(ha->pdev->devfn);
	while (fn > 0) {
		fn--;
		ql_dbg(ql_dbg_aer, base_vha, 0x9007,
		    "Finding pci device at function = 0x%x.\n", fn);
		other_pdev =
		    pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus),
		    ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
		    fn));

		if (!other_pdev)
			continue;
		if (atomic_read(&other_pdev->enable_cnt)) {
			ql_dbg(ql_dbg_aer, base_vha, 0x9008,
			    "Found PCI func available and enabled at 0x%x.\n",
			    fn);
			pci_dev_put(other_pdev);
			break;
		}
		pci_dev_put(other_pdev);
	}

	if (!fn) {
		/* Reset owner */
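		/*
		 * No lower-numbered, enabled PCI function was found, so
		 * this function acts as the IDC reset owner: it brings the
		 * shared ISP82xx firmware back up on behalf of all
		 * functions, while the non-owners below simply wait for
		 * the device state to reach READY.
		 */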
		ql_dbg(ql_dbg_aer, base_vha, 0x9009,
		    "This devfn is reset owner = 0x%x.\n",
		    ha->pdev->devfn);
		qla82xx_idc_lock(ha);

		qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
		    QLA8XXX_DEV_INITIALIZING);

		qla82xx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION,
		    QLA82XX_IDC_VERSION);

		drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
		ql_dbg(ql_dbg_aer, base_vha, 0x900a,
		    "drv_active = 0x%x.\n", drv_active);

		qla82xx_idc_unlock(ha);
		/* Reset if device is not already reset;
		 * drv_active would be 0 if a reset has already been done
		 */
		if (drv_active)
			rval = qla82xx_start_firmware(base_vha);
		else
			rval = QLA_SUCCESS;
		qla82xx_idc_lock(ha);

		if (rval != QLA_SUCCESS) {
			ql_log(ql_log_info, base_vha, 0x900b,
			    "HW State: FAILED.\n");
			qla82xx_clear_drv_active(ha);
			qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
			    QLA8XXX_DEV_FAILED);
		} else {
			ql_log(ql_log_info, base_vha, 0x900c,
			    "HW State: READY.\n");
			qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
			    QLA8XXX_DEV_READY);
			qla82xx_idc_unlock(ha);
			ha->flags.isp82xx_fw_hung = 0;
			rval = qla82xx_restart_isp(base_vha);
			qla82xx_idc_lock(ha);
			/* Clear driver state register */
			qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, 0);
			qla82xx_set_drv_active(base_vha);
		}
		qla82xx_idc_unlock(ha);
	} else {
		ql_dbg(ql_dbg_aer, base_vha, 0x900d,
		    "This devfn is not reset owner = 0x%x.\n",
		    ha->pdev->devfn);
		if ((qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE) ==
		    QLA8XXX_DEV_READY)) {
			ha->flags.isp82xx_fw_hung = 0;
			rval = qla82xx_restart_isp(base_vha);
			qla82xx_idc_lock(ha);
			qla82xx_set_drv_active(base_vha);
			qla82xx_idc_unlock(ha);
		}
	}
	clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);

	return rval;
}
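/*
 * slot_reset() runs after the PCI core has reset the slot or link: it
 * re-enables the device, restores PCI state, reattaches interrupts and
 * re-initializes the ISP, reporting PCI_ERS_RESULT_RECOVERED only when
 * all of that succeeds so that resume() can restart I/O.
 */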
6202 */ 6203 pdev->error_state = pci_channel_io_normal; 6204 6205 pci_restore_state(pdev); 6206 6207 /* pci_restore_state() clears the saved_state flag of the device 6208 * save restored state which resets saved_state flag 6209 */ 6210 pci_save_state(pdev); 6211 6212 if (ha->mem_only) 6213 rc = pci_enable_device_mem(pdev); 6214 else 6215 rc = pci_enable_device(pdev); 6216 6217 if (rc) { 6218 ql_log(ql_log_warn, base_vha, 0x9005, 6219 "Can't re-enable PCI device after reset.\n"); 6220 goto exit_slot_reset; 6221 } 6222 6223 rsp = ha->rsp_q_map[0]; 6224 if (qla2x00_request_irqs(ha, rsp)) 6225 goto exit_slot_reset; 6226 6227 if (ha->isp_ops->pci_config(base_vha)) 6228 goto exit_slot_reset; 6229 6230 if (IS_QLA82XX(ha)) { 6231 if (qla82xx_error_recovery(base_vha) == QLA_SUCCESS) { 6232 ret = PCI_ERS_RESULT_RECOVERED; 6233 goto exit_slot_reset; 6234 } else 6235 goto exit_slot_reset; 6236 } 6237 6238 while (ha->flags.mbox_busy && retries--) 6239 msleep(1000); 6240 6241 set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); 6242 if (ha->isp_ops->abort_isp(base_vha) == QLA_SUCCESS) 6243 ret = PCI_ERS_RESULT_RECOVERED; 6244 clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); 6245 6246 6247 exit_slot_reset: 6248 ql_dbg(ql_dbg_aer, base_vha, 0x900e, 6249 "slot_reset return %x.\n", ret); 6250 6251 return ret; 6252 } 6253 6254 static void 6255 qla2xxx_pci_resume(struct pci_dev *pdev) 6256 { 6257 scsi_qla_host_t *base_vha = pci_get_drvdata(pdev); 6258 struct qla_hw_data *ha = base_vha->hw; 6259 int ret; 6260 6261 ql_dbg(ql_dbg_aer, base_vha, 0x900f, 6262 "pci_resume.\n"); 6263 6264 ret = qla2x00_wait_for_hba_online(base_vha); 6265 if (ret != QLA_SUCCESS) { 6266 ql_log(ql_log_fatal, base_vha, 0x9002, 6267 "The device failed to resume I/O from slot/link_reset.\n"); 6268 } 6269 6270 pci_cleanup_aer_uncorrect_error_status(pdev); 6271 6272 ha->flags.eeh_busy = 0; 6273 } 6274 6275 static void 6276 qla83xx_disable_laser(scsi_qla_host_t *vha) 6277 { 6278 uint32_t reg, data, fn; 6279 struct qla_hw_data *ha = vha->hw; 6280 struct device_reg_24xx __iomem *isp_reg = &ha->iobase->isp24; 6281 6282 /* pci func #/port # */ 6283 ql_dbg(ql_dbg_init, vha, 0x004b, 6284 "Disabling Laser for hba: %p\n", vha); 6285 6286 fn = (RD_REG_DWORD(&isp_reg->ctrl_status) & 6287 (BIT_15|BIT_14|BIT_13|BIT_12)); 6288 6289 fn = (fn >> 12); 6290 6291 if (fn & 1) 6292 reg = PORT_1_2031; 6293 else 6294 reg = PORT_0_2031; 6295 6296 data = LASER_OFF_2031; 6297 6298 qla83xx_wr_reg(vha, reg, data); 6299 } 6300 6301 static int qla2xxx_map_queues(struct Scsi_Host *shost) 6302 { 6303 scsi_qla_host_t *vha = (scsi_qla_host_t *)shost->hostdata; 6304 6305 return blk_mq_pci_map_queues(&shost->tag_set, vha->hw->pdev); 6306 } 6307 6308 static const struct pci_error_handlers qla2xxx_err_handler = { 6309 .error_detected = qla2xxx_pci_error_detected, 6310 .mmio_enabled = qla2xxx_pci_mmio_enabled, 6311 .slot_reset = qla2xxx_pci_slot_reset, 6312 .resume = qla2xxx_pci_resume, 6313 }; 6314 6315 static struct pci_device_id qla2xxx_pci_tbl[] = { 6316 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2100) }, 6317 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2200) }, 6318 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2300) }, 6319 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2312) }, 6320 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2322) }, 6321 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP6312) }, 6322 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP6322) }, 6323 { 
static struct pci_device_id qla2xxx_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2100) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2200) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2300) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2312) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2322) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP6312) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP6322) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2422) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2432) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8432) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5422) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5432) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2532) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2031) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8001) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8021) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8031) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISPF001) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8044) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2071) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2271) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2261) },
	{ 0 },
};
MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl);

static struct pci_driver qla2xxx_pci_driver = {
	.name		= QLA2XXX_DRIVER_NAME,
	.driver		= {
		.owner		= THIS_MODULE,
	},
	.id_table	= qla2xxx_pci_tbl,
	.probe		= qla2x00_probe_one,
	.remove		= qla2x00_remove_one,
	.shutdown	= qla2x00_shutdown,
	.err_handler	= &qla2xxx_err_handler,
};

static const struct file_operations apidev_fops = {
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
};

/**
 * qla2x00_module_init - Module initialization.
 **/
static int __init
qla2x00_module_init(void)
{
	int ret = 0;

	/* Allocate cache for SRBs. */
	srb_cachep = kmem_cache_create("qla2xxx_srbs", sizeof(srb_t), 0,
	    SLAB_HWCACHE_ALIGN, NULL);
	if (srb_cachep == NULL) {
		ql_log(ql_log_fatal, NULL, 0x0001,
		    "Unable to allocate SRB cache...Failing load!\n");
		return -ENOMEM;
	}

	/* Initialize target kmem_cache and mem_pools */
	ret = qlt_init();
	if (ret < 0) {
		kmem_cache_destroy(srb_cachep);
		return ret;
	} else if (ret > 0) {
		/*
		 * If initiator mode is explicitly disabled by qlt_init(),
		 * prevent scsi_transport_fc.c:fc_scsi_scan_rport() from
		 * performing scsi_scan_target() during LOOP UP event.
		 */
		qla2xxx_transport_functions.disable_target_scan = 1;
		qla2xxx_transport_vport_functions.disable_target_scan = 1;
	}

	/* Derive version string. */
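	/*
	 * (With extended error logging enabled, the banner below reports
	 * "<QLA2XXX_VERSION>-debug", making debug-enabled loads easy to
	 * spot in the kernel log.)
	 */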
	strcpy(qla2x00_version_str, QLA2XXX_VERSION);
	if (ql2xextended_error_logging)
		strcat(qla2x00_version_str, "-debug");

	qla2xxx_transport_template =
	    fc_attach_transport(&qla2xxx_transport_functions);
	if (!qla2xxx_transport_template) {
		kmem_cache_destroy(srb_cachep);
		ql_log(ql_log_fatal, NULL, 0x0002,
		    "fc_attach_transport failed...Failing load!\n");
		qlt_exit();
		return -ENODEV;
	}

	apidev_major = register_chrdev(0, QLA2XXX_APIDEV, &apidev_fops);
	if (apidev_major < 0) {
		ql_log(ql_log_fatal, NULL, 0x0003,
		    "Unable to register char device %s.\n", QLA2XXX_APIDEV);
	}

	qla2xxx_transport_vport_template =
	    fc_attach_transport(&qla2xxx_transport_vport_functions);
	if (!qla2xxx_transport_vport_template) {
		kmem_cache_destroy(srb_cachep);
		qlt_exit();
		fc_release_transport(qla2xxx_transport_template);
		ql_log(ql_log_fatal, NULL, 0x0004,
		    "fc_attach_transport vport failed...Failing load!\n");
		return -ENODEV;
	}
	ql_log(ql_log_info, NULL, 0x0005,
	    "QLogic Fibre Channel HBA Driver: %s.\n",
	    qla2x00_version_str);
	ret = pci_register_driver(&qla2xxx_pci_driver);
	if (ret) {
		kmem_cache_destroy(srb_cachep);
		qlt_exit();
		fc_release_transport(qla2xxx_transport_template);
		fc_release_transport(qla2xxx_transport_vport_template);
		ql_log(ql_log_fatal, NULL, 0x0006,
		    "pci_register_driver failed...ret=%d Failing load!\n",
		    ret);
	}
	return ret;
}

/**
 * qla2x00_module_exit - Module cleanup.
 **/
static void __exit
qla2x00_module_exit(void)
{
	unregister_chrdev(apidev_major, QLA2XXX_APIDEV);
	pci_unregister_driver(&qla2xxx_pci_driver);
	qla2x00_release_firmware();
	kmem_cache_destroy(srb_cachep);
	qlt_exit();
	if (ctx_cachep)
		kmem_cache_destroy(ctx_cachep);
	fc_release_transport(qla2xxx_transport_template);
	fc_release_transport(qla2xxx_transport_vport_template);
}

module_init(qla2x00_module_init);
module_exit(qla2x00_module_exit);

MODULE_AUTHOR("QLogic Corporation");
MODULE_DESCRIPTION("QLogic Fibre Channel HBA Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(QLA2XXX_VERSION);
MODULE_FIRMWARE(FW_FILE_ISP21XX);
MODULE_FIRMWARE(FW_FILE_ISP22XX);
MODULE_FIRMWARE(FW_FILE_ISP2300);
MODULE_FIRMWARE(FW_FILE_ISP2322);
MODULE_FIRMWARE(FW_FILE_ISP24XX);
MODULE_FIRMWARE(FW_FILE_ISP25XX);