// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 */
#include "qla_def.h"

#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/kobject.h>
#include <linux/slab.h>
#include <linux/blk-mq-pci.h>
#include <linux/refcount.h>
#include <linux/crash_dump.h>

#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_fc.h>

#include "qla_target.h"

/*
 * Driver version
 */
char qla2x00_version_str[40];

static int apidev_major;

/*
 * SRB allocation cache
 */
struct kmem_cache *srb_cachep;

int ql2xfulldump_on_mpifail;
module_param(ql2xfulldump_on_mpifail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql2xfulldump_on_mpifail,
		 "Set this to take full dump on MPI hang.");

int ql2xenforce_iocb_limit = 1;
module_param(ql2xenforce_iocb_limit, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql2xenforce_iocb_limit,
		 "Enforce IOCB throttling, to avoid FW congestion. (default: 1)");

/*
 * CT6 CTX allocation cache
 */
static struct kmem_cache *ctx_cachep;
/*
 * error level for logging
 */
uint ql_errlev = 0x8001;

int ql2xsecenable;
module_param(ql2xsecenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xsecenable,
	"Enable/disable security. 0(Default) - Security disabled. 1 - Security enabled.");

static int ql2xenableclass2;
module_param(ql2xenableclass2, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xenableclass2,
		"Specify if Class 2 operations are supported from the very "
		"beginning. Default is 0 - class 2 not supported.");


int ql2xlogintimeout = 20;
module_param(ql2xlogintimeout, int, S_IRUGO);
MODULE_PARM_DESC(ql2xlogintimeout,
		"Login timeout value in seconds.");

int qlport_down_retry;
module_param(qlport_down_retry, int, S_IRUGO);
MODULE_PARM_DESC(qlport_down_retry,
		"Maximum number of command retries to a port that returns "
		"a PORT-DOWN status.");

int ql2xplogiabsentdevice;
module_param(ql2xplogiabsentdevice, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xplogiabsentdevice,
		"Option to enable PLOGI to devices that are not present after "
		"a Fabric scan. This is needed for several broken switches. "
		"Default is 0 - no PLOGI. 1 - perform PLOGI.");

int ql2xloginretrycount;
module_param(ql2xloginretrycount, int, S_IRUGO);
MODULE_PARM_DESC(ql2xloginretrycount,
		"Specify an alternate value for the NVRAM login retry count.");

int ql2xallocfwdump = 1;
module_param(ql2xallocfwdump, int, S_IRUGO);
MODULE_PARM_DESC(ql2xallocfwdump,
		"Option to enable allocation of memory for a firmware dump "
		"during HBA initialization. Memory allocation requirements "
		"vary by ISP type. Default is 1 - allocate memory.");

int ql2xextended_error_logging;
module_param(ql2xextended_error_logging, int, S_IRUGO|S_IWUSR);
module_param_named(logging, ql2xextended_error_logging, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xextended_error_logging,
		"Option to enable extended error logging,\n"
		"\t\tDefault is 0 - no logging. 0x40000000 - Module Init & Probe.\n"
		"\t\t0x20000000 - Mailbox Cmnds. 0x10000000 - Device Discovery.\n"
		"\t\t0x08000000 - IO tracing. 0x04000000 - DPC Thread.\n"
		"\t\t0x02000000 - Async events. 0x01000000 - Timer routines.\n"
		"\t\t0x00800000 - User space. 0x00400000 - Task Management.\n"
		"\t\t0x00200000 - AER/EEH. 0x00100000 - Multi Q.\n"
		"\t\t0x00080000 - P3P Specific. 0x00040000 - Virtual Port.\n"
		"\t\t0x00020000 - Buffer Dump. 0x00010000 - Misc.\n"
		"\t\t0x00008000 - Verbose. 0x00004000 - Target.\n"
		"\t\t0x00002000 - Target Mgmt. 0x00001000 - Target TMF.\n"
		"\t\t0x7fffffff - For enabling all logs, can be too many logs.\n"
		"\t\t0x1e400000 - Preferred value for capturing essential "
		"debug information (equivalent to old "
		"ql2xextended_error_logging=1).\n"
		"\t\tDo LOGICAL OR of the value to enable more than one level");

int ql2xshiftctondsd = 6;
module_param(ql2xshiftctondsd, int, S_IRUGO);
MODULE_PARM_DESC(ql2xshiftctondsd,
		"Set to control shifting of command type processing "
		"based on total number of SG elements.");

int ql2xfdmienable = 1;
module_param(ql2xfdmienable, int, S_IRUGO|S_IWUSR);
module_param_named(fdmi, ql2xfdmienable, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xfdmienable,
		"Enables FDMI registrations. "
		"0 - no FDMI registrations. "
		"1 - provide FDMI registrations (default).");

#define MAX_Q_DEPTH	64
static int ql2xmaxqdepth = MAX_Q_DEPTH;
module_param(ql2xmaxqdepth, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xmaxqdepth,
		"Maximum queue depth to set for each LUN. "
		"Default is 64.");

int ql2xenabledif = 2;
module_param(ql2xenabledif, int, S_IRUGO);
MODULE_PARM_DESC(ql2xenabledif,
		" Enable T10-CRC-DIF:\n"
		" Default is 2.\n"
		"  0 -- No DIF Support\n"
		"  1 -- Enable DIF for all types\n"
		"  2 -- Enable DIF for all types, except Type 0.\n");

#if (IS_ENABLED(CONFIG_NVME_FC))
int ql2xnvmeenable = 1;
#else
int ql2xnvmeenable;
#endif
module_param(ql2xnvmeenable, int, 0644);
MODULE_PARM_DESC(ql2xnvmeenable,
		"Enables NVME support. "
		"0 - no NVMe. Default is Y");

int ql2xenablehba_err_chk = 2;
module_param(ql2xenablehba_err_chk, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xenablehba_err_chk,
		" Enable T10-CRC-DIF Error isolation by HBA:\n"
		" Default is 2.\n"
		"  0 -- Error isolation disabled\n"
		"  1 -- Error isolation enabled only for DIX Type 0\n"
		"  2 -- Error isolation enabled for all Types\n");

int ql2xiidmaenable = 1;
module_param(ql2xiidmaenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xiidmaenable,
		"Enables iIDMA settings. "
		"Default is 1 - perform iIDMA. 0 - no iIDMA.");

int ql2xmqsupport = 1;
module_param(ql2xmqsupport, int, S_IRUGO);
MODULE_PARM_DESC(ql2xmqsupport,
		"Enable on demand multiple queue pairs support. "
		"Default is 1 for supported. "
		"Set it to 0 to turn off mq qpair support.");

int ql2xfwloadbin;
module_param(ql2xfwloadbin, int, S_IRUGO|S_IWUSR);
module_param_named(fwload, ql2xfwloadbin, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xfwloadbin,
		"Option to specify location from which to load ISP firmware:\n"
		" 2 -- load firmware via the request_firmware() (hotplug)\n"
		"      interface.\n"
		" 1 -- load firmware from flash.\n"
		" 0 -- use default semantics.\n");

int ql2xetsenable;
module_param(ql2xetsenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xetsenable,
		"Enables firmware ETS burst. "
196 "Default is 0 - skip ETS enablement."); 197 198 int ql2xdbwr = 1; 199 module_param(ql2xdbwr, int, S_IRUGO|S_IWUSR); 200 MODULE_PARM_DESC(ql2xdbwr, 201 "Option to specify scheme for request queue posting.\n" 202 " 0 -- Regular doorbell.\n" 203 " 1 -- CAMRAM doorbell (faster).\n"); 204 205 int ql2xtargetreset = 1; 206 module_param(ql2xtargetreset, int, S_IRUGO); 207 MODULE_PARM_DESC(ql2xtargetreset, 208 "Enable target reset." 209 "Default is 1 - use hw defaults."); 210 211 int ql2xgffidenable; 212 module_param(ql2xgffidenable, int, S_IRUGO); 213 MODULE_PARM_DESC(ql2xgffidenable, 214 "Enables GFF_ID checks of port type. " 215 "Default is 0 - Do not use GFF_ID information."); 216 217 int ql2xasynctmfenable = 1; 218 module_param(ql2xasynctmfenable, int, S_IRUGO); 219 MODULE_PARM_DESC(ql2xasynctmfenable, 220 "Enables issue of TM IOCBs asynchronously via IOCB mechanism" 221 "Default is 1 - Issue TM IOCBs via mailbox mechanism."); 222 223 int ql2xdontresethba; 224 module_param(ql2xdontresethba, int, S_IRUGO|S_IWUSR); 225 MODULE_PARM_DESC(ql2xdontresethba, 226 "Option to specify reset behaviour.\n" 227 " 0 (Default) -- Reset on failure.\n" 228 " 1 -- Do not reset on failure.\n"); 229 230 uint64_t ql2xmaxlun = MAX_LUNS; 231 module_param(ql2xmaxlun, ullong, S_IRUGO); 232 MODULE_PARM_DESC(ql2xmaxlun, 233 "Defines the maximum LU number to register with the SCSI " 234 "midlayer. Default is 65535."); 235 236 int ql2xmdcapmask = 0x1F; 237 module_param(ql2xmdcapmask, int, S_IRUGO); 238 MODULE_PARM_DESC(ql2xmdcapmask, 239 "Set the Minidump driver capture mask level. " 240 "Default is 0x1F - Can be set to 0x3, 0x7, 0xF, 0x1F, 0x7F."); 241 242 int ql2xmdenable = 1; 243 module_param(ql2xmdenable, int, S_IRUGO); 244 MODULE_PARM_DESC(ql2xmdenable, 245 "Enable/disable MiniDump. " 246 "0 - MiniDump disabled. " 247 "1 (Default) - MiniDump enabled."); 248 249 int ql2xexlogins; 250 module_param(ql2xexlogins, uint, S_IRUGO|S_IWUSR); 251 MODULE_PARM_DESC(ql2xexlogins, 252 "Number of extended Logins. " 253 "0 (Default)- Disabled."); 254 255 int ql2xexchoffld = 1024; 256 module_param(ql2xexchoffld, uint, 0644); 257 MODULE_PARM_DESC(ql2xexchoffld, 258 "Number of target exchanges."); 259 260 int ql2xiniexchg = 1024; 261 module_param(ql2xiniexchg, uint, 0644); 262 MODULE_PARM_DESC(ql2xiniexchg, 263 "Number of initiator exchanges."); 264 265 int ql2xfwholdabts; 266 module_param(ql2xfwholdabts, int, S_IRUGO); 267 MODULE_PARM_DESC(ql2xfwholdabts, 268 "Allow FW to hold status IOCB until ABTS rsp received. " 269 "0 (Default) Do not set fw option. " 270 "1 - Set fw option to hold ABTS."); 271 272 int ql2xmvasynctoatio = 1; 273 module_param(ql2xmvasynctoatio, int, S_IRUGO|S_IWUSR); 274 MODULE_PARM_DESC(ql2xmvasynctoatio, 275 "Move PUREX, ABTS RX and RIDA IOCBs to ATIOQ" 276 "0 (Default). 
		"1 - Move IOCBs.");

int ql2xautodetectsfp = 1;
module_param(ql2xautodetectsfp, int, 0444);
MODULE_PARM_DESC(ql2xautodetectsfp,
		 "Detect SFP range and set appropriate distance.\n"
		 "1 (Default): Enable\n");

int ql2xenablemsix = 1;
module_param(ql2xenablemsix, int, 0444);
MODULE_PARM_DESC(ql2xenablemsix,
		 "Set to enable MSI or MSI-X interrupt mechanism.\n"
		 " Default is 1, enable MSI-X interrupt mechanism.\n"
		 " 0 -- enable traditional pin-based mechanism.\n"
		 " 1 -- enable MSI-X interrupt mechanism.\n"
		 " 2 -- enable MSI interrupt mechanism.\n");

int qla2xuseresexchforels;
module_param(qla2xuseresexchforels, int, 0444);
MODULE_PARM_DESC(qla2xuseresexchforels,
		 "Reserve 1/2 of emergency exchanges for ELS.\n"
		 " 0 (default): disabled");

static int ql2xprotmask;
module_param(ql2xprotmask, int, 0644);
MODULE_PARM_DESC(ql2xprotmask,
		 "Override DIF/DIX protection capabilities mask\n"
		 "Default is 0 which sets protection mask based on "
		 "capabilities reported by HBA firmware.\n");

static int ql2xprotguard;
module_param(ql2xprotguard, int, 0644);
MODULE_PARM_DESC(ql2xprotguard, "Override choice of DIX checksum\n"
		 " 0 -- Let HBA firmware decide\n"
		 " 1 -- Force T10 CRC\n"
		 " 2 -- Force IP checksum\n");

int ql2xdifbundlinginternalbuffers;
module_param(ql2xdifbundlinginternalbuffers, int, 0644);
MODULE_PARM_DESC(ql2xdifbundlinginternalbuffers,
		 "Force using internal buffers for DIF information\n"
		 "0 (Default). Based on check.\n"
		 "1 Force using internal buffers\n");

int ql2xsmartsan;
module_param(ql2xsmartsan, int, 0444);
module_param_named(smartsan, ql2xsmartsan, int, 0444);
MODULE_PARM_DESC(ql2xsmartsan,
	"Send SmartSAN Management Attributes for FDMI Registration."
	" Default is 0 - No SmartSAN registration,"
	" 1 - Register SmartSAN Management Attributes.");

int ql2xrdpenable;
module_param(ql2xrdpenable, int, 0444);
module_param_named(rdpenable, ql2xrdpenable, int, 0444);
MODULE_PARM_DESC(ql2xrdpenable,
	"Enables RDP responses. "
	"0 - no RDP responses (default). "
	"1 - provide RDP responses.");

int ql2xabts_wait_nvme = 1;
module_param(ql2xabts_wait_nvme, int, 0444);
MODULE_PARM_DESC(ql2xabts_wait_nvme,
		 "To wait for ABTS response on I/O timeouts for NVMe. (default: 1)");


static void qla2x00_clear_drv_active(struct qla_hw_data *);
static void qla2x00_free_device(scsi_qla_host_t *);
static int qla2xxx_map_queues(struct Scsi_Host *shost);
static void qla2x00_destroy_deferred_work(struct qla_hw_data *);


static struct scsi_transport_template *qla2xxx_transport_template = NULL;
struct scsi_transport_template *qla2xxx_transport_vport_template = NULL;

/* TODO Convert to inlines
 *
 * Timer routines
 */

__inline__ void
qla2x00_start_timer(scsi_qla_host_t *vha, unsigned long interval)
{
	timer_setup(&vha->timer, qla2x00_timer, 0);
	vha->timer.expires = jiffies + interval * HZ;
	add_timer(&vha->timer);
	vha->timer_active = 1;
}

static inline void
qla2x00_restart_timer(scsi_qla_host_t *vha, unsigned long interval)
{
	/* Currently used for 82XX only. */
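	/* Do not rearm the timer once the device has been marked failed. */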
	if (vha->device_flags & DFLG_DEV_FAILED) {
		ql_dbg(ql_dbg_timer, vha, 0x600d,
		    "Device in a failed state, returning.\n");
		return;
	}

	mod_timer(&vha->timer, jiffies + interval * HZ);
}

static __inline__ void
qla2x00_stop_timer(scsi_qla_host_t *vha)
{
	del_timer_sync(&vha->timer);
	vha->timer_active = 0;
}

static int qla2x00_do_dpc(void *data);

static void qla2x00_rst_aen(scsi_qla_host_t *);

static int qla2x00_mem_alloc(struct qla_hw_data *, uint16_t, uint16_t,
	struct req_que **, struct rsp_que **);
static void qla2x00_free_fw_dump(struct qla_hw_data *);
static void qla2x00_mem_free(struct qla_hw_data *);
int qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
    struct qla_qpair *qpair);

/* -------------------------------------------------------------------------- */
static void qla_init_base_qpair(struct scsi_qla_host *vha, struct req_que *req,
    struct rsp_que *rsp)
{
	struct qla_hw_data *ha = vha->hw;

	rsp->qpair = ha->base_qpair;
	rsp->req = req;
	ha->base_qpair->hw = ha;
	ha->base_qpair->req = req;
	ha->base_qpair->rsp = rsp;
	ha->base_qpair->vha = vha;
	ha->base_qpair->qp_lock_ptr = &ha->hardware_lock;
	ha->base_qpair->use_shadow_reg = IS_SHADOW_REG_CAPABLE(ha) ? 1 : 0;
	ha->base_qpair->msix = &ha->msix_entries[QLA_MSIX_RSP_Q];
	ha->base_qpair->srb_mempool = ha->srb_mempool;
	INIT_LIST_HEAD(&ha->base_qpair->hints_list);
	ha->base_qpair->enable_class_2 = ql2xenableclass2;
	/* init qpair to this cpu. Will adjust at run time. */
	qla_cpu_update(rsp->qpair, raw_smp_processor_id());
	ha->base_qpair->pdev = ha->pdev;

	if (IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha))
		ha->base_qpair->reqq_start_iocbs = qla_83xx_start_iocbs;
}

static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req,
				struct rsp_que *rsp)
{
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

	ha->req_q_map = kcalloc(ha->max_req_queues, sizeof(struct req_que *),
				GFP_KERNEL);
	if (!ha->req_q_map) {
		ql_log(ql_log_fatal, vha, 0x003b,
		    "Unable to allocate memory for request queue ptrs.\n");
		goto fail_req_map;
	}

	ha->rsp_q_map = kcalloc(ha->max_rsp_queues, sizeof(struct rsp_que *),
				GFP_KERNEL);
	if (!ha->rsp_q_map) {
		ql_log(ql_log_fatal, vha, 0x003c,
		    "Unable to allocate memory for response queue ptrs.\n");
		goto fail_rsp_map;
	}

	ha->base_qpair = kzalloc(sizeof(struct qla_qpair), GFP_KERNEL);
	if (ha->base_qpair == NULL) {
		ql_log(ql_log_warn, vha, 0x00e0,
		    "Failed to allocate base queue pair memory.\n");
		goto fail_base_qpair;
	}

	qla_init_base_qpair(vha, req, rsp);

	if ((ql2xmqsupport || ql2xnvmeenable) && ha->max_qpairs) {
		ha->queue_pair_map = kcalloc(ha->max_qpairs, sizeof(struct qla_qpair *),
			GFP_KERNEL);
		if (!ha->queue_pair_map) {
			ql_log(ql_log_fatal, vha, 0x0180,
			    "Unable to allocate memory for queue pair ptrs.\n");
			goto fail_qpair_map;
		}
	}

	/*
	 * Make sure we record at least the request and response queue zero in
	 * case we need to free them if part of the probe fails.
	 */
	ha->rsp_q_map[0] = rsp;
	ha->req_q_map[0] = req;
	set_bit(0, ha->rsp_qid_map);
	set_bit(0, ha->req_qid_map);
	return 0;

fail_qpair_map:
	kfree(ha->base_qpair);
	ha->base_qpair = NULL;
fail_base_qpair:
	kfree(ha->rsp_q_map);
	ha->rsp_q_map = NULL;
fail_rsp_map:
	kfree(ha->req_q_map);
	ha->req_q_map = NULL;
fail_req_map:
	return -ENOMEM;
}

static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req)
{
	if (IS_QLAFX00(ha)) {
		if (req && req->ring_fx00)
			dma_free_coherent(&ha->pdev->dev,
			    (req->length_fx00 + 1) * sizeof(request_t),
			    req->ring_fx00, req->dma_fx00);
	} else if (req && req->ring)
		dma_free_coherent(&ha->pdev->dev,
		    (req->length + 1) * sizeof(request_t),
		    req->ring, req->dma);

	if (req)
		kfree(req->outstanding_cmds);

	kfree(req);
}

static void qla2x00_free_rsp_que(struct qla_hw_data *ha, struct rsp_que *rsp)
{
	if (IS_QLAFX00(ha)) {
		if (rsp && rsp->ring_fx00)
			dma_free_coherent(&ha->pdev->dev,
			    (rsp->length_fx00 + 1) * sizeof(request_t),
			    rsp->ring_fx00, rsp->dma_fx00);
	} else if (rsp && rsp->ring) {
		dma_free_coherent(&ha->pdev->dev,
		    (rsp->length + 1) * sizeof(response_t),
		    rsp->ring, rsp->dma);
	}
	kfree(rsp);
}

static void qla2x00_free_queues(struct qla_hw_data *ha)
{
	struct req_que *req;
	struct rsp_que *rsp;
	int cnt;
	unsigned long flags;

	if (ha->queue_pair_map) {
		kfree(ha->queue_pair_map);
		ha->queue_pair_map = NULL;
	}
	if (ha->base_qpair) {
		kfree(ha->base_qpair);
		ha->base_qpair = NULL;
	}

	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (cnt = 0; cnt < ha->max_req_queues; cnt++) {
		if (!test_bit(cnt, ha->req_qid_map))
			continue;

		req = ha->req_q_map[cnt];
		clear_bit(cnt, ha->req_qid_map);
		ha->req_q_map[cnt] = NULL;

		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		qla2x00_free_req_que(ha, req);
		spin_lock_irqsave(&ha->hardware_lock, flags);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	kfree(ha->req_q_map);
	ha->req_q_map = NULL;


	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (cnt = 0; cnt < ha->max_rsp_queues; cnt++) {
		if (!test_bit(cnt, ha->rsp_qid_map))
			continue;

		rsp = ha->rsp_q_map[cnt];
		clear_bit(cnt, ha->rsp_qid_map);
		ha->rsp_q_map[cnt] = NULL;
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		qla2x00_free_rsp_que(ha, rsp);
		spin_lock_irqsave(&ha->hardware_lock, flags);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	kfree(ha->rsp_q_map);
	ha->rsp_q_map = NULL;
}

static char *
qla2x00_pci_info_str(struct scsi_qla_host *vha, char *str, size_t str_len)
{
	struct qla_hw_data *ha = vha->hw;
	static const char *const pci_bus_modes[] = {
		"33", "66", "100", "133",
	};
	uint16_t pci_bus;

	pci_bus = (ha->pci_attr & (BIT_9 | BIT_10)) >> 9;
	if (pci_bus) {
		snprintf(str, str_len, "PCI-X (%s MHz)",
		    pci_bus_modes[pci_bus]);
	} else {
		pci_bus = (ha->pci_attr & BIT_8) >> 8;
		snprintf(str, str_len, "PCI (%s MHz)", pci_bus_modes[pci_bus]);
	}

	return str;
}

static char *
qla24xx_pci_info_str(struct scsi_qla_host *vha, char *str, size_t str_len)
{
	static const char *const pci_bus_modes[] = {
		"33", "66", "100", "133",
	};
	struct qla_hw_data *ha = vha->hw;
	uint32_t pci_bus;
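
	/* On PCIe parts, report supported link speed and width from LNKCAP. */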
	if (pci_is_pcie(ha->pdev)) {
		uint32_t lstat, lspeed, lwidth;
		const char *speed_str;

		pcie_capability_read_dword(ha->pdev, PCI_EXP_LNKCAP, &lstat);
		lspeed = lstat & PCI_EXP_LNKCAP_SLS;
		lwidth = (lstat & PCI_EXP_LNKCAP_MLW) >> 4;

		switch (lspeed) {
		case 1:
			speed_str = "2.5GT/s";
			break;
		case 2:
			speed_str = "5.0GT/s";
			break;
		case 3:
			speed_str = "8.0GT/s";
			break;
		case 4:
			speed_str = "16.0GT/s";
			break;
		default:
			speed_str = "<unknown>";
			break;
		}
		snprintf(str, str_len, "PCIe (%s x%d)", speed_str, lwidth);

		return str;
	}

	pci_bus = (ha->pci_attr & CSRX_PCIX_BUS_MODE_MASK) >> 8;
	if (pci_bus == 0 || pci_bus == 8)
		snprintf(str, str_len, "PCI (%s MHz)",
		    pci_bus_modes[pci_bus >> 3]);
	else
		snprintf(str, str_len, "PCI-X Mode %d (%s MHz)",
		    pci_bus & 4 ? 2 : 1,
		    pci_bus_modes[pci_bus & 3]);

	return str;
}

static char *
qla2x00_fw_version_str(struct scsi_qla_host *vha, char *str, size_t size)
{
	char un_str[10];
	struct qla_hw_data *ha = vha->hw;

	snprintf(str, size, "%d.%02d.%02d ", ha->fw_major_version,
	    ha->fw_minor_version, ha->fw_subminor_version);

	if (ha->fw_attributes & BIT_9) {
		strcat(str, "FLX");
		return (str);
	}

	switch (ha->fw_attributes & 0xFF) {
	case 0x7:
		strcat(str, "EF");
		break;
	case 0x17:
		strcat(str, "TP");
		break;
	case 0x37:
		strcat(str, "IP");
		break;
	case 0x77:
		strcat(str, "VI");
		break;
	default:
		sprintf(un_str, "(%x)", ha->fw_attributes);
		strcat(str, un_str);
		break;
	}
	if (ha->fw_attributes & 0x100)
		strcat(str, "X");

	return (str);
}

static char *
qla24xx_fw_version_str(struct scsi_qla_host *vha, char *str, size_t size)
{
	struct qla_hw_data *ha = vha->hw;

	snprintf(str, size, "%d.%02d.%02d (%x)", ha->fw_major_version,
	    ha->fw_minor_version, ha->fw_subminor_version, ha->fw_attributes);
	return str;
}

void qla2x00_sp_free_dma(srb_t *sp)
{
	struct qla_hw_data *ha = sp->vha->hw;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);

	if (sp->flags & SRB_DMA_VALID) {
		scsi_dma_unmap(cmd);
		sp->flags &= ~SRB_DMA_VALID;
	}

	if (sp->flags & SRB_CRC_PROT_DMA_VALID) {
		dma_unmap_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
		    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
		sp->flags &= ~SRB_CRC_PROT_DMA_VALID;
	}

	if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
		/* The list is guaranteed to have elements. */
		qla2x00_clean_dsd_pool(ha, sp->u.scmd.crc_ctx);
		sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
	}

	if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
		struct crc_context *ctx0 = sp->u.scmd.crc_ctx;

		dma_pool_free(ha->dl_dma_pool, ctx0, ctx0->crc_ctx_dma);
		sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
	}

	if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
		struct ct6_dsd *ctx1 = sp->u.scmd.ct6_ctx;

		dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
		    ctx1->fcp_cmnd_dma);
		list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list);
		ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt;
		ha->gbl_dsd_avail += ctx1->dsd_use_cnt;
		mempool_free(ctx1, ha->ctx_mempool);
	}
}

void qla2x00_sp_compl(srb_t *sp, int res)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct completion *comp = sp->comp;

	sp->free(sp);
	cmd->result = res;
	CMD_SP(cmd) = NULL;
	cmd->scsi_done(cmd);
	if (comp)
		complete(comp);
}

void qla2xxx_qpair_sp_free_dma(srb_t *sp)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct qla_hw_data *ha = sp->fcport->vha->hw;

	if (sp->flags & SRB_DMA_VALID) {
		scsi_dma_unmap(cmd);
		sp->flags &= ~SRB_DMA_VALID;
	}

	if (sp->flags & SRB_CRC_PROT_DMA_VALID) {
		dma_unmap_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
		    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
		sp->flags &= ~SRB_CRC_PROT_DMA_VALID;
	}

	if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
		/* The list is guaranteed to have elements. */
		qla2x00_clean_dsd_pool(ha, sp->u.scmd.crc_ctx);
		sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
	}

	if (sp->flags & SRB_DIF_BUNDL_DMA_VALID) {
		struct crc_context *difctx = sp->u.scmd.crc_ctx;
		struct dsd_dma *dif_dsd, *nxt_dsd;

		list_for_each_entry_safe(dif_dsd, nxt_dsd,
		    &difctx->ldif_dma_hndl_list, list) {
			list_del(&dif_dsd->list);
			dma_pool_free(ha->dif_bundl_pool, dif_dsd->dsd_addr,
			    dif_dsd->dsd_list_dma);
			kfree(dif_dsd);
			difctx->no_dif_bundl--;
		}

		list_for_each_entry_safe(dif_dsd, nxt_dsd,
		    &difctx->ldif_dsd_list, list) {
			list_del(&dif_dsd->list);
			dma_pool_free(ha->dl_dma_pool, dif_dsd->dsd_addr,
			    dif_dsd->dsd_list_dma);
			kfree(dif_dsd);
			difctx->no_ldif_dsd--;
		}

		if (difctx->no_ldif_dsd) {
			ql_dbg(ql_dbg_tgt+ql_dbg_verbose, sp->vha, 0xe022,
			    "%s: difctx->no_ldif_dsd=%x\n",
			    __func__, difctx->no_ldif_dsd);
		}

		if (difctx->no_dif_bundl) {
			ql_dbg(ql_dbg_tgt+ql_dbg_verbose, sp->vha, 0xe022,
			    "%s: difctx->no_dif_bundl=%x\n",
			    __func__, difctx->no_dif_bundl);
		}
		sp->flags &= ~SRB_DIF_BUNDL_DMA_VALID;
	}

	if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
		struct ct6_dsd *ctx1 = sp->u.scmd.ct6_ctx;

		dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
		    ctx1->fcp_cmnd_dma);
		list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list);
		ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt;
		ha->gbl_dsd_avail += ctx1->dsd_use_cnt;
		mempool_free(ctx1, ha->ctx_mempool);
		sp->flags &= ~SRB_FCP_CMND_DMA_VALID;
	}

	if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
		struct crc_context *ctx0 = sp->u.scmd.crc_ctx;

		dma_pool_free(ha->dl_dma_pool, ctx0, ctx0->crc_ctx_dma);
		sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
	}
}

void qla2xxx_qpair_sp_compl(srb_t *sp, int res)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct completion *comp = sp->comp;

	sp->free(sp);
	cmd->result = res;
	CMD_SP(cmd) = NULL;
	cmd->scsi_done(cmd);
	if (comp)
		complete(comp);
}

static int
qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
	scsi_qla_host_t *vha = shost_priv(host);
	fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
	struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device));
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	srb_t *sp;
	int rval;

	if (unlikely(test_bit(UNLOADING, &base_vha->dpc_flags)) ||
	    WARN_ON_ONCE(!rport)) {
		cmd->result = DID_NO_CONNECT << 16;
		goto qc24_fail_command;
	}

	if (ha->mqenable) {
		uint32_t tag;
		uint16_t hwq;
		struct qla_qpair *qpair = NULL;

		tag = blk_mq_unique_tag(scsi_cmd_to_rq(cmd));
		hwq = blk_mq_unique_tag_to_hwq(tag);
		qpair = ha->queue_pair_map[hwq];

		if (qpair)
			return qla2xxx_mqueuecommand(host, cmd, qpair);
	}

	if (ha->flags.eeh_busy) {
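		/*
		 * EEH/AER recovery: a permanent PCI failure fails the
		 * command, a transient one requeues it.
		 */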
		if (ha->flags.pci_channel_io_perm_failure) {
			ql_dbg(ql_dbg_aer, vha, 0x9010,
			    "PCI Channel IO permanent failure, exiting "
			    "cmd=%p.\n", cmd);
			cmd->result = DID_NO_CONNECT << 16;
		} else {
			ql_dbg(ql_dbg_aer, vha, 0x9011,
			    "EEH_Busy, Requeuing the cmd=%p.\n", cmd);
			cmd->result = DID_REQUEUE << 16;
		}
		goto qc24_fail_command;
	}

	rval = fc_remote_port_chkready(rport);
	if (rval) {
		cmd->result = rval;
		ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3003,
		    "fc_remote_port_chkready failed for cmd=%p, rval=0x%x.\n",
		    cmd, rval);
		goto qc24_fail_command;
	}

	if (!vha->flags.difdix_supported &&
	    scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
		ql_dbg(ql_dbg_io, vha, 0x3004,
		    "DIF Cap not reg, fail DIF capable cmd's:%p.\n",
		    cmd);
		cmd->result = DID_NO_CONNECT << 16;
		goto qc24_fail_command;
	}

	if (!fcport || fcport->deleted) {
		cmd->result = DID_IMM_RETRY << 16;
		goto qc24_fail_command;
	}

	if (atomic_read(&fcport->state) != FCS_ONLINE || fcport->deleted) {
		if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
		    atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
			ql_dbg(ql_dbg_io, vha, 0x3005,
			    "Returning DNC, fcport_state=%d loop_state=%d.\n",
			    atomic_read(&fcport->state),
			    atomic_read(&base_vha->loop_state));
			cmd->result = DID_NO_CONNECT << 16;
			goto qc24_fail_command;
		}
		goto qc24_target_busy;
	}

	/*
	 * Return target busy if we've received a non-zero retry_delay_timer
	 * in a FCP_RSP.
	 */
	if (fcport->retry_delay_timestamp == 0) {
		/* retry delay not set */
	} else if (time_after(jiffies, fcport->retry_delay_timestamp))
		fcport->retry_delay_timestamp = 0;
	else
		goto qc24_target_busy;

	sp = scsi_cmd_priv(cmd);
	qla2xxx_init_sp(sp, vha, vha->hw->base_qpair, fcport);

	sp->u.scmd.cmd = cmd;
	sp->type = SRB_SCSI_CMD;

	CMD_SP(cmd) = (void *)sp;
	sp->free = qla2x00_sp_free_dma;
	sp->done = qla2x00_sp_compl;

	rval = ha->isp_ops->start_scsi(sp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3013,
		    "Start scsi failed rval=%d for cmd=%p.\n", rval, cmd);
		goto qc24_host_busy_free_sp;
	}

	return 0;

qc24_host_busy_free_sp:
	sp->free(sp);

qc24_target_busy:
	return SCSI_MLQUEUE_TARGET_BUSY;

qc24_fail_command:
	cmd->scsi_done(cmd);

	return 0;
}

/* For MQ supported I/O */
int
qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
    struct qla_qpair *qpair)
{
	scsi_qla_host_t *vha = shost_priv(host);
	fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
	struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device));
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	srb_t *sp;
	int rval;

	rval = rport ? fc_remote_port_chkready(rport) : (DID_NO_CONNECT << 16);
	if (rval) {
		cmd->result = rval;
		ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3076,
		    "fc_remote_port_chkready failed for cmd=%p, rval=0x%x.\n",
		    cmd, rval);
		goto qc24_fail_command;
	}

	if (!qpair->online) {
		ql_dbg(ql_dbg_io, vha, 0x3077,
		    "qpair not online. eeh_busy=%d.\n", ha->flags.eeh_busy);
		cmd->result = DID_NO_CONNECT << 16;
		goto qc24_fail_command;
	}

	if (!fcport || fcport->deleted) {
		cmd->result = DID_IMM_RETRY << 16;
		goto qc24_fail_command;
	}

	if (atomic_read(&fcport->state) != FCS_ONLINE || fcport->deleted) {
		if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
		    atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
			ql_dbg(ql_dbg_io, vha, 0x3077,
			    "Returning DNC, fcport_state=%d loop_state=%d.\n",
			    atomic_read(&fcport->state),
			    atomic_read(&base_vha->loop_state));
			cmd->result = DID_NO_CONNECT << 16;
			goto qc24_fail_command;
		}
		goto qc24_target_busy;
	}

	/*
	 * Return target busy if we've received a non-zero retry_delay_timer
	 * in a FCP_RSP.
	 */
	if (fcport->retry_delay_timestamp == 0) {
		/* retry delay not set */
	} else if (time_after(jiffies, fcport->retry_delay_timestamp))
		fcport->retry_delay_timestamp = 0;
	else
		goto qc24_target_busy;

	sp = scsi_cmd_priv(cmd);
	qla2xxx_init_sp(sp, vha, qpair, fcport);

	sp->u.scmd.cmd = cmd;
	sp->type = SRB_SCSI_CMD;
	CMD_SP(cmd) = (void *)sp;
	sp->free = qla2xxx_qpair_sp_free_dma;
	sp->done = qla2xxx_qpair_sp_compl;

	rval = ha->isp_ops->start_scsi_mq(sp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3078,
		    "Start scsi failed rval=%d for cmd=%p.\n", rval, cmd);
		goto qc24_host_busy_free_sp;
	}

	return 0;

qc24_host_busy_free_sp:
	sp->free(sp);

qc24_target_busy:
	return SCSI_MLQUEUE_TARGET_BUSY;

qc24_fail_command:
	cmd->scsi_done(cmd);

	return 0;
}

/*
 * qla2x00_eh_wait_on_command
 *    Waits for the command to be returned by the Firmware for some
 *    max time.
 *
 * Input:
 *    cmd = Scsi Command to wait on.
 *
 * Return:
 *    Completed in time : QLA_SUCCESS
 *    Did not complete in time : QLA_FUNCTION_FAILED
 */
static int
qla2x00_eh_wait_on_command(struct scsi_cmnd *cmd)
{
#define ABORT_POLLING_PERIOD	1000
#define ABORT_WAIT_ITER		((2 * 1000) / (ABORT_POLLING_PERIOD))
	unsigned long wait_iter = ABORT_WAIT_ITER;
	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
	struct qla_hw_data *ha = vha->hw;
	int ret = QLA_SUCCESS;

	if (unlikely(pci_channel_offline(ha->pdev)) || ha->flags.eeh_busy) {
		ql_dbg(ql_dbg_taskm, vha, 0x8005,
		    "Return:eh_wait.\n");
		return ret;
	}

	while (CMD_SP(cmd) && wait_iter--) {
		msleep(ABORT_POLLING_PERIOD);
	}
	if (CMD_SP(cmd))
		ret = QLA_FUNCTION_FAILED;

	return ret;
}

/*
 * qla2x00_wait_for_hba_online
 *    Wait till the HBA is online after going through
 *    <= MAX_RETRIES_OF_ISP_ABORT or
 *    finally HBA is disabled ie marked offline
 *
 * Input:
 *     ha - pointer to host adapter structure
 *
 * Note:
 *    Does context switching-Release SPIN_LOCK
 *    (if any) before calling this routine.
 *
 * Return:
 *    Success (Adapter is online) : 0
 *    Failed  (Adapter is offline/disabled) : 1
 */
int
qla2x00_wait_for_hba_online(scsi_qla_host_t *vha)
{
	int return_status;
	unsigned long wait_online;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	wait_online = jiffies + (MAX_LOOP_TIMEOUT * HZ);
	while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) ||
	    test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
	    ha->dpc_active) && time_before(jiffies, wait_online)) {

		msleep(1000);
	}
	if (base_vha->flags.online)
		return_status = QLA_SUCCESS;
	else
		return_status = QLA_FUNCTION_FAILED;

	return (return_status);
}

static inline int test_fcport_count(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;
	int res;
	/* Return 0 = sleep, x=wake */

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	ql_dbg(ql_dbg_init, vha, 0x00ec,
	    "tgt %p, fcport_count=%d\n",
	    vha, vha->fcport_count);
	res = (vha->fcport_count == 0);
	if (res) {
		struct fc_port *fcport;

		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			if (fcport->deleted != QLA_SESS_DELETED) {
				/* session(s) may not be fully logged in
				 * (ie fcport_count=0), but session
				 * deletion thread(s) may be inflight.
				 */

				res = 0;
				break;
			}
		}
	}
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

	return res;
}

/*
 * qla2x00_wait_for_sess_deletion can only be called from remove_one.
 * it has dependency on UNLOADING flag to stop device discovery
 */
void
qla2x00_wait_for_sess_deletion(scsi_qla_host_t *vha)
{
	u8 i;

	qla2x00_mark_all_devices_lost(vha);

	for (i = 0; i < 10; i++) {
		if (wait_event_timeout(vha->fcport_waitQ,
		    test_fcport_count(vha), HZ) > 0)
			break;
	}

	flush_workqueue(vha->hw->wq);
}

/*
 * qla2x00_wait_for_hba_ready
 *    Wait till the HBA is ready before doing driver unload
 *
 * Input:
 *     ha - pointer to host adapter structure
 *
 * Note:
 *    Does context switching-Release SPIN_LOCK
 *    (if any) before calling this routine.
 *
 */
static void
qla2x00_wait_for_hba_ready(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	while ((qla2x00_reset_active(vha) || ha->dpc_active ||
	    ha->flags.mbox_busy) ||
	    test_bit(FX00_RESET_RECOVERY, &vha->dpc_flags) ||
	    test_bit(FX00_TARGET_SCAN, &vha->dpc_flags)) {
		if (test_bit(UNLOADING, &base_vha->dpc_flags))
			break;
		msleep(1000);
	}
}

int
qla2x00_wait_for_chip_reset(scsi_qla_host_t *vha)
{
	int return_status;
	unsigned long wait_reset;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	wait_reset = jiffies + (MAX_LOOP_TIMEOUT * HZ);
	while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) ||
	    test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
	    ha->dpc_active) && time_before(jiffies, wait_reset)) {

		msleep(1000);

		if (!test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags) &&
		    ha->flags.chip_reset_done)
			break;
	}
	if (ha->flags.chip_reset_done)
		return_status = QLA_SUCCESS;
	else
		return_status = QLA_FUNCTION_FAILED;

	return return_status;
}

/**************************************************************************
* qla2xxx_eh_abort
*
* Description:
*    The abort function will abort the specified command.
*
* Input:
*    cmd = Linux SCSI command packet to be aborted.
*
* Returns:
*    Either SUCCESS or FAILED.
*
* Note:
*    Only return FAILED if command not returned by firmware.
**************************************************************************/
static int
qla2xxx_eh_abort(struct scsi_cmnd *cmd)
{
	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
	DECLARE_COMPLETION_ONSTACK(comp);
	srb_t *sp;
	int ret;
	unsigned int id;
	uint64_t lun;
	int rval;
	struct qla_hw_data *ha = vha->hw;
	uint32_t ratov_j;
	struct qla_qpair *qpair;
	unsigned long flags;

	if (qla2x00_isp_reg_stat(ha)) {
		ql_log(ql_log_info, vha, 0x8042,
		    "PCI/Register disconnect, exiting.\n");
		qla_pci_set_eeh_busy(vha);
		return FAILED;
	}

	ret = fc_block_scsi_eh(cmd);
	if (ret != 0)
		return ret;

	sp = scsi_cmd_priv(cmd);
	qpair = sp->qpair;

	vha->cmd_timeout_cnt++;

	if ((sp->fcport && sp->fcport->deleted) || !qpair)
		return SUCCESS;

	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
	sp->comp = &comp;
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);


	id = cmd->device->id;
	lun = cmd->device->lun;

	ql_dbg(ql_dbg_taskm, vha, 0x8002,
	    "Aborting from RISC nexus=%ld:%d:%llu sp=%p cmd=%p handle=%x\n",
	    vha->host_no, id, lun, sp, cmd, sp->handle);

	/*
	 * Abort will release the original Command/sp from FW. Let the
	 * original command call scsi_done. In return, it will wake up
	 * this sleeping thread.
	 */
	rval = ha->isp_ops->abort_command(sp);

	ql_dbg(ql_dbg_taskm, vha, 0x8003,
	    "Abort command mbx cmd=%p, rval=%x.\n", cmd, rval);

	/* Wait for the command completion. */
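	/* ha->r_a_tov is in 100 ms units; wait up to 4 * R_A_TOV. */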
	ratov_j = ha->r_a_tov/10 * 4 * 1000;
	ratov_j = msecs_to_jiffies(ratov_j);
	switch (rval) {
	case QLA_SUCCESS:
		if (!wait_for_completion_timeout(&comp, ratov_j)) {
			ql_dbg(ql_dbg_taskm, vha, 0xffff,
			    "%s: Abort wait timer (4 * R_A_TOV[%d]) expired\n",
			    __func__, ha->r_a_tov/10);
			ret = FAILED;
		} else {
			ret = SUCCESS;
		}
		break;
	default:
		ret = FAILED;
		break;
	}

	sp->comp = NULL;

	ql_log(ql_log_info, vha, 0x801c,
	    "Abort command issued nexus=%ld:%d:%llu -- %x.\n",
	    vha->host_no, id, lun, ret);

	return ret;
}

/*
 * Returns: QLA_SUCCESS or QLA_FUNCTION_FAILED.
 */
int
qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
	uint64_t l, enum nexus_wait_type type)
{
	int cnt, match, status;
	unsigned long flags;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req;
	srb_t *sp;
	struct scsi_cmnd *cmd;

	status = QLA_SUCCESS;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	req = vha->req;
	for (cnt = 1; status == QLA_SUCCESS &&
	    cnt < req->num_outstanding_cmds; cnt++) {
		sp = req->outstanding_cmds[cnt];
		if (!sp)
			continue;
		if (sp->type != SRB_SCSI_CMD)
			continue;
		if (vha->vp_idx != sp->vha->vp_idx)
			continue;
		match = 0;
		cmd = GET_CMD_SP(sp);
		switch (type) {
		case WAIT_HOST:
			match = 1;
			break;
		case WAIT_TARGET:
			match = cmd->device->id == t;
			break;
		case WAIT_LUN:
			match = (cmd->device->id == t &&
			    cmd->device->lun == l);
			break;
		}
		if (!match)
			continue;

		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		status = qla2x00_eh_wait_on_command(cmd);
		spin_lock_irqsave(&ha->hardware_lock, flags);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return status;
}

static char *reset_errors[] = {
	"HBA not online",
	"HBA not ready",
	"Task management failed",
	"Waiting for command completions",
};

static int
qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	scsi_qla_host_t *vha = shost_priv(sdev->host);
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
	fc_port_t *fcport = (struct fc_port *) sdev->hostdata;
	struct qla_hw_data *ha = vha->hw;
	int err;

	if (qla2x00_isp_reg_stat(ha)) {
		ql_log(ql_log_info, vha, 0x803e,
		    "PCI/Register disconnect, exiting.\n");
		qla_pci_set_eeh_busy(vha);
		return FAILED;
	}

	if (!fcport) {
		return FAILED;
	}

	err = fc_block_rport(rport);
	if (err != 0)
		return err;

	if (fcport->deleted)
		return SUCCESS;

	ql_log(ql_log_info, vha, 0x8009,
	    "DEVICE RESET ISSUED nexus=%ld:%d:%llu cmd=%p.\n", vha->host_no,
	    sdev->id, sdev->lun, cmd);

	err = 0;
	if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x800a,
		    "Wait for hba online failed for cmd=%p.\n", cmd);
		goto eh_reset_failed;
	}
	err = 2;
	if (ha->isp_ops->lun_reset(fcport, sdev->lun, 1)
	    != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x800c,
		    "do_reset failed for cmd=%p.\n", cmd);
		goto eh_reset_failed;
	}
	err = 3;
	if (qla2x00_eh_wait_for_pending_commands(vha, sdev->id,
	    sdev->lun, WAIT_LUN) != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x800d,
1440 "wait for pending cmds failed for cmd=%p.\n", cmd); 1441 goto eh_reset_failed; 1442 } 1443 1444 ql_log(ql_log_info, vha, 0x800e, 1445 "DEVICE RESET SUCCEEDED nexus:%ld:%d:%llu cmd=%p.\n", 1446 vha->host_no, sdev->id, sdev->lun, cmd); 1447 1448 return SUCCESS; 1449 1450 eh_reset_failed: 1451 ql_log(ql_log_info, vha, 0x800f, 1452 "DEVICE RESET FAILED: %s nexus=%ld:%d:%llu cmd=%p.\n", 1453 reset_errors[err], vha->host_no, sdev->id, sdev->lun, 1454 cmd); 1455 vha->reset_cmd_err_cnt++; 1456 return FAILED; 1457 } 1458 1459 static int 1460 qla2xxx_eh_target_reset(struct scsi_cmnd *cmd) 1461 { 1462 struct scsi_device *sdev = cmd->device; 1463 struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); 1464 scsi_qla_host_t *vha = shost_priv(rport_to_shost(rport)); 1465 struct qla_hw_data *ha = vha->hw; 1466 fc_port_t *fcport = *(fc_port_t **)rport->dd_data; 1467 int err; 1468 1469 if (qla2x00_isp_reg_stat(ha)) { 1470 ql_log(ql_log_info, vha, 0x803f, 1471 "PCI/Register disconnect, exiting.\n"); 1472 qla_pci_set_eeh_busy(vha); 1473 return FAILED; 1474 } 1475 1476 if (!fcport) { 1477 return FAILED; 1478 } 1479 1480 err = fc_block_rport(rport); 1481 if (err != 0) 1482 return err; 1483 1484 if (fcport->deleted) 1485 return SUCCESS; 1486 1487 ql_log(ql_log_info, vha, 0x8009, 1488 "TARGET RESET ISSUED nexus=%ld:%d cmd=%p.\n", vha->host_no, 1489 sdev->id, cmd); 1490 1491 err = 0; 1492 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) { 1493 ql_log(ql_log_warn, vha, 0x800a, 1494 "Wait for hba online failed for cmd=%p.\n", cmd); 1495 goto eh_reset_failed; 1496 } 1497 err = 2; 1498 if (ha->isp_ops->target_reset(fcport, 0, 0) != QLA_SUCCESS) { 1499 ql_log(ql_log_warn, vha, 0x800c, 1500 "target_reset failed for cmd=%p.\n", cmd); 1501 goto eh_reset_failed; 1502 } 1503 err = 3; 1504 if (qla2x00_eh_wait_for_pending_commands(vha, sdev->id, 1505 0, WAIT_TARGET) != QLA_SUCCESS) { 1506 ql_log(ql_log_warn, vha, 0x800d, 1507 "wait for pending cmds failed for cmd=%p.\n", cmd); 1508 goto eh_reset_failed; 1509 } 1510 1511 ql_log(ql_log_info, vha, 0x800e, 1512 "TARGET RESET SUCCEEDED nexus:%ld:%d cmd=%p.\n", 1513 vha->host_no, sdev->id, cmd); 1514 1515 return SUCCESS; 1516 1517 eh_reset_failed: 1518 ql_log(ql_log_info, vha, 0x800f, 1519 "TARGET RESET FAILED: %s nexus=%ld:%d:%llu cmd=%p.\n", 1520 reset_errors[err], vha->host_no, cmd->device->id, cmd->device->lun, 1521 cmd); 1522 vha->reset_cmd_err_cnt++; 1523 return FAILED; 1524 } 1525 1526 /************************************************************************** 1527 * qla2xxx_eh_bus_reset 1528 * 1529 * Description: 1530 * The bus reset function will reset the bus and abort any executing 1531 * commands. 1532 * 1533 * Input: 1534 * cmd = Linux SCSI command packet of the command that cause the 1535 * bus reset. 1536 * 1537 * Returns: 1538 * SUCCESS/FAILURE (defined as macro in scsi.h). 
*
**************************************************************************/
static int
qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
{
	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
	int ret = FAILED;
	unsigned int id;
	uint64_t lun;
	struct qla_hw_data *ha = vha->hw;

	if (qla2x00_isp_reg_stat(ha)) {
		ql_log(ql_log_info, vha, 0x8040,
		    "PCI/Register disconnect, exiting.\n");
		qla_pci_set_eeh_busy(vha);
		return FAILED;
	}

	id = cmd->device->id;
	lun = cmd->device->lun;

	if (qla2x00_chip_is_down(vha))
		return ret;

	ql_log(ql_log_info, vha, 0x8012,
	    "BUS RESET ISSUED nexus=%ld:%d:%llu.\n", vha->host_no, id, lun);

	if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
		ql_log(ql_log_fatal, vha, 0x8013,
		    "Wait for hba online failed board disabled.\n");
		goto eh_bus_reset_done;
	}

	if (qla2x00_loop_reset(vha) == QLA_SUCCESS)
		ret = SUCCESS;

	if (ret == FAILED)
		goto eh_bus_reset_done;

	/* Flush outstanding commands. */
	if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST) !=
	    QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x8014,
		    "Wait for pending commands failed.\n");
		ret = FAILED;
	}

eh_bus_reset_done:
	ql_log(ql_log_warn, vha, 0x802b,
	    "BUS RESET %s nexus=%ld:%d:%llu.\n",
	    (ret == FAILED) ? "FAILED" : "SUCCEEDED", vha->host_no, id, lun);

	return ret;
}

/**************************************************************************
* qla2xxx_eh_host_reset
*
* Description:
*    The reset function will reset the Adapter.
*
* Input:
*      cmd = Linux SCSI command packet of the command that cause the
*            adapter reset.
*
* Returns:
*      Either SUCCESS or FAILED.
*
* Note:
**************************************************************************/
static int
qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
{
	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
	struct qla_hw_data *ha = vha->hw;
	int ret = FAILED;
	unsigned int id;
	uint64_t lun;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	if (qla2x00_isp_reg_stat(ha)) {
		ql_log(ql_log_info, vha, 0x8041,
		    "PCI/Register disconnect, exiting.\n");
		qla_pci_set_eeh_busy(vha);
		return SUCCESS;
	}

	id = cmd->device->id;
	lun = cmd->device->lun;

	ql_log(ql_log_info, vha, 0x8018,
	    "ADAPTER RESET ISSUED nexus=%ld:%d:%llu.\n", vha->host_no, id, lun);

	/*
	 * No point in issuing another reset if one is active. Also do not
	 * attempt a reset if we are updating flash.
	 */
	if (qla2x00_reset_active(vha) || ha->optrom_state != QLA_SWAITING)
		goto eh_host_reset_lock;

	if (vha != base_vha) {
		if (qla2x00_vp_abort_isp(vha))
			goto eh_host_reset_lock;
	} else {
		if (IS_P3P_TYPE(vha->hw)) {
			if (!qla82xx_fcoe_ctx_reset(vha)) {
				/* Ctx reset success */
				ret = SUCCESS;
				goto eh_host_reset_lock;
			}
			/* fall thru if ctx reset failed */
		}
		if (ha->wq)
			flush_workqueue(ha->wq);

		set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
		if (ha->isp_ops->abort_isp(base_vha)) {
			clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
			/* failed. schedule dpc to try */
			set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);

			if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
				ql_log(ql_log_warn, vha, 0x802a,
				    "wait for hba online failed.\n");
				goto eh_host_reset_lock;
			}
		}
		clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
	}

	/* Waiting for command to be returned to OS.*/
	if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST) ==
	    QLA_SUCCESS)
		ret = SUCCESS;

eh_host_reset_lock:
	ql_log(ql_log_info, vha, 0x8017,
	    "ADAPTER RESET %s nexus=%ld:%d:%llu.\n",
	    (ret == FAILED) ? "FAILED" : "SUCCEEDED", vha->host_no, id, lun);

	return ret;
}

/*
* qla2x00_loop_reset
*      Issue loop reset.
*
* Input:
*      ha = adapter block pointer.
*
* Returns:
*      0 = success
*/
int
qla2x00_loop_reset(scsi_qla_host_t *vha)
{
	int ret;
	struct fc_port *fcport;
	struct qla_hw_data *ha = vha->hw;

	if (IS_QLAFX00(ha)) {
		return qlafx00_loop_reset(vha);
	}

	if (ql2xtargetreset == 1 && ha->flags.enable_target_reset) {
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			if (fcport->port_type != FCT_TARGET)
				continue;

			ret = ha->isp_ops->target_reset(fcport, 0, 0);
			if (ret != QLA_SUCCESS) {
				ql_dbg(ql_dbg_taskm, vha, 0x802c,
				    "Bus Reset failed: Reset=%d "
				    "d_id=%x.\n", ret, fcport->d_id.b24);
			}
		}
	}


	if (ha->flags.enable_lip_full_login && !IS_CNA_CAPABLE(ha)) {
		atomic_set(&vha->loop_state, LOOP_DOWN);
		atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
		qla2x00_mark_all_devices_lost(vha);
		ret = qla2x00_full_login_lip(vha);
		if (ret != QLA_SUCCESS) {
			ql_dbg(ql_dbg_taskm, vha, 0x802d,
			    "full_login_lip=%d.\n", ret);
		}
	}

	if (ha->flags.enable_lip_reset) {
		ret = qla2x00_lip_reset(vha);
		if (ret != QLA_SUCCESS)
			ql_dbg(ql_dbg_taskm, vha, 0x802e,
			    "lip_reset failed (%d).\n", ret);
	}

	/* Issue marker command only when we are going to start the I/O */
	vha->marker_needed = 1;

	return QLA_SUCCESS;
}

/*
 * The caller must ensure that no completion interrupts will happen
 * while this function is in progress.
 */
static void qla2x00_abort_srb(struct qla_qpair *qp, srb_t *sp, const int res,
			      unsigned long *flags)
	__releases(qp->qp_lock_ptr)
	__acquires(qp->qp_lock_ptr)
{
	DECLARE_COMPLETION_ONSTACK(comp);
	scsi_qla_host_t *vha = qp->vha;
	struct qla_hw_data *ha = vha->hw;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	int rval;
	bool ret_cmd;
	uint32_t ratov_j;

	lockdep_assert_held(qp->qp_lock_ptr);

	if (qla2x00_chip_is_down(vha)) {
		sp->done(sp, res);
		return;
	}

	if (sp->type == SRB_NVME_CMD || sp->type == SRB_NVME_LS ||
	    (sp->type == SRB_SCSI_CMD && !ha->flags.eeh_busy &&
	     !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
	     !qla2x00_isp_reg_stat(ha))) {
		if (sp->comp) {
			sp->done(sp, res);
			return;
		}

		sp->comp = &comp;
		spin_unlock_irqrestore(qp->qp_lock_ptr, *flags);

		rval = ha->isp_ops->abort_command(sp);
		/* Wait for command completion. */
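		/* As in qla2xxx_eh_abort(), wait up to 4 * R_A_TOV. */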
		ret_cmd = false;
		ratov_j = ha->r_a_tov/10 * 4 * 1000;
		ratov_j = msecs_to_jiffies(ratov_j);
		switch (rval) {
		case QLA_SUCCESS:
			if (!wait_for_completion_timeout(&comp, ratov_j)) {
				ql_dbg(ql_dbg_taskm, vha, 0xffff,
				    "%s: Abort wait timer (4 * R_A_TOV[%d]) expired\n",
				    __func__, ha->r_a_tov/10);
				ret_cmd = true;
			}
			/* else FW return SP to driver */
			break;
		default:
			ret_cmd = true;
			break;
		}

		spin_lock_irqsave(qp->qp_lock_ptr, *flags);
		if (ret_cmd && blk_mq_request_started(scsi_cmd_to_rq(cmd)))
			sp->done(sp, res);
	} else {
		sp->done(sp, res);
	}
}

/*
 * The caller must ensure that no completion interrupts will happen
 * while this function is in progress.
 */
static void
__qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
{
	int cnt;
	unsigned long flags;
	srb_t *sp;
	scsi_qla_host_t *vha = qp->vha;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct qla_tgt_cmd *cmd;

	if (!ha->req_q_map)
		return;
	spin_lock_irqsave(qp->qp_lock_ptr, flags);
	req = qp->req;
	for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
		sp = req->outstanding_cmds[cnt];
		if (sp) {
			switch (sp->cmd_type) {
			case TYPE_SRB:
				qla2x00_abort_srb(qp, sp, res, &flags);
				break;
			case TYPE_TGT_CMD:
				if (!vha->hw->tgt.tgt_ops || !tgt ||
				    qla_ini_mode_enabled(vha)) {
					ql_dbg(ql_dbg_tgt_mgt, vha, 0xf003,
					    "HOST-ABORT-HNDLR: dpc_flags=%lx. Target mode disabled\n",
					    vha->dpc_flags);
					continue;
				}
				cmd = (struct qla_tgt_cmd *)sp;
				cmd->aborted = 1;
				break;
			case TYPE_TGT_TMCMD:
				/* Skip task management functions. */
				break;
			default:
				break;
			}
			req->outstanding_cmds[cnt] = NULL;
		}
	}
	spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
}

/*
 * The caller must ensure that no completion interrupts will happen
 * while this function is in progress.
 */
void
qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
{
	int que;
	struct qla_hw_data *ha = vha->hw;

	/* Continue only if initialization complete. */
	if (!ha->base_qpair)
		return;
	__qla2x00_abort_all_cmds(ha->base_qpair, res);

	if (!ha->queue_pair_map)
		return;
	for (que = 0; que < ha->max_qpairs; que++) {
		if (!ha->queue_pair_map[que])
			continue;

		__qla2x00_abort_all_cmds(ha->queue_pair_map[que], res);
	}
}

static int
qla2xxx_slave_alloc(struct scsi_device *sdev)
{
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));

	if (!rport || fc_remote_port_chkready(rport))
		return -ENXIO;

	sdev->hostdata = *(fc_port_t **)rport->dd_data;

	return 0;
}

static int
qla2xxx_slave_configure(struct scsi_device *sdev)
{
	scsi_qla_host_t *vha = shost_priv(sdev->host);
	struct req_que *req = vha->req;

	if (IS_T10_PI_CAPABLE(vha->hw))
		blk_queue_update_dma_alignment(sdev->request_queue, 0x7);

	scsi_change_queue_depth(sdev, req->max_q_depth);
	return 0;
}

static void
qla2xxx_slave_destroy(struct scsi_device *sdev)
{
	sdev->hostdata = NULL;
}

/**
 * qla2x00_config_dma_addressing() - Configure OS DMA addressing method.
 * @ha: HA context
 *
 * At exit, @ha's flags.enable_64bit_addressing is set to indicate the
 * supported addressing method.
 */
static void
qla2x00_config_dma_addressing(struct qla_hw_data *ha)
{
	/* Assume a 32bit DMA mask. */
	ha->flags.enable_64bit_addressing = 0;

	if (!dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(64))) {
		/* Any upper-dword bits set? */
		if (MSD(dma_get_required_mask(&ha->pdev->dev)) &&
		    !dma_set_coherent_mask(&ha->pdev->dev, DMA_BIT_MASK(64))) {
			/* Ok, a 64bit DMA mask is applicable. */
			ha->flags.enable_64bit_addressing = 1;
			ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
			ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
			return;
		}
	}

	dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(32));
	dma_set_coherent_mask(&ha->pdev->dev, DMA_BIT_MASK(32));
}

static void
qla2x00_enable_intrs(struct qla_hw_data *ha)
{
	unsigned long flags = 0;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->interrupts_on = 1;
	/* enable risc and host interrupts */
	wrt_reg_word(&reg->ictrl, ICR_EN_INT | ICR_EN_RISC);
	rd_reg_word(&reg->ictrl);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

}

static void
qla2x00_disable_intrs(struct qla_hw_data *ha)
{
	unsigned long flags = 0;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->interrupts_on = 0;
	/* disable risc and host interrupts */
	wrt_reg_word(&reg->ictrl, 0);
	rd_reg_word(&reg->ictrl);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

static void
qla24xx_enable_intrs(struct qla_hw_data *ha)
{
	unsigned long flags = 0;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->interrupts_on = 1;
	wrt_reg_dword(&reg->ictrl, ICRX_EN_RISC_INT);
	rd_reg_dword(&reg->ictrl);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

static void
qla24xx_disable_intrs(struct qla_hw_data *ha)
{
	unsigned long flags = 0;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	if (IS_NOPOLLING_TYPE(ha))
		return;
	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->interrupts_on = 0;
	wrt_reg_dword(&reg->ictrl, 0);
	rd_reg_dword(&reg->ictrl);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

static int
qla2x00_iospace_config(struct qla_hw_data *ha)
{
	resource_size_t pio;
	uint16_t msix;

	if (pci_request_selected_regions(ha->pdev, ha->bars,
	    QLA2XXX_DRIVER_NAME)) {
		ql_log_pci(ql_log_fatal, ha->pdev, 0x0011,
		    "Failed to reserve PIO/MMIO regions (%s), aborting.\n",
		    pci_name(ha->pdev));
		goto iospace_error_exit;
	}
	if (!(ha->bars & 1))
		goto skip_pio;

	/* We only need PIO for Flash operations on ISP2312 v2 chips. */
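	/* Validate region 0 as a usable I/O (PIO) resource before using it. */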
*/ 2016 pio = pci_resource_start(ha->pdev, 0); 2017 if (pci_resource_flags(ha->pdev, 0) & IORESOURCE_IO) { 2018 if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) { 2019 ql_log_pci(ql_log_warn, ha->pdev, 0x0012, 2020 "Invalid pci I/O region size (%s).\n", 2021 pci_name(ha->pdev)); 2022 pio = 0; 2023 } 2024 } else { 2025 ql_log_pci(ql_log_warn, ha->pdev, 0x0013, 2026 "Region #0 not a PIO resource (%s).\n", 2027 pci_name(ha->pdev)); 2028 pio = 0; 2029 } 2030 ha->pio_address = pio; 2031 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0014, 2032 "PIO address=%llu.\n", 2033 (unsigned long long)ha->pio_address); 2034 2035 skip_pio: 2036 /* Use MMIO operations for all accesses. */ 2037 if (!(pci_resource_flags(ha->pdev, 1) & IORESOURCE_MEM)) { 2038 ql_log_pci(ql_log_fatal, ha->pdev, 0x0015, 2039 "Region #1 not an MMIO resource (%s), aborting.\n", 2040 pci_name(ha->pdev)); 2041 goto iospace_error_exit; 2042 } 2043 if (pci_resource_len(ha->pdev, 1) < MIN_IOBASE_LEN) { 2044 ql_log_pci(ql_log_fatal, ha->pdev, 0x0016, 2045 "Invalid PCI mem region size (%s), aborting.\n", 2046 pci_name(ha->pdev)); 2047 goto iospace_error_exit; 2048 } 2049 2050 ha->iobase = ioremap(pci_resource_start(ha->pdev, 1), MIN_IOBASE_LEN); 2051 if (!ha->iobase) { 2052 ql_log_pci(ql_log_fatal, ha->pdev, 0x0017, 2053 "Cannot remap MMIO (%s), aborting.\n", 2054 pci_name(ha->pdev)); 2055 goto iospace_error_exit; 2056 } 2057 2058 /* Determine queue resources */ 2059 ha->max_req_queues = ha->max_rsp_queues = 1; 2060 ha->msix_count = QLA_BASE_VECTORS; 2061 2062 /* Check if FW supports MQ or not */ 2063 if (!(ha->fw_attributes & BIT_6)) 2064 goto mqiobase_exit; 2065 2066 if (!ql2xmqsupport || !ql2xnvmeenable || 2067 (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))) 2068 goto mqiobase_exit; 2069 2070 ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3), 2071 pci_resource_len(ha->pdev, 3)); 2072 if (ha->mqiobase) { 2073 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0018, 2074 "MQIO Base=%p.\n", ha->mqiobase); 2075 /* Read MSIX vector size of the board */ 2076 pci_read_config_word(ha->pdev, QLA_PCI_MSIX_CONTROL, &msix); 2077 ha->msix_count = msix + 1; 2078 /* Max queues are bounded by available msix vectors */ 2079 /* MB interrupt uses 1 vector */ 2080 ha->max_req_queues = ha->msix_count - 1; 2081 ha->max_rsp_queues = ha->max_req_queues; 2082 /* Queue pairs is the max value minus the base queue pair */ 2083 ha->max_qpairs = ha->max_rsp_queues - 1; 2084 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0188, 2085 "Max no of queue pairs: %d.\n", ha->max_qpairs); 2086 2087 ql_log_pci(ql_log_info, ha->pdev, 0x001a, 2088 "MSI-X vector count: %d.\n", ha->msix_count); 2089 } else 2090 ql_log_pci(ql_log_info, ha->pdev, 0x001b, 2091 "BAR 3 not enabled.\n"); 2092 2093 mqiobase_exit: 2094 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x001c, 2095 "MSIX Count: %d.\n", ha->msix_count); 2096 return (0); 2097 2098 iospace_error_exit: 2099 return (-ENOMEM); 2100 } 2101 2102 2103 static int 2104 qla83xx_iospace_config(struct qla_hw_data *ha) 2105 { 2106 uint16_t msix; 2107 2108 if (pci_request_selected_regions(ha->pdev, ha->bars, 2109 QLA2XXX_DRIVER_NAME)) { 2110 ql_log_pci(ql_log_fatal, ha->pdev, 0x0117, 2111 "Failed to reserve PIO/MMIO regions (%s), aborting.\n", 2112 pci_name(ha->pdev)); 2113 2114 goto iospace_error_exit; 2115 } 2116 2117 /* Use MMIO operations for all accesses.
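 * The register file is reached through a single ioremap() of the memory
 * BAR; only MIN_IOBASE_LEN bytes are mapped because the driver needs the
 * register window, not the whole BAR. A hedged sketch of the pattern
 * (generic pdev and BAR index, MIN_IOBASE_LEN as defined in qla_def.h):
 *
 *	void __iomem *base;
 *
 *	base = ioremap(pci_resource_start(pdev, 0), MIN_IOBASE_LEN);
 *	if (!base)
 *		return -ENOMEM;	// an unmappable BAR is fatal to the probe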
*/ 2118 if (!(pci_resource_flags(ha->pdev, 0) & IORESOURCE_MEM)) { 2119 ql_log_pci(ql_log_warn, ha->pdev, 0x0118, 2120 "Region #0 not an MMIO resource (%s), aborting.\n", 2121 pci_name(ha->pdev)); 2122 goto iospace_error_exit; 2123 } 2124 if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) { 2125 ql_log_pci(ql_log_warn, ha->pdev, 0x0119, 2126 "Invalid PCI mem region size (%s), aborting.\n", 2127 pci_name(ha->pdev)); 2128 goto iospace_error_exit; 2129 } 2130 2131 ha->iobase = ioremap(pci_resource_start(ha->pdev, 0), MIN_IOBASE_LEN); 2132 if (!ha->iobase) { 2133 ql_log_pci(ql_log_fatal, ha->pdev, 0x011a, 2134 "Cannot remap MMIO (%s), aborting.\n", 2135 pci_name(ha->pdev)); 2136 goto iospace_error_exit; 2137 } 2138 2139 /* 64bit PCI BAR - BAR2 will correspond to region 4 */ 2140 /* 83XX 26XX always use MQ type access for queues 2141 * - mbar 2, a.k.a region 4 */ 2142 ha->max_req_queues = ha->max_rsp_queues = 1; 2143 ha->msix_count = QLA_BASE_VECTORS; 2144 ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 4), 2145 pci_resource_len(ha->pdev, 4)); 2146 2147 if (!ha->mqiobase) { 2148 ql_log_pci(ql_log_fatal, ha->pdev, 0x011d, 2149 "BAR2/region4 not enabled\n"); 2150 goto mqiobase_exit; 2151 } 2152 2153 ha->msixbase = ioremap(pci_resource_start(ha->pdev, 2), 2154 pci_resource_len(ha->pdev, 2)); 2155 if (ha->msixbase) { 2156 /* Read MSIX vector size of the board */ 2157 pci_read_config_word(ha->pdev, 2158 QLA_83XX_PCI_MSIX_CONTROL, &msix); 2159 ha->msix_count = (msix & PCI_MSIX_FLAGS_QSIZE) + 1; 2160 /* 2161 * By default, driver uses at least two msix vectors 2162 * (default & rspq) 2163 */ 2164 if (ql2xmqsupport || ql2xnvmeenable) { 2165 /* MB interrupt uses 1 vector */ 2166 ha->max_req_queues = ha->msix_count - 1; 2167 2168 /* ATIOQ needs 1 vector. That's 1 less QPair */ 2169 if (QLA_TGT_MODE_ENABLED()) 2170 ha->max_req_queues--; 2171 2172 ha->max_rsp_queues = ha->max_req_queues; 2173 2174 /* Queue pairs is the max value minus 2175 * the base queue pair */ 2176 ha->max_qpairs = ha->max_req_queues - 1; 2177 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x00e3, 2178 "Max no of queue pairs: %d.\n", ha->max_qpairs); 2179 } 2180 ql_log_pci(ql_log_info, ha->pdev, 0x011c, 2181 "MSI-X vector count: %d.\n", ha->msix_count); 2182 } else 2183 ql_log_pci(ql_log_info, ha->pdev, 0x011e, 2184 "BAR 1 not enabled.\n"); 2185 2186 mqiobase_exit: 2187 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011f, 2188 "MSIX Count: %d.\n", ha->msix_count); 2189 return 0; 2190 2191 iospace_error_exit: 2192 return -ENOMEM; 2193 } 2194 2195 static struct isp_operations qla2100_isp_ops = { 2196 .pci_config = qla2100_pci_config, 2197 .reset_chip = qla2x00_reset_chip, 2198 .chip_diag = qla2x00_chip_diag, 2199 .config_rings = qla2x00_config_rings, 2200 .reset_adapter = qla2x00_reset_adapter, 2201 .nvram_config = qla2x00_nvram_config, 2202 .update_fw_options = qla2x00_update_fw_options, 2203 .load_risc = qla2x00_load_risc, 2204 .pci_info_str = qla2x00_pci_info_str, 2205 .fw_version_str = qla2x00_fw_version_str, 2206 .intr_handler = qla2100_intr_handler, 2207 .enable_intrs = qla2x00_enable_intrs, 2208 .disable_intrs = qla2x00_disable_intrs, 2209 .abort_command = qla2x00_abort_command, 2210 .target_reset = qla2x00_abort_target, 2211 .lun_reset = qla2x00_lun_reset, 2212 .fabric_login = qla2x00_login_fabric, 2213 .fabric_logout = qla2x00_fabric_logout, 2214 .calc_req_entries = qla2x00_calc_iocbs_32, 2215 .build_iocbs = qla2x00_build_scsi_iocbs_32, 2216 .prep_ms_iocb = qla2x00_prep_ms_iocb, 2217 .prep_ms_fdmi_iocb = qla2x00_prep_ms_fdmi_iocb, 2218 .read_nvram =
qla2x00_read_nvram_data, 2219 .write_nvram = qla2x00_write_nvram_data, 2220 .fw_dump = qla2100_fw_dump, 2221 .beacon_on = NULL, 2222 .beacon_off = NULL, 2223 .beacon_blink = NULL, 2224 .read_optrom = qla2x00_read_optrom_data, 2225 .write_optrom = qla2x00_write_optrom_data, 2226 .get_flash_version = qla2x00_get_flash_version, 2227 .start_scsi = qla2x00_start_scsi, 2228 .start_scsi_mq = NULL, 2229 .abort_isp = qla2x00_abort_isp, 2230 .iospace_config = qla2x00_iospace_config, 2231 .initialize_adapter = qla2x00_initialize_adapter, 2232 }; 2233 2234 static struct isp_operations qla2300_isp_ops = { 2235 .pci_config = qla2300_pci_config, 2236 .reset_chip = qla2x00_reset_chip, 2237 .chip_diag = qla2x00_chip_diag, 2238 .config_rings = qla2x00_config_rings, 2239 .reset_adapter = qla2x00_reset_adapter, 2240 .nvram_config = qla2x00_nvram_config, 2241 .update_fw_options = qla2x00_update_fw_options, 2242 .load_risc = qla2x00_load_risc, 2243 .pci_info_str = qla2x00_pci_info_str, 2244 .fw_version_str = qla2x00_fw_version_str, 2245 .intr_handler = qla2300_intr_handler, 2246 .enable_intrs = qla2x00_enable_intrs, 2247 .disable_intrs = qla2x00_disable_intrs, 2248 .abort_command = qla2x00_abort_command, 2249 .target_reset = qla2x00_abort_target, 2250 .lun_reset = qla2x00_lun_reset, 2251 .fabric_login = qla2x00_login_fabric, 2252 .fabric_logout = qla2x00_fabric_logout, 2253 .calc_req_entries = qla2x00_calc_iocbs_32, 2254 .build_iocbs = qla2x00_build_scsi_iocbs_32, 2255 .prep_ms_iocb = qla2x00_prep_ms_iocb, 2256 .prep_ms_fdmi_iocb = qla2x00_prep_ms_fdmi_iocb, 2257 .read_nvram = qla2x00_read_nvram_data, 2258 .write_nvram = qla2x00_write_nvram_data, 2259 .fw_dump = qla2300_fw_dump, 2260 .beacon_on = qla2x00_beacon_on, 2261 .beacon_off = qla2x00_beacon_off, 2262 .beacon_blink = qla2x00_beacon_blink, 2263 .read_optrom = qla2x00_read_optrom_data, 2264 .write_optrom = qla2x00_write_optrom_data, 2265 .get_flash_version = qla2x00_get_flash_version, 2266 .start_scsi = qla2x00_start_scsi, 2267 .start_scsi_mq = NULL, 2268 .abort_isp = qla2x00_abort_isp, 2269 .iospace_config = qla2x00_iospace_config, 2270 .initialize_adapter = qla2x00_initialize_adapter, 2271 }; 2272 2273 static struct isp_operations qla24xx_isp_ops = { 2274 .pci_config = qla24xx_pci_config, 2275 .reset_chip = qla24xx_reset_chip, 2276 .chip_diag = qla24xx_chip_diag, 2277 .config_rings = qla24xx_config_rings, 2278 .reset_adapter = qla24xx_reset_adapter, 2279 .nvram_config = qla24xx_nvram_config, 2280 .update_fw_options = qla24xx_update_fw_options, 2281 .load_risc = qla24xx_load_risc, 2282 .pci_info_str = qla24xx_pci_info_str, 2283 .fw_version_str = qla24xx_fw_version_str, 2284 .intr_handler = qla24xx_intr_handler, 2285 .enable_intrs = qla24xx_enable_intrs, 2286 .disable_intrs = qla24xx_disable_intrs, 2287 .abort_command = qla24xx_abort_command, 2288 .target_reset = qla24xx_abort_target, 2289 .lun_reset = qla24xx_lun_reset, 2290 .fabric_login = qla24xx_login_fabric, 2291 .fabric_logout = qla24xx_fabric_logout, 2292 .calc_req_entries = NULL, 2293 .build_iocbs = NULL, 2294 .prep_ms_iocb = qla24xx_prep_ms_iocb, 2295 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb, 2296 .read_nvram = qla24xx_read_nvram_data, 2297 .write_nvram = qla24xx_write_nvram_data, 2298 .fw_dump = qla24xx_fw_dump, 2299 .beacon_on = qla24xx_beacon_on, 2300 .beacon_off = qla24xx_beacon_off, 2301 .beacon_blink = qla24xx_beacon_blink, 2302 .read_optrom = qla24xx_read_optrom_data, 2303 .write_optrom = qla24xx_write_optrom_data, 2304 .get_flash_version = qla24xx_get_flash_version, 2305 .start_scsi 
= qla24xx_start_scsi, 2306 .start_scsi_mq = NULL, 2307 .abort_isp = qla2x00_abort_isp, 2308 .iospace_config = qla2x00_iospace_config, 2309 .initialize_adapter = qla2x00_initialize_adapter, 2310 }; 2311 2312 static struct isp_operations qla25xx_isp_ops = { 2313 .pci_config = qla25xx_pci_config, 2314 .reset_chip = qla24xx_reset_chip, 2315 .chip_diag = qla24xx_chip_diag, 2316 .config_rings = qla24xx_config_rings, 2317 .reset_adapter = qla24xx_reset_adapter, 2318 .nvram_config = qla24xx_nvram_config, 2319 .update_fw_options = qla24xx_update_fw_options, 2320 .load_risc = qla24xx_load_risc, 2321 .pci_info_str = qla24xx_pci_info_str, 2322 .fw_version_str = qla24xx_fw_version_str, 2323 .intr_handler = qla24xx_intr_handler, 2324 .enable_intrs = qla24xx_enable_intrs, 2325 .disable_intrs = qla24xx_disable_intrs, 2326 .abort_command = qla24xx_abort_command, 2327 .target_reset = qla24xx_abort_target, 2328 .lun_reset = qla24xx_lun_reset, 2329 .fabric_login = qla24xx_login_fabric, 2330 .fabric_logout = qla24xx_fabric_logout, 2331 .calc_req_entries = NULL, 2332 .build_iocbs = NULL, 2333 .prep_ms_iocb = qla24xx_prep_ms_iocb, 2334 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb, 2335 .read_nvram = qla25xx_read_nvram_data, 2336 .write_nvram = qla25xx_write_nvram_data, 2337 .fw_dump = qla25xx_fw_dump, 2338 .beacon_on = qla24xx_beacon_on, 2339 .beacon_off = qla24xx_beacon_off, 2340 .beacon_blink = qla24xx_beacon_blink, 2341 .read_optrom = qla25xx_read_optrom_data, 2342 .write_optrom = qla24xx_write_optrom_data, 2343 .get_flash_version = qla24xx_get_flash_version, 2344 .start_scsi = qla24xx_dif_start_scsi, 2345 .start_scsi_mq = qla2xxx_dif_start_scsi_mq, 2346 .abort_isp = qla2x00_abort_isp, 2347 .iospace_config = qla2x00_iospace_config, 2348 .initialize_adapter = qla2x00_initialize_adapter, 2349 }; 2350 2351 static struct isp_operations qla81xx_isp_ops = { 2352 .pci_config = qla25xx_pci_config, 2353 .reset_chip = qla24xx_reset_chip, 2354 .chip_diag = qla24xx_chip_diag, 2355 .config_rings = qla24xx_config_rings, 2356 .reset_adapter = qla24xx_reset_adapter, 2357 .nvram_config = qla81xx_nvram_config, 2358 .update_fw_options = qla24xx_update_fw_options, 2359 .load_risc = qla81xx_load_risc, 2360 .pci_info_str = qla24xx_pci_info_str, 2361 .fw_version_str = qla24xx_fw_version_str, 2362 .intr_handler = qla24xx_intr_handler, 2363 .enable_intrs = qla24xx_enable_intrs, 2364 .disable_intrs = qla24xx_disable_intrs, 2365 .abort_command = qla24xx_abort_command, 2366 .target_reset = qla24xx_abort_target, 2367 .lun_reset = qla24xx_lun_reset, 2368 .fabric_login = qla24xx_login_fabric, 2369 .fabric_logout = qla24xx_fabric_logout, 2370 .calc_req_entries = NULL, 2371 .build_iocbs = NULL, 2372 .prep_ms_iocb = qla24xx_prep_ms_iocb, 2373 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb, 2374 .read_nvram = NULL, 2375 .write_nvram = NULL, 2376 .fw_dump = qla81xx_fw_dump, 2377 .beacon_on = qla24xx_beacon_on, 2378 .beacon_off = qla24xx_beacon_off, 2379 .beacon_blink = qla83xx_beacon_blink, 2380 .read_optrom = qla25xx_read_optrom_data, 2381 .write_optrom = qla24xx_write_optrom_data, 2382 .get_flash_version = qla24xx_get_flash_version, 2383 .start_scsi = qla24xx_dif_start_scsi, 2384 .start_scsi_mq = qla2xxx_dif_start_scsi_mq, 2385 .abort_isp = qla2x00_abort_isp, 2386 .iospace_config = qla2x00_iospace_config, 2387 .initialize_adapter = qla2x00_initialize_adapter, 2388 }; 2389 2390 static struct isp_operations qla82xx_isp_ops = { 2391 .pci_config = qla82xx_pci_config, 2392 .reset_chip = qla82xx_reset_chip, 2393 .chip_diag = qla24xx_chip_diag, 
2394 .config_rings = qla82xx_config_rings, 2395 .reset_adapter = qla24xx_reset_adapter, 2396 .nvram_config = qla81xx_nvram_config, 2397 .update_fw_options = qla24xx_update_fw_options, 2398 .load_risc = qla82xx_load_risc, 2399 .pci_info_str = qla24xx_pci_info_str, 2400 .fw_version_str = qla24xx_fw_version_str, 2401 .intr_handler = qla82xx_intr_handler, 2402 .enable_intrs = qla82xx_enable_intrs, 2403 .disable_intrs = qla82xx_disable_intrs, 2404 .abort_command = qla24xx_abort_command, 2405 .target_reset = qla24xx_abort_target, 2406 .lun_reset = qla24xx_lun_reset, 2407 .fabric_login = qla24xx_login_fabric, 2408 .fabric_logout = qla24xx_fabric_logout, 2409 .calc_req_entries = NULL, 2410 .build_iocbs = NULL, 2411 .prep_ms_iocb = qla24xx_prep_ms_iocb, 2412 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb, 2413 .read_nvram = qla24xx_read_nvram_data, 2414 .write_nvram = qla24xx_write_nvram_data, 2415 .fw_dump = qla82xx_fw_dump, 2416 .beacon_on = qla82xx_beacon_on, 2417 .beacon_off = qla82xx_beacon_off, 2418 .beacon_blink = NULL, 2419 .read_optrom = qla82xx_read_optrom_data, 2420 .write_optrom = qla82xx_write_optrom_data, 2421 .get_flash_version = qla82xx_get_flash_version, 2422 .start_scsi = qla82xx_start_scsi, 2423 .start_scsi_mq = NULL, 2424 .abort_isp = qla82xx_abort_isp, 2425 .iospace_config = qla82xx_iospace_config, 2426 .initialize_adapter = qla2x00_initialize_adapter, 2427 }; 2428 2429 static struct isp_operations qla8044_isp_ops = { 2430 .pci_config = qla82xx_pci_config, 2431 .reset_chip = qla82xx_reset_chip, 2432 .chip_diag = qla24xx_chip_diag, 2433 .config_rings = qla82xx_config_rings, 2434 .reset_adapter = qla24xx_reset_adapter, 2435 .nvram_config = qla81xx_nvram_config, 2436 .update_fw_options = qla24xx_update_fw_options, 2437 .load_risc = qla82xx_load_risc, 2438 .pci_info_str = qla24xx_pci_info_str, 2439 .fw_version_str = qla24xx_fw_version_str, 2440 .intr_handler = qla8044_intr_handler, 2441 .enable_intrs = qla82xx_enable_intrs, 2442 .disable_intrs = qla82xx_disable_intrs, 2443 .abort_command = qla24xx_abort_command, 2444 .target_reset = qla24xx_abort_target, 2445 .lun_reset = qla24xx_lun_reset, 2446 .fabric_login = qla24xx_login_fabric, 2447 .fabric_logout = qla24xx_fabric_logout, 2448 .calc_req_entries = NULL, 2449 .build_iocbs = NULL, 2450 .prep_ms_iocb = qla24xx_prep_ms_iocb, 2451 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb, 2452 .read_nvram = NULL, 2453 .write_nvram = NULL, 2454 .fw_dump = qla8044_fw_dump, 2455 .beacon_on = qla82xx_beacon_on, 2456 .beacon_off = qla82xx_beacon_off, 2457 .beacon_blink = NULL, 2458 .read_optrom = qla8044_read_optrom_data, 2459 .write_optrom = qla8044_write_optrom_data, 2460 .get_flash_version = qla82xx_get_flash_version, 2461 .start_scsi = qla82xx_start_scsi, 2462 .start_scsi_mq = NULL, 2463 .abort_isp = qla8044_abort_isp, 2464 .iospace_config = qla82xx_iospace_config, 2465 .initialize_adapter = qla2x00_initialize_adapter, 2466 }; 2467 2468 static struct isp_operations qla83xx_isp_ops = { 2469 .pci_config = qla25xx_pci_config, 2470 .reset_chip = qla24xx_reset_chip, 2471 .chip_diag = qla24xx_chip_diag, 2472 .config_rings = qla24xx_config_rings, 2473 .reset_adapter = qla24xx_reset_adapter, 2474 .nvram_config = qla81xx_nvram_config, 2475 .update_fw_options = qla24xx_update_fw_options, 2476 .load_risc = qla81xx_load_risc, 2477 .pci_info_str = qla24xx_pci_info_str, 2478 .fw_version_str = qla24xx_fw_version_str, 2479 .intr_handler = qla24xx_intr_handler, 2480 .enable_intrs = qla24xx_enable_intrs, 2481 .disable_intrs = qla24xx_disable_intrs, 2482 
.abort_command = qla24xx_abort_command, 2483 .target_reset = qla24xx_abort_target, 2484 .lun_reset = qla24xx_lun_reset, 2485 .fabric_login = qla24xx_login_fabric, 2486 .fabric_logout = qla24xx_fabric_logout, 2487 .calc_req_entries = NULL, 2488 .build_iocbs = NULL, 2489 .prep_ms_iocb = qla24xx_prep_ms_iocb, 2490 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb, 2491 .read_nvram = NULL, 2492 .write_nvram = NULL, 2493 .fw_dump = qla83xx_fw_dump, 2494 .beacon_on = qla24xx_beacon_on, 2495 .beacon_off = qla24xx_beacon_off, 2496 .beacon_blink = qla83xx_beacon_blink, 2497 .read_optrom = qla25xx_read_optrom_data, 2498 .write_optrom = qla24xx_write_optrom_data, 2499 .get_flash_version = qla24xx_get_flash_version, 2500 .start_scsi = qla24xx_dif_start_scsi, 2501 .start_scsi_mq = qla2xxx_dif_start_scsi_mq, 2502 .abort_isp = qla2x00_abort_isp, 2503 .iospace_config = qla83xx_iospace_config, 2504 .initialize_adapter = qla2x00_initialize_adapter, 2505 }; 2506 2507 static struct isp_operations qlafx00_isp_ops = { 2508 .pci_config = qlafx00_pci_config, 2509 .reset_chip = qlafx00_soft_reset, 2510 .chip_diag = qlafx00_chip_diag, 2511 .config_rings = qlafx00_config_rings, 2512 .reset_adapter = qlafx00_soft_reset, 2513 .nvram_config = NULL, 2514 .update_fw_options = NULL, 2515 .load_risc = NULL, 2516 .pci_info_str = qlafx00_pci_info_str, 2517 .fw_version_str = qlafx00_fw_version_str, 2518 .intr_handler = qlafx00_intr_handler, 2519 .enable_intrs = qlafx00_enable_intrs, 2520 .disable_intrs = qlafx00_disable_intrs, 2521 .abort_command = qla24xx_async_abort_command, 2522 .target_reset = qlafx00_abort_target, 2523 .lun_reset = qlafx00_lun_reset, 2524 .fabric_login = NULL, 2525 .fabric_logout = NULL, 2526 .calc_req_entries = NULL, 2527 .build_iocbs = NULL, 2528 .prep_ms_iocb = qla24xx_prep_ms_iocb, 2529 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb, 2530 .read_nvram = qla24xx_read_nvram_data, 2531 .write_nvram = qla24xx_write_nvram_data, 2532 .fw_dump = NULL, 2533 .beacon_on = qla24xx_beacon_on, 2534 .beacon_off = qla24xx_beacon_off, 2535 .beacon_blink = NULL, 2536 .read_optrom = qla24xx_read_optrom_data, 2537 .write_optrom = qla24xx_write_optrom_data, 2538 .get_flash_version = qla24xx_get_flash_version, 2539 .start_scsi = qlafx00_start_scsi, 2540 .start_scsi_mq = NULL, 2541 .abort_isp = qlafx00_abort_isp, 2542 .iospace_config = qlafx00_iospace_config, 2543 .initialize_adapter = qlafx00_initialize_adapter, 2544 }; 2545 2546 static struct isp_operations qla27xx_isp_ops = { 2547 .pci_config = qla25xx_pci_config, 2548 .reset_chip = qla24xx_reset_chip, 2549 .chip_diag = qla24xx_chip_diag, 2550 .config_rings = qla24xx_config_rings, 2551 .reset_adapter = qla24xx_reset_adapter, 2552 .nvram_config = qla81xx_nvram_config, 2553 .update_fw_options = qla24xx_update_fw_options, 2554 .load_risc = qla81xx_load_risc, 2555 .pci_info_str = qla24xx_pci_info_str, 2556 .fw_version_str = qla24xx_fw_version_str, 2557 .intr_handler = qla24xx_intr_handler, 2558 .enable_intrs = qla24xx_enable_intrs, 2559 .disable_intrs = qla24xx_disable_intrs, 2560 .abort_command = qla24xx_abort_command, 2561 .target_reset = qla24xx_abort_target, 2562 .lun_reset = qla24xx_lun_reset, 2563 .fabric_login = qla24xx_login_fabric, 2564 .fabric_logout = qla24xx_fabric_logout, 2565 .calc_req_entries = NULL, 2566 .build_iocbs = NULL, 2567 .prep_ms_iocb = qla24xx_prep_ms_iocb, 2568 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb, 2569 .read_nvram = NULL, 2570 .write_nvram = NULL, 2571 .fw_dump = qla27xx_fwdump, 2572 .mpi_fw_dump = qla27xx_mpi_fwdump, 2573 .beacon_on = 
qla24xx_beacon_on, 2574 .beacon_off = qla24xx_beacon_off, 2575 .beacon_blink = qla83xx_beacon_blink, 2576 .read_optrom = qla25xx_read_optrom_data, 2577 .write_optrom = qla24xx_write_optrom_data, 2578 .get_flash_version = qla24xx_get_flash_version, 2579 .start_scsi = qla24xx_dif_start_scsi, 2580 .start_scsi_mq = qla2xxx_dif_start_scsi_mq, 2581 .abort_isp = qla2x00_abort_isp, 2582 .iospace_config = qla83xx_iospace_config, 2583 .initialize_adapter = qla2x00_initialize_adapter, 2584 }; 2585 2586 static inline void 2587 qla2x00_set_isp_flags(struct qla_hw_data *ha) 2588 { 2589 ha->device_type = DT_EXTENDED_IDS; 2590 switch (ha->pdev->device) { 2591 case PCI_DEVICE_ID_QLOGIC_ISP2100: 2592 ha->isp_type |= DT_ISP2100; 2593 ha->device_type &= ~DT_EXTENDED_IDS; 2594 ha->fw_srisc_address = RISC_START_ADDRESS_2100; 2595 break; 2596 case PCI_DEVICE_ID_QLOGIC_ISP2200: 2597 ha->isp_type |= DT_ISP2200; 2598 ha->device_type &= ~DT_EXTENDED_IDS; 2599 ha->fw_srisc_address = RISC_START_ADDRESS_2100; 2600 break; 2601 case PCI_DEVICE_ID_QLOGIC_ISP2300: 2602 ha->isp_type |= DT_ISP2300; 2603 ha->device_type |= DT_ZIO_SUPPORTED; 2604 ha->fw_srisc_address = RISC_START_ADDRESS_2300; 2605 break; 2606 case PCI_DEVICE_ID_QLOGIC_ISP2312: 2607 ha->isp_type |= DT_ISP2312; 2608 ha->device_type |= DT_ZIO_SUPPORTED; 2609 ha->fw_srisc_address = RISC_START_ADDRESS_2300; 2610 break; 2611 case PCI_DEVICE_ID_QLOGIC_ISP2322: 2612 ha->isp_type |= DT_ISP2322; 2613 ha->device_type |= DT_ZIO_SUPPORTED; 2614 if (ha->pdev->subsystem_vendor == 0x1028 && 2615 ha->pdev->subsystem_device == 0x0170) 2616 ha->device_type |= DT_OEM_001; 2617 ha->fw_srisc_address = RISC_START_ADDRESS_2300; 2618 break; 2619 case PCI_DEVICE_ID_QLOGIC_ISP6312: 2620 ha->isp_type |= DT_ISP6312; 2621 ha->fw_srisc_address = RISC_START_ADDRESS_2300; 2622 break; 2623 case PCI_DEVICE_ID_QLOGIC_ISP6322: 2624 ha->isp_type |= DT_ISP6322; 2625 ha->fw_srisc_address = RISC_START_ADDRESS_2300; 2626 break; 2627 case PCI_DEVICE_ID_QLOGIC_ISP2422: 2628 ha->isp_type |= DT_ISP2422; 2629 ha->device_type |= DT_ZIO_SUPPORTED; 2630 ha->device_type |= DT_FWI2; 2631 ha->device_type |= DT_IIDMA; 2632 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2633 break; 2634 case PCI_DEVICE_ID_QLOGIC_ISP2432: 2635 ha->isp_type |= DT_ISP2432; 2636 ha->device_type |= DT_ZIO_SUPPORTED; 2637 ha->device_type |= DT_FWI2; 2638 ha->device_type |= DT_IIDMA; 2639 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2640 break; 2641 case PCI_DEVICE_ID_QLOGIC_ISP8432: 2642 ha->isp_type |= DT_ISP8432; 2643 ha->device_type |= DT_ZIO_SUPPORTED; 2644 ha->device_type |= DT_FWI2; 2645 ha->device_type |= DT_IIDMA; 2646 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2647 break; 2648 case PCI_DEVICE_ID_QLOGIC_ISP5422: 2649 ha->isp_type |= DT_ISP5422; 2650 ha->device_type |= DT_FWI2; 2651 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2652 break; 2653 case PCI_DEVICE_ID_QLOGIC_ISP5432: 2654 ha->isp_type |= DT_ISP5432; 2655 ha->device_type |= DT_FWI2; 2656 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2657 break; 2658 case PCI_DEVICE_ID_QLOGIC_ISP2532: 2659 ha->isp_type |= DT_ISP2532; 2660 ha->device_type |= DT_ZIO_SUPPORTED; 2661 ha->device_type |= DT_FWI2; 2662 ha->device_type |= DT_IIDMA; 2663 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2664 break; 2665 case PCI_DEVICE_ID_QLOGIC_ISP8001: 2666 ha->isp_type |= DT_ISP8001; 2667 ha->device_type |= DT_ZIO_SUPPORTED; 2668 ha->device_type |= DT_FWI2; 2669 ha->device_type |= DT_IIDMA; 2670 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2671 break; 2672 case 
PCI_DEVICE_ID_QLOGIC_ISP8021: 2673 ha->isp_type |= DT_ISP8021; 2674 ha->device_type |= DT_ZIO_SUPPORTED; 2675 ha->device_type |= DT_FWI2; 2676 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2677 /* Initialize 82XX ISP flags */ 2678 qla82xx_init_flags(ha); 2679 break; 2680 case PCI_DEVICE_ID_QLOGIC_ISP8044: 2681 ha->isp_type |= DT_ISP8044; 2682 ha->device_type |= DT_ZIO_SUPPORTED; 2683 ha->device_type |= DT_FWI2; 2684 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2685 /* Initialize 82XX ISP flags */ 2686 qla82xx_init_flags(ha); 2687 break; 2688 case PCI_DEVICE_ID_QLOGIC_ISP2031: 2689 ha->isp_type |= DT_ISP2031; 2690 ha->device_type |= DT_ZIO_SUPPORTED; 2691 ha->device_type |= DT_FWI2; 2692 ha->device_type |= DT_IIDMA; 2693 ha->device_type |= DT_T10_PI; 2694 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2695 break; 2696 case PCI_DEVICE_ID_QLOGIC_ISP8031: 2697 ha->isp_type |= DT_ISP8031; 2698 ha->device_type |= DT_ZIO_SUPPORTED; 2699 ha->device_type |= DT_FWI2; 2700 ha->device_type |= DT_IIDMA; 2701 ha->device_type |= DT_T10_PI; 2702 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2703 break; 2704 case PCI_DEVICE_ID_QLOGIC_ISPF001: 2705 ha->isp_type |= DT_ISPFX00; 2706 break; 2707 case PCI_DEVICE_ID_QLOGIC_ISP2071: 2708 ha->isp_type |= DT_ISP2071; 2709 ha->device_type |= DT_ZIO_SUPPORTED; 2710 ha->device_type |= DT_FWI2; 2711 ha->device_type |= DT_IIDMA; 2712 ha->device_type |= DT_T10_PI; 2713 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2714 break; 2715 case PCI_DEVICE_ID_QLOGIC_ISP2271: 2716 ha->isp_type |= DT_ISP2271; 2717 ha->device_type |= DT_ZIO_SUPPORTED; 2718 ha->device_type |= DT_FWI2; 2719 ha->device_type |= DT_IIDMA; 2720 ha->device_type |= DT_T10_PI; 2721 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2722 break; 2723 case PCI_DEVICE_ID_QLOGIC_ISP2261: 2724 ha->isp_type |= DT_ISP2261; 2725 ha->device_type |= DT_ZIO_SUPPORTED; 2726 ha->device_type |= DT_FWI2; 2727 ha->device_type |= DT_IIDMA; 2728 ha->device_type |= DT_T10_PI; 2729 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2730 break; 2731 case PCI_DEVICE_ID_QLOGIC_ISP2081: 2732 case PCI_DEVICE_ID_QLOGIC_ISP2089: 2733 ha->isp_type |= DT_ISP2081; 2734 ha->device_type |= DT_ZIO_SUPPORTED; 2735 ha->device_type |= DT_FWI2; 2736 ha->device_type |= DT_IIDMA; 2737 ha->device_type |= DT_T10_PI; 2738 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2739 break; 2740 case PCI_DEVICE_ID_QLOGIC_ISP2281: 2741 case PCI_DEVICE_ID_QLOGIC_ISP2289: 2742 ha->isp_type |= DT_ISP2281; 2743 ha->device_type |= DT_ZIO_SUPPORTED; 2744 ha->device_type |= DT_FWI2; 2745 ha->device_type |= DT_IIDMA; 2746 ha->device_type |= DT_T10_PI; 2747 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2748 break; 2749 } 2750 2751 if (IS_QLA82XX(ha)) 2752 ha->port_no = ha->portnum & 1; 2753 else { 2754 /* Get adapter physical port no from interrupt pin register. 
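 * PCI_INTERRUPT_PIN reads back 1 for INTA#, 2 for INTB#, and so on, so
 * the wired pin doubles as a per-function port hint. ISP25xx and newer
 * map pin N to port N - 1; older parts derive the port from the inverted
 * low bit instead, as the code below shows. Illustrative read, with a
 * generic pdev:
 *
 *	u8 pin;
 *
 *	pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pin);
 *	// pin == 2 (INTB#) implies port 1 on ISP25xx-class parts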
*/ 2755 pci_read_config_byte(ha->pdev, PCI_INTERRUPT_PIN, &ha->port_no); 2756 if (IS_QLA25XX(ha) || IS_QLA2031(ha) || 2757 IS_QLA27XX(ha) || IS_QLA28XX(ha)) 2758 ha->port_no--; 2759 else 2760 ha->port_no = !(ha->port_no & 1); 2761 } 2762 2763 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x000b, 2764 "device_type=0x%x port=%d fw_srisc_address=0x%x.\n", 2765 ha->device_type, ha->port_no, ha->fw_srisc_address); 2766 } 2767 2768 static void 2769 qla2xxx_scan_start(struct Scsi_Host *shost) 2770 { 2771 scsi_qla_host_t *vha = shost_priv(shost); 2772 2773 if (vha->hw->flags.running_gold_fw) 2774 return; 2775 2776 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 2777 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 2778 set_bit(RSCN_UPDATE, &vha->dpc_flags); 2779 set_bit(NPIV_CONFIG_NEEDED, &vha->dpc_flags); 2780 } 2781 2782 static int 2783 qla2xxx_scan_finished(struct Scsi_Host *shost, unsigned long time) 2784 { 2785 scsi_qla_host_t *vha = shost_priv(shost); 2786 2787 if (test_bit(UNLOADING, &vha->dpc_flags)) 2788 return 1; 2789 if (!vha->host) 2790 return 1; 2791 if (time > vha->hw->loop_reset_delay * HZ) 2792 return 1; 2793 2794 return atomic_read(&vha->loop_state) == LOOP_READY; 2795 } 2796 2797 static void qla2x00_iocb_work_fn(struct work_struct *work) 2798 { 2799 struct scsi_qla_host *vha = container_of(work, 2800 struct scsi_qla_host, iocb_work); 2801 struct qla_hw_data *ha = vha->hw; 2802 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 2803 int i = 2; 2804 unsigned long flags; 2805 2806 if (test_bit(UNLOADING, &base_vha->dpc_flags)) 2807 return; 2808 2809 while (!list_empty(&vha->work_list) && i > 0) { 2810 qla2x00_do_work(vha); 2811 i--; 2812 } 2813 2814 spin_lock_irqsave(&vha->work_lock, flags); 2815 clear_bit(IOCB_WORK_ACTIVE, &vha->dpc_flags); 2816 spin_unlock_irqrestore(&vha->work_lock, flags); 2817 } 2818 2819 /* 2820 * PCI driver interface 2821 */ 2822 static int 2823 qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) 2824 { 2825 int ret = -ENODEV; 2826 struct Scsi_Host *host; 2827 scsi_qla_host_t *base_vha = NULL; 2828 struct qla_hw_data *ha; 2829 char pci_info[30]; 2830 char fw_str[30], wq_name[30]; 2831 struct scsi_host_template *sht; 2832 int bars, mem_only = 0; 2833 uint16_t req_length = 0, rsp_length = 0; 2834 struct req_que *req = NULL; 2835 struct rsp_que *rsp = NULL; 2836 int i; 2837 2838 bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO); 2839 sht = &qla2xxx_driver_template; 2840 if (pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2422 || 2841 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2432 || 2842 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8432 || 2843 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5422 || 2844 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5432 || 2845 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2532 || 2846 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8001 || 2847 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8021 || 2848 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2031 || 2849 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8031 || 2850 pdev->device == PCI_DEVICE_ID_QLOGIC_ISPF001 || 2851 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8044 || 2852 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2071 || 2853 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2271 || 2854 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2261 || 2855 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2081 || 2856 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2281 || 2857 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2089 || 2858 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2289) { 2859 bars = pci_select_bars(pdev, IORESOURCE_MEM); 2860 mem_only = 1; 2861 
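		/*
		 * These adapter types decode no usable I/O port BAR, so
		 * only the memory BARs were selected above and
		 * pci_enable_device_mem() is used in place of
		 * pci_enable_device() below.
		 */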
ql_dbg_pci(ql_dbg_init, pdev, 0x0007, 2862 "Mem only adapter.\n"); 2863 } 2864 ql_dbg_pci(ql_dbg_init, pdev, 0x0008, 2865 "Bars=%d.\n", bars); 2866 2867 if (mem_only) { 2868 if (pci_enable_device_mem(pdev)) 2869 return ret; 2870 } else { 2871 if (pci_enable_device(pdev)) 2872 return ret; 2873 } 2874 2875 if (is_kdump_kernel()) { 2876 ql2xmqsupport = 0; 2877 ql2xallocfwdump = 0; 2878 } 2879 2880 /* This may fail but that's ok */ 2881 pci_enable_pcie_error_reporting(pdev); 2882 2883 ha = kzalloc(sizeof(struct qla_hw_data), GFP_KERNEL); 2884 if (!ha) { 2885 ql_log_pci(ql_log_fatal, pdev, 0x0009, 2886 "Unable to allocate memory for ha.\n"); 2887 goto disable_device; 2888 } 2889 ql_dbg_pci(ql_dbg_init, pdev, 0x000a, 2890 "Memory allocated for ha=%p.\n", ha); 2891 ha->pdev = pdev; 2892 INIT_LIST_HEAD(&ha->tgt.q_full_list); 2893 spin_lock_init(&ha->tgt.q_full_lock); 2894 spin_lock_init(&ha->tgt.sess_lock); 2895 spin_lock_init(&ha->tgt.atio_lock); 2896 2897 spin_lock_init(&ha->sadb_lock); 2898 INIT_LIST_HEAD(&ha->sadb_tx_index_list); 2899 INIT_LIST_HEAD(&ha->sadb_rx_index_list); 2900 2901 spin_lock_init(&ha->sadb_fp_lock); 2902 2903 if (qla_edif_sadb_build_free_pool(ha)) { 2904 kfree(ha); 2905 goto disable_device; 2906 } 2907 2908 atomic_set(&ha->nvme_active_aen_cnt, 0); 2909 2910 /* Clear our data area */ 2911 ha->bars = bars; 2912 ha->mem_only = mem_only; 2913 spin_lock_init(&ha->hardware_lock); 2914 spin_lock_init(&ha->vport_slock); 2915 mutex_init(&ha->selflogin_lock); 2916 mutex_init(&ha->optrom_mutex); 2917 2918 /* Set ISP-type information. */ 2919 qla2x00_set_isp_flags(ha); 2920 2921 /* Set EEH reset type to fundamental if required by hba */ 2922 if (IS_QLA24XX(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha) || 2923 IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) 2924 pdev->needs_freset = 1; 2925 2926 ha->prev_topology = 0; 2927 ha->init_cb_size = sizeof(init_cb_t); 2928 ha->link_data_rate = PORT_SPEED_UNKNOWN; 2929 ha->optrom_size = OPTROM_SIZE_2300; 2930 ha->max_exchg = FW_MAX_EXCHANGES_CNT; 2931 atomic_set(&ha->num_pend_mbx_stage1, 0); 2932 atomic_set(&ha->num_pend_mbx_stage2, 0); 2933 atomic_set(&ha->num_pend_mbx_stage3, 0); 2934 atomic_set(&ha->zio_threshold, DEFAULT_ZIO_THRESHOLD); 2935 ha->last_zio_threshold = DEFAULT_ZIO_THRESHOLD; 2936 2937 /* Assign ISP specific operations. 
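 * Exactly one struct isp_operations table is chosen per ISP family;
 * from here on the driver dispatches through it instead of re-checking
 * the chip type, e.g. later in this function:
 *
 *	ret = ha->isp_ops->iospace_config(ha);
 *	...
 *	ha->isp_ops->enable_intrs(ha);
 *
 * Entries a family does not implement are left NULL and must not be
 * invoked for that family.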
*/ 2938 if (IS_QLA2100(ha)) { 2939 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2100; 2940 ha->mbx_count = MAILBOX_REGISTER_COUNT_2100; 2941 req_length = REQUEST_ENTRY_CNT_2100; 2942 rsp_length = RESPONSE_ENTRY_CNT_2100; 2943 ha->max_loop_id = SNS_LAST_LOOP_ID_2100; 2944 ha->gid_list_info_size = 4; 2945 ha->flash_conf_off = ~0; 2946 ha->flash_data_off = ~0; 2947 ha->nvram_conf_off = ~0; 2948 ha->nvram_data_off = ~0; 2949 ha->isp_ops = &qla2100_isp_ops; 2950 } else if (IS_QLA2200(ha)) { 2951 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2100; 2952 ha->mbx_count = MAILBOX_REGISTER_COUNT_2200; 2953 req_length = REQUEST_ENTRY_CNT_2200; 2954 rsp_length = RESPONSE_ENTRY_CNT_2100; 2955 ha->max_loop_id = SNS_LAST_LOOP_ID_2100; 2956 ha->gid_list_info_size = 4; 2957 ha->flash_conf_off = ~0; 2958 ha->flash_data_off = ~0; 2959 ha->nvram_conf_off = ~0; 2960 ha->nvram_data_off = ~0; 2961 ha->isp_ops = &qla2100_isp_ops; 2962 } else if (IS_QLA23XX(ha)) { 2963 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2100; 2964 ha->mbx_count = MAILBOX_REGISTER_COUNT; 2965 req_length = REQUEST_ENTRY_CNT_2200; 2966 rsp_length = RESPONSE_ENTRY_CNT_2300; 2967 ha->max_loop_id = SNS_LAST_LOOP_ID_2300; 2968 ha->gid_list_info_size = 6; 2969 if (IS_QLA2322(ha) || IS_QLA6322(ha)) 2970 ha->optrom_size = OPTROM_SIZE_2322; 2971 ha->flash_conf_off = ~0; 2972 ha->flash_data_off = ~0; 2973 ha->nvram_conf_off = ~0; 2974 ha->nvram_data_off = ~0; 2975 ha->isp_ops = &qla2300_isp_ops; 2976 } else if (IS_QLA24XX_TYPE(ha)) { 2977 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; 2978 ha->mbx_count = MAILBOX_REGISTER_COUNT; 2979 req_length = REQUEST_ENTRY_CNT_24XX; 2980 rsp_length = RESPONSE_ENTRY_CNT_2300; 2981 ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; 2982 ha->max_loop_id = SNS_LAST_LOOP_ID_2300; 2983 ha->init_cb_size = sizeof(struct mid_init_cb_24xx); 2984 ha->gid_list_info_size = 8; 2985 ha->optrom_size = OPTROM_SIZE_24XX; 2986 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA24XX; 2987 ha->isp_ops = &qla24xx_isp_ops; 2988 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF; 2989 ha->flash_data_off = FARX_ACCESS_FLASH_DATA; 2990 ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF; 2991 ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA; 2992 } else if (IS_QLA25XX(ha)) { 2993 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; 2994 ha->mbx_count = MAILBOX_REGISTER_COUNT; 2995 req_length = REQUEST_ENTRY_CNT_24XX; 2996 rsp_length = RESPONSE_ENTRY_CNT_2300; 2997 ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; 2998 ha->max_loop_id = SNS_LAST_LOOP_ID_2300; 2999 ha->init_cb_size = sizeof(struct mid_init_cb_24xx); 3000 ha->gid_list_info_size = 8; 3001 ha->optrom_size = OPTROM_SIZE_25XX; 3002 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX; 3003 ha->isp_ops = &qla25xx_isp_ops; 3004 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF; 3005 ha->flash_data_off = FARX_ACCESS_FLASH_DATA; 3006 ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF; 3007 ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA; 3008 } else if (IS_QLA81XX(ha)) { 3009 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; 3010 ha->mbx_count = MAILBOX_REGISTER_COUNT; 3011 req_length = REQUEST_ENTRY_CNT_24XX; 3012 rsp_length = RESPONSE_ENTRY_CNT_2300; 3013 ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; 3014 ha->max_loop_id = SNS_LAST_LOOP_ID_2300; 3015 ha->init_cb_size = sizeof(struct mid_init_cb_81xx); 3016 ha->gid_list_info_size = 8; 3017 ha->optrom_size = OPTROM_SIZE_81XX; 3018 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX; 3019 ha->isp_ops = &qla81xx_isp_ops; 3020 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX; 3021 ha->flash_data_off = 
FARX_ACCESS_FLASH_DATA_81XX; 3022 ha->nvram_conf_off = ~0; 3023 ha->nvram_data_off = ~0; 3024 } else if (IS_QLA82XX(ha)) { 3025 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; 3026 ha->mbx_count = MAILBOX_REGISTER_COUNT; 3027 req_length = REQUEST_ENTRY_CNT_82XX; 3028 rsp_length = RESPONSE_ENTRY_CNT_82XX; 3029 ha->max_loop_id = SNS_LAST_LOOP_ID_2300; 3030 ha->init_cb_size = sizeof(struct mid_init_cb_81xx); 3031 ha->gid_list_info_size = 8; 3032 ha->optrom_size = OPTROM_SIZE_82XX; 3033 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX; 3034 ha->isp_ops = &qla82xx_isp_ops; 3035 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF; 3036 ha->flash_data_off = FARX_ACCESS_FLASH_DATA; 3037 ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF; 3038 ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA; 3039 } else if (IS_QLA8044(ha)) { 3040 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; 3041 ha->mbx_count = MAILBOX_REGISTER_COUNT; 3042 req_length = REQUEST_ENTRY_CNT_82XX; 3043 rsp_length = RESPONSE_ENTRY_CNT_82XX; 3044 ha->max_loop_id = SNS_LAST_LOOP_ID_2300; 3045 ha->init_cb_size = sizeof(struct mid_init_cb_81xx); 3046 ha->gid_list_info_size = 8; 3047 ha->optrom_size = OPTROM_SIZE_83XX; 3048 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX; 3049 ha->isp_ops = &qla8044_isp_ops; 3050 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF; 3051 ha->flash_data_off = FARX_ACCESS_FLASH_DATA; 3052 ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF; 3053 ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA; 3054 } else if (IS_QLA83XX(ha)) { 3055 ha->portnum = PCI_FUNC(ha->pdev->devfn); 3056 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; 3057 ha->mbx_count = MAILBOX_REGISTER_COUNT; 3058 req_length = REQUEST_ENTRY_CNT_83XX; 3059 rsp_length = RESPONSE_ENTRY_CNT_83XX; 3060 ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; 3061 ha->max_loop_id = SNS_LAST_LOOP_ID_2300; 3062 ha->init_cb_size = sizeof(struct mid_init_cb_81xx); 3063 ha->gid_list_info_size = 8; 3064 ha->optrom_size = OPTROM_SIZE_83XX; 3065 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX; 3066 ha->isp_ops = &qla83xx_isp_ops; 3067 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX; 3068 ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX; 3069 ha->nvram_conf_off = ~0; 3070 ha->nvram_data_off = ~0; 3071 } else if (IS_QLAFX00(ha)) { 3072 ha->max_fibre_devices = MAX_FIBRE_DEVICES_FX00; 3073 ha->mbx_count = MAILBOX_REGISTER_COUNT_FX00; 3074 ha->aen_mbx_count = AEN_MAILBOX_REGISTER_COUNT_FX00; 3075 req_length = REQUEST_ENTRY_CNT_FX00; 3076 rsp_length = RESPONSE_ENTRY_CNT_FX00; 3077 ha->isp_ops = &qlafx00_isp_ops; 3078 ha->port_down_retry_count = 30; /* default value */ 3079 ha->mr.fw_hbt_cnt = QLAFX00_HEARTBEAT_INTERVAL; 3080 ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL; 3081 ha->mr.fw_critemp_timer_tick = QLAFX00_CRITEMP_INTERVAL; 3082 ha->mr.fw_hbt_en = 1; 3083 ha->mr.host_info_resend = false; 3084 ha->mr.hinfo_resend_timer_tick = QLAFX00_HINFO_RESEND_INTERVAL; 3085 } else if (IS_QLA27XX(ha)) { 3086 ha->portnum = PCI_FUNC(ha->pdev->devfn); 3087 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; 3088 ha->mbx_count = MAILBOX_REGISTER_COUNT; 3089 req_length = REQUEST_ENTRY_CNT_83XX; 3090 rsp_length = RESPONSE_ENTRY_CNT_83XX; 3091 ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; 3092 ha->max_loop_id = SNS_LAST_LOOP_ID_2300; 3093 ha->init_cb_size = sizeof(struct mid_init_cb_81xx); 3094 ha->gid_list_info_size = 8; 3095 ha->optrom_size = OPTROM_SIZE_83XX; 3096 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX; 3097 ha->isp_ops = &qla27xx_isp_ops; 3098 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX; 3099 ha->flash_data_off = 
FARX_ACCESS_FLASH_DATA_81XX; 3100 ha->nvram_conf_off = ~0; 3101 ha->nvram_data_off = ~0; 3102 } else if (IS_QLA28XX(ha)) { 3103 ha->portnum = PCI_FUNC(ha->pdev->devfn); 3104 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; 3105 ha->mbx_count = MAILBOX_REGISTER_COUNT; 3106 req_length = REQUEST_ENTRY_CNT_83XX; 3107 rsp_length = RESPONSE_ENTRY_CNT_83XX; 3108 ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; 3109 ha->max_loop_id = SNS_LAST_LOOP_ID_2300; 3110 ha->init_cb_size = sizeof(struct mid_init_cb_81xx); 3111 ha->gid_list_info_size = 8; 3112 ha->optrom_size = OPTROM_SIZE_28XX; 3113 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX; 3114 ha->isp_ops = &qla27xx_isp_ops; 3115 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_28XX; 3116 ha->flash_data_off = FARX_ACCESS_FLASH_DATA_28XX; 3117 ha->nvram_conf_off = ~0; 3118 ha->nvram_data_off = ~0; 3119 } 3120 3121 ql_dbg_pci(ql_dbg_init, pdev, 0x001e, 3122 "mbx_count=%d, req_length=%d, " 3123 "rsp_length=%d, max_loop_id=%d, init_cb_size=%d, " 3124 "gid_list_info_size=%d, optrom_size=%d, nvram_npiv_size=%d, " 3125 "max_fibre_devices=%d.\n", 3126 ha->mbx_count, req_length, rsp_length, ha->max_loop_id, 3127 ha->init_cb_size, ha->gid_list_info_size, ha->optrom_size, 3128 ha->nvram_npiv_size, ha->max_fibre_devices); 3129 ql_dbg_pci(ql_dbg_init, pdev, 0x001f, 3130 "isp_ops=%p, flash_conf_off=%d, " 3131 "flash_data_off=%d, nvram_conf_off=%d, nvram_data_off=%d.\n", 3132 ha->isp_ops, ha->flash_conf_off, ha->flash_data_off, 3133 ha->nvram_conf_off, ha->nvram_data_off); 3134 3135 /* Configure PCI I/O space */ 3136 ret = ha->isp_ops->iospace_config(ha); 3137 if (ret) 3138 goto iospace_config_failed; 3139 3140 ql_log_pci(ql_log_info, pdev, 0x001d, 3141 "Found an ISP%04X irq %d iobase 0x%p.\n", 3142 pdev->device, pdev->irq, ha->iobase); 3143 mutex_init(&ha->vport_lock); 3144 mutex_init(&ha->mq_lock); 3145 init_completion(&ha->mbx_cmd_comp); 3146 complete(&ha->mbx_cmd_comp); 3147 init_completion(&ha->mbx_intr_comp); 3148 init_completion(&ha->dcbx_comp); 3149 init_completion(&ha->lb_portup_comp); 3150 3151 set_bit(0, (unsigned long *) ha->vp_idx_map); 3152 3153 qla2x00_config_dma_addressing(ha); 3154 ql_dbg_pci(ql_dbg_init, pdev, 0x0020, 3155 "64 Bit addressing is %s.\n", 3156 ha->flags.enable_64bit_addressing ? "enable" : 3157 "disable"); 3158 ret = qla2x00_mem_alloc(ha, req_length, rsp_length, &req, &rsp); 3159 if (ret) { 3160 ql_log_pci(ql_log_fatal, pdev, 0x0031, 3161 "Failed to allocate memory for adapter, aborting.\n"); 3162 3163 goto probe_hw_failed; 3164 } 3165 3166 req->max_q_depth = MAX_Q_DEPTH; 3167 if (ql2xmaxqdepth != 0 && ql2xmaxqdepth <= 0xffffU) 3168 req->max_q_depth = ql2xmaxqdepth; 3169 3170 3171 base_vha = qla2x00_create_host(sht, ha); 3172 if (!base_vha) { 3173 ret = -ENOMEM; 3174 goto probe_hw_failed; 3175 } 3176 3177 pci_set_drvdata(pdev, base_vha); 3178 set_bit(PFLG_DRIVER_PROBING, &base_vha->pci_flags); 3179 3180 host = base_vha->host; 3181 base_vha->req = req; 3182 if (IS_QLA2XXX_MIDTYPE(ha)) 3183 base_vha->mgmt_svr_loop_id = 3184 qla2x00_reserve_mgmt_server_loop_id(base_vha); 3185 else 3186 base_vha->mgmt_svr_loop_id = MANAGEMENT_SERVER + 3187 base_vha->vp_idx; 3188 3189 /* Setup fcport template structure. 
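 * ha->mr.fcport is a template port for the ISPFX00 (MR) discovery path,
 * e.g. the qlafx00_fx_disc(..., &base_vha->hw->mr.fcport, ...) calls
 * further down; it starts out unconfigured (FCS_UNCONFIGURED,
 * FC_NO_LOOP_ID) until the firmware reports real port information.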
*/ 3190 ha->mr.fcport.vha = base_vha; 3191 ha->mr.fcport.port_type = FCT_UNKNOWN; 3192 ha->mr.fcport.loop_id = FC_NO_LOOP_ID; 3193 qla2x00_set_fcport_state(&ha->mr.fcport, FCS_UNCONFIGURED); 3194 ha->mr.fcport.supported_classes = FC_COS_UNSPECIFIED; 3195 ha->mr.fcport.scan_state = 1; 3196 3197 qla2xxx_reset_stats(host, QLA2XX_HW_ERROR | QLA2XX_SHT_LNK_DWN | 3198 QLA2XX_INT_ERR | QLA2XX_CMD_TIMEOUT | 3199 QLA2XX_RESET_CMD_ERR | QLA2XX_TGT_SHT_LNK_DOWN); 3200 3201 /* Set the SG table size based on ISP type */ 3202 if (!IS_FWI2_CAPABLE(ha)) { 3203 if (IS_QLA2100(ha)) 3204 host->sg_tablesize = 32; 3205 } else { 3206 if (!IS_QLA82XX(ha)) 3207 host->sg_tablesize = QLA_SG_ALL; 3208 } 3209 host->max_id = ha->max_fibre_devices; 3210 host->cmd_per_lun = 3; 3211 host->unique_id = host->host_no; 3212 if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) 3213 host->max_cmd_len = 32; 3214 else 3215 host->max_cmd_len = MAX_CMDSZ; 3216 host->max_channel = MAX_BUSES - 1; 3217 /* Older HBAs support only 16-bit LUNs */ 3218 if (!IS_QLAFX00(ha) && !IS_FWI2_CAPABLE(ha) && 3219 ql2xmaxlun > 0xffff) 3220 host->max_lun = 0xffff; 3221 else 3222 host->max_lun = ql2xmaxlun; 3223 host->transportt = qla2xxx_transport_template; 3224 sht->vendor_id = (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC); 3225 3226 ql_dbg(ql_dbg_init, base_vha, 0x0033, 3227 "max_id=%d this_id=%d " 3228 "cmd_per_lun=%d unique_id=%d max_cmd_len=%d max_channel=%d " 3229 "max_lun=%llu transportt=%p, vendor_id=%llu.\n", host->max_id, 3230 host->this_id, host->cmd_per_lun, host->unique_id, 3231 host->max_cmd_len, host->max_channel, host->max_lun, 3232 host->transportt, sht->vendor_id); 3233 3234 INIT_WORK(&base_vha->iocb_work, qla2x00_iocb_work_fn); 3235 3236 /* Set up the irqs */ 3237 ret = qla2x00_request_irqs(ha, rsp); 3238 if (ret) 3239 goto probe_failed; 3240 3241 /* Alloc arrays of request and response ring ptrs */ 3242 ret = qla2x00_alloc_queues(ha, req, rsp); 3243 if (ret) { 3244 ql_log(ql_log_fatal, base_vha, 0x003d, 3245 "Failed to allocate memory for queue pointers..." 3246 "aborting.\n"); 3247 ret = -ENODEV; 3248 goto probe_failed; 3249 } 3250 3251 if (ha->mqenable) { 3252 /* number of hardware queues supported by blk/scsi-mq */ 3253 host->nr_hw_queues = ha->max_qpairs; 3254 3255 ql_dbg(ql_dbg_init, base_vha, 0x0192, 3256 "blk/scsi-mq enabled, HW queues = %d.\n", host->nr_hw_queues); 3257 } else { 3258 if (ql2xnvmeenable) { 3259 host->nr_hw_queues = ha->max_qpairs; 3260 ql_dbg(ql_dbg_init, base_vha, 0x0194, 3261 "FC-NVMe support is enabled, HW queues=%d\n", 3262 host->nr_hw_queues); 3263 } else { 3264 ql_dbg(ql_dbg_init, base_vha, 0x0193, 3265 "blk/scsi-mq disabled.\n"); 3266 } 3267 } 3268 3269 qlt_probe_one_stage1(base_vha, ha); 3270 3271 pci_save_state(pdev); 3272 3273 /* Assign back pointers */ 3274 rsp->req = req; 3275 req->rsp = rsp; 3276 3277 if (IS_QLAFX00(ha)) { 3278 ha->rsp_q_map[0] = rsp; 3279 ha->req_q_map[0] = req; 3280 set_bit(0, ha->req_qid_map); 3281 set_bit(0, ha->rsp_qid_map); 3282 } 3283 3284 /* FWI2-capable only.
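 * The ring in/out pointers are first set to the ISP24xx register layout
 * and then overridden below for the multiqueue (isp25mq), ISPFX00 and
 * P3P register files, so later ring updates can write through
 * req->req_q_in / rsp->rsp_q_out without any per-chip checks.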
*/ 3285 req->req_q_in = &ha->iobase->isp24.req_q_in; 3286 req->req_q_out = &ha->iobase->isp24.req_q_out; 3287 rsp->rsp_q_in = &ha->iobase->isp24.rsp_q_in; 3288 rsp->rsp_q_out = &ha->iobase->isp24.rsp_q_out; 3289 if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) || 3290 IS_QLA28XX(ha)) { 3291 req->req_q_in = &ha->mqiobase->isp25mq.req_q_in; 3292 req->req_q_out = &ha->mqiobase->isp25mq.req_q_out; 3293 rsp->rsp_q_in = &ha->mqiobase->isp25mq.rsp_q_in; 3294 rsp->rsp_q_out = &ha->mqiobase->isp25mq.rsp_q_out; 3295 } 3296 3297 if (IS_QLAFX00(ha)) { 3298 req->req_q_in = &ha->iobase->ispfx00.req_q_in; 3299 req->req_q_out = &ha->iobase->ispfx00.req_q_out; 3300 rsp->rsp_q_in = &ha->iobase->ispfx00.rsp_q_in; 3301 rsp->rsp_q_out = &ha->iobase->ispfx00.rsp_q_out; 3302 } 3303 3304 if (IS_P3P_TYPE(ha)) { 3305 req->req_q_out = &ha->iobase->isp82.req_q_out[0]; 3306 rsp->rsp_q_in = &ha->iobase->isp82.rsp_q_in[0]; 3307 rsp->rsp_q_out = &ha->iobase->isp82.rsp_q_out[0]; 3308 } 3309 3310 ql_dbg(ql_dbg_multiq, base_vha, 0xc009, 3311 "rsp_q_map=%p req_q_map=%p rsp->req=%p req->rsp=%p.\n", 3312 ha->rsp_q_map, ha->req_q_map, rsp->req, req->rsp); 3313 ql_dbg(ql_dbg_multiq, base_vha, 0xc00a, 3314 "req->req_q_in=%p req->req_q_out=%p " 3315 "rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n", 3316 req->req_q_in, req->req_q_out, 3317 rsp->rsp_q_in, rsp->rsp_q_out); 3318 ql_dbg(ql_dbg_init, base_vha, 0x003e, 3319 "rsp_q_map=%p req_q_map=%p rsp->req=%p req->rsp=%p.\n", 3320 ha->rsp_q_map, ha->req_q_map, rsp->req, req->rsp); 3321 ql_dbg(ql_dbg_init, base_vha, 0x003f, 3322 "req->req_q_in=%p req->req_q_out=%p rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n", 3323 req->req_q_in, req->req_q_out, rsp->rsp_q_in, rsp->rsp_q_out); 3324 3325 ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 0); 3326 if (unlikely(!ha->wq)) { 3327 ret = -ENOMEM; 3328 goto probe_failed; 3329 } 3330 3331 if (ha->isp_ops->initialize_adapter(base_vha)) { 3332 ql_log(ql_log_fatal, base_vha, 0x00d6, 3333 "Failed to initialize adapter - Adapter flags %x.\n", 3334 base_vha->device_flags); 3335 3336 if (IS_QLA82XX(ha)) { 3337 qla82xx_idc_lock(ha); 3338 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 3339 QLA8XXX_DEV_FAILED); 3340 qla82xx_idc_unlock(ha); 3341 ql_log(ql_log_fatal, base_vha, 0x00d7, 3342 "HW State: FAILED.\n"); 3343 } else if (IS_QLA8044(ha)) { 3344 qla8044_idc_lock(ha); 3345 qla8044_wr_direct(base_vha, 3346 QLA8044_CRB_DEV_STATE_INDEX, 3347 QLA8XXX_DEV_FAILED); 3348 qla8044_idc_unlock(ha); 3349 ql_log(ql_log_fatal, base_vha, 0x0150, 3350 "HW State: FAILED.\n"); 3351 } 3352 3353 ret = -ENODEV; 3354 goto probe_failed; 3355 } 3356 3357 if (IS_QLAFX00(ha)) 3358 host->can_queue = QLAFX00_MAX_CANQUEUE; 3359 else 3360 host->can_queue = req->num_outstanding_cmds - 10; 3361 3362 ql_dbg(ql_dbg_init, base_vha, 0x0032, 3363 "can_queue=%d, req=%p, mgmt_svr_loop_id=%d, sg_tablesize=%d.\n", 3364 host->can_queue, base_vha->req, 3365 base_vha->mgmt_svr_loop_id, host->sg_tablesize); 3366 3367 if (ha->mqenable) { 3368 bool startit = false; 3369 3370 if (QLA_TGT_MODE_ENABLED()) 3371 startit = false; 3372 3373 if (ql2x_ini_mode == QLA2XXX_INI_MODE_ENABLED) 3374 startit = true; 3375 3376 /* Create start of day qpairs for Block MQ */ 3377 for (i = 0; i < ha->max_qpairs; i++) 3378 qla2xxx_create_qpair(base_vha, 5, 0, startit); 3379 } 3380 qla_init_iocb_limit(base_vha); 3381 3382 if (ha->flags.running_gold_fw) 3383 goto skip_dpc; 3384 3385 /* 3386 * Startup the kernel thread for this host adapter 3387 */ 3388 ha->dpc_thread = kthread_create(qla2x00_do_dpc, ha, 3389 "%s_dpc", 
base_vha->host_str); 3390 if (IS_ERR(ha->dpc_thread)) { 3391 ql_log(ql_log_fatal, base_vha, 0x00ed, 3392 "Failed to start DPC thread.\n"); 3393 ret = PTR_ERR(ha->dpc_thread); 3394 ha->dpc_thread = NULL; 3395 goto probe_failed; 3396 } 3397 ql_dbg(ql_dbg_init, base_vha, 0x00ee, 3398 "DPC thread started successfully.\n"); 3399 3400 /* 3401 * If we're not coming up in initiator mode, we might sit for 3402 * a while without waking up the dpc thread, which leads to a 3403 * stuck process warning. So just kick the dpc once here and 3404 * let the kthread start (and go back to sleep in qla2x00_do_dpc). 3405 */ 3406 qla2xxx_wake_dpc(base_vha); 3407 3408 INIT_WORK(&ha->board_disable, qla2x00_disable_board_on_pci_error); 3409 3410 if (IS_QLA8031(ha) || IS_MCTP_CAPABLE(ha)) { 3411 sprintf(wq_name, "qla2xxx_%lu_dpc_lp_wq", base_vha->host_no); 3412 ha->dpc_lp_wq = create_singlethread_workqueue(wq_name); 3413 INIT_WORK(&ha->idc_aen, qla83xx_service_idc_aen); 3414 3415 sprintf(wq_name, "qla2xxx_%lu_dpc_hp_wq", base_vha->host_no); 3416 ha->dpc_hp_wq = create_singlethread_workqueue(wq_name); 3417 INIT_WORK(&ha->nic_core_reset, qla83xx_nic_core_reset_work); 3418 INIT_WORK(&ha->idc_state_handler, 3419 qla83xx_idc_state_handler_work); 3420 INIT_WORK(&ha->nic_core_unrecoverable, 3421 qla83xx_nic_core_unrecoverable_work); 3422 } 3423 3424 skip_dpc: 3425 list_add_tail(&base_vha->list, &ha->vp_list); 3426 base_vha->host->irq = ha->pdev->irq; 3427 3428 /* Initialize the timer */ 3429 qla2x00_start_timer(base_vha, WATCH_INTERVAL); 3430 ql_dbg(ql_dbg_init, base_vha, 0x00ef, 3431 "Started qla2x00_timer with " 3432 "interval=%d.\n", WATCH_INTERVAL); 3433 ql_dbg(ql_dbg_init, base_vha, 0x00f0, 3434 "Detected hba at address=%p.\n", 3435 ha); 3436 3437 if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) { 3438 if (ha->fw_attributes & BIT_4) { 3439 int prot = 0, guard; 3440 3441 base_vha->flags.difdix_supported = 1; 3442 ql_dbg(ql_dbg_init, base_vha, 0x00f1, 3443 "Registering for DIF/DIX type 1 and 3 protection.\n"); 3444 if (ql2xenabledif == 1) 3445 prot = SHOST_DIX_TYPE0_PROTECTION; 3446 if (ql2xprotmask) 3447 scsi_host_set_prot(host, ql2xprotmask); 3448 else 3449 scsi_host_set_prot(host, 3450 prot | SHOST_DIF_TYPE1_PROTECTION 3451 | SHOST_DIF_TYPE2_PROTECTION 3452 | SHOST_DIF_TYPE3_PROTECTION 3453 | SHOST_DIX_TYPE1_PROTECTION 3454 | SHOST_DIX_TYPE2_PROTECTION 3455 | SHOST_DIX_TYPE3_PROTECTION); 3456 3457 guard = SHOST_DIX_GUARD_CRC; 3458 3459 if (IS_PI_IPGUARD_CAPABLE(ha) && 3460 (ql2xenabledif > 1 || IS_PI_DIFB_DIX0_CAPABLE(ha))) 3461 guard |= SHOST_DIX_GUARD_IP; 3462 3463 if (ql2xprotguard) 3464 scsi_host_set_guard(host, ql2xprotguard); 3465 else 3466 scsi_host_set_guard(host, guard); 3467 } else 3468 base_vha->flags.difdix_supported = 0; 3469 } 3470 3471 ha->isp_ops->enable_intrs(ha); 3472 3473 if (IS_QLAFX00(ha)) { 3474 ret = qlafx00_fx_disc(base_vha, 3475 &base_vha->hw->mr.fcport, FXDISC_GET_CONFIG_INFO); 3476 host->sg_tablesize = (ha->mr.extended_io_enabled) ?
3477 QLA_SG_ALL : 128; 3478 } 3479 3480 ret = scsi_add_host(host, &pdev->dev); 3481 if (ret) 3482 goto probe_failed; 3483 3484 base_vha->flags.init_done = 1; 3485 base_vha->flags.online = 1; 3486 ha->prev_minidump_failed = 0; 3487 3488 ql_dbg(ql_dbg_init, base_vha, 0x00f2, 3489 "Init done and hba is online.\n"); 3490 3491 if (qla_ini_mode_enabled(base_vha) || 3492 qla_dual_mode_enabled(base_vha)) 3493 scsi_scan_host(host); 3494 else 3495 ql_dbg(ql_dbg_init, base_vha, 0x0122, 3496 "skipping scsi_scan_host() for non-initiator port\n"); 3497 3498 qla2x00_alloc_sysfs_attr(base_vha); 3499 3500 if (IS_QLAFX00(ha)) { 3501 ret = qlafx00_fx_disc(base_vha, 3502 &base_vha->hw->mr.fcport, FXDISC_GET_PORT_INFO); 3503 3504 /* Register system information */ 3505 ret = qlafx00_fx_disc(base_vha, 3506 &base_vha->hw->mr.fcport, FXDISC_REG_HOST_INFO); 3507 } 3508 3509 qla2x00_init_host_attr(base_vha); 3510 3511 qla2x00_dfs_setup(base_vha); 3512 3513 ql_log(ql_log_info, base_vha, 0x00fb, 3514 "QLogic %s - %s.\n", ha->model_number, ha->model_desc); 3515 ql_log(ql_log_info, base_vha, 0x00fc, 3516 "ISP%04X: %s @ %s hdma%c host#=%ld fw=%s.\n", 3517 pdev->device, ha->isp_ops->pci_info_str(base_vha, pci_info, 3518 sizeof(pci_info)), 3519 pci_name(pdev), ha->flags.enable_64bit_addressing ? '+' : '-', 3520 base_vha->host_no, 3521 ha->isp_ops->fw_version_str(base_vha, fw_str, sizeof(fw_str))); 3522 3523 qlt_add_target(ha, base_vha); 3524 3525 clear_bit(PFLG_DRIVER_PROBING, &base_vha->pci_flags); 3526 3527 if (test_bit(UNLOADING, &base_vha->dpc_flags)) 3528 return -ENODEV; 3529 3530 return 0; 3531 3532 probe_failed: 3533 qla_enode_stop(base_vha); 3534 qla_edb_stop(base_vha); 3535 if (base_vha->gnl.l) { 3536 dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size, 3537 base_vha->gnl.l, base_vha->gnl.ldma); 3538 base_vha->gnl.l = NULL; 3539 } 3540 3541 if (base_vha->timer_active) 3542 qla2x00_stop_timer(base_vha); 3543 base_vha->flags.online = 0; 3544 if (ha->dpc_thread) { 3545 struct task_struct *t = ha->dpc_thread; 3546 3547 ha->dpc_thread = NULL; 3548 kthread_stop(t); 3549 } 3550 3551 qla2x00_free_device(base_vha); 3552 scsi_host_put(base_vha->host); 3553 /* 3554 * Need to NULL out local req/rsp after 3555 * qla2x00_free_device => qla2x00_free_queues frees 3556 * what these are pointing to. Or else we'll 3557 * fall over below in qla2x00_free_req/rsp_que. 
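 * With the locals cleared, the qla2x00_free_req_que()/
 * qla2x00_free_rsp_que() calls below turn into harmless no-ops rather
 * than double frees.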
3558 */ 3559 req = NULL; 3560 rsp = NULL; 3561 3562 probe_hw_failed: 3563 qla2x00_mem_free(ha); 3564 qla2x00_free_req_que(ha, req); 3565 qla2x00_free_rsp_que(ha, rsp); 3566 qla2x00_clear_drv_active(ha); 3567 3568 iospace_config_failed: 3569 if (IS_P3P_TYPE(ha)) { 3570 if (ha->nx_pcibase) 3571 iounmap((device_reg_t *)ha->nx_pcibase); 3572 if (!ql2xdbwr) 3573 iounmap((device_reg_t *)ha->nxdb_wr_ptr); 3574 } else { 3575 if (ha->iobase) 3576 iounmap(ha->iobase); 3577 if (ha->cregbase) 3578 iounmap(ha->cregbase); 3579 } 3580 pci_release_selected_regions(ha->pdev, ha->bars); 3581 kfree(ha); 3582 3583 disable_device: 3584 pci_disable_device(pdev); 3585 return ret; 3586 } 3587 3588 static void __qla_set_remove_flag(scsi_qla_host_t *base_vha) 3589 { 3590 scsi_qla_host_t *vp; 3591 unsigned long flags; 3592 struct qla_hw_data *ha; 3593 3594 if (!base_vha) 3595 return; 3596 3597 ha = base_vha->hw; 3598 3599 spin_lock_irqsave(&ha->vport_slock, flags); 3600 list_for_each_entry(vp, &ha->vp_list, list) 3601 set_bit(PFLG_DRIVER_REMOVING, &vp->pci_flags); 3602 3603 /* 3604 * Indicate device removal to prevent future board_disable 3605 * and wait until any pending board_disable has completed. 3606 */ 3607 set_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags); 3608 spin_unlock_irqrestore(&ha->vport_slock, flags); 3609 } 3610 3611 static void 3612 qla2x00_shutdown(struct pci_dev *pdev) 3613 { 3614 scsi_qla_host_t *vha; 3615 struct qla_hw_data *ha; 3616 3617 vha = pci_get_drvdata(pdev); 3618 ha = vha->hw; 3619 3620 ql_log(ql_log_info, vha, 0xfffa, 3621 "Adapter shutdown\n"); 3622 3623 /* 3624 * Prevent future board_disable and wait 3625 * until any pending board_disable has completed. 3626 */ 3627 __qla_set_remove_flag(vha); 3628 cancel_work_sync(&ha->board_disable); 3629 3630 if (!atomic_read(&pdev->enable_cnt)) 3631 return; 3632 3633 /* Notify ISPFX00 firmware */ 3634 if (IS_QLAFX00(ha)) 3635 qlafx00_driver_shutdown(vha, 20); 3636 3637 /* Turn-off FCE trace */ 3638 if (ha->flags.fce_enabled) { 3639 qla2x00_disable_fce_trace(vha, NULL, NULL); 3640 ha->flags.fce_enabled = 0; 3641 } 3642 3643 /* Turn-off EFT trace */ 3644 if (ha->eft) 3645 qla2x00_disable_eft_trace(vha); 3646 3647 if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) || 3648 IS_QLA28XX(ha)) { 3649 if (ha->flags.fw_started) 3650 qla2x00_abort_isp_cleanup(vha); 3651 } else { 3652 /* Stop currently executing firmware.
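 * This is a best-effort request; its return status is deliberately
 * ignored here since the adapter is going down either way.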
*/ 3653 qla2x00_try_to_stop_firmware(vha); 3654 } 3655 3656 /* Disable timer */ 3657 if (vha->timer_active) 3658 qla2x00_stop_timer(vha); 3659 3660 /* Turn adapter off line */ 3661 vha->flags.online = 0; 3662 3663 /* turn-off interrupts on the card */ 3664 if (ha->interrupts_on) { 3665 vha->flags.init_done = 0; 3666 ha->isp_ops->disable_intrs(ha); 3667 } 3668 3669 qla2x00_free_irqs(vha); 3670 3671 qla2x00_free_fw_dump(ha); 3672 3673 pci_disable_device(pdev); 3674 ql_log(ql_log_info, vha, 0xfffe, 3675 "Adapter shutdown successfully.\n"); 3676 } 3677 3678 /* Deletes all the virtual ports for a given ha */ 3679 static void 3680 qla2x00_delete_all_vps(struct qla_hw_data *ha, scsi_qla_host_t *base_vha) 3681 { 3682 scsi_qla_host_t *vha; 3683 unsigned long flags; 3684 3685 mutex_lock(&ha->vport_lock); 3686 while (ha->cur_vport_count) { 3687 spin_lock_irqsave(&ha->vport_slock, flags); 3688 3689 BUG_ON(base_vha->list.next == &ha->vp_list); 3690 /* This assumes first entry in ha->vp_list is always base vha */ 3691 vha = list_first_entry(&base_vha->list, scsi_qla_host_t, list); 3692 scsi_host_get(vha->host); 3693 3694 spin_unlock_irqrestore(&ha->vport_slock, flags); 3695 mutex_unlock(&ha->vport_lock); 3696 3697 qla_nvme_delete(vha); 3698 3699 fc_vport_terminate(vha->fc_vport); 3700 scsi_host_put(vha->host); 3701 3702 mutex_lock(&ha->vport_lock); 3703 } 3704 mutex_unlock(&ha->vport_lock); 3705 } 3706 3707 /* Stops all deferred work threads */ 3708 static void 3709 qla2x00_destroy_deferred_work(struct qla_hw_data *ha) 3710 { 3711 /* Cancel all work and destroy DPC workqueues */ 3712 if (ha->dpc_lp_wq) { 3713 cancel_work_sync(&ha->idc_aen); 3714 destroy_workqueue(ha->dpc_lp_wq); 3715 ha->dpc_lp_wq = NULL; 3716 } 3717 3718 if (ha->dpc_hp_wq) { 3719 cancel_work_sync(&ha->nic_core_reset); 3720 cancel_work_sync(&ha->idc_state_handler); 3721 cancel_work_sync(&ha->nic_core_unrecoverable); 3722 destroy_workqueue(ha->dpc_hp_wq); 3723 ha->dpc_hp_wq = NULL; 3724 } 3725 3726 /* Kill the kernel thread for this host */ 3727 if (ha->dpc_thread) { 3728 struct task_struct *t = ha->dpc_thread; 3729 3730 /* 3731 * qla2xxx_wake_dpc checks for ->dpc_thread 3732 * so we need to zero it out. 
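 * Clearing the pointer before kthread_stop() also keeps
 * qla2xxx_wake_dpc() from waking a thread that is in the middle of
 * being stopped.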
3733 */ 3734 ha->dpc_thread = NULL; 3735 kthread_stop(t); 3736 } 3737 } 3738 3739 static void 3740 qla2x00_unmap_iobases(struct qla_hw_data *ha) 3741 { 3742 if (IS_QLA82XX(ha)) { 3743 3744 iounmap((device_reg_t *)ha->nx_pcibase); 3745 if (!ql2xdbwr) 3746 iounmap((device_reg_t *)ha->nxdb_wr_ptr); 3747 } else { 3748 if (ha->iobase) 3749 iounmap(ha->iobase); 3750 3751 if (ha->cregbase) 3752 iounmap(ha->cregbase); 3753 3754 if (ha->mqiobase) 3755 iounmap(ha->mqiobase); 3756 3757 if ((IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) && 3758 ha->msixbase) 3759 iounmap(ha->msixbase); 3760 } 3761 } 3762 3763 static void 3764 qla2x00_clear_drv_active(struct qla_hw_data *ha) 3765 { 3766 if (IS_QLA8044(ha)) { 3767 qla8044_idc_lock(ha); 3768 qla8044_clear_drv_active(ha); 3769 qla8044_idc_unlock(ha); 3770 } else if (IS_QLA82XX(ha)) { 3771 qla82xx_idc_lock(ha); 3772 qla82xx_clear_drv_active(ha); 3773 qla82xx_idc_unlock(ha); 3774 } 3775 } 3776 3777 static void 3778 qla2x00_remove_one(struct pci_dev *pdev) 3779 { 3780 scsi_qla_host_t *base_vha; 3781 struct qla_hw_data *ha; 3782 3783 base_vha = pci_get_drvdata(pdev); 3784 ha = base_vha->hw; 3785 ql_log(ql_log_info, base_vha, 0xb079, 3786 "Removing driver\n"); 3787 __qla_set_remove_flag(base_vha); 3788 cancel_work_sync(&ha->board_disable); 3789 3790 /* 3791 * If the PCI device is disabled then there was a PCI-disconnect and 3792 * qla2x00_disable_board_on_pci_error has taken care of most of the 3793 * resources. 3794 */ 3795 if (!atomic_read(&pdev->enable_cnt)) { 3796 dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size, 3797 base_vha->gnl.l, base_vha->gnl.ldma); 3798 base_vha->gnl.l = NULL; 3799 scsi_host_put(base_vha->host); 3800 kfree(ha); 3801 pci_set_drvdata(pdev, NULL); 3802 return; 3803 } 3804 qla2x00_wait_for_hba_ready(base_vha); 3805 3806 /* 3807 * If the UNLOADING flag is already set, the unload is being handled 3808 * where the flag was first set; stop here rather than unloading twice.
3809 */ 3810 if (test_and_set_bit(UNLOADING, &base_vha->dpc_flags)) 3811 return; 3812 3813 if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) || 3814 IS_QLA28XX(ha)) { 3815 if (ha->flags.fw_started) 3816 qla2x00_abort_isp_cleanup(base_vha); 3817 } else if (!IS_QLAFX00(ha)) { 3818 if (IS_QLA8031(ha)) { 3819 ql_dbg(ql_dbg_p3p, base_vha, 0xb07e, 3820 "Clearing fcoe driver presence.\n"); 3821 if (qla83xx_clear_drv_presence(base_vha) != QLA_SUCCESS) 3822 ql_dbg(ql_dbg_p3p, base_vha, 0xb079, 3823 "Error while clearing DRV-Presence.\n"); 3824 } 3825 3826 qla2x00_try_to_stop_firmware(base_vha); 3827 } 3828 3829 qla2x00_wait_for_sess_deletion(base_vha); 3830 3831 qla_nvme_delete(base_vha); 3832 3833 dma_free_coherent(&ha->pdev->dev, 3834 base_vha->gnl.size, base_vha->gnl.l, base_vha->gnl.ldma); 3835 3836 base_vha->gnl.l = NULL; 3837 qla_enode_stop(base_vha); 3838 qla_edb_stop(base_vha); 3839 3840 vfree(base_vha->scan.l); 3841 3842 if (IS_QLAFX00(ha)) 3843 qlafx00_driver_shutdown(base_vha, 20); 3844 3845 qla2x00_delete_all_vps(ha, base_vha); 3846 3847 qla2x00_dfs_remove(base_vha); 3848 3849 qla84xx_put_chip(base_vha); 3850 3851 /* Disable timer */ 3852 if (base_vha->timer_active) 3853 qla2x00_stop_timer(base_vha); 3854 3855 base_vha->flags.online = 0; 3856 3857 /* free DMA memory */ 3858 if (ha->exlogin_buf) 3859 qla2x00_free_exlogin_buffer(ha); 3860 3861 /* free DMA memory */ 3862 if (ha->exchoffld_buf) 3863 qla2x00_free_exchoffld_buffer(ha); 3864 3865 qla2x00_destroy_deferred_work(ha); 3866 3867 qlt_remove_target(ha, base_vha); 3868 3869 qla2x00_free_sysfs_attr(base_vha, true); 3870 3871 fc_remove_host(base_vha->host); 3872 3873 scsi_remove_host(base_vha->host); 3874 3875 qla2x00_free_device(base_vha); 3876 3877 qla2x00_clear_drv_active(ha); 3878 3879 scsi_host_put(base_vha->host); 3880 3881 qla2x00_unmap_iobases(ha); 3882 3883 pci_release_selected_regions(ha->pdev, ha->bars); 3884 kfree(ha); 3885 3886 pci_disable_pcie_error_reporting(pdev); 3887 3888 pci_disable_device(pdev); 3889 } 3890 3891 static inline void 3892 qla24xx_free_purex_list(struct purex_list *list) 3893 { 3894 struct list_head *item, *next; 3895 ulong flags; 3896 3897 spin_lock_irqsave(&list->lock, flags); 3898 list_for_each_safe(item, next, &list->head) { 3899 list_del(item); 3900 kfree(list_entry(item, struct purex_item, list)); 3901 } 3902 spin_unlock_irqrestore(&list->lock, flags); 3903 } 3904 3905 static void 3906 qla2x00_free_device(scsi_qla_host_t *vha) 3907 { 3908 struct qla_hw_data *ha = vha->hw; 3909 3910 qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16); 3911 3912 /* Disable timer */ 3913 if (vha->timer_active) 3914 qla2x00_stop_timer(vha); 3915 3916 qla25xx_delete_queues(vha); 3917 vha->flags.online = 0; 3918 3919 /* turn-off interrupts on the card */ 3920 if (ha->interrupts_on) { 3921 vha->flags.init_done = 0; 3922 ha->isp_ops->disable_intrs(ha); 3923 } 3924 3925 qla2x00_free_fcports(vha); 3926 3927 qla2x00_free_irqs(vha); 3928 3929 /* Flush the work queue and remove it */ 3930 if (ha->wq) { 3931 flush_workqueue(ha->wq); 3932 destroy_workqueue(ha->wq); 3933 ha->wq = NULL; 3934 } 3935 3936 3937 qla24xx_free_purex_list(&vha->purex_list); 3938 3939 qla2x00_mem_free(ha); 3940 3941 qla82xx_md_free(vha); 3942 3943 qla_edif_sadb_release_free_pool(ha); 3944 qla_edif_sadb_release(ha); 3945 3946 qla2x00_free_queues(ha); 3947 } 3948 3949 void qla2x00_free_fcports(struct scsi_qla_host *vha) 3950 { 3951 fc_port_t *fcport, *tfcport; 3952 3953 list_for_each_entry_safe(fcport, tfcport, &vha->vp_fcports, list) 3954 
qla2x00_free_fcport(fcport); 3955 } 3956 3957 static inline void 3958 qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport) 3959 { 3960 int now; 3961 3962 if (!fcport->rport) 3963 return; 3964 3966 ql_dbg(ql_dbg_disc, fcport->vha, 0x2109, 3967 "%s %8phN. rport %p roles %x\n", 3968 __func__, fcport->port_name, fcport->rport, 3969 fcport->rport->roles); 3970 fc_remote_port_delete(fcport->rport); 3972 qlt_do_generation_tick(vha, &now); 3973 } 3974 3975 /* 3976 * qla2x00_mark_device_lost - Update fcport state when a device goes offline. 3977 * 3978 * Input: vha = adapter block pointer. fcport = port structure pointer. 3979 * do_login = non-zero to schedule a relogin attempt. 3980 * 3981 * Return: None. 3983 */ 3984 void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport, 3985 int do_login) 3986 { 3987 if (IS_QLAFX00(vha->hw)) { 3988 qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST); 3989 qla2x00_schedule_rport_del(vha, fcport); 3990 return; 3991 } 3992 3993 if (atomic_read(&fcport->state) == FCS_ONLINE && 3994 vha->vp_idx == fcport->vha->vp_idx) { 3995 qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST); 3996 qla2x00_schedule_rport_del(vha, fcport); 3997 } 3998 3999 /* 4000 * We may need to retry the login, so don't change the state of the 4001 * port but do the retries. 4002 */ 4003 if (atomic_read(&fcport->state) != FCS_DEVICE_DEAD) 4004 qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST); 4005 4006 if (!do_login) 4007 return; 4008 4009 set_bit(RELOGIN_NEEDED, &vha->dpc_flags); 4010 } 4011 4012 void 4013 qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha) 4014 { 4015 fc_port_t *fcport; 4016 4017 ql_dbg(ql_dbg_disc, vha, 0x20f1, 4018 "Mark all dev lost\n"); 4019 4020 list_for_each_entry(fcport, &vha->vp_fcports, list) { 4021 if (fcport->loop_id != FC_NO_LOOP_ID && 4022 (fcport->flags & FCF_FCP2_DEVICE) && 4023 fcport->port_type == FCT_TARGET && 4024 !qla2x00_reset_active(vha)) { 4025 ql_dbg(ql_dbg_disc, vha, 0x211a, 4026 "Delaying session delete for FCP2 flags 0x%x port_type = 0x%x port_id=%06x %phC", 4027 fcport->flags, fcport->port_type, 4028 fcport->d_id.b24, fcport->port_name); 4029 continue; 4030 } 4031 fcport->scan_state = 0; 4032 qlt_schedule_sess_for_deletion(fcport); 4033 } 4034 } 4035 4036 static void qla2x00_set_reserved_loop_ids(struct qla_hw_data *ha) 4037 { 4038 int i; 4039 4040 if (IS_FWI2_CAPABLE(ha)) 4041 return; 4042 4043 for (i = 0; i < SNS_FIRST_LOOP_ID; i++) 4044 set_bit(i, ha->loop_id_map); 4045 set_bit(MANAGEMENT_SERVER, ha->loop_id_map); 4046 set_bit(BROADCAST, ha->loop_id_map); 4047 } 4048 4049 /* 4050 * qla2x00_mem_alloc 4051 * Allocates adapter memory. 4052 * 4053 * Returns: 4054 * 0 = success. 4055 * !0 = failure.
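 *
 * Note: buffers are allocated in dependency order and, on error, are
 * unwound in reverse through the fail_* labels below, so every
 * successful allocation has exactly one matching cleanup.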
4056 */ 4057 static int 4058 qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len, 4059 struct req_que **req, struct rsp_que **rsp) 4060 { 4061 char name[16]; 4062 int rc; 4063 4064 ha->init_cb = dma_alloc_coherent(&ha->pdev->dev, ha->init_cb_size, 4065 &ha->init_cb_dma, GFP_KERNEL); 4066 if (!ha->init_cb) 4067 goto fail; 4068 4069 rc = btree_init32(&ha->host_map); 4070 if (rc) 4071 goto fail_free_init_cb; 4072 4073 if (qlt_mem_alloc(ha) < 0) 4074 goto fail_free_btree; 4075 4076 ha->gid_list = dma_alloc_coherent(&ha->pdev->dev, 4077 qla2x00_gid_list_size(ha), &ha->gid_list_dma, GFP_KERNEL); 4078 if (!ha->gid_list) 4079 goto fail_free_tgt_mem; 4080 4081 ha->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep); 4082 if (!ha->srb_mempool) 4083 goto fail_free_gid_list; 4084 4085 if (IS_P3P_TYPE(ha) || IS_QLA27XX(ha) || (ql2xsecenable && IS_QLA28XX(ha))) { 4086 /* Allocate cache for CT6 Ctx. */ 4087 if (!ctx_cachep) { 4088 ctx_cachep = kmem_cache_create("qla2xxx_ctx", 4089 sizeof(struct ct6_dsd), 0, 4090 SLAB_HWCACHE_ALIGN, NULL); 4091 if (!ctx_cachep) 4092 goto fail_free_srb_mempool; 4093 } 4094 ha->ctx_mempool = mempool_create_slab_pool(SRB_MIN_REQ, 4095 ctx_cachep); 4096 if (!ha->ctx_mempool) 4097 goto fail_free_srb_mempool; 4098 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0021, 4099 "ctx_cachep=%p ctx_mempool=%p.\n", 4100 ctx_cachep, ha->ctx_mempool); 4101 } 4102 4103 /* Get memory for cached NVRAM */ 4104 ha->nvram = kzalloc(MAX_NVRAM_SIZE, GFP_KERNEL); 4105 if (!ha->nvram) 4106 goto fail_free_ctx_mempool; 4107 4108 snprintf(name, sizeof(name), "%s_%d", QLA2XXX_DRIVER_NAME, 4109 ha->pdev->device); 4110 ha->s_dma_pool = dma_pool_create(name, &ha->pdev->dev, 4111 DMA_POOL_SIZE, 8, 0); 4112 if (!ha->s_dma_pool) 4113 goto fail_free_nvram; 4114 4115 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0022, 4116 "init_cb=%p gid_list=%p, srb_mempool=%p s_dma_pool=%p.\n", 4117 ha->init_cb, ha->gid_list, ha->srb_mempool, ha->s_dma_pool); 4118 4119 if (IS_P3P_TYPE(ha) || ql2xenabledif || (IS_QLA28XX(ha) && ql2xsecenable)) { 4120 ha->dl_dma_pool = dma_pool_create(name, &ha->pdev->dev, 4121 DSD_LIST_DMA_POOL_SIZE, 8, 0); 4122 if (!ha->dl_dma_pool) { 4123 ql_log_pci(ql_log_fatal, ha->pdev, 0x0023, 4124 "Failed to allocate memory for dl_dma_pool.\n"); 4125 goto fail_s_dma_pool; 4126 } 4127 4128 ha->fcp_cmnd_dma_pool = dma_pool_create(name, &ha->pdev->dev, 4129 FCP_CMND_DMA_POOL_SIZE, 8, 0); 4130 if (!ha->fcp_cmnd_dma_pool) { 4131 ql_log_pci(ql_log_fatal, ha->pdev, 0x0024, 4132 "Failed to allocate memory for fcp_cmnd_dma_pool.\n"); 4133 goto fail_dl_dma_pool; 4134 } 4135 4136 if (ql2xenabledif) { 4137 u64 bufsize = DIF_BUNDLING_DMA_POOL_SIZE; 4138 struct dsd_dma *dsd, *nxt; 4139 uint i; 4140 /* Create a DMA pool of buffers for DIF bundling */ 4141 ha->dif_bundl_pool = dma_pool_create(name, 4142 &ha->pdev->dev, DIF_BUNDLING_DMA_POOL_SIZE, 8, 0); 4143 if (!ha->dif_bundl_pool) { 4144 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0024, 4145 "%s: failed create dif_bundl_pool\n", 4146 __func__); 4147 goto fail_dif_bundl_dma_pool; 4148 } 4149 4150 INIT_LIST_HEAD(&ha->pool.good.head); 4151 INIT_LIST_HEAD(&ha->pool.unusable.head); 4152 ha->pool.good.count = 0; 4153 ha->pool.unusable.count = 0; 4154 for (i = 0; i < 128; i++) { 4155 dsd = kzalloc(sizeof(*dsd), GFP_ATOMIC); 4156 if (!dsd) { 4157 ql_dbg_pci(ql_dbg_init, ha->pdev, 4158 0xe0ee, "%s: failed alloc dsd\n", 4159 __func__); 4160 return 1; 4161 } 4162 ha->dif_bundle_kallocs++; 4163 4164 dsd->dsd_addr = dma_pool_alloc( 4165 ha->dif_bundl_pool, GFP_ATOMIC,
4166 &dsd->dsd_list_dma); 4167 if (!dsd->dsd_addr) { 4168 ql_dbg_pci(ql_dbg_init, ha->pdev, 4169 0xe0ee, 4170 "%s: failed alloc ->dsd_addr\n", 4171 __func__); 4172 kfree(dsd); 4173 ha->dif_bundle_kallocs--; 4174 continue; 4175 } 4176 ha->dif_bundle_dma_allocs++; 4177 4178 /* 4179 * if DMA buffer crosses 4G boundary, 4180 * put it on bad list 4181 */ 4182 if (MSD(dsd->dsd_list_dma) ^ 4183 MSD(dsd->dsd_list_dma + bufsize)) { 4184 list_add_tail(&dsd->list, 4185 &ha->pool.unusable.head); 4186 ha->pool.unusable.count++; 4187 } else { 4188 list_add_tail(&dsd->list, 4189 &ha->pool.good.head); 4190 ha->pool.good.count++; 4191 } 4192 } 4193 4194 /* return the good ones back to the pool */ 4195 list_for_each_entry_safe(dsd, nxt, 4196 &ha->pool.good.head, list) { 4197 list_del(&dsd->list); 4198 dma_pool_free(ha->dif_bundl_pool, 4199 dsd->dsd_addr, dsd->dsd_list_dma); 4200 ha->dif_bundle_dma_allocs--; 4201 kfree(dsd); 4202 ha->dif_bundle_kallocs--; 4203 } 4204 4205 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0024, 4206 "%s: dif dma pool (good=%u unusable=%u)\n", 4207 __func__, ha->pool.good.count, 4208 ha->pool.unusable.count); 4209 } 4210 4211 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0025, 4212 "dl_dma_pool=%p fcp_cmnd_dma_pool=%p dif_bundl_pool=%p.\n", 4213 ha->dl_dma_pool, ha->fcp_cmnd_dma_pool, 4214 ha->dif_bundl_pool); 4215 } 4216 4217 /* Allocate memory for SNS commands */ 4218 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 4219 /* Get consistent memory allocated for SNS commands */ 4220 ha->sns_cmd = dma_alloc_coherent(&ha->pdev->dev, 4221 sizeof(struct sns_cmd_pkt), &ha->sns_cmd_dma, GFP_KERNEL); 4222 if (!ha->sns_cmd) 4223 goto fail_dma_pool; 4224 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0026, 4225 "sns_cmd: %p.\n", ha->sns_cmd); 4226 } else { 4227 /* Get consistent memory allocated for MS IOCB */ 4228 ha->ms_iocb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, 4229 &ha->ms_iocb_dma); 4230 if (!ha->ms_iocb) 4231 goto fail_dma_pool; 4232 /* Get consistent memory allocated for CT SNS commands */ 4233 ha->ct_sns = dma_alloc_coherent(&ha->pdev->dev, 4234 sizeof(struct ct_sns_pkt), &ha->ct_sns_dma, GFP_KERNEL); 4235 if (!ha->ct_sns) 4236 goto fail_free_ms_iocb; 4237 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0027, 4238 "ms_iocb=%p ct_sns=%p.\n", 4239 ha->ms_iocb, ha->ct_sns); 4240 } 4241 4242 /* Allocate memory for request ring */ 4243 *req = kzalloc(sizeof(struct req_que), GFP_KERNEL); 4244 if (!*req) { 4245 ql_log_pci(ql_log_fatal, ha->pdev, 0x0028, 4246 "Failed to allocate memory for req.\n"); 4247 goto fail_req; 4248 } 4249 (*req)->length = req_len; 4250 (*req)->ring = dma_alloc_coherent(&ha->pdev->dev, 4251 ((*req)->length + 1) * sizeof(request_t), 4252 &(*req)->dma, GFP_KERNEL); 4253 if (!(*req)->ring) { 4254 ql_log_pci(ql_log_fatal, ha->pdev, 0x0029, 4255 "Failed to allocate memory for req_ring.\n"); 4256 goto fail_req_ring; 4257 } 4258 /* Allocate memory for response ring */ 4259 *rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL); 4260 if (!*rsp) { 4261 ql_log_pci(ql_log_fatal, ha->pdev, 0x002a, 4262 "Failed to allocate memory for rsp.\n"); 4263 goto fail_rsp; 4264 } 4265 (*rsp)->hw = ha; 4266 (*rsp)->length = rsp_len; 4267 (*rsp)->ring = dma_alloc_coherent(&ha->pdev->dev, 4268 ((*rsp)->length + 1) * sizeof(response_t), 4269 &(*rsp)->dma, GFP_KERNEL); 4270 if (!(*rsp)->ring) { 4271 ql_log_pci(ql_log_fatal, ha->pdev, 0x002b, 4272 "Failed to allocate memory for rsp_ring.\n"); 4273 goto fail_rsp_ring; 4274 } 4275 (*req)->rsp = *rsp; 4276 (*rsp)->req = *req; 4277 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002c, 4278 "req=%p 
req->length=%d req->ring=%p rsp=%p " 4279 "rsp->length=%d rsp->ring=%p.\n", 4280 *req, (*req)->length, (*req)->ring, *rsp, (*rsp)->length, 4281 (*rsp)->ring); 4282 /* Allocate memory for NVRAM data for vports */ 4283 if (ha->nvram_npiv_size) { 4284 ha->npiv_info = kcalloc(ha->nvram_npiv_size, 4285 sizeof(struct qla_npiv_entry), 4286 GFP_KERNEL); 4287 if (!ha->npiv_info) { 4288 ql_log_pci(ql_log_fatal, ha->pdev, 0x002d, 4289 "Failed to allocate memory for npiv_info.\n"); 4290 goto fail_npiv_info; 4291 } 4292 } else 4293 ha->npiv_info = NULL; 4294 4295 /* Get consistent memory allocated for EX-INIT-CB. */ 4296 if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) || 4297 IS_QLA28XX(ha)) { 4298 ha->ex_init_cb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, 4299 &ha->ex_init_cb_dma); 4300 if (!ha->ex_init_cb) 4301 goto fail_ex_init_cb; 4302 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002e, 4303 "ex_init_cb=%p.\n", ha->ex_init_cb); 4304 } 4305 4306 /* Get consistent memory allocated for Special Features-CB. */ 4307 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 4308 ha->sf_init_cb = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, 4309 &ha->sf_init_cb_dma); 4310 if (!ha->sf_init_cb) 4311 goto fail_sf_init_cb; 4312 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0199, 4313 "sf_init_cb=%p.\n", ha->sf_init_cb); 4314 } 4315 4316 INIT_LIST_HEAD(&ha->gbl_dsd_list); 4317 4318 /* Get consistent memory allocated for Async Port-Database. */ 4319 if (!IS_FWI2_CAPABLE(ha)) { 4320 ha->async_pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, 4321 &ha->async_pd_dma); 4322 if (!ha->async_pd) 4323 goto fail_async_pd; 4324 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002f, 4325 "async_pd=%p.\n", ha->async_pd); 4326 } 4327 4328 INIT_LIST_HEAD(&ha->vp_list); 4329 4330 /* Allocate memory for our loop_id bitmap */ 4331 ha->loop_id_map = kcalloc(BITS_TO_LONGS(LOOPID_MAP_SIZE), 4332 sizeof(long), 4333 GFP_KERNEL); 4334 if (!ha->loop_id_map) 4335 goto fail_loop_id_map; 4336 else { 4337 qla2x00_set_reserved_loop_ids(ha); 4338 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0123, 4339 "loop_id_map=%p.\n", ha->loop_id_map); 4340 } 4341 4342 ha->sfp_data = dma_alloc_coherent(&ha->pdev->dev, 4343 SFP_DEV_SIZE, &ha->sfp_data_dma, GFP_KERNEL); 4344 if (!ha->sfp_data) { 4345 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011b, 4346 "Unable to allocate memory for SFP read-data.\n"); 4347 goto fail_sfp_data; 4348 } 4349 4350 ha->flt = dma_alloc_coherent(&ha->pdev->dev, 4351 sizeof(struct qla_flt_header) + FLT_REGIONS_SIZE, &ha->flt_dma, 4352 GFP_KERNEL); 4353 if (!ha->flt) { 4354 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011b, 4355 "Unable to allocate memory for FLT.\n"); 4356 goto fail_flt_buffer; 4357 } 4358 4359 /* allocate the purex dma pool */ 4360 ha->purex_dma_pool = dma_pool_create(name, &ha->pdev->dev, 4361 MAX_PAYLOAD, 8, 0); 4362 4363 if (!ha->purex_dma_pool) { 4364 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011b, 4365 "Unable to allocate purex_dma_pool.\n"); 4366 goto fail_flt; 4367 } 4368 4369 ha->elsrej.size = sizeof(struct fc_els_ls_rjt) + 16; 4370 ha->elsrej.c = dma_alloc_coherent(&ha->pdev->dev, 4371 ha->elsrej.size, &ha->elsrej.cdma, GFP_KERNEL); 4372 4373 if (!ha->elsrej.c) { 4374 ql_dbg_pci(ql_dbg_init, ha->pdev, 0xffff, 4375 "Alloc failed for els reject cmd.\n"); 4376 goto fail_elsrej; 4377 } 4378 ha->elsrej.c->er_cmd = ELS_LS_RJT; 4379 ha->elsrej.c->er_reason = ELS_RJT_LOGIC; 4380 ha->elsrej.c->er_explan = ELS_EXPL_UNAB_DATA; 4381 return 0; 4382 4383 fail_elsrej: 4384 dma_pool_destroy(ha->purex_dma_pool); 4385 fail_flt: 4386 dma_free_coherent(&ha->pdev->dev, sizeof(struct qla_flt_header) + FLT_REGIONS_SIZE, 4387
ha->flt, ha->flt_dma); 4388 4389 fail_flt_buffer: 4390 dma_free_coherent(&ha->pdev->dev, SFP_DEV_SIZE, 4391 ha->sfp_data, ha->sfp_data_dma); 4392 fail_sfp_data: 4393 kfree(ha->loop_id_map); 4394 fail_loop_id_map: 4395 dma_pool_free(ha->s_dma_pool, ha->async_pd, ha->async_pd_dma); 4396 fail_async_pd: 4397 dma_pool_free(ha->s_dma_pool, ha->sf_init_cb, ha->sf_init_cb_dma); 4398 fail_sf_init_cb: 4399 dma_pool_free(ha->s_dma_pool, ha->ex_init_cb, ha->ex_init_cb_dma); 4400 fail_ex_init_cb: 4401 kfree(ha->npiv_info); 4402 fail_npiv_info: 4403 dma_free_coherent(&ha->pdev->dev, ((*rsp)->length + 1) * 4404 sizeof(response_t), (*rsp)->ring, (*rsp)->dma); 4405 (*rsp)->ring = NULL; 4406 (*rsp)->dma = 0; 4407 fail_rsp_ring: 4408 kfree(*rsp); 4409 *rsp = NULL; 4410 fail_rsp: 4411 dma_free_coherent(&ha->pdev->dev, ((*req)->length + 1) * 4412 sizeof(request_t), (*req)->ring, (*req)->dma); 4413 (*req)->ring = NULL; 4414 (*req)->dma = 0; 4415 fail_req_ring: 4416 kfree(*req); 4417 *req = NULL; 4418 fail_req: 4419 dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt), 4420 ha->ct_sns, ha->ct_sns_dma); 4421 ha->ct_sns = NULL; 4422 ha->ct_sns_dma = 0; 4423 fail_free_ms_iocb: 4424 dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma); 4425 ha->ms_iocb = NULL; 4426 ha->ms_iocb_dma = 0; 4427 4428 if (ha->sns_cmd) 4429 dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt), 4430 ha->sns_cmd, ha->sns_cmd_dma); 4431 fail_dma_pool: 4432 if (ql2xenabledif) { 4433 struct dsd_dma *dsd, *nxt; 4434 4435 list_for_each_entry_safe(dsd, nxt, &ha->pool.unusable.head, 4436 list) { 4437 list_del(&dsd->list); 4438 dma_pool_free(ha->dif_bundl_pool, dsd->dsd_addr, 4439 dsd->dsd_list_dma); 4440 ha->dif_bundle_dma_allocs--; 4441 kfree(dsd); 4442 ha->dif_bundle_kallocs--; 4443 ha->pool.unusable.count--; 4444 } 4445 dma_pool_destroy(ha->dif_bundl_pool); 4446 ha->dif_bundl_pool = NULL; 4447 } 4448 4449 fail_dif_bundl_dma_pool: 4450 if (IS_QLA82XX(ha) || ql2xenabledif) { 4451 dma_pool_destroy(ha->fcp_cmnd_dma_pool); 4452 ha->fcp_cmnd_dma_pool = NULL; 4453 } 4454 fail_dl_dma_pool: 4455 if (IS_QLA82XX(ha) || ql2xenabledif) { 4456 dma_pool_destroy(ha->dl_dma_pool); 4457 ha->dl_dma_pool = NULL; 4458 } 4459 fail_s_dma_pool: 4460 dma_pool_destroy(ha->s_dma_pool); 4461 ha->s_dma_pool = NULL; 4462 fail_free_nvram: 4463 kfree(ha->nvram); 4464 ha->nvram = NULL; 4465 fail_free_ctx_mempool: 4466 mempool_destroy(ha->ctx_mempool); 4467 ha->ctx_mempool = NULL; 4468 fail_free_srb_mempool: 4469 mempool_destroy(ha->srb_mempool); 4470 ha->srb_mempool = NULL; 4471 fail_free_gid_list: 4472 dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha), 4473 ha->gid_list, 4474 ha->gid_list_dma); 4475 ha->gid_list = NULL; 4476 ha->gid_list_dma = 0; 4477 fail_free_tgt_mem: 4478 qlt_mem_free(ha); 4479 fail_free_btree: 4480 btree_destroy32(&ha->host_map); 4481 fail_free_init_cb: 4482 dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, ha->init_cb, 4483 ha->init_cb_dma); 4484 ha->init_cb = NULL; 4485 ha->init_cb_dma = 0; 4486 fail: 4487 ql_log(ql_log_fatal, NULL, 0x0030, 4488 "Memory allocation failure.\n"); 4489 return -ENOMEM; 4490 } 4491 4492 int 4493 qla2x00_set_exlogins_buffer(scsi_qla_host_t *vha) 4494 { 4495 int rval; 4496 uint16_t size, max_cnt; 4497 uint32_t temp; 4498 struct qla_hw_data *ha = vha->hw; 4499 4500 /* Return if we don't need to allocate any extended logins */ 4501 if (ql2xexlogins <= MAX_FIBRE_DEVICES_2400) 4502 return QLA_SUCCESS; 4503 4504 if (!IS_EXLOGIN_OFFLD_CAPABLE(ha)) 4505 return QLA_SUCCESS; 4506 4507
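/*
 * Sizing sketch (the per-entry size comes from qla_get_exlogin_status()
 * below): buffer bytes = min(ql2xexlogins, max_cnt) * size; the buffer
 * is reallocated only when that total changes.
 */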
ql_log(ql_log_info, vha, 0xd021, "EXLOGIN count: %d.\n", ql2xexlogins); 4508 max_cnt = 0; 4509 rval = qla_get_exlogin_status(vha, &size, &max_cnt); 4510 if (rval != QLA_SUCCESS) { 4511 ql_log_pci(ql_log_fatal, ha->pdev, 0xd029, 4512 "Failed to get exlogin status.\n"); 4513 return rval; 4514 } 4515 4516 temp = (ql2xexlogins > max_cnt) ? max_cnt : ql2xexlogins; 4517 temp *= size; 4518 4519 if (temp != ha->exlogin_size) { 4520 qla2x00_free_exlogin_buffer(ha); 4521 ha->exlogin_size = temp; 4522 4523 ql_log(ql_log_info, vha, 0xd024, 4524 "EXLOGIN: max_logins=%d, portdb=0x%x, total=%d.\n", 4525 max_cnt, size, temp); 4526 4527 ql_log(ql_log_info, vha, 0xd025, 4528 "EXLOGIN: requested size=0x%x\n", ha->exlogin_size); 4529 4530 /* Get consistent memory for extended logins */ 4531 ha->exlogin_buf = dma_alloc_coherent(&ha->pdev->dev, 4532 ha->exlogin_size, &ha->exlogin_buf_dma, GFP_KERNEL); 4533 if (!ha->exlogin_buf) { 4534 ql_log_pci(ql_log_fatal, ha->pdev, 0xd02a, 4535 "Failed to allocate memory for exlogin_buf_dma.\n"); 4536 return -ENOMEM; 4537 } 4538 } 4539 4540 /* Now configure the dma buffer */ 4541 rval = qla_set_exlogin_mem_cfg(vha, ha->exlogin_buf_dma); 4542 if (rval) { 4543 ql_log(ql_log_fatal, vha, 0xd033, 4544 "Setup extended login buffer ****FAILED****.\n"); 4545 qla2x00_free_exlogin_buffer(ha); 4546 } 4547 4548 return rval; 4549 } 4550 4551 /* 4552 * qla2x00_free_exlogin_buffer 4553 * 4554 * Input: 4555 * ha = adapter block pointer 4556 */ 4557 void 4558 qla2x00_free_exlogin_buffer(struct qla_hw_data *ha) 4559 { 4560 if (ha->exlogin_buf) { 4561 dma_free_coherent(&ha->pdev->dev, ha->exlogin_size, 4562 ha->exlogin_buf, ha->exlogin_buf_dma); 4563 ha->exlogin_buf = NULL; 4564 ha->exlogin_size = 0; 4565 } 4566 } 4567 4568 static void 4569 qla2x00_number_of_exch(scsi_qla_host_t *vha, u32 *ret_cnt, u16 max_cnt) 4570 { 4571 u32 temp; 4572 struct init_cb_81xx *icb = (struct init_cb_81xx *)vha->hw->init_cb; 4573 *ret_cnt = FW_DEF_EXCHANGES_CNT; 4574 4575 if (max_cnt > vha->hw->max_exchg) 4576 max_cnt = vha->hw->max_exchg; 4577 4578 if (qla_ini_mode_enabled(vha)) { 4579 if (vha->ql2xiniexchg > max_cnt) 4580 vha->ql2xiniexchg = max_cnt; 4581 4582 if (vha->ql2xiniexchg > FW_DEF_EXCHANGES_CNT) 4583 *ret_cnt = vha->ql2xiniexchg; 4584 4585 } else if (qla_tgt_mode_enabled(vha)) { 4586 if (vha->ql2xexchoffld > max_cnt) { 4587 vha->ql2xexchoffld = max_cnt; 4588 icb->exchange_count = cpu_to_le16(vha->ql2xexchoffld); 4589 } 4590 4591 if (vha->ql2xexchoffld > FW_DEF_EXCHANGES_CNT) 4592 *ret_cnt = vha->ql2xexchoffld; 4593 } else if (qla_dual_mode_enabled(vha)) { 4594 temp = vha->ql2xiniexchg + vha->ql2xexchoffld; 4595 if (temp > max_cnt) { 4596 vha->ql2xiniexchg -= (temp - max_cnt)/2; 4597 vha->ql2xexchoffld -= (((temp - max_cnt)/2) + 1); 4598 temp = max_cnt; 4599 icb->exchange_count = cpu_to_le16(vha->ql2xexchoffld); 4600 } 4601 4602 if (temp > FW_DEF_EXCHANGES_CNT) 4603 *ret_cnt = temp; 4604 } 4605 } 4606 4607 int 4608 qla2x00_set_exchoffld_buffer(scsi_qla_host_t *vha) 4609 { 4610 int rval; 4611 u16 size, max_cnt; 4612 u32 actual_cnt, totsz; 4613 struct qla_hw_data *ha = vha->hw; 4614 4615 if (!ha->flags.exchoffld_enabled) 4616 return QLA_SUCCESS; 4617 4618 if (!IS_EXCHG_OFFLD_CAPABLE(ha)) 4619 return QLA_SUCCESS; 4620 4621 max_cnt = 0; 4622 rval = qla_get_exchoffld_status(vha, &size, &max_cnt); 4623 if (rval != QLA_SUCCESS) { 4624 ql_log_pci(ql_log_fatal, ha->pdev, 0xd012, 4625 "Failed to get exchange offload status.\n"); 4626 return rval; 4627 } 4628 4629 qla2x00_number_of_exch(vha, &actual_cnt, max_cnt); 4630
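/*
 * Total size = negotiated exchange count * per-exchange entry size
 * reported by the firmware; e.g. 4096 exchanges at a 64-byte entry size
 * would need 256 KiB (illustrative numbers only, not ISP-specific).
 */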
ql_log(ql_log_info, vha, 0xd014, 4631 "Actual exchange offload count: %d.\n", actual_cnt); 4632 4633 totsz = actual_cnt * size; 4634 4635 if (totsz != ha->exchoffld_size) { 4636 qla2x00_free_exchoffld_buffer(ha); 4637 if (actual_cnt <= FW_DEF_EXCHANGES_CNT) { 4638 ha->exchoffld_size = 0; 4639 ha->flags.exchoffld_enabled = 0; 4640 return QLA_SUCCESS; 4641 } 4642 4643 ha->exchoffld_size = totsz; 4644 4645 ql_log(ql_log_info, vha, 0xd016, 4646 "Exchange offload: max_count=%d, actual count=%d entry sz=0x%x, total sz=0x%x\n", 4647 max_cnt, actual_cnt, size, totsz); 4648 4649 ql_log(ql_log_info, vha, 0xd017, 4650 "Exchange Buffers requested size = 0x%x\n", 4651 ha->exchoffld_size); 4652 4653 /* Get consistent memory for exchange offload */ 4654 ha->exchoffld_buf = dma_alloc_coherent(&ha->pdev->dev, 4655 ha->exchoffld_size, &ha->exchoffld_buf_dma, GFP_KERNEL); 4656 if (!ha->exchoffld_buf) { 4657 ql_log_pci(ql_log_fatal, ha->pdev, 0xd013, 4658 "Failed to allocate memory for Exchange Offload.\n"); 4659 4660 if (ha->max_exchg > 4661 (FW_DEF_EXCHANGES_CNT + REDUCE_EXCHANGES_CNT)) { 4662 ha->max_exchg -= REDUCE_EXCHANGES_CNT; 4663 } else if (ha->max_exchg > 4664 (FW_DEF_EXCHANGES_CNT + 512)) { 4665 ha->max_exchg -= 512; 4666 } else { 4667 ha->flags.exchoffld_enabled = 0; 4668 ql_log_pci(ql_log_fatal, ha->pdev, 0xd013, 4669 "Disabling Exchange offload due to lack of memory\n"); 4670 } 4671 ha->exchoffld_size = 0; 4672 4673 return -ENOMEM; 4674 } 4675 } else if (!ha->exchoffld_buf || (actual_cnt <= FW_DEF_EXCHANGES_CNT)) { 4676 /* pathological case */ 4677 qla2x00_free_exchoffld_buffer(ha); 4678 ha->exchoffld_size = 0; 4679 ha->flags.exchoffld_enabled = 0; 4680 ql_log(ql_log_info, vha, 0xd016, 4681 "Exchange offload not enabled: offld size=%d, actual count=%d entry sz=0x%x, total sz=0x%x.\n", 4682 ha->exchoffld_size, actual_cnt, size, totsz); 4683 return 0; 4684 } 4685 4686 /* Now configure the dma buffer */ 4687 rval = qla_set_exchoffld_mem_cfg(vha); 4688 if (rval) { 4689 ql_log(ql_log_fatal, vha, 0xd02e, 4690 "Setup exchange offload buffer ****FAILED****.\n"); 4691 qla2x00_free_exchoffld_buffer(ha); 4692 } else { 4693 /* re-adjust the number of target exchanges */ 4694 struct init_cb_81xx *icb = (struct init_cb_81xx *)ha->init_cb; 4695 4696 if (qla_ini_mode_enabled(vha)) 4697 icb->exchange_count = 0; 4698 else 4699 icb->exchange_count = cpu_to_le16(vha->ql2xexchoffld); 4700 } 4701 4702 return rval; 4703 } 4704 4705 /* 4706 * qla2x00_free_exchoffld_buffer 4707 * 4708 * Input: 4709 * ha = adapter block pointer 4710 */ 4711 void 4712 qla2x00_free_exchoffld_buffer(struct qla_hw_data *ha) 4713 { 4714 if (ha->exchoffld_buf) { 4715 dma_free_coherent(&ha->pdev->dev, ha->exchoffld_size, 4716 ha->exchoffld_buf, ha->exchoffld_buf_dma); 4717 ha->exchoffld_buf = NULL; 4718 ha->exchoffld_size = 0; 4719 } 4720 } 4721 4722 /* 4723 * qla2x00_free_fw_dump 4724 * Frees firmware dump resources.
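 * Releases the FCE and EFT trace buffers, the vmalloc'ed firmware dump
 * and the fwdt templates, and resets the related bookkeeping flags.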
4725 * 4726 * Input: 4727 * ha = adapter block pointer 4728 */ 4729 static void 4730 qla2x00_free_fw_dump(struct qla_hw_data *ha) 4731 { 4732 struct fwdt *fwdt = ha->fwdt; 4733 uint j; 4734 4735 if (ha->fce) 4736 dma_free_coherent(&ha->pdev->dev, 4737 FCE_SIZE, ha->fce, ha->fce_dma); 4738 4739 if (ha->eft) 4740 dma_free_coherent(&ha->pdev->dev, 4741 EFT_SIZE, ha->eft, ha->eft_dma); 4742 4743 vfree(ha->fw_dump); 4744 4745 ha->fce = NULL; 4746 ha->fce_dma = 0; 4747 ha->flags.fce_enabled = 0; 4748 ha->eft = NULL; 4749 ha->eft_dma = 0; 4750 ha->fw_dumped = false; 4751 ha->fw_dump_cap_flags = 0; 4752 ha->fw_dump_reading = 0; 4753 ha->fw_dump = NULL; 4754 ha->fw_dump_len = 0; 4755 4756 for (j = 0; j < 2; j++, fwdt++) { 4757 vfree(fwdt->template); 4758 fwdt->template = NULL; 4759 fwdt->length = 0; 4760 } 4761 } 4762 4763 /* 4764 * qla2x00_mem_free 4765 * Frees all adapter allocated memory. 4766 * 4767 * Input: 4768 * ha = adapter block pointer. 4769 */ 4770 static void 4771 qla2x00_mem_free(struct qla_hw_data *ha) 4772 { 4773 qla2x00_free_fw_dump(ha); 4774 4775 if (ha->mctp_dump) 4776 dma_free_coherent(&ha->pdev->dev, MCTP_DUMP_SIZE, ha->mctp_dump, 4777 ha->mctp_dump_dma); 4778 ha->mctp_dump = NULL; 4779 4780 mempool_destroy(ha->srb_mempool); 4781 ha->srb_mempool = NULL; 4782 4783 if (ha->dcbx_tlv) 4784 dma_free_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE, 4785 ha->dcbx_tlv, ha->dcbx_tlv_dma); 4786 ha->dcbx_tlv = NULL; 4787 4788 if (ha->xgmac_data) 4789 dma_free_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE, 4790 ha->xgmac_data, ha->xgmac_data_dma); 4791 ha->xgmac_data = NULL; 4792 4793 if (ha->sns_cmd) 4794 dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt), 4795 ha->sns_cmd, ha->sns_cmd_dma); 4796 ha->sns_cmd = NULL; 4797 ha->sns_cmd_dma = 0; 4798 4799 if (ha->ct_sns) 4800 dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt), 4801 ha->ct_sns, ha->ct_sns_dma); 4802 ha->ct_sns = NULL; 4803 ha->ct_sns_dma = 0; 4804 4805 if (ha->sfp_data) 4806 dma_free_coherent(&ha->pdev->dev, SFP_DEV_SIZE, ha->sfp_data, 4807 ha->sfp_data_dma); 4808 ha->sfp_data = NULL; 4809 4810 if (ha->flt) 4811 dma_free_coherent(&ha->pdev->dev, 4812 sizeof(struct qla_flt_header) + FLT_REGIONS_SIZE, 4813 ha->flt, ha->flt_dma); 4814 ha->flt = NULL; 4815 ha->flt_dma = 0; 4816 4817 if (ha->ms_iocb) 4818 dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma); 4819 ha->ms_iocb = NULL; 4820 ha->ms_iocb_dma = 0; 4821 4822 if (ha->sf_init_cb) 4823 dma_pool_free(ha->s_dma_pool, 4824 ha->sf_init_cb, ha->sf_init_cb_dma); 4825 4826 if (ha->ex_init_cb) 4827 dma_pool_free(ha->s_dma_pool, 4828 ha->ex_init_cb, ha->ex_init_cb_dma); 4829 ha->ex_init_cb = NULL; 4830 ha->ex_init_cb_dma = 0; 4831 4832 if (ha->async_pd) 4833 dma_pool_free(ha->s_dma_pool, ha->async_pd, ha->async_pd_dma); 4834 ha->async_pd = NULL; 4835 ha->async_pd_dma = 0; 4836 4837 dma_pool_destroy(ha->s_dma_pool); 4838 ha->s_dma_pool = NULL; 4839 4840 if (ha->gid_list) 4841 dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha), 4842 ha->gid_list, ha->gid_list_dma); 4843 ha->gid_list = NULL; 4844 ha->gid_list_dma = 0; 4845 4846 if (IS_QLA82XX(ha)) { 4847 if (!list_empty(&ha->gbl_dsd_list)) { 4848 struct dsd_dma *dsd_ptr, *tdsd_ptr; 4849 4850 /* clean up allocated prev pool */ 4851 list_for_each_entry_safe(dsd_ptr, 4852 tdsd_ptr, &ha->gbl_dsd_list, list) { 4853 dma_pool_free(ha->dl_dma_pool, 4854 dsd_ptr->dsd_addr, dsd_ptr->dsd_list_dma); 4855 list_del(&dsd_ptr->list); 4856 kfree(dsd_ptr); 4857 } 4858 } 4859 } 4860 4861 dma_pool_destroy(ha->dl_dma_pool); 4862 
ha->dl_dma_pool = NULL; 4863 4864 dma_pool_destroy(ha->fcp_cmnd_dma_pool); 4865 ha->fcp_cmnd_dma_pool = NULL; 4866 4867 mempool_destroy(ha->ctx_mempool); 4868 ha->ctx_mempool = NULL; 4869 4870 if (ql2xenabledif && ha->dif_bundl_pool) { 4871 struct dsd_dma *dsd, *nxt; 4872 4873 list_for_each_entry_safe(dsd, nxt, &ha->pool.unusable.head, 4874 list) { 4875 list_del(&dsd->list); 4876 dma_pool_free(ha->dif_bundl_pool, dsd->dsd_addr, 4877 dsd->dsd_list_dma); 4878 ha->dif_bundle_dma_allocs--; 4879 kfree(dsd); 4880 ha->dif_bundle_kallocs--; 4881 ha->pool.unusable.count--; 4882 } 4883 list_for_each_entry_safe(dsd, nxt, &ha->pool.good.head, list) { 4884 list_del(&dsd->list); 4885 dma_pool_free(ha->dif_bundl_pool, dsd->dsd_addr, 4886 dsd->dsd_list_dma); 4887 ha->dif_bundle_dma_allocs--; 4888 kfree(dsd); 4889 ha->dif_bundle_kallocs--; 4890 } 4891 } 4892 4893 dma_pool_destroy(ha->dif_bundl_pool); 4894 ha->dif_bundl_pool = NULL; 4895 4896 qlt_mem_free(ha); 4897 qla_remove_hostmap(ha); 4898 4899 if (ha->init_cb) 4900 dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, 4901 ha->init_cb, ha->init_cb_dma); 4902 4903 dma_pool_destroy(ha->purex_dma_pool); 4904 ha->purex_dma_pool = NULL; 4905 4906 if (ha->elsrej.c) { 4907 dma_free_coherent(&ha->pdev->dev, ha->elsrej.size, 4908 ha->elsrej.c, ha->elsrej.cdma); 4909 ha->elsrej.c = NULL; 4910 } 4911 4912 ha->init_cb = NULL; 4913 ha->init_cb_dma = 0; 4914 4915 vfree(ha->optrom_buffer); 4916 ha->optrom_buffer = NULL; 4917 kfree(ha->nvram); 4918 ha->nvram = NULL; 4919 kfree(ha->npiv_info); 4920 ha->npiv_info = NULL; 4921 kfree(ha->swl); 4922 ha->swl = NULL; 4923 kfree(ha->loop_id_map); 4924 ha->sf_init_cb = NULL; 4925 ha->sf_init_cb_dma = 0; 4926 ha->loop_id_map = NULL; 4927 } 4928 4929 struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht, 4930 struct qla_hw_data *ha) 4931 { 4932 struct Scsi_Host *host; 4933 struct scsi_qla_host *vha = NULL; 4934 4935 host = scsi_host_alloc(sht, sizeof(scsi_qla_host_t)); 4936 if (!host) { 4937 ql_log_pci(ql_log_fatal, ha->pdev, 0x0107, 4938 "Failed to allocate host from the scsi layer, aborting.\n"); 4939 return NULL; 4940 } 4941 4942 /* Clear our data area */ 4943 vha = shost_priv(host); 4944 memset(vha, 0, sizeof(scsi_qla_host_t)); 4945 4946 vha->host = host; 4947 vha->host_no = host->host_no; 4948 vha->hw = ha; 4949 4950 vha->qlini_mode = ql2x_ini_mode; 4951 vha->ql2xexchoffld = ql2xexchoffld; 4952 vha->ql2xiniexchg = ql2xiniexchg; 4953 4954 INIT_LIST_HEAD(&vha->vp_fcports); 4955 INIT_LIST_HEAD(&vha->work_list); 4956 INIT_LIST_HEAD(&vha->list); 4957 INIT_LIST_HEAD(&vha->qla_cmd_list); 4958 INIT_LIST_HEAD(&vha->qla_sess_op_cmd_list); 4959 INIT_LIST_HEAD(&vha->logo_list); 4960 INIT_LIST_HEAD(&vha->plogi_ack_list); 4961 INIT_LIST_HEAD(&vha->qp_list); 4962 INIT_LIST_HEAD(&vha->gnl.fcports); 4963 INIT_LIST_HEAD(&vha->gpnid_list); 4964 INIT_WORK(&vha->iocb_work, qla2x00_iocb_work_fn); 4965 4966 INIT_LIST_HEAD(&vha->purex_list.head); 4967 spin_lock_init(&vha->purex_list.lock); 4968 4969 spin_lock_init(&vha->work_lock); 4970 spin_lock_init(&vha->cmd_list_lock); 4971 init_waitqueue_head(&vha->fcport_waitQ); 4972 init_waitqueue_head(&vha->vref_waitq); 4973 qla_enode_init(vha); 4974 qla_edb_init(vha); 4975 4976 4977 vha->gnl.size = sizeof(struct get_name_list_extended) * 4978 (ha->max_loop_id + 1); 4979 vha->gnl.l = dma_alloc_coherent(&ha->pdev->dev, 4980 vha->gnl.size, &vha->gnl.ldma, GFP_KERNEL); 4981 if (!vha->gnl.l) { 4982 ql_log(ql_log_fatal, vha, 0xd04a, 4983 "Alloc failed for name list.\n"); 4984 
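/* Drop the reference taken by scsi_host_alloc() before bailing out. */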
scsi_host_put(vha->host); 4985 return NULL; 4986 } 4987 4988 /* todo: what about ext login? */ 4989 vha->scan.size = ha->max_fibre_devices * sizeof(struct fab_scan_rp); 4990 vha->scan.l = vmalloc(vha->scan.size); 4991 if (!vha->scan.l) { 4992 ql_log(ql_log_fatal, vha, 0xd04a, 4993 "Alloc failed for scan database.\n"); 4994 dma_free_coherent(&ha->pdev->dev, vha->gnl.size, 4995 vha->gnl.l, vha->gnl.ldma); 4996 vha->gnl.l = NULL; 4997 scsi_host_put(vha->host); 4998 return NULL; 4999 } 5000 INIT_DELAYED_WORK(&vha->scan.scan_work, qla_scan_work_fn); 5001 5002 sprintf(vha->host_str, "%s_%lu", QLA2XXX_DRIVER_NAME, vha->host_no); 5003 ql_dbg(ql_dbg_init, vha, 0x0041, 5004 "Allocated the host=%p hw=%p vha=%p dev_name=%s", 5005 vha->host, vha->hw, vha, 5006 dev_name(&(ha->pdev->dev))); 5007 5008 return vha; 5009 } 5010 5011 struct qla_work_evt * 5012 qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type) 5013 { 5014 struct qla_work_evt *e; 5015 uint8_t bail; 5016 5017 if (test_bit(UNLOADING, &vha->dpc_flags)) 5018 return NULL; 5019 5020 QLA_VHA_MARK_BUSY(vha, bail); 5021 if (bail) 5022 return NULL; 5023 5024 e = kzalloc(sizeof(struct qla_work_evt), GFP_ATOMIC); 5025 if (!e) { 5026 QLA_VHA_MARK_NOT_BUSY(vha); 5027 return NULL; 5028 } 5029 5030 INIT_LIST_HEAD(&e->list); 5031 e->type = type; 5032 e->flags = QLA_EVT_FLAG_FREE; 5033 return e; 5034 } 5035 5036 int 5037 qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e) 5038 { 5039 unsigned long flags; 5040 bool q = false; 5041 5042 spin_lock_irqsave(&vha->work_lock, flags); 5043 list_add_tail(&e->list, &vha->work_list); 5044 5045 if (!test_and_set_bit(IOCB_WORK_ACTIVE, &vha->dpc_flags)) 5046 q = true; 5047 5048 spin_unlock_irqrestore(&vha->work_lock, flags); 5049 5050 if (q) 5051 queue_work(vha->hw->wq, &vha->iocb_work); 5052 5053 return QLA_SUCCESS; 5054 } 5055 5056 int 5057 qla2x00_post_aen_work(struct scsi_qla_host *vha, enum fc_host_event_code code, 5058 u32 data) 5059 { 5060 struct qla_work_evt *e; 5061 5062 e = qla2x00_alloc_work(vha, QLA_EVT_AEN); 5063 if (!e) 5064 return QLA_FUNCTION_FAILED; 5065 5066 e->u.aen.code = code; 5067 e->u.aen.data = data; 5068 return qla2x00_post_work(vha, e); 5069 } 5070 5071 int 5072 qla2x00_post_idc_ack_work(struct scsi_qla_host *vha, uint16_t *mb) 5073 { 5074 struct qla_work_evt *e; 5075 5076 e = qla2x00_alloc_work(vha, QLA_EVT_IDC_ACK); 5077 if (!e) 5078 return QLA_FUNCTION_FAILED; 5079 5080 memcpy(e->u.idc_ack.mb, mb, QLA_IDC_ACK_REGS * sizeof(uint16_t)); 5081 return qla2x00_post_work(vha, e); 5082 } 5083 5084 #define qla2x00_post_async_work(name, type) \ 5085 int qla2x00_post_async_##name##_work( \ 5086 struct scsi_qla_host *vha, \ 5087 fc_port_t *fcport, uint16_t *data) \ 5088 { \ 5089 struct qla_work_evt *e; \ 5090 \ 5091 e = qla2x00_alloc_work(vha, type); \ 5092 if (!e) \ 5093 return QLA_FUNCTION_FAILED; \ 5094 \ 5095 e->u.logio.fcport = fcport; \ 5096 if (data) { \ 5097 e->u.logio.data[0] = data[0]; \ 5098 e->u.logio.data[1] = data[1]; \ 5099 } \ 5100 fcport->flags |= FCF_ASYNC_ACTIVE; \ 5101 return qla2x00_post_work(vha, e); \ 5102 } 5103 5104 qla2x00_post_async_work(login, QLA_EVT_ASYNC_LOGIN); 5105 qla2x00_post_async_work(logout, QLA_EVT_ASYNC_LOGOUT); 5106 qla2x00_post_async_work(adisc, QLA_EVT_ASYNC_ADISC); 5107 qla2x00_post_async_work(prlo, QLA_EVT_ASYNC_PRLO); 5108 qla2x00_post_async_work(prlo_done, QLA_EVT_ASYNC_PRLO_DONE); 5109 5110 int 5111 qla2x00_post_uevent_work(struct scsi_qla_host *vha, u32 code) 5112 { 5113 struct qla_work_evt *e; 5114 5115 e = 
qla2x00_alloc_work(vha, QLA_EVT_UEVENT); 5116 if (!e) 5117 return QLA_FUNCTION_FAILED; 5118 5119 e->u.uevent.code = code; 5120 return qla2x00_post_work(vha, e); 5121 } 5122 5123 static void 5124 qla2x00_uevent_emit(struct scsi_qla_host *vha, u32 code) 5125 { 5126 char event_string[40]; 5127 char *envp[] = { event_string, NULL }; 5128 5129 switch (code) { 5130 case QLA_UEVENT_CODE_FW_DUMP: 5131 snprintf(event_string, sizeof(event_string), "FW_DUMP=%lu", 5132 vha->host_no); 5133 break; 5134 default: 5135 /* do nothing */ 5136 break; 5137 } 5138 kobject_uevent_env(&vha->hw->pdev->dev.kobj, KOBJ_CHANGE, envp); 5139 } 5140 5141 int 5142 qlafx00_post_aenfx_work(struct scsi_qla_host *vha, uint32_t evtcode, 5143 uint32_t *data, int cnt) 5144 { 5145 struct qla_work_evt *e; 5146 5147 e = qla2x00_alloc_work(vha, QLA_EVT_AENFX); 5148 if (!e) 5149 return QLA_FUNCTION_FAILED; 5150 5151 e->u.aenfx.evtcode = evtcode; 5152 e->u.aenfx.count = cnt; 5153 memcpy(e->u.aenfx.mbx, data, sizeof(*data) * cnt); 5154 return qla2x00_post_work(vha, e); 5155 } 5156 5157 void qla24xx_sched_upd_fcport(fc_port_t *fcport) 5158 { 5159 unsigned long flags; 5160 5161 if (IS_SW_RESV_ADDR(fcport->d_id)) 5162 return; 5163 5164 spin_lock_irqsave(&fcport->vha->work_lock, flags); 5165 if (fcport->disc_state == DSC_UPD_FCPORT) { 5166 spin_unlock_irqrestore(&fcport->vha->work_lock, flags); 5167 return; 5168 } 5169 fcport->jiffies_at_registration = jiffies; 5170 fcport->sec_since_registration = 0; 5171 fcport->next_disc_state = DSC_DELETED; 5172 qla2x00_set_fcport_disc_state(fcport, DSC_UPD_FCPORT); 5173 spin_unlock_irqrestore(&fcport->vha->work_lock, flags); 5174 5175 queue_work(system_unbound_wq, &fcport->reg_work); 5176 } 5177 5178 static 5179 void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e) 5180 { 5181 unsigned long flags; 5182 fc_port_t *fcport = NULL, *tfcp; 5183 struct qlt_plogi_ack_t *pla = 5184 (struct qlt_plogi_ack_t *)e->u.new_sess.pla; 5185 uint8_t free_fcport = 0; 5186 5187 ql_dbg(ql_dbg_disc, vha, 0xffff, 5188 "%s %d %8phC enter\n", 5189 __func__, __LINE__, e->u.new_sess.port_name); 5190 5191 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); 5192 fcport = qla2x00_find_fcport_by_wwpn(vha, e->u.new_sess.port_name, 1); 5193 if (fcport) { 5194 fcport->d_id = e->u.new_sess.id; 5195 if (pla) { 5196 fcport->fw_login_state = DSC_LS_PLOGI_PEND; 5197 memcpy(fcport->node_name, 5198 pla->iocb.u.isp24.u.plogi.node_name, 5199 WWN_SIZE); 5200 qlt_plogi_ack_link(vha, pla, fcport, QLT_PLOGI_LINK_SAME_WWN); 5201 /* we took an extra ref_count to prevent PLOGI ACK when 5202 * fcport/sess has not been created. 
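 * That extra reference is dropped just below, now that the existing
 * fcport has been found and linked.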
5203 */ 5204 pla->ref_count--; 5205 } 5206 } else { 5207 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 5208 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); 5209 if (fcport) { 5210 fcport->d_id = e->u.new_sess.id; 5211 fcport->flags |= FCF_FABRIC_DEVICE; 5212 fcport->fw_login_state = DSC_LS_PLOGI_PEND; 5213 fcport->tgt_short_link_down_cnt = 0; 5214 5215 memcpy(fcport->port_name, e->u.new_sess.port_name, 5216 WWN_SIZE); 5217 5218 fcport->fc4_type = e->u.new_sess.fc4_type; 5219 if (NVME_PRIORITY(vha->hw, fcport)) 5220 fcport->do_prli_nvme = 1; 5221 else 5222 fcport->do_prli_nvme = 0; 5223 5224 if (e->u.new_sess.fc4_type & FS_FCP_IS_N2N) { 5225 fcport->dm_login_expire = jiffies + 5226 QLA_N2N_WAIT_TIME * HZ; 5227 fcport->fc4_type = FS_FC4TYPE_FCP; 5228 fcport->n2n_flag = 1; 5229 if (vha->flags.nvme_enabled) 5230 fcport->fc4_type |= FS_FC4TYPE_NVME; 5231 } 5232 5233 } else { 5234 ql_dbg(ql_dbg_disc, vha, 0xffff, 5235 "%s %8phC mem alloc fail.\n", 5236 __func__, e->u.new_sess.port_name); 5237 5238 if (pla) { 5239 list_del(&pla->list); 5240 kmem_cache_free(qla_tgt_plogi_cachep, pla); 5241 } 5242 return; 5243 } 5244 5245 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); 5246 /* search again to make sure no one else got ahead */ 5247 tfcp = qla2x00_find_fcport_by_wwpn(vha, 5248 e->u.new_sess.port_name, 1); 5249 if (tfcp) { 5250 /* should rarely happen */ 5251 ql_dbg(ql_dbg_disc, vha, 0xffff, 5252 "%s %8phC found existing fcport b4 add. DS %d LS %d\n", 5253 __func__, tfcp->port_name, tfcp->disc_state, 5254 tfcp->fw_login_state); 5255 5256 free_fcport = 1; 5257 } else { 5258 list_add_tail(&fcport->list, &vha->vp_fcports); 5259 5260 } 5261 if (pla) { 5262 qlt_plogi_ack_link(vha, pla, fcport, 5263 QLT_PLOGI_LINK_SAME_WWN); 5264 pla->ref_count--; 5265 } 5266 } 5267 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 5268 5269 if (fcport) { 5270 fcport->id_changed = 1; 5271 fcport->scan_state = QLA_FCPORT_FOUND; 5272 fcport->chip_reset = vha->hw->base_qpair->chip_reset; 5273 memcpy(fcport->node_name, e->u.new_sess.node_name, WWN_SIZE); 5274 5275 if (pla) { 5276 if (pla->iocb.u.isp24.status_subcode == ELS_PRLI) { 5277 u16 wd3_lo; 5278 5279 fcport->fw_login_state = DSC_LS_PRLI_PEND; 5280 fcport->local = 0; 5281 fcport->loop_id = 5282 le16_to_cpu( 5283 pla->iocb.u.isp24.nport_handle); 5285 wd3_lo = 5286 le16_to_cpu( 5287 pla->iocb.u.isp24.u.prli.wd3_lo); 5288 5289 if (wd3_lo & BIT_7) 5290 fcport->conf_compl_supported = 1; 5291 5292 if ((wd3_lo & BIT_4) == 0) 5293 fcport->port_type = FCT_INITIATOR; 5294 else 5295 fcport->port_type = FCT_TARGET; 5296 } 5297 qlt_plogi_ack_unref(vha, pla); 5298 } else { 5299 fc_port_t *dfcp = NULL; 5300 5301 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); 5302 tfcp = qla2x00_find_fcport_by_nportid(vha, 5303 &e->u.new_sess.id, 1); 5304 if (tfcp && (tfcp != fcport)) { 5305 /* 5306 * We have a conflicting fcport with the same NportID. 5307 */ 5308 ql_dbg(ql_dbg_disc, vha, 0xffff, 5309 "%s %8phC found conflict b4 add. 
DS %d LS %d\n", 5310 __func__, tfcp->port_name, tfcp->disc_state, 5311 tfcp->fw_login_state); 5312 5313 switch (tfcp->disc_state) { 5314 case DSC_DELETED: 5315 break; 5316 case DSC_DELETE_PEND: 5317 fcport->login_pause = 1; 5318 tfcp->conflict = fcport; 5319 break; 5320 default: 5321 fcport->login_pause = 1; 5322 tfcp->conflict = fcport; 5323 dfcp = tfcp; 5324 break; 5325 } 5326 } 5327 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 5328 if (dfcp) 5329 qlt_schedule_sess_for_deletion(tfcp); 5330 5331 if (N2N_TOPO(vha->hw)) { 5332 fcport->flags &= ~FCF_FABRIC_DEVICE; 5333 fcport->keep_nport_handle = 1; 5334 if (vha->flags.nvme_enabled) { 5335 fcport->fc4_type = 5336 (FS_FC4TYPE_NVME | FS_FC4TYPE_FCP); 5337 fcport->n2n_flag = 1; 5338 } 5339 fcport->fw_login_state = 0; 5340 5341 schedule_delayed_work(&vha->scan.scan_work, 5); 5342 } else { 5343 qla24xx_fcport_handle_login(vha, fcport); 5344 } 5345 } 5346 } 5347 5348 if (free_fcport) { 5349 qla2x00_free_fcport(fcport); 5350 if (pla) { 5351 list_del(&pla->list); 5352 kmem_cache_free(qla_tgt_plogi_cachep, pla); 5353 } 5354 } 5355 } 5356 5357 static void qla_sp_retry(struct scsi_qla_host *vha, struct qla_work_evt *e) 5358 { 5359 struct srb *sp = e->u.iosb.sp; 5360 int rval; 5361 5362 rval = qla2x00_start_sp(sp); 5363 if (rval != QLA_SUCCESS) { 5364 ql_dbg(ql_dbg_disc, vha, 0x2043, 5365 "%s: %s: Re-issue IOCB failed (%d).\n", 5366 __func__, sp->name, rval); 5367 qla24xx_sp_unmap(vha, sp); 5368 } 5369 } 5370 5371 void 5372 qla2x00_do_work(struct scsi_qla_host *vha) 5373 { 5374 struct qla_work_evt *e, *tmp; 5375 unsigned long flags; 5376 LIST_HEAD(work); 5377 int rc; 5378 5379 spin_lock_irqsave(&vha->work_lock, flags); 5380 list_splice_init(&vha->work_list, &work); 5381 spin_unlock_irqrestore(&vha->work_lock, flags); 5382 5383 list_for_each_entry_safe(e, tmp, &work, list) { 5384 rc = QLA_SUCCESS; 5385 switch (e->type) { 5386 case QLA_EVT_AEN: 5387 fc_host_post_event(vha->host, fc_get_event_number(), 5388 e->u.aen.code, e->u.aen.data); 5389 break; 5390 case QLA_EVT_IDC_ACK: 5391 qla81xx_idc_ack(vha, e->u.idc_ack.mb); 5392 break; 5393 case QLA_EVT_ASYNC_LOGIN: 5394 qla2x00_async_login(vha, e->u.logio.fcport, 5395 e->u.logio.data); 5396 break; 5397 case QLA_EVT_ASYNC_LOGOUT: 5398 rc = qla2x00_async_logout(vha, e->u.logio.fcport); 5399 break; 5400 case QLA_EVT_ASYNC_ADISC: 5401 qla2x00_async_adisc(vha, e->u.logio.fcport, 5402 e->u.logio.data); 5403 break; 5404 case QLA_EVT_UEVENT: 5405 qla2x00_uevent_emit(vha, e->u.uevent.code); 5406 break; 5407 case QLA_EVT_AENFX: 5408 qlafx00_process_aen(vha, e); 5409 break; 5410 case QLA_EVT_GPNID: 5411 qla24xx_async_gpnid(vha, &e->u.gpnid.id); 5412 break; 5413 case QLA_EVT_UNMAP: 5414 qla24xx_sp_unmap(vha, e->u.iosb.sp); 5415 break; 5416 case QLA_EVT_RELOGIN: 5417 qla2x00_relogin(vha); 5418 break; 5419 case QLA_EVT_NEW_SESS: 5420 qla24xx_create_new_sess(vha, e); 5421 break; 5422 case QLA_EVT_GPDB: 5423 qla24xx_async_gpdb(vha, e->u.fcport.fcport, 5424 e->u.fcport.opt); 5425 break; 5426 case QLA_EVT_PRLI: 5427 qla24xx_async_prli(vha, e->u.fcport.fcport); 5428 break; 5429 case QLA_EVT_GPSC: 5430 qla24xx_async_gpsc(vha, e->u.fcport.fcport); 5431 break; 5432 case QLA_EVT_GNL: 5433 qla24xx_async_gnl(vha, e->u.fcport.fcport); 5434 break; 5435 case QLA_EVT_NACK: 5436 qla24xx_do_nack_work(vha, e); 5437 break; 5438 case QLA_EVT_ASYNC_PRLO: 5439 rc = qla2x00_async_prlo(vha, e->u.logio.fcport); 5440 break; 5441 case QLA_EVT_ASYNC_PRLO_DONE: 5442 qla2x00_async_prlo_done(vha, e->u.logio.fcport, 5443 e->u.logio.data); 5444 
break; 5445 case QLA_EVT_GPNFT: 5446 qla24xx_async_gpnft(vha, e->u.gpnft.fc4_type, 5447 e->u.gpnft.sp); 5448 break; 5449 case QLA_EVT_GPNFT_DONE: 5450 qla24xx_async_gpnft_done(vha, e->u.iosb.sp); 5451 break; 5452 case QLA_EVT_GNNFT_DONE: 5453 qla24xx_async_gnnft_done(vha, e->u.iosb.sp); 5454 break; 5455 case QLA_EVT_GNNID: 5456 qla24xx_async_gnnid(vha, e->u.fcport.fcport); 5457 break; 5458 case QLA_EVT_GFPNID: 5459 qla24xx_async_gfpnid(vha, e->u.fcport.fcport); 5460 break; 5461 case QLA_EVT_SP_RETRY: 5462 qla_sp_retry(vha, e); 5463 break; 5464 case QLA_EVT_IIDMA: 5465 qla_do_iidma_work(vha, e->u.fcport.fcport); 5466 break; 5467 case QLA_EVT_ELS_PLOGI: 5468 qla24xx_els_dcmd2_iocb(vha, ELS_DCMD_PLOGI, 5469 e->u.fcport.fcport, false); 5470 break; 5471 case QLA_EVT_SA_REPLACE: 5472 qla24xx_issue_sa_replace_iocb(vha, e); 5473 break; 5474 } 5475 5476 if (rc == EAGAIN) { 5477 /* put 'work' at head of 'vha->work_list' */ 5478 spin_lock_irqsave(&vha->work_lock, flags); 5479 list_splice(&work, &vha->work_list); 5480 spin_unlock_irqrestore(&vha->work_lock, flags); 5481 break; 5482 } 5483 list_del_init(&e->list); 5484 if (e->flags & QLA_EVT_FLAG_FREE) 5485 kfree(e); 5486 5487 /* For each work completed decrement vha ref count */ 5488 QLA_VHA_MARK_NOT_BUSY(vha); 5489 } 5490 } 5491 5492 int qla24xx_post_relogin_work(struct scsi_qla_host *vha) 5493 { 5494 struct qla_work_evt *e; 5495 5496 e = qla2x00_alloc_work(vha, QLA_EVT_RELOGIN); 5497 5498 if (!e) { 5499 set_bit(RELOGIN_NEEDED, &vha->dpc_flags); 5500 return QLA_FUNCTION_FAILED; 5501 } 5502 5503 return qla2x00_post_work(vha, e); 5504 } 5505 5506 /* Relogins all the fcports of a vport 5507 * Context: dpc thread 5508 */ 5509 void qla2x00_relogin(struct scsi_qla_host *vha) 5510 { 5511 fc_port_t *fcport; 5512 int status, relogin_needed = 0; 5513 struct event_arg ea; 5514 5515 list_for_each_entry(fcport, &vha->vp_fcports, list) { 5516 /* 5517 * If the port is not ONLINE then try to login 5518 * to it if we haven't run out of retries. 
5519 */ 5520 if (atomic_read(&fcport->state) != FCS_ONLINE && 5521 fcport->login_retry) { 5522 if (fcport->scan_state != QLA_FCPORT_FOUND || 5523 fcport->disc_state == DSC_LOGIN_AUTH_PEND || 5524 fcport->disc_state == DSC_LOGIN_COMPLETE) 5525 continue; 5526 5527 if (fcport->flags & (FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE) || 5528 fcport->disc_state == DSC_DELETE_PEND) { 5529 relogin_needed = 1; 5530 } else { 5531 if (vha->hw->current_topology != ISP_CFG_NL) { 5532 memset(&ea, 0, sizeof(ea)); 5533 ea.fcport = fcport; 5534 qla24xx_handle_relogin_event(vha, &ea); 5535 } else if (vha->hw->current_topology == 5536 ISP_CFG_NL) { 5537 fcport->login_retry--; 5538 status = 5539 qla2x00_local_device_login(vha, 5540 fcport); 5541 if (status == QLA_SUCCESS) { 5542 fcport->old_loop_id = 5543 fcport->loop_id; 5544 ql_dbg(ql_dbg_disc, vha, 0x2003, 5545 "Port login OK: logged in ID 0x%x.\n", 5546 fcport->loop_id); 5547 qla2x00_update_fcport 5548 (vha, fcport); 5549 } else if (status == 1) { 5550 set_bit(RELOGIN_NEEDED, 5551 &vha->dpc_flags); 5552 /* retry the login again */ 5553 ql_dbg(ql_dbg_disc, vha, 0x2007, 5554 "Retrying %d login again loop_id 0x%x.\n", 5555 fcport->login_retry, 5556 fcport->loop_id); 5557 } else { 5558 fcport->login_retry = 0; 5559 } 5560 5561 if (fcport->login_retry == 0 && 5562 status != QLA_SUCCESS) 5563 qla2x00_clear_loop_id(fcport); 5564 } 5565 } 5566 } 5567 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) 5568 break; 5569 } 5570 5571 if (relogin_needed) 5572 set_bit(RELOGIN_NEEDED, &vha->dpc_flags); 5573 5574 ql_dbg(ql_dbg_disc, vha, 0x400e, 5575 "Relogin end.\n"); 5576 } 5577 5578 /* Schedule work on any of the dpc-workqueues */ 5579 void 5580 qla83xx_schedule_work(scsi_qla_host_t *base_vha, int work_code) 5581 { 5582 struct qla_hw_data *ha = base_vha->hw; 5583 5584 switch (work_code) { 5585 case MBA_IDC_AEN: /* 0x8200 */ 5586 if (ha->dpc_lp_wq) 5587 queue_work(ha->dpc_lp_wq, &ha->idc_aen); 5588 break; 5589 5590 case QLA83XX_NIC_CORE_RESET: /* 0x1 */ 5591 if (!ha->flags.nic_core_reset_hdlr_active) { 5592 if (ha->dpc_hp_wq) 5593 queue_work(ha->dpc_hp_wq, &ha->nic_core_reset); 5594 } else 5595 ql_dbg(ql_dbg_p3p, base_vha, 0xb05e, 5596 "NIC Core reset is already active. 
Skip " 5597 "scheduling it again.\n"); 5598 break; 5599 case QLA83XX_IDC_STATE_HANDLER: /* 0x2 */ 5600 if (ha->dpc_hp_wq) 5601 queue_work(ha->dpc_hp_wq, &ha->idc_state_handler); 5602 break; 5603 case QLA83XX_NIC_CORE_UNRECOVERABLE: /* 0x3 */ 5604 if (ha->dpc_hp_wq) 5605 queue_work(ha->dpc_hp_wq, &ha->nic_core_unrecoverable); 5606 break; 5607 default: 5608 ql_log(ql_log_warn, base_vha, 0xb05f, 5609 "Unknown work-code=0x%x.\n", work_code); 5610 } 5611 5612 return; 5613 } 5614 5615 /* Work: Perform NIC Core Unrecoverable state handling */ 5616 void 5617 qla83xx_nic_core_unrecoverable_work(struct work_struct *work) 5618 { 5619 struct qla_hw_data *ha = 5620 container_of(work, struct qla_hw_data, nic_core_unrecoverable); 5621 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 5622 uint32_t dev_state = 0; 5623 5624 qla83xx_idc_lock(base_vha, 0); 5625 qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state); 5626 qla83xx_reset_ownership(base_vha); 5627 if (ha->flags.nic_core_reset_owner) { 5628 ha->flags.nic_core_reset_owner = 0; 5629 qla83xx_wr_reg(base_vha, QLA83XX_IDC_DEV_STATE, 5630 QLA8XXX_DEV_FAILED); 5631 ql_log(ql_log_info, base_vha, 0xb060, "HW State: FAILED.\n"); 5632 qla83xx_schedule_work(base_vha, QLA83XX_IDC_STATE_HANDLER); 5633 } 5634 qla83xx_idc_unlock(base_vha, 0); 5635 } 5636 5637 /* Work: Execute IDC state handler */ 5638 void 5639 qla83xx_idc_state_handler_work(struct work_struct *work) 5640 { 5641 struct qla_hw_data *ha = 5642 container_of(work, struct qla_hw_data, idc_state_handler); 5643 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 5644 uint32_t dev_state = 0; 5645 5646 qla83xx_idc_lock(base_vha, 0); 5647 qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state); 5648 if (dev_state == QLA8XXX_DEV_FAILED || 5649 dev_state == QLA8XXX_DEV_NEED_QUIESCENT) 5650 qla83xx_idc_state_handler(base_vha); 5651 qla83xx_idc_unlock(base_vha, 0); 5652 } 5653 5654 static int 5655 qla83xx_check_nic_core_fw_alive(scsi_qla_host_t *base_vha) 5656 { 5657 int rval = QLA_SUCCESS; 5658 unsigned long heart_beat_wait = jiffies + (1 * HZ); 5659 uint32_t heart_beat_counter1, heart_beat_counter2; 5660 5661 do { 5662 if (time_after(jiffies, heart_beat_wait)) { 5663 ql_dbg(ql_dbg_p3p, base_vha, 0xb07c, 5664 "Nic Core f/w is not alive.\n"); 5665 rval = QLA_FUNCTION_FAILED; 5666 break; 5667 } 5668 5669 qla83xx_idc_lock(base_vha, 0); 5670 qla83xx_rd_reg(base_vha, QLA83XX_FW_HEARTBEAT, 5671 &heart_beat_counter1); 5672 qla83xx_idc_unlock(base_vha, 0); 5673 msleep(100); 5674 qla83xx_idc_lock(base_vha, 0); 5675 qla83xx_rd_reg(base_vha, QLA83XX_FW_HEARTBEAT, 5676 &heart_beat_counter2); 5677 qla83xx_idc_unlock(base_vha, 0); 5678 } while (heart_beat_counter1 == heart_beat_counter2); 5679 5680 return rval; 5681 } 5682 5683 /* Work: Perform NIC Core Reset handling */ 5684 void 5685 qla83xx_nic_core_reset_work(struct work_struct *work) 5686 { 5687 struct qla_hw_data *ha = 5688 container_of(work, struct qla_hw_data, nic_core_reset); 5689 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 5690 uint32_t dev_state = 0; 5691 5692 if (IS_QLA2031(ha)) { 5693 if (qla2xxx_mctp_dump(base_vha) != QLA_SUCCESS) 5694 ql_log(ql_log_warn, base_vha, 0xb081, 5695 "Failed to dump mctp\n"); 5696 return; 5697 } 5698 5699 if (!ha->flags.nic_core_reset_hdlr_active) { 5700 if (qla83xx_check_nic_core_fw_alive(base_vha) == QLA_SUCCESS) { 5701 qla83xx_idc_lock(base_vha, 0); 5702 qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, 5703 &dev_state); 5704 qla83xx_idc_unlock(base_vha, 0); 5705 if (dev_state != 
QLA8XXX_DEV_NEED_RESET) { 5706 ql_dbg(ql_dbg_p3p, base_vha, 0xb07a, 5707 "Nic Core f/w is alive.\n"); 5708 return; 5709 } 5710 } 5711 5712 ha->flags.nic_core_reset_hdlr_active = 1; 5713 if (qla83xx_nic_core_reset(base_vha)) { 5714 /* NIC Core reset failed. */ 5715 ql_dbg(ql_dbg_p3p, base_vha, 0xb061, 5716 "NIC Core reset failed.\n"); 5717 } 5718 ha->flags.nic_core_reset_hdlr_active = 0; 5719 } 5720 } 5721 5722 /* Work: Handle 8200 IDC aens */ 5723 void 5724 qla83xx_service_idc_aen(struct work_struct *work) 5725 { 5726 struct qla_hw_data *ha = 5727 container_of(work, struct qla_hw_data, idc_aen); 5728 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 5729 uint32_t dev_state, idc_control; 5730 5731 qla83xx_idc_lock(base_vha, 0); 5732 qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state); 5733 qla83xx_rd_reg(base_vha, QLA83XX_IDC_CONTROL, &idc_control); 5734 qla83xx_idc_unlock(base_vha, 0); 5735 if (dev_state == QLA8XXX_DEV_NEED_RESET) { 5736 if (idc_control & QLA83XX_IDC_GRACEFUL_RESET) { 5737 ql_dbg(ql_dbg_p3p, base_vha, 0xb062, 5738 "Application requested NIC Core Reset.\n"); 5739 qla83xx_schedule_work(base_vha, QLA83XX_NIC_CORE_RESET); 5740 } else if (qla83xx_check_nic_core_fw_alive(base_vha) == 5741 QLA_SUCCESS) { 5742 ql_dbg(ql_dbg_p3p, base_vha, 0xb07b, 5743 "Other protocol driver requested NIC Core Reset.\n"); 5744 qla83xx_schedule_work(base_vha, QLA83XX_NIC_CORE_RESET); 5745 } 5746 } else if (dev_state == QLA8XXX_DEV_FAILED || 5747 dev_state == QLA8XXX_DEV_NEED_QUIESCENT) { 5748 qla83xx_schedule_work(base_vha, QLA83XX_IDC_STATE_HANDLER); 5749 } 5750 } 5751 5752 /* 5753 * Control the frequency of IDC lock retries 5754 */ 5755 #define QLA83XX_WAIT_LOGIC_MS 100 5756 5757 static int 5758 qla83xx_force_lock_recovery(scsi_qla_host_t *base_vha) 5759 { 5760 int rval; 5761 uint32_t data; 5762 uint32_t idc_lck_rcvry_stage_mask = 0x3; 5763 uint32_t idc_lck_rcvry_owner_mask = 0x3c; 5764 struct qla_hw_data *ha = base_vha->hw; 5765 5766 ql_dbg(ql_dbg_p3p, base_vha, 0xb086, 5767 "Trying force recovery of the IDC lock.\n"); 5768 5769 rval = qla83xx_rd_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY, &data); 5770 if (rval) 5771 return rval; 5772 5773 if ((data & idc_lck_rcvry_stage_mask) > 0) { 5774 return QLA_SUCCESS; 5775 } else { 5776 data = (IDC_LOCK_RECOVERY_STAGE1) | (ha->portnum << 2); 5777 rval = qla83xx_wr_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY, 5778 data); 5779 if (rval) 5780 return rval; 5781 5782 msleep(200); 5783 5784 rval = qla83xx_rd_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY, 5785 &data); 5786 if (rval) 5787 return rval; 5788 5789 if (((data & idc_lck_rcvry_owner_mask) >> 2) == ha->portnum) { 5790 data &= (IDC_LOCK_RECOVERY_STAGE2 | 5791 ~(idc_lck_rcvry_stage_mask)); 5792 rval = qla83xx_wr_reg(base_vha, 5793 QLA83XX_IDC_LOCK_RECOVERY, data); 5794 if (rval) 5795 return rval; 5796 5797 /* Forcefully perform IDC UnLock */ 5798 rval = qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_UNLOCK, 5799 &data); 5800 if (rval) 5801 return rval; 5802 /* Clear lock-id by setting 0xff */ 5803 rval = qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID, 5804 0xff); 5805 if (rval) 5806 return rval; 5807 /* Clear lock-recovery by setting 0x0 */ 5808 rval = qla83xx_wr_reg(base_vha, 5809 QLA83XX_IDC_LOCK_RECOVERY, 0x0); 5810 if (rval) 5811 return rval; 5812 } else 5813 return QLA_SUCCESS; 5814 } 5815 5816 return rval; 5817 } 5818 5819 static int 5820 qla83xx_idc_lock_recovery(scsi_qla_host_t *base_vha) 5821 { 5822 int rval = QLA_SUCCESS; 5823 uint32_t o_drv_lockid, n_drv_lockid; 5824 unsigned long 
lock_recovery_timeout; 5825 5826 lock_recovery_timeout = jiffies + QLA83XX_MAX_LOCK_RECOVERY_WAIT; 5827 retry_lockid: 5828 rval = qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, &o_drv_lockid); 5829 if (rval) 5830 goto exit; 5831 5832 /* MAX wait time before forcing IDC Lock recovery = 2 secs */ 5833 if (time_after_eq(jiffies, lock_recovery_timeout)) { 5834 if (qla83xx_force_lock_recovery(base_vha) == QLA_SUCCESS) 5835 return QLA_SUCCESS; 5836 else 5837 return QLA_FUNCTION_FAILED; 5838 } 5839 5840 rval = qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, &n_drv_lockid); 5841 if (rval) 5842 goto exit; 5843 5844 if (o_drv_lockid == n_drv_lockid) { 5845 msleep(QLA83XX_WAIT_LOGIC_MS); 5846 goto retry_lockid; 5847 } else 5848 return QLA_SUCCESS; 5849 5850 exit: 5851 return rval; 5852 } 5853 5854 /* 5855 * Context: task, can sleep 5856 */ 5857 void 5858 qla83xx_idc_lock(scsi_qla_host_t *base_vha, uint16_t requester_id) 5859 { 5860 uint32_t data; 5861 uint32_t lock_owner; 5862 struct qla_hw_data *ha = base_vha->hw; 5863 5864 might_sleep(); 5865 5866 /* IDC-lock implementation using driver-lock/lock-id remote registers */ 5867 retry_lock: 5868 if (qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCK, &data) 5869 == QLA_SUCCESS) { 5870 if (data) { 5871 /* Setting lock-id to our function-number */ 5872 qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID, 5873 ha->portnum); 5874 } else { 5875 qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, 5876 &lock_owner); 5877 ql_dbg(ql_dbg_p3p, base_vha, 0xb063, 5878 "Failed to acquire IDC lock, acquired by %d, " 5879 "retrying...\n", lock_owner); 5880 5881 /* Retry/Perform IDC-Lock recovery */ 5882 if (qla83xx_idc_lock_recovery(base_vha) 5883 == QLA_SUCCESS) { 5884 msleep(QLA83XX_WAIT_LOGIC_MS); 5885 goto retry_lock; 5886 } else 5887 ql_log(ql_log_warn, base_vha, 0xb075, 5888 "IDC Lock recovery FAILED.\n"); 5889 } 5890 5891 } 5892 5893 return; 5894 } 5895 5896 static bool 5897 qla25xx_rdp_rsp_reduce_size(struct scsi_qla_host *vha, 5898 struct purex_entry_24xx *purex) 5899 { 5900 char fwstr[16]; 5901 u32 sid = purex->s_id[2] << 16 | purex->s_id[1] << 8 | purex->s_id[0]; 5902 struct port_database_24xx *pdb; 5903 5904 /* Domain Controller is always logged-out. 
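 * (s_id 0xfffc01 is the Fibre Channel well-known address of the Domain
 * Controller for domain 0x01; addresses of the form 0xFFFCxx are reserved
 * for domain controllers, so its RDP requests can arrive without a
 * prior PLOGI.)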
*/ 5905 /* if RDP request is not from Domain Controller: */ 5906 if (sid != 0xfffc01) 5907 return false; 5908 5909 ql_dbg(ql_dbg_init, vha, 0x0181, "%s: s_id=%#x\n", __func__, sid); 5910 5911 pdb = kzalloc(sizeof(*pdb), GFP_KERNEL); 5912 if (!pdb) { 5913 ql_dbg(ql_dbg_init, vha, 0x0181, 5914 "%s: Failed allocate pdb\n", __func__); 5915 } else if (qla24xx_get_port_database(vha, 5916 le16_to_cpu(purex->nport_handle), pdb)) { 5917 ql_dbg(ql_dbg_init, vha, 0x0181, 5918 "%s: Failed get pdb sid=%x\n", __func__, sid); 5919 } else if (pdb->current_login_state != PDS_PLOGI_COMPLETE && 5920 pdb->current_login_state != PDS_PRLI_COMPLETE) { 5921 ql_dbg(ql_dbg_init, vha, 0x0181, 5922 "%s: Port not logged in sid=%#x\n", __func__, sid); 5923 } else { 5924 /* RDP request is from logged in port */ 5925 kfree(pdb); 5926 return false; 5927 } 5928 kfree(pdb); 5929 5930 vha->hw->isp_ops->fw_version_str(vha, fwstr, sizeof(fwstr)); 5931 fwstr[strcspn(fwstr, " ")] = 0; 5932 /* if FW version allows RDP response length up to 2048 bytes: */ 5933 if (strcmp(fwstr, "8.09.00") > 0 || strcmp(fwstr, "8.05.65") == 0) 5934 return false; 5935 5936 ql_dbg(ql_dbg_init, vha, 0x0181, "%s: fw=%s\n", __func__, fwstr); 5937 5938 /* RDP response length is to be reduced to a maximum of 256 bytes */ 5939 return true; 5940 } 5941 5942 /* 5943 * Function Name: qla24xx_process_purex_rdp 5944 * 5945 * Description: 5946 * Prepare an RDP response and send it to the Fabric switch 5947 * 5948 * PARAMETERS: 5949 * vha: SCSI qla host 5950 * item: purex item wrapping the RDP request received by the HBA 5951 */ 5952 void qla24xx_process_purex_rdp(struct scsi_qla_host *vha, 5953 struct purex_item *item) 5954 { 5955 struct qla_hw_data *ha = vha->hw; 5956 struct purex_entry_24xx *purex = 5957 (struct purex_entry_24xx *)&item->iocb; 5958 dma_addr_t rsp_els_dma; 5959 dma_addr_t rsp_payload_dma; 5960 dma_addr_t stat_dma; 5961 dma_addr_t sfp_dma; 5962 struct els_entry_24xx *rsp_els = NULL; 5963 struct rdp_rsp_payload *rsp_payload = NULL; 5964 struct link_statistics *stat = NULL; 5965 uint8_t *sfp = NULL; 5966 uint16_t sfp_flags = 0; 5967 uint rsp_payload_length = sizeof(*rsp_payload); 5968 int rval; 5969 5970 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0180, 5971 "%s: Enter\n", __func__); 5972 5973 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0181, 5974 "-------- ELS REQ -------\n"); 5975 ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0182, 5976 purex, sizeof(*purex)); 5977 5978 if (qla25xx_rdp_rsp_reduce_size(vha, purex)) { 5979 rsp_payload_length = 5980 offsetof(typeof(*rsp_payload), optical_elmt_desc); 5981 ql_dbg(ql_dbg_init, vha, 0x0181, 5982 "Reducing RSP payload length to %u bytes...\n", 5983 rsp_payload_length); 5984 } 5985 5986 rsp_els = dma_alloc_coherent(&ha->pdev->dev, sizeof(*rsp_els), 5987 &rsp_els_dma, GFP_KERNEL); 5988 if (!rsp_els) { 5989 ql_log(ql_log_warn, vha, 0x0183, 5990 "Failed allocate dma buffer ELS RSP.\n"); 5991 goto dealloc; 5992 } 5993 5994 rsp_payload = dma_alloc_coherent(&ha->pdev->dev, sizeof(*rsp_payload), 5995 &rsp_payload_dma, GFP_KERNEL); 5996 if (!rsp_payload) { 5997 ql_log(ql_log_warn, vha, 0x0184, 5998 "Failed allocate dma buffer ELS RSP payload.\n"); 5999 goto dealloc; 6000 } 6001 6002 sfp = dma_alloc_coherent(&ha->pdev->dev, SFP_RTDI_LEN, 6003 &sfp_dma, GFP_KERNEL); 6004 6005 stat = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stat), 6006 &stat_dma, GFP_KERNEL); 6007 6008 /* Prepare Response IOCB */ 6009 rsp_els->entry_type = ELS_IOCB_TYPE; 6010 rsp_els->entry_count = 1; 6011 rsp_els->sys_define = 0; 6012 rsp_els->entry_status = 0; 6013
rsp_els->handle = 0; 6014 rsp_els->nport_handle = purex->nport_handle; 6015 rsp_els->tx_dsd_count = cpu_to_le16(1); 6016 rsp_els->vp_index = purex->vp_idx; 6017 rsp_els->sof_type = EST_SOFI3; 6018 rsp_els->rx_xchg_address = purex->rx_xchg_addr; 6019 rsp_els->rx_dsd_count = 0; 6020 rsp_els->opcode = purex->els_frame_payload[0]; 6021 6022 rsp_els->d_id[0] = purex->s_id[0]; 6023 rsp_els->d_id[1] = purex->s_id[1]; 6024 rsp_els->d_id[2] = purex->s_id[2]; 6025 6026 rsp_els->control_flags = cpu_to_le16(EPD_ELS_ACC); 6027 rsp_els->rx_byte_count = 0; 6028 rsp_els->tx_byte_count = cpu_to_le32(rsp_payload_length); 6029 6030 put_unaligned_le64(rsp_payload_dma, &rsp_els->tx_address); 6031 rsp_els->tx_len = rsp_els->tx_byte_count; 6032 6033 rsp_els->rx_address = 0; 6034 rsp_els->rx_len = 0; 6035 6036 /* Prepare Response Payload */ 6037 rsp_payload->hdr.cmd = cpu_to_be32(0x2 << 24); /* LS_ACC */ 6038 rsp_payload->hdr.len = cpu_to_be32(le32_to_cpu(rsp_els->tx_byte_count) - 6039 sizeof(rsp_payload->hdr)); 6040 6041 /* Link service Request Info Descriptor */ 6042 rsp_payload->ls_req_info_desc.desc_tag = cpu_to_be32(0x1); 6043 rsp_payload->ls_req_info_desc.desc_len = 6044 cpu_to_be32(RDP_DESC_LEN(rsp_payload->ls_req_info_desc)); 6045 rsp_payload->ls_req_info_desc.req_payload_word_0 = 6046 cpu_to_be32p((uint32_t *)purex->els_frame_payload); 6047 6048 /* Link service Request Info Descriptor 2 */ 6049 rsp_payload->ls_req_info_desc2.desc_tag = cpu_to_be32(0x1); 6050 rsp_payload->ls_req_info_desc2.desc_len = 6051 cpu_to_be32(RDP_DESC_LEN(rsp_payload->ls_req_info_desc2)); 6052 rsp_payload->ls_req_info_desc2.req_payload_word_0 = 6053 cpu_to_be32p((uint32_t *)purex->els_frame_payload); 6054 6055 6056 rsp_payload->sfp_diag_desc.desc_tag = cpu_to_be32(0x10000); 6057 rsp_payload->sfp_diag_desc.desc_len = 6058 cpu_to_be32(RDP_DESC_LEN(rsp_payload->sfp_diag_desc)); 6059 6060 if (sfp) { 6061 /* SFP Flags */ 6062 memset(sfp, 0, SFP_RTDI_LEN); 6063 rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa0, 0x7, 2, 0); 6064 if (!rval) { 6065 /* SFP Flags bits 3-0: Port Tx Laser Type */ 6066 if (sfp[0] & BIT_2 || sfp[1] & (BIT_6|BIT_5)) 6067 sfp_flags |= BIT_0; /* short wave */ 6068 else if (sfp[0] & BIT_1) 6069 sfp_flags |= BIT_1; /* long wave 1310nm */ 6070 else if (sfp[1] & BIT_4) 6071 sfp_flags |= BIT_1|BIT_0; /* long wave 1550nm */ 6072 } 6073 6074 /* SFP Type */ 6075 memset(sfp, 0, SFP_RTDI_LEN); 6076 rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa0, 0x0, 1, 0); 6077 if (!rval) { 6078 sfp_flags |= BIT_4; /* optical */ 6079 if (sfp[0] == 0x3) 6080 sfp_flags |= BIT_6; /* sfp+ */ 6081 } 6082 6083 rsp_payload->sfp_diag_desc.sfp_flags = cpu_to_be16(sfp_flags); 6084 6085 /* SFP Diagnostics */ 6086 memset(sfp, 0, SFP_RTDI_LEN); 6087 rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa2, 0x60, 10, 0); 6088 if (!rval) { 6089 __be16 *trx = (__force __be16 *)sfp; /* already be16 */ 6090 rsp_payload->sfp_diag_desc.temperature = trx[0]; 6091 rsp_payload->sfp_diag_desc.vcc = trx[1]; 6092 rsp_payload->sfp_diag_desc.tx_bias = trx[2]; 6093 rsp_payload->sfp_diag_desc.tx_power = trx[3]; 6094 rsp_payload->sfp_diag_desc.rx_power = trx[4]; 6095 } 6096 } 6097 6098 /* Port Speed Descriptor */ 6099 rsp_payload->port_speed_desc.desc_tag = cpu_to_be32(0x10001); 6100 rsp_payload->port_speed_desc.desc_len = 6101 cpu_to_be32(RDP_DESC_LEN(rsp_payload->port_speed_desc)); 6102 rsp_payload->port_speed_desc.speed_capab = cpu_to_be16( 6103 qla25xx_fdmi_port_speed_capability(ha)); 6104 rsp_payload->port_speed_desc.operating_speed = cpu_to_be16( 6105 
qla25xx_fdmi_port_speed_currently(ha)); 6106 6107 /* Link Error Status Descriptor */ 6108 rsp_payload->ls_err_desc.desc_tag = cpu_to_be32(0x10002); 6109 rsp_payload->ls_err_desc.desc_len = 6110 cpu_to_be32(RDP_DESC_LEN(rsp_payload->ls_err_desc)); 6111 6112 if (stat) { 6113 rval = qla24xx_get_isp_stats(vha, stat, stat_dma, 0); 6114 if (!rval) { 6115 rsp_payload->ls_err_desc.link_fail_cnt = 6116 cpu_to_be32(le32_to_cpu(stat->link_fail_cnt)); 6117 rsp_payload->ls_err_desc.loss_sync_cnt = 6118 cpu_to_be32(le32_to_cpu(stat->loss_sync_cnt)); 6119 rsp_payload->ls_err_desc.loss_sig_cnt = 6120 cpu_to_be32(le32_to_cpu(stat->loss_sig_cnt)); 6121 rsp_payload->ls_err_desc.prim_seq_err_cnt = 6122 cpu_to_be32(le32_to_cpu(stat->prim_seq_err_cnt)); 6123 rsp_payload->ls_err_desc.inval_xmit_word_cnt = 6124 cpu_to_be32(le32_to_cpu(stat->inval_xmit_word_cnt)); 6125 rsp_payload->ls_err_desc.inval_crc_cnt = 6126 cpu_to_be32(le32_to_cpu(stat->inval_crc_cnt)); 6127 rsp_payload->ls_err_desc.pn_port_phy_type |= BIT_6; 6128 } 6129 } 6130 6131 /* Portname Descriptor */ 6132 rsp_payload->port_name_diag_desc.desc_tag = cpu_to_be32(0x10003); 6133 rsp_payload->port_name_diag_desc.desc_len = 6134 cpu_to_be32(RDP_DESC_LEN(rsp_payload->port_name_diag_desc)); 6135 memcpy(rsp_payload->port_name_diag_desc.WWNN, 6136 vha->node_name, 6137 sizeof(rsp_payload->port_name_diag_desc.WWNN)); 6138 memcpy(rsp_payload->port_name_diag_desc.WWPN, 6139 vha->port_name, 6140 sizeof(rsp_payload->port_name_diag_desc.WWPN)); 6141 6142 /* F-Port Portname Descriptor */ 6143 rsp_payload->port_name_direct_desc.desc_tag = cpu_to_be32(0x10003); 6144 rsp_payload->port_name_direct_desc.desc_len = 6145 cpu_to_be32(RDP_DESC_LEN(rsp_payload->port_name_direct_desc)); 6146 memcpy(rsp_payload->port_name_direct_desc.WWNN, 6147 vha->fabric_node_name, 6148 sizeof(rsp_payload->port_name_direct_desc.WWNN)); 6149 memcpy(rsp_payload->port_name_direct_desc.WWPN, 6150 vha->fabric_port_name, 6151 sizeof(rsp_payload->port_name_direct_desc.WWPN)); 6152 6153 /* Buffer Credit Descriptor */ 6154 rsp_payload->buffer_credit_desc.desc_tag = cpu_to_be32(0x10006); 6155 rsp_payload->buffer_credit_desc.desc_len = 6156 cpu_to_be32(RDP_DESC_LEN(rsp_payload->buffer_credit_desc)); 6157 rsp_payload->buffer_credit_desc.fcport_b2b = 0; 6158 rsp_payload->buffer_credit_desc.attached_fcport_b2b = cpu_to_be32(0); 6159 rsp_payload->buffer_credit_desc.fcport_rtt = cpu_to_be32(0); 6160 6161 if (ha->flags.plogi_template_valid) { 6162 uint32_t tmp = 6163 be16_to_cpu(ha->plogi_els_payld.fl_csp.sp_bb_cred); 6164 rsp_payload->buffer_credit_desc.fcport_b2b = cpu_to_be32(tmp); 6165 } 6166 6167 if (rsp_payload_length < sizeof(*rsp_payload)) 6168 goto send; 6169 6170 /* Optical Element Descriptor, Temperature */ 6171 rsp_payload->optical_elmt_desc[0].desc_tag = cpu_to_be32(0x10007); 6172 rsp_payload->optical_elmt_desc[0].desc_len = 6173 cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc)); 6174 /* Optical Element Descriptor, Voltage */ 6175 rsp_payload->optical_elmt_desc[1].desc_tag = cpu_to_be32(0x10007); 6176 rsp_payload->optical_elmt_desc[1].desc_len = 6177 cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc)); 6178 /* Optical Element Descriptor, Tx Bias Current */ 6179 rsp_payload->optical_elmt_desc[2].desc_tag = cpu_to_be32(0x10007); 6180 rsp_payload->optical_elmt_desc[2].desc_len = 6181 cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc)); 6182 /* Optical Element Descriptor, Tx Power */ 6183 rsp_payload->optical_elmt_desc[3].desc_tag = cpu_to_be32(0x10007); 6184
rsp_payload->optical_elmt_desc[3].desc_len = 6185 cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc)); 6186 /* Optical Element Descriptor, Rx Power */ 6187 rsp_payload->optical_elmt_desc[4].desc_tag = cpu_to_be32(0x10007); 6188 rsp_payload->optical_elmt_desc[4].desc_len = 6189 cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc)); 6190 6191 if (sfp) { 6192 memset(sfp, 0, SFP_RTDI_LEN); 6193 rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa2, 0, 64, 0); 6194 if (!rval) { 6195 __be16 *trx = (__force __be16 *)sfp; /* already be16 */ 6196 6197 /* Optical Element Descriptor, Temperature */ 6198 rsp_payload->optical_elmt_desc[0].high_alarm = trx[0]; 6199 rsp_payload->optical_elmt_desc[0].low_alarm = trx[1]; 6200 rsp_payload->optical_elmt_desc[0].high_warn = trx[2]; 6201 rsp_payload->optical_elmt_desc[0].low_warn = trx[3]; 6202 rsp_payload->optical_elmt_desc[0].element_flags = 6203 cpu_to_be32(1 << 28); 6204 6205 /* Optical Element Descriptor, Voltage */ 6206 rsp_payload->optical_elmt_desc[1].high_alarm = trx[4]; 6207 rsp_payload->optical_elmt_desc[1].low_alarm = trx[5]; 6208 rsp_payload->optical_elmt_desc[1].high_warn = trx[6]; 6209 rsp_payload->optical_elmt_desc[1].low_warn = trx[7]; 6210 rsp_payload->optical_elmt_desc[1].element_flags = 6211 cpu_to_be32(2 << 28); 6212 6213 /* Optical Element Descriptor, Tx Bias Current */ 6214 rsp_payload->optical_elmt_desc[2].high_alarm = trx[8]; 6215 rsp_payload->optical_elmt_desc[2].low_alarm = trx[9]; 6216 rsp_payload->optical_elmt_desc[2].high_warn = trx[10]; 6217 rsp_payload->optical_elmt_desc[2].low_warn = trx[11]; 6218 rsp_payload->optical_elmt_desc[2].element_flags = 6219 cpu_to_be32(3 << 28); 6220 6221 /* Optical Element Descriptor, Tx Power */ 6222 rsp_payload->optical_elmt_desc[3].high_alarm = trx[12]; 6223 rsp_payload->optical_elmt_desc[3].low_alarm = trx[13]; 6224 rsp_payload->optical_elmt_desc[3].high_warn = trx[14]; 6225 rsp_payload->optical_elmt_desc[3].low_warn = trx[15]; 6226 rsp_payload->optical_elmt_desc[3].element_flags = 6227 cpu_to_be32(4 << 28); 6228 6229 /* Optical Element Descriptor, Rx Power */ 6230 rsp_payload->optical_elmt_desc[4].high_alarm = trx[16]; 6231 rsp_payload->optical_elmt_desc[4].low_alarm = trx[17]; 6232 rsp_payload->optical_elmt_desc[4].high_warn = trx[18]; 6233 rsp_payload->optical_elmt_desc[4].low_warn = trx[19]; 6234 rsp_payload->optical_elmt_desc[4].element_flags = 6235 cpu_to_be32(5 << 28); 6236 } 6237 6238 memset(sfp, 0, SFP_RTDI_LEN); 6239 rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa2, 112, 64, 0); 6240 if (!rval) { 6241 /* Temperature high/low alarm/warning */ 6242 rsp_payload->optical_elmt_desc[0].element_flags |= 6243 cpu_to_be32( 6244 (sfp[0] >> 7 & 1) << 3 | 6245 (sfp[0] >> 6 & 1) << 2 | 6246 (sfp[4] >> 7 & 1) << 1 | 6247 (sfp[4] >> 6 & 1) << 0); 6248 6249 /* Voltage high/low alarm/warning */ 6250 rsp_payload->optical_elmt_desc[1].element_flags |= 6251 cpu_to_be32( 6252 (sfp[0] >> 5 & 1) << 3 | 6253 (sfp[0] >> 4 & 1) << 2 | 6254 (sfp[4] >> 5 & 1) << 1 | 6255 (sfp[4] >> 4 & 1) << 0); 6256 6257 /* Tx Bias Current high/low alarm/warning */ 6258 rsp_payload->optical_elmt_desc[2].element_flags |= 6259 cpu_to_be32( 6260 (sfp[0] >> 3 & 1) << 3 | 6261 (sfp[0] >> 2 & 1) << 2 | 6262 (sfp[4] >> 3 & 1) << 1 | 6263 (sfp[4] >> 2 & 1) << 0); 6264 6265 /* Tx Power high/low alarm/warning */ 6266 rsp_payload->optical_elmt_desc[3].element_flags |= 6267 cpu_to_be32( 6268 (sfp[0] >> 1 & 1) << 3 | 6269 (sfp[0] >> 0 & 1) << 2 | 6270 (sfp[4] >> 1 & 1) << 1 | 6271 (sfp[4] >> 0 & 1) << 0); 6272 6273 /* Rx Power high/low 
alarm/warning */ 6274 rsp_payload->optical_elmt_desc[4].element_flags |= 6275 cpu_to_be32( 6276 (sfp[1] >> 7 & 1) << 3 | 6277 (sfp[1] >> 6 & 1) << 2 | 6278 (sfp[5] >> 7 & 1) << 1 | 6279 (sfp[5] >> 6 & 1) << 0); 6280 } 6281 } 6282 6283 /* Optical Product Data Descriptor */ 6284 rsp_payload->optical_prod_desc.desc_tag = cpu_to_be32(0x10008); 6285 rsp_payload->optical_prod_desc.desc_len = 6286 cpu_to_be32(RDP_DESC_LEN(rsp_payload->optical_prod_desc)); 6287 6288 if (sfp) { 6289 memset(sfp, 0, SFP_RTDI_LEN); 6290 rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa0, 20, 64, 0); 6291 if (!rval) { 6292 memcpy(rsp_payload->optical_prod_desc.vendor_name, 6293 sfp + 0, 6294 sizeof(rsp_payload->optical_prod_desc.vendor_name)); 6295 memcpy(rsp_payload->optical_prod_desc.part_number, 6296 sfp + 20, 6297 sizeof(rsp_payload->optical_prod_desc.part_number)); 6298 memcpy(rsp_payload->optical_prod_desc.revision, 6299 sfp + 36, 6300 sizeof(rsp_payload->optical_prod_desc.revision)); 6301 memcpy(rsp_payload->optical_prod_desc.serial_number, 6302 sfp + 48, 6303 sizeof(rsp_payload->optical_prod_desc.serial_number)); 6304 } 6305 6306 memset(sfp, 0, SFP_RTDI_LEN); 6307 rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa0, 84, 8, 0); 6308 if (!rval) { 6309 memcpy(rsp_payload->optical_prod_desc.date, 6310 sfp + 0, 6311 sizeof(rsp_payload->optical_prod_desc.date)); 6312 } 6313 } 6314 6315 send: 6316 ql_dbg(ql_dbg_init, vha, 0x0183, 6317 "Sending ELS Response to RDP Request...\n"); 6318 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0184, 6319 "-------- ELS RSP -------\n"); 6320 ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0185, 6321 rsp_els, sizeof(*rsp_els)); 6322 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0186, 6323 "-------- ELS RSP PAYLOAD -------\n"); 6324 ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0187, 6325 rsp_payload, rsp_payload_length); 6326 6327 rval = qla2x00_issue_iocb(vha, rsp_els, rsp_els_dma, 0); 6328 6329 if (rval) { 6330 ql_log(ql_log_warn, vha, 0x0188, 6331 "%s: iocb failed to execute -> %x\n", __func__, rval); 6332 } else if (rsp_els->comp_status) { 6333 ql_log(ql_log_warn, vha, 0x0189, 6334 "%s: iocb failed to complete -> completion=%#x subcode=(%#x,%#x)\n", 6335 __func__, rsp_els->comp_status, 6336 rsp_els->error_subcode_1, rsp_els->error_subcode_2); 6337 } else { 6338 ql_dbg(ql_dbg_init, vha, 0x018a, "%s: done.\n", __func__); 6339 } 6340 6341 dealloc: 6342 if (stat) 6343 dma_free_coherent(&ha->pdev->dev, sizeof(*stat), 6344 stat, stat_dma); 6345 if (sfp) 6346 dma_free_coherent(&ha->pdev->dev, SFP_RTDI_LEN, 6347 sfp, sfp_dma); 6348 if (rsp_payload) 6349 dma_free_coherent(&ha->pdev->dev, sizeof(*rsp_payload), 6350 rsp_payload, rsp_payload_dma); 6351 if (rsp_els) 6352 dma_free_coherent(&ha->pdev->dev, sizeof(*rsp_els), 6353 rsp_els, rsp_els_dma); 6354 } 6355 6356 void 6357 qla24xx_free_purex_item(struct purex_item *item) 6358 { 6359 if (item == &item->vha->default_item) 6360 memset(&item->vha->default_item, 0, sizeof(struct purex_item)); 6361 else 6362 kfree(item); 6363 } 6364 6365 void qla24xx_process_purex_list(struct purex_list *list) 6366 { 6367 struct list_head head = LIST_HEAD_INIT(head); 6368 struct purex_item *item, *next; 6369 ulong flags; 6370 6371 spin_lock_irqsave(&list->lock, flags); 6372 list_splice_init(&list->head, &head); 6373 spin_unlock_irqrestore(&list->lock, flags); 6374 6375 list_for_each_entry_safe(item, next, &head, list) { 6376 list_del(&item->list); 6377 item->process_item(item->vha, item); 6378 qla24xx_free_purex_item(item); 6379 } 6380 } 6381 6382 /* 6383 * 
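 * Release the IDC driver lock. The lock is considered ours only when
 * QLA83XX_DRIVER_LOCKID holds our port number; both the ownership read
 * and the release below are retried up to 10 times before giving up.
 *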
Context: task, can sleep 6384 */ 6385 void 6386 qla83xx_idc_unlock(scsi_qla_host_t *base_vha, uint16_t requester_id) 6387 { 6388 #if 0 6389 uint16_t options = (requester_id << 15) | BIT_7; 6390 #endif 6391 uint16_t retry; 6392 uint32_t data; 6393 struct qla_hw_data *ha = base_vha->hw; 6394 6395 might_sleep(); 6396 6397 /* IDC-unlock implementation using driver-unlock/lock-id 6398 * remote registers 6399 */ 6400 retry = 0; 6401 retry_unlock: 6402 if (qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, &data) 6403 == QLA_SUCCESS) { 6404 if (data == ha->portnum) { 6405 qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_UNLOCK, &data); 6406 /* Clearing lock-id by setting 0xff */ 6407 qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID, 0xff); 6408 } else if (retry < 10) { 6409 /* SV: XXX: IDC unlock retrying needed here? */ 6410 6411 /* Retry for IDC-unlock */ 6412 msleep(QLA83XX_WAIT_LOGIC_MS); 6413 retry++; 6414 ql_dbg(ql_dbg_p3p, base_vha, 0xb064, 6415 "Failed to release IDC lock, retrying=%d\n", retry); 6416 goto retry_unlock; 6417 } 6418 } else if (retry < 10) { 6419 /* Retry for IDC-unlock */ 6420 msleep(QLA83XX_WAIT_LOGIC_MS); 6421 retry++; 6422 ql_dbg(ql_dbg_p3p, base_vha, 0xb065, 6423 "Failed to read drv-lockid, retrying=%d\n", retry); 6424 goto retry_unlock; 6425 } 6426 6427 return; 6428 6429 #if 0 6430 /* XXX: IDC-unlock implementation using access-control mbx */ 6431 retry = 0; 6432 retry_unlock2: 6433 if (qla83xx_access_control(base_vha, options, 0, 0, NULL)) { 6434 if (retry < 10) { 6435 /* Retry for IDC-unlock */ 6436 msleep(QLA83XX_WAIT_LOGIC_MS); 6437 retry++; 6438 ql_dbg(ql_dbg_p3p, base_vha, 0xb066, 6439 "Failed to release IDC lock, retrying=%d\n", retry); 6440 goto retry_unlock2; 6441 } 6442 } 6443 6444 return; 6445 #endif 6446 } 6447 6448 int 6449 __qla83xx_set_drv_presence(scsi_qla_host_t *vha) 6450 { 6451 int rval = QLA_SUCCESS; 6452 struct qla_hw_data *ha = vha->hw; 6453 uint32_t drv_presence; 6454 6455 rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence); 6456 if (rval == QLA_SUCCESS) { 6457 drv_presence |= (1 << ha->portnum); 6458 rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRV_PRESENCE, 6459 drv_presence); 6460 } 6461 6462 return rval; 6463 } 6464 6465 int 6466 qla83xx_set_drv_presence(scsi_qla_host_t *vha) 6467 { 6468 int rval = QLA_SUCCESS; 6469 6470 qla83xx_idc_lock(vha, 0); 6471 rval = __qla83xx_set_drv_presence(vha); 6472 qla83xx_idc_unlock(vha, 0); 6473 6474 return rval; 6475 } 6476 6477 int 6478 __qla83xx_clear_drv_presence(scsi_qla_host_t *vha) 6479 { 6480 int rval = QLA_SUCCESS; 6481 struct qla_hw_data *ha = vha->hw; 6482 uint32_t drv_presence; 6483 6484 rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence); 6485 if (rval == QLA_SUCCESS) { 6486 drv_presence &= ~(1 << ha->portnum); 6487 rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRV_PRESENCE, 6488 drv_presence); 6489 } 6490 6491 return rval; 6492 } 6493 6494 int 6495 qla83xx_clear_drv_presence(scsi_qla_host_t *vha) 6496 { 6497 int rval = QLA_SUCCESS; 6498 6499 qla83xx_idc_lock(vha, 0); 6500 rval = __qla83xx_clear_drv_presence(vha); 6501 qla83xx_idc_unlock(vha, 0); 6502 6503 return rval; 6504 } 6505 6506 static void 6507 qla83xx_need_reset_handler(scsi_qla_host_t *vha) 6508 { 6509 struct qla_hw_data *ha = vha->hw; 6510 uint32_t drv_ack, drv_presence; 6511 unsigned long ack_timeout; 6512 6513 /* Wait for IDC ACK from all functions (DRV-ACK == DRV-PRESENCE) */ 6514 ack_timeout = jiffies + (ha->fcoe_reset_timeout * HZ); 6515 while (1) { 6516 qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack); 6517 
qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence); 6518 if ((drv_ack & drv_presence) == drv_presence) 6519 break; 6520 6521 if (time_after_eq(jiffies, ack_timeout)) { 6522 ql_log(ql_log_warn, vha, 0xb067, 6523 "RESET ACK TIMEOUT! drv_presence=0x%x " 6524 "drv_ack=0x%x\n", drv_presence, drv_ack); 6525 /* 6526 * The function(s) which did not ack in time are forced 6527 * to withdraw any further participation in the IDC 6528 * reset. 6529 */ 6530 if (drv_ack != drv_presence) 6531 qla83xx_wr_reg(vha, QLA83XX_IDC_DRV_PRESENCE, 6532 drv_ack); 6533 break; 6534 } 6535 6536 qla83xx_idc_unlock(vha, 0); 6537 msleep(1000); 6538 qla83xx_idc_lock(vha, 0); 6539 } 6540 6541 qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_COLD); 6542 ql_log(ql_log_info, vha, 0xb068, "HW State: COLD/RE-INIT.\n"); 6543 } 6544 6545 static int 6546 qla83xx_device_bootstrap(scsi_qla_host_t *vha) 6547 { 6548 int rval = QLA_SUCCESS; 6549 uint32_t idc_control; 6550 6551 qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_INITIALIZING); 6552 ql_log(ql_log_info, vha, 0xb069, "HW State: INITIALIZING.\n"); 6553 6554 /* Clearing IDC-Control Graceful-Reset Bit before resetting f/w */ 6555 __qla83xx_get_idc_control(vha, &idc_control); 6556 idc_control &= ~QLA83XX_IDC_GRACEFUL_RESET; 6557 __qla83xx_set_idc_control(vha, 0); 6558 6559 qla83xx_idc_unlock(vha, 0); 6560 rval = qla83xx_restart_nic_firmware(vha); 6561 qla83xx_idc_lock(vha, 0); 6562 6563 if (rval != QLA_SUCCESS) { 6564 ql_log(ql_log_fatal, vha, 0xb06a, 6565 "Failed to restart NIC f/w.\n"); 6566 qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_FAILED); 6567 ql_log(ql_log_info, vha, 0xb06b, "HW State: FAILED.\n"); 6568 } else { 6569 ql_dbg(ql_dbg_p3p, vha, 0xb06c, 6570 "Success in restarting nic f/w.\n"); 6571 qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_READY); 6572 ql_log(ql_log_info, vha, 0xb06d, "HW State: READY.\n"); 6573 } 6574 6575 return rval; 6576 } 6577 6578 /* Assumes idc_lock always held on entry */ 6579 int 6580 qla83xx_idc_state_handler(scsi_qla_host_t *base_vha) 6581 { 6582 struct qla_hw_data *ha = base_vha->hw; 6583 int rval = QLA_SUCCESS; 6584 unsigned long dev_init_timeout; 6585 uint32_t dev_state; 6586 6587 /* Wait for MAX-INIT-TIMEOUT for the device to go ready */ 6588 dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout * HZ); 6589 6590 while (1) { 6591 6592 if (time_after_eq(jiffies, dev_init_timeout)) { 6593 ql_log(ql_log_warn, base_vha, 0xb06e, 6594 "Initialization TIMEOUT!\n"); 6595 /* Init timeout. Disable further NIC Core 6596 * communication. 
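 * The dev-state is forced to QLA8XXX_DEV_FAILED below so that peer
 * functions observe the failure rather than waiting further.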
6597 */ 6598 qla83xx_wr_reg(base_vha, QLA83XX_IDC_DEV_STATE, 6599 QLA8XXX_DEV_FAILED); 6600 ql_log(ql_log_info, base_vha, 0xb06f, 6601 "HW State: FAILED.\n"); 6602 } 6603 6604 qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state); 6605 switch (dev_state) { 6606 case QLA8XXX_DEV_READY: 6607 if (ha->flags.nic_core_reset_owner) 6608 qla83xx_idc_audit(base_vha, 6609 IDC_AUDIT_COMPLETION); 6610 ha->flags.nic_core_reset_owner = 0; 6611 ql_dbg(ql_dbg_p3p, base_vha, 0xb070, 6612 "Reset_owner reset by 0x%x.\n", 6613 ha->portnum); 6614 goto exit; 6615 case QLA8XXX_DEV_COLD: 6616 if (ha->flags.nic_core_reset_owner) 6617 rval = qla83xx_device_bootstrap(base_vha); 6618 else { 6619 /* Wait for AEN to change device-state */ 6620 qla83xx_idc_unlock(base_vha, 0); 6621 msleep(1000); 6622 qla83xx_idc_lock(base_vha, 0); 6623 } 6624 break; 6625 case QLA8XXX_DEV_INITIALIZING: 6626 /* Wait for AEN to change device-state */ 6627 qla83xx_idc_unlock(base_vha, 0); 6628 msleep(1000); 6629 qla83xx_idc_lock(base_vha, 0); 6630 break; 6631 case QLA8XXX_DEV_NEED_RESET: 6632 if (!ql2xdontresethba && ha->flags.nic_core_reset_owner) 6633 qla83xx_need_reset_handler(base_vha); 6634 else { 6635 /* Wait for AEN to change device-state */ 6636 qla83xx_idc_unlock(base_vha, 0); 6637 msleep(1000); 6638 qla83xx_idc_lock(base_vha, 0); 6639 } 6640 /* reset timeout value after need reset handler */ 6641 dev_init_timeout = jiffies + 6642 (ha->fcoe_dev_init_timeout * HZ); 6643 break; 6644 case QLA8XXX_DEV_NEED_QUIESCENT: 6645 /* XXX: DEBUG for now */ 6646 qla83xx_idc_unlock(base_vha, 0); 6647 msleep(1000); 6648 qla83xx_idc_lock(base_vha, 0); 6649 break; 6650 case QLA8XXX_DEV_QUIESCENT: 6651 /* XXX: DEBUG for now */ 6652 if (ha->flags.quiesce_owner) 6653 goto exit; 6654 6655 qla83xx_idc_unlock(base_vha, 0); 6656 msleep(1000); 6657 qla83xx_idc_lock(base_vha, 0); 6658 dev_init_timeout = jiffies + 6659 (ha->fcoe_dev_init_timeout * HZ); 6660 break; 6661 case QLA8XXX_DEV_FAILED: 6662 if (ha->flags.nic_core_reset_owner) 6663 qla83xx_idc_audit(base_vha, 6664 IDC_AUDIT_COMPLETION); 6665 ha->flags.nic_core_reset_owner = 0; 6666 __qla83xx_clear_drv_presence(base_vha); 6667 qla83xx_idc_unlock(base_vha, 0); 6668 qla8xxx_dev_failed_handler(base_vha); 6669 rval = QLA_FUNCTION_FAILED; 6670 qla83xx_idc_lock(base_vha, 0); 6671 goto exit; 6672 case QLA8XXX_BAD_VALUE: 6673 qla83xx_idc_unlock(base_vha, 0); 6674 msleep(1000); 6675 qla83xx_idc_lock(base_vha, 0); 6676 break; 6677 default: 6678 ql_log(ql_log_warn, base_vha, 0xb071, 6679 "Unknown Device State: %x.\n", dev_state); 6680 qla83xx_idc_unlock(base_vha, 0); 6681 qla8xxx_dev_failed_handler(base_vha); 6682 rval = QLA_FUNCTION_FAILED; 6683 qla83xx_idc_lock(base_vha, 0); 6684 goto exit; 6685 } 6686 } 6687 6688 exit: 6689 return rval; 6690 } 6691 6692 void 6693 qla2x00_disable_board_on_pci_error(struct work_struct *work) 6694 { 6695 struct qla_hw_data *ha = container_of(work, struct qla_hw_data, 6696 board_disable); 6697 struct pci_dev *pdev = ha->pdev; 6698 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 6699 6700 ql_log(ql_log_warn, base_vha, 0x015b, 6701 "Disabling adapter.\n"); 6702 6703 if (!atomic_read(&pdev->enable_cnt)) { 6704 ql_log(ql_log_info, base_vha, 0xfffc, 6705 "PCI device disabled, no action req for PCI error=%lx\n", 6706 base_vha->pci_flags); 6707 return; 6708 } 6709 6710 /* 6711 * if UNLOADING flag is already set, then continue unload, 6712 * where it was set first. 
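 * test_and_set_bit() checks and sets the flag atomically, so only the
 * first caller proceeds with the teardown below.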
6713 */ 6714 if (test_and_set_bit(UNLOADING, &base_vha->dpc_flags)) 6715 return; 6716 6717 qla2x00_wait_for_sess_deletion(base_vha); 6718 6719 qla2x00_delete_all_vps(ha, base_vha); 6720 6721 qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16); 6722 6723 qla2x00_dfs_remove(base_vha); 6724 6725 qla84xx_put_chip(base_vha); 6726 6727 if (base_vha->timer_active) 6728 qla2x00_stop_timer(base_vha); 6729 6730 base_vha->flags.online = 0; 6731 6732 qla2x00_destroy_deferred_work(ha); 6733 6734 /* 6735 * Do not try to stop beacon blink as it will issue a mailbox 6736 * command. 6737 */ 6738 qla2x00_free_sysfs_attr(base_vha, false); 6739 6740 fc_remove_host(base_vha->host); 6741 6742 scsi_remove_host(base_vha->host); 6743 6744 base_vha->flags.init_done = 0; 6745 qla25xx_delete_queues(base_vha); 6746 qla2x00_free_fcports(base_vha); 6747 qla2x00_free_irqs(base_vha); 6748 qla2x00_mem_free(ha); 6749 qla82xx_md_free(base_vha); 6750 qla2x00_free_queues(ha); 6751 6752 qla2x00_unmap_iobases(ha); 6753 6754 pci_release_selected_regions(ha->pdev, ha->bars); 6755 pci_disable_pcie_error_reporting(pdev); 6756 pci_disable_device(pdev); 6757 6758 /* 6759 * Let qla2x00_remove_one clean up qla_hw_data on device removal. 6760 */ 6761 } 6762 6763 /************************************************************************** 6764 * qla2x00_do_dpc 6765 * This kernel thread is a task that is scheduled by the interrupt handler 6766 * to perform the background processing for interrupts. 6767 * 6768 * Notes: 6769 * This task always runs in the context of a kernel thread. It 6770 * is kicked off by the driver's detect code and starts 6771 * up one per adapter. It immediately goes to sleep and waits for 6772 * some fibre event. When either the interrupt handler or 6773 * the timer routine detects an event, it will set one of the task 6774 * bits and then wake us up.
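 *
 * A minimal sketch of the producer side, using calls that appear
 * elsewhere in this file: set a work bit in dpc_flags, then wake the
 * thread.
 *
 *	set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
 *	qla2xxx_wake_dpc(base_vha);
 *
 * Each handler below clears the bit it services (test_and_clear_bit)
 * before doing the work.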
6775 **************************************************************************/ 6776 static int 6777 qla2x00_do_dpc(void *data) 6778 { 6779 scsi_qla_host_t *base_vha; 6780 struct qla_hw_data *ha; 6781 uint32_t online; 6782 struct qla_qpair *qpair; 6783 6784 ha = (struct qla_hw_data *)data; 6785 base_vha = pci_get_drvdata(ha->pdev); 6786 6787 set_user_nice(current, MIN_NICE); 6788 6789 set_current_state(TASK_INTERRUPTIBLE); 6790 while (!kthread_should_stop()) { 6791 ql_dbg(ql_dbg_dpc, base_vha, 0x4000, 6792 "DPC handler sleeping.\n"); 6793 6794 schedule(); 6795 6796 if (test_and_clear_bit(DO_EEH_RECOVERY, &base_vha->dpc_flags)) 6797 qla_pci_set_eeh_busy(base_vha); 6798 6799 if (!base_vha->flags.init_done || ha->flags.mbox_busy) 6800 goto end_loop; 6801 6802 if (ha->flags.eeh_busy) { 6803 ql_dbg(ql_dbg_dpc, base_vha, 0x4003, 6804 "eeh_busy=%d.\n", ha->flags.eeh_busy); 6805 goto end_loop; 6806 } 6807 6808 ha->dpc_active = 1; 6809 6810 ql_dbg(ql_dbg_dpc + ql_dbg_verbose, base_vha, 0x4001, 6811 "DPC handler waking up, dpc_flags=0x%lx.\n", 6812 base_vha->dpc_flags); 6813 6814 if (test_bit(UNLOADING, &base_vha->dpc_flags)) 6815 break; 6816 6817 if (IS_P3P_TYPE(ha)) { 6818 if (IS_QLA8044(ha)) { 6819 if (test_and_clear_bit(ISP_UNRECOVERABLE, 6820 &base_vha->dpc_flags)) { 6821 qla8044_idc_lock(ha); 6822 qla8044_wr_direct(base_vha, 6823 QLA8044_CRB_DEV_STATE_INDEX, 6824 QLA8XXX_DEV_FAILED); 6825 qla8044_idc_unlock(ha); 6826 ql_log(ql_log_info, base_vha, 0x4004, 6827 "HW State: FAILED.\n"); 6828 qla8044_device_state_handler(base_vha); 6829 continue; 6830 } 6831 6832 } else { 6833 if (test_and_clear_bit(ISP_UNRECOVERABLE, 6834 &base_vha->dpc_flags)) { 6835 qla82xx_idc_lock(ha); 6836 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 6837 QLA8XXX_DEV_FAILED); 6838 qla82xx_idc_unlock(ha); 6839 ql_log(ql_log_info, base_vha, 0x0151, 6840 "HW State: FAILED.\n"); 6841 qla82xx_device_state_handler(base_vha); 6842 continue; 6843 } 6844 } 6845 6846 if (test_and_clear_bit(FCOE_CTX_RESET_NEEDED, 6847 &base_vha->dpc_flags)) { 6848 6849 ql_dbg(ql_dbg_dpc, base_vha, 0x4005, 6850 "FCoE context reset scheduled.\n"); 6851 if (!(test_and_set_bit(ABORT_ISP_ACTIVE, 6852 &base_vha->dpc_flags))) { 6853 if (qla82xx_fcoe_ctx_reset(base_vha)) { 6854 /* FCoE-ctx reset failed. 6855 * Escalate to chip-reset 6856 */ 6857 set_bit(ISP_ABORT_NEEDED, 6858 &base_vha->dpc_flags); 6859 } 6860 clear_bit(ABORT_ISP_ACTIVE, 6861 &base_vha->dpc_flags); 6862 } 6863 6864 ql_dbg(ql_dbg_dpc, base_vha, 0x4006, 6865 "FCoE context reset end.\n"); 6866 } 6867 } else if (IS_QLAFX00(ha)) { 6868 if (test_and_clear_bit(ISP_UNRECOVERABLE, 6869 &base_vha->dpc_flags)) { 6870 ql_dbg(ql_dbg_dpc, base_vha, 0x4020, 6871 "Firmware Reset Recovery\n"); 6872 if (qlafx00_reset_initialize(base_vha)) { 6873 /* Failed. Abort isp later. 
*/ 6874 if (!test_bit(UNLOADING, 6875 &base_vha->dpc_flags)) { 6876 set_bit(ISP_UNRECOVERABLE, 6877 &base_vha->dpc_flags); 6878 ql_dbg(ql_dbg_dpc, base_vha, 6879 0x4021, 6880 "Reset Recovery Failed\n"); 6881 } 6882 } 6883 } 6884 6885 if (test_and_clear_bit(FX00_TARGET_SCAN, 6886 &base_vha->dpc_flags)) { 6887 ql_dbg(ql_dbg_dpc, base_vha, 0x4022, 6888 "ISPFx00 Target Scan scheduled\n"); 6889 if (qlafx00_rescan_isp(base_vha)) { 6890 if (!test_bit(UNLOADING, 6891 &base_vha->dpc_flags)) 6892 set_bit(ISP_UNRECOVERABLE, 6893 &base_vha->dpc_flags); 6894 ql_dbg(ql_dbg_dpc, base_vha, 0x401e, 6895 "ISPFx00 Target Scan Failed\n"); 6896 } 6897 ql_dbg(ql_dbg_dpc, base_vha, 0x401f, 6898 "ISPFx00 Target Scan End\n"); 6899 } 6900 if (test_and_clear_bit(FX00_HOST_INFO_RESEND, 6901 &base_vha->dpc_flags)) { 6902 ql_dbg(ql_dbg_dpc, base_vha, 0x4023, 6903 "ISPFx00 Host Info resend scheduled\n"); 6904 qlafx00_fx_disc(base_vha, 6905 &base_vha->hw->mr.fcport, 6906 FXDISC_REG_HOST_INFO); 6907 } 6908 } 6909 6910 if (test_and_clear_bit(DETECT_SFP_CHANGE, 6911 &base_vha->dpc_flags)) { 6912 /* Semantic: 6913 * - NO-OP -- await next ISP-ABORT. Preferred method 6914 * to minimize disruptions that will occur 6915 * when a forced chip-reset occurs. 6916 * - Force -- ISP-ABORT scheduled. 6917 */ 6918 /* set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags); */ 6919 } 6920 6921 if (test_and_clear_bit 6922 (ISP_ABORT_NEEDED, &base_vha->dpc_flags) && 6923 !test_bit(UNLOADING, &base_vha->dpc_flags)) { 6924 bool do_reset = true; 6925 6926 switch (base_vha->qlini_mode) { 6927 case QLA2XXX_INI_MODE_ENABLED: 6928 break; 6929 case QLA2XXX_INI_MODE_DISABLED: 6930 if (!qla_tgt_mode_enabled(base_vha) && 6931 !ha->flags.fw_started) 6932 do_reset = false; 6933 break; 6934 case QLA2XXX_INI_MODE_DUAL: 6935 if (!qla_dual_mode_enabled(base_vha) && 6936 !ha->flags.fw_started) 6937 do_reset = false; 6938 break; 6939 default: 6940 break; 6941 } 6942 6943 if (do_reset && !(test_and_set_bit(ABORT_ISP_ACTIVE, 6944 &base_vha->dpc_flags))) { 6945 base_vha->flags.online = 1; 6946 ql_dbg(ql_dbg_dpc, base_vha, 0x4007, 6947 "ISP abort scheduled.\n"); 6948 if (ha->isp_ops->abort_isp(base_vha)) { 6949 /* failed. 
retry later */ 6950 set_bit(ISP_ABORT_NEEDED, 6951 &base_vha->dpc_flags); 6952 } 6953 clear_bit(ABORT_ISP_ACTIVE, 6954 &base_vha->dpc_flags); 6955 ql_dbg(ql_dbg_dpc, base_vha, 0x4008, 6956 "ISP abort end.\n"); 6957 } 6958 } 6959 6960 if (test_bit(PROCESS_PUREX_IOCB, &base_vha->dpc_flags)) { 6961 if (atomic_read(&base_vha->loop_state) == LOOP_READY) { 6962 qla24xx_process_purex_list 6963 (&base_vha->purex_list); 6964 clear_bit(PROCESS_PUREX_IOCB, 6965 &base_vha->dpc_flags); 6966 } 6967 } 6968 6969 if (test_and_clear_bit(FCPORT_UPDATE_NEEDED, 6970 &base_vha->dpc_flags)) { 6971 qla2x00_update_fcports(base_vha); 6972 } 6973 6974 if (IS_QLAFX00(ha)) 6975 goto loop_resync_check; 6976 6977 if (test_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags)) { 6978 ql_dbg(ql_dbg_dpc, base_vha, 0x4009, 6979 "Quiescence mode scheduled.\n"); 6980 if (IS_P3P_TYPE(ha)) { 6981 if (IS_QLA82XX(ha)) 6982 qla82xx_device_state_handler(base_vha); 6983 if (IS_QLA8044(ha)) 6984 qla8044_device_state_handler(base_vha); 6985 clear_bit(ISP_QUIESCE_NEEDED, 6986 &base_vha->dpc_flags); 6987 if (!ha->flags.quiesce_owner) { 6988 qla2x00_perform_loop_resync(base_vha); 6989 if (IS_QLA82XX(ha)) { 6990 qla82xx_idc_lock(ha); 6991 qla82xx_clear_qsnt_ready( 6992 base_vha); 6993 qla82xx_idc_unlock(ha); 6994 } else if (IS_QLA8044(ha)) { 6995 qla8044_idc_lock(ha); 6996 qla8044_clear_qsnt_ready( 6997 base_vha); 6998 qla8044_idc_unlock(ha); 6999 } 7000 } 7001 } else { 7002 clear_bit(ISP_QUIESCE_NEEDED, 7003 &base_vha->dpc_flags); 7004 qla2x00_quiesce_io(base_vha); 7005 } 7006 ql_dbg(ql_dbg_dpc, base_vha, 0x400a, 7007 "Quiescence mode end.\n"); 7008 } 7009 7010 if (test_and_clear_bit(RESET_MARKER_NEEDED, 7011 &base_vha->dpc_flags) && 7012 (!(test_and_set_bit(RESET_ACTIVE, &base_vha->dpc_flags)))) { 7013 7014 ql_dbg(ql_dbg_dpc, base_vha, 0x400b, 7015 "Reset marker scheduled.\n"); 7016 qla2x00_rst_aen(base_vha); 7017 clear_bit(RESET_ACTIVE, &base_vha->dpc_flags); 7018 ql_dbg(ql_dbg_dpc, base_vha, 0x400c, 7019 "Reset marker end.\n"); 7020 } 7021 7022 /* Retry each device up to login retry count */ 7023 if (test_bit(RELOGIN_NEEDED, &base_vha->dpc_flags) && 7024 !test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags) && 7025 atomic_read(&base_vha->loop_state) != LOOP_DOWN) { 7026 7027 if (!base_vha->relogin_jif || 7028 time_after_eq(jiffies, base_vha->relogin_jif)) { 7029 base_vha->relogin_jif = jiffies + HZ; 7030 clear_bit(RELOGIN_NEEDED, &base_vha->dpc_flags); 7031 7032 ql_dbg(ql_dbg_disc, base_vha, 0x400d, 7033 "Relogin scheduled.\n"); 7034 qla24xx_post_relogin_work(base_vha); 7035 } 7036 } 7037 loop_resync_check: 7038 if (test_and_clear_bit(LOOP_RESYNC_NEEDED, 7039 &base_vha->dpc_flags)) { 7040 7041 ql_dbg(ql_dbg_dpc, base_vha, 0x400f, 7042 "Loop resync scheduled.\n"); 7043 7044 if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, 7045 &base_vha->dpc_flags))) { 7046 7047 qla2x00_loop_resync(base_vha); 7048 7049 clear_bit(LOOP_RESYNC_ACTIVE, 7050 &base_vha->dpc_flags); 7051 } 7052 7053 ql_dbg(ql_dbg_dpc, base_vha, 0x4010, 7054 "Loop resync end.\n"); 7055 } 7056 7057 if (IS_QLAFX00(ha)) 7058 goto intr_on_check; 7059 7060 if (test_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags) && 7061 atomic_read(&base_vha->loop_state) == LOOP_READY) { 7062 clear_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags); 7063 qla2xxx_flash_npiv_conf(base_vha); 7064 } 7065 7066 intr_on_check: 7067 if (!ha->interrupts_on) 7068 ha->isp_ops->enable_intrs(ha); 7069 7070 if (test_and_clear_bit(BEACON_BLINK_NEEDED, 7071 &base_vha->dpc_flags)) { 7072 if (ha->beacon_blink_led == 1) 7073 
ha->isp_ops->beacon_blink(base_vha); 7074 } 7075 7076 /* qpair online check */ 7077 if (test_and_clear_bit(QPAIR_ONLINE_CHECK_NEEDED, 7078 &base_vha->dpc_flags)) { 7079 if (ha->flags.eeh_busy || 7080 ha->flags.pci_channel_io_perm_failure) 7081 online = 0; 7082 else 7083 online = 1; 7084 7085 mutex_lock(&ha->mq_lock); 7086 list_for_each_entry(qpair, &base_vha->qp_list, 7087 qp_list_elem) 7088 qpair->online = online; 7089 mutex_unlock(&ha->mq_lock); 7090 } 7091 7092 if (test_and_clear_bit(SET_ZIO_THRESHOLD_NEEDED, 7093 &base_vha->dpc_flags)) { 7094 u16 threshold = ha->nvme_last_rptd_aen + ha->last_zio_threshold; 7095 7096 if (threshold > ha->orig_fw_xcb_count) 7097 threshold = ha->orig_fw_xcb_count; 7098 7099 ql_log(ql_log_info, base_vha, 0xffffff, 7100 "SET ZIO Activity exchange threshold to %d.\n", 7101 threshold); 7102 if (qla27xx_set_zio_threshold(base_vha, threshold)) { 7103 ql_log(ql_log_info, base_vha, 0xffffff, 7104 "Unable to SET ZIO Activity exchange threshold to %d.\n", 7105 threshold); 7106 } 7107 } 7108 7109 if (!IS_QLAFX00(ha)) 7110 qla2x00_do_dpc_all_vps(base_vha); 7111 7112 if (test_and_clear_bit(N2N_LINK_RESET, 7113 &base_vha->dpc_flags)) { 7114 qla2x00_lip_reset(base_vha); 7115 } 7116 7117 if (test_bit(HEARTBEAT_CHK, &base_vha->dpc_flags)) { 7118 /* 7119 * if there is a mb in progress then that's 7120 * enough of a check to see if fw is still ticking. 7121 */ 7122 if (!ha->flags.mbox_busy && base_vha->flags.init_done) 7123 qla_no_op_mb(base_vha); 7124 7125 clear_bit(HEARTBEAT_CHK, &base_vha->dpc_flags); 7126 } 7127 7128 ha->dpc_active = 0; 7129 end_loop: 7130 set_current_state(TASK_INTERRUPTIBLE); 7131 } /* End of while (!kthread_should_stop()) */ 7132 __set_current_state(TASK_RUNNING); 7133 7134 ql_dbg(ql_dbg_dpc, base_vha, 0x4011, 7135 "DPC handler exiting.\n"); 7136 7137 /* 7138 * Make sure that nobody tries to wake us up again. 7139 */ 7140 ha->dpc_active = 0; 7141 7142 /* Cleanup any residual CTX SRBs. */ 7143 qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16); 7144 7145 return 0; 7146 } 7147 7148 void 7149 qla2xxx_wake_dpc(struct scsi_qla_host *vha) 7150 { 7151 struct qla_hw_data *ha = vha->hw; 7152 struct task_struct *t = ha->dpc_thread; 7153 7154 if (!test_bit(UNLOADING, &vha->dpc_flags) && t) 7155 wake_up_process(t); 7156 } 7157 7158 /* 7159 * qla2x00_rst_aen 7160 * Processes asynchronous reset. 7161 * 7162 * Input: 7163 * vha = adapter block pointer. 7164 */ 7165 static void 7166 qla2x00_rst_aen(scsi_qla_host_t *vha) 7167 { 7168 if (vha->flags.online && !vha->flags.reset_active && 7169 !atomic_read(&vha->loop_down_timer) && 7170 !(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))) { 7171 do { 7172 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); 7173 7174 /* 7175 * Issue marker command only when we are going to start 7176 * the I/O. 
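 * Setting vha->marker_needed here is sufficient; the I/O start path
 * (presumably qla2x00_start_scsi() and friends) checks the flag and
 * emits a marker IOCB before issuing the next command.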
7177 */ 7178 vha->marker_needed = 1; 7179 } while (!atomic_read(&vha->loop_down_timer) && 7180 (test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags))); 7181 } 7182 } 7183 7184 static bool qla_do_heartbeat(struct scsi_qla_host *vha) 7185 { 7186 u64 cmd_cnt, prev_cmd_cnt; 7187 bool do_hb = false; 7188 struct qla_hw_data *ha = vha->hw; 7189 int i; 7190 7191 /* if cmds are still pending down in fw, then do hb */ 7192 if (ha->base_qpair->cmd_cnt != ha->base_qpair->cmd_completion_cnt) { 7193 do_hb = true; 7194 goto skip; 7195 } 7196 7197 for (i = 0; i < ha->max_qpairs; i++) { 7198 if (ha->queue_pair_map[i] && 7199 ha->queue_pair_map[i]->cmd_cnt != 7200 ha->queue_pair_map[i]->cmd_completion_cnt) { 7201 do_hb = true; 7202 break; 7203 } 7204 } 7205 7206 skip: 7207 prev_cmd_cnt = ha->prev_cmd_cnt; 7208 cmd_cnt = ha->base_qpair->cmd_cnt; 7209 for (i = 0; i < ha->max_qpairs; i++) { 7210 if (ha->queue_pair_map[i]) 7211 cmd_cnt += ha->queue_pair_map[i]->cmd_cnt; 7212 } 7213 ha->prev_cmd_cnt = cmd_cnt; 7214 7215 if (!do_hb && ((cmd_cnt - prev_cmd_cnt) > 50)) 7216 /* 7217 * IOs are completing before periodic hb check. 7218 * IOs seems to be running, do hb for sanity check. 7219 */ 7220 do_hb = true; 7221 7222 return do_hb; 7223 } 7224 7225 static void qla_heart_beat(struct scsi_qla_host *vha) 7226 { 7227 if (vha->vp_idx) 7228 return; 7229 7230 if (vha->hw->flags.eeh_busy || qla2x00_chip_is_down(vha)) 7231 return; 7232 7233 if (qla_do_heartbeat(vha)) { 7234 set_bit(HEARTBEAT_CHK, &vha->dpc_flags); 7235 qla2xxx_wake_dpc(vha); 7236 } 7237 } 7238 7239 /************************************************************************** 7240 * qla2x00_timer 7241 * 7242 * Description: 7243 * One second timer 7244 * 7245 * Context: Interrupt 7246 ***************************************************************************/ 7247 void 7248 qla2x00_timer(struct timer_list *t) 7249 { 7250 scsi_qla_host_t *vha = from_timer(vha, t, timer); 7251 unsigned long cpu_flags = 0; 7252 int start_dpc = 0; 7253 int index; 7254 srb_t *sp; 7255 uint16_t w; 7256 struct qla_hw_data *ha = vha->hw; 7257 struct req_que *req; 7258 unsigned long flags; 7259 fc_port_t *fcport = NULL; 7260 7261 if (ha->flags.eeh_busy) { 7262 ql_dbg(ql_dbg_timer, vha, 0x6000, 7263 "EEH = %d, restarting timer.\n", 7264 ha->flags.eeh_busy); 7265 qla2x00_restart_timer(vha, WATCH_INTERVAL); 7266 return; 7267 } 7268 7269 /* 7270 * Hardware read to raise pending EEH errors during mailbox waits. If 7271 * the read returns -1 then disable the board. 7272 */ 7273 if (!pci_channel_offline(ha->pdev)) { 7274 pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w); 7275 qla2x00_check_reg16_for_disconnect(vha, w); 7276 } 7277 7278 /* Make sure qla82xx_watchdog is run only for physical port */ 7279 if (!vha->vp_idx && IS_P3P_TYPE(ha)) { 7280 if (test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) 7281 start_dpc++; 7282 if (IS_QLA82XX(ha)) 7283 qla82xx_watchdog(vha); 7284 else if (IS_QLA8044(ha)) 7285 qla8044_watchdog(vha); 7286 } 7287 7288 if (!vha->vp_idx && IS_QLAFX00(ha)) 7289 qlafx00_timer_routine(vha); 7290 7291 if (vha->link_down_time < QLA2XX_MAX_LINK_DOWN_TIME) 7292 vha->link_down_time++; 7293 7294 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); 7295 list_for_each_entry(fcport, &vha->vp_fcports, list) { 7296 if (fcport->tgt_link_down_time < QLA2XX_MAX_LINK_DOWN_TIME) 7297 fcport->tgt_link_down_time++; 7298 } 7299 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 7300 7301 /* Loop down handler. 
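 * Runs once per timer tick while the loop is down: when loop_down_timer
 * hits loop_down_abort_time the queues are aborted and, for FCP2
 * devices, an ISP abort is scheduled; when the counter finally reaches
 * zero (roughly 4 minutes by default) the adapter is reinitialized,
 * unless no cable is present.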
*/ 7302 if (atomic_read(&vha->loop_down_timer) > 0 && 7303 !(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) && 7304 !(test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags)) 7305 && vha->flags.online) { 7306 7307 if (atomic_read(&vha->loop_down_timer) == 7308 vha->loop_down_abort_time) { 7309 7310 ql_log(ql_log_info, vha, 0x6008, 7311 "Loop down - aborting the queues before time expires.\n"); 7312 7313 if (!IS_QLA2100(ha) && vha->link_down_timeout) 7314 atomic_set(&vha->loop_state, LOOP_DEAD); 7315 7316 /* 7317 * Schedule an ISP abort to return any FCP2-device 7318 * commands. 7319 */ 7320 /* NPIV - scan physical port only */ 7321 if (!vha->vp_idx) { 7322 spin_lock_irqsave(&ha->hardware_lock, 7323 cpu_flags); 7324 req = ha->req_q_map[0]; 7325 for (index = 1; 7326 index < req->num_outstanding_cmds; 7327 index++) { 7328 fc_port_t *sfcp; 7329 7330 sp = req->outstanding_cmds[index]; 7331 if (!sp) 7332 continue; 7333 if (sp->cmd_type != TYPE_SRB) 7334 continue; 7335 if (sp->type != SRB_SCSI_CMD) 7336 continue; 7337 sfcp = sp->fcport; 7338 if (!(sfcp->flags & FCF_FCP2_DEVICE)) 7339 continue; 7340 7341 if (IS_QLA82XX(ha)) 7342 set_bit(FCOE_CTX_RESET_NEEDED, 7343 &vha->dpc_flags); 7344 else 7345 set_bit(ISP_ABORT_NEEDED, 7346 &vha->dpc_flags); 7347 break; 7348 } 7349 spin_unlock_irqrestore(&ha->hardware_lock, 7350 cpu_flags); 7351 } 7352 start_dpc++; 7353 } 7354 7355 /* if the loop has been down for 4 minutes, reinit adapter */ 7356 if (atomic_dec_and_test(&vha->loop_down_timer) != 0) { 7357 if (!(vha->device_flags & DFLG_NO_CABLE)) { 7358 ql_log(ql_log_warn, vha, 0x6009, 7359 "Loop down - aborting ISP.\n"); 7360 7361 if (IS_QLA82XX(ha)) 7362 set_bit(FCOE_CTX_RESET_NEEDED, 7363 &vha->dpc_flags); 7364 else 7365 set_bit(ISP_ABORT_NEEDED, 7366 &vha->dpc_flags); 7367 } 7368 } 7369 ql_dbg(ql_dbg_timer, vha, 0x600a, 7370 "Loop down - seconds remaining %d.\n", 7371 atomic_read(&vha->loop_down_timer)); 7372 } 7373 /* Check if beacon LED needs to be blinked for physical host only */ 7374 if (!vha->vp_idx && (ha->beacon_blink_led == 1)) { 7375 /* There is no beacon_blink function for ISP82xx */ 7376 if (!IS_P3P_TYPE(ha)) { 7377 set_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags); 7378 start_dpc++; 7379 } 7380 } 7381 7382 /* check if edif running */ 7383 if (vha->hw->flags.edif_enabled) 7384 qla_edif_timer(vha); 7385 7386 /* Process any deferred work. */ 7387 if (!list_empty(&vha->work_list)) { 7388 unsigned long flags; 7389 bool q = false; 7390 7391 spin_lock_irqsave(&vha->work_lock, flags); 7392 if (!test_and_set_bit(IOCB_WORK_ACTIVE, &vha->dpc_flags)) 7393 q = true; 7394 spin_unlock_irqrestore(&vha->work_lock, flags); 7395 if (q) 7396 queue_work(vha->hw->wq, &vha->iocb_work); 7397 } 7398 7399 /* 7400 * FC-NVME 7401 * see if the active AEN count has changed from what was last reported. 
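 * If it has changed, SET_ZIO_THRESHOLD_NEEDED is set below and the DPC
 * thread programs the new value via qla27xx_set_zio_threshold().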
7402 */ 7403 index = atomic_read(&ha->nvme_active_aen_cnt); 7404 if (!vha->vp_idx && 7405 (index != ha->nvme_last_rptd_aen) && 7406 ha->zio_mode == QLA_ZIO_MODE_6 && 7407 !ha->flags.host_shutting_down) { 7408 ha->nvme_last_rptd_aen = atomic_read(&ha->nvme_active_aen_cnt); 7409 ql_log(ql_log_info, vha, 0x3002, 7410 "nvme: Sched: Set ZIO exchange threshold to %d.\n", 7411 ha->nvme_last_rptd_aen); 7412 set_bit(SET_ZIO_THRESHOLD_NEEDED, &vha->dpc_flags); 7413 start_dpc++; 7414 } 7415 7416 if (!vha->vp_idx && 7417 atomic_read(&ha->zio_threshold) != ha->last_zio_threshold && 7418 IS_ZIO_THRESHOLD_CAPABLE(ha)) { 7419 ql_log(ql_log_info, vha, 0x3002, 7420 "Sched: Set ZIO exchange threshold to %d.\n", 7421 ha->last_zio_threshold); 7422 ha->last_zio_threshold = atomic_read(&ha->zio_threshold); 7423 set_bit(SET_ZIO_THRESHOLD_NEEDED, &vha->dpc_flags); 7424 start_dpc++; 7425 } 7426 7427 /* Schedule the DPC routine if needed */ 7428 if ((test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) || 7429 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) || 7430 test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags) || 7431 start_dpc || 7432 test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) || 7433 test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags) || 7434 test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags) || 7435 test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) || 7436 test_bit(VP_DPC_NEEDED, &vha->dpc_flags) || 7437 test_bit(RELOGIN_NEEDED, &vha->dpc_flags) || 7438 test_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags))) { 7439 ql_dbg(ql_dbg_timer, vha, 0x600b, 7440 "isp_abort_needed=%d loop_resync_needed=%d " 7441 "fcport_update_needed=%d start_dpc=%d " 7442 "reset_marker_needed=%d", 7443 test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags), 7444 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags), 7445 test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags), 7446 start_dpc, 7447 test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags)); 7448 ql_dbg(ql_dbg_timer, vha, 0x600c, 7449 "beacon_blink_needed=%d isp_unrecoverable=%d " 7450 "fcoe_ctx_reset_needed=%d vp_dpc_needed=%d " 7451 "relogin_needed=%d, Process_purex_iocb=%d.\n", 7452 test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags), 7453 test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags), 7454 test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags), 7455 test_bit(VP_DPC_NEEDED, &vha->dpc_flags), 7456 test_bit(RELOGIN_NEEDED, &vha->dpc_flags), 7457 test_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags)); 7458 qla2xxx_wake_dpc(vha); 7459 } 7460 7461 qla_heart_beat(vha); 7462 7463 qla2x00_restart_timer(vha, WATCH_INTERVAL); 7464 } 7465 7466 /* Firmware interface routines. 
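 *
 * Images are fetched once via request_firmware() and cached in
 * qla_fw_blobs[] under qla_fw_lock until qla2x00_release_firmware()
 * runs at module teardown. A hedged caller-side sketch, where
 * parse_fw() stands in for a hypothetical consumer of the image:
 *
 *	struct fw_blob *blob = qla2x00_request_firmware(vha);
 *
 *	if (blob)
 *		parse_fw(blob->fw->data, blob->fw->size);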
*/ 7467 7468 #define FW_ISP21XX 0 7469 #define FW_ISP22XX 1 7470 #define FW_ISP2300 2 7471 #define FW_ISP2322 3 7472 #define FW_ISP24XX 4 7473 #define FW_ISP25XX 5 7474 #define FW_ISP81XX 6 7475 #define FW_ISP82XX 7 7476 #define FW_ISP2031 8 7477 #define FW_ISP8031 9 7478 #define FW_ISP27XX 10 7479 #define FW_ISP28XX 11 7480 7481 #define FW_FILE_ISP21XX "ql2100_fw.bin" 7482 #define FW_FILE_ISP22XX "ql2200_fw.bin" 7483 #define FW_FILE_ISP2300 "ql2300_fw.bin" 7484 #define FW_FILE_ISP2322 "ql2322_fw.bin" 7485 #define FW_FILE_ISP24XX "ql2400_fw.bin" 7486 #define FW_FILE_ISP25XX "ql2500_fw.bin" 7487 #define FW_FILE_ISP81XX "ql8100_fw.bin" 7488 #define FW_FILE_ISP82XX "ql8200_fw.bin" 7489 #define FW_FILE_ISP2031 "ql2600_fw.bin" 7490 #define FW_FILE_ISP8031 "ql8300_fw.bin" 7491 #define FW_FILE_ISP27XX "ql2700_fw.bin" 7492 #define FW_FILE_ISP28XX "ql2800_fw.bin" 7493 7494 7495 static DEFINE_MUTEX(qla_fw_lock); 7496 7497 static struct fw_blob qla_fw_blobs[] = { 7498 { .name = FW_FILE_ISP21XX, .segs = { 0x1000, 0 }, }, 7499 { .name = FW_FILE_ISP22XX, .segs = { 0x1000, 0 }, }, 7500 { .name = FW_FILE_ISP2300, .segs = { 0x800, 0 }, }, 7501 { .name = FW_FILE_ISP2322, .segs = { 0x800, 0x1c000, 0x1e000, 0 }, }, 7502 { .name = FW_FILE_ISP24XX, }, 7503 { .name = FW_FILE_ISP25XX, }, 7504 { .name = FW_FILE_ISP81XX, }, 7505 { .name = FW_FILE_ISP82XX, }, 7506 { .name = FW_FILE_ISP2031, }, 7507 { .name = FW_FILE_ISP8031, }, 7508 { .name = FW_FILE_ISP27XX, }, 7509 { .name = FW_FILE_ISP28XX, }, 7510 { .name = NULL, }, 7511 }; 7512 7513 struct fw_blob * 7514 qla2x00_request_firmware(scsi_qla_host_t *vha) 7515 { 7516 struct qla_hw_data *ha = vha->hw; 7517 struct fw_blob *blob; 7518 7519 if (IS_QLA2100(ha)) { 7520 blob = &qla_fw_blobs[FW_ISP21XX]; 7521 } else if (IS_QLA2200(ha)) { 7522 blob = &qla_fw_blobs[FW_ISP22XX]; 7523 } else if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) { 7524 blob = &qla_fw_blobs[FW_ISP2300]; 7525 } else if (IS_QLA2322(ha) || IS_QLA6322(ha)) { 7526 blob = &qla_fw_blobs[FW_ISP2322]; 7527 } else if (IS_QLA24XX_TYPE(ha)) { 7528 blob = &qla_fw_blobs[FW_ISP24XX]; 7529 } else if (IS_QLA25XX(ha)) { 7530 blob = &qla_fw_blobs[FW_ISP25XX]; 7531 } else if (IS_QLA81XX(ha)) { 7532 blob = &qla_fw_blobs[FW_ISP81XX]; 7533 } else if (IS_QLA82XX(ha)) { 7534 blob = &qla_fw_blobs[FW_ISP82XX]; 7535 } else if (IS_QLA2031(ha)) { 7536 blob = &qla_fw_blobs[FW_ISP2031]; 7537 } else if (IS_QLA8031(ha)) { 7538 blob = &qla_fw_blobs[FW_ISP8031]; 7539 } else if (IS_QLA27XX(ha)) { 7540 blob = &qla_fw_blobs[FW_ISP27XX]; 7541 } else if (IS_QLA28XX(ha)) { 7542 blob = &qla_fw_blobs[FW_ISP28XX]; 7543 } else { 7544 return NULL; 7545 } 7546 7547 if (!blob->name) 7548 return NULL; 7549 7550 mutex_lock(&qla_fw_lock); 7551 if (blob->fw) 7552 goto out; 7553 7554 if (request_firmware(&blob->fw, blob->name, &ha->pdev->dev)) { 7555 ql_log(ql_log_warn, vha, 0x0063, 7556 "Failed to load firmware image (%s).\n", blob->name); 7557 blob->fw = NULL; 7558 blob = NULL; 7559 } 7560 7561 out: 7562 mutex_unlock(&qla_fw_lock); 7563 return blob; 7564 } 7565 7566 static void 7567 qla2x00_release_firmware(void) 7568 { 7569 struct fw_blob *blob; 7570 7571 mutex_lock(&qla_fw_lock); 7572 for (blob = qla_fw_blobs; blob->name; blob++) 7573 release_firmware(blob->fw); 7574 mutex_unlock(&qla_fw_lock); 7575 } 7576 7577 static void qla_pci_error_cleanup(scsi_qla_host_t *vha) 7578 { 7579 struct qla_hw_data *ha = vha->hw; 7580 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 7581 struct qla_qpair *qpair = NULL; 7582 struct scsi_qla_host *vp, *tvp; 
static void qla_pci_error_cleanup(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
	struct qla_qpair *qpair = NULL;
	struct scsi_qla_host *vp, *tvp;
	fc_port_t *fcport;
	int i;
	unsigned long flags;

	ql_dbg(ql_dbg_aer, vha, 0x9000,
	    "%s\n", __func__);
	ha->chip_reset++;

	ha->base_qpair->chip_reset = ha->chip_reset;
	for (i = 0; i < ha->max_qpairs; i++) {
		if (ha->queue_pair_map[i])
			ha->queue_pair_map[i]->chip_reset =
			    ha->base_qpair->chip_reset;
	}

	/*
	 * Purging the mailbox may take a while; slot reset/chip reset
	 * will take care of the purge.
	 */
	mutex_lock(&ha->mq_lock);
	ha->base_qpair->online = 0;
	list_for_each_entry(qpair, &base_vha->qp_list, qp_list_elem)
		qpair->online = 0;
	wmb();
	mutex_unlock(&ha->mq_lock);

	qla2x00_mark_all_devices_lost(vha);

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
		/* Hold a vport reference while the lock is dropped. */
		atomic_inc(&vp->vref_count);
		spin_unlock_irqrestore(&ha->vport_slock, flags);
		qla2x00_mark_all_devices_lost(vp);
		spin_lock_irqsave(&ha->vport_slock, flags);
		atomic_dec(&vp->vref_count);
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);

	/* Clear all async request states across all VPs. */
	list_for_each_entry(fcport, &vha->vp_fcports, list)
		fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
		atomic_inc(&vp->vref_count);
		spin_unlock_irqrestore(&ha->vport_slock, flags);
		list_for_each_entry(fcport, &vp->vp_fcports, list)
			fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
		spin_lock_irqsave(&ha->vport_slock, flags);
		atomic_dec(&vp->vref_count);
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
}

static pci_ers_result_t
qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	scsi_qla_host_t *vha = pci_get_drvdata(pdev);
	struct qla_hw_data *ha = vha->hw;
	pci_ers_result_t ret = PCI_ERS_RESULT_NEED_RESET;

	ql_log(ql_log_warn, vha, 0x9000,
	    "PCI error detected, state %x.\n", state);
	ha->pci_error_state = QLA_PCI_ERR_DETECTED;

	if (!atomic_read(&pdev->enable_cnt)) {
		ql_log(ql_log_info, vha, 0xffff,
		    "PCI device is disabled, state %x.\n", state);
		ret = PCI_ERS_RESULT_NEED_RESET;
		goto out;
	}

	switch (state) {
	case pci_channel_io_normal:
		ha->flags.eeh_busy = 0;
		if (ql2xmqsupport || ql2xnvmeenable) {
			set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags);
			qla2xxx_wake_dpc(vha);
		}
		ret = PCI_ERS_RESULT_CAN_RECOVER;
		break;
	case pci_channel_io_frozen:
		qla_pci_set_eeh_busy(vha);
		ret = PCI_ERS_RESULT_NEED_RESET;
		break;
	case pci_channel_io_perm_failure:
		ha->flags.pci_channel_io_perm_failure = 1;
		qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
		if (ql2xmqsupport || ql2xnvmeenable) {
			set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags);
			qla2xxx_wake_dpc(vha);
		}
		ret = PCI_ERS_RESULT_DISCONNECT;
		break;
	}
out:
	ql_dbg(ql_dbg_aer, vha, 0x600d,
	    "PCI error detected returning [%x].\n", ret);
	return ret;
}
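
/**
 * qla2xxx_pci_mmio_enabled - MMIO-enabled stage of PCI error recovery
 * @pdev: PCI device pointer
 *
 * Called once MMIO access is restored. Checks whether the RISC is
 * paused and, if so, dumps firmware state; always asks the PCI core
 * for a slot reset by returning PCI_ERS_RESULT_NEED_RESET.
 **/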
static pci_ers_result_t
qla2xxx_pci_mmio_enabled(struct pci_dev *pdev)
{
	int risc_paused = 0;
	uint32_t stat;
	unsigned long flags;
	scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
	struct qla_hw_data *ha = base_vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;

	ql_log(ql_log_warn, base_vha, 0x9000,
	    "mmio enabled\n");

	ha->pci_error_state = QLA_PCI_MMIO_ENABLED;
	if (IS_QLA82XX(ha))
		return PCI_ERS_RESULT_RECOVERED;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
		stat = rd_reg_word(&reg->hccr);
		if (stat & HCCR_RISC_PAUSE)
			risc_paused = 1;
	} else if (IS_QLA23XX(ha)) {
		stat = rd_reg_dword(&reg->u.isp2300.host_status);
		if (stat & HSR_RISC_PAUSED)
			risc_paused = 1;
	} else if (IS_FWI2_CAPABLE(ha)) {
		stat = rd_reg_dword(&reg24->host_status);
		if (stat & HSRX_RISC_PAUSED)
			risc_paused = 1;
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (risc_paused) {
		ql_log(ql_log_info, base_vha, 0x9003,
		    "RISC paused -- mmio_enabled, Dumping firmware.\n");
		qla2xxx_dump_fw(base_vha);
	}

	/*
	 * Return PCI_ERS_RESULT_NEED_RESET to trigger a call to
	 * qla2xxx_pci_slot_reset().
	 */
	ql_dbg(ql_dbg_aer, base_vha, 0x600d,
	    "mmio enabled returning.\n");
	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t
qla2xxx_pci_slot_reset(struct pci_dev *pdev)
{
	pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT;
	scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
	struct qla_hw_data *ha = base_vha->hw;
	int rc;
	struct qla_qpair *qpair = NULL;

	ql_log(ql_log_warn, base_vha, 0x9004,
	    "Slot Reset.\n");

	ha->pci_error_state = QLA_PCI_SLOT_RESET;

	/*
	 * Workaround: the qla2xxx driver touches the hardware before the
	 * PCI layer marks the channel online again, so it needs the error
	 * state to be pci_channel_io_normal here; otherwise mailbox
	 * commands time out.
	 */
	pdev->error_state = pci_channel_io_normal;

	pci_restore_state(pdev);

	/*
	 * pci_restore_state() clears the device's saved_state flag, so
	 * save the (restored) state again to keep it valid for any
	 * subsequent reset.
	 */
	pci_save_state(pdev);

	if (ha->mem_only)
		rc = pci_enable_device_mem(pdev);
	else
		rc = pci_enable_device(pdev);

	if (rc) {
		ql_log(ql_log_warn, base_vha, 0x9005,
		    "Can't re-enable PCI device after reset.\n");
		goto exit_slot_reset;
	}

	if (ha->isp_ops->pci_config(base_vha))
		goto exit_slot_reset;

	mutex_lock(&ha->mq_lock);
	list_for_each_entry(qpair, &base_vha->qp_list, qp_list_elem)
		qpair->online = 1;
	mutex_unlock(&ha->mq_lock);

	ha->flags.eeh_busy = 0;
	base_vha->flags.online = 1;
	set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
	ha->isp_ops->abort_isp(base_vha);
	clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);

	if (qla2x00_isp_reg_stat(ha)) {
		ha->flags.eeh_busy = 1;
		qla_pci_error_cleanup(base_vha);
		ql_log(ql_log_warn, base_vha, 0x9005,
		    "Device unable to recover from PCI error.\n");
	} else {
		ret = PCI_ERS_RESULT_RECOVERED;
	}

exit_slot_reset:
	ql_dbg(ql_dbg_aer, base_vha, 0x900e,
	    "Slot Reset returning %x.\n", ret);

	return ret;
}
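
/**
 * qla2xxx_pci_resume - final stage of PCI error recovery
 * @pdev: PCI device pointer
 *
 * Called when normal traffic may resume. Waits for the HBA to come
 * back online and records that error recovery has completed.
 **/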
static void
qla2xxx_pci_resume(struct pci_dev *pdev)
{
	scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
	struct qla_hw_data *ha = base_vha->hw;
	int ret;

	ql_log(ql_log_warn, base_vha, 0x900f,
	    "PCI resume.\n");

	ret = qla2x00_wait_for_hba_online(base_vha);
	if (ret != QLA_SUCCESS) {
		ql_log(ql_log_fatal, base_vha, 0x9002,
		    "The device failed to resume I/O from slot/link_reset.\n");
	}
	ha->pci_error_state = QLA_PCI_RESUME;
	ql_dbg(ql_dbg_aer, base_vha, 0x600d,
	    "PCI resume returning.\n");
}

void qla_pci_set_eeh_busy(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	bool do_cleanup = false;
	unsigned long flags;

	if (ha->flags.eeh_busy)
		return;

	/* Re-check under the lock so only one caller runs the cleanup. */
	spin_lock_irqsave(&base_vha->work_lock, flags);
	if (!ha->flags.eeh_busy) {
		ha->flags.eeh_busy = 1;
		do_cleanup = true;
	}
	spin_unlock_irqrestore(&base_vha->work_lock, flags);

	if (do_cleanup)
		qla_pci_error_cleanup(base_vha);
}

/*
 * Schedule a task to pause I/O; called from interrupt context when the
 * caller sees a PCIe error event (register reads return all 0xf's).
 */
void qla_schedule_eeh_work(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);

	if (ha->flags.eeh_busy)
		return;

	set_bit(DO_EEH_RECOVERY, &base_vha->dpc_flags);
	qla2xxx_wake_dpc(base_vha);
}

static void
qla_pci_reset_prepare(struct pci_dev *pdev)
{
	scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
	struct qla_hw_data *ha = base_vha->hw;
	struct qla_qpair *qpair;

	ql_log(ql_log_warn, base_vha, 0xffff,
	    "%s.\n", __func__);

	/*
	 * A PCI FLR/function reset is about to reset the slot. Stop the
	 * chip to stop all DMA access. It is assumed that
	 * qla_pci_reset_done() will be called after the FLR to resume
	 * chip operation.
	 */
	ha->flags.eeh_busy = 1;
	mutex_lock(&ha->mq_lock);
	list_for_each_entry(qpair, &base_vha->qp_list, qp_list_elem)
		qpair->online = 0;
	mutex_unlock(&ha->mq_lock);

	set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
	qla2x00_abort_isp_cleanup(base_vha);
	qla2x00_abort_all_cmds(base_vha, DID_RESET << 16);
}
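
/**
 * qla_pci_reset_done - resume adapter operation after a PCI FLR
 * @pdev: PCI device pointer
 *
 * Counterpart of qla_pci_reset_prepare(): marks the queue pairs online
 * again and restarts the ISP once the function-level reset completes.
 **/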
static void
qla_pci_reset_done(struct pci_dev *pdev)
{
	scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
	struct qla_hw_data *ha = base_vha->hw;
	struct qla_qpair *qpair;

	ql_log(ql_log_warn, base_vha, 0xffff,
	    "%s.\n", __func__);

	/* The FLR just completed at the PCI layer; resume the adapter. */
	ha->flags.eeh_busy = 0;
	mutex_lock(&ha->mq_lock);
	list_for_each_entry(qpair, &base_vha->qp_list, qp_list_elem)
		qpair->online = 1;
	mutex_unlock(&ha->mq_lock);

	base_vha->flags.online = 1;
	ha->isp_ops->abort_isp(base_vha);
	clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
}

static int qla2xxx_map_queues(struct Scsi_Host *shost)
{
	int rc;
	scsi_qla_host_t *vha = (scsi_qla_host_t *)shost->hostdata;
	struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];

	if (USER_CTRL_IRQ(vha->hw) || !vha->hw->mqiobase)
		rc = blk_mq_map_queues(qmap);
	else
		rc = blk_mq_pci_map_queues(qmap, vha->hw->pdev,
					   vha->irq_offset);
	return rc;
}

struct scsi_host_template qla2xxx_driver_template = {
	.module			= THIS_MODULE,
	.name			= QLA2XXX_DRIVER_NAME,
	.queuecommand		= qla2xxx_queuecommand,

	.eh_timed_out		= fc_eh_timed_out,
	.eh_abort_handler	= qla2xxx_eh_abort,
	.eh_should_retry_cmd	= fc_eh_should_retry_cmd,
	.eh_device_reset_handler = qla2xxx_eh_device_reset,
	.eh_target_reset_handler = qla2xxx_eh_target_reset,
	.eh_bus_reset_handler	= qla2xxx_eh_bus_reset,
	.eh_host_reset_handler	= qla2xxx_eh_host_reset,

	.slave_configure	= qla2xxx_slave_configure,

	.slave_alloc		= qla2xxx_slave_alloc,
	.slave_destroy		= qla2xxx_slave_destroy,
	.scan_finished		= qla2xxx_scan_finished,
	.scan_start		= qla2xxx_scan_start,
	.change_queue_depth	= scsi_change_queue_depth,
	.map_queues		= qla2xxx_map_queues,
	.this_id		= -1,
	.cmd_per_lun		= 3,
	.sg_tablesize		= SG_ALL,

	.max_sectors		= 0xFFFF,
	.shost_attrs		= qla2x00_host_attrs,

	.supported_mode		= MODE_INITIATOR,
	.track_queue_depth	= 1,
	.cmd_size		= sizeof(srb_t),
};

static const struct pci_error_handlers qla2xxx_err_handler = {
	.error_detected	= qla2xxx_pci_error_detected,
	.mmio_enabled	= qla2xxx_pci_mmio_enabled,
	.slot_reset	= qla2xxx_pci_slot_reset,
	.resume		= qla2xxx_pci_resume,
	.reset_prepare	= qla_pci_reset_prepare,
	.reset_done	= qla_pci_reset_done,
};
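
/* PCI device IDs of the ISP adapters this driver claims. */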
static struct pci_device_id qla2xxx_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2100) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2200) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2300) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2312) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2322) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP6312) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP6322) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2422) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2432) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8432) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5422) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5432) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2532) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2031) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8001) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8021) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8031) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISPF001) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8044) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2071) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2271) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2261) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2061) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2081) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2281) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2089) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2289) },
	{ 0 },
};
MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl);

static struct pci_driver qla2xxx_pci_driver = {
	.name		= QLA2XXX_DRIVER_NAME,
	.driver		= {
		.owner		= THIS_MODULE,
	},
	.id_table	= qla2xxx_pci_tbl,
	.probe		= qla2x00_probe_one,
	.remove		= qla2x00_remove_one,
	.shutdown	= qla2x00_shutdown,
	.err_handler	= &qla2xxx_err_handler,
};

static const struct file_operations apidev_fops = {
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
};

/**
 * qla2x00_module_init - Module initialization.
 **/
static int __init
qla2x00_module_init(void)
{
	int ret = 0;

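	/*
	 * Compile-time sanity checks: these structures are shared with
	 * the ISP firmware (IOCBs, init control blocks, NVRAM and dump
	 * layouts), so their sizes must match the interface exactly.
	 */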
	BUILD_BUG_ON(sizeof(cmd_a64_entry_t) != 64);
	BUILD_BUG_ON(sizeof(cmd_entry_t) != 64);
	BUILD_BUG_ON(sizeof(cont_a64_entry_t) != 64);
	BUILD_BUG_ON(sizeof(cont_entry_t) != 64);
	BUILD_BUG_ON(sizeof(init_cb_t) != 96);
	BUILD_BUG_ON(sizeof(mrk_entry_t) != 64);
	BUILD_BUG_ON(sizeof(ms_iocb_entry_t) != 64);
	BUILD_BUG_ON(sizeof(request_t) != 64);
	BUILD_BUG_ON(sizeof(struct abort_entry_24xx) != 64);
	BUILD_BUG_ON(sizeof(struct abort_iocb_entry_fx00) != 64);
	BUILD_BUG_ON(sizeof(struct abts_entry_24xx) != 64);
	BUILD_BUG_ON(sizeof(struct access_chip_84xx) != 64);
	BUILD_BUG_ON(sizeof(struct access_chip_rsp_84xx) != 64);
	BUILD_BUG_ON(sizeof(struct cmd_bidir) != 64);
	BUILD_BUG_ON(sizeof(struct cmd_nvme) != 64);
	BUILD_BUG_ON(sizeof(struct cmd_type_6) != 64);
	BUILD_BUG_ON(sizeof(struct cmd_type_7) != 64);
	BUILD_BUG_ON(sizeof(struct cmd_type_7_fx00) != 64);
	BUILD_BUG_ON(sizeof(struct cmd_type_crc_2) != 64);
	BUILD_BUG_ON(sizeof(struct ct_entry_24xx) != 64);
	BUILD_BUG_ON(sizeof(struct ct_fdmi1_hba_attributes) != 2604);
	BUILD_BUG_ON(sizeof(struct ct_fdmi2_hba_attributes) != 4424);
	BUILD_BUG_ON(sizeof(struct ct_fdmi2_port_attributes) != 4164);
	BUILD_BUG_ON(sizeof(struct ct_fdmi_hba_attr) != 260);
	BUILD_BUG_ON(sizeof(struct ct_fdmi_port_attr) != 260);
	BUILD_BUG_ON(sizeof(struct ct_rsp_hdr) != 16);
	BUILD_BUG_ON(sizeof(struct ctio_crc2_to_fw) != 64);
	BUILD_BUG_ON(sizeof(struct device_reg_24xx) != 256);
	BUILD_BUG_ON(sizeof(struct device_reg_25xxmq) != 24);
	BUILD_BUG_ON(sizeof(struct device_reg_2xxx) != 256);
	BUILD_BUG_ON(sizeof(struct device_reg_82xx) != 1288);
	BUILD_BUG_ON(sizeof(struct device_reg_fx00) != 216);
	BUILD_BUG_ON(sizeof(struct els_entry_24xx) != 64);
	BUILD_BUG_ON(sizeof(struct els_sts_entry_24xx) != 64);
	BUILD_BUG_ON(sizeof(struct fxdisc_entry_fx00) != 64);
	BUILD_BUG_ON(sizeof(struct imm_ntfy_from_isp) != 64);
	BUILD_BUG_ON(sizeof(struct init_cb_24xx) != 128);
	BUILD_BUG_ON(sizeof(struct init_cb_81xx) != 128);
	BUILD_BUG_ON(sizeof(struct logio_entry_24xx) != 64);
	BUILD_BUG_ON(sizeof(struct mbx_entry) != 64);
	BUILD_BUG_ON(sizeof(struct mid_init_cb_24xx) != 5252);
	BUILD_BUG_ON(sizeof(struct mrk_entry_24xx) != 64);
	BUILD_BUG_ON(sizeof(struct nvram_24xx) != 512);
	BUILD_BUG_ON(sizeof(struct nvram_81xx) != 512);
	BUILD_BUG_ON(sizeof(struct pt_ls4_request) != 64);
	BUILD_BUG_ON(sizeof(struct pt_ls4_rx_unsol) != 64);
	BUILD_BUG_ON(sizeof(struct purex_entry_24xx) != 64);
	BUILD_BUG_ON(sizeof(struct qla2100_fw_dump) != 123634);
	BUILD_BUG_ON(sizeof(struct qla2300_fw_dump) != 136100);
	BUILD_BUG_ON(sizeof(struct qla24xx_fw_dump) != 37976);
	BUILD_BUG_ON(sizeof(struct qla25xx_fw_dump) != 39228);
	BUILD_BUG_ON(sizeof(struct qla2xxx_fce_chain) != 52);
	BUILD_BUG_ON(sizeof(struct qla2xxx_fw_dump) != 136172);
	BUILD_BUG_ON(sizeof(struct qla2xxx_mq_chain) != 524);
	BUILD_BUG_ON(sizeof(struct qla2xxx_mqueue_chain) != 8);
	BUILD_BUG_ON(sizeof(struct qla2xxx_mqueue_header) != 12);
	BUILD_BUG_ON(sizeof(struct qla2xxx_offld_chain) != 24);
	BUILD_BUG_ON(sizeof(struct qla81xx_fw_dump) != 39420);
	BUILD_BUG_ON(sizeof(struct qla82xx_uri_data_desc) != 28);
	BUILD_BUG_ON(sizeof(struct qla82xx_uri_table_desc) != 32);
	BUILD_BUG_ON(sizeof(struct qla83xx_fw_dump) != 51196);
	BUILD_BUG_ON(sizeof(struct qla_fcp_prio_cfg) != FCP_PRIO_CFG_SIZE);
	BUILD_BUG_ON(sizeof(struct qla_fdt_layout) != 128);
	BUILD_BUG_ON(sizeof(struct qla_flt_header) != 8);
	BUILD_BUG_ON(sizeof(struct qla_flt_region) != 16);
	BUILD_BUG_ON(sizeof(struct qla_npiv_entry) != 24);
	BUILD_BUG_ON(sizeof(struct qla_npiv_header) != 16);
	BUILD_BUG_ON(sizeof(struct rdp_rsp_payload) != 336);
	BUILD_BUG_ON(sizeof(struct sns_cmd_pkt) != 2064);
	BUILD_BUG_ON(sizeof(struct sts_entry_24xx) != 64);
	BUILD_BUG_ON(sizeof(struct tsk_mgmt_entry) != 64);
	BUILD_BUG_ON(sizeof(struct tsk_mgmt_entry_fx00) != 64);
	BUILD_BUG_ON(sizeof(struct verify_chip_entry_84xx) != 64);
	BUILD_BUG_ON(sizeof(struct verify_chip_rsp_84xx) != 52);
	BUILD_BUG_ON(sizeof(struct vf_evfp_entry_24xx) != 56);
	BUILD_BUG_ON(sizeof(struct vp_config_entry_24xx) != 64);
	BUILD_BUG_ON(sizeof(struct vp_ctrl_entry_24xx) != 64);
	BUILD_BUG_ON(sizeof(struct vp_rpt_id_entry_24xx) != 64);
	BUILD_BUG_ON(sizeof(sts21_entry_t) != 64);
	BUILD_BUG_ON(sizeof(sts22_entry_t) != 64);
	BUILD_BUG_ON(sizeof(sts_cont_entry_t) != 64);
	BUILD_BUG_ON(sizeof(sts_entry_t) != 64);
	BUILD_BUG_ON(sizeof(sw_info_t) != 32);
	BUILD_BUG_ON(sizeof(target_id_t) != 2);

	/* Allocate cache for SRBs. */
	srb_cachep = kmem_cache_create("qla2xxx_srbs", sizeof(srb_t), 0,
	    SLAB_HWCACHE_ALIGN, NULL);
	if (srb_cachep == NULL) {
		ql_log(ql_log_fatal, NULL, 0x0001,
		    "Unable to allocate SRB cache...Failing load!\n");
		return -ENOMEM;
	}

	/* Initialize target kmem_cache and mem_pools. */
	ret = qlt_init();
	if (ret < 0) {
		goto destroy_cache;
	} else if (ret > 0) {
		/*
		 * If initiator mode is explicitly disabled by qlt_init(),
		 * prevent scsi_transport_fc.c:fc_scsi_scan_rport() from
		 * performing scsi_scan_target() during LOOP UP event.
		 */
		qla2xxx_transport_functions.disable_target_scan = 1;
		qla2xxx_transport_vport_functions.disable_target_scan = 1;
	}

	/* Derive version string. */
	strcpy(qla2x00_version_str, QLA2XXX_VERSION);
	if (ql2xextended_error_logging)
		strcat(qla2x00_version_str, "-debug");
	if (ql2xextended_error_logging == 1)
		ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK;

	if (ql2x_ini_mode == QLA2XXX_INI_MODE_DUAL)
		qla_insert_tgt_attrs();

	qla2xxx_transport_template =
	    fc_attach_transport(&qla2xxx_transport_functions);
	if (!qla2xxx_transport_template) {
		ql_log(ql_log_fatal, NULL, 0x0002,
		    "fc_attach_transport failed...Failing load!\n");
		ret = -ENODEV;
		goto qlt_exit;
	}

	apidev_major = register_chrdev(0, QLA2XXX_APIDEV, &apidev_fops);
	if (apidev_major < 0) {
		ql_log(ql_log_fatal, NULL, 0x0003,
		    "Unable to register char device %s.\n", QLA2XXX_APIDEV);
	}

	qla2xxx_transport_vport_template =
	    fc_attach_transport(&qla2xxx_transport_vport_functions);
	if (!qla2xxx_transport_vport_template) {
		ql_log(ql_log_fatal, NULL, 0x0004,
		    "fc_attach_transport vport failed...Failing load!\n");
		ret = -ENODEV;
		goto unreg_chrdev;
	}
	ql_log(ql_log_info, NULL, 0x0005,
	    "QLogic Fibre Channel HBA Driver: %s.\n",
	    qla2x00_version_str);
	ret = pci_register_driver(&qla2xxx_pci_driver);
	if (ret) {
		ql_log(ql_log_fatal, NULL, 0x0006,
		    "pci_register_driver failed, ret=%d. Failing load!\n",
		    ret);
		goto release_vport_transport;
	}
	return ret;

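	/* Error paths: unwind in the reverse order of the setup above. */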
release_vport_transport:
	fc_release_transport(qla2xxx_transport_vport_template);

unreg_chrdev:
	if (apidev_major >= 0)
		unregister_chrdev(apidev_major, QLA2XXX_APIDEV);
	fc_release_transport(qla2xxx_transport_template);

qlt_exit:
	qlt_exit();

destroy_cache:
	kmem_cache_destroy(srb_cachep);
	return ret;
}

/**
 * qla2x00_module_exit - Module cleanup.
 **/
static void __exit
qla2x00_module_exit(void)
{
	pci_unregister_driver(&qla2xxx_pci_driver);
	qla2x00_release_firmware();
	kmem_cache_destroy(ctx_cachep);
	fc_release_transport(qla2xxx_transport_vport_template);
	if (apidev_major >= 0)
		unregister_chrdev(apidev_major, QLA2XXX_APIDEV);
	fc_release_transport(qla2xxx_transport_template);
	qlt_exit();
	kmem_cache_destroy(srb_cachep);
}

module_init(qla2x00_module_init);
module_exit(qla2x00_module_exit);

MODULE_AUTHOR("QLogic Corporation");
MODULE_DESCRIPTION("QLogic Fibre Channel HBA Driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FW_FILE_ISP21XX);
MODULE_FIRMWARE(FW_FILE_ISP22XX);
MODULE_FIRMWARE(FW_FILE_ISP2300);
MODULE_FIRMWARE(FW_FILE_ISP2322);
MODULE_FIRMWARE(FW_FILE_ISP24XX);
MODULE_FIRMWARE(FW_FILE_ISP25XX);