// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 */
#include "qla_def.h"

#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/kobject.h>
#include <linux/slab.h>
#include <linux/blk-mq-pci.h>
#include <linux/refcount.h>
#include <linux/crash_dump.h>
#include <linux/trace_events.h>
#include <linux/trace.h>

#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_fc.h>

#include "qla_target.h"

/*
 * Driver version
 */
char qla2x00_version_str[40];

static int apidev_major;

/*
 * SRB allocation cache
 */
struct kmem_cache *srb_cachep;

static struct trace_array *qla_trc_array;

int ql2xfulldump_on_mpifail;
module_param(ql2xfulldump_on_mpifail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql2xfulldump_on_mpifail,
	"Set this to take full dump on MPI hang.");

int ql2xenforce_iocb_limit = 1;
module_param(ql2xenforce_iocb_limit, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql2xenforce_iocb_limit,
	"Enforce IOCB throttling to avoid FW congestion. (default: 1)");

/*
 * CT6 CTX allocation cache
 */
static struct kmem_cache *ctx_cachep;
/*
 * error level for logging
 */
uint ql_errlev = 0x8001;

int ql2xsecenable;
module_param(ql2xsecenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xsecenable,
	"Enable/disable security. 0 (Default) - Security disabled. 1 - Security enabled.");

static int ql2xenableclass2;
module_param(ql2xenableclass2, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xenableclass2,
	"Specify if Class 2 operations are supported from the very "
	"beginning. Default is 0 - class 2 not supported.");


int ql2xlogintimeout = 20;
module_param(ql2xlogintimeout, int, S_IRUGO);
MODULE_PARM_DESC(ql2xlogintimeout,
	"Login timeout value in seconds.");

int qlport_down_retry;
module_param(qlport_down_retry, int, S_IRUGO);
MODULE_PARM_DESC(qlport_down_retry,
	"Maximum number of command retries to a port that returns "
	"a PORT-DOWN status.");

int ql2xplogiabsentdevice;
module_param(ql2xplogiabsentdevice, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xplogiabsentdevice,
	"Option to enable PLOGI to devices that are not present after "
	"a Fabric scan. This is needed for several broken switches. "
	"Default is 0 - no PLOGI. 1 - perform PLOGI.");

int ql2xloginretrycount;
module_param(ql2xloginretrycount, int, S_IRUGO);
MODULE_PARM_DESC(ql2xloginretrycount,
	"Specify an alternate value for the NVRAM login retry count.");

int ql2xallocfwdump = 1;
module_param(ql2xallocfwdump, int, S_IRUGO);
MODULE_PARM_DESC(ql2xallocfwdump,
	"Option to enable allocation of memory for a firmware dump "
	"during HBA initialization. Memory allocation requirements "
	"vary by ISP type. Default is 1 - allocate memory.");

int ql2xextended_error_logging;
module_param(ql2xextended_error_logging, int, S_IRUGO|S_IWUSR);
module_param_named(logging, ql2xextended_error_logging, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xextended_error_logging,
	"Option to enable extended error logging,\n"
	"\t\tDefault is 0 - no logging. 0x40000000 - Module Init & Probe.\n"
	"\t\t0x20000000 - Mailbox Cmnds. 0x10000000 - Device Discovery.\n"
	"\t\t0x08000000 - IO tracing. 0x04000000 - DPC Thread.\n"
	"\t\t0x02000000 - Async events. 0x01000000 - Timer routines.\n"
	"\t\t0x00800000 - User space. 0x00400000 - Task Management.\n"
	"\t\t0x00200000 - AER/EEH. 0x00100000 - Multi Q.\n"
	"\t\t0x00080000 - P3P Specific. 0x00040000 - Virtual Port.\n"
	"\t\t0x00020000 - Buffer Dump. 0x00010000 - Misc.\n"
	"\t\t0x00008000 - Verbose. 0x00004000 - Target.\n"
	"\t\t0x00002000 - Target Mgmt. 0x00001000 - Target TMF.\n"
	"\t\t0x7fffffff - For enabling all logs, can be too many logs.\n"
	"\t\t0x1e400000 - Preferred value for capturing essential "
	"debug information (equivalent to old "
	"ql2xextended_error_logging=1).\n"
	"\t\tDo LOGICAL OR of the value to enable more than one level");

int ql2xextended_error_logging_ktrace = 1;
module_param(ql2xextended_error_logging_ktrace, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xextended_error_logging_ktrace,
	"Same BIT definition as ql2xextended_error_logging, but used to control logging to kernel trace buffer (default=1).\n");

int ql2xshiftctondsd = 6;
module_param(ql2xshiftctondsd, int, S_IRUGO);
MODULE_PARM_DESC(ql2xshiftctondsd,
	"Set to control shifting of command type processing "
	"based on total number of SG elements.");

int ql2xfdmienable = 1;
module_param(ql2xfdmienable, int, S_IRUGO|S_IWUSR);
module_param_named(fdmi, ql2xfdmienable, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xfdmienable,
	"Enables FDMI registrations. "
	"0 - no FDMI registrations. "
	"1 - provide FDMI registrations (default).");

#define MAX_Q_DEPTH	64
static int ql2xmaxqdepth = MAX_Q_DEPTH;
module_param(ql2xmaxqdepth, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xmaxqdepth,
	"Maximum queue depth to set for each LUN. "
	"Default is 64.");

int ql2xenabledif = 2;
module_param(ql2xenabledif, int, S_IRUGO);
MODULE_PARM_DESC(ql2xenabledif,
	" Enable T10-CRC-DIF:\n"
	" Default is 2.\n"
	" 0 -- No DIF Support\n"
	" 1 -- Enable DIF for all types\n"
	" 2 -- Enable DIF for all types, except Type 0.\n");

#if (IS_ENABLED(CONFIG_NVME_FC))
int ql2xnvmeenable = 1;
#else
int ql2xnvmeenable;
#endif
module_param(ql2xnvmeenable, int, 0644);
MODULE_PARM_DESC(ql2xnvmeenable,
	"Enables NVMe support. "
	"0 - no NVMe. 1 - NVMe. Default is 1 when CONFIG_NVME_FC is enabled.");

int ql2xenablehba_err_chk = 2;
module_param(ql2xenablehba_err_chk, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xenablehba_err_chk,
	" Enable T10-CRC-DIF Error isolation by HBA:\n"
	" Default is 2.\n"
	" 0 -- Error isolation disabled\n"
	" 1 -- Error isolation enabled only for DIX Type 0\n"
	" 2 -- Error isolation enabled for all Types\n");

int ql2xiidmaenable = 1;
module_param(ql2xiidmaenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xiidmaenable,
	"Enables iIDMA settings. "
	"Default is 1 - perform iIDMA. 0 - no iIDMA.");

int ql2xmqsupport = 1;
module_param(ql2xmqsupport, int, S_IRUGO);
MODULE_PARM_DESC(ql2xmqsupport,
	"Enable on-demand multiple queue pairs support. "
	"Default is 1 for supported. 
" 189 "Set it to 0 to turn off mq qpair support."); 190 191 int ql2xfwloadbin; 192 module_param(ql2xfwloadbin, int, S_IRUGO|S_IWUSR); 193 module_param_named(fwload, ql2xfwloadbin, int, S_IRUGO|S_IWUSR); 194 MODULE_PARM_DESC(ql2xfwloadbin, 195 "Option to specify location from which to load ISP firmware:.\n" 196 " 2 -- load firmware via the request_firmware() (hotplug).\n" 197 " interface.\n" 198 " 1 -- load firmware from flash.\n" 199 " 0 -- use default semantics.\n"); 200 201 int ql2xetsenable; 202 module_param(ql2xetsenable, int, S_IRUGO); 203 MODULE_PARM_DESC(ql2xetsenable, 204 "Enables firmware ETS burst." 205 "Default is 0 - skip ETS enablement."); 206 207 int ql2xdbwr = 1; 208 module_param(ql2xdbwr, int, S_IRUGO|S_IWUSR); 209 MODULE_PARM_DESC(ql2xdbwr, 210 "Option to specify scheme for request queue posting.\n" 211 " 0 -- Regular doorbell.\n" 212 " 1 -- CAMRAM doorbell (faster).\n"); 213 214 int ql2xgffidenable; 215 module_param(ql2xgffidenable, int, S_IRUGO); 216 MODULE_PARM_DESC(ql2xgffidenable, 217 "Enables GFF_ID checks of port type. " 218 "Default is 0 - Do not use GFF_ID information."); 219 220 int ql2xasynctmfenable = 1; 221 module_param(ql2xasynctmfenable, int, S_IRUGO); 222 MODULE_PARM_DESC(ql2xasynctmfenable, 223 "Enables issue of TM IOCBs asynchronously via IOCB mechanism" 224 "Default is 1 - Issue TM IOCBs via mailbox mechanism."); 225 226 int ql2xdontresethba; 227 module_param(ql2xdontresethba, int, S_IRUGO|S_IWUSR); 228 MODULE_PARM_DESC(ql2xdontresethba, 229 "Option to specify reset behaviour.\n" 230 " 0 (Default) -- Reset on failure.\n" 231 " 1 -- Do not reset on failure.\n"); 232 233 uint64_t ql2xmaxlun = MAX_LUNS; 234 module_param(ql2xmaxlun, ullong, S_IRUGO); 235 MODULE_PARM_DESC(ql2xmaxlun, 236 "Defines the maximum LU number to register with the SCSI " 237 "midlayer. Default is 65535."); 238 239 int ql2xmdcapmask = 0x1F; 240 module_param(ql2xmdcapmask, int, S_IRUGO); 241 MODULE_PARM_DESC(ql2xmdcapmask, 242 "Set the Minidump driver capture mask level. " 243 "Default is 0x1F - Can be set to 0x3, 0x7, 0xF, 0x1F, 0x7F."); 244 245 int ql2xmdenable = 1; 246 module_param(ql2xmdenable, int, S_IRUGO); 247 MODULE_PARM_DESC(ql2xmdenable, 248 "Enable/disable MiniDump. " 249 "0 - MiniDump disabled. " 250 "1 (Default) - MiniDump enabled."); 251 252 int ql2xexlogins; 253 module_param(ql2xexlogins, uint, S_IRUGO|S_IWUSR); 254 MODULE_PARM_DESC(ql2xexlogins, 255 "Number of extended Logins. " 256 "0 (Default)- Disabled."); 257 258 int ql2xexchoffld = 1024; 259 module_param(ql2xexchoffld, uint, 0644); 260 MODULE_PARM_DESC(ql2xexchoffld, 261 "Number of target exchanges."); 262 263 int ql2xiniexchg = 1024; 264 module_param(ql2xiniexchg, uint, 0644); 265 MODULE_PARM_DESC(ql2xiniexchg, 266 "Number of initiator exchanges."); 267 268 int ql2xfwholdabts; 269 module_param(ql2xfwholdabts, int, S_IRUGO); 270 MODULE_PARM_DESC(ql2xfwholdabts, 271 "Allow FW to hold status IOCB until ABTS rsp received. " 272 "0 (Default) Do not set fw option. " 273 "1 - Set fw option to hold ABTS."); 274 275 int ql2xmvasynctoatio = 1; 276 module_param(ql2xmvasynctoatio, int, S_IRUGO|S_IWUSR); 277 MODULE_PARM_DESC(ql2xmvasynctoatio, 278 "Move PUREX, ABTS RX and RIDA IOCBs to ATIOQ" 279 "0 (Default). 
Do not move IOCBs" 280 "1 - Move IOCBs."); 281 282 int ql2xautodetectsfp = 1; 283 module_param(ql2xautodetectsfp, int, 0444); 284 MODULE_PARM_DESC(ql2xautodetectsfp, 285 "Detect SFP range and set appropriate distance.\n" 286 "1 (Default): Enable\n"); 287 288 int ql2xenablemsix = 1; 289 module_param(ql2xenablemsix, int, 0444); 290 MODULE_PARM_DESC(ql2xenablemsix, 291 "Set to enable MSI or MSI-X interrupt mechanism.\n" 292 " Default is 1, enable MSI-X interrupt mechanism.\n" 293 " 0 -- enable traditional pin-based mechanism.\n" 294 " 1 -- enable MSI-X interrupt mechanism.\n" 295 " 2 -- enable MSI interrupt mechanism.\n"); 296 297 int qla2xuseresexchforels; 298 module_param(qla2xuseresexchforels, int, 0444); 299 MODULE_PARM_DESC(qla2xuseresexchforels, 300 "Reserve 1/2 of emergency exchanges for ELS.\n" 301 " 0 (default): disabled"); 302 303 static int ql2xprotmask; 304 module_param(ql2xprotmask, int, 0644); 305 MODULE_PARM_DESC(ql2xprotmask, 306 "Override DIF/DIX protection capabilities mask\n" 307 "Default is 0 which sets protection mask based on " 308 "capabilities reported by HBA firmware.\n"); 309 310 static int ql2xprotguard; 311 module_param(ql2xprotguard, int, 0644); 312 MODULE_PARM_DESC(ql2xprotguard, "Override choice of DIX checksum\n" 313 " 0 -- Let HBA firmware decide\n" 314 " 1 -- Force T10 CRC\n" 315 " 2 -- Force IP checksum\n"); 316 317 int ql2xdifbundlinginternalbuffers; 318 module_param(ql2xdifbundlinginternalbuffers, int, 0644); 319 MODULE_PARM_DESC(ql2xdifbundlinginternalbuffers, 320 "Force using internal buffers for DIF information\n" 321 "0 (Default). Based on check.\n" 322 "1 Force using internal buffers\n"); 323 324 int ql2xsmartsan; 325 module_param(ql2xsmartsan, int, 0444); 326 module_param_named(smartsan, ql2xsmartsan, int, 0444); 327 MODULE_PARM_DESC(ql2xsmartsan, 328 "Send SmartSAN Management Attributes for FDMI Registration." 329 " Default is 0 - No SmartSAN registration," 330 " 1 - Register SmartSAN Management Attributes."); 331 332 int ql2xrdpenable; 333 module_param(ql2xrdpenable, int, 0444); 334 module_param_named(rdpenable, ql2xrdpenable, int, 0444); 335 MODULE_PARM_DESC(ql2xrdpenable, 336 "Enables RDP responses. " 337 "0 - no RDP responses (default). " 338 "1 - provide RDP responses."); 339 int ql2xabts_wait_nvme = 1; 340 module_param(ql2xabts_wait_nvme, int, 0444); 341 MODULE_PARM_DESC(ql2xabts_wait_nvme, 342 "To wait for ABTS response on I/O timeouts for NVMe. 
(default: 1)"); 343 344 345 static u32 ql2xdelay_before_pci_error_handling = 5; 346 module_param(ql2xdelay_before_pci_error_handling, uint, 0644); 347 MODULE_PARM_DESC(ql2xdelay_before_pci_error_handling, 348 "Number of seconds delayed before qla begin PCI error self-handling (default: 5).\n"); 349 350 static void qla2x00_clear_drv_active(struct qla_hw_data *); 351 static void qla2x00_free_device(scsi_qla_host_t *); 352 static void qla2xxx_map_queues(struct Scsi_Host *shost); 353 static void qla2x00_destroy_deferred_work(struct qla_hw_data *); 354 355 u32 ql2xnvme_queues = DEF_NVME_HW_QUEUES; 356 module_param(ql2xnvme_queues, uint, S_IRUGO); 357 MODULE_PARM_DESC(ql2xnvme_queues, 358 "Number of NVMe Queues that can be configured.\n" 359 "Final value will be min(ql2xnvme_queues, num_cpus,num_chip_queues)\n" 360 "1 - Minimum number of queues supported\n" 361 "8 - Default value"); 362 363 static struct scsi_transport_template *qla2xxx_transport_template = NULL; 364 struct scsi_transport_template *qla2xxx_transport_vport_template = NULL; 365 366 /* TODO Convert to inlines 367 * 368 * Timer routines 369 */ 370 371 __inline__ void 372 qla2x00_start_timer(scsi_qla_host_t *vha, unsigned long interval) 373 { 374 timer_setup(&vha->timer, qla2x00_timer, 0); 375 vha->timer.expires = jiffies + interval * HZ; 376 add_timer(&vha->timer); 377 vha->timer_active = 1; 378 } 379 380 static inline void 381 qla2x00_restart_timer(scsi_qla_host_t *vha, unsigned long interval) 382 { 383 /* Currently used for 82XX only. */ 384 if (vha->device_flags & DFLG_DEV_FAILED) { 385 ql_dbg(ql_dbg_timer, vha, 0x600d, 386 "Device in a failed state, returning.\n"); 387 return; 388 } 389 390 mod_timer(&vha->timer, jiffies + interval * HZ); 391 } 392 393 static __inline__ void 394 qla2x00_stop_timer(scsi_qla_host_t *vha) 395 { 396 del_timer_sync(&vha->timer); 397 vha->timer_active = 0; 398 } 399 400 static int qla2x00_do_dpc(void *data); 401 402 static void qla2x00_rst_aen(scsi_qla_host_t *); 403 404 static int qla2x00_mem_alloc(struct qla_hw_data *, uint16_t, uint16_t, 405 struct req_que **, struct rsp_que **); 406 static void qla2x00_free_fw_dump(struct qla_hw_data *); 407 static void qla2x00_mem_free(struct qla_hw_data *); 408 int qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd, 409 struct qla_qpair *qpair); 410 411 /* -------------------------------------------------------------------------- */ 412 static void qla_init_base_qpair(struct scsi_qla_host *vha, struct req_que *req, 413 struct rsp_que *rsp) 414 { 415 struct qla_hw_data *ha = vha->hw; 416 417 rsp->qpair = ha->base_qpair; 418 rsp->req = req; 419 ha->base_qpair->hw = ha; 420 ha->base_qpair->req = req; 421 ha->base_qpair->rsp = rsp; 422 ha->base_qpair->vha = vha; 423 ha->base_qpair->qp_lock_ptr = &ha->hardware_lock; 424 ha->base_qpair->use_shadow_reg = IS_SHADOW_REG_CAPABLE(ha) ? 1 : 0; 425 ha->base_qpair->msix = &ha->msix_entries[QLA_MSIX_RSP_Q]; 426 ha->base_qpair->srb_mempool = ha->srb_mempool; 427 INIT_LIST_HEAD(&ha->base_qpair->hints_list); 428 ha->base_qpair->enable_class_2 = ql2xenableclass2; 429 /* init qpair to this cpu. Will adjust at run time. 
*/ 430 qla_cpu_update(rsp->qpair, raw_smp_processor_id()); 431 ha->base_qpair->pdev = ha->pdev; 432 433 if (IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha)) 434 ha->base_qpair->reqq_start_iocbs = qla_83xx_start_iocbs; 435 } 436 437 static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req, 438 struct rsp_que *rsp) 439 { 440 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 441 442 ha->req_q_map = kcalloc(ha->max_req_queues, sizeof(struct req_que *), 443 GFP_KERNEL); 444 if (!ha->req_q_map) { 445 ql_log(ql_log_fatal, vha, 0x003b, 446 "Unable to allocate memory for request queue ptrs.\n"); 447 goto fail_req_map; 448 } 449 450 ha->rsp_q_map = kcalloc(ha->max_rsp_queues, sizeof(struct rsp_que *), 451 GFP_KERNEL); 452 if (!ha->rsp_q_map) { 453 ql_log(ql_log_fatal, vha, 0x003c, 454 "Unable to allocate memory for response queue ptrs.\n"); 455 goto fail_rsp_map; 456 } 457 458 ha->base_qpair = kzalloc(sizeof(struct qla_qpair), GFP_KERNEL); 459 if (ha->base_qpair == NULL) { 460 ql_log(ql_log_warn, vha, 0x00e0, 461 "Failed to allocate base queue pair memory.\n"); 462 goto fail_base_qpair; 463 } 464 465 qla_init_base_qpair(vha, req, rsp); 466 467 if ((ql2xmqsupport || ql2xnvmeenable) && ha->max_qpairs) { 468 ha->queue_pair_map = kcalloc(ha->max_qpairs, sizeof(struct qla_qpair *), 469 GFP_KERNEL); 470 if (!ha->queue_pair_map) { 471 ql_log(ql_log_fatal, vha, 0x0180, 472 "Unable to allocate memory for queue pair ptrs.\n"); 473 goto fail_qpair_map; 474 } 475 } 476 477 /* 478 * Make sure we record at least the request and response queue zero in 479 * case we need to free them if part of the probe fails. 480 */ 481 ha->rsp_q_map[0] = rsp; 482 ha->req_q_map[0] = req; 483 set_bit(0, ha->rsp_qid_map); 484 set_bit(0, ha->req_qid_map); 485 return 0; 486 487 fail_qpair_map: 488 kfree(ha->base_qpair); 489 ha->base_qpair = NULL; 490 fail_base_qpair: 491 kfree(ha->rsp_q_map); 492 ha->rsp_q_map = NULL; 493 fail_rsp_map: 494 kfree(ha->req_q_map); 495 ha->req_q_map = NULL; 496 fail_req_map: 497 return -ENOMEM; 498 } 499 500 static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req) 501 { 502 if (IS_QLAFX00(ha)) { 503 if (req && req->ring_fx00) 504 dma_free_coherent(&ha->pdev->dev, 505 (req->length_fx00 + 1) * sizeof(request_t), 506 req->ring_fx00, req->dma_fx00); 507 } else if (req && req->ring) 508 dma_free_coherent(&ha->pdev->dev, 509 (req->length + 1) * sizeof(request_t), 510 req->ring, req->dma); 511 512 if (req) 513 kfree(req->outstanding_cmds); 514 515 kfree(req); 516 } 517 518 static void qla2x00_free_rsp_que(struct qla_hw_data *ha, struct rsp_que *rsp) 519 { 520 if (IS_QLAFX00(ha)) { 521 if (rsp && rsp->ring_fx00) 522 dma_free_coherent(&ha->pdev->dev, 523 (rsp->length_fx00 + 1) * sizeof(request_t), 524 rsp->ring_fx00, rsp->dma_fx00); 525 } else if (rsp && rsp->ring) { 526 dma_free_coherent(&ha->pdev->dev, 527 (rsp->length + 1) * sizeof(response_t), 528 rsp->ring, rsp->dma); 529 } 530 kfree(rsp); 531 } 532 533 static void qla2x00_free_queues(struct qla_hw_data *ha) 534 { 535 struct req_que *req; 536 struct rsp_que *rsp; 537 int cnt; 538 unsigned long flags; 539 540 if (ha->queue_pair_map) { 541 kfree(ha->queue_pair_map); 542 ha->queue_pair_map = NULL; 543 } 544 if (ha->base_qpair) { 545 kfree(ha->base_qpair); 546 ha->base_qpair = NULL; 547 } 548 549 spin_lock_irqsave(&ha->hardware_lock, flags); 550 for (cnt = 0; cnt < ha->max_req_queues; cnt++) { 551 if (!test_bit(cnt, ha->req_qid_map)) 552 continue; 553 554 req = ha->req_q_map[cnt]; 555 clear_bit(cnt, ha->req_qid_map); 
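		/*
		 * Descriptive note (added): detach the request queue from the
		 * map while the hardware lock is held; the ring memory itself
		 * is released a few lines below with the lock dropped and then
		 * re-acquired for the next iteration.
		 */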
556 ha->req_q_map[cnt] = NULL; 557 558 spin_unlock_irqrestore(&ha->hardware_lock, flags); 559 qla2x00_free_req_que(ha, req); 560 spin_lock_irqsave(&ha->hardware_lock, flags); 561 } 562 spin_unlock_irqrestore(&ha->hardware_lock, flags); 563 564 kfree(ha->req_q_map); 565 ha->req_q_map = NULL; 566 567 568 spin_lock_irqsave(&ha->hardware_lock, flags); 569 for (cnt = 0; cnt < ha->max_rsp_queues; cnt++) { 570 if (!test_bit(cnt, ha->rsp_qid_map)) 571 continue; 572 573 rsp = ha->rsp_q_map[cnt]; 574 clear_bit(cnt, ha->rsp_qid_map); 575 ha->rsp_q_map[cnt] = NULL; 576 spin_unlock_irqrestore(&ha->hardware_lock, flags); 577 qla2x00_free_rsp_que(ha, rsp); 578 spin_lock_irqsave(&ha->hardware_lock, flags); 579 } 580 spin_unlock_irqrestore(&ha->hardware_lock, flags); 581 582 kfree(ha->rsp_q_map); 583 ha->rsp_q_map = NULL; 584 } 585 586 static char * 587 qla2x00_pci_info_str(struct scsi_qla_host *vha, char *str, size_t str_len) 588 { 589 struct qla_hw_data *ha = vha->hw; 590 static const char *const pci_bus_modes[] = { 591 "33", "66", "100", "133", 592 }; 593 uint16_t pci_bus; 594 595 pci_bus = (ha->pci_attr & (BIT_9 | BIT_10)) >> 9; 596 if (pci_bus) { 597 snprintf(str, str_len, "PCI-X (%s MHz)", 598 pci_bus_modes[pci_bus]); 599 } else { 600 pci_bus = (ha->pci_attr & BIT_8) >> 8; 601 snprintf(str, str_len, "PCI (%s MHz)", pci_bus_modes[pci_bus]); 602 } 603 604 return str; 605 } 606 607 static char * 608 qla24xx_pci_info_str(struct scsi_qla_host *vha, char *str, size_t str_len) 609 { 610 static const char *const pci_bus_modes[] = { 611 "33", "66", "100", "133", 612 }; 613 struct qla_hw_data *ha = vha->hw; 614 uint32_t pci_bus; 615 616 if (pci_is_pcie(ha->pdev)) { 617 uint32_t lstat, lspeed, lwidth; 618 const char *speed_str; 619 620 pcie_capability_read_dword(ha->pdev, PCI_EXP_LNKCAP, &lstat); 621 lspeed = lstat & PCI_EXP_LNKCAP_SLS; 622 lwidth = (lstat & PCI_EXP_LNKCAP_MLW) >> 4; 623 624 switch (lspeed) { 625 case 1: 626 speed_str = "2.5GT/s"; 627 break; 628 case 2: 629 speed_str = "5.0GT/s"; 630 break; 631 case 3: 632 speed_str = "8.0GT/s"; 633 break; 634 case 4: 635 speed_str = "16.0GT/s"; 636 break; 637 default: 638 speed_str = "<unknown>"; 639 break; 640 } 641 snprintf(str, str_len, "PCIe (%s x%d)", speed_str, lwidth); 642 643 return str; 644 } 645 646 pci_bus = (ha->pci_attr & CSRX_PCIX_BUS_MODE_MASK) >> 8; 647 if (pci_bus == 0 || pci_bus == 8) 648 snprintf(str, str_len, "PCI (%s MHz)", 649 pci_bus_modes[pci_bus >> 3]); 650 else 651 snprintf(str, str_len, "PCI-X Mode %d (%s MHz)", 652 pci_bus & 4 ? 
2 : 1, 653 pci_bus_modes[pci_bus & 3]); 654 655 return str; 656 } 657 658 static char * 659 qla2x00_fw_version_str(struct scsi_qla_host *vha, char *str, size_t size) 660 { 661 char un_str[10]; 662 struct qla_hw_data *ha = vha->hw; 663 664 snprintf(str, size, "%d.%02d.%02d ", ha->fw_major_version, 665 ha->fw_minor_version, ha->fw_subminor_version); 666 667 if (ha->fw_attributes & BIT_9) { 668 strcat(str, "FLX"); 669 return (str); 670 } 671 672 switch (ha->fw_attributes & 0xFF) { 673 case 0x7: 674 strcat(str, "EF"); 675 break; 676 case 0x17: 677 strcat(str, "TP"); 678 break; 679 case 0x37: 680 strcat(str, "IP"); 681 break; 682 case 0x77: 683 strcat(str, "VI"); 684 break; 685 default: 686 sprintf(un_str, "(%x)", ha->fw_attributes); 687 strcat(str, un_str); 688 break; 689 } 690 if (ha->fw_attributes & 0x100) 691 strcat(str, "X"); 692 693 return (str); 694 } 695 696 static char * 697 qla24xx_fw_version_str(struct scsi_qla_host *vha, char *str, size_t size) 698 { 699 struct qla_hw_data *ha = vha->hw; 700 701 snprintf(str, size, "%d.%02d.%02d (%x)", ha->fw_major_version, 702 ha->fw_minor_version, ha->fw_subminor_version, ha->fw_attributes); 703 return str; 704 } 705 706 void qla2x00_sp_free_dma(srb_t *sp) 707 { 708 struct qla_hw_data *ha = sp->vha->hw; 709 struct scsi_cmnd *cmd = GET_CMD_SP(sp); 710 711 if (sp->flags & SRB_DMA_VALID) { 712 scsi_dma_unmap(cmd); 713 sp->flags &= ~SRB_DMA_VALID; 714 } 715 716 if (sp->flags & SRB_CRC_PROT_DMA_VALID) { 717 dma_unmap_sg(&ha->pdev->dev, scsi_prot_sglist(cmd), 718 scsi_prot_sg_count(cmd), cmd->sc_data_direction); 719 sp->flags &= ~SRB_CRC_PROT_DMA_VALID; 720 } 721 722 if (sp->flags & SRB_CRC_CTX_DSD_VALID) { 723 /* List assured to be having elements */ 724 qla2x00_clean_dsd_pool(ha, sp->u.scmd.crc_ctx); 725 sp->flags &= ~SRB_CRC_CTX_DSD_VALID; 726 } 727 728 if (sp->flags & SRB_CRC_CTX_DMA_VALID) { 729 struct crc_context *ctx0 = sp->u.scmd.crc_ctx; 730 731 dma_pool_free(ha->dl_dma_pool, ctx0, ctx0->crc_ctx_dma); 732 sp->flags &= ~SRB_CRC_CTX_DMA_VALID; 733 } 734 735 if (sp->flags & SRB_FCP_CMND_DMA_VALID) { 736 struct ct6_dsd *ctx1 = sp->u.scmd.ct6_ctx; 737 738 dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd, 739 ctx1->fcp_cmnd_dma); 740 list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list); 741 ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt; 742 ha->gbl_dsd_avail += ctx1->dsd_use_cnt; 743 mempool_free(ctx1, ha->ctx_mempool); 744 } 745 } 746 747 void qla2x00_sp_compl(srb_t *sp, int res) 748 { 749 struct scsi_cmnd *cmd = GET_CMD_SP(sp); 750 struct completion *comp = sp->comp; 751 752 /* kref: INIT */ 753 kref_put(&sp->cmd_kref, qla2x00_sp_release); 754 cmd->result = res; 755 sp->type = 0; 756 scsi_done(cmd); 757 if (comp) 758 complete(comp); 759 } 760 761 void qla2xxx_qpair_sp_free_dma(srb_t *sp) 762 { 763 struct scsi_cmnd *cmd = GET_CMD_SP(sp); 764 struct qla_hw_data *ha = sp->fcport->vha->hw; 765 766 if (sp->flags & SRB_DMA_VALID) { 767 scsi_dma_unmap(cmd); 768 sp->flags &= ~SRB_DMA_VALID; 769 } 770 771 if (sp->flags & SRB_CRC_PROT_DMA_VALID) { 772 dma_unmap_sg(&ha->pdev->dev, scsi_prot_sglist(cmd), 773 scsi_prot_sg_count(cmd), cmd->sc_data_direction); 774 sp->flags &= ~SRB_CRC_PROT_DMA_VALID; 775 } 776 777 if (sp->flags & SRB_CRC_CTX_DSD_VALID) { 778 /* List assured to be having elements */ 779 qla2x00_clean_dsd_pool(ha, sp->u.scmd.crc_ctx); 780 sp->flags &= ~SRB_CRC_CTX_DSD_VALID; 781 } 782 783 if (sp->flags & SRB_DIF_BUNDL_DMA_VALID) { 784 struct crc_context *difctx = sp->u.scmd.crc_ctx; 785 struct dsd_dma *dif_dsd, *nxt_dsd; 786 787 
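		/*
		 * Descriptive note (added): walk both DIF bundling lists,
		 * returning each buffer to its DMA pool and freeing the
		 * bookkeeping entry; the no_dif_bundl/no_ldif_dsd counters
		 * below are expected to reach zero once everything is released.
		 */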
list_for_each_entry_safe(dif_dsd, nxt_dsd, 788 &difctx->ldif_dma_hndl_list, list) { 789 list_del(&dif_dsd->list); 790 dma_pool_free(ha->dif_bundl_pool, dif_dsd->dsd_addr, 791 dif_dsd->dsd_list_dma); 792 kfree(dif_dsd); 793 difctx->no_dif_bundl--; 794 } 795 796 list_for_each_entry_safe(dif_dsd, nxt_dsd, 797 &difctx->ldif_dsd_list, list) { 798 list_del(&dif_dsd->list); 799 dma_pool_free(ha->dl_dma_pool, dif_dsd->dsd_addr, 800 dif_dsd->dsd_list_dma); 801 kfree(dif_dsd); 802 difctx->no_ldif_dsd--; 803 } 804 805 if (difctx->no_ldif_dsd) { 806 ql_dbg(ql_dbg_tgt+ql_dbg_verbose, sp->vha, 0xe022, 807 "%s: difctx->no_ldif_dsd=%x\n", 808 __func__, difctx->no_ldif_dsd); 809 } 810 811 if (difctx->no_dif_bundl) { 812 ql_dbg(ql_dbg_tgt+ql_dbg_verbose, sp->vha, 0xe022, 813 "%s: difctx->no_dif_bundl=%x\n", 814 __func__, difctx->no_dif_bundl); 815 } 816 sp->flags &= ~SRB_DIF_BUNDL_DMA_VALID; 817 } 818 819 if (sp->flags & SRB_FCP_CMND_DMA_VALID) { 820 struct ct6_dsd *ctx1 = sp->u.scmd.ct6_ctx; 821 822 dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd, 823 ctx1->fcp_cmnd_dma); 824 list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list); 825 ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt; 826 ha->gbl_dsd_avail += ctx1->dsd_use_cnt; 827 mempool_free(ctx1, ha->ctx_mempool); 828 sp->flags &= ~SRB_FCP_CMND_DMA_VALID; 829 } 830 831 if (sp->flags & SRB_CRC_CTX_DMA_VALID) { 832 struct crc_context *ctx0 = sp->u.scmd.crc_ctx; 833 834 dma_pool_free(ha->dl_dma_pool, ctx0, ctx0->crc_ctx_dma); 835 sp->flags &= ~SRB_CRC_CTX_DMA_VALID; 836 } 837 } 838 839 void qla2xxx_qpair_sp_compl(srb_t *sp, int res) 840 { 841 struct scsi_cmnd *cmd = GET_CMD_SP(sp); 842 struct completion *comp = sp->comp; 843 844 /* ref: INIT */ 845 kref_put(&sp->cmd_kref, qla2x00_sp_release); 846 cmd->result = res; 847 sp->type = 0; 848 scsi_done(cmd); 849 if (comp) 850 complete(comp); 851 } 852 853 static int 854 qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) 855 { 856 scsi_qla_host_t *vha = shost_priv(host); 857 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; 858 struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device)); 859 struct qla_hw_data *ha = vha->hw; 860 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 861 srb_t *sp; 862 int rval; 863 864 if (unlikely(test_bit(UNLOADING, &base_vha->dpc_flags)) || 865 WARN_ON_ONCE(!rport)) { 866 cmd->result = DID_NO_CONNECT << 16; 867 goto qc24_fail_command; 868 } 869 870 if (ha->mqenable) { 871 uint32_t tag; 872 uint16_t hwq; 873 struct qla_qpair *qpair = NULL; 874 875 tag = blk_mq_unique_tag(scsi_cmd_to_rq(cmd)); 876 hwq = blk_mq_unique_tag_to_hwq(tag); 877 qpair = ha->queue_pair_map[hwq]; 878 879 if (qpair) 880 return qla2xxx_mqueuecommand(host, cmd, qpair); 881 } 882 883 if (ha->flags.eeh_busy) { 884 if (ha->flags.pci_channel_io_perm_failure) { 885 ql_dbg(ql_dbg_aer, vha, 0x9010, 886 "PCI Channel IO permanent failure, exiting " 887 "cmd=%p.\n", cmd); 888 cmd->result = DID_NO_CONNECT << 16; 889 } else { 890 ql_dbg(ql_dbg_aer, vha, 0x9011, 891 "EEH_Busy, Requeuing the cmd=%p.\n", cmd); 892 cmd->result = DID_REQUEUE << 16; 893 } 894 goto qc24_fail_command; 895 } 896 897 rval = fc_remote_port_chkready(rport); 898 if (rval) { 899 cmd->result = rval; 900 ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3003, 901 "fc_remote_port_chkready failed for cmd=%p, rval=0x%x.\n", 902 cmd, rval); 903 goto qc24_fail_command; 904 } 905 906 if (!vha->flags.difdix_supported && 907 scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) { 908 ql_dbg(ql_dbg_io, vha, 0x3004, 909 "DIF Cap not reg, fail DIF 
capable cmd's:%p.\n", 910 cmd); 911 cmd->result = DID_NO_CONNECT << 16; 912 goto qc24_fail_command; 913 } 914 915 if (!fcport || fcport->deleted) { 916 cmd->result = DID_IMM_RETRY << 16; 917 goto qc24_fail_command; 918 } 919 920 if (atomic_read(&fcport->state) != FCS_ONLINE || fcport->deleted) { 921 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD || 922 atomic_read(&base_vha->loop_state) == LOOP_DEAD) { 923 ql_dbg(ql_dbg_io, vha, 0x3005, 924 "Returning DNC, fcport_state=%d loop_state=%d.\n", 925 atomic_read(&fcport->state), 926 atomic_read(&base_vha->loop_state)); 927 cmd->result = DID_NO_CONNECT << 16; 928 goto qc24_fail_command; 929 } 930 goto qc24_target_busy; 931 } 932 933 /* 934 * Return target busy if we've received a non-zero retry_delay_timer 935 * in a FCP_RSP. 936 */ 937 if (fcport->retry_delay_timestamp == 0) { 938 /* retry delay not set */ 939 } else if (time_after(jiffies, fcport->retry_delay_timestamp)) 940 fcport->retry_delay_timestamp = 0; 941 else 942 goto qc24_target_busy; 943 944 sp = scsi_cmd_priv(cmd); 945 /* ref: INIT */ 946 qla2xxx_init_sp(sp, vha, vha->hw->base_qpair, fcport); 947 948 sp->u.scmd.cmd = cmd; 949 sp->type = SRB_SCSI_CMD; 950 sp->free = qla2x00_sp_free_dma; 951 sp->done = qla2x00_sp_compl; 952 953 rval = ha->isp_ops->start_scsi(sp); 954 if (rval != QLA_SUCCESS) { 955 ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3013, 956 "Start scsi failed rval=%d for cmd=%p.\n", rval, cmd); 957 goto qc24_host_busy_free_sp; 958 } 959 960 return 0; 961 962 qc24_host_busy_free_sp: 963 /* ref: INIT */ 964 kref_put(&sp->cmd_kref, qla2x00_sp_release); 965 966 qc24_target_busy: 967 return SCSI_MLQUEUE_TARGET_BUSY; 968 969 qc24_fail_command: 970 scsi_done(cmd); 971 972 return 0; 973 } 974 975 /* For MQ supported I/O */ 976 int 977 qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd, 978 struct qla_qpair *qpair) 979 { 980 scsi_qla_host_t *vha = shost_priv(host); 981 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; 982 struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device)); 983 struct qla_hw_data *ha = vha->hw; 984 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 985 srb_t *sp; 986 int rval; 987 988 rval = rport ? fc_remote_port_chkready(rport) : (DID_NO_CONNECT << 16); 989 if (rval) { 990 cmd->result = rval; 991 ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3076, 992 "fc_remote_port_chkready failed for cmd=%p, rval=0x%x.\n", 993 cmd, rval); 994 goto qc24_fail_command; 995 } 996 997 if (!qpair->online) { 998 ql_dbg(ql_dbg_io, vha, 0x3077, 999 "qpair not online. eeh_busy=%d.\n", ha->flags.eeh_busy); 1000 cmd->result = DID_NO_CONNECT << 16; 1001 goto qc24_fail_command; 1002 } 1003 1004 if (!fcport || fcport->deleted) { 1005 cmd->result = DID_IMM_RETRY << 16; 1006 goto qc24_fail_command; 1007 } 1008 1009 if (atomic_read(&fcport->state) != FCS_ONLINE || fcport->deleted) { 1010 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD || 1011 atomic_read(&base_vha->loop_state) == LOOP_DEAD) { 1012 ql_dbg(ql_dbg_io, vha, 0x3077, 1013 "Returning DNC, fcport_state=%d loop_state=%d.\n", 1014 atomic_read(&fcport->state), 1015 atomic_read(&base_vha->loop_state)); 1016 cmd->result = DID_NO_CONNECT << 16; 1017 goto qc24_fail_command; 1018 } 1019 goto qc24_target_busy; 1020 } 1021 1022 /* 1023 * Return target busy if we've received a non-zero retry_delay_timer 1024 * in a FCP_RSP. 
1025 */ 1026 if (fcport->retry_delay_timestamp == 0) { 1027 /* retry delay not set */ 1028 } else if (time_after(jiffies, fcport->retry_delay_timestamp)) 1029 fcport->retry_delay_timestamp = 0; 1030 else 1031 goto qc24_target_busy; 1032 1033 sp = scsi_cmd_priv(cmd); 1034 /* ref: INIT */ 1035 qla2xxx_init_sp(sp, vha, qpair, fcport); 1036 1037 sp->u.scmd.cmd = cmd; 1038 sp->type = SRB_SCSI_CMD; 1039 sp->free = qla2xxx_qpair_sp_free_dma; 1040 sp->done = qla2xxx_qpair_sp_compl; 1041 1042 rval = ha->isp_ops->start_scsi_mq(sp); 1043 if (rval != QLA_SUCCESS) { 1044 ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3078, 1045 "Start scsi failed rval=%d for cmd=%p.\n", rval, cmd); 1046 goto qc24_host_busy_free_sp; 1047 } 1048 1049 return 0; 1050 1051 qc24_host_busy_free_sp: 1052 /* ref: INIT */ 1053 kref_put(&sp->cmd_kref, qla2x00_sp_release); 1054 1055 qc24_target_busy: 1056 return SCSI_MLQUEUE_TARGET_BUSY; 1057 1058 qc24_fail_command: 1059 scsi_done(cmd); 1060 1061 return 0; 1062 } 1063 1064 /* 1065 * qla2x00_eh_wait_on_command 1066 * Waits for the command to be returned by the Firmware for some 1067 * max time. 1068 * 1069 * Input: 1070 * cmd = Scsi Command to wait on. 1071 * 1072 * Return: 1073 * Completed in time : QLA_SUCCESS 1074 * Did not complete in time : QLA_FUNCTION_FAILED 1075 */ 1076 static int 1077 qla2x00_eh_wait_on_command(struct scsi_cmnd *cmd) 1078 { 1079 #define ABORT_POLLING_PERIOD 1000 1080 #define ABORT_WAIT_ITER ((2 * 1000) / (ABORT_POLLING_PERIOD)) 1081 unsigned long wait_iter = ABORT_WAIT_ITER; 1082 scsi_qla_host_t *vha = shost_priv(cmd->device->host); 1083 struct qla_hw_data *ha = vha->hw; 1084 srb_t *sp = scsi_cmd_priv(cmd); 1085 int ret = QLA_SUCCESS; 1086 1087 if (unlikely(pci_channel_offline(ha->pdev)) || ha->flags.eeh_busy) { 1088 ql_dbg(ql_dbg_taskm, vha, 0x8005, 1089 "Return:eh_wait.\n"); 1090 return ret; 1091 } 1092 1093 while (sp->type && wait_iter--) 1094 msleep(ABORT_POLLING_PERIOD); 1095 if (sp->type) 1096 ret = QLA_FUNCTION_FAILED; 1097 1098 return ret; 1099 } 1100 1101 /* 1102 * qla2x00_wait_for_hba_online 1103 * Wait till the HBA is online after going through 1104 * <= MAX_RETRIES_OF_ISP_ABORT or 1105 * finally HBA is disabled ie marked offline 1106 * 1107 * Input: 1108 * ha - pointer to host adapter structure 1109 * 1110 * Note: 1111 * Does context switching-Release SPIN_LOCK 1112 * (if any) before calling this routine. 
1113 * 1114 * Return: 1115 * Success (Adapter is online) : 0 1116 * Failed (Adapter is offline/disabled) : 1 1117 */ 1118 int 1119 qla2x00_wait_for_hba_online(scsi_qla_host_t *vha) 1120 { 1121 int return_status; 1122 unsigned long wait_online; 1123 struct qla_hw_data *ha = vha->hw; 1124 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 1125 1126 wait_online = jiffies + (MAX_LOOP_TIMEOUT * HZ); 1127 while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) || 1128 test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) || 1129 test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) || 1130 ha->dpc_active) && time_before(jiffies, wait_online)) { 1131 1132 msleep(1000); 1133 } 1134 if (base_vha->flags.online) 1135 return_status = QLA_SUCCESS; 1136 else 1137 return_status = QLA_FUNCTION_FAILED; 1138 1139 return (return_status); 1140 } 1141 1142 static inline int test_fcport_count(scsi_qla_host_t *vha) 1143 { 1144 struct qla_hw_data *ha = vha->hw; 1145 unsigned long flags; 1146 int res; 1147 /* Return 0 = sleep, x=wake */ 1148 1149 spin_lock_irqsave(&ha->tgt.sess_lock, flags); 1150 ql_dbg(ql_dbg_init, vha, 0x00ec, 1151 "tgt %p, fcport_count=%d\n", 1152 vha, vha->fcport_count); 1153 res = (vha->fcport_count == 0); 1154 if (res) { 1155 struct fc_port *fcport; 1156 1157 list_for_each_entry(fcport, &vha->vp_fcports, list) { 1158 if (fcport->deleted != QLA_SESS_DELETED) { 1159 /* session(s) may not be fully logged in 1160 * (ie fcport_count=0), but session 1161 * deletion thread(s) may be inflight. 1162 */ 1163 1164 res = 0; 1165 break; 1166 } 1167 } 1168 } 1169 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 1170 1171 return res; 1172 } 1173 1174 /* 1175 * qla2x00_wait_for_sess_deletion can only be called from remove_one. 1176 * it has dependency on UNLOADING flag to stop device discovery 1177 */ 1178 void 1179 qla2x00_wait_for_sess_deletion(scsi_qla_host_t *vha) 1180 { 1181 u8 i; 1182 1183 qla2x00_mark_all_devices_lost(vha); 1184 1185 for (i = 0; i < 10; i++) { 1186 if (wait_event_timeout(vha->fcport_waitQ, 1187 test_fcport_count(vha), HZ) > 0) 1188 break; 1189 } 1190 1191 flush_workqueue(vha->hw->wq); 1192 } 1193 1194 /* 1195 * qla2x00_wait_for_hba_ready 1196 * Wait till the HBA is ready before doing driver unload 1197 * 1198 * Input: 1199 * ha - pointer to host adapter structure 1200 * 1201 * Note: 1202 * Does context switching-Release SPIN_LOCK 1203 * (if any) before calling this routine. 
1204 * 1205 */ 1206 static void 1207 qla2x00_wait_for_hba_ready(scsi_qla_host_t *vha) 1208 { 1209 struct qla_hw_data *ha = vha->hw; 1210 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 1211 1212 while ((qla2x00_reset_active(vha) || ha->dpc_active || 1213 ha->flags.mbox_busy) || 1214 test_bit(FX00_RESET_RECOVERY, &vha->dpc_flags) || 1215 test_bit(FX00_TARGET_SCAN, &vha->dpc_flags)) { 1216 if (test_bit(UNLOADING, &base_vha->dpc_flags)) 1217 break; 1218 msleep(1000); 1219 } 1220 } 1221 1222 int 1223 qla2x00_wait_for_chip_reset(scsi_qla_host_t *vha) 1224 { 1225 int return_status; 1226 unsigned long wait_reset; 1227 struct qla_hw_data *ha = vha->hw; 1228 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 1229 1230 wait_reset = jiffies + (MAX_LOOP_TIMEOUT * HZ); 1231 while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) || 1232 test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) || 1233 test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) || 1234 ha->dpc_active) && time_before(jiffies, wait_reset)) { 1235 1236 msleep(1000); 1237 1238 if (!test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags) && 1239 ha->flags.chip_reset_done) 1240 break; 1241 } 1242 if (ha->flags.chip_reset_done) 1243 return_status = QLA_SUCCESS; 1244 else 1245 return_status = QLA_FUNCTION_FAILED; 1246 1247 return return_status; 1248 } 1249 1250 /************************************************************************** 1251 * qla2xxx_eh_abort 1252 * 1253 * Description: 1254 * The abort function will abort the specified command. 1255 * 1256 * Input: 1257 * cmd = Linux SCSI command packet to be aborted. 1258 * 1259 * Returns: 1260 * Either SUCCESS or FAILED. 1261 * 1262 * Note: 1263 * Only return FAILED if command not returned by firmware. 1264 **************************************************************************/ 1265 static int 1266 qla2xxx_eh_abort(struct scsi_cmnd *cmd) 1267 { 1268 scsi_qla_host_t *vha = shost_priv(cmd->device->host); 1269 DECLARE_COMPLETION_ONSTACK(comp); 1270 srb_t *sp; 1271 int ret; 1272 unsigned int id; 1273 uint64_t lun; 1274 int rval; 1275 struct qla_hw_data *ha = vha->hw; 1276 uint32_t ratov_j; 1277 struct qla_qpair *qpair; 1278 unsigned long flags; 1279 int fast_fail_status = SUCCESS; 1280 1281 if (qla2x00_isp_reg_stat(ha)) { 1282 ql_log(ql_log_info, vha, 0x8042, 1283 "PCI/Register disconnect, exiting.\n"); 1284 qla_pci_set_eeh_busy(vha); 1285 return FAILED; 1286 } 1287 1288 /* Save any FAST_IO_FAIL value to return later if abort succeeds */ 1289 ret = fc_block_scsi_eh(cmd); 1290 if (ret != 0) 1291 fast_fail_status = ret; 1292 1293 sp = scsi_cmd_priv(cmd); 1294 qpair = sp->qpair; 1295 1296 vha->cmd_timeout_cnt++; 1297 1298 if ((sp->fcport && sp->fcport->deleted) || !qpair) 1299 return fast_fail_status != SUCCESS ? fast_fail_status : FAILED; 1300 1301 spin_lock_irqsave(qpair->qp_lock_ptr, flags); 1302 sp->comp = ∁ 1303 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); 1304 1305 1306 id = cmd->device->id; 1307 lun = cmd->device->lun; 1308 1309 ql_dbg(ql_dbg_taskm, vha, 0x8002, 1310 "Aborting from RISC nexus=%ld:%d:%llu sp=%p cmd=%p handle=%x\n", 1311 vha->host_no, id, lun, sp, cmd, sp->handle); 1312 1313 /* 1314 * Abort will release the original Command/sp from FW. Let the 1315 * original command call scsi_done. In return, he will wakeup 1316 * this sleeping thread. 1317 */ 1318 rval = ha->isp_ops->abort_command(sp); 1319 1320 ql_dbg(ql_dbg_taskm, vha, 0x8003, 1321 "Abort command mbx cmd=%p, rval=%x.\n", cmd, rval); 1322 1323 /* Wait for the command completion. 
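	 * Added note: the firmware owns the aborted command at this point;
	 * allow it up to 4 * R_A_TOV (ratov_j computed below) to post the
	 * completion before declaring the abort FAILED.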
*/ 1324 ratov_j = ha->r_a_tov/10 * 4 * 1000; 1325 ratov_j = msecs_to_jiffies(ratov_j); 1326 switch (rval) { 1327 case QLA_SUCCESS: 1328 if (!wait_for_completion_timeout(&comp, ratov_j)) { 1329 ql_dbg(ql_dbg_taskm, vha, 0xffff, 1330 "%s: Abort wait timer (4 * R_A_TOV[%d]) expired\n", 1331 __func__, ha->r_a_tov/10); 1332 ret = FAILED; 1333 } else { 1334 ret = fast_fail_status; 1335 } 1336 break; 1337 default: 1338 ret = FAILED; 1339 break; 1340 } 1341 1342 sp->comp = NULL; 1343 1344 ql_log(ql_log_info, vha, 0x801c, 1345 "Abort command issued nexus=%ld:%d:%llu -- %x.\n", 1346 vha->host_no, id, lun, ret); 1347 1348 return ret; 1349 } 1350 1351 /* 1352 * Returns: QLA_SUCCESS or QLA_FUNCTION_FAILED. 1353 */ 1354 static int 1355 __qla2x00_eh_wait_for_pending_commands(struct qla_qpair *qpair, unsigned int t, 1356 uint64_t l, enum nexus_wait_type type) 1357 { 1358 int cnt, match, status; 1359 unsigned long flags; 1360 scsi_qla_host_t *vha = qpair->vha; 1361 struct req_que *req = qpair->req; 1362 srb_t *sp; 1363 struct scsi_cmnd *cmd; 1364 1365 status = QLA_SUCCESS; 1366 1367 spin_lock_irqsave(qpair->qp_lock_ptr, flags); 1368 for (cnt = 1; status == QLA_SUCCESS && 1369 cnt < req->num_outstanding_cmds; cnt++) { 1370 sp = req->outstanding_cmds[cnt]; 1371 if (!sp) 1372 continue; 1373 if (sp->type != SRB_SCSI_CMD) 1374 continue; 1375 if (vha->vp_idx != sp->vha->vp_idx) 1376 continue; 1377 match = 0; 1378 cmd = GET_CMD_SP(sp); 1379 switch (type) { 1380 case WAIT_HOST: 1381 match = 1; 1382 break; 1383 case WAIT_TARGET: 1384 match = cmd->device->id == t; 1385 break; 1386 case WAIT_LUN: 1387 match = (cmd->device->id == t && 1388 cmd->device->lun == l); 1389 break; 1390 } 1391 if (!match) 1392 continue; 1393 1394 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); 1395 status = qla2x00_eh_wait_on_command(cmd); 1396 spin_lock_irqsave(qpair->qp_lock_ptr, flags); 1397 } 1398 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); 1399 1400 return status; 1401 } 1402 1403 int 1404 qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t, 1405 uint64_t l, enum nexus_wait_type type) 1406 { 1407 struct qla_qpair *qpair; 1408 struct qla_hw_data *ha = vha->hw; 1409 int i, status = QLA_SUCCESS; 1410 1411 status = __qla2x00_eh_wait_for_pending_commands(ha->base_qpair, t, l, 1412 type); 1413 for (i = 0; status == QLA_SUCCESS && i < ha->max_qpairs; i++) { 1414 qpair = ha->queue_pair_map[i]; 1415 if (!qpair) 1416 continue; 1417 status = __qla2x00_eh_wait_for_pending_commands(qpair, t, l, 1418 type); 1419 } 1420 return status; 1421 } 1422 1423 static char *reset_errors[] = { 1424 "HBA not online", 1425 "HBA not ready", 1426 "Task management failed", 1427 "Waiting for command completions", 1428 }; 1429 1430 static int 1431 qla2xxx_eh_device_reset(struct scsi_cmnd *cmd) 1432 { 1433 struct scsi_device *sdev = cmd->device; 1434 scsi_qla_host_t *vha = shost_priv(sdev->host); 1435 struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); 1436 fc_port_t *fcport = (struct fc_port *) sdev->hostdata; 1437 struct qla_hw_data *ha = vha->hw; 1438 int err; 1439 1440 if (qla2x00_isp_reg_stat(ha)) { 1441 ql_log(ql_log_info, vha, 0x803e, 1442 "PCI/Register disconnect, exiting.\n"); 1443 qla_pci_set_eeh_busy(vha); 1444 return FAILED; 1445 } 1446 1447 if (!fcport) { 1448 return FAILED; 1449 } 1450 1451 err = fc_block_rport(rport); 1452 if (err != 0) 1453 return err; 1454 1455 if (fcport->deleted) 1456 return FAILED; 1457 1458 ql_log(ql_log_info, vha, 0x8009, 1459 "DEVICE RESET ISSUED nexus=%ld:%d:%llu cmd=%p.\n", vha->host_no, 
1460 sdev->id, sdev->lun, cmd); 1461 1462 err = 0; 1463 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) { 1464 ql_log(ql_log_warn, vha, 0x800a, 1465 "Wait for hba online failed for cmd=%p.\n", cmd); 1466 goto eh_reset_failed; 1467 } 1468 err = 2; 1469 if (ha->isp_ops->lun_reset(fcport, sdev->lun, 1) 1470 != QLA_SUCCESS) { 1471 ql_log(ql_log_warn, vha, 0x800c, 1472 "do_reset failed for cmd=%p.\n", cmd); 1473 goto eh_reset_failed; 1474 } 1475 err = 3; 1476 if (qla2x00_eh_wait_for_pending_commands(vha, sdev->id, 1477 sdev->lun, WAIT_LUN) != QLA_SUCCESS) { 1478 ql_log(ql_log_warn, vha, 0x800d, 1479 "wait for pending cmds failed for cmd=%p.\n", cmd); 1480 goto eh_reset_failed; 1481 } 1482 1483 ql_log(ql_log_info, vha, 0x800e, 1484 "DEVICE RESET SUCCEEDED nexus:%ld:%d:%llu cmd=%p.\n", 1485 vha->host_no, sdev->id, sdev->lun, cmd); 1486 1487 return SUCCESS; 1488 1489 eh_reset_failed: 1490 ql_log(ql_log_info, vha, 0x800f, 1491 "DEVICE RESET FAILED: %s nexus=%ld:%d:%llu cmd=%p.\n", 1492 reset_errors[err], vha->host_no, sdev->id, sdev->lun, 1493 cmd); 1494 vha->reset_cmd_err_cnt++; 1495 return FAILED; 1496 } 1497 1498 static int 1499 qla2xxx_eh_target_reset(struct scsi_cmnd *cmd) 1500 { 1501 struct scsi_device *sdev = cmd->device; 1502 struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); 1503 scsi_qla_host_t *vha = shost_priv(rport_to_shost(rport)); 1504 struct qla_hw_data *ha = vha->hw; 1505 fc_port_t *fcport = *(fc_port_t **)rport->dd_data; 1506 int err; 1507 1508 if (qla2x00_isp_reg_stat(ha)) { 1509 ql_log(ql_log_info, vha, 0x803f, 1510 "PCI/Register disconnect, exiting.\n"); 1511 qla_pci_set_eeh_busy(vha); 1512 return FAILED; 1513 } 1514 1515 if (!fcport) { 1516 return FAILED; 1517 } 1518 1519 err = fc_block_rport(rport); 1520 if (err != 0) 1521 return err; 1522 1523 if (fcport->deleted) 1524 return FAILED; 1525 1526 ql_log(ql_log_info, vha, 0x8009, 1527 "TARGET RESET ISSUED nexus=%ld:%d cmd=%p.\n", vha->host_no, 1528 sdev->id, cmd); 1529 1530 err = 0; 1531 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) { 1532 ql_log(ql_log_warn, vha, 0x800a, 1533 "Wait for hba online failed for cmd=%p.\n", cmd); 1534 goto eh_reset_failed; 1535 } 1536 err = 2; 1537 if (ha->isp_ops->target_reset(fcport, 0, 0) != QLA_SUCCESS) { 1538 ql_log(ql_log_warn, vha, 0x800c, 1539 "target_reset failed for cmd=%p.\n", cmd); 1540 goto eh_reset_failed; 1541 } 1542 err = 3; 1543 if (qla2x00_eh_wait_for_pending_commands(vha, sdev->id, 1544 0, WAIT_TARGET) != QLA_SUCCESS) { 1545 ql_log(ql_log_warn, vha, 0x800d, 1546 "wait for pending cmds failed for cmd=%p.\n", cmd); 1547 goto eh_reset_failed; 1548 } 1549 1550 ql_log(ql_log_info, vha, 0x800e, 1551 "TARGET RESET SUCCEEDED nexus:%ld:%d cmd=%p.\n", 1552 vha->host_no, sdev->id, cmd); 1553 1554 return SUCCESS; 1555 1556 eh_reset_failed: 1557 ql_log(ql_log_info, vha, 0x800f, 1558 "TARGET RESET FAILED: %s nexus=%ld:%d:%llu cmd=%p.\n", 1559 reset_errors[err], vha->host_no, cmd->device->id, cmd->device->lun, 1560 cmd); 1561 vha->reset_cmd_err_cnt++; 1562 return FAILED; 1563 } 1564 1565 /************************************************************************** 1566 * qla2xxx_eh_bus_reset 1567 * 1568 * Description: 1569 * The bus reset function will reset the bus and abort any executing 1570 * commands. 1571 * 1572 * Input: 1573 * cmd = Linux SCSI command packet of the command that cause the 1574 * bus reset. 1575 * 1576 * Returns: 1577 * SUCCESS/FAILURE (defined as macro in scsi.h). 
1578 * 1579 **************************************************************************/ 1580 static int 1581 qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd) 1582 { 1583 scsi_qla_host_t *vha = shost_priv(cmd->device->host); 1584 int ret = FAILED; 1585 unsigned int id; 1586 uint64_t lun; 1587 struct qla_hw_data *ha = vha->hw; 1588 1589 if (qla2x00_isp_reg_stat(ha)) { 1590 ql_log(ql_log_info, vha, 0x8040, 1591 "PCI/Register disconnect, exiting.\n"); 1592 qla_pci_set_eeh_busy(vha); 1593 return FAILED; 1594 } 1595 1596 id = cmd->device->id; 1597 lun = cmd->device->lun; 1598 1599 if (qla2x00_chip_is_down(vha)) 1600 return ret; 1601 1602 ql_log(ql_log_info, vha, 0x8012, 1603 "BUS RESET ISSUED nexus=%ld:%d:%llu.\n", vha->host_no, id, lun); 1604 1605 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) { 1606 ql_log(ql_log_fatal, vha, 0x8013, 1607 "Wait for hba online failed board disabled.\n"); 1608 goto eh_bus_reset_done; 1609 } 1610 1611 if (qla2x00_loop_reset(vha) == QLA_SUCCESS) 1612 ret = SUCCESS; 1613 1614 if (ret == FAILED) 1615 goto eh_bus_reset_done; 1616 1617 /* Flush outstanding commands. */ 1618 if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST) != 1619 QLA_SUCCESS) { 1620 ql_log(ql_log_warn, vha, 0x8014, 1621 "Wait for pending commands failed.\n"); 1622 ret = FAILED; 1623 } 1624 1625 eh_bus_reset_done: 1626 ql_log(ql_log_warn, vha, 0x802b, 1627 "BUS RESET %s nexus=%ld:%d:%llu.\n", 1628 (ret == FAILED) ? "FAILED" : "SUCCEEDED", vha->host_no, id, lun); 1629 1630 return ret; 1631 } 1632 1633 /************************************************************************** 1634 * qla2xxx_eh_host_reset 1635 * 1636 * Description: 1637 * The reset function will reset the Adapter. 1638 * 1639 * Input: 1640 * cmd = Linux SCSI command packet of the command that cause the 1641 * adapter reset. 1642 * 1643 * Returns: 1644 * Either SUCCESS or FAILED. 1645 * 1646 * Note: 1647 **************************************************************************/ 1648 static int 1649 qla2xxx_eh_host_reset(struct scsi_cmnd *cmd) 1650 { 1651 scsi_qla_host_t *vha = shost_priv(cmd->device->host); 1652 struct qla_hw_data *ha = vha->hw; 1653 int ret = FAILED; 1654 unsigned int id; 1655 uint64_t lun; 1656 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 1657 1658 if (qla2x00_isp_reg_stat(ha)) { 1659 ql_log(ql_log_info, vha, 0x8041, 1660 "PCI/Register disconnect, exiting.\n"); 1661 qla_pci_set_eeh_busy(vha); 1662 return SUCCESS; 1663 } 1664 1665 id = cmd->device->id; 1666 lun = cmd->device->lun; 1667 1668 ql_log(ql_log_info, vha, 0x8018, 1669 "ADAPTER RESET ISSUED nexus=%ld:%d:%llu.\n", vha->host_no, id, lun); 1670 1671 /* 1672 * No point in issuing another reset if one is active. Also do not 1673 * attempt a reset if we are updating flash. 1674 */ 1675 if (qla2x00_reset_active(vha) || ha->optrom_state != QLA_SWAITING) 1676 goto eh_host_reset_lock; 1677 1678 if (vha != base_vha) { 1679 if (qla2x00_vp_abort_isp(vha)) 1680 goto eh_host_reset_lock; 1681 } else { 1682 if (IS_P3P_TYPE(vha->hw)) { 1683 if (!qla82xx_fcoe_ctx_reset(vha)) { 1684 /* Ctx reset success */ 1685 ret = SUCCESS; 1686 goto eh_host_reset_lock; 1687 } 1688 /* fall thru if ctx reset failed */ 1689 } 1690 if (ha->wq) 1691 flush_workqueue(ha->wq); 1692 1693 set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); 1694 if (ha->isp_ops->abort_isp(base_vha)) { 1695 clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); 1696 /* failed. 
schedule dpc to try */ 1697 set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags); 1698 1699 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) { 1700 ql_log(ql_log_warn, vha, 0x802a, 1701 "wait for hba online failed.\n"); 1702 goto eh_host_reset_lock; 1703 } 1704 } 1705 clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); 1706 } 1707 1708 /* Waiting for command to be returned to OS.*/ 1709 if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST) == 1710 QLA_SUCCESS) 1711 ret = SUCCESS; 1712 1713 eh_host_reset_lock: 1714 ql_log(ql_log_info, vha, 0x8017, 1715 "ADAPTER RESET %s nexus=%ld:%d:%llu.\n", 1716 (ret == FAILED) ? "FAILED" : "SUCCEEDED", vha->host_no, id, lun); 1717 1718 return ret; 1719 } 1720 1721 /* 1722 * qla2x00_loop_reset 1723 * Issue loop reset. 1724 * 1725 * Input: 1726 * ha = adapter block pointer. 1727 * 1728 * Returns: 1729 * 0 = success 1730 */ 1731 int 1732 qla2x00_loop_reset(scsi_qla_host_t *vha) 1733 { 1734 int ret; 1735 struct qla_hw_data *ha = vha->hw; 1736 1737 if (IS_QLAFX00(ha)) 1738 return QLA_SUCCESS; 1739 1740 if (ha->flags.enable_lip_full_login && !IS_CNA_CAPABLE(ha)) { 1741 atomic_set(&vha->loop_state, LOOP_DOWN); 1742 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 1743 qla2x00_mark_all_devices_lost(vha); 1744 ret = qla2x00_full_login_lip(vha); 1745 if (ret != QLA_SUCCESS) { 1746 ql_dbg(ql_dbg_taskm, vha, 0x802d, 1747 "full_login_lip=%d.\n", ret); 1748 } 1749 } 1750 1751 if (ha->flags.enable_lip_reset) { 1752 ret = qla2x00_lip_reset(vha); 1753 if (ret != QLA_SUCCESS) 1754 ql_dbg(ql_dbg_taskm, vha, 0x802e, 1755 "lip_reset failed (%d).\n", ret); 1756 } 1757 1758 /* Issue marker command only when we are going to start the I/O */ 1759 vha->marker_needed = 1; 1760 1761 return QLA_SUCCESS; 1762 } 1763 1764 /* 1765 * The caller must ensure that no completion interrupts will happen 1766 * while this function is in progress. 1767 */ 1768 static void qla2x00_abort_srb(struct qla_qpair *qp, srb_t *sp, const int res, 1769 unsigned long *flags) 1770 __releases(qp->qp_lock_ptr) 1771 __acquires(qp->qp_lock_ptr) 1772 { 1773 DECLARE_COMPLETION_ONSTACK(comp); 1774 scsi_qla_host_t *vha = qp->vha; 1775 struct qla_hw_data *ha = vha->hw; 1776 struct scsi_cmnd *cmd = GET_CMD_SP(sp); 1777 int rval; 1778 bool ret_cmd; 1779 uint32_t ratov_j; 1780 1781 lockdep_assert_held(qp->qp_lock_ptr); 1782 1783 if (qla2x00_chip_is_down(vha)) { 1784 sp->done(sp, res); 1785 return; 1786 } 1787 1788 if (sp->type == SRB_NVME_CMD || sp->type == SRB_NVME_LS || 1789 (sp->type == SRB_SCSI_CMD && !ha->flags.eeh_busy && 1790 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) && 1791 !qla2x00_isp_reg_stat(ha))) { 1792 if (sp->comp) { 1793 sp->done(sp, res); 1794 return; 1795 } 1796 1797 sp->comp = ∁ 1798 spin_unlock_irqrestore(qp->qp_lock_ptr, *flags); 1799 1800 rval = ha->isp_ops->abort_command(sp); 1801 /* Wait for command completion. 
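		 * Added note: on QLA_SUCCESS the firmware is expected to return
		 * the SP within 4 * R_A_TOV; if that window expires, ret_cmd is
		 * set so the srb is completed locally below.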
*/ 1802 ret_cmd = false; 1803 ratov_j = ha->r_a_tov/10 * 4 * 1000; 1804 ratov_j = msecs_to_jiffies(ratov_j); 1805 switch (rval) { 1806 case QLA_SUCCESS: 1807 if (wait_for_completion_timeout(&comp, ratov_j)) { 1808 ql_dbg(ql_dbg_taskm, vha, 0xffff, 1809 "%s: Abort wait timer (4 * R_A_TOV[%d]) expired\n", 1810 __func__, ha->r_a_tov/10); 1811 ret_cmd = true; 1812 } 1813 /* else FW return SP to driver */ 1814 break; 1815 default: 1816 ret_cmd = true; 1817 break; 1818 } 1819 1820 spin_lock_irqsave(qp->qp_lock_ptr, *flags); 1821 if (ret_cmd && blk_mq_request_started(scsi_cmd_to_rq(cmd))) 1822 sp->done(sp, res); 1823 } else { 1824 sp->done(sp, res); 1825 } 1826 } 1827 1828 /* 1829 * The caller must ensure that no completion interrupts will happen 1830 * while this function is in progress. 1831 */ 1832 static void 1833 __qla2x00_abort_all_cmds(struct qla_qpair *qp, int res) 1834 { 1835 int cnt; 1836 unsigned long flags; 1837 srb_t *sp; 1838 scsi_qla_host_t *vha = qp->vha; 1839 struct qla_hw_data *ha = vha->hw; 1840 struct req_que *req; 1841 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 1842 struct qla_tgt_cmd *cmd; 1843 1844 if (!ha->req_q_map) 1845 return; 1846 spin_lock_irqsave(qp->qp_lock_ptr, flags); 1847 req = qp->req; 1848 for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) { 1849 sp = req->outstanding_cmds[cnt]; 1850 if (sp) { 1851 switch (sp->cmd_type) { 1852 case TYPE_SRB: 1853 qla2x00_abort_srb(qp, sp, res, &flags); 1854 break; 1855 case TYPE_TGT_CMD: 1856 if (!vha->hw->tgt.tgt_ops || !tgt || 1857 qla_ini_mode_enabled(vha)) { 1858 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf003, 1859 "HOST-ABORT-HNDLR: dpc_flags=%lx. Target mode disabled\n", 1860 vha->dpc_flags); 1861 continue; 1862 } 1863 cmd = (struct qla_tgt_cmd *)sp; 1864 cmd->aborted = 1; 1865 break; 1866 case TYPE_TGT_TMCMD: 1867 /* Skip task management functions. */ 1868 break; 1869 default: 1870 break; 1871 } 1872 req->outstanding_cmds[cnt] = NULL; 1873 } 1874 } 1875 spin_unlock_irqrestore(qp->qp_lock_ptr, flags); 1876 } 1877 1878 /* 1879 * The caller must ensure that no completion interrupts will happen 1880 * while this function is in progress. 1881 */ 1882 void 1883 qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res) 1884 { 1885 int que; 1886 struct qla_hw_data *ha = vha->hw; 1887 1888 /* Continue only if initialization complete. */ 1889 if (!ha->base_qpair) 1890 return; 1891 __qla2x00_abort_all_cmds(ha->base_qpair, res); 1892 1893 if (!ha->queue_pair_map) 1894 return; 1895 for (que = 0; que < ha->max_qpairs; que++) { 1896 if (!ha->queue_pair_map[que]) 1897 continue; 1898 1899 __qla2x00_abort_all_cmds(ha->queue_pair_map[que], res); 1900 } 1901 } 1902 1903 static int 1904 qla2xxx_slave_alloc(struct scsi_device *sdev) 1905 { 1906 struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); 1907 1908 if (!rport || fc_remote_port_chkready(rport)) 1909 return -ENXIO; 1910 1911 sdev->hostdata = *(fc_port_t **)rport->dd_data; 1912 1913 return 0; 1914 } 1915 1916 static int 1917 qla2xxx_slave_configure(struct scsi_device *sdev) 1918 { 1919 scsi_qla_host_t *vha = shost_priv(sdev->host); 1920 struct req_que *req = vha->req; 1921 1922 if (IS_T10_PI_CAPABLE(vha->hw)) 1923 blk_queue_update_dma_alignment(sdev->request_queue, 0x7); 1924 1925 scsi_change_queue_depth(sdev, req->max_q_depth); 1926 return 0; 1927 } 1928 1929 static void 1930 qla2xxx_slave_destroy(struct scsi_device *sdev) 1931 { 1932 sdev->hostdata = NULL; 1933 } 1934 1935 /** 1936 * qla2x00_config_dma_addressing() - Configure OS DMA addressing method. 
1937 * @ha: HA context
1938 *
1939 * At exit, the @ha's flags.enable_64bit_addressing is set to indicate the
1940 * supported addressing method.
1941 */
1942 static void
1943 qla2x00_config_dma_addressing(struct qla_hw_data *ha)
1944 {
1945 /* Assume a 32bit DMA mask. */
1946 ha->flags.enable_64bit_addressing = 0;
1947
1948 if (!dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(64))) {
1949 /* Any upper-dword bits set? */
1950 if (MSD(dma_get_required_mask(&ha->pdev->dev)) &&
1951 !dma_set_coherent_mask(&ha->pdev->dev, DMA_BIT_MASK(64))) {
1952 /* Ok, a 64bit DMA mask is applicable. */
1953 ha->flags.enable_64bit_addressing = 1;
1954 ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
1955 ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
1956 return;
1957 }
1958 }
1959
1960 dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(32));
1961 dma_set_coherent_mask(&ha->pdev->dev, DMA_BIT_MASK(32));
1962 }
1963
1964 static void
1965 qla2x00_enable_intrs(struct qla_hw_data *ha)
1966 {
1967 unsigned long flags = 0;
1968 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1969
1970 spin_lock_irqsave(&ha->hardware_lock, flags);
1971 ha->interrupts_on = 1;
1972 /* enable risc and host interrupts */
1973 wrt_reg_word(&reg->ictrl, ICR_EN_INT | ICR_EN_RISC);
1974 rd_reg_word(&reg->ictrl);
1975 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1976
1977 }
1978
1979 static void
1980 qla2x00_disable_intrs(struct qla_hw_data *ha)
1981 {
1982 unsigned long flags = 0;
1983 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1984
1985 spin_lock_irqsave(&ha->hardware_lock, flags);
1986 ha->interrupts_on = 0;
1987 /* disable risc and host interrupts */
1988 wrt_reg_word(&reg->ictrl, 0);
1989 rd_reg_word(&reg->ictrl);
1990 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1991 }
1992
1993 static void
1994 qla24xx_enable_intrs(struct qla_hw_data *ha)
1995 {
1996 unsigned long flags = 0;
1997 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1998
1999 spin_lock_irqsave(&ha->hardware_lock, flags);
2000 ha->interrupts_on = 1;
2001 wrt_reg_dword(&reg->ictrl, ICRX_EN_RISC_INT);
2002 rd_reg_dword(&reg->ictrl);
2003 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2004 }
2005
2006 static void
2007 qla24xx_disable_intrs(struct qla_hw_data *ha)
2008 {
2009 unsigned long flags = 0;
2010 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
2011
2012 if (IS_NOPOLLING_TYPE(ha))
2013 return;
2014 spin_lock_irqsave(&ha->hardware_lock, flags);
2015 ha->interrupts_on = 0;
2016 wrt_reg_dword(&reg->ictrl, 0);
2017 rd_reg_dword(&reg->ictrl);
2018 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2019 }
2020
2021 static int
2022 qla2x00_iospace_config(struct qla_hw_data *ha)
2023 {
2024 resource_size_t pio;
2025 uint16_t msix;
2026
2027 if (pci_request_selected_regions(ha->pdev, ha->bars,
2028 QLA2XXX_DRIVER_NAME)) {
2029 ql_log_pci(ql_log_fatal, ha->pdev, 0x0011,
2030 "Failed to reserve PIO/MMIO regions (%s), aborting.\n",
2031 pci_name(ha->pdev));
2032 goto iospace_error_exit;
2033 }
2034 if (!(ha->bars & 1))
2035 goto skip_pio;
2036
2037 /* We only need PIO for Flash operations on ISP2312 v2 chips.
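 * The PIO region (BAR 0) is therefore optional: validation failures below
 * only clear the cached PIO address, while the MMIO region (BAR 1) checked
 * after the skip_pio label is mandatory.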
*/
2038 pio = pci_resource_start(ha->pdev, 0);
2039 if (pci_resource_flags(ha->pdev, 0) & IORESOURCE_IO) {
2040 if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) {
2041 ql_log_pci(ql_log_warn, ha->pdev, 0x0012,
2042 "Invalid pci I/O region size (%s).\n",
2043 pci_name(ha->pdev));
2044 pio = 0;
2045 }
2046 } else {
2047 ql_log_pci(ql_log_warn, ha->pdev, 0x0013,
2048 "Region #0 not a PIO resource (%s).\n",
2049 pci_name(ha->pdev));
2050 pio = 0;
2051 }
2052 ha->pio_address = pio;
2053 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0014,
2054 "PIO address=%llu.\n",
2055 (unsigned long long)ha->pio_address);
2056
2057 skip_pio:
2058 /* Use MMIO operations for all accesses. */
2059 if (!(pci_resource_flags(ha->pdev, 1) & IORESOURCE_MEM)) {
2060 ql_log_pci(ql_log_fatal, ha->pdev, 0x0015,
2061 "Region #1 not an MMIO resource (%s), aborting.\n",
2062 pci_name(ha->pdev));
2063 goto iospace_error_exit;
2064 }
2065 if (pci_resource_len(ha->pdev, 1) < MIN_IOBASE_LEN) {
2066 ql_log_pci(ql_log_fatal, ha->pdev, 0x0016,
2067 "Invalid PCI mem region size (%s), aborting.\n",
2068 pci_name(ha->pdev));
2069 goto iospace_error_exit;
2070 }
2071
2072 ha->iobase = ioremap(pci_resource_start(ha->pdev, 1), MIN_IOBASE_LEN);
2073 if (!ha->iobase) {
2074 ql_log_pci(ql_log_fatal, ha->pdev, 0x0017,
2075 "Cannot remap MMIO (%s), aborting.\n",
2076 pci_name(ha->pdev));
2077 goto iospace_error_exit;
2078 }
2079
2080 /* Determine queue resources */
2081 ha->max_req_queues = ha->max_rsp_queues = 1;
2082 ha->msix_count = QLA_BASE_VECTORS;
2083
2084 /* Check if FW supports MQ or not */
2085 if (!(ha->fw_attributes & BIT_6))
2086 goto mqiobase_exit;
2087
2088 if (!ql2xmqsupport || !ql2xnvmeenable ||
2089 (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
2090 goto mqiobase_exit;
2091
2092 ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3),
2093 pci_resource_len(ha->pdev, 3));
2094 if (ha->mqiobase) {
2095 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0018,
2096 "MQIO Base=%p.\n", ha->mqiobase);
2097 /* Read MSIX vector size of the board */
2098 pci_read_config_word(ha->pdev, QLA_PCI_MSIX_CONTROL, &msix);
2099 ha->msix_count = msix + 1;
2100 /* Max queues are bounded by available msix vectors */
2101 /* MB interrupt uses 1 vector */
2102 ha->max_req_queues = ha->msix_count - 1;
2103 ha->max_rsp_queues = ha->max_req_queues;
2104 /* Queue pairs is the max value minus the base queue pair */
2105 ha->max_qpairs = ha->max_rsp_queues - 1;
2106 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0188,
2107 "Max no of queue pairs: %d.\n", ha->max_qpairs);
2108
2109 ql_log_pci(ql_log_info, ha->pdev, 0x001a,
2110 "MSI-X vector count: %d.\n", ha->msix_count);
2111 } else
2112 ql_log_pci(ql_log_info, ha->pdev, 0x001b,
2113 "BAR 3 not enabled.\n");
2114
2115 mqiobase_exit:
2116 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x001c,
2117 "MSIX Count: %d.\n", ha->msix_count);
2118 return (0);
2119
2120 iospace_error_exit:
2121 return (-ENOMEM);
2122 }
2123
2124
2125 static int
2126 qla83xx_iospace_config(struct qla_hw_data *ha)
2127 {
2128 uint16_t msix;
2129
2130 if (pci_request_selected_regions(ha->pdev, ha->bars,
2131 QLA2XXX_DRIVER_NAME)) {
2132 ql_log_pci(ql_log_fatal, ha->pdev, 0x0117,
2133 "Failed to reserve PIO/MMIO regions (%s), aborting.\n",
2134 pci_name(ha->pdev));
2135
2136 goto iospace_error_exit;
2137 }
2138
2139 /* Use MMIO operations for all accesses.
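 * On ISP83xx-class parts, BAR 0 provides the register window (ha->iobase),
 * region 4 (mbar 2) provides the MQ registers (ha->mqiobase) and BAR 2 is
 * mapped for MSI-X related access (ha->msixbase); each is validated and
 * ioremap()ed in turn below.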
*/
2140 if (!(pci_resource_flags(ha->pdev, 0) & IORESOURCE_MEM)) {
2141 ql_log_pci(ql_log_warn, ha->pdev, 0x0118,
2142 "Invalid pci I/O region size (%s).\n",
2143 pci_name(ha->pdev));
2144 goto iospace_error_exit;
2145 }
2146 if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) {
2147 ql_log_pci(ql_log_warn, ha->pdev, 0x0119,
2148 "Invalid PCI mem region size (%s), aborting\n",
2149 pci_name(ha->pdev));
2150 goto iospace_error_exit;
2151 }
2152
2153 ha->iobase = ioremap(pci_resource_start(ha->pdev, 0), MIN_IOBASE_LEN);
2154 if (!ha->iobase) {
2155 ql_log_pci(ql_log_fatal, ha->pdev, 0x011a,
2156 "Cannot remap MMIO (%s), aborting.\n",
2157 pci_name(ha->pdev));
2158 goto iospace_error_exit;
2159 }
2160
2161 /* 64bit PCI BAR - BAR2 will correspond to region 4 */
2162 /* 83XX 26XX always use MQ type access for queues
2163 * - mbar 2, a.k.a region 4 */
2164 ha->max_req_queues = ha->max_rsp_queues = 1;
2165 ha->msix_count = QLA_BASE_VECTORS;
2166 ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 4),
2167 pci_resource_len(ha->pdev, 4));
2168
2169 if (!ha->mqiobase) {
2170 ql_log_pci(ql_log_fatal, ha->pdev, 0x011d,
2171 "BAR2/region4 not enabled\n");
2172 goto mqiobase_exit;
2173 }
2174
2175 ha->msixbase = ioremap(pci_resource_start(ha->pdev, 2),
2176 pci_resource_len(ha->pdev, 2));
2177 if (ha->msixbase) {
2178 /* Read MSIX vector size of the board */
2179 pci_read_config_word(ha->pdev,
2180 QLA_83XX_PCI_MSIX_CONTROL, &msix);
2181 ha->msix_count = (msix & PCI_MSIX_FLAGS_QSIZE) + 1;
2182 /*
2183 * By default, driver uses at least two msix vectors
2184 * (default & rspq)
2185 */
2186 if (ql2xmqsupport || ql2xnvmeenable) {
2187 /* MB interrupt uses 1 vector */
2188 ha->max_req_queues = ha->msix_count - 1;
2189
2190 /* ATIOQ needs 1 vector. That's 1 less QPair */
2191 if (QLA_TGT_MODE_ENABLED())
2192 ha->max_req_queues--;
2193
2194 ha->max_rsp_queues = ha->max_req_queues;
2195
2196 /* Queue pairs is the max value minus
2197 * the base queue pair */
2198 ha->max_qpairs = ha->max_req_queues - 1;
2199 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x00e3,
2200 "Max no of queue pairs: %d.\n", ha->max_qpairs);
2201 }
2202 ql_log_pci(ql_log_info, ha->pdev, 0x011c,
2203 "MSI-X vector count: %d.\n", ha->msix_count);
2204 } else
2205 ql_log_pci(ql_log_info, ha->pdev, 0x011e,
2206 "BAR 1 not enabled.\n");
2207
2208 mqiobase_exit:
2209 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011f,
2210 "MSIX Count: %d.\n", ha->msix_count);
2211 return 0;
2212
2213 iospace_error_exit:
2214 return -ENOMEM;
2215 }
2216
2217 static struct isp_operations qla2100_isp_ops = {
2218 .pci_config = qla2100_pci_config,
2219 .reset_chip = qla2x00_reset_chip,
2220 .chip_diag = qla2x00_chip_diag,
2221 .config_rings = qla2x00_config_rings,
2222 .reset_adapter = qla2x00_reset_adapter,
2223 .nvram_config = qla2x00_nvram_config,
2224 .update_fw_options = qla2x00_update_fw_options,
2225 .load_risc = qla2x00_load_risc,
2226 .pci_info_str = qla2x00_pci_info_str,
2227 .fw_version_str = qla2x00_fw_version_str,
2228 .intr_handler = qla2100_intr_handler,
2229 .enable_intrs = qla2x00_enable_intrs,
2230 .disable_intrs = qla2x00_disable_intrs,
2231 .abort_command = qla2x00_abort_command,
2232 .target_reset = qla2x00_abort_target,
2233 .lun_reset = qla2x00_lun_reset,
2234 .fabric_login = qla2x00_login_fabric,
2235 .fabric_logout = qla2x00_fabric_logout,
2236 .calc_req_entries = qla2x00_calc_iocbs_32,
2237 .build_iocbs = qla2x00_build_scsi_iocbs_32,
2238 .prep_ms_iocb = qla2x00_prep_ms_iocb,
2239 .prep_ms_fdmi_iocb = qla2x00_prep_ms_fdmi_iocb,
2240 .read_nvram =
qla2x00_read_nvram_data, 2241 .write_nvram = qla2x00_write_nvram_data, 2242 .fw_dump = qla2100_fw_dump, 2243 .beacon_on = NULL, 2244 .beacon_off = NULL, 2245 .beacon_blink = NULL, 2246 .read_optrom = qla2x00_read_optrom_data, 2247 .write_optrom = qla2x00_write_optrom_data, 2248 .get_flash_version = qla2x00_get_flash_version, 2249 .start_scsi = qla2x00_start_scsi, 2250 .start_scsi_mq = NULL, 2251 .abort_isp = qla2x00_abort_isp, 2252 .iospace_config = qla2x00_iospace_config, 2253 .initialize_adapter = qla2x00_initialize_adapter, 2254 }; 2255 2256 static struct isp_operations qla2300_isp_ops = { 2257 .pci_config = qla2300_pci_config, 2258 .reset_chip = qla2x00_reset_chip, 2259 .chip_diag = qla2x00_chip_diag, 2260 .config_rings = qla2x00_config_rings, 2261 .reset_adapter = qla2x00_reset_adapter, 2262 .nvram_config = qla2x00_nvram_config, 2263 .update_fw_options = qla2x00_update_fw_options, 2264 .load_risc = qla2x00_load_risc, 2265 .pci_info_str = qla2x00_pci_info_str, 2266 .fw_version_str = qla2x00_fw_version_str, 2267 .intr_handler = qla2300_intr_handler, 2268 .enable_intrs = qla2x00_enable_intrs, 2269 .disable_intrs = qla2x00_disable_intrs, 2270 .abort_command = qla2x00_abort_command, 2271 .target_reset = qla2x00_abort_target, 2272 .lun_reset = qla2x00_lun_reset, 2273 .fabric_login = qla2x00_login_fabric, 2274 .fabric_logout = qla2x00_fabric_logout, 2275 .calc_req_entries = qla2x00_calc_iocbs_32, 2276 .build_iocbs = qla2x00_build_scsi_iocbs_32, 2277 .prep_ms_iocb = qla2x00_prep_ms_iocb, 2278 .prep_ms_fdmi_iocb = qla2x00_prep_ms_fdmi_iocb, 2279 .read_nvram = qla2x00_read_nvram_data, 2280 .write_nvram = qla2x00_write_nvram_data, 2281 .fw_dump = qla2300_fw_dump, 2282 .beacon_on = qla2x00_beacon_on, 2283 .beacon_off = qla2x00_beacon_off, 2284 .beacon_blink = qla2x00_beacon_blink, 2285 .read_optrom = qla2x00_read_optrom_data, 2286 .write_optrom = qla2x00_write_optrom_data, 2287 .get_flash_version = qla2x00_get_flash_version, 2288 .start_scsi = qla2x00_start_scsi, 2289 .start_scsi_mq = NULL, 2290 .abort_isp = qla2x00_abort_isp, 2291 .iospace_config = qla2x00_iospace_config, 2292 .initialize_adapter = qla2x00_initialize_adapter, 2293 }; 2294 2295 static struct isp_operations qla24xx_isp_ops = { 2296 .pci_config = qla24xx_pci_config, 2297 .reset_chip = qla24xx_reset_chip, 2298 .chip_diag = qla24xx_chip_diag, 2299 .config_rings = qla24xx_config_rings, 2300 .reset_adapter = qla24xx_reset_adapter, 2301 .nvram_config = qla24xx_nvram_config, 2302 .update_fw_options = qla24xx_update_fw_options, 2303 .load_risc = qla24xx_load_risc, 2304 .pci_info_str = qla24xx_pci_info_str, 2305 .fw_version_str = qla24xx_fw_version_str, 2306 .intr_handler = qla24xx_intr_handler, 2307 .enable_intrs = qla24xx_enable_intrs, 2308 .disable_intrs = qla24xx_disable_intrs, 2309 .abort_command = qla24xx_abort_command, 2310 .target_reset = qla24xx_abort_target, 2311 .lun_reset = qla24xx_lun_reset, 2312 .fabric_login = qla24xx_login_fabric, 2313 .fabric_logout = qla24xx_fabric_logout, 2314 .calc_req_entries = NULL, 2315 .build_iocbs = NULL, 2316 .prep_ms_iocb = qla24xx_prep_ms_iocb, 2317 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb, 2318 .read_nvram = qla24xx_read_nvram_data, 2319 .write_nvram = qla24xx_write_nvram_data, 2320 .fw_dump = qla24xx_fw_dump, 2321 .beacon_on = qla24xx_beacon_on, 2322 .beacon_off = qla24xx_beacon_off, 2323 .beacon_blink = qla24xx_beacon_blink, 2324 .read_optrom = qla24xx_read_optrom_data, 2325 .write_optrom = qla24xx_write_optrom_data, 2326 .get_flash_version = qla24xx_get_flash_version, 2327 .start_scsi 
= qla24xx_start_scsi, 2328 .start_scsi_mq = NULL, 2329 .abort_isp = qla2x00_abort_isp, 2330 .iospace_config = qla2x00_iospace_config, 2331 .initialize_adapter = qla2x00_initialize_adapter, 2332 }; 2333 2334 static struct isp_operations qla25xx_isp_ops = { 2335 .pci_config = qla25xx_pci_config, 2336 .reset_chip = qla24xx_reset_chip, 2337 .chip_diag = qla24xx_chip_diag, 2338 .config_rings = qla24xx_config_rings, 2339 .reset_adapter = qla24xx_reset_adapter, 2340 .nvram_config = qla24xx_nvram_config, 2341 .update_fw_options = qla24xx_update_fw_options, 2342 .load_risc = qla24xx_load_risc, 2343 .pci_info_str = qla24xx_pci_info_str, 2344 .fw_version_str = qla24xx_fw_version_str, 2345 .intr_handler = qla24xx_intr_handler, 2346 .enable_intrs = qla24xx_enable_intrs, 2347 .disable_intrs = qla24xx_disable_intrs, 2348 .abort_command = qla24xx_abort_command, 2349 .target_reset = qla24xx_abort_target, 2350 .lun_reset = qla24xx_lun_reset, 2351 .fabric_login = qla24xx_login_fabric, 2352 .fabric_logout = qla24xx_fabric_logout, 2353 .calc_req_entries = NULL, 2354 .build_iocbs = NULL, 2355 .prep_ms_iocb = qla24xx_prep_ms_iocb, 2356 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb, 2357 .read_nvram = qla25xx_read_nvram_data, 2358 .write_nvram = qla25xx_write_nvram_data, 2359 .fw_dump = qla25xx_fw_dump, 2360 .beacon_on = qla24xx_beacon_on, 2361 .beacon_off = qla24xx_beacon_off, 2362 .beacon_blink = qla24xx_beacon_blink, 2363 .read_optrom = qla25xx_read_optrom_data, 2364 .write_optrom = qla24xx_write_optrom_data, 2365 .get_flash_version = qla24xx_get_flash_version, 2366 .start_scsi = qla24xx_dif_start_scsi, 2367 .start_scsi_mq = qla2xxx_dif_start_scsi_mq, 2368 .abort_isp = qla2x00_abort_isp, 2369 .iospace_config = qla2x00_iospace_config, 2370 .initialize_adapter = qla2x00_initialize_adapter, 2371 }; 2372 2373 static struct isp_operations qla81xx_isp_ops = { 2374 .pci_config = qla25xx_pci_config, 2375 .reset_chip = qla24xx_reset_chip, 2376 .chip_diag = qla24xx_chip_diag, 2377 .config_rings = qla24xx_config_rings, 2378 .reset_adapter = qla24xx_reset_adapter, 2379 .nvram_config = qla81xx_nvram_config, 2380 .update_fw_options = qla24xx_update_fw_options, 2381 .load_risc = qla81xx_load_risc, 2382 .pci_info_str = qla24xx_pci_info_str, 2383 .fw_version_str = qla24xx_fw_version_str, 2384 .intr_handler = qla24xx_intr_handler, 2385 .enable_intrs = qla24xx_enable_intrs, 2386 .disable_intrs = qla24xx_disable_intrs, 2387 .abort_command = qla24xx_abort_command, 2388 .target_reset = qla24xx_abort_target, 2389 .lun_reset = qla24xx_lun_reset, 2390 .fabric_login = qla24xx_login_fabric, 2391 .fabric_logout = qla24xx_fabric_logout, 2392 .calc_req_entries = NULL, 2393 .build_iocbs = NULL, 2394 .prep_ms_iocb = qla24xx_prep_ms_iocb, 2395 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb, 2396 .read_nvram = NULL, 2397 .write_nvram = NULL, 2398 .fw_dump = qla81xx_fw_dump, 2399 .beacon_on = qla24xx_beacon_on, 2400 .beacon_off = qla24xx_beacon_off, 2401 .beacon_blink = qla83xx_beacon_blink, 2402 .read_optrom = qla25xx_read_optrom_data, 2403 .write_optrom = qla24xx_write_optrom_data, 2404 .get_flash_version = qla24xx_get_flash_version, 2405 .start_scsi = qla24xx_dif_start_scsi, 2406 .start_scsi_mq = qla2xxx_dif_start_scsi_mq, 2407 .abort_isp = qla2x00_abort_isp, 2408 .iospace_config = qla2x00_iospace_config, 2409 .initialize_adapter = qla2x00_initialize_adapter, 2410 }; 2411 2412 static struct isp_operations qla82xx_isp_ops = { 2413 .pci_config = qla82xx_pci_config, 2414 .reset_chip = qla82xx_reset_chip, 2415 .chip_diag = qla24xx_chip_diag, 
2416 .config_rings = qla82xx_config_rings, 2417 .reset_adapter = qla24xx_reset_adapter, 2418 .nvram_config = qla81xx_nvram_config, 2419 .update_fw_options = qla24xx_update_fw_options, 2420 .load_risc = qla82xx_load_risc, 2421 .pci_info_str = qla24xx_pci_info_str, 2422 .fw_version_str = qla24xx_fw_version_str, 2423 .intr_handler = qla82xx_intr_handler, 2424 .enable_intrs = qla82xx_enable_intrs, 2425 .disable_intrs = qla82xx_disable_intrs, 2426 .abort_command = qla24xx_abort_command, 2427 .target_reset = qla24xx_abort_target, 2428 .lun_reset = qla24xx_lun_reset, 2429 .fabric_login = qla24xx_login_fabric, 2430 .fabric_logout = qla24xx_fabric_logout, 2431 .calc_req_entries = NULL, 2432 .build_iocbs = NULL, 2433 .prep_ms_iocb = qla24xx_prep_ms_iocb, 2434 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb, 2435 .read_nvram = qla24xx_read_nvram_data, 2436 .write_nvram = qla24xx_write_nvram_data, 2437 .fw_dump = qla82xx_fw_dump, 2438 .beacon_on = qla82xx_beacon_on, 2439 .beacon_off = qla82xx_beacon_off, 2440 .beacon_blink = NULL, 2441 .read_optrom = qla82xx_read_optrom_data, 2442 .write_optrom = qla82xx_write_optrom_data, 2443 .get_flash_version = qla82xx_get_flash_version, 2444 .start_scsi = qla82xx_start_scsi, 2445 .start_scsi_mq = NULL, 2446 .abort_isp = qla82xx_abort_isp, 2447 .iospace_config = qla82xx_iospace_config, 2448 .initialize_adapter = qla2x00_initialize_adapter, 2449 }; 2450 2451 static struct isp_operations qla8044_isp_ops = { 2452 .pci_config = qla82xx_pci_config, 2453 .reset_chip = qla82xx_reset_chip, 2454 .chip_diag = qla24xx_chip_diag, 2455 .config_rings = qla82xx_config_rings, 2456 .reset_adapter = qla24xx_reset_adapter, 2457 .nvram_config = qla81xx_nvram_config, 2458 .update_fw_options = qla24xx_update_fw_options, 2459 .load_risc = qla82xx_load_risc, 2460 .pci_info_str = qla24xx_pci_info_str, 2461 .fw_version_str = qla24xx_fw_version_str, 2462 .intr_handler = qla8044_intr_handler, 2463 .enable_intrs = qla82xx_enable_intrs, 2464 .disable_intrs = qla82xx_disable_intrs, 2465 .abort_command = qla24xx_abort_command, 2466 .target_reset = qla24xx_abort_target, 2467 .lun_reset = qla24xx_lun_reset, 2468 .fabric_login = qla24xx_login_fabric, 2469 .fabric_logout = qla24xx_fabric_logout, 2470 .calc_req_entries = NULL, 2471 .build_iocbs = NULL, 2472 .prep_ms_iocb = qla24xx_prep_ms_iocb, 2473 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb, 2474 .read_nvram = NULL, 2475 .write_nvram = NULL, 2476 .fw_dump = qla8044_fw_dump, 2477 .beacon_on = qla82xx_beacon_on, 2478 .beacon_off = qla82xx_beacon_off, 2479 .beacon_blink = NULL, 2480 .read_optrom = qla8044_read_optrom_data, 2481 .write_optrom = qla8044_write_optrom_data, 2482 .get_flash_version = qla82xx_get_flash_version, 2483 .start_scsi = qla82xx_start_scsi, 2484 .start_scsi_mq = NULL, 2485 .abort_isp = qla8044_abort_isp, 2486 .iospace_config = qla82xx_iospace_config, 2487 .initialize_adapter = qla2x00_initialize_adapter, 2488 }; 2489 2490 static struct isp_operations qla83xx_isp_ops = { 2491 .pci_config = qla25xx_pci_config, 2492 .reset_chip = qla24xx_reset_chip, 2493 .chip_diag = qla24xx_chip_diag, 2494 .config_rings = qla24xx_config_rings, 2495 .reset_adapter = qla24xx_reset_adapter, 2496 .nvram_config = qla81xx_nvram_config, 2497 .update_fw_options = qla24xx_update_fw_options, 2498 .load_risc = qla81xx_load_risc, 2499 .pci_info_str = qla24xx_pci_info_str, 2500 .fw_version_str = qla24xx_fw_version_str, 2501 .intr_handler = qla24xx_intr_handler, 2502 .enable_intrs = qla24xx_enable_intrs, 2503 .disable_intrs = qla24xx_disable_intrs, 2504 
.abort_command = qla24xx_abort_command, 2505 .target_reset = qla24xx_abort_target, 2506 .lun_reset = qla24xx_lun_reset, 2507 .fabric_login = qla24xx_login_fabric, 2508 .fabric_logout = qla24xx_fabric_logout, 2509 .calc_req_entries = NULL, 2510 .build_iocbs = NULL, 2511 .prep_ms_iocb = qla24xx_prep_ms_iocb, 2512 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb, 2513 .read_nvram = NULL, 2514 .write_nvram = NULL, 2515 .fw_dump = qla83xx_fw_dump, 2516 .beacon_on = qla24xx_beacon_on, 2517 .beacon_off = qla24xx_beacon_off, 2518 .beacon_blink = qla83xx_beacon_blink, 2519 .read_optrom = qla25xx_read_optrom_data, 2520 .write_optrom = qla24xx_write_optrom_data, 2521 .get_flash_version = qla24xx_get_flash_version, 2522 .start_scsi = qla24xx_dif_start_scsi, 2523 .start_scsi_mq = qla2xxx_dif_start_scsi_mq, 2524 .abort_isp = qla2x00_abort_isp, 2525 .iospace_config = qla83xx_iospace_config, 2526 .initialize_adapter = qla2x00_initialize_adapter, 2527 }; 2528 2529 static struct isp_operations qlafx00_isp_ops = { 2530 .pci_config = qlafx00_pci_config, 2531 .reset_chip = qlafx00_soft_reset, 2532 .chip_diag = qlafx00_chip_diag, 2533 .config_rings = qlafx00_config_rings, 2534 .reset_adapter = qlafx00_soft_reset, 2535 .nvram_config = NULL, 2536 .update_fw_options = NULL, 2537 .load_risc = NULL, 2538 .pci_info_str = qlafx00_pci_info_str, 2539 .fw_version_str = qlafx00_fw_version_str, 2540 .intr_handler = qlafx00_intr_handler, 2541 .enable_intrs = qlafx00_enable_intrs, 2542 .disable_intrs = qlafx00_disable_intrs, 2543 .abort_command = qla24xx_async_abort_command, 2544 .target_reset = qlafx00_abort_target, 2545 .lun_reset = qlafx00_lun_reset, 2546 .fabric_login = NULL, 2547 .fabric_logout = NULL, 2548 .calc_req_entries = NULL, 2549 .build_iocbs = NULL, 2550 .prep_ms_iocb = qla24xx_prep_ms_iocb, 2551 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb, 2552 .read_nvram = qla24xx_read_nvram_data, 2553 .write_nvram = qla24xx_write_nvram_data, 2554 .fw_dump = NULL, 2555 .beacon_on = qla24xx_beacon_on, 2556 .beacon_off = qla24xx_beacon_off, 2557 .beacon_blink = NULL, 2558 .read_optrom = qla24xx_read_optrom_data, 2559 .write_optrom = qla24xx_write_optrom_data, 2560 .get_flash_version = qla24xx_get_flash_version, 2561 .start_scsi = qlafx00_start_scsi, 2562 .start_scsi_mq = NULL, 2563 .abort_isp = qlafx00_abort_isp, 2564 .iospace_config = qlafx00_iospace_config, 2565 .initialize_adapter = qlafx00_initialize_adapter, 2566 }; 2567 2568 static struct isp_operations qla27xx_isp_ops = { 2569 .pci_config = qla25xx_pci_config, 2570 .reset_chip = qla24xx_reset_chip, 2571 .chip_diag = qla24xx_chip_diag, 2572 .config_rings = qla24xx_config_rings, 2573 .reset_adapter = qla24xx_reset_adapter, 2574 .nvram_config = qla81xx_nvram_config, 2575 .update_fw_options = qla24xx_update_fw_options, 2576 .load_risc = qla81xx_load_risc, 2577 .pci_info_str = qla24xx_pci_info_str, 2578 .fw_version_str = qla24xx_fw_version_str, 2579 .intr_handler = qla24xx_intr_handler, 2580 .enable_intrs = qla24xx_enable_intrs, 2581 .disable_intrs = qla24xx_disable_intrs, 2582 .abort_command = qla24xx_abort_command, 2583 .target_reset = qla24xx_abort_target, 2584 .lun_reset = qla24xx_lun_reset, 2585 .fabric_login = qla24xx_login_fabric, 2586 .fabric_logout = qla24xx_fabric_logout, 2587 .calc_req_entries = NULL, 2588 .build_iocbs = NULL, 2589 .prep_ms_iocb = qla24xx_prep_ms_iocb, 2590 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb, 2591 .read_nvram = NULL, 2592 .write_nvram = NULL, 2593 .fw_dump = qla27xx_fwdump, 2594 .mpi_fw_dump = qla27xx_mpi_fwdump, 2595 .beacon_on = 
qla24xx_beacon_on, 2596 .beacon_off = qla24xx_beacon_off, 2597 .beacon_blink = qla83xx_beacon_blink, 2598 .read_optrom = qla25xx_read_optrom_data, 2599 .write_optrom = qla24xx_write_optrom_data, 2600 .get_flash_version = qla24xx_get_flash_version, 2601 .start_scsi = qla24xx_dif_start_scsi, 2602 .start_scsi_mq = qla2xxx_dif_start_scsi_mq, 2603 .abort_isp = qla2x00_abort_isp, 2604 .iospace_config = qla83xx_iospace_config, 2605 .initialize_adapter = qla2x00_initialize_adapter, 2606 }; 2607 2608 static inline void 2609 qla2x00_set_isp_flags(struct qla_hw_data *ha) 2610 { 2611 ha->device_type = DT_EXTENDED_IDS; 2612 switch (ha->pdev->device) { 2613 case PCI_DEVICE_ID_QLOGIC_ISP2100: 2614 ha->isp_type |= DT_ISP2100; 2615 ha->device_type &= ~DT_EXTENDED_IDS; 2616 ha->fw_srisc_address = RISC_START_ADDRESS_2100; 2617 break; 2618 case PCI_DEVICE_ID_QLOGIC_ISP2200: 2619 ha->isp_type |= DT_ISP2200; 2620 ha->device_type &= ~DT_EXTENDED_IDS; 2621 ha->fw_srisc_address = RISC_START_ADDRESS_2100; 2622 break; 2623 case PCI_DEVICE_ID_QLOGIC_ISP2300: 2624 ha->isp_type |= DT_ISP2300; 2625 ha->device_type |= DT_ZIO_SUPPORTED; 2626 ha->fw_srisc_address = RISC_START_ADDRESS_2300; 2627 break; 2628 case PCI_DEVICE_ID_QLOGIC_ISP2312: 2629 ha->isp_type |= DT_ISP2312; 2630 ha->device_type |= DT_ZIO_SUPPORTED; 2631 ha->fw_srisc_address = RISC_START_ADDRESS_2300; 2632 break; 2633 case PCI_DEVICE_ID_QLOGIC_ISP2322: 2634 ha->isp_type |= DT_ISP2322; 2635 ha->device_type |= DT_ZIO_SUPPORTED; 2636 if (ha->pdev->subsystem_vendor == 0x1028 && 2637 ha->pdev->subsystem_device == 0x0170) 2638 ha->device_type |= DT_OEM_001; 2639 ha->fw_srisc_address = RISC_START_ADDRESS_2300; 2640 break; 2641 case PCI_DEVICE_ID_QLOGIC_ISP6312: 2642 ha->isp_type |= DT_ISP6312; 2643 ha->fw_srisc_address = RISC_START_ADDRESS_2300; 2644 break; 2645 case PCI_DEVICE_ID_QLOGIC_ISP6322: 2646 ha->isp_type |= DT_ISP6322; 2647 ha->fw_srisc_address = RISC_START_ADDRESS_2300; 2648 break; 2649 case PCI_DEVICE_ID_QLOGIC_ISP2422: 2650 ha->isp_type |= DT_ISP2422; 2651 ha->device_type |= DT_ZIO_SUPPORTED; 2652 ha->device_type |= DT_FWI2; 2653 ha->device_type |= DT_IIDMA; 2654 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2655 break; 2656 case PCI_DEVICE_ID_QLOGIC_ISP2432: 2657 ha->isp_type |= DT_ISP2432; 2658 ha->device_type |= DT_ZIO_SUPPORTED; 2659 ha->device_type |= DT_FWI2; 2660 ha->device_type |= DT_IIDMA; 2661 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2662 break; 2663 case PCI_DEVICE_ID_QLOGIC_ISP8432: 2664 ha->isp_type |= DT_ISP8432; 2665 ha->device_type |= DT_ZIO_SUPPORTED; 2666 ha->device_type |= DT_FWI2; 2667 ha->device_type |= DT_IIDMA; 2668 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2669 break; 2670 case PCI_DEVICE_ID_QLOGIC_ISP5422: 2671 ha->isp_type |= DT_ISP5422; 2672 ha->device_type |= DT_FWI2; 2673 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2674 break; 2675 case PCI_DEVICE_ID_QLOGIC_ISP5432: 2676 ha->isp_type |= DT_ISP5432; 2677 ha->device_type |= DT_FWI2; 2678 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2679 break; 2680 case PCI_DEVICE_ID_QLOGIC_ISP2532: 2681 ha->isp_type |= DT_ISP2532; 2682 ha->device_type |= DT_ZIO_SUPPORTED; 2683 ha->device_type |= DT_FWI2; 2684 ha->device_type |= DT_IIDMA; 2685 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2686 break; 2687 case PCI_DEVICE_ID_QLOGIC_ISP8001: 2688 ha->isp_type |= DT_ISP8001; 2689 ha->device_type |= DT_ZIO_SUPPORTED; 2690 ha->device_type |= DT_FWI2; 2691 ha->device_type |= DT_IIDMA; 2692 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2693 break; 2694 case 
PCI_DEVICE_ID_QLOGIC_ISP8021: 2695 ha->isp_type |= DT_ISP8021; 2696 ha->device_type |= DT_ZIO_SUPPORTED; 2697 ha->device_type |= DT_FWI2; 2698 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2699 /* Initialize 82XX ISP flags */ 2700 qla82xx_init_flags(ha); 2701 break; 2702 case PCI_DEVICE_ID_QLOGIC_ISP8044: 2703 ha->isp_type |= DT_ISP8044; 2704 ha->device_type |= DT_ZIO_SUPPORTED; 2705 ha->device_type |= DT_FWI2; 2706 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2707 /* Initialize 82XX ISP flags */ 2708 qla82xx_init_flags(ha); 2709 break; 2710 case PCI_DEVICE_ID_QLOGIC_ISP2031: 2711 ha->isp_type |= DT_ISP2031; 2712 ha->device_type |= DT_ZIO_SUPPORTED; 2713 ha->device_type |= DT_FWI2; 2714 ha->device_type |= DT_IIDMA; 2715 ha->device_type |= DT_T10_PI; 2716 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2717 break; 2718 case PCI_DEVICE_ID_QLOGIC_ISP8031: 2719 ha->isp_type |= DT_ISP8031; 2720 ha->device_type |= DT_ZIO_SUPPORTED; 2721 ha->device_type |= DT_FWI2; 2722 ha->device_type |= DT_IIDMA; 2723 ha->device_type |= DT_T10_PI; 2724 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2725 break; 2726 case PCI_DEVICE_ID_QLOGIC_ISPF001: 2727 ha->isp_type |= DT_ISPFX00; 2728 break; 2729 case PCI_DEVICE_ID_QLOGIC_ISP2071: 2730 ha->isp_type |= DT_ISP2071; 2731 ha->device_type |= DT_ZIO_SUPPORTED; 2732 ha->device_type |= DT_FWI2; 2733 ha->device_type |= DT_IIDMA; 2734 ha->device_type |= DT_T10_PI; 2735 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2736 break; 2737 case PCI_DEVICE_ID_QLOGIC_ISP2271: 2738 ha->isp_type |= DT_ISP2271; 2739 ha->device_type |= DT_ZIO_SUPPORTED; 2740 ha->device_type |= DT_FWI2; 2741 ha->device_type |= DT_IIDMA; 2742 ha->device_type |= DT_T10_PI; 2743 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2744 break; 2745 case PCI_DEVICE_ID_QLOGIC_ISP2261: 2746 ha->isp_type |= DT_ISP2261; 2747 ha->device_type |= DT_ZIO_SUPPORTED; 2748 ha->device_type |= DT_FWI2; 2749 ha->device_type |= DT_IIDMA; 2750 ha->device_type |= DT_T10_PI; 2751 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2752 break; 2753 case PCI_DEVICE_ID_QLOGIC_ISP2081: 2754 case PCI_DEVICE_ID_QLOGIC_ISP2089: 2755 ha->isp_type |= DT_ISP2081; 2756 ha->device_type |= DT_ZIO_SUPPORTED; 2757 ha->device_type |= DT_FWI2; 2758 ha->device_type |= DT_IIDMA; 2759 ha->device_type |= DT_T10_PI; 2760 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2761 break; 2762 case PCI_DEVICE_ID_QLOGIC_ISP2281: 2763 case PCI_DEVICE_ID_QLOGIC_ISP2289: 2764 ha->isp_type |= DT_ISP2281; 2765 ha->device_type |= DT_ZIO_SUPPORTED; 2766 ha->device_type |= DT_FWI2; 2767 ha->device_type |= DT_IIDMA; 2768 ha->device_type |= DT_T10_PI; 2769 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2770 break; 2771 } 2772 2773 if (IS_QLA82XX(ha)) 2774 ha->port_no = ha->portnum & 1; 2775 else { 2776 /* Get adapter physical port no from interrupt pin register. 
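 * The pin value read below is adjusted per ISP family: ISP25xx/2031/27xx/28xx
 * parts decrement it, while older parts derive the port number by inverting
 * the low bit.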
*/ 2777 pci_read_config_byte(ha->pdev, PCI_INTERRUPT_PIN, &ha->port_no); 2778 if (IS_QLA25XX(ha) || IS_QLA2031(ha) || 2779 IS_QLA27XX(ha) || IS_QLA28XX(ha)) 2780 ha->port_no--; 2781 else 2782 ha->port_no = !(ha->port_no & 1); 2783 } 2784 2785 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x000b, 2786 "device_type=0x%x port=%d fw_srisc_address=0x%x.\n", 2787 ha->device_type, ha->port_no, ha->fw_srisc_address); 2788 } 2789 2790 static void 2791 qla2xxx_scan_start(struct Scsi_Host *shost) 2792 { 2793 scsi_qla_host_t *vha = shost_priv(shost); 2794 2795 if (vha->hw->flags.running_gold_fw) 2796 return; 2797 2798 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 2799 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 2800 set_bit(RSCN_UPDATE, &vha->dpc_flags); 2801 set_bit(NPIV_CONFIG_NEEDED, &vha->dpc_flags); 2802 } 2803 2804 static int 2805 qla2xxx_scan_finished(struct Scsi_Host *shost, unsigned long time) 2806 { 2807 scsi_qla_host_t *vha = shost_priv(shost); 2808 2809 if (test_bit(UNLOADING, &vha->dpc_flags)) 2810 return 1; 2811 if (!vha->host) 2812 return 1; 2813 if (time > vha->hw->loop_reset_delay * HZ) 2814 return 1; 2815 2816 return atomic_read(&vha->loop_state) == LOOP_READY; 2817 } 2818 2819 static void qla_heartbeat_work_fn(struct work_struct *work) 2820 { 2821 struct qla_hw_data *ha = container_of(work, 2822 struct qla_hw_data, heartbeat_work); 2823 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 2824 2825 if (!ha->flags.mbox_busy && base_vha->flags.init_done) 2826 qla_no_op_mb(base_vha); 2827 } 2828 2829 static void qla2x00_iocb_work_fn(struct work_struct *work) 2830 { 2831 struct scsi_qla_host *vha = container_of(work, 2832 struct scsi_qla_host, iocb_work); 2833 struct qla_hw_data *ha = vha->hw; 2834 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 2835 int i = 2; 2836 unsigned long flags; 2837 2838 if (test_bit(UNLOADING, &base_vha->dpc_flags)) 2839 return; 2840 2841 while (!list_empty(&vha->work_list) && i > 0) { 2842 qla2x00_do_work(vha); 2843 i--; 2844 } 2845 2846 spin_lock_irqsave(&vha->work_lock, flags); 2847 clear_bit(IOCB_WORK_ACTIVE, &vha->dpc_flags); 2848 spin_unlock_irqrestore(&vha->work_lock, flags); 2849 } 2850 2851 static void 2852 qla_trace_init(void) 2853 { 2854 qla_trc_array = trace_array_get_by_name("qla2xxx"); 2855 if (!qla_trc_array) { 2856 ql_log(ql_log_fatal, NULL, 0x0001, 2857 "Unable to create qla2xxx trace instance, instance logging will be disabled.\n"); 2858 return; 2859 } 2860 2861 QLA_TRACE_ENABLE(qla_trc_array); 2862 } 2863 2864 static void 2865 qla_trace_uninit(void) 2866 { 2867 if (!qla_trc_array) 2868 return; 2869 trace_array_put(qla_trc_array); 2870 } 2871 2872 /* 2873 * PCI driver interface 2874 */ 2875 static int 2876 qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) 2877 { 2878 int ret = -ENODEV; 2879 struct Scsi_Host *host; 2880 scsi_qla_host_t *base_vha = NULL; 2881 struct qla_hw_data *ha; 2882 char pci_info[30]; 2883 char fw_str[30], wq_name[30]; 2884 struct scsi_host_template *sht; 2885 int bars, mem_only = 0; 2886 uint16_t req_length = 0, rsp_length = 0; 2887 struct req_que *req = NULL; 2888 struct rsp_que *rsp = NULL; 2889 int i; 2890 2891 bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO); 2892 sht = &qla2xxx_driver_template; 2893 if (pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2422 || 2894 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2432 || 2895 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8432 || 2896 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5422 || 2897 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5432 || 2898 pdev->device 
== PCI_DEVICE_ID_QLOGIC_ISP2532 || 2899 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8001 || 2900 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8021 || 2901 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2031 || 2902 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8031 || 2903 pdev->device == PCI_DEVICE_ID_QLOGIC_ISPF001 || 2904 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8044 || 2905 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2071 || 2906 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2271 || 2907 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2261 || 2908 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2081 || 2909 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2281 || 2910 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2089 || 2911 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2289) { 2912 bars = pci_select_bars(pdev, IORESOURCE_MEM); 2913 mem_only = 1; 2914 ql_dbg_pci(ql_dbg_init, pdev, 0x0007, 2915 "Mem only adapter.\n"); 2916 } 2917 ql_dbg_pci(ql_dbg_init, pdev, 0x0008, 2918 "Bars=%d.\n", bars); 2919 2920 if (mem_only) { 2921 if (pci_enable_device_mem(pdev)) 2922 return ret; 2923 } else { 2924 if (pci_enable_device(pdev)) 2925 return ret; 2926 } 2927 2928 if (is_kdump_kernel()) { 2929 ql2xmqsupport = 0; 2930 ql2xallocfwdump = 0; 2931 } 2932 2933 /* This may fail but that's ok */ 2934 pci_enable_pcie_error_reporting(pdev); 2935 2936 ha = kzalloc(sizeof(struct qla_hw_data), GFP_KERNEL); 2937 if (!ha) { 2938 ql_log_pci(ql_log_fatal, pdev, 0x0009, 2939 "Unable to allocate memory for ha.\n"); 2940 goto disable_device; 2941 } 2942 ql_dbg_pci(ql_dbg_init, pdev, 0x000a, 2943 "Memory allocated for ha=%p.\n", ha); 2944 ha->pdev = pdev; 2945 INIT_LIST_HEAD(&ha->tgt.q_full_list); 2946 spin_lock_init(&ha->tgt.q_full_lock); 2947 spin_lock_init(&ha->tgt.sess_lock); 2948 spin_lock_init(&ha->tgt.atio_lock); 2949 2950 spin_lock_init(&ha->sadb_lock); 2951 INIT_LIST_HEAD(&ha->sadb_tx_index_list); 2952 INIT_LIST_HEAD(&ha->sadb_rx_index_list); 2953 2954 spin_lock_init(&ha->sadb_fp_lock); 2955 2956 if (qla_edif_sadb_build_free_pool(ha)) { 2957 kfree(ha); 2958 goto disable_device; 2959 } 2960 2961 atomic_set(&ha->nvme_active_aen_cnt, 0); 2962 2963 /* Clear our data area */ 2964 ha->bars = bars; 2965 ha->mem_only = mem_only; 2966 spin_lock_init(&ha->hardware_lock); 2967 spin_lock_init(&ha->vport_slock); 2968 mutex_init(&ha->selflogin_lock); 2969 mutex_init(&ha->optrom_mutex); 2970 2971 /* Set ISP-type information. */ 2972 qla2x00_set_isp_flags(ha); 2973 2974 /* Set EEH reset type to fundamental if required by hba */ 2975 if (IS_QLA24XX(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha) || 2976 IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) 2977 pdev->needs_freset = 1; 2978 2979 ha->prev_topology = 0; 2980 ha->init_cb_size = sizeof(init_cb_t); 2981 ha->link_data_rate = PORT_SPEED_UNKNOWN; 2982 ha->optrom_size = OPTROM_SIZE_2300; 2983 ha->max_exchg = FW_MAX_EXCHANGES_CNT; 2984 atomic_set(&ha->num_pend_mbx_stage1, 0); 2985 atomic_set(&ha->num_pend_mbx_stage2, 0); 2986 atomic_set(&ha->num_pend_mbx_stage3, 0); 2987 atomic_set(&ha->zio_threshold, DEFAULT_ZIO_THRESHOLD); 2988 ha->last_zio_threshold = DEFAULT_ZIO_THRESHOLD; 2989 2990 /* Assign ISP specific operations. 
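 * Each branch below selects the isp_ops table for the detected ISP family
 * along with its mailbox register count, request/response ring sizes and
 * flash/NVRAM access parameters.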
*/ 2991 if (IS_QLA2100(ha)) { 2992 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2100; 2993 ha->mbx_count = MAILBOX_REGISTER_COUNT_2100; 2994 req_length = REQUEST_ENTRY_CNT_2100; 2995 rsp_length = RESPONSE_ENTRY_CNT_2100; 2996 ha->max_loop_id = SNS_LAST_LOOP_ID_2100; 2997 ha->gid_list_info_size = 4; 2998 ha->flash_conf_off = ~0; 2999 ha->flash_data_off = ~0; 3000 ha->nvram_conf_off = ~0; 3001 ha->nvram_data_off = ~0; 3002 ha->isp_ops = &qla2100_isp_ops; 3003 } else if (IS_QLA2200(ha)) { 3004 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2100; 3005 ha->mbx_count = MAILBOX_REGISTER_COUNT_2200; 3006 req_length = REQUEST_ENTRY_CNT_2200; 3007 rsp_length = RESPONSE_ENTRY_CNT_2100; 3008 ha->max_loop_id = SNS_LAST_LOOP_ID_2100; 3009 ha->gid_list_info_size = 4; 3010 ha->flash_conf_off = ~0; 3011 ha->flash_data_off = ~0; 3012 ha->nvram_conf_off = ~0; 3013 ha->nvram_data_off = ~0; 3014 ha->isp_ops = &qla2100_isp_ops; 3015 } else if (IS_QLA23XX(ha)) { 3016 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2100; 3017 ha->mbx_count = MAILBOX_REGISTER_COUNT; 3018 req_length = REQUEST_ENTRY_CNT_2200; 3019 rsp_length = RESPONSE_ENTRY_CNT_2300; 3020 ha->max_loop_id = SNS_LAST_LOOP_ID_2300; 3021 ha->gid_list_info_size = 6; 3022 if (IS_QLA2322(ha) || IS_QLA6322(ha)) 3023 ha->optrom_size = OPTROM_SIZE_2322; 3024 ha->flash_conf_off = ~0; 3025 ha->flash_data_off = ~0; 3026 ha->nvram_conf_off = ~0; 3027 ha->nvram_data_off = ~0; 3028 ha->isp_ops = &qla2300_isp_ops; 3029 } else if (IS_QLA24XX_TYPE(ha)) { 3030 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; 3031 ha->mbx_count = MAILBOX_REGISTER_COUNT; 3032 req_length = REQUEST_ENTRY_CNT_24XX; 3033 rsp_length = RESPONSE_ENTRY_CNT_2300; 3034 ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; 3035 ha->max_loop_id = SNS_LAST_LOOP_ID_2300; 3036 ha->init_cb_size = sizeof(struct mid_init_cb_24xx); 3037 ha->gid_list_info_size = 8; 3038 ha->optrom_size = OPTROM_SIZE_24XX; 3039 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA24XX; 3040 ha->isp_ops = &qla24xx_isp_ops; 3041 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF; 3042 ha->flash_data_off = FARX_ACCESS_FLASH_DATA; 3043 ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF; 3044 ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA; 3045 } else if (IS_QLA25XX(ha)) { 3046 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; 3047 ha->mbx_count = MAILBOX_REGISTER_COUNT; 3048 req_length = REQUEST_ENTRY_CNT_24XX; 3049 rsp_length = RESPONSE_ENTRY_CNT_2300; 3050 ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; 3051 ha->max_loop_id = SNS_LAST_LOOP_ID_2300; 3052 ha->init_cb_size = sizeof(struct mid_init_cb_24xx); 3053 ha->gid_list_info_size = 8; 3054 ha->optrom_size = OPTROM_SIZE_25XX; 3055 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX; 3056 ha->isp_ops = &qla25xx_isp_ops; 3057 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF; 3058 ha->flash_data_off = FARX_ACCESS_FLASH_DATA; 3059 ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF; 3060 ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA; 3061 } else if (IS_QLA81XX(ha)) { 3062 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; 3063 ha->mbx_count = MAILBOX_REGISTER_COUNT; 3064 req_length = REQUEST_ENTRY_CNT_24XX; 3065 rsp_length = RESPONSE_ENTRY_CNT_2300; 3066 ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; 3067 ha->max_loop_id = SNS_LAST_LOOP_ID_2300; 3068 ha->init_cb_size = sizeof(struct mid_init_cb_81xx); 3069 ha->gid_list_info_size = 8; 3070 ha->optrom_size = OPTROM_SIZE_81XX; 3071 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX; 3072 ha->isp_ops = &qla81xx_isp_ops; 3073 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX; 3074 ha->flash_data_off = 
FARX_ACCESS_FLASH_DATA_81XX; 3075 ha->nvram_conf_off = ~0; 3076 ha->nvram_data_off = ~0; 3077 } else if (IS_QLA82XX(ha)) { 3078 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; 3079 ha->mbx_count = MAILBOX_REGISTER_COUNT; 3080 req_length = REQUEST_ENTRY_CNT_82XX; 3081 rsp_length = RESPONSE_ENTRY_CNT_82XX; 3082 ha->max_loop_id = SNS_LAST_LOOP_ID_2300; 3083 ha->init_cb_size = sizeof(struct mid_init_cb_81xx); 3084 ha->gid_list_info_size = 8; 3085 ha->optrom_size = OPTROM_SIZE_82XX; 3086 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX; 3087 ha->isp_ops = &qla82xx_isp_ops; 3088 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF; 3089 ha->flash_data_off = FARX_ACCESS_FLASH_DATA; 3090 ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF; 3091 ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA; 3092 } else if (IS_QLA8044(ha)) { 3093 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; 3094 ha->mbx_count = MAILBOX_REGISTER_COUNT; 3095 req_length = REQUEST_ENTRY_CNT_82XX; 3096 rsp_length = RESPONSE_ENTRY_CNT_82XX; 3097 ha->max_loop_id = SNS_LAST_LOOP_ID_2300; 3098 ha->init_cb_size = sizeof(struct mid_init_cb_81xx); 3099 ha->gid_list_info_size = 8; 3100 ha->optrom_size = OPTROM_SIZE_83XX; 3101 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX; 3102 ha->isp_ops = &qla8044_isp_ops; 3103 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF; 3104 ha->flash_data_off = FARX_ACCESS_FLASH_DATA; 3105 ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF; 3106 ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA; 3107 } else if (IS_QLA83XX(ha)) { 3108 ha->portnum = PCI_FUNC(ha->pdev->devfn); 3109 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; 3110 ha->mbx_count = MAILBOX_REGISTER_COUNT; 3111 req_length = REQUEST_ENTRY_CNT_83XX; 3112 rsp_length = RESPONSE_ENTRY_CNT_83XX; 3113 ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; 3114 ha->max_loop_id = SNS_LAST_LOOP_ID_2300; 3115 ha->init_cb_size = sizeof(struct mid_init_cb_81xx); 3116 ha->gid_list_info_size = 8; 3117 ha->optrom_size = OPTROM_SIZE_83XX; 3118 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX; 3119 ha->isp_ops = &qla83xx_isp_ops; 3120 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX; 3121 ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX; 3122 ha->nvram_conf_off = ~0; 3123 ha->nvram_data_off = ~0; 3124 } else if (IS_QLAFX00(ha)) { 3125 ha->max_fibre_devices = MAX_FIBRE_DEVICES_FX00; 3126 ha->mbx_count = MAILBOX_REGISTER_COUNT_FX00; 3127 ha->aen_mbx_count = AEN_MAILBOX_REGISTER_COUNT_FX00; 3128 req_length = REQUEST_ENTRY_CNT_FX00; 3129 rsp_length = RESPONSE_ENTRY_CNT_FX00; 3130 ha->isp_ops = &qlafx00_isp_ops; 3131 ha->port_down_retry_count = 30; /* default value */ 3132 ha->mr.fw_hbt_cnt = QLAFX00_HEARTBEAT_INTERVAL; 3133 ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL; 3134 ha->mr.fw_critemp_timer_tick = QLAFX00_CRITEMP_INTERVAL; 3135 ha->mr.fw_hbt_en = 1; 3136 ha->mr.host_info_resend = false; 3137 ha->mr.hinfo_resend_timer_tick = QLAFX00_HINFO_RESEND_INTERVAL; 3138 } else if (IS_QLA27XX(ha)) { 3139 ha->portnum = PCI_FUNC(ha->pdev->devfn); 3140 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; 3141 ha->mbx_count = MAILBOX_REGISTER_COUNT; 3142 req_length = REQUEST_ENTRY_CNT_83XX; 3143 rsp_length = RESPONSE_ENTRY_CNT_83XX; 3144 ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; 3145 ha->max_loop_id = SNS_LAST_LOOP_ID_2300; 3146 ha->init_cb_size = sizeof(struct mid_init_cb_81xx); 3147 ha->gid_list_info_size = 8; 3148 ha->optrom_size = OPTROM_SIZE_83XX; 3149 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX; 3150 ha->isp_ops = &qla27xx_isp_ops; 3151 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX; 3152 ha->flash_data_off = 
FARX_ACCESS_FLASH_DATA_81XX; 3153 ha->nvram_conf_off = ~0; 3154 ha->nvram_data_off = ~0; 3155 } else if (IS_QLA28XX(ha)) { 3156 ha->portnum = PCI_FUNC(ha->pdev->devfn); 3157 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; 3158 ha->mbx_count = MAILBOX_REGISTER_COUNT; 3159 req_length = REQUEST_ENTRY_CNT_83XX; 3160 rsp_length = RESPONSE_ENTRY_CNT_83XX; 3161 ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; 3162 ha->max_loop_id = SNS_LAST_LOOP_ID_2300; 3163 ha->init_cb_size = sizeof(struct mid_init_cb_81xx); 3164 ha->gid_list_info_size = 8; 3165 ha->optrom_size = OPTROM_SIZE_28XX; 3166 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX; 3167 ha->isp_ops = &qla27xx_isp_ops; 3168 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_28XX; 3169 ha->flash_data_off = FARX_ACCESS_FLASH_DATA_28XX; 3170 ha->nvram_conf_off = ~0; 3171 ha->nvram_data_off = ~0; 3172 } 3173 3174 ql_dbg_pci(ql_dbg_init, pdev, 0x001e, 3175 "mbx_count=%d, req_length=%d, " 3176 "rsp_length=%d, max_loop_id=%d, init_cb_size=%d, " 3177 "gid_list_info_size=%d, optrom_size=%d, nvram_npiv_size=%d, " 3178 "max_fibre_devices=%d.\n", 3179 ha->mbx_count, req_length, rsp_length, ha->max_loop_id, 3180 ha->init_cb_size, ha->gid_list_info_size, ha->optrom_size, 3181 ha->nvram_npiv_size, ha->max_fibre_devices); 3182 ql_dbg_pci(ql_dbg_init, pdev, 0x001f, 3183 "isp_ops=%p, flash_conf_off=%d, " 3184 "flash_data_off=%d, nvram_conf_off=%d, nvram_data_off=%d.\n", 3185 ha->isp_ops, ha->flash_conf_off, ha->flash_data_off, 3186 ha->nvram_conf_off, ha->nvram_data_off); 3187 3188 /* Configure PCI I/O space */ 3189 ret = ha->isp_ops->iospace_config(ha); 3190 if (ret) 3191 goto iospace_config_failed; 3192 3193 ql_log_pci(ql_log_info, pdev, 0x001d, 3194 "Found an ISP%04X irq %d iobase 0x%p.\n", 3195 pdev->device, pdev->irq, ha->iobase); 3196 mutex_init(&ha->vport_lock); 3197 mutex_init(&ha->mq_lock); 3198 init_completion(&ha->mbx_cmd_comp); 3199 complete(&ha->mbx_cmd_comp); 3200 init_completion(&ha->mbx_intr_comp); 3201 init_completion(&ha->dcbx_comp); 3202 init_completion(&ha->lb_portup_comp); 3203 3204 set_bit(0, (unsigned long *) ha->vp_idx_map); 3205 3206 qla2x00_config_dma_addressing(ha); 3207 ql_dbg_pci(ql_dbg_init, pdev, 0x0020, 3208 "64 Bit addressing is %s.\n", 3209 ha->flags.enable_64bit_addressing ? "enable" : 3210 "disable"); 3211 ret = qla2x00_mem_alloc(ha, req_length, rsp_length, &req, &rsp); 3212 if (ret) { 3213 ql_log_pci(ql_log_fatal, pdev, 0x0031, 3214 "Failed to allocate memory for adapter, aborting.\n"); 3215 3216 goto probe_hw_failed; 3217 } 3218 3219 req->max_q_depth = MAX_Q_DEPTH; 3220 if (ql2xmaxqdepth != 0 && ql2xmaxqdepth <= 0xffffU) 3221 req->max_q_depth = ql2xmaxqdepth; 3222 3223 3224 base_vha = qla2x00_create_host(sht, ha); 3225 if (!base_vha) { 3226 ret = -ENOMEM; 3227 goto probe_hw_failed; 3228 } 3229 3230 pci_set_drvdata(pdev, base_vha); 3231 set_bit(PFLG_DRIVER_PROBING, &base_vha->pci_flags); 3232 3233 host = base_vha->host; 3234 base_vha->req = req; 3235 if (IS_QLA2XXX_MIDTYPE(ha)) 3236 base_vha->mgmt_svr_loop_id = 3237 qla2x00_reserve_mgmt_server_loop_id(base_vha); 3238 else 3239 base_vha->mgmt_svr_loop_id = MANAGEMENT_SERVER + 3240 base_vha->vp_idx; 3241 3242 /* Setup fcport template structure. 
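 * ha->mr.fcport is the pseudo remote port used later in probe by the
 * ISPFx00 discovery calls (qlafx00_fx_disc() with FXDISC_GET_CONFIG_INFO,
 * FXDISC_GET_PORT_INFO and FXDISC_REG_HOST_INFO).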
*/ 3243 ha->mr.fcport.vha = base_vha; 3244 ha->mr.fcport.port_type = FCT_UNKNOWN; 3245 ha->mr.fcport.loop_id = FC_NO_LOOP_ID; 3246 qla2x00_set_fcport_state(&ha->mr.fcport, FCS_UNCONFIGURED); 3247 ha->mr.fcport.supported_classes = FC_COS_UNSPECIFIED; 3248 ha->mr.fcport.scan_state = 1; 3249 3250 qla2xxx_reset_stats(host, QLA2XX_HW_ERROR | QLA2XX_SHT_LNK_DWN | 3251 QLA2XX_INT_ERR | QLA2XX_CMD_TIMEOUT | 3252 QLA2XX_RESET_CMD_ERR | QLA2XX_TGT_SHT_LNK_DOWN); 3253 3254 /* Set the SG table size based on ISP type */ 3255 if (!IS_FWI2_CAPABLE(ha)) { 3256 if (IS_QLA2100(ha)) 3257 host->sg_tablesize = 32; 3258 } else { 3259 if (!IS_QLA82XX(ha)) 3260 host->sg_tablesize = QLA_SG_ALL; 3261 } 3262 host->max_id = ha->max_fibre_devices; 3263 host->cmd_per_lun = 3; 3264 host->unique_id = host->host_no; 3265 if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) 3266 host->max_cmd_len = 32; 3267 else 3268 host->max_cmd_len = MAX_CMDSZ; 3269 host->max_channel = MAX_BUSES - 1; 3270 /* Older HBAs support only 16-bit LUNs */ 3271 if (!IS_QLAFX00(ha) && !IS_FWI2_CAPABLE(ha) && 3272 ql2xmaxlun > 0xffff) 3273 host->max_lun = 0xffff; 3274 else 3275 host->max_lun = ql2xmaxlun; 3276 host->transportt = qla2xxx_transport_template; 3277 sht->vendor_id = (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC); 3278 3279 ql_dbg(ql_dbg_init, base_vha, 0x0033, 3280 "max_id=%d this_id=%d " 3281 "cmd_per_len=%d unique_id=%d max_cmd_len=%d max_channel=%d " 3282 "max_lun=%llu transportt=%p, vendor_id=%llu.\n", host->max_id, 3283 host->this_id, host->cmd_per_lun, host->unique_id, 3284 host->max_cmd_len, host->max_channel, host->max_lun, 3285 host->transportt, sht->vendor_id); 3286 3287 INIT_WORK(&ha->heartbeat_work, qla_heartbeat_work_fn); 3288 3289 /* Set up the irqs */ 3290 ret = qla2x00_request_irqs(ha, rsp); 3291 if (ret) 3292 goto probe_failed; 3293 3294 /* Alloc arrays of request and response ring ptrs */ 3295 ret = qla2x00_alloc_queues(ha, req, rsp); 3296 if (ret) { 3297 ql_log(ql_log_fatal, base_vha, 0x003d, 3298 "Failed to allocate memory for queue pointers..." 3299 "aborting.\n"); 3300 ret = -ENODEV; 3301 goto probe_failed; 3302 } 3303 3304 if (ha->mqenable) { 3305 /* number of hardware queues supported by blk/scsi-mq*/ 3306 host->nr_hw_queues = ha->max_qpairs; 3307 3308 ql_dbg(ql_dbg_init, base_vha, 0x0192, 3309 "blk/scsi-mq enabled, HW queues = %d.\n", host->nr_hw_queues); 3310 } else { 3311 if (ql2xnvmeenable) { 3312 host->nr_hw_queues = ha->max_qpairs; 3313 ql_dbg(ql_dbg_init, base_vha, 0x0194, 3314 "FC-NVMe support is enabled, HW queues=%d\n", 3315 host->nr_hw_queues); 3316 } else { 3317 ql_dbg(ql_dbg_init, base_vha, 0x0193, 3318 "blk/scsi-mq disabled.\n"); 3319 } 3320 } 3321 3322 qlt_probe_one_stage1(base_vha, ha); 3323 3324 pci_save_state(pdev); 3325 3326 /* Assign back pointers */ 3327 rsp->req = req; 3328 req->rsp = rsp; 3329 3330 if (IS_QLAFX00(ha)) { 3331 ha->rsp_q_map[0] = rsp; 3332 ha->req_q_map[0] = req; 3333 set_bit(0, ha->req_qid_map); 3334 set_bit(0, ha->rsp_qid_map); 3335 } 3336 3337 /* FWI2-capable only. 
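 * The ISP24xx-style queue register pointers assigned here are the defaults;
 * they are overridden just below for MQ-capable parts (ISP83xx/27xx/28xx),
 * for ISPFx00 and for P3P (ISP82xx) register layouts.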
*/ 3338 req->req_q_in = &ha->iobase->isp24.req_q_in; 3339 req->req_q_out = &ha->iobase->isp24.req_q_out; 3340 rsp->rsp_q_in = &ha->iobase->isp24.rsp_q_in; 3341 rsp->rsp_q_out = &ha->iobase->isp24.rsp_q_out; 3342 if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) || 3343 IS_QLA28XX(ha)) { 3344 req->req_q_in = &ha->mqiobase->isp25mq.req_q_in; 3345 req->req_q_out = &ha->mqiobase->isp25mq.req_q_out; 3346 rsp->rsp_q_in = &ha->mqiobase->isp25mq.rsp_q_in; 3347 rsp->rsp_q_out = &ha->mqiobase->isp25mq.rsp_q_out; 3348 } 3349 3350 if (IS_QLAFX00(ha)) { 3351 req->req_q_in = &ha->iobase->ispfx00.req_q_in; 3352 req->req_q_out = &ha->iobase->ispfx00.req_q_out; 3353 rsp->rsp_q_in = &ha->iobase->ispfx00.rsp_q_in; 3354 rsp->rsp_q_out = &ha->iobase->ispfx00.rsp_q_out; 3355 } 3356 3357 if (IS_P3P_TYPE(ha)) { 3358 req->req_q_out = &ha->iobase->isp82.req_q_out[0]; 3359 rsp->rsp_q_in = &ha->iobase->isp82.rsp_q_in[0]; 3360 rsp->rsp_q_out = &ha->iobase->isp82.rsp_q_out[0]; 3361 } 3362 3363 ql_dbg(ql_dbg_multiq, base_vha, 0xc009, 3364 "rsp_q_map=%p req_q_map=%p rsp->req=%p req->rsp=%p.\n", 3365 ha->rsp_q_map, ha->req_q_map, rsp->req, req->rsp); 3366 ql_dbg(ql_dbg_multiq, base_vha, 0xc00a, 3367 "req->req_q_in=%p req->req_q_out=%p " 3368 "rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n", 3369 req->req_q_in, req->req_q_out, 3370 rsp->rsp_q_in, rsp->rsp_q_out); 3371 ql_dbg(ql_dbg_init, base_vha, 0x003e, 3372 "rsp_q_map=%p req_q_map=%p rsp->req=%p req->rsp=%p.\n", 3373 ha->rsp_q_map, ha->req_q_map, rsp->req, req->rsp); 3374 ql_dbg(ql_dbg_init, base_vha, 0x003f, 3375 "req->req_q_in=%p req->req_q_out=%p rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n", 3376 req->req_q_in, req->req_q_out, rsp->rsp_q_in, rsp->rsp_q_out); 3377 3378 ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 0); 3379 if (unlikely(!ha->wq)) { 3380 ret = -ENOMEM; 3381 goto probe_failed; 3382 } 3383 3384 if (ha->isp_ops->initialize_adapter(base_vha)) { 3385 ql_log(ql_log_fatal, base_vha, 0x00d6, 3386 "Failed to initialize adapter - Adapter flags %x.\n", 3387 base_vha->device_flags); 3388 3389 if (IS_QLA82XX(ha)) { 3390 qla82xx_idc_lock(ha); 3391 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 3392 QLA8XXX_DEV_FAILED); 3393 qla82xx_idc_unlock(ha); 3394 ql_log(ql_log_fatal, base_vha, 0x00d7, 3395 "HW State: FAILED.\n"); 3396 } else if (IS_QLA8044(ha)) { 3397 qla8044_idc_lock(ha); 3398 qla8044_wr_direct(base_vha, 3399 QLA8044_CRB_DEV_STATE_INDEX, 3400 QLA8XXX_DEV_FAILED); 3401 qla8044_idc_unlock(ha); 3402 ql_log(ql_log_fatal, base_vha, 0x0150, 3403 "HW State: FAILED.\n"); 3404 } 3405 3406 ret = -ENODEV; 3407 goto probe_failed; 3408 } 3409 3410 if (IS_QLAFX00(ha)) 3411 host->can_queue = QLAFX00_MAX_CANQUEUE; 3412 else 3413 host->can_queue = req->num_outstanding_cmds - 10; 3414 3415 ql_dbg(ql_dbg_init, base_vha, 0x0032, 3416 "can_queue=%d, req=%p, mgmt_svr_loop_id=%d, sg_tablesize=%d.\n", 3417 host->can_queue, base_vha->req, 3418 base_vha->mgmt_svr_loop_id, host->sg_tablesize); 3419 3420 /* Check if FW supports MQ or not for ISP25xx */ 3421 if (IS_QLA25XX(ha) && !(ha->fw_attributes & BIT_6)) 3422 ha->mqenable = 0; 3423 3424 if (ha->mqenable) { 3425 bool startit = false; 3426 3427 if (QLA_TGT_MODE_ENABLED()) 3428 startit = false; 3429 3430 if (ql2x_ini_mode == QLA2XXX_INI_MODE_ENABLED) 3431 startit = true; 3432 3433 /* Create start of day qpairs for Block MQ */ 3434 for (i = 0; i < ha->max_qpairs; i++) 3435 qla2xxx_create_qpair(base_vha, 5, 0, startit); 3436 } 3437 qla_init_iocb_limit(base_vha); 3438 3439 if (ha->flags.running_gold_fw) 3440 goto skip_dpc; 3441 3442 /* 3443 * 
Startup the kernel thread for this host adapter 3444 */ 3445 ha->dpc_thread = kthread_create(qla2x00_do_dpc, ha, 3446 "%s_dpc", base_vha->host_str); 3447 if (IS_ERR(ha->dpc_thread)) { 3448 ql_log(ql_log_fatal, base_vha, 0x00ed, 3449 "Failed to start DPC thread.\n"); 3450 ret = PTR_ERR(ha->dpc_thread); 3451 ha->dpc_thread = NULL; 3452 goto probe_failed; 3453 } 3454 ql_dbg(ql_dbg_init, base_vha, 0x00ee, 3455 "DPC thread started successfully.\n"); 3456 3457 /* 3458 * If we're not coming up in initiator mode, we might sit for 3459 * a while without waking up the dpc thread, which leads to a 3460 * stuck process warning. So just kick the dpc once here and 3461 * let the kthread start (and go back to sleep in qla2x00_do_dpc). 3462 */ 3463 qla2xxx_wake_dpc(base_vha); 3464 3465 INIT_WORK(&ha->board_disable, qla2x00_disable_board_on_pci_error); 3466 3467 if (IS_QLA8031(ha) || IS_MCTP_CAPABLE(ha)) { 3468 sprintf(wq_name, "qla2xxx_%lu_dpc_lp_wq", base_vha->host_no); 3469 ha->dpc_lp_wq = create_singlethread_workqueue(wq_name); 3470 INIT_WORK(&ha->idc_aen, qla83xx_service_idc_aen); 3471 3472 sprintf(wq_name, "qla2xxx_%lu_dpc_hp_wq", base_vha->host_no); 3473 ha->dpc_hp_wq = create_singlethread_workqueue(wq_name); 3474 INIT_WORK(&ha->nic_core_reset, qla83xx_nic_core_reset_work); 3475 INIT_WORK(&ha->idc_state_handler, 3476 qla83xx_idc_state_handler_work); 3477 INIT_WORK(&ha->nic_core_unrecoverable, 3478 qla83xx_nic_core_unrecoverable_work); 3479 } 3480 3481 skip_dpc: 3482 list_add_tail(&base_vha->list, &ha->vp_list); 3483 base_vha->host->irq = ha->pdev->irq; 3484 3485 /* Initialized the timer */ 3486 qla2x00_start_timer(base_vha, WATCH_INTERVAL); 3487 ql_dbg(ql_dbg_init, base_vha, 0x00ef, 3488 "Started qla2x00_timer with " 3489 "interval=%d.\n", WATCH_INTERVAL); 3490 ql_dbg(ql_dbg_init, base_vha, 0x00f0, 3491 "Detected hba at address=%p.\n", 3492 ha); 3493 3494 if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) { 3495 if (ha->fw_attributes & BIT_4) { 3496 int prot = 0, guard; 3497 3498 base_vha->flags.difdix_supported = 1; 3499 ql_dbg(ql_dbg_init, base_vha, 0x00f1, 3500 "Registering for DIF/DIX type 1 and 3 protection.\n"); 3501 if (ql2xenabledif == 1) 3502 prot = SHOST_DIX_TYPE0_PROTECTION; 3503 if (ql2xprotmask) 3504 scsi_host_set_prot(host, ql2xprotmask); 3505 else 3506 scsi_host_set_prot(host, 3507 prot | SHOST_DIF_TYPE1_PROTECTION 3508 | SHOST_DIF_TYPE2_PROTECTION 3509 | SHOST_DIF_TYPE3_PROTECTION 3510 | SHOST_DIX_TYPE1_PROTECTION 3511 | SHOST_DIX_TYPE2_PROTECTION 3512 | SHOST_DIX_TYPE3_PROTECTION); 3513 3514 guard = SHOST_DIX_GUARD_CRC; 3515 3516 if (IS_PI_IPGUARD_CAPABLE(ha) && 3517 (ql2xenabledif > 1 || IS_PI_DIFB_DIX0_CAPABLE(ha))) 3518 guard |= SHOST_DIX_GUARD_IP; 3519 3520 if (ql2xprotguard) 3521 scsi_host_set_guard(host, ql2xprotguard); 3522 else 3523 scsi_host_set_guard(host, guard); 3524 } else 3525 base_vha->flags.difdix_supported = 0; 3526 } 3527 3528 ha->isp_ops->enable_intrs(ha); 3529 3530 if (IS_QLAFX00(ha)) { 3531 ret = qlafx00_fx_disc(base_vha, 3532 &base_vha->hw->mr.fcport, FXDISC_GET_CONFIG_INFO); 3533 host->sg_tablesize = (ha->mr.extended_io_enabled) ? 
3534 QLA_SG_ALL : 128; 3535 } 3536 3537 ret = scsi_add_host(host, &pdev->dev); 3538 if (ret) 3539 goto probe_failed; 3540 3541 base_vha->flags.init_done = 1; 3542 base_vha->flags.online = 1; 3543 ha->prev_minidump_failed = 0; 3544 3545 ql_dbg(ql_dbg_init, base_vha, 0x00f2, 3546 "Init done and hba is online.\n"); 3547 3548 if (qla_ini_mode_enabled(base_vha) || 3549 qla_dual_mode_enabled(base_vha)) 3550 scsi_scan_host(host); 3551 else 3552 ql_log(ql_log_info, base_vha, 0x0122, 3553 "skipping scsi_scan_host() for non-initiator port\n"); 3554 3555 qla2x00_alloc_sysfs_attr(base_vha); 3556 3557 if (IS_QLAFX00(ha)) { 3558 ret = qlafx00_fx_disc(base_vha, 3559 &base_vha->hw->mr.fcport, FXDISC_GET_PORT_INFO); 3560 3561 /* Register system information */ 3562 ret = qlafx00_fx_disc(base_vha, 3563 &base_vha->hw->mr.fcport, FXDISC_REG_HOST_INFO); 3564 } 3565 3566 qla2x00_init_host_attr(base_vha); 3567 3568 qla2x00_dfs_setup(base_vha); 3569 3570 ql_log(ql_log_info, base_vha, 0x00fb, 3571 "QLogic %s - %s.\n", ha->model_number, ha->model_desc); 3572 ql_log(ql_log_info, base_vha, 0x00fc, 3573 "ISP%04X: %s @ %s hdma%c host#=%ld fw=%s.\n", 3574 pdev->device, ha->isp_ops->pci_info_str(base_vha, pci_info, 3575 sizeof(pci_info)), 3576 pci_name(pdev), ha->flags.enable_64bit_addressing ? '+' : '-', 3577 base_vha->host_no, 3578 ha->isp_ops->fw_version_str(base_vha, fw_str, sizeof(fw_str))); 3579 3580 qlt_add_target(ha, base_vha); 3581 3582 clear_bit(PFLG_DRIVER_PROBING, &base_vha->pci_flags); 3583 3584 if (test_bit(UNLOADING, &base_vha->dpc_flags)) 3585 return -ENODEV; 3586 3587 return 0; 3588 3589 probe_failed: 3590 qla_enode_stop(base_vha); 3591 qla_edb_stop(base_vha); 3592 if (base_vha->gnl.l) { 3593 dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size, 3594 base_vha->gnl.l, base_vha->gnl.ldma); 3595 base_vha->gnl.l = NULL; 3596 } 3597 3598 if (base_vha->timer_active) 3599 qla2x00_stop_timer(base_vha); 3600 base_vha->flags.online = 0; 3601 if (ha->dpc_thread) { 3602 struct task_struct *t = ha->dpc_thread; 3603 3604 ha->dpc_thread = NULL; 3605 kthread_stop(t); 3606 } 3607 3608 qla2x00_free_device(base_vha); 3609 scsi_host_put(base_vha->host); 3610 /* 3611 * Need to NULL out local req/rsp after 3612 * qla2x00_free_device => qla2x00_free_queues frees 3613 * what these are pointing to. Or else we'll 3614 * fall over below in qla2x00_free_req/rsp_que. 
3615 */ 3616 req = NULL; 3617 rsp = NULL; 3618 3619 probe_hw_failed: 3620 qla2x00_mem_free(ha); 3621 qla2x00_free_req_que(ha, req); 3622 qla2x00_free_rsp_que(ha, rsp); 3623 qla2x00_clear_drv_active(ha); 3624 3625 iospace_config_failed: 3626 if (IS_P3P_TYPE(ha)) { 3627 if (!ha->nx_pcibase) 3628 iounmap((device_reg_t *)ha->nx_pcibase); 3629 if (!ql2xdbwr) 3630 iounmap((device_reg_t *)ha->nxdb_wr_ptr); 3631 } else { 3632 if (ha->iobase) 3633 iounmap(ha->iobase); 3634 if (ha->cregbase) 3635 iounmap(ha->cregbase); 3636 } 3637 pci_release_selected_regions(ha->pdev, ha->bars); 3638 kfree(ha); 3639 3640 disable_device: 3641 pci_disable_device(pdev); 3642 return ret; 3643 } 3644 3645 static void __qla_set_remove_flag(scsi_qla_host_t *base_vha) 3646 { 3647 scsi_qla_host_t *vp; 3648 unsigned long flags; 3649 struct qla_hw_data *ha; 3650 3651 if (!base_vha) 3652 return; 3653 3654 ha = base_vha->hw; 3655 3656 spin_lock_irqsave(&ha->vport_slock, flags); 3657 list_for_each_entry(vp, &ha->vp_list, list) 3658 set_bit(PFLG_DRIVER_REMOVING, &vp->pci_flags); 3659 3660 /* 3661 * Indicate device removal to prevent future board_disable 3662 * and wait until any pending board_disable has completed. 3663 */ 3664 set_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags); 3665 spin_unlock_irqrestore(&ha->vport_slock, flags); 3666 } 3667 3668 static void 3669 qla2x00_shutdown(struct pci_dev *pdev) 3670 { 3671 scsi_qla_host_t *vha; 3672 struct qla_hw_data *ha; 3673 3674 vha = pci_get_drvdata(pdev); 3675 ha = vha->hw; 3676 3677 ql_log(ql_log_info, vha, 0xfffa, 3678 "Adapter shutdown\n"); 3679 3680 /* 3681 * Prevent future board_disable and wait 3682 * until any pending board_disable has completed. 3683 */ 3684 __qla_set_remove_flag(vha); 3685 cancel_work_sync(&ha->board_disable); 3686 3687 if (!atomic_read(&pdev->enable_cnt)) 3688 return; 3689 3690 /* Notify ISPFX00 firmware */ 3691 if (IS_QLAFX00(ha)) 3692 qlafx00_driver_shutdown(vha, 20); 3693 3694 /* Turn-off FCE trace */ 3695 if (ha->flags.fce_enabled) { 3696 qla2x00_disable_fce_trace(vha, NULL, NULL); 3697 ha->flags.fce_enabled = 0; 3698 } 3699 3700 /* Turn-off EFT trace */ 3701 if (ha->eft) 3702 qla2x00_disable_eft_trace(vha); 3703 3704 if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) || 3705 IS_QLA28XX(ha)) { 3706 if (ha->flags.fw_started) 3707 qla2x00_abort_isp_cleanup(vha); 3708 } else { 3709 /* Stop currently executing firmware. 
*/ 3710 qla2x00_try_to_stop_firmware(vha); 3711 } 3712 3713 /* Disable timer */ 3714 if (vha->timer_active) 3715 qla2x00_stop_timer(vha); 3716 3717 /* Turn adapter off line */ 3718 vha->flags.online = 0; 3719 3720 /* turn-off interrupts on the card */ 3721 if (ha->interrupts_on) { 3722 vha->flags.init_done = 0; 3723 ha->isp_ops->disable_intrs(ha); 3724 } 3725 3726 qla2x00_free_irqs(vha); 3727 3728 qla2x00_free_fw_dump(ha); 3729 3730 pci_disable_device(pdev); 3731 ql_log(ql_log_info, vha, 0xfffe, 3732 "Adapter shutdown successfully.\n"); 3733 } 3734 3735 /* Deletes all the virtual ports for a given ha */ 3736 static void 3737 qla2x00_delete_all_vps(struct qla_hw_data *ha, scsi_qla_host_t *base_vha) 3738 { 3739 scsi_qla_host_t *vha; 3740 unsigned long flags; 3741 3742 mutex_lock(&ha->vport_lock); 3743 while (ha->cur_vport_count) { 3744 spin_lock_irqsave(&ha->vport_slock, flags); 3745 3746 BUG_ON(base_vha->list.next == &ha->vp_list); 3747 /* This assumes first entry in ha->vp_list is always base vha */ 3748 vha = list_first_entry(&base_vha->list, scsi_qla_host_t, list); 3749 scsi_host_get(vha->host); 3750 3751 spin_unlock_irqrestore(&ha->vport_slock, flags); 3752 mutex_unlock(&ha->vport_lock); 3753 3754 qla_nvme_delete(vha); 3755 3756 fc_vport_terminate(vha->fc_vport); 3757 scsi_host_put(vha->host); 3758 3759 mutex_lock(&ha->vport_lock); 3760 } 3761 mutex_unlock(&ha->vport_lock); 3762 } 3763 3764 /* Stops all deferred work threads */ 3765 static void 3766 qla2x00_destroy_deferred_work(struct qla_hw_data *ha) 3767 { 3768 /* Cancel all work and destroy DPC workqueues */ 3769 if (ha->dpc_lp_wq) { 3770 cancel_work_sync(&ha->idc_aen); 3771 destroy_workqueue(ha->dpc_lp_wq); 3772 ha->dpc_lp_wq = NULL; 3773 } 3774 3775 if (ha->dpc_hp_wq) { 3776 cancel_work_sync(&ha->nic_core_reset); 3777 cancel_work_sync(&ha->idc_state_handler); 3778 cancel_work_sync(&ha->nic_core_unrecoverable); 3779 destroy_workqueue(ha->dpc_hp_wq); 3780 ha->dpc_hp_wq = NULL; 3781 } 3782 3783 /* Kill the kernel thread for this host */ 3784 if (ha->dpc_thread) { 3785 struct task_struct *t = ha->dpc_thread; 3786 3787 /* 3788 * qla2xxx_wake_dpc checks for ->dpc_thread 3789 * so we need to zero it out. 
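 * kthread_stop() below then blocks until qla2x00_do_dpc() has exited.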
3790 */ 3791 ha->dpc_thread = NULL; 3792 kthread_stop(t); 3793 } 3794 } 3795 3796 static void 3797 qla2x00_unmap_iobases(struct qla_hw_data *ha) 3798 { 3799 if (IS_QLA82XX(ha)) { 3800 3801 iounmap((device_reg_t *)ha->nx_pcibase); 3802 if (!ql2xdbwr) 3803 iounmap((device_reg_t *)ha->nxdb_wr_ptr); 3804 } else { 3805 if (ha->iobase) 3806 iounmap(ha->iobase); 3807 3808 if (ha->cregbase) 3809 iounmap(ha->cregbase); 3810 3811 if (ha->mqiobase) 3812 iounmap(ha->mqiobase); 3813 3814 if (ha->msixbase) 3815 iounmap(ha->msixbase); 3816 } 3817 } 3818 3819 static void 3820 qla2x00_clear_drv_active(struct qla_hw_data *ha) 3821 { 3822 if (IS_QLA8044(ha)) { 3823 qla8044_idc_lock(ha); 3824 qla8044_clear_drv_active(ha); 3825 qla8044_idc_unlock(ha); 3826 } else if (IS_QLA82XX(ha)) { 3827 qla82xx_idc_lock(ha); 3828 qla82xx_clear_drv_active(ha); 3829 qla82xx_idc_unlock(ha); 3830 } 3831 } 3832 3833 static void 3834 qla2x00_remove_one(struct pci_dev *pdev) 3835 { 3836 scsi_qla_host_t *base_vha; 3837 struct qla_hw_data *ha; 3838 3839 base_vha = pci_get_drvdata(pdev); 3840 ha = base_vha->hw; 3841 ql_log(ql_log_info, base_vha, 0xb079, 3842 "Removing driver\n"); 3843 __qla_set_remove_flag(base_vha); 3844 cancel_work_sync(&ha->board_disable); 3845 3846 /* 3847 * If the PCI device is disabled then there was a PCI-disconnect and 3848 * qla2x00_disable_board_on_pci_error has taken care of most of the 3849 * resources. 3850 */ 3851 if (!atomic_read(&pdev->enable_cnt)) { 3852 dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size, 3853 base_vha->gnl.l, base_vha->gnl.ldma); 3854 base_vha->gnl.l = NULL; 3855 scsi_host_put(base_vha->host); 3856 kfree(ha); 3857 pci_set_drvdata(pdev, NULL); 3858 return; 3859 } 3860 qla2x00_wait_for_hba_ready(base_vha); 3861 3862 /* 3863 * if UNLOADING flag is already set, then continue unload, 3864 * where it was set first. 
3865 */ 3866 if (test_and_set_bit(UNLOADING, &base_vha->dpc_flags)) 3867 return; 3868 3869 if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) || 3870 IS_QLA28XX(ha)) { 3871 if (ha->flags.fw_started) 3872 qla2x00_abort_isp_cleanup(base_vha); 3873 } else if (!IS_QLAFX00(ha)) { 3874 if (IS_QLA8031(ha)) { 3875 ql_dbg(ql_dbg_p3p, base_vha, 0xb07e, 3876 "Clearing fcoe driver presence.\n"); 3877 if (qla83xx_clear_drv_presence(base_vha) != QLA_SUCCESS) 3878 ql_dbg(ql_dbg_p3p, base_vha, 0xb079, 3879 "Error while clearing DRV-Presence.\n"); 3880 } 3881 3882 qla2x00_try_to_stop_firmware(base_vha); 3883 } 3884 3885 qla2x00_wait_for_sess_deletion(base_vha); 3886 3887 qla_nvme_delete(base_vha); 3888 3889 dma_free_coherent(&ha->pdev->dev, 3890 base_vha->gnl.size, base_vha->gnl.l, base_vha->gnl.ldma); 3891 3892 base_vha->gnl.l = NULL; 3893 qla_enode_stop(base_vha); 3894 qla_edb_stop(base_vha); 3895 3896 vfree(base_vha->scan.l); 3897 3898 if (IS_QLAFX00(ha)) 3899 qlafx00_driver_shutdown(base_vha, 20); 3900 3901 qla2x00_delete_all_vps(ha, base_vha); 3902 3903 qla2x00_dfs_remove(base_vha); 3904 3905 qla84xx_put_chip(base_vha); 3906 3907 /* Disable timer */ 3908 if (base_vha->timer_active) 3909 qla2x00_stop_timer(base_vha); 3910 3911 base_vha->flags.online = 0; 3912 3913 /* free DMA memory */ 3914 if (ha->exlogin_buf) 3915 qla2x00_free_exlogin_buffer(ha); 3916 3917 /* free DMA memory */ 3918 if (ha->exchoffld_buf) 3919 qla2x00_free_exchoffld_buffer(ha); 3920 3921 qla2x00_destroy_deferred_work(ha); 3922 3923 qlt_remove_target(ha, base_vha); 3924 3925 qla2x00_free_sysfs_attr(base_vha, true); 3926 3927 fc_remove_host(base_vha->host); 3928 3929 scsi_remove_host(base_vha->host); 3930 3931 qla2x00_free_device(base_vha); 3932 3933 qla2x00_clear_drv_active(ha); 3934 3935 scsi_host_put(base_vha->host); 3936 3937 qla2x00_unmap_iobases(ha); 3938 3939 pci_release_selected_regions(ha->pdev, ha->bars); 3940 kfree(ha); 3941 3942 pci_disable_pcie_error_reporting(pdev); 3943 3944 pci_disable_device(pdev); 3945 } 3946 3947 static inline void 3948 qla24xx_free_purex_list(struct purex_list *list) 3949 { 3950 struct purex_item *item, *next; 3951 ulong flags; 3952 3953 spin_lock_irqsave(&list->lock, flags); 3954 list_for_each_entry_safe(item, next, &list->head, list) { 3955 list_del(&item->list); 3956 if (item == &item->vha->default_item) 3957 continue; 3958 kfree(item); 3959 } 3960 spin_unlock_irqrestore(&list->lock, flags); 3961 } 3962 3963 static void 3964 qla2x00_free_device(scsi_qla_host_t *vha) 3965 { 3966 struct qla_hw_data *ha = vha->hw; 3967 3968 qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16); 3969 3970 /* Disable timer */ 3971 if (vha->timer_active) 3972 qla2x00_stop_timer(vha); 3973 3974 qla25xx_delete_queues(vha); 3975 vha->flags.online = 0; 3976 3977 /* turn-off interrupts on the card */ 3978 if (ha->interrupts_on) { 3979 vha->flags.init_done = 0; 3980 ha->isp_ops->disable_intrs(ha); 3981 } 3982 3983 qla2x00_free_fcports(vha); 3984 3985 qla2x00_free_irqs(vha); 3986 3987 /* Flush the work queue and remove it */ 3988 if (ha->wq) { 3989 destroy_workqueue(ha->wq); 3990 ha->wq = NULL; 3991 } 3992 3993 3994 qla24xx_free_purex_list(&vha->purex_list); 3995 3996 qla2x00_mem_free(ha); 3997 3998 qla82xx_md_free(vha); 3999 4000 qla_edif_sadb_release_free_pool(ha); 4001 qla_edif_sadb_release(ha); 4002 4003 qla2x00_free_queues(ha); 4004 } 4005 4006 void qla2x00_free_fcports(struct scsi_qla_host *vha) 4007 { 4008 fc_port_t *fcport, *tfcport; 4009 4010 list_for_each_entry_safe(fcport, tfcport, &vha->vp_fcports, list) 4011 
qla2x00_free_fcport(fcport); 4012 } 4013 4014 static inline void 4015 qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport) 4016 { 4017 int now; 4018 4019 if (!fcport->rport) 4020 return; 4021 4022 if (fcport->rport) { 4023 ql_dbg(ql_dbg_disc, fcport->vha, 0x2109, 4024 "%s %8phN. rport %p roles %x\n", 4025 __func__, fcport->port_name, fcport->rport, 4026 fcport->rport->roles); 4027 fc_remote_port_delete(fcport->rport); 4028 } 4029 qlt_do_generation_tick(vha, &now); 4030 } 4031 4032 /* 4033 * qla2x00_mark_device_lost Updates fcport state when device goes offline. 4034 * 4035 * Input: ha = adapter block pointer. fcport = port structure pointer. 4036 * 4037 * Return: None. 4038 * 4039 * Context: 4040 */ 4041 void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport, 4042 int do_login) 4043 { 4044 if (IS_QLAFX00(vha->hw)) { 4045 qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST); 4046 qla2x00_schedule_rport_del(vha, fcport); 4047 return; 4048 } 4049 4050 if (atomic_read(&fcport->state) == FCS_ONLINE && 4051 vha->vp_idx == fcport->vha->vp_idx) { 4052 qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST); 4053 qla2x00_schedule_rport_del(vha, fcport); 4054 } 4055 4056 /* 4057 * We may need to retry the login, so don't change the state of the 4058 * port but do the retries. 4059 */ 4060 if (atomic_read(&fcport->state) != FCS_DEVICE_DEAD) 4061 qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST); 4062 4063 if (!do_login) 4064 return; 4065 4066 set_bit(RELOGIN_NEEDED, &vha->dpc_flags); 4067 } 4068 4069 void 4070 qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha) 4071 { 4072 fc_port_t *fcport; 4073 4074 ql_dbg(ql_dbg_disc, vha, 0x20f1, 4075 "Mark all dev lost\n"); 4076 4077 list_for_each_entry(fcport, &vha->vp_fcports, list) { 4078 if (fcport->loop_id != FC_NO_LOOP_ID && 4079 (fcport->flags & FCF_FCP2_DEVICE) && 4080 fcport->port_type == FCT_TARGET && 4081 !qla2x00_reset_active(vha)) { 4082 ql_dbg(ql_dbg_disc, vha, 0x211a, 4083 "Delaying session delete for FCP2 flags 0x%x port_type = 0x%x port_id=%06x %phC", 4084 fcport->flags, fcport->port_type, 4085 fcport->d_id.b24, fcport->port_name); 4086 continue; 4087 } 4088 fcport->scan_state = 0; 4089 qlt_schedule_sess_for_deletion(fcport); 4090 } 4091 } 4092 4093 static void qla2x00_set_reserved_loop_ids(struct qla_hw_data *ha) 4094 { 4095 int i; 4096 4097 if (IS_FWI2_CAPABLE(ha)) 4098 return; 4099 4100 for (i = 0; i < SNS_FIRST_LOOP_ID; i++) 4101 set_bit(i, ha->loop_id_map); 4102 set_bit(MANAGEMENT_SERVER, ha->loop_id_map); 4103 set_bit(BROADCAST, ha->loop_id_map); 4104 } 4105 4106 /* 4107 * qla2x00_mem_alloc 4108 * Allocates adapter memory. 4109 * 4110 * Returns: 4111 * 0 = success. 4112 * !0 = failure. 
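 * On failure, everything allocated up to that point is unwound via the
 * fail_* labels below.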
4113 */ 4114 static int 4115 qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len, 4116 struct req_que **req, struct rsp_que **rsp) 4117 { 4118 char name[16]; 4119 int rc; 4120 4121 ha->init_cb = dma_alloc_coherent(&ha->pdev->dev, ha->init_cb_size, 4122 &ha->init_cb_dma, GFP_KERNEL); 4123 if (!ha->init_cb) 4124 goto fail; 4125 4126 rc = btree_init32(&ha->host_map); 4127 if (rc) 4128 goto fail_free_init_cb; 4129 4130 if (qlt_mem_alloc(ha) < 0) 4131 goto fail_free_btree; 4132 4133 ha->gid_list = dma_alloc_coherent(&ha->pdev->dev, 4134 qla2x00_gid_list_size(ha), &ha->gid_list_dma, GFP_KERNEL); 4135 if (!ha->gid_list) 4136 goto fail_free_tgt_mem; 4137 4138 ha->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep); 4139 if (!ha->srb_mempool) 4140 goto fail_free_gid_list; 4141 4142 if (IS_P3P_TYPE(ha) || IS_QLA27XX(ha) || (ql2xsecenable && IS_QLA28XX(ha))) { 4143 /* Allocate cache for CT6 Ctx. */ 4144 if (!ctx_cachep) { 4145 ctx_cachep = kmem_cache_create("qla2xxx_ctx", 4146 sizeof(struct ct6_dsd), 0, 4147 SLAB_HWCACHE_ALIGN, NULL); 4148 if (!ctx_cachep) 4149 goto fail_free_srb_mempool; 4150 } 4151 ha->ctx_mempool = mempool_create_slab_pool(SRB_MIN_REQ, 4152 ctx_cachep); 4153 if (!ha->ctx_mempool) 4154 goto fail_free_srb_mempool; 4155 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0021, 4156 "ctx_cachep=%p ctx_mempool=%p.\n", 4157 ctx_cachep, ha->ctx_mempool); 4158 } 4159 4160 /* Get memory for cached NVRAM */ 4161 ha->nvram = kzalloc(MAX_NVRAM_SIZE, GFP_KERNEL); 4162 if (!ha->nvram) 4163 goto fail_free_ctx_mempool; 4164 4165 snprintf(name, sizeof(name), "%s_%d", QLA2XXX_DRIVER_NAME, 4166 ha->pdev->device); 4167 ha->s_dma_pool = dma_pool_create(name, &ha->pdev->dev, 4168 DMA_POOL_SIZE, 8, 0); 4169 if (!ha->s_dma_pool) 4170 goto fail_free_nvram; 4171 4172 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0022, 4173 "init_cb=%p gid_list=%p, srb_mempool=%p s_dma_pool=%p.\n", 4174 ha->init_cb, ha->gid_list, ha->srb_mempool, ha->s_dma_pool); 4175 4176 if (IS_P3P_TYPE(ha) || ql2xenabledif || (IS_QLA28XX(ha) && ql2xsecenable)) { 4177 ha->dl_dma_pool = dma_pool_create(name, &ha->pdev->dev, 4178 DSD_LIST_DMA_POOL_SIZE, 8, 0); 4179 if (!ha->dl_dma_pool) { 4180 ql_log_pci(ql_log_fatal, ha->pdev, 0x0023, 4181 "Failed to allocate memory for dl_dma_pool.\n"); 4182 goto fail_s_dma_pool; 4183 } 4184 4185 ha->fcp_cmnd_dma_pool = dma_pool_create(name, &ha->pdev->dev, 4186 FCP_CMND_DMA_POOL_SIZE, 8, 0); 4187 if (!ha->fcp_cmnd_dma_pool) { 4188 ql_log_pci(ql_log_fatal, ha->pdev, 0x0024, 4189 "Failed to allocate memory for fcp_cmnd_dma_pool.\n"); 4190 goto fail_dl_dma_pool; 4191 } 4192 4193 if (ql2xenabledif) { 4194 u64 bufsize = DIF_BUNDLING_DMA_POOL_SIZE; 4195 struct dsd_dma *dsd, *nxt; 4196 uint i; 4197 /* Creata a DMA pool of buffers for DIF bundling */ 4198 ha->dif_bundl_pool = dma_pool_create(name, 4199 &ha->pdev->dev, DIF_BUNDLING_DMA_POOL_SIZE, 8, 0); 4200 if (!ha->dif_bundl_pool) { 4201 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0024, 4202 "%s: failed create dif_bundl_pool\n", 4203 __func__); 4204 goto fail_dif_bundl_dma_pool; 4205 } 4206 4207 INIT_LIST_HEAD(&ha->pool.good.head); 4208 INIT_LIST_HEAD(&ha->pool.unusable.head); 4209 ha->pool.good.count = 0; 4210 ha->pool.unusable.count = 0; 4211 for (i = 0; i < 128; i++) { 4212 dsd = kzalloc(sizeof(*dsd), GFP_ATOMIC); 4213 if (!dsd) { 4214 ql_dbg_pci(ql_dbg_init, ha->pdev, 4215 0xe0ee, "%s: failed alloc dsd\n", 4216 __func__); 4217 return -ENOMEM; 4218 } 4219 ha->dif_bundle_kallocs++; 4220 4221 dsd->dsd_addr = dma_pool_alloc( 4222 ha->dif_bundl_pool, 
GFP_ATOMIC, 4223 &dsd->dsd_list_dma); 4224 if (!dsd->dsd_addr) { 4225 ql_dbg_pci(ql_dbg_init, ha->pdev, 4226 0xe0ee, 4227 "%s: failed alloc ->dsd_addr\n", 4228 __func__); 4229 kfree(dsd); 4230 ha->dif_bundle_kallocs--; 4231 continue; 4232 } 4233 ha->dif_bundle_dma_allocs++; 4234 4235 /* 4236 * if DMA buffer crosses 4G boundary, 4237 * put it on bad list 4238 */ 4239 if (MSD(dsd->dsd_list_dma) ^ 4240 MSD(dsd->dsd_list_dma + bufsize)) { 4241 list_add_tail(&dsd->list, 4242 &ha->pool.unusable.head); 4243 ha->pool.unusable.count++; 4244 } else { 4245 list_add_tail(&dsd->list, 4246 &ha->pool.good.head); 4247 ha->pool.good.count++; 4248 } 4249 } 4250 4251 /* return the good ones back to the pool */ 4252 list_for_each_entry_safe(dsd, nxt, 4253 &ha->pool.good.head, list) { 4254 list_del(&dsd->list); 4255 dma_pool_free(ha->dif_bundl_pool, 4256 dsd->dsd_addr, dsd->dsd_list_dma); 4257 ha->dif_bundle_dma_allocs--; 4258 kfree(dsd); 4259 ha->dif_bundle_kallocs--; 4260 } 4261 4262 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0024, 4263 "%s: dif dma pool (good=%u unusable=%u)\n", 4264 __func__, ha->pool.good.count, 4265 ha->pool.unusable.count); 4266 } 4267 4268 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0025, 4269 "dl_dma_pool=%p fcp_cmnd_dma_pool=%p dif_bundl_pool=%p.\n", 4270 ha->dl_dma_pool, ha->fcp_cmnd_dma_pool, 4271 ha->dif_bundl_pool); 4272 } 4273 4274 /* Allocate memory for SNS commands */ 4275 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 4276 /* Get consistent memory allocated for SNS commands */ 4277 ha->sns_cmd = dma_alloc_coherent(&ha->pdev->dev, 4278 sizeof(struct sns_cmd_pkt), &ha->sns_cmd_dma, GFP_KERNEL); 4279 if (!ha->sns_cmd) 4280 goto fail_dma_pool; 4281 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0026, 4282 "sns_cmd: %p.\n", ha->sns_cmd); 4283 } else { 4284 /* Get consistent memory allocated for MS IOCB */ 4285 ha->ms_iocb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, 4286 &ha->ms_iocb_dma); 4287 if (!ha->ms_iocb) 4288 goto fail_dma_pool; 4289 /* Get consistent memory allocated for CT SNS commands */ 4290 ha->ct_sns = dma_alloc_coherent(&ha->pdev->dev, 4291 sizeof(struct ct_sns_pkt), &ha->ct_sns_dma, GFP_KERNEL); 4292 if (!ha->ct_sns) 4293 goto fail_free_ms_iocb; 4294 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0027, 4295 "ms_iocb=%p ct_sns=%p.\n", 4296 ha->ms_iocb, ha->ct_sns); 4297 } 4298 4299 /* Allocate memory for request ring */ 4300 *req = kzalloc(sizeof(struct req_que), GFP_KERNEL); 4301 if (!*req) { 4302 ql_log_pci(ql_log_fatal, ha->pdev, 0x0028, 4303 "Failed to allocate memory for req.\n"); 4304 goto fail_req; 4305 } 4306 (*req)->length = req_len; 4307 (*req)->ring = dma_alloc_coherent(&ha->pdev->dev, 4308 ((*req)->length + 1) * sizeof(request_t), 4309 &(*req)->dma, GFP_KERNEL); 4310 if (!(*req)->ring) { 4311 ql_log_pci(ql_log_fatal, ha->pdev, 0x0029, 4312 "Failed to allocate memory for req_ring.\n"); 4313 goto fail_req_ring; 4314 } 4315 /* Allocate memory for response ring */ 4316 *rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL); 4317 if (!*rsp) { 4318 ql_log_pci(ql_log_fatal, ha->pdev, 0x002a, 4319 "Failed to allocate memory for rsp.\n"); 4320 goto fail_rsp; 4321 } 4322 (*rsp)->hw = ha; 4323 (*rsp)->length = rsp_len; 4324 (*rsp)->ring = dma_alloc_coherent(&ha->pdev->dev, 4325 ((*rsp)->length + 1) * sizeof(response_t), 4326 &(*rsp)->dma, GFP_KERNEL); 4327 if (!(*rsp)->ring) { 4328 ql_log_pci(ql_log_fatal, ha->pdev, 0x002b, 4329 "Failed to allocate memory for rsp_ring.\n"); 4330 goto fail_rsp_ring; 4331 } 4332 (*req)->rsp = *rsp; 4333 (*rsp)->req = *req; 4334 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002c, 4335 
"req=%p req->length=%d req->ring=%p rsp=%p " 4336 "rsp->length=%d rsp->ring=%p.\n", 4337 *req, (*req)->length, (*req)->ring, *rsp, (*rsp)->length, 4338 (*rsp)->ring); 4339 /* Allocate memory for NVRAM data for vports */ 4340 if (ha->nvram_npiv_size) { 4341 ha->npiv_info = kcalloc(ha->nvram_npiv_size, 4342 sizeof(struct qla_npiv_entry), 4343 GFP_KERNEL); 4344 if (!ha->npiv_info) { 4345 ql_log_pci(ql_log_fatal, ha->pdev, 0x002d, 4346 "Failed to allocate memory for npiv_info.\n"); 4347 goto fail_npiv_info; 4348 } 4349 } else 4350 ha->npiv_info = NULL; 4351 4352 /* Get consistent memory allocated for EX-INIT-CB. */ 4353 if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) || 4354 IS_QLA28XX(ha)) { 4355 ha->ex_init_cb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, 4356 &ha->ex_init_cb_dma); 4357 if (!ha->ex_init_cb) 4358 goto fail_ex_init_cb; 4359 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002e, 4360 "ex_init_cb=%p.\n", ha->ex_init_cb); 4361 } 4362 4363 /* Get consistent memory allocated for Special Features-CB. */ 4364 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 4365 ha->sf_init_cb = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, 4366 &ha->sf_init_cb_dma); 4367 if (!ha->sf_init_cb) 4368 goto fail_sf_init_cb; 4369 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0199, 4370 "sf_init_cb=%p.\n", ha->sf_init_cb); 4371 } 4372 4373 INIT_LIST_HEAD(&ha->gbl_dsd_list); 4374 4375 /* Get consistent memory allocated for Async Port-Database. */ 4376 if (!IS_FWI2_CAPABLE(ha)) { 4377 ha->async_pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, 4378 &ha->async_pd_dma); 4379 if (!ha->async_pd) 4380 goto fail_async_pd; 4381 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002f, 4382 "async_pd=%p.\n", ha->async_pd); 4383 } 4384 4385 INIT_LIST_HEAD(&ha->vp_list); 4386 4387 /* Allocate memory for our loop_id bitmap */ 4388 ha->loop_id_map = kcalloc(BITS_TO_LONGS(LOOPID_MAP_SIZE), 4389 sizeof(long), 4390 GFP_KERNEL); 4391 if (!ha->loop_id_map) 4392 goto fail_loop_id_map; 4393 else { 4394 qla2x00_set_reserved_loop_ids(ha); 4395 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0123, 4396 "loop_id_map=%p.\n", ha->loop_id_map); 4397 } 4398 4399 ha->sfp_data = dma_alloc_coherent(&ha->pdev->dev, 4400 SFP_DEV_SIZE, &ha->sfp_data_dma, GFP_KERNEL); 4401 if (!ha->sfp_data) { 4402 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011b, 4403 "Unable to allocate memory for SFP read-data.\n"); 4404 goto fail_sfp_data; 4405 } 4406 4407 ha->flt = dma_alloc_coherent(&ha->pdev->dev, 4408 sizeof(struct qla_flt_header) + FLT_REGIONS_SIZE, &ha->flt_dma, 4409 GFP_KERNEL); 4410 if (!ha->flt) { 4411 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011b, 4412 "Unable to allocate memory for FLT.\n"); 4413 goto fail_flt_buffer; 4414 } 4415 4416 /* allocate the purex dma pool */ 4417 ha->purex_dma_pool = dma_pool_create(name, &ha->pdev->dev, 4418 ELS_MAX_PAYLOAD, 8, 0); 4419 4420 if (!ha->purex_dma_pool) { 4421 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011b, 4422 "Unable to allocate purex_dma_pool.\n"); 4423 goto fail_flt; 4424 } 4425 4426 ha->elsrej.size = sizeof(struct fc_els_ls_rjt) + 16; 4427 ha->elsrej.c = dma_alloc_coherent(&ha->pdev->dev, 4428 ha->elsrej.size, &ha->elsrej.cdma, GFP_KERNEL); 4429 4430 if (!ha->elsrej.c) { 4431 ql_dbg_pci(ql_dbg_init, ha->pdev, 0xffff, 4432 "Alloc failed for els reject cmd.\n"); 4433 goto fail_elsrej; 4434 } 4435 ha->elsrej.c->er_cmd = ELS_LS_RJT; 4436 ha->elsrej.c->er_reason = ELS_RJT_LOGIC; 4437 ha->elsrej.c->er_explan = ELS_EXPL_UNAB_DATA; 4438 return 0; 4439 4440 fail_elsrej: 4441 dma_pool_destroy(ha->purex_dma_pool); 4442 fail_flt: 4443 dma_free_coherent(&ha->pdev->dev, SFP_DEV_SIZE, 
4444 ha->flt, ha->flt_dma); 4445 4446 fail_flt_buffer: 4447 dma_free_coherent(&ha->pdev->dev, SFP_DEV_SIZE, 4448 ha->sfp_data, ha->sfp_data_dma); 4449 fail_sfp_data: 4450 kfree(ha->loop_id_map); 4451 fail_loop_id_map: 4452 dma_pool_free(ha->s_dma_pool, ha->async_pd, ha->async_pd_dma); 4453 fail_async_pd: 4454 dma_pool_free(ha->s_dma_pool, ha->sf_init_cb, ha->sf_init_cb_dma); 4455 fail_sf_init_cb: 4456 dma_pool_free(ha->s_dma_pool, ha->ex_init_cb, ha->ex_init_cb_dma); 4457 fail_ex_init_cb: 4458 kfree(ha->npiv_info); 4459 fail_npiv_info: 4460 dma_free_coherent(&ha->pdev->dev, ((*rsp)->length + 1) * 4461 sizeof(response_t), (*rsp)->ring, (*rsp)->dma); 4462 (*rsp)->ring = NULL; 4463 (*rsp)->dma = 0; 4464 fail_rsp_ring: 4465 kfree(*rsp); 4466 *rsp = NULL; 4467 fail_rsp: 4468 dma_free_coherent(&ha->pdev->dev, ((*req)->length + 1) * 4469 sizeof(request_t), (*req)->ring, (*req)->dma); 4470 (*req)->ring = NULL; 4471 (*req)->dma = 0; 4472 fail_req_ring: 4473 kfree(*req); 4474 *req = NULL; 4475 fail_req: 4476 dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt), 4477 ha->ct_sns, ha->ct_sns_dma); 4478 ha->ct_sns = NULL; 4479 ha->ct_sns_dma = 0; 4480 fail_free_ms_iocb: 4481 dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma); 4482 ha->ms_iocb = NULL; 4483 ha->ms_iocb_dma = 0; 4484 4485 if (ha->sns_cmd) 4486 dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt), 4487 ha->sns_cmd, ha->sns_cmd_dma); 4488 fail_dma_pool: 4489 if (ql2xenabledif) { 4490 struct dsd_dma *dsd, *nxt; 4491 4492 list_for_each_entry_safe(dsd, nxt, &ha->pool.unusable.head, 4493 list) { 4494 list_del(&dsd->list); 4495 dma_pool_free(ha->dif_bundl_pool, dsd->dsd_addr, 4496 dsd->dsd_list_dma); 4497 ha->dif_bundle_dma_allocs--; 4498 kfree(dsd); 4499 ha->dif_bundle_kallocs--; 4500 ha->pool.unusable.count--; 4501 } 4502 dma_pool_destroy(ha->dif_bundl_pool); 4503 ha->dif_bundl_pool = NULL; 4504 } 4505 4506 fail_dif_bundl_dma_pool: 4507 if (IS_QLA82XX(ha) || ql2xenabledif) { 4508 dma_pool_destroy(ha->fcp_cmnd_dma_pool); 4509 ha->fcp_cmnd_dma_pool = NULL; 4510 } 4511 fail_dl_dma_pool: 4512 if (IS_QLA82XX(ha) || ql2xenabledif) { 4513 dma_pool_destroy(ha->dl_dma_pool); 4514 ha->dl_dma_pool = NULL; 4515 } 4516 fail_s_dma_pool: 4517 dma_pool_destroy(ha->s_dma_pool); 4518 ha->s_dma_pool = NULL; 4519 fail_free_nvram: 4520 kfree(ha->nvram); 4521 ha->nvram = NULL; 4522 fail_free_ctx_mempool: 4523 mempool_destroy(ha->ctx_mempool); 4524 ha->ctx_mempool = NULL; 4525 fail_free_srb_mempool: 4526 mempool_destroy(ha->srb_mempool); 4527 ha->srb_mempool = NULL; 4528 fail_free_gid_list: 4529 dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha), 4530 ha->gid_list, 4531 ha->gid_list_dma); 4532 ha->gid_list = NULL; 4533 ha->gid_list_dma = 0; 4534 fail_free_tgt_mem: 4535 qlt_mem_free(ha); 4536 fail_free_btree: 4537 btree_destroy32(&ha->host_map); 4538 fail_free_init_cb: 4539 dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, ha->init_cb, 4540 ha->init_cb_dma); 4541 ha->init_cb = NULL; 4542 ha->init_cb_dma = 0; 4543 fail: 4544 ql_log(ql_log_fatal, NULL, 0x0030, 4545 "Memory allocation failure.\n"); 4546 return -ENOMEM; 4547 } 4548 4549 int 4550 qla2x00_set_exlogins_buffer(scsi_qla_host_t *vha) 4551 { 4552 int rval; 4553 uint16_t size, max_cnt; 4554 uint32_t temp; 4555 struct qla_hw_data *ha = vha->hw; 4556 4557 /* Return if we don't need to alloacate any extended logins */ 4558 if (ql2xexlogins <= MAX_FIBRE_DEVICES_2400) 4559 return QLA_SUCCESS; 4560 4561 if (!IS_EXLOGIN_OFFLD_CAPABLE(ha)) 4562 return QLA_SUCCESS; 4563 4564 
ql_log(ql_log_info, vha, 0xd021, "EXLOGIN count: %d.\n", ql2xexlogins); 4565 max_cnt = 0; 4566 rval = qla_get_exlogin_status(vha, &size, &max_cnt); 4567 if (rval != QLA_SUCCESS) { 4568 ql_log_pci(ql_log_fatal, ha->pdev, 0xd029, 4569 "Failed to get exlogin status.\n"); 4570 return rval; 4571 } 4572 4573 temp = (ql2xexlogins > max_cnt) ? max_cnt : ql2xexlogins; 4574 temp *= size; 4575 4576 if (temp != ha->exlogin_size) { 4577 qla2x00_free_exlogin_buffer(ha); 4578 ha->exlogin_size = temp; 4579 4580 ql_log(ql_log_info, vha, 0xd024, 4581 "EXLOGIN: max_logins=%d, portdb=0x%x, total=%d.\n", 4582 max_cnt, size, temp); 4583 4584 ql_log(ql_log_info, vha, 0xd025, 4585 "EXLOGIN: requested size=0x%x\n", ha->exlogin_size); 4586 4587 /* Get consistent memory for extended logins */ 4588 ha->exlogin_buf = dma_alloc_coherent(&ha->pdev->dev, 4589 ha->exlogin_size, &ha->exlogin_buf_dma, GFP_KERNEL); 4590 if (!ha->exlogin_buf) { 4591 ql_log_pci(ql_log_fatal, ha->pdev, 0xd02a, 4592 "Failed to allocate memory for exlogin_buf_dma.\n"); 4593 return -ENOMEM; 4594 } 4595 } 4596 4597 /* Now configure the dma buffer */ 4598 rval = qla_set_exlogin_mem_cfg(vha, ha->exlogin_buf_dma); 4599 if (rval) { 4600 ql_log(ql_log_fatal, vha, 0xd033, 4601 "Setup extended login buffer ****FAILED****.\n"); 4602 qla2x00_free_exlogin_buffer(ha); 4603 } 4604 4605 return rval; 4606 } 4607 4608 /* 4609 * qla2x00_free_exlogin_buffer 4610 * 4611 * Input: 4612 * ha = adapter block pointer 4613 */ 4614 void 4615 qla2x00_free_exlogin_buffer(struct qla_hw_data *ha) 4616 { 4617 if (ha->exlogin_buf) { 4618 dma_free_coherent(&ha->pdev->dev, ha->exlogin_size, 4619 ha->exlogin_buf, ha->exlogin_buf_dma); 4620 ha->exlogin_buf = NULL; 4621 ha->exlogin_size = 0; 4622 } 4623 } 4624 4625 static void 4626 qla2x00_number_of_exch(scsi_qla_host_t *vha, u32 *ret_cnt, u16 max_cnt) 4627 { 4628 u32 temp; 4629 struct init_cb_81xx *icb = (struct init_cb_81xx *)&vha->hw->init_cb; 4630 *ret_cnt = FW_DEF_EXCHANGES_CNT; 4631 4632 if (max_cnt > vha->hw->max_exchg) 4633 max_cnt = vha->hw->max_exchg; 4634 4635 if (qla_ini_mode_enabled(vha)) { 4636 if (vha->ql2xiniexchg > max_cnt) 4637 vha->ql2xiniexchg = max_cnt; 4638 4639 if (vha->ql2xiniexchg > FW_DEF_EXCHANGES_CNT) 4640 *ret_cnt = vha->ql2xiniexchg; 4641 4642 } else if (qla_tgt_mode_enabled(vha)) { 4643 if (vha->ql2xexchoffld > max_cnt) { 4644 vha->ql2xexchoffld = max_cnt; 4645 icb->exchange_count = cpu_to_le16(vha->ql2xexchoffld); 4646 } 4647 4648 if (vha->ql2xexchoffld > FW_DEF_EXCHANGES_CNT) 4649 *ret_cnt = vha->ql2xexchoffld; 4650 } else if (qla_dual_mode_enabled(vha)) { 4651 temp = vha->ql2xiniexchg + vha->ql2xexchoffld; 4652 if (temp > max_cnt) { 4653 vha->ql2xiniexchg -= (temp - max_cnt)/2; 4654 vha->ql2xexchoffld -= (((temp - max_cnt)/2) + 1); 4655 temp = max_cnt; 4656 icb->exchange_count = cpu_to_le16(vha->ql2xexchoffld); 4657 } 4658 4659 if (temp > FW_DEF_EXCHANGES_CNT) 4660 *ret_cnt = temp; 4661 } 4662 } 4663 4664 int 4665 qla2x00_set_exchoffld_buffer(scsi_qla_host_t *vha) 4666 { 4667 int rval; 4668 u16 size, max_cnt; 4669 u32 actual_cnt, totsz; 4670 struct qla_hw_data *ha = vha->hw; 4671 4672 if (!ha->flags.exchoffld_enabled) 4673 return QLA_SUCCESS; 4674 4675 if (!IS_EXCHG_OFFLD_CAPABLE(ha)) 4676 return QLA_SUCCESS; 4677 4678 max_cnt = 0; 4679 rval = qla_get_exchoffld_status(vha, &size, &max_cnt); 4680 if (rval != QLA_SUCCESS) { 4681 ql_log_pci(ql_log_fatal, ha->pdev, 0xd012, 4682 "Failed to get exlogin status.\n"); 4683 return rval; 4684 } 4685 4686 qla2x00_number_of_exch(vha, &actual_cnt, max_cnt); 4687 
ql_log(ql_log_info, vha, 0xd014, 4688 "Actual exchange offload count: %d.\n", actual_cnt); 4689 4690 totsz = actual_cnt * size; 4691 4692 if (totsz != ha->exchoffld_size) { 4693 qla2x00_free_exchoffld_buffer(ha); 4694 if (actual_cnt <= FW_DEF_EXCHANGES_CNT) { 4695 ha->exchoffld_size = 0; 4696 ha->flags.exchoffld_enabled = 0; 4697 return QLA_SUCCESS; 4698 } 4699 4700 ha->exchoffld_size = totsz; 4701 4702 ql_log(ql_log_info, vha, 0xd016, 4703 "Exchange offload: max_count=%d, actual count=%d entry sz=0x%x, total sz=0x%x\n", 4704 max_cnt, actual_cnt, size, totsz); 4705 4706 ql_log(ql_log_info, vha, 0xd017, 4707 "Exchange Buffers requested size = 0x%x\n", 4708 ha->exchoffld_size); 4709 4710 /* Get consistent memory for extended logins */ 4711 ha->exchoffld_buf = dma_alloc_coherent(&ha->pdev->dev, 4712 ha->exchoffld_size, &ha->exchoffld_buf_dma, GFP_KERNEL); 4713 if (!ha->exchoffld_buf) { 4714 ql_log_pci(ql_log_fatal, ha->pdev, 0xd013, 4715 "Failed to allocate memory for Exchange Offload.\n"); 4716 4717 if (ha->max_exchg > 4718 (FW_DEF_EXCHANGES_CNT + REDUCE_EXCHANGES_CNT)) { 4719 ha->max_exchg -= REDUCE_EXCHANGES_CNT; 4720 } else if (ha->max_exchg > 4721 (FW_DEF_EXCHANGES_CNT + 512)) { 4722 ha->max_exchg -= 512; 4723 } else { 4724 ha->flags.exchoffld_enabled = 0; 4725 ql_log_pci(ql_log_fatal, ha->pdev, 0xd013, 4726 "Disabling Exchange offload due to lack of memory\n"); 4727 } 4728 ha->exchoffld_size = 0; 4729 4730 return -ENOMEM; 4731 } 4732 } else if (!ha->exchoffld_buf || (actual_cnt <= FW_DEF_EXCHANGES_CNT)) { 4733 /* pathological case */ 4734 qla2x00_free_exchoffld_buffer(ha); 4735 ha->exchoffld_size = 0; 4736 ha->flags.exchoffld_enabled = 0; 4737 ql_log(ql_log_info, vha, 0xd016, 4738 "Exchange offload not enable: offld size=%d, actual count=%d entry sz=0x%x, total sz=0x%x.\n", 4739 ha->exchoffld_size, actual_cnt, size, totsz); 4740 return 0; 4741 } 4742 4743 /* Now configure the dma buffer */ 4744 rval = qla_set_exchoffld_mem_cfg(vha); 4745 if (rval) { 4746 ql_log(ql_log_fatal, vha, 0xd02e, 4747 "Setup exchange offload buffer ****FAILED****.\n"); 4748 qla2x00_free_exchoffld_buffer(ha); 4749 } else { 4750 /* re-adjust number of target exchange */ 4751 struct init_cb_81xx *icb = (struct init_cb_81xx *)ha->init_cb; 4752 4753 if (qla_ini_mode_enabled(vha)) 4754 icb->exchange_count = 0; 4755 else 4756 icb->exchange_count = cpu_to_le16(vha->ql2xexchoffld); 4757 } 4758 4759 return rval; 4760 } 4761 4762 /* 4763 * qla2x00_free_exchoffld_buffer 4764 * 4765 * Input: 4766 * ha = adapter block pointer 4767 */ 4768 void 4769 qla2x00_free_exchoffld_buffer(struct qla_hw_data *ha) 4770 { 4771 if (ha->exchoffld_buf) { 4772 dma_free_coherent(&ha->pdev->dev, ha->exchoffld_size, 4773 ha->exchoffld_buf, ha->exchoffld_buf_dma); 4774 ha->exchoffld_buf = NULL; 4775 ha->exchoffld_size = 0; 4776 } 4777 } 4778 4779 /* 4780 * qla2x00_free_fw_dump 4781 * Frees fw dump stuff. 
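 * (FCE/EFT trace buffers, the firmware dump buffer and fwdt templates).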
4782 * 4783 * Input: 4784 * ha = adapter block pointer 4785 */ 4786 static void 4787 qla2x00_free_fw_dump(struct qla_hw_data *ha) 4788 { 4789 struct fwdt *fwdt = ha->fwdt; 4790 uint j; 4791 4792 if (ha->fce) 4793 dma_free_coherent(&ha->pdev->dev, 4794 FCE_SIZE, ha->fce, ha->fce_dma); 4795 4796 if (ha->eft) 4797 dma_free_coherent(&ha->pdev->dev, 4798 EFT_SIZE, ha->eft, ha->eft_dma); 4799 4800 vfree(ha->fw_dump); 4801 4802 ha->fce = NULL; 4803 ha->fce_dma = 0; 4804 ha->flags.fce_enabled = 0; 4805 ha->eft = NULL; 4806 ha->eft_dma = 0; 4807 ha->fw_dumped = false; 4808 ha->fw_dump_cap_flags = 0; 4809 ha->fw_dump_reading = 0; 4810 ha->fw_dump = NULL; 4811 ha->fw_dump_len = 0; 4812 4813 for (j = 0; j < 2; j++, fwdt++) { 4814 vfree(fwdt->template); 4815 fwdt->template = NULL; 4816 fwdt->length = 0; 4817 } 4818 } 4819 4820 /* 4821 * qla2x00_mem_free 4822 * Frees all adapter allocated memory. 4823 * 4824 * Input: 4825 * ha = adapter block pointer. 4826 */ 4827 static void 4828 qla2x00_mem_free(struct qla_hw_data *ha) 4829 { 4830 qla2x00_free_fw_dump(ha); 4831 4832 if (ha->mctp_dump) 4833 dma_free_coherent(&ha->pdev->dev, MCTP_DUMP_SIZE, ha->mctp_dump, 4834 ha->mctp_dump_dma); 4835 ha->mctp_dump = NULL; 4836 4837 mempool_destroy(ha->srb_mempool); 4838 ha->srb_mempool = NULL; 4839 4840 if (ha->dcbx_tlv) 4841 dma_free_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE, 4842 ha->dcbx_tlv, ha->dcbx_tlv_dma); 4843 ha->dcbx_tlv = NULL; 4844 4845 if (ha->xgmac_data) 4846 dma_free_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE, 4847 ha->xgmac_data, ha->xgmac_data_dma); 4848 ha->xgmac_data = NULL; 4849 4850 if (ha->sns_cmd) 4851 dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt), 4852 ha->sns_cmd, ha->sns_cmd_dma); 4853 ha->sns_cmd = NULL; 4854 ha->sns_cmd_dma = 0; 4855 4856 if (ha->ct_sns) 4857 dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt), 4858 ha->ct_sns, ha->ct_sns_dma); 4859 ha->ct_sns = NULL; 4860 ha->ct_sns_dma = 0; 4861 4862 if (ha->sfp_data) 4863 dma_free_coherent(&ha->pdev->dev, SFP_DEV_SIZE, ha->sfp_data, 4864 ha->sfp_data_dma); 4865 ha->sfp_data = NULL; 4866 4867 if (ha->flt) 4868 dma_free_coherent(&ha->pdev->dev, 4869 sizeof(struct qla_flt_header) + FLT_REGIONS_SIZE, 4870 ha->flt, ha->flt_dma); 4871 ha->flt = NULL; 4872 ha->flt_dma = 0; 4873 4874 if (ha->ms_iocb) 4875 dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma); 4876 ha->ms_iocb = NULL; 4877 ha->ms_iocb_dma = 0; 4878 4879 if (ha->sf_init_cb) 4880 dma_pool_free(ha->s_dma_pool, 4881 ha->sf_init_cb, ha->sf_init_cb_dma); 4882 4883 if (ha->ex_init_cb) 4884 dma_pool_free(ha->s_dma_pool, 4885 ha->ex_init_cb, ha->ex_init_cb_dma); 4886 ha->ex_init_cb = NULL; 4887 ha->ex_init_cb_dma = 0; 4888 4889 if (ha->async_pd) 4890 dma_pool_free(ha->s_dma_pool, ha->async_pd, ha->async_pd_dma); 4891 ha->async_pd = NULL; 4892 ha->async_pd_dma = 0; 4893 4894 dma_pool_destroy(ha->s_dma_pool); 4895 ha->s_dma_pool = NULL; 4896 4897 if (ha->gid_list) 4898 dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha), 4899 ha->gid_list, ha->gid_list_dma); 4900 ha->gid_list = NULL; 4901 ha->gid_list_dma = 0; 4902 4903 if (IS_QLA82XX(ha)) { 4904 if (!list_empty(&ha->gbl_dsd_list)) { 4905 struct dsd_dma *dsd_ptr, *tdsd_ptr; 4906 4907 /* clean up allocated prev pool */ 4908 list_for_each_entry_safe(dsd_ptr, 4909 tdsd_ptr, &ha->gbl_dsd_list, list) { 4910 dma_pool_free(ha->dl_dma_pool, 4911 dsd_ptr->dsd_addr, dsd_ptr->dsd_list_dma); 4912 list_del(&dsd_ptr->list); 4913 kfree(dsd_ptr); 4914 } 4915 } 4916 } 4917 4918 dma_pool_destroy(ha->dl_dma_pool); 4919 
ha->dl_dma_pool = NULL; 4920 4921 dma_pool_destroy(ha->fcp_cmnd_dma_pool); 4922 ha->fcp_cmnd_dma_pool = NULL; 4923 4924 mempool_destroy(ha->ctx_mempool); 4925 ha->ctx_mempool = NULL; 4926 4927 if (ql2xenabledif && ha->dif_bundl_pool) { 4928 struct dsd_dma *dsd, *nxt; 4929 4930 list_for_each_entry_safe(dsd, nxt, &ha->pool.unusable.head, 4931 list) { 4932 list_del(&dsd->list); 4933 dma_pool_free(ha->dif_bundl_pool, dsd->dsd_addr, 4934 dsd->dsd_list_dma); 4935 ha->dif_bundle_dma_allocs--; 4936 kfree(dsd); 4937 ha->dif_bundle_kallocs--; 4938 ha->pool.unusable.count--; 4939 } 4940 list_for_each_entry_safe(dsd, nxt, &ha->pool.good.head, list) { 4941 list_del(&dsd->list); 4942 dma_pool_free(ha->dif_bundl_pool, dsd->dsd_addr, 4943 dsd->dsd_list_dma); 4944 ha->dif_bundle_dma_allocs--; 4945 kfree(dsd); 4946 ha->dif_bundle_kallocs--; 4947 } 4948 } 4949 4950 dma_pool_destroy(ha->dif_bundl_pool); 4951 ha->dif_bundl_pool = NULL; 4952 4953 qlt_mem_free(ha); 4954 qla_remove_hostmap(ha); 4955 4956 if (ha->init_cb) 4957 dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, 4958 ha->init_cb, ha->init_cb_dma); 4959 4960 dma_pool_destroy(ha->purex_dma_pool); 4961 ha->purex_dma_pool = NULL; 4962 4963 if (ha->elsrej.c) { 4964 dma_free_coherent(&ha->pdev->dev, ha->elsrej.size, 4965 ha->elsrej.c, ha->elsrej.cdma); 4966 ha->elsrej.c = NULL; 4967 } 4968 4969 ha->init_cb = NULL; 4970 ha->init_cb_dma = 0; 4971 4972 vfree(ha->optrom_buffer); 4973 ha->optrom_buffer = NULL; 4974 kfree(ha->nvram); 4975 ha->nvram = NULL; 4976 kfree(ha->npiv_info); 4977 ha->npiv_info = NULL; 4978 kfree(ha->swl); 4979 ha->swl = NULL; 4980 kfree(ha->loop_id_map); 4981 ha->sf_init_cb = NULL; 4982 ha->sf_init_cb_dma = 0; 4983 ha->loop_id_map = NULL; 4984 } 4985 4986 struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht, 4987 struct qla_hw_data *ha) 4988 { 4989 struct Scsi_Host *host; 4990 struct scsi_qla_host *vha = NULL; 4991 4992 host = scsi_host_alloc(sht, sizeof(scsi_qla_host_t)); 4993 if (!host) { 4994 ql_log_pci(ql_log_fatal, ha->pdev, 0x0107, 4995 "Failed to allocate host from the scsi layer, aborting.\n"); 4996 return NULL; 4997 } 4998 4999 /* Clear our data area */ 5000 vha = shost_priv(host); 5001 memset(vha, 0, sizeof(scsi_qla_host_t)); 5002 5003 vha->host = host; 5004 vha->host_no = host->host_no; 5005 vha->hw = ha; 5006 5007 vha->qlini_mode = ql2x_ini_mode; 5008 vha->ql2xexchoffld = ql2xexchoffld; 5009 vha->ql2xiniexchg = ql2xiniexchg; 5010 5011 INIT_LIST_HEAD(&vha->vp_fcports); 5012 INIT_LIST_HEAD(&vha->work_list); 5013 INIT_LIST_HEAD(&vha->list); 5014 INIT_LIST_HEAD(&vha->qla_cmd_list); 5015 INIT_LIST_HEAD(&vha->logo_list); 5016 INIT_LIST_HEAD(&vha->plogi_ack_list); 5017 INIT_LIST_HEAD(&vha->qp_list); 5018 INIT_LIST_HEAD(&vha->gnl.fcports); 5019 INIT_LIST_HEAD(&vha->gpnid_list); 5020 INIT_WORK(&vha->iocb_work, qla2x00_iocb_work_fn); 5021 5022 INIT_LIST_HEAD(&vha->purex_list.head); 5023 spin_lock_init(&vha->purex_list.lock); 5024 5025 spin_lock_init(&vha->work_lock); 5026 spin_lock_init(&vha->cmd_list_lock); 5027 init_waitqueue_head(&vha->fcport_waitQ); 5028 init_waitqueue_head(&vha->vref_waitq); 5029 qla_enode_init(vha); 5030 qla_edb_init(vha); 5031 5032 5033 vha->gnl.size = sizeof(struct get_name_list_extended) * 5034 (ha->max_loop_id + 1); 5035 vha->gnl.l = dma_alloc_coherent(&ha->pdev->dev, 5036 vha->gnl.size, &vha->gnl.ldma, GFP_KERNEL); 5037 if (!vha->gnl.l) { 5038 ql_log(ql_log_fatal, vha, 0xd04a, 5039 "Alloc failed for name list.\n"); 5040 scsi_host_put(vha->host); 5041 return NULL; 5042 } 5043 5044 /* 
todo: what about ext login? */ 5045 vha->scan.size = ha->max_fibre_devices * sizeof(struct fab_scan_rp); 5046 vha->scan.l = vmalloc(vha->scan.size); 5047 if (!vha->scan.l) { 5048 ql_log(ql_log_fatal, vha, 0xd04a, 5049 "Alloc failed for scan database.\n"); 5050 dma_free_coherent(&ha->pdev->dev, vha->gnl.size, 5051 vha->gnl.l, vha->gnl.ldma); 5052 vha->gnl.l = NULL; 5053 scsi_host_put(vha->host); 5054 return NULL; 5055 } 5056 INIT_DELAYED_WORK(&vha->scan.scan_work, qla_scan_work_fn); 5057 5058 sprintf(vha->host_str, "%s_%lu", QLA2XXX_DRIVER_NAME, vha->host_no); 5059 ql_dbg(ql_dbg_init, vha, 0x0041, 5060 "Allocated the host=%p hw=%p vha=%p dev_name=%s", 5061 vha->host, vha->hw, vha, 5062 dev_name(&(ha->pdev->dev))); 5063 5064 return vha; 5065 } 5066 5067 struct qla_work_evt * 5068 qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type) 5069 { 5070 struct qla_work_evt *e; 5071 5072 if (test_bit(UNLOADING, &vha->dpc_flags)) 5073 return NULL; 5074 5075 if (qla_vha_mark_busy(vha)) 5076 return NULL; 5077 5078 e = kzalloc(sizeof(struct qla_work_evt), GFP_ATOMIC); 5079 if (!e) { 5080 QLA_VHA_MARK_NOT_BUSY(vha); 5081 return NULL; 5082 } 5083 5084 INIT_LIST_HEAD(&e->list); 5085 e->type = type; 5086 e->flags = QLA_EVT_FLAG_FREE; 5087 return e; 5088 } 5089 5090 int 5091 qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e) 5092 { 5093 unsigned long flags; 5094 bool q = false; 5095 5096 spin_lock_irqsave(&vha->work_lock, flags); 5097 list_add_tail(&e->list, &vha->work_list); 5098 5099 if (!test_and_set_bit(IOCB_WORK_ACTIVE, &vha->dpc_flags)) 5100 q = true; 5101 5102 spin_unlock_irqrestore(&vha->work_lock, flags); 5103 5104 if (q) 5105 queue_work(vha->hw->wq, &vha->iocb_work); 5106 5107 return QLA_SUCCESS; 5108 } 5109 5110 int 5111 qla2x00_post_aen_work(struct scsi_qla_host *vha, enum fc_host_event_code code, 5112 u32 data) 5113 { 5114 struct qla_work_evt *e; 5115 5116 e = qla2x00_alloc_work(vha, QLA_EVT_AEN); 5117 if (!e) 5118 return QLA_FUNCTION_FAILED; 5119 5120 e->u.aen.code = code; 5121 e->u.aen.data = data; 5122 return qla2x00_post_work(vha, e); 5123 } 5124 5125 int 5126 qla2x00_post_idc_ack_work(struct scsi_qla_host *vha, uint16_t *mb) 5127 { 5128 struct qla_work_evt *e; 5129 5130 e = qla2x00_alloc_work(vha, QLA_EVT_IDC_ACK); 5131 if (!e) 5132 return QLA_FUNCTION_FAILED; 5133 5134 memcpy(e->u.idc_ack.mb, mb, QLA_IDC_ACK_REGS * sizeof(uint16_t)); 5135 return qla2x00_post_work(vha, e); 5136 } 5137 5138 #define qla2x00_post_async_work(name, type) \ 5139 int qla2x00_post_async_##name##_work( \ 5140 struct scsi_qla_host *vha, \ 5141 fc_port_t *fcport, uint16_t *data) \ 5142 { \ 5143 struct qla_work_evt *e; \ 5144 \ 5145 e = qla2x00_alloc_work(vha, type); \ 5146 if (!e) \ 5147 return QLA_FUNCTION_FAILED; \ 5148 \ 5149 e->u.logio.fcport = fcport; \ 5150 if (data) { \ 5151 e->u.logio.data[0] = data[0]; \ 5152 e->u.logio.data[1] = data[1]; \ 5153 } \ 5154 fcport->flags |= FCF_ASYNC_ACTIVE; \ 5155 return qla2x00_post_work(vha, e); \ 5156 } 5157 5158 qla2x00_post_async_work(login, QLA_EVT_ASYNC_LOGIN); 5159 qla2x00_post_async_work(logout, QLA_EVT_ASYNC_LOGOUT); 5160 qla2x00_post_async_work(adisc, QLA_EVT_ASYNC_ADISC); 5161 qla2x00_post_async_work(prlo, QLA_EVT_ASYNC_PRLO); 5162 qla2x00_post_async_work(prlo_done, QLA_EVT_ASYNC_PRLO_DONE); 5163 5164 int 5165 qla2x00_post_uevent_work(struct scsi_qla_host *vha, u32 code) 5166 { 5167 struct qla_work_evt *e; 5168 5169 e = qla2x00_alloc_work(vha, QLA_EVT_UEVENT); 5170 if (!e) 5171 return QLA_FUNCTION_FAILED; 5172 5173 e->u.uevent.code = 
code; 5174 return qla2x00_post_work(vha, e); 5175 } 5176 5177 static void 5178 qla2x00_uevent_emit(struct scsi_qla_host *vha, u32 code) 5179 { 5180 char event_string[40]; 5181 char *envp[] = { event_string, NULL }; 5182 5183 switch (code) { 5184 case QLA_UEVENT_CODE_FW_DUMP: 5185 snprintf(event_string, sizeof(event_string), "FW_DUMP=%lu", 5186 vha->host_no); 5187 break; 5188 default: 5189 /* do nothing */ 5190 break; 5191 } 5192 kobject_uevent_env(&vha->hw->pdev->dev.kobj, KOBJ_CHANGE, envp); 5193 } 5194 5195 int 5196 qlafx00_post_aenfx_work(struct scsi_qla_host *vha, uint32_t evtcode, 5197 uint32_t *data, int cnt) 5198 { 5199 struct qla_work_evt *e; 5200 5201 e = qla2x00_alloc_work(vha, QLA_EVT_AENFX); 5202 if (!e) 5203 return QLA_FUNCTION_FAILED; 5204 5205 e->u.aenfx.evtcode = evtcode; 5206 e->u.aenfx.count = cnt; 5207 memcpy(e->u.aenfx.mbx, data, sizeof(*data) * cnt); 5208 return qla2x00_post_work(vha, e); 5209 } 5210 5211 void qla24xx_sched_upd_fcport(fc_port_t *fcport) 5212 { 5213 unsigned long flags; 5214 5215 if (IS_SW_RESV_ADDR(fcport->d_id)) 5216 return; 5217 5218 spin_lock_irqsave(&fcport->vha->work_lock, flags); 5219 if (fcport->disc_state == DSC_UPD_FCPORT) { 5220 spin_unlock_irqrestore(&fcport->vha->work_lock, flags); 5221 return; 5222 } 5223 fcport->jiffies_at_registration = jiffies; 5224 fcport->sec_since_registration = 0; 5225 fcport->next_disc_state = DSC_DELETED; 5226 qla2x00_set_fcport_disc_state(fcport, DSC_UPD_FCPORT); 5227 spin_unlock_irqrestore(&fcport->vha->work_lock, flags); 5228 5229 queue_work(system_unbound_wq, &fcport->reg_work); 5230 } 5231 5232 static 5233 void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e) 5234 { 5235 unsigned long flags; 5236 fc_port_t *fcport = NULL, *tfcp; 5237 struct qlt_plogi_ack_t *pla = 5238 (struct qlt_plogi_ack_t *)e->u.new_sess.pla; 5239 uint8_t free_fcport = 0; 5240 5241 ql_dbg(ql_dbg_disc, vha, 0xffff, 5242 "%s %d %8phC enter\n", 5243 __func__, __LINE__, e->u.new_sess.port_name); 5244 5245 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); 5246 fcport = qla2x00_find_fcport_by_wwpn(vha, e->u.new_sess.port_name, 1); 5247 if (fcport) { 5248 fcport->d_id = e->u.new_sess.id; 5249 if (pla) { 5250 fcport->fw_login_state = DSC_LS_PLOGI_PEND; 5251 memcpy(fcport->node_name, 5252 pla->iocb.u.isp24.u.plogi.node_name, 5253 WWN_SIZE); 5254 qlt_plogi_ack_link(vha, pla, fcport, QLT_PLOGI_LINK_SAME_WWN); 5255 /* we took an extra ref_count to prevent PLOGI ACK when 5256 * fcport/sess has not been created. 
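 * Drop that extra reference now that an existing fcport has been found.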
5257 */ 5258 pla->ref_count--; 5259 } 5260 } else { 5261 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 5262 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); 5263 if (fcport) { 5264 fcport->d_id = e->u.new_sess.id; 5265 fcport->flags |= FCF_FABRIC_DEVICE; 5266 fcport->fw_login_state = DSC_LS_PLOGI_PEND; 5267 fcport->tgt_short_link_down_cnt = 0; 5268 5269 memcpy(fcport->port_name, e->u.new_sess.port_name, 5270 WWN_SIZE); 5271 5272 fcport->fc4_type = e->u.new_sess.fc4_type; 5273 if (NVME_PRIORITY(vha->hw, fcport)) 5274 fcport->do_prli_nvme = 1; 5275 else 5276 fcport->do_prli_nvme = 0; 5277 5278 if (e->u.new_sess.fc4_type & FS_FCP_IS_N2N) { 5279 fcport->dm_login_expire = jiffies + 5280 QLA_N2N_WAIT_TIME * HZ; 5281 fcport->fc4_type = FS_FC4TYPE_FCP; 5282 fcport->n2n_flag = 1; 5283 if (vha->flags.nvme_enabled) 5284 fcport->fc4_type |= FS_FC4TYPE_NVME; 5285 } 5286 5287 } else { 5288 ql_dbg(ql_dbg_disc, vha, 0xffff, 5289 "%s %8phC mem alloc fail.\n", 5290 __func__, e->u.new_sess.port_name); 5291 5292 if (pla) { 5293 list_del(&pla->list); 5294 kmem_cache_free(qla_tgt_plogi_cachep, pla); 5295 } 5296 return; 5297 } 5298 5299 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); 5300 /* search again to make sure no one else got ahead */ 5301 tfcp = qla2x00_find_fcport_by_wwpn(vha, 5302 e->u.new_sess.port_name, 1); 5303 if (tfcp) { 5304 /* should rarily happen */ 5305 ql_dbg(ql_dbg_disc, vha, 0xffff, 5306 "%s %8phC found existing fcport b4 add. DS %d LS %d\n", 5307 __func__, tfcp->port_name, tfcp->disc_state, 5308 tfcp->fw_login_state); 5309 5310 free_fcport = 1; 5311 } else { 5312 list_add_tail(&fcport->list, &vha->vp_fcports); 5313 5314 } 5315 if (pla) { 5316 qlt_plogi_ack_link(vha, pla, fcport, 5317 QLT_PLOGI_LINK_SAME_WWN); 5318 pla->ref_count--; 5319 } 5320 } 5321 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 5322 5323 if (fcport) { 5324 fcport->id_changed = 1; 5325 fcport->scan_state = QLA_FCPORT_FOUND; 5326 fcport->chip_reset = vha->hw->base_qpair->chip_reset; 5327 memcpy(fcport->node_name, e->u.new_sess.node_name, WWN_SIZE); 5328 5329 if (pla) { 5330 if (pla->iocb.u.isp24.status_subcode == ELS_PRLI) { 5331 u16 wd3_lo; 5332 5333 fcport->fw_login_state = DSC_LS_PRLI_PEND; 5334 fcport->local = 0; 5335 fcport->loop_id = 5336 le16_to_cpu( 5337 pla->iocb.u.isp24.nport_handle); 5338 fcport->fw_login_state = DSC_LS_PRLI_PEND; 5339 wd3_lo = 5340 le16_to_cpu( 5341 pla->iocb.u.isp24.u.prli.wd3_lo); 5342 5343 if (wd3_lo & BIT_7) 5344 fcport->conf_compl_supported = 1; 5345 5346 if ((wd3_lo & BIT_4) == 0) 5347 fcport->port_type = FCT_INITIATOR; 5348 else 5349 fcport->port_type = FCT_TARGET; 5350 } 5351 qlt_plogi_ack_unref(vha, pla); 5352 } else { 5353 fc_port_t *dfcp = NULL; 5354 5355 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); 5356 tfcp = qla2x00_find_fcport_by_nportid(vha, 5357 &e->u.new_sess.id, 1); 5358 if (tfcp && (tfcp != fcport)) { 5359 /* 5360 * We have a conflict fcport with same NportID. 5361 */ 5362 ql_dbg(ql_dbg_disc, vha, 0xffff, 5363 "%s %8phC found conflict b4 add. 
DS %d LS %d\n", 5364 __func__, tfcp->port_name, tfcp->disc_state, 5365 tfcp->fw_login_state); 5366 5367 switch (tfcp->disc_state) { 5368 case DSC_DELETED: 5369 break; 5370 case DSC_DELETE_PEND: 5371 fcport->login_pause = 1; 5372 tfcp->conflict = fcport; 5373 break; 5374 default: 5375 fcport->login_pause = 1; 5376 tfcp->conflict = fcport; 5377 dfcp = tfcp; 5378 break; 5379 } 5380 } 5381 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 5382 if (dfcp) 5383 qlt_schedule_sess_for_deletion(tfcp); 5384 5385 if (N2N_TOPO(vha->hw)) { 5386 fcport->flags &= ~FCF_FABRIC_DEVICE; 5387 fcport->keep_nport_handle = 1; 5388 if (vha->flags.nvme_enabled) { 5389 fcport->fc4_type = 5390 (FS_FC4TYPE_NVME | FS_FC4TYPE_FCP); 5391 fcport->n2n_flag = 1; 5392 } 5393 fcport->fw_login_state = 0; 5394 5395 schedule_delayed_work(&vha->scan.scan_work, 5); 5396 } else { 5397 qla24xx_fcport_handle_login(vha, fcport); 5398 } 5399 } 5400 } 5401 5402 if (free_fcport) { 5403 qla2x00_free_fcport(fcport); 5404 if (pla) { 5405 list_del(&pla->list); 5406 kmem_cache_free(qla_tgt_plogi_cachep, pla); 5407 } 5408 } 5409 } 5410 5411 static void qla_sp_retry(struct scsi_qla_host *vha, struct qla_work_evt *e) 5412 { 5413 struct srb *sp = e->u.iosb.sp; 5414 int rval; 5415 5416 rval = qla2x00_start_sp(sp); 5417 if (rval != QLA_SUCCESS) { 5418 ql_dbg(ql_dbg_disc, vha, 0x2043, 5419 "%s: %s: Re-issue IOCB failed (%d).\n", 5420 __func__, sp->name, rval); 5421 qla24xx_sp_unmap(vha, sp); 5422 } 5423 } 5424 5425 void 5426 qla2x00_do_work(struct scsi_qla_host *vha) 5427 { 5428 struct qla_work_evt *e, *tmp; 5429 unsigned long flags; 5430 LIST_HEAD(work); 5431 int rc; 5432 5433 spin_lock_irqsave(&vha->work_lock, flags); 5434 list_splice_init(&vha->work_list, &work); 5435 spin_unlock_irqrestore(&vha->work_lock, flags); 5436 5437 list_for_each_entry_safe(e, tmp, &work, list) { 5438 rc = QLA_SUCCESS; 5439 switch (e->type) { 5440 case QLA_EVT_AEN: 5441 fc_host_post_event(vha->host, fc_get_event_number(), 5442 e->u.aen.code, e->u.aen.data); 5443 break; 5444 case QLA_EVT_IDC_ACK: 5445 qla81xx_idc_ack(vha, e->u.idc_ack.mb); 5446 break; 5447 case QLA_EVT_ASYNC_LOGIN: 5448 qla2x00_async_login(vha, e->u.logio.fcport, 5449 e->u.logio.data); 5450 break; 5451 case QLA_EVT_ASYNC_LOGOUT: 5452 rc = qla2x00_async_logout(vha, e->u.logio.fcport); 5453 break; 5454 case QLA_EVT_ASYNC_ADISC: 5455 qla2x00_async_adisc(vha, e->u.logio.fcport, 5456 e->u.logio.data); 5457 break; 5458 case QLA_EVT_UEVENT: 5459 qla2x00_uevent_emit(vha, e->u.uevent.code); 5460 break; 5461 case QLA_EVT_AENFX: 5462 qlafx00_process_aen(vha, e); 5463 break; 5464 case QLA_EVT_GPNID: 5465 qla24xx_async_gpnid(vha, &e->u.gpnid.id); 5466 break; 5467 case QLA_EVT_UNMAP: 5468 qla24xx_sp_unmap(vha, e->u.iosb.sp); 5469 break; 5470 case QLA_EVT_RELOGIN: 5471 qla2x00_relogin(vha); 5472 break; 5473 case QLA_EVT_NEW_SESS: 5474 qla24xx_create_new_sess(vha, e); 5475 break; 5476 case QLA_EVT_GPDB: 5477 qla24xx_async_gpdb(vha, e->u.fcport.fcport, 5478 e->u.fcport.opt); 5479 break; 5480 case QLA_EVT_PRLI: 5481 qla24xx_async_prli(vha, e->u.fcport.fcport); 5482 break; 5483 case QLA_EVT_GPSC: 5484 qla24xx_async_gpsc(vha, e->u.fcport.fcport); 5485 break; 5486 case QLA_EVT_GNL: 5487 qla24xx_async_gnl(vha, e->u.fcport.fcport); 5488 break; 5489 case QLA_EVT_NACK: 5490 qla24xx_do_nack_work(vha, e); 5491 break; 5492 case QLA_EVT_ASYNC_PRLO: 5493 rc = qla2x00_async_prlo(vha, e->u.logio.fcport); 5494 break; 5495 case QLA_EVT_ASYNC_PRLO_DONE: 5496 qla2x00_async_prlo_done(vha, e->u.logio.fcport, 5497 e->u.logio.data); 5498 
break; 5499 case QLA_EVT_GPNFT: 5500 qla24xx_async_gpnft(vha, e->u.gpnft.fc4_type, 5501 e->u.gpnft.sp); 5502 break; 5503 case QLA_EVT_GPNFT_DONE: 5504 qla24xx_async_gpnft_done(vha, e->u.iosb.sp); 5505 break; 5506 case QLA_EVT_GNNFT_DONE: 5507 qla24xx_async_gnnft_done(vha, e->u.iosb.sp); 5508 break; 5509 case QLA_EVT_GNNID: 5510 qla24xx_async_gnnid(vha, e->u.fcport.fcport); 5511 break; 5512 case QLA_EVT_GFPNID: 5513 qla24xx_async_gfpnid(vha, e->u.fcport.fcport); 5514 break; 5515 case QLA_EVT_SP_RETRY: 5516 qla_sp_retry(vha, e); 5517 break; 5518 case QLA_EVT_IIDMA: 5519 qla_do_iidma_work(vha, e->u.fcport.fcport); 5520 break; 5521 case QLA_EVT_ELS_PLOGI: 5522 qla24xx_els_dcmd2_iocb(vha, ELS_DCMD_PLOGI, 5523 e->u.fcport.fcport, false); 5524 break; 5525 case QLA_EVT_SA_REPLACE: 5526 rc = qla24xx_issue_sa_replace_iocb(vha, e); 5527 break; 5528 } 5529 5530 if (rc == EAGAIN) { 5531 /* put 'work' at head of 'vha->work_list' */ 5532 spin_lock_irqsave(&vha->work_lock, flags); 5533 list_splice(&work, &vha->work_list); 5534 spin_unlock_irqrestore(&vha->work_lock, flags); 5535 break; 5536 } 5537 list_del_init(&e->list); 5538 if (e->flags & QLA_EVT_FLAG_FREE) 5539 kfree(e); 5540 5541 /* For each work completed decrement vha ref count */ 5542 QLA_VHA_MARK_NOT_BUSY(vha); 5543 } 5544 } 5545 5546 int qla24xx_post_relogin_work(struct scsi_qla_host *vha) 5547 { 5548 struct qla_work_evt *e; 5549 5550 e = qla2x00_alloc_work(vha, QLA_EVT_RELOGIN); 5551 5552 if (!e) { 5553 set_bit(RELOGIN_NEEDED, &vha->dpc_flags); 5554 return QLA_FUNCTION_FAILED; 5555 } 5556 5557 return qla2x00_post_work(vha, e); 5558 } 5559 5560 /* Relogins all the fcports of a vport 5561 * Context: dpc thread 5562 */ 5563 void qla2x00_relogin(struct scsi_qla_host *vha) 5564 { 5565 fc_port_t *fcport; 5566 int status, relogin_needed = 0; 5567 struct event_arg ea; 5568 5569 list_for_each_entry(fcport, &vha->vp_fcports, list) { 5570 /* 5571 * If the port is not ONLINE then try to login 5572 * to it if we haven't run out of retries. 
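 * (fcport->login_retry is decremented in the local-loop path below;
 * fabric ports go through qla24xx_handle_relogin_event().)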
5573 */ 5574 if (atomic_read(&fcport->state) != FCS_ONLINE && 5575 fcport->login_retry) { 5576 if (fcport->scan_state != QLA_FCPORT_FOUND || 5577 fcport->disc_state == DSC_LOGIN_AUTH_PEND || 5578 fcport->disc_state == DSC_LOGIN_COMPLETE) 5579 continue; 5580 5581 if (fcport->flags & (FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE) || 5582 fcport->disc_state == DSC_DELETE_PEND) { 5583 relogin_needed = 1; 5584 } else { 5585 if (vha->hw->current_topology != ISP_CFG_NL) { 5586 memset(&ea, 0, sizeof(ea)); 5587 ea.fcport = fcport; 5588 qla24xx_handle_relogin_event(vha, &ea); 5589 } else if (vha->hw->current_topology == 5590 ISP_CFG_NL && 5591 IS_QLA2XXX_MIDTYPE(vha->hw)) { 5592 (void)qla24xx_fcport_handle_login(vha, 5593 fcport); 5594 } else if (vha->hw->current_topology == 5595 ISP_CFG_NL) { 5596 fcport->login_retry--; 5597 status = 5598 qla2x00_local_device_login(vha, 5599 fcport); 5600 if (status == QLA_SUCCESS) { 5601 fcport->old_loop_id = 5602 fcport->loop_id; 5603 ql_dbg(ql_dbg_disc, vha, 0x2003, 5604 "Port login OK: logged in ID 0x%x.\n", 5605 fcport->loop_id); 5606 qla2x00_update_fcport 5607 (vha, fcport); 5608 } else if (status == 1) { 5609 set_bit(RELOGIN_NEEDED, 5610 &vha->dpc_flags); 5611 /* retry the login again */ 5612 ql_dbg(ql_dbg_disc, vha, 0x2007, 5613 "Retrying %d login again loop_id 0x%x.\n", 5614 fcport->login_retry, 5615 fcport->loop_id); 5616 } else { 5617 fcport->login_retry = 0; 5618 } 5619 5620 if (fcport->login_retry == 0 && 5621 status != QLA_SUCCESS) 5622 qla2x00_clear_loop_id(fcport); 5623 } 5624 } 5625 } 5626 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) 5627 break; 5628 } 5629 5630 if (relogin_needed) 5631 set_bit(RELOGIN_NEEDED, &vha->dpc_flags); 5632 5633 ql_dbg(ql_dbg_disc, vha, 0x400e, 5634 "Relogin end.\n"); 5635 } 5636 5637 /* Schedule work on any of the dpc-workqueues */ 5638 void 5639 qla83xx_schedule_work(scsi_qla_host_t *base_vha, int work_code) 5640 { 5641 struct qla_hw_data *ha = base_vha->hw; 5642 5643 switch (work_code) { 5644 case MBA_IDC_AEN: /* 0x8200 */ 5645 if (ha->dpc_lp_wq) 5646 queue_work(ha->dpc_lp_wq, &ha->idc_aen); 5647 break; 5648 5649 case QLA83XX_NIC_CORE_RESET: /* 0x1 */ 5650 if (!ha->flags.nic_core_reset_hdlr_active) { 5651 if (ha->dpc_hp_wq) 5652 queue_work(ha->dpc_hp_wq, &ha->nic_core_reset); 5653 } else 5654 ql_dbg(ql_dbg_p3p, base_vha, 0xb05e, 5655 "NIC Core reset is already active. 
Skip " 5656 "scheduling it again.\n"); 5657 break; 5658 case QLA83XX_IDC_STATE_HANDLER: /* 0x2 */ 5659 if (ha->dpc_hp_wq) 5660 queue_work(ha->dpc_hp_wq, &ha->idc_state_handler); 5661 break; 5662 case QLA83XX_NIC_CORE_UNRECOVERABLE: /* 0x3 */ 5663 if (ha->dpc_hp_wq) 5664 queue_work(ha->dpc_hp_wq, &ha->nic_core_unrecoverable); 5665 break; 5666 default: 5667 ql_log(ql_log_warn, base_vha, 0xb05f, 5668 "Unknown work-code=0x%x.\n", work_code); 5669 } 5670 5671 return; 5672 } 5673 5674 /* Work: Perform NIC Core Unrecoverable state handling */ 5675 void 5676 qla83xx_nic_core_unrecoverable_work(struct work_struct *work) 5677 { 5678 struct qla_hw_data *ha = 5679 container_of(work, struct qla_hw_data, nic_core_unrecoverable); 5680 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 5681 uint32_t dev_state = 0; 5682 5683 qla83xx_idc_lock(base_vha, 0); 5684 qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state); 5685 qla83xx_reset_ownership(base_vha); 5686 if (ha->flags.nic_core_reset_owner) { 5687 ha->flags.nic_core_reset_owner = 0; 5688 qla83xx_wr_reg(base_vha, QLA83XX_IDC_DEV_STATE, 5689 QLA8XXX_DEV_FAILED); 5690 ql_log(ql_log_info, base_vha, 0xb060, "HW State: FAILED.\n"); 5691 qla83xx_schedule_work(base_vha, QLA83XX_IDC_STATE_HANDLER); 5692 } 5693 qla83xx_idc_unlock(base_vha, 0); 5694 } 5695 5696 /* Work: Execute IDC state handler */ 5697 void 5698 qla83xx_idc_state_handler_work(struct work_struct *work) 5699 { 5700 struct qla_hw_data *ha = 5701 container_of(work, struct qla_hw_data, idc_state_handler); 5702 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 5703 uint32_t dev_state = 0; 5704 5705 qla83xx_idc_lock(base_vha, 0); 5706 qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state); 5707 if (dev_state == QLA8XXX_DEV_FAILED || 5708 dev_state == QLA8XXX_DEV_NEED_QUIESCENT) 5709 qla83xx_idc_state_handler(base_vha); 5710 qla83xx_idc_unlock(base_vha, 0); 5711 } 5712 5713 static int 5714 qla83xx_check_nic_core_fw_alive(scsi_qla_host_t *base_vha) 5715 { 5716 int rval = QLA_SUCCESS; 5717 unsigned long heart_beat_wait = jiffies + (1 * HZ); 5718 uint32_t heart_beat_counter1, heart_beat_counter2; 5719 5720 do { 5721 if (time_after(jiffies, heart_beat_wait)) { 5722 ql_dbg(ql_dbg_p3p, base_vha, 0xb07c, 5723 "Nic Core f/w is not alive.\n"); 5724 rval = QLA_FUNCTION_FAILED; 5725 break; 5726 } 5727 5728 qla83xx_idc_lock(base_vha, 0); 5729 qla83xx_rd_reg(base_vha, QLA83XX_FW_HEARTBEAT, 5730 &heart_beat_counter1); 5731 qla83xx_idc_unlock(base_vha, 0); 5732 msleep(100); 5733 qla83xx_idc_lock(base_vha, 0); 5734 qla83xx_rd_reg(base_vha, QLA83XX_FW_HEARTBEAT, 5735 &heart_beat_counter2); 5736 qla83xx_idc_unlock(base_vha, 0); 5737 } while (heart_beat_counter1 == heart_beat_counter2); 5738 5739 return rval; 5740 } 5741 5742 /* Work: Perform NIC Core Reset handling */ 5743 void 5744 qla83xx_nic_core_reset_work(struct work_struct *work) 5745 { 5746 struct qla_hw_data *ha = 5747 container_of(work, struct qla_hw_data, nic_core_reset); 5748 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 5749 uint32_t dev_state = 0; 5750 5751 if (IS_QLA2031(ha)) { 5752 if (qla2xxx_mctp_dump(base_vha) != QLA_SUCCESS) 5753 ql_log(ql_log_warn, base_vha, 0xb081, 5754 "Failed to dump mctp\n"); 5755 return; 5756 } 5757 5758 if (!ha->flags.nic_core_reset_hdlr_active) { 5759 if (qla83xx_check_nic_core_fw_alive(base_vha) == QLA_SUCCESS) { 5760 qla83xx_idc_lock(base_vha, 0); 5761 qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, 5762 &dev_state); 5763 qla83xx_idc_unlock(base_vha, 0); 5764 if (dev_state != 
QLA8XXX_DEV_NEED_RESET) { 5765 ql_dbg(ql_dbg_p3p, base_vha, 0xb07a, 5766 "Nic Core f/w is alive.\n"); 5767 return; 5768 } 5769 } 5770 5771 ha->flags.nic_core_reset_hdlr_active = 1; 5772 if (qla83xx_nic_core_reset(base_vha)) { 5773 /* NIC Core reset failed. */ 5774 ql_dbg(ql_dbg_p3p, base_vha, 0xb061, 5775 "NIC Core reset failed.\n"); 5776 } 5777 ha->flags.nic_core_reset_hdlr_active = 0; 5778 } 5779 } 5780 5781 /* Work: Handle 8200 IDC aens */ 5782 void 5783 qla83xx_service_idc_aen(struct work_struct *work) 5784 { 5785 struct qla_hw_data *ha = 5786 container_of(work, struct qla_hw_data, idc_aen); 5787 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 5788 uint32_t dev_state, idc_control; 5789 5790 qla83xx_idc_lock(base_vha, 0); 5791 qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state); 5792 qla83xx_rd_reg(base_vha, QLA83XX_IDC_CONTROL, &idc_control); 5793 qla83xx_idc_unlock(base_vha, 0); 5794 if (dev_state == QLA8XXX_DEV_NEED_RESET) { 5795 if (idc_control & QLA83XX_IDC_GRACEFUL_RESET) { 5796 ql_dbg(ql_dbg_p3p, base_vha, 0xb062, 5797 "Application requested NIC Core Reset.\n"); 5798 qla83xx_schedule_work(base_vha, QLA83XX_NIC_CORE_RESET); 5799 } else if (qla83xx_check_nic_core_fw_alive(base_vha) == 5800 QLA_SUCCESS) { 5801 ql_dbg(ql_dbg_p3p, base_vha, 0xb07b, 5802 "Other protocol driver requested NIC Core Reset.\n"); 5803 qla83xx_schedule_work(base_vha, QLA83XX_NIC_CORE_RESET); 5804 } 5805 } else if (dev_state == QLA8XXX_DEV_FAILED || 5806 dev_state == QLA8XXX_DEV_NEED_QUIESCENT) { 5807 qla83xx_schedule_work(base_vha, QLA83XX_IDC_STATE_HANDLER); 5808 } 5809 } 5810 5811 /* 5812 * Control the frequency of IDC lock retries 5813 */ 5814 #define QLA83XX_WAIT_LOGIC_MS 100 5815 5816 static int 5817 qla83xx_force_lock_recovery(scsi_qla_host_t *base_vha) 5818 { 5819 int rval; 5820 uint32_t data; 5821 uint32_t idc_lck_rcvry_stage_mask = 0x3; 5822 uint32_t idc_lck_rcvry_owner_mask = 0x3c; 5823 struct qla_hw_data *ha = base_vha->hw; 5824 5825 ql_dbg(ql_dbg_p3p, base_vha, 0xb086, 5826 "Trying force recovery of the IDC lock.\n"); 5827 5828 rval = qla83xx_rd_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY, &data); 5829 if (rval) 5830 return rval; 5831 5832 if ((data & idc_lck_rcvry_stage_mask) > 0) { 5833 return QLA_SUCCESS; 5834 } else { 5835 data = (IDC_LOCK_RECOVERY_STAGE1) | (ha->portnum << 2); 5836 rval = qla83xx_wr_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY, 5837 data); 5838 if (rval) 5839 return rval; 5840 5841 msleep(200); 5842 5843 rval = qla83xx_rd_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY, 5844 &data); 5845 if (rval) 5846 return rval; 5847 5848 if (((data & idc_lck_rcvry_owner_mask) >> 2) == ha->portnum) { 5849 data &= (IDC_LOCK_RECOVERY_STAGE2 | 5850 ~(idc_lck_rcvry_stage_mask)); 5851 rval = qla83xx_wr_reg(base_vha, 5852 QLA83XX_IDC_LOCK_RECOVERY, data); 5853 if (rval) 5854 return rval; 5855 5856 /* Forcefully perform IDC UnLock */ 5857 rval = qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_UNLOCK, 5858 &data); 5859 if (rval) 5860 return rval; 5861 /* Clear lock-id by setting 0xff */ 5862 rval = qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID, 5863 0xff); 5864 if (rval) 5865 return rval; 5866 /* Clear lock-recovery by setting 0x0 */ 5867 rval = qla83xx_wr_reg(base_vha, 5868 QLA83XX_IDC_LOCK_RECOVERY, 0x0); 5869 if (rval) 5870 return rval; 5871 } else 5872 return QLA_SUCCESS; 5873 } 5874 5875 return rval; 5876 } 5877 5878 static int 5879 qla83xx_idc_lock_recovery(scsi_qla_host_t *base_vha) 5880 { 5881 int rval = QLA_SUCCESS; 5882 uint32_t o_drv_lockid, n_drv_lockid; 5883 unsigned long 
lock_recovery_timeout; 5884 5885 lock_recovery_timeout = jiffies + QLA83XX_MAX_LOCK_RECOVERY_WAIT; 5886 retry_lockid: 5887 rval = qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, &o_drv_lockid); 5888 if (rval) 5889 goto exit; 5890 5891 /* MAX wait time before forcing IDC Lock recovery = 2 secs */ 5892 if (time_after_eq(jiffies, lock_recovery_timeout)) { 5893 if (qla83xx_force_lock_recovery(base_vha) == QLA_SUCCESS) 5894 return QLA_SUCCESS; 5895 else 5896 return QLA_FUNCTION_FAILED; 5897 } 5898 5899 rval = qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, &n_drv_lockid); 5900 if (rval) 5901 goto exit; 5902 5903 if (o_drv_lockid == n_drv_lockid) { 5904 msleep(QLA83XX_WAIT_LOGIC_MS); 5905 goto retry_lockid; 5906 } else 5907 return QLA_SUCCESS; 5908 5909 exit: 5910 return rval; 5911 } 5912 5913 /* 5914 * Context: task, can sleep 5915 */ 5916 void 5917 qla83xx_idc_lock(scsi_qla_host_t *base_vha, uint16_t requester_id) 5918 { 5919 uint32_t data; 5920 uint32_t lock_owner; 5921 struct qla_hw_data *ha = base_vha->hw; 5922 5923 might_sleep(); 5924 5925 /* IDC-lock implementation using driver-lock/lock-id remote registers */ 5926 retry_lock: 5927 if (qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCK, &data) 5928 == QLA_SUCCESS) { 5929 if (data) { 5930 /* Setting lock-id to our function-number */ 5931 qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID, 5932 ha->portnum); 5933 } else { 5934 qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, 5935 &lock_owner); 5936 ql_dbg(ql_dbg_p3p, base_vha, 0xb063, 5937 "Failed to acquire IDC lock, acquired by %d, " 5938 "retrying...\n", lock_owner); 5939 5940 /* Retry/Perform IDC-Lock recovery */ 5941 if (qla83xx_idc_lock_recovery(base_vha) 5942 == QLA_SUCCESS) { 5943 msleep(QLA83XX_WAIT_LOGIC_MS); 5944 goto retry_lock; 5945 } else 5946 ql_log(ql_log_warn, base_vha, 0xb075, 5947 "IDC Lock recovery FAILED.\n"); 5948 } 5949 5950 } 5951 5952 return; 5953 } 5954 5955 static bool 5956 qla25xx_rdp_rsp_reduce_size(struct scsi_qla_host *vha, 5957 struct purex_entry_24xx *purex) 5958 { 5959 char fwstr[16]; 5960 u32 sid = purex->s_id[2] << 16 | purex->s_id[1] << 8 | purex->s_id[0]; 5961 struct port_database_24xx *pdb; 5962 5963 /* Domain Controller is always logged-out. 
*/ 5964 /* if RDP request is not from Domain Controller: */ 5965 if (sid != 0xfffc01) 5966 return false; 5967 5968 ql_dbg(ql_dbg_init, vha, 0x0181, "%s: s_id=%#x\n", __func__, sid); 5969 5970 pdb = kzalloc(sizeof(*pdb), GFP_KERNEL); 5971 if (!pdb) { 5972 ql_dbg(ql_dbg_init, vha, 0x0181, 5973 "%s: Failed allocate pdb\n", __func__); 5974 } else if (qla24xx_get_port_database(vha, 5975 le16_to_cpu(purex->nport_handle), pdb)) { 5976 ql_dbg(ql_dbg_init, vha, 0x0181, 5977 "%s: Failed get pdb sid=%x\n", __func__, sid); 5978 } else if (pdb->current_login_state != PDS_PLOGI_COMPLETE && 5979 pdb->current_login_state != PDS_PRLI_COMPLETE) { 5980 ql_dbg(ql_dbg_init, vha, 0x0181, 5981 "%s: Port not logged in sid=%#x\n", __func__, sid); 5982 } else { 5983 /* RDP request is from logged in port */ 5984 kfree(pdb); 5985 return false; 5986 } 5987 kfree(pdb); 5988 5989 vha->hw->isp_ops->fw_version_str(vha, fwstr, sizeof(fwstr)); 5990 fwstr[strcspn(fwstr, " ")] = 0; 5991 /* if FW version allows RDP response length upto 2048 bytes: */ 5992 if (strcmp(fwstr, "8.09.00") > 0 || strcmp(fwstr, "8.05.65") == 0) 5993 return false; 5994 5995 ql_dbg(ql_dbg_init, vha, 0x0181, "%s: fw=%s\n", __func__, fwstr); 5996 5997 /* RDP response length is to be reduced to maximum 256 bytes */ 5998 return true; 5999 } 6000 6001 /* 6002 * Function Name: qla24xx_process_purex_iocb 6003 * 6004 * Description: 6005 * Prepare a RDP response and send to Fabric switch 6006 * 6007 * PARAMETERS: 6008 * vha: SCSI qla host 6009 * purex: RDP request received by HBA 6010 */ 6011 void qla24xx_process_purex_rdp(struct scsi_qla_host *vha, 6012 struct purex_item *item) 6013 { 6014 struct qla_hw_data *ha = vha->hw; 6015 struct purex_entry_24xx *purex = 6016 (struct purex_entry_24xx *)&item->iocb; 6017 dma_addr_t rsp_els_dma; 6018 dma_addr_t rsp_payload_dma; 6019 dma_addr_t stat_dma; 6020 dma_addr_t sfp_dma; 6021 struct els_entry_24xx *rsp_els = NULL; 6022 struct rdp_rsp_payload *rsp_payload = NULL; 6023 struct link_statistics *stat = NULL; 6024 uint8_t *sfp = NULL; 6025 uint16_t sfp_flags = 0; 6026 uint rsp_payload_length = sizeof(*rsp_payload); 6027 int rval; 6028 6029 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0180, 6030 "%s: Enter\n", __func__); 6031 6032 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0181, 6033 "-------- ELS REQ -------\n"); 6034 ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0182, 6035 purex, sizeof(*purex)); 6036 6037 if (qla25xx_rdp_rsp_reduce_size(vha, purex)) { 6038 rsp_payload_length = 6039 offsetof(typeof(*rsp_payload), optical_elmt_desc); 6040 ql_dbg(ql_dbg_init, vha, 0x0181, 6041 "Reducing RSP payload length to %u bytes...\n", 6042 rsp_payload_length); 6043 } 6044 6045 rsp_els = dma_alloc_coherent(&ha->pdev->dev, sizeof(*rsp_els), 6046 &rsp_els_dma, GFP_KERNEL); 6047 if (!rsp_els) { 6048 ql_log(ql_log_warn, vha, 0x0183, 6049 "Failed allocate dma buffer ELS RSP.\n"); 6050 goto dealloc; 6051 } 6052 6053 rsp_payload = dma_alloc_coherent(&ha->pdev->dev, sizeof(*rsp_payload), 6054 &rsp_payload_dma, GFP_KERNEL); 6055 if (!rsp_payload) { 6056 ql_log(ql_log_warn, vha, 0x0184, 6057 "Failed allocate dma buffer ELS RSP payload.\n"); 6058 goto dealloc; 6059 } 6060 6061 sfp = dma_alloc_coherent(&ha->pdev->dev, SFP_RTDI_LEN, 6062 &sfp_dma, GFP_KERNEL); 6063 6064 stat = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stat), 6065 &stat_dma, GFP_KERNEL); 6066 6067 /* Prepare Response IOCB */ 6068 rsp_els->entry_type = ELS_IOCB_TYPE; 6069 rsp_els->entry_count = 1; 6070 rsp_els->sys_define = 0; 6071 rsp_els->entry_status = 0; 6072 
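/*
 * Note: the fields below reuse the addressing context of the incoming
 * PUREX -- N_Port handle, VP index, receive exchange address and the
 * requester's S_ID (placed in d_id) -- so the accept is returned to
 * the requester on its own exchange.
 */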
rsp_els->handle = 0; 6073 rsp_els->nport_handle = purex->nport_handle; 6074 rsp_els->tx_dsd_count = cpu_to_le16(1); 6075 rsp_els->vp_index = purex->vp_idx; 6076 rsp_els->sof_type = EST_SOFI3; 6077 rsp_els->rx_xchg_address = purex->rx_xchg_addr; 6078 rsp_els->rx_dsd_count = 0; 6079 rsp_els->opcode = purex->els_frame_payload[0]; 6080 6081 rsp_els->d_id[0] = purex->s_id[0]; 6082 rsp_els->d_id[1] = purex->s_id[1]; 6083 rsp_els->d_id[2] = purex->s_id[2]; 6084 6085 rsp_els->control_flags = cpu_to_le16(EPD_ELS_ACC); 6086 rsp_els->rx_byte_count = 0; 6087 rsp_els->tx_byte_count = cpu_to_le32(rsp_payload_length); 6088 6089 put_unaligned_le64(rsp_payload_dma, &rsp_els->tx_address); 6090 rsp_els->tx_len = rsp_els->tx_byte_count; 6091 6092 rsp_els->rx_address = 0; 6093 rsp_els->rx_len = 0; 6094 6095 /* Prepare Response Payload */ 6096 rsp_payload->hdr.cmd = cpu_to_be32(0x2 << 24); /* LS_ACC */ 6097 rsp_payload->hdr.len = cpu_to_be32(le32_to_cpu(rsp_els->tx_byte_count) - 6098 sizeof(rsp_payload->hdr)); 6099 6100 /* Link service Request Info Descriptor */ 6101 rsp_payload->ls_req_info_desc.desc_tag = cpu_to_be32(0x1); 6102 rsp_payload->ls_req_info_desc.desc_len = 6103 cpu_to_be32(RDP_DESC_LEN(rsp_payload->ls_req_info_desc)); 6104 rsp_payload->ls_req_info_desc.req_payload_word_0 = 6105 cpu_to_be32p((uint32_t *)purex->els_frame_payload); 6106 6107 /* Link service Request Info Descriptor 2 */ 6108 rsp_payload->ls_req_info_desc2.desc_tag = cpu_to_be32(0x1); 6109 rsp_payload->ls_req_info_desc2.desc_len = 6110 cpu_to_be32(RDP_DESC_LEN(rsp_payload->ls_req_info_desc2)); 6111 rsp_payload->ls_req_info_desc2.req_payload_word_0 = 6112 cpu_to_be32p((uint32_t *)purex->els_frame_payload); 6113 6114 6115 rsp_payload->sfp_diag_desc.desc_tag = cpu_to_be32(0x10000); 6116 rsp_payload->sfp_diag_desc.desc_len = 6117 cpu_to_be32(RDP_DESC_LEN(rsp_payload->sfp_diag_desc)); 6118 6119 if (sfp) { 6120 /* SFP Flags */ 6121 memset(sfp, 0, SFP_RTDI_LEN); 6122 rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa0, 0x7, 2, 0); 6123 if (!rval) { 6124 /* SFP Flags bits 3-0: Port Tx Laser Type */ 6125 if (sfp[0] & BIT_2 || sfp[1] & (BIT_6|BIT_5)) 6126 sfp_flags |= BIT_0; /* short wave */ 6127 else if (sfp[0] & BIT_1) 6128 sfp_flags |= BIT_1; /* long wave 1310nm */ 6129 else if (sfp[1] & BIT_4) 6130 sfp_flags |= BIT_1|BIT_0; /* long wave 1550nm */ 6131 } 6132 6133 /* SFP Type */ 6134 memset(sfp, 0, SFP_RTDI_LEN); 6135 rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa0, 0x0, 1, 0); 6136 if (!rval) { 6137 sfp_flags |= BIT_4; /* optical */ 6138 if (sfp[0] == 0x3) 6139 sfp_flags |= BIT_6; /* sfp+ */ 6140 } 6141 6142 rsp_payload->sfp_diag_desc.sfp_flags = cpu_to_be16(sfp_flags); 6143 6144 /* SFP Diagnostics */ 6145 memset(sfp, 0, SFP_RTDI_LEN); 6146 rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa2, 0x60, 10, 0); 6147 if (!rval) { 6148 __be16 *trx = (__force __be16 *)sfp; /* already be16 */ 6149 rsp_payload->sfp_diag_desc.temperature = trx[0]; 6150 rsp_payload->sfp_diag_desc.vcc = trx[1]; 6151 rsp_payload->sfp_diag_desc.tx_bias = trx[2]; 6152 rsp_payload->sfp_diag_desc.tx_power = trx[3]; 6153 rsp_payload->sfp_diag_desc.rx_power = trx[4]; 6154 } 6155 } 6156 6157 /* Port Speed Descriptor */ 6158 rsp_payload->port_speed_desc.desc_tag = cpu_to_be32(0x10001); 6159 rsp_payload->port_speed_desc.desc_len = 6160 cpu_to_be32(RDP_DESC_LEN(rsp_payload->port_speed_desc)); 6161 rsp_payload->port_speed_desc.speed_capab = cpu_to_be16( 6162 qla25xx_fdmi_port_speed_capability(ha)); 6163 rsp_payload->port_speed_desc.operating_speed = cpu_to_be16( 6164 
qla25xx_fdmi_port_speed_currently(ha));
6165
6166 /* Link Error Status Descriptor */
6167 rsp_payload->ls_err_desc.desc_tag = cpu_to_be32(0x10002);
6168 rsp_payload->ls_err_desc.desc_len =
6169 cpu_to_be32(RDP_DESC_LEN(rsp_payload->ls_err_desc));
6170
6171 if (stat) {
6172 rval = qla24xx_get_isp_stats(vha, stat, stat_dma, 0);
6173 if (!rval) {
6174 rsp_payload->ls_err_desc.link_fail_cnt =
6175 cpu_to_be32(le32_to_cpu(stat->link_fail_cnt));
6176 rsp_payload->ls_err_desc.loss_sync_cnt =
6177 cpu_to_be32(le32_to_cpu(stat->loss_sync_cnt));
6178 rsp_payload->ls_err_desc.loss_sig_cnt =
6179 cpu_to_be32(le32_to_cpu(stat->loss_sig_cnt));
6180 rsp_payload->ls_err_desc.prim_seq_err_cnt =
6181 cpu_to_be32(le32_to_cpu(stat->prim_seq_err_cnt));
6182 rsp_payload->ls_err_desc.inval_xmit_word_cnt =
6183 cpu_to_be32(le32_to_cpu(stat->inval_xmit_word_cnt));
6184 rsp_payload->ls_err_desc.inval_crc_cnt =
6185 cpu_to_be32(le32_to_cpu(stat->inval_crc_cnt));
6186 rsp_payload->ls_err_desc.pn_port_phy_type |= BIT_6;
6187 }
6188 }
6189
6190 /* Portname Descriptor */
6191 rsp_payload->port_name_diag_desc.desc_tag = cpu_to_be32(0x10003);
6192 rsp_payload->port_name_diag_desc.desc_len =
6193 cpu_to_be32(RDP_DESC_LEN(rsp_payload->port_name_diag_desc));
6194 memcpy(rsp_payload->port_name_diag_desc.WWNN,
6195 vha->node_name,
6196 sizeof(rsp_payload->port_name_diag_desc.WWNN));
6197 memcpy(rsp_payload->port_name_diag_desc.WWPN,
6198 vha->port_name,
6199 sizeof(rsp_payload->port_name_diag_desc.WWPN));
6200
6201 /* F-Port Portname Descriptor */
6202 rsp_payload->port_name_direct_desc.desc_tag = cpu_to_be32(0x10003);
6203 rsp_payload->port_name_direct_desc.desc_len =
6204 cpu_to_be32(RDP_DESC_LEN(rsp_payload->port_name_direct_desc));
6205 memcpy(rsp_payload->port_name_direct_desc.WWNN,
6206 vha->fabric_node_name,
6207 sizeof(rsp_payload->port_name_direct_desc.WWNN));
6208 memcpy(rsp_payload->port_name_direct_desc.WWPN,
6209 vha->fabric_port_name,
6210 sizeof(rsp_payload->port_name_direct_desc.WWPN));
6211
6212 /* Buffer Credit Descriptor */
6213 rsp_payload->buffer_credit_desc.desc_tag = cpu_to_be32(0x10006);
6214 rsp_payload->buffer_credit_desc.desc_len =
6215 cpu_to_be32(RDP_DESC_LEN(rsp_payload->buffer_credit_desc));
6216 rsp_payload->buffer_credit_desc.fcport_b2b = 0;
6217 rsp_payload->buffer_credit_desc.attached_fcport_b2b = cpu_to_be32(0);
6218 rsp_payload->buffer_credit_desc.fcport_rtt = cpu_to_be32(0);
6219
6220 if (ha->flags.plogi_template_valid) {
6221 uint32_t tmp =
6222 be16_to_cpu(ha->plogi_els_payld.fl_csp.sp_bb_cred);
6223 rsp_payload->buffer_credit_desc.fcport_b2b = cpu_to_be32(tmp);
6224 }
6225
6226 if (rsp_payload_length < sizeof(*rsp_payload))
6227 goto send;
6228
6229 /* Optical Element Descriptor, Temperature */
6230 rsp_payload->optical_elmt_desc[0].desc_tag = cpu_to_be32(0x10007);
6231 rsp_payload->optical_elmt_desc[0].desc_len =
6232 cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc));
6233 /* Optical Element Descriptor, Voltage */
6234 rsp_payload->optical_elmt_desc[1].desc_tag = cpu_to_be32(0x10007);
6235 rsp_payload->optical_elmt_desc[1].desc_len =
6236 cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc));
6237 /* Optical Element Descriptor, Tx Bias Current */
6238 rsp_payload->optical_elmt_desc[2].desc_tag = cpu_to_be32(0x10007);
6239 rsp_payload->optical_elmt_desc[2].desc_len =
6240 cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc));
6241 /* Optical Element Descriptor, Tx Power */
6242 rsp_payload->optical_elmt_desc[3].desc_tag = cpu_to_be32(0x10007);
6243
rsp_payload->optical_elmt_desc[3].desc_len = 6244 cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc)); 6245 /* Optical Element Descriptor, Rx Power */ 6246 rsp_payload->optical_elmt_desc[4].desc_tag = cpu_to_be32(0x10007); 6247 rsp_payload->optical_elmt_desc[4].desc_len = 6248 cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc)); 6249 6250 if (sfp) { 6251 memset(sfp, 0, SFP_RTDI_LEN); 6252 rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa2, 0, 64, 0); 6253 if (!rval) { 6254 __be16 *trx = (__force __be16 *)sfp; /* already be16 */ 6255 6256 /* Optical Element Descriptor, Temperature */ 6257 rsp_payload->optical_elmt_desc[0].high_alarm = trx[0]; 6258 rsp_payload->optical_elmt_desc[0].low_alarm = trx[1]; 6259 rsp_payload->optical_elmt_desc[0].high_warn = trx[2]; 6260 rsp_payload->optical_elmt_desc[0].low_warn = trx[3]; 6261 rsp_payload->optical_elmt_desc[0].element_flags = 6262 cpu_to_be32(1 << 28); 6263 6264 /* Optical Element Descriptor, Voltage */ 6265 rsp_payload->optical_elmt_desc[1].high_alarm = trx[4]; 6266 rsp_payload->optical_elmt_desc[1].low_alarm = trx[5]; 6267 rsp_payload->optical_elmt_desc[1].high_warn = trx[6]; 6268 rsp_payload->optical_elmt_desc[1].low_warn = trx[7]; 6269 rsp_payload->optical_elmt_desc[1].element_flags = 6270 cpu_to_be32(2 << 28); 6271 6272 /* Optical Element Descriptor, Tx Bias Current */ 6273 rsp_payload->optical_elmt_desc[2].high_alarm = trx[8]; 6274 rsp_payload->optical_elmt_desc[2].low_alarm = trx[9]; 6275 rsp_payload->optical_elmt_desc[2].high_warn = trx[10]; 6276 rsp_payload->optical_elmt_desc[2].low_warn = trx[11]; 6277 rsp_payload->optical_elmt_desc[2].element_flags = 6278 cpu_to_be32(3 << 28); 6279 6280 /* Optical Element Descriptor, Tx Power */ 6281 rsp_payload->optical_elmt_desc[3].high_alarm = trx[12]; 6282 rsp_payload->optical_elmt_desc[3].low_alarm = trx[13]; 6283 rsp_payload->optical_elmt_desc[3].high_warn = trx[14]; 6284 rsp_payload->optical_elmt_desc[3].low_warn = trx[15]; 6285 rsp_payload->optical_elmt_desc[3].element_flags = 6286 cpu_to_be32(4 << 28); 6287 6288 /* Optical Element Descriptor, Rx Power */ 6289 rsp_payload->optical_elmt_desc[4].high_alarm = trx[16]; 6290 rsp_payload->optical_elmt_desc[4].low_alarm = trx[17]; 6291 rsp_payload->optical_elmt_desc[4].high_warn = trx[18]; 6292 rsp_payload->optical_elmt_desc[4].low_warn = trx[19]; 6293 rsp_payload->optical_elmt_desc[4].element_flags = 6294 cpu_to_be32(5 << 28); 6295 } 6296 6297 memset(sfp, 0, SFP_RTDI_LEN); 6298 rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa2, 112, 64, 0); 6299 if (!rval) { 6300 /* Temperature high/low alarm/warning */ 6301 rsp_payload->optical_elmt_desc[0].element_flags |= 6302 cpu_to_be32( 6303 (sfp[0] >> 7 & 1) << 3 | 6304 (sfp[0] >> 6 & 1) << 2 | 6305 (sfp[4] >> 7 & 1) << 1 | 6306 (sfp[4] >> 6 & 1) << 0); 6307 6308 /* Voltage high/low alarm/warning */ 6309 rsp_payload->optical_elmt_desc[1].element_flags |= 6310 cpu_to_be32( 6311 (sfp[0] >> 5 & 1) << 3 | 6312 (sfp[0] >> 4 & 1) << 2 | 6313 (sfp[4] >> 5 & 1) << 1 | 6314 (sfp[4] >> 4 & 1) << 0); 6315 6316 /* Tx Bias Current high/low alarm/warning */ 6317 rsp_payload->optical_elmt_desc[2].element_flags |= 6318 cpu_to_be32( 6319 (sfp[0] >> 3 & 1) << 3 | 6320 (sfp[0] >> 2 & 1) << 2 | 6321 (sfp[4] >> 3 & 1) << 1 | 6322 (sfp[4] >> 2 & 1) << 0); 6323 6324 /* Tx Power high/low alarm/warning */ 6325 rsp_payload->optical_elmt_desc[3].element_flags |= 6326 cpu_to_be32( 6327 (sfp[0] >> 1 & 1) << 3 | 6328 (sfp[0] >> 0 & 1) << 2 | 6329 (sfp[4] >> 1 & 1) << 1 | 6330 (sfp[4] >> 0 & 1) << 0); 6331 6332 /* Rx Power high/low 
alarm/warning */ 6333 rsp_payload->optical_elmt_desc[4].element_flags |= 6334 cpu_to_be32( 6335 (sfp[1] >> 7 & 1) << 3 | 6336 (sfp[1] >> 6 & 1) << 2 | 6337 (sfp[5] >> 7 & 1) << 1 | 6338 (sfp[5] >> 6 & 1) << 0); 6339 } 6340 } 6341 6342 /* Optical Product Data Descriptor */ 6343 rsp_payload->optical_prod_desc.desc_tag = cpu_to_be32(0x10008); 6344 rsp_payload->optical_prod_desc.desc_len = 6345 cpu_to_be32(RDP_DESC_LEN(rsp_payload->optical_prod_desc)); 6346 6347 if (sfp) { 6348 memset(sfp, 0, SFP_RTDI_LEN); 6349 rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa0, 20, 64, 0); 6350 if (!rval) { 6351 memcpy(rsp_payload->optical_prod_desc.vendor_name, 6352 sfp + 0, 6353 sizeof(rsp_payload->optical_prod_desc.vendor_name)); 6354 memcpy(rsp_payload->optical_prod_desc.part_number, 6355 sfp + 20, 6356 sizeof(rsp_payload->optical_prod_desc.part_number)); 6357 memcpy(rsp_payload->optical_prod_desc.revision, 6358 sfp + 36, 6359 sizeof(rsp_payload->optical_prod_desc.revision)); 6360 memcpy(rsp_payload->optical_prod_desc.serial_number, 6361 sfp + 48, 6362 sizeof(rsp_payload->optical_prod_desc.serial_number)); 6363 } 6364 6365 memset(sfp, 0, SFP_RTDI_LEN); 6366 rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa0, 84, 8, 0); 6367 if (!rval) { 6368 memcpy(rsp_payload->optical_prod_desc.date, 6369 sfp + 0, 6370 sizeof(rsp_payload->optical_prod_desc.date)); 6371 } 6372 } 6373 6374 send: 6375 ql_dbg(ql_dbg_init, vha, 0x0183, 6376 "Sending ELS Response to RDP Request...\n"); 6377 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0184, 6378 "-------- ELS RSP -------\n"); 6379 ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0185, 6380 rsp_els, sizeof(*rsp_els)); 6381 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0186, 6382 "-------- ELS RSP PAYLOAD -------\n"); 6383 ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0187, 6384 rsp_payload, rsp_payload_length); 6385 6386 rval = qla2x00_issue_iocb(vha, rsp_els, rsp_els_dma, 0); 6387 6388 if (rval) { 6389 ql_log(ql_log_warn, vha, 0x0188, 6390 "%s: iocb failed to execute -> %x\n", __func__, rval); 6391 } else if (rsp_els->comp_status) { 6392 ql_log(ql_log_warn, vha, 0x0189, 6393 "%s: iocb failed to complete -> completion=%#x subcode=(%#x,%#x)\n", 6394 __func__, rsp_els->comp_status, 6395 rsp_els->error_subcode_1, rsp_els->error_subcode_2); 6396 } else { 6397 ql_dbg(ql_dbg_init, vha, 0x018a, "%s: done.\n", __func__); 6398 } 6399 6400 dealloc: 6401 if (stat) 6402 dma_free_coherent(&ha->pdev->dev, sizeof(*stat), 6403 stat, stat_dma); 6404 if (sfp) 6405 dma_free_coherent(&ha->pdev->dev, SFP_RTDI_LEN, 6406 sfp, sfp_dma); 6407 if (rsp_payload) 6408 dma_free_coherent(&ha->pdev->dev, sizeof(*rsp_payload), 6409 rsp_payload, rsp_payload_dma); 6410 if (rsp_els) 6411 dma_free_coherent(&ha->pdev->dev, sizeof(*rsp_els), 6412 rsp_els, rsp_els_dma); 6413 } 6414 6415 void 6416 qla24xx_free_purex_item(struct purex_item *item) 6417 { 6418 if (item == &item->vha->default_item) 6419 memset(&item->vha->default_item, 0, sizeof(struct purex_item)); 6420 else 6421 kfree(item); 6422 } 6423 6424 void qla24xx_process_purex_list(struct purex_list *list) 6425 { 6426 struct list_head head = LIST_HEAD_INIT(head); 6427 struct purex_item *item, *next; 6428 ulong flags; 6429 6430 spin_lock_irqsave(&list->lock, flags); 6431 list_splice_init(&list->head, &head); 6432 spin_unlock_irqrestore(&list->lock, flags); 6433 6434 list_for_each_entry_safe(item, next, &head, list) { 6435 list_del(&item->list); 6436 item->process_item(item->vha, item); 6437 qla24xx_free_purex_item(item); 6438 } 6439 } 6440 6441 /* 6442 * 
Context: task, can sleep 6443 */ 6444 void 6445 qla83xx_idc_unlock(scsi_qla_host_t *base_vha, uint16_t requester_id) 6446 { 6447 #if 0 6448 uint16_t options = (requester_id << 15) | BIT_7; 6449 #endif 6450 uint16_t retry; 6451 uint32_t data; 6452 struct qla_hw_data *ha = base_vha->hw; 6453 6454 might_sleep(); 6455 6456 /* IDC-unlock implementation using driver-unlock/lock-id 6457 * remote registers 6458 */ 6459 retry = 0; 6460 retry_unlock: 6461 if (qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, &data) 6462 == QLA_SUCCESS) { 6463 if (data == ha->portnum) { 6464 qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_UNLOCK, &data); 6465 /* Clearing lock-id by setting 0xff */ 6466 qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID, 0xff); 6467 } else if (retry < 10) { 6468 /* SV: XXX: IDC unlock retrying needed here? */ 6469 6470 /* Retry for IDC-unlock */ 6471 msleep(QLA83XX_WAIT_LOGIC_MS); 6472 retry++; 6473 ql_dbg(ql_dbg_p3p, base_vha, 0xb064, 6474 "Failed to release IDC lock, retrying=%d\n", retry); 6475 goto retry_unlock; 6476 } 6477 } else if (retry < 10) { 6478 /* Retry for IDC-unlock */ 6479 msleep(QLA83XX_WAIT_LOGIC_MS); 6480 retry++; 6481 ql_dbg(ql_dbg_p3p, base_vha, 0xb065, 6482 "Failed to read drv-lockid, retrying=%d\n", retry); 6483 goto retry_unlock; 6484 } 6485 6486 return; 6487 6488 #if 0 6489 /* XXX: IDC-unlock implementation using access-control mbx */ 6490 retry = 0; 6491 retry_unlock2: 6492 if (qla83xx_access_control(base_vha, options, 0, 0, NULL)) { 6493 if (retry < 10) { 6494 /* Retry for IDC-unlock */ 6495 msleep(QLA83XX_WAIT_LOGIC_MS); 6496 retry++; 6497 ql_dbg(ql_dbg_p3p, base_vha, 0xb066, 6498 "Failed to release IDC lock, retrying=%d\n", retry); 6499 goto retry_unlock2; 6500 } 6501 } 6502 6503 return; 6504 #endif 6505 } 6506 6507 int 6508 __qla83xx_set_drv_presence(scsi_qla_host_t *vha) 6509 { 6510 int rval = QLA_SUCCESS; 6511 struct qla_hw_data *ha = vha->hw; 6512 uint32_t drv_presence; 6513 6514 rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence); 6515 if (rval == QLA_SUCCESS) { 6516 drv_presence |= (1 << ha->portnum); 6517 rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRV_PRESENCE, 6518 drv_presence); 6519 } 6520 6521 return rval; 6522 } 6523 6524 int 6525 qla83xx_set_drv_presence(scsi_qla_host_t *vha) 6526 { 6527 int rval = QLA_SUCCESS; 6528 6529 qla83xx_idc_lock(vha, 0); 6530 rval = __qla83xx_set_drv_presence(vha); 6531 qla83xx_idc_unlock(vha, 0); 6532 6533 return rval; 6534 } 6535 6536 int 6537 __qla83xx_clear_drv_presence(scsi_qla_host_t *vha) 6538 { 6539 int rval = QLA_SUCCESS; 6540 struct qla_hw_data *ha = vha->hw; 6541 uint32_t drv_presence; 6542 6543 rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence); 6544 if (rval == QLA_SUCCESS) { 6545 drv_presence &= ~(1 << ha->portnum); 6546 rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRV_PRESENCE, 6547 drv_presence); 6548 } 6549 6550 return rval; 6551 } 6552 6553 int 6554 qla83xx_clear_drv_presence(scsi_qla_host_t *vha) 6555 { 6556 int rval = QLA_SUCCESS; 6557 6558 qla83xx_idc_lock(vha, 0); 6559 rval = __qla83xx_clear_drv_presence(vha); 6560 qla83xx_idc_unlock(vha, 0); 6561 6562 return rval; 6563 } 6564 6565 static void 6566 qla83xx_need_reset_handler(scsi_qla_host_t *vha) 6567 { 6568 struct qla_hw_data *ha = vha->hw; 6569 uint32_t drv_ack, drv_presence; 6570 unsigned long ack_timeout; 6571 6572 /* Wait for IDC ACK from all functions (DRV-ACK == DRV-PRESENCE) */ 6573 ack_timeout = jiffies + (ha->fcoe_reset_timeout * HZ); 6574 while (1) { 6575 qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack); 6576 
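/*
 * Note: DRV-PRESENCE is sampled again on every pass of this loop;
 * functions that do not ack before ack_timeout are dropped from
 * DRV-PRESENCE in the timeout branch below so the reset can proceed
 * without them.
 */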
qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence); 6577 if ((drv_ack & drv_presence) == drv_presence) 6578 break; 6579 6580 if (time_after_eq(jiffies, ack_timeout)) { 6581 ql_log(ql_log_warn, vha, 0xb067, 6582 "RESET ACK TIMEOUT! drv_presence=0x%x " 6583 "drv_ack=0x%x\n", drv_presence, drv_ack); 6584 /* 6585 * The function(s) which did not ack in time are forced 6586 * to withdraw any further participation in the IDC 6587 * reset. 6588 */ 6589 if (drv_ack != drv_presence) 6590 qla83xx_wr_reg(vha, QLA83XX_IDC_DRV_PRESENCE, 6591 drv_ack); 6592 break; 6593 } 6594 6595 qla83xx_idc_unlock(vha, 0); 6596 msleep(1000); 6597 qla83xx_idc_lock(vha, 0); 6598 } 6599 6600 qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_COLD); 6601 ql_log(ql_log_info, vha, 0xb068, "HW State: COLD/RE-INIT.\n"); 6602 } 6603 6604 static int 6605 qla83xx_device_bootstrap(scsi_qla_host_t *vha) 6606 { 6607 int rval = QLA_SUCCESS; 6608 uint32_t idc_control; 6609 6610 qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_INITIALIZING); 6611 ql_log(ql_log_info, vha, 0xb069, "HW State: INITIALIZING.\n"); 6612 6613 /* Clearing IDC-Control Graceful-Reset Bit before resetting f/w */ 6614 __qla83xx_get_idc_control(vha, &idc_control); 6615 idc_control &= ~QLA83XX_IDC_GRACEFUL_RESET; 6616 __qla83xx_set_idc_control(vha, 0); 6617 6618 qla83xx_idc_unlock(vha, 0); 6619 rval = qla83xx_restart_nic_firmware(vha); 6620 qla83xx_idc_lock(vha, 0); 6621 6622 if (rval != QLA_SUCCESS) { 6623 ql_log(ql_log_fatal, vha, 0xb06a, 6624 "Failed to restart NIC f/w.\n"); 6625 qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_FAILED); 6626 ql_log(ql_log_info, vha, 0xb06b, "HW State: FAILED.\n"); 6627 } else { 6628 ql_dbg(ql_dbg_p3p, vha, 0xb06c, 6629 "Success in restarting nic f/w.\n"); 6630 qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_READY); 6631 ql_log(ql_log_info, vha, 0xb06d, "HW State: READY.\n"); 6632 } 6633 6634 return rval; 6635 } 6636 6637 /* Assumes idc_lock always held on entry */ 6638 int 6639 qla83xx_idc_state_handler(scsi_qla_host_t *base_vha) 6640 { 6641 struct qla_hw_data *ha = base_vha->hw; 6642 int rval = QLA_SUCCESS; 6643 unsigned long dev_init_timeout; 6644 uint32_t dev_state; 6645 6646 /* Wait for MAX-INIT-TIMEOUT for the device to go ready */ 6647 dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout * HZ); 6648 6649 while (1) { 6650 6651 if (time_after_eq(jiffies, dev_init_timeout)) { 6652 ql_log(ql_log_warn, base_vha, 0xb06e, 6653 "Initialization TIMEOUT!\n"); 6654 /* Init timeout. Disable further NIC Core 6655 * communication. 
6656 */ 6657 qla83xx_wr_reg(base_vha, QLA83XX_IDC_DEV_STATE, 6658 QLA8XXX_DEV_FAILED); 6659 ql_log(ql_log_info, base_vha, 0xb06f, 6660 "HW State: FAILED.\n"); 6661 } 6662 6663 qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state); 6664 switch (dev_state) { 6665 case QLA8XXX_DEV_READY: 6666 if (ha->flags.nic_core_reset_owner) 6667 qla83xx_idc_audit(base_vha, 6668 IDC_AUDIT_COMPLETION); 6669 ha->flags.nic_core_reset_owner = 0; 6670 ql_dbg(ql_dbg_p3p, base_vha, 0xb070, 6671 "Reset_owner reset by 0x%x.\n", 6672 ha->portnum); 6673 goto exit; 6674 case QLA8XXX_DEV_COLD: 6675 if (ha->flags.nic_core_reset_owner) 6676 rval = qla83xx_device_bootstrap(base_vha); 6677 else { 6678 /* Wait for AEN to change device-state */ 6679 qla83xx_idc_unlock(base_vha, 0); 6680 msleep(1000); 6681 qla83xx_idc_lock(base_vha, 0); 6682 } 6683 break; 6684 case QLA8XXX_DEV_INITIALIZING: 6685 /* Wait for AEN to change device-state */ 6686 qla83xx_idc_unlock(base_vha, 0); 6687 msleep(1000); 6688 qla83xx_idc_lock(base_vha, 0); 6689 break; 6690 case QLA8XXX_DEV_NEED_RESET: 6691 if (!ql2xdontresethba && ha->flags.nic_core_reset_owner) 6692 qla83xx_need_reset_handler(base_vha); 6693 else { 6694 /* Wait for AEN to change device-state */ 6695 qla83xx_idc_unlock(base_vha, 0); 6696 msleep(1000); 6697 qla83xx_idc_lock(base_vha, 0); 6698 } 6699 /* reset timeout value after need reset handler */ 6700 dev_init_timeout = jiffies + 6701 (ha->fcoe_dev_init_timeout * HZ); 6702 break; 6703 case QLA8XXX_DEV_NEED_QUIESCENT: 6704 /* XXX: DEBUG for now */ 6705 qla83xx_idc_unlock(base_vha, 0); 6706 msleep(1000); 6707 qla83xx_idc_lock(base_vha, 0); 6708 break; 6709 case QLA8XXX_DEV_QUIESCENT: 6710 /* XXX: DEBUG for now */ 6711 if (ha->flags.quiesce_owner) 6712 goto exit; 6713 6714 qla83xx_idc_unlock(base_vha, 0); 6715 msleep(1000); 6716 qla83xx_idc_lock(base_vha, 0); 6717 dev_init_timeout = jiffies + 6718 (ha->fcoe_dev_init_timeout * HZ); 6719 break; 6720 case QLA8XXX_DEV_FAILED: 6721 if (ha->flags.nic_core_reset_owner) 6722 qla83xx_idc_audit(base_vha, 6723 IDC_AUDIT_COMPLETION); 6724 ha->flags.nic_core_reset_owner = 0; 6725 __qla83xx_clear_drv_presence(base_vha); 6726 qla83xx_idc_unlock(base_vha, 0); 6727 qla8xxx_dev_failed_handler(base_vha); 6728 rval = QLA_FUNCTION_FAILED; 6729 qla83xx_idc_lock(base_vha, 0); 6730 goto exit; 6731 case QLA8XXX_BAD_VALUE: 6732 qla83xx_idc_unlock(base_vha, 0); 6733 msleep(1000); 6734 qla83xx_idc_lock(base_vha, 0); 6735 break; 6736 default: 6737 ql_log(ql_log_warn, base_vha, 0xb071, 6738 "Unknown Device State: %x.\n", dev_state); 6739 qla83xx_idc_unlock(base_vha, 0); 6740 qla8xxx_dev_failed_handler(base_vha); 6741 rval = QLA_FUNCTION_FAILED; 6742 qla83xx_idc_lock(base_vha, 0); 6743 goto exit; 6744 } 6745 } 6746 6747 exit: 6748 return rval; 6749 } 6750 6751 void 6752 qla2x00_disable_board_on_pci_error(struct work_struct *work) 6753 { 6754 struct qla_hw_data *ha = container_of(work, struct qla_hw_data, 6755 board_disable); 6756 struct pci_dev *pdev = ha->pdev; 6757 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 6758 6759 ql_log(ql_log_warn, base_vha, 0x015b, 6760 "Disabling adapter.\n"); 6761 6762 if (!atomic_read(&pdev->enable_cnt)) { 6763 ql_log(ql_log_info, base_vha, 0xfffc, 6764 "PCI device disabled, no action req for PCI error=%lx\n", 6765 base_vha->pci_flags); 6766 return; 6767 } 6768 6769 /* 6770 * if UNLOADING flag is already set, then continue unload, 6771 * where it was set first. 
*/
6773 if (test_and_set_bit(UNLOADING, &base_vha->dpc_flags))
6774 return;
6775
6776 qla2x00_wait_for_sess_deletion(base_vha);
6777
6778 qla2x00_delete_all_vps(ha, base_vha);
6779
6780 qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16);
6781
6782 qla2x00_dfs_remove(base_vha);
6783
6784 qla84xx_put_chip(base_vha);
6785
6786 if (base_vha->timer_active)
6787 qla2x00_stop_timer(base_vha);
6788
6789 base_vha->flags.online = 0;
6790
6791 qla2x00_destroy_deferred_work(ha);
6792
6793 /*
6794 * Do not try to stop beacon blink as it will issue a mailbox
6795 * command.
6796 */
6797 qla2x00_free_sysfs_attr(base_vha, false);
6798
6799 fc_remove_host(base_vha->host);
6800
6801 scsi_remove_host(base_vha->host);
6802
6803 base_vha->flags.init_done = 0;
6804 qla25xx_delete_queues(base_vha);
6805 qla2x00_free_fcports(base_vha);
6806 qla2x00_free_irqs(base_vha);
6807 qla2x00_mem_free(ha);
6808 qla82xx_md_free(base_vha);
6809 qla2x00_free_queues(ha);
6810
6811 qla2x00_unmap_iobases(ha);
6812
6813 pci_release_selected_regions(ha->pdev, ha->bars);
6814 pci_disable_pcie_error_reporting(pdev);
6815 pci_disable_device(pdev);
6816
6817 /*
6818 * Let qla2x00_remove_one cleanup qla_hw_data on device removal.
6819 */
6820 }
6821
6822 /**************************************************************************
6823 * qla2x00_do_dpc
6824 * This kernel thread is a task that is scheduled by the interrupt handler
6825 * to perform the background processing for interrupts.
6826 *
6827 * Notes:
6828 * This task always runs in the context of a kernel thread. It
6829 * is kicked off by the driver's detect code and starts
6830 * up one per adapter. It immediately goes to sleep and waits for
6831 * some fibre event. When either the interrupt handler or
6832 * the timer routine detects an event it will set one of the task
6833 * bits and then wake us up.
6834 **************************************************************************/ 6835 static int 6836 qla2x00_do_dpc(void *data) 6837 { 6838 scsi_qla_host_t *base_vha; 6839 struct qla_hw_data *ha; 6840 uint32_t online; 6841 struct qla_qpair *qpair; 6842 6843 ha = (struct qla_hw_data *)data; 6844 base_vha = pci_get_drvdata(ha->pdev); 6845 6846 set_user_nice(current, MIN_NICE); 6847 6848 set_current_state(TASK_INTERRUPTIBLE); 6849 while (!kthread_should_stop()) { 6850 ql_dbg(ql_dbg_dpc, base_vha, 0x4000, 6851 "DPC handler sleeping.\n"); 6852 6853 schedule(); 6854 6855 if (test_and_clear_bit(DO_EEH_RECOVERY, &base_vha->dpc_flags)) 6856 qla_pci_set_eeh_busy(base_vha); 6857 6858 if (!base_vha->flags.init_done || ha->flags.mbox_busy) 6859 goto end_loop; 6860 6861 if (ha->flags.eeh_busy) { 6862 ql_dbg(ql_dbg_dpc, base_vha, 0x4003, 6863 "eeh_busy=%d.\n", ha->flags.eeh_busy); 6864 goto end_loop; 6865 } 6866 6867 ha->dpc_active = 1; 6868 6869 ql_dbg(ql_dbg_dpc + ql_dbg_verbose, base_vha, 0x4001, 6870 "DPC handler waking up, dpc_flags=0x%lx.\n", 6871 base_vha->dpc_flags); 6872 6873 if (test_bit(UNLOADING, &base_vha->dpc_flags)) 6874 break; 6875 6876 if (IS_P3P_TYPE(ha)) { 6877 if (IS_QLA8044(ha)) { 6878 if (test_and_clear_bit(ISP_UNRECOVERABLE, 6879 &base_vha->dpc_flags)) { 6880 qla8044_idc_lock(ha); 6881 qla8044_wr_direct(base_vha, 6882 QLA8044_CRB_DEV_STATE_INDEX, 6883 QLA8XXX_DEV_FAILED); 6884 qla8044_idc_unlock(ha); 6885 ql_log(ql_log_info, base_vha, 0x4004, 6886 "HW State: FAILED.\n"); 6887 qla8044_device_state_handler(base_vha); 6888 continue; 6889 } 6890 6891 } else { 6892 if (test_and_clear_bit(ISP_UNRECOVERABLE, 6893 &base_vha->dpc_flags)) { 6894 qla82xx_idc_lock(ha); 6895 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 6896 QLA8XXX_DEV_FAILED); 6897 qla82xx_idc_unlock(ha); 6898 ql_log(ql_log_info, base_vha, 0x0151, 6899 "HW State: FAILED.\n"); 6900 qla82xx_device_state_handler(base_vha); 6901 continue; 6902 } 6903 } 6904 6905 if (test_and_clear_bit(FCOE_CTX_RESET_NEEDED, 6906 &base_vha->dpc_flags)) { 6907 6908 ql_dbg(ql_dbg_dpc, base_vha, 0x4005, 6909 "FCoE context reset scheduled.\n"); 6910 if (!(test_and_set_bit(ABORT_ISP_ACTIVE, 6911 &base_vha->dpc_flags))) { 6912 if (qla82xx_fcoe_ctx_reset(base_vha)) { 6913 /* FCoE-ctx reset failed. 6914 * Escalate to chip-reset 6915 */ 6916 set_bit(ISP_ABORT_NEEDED, 6917 &base_vha->dpc_flags); 6918 } 6919 clear_bit(ABORT_ISP_ACTIVE, 6920 &base_vha->dpc_flags); 6921 } 6922 6923 ql_dbg(ql_dbg_dpc, base_vha, 0x4006, 6924 "FCoE context reset end.\n"); 6925 } 6926 } else if (IS_QLAFX00(ha)) { 6927 if (test_and_clear_bit(ISP_UNRECOVERABLE, 6928 &base_vha->dpc_flags)) { 6929 ql_dbg(ql_dbg_dpc, base_vha, 0x4020, 6930 "Firmware Reset Recovery\n"); 6931 if (qlafx00_reset_initialize(base_vha)) { 6932 /* Failed. Abort isp later. 
*/ 6933 if (!test_bit(UNLOADING, 6934 &base_vha->dpc_flags)) { 6935 set_bit(ISP_UNRECOVERABLE, 6936 &base_vha->dpc_flags); 6937 ql_dbg(ql_dbg_dpc, base_vha, 6938 0x4021, 6939 "Reset Recovery Failed\n"); 6940 } 6941 } 6942 } 6943 6944 if (test_and_clear_bit(FX00_TARGET_SCAN, 6945 &base_vha->dpc_flags)) { 6946 ql_dbg(ql_dbg_dpc, base_vha, 0x4022, 6947 "ISPFx00 Target Scan scheduled\n"); 6948 if (qlafx00_rescan_isp(base_vha)) { 6949 if (!test_bit(UNLOADING, 6950 &base_vha->dpc_flags)) 6951 set_bit(ISP_UNRECOVERABLE, 6952 &base_vha->dpc_flags); 6953 ql_dbg(ql_dbg_dpc, base_vha, 0x401e, 6954 "ISPFx00 Target Scan Failed\n"); 6955 } 6956 ql_dbg(ql_dbg_dpc, base_vha, 0x401f, 6957 "ISPFx00 Target Scan End\n"); 6958 } 6959 if (test_and_clear_bit(FX00_HOST_INFO_RESEND, 6960 &base_vha->dpc_flags)) { 6961 ql_dbg(ql_dbg_dpc, base_vha, 0x4023, 6962 "ISPFx00 Host Info resend scheduled\n"); 6963 qlafx00_fx_disc(base_vha, 6964 &base_vha->hw->mr.fcport, 6965 FXDISC_REG_HOST_INFO); 6966 } 6967 } 6968 6969 if (test_and_clear_bit(DETECT_SFP_CHANGE, 6970 &base_vha->dpc_flags)) { 6971 /* Semantic: 6972 * - NO-OP -- await next ISP-ABORT. Preferred method 6973 * to minimize disruptions that will occur 6974 * when a forced chip-reset occurs. 6975 * - Force -- ISP-ABORT scheduled. 6976 */ 6977 /* set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags); */ 6978 } 6979 6980 if (test_and_clear_bit 6981 (ISP_ABORT_NEEDED, &base_vha->dpc_flags) && 6982 !test_bit(UNLOADING, &base_vha->dpc_flags)) { 6983 bool do_reset = true; 6984 6985 switch (base_vha->qlini_mode) { 6986 case QLA2XXX_INI_MODE_ENABLED: 6987 break; 6988 case QLA2XXX_INI_MODE_DISABLED: 6989 if (!qla_tgt_mode_enabled(base_vha) && 6990 !ha->flags.fw_started) 6991 do_reset = false; 6992 break; 6993 case QLA2XXX_INI_MODE_DUAL: 6994 if (!qla_dual_mode_enabled(base_vha) && 6995 !ha->flags.fw_started) 6996 do_reset = false; 6997 break; 6998 default: 6999 break; 7000 } 7001 7002 if (do_reset && !(test_and_set_bit(ABORT_ISP_ACTIVE, 7003 &base_vha->dpc_flags))) { 7004 base_vha->flags.online = 1; 7005 ql_dbg(ql_dbg_dpc, base_vha, 0x4007, 7006 "ISP abort scheduled.\n"); 7007 if (ha->isp_ops->abort_isp(base_vha)) { 7008 /* failed. 
retry later */ 7009 set_bit(ISP_ABORT_NEEDED, 7010 &base_vha->dpc_flags); 7011 } 7012 clear_bit(ABORT_ISP_ACTIVE, 7013 &base_vha->dpc_flags); 7014 ql_dbg(ql_dbg_dpc, base_vha, 0x4008, 7015 "ISP abort end.\n"); 7016 } 7017 } 7018 7019 if (test_bit(PROCESS_PUREX_IOCB, &base_vha->dpc_flags)) { 7020 if (atomic_read(&base_vha->loop_state) == LOOP_READY) { 7021 qla24xx_process_purex_list 7022 (&base_vha->purex_list); 7023 clear_bit(PROCESS_PUREX_IOCB, 7024 &base_vha->dpc_flags); 7025 } 7026 } 7027 7028 if (test_and_clear_bit(FCPORT_UPDATE_NEEDED, 7029 &base_vha->dpc_flags)) { 7030 qla2x00_update_fcports(base_vha); 7031 } 7032 7033 if (IS_QLAFX00(ha)) 7034 goto loop_resync_check; 7035 7036 if (test_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags)) { 7037 ql_dbg(ql_dbg_dpc, base_vha, 0x4009, 7038 "Quiescence mode scheduled.\n"); 7039 if (IS_P3P_TYPE(ha)) { 7040 if (IS_QLA82XX(ha)) 7041 qla82xx_device_state_handler(base_vha); 7042 if (IS_QLA8044(ha)) 7043 qla8044_device_state_handler(base_vha); 7044 clear_bit(ISP_QUIESCE_NEEDED, 7045 &base_vha->dpc_flags); 7046 if (!ha->flags.quiesce_owner) { 7047 qla2x00_perform_loop_resync(base_vha); 7048 if (IS_QLA82XX(ha)) { 7049 qla82xx_idc_lock(ha); 7050 qla82xx_clear_qsnt_ready( 7051 base_vha); 7052 qla82xx_idc_unlock(ha); 7053 } else if (IS_QLA8044(ha)) { 7054 qla8044_idc_lock(ha); 7055 qla8044_clear_qsnt_ready( 7056 base_vha); 7057 qla8044_idc_unlock(ha); 7058 } 7059 } 7060 } else { 7061 clear_bit(ISP_QUIESCE_NEEDED, 7062 &base_vha->dpc_flags); 7063 qla2x00_quiesce_io(base_vha); 7064 } 7065 ql_dbg(ql_dbg_dpc, base_vha, 0x400a, 7066 "Quiescence mode end.\n"); 7067 } 7068 7069 if (test_and_clear_bit(RESET_MARKER_NEEDED, 7070 &base_vha->dpc_flags) && 7071 (!(test_and_set_bit(RESET_ACTIVE, &base_vha->dpc_flags)))) { 7072 7073 ql_dbg(ql_dbg_dpc, base_vha, 0x400b, 7074 "Reset marker scheduled.\n"); 7075 qla2x00_rst_aen(base_vha); 7076 clear_bit(RESET_ACTIVE, &base_vha->dpc_flags); 7077 ql_dbg(ql_dbg_dpc, base_vha, 0x400c, 7078 "Reset marker end.\n"); 7079 } 7080 7081 /* Retry each device up to login retry count */ 7082 if (test_bit(RELOGIN_NEEDED, &base_vha->dpc_flags) && 7083 !test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags) && 7084 atomic_read(&base_vha->loop_state) != LOOP_DOWN) { 7085 7086 if (!base_vha->relogin_jif || 7087 time_after_eq(jiffies, base_vha->relogin_jif)) { 7088 base_vha->relogin_jif = jiffies + HZ; 7089 clear_bit(RELOGIN_NEEDED, &base_vha->dpc_flags); 7090 7091 ql_dbg(ql_dbg_disc, base_vha, 0x400d, 7092 "Relogin scheduled.\n"); 7093 qla24xx_post_relogin_work(base_vha); 7094 } 7095 } 7096 loop_resync_check: 7097 if (test_and_clear_bit(LOOP_RESYNC_NEEDED, 7098 &base_vha->dpc_flags)) { 7099 7100 ql_dbg(ql_dbg_dpc, base_vha, 0x400f, 7101 "Loop resync scheduled.\n"); 7102 7103 if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, 7104 &base_vha->dpc_flags))) { 7105 7106 qla2x00_loop_resync(base_vha); 7107 7108 clear_bit(LOOP_RESYNC_ACTIVE, 7109 &base_vha->dpc_flags); 7110 } 7111 7112 ql_dbg(ql_dbg_dpc, base_vha, 0x4010, 7113 "Loop resync end.\n"); 7114 } 7115 7116 if (IS_QLAFX00(ha)) 7117 goto intr_on_check; 7118 7119 if (test_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags) && 7120 atomic_read(&base_vha->loop_state) == LOOP_READY) { 7121 clear_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags); 7122 qla2xxx_flash_npiv_conf(base_vha); 7123 } 7124 7125 intr_on_check: 7126 if (!ha->interrupts_on) 7127 ha->isp_ops->enable_intrs(ha); 7128 7129 if (test_and_clear_bit(BEACON_BLINK_NEEDED, 7130 &base_vha->dpc_flags)) { 7131 if (ha->beacon_blink_led == 1) 7132 
ha->isp_ops->beacon_blink(base_vha); 7133 } 7134 7135 /* qpair online check */ 7136 if (test_and_clear_bit(QPAIR_ONLINE_CHECK_NEEDED, 7137 &base_vha->dpc_flags)) { 7138 if (ha->flags.eeh_busy || 7139 ha->flags.pci_channel_io_perm_failure) 7140 online = 0; 7141 else 7142 online = 1; 7143 7144 mutex_lock(&ha->mq_lock); 7145 list_for_each_entry(qpair, &base_vha->qp_list, 7146 qp_list_elem) 7147 qpair->online = online; 7148 mutex_unlock(&ha->mq_lock); 7149 } 7150 7151 if (test_and_clear_bit(SET_ZIO_THRESHOLD_NEEDED, 7152 &base_vha->dpc_flags)) { 7153 u16 threshold = ha->nvme_last_rptd_aen + ha->last_zio_threshold; 7154 7155 if (threshold > ha->orig_fw_xcb_count) 7156 threshold = ha->orig_fw_xcb_count; 7157 7158 ql_log(ql_log_info, base_vha, 0xffffff, 7159 "SET ZIO Activity exchange threshold to %d.\n", 7160 threshold); 7161 if (qla27xx_set_zio_threshold(base_vha, threshold)) { 7162 ql_log(ql_log_info, base_vha, 0xffffff, 7163 "Unable to SET ZIO Activity exchange threshold to %d.\n", 7164 threshold); 7165 } 7166 } 7167 7168 if (!IS_QLAFX00(ha)) 7169 qla2x00_do_dpc_all_vps(base_vha); 7170 7171 if (test_and_clear_bit(N2N_LINK_RESET, 7172 &base_vha->dpc_flags)) { 7173 qla2x00_lip_reset(base_vha); 7174 } 7175 7176 ha->dpc_active = 0; 7177 end_loop: 7178 set_current_state(TASK_INTERRUPTIBLE); 7179 } /* End of while(1) */ 7180 __set_current_state(TASK_RUNNING); 7181 7182 ql_dbg(ql_dbg_dpc, base_vha, 0x4011, 7183 "DPC handler exiting.\n"); 7184 7185 /* 7186 * Make sure that nobody tries to wake us up again. 7187 */ 7188 ha->dpc_active = 0; 7189 7190 /* Cleanup any residual CTX SRBs. */ 7191 qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16); 7192 7193 return 0; 7194 } 7195 7196 void 7197 qla2xxx_wake_dpc(struct scsi_qla_host *vha) 7198 { 7199 struct qla_hw_data *ha = vha->hw; 7200 struct task_struct *t = ha->dpc_thread; 7201 7202 if (!test_bit(UNLOADING, &vha->dpc_flags) && t) 7203 wake_up_process(t); 7204 } 7205 7206 /* 7207 * qla2x00_rst_aen 7208 * Processes asynchronous reset. 7209 * 7210 * Input: 7211 * ha = adapter block pointer. 7212 */ 7213 static void 7214 qla2x00_rst_aen(scsi_qla_host_t *vha) 7215 { 7216 if (vha->flags.online && !vha->flags.reset_active && 7217 !atomic_read(&vha->loop_down_timer) && 7218 !(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))) { 7219 do { 7220 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); 7221 7222 /* 7223 * Issue marker command only when we are going to start 7224 * the I/O. 7225 */ 7226 vha->marker_needed = 1; 7227 } while (!atomic_read(&vha->loop_down_timer) && 7228 (test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags))); 7229 } 7230 } 7231 7232 static bool qla_do_heartbeat(struct scsi_qla_host *vha) 7233 { 7234 struct qla_hw_data *ha = vha->hw; 7235 u32 cmpl_cnt; 7236 u16 i; 7237 bool do_heartbeat = false; 7238 7239 /* 7240 * Allow do_heartbeat only if we don’t have any active interrupts, 7241 * but there are still IOs outstanding with firmware. 
7242 */ 7243 cmpl_cnt = ha->base_qpair->cmd_completion_cnt; 7244 if (cmpl_cnt == ha->base_qpair->prev_completion_cnt && 7245 cmpl_cnt != ha->base_qpair->cmd_cnt) { 7246 do_heartbeat = true; 7247 goto skip; 7248 } 7249 ha->base_qpair->prev_completion_cnt = cmpl_cnt; 7250 7251 for (i = 0; i < ha->max_qpairs; i++) { 7252 if (ha->queue_pair_map[i]) { 7253 cmpl_cnt = ha->queue_pair_map[i]->cmd_completion_cnt; 7254 if (cmpl_cnt == ha->queue_pair_map[i]->prev_completion_cnt && 7255 cmpl_cnt != ha->queue_pair_map[i]->cmd_cnt) { 7256 do_heartbeat = true; 7257 break; 7258 } 7259 ha->queue_pair_map[i]->prev_completion_cnt = cmpl_cnt; 7260 } 7261 } 7262 7263 skip: 7264 return do_heartbeat; 7265 } 7266 7267 static void qla_heart_beat(struct scsi_qla_host *vha, u16 dpc_started) 7268 { 7269 struct qla_hw_data *ha = vha->hw; 7270 7271 if (vha->vp_idx) 7272 return; 7273 7274 if (vha->hw->flags.eeh_busy || qla2x00_chip_is_down(vha)) 7275 return; 7276 7277 /* 7278 * dpc thread cannot run if heartbeat is running at the same time. 7279 * We also do not want to starve heartbeat task. Therefore, do 7280 * heartbeat task at least once every 5 seconds. 7281 */ 7282 if (dpc_started && 7283 time_before(jiffies, ha->last_heartbeat_run_jiffies + 5 * HZ)) 7284 return; 7285 7286 if (qla_do_heartbeat(vha)) { 7287 ha->last_heartbeat_run_jiffies = jiffies; 7288 queue_work(ha->wq, &ha->heartbeat_work); 7289 } 7290 } 7291 7292 static void qla_wind_down_chip(scsi_qla_host_t *vha) 7293 { 7294 struct qla_hw_data *ha = vha->hw; 7295 7296 if (!ha->flags.eeh_busy) 7297 return; 7298 if (ha->pci_error_state) 7299 /* system is trying to recover */ 7300 return; 7301 7302 /* 7303 * Current system is not handling PCIE error. At this point, this is 7304 * best effort to wind down the adapter. 7305 */ 7306 if (time_after_eq(jiffies, ha->eeh_jif + ql2xdelay_before_pci_error_handling * HZ) && 7307 !ha->flags.eeh_flush) { 7308 ql_log(ql_log_info, vha, 0x9009, 7309 "PCI Error detected, attempting to reset hardware.\n"); 7310 7311 ha->isp_ops->reset_chip(vha); 7312 ha->isp_ops->disable_intrs(ha); 7313 7314 ha->flags.eeh_flush = EEH_FLUSH_RDY; 7315 ha->eeh_jif = jiffies; 7316 7317 } else if (ha->flags.eeh_flush == EEH_FLUSH_RDY && 7318 time_after_eq(jiffies, ha->eeh_jif + 5 * HZ)) { 7319 pci_clear_master(ha->pdev); 7320 7321 /* flush all command */ 7322 qla2x00_abort_isp_cleanup(vha); 7323 ha->flags.eeh_flush = EEH_FLUSH_DONE; 7324 7325 ql_log(ql_log_info, vha, 0x900a, 7326 "PCI Error handling complete, all IOs aborted.\n"); 7327 } 7328 } 7329 7330 /************************************************************************** 7331 * qla2x00_timer 7332 * 7333 * Description: 7334 * One second timer 7335 * 7336 * Context: Interrupt 7337 ***************************************************************************/ 7338 void 7339 qla2x00_timer(struct timer_list *t) 7340 { 7341 scsi_qla_host_t *vha = from_timer(vha, t, timer); 7342 unsigned long cpu_flags = 0; 7343 int start_dpc = 0; 7344 int index; 7345 srb_t *sp; 7346 uint16_t w; 7347 struct qla_hw_data *ha = vha->hw; 7348 struct req_que *req; 7349 unsigned long flags; 7350 fc_port_t *fcport = NULL; 7351 7352 if (ha->flags.eeh_busy) { 7353 qla_wind_down_chip(vha); 7354 7355 ql_dbg(ql_dbg_timer, vha, 0x6000, 7356 "EEH = %d, restarting timer.\n", 7357 ha->flags.eeh_busy); 7358 qla2x00_restart_timer(vha, WATCH_INTERVAL); 7359 return; 7360 } 7361 7362 /* 7363 * Hardware read to raise pending EEH errors during mailbox waits. If 7364 * the read returns -1 then disable the board. 
7365 */ 7366 if (!pci_channel_offline(ha->pdev)) { 7367 pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w); 7368 qla2x00_check_reg16_for_disconnect(vha, w); 7369 } 7370 7371 /* Make sure qla82xx_watchdog is run only for physical port */ 7372 if (!vha->vp_idx && IS_P3P_TYPE(ha)) { 7373 if (test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) 7374 start_dpc++; 7375 if (IS_QLA82XX(ha)) 7376 qla82xx_watchdog(vha); 7377 else if (IS_QLA8044(ha)) 7378 qla8044_watchdog(vha); 7379 } 7380 7381 if (!vha->vp_idx && IS_QLAFX00(ha)) 7382 qlafx00_timer_routine(vha); 7383 7384 if (vha->link_down_time < QLA2XX_MAX_LINK_DOWN_TIME) 7385 vha->link_down_time++; 7386 7387 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); 7388 list_for_each_entry(fcport, &vha->vp_fcports, list) { 7389 if (fcport->tgt_link_down_time < QLA2XX_MAX_LINK_DOWN_TIME) 7390 fcport->tgt_link_down_time++; 7391 } 7392 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 7393 7394 /* Loop down handler. */ 7395 if (atomic_read(&vha->loop_down_timer) > 0 && 7396 !(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) && 7397 !(test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags)) 7398 && vha->flags.online) { 7399 7400 if (atomic_read(&vha->loop_down_timer) == 7401 vha->loop_down_abort_time) { 7402 7403 ql_log(ql_log_info, vha, 0x6008, 7404 "Loop down - aborting the queues before time expires.\n"); 7405 7406 if (!IS_QLA2100(ha) && vha->link_down_timeout) 7407 atomic_set(&vha->loop_state, LOOP_DEAD); 7408 7409 /* 7410 * Schedule an ISP abort to return any FCP2-device 7411 * commands. 7412 */ 7413 /* NPIV - scan physical port only */ 7414 if (!vha->vp_idx) { 7415 spin_lock_irqsave(&ha->hardware_lock, 7416 cpu_flags); 7417 req = ha->req_q_map[0]; 7418 for (index = 1; 7419 index < req->num_outstanding_cmds; 7420 index++) { 7421 fc_port_t *sfcp; 7422 7423 sp = req->outstanding_cmds[index]; 7424 if (!sp) 7425 continue; 7426 if (sp->cmd_type != TYPE_SRB) 7427 continue; 7428 if (sp->type != SRB_SCSI_CMD) 7429 continue; 7430 sfcp = sp->fcport; 7431 if (!(sfcp->flags & FCF_FCP2_DEVICE)) 7432 continue; 7433 7434 if (IS_QLA82XX(ha)) 7435 set_bit(FCOE_CTX_RESET_NEEDED, 7436 &vha->dpc_flags); 7437 else 7438 set_bit(ISP_ABORT_NEEDED, 7439 &vha->dpc_flags); 7440 break; 7441 } 7442 spin_unlock_irqrestore(&ha->hardware_lock, 7443 cpu_flags); 7444 } 7445 start_dpc++; 7446 } 7447 7448 /* if the loop has been down for 4 minutes, reinit adapter */ 7449 if (atomic_dec_and_test(&vha->loop_down_timer) != 0) { 7450 if (!(vha->device_flags & DFLG_NO_CABLE)) { 7451 ql_log(ql_log_warn, vha, 0x6009, 7452 "Loop down - aborting ISP.\n"); 7453 7454 if (IS_QLA82XX(ha)) 7455 set_bit(FCOE_CTX_RESET_NEEDED, 7456 &vha->dpc_flags); 7457 else 7458 set_bit(ISP_ABORT_NEEDED, 7459 &vha->dpc_flags); 7460 } 7461 } 7462 ql_dbg(ql_dbg_timer, vha, 0x600a, 7463 "Loop down - seconds remaining %d.\n", 7464 atomic_read(&vha->loop_down_timer)); 7465 } 7466 /* Check if beacon LED needs to be blinked for physical host only */ 7467 if (!vha->vp_idx && (ha->beacon_blink_led == 1)) { 7468 /* There is no beacon_blink function for ISP82xx */ 7469 if (!IS_P3P_TYPE(ha)) { 7470 set_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags); 7471 start_dpc++; 7472 } 7473 } 7474 7475 /* check if edif running */ 7476 if (vha->hw->flags.edif_enabled) 7477 qla_edif_timer(vha); 7478 7479 /* Process any deferred work. 
*/ 7480 if (!list_empty(&vha->work_list)) { 7481 unsigned long flags; 7482 bool q = false; 7483 7484 spin_lock_irqsave(&vha->work_lock, flags); 7485 if (!test_and_set_bit(IOCB_WORK_ACTIVE, &vha->dpc_flags)) 7486 q = true; 7487 spin_unlock_irqrestore(&vha->work_lock, flags); 7488 if (q) 7489 queue_work(vha->hw->wq, &vha->iocb_work); 7490 } 7491 7492 /* 7493 * FC-NVME 7494 * see if the active AEN count has changed from what was last reported. 7495 */ 7496 index = atomic_read(&ha->nvme_active_aen_cnt); 7497 if (!vha->vp_idx && 7498 (index != ha->nvme_last_rptd_aen) && 7499 ha->zio_mode == QLA_ZIO_MODE_6 && 7500 !ha->flags.host_shutting_down) { 7501 ha->nvme_last_rptd_aen = atomic_read(&ha->nvme_active_aen_cnt); 7502 ql_log(ql_log_info, vha, 0x3002, 7503 "nvme: Sched: Set ZIO exchange threshold to %d.\n", 7504 ha->nvme_last_rptd_aen); 7505 set_bit(SET_ZIO_THRESHOLD_NEEDED, &vha->dpc_flags); 7506 start_dpc++; 7507 } 7508 7509 if (!vha->vp_idx && 7510 atomic_read(&ha->zio_threshold) != ha->last_zio_threshold && 7511 IS_ZIO_THRESHOLD_CAPABLE(ha)) { 7512 ql_log(ql_log_info, vha, 0x3002, 7513 "Sched: Set ZIO exchange threshold to %d.\n", 7514 ha->last_zio_threshold); 7515 ha->last_zio_threshold = atomic_read(&ha->zio_threshold); 7516 set_bit(SET_ZIO_THRESHOLD_NEEDED, &vha->dpc_flags); 7517 start_dpc++; 7518 } 7519 7520 /* borrowing w to signify dpc will run */ 7521 w = 0; 7522 /* Schedule the DPC routine if needed */ 7523 if ((test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) || 7524 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) || 7525 test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags) || 7526 start_dpc || 7527 test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) || 7528 test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags) || 7529 test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags) || 7530 test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) || 7531 test_bit(VP_DPC_NEEDED, &vha->dpc_flags) || 7532 test_bit(RELOGIN_NEEDED, &vha->dpc_flags) || 7533 test_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags))) { 7534 ql_dbg(ql_dbg_timer, vha, 0x600b, 7535 "isp_abort_needed=%d loop_resync_needed=%d " 7536 "fcport_update_needed=%d start_dpc=%d " 7537 "reset_marker_needed=%d", 7538 test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags), 7539 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags), 7540 test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags), 7541 start_dpc, 7542 test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags)); 7543 ql_dbg(ql_dbg_timer, vha, 0x600c, 7544 "beacon_blink_needed=%d isp_unrecoverable=%d " 7545 "fcoe_ctx_reset_needed=%d vp_dpc_needed=%d " 7546 "relogin_needed=%d, Process_purex_iocb=%d.\n", 7547 test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags), 7548 test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags), 7549 test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags), 7550 test_bit(VP_DPC_NEEDED, &vha->dpc_flags), 7551 test_bit(RELOGIN_NEEDED, &vha->dpc_flags), 7552 test_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags)); 7553 qla2xxx_wake_dpc(vha); 7554 w = 1; 7555 } 7556 7557 qla_heart_beat(vha, w); 7558 7559 qla2x00_restart_timer(vha, WATCH_INTERVAL); 7560 } 7561 7562 /* Firmware interface routines. 
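 * Firmware images are fetched on demand through request_firmware(), cached
 * per ISP type in qla_fw_blobs[] under qla_fw_lock, and released again when
 * the module unloads.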
*/ 7563 7564 #define FW_ISP21XX 0 7565 #define FW_ISP22XX 1 7566 #define FW_ISP2300 2 7567 #define FW_ISP2322 3 7568 #define FW_ISP24XX 4 7569 #define FW_ISP25XX 5 7570 #define FW_ISP81XX 6 7571 #define FW_ISP82XX 7 7572 #define FW_ISP2031 8 7573 #define FW_ISP8031 9 7574 #define FW_ISP27XX 10 7575 #define FW_ISP28XX 11 7576 7577 #define FW_FILE_ISP21XX "ql2100_fw.bin" 7578 #define FW_FILE_ISP22XX "ql2200_fw.bin" 7579 #define FW_FILE_ISP2300 "ql2300_fw.bin" 7580 #define FW_FILE_ISP2322 "ql2322_fw.bin" 7581 #define FW_FILE_ISP24XX "ql2400_fw.bin" 7582 #define FW_FILE_ISP25XX "ql2500_fw.bin" 7583 #define FW_FILE_ISP81XX "ql8100_fw.bin" 7584 #define FW_FILE_ISP82XX "ql8200_fw.bin" 7585 #define FW_FILE_ISP2031 "ql2600_fw.bin" 7586 #define FW_FILE_ISP8031 "ql8300_fw.bin" 7587 #define FW_FILE_ISP27XX "ql2700_fw.bin" 7588 #define FW_FILE_ISP28XX "ql2800_fw.bin" 7589 7590 7591 static DEFINE_MUTEX(qla_fw_lock); 7592 7593 static struct fw_blob qla_fw_blobs[] = { 7594 { .name = FW_FILE_ISP21XX, .segs = { 0x1000, 0 }, }, 7595 { .name = FW_FILE_ISP22XX, .segs = { 0x1000, 0 }, }, 7596 { .name = FW_FILE_ISP2300, .segs = { 0x800, 0 }, }, 7597 { .name = FW_FILE_ISP2322, .segs = { 0x800, 0x1c000, 0x1e000, 0 }, }, 7598 { .name = FW_FILE_ISP24XX, }, 7599 { .name = FW_FILE_ISP25XX, }, 7600 { .name = FW_FILE_ISP81XX, }, 7601 { .name = FW_FILE_ISP82XX, }, 7602 { .name = FW_FILE_ISP2031, }, 7603 { .name = FW_FILE_ISP8031, }, 7604 { .name = FW_FILE_ISP27XX, }, 7605 { .name = FW_FILE_ISP28XX, }, 7606 { .name = NULL, }, 7607 }; 7608 7609 struct fw_blob * 7610 qla2x00_request_firmware(scsi_qla_host_t *vha) 7611 { 7612 struct qla_hw_data *ha = vha->hw; 7613 struct fw_blob *blob; 7614 7615 if (IS_QLA2100(ha)) { 7616 blob = &qla_fw_blobs[FW_ISP21XX]; 7617 } else if (IS_QLA2200(ha)) { 7618 blob = &qla_fw_blobs[FW_ISP22XX]; 7619 } else if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) { 7620 blob = &qla_fw_blobs[FW_ISP2300]; 7621 } else if (IS_QLA2322(ha) || IS_QLA6322(ha)) { 7622 blob = &qla_fw_blobs[FW_ISP2322]; 7623 } else if (IS_QLA24XX_TYPE(ha)) { 7624 blob = &qla_fw_blobs[FW_ISP24XX]; 7625 } else if (IS_QLA25XX(ha)) { 7626 blob = &qla_fw_blobs[FW_ISP25XX]; 7627 } else if (IS_QLA81XX(ha)) { 7628 blob = &qla_fw_blobs[FW_ISP81XX]; 7629 } else if (IS_QLA82XX(ha)) { 7630 blob = &qla_fw_blobs[FW_ISP82XX]; 7631 } else if (IS_QLA2031(ha)) { 7632 blob = &qla_fw_blobs[FW_ISP2031]; 7633 } else if (IS_QLA8031(ha)) { 7634 blob = &qla_fw_blobs[FW_ISP8031]; 7635 } else if (IS_QLA27XX(ha)) { 7636 blob = &qla_fw_blobs[FW_ISP27XX]; 7637 } else if (IS_QLA28XX(ha)) { 7638 blob = &qla_fw_blobs[FW_ISP28XX]; 7639 } else { 7640 return NULL; 7641 } 7642 7643 if (!blob->name) 7644 return NULL; 7645 7646 mutex_lock(&qla_fw_lock); 7647 if (blob->fw) 7648 goto out; 7649 7650 if (request_firmware(&blob->fw, blob->name, &ha->pdev->dev)) { 7651 ql_log(ql_log_warn, vha, 0x0063, 7652 "Failed to load firmware image (%s).\n", blob->name); 7653 blob->fw = NULL; 7654 blob = NULL; 7655 } 7656 7657 out: 7658 mutex_unlock(&qla_fw_lock); 7659 return blob; 7660 } 7661 7662 static void 7663 qla2x00_release_firmware(void) 7664 { 7665 struct fw_blob *blob; 7666 7667 mutex_lock(&qla_fw_lock); 7668 for (blob = qla_fw_blobs; blob->name; blob++) 7669 release_firmware(blob->fw); 7670 mutex_unlock(&qla_fw_lock); 7671 } 7672 7673 static void qla_pci_error_cleanup(scsi_qla_host_t *vha) 7674 { 7675 struct qla_hw_data *ha = vha->hw; 7676 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 7677 struct qla_qpair *qpair = NULL; 7678 struct scsi_qla_host *vp, *tvp; 
7679 fc_port_t *fcport; 7680 int i; 7681 unsigned long flags; 7682 7683 ql_dbg(ql_dbg_aer, vha, 0x9000, 7684 "%s\n", __func__); 7685 ha->chip_reset++; 7686 7687 ha->base_qpair->chip_reset = ha->chip_reset; 7688 for (i = 0; i < ha->max_qpairs; i++) { 7689 if (ha->queue_pair_map[i]) 7690 ha->queue_pair_map[i]->chip_reset = 7691 ha->base_qpair->chip_reset; 7692 } 7693 7694 /* 7695 * purge mailbox might take a while. Slot Reset/chip reset 7696 * will take care of the purge 7697 */ 7698 7699 mutex_lock(&ha->mq_lock); 7700 ha->base_qpair->online = 0; 7701 list_for_each_entry(qpair, &base_vha->qp_list, qp_list_elem) 7702 qpair->online = 0; 7703 wmb(); 7704 mutex_unlock(&ha->mq_lock); 7705 7706 qla2x00_mark_all_devices_lost(vha); 7707 7708 spin_lock_irqsave(&ha->vport_slock, flags); 7709 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { 7710 atomic_inc(&vp->vref_count); 7711 spin_unlock_irqrestore(&ha->vport_slock, flags); 7712 qla2x00_mark_all_devices_lost(vp); 7713 spin_lock_irqsave(&ha->vport_slock, flags); 7714 atomic_dec(&vp->vref_count); 7715 } 7716 spin_unlock_irqrestore(&ha->vport_slock, flags); 7717 7718 /* Clear all async request states across all VPs. */ 7719 list_for_each_entry(fcport, &vha->vp_fcports, list) 7720 fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT); 7721 7722 spin_lock_irqsave(&ha->vport_slock, flags); 7723 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { 7724 atomic_inc(&vp->vref_count); 7725 spin_unlock_irqrestore(&ha->vport_slock, flags); 7726 list_for_each_entry(fcport, &vp->vp_fcports, list) 7727 fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT); 7728 spin_lock_irqsave(&ha->vport_slock, flags); 7729 atomic_dec(&vp->vref_count); 7730 } 7731 spin_unlock_irqrestore(&ha->vport_slock, flags); 7732 } 7733 7734 7735 static pci_ers_result_t 7736 qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) 7737 { 7738 scsi_qla_host_t *vha = pci_get_drvdata(pdev); 7739 struct qla_hw_data *ha = vha->hw; 7740 pci_ers_result_t ret = PCI_ERS_RESULT_NEED_RESET; 7741 7742 ql_log(ql_log_warn, vha, 0x9000, 7743 "PCI error detected, state %x.\n", state); 7744 ha->pci_error_state = QLA_PCI_ERR_DETECTED; 7745 7746 if (!atomic_read(&pdev->enable_cnt)) { 7747 ql_log(ql_log_info, vha, 0xffff, 7748 "PCI device is disabled,state %x\n", state); 7749 ret = PCI_ERS_RESULT_NEED_RESET; 7750 goto out; 7751 } 7752 7753 switch (state) { 7754 case pci_channel_io_normal: 7755 qla_pci_set_eeh_busy(vha); 7756 if (ql2xmqsupport || ql2xnvmeenable) { 7757 set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags); 7758 qla2xxx_wake_dpc(vha); 7759 } 7760 ret = PCI_ERS_RESULT_CAN_RECOVER; 7761 break; 7762 case pci_channel_io_frozen: 7763 qla_pci_set_eeh_busy(vha); 7764 ret = PCI_ERS_RESULT_NEED_RESET; 7765 break; 7766 case pci_channel_io_perm_failure: 7767 ha->flags.pci_channel_io_perm_failure = 1; 7768 qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16); 7769 if (ql2xmqsupport || ql2xnvmeenable) { 7770 set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags); 7771 qla2xxx_wake_dpc(vha); 7772 } 7773 ret = PCI_ERS_RESULT_DISCONNECT; 7774 } 7775 out: 7776 ql_dbg(ql_dbg_aer, vha, 0x600d, 7777 "PCI error detected returning [%x].\n", ret); 7778 return ret; 7779 } 7780 7781 static pci_ers_result_t 7782 qla2xxx_pci_mmio_enabled(struct pci_dev *pdev) 7783 { 7784 int risc_paused = 0; 7785 uint32_t stat; 7786 unsigned long flags; 7787 scsi_qla_host_t *base_vha = pci_get_drvdata(pdev); 7788 struct qla_hw_data *ha = base_vha->hw; 7789 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 7790 
	struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;

	ql_log(ql_log_warn, base_vha, 0x9000,
	    "mmio enabled\n");

	ha->pci_error_state = QLA_PCI_MMIO_ENABLED;

	if (IS_QLA82XX(ha))
		return PCI_ERS_RESULT_RECOVERED;

	if (qla2x00_isp_reg_stat(ha)) {
		ql_log(ql_log_info, base_vha, 0x803f,
		    "During mmio enabled, PCI/Register disconnect still detected.\n");
		goto out;
	}

	spin_lock_irqsave(&ha->hardware_lock, flags);
	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
		stat = rd_reg_word(&reg->hccr);
		if (stat & HCCR_RISC_PAUSE)
			risc_paused = 1;
	} else if (IS_QLA23XX(ha)) {
		stat = rd_reg_dword(&reg->u.isp2300.host_status);
		if (stat & HSR_RISC_PAUSED)
			risc_paused = 1;
	} else if (IS_FWI2_CAPABLE(ha)) {
		stat = rd_reg_dword(&reg24->host_status);
		if (stat & HSRX_RISC_PAUSED)
			risc_paused = 1;
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (risc_paused) {
		ql_log(ql_log_info, base_vha, 0x9003,
		    "RISC paused -- mmio_enabled, Dumping firmware.\n");
		qla2xxx_dump_fw(base_vha);
	}
out:
	/* set PCI_ERS_RESULT_NEED_RESET to trigger call to qla2xxx_pci_slot_reset */
	ql_dbg(ql_dbg_aer, base_vha, 0x600d,
	    "mmio enabled returning.\n");
	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t
qla2xxx_pci_slot_reset(struct pci_dev *pdev)
{
	pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT;
	scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
	struct qla_hw_data *ha = base_vha->hw;
	int rc;
	struct qla_qpair *qpair = NULL;

	ql_log(ql_log_warn, base_vha, 0x9004,
	    "Slot Reset.\n");

	ha->pci_error_state = QLA_PCI_SLOT_RESET;
	/* Workaround: the qla2xxx driver, which accesses hardware early in
	 * recovery, needs the error state to be pci_channel_io_normal;
	 * otherwise mailbox commands time out.
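	 * (pci_channel_offline() keys off pdev->error_state, so forcing the
	 * state back to normal lets the register and mailbox accesses issued
	 * by the reset path below proceed).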
	 */
	pdev->error_state = pci_channel_io_normal;

	pci_restore_state(pdev);

	/* pci_restore_state() clears the device's saved_state flag, so save
	 * the state again here to keep a saved copy available for any later
	 * restore.
	 */
	pci_save_state(pdev);

	if (ha->mem_only)
		rc = pci_enable_device_mem(pdev);
	else
		rc = pci_enable_device(pdev);

	if (rc) {
		ql_log(ql_log_warn, base_vha, 0x9005,
		    "Can't re-enable PCI device after reset.\n");
		goto exit_slot_reset;
	}

	if (ha->isp_ops->pci_config(base_vha))
		goto exit_slot_reset;

	mutex_lock(&ha->mq_lock);
	list_for_each_entry(qpair, &base_vha->qp_list, qp_list_elem)
		qpair->online = 1;
	mutex_unlock(&ha->mq_lock);

	ha->flags.eeh_busy = 0;
	base_vha->flags.online = 1;
	set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
	ha->isp_ops->abort_isp(base_vha);
	clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);

	if (qla2x00_isp_reg_stat(ha)) {
		ha->flags.eeh_busy = 1;
		qla_pci_error_cleanup(base_vha);
		ql_log(ql_log_warn, base_vha, 0x9005,
		    "Device unable to recover from PCI error.\n");
	} else {
		ret = PCI_ERS_RESULT_RECOVERED;
	}

exit_slot_reset:
	ql_dbg(ql_dbg_aer, base_vha, 0x900e,
	    "Slot Reset returning %x.\n", ret);

	return ret;
}

static void
qla2xxx_pci_resume(struct pci_dev *pdev)
{
	scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
	struct qla_hw_data *ha = base_vha->hw;
	int ret;

	ql_log(ql_log_warn, base_vha, 0x900f,
	    "Pci Resume.\n");

	ret = qla2x00_wait_for_hba_online(base_vha);
	if (ret != QLA_SUCCESS) {
		ql_log(ql_log_fatal, base_vha, 0x9002,
		    "The device failed to resume I/O from slot/link_reset.\n");
	}
	ha->pci_error_state = QLA_PCI_RESUME;
	ql_dbg(ql_dbg_aer, base_vha, 0x600d,
	    "Pci Resume returning.\n");
}

void qla_pci_set_eeh_busy(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	bool do_cleanup = false;
	unsigned long flags;

	if (ha->flags.eeh_busy)
		return;

	spin_lock_irqsave(&base_vha->work_lock, flags);
	if (!ha->flags.eeh_busy) {
		ha->eeh_jif = jiffies;
		ha->flags.eeh_flush = 0;

		ha->flags.eeh_busy = 1;
		do_cleanup = true;
	}
	spin_unlock_irqrestore(&base_vha->work_lock, flags);

	if (do_cleanup)
		qla_pci_error_cleanup(base_vha);
}

/*
 * This routine schedules a task to pause I/O. It may be called from
 * interrupt context when the caller sees a PCIe error event (register
 * reads returning all 0xf's).
 */
void qla_schedule_eeh_work(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);

	if (ha->flags.eeh_busy)
		return;

	set_bit(DO_EEH_RECOVERY, &base_vha->dpc_flags);
	qla2xxx_wake_dpc(base_vha);
}

static void
qla_pci_reset_prepare(struct pci_dev *pdev)
{
	scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
	struct qla_hw_data *ha = base_vha->hw;
	struct qla_qpair *qpair;

	ql_log(ql_log_warn, base_vha, 0xffff,
	    "%s.\n", __func__);

	/*
	 * PCI FLR/function reset is about to reset the
	 * slot. Stop the chip to stop all DMA access.
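	 * Marking every queue pair offline below keeps new I/O from being
	 * issued while the function-level reset is in progress.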
7976 * It is assumed that pci_reset_done will be called 7977 * after FLR to resume Chip operation. 7978 */ 7979 ha->flags.eeh_busy = 1; 7980 mutex_lock(&ha->mq_lock); 7981 list_for_each_entry(qpair, &base_vha->qp_list, qp_list_elem) 7982 qpair->online = 0; 7983 mutex_unlock(&ha->mq_lock); 7984 7985 set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); 7986 qla2x00_abort_isp_cleanup(base_vha); 7987 qla2x00_abort_all_cmds(base_vha, DID_RESET << 16); 7988 } 7989 7990 static void 7991 qla_pci_reset_done(struct pci_dev *pdev) 7992 { 7993 scsi_qla_host_t *base_vha = pci_get_drvdata(pdev); 7994 struct qla_hw_data *ha = base_vha->hw; 7995 struct qla_qpair *qpair; 7996 7997 ql_log(ql_log_warn, base_vha, 0xffff, 7998 "%s.\n", __func__); 7999 8000 /* 8001 * FLR just completed by PCI layer. Resume adapter 8002 */ 8003 ha->flags.eeh_busy = 0; 8004 mutex_lock(&ha->mq_lock); 8005 list_for_each_entry(qpair, &base_vha->qp_list, qp_list_elem) 8006 qpair->online = 1; 8007 mutex_unlock(&ha->mq_lock); 8008 8009 base_vha->flags.online = 1; 8010 ha->isp_ops->abort_isp(base_vha); 8011 clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); 8012 } 8013 8014 static void qla2xxx_map_queues(struct Scsi_Host *shost) 8015 { 8016 scsi_qla_host_t *vha = (scsi_qla_host_t *)shost->hostdata; 8017 struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT]; 8018 8019 if (USER_CTRL_IRQ(vha->hw) || !vha->hw->mqiobase) 8020 blk_mq_map_queues(qmap); 8021 else 8022 blk_mq_pci_map_queues(qmap, vha->hw->pdev, vha->irq_offset); 8023 } 8024 8025 struct scsi_host_template qla2xxx_driver_template = { 8026 .module = THIS_MODULE, 8027 .name = QLA2XXX_DRIVER_NAME, 8028 .queuecommand = qla2xxx_queuecommand, 8029 8030 .eh_timed_out = fc_eh_timed_out, 8031 .eh_abort_handler = qla2xxx_eh_abort, 8032 .eh_should_retry_cmd = fc_eh_should_retry_cmd, 8033 .eh_device_reset_handler = qla2xxx_eh_device_reset, 8034 .eh_target_reset_handler = qla2xxx_eh_target_reset, 8035 .eh_bus_reset_handler = qla2xxx_eh_bus_reset, 8036 .eh_host_reset_handler = qla2xxx_eh_host_reset, 8037 8038 .slave_configure = qla2xxx_slave_configure, 8039 8040 .slave_alloc = qla2xxx_slave_alloc, 8041 .slave_destroy = qla2xxx_slave_destroy, 8042 .scan_finished = qla2xxx_scan_finished, 8043 .scan_start = qla2xxx_scan_start, 8044 .change_queue_depth = scsi_change_queue_depth, 8045 .map_queues = qla2xxx_map_queues, 8046 .this_id = -1, 8047 .cmd_per_lun = 3, 8048 .sg_tablesize = SG_ALL, 8049 8050 .max_sectors = 0xFFFF, 8051 .shost_groups = qla2x00_host_groups, 8052 8053 .supported_mode = MODE_INITIATOR, 8054 .track_queue_depth = 1, 8055 .cmd_size = sizeof(srb_t), 8056 }; 8057 8058 static const struct pci_error_handlers qla2xxx_err_handler = { 8059 .error_detected = qla2xxx_pci_error_detected, 8060 .mmio_enabled = qla2xxx_pci_mmio_enabled, 8061 .slot_reset = qla2xxx_pci_slot_reset, 8062 .resume = qla2xxx_pci_resume, 8063 .reset_prepare = qla_pci_reset_prepare, 8064 .reset_done = qla_pci_reset_done, 8065 }; 8066 8067 static struct pci_device_id qla2xxx_pci_tbl[] = { 8068 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2100) }, 8069 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2200) }, 8070 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2300) }, 8071 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2312) }, 8072 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2322) }, 8073 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP6312) }, 8074 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP6322) }, 8075 { 
PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2422) }, 8076 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2432) }, 8077 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8432) }, 8078 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5422) }, 8079 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5432) }, 8080 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2532) }, 8081 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2031) }, 8082 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8001) }, 8083 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8021) }, 8084 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8031) }, 8085 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISPF001) }, 8086 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8044) }, 8087 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2071) }, 8088 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2271) }, 8089 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2261) }, 8090 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2061) }, 8091 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2081) }, 8092 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2281) }, 8093 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2089) }, 8094 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2289) }, 8095 { 0 }, 8096 }; 8097 MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl); 8098 8099 static struct pci_driver qla2xxx_pci_driver = { 8100 .name = QLA2XXX_DRIVER_NAME, 8101 .driver = { 8102 .owner = THIS_MODULE, 8103 }, 8104 .id_table = qla2xxx_pci_tbl, 8105 .probe = qla2x00_probe_one, 8106 .remove = qla2x00_remove_one, 8107 .shutdown = qla2x00_shutdown, 8108 .err_handler = &qla2xxx_err_handler, 8109 }; 8110 8111 static const struct file_operations apidev_fops = { 8112 .owner = THIS_MODULE, 8113 .llseek = noop_llseek, 8114 }; 8115 8116 /** 8117 * qla2x00_module_init - Module initialization. 
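 *
 * Returns 0 on success, or a negative errno when the SRB cache, target-mode
 * setup, FC transport templates, or PCI driver registration cannot be set up.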
8118 **/ 8119 static int __init 8120 qla2x00_module_init(void) 8121 { 8122 int ret = 0; 8123 8124 BUILD_BUG_ON(sizeof(cmd_a64_entry_t) != 64); 8125 BUILD_BUG_ON(sizeof(cmd_entry_t) != 64); 8126 BUILD_BUG_ON(sizeof(cont_a64_entry_t) != 64); 8127 BUILD_BUG_ON(sizeof(cont_entry_t) != 64); 8128 BUILD_BUG_ON(sizeof(init_cb_t) != 96); 8129 BUILD_BUG_ON(sizeof(mrk_entry_t) != 64); 8130 BUILD_BUG_ON(sizeof(ms_iocb_entry_t) != 64); 8131 BUILD_BUG_ON(sizeof(request_t) != 64); 8132 BUILD_BUG_ON(sizeof(struct abort_entry_24xx) != 64); 8133 BUILD_BUG_ON(sizeof(struct abort_iocb_entry_fx00) != 64); 8134 BUILD_BUG_ON(sizeof(struct abts_entry_24xx) != 64); 8135 BUILD_BUG_ON(sizeof(struct access_chip_84xx) != 64); 8136 BUILD_BUG_ON(sizeof(struct access_chip_rsp_84xx) != 64); 8137 BUILD_BUG_ON(sizeof(struct cmd_bidir) != 64); 8138 BUILD_BUG_ON(sizeof(struct cmd_nvme) != 64); 8139 BUILD_BUG_ON(sizeof(struct cmd_type_6) != 64); 8140 BUILD_BUG_ON(sizeof(struct cmd_type_7) != 64); 8141 BUILD_BUG_ON(sizeof(struct cmd_type_7_fx00) != 64); 8142 BUILD_BUG_ON(sizeof(struct cmd_type_crc_2) != 64); 8143 BUILD_BUG_ON(sizeof(struct ct_entry_24xx) != 64); 8144 BUILD_BUG_ON(sizeof(struct ct_fdmi1_hba_attributes) != 2604); 8145 BUILD_BUG_ON(sizeof(struct ct_fdmi2_hba_attributes) != 4424); 8146 BUILD_BUG_ON(sizeof(struct ct_fdmi2_port_attributes) != 4164); 8147 BUILD_BUG_ON(sizeof(struct ct_fdmi_hba_attr) != 260); 8148 BUILD_BUG_ON(sizeof(struct ct_fdmi_port_attr) != 260); 8149 BUILD_BUG_ON(sizeof(struct ct_rsp_hdr) != 16); 8150 BUILD_BUG_ON(sizeof(struct ctio_crc2_to_fw) != 64); 8151 BUILD_BUG_ON(sizeof(struct device_reg_24xx) != 256); 8152 BUILD_BUG_ON(sizeof(struct device_reg_25xxmq) != 24); 8153 BUILD_BUG_ON(sizeof(struct device_reg_2xxx) != 256); 8154 BUILD_BUG_ON(sizeof(struct device_reg_82xx) != 1288); 8155 BUILD_BUG_ON(sizeof(struct device_reg_fx00) != 216); 8156 BUILD_BUG_ON(sizeof(struct els_entry_24xx) != 64); 8157 BUILD_BUG_ON(sizeof(struct els_sts_entry_24xx) != 64); 8158 BUILD_BUG_ON(sizeof(struct fxdisc_entry_fx00) != 64); 8159 BUILD_BUG_ON(sizeof(struct imm_ntfy_from_isp) != 64); 8160 BUILD_BUG_ON(sizeof(struct init_cb_24xx) != 128); 8161 BUILD_BUG_ON(sizeof(struct init_cb_81xx) != 128); 8162 BUILD_BUG_ON(sizeof(struct logio_entry_24xx) != 64); 8163 BUILD_BUG_ON(sizeof(struct mbx_entry) != 64); 8164 BUILD_BUG_ON(sizeof(struct mid_init_cb_24xx) != 5252); 8165 BUILD_BUG_ON(sizeof(struct mrk_entry_24xx) != 64); 8166 BUILD_BUG_ON(sizeof(struct nvram_24xx) != 512); 8167 BUILD_BUG_ON(sizeof(struct nvram_81xx) != 512); 8168 BUILD_BUG_ON(sizeof(struct pt_ls4_request) != 64); 8169 BUILD_BUG_ON(sizeof(struct pt_ls4_rx_unsol) != 64); 8170 BUILD_BUG_ON(sizeof(struct purex_entry_24xx) != 64); 8171 BUILD_BUG_ON(sizeof(struct qla2100_fw_dump) != 123634); 8172 BUILD_BUG_ON(sizeof(struct qla2300_fw_dump) != 136100); 8173 BUILD_BUG_ON(sizeof(struct qla24xx_fw_dump) != 37976); 8174 BUILD_BUG_ON(sizeof(struct qla25xx_fw_dump) != 39228); 8175 BUILD_BUG_ON(sizeof(struct qla2xxx_fce_chain) != 52); 8176 BUILD_BUG_ON(sizeof(struct qla2xxx_fw_dump) != 136172); 8177 BUILD_BUG_ON(sizeof(struct qla2xxx_mq_chain) != 524); 8178 BUILD_BUG_ON(sizeof(struct qla2xxx_mqueue_chain) != 8); 8179 BUILD_BUG_ON(sizeof(struct qla2xxx_mqueue_header) != 12); 8180 BUILD_BUG_ON(sizeof(struct qla2xxx_offld_chain) != 24); 8181 BUILD_BUG_ON(sizeof(struct qla81xx_fw_dump) != 39420); 8182 BUILD_BUG_ON(sizeof(struct qla82xx_uri_data_desc) != 28); 8183 BUILD_BUG_ON(sizeof(struct qla82xx_uri_table_desc) != 32); 8184 BUILD_BUG_ON(sizeof(struct qla83xx_fw_dump) != 
51196);
	BUILD_BUG_ON(sizeof(struct qla_fcp_prio_cfg) != FCP_PRIO_CFG_SIZE);
	BUILD_BUG_ON(sizeof(struct qla_fdt_layout) != 128);
	BUILD_BUG_ON(sizeof(struct qla_flt_header) != 8);
	BUILD_BUG_ON(sizeof(struct qla_flt_region) != 16);
	BUILD_BUG_ON(sizeof(struct qla_npiv_entry) != 24);
	BUILD_BUG_ON(sizeof(struct qla_npiv_header) != 16);
	BUILD_BUG_ON(sizeof(struct rdp_rsp_payload) != 336);
	BUILD_BUG_ON(sizeof(struct sns_cmd_pkt) != 2064);
	BUILD_BUG_ON(sizeof(struct sts_entry_24xx) != 64);
	BUILD_BUG_ON(sizeof(struct tsk_mgmt_entry) != 64);
	BUILD_BUG_ON(sizeof(struct tsk_mgmt_entry_fx00) != 64);
	BUILD_BUG_ON(sizeof(struct verify_chip_entry_84xx) != 64);
	BUILD_BUG_ON(sizeof(struct verify_chip_rsp_84xx) != 52);
	BUILD_BUG_ON(sizeof(struct vf_evfp_entry_24xx) != 56);
	BUILD_BUG_ON(sizeof(struct vp_config_entry_24xx) != 64);
	BUILD_BUG_ON(sizeof(struct vp_ctrl_entry_24xx) != 64);
	BUILD_BUG_ON(sizeof(struct vp_rpt_id_entry_24xx) != 64);
	BUILD_BUG_ON(sizeof(sts21_entry_t) != 64);
	BUILD_BUG_ON(sizeof(sts22_entry_t) != 64);
	BUILD_BUG_ON(sizeof(sts_cont_entry_t) != 64);
	BUILD_BUG_ON(sizeof(sts_entry_t) != 64);
	BUILD_BUG_ON(sizeof(sw_info_t) != 32);
	BUILD_BUG_ON(sizeof(target_id_t) != 2);

	qla_trace_init();

	/* Allocate cache for SRBs. */
	srb_cachep = kmem_cache_create("qla2xxx_srbs", sizeof(srb_t), 0,
	    SLAB_HWCACHE_ALIGN, NULL);
	if (srb_cachep == NULL) {
		ql_log(ql_log_fatal, NULL, 0x0001,
		    "Unable to allocate SRB cache...Failing load!.\n");
		return -ENOMEM;
	}

	/* Initialize target kmem_cache and mem_pools */
	ret = qlt_init();
	if (ret < 0) {
		goto destroy_cache;
	} else if (ret > 0) {
		/*
		 * If initiator mode is explicitly disabled by qlt_init(),
		 * prevent scsi_transport_fc.c:fc_scsi_scan_rport() from
		 * performing scsi_scan_target() during LOOP UP event.
		 */
		qla2xxx_transport_functions.disable_target_scan = 1;
		qla2xxx_transport_vport_functions.disable_target_scan = 1;
	}

	/* Derive version string.
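	 * "-debug" is appended when extended error logging is enabled, and the
	 * legacy value 1 is mapped to the preferred default debug mask
	 * (QL_DBG_DEFAULT1_MASK).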
*/ 8235 strcpy(qla2x00_version_str, QLA2XXX_VERSION); 8236 if (ql2xextended_error_logging) 8237 strcat(qla2x00_version_str, "-debug"); 8238 if (ql2xextended_error_logging == 1) 8239 ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK; 8240 8241 qla2xxx_transport_template = 8242 fc_attach_transport(&qla2xxx_transport_functions); 8243 if (!qla2xxx_transport_template) { 8244 ql_log(ql_log_fatal, NULL, 0x0002, 8245 "fc_attach_transport failed...Failing load!.\n"); 8246 ret = -ENODEV; 8247 goto qlt_exit; 8248 } 8249 8250 apidev_major = register_chrdev(0, QLA2XXX_APIDEV, &apidev_fops); 8251 if (apidev_major < 0) { 8252 ql_log(ql_log_fatal, NULL, 0x0003, 8253 "Unable to register char device %s.\n", QLA2XXX_APIDEV); 8254 } 8255 8256 qla2xxx_transport_vport_template = 8257 fc_attach_transport(&qla2xxx_transport_vport_functions); 8258 if (!qla2xxx_transport_vport_template) { 8259 ql_log(ql_log_fatal, NULL, 0x0004, 8260 "fc_attach_transport vport failed...Failing load!.\n"); 8261 ret = -ENODEV; 8262 goto unreg_chrdev; 8263 } 8264 ql_log(ql_log_info, NULL, 0x0005, 8265 "QLogic Fibre Channel HBA Driver: %s.\n", 8266 qla2x00_version_str); 8267 ret = pci_register_driver(&qla2xxx_pci_driver); 8268 if (ret) { 8269 ql_log(ql_log_fatal, NULL, 0x0006, 8270 "pci_register_driver failed...ret=%d Failing load!.\n", 8271 ret); 8272 goto release_vport_transport; 8273 } 8274 return ret; 8275 8276 release_vport_transport: 8277 fc_release_transport(qla2xxx_transport_vport_template); 8278 8279 unreg_chrdev: 8280 if (apidev_major >= 0) 8281 unregister_chrdev(apidev_major, QLA2XXX_APIDEV); 8282 fc_release_transport(qla2xxx_transport_template); 8283 8284 qlt_exit: 8285 qlt_exit(); 8286 8287 destroy_cache: 8288 kmem_cache_destroy(srb_cachep); 8289 8290 qla_trace_uninit(); 8291 return ret; 8292 } 8293 8294 /** 8295 * qla2x00_module_exit - Module cleanup. 8296 **/ 8297 static void __exit 8298 qla2x00_module_exit(void) 8299 { 8300 pci_unregister_driver(&qla2xxx_pci_driver); 8301 qla2x00_release_firmware(); 8302 kmem_cache_destroy(ctx_cachep); 8303 fc_release_transport(qla2xxx_transport_vport_template); 8304 if (apidev_major >= 0) 8305 unregister_chrdev(apidev_major, QLA2XXX_APIDEV); 8306 fc_release_transport(qla2xxx_transport_template); 8307 qlt_exit(); 8308 kmem_cache_destroy(srb_cachep); 8309 qla_trace_uninit(); 8310 } 8311 8312 module_init(qla2x00_module_init); 8313 module_exit(qla2x00_module_exit); 8314 8315 MODULE_AUTHOR("QLogic Corporation"); 8316 MODULE_DESCRIPTION("QLogic Fibre Channel HBA Driver"); 8317 MODULE_LICENSE("GPL"); 8318 MODULE_FIRMWARE(FW_FILE_ISP21XX); 8319 MODULE_FIRMWARE(FW_FILE_ISP22XX); 8320 MODULE_FIRMWARE(FW_FILE_ISP2300); 8321 MODULE_FIRMWARE(FW_FILE_ISP2322); 8322 MODULE_FIRMWARE(FW_FILE_ISP24XX); 8323 MODULE_FIRMWARE(FW_FILE_ISP25XX); 8324