/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/kobject.h>
#include <linux/slab.h>
#include <linux/blk-mq-pci.h>
#include <linux/refcount.h>

#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_fc.h>

#include "qla_target.h"

/*
 * Driver version
 */
char qla2x00_version_str[40];

static int apidev_major;

/*
 * SRB allocation cache
 */
struct kmem_cache *srb_cachep;

/*
 * CT6 CTX allocation cache
 */
static struct kmem_cache *ctx_cachep;
/*
 * error level for logging
 */
uint ql_errlev = 0x8001;

static int ql2xenableclass2;
module_param(ql2xenableclass2, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xenableclass2,
		"Specify if Class 2 operations are supported from the very "
		"beginning. Default is 0 - class 2 not supported.");


int ql2xlogintimeout = 20;
module_param(ql2xlogintimeout, int, S_IRUGO);
MODULE_PARM_DESC(ql2xlogintimeout,
		"Login timeout value in seconds.");

int qlport_down_retry;
module_param(qlport_down_retry, int, S_IRUGO);
MODULE_PARM_DESC(qlport_down_retry,
		"Maximum number of command retries to a port that returns "
		"a PORT-DOWN status.");

int ql2xplogiabsentdevice;
module_param(ql2xplogiabsentdevice, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xplogiabsentdevice,
		"Option to enable PLOGI to devices that are not present after "
		"a Fabric scan. This is needed for several broken switches. "
		"Default is 0 - no PLOGI. 1 - perform PLOGI.");

int ql2xloginretrycount;
module_param(ql2xloginretrycount, int, S_IRUGO);
MODULE_PARM_DESC(ql2xloginretrycount,
		"Specify an alternate value for the NVRAM login retry count.");

int ql2xallocfwdump = 1;
module_param(ql2xallocfwdump, int, S_IRUGO);
MODULE_PARM_DESC(ql2xallocfwdump,
		"Option to enable allocation of memory for a firmware dump "
		"during HBA initialization. Memory allocation requirements "
		"vary by ISP type. Default is 1 - allocate memory.");
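/*
 * Illustrative usage note (not from the original source): more than one
 * logging level is enabled by ORing the mask values at load time, e.g.
 * "modprobe qla2xxx ql2xextended_error_logging=0x60000000" combines
 * Module Init & Probe (0x40000000) with Mailbox Cmnds (0x20000000).
 */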
int ql2xextended_error_logging;
module_param(ql2xextended_error_logging, int, S_IRUGO|S_IWUSR);
module_param_named(logging, ql2xextended_error_logging, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xextended_error_logging,
		"Option to enable extended error logging,\n"
		"\t\tDefault is 0 - no logging. 0x40000000 - Module Init & Probe.\n"
		"\t\t0x20000000 - Mailbox Cmnds. 0x10000000 - Device Discovery.\n"
		"\t\t0x08000000 - IO tracing. 0x04000000 - DPC Thread.\n"
		"\t\t0x02000000 - Async events. 0x01000000 - Timer routines.\n"
		"\t\t0x00800000 - User space. 0x00400000 - Task Management.\n"
		"\t\t0x00200000 - AER/EEH. 0x00100000 - Multi Q.\n"
		"\t\t0x00080000 - P3P Specific. 0x00040000 - Virtual Port.\n"
		"\t\t0x00020000 - Buffer Dump. 0x00010000 - Misc.\n"
		"\t\t0x00008000 - Verbose. 0x00004000 - Target.\n"
		"\t\t0x00002000 - Target Mgmt. 0x00001000 - Target TMF.\n"
		"\t\t0x7fffffff - For enabling all logs, can be too many logs.\n"
		"\t\t0x1e400000 - Preferred value for capturing essential "
		"debug information (equivalent to old "
		"ql2xextended_error_logging=1).\n"
		"\t\tDo LOGICAL OR of the value to enable more than one level");

int ql2xshiftctondsd = 6;
module_param(ql2xshiftctondsd, int, S_IRUGO);
MODULE_PARM_DESC(ql2xshiftctondsd,
		"Set to control shifting of command type processing "
		"based on total number of SG elements.");

int ql2xfdmienable = 1;
module_param(ql2xfdmienable, int, S_IRUGO|S_IWUSR);
module_param_named(fdmi, ql2xfdmienable, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xfdmienable,
		"Enables FDMI registrations. "
		"0 - no FDMI. Default is 1 - perform FDMI.");

#define MAX_Q_DEPTH	64
static int ql2xmaxqdepth = MAX_Q_DEPTH;
module_param(ql2xmaxqdepth, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xmaxqdepth,
		"Maximum queue depth to set for each LUN. "
		"Default is 64.");

#if (IS_ENABLED(CONFIG_NVME_FC))
int ql2xenabledif;
#else
int ql2xenabledif = 2;
#endif
module_param(ql2xenabledif, int, S_IRUGO);
MODULE_PARM_DESC(ql2xenabledif,
		" Enable T10-CRC-DIF:\n"
		" Default is 2.\n"
		"  0 -- No DIF Support\n"
		"  1 -- Enable DIF for all types\n"
		"  2 -- Enable DIF for all types, except Type 0.\n");

#if (IS_ENABLED(CONFIG_NVME_FC))
int ql2xnvmeenable = 1;
#else
int ql2xnvmeenable;
#endif
module_param(ql2xnvmeenable, int, 0644);
MODULE_PARM_DESC(ql2xnvmeenable,
		"Enables NVME support. "
		"0 - no NVMe. Default is Y");

int ql2xenablehba_err_chk = 2;
module_param(ql2xenablehba_err_chk, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xenablehba_err_chk,
		" Enable T10-CRC-DIF Error isolation by HBA:\n"
		" Default is 2.\n"
		"  0 -- Error isolation disabled\n"
		"  1 -- Error isolation enabled only for DIX Type 0\n"
		"  2 -- Error isolation enabled for all Types\n");

int ql2xiidmaenable = 1;
module_param(ql2xiidmaenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xiidmaenable,
		"Enables iIDMA settings. "
		"Default is 1 - perform iIDMA. 0 - no iIDMA.");

int ql2xmqsupport = 1;
module_param(ql2xmqsupport, int, S_IRUGO);
MODULE_PARM_DESC(ql2xmqsupport,
		"Enable on demand multiple queue pairs support. "
		"Default is 1 for supported. "
		"Set it to 0 to turn off mq qpair support.");

int ql2xfwloadbin;
module_param(ql2xfwloadbin, int, S_IRUGO|S_IWUSR);
module_param_named(fwload, ql2xfwloadbin, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xfwloadbin,
		"Option to specify location from which to load ISP firmware:\n"
		" 2 -- load firmware via the request_firmware() (hotplug)\n"
		"      interface.\n"
		" 1 -- load firmware from flash.\n"
		" 0 -- use default semantics.\n");

int ql2xetsenable;
module_param(ql2xetsenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xetsenable,
		"Enables firmware ETS burst. "
		"Default is 0 - skip ETS enablement.");

int ql2xdbwr = 1;
module_param(ql2xdbwr, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xdbwr,
		"Option to specify scheme for request queue posting.\n"
		" 0 -- Regular doorbell.\n"
		" 1 -- CAMRAM doorbell (faster).\n");

int ql2xtargetreset = 1;
module_param(ql2xtargetreset, int, S_IRUGO);
MODULE_PARM_DESC(ql2xtargetreset,
		"Enable target reset. "
		"Default is 1 - use hw defaults.");

197 "Default is 1 - use hw defaults."); 198 199 int ql2xgffidenable; 200 module_param(ql2xgffidenable, int, S_IRUGO); 201 MODULE_PARM_DESC(ql2xgffidenable, 202 "Enables GFF_ID checks of port type. " 203 "Default is 0 - Do not use GFF_ID information."); 204 205 int ql2xasynctmfenable = 1; 206 module_param(ql2xasynctmfenable, int, S_IRUGO); 207 MODULE_PARM_DESC(ql2xasynctmfenable, 208 "Enables issue of TM IOCBs asynchronously via IOCB mechanism" 209 "Default is 1 - Issue TM IOCBs via mailbox mechanism."); 210 211 int ql2xdontresethba; 212 module_param(ql2xdontresethba, int, S_IRUGO|S_IWUSR); 213 MODULE_PARM_DESC(ql2xdontresethba, 214 "Option to specify reset behaviour.\n" 215 " 0 (Default) -- Reset on failure.\n" 216 " 1 -- Do not reset on failure.\n"); 217 218 uint64_t ql2xmaxlun = MAX_LUNS; 219 module_param(ql2xmaxlun, ullong, S_IRUGO); 220 MODULE_PARM_DESC(ql2xmaxlun, 221 "Defines the maximum LU number to register with the SCSI " 222 "midlayer. Default is 65535."); 223 224 int ql2xmdcapmask = 0x1F; 225 module_param(ql2xmdcapmask, int, S_IRUGO); 226 MODULE_PARM_DESC(ql2xmdcapmask, 227 "Set the Minidump driver capture mask level. " 228 "Default is 0x1F - Can be set to 0x3, 0x7, 0xF, 0x1F, 0x7F."); 229 230 int ql2xmdenable = 1; 231 module_param(ql2xmdenable, int, S_IRUGO); 232 MODULE_PARM_DESC(ql2xmdenable, 233 "Enable/disable MiniDump. " 234 "0 - MiniDump disabled. " 235 "1 (Default) - MiniDump enabled."); 236 237 int ql2xexlogins; 238 module_param(ql2xexlogins, uint, S_IRUGO|S_IWUSR); 239 MODULE_PARM_DESC(ql2xexlogins, 240 "Number of extended Logins. " 241 "0 (Default)- Disabled."); 242 243 int ql2xexchoffld = 1024; 244 module_param(ql2xexchoffld, uint, 0644); 245 MODULE_PARM_DESC(ql2xexchoffld, 246 "Number of target exchanges."); 247 248 int ql2xiniexchg = 1024; 249 module_param(ql2xiniexchg, uint, 0644); 250 MODULE_PARM_DESC(ql2xiniexchg, 251 "Number of initiator exchanges."); 252 253 int ql2xfwholdabts; 254 module_param(ql2xfwholdabts, int, S_IRUGO); 255 MODULE_PARM_DESC(ql2xfwholdabts, 256 "Allow FW to hold status IOCB until ABTS rsp received. " 257 "0 (Default) Do not set fw option. " 258 "1 - Set fw option to hold ABTS."); 259 260 int ql2xmvasynctoatio = 1; 261 module_param(ql2xmvasynctoatio, int, S_IRUGO|S_IWUSR); 262 MODULE_PARM_DESC(ql2xmvasynctoatio, 263 "Move PUREX, ABTS RX and RIDA IOCBs to ATIOQ" 264 "0 (Default). 
Do not move IOCBs" 265 "1 - Move IOCBs."); 266 267 int ql2xautodetectsfp = 1; 268 module_param(ql2xautodetectsfp, int, 0444); 269 MODULE_PARM_DESC(ql2xautodetectsfp, 270 "Detect SFP range and set appropriate distance.\n" 271 "1 (Default): Enable\n"); 272 273 int ql2xenablemsix = 1; 274 module_param(ql2xenablemsix, int, 0444); 275 MODULE_PARM_DESC(ql2xenablemsix, 276 "Set to enable MSI or MSI-X interrupt mechanism.\n" 277 " Default is 1, enable MSI-X interrupt mechanism.\n" 278 " 0 -- enable traditional pin-based mechanism.\n" 279 " 1 -- enable MSI-X interrupt mechanism.\n" 280 " 2 -- enable MSI interrupt mechanism.\n"); 281 282 int qla2xuseresexchforels; 283 module_param(qla2xuseresexchforels, int, 0444); 284 MODULE_PARM_DESC(qla2xuseresexchforels, 285 "Reserve 1/2 of emergency exchanges for ELS.\n" 286 " 0 (default): disabled"); 287 288 static int ql2xprotmask; 289 module_param(ql2xprotmask, int, 0644); 290 MODULE_PARM_DESC(ql2xprotmask, 291 "Override DIF/DIX protection capabilities mask\n" 292 "Default is 0 which sets protection mask based on " 293 "capabilities reported by HBA firmware.\n"); 294 295 static int ql2xprotguard; 296 module_param(ql2xprotguard, int, 0644); 297 MODULE_PARM_DESC(ql2xprotguard, "Override choice of DIX checksum\n" 298 " 0 -- Let HBA firmware decide\n" 299 " 1 -- Force T10 CRC\n" 300 " 2 -- Force IP checksum\n"); 301 302 int ql2xdifbundlinginternalbuffers; 303 module_param(ql2xdifbundlinginternalbuffers, int, 0644); 304 MODULE_PARM_DESC(ql2xdifbundlinginternalbuffers, 305 "Force using internal buffers for DIF information\n" 306 "0 (Default). Based on check.\n" 307 "1 Force using internal buffers\n"); 308 309 static void qla2x00_clear_drv_active(struct qla_hw_data *); 310 static void qla2x00_free_device(scsi_qla_host_t *); 311 static int qla2xxx_map_queues(struct Scsi_Host *shost); 312 static void qla2x00_destroy_deferred_work(struct qla_hw_data *); 313 314 315 static struct scsi_transport_template *qla2xxx_transport_template = NULL; 316 struct scsi_transport_template *qla2xxx_transport_vport_template = NULL; 317 318 /* TODO Convert to inlines 319 * 320 * Timer routines 321 */ 322 323 __inline__ void 324 qla2x00_start_timer(scsi_qla_host_t *vha, unsigned long interval) 325 { 326 timer_setup(&vha->timer, qla2x00_timer, 0); 327 vha->timer.expires = jiffies + interval * HZ; 328 add_timer(&vha->timer); 329 vha->timer_active = 1; 330 } 331 332 static inline void 333 qla2x00_restart_timer(scsi_qla_host_t *vha, unsigned long interval) 334 { 335 /* Currently used for 82XX only. 
static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req,
				struct rsp_que *rsp)
{
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

	ha->req_q_map = kcalloc(ha->max_req_queues, sizeof(struct req_que *),
				GFP_KERNEL);
	if (!ha->req_q_map) {
		ql_log(ql_log_fatal, vha, 0x003b,
		    "Unable to allocate memory for request queue ptrs.\n");
		goto fail_req_map;
	}

	ha->rsp_q_map = kcalloc(ha->max_rsp_queues, sizeof(struct rsp_que *),
				GFP_KERNEL);
	if (!ha->rsp_q_map) {
		ql_log(ql_log_fatal, vha, 0x003c,
		    "Unable to allocate memory for response queue ptrs.\n");
		goto fail_rsp_map;
	}

	ha->base_qpair = kzalloc(sizeof(struct qla_qpair), GFP_KERNEL);
	if (ha->base_qpair == NULL) {
		ql_log(ql_log_warn, vha, 0x00e0,
		    "Failed to allocate base queue pair memory.\n");
		goto fail_base_qpair;
	}

	qla_init_base_qpair(vha, req, rsp);

	if ((ql2xmqsupport || ql2xnvmeenable) && ha->max_qpairs) {
		ha->queue_pair_map = kcalloc(ha->max_qpairs, sizeof(struct qla_qpair *),
			GFP_KERNEL);
		if (!ha->queue_pair_map) {
			ql_log(ql_log_fatal, vha, 0x0180,
			    "Unable to allocate memory for queue pair ptrs.\n");
			goto fail_qpair_map;
		}
	}

	/*
	 * Make sure we record at least the request and response queue zero in
	 * case we need to free them if part of the probe fails.
	 */
	ha->rsp_q_map[0] = rsp;
	ha->req_q_map[0] = req;
	set_bit(0, ha->rsp_qid_map);
	set_bit(0, ha->req_qid_map);
	return 0;

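	/*
	 * Error unwind: each label below releases everything allocated
	 * before the corresponding failure point, in reverse order.
	 */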
fail_qpair_map:
	kfree(ha->base_qpair);
	ha->base_qpair = NULL;
fail_base_qpair:
	kfree(ha->rsp_q_map);
	ha->rsp_q_map = NULL;
fail_rsp_map:
	kfree(ha->req_q_map);
	ha->req_q_map = NULL;
fail_req_map:
	return -ENOMEM;
}

static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req)
{
	if (IS_QLAFX00(ha)) {
		if (req && req->ring_fx00)
			dma_free_coherent(&ha->pdev->dev,
			    (req->length_fx00 + 1) * sizeof(request_t),
			    req->ring_fx00, req->dma_fx00);
	} else if (req && req->ring)
		dma_free_coherent(&ha->pdev->dev,
		    (req->length + 1) * sizeof(request_t),
		    req->ring, req->dma);

	if (req)
		kfree(req->outstanding_cmds);

	kfree(req);
}

static void qla2x00_free_rsp_que(struct qla_hw_data *ha, struct rsp_que *rsp)
{
	if (IS_QLAFX00(ha)) {
		if (rsp && rsp->ring_fx00)
			dma_free_coherent(&ha->pdev->dev,
			    (rsp->length_fx00 + 1) * sizeof(request_t),
			    rsp->ring_fx00, rsp->dma_fx00);
	} else if (rsp && rsp->ring) {
		dma_free_coherent(&ha->pdev->dev,
		    (rsp->length + 1) * sizeof(response_t),
		    rsp->ring, rsp->dma);
	}
	kfree(rsp);
}

static void qla2x00_free_queues(struct qla_hw_data *ha)
{
	struct req_que *req;
	struct rsp_que *rsp;
	int cnt;
	unsigned long flags;

	if (ha->queue_pair_map) {
		kfree(ha->queue_pair_map);
		ha->queue_pair_map = NULL;
	}
	if (ha->base_qpair) {
		kfree(ha->base_qpair);
		ha->base_qpair = NULL;
	}

	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (cnt = 0; cnt < ha->max_req_queues; cnt++) {
		if (!test_bit(cnt, ha->req_qid_map))
			continue;

		req = ha->req_q_map[cnt];
		clear_bit(cnt, ha->req_qid_map);
		ha->req_q_map[cnt] = NULL;

		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		qla2x00_free_req_que(ha, req);
		spin_lock_irqsave(&ha->hardware_lock, flags);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	kfree(ha->req_q_map);
	ha->req_q_map = NULL;


	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (cnt = 0; cnt < ha->max_rsp_queues; cnt++) {
		if (!test_bit(cnt, ha->rsp_qid_map))
			continue;

		rsp = ha->rsp_q_map[cnt];
		clear_bit(cnt, ha->rsp_qid_map);
		ha->rsp_q_map[cnt] = NULL;
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		qla2x00_free_rsp_que(ha, rsp);
		spin_lock_irqsave(&ha->hardware_lock, flags);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	kfree(ha->rsp_q_map);
	ha->rsp_q_map = NULL;
}

static char *
qla2x00_pci_info_str(struct scsi_qla_host *vha, char *str, size_t str_len)
{
	struct qla_hw_data *ha = vha->hw;
	static const char *const pci_bus_modes[] = {
		"33", "66", "100", "133",
	};
	uint16_t pci_bus;

	pci_bus = (ha->pci_attr & (BIT_9 | BIT_10)) >> 9;
	if (pci_bus) {
		snprintf(str, str_len, "PCI-X (%s MHz)",
		    pci_bus_modes[pci_bus]);
	} else {
		pci_bus = (ha->pci_attr & BIT_8) >> 8;
		snprintf(str, str_len, "PCI (%s MHz)", pci_bus_modes[pci_bus]);
	}

	return str;
}

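/*
 * For PCIe adapters the link speed and width are decoded from the
 * PCI_EXP_LNKCAP register; otherwise the legacy PCI/PCI-X bus-mode
 * bits in pci_attr are used, as in qla2x00_pci_info_str() above.
 */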
static char *
qla24xx_pci_info_str(struct scsi_qla_host *vha, char *str, size_t str_len)
{
	static const char *const pci_bus_modes[] = {
		"33", "66", "100", "133",
	};
	struct qla_hw_data *ha = vha->hw;
	uint32_t pci_bus;

	if (pci_is_pcie(ha->pdev)) {
		uint32_t lstat, lspeed, lwidth;
		const char *speed_str;

		pcie_capability_read_dword(ha->pdev, PCI_EXP_LNKCAP, &lstat);
		lspeed = lstat & PCI_EXP_LNKCAP_SLS;
		lwidth = (lstat & PCI_EXP_LNKCAP_MLW) >> 4;

		switch (lspeed) {
		case 1:
			speed_str = "2.5GT/s";
			break;
		case 2:
			speed_str = "5.0GT/s";
			break;
		case 3:
			speed_str = "8.0GT/s";
			break;
		default:
			speed_str = "<unknown>";
			break;
		}
		snprintf(str, str_len, "PCIe (%s x%d)", speed_str, lwidth);

		return str;
	}

	pci_bus = (ha->pci_attr & CSRX_PCIX_BUS_MODE_MASK) >> 8;
	if (pci_bus == 0 || pci_bus == 8)
		snprintf(str, str_len, "PCI (%s MHz)",
		    pci_bus_modes[pci_bus >> 3]);
	else
		snprintf(str, str_len, "PCI-X Mode %d (%s MHz)",
		    pci_bus & 4 ? 2 : 1,
		    pci_bus_modes[pci_bus & 3]);

	return str;
}

static char *
qla2x00_fw_version_str(struct scsi_qla_host *vha, char *str, size_t size)
{
	char un_str[10];
	struct qla_hw_data *ha = vha->hw;

	snprintf(str, size, "%d.%02d.%02d ", ha->fw_major_version,
	    ha->fw_minor_version, ha->fw_subminor_version);

	if (ha->fw_attributes & BIT_9) {
		strcat(str, "FLX");
		return (str);
	}

	switch (ha->fw_attributes & 0xFF) {
	case 0x7:
		strcat(str, "EF");
		break;
	case 0x17:
		strcat(str, "TP");
		break;
	case 0x37:
		strcat(str, "IP");
		break;
	case 0x77:
		strcat(str, "VI");
		break;
	default:
		sprintf(un_str, "(%x)", ha->fw_attributes);
		strcat(str, un_str);
		break;
	}
	if (ha->fw_attributes & 0x100)
		strcat(str, "X");

	return (str);
}

static char *
qla24xx_fw_version_str(struct scsi_qla_host *vha, char *str, size_t size)
{
	struct qla_hw_data *ha = vha->hw;

	snprintf(str, size, "%d.%02d.%02d (%x)", ha->fw_major_version,
	    ha->fw_minor_version, ha->fw_subminor_version, ha->fw_attributes);
	return str;
}

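/*
 * Teardown below is driven by the SRB_*_VALID flags: each flag records
 * a resource that was mapped or allocated at submission time, and is
 * cleared once that resource has been released.
 */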
void qla2x00_sp_free_dma(srb_t *sp)
{
	struct qla_hw_data *ha = sp->vha->hw;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);

	if (sp->flags & SRB_DMA_VALID) {
		scsi_dma_unmap(cmd);
		sp->flags &= ~SRB_DMA_VALID;
	}

	if (sp->flags & SRB_CRC_PROT_DMA_VALID) {
		dma_unmap_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
		    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
		sp->flags &= ~SRB_CRC_PROT_DMA_VALID;
	}

	if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
		/* List is guaranteed to have elements */
		qla2x00_clean_dsd_pool(ha, sp->u.scmd.crc_ctx);
		sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
	}

	if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
		struct crc_context *ctx0 = sp->u.scmd.crc_ctx;

		dma_pool_free(ha->dl_dma_pool, ctx0, ctx0->crc_ctx_dma);
		sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
	}

	if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
		struct ct6_dsd *ctx1 = sp->u.scmd.ct6_ctx;

		dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
		    ctx1->fcp_cmnd_dma);
		list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list);
		ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt;
		ha->gbl_dsd_avail += ctx1->dsd_use_cnt;
		mempool_free(ctx1, ha->ctx_mempool);
	}
}

void qla2x00_sp_compl(srb_t *sp, int res)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct completion *comp = sp->comp;

	sp->free(sp);
	cmd->result = res;
	CMD_SP(cmd) = NULL;
	cmd->scsi_done(cmd);
	if (comp)
		complete(comp);
}

void
qla2xxx_qpair_sp_free_dma(srb_t *sp)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct qla_hw_data *ha = sp->fcport->vha->hw;

	if (sp->flags & SRB_DMA_VALID) {
		scsi_dma_unmap(cmd);
		sp->flags &= ~SRB_DMA_VALID;
	}

	if (sp->flags & SRB_CRC_PROT_DMA_VALID) {
		dma_unmap_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
		    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
		sp->flags &= ~SRB_CRC_PROT_DMA_VALID;
	}

	if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
		/* List is guaranteed to have elements */
		qla2x00_clean_dsd_pool(ha, sp->u.scmd.crc_ctx);
		sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
	}

	if (sp->flags & SRB_DIF_BUNDL_DMA_VALID) {
		struct crc_context *difctx = sp->u.scmd.crc_ctx;
		struct dsd_dma *dif_dsd, *nxt_dsd;

		list_for_each_entry_safe(dif_dsd, nxt_dsd,
		    &difctx->ldif_dma_hndl_list, list) {
			list_del(&dif_dsd->list);
			dma_pool_free(ha->dif_bundl_pool, dif_dsd->dsd_addr,
			    dif_dsd->dsd_list_dma);
			kfree(dif_dsd);
			difctx->no_dif_bundl--;
		}

		list_for_each_entry_safe(dif_dsd, nxt_dsd,
		    &difctx->ldif_dsd_list, list) {
			list_del(&dif_dsd->list);
			dma_pool_free(ha->dl_dma_pool, dif_dsd->dsd_addr,
			    dif_dsd->dsd_list_dma);
			kfree(dif_dsd);
			difctx->no_ldif_dsd--;
		}

		if (difctx->no_ldif_dsd) {
			ql_dbg(ql_dbg_tgt+ql_dbg_verbose, sp->vha, 0xe022,
			    "%s: difctx->no_ldif_dsd=%x\n",
			    __func__, difctx->no_ldif_dsd);
		}

		if (difctx->no_dif_bundl) {
			ql_dbg(ql_dbg_tgt+ql_dbg_verbose, sp->vha, 0xe022,
			    "%s: difctx->no_dif_bundl=%x\n",
			    __func__, difctx->no_dif_bundl);
		}
		sp->flags &= ~SRB_DIF_BUNDL_DMA_VALID;
	}

	if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
		struct ct6_dsd *ctx1 = sp->u.scmd.ct6_ctx;

		dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
		    ctx1->fcp_cmnd_dma);
		list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list);
		ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt;
		ha->gbl_dsd_avail += ctx1->dsd_use_cnt;
		mempool_free(ctx1, ha->ctx_mempool);
		sp->flags &= ~SRB_FCP_CMND_DMA_VALID;
	}

	if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
		struct crc_context *ctx0 = sp->u.scmd.crc_ctx;

		dma_pool_free(ha->dl_dma_pool, ctx0, ctx0->crc_ctx_dma);
		sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
	}
}

void qla2xxx_qpair_sp_compl(srb_t *sp, int res)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct completion *comp = sp->comp;

	sp->free(sp);
	cmd->result = res;
	CMD_SP(cmd) = NULL;
	cmd->scsi_done(cmd);
	if (comp)
		complete(comp);
}

static int
qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
	scsi_qla_host_t *vha = shost_priv(host);
	fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
	struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device));
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	srb_t *sp;
	int rval;

	if (unlikely(test_bit(UNLOADING, &base_vha->dpc_flags)) ||
	    WARN_ON_ONCE(!rport)) {
		cmd->result = DID_NO_CONNECT << 16;
		goto qc24_fail_command;
	}

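	/*
	 * With MQ enabled, route the command to the queue pair that maps
	 * to the block layer's hardware queue for this request.
	 */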
	if (ha->mqenable) {
		uint32_t tag;
		uint16_t hwq;
		struct qla_qpair *qpair = NULL;

		tag = blk_mq_unique_tag(cmd->request);
		hwq = blk_mq_unique_tag_to_hwq(tag);
		qpair = ha->queue_pair_map[hwq];

		if (qpair)
			return qla2xxx_mqueuecommand(host, cmd, qpair);
	}

	if (ha->flags.eeh_busy) {
		if (ha->flags.pci_channel_io_perm_failure) {
			ql_dbg(ql_dbg_aer, vha, 0x9010,
			    "PCI Channel IO permanent failure, exiting "
			    "cmd=%p.\n", cmd);
			cmd->result = DID_NO_CONNECT << 16;
		} else {
			ql_dbg(ql_dbg_aer, vha, 0x9011,
			    "EEH_Busy, Requeuing the cmd=%p.\n", cmd);
			cmd->result = DID_REQUEUE << 16;
		}
		goto qc24_fail_command;
	}

	rval = fc_remote_port_chkready(rport);
	if (rval) {
		cmd->result = rval;
		ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3003,
		    "fc_remote_port_chkready failed for cmd=%p, rval=0x%x.\n",
		    cmd, rval);
		goto qc24_fail_command;
	}

	if (!vha->flags.difdix_supported &&
	    scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
		ql_dbg(ql_dbg_io, vha, 0x3004,
		    "DIF Cap not reg, fail DIF capable cmd's:%p.\n",
		    cmd);
		cmd->result = DID_NO_CONNECT << 16;
		goto qc24_fail_command;
	}

	if (!fcport) {
		cmd->result = DID_NO_CONNECT << 16;
		goto qc24_fail_command;
	}

	if (atomic_read(&fcport->state) != FCS_ONLINE || fcport->deleted) {
		if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
			atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
			ql_dbg(ql_dbg_io, vha, 0x3005,
			    "Returning DNC, fcport_state=%d loop_state=%d.\n",
			    atomic_read(&fcport->state),
			    atomic_read(&base_vha->loop_state));
			cmd->result = DID_NO_CONNECT << 16;
			goto qc24_fail_command;
		}
		goto qc24_target_busy;
	}

	/*
	 * Return target busy if we've received a non-zero retry_delay_timer
	 * in a FCP_RSP.
	 */
	if (fcport->retry_delay_timestamp == 0) {
		/* retry delay not set */
	} else if (time_after(jiffies, fcport->retry_delay_timestamp))
		fcport->retry_delay_timestamp = 0;
	else
		goto qc24_target_busy;

	sp = scsi_cmd_priv(cmd);
	qla2xxx_init_sp(sp, vha, vha->hw->base_qpair, fcport);

	sp->u.scmd.cmd = cmd;
	sp->type = SRB_SCSI_CMD;

	CMD_SP(cmd) = (void *)sp;
	sp->free = qla2x00_sp_free_dma;
	sp->done = qla2x00_sp_compl;

	rval = ha->isp_ops->start_scsi(sp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3013,
		    "Start scsi failed rval=%d for cmd=%p.\n", rval, cmd);
		goto qc24_host_busy_free_sp;
	}

	return 0;

qc24_host_busy_free_sp:
	sp->free(sp);

qc24_target_busy:
	return SCSI_MLQUEUE_TARGET_BUSY;

qc24_fail_command:
	cmd->scsi_done(cmd);

	return 0;
}

/* For MQ supported I/O */
int
qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
    struct qla_qpair *qpair)
{
	scsi_qla_host_t *vha = shost_priv(host);
	fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
	struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device));
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	srb_t *sp;
	int rval;

	rval = rport ? fc_remote_port_chkready(rport) : FC_PORTSTATE_OFFLINE;
	if (rval) {
		cmd->result = rval;
		ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3076,
		    "fc_remote_port_chkready failed for cmd=%p, rval=0x%x.\n",
		    cmd, rval);
		goto qc24_fail_command;
	}

	if (!fcport) {
		cmd->result = DID_NO_CONNECT << 16;
		goto qc24_fail_command;
	}

	if (atomic_read(&fcport->state) != FCS_ONLINE || fcport->deleted) {
		if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
			atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
			ql_dbg(ql_dbg_io, vha, 0x3077,
			    "Returning DNC, fcport_state=%d loop_state=%d.\n",
			    atomic_read(&fcport->state),
			    atomic_read(&base_vha->loop_state));
			cmd->result = DID_NO_CONNECT << 16;
			goto qc24_fail_command;
		}
		goto qc24_target_busy;
	}

	/*
	 * Return target busy if we've received a non-zero retry_delay_timer
	 * in a FCP_RSP.
	 */
	if (fcport->retry_delay_timestamp == 0) {
		/* retry delay not set */
	} else if (time_after(jiffies, fcport->retry_delay_timestamp))
		fcport->retry_delay_timestamp = 0;
	else
		goto qc24_target_busy;

	sp = scsi_cmd_priv(cmd);
	qla2xxx_init_sp(sp, vha, qpair, fcport);

	sp->u.scmd.cmd = cmd;
	sp->type = SRB_SCSI_CMD;
	CMD_SP(cmd) = (void *)sp;
	sp->free = qla2xxx_qpair_sp_free_dma;
	sp->done = qla2xxx_qpair_sp_compl;

	rval = ha->isp_ops->start_scsi_mq(sp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3078,
		    "Start scsi failed rval=%d for cmd=%p.\n", rval, cmd);
		if (rval == QLA_INTERFACE_ERROR)
			goto qc24_free_sp_fail_command;
		goto qc24_host_busy_free_sp;
	}

	return 0;

qc24_host_busy_free_sp:
	sp->free(sp);

qc24_target_busy:
	return SCSI_MLQUEUE_TARGET_BUSY;

qc24_free_sp_fail_command:
	sp->free(sp);
	CMD_SP(cmd) = NULL;
	qla2xxx_rel_qpair_sp(sp->qpair, sp);

qc24_fail_command:
	cmd->scsi_done(cmd);

	return 0;
}

/*
 * qla2x00_eh_wait_on_command
 *    Waits for the command to be returned by the firmware for a
 *    maximum time.
 *
 * Input:
 *    cmd = Scsi Command to wait on.
 *
 * Return:
 *    Completed in time : QLA_SUCCESS
 *    Did not complete in time : QLA_FUNCTION_FAILED
 */
static int
qla2x00_eh_wait_on_command(struct scsi_cmnd *cmd)
{
#define ABORT_POLLING_PERIOD	1000
#define ABORT_WAIT_ITER		((2 * 1000) / (ABORT_POLLING_PERIOD))
	unsigned long wait_iter = ABORT_WAIT_ITER;
	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
	struct qla_hw_data *ha = vha->hw;
	int ret = QLA_SUCCESS;

	if (unlikely(pci_channel_offline(ha->pdev)) || ha->flags.eeh_busy) {
		ql_dbg(ql_dbg_taskm, vha, 0x8005,
		    "Return:eh_wait.\n");
		return ret;
	}

	while (CMD_SP(cmd) && wait_iter--) {
		msleep(ABORT_POLLING_PERIOD);
	}
	if (CMD_SP(cmd))
		ret = QLA_FUNCTION_FAILED;

	return ret;
}

/*
 * qla2x00_wait_for_hba_online
 *    Wait till the HBA is online, going through at most
 *    MAX_RETRIES_OF_ISP_ABORT retries, or until the HBA is
 *    disabled, i.e. marked offline.
 *
 * Input:
 *     ha - pointer to host adapter structure
 *
 * Note:
 *    Does context switching - release any spinlocks held
 *    before calling this routine.
 *
 * Return:
 *    Success (Adapter is online) : 0
 *    Failed  (Adapter is offline/disabled) : 1
 */
int
qla2x00_wait_for_hba_online(scsi_qla_host_t *vha)
{
	int return_status;
	unsigned long wait_online;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	wait_online = jiffies + (MAX_LOOP_TIMEOUT * HZ);
	while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) ||
	    test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
	    ha->dpc_active) && time_before(jiffies, wait_online)) {

		msleep(1000);
	}
	if (base_vha->flags.online)
		return_status = QLA_SUCCESS;
	else
		return_status = QLA_FUNCTION_FAILED;

	return (return_status);
}

static inline int test_fcport_count(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;
	int res;

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	ql_dbg(ql_dbg_init, vha, 0x00ec,
	    "tgt %p, fcport_count=%d\n",
	    vha, vha->fcport_count);
	res = (vha->fcport_count == 0);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

	return res;
}

/*
 * qla2x00_wait_for_sess_deletion can only be called from remove_one.
 * It depends on the UNLOADING flag to stop device discovery.
 */
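/*
 * The loop below bounds the wait to roughly 10 seconds (ten 1-second
 * wait_event_timeout() iterations) before the workqueue is flushed.
 */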
void
qla2x00_wait_for_sess_deletion(scsi_qla_host_t *vha)
{
	u8 i;

	qla2x00_mark_all_devices_lost(vha);

	for (i = 0; i < 10; i++) {
		if (wait_event_timeout(vha->fcport_waitQ,
		    test_fcport_count(vha), HZ) > 0)
			break;
	}

	flush_workqueue(vha->hw->wq);
}

/*
 * qla2x00_wait_for_hba_ready
 *    Wait till the HBA is ready before doing driver unload.
 *
 * Input:
 *     ha - pointer to host adapter structure
 *
 * Note:
 *    Does context switching - release any spinlocks held
 *    before calling this routine.
 *
 */
static void
qla2x00_wait_for_hba_ready(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	while ((qla2x00_reset_active(vha) || ha->dpc_active ||
	    ha->flags.mbox_busy) ||
	    test_bit(FX00_RESET_RECOVERY, &vha->dpc_flags) ||
	    test_bit(FX00_TARGET_SCAN, &vha->dpc_flags)) {
		if (test_bit(UNLOADING, &base_vha->dpc_flags))
			break;
		msleep(1000);
	}
}

int
qla2x00_wait_for_chip_reset(scsi_qla_host_t *vha)
{
	int return_status;
	unsigned long wait_reset;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	wait_reset = jiffies + (MAX_LOOP_TIMEOUT * HZ);
	while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) ||
	    test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
	    ha->dpc_active) && time_before(jiffies, wait_reset)) {

		msleep(1000);

		if (!test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags) &&
		    ha->flags.chip_reset_done)
			break;
	}
	if (ha->flags.chip_reset_done)
		return_status = QLA_SUCCESS;
	else
		return_status = QLA_FUNCTION_FAILED;

	return return_status;
}

#define ISP_REG_DISCONNECT 0xffffffffU
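/*
 * A PCI MMIO read that returns all ones typically means the device has
 * dropped off the bus (hot unplug or a fatal PCIe error), hence the
 * "register disconnect" check below.
 */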
/**************************************************************************
 * qla2x00_isp_reg_stat
 *
 * Description:
 *	Read the host status register of ISP before aborting the command.
 *
 * Input:
 *	ha = pointer to host adapter structure.
 *
 *
 * Returns:
 *	Either true or false.
 *
 * Note:	Return true if there is register disconnect.
 **************************************************************************/
static inline
uint32_t qla2x00_isp_reg_stat(struct qla_hw_data *ha)
{
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
	struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;

	if (IS_P3P_TYPE(ha))
		return ((RD_REG_DWORD(&reg82->host_int)) == ISP_REG_DISCONNECT);
	else
		return ((RD_REG_DWORD(&reg->host_status)) ==
		    ISP_REG_DISCONNECT);
}

/**************************************************************************
 * qla2xxx_eh_abort
 *
 * Description:
 *    The abort function will abort the specified command.
 *
 * Input:
 *    cmd = Linux SCSI command packet to be aborted.
 *
 * Returns:
 *    Either SUCCESS or FAILED.
 *
 * Note:
 *    Only return FAILED if command not returned by firmware.
 **************************************************************************/
static int
qla2xxx_eh_abort(struct scsi_cmnd *cmd)
{
	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
	DECLARE_COMPLETION_ONSTACK(comp);
	srb_t *sp;
	int ret;
	unsigned int id;
	uint64_t lun;
	int rval;
	struct qla_hw_data *ha = vha->hw;
	uint32_t ratov_j;
	struct qla_qpair *qpair;
	unsigned long flags;

	if (qla2x00_isp_reg_stat(ha)) {
		ql_log(ql_log_info, vha, 0x8042,
		    "PCI/Register disconnect, exiting.\n");
		return FAILED;
	}

	ret = fc_block_scsi_eh(cmd);
	if (ret != 0)
		return ret;

	sp = scsi_cmd_priv(cmd);
	qpair = sp->qpair;

	if ((sp->fcport && sp->fcport->deleted) || !qpair)
		return SUCCESS;

	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
	if (sp->completed) {
		spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
		return SUCCESS;
	}

	if (sp->abort || sp->aborted) {
		spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
		return FAILED;
	}

	sp->abort = 1;
	sp->comp = &comp;
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);


	id = cmd->device->id;
	lun = cmd->device->lun;

	ql_dbg(ql_dbg_taskm, vha, 0x8002,
	    "Aborting from RISC nexus=%ld:%d:%llu sp=%p cmd=%p handle=%x\n",
	    vha->host_no, id, lun, sp, cmd, sp->handle);

	/*
	 * Abort will release the original command/sp from the firmware.
	 * Let the original command call scsi_done; in return, it will
	 * wake up this sleeping thread.
	 */
	rval = ha->isp_ops->abort_command(sp);

	ql_dbg(ql_dbg_taskm, vha, 0x8003,
	    "Abort command mbx cmd=%p, rval=%x.\n", cmd, rval);

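	/*
	 * ha->r_a_tov is kept in 100ms units, so r_a_tov/10 is R_A_TOV in
	 * seconds; the wait below allows up to 4 * R_A_TOV for the aborted
	 * command to complete.
	 */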
	/* Wait for the command completion. */
	ratov_j = ha->r_a_tov/10 * 4 * 1000;
	ratov_j = msecs_to_jiffies(ratov_j);
	switch (rval) {
	case QLA_SUCCESS:
		if (!wait_for_completion_timeout(&comp, ratov_j)) {
			ql_dbg(ql_dbg_taskm, vha, 0xffff,
			    "%s: Abort wait timer (4 * R_A_TOV[%d]) expired\n",
			    __func__, ha->r_a_tov/10);
			ret = FAILED;
		} else {
			ret = SUCCESS;
		}
		break;
	default:
		ret = FAILED;
		break;
	}

	sp->comp = NULL;

	ql_log(ql_log_info, vha, 0x801c,
	    "Abort command issued nexus=%ld:%d:%llu -- %x.\n",
	    vha->host_no, id, lun, ret);

	return ret;
}

/*
 * Returns: QLA_SUCCESS or QLA_FUNCTION_FAILED.
 */
int
qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
	uint64_t l, enum nexus_wait_type type)
{
	int cnt, match, status;
	unsigned long flags;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req;
	srb_t *sp;
	struct scsi_cmnd *cmd;

	status = QLA_SUCCESS;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	req = vha->req;
	for (cnt = 1; status == QLA_SUCCESS &&
		cnt < req->num_outstanding_cmds; cnt++) {
		sp = req->outstanding_cmds[cnt];
		if (!sp)
			continue;
		if (sp->type != SRB_SCSI_CMD)
			continue;
		if (vha->vp_idx != sp->vha->vp_idx)
			continue;
		match = 0;
		cmd = GET_CMD_SP(sp);
		switch (type) {
		case WAIT_HOST:
			match = 1;
			break;
		case WAIT_TARGET:
			match = cmd->device->id == t;
			break;
		case WAIT_LUN:
			match = (cmd->device->id == t &&
				cmd->device->lun == l);
			break;
		}
		if (!match)
			continue;

		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		status = qla2x00_eh_wait_on_command(cmd);
		spin_lock_irqsave(&ha->hardware_lock, flags);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return status;
}

static char *reset_errors[] = {
	"HBA not online",
	"HBA not ready",
	"Task management failed",
	"Waiting for command completions",
};

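/*
 * Common device/target reset helper: err indexes reset_errors[] above,
 * so the failure path can report which stage failed.
 */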
static int
__qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
    struct scsi_cmnd *cmd, int (*do_reset)(struct fc_port *, uint64_t, int))
{
	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
	fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
	int err;

	if (!fcport) {
		return FAILED;
	}

	err = fc_block_scsi_eh(cmd);
	if (err != 0)
		return err;

	if (fcport->deleted)
		return SUCCESS;

	ql_log(ql_log_info, vha, 0x8009,
	    "%s RESET ISSUED nexus=%ld:%d:%llu cmd=%p.\n", name, vha->host_no,
	    cmd->device->id, cmd->device->lun, cmd);

	err = 0;
	if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x800a,
		    "Wait for hba online failed for cmd=%p.\n", cmd);
		goto eh_reset_failed;
	}
	err = 2;
	if (do_reset(fcport, cmd->device->lun, 1)
		!= QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x800c,
		    "do_reset failed for cmd=%p.\n", cmd);
		goto eh_reset_failed;
	}
	err = 3;
	if (qla2x00_eh_wait_for_pending_commands(vha, cmd->device->id,
	    cmd->device->lun, type) != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x800d,
		    "wait for pending cmds failed for cmd=%p.\n", cmd);
		goto eh_reset_failed;
	}

	ql_log(ql_log_info, vha, 0x800e,
	    "%s RESET SUCCEEDED nexus:%ld:%d:%llu cmd=%p.\n", name,
	    vha->host_no, cmd->device->id, cmd->device->lun, cmd);

	return SUCCESS;

eh_reset_failed:
	ql_log(ql_log_info, vha, 0x800f,
	    "%s RESET FAILED: %s nexus=%ld:%d:%llu cmd=%p.\n", name,
	    reset_errors[err], vha->host_no, cmd->device->id, cmd->device->lun,
	    cmd);
	return FAILED;
}

static int
qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
{
	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
	struct qla_hw_data *ha = vha->hw;

	if (qla2x00_isp_reg_stat(ha)) {
		ql_log(ql_log_info, vha, 0x803e,
		    "PCI/Register disconnect, exiting.\n");
		return FAILED;
	}

	return __qla2xxx_eh_generic_reset("DEVICE", WAIT_LUN, cmd,
	    ha->isp_ops->lun_reset);
}

static int
qla2xxx_eh_target_reset(struct scsi_cmnd *cmd)
{
	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
	struct qla_hw_data *ha = vha->hw;

	if (qla2x00_isp_reg_stat(ha)) {
		ql_log(ql_log_info, vha, 0x803f,
		    "PCI/Register disconnect, exiting.\n");
		return FAILED;
	}

	return __qla2xxx_eh_generic_reset("TARGET", WAIT_TARGET, cmd,
	    ha->isp_ops->target_reset);
}

/**************************************************************************
 * qla2xxx_eh_bus_reset
 *
 * Description:
 *    The bus reset function will reset the bus and abort any executing
 *    commands.
 *
 * Input:
 *    cmd = Linux SCSI command packet of the command that caused the
 *          bus reset.
 *
 * Returns:
 *    SUCCESS/FAILURE (defined as macro in scsi.h).
 *
 **************************************************************************/
static int
qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
{
	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
	fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
	int ret = FAILED;
	unsigned int id;
	uint64_t lun;
	struct qla_hw_data *ha = vha->hw;

	if (qla2x00_isp_reg_stat(ha)) {
		ql_log(ql_log_info, vha, 0x8040,
		    "PCI/Register disconnect, exiting.\n");
		return FAILED;
	}

	id = cmd->device->id;
	lun = cmd->device->lun;

	if (!fcport) {
		return ret;
	}

	ret = fc_block_scsi_eh(cmd);
	if (ret != 0)
		return ret;
	ret = FAILED;

	if (qla2x00_chip_is_down(vha))
		return ret;

	ql_log(ql_log_info, vha, 0x8012,
	    "BUS RESET ISSUED nexus=%ld:%d:%llu.\n", vha->host_no, id, lun);

	if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
		ql_log(ql_log_fatal, vha, 0x8013,
		    "Wait for hba online failed board disabled.\n");
		goto eh_bus_reset_done;
	}

	if (qla2x00_loop_reset(vha) == QLA_SUCCESS)
		ret = SUCCESS;

	if (ret == FAILED)
		goto eh_bus_reset_done;

	/* Flush outstanding commands. */
	if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST) !=
	    QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x8014,
		    "Wait for pending commands failed.\n");
		ret = FAILED;
	}

eh_bus_reset_done:
	ql_log(ql_log_warn, vha, 0x802b,
	    "BUS RESET %s nexus=%ld:%d:%llu.\n",
	    (ret == FAILED) ? "FAILED" : "SUCCEEDED", vha->host_no, id, lun);

	return ret;
}

/**************************************************************************
 * qla2xxx_eh_host_reset
 *
 * Description:
 *    The reset function will reset the Adapter.
 *
 * Input:
 *      cmd = Linux SCSI command packet of the command that caused the
 *            adapter reset.
 *
 * Returns:
 *      Either SUCCESS or FAILED.
 *
 * Note:
 **************************************************************************/
static int
qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
{
	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
	struct qla_hw_data *ha = vha->hw;
	int ret = FAILED;
	unsigned int id;
	uint64_t lun;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	if (qla2x00_isp_reg_stat(ha)) {
		ql_log(ql_log_info, vha, 0x8041,
		    "PCI/Register disconnect, exiting.\n");
		schedule_work(&ha->board_disable);
		return SUCCESS;
	}

	id = cmd->device->id;
	lun = cmd->device->lun;

	ql_log(ql_log_info, vha, 0x8018,
	    "ADAPTER RESET ISSUED nexus=%ld:%d:%llu.\n", vha->host_no, id, lun);

	/*
	 * No point in issuing another reset if one is active. Also do not
	 * attempt a reset if we are updating flash.
	 */
	if (qla2x00_reset_active(vha) || ha->optrom_state != QLA_SWAITING)
		goto eh_host_reset_lock;

	if (vha != base_vha) {
		if (qla2x00_vp_abort_isp(vha))
			goto eh_host_reset_lock;
	} else {
		if (IS_P3P_TYPE(vha->hw)) {
			if (!qla82xx_fcoe_ctx_reset(vha)) {
				/* Ctx reset success */
				ret = SUCCESS;
				goto eh_host_reset_lock;
			}
			/* fall thru if ctx reset failed */
		}
		if (ha->wq)
			flush_workqueue(ha->wq);

		set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
		if (ha->isp_ops->abort_isp(base_vha)) {
			clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
			/* failed. schedule dpc to try */
			set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);

			if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
				ql_log(ql_log_warn, vha, 0x802a,
				    "wait for hba online failed.\n");
				goto eh_host_reset_lock;
			}
		}
		clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
	}

	/* Waiting for command to be returned to OS.*/
	if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST) ==
		QLA_SUCCESS)
		ret = SUCCESS;

eh_host_reset_lock:
	ql_log(ql_log_info, vha, 0x8017,
	    "ADAPTER RESET %s nexus=%ld:%d:%llu.\n",
	    (ret == FAILED) ? "FAILED" : "SUCCEEDED", vha->host_no, id, lun);

	return ret;
}

/*
 * qla2x00_loop_reset
 *      Issue loop reset.
 *
 * Input:
 *      ha = adapter block pointer.
 *
 * Returns:
 *      0 = success
 */
int
qla2x00_loop_reset(scsi_qla_host_t *vha)
{
	int ret;
	struct fc_port *fcport;
	struct qla_hw_data *ha = vha->hw;

	if (IS_QLAFX00(ha)) {
		return qlafx00_loop_reset(vha);
	}

	if (ql2xtargetreset == 1 && ha->flags.enable_target_reset) {
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			if (fcport->port_type != FCT_TARGET)
				continue;

			ret = ha->isp_ops->target_reset(fcport, 0, 0);
			if (ret != QLA_SUCCESS) {
				ql_dbg(ql_dbg_taskm, vha, 0x802c,
				    "Bus Reset failed: Reset=%d "
				    "d_id=%x.\n", ret, fcport->d_id.b24);
			}
		}
	}


	if (ha->flags.enable_lip_full_login && !IS_CNA_CAPABLE(ha)) {
		atomic_set(&vha->loop_state, LOOP_DOWN);
		atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
		qla2x00_mark_all_devices_lost(vha);
		ret = qla2x00_full_login_lip(vha);
		if (ret != QLA_SUCCESS) {
			ql_dbg(ql_dbg_taskm, vha, 0x802d,
			    "full_login_lip=%d.\n", ret);
		}
	}

	if (ha->flags.enable_lip_reset) {
		ret = qla2x00_lip_reset(vha);
		if (ret != QLA_SUCCESS)
			ql_dbg(ql_dbg_taskm, vha, 0x802e,
			    "lip_reset failed (%d).\n", ret);
	}

	/* Issue marker command only when we are going to start the I/O */
	vha->marker_needed = 1;

	return QLA_SUCCESS;
}

static void qla2x00_abort_srb(struct qla_qpair *qp, srb_t *sp, const int res,
			      unsigned long *flags)
	__releases(qp->qp_lock_ptr)
	__acquires(qp->qp_lock_ptr)
{
	DECLARE_COMPLETION_ONSTACK(comp);
	scsi_qla_host_t *vha = qp->vha;
	struct qla_hw_data *ha = vha->hw;
	int rval;
	bool ret_cmd;
	uint32_t ratov_j;

	if (qla2x00_chip_is_down(vha)) {
		sp->done(sp, res);
		return;
	}

	if (sp->type == SRB_NVME_CMD || sp->type == SRB_NVME_LS ||
	    (sp->type == SRB_SCSI_CMD && !ha->flags.eeh_busy &&
	     !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
	     !qla2x00_isp_reg_stat(ha))) {
		if (sp->comp) {
			sp->done(sp, res);
			return;
		}

		sp->comp = &comp;
		sp->abort = 1;
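		/*
		 * Drop the queue-pair lock while the abort is issued and
		 * waited on; it is retaken (with the caller's flags) before
		 * returning, per the __releases/__acquires annotations.
		 */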
		spin_unlock_irqrestore(qp->qp_lock_ptr, *flags);

		rval = ha->isp_ops->abort_command(sp);
		/* Wait for command completion. */
		ret_cmd = false;
		ratov_j = ha->r_a_tov/10 * 4 * 1000;
		ratov_j = msecs_to_jiffies(ratov_j);
		switch (rval) {
		case QLA_SUCCESS:
			if (!wait_for_completion_timeout(&comp, ratov_j)) {
				ql_dbg(ql_dbg_taskm, vha, 0xffff,
				    "%s: Abort wait timer (4 * R_A_TOV[%d]) expired\n",
				    __func__, ha->r_a_tov/10);
				ret_cmd = true;
			}
			/* else the FW returns the sp to the driver */
			break;
		default:
			ret_cmd = true;
			break;
		}

		spin_lock_irqsave(qp->qp_lock_ptr, *flags);
		if (ret_cmd && (!sp->completed || !sp->aborted))
			sp->done(sp, res);
	} else {
		sp->done(sp, res);
	}
}

static void
__qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
{
	int cnt;
	unsigned long flags;
	srb_t *sp;
	scsi_qla_host_t *vha = qp->vha;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct qla_tgt_cmd *cmd;

	if (!ha->req_q_map)
		return;
	spin_lock_irqsave(qp->qp_lock_ptr, flags);
	req = qp->req;
	for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
		sp = req->outstanding_cmds[cnt];
		if (sp) {
			switch (sp->cmd_type) {
			case TYPE_SRB:
				qla2x00_abort_srb(qp, sp, res, &flags);
				break;
			case TYPE_TGT_CMD:
				if (!vha->hw->tgt.tgt_ops || !tgt ||
				    qla_ini_mode_enabled(vha)) {
					ql_dbg(ql_dbg_tgt_mgt, vha, 0xf003,
					    "HOST-ABORT-HNDLR: dpc_flags=%lx. Target mode disabled\n",
					    vha->dpc_flags);
					continue;
				}
				cmd = (struct qla_tgt_cmd *)sp;
				cmd->aborted = 1;
				break;
			case TYPE_TGT_TMCMD:
				/* Skip task management functions. */
				break;
			default:
				break;
			}
			req->outstanding_cmds[cnt] = NULL;
		}
	}
	spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
}

void
qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
{
	int que;
	struct qla_hw_data *ha = vha->hw;

	/* Continue only if initialization complete. */
	if (!ha->base_qpair)
		return;
	__qla2x00_abort_all_cmds(ha->base_qpair, res);

	if (!ha->queue_pair_map)
		return;
	for (que = 0; que < ha->max_qpairs; que++) {
		if (!ha->queue_pair_map[que])
			continue;

		__qla2x00_abort_all_cmds(ha->queue_pair_map[que], res);
	}
}

static int
qla2xxx_slave_alloc(struct scsi_device *sdev)
{
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));

	if (!rport || fc_remote_port_chkready(rport))
		return -ENXIO;

	sdev->hostdata = *(fc_port_t **)rport->dd_data;

	return 0;
}

static int
qla2xxx_slave_configure(struct scsi_device *sdev)
{
	scsi_qla_host_t *vha = shost_priv(sdev->host);
	struct req_que *req = vha->req;

	if (IS_T10_PI_CAPABLE(vha->hw))
		blk_queue_update_dma_alignment(sdev->request_queue, 0x7);

	scsi_change_queue_depth(sdev, req->max_q_depth);
	return 0;
}

static void
qla2xxx_slave_destroy(struct scsi_device *sdev)
{
	sdev->hostdata = NULL;
}

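/*
 * Try a 64-bit DMA mask first and fall back to 32-bit; 64-bit
 * addressing is only kept when the platform actually requires
 * upper-dword addressing and a matching consistent mask can be set.
 */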
/**
 * qla2x00_config_dma_addressing() - Configure OS DMA addressing method.
 * @ha: HA context
 *
 * At exit, @ha's flags.enable_64bit_addressing is set to indicate the
 * supported addressing method.
 */
static void
qla2x00_config_dma_addressing(struct qla_hw_data *ha)
{
	/* Assume a 32bit DMA mask. */
	ha->flags.enable_64bit_addressing = 0;

	if (!dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(64))) {
		/* Any upper-dword bits set? */
		if (MSD(dma_get_required_mask(&ha->pdev->dev)) &&
		    !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
			/* Ok, a 64bit DMA mask is applicable. */
			ha->flags.enable_64bit_addressing = 1;
			ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
			ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
			return;
		}
	}

	dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(32));
	pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(32));
}

static void
qla2x00_enable_intrs(struct qla_hw_data *ha)
{
	unsigned long flags = 0;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->interrupts_on = 1;
	/* enable risc and host interrupts */
	WRT_REG_WORD(&reg->ictrl, ICR_EN_INT | ICR_EN_RISC);
	RD_REG_WORD(&reg->ictrl);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

}

static void
qla2x00_disable_intrs(struct qla_hw_data *ha)
{
	unsigned long flags = 0;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->interrupts_on = 0;
	/* disable risc and host interrupts */
	WRT_REG_WORD(&reg->ictrl, 0);
	RD_REG_WORD(&reg->ictrl);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

static void
qla24xx_enable_intrs(struct qla_hw_data *ha)
{
	unsigned long flags = 0;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->interrupts_on = 1;
	WRT_REG_DWORD(&reg->ictrl, ICRX_EN_RISC_INT);
	RD_REG_DWORD(&reg->ictrl);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

static void
qla24xx_disable_intrs(struct qla_hw_data *ha)
{
	unsigned long flags = 0;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	if (IS_NOPOLLING_TYPE(ha))
		return;
	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->interrupts_on = 0;
	WRT_REG_DWORD(&reg->ictrl, 0);
	RD_REG_DWORD(&reg->ictrl);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

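/*
 * Map the PIO/MMIO regions and size the MSI-X vector pool; the
 * request/response queue counts derived below leave one vector
 * reserved for the mailbox/default interrupt.
 */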
*/ 1972 if (!(pci_resource_flags(ha->pdev, 1) & IORESOURCE_MEM)) { 1973 ql_log_pci(ql_log_fatal, ha->pdev, 0x0015, 1974 "Region #1 not an MMIO resource (%s), aborting.\n", 1975 pci_name(ha->pdev)); 1976 goto iospace_error_exit; 1977 } 1978 if (pci_resource_len(ha->pdev, 1) < MIN_IOBASE_LEN) { 1979 ql_log_pci(ql_log_fatal, ha->pdev, 0x0016, 1980 "Invalid PCI mem region size (%s), aborting.\n", 1981 pci_name(ha->pdev)); 1982 goto iospace_error_exit; 1983 } 1984 1985 ha->iobase = ioremap(pci_resource_start(ha->pdev, 1), MIN_IOBASE_LEN); 1986 if (!ha->iobase) { 1987 ql_log_pci(ql_log_fatal, ha->pdev, 0x0017, 1988 "Cannot remap MMIO (%s), aborting.\n", 1989 pci_name(ha->pdev)); 1990 goto iospace_error_exit; 1991 } 1992 1993 /* Determine queue resources */ 1994 ha->max_req_queues = ha->max_rsp_queues = 1; 1995 ha->msix_count = QLA_BASE_VECTORS; 1996 if (!ql2xmqsupport || !ql2xnvmeenable || 1997 (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))) 1998 goto mqiobase_exit; 1999 2000 ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3), 2001 pci_resource_len(ha->pdev, 3)); 2002 if (ha->mqiobase) { 2003 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0018, 2004 "MQIO Base=%p.\n", ha->mqiobase); 2005 /* Read MSIX vector size of the board */ 2006 pci_read_config_word(ha->pdev, QLA_PCI_MSIX_CONTROL, &msix); 2007 ha->msix_count = msix + 1; 2008 /* Max queues are bounded by available msix vectors */ 2009 /* MB interrupt uses 1 vector */ 2010 ha->max_req_queues = ha->msix_count - 1; 2011 ha->max_rsp_queues = ha->max_req_queues; 2012 /* Queue pairs is the max value minus the base queue pair */ 2013 ha->max_qpairs = ha->max_rsp_queues - 1; 2014 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0188, 2015 "Max no of queue pairs: %d.\n", ha->max_qpairs); 2016 2017 ql_log_pci(ql_log_info, ha->pdev, 0x001a, 2018 "MSI-X vector count: %d.\n", ha->msix_count); 2019 } else 2020 ql_log_pci(ql_log_info, ha->pdev, 0x001b, 2021 "BAR 3 not enabled.\n"); 2022 2023 mqiobase_exit: 2024 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x001c, 2025 "MSIX Count: %d.\n", ha->msix_count); 2026 return 0; 2027 2028 iospace_error_exit: 2029 return -ENOMEM; 2030 } 2031 2032 2033 static int 2034 qla83xx_iospace_config(struct qla_hw_data *ha) 2035 { 2036 uint16_t msix; 2037 2038 if (pci_request_selected_regions(ha->pdev, ha->bars, 2039 QLA2XXX_DRIVER_NAME)) { 2040 ql_log_pci(ql_log_fatal, ha->pdev, 0x0117, 2041 "Failed to reserve PIO/MMIO regions (%s), aborting.\n", 2042 pci_name(ha->pdev)); 2043 2044 goto iospace_error_exit; 2045 } 2046 2047 /* Use MMIO operations for all accesses.
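 * NB on the vector/queue arithmetic used here and in
 * qla2x00_iospace_config() above - the MSI-X capability's QSIZE field
 * is N-1 encoded, so:
 *
 *	msix_count     = QSIZE + 1
 *	max_req_queues = msix_count - 1     (one vector for mailbox events)
 *	max_qpairs     = max_req_queues - 1 (base queue pair is separate)
 *
 * e.g. QSIZE = 15 gives 16 vectors, 15 request queues and 14 queue
 * pairs; on 83xx target mode costs one more queue for the ATIO ring.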
*/ 2048 if (!(pci_resource_flags(ha->pdev, 0) & IORESOURCE_MEM)) { 2049 ql_log_pci(ql_log_warn, ha->pdev, 0x0118, 2050 "Region #0 not an MMIO resource (%s), aborting.\n", 2051 pci_name(ha->pdev)); 2052 goto iospace_error_exit; 2053 } 2054 if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) { 2055 ql_log_pci(ql_log_warn, ha->pdev, 0x0119, 2056 "Invalid PCI mem region size (%s), aborting.\n", 2057 pci_name(ha->pdev)); 2058 goto iospace_error_exit; 2059 } 2060 2061 ha->iobase = ioremap(pci_resource_start(ha->pdev, 0), MIN_IOBASE_LEN); 2062 if (!ha->iobase) { 2063 ql_log_pci(ql_log_fatal, ha->pdev, 0x011a, 2064 "Cannot remap MMIO (%s), aborting.\n", 2065 pci_name(ha->pdev)); 2066 goto iospace_error_exit; 2067 } 2068 2069 /* 64bit PCI BAR - BAR2 will correspond to region 4 */ 2070 /* 83XX 26XX always use MQ type access for queues 2071 * - mbar 2, a.k.a region 4 */ 2072 ha->max_req_queues = ha->max_rsp_queues = 1; 2073 ha->msix_count = QLA_BASE_VECTORS; 2074 ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 4), 2075 pci_resource_len(ha->pdev, 4)); 2076 2077 if (!ha->mqiobase) { 2078 ql_log_pci(ql_log_fatal, ha->pdev, 0x011d, 2079 "BAR2/region4 not enabled\n"); 2080 goto mqiobase_exit; 2081 } 2082 2083 ha->msixbase = ioremap(pci_resource_start(ha->pdev, 2), 2084 pci_resource_len(ha->pdev, 2)); 2085 if (ha->msixbase) { 2086 /* Read MSIX vector size of the board */ 2087 pci_read_config_word(ha->pdev, 2088 QLA_83XX_PCI_MSIX_CONTROL, &msix); 2089 ha->msix_count = (msix & PCI_MSIX_FLAGS_QSIZE) + 1; 2090 /* 2091 * By default, driver uses at least two msix vectors 2092 * (default & rspq) 2093 */ 2094 if (ql2xmqsupport || ql2xnvmeenable) { 2095 /* MB interrupt uses 1 vector */ 2096 ha->max_req_queues = ha->msix_count - 1; 2097 2098 /* ATIOQ needs 1 vector. That's 1 less QPair */ 2099 if (QLA_TGT_MODE_ENABLED()) 2100 ha->max_req_queues--; 2101 2102 ha->max_rsp_queues = ha->max_req_queues; 2103 2104 /* Queue pairs is the max value minus 2105 * the base queue pair */ 2106 ha->max_qpairs = ha->max_req_queues - 1; 2107 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x00e3, 2108 "Max no of queue pairs: %d.\n", ha->max_qpairs); 2109 } 2110 ql_log_pci(ql_log_info, ha->pdev, 0x011c, 2111 "MSI-X vector count: %d.\n", ha->msix_count); 2112 } else 2113 ql_log_pci(ql_log_info, ha->pdev, 0x011e, 2114 "BAR 1 not enabled.\n"); 2115 2116 mqiobase_exit: 2117 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011f, 2118 "MSIX Count: %d.\n", ha->msix_count); 2119 return 0; 2120 2121 iospace_error_exit: 2122 return -ENOMEM; 2123 } 2124 2125 static struct isp_operations qla2100_isp_ops = { 2126 .pci_config = qla2100_pci_config, 2127 .reset_chip = qla2x00_reset_chip, 2128 .chip_diag = qla2x00_chip_diag, 2129 .config_rings = qla2x00_config_rings, 2130 .reset_adapter = qla2x00_reset_adapter, 2131 .nvram_config = qla2x00_nvram_config, 2132 .update_fw_options = qla2x00_update_fw_options, 2133 .load_risc = qla2x00_load_risc, 2134 .pci_info_str = qla2x00_pci_info_str, 2135 .fw_version_str = qla2x00_fw_version_str, 2136 .intr_handler = qla2100_intr_handler, 2137 .enable_intrs = qla2x00_enable_intrs, 2138 .disable_intrs = qla2x00_disable_intrs, 2139 .abort_command = qla2x00_abort_command, 2140 .target_reset = qla2x00_abort_target, 2141 .lun_reset = qla2x00_lun_reset, 2142 .fabric_login = qla2x00_login_fabric, 2143 .fabric_logout = qla2x00_fabric_logout, 2144 .calc_req_entries = qla2x00_calc_iocbs_32, 2145 .build_iocbs = qla2x00_build_scsi_iocbs_32, 2146 .prep_ms_iocb = qla2x00_prep_ms_iocb, 2147 .prep_ms_fdmi_iocb = qla2x00_prep_ms_fdmi_iocb, 2148 .read_nvram = 
qla2x00_read_nvram_data, 2149 .write_nvram = qla2x00_write_nvram_data, 2150 .fw_dump = qla2100_fw_dump, 2151 .beacon_on = NULL, 2152 .beacon_off = NULL, 2153 .beacon_blink = NULL, 2154 .read_optrom = qla2x00_read_optrom_data, 2155 .write_optrom = qla2x00_write_optrom_data, 2156 .get_flash_version = qla2x00_get_flash_version, 2157 .start_scsi = qla2x00_start_scsi, 2158 .start_scsi_mq = NULL, 2159 .abort_isp = qla2x00_abort_isp, 2160 .iospace_config = qla2x00_iospace_config, 2161 .initialize_adapter = qla2x00_initialize_adapter, 2162 }; 2163 2164 static struct isp_operations qla2300_isp_ops = { 2165 .pci_config = qla2300_pci_config, 2166 .reset_chip = qla2x00_reset_chip, 2167 .chip_diag = qla2x00_chip_diag, 2168 .config_rings = qla2x00_config_rings, 2169 .reset_adapter = qla2x00_reset_adapter, 2170 .nvram_config = qla2x00_nvram_config, 2171 .update_fw_options = qla2x00_update_fw_options, 2172 .load_risc = qla2x00_load_risc, 2173 .pci_info_str = qla2x00_pci_info_str, 2174 .fw_version_str = qla2x00_fw_version_str, 2175 .intr_handler = qla2300_intr_handler, 2176 .enable_intrs = qla2x00_enable_intrs, 2177 .disable_intrs = qla2x00_disable_intrs, 2178 .abort_command = qla2x00_abort_command, 2179 .target_reset = qla2x00_abort_target, 2180 .lun_reset = qla2x00_lun_reset, 2181 .fabric_login = qla2x00_login_fabric, 2182 .fabric_logout = qla2x00_fabric_logout, 2183 .calc_req_entries = qla2x00_calc_iocbs_32, 2184 .build_iocbs = qla2x00_build_scsi_iocbs_32, 2185 .prep_ms_iocb = qla2x00_prep_ms_iocb, 2186 .prep_ms_fdmi_iocb = qla2x00_prep_ms_fdmi_iocb, 2187 .read_nvram = qla2x00_read_nvram_data, 2188 .write_nvram = qla2x00_write_nvram_data, 2189 .fw_dump = qla2300_fw_dump, 2190 .beacon_on = qla2x00_beacon_on, 2191 .beacon_off = qla2x00_beacon_off, 2192 .beacon_blink = qla2x00_beacon_blink, 2193 .read_optrom = qla2x00_read_optrom_data, 2194 .write_optrom = qla2x00_write_optrom_data, 2195 .get_flash_version = qla2x00_get_flash_version, 2196 .start_scsi = qla2x00_start_scsi, 2197 .start_scsi_mq = NULL, 2198 .abort_isp = qla2x00_abort_isp, 2199 .iospace_config = qla2x00_iospace_config, 2200 .initialize_adapter = qla2x00_initialize_adapter, 2201 }; 2202 2203 static struct isp_operations qla24xx_isp_ops = { 2204 .pci_config = qla24xx_pci_config, 2205 .reset_chip = qla24xx_reset_chip, 2206 .chip_diag = qla24xx_chip_diag, 2207 .config_rings = qla24xx_config_rings, 2208 .reset_adapter = qla24xx_reset_adapter, 2209 .nvram_config = qla24xx_nvram_config, 2210 .update_fw_options = qla24xx_update_fw_options, 2211 .load_risc = qla24xx_load_risc, 2212 .pci_info_str = qla24xx_pci_info_str, 2213 .fw_version_str = qla24xx_fw_version_str, 2214 .intr_handler = qla24xx_intr_handler, 2215 .enable_intrs = qla24xx_enable_intrs, 2216 .disable_intrs = qla24xx_disable_intrs, 2217 .abort_command = qla24xx_abort_command, 2218 .target_reset = qla24xx_abort_target, 2219 .lun_reset = qla24xx_lun_reset, 2220 .fabric_login = qla24xx_login_fabric, 2221 .fabric_logout = qla24xx_fabric_logout, 2222 .calc_req_entries = NULL, 2223 .build_iocbs = NULL, 2224 .prep_ms_iocb = qla24xx_prep_ms_iocb, 2225 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb, 2226 .read_nvram = qla24xx_read_nvram_data, 2227 .write_nvram = qla24xx_write_nvram_data, 2228 .fw_dump = qla24xx_fw_dump, 2229 .beacon_on = qla24xx_beacon_on, 2230 .beacon_off = qla24xx_beacon_off, 2231 .beacon_blink = qla24xx_beacon_blink, 2232 .read_optrom = qla24xx_read_optrom_data, 2233 .write_optrom = qla24xx_write_optrom_data, 2234 .get_flash_version = qla24xx_get_flash_version, 2235 .start_scsi 
= qla24xx_start_scsi, 2236 .start_scsi_mq = NULL, 2237 .abort_isp = qla2x00_abort_isp, 2238 .iospace_config = qla2x00_iospace_config, 2239 .initialize_adapter = qla2x00_initialize_adapter, 2240 }; 2241 2242 static struct isp_operations qla25xx_isp_ops = { 2243 .pci_config = qla25xx_pci_config, 2244 .reset_chip = qla24xx_reset_chip, 2245 .chip_diag = qla24xx_chip_diag, 2246 .config_rings = qla24xx_config_rings, 2247 .reset_adapter = qla24xx_reset_adapter, 2248 .nvram_config = qla24xx_nvram_config, 2249 .update_fw_options = qla24xx_update_fw_options, 2250 .load_risc = qla24xx_load_risc, 2251 .pci_info_str = qla24xx_pci_info_str, 2252 .fw_version_str = qla24xx_fw_version_str, 2253 .intr_handler = qla24xx_intr_handler, 2254 .enable_intrs = qla24xx_enable_intrs, 2255 .disable_intrs = qla24xx_disable_intrs, 2256 .abort_command = qla24xx_abort_command, 2257 .target_reset = qla24xx_abort_target, 2258 .lun_reset = qla24xx_lun_reset, 2259 .fabric_login = qla24xx_login_fabric, 2260 .fabric_logout = qla24xx_fabric_logout, 2261 .calc_req_entries = NULL, 2262 .build_iocbs = NULL, 2263 .prep_ms_iocb = qla24xx_prep_ms_iocb, 2264 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb, 2265 .read_nvram = qla25xx_read_nvram_data, 2266 .write_nvram = qla25xx_write_nvram_data, 2267 .fw_dump = qla25xx_fw_dump, 2268 .beacon_on = qla24xx_beacon_on, 2269 .beacon_off = qla24xx_beacon_off, 2270 .beacon_blink = qla24xx_beacon_blink, 2271 .read_optrom = qla25xx_read_optrom_data, 2272 .write_optrom = qla24xx_write_optrom_data, 2273 .get_flash_version = qla24xx_get_flash_version, 2274 .start_scsi = qla24xx_dif_start_scsi, 2275 .start_scsi_mq = qla2xxx_dif_start_scsi_mq, 2276 .abort_isp = qla2x00_abort_isp, 2277 .iospace_config = qla2x00_iospace_config, 2278 .initialize_adapter = qla2x00_initialize_adapter, 2279 }; 2280 2281 static struct isp_operations qla81xx_isp_ops = { 2282 .pci_config = qla25xx_pci_config, 2283 .reset_chip = qla24xx_reset_chip, 2284 .chip_diag = qla24xx_chip_diag, 2285 .config_rings = qla24xx_config_rings, 2286 .reset_adapter = qla24xx_reset_adapter, 2287 .nvram_config = qla81xx_nvram_config, 2288 .update_fw_options = qla81xx_update_fw_options, 2289 .load_risc = qla81xx_load_risc, 2290 .pci_info_str = qla24xx_pci_info_str, 2291 .fw_version_str = qla24xx_fw_version_str, 2292 .intr_handler = qla24xx_intr_handler, 2293 .enable_intrs = qla24xx_enable_intrs, 2294 .disable_intrs = qla24xx_disable_intrs, 2295 .abort_command = qla24xx_abort_command, 2296 .target_reset = qla24xx_abort_target, 2297 .lun_reset = qla24xx_lun_reset, 2298 .fabric_login = qla24xx_login_fabric, 2299 .fabric_logout = qla24xx_fabric_logout, 2300 .calc_req_entries = NULL, 2301 .build_iocbs = NULL, 2302 .prep_ms_iocb = qla24xx_prep_ms_iocb, 2303 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb, 2304 .read_nvram = NULL, 2305 .write_nvram = NULL, 2306 .fw_dump = qla81xx_fw_dump, 2307 .beacon_on = qla24xx_beacon_on, 2308 .beacon_off = qla24xx_beacon_off, 2309 .beacon_blink = qla83xx_beacon_blink, 2310 .read_optrom = qla25xx_read_optrom_data, 2311 .write_optrom = qla24xx_write_optrom_data, 2312 .get_flash_version = qla24xx_get_flash_version, 2313 .start_scsi = qla24xx_dif_start_scsi, 2314 .start_scsi_mq = qla2xxx_dif_start_scsi_mq, 2315 .abort_isp = qla2x00_abort_isp, 2316 .iospace_config = qla2x00_iospace_config, 2317 .initialize_adapter = qla2x00_initialize_adapter, 2318 }; 2319 2320 static struct isp_operations qla82xx_isp_ops = { 2321 .pci_config = qla82xx_pci_config, 2322 .reset_chip = qla82xx_reset_chip, 2323 .chip_diag = qla24xx_chip_diag, 
2324 .config_rings = qla82xx_config_rings, 2325 .reset_adapter = qla24xx_reset_adapter, 2326 .nvram_config = qla81xx_nvram_config, 2327 .update_fw_options = qla24xx_update_fw_options, 2328 .load_risc = qla82xx_load_risc, 2329 .pci_info_str = qla24xx_pci_info_str, 2330 .fw_version_str = qla24xx_fw_version_str, 2331 .intr_handler = qla82xx_intr_handler, 2332 .enable_intrs = qla82xx_enable_intrs, 2333 .disable_intrs = qla82xx_disable_intrs, 2334 .abort_command = qla24xx_abort_command, 2335 .target_reset = qla24xx_abort_target, 2336 .lun_reset = qla24xx_lun_reset, 2337 .fabric_login = qla24xx_login_fabric, 2338 .fabric_logout = qla24xx_fabric_logout, 2339 .calc_req_entries = NULL, 2340 .build_iocbs = NULL, 2341 .prep_ms_iocb = qla24xx_prep_ms_iocb, 2342 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb, 2343 .read_nvram = qla24xx_read_nvram_data, 2344 .write_nvram = qla24xx_write_nvram_data, 2345 .fw_dump = qla82xx_fw_dump, 2346 .beacon_on = qla82xx_beacon_on, 2347 .beacon_off = qla82xx_beacon_off, 2348 .beacon_blink = NULL, 2349 .read_optrom = qla82xx_read_optrom_data, 2350 .write_optrom = qla82xx_write_optrom_data, 2351 .get_flash_version = qla82xx_get_flash_version, 2352 .start_scsi = qla82xx_start_scsi, 2353 .start_scsi_mq = NULL, 2354 .abort_isp = qla82xx_abort_isp, 2355 .iospace_config = qla82xx_iospace_config, 2356 .initialize_adapter = qla2x00_initialize_adapter, 2357 }; 2358 2359 static struct isp_operations qla8044_isp_ops = { 2360 .pci_config = qla82xx_pci_config, 2361 .reset_chip = qla82xx_reset_chip, 2362 .chip_diag = qla24xx_chip_diag, 2363 .config_rings = qla82xx_config_rings, 2364 .reset_adapter = qla24xx_reset_adapter, 2365 .nvram_config = qla81xx_nvram_config, 2366 .update_fw_options = qla24xx_update_fw_options, 2367 .load_risc = qla82xx_load_risc, 2368 .pci_info_str = qla24xx_pci_info_str, 2369 .fw_version_str = qla24xx_fw_version_str, 2370 .intr_handler = qla8044_intr_handler, 2371 .enable_intrs = qla82xx_enable_intrs, 2372 .disable_intrs = qla82xx_disable_intrs, 2373 .abort_command = qla24xx_abort_command, 2374 .target_reset = qla24xx_abort_target, 2375 .lun_reset = qla24xx_lun_reset, 2376 .fabric_login = qla24xx_login_fabric, 2377 .fabric_logout = qla24xx_fabric_logout, 2378 .calc_req_entries = NULL, 2379 .build_iocbs = NULL, 2380 .prep_ms_iocb = qla24xx_prep_ms_iocb, 2381 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb, 2382 .read_nvram = NULL, 2383 .write_nvram = NULL, 2384 .fw_dump = qla8044_fw_dump, 2385 .beacon_on = qla82xx_beacon_on, 2386 .beacon_off = qla82xx_beacon_off, 2387 .beacon_blink = NULL, 2388 .read_optrom = qla8044_read_optrom_data, 2389 .write_optrom = qla8044_write_optrom_data, 2390 .get_flash_version = qla82xx_get_flash_version, 2391 .start_scsi = qla82xx_start_scsi, 2392 .start_scsi_mq = NULL, 2393 .abort_isp = qla8044_abort_isp, 2394 .iospace_config = qla82xx_iospace_config, 2395 .initialize_adapter = qla2x00_initialize_adapter, 2396 }; 2397 2398 static struct isp_operations qla83xx_isp_ops = { 2399 .pci_config = qla25xx_pci_config, 2400 .reset_chip = qla24xx_reset_chip, 2401 .chip_diag = qla24xx_chip_diag, 2402 .config_rings = qla24xx_config_rings, 2403 .reset_adapter = qla24xx_reset_adapter, 2404 .nvram_config = qla81xx_nvram_config, 2405 .update_fw_options = qla81xx_update_fw_options, 2406 .load_risc = qla81xx_load_risc, 2407 .pci_info_str = qla24xx_pci_info_str, 2408 .fw_version_str = qla24xx_fw_version_str, 2409 .intr_handler = qla24xx_intr_handler, 2410 .enable_intrs = qla24xx_enable_intrs, 2411 .disable_intrs = qla24xx_disable_intrs, 2412 
.abort_command = qla24xx_abort_command, 2413 .target_reset = qla24xx_abort_target, 2414 .lun_reset = qla24xx_lun_reset, 2415 .fabric_login = qla24xx_login_fabric, 2416 .fabric_logout = qla24xx_fabric_logout, 2417 .calc_req_entries = NULL, 2418 .build_iocbs = NULL, 2419 .prep_ms_iocb = qla24xx_prep_ms_iocb, 2420 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb, 2421 .read_nvram = NULL, 2422 .write_nvram = NULL, 2423 .fw_dump = qla83xx_fw_dump, 2424 .beacon_on = qla24xx_beacon_on, 2425 .beacon_off = qla24xx_beacon_off, 2426 .beacon_blink = qla83xx_beacon_blink, 2427 .read_optrom = qla25xx_read_optrom_data, 2428 .write_optrom = qla24xx_write_optrom_data, 2429 .get_flash_version = qla24xx_get_flash_version, 2430 .start_scsi = qla24xx_dif_start_scsi, 2431 .start_scsi_mq = qla2xxx_dif_start_scsi_mq, 2432 .abort_isp = qla2x00_abort_isp, 2433 .iospace_config = qla83xx_iospace_config, 2434 .initialize_adapter = qla2x00_initialize_adapter, 2435 }; 2436 2437 static struct isp_operations qlafx00_isp_ops = { 2438 .pci_config = qlafx00_pci_config, 2439 .reset_chip = qlafx00_soft_reset, 2440 .chip_diag = qlafx00_chip_diag, 2441 .config_rings = qlafx00_config_rings, 2442 .reset_adapter = qlafx00_soft_reset, 2443 .nvram_config = NULL, 2444 .update_fw_options = NULL, 2445 .load_risc = NULL, 2446 .pci_info_str = qlafx00_pci_info_str, 2447 .fw_version_str = qlafx00_fw_version_str, 2448 .intr_handler = qlafx00_intr_handler, 2449 .enable_intrs = qlafx00_enable_intrs, 2450 .disable_intrs = qlafx00_disable_intrs, 2451 .abort_command = qla24xx_async_abort_command, 2452 .target_reset = qlafx00_abort_target, 2453 .lun_reset = qlafx00_lun_reset, 2454 .fabric_login = NULL, 2455 .fabric_logout = NULL, 2456 .calc_req_entries = NULL, 2457 .build_iocbs = NULL, 2458 .prep_ms_iocb = qla24xx_prep_ms_iocb, 2459 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb, 2460 .read_nvram = qla24xx_read_nvram_data, 2461 .write_nvram = qla24xx_write_nvram_data, 2462 .fw_dump = NULL, 2463 .beacon_on = qla24xx_beacon_on, 2464 .beacon_off = qla24xx_beacon_off, 2465 .beacon_blink = NULL, 2466 .read_optrom = qla24xx_read_optrom_data, 2467 .write_optrom = qla24xx_write_optrom_data, 2468 .get_flash_version = qla24xx_get_flash_version, 2469 .start_scsi = qlafx00_start_scsi, 2470 .start_scsi_mq = NULL, 2471 .abort_isp = qlafx00_abort_isp, 2472 .iospace_config = qlafx00_iospace_config, 2473 .initialize_adapter = qlafx00_initialize_adapter, 2474 }; 2475 2476 static struct isp_operations qla27xx_isp_ops = { 2477 .pci_config = qla25xx_pci_config, 2478 .reset_chip = qla24xx_reset_chip, 2479 .chip_diag = qla24xx_chip_diag, 2480 .config_rings = qla24xx_config_rings, 2481 .reset_adapter = qla24xx_reset_adapter, 2482 .nvram_config = qla81xx_nvram_config, 2483 .update_fw_options = qla24xx_update_fw_options, 2484 .load_risc = qla81xx_load_risc, 2485 .pci_info_str = qla24xx_pci_info_str, 2486 .fw_version_str = qla24xx_fw_version_str, 2487 .intr_handler = qla24xx_intr_handler, 2488 .enable_intrs = qla24xx_enable_intrs, 2489 .disable_intrs = qla24xx_disable_intrs, 2490 .abort_command = qla24xx_abort_command, 2491 .target_reset = qla24xx_abort_target, 2492 .lun_reset = qla24xx_lun_reset, 2493 .fabric_login = qla24xx_login_fabric, 2494 .fabric_logout = qla24xx_fabric_logout, 2495 .calc_req_entries = NULL, 2496 .build_iocbs = NULL, 2497 .prep_ms_iocb = qla24xx_prep_ms_iocb, 2498 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb, 2499 .read_nvram = NULL, 2500 .write_nvram = NULL, 2501 .fw_dump = qla27xx_fwdump, 2502 .beacon_on = qla24xx_beacon_on, 2503 .beacon_off = 
qla24xx_beacon_off, 2504 .beacon_blink = qla83xx_beacon_blink, 2505 .read_optrom = qla25xx_read_optrom_data, 2506 .write_optrom = qla24xx_write_optrom_data, 2507 .get_flash_version = qla24xx_get_flash_version, 2508 .start_scsi = qla24xx_dif_start_scsi, 2509 .start_scsi_mq = qla2xxx_dif_start_scsi_mq, 2510 .abort_isp = qla2x00_abort_isp, 2511 .iospace_config = qla83xx_iospace_config, 2512 .initialize_adapter = qla2x00_initialize_adapter, 2513 }; 2514 2515 static inline void 2516 qla2x00_set_isp_flags(struct qla_hw_data *ha) 2517 { 2518 ha->device_type = DT_EXTENDED_IDS; 2519 switch (ha->pdev->device) { 2520 case PCI_DEVICE_ID_QLOGIC_ISP2100: 2521 ha->isp_type |= DT_ISP2100; 2522 ha->device_type &= ~DT_EXTENDED_IDS; 2523 ha->fw_srisc_address = RISC_START_ADDRESS_2100; 2524 break; 2525 case PCI_DEVICE_ID_QLOGIC_ISP2200: 2526 ha->isp_type |= DT_ISP2200; 2527 ha->device_type &= ~DT_EXTENDED_IDS; 2528 ha->fw_srisc_address = RISC_START_ADDRESS_2100; 2529 break; 2530 case PCI_DEVICE_ID_QLOGIC_ISP2300: 2531 ha->isp_type |= DT_ISP2300; 2532 ha->device_type |= DT_ZIO_SUPPORTED; 2533 ha->fw_srisc_address = RISC_START_ADDRESS_2300; 2534 break; 2535 case PCI_DEVICE_ID_QLOGIC_ISP2312: 2536 ha->isp_type |= DT_ISP2312; 2537 ha->device_type |= DT_ZIO_SUPPORTED; 2538 ha->fw_srisc_address = RISC_START_ADDRESS_2300; 2539 break; 2540 case PCI_DEVICE_ID_QLOGIC_ISP2322: 2541 ha->isp_type |= DT_ISP2322; 2542 ha->device_type |= DT_ZIO_SUPPORTED; 2543 if (ha->pdev->subsystem_vendor == 0x1028 && 2544 ha->pdev->subsystem_device == 0x0170) 2545 ha->device_type |= DT_OEM_001; 2546 ha->fw_srisc_address = RISC_START_ADDRESS_2300; 2547 break; 2548 case PCI_DEVICE_ID_QLOGIC_ISP6312: 2549 ha->isp_type |= DT_ISP6312; 2550 ha->fw_srisc_address = RISC_START_ADDRESS_2300; 2551 break; 2552 case PCI_DEVICE_ID_QLOGIC_ISP6322: 2553 ha->isp_type |= DT_ISP6322; 2554 ha->fw_srisc_address = RISC_START_ADDRESS_2300; 2555 break; 2556 case PCI_DEVICE_ID_QLOGIC_ISP2422: 2557 ha->isp_type |= DT_ISP2422; 2558 ha->device_type |= DT_ZIO_SUPPORTED; 2559 ha->device_type |= DT_FWI2; 2560 ha->device_type |= DT_IIDMA; 2561 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2562 break; 2563 case PCI_DEVICE_ID_QLOGIC_ISP2432: 2564 ha->isp_type |= DT_ISP2432; 2565 ha->device_type |= DT_ZIO_SUPPORTED; 2566 ha->device_type |= DT_FWI2; 2567 ha->device_type |= DT_IIDMA; 2568 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2569 break; 2570 case PCI_DEVICE_ID_QLOGIC_ISP8432: 2571 ha->isp_type |= DT_ISP8432; 2572 ha->device_type |= DT_ZIO_SUPPORTED; 2573 ha->device_type |= DT_FWI2; 2574 ha->device_type |= DT_IIDMA; 2575 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2576 break; 2577 case PCI_DEVICE_ID_QLOGIC_ISP5422: 2578 ha->isp_type |= DT_ISP5422; 2579 ha->device_type |= DT_FWI2; 2580 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2581 break; 2582 case PCI_DEVICE_ID_QLOGIC_ISP5432: 2583 ha->isp_type |= DT_ISP5432; 2584 ha->device_type |= DT_FWI2; 2585 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2586 break; 2587 case PCI_DEVICE_ID_QLOGIC_ISP2532: 2588 ha->isp_type |= DT_ISP2532; 2589 ha->device_type |= DT_ZIO_SUPPORTED; 2590 ha->device_type |= DT_FWI2; 2591 ha->device_type |= DT_IIDMA; 2592 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2593 break; 2594 case PCI_DEVICE_ID_QLOGIC_ISP8001: 2595 ha->isp_type |= DT_ISP8001; 2596 ha->device_type |= DT_ZIO_SUPPORTED; 2597 ha->device_type |= DT_FWI2; 2598 ha->device_type |= DT_IIDMA; 2599 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2600 break; 2601 case PCI_DEVICE_ID_QLOGIC_ISP8021: 2602 ha->isp_type |= 
DT_ISP8021; 2603 ha->device_type |= DT_ZIO_SUPPORTED; 2604 ha->device_type |= DT_FWI2; 2605 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2606 /* Initialize 82XX ISP flags */ 2607 qla82xx_init_flags(ha); 2608 break; 2609 case PCI_DEVICE_ID_QLOGIC_ISP8044: 2610 ha->isp_type |= DT_ISP8044; 2611 ha->device_type |= DT_ZIO_SUPPORTED; 2612 ha->device_type |= DT_FWI2; 2613 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2614 /* Initialize 82XX ISP flags */ 2615 qla82xx_init_flags(ha); 2616 break; 2617 case PCI_DEVICE_ID_QLOGIC_ISP2031: 2618 ha->isp_type |= DT_ISP2031; 2619 ha->device_type |= DT_ZIO_SUPPORTED; 2620 ha->device_type |= DT_FWI2; 2621 ha->device_type |= DT_IIDMA; 2622 ha->device_type |= DT_T10_PI; 2623 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2624 break; 2625 case PCI_DEVICE_ID_QLOGIC_ISP8031: 2626 ha->isp_type |= DT_ISP8031; 2627 ha->device_type |= DT_ZIO_SUPPORTED; 2628 ha->device_type |= DT_FWI2; 2629 ha->device_type |= DT_IIDMA; 2630 ha->device_type |= DT_T10_PI; 2631 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2632 break; 2633 case PCI_DEVICE_ID_QLOGIC_ISPF001: 2634 ha->isp_type |= DT_ISPFX00; 2635 break; 2636 case PCI_DEVICE_ID_QLOGIC_ISP2071: 2637 ha->isp_type |= DT_ISP2071; 2638 ha->device_type |= DT_ZIO_SUPPORTED; 2639 ha->device_type |= DT_FWI2; 2640 ha->device_type |= DT_IIDMA; 2641 ha->device_type |= DT_T10_PI; 2642 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2643 break; 2644 case PCI_DEVICE_ID_QLOGIC_ISP2271: 2645 ha->isp_type |= DT_ISP2271; 2646 ha->device_type |= DT_ZIO_SUPPORTED; 2647 ha->device_type |= DT_FWI2; 2648 ha->device_type |= DT_IIDMA; 2649 ha->device_type |= DT_T10_PI; 2650 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2651 break; 2652 case PCI_DEVICE_ID_QLOGIC_ISP2261: 2653 ha->isp_type |= DT_ISP2261; 2654 ha->device_type |= DT_ZIO_SUPPORTED; 2655 ha->device_type |= DT_FWI2; 2656 ha->device_type |= DT_IIDMA; 2657 ha->device_type |= DT_T10_PI; 2658 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2659 break; 2660 case PCI_DEVICE_ID_QLOGIC_ISP2081: 2661 case PCI_DEVICE_ID_QLOGIC_ISP2089: 2662 ha->isp_type |= DT_ISP2081; 2663 ha->device_type |= DT_ZIO_SUPPORTED; 2664 ha->device_type |= DT_FWI2; 2665 ha->device_type |= DT_IIDMA; 2666 ha->device_type |= DT_T10_PI; 2667 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2668 break; 2669 case PCI_DEVICE_ID_QLOGIC_ISP2281: 2670 case PCI_DEVICE_ID_QLOGIC_ISP2289: 2671 ha->isp_type |= DT_ISP2281; 2672 ha->device_type |= DT_ZIO_SUPPORTED; 2673 ha->device_type |= DT_FWI2; 2674 ha->device_type |= DT_IIDMA; 2675 ha->device_type |= DT_T10_PI; 2676 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2677 break; 2678 } 2679 2680 if (IS_QLA82XX(ha)) 2681 ha->port_no = ha->portnum & 1; 2682 else { 2683 /* Get adapter physical port no from interrupt pin register. 
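 * The pin register is 1-based (INTA# = 1, INTB# = 2). On
 * ISP25xx/2031/27xx/28xx the port number is simply pin - 1; on the
 * remaining parts only the low bit matters, so the !(pin & 1)
 * inversion below maps INTA# -> port 0 and INTB# -> port 1.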
*/ 2684 pci_read_config_byte(ha->pdev, PCI_INTERRUPT_PIN, &ha->port_no); 2685 if (IS_QLA25XX(ha) || IS_QLA2031(ha) || 2686 IS_QLA27XX(ha) || IS_QLA28XX(ha)) 2687 ha->port_no--; 2688 else 2689 ha->port_no = !(ha->port_no & 1); 2690 } 2691 2692 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x000b, 2693 "device_type=0x%x port=%d fw_srisc_address=0x%x.\n", 2694 ha->device_type, ha->port_no, ha->fw_srisc_address); 2695 } 2696 2697 static void 2698 qla2xxx_scan_start(struct Scsi_Host *shost) 2699 { 2700 scsi_qla_host_t *vha = shost_priv(shost); 2701 2702 if (vha->hw->flags.running_gold_fw) 2703 return; 2704 2705 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 2706 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 2707 set_bit(RSCN_UPDATE, &vha->dpc_flags); 2708 set_bit(NPIV_CONFIG_NEEDED, &vha->dpc_flags); 2709 } 2710 2711 static int 2712 qla2xxx_scan_finished(struct Scsi_Host *shost, unsigned long time) 2713 { 2714 scsi_qla_host_t *vha = shost_priv(shost); 2715 2716 if (test_bit(UNLOADING, &vha->dpc_flags)) 2717 return 1; 2718 if (!vha->host) 2719 return 1; 2720 if (time > vha->hw->loop_reset_delay * HZ) 2721 return 1; 2722 2723 return atomic_read(&vha->loop_state) == LOOP_READY; 2724 } 2725 2726 static void qla2x00_iocb_work_fn(struct work_struct *work) 2727 { 2728 struct scsi_qla_host *vha = container_of(work, 2729 struct scsi_qla_host, iocb_work); 2730 struct qla_hw_data *ha = vha->hw; 2731 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 2732 int i = 2; 2733 unsigned long flags; 2734 2735 if (test_bit(UNLOADING, &base_vha->dpc_flags)) 2736 return; 2737 2738 while (!list_empty(&vha->work_list) && i > 0) { 2739 qla2x00_do_work(vha); 2740 i--; 2741 } 2742 2743 spin_lock_irqsave(&vha->work_lock, flags); 2744 clear_bit(IOCB_WORK_ACTIVE, &vha->dpc_flags); 2745 spin_unlock_irqrestore(&vha->work_lock, flags); 2746 } 2747 2748 /* 2749 * PCI driver interface 2750 */ 2751 static int 2752 qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) 2753 { 2754 int ret = -ENODEV; 2755 struct Scsi_Host *host; 2756 scsi_qla_host_t *base_vha = NULL; 2757 struct qla_hw_data *ha; 2758 char pci_info[30]; 2759 char fw_str[30], wq_name[30]; 2760 struct scsi_host_template *sht; 2761 int bars, mem_only = 0; 2762 uint16_t req_length = 0, rsp_length = 0; 2763 struct req_que *req = NULL; 2764 struct rsp_que *rsp = NULL; 2765 int i; 2766 2767 bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO); 2768 sht = &qla2xxx_driver_template; 2769 if (pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2422 || 2770 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2432 || 2771 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8432 || 2772 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5422 || 2773 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5432 || 2774 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2532 || 2775 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8001 || 2776 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8021 || 2777 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2031 || 2778 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8031 || 2779 pdev->device == PCI_DEVICE_ID_QLOGIC_ISPF001 || 2780 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8044 || 2781 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2071 || 2782 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2271 || 2783 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2261 || 2784 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2081 || 2785 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2281 || 2786 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2089 || 2787 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2289) { 2788 bars = pci_select_bars(pdev, IORESOURCE_MEM); 2789 mem_only = 1; 2790 
ql_dbg_pci(ql_dbg_init, pdev, 0x0007, 2791 "Mem only adapter.\n"); 2792 } 2793 ql_dbg_pci(ql_dbg_init, pdev, 0x0008, 2794 "Bars=%d.\n", bars); 2795 2796 if (mem_only) { 2797 if (pci_enable_device_mem(pdev)) 2798 return ret; 2799 } else { 2800 if (pci_enable_device(pdev)) 2801 return ret; 2802 } 2803 2804 /* This may fail but that's ok */ 2805 pci_enable_pcie_error_reporting(pdev); 2806 2807 /* Turn off T10-DIF when FC-NVMe is enabled */ 2808 if (ql2xnvmeenable) 2809 ql2xenabledif = 0; 2810 2811 ha = kzalloc(sizeof(struct qla_hw_data), GFP_KERNEL); 2812 if (!ha) { 2813 ql_log_pci(ql_log_fatal, pdev, 0x0009, 2814 "Unable to allocate memory for ha.\n"); 2815 goto disable_device; 2816 } 2817 ql_dbg_pci(ql_dbg_init, pdev, 0x000a, 2818 "Memory allocated for ha=%p.\n", ha); 2819 ha->pdev = pdev; 2820 INIT_LIST_HEAD(&ha->tgt.q_full_list); 2821 spin_lock_init(&ha->tgt.q_full_lock); 2822 spin_lock_init(&ha->tgt.sess_lock); 2823 spin_lock_init(&ha->tgt.atio_lock); 2824 2825 atomic_set(&ha->nvme_active_aen_cnt, 0); 2826 2827 /* Clear our data area */ 2828 ha->bars = bars; 2829 ha->mem_only = mem_only; 2830 spin_lock_init(&ha->hardware_lock); 2831 spin_lock_init(&ha->vport_slock); 2832 mutex_init(&ha->selflogin_lock); 2833 mutex_init(&ha->optrom_mutex); 2834 2835 /* Set ISP-type information. */ 2836 qla2x00_set_isp_flags(ha); 2837 2838 /* Set EEH reset type to fundamental if required by hba */ 2839 if (IS_QLA24XX(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha) || 2840 IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) 2841 pdev->needs_freset = 1; 2842 2843 ha->prev_topology = 0; 2844 ha->init_cb_size = sizeof(init_cb_t); 2845 ha->link_data_rate = PORT_SPEED_UNKNOWN; 2846 ha->optrom_size = OPTROM_SIZE_2300; 2847 ha->max_exchg = FW_MAX_EXCHANGES_CNT; 2848 atomic_set(&ha->num_pend_mbx_stage1, 0); 2849 atomic_set(&ha->num_pend_mbx_stage2, 0); 2850 atomic_set(&ha->num_pend_mbx_stage3, 0); 2851 atomic_set(&ha->zio_threshold, DEFAULT_ZIO_THRESHOLD); 2852 ha->last_zio_threshold = DEFAULT_ZIO_THRESHOLD; 2853 2854 /* Assign ISP specific operations. 
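 * Each branch below fills in the per-ISP limits (mailbox count, ring
 * sizes, loop IDs, flash/NVRAM offsets) and points ha->isp_ops at the
 * matching isp_operations table defined earlier, so the rest of the
 * driver stays chip-agnostic and simply calls through the table,
 * e.g. ha->isp_ops->enable_intrs(ha) picks the 2x00 or 24xx variant.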
*/ 2855 if (IS_QLA2100(ha)) { 2856 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2100; 2857 ha->mbx_count = MAILBOX_REGISTER_COUNT_2100; 2858 req_length = REQUEST_ENTRY_CNT_2100; 2859 rsp_length = RESPONSE_ENTRY_CNT_2100; 2860 ha->max_loop_id = SNS_LAST_LOOP_ID_2100; 2861 ha->gid_list_info_size = 4; 2862 ha->flash_conf_off = ~0; 2863 ha->flash_data_off = ~0; 2864 ha->nvram_conf_off = ~0; 2865 ha->nvram_data_off = ~0; 2866 ha->isp_ops = &qla2100_isp_ops; 2867 } else if (IS_QLA2200(ha)) { 2868 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2100; 2869 ha->mbx_count = MAILBOX_REGISTER_COUNT_2200; 2870 req_length = REQUEST_ENTRY_CNT_2200; 2871 rsp_length = RESPONSE_ENTRY_CNT_2100; 2872 ha->max_loop_id = SNS_LAST_LOOP_ID_2100; 2873 ha->gid_list_info_size = 4; 2874 ha->flash_conf_off = ~0; 2875 ha->flash_data_off = ~0; 2876 ha->nvram_conf_off = ~0; 2877 ha->nvram_data_off = ~0; 2878 ha->isp_ops = &qla2100_isp_ops; 2879 } else if (IS_QLA23XX(ha)) { 2880 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2100; 2881 ha->mbx_count = MAILBOX_REGISTER_COUNT; 2882 req_length = REQUEST_ENTRY_CNT_2200; 2883 rsp_length = RESPONSE_ENTRY_CNT_2300; 2884 ha->max_loop_id = SNS_LAST_LOOP_ID_2300; 2885 ha->gid_list_info_size = 6; 2886 if (IS_QLA2322(ha) || IS_QLA6322(ha)) 2887 ha->optrom_size = OPTROM_SIZE_2322; 2888 ha->flash_conf_off = ~0; 2889 ha->flash_data_off = ~0; 2890 ha->nvram_conf_off = ~0; 2891 ha->nvram_data_off = ~0; 2892 ha->isp_ops = &qla2300_isp_ops; 2893 } else if (IS_QLA24XX_TYPE(ha)) { 2894 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; 2895 ha->mbx_count = MAILBOX_REGISTER_COUNT; 2896 req_length = REQUEST_ENTRY_CNT_24XX; 2897 rsp_length = RESPONSE_ENTRY_CNT_2300; 2898 ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; 2899 ha->max_loop_id = SNS_LAST_LOOP_ID_2300; 2900 ha->init_cb_size = sizeof(struct mid_init_cb_24xx); 2901 ha->gid_list_info_size = 8; 2902 ha->optrom_size = OPTROM_SIZE_24XX; 2903 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA24XX; 2904 ha->isp_ops = &qla24xx_isp_ops; 2905 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF; 2906 ha->flash_data_off = FARX_ACCESS_FLASH_DATA; 2907 ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF; 2908 ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA; 2909 } else if (IS_QLA25XX(ha)) { 2910 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; 2911 ha->mbx_count = MAILBOX_REGISTER_COUNT; 2912 req_length = REQUEST_ENTRY_CNT_24XX; 2913 rsp_length = RESPONSE_ENTRY_CNT_2300; 2914 ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; 2915 ha->max_loop_id = SNS_LAST_LOOP_ID_2300; 2916 ha->init_cb_size = sizeof(struct mid_init_cb_24xx); 2917 ha->gid_list_info_size = 8; 2918 ha->optrom_size = OPTROM_SIZE_25XX; 2919 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX; 2920 ha->isp_ops = &qla25xx_isp_ops; 2921 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF; 2922 ha->flash_data_off = FARX_ACCESS_FLASH_DATA; 2923 ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF; 2924 ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA; 2925 } else if (IS_QLA81XX(ha)) { 2926 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; 2927 ha->mbx_count = MAILBOX_REGISTER_COUNT; 2928 req_length = REQUEST_ENTRY_CNT_24XX; 2929 rsp_length = RESPONSE_ENTRY_CNT_2300; 2930 ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; 2931 ha->max_loop_id = SNS_LAST_LOOP_ID_2300; 2932 ha->init_cb_size = sizeof(struct mid_init_cb_81xx); 2933 ha->gid_list_info_size = 8; 2934 ha->optrom_size = OPTROM_SIZE_81XX; 2935 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX; 2936 ha->isp_ops = &qla81xx_isp_ops; 2937 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX; 2938 ha->flash_data_off = 
FARX_ACCESS_FLASH_DATA_81XX; 2939 ha->nvram_conf_off = ~0; 2940 ha->nvram_data_off = ~0; 2941 } else if (IS_QLA82XX(ha)) { 2942 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; 2943 ha->mbx_count = MAILBOX_REGISTER_COUNT; 2944 req_length = REQUEST_ENTRY_CNT_82XX; 2945 rsp_length = RESPONSE_ENTRY_CNT_82XX; 2946 ha->max_loop_id = SNS_LAST_LOOP_ID_2300; 2947 ha->init_cb_size = sizeof(struct mid_init_cb_81xx); 2948 ha->gid_list_info_size = 8; 2949 ha->optrom_size = OPTROM_SIZE_82XX; 2950 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX; 2951 ha->isp_ops = &qla82xx_isp_ops; 2952 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF; 2953 ha->flash_data_off = FARX_ACCESS_FLASH_DATA; 2954 ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF; 2955 ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA; 2956 } else if (IS_QLA8044(ha)) { 2957 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; 2958 ha->mbx_count = MAILBOX_REGISTER_COUNT; 2959 req_length = REQUEST_ENTRY_CNT_82XX; 2960 rsp_length = RESPONSE_ENTRY_CNT_82XX; 2961 ha->max_loop_id = SNS_LAST_LOOP_ID_2300; 2962 ha->init_cb_size = sizeof(struct mid_init_cb_81xx); 2963 ha->gid_list_info_size = 8; 2964 ha->optrom_size = OPTROM_SIZE_83XX; 2965 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX; 2966 ha->isp_ops = &qla8044_isp_ops; 2967 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF; 2968 ha->flash_data_off = FARX_ACCESS_FLASH_DATA; 2969 ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF; 2970 ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA; 2971 } else if (IS_QLA83XX(ha)) { 2972 ha->portnum = PCI_FUNC(ha->pdev->devfn); 2973 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; 2974 ha->mbx_count = MAILBOX_REGISTER_COUNT; 2975 req_length = REQUEST_ENTRY_CNT_83XX; 2976 rsp_length = RESPONSE_ENTRY_CNT_83XX; 2977 ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; 2978 ha->max_loop_id = SNS_LAST_LOOP_ID_2300; 2979 ha->init_cb_size = sizeof(struct mid_init_cb_81xx); 2980 ha->gid_list_info_size = 8; 2981 ha->optrom_size = OPTROM_SIZE_83XX; 2982 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX; 2983 ha->isp_ops = &qla83xx_isp_ops; 2984 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX; 2985 ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX; 2986 ha->nvram_conf_off = ~0; 2987 ha->nvram_data_off = ~0; 2988 } else if (IS_QLAFX00(ha)) { 2989 ha->max_fibre_devices = MAX_FIBRE_DEVICES_FX00; 2990 ha->mbx_count = MAILBOX_REGISTER_COUNT_FX00; 2991 ha->aen_mbx_count = AEN_MAILBOX_REGISTER_COUNT_FX00; 2992 req_length = REQUEST_ENTRY_CNT_FX00; 2993 rsp_length = RESPONSE_ENTRY_CNT_FX00; 2994 ha->isp_ops = &qlafx00_isp_ops; 2995 ha->port_down_retry_count = 30; /* default value */ 2996 ha->mr.fw_hbt_cnt = QLAFX00_HEARTBEAT_INTERVAL; 2997 ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL; 2998 ha->mr.fw_critemp_timer_tick = QLAFX00_CRITEMP_INTERVAL; 2999 ha->mr.fw_hbt_en = 1; 3000 ha->mr.host_info_resend = false; 3001 ha->mr.hinfo_resend_timer_tick = QLAFX00_HINFO_RESEND_INTERVAL; 3002 } else if (IS_QLA27XX(ha)) { 3003 ha->portnum = PCI_FUNC(ha->pdev->devfn); 3004 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; 3005 ha->mbx_count = MAILBOX_REGISTER_COUNT; 3006 req_length = REQUEST_ENTRY_CNT_83XX; 3007 rsp_length = RESPONSE_ENTRY_CNT_83XX; 3008 ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; 3009 ha->max_loop_id = SNS_LAST_LOOP_ID_2300; 3010 ha->init_cb_size = sizeof(struct mid_init_cb_81xx); 3011 ha->gid_list_info_size = 8; 3012 ha->optrom_size = OPTROM_SIZE_83XX; 3013 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX; 3014 ha->isp_ops = &qla27xx_isp_ops; 3015 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX; 3016 ha->flash_data_off = 
FARX_ACCESS_FLASH_DATA_81XX; 3017 ha->nvram_conf_off = ~0; 3018 ha->nvram_data_off = ~0; 3019 } else if (IS_QLA28XX(ha)) { 3020 ha->portnum = PCI_FUNC(ha->pdev->devfn); 3021 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; 3022 ha->mbx_count = MAILBOX_REGISTER_COUNT; 3023 req_length = REQUEST_ENTRY_CNT_24XX; 3024 rsp_length = RESPONSE_ENTRY_CNT_2300; 3025 ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; 3026 ha->max_loop_id = SNS_LAST_LOOP_ID_2300; 3027 ha->init_cb_size = sizeof(struct mid_init_cb_81xx); 3028 ha->gid_list_info_size = 8; 3029 ha->optrom_size = OPTROM_SIZE_28XX; 3030 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX; 3031 ha->isp_ops = &qla27xx_isp_ops; 3032 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_28XX; 3033 ha->flash_data_off = FARX_ACCESS_FLASH_DATA_28XX; 3034 ha->nvram_conf_off = ~0; 3035 ha->nvram_data_off = ~0; 3036 } 3037 3038 ql_dbg_pci(ql_dbg_init, pdev, 0x001e, 3039 "mbx_count=%d, req_length=%d, " 3040 "rsp_length=%d, max_loop_id=%d, init_cb_size=%d, " 3041 "gid_list_info_size=%d, optrom_size=%d, nvram_npiv_size=%d, " 3042 "max_fibre_devices=%d.\n", 3043 ha->mbx_count, req_length, rsp_length, ha->max_loop_id, 3044 ha->init_cb_size, ha->gid_list_info_size, ha->optrom_size, 3045 ha->nvram_npiv_size, ha->max_fibre_devices); 3046 ql_dbg_pci(ql_dbg_init, pdev, 0x001f, 3047 "isp_ops=%p, flash_conf_off=%d, " 3048 "flash_data_off=%d, nvram_conf_off=%d, nvram_data_off=%d.\n", 3049 ha->isp_ops, ha->flash_conf_off, ha->flash_data_off, 3050 ha->nvram_conf_off, ha->nvram_data_off); 3051 3052 /* Configure PCI I/O space */ 3053 ret = ha->isp_ops->iospace_config(ha); 3054 if (ret) 3055 goto iospace_config_failed; 3056 3057 ql_log_pci(ql_log_info, pdev, 0x001d, 3058 "Found an ISP%04X irq %d iobase 0x%p.\n", 3059 pdev->device, pdev->irq, ha->iobase); 3060 mutex_init(&ha->vport_lock); 3061 mutex_init(&ha->mq_lock); 3062 init_completion(&ha->mbx_cmd_comp); 3063 complete(&ha->mbx_cmd_comp); 3064 init_completion(&ha->mbx_intr_comp); 3065 init_completion(&ha->dcbx_comp); 3066 init_completion(&ha->lb_portup_comp); 3067 3068 set_bit(0, (unsigned long *) ha->vp_idx_map); 3069 3070 qla2x00_config_dma_addressing(ha); 3071 ql_dbg_pci(ql_dbg_init, pdev, 0x0020, 3072 "64 Bit addressing is %s.\n", 3073 ha->flags.enable_64bit_addressing ? "enabled" : 3074 "disabled"); 3075 ret = qla2x00_mem_alloc(ha, req_length, rsp_length, &req, &rsp); 3076 if (ret) { 3077 ql_log_pci(ql_log_fatal, pdev, 0x0031, 3078 "Failed to allocate memory for adapter, aborting.\n"); 3079 3080 goto probe_hw_failed; 3081 } 3082 3083 req->max_q_depth = MAX_Q_DEPTH; 3084 if (ql2xmaxqdepth != 0 && ql2xmaxqdepth <= 0xffffU) 3085 req->max_q_depth = ql2xmaxqdepth; 3086 3087 3088 base_vha = qla2x00_create_host(sht, ha); 3089 if (!base_vha) { 3090 ret = -ENOMEM; 3091 goto probe_hw_failed; 3092 } 3093 3094 pci_set_drvdata(pdev, base_vha); 3095 set_bit(PFLG_DRIVER_PROBING, &base_vha->pci_flags); 3096 3097 host = base_vha->host; 3098 base_vha->req = req; 3099 if (IS_QLA2XXX_MIDTYPE(ha)) 3100 base_vha->mgmt_svr_loop_id = 3101 qla2x00_reserve_mgmt_server_loop_id(base_vha); 3102 else 3103 base_vha->mgmt_svr_loop_id = MANAGEMENT_SERVER + 3104 base_vha->vp_idx; 3105 3106 /* Setup fcport template structure.
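 * (ha->mr.fcport is the pre-allocated port used by the ISPFX00
 * discovery helpers further down - see the qlafx00_fx_disc(...,
 * &base_vha->hw->mr.fcport, ...) calls; it starts out FCS_UNCONFIGURED
 * with FC_NO_LOOP_ID until the firmware reports real port data.)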
*/ 3107 ha->mr.fcport.vha = base_vha; 3108 ha->mr.fcport.port_type = FCT_UNKNOWN; 3109 ha->mr.fcport.loop_id = FC_NO_LOOP_ID; 3110 qla2x00_set_fcport_state(&ha->mr.fcport, FCS_UNCONFIGURED); 3111 ha->mr.fcport.supported_classes = FC_COS_UNSPECIFIED; 3112 ha->mr.fcport.scan_state = 1; 3113 3114 /* Set the SG table size based on ISP type */ 3115 if (!IS_FWI2_CAPABLE(ha)) { 3116 if (IS_QLA2100(ha)) 3117 host->sg_tablesize = 32; 3118 } else { 3119 if (!IS_QLA82XX(ha)) 3120 host->sg_tablesize = QLA_SG_ALL; 3121 } 3122 host->max_id = ha->max_fibre_devices; 3123 host->cmd_per_lun = 3; 3124 host->unique_id = host->host_no; 3125 if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) 3126 host->max_cmd_len = 32; 3127 else 3128 host->max_cmd_len = MAX_CMDSZ; 3129 host->max_channel = MAX_BUSES - 1; 3130 /* Older HBAs support only 16-bit LUNs */ 3131 if (!IS_QLAFX00(ha) && !IS_FWI2_CAPABLE(ha) && 3132 ql2xmaxlun > 0xffff) 3133 host->max_lun = 0xffff; 3134 else 3135 host->max_lun = ql2xmaxlun; 3136 host->transportt = qla2xxx_transport_template; 3137 sht->vendor_id = (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC); 3138 3139 ql_dbg(ql_dbg_init, base_vha, 0x0033, 3140 "max_id=%d this_id=%d " 3141 "cmd_per_lun=%d unique_id=%d max_cmd_len=%d max_channel=%d " 3142 "max_lun=%llu transportt=%p, vendor_id=%llu.\n", host->max_id, 3143 host->this_id, host->cmd_per_lun, host->unique_id, 3144 host->max_cmd_len, host->max_channel, host->max_lun, 3145 host->transportt, sht->vendor_id); 3146 3147 INIT_WORK(&base_vha->iocb_work, qla2x00_iocb_work_fn); 3148 3149 /* Set up the irqs */ 3150 ret = qla2x00_request_irqs(ha, rsp); 3151 if (ret) 3152 goto probe_failed; 3153 3154 /* Alloc arrays of request and response ring ptrs */ 3155 ret = qla2x00_alloc_queues(ha, req, rsp); 3156 if (ret) { 3157 ql_log(ql_log_fatal, base_vha, 0x003d, 3158 "Failed to allocate memory for queue pointers..." 3159 "aborting.\n"); 3160 ret = -ENODEV; 3161 goto probe_failed; 3162 } 3163 3164 if (ha->mqenable) { 3165 /* number of hardware queues supported by blk/scsi-mq */ 3166 host->nr_hw_queues = ha->max_qpairs; 3167 3168 ql_dbg(ql_dbg_init, base_vha, 0x0192, 3169 "blk/scsi-mq enabled, HW queues = %d.\n", host->nr_hw_queues); 3170 } else { 3171 if (ql2xnvmeenable) { 3172 host->nr_hw_queues = ha->max_qpairs; 3173 ql_dbg(ql_dbg_init, base_vha, 0x0194, 3174 "FC-NVMe support is enabled, HW queues=%d\n", 3175 host->nr_hw_queues); 3176 } else { 3177 ql_dbg(ql_dbg_init, base_vha, 0x0193, 3178 "blk/scsi-mq disabled.\n"); 3179 } 3180 } 3181 3182 qlt_probe_one_stage1(base_vha, ha); 3183 3184 pci_save_state(pdev); 3185 3186 /* Assign back pointers */ 3187 rsp->req = req; 3188 req->rsp = rsp; 3189 3190 if (IS_QLAFX00(ha)) { 3191 ha->rsp_q_map[0] = rsp; 3192 ha->req_q_map[0] = req; 3193 set_bit(0, ha->req_qid_map); 3194 set_bit(0, ha->rsp_qid_map); 3195 } 3196 3197 /* FWI2-capable only.
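 * The queue in/out pointers are first aimed at the legacy isp24
 * register block and then retargeted below: multiqueue-capable parts
 * (mqenable, 83xx/27xx/28xx) use the isp25mq window in ha->mqiobase,
 * ISPFX00 uses its own block, and P3P (82xx) parts only override the
 * pointers they actually implement.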
*/ 3198 req->req_q_in = &ha->iobase->isp24.req_q_in; 3199 req->req_q_out = &ha->iobase->isp24.req_q_out; 3200 rsp->rsp_q_in = &ha->iobase->isp24.rsp_q_in; 3201 rsp->rsp_q_out = &ha->iobase->isp24.rsp_q_out; 3202 if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) || 3203 IS_QLA28XX(ha)) { 3204 req->req_q_in = &ha->mqiobase->isp25mq.req_q_in; 3205 req->req_q_out = &ha->mqiobase->isp25mq.req_q_out; 3206 rsp->rsp_q_in = &ha->mqiobase->isp25mq.rsp_q_in; 3207 rsp->rsp_q_out = &ha->mqiobase->isp25mq.rsp_q_out; 3208 } 3209 3210 if (IS_QLAFX00(ha)) { 3211 req->req_q_in = &ha->iobase->ispfx00.req_q_in; 3212 req->req_q_out = &ha->iobase->ispfx00.req_q_out; 3213 rsp->rsp_q_in = &ha->iobase->ispfx00.rsp_q_in; 3214 rsp->rsp_q_out = &ha->iobase->ispfx00.rsp_q_out; 3215 } 3216 3217 if (IS_P3P_TYPE(ha)) { 3218 req->req_q_out = &ha->iobase->isp82.req_q_out[0]; 3219 rsp->rsp_q_in = &ha->iobase->isp82.rsp_q_in[0]; 3220 rsp->rsp_q_out = &ha->iobase->isp82.rsp_q_out[0]; 3221 } 3222 3223 ql_dbg(ql_dbg_multiq, base_vha, 0xc009, 3224 "rsp_q_map=%p req_q_map=%p rsp->req=%p req->rsp=%p.\n", 3225 ha->rsp_q_map, ha->req_q_map, rsp->req, req->rsp); 3226 ql_dbg(ql_dbg_multiq, base_vha, 0xc00a, 3227 "req->req_q_in=%p req->req_q_out=%p " 3228 "rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n", 3229 req->req_q_in, req->req_q_out, 3230 rsp->rsp_q_in, rsp->rsp_q_out); 3231 ql_dbg(ql_dbg_init, base_vha, 0x003e, 3232 "rsp_q_map=%p req_q_map=%p rsp->req=%p req->rsp=%p.\n", 3233 ha->rsp_q_map, ha->req_q_map, rsp->req, req->rsp); 3234 ql_dbg(ql_dbg_init, base_vha, 0x003f, 3235 "req->req_q_in=%p req->req_q_out=%p rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n", 3236 req->req_q_in, req->req_q_out, rsp->rsp_q_in, rsp->rsp_q_out); 3237 3238 ha->wq = alloc_workqueue("qla2xxx_wq", 0, 0); 3239 if (unlikely(!ha->wq)) { 3240 ret = -ENOMEM; 3241 goto probe_failed; 3242 } 3243 3244 if (ha->isp_ops->initialize_adapter(base_vha)) { 3245 ql_log(ql_log_fatal, base_vha, 0x00d6, 3246 "Failed to initialize adapter - Adapter flags %x.\n", 3247 base_vha->device_flags); 3248 3249 if (IS_QLA82XX(ha)) { 3250 qla82xx_idc_lock(ha); 3251 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 3252 QLA8XXX_DEV_FAILED); 3253 qla82xx_idc_unlock(ha); 3254 ql_log(ql_log_fatal, base_vha, 0x00d7, 3255 "HW State: FAILED.\n"); 3256 } else if (IS_QLA8044(ha)) { 3257 qla8044_idc_lock(ha); 3258 qla8044_wr_direct(base_vha, 3259 QLA8044_CRB_DEV_STATE_INDEX, 3260 QLA8XXX_DEV_FAILED); 3261 qla8044_idc_unlock(ha); 3262 ql_log(ql_log_fatal, base_vha, 0x0150, 3263 "HW State: FAILED.\n"); 3264 } 3265 3266 ret = -ENODEV; 3267 goto probe_failed; 3268 } 3269 3270 if (IS_QLAFX00(ha)) 3271 host->can_queue = QLAFX00_MAX_CANQUEUE; 3272 else 3273 host->can_queue = req->num_outstanding_cmds - 10; 3274 3275 ql_dbg(ql_dbg_init, base_vha, 0x0032, 3276 "can_queue=%d, req=%p, mgmt_svr_loop_id=%d, sg_tablesize=%d.\n", 3277 host->can_queue, base_vha->req, 3278 base_vha->mgmt_svr_loop_id, host->sg_tablesize); 3279 3280 if (ha->mqenable) { 3281 bool startit = false; 3282 3283 if (QLA_TGT_MODE_ENABLED()) 3284 startit = false; 3285 3286 if (ql2x_ini_mode == QLA2XXX_INI_MODE_ENABLED) 3287 startit = true; 3288 3289 /* Create start of day qpairs for Block MQ */ 3290 for (i = 0; i < ha->max_qpairs; i++) 3291 qla2xxx_create_qpair(base_vha, 5, 0, startit); 3292 } 3293 3294 if (ha->flags.running_gold_fw) 3295 goto skip_dpc; 3296 3297 /* 3298 * Startup the kernel thread for this host adapter 3299 */ 3300 ha->dpc_thread = kthread_create(qla2x00_do_dpc, ha, 3301 "%s_dpc", base_vha->host_str); 3302 if (IS_ERR(ha->dpc_thread)) { 3303 
ql_log(ql_log_fatal, base_vha, 0x00ed, 3304 "Failed to start DPC thread.\n"); 3305 ret = PTR_ERR(ha->dpc_thread); 3306 ha->dpc_thread = NULL; 3307 goto probe_failed; 3308 } 3309 ql_dbg(ql_dbg_init, base_vha, 0x00ee, 3310 "DPC thread started successfully.\n"); 3311 3312 /* 3313 * If we're not coming up in initiator mode, we might sit for 3314 * a while without waking up the dpc thread, which leads to a 3315 * stuck process warning. So just kick the dpc once here and 3316 * let the kthread start (and go back to sleep in qla2x00_do_dpc). 3317 */ 3318 qla2xxx_wake_dpc(base_vha); 3319 3320 INIT_WORK(&ha->board_disable, qla2x00_disable_board_on_pci_error); 3321 3322 if (IS_QLA8031(ha) || IS_MCTP_CAPABLE(ha)) { 3323 sprintf(wq_name, "qla2xxx_%lu_dpc_lp_wq", base_vha->host_no); 3324 ha->dpc_lp_wq = create_singlethread_workqueue(wq_name); 3325 INIT_WORK(&ha->idc_aen, qla83xx_service_idc_aen); 3326 3327 sprintf(wq_name, "qla2xxx_%lu_dpc_hp_wq", base_vha->host_no); 3328 ha->dpc_hp_wq = create_singlethread_workqueue(wq_name); 3329 INIT_WORK(&ha->nic_core_reset, qla83xx_nic_core_reset_work); 3330 INIT_WORK(&ha->idc_state_handler, 3331 qla83xx_idc_state_handler_work); 3332 INIT_WORK(&ha->nic_core_unrecoverable, 3333 qla83xx_nic_core_unrecoverable_work); 3334 } 3335 3336 skip_dpc: 3337 list_add_tail(&base_vha->list, &ha->vp_list); 3338 base_vha->host->irq = ha->pdev->irq; 3339 3340 /* Initialize the timer */ 3341 qla2x00_start_timer(base_vha, WATCH_INTERVAL); 3342 ql_dbg(ql_dbg_init, base_vha, 0x00ef, 3343 "Started qla2x00_timer with " 3344 "interval=%d.\n", WATCH_INTERVAL); 3345 ql_dbg(ql_dbg_init, base_vha, 0x00f0, 3346 "Detected hba at address=%p.\n", 3347 ha); 3348 3349 if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) { 3350 if (ha->fw_attributes & BIT_4) { 3351 int prot = 0, guard; 3352 3353 base_vha->flags.difdix_supported = 1; 3354 ql_dbg(ql_dbg_init, base_vha, 0x00f1, 3355 "Registering for DIF/DIX type 1 and 3 protection.\n"); 3356 if (ql2xenabledif == 1) 3357 prot = SHOST_DIX_TYPE0_PROTECTION; 3358 if (ql2xprotmask) 3359 scsi_host_set_prot(host, ql2xprotmask); 3360 else 3361 scsi_host_set_prot(host, 3362 prot | SHOST_DIF_TYPE1_PROTECTION 3363 | SHOST_DIF_TYPE2_PROTECTION 3364 | SHOST_DIF_TYPE3_PROTECTION 3365 | SHOST_DIX_TYPE1_PROTECTION 3366 | SHOST_DIX_TYPE2_PROTECTION 3367 | SHOST_DIX_TYPE3_PROTECTION); 3368 3369 guard = SHOST_DIX_GUARD_CRC; 3370 3371 if (IS_PI_IPGUARD_CAPABLE(ha) && 3372 (ql2xenabledif > 1 || IS_PI_DIFB_DIX0_CAPABLE(ha))) 3373 guard |= SHOST_DIX_GUARD_IP; 3374 3375 if (ql2xprotguard) 3376 scsi_host_set_guard(host, ql2xprotguard); 3377 else 3378 scsi_host_set_guard(host, guard); 3379 } else 3380 base_vha->flags.difdix_supported = 0; 3381 } 3382 3383 ha->isp_ops->enable_intrs(ha); 3384 3385 if (IS_QLAFX00(ha)) { 3386 ret = qlafx00_fx_disc(base_vha, 3387 &base_vha->hw->mr.fcport, FXDISC_GET_CONFIG_INFO); 3388 host->sg_tablesize = (ha->mr.extended_io_enabled) ? 
3389 QLA_SG_ALL : 128; 3390 } 3391 3392 ret = scsi_add_host(host, &pdev->dev); 3393 if (ret) 3394 goto probe_failed; 3395 3396 base_vha->flags.init_done = 1; 3397 base_vha->flags.online = 1; 3398 ha->prev_minidump_failed = 0; 3399 3400 ql_dbg(ql_dbg_init, base_vha, 0x00f2, 3401 "Init done and hba is online.\n"); 3402 3403 if (qla_ini_mode_enabled(base_vha) || 3404 qla_dual_mode_enabled(base_vha)) 3405 scsi_scan_host(host); 3406 else 3407 ql_dbg(ql_dbg_init, base_vha, 0x0122, 3408 "skipping scsi_scan_host() for non-initiator port\n"); 3409 3410 qla2x00_alloc_sysfs_attr(base_vha); 3411 3412 if (IS_QLAFX00(ha)) { 3413 ret = qlafx00_fx_disc(base_vha, 3414 &base_vha->hw->mr.fcport, FXDISC_GET_PORT_INFO); 3415 3416 /* Register system information */ 3417 ret = qlafx00_fx_disc(base_vha, 3418 &base_vha->hw->mr.fcport, FXDISC_REG_HOST_INFO); 3419 } 3420 3421 qla2x00_init_host_attr(base_vha); 3422 3423 qla2x00_dfs_setup(base_vha); 3424 3425 ql_log(ql_log_info, base_vha, 0x00fb, 3426 "QLogic %s - %s.\n", ha->model_number, ha->model_desc); 3427 ql_log(ql_log_info, base_vha, 0x00fc, 3428 "ISP%04X: %s @ %s hdma%c host#=%ld fw=%s.\n", 3429 pdev->device, ha->isp_ops->pci_info_str(base_vha, pci_info, 3430 sizeof(pci_info)), 3431 pci_name(pdev), ha->flags.enable_64bit_addressing ? '+' : '-', 3432 base_vha->host_no, 3433 ha->isp_ops->fw_version_str(base_vha, fw_str, sizeof(fw_str))); 3434 3435 qlt_add_target(ha, base_vha); 3436 3437 clear_bit(PFLG_DRIVER_PROBING, &base_vha->pci_flags); 3438 3439 if (test_bit(UNLOADING, &base_vha->dpc_flags)) 3440 return -ENODEV; 3441 3442 if (ha->flags.detected_lr_sfp) { 3443 ql_log(ql_log_info, base_vha, 0xffff, 3444 "Reset chip to pick up LR SFP setting\n"); 3445 set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags); 3446 qla2xxx_wake_dpc(base_vha); 3447 } 3448 3449 return 0; 3450 3451 probe_failed: 3452 if (base_vha->gnl.l) { 3453 dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size, 3454 base_vha->gnl.l, base_vha->gnl.ldma); 3455 base_vha->gnl.l = NULL; 3456 } 3457 3458 if (base_vha->timer_active) 3459 qla2x00_stop_timer(base_vha); 3460 base_vha->flags.online = 0; 3461 if (ha->dpc_thread) { 3462 struct task_struct *t = ha->dpc_thread; 3463 3464 ha->dpc_thread = NULL; 3465 kthread_stop(t); 3466 } 3467 3468 qla2x00_free_device(base_vha); 3469 scsi_host_put(base_vha->host); 3470 /* 3471 * Need to NULL out local req/rsp after 3472 * qla2x00_free_device => qla2x00_free_queues frees 3473 * what these are pointing to. Or else we'll 3474 * fall over below in qla2x00_free_req/rsp_que. 
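 * (qla2x00_free_device() ends in qla2x00_free_queues(), which frees
 * every queue in ha->req_q_map/ha->rsp_q_map - including the ones the
 * local req/rsp still point at - so clearing the locals turns the
 * qla2x00_free_req_que()/qla2x00_free_rsp_que() calls under
 * probe_hw_failed into harmless no-ops instead of double frees.)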
3475 */ 3476 req = NULL; 3477 rsp = NULL; 3478 3479 probe_hw_failed: 3480 qla2x00_mem_free(ha); 3481 qla2x00_free_req_que(ha, req); 3482 qla2x00_free_rsp_que(ha, rsp); 3483 qla2x00_clear_drv_active(ha); 3484 3485 iospace_config_failed: 3486 if (IS_P3P_TYPE(ha)) { 3487 if (ha->nx_pcibase) 3488 iounmap((device_reg_t *)ha->nx_pcibase); 3489 if (!ql2xdbwr) 3490 iounmap((device_reg_t *)ha->nxdb_wr_ptr); 3491 } else { 3492 if (ha->iobase) 3493 iounmap(ha->iobase); 3494 if (ha->cregbase) 3495 iounmap(ha->cregbase); 3496 } 3497 pci_release_selected_regions(ha->pdev, ha->bars); 3498 kfree(ha); 3499 3500 disable_device: 3501 pci_disable_device(pdev); 3502 return ret; 3503 } 3504 3505 static void __qla_set_remove_flag(scsi_qla_host_t *base_vha) 3506 { 3507 scsi_qla_host_t *vp; 3508 unsigned long flags; 3509 struct qla_hw_data *ha; 3510 3511 if (!base_vha) 3512 return; 3513 3514 ha = base_vha->hw; 3515 3516 spin_lock_irqsave(&ha->vport_slock, flags); 3517 list_for_each_entry(vp, &ha->vp_list, list) 3518 set_bit(PFLG_DRIVER_REMOVING, &vp->pci_flags); 3519 3520 /* 3521 * Indicate device removal to prevent future board_disable 3522 * and wait until any pending board_disable has completed. 3523 */ 3524 set_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags); 3525 spin_unlock_irqrestore(&ha->vport_slock, flags); 3526 } 3527 3528 static void 3529 qla2x00_shutdown(struct pci_dev *pdev) 3530 { 3531 scsi_qla_host_t *vha; 3532 struct qla_hw_data *ha; 3533 3534 vha = pci_get_drvdata(pdev); 3535 ha = vha->hw; 3536 3537 ql_log(ql_log_info, vha, 0xfffa, 3538 "Adapter shutdown\n"); 3539 3540 /* 3541 * Prevent future board_disable and wait 3542 * until any pending board_disable has completed. 3543 */ 3544 __qla_set_remove_flag(vha); 3545 cancel_work_sync(&ha->board_disable); 3546 3547 if (!atomic_read(&pdev->enable_cnt)) 3548 return; 3549 3550 /* Notify ISPFX00 firmware */ 3551 if (IS_QLAFX00(ha)) 3552 qlafx00_driver_shutdown(vha, 20); 3553 3554 /* Turn-off FCE trace */ 3555 if (ha->flags.fce_enabled) { 3556 qla2x00_disable_fce_trace(vha, NULL, NULL); 3557 ha->flags.fce_enabled = 0; 3558 } 3559 3560 /* Turn-off EFT trace */ 3561 if (ha->eft) 3562 qla2x00_disable_eft_trace(vha); 3563 3564 if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) || 3565 IS_QLA28XX(ha)) { 3566 if (ha->flags.fw_started) 3567 qla2x00_abort_isp_cleanup(vha); 3568 } else { 3569 /* Stop currently executing firmware.
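 * qla2x00_try_to_stop_firmware() is a best-effort mailbox request to
 * halt the RISC; it has to run while interrupts and the mailbox
 * interface are still alive, which is why the timer and interrupt
 * teardown only happen after this point.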
*/ 3570 qla2x00_try_to_stop_firmware(vha); 3571 } 3572 3573 /* Disable timer */ 3574 if (vha->timer_active) 3575 qla2x00_stop_timer(vha); 3576 3577 /* Take the adapter offline */ 3578 vha->flags.online = 0; 3579 3580 /* turn-off interrupts on the card */ 3581 if (ha->interrupts_on) { 3582 vha->flags.init_done = 0; 3583 ha->isp_ops->disable_intrs(ha); 3584 } 3585 3586 qla2x00_free_irqs(vha); 3587 3588 qla2x00_free_fw_dump(ha); 3589 3590 pci_disable_device(pdev); 3591 ql_log(ql_log_info, vha, 0xfffe, 3592 "Adapter shutdown successfully.\n"); 3593 } 3594 3595 /* Deletes all the virtual ports for a given ha */ 3596 static void 3597 qla2x00_delete_all_vps(struct qla_hw_data *ha, scsi_qla_host_t *base_vha) 3598 { 3599 scsi_qla_host_t *vha; 3600 unsigned long flags; 3601 3602 mutex_lock(&ha->vport_lock); 3603 while (ha->cur_vport_count) { 3604 spin_lock_irqsave(&ha->vport_slock, flags); 3605 3606 BUG_ON(base_vha->list.next == &ha->vp_list); 3607 /* This assumes first entry in ha->vp_list is always base vha */ 3608 vha = list_first_entry(&base_vha->list, scsi_qla_host_t, list); 3609 scsi_host_get(vha->host); 3610 3611 spin_unlock_irqrestore(&ha->vport_slock, flags); 3612 mutex_unlock(&ha->vport_lock); 3613 3614 qla_nvme_delete(vha); 3615 3616 fc_vport_terminate(vha->fc_vport); 3617 scsi_host_put(vha->host); 3618 3619 mutex_lock(&ha->vport_lock); 3620 } 3621 mutex_unlock(&ha->vport_lock); 3622 } 3623 3624 /* Stops all deferred work threads */ 3625 static void 3626 qla2x00_destroy_deferred_work(struct qla_hw_data *ha) 3627 { 3628 /* Cancel all work and destroy DPC workqueues */ 3629 if (ha->dpc_lp_wq) { 3630 cancel_work_sync(&ha->idc_aen); 3631 destroy_workqueue(ha->dpc_lp_wq); 3632 ha->dpc_lp_wq = NULL; 3633 } 3634 3635 if (ha->dpc_hp_wq) { 3636 cancel_work_sync(&ha->nic_core_reset); 3637 cancel_work_sync(&ha->idc_state_handler); 3638 cancel_work_sync(&ha->nic_core_unrecoverable); 3639 destroy_workqueue(ha->dpc_hp_wq); 3640 ha->dpc_hp_wq = NULL; 3641 } 3642 3643 /* Kill the kernel thread for this host */ 3644 if (ha->dpc_thread) { 3645 struct task_struct *t = ha->dpc_thread; 3646 3647 /* 3648 * qla2xxx_wake_dpc checks for ->dpc_thread 3649 * so we need to zero it out.
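 * (Otherwise a late qla2xxx_wake_dpc() could call wake_up_process()
 * on a task kthread_stop() has already reaped. With the pointer
 * cleared the wake-up becomes a no-op, and kthread_stop() then just
 * waits for the thread to see kthread_should_stop() and exit.)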
3650 */ 3651 ha->dpc_thread = NULL; 3652 kthread_stop(t); 3653 } 3654 } 3655 3656 static void 3657 qla2x00_unmap_iobases(struct qla_hw_data *ha) 3658 { 3659 if (IS_QLA82XX(ha)) { 3660 3661 iounmap((device_reg_t *)ha->nx_pcibase); 3662 if (!ql2xdbwr) 3663 iounmap((device_reg_t *)ha->nxdb_wr_ptr); 3664 } else { 3665 if (ha->iobase) 3666 iounmap(ha->iobase); 3667 3668 if (ha->cregbase) 3669 iounmap(ha->cregbase); 3670 3671 if (ha->mqiobase) 3672 iounmap(ha->mqiobase); 3673 3674 if ((IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) && 3675 ha->msixbase) 3676 iounmap(ha->msixbase); 3677 } 3678 } 3679 3680 static void 3681 qla2x00_clear_drv_active(struct qla_hw_data *ha) 3682 { 3683 if (IS_QLA8044(ha)) { 3684 qla8044_idc_lock(ha); 3685 qla8044_clear_drv_active(ha); 3686 qla8044_idc_unlock(ha); 3687 } else if (IS_QLA82XX(ha)) { 3688 qla82xx_idc_lock(ha); 3689 qla82xx_clear_drv_active(ha); 3690 qla82xx_idc_unlock(ha); 3691 } 3692 } 3693 3694 static void 3695 qla2x00_remove_one(struct pci_dev *pdev) 3696 { 3697 scsi_qla_host_t *base_vha; 3698 struct qla_hw_data *ha; 3699 3700 base_vha = pci_get_drvdata(pdev); 3701 ha = base_vha->hw; 3702 ql_log(ql_log_info, base_vha, 0xb079, 3703 "Removing driver\n"); 3704 __qla_set_remove_flag(base_vha); 3705 cancel_work_sync(&ha->board_disable); 3706 3707 /* 3708 * If the PCI device is disabled then there was a PCI-disconnect and 3709 * qla2x00_disable_board_on_pci_error has taken care of most of the 3710 * resources. 3711 */ 3712 if (!atomic_read(&pdev->enable_cnt)) { 3713 dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size, 3714 base_vha->gnl.l, base_vha->gnl.ldma); 3715 base_vha->gnl.l = NULL; 3716 scsi_host_put(base_vha->host); 3717 kfree(ha); 3718 pci_set_drvdata(pdev, NULL); 3719 return; 3720 } 3721 qla2x00_wait_for_hba_ready(base_vha); 3722 3723 if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) || 3724 IS_QLA28XX(ha)) { 3725 if (ha->flags.fw_started) 3726 qla2x00_abort_isp_cleanup(base_vha); 3727 } else if (!IS_QLAFX00(ha)) { 3728 if (IS_QLA8031(ha)) { 3729 ql_dbg(ql_dbg_p3p, base_vha, 0xb07e, 3730 "Clearing fcoe driver presence.\n"); 3731 if (qla83xx_clear_drv_presence(base_vha) != QLA_SUCCESS) 3732 ql_dbg(ql_dbg_p3p, base_vha, 0xb079, 3733 "Error while clearing DRV-Presence.\n"); 3734 } 3735 3736 qla2x00_try_to_stop_firmware(base_vha); 3737 } 3738 3739 qla2x00_wait_for_sess_deletion(base_vha); 3740 3741 /* 3742 * if UNLOAD flag is already set, then continue unload, 3743 * where it was set first. 
3744 */ 3745 if (test_bit(UNLOADING, &base_vha->dpc_flags)) 3746 return; 3747 3748 set_bit(UNLOADING, &base_vha->dpc_flags); 3749 3750 qla_nvme_delete(base_vha); 3751 3752 dma_free_coherent(&ha->pdev->dev, 3753 base_vha->gnl.size, base_vha->gnl.l, base_vha->gnl.ldma); 3754 3755 base_vha->gnl.l = NULL; 3756 3757 vfree(base_vha->scan.l); 3758 3759 if (IS_QLAFX00(ha)) 3760 qlafx00_driver_shutdown(base_vha, 20); 3761 3762 qla2x00_delete_all_vps(ha, base_vha); 3763 3764 qla2x00_dfs_remove(base_vha); 3765 3766 qla84xx_put_chip(base_vha); 3767 3768 /* Disable timer */ 3769 if (base_vha->timer_active) 3770 qla2x00_stop_timer(base_vha); 3771 3772 base_vha->flags.online = 0; 3773 3774 /* free DMA memory */ 3775 if (ha->exlogin_buf) 3776 qla2x00_free_exlogin_buffer(ha); 3777 3778 /* free DMA memory */ 3779 if (ha->exchoffld_buf) 3780 qla2x00_free_exchoffld_buffer(ha); 3781 3782 qla2x00_destroy_deferred_work(ha); 3783 3784 qlt_remove_target(ha, base_vha); 3785 3786 qla2x00_free_sysfs_attr(base_vha, true); 3787 3788 fc_remove_host(base_vha->host); 3789 qlt_remove_target_resources(ha); 3790 3791 scsi_remove_host(base_vha->host); 3792 3793 qla2x00_free_device(base_vha); 3794 3795 qla2x00_clear_drv_active(ha); 3796 3797 scsi_host_put(base_vha->host); 3798 3799 qla2x00_unmap_iobases(ha); 3800 3801 pci_release_selected_regions(ha->pdev, ha->bars); 3802 kfree(ha); 3803 3804 pci_disable_pcie_error_reporting(pdev); 3805 3806 pci_disable_device(pdev); 3807 } 3808 3809 static void 3810 qla2x00_free_device(scsi_qla_host_t *vha) 3811 { 3812 struct qla_hw_data *ha = vha->hw; 3813 3814 qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16); 3815 3816 /* Disable timer */ 3817 if (vha->timer_active) 3818 qla2x00_stop_timer(vha); 3819 3820 qla25xx_delete_queues(vha); 3821 vha->flags.online = 0; 3822 3823 /* turn-off interrupts on the card */ 3824 if (ha->interrupts_on) { 3825 vha->flags.init_done = 0; 3826 ha->isp_ops->disable_intrs(ha); 3827 } 3828 3829 qla2x00_free_fcports(vha); 3830 3831 qla2x00_free_irqs(vha); 3832 3833 /* Flush the work queue and remove it */ 3834 if (ha->wq) { 3835 flush_workqueue(ha->wq); 3836 destroy_workqueue(ha->wq); 3837 ha->wq = NULL; 3838 } 3839 3840 3841 qla2x00_mem_free(ha); 3842 3843 qla82xx_md_free(vha); 3844 3845 qla2x00_free_queues(ha); 3846 } 3847 3848 void qla2x00_free_fcports(struct scsi_qla_host *vha) 3849 { 3850 fc_port_t *fcport, *tfcport; 3851 3852 list_for_each_entry_safe(fcport, tfcport, &vha->vp_fcports, list) 3853 qla2x00_free_fcport(fcport); 3854 } 3855 3856 static inline void 3857 qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport) 3858 { 3859 int now; 3860 3861 if (!fcport->rport) 3862 return; 3863 3864 if (fcport->rport) { 3865 ql_dbg(ql_dbg_disc, fcport->vha, 0x2109, 3866 "%s %8phN. rport %p roles %x\n", 3867 __func__, fcport->port_name, fcport->rport, 3868 fcport->rport->roles); 3869 fc_remote_port_delete(fcport->rport); 3870 } 3871 qlt_do_generation_tick(vha, &now); 3872 } 3873 3874 /* 3875 * qla2x00_mark_device_lost Updates fcport state when device goes offline. 3876 * 3877 * Input: ha = adapter block pointer. fcport = port structure pointer. 3878 * 3879 * Return: None. 
3880 * 3881 * Context: 3882 */ 3883 void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport, 3884 int do_login) 3885 { 3886 if (IS_QLAFX00(vha->hw)) { 3887 qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST); 3888 qla2x00_schedule_rport_del(vha, fcport); 3889 return; 3890 } 3891 3892 if (atomic_read(&fcport->state) == FCS_ONLINE && 3893 vha->vp_idx == fcport->vha->vp_idx) { 3894 qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST); 3895 qla2x00_schedule_rport_del(vha, fcport); 3896 } 3897 /* 3898 * We may need to retry the login, so don't change the state of the 3899 * port but do the retries. 3900 */ 3901 if (atomic_read(&fcport->state) != FCS_DEVICE_DEAD) 3902 qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST); 3903 3904 if (!do_login) 3905 return; 3906 3907 set_bit(RELOGIN_NEEDED, &vha->dpc_flags); 3908 } 3909 3910 /* 3911 * qla2x00_mark_all_devices_lost 3912 * Updates fcport state when device goes offline. 3913 * 3914 * Input: 3915 * ha = adapter block pointer. 3916 * fcport = port structure pointer. 3917 * 3918 * Return: 3919 * None. 3920 * 3921 * Context: 3922 */ 3923 void 3924 qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha) 3925 { 3926 fc_port_t *fcport; 3927 3928 ql_dbg(ql_dbg_disc, vha, 0x20f1, 3929 "Mark all dev lost\n"); 3930 3931 list_for_each_entry(fcport, &vha->vp_fcports, list) { 3932 fcport->scan_state = 0; 3933 qlt_schedule_sess_for_deletion(fcport); 3934 3935 if (vha->vp_idx != 0 && vha->vp_idx != fcport->vha->vp_idx) 3936 continue; 3937 3938 /* 3939 * No point in marking the device as lost, if the device is 3940 * already DEAD. 3941 */ 3942 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD) 3943 continue; 3944 } 3945 } 3946 3947 static void qla2x00_set_reserved_loop_ids(struct qla_hw_data *ha) 3948 { 3949 int i; 3950 3951 if (IS_FWI2_CAPABLE(ha)) 3952 return; 3953 3954 for (i = 0; i < SNS_FIRST_LOOP_ID; i++) 3955 set_bit(i, ha->loop_id_map); 3956 set_bit(MANAGEMENT_SERVER, ha->loop_id_map); 3957 set_bit(BROADCAST, ha->loop_id_map); 3958 } 3959 3960 /* 3961 * qla2x00_mem_alloc 3962 * Allocates adapter memory. 3963 * 3964 * Returns: 3965 * 0 = success. 3966 * !0 = failure. 3967 */ 3968 static int 3969 qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len, 3970 struct req_que **req, struct rsp_que **rsp) 3971 { 3972 char name[16]; 3973 3974 ha->init_cb = dma_alloc_coherent(&ha->pdev->dev, ha->init_cb_size, 3975 &ha->init_cb_dma, GFP_KERNEL); 3976 if (!ha->init_cb) 3977 goto fail; 3978 3979 if (qlt_mem_alloc(ha) < 0) 3980 goto fail_free_init_cb; 3981 3982 ha->gid_list = dma_alloc_coherent(&ha->pdev->dev, 3983 qla2x00_gid_list_size(ha), &ha->gid_list_dma, GFP_KERNEL); 3984 if (!ha->gid_list) 3985 goto fail_free_tgt_mem; 3986 3987 ha->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep); 3988 if (!ha->srb_mempool) 3989 goto fail_free_gid_list; 3990 3991 if (IS_P3P_TYPE(ha)) { 3992 /* Allocate cache for CT6 Ctx. 
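 * The slab is file-scoped and shared by all P3P adapters; the
 * per-adapter mempool primed from it below keeps SRB_MIN_REQ
 * contexts allocatable even under memory pressure.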
*/ 3993 if (!ctx_cachep) { 3994 ctx_cachep = kmem_cache_create("qla2xxx_ctx", 3995 sizeof(struct ct6_dsd), 0, 3996 SLAB_HWCACHE_ALIGN, NULL); 3997 if (!ctx_cachep) 3998 goto fail_free_srb_mempool; 3999 } 4000 ha->ctx_mempool = mempool_create_slab_pool(SRB_MIN_REQ, 4001 ctx_cachep); 4002 if (!ha->ctx_mempool) 4003 goto fail_free_srb_mempool; 4004 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0021, 4005 "ctx_cachep=%p ctx_mempool=%p.\n", 4006 ctx_cachep, ha->ctx_mempool); 4007 } 4008 4009 /* Get memory for cached NVRAM */ 4010 ha->nvram = kzalloc(MAX_NVRAM_SIZE, GFP_KERNEL); 4011 if (!ha->nvram) 4012 goto fail_free_ctx_mempool; 4013 4014 snprintf(name, sizeof(name), "%s_%d", QLA2XXX_DRIVER_NAME, 4015 ha->pdev->device); 4016 ha->s_dma_pool = dma_pool_create(name, &ha->pdev->dev, 4017 DMA_POOL_SIZE, 8, 0); 4018 if (!ha->s_dma_pool) 4019 goto fail_free_nvram; 4020 4021 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0022, 4022 "init_cb=%p gid_list=%p, srb_mempool=%p s_dma_pool=%p.\n", 4023 ha->init_cb, ha->gid_list, ha->srb_mempool, ha->s_dma_pool); 4024 4025 if (IS_P3P_TYPE(ha) || ql2xenabledif) { 4026 ha->dl_dma_pool = dma_pool_create(name, &ha->pdev->dev, 4027 DSD_LIST_DMA_POOL_SIZE, 8, 0); 4028 if (!ha->dl_dma_pool) { 4029 ql_log_pci(ql_log_fatal, ha->pdev, 0x0023, 4030 "Failed to allocate memory for dl_dma_pool.\n"); 4031 goto fail_s_dma_pool; 4032 } 4033 4034 ha->fcp_cmnd_dma_pool = dma_pool_create(name, &ha->pdev->dev, 4035 FCP_CMND_DMA_POOL_SIZE, 8, 0); 4036 if (!ha->fcp_cmnd_dma_pool) { 4037 ql_log_pci(ql_log_fatal, ha->pdev, 0x0024, 4038 "Failed to allocate memory for fcp_cmnd_dma_pool.\n"); 4039 goto fail_dl_dma_pool; 4040 } 4041 4042 if (ql2xenabledif) { 4043 u64 bufsize = DIF_BUNDLING_DMA_POOL_SIZE; 4044 struct dsd_dma *dsd, *nxt; 4045 uint i; 4046 /* Create a DMA pool of buffers for DIF bundling */ 4047 ha->dif_bundl_pool = dma_pool_create(name, 4048 &ha->pdev->dev, DIF_BUNDLING_DMA_POOL_SIZE, 8, 0); 4049 if (!ha->dif_bundl_pool) { 4050 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0024, 4051 "%s: failed create dif_bundl_pool\n", 4052 __func__); 4053 goto fail_dif_bundl_dma_pool; 4054 } 4055 4056 INIT_LIST_HEAD(&ha->pool.good.head); 4057 INIT_LIST_HEAD(&ha->pool.unusable.head); 4058 ha->pool.good.count = 0; 4059 ha->pool.unusable.count = 0; 4060 for (i = 0; i < 128; i++) { 4061 dsd = kzalloc(sizeof(*dsd), GFP_ATOMIC); 4062 if (!dsd) { 4063 ql_dbg_pci(ql_dbg_init, ha->pdev, 4064 0xe0ee, "%s: failed alloc dsd\n", 4065 __func__); 4066 return 1; 4067 } 4068 ha->dif_bundle_kallocs++; 4069 4070 dsd->dsd_addr = dma_pool_alloc( 4071 ha->dif_bundl_pool, GFP_ATOMIC, 4072 &dsd->dsd_list_dma); 4073 if (!dsd->dsd_addr) { 4074 ql_dbg_pci(ql_dbg_init, ha->pdev, 4075 0xe0ee, 4076 "%s: failed alloc ->dsd_addr\n", 4077 __func__); 4078 kfree(dsd); 4079 ha->dif_bundle_kallocs--; 4080 continue; 4081 } 4082 ha->dif_bundle_dma_allocs++; 4083 4084 /* 4085 * If the DMA buffer crosses a 4G boundary, 4086 * put it on the bad list. 4087 */ 4088 if (MSD(dsd->dsd_list_dma) ^ 4089 MSD(dsd->dsd_list_dma + bufsize)) { 4090 list_add_tail(&dsd->list, 4091 &ha->pool.unusable.head); 4092 ha->pool.unusable.count++; 4093 } else { 4094 list_add_tail(&dsd->list, 4095 &ha->pool.good.head); 4096 ha->pool.good.count++; 4097 } 4098 } 4099 4100 /* return the good ones back to the pool */ 4101 list_for_each_entry_safe(dsd, nxt, 4102 &ha->pool.good.head, list) { 4103 list_del(&dsd->list); 4104 dma_pool_free(ha->dif_bundl_pool, 4105 dsd->dsd_addr, dsd->dsd_list_dma); 4106 ha->dif_bundle_dma_allocs--; 4107 kfree(dsd); 4108 ha->dif_bundle_kallocs--; 4109 } 4110 4111
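/*
 * Worked example of the 4G test above: MSD() yields the upper 32
 * bits of the 64-bit DMA address, so a buffer starting at, say,
 * 0xfffff800 whose bufsize carries its end past 0x100000000 has
 * MSD(start) == 0 but MSD(start + bufsize) == 1; the XOR is
 * non-zero and the buffer is parked on the unusable list, so the
 * firmware is never handed a DIF bundle that wraps the 4G line.
 */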
ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0024, 4112 "%s: dif dma pool (good=%u unusable=%u)\n", 4113 __func__, ha->pool.good.count, 4114 ha->pool.unusable.count); 4115 } 4116 4117 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0025, 4118 "dl_dma_pool=%p fcp_cmnd_dma_pool=%p dif_bundl_pool=%p.\n", 4119 ha->dl_dma_pool, ha->fcp_cmnd_dma_pool, 4120 ha->dif_bundl_pool); 4121 } 4122 4123 /* Allocate memory for SNS commands */ 4124 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 4125 /* Get consistent memory allocated for SNS commands */ 4126 ha->sns_cmd = dma_alloc_coherent(&ha->pdev->dev, 4127 sizeof(struct sns_cmd_pkt), &ha->sns_cmd_dma, GFP_KERNEL); 4128 if (!ha->sns_cmd) 4129 goto fail_dma_pool; 4130 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0026, 4131 "sns_cmd: %p.\n", ha->sns_cmd); 4132 } else { 4133 /* Get consistent memory allocated for MS IOCB */ 4134 ha->ms_iocb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, 4135 &ha->ms_iocb_dma); 4136 if (!ha->ms_iocb) 4137 goto fail_dma_pool; 4138 /* Get consistent memory allocated for CT SNS commands */ 4139 ha->ct_sns = dma_alloc_coherent(&ha->pdev->dev, 4140 sizeof(struct ct_sns_pkt), &ha->ct_sns_dma, GFP_KERNEL); 4141 if (!ha->ct_sns) 4142 goto fail_free_ms_iocb; 4143 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0027, 4144 "ms_iocb=%p ct_sns=%p.\n", 4145 ha->ms_iocb, ha->ct_sns); 4146 } 4147 4148 /* Allocate memory for request ring */ 4149 *req = kzalloc(sizeof(struct req_que), GFP_KERNEL); 4150 if (!*req) { 4151 ql_log_pci(ql_log_fatal, ha->pdev, 0x0028, 4152 "Failed to allocate memory for req.\n"); 4153 goto fail_req; 4154 } 4155 (*req)->length = req_len; 4156 (*req)->ring = dma_alloc_coherent(&ha->pdev->dev, 4157 ((*req)->length + 1) * sizeof(request_t), 4158 &(*req)->dma, GFP_KERNEL); 4159 if (!(*req)->ring) { 4160 ql_log_pci(ql_log_fatal, ha->pdev, 0x0029, 4161 "Failed to allocate memory for req_ring.\n"); 4162 goto fail_req_ring; 4163 } 4164 /* Allocate memory for response ring */ 4165 *rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL); 4166 if (!*rsp) { 4167 ql_log_pci(ql_log_fatal, ha->pdev, 0x002a, 4168 "Failed to allocate memory for rsp.\n"); 4169 goto fail_rsp; 4170 } 4171 (*rsp)->hw = ha; 4172 (*rsp)->length = rsp_len; 4173 (*rsp)->ring = dma_alloc_coherent(&ha->pdev->dev, 4174 ((*rsp)->length + 1) * sizeof(response_t), 4175 &(*rsp)->dma, GFP_KERNEL); 4176 if (!(*rsp)->ring) { 4177 ql_log_pci(ql_log_fatal, ha->pdev, 0x002b, 4178 "Failed to allocate memory for rsp_ring.\n"); 4179 goto fail_rsp_ring; 4180 } 4181 (*req)->rsp = *rsp; 4182 (*rsp)->req = *req; 4183 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002c, 4184 "req=%p req->length=%d req->ring=%p rsp=%p " 4185 "rsp->length=%d rsp->ring=%p.\n", 4186 *req, (*req)->length, (*req)->ring, *rsp, (*rsp)->length, 4187 (*rsp)->ring); 4188 /* Allocate memory for NVRAM data for vports */ 4189 if (ha->nvram_npiv_size) { 4190 ha->npiv_info = kcalloc(ha->nvram_npiv_size, 4191 sizeof(struct qla_npiv_entry), 4192 GFP_KERNEL); 4193 if (!ha->npiv_info) { 4194 ql_log_pci(ql_log_fatal, ha->pdev, 0x002d, 4195 "Failed to allocate memory for npiv_info.\n"); 4196 goto fail_npiv_info; 4197 } 4198 } else 4199 ha->npiv_info = NULL; 4200 4201 /* Get consistent memory allocated for EX-INIT-CB. 
*/ 4202 if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) || 4203 IS_QLA28XX(ha)) { 4204 ha->ex_init_cb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, 4205 &ha->ex_init_cb_dma); 4206 if (!ha->ex_init_cb) 4207 goto fail_ex_init_cb; 4208 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002e, 4209 "ex_init_cb=%p.\n", ha->ex_init_cb); 4210 } 4211 4212 INIT_LIST_HEAD(&ha->gbl_dsd_list); 4213 4214 /* Get consistent memory allocated for Async Port-Database. */ 4215 if (!IS_FWI2_CAPABLE(ha)) { 4216 ha->async_pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, 4217 &ha->async_pd_dma); 4218 if (!ha->async_pd) 4219 goto fail_async_pd; 4220 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002f, 4221 "async_pd=%p.\n", ha->async_pd); 4222 } 4223 4224 INIT_LIST_HEAD(&ha->vp_list); 4225 4226 /* Allocate memory for our loop_id bitmap */ 4227 ha->loop_id_map = kcalloc(BITS_TO_LONGS(LOOPID_MAP_SIZE), 4228 sizeof(long), 4229 GFP_KERNEL); 4230 if (!ha->loop_id_map) 4231 goto fail_loop_id_map; 4232 else { 4233 qla2x00_set_reserved_loop_ids(ha); 4234 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0123, 4235 "loop_id_map=%p.\n", ha->loop_id_map); 4236 } 4237 4238 ha->sfp_data = dma_alloc_coherent(&ha->pdev->dev, 4239 SFP_DEV_SIZE, &ha->sfp_data_dma, GFP_KERNEL); 4240 if (!ha->sfp_data) { 4241 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011b, 4242 "Unable to allocate memory for SFP read-data.\n"); 4243 goto fail_sfp_data; 4244 } 4245 4246 ha->flt = dma_alloc_coherent(&ha->pdev->dev, 4247 sizeof(struct qla_flt_header) + FLT_REGIONS_SIZE, &ha->flt_dma, 4248 GFP_KERNEL); 4249 if (!ha->flt) { 4250 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011b, 4251 "Unable to allocate memory for FLT.\n"); 4252 goto fail_flt_buffer; 4253 } 4254 4255 return 0; 4256 4257 fail_flt_buffer: 4258 dma_free_coherent(&ha->pdev->dev, SFP_DEV_SIZE, 4259 ha->sfp_data, ha->sfp_data_dma); 4260 fail_sfp_data: 4261 kfree(ha->loop_id_map); 4262 fail_loop_id_map: 4263 dma_pool_free(ha->s_dma_pool, ha->async_pd, ha->async_pd_dma); 4264 fail_async_pd: 4265 dma_pool_free(ha->s_dma_pool, ha->ex_init_cb, ha->ex_init_cb_dma); 4266 fail_ex_init_cb: 4267 kfree(ha->npiv_info); 4268 fail_npiv_info: 4269 dma_free_coherent(&ha->pdev->dev, ((*rsp)->length + 1) * 4270 sizeof(response_t), (*rsp)->ring, (*rsp)->dma); 4271 (*rsp)->ring = NULL; 4272 (*rsp)->dma = 0; 4273 fail_rsp_ring: 4274 kfree(*rsp); 4275 *rsp = NULL; 4276 fail_rsp: 4277 dma_free_coherent(&ha->pdev->dev, ((*req)->length + 1) * 4278 sizeof(request_t), (*req)->ring, (*req)->dma); 4279 (*req)->ring = NULL; 4280 (*req)->dma = 0; 4281 fail_req_ring: 4282 kfree(*req); 4283 *req = NULL; 4284 fail_req: 4285 dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt), 4286 ha->ct_sns, ha->ct_sns_dma); 4287 ha->ct_sns = NULL; 4288 ha->ct_sns_dma = 0; 4289 fail_free_ms_iocb: 4290 dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma); 4291 ha->ms_iocb = NULL; 4292 ha->ms_iocb_dma = 0; 4293 4294 if (ha->sns_cmd) 4295 dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt), 4296 ha->sns_cmd, ha->sns_cmd_dma); 4297 fail_dma_pool: 4298 if (ql2xenabledif) { 4299 struct dsd_dma *dsd, *nxt; 4300 4301 list_for_each_entry_safe(dsd, nxt, &ha->pool.unusable.head, 4302 list) { 4303 list_del(&dsd->list); 4304 dma_pool_free(ha->dif_bundl_pool, dsd->dsd_addr, 4305 dsd->dsd_list_dma); 4306 ha->dif_bundle_dma_allocs--; 4307 kfree(dsd); 4308 ha->dif_bundle_kallocs--; 4309 ha->pool.unusable.count--; 4310 } 4311 dma_pool_destroy(ha->dif_bundl_pool); 4312 ha->dif_bundl_pool = NULL; 4313 } 4314 4315 fail_dif_bundl_dma_pool: 4316 if (IS_QLA82XX(ha) || ql2xenabledif) { 
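/*
 * This unwind ladder falls through each label beneath it;
 * dma_pool_destroy() and mempool_destroy() both accept a NULL
 * pointer, so pools that were never created are skipped safely.
 */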
4317 dma_pool_destroy(ha->fcp_cmnd_dma_pool); 4318 ha->fcp_cmnd_dma_pool = NULL; 4319 } 4320 fail_dl_dma_pool: 4321 if (IS_QLA82XX(ha) || ql2xenabledif) { 4322 dma_pool_destroy(ha->dl_dma_pool); 4323 ha->dl_dma_pool = NULL; 4324 } 4325 fail_s_dma_pool: 4326 dma_pool_destroy(ha->s_dma_pool); 4327 ha->s_dma_pool = NULL; 4328 fail_free_nvram: 4329 kfree(ha->nvram); 4330 ha->nvram = NULL; 4331 fail_free_ctx_mempool: 4332 mempool_destroy(ha->ctx_mempool); 4333 ha->ctx_mempool = NULL; 4334 fail_free_srb_mempool: 4335 mempool_destroy(ha->srb_mempool); 4336 ha->srb_mempool = NULL; 4337 fail_free_gid_list: 4338 dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha), 4339 ha->gid_list, 4340 ha->gid_list_dma); 4341 ha->gid_list = NULL; 4342 ha->gid_list_dma = 0; 4343 fail_free_tgt_mem: 4344 qlt_mem_free(ha); 4345 fail_free_init_cb: 4346 dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, ha->init_cb, 4347 ha->init_cb_dma); 4348 ha->init_cb = NULL; 4349 ha->init_cb_dma = 0; 4350 fail: 4351 ql_log(ql_log_fatal, NULL, 0x0030, 4352 "Memory allocation failure.\n"); 4353 return -ENOMEM; 4354 } 4355 4356 int 4357 qla2x00_set_exlogins_buffer(scsi_qla_host_t *vha) 4358 { 4359 int rval; 4360 uint16_t size, max_cnt, temp; 4361 struct qla_hw_data *ha = vha->hw; 4362 4363 /* Return if we don't need to allocate any extended logins */ 4364 if (!ql2xexlogins) 4365 return QLA_SUCCESS; 4366 4367 if (!IS_EXLOGIN_OFFLD_CAPABLE(ha)) 4368 return QLA_SUCCESS; 4369 4370 ql_log(ql_log_info, vha, 0xd021, "EXLOGIN count: %d.\n", ql2xexlogins); 4371 max_cnt = 0; 4372 rval = qla_get_exlogin_status(vha, &size, &max_cnt); 4373 if (rval != QLA_SUCCESS) { 4374 ql_log_pci(ql_log_fatal, ha->pdev, 0xd029, 4375 "Failed to get exlogin status.\n"); 4376 return rval; 4377 } 4378 4379 temp = (ql2xexlogins > max_cnt) ?
max_cnt : ql2xexlogins; 4380 temp *= size; 4381 4382 if (temp != ha->exlogin_size) { 4383 qla2x00_free_exlogin_buffer(ha); 4384 ha->exlogin_size = temp; 4385 4386 ql_log(ql_log_info, vha, 0xd024, 4387 "EXLOGIN: max_logins=%d, portdb=0x%x, total=%d.\n", 4388 max_cnt, size, temp); 4389 4390 ql_log(ql_log_info, vha, 0xd025, 4391 "EXLOGIN: requested size=0x%x\n", ha->exlogin_size); 4392 4393 /* Get consistent memory for extended logins */ 4394 ha->exlogin_buf = dma_alloc_coherent(&ha->pdev->dev, 4395 ha->exlogin_size, &ha->exlogin_buf_dma, GFP_KERNEL); 4396 if (!ha->exlogin_buf) { 4397 ql_log_pci(ql_log_fatal, ha->pdev, 0xd02a, 4398 "Failed to allocate memory for exlogin_buf_dma.\n"); 4399 return -ENOMEM; 4400 } 4401 } 4402 4403 /* Now configure the dma buffer */ 4404 rval = qla_set_exlogin_mem_cfg(vha, ha->exlogin_buf_dma); 4405 if (rval) { 4406 ql_log(ql_log_fatal, vha, 0xd033, 4407 "Setup extended login buffer ****FAILED****.\n"); 4408 qla2x00_free_exlogin_buffer(ha); 4409 } 4410 4411 return rval; 4412 } 4413 4414 /* 4415 * qla2x00_free_exlogin_buffer 4416 * 4417 * Input: 4418 * ha = adapter block pointer 4419 */ 4420 void 4421 qla2x00_free_exlogin_buffer(struct qla_hw_data *ha) 4422 { 4423 if (ha->exlogin_buf) { 4424 dma_free_coherent(&ha->pdev->dev, ha->exlogin_size, 4425 ha->exlogin_buf, ha->exlogin_buf_dma); 4426 ha->exlogin_buf = NULL; 4427 ha->exlogin_size = 0; 4428 } 4429 } 4430 4431 static void 4432 qla2x00_number_of_exch(scsi_qla_host_t *vha, u32 *ret_cnt, u16 max_cnt) 4433 { 4434 u32 temp; 4435 struct init_cb_81xx *icb = (struct init_cb_81xx *)vha->hw->init_cb; 4436 *ret_cnt = FW_DEF_EXCHANGES_CNT; 4437 4438 if (max_cnt > vha->hw->max_exchg) 4439 max_cnt = vha->hw->max_exchg; 4440 4441 if (qla_ini_mode_enabled(vha)) { 4442 if (vha->ql2xiniexchg > max_cnt) 4443 vha->ql2xiniexchg = max_cnt; 4444 4445 if (vha->ql2xiniexchg > FW_DEF_EXCHANGES_CNT) 4446 *ret_cnt = vha->ql2xiniexchg; 4447 4448 } else if (qla_tgt_mode_enabled(vha)) { 4449 if (vha->ql2xexchoffld > max_cnt) { 4450 vha->ql2xexchoffld = max_cnt; 4451 icb->exchange_count = cpu_to_le16(vha->ql2xexchoffld); 4452 } 4453 4454 if (vha->ql2xexchoffld > FW_DEF_EXCHANGES_CNT) 4455 *ret_cnt = vha->ql2xexchoffld; 4456 } else if (qla_dual_mode_enabled(vha)) { 4457 temp = vha->ql2xiniexchg + vha->ql2xexchoffld; 4458 if (temp > max_cnt) { 4459 vha->ql2xiniexchg -= (temp - max_cnt)/2; 4460 vha->ql2xexchoffld -= (((temp - max_cnt)/2) + 1); 4461 temp = max_cnt; 4462 icb->exchange_count = cpu_to_le16(vha->ql2xexchoffld); 4463 } 4464 4465 if (temp > FW_DEF_EXCHANGES_CNT) 4466 *ret_cnt = temp; 4467 } 4468 } 4469 4470 int 4471 qla2x00_set_exchoffld_buffer(scsi_qla_host_t *vha) 4472 { 4473 int rval; 4474 u16 size, max_cnt; 4475 u32 actual_cnt, totsz; 4476 struct qla_hw_data *ha = vha->hw; 4477 4478 if (!ha->flags.exchoffld_enabled) 4479 return QLA_SUCCESS; 4480 4481 if (!IS_EXCHG_OFFLD_CAPABLE(ha)) 4482 return QLA_SUCCESS; 4483 4484 max_cnt = 0; 4485 rval = qla_get_exchoffld_status(vha, &size, &max_cnt); 4486 if (rval != QLA_SUCCESS) { 4487 ql_log_pci(ql_log_fatal, ha->pdev, 0xd012, 4488 "Failed to get exchange offload status.\n"); 4489 return rval; 4490 } 4491 4492 qla2x00_number_of_exch(vha, &actual_cnt, max_cnt); 4493 ql_log(ql_log_info, vha, 0xd014, 4494 "Actual exchange offload count: %d.\n", actual_cnt); 4495 4496 totsz = actual_cnt * size; 4497 4498 if (totsz != ha->exchoffld_size) { 4499 qla2x00_free_exchoffld_buffer(ha); 4500 if (actual_cnt <= FW_DEF_EXCHANGES_CNT) { 4501 ha->exchoffld_size = 0; 4502 ha->flags.exchoffld_enabled = 0; 4503 return
QLA_SUCCESS; 4504 } 4505 4506 ha->exchoffld_size = totsz; 4507 4508 ql_log(ql_log_info, vha, 0xd016, 4509 "Exchange offload: max_count=%d, actual count=%d entry sz=0x%x, total sz=0x%x\n", 4510 max_cnt, actual_cnt, size, totsz); 4511 4512 ql_log(ql_log_info, vha, 0xd017, 4513 "Exchange Buffers requested size = 0x%x\n", 4514 ha->exchoffld_size); 4515 4516 /* Get consistent memory for extended logins */ 4517 ha->exchoffld_buf = dma_alloc_coherent(&ha->pdev->dev, 4518 ha->exchoffld_size, &ha->exchoffld_buf_dma, GFP_KERNEL); 4519 if (!ha->exchoffld_buf) { 4520 ql_log_pci(ql_log_fatal, ha->pdev, 0xd013, 4521 "Failed to allocate memory for Exchange Offload.\n"); 4522 4523 if (ha->max_exchg > 4524 (FW_DEF_EXCHANGES_CNT + REDUCE_EXCHANGES_CNT)) { 4525 ha->max_exchg -= REDUCE_EXCHANGES_CNT; 4526 } else if (ha->max_exchg > 4527 (FW_DEF_EXCHANGES_CNT + 512)) { 4528 ha->max_exchg -= 512; 4529 } else { 4530 ha->flags.exchoffld_enabled = 0; 4531 ql_log_pci(ql_log_fatal, ha->pdev, 0xd013, 4532 "Disabling Exchange offload due to lack of memory\n"); 4533 } 4534 ha->exchoffld_size = 0; 4535 4536 return -ENOMEM; 4537 } 4538 } else if (!ha->exchoffld_buf || (actual_cnt <= FW_DEF_EXCHANGES_CNT)) { 4539 /* pathological case */ 4540 qla2x00_free_exchoffld_buffer(ha); 4541 ha->exchoffld_size = 0; 4542 ha->flags.exchoffld_enabled = 0; 4543 ql_log(ql_log_info, vha, 0xd016, 4544 "Exchange offload not enabled: offld size=%d, actual count=%d entry sz=0x%x, total sz=0x%x.\n", 4545 ha->exchoffld_size, actual_cnt, size, totsz); 4546 return 0; 4547 } 4548 4549 /* Now configure the dma buffer */ 4550 rval = qla_set_exchoffld_mem_cfg(vha); 4551 if (rval) { 4552 ql_log(ql_log_fatal, vha, 0xd02e, 4553 "Setup exchange offload buffer ****FAILED****.\n"); 4554 qla2x00_free_exchoffld_buffer(ha); 4555 } else { 4556 /* re-adjust the number of target exchanges */ 4557 struct init_cb_81xx *icb = (struct init_cb_81xx *)ha->init_cb; 4558 4559 if (qla_ini_mode_enabled(vha)) 4560 icb->exchange_count = 0; 4561 else 4562 icb->exchange_count = cpu_to_le16(vha->ql2xexchoffld); 4563 } 4564 4565 return rval; 4566 } 4567 4568 /* 4569 * qla2x00_free_exchoffld_buffer 4570 * 4571 * Input: 4572 * ha = adapter block pointer 4573 */ 4574 void 4575 qla2x00_free_exchoffld_buffer(struct qla_hw_data *ha) 4576 { 4577 if (ha->exchoffld_buf) { 4578 dma_free_coherent(&ha->pdev->dev, ha->exchoffld_size, 4579 ha->exchoffld_buf, ha->exchoffld_buf_dma); 4580 ha->exchoffld_buf = NULL; 4581 ha->exchoffld_size = 0; 4582 } 4583 } 4584 4585 /* 4586 * qla2x00_free_fw_dump 4587 * Frees firmware dump buffers. 4588 * 4589 * Input: 4590 * ha = adapter block pointer 4591 */ 4592 static void 4593 qla2x00_free_fw_dump(struct qla_hw_data *ha) 4594 { 4595 struct fwdt *fwdt = ha->fwdt; 4596 uint j; 4597 4598 if (ha->fce) 4599 dma_free_coherent(&ha->pdev->dev, 4600 FCE_SIZE, ha->fce, ha->fce_dma); 4601 4602 if (ha->eft) 4603 dma_free_coherent(&ha->pdev->dev, 4604 EFT_SIZE, ha->eft, ha->eft_dma); 4605 4606 vfree(ha->fw_dump); 4607 4608 4609 ha->fce = NULL; 4610 ha->fce_dma = 0; 4611 ha->flags.fce_enabled = 0; 4612 ha->eft = NULL; 4613 ha->eft_dma = 0; 4614 ha->fw_dumped = 0; 4615 ha->fw_dump_cap_flags = 0; 4616 ha->fw_dump_reading = 0; 4617 ha->fw_dump = NULL; 4618 ha->fw_dump_len = 0; 4619 4620 for (j = 0; j < 2; j++, fwdt++) { 4621 vfree(fwdt->template); 4622 fwdt->template = NULL; 4623 4624 fwdt->length = 0; 4625 } 4626 } 4627 4628 /* 4629 * qla2x00_mem_free 4630 * Frees all adapter allocated memory. 4631 * 4632 * Input: 4633 * ha = adapter block pointer.
4634 */ 4635 static void 4636 qla2x00_mem_free(struct qla_hw_data *ha) 4637 { 4638 qla2x00_free_fw_dump(ha); 4639 4640 if (ha->mctp_dump) 4641 dma_free_coherent(&ha->pdev->dev, MCTP_DUMP_SIZE, ha->mctp_dump, 4642 ha->mctp_dump_dma); 4643 ha->mctp_dump = NULL; 4644 4645 mempool_destroy(ha->srb_mempool); 4646 ha->srb_mempool = NULL; 4647 4648 if (ha->dcbx_tlv) 4649 dma_free_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE, 4650 ha->dcbx_tlv, ha->dcbx_tlv_dma); 4651 ha->dcbx_tlv = NULL; 4652 4653 if (ha->xgmac_data) 4654 dma_free_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE, 4655 ha->xgmac_data, ha->xgmac_data_dma); 4656 ha->xgmac_data = NULL; 4657 4658 if (ha->sns_cmd) 4659 dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt), 4660 ha->sns_cmd, ha->sns_cmd_dma); 4661 ha->sns_cmd = NULL; 4662 ha->sns_cmd_dma = 0; 4663 4664 if (ha->ct_sns) 4665 dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt), 4666 ha->ct_sns, ha->ct_sns_dma); 4667 ha->ct_sns = NULL; 4668 ha->ct_sns_dma = 0; 4669 4670 if (ha->sfp_data) 4671 dma_free_coherent(&ha->pdev->dev, SFP_DEV_SIZE, ha->sfp_data, 4672 ha->sfp_data_dma); 4673 ha->sfp_data = NULL; 4674 4675 if (ha->flt) 4676 dma_free_coherent(&ha->pdev->dev, 4677 sizeof(struct qla_flt_header) + FLT_REGIONS_SIZE, 4678 ha->flt, ha->flt_dma); 4679 ha->flt = NULL; 4680 ha->flt_dma = 0; 4681 4682 if (ha->ms_iocb) 4683 dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma); 4684 ha->ms_iocb = NULL; 4685 ha->ms_iocb_dma = 0; 4686 4687 if (ha->ex_init_cb) 4688 dma_pool_free(ha->s_dma_pool, 4689 ha->ex_init_cb, ha->ex_init_cb_dma); 4690 ha->ex_init_cb = NULL; 4691 ha->ex_init_cb_dma = 0; 4692 4693 if (ha->async_pd) 4694 dma_pool_free(ha->s_dma_pool, ha->async_pd, ha->async_pd_dma); 4695 ha->async_pd = NULL; 4696 ha->async_pd_dma = 0; 4697 4698 dma_pool_destroy(ha->s_dma_pool); 4699 ha->s_dma_pool = NULL; 4700 4701 if (ha->gid_list) 4702 dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha), 4703 ha->gid_list, ha->gid_list_dma); 4704 ha->gid_list = NULL; 4705 ha->gid_list_dma = 0; 4706 4707 if (IS_QLA82XX(ha)) { 4708 if (!list_empty(&ha->gbl_dsd_list)) { 4709 struct dsd_dma *dsd_ptr, *tdsd_ptr; 4710 4711 /* clean up allocated prev pool */ 4712 list_for_each_entry_safe(dsd_ptr, 4713 tdsd_ptr, &ha->gbl_dsd_list, list) { 4714 dma_pool_free(ha->dl_dma_pool, 4715 dsd_ptr->dsd_addr, dsd_ptr->dsd_list_dma); 4716 list_del(&dsd_ptr->list); 4717 kfree(dsd_ptr); 4718 } 4719 } 4720 } 4721 4722 dma_pool_destroy(ha->dl_dma_pool); 4723 ha->dl_dma_pool = NULL; 4724 4725 dma_pool_destroy(ha->fcp_cmnd_dma_pool); 4726 ha->fcp_cmnd_dma_pool = NULL; 4727 4728 mempool_destroy(ha->ctx_mempool); 4729 ha->ctx_mempool = NULL; 4730 4731 if (ql2xenabledif && ha->dif_bundl_pool) { 4732 struct dsd_dma *dsd, *nxt; 4733 4734 list_for_each_entry_safe(dsd, nxt, &ha->pool.unusable.head, 4735 list) { 4736 list_del(&dsd->list); 4737 dma_pool_free(ha->dif_bundl_pool, dsd->dsd_addr, 4738 dsd->dsd_list_dma); 4739 ha->dif_bundle_dma_allocs--; 4740 kfree(dsd); 4741 ha->dif_bundle_kallocs--; 4742 ha->pool.unusable.count--; 4743 } 4744 list_for_each_entry_safe(dsd, nxt, &ha->pool.good.head, list) { 4745 list_del(&dsd->list); 4746 dma_pool_free(ha->dif_bundl_pool, dsd->dsd_addr, 4747 dsd->dsd_list_dma); 4748 ha->dif_bundle_dma_allocs--; 4749 kfree(dsd); 4750 ha->dif_bundle_kallocs--; 4751 } 4752 } 4753 4754 dma_pool_destroy(ha->dif_bundl_pool); 4755 ha->dif_bundl_pool = NULL; 4756 4757 qlt_mem_free(ha); 4758 4759 if (ha->init_cb) 4760 dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, 4761 ha->init_cb, 
ha->init_cb_dma); 4762 ha->init_cb = NULL; 4763 ha->init_cb_dma = 0; 4764 4765 vfree(ha->optrom_buffer); 4766 ha->optrom_buffer = NULL; 4767 kfree(ha->nvram); 4768 ha->nvram = NULL; 4769 kfree(ha->npiv_info); 4770 ha->npiv_info = NULL; 4771 kfree(ha->swl); 4772 ha->swl = NULL; 4773 kfree(ha->loop_id_map); 4774 ha->loop_id_map = NULL; 4775 } 4776 4777 struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht, 4778 struct qla_hw_data *ha) 4779 { 4780 struct Scsi_Host *host; 4781 struct scsi_qla_host *vha = NULL; 4782 4783 host = scsi_host_alloc(sht, sizeof(scsi_qla_host_t)); 4784 if (!host) { 4785 ql_log_pci(ql_log_fatal, ha->pdev, 0x0107, 4786 "Failed to allocate host from the scsi layer, aborting.\n"); 4787 return NULL; 4788 } 4789 4790 /* Clear our data area */ 4791 vha = shost_priv(host); 4792 memset(vha, 0, sizeof(scsi_qla_host_t)); 4793 4794 vha->host = host; 4795 vha->host_no = host->host_no; 4796 vha->hw = ha; 4797 4798 vha->qlini_mode = ql2x_ini_mode; 4799 vha->ql2xexchoffld = ql2xexchoffld; 4800 vha->ql2xiniexchg = ql2xiniexchg; 4801 4802 INIT_LIST_HEAD(&vha->vp_fcports); 4803 INIT_LIST_HEAD(&vha->work_list); 4804 INIT_LIST_HEAD(&vha->list); 4805 INIT_LIST_HEAD(&vha->qla_cmd_list); 4806 INIT_LIST_HEAD(&vha->qla_sess_op_cmd_list); 4807 INIT_LIST_HEAD(&vha->logo_list); 4808 INIT_LIST_HEAD(&vha->plogi_ack_list); 4809 INIT_LIST_HEAD(&vha->qp_list); 4810 INIT_LIST_HEAD(&vha->gnl.fcports); 4811 INIT_LIST_HEAD(&vha->gpnid_list); 4812 INIT_WORK(&vha->iocb_work, qla2x00_iocb_work_fn); 4813 4814 spin_lock_init(&vha->work_lock); 4815 spin_lock_init(&vha->cmd_list_lock); 4816 init_waitqueue_head(&vha->fcport_waitQ); 4817 init_waitqueue_head(&vha->vref_waitq); 4818 4819 vha->gnl.size = sizeof(struct get_name_list_extended) * 4820 (ha->max_loop_id + 1); 4821 vha->gnl.l = dma_alloc_coherent(&ha->pdev->dev, 4822 vha->gnl.size, &vha->gnl.ldma, GFP_KERNEL); 4823 if (!vha->gnl.l) { 4824 ql_log(ql_log_fatal, vha, 0xd04a, 4825 "Alloc failed for name list.\n"); 4826 scsi_host_put(vha->host); 4827 return NULL; 4828 } 4829 4830 /* todo: what about ext login? 
*/ 4831 vha->scan.size = ha->max_fibre_devices * sizeof(struct fab_scan_rp); 4832 vha->scan.l = vmalloc(vha->scan.size); 4833 if (!vha->scan.l) { 4834 ql_log(ql_log_fatal, vha, 0xd04a, 4835 "Alloc failed for scan database.\n"); 4836 dma_free_coherent(&ha->pdev->dev, vha->gnl.size, 4837 vha->gnl.l, vha->gnl.ldma); 4838 vha->gnl.l = NULL; 4839 scsi_host_put(vha->host); 4840 return NULL; 4841 } 4842 INIT_DELAYED_WORK(&vha->scan.scan_work, qla_scan_work_fn); 4843 4844 sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no); 4845 ql_dbg(ql_dbg_init, vha, 0x0041, 4846 "Allocated the host=%p hw=%p vha=%p dev_name=%s", 4847 vha->host, vha->hw, vha, 4848 dev_name(&(ha->pdev->dev))); 4849 4850 return vha; 4851 } 4852 4853 struct qla_work_evt * 4854 qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type) 4855 { 4856 struct qla_work_evt *e; 4857 uint8_t bail; 4858 4859 QLA_VHA_MARK_BUSY(vha, bail); 4860 if (bail) 4861 return NULL; 4862 4863 e = kzalloc(sizeof(struct qla_work_evt), GFP_ATOMIC); 4864 if (!e) { 4865 QLA_VHA_MARK_NOT_BUSY(vha); 4866 return NULL; 4867 } 4868 4869 INIT_LIST_HEAD(&e->list); 4870 e->type = type; 4871 e->flags = QLA_EVT_FLAG_FREE; 4872 return e; 4873 } 4874 4875 int 4876 qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e) 4877 { 4878 unsigned long flags; 4879 bool q = false; 4880 4881 spin_lock_irqsave(&vha->work_lock, flags); 4882 list_add_tail(&e->list, &vha->work_list); 4883 4884 if (!test_and_set_bit(IOCB_WORK_ACTIVE, &vha->dpc_flags)) 4885 q = true; 4886 4887 spin_unlock_irqrestore(&vha->work_lock, flags); 4888 4889 if (q) 4890 queue_work(vha->hw->wq, &vha->iocb_work); 4891 4892 return QLA_SUCCESS; 4893 } 4894 4895 int 4896 qla2x00_post_aen_work(struct scsi_qla_host *vha, enum fc_host_event_code code, 4897 u32 data) 4898 { 4899 struct qla_work_evt *e; 4900 4901 e = qla2x00_alloc_work(vha, QLA_EVT_AEN); 4902 if (!e) 4903 return QLA_FUNCTION_FAILED; 4904 4905 e->u.aen.code = code; 4906 e->u.aen.data = data; 4907 return qla2x00_post_work(vha, e); 4908 } 4909 4910 int 4911 qla2x00_post_idc_ack_work(struct scsi_qla_host *vha, uint16_t *mb) 4912 { 4913 struct qla_work_evt *e; 4914 4915 e = qla2x00_alloc_work(vha, QLA_EVT_IDC_ACK); 4916 if (!e) 4917 return QLA_FUNCTION_FAILED; 4918 4919 memcpy(e->u.idc_ack.mb, mb, QLA_IDC_ACK_REGS * sizeof(uint16_t)); 4920 return qla2x00_post_work(vha, e); 4921 } 4922 4923 #define qla2x00_post_async_work(name, type) \ 4924 int qla2x00_post_async_##name##_work( \ 4925 struct scsi_qla_host *vha, \ 4926 fc_port_t *fcport, uint16_t *data) \ 4927 { \ 4928 struct qla_work_evt *e; \ 4929 \ 4930 e = qla2x00_alloc_work(vha, type); \ 4931 if (!e) \ 4932 return QLA_FUNCTION_FAILED; \ 4933 \ 4934 e->u.logio.fcport = fcport; \ 4935 if (data) { \ 4936 e->u.logio.data[0] = data[0]; \ 4937 e->u.logio.data[1] = data[1]; \ 4938 } \ 4939 fcport->flags |= FCF_ASYNC_ACTIVE; \ 4940 return qla2x00_post_work(vha, e); \ 4941 } 4942 4943 qla2x00_post_async_work(login, QLA_EVT_ASYNC_LOGIN); 4944 qla2x00_post_async_work(logout, QLA_EVT_ASYNC_LOGOUT); 4945 qla2x00_post_async_work(adisc, QLA_EVT_ASYNC_ADISC); 4946 qla2x00_post_async_work(prlo, QLA_EVT_ASYNC_PRLO); 4947 qla2x00_post_async_work(prlo_done, QLA_EVT_ASYNC_PRLO_DONE); 4948 4949 int 4950 qla2x00_post_uevent_work(struct scsi_qla_host *vha, u32 code) 4951 { 4952 struct qla_work_evt *e; 4953 4954 e = qla2x00_alloc_work(vha, QLA_EVT_UEVENT); 4955 if (!e) 4956 return QLA_FUNCTION_FAILED; 4957 4958 e->u.uevent.code = code; 4959 return qla2x00_post_work(vha, e); 4960 } 4961 4962 
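/*
 * Producer-side usage of the deferred-work machinery above, as a
 * minimal sketch (it mirrors qla2x00_post_aen_work(); error handling
 * as in the real callers):
 *
 *	struct qla_work_evt *e;
 *
 *	e = qla2x00_alloc_work(vha, QLA_EVT_AEN);
 *	if (!e)
 *		return QLA_FUNCTION_FAILED;
 *	e->u.aen.code = FCH_EVT_LIP;
 *	e->u.aen.data = 0;
 *	return qla2x00_post_work(vha, e);
 *
 * qla2x00_post_work() kicks vha->iocb_work, and qla2x00_do_work()
 * later dispatches the event from the driver workqueue; since the
 * event carries QLA_EVT_FLAG_FREE, the consumer kfree()s it after
 * dispatch.
 */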
static void 4963 qla2x00_uevent_emit(struct scsi_qla_host *vha, u32 code) 4964 { 4965 char event_string[40] = ""; 4966 char *envp[] = { event_string, NULL }; 4967 4968 switch (code) { 4969 case QLA_UEVENT_CODE_FW_DUMP: 4970 snprintf(event_string, sizeof(event_string), "FW_DUMP=%ld", 4971 vha->host_no); 4972 break; 4973 default: 4974 /* leave event_string empty for unknown codes */ 4975 break; 4976 } 4977 kobject_uevent_env(&vha->hw->pdev->dev.kobj, KOBJ_CHANGE, envp); 4978 } 4979 4980 int 4981 qlafx00_post_aenfx_work(struct scsi_qla_host *vha, uint32_t evtcode, 4982 uint32_t *data, int cnt) 4983 { 4984 struct qla_work_evt *e; 4985 4986 e = qla2x00_alloc_work(vha, QLA_EVT_AENFX); 4987 if (!e) 4988 return QLA_FUNCTION_FAILED; 4989 4990 e->u.aenfx.evtcode = evtcode; 4991 e->u.aenfx.count = cnt; 4992 memcpy(e->u.aenfx.mbx, data, sizeof(*data) * cnt); 4993 return qla2x00_post_work(vha, e); 4994 } 4995 4996 void qla24xx_sched_upd_fcport(fc_port_t *fcport) 4997 { 4998 unsigned long flags; 4999 5000 if (IS_SW_RESV_ADDR(fcport->d_id)) 5001 return; 5002 5003 spin_lock_irqsave(&fcport->vha->work_lock, flags); 5004 if (fcport->disc_state == DSC_UPD_FCPORT) { 5005 spin_unlock_irqrestore(&fcport->vha->work_lock, flags); 5006 return; 5007 } 5008 fcport->jiffies_at_registration = jiffies; 5009 fcport->sec_since_registration = 0; 5010 fcport->next_disc_state = DSC_DELETED; 5011 qla2x00_set_fcport_disc_state(fcport, DSC_UPD_FCPORT); 5012 spin_unlock_irqrestore(&fcport->vha->work_lock, flags); 5013 5014 queue_work(system_unbound_wq, &fcport->reg_work); 5015 } 5016 5017 static 5018 void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e) 5019 { 5020 unsigned long flags; 5021 fc_port_t *fcport = NULL, *tfcp; 5022 struct qlt_plogi_ack_t *pla = 5023 (struct qlt_plogi_ack_t *)e->u.new_sess.pla; 5024 uint8_t free_fcport = 0; 5025 5026 ql_dbg(ql_dbg_disc, vha, 0xffff, 5027 "%s %d %8phC enter\n", 5028 __func__, __LINE__, e->u.new_sess.port_name); 5029 5030 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); 5031 fcport = qla2x00_find_fcport_by_wwpn(vha, e->u.new_sess.port_name, 1); 5032 if (fcport) { 5033 fcport->d_id = e->u.new_sess.id; 5034 if (pla) { 5035 fcport->fw_login_state = DSC_LS_PLOGI_PEND; 5036 memcpy(fcport->node_name, 5037 pla->iocb.u.isp24.u.plogi.node_name, 5038 WWN_SIZE); 5039 qlt_plogi_ack_link(vha, pla, fcport, QLT_PLOGI_LINK_SAME_WWN); 5040 /* we took an extra ref_count to prevent PLOGI ACK when 5041 * fcport/sess has not been created.
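 * Now that qlt_plogi_ack_link() holds its own reference to the
 * session, the extra one is dropped just below.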
5042 */ 5043 pla->ref_count--; 5044 } 5045 } else { 5046 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 5047 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); 5048 if (fcport) { 5049 fcport->d_id = e->u.new_sess.id; 5050 fcport->flags |= FCF_FABRIC_DEVICE; 5051 fcport->fw_login_state = DSC_LS_PLOGI_PEND; 5052 5053 memcpy(fcport->port_name, e->u.new_sess.port_name, 5054 WWN_SIZE); 5055 5056 fcport->fc4_type = e->u.new_sess.fc4_type; 5057 if (e->u.new_sess.fc4_type & FS_FCP_IS_N2N) { 5058 fcport->fc4_type = FS_FC4TYPE_FCP; 5059 fcport->n2n_flag = 1; 5060 if (vha->flags.nvme_enabled) 5061 fcport->fc4_type |= FS_FC4TYPE_NVME; 5062 } 5063 5064 } else { 5065 ql_dbg(ql_dbg_disc, vha, 0xffff, 5066 "%s %8phC mem alloc fail.\n", 5067 __func__, e->u.new_sess.port_name); 5068 5069 if (pla) { 5070 list_del(&pla->list); 5071 kmem_cache_free(qla_tgt_plogi_cachep, pla); 5072 } 5073 return; 5074 } 5075 5076 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); 5077 /* search again to make sure no one else got ahead */ 5078 tfcp = qla2x00_find_fcport_by_wwpn(vha, 5079 e->u.new_sess.port_name, 1); 5080 if (tfcp) { 5081 /* should rarely happen */ 5082 ql_dbg(ql_dbg_disc, vha, 0xffff, 5083 "%s %8phC found existing fcport b4 add. DS %d LS %d\n", 5084 __func__, tfcp->port_name, tfcp->disc_state, 5085 tfcp->fw_login_state); 5086 5087 free_fcport = 1; 5088 } else { 5089 list_add_tail(&fcport->list, &vha->vp_fcports); 5090 5091 } 5092 if (pla) { 5093 qlt_plogi_ack_link(vha, pla, fcport, 5094 QLT_PLOGI_LINK_SAME_WWN); 5095 pla->ref_count--; 5096 } 5097 } 5098 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 5099 5100 if (fcport) { 5101 fcport->id_changed = 1; 5102 fcport->scan_state = QLA_FCPORT_FOUND; 5103 fcport->chip_reset = vha->hw->base_qpair->chip_reset; 5104 memcpy(fcport->node_name, e->u.new_sess.node_name, WWN_SIZE); 5105 5106 if (pla) { 5107 if (pla->iocb.u.isp24.status_subcode == ELS_PRLI) { 5108 u16 wd3_lo; 5109 5110 fcport->fw_login_state = DSC_LS_PRLI_PEND; 5111 fcport->local = 0; 5112 fcport->loop_id = 5113 le16_to_cpu( 5114 pla->iocb.u.isp24.nport_handle); 5115 5116 wd3_lo = 5117 le16_to_cpu( 5118 pla->iocb.u.isp24.u.prli.wd3_lo); 5119 5120 if (wd3_lo & BIT_7) 5121 fcport->conf_compl_supported = 1; 5122 5123 if ((wd3_lo & BIT_4) == 0) 5124 fcport->port_type = FCT_INITIATOR; 5125 else 5126 fcport->port_type = FCT_TARGET; 5127 } 5128 qlt_plogi_ack_unref(vha, pla); 5129 } else { 5130 fc_port_t *dfcp = NULL; 5131 5132 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); 5133 tfcp = qla2x00_find_fcport_by_nportid(vha, 5134 &e->u.new_sess.id, 1); 5135 if (tfcp && (tfcp != fcport)) { 5136 /* 5137 * We have a conflicting fcport with the same NportID. 5138 */ 5139 ql_dbg(ql_dbg_disc, vha, 0xffff, 5140 "%s %8phC found conflict b4 add.
DS %d LS %d\n", 5141 __func__, tfcp->port_name, tfcp->disc_state, 5142 tfcp->fw_login_state); 5143 5144 switch (tfcp->disc_state) { 5145 case DSC_DELETED: 5146 break; 5147 case DSC_DELETE_PEND: 5148 fcport->login_pause = 1; 5149 tfcp->conflict = fcport; 5150 break; 5151 default: 5152 fcport->login_pause = 1; 5153 tfcp->conflict = fcport; 5154 dfcp = tfcp; 5155 break; 5156 } 5157 } 5158 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 5159 if (dfcp) 5160 qlt_schedule_sess_for_deletion(tfcp); 5161 5162 if (N2N_TOPO(vha->hw)) { 5163 fcport->flags &= ~FCF_FABRIC_DEVICE; 5164 fcport->keep_nport_handle = 1; 5165 if (vha->flags.nvme_enabled) { 5166 fcport->fc4_type = 5167 (FS_FC4TYPE_NVME | FS_FC4TYPE_FCP); 5168 fcport->n2n_flag = 1; 5169 } 5170 fcport->fw_login_state = 0; 5171 /* 5172 * wait link init done before sending login 5173 */ 5174 } else { 5175 qla24xx_fcport_handle_login(vha, fcport); 5176 } 5177 } 5178 } 5179 5180 if (free_fcport) { 5181 qla2x00_free_fcport(fcport); 5182 if (pla) { 5183 list_del(&pla->list); 5184 kmem_cache_free(qla_tgt_plogi_cachep, pla); 5185 } 5186 } 5187 } 5188 5189 static void qla_sp_retry(struct scsi_qla_host *vha, struct qla_work_evt *e) 5190 { 5191 struct srb *sp = e->u.iosb.sp; 5192 int rval; 5193 5194 rval = qla2x00_start_sp(sp); 5195 if (rval != QLA_SUCCESS) { 5196 ql_dbg(ql_dbg_disc, vha, 0x2043, 5197 "%s: %s: Re-issue IOCB failed (%d).\n", 5198 __func__, sp->name, rval); 5199 qla24xx_sp_unmap(vha, sp); 5200 } 5201 } 5202 5203 void 5204 qla2x00_do_work(struct scsi_qla_host *vha) 5205 { 5206 struct qla_work_evt *e, *tmp; 5207 unsigned long flags; 5208 LIST_HEAD(work); 5209 int rc; 5210 5211 spin_lock_irqsave(&vha->work_lock, flags); 5212 list_splice_init(&vha->work_list, &work); 5213 spin_unlock_irqrestore(&vha->work_lock, flags); 5214 5215 list_for_each_entry_safe(e, tmp, &work, list) { 5216 rc = QLA_SUCCESS; 5217 switch (e->type) { 5218 case QLA_EVT_AEN: 5219 fc_host_post_event(vha->host, fc_get_event_number(), 5220 e->u.aen.code, e->u.aen.data); 5221 break; 5222 case QLA_EVT_IDC_ACK: 5223 qla81xx_idc_ack(vha, e->u.idc_ack.mb); 5224 break; 5225 case QLA_EVT_ASYNC_LOGIN: 5226 qla2x00_async_login(vha, e->u.logio.fcport, 5227 e->u.logio.data); 5228 break; 5229 case QLA_EVT_ASYNC_LOGOUT: 5230 rc = qla2x00_async_logout(vha, e->u.logio.fcport); 5231 break; 5232 case QLA_EVT_ASYNC_ADISC: 5233 qla2x00_async_adisc(vha, e->u.logio.fcport, 5234 e->u.logio.data); 5235 break; 5236 case QLA_EVT_UEVENT: 5237 qla2x00_uevent_emit(vha, e->u.uevent.code); 5238 break; 5239 case QLA_EVT_AENFX: 5240 qlafx00_process_aen(vha, e); 5241 break; 5242 case QLA_EVT_GPNID: 5243 qla24xx_async_gpnid(vha, &e->u.gpnid.id); 5244 break; 5245 case QLA_EVT_UNMAP: 5246 qla24xx_sp_unmap(vha, e->u.iosb.sp); 5247 break; 5248 case QLA_EVT_RELOGIN: 5249 qla2x00_relogin(vha); 5250 break; 5251 case QLA_EVT_NEW_SESS: 5252 qla24xx_create_new_sess(vha, e); 5253 break; 5254 case QLA_EVT_GPDB: 5255 qla24xx_async_gpdb(vha, e->u.fcport.fcport, 5256 e->u.fcport.opt); 5257 break; 5258 case QLA_EVT_PRLI: 5259 qla24xx_async_prli(vha, e->u.fcport.fcport); 5260 break; 5261 case QLA_EVT_GPSC: 5262 qla24xx_async_gpsc(vha, e->u.fcport.fcport); 5263 break; 5264 case QLA_EVT_GNL: 5265 qla24xx_async_gnl(vha, e->u.fcport.fcport); 5266 break; 5267 case QLA_EVT_NACK: 5268 qla24xx_do_nack_work(vha, e); 5269 break; 5270 case QLA_EVT_ASYNC_PRLO: 5271 rc = qla2x00_async_prlo(vha, e->u.logio.fcport); 5272 break; 5273 case QLA_EVT_ASYNC_PRLO_DONE: 5274 qla2x00_async_prlo_done(vha, e->u.logio.fcport, 5275 e->u.logio.data); 
5276 break; 5277 case QLA_EVT_GPNFT: 5278 qla24xx_async_gpnft(vha, e->u.gpnft.fc4_type, 5279 e->u.gpnft.sp); 5280 break; 5281 case QLA_EVT_GPNFT_DONE: 5282 qla24xx_async_gpnft_done(vha, e->u.iosb.sp); 5283 break; 5284 case QLA_EVT_GNNFT_DONE: 5285 qla24xx_async_gnnft_done(vha, e->u.iosb.sp); 5286 break; 5287 case QLA_EVT_GNNID: 5288 qla24xx_async_gnnid(vha, e->u.fcport.fcport); 5289 break; 5290 case QLA_EVT_GFPNID: 5291 qla24xx_async_gfpnid(vha, e->u.fcport.fcport); 5292 break; 5293 case QLA_EVT_SP_RETRY: 5294 qla_sp_retry(vha, e); 5295 break; 5296 case QLA_EVT_IIDMA: 5297 qla_do_iidma_work(vha, e->u.fcport.fcport); 5298 break; 5299 case QLA_EVT_ELS_PLOGI: 5300 qla24xx_els_dcmd2_iocb(vha, ELS_DCMD_PLOGI, 5301 e->u.fcport.fcport, false); 5302 break; 5303 } 5304 5305 if (rc == EAGAIN) { 5306 /* put 'work' at head of 'vha->work_list' */ 5307 spin_lock_irqsave(&vha->work_lock, flags); 5308 list_splice(&work, &vha->work_list); 5309 spin_unlock_irqrestore(&vha->work_lock, flags); 5310 break; 5311 } 5312 list_del_init(&e->list); 5313 if (e->flags & QLA_EVT_FLAG_FREE) 5314 kfree(e); 5315 5316 /* For each work completed decrement vha ref count */ 5317 QLA_VHA_MARK_NOT_BUSY(vha); 5318 } 5319 } 5320 5321 int qla24xx_post_relogin_work(struct scsi_qla_host *vha) 5322 { 5323 struct qla_work_evt *e; 5324 5325 e = qla2x00_alloc_work(vha, QLA_EVT_RELOGIN); 5326 5327 if (!e) { 5328 set_bit(RELOGIN_NEEDED, &vha->dpc_flags); 5329 return QLA_FUNCTION_FAILED; 5330 } 5331 5332 return qla2x00_post_work(vha, e); 5333 } 5334 5335 /* Relogins all the fcports of a vport 5336 * Context: dpc thread 5337 */ 5338 void qla2x00_relogin(struct scsi_qla_host *vha) 5339 { 5340 fc_port_t *fcport; 5341 int status, relogin_needed = 0; 5342 struct event_arg ea; 5343 5344 list_for_each_entry(fcport, &vha->vp_fcports, list) { 5345 /* 5346 * If the port is not ONLINE then try to login 5347 * to it if we haven't run out of retries. 
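 * Fabric topologies are pushed through the relogin state machine
 * via qla24xx_handle_relogin_event(); NL (arbitrated-loop)
 * topologies retry qla2x00_local_device_login() directly.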
5348 */ 5349 if (atomic_read(&fcport->state) != FCS_ONLINE && 5350 fcport->login_retry) { 5351 if (fcport->scan_state != QLA_FCPORT_FOUND || 5352 fcport->disc_state == DSC_LOGIN_COMPLETE) 5353 continue; 5354 5355 if (fcport->flags & (FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE) || 5356 fcport->disc_state == DSC_DELETE_PEND) { 5357 relogin_needed = 1; 5358 } else { 5359 if (vha->hw->current_topology != ISP_CFG_NL) { 5360 memset(&ea, 0, sizeof(ea)); 5361 ea.fcport = fcport; 5362 qla24xx_handle_relogin_event(vha, &ea); 5363 } else if (vha->hw->current_topology == 5364 ISP_CFG_NL) { 5365 fcport->login_retry--; 5366 status = 5367 qla2x00_local_device_login(vha, 5368 fcport); 5369 if (status == QLA_SUCCESS) { 5370 fcport->old_loop_id = 5371 fcport->loop_id; 5372 ql_dbg(ql_dbg_disc, vha, 0x2003, 5373 "Port login OK: logged in ID 0x%x.\n", 5374 fcport->loop_id); 5375 qla2x00_update_fcport 5376 (vha, fcport); 5377 } else if (status == 1) { 5378 set_bit(RELOGIN_NEEDED, 5379 &vha->dpc_flags); 5380 /* retry the login again */ 5381 ql_dbg(ql_dbg_disc, vha, 0x2007, 5382 "Retrying %d login again loop_id 0x%x.\n", 5383 fcport->login_retry, 5384 fcport->loop_id); 5385 } else { 5386 fcport->login_retry = 0; 5387 } 5388 5389 if (fcport->login_retry == 0 && 5390 status != QLA_SUCCESS) 5391 qla2x00_clear_loop_id(fcport); 5392 } 5393 } 5394 } 5395 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) 5396 break; 5397 } 5398 5399 if (relogin_needed) 5400 set_bit(RELOGIN_NEEDED, &vha->dpc_flags); 5401 5402 ql_dbg(ql_dbg_disc, vha, 0x400e, 5403 "Relogin end.\n"); 5404 } 5405 5406 /* Schedule work on any of the dpc-workqueues */ 5407 void 5408 qla83xx_schedule_work(scsi_qla_host_t *base_vha, int work_code) 5409 { 5410 struct qla_hw_data *ha = base_vha->hw; 5411 5412 switch (work_code) { 5413 case MBA_IDC_AEN: /* 0x8200 */ 5414 if (ha->dpc_lp_wq) 5415 queue_work(ha->dpc_lp_wq, &ha->idc_aen); 5416 break; 5417 5418 case QLA83XX_NIC_CORE_RESET: /* 0x1 */ 5419 if (!ha->flags.nic_core_reset_hdlr_active) { 5420 if (ha->dpc_hp_wq) 5421 queue_work(ha->dpc_hp_wq, &ha->nic_core_reset); 5422 } else 5423 ql_dbg(ql_dbg_p3p, base_vha, 0xb05e, 5424 "NIC Core reset is already active. 
Skip " 5425 "scheduling it again.\n"); 5426 break; 5427 case QLA83XX_IDC_STATE_HANDLER: /* 0x2 */ 5428 if (ha->dpc_hp_wq) 5429 queue_work(ha->dpc_hp_wq, &ha->idc_state_handler); 5430 break; 5431 case QLA83XX_NIC_CORE_UNRECOVERABLE: /* 0x3 */ 5432 if (ha->dpc_hp_wq) 5433 queue_work(ha->dpc_hp_wq, &ha->nic_core_unrecoverable); 5434 break; 5435 default: 5436 ql_log(ql_log_warn, base_vha, 0xb05f, 5437 "Unknown work-code=0x%x.\n", work_code); 5438 } 5439 5440 return; 5441 } 5442 5443 /* Work: Perform NIC Core Unrecoverable state handling */ 5444 void 5445 qla83xx_nic_core_unrecoverable_work(struct work_struct *work) 5446 { 5447 struct qla_hw_data *ha = 5448 container_of(work, struct qla_hw_data, nic_core_unrecoverable); 5449 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 5450 uint32_t dev_state = 0; 5451 5452 qla83xx_idc_lock(base_vha, 0); 5453 qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state); 5454 qla83xx_reset_ownership(base_vha); 5455 if (ha->flags.nic_core_reset_owner) { 5456 ha->flags.nic_core_reset_owner = 0; 5457 qla83xx_wr_reg(base_vha, QLA83XX_IDC_DEV_STATE, 5458 QLA8XXX_DEV_FAILED); 5459 ql_log(ql_log_info, base_vha, 0xb060, "HW State: FAILED.\n"); 5460 qla83xx_schedule_work(base_vha, QLA83XX_IDC_STATE_HANDLER); 5461 } 5462 qla83xx_idc_unlock(base_vha, 0); 5463 } 5464 5465 /* Work: Execute IDC state handler */ 5466 void 5467 qla83xx_idc_state_handler_work(struct work_struct *work) 5468 { 5469 struct qla_hw_data *ha = 5470 container_of(work, struct qla_hw_data, idc_state_handler); 5471 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 5472 uint32_t dev_state = 0; 5473 5474 qla83xx_idc_lock(base_vha, 0); 5475 qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state); 5476 if (dev_state == QLA8XXX_DEV_FAILED || 5477 dev_state == QLA8XXX_DEV_NEED_QUIESCENT) 5478 qla83xx_idc_state_handler(base_vha); 5479 qla83xx_idc_unlock(base_vha, 0); 5480 } 5481 5482 static int 5483 qla83xx_check_nic_core_fw_alive(scsi_qla_host_t *base_vha) 5484 { 5485 int rval = QLA_SUCCESS; 5486 unsigned long heart_beat_wait = jiffies + (1 * HZ); 5487 uint32_t heart_beat_counter1, heart_beat_counter2; 5488 5489 do { 5490 if (time_after(jiffies, heart_beat_wait)) { 5491 ql_dbg(ql_dbg_p3p, base_vha, 0xb07c, 5492 "Nic Core f/w is not alive.\n"); 5493 rval = QLA_FUNCTION_FAILED; 5494 break; 5495 } 5496 5497 qla83xx_idc_lock(base_vha, 0); 5498 qla83xx_rd_reg(base_vha, QLA83XX_FW_HEARTBEAT, 5499 &heart_beat_counter1); 5500 qla83xx_idc_unlock(base_vha, 0); 5501 msleep(100); 5502 qla83xx_idc_lock(base_vha, 0); 5503 qla83xx_rd_reg(base_vha, QLA83XX_FW_HEARTBEAT, 5504 &heart_beat_counter2); 5505 qla83xx_idc_unlock(base_vha, 0); 5506 } while (heart_beat_counter1 == heart_beat_counter2); 5507 5508 return rval; 5509 } 5510 5511 /* Work: Perform NIC Core Reset handling */ 5512 void 5513 qla83xx_nic_core_reset_work(struct work_struct *work) 5514 { 5515 struct qla_hw_data *ha = 5516 container_of(work, struct qla_hw_data, nic_core_reset); 5517 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 5518 uint32_t dev_state = 0; 5519 5520 if (IS_QLA2031(ha)) { 5521 if (qla2xxx_mctp_dump(base_vha) != QLA_SUCCESS) 5522 ql_log(ql_log_warn, base_vha, 0xb081, 5523 "Failed to dump mctp\n"); 5524 return; 5525 } 5526 5527 if (!ha->flags.nic_core_reset_hdlr_active) { 5528 if (qla83xx_check_nic_core_fw_alive(base_vha) == QLA_SUCCESS) { 5529 qla83xx_idc_lock(base_vha, 0); 5530 qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, 5531 &dev_state); 5532 qla83xx_idc_unlock(base_vha, 0); 5533 if (dev_state != 
QLA8XXX_DEV_NEED_RESET) { 5534 ql_dbg(ql_dbg_p3p, base_vha, 0xb07a, 5535 "Nic Core f/w is alive.\n"); 5536 return; 5537 } 5538 } 5539 5540 ha->flags.nic_core_reset_hdlr_active = 1; 5541 if (qla83xx_nic_core_reset(base_vha)) { 5542 /* NIC Core reset failed. */ 5543 ql_dbg(ql_dbg_p3p, base_vha, 0xb061, 5544 "NIC Core reset failed.\n"); 5545 } 5546 ha->flags.nic_core_reset_hdlr_active = 0; 5547 } 5548 } 5549 5550 /* Work: Handle 8200 IDC aens */ 5551 void 5552 qla83xx_service_idc_aen(struct work_struct *work) 5553 { 5554 struct qla_hw_data *ha = 5555 container_of(work, struct qla_hw_data, idc_aen); 5556 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 5557 uint32_t dev_state, idc_control; 5558 5559 qla83xx_idc_lock(base_vha, 0); 5560 qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state); 5561 qla83xx_rd_reg(base_vha, QLA83XX_IDC_CONTROL, &idc_control); 5562 qla83xx_idc_unlock(base_vha, 0); 5563 if (dev_state == QLA8XXX_DEV_NEED_RESET) { 5564 if (idc_control & QLA83XX_IDC_GRACEFUL_RESET) { 5565 ql_dbg(ql_dbg_p3p, base_vha, 0xb062, 5566 "Application requested NIC Core Reset.\n"); 5567 qla83xx_schedule_work(base_vha, QLA83XX_NIC_CORE_RESET); 5568 } else if (qla83xx_check_nic_core_fw_alive(base_vha) == 5569 QLA_SUCCESS) { 5570 ql_dbg(ql_dbg_p3p, base_vha, 0xb07b, 5571 "Other protocol driver requested NIC Core Reset.\n"); 5572 qla83xx_schedule_work(base_vha, QLA83XX_NIC_CORE_RESET); 5573 } 5574 } else if (dev_state == QLA8XXX_DEV_FAILED || 5575 dev_state == QLA8XXX_DEV_NEED_QUIESCENT) { 5576 qla83xx_schedule_work(base_vha, QLA83XX_IDC_STATE_HANDLER); 5577 } 5578 } 5579 5580 static void 5581 qla83xx_wait_logic(void) 5582 { 5583 int i; 5584 5585 /* Yield CPU */ 5586 if (!in_interrupt()) { 5587 /* 5588 * Wait about 100ms before retrying again. 5589 * This controls the number of retries for a single 5590 * lock operation.
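 * In atomic context we cannot sleep, so fall back to a short
 * cpu_relax() spin instead.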
5591 */ 5592 msleep(100); 5593 schedule(); 5594 } else { 5595 for (i = 0; i < 20; i++) 5596 cpu_relax(); /* This a nop instr on i386 */ 5597 } 5598 } 5599 5600 static int 5601 qla83xx_force_lock_recovery(scsi_qla_host_t *base_vha) 5602 { 5603 int rval; 5604 uint32_t data; 5605 uint32_t idc_lck_rcvry_stage_mask = 0x3; 5606 uint32_t idc_lck_rcvry_owner_mask = 0x3c; 5607 struct qla_hw_data *ha = base_vha->hw; 5608 5609 ql_dbg(ql_dbg_p3p, base_vha, 0xb086, 5610 "Trying force recovery of the IDC lock.\n"); 5611 5612 rval = qla83xx_rd_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY, &data); 5613 if (rval) 5614 return rval; 5615 5616 if ((data & idc_lck_rcvry_stage_mask) > 0) { 5617 return QLA_SUCCESS; 5618 } else { 5619 data = (IDC_LOCK_RECOVERY_STAGE1) | (ha->portnum << 2); 5620 rval = qla83xx_wr_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY, 5621 data); 5622 if (rval) 5623 return rval; 5624 5625 msleep(200); 5626 5627 rval = qla83xx_rd_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY, 5628 &data); 5629 if (rval) 5630 return rval; 5631 5632 if (((data & idc_lck_rcvry_owner_mask) >> 2) == ha->portnum) { 5633 data &= (IDC_LOCK_RECOVERY_STAGE2 | 5634 ~(idc_lck_rcvry_stage_mask)); 5635 rval = qla83xx_wr_reg(base_vha, 5636 QLA83XX_IDC_LOCK_RECOVERY, data); 5637 if (rval) 5638 return rval; 5639 5640 /* Forcefully perform IDC UnLock */ 5641 rval = qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_UNLOCK, 5642 &data); 5643 if (rval) 5644 return rval; 5645 /* Clear lock-id by setting 0xff */ 5646 rval = qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID, 5647 0xff); 5648 if (rval) 5649 return rval; 5650 /* Clear lock-recovery by setting 0x0 */ 5651 rval = qla83xx_wr_reg(base_vha, 5652 QLA83XX_IDC_LOCK_RECOVERY, 0x0); 5653 if (rval) 5654 return rval; 5655 } else 5656 return QLA_SUCCESS; 5657 } 5658 5659 return rval; 5660 } 5661 5662 static int 5663 qla83xx_idc_lock_recovery(scsi_qla_host_t *base_vha) 5664 { 5665 int rval = QLA_SUCCESS; 5666 uint32_t o_drv_lockid, n_drv_lockid; 5667 unsigned long lock_recovery_timeout; 5668 5669 lock_recovery_timeout = jiffies + QLA83XX_MAX_LOCK_RECOVERY_WAIT; 5670 retry_lockid: 5671 rval = qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, &o_drv_lockid); 5672 if (rval) 5673 goto exit; 5674 5675 /* MAX wait time before forcing IDC Lock recovery = 2 secs */ 5676 if (time_after_eq(jiffies, lock_recovery_timeout)) { 5677 if (qla83xx_force_lock_recovery(base_vha) == QLA_SUCCESS) 5678 return QLA_SUCCESS; 5679 else 5680 return QLA_FUNCTION_FAILED; 5681 } 5682 5683 rval = qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, &n_drv_lockid); 5684 if (rval) 5685 goto exit; 5686 5687 if (o_drv_lockid == n_drv_lockid) { 5688 qla83xx_wait_logic(); 5689 goto retry_lockid; 5690 } else 5691 return QLA_SUCCESS; 5692 5693 exit: 5694 return rval; 5695 } 5696 5697 void 5698 qla83xx_idc_lock(scsi_qla_host_t *base_vha, uint16_t requester_id) 5699 { 5700 uint32_t data; 5701 uint32_t lock_owner; 5702 struct qla_hw_data *ha = base_vha->hw; 5703 5704 /* IDC-lock implementation using driver-lock/lock-id remote registers */ 5705 retry_lock: 5706 if (qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCK, &data) 5707 == QLA_SUCCESS) { 5708 if (data) { 5709 /* Setting lock-id to our function-number */ 5710 qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID, 5711 ha->portnum); 5712 } else { 5713 qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, 5714 &lock_owner); 5715 ql_dbg(ql_dbg_p3p, base_vha, 0xb063, 5716 "Failed to acquire IDC lock, acquired by %d, " 5717 "retrying...\n", lock_owner); 5718 5719 /* Retry/Perform IDC-Lock recovery */ 5720 if 
(qla83xx_idc_lock_recovery(base_vha) 5721 == QLA_SUCCESS) { 5722 qla83xx_wait_logic(); 5723 goto retry_lock; 5724 } else 5725 ql_log(ql_log_warn, base_vha, 0xb075, 5726 "IDC Lock recovery FAILED.\n"); 5727 } 5728 5729 } 5730 5731 return; 5732 } 5733 5734 void 5735 qla83xx_idc_unlock(scsi_qla_host_t *base_vha, uint16_t requester_id) 5736 { 5737 #if 0 5738 uint16_t options = (requester_id << 15) | BIT_7; 5739 #endif 5740 uint16_t retry; 5741 uint32_t data; 5742 struct qla_hw_data *ha = base_vha->hw; 5743 5744 /* IDC-unlock implementation using driver-unlock/lock-id 5745 * remote registers 5746 */ 5747 retry = 0; 5748 retry_unlock: 5749 if (qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, &data) 5750 == QLA_SUCCESS) { 5751 if (data == ha->portnum) { 5752 qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_UNLOCK, &data); 5753 /* Clearing lock-id by setting 0xff */ 5754 qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID, 0xff); 5755 } else if (retry < 10) { 5756 /* SV: XXX: IDC unlock retrying needed here? */ 5757 5758 /* Retry for IDC-unlock */ 5759 qla83xx_wait_logic(); 5760 retry++; 5761 ql_dbg(ql_dbg_p3p, base_vha, 0xb064, 5762 "Failed to release IDC lock, retrying=%d\n", retry); 5763 goto retry_unlock; 5764 } 5765 } else if (retry < 10) { 5766 /* Retry for IDC-unlock */ 5767 qla83xx_wait_logic(); 5768 retry++; 5769 ql_dbg(ql_dbg_p3p, base_vha, 0xb065, 5770 "Failed to read drv-lockid, retrying=%d\n", retry); 5771 goto retry_unlock; 5772 } 5773 5774 return; 5775 5776 #if 0 5777 /* XXX: IDC-unlock implementation using access-control mbx */ 5778 retry = 0; 5779 retry_unlock2: 5780 if (qla83xx_access_control(base_vha, options, 0, 0, NULL)) { 5781 if (retry < 10) { 5782 /* Retry for IDC-unlock */ 5783 qla83xx_wait_logic(); 5784 retry++; 5785 ql_dbg(ql_dbg_p3p, base_vha, 0xb066, 5786 "Failed to release IDC lock, retrying=%d\n", retry); 5787 goto retry_unlock2; 5788 } 5789 } 5790 5791 return; 5792 #endif 5793 } 5794 5795 int 5796 __qla83xx_set_drv_presence(scsi_qla_host_t *vha) 5797 { 5798 int rval = QLA_SUCCESS; 5799 struct qla_hw_data *ha = vha->hw; 5800 uint32_t drv_presence; 5801 5802 rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence); 5803 if (rval == QLA_SUCCESS) { 5804 drv_presence |= (1 << ha->portnum); 5805 rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRV_PRESENCE, 5806 drv_presence); 5807 } 5808 5809 return rval; 5810 } 5811 5812 int 5813 qla83xx_set_drv_presence(scsi_qla_host_t *vha) 5814 { 5815 int rval = QLA_SUCCESS; 5816 5817 qla83xx_idc_lock(vha, 0); 5818 rval = __qla83xx_set_drv_presence(vha); 5819 qla83xx_idc_unlock(vha, 0); 5820 5821 return rval; 5822 } 5823 5824 int 5825 __qla83xx_clear_drv_presence(scsi_qla_host_t *vha) 5826 { 5827 int rval = QLA_SUCCESS; 5828 struct qla_hw_data *ha = vha->hw; 5829 uint32_t drv_presence; 5830 5831 rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence); 5832 if (rval == QLA_SUCCESS) { 5833 drv_presence &= ~(1 << ha->portnum); 5834 rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRV_PRESENCE, 5835 drv_presence); 5836 } 5837 5838 return rval; 5839 } 5840 5841 int 5842 qla83xx_clear_drv_presence(scsi_qla_host_t *vha) 5843 { 5844 int rval = QLA_SUCCESS; 5845 5846 qla83xx_idc_lock(vha, 0); 5847 rval = __qla83xx_clear_drv_presence(vha); 5848 qla83xx_idc_unlock(vha, 0); 5849 5850 return rval; 5851 } 5852 5853 static void 5854 qla83xx_need_reset_handler(scsi_qla_host_t *vha) 5855 { 5856 struct qla_hw_data *ha = vha->hw; 5857 uint32_t drv_ack, drv_presence; 5858 unsigned long ack_timeout; 5859 5860 /* Wait for IDC ACK from all functions (DRV-ACK == 
DRV-PRESENCE) */ 5861 ack_timeout = jiffies + (ha->fcoe_reset_timeout * HZ); 5862 while (1) { 5863 qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack); 5864 qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence); 5865 if ((drv_ack & drv_presence) == drv_presence) 5866 break; 5867 5868 if (time_after_eq(jiffies, ack_timeout)) { 5869 ql_log(ql_log_warn, vha, 0xb067, 5870 "RESET ACK TIMEOUT! drv_presence=0x%x " 5871 "drv_ack=0x%x\n", drv_presence, drv_ack); 5872 /* 5873 * The function(s) which did not ack in time are forced 5874 * to withdraw any further participation in the IDC 5875 * reset. 5876 */ 5877 if (drv_ack != drv_presence) 5878 qla83xx_wr_reg(vha, QLA83XX_IDC_DRV_PRESENCE, 5879 drv_ack); 5880 break; 5881 } 5882 5883 qla83xx_idc_unlock(vha, 0); 5884 msleep(1000); 5885 qla83xx_idc_lock(vha, 0); 5886 } 5887 5888 qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_COLD); 5889 ql_log(ql_log_info, vha, 0xb068, "HW State: COLD/RE-INIT.\n"); 5890 } 5891 5892 static int 5893 qla83xx_device_bootstrap(scsi_qla_host_t *vha) 5894 { 5895 int rval = QLA_SUCCESS; 5896 uint32_t idc_control; 5897 5898 qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_INITIALIZING); 5899 ql_log(ql_log_info, vha, 0xb069, "HW State: INITIALIZING.\n"); 5900 5901 /* Clearing IDC-Control Graceful-Reset Bit before resetting f/w */ 5902 __qla83xx_get_idc_control(vha, &idc_control); 5903 idc_control &= ~QLA83XX_IDC_GRACEFUL_RESET; 5904 __qla83xx_set_idc_control(vha, idc_control); 5905 5906 qla83xx_idc_unlock(vha, 0); 5907 rval = qla83xx_restart_nic_firmware(vha); 5908 qla83xx_idc_lock(vha, 0); 5909 5910 if (rval != QLA_SUCCESS) { 5911 ql_log(ql_log_fatal, vha, 0xb06a, 5912 "Failed to restart NIC f/w.\n"); 5913 qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_FAILED); 5914 ql_log(ql_log_info, vha, 0xb06b, "HW State: FAILED.\n"); 5915 } else { 5916 ql_dbg(ql_dbg_p3p, vha, 0xb06c, 5917 "Success in restarting NIC f/w.\n"); 5918 qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_READY); 5919 ql_log(ql_log_info, vha, 0xb06d, "HW State: READY.\n"); 5920 } 5921 5922 return rval; 5923 } 5924 5925 /* Assumes idc_lock always held on entry */ 5926 int 5927 qla83xx_idc_state_handler(scsi_qla_host_t *base_vha) 5928 { 5929 struct qla_hw_data *ha = base_vha->hw; 5930 int rval = QLA_SUCCESS; 5931 unsigned long dev_init_timeout; 5932 uint32_t dev_state; 5933 5934 /* Wait for MAX-INIT-TIMEOUT for the device to go ready */ 5935 dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout * HZ); 5936 5937 while (1) { 5938 5939 if (time_after_eq(jiffies, dev_init_timeout)) { 5940 ql_log(ql_log_warn, base_vha, 0xb06e, 5941 "Initialization TIMEOUT!\n"); 5942 /* Init timeout. Disable further NIC Core 5943 * communication.
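 * Other functions polling the shared IDC device-state register
 * will observe FAILED, and this handler then exits through the
 * QLA8XXX_DEV_FAILED case below.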
5944 */ 5945 qla83xx_wr_reg(base_vha, QLA83XX_IDC_DEV_STATE, 5946 QLA8XXX_DEV_FAILED); 5947 ql_log(ql_log_info, base_vha, 0xb06f, 5948 "HW State: FAILED.\n"); 5949 } 5950 5951 qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state); 5952 switch (dev_state) { 5953 case QLA8XXX_DEV_READY: 5954 if (ha->flags.nic_core_reset_owner) 5955 qla83xx_idc_audit(base_vha, 5956 IDC_AUDIT_COMPLETION); 5957 ha->flags.nic_core_reset_owner = 0; 5958 ql_dbg(ql_dbg_p3p, base_vha, 0xb070, 5959 "Reset_owner reset by 0x%x.\n", 5960 ha->portnum); 5961 goto exit; 5962 case QLA8XXX_DEV_COLD: 5963 if (ha->flags.nic_core_reset_owner) 5964 rval = qla83xx_device_bootstrap(base_vha); 5965 else { 5966 /* Wait for AEN to change device-state */ 5967 qla83xx_idc_unlock(base_vha, 0); 5968 msleep(1000); 5969 qla83xx_idc_lock(base_vha, 0); 5970 } 5971 break; 5972 case QLA8XXX_DEV_INITIALIZING: 5973 /* Wait for AEN to change device-state */ 5974 qla83xx_idc_unlock(base_vha, 0); 5975 msleep(1000); 5976 qla83xx_idc_lock(base_vha, 0); 5977 break; 5978 case QLA8XXX_DEV_NEED_RESET: 5979 if (!ql2xdontresethba && ha->flags.nic_core_reset_owner) 5980 qla83xx_need_reset_handler(base_vha); 5981 else { 5982 /* Wait for AEN to change device-state */ 5983 qla83xx_idc_unlock(base_vha, 0); 5984 msleep(1000); 5985 qla83xx_idc_lock(base_vha, 0); 5986 } 5987 /* reset timeout value after need reset handler */ 5988 dev_init_timeout = jiffies + 5989 (ha->fcoe_dev_init_timeout * HZ); 5990 break; 5991 case QLA8XXX_DEV_NEED_QUIESCENT: 5992 /* XXX: DEBUG for now */ 5993 qla83xx_idc_unlock(base_vha, 0); 5994 msleep(1000); 5995 qla83xx_idc_lock(base_vha, 0); 5996 break; 5997 case QLA8XXX_DEV_QUIESCENT: 5998 /* XXX: DEBUG for now */ 5999 if (ha->flags.quiesce_owner) 6000 goto exit; 6001 6002 qla83xx_idc_unlock(base_vha, 0); 6003 msleep(1000); 6004 qla83xx_idc_lock(base_vha, 0); 6005 dev_init_timeout = jiffies + 6006 (ha->fcoe_dev_init_timeout * HZ); 6007 break; 6008 case QLA8XXX_DEV_FAILED: 6009 if (ha->flags.nic_core_reset_owner) 6010 qla83xx_idc_audit(base_vha, 6011 IDC_AUDIT_COMPLETION); 6012 ha->flags.nic_core_reset_owner = 0; 6013 __qla83xx_clear_drv_presence(base_vha); 6014 qla83xx_idc_unlock(base_vha, 0); 6015 qla8xxx_dev_failed_handler(base_vha); 6016 rval = QLA_FUNCTION_FAILED; 6017 qla83xx_idc_lock(base_vha, 0); 6018 goto exit; 6019 case QLA8XXX_BAD_VALUE: 6020 qla83xx_idc_unlock(base_vha, 0); 6021 msleep(1000); 6022 qla83xx_idc_lock(base_vha, 0); 6023 break; 6024 default: 6025 ql_log(ql_log_warn, base_vha, 0xb071, 6026 "Unknown Device State: %x.\n", dev_state); 6027 qla83xx_idc_unlock(base_vha, 0); 6028 qla8xxx_dev_failed_handler(base_vha); 6029 rval = QLA_FUNCTION_FAILED; 6030 qla83xx_idc_lock(base_vha, 0); 6031 goto exit; 6032 } 6033 } 6034 6035 exit: 6036 return rval; 6037 } 6038 6039 void 6040 qla2x00_disable_board_on_pci_error(struct work_struct *work) 6041 { 6042 struct qla_hw_data *ha = container_of(work, struct qla_hw_data, 6043 board_disable); 6044 struct pci_dev *pdev = ha->pdev; 6045 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 6046 6047 /* 6048 * If the UNLOADING flag is already set, let the code path that 6049 * set it first finish the unload.
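 * The UNLOADING bit thus serializes this error path against
 * qla2x00_remove_one(); only one of the two tears the adapter down.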
6050 */ 6051 if (test_bit(UNLOADING, &base_vha->dpc_flags)) 6052 return; 6053 6054 ql_log(ql_log_warn, base_vha, 0x015b, 6055 "Disabling adapter.\n"); 6056 6057 if (!atomic_read(&pdev->enable_cnt)) { 6058 ql_log(ql_log_info, base_vha, 0xfffc, 6059 "PCI device disabled, no action req for PCI error=%lx\n", 6060 base_vha->pci_flags); 6061 return; 6062 } 6063 6064 qla2x00_wait_for_sess_deletion(base_vha); 6065 6066 set_bit(UNLOADING, &base_vha->dpc_flags); 6067 6068 qla2x00_delete_all_vps(ha, base_vha); 6069 6070 qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16); 6071 6072 qla2x00_dfs_remove(base_vha); 6073 6074 qla84xx_put_chip(base_vha); 6075 6076 if (base_vha->timer_active) 6077 qla2x00_stop_timer(base_vha); 6078 6079 base_vha->flags.online = 0; 6080 6081 qla2x00_destroy_deferred_work(ha); 6082 6083 /* 6084 * Do not try to stop beacon blink as it will issue a mailbox 6085 * command. 6086 */ 6087 qla2x00_free_sysfs_attr(base_vha, false); 6088 6089 fc_remove_host(base_vha->host); 6090 6091 scsi_remove_host(base_vha->host); 6092 6093 base_vha->flags.init_done = 0; 6094 qla25xx_delete_queues(base_vha); 6095 qla2x00_free_fcports(base_vha); 6096 qla2x00_free_irqs(base_vha); 6097 qla2x00_mem_free(ha); 6098 qla82xx_md_free(base_vha); 6099 qla2x00_free_queues(ha); 6100 6101 qla2x00_unmap_iobases(ha); 6102 6103 pci_release_selected_regions(ha->pdev, ha->bars); 6104 pci_disable_pcie_error_reporting(pdev); 6105 pci_disable_device(pdev); 6106 6107 /* 6108 * Let qla2x00_remove_one() clean up qla_hw_data on device removal. 6109 */ 6110 } 6111 6112 /************************************************************************** 6113 * qla2x00_do_dpc 6114 * This kernel thread is a task that is scheduled by the interrupt handler 6115 * to perform the background processing for interrupts. 6116 * 6117 * Notes: 6118 * This task always runs in the context of a kernel thread. It 6119 * is kicked off by the driver's detect code and starts up 6120 * one per adapter. It immediately goes to sleep and waits for 6121 * some fibre event. When either the interrupt handler or 6122 * the timer routine detects an event, it will set one of the task 6123 * bits and then wake us up.
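 *
 * Sleep/wake handshake: the thread marks itself TASK_INTERRUPTIBLE
 * before calling schedule(), and each work item is claimed with
 * test_and_clear_bit(), so a flag is serviced once per wakeup even
 * if several sources set it.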
6124 **************************************************************************/ 6125 static int 6126 qla2x00_do_dpc(void *data) 6127 { 6128 scsi_qla_host_t *base_vha; 6129 struct qla_hw_data *ha; 6130 uint32_t online; 6131 struct qla_qpair *qpair; 6132 6133 ha = (struct qla_hw_data *)data; 6134 base_vha = pci_get_drvdata(ha->pdev); 6135 6136 set_user_nice(current, MIN_NICE); 6137 6138 set_current_state(TASK_INTERRUPTIBLE); 6139 while (!kthread_should_stop()) { 6140 ql_dbg(ql_dbg_dpc, base_vha, 0x4000, 6141 "DPC handler sleeping.\n"); 6142 6143 schedule(); 6144 6145 if (!base_vha->flags.init_done || ha->flags.mbox_busy) 6146 goto end_loop; 6147 6148 if (ha->flags.eeh_busy) { 6149 ql_dbg(ql_dbg_dpc, base_vha, 0x4003, 6150 "eeh_busy=%d.\n", ha->flags.eeh_busy); 6151 goto end_loop; 6152 } 6153 6154 ha->dpc_active = 1; 6155 6156 ql_dbg(ql_dbg_dpc + ql_dbg_verbose, base_vha, 0x4001, 6157 "DPC handler waking up, dpc_flags=0x%lx.\n", 6158 base_vha->dpc_flags); 6159 6160 if (test_bit(UNLOADING, &base_vha->dpc_flags)) 6161 break; 6162 6163 if (IS_P3P_TYPE(ha)) { 6164 if (IS_QLA8044(ha)) { 6165 if (test_and_clear_bit(ISP_UNRECOVERABLE, 6166 &base_vha->dpc_flags)) { 6167 qla8044_idc_lock(ha); 6168 qla8044_wr_direct(base_vha, 6169 QLA8044_CRB_DEV_STATE_INDEX, 6170 QLA8XXX_DEV_FAILED); 6171 qla8044_idc_unlock(ha); 6172 ql_log(ql_log_info, base_vha, 0x4004, 6173 "HW State: FAILED.\n"); 6174 qla8044_device_state_handler(base_vha); 6175 continue; 6176 } 6177 6178 } else { 6179 if (test_and_clear_bit(ISP_UNRECOVERABLE, 6180 &base_vha->dpc_flags)) { 6181 qla82xx_idc_lock(ha); 6182 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 6183 QLA8XXX_DEV_FAILED); 6184 qla82xx_idc_unlock(ha); 6185 ql_log(ql_log_info, base_vha, 0x0151, 6186 "HW State: FAILED.\n"); 6187 qla82xx_device_state_handler(base_vha); 6188 continue; 6189 } 6190 } 6191 6192 if (test_and_clear_bit(FCOE_CTX_RESET_NEEDED, 6193 &base_vha->dpc_flags)) { 6194 6195 ql_dbg(ql_dbg_dpc, base_vha, 0x4005, 6196 "FCoE context reset scheduled.\n"); 6197 if (!(test_and_set_bit(ABORT_ISP_ACTIVE, 6198 &base_vha->dpc_flags))) { 6199 if (qla82xx_fcoe_ctx_reset(base_vha)) { 6200 /* FCoE-ctx reset failed. 6201 * Escalate to chip-reset 6202 */ 6203 set_bit(ISP_ABORT_NEEDED, 6204 &base_vha->dpc_flags); 6205 } 6206 clear_bit(ABORT_ISP_ACTIVE, 6207 &base_vha->dpc_flags); 6208 } 6209 6210 ql_dbg(ql_dbg_dpc, base_vha, 0x4006, 6211 "FCoE context reset end.\n"); 6212 } 6213 } else if (IS_QLAFX00(ha)) { 6214 if (test_and_clear_bit(ISP_UNRECOVERABLE, 6215 &base_vha->dpc_flags)) { 6216 ql_dbg(ql_dbg_dpc, base_vha, 0x4020, 6217 "Firmware Reset Recovery\n"); 6218 if (qlafx00_reset_initialize(base_vha)) { 6219 /* Failed. Abort isp later. 
*/ 6220 if (!test_bit(UNLOADING, 6221 &base_vha->dpc_flags)) { 6222 set_bit(ISP_UNRECOVERABLE, 6223 &base_vha->dpc_flags); 6224 ql_dbg(ql_dbg_dpc, base_vha, 6225 0x4021, 6226 "Reset Recovery Failed\n"); 6227 } 6228 } 6229 } 6230 6231 if (test_and_clear_bit(FX00_TARGET_SCAN, 6232 &base_vha->dpc_flags)) { 6233 ql_dbg(ql_dbg_dpc, base_vha, 0x4022, 6234 "ISPFx00 Target Scan scheduled\n"); 6235 if (qlafx00_rescan_isp(base_vha)) { 6236 if (!test_bit(UNLOADING, 6237 &base_vha->dpc_flags)) 6238 set_bit(ISP_UNRECOVERABLE, 6239 &base_vha->dpc_flags); 6240 ql_dbg(ql_dbg_dpc, base_vha, 0x401e, 6241 "ISPFx00 Target Scan Failed\n"); 6242 } 6243 ql_dbg(ql_dbg_dpc, base_vha, 0x401f, 6244 "ISPFx00 Target Scan End\n"); 6245 } 6246 if (test_and_clear_bit(FX00_HOST_INFO_RESEND, 6247 &base_vha->dpc_flags)) { 6248 ql_dbg(ql_dbg_dpc, base_vha, 0x4023, 6249 "ISPFx00 Host Info resend scheduled\n"); 6250 qlafx00_fx_disc(base_vha, 6251 &base_vha->hw->mr.fcport, 6252 FXDISC_REG_HOST_INFO); 6253 } 6254 } 6255 6256 if (test_and_clear_bit(DETECT_SFP_CHANGE, 6257 &base_vha->dpc_flags) && 6258 !test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) { 6259 qla24xx_detect_sfp(base_vha); 6260 6261 if (ha->flags.detected_lr_sfp != 6262 ha->flags.using_lr_setting) 6263 set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags); 6264 } 6265 6266 if (test_and_clear_bit 6267 (ISP_ABORT_NEEDED, &base_vha->dpc_flags) && 6268 !test_bit(UNLOADING, &base_vha->dpc_flags)) { 6269 bool do_reset = true; 6270 6271 switch (base_vha->qlini_mode) { 6272 case QLA2XXX_INI_MODE_ENABLED: 6273 break; 6274 case QLA2XXX_INI_MODE_DISABLED: 6275 if (!qla_tgt_mode_enabled(base_vha) && 6276 !ha->flags.fw_started) 6277 do_reset = false; 6278 break; 6279 case QLA2XXX_INI_MODE_DUAL: 6280 if (!qla_dual_mode_enabled(base_vha) && 6281 !ha->flags.fw_started) 6282 do_reset = false; 6283 break; 6284 default: 6285 break; 6286 } 6287 6288 if (do_reset && !(test_and_set_bit(ABORT_ISP_ACTIVE, 6289 &base_vha->dpc_flags))) { 6290 ql_dbg(ql_dbg_dpc, base_vha, 0x4007, 6291 "ISP abort scheduled.\n"); 6292 if (ha->isp_ops->abort_isp(base_vha)) { 6293 /* failed. 
retry later */ 6294 set_bit(ISP_ABORT_NEEDED, 6295 &base_vha->dpc_flags); 6296 } 6297 clear_bit(ABORT_ISP_ACTIVE, 6298 &base_vha->dpc_flags); 6299 ql_dbg(ql_dbg_dpc, base_vha, 0x4008, 6300 "ISP abort end.\n"); 6301 } 6302 } 6303 6304 if (test_and_clear_bit(FCPORT_UPDATE_NEEDED, 6305 &base_vha->dpc_flags)) { 6306 qla2x00_update_fcports(base_vha); 6307 } 6308 6309 if (IS_QLAFX00(ha)) 6310 goto loop_resync_check; 6311 6312 if (test_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags)) { 6313 ql_dbg(ql_dbg_dpc, base_vha, 0x4009, 6314 "Quiescence mode scheduled.\n"); 6315 if (IS_P3P_TYPE(ha)) { 6316 if (IS_QLA82XX(ha)) 6317 qla82xx_device_state_handler(base_vha); 6318 if (IS_QLA8044(ha)) 6319 qla8044_device_state_handler(base_vha); 6320 clear_bit(ISP_QUIESCE_NEEDED, 6321 &base_vha->dpc_flags); 6322 if (!ha->flags.quiesce_owner) { 6323 qla2x00_perform_loop_resync(base_vha); 6324 if (IS_QLA82XX(ha)) { 6325 qla82xx_idc_lock(ha); 6326 qla82xx_clear_qsnt_ready( 6327 base_vha); 6328 qla82xx_idc_unlock(ha); 6329 } else if (IS_QLA8044(ha)) { 6330 qla8044_idc_lock(ha); 6331 qla8044_clear_qsnt_ready( 6332 base_vha); 6333 qla8044_idc_unlock(ha); 6334 } 6335 } 6336 } else { 6337 clear_bit(ISP_QUIESCE_NEEDED, 6338 &base_vha->dpc_flags); 6339 qla2x00_quiesce_io(base_vha); 6340 } 6341 ql_dbg(ql_dbg_dpc, base_vha, 0x400a, 6342 "Quiescence mode end.\n"); 6343 } 6344 6345 if (test_and_clear_bit(RESET_MARKER_NEEDED, 6346 &base_vha->dpc_flags) && 6347 (!(test_and_set_bit(RESET_ACTIVE, &base_vha->dpc_flags)))) { 6348 6349 ql_dbg(ql_dbg_dpc, base_vha, 0x400b, 6350 "Reset marker scheduled.\n"); 6351 qla2x00_rst_aen(base_vha); 6352 clear_bit(RESET_ACTIVE, &base_vha->dpc_flags); 6353 ql_dbg(ql_dbg_dpc, base_vha, 0x400c, 6354 "Reset marker end.\n"); 6355 } 6356 6357 /* Retry each device up to login retry count */ 6358 if (test_bit(RELOGIN_NEEDED, &base_vha->dpc_flags) && 6359 !test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags) && 6360 atomic_read(&base_vha->loop_state) != LOOP_DOWN) { 6361 6362 if (!base_vha->relogin_jif || 6363 time_after_eq(jiffies, base_vha->relogin_jif)) { 6364 base_vha->relogin_jif = jiffies + HZ; 6365 clear_bit(RELOGIN_NEEDED, &base_vha->dpc_flags); 6366 6367 ql_dbg(ql_dbg_disc, base_vha, 0x400d, 6368 "Relogin scheduled.\n"); 6369 qla24xx_post_relogin_work(base_vha); 6370 } 6371 } 6372 loop_resync_check: 6373 if (test_and_clear_bit(LOOP_RESYNC_NEEDED, 6374 &base_vha->dpc_flags)) { 6375 6376 ql_dbg(ql_dbg_dpc, base_vha, 0x400f, 6377 "Loop resync scheduled.\n"); 6378 6379 if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, 6380 &base_vha->dpc_flags))) { 6381 6382 qla2x00_loop_resync(base_vha); 6383 6384 clear_bit(LOOP_RESYNC_ACTIVE, 6385 &base_vha->dpc_flags); 6386 } 6387 6388 ql_dbg(ql_dbg_dpc, base_vha, 0x4010, 6389 "Loop resync end.\n"); 6390 } 6391 6392 if (IS_QLAFX00(ha)) 6393 goto intr_on_check; 6394 6395 if (test_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags) && 6396 atomic_read(&base_vha->loop_state) == LOOP_READY) { 6397 clear_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags); 6398 qla2xxx_flash_npiv_conf(base_vha); 6399 } 6400 6401 intr_on_check: 6402 if (!ha->interrupts_on) 6403 ha->isp_ops->enable_intrs(ha); 6404 6405 if (test_and_clear_bit(BEACON_BLINK_NEEDED, 6406 &base_vha->dpc_flags)) { 6407 if (ha->beacon_blink_led == 1) 6408 ha->isp_ops->beacon_blink(base_vha); 6409 } 6410 6411 /* qpair online check */ 6412 if (test_and_clear_bit(QPAIR_ONLINE_CHECK_NEEDED, 6413 &base_vha->dpc_flags)) { 6414 if (ha->flags.eeh_busy || 6415 ha->flags.pci_channel_io_perm_failure) 6416 online = 0; 6417 else 6418 online = 1; 
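/*
 * Propagate the computed state to every queue pair under mq_lock
 * so the I/O submission path sees a consistent online/offline view.
 */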
6419 6420 mutex_lock(&ha->mq_lock); 6421 list_for_each_entry(qpair, &base_vha->qp_list, 6422 qp_list_elem) 6423 qpair->online = online; 6424 mutex_unlock(&ha->mq_lock); 6425 } 6426 6427 if (test_and_clear_bit(SET_NVME_ZIO_THRESHOLD_NEEDED, 6428 &base_vha->dpc_flags)) { 6429 ql_log(ql_log_info, base_vha, 0xffffff, 6430 "nvme: SET ZIO Activity exchange threshold to %d.\n", 6431 ha->nvme_last_rptd_aen); 6432 if (qla27xx_set_zio_threshold(base_vha, 6433 ha->nvme_last_rptd_aen)) { 6434 ql_log(ql_log_info, base_vha, 0xffffff, 6435 "nvme: Unable to SET ZIO Activity exchange threshold to %d.\n", 6436 ha->nvme_last_rptd_aen); 6437 } 6438 } 6439 6440 if (test_and_clear_bit(SET_ZIO_THRESHOLD_NEEDED, 6441 &base_vha->dpc_flags)) { 6442 ql_log(ql_log_info, base_vha, 0xffffff, 6443 "SET ZIO Activity exchange threshold to %d.\n", 6444 ha->last_zio_threshold); 6445 qla27xx_set_zio_threshold(base_vha, 6446 ha->last_zio_threshold); 6447 } 6448 6449 if (!IS_QLAFX00(ha)) 6450 qla2x00_do_dpc_all_vps(base_vha); 6451 6452 if (test_and_clear_bit(N2N_LINK_RESET, 6453 &base_vha->dpc_flags)) { 6454 qla2x00_lip_reset(base_vha); 6455 } 6456 6457 ha->dpc_active = 0; 6458 end_loop: 6459 set_current_state(TASK_INTERRUPTIBLE); 6460 } /* End of while(1) */ 6461 __set_current_state(TASK_RUNNING); 6462 6463 ql_dbg(ql_dbg_dpc, base_vha, 0x4011, 6464 "DPC handler exiting.\n"); 6465 6466 /* 6467 * Make sure that nobody tries to wake us up again. 6468 */ 6469 ha->dpc_active = 0; 6470 6471 /* Cleanup any residual CTX SRBs. */ 6472 qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16); 6473 6474 return 0; 6475 } 6476 6477 void 6478 qla2xxx_wake_dpc(struct scsi_qla_host *vha) 6479 { 6480 struct qla_hw_data *ha = vha->hw; 6481 struct task_struct *t = ha->dpc_thread; 6482 6483 if (!test_bit(UNLOADING, &vha->dpc_flags) && t) 6484 wake_up_process(t); 6485 } 6486 6487 /* 6488 * qla2x00_rst_aen 6489 * Processes asynchronous reset. 6490 * 6491 * Input: 6492 * vha = adapter block pointer. 6493 */ 6494 static void 6495 qla2x00_rst_aen(scsi_qla_host_t *vha) 6496 { 6497 if (vha->flags.online && !vha->flags.reset_active && 6498 !atomic_read(&vha->loop_down_timer) && 6499 !(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))) { 6500 do { 6501 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); 6502 6503 /* 6504 * Issue marker command only when we are going to start 6505 * the I/O. 6506 */ 6507 vha->marker_needed = 1; 6508 } while (!atomic_read(&vha->loop_down_timer) && 6509 (test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags))); 6510 } 6511 } 6512 6513 /************************************************************************** 6514 * qla2x00_timer 6515 * 6516 * Description: 6517 * One second timer 6518 * 6519 * Context: Interrupt 6520 ***************************************************************************/ 6521 void 6522 qla2x00_timer(struct timer_list *t) 6523 { 6524 scsi_qla_host_t *vha = from_timer(vha, t, timer); 6525 unsigned long cpu_flags = 0; 6526 int start_dpc = 0; 6527 int index; 6528 srb_t *sp; 6529 uint16_t w; 6530 struct qla_hw_data *ha = vha->hw; 6531 struct req_que *req; 6532 6533 if (ha->flags.eeh_busy) { 6534 ql_dbg(ql_dbg_timer, vha, 0x6000, 6535 "EEH = %d, restarting timer.\n", 6536 ha->flags.eeh_busy); 6537 qla2x00_restart_timer(vha, WATCH_INTERVAL); 6538 return; 6539 } 6540 6541 /* 6542 * Hardware read to raise pending EEH errors during mailbox waits. If 6543 * the read returns -1 then disable the board.
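 * Reading PCI_VENDOR_ID is a cheap config-space access: a surprise-
 * removed or EEH-frozen device returns all ones, which
 * qla2x00_check_reg16_for_disconnect() treats as a dead board.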
6544 */ 6545 if (!pci_channel_offline(ha->pdev)) { 6546 pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w); 6547 qla2x00_check_reg16_for_disconnect(vha, w); 6548 } 6549 6550 /* Make sure qla82xx_watchdog is run only for physical port */ 6551 if (!vha->vp_idx && IS_P3P_TYPE(ha)) { 6552 if (test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) 6553 start_dpc++; 6554 if (IS_QLA82XX(ha)) 6555 qla82xx_watchdog(vha); 6556 else if (IS_QLA8044(ha)) 6557 qla8044_watchdog(vha); 6558 } 6559 6560 if (!vha->vp_idx && IS_QLAFX00(ha)) 6561 qlafx00_timer_routine(vha); 6562 6563 /* Loop down handler. */ 6564 if (atomic_read(&vha->loop_down_timer) > 0 && 6565 !(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) && 6566 !(test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags)) 6567 && vha->flags.online) { 6568 6569 if (atomic_read(&vha->loop_down_timer) == 6570 vha->loop_down_abort_time) { 6571 6572 ql_log(ql_log_info, vha, 0x6008, 6573 "Loop down - aborting the queues before time expires.\n"); 6574 6575 if (!IS_QLA2100(ha) && vha->link_down_timeout) 6576 atomic_set(&vha->loop_state, LOOP_DEAD); 6577 6578 /* 6579 * Schedule an ISP abort to return any FCP2-device 6580 * commands. 6581 */ 6582 /* NPIV - scan physical port only */ 6583 if (!vha->vp_idx) { 6584 spin_lock_irqsave(&ha->hardware_lock, 6585 cpu_flags); 6586 req = ha->req_q_map[0]; 6587 for (index = 1; 6588 index < req->num_outstanding_cmds; 6589 index++) { 6590 fc_port_t *sfcp; 6591 6592 sp = req->outstanding_cmds[index]; 6593 if (!sp) 6594 continue; 6595 if (sp->cmd_type != TYPE_SRB) 6596 continue; 6597 if (sp->type != SRB_SCSI_CMD) 6598 continue; 6599 sfcp = sp->fcport; 6600 if (!(sfcp->flags & FCF_FCP2_DEVICE)) 6601 continue; 6602 6603 if (IS_QLA82XX(ha)) 6604 set_bit(FCOE_CTX_RESET_NEEDED, 6605 &vha->dpc_flags); 6606 else 6607 set_bit(ISP_ABORT_NEEDED, 6608 &vha->dpc_flags); 6609 break; 6610 } 6611 spin_unlock_irqrestore(&ha->hardware_lock, 6612 cpu_flags); 6613 } 6614 start_dpc++; 6615 } 6616 6617 /* if the loop has been down for 4 minutes, reinit adapter */ 6618 if (atomic_dec_and_test(&vha->loop_down_timer) != 0) { 6619 if (!(vha->device_flags & DFLG_NO_CABLE)) { 6620 ql_log(ql_log_warn, vha, 0x6009, 6621 "Loop down - aborting ISP.\n"); 6622 6623 if (IS_QLA82XX(ha)) 6624 set_bit(FCOE_CTX_RESET_NEEDED, 6625 &vha->dpc_flags); 6626 else 6627 set_bit(ISP_ABORT_NEEDED, 6628 &vha->dpc_flags); 6629 } 6630 } 6631 ql_dbg(ql_dbg_timer, vha, 0x600a, 6632 "Loop down - seconds remaining %d.\n", 6633 atomic_read(&vha->loop_down_timer)); 6634 } 6635 /* Check if beacon LED needs to be blinked for physical host only */ 6636 if (!vha->vp_idx && (ha->beacon_blink_led == 1)) { 6637 /* There is no beacon_blink function for ISP82xx */ 6638 if (!IS_P3P_TYPE(ha)) { 6639 set_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags); 6640 start_dpc++; 6641 } 6642 } 6643 6644 /* Process any deferred work. */ 6645 if (!list_empty(&vha->work_list)) { 6646 unsigned long flags; 6647 bool q = false; 6648 6649 spin_lock_irqsave(&vha->work_lock, flags); 6650 if (!test_and_set_bit(IOCB_WORK_ACTIVE, &vha->dpc_flags)) 6651 q = true; 6652 spin_unlock_irqrestore(&vha->work_lock, flags); 6653 if (q) 6654 queue_work(vha->hw->wq, &vha->iocb_work); 6655 } 6656 6657 /* 6658 * FC-NVME 6659 * see if the active AEN count has changed from what was last reported. 
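 * The timer only schedules the update: it records the new count and
 * sets SET_NVME_ZIO_THRESHOLD_NEEDED, deferring the actual mailbox
 * command to the DPC thread, which is allowed to sleep.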
6660 */ 6661 if (!vha->vp_idx && 6662 (atomic_read(&ha->nvme_active_aen_cnt) != ha->nvme_last_rptd_aen) && 6663 ha->zio_mode == QLA_ZIO_MODE_6 && 6664 !ha->flags.host_shutting_down) { 6665 ql_log(ql_log_info, vha, 0x3002, 6666 "nvme: Sched: Set ZIO exchange threshold to %d.\n", 6667 ha->nvme_last_rptd_aen); 6668 ha->nvme_last_rptd_aen = atomic_read(&ha->nvme_active_aen_cnt); 6669 set_bit(SET_NVME_ZIO_THRESHOLD_NEEDED, &vha->dpc_flags); 6670 start_dpc++; 6671 } 6672 6673 if (!vha->vp_idx && 6674 (atomic_read(&ha->zio_threshold) != ha->last_zio_threshold) && 6675 (ha->zio_mode == QLA_ZIO_MODE_6) && 6676 (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))) { 6677 ql_log(ql_log_info, vha, 0x3002, 6678 "Sched: Set ZIO exchange threshold to %d.\n", 6679 ha->last_zio_threshold); 6680 ha->last_zio_threshold = atomic_read(&ha->zio_threshold); 6681 set_bit(SET_ZIO_THRESHOLD_NEEDED, &vha->dpc_flags); 6682 start_dpc++; 6683 } 6684 6685 /* Schedule the DPC routine if needed */ 6686 if ((test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) || 6687 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) || 6688 test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags) || 6689 start_dpc || 6690 test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) || 6691 test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags) || 6692 test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags) || 6693 test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) || 6694 test_bit(VP_DPC_NEEDED, &vha->dpc_flags) || 6695 test_bit(RELOGIN_NEEDED, &vha->dpc_flags))) { 6696 ql_dbg(ql_dbg_timer, vha, 0x600b, 6697 "isp_abort_needed=%d loop_resync_needed=%d " 6698 "fcport_update_needed=%d start_dpc=%d " 6699 "reset_marker_needed=%d", 6700 test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags), 6701 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags), 6702 test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags), 6703 start_dpc, 6704 test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags)); 6705 ql_dbg(ql_dbg_timer, vha, 0x600c, 6706 "beacon_blink_needed=%d isp_unrecoverable=%d " 6707 "fcoe_ctx_reset_needed=%d vp_dpc_needed=%d " 6708 "relogin_needed=%d.\n", 6709 test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags), 6710 test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags), 6711 test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags), 6712 test_bit(VP_DPC_NEEDED, &vha->dpc_flags), 6713 test_bit(RELOGIN_NEEDED, &vha->dpc_flags)); 6714 qla2xxx_wake_dpc(vha); 6715 } 6716 6717 qla2x00_restart_timer(vha, WATCH_INTERVAL); 6718 } 6719 6720 /* Firmware interface routines. 
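 *
 * Images are fetched on demand via request_firmware(), cached in
 * qla_fw_blobs[] under qla_fw_lock, and released again by
 * qla2x00_release_firmware() at module exit.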
*/ 6721 6722 #define FW_ISP21XX 0 6723 #define FW_ISP22XX 1 6724 #define FW_ISP2300 2 6725 #define FW_ISP2322 3 6726 #define FW_ISP24XX 4 6727 #define FW_ISP25XX 5 6728 #define FW_ISP81XX 6 6729 #define FW_ISP82XX 7 6730 #define FW_ISP2031 8 6731 #define FW_ISP8031 9 6732 #define FW_ISP27XX 10 6733 #define FW_ISP28XX 11 6734 6735 #define FW_FILE_ISP21XX "ql2100_fw.bin" 6736 #define FW_FILE_ISP22XX "ql2200_fw.bin" 6737 #define FW_FILE_ISP2300 "ql2300_fw.bin" 6738 #define FW_FILE_ISP2322 "ql2322_fw.bin" 6739 #define FW_FILE_ISP24XX "ql2400_fw.bin" 6740 #define FW_FILE_ISP25XX "ql2500_fw.bin" 6741 #define FW_FILE_ISP81XX "ql8100_fw.bin" 6742 #define FW_FILE_ISP82XX "ql8200_fw.bin" 6743 #define FW_FILE_ISP2031 "ql2600_fw.bin" 6744 #define FW_FILE_ISP8031 "ql8300_fw.bin" 6745 #define FW_FILE_ISP27XX "ql2700_fw.bin" 6746 #define FW_FILE_ISP28XX "ql2800_fw.bin" 6747 6748 6749 static DEFINE_MUTEX(qla_fw_lock); 6750 6751 static struct fw_blob qla_fw_blobs[] = { 6752 { .name = FW_FILE_ISP21XX, .segs = { 0x1000, 0 }, }, 6753 { .name = FW_FILE_ISP22XX, .segs = { 0x1000, 0 }, }, 6754 { .name = FW_FILE_ISP2300, .segs = { 0x800, 0 }, }, 6755 { .name = FW_FILE_ISP2322, .segs = { 0x800, 0x1c000, 0x1e000, 0 }, }, 6756 { .name = FW_FILE_ISP24XX, }, 6757 { .name = FW_FILE_ISP25XX, }, 6758 { .name = FW_FILE_ISP81XX, }, 6759 { .name = FW_FILE_ISP82XX, }, 6760 { .name = FW_FILE_ISP2031, }, 6761 { .name = FW_FILE_ISP8031, }, 6762 { .name = FW_FILE_ISP27XX, }, 6763 { .name = FW_FILE_ISP28XX, }, 6764 { .name = NULL, }, 6765 }; 6766 6767 struct fw_blob * 6768 qla2x00_request_firmware(scsi_qla_host_t *vha) 6769 { 6770 struct qla_hw_data *ha = vha->hw; 6771 struct fw_blob *blob; 6772 6773 if (IS_QLA2100(ha)) { 6774 blob = &qla_fw_blobs[FW_ISP21XX]; 6775 } else if (IS_QLA2200(ha)) { 6776 blob = &qla_fw_blobs[FW_ISP22XX]; 6777 } else if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) { 6778 blob = &qla_fw_blobs[FW_ISP2300]; 6779 } else if (IS_QLA2322(ha) || IS_QLA6322(ha)) { 6780 blob = &qla_fw_blobs[FW_ISP2322]; 6781 } else if (IS_QLA24XX_TYPE(ha)) { 6782 blob = &qla_fw_blobs[FW_ISP24XX]; 6783 } else if (IS_QLA25XX(ha)) { 6784 blob = &qla_fw_blobs[FW_ISP25XX]; 6785 } else if (IS_QLA81XX(ha)) { 6786 blob = &qla_fw_blobs[FW_ISP81XX]; 6787 } else if (IS_QLA82XX(ha)) { 6788 blob = &qla_fw_blobs[FW_ISP82XX]; 6789 } else if (IS_QLA2031(ha)) { 6790 blob = &qla_fw_blobs[FW_ISP2031]; 6791 } else if (IS_QLA8031(ha)) { 6792 blob = &qla_fw_blobs[FW_ISP8031]; 6793 } else if (IS_QLA27XX(ha)) { 6794 blob = &qla_fw_blobs[FW_ISP27XX]; 6795 } else if (IS_QLA28XX(ha)) { 6796 blob = &qla_fw_blobs[FW_ISP28XX]; 6797 } else { 6798 return NULL; 6799 } 6800 6801 if (!blob->name) 6802 return NULL; 6803 6804 mutex_lock(&qla_fw_lock); 6805 if (blob->fw) 6806 goto out; 6807 6808 if (request_firmware(&blob->fw, blob->name, &ha->pdev->dev)) { 6809 ql_log(ql_log_warn, vha, 0x0063, 6810 "Failed to load firmware image (%s).\n", blob->name); 6811 blob->fw = NULL; 6812 blob = NULL; 6813 } 6814 6815 out: 6816 mutex_unlock(&qla_fw_lock); 6817 return blob; 6818 } 6819 6820 static void 6821 qla2x00_release_firmware(void) 6822 { 6823 struct fw_blob *blob; 6824 6825 mutex_lock(&qla_fw_lock); 6826 for (blob = qla_fw_blobs; blob->name; blob++) 6827 release_firmware(blob->fw); 6828 mutex_unlock(&qla_fw_lock); 6829 } 6830 6831 static void qla_pci_error_cleanup(scsi_qla_host_t *vha) 6832 { 6833 struct qla_hw_data *ha = vha->hw; 6834 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 6835 struct qla_qpair *qpair = NULL; 6836 struct scsi_qla_host *vp; 6837 
fc_port_t *fcport; 6838 int i; 6839 unsigned long flags; 6840 6841 ha->chip_reset++; 6842 6843 ha->base_qpair->chip_reset = ha->chip_reset; 6844 for (i = 0; i < ha->max_qpairs; i++) { 6845 if (ha->queue_pair_map[i]) 6846 ha->queue_pair_map[i]->chip_reset = 6847 ha->base_qpair->chip_reset; 6848 } 6849 6850 /* purge MBox commands */ 6851 if (atomic_read(&ha->num_pend_mbx_stage3)) { 6852 clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags); 6853 complete(&ha->mbx_intr_comp); 6854 } 6855 6856 i = 0; 6857 6858 while (atomic_read(&ha->num_pend_mbx_stage3) || 6859 atomic_read(&ha->num_pend_mbx_stage2) || 6860 atomic_read(&ha->num_pend_mbx_stage1)) { 6861 msleep(20); 6862 i++; 6863 if (i > 50) 6864 break; 6865 } 6866 6867 ha->flags.purge_mbox = 0; 6868 6869 mutex_lock(&ha->mq_lock); 6870 list_for_each_entry(qpair, &base_vha->qp_list, qp_list_elem) 6871 qpair->online = 0; 6872 mutex_unlock(&ha->mq_lock); 6873 6874 qla2x00_mark_all_devices_lost(vha); 6875 6876 spin_lock_irqsave(&ha->vport_slock, flags); 6877 list_for_each_entry(vp, &ha->vp_list, list) { 6878 atomic_inc(&vp->vref_count); 6879 spin_unlock_irqrestore(&ha->vport_slock, flags); 6880 qla2x00_mark_all_devices_lost(vp); 6881 spin_lock_irqsave(&ha->vport_slock, flags); 6882 atomic_dec(&vp->vref_count); 6883 } 6884 spin_unlock_irqrestore(&ha->vport_slock, flags); 6885 6886 /* Clear all async request states across all VPs. */ 6887 list_for_each_entry(fcport, &vha->vp_fcports, list) 6888 fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT); 6889 6890 spin_lock_irqsave(&ha->vport_slock, flags); 6891 list_for_each_entry(vp, &ha->vp_list, list) { 6892 atomic_inc(&vp->vref_count); 6893 spin_unlock_irqrestore(&ha->vport_slock, flags); 6894 list_for_each_entry(fcport, &vp->vp_fcports, list) 6895 fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT); 6896 spin_lock_irqsave(&ha->vport_slock, flags); 6897 atomic_dec(&vp->vref_count); 6898 } 6899 spin_unlock_irqrestore(&ha->vport_slock, flags); 6900 } 6901 6902 6903 static pci_ers_result_t 6904 qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) 6905 { 6906 scsi_qla_host_t *vha = pci_get_drvdata(pdev); 6907 struct qla_hw_data *ha = vha->hw; 6908 6909 ql_dbg(ql_dbg_aer, vha, 0x9000, 6910 "PCI error detected, state %x.\n", state); 6911 6912 if (!atomic_read(&pdev->enable_cnt)) { 6913 ql_log(ql_log_info, vha, 0xffff, 6914 "PCI device is disabled, state %x\n", state); 6915 return PCI_ERS_RESULT_NEED_RESET; 6916 } 6917 6918 switch (state) { 6919 case pci_channel_io_normal: 6920 ha->flags.eeh_busy = 0; 6921 if (ql2xmqsupport || ql2xnvmeenable) { 6922 set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags); 6923 qla2xxx_wake_dpc(vha); 6924 } 6925 return PCI_ERS_RESULT_CAN_RECOVER; 6926 case pci_channel_io_frozen: 6927 ha->flags.eeh_busy = 1; 6928 qla_pci_error_cleanup(vha); 6929 return PCI_ERS_RESULT_NEED_RESET; 6930 case pci_channel_io_perm_failure: 6931 ha->flags.pci_channel_io_perm_failure = 1; 6932 qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16); 6933 if (ql2xmqsupport || ql2xnvmeenable) { 6934 set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags); 6935 qla2xxx_wake_dpc(vha); 6936 } 6937 return PCI_ERS_RESULT_DISCONNECT; 6938 } 6939 return PCI_ERS_RESULT_NEED_RESET; 6940 } 6941 6942 static pci_ers_result_t 6943 qla2xxx_pci_mmio_enabled(struct pci_dev *pdev) 6944 { 6945 int risc_paused = 0; 6946 uint32_t stat; 6947 unsigned long flags; 6948 scsi_qla_host_t *base_vha = pci_get_drvdata(pdev); 6949 struct qla_hw_data *ha = base_vha->hw; 6950 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24; 6952 6953 if (IS_QLA82XX(ha)) 6954 return PCI_ERS_RESULT_RECOVERED; 6955 6956 spin_lock_irqsave(&ha->hardware_lock, flags); 6957 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 6958 stat = RD_REG_DWORD(&reg->hccr); 6959 if (stat & HCCR_RISC_PAUSE) 6960 risc_paused = 1; 6961 } else if (IS_QLA23XX(ha)) { 6962 stat = RD_REG_DWORD(&reg->u.isp2300.host_status); 6963 if (stat & HSR_RISC_PAUSED) 6964 risc_paused = 1; 6965 } else if (IS_FWI2_CAPABLE(ha)) { 6966 stat = RD_REG_DWORD(&reg24->host_status); 6967 if (stat & HSRX_RISC_PAUSED) 6968 risc_paused = 1; 6969 } 6970 spin_unlock_irqrestore(&ha->hardware_lock, flags); 6971 6972 if (risc_paused) { 6973 ql_log(ql_log_info, base_vha, 0x9003, 6974 "RISC paused -- mmio_enabled, Dumping firmware.\n"); 6975 ha->isp_ops->fw_dump(base_vha, 0); 6976 6977 return PCI_ERS_RESULT_NEED_RESET; 6978 } else 6979 return PCI_ERS_RESULT_RECOVERED; 6980 } 6981 6982 static pci_ers_result_t 6983 qla2xxx_pci_slot_reset(struct pci_dev *pdev) 6984 { 6985 pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT; 6986 scsi_qla_host_t *base_vha = pci_get_drvdata(pdev); 6987 struct qla_hw_data *ha = base_vha->hw; 6988 int rc; 6989 struct qla_qpair *qpair = NULL; 6990 6991 ql_dbg(ql_dbg_aer, base_vha, 0x9004, 6992 "Slot Reset.\n"); 6993 6994 /* Workaround: the qla2xxx driver accesses hardware early in recovery 6995 * and needs the error state to be pci_channel_io_normal; 6996 * otherwise mailbox commands time out. 6997 */ 6998 pdev->error_state = pci_channel_io_normal; 6999 7000 pci_restore_state(pdev); 7001 7002 /* pci_restore_state() clears the device's saved_state flag, so 7003 * save the restored state again for any subsequent reset. 7004 */ 7005 pci_save_state(pdev); 7006 7007 if (ha->mem_only) 7008 rc = pci_enable_device_mem(pdev); 7009 else 7010 rc = pci_enable_device(pdev); 7011 7012 if (rc) { 7013 ql_log(ql_log_warn, base_vha, 0x9005, 7014 "Can't re-enable PCI device after reset.\n"); 7015 goto exit_slot_reset; 7016 } 7017 7018 7019 if (ha->isp_ops->pci_config(base_vha)) 7020 goto exit_slot_reset; 7021 7022 mutex_lock(&ha->mq_lock); 7023 list_for_each_entry(qpair, &base_vha->qp_list, qp_list_elem) 7024 qpair->online = 1; 7025 mutex_unlock(&ha->mq_lock); 7026 7027 base_vha->flags.online = 1; 7028 set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); 7029 if (ha->isp_ops->abort_isp(base_vha) == QLA_SUCCESS) 7030 ret = PCI_ERS_RESULT_RECOVERED; 7031 clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); 7032 7033 7034 exit_slot_reset: 7035 ql_dbg(ql_dbg_aer, base_vha, 0x900e, 7036 "slot_reset return %x.\n", ret); 7037 7038 return ret; 7039 } 7040 7041 static void 7042 qla2xxx_pci_resume(struct pci_dev *pdev) 7043 { 7044 scsi_qla_host_t *base_vha = pci_get_drvdata(pdev); 7045 struct qla_hw_data *ha = base_vha->hw; 7046 int ret; 7047 7048 ql_dbg(ql_dbg_aer, base_vha, 0x900f, 7049 "pci_resume.\n"); 7050 7051 ha->flags.eeh_busy = 0; 7052 7053 ret = qla2x00_wait_for_hba_online(base_vha); 7054 if (ret != QLA_SUCCESS) { 7055 ql_log(ql_log_fatal, base_vha, 0x9002, 7056 "The device failed to resume I/O from slot/link_reset.\n"); 7057 } 7058 } 7059 7060 static void 7061 qla_pci_reset_prepare(struct pci_dev *pdev) 7062 { 7063 scsi_qla_host_t *base_vha = pci_get_drvdata(pdev); 7064 struct qla_hw_data *ha = base_vha->hw; 7065 struct qla_qpair *qpair; 7066 7067 ql_log(ql_log_warn, base_vha, 0xffff, 7068 "%s.\n", __func__); 7069 7070 /* 7071 * PCI FLR/function reset is about to reset the 7072 * slot. Stop the chip to stop all DMA access.
7073 * It is assumed that pci_reset_done will be called 7074 * after FLR to resume Chip operation. 7075 */ 7076 ha->flags.eeh_busy = 1; 7077 mutex_lock(&ha->mq_lock); 7078 list_for_each_entry(qpair, &base_vha->qp_list, qp_list_elem) 7079 qpair->online = 0; 7080 mutex_unlock(&ha->mq_lock); 7081 7082 set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); 7083 qla2x00_abort_isp_cleanup(base_vha); 7084 qla2x00_abort_all_cmds(base_vha, DID_RESET << 16); 7085 } 7086 7087 static void 7088 qla_pci_reset_done(struct pci_dev *pdev) 7089 { 7090 scsi_qla_host_t *base_vha = pci_get_drvdata(pdev); 7091 struct qla_hw_data *ha = base_vha->hw; 7092 struct qla_qpair *qpair; 7093 7094 ql_log(ql_log_warn, base_vha, 0xffff, 7095 "%s.\n", __func__); 7096 7097 /* 7098 * FLR just completed by PCI layer. Resume adapter 7099 */ 7100 ha->flags.eeh_busy = 0; 7101 mutex_lock(&ha->mq_lock); 7102 list_for_each_entry(qpair, &base_vha->qp_list, qp_list_elem) 7103 qpair->online = 1; 7104 mutex_unlock(&ha->mq_lock); 7105 7106 base_vha->flags.online = 1; 7107 ha->isp_ops->abort_isp(base_vha); 7108 clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); 7109 } 7110 7111 static int qla2xxx_map_queues(struct Scsi_Host *shost) 7112 { 7113 int rc; 7114 scsi_qla_host_t *vha = (scsi_qla_host_t *)shost->hostdata; 7115 struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT]; 7116 7117 if (USER_CTRL_IRQ(vha->hw) || !vha->hw->mqiobase) 7118 rc = blk_mq_map_queues(qmap); 7119 else 7120 rc = blk_mq_pci_map_queues(qmap, vha->hw->pdev, vha->irq_offset); 7121 return rc; 7122 } 7123 7124 struct scsi_host_template qla2xxx_driver_template = { 7125 .module = THIS_MODULE, 7126 .name = QLA2XXX_DRIVER_NAME, 7127 .queuecommand = qla2xxx_queuecommand, 7128 7129 .eh_timed_out = fc_eh_timed_out, 7130 .eh_abort_handler = qla2xxx_eh_abort, 7131 .eh_device_reset_handler = qla2xxx_eh_device_reset, 7132 .eh_target_reset_handler = qla2xxx_eh_target_reset, 7133 .eh_bus_reset_handler = qla2xxx_eh_bus_reset, 7134 .eh_host_reset_handler = qla2xxx_eh_host_reset, 7135 7136 .slave_configure = qla2xxx_slave_configure, 7137 7138 .slave_alloc = qla2xxx_slave_alloc, 7139 .slave_destroy = qla2xxx_slave_destroy, 7140 .scan_finished = qla2xxx_scan_finished, 7141 .scan_start = qla2xxx_scan_start, 7142 .change_queue_depth = scsi_change_queue_depth, 7143 .map_queues = qla2xxx_map_queues, 7144 .this_id = -1, 7145 .cmd_per_lun = 3, 7146 .sg_tablesize = SG_ALL, 7147 7148 .max_sectors = 0xFFFF, 7149 .shost_attrs = qla2x00_host_attrs, 7150 7151 .supported_mode = MODE_INITIATOR, 7152 .track_queue_depth = 1, 7153 .cmd_size = sizeof(srb_t), 7154 }; 7155 7156 static const struct pci_error_handlers qla2xxx_err_handler = { 7157 .error_detected = qla2xxx_pci_error_detected, 7158 .mmio_enabled = qla2xxx_pci_mmio_enabled, 7159 .slot_reset = qla2xxx_pci_slot_reset, 7160 .resume = qla2xxx_pci_resume, 7161 .reset_prepare = qla_pci_reset_prepare, 7162 .reset_done = qla_pci_reset_done, 7163 }; 7164 7165 static struct pci_device_id qla2xxx_pci_tbl[] = { 7166 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2100) }, 7167 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2200) }, 7168 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2300) }, 7169 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2312) }, 7170 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2322) }, 7171 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP6312) }, 7172 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP6322) }, 7173 { 
PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2422) }, 7174 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2432) }, 7175 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8432) }, 7176 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5422) }, 7177 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5432) }, 7178 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2532) }, 7179 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2031) }, 7180 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8001) }, 7181 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8021) }, 7182 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8031) }, 7183 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISPF001) }, 7184 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8044) }, 7185 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2071) }, 7186 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2271) }, 7187 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2261) }, 7188 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2061) }, 7189 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2081) }, 7190 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2281) }, 7191 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2089) }, 7192 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2289) }, 7193 { 0 }, 7194 }; 7195 MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl); 7196 7197 static struct pci_driver qla2xxx_pci_driver = { 7198 .name = QLA2XXX_DRIVER_NAME, 7199 .driver = { 7200 .owner = THIS_MODULE, 7201 }, 7202 .id_table = qla2xxx_pci_tbl, 7203 .probe = qla2x00_probe_one, 7204 .remove = qla2x00_remove_one, 7205 .shutdown = qla2x00_shutdown, 7206 .err_handler = &qla2xxx_err_handler, 7207 }; 7208 7209 static const struct file_operations apidev_fops = { 7210 .owner = THIS_MODULE, 7211 .llseek = noop_llseek, 7212 }; 7213 7214 /** 7215 * qla2x00_module_init - Module initialization. 7216 **/ 7217 static int __init 7218 qla2x00_module_init(void) 7219 { 7220 int ret = 0; 7221 7222 BUILD_BUG_ON(sizeof(cmd_entry_t) != 64); 7223 BUILD_BUG_ON(sizeof(cont_a64_entry_t) != 64); 7224 BUILD_BUG_ON(sizeof(cont_entry_t) != 64); 7225 BUILD_BUG_ON(sizeof(init_cb_t) != 96); 7226 BUILD_BUG_ON(sizeof(ms_iocb_entry_t) != 64); 7227 BUILD_BUG_ON(sizeof(request_t) != 64); 7228 BUILD_BUG_ON(sizeof(struct access_chip_84xx) != 64); 7229 BUILD_BUG_ON(sizeof(struct cmd_bidir) != 64); 7230 BUILD_BUG_ON(sizeof(struct cmd_nvme) != 64); 7231 BUILD_BUG_ON(sizeof(struct cmd_type_6) != 64); 7232 BUILD_BUG_ON(sizeof(struct cmd_type_7) != 64); 7233 BUILD_BUG_ON(sizeof(struct cmd_type_7_fx00) != 64); 7234 BUILD_BUG_ON(sizeof(struct cmd_type_crc_2) != 64); 7235 BUILD_BUG_ON(sizeof(struct ct_entry_24xx) != 64); 7236 BUILD_BUG_ON(sizeof(struct ctio_crc2_to_fw) != 64); 7237 BUILD_BUG_ON(sizeof(struct els_entry_24xx) != 64); 7238 BUILD_BUG_ON(sizeof(struct fxdisc_entry_fx00) != 64); 7239 BUILD_BUG_ON(sizeof(struct init_cb_24xx) != 128); 7240 BUILD_BUG_ON(sizeof(struct init_cb_81xx) != 128); 7241 BUILD_BUG_ON(sizeof(struct pt_ls4_request) != 64); 7242 BUILD_BUG_ON(sizeof(struct sns_cmd_pkt) != 2064); 7243 BUILD_BUG_ON(sizeof(struct verify_chip_entry_84xx) != 64); 7244 BUILD_BUG_ON(sizeof(struct vf_evfp_entry_24xx) != 56); 7245 BUILD_BUG_ON(sizeof(struct qla_flt_region) != 16); 7246 BUILD_BUG_ON(sizeof(struct qla_flt_header) != 8); 7247 7248 /* Allocate cache for SRBs. 
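 * A dedicated kmem_cache keeps srb_t allocations hardware-cache
 * aligned (SLAB_HWCACHE_ALIGN); failure here aborts the module load
 * with -ENOMEM.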
*/ 7249 srb_cachep = kmem_cache_create("qla2xxx_srbs", sizeof(srb_t), 0, 7250 SLAB_HWCACHE_ALIGN, NULL); 7251 if (srb_cachep == NULL) { 7252 ql_log(ql_log_fatal, NULL, 0x0001, 7253 "Unable to allocate SRB cache...Failing load!\n"); 7254 return -ENOMEM; 7255 } 7256 7257 /* Initialize target kmem_cache and mem_pools */ 7258 ret = qlt_init(); 7259 if (ret < 0) { 7260 goto destroy_cache; 7261 } else if (ret > 0) { 7262 /* 7263 * If initiator mode is explicitly disabled by qlt_init(), 7264 * prevent scsi_transport_fc.c:fc_scsi_scan_rport() from 7265 * performing scsi_scan_target() during LOOP UP event. 7266 */ 7267 qla2xxx_transport_functions.disable_target_scan = 1; 7268 qla2xxx_transport_vport_functions.disable_target_scan = 1; 7269 } 7270 7271 /* Derive version string. */ 7272 strcpy(qla2x00_version_str, QLA2XXX_VERSION); 7273 if (ql2xextended_error_logging) 7274 strcat(qla2x00_version_str, "-debug"); 7275 if (ql2xextended_error_logging == 1) 7276 ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK; 7277 7278 if (ql2x_ini_mode == QLA2XXX_INI_MODE_DUAL) 7279 qla_insert_tgt_attrs(); 7280 7281 qla2xxx_transport_template = 7282 fc_attach_transport(&qla2xxx_transport_functions); 7283 if (!qla2xxx_transport_template) { 7284 ql_log(ql_log_fatal, NULL, 0x0002, 7285 "fc_attach_transport failed...Failing load!\n"); 7286 ret = -ENODEV; 7287 goto qlt_exit; 7288 } 7289 7290 apidev_major = register_chrdev(0, QLA2XXX_APIDEV, &apidev_fops); 7291 if (apidev_major < 0) { 7292 ql_log(ql_log_fatal, NULL, 0x0003, 7293 "Unable to register char device %s.\n", QLA2XXX_APIDEV); 7294 } 7295 7296 qla2xxx_transport_vport_template = 7297 fc_attach_transport(&qla2xxx_transport_vport_functions); 7298 if (!qla2xxx_transport_vport_template) { 7299 ql_log(ql_log_fatal, NULL, 0x0004, 7300 "fc_attach_transport vport failed...Failing load!\n"); 7301 ret = -ENODEV; 7302 goto unreg_chrdev; 7303 } 7304 ql_log(ql_log_info, NULL, 0x0005, 7305 "QLogic Fibre Channel HBA Driver: %s.\n", 7306 qla2x00_version_str); 7307 ret = pci_register_driver(&qla2xxx_pci_driver); 7308 if (ret) { 7309 ql_log(ql_log_fatal, NULL, 0x0006, 7310 "pci_register_driver failed...ret=%d Failing load!\n", 7311 ret); 7312 goto release_vport_transport; 7313 } 7314 return ret; 7315 7316 release_vport_transport: 7317 fc_release_transport(qla2xxx_transport_vport_template); 7318 7319 unreg_chrdev: 7320 if (apidev_major >= 0) 7321 unregister_chrdev(apidev_major, QLA2XXX_APIDEV); 7322 fc_release_transport(qla2xxx_transport_template); 7323 7324 qlt_exit: 7325 qlt_exit(); 7326 7327 destroy_cache: 7328 kmem_cache_destroy(srb_cachep); 7329 return ret; 7330 } 7331 7332 /** 7333 * qla2x00_module_exit - Module cleanup.
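 *
 * Teardown mirrors qla2x00_module_init() in reverse: the PCI driver
 * is unregistered first so no new probes race the release of the
 * firmware blobs, transports, and caches below.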
7334 **/ 7335 static void __exit 7336 qla2x00_module_exit(void) 7337 { 7338 pci_unregister_driver(&qla2xxx_pci_driver); 7339 qla2x00_release_firmware(); 7340 kmem_cache_destroy(ctx_cachep); 7341 fc_release_transport(qla2xxx_transport_vport_template); 7342 if (apidev_major >= 0) 7343 unregister_chrdev(apidev_major, QLA2XXX_APIDEV); 7344 fc_release_transport(qla2xxx_transport_template); 7345 qlt_exit(); 7346 kmem_cache_destroy(srb_cachep); 7347 } 7348 7349 module_init(qla2x00_module_init); 7350 module_exit(qla2x00_module_exit); 7351 7352 MODULE_AUTHOR("QLogic Corporation"); 7353 MODULE_DESCRIPTION("QLogic Fibre Channel HBA Driver"); 7354 MODULE_LICENSE("GPL"); 7355 MODULE_VERSION(QLA2XXX_VERSION); 7356 MODULE_FIRMWARE(FW_FILE_ISP21XX); 7357 MODULE_FIRMWARE(FW_FILE_ISP22XX); 7358 MODULE_FIRMWARE(FW_FILE_ISP2300); 7359 MODULE_FIRMWARE(FW_FILE_ISP2322); 7360 MODULE_FIRMWARE(FW_FILE_ISP24XX); 7361 MODULE_FIRMWARE(FW_FILE_ISP25XX); 7362