/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/kobject.h>
#include <linux/slab.h>
#include <linux/blk-mq-pci.h>
#include <linux/refcount.h>

#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_fc.h>

#include "qla_target.h"

/*
 * Driver version
 */
char qla2x00_version_str[40];

/* Major number of the apidev character device (legacy ioctl interface). */
static int apidev_major;

/*
 * SRB allocation cache
 */
struct kmem_cache *srb_cachep;

/*
 * CT6 CTX allocation cache
 */
static struct kmem_cache *ctx_cachep;
/*
 * error level for logging
 */
uint ql_errlev = 0x8001;

/*
 * Module parameters.  Unless noted otherwise these are read once at
 * probe/init time; the 0644 (S_IWUSR) ones may also be changed at runtime
 * through sysfs.
 */

static int ql2xenableclass2;
/* NOTE(review): S_IRUSR is already included in S_IRUGO, so the OR below is
 * redundant (harmless). */
module_param(ql2xenableclass2, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xenableclass2,
		"Specify if Class 2 operations are supported from the very "
		"beginning. Default is 0 - class 2 not supported.");


int ql2xlogintimeout = 20;
module_param(ql2xlogintimeout, int, S_IRUGO);
MODULE_PARM_DESC(ql2xlogintimeout,
		"Login timeout value in seconds.");

int qlport_down_retry;
module_param(qlport_down_retry, int, S_IRUGO);
MODULE_PARM_DESC(qlport_down_retry,
		"Maximum number of command retries to a port that returns "
		"a PORT-DOWN status.");

int ql2xplogiabsentdevice;
module_param(ql2xplogiabsentdevice, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xplogiabsentdevice,
		"Option to enable PLOGI to devices that are not present after "
		"a Fabric scan.  This is needed for several broken switches. "
		"Default is 0 - no PLOGI. 1 - perform PLOGI.");

int ql2xloginretrycount;
module_param(ql2xloginretrycount, int, S_IRUGO);
MODULE_PARM_DESC(ql2xloginretrycount,
		"Specify an alternate value for the NVRAM login retry count.");

int ql2xallocfwdump = 1;
module_param(ql2xallocfwdump, int, S_IRUGO);
MODULE_PARM_DESC(ql2xallocfwdump,
		"Option to enable allocation of memory for a firmware dump "
		"during HBA initialization.  Memory allocation requirements "
		"vary by ISP type.  Default is 1 - allocate memory.");

/* Bitmask of ql_dbg_* categories; also reachable as the "logging" alias. */
int ql2xextended_error_logging;
module_param(ql2xextended_error_logging, int, S_IRUGO|S_IWUSR);
module_param_named(logging, ql2xextended_error_logging, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xextended_error_logging,
		"Option to enable extended error logging,\n"
		"\t\tDefault is 0 - no logging.  0x40000000 - Module Init & Probe.\n"
		"\t\t0x20000000 - Mailbox Cmnds. 0x10000000 - Device Discovery.\n"
		"\t\t0x08000000 - IO tracing.    0x04000000 - DPC Thread.\n"
		"\t\t0x02000000 - Async events.  0x01000000 - Timer routines.\n"
		"\t\t0x00800000 - User space.    0x00400000 - Task Management.\n"
		"\t\t0x00200000 - AER/EEH.       0x00100000 - Multi Q.\n"
		"\t\t0x00080000 - P3P Specific.  0x00040000 - Virtual Port.\n"
		"\t\t0x00020000 - Buffer Dump.   0x00010000 - Misc.\n"
		"\t\t0x00008000 - Verbose.       0x00004000 - Target.\n"
		"\t\t0x00002000 - Target Mgmt.   0x00001000 - Target TMF.\n"
		"\t\t0x7fffffff - For enabling all logs, can be too many logs.\n"
		"\t\t0x1e400000 - Preferred value for capturing essential "
		"debug information (equivalent to old "
		"ql2xextended_error_logging=1).\n"
		"\t\tDo LOGICAL OR of the value to enable more than one level");

int ql2xshiftctondsd = 6;
module_param(ql2xshiftctondsd, int, S_IRUGO);
MODULE_PARM_DESC(ql2xshiftctondsd,
		"Set to control shifting of command type processing "
		"based on total number of SG elements.");

int ql2xfdmienable = 1;
module_param(ql2xfdmienable, int, S_IRUGO|S_IWUSR);
module_param_named(fdmi, ql2xfdmienable, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xfdmienable,
		"Enables FDMI registrations. "
		"0 - no FDMI. Default is 1 - perform FDMI.");

#define MAX_Q_DEPTH	64
static int ql2xmaxqdepth = MAX_Q_DEPTH;
module_param(ql2xmaxqdepth, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xmaxqdepth,
		"Maximum queue depth to set for each LUN. "
		"Default is 64.");

/* DIF defaults to off when FC-NVMe is built in, since the two interact. */
#if (IS_ENABLED(CONFIG_NVME_FC))
int ql2xenabledif;
#else
int ql2xenabledif = 2;
#endif
module_param(ql2xenabledif, int, S_IRUGO);
MODULE_PARM_DESC(ql2xenabledif,
		" Enable T10-CRC-DIF:\n"
		" Default is 2.\n"
		"  0 -- No DIF Support\n"
		"  1 -- Enable DIF for all types\n"
		"  2 -- Enable DIF for all types, except Type 0.\n");

#if (IS_ENABLED(CONFIG_NVME_FC))
int ql2xnvmeenable = 1;
#else
int ql2xnvmeenable;
#endif
module_param(ql2xnvmeenable, int, 0644);
MODULE_PARM_DESC(ql2xnvmeenable,
    "Enables NVME support. "
    "0 - no NVMe.  Default is Y");

int ql2xenablehba_err_chk = 2;
module_param(ql2xenablehba_err_chk, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xenablehba_err_chk,
		" Enable T10-CRC-DIF Error isolation by HBA:\n"
		" Default is 2.\n"
		"  0 -- Error isolation disabled\n"
		"  1 -- Error isolation enabled only for DIX Type 0\n"
		"  2 -- Error isolation enabled for all Types\n");

int ql2xiidmaenable = 1;
module_param(ql2xiidmaenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xiidmaenable,
		"Enables iIDMA settings "
		"Default is 1 - perform iIDMA. 0 - no iIDMA.");

int ql2xmqsupport = 1;
module_param(ql2xmqsupport, int, S_IRUGO);
MODULE_PARM_DESC(ql2xmqsupport,
		"Enable on demand multiple queue pairs support "
		"Default is 1 for supported. "
		"Set it to 0 to turn off mq qpair support.");

int ql2xfwloadbin;
module_param(ql2xfwloadbin, int, S_IRUGO|S_IWUSR);
module_param_named(fwload, ql2xfwloadbin, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xfwloadbin,
		"Option to specify location from which to load ISP firmware:.\n"
		" 2 -- load firmware via the request_firmware() (hotplug).\n"
		"      interface.\n"
		" 1 -- load firmware from flash.\n"
		" 0 -- use default semantics.\n");

int ql2xetsenable;
module_param(ql2xetsenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xetsenable,
		"Enables firmware ETS burst."
		"Default is 0 - skip ETS enablement.");

int ql2xdbwr = 1;
module_param(ql2xdbwr, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xdbwr,
		"Option to specify scheme for request queue posting.\n"
		" 0 -- Regular doorbell.\n"
		" 1 -- CAMRAM doorbell (faster).\n");

int ql2xtargetreset = 1;
module_param(ql2xtargetreset, int, S_IRUGO);
MODULE_PARM_DESC(ql2xtargetreset,
		 "Enable target reset."
		 "Default is 1 - use hw defaults.");

int ql2xgffidenable;
module_param(ql2xgffidenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xgffidenable,
		"Enables GFF_ID checks of port type. "
		"Default is 0 - Do not use GFF_ID information.");

int ql2xasynctmfenable = 1;
module_param(ql2xasynctmfenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xasynctmfenable,
		"Enables issue of TM IOCBs asynchronously via IOCB mechanism"
		"Default is 1 - Issue TM IOCBs via mailbox mechanism.");

int ql2xdontresethba;
module_param(ql2xdontresethba, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xdontresethba,
		"Option to specify reset behaviour.\n"
		" 0 (Default) -- Reset on failure.\n"
		" 1 -- Do not reset on failure.\n");

uint64_t ql2xmaxlun = MAX_LUNS;
module_param(ql2xmaxlun, ullong, S_IRUGO);
MODULE_PARM_DESC(ql2xmaxlun,
		"Defines the maximum LU number to register with the SCSI "
		"midlayer. Default is 65535.");

int ql2xmdcapmask = 0x1F;
module_param(ql2xmdcapmask, int, S_IRUGO);
MODULE_PARM_DESC(ql2xmdcapmask,
		"Set the Minidump driver capture mask level. "
		"Default is 0x1F - Can be set to 0x3, 0x7, 0xF, 0x1F, 0x7F.");

int ql2xmdenable = 1;
module_param(ql2xmdenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xmdenable,
		"Enable/disable MiniDump. "
		"0 - MiniDump disabled. "
		"1 (Default) - MiniDump enabled.");

/* NOTE(review): declared as int but registered as a uint module_param
 * (same for ql2xexchoffld/ql2xiniexchg below).  The types have identical
 * size so this works in practice, but the extern declarations elsewhere
 * presumably say int — confirm before changing either side. */
int ql2xexlogins;
module_param(ql2xexlogins, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xexlogins,
		 "Number of extended Logins. "
		 "0 (Default)- Disabled.");

int ql2xexchoffld = 1024;
module_param(ql2xexchoffld, uint, 0644);
MODULE_PARM_DESC(ql2xexchoffld,
	"Number of target exchanges.");

int ql2xiniexchg = 1024;
module_param(ql2xiniexchg, uint, 0644);
MODULE_PARM_DESC(ql2xiniexchg,
	"Number of initiator exchanges.");

int ql2xfwholdabts;
module_param(ql2xfwholdabts, int, S_IRUGO);
MODULE_PARM_DESC(ql2xfwholdabts,
		"Allow FW to hold status IOCB until ABTS rsp received. "
		"0 (Default) Do not set fw option. "
		"1 - Set fw option to hold ABTS.");

int ql2xmvasynctoatio = 1;
module_param(ql2xmvasynctoatio, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xmvasynctoatio,
		"Move PUREX, ABTS RX and RIDA IOCBs to ATIOQ"
		"0 (Default). Do not move IOCBs"
		"1 - Move IOCBs.");

int ql2xautodetectsfp = 1;
module_param(ql2xautodetectsfp, int, 0444);
MODULE_PARM_DESC(ql2xautodetectsfp,
		 "Detect SFP range and set appropriate distance.\n"
		 "1 (Default): Enable\n");

int ql2xenablemsix = 1;
module_param(ql2xenablemsix, int, 0444);
MODULE_PARM_DESC(ql2xenablemsix,
		 "Set to enable MSI or MSI-X interrupt mechanism.\n"
		 " Default is 1, enable MSI-X interrupt mechanism.\n"
		 " 0 -- enable traditional pin-based mechanism.\n"
		 " 1 -- enable MSI-X interrupt mechanism.\n"
		 " 2 -- enable MSI interrupt mechanism.\n");

int qla2xuseresexchforels;
module_param(qla2xuseresexchforels, int, 0444);
MODULE_PARM_DESC(qla2xuseresexchforels,
		 "Reserve 1/2 of emergency exchanges for ELS.\n"
		 " 0 (default): disabled");

static int ql2xprotmask;
module_param(ql2xprotmask, int, 0644);
MODULE_PARM_DESC(ql2xprotmask,
		 "Override DIF/DIX protection capabilities mask\n"
		 "Default is 0 which sets protection mask based on "
		 "capabilities reported by HBA firmware.\n");

static int ql2xprotguard;
module_param(ql2xprotguard, int, 0644);
MODULE_PARM_DESC(ql2xprotguard, "Override choice of DIX checksum\n" 298 " 0 -- Let HBA firmware decide\n" 299 " 1 -- Force T10 CRC\n" 300 " 2 -- Force IP checksum\n"); 301 302 int ql2xdifbundlinginternalbuffers; 303 module_param(ql2xdifbundlinginternalbuffers, int, 0644); 304 MODULE_PARM_DESC(ql2xdifbundlinginternalbuffers, 305 "Force using internal buffers for DIF information\n" 306 "0 (Default). Based on check.\n" 307 "1 Force using internal buffers\n"); 308 309 static void qla2x00_clear_drv_active(struct qla_hw_data *); 310 static void qla2x00_free_device(scsi_qla_host_t *); 311 static int qla2xxx_map_queues(struct Scsi_Host *shost); 312 static void qla2x00_destroy_deferred_work(struct qla_hw_data *); 313 314 315 static struct scsi_transport_template *qla2xxx_transport_template = NULL; 316 struct scsi_transport_template *qla2xxx_transport_vport_template = NULL; 317 318 /* TODO Convert to inlines 319 * 320 * Timer routines 321 */ 322 323 __inline__ void 324 qla2x00_start_timer(scsi_qla_host_t *vha, unsigned long interval) 325 { 326 timer_setup(&vha->timer, qla2x00_timer, 0); 327 vha->timer.expires = jiffies + interval * HZ; 328 add_timer(&vha->timer); 329 vha->timer_active = 1; 330 } 331 332 static inline void 333 qla2x00_restart_timer(scsi_qla_host_t *vha, unsigned long interval) 334 { 335 /* Currently used for 82XX only. 
*/ 336 if (vha->device_flags & DFLG_DEV_FAILED) { 337 ql_dbg(ql_dbg_timer, vha, 0x600d, 338 "Device in a failed state, returning.\n"); 339 return; 340 } 341 342 mod_timer(&vha->timer, jiffies + interval * HZ); 343 } 344 345 static __inline__ void 346 qla2x00_stop_timer(scsi_qla_host_t *vha) 347 { 348 del_timer_sync(&vha->timer); 349 vha->timer_active = 0; 350 } 351 352 static int qla2x00_do_dpc(void *data); 353 354 static void qla2x00_rst_aen(scsi_qla_host_t *); 355 356 static int qla2x00_mem_alloc(struct qla_hw_data *, uint16_t, uint16_t, 357 struct req_que **, struct rsp_que **); 358 static void qla2x00_free_fw_dump(struct qla_hw_data *); 359 static void qla2x00_mem_free(struct qla_hw_data *); 360 int qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd, 361 struct qla_qpair *qpair); 362 363 /* -------------------------------------------------------------------------- */ 364 static void qla_init_base_qpair(struct scsi_qla_host *vha, struct req_que *req, 365 struct rsp_que *rsp) 366 { 367 struct qla_hw_data *ha = vha->hw; 368 369 rsp->qpair = ha->base_qpair; 370 rsp->req = req; 371 ha->base_qpair->hw = ha; 372 ha->base_qpair->req = req; 373 ha->base_qpair->rsp = rsp; 374 ha->base_qpair->vha = vha; 375 ha->base_qpair->qp_lock_ptr = &ha->hardware_lock; 376 ha->base_qpair->use_shadow_reg = IS_SHADOW_REG_CAPABLE(ha) ? 1 : 0; 377 ha->base_qpair->msix = &ha->msix_entries[QLA_MSIX_RSP_Q]; 378 ha->base_qpair->srb_mempool = ha->srb_mempool; 379 INIT_LIST_HEAD(&ha->base_qpair->hints_list); 380 ha->base_qpair->enable_class_2 = ql2xenableclass2; 381 /* init qpair to this cpu. Will adjust at run time. 
*/ 382 qla_cpu_update(rsp->qpair, raw_smp_processor_id()); 383 ha->base_qpair->pdev = ha->pdev; 384 385 if (IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha)) 386 ha->base_qpair->reqq_start_iocbs = qla_83xx_start_iocbs; 387 } 388 389 static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req, 390 struct rsp_que *rsp) 391 { 392 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 393 394 ha->req_q_map = kcalloc(ha->max_req_queues, sizeof(struct req_que *), 395 GFP_KERNEL); 396 if (!ha->req_q_map) { 397 ql_log(ql_log_fatal, vha, 0x003b, 398 "Unable to allocate memory for request queue ptrs.\n"); 399 goto fail_req_map; 400 } 401 402 ha->rsp_q_map = kcalloc(ha->max_rsp_queues, sizeof(struct rsp_que *), 403 GFP_KERNEL); 404 if (!ha->rsp_q_map) { 405 ql_log(ql_log_fatal, vha, 0x003c, 406 "Unable to allocate memory for response queue ptrs.\n"); 407 goto fail_rsp_map; 408 } 409 410 ha->base_qpair = kzalloc(sizeof(struct qla_qpair), GFP_KERNEL); 411 if (ha->base_qpair == NULL) { 412 ql_log(ql_log_warn, vha, 0x00e0, 413 "Failed to allocate base queue pair memory.\n"); 414 goto fail_base_qpair; 415 } 416 417 qla_init_base_qpair(vha, req, rsp); 418 419 if ((ql2xmqsupport || ql2xnvmeenable) && ha->max_qpairs) { 420 ha->queue_pair_map = kcalloc(ha->max_qpairs, sizeof(struct qla_qpair *), 421 GFP_KERNEL); 422 if (!ha->queue_pair_map) { 423 ql_log(ql_log_fatal, vha, 0x0180, 424 "Unable to allocate memory for queue pair ptrs.\n"); 425 goto fail_qpair_map; 426 } 427 } 428 429 /* 430 * Make sure we record at least the request and response queue zero in 431 * case we need to free them if part of the probe fails. 
432 */ 433 ha->rsp_q_map[0] = rsp; 434 ha->req_q_map[0] = req; 435 set_bit(0, ha->rsp_qid_map); 436 set_bit(0, ha->req_qid_map); 437 return 0; 438 439 fail_qpair_map: 440 kfree(ha->base_qpair); 441 ha->base_qpair = NULL; 442 fail_base_qpair: 443 kfree(ha->rsp_q_map); 444 ha->rsp_q_map = NULL; 445 fail_rsp_map: 446 kfree(ha->req_q_map); 447 ha->req_q_map = NULL; 448 fail_req_map: 449 return -ENOMEM; 450 } 451 452 static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req) 453 { 454 if (IS_QLAFX00(ha)) { 455 if (req && req->ring_fx00) 456 dma_free_coherent(&ha->pdev->dev, 457 (req->length_fx00 + 1) * sizeof(request_t), 458 req->ring_fx00, req->dma_fx00); 459 } else if (req && req->ring) 460 dma_free_coherent(&ha->pdev->dev, 461 (req->length + 1) * sizeof(request_t), 462 req->ring, req->dma); 463 464 if (req) 465 kfree(req->outstanding_cmds); 466 467 kfree(req); 468 } 469 470 static void qla2x00_free_rsp_que(struct qla_hw_data *ha, struct rsp_que *rsp) 471 { 472 if (IS_QLAFX00(ha)) { 473 if (rsp && rsp->ring_fx00) 474 dma_free_coherent(&ha->pdev->dev, 475 (rsp->length_fx00 + 1) * sizeof(request_t), 476 rsp->ring_fx00, rsp->dma_fx00); 477 } else if (rsp && rsp->ring) { 478 dma_free_coherent(&ha->pdev->dev, 479 (rsp->length + 1) * sizeof(response_t), 480 rsp->ring, rsp->dma); 481 } 482 kfree(rsp); 483 } 484 485 static void qla2x00_free_queues(struct qla_hw_data *ha) 486 { 487 struct req_que *req; 488 struct rsp_que *rsp; 489 int cnt; 490 unsigned long flags; 491 492 if (ha->queue_pair_map) { 493 kfree(ha->queue_pair_map); 494 ha->queue_pair_map = NULL; 495 } 496 if (ha->base_qpair) { 497 kfree(ha->base_qpair); 498 ha->base_qpair = NULL; 499 } 500 501 spin_lock_irqsave(&ha->hardware_lock, flags); 502 for (cnt = 0; cnt < ha->max_req_queues; cnt++) { 503 if (!test_bit(cnt, ha->req_qid_map)) 504 continue; 505 506 req = ha->req_q_map[cnt]; 507 clear_bit(cnt, ha->req_qid_map); 508 ha->req_q_map[cnt] = NULL; 509 510 spin_unlock_irqrestore(&ha->hardware_lock, 
flags); 511 qla2x00_free_req_que(ha, req); 512 spin_lock_irqsave(&ha->hardware_lock, flags); 513 } 514 spin_unlock_irqrestore(&ha->hardware_lock, flags); 515 516 kfree(ha->req_q_map); 517 ha->req_q_map = NULL; 518 519 520 spin_lock_irqsave(&ha->hardware_lock, flags); 521 for (cnt = 0; cnt < ha->max_rsp_queues; cnt++) { 522 if (!test_bit(cnt, ha->rsp_qid_map)) 523 continue; 524 525 rsp = ha->rsp_q_map[cnt]; 526 clear_bit(cnt, ha->rsp_qid_map); 527 ha->rsp_q_map[cnt] = NULL; 528 spin_unlock_irqrestore(&ha->hardware_lock, flags); 529 qla2x00_free_rsp_que(ha, rsp); 530 spin_lock_irqsave(&ha->hardware_lock, flags); 531 } 532 spin_unlock_irqrestore(&ha->hardware_lock, flags); 533 534 kfree(ha->rsp_q_map); 535 ha->rsp_q_map = NULL; 536 } 537 538 static char * 539 qla2x00_pci_info_str(struct scsi_qla_host *vha, char *str, size_t str_len) 540 { 541 struct qla_hw_data *ha = vha->hw; 542 static const char *const pci_bus_modes[] = { 543 "33", "66", "100", "133", 544 }; 545 uint16_t pci_bus; 546 547 pci_bus = (ha->pci_attr & (BIT_9 | BIT_10)) >> 9; 548 if (pci_bus) { 549 snprintf(str, str_len, "PCI-X (%s MHz)", 550 pci_bus_modes[pci_bus]); 551 } else { 552 pci_bus = (ha->pci_attr & BIT_8) >> 8; 553 snprintf(str, str_len, "PCI (%s MHz)", pci_bus_modes[pci_bus]); 554 } 555 556 return str; 557 } 558 559 static char * 560 qla24xx_pci_info_str(struct scsi_qla_host *vha, char *str, size_t str_len) 561 { 562 static const char *const pci_bus_modes[] = { 563 "33", "66", "100", "133", 564 }; 565 struct qla_hw_data *ha = vha->hw; 566 uint32_t pci_bus; 567 568 if (pci_is_pcie(ha->pdev)) { 569 uint32_t lstat, lspeed, lwidth; 570 const char *speed_str; 571 572 pcie_capability_read_dword(ha->pdev, PCI_EXP_LNKCAP, &lstat); 573 lspeed = lstat & PCI_EXP_LNKCAP_SLS; 574 lwidth = (lstat & PCI_EXP_LNKCAP_MLW) >> 4; 575 576 switch (lspeed) { 577 case 1: 578 speed_str = "2.5GT/s"; 579 break; 580 case 2: 581 speed_str = "5.0GT/s"; 582 break; 583 case 3: 584 speed_str = "8.0GT/s"; 585 break; 586 
default: 587 speed_str = "<unknown>"; 588 break; 589 } 590 snprintf(str, str_len, "PCIe (%s x%d)", speed_str, lwidth); 591 592 return str; 593 } 594 595 pci_bus = (ha->pci_attr & CSRX_PCIX_BUS_MODE_MASK) >> 8; 596 if (pci_bus == 0 || pci_bus == 8) 597 snprintf(str, str_len, "PCI (%s MHz)", 598 pci_bus_modes[pci_bus >> 3]); 599 else 600 snprintf(str, str_len, "PCI-X Mode %d (%s MHz)", 601 pci_bus & 4 ? 2 : 1, 602 pci_bus_modes[pci_bus & 3]); 603 604 return str; 605 } 606 607 static char * 608 qla2x00_fw_version_str(struct scsi_qla_host *vha, char *str, size_t size) 609 { 610 char un_str[10]; 611 struct qla_hw_data *ha = vha->hw; 612 613 snprintf(str, size, "%d.%02d.%02d ", ha->fw_major_version, 614 ha->fw_minor_version, ha->fw_subminor_version); 615 616 if (ha->fw_attributes & BIT_9) { 617 strcat(str, "FLX"); 618 return (str); 619 } 620 621 switch (ha->fw_attributes & 0xFF) { 622 case 0x7: 623 strcat(str, "EF"); 624 break; 625 case 0x17: 626 strcat(str, "TP"); 627 break; 628 case 0x37: 629 strcat(str, "IP"); 630 break; 631 case 0x77: 632 strcat(str, "VI"); 633 break; 634 default: 635 sprintf(un_str, "(%x)", ha->fw_attributes); 636 strcat(str, un_str); 637 break; 638 } 639 if (ha->fw_attributes & 0x100) 640 strcat(str, "X"); 641 642 return (str); 643 } 644 645 static char * 646 qla24xx_fw_version_str(struct scsi_qla_host *vha, char *str, size_t size) 647 { 648 struct qla_hw_data *ha = vha->hw; 649 650 snprintf(str, size, "%d.%02d.%02d (%x)", ha->fw_major_version, 651 ha->fw_minor_version, ha->fw_subminor_version, ha->fw_attributes); 652 return str; 653 } 654 655 void qla2x00_sp_free_dma(srb_t *sp) 656 { 657 struct qla_hw_data *ha = sp->vha->hw; 658 struct scsi_cmnd *cmd = GET_CMD_SP(sp); 659 660 if (sp->flags & SRB_DMA_VALID) { 661 scsi_dma_unmap(cmd); 662 sp->flags &= ~SRB_DMA_VALID; 663 } 664 665 if (sp->flags & SRB_CRC_PROT_DMA_VALID) { 666 dma_unmap_sg(&ha->pdev->dev, scsi_prot_sglist(cmd), 667 scsi_prot_sg_count(cmd), cmd->sc_data_direction); 668 sp->flags &= 
~SRB_CRC_PROT_DMA_VALID; 669 } 670 671 if (sp->flags & SRB_CRC_CTX_DSD_VALID) { 672 /* List assured to be having elements */ 673 qla2x00_clean_dsd_pool(ha, sp->u.scmd.crc_ctx); 674 sp->flags &= ~SRB_CRC_CTX_DSD_VALID; 675 } 676 677 if (sp->flags & SRB_CRC_CTX_DMA_VALID) { 678 struct crc_context *ctx0 = sp->u.scmd.crc_ctx; 679 680 dma_pool_free(ha->dl_dma_pool, ctx0, ctx0->crc_ctx_dma); 681 sp->flags &= ~SRB_CRC_CTX_DMA_VALID; 682 } 683 684 if (sp->flags & SRB_FCP_CMND_DMA_VALID) { 685 struct ct6_dsd *ctx1 = sp->u.scmd.ct6_ctx; 686 687 dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd, 688 ctx1->fcp_cmnd_dma); 689 list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list); 690 ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt; 691 ha->gbl_dsd_avail += ctx1->dsd_use_cnt; 692 mempool_free(ctx1, ha->ctx_mempool); 693 } 694 } 695 696 void qla2x00_sp_compl(srb_t *sp, int res) 697 { 698 struct scsi_cmnd *cmd = GET_CMD_SP(sp); 699 struct completion *comp = sp->comp; 700 701 sp->free(sp); 702 cmd->result = res; 703 CMD_SP(cmd) = NULL; 704 cmd->scsi_done(cmd); 705 if (comp) 706 complete(comp); 707 } 708 709 void qla2xxx_qpair_sp_free_dma(srb_t *sp) 710 { 711 struct scsi_cmnd *cmd = GET_CMD_SP(sp); 712 struct qla_hw_data *ha = sp->fcport->vha->hw; 713 714 if (sp->flags & SRB_DMA_VALID) { 715 scsi_dma_unmap(cmd); 716 sp->flags &= ~SRB_DMA_VALID; 717 } 718 719 if (sp->flags & SRB_CRC_PROT_DMA_VALID) { 720 dma_unmap_sg(&ha->pdev->dev, scsi_prot_sglist(cmd), 721 scsi_prot_sg_count(cmd), cmd->sc_data_direction); 722 sp->flags &= ~SRB_CRC_PROT_DMA_VALID; 723 } 724 725 if (sp->flags & SRB_CRC_CTX_DSD_VALID) { 726 /* List assured to be having elements */ 727 qla2x00_clean_dsd_pool(ha, sp->u.scmd.crc_ctx); 728 sp->flags &= ~SRB_CRC_CTX_DSD_VALID; 729 } 730 731 if (sp->flags & SRB_DIF_BUNDL_DMA_VALID) { 732 struct crc_context *difctx = sp->u.scmd.crc_ctx; 733 struct dsd_dma *dif_dsd, *nxt_dsd; 734 735 list_for_each_entry_safe(dif_dsd, nxt_dsd, 736 &difctx->ldif_dma_hndl_list, list) { 737 
list_del(&dif_dsd->list); 738 dma_pool_free(ha->dif_bundl_pool, dif_dsd->dsd_addr, 739 dif_dsd->dsd_list_dma); 740 kfree(dif_dsd); 741 difctx->no_dif_bundl--; 742 } 743 744 list_for_each_entry_safe(dif_dsd, nxt_dsd, 745 &difctx->ldif_dsd_list, list) { 746 list_del(&dif_dsd->list); 747 dma_pool_free(ha->dl_dma_pool, dif_dsd->dsd_addr, 748 dif_dsd->dsd_list_dma); 749 kfree(dif_dsd); 750 difctx->no_ldif_dsd--; 751 } 752 753 if (difctx->no_ldif_dsd) { 754 ql_dbg(ql_dbg_tgt+ql_dbg_verbose, sp->vha, 0xe022, 755 "%s: difctx->no_ldif_dsd=%x\n", 756 __func__, difctx->no_ldif_dsd); 757 } 758 759 if (difctx->no_dif_bundl) { 760 ql_dbg(ql_dbg_tgt+ql_dbg_verbose, sp->vha, 0xe022, 761 "%s: difctx->no_dif_bundl=%x\n", 762 __func__, difctx->no_dif_bundl); 763 } 764 sp->flags &= ~SRB_DIF_BUNDL_DMA_VALID; 765 } 766 767 if (sp->flags & SRB_FCP_CMND_DMA_VALID) { 768 struct ct6_dsd *ctx1 = sp->u.scmd.ct6_ctx; 769 770 dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd, 771 ctx1->fcp_cmnd_dma); 772 list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list); 773 ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt; 774 ha->gbl_dsd_avail += ctx1->dsd_use_cnt; 775 mempool_free(ctx1, ha->ctx_mempool); 776 sp->flags &= ~SRB_FCP_CMND_DMA_VALID; 777 } 778 779 if (sp->flags & SRB_CRC_CTX_DMA_VALID) { 780 struct crc_context *ctx0 = sp->u.scmd.crc_ctx; 781 782 dma_pool_free(ha->dl_dma_pool, ctx0, ctx0->crc_ctx_dma); 783 sp->flags &= ~SRB_CRC_CTX_DMA_VALID; 784 } 785 } 786 787 void qla2xxx_qpair_sp_compl(srb_t *sp, int res) 788 { 789 struct scsi_cmnd *cmd = GET_CMD_SP(sp); 790 struct completion *comp = sp->comp; 791 792 sp->free(sp); 793 cmd->result = res; 794 CMD_SP(cmd) = NULL; 795 cmd->scsi_done(cmd); 796 if (comp) 797 complete(comp); 798 } 799 800 static int 801 qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) 802 { 803 scsi_qla_host_t *vha = shost_priv(host); 804 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; 805 struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device)); 
806 struct qla_hw_data *ha = vha->hw; 807 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 808 srb_t *sp; 809 int rval; 810 811 if (unlikely(test_bit(UNLOADING, &base_vha->dpc_flags)) || 812 WARN_ON_ONCE(!rport)) { 813 cmd->result = DID_NO_CONNECT << 16; 814 goto qc24_fail_command; 815 } 816 817 if (ha->mqenable) { 818 uint32_t tag; 819 uint16_t hwq; 820 struct qla_qpair *qpair = NULL; 821 822 tag = blk_mq_unique_tag(cmd->request); 823 hwq = blk_mq_unique_tag_to_hwq(tag); 824 qpair = ha->queue_pair_map[hwq]; 825 826 if (qpair) 827 return qla2xxx_mqueuecommand(host, cmd, qpair); 828 } 829 830 if (ha->flags.eeh_busy) { 831 if (ha->flags.pci_channel_io_perm_failure) { 832 ql_dbg(ql_dbg_aer, vha, 0x9010, 833 "PCI Channel IO permanent failure, exiting " 834 "cmd=%p.\n", cmd); 835 cmd->result = DID_NO_CONNECT << 16; 836 } else { 837 ql_dbg(ql_dbg_aer, vha, 0x9011, 838 "EEH_Busy, Requeuing the cmd=%p.\n", cmd); 839 cmd->result = DID_REQUEUE << 16; 840 } 841 goto qc24_fail_command; 842 } 843 844 rval = fc_remote_port_chkready(rport); 845 if (rval) { 846 cmd->result = rval; 847 ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3003, 848 "fc_remote_port_chkready failed for cmd=%p, rval=0x%x.\n", 849 cmd, rval); 850 goto qc24_fail_command; 851 } 852 853 if (!vha->flags.difdix_supported && 854 scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) { 855 ql_dbg(ql_dbg_io, vha, 0x3004, 856 "DIF Cap not reg, fail DIF capable cmd's:%p.\n", 857 cmd); 858 cmd->result = DID_NO_CONNECT << 16; 859 goto qc24_fail_command; 860 } 861 862 if (!fcport) { 863 cmd->result = DID_NO_CONNECT << 16; 864 goto qc24_fail_command; 865 } 866 867 if (atomic_read(&fcport->state) != FCS_ONLINE) { 868 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD || 869 atomic_read(&base_vha->loop_state) == LOOP_DEAD) { 870 ql_dbg(ql_dbg_io, vha, 0x3005, 871 "Returning DNC, fcport_state=%d loop_state=%d.\n", 872 atomic_read(&fcport->state), 873 atomic_read(&base_vha->loop_state)); 874 cmd->result = DID_NO_CONNECT << 16; 875 
goto qc24_fail_command; 876 } 877 goto qc24_target_busy; 878 } 879 880 /* 881 * Return target busy if we've received a non-zero retry_delay_timer 882 * in a FCP_RSP. 883 */ 884 if (fcport->retry_delay_timestamp == 0) { 885 /* retry delay not set */ 886 } else if (time_after(jiffies, fcport->retry_delay_timestamp)) 887 fcport->retry_delay_timestamp = 0; 888 else 889 goto qc24_target_busy; 890 891 sp = scsi_cmd_priv(cmd); 892 qla2xxx_init_sp(sp, vha, vha->hw->base_qpair, fcport); 893 894 sp->u.scmd.cmd = cmd; 895 sp->type = SRB_SCSI_CMD; 896 897 CMD_SP(cmd) = (void *)sp; 898 sp->free = qla2x00_sp_free_dma; 899 sp->done = qla2x00_sp_compl; 900 901 rval = ha->isp_ops->start_scsi(sp); 902 if (rval != QLA_SUCCESS) { 903 ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3013, 904 "Start scsi failed rval=%d for cmd=%p.\n", rval, cmd); 905 goto qc24_host_busy_free_sp; 906 } 907 908 return 0; 909 910 qc24_host_busy_free_sp: 911 sp->free(sp); 912 913 qc24_target_busy: 914 return SCSI_MLQUEUE_TARGET_BUSY; 915 916 qc24_fail_command: 917 cmd->scsi_done(cmd); 918 919 return 0; 920 } 921 922 /* For MQ supported I/O */ 923 int 924 qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd, 925 struct qla_qpair *qpair) 926 { 927 scsi_qla_host_t *vha = shost_priv(host); 928 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; 929 struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device)); 930 struct qla_hw_data *ha = vha->hw; 931 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 932 srb_t *sp; 933 int rval; 934 935 rval = rport ? 
fc_remote_port_chkready(rport) : FC_PORTSTATE_OFFLINE; 936 if (rval) { 937 cmd->result = rval; 938 ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3076, 939 "fc_remote_port_chkready failed for cmd=%p, rval=0x%x.\n", 940 cmd, rval); 941 goto qc24_fail_command; 942 } 943 944 if (!fcport) { 945 cmd->result = DID_NO_CONNECT << 16; 946 goto qc24_fail_command; 947 } 948 949 if (atomic_read(&fcport->state) != FCS_ONLINE) { 950 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD || 951 atomic_read(&base_vha->loop_state) == LOOP_DEAD) { 952 ql_dbg(ql_dbg_io, vha, 0x3077, 953 "Returning DNC, fcport_state=%d loop_state=%d.\n", 954 atomic_read(&fcport->state), 955 atomic_read(&base_vha->loop_state)); 956 cmd->result = DID_NO_CONNECT << 16; 957 goto qc24_fail_command; 958 } 959 goto qc24_target_busy; 960 } 961 962 /* 963 * Return target busy if we've received a non-zero retry_delay_timer 964 * in a FCP_RSP. 965 */ 966 if (fcport->retry_delay_timestamp == 0) { 967 /* retry delay not set */ 968 } else if (time_after(jiffies, fcport->retry_delay_timestamp)) 969 fcport->retry_delay_timestamp = 0; 970 else 971 goto qc24_target_busy; 972 973 sp = scsi_cmd_priv(cmd); 974 qla2xxx_init_sp(sp, vha, qpair, fcport); 975 976 sp->u.scmd.cmd = cmd; 977 sp->type = SRB_SCSI_CMD; 978 CMD_SP(cmd) = (void *)sp; 979 sp->free = qla2xxx_qpair_sp_free_dma; 980 sp->done = qla2xxx_qpair_sp_compl; 981 982 rval = ha->isp_ops->start_scsi_mq(sp); 983 if (rval != QLA_SUCCESS) { 984 ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3078, 985 "Start scsi failed rval=%d for cmd=%p.\n", rval, cmd); 986 if (rval == QLA_INTERFACE_ERROR) 987 goto qc24_free_sp_fail_command; 988 goto qc24_host_busy_free_sp; 989 } 990 991 return 0; 992 993 qc24_host_busy_free_sp: 994 sp->free(sp); 995 996 qc24_target_busy: 997 return SCSI_MLQUEUE_TARGET_BUSY; 998 999 qc24_free_sp_fail_command: 1000 sp->free(sp); 1001 CMD_SP(cmd) = NULL; 1002 qla2xxx_rel_qpair_sp(sp->qpair, sp); 1003 1004 qc24_fail_command: 1005 cmd->scsi_done(cmd); 1006 1007 return 
0; 1008 } 1009 1010 /* 1011 * qla2x00_eh_wait_on_command 1012 * Waits for the command to be returned by the Firmware for some 1013 * max time. 1014 * 1015 * Input: 1016 * cmd = Scsi Command to wait on. 1017 * 1018 * Return: 1019 * Completed in time : QLA_SUCCESS 1020 * Did not complete in time : QLA_FUNCTION_FAILED 1021 */ 1022 static int 1023 qla2x00_eh_wait_on_command(struct scsi_cmnd *cmd) 1024 { 1025 #define ABORT_POLLING_PERIOD 1000 1026 #define ABORT_WAIT_ITER ((2 * 1000) / (ABORT_POLLING_PERIOD)) 1027 unsigned long wait_iter = ABORT_WAIT_ITER; 1028 scsi_qla_host_t *vha = shost_priv(cmd->device->host); 1029 struct qla_hw_data *ha = vha->hw; 1030 int ret = QLA_SUCCESS; 1031 1032 if (unlikely(pci_channel_offline(ha->pdev)) || ha->flags.eeh_busy) { 1033 ql_dbg(ql_dbg_taskm, vha, 0x8005, 1034 "Return:eh_wait.\n"); 1035 return ret; 1036 } 1037 1038 while (CMD_SP(cmd) && wait_iter--) { 1039 msleep(ABORT_POLLING_PERIOD); 1040 } 1041 if (CMD_SP(cmd)) 1042 ret = QLA_FUNCTION_FAILED; 1043 1044 return ret; 1045 } 1046 1047 /* 1048 * qla2x00_wait_for_hba_online 1049 * Wait till the HBA is online after going through 1050 * <= MAX_RETRIES_OF_ISP_ABORT or 1051 * finally HBA is disabled ie marked offline 1052 * 1053 * Input: 1054 * ha - pointer to host adapter structure 1055 * 1056 * Note: 1057 * Does context switching-Release SPIN_LOCK 1058 * (if any) before calling this routine. 
1059 * 1060 * Return: 1061 * Success (Adapter is online) : 0 1062 * Failed (Adapter is offline/disabled) : 1 1063 */ 1064 int 1065 qla2x00_wait_for_hba_online(scsi_qla_host_t *vha) 1066 { 1067 int return_status; 1068 unsigned long wait_online; 1069 struct qla_hw_data *ha = vha->hw; 1070 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 1071 1072 wait_online = jiffies + (MAX_LOOP_TIMEOUT * HZ); 1073 while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) || 1074 test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) || 1075 test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) || 1076 ha->dpc_active) && time_before(jiffies, wait_online)) { 1077 1078 msleep(1000); 1079 } 1080 if (base_vha->flags.online) 1081 return_status = QLA_SUCCESS; 1082 else 1083 return_status = QLA_FUNCTION_FAILED; 1084 1085 return (return_status); 1086 } 1087 1088 static inline int test_fcport_count(scsi_qla_host_t *vha) 1089 { 1090 struct qla_hw_data *ha = vha->hw; 1091 unsigned long flags; 1092 int res; 1093 1094 spin_lock_irqsave(&ha->tgt.sess_lock, flags); 1095 ql_dbg(ql_dbg_init, vha, 0x00ec, 1096 "tgt %p, fcport_count=%d\n", 1097 vha, vha->fcport_count); 1098 res = (vha->fcport_count == 0); 1099 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 1100 1101 return res; 1102 } 1103 1104 /* 1105 * qla2x00_wait_for_sess_deletion can only be called from remove_one. 
 * it has dependency on UNLOADING flag to stop device discovery
 */
void
qla2x00_wait_for_sess_deletion(scsi_qla_host_t *vha)
{
	u8 i;

	qla2x00_mark_all_devices_lost(vha, 0);

	/* Wait up to 10 x 1s for fcport_count to drop to zero. */
	for (i = 0; i < 10; i++) {
		if (wait_event_timeout(vha->fcport_waitQ,
		    test_fcport_count(vha), HZ) > 0)
			break;
	}

	flush_workqueue(vha->hw->wq);
}

/*
 * qla2x00_wait_for_hba_ready
 *    Wait till the HBA is ready before doing driver unload
 *
 * Input:
 *     ha - pointer to host adapter structure
 *
 * Note:
 *    Does context switching-Release SPIN_LOCK
 *    (if any) before calling this routine.
 *
 */
static void
qla2x00_wait_for_hba_ready(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	while ((qla2x00_reset_active(vha) || ha->dpc_active ||
	    ha->flags.mbox_busy) ||
	    test_bit(FX00_RESET_RECOVERY, &vha->dpc_flags) ||
	    test_bit(FX00_TARGET_SCAN, &vha->dpc_flags)) {
		/* Stop waiting once driver unload has been flagged. */
		if (test_bit(UNLOADING, &base_vha->dpc_flags))
			break;
		msleep(1000);
	}
}

/* Wait (up to MAX_LOOP_TIMEOUT) for a chip reset to complete. */
int
qla2x00_wait_for_chip_reset(scsi_qla_host_t *vha)
{
	int return_status;
	unsigned long wait_reset;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	wait_reset = jiffies + (MAX_LOOP_TIMEOUT * HZ);
	while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) ||
	    test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
	    ha->dpc_active) && time_before(jiffies, wait_reset)) {

		msleep(1000);

		/* Bail out early once the reset has actually completed. */
		if (!test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags) &&
		    ha->flags.chip_reset_done)
			break;
	}
	if (ha->flags.chip_reset_done)
		return_status = QLA_SUCCESS;
	else
		return_status = QLA_FUNCTION_FAILED;

	return
return_status; 1178 } 1179 1180 #define ISP_REG_DISCONNECT 0xffffffffU 1181 /************************************************************************** 1182 * qla2x00_isp_reg_stat 1183 * 1184 * Description: 1185 * Read the host status register of ISP before aborting the command. 1186 * 1187 * Input: 1188 * ha = pointer to host adapter structure. 1189 * 1190 * 1191 * Returns: 1192 * Either true or false. 1193 * 1194 * Note: Return true if there is register disconnect. 1195 **************************************************************************/ 1196 static inline 1197 uint32_t qla2x00_isp_reg_stat(struct qla_hw_data *ha) 1198 { 1199 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 1200 struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82; 1201 1202 if (IS_P3P_TYPE(ha)) 1203 return ((RD_REG_DWORD(®82->host_int)) == ISP_REG_DISCONNECT); 1204 else 1205 return ((RD_REG_DWORD(®->host_status)) == 1206 ISP_REG_DISCONNECT); 1207 } 1208 1209 /************************************************************************** 1210 * qla2xxx_eh_abort 1211 * 1212 * Description: 1213 * The abort function will abort the specified command. 1214 * 1215 * Input: 1216 * cmd = Linux SCSI command packet to be aborted. 1217 * 1218 * Returns: 1219 * Either SUCCESS or FAILED. 1220 * 1221 * Note: 1222 * Only return FAILED if command not returned by firmware. 
1223 **************************************************************************/ 1224 static int 1225 qla2xxx_eh_abort(struct scsi_cmnd *cmd) 1226 { 1227 scsi_qla_host_t *vha = shost_priv(cmd->device->host); 1228 DECLARE_COMPLETION_ONSTACK(comp); 1229 srb_t *sp; 1230 int ret; 1231 unsigned int id; 1232 uint64_t lun; 1233 int rval; 1234 struct qla_hw_data *ha = vha->hw; 1235 uint32_t ratov_j; 1236 struct qla_qpair *qpair; 1237 unsigned long flags; 1238 1239 if (qla2x00_isp_reg_stat(ha)) { 1240 ql_log(ql_log_info, vha, 0x8042, 1241 "PCI/Register disconnect, exiting.\n"); 1242 return FAILED; 1243 } 1244 1245 ret = fc_block_scsi_eh(cmd); 1246 if (ret != 0) 1247 return ret; 1248 1249 sp = scsi_cmd_priv(cmd); 1250 qpair = sp->qpair; 1251 1252 if ((sp->fcport && sp->fcport->deleted) || !qpair) 1253 return SUCCESS; 1254 1255 spin_lock_irqsave(qpair->qp_lock_ptr, flags); 1256 if (sp->completed) { 1257 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); 1258 return SUCCESS; 1259 } 1260 1261 if (sp->abort || sp->aborted) { 1262 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); 1263 return FAILED; 1264 } 1265 1266 sp->abort = 1; 1267 sp->comp = ∁ 1268 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); 1269 1270 1271 id = cmd->device->id; 1272 lun = cmd->device->lun; 1273 1274 ql_dbg(ql_dbg_taskm, vha, 0x8002, 1275 "Aborting from RISC nexus=%ld:%d:%llu sp=%p cmd=%p handle=%x\n", 1276 vha->host_no, id, lun, sp, cmd, sp->handle); 1277 1278 /* 1279 * Abort will release the original Command/sp from FW. Let the 1280 * original command call scsi_done. In return, he will wakeup 1281 * this sleeping thread. 1282 */ 1283 rval = ha->isp_ops->abort_command(sp); 1284 1285 ql_dbg(ql_dbg_taskm, vha, 0x8003, 1286 "Abort command mbx cmd=%p, rval=%x.\n", cmd, rval); 1287 1288 /* Wait for the command completion. 
 */
	ratov_j = ha->r_a_tov/10 * 4 * 1000;
	ratov_j = msecs_to_jiffies(ratov_j);
	switch (rval) {
	case QLA_SUCCESS:
		/* A zero return means the 4 * R_A_TOV wait timed out. */
		if (!wait_for_completion_timeout(&comp, ratov_j)) {
			ql_dbg(ql_dbg_taskm, vha, 0xffff,
			    "%s: Abort wait timer (4 * R_A_TOV[%d]) expired\n",
			    __func__, ha->r_a_tov/10);
			ret = FAILED;
		} else {
			ret = SUCCESS;
		}
		break;
	default:
		ret = FAILED;
		break;
	}

	sp->comp = NULL;

	ql_log(ql_log_info, vha, 0x801c,
	    "Abort command issued nexus=%ld:%d:%llu -- %x.\n",
	    vha->host_no, id, lun, ret);

	return ret;
}

/*
 * Returns: QLA_SUCCESS or QLA_FUNCTION_FAILED.
 */
int
qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
	uint64_t l, enum nexus_wait_type type)
{
	int cnt, match, status;
	unsigned long flags;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req;
	srb_t *sp;
	struct scsi_cmnd *cmd;

	status = QLA_SUCCESS;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	req = vha->req;
	for (cnt = 1; status == QLA_SUCCESS &&
	    cnt < req->num_outstanding_cmds; cnt++) {
		sp = req->outstanding_cmds[cnt];
		if (!sp)
			continue;
		if (sp->type != SRB_SCSI_CMD)
			continue;
		if (vha->vp_idx != sp->vha->vp_idx)
			continue;
		/* Match the requested nexus (host, target or lun). */
		match = 0;
		cmd = GET_CMD_SP(sp);
		switch (type) {
		case WAIT_HOST:
			match = 1;
			break;
		case WAIT_TARGET:
			match = cmd->device->id == t;
			break;
		case WAIT_LUN:
			match = (cmd->device->id == t &&
			    cmd->device->lun == l);
			break;
		}
		if (!match)
			continue;

		/* Drop the hardware lock while sleeping on the command. */
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		status = qla2x00_eh_wait_on_command(cmd);
		spin_lock_irqsave(&ha->hardware_lock, flags);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return status;
}

/* Indexed by the 'err' stage counter in __qla2xxx_eh_generic_reset(). */
static char *reset_errors[] = {
	"HBA not online",
	"HBA not ready",
	"Task management failed",
	"Waiting for command completions",
};

/*
 * Common body for the device/target reset EH handlers: block on the
 * rport state, wait for the HBA, invoke @do_reset, then drain pending
 * commands on the matching nexus.  'err' indexes reset_errors[] when a
 * stage fails.
 */
static int
__qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
    struct scsi_cmnd *cmd, int (*do_reset)(struct fc_port *, uint64_t, int))
{
	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
	fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
	int err;

	if (!fcport) {
		return FAILED;
	}

	err = fc_block_scsi_eh(cmd);
	if (err != 0)
		return err;

	/* Session already torn down: nothing left to reset. */
	if (fcport->deleted)
		return SUCCESS;

	ql_log(ql_log_info, vha, 0x8009,
	    "%s RESET ISSUED nexus=%ld:%d:%llu cmd=%p.\n", name, vha->host_no,
	    cmd->device->id, cmd->device->lun, cmd);

	err = 0;
	if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x800a,
		    "Wait for hba online failed for cmd=%p.\n", cmd);
		goto eh_reset_failed;
	}
	err = 2;
	if (do_reset(fcport, cmd->device->lun, 1)
	    != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x800c,
		    "do_reset failed for cmd=%p.\n", cmd);
		goto eh_reset_failed;
	}
	err = 3;
	if (qla2x00_eh_wait_for_pending_commands(vha, cmd->device->id,
	    cmd->device->lun, type) != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x800d,
		    "wait for pending cmds failed for cmd=%p.\n", cmd);
		goto eh_reset_failed;
	}

	ql_log(ql_log_info, vha, 0x800e,
	    "%s RESET SUCCEEDED nexus:%ld:%d:%llu cmd=%p.\n", name,
	    vha->host_no, cmd->device->id, cmd->device->lun, cmd);

	return SUCCESS;

eh_reset_failed:
	ql_log(ql_log_info, vha, 0x800f,
	    "%s RESET FAILED: %s nexus=%ld:%d:%llu cmd=%p.\n", name,
	    reset_errors[err], vha->host_no, cmd->device->id, cmd->device->lun,
	    cmd);
	return FAILED;
}

/* SCSI EH: reset the LUN that owns @cmd. */
static int
qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
{
	scsi_qla_host_t *vha =
shost_priv(cmd->device->host);
	struct qla_hw_data *ha = vha->hw;

	if (qla2x00_isp_reg_stat(ha)) {
		ql_log(ql_log_info, vha, 0x803e,
		    "PCI/Register disconnect, exiting.\n");
		return FAILED;
	}

	return __qla2xxx_eh_generic_reset("DEVICE", WAIT_LUN, cmd,
	    ha->isp_ops->lun_reset);
}

/* SCSI EH: reset the target that owns @cmd. */
static int
qla2xxx_eh_target_reset(struct scsi_cmnd *cmd)
{
	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
	struct qla_hw_data *ha = vha->hw;

	if (qla2x00_isp_reg_stat(ha)) {
		ql_log(ql_log_info, vha, 0x803f,
		    "PCI/Register disconnect, exiting.\n");
		return FAILED;
	}

	return __qla2xxx_eh_generic_reset("TARGET", WAIT_TARGET, cmd,
	    ha->isp_ops->target_reset);
}

/**************************************************************************
 * qla2xxx_eh_bus_reset
 *
 * Description:
 *    The bus reset function will reset the bus and abort any executing
 *    commands.
 *
 * Input:
 *    cmd = Linux SCSI command packet of the command that cause the
 *          bus reset.
 *
 * Returns:
 *    SUCCESS/FAILURE (defined as macro in scsi.h).
 *
 **************************************************************************/
static int
qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
{
	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
	fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
	int ret = FAILED;
	unsigned int id;
	uint64_t lun;
	struct qla_hw_data *ha = vha->hw;

	if (qla2x00_isp_reg_stat(ha)) {
		ql_log(ql_log_info, vha, 0x8040,
		    "PCI/Register disconnect, exiting.\n");
		return FAILED;
	}

	id = cmd->device->id;
	lun = cmd->device->lun;

	if (!fcport) {
		return ret;
	}

	ret = fc_block_scsi_eh(cmd);
	if (ret != 0)
		return ret;
	ret = FAILED;

	/* Chip already down: loop reset would be pointless. */
	if (qla2x00_chip_is_down(vha))
		return ret;

	ql_log(ql_log_info, vha, 0x8012,
	    "BUS RESET ISSUED nexus=%ld:%d:%llu.\n", vha->host_no, id, lun);

	if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
		ql_log(ql_log_fatal, vha, 0x8013,
		    "Wait for hba online failed board disabled.\n");
		goto eh_bus_reset_done;
	}

	if (qla2x00_loop_reset(vha) == QLA_SUCCESS)
		ret = SUCCESS;

	if (ret == FAILED)
		goto eh_bus_reset_done;

	/* Flush outstanding commands. */
	if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST) !=
	    QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x8014,
		    "Wait for pending commands failed.\n");
		ret = FAILED;
	}

eh_bus_reset_done:
	ql_log(ql_log_warn, vha, 0x802b,
	    "BUS RESET %s nexus=%ld:%d:%llu.\n",
	    (ret == FAILED) ? "FAILED" : "SUCCEEDED", vha->host_no, id, lun);

	return ret;
}

/**************************************************************************
 * qla2xxx_eh_host_reset
 *
 * Description:
 *    The reset function will reset the Adapter.
 *
 * Input:
 *    cmd = Linux SCSI command packet of the command that cause the
 *          adapter reset.
 *
 * Returns:
 *    Either SUCCESS or FAILED.
 *
 * Note:
 **************************************************************************/
static int
qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
{
	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
	struct qla_hw_data *ha = vha->hw;
	int ret = FAILED;
	unsigned int id;
	uint64_t lun;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	if (qla2x00_isp_reg_stat(ha)) {
		ql_log(ql_log_info, vha, 0x8041,
		    "PCI/Register disconnect, exiting.\n");
		/* Board is gone; disable it and claim success to unblock EH. */
		schedule_work(&ha->board_disable);
		return SUCCESS;
	}

	id = cmd->device->id;
	lun = cmd->device->lun;

	ql_log(ql_log_info, vha, 0x8018,
	    "ADAPTER RESET ISSUED nexus=%ld:%d:%llu.\n", vha->host_no, id, lun);

	/*
	 * No point in issuing another reset if one is active.  Also do not
	 * attempt a reset if we are updating flash.
	 */
	if (qla2x00_reset_active(vha) || ha->optrom_state != QLA_SWAITING)
		goto eh_host_reset_lock;

	if (vha != base_vha) {
		if (qla2x00_vp_abort_isp(vha))
			goto eh_host_reset_lock;
	} else {
		if (IS_P3P_TYPE(vha->hw)) {
			if (!qla82xx_fcoe_ctx_reset(vha)) {
				/* Ctx reset success */
				ret = SUCCESS;
				goto eh_host_reset_lock;
			}
			/* fall thru if ctx reset failed */
		}
		if (ha->wq)
			flush_workqueue(ha->wq);

		set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
		if (ha->isp_ops->abort_isp(base_vha)) {
			clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
			/* failed.
schedule dpc to try */
			set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);

			if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
				ql_log(ql_log_warn, vha, 0x802a,
				    "wait for hba online failed.\n");
				goto eh_host_reset_lock;
			}
		}
		clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
	}

	/* Waiting for command to be returned to OS.*/
	if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST) ==
	    QLA_SUCCESS)
		ret = SUCCESS;

eh_host_reset_lock:
	ql_log(ql_log_info, vha, 0x8017,
	    "ADAPTER RESET %s nexus=%ld:%d:%llu.\n",
	    (ret == FAILED) ? "FAILED" : "SUCCEEDED", vha->host_no, id, lun);

	return ret;
}

/*
 * qla2x00_loop_reset
 *      Issue loop reset.
 *
 * Input:
 *      ha = adapter block pointer.
 *
 * Returns:
 *      0 = success
 */
int
qla2x00_loop_reset(scsi_qla_host_t *vha)
{
	int ret;
	struct fc_port *fcport;
	struct qla_hw_data *ha = vha->hw;

	if (IS_QLAFX00(ha)) {
		return qlafx00_loop_reset(vha);
	}

	/* Optionally reset every target port before the loop reset. */
	if (ql2xtargetreset == 1 && ha->flags.enable_target_reset) {
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			if (fcport->port_type != FCT_TARGET)
				continue;

			ret = ha->isp_ops->target_reset(fcport, 0, 0);
			if (ret != QLA_SUCCESS) {
				ql_dbg(ql_dbg_taskm, vha, 0x802c,
				    "Bus Reset failed: Reset=%d "
				    "d_id=%x.\n", ret, fcport->d_id.b24);
			}
		}
	}


	if (ha->flags.enable_lip_full_login && !IS_CNA_CAPABLE(ha)) {
		atomic_set(&vha->loop_state, LOOP_DOWN);
		atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
		qla2x00_mark_all_devices_lost(vha, 0);
		ret = qla2x00_full_login_lip(vha);
		if (ret != QLA_SUCCESS) {
			ql_dbg(ql_dbg_taskm, vha, 0x802d,
			    "full_login_lip=%d.\n", ret);
		}
	}

	if (ha->flags.enable_lip_reset) {
		ret = qla2x00_lip_reset(vha);
		if (ret != QLA_SUCCESS)
1681 ql_dbg(ql_dbg_taskm, vha, 0x802e, 1682 "lip_reset failed (%d).\n", ret); 1683 } 1684 1685 /* Issue marker command only when we are going to start the I/O */ 1686 vha->marker_needed = 1; 1687 1688 return QLA_SUCCESS; 1689 } 1690 1691 static void qla2x00_abort_srb(struct qla_qpair *qp, srb_t *sp, const int res, 1692 unsigned long *flags) 1693 __releases(qp->qp_lock_ptr) 1694 __acquires(qp->qp_lock_ptr) 1695 { 1696 DECLARE_COMPLETION_ONSTACK(comp); 1697 scsi_qla_host_t *vha = qp->vha; 1698 struct qla_hw_data *ha = vha->hw; 1699 int rval; 1700 bool ret_cmd; 1701 uint32_t ratov_j; 1702 1703 if (qla2x00_chip_is_down(vha)) { 1704 sp->done(sp, res); 1705 return; 1706 } 1707 1708 if (sp->type == SRB_NVME_CMD || sp->type == SRB_NVME_LS || 1709 (sp->type == SRB_SCSI_CMD && !ha->flags.eeh_busy && 1710 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) && 1711 !qla2x00_isp_reg_stat(ha))) { 1712 if (sp->comp) { 1713 sp->done(sp, res); 1714 return; 1715 } 1716 1717 sp->comp = ∁ 1718 sp->abort = 1; 1719 spin_unlock_irqrestore(qp->qp_lock_ptr, *flags); 1720 1721 rval = ha->isp_ops->abort_command(sp); 1722 /* Wait for command completion. 
*/ 1723 ret_cmd = false; 1724 ratov_j = ha->r_a_tov/10 * 4 * 1000; 1725 ratov_j = msecs_to_jiffies(ratov_j); 1726 switch (rval) { 1727 case QLA_SUCCESS: 1728 if (wait_for_completion_timeout(&comp, ratov_j)) { 1729 ql_dbg(ql_dbg_taskm, vha, 0xffff, 1730 "%s: Abort wait timer (4 * R_A_TOV[%d]) expired\n", 1731 __func__, ha->r_a_tov/10); 1732 ret_cmd = true; 1733 } 1734 /* else FW return SP to driver */ 1735 break; 1736 default: 1737 ret_cmd = true; 1738 break; 1739 } 1740 1741 spin_lock_irqsave(qp->qp_lock_ptr, *flags); 1742 if (ret_cmd && (!sp->completed || !sp->aborted)) 1743 sp->done(sp, res); 1744 } else { 1745 sp->done(sp, res); 1746 } 1747 } 1748 1749 static void 1750 __qla2x00_abort_all_cmds(struct qla_qpair *qp, int res) 1751 { 1752 int cnt; 1753 unsigned long flags; 1754 srb_t *sp; 1755 scsi_qla_host_t *vha = qp->vha; 1756 struct qla_hw_data *ha = vha->hw; 1757 struct req_que *req; 1758 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 1759 struct qla_tgt_cmd *cmd; 1760 1761 if (!ha->req_q_map) 1762 return; 1763 spin_lock_irqsave(qp->qp_lock_ptr, flags); 1764 req = qp->req; 1765 for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) { 1766 sp = req->outstanding_cmds[cnt]; 1767 if (sp) { 1768 switch (sp->cmd_type) { 1769 case TYPE_SRB: 1770 qla2x00_abort_srb(qp, sp, res, &flags); 1771 break; 1772 case TYPE_TGT_CMD: 1773 if (!vha->hw->tgt.tgt_ops || !tgt || 1774 qla_ini_mode_enabled(vha)) { 1775 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf003, 1776 "HOST-ABORT-HNDLR: dpc_flags=%lx. Target mode disabled\n", 1777 vha->dpc_flags); 1778 continue; 1779 } 1780 cmd = (struct qla_tgt_cmd *)sp; 1781 cmd->aborted = 1; 1782 break; 1783 case TYPE_TGT_TMCMD: 1784 /* Skip task management functions. 
				 */
				break;
			default:
				break;
			}
			req->outstanding_cmds[cnt] = NULL;
		}
	}
	spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
}

/* Flush every outstanding command on all queue pairs with status @res. */
void
qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
{
	int que;
	struct qla_hw_data *ha = vha->hw;

	/* Continue only if initialization complete. */
	if (!ha->base_qpair)
		return;
	__qla2x00_abort_all_cmds(ha->base_qpair, res);

	if (!ha->queue_pair_map)
		return;
	for (que = 0; que < ha->max_qpairs; que++) {
		if (!ha->queue_pair_map[que])
			continue;

		__qla2x00_abort_all_cmds(ha->queue_pair_map[que], res);
	}
}

/* scsi_host_template->slave_alloc: bind sdev to its fc_port. */
static int
qla2xxx_slave_alloc(struct scsi_device *sdev)
{
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));

	if (!rport || fc_remote_port_chkready(rport))
		return -ENXIO;

	sdev->hostdata = *(fc_port_t **)rport->dd_data;

	return 0;
}

/* scsi_host_template->slave_configure: DMA alignment and queue depth. */
static int
qla2xxx_slave_configure(struct scsi_device *sdev)
{
	scsi_qla_host_t *vha = shost_priv(sdev->host);
	struct req_que *req = vha->req;

	if (IS_T10_PI_CAPABLE(vha->hw))
		blk_queue_update_dma_alignment(sdev->request_queue, 0x7);

	scsi_change_queue_depth(sdev, req->max_q_depth);
	return 0;
}

/* scsi_host_template->slave_destroy: drop the fc_port binding. */
static void
qla2xxx_slave_destroy(struct scsi_device *sdev)
{
	sdev->hostdata = NULL;
}

/**
 * qla2x00_config_dma_addressing() - Configure OS DMA addressing method.
 * @ha: HA context
 *
 * At exit, the @ha's flags.enable_64bit_addressing set to indicated
 * supported addressing method.
 */
static void
qla2x00_config_dma_addressing(struct qla_hw_data *ha)
{
	/* Assume a 32bit DMA mask. */
	ha->flags.enable_64bit_addressing = 0;

	if (!dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(64))) {
		/* Any upper-dword bits set?
*/ 1863 if (MSD(dma_get_required_mask(&ha->pdev->dev)) && 1864 !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) { 1865 /* Ok, a 64bit DMA mask is applicable. */ 1866 ha->flags.enable_64bit_addressing = 1; 1867 ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64; 1868 ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64; 1869 return; 1870 } 1871 } 1872 1873 dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(32)); 1874 pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(32)); 1875 } 1876 1877 static void 1878 qla2x00_enable_intrs(struct qla_hw_data *ha) 1879 { 1880 unsigned long flags = 0; 1881 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1882 1883 spin_lock_irqsave(&ha->hardware_lock, flags); 1884 ha->interrupts_on = 1; 1885 /* enable risc and host interrupts */ 1886 WRT_REG_WORD(®->ictrl, ICR_EN_INT | ICR_EN_RISC); 1887 RD_REG_WORD(®->ictrl); 1888 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1889 1890 } 1891 1892 static void 1893 qla2x00_disable_intrs(struct qla_hw_data *ha) 1894 { 1895 unsigned long flags = 0; 1896 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1897 1898 spin_lock_irqsave(&ha->hardware_lock, flags); 1899 ha->interrupts_on = 0; 1900 /* disable risc and host interrupts */ 1901 WRT_REG_WORD(®->ictrl, 0); 1902 RD_REG_WORD(®->ictrl); 1903 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1904 } 1905 1906 static void 1907 qla24xx_enable_intrs(struct qla_hw_data *ha) 1908 { 1909 unsigned long flags = 0; 1910 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 1911 1912 spin_lock_irqsave(&ha->hardware_lock, flags); 1913 ha->interrupts_on = 1; 1914 WRT_REG_DWORD(®->ictrl, ICRX_EN_RISC_INT); 1915 RD_REG_DWORD(®->ictrl); 1916 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1917 } 1918 1919 static void 1920 qla24xx_disable_intrs(struct qla_hw_data *ha) 1921 { 1922 unsigned long flags = 0; 1923 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 1924 1925 if (IS_NOPOLLING_TYPE(ha)) 1926 return; 1927 
spin_lock_irqsave(&ha->hardware_lock, flags); 1928 ha->interrupts_on = 0; 1929 WRT_REG_DWORD(®->ictrl, 0); 1930 RD_REG_DWORD(®->ictrl); 1931 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1932 } 1933 1934 static int 1935 qla2x00_iospace_config(struct qla_hw_data *ha) 1936 { 1937 resource_size_t pio; 1938 uint16_t msix; 1939 1940 if (pci_request_selected_regions(ha->pdev, ha->bars, 1941 QLA2XXX_DRIVER_NAME)) { 1942 ql_log_pci(ql_log_fatal, ha->pdev, 0x0011, 1943 "Failed to reserve PIO/MMIO regions (%s), aborting.\n", 1944 pci_name(ha->pdev)); 1945 goto iospace_error_exit; 1946 } 1947 if (!(ha->bars & 1)) 1948 goto skip_pio; 1949 1950 /* We only need PIO for Flash operations on ISP2312 v2 chips. */ 1951 pio = pci_resource_start(ha->pdev, 0); 1952 if (pci_resource_flags(ha->pdev, 0) & IORESOURCE_IO) { 1953 if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) { 1954 ql_log_pci(ql_log_warn, ha->pdev, 0x0012, 1955 "Invalid pci I/O region size (%s).\n", 1956 pci_name(ha->pdev)); 1957 pio = 0; 1958 } 1959 } else { 1960 ql_log_pci(ql_log_warn, ha->pdev, 0x0013, 1961 "Region #0 no a PIO resource (%s).\n", 1962 pci_name(ha->pdev)); 1963 pio = 0; 1964 } 1965 ha->pio_address = pio; 1966 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0014, 1967 "PIO address=%llu.\n", 1968 (unsigned long long)ha->pio_address); 1969 1970 skip_pio: 1971 /* Use MMIO operations for all accesses. 
 */
	if (!(pci_resource_flags(ha->pdev, 1) & IORESOURCE_MEM)) {
		ql_log_pci(ql_log_fatal, ha->pdev, 0x0015,
		    "Region #1 not an MMIO resource (%s), aborting.\n",
		    pci_name(ha->pdev));
		goto iospace_error_exit;
	}
	if (pci_resource_len(ha->pdev, 1) < MIN_IOBASE_LEN) {
		ql_log_pci(ql_log_fatal, ha->pdev, 0x0016,
		    "Invalid PCI mem region size (%s), aborting.\n",
		    pci_name(ha->pdev));
		goto iospace_error_exit;
	}

	ha->iobase = ioremap(pci_resource_start(ha->pdev, 1), MIN_IOBASE_LEN);
	if (!ha->iobase) {
		ql_log_pci(ql_log_fatal, ha->pdev, 0x0017,
		    "Cannot remap MMIO (%s), aborting.\n",
		    pci_name(ha->pdev));
		goto iospace_error_exit;
	}

	/* Determine queue resources */
	ha->max_req_queues = ha->max_rsp_queues = 1;
	ha->msix_count = QLA_BASE_VECTORS;
	/* Multiqueue only applies to MQ/NVMe-enabled 25xx/81xx parts. */
	if (!ql2xmqsupport || !ql2xnvmeenable ||
	    (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
		goto mqiobase_exit;

	ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3),
	    pci_resource_len(ha->pdev, 3));
	if (ha->mqiobase) {
		ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0018,
		    "MQIO Base=%p.\n", ha->mqiobase);
		/* Read MSIX vector size of the board */
		pci_read_config_word(ha->pdev, QLA_PCI_MSIX_CONTROL, &msix);
		ha->msix_count = msix + 1;
		/* Max queues are bounded by available msix vectors */
		/* MB interrupt uses 1 vector */
		ha->max_req_queues = ha->msix_count - 1;
		ha->max_rsp_queues = ha->max_req_queues;
		/* Queue pairs is the max value minus the base queue pair */
		ha->max_qpairs = ha->max_rsp_queues - 1;
		ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0188,
		    "Max no of queues pairs: %d.\n", ha->max_qpairs);

		ql_log_pci(ql_log_info, ha->pdev, 0x001a,
		    "MSI-X vector count: %d.\n", ha->msix_count);
	} else
		ql_log_pci(ql_log_info, ha->pdev, 0x001b,
		    "BAR 3 not enabled.\n");

mqiobase_exit:
	ql_dbg_pci(ql_dbg_init, ha->pdev, 0x001c,
	    "MSIX Count: %d.\n", ha->msix_count);
	return (0);

iospace_error_exit:
	return (-ENOMEM);
}


/* Map MMIO/MQ/MSI-X BARs and size queue resources for ISP83xx parts. */
static int
qla83xx_iospace_config(struct qla_hw_data *ha)
{
	uint16_t msix;

	if (pci_request_selected_regions(ha->pdev, ha->bars,
	    QLA2XXX_DRIVER_NAME)) {
		ql_log_pci(ql_log_fatal, ha->pdev, 0x0117,
		    "Failed to reserve PIO/MMIO regions (%s), aborting.\n",
		    pci_name(ha->pdev));

		goto iospace_error_exit;
	}

	/* Use MMIO operations for all accesses. */
	if (!(pci_resource_flags(ha->pdev, 0) & IORESOURCE_MEM)) {
		ql_log_pci(ql_log_warn, ha->pdev, 0x0118,
		    "Invalid pci I/O region size (%s).\n",
		    pci_name(ha->pdev));
		goto iospace_error_exit;
	}
	if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) {
		ql_log_pci(ql_log_warn, ha->pdev, 0x0119,
		    "Invalid PCI mem region size (%s), aborting\n",
		    pci_name(ha->pdev));
		goto iospace_error_exit;
	}

	ha->iobase = ioremap(pci_resource_start(ha->pdev, 0), MIN_IOBASE_LEN);
	if (!ha->iobase) {
		ql_log_pci(ql_log_fatal, ha->pdev, 0x011a,
		    "Cannot remap MMIO (%s), aborting.\n",
		    pci_name(ha->pdev));
		goto iospace_error_exit;
	}

	/* 64bit PCI BAR - BAR2 will correspond to region 4 */
	/* 83XX 26XX always use MQ type access for queues
	 * - mbar 2, a.k.a region 4 */
	ha->max_req_queues = ha->max_rsp_queues = 1;
	ha->msix_count = QLA_BASE_VECTORS;
	ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 4),
	    pci_resource_len(ha->pdev, 4));

	if (!ha->mqiobase) {
		ql_log_pci(ql_log_fatal, ha->pdev, 0x011d,
		    "BAR2/region4 not enabled\n");
		goto mqiobase_exit;
	}

	ha->msixbase = ioremap(pci_resource_start(ha->pdev, 2),
	    pci_resource_len(ha->pdev, 2));
	if (ha->msixbase) {
		/* Read MSIX vector size of the board */
		pci_read_config_word(ha->pdev,
		    QLA_83XX_PCI_MSIX_CONTROL, &msix);
		ha->msix_count = (msix & PCI_MSIX_FLAGS_QSIZE) + 1;
		/*
		 * By default, driver uses at least two msix vectors
		 * (default & rspq)
		 */
		if (ql2xmqsupport || ql2xnvmeenable) {
			/* MB interrupt uses 1 vector */
			ha->max_req_queues = ha->msix_count - 1;

			/* ATIOQ needs 1 vector. That's 1 less QPair */
			if (QLA_TGT_MODE_ENABLED())
				ha->max_req_queues--;

			ha->max_rsp_queues = ha->max_req_queues;

			/* Queue pairs is the max value minus
			 * the base queue pair */
			ha->max_qpairs = ha->max_req_queues - 1;
			ql_dbg_pci(ql_dbg_init, ha->pdev, 0x00e3,
			    "Max no of queues pairs: %d.\n", ha->max_qpairs);
		}
		ql_log_pci(ql_log_info, ha->pdev, 0x011c,
		    "MSI-X vector count: %d.\n", ha->msix_count);
	} else
		ql_log_pci(ql_log_info, ha->pdev, 0x011e,
		    "BAR 1 not enabled.\n");

mqiobase_exit:
	ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011f,
	    "MSIX Count: %d.\n", ha->msix_count);
	return 0;

iospace_error_exit:
	return -ENOMEM;
}

/* HW method table for ISP2100 adapters. */
static struct isp_operations qla2100_isp_ops = {
	.pci_config		= qla2100_pci_config,
	.reset_chip		= qla2x00_reset_chip,
	.chip_diag		= qla2x00_chip_diag,
	.config_rings		= qla2x00_config_rings,
	.reset_adapter		= qla2x00_reset_adapter,
	.nvram_config		= qla2x00_nvram_config,
	.update_fw_options	= qla2x00_update_fw_options,
	.load_risc		= qla2x00_load_risc,
	.pci_info_str		= qla2x00_pci_info_str,
	.fw_version_str		= qla2x00_fw_version_str,
	.intr_handler		= qla2100_intr_handler,
	.enable_intrs		= qla2x00_enable_intrs,
	.disable_intrs		= qla2x00_disable_intrs,
	.abort_command		= qla2x00_abort_command,
	.target_reset		= qla2x00_abort_target,
	.lun_reset		= qla2x00_lun_reset,
	.fabric_login		= qla2x00_login_fabric,
	.fabric_logout		= qla2x00_fabric_logout,
	.calc_req_entries	= qla2x00_calc_iocbs_32,
	.build_iocbs		= qla2x00_build_scsi_iocbs_32,
	.prep_ms_iocb
				= qla2x00_prep_ms_iocb,
	.prep_ms_fdmi_iocb	= qla2x00_prep_ms_fdmi_iocb,
	.read_nvram		= qla2x00_read_nvram_data,
	.write_nvram		= qla2x00_write_nvram_data,
	.fw_dump		= qla2100_fw_dump,
	.beacon_on		= NULL,
	.beacon_off		= NULL,
	.beacon_blink		= NULL,
	.read_optrom		= qla2x00_read_optrom_data,
	.write_optrom		= qla2x00_write_optrom_data,
	.get_flash_version	= qla2x00_get_flash_version,
	.start_scsi		= qla2x00_start_scsi,
	.start_scsi_mq		= NULL,
	.abort_isp		= qla2x00_abort_isp,
	.iospace_config		= qla2x00_iospace_config,
	.initialize_adapter	= qla2x00_initialize_adapter,
};

/* HW method table for ISP2300-class adapters (adds beacon + 2300 dump). */
static struct isp_operations qla2300_isp_ops = {
	.pci_config		= qla2300_pci_config,
	.reset_chip		= qla2x00_reset_chip,
	.chip_diag		= qla2x00_chip_diag,
	.config_rings		= qla2x00_config_rings,
	.reset_adapter		= qla2x00_reset_adapter,
	.nvram_config		= qla2x00_nvram_config,
	.update_fw_options	= qla2x00_update_fw_options,
	.load_risc		= qla2x00_load_risc,
	.pci_info_str		= qla2x00_pci_info_str,
	.fw_version_str		= qla2x00_fw_version_str,
	.intr_handler		= qla2300_intr_handler,
	.enable_intrs		= qla2x00_enable_intrs,
	.disable_intrs		= qla2x00_disable_intrs,
	.abort_command		= qla2x00_abort_command,
	.target_reset		= qla2x00_abort_target,
	.lun_reset		= qla2x00_lun_reset,
	.fabric_login		= qla2x00_login_fabric,
	.fabric_logout		= qla2x00_fabric_logout,
	.calc_req_entries	= qla2x00_calc_iocbs_32,
	.build_iocbs		= qla2x00_build_scsi_iocbs_32,
	.prep_ms_iocb		= qla2x00_prep_ms_iocb,
	.prep_ms_fdmi_iocb	= qla2x00_prep_ms_fdmi_iocb,
	.read_nvram		= qla2x00_read_nvram_data,
	.write_nvram		= qla2x00_write_nvram_data,
	.fw_dump		= qla2300_fw_dump,
	.beacon_on		= qla2x00_beacon_on,
	.beacon_off		= qla2x00_beacon_off,
	.beacon_blink		= qla2x00_beacon_blink,
	.read_optrom		= qla2x00_read_optrom_data,
	.write_optrom		= qla2x00_write_optrom_data,
.get_flash_version = qla2x00_get_flash_version, 2196 .start_scsi = qla2x00_start_scsi, 2197 .start_scsi_mq = NULL, 2198 .abort_isp = qla2x00_abort_isp, 2199 .iospace_config = qla2x00_iospace_config, 2200 .initialize_adapter = qla2x00_initialize_adapter, 2201 }; 2202 2203 static struct isp_operations qla24xx_isp_ops = { 2204 .pci_config = qla24xx_pci_config, 2205 .reset_chip = qla24xx_reset_chip, 2206 .chip_diag = qla24xx_chip_diag, 2207 .config_rings = qla24xx_config_rings, 2208 .reset_adapter = qla24xx_reset_adapter, 2209 .nvram_config = qla24xx_nvram_config, 2210 .update_fw_options = qla24xx_update_fw_options, 2211 .load_risc = qla24xx_load_risc, 2212 .pci_info_str = qla24xx_pci_info_str, 2213 .fw_version_str = qla24xx_fw_version_str, 2214 .intr_handler = qla24xx_intr_handler, 2215 .enable_intrs = qla24xx_enable_intrs, 2216 .disable_intrs = qla24xx_disable_intrs, 2217 .abort_command = qla24xx_abort_command, 2218 .target_reset = qla24xx_abort_target, 2219 .lun_reset = qla24xx_lun_reset, 2220 .fabric_login = qla24xx_login_fabric, 2221 .fabric_logout = qla24xx_fabric_logout, 2222 .calc_req_entries = NULL, 2223 .build_iocbs = NULL, 2224 .prep_ms_iocb = qla24xx_prep_ms_iocb, 2225 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb, 2226 .read_nvram = qla24xx_read_nvram_data, 2227 .write_nvram = qla24xx_write_nvram_data, 2228 .fw_dump = qla24xx_fw_dump, 2229 .beacon_on = qla24xx_beacon_on, 2230 .beacon_off = qla24xx_beacon_off, 2231 .beacon_blink = qla24xx_beacon_blink, 2232 .read_optrom = qla24xx_read_optrom_data, 2233 .write_optrom = qla24xx_write_optrom_data, 2234 .get_flash_version = qla24xx_get_flash_version, 2235 .start_scsi = qla24xx_start_scsi, 2236 .start_scsi_mq = NULL, 2237 .abort_isp = qla2x00_abort_isp, 2238 .iospace_config = qla2x00_iospace_config, 2239 .initialize_adapter = qla2x00_initialize_adapter, 2240 }; 2241 2242 static struct isp_operations qla25xx_isp_ops = { 2243 .pci_config = qla25xx_pci_config, 2244 .reset_chip = qla24xx_reset_chip, 2245 .chip_diag 
= qla24xx_chip_diag,
	.config_rings = qla24xx_config_rings,
	.reset_adapter = qla24xx_reset_adapter,
	.nvram_config = qla24xx_nvram_config,
	.update_fw_options = qla24xx_update_fw_options,
	.load_risc = qla24xx_load_risc,
	.pci_info_str = qla24xx_pci_info_str,
	.fw_version_str = qla24xx_fw_version_str,
	.intr_handler = qla24xx_intr_handler,
	.enable_intrs = qla24xx_enable_intrs,
	.disable_intrs = qla24xx_disable_intrs,
	.abort_command = qla24xx_abort_command,
	.target_reset = qla24xx_abort_target,
	.lun_reset = qla24xx_lun_reset,
	.fabric_login = qla24xx_login_fabric,
	.fabric_logout = qla24xx_fabric_logout,
	.calc_req_entries = NULL,
	.build_iocbs = NULL,
	.prep_ms_iocb = qla24xx_prep_ms_iocb,
	.prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
	/* 25xx has its own NVRAM/flash access and fw-dump routines. */
	.read_nvram = qla25xx_read_nvram_data,
	.write_nvram = qla25xx_write_nvram_data,
	.fw_dump = qla25xx_fw_dump,
	.beacon_on = qla24xx_beacon_on,
	.beacon_off = qla24xx_beacon_off,
	.beacon_blink = qla24xx_beacon_blink,
	.read_optrom = qla25xx_read_optrom_data,
	.write_optrom = qla24xx_write_optrom_data,
	.get_flash_version = qla24xx_get_flash_version,
	.start_scsi = qla24xx_dif_start_scsi,
	.start_scsi_mq = qla2xxx_dif_start_scsi_mq,
	.abort_isp = qla2x00_abort_isp,
	.iospace_config = qla2x00_iospace_config,
	.initialize_adapter = qla2x00_initialize_adapter,
};

/*
 * Hardware entry points for ISP81xx.  Direct NVRAM read/write hooks are
 * left NULL for this family; nvram_config/load_risc use the 81xx-specific
 * routines.
 */
static struct isp_operations qla81xx_isp_ops = {
	.pci_config = qla25xx_pci_config,
	.reset_chip = qla24xx_reset_chip,
	.chip_diag = qla24xx_chip_diag,
	.config_rings = qla24xx_config_rings,
	.reset_adapter = qla24xx_reset_adapter,
	.nvram_config = qla81xx_nvram_config,
	.update_fw_options = qla81xx_update_fw_options,
	.load_risc = qla81xx_load_risc,
	.pci_info_str = qla24xx_pci_info_str,
	.fw_version_str = qla24xx_fw_version_str,
	.intr_handler = qla24xx_intr_handler,
	.enable_intrs = qla24xx_enable_intrs,
	.disable_intrs = qla24xx_disable_intrs,
	.abort_command = qla24xx_abort_command,
	.target_reset = qla24xx_abort_target,
	.lun_reset = qla24xx_lun_reset,
	.fabric_login = qla24xx_login_fabric,
	.fabric_logout = qla24xx_fabric_logout,
	.calc_req_entries = NULL,
	.build_iocbs = NULL,
	.prep_ms_iocb = qla24xx_prep_ms_iocb,
	.prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
	.read_nvram = NULL,
	.write_nvram = NULL,
	.fw_dump = qla81xx_fw_dump,
	.beacon_on = qla24xx_beacon_on,
	.beacon_off = qla24xx_beacon_off,
	.beacon_blink = qla83xx_beacon_blink,
	.read_optrom = qla25xx_read_optrom_data,
	.write_optrom = qla24xx_write_optrom_data,
	.get_flash_version = qla24xx_get_flash_version,
	.start_scsi = qla24xx_dif_start_scsi,
	.start_scsi_mq = qla2xxx_dif_start_scsi_mq,
	.abort_isp = qla2x00_abort_isp,
	.iospace_config = qla2x00_iospace_config,
	.initialize_adapter = qla2x00_initialize_adapter,
};

/*
 * Hardware entry points for ISP82xx (converged/CNA parts): dedicated
 * PCI config, chip reset, interrupt, flash, SCSI start, abort-ISP and
 * iospace routines.
 */
static struct isp_operations qla82xx_isp_ops = {
	.pci_config = qla82xx_pci_config,
	.reset_chip = qla82xx_reset_chip,
	.chip_diag = qla24xx_chip_diag,
	.config_rings = qla82xx_config_rings,
	.reset_adapter = qla24xx_reset_adapter,
	.nvram_config = qla81xx_nvram_config,
	.update_fw_options = qla24xx_update_fw_options,
	.load_risc = qla82xx_load_risc,
	.pci_info_str = qla24xx_pci_info_str,
	.fw_version_str = qla24xx_fw_version_str,
	.intr_handler = qla82xx_intr_handler,
	.enable_intrs = qla82xx_enable_intrs,
	.disable_intrs = qla82xx_disable_intrs,
	.abort_command = qla24xx_abort_command,
	.target_reset = qla24xx_abort_target,
	.lun_reset = qla24xx_lun_reset,
	.fabric_login = qla24xx_login_fabric,
	.fabric_logout = qla24xx_fabric_logout,
	.calc_req_entries = NULL,
	.build_iocbs = NULL,
	.prep_ms_iocb = qla24xx_prep_ms_iocb,
	.prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
	.read_nvram = qla24xx_read_nvram_data,
	.write_nvram = qla24xx_write_nvram_data,
	.fw_dump = qla82xx_fw_dump,
	.beacon_on = qla82xx_beacon_on,
	.beacon_off = qla82xx_beacon_off,
	.beacon_blink = NULL,
	.read_optrom = qla82xx_read_optrom_data,
	.write_optrom = qla82xx_write_optrom_data,
	.get_flash_version = qla82xx_get_flash_version,
	.start_scsi = qla82xx_start_scsi,
	.start_scsi_mq = NULL,
	.abort_isp = qla82xx_abort_isp,
	.iospace_config = qla82xx_iospace_config,
	.initialize_adapter = qla2x00_initialize_adapter,
};

/*
 * Hardware entry points for ISP8044: largely shares the 82xx routines
 * but has its own interrupt handler, optrom access, fw dump and
 * abort-ISP implementations.  NVRAM read/write hooks are left NULL.
 */
static struct isp_operations qla8044_isp_ops = {
	.pci_config = qla82xx_pci_config,
	.reset_chip = qla82xx_reset_chip,
	.chip_diag = qla24xx_chip_diag,
	.config_rings = qla82xx_config_rings,
	.reset_adapter = qla24xx_reset_adapter,
	.nvram_config = qla81xx_nvram_config,
	.update_fw_options = qla24xx_update_fw_options,
	.load_risc = qla82xx_load_risc,
	.pci_info_str = qla24xx_pci_info_str,
	.fw_version_str = qla24xx_fw_version_str,
	.intr_handler = qla8044_intr_handler,
	.enable_intrs = qla82xx_enable_intrs,
	.disable_intrs = qla82xx_disable_intrs,
	.abort_command = qla24xx_abort_command,
	.target_reset = qla24xx_abort_target,
	.lun_reset = qla24xx_lun_reset,
	.fabric_login = qla24xx_login_fabric,
	.fabric_logout = qla24xx_fabric_logout,
	.calc_req_entries = NULL,
	.build_iocbs = NULL,
	.prep_ms_iocb = qla24xx_prep_ms_iocb,
	.prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
	.read_nvram = NULL,
	.write_nvram = NULL,
	.fw_dump = qla8044_fw_dump,
	.beacon_on = qla82xx_beacon_on,
	.beacon_off = qla82xx_beacon_off,
	.beacon_blink = NULL,
	.read_optrom = qla8044_read_optrom_data,
	.write_optrom = qla8044_write_optrom_data,
	.get_flash_version = qla82xx_get_flash_version,
	.start_scsi = qla82xx_start_scsi,
	.start_scsi_mq = NULL,
.abort_isp = qla8044_abort_isp,
	.iospace_config = qla82xx_iospace_config,
	.initialize_adapter = qla2x00_initialize_adapter,
};

/*
 * Hardware entry points for ISP83xx: 81xx NVRAM/firmware routines plus
 * T10-DIF capable SCSI start and an 83xx-specific iospace_config/fw_dump.
 * Direct NVRAM read/write hooks are left NULL for this family.
 */
static struct isp_operations qla83xx_isp_ops = {
	.pci_config = qla25xx_pci_config,
	.reset_chip = qla24xx_reset_chip,
	.chip_diag = qla24xx_chip_diag,
	.config_rings = qla24xx_config_rings,
	.reset_adapter = qla24xx_reset_adapter,
	.nvram_config = qla81xx_nvram_config,
	.update_fw_options = qla81xx_update_fw_options,
	.load_risc = qla81xx_load_risc,
	.pci_info_str = qla24xx_pci_info_str,
	.fw_version_str = qla24xx_fw_version_str,
	.intr_handler = qla24xx_intr_handler,
	.enable_intrs = qla24xx_enable_intrs,
	.disable_intrs = qla24xx_disable_intrs,
	.abort_command = qla24xx_abort_command,
	.target_reset = qla24xx_abort_target,
	.lun_reset = qla24xx_lun_reset,
	.fabric_login = qla24xx_login_fabric,
	.fabric_logout = qla24xx_fabric_logout,
	.calc_req_entries = NULL,
	.build_iocbs = NULL,
	.prep_ms_iocb = qla24xx_prep_ms_iocb,
	.prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
	.read_nvram = NULL,
	.write_nvram = NULL,
	.fw_dump = qla83xx_fw_dump,
	.beacon_on = qla24xx_beacon_on,
	.beacon_off = qla24xx_beacon_off,
	.beacon_blink = qla83xx_beacon_blink,
	.read_optrom = qla25xx_read_optrom_data,
	.write_optrom = qla24xx_write_optrom_data,
	.get_flash_version = qla24xx_get_flash_version,
	.start_scsi = qla24xx_dif_start_scsi,
	.start_scsi_mq = qla2xxx_dif_start_scsi_mq,
	.abort_isp = qla2x00_abort_isp,
	.iospace_config = qla83xx_iospace_config,
	.initialize_adapter = qla2x00_initialize_adapter,
};

/*
 * Hardware entry points for ISPFx00 (MR) adapters.  Several operations
 * have no implementation for this family and are left NULL
 * (nvram_config, update_fw_options, load_risc, fabric login/logout,
 * fw_dump, beacon_blink).
 */
static struct isp_operations qlafx00_isp_ops = {
	.pci_config = qlafx00_pci_config,
	.reset_chip = qlafx00_soft_reset,
	.chip_diag = qlafx00_chip_diag,
	.config_rings = qlafx00_config_rings,
	/* Adapter reset is the same soft-reset used for chip reset. */
	.reset_adapter = qlafx00_soft_reset,
	.nvram_config =
	NULL,
	.update_fw_options = NULL,
	.load_risc = NULL,
	.pci_info_str = qlafx00_pci_info_str,
	.fw_version_str = qlafx00_fw_version_str,
	.intr_handler = qlafx00_intr_handler,
	.enable_intrs = qlafx00_enable_intrs,
	.disable_intrs = qlafx00_disable_intrs,
	.abort_command = qla24xx_async_abort_command,
	.target_reset = qlafx00_abort_target,
	.lun_reset = qlafx00_lun_reset,
	.fabric_login = NULL,
	.fabric_logout = NULL,
	.calc_req_entries = NULL,
	.build_iocbs = NULL,
	.prep_ms_iocb = qla24xx_prep_ms_iocb,
	.prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
	.read_nvram = qla24xx_read_nvram_data,
	.write_nvram = qla24xx_write_nvram_data,
	.fw_dump = NULL,
	.beacon_on = qla24xx_beacon_on,
	.beacon_off = qla24xx_beacon_off,
	.beacon_blink = NULL,
	.read_optrom = qla24xx_read_optrom_data,
	.write_optrom = qla24xx_write_optrom_data,
	.get_flash_version = qla24xx_get_flash_version,
	.start_scsi = qlafx00_start_scsi,
	.start_scsi_mq = NULL,
	.abort_isp = qlafx00_abort_isp,
	.iospace_config = qlafx00_iospace_config,
	.initialize_adapter = qlafx00_initialize_adapter,
};

/*
 * Hardware entry points for ISP27xx (also reused for 28xx in
 * qla2x00_probe_one): 81xx NVRAM/firmware load, T10-DIF capable SCSI
 * start, and a 27xx-specific firmware dump (qla27xx_fwdump).
 */
static struct isp_operations qla27xx_isp_ops = {
	.pci_config = qla25xx_pci_config,
	.reset_chip = qla24xx_reset_chip,
	.chip_diag = qla24xx_chip_diag,
	.config_rings = qla24xx_config_rings,
	.reset_adapter = qla24xx_reset_adapter,
	.nvram_config = qla81xx_nvram_config,
	.update_fw_options = qla24xx_update_fw_options,
	.load_risc = qla81xx_load_risc,
	.pci_info_str = qla24xx_pci_info_str,
	.fw_version_str = qla24xx_fw_version_str,
	.intr_handler = qla24xx_intr_handler,
	.enable_intrs = qla24xx_enable_intrs,
	.disable_intrs = qla24xx_disable_intrs,
	.abort_command = qla24xx_abort_command,
	.target_reset = qla24xx_abort_target,
	.lun_reset = qla24xx_lun_reset,
	.fabric_login = qla24xx_login_fabric,
.fabric_logout = qla24xx_fabric_logout,
	.calc_req_entries = NULL,
	.build_iocbs = NULL,
	.prep_ms_iocb = qla24xx_prep_ms_iocb,
	.prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
	.read_nvram = NULL,
	.write_nvram = NULL,
	.fw_dump = qla27xx_fwdump,
	.beacon_on = qla24xx_beacon_on,
	.beacon_off = qla24xx_beacon_off,
	.beacon_blink = qla83xx_beacon_blink,
	.read_optrom = qla25xx_read_optrom_data,
	.write_optrom = qla24xx_write_optrom_data,
	.get_flash_version = qla24xx_get_flash_version,
	.start_scsi = qla24xx_dif_start_scsi,
	.start_scsi_mq = qla2xxx_dif_start_scsi_mq,
	.abort_isp = qla2x00_abort_isp,
	.iospace_config = qla83xx_iospace_config,
	.initialize_adapter = qla2x00_initialize_adapter,
};

/*
 * qla2x00_set_isp_flags() - classify the adapter from its PCI device ID.
 * @ha: HBA context whose ->pdev has already been set.
 *
 * Sets ha->isp_type to the chip family bit, accumulates capability bits
 * in ha->device_type (DT_ZIO_SUPPORTED, DT_FWI2, DT_IIDMA, DT_T10_PI,
 * DT_EXTENDED_IDS, DT_OEM_001) and records the firmware load address in
 * ha->fw_srisc_address.  Finally derives the physical port number
 * (ha->port_no) either from ha->portnum (82xx) or from the PCI
 * interrupt-pin register.
 */
static inline void
qla2x00_set_isp_flags(struct qla_hw_data *ha)
{
	/* Extended IDs assumed by default; cleared for 2100/2200 below. */
	ha->device_type = DT_EXTENDED_IDS;
	switch (ha->pdev->device) {
	case PCI_DEVICE_ID_QLOGIC_ISP2100:
		ha->isp_type |= DT_ISP2100;
		ha->device_type &= ~DT_EXTENDED_IDS;
		ha->fw_srisc_address = RISC_START_ADDRESS_2100;
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP2200:
		ha->isp_type |= DT_ISP2200;
		ha->device_type &= ~DT_EXTENDED_IDS;
		ha->fw_srisc_address = RISC_START_ADDRESS_2100;
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP2300:
		ha->isp_type |= DT_ISP2300;
		ha->device_type |= DT_ZIO_SUPPORTED;
		ha->fw_srisc_address = RISC_START_ADDRESS_2300;
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP2312:
		ha->isp_type |= DT_ISP2312;
		ha->device_type |= DT_ZIO_SUPPORTED;
		ha->fw_srisc_address = RISC_START_ADDRESS_2300;
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP2322:
		ha->isp_type |= DT_ISP2322;
		ha->device_type |= DT_ZIO_SUPPORTED;
		/* Dell-branded (subsystem 1028:0170) 2322 gets OEM flag. */
		if (ha->pdev->subsystem_vendor == 0x1028 &&
		    ha->pdev->subsystem_device == 0x0170)
			ha->device_type |= DT_OEM_001;
		ha->fw_srisc_address = RISC_START_ADDRESS_2300;
		break;
	case
	PCI_DEVICE_ID_QLOGIC_ISP6312:
		ha->isp_type |= DT_ISP6312;
		ha->fw_srisc_address = RISC_START_ADDRESS_2300;
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP6322:
		ha->isp_type |= DT_ISP6322;
		ha->fw_srisc_address = RISC_START_ADDRESS_2300;
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP2422:
		ha->isp_type |= DT_ISP2422;
		ha->device_type |= DT_ZIO_SUPPORTED;
		ha->device_type |= DT_FWI2;
		ha->device_type |= DT_IIDMA;
		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP2432:
		ha->isp_type |= DT_ISP2432;
		ha->device_type |= DT_ZIO_SUPPORTED;
		ha->device_type |= DT_FWI2;
		ha->device_type |= DT_IIDMA;
		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP8432:
		ha->isp_type |= DT_ISP8432;
		ha->device_type |= DT_ZIO_SUPPORTED;
		ha->device_type |= DT_FWI2;
		ha->device_type |= DT_IIDMA;
		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP5422:
		ha->isp_type |= DT_ISP5422;
		ha->device_type |= DT_FWI2;
		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP5432:
		ha->isp_type |= DT_ISP5432;
		ha->device_type |= DT_FWI2;
		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP2532:
		ha->isp_type |= DT_ISP2532;
		ha->device_type |= DT_ZIO_SUPPORTED;
		ha->device_type |= DT_FWI2;
		ha->device_type |= DT_IIDMA;
		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP8001:
		ha->isp_type |= DT_ISP8001;
		ha->device_type |= DT_ZIO_SUPPORTED;
		ha->device_type |= DT_FWI2;
		ha->device_type |= DT_IIDMA;
		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP8021:
		ha->isp_type |= DT_ISP8021;
		ha->device_type |= DT_ZIO_SUPPORTED;
		ha->device_type |= DT_FWI2;
	ha->fw_srisc_address = RISC_START_ADDRESS_2400;
		/* Initialize 82XX ISP flags */
		qla82xx_init_flags(ha);
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP8044:
		ha->isp_type |= DT_ISP8044;
		ha->device_type |= DT_ZIO_SUPPORTED;
		ha->device_type |= DT_FWI2;
		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
		/* Initialize 82XX ISP flags */
		qla82xx_init_flags(ha);
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP2031:
		ha->isp_type |= DT_ISP2031;
		ha->device_type |= DT_ZIO_SUPPORTED;
		ha->device_type |= DT_FWI2;
		ha->device_type |= DT_IIDMA;
		ha->device_type |= DT_T10_PI;
		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP8031:
		ha->isp_type |= DT_ISP8031;
		ha->device_type |= DT_ZIO_SUPPORTED;
		ha->device_type |= DT_FWI2;
		ha->device_type |= DT_IIDMA;
		ha->device_type |= DT_T10_PI;
		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
		break;
	case PCI_DEVICE_ID_QLOGIC_ISPF001:
		ha->isp_type |= DT_ISPFX00;
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP2071:
		ha->isp_type |= DT_ISP2071;
		ha->device_type |= DT_ZIO_SUPPORTED;
		ha->device_type |= DT_FWI2;
		ha->device_type |= DT_IIDMA;
		ha->device_type |= DT_T10_PI;
		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP2271:
		ha->isp_type |= DT_ISP2271;
		ha->device_type |= DT_ZIO_SUPPORTED;
		ha->device_type |= DT_FWI2;
		ha->device_type |= DT_IIDMA;
		ha->device_type |= DT_T10_PI;
		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP2261:
		ha->isp_type |= DT_ISP2261;
		ha->device_type |= DT_ZIO_SUPPORTED;
		ha->device_type |= DT_FWI2;
		ha->device_type |= DT_IIDMA;
		ha->device_type |= DT_T10_PI;
		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
		break;
	/* 2081 and 2089 share one family flag. */
	case PCI_DEVICE_ID_QLOGIC_ISP2081:
	case PCI_DEVICE_ID_QLOGIC_ISP2089:
		ha->isp_type
	|= DT_ISP2081;
		ha->device_type |= DT_ZIO_SUPPORTED;
		ha->device_type |= DT_FWI2;
		ha->device_type |= DT_IIDMA;
		ha->device_type |= DT_T10_PI;
		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
		break;
	/* Likewise 2281/2289. */
	case PCI_DEVICE_ID_QLOGIC_ISP2281:
	case PCI_DEVICE_ID_QLOGIC_ISP2289:
		ha->isp_type |= DT_ISP2281;
		ha->device_type |= DT_ZIO_SUPPORTED;
		ha->device_type |= DT_FWI2;
		ha->device_type |= DT_IIDMA;
		ha->device_type |= DT_T10_PI;
		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
		break;
	}

	if (IS_QLA82XX(ha))
		ha->port_no = ha->portnum & 1;
	else {
		/* Get adapter physical port no from interrupt pin register. */
		pci_read_config_byte(ha->pdev, PCI_INTERRUPT_PIN, &ha->port_no);
		if (IS_QLA25XX(ha) || IS_QLA2031(ha) ||
		    IS_QLA27XX(ha) || IS_QLA28XX(ha))
			ha->port_no--;
		else
			ha->port_no = !(ha->port_no & 1);
	}

	ql_dbg_pci(ql_dbg_init, ha->pdev, 0x000b,
	    "device_type=0x%x port=%d fw_srisc_address=0x%x.\n",
	    ha->device_type, ha->port_no, ha->fw_srisc_address);
}

/*
 * scsi_host_template scan_start() callback: request a full rediscovery
 * (loop resync, local loop update, RSCN update, NPIV config) via the DPC
 * flags, unless the HBA is running "gold" (recovery) firmware.
 */
static void
qla2xxx_scan_start(struct Scsi_Host *shost)
{
	scsi_qla_host_t *vha = shost_priv(shost);

	if (vha->hw->flags.running_gold_fw)
		return;

	set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
	set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
	set_bit(RSCN_UPDATE, &vha->dpc_flags);
	set_bit(NPIV_CONFIG_NEEDED, &vha->dpc_flags);
}

/*
 * scsi_host_template scan_finished() callback: returns nonzero once
 * scanning should stop - driver unloading, host gone, the
 * loop_reset_delay timeout elapsed, or the loop has reached LOOP_READY.
 */
static int
qla2xxx_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	scsi_qla_host_t *vha = shost_priv(shost);

	if (test_bit(UNLOADING, &vha->dpc_flags))
		return 1;
	if (!vha->host)
		return 1;
	if (time > vha->hw->loop_reset_delay * HZ)
		return 1;

	return atomic_read(&vha->loop_state) == LOOP_READY;
}

/*
 * Workqueue handler for vha->iocb_work: drains the vha work list by
 * calling qla2x00_do_work() at most twice, then clears IOCB_WORK_ACTIVE
 * under work_lock.  Bails out early when the base vha is unloading.
 */
static void qla2x00_iocb_work_fn(struct work_struct *work)
{
	struct scsi_qla_host *vha =
	container_of(work,
	    struct scsi_qla_host, iocb_work);
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	int i = 2;
	unsigned long flags;

	if (test_bit(UNLOADING, &base_vha->dpc_flags))
		return;

	while (!list_empty(&vha->work_list) && i > 0) {
		qla2x00_do_work(vha);
		i--;
	}

	spin_lock_irqsave(&vha->work_lock, flags);
	clear_bit(IOCB_WORK_ACTIVE, &vha->dpc_flags);
	spin_unlock_irqrestore(&vha->work_lock, flags);
}

/*
 * PCI driver interface
 */
/*
 * qla2x00_probe_one() - PCI probe entry point for all supported ISPs.
 * Enables the PCI device (memory-only BARs for FWI2-class parts listed
 * below), allocates and initializes the qla_hw_data, classifies the chip
 * via qla2x00_set_isp_flags() and selects the matching isp_operations
 * table and ring sizes.  (Function continues beyond this chunk.)
 */
static int
qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int ret = -ENODEV;
	struct Scsi_Host *host;
	scsi_qla_host_t *base_vha = NULL;
	struct qla_hw_data *ha;
	char pci_info[30];
	char fw_str[30], wq_name[30];
	struct scsi_host_template *sht;
	int bars, mem_only = 0;
	uint16_t req_length = 0, rsp_length = 0;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	int i;

	bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
	sht = &qla2xxx_driver_template;
	/* These devices are memory-mapped only (no I/O port BARs). */
	if (pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2422 ||
	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2432 ||
	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8432 ||
	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5422 ||
	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5432 ||
	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2532 ||
	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8001 ||
	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8021 ||
	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2031 ||
	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8031 ||
	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISPF001 ||
	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8044 ||
	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2071 ||
	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2271 ||
	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2261 ||
	    pdev->device ==
PCI_DEVICE_ID_QLOGIC_ISP2081 || 2785 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2281 || 2786 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2089 || 2787 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2289) { 2788 bars = pci_select_bars(pdev, IORESOURCE_MEM); 2789 mem_only = 1; 2790 ql_dbg_pci(ql_dbg_init, pdev, 0x0007, 2791 "Mem only adapter.\n"); 2792 } 2793 ql_dbg_pci(ql_dbg_init, pdev, 0x0008, 2794 "Bars=%d.\n", bars); 2795 2796 if (mem_only) { 2797 if (pci_enable_device_mem(pdev)) 2798 return ret; 2799 } else { 2800 if (pci_enable_device(pdev)) 2801 return ret; 2802 } 2803 2804 /* This may fail but that's ok */ 2805 pci_enable_pcie_error_reporting(pdev); 2806 2807 /* Turn off T10-DIF when FC-NVMe is enabled */ 2808 if (ql2xnvmeenable) 2809 ql2xenabledif = 0; 2810 2811 ha = kzalloc(sizeof(struct qla_hw_data), GFP_KERNEL); 2812 if (!ha) { 2813 ql_log_pci(ql_log_fatal, pdev, 0x0009, 2814 "Unable to allocate memory for ha.\n"); 2815 goto disable_device; 2816 } 2817 ql_dbg_pci(ql_dbg_init, pdev, 0x000a, 2818 "Memory allocated for ha=%p.\n", ha); 2819 ha->pdev = pdev; 2820 INIT_LIST_HEAD(&ha->tgt.q_full_list); 2821 spin_lock_init(&ha->tgt.q_full_lock); 2822 spin_lock_init(&ha->tgt.sess_lock); 2823 spin_lock_init(&ha->tgt.atio_lock); 2824 2825 atomic_set(&ha->nvme_active_aen_cnt, 0); 2826 2827 /* Clear our data area */ 2828 ha->bars = bars; 2829 ha->mem_only = mem_only; 2830 spin_lock_init(&ha->hardware_lock); 2831 spin_lock_init(&ha->vport_slock); 2832 mutex_init(&ha->selflogin_lock); 2833 mutex_init(&ha->optrom_mutex); 2834 2835 /* Set ISP-type information. 
*/ 2836 qla2x00_set_isp_flags(ha); 2837 2838 /* Set EEH reset type to fundamental if required by hba */ 2839 if (IS_QLA24XX(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha) || 2840 IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) 2841 pdev->needs_freset = 1; 2842 2843 ha->prev_topology = 0; 2844 ha->init_cb_size = sizeof(init_cb_t); 2845 ha->link_data_rate = PORT_SPEED_UNKNOWN; 2846 ha->optrom_size = OPTROM_SIZE_2300; 2847 ha->max_exchg = FW_MAX_EXCHANGES_CNT; 2848 atomic_set(&ha->num_pend_mbx_stage1, 0); 2849 atomic_set(&ha->num_pend_mbx_stage2, 0); 2850 atomic_set(&ha->num_pend_mbx_stage3, 0); 2851 atomic_set(&ha->zio_threshold, DEFAULT_ZIO_THRESHOLD); 2852 ha->last_zio_threshold = DEFAULT_ZIO_THRESHOLD; 2853 2854 /* Assign ISP specific operations. */ 2855 if (IS_QLA2100(ha)) { 2856 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2100; 2857 ha->mbx_count = MAILBOX_REGISTER_COUNT_2100; 2858 req_length = REQUEST_ENTRY_CNT_2100; 2859 rsp_length = RESPONSE_ENTRY_CNT_2100; 2860 ha->max_loop_id = SNS_LAST_LOOP_ID_2100; 2861 ha->gid_list_info_size = 4; 2862 ha->flash_conf_off = ~0; 2863 ha->flash_data_off = ~0; 2864 ha->nvram_conf_off = ~0; 2865 ha->nvram_data_off = ~0; 2866 ha->isp_ops = &qla2100_isp_ops; 2867 } else if (IS_QLA2200(ha)) { 2868 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2100; 2869 ha->mbx_count = MAILBOX_REGISTER_COUNT_2200; 2870 req_length = REQUEST_ENTRY_CNT_2200; 2871 rsp_length = RESPONSE_ENTRY_CNT_2100; 2872 ha->max_loop_id = SNS_LAST_LOOP_ID_2100; 2873 ha->gid_list_info_size = 4; 2874 ha->flash_conf_off = ~0; 2875 ha->flash_data_off = ~0; 2876 ha->nvram_conf_off = ~0; 2877 ha->nvram_data_off = ~0; 2878 ha->isp_ops = &qla2100_isp_ops; 2879 } else if (IS_QLA23XX(ha)) { 2880 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2100; 2881 ha->mbx_count = MAILBOX_REGISTER_COUNT; 2882 req_length = REQUEST_ENTRY_CNT_2200; 2883 rsp_length = RESPONSE_ENTRY_CNT_2300; 2884 ha->max_loop_id = SNS_LAST_LOOP_ID_2300; 2885 ha->gid_list_info_size = 6; 2886 if (IS_QLA2322(ha) || 
IS_QLA6322(ha)) 2887 ha->optrom_size = OPTROM_SIZE_2322; 2888 ha->flash_conf_off = ~0; 2889 ha->flash_data_off = ~0; 2890 ha->nvram_conf_off = ~0; 2891 ha->nvram_data_off = ~0; 2892 ha->isp_ops = &qla2300_isp_ops; 2893 } else if (IS_QLA24XX_TYPE(ha)) { 2894 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; 2895 ha->mbx_count = MAILBOX_REGISTER_COUNT; 2896 req_length = REQUEST_ENTRY_CNT_24XX; 2897 rsp_length = RESPONSE_ENTRY_CNT_2300; 2898 ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; 2899 ha->max_loop_id = SNS_LAST_LOOP_ID_2300; 2900 ha->init_cb_size = sizeof(struct mid_init_cb_24xx); 2901 ha->gid_list_info_size = 8; 2902 ha->optrom_size = OPTROM_SIZE_24XX; 2903 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA24XX; 2904 ha->isp_ops = &qla24xx_isp_ops; 2905 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF; 2906 ha->flash_data_off = FARX_ACCESS_FLASH_DATA; 2907 ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF; 2908 ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA; 2909 } else if (IS_QLA25XX(ha)) { 2910 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; 2911 ha->mbx_count = MAILBOX_REGISTER_COUNT; 2912 req_length = REQUEST_ENTRY_CNT_24XX; 2913 rsp_length = RESPONSE_ENTRY_CNT_2300; 2914 ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; 2915 ha->max_loop_id = SNS_LAST_LOOP_ID_2300; 2916 ha->init_cb_size = sizeof(struct mid_init_cb_24xx); 2917 ha->gid_list_info_size = 8; 2918 ha->optrom_size = OPTROM_SIZE_25XX; 2919 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX; 2920 ha->isp_ops = &qla25xx_isp_ops; 2921 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF; 2922 ha->flash_data_off = FARX_ACCESS_FLASH_DATA; 2923 ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF; 2924 ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA; 2925 } else if (IS_QLA81XX(ha)) { 2926 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; 2927 ha->mbx_count = MAILBOX_REGISTER_COUNT; 2928 req_length = REQUEST_ENTRY_CNT_24XX; 2929 rsp_length = RESPONSE_ENTRY_CNT_2300; 2930 ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; 2931 ha->max_loop_id = SNS_LAST_LOOP_ID_2300; 
2932 ha->init_cb_size = sizeof(struct mid_init_cb_81xx); 2933 ha->gid_list_info_size = 8; 2934 ha->optrom_size = OPTROM_SIZE_81XX; 2935 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX; 2936 ha->isp_ops = &qla81xx_isp_ops; 2937 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX; 2938 ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX; 2939 ha->nvram_conf_off = ~0; 2940 ha->nvram_data_off = ~0; 2941 } else if (IS_QLA82XX(ha)) { 2942 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; 2943 ha->mbx_count = MAILBOX_REGISTER_COUNT; 2944 req_length = REQUEST_ENTRY_CNT_82XX; 2945 rsp_length = RESPONSE_ENTRY_CNT_82XX; 2946 ha->max_loop_id = SNS_LAST_LOOP_ID_2300; 2947 ha->init_cb_size = sizeof(struct mid_init_cb_81xx); 2948 ha->gid_list_info_size = 8; 2949 ha->optrom_size = OPTROM_SIZE_82XX; 2950 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX; 2951 ha->isp_ops = &qla82xx_isp_ops; 2952 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF; 2953 ha->flash_data_off = FARX_ACCESS_FLASH_DATA; 2954 ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF; 2955 ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA; 2956 } else if (IS_QLA8044(ha)) { 2957 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; 2958 ha->mbx_count = MAILBOX_REGISTER_COUNT; 2959 req_length = REQUEST_ENTRY_CNT_82XX; 2960 rsp_length = RESPONSE_ENTRY_CNT_82XX; 2961 ha->max_loop_id = SNS_LAST_LOOP_ID_2300; 2962 ha->init_cb_size = sizeof(struct mid_init_cb_81xx); 2963 ha->gid_list_info_size = 8; 2964 ha->optrom_size = OPTROM_SIZE_83XX; 2965 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX; 2966 ha->isp_ops = &qla8044_isp_ops; 2967 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF; 2968 ha->flash_data_off = FARX_ACCESS_FLASH_DATA; 2969 ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF; 2970 ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA; 2971 } else if (IS_QLA83XX(ha)) { 2972 ha->portnum = PCI_FUNC(ha->pdev->devfn); 2973 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; 2974 ha->mbx_count = MAILBOX_REGISTER_COUNT; 2975 req_length = REQUEST_ENTRY_CNT_83XX; 2976 rsp_length = 
RESPONSE_ENTRY_CNT_83XX; 2977 ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; 2978 ha->max_loop_id = SNS_LAST_LOOP_ID_2300; 2979 ha->init_cb_size = sizeof(struct mid_init_cb_81xx); 2980 ha->gid_list_info_size = 8; 2981 ha->optrom_size = OPTROM_SIZE_83XX; 2982 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX; 2983 ha->isp_ops = &qla83xx_isp_ops; 2984 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX; 2985 ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX; 2986 ha->nvram_conf_off = ~0; 2987 ha->nvram_data_off = ~0; 2988 } else if (IS_QLAFX00(ha)) { 2989 ha->max_fibre_devices = MAX_FIBRE_DEVICES_FX00; 2990 ha->mbx_count = MAILBOX_REGISTER_COUNT_FX00; 2991 ha->aen_mbx_count = AEN_MAILBOX_REGISTER_COUNT_FX00; 2992 req_length = REQUEST_ENTRY_CNT_FX00; 2993 rsp_length = RESPONSE_ENTRY_CNT_FX00; 2994 ha->isp_ops = &qlafx00_isp_ops; 2995 ha->port_down_retry_count = 30; /* default value */ 2996 ha->mr.fw_hbt_cnt = QLAFX00_HEARTBEAT_INTERVAL; 2997 ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL; 2998 ha->mr.fw_critemp_timer_tick = QLAFX00_CRITEMP_INTERVAL; 2999 ha->mr.fw_hbt_en = 1; 3000 ha->mr.host_info_resend = false; 3001 ha->mr.hinfo_resend_timer_tick = QLAFX00_HINFO_RESEND_INTERVAL; 3002 } else if (IS_QLA27XX(ha)) { 3003 ha->portnum = PCI_FUNC(ha->pdev->devfn); 3004 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; 3005 ha->mbx_count = MAILBOX_REGISTER_COUNT; 3006 req_length = REQUEST_ENTRY_CNT_83XX; 3007 rsp_length = RESPONSE_ENTRY_CNT_83XX; 3008 ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; 3009 ha->max_loop_id = SNS_LAST_LOOP_ID_2300; 3010 ha->init_cb_size = sizeof(struct mid_init_cb_81xx); 3011 ha->gid_list_info_size = 8; 3012 ha->optrom_size = OPTROM_SIZE_83XX; 3013 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX; 3014 ha->isp_ops = &qla27xx_isp_ops; 3015 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX; 3016 ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX; 3017 ha->nvram_conf_off = ~0; 3018 ha->nvram_data_off = ~0; 3019 } else if (IS_QLA28XX(ha)) { 3020 ha->portnum = 
PCI_FUNC(ha->pdev->devfn); 3021 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; 3022 ha->mbx_count = MAILBOX_REGISTER_COUNT; 3023 req_length = REQUEST_ENTRY_CNT_24XX; 3024 rsp_length = RESPONSE_ENTRY_CNT_2300; 3025 ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; 3026 ha->max_loop_id = SNS_LAST_LOOP_ID_2300; 3027 ha->init_cb_size = sizeof(struct mid_init_cb_81xx); 3028 ha->gid_list_info_size = 8; 3029 ha->optrom_size = OPTROM_SIZE_28XX; 3030 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX; 3031 ha->isp_ops = &qla27xx_isp_ops; 3032 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_28XX; 3033 ha->flash_data_off = FARX_ACCESS_FLASH_DATA_28XX; 3034 ha->nvram_conf_off = ~0; 3035 ha->nvram_data_off = ~0; 3036 } 3037 3038 ql_dbg_pci(ql_dbg_init, pdev, 0x001e, 3039 "mbx_count=%d, req_length=%d, " 3040 "rsp_length=%d, max_loop_id=%d, init_cb_size=%d, " 3041 "gid_list_info_size=%d, optrom_size=%d, nvram_npiv_size=%d, " 3042 "max_fibre_devices=%d.\n", 3043 ha->mbx_count, req_length, rsp_length, ha->max_loop_id, 3044 ha->init_cb_size, ha->gid_list_info_size, ha->optrom_size, 3045 ha->nvram_npiv_size, ha->max_fibre_devices); 3046 ql_dbg_pci(ql_dbg_init, pdev, 0x001f, 3047 "isp_ops=%p, flash_conf_off=%d, " 3048 "flash_data_off=%d, nvram_conf_off=%d, nvram_data_off=%d.\n", 3049 ha->isp_ops, ha->flash_conf_off, ha->flash_data_off, 3050 ha->nvram_conf_off, ha->nvram_data_off); 3051 3052 /* Configure PCI I/O space */ 3053 ret = ha->isp_ops->iospace_config(ha); 3054 if (ret) 3055 goto iospace_config_failed; 3056 3057 ql_log_pci(ql_log_info, pdev, 0x001d, 3058 "Found an ISP%04X irq %d iobase 0x%p.\n", 3059 pdev->device, pdev->irq, ha->iobase); 3060 mutex_init(&ha->vport_lock); 3061 mutex_init(&ha->mq_lock); 3062 init_completion(&ha->mbx_cmd_comp); 3063 complete(&ha->mbx_cmd_comp); 3064 init_completion(&ha->mbx_intr_comp); 3065 init_completion(&ha->dcbx_comp); 3066 init_completion(&ha->lb_portup_comp); 3067 3068 set_bit(0, (unsigned long *) ha->vp_idx_map); 3069 3070 
qla2x00_config_dma_addressing(ha); 3071 ql_dbg_pci(ql_dbg_init, pdev, 0x0020, 3072 "64 Bit addressing is %s.\n", 3073 ha->flags.enable_64bit_addressing ? "enable" : 3074 "disable"); 3075 ret = qla2x00_mem_alloc(ha, req_length, rsp_length, &req, &rsp); 3076 if (ret) { 3077 ql_log_pci(ql_log_fatal, pdev, 0x0031, 3078 "Failed to allocate memory for adapter, aborting.\n"); 3079 3080 goto probe_hw_failed; 3081 } 3082 3083 req->max_q_depth = MAX_Q_DEPTH; 3084 if (ql2xmaxqdepth != 0 && ql2xmaxqdepth <= 0xffffU) 3085 req->max_q_depth = ql2xmaxqdepth; 3086 3087 3088 base_vha = qla2x00_create_host(sht, ha); 3089 if (!base_vha) { 3090 ret = -ENOMEM; 3091 goto probe_hw_failed; 3092 } 3093 3094 pci_set_drvdata(pdev, base_vha); 3095 set_bit(PFLG_DRIVER_PROBING, &base_vha->pci_flags); 3096 3097 host = base_vha->host; 3098 base_vha->req = req; 3099 if (IS_QLA2XXX_MIDTYPE(ha)) 3100 base_vha->mgmt_svr_loop_id = 3101 qla2x00_reserve_mgmt_server_loop_id(base_vha); 3102 else 3103 base_vha->mgmt_svr_loop_id = MANAGEMENT_SERVER + 3104 base_vha->vp_idx; 3105 3106 /* Setup fcport template structure. 
*/ 3107 ha->mr.fcport.vha = base_vha; 3108 ha->mr.fcport.port_type = FCT_UNKNOWN; 3109 ha->mr.fcport.loop_id = FC_NO_LOOP_ID; 3110 qla2x00_set_fcport_state(&ha->mr.fcport, FCS_UNCONFIGURED); 3111 ha->mr.fcport.supported_classes = FC_COS_UNSPECIFIED; 3112 ha->mr.fcport.scan_state = 1; 3113 3114 /* Set the SG table size based on ISP type */ 3115 if (!IS_FWI2_CAPABLE(ha)) { 3116 if (IS_QLA2100(ha)) 3117 host->sg_tablesize = 32; 3118 } else { 3119 if (!IS_QLA82XX(ha)) 3120 host->sg_tablesize = QLA_SG_ALL; 3121 } 3122 host->max_id = ha->max_fibre_devices; 3123 host->cmd_per_lun = 3; 3124 host->unique_id = host->host_no; 3125 if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) 3126 host->max_cmd_len = 32; 3127 else 3128 host->max_cmd_len = MAX_CMDSZ; 3129 host->max_channel = MAX_BUSES - 1; 3130 /* Older HBAs support only 16-bit LUNs */ 3131 if (!IS_QLAFX00(ha) && !IS_FWI2_CAPABLE(ha) && 3132 ql2xmaxlun > 0xffff) 3133 host->max_lun = 0xffff; 3134 else 3135 host->max_lun = ql2xmaxlun; 3136 host->transportt = qla2xxx_transport_template; 3137 sht->vendor_id = (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC); 3138 3139 ql_dbg(ql_dbg_init, base_vha, 0x0033, 3140 "max_id=%d this_id=%d " 3141 "cmd_per_len=%d unique_id=%d max_cmd_len=%d max_channel=%d " 3142 "max_lun=%llu transportt=%p, vendor_id=%llu.\n", host->max_id, 3143 host->this_id, host->cmd_per_lun, host->unique_id, 3144 host->max_cmd_len, host->max_channel, host->max_lun, 3145 host->transportt, sht->vendor_id); 3146 3147 INIT_WORK(&base_vha->iocb_work, qla2x00_iocb_work_fn); 3148 3149 /* Set up the irqs */ 3150 ret = qla2x00_request_irqs(ha, rsp); 3151 if (ret) 3152 goto probe_failed; 3153 3154 /* Alloc arrays of request and response ring ptrs */ 3155 ret = qla2x00_alloc_queues(ha, req, rsp); 3156 if (ret) { 3157 ql_log(ql_log_fatal, base_vha, 0x003d, 3158 "Failed to allocate memory for queue pointers..." 
3159 "aborting.\n"); 3160 ret = -ENODEV; 3161 goto probe_failed; 3162 } 3163 3164 if (ha->mqenable) { 3165 /* number of hardware queues supported by blk/scsi-mq*/ 3166 host->nr_hw_queues = ha->max_qpairs; 3167 3168 ql_dbg(ql_dbg_init, base_vha, 0x0192, 3169 "blk/scsi-mq enabled, HW queues = %d.\n", host->nr_hw_queues); 3170 } else { 3171 if (ql2xnvmeenable) { 3172 host->nr_hw_queues = ha->max_qpairs; 3173 ql_dbg(ql_dbg_init, base_vha, 0x0194, 3174 "FC-NVMe support is enabled, HW queues=%d\n", 3175 host->nr_hw_queues); 3176 } else { 3177 ql_dbg(ql_dbg_init, base_vha, 0x0193, 3178 "blk/scsi-mq disabled.\n"); 3179 } 3180 } 3181 3182 qlt_probe_one_stage1(base_vha, ha); 3183 3184 pci_save_state(pdev); 3185 3186 /* Assign back pointers */ 3187 rsp->req = req; 3188 req->rsp = rsp; 3189 3190 if (IS_QLAFX00(ha)) { 3191 ha->rsp_q_map[0] = rsp; 3192 ha->req_q_map[0] = req; 3193 set_bit(0, ha->req_qid_map); 3194 set_bit(0, ha->rsp_qid_map); 3195 } 3196 3197 /* FWI2-capable only. */ 3198 req->req_q_in = &ha->iobase->isp24.req_q_in; 3199 req->req_q_out = &ha->iobase->isp24.req_q_out; 3200 rsp->rsp_q_in = &ha->iobase->isp24.rsp_q_in; 3201 rsp->rsp_q_out = &ha->iobase->isp24.rsp_q_out; 3202 if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) || 3203 IS_QLA28XX(ha)) { 3204 req->req_q_in = &ha->mqiobase->isp25mq.req_q_in; 3205 req->req_q_out = &ha->mqiobase->isp25mq.req_q_out; 3206 rsp->rsp_q_in = &ha->mqiobase->isp25mq.rsp_q_in; 3207 rsp->rsp_q_out = &ha->mqiobase->isp25mq.rsp_q_out; 3208 } 3209 3210 if (IS_QLAFX00(ha)) { 3211 req->req_q_in = &ha->iobase->ispfx00.req_q_in; 3212 req->req_q_out = &ha->iobase->ispfx00.req_q_out; 3213 rsp->rsp_q_in = &ha->iobase->ispfx00.rsp_q_in; 3214 rsp->rsp_q_out = &ha->iobase->ispfx00.rsp_q_out; 3215 } 3216 3217 if (IS_P3P_TYPE(ha)) { 3218 req->req_q_out = &ha->iobase->isp82.req_q_out[0]; 3219 rsp->rsp_q_in = &ha->iobase->isp82.rsp_q_in[0]; 3220 rsp->rsp_q_out = &ha->iobase->isp82.rsp_q_out[0]; 3221 } 3222 3223 ql_dbg(ql_dbg_multiq, base_vha, 
0xc009, 3224 "rsp_q_map=%p req_q_map=%p rsp->req=%p req->rsp=%p.\n", 3225 ha->rsp_q_map, ha->req_q_map, rsp->req, req->rsp); 3226 ql_dbg(ql_dbg_multiq, base_vha, 0xc00a, 3227 "req->req_q_in=%p req->req_q_out=%p " 3228 "rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n", 3229 req->req_q_in, req->req_q_out, 3230 rsp->rsp_q_in, rsp->rsp_q_out); 3231 ql_dbg(ql_dbg_init, base_vha, 0x003e, 3232 "rsp_q_map=%p req_q_map=%p rsp->req=%p req->rsp=%p.\n", 3233 ha->rsp_q_map, ha->req_q_map, rsp->req, req->rsp); 3234 ql_dbg(ql_dbg_init, base_vha, 0x003f, 3235 "req->req_q_in=%p req->req_q_out=%p rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n", 3236 req->req_q_in, req->req_q_out, rsp->rsp_q_in, rsp->rsp_q_out); 3237 3238 ha->wq = alloc_workqueue("qla2xxx_wq", 0, 0); 3239 if (unlikely(!ha->wq)) { 3240 ret = -ENOMEM; 3241 goto probe_failed; 3242 } 3243 3244 if (ha->isp_ops->initialize_adapter(base_vha)) { 3245 ql_log(ql_log_fatal, base_vha, 0x00d6, 3246 "Failed to initialize adapter - Adapter flags %x.\n", 3247 base_vha->device_flags); 3248 3249 if (IS_QLA82XX(ha)) { 3250 qla82xx_idc_lock(ha); 3251 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 3252 QLA8XXX_DEV_FAILED); 3253 qla82xx_idc_unlock(ha); 3254 ql_log(ql_log_fatal, base_vha, 0x00d7, 3255 "HW State: FAILED.\n"); 3256 } else if (IS_QLA8044(ha)) { 3257 qla8044_idc_lock(ha); 3258 qla8044_wr_direct(base_vha, 3259 QLA8044_CRB_DEV_STATE_INDEX, 3260 QLA8XXX_DEV_FAILED); 3261 qla8044_idc_unlock(ha); 3262 ql_log(ql_log_fatal, base_vha, 0x0150, 3263 "HW State: FAILED.\n"); 3264 } 3265 3266 ret = -ENODEV; 3267 goto probe_failed; 3268 } 3269 3270 if (IS_QLAFX00(ha)) 3271 host->can_queue = QLAFX00_MAX_CANQUEUE; 3272 else 3273 host->can_queue = req->num_outstanding_cmds - 10; 3274 3275 ql_dbg(ql_dbg_init, base_vha, 0x0032, 3276 "can_queue=%d, req=%p, mgmt_svr_loop_id=%d, sg_tablesize=%d.\n", 3277 host->can_queue, base_vha->req, 3278 base_vha->mgmt_svr_loop_id, host->sg_tablesize); 3279 3280 if (ha->mqenable) { 3281 bool startit = false; 3282 3283 if 
(QLA_TGT_MODE_ENABLED()) 3284 startit = false; 3285 3286 if (ql2x_ini_mode == QLA2XXX_INI_MODE_ENABLED) 3287 startit = true; 3288 3289 /* Create start of day qpairs for Block MQ */ 3290 for (i = 0; i < ha->max_qpairs; i++) 3291 qla2xxx_create_qpair(base_vha, 5, 0, startit); 3292 } 3293 3294 if (ha->flags.running_gold_fw) 3295 goto skip_dpc; 3296 3297 /* 3298 * Startup the kernel thread for this host adapter 3299 */ 3300 ha->dpc_thread = kthread_create(qla2x00_do_dpc, ha, 3301 "%s_dpc", base_vha->host_str); 3302 if (IS_ERR(ha->dpc_thread)) { 3303 ql_log(ql_log_fatal, base_vha, 0x00ed, 3304 "Failed to start DPC thread.\n"); 3305 ret = PTR_ERR(ha->dpc_thread); 3306 ha->dpc_thread = NULL; 3307 goto probe_failed; 3308 } 3309 ql_dbg(ql_dbg_init, base_vha, 0x00ee, 3310 "DPC thread started successfully.\n"); 3311 3312 /* 3313 * If we're not coming up in initiator mode, we might sit for 3314 * a while without waking up the dpc thread, which leads to a 3315 * stuck process warning. So just kick the dpc once here and 3316 * let the kthread start (and go back to sleep in qla2x00_do_dpc). 
3317 */ 3318 qla2xxx_wake_dpc(base_vha); 3319 3320 INIT_WORK(&ha->board_disable, qla2x00_disable_board_on_pci_error); 3321 3322 if (IS_QLA8031(ha) || IS_MCTP_CAPABLE(ha)) { 3323 sprintf(wq_name, "qla2xxx_%lu_dpc_lp_wq", base_vha->host_no); 3324 ha->dpc_lp_wq = create_singlethread_workqueue(wq_name); 3325 INIT_WORK(&ha->idc_aen, qla83xx_service_idc_aen); 3326 3327 sprintf(wq_name, "qla2xxx_%lu_dpc_hp_wq", base_vha->host_no); 3328 ha->dpc_hp_wq = create_singlethread_workqueue(wq_name); 3329 INIT_WORK(&ha->nic_core_reset, qla83xx_nic_core_reset_work); 3330 INIT_WORK(&ha->idc_state_handler, 3331 qla83xx_idc_state_handler_work); 3332 INIT_WORK(&ha->nic_core_unrecoverable, 3333 qla83xx_nic_core_unrecoverable_work); 3334 } 3335 3336 skip_dpc: 3337 list_add_tail(&base_vha->list, &ha->vp_list); 3338 base_vha->host->irq = ha->pdev->irq; 3339 3340 /* Initialized the timer */ 3341 qla2x00_start_timer(base_vha, WATCH_INTERVAL); 3342 ql_dbg(ql_dbg_init, base_vha, 0x00ef, 3343 "Started qla2x00_timer with " 3344 "interval=%d.\n", WATCH_INTERVAL); 3345 ql_dbg(ql_dbg_init, base_vha, 0x00f0, 3346 "Detected hba at address=%p.\n", 3347 ha); 3348 3349 if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) { 3350 if (ha->fw_attributes & BIT_4) { 3351 int prot = 0, guard; 3352 3353 base_vha->flags.difdix_supported = 1; 3354 ql_dbg(ql_dbg_init, base_vha, 0x00f1, 3355 "Registering for DIF/DIX type 1 and 3 protection.\n"); 3356 if (ql2xenabledif == 1) 3357 prot = SHOST_DIX_TYPE0_PROTECTION; 3358 if (ql2xprotmask) 3359 scsi_host_set_prot(host, ql2xprotmask); 3360 else 3361 scsi_host_set_prot(host, 3362 prot | SHOST_DIF_TYPE1_PROTECTION 3363 | SHOST_DIF_TYPE2_PROTECTION 3364 | SHOST_DIF_TYPE3_PROTECTION 3365 | SHOST_DIX_TYPE1_PROTECTION 3366 | SHOST_DIX_TYPE2_PROTECTION 3367 | SHOST_DIX_TYPE3_PROTECTION); 3368 3369 guard = SHOST_DIX_GUARD_CRC; 3370 3371 if (IS_PI_IPGUARD_CAPABLE(ha) && 3372 (ql2xenabledif > 1 || IS_PI_DIFB_DIX0_CAPABLE(ha))) 3373 guard |= SHOST_DIX_GUARD_IP; 3374 3375 if (ql2xprotguard) 
3376 scsi_host_set_guard(host, ql2xprotguard); 3377 else 3378 scsi_host_set_guard(host, guard); 3379 } else 3380 base_vha->flags.difdix_supported = 0; 3381 } 3382 3383 ha->isp_ops->enable_intrs(ha); 3384 3385 if (IS_QLAFX00(ha)) { 3386 ret = qlafx00_fx_disc(base_vha, 3387 &base_vha->hw->mr.fcport, FXDISC_GET_CONFIG_INFO); 3388 host->sg_tablesize = (ha->mr.extended_io_enabled) ? 3389 QLA_SG_ALL : 128; 3390 } 3391 3392 ret = scsi_add_host(host, &pdev->dev); 3393 if (ret) 3394 goto probe_failed; 3395 3396 base_vha->flags.init_done = 1; 3397 base_vha->flags.online = 1; 3398 ha->prev_minidump_failed = 0; 3399 3400 ql_dbg(ql_dbg_init, base_vha, 0x00f2, 3401 "Init done and hba is online.\n"); 3402 3403 if (qla_ini_mode_enabled(base_vha) || 3404 qla_dual_mode_enabled(base_vha)) 3405 scsi_scan_host(host); 3406 else 3407 ql_dbg(ql_dbg_init, base_vha, 0x0122, 3408 "skipping scsi_scan_host() for non-initiator port\n"); 3409 3410 qla2x00_alloc_sysfs_attr(base_vha); 3411 3412 if (IS_QLAFX00(ha)) { 3413 ret = qlafx00_fx_disc(base_vha, 3414 &base_vha->hw->mr.fcport, FXDISC_GET_PORT_INFO); 3415 3416 /* Register system information */ 3417 ret = qlafx00_fx_disc(base_vha, 3418 &base_vha->hw->mr.fcport, FXDISC_REG_HOST_INFO); 3419 } 3420 3421 qla2x00_init_host_attr(base_vha); 3422 3423 qla2x00_dfs_setup(base_vha); 3424 3425 ql_log(ql_log_info, base_vha, 0x00fb, 3426 "QLogic %s - %s.\n", ha->model_number, ha->model_desc); 3427 ql_log(ql_log_info, base_vha, 0x00fc, 3428 "ISP%04X: %s @ %s hdma%c host#=%ld fw=%s.\n", 3429 pdev->device, ha->isp_ops->pci_info_str(base_vha, pci_info, 3430 sizeof(pci_info)), 3431 pci_name(pdev), ha->flags.enable_64bit_addressing ? 
'+' : '-', 3432 base_vha->host_no, 3433 ha->isp_ops->fw_version_str(base_vha, fw_str, sizeof(fw_str))); 3434 3435 qlt_add_target(ha, base_vha); 3436 3437 clear_bit(PFLG_DRIVER_PROBING, &base_vha->pci_flags); 3438 3439 if (test_bit(UNLOADING, &base_vha->dpc_flags)) 3440 return -ENODEV; 3441 3442 if (ha->flags.detected_lr_sfp) { 3443 ql_log(ql_log_info, base_vha, 0xffff, 3444 "Reset chip to pick up LR SFP setting\n"); 3445 set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags); 3446 qla2xxx_wake_dpc(base_vha); 3447 } 3448 3449 return 0; 3450 3451 probe_failed: 3452 if (base_vha->gnl.l) { 3453 dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size, 3454 base_vha->gnl.l, base_vha->gnl.ldma); 3455 base_vha->gnl.l = NULL; 3456 } 3457 3458 if (base_vha->timer_active) 3459 qla2x00_stop_timer(base_vha); 3460 base_vha->flags.online = 0; 3461 if (ha->dpc_thread) { 3462 struct task_struct *t = ha->dpc_thread; 3463 3464 ha->dpc_thread = NULL; 3465 kthread_stop(t); 3466 } 3467 3468 qla2x00_free_device(base_vha); 3469 scsi_host_put(base_vha->host); 3470 /* 3471 * Need to NULL out local req/rsp after 3472 * qla2x00_free_device => qla2x00_free_queues frees 3473 * what these are pointing to. Or else we'll 3474 * fall over below in qla2x00_free_req/rsp_que. 
3475 */ 3476 req = NULL; 3477 rsp = NULL; 3478 3479 probe_hw_failed: 3480 qla2x00_mem_free(ha); 3481 qla2x00_free_req_que(ha, req); 3482 qla2x00_free_rsp_que(ha, rsp); 3483 qla2x00_clear_drv_active(ha); 3484 3485 iospace_config_failed: 3486 if (IS_P3P_TYPE(ha)) { 3487 if (!ha->nx_pcibase) 3488 iounmap((device_reg_t *)ha->nx_pcibase); 3489 if (!ql2xdbwr) 3490 iounmap((device_reg_t *)ha->nxdb_wr_ptr); 3491 } else { 3492 if (ha->iobase) 3493 iounmap(ha->iobase); 3494 if (ha->cregbase) 3495 iounmap(ha->cregbase); 3496 } 3497 pci_release_selected_regions(ha->pdev, ha->bars); 3498 kfree(ha); 3499 3500 disable_device: 3501 pci_disable_device(pdev); 3502 return ret; 3503 } 3504 3505 static void __qla_set_remove_flag(scsi_qla_host_t *base_vha) 3506 { 3507 scsi_qla_host_t *vp; 3508 unsigned long flags; 3509 struct qla_hw_data *ha; 3510 3511 if (!base_vha) 3512 return; 3513 3514 ha = base_vha->hw; 3515 3516 spin_lock_irqsave(&ha->vport_slock, flags); 3517 list_for_each_entry(vp, &ha->vp_list, list) 3518 set_bit(PFLG_DRIVER_REMOVING, &vp->pci_flags); 3519 3520 /* 3521 * Indicate device removal to prevent future board_disable 3522 * and wait until any pending board_disable has completed. 3523 */ 3524 set_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags); 3525 spin_unlock_irqrestore(&ha->vport_slock, flags); 3526 } 3527 3528 static void 3529 qla2x00_shutdown(struct pci_dev *pdev) 3530 { 3531 scsi_qla_host_t *vha; 3532 struct qla_hw_data *ha; 3533 3534 vha = pci_get_drvdata(pdev); 3535 ha = vha->hw; 3536 3537 ql_log(ql_log_info, vha, 0xfffa, 3538 "Adapter shutdown\n"); 3539 3540 /* 3541 * Prevent future board_disable and wait 3542 * until any pending board_disable has completed. 
3543 */ 3544 __qla_set_remove_flag(vha); 3545 cancel_work_sync(&ha->board_disable); 3546 3547 if (!atomic_read(&pdev->enable_cnt)) 3548 return; 3549 3550 /* Notify ISPFX00 firmware */ 3551 if (IS_QLAFX00(ha)) 3552 qlafx00_driver_shutdown(vha, 20); 3553 3554 /* Turn-off FCE trace */ 3555 if (ha->flags.fce_enabled) { 3556 qla2x00_disable_fce_trace(vha, NULL, NULL); 3557 ha->flags.fce_enabled = 0; 3558 } 3559 3560 /* Turn-off EFT trace */ 3561 if (ha->eft) 3562 qla2x00_disable_eft_trace(vha); 3563 3564 if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) || 3565 IS_QLA28XX(ha)) { 3566 if (ha->flags.fw_started) 3567 qla2x00_abort_isp_cleanup(vha); 3568 } else { 3569 /* Stop currently executing firmware. */ 3570 qla2x00_try_to_stop_firmware(vha); 3571 } 3572 3573 /* Disable timer */ 3574 if (vha->timer_active) 3575 qla2x00_stop_timer(vha); 3576 3577 /* Turn adapter off line */ 3578 vha->flags.online = 0; 3579 3580 /* turn-off interrupts on the card */ 3581 if (ha->interrupts_on) { 3582 vha->flags.init_done = 0; 3583 ha->isp_ops->disable_intrs(ha); 3584 } 3585 3586 qla2x00_free_irqs(vha); 3587 3588 qla2x00_free_fw_dump(ha); 3589 3590 pci_disable_device(pdev); 3591 ql_log(ql_log_info, vha, 0xfffe, 3592 "Adapter shutdown successfully.\n"); 3593 } 3594 3595 /* Deletes all the virtual ports for a given ha */ 3596 static void 3597 qla2x00_delete_all_vps(struct qla_hw_data *ha, scsi_qla_host_t *base_vha) 3598 { 3599 scsi_qla_host_t *vha; 3600 unsigned long flags; 3601 3602 mutex_lock(&ha->vport_lock); 3603 while (ha->cur_vport_count) { 3604 spin_lock_irqsave(&ha->vport_slock, flags); 3605 3606 BUG_ON(base_vha->list.next == &ha->vp_list); 3607 /* This assumes first entry in ha->vp_list is always base vha */ 3608 vha = list_first_entry(&base_vha->list, scsi_qla_host_t, list); 3609 scsi_host_get(vha->host); 3610 3611 spin_unlock_irqrestore(&ha->vport_slock, flags); 3612 mutex_unlock(&ha->vport_lock); 3613 3614 qla_nvme_delete(vha); 3615 3616 fc_vport_terminate(vha->fc_vport); 
3617 scsi_host_put(vha->host); 3618 3619 mutex_lock(&ha->vport_lock); 3620 } 3621 mutex_unlock(&ha->vport_lock); 3622 } 3623 3624 /* Stops all deferred work threads */ 3625 static void 3626 qla2x00_destroy_deferred_work(struct qla_hw_data *ha) 3627 { 3628 /* Cancel all work and destroy DPC workqueues */ 3629 if (ha->dpc_lp_wq) { 3630 cancel_work_sync(&ha->idc_aen); 3631 destroy_workqueue(ha->dpc_lp_wq); 3632 ha->dpc_lp_wq = NULL; 3633 } 3634 3635 if (ha->dpc_hp_wq) { 3636 cancel_work_sync(&ha->nic_core_reset); 3637 cancel_work_sync(&ha->idc_state_handler); 3638 cancel_work_sync(&ha->nic_core_unrecoverable); 3639 destroy_workqueue(ha->dpc_hp_wq); 3640 ha->dpc_hp_wq = NULL; 3641 } 3642 3643 /* Kill the kernel thread for this host */ 3644 if (ha->dpc_thread) { 3645 struct task_struct *t = ha->dpc_thread; 3646 3647 /* 3648 * qla2xxx_wake_dpc checks for ->dpc_thread 3649 * so we need to zero it out. 3650 */ 3651 ha->dpc_thread = NULL; 3652 kthread_stop(t); 3653 } 3654 } 3655 3656 static void 3657 qla2x00_unmap_iobases(struct qla_hw_data *ha) 3658 { 3659 if (IS_QLA82XX(ha)) { 3660 3661 iounmap((device_reg_t *)ha->nx_pcibase); 3662 if (!ql2xdbwr) 3663 iounmap((device_reg_t *)ha->nxdb_wr_ptr); 3664 } else { 3665 if (ha->iobase) 3666 iounmap(ha->iobase); 3667 3668 if (ha->cregbase) 3669 iounmap(ha->cregbase); 3670 3671 if (ha->mqiobase) 3672 iounmap(ha->mqiobase); 3673 3674 if ((IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) && 3675 ha->msixbase) 3676 iounmap(ha->msixbase); 3677 } 3678 } 3679 3680 static void 3681 qla2x00_clear_drv_active(struct qla_hw_data *ha) 3682 { 3683 if (IS_QLA8044(ha)) { 3684 qla8044_idc_lock(ha); 3685 qla8044_clear_drv_active(ha); 3686 qla8044_idc_unlock(ha); 3687 } else if (IS_QLA82XX(ha)) { 3688 qla82xx_idc_lock(ha); 3689 qla82xx_clear_drv_active(ha); 3690 qla82xx_idc_unlock(ha); 3691 } 3692 } 3693 3694 static void 3695 qla2x00_remove_one(struct pci_dev *pdev) 3696 { 3697 scsi_qla_host_t *base_vha; 3698 struct qla_hw_data *ha; 3699 3700 
base_vha = pci_get_drvdata(pdev); 3701 ha = base_vha->hw; 3702 ql_log(ql_log_info, base_vha, 0xb079, 3703 "Removing driver\n"); 3704 __qla_set_remove_flag(base_vha); 3705 cancel_work_sync(&ha->board_disable); 3706 3707 /* 3708 * If the PCI device is disabled then there was a PCI-disconnect and 3709 * qla2x00_disable_board_on_pci_error has taken care of most of the 3710 * resources. 3711 */ 3712 if (!atomic_read(&pdev->enable_cnt)) { 3713 dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size, 3714 base_vha->gnl.l, base_vha->gnl.ldma); 3715 base_vha->gnl.l = NULL; 3716 scsi_host_put(base_vha->host); 3717 kfree(ha); 3718 pci_set_drvdata(pdev, NULL); 3719 return; 3720 } 3721 qla2x00_wait_for_hba_ready(base_vha); 3722 3723 if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) || 3724 IS_QLA28XX(ha)) { 3725 if (ha->flags.fw_started) 3726 qla2x00_abort_isp_cleanup(base_vha); 3727 } else if (!IS_QLAFX00(ha)) { 3728 if (IS_QLA8031(ha)) { 3729 ql_dbg(ql_dbg_p3p, base_vha, 0xb07e, 3730 "Clearing fcoe driver presence.\n"); 3731 if (qla83xx_clear_drv_presence(base_vha) != QLA_SUCCESS) 3732 ql_dbg(ql_dbg_p3p, base_vha, 0xb079, 3733 "Error while clearing DRV-Presence.\n"); 3734 } 3735 3736 qla2x00_try_to_stop_firmware(base_vha); 3737 } 3738 3739 qla2x00_wait_for_sess_deletion(base_vha); 3740 3741 /* 3742 * if UNLOAD flag is already set, then continue unload, 3743 * where it was set first. 
3744 */ 3745 if (test_bit(UNLOADING, &base_vha->dpc_flags)) 3746 return; 3747 3748 set_bit(UNLOADING, &base_vha->dpc_flags); 3749 3750 qla_nvme_delete(base_vha); 3751 3752 dma_free_coherent(&ha->pdev->dev, 3753 base_vha->gnl.size, base_vha->gnl.l, base_vha->gnl.ldma); 3754 3755 base_vha->gnl.l = NULL; 3756 3757 vfree(base_vha->scan.l); 3758 3759 if (IS_QLAFX00(ha)) 3760 qlafx00_driver_shutdown(base_vha, 20); 3761 3762 qla2x00_delete_all_vps(ha, base_vha); 3763 3764 qla2x00_dfs_remove(base_vha); 3765 3766 qla84xx_put_chip(base_vha); 3767 3768 /* Disable timer */ 3769 if (base_vha->timer_active) 3770 qla2x00_stop_timer(base_vha); 3771 3772 base_vha->flags.online = 0; 3773 3774 /* free DMA memory */ 3775 if (ha->exlogin_buf) 3776 qla2x00_free_exlogin_buffer(ha); 3777 3778 /* free DMA memory */ 3779 if (ha->exchoffld_buf) 3780 qla2x00_free_exchoffld_buffer(ha); 3781 3782 qla2x00_destroy_deferred_work(ha); 3783 3784 qlt_remove_target(ha, base_vha); 3785 3786 qla2x00_free_sysfs_attr(base_vha, true); 3787 3788 fc_remove_host(base_vha->host); 3789 qlt_remove_target_resources(ha); 3790 3791 scsi_remove_host(base_vha->host); 3792 3793 qla2x00_free_device(base_vha); 3794 3795 qla2x00_clear_drv_active(ha); 3796 3797 scsi_host_put(base_vha->host); 3798 3799 qla2x00_unmap_iobases(ha); 3800 3801 pci_release_selected_regions(ha->pdev, ha->bars); 3802 kfree(ha); 3803 3804 pci_disable_pcie_error_reporting(pdev); 3805 3806 pci_disable_device(pdev); 3807 } 3808 3809 static void 3810 qla2x00_free_device(scsi_qla_host_t *vha) 3811 { 3812 struct qla_hw_data *ha = vha->hw; 3813 3814 qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16); 3815 3816 /* Disable timer */ 3817 if (vha->timer_active) 3818 qla2x00_stop_timer(vha); 3819 3820 qla25xx_delete_queues(vha); 3821 vha->flags.online = 0; 3822 3823 /* turn-off interrupts on the card */ 3824 if (ha->interrupts_on) { 3825 vha->flags.init_done = 0; 3826 ha->isp_ops->disable_intrs(ha); 3827 } 3828 3829 qla2x00_free_fcports(vha); 3830 3831 
qla2x00_free_irqs(vha); 3832 3833 /* Flush the work queue and remove it */ 3834 if (ha->wq) { 3835 flush_workqueue(ha->wq); 3836 destroy_workqueue(ha->wq); 3837 ha->wq = NULL; 3838 } 3839 3840 3841 qla2x00_mem_free(ha); 3842 3843 qla82xx_md_free(vha); 3844 3845 qla2x00_free_queues(ha); 3846 } 3847 3848 void qla2x00_free_fcports(struct scsi_qla_host *vha) 3849 { 3850 fc_port_t *fcport, *tfcport; 3851 3852 list_for_each_entry_safe(fcport, tfcport, &vha->vp_fcports, list) 3853 qla2x00_free_fcport(fcport); 3854 } 3855 3856 static inline void 3857 qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport, 3858 int defer) 3859 { 3860 struct fc_rport *rport; 3861 scsi_qla_host_t *base_vha; 3862 unsigned long flags; 3863 3864 if (!fcport->rport) 3865 return; 3866 3867 rport = fcport->rport; 3868 if (defer) { 3869 base_vha = pci_get_drvdata(vha->hw->pdev); 3870 spin_lock_irqsave(vha->host->host_lock, flags); 3871 fcport->drport = rport; 3872 spin_unlock_irqrestore(vha->host->host_lock, flags); 3873 qlt_do_generation_tick(vha, &base_vha->total_fcport_update_gen); 3874 set_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags); 3875 qla2xxx_wake_dpc(base_vha); 3876 } else { 3877 int now; 3878 3879 if (rport) { 3880 ql_dbg(ql_dbg_disc, fcport->vha, 0x2109, 3881 "%s %8phN. rport %p roles %x\n", 3882 __func__, fcport->port_name, rport, 3883 rport->roles); 3884 fc_remote_port_delete(rport); 3885 } 3886 qlt_do_generation_tick(vha, &now); 3887 } 3888 } 3889 3890 /* 3891 * qla2x00_mark_device_lost Updates fcport state when device goes offline. 3892 * 3893 * Input: ha = adapter block pointer. fcport = port structure pointer. 3894 * 3895 * Return: None. 
3896 * 3897 * Context: 3898 */ 3899 void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport, 3900 int do_login, int defer) 3901 { 3902 if (IS_QLAFX00(vha->hw)) { 3903 qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST); 3904 qla2x00_schedule_rport_del(vha, fcport, defer); 3905 return; 3906 } 3907 3908 if (atomic_read(&fcport->state) == FCS_ONLINE && 3909 vha->vp_idx == fcport->vha->vp_idx) { 3910 qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST); 3911 qla2x00_schedule_rport_del(vha, fcport, defer); 3912 } 3913 /* 3914 * We may need to retry the login, so don't change the state of the 3915 * port but do the retries. 3916 */ 3917 if (atomic_read(&fcport->state) != FCS_DEVICE_DEAD) 3918 qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST); 3919 3920 if (!do_login) 3921 return; 3922 3923 set_bit(RELOGIN_NEEDED, &vha->dpc_flags); 3924 } 3925 3926 /* 3927 * qla2x00_mark_all_devices_lost 3928 * Updates fcport state when device goes offline. 3929 * 3930 * Input: 3931 * ha = adapter block pointer. 3932 * fcport = port structure pointer. 3933 * 3934 * Return: 3935 * None. 3936 * 3937 * Context: 3938 */ 3939 void 3940 qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer) 3941 { 3942 fc_port_t *fcport; 3943 3944 ql_dbg(ql_dbg_disc, vha, 0x20f1, 3945 "Mark all dev lost\n"); 3946 3947 list_for_each_entry(fcport, &vha->vp_fcports, list) { 3948 fcport->scan_state = 0; 3949 qlt_schedule_sess_for_deletion(fcport); 3950 3951 if (vha->vp_idx != 0 && vha->vp_idx != fcport->vha->vp_idx) 3952 continue; 3953 3954 /* 3955 * No point in marking the device as lost, if the device is 3956 * already DEAD. 
3957 */ 3958 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD) 3959 continue; 3960 if (atomic_read(&fcport->state) == FCS_ONLINE) { 3961 qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST); 3962 if (defer) 3963 qla2x00_schedule_rport_del(vha, fcport, defer); 3964 else if (vha->vp_idx == fcport->vha->vp_idx) 3965 qla2x00_schedule_rport_del(vha, fcport, defer); 3966 } 3967 } 3968 } 3969 3970 static void qla2x00_set_reserved_loop_ids(struct qla_hw_data *ha) 3971 { 3972 int i; 3973 3974 if (IS_FWI2_CAPABLE(ha)) 3975 return; 3976 3977 for (i = 0; i < SNS_FIRST_LOOP_ID; i++) 3978 set_bit(i, ha->loop_id_map); 3979 set_bit(MANAGEMENT_SERVER, ha->loop_id_map); 3980 set_bit(BROADCAST, ha->loop_id_map); 3981 } 3982 3983 /* 3984 * qla2x00_mem_alloc 3985 * Allocates adapter memory. 3986 * 3987 * Returns: 3988 * 0 = success. 3989 * !0 = failure. 3990 */ 3991 static int 3992 qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len, 3993 struct req_que **req, struct rsp_que **rsp) 3994 { 3995 char name[16]; 3996 3997 ha->init_cb = dma_alloc_coherent(&ha->pdev->dev, ha->init_cb_size, 3998 &ha->init_cb_dma, GFP_KERNEL); 3999 if (!ha->init_cb) 4000 goto fail; 4001 4002 if (qlt_mem_alloc(ha) < 0) 4003 goto fail_free_init_cb; 4004 4005 ha->gid_list = dma_alloc_coherent(&ha->pdev->dev, 4006 qla2x00_gid_list_size(ha), &ha->gid_list_dma, GFP_KERNEL); 4007 if (!ha->gid_list) 4008 goto fail_free_tgt_mem; 4009 4010 ha->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep); 4011 if (!ha->srb_mempool) 4012 goto fail_free_gid_list; 4013 4014 if (IS_P3P_TYPE(ha)) { 4015 /* Allocate cache for CT6 Ctx. 
*/ 4016 if (!ctx_cachep) { 4017 ctx_cachep = kmem_cache_create("qla2xxx_ctx", 4018 sizeof(struct ct6_dsd), 0, 4019 SLAB_HWCACHE_ALIGN, NULL); 4020 if (!ctx_cachep) 4021 goto fail_free_srb_mempool; 4022 } 4023 ha->ctx_mempool = mempool_create_slab_pool(SRB_MIN_REQ, 4024 ctx_cachep); 4025 if (!ha->ctx_mempool) 4026 goto fail_free_srb_mempool; 4027 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0021, 4028 "ctx_cachep=%p ctx_mempool=%p.\n", 4029 ctx_cachep, ha->ctx_mempool); 4030 } 4031 4032 /* Get memory for cached NVRAM */ 4033 ha->nvram = kzalloc(MAX_NVRAM_SIZE, GFP_KERNEL); 4034 if (!ha->nvram) 4035 goto fail_free_ctx_mempool; 4036 4037 snprintf(name, sizeof(name), "%s_%d", QLA2XXX_DRIVER_NAME, 4038 ha->pdev->device); 4039 ha->s_dma_pool = dma_pool_create(name, &ha->pdev->dev, 4040 DMA_POOL_SIZE, 8, 0); 4041 if (!ha->s_dma_pool) 4042 goto fail_free_nvram; 4043 4044 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0022, 4045 "init_cb=%p gid_list=%p, srb_mempool=%p s_dma_pool=%p.\n", 4046 ha->init_cb, ha->gid_list, ha->srb_mempool, ha->s_dma_pool); 4047 4048 if (IS_P3P_TYPE(ha) || ql2xenabledif) { 4049 ha->dl_dma_pool = dma_pool_create(name, &ha->pdev->dev, 4050 DSD_LIST_DMA_POOL_SIZE, 8, 0); 4051 if (!ha->dl_dma_pool) { 4052 ql_log_pci(ql_log_fatal, ha->pdev, 0x0023, 4053 "Failed to allocate memory for dl_dma_pool.\n"); 4054 goto fail_s_dma_pool; 4055 } 4056 4057 ha->fcp_cmnd_dma_pool = dma_pool_create(name, &ha->pdev->dev, 4058 FCP_CMND_DMA_POOL_SIZE, 8, 0); 4059 if (!ha->fcp_cmnd_dma_pool) { 4060 ql_log_pci(ql_log_fatal, ha->pdev, 0x0024, 4061 "Failed to allocate memory for fcp_cmnd_dma_pool.\n"); 4062 goto fail_dl_dma_pool; 4063 } 4064 4065 if (ql2xenabledif) { 4066 u64 bufsize = DIF_BUNDLING_DMA_POOL_SIZE; 4067 struct dsd_dma *dsd, *nxt; 4068 uint i; 4069 /* Creata a DMA pool of buffers for DIF bundling */ 4070 ha->dif_bundl_pool = dma_pool_create(name, 4071 &ha->pdev->dev, DIF_BUNDLING_DMA_POOL_SIZE, 8, 0); 4072 if (!ha->dif_bundl_pool) { 4073 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0024, 
4074 "%s: failed create dif_bundl_pool\n", 4075 __func__); 4076 goto fail_dif_bundl_dma_pool; 4077 } 4078 4079 INIT_LIST_HEAD(&ha->pool.good.head); 4080 INIT_LIST_HEAD(&ha->pool.unusable.head); 4081 ha->pool.good.count = 0; 4082 ha->pool.unusable.count = 0; 4083 for (i = 0; i < 128; i++) { 4084 dsd = kzalloc(sizeof(*dsd), GFP_ATOMIC); 4085 if (!dsd) { 4086 ql_dbg_pci(ql_dbg_init, ha->pdev, 4087 0xe0ee, "%s: failed alloc dsd\n", 4088 __func__); 4089 return 1; 4090 } 4091 ha->dif_bundle_kallocs++; 4092 4093 dsd->dsd_addr = dma_pool_alloc( 4094 ha->dif_bundl_pool, GFP_ATOMIC, 4095 &dsd->dsd_list_dma); 4096 if (!dsd->dsd_addr) { 4097 ql_dbg_pci(ql_dbg_init, ha->pdev, 4098 0xe0ee, 4099 "%s: failed alloc ->dsd_addr\n", 4100 __func__); 4101 kfree(dsd); 4102 ha->dif_bundle_kallocs--; 4103 continue; 4104 } 4105 ha->dif_bundle_dma_allocs++; 4106 4107 /* 4108 * if DMA buffer crosses 4G boundary, 4109 * put it on bad list 4110 */ 4111 if (MSD(dsd->dsd_list_dma) ^ 4112 MSD(dsd->dsd_list_dma + bufsize)) { 4113 list_add_tail(&dsd->list, 4114 &ha->pool.unusable.head); 4115 ha->pool.unusable.count++; 4116 } else { 4117 list_add_tail(&dsd->list, 4118 &ha->pool.good.head); 4119 ha->pool.good.count++; 4120 } 4121 } 4122 4123 /* return the good ones back to the pool */ 4124 list_for_each_entry_safe(dsd, nxt, 4125 &ha->pool.good.head, list) { 4126 list_del(&dsd->list); 4127 dma_pool_free(ha->dif_bundl_pool, 4128 dsd->dsd_addr, dsd->dsd_list_dma); 4129 ha->dif_bundle_dma_allocs--; 4130 kfree(dsd); 4131 ha->dif_bundle_kallocs--; 4132 } 4133 4134 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0024, 4135 "%s: dif dma pool (good=%u unusable=%u)\n", 4136 __func__, ha->pool.good.count, 4137 ha->pool.unusable.count); 4138 } 4139 4140 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0025, 4141 "dl_dma_pool=%p fcp_cmnd_dma_pool=%p dif_bundl_pool=%p.\n", 4142 ha->dl_dma_pool, ha->fcp_cmnd_dma_pool, 4143 ha->dif_bundl_pool); 4144 } 4145 4146 /* Allocate memory for SNS commands */ 4147 if (IS_QLA2100(ha) || IS_QLA2200(ha)) 
{ 4148 /* Get consistent memory allocated for SNS commands */ 4149 ha->sns_cmd = dma_alloc_coherent(&ha->pdev->dev, 4150 sizeof(struct sns_cmd_pkt), &ha->sns_cmd_dma, GFP_KERNEL); 4151 if (!ha->sns_cmd) 4152 goto fail_dma_pool; 4153 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0026, 4154 "sns_cmd: %p.\n", ha->sns_cmd); 4155 } else { 4156 /* Get consistent memory allocated for MS IOCB */ 4157 ha->ms_iocb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, 4158 &ha->ms_iocb_dma); 4159 if (!ha->ms_iocb) 4160 goto fail_dma_pool; 4161 /* Get consistent memory allocated for CT SNS commands */ 4162 ha->ct_sns = dma_alloc_coherent(&ha->pdev->dev, 4163 sizeof(struct ct_sns_pkt), &ha->ct_sns_dma, GFP_KERNEL); 4164 if (!ha->ct_sns) 4165 goto fail_free_ms_iocb; 4166 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0027, 4167 "ms_iocb=%p ct_sns=%p.\n", 4168 ha->ms_iocb, ha->ct_sns); 4169 } 4170 4171 /* Allocate memory for request ring */ 4172 *req = kzalloc(sizeof(struct req_que), GFP_KERNEL); 4173 if (!*req) { 4174 ql_log_pci(ql_log_fatal, ha->pdev, 0x0028, 4175 "Failed to allocate memory for req.\n"); 4176 goto fail_req; 4177 } 4178 (*req)->length = req_len; 4179 (*req)->ring = dma_alloc_coherent(&ha->pdev->dev, 4180 ((*req)->length + 1) * sizeof(request_t), 4181 &(*req)->dma, GFP_KERNEL); 4182 if (!(*req)->ring) { 4183 ql_log_pci(ql_log_fatal, ha->pdev, 0x0029, 4184 "Failed to allocate memory for req_ring.\n"); 4185 goto fail_req_ring; 4186 } 4187 /* Allocate memory for response ring */ 4188 *rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL); 4189 if (!*rsp) { 4190 ql_log_pci(ql_log_fatal, ha->pdev, 0x002a, 4191 "Failed to allocate memory for rsp.\n"); 4192 goto fail_rsp; 4193 } 4194 (*rsp)->hw = ha; 4195 (*rsp)->length = rsp_len; 4196 (*rsp)->ring = dma_alloc_coherent(&ha->pdev->dev, 4197 ((*rsp)->length + 1) * sizeof(response_t), 4198 &(*rsp)->dma, GFP_KERNEL); 4199 if (!(*rsp)->ring) { 4200 ql_log_pci(ql_log_fatal, ha->pdev, 0x002b, 4201 "Failed to allocate memory for rsp_ring.\n"); 4202 goto 
fail_rsp_ring; 4203 } 4204 (*req)->rsp = *rsp; 4205 (*rsp)->req = *req; 4206 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002c, 4207 "req=%p req->length=%d req->ring=%p rsp=%p " 4208 "rsp->length=%d rsp->ring=%p.\n", 4209 *req, (*req)->length, (*req)->ring, *rsp, (*rsp)->length, 4210 (*rsp)->ring); 4211 /* Allocate memory for NVRAM data for vports */ 4212 if (ha->nvram_npiv_size) { 4213 ha->npiv_info = kcalloc(ha->nvram_npiv_size, 4214 sizeof(struct qla_npiv_entry), 4215 GFP_KERNEL); 4216 if (!ha->npiv_info) { 4217 ql_log_pci(ql_log_fatal, ha->pdev, 0x002d, 4218 "Failed to allocate memory for npiv_info.\n"); 4219 goto fail_npiv_info; 4220 } 4221 } else 4222 ha->npiv_info = NULL; 4223 4224 /* Get consistent memory allocated for EX-INIT-CB. */ 4225 if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) || 4226 IS_QLA28XX(ha)) { 4227 ha->ex_init_cb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, 4228 &ha->ex_init_cb_dma); 4229 if (!ha->ex_init_cb) 4230 goto fail_ex_init_cb; 4231 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002e, 4232 "ex_init_cb=%p.\n", ha->ex_init_cb); 4233 } 4234 4235 INIT_LIST_HEAD(&ha->gbl_dsd_list); 4236 4237 /* Get consistent memory allocated for Async Port-Database. 
*/ 4238 if (!IS_FWI2_CAPABLE(ha)) { 4239 ha->async_pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, 4240 &ha->async_pd_dma); 4241 if (!ha->async_pd) 4242 goto fail_async_pd; 4243 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002f, 4244 "async_pd=%p.\n", ha->async_pd); 4245 } 4246 4247 INIT_LIST_HEAD(&ha->vp_list); 4248 4249 /* Allocate memory for our loop_id bitmap */ 4250 ha->loop_id_map = kcalloc(BITS_TO_LONGS(LOOPID_MAP_SIZE), 4251 sizeof(long), 4252 GFP_KERNEL); 4253 if (!ha->loop_id_map) 4254 goto fail_loop_id_map; 4255 else { 4256 qla2x00_set_reserved_loop_ids(ha); 4257 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0123, 4258 "loop_id_map=%p.\n", ha->loop_id_map); 4259 } 4260 4261 ha->sfp_data = dma_alloc_coherent(&ha->pdev->dev, 4262 SFP_DEV_SIZE, &ha->sfp_data_dma, GFP_KERNEL); 4263 if (!ha->sfp_data) { 4264 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011b, 4265 "Unable to allocate memory for SFP read-data.\n"); 4266 goto fail_sfp_data; 4267 } 4268 4269 ha->flt = dma_alloc_coherent(&ha->pdev->dev, 4270 sizeof(struct qla_flt_header) + FLT_REGIONS_SIZE, &ha->flt_dma, 4271 GFP_KERNEL); 4272 if (!ha->flt) { 4273 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011b, 4274 "Unable to allocate memory for FLT.\n"); 4275 goto fail_flt_buffer; 4276 } 4277 4278 return 0; 4279 4280 fail_flt_buffer: 4281 dma_free_coherent(&ha->pdev->dev, SFP_DEV_SIZE, 4282 ha->sfp_data, ha->sfp_data_dma); 4283 fail_sfp_data: 4284 kfree(ha->loop_id_map); 4285 fail_loop_id_map: 4286 dma_pool_free(ha->s_dma_pool, ha->async_pd, ha->async_pd_dma); 4287 fail_async_pd: 4288 dma_pool_free(ha->s_dma_pool, ha->ex_init_cb, ha->ex_init_cb_dma); 4289 fail_ex_init_cb: 4290 kfree(ha->npiv_info); 4291 fail_npiv_info: 4292 dma_free_coherent(&ha->pdev->dev, ((*rsp)->length + 1) * 4293 sizeof(response_t), (*rsp)->ring, (*rsp)->dma); 4294 (*rsp)->ring = NULL; 4295 (*rsp)->dma = 0; 4296 fail_rsp_ring: 4297 kfree(*rsp); 4298 *rsp = NULL; 4299 fail_rsp: 4300 dma_free_coherent(&ha->pdev->dev, ((*req)->length + 1) * 4301 sizeof(request_t), 
(*req)->ring, (*req)->dma); 4302 (*req)->ring = NULL; 4303 (*req)->dma = 0; 4304 fail_req_ring: 4305 kfree(*req); 4306 *req = NULL; 4307 fail_req: 4308 dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt), 4309 ha->ct_sns, ha->ct_sns_dma); 4310 ha->ct_sns = NULL; 4311 ha->ct_sns_dma = 0; 4312 fail_free_ms_iocb: 4313 dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma); 4314 ha->ms_iocb = NULL; 4315 ha->ms_iocb_dma = 0; 4316 4317 if (ha->sns_cmd) 4318 dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt), 4319 ha->sns_cmd, ha->sns_cmd_dma); 4320 fail_dma_pool: 4321 if (ql2xenabledif) { 4322 struct dsd_dma *dsd, *nxt; 4323 4324 list_for_each_entry_safe(dsd, nxt, &ha->pool.unusable.head, 4325 list) { 4326 list_del(&dsd->list); 4327 dma_pool_free(ha->dif_bundl_pool, dsd->dsd_addr, 4328 dsd->dsd_list_dma); 4329 ha->dif_bundle_dma_allocs--; 4330 kfree(dsd); 4331 ha->dif_bundle_kallocs--; 4332 ha->pool.unusable.count--; 4333 } 4334 dma_pool_destroy(ha->dif_bundl_pool); 4335 ha->dif_bundl_pool = NULL; 4336 } 4337 4338 fail_dif_bundl_dma_pool: 4339 if (IS_QLA82XX(ha) || ql2xenabledif) { 4340 dma_pool_destroy(ha->fcp_cmnd_dma_pool); 4341 ha->fcp_cmnd_dma_pool = NULL; 4342 } 4343 fail_dl_dma_pool: 4344 if (IS_QLA82XX(ha) || ql2xenabledif) { 4345 dma_pool_destroy(ha->dl_dma_pool); 4346 ha->dl_dma_pool = NULL; 4347 } 4348 fail_s_dma_pool: 4349 dma_pool_destroy(ha->s_dma_pool); 4350 ha->s_dma_pool = NULL; 4351 fail_free_nvram: 4352 kfree(ha->nvram); 4353 ha->nvram = NULL; 4354 fail_free_ctx_mempool: 4355 mempool_destroy(ha->ctx_mempool); 4356 ha->ctx_mempool = NULL; 4357 fail_free_srb_mempool: 4358 mempool_destroy(ha->srb_mempool); 4359 ha->srb_mempool = NULL; 4360 fail_free_gid_list: 4361 dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha), 4362 ha->gid_list, 4363 ha->gid_list_dma); 4364 ha->gid_list = NULL; 4365 ha->gid_list_dma = 0; 4366 fail_free_tgt_mem: 4367 qlt_mem_free(ha); 4368 fail_free_init_cb: 4369 dma_free_coherent(&ha->pdev->dev, 
ha->init_cb_size, ha->init_cb,
	    ha->init_cb_dma);
	ha->init_cb = NULL;
	ha->init_cb_dma = 0;
fail:
	ql_log(ql_log_fatal, NULL, 0x0030,
	    "Memory allocation failure.\n");
	return -ENOMEM;
}

/*
 * qla2x00_set_exlogins_buffer - size and allocate the extended-login
 * (EXLOGIN) offload DMA buffer and hand it to firmware.
 *
 * Queries firmware for the per-login portdb size and the maximum login
 * count, clamps the ql2xexlogins module parameter to that maximum, and
 * (re)allocates the coherent buffer only when the required size changed.
 *
 * Returns QLA_SUCCESS when offload is disabled/unsupported or on success,
 * a mailbox status on query failure, or -ENOMEM on allocation failure.
 */
int
qla2x00_set_exlogins_buffer(scsi_qla_host_t *vha)
{
	int rval;
	uint16_t size, max_cnt, temp;
	struct qla_hw_data *ha = vha->hw;

	/* Return if we don't need to allocate any extended logins */
	if (!ql2xexlogins)
		return QLA_SUCCESS;

	if (!IS_EXLOGIN_OFFLD_CAPABLE(ha))
		return QLA_SUCCESS;

	ql_log(ql_log_info, vha, 0xd021, "EXLOGIN count: %d.\n", ql2xexlogins);
	max_cnt = 0;
	rval = qla_get_exlogin_status(vha, &size, &max_cnt);
	if (rval != QLA_SUCCESS) {
		ql_log_pci(ql_log_fatal, ha->pdev, 0xd029,
		    "Failed to get exlogin status.\n");
		return rval;
	}

	/*
	 * NOTE(review): temp is uint16_t; count * size could exceed 65535
	 * for large portdb sizes — confirm firmware bounds keep this in
	 * range before widening is considered.
	 */
	temp = (ql2xexlogins > max_cnt) ? max_cnt : ql2xexlogins;
	temp *= size;

	/* Reallocate only when the required size actually changed. */
	if (temp != ha->exlogin_size) {
		qla2x00_free_exlogin_buffer(ha);
		ha->exlogin_size = temp;

		ql_log(ql_log_info, vha, 0xd024,
		    "EXLOGIN: max_logins=%d, portdb=0x%x, total=%d.\n",
		    max_cnt, size, temp);

		ql_log(ql_log_info, vha, 0xd025,
		    "EXLOGIN: requested size=0x%x\n", ha->exlogin_size);

		/* Get consistent memory for extended logins */
		ha->exlogin_buf = dma_alloc_coherent(&ha->pdev->dev,
			ha->exlogin_size, &ha->exlogin_buf_dma, GFP_KERNEL);
		if (!ha->exlogin_buf) {
			ql_log_pci(ql_log_fatal, ha->pdev, 0xd02a,
			    "Failed to allocate memory for exlogin_buf_dma.\n");
			return -ENOMEM;
		}
	}

	/* Now configure the dma buffer */
	rval = qla_set_exlogin_mem_cfg(vha, ha->exlogin_buf_dma);
	if (rval) {
		ql_log(ql_log_fatal, vha, 0xd033,
		    "Setup extended login buffer ****FAILED****.\n");
		/* Firmware rejected the buffer — release it again. */
		qla2x00_free_exlogin_buffer(ha);
	}

	return rval;
}

/*
 * qla2x00_free_exlogin_buffer
* 4440 * Input: 4441 * ha = adapter block pointer 4442 */ 4443 void 4444 qla2x00_free_exlogin_buffer(struct qla_hw_data *ha) 4445 { 4446 if (ha->exlogin_buf) { 4447 dma_free_coherent(&ha->pdev->dev, ha->exlogin_size, 4448 ha->exlogin_buf, ha->exlogin_buf_dma); 4449 ha->exlogin_buf = NULL; 4450 ha->exlogin_size = 0; 4451 } 4452 } 4453 4454 static void 4455 qla2x00_number_of_exch(scsi_qla_host_t *vha, u32 *ret_cnt, u16 max_cnt) 4456 { 4457 u32 temp; 4458 struct init_cb_81xx *icb = (struct init_cb_81xx *)&vha->hw->init_cb; 4459 *ret_cnt = FW_DEF_EXCHANGES_CNT; 4460 4461 if (max_cnt > vha->hw->max_exchg) 4462 max_cnt = vha->hw->max_exchg; 4463 4464 if (qla_ini_mode_enabled(vha)) { 4465 if (vha->ql2xiniexchg > max_cnt) 4466 vha->ql2xiniexchg = max_cnt; 4467 4468 if (vha->ql2xiniexchg > FW_DEF_EXCHANGES_CNT) 4469 *ret_cnt = vha->ql2xiniexchg; 4470 4471 } else if (qla_tgt_mode_enabled(vha)) { 4472 if (vha->ql2xexchoffld > max_cnt) { 4473 vha->ql2xexchoffld = max_cnt; 4474 icb->exchange_count = cpu_to_le16(vha->ql2xexchoffld); 4475 } 4476 4477 if (vha->ql2xexchoffld > FW_DEF_EXCHANGES_CNT) 4478 *ret_cnt = vha->ql2xexchoffld; 4479 } else if (qla_dual_mode_enabled(vha)) { 4480 temp = vha->ql2xiniexchg + vha->ql2xexchoffld; 4481 if (temp > max_cnt) { 4482 vha->ql2xiniexchg -= (temp - max_cnt)/2; 4483 vha->ql2xexchoffld -= (((temp - max_cnt)/2) + 1); 4484 temp = max_cnt; 4485 icb->exchange_count = cpu_to_le16(vha->ql2xexchoffld); 4486 } 4487 4488 if (temp > FW_DEF_EXCHANGES_CNT) 4489 *ret_cnt = temp; 4490 } 4491 } 4492 4493 int 4494 qla2x00_set_exchoffld_buffer(scsi_qla_host_t *vha) 4495 { 4496 int rval; 4497 u16 size, max_cnt; 4498 u32 actual_cnt, totsz; 4499 struct qla_hw_data *ha = vha->hw; 4500 4501 if (!ha->flags.exchoffld_enabled) 4502 return QLA_SUCCESS; 4503 4504 if (!IS_EXCHG_OFFLD_CAPABLE(ha)) 4505 return QLA_SUCCESS; 4506 4507 max_cnt = 0; 4508 rval = qla_get_exchoffld_status(vha, &size, &max_cnt); 4509 if (rval != QLA_SUCCESS) { 4510 ql_log_pci(ql_log_fatal, 
ha->pdev, 0xd012, 4511 "Failed to get exlogin status.\n"); 4512 return rval; 4513 } 4514 4515 qla2x00_number_of_exch(vha, &actual_cnt, max_cnt); 4516 ql_log(ql_log_info, vha, 0xd014, 4517 "Actual exchange offload count: %d.\n", actual_cnt); 4518 4519 totsz = actual_cnt * size; 4520 4521 if (totsz != ha->exchoffld_size) { 4522 qla2x00_free_exchoffld_buffer(ha); 4523 if (actual_cnt <= FW_DEF_EXCHANGES_CNT) { 4524 ha->exchoffld_size = 0; 4525 ha->flags.exchoffld_enabled = 0; 4526 return QLA_SUCCESS; 4527 } 4528 4529 ha->exchoffld_size = totsz; 4530 4531 ql_log(ql_log_info, vha, 0xd016, 4532 "Exchange offload: max_count=%d, actual count=%d entry sz=0x%x, total sz=0x%x\n", 4533 max_cnt, actual_cnt, size, totsz); 4534 4535 ql_log(ql_log_info, vha, 0xd017, 4536 "Exchange Buffers requested size = 0x%x\n", 4537 ha->exchoffld_size); 4538 4539 /* Get consistent memory for extended logins */ 4540 ha->exchoffld_buf = dma_alloc_coherent(&ha->pdev->dev, 4541 ha->exchoffld_size, &ha->exchoffld_buf_dma, GFP_KERNEL); 4542 if (!ha->exchoffld_buf) { 4543 ql_log_pci(ql_log_fatal, ha->pdev, 0xd013, 4544 "Failed to allocate memory for Exchange Offload.\n"); 4545 4546 if (ha->max_exchg > 4547 (FW_DEF_EXCHANGES_CNT + REDUCE_EXCHANGES_CNT)) { 4548 ha->max_exchg -= REDUCE_EXCHANGES_CNT; 4549 } else if (ha->max_exchg > 4550 (FW_DEF_EXCHANGES_CNT + 512)) { 4551 ha->max_exchg -= 512; 4552 } else { 4553 ha->flags.exchoffld_enabled = 0; 4554 ql_log_pci(ql_log_fatal, ha->pdev, 0xd013, 4555 "Disabling Exchange offload due to lack of memory\n"); 4556 } 4557 ha->exchoffld_size = 0; 4558 4559 return -ENOMEM; 4560 } 4561 } else if (!ha->exchoffld_buf || (actual_cnt <= FW_DEF_EXCHANGES_CNT)) { 4562 /* pathological case */ 4563 qla2x00_free_exchoffld_buffer(ha); 4564 ha->exchoffld_size = 0; 4565 ha->flags.exchoffld_enabled = 0; 4566 ql_log(ql_log_info, vha, 0xd016, 4567 "Exchange offload not enable: offld size=%d, actual count=%d entry sz=0x%x, total sz=0x%x.\n", 4568 ha->exchoffld_size, actual_cnt, 
size, totsz); 4569 return 0; 4570 } 4571 4572 /* Now configure the dma buffer */ 4573 rval = qla_set_exchoffld_mem_cfg(vha); 4574 if (rval) { 4575 ql_log(ql_log_fatal, vha, 0xd02e, 4576 "Setup exchange offload buffer ****FAILED****.\n"); 4577 qla2x00_free_exchoffld_buffer(ha); 4578 } else { 4579 /* re-adjust number of target exchange */ 4580 struct init_cb_81xx *icb = (struct init_cb_81xx *)ha->init_cb; 4581 4582 if (qla_ini_mode_enabled(vha)) 4583 icb->exchange_count = 0; 4584 else 4585 icb->exchange_count = cpu_to_le16(vha->ql2xexchoffld); 4586 } 4587 4588 return rval; 4589 } 4590 4591 /* 4592 * qla2x00_free_exchoffld_buffer 4593 * 4594 * Input: 4595 * ha = adapter block pointer 4596 */ 4597 void 4598 qla2x00_free_exchoffld_buffer(struct qla_hw_data *ha) 4599 { 4600 if (ha->exchoffld_buf) { 4601 dma_free_coherent(&ha->pdev->dev, ha->exchoffld_size, 4602 ha->exchoffld_buf, ha->exchoffld_buf_dma); 4603 ha->exchoffld_buf = NULL; 4604 ha->exchoffld_size = 0; 4605 } 4606 } 4607 4608 /* 4609 * qla2x00_free_fw_dump 4610 * Frees fw dump stuff. 4611 * 4612 * Input: 4613 * ha = adapter block pointer 4614 */ 4615 static void 4616 qla2x00_free_fw_dump(struct qla_hw_data *ha) 4617 { 4618 struct fwdt *fwdt = ha->fwdt; 4619 uint j; 4620 4621 if (ha->fce) 4622 dma_free_coherent(&ha->pdev->dev, 4623 FCE_SIZE, ha->fce, ha->fce_dma); 4624 4625 if (ha->eft) 4626 dma_free_coherent(&ha->pdev->dev, 4627 EFT_SIZE, ha->eft, ha->eft_dma); 4628 4629 if (ha->fw_dump) 4630 vfree(ha->fw_dump); 4631 4632 ha->fce = NULL; 4633 ha->fce_dma = 0; 4634 ha->flags.fce_enabled = 0; 4635 ha->eft = NULL; 4636 ha->eft_dma = 0; 4637 ha->fw_dumped = 0; 4638 ha->fw_dump_cap_flags = 0; 4639 ha->fw_dump_reading = 0; 4640 ha->fw_dump = NULL; 4641 ha->fw_dump_len = 0; 4642 4643 for (j = 0; j < 2; j++, fwdt++) { 4644 if (fwdt->template) 4645 vfree(fwdt->template); 4646 fwdt->template = NULL; 4647 fwdt->length = 0; 4648 } 4649 } 4650 4651 /* 4652 * qla2x00_mem_free 4653 * Frees all adapter allocated memory. 
4654 * 4655 * Input: 4656 * ha = adapter block pointer. 4657 */ 4658 static void 4659 qla2x00_mem_free(struct qla_hw_data *ha) 4660 { 4661 qla2x00_free_fw_dump(ha); 4662 4663 if (ha->mctp_dump) 4664 dma_free_coherent(&ha->pdev->dev, MCTP_DUMP_SIZE, ha->mctp_dump, 4665 ha->mctp_dump_dma); 4666 ha->mctp_dump = NULL; 4667 4668 mempool_destroy(ha->srb_mempool); 4669 ha->srb_mempool = NULL; 4670 4671 if (ha->dcbx_tlv) 4672 dma_free_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE, 4673 ha->dcbx_tlv, ha->dcbx_tlv_dma); 4674 ha->dcbx_tlv = NULL; 4675 4676 if (ha->xgmac_data) 4677 dma_free_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE, 4678 ha->xgmac_data, ha->xgmac_data_dma); 4679 ha->xgmac_data = NULL; 4680 4681 if (ha->sns_cmd) 4682 dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt), 4683 ha->sns_cmd, ha->sns_cmd_dma); 4684 ha->sns_cmd = NULL; 4685 ha->sns_cmd_dma = 0; 4686 4687 if (ha->ct_sns) 4688 dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt), 4689 ha->ct_sns, ha->ct_sns_dma); 4690 ha->ct_sns = NULL; 4691 ha->ct_sns_dma = 0; 4692 4693 if (ha->sfp_data) 4694 dma_free_coherent(&ha->pdev->dev, SFP_DEV_SIZE, ha->sfp_data, 4695 ha->sfp_data_dma); 4696 ha->sfp_data = NULL; 4697 4698 if (ha->flt) 4699 dma_free_coherent(&ha->pdev->dev, 4700 sizeof(struct qla_flt_header) + FLT_REGIONS_SIZE, 4701 ha->flt, ha->flt_dma); 4702 ha->flt = NULL; 4703 ha->flt_dma = 0; 4704 4705 if (ha->ms_iocb) 4706 dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma); 4707 ha->ms_iocb = NULL; 4708 ha->ms_iocb_dma = 0; 4709 4710 if (ha->ex_init_cb) 4711 dma_pool_free(ha->s_dma_pool, 4712 ha->ex_init_cb, ha->ex_init_cb_dma); 4713 ha->ex_init_cb = NULL; 4714 ha->ex_init_cb_dma = 0; 4715 4716 if (ha->async_pd) 4717 dma_pool_free(ha->s_dma_pool, ha->async_pd, ha->async_pd_dma); 4718 ha->async_pd = NULL; 4719 ha->async_pd_dma = 0; 4720 4721 dma_pool_destroy(ha->s_dma_pool); 4722 ha->s_dma_pool = NULL; 4723 4724 if (ha->gid_list) 4725 dma_free_coherent(&ha->pdev->dev, 
qla2x00_gid_list_size(ha), 4726 ha->gid_list, ha->gid_list_dma); 4727 ha->gid_list = NULL; 4728 ha->gid_list_dma = 0; 4729 4730 if (IS_QLA82XX(ha)) { 4731 if (!list_empty(&ha->gbl_dsd_list)) { 4732 struct dsd_dma *dsd_ptr, *tdsd_ptr; 4733 4734 /* clean up allocated prev pool */ 4735 list_for_each_entry_safe(dsd_ptr, 4736 tdsd_ptr, &ha->gbl_dsd_list, list) { 4737 dma_pool_free(ha->dl_dma_pool, 4738 dsd_ptr->dsd_addr, dsd_ptr->dsd_list_dma); 4739 list_del(&dsd_ptr->list); 4740 kfree(dsd_ptr); 4741 } 4742 } 4743 } 4744 4745 dma_pool_destroy(ha->dl_dma_pool); 4746 ha->dl_dma_pool = NULL; 4747 4748 dma_pool_destroy(ha->fcp_cmnd_dma_pool); 4749 ha->fcp_cmnd_dma_pool = NULL; 4750 4751 mempool_destroy(ha->ctx_mempool); 4752 ha->ctx_mempool = NULL; 4753 4754 if (ql2xenabledif && ha->dif_bundl_pool) { 4755 struct dsd_dma *dsd, *nxt; 4756 4757 list_for_each_entry_safe(dsd, nxt, &ha->pool.unusable.head, 4758 list) { 4759 list_del(&dsd->list); 4760 dma_pool_free(ha->dif_bundl_pool, dsd->dsd_addr, 4761 dsd->dsd_list_dma); 4762 ha->dif_bundle_dma_allocs--; 4763 kfree(dsd); 4764 ha->dif_bundle_kallocs--; 4765 ha->pool.unusable.count--; 4766 } 4767 list_for_each_entry_safe(dsd, nxt, &ha->pool.good.head, list) { 4768 list_del(&dsd->list); 4769 dma_pool_free(ha->dif_bundl_pool, dsd->dsd_addr, 4770 dsd->dsd_list_dma); 4771 ha->dif_bundle_dma_allocs--; 4772 kfree(dsd); 4773 ha->dif_bundle_kallocs--; 4774 } 4775 } 4776 4777 dma_pool_destroy(ha->dif_bundl_pool); 4778 ha->dif_bundl_pool = NULL; 4779 4780 qlt_mem_free(ha); 4781 4782 if (ha->init_cb) 4783 dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, 4784 ha->init_cb, ha->init_cb_dma); 4785 ha->init_cb = NULL; 4786 ha->init_cb_dma = 0; 4787 4788 vfree(ha->optrom_buffer); 4789 ha->optrom_buffer = NULL; 4790 kfree(ha->nvram); 4791 ha->nvram = NULL; 4792 kfree(ha->npiv_info); 4793 ha->npiv_info = NULL; 4794 kfree(ha->swl); 4795 ha->swl = NULL; 4796 kfree(ha->loop_id_map); 4797 ha->loop_id_map = NULL; 4798 } 4799 4800 struct scsi_qla_host 
*qla2x00_create_host(struct scsi_host_template *sht, 4801 struct qla_hw_data *ha) 4802 { 4803 struct Scsi_Host *host; 4804 struct scsi_qla_host *vha = NULL; 4805 4806 host = scsi_host_alloc(sht, sizeof(scsi_qla_host_t)); 4807 if (!host) { 4808 ql_log_pci(ql_log_fatal, ha->pdev, 0x0107, 4809 "Failed to allocate host from the scsi layer, aborting.\n"); 4810 return NULL; 4811 } 4812 4813 /* Clear our data area */ 4814 vha = shost_priv(host); 4815 memset(vha, 0, sizeof(scsi_qla_host_t)); 4816 4817 vha->host = host; 4818 vha->host_no = host->host_no; 4819 vha->hw = ha; 4820 4821 vha->qlini_mode = ql2x_ini_mode; 4822 vha->ql2xexchoffld = ql2xexchoffld; 4823 vha->ql2xiniexchg = ql2xiniexchg; 4824 4825 INIT_LIST_HEAD(&vha->vp_fcports); 4826 INIT_LIST_HEAD(&vha->work_list); 4827 INIT_LIST_HEAD(&vha->list); 4828 INIT_LIST_HEAD(&vha->qla_cmd_list); 4829 INIT_LIST_HEAD(&vha->qla_sess_op_cmd_list); 4830 INIT_LIST_HEAD(&vha->logo_list); 4831 INIT_LIST_HEAD(&vha->plogi_ack_list); 4832 INIT_LIST_HEAD(&vha->qp_list); 4833 INIT_LIST_HEAD(&vha->gnl.fcports); 4834 INIT_LIST_HEAD(&vha->gpnid_list); 4835 INIT_WORK(&vha->iocb_work, qla2x00_iocb_work_fn); 4836 4837 spin_lock_init(&vha->work_lock); 4838 spin_lock_init(&vha->cmd_list_lock); 4839 init_waitqueue_head(&vha->fcport_waitQ); 4840 init_waitqueue_head(&vha->vref_waitq); 4841 4842 vha->gnl.size = sizeof(struct get_name_list_extended) * 4843 (ha->max_loop_id + 1); 4844 vha->gnl.l = dma_alloc_coherent(&ha->pdev->dev, 4845 vha->gnl.size, &vha->gnl.ldma, GFP_KERNEL); 4846 if (!vha->gnl.l) { 4847 ql_log(ql_log_fatal, vha, 0xd04a, 4848 "Alloc failed for name list.\n"); 4849 scsi_host_put(vha->host); 4850 return NULL; 4851 } 4852 4853 /* todo: what about ext login? 
*/ 4854 vha->scan.size = ha->max_fibre_devices * sizeof(struct fab_scan_rp); 4855 vha->scan.l = vmalloc(vha->scan.size); 4856 if (!vha->scan.l) { 4857 ql_log(ql_log_fatal, vha, 0xd04a, 4858 "Alloc failed for scan database.\n"); 4859 dma_free_coherent(&ha->pdev->dev, vha->gnl.size, 4860 vha->gnl.l, vha->gnl.ldma); 4861 vha->gnl.l = NULL; 4862 scsi_host_put(vha->host); 4863 return NULL; 4864 } 4865 INIT_DELAYED_WORK(&vha->scan.scan_work, qla_scan_work_fn); 4866 4867 sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no); 4868 ql_dbg(ql_dbg_init, vha, 0x0041, 4869 "Allocated the host=%p hw=%p vha=%p dev_name=%s", 4870 vha->host, vha->hw, vha, 4871 dev_name(&(ha->pdev->dev))); 4872 4873 return vha; 4874 } 4875 4876 struct qla_work_evt * 4877 qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type) 4878 { 4879 struct qla_work_evt *e; 4880 uint8_t bail; 4881 4882 QLA_VHA_MARK_BUSY(vha, bail); 4883 if (bail) 4884 return NULL; 4885 4886 e = kzalloc(sizeof(struct qla_work_evt), GFP_ATOMIC); 4887 if (!e) { 4888 QLA_VHA_MARK_NOT_BUSY(vha); 4889 return NULL; 4890 } 4891 4892 INIT_LIST_HEAD(&e->list); 4893 e->type = type; 4894 e->flags = QLA_EVT_FLAG_FREE; 4895 return e; 4896 } 4897 4898 int 4899 qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e) 4900 { 4901 unsigned long flags; 4902 bool q = false; 4903 4904 spin_lock_irqsave(&vha->work_lock, flags); 4905 list_add_tail(&e->list, &vha->work_list); 4906 4907 if (!test_and_set_bit(IOCB_WORK_ACTIVE, &vha->dpc_flags)) 4908 q = true; 4909 4910 spin_unlock_irqrestore(&vha->work_lock, flags); 4911 4912 if (q) 4913 queue_work(vha->hw->wq, &vha->iocb_work); 4914 4915 return QLA_SUCCESS; 4916 } 4917 4918 int 4919 qla2x00_post_aen_work(struct scsi_qla_host *vha, enum fc_host_event_code code, 4920 u32 data) 4921 { 4922 struct qla_work_evt *e; 4923 4924 e = qla2x00_alloc_work(vha, QLA_EVT_AEN); 4925 if (!e) 4926 return QLA_FUNCTION_FAILED; 4927 4928 e->u.aen.code = code; 4929 e->u.aen.data = 
data; 4930 return qla2x00_post_work(vha, e); 4931 } 4932 4933 int 4934 qla2x00_post_idc_ack_work(struct scsi_qla_host *vha, uint16_t *mb) 4935 { 4936 struct qla_work_evt *e; 4937 4938 e = qla2x00_alloc_work(vha, QLA_EVT_IDC_ACK); 4939 if (!e) 4940 return QLA_FUNCTION_FAILED; 4941 4942 memcpy(e->u.idc_ack.mb, mb, QLA_IDC_ACK_REGS * sizeof(uint16_t)); 4943 return qla2x00_post_work(vha, e); 4944 } 4945 4946 #define qla2x00_post_async_work(name, type) \ 4947 int qla2x00_post_async_##name##_work( \ 4948 struct scsi_qla_host *vha, \ 4949 fc_port_t *fcport, uint16_t *data) \ 4950 { \ 4951 struct qla_work_evt *e; \ 4952 \ 4953 e = qla2x00_alloc_work(vha, type); \ 4954 if (!e) \ 4955 return QLA_FUNCTION_FAILED; \ 4956 \ 4957 e->u.logio.fcport = fcport; \ 4958 if (data) { \ 4959 e->u.logio.data[0] = data[0]; \ 4960 e->u.logio.data[1] = data[1]; \ 4961 } \ 4962 fcport->flags |= FCF_ASYNC_ACTIVE; \ 4963 return qla2x00_post_work(vha, e); \ 4964 } 4965 4966 qla2x00_post_async_work(login, QLA_EVT_ASYNC_LOGIN); 4967 qla2x00_post_async_work(logout, QLA_EVT_ASYNC_LOGOUT); 4968 qla2x00_post_async_work(logout_done, QLA_EVT_ASYNC_LOGOUT_DONE); 4969 qla2x00_post_async_work(adisc, QLA_EVT_ASYNC_ADISC); 4970 qla2x00_post_async_work(prlo, QLA_EVT_ASYNC_PRLO); 4971 qla2x00_post_async_work(prlo_done, QLA_EVT_ASYNC_PRLO_DONE); 4972 4973 int 4974 qla2x00_post_uevent_work(struct scsi_qla_host *vha, u32 code) 4975 { 4976 struct qla_work_evt *e; 4977 4978 e = qla2x00_alloc_work(vha, QLA_EVT_UEVENT); 4979 if (!e) 4980 return QLA_FUNCTION_FAILED; 4981 4982 e->u.uevent.code = code; 4983 return qla2x00_post_work(vha, e); 4984 } 4985 4986 static void 4987 qla2x00_uevent_emit(struct scsi_qla_host *vha, u32 code) 4988 { 4989 char event_string[40]; 4990 char *envp[] = { event_string, NULL }; 4991 4992 switch (code) { 4993 case QLA_UEVENT_CODE_FW_DUMP: 4994 snprintf(event_string, sizeof(event_string), "FW_DUMP=%ld", 4995 vha->host_no); 4996 break; 4997 default: 4998 /* do nothing */ 4999 break; 5000 } 
5001 kobject_uevent_env(&vha->hw->pdev->dev.kobj, KOBJ_CHANGE, envp); 5002 } 5003 5004 int 5005 qlafx00_post_aenfx_work(struct scsi_qla_host *vha, uint32_t evtcode, 5006 uint32_t *data, int cnt) 5007 { 5008 struct qla_work_evt *e; 5009 5010 e = qla2x00_alloc_work(vha, QLA_EVT_AENFX); 5011 if (!e) 5012 return QLA_FUNCTION_FAILED; 5013 5014 e->u.aenfx.evtcode = evtcode; 5015 e->u.aenfx.count = cnt; 5016 memcpy(e->u.aenfx.mbx, data, sizeof(*data) * cnt); 5017 return qla2x00_post_work(vha, e); 5018 } 5019 5020 void qla24xx_sched_upd_fcport(fc_port_t *fcport) 5021 { 5022 unsigned long flags; 5023 5024 if (IS_SW_RESV_ADDR(fcport->d_id)) 5025 return; 5026 5027 spin_lock_irqsave(&fcport->vha->work_lock, flags); 5028 if (fcport->disc_state == DSC_UPD_FCPORT) { 5029 spin_unlock_irqrestore(&fcport->vha->work_lock, flags); 5030 return; 5031 } 5032 fcport->jiffies_at_registration = jiffies; 5033 fcport->sec_since_registration = 0; 5034 fcport->next_disc_state = DSC_DELETED; 5035 fcport->disc_state = DSC_UPD_FCPORT; 5036 spin_unlock_irqrestore(&fcport->vha->work_lock, flags); 5037 5038 queue_work(system_unbound_wq, &fcport->reg_work); 5039 } 5040 5041 static 5042 void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e) 5043 { 5044 unsigned long flags; 5045 fc_port_t *fcport = NULL, *tfcp; 5046 struct qlt_plogi_ack_t *pla = 5047 (struct qlt_plogi_ack_t *)e->u.new_sess.pla; 5048 uint8_t free_fcport = 0; 5049 5050 ql_dbg(ql_dbg_disc, vha, 0xffff, 5051 "%s %d %8phC enter\n", 5052 __func__, __LINE__, e->u.new_sess.port_name); 5053 5054 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); 5055 fcport = qla2x00_find_fcport_by_wwpn(vha, e->u.new_sess.port_name, 1); 5056 if (fcport) { 5057 fcport->d_id = e->u.new_sess.id; 5058 if (pla) { 5059 fcport->fw_login_state = DSC_LS_PLOGI_PEND; 5060 memcpy(fcport->node_name, 5061 pla->iocb.u.isp24.u.plogi.node_name, 5062 WWN_SIZE); 5063 qlt_plogi_ack_link(vha, pla, fcport, QLT_PLOGI_LINK_SAME_WWN); 5064 /* we took an extra 
ref_count to prevent PLOGI ACK when 5065 * fcport/sess has not been created. 5066 */ 5067 pla->ref_count--; 5068 } 5069 } else { 5070 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 5071 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); 5072 if (fcport) { 5073 fcport->d_id = e->u.new_sess.id; 5074 fcport->flags |= FCF_FABRIC_DEVICE; 5075 fcport->fw_login_state = DSC_LS_PLOGI_PEND; 5076 5077 memcpy(fcport->port_name, e->u.new_sess.port_name, 5078 WWN_SIZE); 5079 5080 fcport->fc4_type = e->u.new_sess.fc4_type; 5081 if (e->u.new_sess.fc4_type & FS_FCP_IS_N2N) { 5082 fcport->fc4_type = FS_FC4TYPE_FCP; 5083 fcport->n2n_flag = 1; 5084 if (vha->flags.nvme_enabled) 5085 fcport->fc4_type |= FS_FC4TYPE_NVME; 5086 } 5087 5088 } else { 5089 ql_dbg(ql_dbg_disc, vha, 0xffff, 5090 "%s %8phC mem alloc fail.\n", 5091 __func__, e->u.new_sess.port_name); 5092 5093 if (pla) { 5094 list_del(&pla->list); 5095 kmem_cache_free(qla_tgt_plogi_cachep, pla); 5096 } 5097 return; 5098 } 5099 5100 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); 5101 /* search again to make sure no one else got ahead */ 5102 tfcp = qla2x00_find_fcport_by_wwpn(vha, 5103 e->u.new_sess.port_name, 1); 5104 if (tfcp) { 5105 /* should rarily happen */ 5106 ql_dbg(ql_dbg_disc, vha, 0xffff, 5107 "%s %8phC found existing fcport b4 add. 
DS %d LS %d\n", 5108 __func__, tfcp->port_name, tfcp->disc_state, 5109 tfcp->fw_login_state); 5110 5111 free_fcport = 1; 5112 } else { 5113 list_add_tail(&fcport->list, &vha->vp_fcports); 5114 5115 } 5116 if (pla) { 5117 qlt_plogi_ack_link(vha, pla, fcport, 5118 QLT_PLOGI_LINK_SAME_WWN); 5119 pla->ref_count--; 5120 } 5121 } 5122 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 5123 5124 if (fcport) { 5125 fcport->id_changed = 1; 5126 fcport->scan_state = QLA_FCPORT_FOUND; 5127 fcport->chip_reset = vha->hw->base_qpair->chip_reset; 5128 memcpy(fcport->node_name, e->u.new_sess.node_name, WWN_SIZE); 5129 5130 if (pla) { 5131 if (pla->iocb.u.isp24.status_subcode == ELS_PRLI) { 5132 u16 wd3_lo; 5133 5134 fcport->fw_login_state = DSC_LS_PRLI_PEND; 5135 fcport->local = 0; 5136 fcport->loop_id = 5137 le16_to_cpu( 5138 pla->iocb.u.isp24.nport_handle); 5139 fcport->fw_login_state = DSC_LS_PRLI_PEND; 5140 wd3_lo = 5141 le16_to_cpu( 5142 pla->iocb.u.isp24.u.prli.wd3_lo); 5143 5144 if (wd3_lo & BIT_7) 5145 fcport->conf_compl_supported = 1; 5146 5147 if ((wd3_lo & BIT_4) == 0) 5148 fcport->port_type = FCT_INITIATOR; 5149 else 5150 fcport->port_type = FCT_TARGET; 5151 } 5152 qlt_plogi_ack_unref(vha, pla); 5153 } else { 5154 fc_port_t *dfcp = NULL; 5155 5156 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); 5157 tfcp = qla2x00_find_fcport_by_nportid(vha, 5158 &e->u.new_sess.id, 1); 5159 if (tfcp && (tfcp != fcport)) { 5160 /* 5161 * We have a conflict fcport with same NportID. 5162 */ 5163 ql_dbg(ql_dbg_disc, vha, 0xffff, 5164 "%s %8phC found conflict b4 add. 
DS %d LS %d\n", 5165 __func__, tfcp->port_name, tfcp->disc_state, 5166 tfcp->fw_login_state); 5167 5168 switch (tfcp->disc_state) { 5169 case DSC_DELETED: 5170 break; 5171 case DSC_DELETE_PEND: 5172 fcport->login_pause = 1; 5173 tfcp->conflict = fcport; 5174 break; 5175 default: 5176 fcport->login_pause = 1; 5177 tfcp->conflict = fcport; 5178 dfcp = tfcp; 5179 break; 5180 } 5181 } 5182 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 5183 if (dfcp) 5184 qlt_schedule_sess_for_deletion(tfcp); 5185 5186 if (N2N_TOPO(vha->hw)) { 5187 fcport->flags &= ~FCF_FABRIC_DEVICE; 5188 fcport->keep_nport_handle = 1; 5189 if (vha->flags.nvme_enabled) { 5190 fcport->fc4_type = 5191 (FS_FC4TYPE_NVME | FS_FC4TYPE_FCP); 5192 fcport->n2n_flag = 1; 5193 } 5194 fcport->fw_login_state = 0; 5195 /* 5196 * wait link init done before sending login 5197 */ 5198 } else { 5199 qla24xx_fcport_handle_login(vha, fcport); 5200 } 5201 } 5202 } 5203 5204 if (free_fcport) { 5205 qla2x00_free_fcport(fcport); 5206 if (pla) { 5207 list_del(&pla->list); 5208 kmem_cache_free(qla_tgt_plogi_cachep, pla); 5209 } 5210 } 5211 } 5212 5213 static void qla_sp_retry(struct scsi_qla_host *vha, struct qla_work_evt *e) 5214 { 5215 struct srb *sp = e->u.iosb.sp; 5216 int rval; 5217 5218 rval = qla2x00_start_sp(sp); 5219 if (rval != QLA_SUCCESS) { 5220 ql_dbg(ql_dbg_disc, vha, 0x2043, 5221 "%s: %s: Re-issue IOCB failed (%d).\n", 5222 __func__, sp->name, rval); 5223 qla24xx_sp_unmap(vha, sp); 5224 } 5225 } 5226 5227 void 5228 qla2x00_do_work(struct scsi_qla_host *vha) 5229 { 5230 struct qla_work_evt *e, *tmp; 5231 unsigned long flags; 5232 LIST_HEAD(work); 5233 int rc; 5234 5235 spin_lock_irqsave(&vha->work_lock, flags); 5236 list_splice_init(&vha->work_list, &work); 5237 spin_unlock_irqrestore(&vha->work_lock, flags); 5238 5239 list_for_each_entry_safe(e, tmp, &work, list) { 5240 rc = QLA_SUCCESS; 5241 switch (e->type) { 5242 case QLA_EVT_AEN: 5243 fc_host_post_event(vha->host, fc_get_event_number(), 5244 
			    e->u.aen.code, e->u.aen.data);
			break;
		case QLA_EVT_IDC_ACK:
			qla81xx_idc_ack(vha, e->u.idc_ack.mb);
			break;
		case QLA_EVT_ASYNC_LOGIN:
			qla2x00_async_login(vha, e->u.logio.fcport,
			    e->u.logio.data);
			break;
		case QLA_EVT_ASYNC_LOGOUT:
			rc = qla2x00_async_logout(vha, e->u.logio.fcport);
			break;
		case QLA_EVT_ASYNC_LOGOUT_DONE:
			qla2x00_async_logout_done(vha, e->u.logio.fcport,
			    e->u.logio.data);
			break;
		case QLA_EVT_ASYNC_ADISC:
			qla2x00_async_adisc(vha, e->u.logio.fcport,
			    e->u.logio.data);
			break;
		case QLA_EVT_UEVENT:
			qla2x00_uevent_emit(vha, e->u.uevent.code);
			break;
		case QLA_EVT_AENFX:
			qlafx00_process_aen(vha, e);
			break;
		case QLA_EVT_GPNID:
			qla24xx_async_gpnid(vha, &e->u.gpnid.id);
			break;
		case QLA_EVT_UNMAP:
			qla24xx_sp_unmap(vha, e->u.iosb.sp);
			break;
		case QLA_EVT_RELOGIN:
			qla2x00_relogin(vha);
			break;
		case QLA_EVT_NEW_SESS:
			qla24xx_create_new_sess(vha, e);
			break;
		case QLA_EVT_GPDB:
			qla24xx_async_gpdb(vha, e->u.fcport.fcport,
			    e->u.fcport.opt);
			break;
		case QLA_EVT_PRLI:
			qla24xx_async_prli(vha, e->u.fcport.fcport);
			break;
		case QLA_EVT_GPSC:
			qla24xx_async_gpsc(vha, e->u.fcport.fcport);
			break;
		case QLA_EVT_GNL:
			qla24xx_async_gnl(vha, e->u.fcport.fcport);
			break;
		case QLA_EVT_NACK:
			qla24xx_do_nack_work(vha, e);
			break;
		case QLA_EVT_ASYNC_PRLO:
			rc = qla2x00_async_prlo(vha, e->u.logio.fcport);
			break;
		case QLA_EVT_ASYNC_PRLO_DONE:
			qla2x00_async_prlo_done(vha, e->u.logio.fcport,
			    e->u.logio.data);
			break;
		case QLA_EVT_GPNFT:
			qla24xx_async_gpnft(vha, e->u.gpnft.fc4_type,
			    e->u.gpnft.sp);
			break;
		case QLA_EVT_GPNFT_DONE:
			qla24xx_async_gpnft_done(vha, e->u.iosb.sp);
			break;
		case QLA_EVT_GNNFT_DONE:
			qla24xx_async_gnnft_done(vha, e->u.iosb.sp);
			break;
		case QLA_EVT_GNNID:
			qla24xx_async_gnnid(vha, e->u.fcport.fcport);
			break;
		case QLA_EVT_GFPNID:
			qla24xx_async_gfpnid(vha, e->u.fcport.fcport);
			break;
		case QLA_EVT_SP_RETRY:
			qla_sp_retry(vha, e);
			break;
		case QLA_EVT_IIDMA:
			qla_do_iidma_work(vha, e->u.fcport.fcport);
			break;
		case QLA_EVT_ELS_PLOGI:
			qla24xx_els_dcmd2_iocb(vha, ELS_DCMD_PLOGI,
			    e->u.fcport.fcport, false);
			break;
		}

		/*
		 * NOTE(review): this compares against positive EAGAIN while
		 * kernel helpers usually return -EAGAIN — confirm the return
		 * convention of the rc-producing handlers above.
		 */
		if (rc == EAGAIN) {
			/* put 'work' at head of 'vha->work_list' */
			spin_lock_irqsave(&vha->work_lock, flags);
			list_splice(&work, &vha->work_list);
			spin_unlock_irqrestore(&vha->work_lock, flags);
			break;
		}
		/* Event handled: unlink and release it. */
		list_del_init(&e->list);
		if (e->flags & QLA_EVT_FLAG_FREE)
			kfree(e);

		/* For each work completed decrement vha ref count */
		QLA_VHA_MARK_NOT_BUSY(vha);
	}
}

/*
 * qla24xx_post_relogin_work - queue a relogin pass; on allocation failure
 * fall back to setting RELOGIN_NEEDED so the dpc thread retries later.
 */
int qla24xx_post_relogin_work(struct scsi_qla_host *vha)
{
	struct qla_work_evt *e;

	e = qla2x00_alloc_work(vha, QLA_EVT_RELOGIN);

	if (!e) {
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		return QLA_FUNCTION_FAILED;
	}

	return qla2x00_post_work(vha, e);
}

/* Relogins all the fcports of a vport
 * Context: dpc thread
 */
void qla2x00_relogin(struct scsi_qla_host *vha)
{
	fc_port_t *fcport;
	int status, relogin_needed = 0;
	struct event_arg ea;

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		/*
		 * If the port is not ONLINE then try to login
		 * to it if we haven't run out of retries.
5376 */ 5377 if (atomic_read(&fcport->state) != FCS_ONLINE && 5378 fcport->login_retry) { 5379 if (fcport->scan_state != QLA_FCPORT_FOUND || 5380 fcport->disc_state == DSC_LOGIN_COMPLETE) 5381 continue; 5382 5383 if (fcport->flags & (FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE) || 5384 fcport->disc_state == DSC_DELETE_PEND) { 5385 relogin_needed = 1; 5386 } else { 5387 if (vha->hw->current_topology != ISP_CFG_NL) { 5388 memset(&ea, 0, sizeof(ea)); 5389 ea.fcport = fcport; 5390 qla24xx_handle_relogin_event(vha, &ea); 5391 } else if (vha->hw->current_topology == 5392 ISP_CFG_NL) { 5393 fcport->login_retry--; 5394 status = 5395 qla2x00_local_device_login(vha, 5396 fcport); 5397 if (status == QLA_SUCCESS) { 5398 fcport->old_loop_id = 5399 fcport->loop_id; 5400 ql_dbg(ql_dbg_disc, vha, 0x2003, 5401 "Port login OK: logged in ID 0x%x.\n", 5402 fcport->loop_id); 5403 qla2x00_update_fcport 5404 (vha, fcport); 5405 } else if (status == 1) { 5406 set_bit(RELOGIN_NEEDED, 5407 &vha->dpc_flags); 5408 /* retry the login again */ 5409 ql_dbg(ql_dbg_disc, vha, 0x2007, 5410 "Retrying %d login again loop_id 0x%x.\n", 5411 fcport->login_retry, 5412 fcport->loop_id); 5413 } else { 5414 fcport->login_retry = 0; 5415 } 5416 5417 if (fcport->login_retry == 0 && 5418 status != QLA_SUCCESS) 5419 qla2x00_clear_loop_id(fcport); 5420 } 5421 } 5422 } 5423 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) 5424 break; 5425 } 5426 5427 if (relogin_needed) 5428 set_bit(RELOGIN_NEEDED, &vha->dpc_flags); 5429 5430 ql_dbg(ql_dbg_disc, vha, 0x400e, 5431 "Relogin end.\n"); 5432 } 5433 5434 /* Schedule work on any of the dpc-workqueues */ 5435 void 5436 qla83xx_schedule_work(scsi_qla_host_t *base_vha, int work_code) 5437 { 5438 struct qla_hw_data *ha = base_vha->hw; 5439 5440 switch (work_code) { 5441 case MBA_IDC_AEN: /* 0x8200 */ 5442 if (ha->dpc_lp_wq) 5443 queue_work(ha->dpc_lp_wq, &ha->idc_aen); 5444 break; 5445 5446 case QLA83XX_NIC_CORE_RESET: /* 0x1 */ 5447 if (!ha->flags.nic_core_reset_hdlr_active) { 5448 
if (ha->dpc_hp_wq) 5449 queue_work(ha->dpc_hp_wq, &ha->nic_core_reset); 5450 } else 5451 ql_dbg(ql_dbg_p3p, base_vha, 0xb05e, 5452 "NIC Core reset is already active. Skip " 5453 "scheduling it again.\n"); 5454 break; 5455 case QLA83XX_IDC_STATE_HANDLER: /* 0x2 */ 5456 if (ha->dpc_hp_wq) 5457 queue_work(ha->dpc_hp_wq, &ha->idc_state_handler); 5458 break; 5459 case QLA83XX_NIC_CORE_UNRECOVERABLE: /* 0x3 */ 5460 if (ha->dpc_hp_wq) 5461 queue_work(ha->dpc_hp_wq, &ha->nic_core_unrecoverable); 5462 break; 5463 default: 5464 ql_log(ql_log_warn, base_vha, 0xb05f, 5465 "Unknown work-code=0x%x.\n", work_code); 5466 } 5467 5468 return; 5469 } 5470 5471 /* Work: Perform NIC Core Unrecoverable state handling */ 5472 void 5473 qla83xx_nic_core_unrecoverable_work(struct work_struct *work) 5474 { 5475 struct qla_hw_data *ha = 5476 container_of(work, struct qla_hw_data, nic_core_unrecoverable); 5477 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 5478 uint32_t dev_state = 0; 5479 5480 qla83xx_idc_lock(base_vha, 0); 5481 qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state); 5482 qla83xx_reset_ownership(base_vha); 5483 if (ha->flags.nic_core_reset_owner) { 5484 ha->flags.nic_core_reset_owner = 0; 5485 qla83xx_wr_reg(base_vha, QLA83XX_IDC_DEV_STATE, 5486 QLA8XXX_DEV_FAILED); 5487 ql_log(ql_log_info, base_vha, 0xb060, "HW State: FAILED.\n"); 5488 qla83xx_schedule_work(base_vha, QLA83XX_IDC_STATE_HANDLER); 5489 } 5490 qla83xx_idc_unlock(base_vha, 0); 5491 } 5492 5493 /* Work: Execute IDC state handler */ 5494 void 5495 qla83xx_idc_state_handler_work(struct work_struct *work) 5496 { 5497 struct qla_hw_data *ha = 5498 container_of(work, struct qla_hw_data, idc_state_handler); 5499 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 5500 uint32_t dev_state = 0; 5501 5502 qla83xx_idc_lock(base_vha, 0); 5503 qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state); 5504 if (dev_state == QLA8XXX_DEV_FAILED || 5505 dev_state == QLA8XXX_DEV_NEED_QUIESCENT) 5506 
qla83xx_idc_state_handler(base_vha); 5507 qla83xx_idc_unlock(base_vha, 0); 5508 } 5509 5510 static int 5511 qla83xx_check_nic_core_fw_alive(scsi_qla_host_t *base_vha) 5512 { 5513 int rval = QLA_SUCCESS; 5514 unsigned long heart_beat_wait = jiffies + (1 * HZ); 5515 uint32_t heart_beat_counter1, heart_beat_counter2; 5516 5517 do { 5518 if (time_after(jiffies, heart_beat_wait)) { 5519 ql_dbg(ql_dbg_p3p, base_vha, 0xb07c, 5520 "Nic Core f/w is not alive.\n"); 5521 rval = QLA_FUNCTION_FAILED; 5522 break; 5523 } 5524 5525 qla83xx_idc_lock(base_vha, 0); 5526 qla83xx_rd_reg(base_vha, QLA83XX_FW_HEARTBEAT, 5527 &heart_beat_counter1); 5528 qla83xx_idc_unlock(base_vha, 0); 5529 msleep(100); 5530 qla83xx_idc_lock(base_vha, 0); 5531 qla83xx_rd_reg(base_vha, QLA83XX_FW_HEARTBEAT, 5532 &heart_beat_counter2); 5533 qla83xx_idc_unlock(base_vha, 0); 5534 } while (heart_beat_counter1 == heart_beat_counter2); 5535 5536 return rval; 5537 } 5538 5539 /* Work: Perform NIC Core Reset handling */ 5540 void 5541 qla83xx_nic_core_reset_work(struct work_struct *work) 5542 { 5543 struct qla_hw_data *ha = 5544 container_of(work, struct qla_hw_data, nic_core_reset); 5545 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 5546 uint32_t dev_state = 0; 5547 5548 if (IS_QLA2031(ha)) { 5549 if (qla2xxx_mctp_dump(base_vha) != QLA_SUCCESS) 5550 ql_log(ql_log_warn, base_vha, 0xb081, 5551 "Failed to dump mctp\n"); 5552 return; 5553 } 5554 5555 if (!ha->flags.nic_core_reset_hdlr_active) { 5556 if (qla83xx_check_nic_core_fw_alive(base_vha) == QLA_SUCCESS) { 5557 qla83xx_idc_lock(base_vha, 0); 5558 qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, 5559 &dev_state); 5560 qla83xx_idc_unlock(base_vha, 0); 5561 if (dev_state != QLA8XXX_DEV_NEED_RESET) { 5562 ql_dbg(ql_dbg_p3p, base_vha, 0xb07a, 5563 "Nic Core f/w is alive.\n"); 5564 return; 5565 } 5566 } 5567 5568 ha->flags.nic_core_reset_hdlr_active = 1; 5569 if (qla83xx_nic_core_reset(base_vha)) { 5570 /* NIC Core reset failed. 
*/ 5571 ql_dbg(ql_dbg_p3p, base_vha, 0xb061, 5572 "NIC Core reset failed.\n"); 5573 } 5574 ha->flags.nic_core_reset_hdlr_active = 0; 5575 } 5576 } 5577 5578 /* Work: Handle 8200 IDC aens */ 5579 void 5580 qla83xx_service_idc_aen(struct work_struct *work) 5581 { 5582 struct qla_hw_data *ha = 5583 container_of(work, struct qla_hw_data, idc_aen); 5584 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 5585 uint32_t dev_state, idc_control; 5586 5587 qla83xx_idc_lock(base_vha, 0); 5588 qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state); 5589 qla83xx_rd_reg(base_vha, QLA83XX_IDC_CONTROL, &idc_control); 5590 qla83xx_idc_unlock(base_vha, 0); 5591 if (dev_state == QLA8XXX_DEV_NEED_RESET) { 5592 if (idc_control & QLA83XX_IDC_GRACEFUL_RESET) { 5593 ql_dbg(ql_dbg_p3p, base_vha, 0xb062, 5594 "Application requested NIC Core Reset.\n"); 5595 qla83xx_schedule_work(base_vha, QLA83XX_NIC_CORE_RESET); 5596 } else if (qla83xx_check_nic_core_fw_alive(base_vha) == 5597 QLA_SUCCESS) { 5598 ql_dbg(ql_dbg_p3p, base_vha, 0xb07b, 5599 "Other protocol driver requested NIC Core Reset.\n"); 5600 qla83xx_schedule_work(base_vha, QLA83XX_NIC_CORE_RESET); 5601 } 5602 } else if (dev_state == QLA8XXX_DEV_FAILED || 5603 dev_state == QLA8XXX_DEV_NEED_QUIESCENT) { 5604 qla83xx_schedule_work(base_vha, QLA83XX_IDC_STATE_HANDLER); 5605 } 5606 } 5607 5608 static void 5609 qla83xx_wait_logic(void) 5610 { 5611 int i; 5612 5613 /* Yield CPU */ 5614 if (!in_interrupt()) { 5615 /* 5616 * Wait about 200ms before retrying again. 5617 * This controls the number of retries for single 5618 * lock operation. 
5619 */ 5620 msleep(100); 5621 schedule(); 5622 } else { 5623 for (i = 0; i < 20; i++) 5624 cpu_relax(); /* This a nop instr on i386 */ 5625 } 5626 } 5627 5628 static int 5629 qla83xx_force_lock_recovery(scsi_qla_host_t *base_vha) 5630 { 5631 int rval; 5632 uint32_t data; 5633 uint32_t idc_lck_rcvry_stage_mask = 0x3; 5634 uint32_t idc_lck_rcvry_owner_mask = 0x3c; 5635 struct qla_hw_data *ha = base_vha->hw; 5636 5637 ql_dbg(ql_dbg_p3p, base_vha, 0xb086, 5638 "Trying force recovery of the IDC lock.\n"); 5639 5640 rval = qla83xx_rd_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY, &data); 5641 if (rval) 5642 return rval; 5643 5644 if ((data & idc_lck_rcvry_stage_mask) > 0) { 5645 return QLA_SUCCESS; 5646 } else { 5647 data = (IDC_LOCK_RECOVERY_STAGE1) | (ha->portnum << 2); 5648 rval = qla83xx_wr_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY, 5649 data); 5650 if (rval) 5651 return rval; 5652 5653 msleep(200); 5654 5655 rval = qla83xx_rd_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY, 5656 &data); 5657 if (rval) 5658 return rval; 5659 5660 if (((data & idc_lck_rcvry_owner_mask) >> 2) == ha->portnum) { 5661 data &= (IDC_LOCK_RECOVERY_STAGE2 | 5662 ~(idc_lck_rcvry_stage_mask)); 5663 rval = qla83xx_wr_reg(base_vha, 5664 QLA83XX_IDC_LOCK_RECOVERY, data); 5665 if (rval) 5666 return rval; 5667 5668 /* Forcefully perform IDC UnLock */ 5669 rval = qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_UNLOCK, 5670 &data); 5671 if (rval) 5672 return rval; 5673 /* Clear lock-id by setting 0xff */ 5674 rval = qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID, 5675 0xff); 5676 if (rval) 5677 return rval; 5678 /* Clear lock-recovery by setting 0x0 */ 5679 rval = qla83xx_wr_reg(base_vha, 5680 QLA83XX_IDC_LOCK_RECOVERY, 0x0); 5681 if (rval) 5682 return rval; 5683 } else 5684 return QLA_SUCCESS; 5685 } 5686 5687 return rval; 5688 } 5689 5690 static int 5691 qla83xx_idc_lock_recovery(scsi_qla_host_t *base_vha) 5692 { 5693 int rval = QLA_SUCCESS; 5694 uint32_t o_drv_lockid, n_drv_lockid; 5695 unsigned long 
lock_recovery_timeout; 5696 5697 lock_recovery_timeout = jiffies + QLA83XX_MAX_LOCK_RECOVERY_WAIT; 5698 retry_lockid: 5699 rval = qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, &o_drv_lockid); 5700 if (rval) 5701 goto exit; 5702 5703 /* MAX wait time before forcing IDC Lock recovery = 2 secs */ 5704 if (time_after_eq(jiffies, lock_recovery_timeout)) { 5705 if (qla83xx_force_lock_recovery(base_vha) == QLA_SUCCESS) 5706 return QLA_SUCCESS; 5707 else 5708 return QLA_FUNCTION_FAILED; 5709 } 5710 5711 rval = qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, &n_drv_lockid); 5712 if (rval) 5713 goto exit; 5714 5715 if (o_drv_lockid == n_drv_lockid) { 5716 qla83xx_wait_logic(); 5717 goto retry_lockid; 5718 } else 5719 return QLA_SUCCESS; 5720 5721 exit: 5722 return rval; 5723 } 5724 5725 void 5726 qla83xx_idc_lock(scsi_qla_host_t *base_vha, uint16_t requester_id) 5727 { 5728 uint32_t data; 5729 uint32_t lock_owner; 5730 struct qla_hw_data *ha = base_vha->hw; 5731 5732 /* IDC-lock implementation using driver-lock/lock-id remote registers */ 5733 retry_lock: 5734 if (qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCK, &data) 5735 == QLA_SUCCESS) { 5736 if (data) { 5737 /* Setting lock-id to our function-number */ 5738 qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID, 5739 ha->portnum); 5740 } else { 5741 qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, 5742 &lock_owner); 5743 ql_dbg(ql_dbg_p3p, base_vha, 0xb063, 5744 "Failed to acquire IDC lock, acquired by %d, " 5745 "retrying...\n", lock_owner); 5746 5747 /* Retry/Perform IDC-Lock recovery */ 5748 if (qla83xx_idc_lock_recovery(base_vha) 5749 == QLA_SUCCESS) { 5750 qla83xx_wait_logic(); 5751 goto retry_lock; 5752 } else 5753 ql_log(ql_log_warn, base_vha, 0xb075, 5754 "IDC Lock recovery FAILED.\n"); 5755 } 5756 5757 } 5758 5759 return; 5760 } 5761 5762 void 5763 qla83xx_idc_unlock(scsi_qla_host_t *base_vha, uint16_t requester_id) 5764 { 5765 #if 0 5766 uint16_t options = (requester_id << 15) | BIT_7; 5767 #endif 5768 uint16_t retry; 
uint32_t data;
	struct qla_hw_data *ha = base_vha->hw;

	/* IDC-unlock implementation using driver-unlock/lock-id
	 * remote registers
	 */
	retry = 0;
retry_unlock:
	if (qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, &data)
	    == QLA_SUCCESS) {
		if (data == ha->portnum) {
			/* We own the lock: release it and clear the owner. */
			qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_UNLOCK, &data);
			/* Clearing lock-id by setting 0xff */
			qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID, 0xff);
		} else if (retry < 10) {
			/* SV: XXX: IDC unlock retrying needed here? */

			/* Retry for IDC-unlock */
			qla83xx_wait_logic();
			retry++;
			ql_dbg(ql_dbg_p3p, base_vha, 0xb064,
			    "Failed to release IDC lock, retrying=%d\n", retry);
			goto retry_unlock;
		}
	} else if (retry < 10) {
		/* Retry for IDC-unlock */
		qla83xx_wait_logic();
		retry++;
		ql_dbg(ql_dbg_p3p, base_vha, 0xb065,
		    "Failed to read drv-lockid, retrying=%d\n", retry);
		goto retry_unlock;
	}

	return;

#if 0
	/* XXX: IDC-unlock implementation using access-control mbx */
	retry = 0;
retry_unlock2:
	if (qla83xx_access_control(base_vha, options, 0, 0, NULL)) {
		if (retry < 10) {
			/* Retry for IDC-unlock */
			qla83xx_wait_logic();
			retry++;
			ql_dbg(ql_dbg_p3p, base_vha, 0xb066,
			    "Failed to release IDC lock, retrying=%d\n", retry);
			goto retry_unlock2;
		}
	}

	return;
#endif
}

/* Set this port's bit in the shared DRV_PRESENCE register.
 * Caller must hold the IDC lock. */
int
__qla83xx_set_drv_presence(scsi_qla_host_t *vha)
{
	int rval = QLA_SUCCESS;
	struct qla_hw_data *ha = vha->hw;
	uint32_t drv_presence;

	rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
	if (rval == QLA_SUCCESS) {
		drv_presence |= (1 << ha->portnum);
		rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRV_PRESENCE,
		    drv_presence);
	}

	return rval;
}

/* Locked wrapper around __qla83xx_set_drv_presence(). */
int
qla83xx_set_drv_presence(scsi_qla_host_t *vha)
{
	int rval = QLA_SUCCESS;

	qla83xx_idc_lock(vha, 0);
	rval = __qla83xx_set_drv_presence(vha);
	qla83xx_idc_unlock(vha, 0);

	return rval;
}

/* Clear this port's bit in the shared DRV_PRESENCE register.
 * Caller must hold the IDC lock. */
int
__qla83xx_clear_drv_presence(scsi_qla_host_t *vha)
{
	int rval = QLA_SUCCESS;
	struct qla_hw_data *ha = vha->hw;
	uint32_t drv_presence;

	rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
	if (rval == QLA_SUCCESS) {
		drv_presence &= ~(1 << ha->portnum);
		rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRV_PRESENCE,
		    drv_presence);
	}

	return rval;
}

/* Locked wrapper around __qla83xx_clear_drv_presence(). */
int
qla83xx_clear_drv_presence(scsi_qla_host_t *vha)
{
	int rval = QLA_SUCCESS;

	qla83xx_idc_lock(vha, 0);
	rval = __qla83xx_clear_drv_presence(vha);
	qla83xx_idc_unlock(vha, 0);

	return rval;
}

/*
 * Reset owner: wait (up to fcoe_reset_timeout secs) for every present
 * function to ack the reset, evict laggards from DRV_PRESENCE, then move
 * the device to COLD.  Called with the IDC lock held; the lock is dropped
 * around each 1s sleep.
 */
static void
qla83xx_need_reset_handler(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	uint32_t drv_ack, drv_presence;
	unsigned long ack_timeout;

	/* Wait for IDC ACK from all functions (DRV-ACK == DRV-PRESENCE) */
	ack_timeout = jiffies + (ha->fcoe_reset_timeout * HZ);
	while (1) {
		qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
		qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
		if ((drv_ack & drv_presence) == drv_presence)
			break;

		if (time_after_eq(jiffies, ack_timeout)) {
			ql_log(ql_log_warn, vha, 0xb067,
			    "RESET ACK TIMEOUT! drv_presence=0x%x "
			    "drv_ack=0x%x\n", drv_presence, drv_ack);
			/*
			 * The function(s) which did not ack in time are forced
			 * to withdraw any further participation in the IDC
			 * reset.
			 */
			if (drv_ack != drv_presence)
				qla83xx_wr_reg(vha, QLA83XX_IDC_DRV_PRESENCE,
				    drv_ack);
			break;
		}

		qla83xx_idc_unlock(vha, 0);
		msleep(1000);
		qla83xx_idc_lock(vha, 0);
	}

	qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_COLD);
	ql_log(ql_log_info, vha, 0xb068, "HW State: COLD/RE-INIT.\n");
}

/*
 * Bring the NIC core up from COLD: mark INITIALIZING, restart the NIC
 * firmware (IDC lock dropped across the restart), then publish READY or
 * FAILED.  Returns the restart status.
 */
static int
qla83xx_device_bootstrap(scsi_qla_host_t *vha)
{
	int rval = QLA_SUCCESS;
	uint32_t idc_control;

	qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_INITIALIZING);
	ql_log(ql_log_info, vha, 0xb069, "HW State: INITIALIZING.\n");

	/* Clearing IDC-Control Graceful-Reset Bit before resetting f/w */
	__qla83xx_get_idc_control(vha, &idc_control);
	idc_control &= ~QLA83XX_IDC_GRACEFUL_RESET;
	/*
	 * NOTE(review): idc_control is computed above but 0 is written —
	 * confirm whether __qla83xx_set_idc_control(vha, idc_control)
	 * was intended.
	 */
	__qla83xx_set_idc_control(vha, 0);

	qla83xx_idc_unlock(vha, 0);
	rval = qla83xx_restart_nic_firmware(vha);
	qla83xx_idc_lock(vha, 0);

	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_fatal, vha, 0xb06a,
		    "Failed to restart NIC f/w.\n");
		qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_FAILED);
		ql_log(ql_log_info, vha, 0xb06b, "HW State: FAILED.\n");
	} else {
		ql_dbg(ql_dbg_p3p, vha, 0xb06c,
		    "Success in restarting nic f/w.\n");
		qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_READY);
		ql_log(ql_log_info, vha, 0xb06d, "HW State: READY.\n");
	}

	return rval;
}

/* Assumes idc_lock always held on entry */
int
qla83xx_idc_state_handler(scsi_qla_host_t *base_vha)
{
	struct qla_hw_data *ha = base_vha->hw;
	int rval = QLA_SUCCESS;
	unsigned long dev_init_timeout;
	uint32_t dev_state;

	/* Wait for MAX-INIT-TIMEOUT for the device to go ready */
	dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout * HZ);

	while (1) {

		if (time_after_eq(jiffies, dev_init_timeout)) {
			ql_log(ql_log_warn, base_vha, 0xb06e,
			    "Initialization TIMEOUT!\n");
			/* Init timeout. Disable further NIC Core
			 * communication.
			 */
			qla83xx_wr_reg(base_vha, QLA83XX_IDC_DEV_STATE,
			    QLA8XXX_DEV_FAILED);
			ql_log(ql_log_info, base_vha, 0xb06f,
			    "HW State: FAILED.\n");
		}

		qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state);
		switch (dev_state) {
		case QLA8XXX_DEV_READY:
			/* Device came up: drop reset ownership and exit. */
			if (ha->flags.nic_core_reset_owner)
				qla83xx_idc_audit(base_vha,
				    IDC_AUDIT_COMPLETION);
			ha->flags.nic_core_reset_owner = 0;
			ql_dbg(ql_dbg_p3p, base_vha, 0xb070,
			    "Reset_owner reset by 0x%x.\n",
			    ha->portnum);
			goto exit;
		case QLA8XXX_DEV_COLD:
			if (ha->flags.nic_core_reset_owner)
				rval = qla83xx_device_bootstrap(base_vha);
			else {
				/* Wait for AEN to change device-state */
				qla83xx_idc_unlock(base_vha, 0);
				msleep(1000);
				qla83xx_idc_lock(base_vha, 0);
			}
			break;
		case QLA8XXX_DEV_INITIALIZING:
			/* Wait for AEN to change device-state */
			qla83xx_idc_unlock(base_vha, 0);
			msleep(1000);
			qla83xx_idc_lock(base_vha, 0);
			break;
		case QLA8XXX_DEV_NEED_RESET:
			if (!ql2xdontresethba && ha->flags.nic_core_reset_owner)
				qla83xx_need_reset_handler(base_vha);
			else {
				/* Wait for AEN to change device-state */
				qla83xx_idc_unlock(base_vha, 0);
				msleep(1000);
				qla83xx_idc_lock(base_vha, 0);
			}
			/* reset timeout value after need reset handler */
			dev_init_timeout = jiffies +
			    (ha->fcoe_dev_init_timeout * HZ);
			break;
		case QLA8XXX_DEV_NEED_QUIESCENT:
			/* XXX: DEBUG for now */
			qla83xx_idc_unlock(base_vha, 0);
			msleep(1000);
			qla83xx_idc_lock(base_vha, 0);
			break;
		case QLA8XXX_DEV_QUIESCENT:
			/* XXX: DEBUG for now */
			if (ha->flags.quiesce_owner)
				goto exit;

			qla83xx_idc_unlock(base_vha, 0);
			msleep(1000);
			qla83xx_idc_lock(base_vha, 0);
			dev_init_timeout = jiffies +
			    (ha->fcoe_dev_init_timeout * HZ);
			break;
		case QLA8XXX_DEV_FAILED:
			if (ha->flags.nic_core_reset_owner)
				qla83xx_idc_audit(base_vha,
				    IDC_AUDIT_COMPLETION);
			ha->flags.nic_core_reset_owner = 0;
			__qla83xx_clear_drv_presence(base_vha);
			qla83xx_idc_unlock(base_vha, 0);
			qla8xxx_dev_failed_handler(base_vha);
			rval = QLA_FUNCTION_FAILED;
			qla83xx_idc_lock(base_vha, 0);
			goto exit;
		case QLA8XXX_BAD_VALUE:
			qla83xx_idc_unlock(base_vha, 0);
			msleep(1000);
			qla83xx_idc_lock(base_vha, 0);
			break;
		default:
			ql_log(ql_log_warn, base_vha, 0xb071,
			    "Unknown Device State: %x.\n", dev_state);
			qla83xx_idc_unlock(base_vha, 0);
			qla8xxx_dev_failed_handler(base_vha);
			rval = QLA_FUNCTION_FAILED;
			qla83xx_idc_lock(base_vha, 0);
			goto exit;
		}
	}

exit:
	return rval;
}

/*
 * Work: permanently disable the board after an unrecoverable PCI error.
 * Tears the host down in unload order; qla_hw_data itself is left for
 * qla2x00_remove_one().
 */
void
qla2x00_disable_board_on_pci_error(struct work_struct *work)
{
	struct qla_hw_data *ha = container_of(work, struct qla_hw_data,
	    board_disable);
	struct pci_dev *pdev = ha->pdev;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	/*
	 * if UNLOAD flag is already set, then continue unload,
	 * where it was set first.
	 */
	if (test_bit(UNLOADING, &base_vha->dpc_flags))
		return;

	ql_log(ql_log_warn, base_vha, 0x015b,
	    "Disabling adapter.\n");

	if (!atomic_read(&pdev->enable_cnt)) {
		ql_log(ql_log_info, base_vha, 0xfffc,
		    "PCI device disabled, no action req for PCI error=%lx\n",
		    base_vha->pci_flags);
		return;
	}

	qla2x00_wait_for_sess_deletion(base_vha);

	set_bit(UNLOADING, &base_vha->dpc_flags);

	qla2x00_delete_all_vps(ha, base_vha);

	qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16);

	qla2x00_dfs_remove(base_vha);

	qla84xx_put_chip(base_vha);

	if (base_vha->timer_active)
		qla2x00_stop_timer(base_vha);

	base_vha->flags.online = 0;

	qla2x00_destroy_deferred_work(ha);

	/*
	 * Do not try to stop beacon blink as it will issue a mailbox
	 * command.
	 */
	qla2x00_free_sysfs_attr(base_vha, false);

	fc_remove_host(base_vha->host);

	scsi_remove_host(base_vha->host);

	base_vha->flags.init_done = 0;
	qla25xx_delete_queues(base_vha);
	qla2x00_free_fcports(base_vha);
	qla2x00_free_irqs(base_vha);
	qla2x00_mem_free(ha);
	qla82xx_md_free(base_vha);
	qla2x00_free_queues(ha);

	qla2x00_unmap_iobases(ha);

	pci_release_selected_regions(ha->pdev, ha->bars);
	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);

	/*
	 * Let qla2x00_remove_one cleanup qla_hw_data on device removal.
	 */
}

/**************************************************************************
* qla2x00_do_dpc
*   This kernel thread is a task that is scheduled by the interrupt handler
*   to perform the background processing for interrupts.
*
* Notes:
* This task always runs in the context of a kernel thread.  It
* is kicked off by the driver's detect code and starts up
* one per adapter.  It immediately goes to sleep and waits for
* some fibre event.  When either the interrupt handler or
* the timer routine detects an event it will set one of the task
* bits then wake us up.
**************************************************************************/
static int
qla2x00_do_dpc(void *data)
{
	scsi_qla_host_t *base_vha;
	struct qla_hw_data *ha;
	uint32_t online;
	struct qla_qpair *qpair;

	ha = (struct qla_hw_data *)data;
	base_vha = pci_get_drvdata(ha->pdev);

	set_user_nice(current, MIN_NICE);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		ql_dbg(ql_dbg_dpc, base_vha, 0x4000,
		    "DPC handler sleeping.\n");

		schedule();

		/* Defer all work while init is incomplete or mbox is busy. */
		if (!base_vha->flags.init_done || ha->flags.mbox_busy)
			goto end_loop;

		if (ha->flags.eeh_busy) {
			ql_dbg(ql_dbg_dpc, base_vha, 0x4003,
			    "eeh_busy=%d.\n", ha->flags.eeh_busy);
			goto end_loop;
		}

		ha->dpc_active = 1;

		ql_dbg(ql_dbg_dpc + ql_dbg_verbose, base_vha, 0x4001,
		    "DPC handler waking up, dpc_flags=0x%lx.\n",
		    base_vha->dpc_flags);

		if (test_bit(UNLOADING, &base_vha->dpc_flags))
			break;

		if (IS_P3P_TYPE(ha)) {
			if (IS_QLA8044(ha)) {
				if (test_and_clear_bit(ISP_UNRECOVERABLE,
					&base_vha->dpc_flags)) {
					qla8044_idc_lock(ha);
					qla8044_wr_direct(base_vha,
						QLA8044_CRB_DEV_STATE_INDEX,
						QLA8XXX_DEV_FAILED);
					qla8044_idc_unlock(ha);
					ql_log(ql_log_info, base_vha, 0x4004,
						"HW State: FAILED.\n");
					qla8044_device_state_handler(base_vha);
					continue;
				}

			} else {
				if (test_and_clear_bit(ISP_UNRECOVERABLE,
					&base_vha->dpc_flags)) {
					qla82xx_idc_lock(ha);
					qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
						QLA8XXX_DEV_FAILED);
					qla82xx_idc_unlock(ha);
					ql_log(ql_log_info, base_vha, 0x0151,
						"HW State: FAILED.\n");
qla82xx_device_state_handler(base_vha);
					continue;
				}
			}

			if (test_and_clear_bit(FCOE_CTX_RESET_NEEDED,
				&base_vha->dpc_flags)) {

				ql_dbg(ql_dbg_dpc, base_vha, 0x4005,
				    "FCoE context reset scheduled.\n");
				if (!(test_and_set_bit(ABORT_ISP_ACTIVE,
					&base_vha->dpc_flags))) {
					if (qla82xx_fcoe_ctx_reset(base_vha)) {
						/* FCoE-ctx reset failed.
						 * Escalate to chip-reset
						 */
						set_bit(ISP_ABORT_NEEDED,
							&base_vha->dpc_flags);
					}
					clear_bit(ABORT_ISP_ACTIVE,
						&base_vha->dpc_flags);
				}

				ql_dbg(ql_dbg_dpc, base_vha, 0x4006,
				    "FCoE context reset end.\n");
			}
		} else if (IS_QLAFX00(ha)) {
			if (test_and_clear_bit(ISP_UNRECOVERABLE,
				&base_vha->dpc_flags)) {
				ql_dbg(ql_dbg_dpc, base_vha, 0x4020,
				    "Firmware Reset Recovery\n");
				if (qlafx00_reset_initialize(base_vha)) {
					/* Failed. Abort isp later. */
					if (!test_bit(UNLOADING,
					    &base_vha->dpc_flags)) {
						set_bit(ISP_UNRECOVERABLE,
						    &base_vha->dpc_flags);
						ql_dbg(ql_dbg_dpc, base_vha,
						    0x4021,
						    "Reset Recovery Failed\n");
					}
				}
			}

			if (test_and_clear_bit(FX00_TARGET_SCAN,
				&base_vha->dpc_flags)) {
				ql_dbg(ql_dbg_dpc, base_vha, 0x4022,
				    "ISPFx00 Target Scan scheduled\n");
				if (qlafx00_rescan_isp(base_vha)) {
					if (!test_bit(UNLOADING,
					    &base_vha->dpc_flags))
						set_bit(ISP_UNRECOVERABLE,
						    &base_vha->dpc_flags);
					ql_dbg(ql_dbg_dpc, base_vha, 0x401e,
					    "ISPFx00 Target Scan Failed\n");
				}
				ql_dbg(ql_dbg_dpc, base_vha, 0x401f,
				    "ISPFx00 Target Scan End\n");
			}
			if (test_and_clear_bit(FX00_HOST_INFO_RESEND,
				&base_vha->dpc_flags)) {
				ql_dbg(ql_dbg_dpc, base_vha, 0x4023,
				    "ISPFx00 Host Info resend scheduled\n");
				qlafx00_fx_disc(base_vha,
				    &base_vha->hw->mr.fcport,
				    FXDISC_REG_HOST_INFO);
			}
		}

		/* Re-read SFP; a long-range setting change forces ISP abort. */
		if (test_and_clear_bit(DETECT_SFP_CHANGE,
		    &base_vha->dpc_flags) &&
		    !test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) {
			qla24xx_detect_sfp(base_vha);

			if (ha->flags.detected_lr_sfp !=
			    ha->flags.using_lr_setting)
				set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
		}

		if (test_and_clear_bit
		    (ISP_ABORT_NEEDED, &base_vha->dpc_flags) &&
		    !test_bit(UNLOADING, &base_vha->dpc_flags)) {
			bool do_reset = true;

			/* Skip the reset when firmware never started and the
			 * configured mode does not need it. */
			switch (base_vha->qlini_mode) {
			case QLA2XXX_INI_MODE_ENABLED:
				break;
			case QLA2XXX_INI_MODE_DISABLED:
				if (!qla_tgt_mode_enabled(base_vha) &&
				    !ha->flags.fw_started)
					do_reset = false;
				break;
			case QLA2XXX_INI_MODE_DUAL:
				if (!qla_dual_mode_enabled(base_vha) &&
				    !ha->flags.fw_started)
					do_reset = false;
				break;
			default:
				break;
			}

			if (do_reset && !(test_and_set_bit(ABORT_ISP_ACTIVE,
			    &base_vha->dpc_flags))) {
				ql_dbg(ql_dbg_dpc, base_vha, 0x4007,
				    "ISP abort scheduled.\n");
				if (ha->isp_ops->abort_isp(base_vha)) {
					/* failed. retry later */
					set_bit(ISP_ABORT_NEEDED,
					    &base_vha->dpc_flags);
				}
				clear_bit(ABORT_ISP_ACTIVE,
						&base_vha->dpc_flags);
				ql_dbg(ql_dbg_dpc, base_vha, 0x4008,
				    "ISP abort end.\n");
			}
		}

		if (test_and_clear_bit(FCPORT_UPDATE_NEEDED,
		    &base_vha->dpc_flags)) {
			qla2x00_update_fcports(base_vha);
		}

		if (IS_QLAFX00(ha))
			goto loop_resync_check;

		if (test_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags)) {
			ql_dbg(ql_dbg_dpc, base_vha, 0x4009,
			    "Quiescence mode scheduled.\n");
			if (IS_P3P_TYPE(ha)) {
				if (IS_QLA82XX(ha))
					qla82xx_device_state_handler(base_vha);
				if (IS_QLA8044(ha))
					qla8044_device_state_handler(base_vha);
				clear_bit(ISP_QUIESCE_NEEDED,
				    &base_vha->dpc_flags);
				if (!ha->flags.quiesce_owner) {
					qla2x00_perform_loop_resync(base_vha);
					if (IS_QLA82XX(ha)) {
						qla82xx_idc_lock(ha);
						qla82xx_clear_qsnt_ready(
						    base_vha);
						qla82xx_idc_unlock(ha);
					} else if (IS_QLA8044(ha)) {
						qla8044_idc_lock(ha);
						qla8044_clear_qsnt_ready(
						    base_vha);
						qla8044_idc_unlock(ha);
					}
				}
			} else {
				clear_bit(ISP_QUIESCE_NEEDED,
				    &base_vha->dpc_flags);
				qla2x00_quiesce_io(base_vha);
			}
			ql_dbg(ql_dbg_dpc, base_vha, 0x400a,
			    "Quiescence mode end.\n");
		}

		if (test_and_clear_bit(RESET_MARKER_NEEDED,
		    &base_vha->dpc_flags) &&
		    (!(test_and_set_bit(RESET_ACTIVE, &base_vha->dpc_flags)))) {

			ql_dbg(ql_dbg_dpc, base_vha, 0x400b,
			    "Reset marker scheduled.\n");
			qla2x00_rst_aen(base_vha);
			clear_bit(RESET_ACTIVE, &base_vha->dpc_flags);
			ql_dbg(ql_dbg_dpc, base_vha, 0x400c,
			    "Reset marker end.\n");
		}

		/* Retry each device up to login retry count */
		if (test_bit(RELOGIN_NEEDED, &base_vha->dpc_flags) &&
		    !test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags) &&
		    atomic_read(&base_vha->loop_state) != LOOP_DOWN) {

			/* Throttle relogin work to at most once per second. */
			if (!base_vha->relogin_jif ||
			    time_after_eq(jiffies, base_vha->relogin_jif)) {
				base_vha->relogin_jif = jiffies + HZ;
				clear_bit(RELOGIN_NEEDED, &base_vha->dpc_flags);

				ql_dbg(ql_dbg_disc, base_vha, 0x400d,
				    "Relogin scheduled.\n");
				qla24xx_post_relogin_work(base_vha);
			}
		}
loop_resync_check:
		if (test_and_clear_bit(LOOP_RESYNC_NEEDED,
		    &base_vha->dpc_flags)) {

			ql_dbg(ql_dbg_dpc, base_vha, 0x400f,
			    "Loop resync scheduled.\n");

			if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE,
			    &base_vha->dpc_flags))) {

				qla2x00_loop_resync(base_vha);

				clear_bit(LOOP_RESYNC_ACTIVE,
						&base_vha->dpc_flags);
			}

			ql_dbg(ql_dbg_dpc, base_vha, 0x4010,
			    "Loop resync end.\n");
		}

		if (IS_QLAFX00(ha))
			goto intr_on_check;

		if (test_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags) &&
		    atomic_read(&base_vha->loop_state) == LOOP_READY) {
			clear_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags);
			qla2xxx_flash_npiv_conf(base_vha);
		}

intr_on_check:
		if (!ha->interrupts_on)
			ha->isp_ops->enable_intrs(ha);

		if (test_and_clear_bit(BEACON_BLINK_NEEDED,
					&base_vha->dpc_flags)) {
			if (ha->beacon_blink_led == 1)
				ha->isp_ops->beacon_blink(base_vha);
		}

		/* qpair online check */
		if (test_and_clear_bit(QPAIR_ONLINE_CHECK_NEEDED,
		    &base_vha->dpc_flags)) {
			if (ha->flags.eeh_busy ||
			    ha->flags.pci_channel_io_perm_failure)
				online = 0;
			else
				online = 1;

			mutex_lock(&ha->mq_lock);
			list_for_each_entry(qpair, &base_vha->qp_list,
			    qp_list_elem)
			qpair->online = online;
			mutex_unlock(&ha->mq_lock);
		}

		if (test_and_clear_bit(SET_NVME_ZIO_THRESHOLD_NEEDED,
		    &base_vha->dpc_flags)) {
			ql_log(ql_log_info, base_vha, 0xffffff,
				"nvme: SET ZIO Activity exchange threshold to %d.\n",
				ha->nvme_last_rptd_aen);
			if (qla27xx_set_zio_threshold(base_vha,
			    ha->nvme_last_rptd_aen)) {
				ql_log(ql_log_info, base_vha, 0xffffff,
				    "nvme: Unable to SET ZIO Activity exchange threshold to %d.\n",
				    ha->nvme_last_rptd_aen);
			}
		}

		if (test_and_clear_bit(SET_ZIO_THRESHOLD_NEEDED,
		    &base_vha->dpc_flags)) {
			ql_log(ql_log_info, base_vha, 0xffffff,
			    "SET ZIO Activity exchange threshold to %d.\n",
			    ha->last_zio_threshold);
			qla27xx_set_zio_threshold(base_vha,
			    ha->last_zio_threshold);
		}

		if (!IS_QLAFX00(ha))
			qla2x00_do_dpc_all_vps(base_vha);

		if (test_and_clear_bit(N2N_LINK_RESET,
			&base_vha->dpc_flags)) {
			qla2x00_lip_reset(base_vha);
		}

		ha->dpc_active = 0;
end_loop:
		set_current_state(TASK_INTERRUPTIBLE);
	} /* End of while(1) */
	__set_current_state(TASK_RUNNING);

	ql_dbg(ql_dbg_dpc, base_vha, 0x4011,
	    "DPC handler exiting.\n");

	/*
	 * Make sure that nobody tries to wake us up again.
	 */
	ha->dpc_active = 0;

	/* Cleanup any residual CTX SRBs. */
	qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16);

	return 0;
}

/* Wake the dpc thread unless the driver is unloading. */
void
qla2xxx_wake_dpc(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct task_struct *t = ha->dpc_thread;

	if (!test_bit(UNLOADING, &vha->dpc_flags) && t)
		wake_up_process(t);
}

/*
* qla2x00_rst_aen
*      Processes asynchronous reset.
*
* Input:
*      ha  = adapter block pointer.
*/
static void
qla2x00_rst_aen(scsi_qla_host_t *vha)
{
	if (vha->flags.online && !vha->flags.reset_active &&
	    !atomic_read(&vha->loop_down_timer) &&
	    !(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))) {
		do {
			clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

			/*
			 * Issue marker command only when we are going to start
			 * the I/O.
6534 */ 6535 vha->marker_needed = 1; 6536 } while (!atomic_read(&vha->loop_down_timer) && 6537 (test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags))); 6538 } 6539 } 6540 6541 /************************************************************************** 6542 * qla2x00_timer 6543 * 6544 * Description: 6545 * One second timer 6546 * 6547 * Context: Interrupt 6548 ***************************************************************************/ 6549 void 6550 qla2x00_timer(struct timer_list *t) 6551 { 6552 scsi_qla_host_t *vha = from_timer(vha, t, timer); 6553 unsigned long cpu_flags = 0; 6554 int start_dpc = 0; 6555 int index; 6556 srb_t *sp; 6557 uint16_t w; 6558 struct qla_hw_data *ha = vha->hw; 6559 struct req_que *req; 6560 6561 if (ha->flags.eeh_busy) { 6562 ql_dbg(ql_dbg_timer, vha, 0x6000, 6563 "EEH = %d, restarting timer.\n", 6564 ha->flags.eeh_busy); 6565 qla2x00_restart_timer(vha, WATCH_INTERVAL); 6566 return; 6567 } 6568 6569 /* 6570 * Hardware read to raise pending EEH errors during mailbox waits. If 6571 * the read returns -1 then disable the board. 6572 */ 6573 if (!pci_channel_offline(ha->pdev)) { 6574 pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w); 6575 qla2x00_check_reg16_for_disconnect(vha, w); 6576 } 6577 6578 /* Make sure qla82xx_watchdog is run only for physical port */ 6579 if (!vha->vp_idx && IS_P3P_TYPE(ha)) { 6580 if (test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) 6581 start_dpc++; 6582 if (IS_QLA82XX(ha)) 6583 qla82xx_watchdog(vha); 6584 else if (IS_QLA8044(ha)) 6585 qla8044_watchdog(vha); 6586 } 6587 6588 if (!vha->vp_idx && IS_QLAFX00(ha)) 6589 qlafx00_timer_routine(vha); 6590 6591 /* Loop down handler. 
*/ 6592 if (atomic_read(&vha->loop_down_timer) > 0 && 6593 !(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) && 6594 !(test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags)) 6595 && vha->flags.online) { 6596 6597 if (atomic_read(&vha->loop_down_timer) == 6598 vha->loop_down_abort_time) { 6599 6600 ql_log(ql_log_info, vha, 0x6008, 6601 "Loop down - aborting the queues before time expires.\n"); 6602 6603 if (!IS_QLA2100(ha) && vha->link_down_timeout) 6604 atomic_set(&vha->loop_state, LOOP_DEAD); 6605 6606 /* 6607 * Schedule an ISP abort to return any FCP2-device 6608 * commands. 6609 */ 6610 /* NPIV - scan physical port only */ 6611 if (!vha->vp_idx) { 6612 spin_lock_irqsave(&ha->hardware_lock, 6613 cpu_flags); 6614 req = ha->req_q_map[0]; 6615 for (index = 1; 6616 index < req->num_outstanding_cmds; 6617 index++) { 6618 fc_port_t *sfcp; 6619 6620 sp = req->outstanding_cmds[index]; 6621 if (!sp) 6622 continue; 6623 if (sp->cmd_type != TYPE_SRB) 6624 continue; 6625 if (sp->type != SRB_SCSI_CMD) 6626 continue; 6627 sfcp = sp->fcport; 6628 if (!(sfcp->flags & FCF_FCP2_DEVICE)) 6629 continue; 6630 6631 if (IS_QLA82XX(ha)) 6632 set_bit(FCOE_CTX_RESET_NEEDED, 6633 &vha->dpc_flags); 6634 else 6635 set_bit(ISP_ABORT_NEEDED, 6636 &vha->dpc_flags); 6637 break; 6638 } 6639 spin_unlock_irqrestore(&ha->hardware_lock, 6640 cpu_flags); 6641 } 6642 start_dpc++; 6643 } 6644 6645 /* if the loop has been down for 4 minutes, reinit adapter */ 6646 if (atomic_dec_and_test(&vha->loop_down_timer) != 0) { 6647 if (!(vha->device_flags & DFLG_NO_CABLE)) { 6648 ql_log(ql_log_warn, vha, 0x6009, 6649 "Loop down - aborting ISP.\n"); 6650 6651 if (IS_QLA82XX(ha)) 6652 set_bit(FCOE_CTX_RESET_NEEDED, 6653 &vha->dpc_flags); 6654 else 6655 set_bit(ISP_ABORT_NEEDED, 6656 &vha->dpc_flags); 6657 } 6658 } 6659 ql_dbg(ql_dbg_timer, vha, 0x600a, 6660 "Loop down - seconds remaining %d.\n", 6661 atomic_read(&vha->loop_down_timer)); 6662 } 6663 /* Check if beacon LED needs to be blinked for physical host only */ 6664 
if (!vha->vp_idx && (ha->beacon_blink_led == 1)) { 6665 /* There is no beacon_blink function for ISP82xx */ 6666 if (!IS_P3P_TYPE(ha)) { 6667 set_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags); 6668 start_dpc++; 6669 } 6670 } 6671 6672 /* Process any deferred work. */ 6673 if (!list_empty(&vha->work_list)) { 6674 unsigned long flags; 6675 bool q = false; 6676 6677 spin_lock_irqsave(&vha->work_lock, flags); 6678 if (!test_and_set_bit(IOCB_WORK_ACTIVE, &vha->dpc_flags)) 6679 q = true; 6680 spin_unlock_irqrestore(&vha->work_lock, flags); 6681 if (q) 6682 queue_work(vha->hw->wq, &vha->iocb_work); 6683 } 6684 6685 /* 6686 * FC-NVME 6687 * see if the active AEN count has changed from what was last reported. 6688 */ 6689 if (!vha->vp_idx && 6690 (atomic_read(&ha->nvme_active_aen_cnt) != ha->nvme_last_rptd_aen) && 6691 ha->zio_mode == QLA_ZIO_MODE_6 && 6692 !ha->flags.host_shutting_down) { 6693 ql_log(ql_log_info, vha, 0x3002, 6694 "nvme: Sched: Set ZIO exchange threshold to %d.\n", 6695 ha->nvme_last_rptd_aen); 6696 ha->nvme_last_rptd_aen = atomic_read(&ha->nvme_active_aen_cnt); 6697 set_bit(SET_NVME_ZIO_THRESHOLD_NEEDED, &vha->dpc_flags); 6698 start_dpc++; 6699 } 6700 6701 if (!vha->vp_idx && 6702 (atomic_read(&ha->zio_threshold) != ha->last_zio_threshold) && 6703 (ha->zio_mode == QLA_ZIO_MODE_6) && 6704 (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))) { 6705 ql_log(ql_log_info, vha, 0x3002, 6706 "Sched: Set ZIO exchange threshold to %d.\n", 6707 ha->last_zio_threshold); 6708 ha->last_zio_threshold = atomic_read(&ha->zio_threshold); 6709 set_bit(SET_ZIO_THRESHOLD_NEEDED, &vha->dpc_flags); 6710 start_dpc++; 6711 } 6712 6713 /* Schedule the DPC routine if needed */ 6714 if ((test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) || 6715 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) || 6716 test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags) || 6717 start_dpc || 6718 test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) || 6719 test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags) || 6720 
test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags) || 6721 test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) || 6722 test_bit(VP_DPC_NEEDED, &vha->dpc_flags) || 6723 test_bit(RELOGIN_NEEDED, &vha->dpc_flags))) { 6724 ql_dbg(ql_dbg_timer, vha, 0x600b, 6725 "isp_abort_needed=%d loop_resync_needed=%d " 6726 "fcport_update_needed=%d start_dpc=%d " 6727 "reset_marker_needed=%d", 6728 test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags), 6729 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags), 6730 test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags), 6731 start_dpc, 6732 test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags)); 6733 ql_dbg(ql_dbg_timer, vha, 0x600c, 6734 "beacon_blink_needed=%d isp_unrecoverable=%d " 6735 "fcoe_ctx_reset_needed=%d vp_dpc_needed=%d " 6736 "relogin_needed=%d.\n", 6737 test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags), 6738 test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags), 6739 test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags), 6740 test_bit(VP_DPC_NEEDED, &vha->dpc_flags), 6741 test_bit(RELOGIN_NEEDED, &vha->dpc_flags)); 6742 qla2xxx_wake_dpc(vha); 6743 } 6744 6745 qla2x00_restart_timer(vha, WATCH_INTERVAL); 6746 } 6747 6748 /* Firmware interface routines. 
*/ 6749 6750 #define FW_ISP21XX 0 6751 #define FW_ISP22XX 1 6752 #define FW_ISP2300 2 6753 #define FW_ISP2322 3 6754 #define FW_ISP24XX 4 6755 #define FW_ISP25XX 5 6756 #define FW_ISP81XX 6 6757 #define FW_ISP82XX 7 6758 #define FW_ISP2031 8 6759 #define FW_ISP8031 9 6760 #define FW_ISP27XX 10 6761 #define FW_ISP28XX 11 6762 6763 #define FW_FILE_ISP21XX "ql2100_fw.bin" 6764 #define FW_FILE_ISP22XX "ql2200_fw.bin" 6765 #define FW_FILE_ISP2300 "ql2300_fw.bin" 6766 #define FW_FILE_ISP2322 "ql2322_fw.bin" 6767 #define FW_FILE_ISP24XX "ql2400_fw.bin" 6768 #define FW_FILE_ISP25XX "ql2500_fw.bin" 6769 #define FW_FILE_ISP81XX "ql8100_fw.bin" 6770 #define FW_FILE_ISP82XX "ql8200_fw.bin" 6771 #define FW_FILE_ISP2031 "ql2600_fw.bin" 6772 #define FW_FILE_ISP8031 "ql8300_fw.bin" 6773 #define FW_FILE_ISP27XX "ql2700_fw.bin" 6774 #define FW_FILE_ISP28XX "ql2800_fw.bin" 6775 6776 6777 static DEFINE_MUTEX(qla_fw_lock); 6778 6779 static struct fw_blob qla_fw_blobs[] = { 6780 { .name = FW_FILE_ISP21XX, .segs = { 0x1000, 0 }, }, 6781 { .name = FW_FILE_ISP22XX, .segs = { 0x1000, 0 }, }, 6782 { .name = FW_FILE_ISP2300, .segs = { 0x800, 0 }, }, 6783 { .name = FW_FILE_ISP2322, .segs = { 0x800, 0x1c000, 0x1e000, 0 }, }, 6784 { .name = FW_FILE_ISP24XX, }, 6785 { .name = FW_FILE_ISP25XX, }, 6786 { .name = FW_FILE_ISP81XX, }, 6787 { .name = FW_FILE_ISP82XX, }, 6788 { .name = FW_FILE_ISP2031, }, 6789 { .name = FW_FILE_ISP8031, }, 6790 { .name = FW_FILE_ISP27XX, }, 6791 { .name = FW_FILE_ISP28XX, }, 6792 { .name = NULL, }, 6793 }; 6794 6795 struct fw_blob * 6796 qla2x00_request_firmware(scsi_qla_host_t *vha) 6797 { 6798 struct qla_hw_data *ha = vha->hw; 6799 struct fw_blob *blob; 6800 6801 if (IS_QLA2100(ha)) { 6802 blob = &qla_fw_blobs[FW_ISP21XX]; 6803 } else if (IS_QLA2200(ha)) { 6804 blob = &qla_fw_blobs[FW_ISP22XX]; 6805 } else if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) { 6806 blob = &qla_fw_blobs[FW_ISP2300]; 6807 } else if (IS_QLA2322(ha) || IS_QLA6322(ha)) { 6808 blob = 
&qla_fw_blobs[FW_ISP2322]; 6809 } else if (IS_QLA24XX_TYPE(ha)) { 6810 blob = &qla_fw_blobs[FW_ISP24XX]; 6811 } else if (IS_QLA25XX(ha)) { 6812 blob = &qla_fw_blobs[FW_ISP25XX]; 6813 } else if (IS_QLA81XX(ha)) { 6814 blob = &qla_fw_blobs[FW_ISP81XX]; 6815 } else if (IS_QLA82XX(ha)) { 6816 blob = &qla_fw_blobs[FW_ISP82XX]; 6817 } else if (IS_QLA2031(ha)) { 6818 blob = &qla_fw_blobs[FW_ISP2031]; 6819 } else if (IS_QLA8031(ha)) { 6820 blob = &qla_fw_blobs[FW_ISP8031]; 6821 } else if (IS_QLA27XX(ha)) { 6822 blob = &qla_fw_blobs[FW_ISP27XX]; 6823 } else if (IS_QLA28XX(ha)) { 6824 blob = &qla_fw_blobs[FW_ISP28XX]; 6825 } else { 6826 return NULL; 6827 } 6828 6829 if (!blob->name) 6830 return NULL; 6831 6832 mutex_lock(&qla_fw_lock); 6833 if (blob->fw) 6834 goto out; 6835 6836 if (request_firmware(&blob->fw, blob->name, &ha->pdev->dev)) { 6837 ql_log(ql_log_warn, vha, 0x0063, 6838 "Failed to load firmware image (%s).\n", blob->name); 6839 blob->fw = NULL; 6840 blob = NULL; 6841 } 6842 6843 out: 6844 mutex_unlock(&qla_fw_lock); 6845 return blob; 6846 } 6847 6848 static void 6849 qla2x00_release_firmware(void) 6850 { 6851 struct fw_blob *blob; 6852 6853 mutex_lock(&qla_fw_lock); 6854 for (blob = qla_fw_blobs; blob->name; blob++) 6855 release_firmware(blob->fw); 6856 mutex_unlock(&qla_fw_lock); 6857 } 6858 6859 static void qla_pci_error_cleanup(scsi_qla_host_t *vha) 6860 { 6861 struct qla_hw_data *ha = vha->hw; 6862 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 6863 struct qla_qpair *qpair = NULL; 6864 struct scsi_qla_host *vp; 6865 fc_port_t *fcport; 6866 int i; 6867 unsigned long flags; 6868 6869 ha->chip_reset++; 6870 6871 ha->base_qpair->chip_reset = ha->chip_reset; 6872 for (i = 0; i < ha->max_qpairs; i++) { 6873 if (ha->queue_pair_map[i]) 6874 ha->queue_pair_map[i]->chip_reset = 6875 ha->base_qpair->chip_reset; 6876 } 6877 6878 /* purge MBox commands */ 6879 if (atomic_read(&ha->num_pend_mbx_stage3)) { 6880 clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags); 6881 
complete(&ha->mbx_intr_comp); 6882 } 6883 6884 i = 0; 6885 6886 while (atomic_read(&ha->num_pend_mbx_stage3) || 6887 atomic_read(&ha->num_pend_mbx_stage2) || 6888 atomic_read(&ha->num_pend_mbx_stage1)) { 6889 msleep(20); 6890 i++; 6891 if (i > 50) 6892 break; 6893 } 6894 6895 ha->flags.purge_mbox = 0; 6896 6897 mutex_lock(&ha->mq_lock); 6898 list_for_each_entry(qpair, &base_vha->qp_list, qp_list_elem) 6899 qpair->online = 0; 6900 mutex_unlock(&ha->mq_lock); 6901 6902 qla2x00_mark_all_devices_lost(vha, 0); 6903 6904 spin_lock_irqsave(&ha->vport_slock, flags); 6905 list_for_each_entry(vp, &ha->vp_list, list) { 6906 atomic_inc(&vp->vref_count); 6907 spin_unlock_irqrestore(&ha->vport_slock, flags); 6908 qla2x00_mark_all_devices_lost(vp, 0); 6909 spin_lock_irqsave(&ha->vport_slock, flags); 6910 atomic_dec(&vp->vref_count); 6911 } 6912 spin_unlock_irqrestore(&ha->vport_slock, flags); 6913 6914 /* Clear all async request states across all VPs. */ 6915 list_for_each_entry(fcport, &vha->vp_fcports, list) 6916 fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT); 6917 6918 spin_lock_irqsave(&ha->vport_slock, flags); 6919 list_for_each_entry(vp, &ha->vp_list, list) { 6920 atomic_inc(&vp->vref_count); 6921 spin_unlock_irqrestore(&ha->vport_slock, flags); 6922 list_for_each_entry(fcport, &vp->vp_fcports, list) 6923 fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT); 6924 spin_lock_irqsave(&ha->vport_slock, flags); 6925 atomic_dec(&vp->vref_count); 6926 } 6927 spin_unlock_irqrestore(&ha->vport_slock, flags); 6928 } 6929 6930 6931 static pci_ers_result_t 6932 qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) 6933 { 6934 scsi_qla_host_t *vha = pci_get_drvdata(pdev); 6935 struct qla_hw_data *ha = vha->hw; 6936 6937 ql_dbg(ql_dbg_aer, vha, 0x9000, 6938 "PCI error detected, state %x.\n", state); 6939 6940 if (!atomic_read(&pdev->enable_cnt)) { 6941 ql_log(ql_log_info, vha, 0xffff, 6942 "PCI device is disabled,state %x\n", state); 6943 return 
PCI_ERS_RESULT_NEED_RESET; 6944 } 6945 6946 switch (state) { 6947 case pci_channel_io_normal: 6948 ha->flags.eeh_busy = 0; 6949 if (ql2xmqsupport || ql2xnvmeenable) { 6950 set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags); 6951 qla2xxx_wake_dpc(vha); 6952 } 6953 return PCI_ERS_RESULT_CAN_RECOVER; 6954 case pci_channel_io_frozen: 6955 ha->flags.eeh_busy = 1; 6956 qla_pci_error_cleanup(vha); 6957 return PCI_ERS_RESULT_NEED_RESET; 6958 case pci_channel_io_perm_failure: 6959 ha->flags.pci_channel_io_perm_failure = 1; 6960 qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16); 6961 if (ql2xmqsupport || ql2xnvmeenable) { 6962 set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags); 6963 qla2xxx_wake_dpc(vha); 6964 } 6965 return PCI_ERS_RESULT_DISCONNECT; 6966 } 6967 return PCI_ERS_RESULT_NEED_RESET; 6968 } 6969 6970 static pci_ers_result_t 6971 qla2xxx_pci_mmio_enabled(struct pci_dev *pdev) 6972 { 6973 int risc_paused = 0; 6974 uint32_t stat; 6975 unsigned long flags; 6976 scsi_qla_host_t *base_vha = pci_get_drvdata(pdev); 6977 struct qla_hw_data *ha = base_vha->hw; 6978 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 6979 struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24; 6980 6981 if (IS_QLA82XX(ha)) 6982 return PCI_ERS_RESULT_RECOVERED; 6983 6984 spin_lock_irqsave(&ha->hardware_lock, flags); 6985 if (IS_QLA2100(ha) || IS_QLA2200(ha)){ 6986 stat = RD_REG_DWORD(®->hccr); 6987 if (stat & HCCR_RISC_PAUSE) 6988 risc_paused = 1; 6989 } else if (IS_QLA23XX(ha)) { 6990 stat = RD_REG_DWORD(®->u.isp2300.host_status); 6991 if (stat & HSR_RISC_PAUSED) 6992 risc_paused = 1; 6993 } else if (IS_FWI2_CAPABLE(ha)) { 6994 stat = RD_REG_DWORD(®24->host_status); 6995 if (stat & HSRX_RISC_PAUSED) 6996 risc_paused = 1; 6997 } 6998 spin_unlock_irqrestore(&ha->hardware_lock, flags); 6999 7000 if (risc_paused) { 7001 ql_log(ql_log_info, base_vha, 0x9003, 7002 "RISC paused -- mmio_enabled, Dumping firmware.\n"); 7003 ha->isp_ops->fw_dump(base_vha, 0); 7004 7005 return 
PCI_ERS_RESULT_NEED_RESET; 7006 } else 7007 return PCI_ERS_RESULT_RECOVERED; 7008 } 7009 7010 static pci_ers_result_t 7011 qla2xxx_pci_slot_reset(struct pci_dev *pdev) 7012 { 7013 pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT; 7014 scsi_qla_host_t *base_vha = pci_get_drvdata(pdev); 7015 struct qla_hw_data *ha = base_vha->hw; 7016 int rc; 7017 struct qla_qpair *qpair = NULL; 7018 7019 ql_dbg(ql_dbg_aer, base_vha, 0x9004, 7020 "Slot Reset.\n"); 7021 7022 /* Workaround: qla2xxx driver which access hardware earlier 7023 * needs error state to be pci_channel_io_online. 7024 * Otherwise mailbox command timesout. 7025 */ 7026 pdev->error_state = pci_channel_io_normal; 7027 7028 pci_restore_state(pdev); 7029 7030 /* pci_restore_state() clears the saved_state flag of the device 7031 * save restored state which resets saved_state flag 7032 */ 7033 pci_save_state(pdev); 7034 7035 if (ha->mem_only) 7036 rc = pci_enable_device_mem(pdev); 7037 else 7038 rc = pci_enable_device(pdev); 7039 7040 if (rc) { 7041 ql_log(ql_log_warn, base_vha, 0x9005, 7042 "Can't re-enable PCI device after reset.\n"); 7043 goto exit_slot_reset; 7044 } 7045 7046 7047 if (ha->isp_ops->pci_config(base_vha)) 7048 goto exit_slot_reset; 7049 7050 mutex_lock(&ha->mq_lock); 7051 list_for_each_entry(qpair, &base_vha->qp_list, qp_list_elem) 7052 qpair->online = 1; 7053 mutex_unlock(&ha->mq_lock); 7054 7055 base_vha->flags.online = 1; 7056 set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); 7057 if (ha->isp_ops->abort_isp(base_vha) == QLA_SUCCESS) 7058 ret = PCI_ERS_RESULT_RECOVERED; 7059 clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); 7060 7061 7062 exit_slot_reset: 7063 ql_dbg(ql_dbg_aer, base_vha, 0x900e, 7064 "slot_reset return %x.\n", ret); 7065 7066 return ret; 7067 } 7068 7069 static void 7070 qla2xxx_pci_resume(struct pci_dev *pdev) 7071 { 7072 scsi_qla_host_t *base_vha = pci_get_drvdata(pdev); 7073 struct qla_hw_data *ha = base_vha->hw; 7074 int ret; 7075 7076 ql_dbg(ql_dbg_aer, base_vha, 0x900f, 7077 
"pci_resume.\n"); 7078 7079 ha->flags.eeh_busy = 0; 7080 7081 ret = qla2x00_wait_for_hba_online(base_vha); 7082 if (ret != QLA_SUCCESS) { 7083 ql_log(ql_log_fatal, base_vha, 0x9002, 7084 "The device failed to resume I/O from slot/link_reset.\n"); 7085 } 7086 } 7087 7088 static void 7089 qla_pci_reset_prepare(struct pci_dev *pdev) 7090 { 7091 scsi_qla_host_t *base_vha = pci_get_drvdata(pdev); 7092 struct qla_hw_data *ha = base_vha->hw; 7093 struct qla_qpair *qpair; 7094 7095 ql_log(ql_log_warn, base_vha, 0xffff, 7096 "%s.\n", __func__); 7097 7098 /* 7099 * PCI FLR/function reset is about to reset the 7100 * slot. Stop the chip to stop all DMA access. 7101 * It is assumed that pci_reset_done will be called 7102 * after FLR to resume Chip operation. 7103 */ 7104 ha->flags.eeh_busy = 1; 7105 mutex_lock(&ha->mq_lock); 7106 list_for_each_entry(qpair, &base_vha->qp_list, qp_list_elem) 7107 qpair->online = 0; 7108 mutex_unlock(&ha->mq_lock); 7109 7110 set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); 7111 qla2x00_abort_isp_cleanup(base_vha); 7112 qla2x00_abort_all_cmds(base_vha, DID_RESET << 16); 7113 } 7114 7115 static void 7116 qla_pci_reset_done(struct pci_dev *pdev) 7117 { 7118 scsi_qla_host_t *base_vha = pci_get_drvdata(pdev); 7119 struct qla_hw_data *ha = base_vha->hw; 7120 struct qla_qpair *qpair; 7121 7122 ql_log(ql_log_warn, base_vha, 0xffff, 7123 "%s.\n", __func__); 7124 7125 /* 7126 * FLR just completed by PCI layer. 
Resume adapter 7127 */ 7128 ha->flags.eeh_busy = 0; 7129 mutex_lock(&ha->mq_lock); 7130 list_for_each_entry(qpair, &base_vha->qp_list, qp_list_elem) 7131 qpair->online = 1; 7132 mutex_unlock(&ha->mq_lock); 7133 7134 base_vha->flags.online = 1; 7135 ha->isp_ops->abort_isp(base_vha); 7136 clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); 7137 } 7138 7139 static int qla2xxx_map_queues(struct Scsi_Host *shost) 7140 { 7141 int rc; 7142 scsi_qla_host_t *vha = (scsi_qla_host_t *)shost->hostdata; 7143 struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT]; 7144 7145 if (USER_CTRL_IRQ(vha->hw) || !vha->hw->mqiobase) 7146 rc = blk_mq_map_queues(qmap); 7147 else 7148 rc = blk_mq_pci_map_queues(qmap, vha->hw->pdev, vha->irq_offset); 7149 return rc; 7150 } 7151 7152 struct scsi_host_template qla2xxx_driver_template = { 7153 .module = THIS_MODULE, 7154 .name = QLA2XXX_DRIVER_NAME, 7155 .queuecommand = qla2xxx_queuecommand, 7156 7157 .eh_timed_out = fc_eh_timed_out, 7158 .eh_abort_handler = qla2xxx_eh_abort, 7159 .eh_device_reset_handler = qla2xxx_eh_device_reset, 7160 .eh_target_reset_handler = qla2xxx_eh_target_reset, 7161 .eh_bus_reset_handler = qla2xxx_eh_bus_reset, 7162 .eh_host_reset_handler = qla2xxx_eh_host_reset, 7163 7164 .slave_configure = qla2xxx_slave_configure, 7165 7166 .slave_alloc = qla2xxx_slave_alloc, 7167 .slave_destroy = qla2xxx_slave_destroy, 7168 .scan_finished = qla2xxx_scan_finished, 7169 .scan_start = qla2xxx_scan_start, 7170 .change_queue_depth = scsi_change_queue_depth, 7171 .map_queues = qla2xxx_map_queues, 7172 .this_id = -1, 7173 .cmd_per_lun = 3, 7174 .sg_tablesize = SG_ALL, 7175 7176 .max_sectors = 0xFFFF, 7177 .shost_attrs = qla2x00_host_attrs, 7178 7179 .supported_mode = MODE_INITIATOR, 7180 .track_queue_depth = 1, 7181 .cmd_size = sizeof(srb_t), 7182 }; 7183 7184 static const struct pci_error_handlers qla2xxx_err_handler = { 7185 .error_detected = qla2xxx_pci_error_detected, 7186 .mmio_enabled = qla2xxx_pci_mmio_enabled, 7187 
.slot_reset = qla2xxx_pci_slot_reset, 7188 .resume = qla2xxx_pci_resume, 7189 .reset_prepare = qla_pci_reset_prepare, 7190 .reset_done = qla_pci_reset_done, 7191 }; 7192 7193 static struct pci_device_id qla2xxx_pci_tbl[] = { 7194 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2100) }, 7195 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2200) }, 7196 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2300) }, 7197 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2312) }, 7198 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2322) }, 7199 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP6312) }, 7200 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP6322) }, 7201 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2422) }, 7202 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2432) }, 7203 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8432) }, 7204 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5422) }, 7205 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5432) }, 7206 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2532) }, 7207 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2031) }, 7208 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8001) }, 7209 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8021) }, 7210 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8031) }, 7211 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISPF001) }, 7212 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8044) }, 7213 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2071) }, 7214 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2271) }, 7215 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2261) }, 7216 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2061) }, 7217 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2081) }, 7218 { 
PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2281) }, 7219 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2089) }, 7220 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2289) }, 7221 { 0 }, 7222 }; 7223 MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl); 7224 7225 static struct pci_driver qla2xxx_pci_driver = { 7226 .name = QLA2XXX_DRIVER_NAME, 7227 .driver = { 7228 .owner = THIS_MODULE, 7229 }, 7230 .id_table = qla2xxx_pci_tbl, 7231 .probe = qla2x00_probe_one, 7232 .remove = qla2x00_remove_one, 7233 .shutdown = qla2x00_shutdown, 7234 .err_handler = &qla2xxx_err_handler, 7235 }; 7236 7237 static const struct file_operations apidev_fops = { 7238 .owner = THIS_MODULE, 7239 .llseek = noop_llseek, 7240 }; 7241 7242 /** 7243 * qla2x00_module_init - Module initialization. 7244 **/ 7245 static int __init 7246 qla2x00_module_init(void) 7247 { 7248 int ret = 0; 7249 7250 BUILD_BUG_ON(sizeof(cmd_entry_t) != 64); 7251 BUILD_BUG_ON(sizeof(cont_a64_entry_t) != 64); 7252 BUILD_BUG_ON(sizeof(cont_entry_t) != 64); 7253 BUILD_BUG_ON(sizeof(init_cb_t) != 96); 7254 BUILD_BUG_ON(sizeof(ms_iocb_entry_t) != 64); 7255 BUILD_BUG_ON(sizeof(request_t) != 64); 7256 BUILD_BUG_ON(sizeof(struct access_chip_84xx) != 64); 7257 BUILD_BUG_ON(sizeof(struct cmd_bidir) != 64); 7258 BUILD_BUG_ON(sizeof(struct cmd_nvme) != 64); 7259 BUILD_BUG_ON(sizeof(struct cmd_type_6) != 64); 7260 BUILD_BUG_ON(sizeof(struct cmd_type_7) != 64); 7261 BUILD_BUG_ON(sizeof(struct cmd_type_7_fx00) != 64); 7262 BUILD_BUG_ON(sizeof(struct cmd_type_crc_2) != 64); 7263 BUILD_BUG_ON(sizeof(struct ct_entry_24xx) != 64); 7264 BUILD_BUG_ON(sizeof(struct ctio_crc2_to_fw) != 64); 7265 BUILD_BUG_ON(sizeof(struct els_entry_24xx) != 64); 7266 BUILD_BUG_ON(sizeof(struct fxdisc_entry_fx00) != 64); 7267 BUILD_BUG_ON(sizeof(struct init_cb_24xx) != 128); 7268 BUILD_BUG_ON(sizeof(struct init_cb_81xx) != 128); 7269 BUILD_BUG_ON(sizeof(struct pt_ls4_request) != 64); 7270 BUILD_BUG_ON(sizeof(struct sns_cmd_pkt) != 
2064); 7271 BUILD_BUG_ON(sizeof(struct verify_chip_entry_84xx) != 64); 7272 BUILD_BUG_ON(sizeof(struct vf_evfp_entry_24xx) != 56); 7273 7274 /* Allocate cache for SRBs. */ 7275 srb_cachep = kmem_cache_create("qla2xxx_srbs", sizeof(srb_t), 0, 7276 SLAB_HWCACHE_ALIGN, NULL); 7277 if (srb_cachep == NULL) { 7278 ql_log(ql_log_fatal, NULL, 0x0001, 7279 "Unable to allocate SRB cache...Failing load!.\n"); 7280 return -ENOMEM; 7281 } 7282 7283 /* Initialize target kmem_cache and mem_pools */ 7284 ret = qlt_init(); 7285 if (ret < 0) { 7286 goto destroy_cache; 7287 } else if (ret > 0) { 7288 /* 7289 * If initiator mode is explictly disabled by qlt_init(), 7290 * prevent scsi_transport_fc.c:fc_scsi_scan_rport() from 7291 * performing scsi_scan_target() during LOOP UP event. 7292 */ 7293 qla2xxx_transport_functions.disable_target_scan = 1; 7294 qla2xxx_transport_vport_functions.disable_target_scan = 1; 7295 } 7296 7297 /* Derive version string. */ 7298 strcpy(qla2x00_version_str, QLA2XXX_VERSION); 7299 if (ql2xextended_error_logging) 7300 strcat(qla2x00_version_str, "-debug"); 7301 if (ql2xextended_error_logging == 1) 7302 ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK; 7303 7304 if (ql2x_ini_mode == QLA2XXX_INI_MODE_DUAL) 7305 qla_insert_tgt_attrs(); 7306 7307 qla2xxx_transport_template = 7308 fc_attach_transport(&qla2xxx_transport_functions); 7309 if (!qla2xxx_transport_template) { 7310 ql_log(ql_log_fatal, NULL, 0x0002, 7311 "fc_attach_transport failed...Failing load!.\n"); 7312 ret = -ENODEV; 7313 goto qlt_exit; 7314 } 7315 7316 apidev_major = register_chrdev(0, QLA2XXX_APIDEV, &apidev_fops); 7317 if (apidev_major < 0) { 7318 ql_log(ql_log_fatal, NULL, 0x0003, 7319 "Unable to register char device %s.\n", QLA2XXX_APIDEV); 7320 } 7321 7322 qla2xxx_transport_vport_template = 7323 fc_attach_transport(&qla2xxx_transport_vport_functions); 7324 if (!qla2xxx_transport_vport_template) { 7325 ql_log(ql_log_fatal, NULL, 0x0004, 7326 "fc_attach_transport vport failed...Failing 
load!.\n"); 7327 ret = -ENODEV; 7328 goto unreg_chrdev; 7329 } 7330 ql_log(ql_log_info, NULL, 0x0005, 7331 "QLogic Fibre Channel HBA Driver: %s.\n", 7332 qla2x00_version_str); 7333 ret = pci_register_driver(&qla2xxx_pci_driver); 7334 if (ret) { 7335 ql_log(ql_log_fatal, NULL, 0x0006, 7336 "pci_register_driver failed...ret=%d Failing load!.\n", 7337 ret); 7338 goto release_vport_transport; 7339 } 7340 return ret; 7341 7342 release_vport_transport: 7343 fc_release_transport(qla2xxx_transport_vport_template); 7344 7345 unreg_chrdev: 7346 if (apidev_major >= 0) 7347 unregister_chrdev(apidev_major, QLA2XXX_APIDEV); 7348 fc_release_transport(qla2xxx_transport_template); 7349 7350 qlt_exit: 7351 qlt_exit(); 7352 7353 destroy_cache: 7354 kmem_cache_destroy(srb_cachep); 7355 return ret; 7356 } 7357 7358 /** 7359 * qla2x00_module_exit - Module cleanup. 7360 **/ 7361 static void __exit 7362 qla2x00_module_exit(void) 7363 { 7364 pci_unregister_driver(&qla2xxx_pci_driver); 7365 qla2x00_release_firmware(); 7366 kmem_cache_destroy(ctx_cachep); 7367 fc_release_transport(qla2xxx_transport_vport_template); 7368 if (apidev_major >= 0) 7369 unregister_chrdev(apidev_major, QLA2XXX_APIDEV); 7370 fc_release_transport(qla2xxx_transport_template); 7371 qlt_exit(); 7372 kmem_cache_destroy(srb_cachep); 7373 } 7374 7375 module_init(qla2x00_module_init); 7376 module_exit(qla2x00_module_exit); 7377 7378 MODULE_AUTHOR("QLogic Corporation"); 7379 MODULE_DESCRIPTION("QLogic Fibre Channel HBA Driver"); 7380 MODULE_LICENSE("GPL"); 7381 MODULE_VERSION(QLA2XXX_VERSION); 7382 MODULE_FIRMWARE(FW_FILE_ISP21XX); 7383 MODULE_FIRMWARE(FW_FILE_ISP22XX); 7384 MODULE_FIRMWARE(FW_FILE_ISP2300); 7385 MODULE_FIRMWARE(FW_FILE_ISP2322); 7386 MODULE_FIRMWARE(FW_FILE_ISP24XX); 7387 MODULE_FIRMWARE(FW_FILE_ISP25XX); 7388