xref: /openbmc/linux/drivers/scsi/qla2xxx/qla_os.c (revision 1f0d40d8)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * QLogic Fibre Channel HBA Driver
4  * Copyright (c)  2003-2014 QLogic Corporation
5  */
6 #include "qla_def.h"
7 
8 #include <linux/moduleparam.h>
9 #include <linux/vmalloc.h>
10 #include <linux/delay.h>
11 #include <linux/kthread.h>
12 #include <linux/mutex.h>
13 #include <linux/kobject.h>
14 #include <linux/slab.h>
15 #include <linux/blk-mq-pci.h>
16 #include <linux/refcount.h>
17 #include <linux/crash_dump.h>
18 #include <linux/trace_events.h>
19 #include <linux/trace.h>
20 
21 #include <scsi/scsi_tcq.h>
22 #include <scsi/scsicam.h>
23 #include <scsi/scsi_transport.h>
24 #include <scsi/scsi_transport_fc.h>
25 
26 #include "qla_target.h"
27 
28 /*
29  * Driver version
30  */
31 char qla2x00_version_str[40];
32 
33 static int apidev_major;
34 
35 /*
36  * SRB allocation cache
37  */
38 struct kmem_cache *srb_cachep;
39 
40 static struct trace_array *qla_trc_array;
41 
42 int ql2xfulldump_on_mpifail;
43 module_param(ql2xfulldump_on_mpifail, int, S_IRUGO | S_IWUSR);
44 MODULE_PARM_DESC(ql2xfulldump_on_mpifail,
45 		 "Set this to take full dump on MPI hang.");
46 
47 int ql2xenforce_iocb_limit = 1;
48 module_param(ql2xenforce_iocb_limit, int, S_IRUGO | S_IWUSR);
49 MODULE_PARM_DESC(ql2xenforce_iocb_limit,
50 		 "Enforce IOCB throttling, to avoid FW congestion. (default: 1)");
51 
52 /*
53  * CT6 CTX allocation cache
54  */
55 static struct kmem_cache *ctx_cachep;
56 /*
57  * error level for logging
58  */
59 uint ql_errlev = 0x8001;
60 
61 int ql2xsecenable;
62 module_param(ql2xsecenable, int, S_IRUGO);
63 MODULE_PARM_DESC(ql2xsecenable,
64 	"Enable/disable security. 0(Default) - Security disabled. 1 - Security enabled.");
65 
66 static int ql2xenableclass2;
67 module_param(ql2xenableclass2, int, S_IRUGO|S_IRUSR);
68 MODULE_PARM_DESC(ql2xenableclass2,
69 		"Specify if Class 2 operations are supported from the very "
70 		"beginning. Default is 0 - class 2 not supported.");
71 
72 
73 int ql2xlogintimeout = 20;
74 module_param(ql2xlogintimeout, int, S_IRUGO);
75 MODULE_PARM_DESC(ql2xlogintimeout,
76 		"Login timeout value in seconds.");
77 
78 int qlport_down_retry;
79 module_param(qlport_down_retry, int, S_IRUGO);
80 MODULE_PARM_DESC(qlport_down_retry,
81 		"Maximum number of command retries to a port that returns "
82 		"a PORT-DOWN status.");
83 
84 int ql2xplogiabsentdevice;
85 module_param(ql2xplogiabsentdevice, int, S_IRUGO|S_IWUSR);
86 MODULE_PARM_DESC(ql2xplogiabsentdevice,
87 		"Option to enable PLOGI to devices that are not present after "
88 		"a Fabric scan.  This is needed for several broken switches. "
89 		"Default is 0 - no PLOGI. 1 - perform PLOGI.");
90 
91 int ql2xloginretrycount;
92 module_param(ql2xloginretrycount, int, S_IRUGO);
93 MODULE_PARM_DESC(ql2xloginretrycount,
94 		"Specify an alternate value for the NVRAM login retry count.");
95 
96 int ql2xallocfwdump = 1;
97 module_param(ql2xallocfwdump, int, S_IRUGO);
98 MODULE_PARM_DESC(ql2xallocfwdump,
99 		"Option to enable allocation of memory for a firmware dump "
100 		"during HBA initialization.  Memory allocation requirements "
101 		"vary by ISP type.  Default is 1 - allocate memory.");
102 
103 int ql2xextended_error_logging;
104 module_param(ql2xextended_error_logging, int, S_IRUGO|S_IWUSR);
105 module_param_named(logging, ql2xextended_error_logging, int, S_IRUGO|S_IWUSR);
106 MODULE_PARM_DESC(ql2xextended_error_logging,
107 		"Option to enable extended error logging,\n"
108 		"\t\tDefault is 0 - no logging.  0x40000000 - Module Init & Probe.\n"
109 		"\t\t0x20000000 - Mailbox Cmnds. 0x10000000 - Device Discovery.\n"
110 		"\t\t0x08000000 - IO tracing.    0x04000000 - DPC Thread.\n"
111 		"\t\t0x02000000 - Async events.  0x01000000 - Timer routines.\n"
112 		"\t\t0x00800000 - User space.    0x00400000 - Task Management.\n"
113 		"\t\t0x00200000 - AER/EEH.       0x00100000 - Multi Q.\n"
114 		"\t\t0x00080000 - P3P Specific.  0x00040000 - Virtual Port.\n"
115 		"\t\t0x00020000 - Buffer Dump.   0x00010000 - Misc.\n"
116 		"\t\t0x00008000 - Verbose.       0x00004000 - Target.\n"
117 		"\t\t0x00002000 - Target Mgmt.   0x00001000 - Target TMF.\n"
118 		"\t\t0x7fffffff - For enabling all logs, can be too many logs.\n"
119 		"\t\t0x1e400000 - Preferred value for capturing essential "
120 		"debug information (equivalent to old "
121 		"ql2xextended_error_logging=1).\n"
122 		"\t\tDo LOGICAL OR of the value to enable more than one level");
123 
124 int ql2xextended_error_logging_ktrace = 1;
125 module_param(ql2xextended_error_logging_ktrace, int, S_IRUGO|S_IWUSR);
126 MODULE_PARM_DESC(ql2xextended_error_logging_ktrace,
127 		"Same BIT definition as ql2xextended_error_logging, but used to control logging to kernel trace buffer (default=1).\n");
128 
129 int ql2xshiftctondsd = 6;
130 module_param(ql2xshiftctondsd, int, S_IRUGO);
131 MODULE_PARM_DESC(ql2xshiftctondsd,
132 		"Set to control shifting of command type processing "
133 		"based on total number of SG elements.");
134 
135 int ql2xfdmienable = 1;
136 module_param(ql2xfdmienable, int, S_IRUGO|S_IWUSR);
137 module_param_named(fdmi, ql2xfdmienable, int, S_IRUGO|S_IWUSR);
138 MODULE_PARM_DESC(ql2xfdmienable,
139 		"Enables FDMI registrations. "
140 		"0 - no FDMI registrations. "
141 		"1 - provide FDMI registrations (default).");
142 
143 #define MAX_Q_DEPTH	64
144 static int ql2xmaxqdepth = MAX_Q_DEPTH;
145 module_param(ql2xmaxqdepth, int, S_IRUGO|S_IWUSR);
146 MODULE_PARM_DESC(ql2xmaxqdepth,
147 		"Maximum queue depth to set for each LUN. "
148 		"Default is 64.");
149 
150 int ql2xenabledif = 2;
151 module_param(ql2xenabledif, int, S_IRUGO);
152 MODULE_PARM_DESC(ql2xenabledif,
153 		" Enable T10-CRC-DIF:\n"
154 		" Default is 2.\n"
155 		"  0 -- No DIF Support\n"
156 		"  1 -- Enable DIF for all types\n"
157 		"  2 -- Enable DIF for all types, except Type 0.\n");
158 
159 #if (IS_ENABLED(CONFIG_NVME_FC))
160 int ql2xnvmeenable = 1;
161 #else
162 int ql2xnvmeenable;
163 #endif
164 module_param(ql2xnvmeenable, int, 0644);
165 MODULE_PARM_DESC(ql2xnvmeenable,
166     "Enables NVME support. "
167     "0 - no NVMe.  Default is Y");
168 
169 int ql2xenablehba_err_chk = 2;
170 module_param(ql2xenablehba_err_chk, int, S_IRUGO|S_IWUSR);
171 MODULE_PARM_DESC(ql2xenablehba_err_chk,
172 		" Enable T10-CRC-DIF Error isolation by HBA:\n"
173 		" Default is 2.\n"
174 		"  0 -- Error isolation disabled\n"
175 		"  1 -- Error isolation enabled only for DIX Type 0\n"
176 		"  2 -- Error isolation enabled for all Types\n");
177 
178 int ql2xiidmaenable = 1;
179 module_param(ql2xiidmaenable, int, S_IRUGO);
180 MODULE_PARM_DESC(ql2xiidmaenable,
181 		"Enables iIDMA settings "
182 		"Default is 1 - perform iIDMA. 0 - no iIDMA.");
183 
184 int ql2xmqsupport = 1;
185 module_param(ql2xmqsupport, int, S_IRUGO);
186 MODULE_PARM_DESC(ql2xmqsupport,
187 		"Enable on demand multiple queue pairs support "
188 		"Default is 1 for supported. "
189 		"Set it to 0 to turn off mq qpair support.");
190 
191 int ql2xfwloadbin;
192 module_param(ql2xfwloadbin, int, S_IRUGO|S_IWUSR);
193 module_param_named(fwload, ql2xfwloadbin, int, S_IRUGO|S_IWUSR);
194 MODULE_PARM_DESC(ql2xfwloadbin,
195 		"Option to specify location from which to load ISP firmware:.\n"
196 		" 2 -- load firmware via the request_firmware() (hotplug).\n"
197 		"      interface.\n"
198 		" 1 -- load firmware from flash.\n"
199 		" 0 -- use default semantics.\n");
200 
201 int ql2xetsenable;
202 module_param(ql2xetsenable, int, S_IRUGO);
203 MODULE_PARM_DESC(ql2xetsenable,
204 		"Enables firmware ETS burst."
205 		"Default is 0 - skip ETS enablement.");
206 
207 int ql2xdbwr = 1;
208 module_param(ql2xdbwr, int, S_IRUGO|S_IWUSR);
209 MODULE_PARM_DESC(ql2xdbwr,
210 		"Option to specify scheme for request queue posting.\n"
211 		" 0 -- Regular doorbell.\n"
212 		" 1 -- CAMRAM doorbell (faster).\n");
213 
214 int ql2xgffidenable;
215 module_param(ql2xgffidenable, int, S_IRUGO);
216 MODULE_PARM_DESC(ql2xgffidenable,
217 		"Enables GFF_ID checks of port type. "
218 		"Default is 0 - Do not use GFF_ID information.");
219 
220 int ql2xasynctmfenable = 1;
221 module_param(ql2xasynctmfenable, int, S_IRUGO);
222 MODULE_PARM_DESC(ql2xasynctmfenable,
223 		"Enables issue of TM IOCBs asynchronously via IOCB mechanism"
224 		"Default is 1 - Issue TM IOCBs via mailbox mechanism.");
225 
226 int ql2xdontresethba;
227 module_param(ql2xdontresethba, int, S_IRUGO|S_IWUSR);
228 MODULE_PARM_DESC(ql2xdontresethba,
229 		"Option to specify reset behaviour.\n"
230 		" 0 (Default) -- Reset on failure.\n"
231 		" 1 -- Do not reset on failure.\n");
232 
233 uint64_t ql2xmaxlun = MAX_LUNS;
234 module_param(ql2xmaxlun, ullong, S_IRUGO);
235 MODULE_PARM_DESC(ql2xmaxlun,
236 		"Defines the maximum LU number to register with the SCSI "
237 		"midlayer. Default is 65535.");
238 
239 int ql2xmdcapmask = 0x1F;
240 module_param(ql2xmdcapmask, int, S_IRUGO);
241 MODULE_PARM_DESC(ql2xmdcapmask,
242 		"Set the Minidump driver capture mask level. "
243 		"Default is 0x1F - Can be set to 0x3, 0x7, 0xF, 0x1F, 0x7F.");
244 
245 int ql2xmdenable = 1;
246 module_param(ql2xmdenable, int, S_IRUGO);
247 MODULE_PARM_DESC(ql2xmdenable,
248 		"Enable/disable MiniDump. "
249 		"0 - MiniDump disabled. "
250 		"1 (Default) - MiniDump enabled.");
251 
252 int ql2xexlogins;
253 module_param(ql2xexlogins, uint, S_IRUGO|S_IWUSR);
254 MODULE_PARM_DESC(ql2xexlogins,
255 		 "Number of extended Logins. "
256 		 "0 (Default)- Disabled.");
257 
258 int ql2xexchoffld = 1024;
259 module_param(ql2xexchoffld, uint, 0644);
260 MODULE_PARM_DESC(ql2xexchoffld,
261 	"Number of target exchanges.");
262 
263 int ql2xiniexchg = 1024;
264 module_param(ql2xiniexchg, uint, 0644);
265 MODULE_PARM_DESC(ql2xiniexchg,
266 	"Number of initiator exchanges.");
267 
268 int ql2xfwholdabts;
269 module_param(ql2xfwholdabts, int, S_IRUGO);
270 MODULE_PARM_DESC(ql2xfwholdabts,
271 		"Allow FW to hold status IOCB until ABTS rsp received. "
272 		"0 (Default) Do not set fw option. "
273 		"1 - Set fw option to hold ABTS.");
274 
275 int ql2xmvasynctoatio = 1;
276 module_param(ql2xmvasynctoatio, int, S_IRUGO|S_IWUSR);
277 MODULE_PARM_DESC(ql2xmvasynctoatio,
278 		"Move PUREX, ABTS RX and RIDA IOCBs to ATIOQ"
279 		"0 (Default). Do not move IOCBs"
280 		"1 - Move IOCBs.");
281 
282 int ql2xautodetectsfp = 1;
283 module_param(ql2xautodetectsfp, int, 0444);
284 MODULE_PARM_DESC(ql2xautodetectsfp,
285 		 "Detect SFP range and set appropriate distance.\n"
286 		 "1 (Default): Enable\n");
287 
288 int ql2xenablemsix = 1;
289 module_param(ql2xenablemsix, int, 0444);
290 MODULE_PARM_DESC(ql2xenablemsix,
291 		 "Set to enable MSI or MSI-X interrupt mechanism.\n"
292 		 " Default is 1, enable MSI-X interrupt mechanism.\n"
293 		 " 0 -- enable traditional pin-based mechanism.\n"
294 		 " 1 -- enable MSI-X interrupt mechanism.\n"
295 		 " 2 -- enable MSI interrupt mechanism.\n");
296 
297 int qla2xuseresexchforels;
298 module_param(qla2xuseresexchforels, int, 0444);
299 MODULE_PARM_DESC(qla2xuseresexchforels,
300 		 "Reserve 1/2 of emergency exchanges for ELS.\n"
301 		 " 0 (default): disabled");
302 
303 static int ql2xprotmask;
304 module_param(ql2xprotmask, int, 0644);
305 MODULE_PARM_DESC(ql2xprotmask,
306 		 "Override DIF/DIX protection capabilities mask\n"
307 		 "Default is 0 which sets protection mask based on "
308 		 "capabilities reported by HBA firmware.\n");
309 
310 static int ql2xprotguard;
311 module_param(ql2xprotguard, int, 0644);
312 MODULE_PARM_DESC(ql2xprotguard, "Override choice of DIX checksum\n"
313 		 "  0 -- Let HBA firmware decide\n"
314 		 "  1 -- Force T10 CRC\n"
315 		 "  2 -- Force IP checksum\n");
316 
317 int ql2xdifbundlinginternalbuffers;
318 module_param(ql2xdifbundlinginternalbuffers, int, 0644);
319 MODULE_PARM_DESC(ql2xdifbundlinginternalbuffers,
320     "Force using internal buffers for DIF information\n"
321     "0 (Default). Based on check.\n"
322     "1 Force using internal buffers\n");
323 
324 int ql2xsmartsan;
325 module_param(ql2xsmartsan, int, 0444);
326 module_param_named(smartsan, ql2xsmartsan, int, 0444);
327 MODULE_PARM_DESC(ql2xsmartsan,
328 		"Send SmartSAN Management Attributes for FDMI Registration."
329 		" Default is 0 - No SmartSAN registration,"
330 		" 1 - Register SmartSAN Management Attributes.");
331 
332 int ql2xrdpenable;
333 module_param(ql2xrdpenable, int, 0444);
334 module_param_named(rdpenable, ql2xrdpenable, int, 0444);
335 MODULE_PARM_DESC(ql2xrdpenable,
336 		"Enables RDP responses. "
337 		"0 - no RDP responses (default). "
338 		"1 - provide RDP responses.");
339 int ql2xabts_wait_nvme = 1;
340 module_param(ql2xabts_wait_nvme, int, 0444);
341 MODULE_PARM_DESC(ql2xabts_wait_nvme,
342 		 "To wait for ABTS response on I/O timeouts for NVMe. (default: 1)");
343 
344 
345 static u32 ql2xdelay_before_pci_error_handling = 5;
346 module_param(ql2xdelay_before_pci_error_handling, uint, 0644);
347 MODULE_PARM_DESC(ql2xdelay_before_pci_error_handling,
348 	"Number of seconds delayed before qla begin PCI error self-handling (default: 5).\n");
349 
350 static void qla2x00_clear_drv_active(struct qla_hw_data *);
351 static void qla2x00_free_device(scsi_qla_host_t *);
352 static void qla2xxx_map_queues(struct Scsi_Host *shost);
353 static void qla2x00_destroy_deferred_work(struct qla_hw_data *);
354 
355 u32 ql2xnvme_queues = DEF_NVME_HW_QUEUES;
356 module_param(ql2xnvme_queues, uint, S_IRUGO);
357 MODULE_PARM_DESC(ql2xnvme_queues,
358 	"Number of NVMe Queues that can be configured.\n"
359 	"Final value will be min(ql2xnvme_queues, num_cpus,num_chip_queues)\n"
360 	"1 - Minimum number of queues supported\n"
361 	"8 - Default value");
362 
363 int ql2xfc2target = 1;
364 module_param(ql2xfc2target, int, 0444);
365 MODULE_PARM_DESC(qla2xfc2target,
366 		  "Enables FC2 Target support. "
367 		  "0 - FC2 Target support is disabled. "
368 		  "1 - FC2 Target support is enabled (default).");
369 
370 static struct scsi_transport_template *qla2xxx_transport_template = NULL;
371 struct scsi_transport_template *qla2xxx_transport_vport_template = NULL;
372 
373 /* TODO Convert to inlines
374  *
375  * Timer routines
376  */
377 
378 __inline__ void
379 qla2x00_start_timer(scsi_qla_host_t *vha, unsigned long interval)
380 {
381 	timer_setup(&vha->timer, qla2x00_timer, 0);
382 	vha->timer.expires = jiffies + interval * HZ;
383 	add_timer(&vha->timer);
384 	vha->timer_active = 1;
385 }
386 
387 static inline void
388 qla2x00_restart_timer(scsi_qla_host_t *vha, unsigned long interval)
389 {
390 	/* Currently used for 82XX only. */
391 	if (vha->device_flags & DFLG_DEV_FAILED) {
392 		ql_dbg(ql_dbg_timer, vha, 0x600d,
393 		    "Device in a failed state, returning.\n");
394 		return;
395 	}
396 
397 	mod_timer(&vha->timer, jiffies + interval * HZ);
398 }
399 
400 static __inline__ void
401 qla2x00_stop_timer(scsi_qla_host_t *vha)
402 {
403 	del_timer_sync(&vha->timer);
404 	vha->timer_active = 0;
405 }
406 
407 static int qla2x00_do_dpc(void *data);
408 
409 static void qla2x00_rst_aen(scsi_qla_host_t *);
410 
411 static int qla2x00_mem_alloc(struct qla_hw_data *, uint16_t, uint16_t,
412 	struct req_que **, struct rsp_que **);
413 static void qla2x00_free_fw_dump(struct qla_hw_data *);
414 static void qla2x00_mem_free(struct qla_hw_data *);
415 int qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
416 	struct qla_qpair *qpair);
417 
418 /* -------------------------------------------------------------------------- */
419 static void qla_init_base_qpair(struct scsi_qla_host *vha, struct req_que *req,
420     struct rsp_que *rsp)
421 {
422 	struct qla_hw_data *ha = vha->hw;
423 
424 	rsp->qpair = ha->base_qpair;
425 	rsp->req = req;
426 	ha->base_qpair->hw = ha;
427 	ha->base_qpair->req = req;
428 	ha->base_qpair->rsp = rsp;
429 	ha->base_qpair->vha = vha;
430 	ha->base_qpair->qp_lock_ptr = &ha->hardware_lock;
431 	ha->base_qpair->use_shadow_reg = IS_SHADOW_REG_CAPABLE(ha) ? 1 : 0;
432 	ha->base_qpair->msix = &ha->msix_entries[QLA_MSIX_RSP_Q];
433 	ha->base_qpair->srb_mempool = ha->srb_mempool;
434 	INIT_LIST_HEAD(&ha->base_qpair->hints_list);
435 	ha->base_qpair->enable_class_2 = ql2xenableclass2;
436 	/* init qpair to this cpu. Will adjust at run time. */
437 	qla_cpu_update(rsp->qpair, raw_smp_processor_id());
438 	ha->base_qpair->pdev = ha->pdev;
439 
440 	if (IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha))
441 		ha->base_qpair->reqq_start_iocbs = qla_83xx_start_iocbs;
442 }
443 
444 static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req,
445 				struct rsp_que *rsp)
446 {
447 	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
448 
449 	ha->req_q_map = kcalloc(ha->max_req_queues, sizeof(struct req_que *),
450 				GFP_KERNEL);
451 	if (!ha->req_q_map) {
452 		ql_log(ql_log_fatal, vha, 0x003b,
453 		    "Unable to allocate memory for request queue ptrs.\n");
454 		goto fail_req_map;
455 	}
456 
457 	ha->rsp_q_map = kcalloc(ha->max_rsp_queues, sizeof(struct rsp_que *),
458 				GFP_KERNEL);
459 	if (!ha->rsp_q_map) {
460 		ql_log(ql_log_fatal, vha, 0x003c,
461 		    "Unable to allocate memory for response queue ptrs.\n");
462 		goto fail_rsp_map;
463 	}
464 
465 	ha->base_qpair = kzalloc(sizeof(struct qla_qpair), GFP_KERNEL);
466 	if (ha->base_qpair == NULL) {
467 		ql_log(ql_log_warn, vha, 0x00e0,
468 		    "Failed to allocate base queue pair memory.\n");
469 		goto fail_base_qpair;
470 	}
471 
472 	qla_init_base_qpair(vha, req, rsp);
473 
474 	if ((ql2xmqsupport || ql2xnvmeenable) && ha->max_qpairs) {
475 		ha->queue_pair_map = kcalloc(ha->max_qpairs, sizeof(struct qla_qpair *),
476 			GFP_KERNEL);
477 		if (!ha->queue_pair_map) {
478 			ql_log(ql_log_fatal, vha, 0x0180,
479 			    "Unable to allocate memory for queue pair ptrs.\n");
480 			goto fail_qpair_map;
481 		}
482 		if (qla_mapq_alloc_qp_cpu_map(ha) != 0) {
483 			kfree(ha->queue_pair_map);
484 			ha->queue_pair_map = NULL;
485 			goto fail_qpair_map;
486 		}
487 	}
488 
489 	/*
490 	 * Make sure we record at least the request and response queue zero in
491 	 * case we need to free them if part of the probe fails.
492 	 */
493 	ha->rsp_q_map[0] = rsp;
494 	ha->req_q_map[0] = req;
495 	set_bit(0, ha->rsp_qid_map);
496 	set_bit(0, ha->req_qid_map);
497 	return 0;
498 
499 fail_qpair_map:
500 	kfree(ha->base_qpair);
501 	ha->base_qpair = NULL;
502 fail_base_qpair:
503 	kfree(ha->rsp_q_map);
504 	ha->rsp_q_map = NULL;
505 fail_rsp_map:
506 	kfree(ha->req_q_map);
507 	ha->req_q_map = NULL;
508 fail_req_map:
509 	return -ENOMEM;
510 }
511 
512 static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req)
513 {
514 	if (IS_QLAFX00(ha)) {
515 		if (req && req->ring_fx00)
516 			dma_free_coherent(&ha->pdev->dev,
517 			    (req->length_fx00 + 1) * sizeof(request_t),
518 			    req->ring_fx00, req->dma_fx00);
519 	} else if (req && req->ring)
520 		dma_free_coherent(&ha->pdev->dev,
521 		(req->length + 1) * sizeof(request_t),
522 		req->ring, req->dma);
523 
524 	if (req)
525 		kfree(req->outstanding_cmds);
526 
527 	kfree(req);
528 }
529 
530 static void qla2x00_free_rsp_que(struct qla_hw_data *ha, struct rsp_que *rsp)
531 {
532 	if (IS_QLAFX00(ha)) {
533 		if (rsp && rsp->ring_fx00)
534 			dma_free_coherent(&ha->pdev->dev,
535 			    (rsp->length_fx00 + 1) * sizeof(request_t),
536 			    rsp->ring_fx00, rsp->dma_fx00);
537 	} else if (rsp && rsp->ring) {
538 		dma_free_coherent(&ha->pdev->dev,
539 		(rsp->length + 1) * sizeof(response_t),
540 		rsp->ring, rsp->dma);
541 	}
542 	kfree(rsp);
543 }
544 
545 static void qla2x00_free_queues(struct qla_hw_data *ha)
546 {
547 	struct req_que *req;
548 	struct rsp_que *rsp;
549 	int cnt;
550 	unsigned long flags;
551 
552 	if (ha->queue_pair_map) {
553 		kfree(ha->queue_pair_map);
554 		ha->queue_pair_map = NULL;
555 	}
556 	if (ha->base_qpair) {
557 		kfree(ha->base_qpair);
558 		ha->base_qpair = NULL;
559 	}
560 
561 	qla_mapq_free_qp_cpu_map(ha);
562 	spin_lock_irqsave(&ha->hardware_lock, flags);
563 	for (cnt = 0; cnt < ha->max_req_queues; cnt++) {
564 		if (!test_bit(cnt, ha->req_qid_map))
565 			continue;
566 
567 		req = ha->req_q_map[cnt];
568 		clear_bit(cnt, ha->req_qid_map);
569 		ha->req_q_map[cnt] = NULL;
570 
571 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
572 		qla2x00_free_req_que(ha, req);
573 		spin_lock_irqsave(&ha->hardware_lock, flags);
574 	}
575 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
576 
577 	kfree(ha->req_q_map);
578 	ha->req_q_map = NULL;
579 
580 
581 	spin_lock_irqsave(&ha->hardware_lock, flags);
582 	for (cnt = 0; cnt < ha->max_rsp_queues; cnt++) {
583 		if (!test_bit(cnt, ha->rsp_qid_map))
584 			continue;
585 
586 		rsp = ha->rsp_q_map[cnt];
587 		clear_bit(cnt, ha->rsp_qid_map);
588 		ha->rsp_q_map[cnt] =  NULL;
589 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
590 		qla2x00_free_rsp_que(ha, rsp);
591 		spin_lock_irqsave(&ha->hardware_lock, flags);
592 	}
593 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
594 
595 	kfree(ha->rsp_q_map);
596 	ha->rsp_q_map = NULL;
597 }
598 
599 static char *
600 qla2x00_pci_info_str(struct scsi_qla_host *vha, char *str, size_t str_len)
601 {
602 	struct qla_hw_data *ha = vha->hw;
603 	static const char *const pci_bus_modes[] = {
604 		"33", "66", "100", "133",
605 	};
606 	uint16_t pci_bus;
607 
608 	pci_bus = (ha->pci_attr & (BIT_9 | BIT_10)) >> 9;
609 	if (pci_bus) {
610 		snprintf(str, str_len, "PCI-X (%s MHz)",
611 			 pci_bus_modes[pci_bus]);
612 	} else {
613 		pci_bus = (ha->pci_attr & BIT_8) >> 8;
614 		snprintf(str, str_len, "PCI (%s MHz)", pci_bus_modes[pci_bus]);
615 	}
616 
617 	return str;
618 }
619 
620 static char *
621 qla24xx_pci_info_str(struct scsi_qla_host *vha, char *str, size_t str_len)
622 {
623 	static const char *const pci_bus_modes[] = {
624 		"33", "66", "100", "133",
625 	};
626 	struct qla_hw_data *ha = vha->hw;
627 	uint32_t pci_bus;
628 
629 	if (pci_is_pcie(ha->pdev)) {
630 		uint32_t lstat, lspeed, lwidth;
631 		const char *speed_str;
632 
633 		pcie_capability_read_dword(ha->pdev, PCI_EXP_LNKCAP, &lstat);
634 		lspeed = lstat & PCI_EXP_LNKCAP_SLS;
635 		lwidth = (lstat & PCI_EXP_LNKCAP_MLW) >> 4;
636 
637 		switch (lspeed) {
638 		case 1:
639 			speed_str = "2.5GT/s";
640 			break;
641 		case 2:
642 			speed_str = "5.0GT/s";
643 			break;
644 		case 3:
645 			speed_str = "8.0GT/s";
646 			break;
647 		case 4:
648 			speed_str = "16.0GT/s";
649 			break;
650 		default:
651 			speed_str = "<unknown>";
652 			break;
653 		}
654 		snprintf(str, str_len, "PCIe (%s x%d)", speed_str, lwidth);
655 
656 		return str;
657 	}
658 
659 	pci_bus = (ha->pci_attr & CSRX_PCIX_BUS_MODE_MASK) >> 8;
660 	if (pci_bus == 0 || pci_bus == 8)
661 		snprintf(str, str_len, "PCI (%s MHz)",
662 			 pci_bus_modes[pci_bus >> 3]);
663 	else
664 		snprintf(str, str_len, "PCI-X Mode %d (%s MHz)",
665 			 pci_bus & 4 ? 2 : 1,
666 			 pci_bus_modes[pci_bus & 3]);
667 
668 	return str;
669 }
670 
671 static char *
672 qla2x00_fw_version_str(struct scsi_qla_host *vha, char *str, size_t size)
673 {
674 	char un_str[10];
675 	struct qla_hw_data *ha = vha->hw;
676 
677 	snprintf(str, size, "%d.%02d.%02d ", ha->fw_major_version,
678 	    ha->fw_minor_version, ha->fw_subminor_version);
679 
680 	if (ha->fw_attributes & BIT_9) {
681 		strcat(str, "FLX");
682 		return (str);
683 	}
684 
685 	switch (ha->fw_attributes & 0xFF) {
686 	case 0x7:
687 		strcat(str, "EF");
688 		break;
689 	case 0x17:
690 		strcat(str, "TP");
691 		break;
692 	case 0x37:
693 		strcat(str, "IP");
694 		break;
695 	case 0x77:
696 		strcat(str, "VI");
697 		break;
698 	default:
699 		sprintf(un_str, "(%x)", ha->fw_attributes);
700 		strcat(str, un_str);
701 		break;
702 	}
703 	if (ha->fw_attributes & 0x100)
704 		strcat(str, "X");
705 
706 	return (str);
707 }
708 
709 static char *
710 qla24xx_fw_version_str(struct scsi_qla_host *vha, char *str, size_t size)
711 {
712 	struct qla_hw_data *ha = vha->hw;
713 
714 	snprintf(str, size, "%d.%02d.%02d (%x)", ha->fw_major_version,
715 	    ha->fw_minor_version, ha->fw_subminor_version, ha->fw_attributes);
716 	return str;
717 }
718 
719 void qla2x00_sp_free_dma(srb_t *sp)
720 {
721 	struct qla_hw_data *ha = sp->vha->hw;
722 	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
723 
724 	if (sp->flags & SRB_DMA_VALID) {
725 		scsi_dma_unmap(cmd);
726 		sp->flags &= ~SRB_DMA_VALID;
727 	}
728 
729 	if (sp->flags & SRB_CRC_PROT_DMA_VALID) {
730 		dma_unmap_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
731 		    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
732 		sp->flags &= ~SRB_CRC_PROT_DMA_VALID;
733 	}
734 
735 	if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
736 		/* List assured to be having elements */
737 		qla2x00_clean_dsd_pool(ha, sp->u.scmd.crc_ctx);
738 		sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
739 	}
740 
741 	if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
742 		struct crc_context *ctx0 = sp->u.scmd.crc_ctx;
743 
744 		dma_pool_free(ha->dl_dma_pool, ctx0, ctx0->crc_ctx_dma);
745 		sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
746 	}
747 
748 	if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
749 		struct ct6_dsd *ctx1 = &sp->u.scmd.ct6_ctx;
750 
751 		dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
752 		    ctx1->fcp_cmnd_dma);
753 		list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list);
754 		ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt;
755 		ha->gbl_dsd_avail += ctx1->dsd_use_cnt;
756 	}
757 
758 	if (sp->flags & SRB_GOT_BUF)
759 		qla_put_buf(sp->qpair, &sp->u.scmd.buf_dsc);
760 }
761 
762 void qla2x00_sp_compl(srb_t *sp, int res)
763 {
764 	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
765 	struct completion *comp = sp->comp;
766 
767 	/* kref: INIT */
768 	kref_put(&sp->cmd_kref, qla2x00_sp_release);
769 	cmd->result = res;
770 	sp->type = 0;
771 	scsi_done(cmd);
772 	if (comp)
773 		complete(comp);
774 }
775 
776 void qla2xxx_qpair_sp_free_dma(srb_t *sp)
777 {
778 	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
779 	struct qla_hw_data *ha = sp->fcport->vha->hw;
780 
781 	if (sp->flags & SRB_DMA_VALID) {
782 		scsi_dma_unmap(cmd);
783 		sp->flags &= ~SRB_DMA_VALID;
784 	}
785 
786 	if (sp->flags & SRB_CRC_PROT_DMA_VALID) {
787 		dma_unmap_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
788 		    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
789 		sp->flags &= ~SRB_CRC_PROT_DMA_VALID;
790 	}
791 
792 	if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
793 		/* List assured to be having elements */
794 		qla2x00_clean_dsd_pool(ha, sp->u.scmd.crc_ctx);
795 		sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
796 	}
797 
798 	if (sp->flags & SRB_DIF_BUNDL_DMA_VALID) {
799 		struct crc_context *difctx = sp->u.scmd.crc_ctx;
800 		struct dsd_dma *dif_dsd, *nxt_dsd;
801 
802 		list_for_each_entry_safe(dif_dsd, nxt_dsd,
803 		    &difctx->ldif_dma_hndl_list, list) {
804 			list_del(&dif_dsd->list);
805 			dma_pool_free(ha->dif_bundl_pool, dif_dsd->dsd_addr,
806 			    dif_dsd->dsd_list_dma);
807 			kfree(dif_dsd);
808 			difctx->no_dif_bundl--;
809 		}
810 
811 		list_for_each_entry_safe(dif_dsd, nxt_dsd,
812 		    &difctx->ldif_dsd_list, list) {
813 			list_del(&dif_dsd->list);
814 			dma_pool_free(ha->dl_dma_pool, dif_dsd->dsd_addr,
815 			    dif_dsd->dsd_list_dma);
816 			kfree(dif_dsd);
817 			difctx->no_ldif_dsd--;
818 		}
819 
820 		if (difctx->no_ldif_dsd) {
821 			ql_dbg(ql_dbg_tgt+ql_dbg_verbose, sp->vha, 0xe022,
822 			    "%s: difctx->no_ldif_dsd=%x\n",
823 			    __func__, difctx->no_ldif_dsd);
824 		}
825 
826 		if (difctx->no_dif_bundl) {
827 			ql_dbg(ql_dbg_tgt+ql_dbg_verbose, sp->vha, 0xe022,
828 			    "%s: difctx->no_dif_bundl=%x\n",
829 			    __func__, difctx->no_dif_bundl);
830 		}
831 		sp->flags &= ~SRB_DIF_BUNDL_DMA_VALID;
832 	}
833 
834 	if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
835 		struct ct6_dsd *ctx1 = &sp->u.scmd.ct6_ctx;
836 
837 		dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
838 		    ctx1->fcp_cmnd_dma);
839 		list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list);
840 		ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt;
841 		ha->gbl_dsd_avail += ctx1->dsd_use_cnt;
842 		sp->flags &= ~SRB_FCP_CMND_DMA_VALID;
843 	}
844 
845 	if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
846 		struct crc_context *ctx0 = sp->u.scmd.crc_ctx;
847 
848 		dma_pool_free(ha->dl_dma_pool, ctx0, ctx0->crc_ctx_dma);
849 		sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
850 	}
851 
852 	if (sp->flags & SRB_GOT_BUF)
853 		qla_put_buf(sp->qpair, &sp->u.scmd.buf_dsc);
854 }
855 
856 void qla2xxx_qpair_sp_compl(srb_t *sp, int res)
857 {
858 	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
859 	struct completion *comp = sp->comp;
860 
861 	/* ref: INIT */
862 	kref_put(&sp->cmd_kref, qla2x00_sp_release);
863 	cmd->result = res;
864 	sp->type = 0;
865 	scsi_done(cmd);
866 	if (comp)
867 		complete(comp);
868 }
869 
870 static int
871 qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
872 {
873 	scsi_qla_host_t *vha = shost_priv(host);
874 	fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
875 	struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device));
876 	struct qla_hw_data *ha = vha->hw;
877 	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
878 	srb_t *sp;
879 	int rval;
880 
881 	if (unlikely(test_bit(UNLOADING, &base_vha->dpc_flags)) ||
882 	    WARN_ON_ONCE(!rport)) {
883 		cmd->result = DID_NO_CONNECT << 16;
884 		goto qc24_fail_command;
885 	}
886 
887 	if (ha->mqenable) {
888 		uint32_t tag;
889 		uint16_t hwq;
890 		struct qla_qpair *qpair = NULL;
891 
892 		tag = blk_mq_unique_tag(scsi_cmd_to_rq(cmd));
893 		hwq = blk_mq_unique_tag_to_hwq(tag);
894 		qpair = ha->queue_pair_map[hwq];
895 
896 		if (qpair)
897 			return qla2xxx_mqueuecommand(host, cmd, qpair);
898 	}
899 
900 	if (ha->flags.eeh_busy) {
901 		if (ha->flags.pci_channel_io_perm_failure) {
902 			ql_dbg(ql_dbg_aer, vha, 0x9010,
903 			    "PCI Channel IO permanent failure, exiting "
904 			    "cmd=%p.\n", cmd);
905 			cmd->result = DID_NO_CONNECT << 16;
906 		} else {
907 			ql_dbg(ql_dbg_aer, vha, 0x9011,
908 			    "EEH_Busy, Requeuing the cmd=%p.\n", cmd);
909 			cmd->result = DID_REQUEUE << 16;
910 		}
911 		goto qc24_fail_command;
912 	}
913 
914 	rval = fc_remote_port_chkready(rport);
915 	if (rval) {
916 		cmd->result = rval;
917 		ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3003,
918 		    "fc_remote_port_chkready failed for cmd=%p, rval=0x%x.\n",
919 		    cmd, rval);
920 		goto qc24_fail_command;
921 	}
922 
923 	if (!vha->flags.difdix_supported &&
924 		scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
925 			ql_dbg(ql_dbg_io, vha, 0x3004,
926 			    "DIF Cap not reg, fail DIF capable cmd's:%p.\n",
927 			    cmd);
928 			cmd->result = DID_NO_CONNECT << 16;
929 			goto qc24_fail_command;
930 	}
931 
932 	if (!fcport || fcport->deleted) {
933 		cmd->result = DID_IMM_RETRY << 16;
934 		goto qc24_fail_command;
935 	}
936 
937 	if (atomic_read(&fcport->state) != FCS_ONLINE || fcport->deleted) {
938 		if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
939 			atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
940 			ql_dbg(ql_dbg_io, vha, 0x3005,
941 			    "Returning DNC, fcport_state=%d loop_state=%d.\n",
942 			    atomic_read(&fcport->state),
943 			    atomic_read(&base_vha->loop_state));
944 			cmd->result = DID_NO_CONNECT << 16;
945 			goto qc24_fail_command;
946 		}
947 		goto qc24_target_busy;
948 	}
949 
950 	/*
951 	 * Return target busy if we've received a non-zero retry_delay_timer
952 	 * in a FCP_RSP.
953 	 */
954 	if (fcport->retry_delay_timestamp == 0) {
955 		/* retry delay not set */
956 	} else if (time_after(jiffies, fcport->retry_delay_timestamp))
957 		fcport->retry_delay_timestamp = 0;
958 	else
959 		goto qc24_target_busy;
960 
961 	sp = scsi_cmd_priv(cmd);
962 	/* ref: INIT */
963 	qla2xxx_init_sp(sp, vha, vha->hw->base_qpair, fcport);
964 
965 	sp->u.scmd.cmd = cmd;
966 	sp->type = SRB_SCSI_CMD;
967 	sp->free = qla2x00_sp_free_dma;
968 	sp->done = qla2x00_sp_compl;
969 
970 	rval = ha->isp_ops->start_scsi(sp);
971 	if (rval != QLA_SUCCESS) {
972 		ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3013,
973 		    "Start scsi failed rval=%d for cmd=%p.\n", rval, cmd);
974 		goto qc24_host_busy_free_sp;
975 	}
976 
977 	return 0;
978 
979 qc24_host_busy_free_sp:
980 	/* ref: INIT */
981 	kref_put(&sp->cmd_kref, qla2x00_sp_release);
982 
983 qc24_target_busy:
984 	return SCSI_MLQUEUE_TARGET_BUSY;
985 
986 qc24_fail_command:
987 	scsi_done(cmd);
988 
989 	return 0;
990 }
991 
992 /* For MQ supported I/O */
993 int
994 qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
995     struct qla_qpair *qpair)
996 {
997 	scsi_qla_host_t *vha = shost_priv(host);
998 	fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
999 	struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device));
1000 	struct qla_hw_data *ha = vha->hw;
1001 	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
1002 	srb_t *sp;
1003 	int rval;
1004 
1005 	rval = rport ? fc_remote_port_chkready(rport) : (DID_NO_CONNECT << 16);
1006 	if (rval) {
1007 		cmd->result = rval;
1008 		ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3076,
1009 		    "fc_remote_port_chkready failed for cmd=%p, rval=0x%x.\n",
1010 		    cmd, rval);
1011 		goto qc24_fail_command;
1012 	}
1013 
1014 	if (!qpair->online) {
1015 		ql_dbg(ql_dbg_io, vha, 0x3077,
1016 		       "qpair not online. eeh_busy=%d.\n", ha->flags.eeh_busy);
1017 		cmd->result = DID_NO_CONNECT << 16;
1018 		goto qc24_fail_command;
1019 	}
1020 
1021 	if (!fcport || fcport->deleted) {
1022 		cmd->result = DID_IMM_RETRY << 16;
1023 		goto qc24_fail_command;
1024 	}
1025 
1026 	if (atomic_read(&fcport->state) != FCS_ONLINE || fcport->deleted) {
1027 		if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
1028 			atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
1029 			ql_dbg(ql_dbg_io, vha, 0x3077,
1030 			    "Returning DNC, fcport_state=%d loop_state=%d.\n",
1031 			    atomic_read(&fcport->state),
1032 			    atomic_read(&base_vha->loop_state));
1033 			cmd->result = DID_NO_CONNECT << 16;
1034 			goto qc24_fail_command;
1035 		}
1036 		goto qc24_target_busy;
1037 	}
1038 
1039 	/*
1040 	 * Return target busy if we've received a non-zero retry_delay_timer
1041 	 * in a FCP_RSP.
1042 	 */
1043 	if (fcport->retry_delay_timestamp == 0) {
1044 		/* retry delay not set */
1045 	} else if (time_after(jiffies, fcport->retry_delay_timestamp))
1046 		fcport->retry_delay_timestamp = 0;
1047 	else
1048 		goto qc24_target_busy;
1049 
1050 	sp = scsi_cmd_priv(cmd);
1051 	/* ref: INIT */
1052 	qla2xxx_init_sp(sp, vha, qpair, fcport);
1053 
1054 	sp->u.scmd.cmd = cmd;
1055 	sp->type = SRB_SCSI_CMD;
1056 	sp->free = qla2xxx_qpair_sp_free_dma;
1057 	sp->done = qla2xxx_qpair_sp_compl;
1058 
1059 	rval = ha->isp_ops->start_scsi_mq(sp);
1060 	if (rval != QLA_SUCCESS) {
1061 		ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3078,
1062 		    "Start scsi failed rval=%d for cmd=%p.\n", rval, cmd);
1063 		goto qc24_host_busy_free_sp;
1064 	}
1065 
1066 	return 0;
1067 
1068 qc24_host_busy_free_sp:
1069 	/* ref: INIT */
1070 	kref_put(&sp->cmd_kref, qla2x00_sp_release);
1071 
1072 qc24_target_busy:
1073 	return SCSI_MLQUEUE_TARGET_BUSY;
1074 
1075 qc24_fail_command:
1076 	scsi_done(cmd);
1077 
1078 	return 0;
1079 }
1080 
1081 /*
1082  * qla2x00_eh_wait_on_command
1083  *    Waits for the command to be returned by the Firmware for some
1084  *    max time.
1085  *
1086  * Input:
1087  *    cmd = Scsi Command to wait on.
1088  *
1089  * Return:
1090  *    Completed in time : QLA_SUCCESS
1091  *    Did not complete in time : QLA_FUNCTION_FAILED
1092  */
1093 static int
1094 qla2x00_eh_wait_on_command(struct scsi_cmnd *cmd)
1095 {
1096 #define ABORT_POLLING_PERIOD	1000
1097 #define ABORT_WAIT_ITER		((2 * 1000) / (ABORT_POLLING_PERIOD))
1098 	unsigned long wait_iter = ABORT_WAIT_ITER;
1099 	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
1100 	struct qla_hw_data *ha = vha->hw;
1101 	srb_t *sp = scsi_cmd_priv(cmd);
1102 	int ret = QLA_SUCCESS;
1103 
1104 	if (unlikely(pci_channel_offline(ha->pdev)) || ha->flags.eeh_busy) {
1105 		ql_dbg(ql_dbg_taskm, vha, 0x8005,
1106 		    "Return:eh_wait.\n");
1107 		return ret;
1108 	}
1109 
1110 	while (sp->type && wait_iter--)
1111 		msleep(ABORT_POLLING_PERIOD);
1112 	if (sp->type)
1113 		ret = QLA_FUNCTION_FAILED;
1114 
1115 	return ret;
1116 }
1117 
1118 /*
1119  * qla2x00_wait_for_hba_online
1120  *    Wait till the HBA is online after going through
1121  *    <= MAX_RETRIES_OF_ISP_ABORT  or
1122  *    finally HBA is disabled ie marked offline
1123  *
1124  * Input:
1125  *     ha - pointer to host adapter structure
1126  *
1127  * Note:
1128  *    Does context switching-Release SPIN_LOCK
1129  *    (if any) before calling this routine.
1130  *
1131  * Return:
1132  *    Success (Adapter is online) : 0
1133  *    Failed  (Adapter is offline/disabled) : 1
1134  */
1135 int
1136 qla2x00_wait_for_hba_online(scsi_qla_host_t *vha)
1137 {
1138 	int		return_status;
1139 	unsigned long	wait_online;
1140 	struct qla_hw_data *ha = vha->hw;
1141 	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
1142 
1143 	wait_online = jiffies + (MAX_LOOP_TIMEOUT * HZ);
1144 	while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) ||
1145 	    test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
1146 	    test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
1147 	    ha->dpc_active) && time_before(jiffies, wait_online)) {
1148 
1149 		msleep(1000);
1150 	}
1151 	if (base_vha->flags.online)
1152 		return_status = QLA_SUCCESS;
1153 	else
1154 		return_status = QLA_FUNCTION_FAILED;
1155 
1156 	return (return_status);
1157 }
1158 
1159 static inline int test_fcport_count(scsi_qla_host_t *vha)
1160 {
1161 	struct qla_hw_data *ha = vha->hw;
1162 	unsigned long flags;
1163 	int res;
1164 	/* Return 0 = sleep, x=wake */
1165 
1166 	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
1167 	ql_dbg(ql_dbg_init, vha, 0x00ec,
1168 	    "tgt %p, fcport_count=%d\n",
1169 	    vha, vha->fcport_count);
1170 	res = (vha->fcport_count == 0);
1171 	if  (res) {
1172 		struct fc_port *fcport;
1173 
1174 		list_for_each_entry(fcport, &vha->vp_fcports, list) {
1175 			if (fcport->deleted != QLA_SESS_DELETED) {
1176 				/* session(s) may not be fully logged in
1177 				 * (ie fcport_count=0), but session
1178 				 * deletion thread(s) may be inflight.
1179 				 */
1180 
1181 				res = 0;
1182 				break;
1183 			}
1184 		}
1185 	}
1186 	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
1187 
1188 	return res;
1189 }
1190 
1191 /*
1192  * qla2x00_wait_for_sess_deletion can only be called from remove_one.
1193  * it has dependency on UNLOADING flag to stop device discovery
1194  */
1195 void
1196 qla2x00_wait_for_sess_deletion(scsi_qla_host_t *vha)
1197 {
1198 	u8 i;
1199 
1200 	qla2x00_mark_all_devices_lost(vha);
1201 
1202 	for (i = 0; i < 10; i++) {
1203 		if (wait_event_timeout(vha->fcport_waitQ,
1204 		    test_fcport_count(vha), HZ) > 0)
1205 			break;
1206 	}
1207 
1208 	flush_workqueue(vha->hw->wq);
1209 }
1210 
1211 /*
1212  * qla2x00_wait_for_hba_ready
1213  * Wait till the HBA is ready before doing driver unload
1214  *
1215  * Input:
1216  *     ha - pointer to host adapter structure
1217  *
1218  * Note:
1219  *    Does context switching-Release SPIN_LOCK
1220  *    (if any) before calling this routine.
1221  *
1222  */
1223 static void
1224 qla2x00_wait_for_hba_ready(scsi_qla_host_t *vha)
1225 {
1226 	struct qla_hw_data *ha = vha->hw;
1227 	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
1228 
1229 	while ((qla2x00_reset_active(vha) || ha->dpc_active ||
1230 		ha->flags.mbox_busy) ||
1231 	       test_bit(FX00_RESET_RECOVERY, &vha->dpc_flags) ||
1232 	       test_bit(FX00_TARGET_SCAN, &vha->dpc_flags)) {
1233 		if (test_bit(UNLOADING, &base_vha->dpc_flags))
1234 			break;
1235 		msleep(1000);
1236 	}
1237 }
1238 
1239 int
1240 qla2x00_wait_for_chip_reset(scsi_qla_host_t *vha)
1241 {
1242 	int		return_status;
1243 	unsigned long	wait_reset;
1244 	struct qla_hw_data *ha = vha->hw;
1245 	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
1246 
1247 	wait_reset = jiffies + (MAX_LOOP_TIMEOUT * HZ);
1248 	while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) ||
1249 	    test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
1250 	    test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
1251 	    ha->dpc_active) && time_before(jiffies, wait_reset)) {
1252 
1253 		msleep(1000);
1254 
1255 		if (!test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags) &&
1256 		    ha->flags.chip_reset_done)
1257 			break;
1258 	}
1259 	if (ha->flags.chip_reset_done)
1260 		return_status = QLA_SUCCESS;
1261 	else
1262 		return_status = QLA_FUNCTION_FAILED;
1263 
1264 	return return_status;
1265 }
1266 
1267 /**************************************************************************
1268 * qla2xxx_eh_abort
1269 *
1270 * Description:
1271 *    The abort function will abort the specified command.
1272 *
1273 * Input:
1274 *    cmd = Linux SCSI command packet to be aborted.
1275 *
1276 * Returns:
1277 *    Either SUCCESS or FAILED.
1278 *
1279 * Note:
1280 *    Only return FAILED if command not returned by firmware.
1281 **************************************************************************/
1282 static int
1283 qla2xxx_eh_abort(struct scsi_cmnd *cmd)
1284 {
1285 	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
1286 	DECLARE_COMPLETION_ONSTACK(comp);
1287 	srb_t *sp;
1288 	int ret;
1289 	unsigned int id;
1290 	uint64_t lun;
1291 	int rval;
1292 	struct qla_hw_data *ha = vha->hw;
1293 	uint32_t ratov_j;
1294 	struct qla_qpair *qpair;
1295 	unsigned long flags;
1296 	int fast_fail_status = SUCCESS;
1297 
1298 	if (qla2x00_isp_reg_stat(ha)) {
1299 		ql_log(ql_log_info, vha, 0x8042,
1300 		    "PCI/Register disconnect, exiting.\n");
1301 		qla_pci_set_eeh_busy(vha);
1302 		return FAILED;
1303 	}
1304 
1305 	/* Save any FAST_IO_FAIL value to return later if abort succeeds */
1306 	ret = fc_block_scsi_eh(cmd);
1307 	if (ret != 0)
1308 		fast_fail_status = ret;
1309 
1310 	sp = scsi_cmd_priv(cmd);
1311 	qpair = sp->qpair;
1312 
1313 	vha->cmd_timeout_cnt++;
1314 
1315 	if ((sp->fcport && sp->fcport->deleted) || !qpair)
1316 		return fast_fail_status != SUCCESS ? fast_fail_status : FAILED;
1317 
1318 	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
1319 	sp->comp = &comp;
1320 	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
1321 
1322 
1323 	id = cmd->device->id;
1324 	lun = cmd->device->lun;
1325 
1326 	ql_dbg(ql_dbg_taskm, vha, 0x8002,
1327 	    "Aborting from RISC nexus=%ld:%d:%llu sp=%p cmd=%p handle=%x\n",
1328 	    vha->host_no, id, lun, sp, cmd, sp->handle);
1329 
1330 	/*
1331 	 * Abort will release the original Command/sp from FW. Let the
1332 	 * original command call scsi_done. In return, he will wakeup
1333 	 * this sleeping thread.
1334 	 */
1335 	rval = ha->isp_ops->abort_command(sp);
1336 
1337 	ql_dbg(ql_dbg_taskm, vha, 0x8003,
1338 	       "Abort command mbx cmd=%p, rval=%x.\n", cmd, rval);
1339 
1340 	/* Wait for the command completion. */
1341 	ratov_j = ha->r_a_tov/10 * 4 * 1000;
1342 	ratov_j = msecs_to_jiffies(ratov_j);
1343 	switch (rval) {
1344 	case QLA_SUCCESS:
1345 		if (!wait_for_completion_timeout(&comp, ratov_j)) {
1346 			ql_dbg(ql_dbg_taskm, vha, 0xffff,
1347 			    "%s: Abort wait timer (4 * R_A_TOV[%d]) expired\n",
1348 			    __func__, ha->r_a_tov/10);
1349 			ret = FAILED;
1350 		} else {
1351 			ret = fast_fail_status;
1352 		}
1353 		break;
1354 	default:
1355 		ret = FAILED;
1356 		break;
1357 	}
1358 
1359 	sp->comp = NULL;
1360 
1361 	ql_log(ql_log_info, vha, 0x801c,
1362 	    "Abort command issued nexus=%ld:%d:%llu -- %x.\n",
1363 	    vha->host_no, id, lun, ret);
1364 
1365 	return ret;
1366 }
1367 
1368 /*
1369  * Returns: QLA_SUCCESS or QLA_FUNCTION_FAILED.
1370  */
1371 static int
1372 __qla2x00_eh_wait_for_pending_commands(struct qla_qpair *qpair, unsigned int t,
1373 				       uint64_t l, enum nexus_wait_type type)
1374 {
1375 	int cnt, match, status;
1376 	unsigned long flags;
1377 	scsi_qla_host_t *vha = qpair->vha;
1378 	struct req_que *req = qpair->req;
1379 	srb_t *sp;
1380 	struct scsi_cmnd *cmd;
1381 
1382 	status = QLA_SUCCESS;
1383 
1384 	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
1385 	for (cnt = 1; status == QLA_SUCCESS &&
1386 		cnt < req->num_outstanding_cmds; cnt++) {
1387 		sp = req->outstanding_cmds[cnt];
1388 		if (!sp)
1389 			continue;
1390 		if (sp->type != SRB_SCSI_CMD)
1391 			continue;
1392 		if (vha->vp_idx != sp->vha->vp_idx)
1393 			continue;
1394 		match = 0;
1395 		cmd = GET_CMD_SP(sp);
1396 		switch (type) {
1397 		case WAIT_HOST:
1398 			match = 1;
1399 			break;
1400 		case WAIT_TARGET:
1401 			match = cmd->device->id == t;
1402 			break;
1403 		case WAIT_LUN:
1404 			match = (cmd->device->id == t &&
1405 				cmd->device->lun == l);
1406 			break;
1407 		}
1408 		if (!match)
1409 			continue;
1410 
1411 		spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
1412 		status = qla2x00_eh_wait_on_command(cmd);
1413 		spin_lock_irqsave(qpair->qp_lock_ptr, flags);
1414 	}
1415 	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
1416 
1417 	return status;
1418 }
1419 
1420 int
1421 qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
1422 				     uint64_t l, enum nexus_wait_type type)
1423 {
1424 	struct qla_qpair *qpair;
1425 	struct qla_hw_data *ha = vha->hw;
1426 	int i, status = QLA_SUCCESS;
1427 
1428 	status = __qla2x00_eh_wait_for_pending_commands(ha->base_qpair, t, l,
1429 							type);
1430 	for (i = 0; status == QLA_SUCCESS && i < ha->max_qpairs; i++) {
1431 		qpair = ha->queue_pair_map[i];
1432 		if (!qpair)
1433 			continue;
1434 		status = __qla2x00_eh_wait_for_pending_commands(qpair, t, l,
1435 								type);
1436 	}
1437 	return status;
1438 }
1439 
1440 static char *reset_errors[] = {
1441 	"HBA not online",
1442 	"HBA not ready",
1443 	"Task management failed",
1444 	"Waiting for command completions",
1445 };
1446 
1447 static int
1448 qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
1449 {
1450 	struct scsi_device *sdev = cmd->device;
1451 	scsi_qla_host_t *vha = shost_priv(sdev->host);
1452 	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
1453 	fc_port_t *fcport = (struct fc_port *) sdev->hostdata;
1454 	struct qla_hw_data *ha = vha->hw;
1455 	int err;
1456 
1457 	if (qla2x00_isp_reg_stat(ha)) {
1458 		ql_log(ql_log_info, vha, 0x803e,
1459 		    "PCI/Register disconnect, exiting.\n");
1460 		qla_pci_set_eeh_busy(vha);
1461 		return FAILED;
1462 	}
1463 
1464 	if (!fcport) {
1465 		return FAILED;
1466 	}
1467 
1468 	err = fc_block_rport(rport);
1469 	if (err != 0)
1470 		return err;
1471 
1472 	if (fcport->deleted)
1473 		return FAILED;
1474 
1475 	ql_log(ql_log_info, vha, 0x8009,
1476 	    "DEVICE RESET ISSUED nexus=%ld:%d:%llu cmd=%p.\n", vha->host_no,
1477 	    sdev->id, sdev->lun, cmd);
1478 
1479 	err = 0;
1480 	if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
1481 		ql_log(ql_log_warn, vha, 0x800a,
1482 		    "Wait for hba online failed for cmd=%p.\n", cmd);
1483 		goto eh_reset_failed;
1484 	}
1485 	err = 2;
1486 	if (ha->isp_ops->lun_reset(fcport, sdev->lun, 1)
1487 		!= QLA_SUCCESS) {
1488 		ql_log(ql_log_warn, vha, 0x800c,
1489 		    "do_reset failed for cmd=%p.\n", cmd);
1490 		goto eh_reset_failed;
1491 	}
1492 	err = 3;
1493 	if (qla2x00_eh_wait_for_pending_commands(vha, sdev->id,
1494 	    sdev->lun, WAIT_LUN) != QLA_SUCCESS) {
1495 		ql_log(ql_log_warn, vha, 0x800d,
1496 		    "wait for pending cmds failed for cmd=%p.\n", cmd);
1497 		goto eh_reset_failed;
1498 	}
1499 
1500 	ql_log(ql_log_info, vha, 0x800e,
1501 	    "DEVICE RESET SUCCEEDED nexus:%ld:%d:%llu cmd=%p.\n",
1502 	    vha->host_no, sdev->id, sdev->lun, cmd);
1503 
1504 	return SUCCESS;
1505 
1506 eh_reset_failed:
1507 	ql_log(ql_log_info, vha, 0x800f,
1508 	    "DEVICE RESET FAILED: %s nexus=%ld:%d:%llu cmd=%p.\n",
1509 	    reset_errors[err], vha->host_no, sdev->id, sdev->lun,
1510 	    cmd);
1511 	vha->reset_cmd_err_cnt++;
1512 	return FAILED;
1513 }
1514 
1515 static int
1516 qla2xxx_eh_target_reset(struct scsi_cmnd *cmd)
1517 {
1518 	struct scsi_device *sdev = cmd->device;
1519 	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
1520 	scsi_qla_host_t *vha = shost_priv(rport_to_shost(rport));
1521 	struct qla_hw_data *ha = vha->hw;
1522 	fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
1523 	int err;
1524 
1525 	if (qla2x00_isp_reg_stat(ha)) {
1526 		ql_log(ql_log_info, vha, 0x803f,
1527 		    "PCI/Register disconnect, exiting.\n");
1528 		qla_pci_set_eeh_busy(vha);
1529 		return FAILED;
1530 	}
1531 
1532 	if (!fcport) {
1533 		return FAILED;
1534 	}
1535 
1536 	err = fc_block_rport(rport);
1537 	if (err != 0)
1538 		return err;
1539 
1540 	if (fcport->deleted)
1541 		return FAILED;
1542 
1543 	ql_log(ql_log_info, vha, 0x8009,
1544 	    "TARGET RESET ISSUED nexus=%ld:%d cmd=%p.\n", vha->host_no,
1545 	    sdev->id, cmd);
1546 
1547 	err = 0;
1548 	if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
1549 		ql_log(ql_log_warn, vha, 0x800a,
1550 		    "Wait for hba online failed for cmd=%p.\n", cmd);
1551 		goto eh_reset_failed;
1552 	}
1553 	err = 2;
1554 	if (ha->isp_ops->target_reset(fcport, 0, 0) != QLA_SUCCESS) {
1555 		ql_log(ql_log_warn, vha, 0x800c,
1556 		    "target_reset failed for cmd=%p.\n", cmd);
1557 		goto eh_reset_failed;
1558 	}
1559 	err = 3;
1560 	if (qla2x00_eh_wait_for_pending_commands(vha, sdev->id,
1561 	    0, WAIT_TARGET) != QLA_SUCCESS) {
1562 		ql_log(ql_log_warn, vha, 0x800d,
1563 		    "wait for pending cmds failed for cmd=%p.\n", cmd);
1564 		goto eh_reset_failed;
1565 	}
1566 
1567 	ql_log(ql_log_info, vha, 0x800e,
1568 	    "TARGET RESET SUCCEEDED nexus:%ld:%d cmd=%p.\n",
1569 	    vha->host_no, sdev->id, cmd);
1570 
1571 	return SUCCESS;
1572 
1573 eh_reset_failed:
1574 	ql_log(ql_log_info, vha, 0x800f,
1575 	    "TARGET RESET FAILED: %s nexus=%ld:%d:%llu cmd=%p.\n",
1576 	    reset_errors[err], vha->host_no, cmd->device->id, cmd->device->lun,
1577 	    cmd);
1578 	vha->reset_cmd_err_cnt++;
1579 	return FAILED;
1580 }
1581 
1582 /**************************************************************************
1583 * qla2xxx_eh_bus_reset
1584 *
1585 * Description:
1586 *    The bus reset function will reset the bus and abort any executing
1587 *    commands.
1588 *
1589 * Input:
1590 *    cmd = Linux SCSI command packet of the command that cause the
1591 *          bus reset.
1592 *
1593 * Returns:
1594 *    SUCCESS/FAILURE (defined as macro in scsi.h).
1595 *
1596 **************************************************************************/
1597 static int
1598 qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
1599 {
1600 	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
1601 	int ret = FAILED;
1602 	unsigned int id;
1603 	uint64_t lun;
1604 	struct qla_hw_data *ha = vha->hw;
1605 
1606 	if (qla2x00_isp_reg_stat(ha)) {
1607 		ql_log(ql_log_info, vha, 0x8040,
1608 		    "PCI/Register disconnect, exiting.\n");
1609 		qla_pci_set_eeh_busy(vha);
1610 		return FAILED;
1611 	}
1612 
1613 	id = cmd->device->id;
1614 	lun = cmd->device->lun;
1615 
1616 	if (qla2x00_chip_is_down(vha))
1617 		return ret;
1618 
1619 	ql_log(ql_log_info, vha, 0x8012,
1620 	    "BUS RESET ISSUED nexus=%ld:%d:%llu.\n", vha->host_no, id, lun);
1621 
1622 	if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
1623 		ql_log(ql_log_fatal, vha, 0x8013,
1624 		    "Wait for hba online failed board disabled.\n");
1625 		goto eh_bus_reset_done;
1626 	}
1627 
1628 	if (qla2x00_loop_reset(vha) == QLA_SUCCESS)
1629 		ret = SUCCESS;
1630 
1631 	if (ret == FAILED)
1632 		goto eh_bus_reset_done;
1633 
1634 	/* Flush outstanding commands. */
1635 	if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST) !=
1636 	    QLA_SUCCESS) {
1637 		ql_log(ql_log_warn, vha, 0x8014,
1638 		    "Wait for pending commands failed.\n");
1639 		ret = FAILED;
1640 	}
1641 
1642 eh_bus_reset_done:
1643 	ql_log(ql_log_warn, vha, 0x802b,
1644 	    "BUS RESET %s nexus=%ld:%d:%llu.\n",
1645 	    (ret == FAILED) ? "FAILED" : "SUCCEEDED", vha->host_no, id, lun);
1646 
1647 	return ret;
1648 }
1649 
1650 /**************************************************************************
1651 * qla2xxx_eh_host_reset
1652 *
1653 * Description:
1654 *    The reset function will reset the Adapter.
1655 *
1656 * Input:
1657 *      cmd = Linux SCSI command packet of the command that cause the
1658 *            adapter reset.
1659 *
1660 * Returns:
1661 *      Either SUCCESS or FAILED.
1662 *
1663 * Note:
1664 **************************************************************************/
1665 static int
1666 qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
1667 {
1668 	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
1669 	struct qla_hw_data *ha = vha->hw;
1670 	int ret = FAILED;
1671 	unsigned int id;
1672 	uint64_t lun;
1673 	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
1674 
1675 	if (qla2x00_isp_reg_stat(ha)) {
1676 		ql_log(ql_log_info, vha, 0x8041,
1677 		    "PCI/Register disconnect, exiting.\n");
1678 		qla_pci_set_eeh_busy(vha);
1679 		return SUCCESS;
1680 	}
1681 
1682 	id = cmd->device->id;
1683 	lun = cmd->device->lun;
1684 
1685 	ql_log(ql_log_info, vha, 0x8018,
1686 	    "ADAPTER RESET ISSUED nexus=%ld:%d:%llu.\n", vha->host_no, id, lun);
1687 
1688 	/*
1689 	 * No point in issuing another reset if one is active.  Also do not
1690 	 * attempt a reset if we are updating flash.
1691 	 */
1692 	if (qla2x00_reset_active(vha) || ha->optrom_state != QLA_SWAITING)
1693 		goto eh_host_reset_lock;
1694 
1695 	if (vha != base_vha) {
1696 		if (qla2x00_vp_abort_isp(vha))
1697 			goto eh_host_reset_lock;
1698 	} else {
1699 		if (IS_P3P_TYPE(vha->hw)) {
1700 			if (!qla82xx_fcoe_ctx_reset(vha)) {
1701 				/* Ctx reset success */
1702 				ret = SUCCESS;
1703 				goto eh_host_reset_lock;
1704 			}
1705 			/* fall thru if ctx reset failed */
1706 		}
1707 		if (ha->wq)
1708 			flush_workqueue(ha->wq);
1709 
1710 		set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
1711 		if (ha->isp_ops->abort_isp(base_vha)) {
1712 			clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
1713 			/* failed. schedule dpc to try */
1714 			set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
1715 
1716 			if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
1717 				ql_log(ql_log_warn, vha, 0x802a,
1718 				    "wait for hba online failed.\n");
1719 				goto eh_host_reset_lock;
1720 			}
1721 		}
1722 		clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
1723 	}
1724 
1725 	/* Waiting for command to be returned to OS.*/
1726 	if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST) ==
1727 		QLA_SUCCESS)
1728 		ret = SUCCESS;
1729 
1730 eh_host_reset_lock:
1731 	ql_log(ql_log_info, vha, 0x8017,
1732 	    "ADAPTER RESET %s nexus=%ld:%d:%llu.\n",
1733 	    (ret == FAILED) ? "FAILED" : "SUCCEEDED", vha->host_no, id, lun);
1734 
1735 	return ret;
1736 }
1737 
1738 /*
1739 * qla2x00_loop_reset
1740 *      Issue loop reset.
1741 *
1742 * Input:
1743 *      ha = adapter block pointer.
1744 *
1745 * Returns:
1746 *      0 = success
1747 */
1748 int
1749 qla2x00_loop_reset(scsi_qla_host_t *vha)
1750 {
1751 	int ret;
1752 	struct qla_hw_data *ha = vha->hw;
1753 
1754 	if (IS_QLAFX00(ha))
1755 		return QLA_SUCCESS;
1756 
1757 	if (ha->flags.enable_lip_full_login && !IS_CNA_CAPABLE(ha)) {
1758 		atomic_set(&vha->loop_state, LOOP_DOWN);
1759 		atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
1760 		qla2x00_mark_all_devices_lost(vha);
1761 		ret = qla2x00_full_login_lip(vha);
1762 		if (ret != QLA_SUCCESS) {
1763 			ql_dbg(ql_dbg_taskm, vha, 0x802d,
1764 			    "full_login_lip=%d.\n", ret);
1765 		}
1766 	}
1767 
1768 	if (ha->flags.enable_lip_reset) {
1769 		ret = qla2x00_lip_reset(vha);
1770 		if (ret != QLA_SUCCESS)
1771 			ql_dbg(ql_dbg_taskm, vha, 0x802e,
1772 			    "lip_reset failed (%d).\n", ret);
1773 	}
1774 
1775 	/* Issue marker command only when we are going to start the I/O */
1776 	vha->marker_needed = 1;
1777 
1778 	return QLA_SUCCESS;
1779 }
1780 
1781 /*
1782  * The caller must ensure that no completion interrupts will happen
1783  * while this function is in progress.
1784  */
1785 static void qla2x00_abort_srb(struct qla_qpair *qp, srb_t *sp, const int res,
1786 			      unsigned long *flags)
1787 	__releases(qp->qp_lock_ptr)
1788 	__acquires(qp->qp_lock_ptr)
1789 {
1790 	DECLARE_COMPLETION_ONSTACK(comp);
1791 	scsi_qla_host_t *vha = qp->vha;
1792 	struct qla_hw_data *ha = vha->hw;
1793 	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1794 	int rval;
1795 	bool ret_cmd;
1796 	uint32_t ratov_j;
1797 
1798 	lockdep_assert_held(qp->qp_lock_ptr);
1799 
1800 	if (qla2x00_chip_is_down(vha)) {
1801 		sp->done(sp, res);
1802 		return;
1803 	}
1804 
1805 	if (sp->type == SRB_NVME_CMD || sp->type == SRB_NVME_LS ||
1806 	    (sp->type == SRB_SCSI_CMD && !ha->flags.eeh_busy &&
1807 	     !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
1808 	     !qla2x00_isp_reg_stat(ha))) {
1809 		if (sp->comp) {
1810 			sp->done(sp, res);
1811 			return;
1812 		}
1813 
1814 		sp->comp = &comp;
1815 		spin_unlock_irqrestore(qp->qp_lock_ptr, *flags);
1816 
1817 		rval = ha->isp_ops->abort_command(sp);
1818 		/* Wait for command completion. */
1819 		ret_cmd = false;
1820 		ratov_j = ha->r_a_tov/10 * 4 * 1000;
1821 		ratov_j = msecs_to_jiffies(ratov_j);
1822 		switch (rval) {
1823 		case QLA_SUCCESS:
1824 			if (wait_for_completion_timeout(&comp, ratov_j)) {
1825 				ql_dbg(ql_dbg_taskm, vha, 0xffff,
1826 				    "%s: Abort wait timer (4 * R_A_TOV[%d]) expired\n",
1827 				    __func__, ha->r_a_tov/10);
1828 				ret_cmd = true;
1829 			}
1830 			/* else FW return SP to driver */
1831 			break;
1832 		default:
1833 			ret_cmd = true;
1834 			break;
1835 		}
1836 
1837 		spin_lock_irqsave(qp->qp_lock_ptr, *flags);
1838 		if (ret_cmd && blk_mq_request_started(scsi_cmd_to_rq(cmd)))
1839 			sp->done(sp, res);
1840 	} else {
1841 		sp->done(sp, res);
1842 	}
1843 }
1844 
1845 /*
1846  * The caller must ensure that no completion interrupts will happen
1847  * while this function is in progress.
1848  */
1849 static void
1850 __qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
1851 {
1852 	int cnt;
1853 	unsigned long flags;
1854 	srb_t *sp;
1855 	scsi_qla_host_t *vha = qp->vha;
1856 	struct qla_hw_data *ha = vha->hw;
1857 	struct req_que *req;
1858 	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
1859 	struct qla_tgt_cmd *cmd;
1860 
1861 	if (!ha->req_q_map)
1862 		return;
1863 	spin_lock_irqsave(qp->qp_lock_ptr, flags);
1864 	req = qp->req;
1865 	for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
1866 		sp = req->outstanding_cmds[cnt];
1867 		if (sp) {
1868 			/*
1869 			 * perform lockless completion during driver unload
1870 			 */
1871 			if (qla2x00_chip_is_down(vha)) {
1872 				req->outstanding_cmds[cnt] = NULL;
1873 				spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
1874 				sp->done(sp, res);
1875 				spin_lock_irqsave(qp->qp_lock_ptr, flags);
1876 				continue;
1877 			}
1878 
1879 			switch (sp->cmd_type) {
1880 			case TYPE_SRB:
1881 				qla2x00_abort_srb(qp, sp, res, &flags);
1882 				break;
1883 			case TYPE_TGT_CMD:
1884 				if (!vha->hw->tgt.tgt_ops || !tgt ||
1885 				    qla_ini_mode_enabled(vha)) {
1886 					ql_dbg(ql_dbg_tgt_mgt, vha, 0xf003,
1887 					    "HOST-ABORT-HNDLR: dpc_flags=%lx. Target mode disabled\n",
1888 					    vha->dpc_flags);
1889 					continue;
1890 				}
1891 				cmd = (struct qla_tgt_cmd *)sp;
1892 				cmd->aborted = 1;
1893 				break;
1894 			case TYPE_TGT_TMCMD:
1895 				/* Skip task management functions. */
1896 				break;
1897 			default:
1898 				break;
1899 			}
1900 			req->outstanding_cmds[cnt] = NULL;
1901 		}
1902 	}
1903 	spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
1904 }
1905 
1906 /*
1907  * The caller must ensure that no completion interrupts will happen
1908  * while this function is in progress.
1909  */
1910 void
1911 qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
1912 {
1913 	int que;
1914 	struct qla_hw_data *ha = vha->hw;
1915 
1916 	/* Continue only if initialization complete. */
1917 	if (!ha->base_qpair)
1918 		return;
1919 	__qla2x00_abort_all_cmds(ha->base_qpair, res);
1920 
1921 	if (!ha->queue_pair_map)
1922 		return;
1923 	for (que = 0; que < ha->max_qpairs; que++) {
1924 		if (!ha->queue_pair_map[que])
1925 			continue;
1926 
1927 		__qla2x00_abort_all_cmds(ha->queue_pair_map[que], res);
1928 	}
1929 }
1930 
1931 static int
1932 qla2xxx_slave_alloc(struct scsi_device *sdev)
1933 {
1934 	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
1935 
1936 	if (!rport || fc_remote_port_chkready(rport))
1937 		return -ENXIO;
1938 
1939 	sdev->hostdata = *(fc_port_t **)rport->dd_data;
1940 
1941 	return 0;
1942 }
1943 
1944 static int
1945 qla2xxx_slave_configure(struct scsi_device *sdev)
1946 {
1947 	scsi_qla_host_t *vha = shost_priv(sdev->host);
1948 	struct req_que *req = vha->req;
1949 
1950 	if (IS_T10_PI_CAPABLE(vha->hw))
1951 		blk_queue_update_dma_alignment(sdev->request_queue, 0x7);
1952 
1953 	scsi_change_queue_depth(sdev, req->max_q_depth);
1954 	return 0;
1955 }
1956 
1957 static void
1958 qla2xxx_slave_destroy(struct scsi_device *sdev)
1959 {
1960 	sdev->hostdata = NULL;
1961 }
1962 
1963 /**
1964  * qla2x00_config_dma_addressing() - Configure OS DMA addressing method.
1965  * @ha: HA context
1966  *
1967  * At exit, the @ha's flags.enable_64bit_addressing set to indicated
1968  * supported addressing method.
1969  */
1970 static void
1971 qla2x00_config_dma_addressing(struct qla_hw_data *ha)
1972 {
1973 	/* Assume a 32bit DMA mask. */
1974 	ha->flags.enable_64bit_addressing = 0;
1975 
1976 	if (!dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(64))) {
1977 		/* Any upper-dword bits set? */
1978 		if (MSD(dma_get_required_mask(&ha->pdev->dev)) &&
1979 		    !dma_set_coherent_mask(&ha->pdev->dev, DMA_BIT_MASK(64))) {
1980 			/* Ok, a 64bit DMA mask is applicable. */
1981 			ha->flags.enable_64bit_addressing = 1;
1982 			ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
1983 			ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
1984 			return;
1985 		}
1986 	}
1987 
1988 	dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(32));
1989 	dma_set_coherent_mask(&ha->pdev->dev, DMA_BIT_MASK(32));
1990 }
1991 
1992 static void
1993 qla2x00_enable_intrs(struct qla_hw_data *ha)
1994 {
1995 	unsigned long flags = 0;
1996 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1997 
1998 	spin_lock_irqsave(&ha->hardware_lock, flags);
1999 	ha->interrupts_on = 1;
2000 	/* enable risc and host interrupts */
2001 	wrt_reg_word(&reg->ictrl, ICR_EN_INT | ICR_EN_RISC);
2002 	rd_reg_word(&reg->ictrl);
2003 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
2004 
2005 }
2006 
2007 static void
2008 qla2x00_disable_intrs(struct qla_hw_data *ha)
2009 {
2010 	unsigned long flags = 0;
2011 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
2012 
2013 	spin_lock_irqsave(&ha->hardware_lock, flags);
2014 	ha->interrupts_on = 0;
2015 	/* disable risc and host interrupts */
2016 	wrt_reg_word(&reg->ictrl, 0);
2017 	rd_reg_word(&reg->ictrl);
2018 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
2019 }
2020 
2021 static void
2022 qla24xx_enable_intrs(struct qla_hw_data *ha)
2023 {
2024 	unsigned long flags = 0;
2025 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
2026 
2027 	spin_lock_irqsave(&ha->hardware_lock, flags);
2028 	ha->interrupts_on = 1;
2029 	wrt_reg_dword(&reg->ictrl, ICRX_EN_RISC_INT);
2030 	rd_reg_dword(&reg->ictrl);
2031 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
2032 }
2033 
2034 static void
2035 qla24xx_disable_intrs(struct qla_hw_data *ha)
2036 {
2037 	unsigned long flags = 0;
2038 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
2039 
2040 	if (IS_NOPOLLING_TYPE(ha))
2041 		return;
2042 	spin_lock_irqsave(&ha->hardware_lock, flags);
2043 	ha->interrupts_on = 0;
2044 	wrt_reg_dword(&reg->ictrl, 0);
2045 	rd_reg_dword(&reg->ictrl);
2046 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
2047 }
2048 
2049 static int
2050 qla2x00_iospace_config(struct qla_hw_data *ha)
2051 {
2052 	resource_size_t pio;
2053 	uint16_t msix;
2054 
2055 	if (pci_request_selected_regions(ha->pdev, ha->bars,
2056 	    QLA2XXX_DRIVER_NAME)) {
2057 		ql_log_pci(ql_log_fatal, ha->pdev, 0x0011,
2058 		    "Failed to reserve PIO/MMIO regions (%s), aborting.\n",
2059 		    pci_name(ha->pdev));
2060 		goto iospace_error_exit;
2061 	}
2062 	if (!(ha->bars & 1))
2063 		goto skip_pio;
2064 
2065 	/* We only need PIO for Flash operations on ISP2312 v2 chips. */
2066 	pio = pci_resource_start(ha->pdev, 0);
2067 	if (pci_resource_flags(ha->pdev, 0) & IORESOURCE_IO) {
2068 		if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) {
2069 			ql_log_pci(ql_log_warn, ha->pdev, 0x0012,
2070 			    "Invalid pci I/O region size (%s).\n",
2071 			    pci_name(ha->pdev));
2072 			pio = 0;
2073 		}
2074 	} else {
2075 		ql_log_pci(ql_log_warn, ha->pdev, 0x0013,
2076 		    "Region #0 no a PIO resource (%s).\n",
2077 		    pci_name(ha->pdev));
2078 		pio = 0;
2079 	}
2080 	ha->pio_address = pio;
2081 	ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0014,
2082 	    "PIO address=%llu.\n",
2083 	    (unsigned long long)ha->pio_address);
2084 
2085 skip_pio:
2086 	/* Use MMIO operations for all accesses. */
2087 	if (!(pci_resource_flags(ha->pdev, 1) & IORESOURCE_MEM)) {
2088 		ql_log_pci(ql_log_fatal, ha->pdev, 0x0015,
2089 		    "Region #1 not an MMIO resource (%s), aborting.\n",
2090 		    pci_name(ha->pdev));
2091 		goto iospace_error_exit;
2092 	}
2093 	if (pci_resource_len(ha->pdev, 1) < MIN_IOBASE_LEN) {
2094 		ql_log_pci(ql_log_fatal, ha->pdev, 0x0016,
2095 		    "Invalid PCI mem region size (%s), aborting.\n",
2096 		    pci_name(ha->pdev));
2097 		goto iospace_error_exit;
2098 	}
2099 
2100 	ha->iobase = ioremap(pci_resource_start(ha->pdev, 1), MIN_IOBASE_LEN);
2101 	if (!ha->iobase) {
2102 		ql_log_pci(ql_log_fatal, ha->pdev, 0x0017,
2103 		    "Cannot remap MMIO (%s), aborting.\n",
2104 		    pci_name(ha->pdev));
2105 		goto iospace_error_exit;
2106 	}
2107 
2108 	/* Determine queue resources */
2109 	ha->max_req_queues = ha->max_rsp_queues = 1;
2110 	ha->msix_count = QLA_BASE_VECTORS;
2111 
2112 	/* Check if FW supports MQ or not */
2113 	if (!(ha->fw_attributes & BIT_6))
2114 		goto mqiobase_exit;
2115 
2116 	if (!ql2xmqsupport || !ql2xnvmeenable ||
2117 	    (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
2118 		goto mqiobase_exit;
2119 
2120 	ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3),
2121 			pci_resource_len(ha->pdev, 3));
2122 	if (ha->mqiobase) {
2123 		ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0018,
2124 		    "MQIO Base=%p.\n", ha->mqiobase);
2125 		/* Read MSIX vector size of the board */
2126 		pci_read_config_word(ha->pdev, QLA_PCI_MSIX_CONTROL, &msix);
2127 		ha->msix_count = msix + 1;
2128 		/* Max queues are bounded by available msix vectors */
2129 		/* MB interrupt uses 1 vector */
2130 		ha->max_req_queues = ha->msix_count - 1;
2131 		ha->max_rsp_queues = ha->max_req_queues;
2132 		/* Queue pairs is the max value minus the base queue pair */
2133 		ha->max_qpairs = ha->max_rsp_queues - 1;
2134 		ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0188,
2135 		    "Max no of queues pairs: %d.\n", ha->max_qpairs);
2136 
2137 		ql_log_pci(ql_log_info, ha->pdev, 0x001a,
2138 		    "MSI-X vector count: %d.\n", ha->msix_count);
2139 	} else
2140 		ql_log_pci(ql_log_info, ha->pdev, 0x001b,
2141 		    "BAR 3 not enabled.\n");
2142 
2143 mqiobase_exit:
2144 	ql_dbg_pci(ql_dbg_init, ha->pdev, 0x001c,
2145 	    "MSIX Count: %d.\n", ha->msix_count);
2146 	return (0);
2147 
2148 iospace_error_exit:
2149 	return (-ENOMEM);
2150 }
2151 
2152 
2153 static int
2154 qla83xx_iospace_config(struct qla_hw_data *ha)
2155 {
2156 	uint16_t msix;
2157 
2158 	if (pci_request_selected_regions(ha->pdev, ha->bars,
2159 	    QLA2XXX_DRIVER_NAME)) {
2160 		ql_log_pci(ql_log_fatal, ha->pdev, 0x0117,
2161 		    "Failed to reserve PIO/MMIO regions (%s), aborting.\n",
2162 		    pci_name(ha->pdev));
2163 
2164 		goto iospace_error_exit;
2165 	}
2166 
2167 	/* Use MMIO operations for all accesses. */
2168 	if (!(pci_resource_flags(ha->pdev, 0) & IORESOURCE_MEM)) {
2169 		ql_log_pci(ql_log_warn, ha->pdev, 0x0118,
2170 		    "Invalid pci I/O region size (%s).\n",
2171 		    pci_name(ha->pdev));
2172 		goto iospace_error_exit;
2173 	}
2174 	if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) {
2175 		ql_log_pci(ql_log_warn, ha->pdev, 0x0119,
2176 		    "Invalid PCI mem region size (%s), aborting\n",
2177 			pci_name(ha->pdev));
2178 		goto iospace_error_exit;
2179 	}
2180 
2181 	ha->iobase = ioremap(pci_resource_start(ha->pdev, 0), MIN_IOBASE_LEN);
2182 	if (!ha->iobase) {
2183 		ql_log_pci(ql_log_fatal, ha->pdev, 0x011a,
2184 		    "Cannot remap MMIO (%s), aborting.\n",
2185 		    pci_name(ha->pdev));
2186 		goto iospace_error_exit;
2187 	}
2188 
2189 	/* 64bit PCI BAR - BAR2 will correspoond to region 4 */
2190 	/* 83XX 26XX always use MQ type access for queues
2191 	 * - mbar 2, a.k.a region 4 */
2192 	ha->max_req_queues = ha->max_rsp_queues = 1;
2193 	ha->msix_count = QLA_BASE_VECTORS;
2194 	ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 4),
2195 			pci_resource_len(ha->pdev, 4));
2196 
2197 	if (!ha->mqiobase) {
2198 		ql_log_pci(ql_log_fatal, ha->pdev, 0x011d,
2199 		    "BAR2/region4 not enabled\n");
2200 		goto mqiobase_exit;
2201 	}
2202 
2203 	ha->msixbase = ioremap(pci_resource_start(ha->pdev, 2),
2204 			pci_resource_len(ha->pdev, 2));
2205 	if (ha->msixbase) {
2206 		/* Read MSIX vector size of the board */
2207 		pci_read_config_word(ha->pdev,
2208 		    QLA_83XX_PCI_MSIX_CONTROL, &msix);
2209 		ha->msix_count = (msix & PCI_MSIX_FLAGS_QSIZE)  + 1;
2210 		/*
2211 		 * By default, driver uses at least two msix vectors
2212 		 * (default & rspq)
2213 		 */
2214 		if (ql2xmqsupport || ql2xnvmeenable) {
2215 			/* MB interrupt uses 1 vector */
2216 			ha->max_req_queues = ha->msix_count - 1;
2217 
2218 			/* ATIOQ needs 1 vector. That's 1 less QPair */
2219 			if (QLA_TGT_MODE_ENABLED())
2220 				ha->max_req_queues--;
2221 
2222 			ha->max_rsp_queues = ha->max_req_queues;
2223 
2224 			/* Queue pairs is the max value minus
2225 			 * the base queue pair */
2226 			ha->max_qpairs = ha->max_req_queues - 1;
2227 			ql_dbg_pci(ql_dbg_init, ha->pdev, 0x00e3,
2228 			    "Max no of queues pairs: %d.\n", ha->max_qpairs);
2229 		}
2230 		ql_log_pci(ql_log_info, ha->pdev, 0x011c,
2231 		    "MSI-X vector count: %d.\n", ha->msix_count);
2232 	} else
2233 		ql_log_pci(ql_log_info, ha->pdev, 0x011e,
2234 		    "BAR 1 not enabled.\n");
2235 
2236 mqiobase_exit:
2237 	ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011f,
2238 	    "MSIX Count: %d.\n", ha->msix_count);
2239 	return 0;
2240 
2241 iospace_error_exit:
2242 	return -ENOMEM;
2243 }
2244 
2245 static struct isp_operations qla2100_isp_ops = {
2246 	.pci_config		= qla2100_pci_config,
2247 	.reset_chip		= qla2x00_reset_chip,
2248 	.chip_diag		= qla2x00_chip_diag,
2249 	.config_rings		= qla2x00_config_rings,
2250 	.reset_adapter		= qla2x00_reset_adapter,
2251 	.nvram_config		= qla2x00_nvram_config,
2252 	.update_fw_options	= qla2x00_update_fw_options,
2253 	.load_risc		= qla2x00_load_risc,
2254 	.pci_info_str		= qla2x00_pci_info_str,
2255 	.fw_version_str		= qla2x00_fw_version_str,
2256 	.intr_handler		= qla2100_intr_handler,
2257 	.enable_intrs		= qla2x00_enable_intrs,
2258 	.disable_intrs		= qla2x00_disable_intrs,
2259 	.abort_command		= qla2x00_abort_command,
2260 	.target_reset		= qla2x00_abort_target,
2261 	.lun_reset		= qla2x00_lun_reset,
2262 	.fabric_login		= qla2x00_login_fabric,
2263 	.fabric_logout		= qla2x00_fabric_logout,
2264 	.calc_req_entries	= qla2x00_calc_iocbs_32,
2265 	.build_iocbs		= qla2x00_build_scsi_iocbs_32,
2266 	.prep_ms_iocb		= qla2x00_prep_ms_iocb,
2267 	.prep_ms_fdmi_iocb	= qla2x00_prep_ms_fdmi_iocb,
2268 	.read_nvram		= qla2x00_read_nvram_data,
2269 	.write_nvram		= qla2x00_write_nvram_data,
2270 	.fw_dump		= qla2100_fw_dump,
2271 	.beacon_on		= NULL,
2272 	.beacon_off		= NULL,
2273 	.beacon_blink		= NULL,
2274 	.read_optrom		= qla2x00_read_optrom_data,
2275 	.write_optrom		= qla2x00_write_optrom_data,
2276 	.get_flash_version	= qla2x00_get_flash_version,
2277 	.start_scsi		= qla2x00_start_scsi,
2278 	.start_scsi_mq          = NULL,
2279 	.abort_isp		= qla2x00_abort_isp,
2280 	.iospace_config     	= qla2x00_iospace_config,
2281 	.initialize_adapter	= qla2x00_initialize_adapter,
2282 };
2283 
2284 static struct isp_operations qla2300_isp_ops = {
2285 	.pci_config		= qla2300_pci_config,
2286 	.reset_chip		= qla2x00_reset_chip,
2287 	.chip_diag		= qla2x00_chip_diag,
2288 	.config_rings		= qla2x00_config_rings,
2289 	.reset_adapter		= qla2x00_reset_adapter,
2290 	.nvram_config		= qla2x00_nvram_config,
2291 	.update_fw_options	= qla2x00_update_fw_options,
2292 	.load_risc		= qla2x00_load_risc,
2293 	.pci_info_str		= qla2x00_pci_info_str,
2294 	.fw_version_str		= qla2x00_fw_version_str,
2295 	.intr_handler		= qla2300_intr_handler,
2296 	.enable_intrs		= qla2x00_enable_intrs,
2297 	.disable_intrs		= qla2x00_disable_intrs,
2298 	.abort_command		= qla2x00_abort_command,
2299 	.target_reset		= qla2x00_abort_target,
2300 	.lun_reset		= qla2x00_lun_reset,
2301 	.fabric_login		= qla2x00_login_fabric,
2302 	.fabric_logout		= qla2x00_fabric_logout,
2303 	.calc_req_entries	= qla2x00_calc_iocbs_32,
2304 	.build_iocbs		= qla2x00_build_scsi_iocbs_32,
2305 	.prep_ms_iocb		= qla2x00_prep_ms_iocb,
2306 	.prep_ms_fdmi_iocb	= qla2x00_prep_ms_fdmi_iocb,
2307 	.read_nvram		= qla2x00_read_nvram_data,
2308 	.write_nvram		= qla2x00_write_nvram_data,
2309 	.fw_dump		= qla2300_fw_dump,
2310 	.beacon_on		= qla2x00_beacon_on,
2311 	.beacon_off		= qla2x00_beacon_off,
2312 	.beacon_blink		= qla2x00_beacon_blink,
2313 	.read_optrom		= qla2x00_read_optrom_data,
2314 	.write_optrom		= qla2x00_write_optrom_data,
2315 	.get_flash_version	= qla2x00_get_flash_version,
2316 	.start_scsi		= qla2x00_start_scsi,
2317 	.start_scsi_mq          = NULL,
2318 	.abort_isp		= qla2x00_abort_isp,
2319 	.iospace_config		= qla2x00_iospace_config,
2320 	.initialize_adapter	= qla2x00_initialize_adapter,
2321 };
2322 
2323 static struct isp_operations qla24xx_isp_ops = {
2324 	.pci_config		= qla24xx_pci_config,
2325 	.reset_chip		= qla24xx_reset_chip,
2326 	.chip_diag		= qla24xx_chip_diag,
2327 	.config_rings		= qla24xx_config_rings,
2328 	.reset_adapter		= qla24xx_reset_adapter,
2329 	.nvram_config		= qla24xx_nvram_config,
2330 	.update_fw_options	= qla24xx_update_fw_options,
2331 	.load_risc		= qla24xx_load_risc,
2332 	.pci_info_str		= qla24xx_pci_info_str,
2333 	.fw_version_str		= qla24xx_fw_version_str,
2334 	.intr_handler		= qla24xx_intr_handler,
2335 	.enable_intrs		= qla24xx_enable_intrs,
2336 	.disable_intrs		= qla24xx_disable_intrs,
2337 	.abort_command		= qla24xx_abort_command,
2338 	.target_reset		= qla24xx_abort_target,
2339 	.lun_reset		= qla24xx_lun_reset,
2340 	.fabric_login		= qla24xx_login_fabric,
2341 	.fabric_logout		= qla24xx_fabric_logout,
2342 	.calc_req_entries	= NULL,
2343 	.build_iocbs		= NULL,
2344 	.prep_ms_iocb		= qla24xx_prep_ms_iocb,
2345 	.prep_ms_fdmi_iocb	= qla24xx_prep_ms_fdmi_iocb,
2346 	.read_nvram		= qla24xx_read_nvram_data,
2347 	.write_nvram		= qla24xx_write_nvram_data,
2348 	.fw_dump		= qla24xx_fw_dump,
2349 	.beacon_on		= qla24xx_beacon_on,
2350 	.beacon_off		= qla24xx_beacon_off,
2351 	.beacon_blink		= qla24xx_beacon_blink,
2352 	.read_optrom		= qla24xx_read_optrom_data,
2353 	.write_optrom		= qla24xx_write_optrom_data,
2354 	.get_flash_version	= qla24xx_get_flash_version,
2355 	.start_scsi		= qla24xx_start_scsi,
2356 	.start_scsi_mq          = NULL,
2357 	.abort_isp		= qla2x00_abort_isp,
2358 	.iospace_config		= qla2x00_iospace_config,
2359 	.initialize_adapter	= qla2x00_initialize_adapter,
2360 };
2361 
2362 static struct isp_operations qla25xx_isp_ops = {
2363 	.pci_config		= qla25xx_pci_config,
2364 	.reset_chip		= qla24xx_reset_chip,
2365 	.chip_diag		= qla24xx_chip_diag,
2366 	.config_rings		= qla24xx_config_rings,
2367 	.reset_adapter		= qla24xx_reset_adapter,
2368 	.nvram_config		= qla24xx_nvram_config,
2369 	.update_fw_options	= qla24xx_update_fw_options,
2370 	.load_risc		= qla24xx_load_risc,
2371 	.pci_info_str		= qla24xx_pci_info_str,
2372 	.fw_version_str		= qla24xx_fw_version_str,
2373 	.intr_handler		= qla24xx_intr_handler,
2374 	.enable_intrs		= qla24xx_enable_intrs,
2375 	.disable_intrs		= qla24xx_disable_intrs,
2376 	.abort_command		= qla24xx_abort_command,
2377 	.target_reset		= qla24xx_abort_target,
2378 	.lun_reset		= qla24xx_lun_reset,
2379 	.fabric_login		= qla24xx_login_fabric,
2380 	.fabric_logout		= qla24xx_fabric_logout,
2381 	.calc_req_entries	= NULL,
2382 	.build_iocbs		= NULL,
2383 	.prep_ms_iocb		= qla24xx_prep_ms_iocb,
2384 	.prep_ms_fdmi_iocb	= qla24xx_prep_ms_fdmi_iocb,
2385 	.read_nvram		= qla25xx_read_nvram_data,
2386 	.write_nvram		= qla25xx_write_nvram_data,
2387 	.fw_dump		= qla25xx_fw_dump,
2388 	.beacon_on		= qla24xx_beacon_on,
2389 	.beacon_off		= qla24xx_beacon_off,
2390 	.beacon_blink		= qla24xx_beacon_blink,
2391 	.read_optrom		= qla25xx_read_optrom_data,
2392 	.write_optrom		= qla24xx_write_optrom_data,
2393 	.get_flash_version	= qla24xx_get_flash_version,
2394 	.start_scsi		= qla24xx_dif_start_scsi,
2395 	.start_scsi_mq          = qla2xxx_dif_start_scsi_mq,
2396 	.abort_isp		= qla2x00_abort_isp,
2397 	.iospace_config		= qla2x00_iospace_config,
2398 	.initialize_adapter	= qla2x00_initialize_adapter,
2399 };
2400 
2401 static struct isp_operations qla81xx_isp_ops = {
2402 	.pci_config		= qla25xx_pci_config,
2403 	.reset_chip		= qla24xx_reset_chip,
2404 	.chip_diag		= qla24xx_chip_diag,
2405 	.config_rings		= qla24xx_config_rings,
2406 	.reset_adapter		= qla24xx_reset_adapter,
2407 	.nvram_config		= qla81xx_nvram_config,
2408 	.update_fw_options	= qla24xx_update_fw_options,
2409 	.load_risc		= qla81xx_load_risc,
2410 	.pci_info_str		= qla24xx_pci_info_str,
2411 	.fw_version_str		= qla24xx_fw_version_str,
2412 	.intr_handler		= qla24xx_intr_handler,
2413 	.enable_intrs		= qla24xx_enable_intrs,
2414 	.disable_intrs		= qla24xx_disable_intrs,
2415 	.abort_command		= qla24xx_abort_command,
2416 	.target_reset		= qla24xx_abort_target,
2417 	.lun_reset		= qla24xx_lun_reset,
2418 	.fabric_login		= qla24xx_login_fabric,
2419 	.fabric_logout		= qla24xx_fabric_logout,
2420 	.calc_req_entries	= NULL,
2421 	.build_iocbs		= NULL,
2422 	.prep_ms_iocb		= qla24xx_prep_ms_iocb,
2423 	.prep_ms_fdmi_iocb	= qla24xx_prep_ms_fdmi_iocb,
2424 	.read_nvram		= NULL,
2425 	.write_nvram		= NULL,
2426 	.fw_dump		= qla81xx_fw_dump,
2427 	.beacon_on		= qla24xx_beacon_on,
2428 	.beacon_off		= qla24xx_beacon_off,
2429 	.beacon_blink		= qla83xx_beacon_blink,
2430 	.read_optrom		= qla25xx_read_optrom_data,
2431 	.write_optrom		= qla24xx_write_optrom_data,
2432 	.get_flash_version	= qla24xx_get_flash_version,
2433 	.start_scsi		= qla24xx_dif_start_scsi,
2434 	.start_scsi_mq          = qla2xxx_dif_start_scsi_mq,
2435 	.abort_isp		= qla2x00_abort_isp,
2436 	.iospace_config		= qla2x00_iospace_config,
2437 	.initialize_adapter	= qla2x00_initialize_adapter,
2438 };
2439 
2440 static struct isp_operations qla82xx_isp_ops = {
2441 	.pci_config		= qla82xx_pci_config,
2442 	.reset_chip		= qla82xx_reset_chip,
2443 	.chip_diag		= qla24xx_chip_diag,
2444 	.config_rings		= qla82xx_config_rings,
2445 	.reset_adapter		= qla24xx_reset_adapter,
2446 	.nvram_config		= qla81xx_nvram_config,
2447 	.update_fw_options	= qla24xx_update_fw_options,
2448 	.load_risc		= qla82xx_load_risc,
2449 	.pci_info_str		= qla24xx_pci_info_str,
2450 	.fw_version_str		= qla24xx_fw_version_str,
2451 	.intr_handler		= qla82xx_intr_handler,
2452 	.enable_intrs		= qla82xx_enable_intrs,
2453 	.disable_intrs		= qla82xx_disable_intrs,
2454 	.abort_command		= qla24xx_abort_command,
2455 	.target_reset		= qla24xx_abort_target,
2456 	.lun_reset		= qla24xx_lun_reset,
2457 	.fabric_login		= qla24xx_login_fabric,
2458 	.fabric_logout		= qla24xx_fabric_logout,
2459 	.calc_req_entries	= NULL,
2460 	.build_iocbs		= NULL,
2461 	.prep_ms_iocb		= qla24xx_prep_ms_iocb,
2462 	.prep_ms_fdmi_iocb	= qla24xx_prep_ms_fdmi_iocb,
2463 	.read_nvram		= qla24xx_read_nvram_data,
2464 	.write_nvram		= qla24xx_write_nvram_data,
2465 	.fw_dump		= qla82xx_fw_dump,
2466 	.beacon_on		= qla82xx_beacon_on,
2467 	.beacon_off		= qla82xx_beacon_off,
2468 	.beacon_blink		= NULL,
2469 	.read_optrom		= qla82xx_read_optrom_data,
2470 	.write_optrom		= qla82xx_write_optrom_data,
2471 	.get_flash_version	= qla82xx_get_flash_version,
2472 	.start_scsi             = qla82xx_start_scsi,
2473 	.start_scsi_mq          = NULL,
2474 	.abort_isp		= qla82xx_abort_isp,
2475 	.iospace_config     	= qla82xx_iospace_config,
2476 	.initialize_adapter	= qla2x00_initialize_adapter,
2477 };
2478 
2479 static struct isp_operations qla8044_isp_ops = {
2480 	.pci_config		= qla82xx_pci_config,
2481 	.reset_chip		= qla82xx_reset_chip,
2482 	.chip_diag		= qla24xx_chip_diag,
2483 	.config_rings		= qla82xx_config_rings,
2484 	.reset_adapter		= qla24xx_reset_adapter,
2485 	.nvram_config		= qla81xx_nvram_config,
2486 	.update_fw_options	= qla24xx_update_fw_options,
2487 	.load_risc		= qla82xx_load_risc,
2488 	.pci_info_str		= qla24xx_pci_info_str,
2489 	.fw_version_str		= qla24xx_fw_version_str,
2490 	.intr_handler		= qla8044_intr_handler,
2491 	.enable_intrs		= qla82xx_enable_intrs,
2492 	.disable_intrs		= qla82xx_disable_intrs,
2493 	.abort_command		= qla24xx_abort_command,
2494 	.target_reset		= qla24xx_abort_target,
2495 	.lun_reset		= qla24xx_lun_reset,
2496 	.fabric_login		= qla24xx_login_fabric,
2497 	.fabric_logout		= qla24xx_fabric_logout,
2498 	.calc_req_entries	= NULL,
2499 	.build_iocbs		= NULL,
2500 	.prep_ms_iocb		= qla24xx_prep_ms_iocb,
2501 	.prep_ms_fdmi_iocb	= qla24xx_prep_ms_fdmi_iocb,
2502 	.read_nvram		= NULL,
2503 	.write_nvram		= NULL,
2504 	.fw_dump		= qla8044_fw_dump,
2505 	.beacon_on		= qla82xx_beacon_on,
2506 	.beacon_off		= qla82xx_beacon_off,
2507 	.beacon_blink		= NULL,
2508 	.read_optrom		= qla8044_read_optrom_data,
2509 	.write_optrom		= qla8044_write_optrom_data,
2510 	.get_flash_version	= qla82xx_get_flash_version,
2511 	.start_scsi             = qla82xx_start_scsi,
2512 	.start_scsi_mq          = NULL,
2513 	.abort_isp		= qla8044_abort_isp,
2514 	.iospace_config		= qla82xx_iospace_config,
2515 	.initialize_adapter	= qla2x00_initialize_adapter,
2516 };
2517 
2518 static struct isp_operations qla83xx_isp_ops = {
2519 	.pci_config		= qla25xx_pci_config,
2520 	.reset_chip		= qla24xx_reset_chip,
2521 	.chip_diag		= qla24xx_chip_diag,
2522 	.config_rings		= qla24xx_config_rings,
2523 	.reset_adapter		= qla24xx_reset_adapter,
2524 	.nvram_config		= qla81xx_nvram_config,
2525 	.update_fw_options	= qla24xx_update_fw_options,
2526 	.load_risc		= qla81xx_load_risc,
2527 	.pci_info_str		= qla24xx_pci_info_str,
2528 	.fw_version_str		= qla24xx_fw_version_str,
2529 	.intr_handler		= qla24xx_intr_handler,
2530 	.enable_intrs		= qla24xx_enable_intrs,
2531 	.disable_intrs		= qla24xx_disable_intrs,
2532 	.abort_command		= qla24xx_abort_command,
2533 	.target_reset		= qla24xx_abort_target,
2534 	.lun_reset		= qla24xx_lun_reset,
2535 	.fabric_login		= qla24xx_login_fabric,
2536 	.fabric_logout		= qla24xx_fabric_logout,
2537 	.calc_req_entries	= NULL,
2538 	.build_iocbs		= NULL,
2539 	.prep_ms_iocb		= qla24xx_prep_ms_iocb,
2540 	.prep_ms_fdmi_iocb	= qla24xx_prep_ms_fdmi_iocb,
2541 	.read_nvram		= NULL,
2542 	.write_nvram		= NULL,
2543 	.fw_dump		= qla83xx_fw_dump,
2544 	.beacon_on		= qla24xx_beacon_on,
2545 	.beacon_off		= qla24xx_beacon_off,
2546 	.beacon_blink		= qla83xx_beacon_blink,
2547 	.read_optrom		= qla25xx_read_optrom_data,
2548 	.write_optrom		= qla24xx_write_optrom_data,
2549 	.get_flash_version	= qla24xx_get_flash_version,
2550 	.start_scsi		= qla24xx_dif_start_scsi,
2551 	.start_scsi_mq          = qla2xxx_dif_start_scsi_mq,
2552 	.abort_isp		= qla2x00_abort_isp,
2553 	.iospace_config		= qla83xx_iospace_config,
2554 	.initialize_adapter	= qla2x00_initialize_adapter,
2555 };
2556 
2557 static struct isp_operations qlafx00_isp_ops = {
2558 	.pci_config		= qlafx00_pci_config,
2559 	.reset_chip		= qlafx00_soft_reset,
2560 	.chip_diag		= qlafx00_chip_diag,
2561 	.config_rings		= qlafx00_config_rings,
2562 	.reset_adapter		= qlafx00_soft_reset,
2563 	.nvram_config		= NULL,
2564 	.update_fw_options	= NULL,
2565 	.load_risc		= NULL,
2566 	.pci_info_str		= qlafx00_pci_info_str,
2567 	.fw_version_str		= qlafx00_fw_version_str,
2568 	.intr_handler		= qlafx00_intr_handler,
2569 	.enable_intrs		= qlafx00_enable_intrs,
2570 	.disable_intrs		= qlafx00_disable_intrs,
2571 	.abort_command		= qla24xx_async_abort_command,
2572 	.target_reset		= qlafx00_abort_target,
2573 	.lun_reset		= qlafx00_lun_reset,
2574 	.fabric_login		= NULL,
2575 	.fabric_logout		= NULL,
2576 	.calc_req_entries	= NULL,
2577 	.build_iocbs		= NULL,
2578 	.prep_ms_iocb		= qla24xx_prep_ms_iocb,
2579 	.prep_ms_fdmi_iocb	= qla24xx_prep_ms_fdmi_iocb,
2580 	.read_nvram		= qla24xx_read_nvram_data,
2581 	.write_nvram		= qla24xx_write_nvram_data,
2582 	.fw_dump		= NULL,
2583 	.beacon_on		= qla24xx_beacon_on,
2584 	.beacon_off		= qla24xx_beacon_off,
2585 	.beacon_blink		= NULL,
2586 	.read_optrom		= qla24xx_read_optrom_data,
2587 	.write_optrom		= qla24xx_write_optrom_data,
2588 	.get_flash_version	= qla24xx_get_flash_version,
2589 	.start_scsi		= qlafx00_start_scsi,
2590 	.start_scsi_mq          = NULL,
2591 	.abort_isp		= qlafx00_abort_isp,
2592 	.iospace_config		= qlafx00_iospace_config,
2593 	.initialize_adapter	= qlafx00_initialize_adapter,
2594 };
2595 
2596 static struct isp_operations qla27xx_isp_ops = {
2597 	.pci_config		= qla25xx_pci_config,
2598 	.reset_chip		= qla24xx_reset_chip,
2599 	.chip_diag		= qla24xx_chip_diag,
2600 	.config_rings		= qla24xx_config_rings,
2601 	.reset_adapter		= qla24xx_reset_adapter,
2602 	.nvram_config		= qla81xx_nvram_config,
2603 	.update_fw_options	= qla24xx_update_fw_options,
2604 	.load_risc		= qla81xx_load_risc,
2605 	.pci_info_str		= qla24xx_pci_info_str,
2606 	.fw_version_str		= qla24xx_fw_version_str,
2607 	.intr_handler		= qla24xx_intr_handler,
2608 	.enable_intrs		= qla24xx_enable_intrs,
2609 	.disable_intrs		= qla24xx_disable_intrs,
2610 	.abort_command		= qla24xx_abort_command,
2611 	.target_reset		= qla24xx_abort_target,
2612 	.lun_reset		= qla24xx_lun_reset,
2613 	.fabric_login		= qla24xx_login_fabric,
2614 	.fabric_logout		= qla24xx_fabric_logout,
2615 	.calc_req_entries	= NULL,
2616 	.build_iocbs		= NULL,
2617 	.prep_ms_iocb		= qla24xx_prep_ms_iocb,
2618 	.prep_ms_fdmi_iocb	= qla24xx_prep_ms_fdmi_iocb,
2619 	.read_nvram		= NULL,
2620 	.write_nvram		= NULL,
2621 	.fw_dump		= qla27xx_fwdump,
2622 	.mpi_fw_dump		= qla27xx_mpi_fwdump,
2623 	.beacon_on		= qla24xx_beacon_on,
2624 	.beacon_off		= qla24xx_beacon_off,
2625 	.beacon_blink		= qla83xx_beacon_blink,
2626 	.read_optrom		= qla25xx_read_optrom_data,
2627 	.write_optrom		= qla24xx_write_optrom_data,
2628 	.get_flash_version	= qla24xx_get_flash_version,
2629 	.start_scsi		= qla24xx_dif_start_scsi,
2630 	.start_scsi_mq          = qla2xxx_dif_start_scsi_mq,
2631 	.abort_isp		= qla2x00_abort_isp,
2632 	.iospace_config		= qla83xx_iospace_config,
2633 	.initialize_adapter	= qla2x00_initialize_adapter,
2634 };
2635 
2636 static inline void
2637 qla2x00_set_isp_flags(struct qla_hw_data *ha)
2638 {
2639 	ha->device_type = DT_EXTENDED_IDS;
2640 	switch (ha->pdev->device) {
2641 	case PCI_DEVICE_ID_QLOGIC_ISP2100:
2642 		ha->isp_type |= DT_ISP2100;
2643 		ha->device_type &= ~DT_EXTENDED_IDS;
2644 		ha->fw_srisc_address = RISC_START_ADDRESS_2100;
2645 		break;
2646 	case PCI_DEVICE_ID_QLOGIC_ISP2200:
2647 		ha->isp_type |= DT_ISP2200;
2648 		ha->device_type &= ~DT_EXTENDED_IDS;
2649 		ha->fw_srisc_address = RISC_START_ADDRESS_2100;
2650 		break;
2651 	case PCI_DEVICE_ID_QLOGIC_ISP2300:
2652 		ha->isp_type |= DT_ISP2300;
2653 		ha->device_type |= DT_ZIO_SUPPORTED;
2654 		ha->fw_srisc_address = RISC_START_ADDRESS_2300;
2655 		break;
2656 	case PCI_DEVICE_ID_QLOGIC_ISP2312:
2657 		ha->isp_type |= DT_ISP2312;
2658 		ha->device_type |= DT_ZIO_SUPPORTED;
2659 		ha->fw_srisc_address = RISC_START_ADDRESS_2300;
2660 		break;
2661 	case PCI_DEVICE_ID_QLOGIC_ISP2322:
2662 		ha->isp_type |= DT_ISP2322;
2663 		ha->device_type |= DT_ZIO_SUPPORTED;
2664 		if (ha->pdev->subsystem_vendor == 0x1028 &&
2665 		    ha->pdev->subsystem_device == 0x0170)
2666 			ha->device_type |= DT_OEM_001;
2667 		ha->fw_srisc_address = RISC_START_ADDRESS_2300;
2668 		break;
2669 	case PCI_DEVICE_ID_QLOGIC_ISP6312:
2670 		ha->isp_type |= DT_ISP6312;
2671 		ha->fw_srisc_address = RISC_START_ADDRESS_2300;
2672 		break;
2673 	case PCI_DEVICE_ID_QLOGIC_ISP6322:
2674 		ha->isp_type |= DT_ISP6322;
2675 		ha->fw_srisc_address = RISC_START_ADDRESS_2300;
2676 		break;
2677 	case PCI_DEVICE_ID_QLOGIC_ISP2422:
2678 		ha->isp_type |= DT_ISP2422;
2679 		ha->device_type |= DT_ZIO_SUPPORTED;
2680 		ha->device_type |= DT_FWI2;
2681 		ha->device_type |= DT_IIDMA;
2682 		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
2683 		break;
2684 	case PCI_DEVICE_ID_QLOGIC_ISP2432:
2685 		ha->isp_type |= DT_ISP2432;
2686 		ha->device_type |= DT_ZIO_SUPPORTED;
2687 		ha->device_type |= DT_FWI2;
2688 		ha->device_type |= DT_IIDMA;
2689 		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
2690 		break;
2691 	case PCI_DEVICE_ID_QLOGIC_ISP8432:
2692 		ha->isp_type |= DT_ISP8432;
2693 		ha->device_type |= DT_ZIO_SUPPORTED;
2694 		ha->device_type |= DT_FWI2;
2695 		ha->device_type |= DT_IIDMA;
2696 		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
2697 		break;
2698 	case PCI_DEVICE_ID_QLOGIC_ISP5422:
2699 		ha->isp_type |= DT_ISP5422;
2700 		ha->device_type |= DT_FWI2;
2701 		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
2702 		break;
2703 	case PCI_DEVICE_ID_QLOGIC_ISP5432:
2704 		ha->isp_type |= DT_ISP5432;
2705 		ha->device_type |= DT_FWI2;
2706 		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
2707 		break;
2708 	case PCI_DEVICE_ID_QLOGIC_ISP2532:
2709 		ha->isp_type |= DT_ISP2532;
2710 		ha->device_type |= DT_ZIO_SUPPORTED;
2711 		ha->device_type |= DT_FWI2;
2712 		ha->device_type |= DT_IIDMA;
2713 		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
2714 		break;
2715 	case PCI_DEVICE_ID_QLOGIC_ISP8001:
2716 		ha->isp_type |= DT_ISP8001;
2717 		ha->device_type |= DT_ZIO_SUPPORTED;
2718 		ha->device_type |= DT_FWI2;
2719 		ha->device_type |= DT_IIDMA;
2720 		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
2721 		break;
2722 	case PCI_DEVICE_ID_QLOGIC_ISP8021:
2723 		ha->isp_type |= DT_ISP8021;
2724 		ha->device_type |= DT_ZIO_SUPPORTED;
2725 		ha->device_type |= DT_FWI2;
2726 		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
2727 		/* Initialize 82XX ISP flags */
2728 		qla82xx_init_flags(ha);
2729 		break;
2730 	 case PCI_DEVICE_ID_QLOGIC_ISP8044:
2731 		ha->isp_type |= DT_ISP8044;
2732 		ha->device_type |= DT_ZIO_SUPPORTED;
2733 		ha->device_type |= DT_FWI2;
2734 		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
2735 		/* Initialize 82XX ISP flags */
2736 		qla82xx_init_flags(ha);
2737 		break;
2738 	case PCI_DEVICE_ID_QLOGIC_ISP2031:
2739 		ha->isp_type |= DT_ISP2031;
2740 		ha->device_type |= DT_ZIO_SUPPORTED;
2741 		ha->device_type |= DT_FWI2;
2742 		ha->device_type |= DT_IIDMA;
2743 		ha->device_type |= DT_T10_PI;
2744 		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
2745 		break;
2746 	case PCI_DEVICE_ID_QLOGIC_ISP8031:
2747 		ha->isp_type |= DT_ISP8031;
2748 		ha->device_type |= DT_ZIO_SUPPORTED;
2749 		ha->device_type |= DT_FWI2;
2750 		ha->device_type |= DT_IIDMA;
2751 		ha->device_type |= DT_T10_PI;
2752 		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
2753 		break;
2754 	case PCI_DEVICE_ID_QLOGIC_ISPF001:
2755 		ha->isp_type |= DT_ISPFX00;
2756 		break;
2757 	case PCI_DEVICE_ID_QLOGIC_ISP2071:
2758 		ha->isp_type |= DT_ISP2071;
2759 		ha->device_type |= DT_ZIO_SUPPORTED;
2760 		ha->device_type |= DT_FWI2;
2761 		ha->device_type |= DT_IIDMA;
2762 		ha->device_type |= DT_T10_PI;
2763 		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
2764 		break;
2765 	case PCI_DEVICE_ID_QLOGIC_ISP2271:
2766 		ha->isp_type |= DT_ISP2271;
2767 		ha->device_type |= DT_ZIO_SUPPORTED;
2768 		ha->device_type |= DT_FWI2;
2769 		ha->device_type |= DT_IIDMA;
2770 		ha->device_type |= DT_T10_PI;
2771 		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
2772 		break;
2773 	case PCI_DEVICE_ID_QLOGIC_ISP2261:
2774 		ha->isp_type |= DT_ISP2261;
2775 		ha->device_type |= DT_ZIO_SUPPORTED;
2776 		ha->device_type |= DT_FWI2;
2777 		ha->device_type |= DT_IIDMA;
2778 		ha->device_type |= DT_T10_PI;
2779 		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
2780 		break;
2781 	case PCI_DEVICE_ID_QLOGIC_ISP2081:
2782 	case PCI_DEVICE_ID_QLOGIC_ISP2089:
2783 		ha->isp_type |= DT_ISP2081;
2784 		ha->device_type |= DT_ZIO_SUPPORTED;
2785 		ha->device_type |= DT_FWI2;
2786 		ha->device_type |= DT_IIDMA;
2787 		ha->device_type |= DT_T10_PI;
2788 		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
2789 		break;
2790 	case PCI_DEVICE_ID_QLOGIC_ISP2281:
2791 	case PCI_DEVICE_ID_QLOGIC_ISP2289:
2792 		ha->isp_type |= DT_ISP2281;
2793 		ha->device_type |= DT_ZIO_SUPPORTED;
2794 		ha->device_type |= DT_FWI2;
2795 		ha->device_type |= DT_IIDMA;
2796 		ha->device_type |= DT_T10_PI;
2797 		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
2798 		break;
2799 	}
2800 
2801 	if (IS_QLA82XX(ha))
2802 		ha->port_no = ha->portnum & 1;
2803 	else {
2804 		/* Get adapter physical port no from interrupt pin register. */
2805 		pci_read_config_byte(ha->pdev, PCI_INTERRUPT_PIN, &ha->port_no);
2806 		if (IS_QLA25XX(ha) || IS_QLA2031(ha) ||
2807 		    IS_QLA27XX(ha) || IS_QLA28XX(ha))
2808 			ha->port_no--;
2809 		else
2810 			ha->port_no = !(ha->port_no & 1);
2811 	}
2812 
2813 	ql_dbg_pci(ql_dbg_init, ha->pdev, 0x000b,
2814 	    "device_type=0x%x port=%d fw_srisc_address=0x%x.\n",
2815 	    ha->device_type, ha->port_no, ha->fw_srisc_address);
2816 }
2817 
2818 static void
2819 qla2xxx_scan_start(struct Scsi_Host *shost)
2820 {
2821 	scsi_qla_host_t *vha = shost_priv(shost);
2822 
2823 	if (vha->hw->flags.running_gold_fw)
2824 		return;
2825 
2826 	set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
2827 	set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
2828 	set_bit(RSCN_UPDATE, &vha->dpc_flags);
2829 	set_bit(NPIV_CONFIG_NEEDED, &vha->dpc_flags);
2830 }
2831 
2832 static int
2833 qla2xxx_scan_finished(struct Scsi_Host *shost, unsigned long time)
2834 {
2835 	scsi_qla_host_t *vha = shost_priv(shost);
2836 
2837 	if (test_bit(UNLOADING, &vha->dpc_flags))
2838 		return 1;
2839 	if (!vha->host)
2840 		return 1;
2841 	if (time > vha->hw->loop_reset_delay * HZ)
2842 		return 1;
2843 
2844 	return atomic_read(&vha->loop_state) == LOOP_READY;
2845 }
2846 
2847 static void qla_heartbeat_work_fn(struct work_struct *work)
2848 {
2849 	struct qla_hw_data *ha = container_of(work,
2850 		struct qla_hw_data, heartbeat_work);
2851 	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
2852 
2853 	if (!ha->flags.mbox_busy && base_vha->flags.init_done)
2854 		qla_no_op_mb(base_vha);
2855 }
2856 
2857 static void qla2x00_iocb_work_fn(struct work_struct *work)
2858 {
2859 	struct scsi_qla_host *vha = container_of(work,
2860 		struct scsi_qla_host, iocb_work);
2861 	struct qla_hw_data *ha = vha->hw;
2862 	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
2863 	int i = 2;
2864 	unsigned long flags;
2865 
2866 	if (test_bit(UNLOADING, &base_vha->dpc_flags))
2867 		return;
2868 
2869 	while (!list_empty(&vha->work_list) && i > 0) {
2870 		qla2x00_do_work(vha);
2871 		i--;
2872 	}
2873 
2874 	spin_lock_irqsave(&vha->work_lock, flags);
2875 	clear_bit(IOCB_WORK_ACTIVE, &vha->dpc_flags);
2876 	spin_unlock_irqrestore(&vha->work_lock, flags);
2877 }
2878 
2879 static void
2880 qla_trace_init(void)
2881 {
2882 	qla_trc_array = trace_array_get_by_name("qla2xxx");
2883 	if (!qla_trc_array) {
2884 		ql_log(ql_log_fatal, NULL, 0x0001,
2885 		       "Unable to create qla2xxx trace instance, instance logging will be disabled.\n");
2886 		return;
2887 	}
2888 
2889 	QLA_TRACE_ENABLE(qla_trc_array);
2890 }
2891 
2892 static void
2893 qla_trace_uninit(void)
2894 {
2895 	if (!qla_trc_array)
2896 		return;
2897 	trace_array_put(qla_trc_array);
2898 }
2899 
2900 /*
2901  * PCI driver interface
2902  */
2903 static int
2904 qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2905 {
2906 	int	ret = -ENODEV;
2907 	struct Scsi_Host *host;
2908 	scsi_qla_host_t *base_vha = NULL;
2909 	struct qla_hw_data *ha;
2910 	char pci_info[30];
2911 	char fw_str[30], wq_name[30];
2912 	struct scsi_host_template *sht;
2913 	int bars, mem_only = 0;
2914 	uint16_t req_length = 0, rsp_length = 0;
2915 	struct req_que *req = NULL;
2916 	struct rsp_que *rsp = NULL;
2917 	int i;
2918 
2919 	bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
2920 	sht = &qla2xxx_driver_template;
2921 	if (pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2422 ||
2922 	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2432 ||
2923 	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8432 ||
2924 	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5422 ||
2925 	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5432 ||
2926 	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2532 ||
2927 	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8001 ||
2928 	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8021 ||
2929 	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2031 ||
2930 	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8031 ||
2931 	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISPF001 ||
2932 	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8044 ||
2933 	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2071 ||
2934 	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2271 ||
2935 	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2261 ||
2936 	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2081 ||
2937 	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2281 ||
2938 	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2089 ||
2939 	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2289) {
2940 		bars = pci_select_bars(pdev, IORESOURCE_MEM);
2941 		mem_only = 1;
2942 		ql_dbg_pci(ql_dbg_init, pdev, 0x0007,
2943 		    "Mem only adapter.\n");
2944 	}
2945 	ql_dbg_pci(ql_dbg_init, pdev, 0x0008,
2946 	    "Bars=%d.\n", bars);
2947 
2948 	if (mem_only) {
2949 		if (pci_enable_device_mem(pdev))
2950 			return ret;
2951 	} else {
2952 		if (pci_enable_device(pdev))
2953 			return ret;
2954 	}
2955 
2956 	if (is_kdump_kernel()) {
2957 		ql2xmqsupport = 0;
2958 		ql2xallocfwdump = 0;
2959 	}
2960 
2961 	/* This may fail but that's ok */
2962 	pci_enable_pcie_error_reporting(pdev);
2963 
2964 	ha = kzalloc(sizeof(struct qla_hw_data), GFP_KERNEL);
2965 	if (!ha) {
2966 		ql_log_pci(ql_log_fatal, pdev, 0x0009,
2967 		    "Unable to allocate memory for ha.\n");
2968 		goto disable_device;
2969 	}
2970 	ql_dbg_pci(ql_dbg_init, pdev, 0x000a,
2971 	    "Memory allocated for ha=%p.\n", ha);
2972 	ha->pdev = pdev;
2973 	INIT_LIST_HEAD(&ha->tgt.q_full_list);
2974 	spin_lock_init(&ha->tgt.q_full_lock);
2975 	spin_lock_init(&ha->tgt.sess_lock);
2976 	spin_lock_init(&ha->tgt.atio_lock);
2977 
2978 	spin_lock_init(&ha->sadb_lock);
2979 	INIT_LIST_HEAD(&ha->sadb_tx_index_list);
2980 	INIT_LIST_HEAD(&ha->sadb_rx_index_list);
2981 
2982 	spin_lock_init(&ha->sadb_fp_lock);
2983 
2984 	if (qla_edif_sadb_build_free_pool(ha)) {
2985 		kfree(ha);
2986 		goto  disable_device;
2987 	}
2988 
2989 	atomic_set(&ha->nvme_active_aen_cnt, 0);
2990 
2991 	/* Clear our data area */
2992 	ha->bars = bars;
2993 	ha->mem_only = mem_only;
2994 	spin_lock_init(&ha->hardware_lock);
2995 	spin_lock_init(&ha->vport_slock);
2996 	mutex_init(&ha->selflogin_lock);
2997 	mutex_init(&ha->optrom_mutex);
2998 
2999 	/* Set ISP-type information. */
3000 	qla2x00_set_isp_flags(ha);
3001 
3002 	/* Set EEH reset type to fundamental if required by hba */
3003 	if (IS_QLA24XX(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha) ||
3004 	    IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
3005 		pdev->needs_freset = 1;
3006 
3007 	ha->prev_topology = 0;
3008 	ha->init_cb_size = sizeof(init_cb_t);
3009 	ha->link_data_rate = PORT_SPEED_UNKNOWN;
3010 	ha->optrom_size = OPTROM_SIZE_2300;
3011 	ha->max_exchg = FW_MAX_EXCHANGES_CNT;
3012 	atomic_set(&ha->num_pend_mbx_stage1, 0);
3013 	atomic_set(&ha->num_pend_mbx_stage2, 0);
3014 	atomic_set(&ha->num_pend_mbx_stage3, 0);
3015 	atomic_set(&ha->zio_threshold, DEFAULT_ZIO_THRESHOLD);
3016 	ha->last_zio_threshold = DEFAULT_ZIO_THRESHOLD;
3017 
3018 	/* Assign ISP specific operations. */
3019 	if (IS_QLA2100(ha)) {
3020 		ha->max_fibre_devices = MAX_FIBRE_DEVICES_2100;
3021 		ha->mbx_count = MAILBOX_REGISTER_COUNT_2100;
3022 		req_length = REQUEST_ENTRY_CNT_2100;
3023 		rsp_length = RESPONSE_ENTRY_CNT_2100;
3024 		ha->max_loop_id = SNS_LAST_LOOP_ID_2100;
3025 		ha->gid_list_info_size = 4;
3026 		ha->flash_conf_off = ~0;
3027 		ha->flash_data_off = ~0;
3028 		ha->nvram_conf_off = ~0;
3029 		ha->nvram_data_off = ~0;
3030 		ha->isp_ops = &qla2100_isp_ops;
3031 	} else if (IS_QLA2200(ha)) {
3032 		ha->max_fibre_devices = MAX_FIBRE_DEVICES_2100;
3033 		ha->mbx_count = MAILBOX_REGISTER_COUNT_2200;
3034 		req_length = REQUEST_ENTRY_CNT_2200;
3035 		rsp_length = RESPONSE_ENTRY_CNT_2100;
3036 		ha->max_loop_id = SNS_LAST_LOOP_ID_2100;
3037 		ha->gid_list_info_size = 4;
3038 		ha->flash_conf_off = ~0;
3039 		ha->flash_data_off = ~0;
3040 		ha->nvram_conf_off = ~0;
3041 		ha->nvram_data_off = ~0;
3042 		ha->isp_ops = &qla2100_isp_ops;
3043 	} else if (IS_QLA23XX(ha)) {
3044 		ha->max_fibre_devices = MAX_FIBRE_DEVICES_2100;
3045 		ha->mbx_count = MAILBOX_REGISTER_COUNT;
3046 		req_length = REQUEST_ENTRY_CNT_2200;
3047 		rsp_length = RESPONSE_ENTRY_CNT_2300;
3048 		ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
3049 		ha->gid_list_info_size = 6;
3050 		if (IS_QLA2322(ha) || IS_QLA6322(ha))
3051 			ha->optrom_size = OPTROM_SIZE_2322;
3052 		ha->flash_conf_off = ~0;
3053 		ha->flash_data_off = ~0;
3054 		ha->nvram_conf_off = ~0;
3055 		ha->nvram_data_off = ~0;
3056 		ha->isp_ops = &qla2300_isp_ops;
3057 	} else if (IS_QLA24XX_TYPE(ha)) {
3058 		ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
3059 		ha->mbx_count = MAILBOX_REGISTER_COUNT;
3060 		req_length = REQUEST_ENTRY_CNT_24XX;
3061 		rsp_length = RESPONSE_ENTRY_CNT_2300;
3062 		ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
3063 		ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
3064 		ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
3065 		ha->gid_list_info_size = 8;
3066 		ha->optrom_size = OPTROM_SIZE_24XX;
3067 		ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA24XX;
3068 		ha->isp_ops = &qla24xx_isp_ops;
3069 		ha->flash_conf_off = FARX_ACCESS_FLASH_CONF;
3070 		ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
3071 		ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
3072 		ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
3073 	} else if (IS_QLA25XX(ha)) {
3074 		ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
3075 		ha->mbx_count = MAILBOX_REGISTER_COUNT;
3076 		req_length = REQUEST_ENTRY_CNT_24XX;
3077 		rsp_length = RESPONSE_ENTRY_CNT_2300;
3078 		ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
3079 		ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
3080 		ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
3081 		ha->gid_list_info_size = 8;
3082 		ha->optrom_size = OPTROM_SIZE_25XX;
3083 		ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
3084 		ha->isp_ops = &qla25xx_isp_ops;
3085 		ha->flash_conf_off = FARX_ACCESS_FLASH_CONF;
3086 		ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
3087 		ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
3088 		ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
3089 	} else if (IS_QLA81XX(ha)) {
3090 		ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
3091 		ha->mbx_count = MAILBOX_REGISTER_COUNT;
3092 		req_length = REQUEST_ENTRY_CNT_24XX;
3093 		rsp_length = RESPONSE_ENTRY_CNT_2300;
3094 		ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
3095 		ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
3096 		ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
3097 		ha->gid_list_info_size = 8;
3098 		ha->optrom_size = OPTROM_SIZE_81XX;
3099 		ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
3100 		ha->isp_ops = &qla81xx_isp_ops;
3101 		ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX;
3102 		ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX;
3103 		ha->nvram_conf_off = ~0;
3104 		ha->nvram_data_off = ~0;
3105 	} else if (IS_QLA82XX(ha)) {
3106 		ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
3107 		ha->mbx_count = MAILBOX_REGISTER_COUNT;
3108 		req_length = REQUEST_ENTRY_CNT_82XX;
3109 		rsp_length = RESPONSE_ENTRY_CNT_82XX;
3110 		ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
3111 		ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
3112 		ha->gid_list_info_size = 8;
3113 		ha->optrom_size = OPTROM_SIZE_82XX;
3114 		ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
3115 		ha->isp_ops = &qla82xx_isp_ops;
3116 		ha->flash_conf_off = FARX_ACCESS_FLASH_CONF;
3117 		ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
3118 		ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
3119 		ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
3120 	} else if (IS_QLA8044(ha)) {
3121 		ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
3122 		ha->mbx_count = MAILBOX_REGISTER_COUNT;
3123 		req_length = REQUEST_ENTRY_CNT_82XX;
3124 		rsp_length = RESPONSE_ENTRY_CNT_82XX;
3125 		ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
3126 		ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
3127 		ha->gid_list_info_size = 8;
3128 		ha->optrom_size = OPTROM_SIZE_83XX;
3129 		ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
3130 		ha->isp_ops = &qla8044_isp_ops;
3131 		ha->flash_conf_off = FARX_ACCESS_FLASH_CONF;
3132 		ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
3133 		ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
3134 		ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
3135 	} else if (IS_QLA83XX(ha)) {
3136 		ha->portnum = PCI_FUNC(ha->pdev->devfn);
3137 		ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
3138 		ha->mbx_count = MAILBOX_REGISTER_COUNT;
3139 		req_length = REQUEST_ENTRY_CNT_83XX;
3140 		rsp_length = RESPONSE_ENTRY_CNT_83XX;
3141 		ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
3142 		ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
3143 		ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
3144 		ha->gid_list_info_size = 8;
3145 		ha->optrom_size = OPTROM_SIZE_83XX;
3146 		ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
3147 		ha->isp_ops = &qla83xx_isp_ops;
3148 		ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX;
3149 		ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX;
3150 		ha->nvram_conf_off = ~0;
3151 		ha->nvram_data_off = ~0;
3152 	}  else if (IS_QLAFX00(ha)) {
3153 		ha->max_fibre_devices = MAX_FIBRE_DEVICES_FX00;
3154 		ha->mbx_count = MAILBOX_REGISTER_COUNT_FX00;
3155 		ha->aen_mbx_count = AEN_MAILBOX_REGISTER_COUNT_FX00;
3156 		req_length = REQUEST_ENTRY_CNT_FX00;
3157 		rsp_length = RESPONSE_ENTRY_CNT_FX00;
3158 		ha->isp_ops = &qlafx00_isp_ops;
3159 		ha->port_down_retry_count = 30; /* default value */
3160 		ha->mr.fw_hbt_cnt = QLAFX00_HEARTBEAT_INTERVAL;
3161 		ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL;
3162 		ha->mr.fw_critemp_timer_tick = QLAFX00_CRITEMP_INTERVAL;
3163 		ha->mr.fw_hbt_en = 1;
3164 		ha->mr.host_info_resend = false;
3165 		ha->mr.hinfo_resend_timer_tick = QLAFX00_HINFO_RESEND_INTERVAL;
3166 	} else if (IS_QLA27XX(ha)) {
3167 		ha->portnum = PCI_FUNC(ha->pdev->devfn);
3168 		ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
3169 		ha->mbx_count = MAILBOX_REGISTER_COUNT;
3170 		req_length = REQUEST_ENTRY_CNT_83XX;
3171 		rsp_length = RESPONSE_ENTRY_CNT_83XX;
3172 		ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
3173 		ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
3174 		ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
3175 		ha->gid_list_info_size = 8;
3176 		ha->optrom_size = OPTROM_SIZE_83XX;
3177 		ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
3178 		ha->isp_ops = &qla27xx_isp_ops;
3179 		ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX;
3180 		ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX;
3181 		ha->nvram_conf_off = ~0;
3182 		ha->nvram_data_off = ~0;
3183 	} else if (IS_QLA28XX(ha)) {
3184 		ha->portnum = PCI_FUNC(ha->pdev->devfn);
3185 		ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
3186 		ha->mbx_count = MAILBOX_REGISTER_COUNT;
3187 		req_length = REQUEST_ENTRY_CNT_83XX;
3188 		rsp_length = RESPONSE_ENTRY_CNT_83XX;
3189 		ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
3190 		ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
3191 		ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
3192 		ha->gid_list_info_size = 8;
3193 		ha->optrom_size = OPTROM_SIZE_28XX;
3194 		ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
3195 		ha->isp_ops = &qla27xx_isp_ops;
3196 		ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_28XX;
3197 		ha->flash_data_off = FARX_ACCESS_FLASH_DATA_28XX;
3198 		ha->nvram_conf_off = ~0;
3199 		ha->nvram_data_off = ~0;
3200 	}
3201 
3202 	ql_dbg_pci(ql_dbg_init, pdev, 0x001e,
3203 	    "mbx_count=%d, req_length=%d, "
3204 	    "rsp_length=%d, max_loop_id=%d, init_cb_size=%d, "
3205 	    "gid_list_info_size=%d, optrom_size=%d, nvram_npiv_size=%d, "
3206 	    "max_fibre_devices=%d.\n",
3207 	    ha->mbx_count, req_length, rsp_length, ha->max_loop_id,
3208 	    ha->init_cb_size, ha->gid_list_info_size, ha->optrom_size,
3209 	    ha->nvram_npiv_size, ha->max_fibre_devices);
3210 	ql_dbg_pci(ql_dbg_init, pdev, 0x001f,
3211 	    "isp_ops=%p, flash_conf_off=%d, "
3212 	    "flash_data_off=%d, nvram_conf_off=%d, nvram_data_off=%d.\n",
3213 	    ha->isp_ops, ha->flash_conf_off, ha->flash_data_off,
3214 	    ha->nvram_conf_off, ha->nvram_data_off);
3215 
3216 	/* Configure PCI I/O space */
3217 	ret = ha->isp_ops->iospace_config(ha);
3218 	if (ret)
3219 		goto iospace_config_failed;
3220 
3221 	ql_log_pci(ql_log_info, pdev, 0x001d,
3222 	    "Found an ISP%04X irq %d iobase 0x%p.\n",
3223 	    pdev->device, pdev->irq, ha->iobase);
3224 	mutex_init(&ha->vport_lock);
3225 	mutex_init(&ha->mq_lock);
3226 	init_completion(&ha->mbx_cmd_comp);
3227 	complete(&ha->mbx_cmd_comp);
3228 	init_completion(&ha->mbx_intr_comp);
3229 	init_completion(&ha->dcbx_comp);
3230 	init_completion(&ha->lb_portup_comp);
3231 
3232 	set_bit(0, (unsigned long *) ha->vp_idx_map);
3233 
3234 	qla2x00_config_dma_addressing(ha);
3235 	ql_dbg_pci(ql_dbg_init, pdev, 0x0020,
3236 	    "64 Bit addressing is %s.\n",
3237 	    ha->flags.enable_64bit_addressing ? "enable" :
3238 	    "disable");
3239 	ret = qla2x00_mem_alloc(ha, req_length, rsp_length, &req, &rsp);
3240 	if (ret) {
3241 		ql_log_pci(ql_log_fatal, pdev, 0x0031,
3242 		    "Failed to allocate memory for adapter, aborting.\n");
3243 
3244 		goto probe_hw_failed;
3245 	}
3246 
3247 	req->max_q_depth = MAX_Q_DEPTH;
3248 	if (ql2xmaxqdepth != 0 && ql2xmaxqdepth <= 0xffffU)
3249 		req->max_q_depth = ql2xmaxqdepth;
3250 
3251 
3252 	base_vha = qla2x00_create_host(sht, ha);
3253 	if (!base_vha) {
3254 		ret = -ENOMEM;
3255 		goto probe_hw_failed;
3256 	}
3257 
3258 	pci_set_drvdata(pdev, base_vha);
3259 	set_bit(PFLG_DRIVER_PROBING, &base_vha->pci_flags);
3260 
3261 	host = base_vha->host;
3262 	base_vha->req = req;
3263 	if (IS_QLA2XXX_MIDTYPE(ha))
3264 		base_vha->mgmt_svr_loop_id =
3265 			qla2x00_reserve_mgmt_server_loop_id(base_vha);
3266 	else
3267 		base_vha->mgmt_svr_loop_id = MANAGEMENT_SERVER +
3268 						base_vha->vp_idx;
3269 
3270 	/* Setup fcport template structure. */
3271 	ha->mr.fcport.vha = base_vha;
3272 	ha->mr.fcport.port_type = FCT_UNKNOWN;
3273 	ha->mr.fcport.loop_id = FC_NO_LOOP_ID;
3274 	qla2x00_set_fcport_state(&ha->mr.fcport, FCS_UNCONFIGURED);
3275 	ha->mr.fcport.supported_classes = FC_COS_UNSPECIFIED;
3276 	ha->mr.fcport.scan_state = 1;
3277 
3278 	qla2xxx_reset_stats(host, QLA2XX_HW_ERROR | QLA2XX_SHT_LNK_DWN |
3279 			    QLA2XX_INT_ERR | QLA2XX_CMD_TIMEOUT |
3280 			    QLA2XX_RESET_CMD_ERR | QLA2XX_TGT_SHT_LNK_DOWN);
3281 
3282 	/* Set the SG table size based on ISP type */
3283 	if (!IS_FWI2_CAPABLE(ha)) {
3284 		if (IS_QLA2100(ha))
3285 			host->sg_tablesize = 32;
3286 	} else {
3287 		if (!IS_QLA82XX(ha))
3288 			host->sg_tablesize = QLA_SG_ALL;
3289 	}
3290 	host->max_id = ha->max_fibre_devices;
3291 	host->cmd_per_lun = 3;
3292 	host->unique_id = host->host_no;
3293 	if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)
3294 		host->max_cmd_len = 32;
3295 	else
3296 		host->max_cmd_len = MAX_CMDSZ;
3297 	host->max_channel = MAX_BUSES - 1;
3298 	/* Older HBAs support only 16-bit LUNs */
3299 	if (!IS_QLAFX00(ha) && !IS_FWI2_CAPABLE(ha) &&
3300 	    ql2xmaxlun > 0xffff)
3301 		host->max_lun = 0xffff;
3302 	else
3303 		host->max_lun = ql2xmaxlun;
3304 	host->transportt = qla2xxx_transport_template;
3305 	sht->vendor_id = (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC);
3306 
3307 	ql_dbg(ql_dbg_init, base_vha, 0x0033,
3308 	    "max_id=%d this_id=%d "
3309 	    "cmd_per_len=%d unique_id=%d max_cmd_len=%d max_channel=%d "
3310 	    "max_lun=%llu transportt=%p, vendor_id=%llu.\n", host->max_id,
3311 	    host->this_id, host->cmd_per_lun, host->unique_id,
3312 	    host->max_cmd_len, host->max_channel, host->max_lun,
3313 	    host->transportt, sht->vendor_id);
3314 
3315 	INIT_WORK(&ha->heartbeat_work, qla_heartbeat_work_fn);
3316 
3317 	/* Set up the irqs */
3318 	ret = qla2x00_request_irqs(ha, rsp);
3319 	if (ret)
3320 		goto probe_failed;
3321 
3322 	/* Alloc arrays of request and response ring ptrs */
3323 	ret = qla2x00_alloc_queues(ha, req, rsp);
3324 	if (ret) {
3325 		ql_log(ql_log_fatal, base_vha, 0x003d,
3326 		    "Failed to allocate memory for queue pointers..."
3327 		    "aborting.\n");
3328 		ret = -ENODEV;
3329 		goto probe_failed;
3330 	}
3331 
3332 	if (ha->mqenable) {
3333 		/* number of hardware queues supported by blk/scsi-mq*/
3334 		host->nr_hw_queues = ha->max_qpairs;
3335 
3336 		ql_dbg(ql_dbg_init, base_vha, 0x0192,
3337 			"blk/scsi-mq enabled, HW queues = %d.\n", host->nr_hw_queues);
3338 	} else {
3339 		if (ql2xnvmeenable) {
3340 			host->nr_hw_queues = ha->max_qpairs;
3341 			ql_dbg(ql_dbg_init, base_vha, 0x0194,
3342 			    "FC-NVMe support is enabled, HW queues=%d\n",
3343 			    host->nr_hw_queues);
3344 		} else {
3345 			ql_dbg(ql_dbg_init, base_vha, 0x0193,
3346 			    "blk/scsi-mq disabled.\n");
3347 		}
3348 	}
3349 
3350 	qlt_probe_one_stage1(base_vha, ha);
3351 
3352 	pci_save_state(pdev);
3353 
3354 	/* Assign back pointers */
3355 	rsp->req = req;
3356 	req->rsp = rsp;
3357 
3358 	if (IS_QLAFX00(ha)) {
3359 		ha->rsp_q_map[0] = rsp;
3360 		ha->req_q_map[0] = req;
3361 		set_bit(0, ha->req_qid_map);
3362 		set_bit(0, ha->rsp_qid_map);
3363 	}
3364 
3365 	/* FWI2-capable only. */
3366 	req->req_q_in = &ha->iobase->isp24.req_q_in;
3367 	req->req_q_out = &ha->iobase->isp24.req_q_out;
3368 	rsp->rsp_q_in = &ha->iobase->isp24.rsp_q_in;
3369 	rsp->rsp_q_out = &ha->iobase->isp24.rsp_q_out;
3370 	if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
3371 	    IS_QLA28XX(ha)) {
3372 		req->req_q_in = &ha->mqiobase->isp25mq.req_q_in;
3373 		req->req_q_out = &ha->mqiobase->isp25mq.req_q_out;
3374 		rsp->rsp_q_in = &ha->mqiobase->isp25mq.rsp_q_in;
3375 		rsp->rsp_q_out =  &ha->mqiobase->isp25mq.rsp_q_out;
3376 	}
3377 
3378 	if (IS_QLAFX00(ha)) {
3379 		req->req_q_in = &ha->iobase->ispfx00.req_q_in;
3380 		req->req_q_out = &ha->iobase->ispfx00.req_q_out;
3381 		rsp->rsp_q_in = &ha->iobase->ispfx00.rsp_q_in;
3382 		rsp->rsp_q_out = &ha->iobase->ispfx00.rsp_q_out;
3383 	}
3384 
3385 	if (IS_P3P_TYPE(ha)) {
3386 		req->req_q_out = &ha->iobase->isp82.req_q_out[0];
3387 		rsp->rsp_q_in = &ha->iobase->isp82.rsp_q_in[0];
3388 		rsp->rsp_q_out = &ha->iobase->isp82.rsp_q_out[0];
3389 	}
3390 
3391 	ql_dbg(ql_dbg_multiq, base_vha, 0xc009,
3392 	    "rsp_q_map=%p req_q_map=%p rsp->req=%p req->rsp=%p.\n",
3393 	    ha->rsp_q_map, ha->req_q_map, rsp->req, req->rsp);
3394 	ql_dbg(ql_dbg_multiq, base_vha, 0xc00a,
3395 	    "req->req_q_in=%p req->req_q_out=%p "
3396 	    "rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n",
3397 	    req->req_q_in, req->req_q_out,
3398 	    rsp->rsp_q_in, rsp->rsp_q_out);
3399 	ql_dbg(ql_dbg_init, base_vha, 0x003e,
3400 	    "rsp_q_map=%p req_q_map=%p rsp->req=%p req->rsp=%p.\n",
3401 	    ha->rsp_q_map, ha->req_q_map, rsp->req, req->rsp);
3402 	ql_dbg(ql_dbg_init, base_vha, 0x003f,
3403 	    "req->req_q_in=%p req->req_q_out=%p rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n",
3404 	    req->req_q_in, req->req_q_out, rsp->rsp_q_in, rsp->rsp_q_out);
3405 
3406 	ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 0);
3407 	if (unlikely(!ha->wq)) {
3408 		ret = -ENOMEM;
3409 		goto probe_failed;
3410 	}
3411 
3412 	if (ha->isp_ops->initialize_adapter(base_vha)) {
3413 		ql_log(ql_log_fatal, base_vha, 0x00d6,
3414 		    "Failed to initialize adapter - Adapter flags %x.\n",
3415 		    base_vha->device_flags);
3416 
3417 		if (IS_QLA82XX(ha)) {
3418 			qla82xx_idc_lock(ha);
3419 			qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3420 				QLA8XXX_DEV_FAILED);
3421 			qla82xx_idc_unlock(ha);
3422 			ql_log(ql_log_fatal, base_vha, 0x00d7,
3423 			    "HW State: FAILED.\n");
3424 		} else if (IS_QLA8044(ha)) {
3425 			qla8044_idc_lock(ha);
3426 			qla8044_wr_direct(base_vha,
3427 				QLA8044_CRB_DEV_STATE_INDEX,
3428 				QLA8XXX_DEV_FAILED);
3429 			qla8044_idc_unlock(ha);
3430 			ql_log(ql_log_fatal, base_vha, 0x0150,
3431 			    "HW State: FAILED.\n");
3432 		}
3433 
3434 		ret = -ENODEV;
3435 		goto probe_failed;
3436 	}
3437 
3438 	if (IS_QLAFX00(ha))
3439 		host->can_queue = QLAFX00_MAX_CANQUEUE;
3440 	else
3441 		host->can_queue = req->num_outstanding_cmds - 10;
3442 
3443 	ql_dbg(ql_dbg_init, base_vha, 0x0032,
3444 	    "can_queue=%d, req=%p, mgmt_svr_loop_id=%d, sg_tablesize=%d.\n",
3445 	    host->can_queue, base_vha->req,
3446 	    base_vha->mgmt_svr_loop_id, host->sg_tablesize);
3447 
3448 	/* Check if FW supports MQ or not for ISP25xx */
3449 	if (IS_QLA25XX(ha) && !(ha->fw_attributes & BIT_6))
3450 		ha->mqenable = 0;
3451 
3452 	if (ha->mqenable) {
3453 		bool startit = false;
3454 
3455 		if (QLA_TGT_MODE_ENABLED())
3456 			startit = false;
3457 
3458 		if (ql2x_ini_mode == QLA2XXX_INI_MODE_ENABLED)
3459 			startit = true;
3460 
3461 		/* Create start of day qpairs for Block MQ */
3462 		for (i = 0; i < ha->max_qpairs; i++)
3463 			qla2xxx_create_qpair(base_vha, 5, 0, startit);
3464 	}
3465 	qla_init_iocb_limit(base_vha);
3466 
3467 	if (ha->flags.running_gold_fw)
3468 		goto skip_dpc;
3469 
3470 	/*
3471 	 * Startup the kernel thread for this host adapter
3472 	 */
3473 	ha->dpc_thread = kthread_create(qla2x00_do_dpc, ha,
3474 	    "%s_dpc", base_vha->host_str);
3475 	if (IS_ERR(ha->dpc_thread)) {
3476 		ql_log(ql_log_fatal, base_vha, 0x00ed,
3477 		    "Failed to start DPC thread.\n");
3478 		ret = PTR_ERR(ha->dpc_thread);
3479 		ha->dpc_thread = NULL;
3480 		goto probe_failed;
3481 	}
3482 	ql_dbg(ql_dbg_init, base_vha, 0x00ee,
3483 	    "DPC thread started successfully.\n");
3484 
3485 	/*
3486 	 * If we're not coming up in initiator mode, we might sit for
3487 	 * a while without waking up the dpc thread, which leads to a
3488 	 * stuck process warning.  So just kick the dpc once here and
3489 	 * let the kthread start (and go back to sleep in qla2x00_do_dpc).
3490 	 */
3491 	qla2xxx_wake_dpc(base_vha);
3492 
3493 	INIT_WORK(&ha->board_disable, qla2x00_disable_board_on_pci_error);
3494 
3495 	if (IS_QLA8031(ha) || IS_MCTP_CAPABLE(ha)) {
3496 		sprintf(wq_name, "qla2xxx_%lu_dpc_lp_wq", base_vha->host_no);
3497 		ha->dpc_lp_wq = create_singlethread_workqueue(wq_name);
3498 		INIT_WORK(&ha->idc_aen, qla83xx_service_idc_aen);
3499 
3500 		sprintf(wq_name, "qla2xxx_%lu_dpc_hp_wq", base_vha->host_no);
3501 		ha->dpc_hp_wq = create_singlethread_workqueue(wq_name);
3502 		INIT_WORK(&ha->nic_core_reset, qla83xx_nic_core_reset_work);
3503 		INIT_WORK(&ha->idc_state_handler,
3504 		    qla83xx_idc_state_handler_work);
3505 		INIT_WORK(&ha->nic_core_unrecoverable,
3506 		    qla83xx_nic_core_unrecoverable_work);
3507 	}
3508 
3509 skip_dpc:
3510 	list_add_tail(&base_vha->list, &ha->vp_list);
3511 	base_vha->host->irq = ha->pdev->irq;
3512 
3513 	/* Initialized the timer */
3514 	qla2x00_start_timer(base_vha, WATCH_INTERVAL);
3515 	ql_dbg(ql_dbg_init, base_vha, 0x00ef,
3516 	    "Started qla2x00_timer with "
3517 	    "interval=%d.\n", WATCH_INTERVAL);
3518 	ql_dbg(ql_dbg_init, base_vha, 0x00f0,
3519 	    "Detected hba at address=%p.\n",
3520 	    ha);
3521 
3522 	if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
3523 		if (ha->fw_attributes & BIT_4) {
3524 			int prot = 0, guard;
3525 
3526 			base_vha->flags.difdix_supported = 1;
3527 			ql_dbg(ql_dbg_init, base_vha, 0x00f1,
3528 			    "Registering for DIF/DIX type 1 and 3 protection.\n");
3529 			if (ql2xenabledif == 1)
3530 				prot = SHOST_DIX_TYPE0_PROTECTION;
3531 			if (ql2xprotmask)
3532 				scsi_host_set_prot(host, ql2xprotmask);
3533 			else
3534 				scsi_host_set_prot(host,
3535 				    prot | SHOST_DIF_TYPE1_PROTECTION
3536 				    | SHOST_DIF_TYPE2_PROTECTION
3537 				    | SHOST_DIF_TYPE3_PROTECTION
3538 				    | SHOST_DIX_TYPE1_PROTECTION
3539 				    | SHOST_DIX_TYPE2_PROTECTION
3540 				    | SHOST_DIX_TYPE3_PROTECTION);
3541 
3542 			guard = SHOST_DIX_GUARD_CRC;
3543 
3544 			if (IS_PI_IPGUARD_CAPABLE(ha) &&
3545 			    (ql2xenabledif > 1 || IS_PI_DIFB_DIX0_CAPABLE(ha)))
3546 				guard |= SHOST_DIX_GUARD_IP;
3547 
3548 			if (ql2xprotguard)
3549 				scsi_host_set_guard(host, ql2xprotguard);
3550 			else
3551 				scsi_host_set_guard(host, guard);
3552 		} else
3553 			base_vha->flags.difdix_supported = 0;
3554 	}
3555 
3556 	ha->isp_ops->enable_intrs(ha);
3557 
3558 	if (IS_QLAFX00(ha)) {
3559 		ret = qlafx00_fx_disc(base_vha,
3560 			&base_vha->hw->mr.fcport, FXDISC_GET_CONFIG_INFO);
3561 		host->sg_tablesize = (ha->mr.extended_io_enabled) ?
3562 		    QLA_SG_ALL : 128;
3563 	}
3564 
3565 	ret = scsi_add_host(host, &pdev->dev);
3566 	if (ret)
3567 		goto probe_failed;
3568 
3569 	base_vha->flags.init_done = 1;
3570 	base_vha->flags.online = 1;
3571 	ha->prev_minidump_failed = 0;
3572 
3573 	ql_dbg(ql_dbg_init, base_vha, 0x00f2,
3574 	    "Init done and hba is online.\n");
3575 
3576 	if (qla_ini_mode_enabled(base_vha) ||
3577 		qla_dual_mode_enabled(base_vha))
3578 		scsi_scan_host(host);
3579 	else
3580 		ql_log(ql_log_info, base_vha, 0x0122,
3581 			"skipping scsi_scan_host() for non-initiator port\n");
3582 
3583 	qla2x00_alloc_sysfs_attr(base_vha);
3584 
3585 	if (IS_QLAFX00(ha)) {
3586 		ret = qlafx00_fx_disc(base_vha,
3587 			&base_vha->hw->mr.fcport, FXDISC_GET_PORT_INFO);
3588 
3589 		/* Register system information */
3590 		ret =  qlafx00_fx_disc(base_vha,
3591 			&base_vha->hw->mr.fcport, FXDISC_REG_HOST_INFO);
3592 	}
3593 
3594 	qla2x00_init_host_attr(base_vha);
3595 
3596 	qla2x00_dfs_setup(base_vha);
3597 
3598 	ql_log(ql_log_info, base_vha, 0x00fb,
3599 	    "QLogic %s - %s.\n", ha->model_number, ha->model_desc);
3600 	ql_log(ql_log_info, base_vha, 0x00fc,
3601 	    "ISP%04X: %s @ %s hdma%c host#=%ld fw=%s.\n",
3602 	    pdev->device, ha->isp_ops->pci_info_str(base_vha, pci_info,
3603 						       sizeof(pci_info)),
3604 	    pci_name(pdev), ha->flags.enable_64bit_addressing ? '+' : '-',
3605 	    base_vha->host_no,
3606 	    ha->isp_ops->fw_version_str(base_vha, fw_str, sizeof(fw_str)));
3607 
3608 	qlt_add_target(ha, base_vha);
3609 
3610 	clear_bit(PFLG_DRIVER_PROBING, &base_vha->pci_flags);
3611 
3612 	if (test_bit(UNLOADING, &base_vha->dpc_flags))
3613 		return -ENODEV;
3614 
3615 	return 0;
3616 
3617 probe_failed:
3618 	qla_enode_stop(base_vha);
3619 	qla_edb_stop(base_vha);
3620 	if (base_vha->gnl.l) {
3621 		dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size,
3622 				base_vha->gnl.l, base_vha->gnl.ldma);
3623 		base_vha->gnl.l = NULL;
3624 	}
3625 
3626 	if (base_vha->timer_active)
3627 		qla2x00_stop_timer(base_vha);
3628 	base_vha->flags.online = 0;
3629 	if (ha->dpc_thread) {
3630 		struct task_struct *t = ha->dpc_thread;
3631 
3632 		ha->dpc_thread = NULL;
3633 		kthread_stop(t);
3634 	}
3635 
3636 	qla2x00_free_device(base_vha);
3637 	scsi_host_put(base_vha->host);
3638 	/*
3639 	 * Need to NULL out local req/rsp after
3640 	 * qla2x00_free_device => qla2x00_free_queues frees
3641 	 * what these are pointing to. Or else we'll
3642 	 * fall over below in qla2x00_free_req/rsp_que.
3643 	 */
3644 	req = NULL;
3645 	rsp = NULL;
3646 
3647 probe_hw_failed:
3648 	qla2x00_mem_free(ha);
3649 	qla2x00_free_req_que(ha, req);
3650 	qla2x00_free_rsp_que(ha, rsp);
3651 	qla2x00_clear_drv_active(ha);
3652 
3653 iospace_config_failed:
3654 	if (IS_P3P_TYPE(ha)) {
3655 		if (!ha->nx_pcibase)
3656 			iounmap((device_reg_t *)ha->nx_pcibase);
3657 		if (!ql2xdbwr)
3658 			iounmap((device_reg_t *)ha->nxdb_wr_ptr);
3659 	} else {
3660 		if (ha->iobase)
3661 			iounmap(ha->iobase);
3662 		if (ha->cregbase)
3663 			iounmap(ha->cregbase);
3664 	}
3665 	pci_release_selected_regions(ha->pdev, ha->bars);
3666 	kfree(ha);
3667 
3668 disable_device:
3669 	pci_disable_device(pdev);
3670 	return ret;
3671 }
3672 
3673 static void __qla_set_remove_flag(scsi_qla_host_t *base_vha)
3674 {
3675 	scsi_qla_host_t *vp;
3676 	unsigned long flags;
3677 	struct qla_hw_data *ha;
3678 
3679 	if (!base_vha)
3680 		return;
3681 
3682 	ha = base_vha->hw;
3683 
3684 	spin_lock_irqsave(&ha->vport_slock, flags);
3685 	list_for_each_entry(vp, &ha->vp_list, list)
3686 		set_bit(PFLG_DRIVER_REMOVING, &vp->pci_flags);
3687 
3688 	/*
3689 	 * Indicate device removal to prevent future board_disable
3690 	 * and wait until any pending board_disable has completed.
3691 	 */
3692 	set_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags);
3693 	spin_unlock_irqrestore(&ha->vport_slock, flags);
3694 }
3695 
3696 static void
3697 qla2x00_shutdown(struct pci_dev *pdev)
3698 {
3699 	scsi_qla_host_t *vha;
3700 	struct qla_hw_data  *ha;
3701 
3702 	vha = pci_get_drvdata(pdev);
3703 	ha = vha->hw;
3704 
3705 	ql_log(ql_log_info, vha, 0xfffa,
3706 		"Adapter shutdown\n");
3707 
3708 	/*
3709 	 * Prevent future board_disable and wait
3710 	 * until any pending board_disable has completed.
3711 	 */
3712 	__qla_set_remove_flag(vha);
3713 	cancel_work_sync(&ha->board_disable);
3714 
3715 	if (!atomic_read(&pdev->enable_cnt))
3716 		return;
3717 
3718 	/* Notify ISPFX00 firmware */
3719 	if (IS_QLAFX00(ha))
3720 		qlafx00_driver_shutdown(vha, 20);
3721 
3722 	/* Turn-off FCE trace */
3723 	if (ha->flags.fce_enabled) {
3724 		qla2x00_disable_fce_trace(vha, NULL, NULL);
3725 		ha->flags.fce_enabled = 0;
3726 	}
3727 
3728 	/* Turn-off EFT trace */
3729 	if (ha->eft)
3730 		qla2x00_disable_eft_trace(vha);
3731 
3732 	if (IS_QLA25XX(ha) ||  IS_QLA2031(ha) || IS_QLA27XX(ha) ||
3733 	    IS_QLA28XX(ha)) {
3734 		if (ha->flags.fw_started)
3735 			qla2x00_abort_isp_cleanup(vha);
3736 	} else {
3737 		/* Stop currently executing firmware. */
3738 		qla2x00_try_to_stop_firmware(vha);
3739 	}
3740 
3741 	/* Disable timer */
3742 	if (vha->timer_active)
3743 		qla2x00_stop_timer(vha);
3744 
3745 	/* Turn adapter off line */
3746 	vha->flags.online = 0;
3747 
3748 	/* turn-off interrupts on the card */
3749 	if (ha->interrupts_on) {
3750 		vha->flags.init_done = 0;
3751 		ha->isp_ops->disable_intrs(ha);
3752 	}
3753 
3754 	qla2x00_free_irqs(vha);
3755 
3756 	qla2x00_free_fw_dump(ha);
3757 
3758 	pci_disable_device(pdev);
3759 	ql_log(ql_log_info, vha, 0xfffe,
3760 		"Adapter shutdown successfully.\n");
3761 }
3762 
3763 /* Deletes all the virtual ports for a given ha */
3764 static void
3765 qla2x00_delete_all_vps(struct qla_hw_data *ha, scsi_qla_host_t *base_vha)
3766 {
3767 	scsi_qla_host_t *vha;
3768 	unsigned long flags;
3769 
3770 	mutex_lock(&ha->vport_lock);
3771 	while (ha->cur_vport_count) {
3772 		spin_lock_irqsave(&ha->vport_slock, flags);
3773 
3774 		BUG_ON(base_vha->list.next == &ha->vp_list);
3775 		/* This assumes first entry in ha->vp_list is always base vha */
3776 		vha = list_first_entry(&base_vha->list, scsi_qla_host_t, list);
3777 		scsi_host_get(vha->host);
3778 
3779 		spin_unlock_irqrestore(&ha->vport_slock, flags);
3780 		mutex_unlock(&ha->vport_lock);
3781 
3782 		qla_nvme_delete(vha);
3783 
3784 		fc_vport_terminate(vha->fc_vport);
3785 		scsi_host_put(vha->host);
3786 
3787 		mutex_lock(&ha->vport_lock);
3788 	}
3789 	mutex_unlock(&ha->vport_lock);
3790 }
3791 
3792 /* Stops all deferred work threads */
3793 static void
3794 qla2x00_destroy_deferred_work(struct qla_hw_data *ha)
3795 {
3796 	/* Cancel all work and destroy DPC workqueues */
3797 	if (ha->dpc_lp_wq) {
3798 		cancel_work_sync(&ha->idc_aen);
3799 		destroy_workqueue(ha->dpc_lp_wq);
3800 		ha->dpc_lp_wq = NULL;
3801 	}
3802 
3803 	if (ha->dpc_hp_wq) {
3804 		cancel_work_sync(&ha->nic_core_reset);
3805 		cancel_work_sync(&ha->idc_state_handler);
3806 		cancel_work_sync(&ha->nic_core_unrecoverable);
3807 		destroy_workqueue(ha->dpc_hp_wq);
3808 		ha->dpc_hp_wq = NULL;
3809 	}
3810 
3811 	/* Kill the kernel thread for this host */
3812 	if (ha->dpc_thread) {
3813 		struct task_struct *t = ha->dpc_thread;
3814 
3815 		/*
3816 		 * qla2xxx_wake_dpc checks for ->dpc_thread
3817 		 * so we need to zero it out.
3818 		 */
3819 		ha->dpc_thread = NULL;
3820 		kthread_stop(t);
3821 	}
3822 }
3823 
3824 static void
3825 qla2x00_unmap_iobases(struct qla_hw_data *ha)
3826 {
3827 	if (IS_QLA82XX(ha)) {
3828 
3829 		iounmap((device_reg_t *)ha->nx_pcibase);
3830 		if (!ql2xdbwr)
3831 			iounmap((device_reg_t *)ha->nxdb_wr_ptr);
3832 	} else {
3833 		if (ha->iobase)
3834 			iounmap(ha->iobase);
3835 
3836 		if (ha->cregbase)
3837 			iounmap(ha->cregbase);
3838 
3839 		if (ha->mqiobase)
3840 			iounmap(ha->mqiobase);
3841 
3842 		if (ha->msixbase)
3843 			iounmap(ha->msixbase);
3844 	}
3845 }
3846 
3847 static void
3848 qla2x00_clear_drv_active(struct qla_hw_data *ha)
3849 {
3850 	if (IS_QLA8044(ha)) {
3851 		qla8044_idc_lock(ha);
3852 		qla8044_clear_drv_active(ha);
3853 		qla8044_idc_unlock(ha);
3854 	} else if (IS_QLA82XX(ha)) {
3855 		qla82xx_idc_lock(ha);
3856 		qla82xx_clear_drv_active(ha);
3857 		qla82xx_idc_unlock(ha);
3858 	}
3859 }
3860 
3861 static void
3862 qla2x00_remove_one(struct pci_dev *pdev)
3863 {
3864 	scsi_qla_host_t *base_vha;
3865 	struct qla_hw_data  *ha;
3866 
3867 	base_vha = pci_get_drvdata(pdev);
3868 	ha = base_vha->hw;
3869 	ql_log(ql_log_info, base_vha, 0xb079,
3870 	    "Removing driver\n");
3871 	__qla_set_remove_flag(base_vha);
3872 	cancel_work_sync(&ha->board_disable);
3873 
3874 	/*
3875 	 * If the PCI device is disabled then there was a PCI-disconnect and
3876 	 * qla2x00_disable_board_on_pci_error has taken care of most of the
3877 	 * resources.
3878 	 */
3879 	if (!atomic_read(&pdev->enable_cnt)) {
3880 		dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size,
3881 		    base_vha->gnl.l, base_vha->gnl.ldma);
3882 		base_vha->gnl.l = NULL;
3883 		scsi_host_put(base_vha->host);
3884 		kfree(ha);
3885 		pci_set_drvdata(pdev, NULL);
3886 		return;
3887 	}
3888 	qla2x00_wait_for_hba_ready(base_vha);
3889 
3890 	/*
3891 	 * if UNLOADING flag is already set, then continue unload,
3892 	 * where it was set first.
3893 	 */
3894 	if (test_and_set_bit(UNLOADING, &base_vha->dpc_flags))
3895 		return;
3896 
3897 	if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) ||
3898 	    IS_QLA28XX(ha)) {
3899 		if (ha->flags.fw_started)
3900 			qla2x00_abort_isp_cleanup(base_vha);
3901 	} else if (!IS_QLAFX00(ha)) {
3902 		if (IS_QLA8031(ha)) {
3903 			ql_dbg(ql_dbg_p3p, base_vha, 0xb07e,
3904 			    "Clearing fcoe driver presence.\n");
3905 			if (qla83xx_clear_drv_presence(base_vha) != QLA_SUCCESS)
3906 				ql_dbg(ql_dbg_p3p, base_vha, 0xb079,
3907 				    "Error while clearing DRV-Presence.\n");
3908 		}
3909 
3910 		qla2x00_try_to_stop_firmware(base_vha);
3911 	}
3912 
3913 	qla2x00_wait_for_sess_deletion(base_vha);
3914 
3915 	qla_nvme_delete(base_vha);
3916 
3917 	dma_free_coherent(&ha->pdev->dev,
3918 		base_vha->gnl.size, base_vha->gnl.l, base_vha->gnl.ldma);
3919 
3920 	base_vha->gnl.l = NULL;
3921 	qla_enode_stop(base_vha);
3922 	qla_edb_stop(base_vha);
3923 
3924 	vfree(base_vha->scan.l);
3925 
3926 	if (IS_QLAFX00(ha))
3927 		qlafx00_driver_shutdown(base_vha, 20);
3928 
3929 	qla2x00_delete_all_vps(ha, base_vha);
3930 
3931 	qla2x00_dfs_remove(base_vha);
3932 
3933 	qla84xx_put_chip(base_vha);
3934 
3935 	/* Disable timer */
3936 	if (base_vha->timer_active)
3937 		qla2x00_stop_timer(base_vha);
3938 
3939 	base_vha->flags.online = 0;
3940 
3941 	/* free DMA memory */
3942 	if (ha->exlogin_buf)
3943 		qla2x00_free_exlogin_buffer(ha);
3944 
3945 	/* free DMA memory */
3946 	if (ha->exchoffld_buf)
3947 		qla2x00_free_exchoffld_buffer(ha);
3948 
3949 	qla2x00_destroy_deferred_work(ha);
3950 
3951 	qlt_remove_target(ha, base_vha);
3952 
3953 	qla2x00_free_sysfs_attr(base_vha, true);
3954 
3955 	fc_remove_host(base_vha->host);
3956 
3957 	scsi_remove_host(base_vha->host);
3958 
3959 	qla2x00_free_device(base_vha);
3960 
3961 	qla2x00_clear_drv_active(ha);
3962 
3963 	scsi_host_put(base_vha->host);
3964 
3965 	qla2x00_unmap_iobases(ha);
3966 
3967 	pci_release_selected_regions(ha->pdev, ha->bars);
3968 	kfree(ha);
3969 
3970 	pci_disable_pcie_error_reporting(pdev);
3971 
3972 	pci_disable_device(pdev);
3973 }
3974 
3975 static inline void
3976 qla24xx_free_purex_list(struct purex_list *list)
3977 {
3978 	struct purex_item *item, *next;
3979 	ulong flags;
3980 
3981 	spin_lock_irqsave(&list->lock, flags);
3982 	list_for_each_entry_safe(item, next, &list->head, list) {
3983 		list_del(&item->list);
3984 		if (item == &item->vha->default_item)
3985 			continue;
3986 		kfree(item);
3987 	}
3988 	spin_unlock_irqrestore(&list->lock, flags);
3989 }
3990 
3991 static void
3992 qla2x00_free_device(scsi_qla_host_t *vha)
3993 {
3994 	struct qla_hw_data *ha = vha->hw;
3995 
3996 	qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
3997 
3998 	/* Disable timer */
3999 	if (vha->timer_active)
4000 		qla2x00_stop_timer(vha);
4001 
4002 	qla25xx_delete_queues(vha);
4003 	vha->flags.online = 0;
4004 
4005 	/* turn-off interrupts on the card */
4006 	if (ha->interrupts_on) {
4007 		vha->flags.init_done = 0;
4008 		ha->isp_ops->disable_intrs(ha);
4009 	}
4010 
4011 	qla2x00_free_fcports(vha);
4012 
4013 	qla2x00_free_irqs(vha);
4014 
4015 	/* Flush the work queue and remove it */
4016 	if (ha->wq) {
4017 		destroy_workqueue(ha->wq);
4018 		ha->wq = NULL;
4019 	}
4020 
4021 
4022 	qla24xx_free_purex_list(&vha->purex_list);
4023 
4024 	qla2x00_mem_free(ha);
4025 
4026 	qla82xx_md_free(vha);
4027 
4028 	qla_edif_sadb_release_free_pool(ha);
4029 	qla_edif_sadb_release(ha);
4030 
4031 	qla2x00_free_queues(ha);
4032 }
4033 
4034 void qla2x00_free_fcports(struct scsi_qla_host *vha)
4035 {
4036 	fc_port_t *fcport, *tfcport;
4037 
4038 	list_for_each_entry_safe(fcport, tfcport, &vha->vp_fcports, list)
4039 		qla2x00_free_fcport(fcport);
4040 }
4041 
4042 static inline void
4043 qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport)
4044 {
4045 	int now;
4046 
4047 	if (!fcport->rport)
4048 		return;
4049 
4050 	if (fcport->rport) {
4051 		ql_dbg(ql_dbg_disc, fcport->vha, 0x2109,
4052 		    "%s %8phN. rport %p roles %x\n",
4053 		    __func__, fcport->port_name, fcport->rport,
4054 		    fcport->rport->roles);
4055 		fc_remote_port_delete(fcport->rport);
4056 	}
4057 	qlt_do_generation_tick(vha, &now);
4058 }
4059 
4060 /*
4061  * qla2x00_mark_device_lost Updates fcport state when device goes offline.
4062  *
4063  * Input: ha = adapter block pointer.  fcport = port structure pointer.
4064  *
4065  * Return: None.
4066  *
4067  * Context:
4068  */
4069 void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport,
4070     int do_login)
4071 {
4072 	if (IS_QLAFX00(vha->hw)) {
4073 		qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
4074 		qla2x00_schedule_rport_del(vha, fcport);
4075 		return;
4076 	}
4077 
4078 	if (atomic_read(&fcport->state) == FCS_ONLINE &&
4079 	    vha->vp_idx == fcport->vha->vp_idx) {
4080 		qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
4081 		qla2x00_schedule_rport_del(vha, fcport);
4082 	}
4083 
4084 	/*
4085 	 * We may need to retry the login, so don't change the state of the
4086 	 * port but do the retries.
4087 	 */
4088 	if (atomic_read(&fcport->state) != FCS_DEVICE_DEAD)
4089 		qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
4090 
4091 	if (!do_login)
4092 		return;
4093 
4094 	set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
4095 }
4096 
4097 void
4098 qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha)
4099 {
4100 	fc_port_t *fcport;
4101 
4102 	ql_dbg(ql_dbg_disc, vha, 0x20f1,
4103 	    "Mark all dev lost\n");
4104 
4105 	list_for_each_entry(fcport, &vha->vp_fcports, list) {
4106 		if (ql2xfc2target &&
4107 		    fcport->loop_id != FC_NO_LOOP_ID &&
4108 		    (fcport->flags & FCF_FCP2_DEVICE) &&
4109 		    fcport->port_type == FCT_TARGET &&
4110 		    !qla2x00_reset_active(vha)) {
4111 			ql_dbg(ql_dbg_disc, vha, 0x211a,
4112 			       "Delaying session delete for FCP2 flags 0x%x port_type = 0x%x port_id=%06x %phC",
4113 			       fcport->flags, fcport->port_type,
4114 			       fcport->d_id.b24, fcport->port_name);
4115 			continue;
4116 		}
4117 		fcport->scan_state = 0;
4118 		qlt_schedule_sess_for_deletion(fcport);
4119 	}
4120 }
4121 
4122 static void qla2x00_set_reserved_loop_ids(struct qla_hw_data *ha)
4123 {
4124 	int i;
4125 
4126 	if (IS_FWI2_CAPABLE(ha))
4127 		return;
4128 
4129 	for (i = 0; i < SNS_FIRST_LOOP_ID; i++)
4130 		set_bit(i, ha->loop_id_map);
4131 	set_bit(MANAGEMENT_SERVER, ha->loop_id_map);
4132 	set_bit(BROADCAST, ha->loop_id_map);
4133 }
4134 
4135 /*
4136 * qla2x00_mem_alloc
4137 *      Allocates adapter memory.
4138 *
4139 * Returns:
4140 *      0  = success.
4141 *      !0  = failure.
4142 */
4143 static int
4144 qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
4145 	struct req_que **req, struct rsp_que **rsp)
4146 {
4147 	char	name[16];
4148 	int rc;
4149 
4150 	if (QLA_TGT_MODE_ENABLED() || EDIF_CAP(ha)) {
4151 		ha->vp_map = kcalloc(MAX_MULTI_ID_FABRIC, sizeof(struct qla_vp_map), GFP_KERNEL);
4152 		if (!ha->vp_map)
4153 			goto fail;
4154 	}
4155 
4156 	ha->init_cb = dma_alloc_coherent(&ha->pdev->dev, ha->init_cb_size,
4157 		&ha->init_cb_dma, GFP_KERNEL);
4158 	if (!ha->init_cb)
4159 		goto fail_free_vp_map;
4160 
4161 	rc = btree_init32(&ha->host_map);
4162 	if (rc)
4163 		goto fail_free_init_cb;
4164 
4165 	if (qlt_mem_alloc(ha) < 0)
4166 		goto fail_free_btree;
4167 
4168 	ha->gid_list = dma_alloc_coherent(&ha->pdev->dev,
4169 		qla2x00_gid_list_size(ha), &ha->gid_list_dma, GFP_KERNEL);
4170 	if (!ha->gid_list)
4171 		goto fail_free_tgt_mem;
4172 
4173 	ha->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep);
4174 	if (!ha->srb_mempool)
4175 		goto fail_free_gid_list;
4176 
4177 	if (IS_P3P_TYPE(ha) || IS_QLA27XX(ha) || (ql2xsecenable && IS_QLA28XX(ha))) {
4178 		/* Allocate cache for CT6 Ctx. */
4179 		if (!ctx_cachep) {
4180 			ctx_cachep = kmem_cache_create("qla2xxx_ctx",
4181 				sizeof(struct ct6_dsd), 0,
4182 				SLAB_HWCACHE_ALIGN, NULL);
4183 			if (!ctx_cachep)
4184 				goto fail_free_srb_mempool;
4185 		}
4186 		ha->ctx_mempool = mempool_create_slab_pool(SRB_MIN_REQ,
4187 			ctx_cachep);
4188 		if (!ha->ctx_mempool)
4189 			goto fail_free_srb_mempool;
4190 		ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0021,
4191 		    "ctx_cachep=%p ctx_mempool=%p.\n",
4192 		    ctx_cachep, ha->ctx_mempool);
4193 	}
4194 
4195 	/* Get memory for cached NVRAM */
4196 	ha->nvram = kzalloc(MAX_NVRAM_SIZE, GFP_KERNEL);
4197 	if (!ha->nvram)
4198 		goto fail_free_ctx_mempool;
4199 
4200 	snprintf(name, sizeof(name), "%s_%d", QLA2XXX_DRIVER_NAME,
4201 		ha->pdev->device);
4202 	ha->s_dma_pool = dma_pool_create(name, &ha->pdev->dev,
4203 		DMA_POOL_SIZE, 8, 0);
4204 	if (!ha->s_dma_pool)
4205 		goto fail_free_nvram;
4206 
4207 	ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0022,
4208 	    "init_cb=%p gid_list=%p, srb_mempool=%p s_dma_pool=%p.\n",
4209 	    ha->init_cb, ha->gid_list, ha->srb_mempool, ha->s_dma_pool);
4210 
4211 	if (IS_P3P_TYPE(ha) || ql2xenabledif || (IS_QLA28XX(ha) && ql2xsecenable)) {
4212 		ha->dl_dma_pool = dma_pool_create(name, &ha->pdev->dev,
4213 			DSD_LIST_DMA_POOL_SIZE, 8, 0);
4214 		if (!ha->dl_dma_pool) {
4215 			ql_log_pci(ql_log_fatal, ha->pdev, 0x0023,
4216 			    "Failed to allocate memory for dl_dma_pool.\n");
4217 			goto fail_s_dma_pool;
4218 		}
4219 
4220 		ha->fcp_cmnd_dma_pool = dma_pool_create(name, &ha->pdev->dev,
4221 			FCP_CMND_DMA_POOL_SIZE, 8, 0);
4222 		if (!ha->fcp_cmnd_dma_pool) {
4223 			ql_log_pci(ql_log_fatal, ha->pdev, 0x0024,
4224 			    "Failed to allocate memory for fcp_cmnd_dma_pool.\n");
4225 			goto fail_dl_dma_pool;
4226 		}
4227 
4228 		if (ql2xenabledif) {
4229 			u64 bufsize = DIF_BUNDLING_DMA_POOL_SIZE;
4230 			struct dsd_dma *dsd, *nxt;
4231 			uint i;
4232 			/* Creata a DMA pool of buffers for DIF bundling */
4233 			ha->dif_bundl_pool = dma_pool_create(name,
4234 			    &ha->pdev->dev, DIF_BUNDLING_DMA_POOL_SIZE, 8, 0);
4235 			if (!ha->dif_bundl_pool) {
4236 				ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0024,
4237 				    "%s: failed create dif_bundl_pool\n",
4238 				    __func__);
4239 				goto fail_dif_bundl_dma_pool;
4240 			}
4241 
4242 			INIT_LIST_HEAD(&ha->pool.good.head);
4243 			INIT_LIST_HEAD(&ha->pool.unusable.head);
4244 			ha->pool.good.count = 0;
4245 			ha->pool.unusable.count = 0;
4246 			for (i = 0; i < 128; i++) {
4247 				dsd = kzalloc(sizeof(*dsd), GFP_ATOMIC);
4248 				if (!dsd) {
4249 					ql_dbg_pci(ql_dbg_init, ha->pdev,
4250 					    0xe0ee, "%s: failed alloc dsd\n",
4251 					    __func__);
4252 					return -ENOMEM;
4253 				}
4254 				ha->dif_bundle_kallocs++;
4255 
4256 				dsd->dsd_addr = dma_pool_alloc(
4257 				    ha->dif_bundl_pool, GFP_ATOMIC,
4258 				    &dsd->dsd_list_dma);
4259 				if (!dsd->dsd_addr) {
4260 					ql_dbg_pci(ql_dbg_init, ha->pdev,
4261 					    0xe0ee,
4262 					    "%s: failed alloc ->dsd_addr\n",
4263 					    __func__);
4264 					kfree(dsd);
4265 					ha->dif_bundle_kallocs--;
4266 					continue;
4267 				}
4268 				ha->dif_bundle_dma_allocs++;
4269 
4270 				/*
4271 				 * if DMA buffer crosses 4G boundary,
4272 				 * put it on bad list
4273 				 */
4274 				if (MSD(dsd->dsd_list_dma) ^
4275 				    MSD(dsd->dsd_list_dma + bufsize)) {
4276 					list_add_tail(&dsd->list,
4277 					    &ha->pool.unusable.head);
4278 					ha->pool.unusable.count++;
4279 				} else {
4280 					list_add_tail(&dsd->list,
4281 					    &ha->pool.good.head);
4282 					ha->pool.good.count++;
4283 				}
4284 			}
4285 
4286 			/* return the good ones back to the pool */
4287 			list_for_each_entry_safe(dsd, nxt,
4288 			    &ha->pool.good.head, list) {
4289 				list_del(&dsd->list);
4290 				dma_pool_free(ha->dif_bundl_pool,
4291 				    dsd->dsd_addr, dsd->dsd_list_dma);
4292 				ha->dif_bundle_dma_allocs--;
4293 				kfree(dsd);
4294 				ha->dif_bundle_kallocs--;
4295 			}
4296 
4297 			ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0024,
4298 			    "%s: dif dma pool (good=%u unusable=%u)\n",
4299 			    __func__, ha->pool.good.count,
4300 			    ha->pool.unusable.count);
4301 		}
4302 
4303 		ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0025,
4304 		    "dl_dma_pool=%p fcp_cmnd_dma_pool=%p dif_bundl_pool=%p.\n",
4305 		    ha->dl_dma_pool, ha->fcp_cmnd_dma_pool,
4306 		    ha->dif_bundl_pool);
4307 	}
4308 
4309 	/* Allocate memory for SNS commands */
4310 	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
4311 	/* Get consistent memory allocated for SNS commands */
4312 		ha->sns_cmd = dma_alloc_coherent(&ha->pdev->dev,
4313 		sizeof(struct sns_cmd_pkt), &ha->sns_cmd_dma, GFP_KERNEL);
4314 		if (!ha->sns_cmd)
4315 			goto fail_dma_pool;
4316 		ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0026,
4317 		    "sns_cmd: %p.\n", ha->sns_cmd);
4318 	} else {
4319 	/* Get consistent memory allocated for MS IOCB */
4320 		ha->ms_iocb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
4321 			&ha->ms_iocb_dma);
4322 		if (!ha->ms_iocb)
4323 			goto fail_dma_pool;
4324 	/* Get consistent memory allocated for CT SNS commands */
4325 		ha->ct_sns = dma_alloc_coherent(&ha->pdev->dev,
4326 			sizeof(struct ct_sns_pkt), &ha->ct_sns_dma, GFP_KERNEL);
4327 		if (!ha->ct_sns)
4328 			goto fail_free_ms_iocb;
4329 		ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0027,
4330 		    "ms_iocb=%p ct_sns=%p.\n",
4331 		    ha->ms_iocb, ha->ct_sns);
4332 	}
4333 
4334 	/* Allocate memory for request ring */
4335 	*req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
4336 	if (!*req) {
4337 		ql_log_pci(ql_log_fatal, ha->pdev, 0x0028,
4338 		    "Failed to allocate memory for req.\n");
4339 		goto fail_req;
4340 	}
4341 	(*req)->length = req_len;
4342 	(*req)->ring = dma_alloc_coherent(&ha->pdev->dev,
4343 		((*req)->length + 1) * sizeof(request_t),
4344 		&(*req)->dma, GFP_KERNEL);
4345 	if (!(*req)->ring) {
4346 		ql_log_pci(ql_log_fatal, ha->pdev, 0x0029,
4347 		    "Failed to allocate memory for req_ring.\n");
4348 		goto fail_req_ring;
4349 	}
4350 	/* Allocate memory for response ring */
4351 	*rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
4352 	if (!*rsp) {
4353 		ql_log_pci(ql_log_fatal, ha->pdev, 0x002a,
4354 		    "Failed to allocate memory for rsp.\n");
4355 		goto fail_rsp;
4356 	}
4357 	(*rsp)->hw = ha;
4358 	(*rsp)->length = rsp_len;
4359 	(*rsp)->ring = dma_alloc_coherent(&ha->pdev->dev,
4360 		((*rsp)->length + 1) * sizeof(response_t),
4361 		&(*rsp)->dma, GFP_KERNEL);
4362 	if (!(*rsp)->ring) {
4363 		ql_log_pci(ql_log_fatal, ha->pdev, 0x002b,
4364 		    "Failed to allocate memory for rsp_ring.\n");
4365 		goto fail_rsp_ring;
4366 	}
4367 	(*req)->rsp = *rsp;
4368 	(*rsp)->req = *req;
4369 	ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002c,
4370 	    "req=%p req->length=%d req->ring=%p rsp=%p "
4371 	    "rsp->length=%d rsp->ring=%p.\n",
4372 	    *req, (*req)->length, (*req)->ring, *rsp, (*rsp)->length,
4373 	    (*rsp)->ring);
4374 	/* Allocate memory for NVRAM data for vports */
4375 	if (ha->nvram_npiv_size) {
4376 		ha->npiv_info = kcalloc(ha->nvram_npiv_size,
4377 					sizeof(struct qla_npiv_entry),
4378 					GFP_KERNEL);
4379 		if (!ha->npiv_info) {
4380 			ql_log_pci(ql_log_fatal, ha->pdev, 0x002d,
4381 			    "Failed to allocate memory for npiv_info.\n");
4382 			goto fail_npiv_info;
4383 		}
4384 	} else
4385 		ha->npiv_info = NULL;
4386 
4387 	/* Get consistent memory allocated for EX-INIT-CB. */
4388 	if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) ||
4389 	    IS_QLA28XX(ha)) {
4390 		ha->ex_init_cb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
4391 		    &ha->ex_init_cb_dma);
4392 		if (!ha->ex_init_cb)
4393 			goto fail_ex_init_cb;
4394 		ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002e,
4395 		    "ex_init_cb=%p.\n", ha->ex_init_cb);
4396 	}
4397 
4398 	/* Get consistent memory allocated for Special Features-CB. */
4399 	if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
4400 		ha->sf_init_cb = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL,
4401 						&ha->sf_init_cb_dma);
4402 		if (!ha->sf_init_cb)
4403 			goto fail_sf_init_cb;
4404 		ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0199,
4405 			   "sf_init_cb=%p.\n", ha->sf_init_cb);
4406 	}
4407 
4408 	INIT_LIST_HEAD(&ha->gbl_dsd_list);
4409 
4410 	/* Get consistent memory allocated for Async Port-Database. */
4411 	if (!IS_FWI2_CAPABLE(ha)) {
4412 		ha->async_pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
4413 			&ha->async_pd_dma);
4414 		if (!ha->async_pd)
4415 			goto fail_async_pd;
4416 		ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002f,
4417 		    "async_pd=%p.\n", ha->async_pd);
4418 	}
4419 
4420 	INIT_LIST_HEAD(&ha->vp_list);
4421 
4422 	/* Allocate memory for our loop_id bitmap */
4423 	ha->loop_id_map = kcalloc(BITS_TO_LONGS(LOOPID_MAP_SIZE),
4424 				  sizeof(long),
4425 				  GFP_KERNEL);
4426 	if (!ha->loop_id_map)
4427 		goto fail_loop_id_map;
4428 	else {
4429 		qla2x00_set_reserved_loop_ids(ha);
4430 		ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0123,
4431 		    "loop_id_map=%p.\n", ha->loop_id_map);
4432 	}
4433 
4434 	ha->sfp_data = dma_alloc_coherent(&ha->pdev->dev,
4435 	    SFP_DEV_SIZE, &ha->sfp_data_dma, GFP_KERNEL);
4436 	if (!ha->sfp_data) {
4437 		ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011b,
4438 		    "Unable to allocate memory for SFP read-data.\n");
4439 		goto fail_sfp_data;
4440 	}
4441 
4442 	ha->flt = dma_alloc_coherent(&ha->pdev->dev,
4443 	    sizeof(struct qla_flt_header) + FLT_REGIONS_SIZE, &ha->flt_dma,
4444 	    GFP_KERNEL);
4445 	if (!ha->flt) {
4446 		ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011b,
4447 		    "Unable to allocate memory for FLT.\n");
4448 		goto fail_flt_buffer;
4449 	}
4450 
4451 	/* allocate the purex dma pool */
4452 	ha->purex_dma_pool = dma_pool_create(name, &ha->pdev->dev,
4453 	    ELS_MAX_PAYLOAD, 8, 0);
4454 
4455 	if (!ha->purex_dma_pool) {
4456 		ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011b,
4457 		    "Unable to allocate purex_dma_pool.\n");
4458 		goto fail_flt;
4459 	}
4460 
4461 	ha->elsrej.size = sizeof(struct fc_els_ls_rjt) + 16;
4462 	ha->elsrej.c = dma_alloc_coherent(&ha->pdev->dev,
4463 	    ha->elsrej.size, &ha->elsrej.cdma, GFP_KERNEL);
4464 
4465 	if (!ha->elsrej.c) {
4466 		ql_dbg_pci(ql_dbg_init, ha->pdev, 0xffff,
4467 		    "Alloc failed for els reject cmd.\n");
4468 		goto fail_elsrej;
4469 	}
4470 	ha->elsrej.c->er_cmd = ELS_LS_RJT;
4471 	ha->elsrej.c->er_reason = ELS_RJT_LOGIC;
4472 	ha->elsrej.c->er_explan = ELS_EXPL_UNAB_DATA;
4473 	return 0;
4474 
4475 fail_elsrej:
4476 	dma_pool_destroy(ha->purex_dma_pool);
4477 fail_flt:
4478 	dma_free_coherent(&ha->pdev->dev, SFP_DEV_SIZE,
4479 	    ha->flt, ha->flt_dma);
4480 
4481 fail_flt_buffer:
4482 	dma_free_coherent(&ha->pdev->dev, SFP_DEV_SIZE,
4483 	    ha->sfp_data, ha->sfp_data_dma);
4484 fail_sfp_data:
4485 	kfree(ha->loop_id_map);
4486 fail_loop_id_map:
4487 	dma_pool_free(ha->s_dma_pool, ha->async_pd, ha->async_pd_dma);
4488 fail_async_pd:
4489 	dma_pool_free(ha->s_dma_pool, ha->sf_init_cb, ha->sf_init_cb_dma);
4490 fail_sf_init_cb:
4491 	dma_pool_free(ha->s_dma_pool, ha->ex_init_cb, ha->ex_init_cb_dma);
4492 fail_ex_init_cb:
4493 	kfree(ha->npiv_info);
4494 fail_npiv_info:
4495 	dma_free_coherent(&ha->pdev->dev, ((*rsp)->length + 1) *
4496 		sizeof(response_t), (*rsp)->ring, (*rsp)->dma);
4497 	(*rsp)->ring = NULL;
4498 	(*rsp)->dma = 0;
4499 fail_rsp_ring:
4500 	kfree(*rsp);
4501 	*rsp = NULL;
4502 fail_rsp:
4503 	dma_free_coherent(&ha->pdev->dev, ((*req)->length + 1) *
4504 		sizeof(request_t), (*req)->ring, (*req)->dma);
4505 	(*req)->ring = NULL;
4506 	(*req)->dma = 0;
4507 fail_req_ring:
4508 	kfree(*req);
4509 	*req = NULL;
4510 fail_req:
4511 	dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt),
4512 		ha->ct_sns, ha->ct_sns_dma);
4513 	ha->ct_sns = NULL;
4514 	ha->ct_sns_dma = 0;
4515 fail_free_ms_iocb:
4516 	dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
4517 	ha->ms_iocb = NULL;
4518 	ha->ms_iocb_dma = 0;
4519 
4520 	if (ha->sns_cmd)
4521 		dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt),
4522 		    ha->sns_cmd, ha->sns_cmd_dma);
4523 fail_dma_pool:
4524 	if (ql2xenabledif) {
4525 		struct dsd_dma *dsd, *nxt;
4526 
4527 		list_for_each_entry_safe(dsd, nxt, &ha->pool.unusable.head,
4528 		    list) {
4529 			list_del(&dsd->list);
4530 			dma_pool_free(ha->dif_bundl_pool, dsd->dsd_addr,
4531 			    dsd->dsd_list_dma);
4532 			ha->dif_bundle_dma_allocs--;
4533 			kfree(dsd);
4534 			ha->dif_bundle_kallocs--;
4535 			ha->pool.unusable.count--;
4536 		}
4537 		dma_pool_destroy(ha->dif_bundl_pool);
4538 		ha->dif_bundl_pool = NULL;
4539 	}
4540 
4541 fail_dif_bundl_dma_pool:
4542 	if (IS_QLA82XX(ha) || ql2xenabledif) {
4543 		dma_pool_destroy(ha->fcp_cmnd_dma_pool);
4544 		ha->fcp_cmnd_dma_pool = NULL;
4545 	}
4546 fail_dl_dma_pool:
4547 	if (IS_QLA82XX(ha) || ql2xenabledif) {
4548 		dma_pool_destroy(ha->dl_dma_pool);
4549 		ha->dl_dma_pool = NULL;
4550 	}
4551 fail_s_dma_pool:
4552 	dma_pool_destroy(ha->s_dma_pool);
4553 	ha->s_dma_pool = NULL;
4554 fail_free_nvram:
4555 	kfree(ha->nvram);
4556 	ha->nvram = NULL;
4557 fail_free_ctx_mempool:
4558 	mempool_destroy(ha->ctx_mempool);
4559 	ha->ctx_mempool = NULL;
4560 fail_free_srb_mempool:
4561 	mempool_destroy(ha->srb_mempool);
4562 	ha->srb_mempool = NULL;
4563 fail_free_gid_list:
4564 	dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
4565 	ha->gid_list,
4566 	ha->gid_list_dma);
4567 	ha->gid_list = NULL;
4568 	ha->gid_list_dma = 0;
4569 fail_free_tgt_mem:
4570 	qlt_mem_free(ha);
4571 fail_free_btree:
4572 	btree_destroy32(&ha->host_map);
4573 fail_free_init_cb:
4574 	dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, ha->init_cb,
4575 	ha->init_cb_dma);
4576 	ha->init_cb = NULL;
4577 	ha->init_cb_dma = 0;
4578 fail_free_vp_map:
4579 	kfree(ha->vp_map);
4580 fail:
4581 	ql_log(ql_log_fatal, NULL, 0x0030,
4582 	    "Memory allocation failure.\n");
4583 	return -ENOMEM;
4584 }
4585 
4586 int
4587 qla2x00_set_exlogins_buffer(scsi_qla_host_t *vha)
4588 {
4589 	int rval;
4590 	uint16_t	size, max_cnt;
4591 	uint32_t temp;
4592 	struct qla_hw_data *ha = vha->hw;
4593 
4594 	/* Return if we don't need to alloacate any extended logins */
4595 	if (ql2xexlogins <= MAX_FIBRE_DEVICES_2400)
4596 		return QLA_SUCCESS;
4597 
4598 	if (!IS_EXLOGIN_OFFLD_CAPABLE(ha))
4599 		return QLA_SUCCESS;
4600 
4601 	ql_log(ql_log_info, vha, 0xd021, "EXLOGIN count: %d.\n", ql2xexlogins);
4602 	max_cnt = 0;
4603 	rval = qla_get_exlogin_status(vha, &size, &max_cnt);
4604 	if (rval != QLA_SUCCESS) {
4605 		ql_log_pci(ql_log_fatal, ha->pdev, 0xd029,
4606 		    "Failed to get exlogin status.\n");
4607 		return rval;
4608 	}
4609 
4610 	temp = (ql2xexlogins > max_cnt) ? max_cnt : ql2xexlogins;
4611 	temp *= size;
4612 
4613 	if (temp != ha->exlogin_size) {
4614 		qla2x00_free_exlogin_buffer(ha);
4615 		ha->exlogin_size = temp;
4616 
4617 		ql_log(ql_log_info, vha, 0xd024,
4618 		    "EXLOGIN: max_logins=%d, portdb=0x%x, total=%d.\n",
4619 		    max_cnt, size, temp);
4620 
4621 		ql_log(ql_log_info, vha, 0xd025,
4622 		    "EXLOGIN: requested size=0x%x\n", ha->exlogin_size);
4623 
4624 		/* Get consistent memory for extended logins */
4625 		ha->exlogin_buf = dma_alloc_coherent(&ha->pdev->dev,
4626 			ha->exlogin_size, &ha->exlogin_buf_dma, GFP_KERNEL);
4627 		if (!ha->exlogin_buf) {
4628 			ql_log_pci(ql_log_fatal, ha->pdev, 0xd02a,
4629 		    "Failed to allocate memory for exlogin_buf_dma.\n");
4630 			return -ENOMEM;
4631 		}
4632 	}
4633 
4634 	/* Now configure the dma buffer */
4635 	rval = qla_set_exlogin_mem_cfg(vha, ha->exlogin_buf_dma);
4636 	if (rval) {
4637 		ql_log(ql_log_fatal, vha, 0xd033,
4638 		    "Setup extended login buffer  ****FAILED****.\n");
4639 		qla2x00_free_exlogin_buffer(ha);
4640 	}
4641 
4642 	return rval;
4643 }
4644 
4645 /*
4646 * qla2x00_free_exlogin_buffer
4647 *
4648 * Input:
4649 *	ha = adapter block pointer
4650 */
4651 void
4652 qla2x00_free_exlogin_buffer(struct qla_hw_data *ha)
4653 {
4654 	if (ha->exlogin_buf) {
4655 		dma_free_coherent(&ha->pdev->dev, ha->exlogin_size,
4656 		    ha->exlogin_buf, ha->exlogin_buf_dma);
4657 		ha->exlogin_buf = NULL;
4658 		ha->exlogin_size = 0;
4659 	}
4660 }
4661 
4662 static void
4663 qla2x00_number_of_exch(scsi_qla_host_t *vha, u32 *ret_cnt, u16 max_cnt)
4664 {
4665 	u32 temp;
4666 	struct init_cb_81xx *icb = (struct init_cb_81xx *)&vha->hw->init_cb;
4667 	*ret_cnt = FW_DEF_EXCHANGES_CNT;
4668 
4669 	if (max_cnt > vha->hw->max_exchg)
4670 		max_cnt = vha->hw->max_exchg;
4671 
4672 	if (qla_ini_mode_enabled(vha)) {
4673 		if (vha->ql2xiniexchg > max_cnt)
4674 			vha->ql2xiniexchg = max_cnt;
4675 
4676 		if (vha->ql2xiniexchg > FW_DEF_EXCHANGES_CNT)
4677 			*ret_cnt = vha->ql2xiniexchg;
4678 
4679 	} else if (qla_tgt_mode_enabled(vha)) {
4680 		if (vha->ql2xexchoffld > max_cnt) {
4681 			vha->ql2xexchoffld = max_cnt;
4682 			icb->exchange_count = cpu_to_le16(vha->ql2xexchoffld);
4683 		}
4684 
4685 		if (vha->ql2xexchoffld > FW_DEF_EXCHANGES_CNT)
4686 			*ret_cnt = vha->ql2xexchoffld;
4687 	} else if (qla_dual_mode_enabled(vha)) {
4688 		temp = vha->ql2xiniexchg + vha->ql2xexchoffld;
4689 		if (temp > max_cnt) {
4690 			vha->ql2xiniexchg -= (temp - max_cnt)/2;
4691 			vha->ql2xexchoffld -= (((temp - max_cnt)/2) + 1);
4692 			temp = max_cnt;
4693 			icb->exchange_count = cpu_to_le16(vha->ql2xexchoffld);
4694 		}
4695 
4696 		if (temp > FW_DEF_EXCHANGES_CNT)
4697 			*ret_cnt = temp;
4698 	}
4699 }
4700 
4701 int
4702 qla2x00_set_exchoffld_buffer(scsi_qla_host_t *vha)
4703 {
4704 	int rval;
4705 	u16	size, max_cnt;
4706 	u32 actual_cnt, totsz;
4707 	struct qla_hw_data *ha = vha->hw;
4708 
4709 	if (!ha->flags.exchoffld_enabled)
4710 		return QLA_SUCCESS;
4711 
4712 	if (!IS_EXCHG_OFFLD_CAPABLE(ha))
4713 		return QLA_SUCCESS;
4714 
4715 	max_cnt = 0;
4716 	rval = qla_get_exchoffld_status(vha, &size, &max_cnt);
4717 	if (rval != QLA_SUCCESS) {
4718 		ql_log_pci(ql_log_fatal, ha->pdev, 0xd012,
4719 		    "Failed to get exlogin status.\n");
4720 		return rval;
4721 	}
4722 
4723 	qla2x00_number_of_exch(vha, &actual_cnt, max_cnt);
4724 	ql_log(ql_log_info, vha, 0xd014,
4725 	    "Actual exchange offload count: %d.\n", actual_cnt);
4726 
4727 	totsz = actual_cnt * size;
4728 
4729 	if (totsz != ha->exchoffld_size) {
4730 		qla2x00_free_exchoffld_buffer(ha);
4731 		if (actual_cnt <= FW_DEF_EXCHANGES_CNT) {
4732 			ha->exchoffld_size = 0;
4733 			ha->flags.exchoffld_enabled = 0;
4734 			return QLA_SUCCESS;
4735 		}
4736 
4737 		ha->exchoffld_size = totsz;
4738 
4739 		ql_log(ql_log_info, vha, 0xd016,
4740 		    "Exchange offload: max_count=%d, actual count=%d entry sz=0x%x, total sz=0x%x\n",
4741 		    max_cnt, actual_cnt, size, totsz);
4742 
4743 		ql_log(ql_log_info, vha, 0xd017,
4744 		    "Exchange Buffers requested size = 0x%x\n",
4745 		    ha->exchoffld_size);
4746 
4747 		/* Get consistent memory for extended logins */
4748 		ha->exchoffld_buf = dma_alloc_coherent(&ha->pdev->dev,
4749 			ha->exchoffld_size, &ha->exchoffld_buf_dma, GFP_KERNEL);
4750 		if (!ha->exchoffld_buf) {
4751 			ql_log_pci(ql_log_fatal, ha->pdev, 0xd013,
4752 			"Failed to allocate memory for Exchange Offload.\n");
4753 
4754 			if (ha->max_exchg >
4755 			    (FW_DEF_EXCHANGES_CNT + REDUCE_EXCHANGES_CNT)) {
4756 				ha->max_exchg -= REDUCE_EXCHANGES_CNT;
4757 			} else if (ha->max_exchg >
4758 			    (FW_DEF_EXCHANGES_CNT + 512)) {
4759 				ha->max_exchg -= 512;
4760 			} else {
4761 				ha->flags.exchoffld_enabled = 0;
4762 				ql_log_pci(ql_log_fatal, ha->pdev, 0xd013,
4763 				    "Disabling Exchange offload due to lack of memory\n");
4764 			}
4765 			ha->exchoffld_size = 0;
4766 
4767 			return -ENOMEM;
4768 		}
4769 	} else if (!ha->exchoffld_buf || (actual_cnt <= FW_DEF_EXCHANGES_CNT)) {
4770 		/* pathological case */
4771 		qla2x00_free_exchoffld_buffer(ha);
4772 		ha->exchoffld_size = 0;
4773 		ha->flags.exchoffld_enabled = 0;
4774 		ql_log(ql_log_info, vha, 0xd016,
4775 		    "Exchange offload not enable: offld size=%d, actual count=%d entry sz=0x%x, total sz=0x%x.\n",
4776 		    ha->exchoffld_size, actual_cnt, size, totsz);
4777 		return 0;
4778 	}
4779 
4780 	/* Now configure the dma buffer */
4781 	rval = qla_set_exchoffld_mem_cfg(vha);
4782 	if (rval) {
4783 		ql_log(ql_log_fatal, vha, 0xd02e,
4784 		    "Setup exchange offload buffer ****FAILED****.\n");
4785 		qla2x00_free_exchoffld_buffer(ha);
4786 	} else {
4787 		/* re-adjust number of target exchange */
4788 		struct init_cb_81xx *icb = (struct init_cb_81xx *)ha->init_cb;
4789 
4790 		if (qla_ini_mode_enabled(vha))
4791 			icb->exchange_count = 0;
4792 		else
4793 			icb->exchange_count = cpu_to_le16(vha->ql2xexchoffld);
4794 	}
4795 
4796 	return rval;
4797 }
4798 
4799 /*
4800 * qla2x00_free_exchoffld_buffer
4801 *
4802 * Input:
4803 *	ha = adapter block pointer
4804 */
4805 void
4806 qla2x00_free_exchoffld_buffer(struct qla_hw_data *ha)
4807 {
4808 	if (ha->exchoffld_buf) {
4809 		dma_free_coherent(&ha->pdev->dev, ha->exchoffld_size,
4810 		    ha->exchoffld_buf, ha->exchoffld_buf_dma);
4811 		ha->exchoffld_buf = NULL;
4812 		ha->exchoffld_size = 0;
4813 	}
4814 }
4815 
4816 /*
4817 * qla2x00_free_fw_dump
4818 *	Frees fw dump stuff.
4819 *
4820 * Input:
4821 *	ha = adapter block pointer
4822 */
4823 static void
4824 qla2x00_free_fw_dump(struct qla_hw_data *ha)
4825 {
4826 	struct fwdt *fwdt = ha->fwdt;
4827 	uint j;
4828 
4829 	if (ha->fce)
4830 		dma_free_coherent(&ha->pdev->dev,
4831 		    FCE_SIZE, ha->fce, ha->fce_dma);
4832 
4833 	if (ha->eft)
4834 		dma_free_coherent(&ha->pdev->dev,
4835 		    EFT_SIZE, ha->eft, ha->eft_dma);
4836 
4837 	vfree(ha->fw_dump);
4838 
4839 	ha->fce = NULL;
4840 	ha->fce_dma = 0;
4841 	ha->flags.fce_enabled = 0;
4842 	ha->eft = NULL;
4843 	ha->eft_dma = 0;
4844 	ha->fw_dumped = false;
4845 	ha->fw_dump_cap_flags = 0;
4846 	ha->fw_dump_reading = 0;
4847 	ha->fw_dump = NULL;
4848 	ha->fw_dump_len = 0;
4849 
4850 	for (j = 0; j < 2; j++, fwdt++) {
4851 		vfree(fwdt->template);
4852 		fwdt->template = NULL;
4853 		fwdt->length = 0;
4854 	}
4855 }
4856 
4857 /*
4858 * qla2x00_mem_free
4859 *      Frees all adapter allocated memory.
4860 *
4861 * Input:
4862 *      ha = adapter block pointer.
4863 */
4864 static void
4865 qla2x00_mem_free(struct qla_hw_data *ha)
4866 {
4867 	qla2x00_free_fw_dump(ha);
4868 
4869 	if (ha->mctp_dump)
4870 		dma_free_coherent(&ha->pdev->dev, MCTP_DUMP_SIZE, ha->mctp_dump,
4871 		    ha->mctp_dump_dma);
4872 	ha->mctp_dump = NULL;
4873 
4874 	mempool_destroy(ha->srb_mempool);
4875 	ha->srb_mempool = NULL;
4876 
4877 	if (ha->dcbx_tlv)
4878 		dma_free_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
4879 		    ha->dcbx_tlv, ha->dcbx_tlv_dma);
4880 	ha->dcbx_tlv = NULL;
4881 
4882 	if (ha->xgmac_data)
4883 		dma_free_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
4884 		    ha->xgmac_data, ha->xgmac_data_dma);
4885 	ha->xgmac_data = NULL;
4886 
4887 	if (ha->sns_cmd)
4888 		dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt),
4889 		ha->sns_cmd, ha->sns_cmd_dma);
4890 	ha->sns_cmd = NULL;
4891 	ha->sns_cmd_dma = 0;
4892 
4893 	if (ha->ct_sns)
4894 		dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt),
4895 		ha->ct_sns, ha->ct_sns_dma);
4896 	ha->ct_sns = NULL;
4897 	ha->ct_sns_dma = 0;
4898 
4899 	if (ha->sfp_data)
4900 		dma_free_coherent(&ha->pdev->dev, SFP_DEV_SIZE, ha->sfp_data,
4901 		    ha->sfp_data_dma);
4902 	ha->sfp_data = NULL;
4903 
4904 	if (ha->flt)
4905 		dma_free_coherent(&ha->pdev->dev,
4906 		    sizeof(struct qla_flt_header) + FLT_REGIONS_SIZE,
4907 		    ha->flt, ha->flt_dma);
4908 	ha->flt = NULL;
4909 	ha->flt_dma = 0;
4910 
4911 	if (ha->ms_iocb)
4912 		dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
4913 	ha->ms_iocb = NULL;
4914 	ha->ms_iocb_dma = 0;
4915 
4916 	if (ha->sf_init_cb)
4917 		dma_pool_free(ha->s_dma_pool,
4918 			      ha->sf_init_cb, ha->sf_init_cb_dma);
4919 
4920 	if (ha->ex_init_cb)
4921 		dma_pool_free(ha->s_dma_pool,
4922 			ha->ex_init_cb, ha->ex_init_cb_dma);
4923 	ha->ex_init_cb = NULL;
4924 	ha->ex_init_cb_dma = 0;
4925 
4926 	if (ha->async_pd)
4927 		dma_pool_free(ha->s_dma_pool, ha->async_pd, ha->async_pd_dma);
4928 	ha->async_pd = NULL;
4929 	ha->async_pd_dma = 0;
4930 
4931 	dma_pool_destroy(ha->s_dma_pool);
4932 	ha->s_dma_pool = NULL;
4933 
4934 	if (ha->gid_list)
4935 		dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
4936 		ha->gid_list, ha->gid_list_dma);
4937 	ha->gid_list = NULL;
4938 	ha->gid_list_dma = 0;
4939 
4940 	if (IS_QLA82XX(ha)) {
4941 		if (!list_empty(&ha->gbl_dsd_list)) {
4942 			struct dsd_dma *dsd_ptr, *tdsd_ptr;
4943 
4944 			/* clean up allocated prev pool */
4945 			list_for_each_entry_safe(dsd_ptr,
4946 				tdsd_ptr, &ha->gbl_dsd_list, list) {
4947 				dma_pool_free(ha->dl_dma_pool,
4948 				dsd_ptr->dsd_addr, dsd_ptr->dsd_list_dma);
4949 				list_del(&dsd_ptr->list);
4950 				kfree(dsd_ptr);
4951 			}
4952 		}
4953 	}
4954 
4955 	dma_pool_destroy(ha->dl_dma_pool);
4956 	ha->dl_dma_pool = NULL;
4957 
4958 	dma_pool_destroy(ha->fcp_cmnd_dma_pool);
4959 	ha->fcp_cmnd_dma_pool = NULL;
4960 
4961 	mempool_destroy(ha->ctx_mempool);
4962 	ha->ctx_mempool = NULL;
4963 
4964 	if (ql2xenabledif && ha->dif_bundl_pool) {
4965 		struct dsd_dma *dsd, *nxt;
4966 
4967 		list_for_each_entry_safe(dsd, nxt, &ha->pool.unusable.head,
4968 					 list) {
4969 			list_del(&dsd->list);
4970 			dma_pool_free(ha->dif_bundl_pool, dsd->dsd_addr,
4971 				      dsd->dsd_list_dma);
4972 			ha->dif_bundle_dma_allocs--;
4973 			kfree(dsd);
4974 			ha->dif_bundle_kallocs--;
4975 			ha->pool.unusable.count--;
4976 		}
4977 		list_for_each_entry_safe(dsd, nxt, &ha->pool.good.head, list) {
4978 			list_del(&dsd->list);
4979 			dma_pool_free(ha->dif_bundl_pool, dsd->dsd_addr,
4980 				      dsd->dsd_list_dma);
4981 			ha->dif_bundle_dma_allocs--;
4982 			kfree(dsd);
4983 			ha->dif_bundle_kallocs--;
4984 		}
4985 	}
4986 
4987 	dma_pool_destroy(ha->dif_bundl_pool);
4988 	ha->dif_bundl_pool = NULL;
4989 
4990 	qlt_mem_free(ha);
4991 	qla_remove_hostmap(ha);
4992 
4993 	if (ha->init_cb)
4994 		dma_free_coherent(&ha->pdev->dev, ha->init_cb_size,
4995 			ha->init_cb, ha->init_cb_dma);
4996 
4997 	dma_pool_destroy(ha->purex_dma_pool);
4998 	ha->purex_dma_pool = NULL;
4999 
5000 	if (ha->elsrej.c) {
5001 		dma_free_coherent(&ha->pdev->dev, ha->elsrej.size,
5002 		    ha->elsrej.c, ha->elsrej.cdma);
5003 		ha->elsrej.c = NULL;
5004 	}
5005 
5006 	ha->init_cb = NULL;
5007 	ha->init_cb_dma = 0;
5008 
5009 	vfree(ha->optrom_buffer);
5010 	ha->optrom_buffer = NULL;
5011 	kfree(ha->nvram);
5012 	ha->nvram = NULL;
5013 	kfree(ha->npiv_info);
5014 	ha->npiv_info = NULL;
5015 	kfree(ha->swl);
5016 	ha->swl = NULL;
5017 	kfree(ha->loop_id_map);
5018 	ha->sf_init_cb = NULL;
5019 	ha->sf_init_cb_dma = 0;
5020 	ha->loop_id_map = NULL;
5021 
5022 	kfree(ha->vp_map);
5023 	ha->vp_map = NULL;
5024 }
5025 
5026 struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
5027 						struct qla_hw_data *ha)
5028 {
5029 	struct Scsi_Host *host;
5030 	struct scsi_qla_host *vha = NULL;
5031 
5032 	host = scsi_host_alloc(sht, sizeof(scsi_qla_host_t));
5033 	if (!host) {
5034 		ql_log_pci(ql_log_fatal, ha->pdev, 0x0107,
5035 		    "Failed to allocate host from the scsi layer, aborting.\n");
5036 		return NULL;
5037 	}
5038 
5039 	/* Clear our data area */
5040 	vha = shost_priv(host);
5041 	memset(vha, 0, sizeof(scsi_qla_host_t));
5042 
5043 	vha->host = host;
5044 	vha->host_no = host->host_no;
5045 	vha->hw = ha;
5046 
5047 	vha->qlini_mode = ql2x_ini_mode;
5048 	vha->ql2xexchoffld = ql2xexchoffld;
5049 	vha->ql2xiniexchg = ql2xiniexchg;
5050 
5051 	INIT_LIST_HEAD(&vha->vp_fcports);
5052 	INIT_LIST_HEAD(&vha->work_list);
5053 	INIT_LIST_HEAD(&vha->list);
5054 	INIT_LIST_HEAD(&vha->qla_cmd_list);
5055 	INIT_LIST_HEAD(&vha->logo_list);
5056 	INIT_LIST_HEAD(&vha->plogi_ack_list);
5057 	INIT_LIST_HEAD(&vha->qp_list);
5058 	INIT_LIST_HEAD(&vha->gnl.fcports);
5059 	INIT_WORK(&vha->iocb_work, qla2x00_iocb_work_fn);
5060 
5061 	INIT_LIST_HEAD(&vha->purex_list.head);
5062 	spin_lock_init(&vha->purex_list.lock);
5063 
5064 	spin_lock_init(&vha->work_lock);
5065 	spin_lock_init(&vha->cmd_list_lock);
5066 	init_waitqueue_head(&vha->fcport_waitQ);
5067 	init_waitqueue_head(&vha->vref_waitq);
5068 	qla_enode_init(vha);
5069 	qla_edb_init(vha);
5070 
5071 
5072 	vha->gnl.size = sizeof(struct get_name_list_extended) *
5073 			(ha->max_loop_id + 1);
5074 	vha->gnl.l = dma_alloc_coherent(&ha->pdev->dev,
5075 	    vha->gnl.size, &vha->gnl.ldma, GFP_KERNEL);
5076 	if (!vha->gnl.l) {
5077 		ql_log(ql_log_fatal, vha, 0xd04a,
5078 		    "Alloc failed for name list.\n");
5079 		scsi_host_put(vha->host);
5080 		return NULL;
5081 	}
5082 
5083 	/* todo: what about ext login? */
5084 	vha->scan.size = ha->max_fibre_devices * sizeof(struct fab_scan_rp);
5085 	vha->scan.l = vmalloc(vha->scan.size);
5086 	if (!vha->scan.l) {
5087 		ql_log(ql_log_fatal, vha, 0xd04a,
5088 		    "Alloc failed for scan database.\n");
5089 		dma_free_coherent(&ha->pdev->dev, vha->gnl.size,
5090 		    vha->gnl.l, vha->gnl.ldma);
5091 		vha->gnl.l = NULL;
5092 		scsi_host_put(vha->host);
5093 		return NULL;
5094 	}
5095 	INIT_DELAYED_WORK(&vha->scan.scan_work, qla_scan_work_fn);
5096 
5097 	sprintf(vha->host_str, "%s_%lu", QLA2XXX_DRIVER_NAME, vha->host_no);
5098 	ql_dbg(ql_dbg_init, vha, 0x0041,
5099 	    "Allocated the host=%p hw=%p vha=%p dev_name=%s",
5100 	    vha->host, vha->hw, vha,
5101 	    dev_name(&(ha->pdev->dev)));
5102 
5103 	return vha;
5104 }
5105 
5106 struct qla_work_evt *
5107 qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type)
5108 {
5109 	struct qla_work_evt *e;
5110 
5111 	if (test_bit(UNLOADING, &vha->dpc_flags))
5112 		return NULL;
5113 
5114 	if (qla_vha_mark_busy(vha))
5115 		return NULL;
5116 
5117 	e = kzalloc(sizeof(struct qla_work_evt), GFP_ATOMIC);
5118 	if (!e) {
5119 		QLA_VHA_MARK_NOT_BUSY(vha);
5120 		return NULL;
5121 	}
5122 
5123 	INIT_LIST_HEAD(&e->list);
5124 	e->type = type;
5125 	e->flags = QLA_EVT_FLAG_FREE;
5126 	return e;
5127 }
5128 
5129 int
5130 qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
5131 {
5132 	unsigned long flags;
5133 	bool q = false;
5134 
5135 	spin_lock_irqsave(&vha->work_lock, flags);
5136 	list_add_tail(&e->list, &vha->work_list);
5137 
5138 	if (!test_and_set_bit(IOCB_WORK_ACTIVE, &vha->dpc_flags))
5139 		q = true;
5140 
5141 	spin_unlock_irqrestore(&vha->work_lock, flags);
5142 
5143 	if (q)
5144 		queue_work(vha->hw->wq, &vha->iocb_work);
5145 
5146 	return QLA_SUCCESS;
5147 }
5148 
5149 int
5150 qla2x00_post_aen_work(struct scsi_qla_host *vha, enum fc_host_event_code code,
5151     u32 data)
5152 {
5153 	struct qla_work_evt *e;
5154 
5155 	e = qla2x00_alloc_work(vha, QLA_EVT_AEN);
5156 	if (!e)
5157 		return QLA_FUNCTION_FAILED;
5158 
5159 	e->u.aen.code = code;
5160 	e->u.aen.data = data;
5161 	return qla2x00_post_work(vha, e);
5162 }
5163 
5164 int
5165 qla2x00_post_idc_ack_work(struct scsi_qla_host *vha, uint16_t *mb)
5166 {
5167 	struct qla_work_evt *e;
5168 
5169 	e = qla2x00_alloc_work(vha, QLA_EVT_IDC_ACK);
5170 	if (!e)
5171 		return QLA_FUNCTION_FAILED;
5172 
5173 	memcpy(e->u.idc_ack.mb, mb, QLA_IDC_ACK_REGS * sizeof(uint16_t));
5174 	return qla2x00_post_work(vha, e);
5175 }
5176 
5177 #define qla2x00_post_async_work(name, type)	\
5178 int qla2x00_post_async_##name##_work(		\
5179     struct scsi_qla_host *vha,			\
5180     fc_port_t *fcport, uint16_t *data)		\
5181 {						\
5182 	struct qla_work_evt *e;			\
5183 						\
5184 	e = qla2x00_alloc_work(vha, type);	\
5185 	if (!e)					\
5186 		return QLA_FUNCTION_FAILED;	\
5187 						\
5188 	e->u.logio.fcport = fcport;		\
5189 	if (data) {				\
5190 		e->u.logio.data[0] = data[0];	\
5191 		e->u.logio.data[1] = data[1];	\
5192 	}					\
5193 	fcport->flags |= FCF_ASYNC_ACTIVE;	\
5194 	return qla2x00_post_work(vha, e);	\
5195 }
5196 
5197 qla2x00_post_async_work(login, QLA_EVT_ASYNC_LOGIN);
5198 qla2x00_post_async_work(logout, QLA_EVT_ASYNC_LOGOUT);
5199 qla2x00_post_async_work(adisc, QLA_EVT_ASYNC_ADISC);
5200 qla2x00_post_async_work(prlo, QLA_EVT_ASYNC_PRLO);
5201 qla2x00_post_async_work(prlo_done, QLA_EVT_ASYNC_PRLO_DONE);
5202 
5203 int
5204 qla2x00_post_uevent_work(struct scsi_qla_host *vha, u32 code)
5205 {
5206 	struct qla_work_evt *e;
5207 
5208 	e = qla2x00_alloc_work(vha, QLA_EVT_UEVENT);
5209 	if (!e)
5210 		return QLA_FUNCTION_FAILED;
5211 
5212 	e->u.uevent.code = code;
5213 	return qla2x00_post_work(vha, e);
5214 }
5215 
5216 static void
5217 qla2x00_uevent_emit(struct scsi_qla_host *vha, u32 code)
5218 {
5219 	char event_string[40];
5220 	char *envp[] = { event_string, NULL };
5221 
5222 	switch (code) {
5223 	case QLA_UEVENT_CODE_FW_DUMP:
5224 		snprintf(event_string, sizeof(event_string), "FW_DUMP=%lu",
5225 		    vha->host_no);
5226 		break;
5227 	default:
5228 		/* do nothing */
5229 		break;
5230 	}
5231 	kobject_uevent_env(&vha->hw->pdev->dev.kobj, KOBJ_CHANGE, envp);
5232 }
5233 
5234 int
5235 qlafx00_post_aenfx_work(struct scsi_qla_host *vha,  uint32_t evtcode,
5236 			uint32_t *data, int cnt)
5237 {
5238 	struct qla_work_evt *e;
5239 
5240 	e = qla2x00_alloc_work(vha, QLA_EVT_AENFX);
5241 	if (!e)
5242 		return QLA_FUNCTION_FAILED;
5243 
5244 	e->u.aenfx.evtcode = evtcode;
5245 	e->u.aenfx.count = cnt;
5246 	memcpy(e->u.aenfx.mbx, data, sizeof(*data) * cnt);
5247 	return qla2x00_post_work(vha, e);
5248 }
5249 
5250 void qla24xx_sched_upd_fcport(fc_port_t *fcport)
5251 {
5252 	unsigned long flags;
5253 
5254 	if (IS_SW_RESV_ADDR(fcport->d_id))
5255 		return;
5256 
5257 	spin_lock_irqsave(&fcport->vha->work_lock, flags);
5258 	if (fcport->disc_state == DSC_UPD_FCPORT) {
5259 		spin_unlock_irqrestore(&fcport->vha->work_lock, flags);
5260 		return;
5261 	}
5262 	fcport->jiffies_at_registration = jiffies;
5263 	fcport->sec_since_registration = 0;
5264 	fcport->next_disc_state = DSC_DELETED;
5265 	qla2x00_set_fcport_disc_state(fcport, DSC_UPD_FCPORT);
5266 	spin_unlock_irqrestore(&fcport->vha->work_lock, flags);
5267 
5268 	queue_work(system_unbound_wq, &fcport->reg_work);
5269 }
5270 
5271 static
5272 void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
5273 {
5274 	unsigned long flags;
5275 	fc_port_t *fcport =  NULL, *tfcp;
5276 	struct qlt_plogi_ack_t *pla =
5277 	    (struct qlt_plogi_ack_t *)e->u.new_sess.pla;
5278 	uint8_t free_fcport = 0;
5279 
5280 	ql_dbg(ql_dbg_disc, vha, 0xffff,
5281 	    "%s %d %8phC enter\n",
5282 	    __func__, __LINE__, e->u.new_sess.port_name);
5283 
5284 	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
5285 	fcport = qla2x00_find_fcport_by_wwpn(vha, e->u.new_sess.port_name, 1);
5286 	if (fcport) {
5287 		fcport->d_id = e->u.new_sess.id;
5288 		if (pla) {
5289 			fcport->fw_login_state = DSC_LS_PLOGI_PEND;
5290 			memcpy(fcport->node_name,
5291 			    pla->iocb.u.isp24.u.plogi.node_name,
5292 			    WWN_SIZE);
5293 			qlt_plogi_ack_link(vha, pla, fcport, QLT_PLOGI_LINK_SAME_WWN);
5294 			/* we took an extra ref_count to prevent PLOGI ACK when
5295 			 * fcport/sess has not been created.
5296 			 */
5297 			pla->ref_count--;
5298 		}
5299 	} else {
5300 		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
5301 		fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
5302 		if (fcport) {
5303 			fcport->d_id = e->u.new_sess.id;
5304 			fcport->flags |= FCF_FABRIC_DEVICE;
5305 			fcport->fw_login_state = DSC_LS_PLOGI_PEND;
5306 			fcport->tgt_short_link_down_cnt = 0;
5307 
5308 			memcpy(fcport->port_name, e->u.new_sess.port_name,
5309 			    WWN_SIZE);
5310 
5311 			fcport->fc4_type = e->u.new_sess.fc4_type;
5312 			if (NVME_PRIORITY(vha->hw, fcport))
5313 				fcport->do_prli_nvme = 1;
5314 			else
5315 				fcport->do_prli_nvme = 0;
5316 
5317 			if (e->u.new_sess.fc4_type & FS_FCP_IS_N2N) {
5318 				fcport->dm_login_expire = jiffies +
5319 					QLA_N2N_WAIT_TIME * HZ;
5320 				fcport->fc4_type = FS_FC4TYPE_FCP;
5321 				fcport->n2n_flag = 1;
5322 				if (vha->flags.nvme_enabled)
5323 					fcport->fc4_type |= FS_FC4TYPE_NVME;
5324 			}
5325 
5326 		} else {
5327 			ql_dbg(ql_dbg_disc, vha, 0xffff,
5328 				   "%s %8phC mem alloc fail.\n",
5329 				   __func__, e->u.new_sess.port_name);
5330 
5331 			if (pla) {
5332 				list_del(&pla->list);
5333 				kmem_cache_free(qla_tgt_plogi_cachep, pla);
5334 			}
5335 			return;
5336 		}
5337 
5338 		spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
5339 		/* search again to make sure no one else got ahead */
5340 		tfcp = qla2x00_find_fcport_by_wwpn(vha,
5341 		    e->u.new_sess.port_name, 1);
5342 		if (tfcp) {
5343 			/* should rarily happen */
5344 			ql_dbg(ql_dbg_disc, vha, 0xffff,
5345 			    "%s %8phC found existing fcport b4 add. DS %d LS %d\n",
5346 			    __func__, tfcp->port_name, tfcp->disc_state,
5347 			    tfcp->fw_login_state);
5348 
5349 			free_fcport = 1;
5350 		} else {
5351 			list_add_tail(&fcport->list, &vha->vp_fcports);
5352 
5353 		}
5354 		if (pla) {
5355 			qlt_plogi_ack_link(vha, pla, fcport,
5356 			    QLT_PLOGI_LINK_SAME_WWN);
5357 			pla->ref_count--;
5358 		}
5359 	}
5360 	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
5361 
5362 	if (fcport) {
5363 		fcport->id_changed = 1;
5364 		fcport->scan_state = QLA_FCPORT_FOUND;
5365 		fcport->chip_reset = vha->hw->base_qpair->chip_reset;
5366 		memcpy(fcport->node_name, e->u.new_sess.node_name, WWN_SIZE);
5367 
5368 		if (pla) {
5369 			if (pla->iocb.u.isp24.status_subcode == ELS_PRLI) {
5370 				u16 wd3_lo;
5371 
5372 				fcport->fw_login_state = DSC_LS_PRLI_PEND;
5373 				fcport->local = 0;
5374 				fcport->loop_id =
5375 					le16_to_cpu(
5376 					    pla->iocb.u.isp24.nport_handle);
5377 				fcport->fw_login_state = DSC_LS_PRLI_PEND;
5378 				wd3_lo =
5379 				    le16_to_cpu(
5380 					pla->iocb.u.isp24.u.prli.wd3_lo);
5381 
5382 				if (wd3_lo & BIT_7)
5383 					fcport->conf_compl_supported = 1;
5384 
5385 				if ((wd3_lo & BIT_4) == 0)
5386 					fcport->port_type = FCT_INITIATOR;
5387 				else
5388 					fcport->port_type = FCT_TARGET;
5389 			}
5390 			qlt_plogi_ack_unref(vha, pla);
5391 		} else {
5392 			fc_port_t *dfcp = NULL;
5393 
5394 			spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
5395 			tfcp = qla2x00_find_fcport_by_nportid(vha,
5396 			    &e->u.new_sess.id, 1);
5397 			if (tfcp && (tfcp != fcport)) {
5398 				/*
5399 				 * We have a conflict fcport with same NportID.
5400 				 */
5401 				ql_dbg(ql_dbg_disc, vha, 0xffff,
5402 				    "%s %8phC found conflict b4 add. DS %d LS %d\n",
5403 				    __func__, tfcp->port_name, tfcp->disc_state,
5404 				    tfcp->fw_login_state);
5405 
5406 				switch (tfcp->disc_state) {
5407 				case DSC_DELETED:
5408 					break;
5409 				case DSC_DELETE_PEND:
5410 					fcport->login_pause = 1;
5411 					tfcp->conflict = fcport;
5412 					break;
5413 				default:
5414 					fcport->login_pause = 1;
5415 					tfcp->conflict = fcport;
5416 					dfcp = tfcp;
5417 					break;
5418 				}
5419 			}
5420 			spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
5421 			if (dfcp)
5422 				qlt_schedule_sess_for_deletion(tfcp);
5423 
5424 			if (N2N_TOPO(vha->hw)) {
5425 				fcport->flags &= ~FCF_FABRIC_DEVICE;
5426 				fcport->keep_nport_handle = 1;
5427 				if (vha->flags.nvme_enabled) {
5428 					fcport->fc4_type =
5429 					    (FS_FC4TYPE_NVME | FS_FC4TYPE_FCP);
5430 					fcport->n2n_flag = 1;
5431 				}
5432 				fcport->fw_login_state = 0;
5433 
5434 				schedule_delayed_work(&vha->scan.scan_work, 5);
5435 			} else {
5436 				qla24xx_fcport_handle_login(vha, fcport);
5437 			}
5438 		}
5439 	}
5440 
5441 	if (free_fcport) {
5442 		qla2x00_free_fcport(fcport);
5443 		if (pla) {
5444 			list_del(&pla->list);
5445 			kmem_cache_free(qla_tgt_plogi_cachep, pla);
5446 		}
5447 	}
5448 }
5449 
5450 static void qla_sp_retry(struct scsi_qla_host *vha, struct qla_work_evt *e)
5451 {
5452 	struct srb *sp = e->u.iosb.sp;
5453 	int rval;
5454 
5455 	rval = qla2x00_start_sp(sp);
5456 	if (rval != QLA_SUCCESS) {
5457 		ql_dbg(ql_dbg_disc, vha, 0x2043,
5458 		    "%s: %s: Re-issue IOCB failed (%d).\n",
5459 		    __func__, sp->name, rval);
5460 		qla24xx_sp_unmap(vha, sp);
5461 	}
5462 }
5463 
5464 void
5465 qla2x00_do_work(struct scsi_qla_host *vha)
5466 {
5467 	struct qla_work_evt *e, *tmp;
5468 	unsigned long flags;
5469 	LIST_HEAD(work);
5470 	int rc;
5471 
5472 	spin_lock_irqsave(&vha->work_lock, flags);
5473 	list_splice_init(&vha->work_list, &work);
5474 	spin_unlock_irqrestore(&vha->work_lock, flags);
5475 
5476 	list_for_each_entry_safe(e, tmp, &work, list) {
5477 		rc = QLA_SUCCESS;
5478 		switch (e->type) {
5479 		case QLA_EVT_AEN:
5480 			fc_host_post_event(vha->host, fc_get_event_number(),
5481 			    e->u.aen.code, e->u.aen.data);
5482 			break;
5483 		case QLA_EVT_IDC_ACK:
5484 			qla81xx_idc_ack(vha, e->u.idc_ack.mb);
5485 			break;
5486 		case QLA_EVT_ASYNC_LOGIN:
5487 			qla2x00_async_login(vha, e->u.logio.fcport,
5488 			    e->u.logio.data);
5489 			break;
5490 		case QLA_EVT_ASYNC_LOGOUT:
5491 			rc = qla2x00_async_logout(vha, e->u.logio.fcport);
5492 			break;
5493 		case QLA_EVT_ASYNC_ADISC:
5494 			qla2x00_async_adisc(vha, e->u.logio.fcport,
5495 			    e->u.logio.data);
5496 			break;
5497 		case QLA_EVT_UEVENT:
5498 			qla2x00_uevent_emit(vha, e->u.uevent.code);
5499 			break;
5500 		case QLA_EVT_AENFX:
5501 			qlafx00_process_aen(vha, e);
5502 			break;
5503 		case QLA_EVT_UNMAP:
5504 			qla24xx_sp_unmap(vha, e->u.iosb.sp);
5505 			break;
5506 		case QLA_EVT_RELOGIN:
5507 			qla2x00_relogin(vha);
5508 			break;
5509 		case QLA_EVT_NEW_SESS:
5510 			qla24xx_create_new_sess(vha, e);
5511 			break;
5512 		case QLA_EVT_GPDB:
5513 			qla24xx_async_gpdb(vha, e->u.fcport.fcport,
5514 			    e->u.fcport.opt);
5515 			break;
5516 		case QLA_EVT_PRLI:
5517 			qla24xx_async_prli(vha, e->u.fcport.fcport);
5518 			break;
5519 		case QLA_EVT_GPSC:
5520 			qla24xx_async_gpsc(vha, e->u.fcport.fcport);
5521 			break;
5522 		case QLA_EVT_GNL:
5523 			qla24xx_async_gnl(vha, e->u.fcport.fcport);
5524 			break;
5525 		case QLA_EVT_NACK:
5526 			qla24xx_do_nack_work(vha, e);
5527 			break;
5528 		case QLA_EVT_ASYNC_PRLO:
5529 			rc = qla2x00_async_prlo(vha, e->u.logio.fcport);
5530 			break;
5531 		case QLA_EVT_ASYNC_PRLO_DONE:
5532 			qla2x00_async_prlo_done(vha, e->u.logio.fcport,
5533 			    e->u.logio.data);
5534 			break;
5535 		case QLA_EVT_GPNFT:
5536 			qla24xx_async_gpnft(vha, e->u.gpnft.fc4_type,
5537 			    e->u.gpnft.sp);
5538 			break;
5539 		case QLA_EVT_GPNFT_DONE:
5540 			qla24xx_async_gpnft_done(vha, e->u.iosb.sp);
5541 			break;
5542 		case QLA_EVT_GNNFT_DONE:
5543 			qla24xx_async_gnnft_done(vha, e->u.iosb.sp);
5544 			break;
5545 		case QLA_EVT_GFPNID:
5546 			qla24xx_async_gfpnid(vha, e->u.fcport.fcport);
5547 			break;
5548 		case QLA_EVT_SP_RETRY:
5549 			qla_sp_retry(vha, e);
5550 			break;
5551 		case QLA_EVT_IIDMA:
5552 			qla_do_iidma_work(vha, e->u.fcport.fcport);
5553 			break;
5554 		case QLA_EVT_ELS_PLOGI:
5555 			qla24xx_els_dcmd2_iocb(vha, ELS_DCMD_PLOGI,
5556 			    e->u.fcport.fcport, false);
5557 			break;
5558 		case QLA_EVT_SA_REPLACE:
5559 			rc = qla24xx_issue_sa_replace_iocb(vha, e);
5560 			break;
5561 		}
5562 
5563 		if (rc == EAGAIN) {
5564 			/* put 'work' at head of 'vha->work_list' */
5565 			spin_lock_irqsave(&vha->work_lock, flags);
5566 			list_splice(&work, &vha->work_list);
5567 			spin_unlock_irqrestore(&vha->work_lock, flags);
5568 			break;
5569 		}
5570 		list_del_init(&e->list);
5571 		if (e->flags & QLA_EVT_FLAG_FREE)
5572 			kfree(e);
5573 
5574 		/* For each work completed decrement vha ref count */
5575 		QLA_VHA_MARK_NOT_BUSY(vha);
5576 	}
5577 }
5578 
5579 int qla24xx_post_relogin_work(struct scsi_qla_host *vha)
5580 {
5581 	struct qla_work_evt *e;
5582 
5583 	e = qla2x00_alloc_work(vha, QLA_EVT_RELOGIN);
5584 
5585 	if (!e) {
5586 		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
5587 		return QLA_FUNCTION_FAILED;
5588 	}
5589 
5590 	return qla2x00_post_work(vha, e);
5591 }
5592 
5593 /* Relogins all the fcports of a vport
5594  * Context: dpc thread
5595  */
5596 void qla2x00_relogin(struct scsi_qla_host *vha)
5597 {
5598 	fc_port_t       *fcport;
5599 	int status, relogin_needed = 0;
5600 	struct event_arg ea;
5601 
5602 	list_for_each_entry(fcport, &vha->vp_fcports, list) {
5603 		/*
5604 		 * If the port is not ONLINE then try to login
5605 		 * to it if we haven't run out of retries.
5606 		 */
5607 		if (atomic_read(&fcport->state) != FCS_ONLINE &&
5608 		    fcport->login_retry) {
5609 			if (fcport->scan_state != QLA_FCPORT_FOUND ||
5610 			    fcport->disc_state == DSC_LOGIN_AUTH_PEND ||
5611 			    fcport->disc_state == DSC_LOGIN_COMPLETE)
5612 				continue;
5613 
5614 			if (fcport->flags & (FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE) ||
5615 				fcport->disc_state == DSC_DELETE_PEND) {
5616 				relogin_needed = 1;
5617 			} else {
5618 				if (vha->hw->current_topology != ISP_CFG_NL) {
5619 					memset(&ea, 0, sizeof(ea));
5620 					ea.fcport = fcport;
5621 					qla24xx_handle_relogin_event(vha, &ea);
5622 				} else if (vha->hw->current_topology ==
5623 					 ISP_CFG_NL &&
5624 					IS_QLA2XXX_MIDTYPE(vha->hw)) {
5625 					(void)qla24xx_fcport_handle_login(vha,
5626 									fcport);
5627 				} else if (vha->hw->current_topology ==
5628 				    ISP_CFG_NL) {
5629 					fcport->login_retry--;
5630 					status =
5631 					    qla2x00_local_device_login(vha,
5632 						fcport);
5633 					if (status == QLA_SUCCESS) {
5634 						fcport->old_loop_id =
5635 						    fcport->loop_id;
5636 						ql_dbg(ql_dbg_disc, vha, 0x2003,
5637 						    "Port login OK: logged in ID 0x%x.\n",
5638 						    fcport->loop_id);
5639 						qla2x00_update_fcport
5640 							(vha, fcport);
5641 					} else if (status == 1) {
5642 						set_bit(RELOGIN_NEEDED,
5643 						    &vha->dpc_flags);
5644 						/* retry the login again */
5645 						ql_dbg(ql_dbg_disc, vha, 0x2007,
5646 						    "Retrying %d login again loop_id 0x%x.\n",
5647 						    fcport->login_retry,
5648 						    fcport->loop_id);
5649 					} else {
5650 						fcport->login_retry = 0;
5651 					}
5652 
5653 					if (fcport->login_retry == 0 &&
5654 					    status != QLA_SUCCESS)
5655 						qla2x00_clear_loop_id(fcport);
5656 				}
5657 			}
5658 		}
5659 		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
5660 			break;
5661 	}
5662 
5663 	if (relogin_needed)
5664 		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
5665 
5666 	ql_dbg(ql_dbg_disc, vha, 0x400e,
5667 	    "Relogin end.\n");
5668 }
5669 
5670 /* Schedule work on any of the dpc-workqueues */
5671 void
5672 qla83xx_schedule_work(scsi_qla_host_t *base_vha, int work_code)
5673 {
5674 	struct qla_hw_data *ha = base_vha->hw;
5675 
5676 	switch (work_code) {
5677 	case MBA_IDC_AEN: /* 0x8200 */
5678 		if (ha->dpc_lp_wq)
5679 			queue_work(ha->dpc_lp_wq, &ha->idc_aen);
5680 		break;
5681 
5682 	case QLA83XX_NIC_CORE_RESET: /* 0x1 */
5683 		if (!ha->flags.nic_core_reset_hdlr_active) {
5684 			if (ha->dpc_hp_wq)
5685 				queue_work(ha->dpc_hp_wq, &ha->nic_core_reset);
5686 		} else
5687 			ql_dbg(ql_dbg_p3p, base_vha, 0xb05e,
5688 			    "NIC Core reset is already active. Skip "
5689 			    "scheduling it again.\n");
5690 		break;
5691 	case QLA83XX_IDC_STATE_HANDLER: /* 0x2 */
5692 		if (ha->dpc_hp_wq)
5693 			queue_work(ha->dpc_hp_wq, &ha->idc_state_handler);
5694 		break;
5695 	case QLA83XX_NIC_CORE_UNRECOVERABLE: /* 0x3 */
5696 		if (ha->dpc_hp_wq)
5697 			queue_work(ha->dpc_hp_wq, &ha->nic_core_unrecoverable);
5698 		break;
5699 	default:
5700 		ql_log(ql_log_warn, base_vha, 0xb05f,
5701 		    "Unknown work-code=0x%x.\n", work_code);
5702 	}
5703 
5704 	return;
5705 }
5706 
5707 /* Work: Perform NIC Core Unrecoverable state handling */
5708 void
5709 qla83xx_nic_core_unrecoverable_work(struct work_struct *work)
5710 {
5711 	struct qla_hw_data *ha =
5712 		container_of(work, struct qla_hw_data, nic_core_unrecoverable);
5713 	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
5714 	uint32_t dev_state = 0;
5715 
5716 	qla83xx_idc_lock(base_vha, 0);
5717 	qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state);
5718 	qla83xx_reset_ownership(base_vha);
5719 	if (ha->flags.nic_core_reset_owner) {
5720 		ha->flags.nic_core_reset_owner = 0;
5721 		qla83xx_wr_reg(base_vha, QLA83XX_IDC_DEV_STATE,
5722 		    QLA8XXX_DEV_FAILED);
5723 		ql_log(ql_log_info, base_vha, 0xb060, "HW State: FAILED.\n");
5724 		qla83xx_schedule_work(base_vha, QLA83XX_IDC_STATE_HANDLER);
5725 	}
5726 	qla83xx_idc_unlock(base_vha, 0);
5727 }
5728 
5729 /* Work: Execute IDC state handler */
5730 void
5731 qla83xx_idc_state_handler_work(struct work_struct *work)
5732 {
5733 	struct qla_hw_data *ha =
5734 		container_of(work, struct qla_hw_data, idc_state_handler);
5735 	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
5736 	uint32_t dev_state = 0;
5737 
5738 	qla83xx_idc_lock(base_vha, 0);
5739 	qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state);
5740 	if (dev_state == QLA8XXX_DEV_FAILED ||
5741 			dev_state == QLA8XXX_DEV_NEED_QUIESCENT)
5742 		qla83xx_idc_state_handler(base_vha);
5743 	qla83xx_idc_unlock(base_vha, 0);
5744 }
5745 
5746 static int
5747 qla83xx_check_nic_core_fw_alive(scsi_qla_host_t *base_vha)
5748 {
5749 	int rval = QLA_SUCCESS;
5750 	unsigned long heart_beat_wait = jiffies + (1 * HZ);
5751 	uint32_t heart_beat_counter1, heart_beat_counter2;
5752 
5753 	do {
5754 		if (time_after(jiffies, heart_beat_wait)) {
5755 			ql_dbg(ql_dbg_p3p, base_vha, 0xb07c,
5756 			    "Nic Core f/w is not alive.\n");
5757 			rval = QLA_FUNCTION_FAILED;
5758 			break;
5759 		}
5760 
5761 		qla83xx_idc_lock(base_vha, 0);
5762 		qla83xx_rd_reg(base_vha, QLA83XX_FW_HEARTBEAT,
5763 		    &heart_beat_counter1);
5764 		qla83xx_idc_unlock(base_vha, 0);
5765 		msleep(100);
5766 		qla83xx_idc_lock(base_vha, 0);
5767 		qla83xx_rd_reg(base_vha, QLA83XX_FW_HEARTBEAT,
5768 		    &heart_beat_counter2);
5769 		qla83xx_idc_unlock(base_vha, 0);
5770 	} while (heart_beat_counter1 == heart_beat_counter2);
5771 
5772 	return rval;
5773 }
5774 
5775 /* Work: Perform NIC Core Reset handling */
5776 void
5777 qla83xx_nic_core_reset_work(struct work_struct *work)
5778 {
5779 	struct qla_hw_data *ha =
5780 		container_of(work, struct qla_hw_data, nic_core_reset);
5781 	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
5782 	uint32_t dev_state = 0;
5783 
5784 	if (IS_QLA2031(ha)) {
5785 		if (qla2xxx_mctp_dump(base_vha) != QLA_SUCCESS)
5786 			ql_log(ql_log_warn, base_vha, 0xb081,
5787 			    "Failed to dump mctp\n");
5788 		return;
5789 	}
5790 
5791 	if (!ha->flags.nic_core_reset_hdlr_active) {
5792 		if (qla83xx_check_nic_core_fw_alive(base_vha) == QLA_SUCCESS) {
5793 			qla83xx_idc_lock(base_vha, 0);
5794 			qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE,
5795 			    &dev_state);
5796 			qla83xx_idc_unlock(base_vha, 0);
5797 			if (dev_state != QLA8XXX_DEV_NEED_RESET) {
5798 				ql_dbg(ql_dbg_p3p, base_vha, 0xb07a,
5799 				    "Nic Core f/w is alive.\n");
5800 				return;
5801 			}
5802 		}
5803 
5804 		ha->flags.nic_core_reset_hdlr_active = 1;
5805 		if (qla83xx_nic_core_reset(base_vha)) {
5806 			/* NIC Core reset failed. */
5807 			ql_dbg(ql_dbg_p3p, base_vha, 0xb061,
5808 			    "NIC Core reset failed.\n");
5809 		}
5810 		ha->flags.nic_core_reset_hdlr_active = 0;
5811 	}
5812 }
5813 
5814 /* Work: Handle 8200 IDC aens */
5815 void
5816 qla83xx_service_idc_aen(struct work_struct *work)
5817 {
5818 	struct qla_hw_data *ha =
5819 		container_of(work, struct qla_hw_data, idc_aen);
5820 	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
5821 	uint32_t dev_state, idc_control;
5822 
5823 	qla83xx_idc_lock(base_vha, 0);
5824 	qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state);
5825 	qla83xx_rd_reg(base_vha, QLA83XX_IDC_CONTROL, &idc_control);
5826 	qla83xx_idc_unlock(base_vha, 0);
5827 	if (dev_state == QLA8XXX_DEV_NEED_RESET) {
5828 		if (idc_control & QLA83XX_IDC_GRACEFUL_RESET) {
5829 			ql_dbg(ql_dbg_p3p, base_vha, 0xb062,
5830 			    "Application requested NIC Core Reset.\n");
5831 			qla83xx_schedule_work(base_vha, QLA83XX_NIC_CORE_RESET);
5832 		} else if (qla83xx_check_nic_core_fw_alive(base_vha) ==
5833 		    QLA_SUCCESS) {
5834 			ql_dbg(ql_dbg_p3p, base_vha, 0xb07b,
5835 			    "Other protocol driver requested NIC Core Reset.\n");
5836 			qla83xx_schedule_work(base_vha, QLA83XX_NIC_CORE_RESET);
5837 		}
5838 	} else if (dev_state == QLA8XXX_DEV_FAILED ||
5839 			dev_state == QLA8XXX_DEV_NEED_QUIESCENT) {
5840 		qla83xx_schedule_work(base_vha, QLA83XX_IDC_STATE_HANDLER);
5841 	}
5842 }
5843 
5844 /*
5845  * Control the frequency of IDC lock retries
5846  */
5847 #define QLA83XX_WAIT_LOGIC_MS	100
5848 
5849 static int
5850 qla83xx_force_lock_recovery(scsi_qla_host_t *base_vha)
5851 {
5852 	int rval;
5853 	uint32_t data;
5854 	uint32_t idc_lck_rcvry_stage_mask = 0x3;
5855 	uint32_t idc_lck_rcvry_owner_mask = 0x3c;
5856 	struct qla_hw_data *ha = base_vha->hw;
5857 
5858 	ql_dbg(ql_dbg_p3p, base_vha, 0xb086,
5859 	    "Trying force recovery of the IDC lock.\n");
5860 
5861 	rval = qla83xx_rd_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY, &data);
5862 	if (rval)
5863 		return rval;
5864 
5865 	if ((data & idc_lck_rcvry_stage_mask) > 0) {
5866 		return QLA_SUCCESS;
5867 	} else {
5868 		data = (IDC_LOCK_RECOVERY_STAGE1) | (ha->portnum << 2);
5869 		rval = qla83xx_wr_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY,
5870 		    data);
5871 		if (rval)
5872 			return rval;
5873 
5874 		msleep(200);
5875 
5876 		rval = qla83xx_rd_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY,
5877 		    &data);
5878 		if (rval)
5879 			return rval;
5880 
5881 		if (((data & idc_lck_rcvry_owner_mask) >> 2) == ha->portnum) {
5882 			data &= (IDC_LOCK_RECOVERY_STAGE2 |
5883 					~(idc_lck_rcvry_stage_mask));
5884 			rval = qla83xx_wr_reg(base_vha,
5885 			    QLA83XX_IDC_LOCK_RECOVERY, data);
5886 			if (rval)
5887 				return rval;
5888 
5889 			/* Forcefully perform IDC UnLock */
5890 			rval = qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_UNLOCK,
5891 			    &data);
5892 			if (rval)
5893 				return rval;
5894 			/* Clear lock-id by setting 0xff */
5895 			rval = qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID,
5896 			    0xff);
5897 			if (rval)
5898 				return rval;
5899 			/* Clear lock-recovery by setting 0x0 */
5900 			rval = qla83xx_wr_reg(base_vha,
5901 			    QLA83XX_IDC_LOCK_RECOVERY, 0x0);
5902 			if (rval)
5903 				return rval;
5904 		} else
5905 			return QLA_SUCCESS;
5906 	}
5907 
5908 	return rval;
5909 }
5910 
5911 static int
5912 qla83xx_idc_lock_recovery(scsi_qla_host_t *base_vha)
5913 {
5914 	int rval = QLA_SUCCESS;
5915 	uint32_t o_drv_lockid, n_drv_lockid;
5916 	unsigned long lock_recovery_timeout;
5917 
5918 	lock_recovery_timeout = jiffies + QLA83XX_MAX_LOCK_RECOVERY_WAIT;
5919 retry_lockid:
5920 	rval = qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, &o_drv_lockid);
5921 	if (rval)
5922 		goto exit;
5923 
5924 	/* MAX wait time before forcing IDC Lock recovery = 2 secs */
5925 	if (time_after_eq(jiffies, lock_recovery_timeout)) {
5926 		if (qla83xx_force_lock_recovery(base_vha) == QLA_SUCCESS)
5927 			return QLA_SUCCESS;
5928 		else
5929 			return QLA_FUNCTION_FAILED;
5930 	}
5931 
5932 	rval = qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, &n_drv_lockid);
5933 	if (rval)
5934 		goto exit;
5935 
5936 	if (o_drv_lockid == n_drv_lockid) {
5937 		msleep(QLA83XX_WAIT_LOGIC_MS);
5938 		goto retry_lockid;
5939 	} else
5940 		return QLA_SUCCESS;
5941 
5942 exit:
5943 	return rval;
5944 }
5945 
5946 /*
5947  * Context: task, can sleep
5948  */
5949 void
5950 qla83xx_idc_lock(scsi_qla_host_t *base_vha, uint16_t requester_id)
5951 {
5952 	uint32_t data;
5953 	uint32_t lock_owner;
5954 	struct qla_hw_data *ha = base_vha->hw;
5955 
5956 	might_sleep();
5957 
5958 	/* IDC-lock implementation using driver-lock/lock-id remote registers */
5959 retry_lock:
5960 	if (qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCK, &data)
5961 	    == QLA_SUCCESS) {
5962 		if (data) {
5963 			/* Setting lock-id to our function-number */
5964 			qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID,
5965 			    ha->portnum);
5966 		} else {
5967 			qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID,
5968 			    &lock_owner);
5969 			ql_dbg(ql_dbg_p3p, base_vha, 0xb063,
5970 			    "Failed to acquire IDC lock, acquired by %d, "
5971 			    "retrying...\n", lock_owner);
5972 
5973 			/* Retry/Perform IDC-Lock recovery */
5974 			if (qla83xx_idc_lock_recovery(base_vha)
5975 			    == QLA_SUCCESS) {
5976 				msleep(QLA83XX_WAIT_LOGIC_MS);
5977 				goto retry_lock;
5978 			} else
5979 				ql_log(ql_log_warn, base_vha, 0xb075,
5980 				    "IDC Lock recovery FAILED.\n");
5981 		}
5982 
5983 	}
5984 
5985 	return;
5986 }
5987 
5988 static bool
5989 qla25xx_rdp_rsp_reduce_size(struct scsi_qla_host *vha,
5990 	struct purex_entry_24xx *purex)
5991 {
5992 	char fwstr[16];
5993 	u32 sid = purex->s_id[2] << 16 | purex->s_id[1] << 8 | purex->s_id[0];
5994 	struct port_database_24xx *pdb;
5995 
5996 	/* Domain Controller is always logged-out. */
5997 	/* if RDP request is not from Domain Controller: */
5998 	if (sid != 0xfffc01)
5999 		return false;
6000 
6001 	ql_dbg(ql_dbg_init, vha, 0x0181, "%s: s_id=%#x\n", __func__, sid);
6002 
6003 	pdb = kzalloc(sizeof(*pdb), GFP_KERNEL);
6004 	if (!pdb) {
6005 		ql_dbg(ql_dbg_init, vha, 0x0181,
6006 		    "%s: Failed allocate pdb\n", __func__);
6007 	} else if (qla24xx_get_port_database(vha,
6008 				le16_to_cpu(purex->nport_handle), pdb)) {
6009 		ql_dbg(ql_dbg_init, vha, 0x0181,
6010 		    "%s: Failed get pdb sid=%x\n", __func__, sid);
6011 	} else if (pdb->current_login_state != PDS_PLOGI_COMPLETE &&
6012 	    pdb->current_login_state != PDS_PRLI_COMPLETE) {
6013 		ql_dbg(ql_dbg_init, vha, 0x0181,
6014 		    "%s: Port not logged in sid=%#x\n", __func__, sid);
6015 	} else {
6016 		/* RDP request is from logged in port */
6017 		kfree(pdb);
6018 		return false;
6019 	}
6020 	kfree(pdb);
6021 
6022 	vha->hw->isp_ops->fw_version_str(vha, fwstr, sizeof(fwstr));
6023 	fwstr[strcspn(fwstr, " ")] = 0;
6024 	/* if FW version allows RDP response length upto 2048 bytes: */
6025 	if (strcmp(fwstr, "8.09.00") > 0 || strcmp(fwstr, "8.05.65") == 0)
6026 		return false;
6027 
6028 	ql_dbg(ql_dbg_init, vha, 0x0181, "%s: fw=%s\n", __func__, fwstr);
6029 
6030 	/* RDP response length is to be reduced to maximum 256 bytes */
6031 	return true;
6032 }
6033 
6034 /*
6035  * Function Name: qla24xx_process_purex_iocb
6036  *
6037  * Description:
6038  * Prepare a RDP response and send to Fabric switch
6039  *
6040  * PARAMETERS:
6041  * vha:	SCSI qla host
6042  * purex: RDP request received by HBA
6043  */
6044 void qla24xx_process_purex_rdp(struct scsi_qla_host *vha,
6045 			       struct purex_item *item)
6046 {
6047 	struct qla_hw_data *ha = vha->hw;
6048 	struct purex_entry_24xx *purex =
6049 	    (struct purex_entry_24xx *)&item->iocb;
6050 	dma_addr_t rsp_els_dma;
6051 	dma_addr_t rsp_payload_dma;
6052 	dma_addr_t stat_dma;
6053 	dma_addr_t sfp_dma;
6054 	struct els_entry_24xx *rsp_els = NULL;
6055 	struct rdp_rsp_payload *rsp_payload = NULL;
6056 	struct link_statistics *stat = NULL;
6057 	uint8_t *sfp = NULL;
6058 	uint16_t sfp_flags = 0;
6059 	uint rsp_payload_length = sizeof(*rsp_payload);
6060 	int rval;
6061 
6062 	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0180,
6063 	    "%s: Enter\n", __func__);
6064 
6065 	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0181,
6066 	    "-------- ELS REQ -------\n");
6067 	ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0182,
6068 	    purex, sizeof(*purex));
6069 
6070 	if (qla25xx_rdp_rsp_reduce_size(vha, purex)) {
6071 		rsp_payload_length =
6072 		    offsetof(typeof(*rsp_payload), optical_elmt_desc);
6073 		ql_dbg(ql_dbg_init, vha, 0x0181,
6074 		    "Reducing RSP payload length to %u bytes...\n",
6075 		    rsp_payload_length);
6076 	}
6077 
6078 	rsp_els = dma_alloc_coherent(&ha->pdev->dev, sizeof(*rsp_els),
6079 	    &rsp_els_dma, GFP_KERNEL);
6080 	if (!rsp_els) {
6081 		ql_log(ql_log_warn, vha, 0x0183,
6082 		    "Failed allocate dma buffer ELS RSP.\n");
6083 		goto dealloc;
6084 	}
6085 
6086 	rsp_payload = dma_alloc_coherent(&ha->pdev->dev, sizeof(*rsp_payload),
6087 	    &rsp_payload_dma, GFP_KERNEL);
6088 	if (!rsp_payload) {
6089 		ql_log(ql_log_warn, vha, 0x0184,
6090 		    "Failed allocate dma buffer ELS RSP payload.\n");
6091 		goto dealloc;
6092 	}
6093 
6094 	sfp = dma_alloc_coherent(&ha->pdev->dev, SFP_RTDI_LEN,
6095 	    &sfp_dma, GFP_KERNEL);
6096 
6097 	stat = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stat),
6098 	    &stat_dma, GFP_KERNEL);
6099 
6100 	/* Prepare Response IOCB */
6101 	rsp_els->entry_type = ELS_IOCB_TYPE;
6102 	rsp_els->entry_count = 1;
6103 	rsp_els->sys_define = 0;
6104 	rsp_els->entry_status = 0;
6105 	rsp_els->handle = 0;
6106 	rsp_els->nport_handle = purex->nport_handle;
6107 	rsp_els->tx_dsd_count = cpu_to_le16(1);
6108 	rsp_els->vp_index = purex->vp_idx;
6109 	rsp_els->sof_type = EST_SOFI3;
6110 	rsp_els->rx_xchg_address = purex->rx_xchg_addr;
6111 	rsp_els->rx_dsd_count = 0;
6112 	rsp_els->opcode = purex->els_frame_payload[0];
6113 
6114 	rsp_els->d_id[0] = purex->s_id[0];
6115 	rsp_els->d_id[1] = purex->s_id[1];
6116 	rsp_els->d_id[2] = purex->s_id[2];
6117 
6118 	rsp_els->control_flags = cpu_to_le16(EPD_ELS_ACC);
6119 	rsp_els->rx_byte_count = 0;
6120 	rsp_els->tx_byte_count = cpu_to_le32(rsp_payload_length);
6121 
6122 	put_unaligned_le64(rsp_payload_dma, &rsp_els->tx_address);
6123 	rsp_els->tx_len = rsp_els->tx_byte_count;
6124 
6125 	rsp_els->rx_address = 0;
6126 	rsp_els->rx_len = 0;
6127 
6128 	/* Prepare Response Payload */
6129 	rsp_payload->hdr.cmd = cpu_to_be32(0x2 << 24); /* LS_ACC */
6130 	rsp_payload->hdr.len = cpu_to_be32(le32_to_cpu(rsp_els->tx_byte_count) -
6131 					   sizeof(rsp_payload->hdr));
6132 
6133 	/* Link service Request Info Descriptor */
6134 	rsp_payload->ls_req_info_desc.desc_tag = cpu_to_be32(0x1);
6135 	rsp_payload->ls_req_info_desc.desc_len =
6136 	    cpu_to_be32(RDP_DESC_LEN(rsp_payload->ls_req_info_desc));
6137 	rsp_payload->ls_req_info_desc.req_payload_word_0 =
6138 	    cpu_to_be32p((uint32_t *)purex->els_frame_payload);
6139 
6140 	/* Link service Request Info Descriptor 2 */
6141 	rsp_payload->ls_req_info_desc2.desc_tag = cpu_to_be32(0x1);
6142 	rsp_payload->ls_req_info_desc2.desc_len =
6143 	    cpu_to_be32(RDP_DESC_LEN(rsp_payload->ls_req_info_desc2));
6144 	rsp_payload->ls_req_info_desc2.req_payload_word_0 =
6145 	    cpu_to_be32p((uint32_t *)purex->els_frame_payload);
6146 
6147 
6148 	rsp_payload->sfp_diag_desc.desc_tag = cpu_to_be32(0x10000);
6149 	rsp_payload->sfp_diag_desc.desc_len =
6150 		cpu_to_be32(RDP_DESC_LEN(rsp_payload->sfp_diag_desc));
6151 
6152 	if (sfp) {
6153 		/* SFP Flags */
6154 		memset(sfp, 0, SFP_RTDI_LEN);
6155 		rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa0, 0x7, 2, 0);
6156 		if (!rval) {
6157 			/* SFP Flags bits 3-0: Port Tx Laser Type */
6158 			if (sfp[0] & BIT_2 || sfp[1] & (BIT_6|BIT_5))
6159 				sfp_flags |= BIT_0; /* short wave */
6160 			else if (sfp[0] & BIT_1)
6161 				sfp_flags |= BIT_1; /* long wave 1310nm */
6162 			else if (sfp[1] & BIT_4)
6163 				sfp_flags |= BIT_1|BIT_0; /* long wave 1550nm */
6164 		}
6165 
6166 		/* SFP Type */
6167 		memset(sfp, 0, SFP_RTDI_LEN);
6168 		rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa0, 0x0, 1, 0);
6169 		if (!rval) {
6170 			sfp_flags |= BIT_4; /* optical */
6171 			if (sfp[0] == 0x3)
6172 				sfp_flags |= BIT_6; /* sfp+ */
6173 		}
6174 
6175 		rsp_payload->sfp_diag_desc.sfp_flags = cpu_to_be16(sfp_flags);
6176 
6177 		/* SFP Diagnostics */
6178 		memset(sfp, 0, SFP_RTDI_LEN);
6179 		rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa2, 0x60, 10, 0);
6180 		if (!rval) {
6181 			__be16 *trx = (__force __be16 *)sfp; /* already be16 */
6182 			rsp_payload->sfp_diag_desc.temperature = trx[0];
6183 			rsp_payload->sfp_diag_desc.vcc = trx[1];
6184 			rsp_payload->sfp_diag_desc.tx_bias = trx[2];
6185 			rsp_payload->sfp_diag_desc.tx_power = trx[3];
6186 			rsp_payload->sfp_diag_desc.rx_power = trx[4];
6187 		}
6188 	}
6189 
6190 	/* Port Speed Descriptor */
6191 	rsp_payload->port_speed_desc.desc_tag = cpu_to_be32(0x10001);
6192 	rsp_payload->port_speed_desc.desc_len =
6193 	    cpu_to_be32(RDP_DESC_LEN(rsp_payload->port_speed_desc));
6194 	rsp_payload->port_speed_desc.speed_capab = cpu_to_be16(
6195 	    qla25xx_fdmi_port_speed_capability(ha));
6196 	rsp_payload->port_speed_desc.operating_speed = cpu_to_be16(
6197 	    qla25xx_fdmi_port_speed_currently(ha));
6198 
6199 	/* Link Error Status Descriptor */
6200 	rsp_payload->ls_err_desc.desc_tag = cpu_to_be32(0x10002);
6201 	rsp_payload->ls_err_desc.desc_len =
6202 		cpu_to_be32(RDP_DESC_LEN(rsp_payload->ls_err_desc));
6203 
6204 	if (stat) {
6205 		rval = qla24xx_get_isp_stats(vha, stat, stat_dma, 0);
6206 		if (!rval) {
6207 			rsp_payload->ls_err_desc.link_fail_cnt =
6208 			    cpu_to_be32(le32_to_cpu(stat->link_fail_cnt));
6209 			rsp_payload->ls_err_desc.loss_sync_cnt =
6210 			    cpu_to_be32(le32_to_cpu(stat->loss_sync_cnt));
6211 			rsp_payload->ls_err_desc.loss_sig_cnt =
6212 			    cpu_to_be32(le32_to_cpu(stat->loss_sig_cnt));
6213 			rsp_payload->ls_err_desc.prim_seq_err_cnt =
6214 			    cpu_to_be32(le32_to_cpu(stat->prim_seq_err_cnt));
6215 			rsp_payload->ls_err_desc.inval_xmit_word_cnt =
6216 			    cpu_to_be32(le32_to_cpu(stat->inval_xmit_word_cnt));
6217 			rsp_payload->ls_err_desc.inval_crc_cnt =
6218 			    cpu_to_be32(le32_to_cpu(stat->inval_crc_cnt));
6219 			rsp_payload->ls_err_desc.pn_port_phy_type |= BIT_6;
6220 		}
6221 	}
6222 
6223 	/* Portname Descriptor */
6224 	rsp_payload->port_name_diag_desc.desc_tag = cpu_to_be32(0x10003);
6225 	rsp_payload->port_name_diag_desc.desc_len =
6226 	    cpu_to_be32(RDP_DESC_LEN(rsp_payload->port_name_diag_desc));
6227 	memcpy(rsp_payload->port_name_diag_desc.WWNN,
6228 	    vha->node_name,
6229 	    sizeof(rsp_payload->port_name_diag_desc.WWNN));
6230 	memcpy(rsp_payload->port_name_diag_desc.WWPN,
6231 	    vha->port_name,
6232 	    sizeof(rsp_payload->port_name_diag_desc.WWPN));
6233 
6234 	/* F-Port Portname Descriptor */
6235 	rsp_payload->port_name_direct_desc.desc_tag = cpu_to_be32(0x10003);
6236 	rsp_payload->port_name_direct_desc.desc_len =
6237 	    cpu_to_be32(RDP_DESC_LEN(rsp_payload->port_name_direct_desc));
6238 	memcpy(rsp_payload->port_name_direct_desc.WWNN,
6239 	    vha->fabric_node_name,
6240 	    sizeof(rsp_payload->port_name_direct_desc.WWNN));
6241 	memcpy(rsp_payload->port_name_direct_desc.WWPN,
6242 	    vha->fabric_port_name,
6243 	    sizeof(rsp_payload->port_name_direct_desc.WWPN));
6244 
6245 	/* Bufer Credit Descriptor */
6246 	rsp_payload->buffer_credit_desc.desc_tag = cpu_to_be32(0x10006);
6247 	rsp_payload->buffer_credit_desc.desc_len =
6248 		cpu_to_be32(RDP_DESC_LEN(rsp_payload->buffer_credit_desc));
6249 	rsp_payload->buffer_credit_desc.fcport_b2b = 0;
6250 	rsp_payload->buffer_credit_desc.attached_fcport_b2b = cpu_to_be32(0);
6251 	rsp_payload->buffer_credit_desc.fcport_rtt = cpu_to_be32(0);
6252 
6253 	if (ha->flags.plogi_template_valid) {
6254 		uint32_t tmp =
6255 		be16_to_cpu(ha->plogi_els_payld.fl_csp.sp_bb_cred);
6256 		rsp_payload->buffer_credit_desc.fcport_b2b = cpu_to_be32(tmp);
6257 	}
6258 
6259 	if (rsp_payload_length < sizeof(*rsp_payload))
6260 		goto send;
6261 
6262 	/* Optical Element Descriptor, Temperature */
6263 	rsp_payload->optical_elmt_desc[0].desc_tag = cpu_to_be32(0x10007);
6264 	rsp_payload->optical_elmt_desc[0].desc_len =
6265 		cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc));
6266 	/* Optical Element Descriptor, Voltage */
6267 	rsp_payload->optical_elmt_desc[1].desc_tag = cpu_to_be32(0x10007);
6268 	rsp_payload->optical_elmt_desc[1].desc_len =
6269 		cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc));
6270 	/* Optical Element Descriptor, Tx Bias Current */
6271 	rsp_payload->optical_elmt_desc[2].desc_tag = cpu_to_be32(0x10007);
6272 	rsp_payload->optical_elmt_desc[2].desc_len =
6273 		cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc));
6274 	/* Optical Element Descriptor, Tx Power */
6275 	rsp_payload->optical_elmt_desc[3].desc_tag = cpu_to_be32(0x10007);
6276 	rsp_payload->optical_elmt_desc[3].desc_len =
6277 		cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc));
6278 	/* Optical Element Descriptor, Rx Power */
6279 	rsp_payload->optical_elmt_desc[4].desc_tag = cpu_to_be32(0x10007);
6280 	rsp_payload->optical_elmt_desc[4].desc_len =
6281 		cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc));
6282 
6283 	if (sfp) {
6284 		memset(sfp, 0, SFP_RTDI_LEN);
6285 		rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa2, 0, 64, 0);
6286 		if (!rval) {
6287 			__be16 *trx = (__force __be16 *)sfp; /* already be16 */
6288 
6289 			/* Optical Element Descriptor, Temperature */
6290 			rsp_payload->optical_elmt_desc[0].high_alarm = trx[0];
6291 			rsp_payload->optical_elmt_desc[0].low_alarm = trx[1];
6292 			rsp_payload->optical_elmt_desc[0].high_warn = trx[2];
6293 			rsp_payload->optical_elmt_desc[0].low_warn = trx[3];
6294 			rsp_payload->optical_elmt_desc[0].element_flags =
6295 			    cpu_to_be32(1 << 28);
6296 
6297 			/* Optical Element Descriptor, Voltage */
6298 			rsp_payload->optical_elmt_desc[1].high_alarm = trx[4];
6299 			rsp_payload->optical_elmt_desc[1].low_alarm = trx[5];
6300 			rsp_payload->optical_elmt_desc[1].high_warn = trx[6];
6301 			rsp_payload->optical_elmt_desc[1].low_warn = trx[7];
6302 			rsp_payload->optical_elmt_desc[1].element_flags =
6303 			    cpu_to_be32(2 << 28);
6304 
6305 			/* Optical Element Descriptor, Tx Bias Current */
6306 			rsp_payload->optical_elmt_desc[2].high_alarm = trx[8];
6307 			rsp_payload->optical_elmt_desc[2].low_alarm = trx[9];
6308 			rsp_payload->optical_elmt_desc[2].high_warn = trx[10];
6309 			rsp_payload->optical_elmt_desc[2].low_warn = trx[11];
6310 			rsp_payload->optical_elmt_desc[2].element_flags =
6311 			    cpu_to_be32(3 << 28);
6312 
6313 			/* Optical Element Descriptor, Tx Power */
6314 			rsp_payload->optical_elmt_desc[3].high_alarm = trx[12];
6315 			rsp_payload->optical_elmt_desc[3].low_alarm = trx[13];
6316 			rsp_payload->optical_elmt_desc[3].high_warn = trx[14];
6317 			rsp_payload->optical_elmt_desc[3].low_warn = trx[15];
6318 			rsp_payload->optical_elmt_desc[3].element_flags =
6319 			    cpu_to_be32(4 << 28);
6320 
6321 			/* Optical Element Descriptor, Rx Power */
6322 			rsp_payload->optical_elmt_desc[4].high_alarm = trx[16];
6323 			rsp_payload->optical_elmt_desc[4].low_alarm = trx[17];
6324 			rsp_payload->optical_elmt_desc[4].high_warn = trx[18];
6325 			rsp_payload->optical_elmt_desc[4].low_warn = trx[19];
6326 			rsp_payload->optical_elmt_desc[4].element_flags =
6327 			    cpu_to_be32(5 << 28);
6328 		}
6329 
6330 		memset(sfp, 0, SFP_RTDI_LEN);
6331 		rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa2, 112, 64, 0);
6332 		if (!rval) {
6333 			/* Temperature high/low alarm/warning */
6334 			rsp_payload->optical_elmt_desc[0].element_flags |=
6335 			    cpu_to_be32(
6336 				(sfp[0] >> 7 & 1) << 3 |
6337 				(sfp[0] >> 6 & 1) << 2 |
6338 				(sfp[4] >> 7 & 1) << 1 |
6339 				(sfp[4] >> 6 & 1) << 0);
6340 
6341 			/* Voltage high/low alarm/warning */
6342 			rsp_payload->optical_elmt_desc[1].element_flags |=
6343 			    cpu_to_be32(
6344 				(sfp[0] >> 5 & 1) << 3 |
6345 				(sfp[0] >> 4 & 1) << 2 |
6346 				(sfp[4] >> 5 & 1) << 1 |
6347 				(sfp[4] >> 4 & 1) << 0);
6348 
6349 			/* Tx Bias Current high/low alarm/warning */
6350 			rsp_payload->optical_elmt_desc[2].element_flags |=
6351 			    cpu_to_be32(
6352 				(sfp[0] >> 3 & 1) << 3 |
6353 				(sfp[0] >> 2 & 1) << 2 |
6354 				(sfp[4] >> 3 & 1) << 1 |
6355 				(sfp[4] >> 2 & 1) << 0);
6356 
6357 			/* Tx Power high/low alarm/warning */
6358 			rsp_payload->optical_elmt_desc[3].element_flags |=
6359 			    cpu_to_be32(
6360 				(sfp[0] >> 1 & 1) << 3 |
6361 				(sfp[0] >> 0 & 1) << 2 |
6362 				(sfp[4] >> 1 & 1) << 1 |
6363 				(sfp[4] >> 0 & 1) << 0);
6364 
6365 			/* Rx Power high/low alarm/warning */
6366 			rsp_payload->optical_elmt_desc[4].element_flags |=
6367 			    cpu_to_be32(
6368 				(sfp[1] >> 7 & 1) << 3 |
6369 				(sfp[1] >> 6 & 1) << 2 |
6370 				(sfp[5] >> 7 & 1) << 1 |
6371 				(sfp[5] >> 6 & 1) << 0);
6372 		}
6373 	}
6374 
6375 	/* Optical Product Data Descriptor */
6376 	rsp_payload->optical_prod_desc.desc_tag = cpu_to_be32(0x10008);
6377 	rsp_payload->optical_prod_desc.desc_len =
6378 		cpu_to_be32(RDP_DESC_LEN(rsp_payload->optical_prod_desc));
6379 
6380 	if (sfp) {
6381 		memset(sfp, 0, SFP_RTDI_LEN);
6382 		rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa0, 20, 64, 0);
6383 		if (!rval) {
6384 			memcpy(rsp_payload->optical_prod_desc.vendor_name,
6385 			    sfp + 0,
6386 			    sizeof(rsp_payload->optical_prod_desc.vendor_name));
6387 			memcpy(rsp_payload->optical_prod_desc.part_number,
6388 			    sfp + 20,
6389 			    sizeof(rsp_payload->optical_prod_desc.part_number));
6390 			memcpy(rsp_payload->optical_prod_desc.revision,
6391 			    sfp + 36,
6392 			    sizeof(rsp_payload->optical_prod_desc.revision));
6393 			memcpy(rsp_payload->optical_prod_desc.serial_number,
6394 			    sfp + 48,
6395 			    sizeof(rsp_payload->optical_prod_desc.serial_number));
6396 		}
6397 
6398 		memset(sfp, 0, SFP_RTDI_LEN);
6399 		rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa0, 84, 8, 0);
6400 		if (!rval) {
6401 			memcpy(rsp_payload->optical_prod_desc.date,
6402 			    sfp + 0,
6403 			    sizeof(rsp_payload->optical_prod_desc.date));
6404 		}
6405 	}
6406 
6407 send:
6408 	ql_dbg(ql_dbg_init, vha, 0x0183,
6409 	    "Sending ELS Response to RDP Request...\n");
6410 	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0184,
6411 	    "-------- ELS RSP -------\n");
6412 	ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0185,
6413 	    rsp_els, sizeof(*rsp_els));
6414 	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0186,
6415 	    "-------- ELS RSP PAYLOAD -------\n");
6416 	ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0187,
6417 	    rsp_payload, rsp_payload_length);
6418 
6419 	rval = qla2x00_issue_iocb(vha, rsp_els, rsp_els_dma, 0);
6420 
6421 	if (rval) {
6422 		ql_log(ql_log_warn, vha, 0x0188,
6423 		    "%s: iocb failed to execute -> %x\n", __func__, rval);
6424 	} else if (rsp_els->comp_status) {
6425 		ql_log(ql_log_warn, vha, 0x0189,
6426 		    "%s: iocb failed to complete -> completion=%#x subcode=(%#x,%#x)\n",
6427 		    __func__, rsp_els->comp_status,
6428 		    rsp_els->error_subcode_1, rsp_els->error_subcode_2);
6429 	} else {
6430 		ql_dbg(ql_dbg_init, vha, 0x018a, "%s: done.\n", __func__);
6431 	}
6432 
6433 dealloc:
6434 	if (stat)
6435 		dma_free_coherent(&ha->pdev->dev, sizeof(*stat),
6436 		    stat, stat_dma);
6437 	if (sfp)
6438 		dma_free_coherent(&ha->pdev->dev, SFP_RTDI_LEN,
6439 		    sfp, sfp_dma);
6440 	if (rsp_payload)
6441 		dma_free_coherent(&ha->pdev->dev, sizeof(*rsp_payload),
6442 		    rsp_payload, rsp_payload_dma);
6443 	if (rsp_els)
6444 		dma_free_coherent(&ha->pdev->dev, sizeof(*rsp_els),
6445 		    rsp_els, rsp_els_dma);
6446 }
6447 
6448 void
6449 qla24xx_free_purex_item(struct purex_item *item)
6450 {
6451 	if (item == &item->vha->default_item)
6452 		memset(&item->vha->default_item, 0, sizeof(struct purex_item));
6453 	else
6454 		kfree(item);
6455 }
6456 
6457 void qla24xx_process_purex_list(struct purex_list *list)
6458 {
6459 	struct list_head head = LIST_HEAD_INIT(head);
6460 	struct purex_item *item, *next;
6461 	ulong flags;
6462 
6463 	spin_lock_irqsave(&list->lock, flags);
6464 	list_splice_init(&list->head, &head);
6465 	spin_unlock_irqrestore(&list->lock, flags);
6466 
6467 	list_for_each_entry_safe(item, next, &head, list) {
6468 		list_del(&item->list);
6469 		item->process_item(item->vha, item);
6470 		qla24xx_free_purex_item(item);
6471 	}
6472 }
6473 
6474 /*
6475  * Context: task, can sleep
6476  */
6477 void
6478 qla83xx_idc_unlock(scsi_qla_host_t *base_vha, uint16_t requester_id)
6479 {
6480 #if 0
6481 	uint16_t options = (requester_id << 15) | BIT_7;
6482 #endif
6483 	uint16_t retry;
6484 	uint32_t data;
6485 	struct qla_hw_data *ha = base_vha->hw;
6486 
6487 	might_sleep();
6488 
6489 	/* IDC-unlock implementation using driver-unlock/lock-id
6490 	 * remote registers
6491 	 */
6492 	retry = 0;
6493 retry_unlock:
6494 	if (qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, &data)
6495 	    == QLA_SUCCESS) {
6496 		if (data == ha->portnum) {
6497 			qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_UNLOCK, &data);
6498 			/* Clearing lock-id by setting 0xff */
6499 			qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID, 0xff);
6500 		} else if (retry < 10) {
6501 			/* SV: XXX: IDC unlock retrying needed here? */
6502 
6503 			/* Retry for IDC-unlock */
6504 			msleep(QLA83XX_WAIT_LOGIC_MS);
6505 			retry++;
6506 			ql_dbg(ql_dbg_p3p, base_vha, 0xb064,
6507 			    "Failed to release IDC lock, retrying=%d\n", retry);
6508 			goto retry_unlock;
6509 		}
6510 	} else if (retry < 10) {
6511 		/* Retry for IDC-unlock */
6512 		msleep(QLA83XX_WAIT_LOGIC_MS);
6513 		retry++;
6514 		ql_dbg(ql_dbg_p3p, base_vha, 0xb065,
6515 		    "Failed to read drv-lockid, retrying=%d\n", retry);
6516 		goto retry_unlock;
6517 	}
6518 
6519 	return;
6520 
6521 #if 0
6522 	/* XXX: IDC-unlock implementation using access-control mbx */
6523 	retry = 0;
6524 retry_unlock2:
6525 	if (qla83xx_access_control(base_vha, options, 0, 0, NULL)) {
6526 		if (retry < 10) {
6527 			/* Retry for IDC-unlock */
6528 			msleep(QLA83XX_WAIT_LOGIC_MS);
6529 			retry++;
6530 			ql_dbg(ql_dbg_p3p, base_vha, 0xb066,
6531 			    "Failed to release IDC lock, retrying=%d\n", retry);
6532 			goto retry_unlock2;
6533 		}
6534 	}
6535 
6536 	return;
6537 #endif
6538 }
6539 
6540 int
6541 __qla83xx_set_drv_presence(scsi_qla_host_t *vha)
6542 {
6543 	int rval = QLA_SUCCESS;
6544 	struct qla_hw_data *ha = vha->hw;
6545 	uint32_t drv_presence;
6546 
6547 	rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
6548 	if (rval == QLA_SUCCESS) {
6549 		drv_presence |= (1 << ha->portnum);
6550 		rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRV_PRESENCE,
6551 		    drv_presence);
6552 	}
6553 
6554 	return rval;
6555 }
6556 
6557 int
6558 qla83xx_set_drv_presence(scsi_qla_host_t *vha)
6559 {
6560 	int rval = QLA_SUCCESS;
6561 
6562 	qla83xx_idc_lock(vha, 0);
6563 	rval = __qla83xx_set_drv_presence(vha);
6564 	qla83xx_idc_unlock(vha, 0);
6565 
6566 	return rval;
6567 }
6568 
6569 int
6570 __qla83xx_clear_drv_presence(scsi_qla_host_t *vha)
6571 {
6572 	int rval = QLA_SUCCESS;
6573 	struct qla_hw_data *ha = vha->hw;
6574 	uint32_t drv_presence;
6575 
6576 	rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
6577 	if (rval == QLA_SUCCESS) {
6578 		drv_presence &= ~(1 << ha->portnum);
6579 		rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRV_PRESENCE,
6580 		    drv_presence);
6581 	}
6582 
6583 	return rval;
6584 }
6585 
6586 int
6587 qla83xx_clear_drv_presence(scsi_qla_host_t *vha)
6588 {
6589 	int rval = QLA_SUCCESS;
6590 
6591 	qla83xx_idc_lock(vha, 0);
6592 	rval = __qla83xx_clear_drv_presence(vha);
6593 	qla83xx_idc_unlock(vha, 0);
6594 
6595 	return rval;
6596 }
6597 
6598 static void
6599 qla83xx_need_reset_handler(scsi_qla_host_t *vha)
6600 {
6601 	struct qla_hw_data *ha = vha->hw;
6602 	uint32_t drv_ack, drv_presence;
6603 	unsigned long ack_timeout;
6604 
6605 	/* Wait for IDC ACK from all functions (DRV-ACK == DRV-PRESENCE) */
6606 	ack_timeout = jiffies + (ha->fcoe_reset_timeout * HZ);
6607 	while (1) {
6608 		qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
6609 		qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
6610 		if ((drv_ack & drv_presence) == drv_presence)
6611 			break;
6612 
6613 		if (time_after_eq(jiffies, ack_timeout)) {
6614 			ql_log(ql_log_warn, vha, 0xb067,
6615 			    "RESET ACK TIMEOUT! drv_presence=0x%x "
6616 			    "drv_ack=0x%x\n", drv_presence, drv_ack);
6617 			/*
6618 			 * The function(s) which did not ack in time are forced
6619 			 * to withdraw any further participation in the IDC
6620 			 * reset.
6621 			 */
6622 			if (drv_ack != drv_presence)
6623 				qla83xx_wr_reg(vha, QLA83XX_IDC_DRV_PRESENCE,
6624 				    drv_ack);
6625 			break;
6626 		}
6627 
6628 		qla83xx_idc_unlock(vha, 0);
6629 		msleep(1000);
6630 		qla83xx_idc_lock(vha, 0);
6631 	}
6632 
6633 	qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_COLD);
6634 	ql_log(ql_log_info, vha, 0xb068, "HW State: COLD/RE-INIT.\n");
6635 }
6636 
6637 static int
6638 qla83xx_device_bootstrap(scsi_qla_host_t *vha)
6639 {
6640 	int rval = QLA_SUCCESS;
6641 	uint32_t idc_control;
6642 
6643 	qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_INITIALIZING);
6644 	ql_log(ql_log_info, vha, 0xb069, "HW State: INITIALIZING.\n");
6645 
6646 	/* Clearing IDC-Control Graceful-Reset Bit before resetting f/w */
6647 	__qla83xx_get_idc_control(vha, &idc_control);
6648 	idc_control &= ~QLA83XX_IDC_GRACEFUL_RESET;
6649 	__qla83xx_set_idc_control(vha, 0);
6650 
6651 	qla83xx_idc_unlock(vha, 0);
6652 	rval = qla83xx_restart_nic_firmware(vha);
6653 	qla83xx_idc_lock(vha, 0);
6654 
6655 	if (rval != QLA_SUCCESS) {
6656 		ql_log(ql_log_fatal, vha, 0xb06a,
6657 		    "Failed to restart NIC f/w.\n");
6658 		qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_FAILED);
6659 		ql_log(ql_log_info, vha, 0xb06b, "HW State: FAILED.\n");
6660 	} else {
6661 		ql_dbg(ql_dbg_p3p, vha, 0xb06c,
6662 		    "Success in restarting nic f/w.\n");
6663 		qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_READY);
6664 		ql_log(ql_log_info, vha, 0xb06d, "HW State: READY.\n");
6665 	}
6666 
6667 	return rval;
6668 }
6669 
6670 /* Assumes idc_lock always held on entry */
6671 int
6672 qla83xx_idc_state_handler(scsi_qla_host_t *base_vha)
6673 {
6674 	struct qla_hw_data *ha = base_vha->hw;
6675 	int rval = QLA_SUCCESS;
6676 	unsigned long dev_init_timeout;
6677 	uint32_t dev_state;
6678 
6679 	/* Wait for MAX-INIT-TIMEOUT for the device to go ready */
6680 	dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout * HZ);
6681 
6682 	while (1) {
6683 
6684 		if (time_after_eq(jiffies, dev_init_timeout)) {
6685 			ql_log(ql_log_warn, base_vha, 0xb06e,
6686 			    "Initialization TIMEOUT!\n");
6687 			/* Init timeout. Disable further NIC Core
6688 			 * communication.
6689 			 */
6690 			qla83xx_wr_reg(base_vha, QLA83XX_IDC_DEV_STATE,
6691 				QLA8XXX_DEV_FAILED);
6692 			ql_log(ql_log_info, base_vha, 0xb06f,
6693 			    "HW State: FAILED.\n");
6694 		}
6695 
6696 		qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state);
6697 		switch (dev_state) {
6698 		case QLA8XXX_DEV_READY:
6699 			if (ha->flags.nic_core_reset_owner)
6700 				qla83xx_idc_audit(base_vha,
6701 				    IDC_AUDIT_COMPLETION);
6702 			ha->flags.nic_core_reset_owner = 0;
6703 			ql_dbg(ql_dbg_p3p, base_vha, 0xb070,
6704 			    "Reset_owner reset by 0x%x.\n",
6705 			    ha->portnum);
6706 			goto exit;
6707 		case QLA8XXX_DEV_COLD:
6708 			if (ha->flags.nic_core_reset_owner)
6709 				rval = qla83xx_device_bootstrap(base_vha);
6710 			else {
6711 			/* Wait for AEN to change device-state */
6712 				qla83xx_idc_unlock(base_vha, 0);
6713 				msleep(1000);
6714 				qla83xx_idc_lock(base_vha, 0);
6715 			}
6716 			break;
6717 		case QLA8XXX_DEV_INITIALIZING:
6718 			/* Wait for AEN to change device-state */
6719 			qla83xx_idc_unlock(base_vha, 0);
6720 			msleep(1000);
6721 			qla83xx_idc_lock(base_vha, 0);
6722 			break;
6723 		case QLA8XXX_DEV_NEED_RESET:
6724 			if (!ql2xdontresethba && ha->flags.nic_core_reset_owner)
6725 				qla83xx_need_reset_handler(base_vha);
6726 			else {
6727 				/* Wait for AEN to change device-state */
6728 				qla83xx_idc_unlock(base_vha, 0);
6729 				msleep(1000);
6730 				qla83xx_idc_lock(base_vha, 0);
6731 			}
6732 			/* reset timeout value after need reset handler */
6733 			dev_init_timeout = jiffies +
6734 			    (ha->fcoe_dev_init_timeout * HZ);
6735 			break;
6736 		case QLA8XXX_DEV_NEED_QUIESCENT:
6737 			/* XXX: DEBUG for now */
6738 			qla83xx_idc_unlock(base_vha, 0);
6739 			msleep(1000);
6740 			qla83xx_idc_lock(base_vha, 0);
6741 			break;
6742 		case QLA8XXX_DEV_QUIESCENT:
6743 			/* XXX: DEBUG for now */
6744 			if (ha->flags.quiesce_owner)
6745 				goto exit;
6746 
6747 			qla83xx_idc_unlock(base_vha, 0);
6748 			msleep(1000);
6749 			qla83xx_idc_lock(base_vha, 0);
6750 			dev_init_timeout = jiffies +
6751 			    (ha->fcoe_dev_init_timeout * HZ);
6752 			break;
6753 		case QLA8XXX_DEV_FAILED:
6754 			if (ha->flags.nic_core_reset_owner)
6755 				qla83xx_idc_audit(base_vha,
6756 				    IDC_AUDIT_COMPLETION);
6757 			ha->flags.nic_core_reset_owner = 0;
6758 			__qla83xx_clear_drv_presence(base_vha);
6759 			qla83xx_idc_unlock(base_vha, 0);
6760 			qla8xxx_dev_failed_handler(base_vha);
6761 			rval = QLA_FUNCTION_FAILED;
6762 			qla83xx_idc_lock(base_vha, 0);
6763 			goto exit;
6764 		case QLA8XXX_BAD_VALUE:
6765 			qla83xx_idc_unlock(base_vha, 0);
6766 			msleep(1000);
6767 			qla83xx_idc_lock(base_vha, 0);
6768 			break;
6769 		default:
6770 			ql_log(ql_log_warn, base_vha, 0xb071,
6771 			    "Unknown Device State: %x.\n", dev_state);
6772 			qla83xx_idc_unlock(base_vha, 0);
6773 			qla8xxx_dev_failed_handler(base_vha);
6774 			rval = QLA_FUNCTION_FAILED;
6775 			qla83xx_idc_lock(base_vha, 0);
6776 			goto exit;
6777 		}
6778 	}
6779 
6780 exit:
6781 	return rval;
6782 }
6783 
6784 void
6785 qla2x00_disable_board_on_pci_error(struct work_struct *work)
6786 {
6787 	struct qla_hw_data *ha = container_of(work, struct qla_hw_data,
6788 	    board_disable);
6789 	struct pci_dev *pdev = ha->pdev;
6790 	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
6791 
6792 	ql_log(ql_log_warn, base_vha, 0x015b,
6793 	    "Disabling adapter.\n");
6794 
6795 	if (!atomic_read(&pdev->enable_cnt)) {
6796 		ql_log(ql_log_info, base_vha, 0xfffc,
6797 		    "PCI device disabled, no action req for PCI error=%lx\n",
6798 		    base_vha->pci_flags);
6799 		return;
6800 	}
6801 
6802 	/*
6803 	 * if UNLOADING flag is already set, then continue unload,
6804 	 * where it was set first.
6805 	 */
6806 	if (test_and_set_bit(UNLOADING, &base_vha->dpc_flags))
6807 		return;
6808 
6809 	qla2x00_wait_for_sess_deletion(base_vha);
6810 
6811 	qla2x00_delete_all_vps(ha, base_vha);
6812 
6813 	qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16);
6814 
6815 	qla2x00_dfs_remove(base_vha);
6816 
6817 	qla84xx_put_chip(base_vha);
6818 
6819 	if (base_vha->timer_active)
6820 		qla2x00_stop_timer(base_vha);
6821 
6822 	base_vha->flags.online = 0;
6823 
6824 	qla2x00_destroy_deferred_work(ha);
6825 
6826 	/*
6827 	 * Do not try to stop beacon blink as it will issue a mailbox
6828 	 * command.
6829 	 */
6830 	qla2x00_free_sysfs_attr(base_vha, false);
6831 
6832 	fc_remove_host(base_vha->host);
6833 
6834 	scsi_remove_host(base_vha->host);
6835 
6836 	base_vha->flags.init_done = 0;
6837 	qla25xx_delete_queues(base_vha);
6838 	qla2x00_free_fcports(base_vha);
6839 	qla2x00_free_irqs(base_vha);
6840 	qla2x00_mem_free(ha);
6841 	qla82xx_md_free(base_vha);
6842 	qla2x00_free_queues(ha);
6843 
6844 	qla2x00_unmap_iobases(ha);
6845 
6846 	pci_release_selected_regions(ha->pdev, ha->bars);
6847 	pci_disable_pcie_error_reporting(pdev);
6848 	pci_disable_device(pdev);
6849 
6850 	/*
6851 	 * Let qla2x00_remove_one cleanup qla_hw_data on device removal.
6852 	 */
6853 }
6854 
6855 /**************************************************************************
6856 * qla2x00_do_dpc
6857 *   This kernel thread is a task that is schedule by the interrupt handler
6858 *   to perform the background processing for interrupts.
6859 *
6860 * Notes:
6861 * This task always run in the context of a kernel thread.  It
6862 * is kick-off by the driver's detect code and starts up
6863 * up one per adapter. It immediately goes to sleep and waits for
6864 * some fibre event.  When either the interrupt handler or
6865 * the timer routine detects a event it will one of the task
6866 * bits then wake us up.
6867 **************************************************************************/
6868 static int
6869 qla2x00_do_dpc(void *data)
6870 {
6871 	scsi_qla_host_t *base_vha;
6872 	struct qla_hw_data *ha;
6873 	uint32_t online;
6874 	struct qla_qpair *qpair;
6875 
6876 	ha = (struct qla_hw_data *)data;
6877 	base_vha = pci_get_drvdata(ha->pdev);
6878 
6879 	set_user_nice(current, MIN_NICE);
6880 
6881 	set_current_state(TASK_INTERRUPTIBLE);
6882 	while (!kthread_should_stop()) {
6883 		ql_dbg(ql_dbg_dpc, base_vha, 0x4000,
6884 		    "DPC handler sleeping.\n");
6885 
6886 		schedule();
6887 
6888 		if (test_and_clear_bit(DO_EEH_RECOVERY, &base_vha->dpc_flags))
6889 			qla_pci_set_eeh_busy(base_vha);
6890 
6891 		if (!base_vha->flags.init_done || ha->flags.mbox_busy)
6892 			goto end_loop;
6893 
6894 		if (ha->flags.eeh_busy) {
6895 			ql_dbg(ql_dbg_dpc, base_vha, 0x4003,
6896 			    "eeh_busy=%d.\n", ha->flags.eeh_busy);
6897 			goto end_loop;
6898 		}
6899 
6900 		ha->dpc_active = 1;
6901 
6902 		ql_dbg(ql_dbg_dpc + ql_dbg_verbose, base_vha, 0x4001,
6903 		    "DPC handler waking up, dpc_flags=0x%lx.\n",
6904 		    base_vha->dpc_flags);
6905 
6906 		if (test_bit(UNLOADING, &base_vha->dpc_flags))
6907 			break;
6908 
6909 		if (IS_P3P_TYPE(ha)) {
6910 			if (IS_QLA8044(ha)) {
6911 				if (test_and_clear_bit(ISP_UNRECOVERABLE,
6912 					&base_vha->dpc_flags)) {
6913 					qla8044_idc_lock(ha);
6914 					qla8044_wr_direct(base_vha,
6915 						QLA8044_CRB_DEV_STATE_INDEX,
6916 						QLA8XXX_DEV_FAILED);
6917 					qla8044_idc_unlock(ha);
6918 					ql_log(ql_log_info, base_vha, 0x4004,
6919 						"HW State: FAILED.\n");
6920 					qla8044_device_state_handler(base_vha);
6921 					continue;
6922 				}
6923 
6924 			} else {
6925 				if (test_and_clear_bit(ISP_UNRECOVERABLE,
6926 					&base_vha->dpc_flags)) {
6927 					qla82xx_idc_lock(ha);
6928 					qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
6929 						QLA8XXX_DEV_FAILED);
6930 					qla82xx_idc_unlock(ha);
6931 					ql_log(ql_log_info, base_vha, 0x0151,
6932 						"HW State: FAILED.\n");
6933 					qla82xx_device_state_handler(base_vha);
6934 					continue;
6935 				}
6936 			}
6937 
6938 			if (test_and_clear_bit(FCOE_CTX_RESET_NEEDED,
6939 				&base_vha->dpc_flags)) {
6940 
6941 				ql_dbg(ql_dbg_dpc, base_vha, 0x4005,
6942 				    "FCoE context reset scheduled.\n");
6943 				if (!(test_and_set_bit(ABORT_ISP_ACTIVE,
6944 					&base_vha->dpc_flags))) {
6945 					if (qla82xx_fcoe_ctx_reset(base_vha)) {
6946 						/* FCoE-ctx reset failed.
6947 						 * Escalate to chip-reset
6948 						 */
6949 						set_bit(ISP_ABORT_NEEDED,
6950 							&base_vha->dpc_flags);
6951 					}
6952 					clear_bit(ABORT_ISP_ACTIVE,
6953 						&base_vha->dpc_flags);
6954 				}
6955 
6956 				ql_dbg(ql_dbg_dpc, base_vha, 0x4006,
6957 				    "FCoE context reset end.\n");
6958 			}
6959 		} else if (IS_QLAFX00(ha)) {
6960 			if (test_and_clear_bit(ISP_UNRECOVERABLE,
6961 				&base_vha->dpc_flags)) {
6962 				ql_dbg(ql_dbg_dpc, base_vha, 0x4020,
6963 				    "Firmware Reset Recovery\n");
6964 				if (qlafx00_reset_initialize(base_vha)) {
6965 					/* Failed. Abort isp later. */
6966 					if (!test_bit(UNLOADING,
6967 					    &base_vha->dpc_flags)) {
6968 						set_bit(ISP_UNRECOVERABLE,
6969 						    &base_vha->dpc_flags);
6970 						ql_dbg(ql_dbg_dpc, base_vha,
6971 						    0x4021,
6972 						    "Reset Recovery Failed\n");
6973 					}
6974 				}
6975 			}
6976 
6977 			if (test_and_clear_bit(FX00_TARGET_SCAN,
6978 				&base_vha->dpc_flags)) {
6979 				ql_dbg(ql_dbg_dpc, base_vha, 0x4022,
6980 				    "ISPFx00 Target Scan scheduled\n");
6981 				if (qlafx00_rescan_isp(base_vha)) {
6982 					if (!test_bit(UNLOADING,
6983 					    &base_vha->dpc_flags))
6984 						set_bit(ISP_UNRECOVERABLE,
6985 						    &base_vha->dpc_flags);
6986 					ql_dbg(ql_dbg_dpc, base_vha, 0x401e,
6987 					    "ISPFx00 Target Scan Failed\n");
6988 				}
6989 				ql_dbg(ql_dbg_dpc, base_vha, 0x401f,
6990 				    "ISPFx00 Target Scan End\n");
6991 			}
6992 			if (test_and_clear_bit(FX00_HOST_INFO_RESEND,
6993 				&base_vha->dpc_flags)) {
6994 				ql_dbg(ql_dbg_dpc, base_vha, 0x4023,
6995 				    "ISPFx00 Host Info resend scheduled\n");
6996 				qlafx00_fx_disc(base_vha,
6997 				    &base_vha->hw->mr.fcport,
6998 				    FXDISC_REG_HOST_INFO);
6999 			}
7000 		}
7001 
7002 		if (test_and_clear_bit(DETECT_SFP_CHANGE,
7003 		    &base_vha->dpc_flags)) {
7004 			/* Semantic:
7005 			 *  - NO-OP -- await next ISP-ABORT. Preferred method
7006 			 *             to minimize disruptions that will occur
7007 			 *             when a forced chip-reset occurs.
7008 			 *  - Force -- ISP-ABORT scheduled.
7009 			 */
7010 			/* set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags); */
7011 		}
7012 
7013 		if (test_and_clear_bit
7014 		    (ISP_ABORT_NEEDED, &base_vha->dpc_flags) &&
7015 		    !test_bit(UNLOADING, &base_vha->dpc_flags)) {
7016 			bool do_reset = true;
7017 
7018 			switch (base_vha->qlini_mode) {
7019 			case QLA2XXX_INI_MODE_ENABLED:
7020 				break;
7021 			case QLA2XXX_INI_MODE_DISABLED:
7022 				if (!qla_tgt_mode_enabled(base_vha) &&
7023 				    !ha->flags.fw_started)
7024 					do_reset = false;
7025 				break;
7026 			case QLA2XXX_INI_MODE_DUAL:
7027 				if (!qla_dual_mode_enabled(base_vha) &&
7028 				    !ha->flags.fw_started)
7029 					do_reset = false;
7030 				break;
7031 			default:
7032 				break;
7033 			}
7034 
7035 			if (do_reset && !(test_and_set_bit(ABORT_ISP_ACTIVE,
7036 			    &base_vha->dpc_flags))) {
7037 				base_vha->flags.online = 1;
7038 				ql_dbg(ql_dbg_dpc, base_vha, 0x4007,
7039 				    "ISP abort scheduled.\n");
7040 				if (ha->isp_ops->abort_isp(base_vha)) {
7041 					/* failed. retry later */
7042 					set_bit(ISP_ABORT_NEEDED,
7043 					    &base_vha->dpc_flags);
7044 				}
7045 				clear_bit(ABORT_ISP_ACTIVE,
7046 						&base_vha->dpc_flags);
7047 				ql_dbg(ql_dbg_dpc, base_vha, 0x4008,
7048 				    "ISP abort end.\n");
7049 			}
7050 		}
7051 
7052 		if (test_bit(PROCESS_PUREX_IOCB, &base_vha->dpc_flags)) {
7053 			if (atomic_read(&base_vha->loop_state) == LOOP_READY) {
7054 				qla24xx_process_purex_list
7055 					(&base_vha->purex_list);
7056 				clear_bit(PROCESS_PUREX_IOCB,
7057 				    &base_vha->dpc_flags);
7058 			}
7059 		}
7060 
7061 		if (IS_QLAFX00(ha))
7062 			goto loop_resync_check;
7063 
7064 		if (test_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags)) {
7065 			ql_dbg(ql_dbg_dpc, base_vha, 0x4009,
7066 			    "Quiescence mode scheduled.\n");
7067 			if (IS_P3P_TYPE(ha)) {
7068 				if (IS_QLA82XX(ha))
7069 					qla82xx_device_state_handler(base_vha);
7070 				if (IS_QLA8044(ha))
7071 					qla8044_device_state_handler(base_vha);
7072 				clear_bit(ISP_QUIESCE_NEEDED,
7073 				    &base_vha->dpc_flags);
7074 				if (!ha->flags.quiesce_owner) {
7075 					qla2x00_perform_loop_resync(base_vha);
7076 					if (IS_QLA82XX(ha)) {
7077 						qla82xx_idc_lock(ha);
7078 						qla82xx_clear_qsnt_ready(
7079 						    base_vha);
7080 						qla82xx_idc_unlock(ha);
7081 					} else if (IS_QLA8044(ha)) {
7082 						qla8044_idc_lock(ha);
7083 						qla8044_clear_qsnt_ready(
7084 						    base_vha);
7085 						qla8044_idc_unlock(ha);
7086 					}
7087 				}
7088 			} else {
7089 				clear_bit(ISP_QUIESCE_NEEDED,
7090 				    &base_vha->dpc_flags);
7091 				qla2x00_quiesce_io(base_vha);
7092 			}
7093 			ql_dbg(ql_dbg_dpc, base_vha, 0x400a,
7094 			    "Quiescence mode end.\n");
7095 		}
7096 
7097 		if (test_and_clear_bit(RESET_MARKER_NEEDED,
7098 				&base_vha->dpc_flags) &&
7099 		    (!(test_and_set_bit(RESET_ACTIVE, &base_vha->dpc_flags)))) {
7100 
7101 			ql_dbg(ql_dbg_dpc, base_vha, 0x400b,
7102 			    "Reset marker scheduled.\n");
7103 			qla2x00_rst_aen(base_vha);
7104 			clear_bit(RESET_ACTIVE, &base_vha->dpc_flags);
7105 			ql_dbg(ql_dbg_dpc, base_vha, 0x400c,
7106 			    "Reset marker end.\n");
7107 		}
7108 
7109 		/* Retry each device up to login retry count */
7110 		if (test_bit(RELOGIN_NEEDED, &base_vha->dpc_flags) &&
7111 		    !test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags) &&
7112 		    atomic_read(&base_vha->loop_state) != LOOP_DOWN) {
7113 
7114 			if (!base_vha->relogin_jif ||
7115 			    time_after_eq(jiffies, base_vha->relogin_jif)) {
7116 				base_vha->relogin_jif = jiffies + HZ;
7117 				clear_bit(RELOGIN_NEEDED, &base_vha->dpc_flags);
7118 
7119 				ql_dbg(ql_dbg_disc, base_vha, 0x400d,
7120 				    "Relogin scheduled.\n");
7121 				qla24xx_post_relogin_work(base_vha);
7122 			}
7123 		}
7124 loop_resync_check:
7125 		if (!qla2x00_reset_active(base_vha) &&
7126 		    test_and_clear_bit(LOOP_RESYNC_NEEDED,
7127 		    &base_vha->dpc_flags)) {
7128 			/*
7129 			 * Allow abort_isp to complete before moving on to scanning.
7130 			 */
7131 			ql_dbg(ql_dbg_dpc, base_vha, 0x400f,
7132 			    "Loop resync scheduled.\n");
7133 
7134 			if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE,
7135 			    &base_vha->dpc_flags))) {
7136 
7137 				qla2x00_loop_resync(base_vha);
7138 
7139 				clear_bit(LOOP_RESYNC_ACTIVE,
7140 						&base_vha->dpc_flags);
7141 			}
7142 
7143 			ql_dbg(ql_dbg_dpc, base_vha, 0x4010,
7144 			    "Loop resync end.\n");
7145 		}
7146 
7147 		if (IS_QLAFX00(ha))
7148 			goto intr_on_check;
7149 
7150 		if (test_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags) &&
7151 		    atomic_read(&base_vha->loop_state) == LOOP_READY) {
7152 			clear_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags);
7153 			qla2xxx_flash_npiv_conf(base_vha);
7154 		}
7155 
7156 intr_on_check:
7157 		if (!ha->interrupts_on)
7158 			ha->isp_ops->enable_intrs(ha);
7159 
7160 		if (test_and_clear_bit(BEACON_BLINK_NEEDED,
7161 					&base_vha->dpc_flags)) {
7162 			if (ha->beacon_blink_led == 1)
7163 				ha->isp_ops->beacon_blink(base_vha);
7164 		}
7165 
7166 		/* qpair online check */
7167 		if (test_and_clear_bit(QPAIR_ONLINE_CHECK_NEEDED,
7168 		    &base_vha->dpc_flags)) {
7169 			if (ha->flags.eeh_busy ||
7170 			    ha->flags.pci_channel_io_perm_failure)
7171 				online = 0;
7172 			else
7173 				online = 1;
7174 
7175 			mutex_lock(&ha->mq_lock);
7176 			list_for_each_entry(qpair, &base_vha->qp_list,
7177 			    qp_list_elem)
7178 			qpair->online = online;
7179 			mutex_unlock(&ha->mq_lock);
7180 		}
7181 
7182 		if (test_and_clear_bit(SET_ZIO_THRESHOLD_NEEDED,
7183 				       &base_vha->dpc_flags)) {
7184 			u16 threshold = ha->nvme_last_rptd_aen + ha->last_zio_threshold;
7185 
7186 			if (threshold > ha->orig_fw_xcb_count)
7187 				threshold = ha->orig_fw_xcb_count;
7188 
7189 			ql_log(ql_log_info, base_vha, 0xffffff,
7190 			       "SET ZIO Activity exchange threshold to %d.\n",
7191 			       threshold);
7192 			if (qla27xx_set_zio_threshold(base_vha, threshold)) {
7193 				ql_log(ql_log_info, base_vha, 0xffffff,
7194 				       "Unable to SET ZIO Activity exchange threshold to %d.\n",
7195 				       threshold);
7196 			}
7197 		}
7198 
7199 		if (!IS_QLAFX00(ha))
7200 			qla2x00_do_dpc_all_vps(base_vha);
7201 
7202 		if (test_and_clear_bit(N2N_LINK_RESET,
7203 			&base_vha->dpc_flags)) {
7204 			qla2x00_lip_reset(base_vha);
7205 		}
7206 
7207 		ha->dpc_active = 0;
7208 end_loop:
7209 		set_current_state(TASK_INTERRUPTIBLE);
7210 	} /* End of while(1) */
7211 	__set_current_state(TASK_RUNNING);
7212 
7213 	ql_dbg(ql_dbg_dpc, base_vha, 0x4011,
7214 	    "DPC handler exiting.\n");
7215 
7216 	/*
7217 	 * Make sure that nobody tries to wake us up again.
7218 	 */
7219 	ha->dpc_active = 0;
7220 
7221 	/* Cleanup any residual CTX SRBs. */
7222 	qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16);
7223 
7224 	return 0;
7225 }
7226 
7227 void
7228 qla2xxx_wake_dpc(struct scsi_qla_host *vha)
7229 {
7230 	struct qla_hw_data *ha = vha->hw;
7231 	struct task_struct *t = ha->dpc_thread;
7232 
7233 	if (!test_bit(UNLOADING, &vha->dpc_flags) && t)
7234 		wake_up_process(t);
7235 }
7236 
7237 /*
7238 *  qla2x00_rst_aen
7239 *      Processes asynchronous reset.
7240 *
7241 * Input:
7242 *      ha  = adapter block pointer.
7243 */
7244 static void
7245 qla2x00_rst_aen(scsi_qla_host_t *vha)
7246 {
7247 	if (vha->flags.online && !vha->flags.reset_active &&
7248 	    !atomic_read(&vha->loop_down_timer) &&
7249 	    !(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))) {
7250 		do {
7251 			clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
7252 
7253 			/*
7254 			 * Issue marker command only when we are going to start
7255 			 * the I/O.
7256 			 */
7257 			vha->marker_needed = 1;
7258 		} while (!atomic_read(&vha->loop_down_timer) &&
7259 		    (test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags)));
7260 	}
7261 }
7262 
7263 static bool qla_do_heartbeat(struct scsi_qla_host *vha)
7264 {
7265 	struct qla_hw_data *ha = vha->hw;
7266 	u32 cmpl_cnt;
7267 	u16 i;
7268 	bool do_heartbeat = false;
7269 
7270 	/*
7271 	 * Allow do_heartbeat only if we don’t have any active interrupts,
7272 	 * but there are still IOs outstanding with firmware.
7273 	 */
7274 	cmpl_cnt = ha->base_qpair->cmd_completion_cnt;
7275 	if (cmpl_cnt == ha->base_qpair->prev_completion_cnt &&
7276 	    cmpl_cnt != ha->base_qpair->cmd_cnt) {
7277 		do_heartbeat = true;
7278 		goto skip;
7279 	}
7280 	ha->base_qpair->prev_completion_cnt = cmpl_cnt;
7281 
7282 	for (i = 0; i < ha->max_qpairs; i++) {
7283 		if (ha->queue_pair_map[i]) {
7284 			cmpl_cnt = ha->queue_pair_map[i]->cmd_completion_cnt;
7285 			if (cmpl_cnt == ha->queue_pair_map[i]->prev_completion_cnt &&
7286 			    cmpl_cnt != ha->queue_pair_map[i]->cmd_cnt) {
7287 				do_heartbeat = true;
7288 				break;
7289 			}
7290 			ha->queue_pair_map[i]->prev_completion_cnt = cmpl_cnt;
7291 		}
7292 	}
7293 
7294 skip:
7295 	return do_heartbeat;
7296 }
7297 
7298 static void qla_heart_beat(struct scsi_qla_host *vha, u16 dpc_started)
7299 {
7300 	struct qla_hw_data *ha = vha->hw;
7301 
7302 	if (vha->vp_idx)
7303 		return;
7304 
7305 	if (vha->hw->flags.eeh_busy || qla2x00_chip_is_down(vha))
7306 		return;
7307 
7308 	/*
7309 	 * dpc thread cannot run if heartbeat is running at the same time.
7310 	 * We also do not want to starve heartbeat task. Therefore, do
7311 	 * heartbeat task at least once every 5 seconds.
7312 	 */
7313 	if (dpc_started &&
7314 	    time_before(jiffies, ha->last_heartbeat_run_jiffies + 5 * HZ))
7315 		return;
7316 
7317 	if (qla_do_heartbeat(vha)) {
7318 		ha->last_heartbeat_run_jiffies = jiffies;
7319 		queue_work(ha->wq, &ha->heartbeat_work);
7320 	}
7321 }
7322 
7323 static void qla_wind_down_chip(scsi_qla_host_t *vha)
7324 {
7325 	struct qla_hw_data *ha = vha->hw;
7326 
7327 	if (!ha->flags.eeh_busy)
7328 		return;
7329 	if (ha->pci_error_state)
7330 		/* system is trying to recover */
7331 		return;
7332 
7333 	/*
7334 	 * Current system is not handling PCIE error.  At this point, this is
7335 	 * best effort to wind down the adapter.
7336 	 */
7337 	if (time_after_eq(jiffies, ha->eeh_jif + ql2xdelay_before_pci_error_handling * HZ) &&
7338 	    !ha->flags.eeh_flush) {
7339 		ql_log(ql_log_info, vha, 0x9009,
7340 		    "PCI Error detected, attempting to reset hardware.\n");
7341 
7342 		ha->isp_ops->reset_chip(vha);
7343 		ha->isp_ops->disable_intrs(ha);
7344 
7345 		ha->flags.eeh_flush = EEH_FLUSH_RDY;
7346 		ha->eeh_jif = jiffies;
7347 
7348 	} else if (ha->flags.eeh_flush == EEH_FLUSH_RDY &&
7349 	    time_after_eq(jiffies, ha->eeh_jif +  5 * HZ)) {
7350 		pci_clear_master(ha->pdev);
7351 
7352 		/* flush all command */
7353 		qla2x00_abort_isp_cleanup(vha);
7354 		ha->flags.eeh_flush = EEH_FLUSH_DONE;
7355 
7356 		ql_log(ql_log_info, vha, 0x900a,
7357 		    "PCI Error handling complete, all IOs aborted.\n");
7358 	}
7359 }
7360 
7361 /**************************************************************************
7362 *   qla2x00_timer
7363 *
7364 * Description:
7365 *   One second timer
7366 *
7367 * Context: Interrupt
7368 ***************************************************************************/
7369 void
7370 qla2x00_timer(struct timer_list *t)
7371 {
7372 	scsi_qla_host_t *vha = from_timer(vha, t, timer);
7373 	unsigned long	cpu_flags = 0;
7374 	int		start_dpc = 0;
7375 	int		index;
7376 	srb_t		*sp;
7377 	uint16_t        w;
7378 	struct qla_hw_data *ha = vha->hw;
7379 	struct req_que *req;
7380 	unsigned long flags;
7381 	fc_port_t *fcport = NULL;
7382 
7383 	if (ha->flags.eeh_busy) {
7384 		qla_wind_down_chip(vha);
7385 
7386 		ql_dbg(ql_dbg_timer, vha, 0x6000,
7387 		    "EEH = %d, restarting timer.\n",
7388 		    ha->flags.eeh_busy);
7389 		qla2x00_restart_timer(vha, WATCH_INTERVAL);
7390 		return;
7391 	}
7392 
7393 	/*
7394 	 * Hardware read to raise pending EEH errors during mailbox waits. If
7395 	 * the read returns -1 then disable the board.
7396 	 */
7397 	if (!pci_channel_offline(ha->pdev)) {
7398 		pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
7399 		qla2x00_check_reg16_for_disconnect(vha, w);
7400 	}
7401 
7402 	/* Make sure qla82xx_watchdog is run only for physical port */
7403 	if (!vha->vp_idx && IS_P3P_TYPE(ha)) {
7404 		if (test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags))
7405 			start_dpc++;
7406 		if (IS_QLA82XX(ha))
7407 			qla82xx_watchdog(vha);
7408 		else if (IS_QLA8044(ha))
7409 			qla8044_watchdog(vha);
7410 	}
7411 
7412 	if (!vha->vp_idx && IS_QLAFX00(ha))
7413 		qlafx00_timer_routine(vha);
7414 
7415 	if (vha->link_down_time < QLA2XX_MAX_LINK_DOWN_TIME)
7416 		vha->link_down_time++;
7417 
7418 	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
7419 	list_for_each_entry(fcport, &vha->vp_fcports, list) {
7420 		if (fcport->tgt_link_down_time < QLA2XX_MAX_LINK_DOWN_TIME)
7421 			fcport->tgt_link_down_time++;
7422 	}
7423 	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
7424 
7425 	/* Loop down handler. */
7426 	if (atomic_read(&vha->loop_down_timer) > 0 &&
7427 	    !(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) &&
7428 	    !(test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags))
7429 		&& vha->flags.online) {
7430 
7431 		if (atomic_read(&vha->loop_down_timer) ==
7432 		    vha->loop_down_abort_time) {
7433 
7434 			ql_log(ql_log_info, vha, 0x6008,
7435 			    "Loop down - aborting the queues before time expires.\n");
7436 
7437 			if (!IS_QLA2100(ha) && vha->link_down_timeout)
7438 				atomic_set(&vha->loop_state, LOOP_DEAD);
7439 
7440 			/*
7441 			 * Schedule an ISP abort to return any FCP2-device
7442 			 * commands.
7443 			 */
7444 			/* NPIV - scan physical port only */
7445 			if (!vha->vp_idx) {
7446 				spin_lock_irqsave(&ha->hardware_lock,
7447 				    cpu_flags);
7448 				req = ha->req_q_map[0];
7449 				for (index = 1;
7450 				    index < req->num_outstanding_cmds;
7451 				    index++) {
7452 					fc_port_t *sfcp;
7453 
7454 					sp = req->outstanding_cmds[index];
7455 					if (!sp)
7456 						continue;
7457 					if (sp->cmd_type != TYPE_SRB)
7458 						continue;
7459 					if (sp->type != SRB_SCSI_CMD)
7460 						continue;
7461 					sfcp = sp->fcport;
7462 					if (!(sfcp->flags & FCF_FCP2_DEVICE))
7463 						continue;
7464 
7465 					if (IS_QLA82XX(ha))
7466 						set_bit(FCOE_CTX_RESET_NEEDED,
7467 							&vha->dpc_flags);
7468 					else
7469 						set_bit(ISP_ABORT_NEEDED,
7470 							&vha->dpc_flags);
7471 					break;
7472 				}
7473 				spin_unlock_irqrestore(&ha->hardware_lock,
7474 								cpu_flags);
7475 			}
7476 			start_dpc++;
7477 		}
7478 
7479 		/* if the loop has been down for 4 minutes, reinit adapter */
7480 		if (atomic_dec_and_test(&vha->loop_down_timer) != 0) {
7481 			if (!(vha->device_flags & DFLG_NO_CABLE) && !vha->vp_idx) {
7482 				ql_log(ql_log_warn, vha, 0x6009,
7483 				    "Loop down - aborting ISP.\n");
7484 
7485 				if (IS_QLA82XX(ha))
7486 					set_bit(FCOE_CTX_RESET_NEEDED,
7487 						&vha->dpc_flags);
7488 				else
7489 					set_bit(ISP_ABORT_NEEDED,
7490 						&vha->dpc_flags);
7491 			}
7492 		}
7493 		ql_dbg(ql_dbg_timer, vha, 0x600a,
7494 		    "Loop down - seconds remaining %d.\n",
7495 		    atomic_read(&vha->loop_down_timer));
7496 	}
7497 	/* Check if beacon LED needs to be blinked for physical host only */
7498 	if (!vha->vp_idx && (ha->beacon_blink_led == 1)) {
7499 		/* There is no beacon_blink function for ISP82xx */
7500 		if (!IS_P3P_TYPE(ha)) {
7501 			set_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags);
7502 			start_dpc++;
7503 		}
7504 	}
7505 
7506 	/* check if edif running */
7507 	if (vha->hw->flags.edif_enabled)
7508 		qla_edif_timer(vha);
7509 
7510 	/* Process any deferred work. */
7511 	if (!list_empty(&vha->work_list)) {
7512 		unsigned long flags;
7513 		bool q = false;
7514 
7515 		spin_lock_irqsave(&vha->work_lock, flags);
7516 		if (!test_and_set_bit(IOCB_WORK_ACTIVE, &vha->dpc_flags))
7517 			q = true;
7518 		spin_unlock_irqrestore(&vha->work_lock, flags);
7519 		if (q)
7520 			queue_work(vha->hw->wq, &vha->iocb_work);
7521 	}
7522 
7523 	/*
7524 	 * FC-NVME
7525 	 * see if the active AEN count has changed from what was last reported.
7526 	 */
7527 	index = atomic_read(&ha->nvme_active_aen_cnt);
7528 	if (!vha->vp_idx &&
7529 	    (index != ha->nvme_last_rptd_aen) &&
7530 	    ha->zio_mode == QLA_ZIO_MODE_6 &&
7531 	    !ha->flags.host_shutting_down) {
7532 		ha->nvme_last_rptd_aen = atomic_read(&ha->nvme_active_aen_cnt);
7533 		ql_log(ql_log_info, vha, 0x3002,
7534 		    "nvme: Sched: Set ZIO exchange threshold to %d.\n",
7535 		    ha->nvme_last_rptd_aen);
7536 		set_bit(SET_ZIO_THRESHOLD_NEEDED, &vha->dpc_flags);
7537 		start_dpc++;
7538 	}
7539 
7540 	if (!vha->vp_idx &&
7541 	    atomic_read(&ha->zio_threshold) != ha->last_zio_threshold &&
7542 	    IS_ZIO_THRESHOLD_CAPABLE(ha)) {
7543 		ql_log(ql_log_info, vha, 0x3002,
7544 		    "Sched: Set ZIO exchange threshold to %d.\n",
7545 		    ha->last_zio_threshold);
7546 		ha->last_zio_threshold = atomic_read(&ha->zio_threshold);
7547 		set_bit(SET_ZIO_THRESHOLD_NEEDED, &vha->dpc_flags);
7548 		start_dpc++;
7549 	}
7550 	qla_adjust_buf(vha);
7551 
7552 	/* borrowing w to signify dpc will run */
7553 	w = 0;
7554 	/* Schedule the DPC routine if needed */
7555 	if ((test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
7556 	    test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) ||
7557 	    start_dpc ||
7558 	    test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) ||
7559 	    test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags) ||
7560 	    test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags) ||
7561 	    test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) ||
7562 	    test_bit(VP_DPC_NEEDED, &vha->dpc_flags) ||
7563 	    test_bit(RELOGIN_NEEDED, &vha->dpc_flags) ||
7564 	    test_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags))) {
7565 		ql_dbg(ql_dbg_timer, vha, 0x600b,
7566 		    "isp_abort_needed=%d loop_resync_needed=%d "
7567 		    "start_dpc=%d reset_marker_needed=%d",
7568 		    test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags),
7569 		    test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags),
7570 		    start_dpc, test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags));
7571 		ql_dbg(ql_dbg_timer, vha, 0x600c,
7572 		    "beacon_blink_needed=%d isp_unrecoverable=%d "
7573 		    "fcoe_ctx_reset_needed=%d vp_dpc_needed=%d "
7574 		    "relogin_needed=%d, Process_purex_iocb=%d.\n",
7575 		    test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags),
7576 		    test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags),
7577 		    test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags),
7578 		    test_bit(VP_DPC_NEEDED, &vha->dpc_flags),
7579 		    test_bit(RELOGIN_NEEDED, &vha->dpc_flags),
7580 		    test_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags));
7581 		qla2xxx_wake_dpc(vha);
7582 		w = 1;
7583 	}
7584 
7585 	qla_heart_beat(vha, w);
7586 
7587 	qla2x00_restart_timer(vha, WATCH_INTERVAL);
7588 }
7589 
7590 /* Firmware interface routines. */
7591 
7592 #define FW_ISP21XX	0
7593 #define FW_ISP22XX	1
7594 #define FW_ISP2300	2
7595 #define FW_ISP2322	3
7596 #define FW_ISP24XX	4
7597 #define FW_ISP25XX	5
7598 #define FW_ISP81XX	6
7599 #define FW_ISP82XX	7
7600 #define FW_ISP2031	8
7601 #define FW_ISP8031	9
7602 #define FW_ISP27XX	10
7603 #define FW_ISP28XX	11
7604 
7605 #define FW_FILE_ISP21XX	"ql2100_fw.bin"
7606 #define FW_FILE_ISP22XX	"ql2200_fw.bin"
7607 #define FW_FILE_ISP2300	"ql2300_fw.bin"
7608 #define FW_FILE_ISP2322	"ql2322_fw.bin"
7609 #define FW_FILE_ISP24XX	"ql2400_fw.bin"
7610 #define FW_FILE_ISP25XX	"ql2500_fw.bin"
7611 #define FW_FILE_ISP81XX	"ql8100_fw.bin"
7612 #define FW_FILE_ISP82XX	"ql8200_fw.bin"
7613 #define FW_FILE_ISP2031	"ql2600_fw.bin"
7614 #define FW_FILE_ISP8031	"ql8300_fw.bin"
7615 #define FW_FILE_ISP27XX	"ql2700_fw.bin"
7616 #define FW_FILE_ISP28XX	"ql2800_fw.bin"
7617 
7618 
7619 static DEFINE_MUTEX(qla_fw_lock);
7620 
7621 static struct fw_blob qla_fw_blobs[] = {
7622 	{ .name = FW_FILE_ISP21XX, .segs = { 0x1000, 0 }, },
7623 	{ .name = FW_FILE_ISP22XX, .segs = { 0x1000, 0 }, },
7624 	{ .name = FW_FILE_ISP2300, .segs = { 0x800, 0 }, },
7625 	{ .name = FW_FILE_ISP2322, .segs = { 0x800, 0x1c000, 0x1e000, 0 }, },
7626 	{ .name = FW_FILE_ISP24XX, },
7627 	{ .name = FW_FILE_ISP25XX, },
7628 	{ .name = FW_FILE_ISP81XX, },
7629 	{ .name = FW_FILE_ISP82XX, },
7630 	{ .name = FW_FILE_ISP2031, },
7631 	{ .name = FW_FILE_ISP8031, },
7632 	{ .name = FW_FILE_ISP27XX, },
7633 	{ .name = FW_FILE_ISP28XX, },
7634 	{ .name = NULL, },
7635 };
7636 
7637 struct fw_blob *
7638 qla2x00_request_firmware(scsi_qla_host_t *vha)
7639 {
7640 	struct qla_hw_data *ha = vha->hw;
7641 	struct fw_blob *blob;
7642 
7643 	if (IS_QLA2100(ha)) {
7644 		blob = &qla_fw_blobs[FW_ISP21XX];
7645 	} else if (IS_QLA2200(ha)) {
7646 		blob = &qla_fw_blobs[FW_ISP22XX];
7647 	} else if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
7648 		blob = &qla_fw_blobs[FW_ISP2300];
7649 	} else if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
7650 		blob = &qla_fw_blobs[FW_ISP2322];
7651 	} else if (IS_QLA24XX_TYPE(ha)) {
7652 		blob = &qla_fw_blobs[FW_ISP24XX];
7653 	} else if (IS_QLA25XX(ha)) {
7654 		blob = &qla_fw_blobs[FW_ISP25XX];
7655 	} else if (IS_QLA81XX(ha)) {
7656 		blob = &qla_fw_blobs[FW_ISP81XX];
7657 	} else if (IS_QLA82XX(ha)) {
7658 		blob = &qla_fw_blobs[FW_ISP82XX];
7659 	} else if (IS_QLA2031(ha)) {
7660 		blob = &qla_fw_blobs[FW_ISP2031];
7661 	} else if (IS_QLA8031(ha)) {
7662 		blob = &qla_fw_blobs[FW_ISP8031];
7663 	} else if (IS_QLA27XX(ha)) {
7664 		blob = &qla_fw_blobs[FW_ISP27XX];
7665 	} else if (IS_QLA28XX(ha)) {
7666 		blob = &qla_fw_blobs[FW_ISP28XX];
7667 	} else {
7668 		return NULL;
7669 	}
7670 
7671 	if (!blob->name)
7672 		return NULL;
7673 
7674 	mutex_lock(&qla_fw_lock);
7675 	if (blob->fw)
7676 		goto out;
7677 
7678 	if (request_firmware(&blob->fw, blob->name, &ha->pdev->dev)) {
7679 		ql_log(ql_log_warn, vha, 0x0063,
7680 		    "Failed to load firmware image (%s).\n", blob->name);
7681 		blob->fw = NULL;
7682 		blob = NULL;
7683 	}
7684 
7685 out:
7686 	mutex_unlock(&qla_fw_lock);
7687 	return blob;
7688 }
7689 
7690 static void
7691 qla2x00_release_firmware(void)
7692 {
7693 	struct fw_blob *blob;
7694 
7695 	mutex_lock(&qla_fw_lock);
7696 	for (blob = qla_fw_blobs; blob->name; blob++)
7697 		release_firmware(blob->fw);
7698 	mutex_unlock(&qla_fw_lock);
7699 }
7700 
7701 static void qla_pci_error_cleanup(scsi_qla_host_t *vha)
7702 {
7703 	struct qla_hw_data *ha = vha->hw;
7704 	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
7705 	struct qla_qpair *qpair = NULL;
7706 	struct scsi_qla_host *vp, *tvp;
7707 	fc_port_t *fcport;
7708 	int i;
7709 	unsigned long flags;
7710 
7711 	ql_dbg(ql_dbg_aer, vha, 0x9000,
7712 	       "%s\n", __func__);
7713 	ha->chip_reset++;
7714 
7715 	ha->base_qpair->chip_reset = ha->chip_reset;
7716 	for (i = 0; i < ha->max_qpairs; i++) {
7717 		if (ha->queue_pair_map[i])
7718 			ha->queue_pair_map[i]->chip_reset =
7719 			    ha->base_qpair->chip_reset;
7720 	}
7721 
7722 	/*
7723 	 * purge mailbox might take a while. Slot Reset/chip reset
7724 	 * will take care of the purge
7725 	 */
7726 
7727 	mutex_lock(&ha->mq_lock);
7728 	ha->base_qpair->online = 0;
7729 	list_for_each_entry(qpair, &base_vha->qp_list, qp_list_elem)
7730 		qpair->online = 0;
7731 	wmb();
7732 	mutex_unlock(&ha->mq_lock);
7733 
7734 	qla2x00_mark_all_devices_lost(vha);
7735 
7736 	spin_lock_irqsave(&ha->vport_slock, flags);
7737 	list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
7738 		atomic_inc(&vp->vref_count);
7739 		spin_unlock_irqrestore(&ha->vport_slock, flags);
7740 		qla2x00_mark_all_devices_lost(vp);
7741 		spin_lock_irqsave(&ha->vport_slock, flags);
7742 		atomic_dec(&vp->vref_count);
7743 	}
7744 	spin_unlock_irqrestore(&ha->vport_slock, flags);
7745 
7746 	/* Clear all async request states across all VPs. */
7747 	list_for_each_entry(fcport, &vha->vp_fcports, list)
7748 		fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
7749 
7750 	spin_lock_irqsave(&ha->vport_slock, flags);
7751 	list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
7752 		atomic_inc(&vp->vref_count);
7753 		spin_unlock_irqrestore(&ha->vport_slock, flags);
7754 		list_for_each_entry(fcport, &vp->vp_fcports, list)
7755 			fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
7756 		spin_lock_irqsave(&ha->vport_slock, flags);
7757 		atomic_dec(&vp->vref_count);
7758 	}
7759 	spin_unlock_irqrestore(&ha->vport_slock, flags);
7760 }
7761 
7762 
7763 static pci_ers_result_t
7764 qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
7765 {
7766 	scsi_qla_host_t *vha = pci_get_drvdata(pdev);
7767 	struct qla_hw_data *ha = vha->hw;
7768 	pci_ers_result_t ret = PCI_ERS_RESULT_NEED_RESET;
7769 
7770 	ql_log(ql_log_warn, vha, 0x9000,
7771 	       "PCI error detected, state %x.\n", state);
7772 	ha->pci_error_state = QLA_PCI_ERR_DETECTED;
7773 
7774 	if (!atomic_read(&pdev->enable_cnt)) {
7775 		ql_log(ql_log_info, vha, 0xffff,
7776 			"PCI device is disabled,state %x\n", state);
7777 		ret = PCI_ERS_RESULT_NEED_RESET;
7778 		goto out;
7779 	}
7780 
7781 	switch (state) {
7782 	case pci_channel_io_normal:
7783 		qla_pci_set_eeh_busy(vha);
7784 		if (ql2xmqsupport || ql2xnvmeenable) {
7785 			set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags);
7786 			qla2xxx_wake_dpc(vha);
7787 		}
7788 		ret = PCI_ERS_RESULT_CAN_RECOVER;
7789 		break;
7790 	case pci_channel_io_frozen:
7791 		qla_pci_set_eeh_busy(vha);
7792 		ret = PCI_ERS_RESULT_NEED_RESET;
7793 		break;
7794 	case pci_channel_io_perm_failure:
7795 		ha->flags.pci_channel_io_perm_failure = 1;
7796 		qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
7797 		if (ql2xmqsupport || ql2xnvmeenable) {
7798 			set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags);
7799 			qla2xxx_wake_dpc(vha);
7800 		}
7801 		ret = PCI_ERS_RESULT_DISCONNECT;
7802 	}
7803 out:
7804 	ql_dbg(ql_dbg_aer, vha, 0x600d,
7805 	       "PCI error detected returning [%x].\n", ret);
7806 	return ret;
7807 }
7808 
7809 static pci_ers_result_t
7810 qla2xxx_pci_mmio_enabled(struct pci_dev *pdev)
7811 {
7812 	int risc_paused = 0;
7813 	uint32_t stat;
7814 	unsigned long flags;
7815 	scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
7816 	struct qla_hw_data *ha = base_vha->hw;
7817 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
7818 	struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
7819 
7820 	ql_log(ql_log_warn, base_vha, 0x9000,
7821 	       "mmio enabled\n");
7822 
7823 	ha->pci_error_state = QLA_PCI_MMIO_ENABLED;
7824 
7825 	if (IS_QLA82XX(ha))
7826 		return PCI_ERS_RESULT_RECOVERED;
7827 
7828 	if (qla2x00_isp_reg_stat(ha)) {
7829 		ql_log(ql_log_info, base_vha, 0x803f,
7830 		    "During mmio enabled, PCI/Register disconnect still detected.\n");
7831 		goto out;
7832 	}
7833 
7834 	spin_lock_irqsave(&ha->hardware_lock, flags);
7835 	if (IS_QLA2100(ha) || IS_QLA2200(ha)){
7836 		stat = rd_reg_word(&reg->hccr);
7837 		if (stat & HCCR_RISC_PAUSE)
7838 			risc_paused = 1;
7839 	} else if (IS_QLA23XX(ha)) {
7840 		stat = rd_reg_dword(&reg->u.isp2300.host_status);
7841 		if (stat & HSR_RISC_PAUSED)
7842 			risc_paused = 1;
7843 	} else if (IS_FWI2_CAPABLE(ha)) {
7844 		stat = rd_reg_dword(&reg24->host_status);
7845 		if (stat & HSRX_RISC_PAUSED)
7846 			risc_paused = 1;
7847 	}
7848 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
7849 
7850 	if (risc_paused) {
7851 		ql_log(ql_log_info, base_vha, 0x9003,
7852 		    "RISC paused -- mmio_enabled, Dumping firmware.\n");
7853 		qla2xxx_dump_fw(base_vha);
7854 	}
7855 out:
7856 	/* set PCI_ERS_RESULT_NEED_RESET to trigger call to qla2xxx_pci_slot_reset */
7857 	ql_dbg(ql_dbg_aer, base_vha, 0x600d,
7858 	       "mmio enabled returning.\n");
7859 	return PCI_ERS_RESULT_NEED_RESET;
7860 }
7861 
7862 static pci_ers_result_t
7863 qla2xxx_pci_slot_reset(struct pci_dev *pdev)
7864 {
7865 	pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT;
7866 	scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
7867 	struct qla_hw_data *ha = base_vha->hw;
7868 	int rc;
7869 	struct qla_qpair *qpair = NULL;
7870 
7871 	ql_log(ql_log_warn, base_vha, 0x9004,
7872 	       "Slot Reset.\n");
7873 
7874 	ha->pci_error_state = QLA_PCI_SLOT_RESET;
7875 	/* Workaround: qla2xxx driver which access hardware earlier
7876 	 * needs error state to be pci_channel_io_online.
7877 	 * Otherwise mailbox command timesout.
7878 	 */
7879 	pdev->error_state = pci_channel_io_normal;
7880 
7881 	pci_restore_state(pdev);
7882 
7883 	/* pci_restore_state() clears the saved_state flag of the device
7884 	 * save restored state which resets saved_state flag
7885 	 */
7886 	pci_save_state(pdev);
7887 
7888 	if (ha->mem_only)
7889 		rc = pci_enable_device_mem(pdev);
7890 	else
7891 		rc = pci_enable_device(pdev);
7892 
7893 	if (rc) {
7894 		ql_log(ql_log_warn, base_vha, 0x9005,
7895 		    "Can't re-enable PCI device after reset.\n");
7896 		goto exit_slot_reset;
7897 	}
7898 
7899 
7900 	if (ha->isp_ops->pci_config(base_vha))
7901 		goto exit_slot_reset;
7902 
7903 	mutex_lock(&ha->mq_lock);
7904 	list_for_each_entry(qpair, &base_vha->qp_list, qp_list_elem)
7905 		qpair->online = 1;
7906 	mutex_unlock(&ha->mq_lock);
7907 
7908 	ha->flags.eeh_busy = 0;
7909 	base_vha->flags.online = 1;
7910 	set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
7911 	ha->isp_ops->abort_isp(base_vha);
7912 	clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
7913 
7914 	if (qla2x00_isp_reg_stat(ha)) {
7915 		ha->flags.eeh_busy = 1;
7916 		qla_pci_error_cleanup(base_vha);
7917 		ql_log(ql_log_warn, base_vha, 0x9005,
7918 		       "Device unable to recover from PCI error.\n");
7919 	} else {
7920 		ret =  PCI_ERS_RESULT_RECOVERED;
7921 	}
7922 
7923 exit_slot_reset:
7924 	ql_dbg(ql_dbg_aer, base_vha, 0x900e,
7925 	    "Slot Reset returning %x.\n", ret);
7926 
7927 	return ret;
7928 }
7929 
7930 static void
7931 qla2xxx_pci_resume(struct pci_dev *pdev)
7932 {
7933 	scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
7934 	struct qla_hw_data *ha = base_vha->hw;
7935 	int ret;
7936 
7937 	ql_log(ql_log_warn, base_vha, 0x900f,
7938 	       "Pci Resume.\n");
7939 
7940 
7941 	ret = qla2x00_wait_for_hba_online(base_vha);
7942 	if (ret != QLA_SUCCESS) {
7943 		ql_log(ql_log_fatal, base_vha, 0x9002,
7944 		    "The device failed to resume I/O from slot/link_reset.\n");
7945 	}
7946 	ha->pci_error_state = QLA_PCI_RESUME;
7947 	ql_dbg(ql_dbg_aer, base_vha, 0x600d,
7948 	       "Pci Resume returning.\n");
7949 }
7950 
7951 void qla_pci_set_eeh_busy(struct scsi_qla_host *vha)
7952 {
7953 	struct qla_hw_data *ha = vha->hw;
7954 	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
7955 	bool do_cleanup = false;
7956 	unsigned long flags;
7957 
7958 	if (ha->flags.eeh_busy)
7959 		return;
7960 
7961 	spin_lock_irqsave(&base_vha->work_lock, flags);
7962 	if (!ha->flags.eeh_busy) {
7963 		ha->eeh_jif = jiffies;
7964 		ha->flags.eeh_flush = 0;
7965 
7966 		ha->flags.eeh_busy = 1;
7967 		do_cleanup = true;
7968 	}
7969 	spin_unlock_irqrestore(&base_vha->work_lock, flags);
7970 
7971 	if (do_cleanup)
7972 		qla_pci_error_cleanup(base_vha);
7973 }
7974 
7975 /*
7976  * this routine will schedule a task to pause IO from interrupt context
7977  * if caller sees a PCIE error event (register read = 0xf's)
7978  */
7979 void qla_schedule_eeh_work(struct scsi_qla_host *vha)
7980 {
7981 	struct qla_hw_data *ha = vha->hw;
7982 	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
7983 
7984 	if (ha->flags.eeh_busy)
7985 		return;
7986 
7987 	set_bit(DO_EEH_RECOVERY, &base_vha->dpc_flags);
7988 	qla2xxx_wake_dpc(base_vha);
7989 }
7990 
7991 static void
7992 qla_pci_reset_prepare(struct pci_dev *pdev)
7993 {
7994 	scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
7995 	struct qla_hw_data *ha = base_vha->hw;
7996 	struct qla_qpair *qpair;
7997 
7998 	ql_log(ql_log_warn, base_vha, 0xffff,
7999 	    "%s.\n", __func__);
8000 
8001 	/*
8002 	 * PCI FLR/function reset is about to reset the
8003 	 * slot. Stop the chip to stop all DMA access.
8004 	 * It is assumed that pci_reset_done will be called
8005 	 * after FLR to resume Chip operation.
8006 	 */
8007 	ha->flags.eeh_busy = 1;
8008 	mutex_lock(&ha->mq_lock);
8009 	list_for_each_entry(qpair, &base_vha->qp_list, qp_list_elem)
8010 		qpair->online = 0;
8011 	mutex_unlock(&ha->mq_lock);
8012 
8013 	set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
8014 	qla2x00_abort_isp_cleanup(base_vha);
8015 	qla2x00_abort_all_cmds(base_vha, DID_RESET << 16);
8016 }
8017 
8018 static void
8019 qla_pci_reset_done(struct pci_dev *pdev)
8020 {
8021 	scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
8022 	struct qla_hw_data *ha = base_vha->hw;
8023 	struct qla_qpair *qpair;
8024 
8025 	ql_log(ql_log_warn, base_vha, 0xffff,
8026 	    "%s.\n", __func__);
8027 
8028 	/*
8029 	 * FLR just completed by PCI layer. Resume adapter
8030 	 */
8031 	ha->flags.eeh_busy = 0;
8032 	mutex_lock(&ha->mq_lock);
8033 	list_for_each_entry(qpair, &base_vha->qp_list, qp_list_elem)
8034 		qpair->online = 1;
8035 	mutex_unlock(&ha->mq_lock);
8036 
8037 	base_vha->flags.online = 1;
8038 	ha->isp_ops->abort_isp(base_vha);
8039 	clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
8040 }
8041 
8042 static void qla2xxx_map_queues(struct Scsi_Host *shost)
8043 {
8044 	scsi_qla_host_t *vha = (scsi_qla_host_t *)shost->hostdata;
8045 	struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
8046 
8047 	if (USER_CTRL_IRQ(vha->hw) || !vha->hw->mqiobase)
8048 		blk_mq_map_queues(qmap);
8049 	else
8050 		blk_mq_pci_map_queues(qmap, vha->hw->pdev, vha->irq_offset);
8051 }
8052 
8053 struct scsi_host_template qla2xxx_driver_template = {
8054 	.module			= THIS_MODULE,
8055 	.name			= QLA2XXX_DRIVER_NAME,
8056 	.queuecommand		= qla2xxx_queuecommand,
8057 
8058 	.eh_timed_out		= fc_eh_timed_out,
8059 	.eh_abort_handler	= qla2xxx_eh_abort,
8060 	.eh_should_retry_cmd	= fc_eh_should_retry_cmd,
8061 	.eh_device_reset_handler = qla2xxx_eh_device_reset,
8062 	.eh_target_reset_handler = qla2xxx_eh_target_reset,
8063 	.eh_bus_reset_handler	= qla2xxx_eh_bus_reset,
8064 	.eh_host_reset_handler	= qla2xxx_eh_host_reset,
8065 
8066 	.slave_configure	= qla2xxx_slave_configure,
8067 
8068 	.slave_alloc		= qla2xxx_slave_alloc,
8069 	.slave_destroy		= qla2xxx_slave_destroy,
8070 	.scan_finished		= qla2xxx_scan_finished,
8071 	.scan_start		= qla2xxx_scan_start,
8072 	.change_queue_depth	= scsi_change_queue_depth,
8073 	.map_queues             = qla2xxx_map_queues,
8074 	.this_id		= -1,
8075 	.cmd_per_lun		= 3,
8076 	.sg_tablesize		= SG_ALL,
8077 
8078 	.max_sectors		= 0xFFFF,
8079 	.shost_groups		= qla2x00_host_groups,
8080 
8081 	.supported_mode		= MODE_INITIATOR,
8082 	.track_queue_depth	= 1,
8083 	.cmd_size		= sizeof(srb_t),
8084 };
8085 
8086 static const struct pci_error_handlers qla2xxx_err_handler = {
8087 	.error_detected = qla2xxx_pci_error_detected,
8088 	.mmio_enabled = qla2xxx_pci_mmio_enabled,
8089 	.slot_reset = qla2xxx_pci_slot_reset,
8090 	.resume = qla2xxx_pci_resume,
8091 	.reset_prepare = qla_pci_reset_prepare,
8092 	.reset_done = qla_pci_reset_done,
8093 };
8094 
8095 static struct pci_device_id qla2xxx_pci_tbl[] = {
8096 	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2100) },
8097 	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2200) },
8098 	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2300) },
8099 	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2312) },
8100 	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2322) },
8101 	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP6312) },
8102 	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP6322) },
8103 	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2422) },
8104 	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2432) },
8105 	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8432) },
8106 	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5422) },
8107 	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5432) },
8108 	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2532) },
8109 	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2031) },
8110 	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8001) },
8111 	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8021) },
8112 	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8031) },
8113 	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISPF001) },
8114 	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8044) },
8115 	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2071) },
8116 	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2271) },
8117 	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2261) },
8118 	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2061) },
8119 	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2081) },
8120 	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2281) },
8121 	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2089) },
8122 	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2289) },
8123 	{ 0 },
8124 };
8125 MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl);
8126 
8127 static struct pci_driver qla2xxx_pci_driver = {
8128 	.name		= QLA2XXX_DRIVER_NAME,
8129 	.driver		= {
8130 		.owner		= THIS_MODULE,
8131 	},
8132 	.id_table	= qla2xxx_pci_tbl,
8133 	.probe		= qla2x00_probe_one,
8134 	.remove		= qla2x00_remove_one,
8135 	.shutdown	= qla2x00_shutdown,
8136 	.err_handler	= &qla2xxx_err_handler,
8137 };
8138 
8139 static const struct file_operations apidev_fops = {
8140 	.owner = THIS_MODULE,
8141 	.llseek = noop_llseek,
8142 };
8143 
8144 /**
8145  * qla2x00_module_init - Module initialization.
8146  **/
8147 static int __init
8148 qla2x00_module_init(void)
8149 {
8150 	int ret = 0;
8151 
8152 	BUILD_BUG_ON(sizeof(cmd_a64_entry_t) != 64);
8153 	BUILD_BUG_ON(sizeof(cmd_entry_t) != 64);
8154 	BUILD_BUG_ON(sizeof(cont_a64_entry_t) != 64);
8155 	BUILD_BUG_ON(sizeof(cont_entry_t) != 64);
8156 	BUILD_BUG_ON(sizeof(init_cb_t) != 96);
8157 	BUILD_BUG_ON(sizeof(mrk_entry_t) != 64);
8158 	BUILD_BUG_ON(sizeof(ms_iocb_entry_t) != 64);
8159 	BUILD_BUG_ON(sizeof(request_t) != 64);
8160 	BUILD_BUG_ON(sizeof(struct abort_entry_24xx) != 64);
8161 	BUILD_BUG_ON(sizeof(struct abort_iocb_entry_fx00) != 64);
8162 	BUILD_BUG_ON(sizeof(struct abts_entry_24xx) != 64);
8163 	BUILD_BUG_ON(sizeof(struct access_chip_84xx) != 64);
8164 	BUILD_BUG_ON(sizeof(struct access_chip_rsp_84xx) != 64);
8165 	BUILD_BUG_ON(sizeof(struct cmd_bidir) != 64);
8166 	BUILD_BUG_ON(sizeof(struct cmd_nvme) != 64);
8167 	BUILD_BUG_ON(sizeof(struct cmd_type_6) != 64);
8168 	BUILD_BUG_ON(sizeof(struct cmd_type_7) != 64);
8169 	BUILD_BUG_ON(sizeof(struct cmd_type_7_fx00) != 64);
8170 	BUILD_BUG_ON(sizeof(struct cmd_type_crc_2) != 64);
8171 	BUILD_BUG_ON(sizeof(struct ct_entry_24xx) != 64);
8172 	BUILD_BUG_ON(sizeof(struct ct_fdmi1_hba_attributes) != 2604);
8173 	BUILD_BUG_ON(sizeof(struct ct_fdmi2_hba_attributes) != 4424);
8174 	BUILD_BUG_ON(sizeof(struct ct_fdmi2_port_attributes) != 4164);
8175 	BUILD_BUG_ON(sizeof(struct ct_fdmi_hba_attr) != 260);
8176 	BUILD_BUG_ON(sizeof(struct ct_fdmi_port_attr) != 260);
8177 	BUILD_BUG_ON(sizeof(struct ct_rsp_hdr) != 16);
8178 	BUILD_BUG_ON(sizeof(struct ctio_crc2_to_fw) != 64);
8179 	BUILD_BUG_ON(sizeof(struct device_reg_24xx) != 256);
8180 	BUILD_BUG_ON(sizeof(struct device_reg_25xxmq) != 24);
8181 	BUILD_BUG_ON(sizeof(struct device_reg_2xxx) != 256);
8182 	BUILD_BUG_ON(sizeof(struct device_reg_82xx) != 1288);
8183 	BUILD_BUG_ON(sizeof(struct device_reg_fx00) != 216);
8184 	BUILD_BUG_ON(sizeof(struct els_entry_24xx) != 64);
8185 	BUILD_BUG_ON(sizeof(struct els_sts_entry_24xx) != 64);
8186 	BUILD_BUG_ON(sizeof(struct fxdisc_entry_fx00) != 64);
8187 	BUILD_BUG_ON(sizeof(struct imm_ntfy_from_isp) != 64);
8188 	BUILD_BUG_ON(sizeof(struct init_cb_24xx) != 128);
8189 	BUILD_BUG_ON(sizeof(struct init_cb_81xx) != 128);
8190 	BUILD_BUG_ON(sizeof(struct logio_entry_24xx) != 64);
8191 	BUILD_BUG_ON(sizeof(struct mbx_entry) != 64);
8192 	BUILD_BUG_ON(sizeof(struct mid_init_cb_24xx) != 5252);
8193 	BUILD_BUG_ON(sizeof(struct mrk_entry_24xx) != 64);
8194 	BUILD_BUG_ON(sizeof(struct nvram_24xx) != 512);
8195 	BUILD_BUG_ON(sizeof(struct nvram_81xx) != 512);
8196 	BUILD_BUG_ON(sizeof(struct pt_ls4_request) != 64);
8197 	BUILD_BUG_ON(sizeof(struct pt_ls4_rx_unsol) != 64);
8198 	BUILD_BUG_ON(sizeof(struct purex_entry_24xx) != 64);
8199 	BUILD_BUG_ON(sizeof(struct qla2100_fw_dump) != 123634);
8200 	BUILD_BUG_ON(sizeof(struct qla2300_fw_dump) != 136100);
8201 	BUILD_BUG_ON(sizeof(struct qla24xx_fw_dump) != 37976);
8202 	BUILD_BUG_ON(sizeof(struct qla25xx_fw_dump) != 39228);
8203 	BUILD_BUG_ON(sizeof(struct qla2xxx_fce_chain) != 52);
8204 	BUILD_BUG_ON(sizeof(struct qla2xxx_fw_dump) != 136172);
8205 	BUILD_BUG_ON(sizeof(struct qla2xxx_mq_chain) != 524);
8206 	BUILD_BUG_ON(sizeof(struct qla2xxx_mqueue_chain) != 8);
8207 	BUILD_BUG_ON(sizeof(struct qla2xxx_mqueue_header) != 12);
8208 	BUILD_BUG_ON(sizeof(struct qla2xxx_offld_chain) != 24);
8209 	BUILD_BUG_ON(sizeof(struct qla81xx_fw_dump) != 39420);
8210 	BUILD_BUG_ON(sizeof(struct qla82xx_uri_data_desc) != 28);
8211 	BUILD_BUG_ON(sizeof(struct qla82xx_uri_table_desc) != 32);
8212 	BUILD_BUG_ON(sizeof(struct qla83xx_fw_dump) != 51196);
8213 	BUILD_BUG_ON(sizeof(struct qla_fcp_prio_cfg) != FCP_PRIO_CFG_SIZE);
8214 	BUILD_BUG_ON(sizeof(struct qla_fdt_layout) != 128);
8215 	BUILD_BUG_ON(sizeof(struct qla_flt_header) != 8);
8216 	BUILD_BUG_ON(sizeof(struct qla_flt_region) != 16);
8217 	BUILD_BUG_ON(sizeof(struct qla_npiv_entry) != 24);
8218 	BUILD_BUG_ON(sizeof(struct qla_npiv_header) != 16);
8219 	BUILD_BUG_ON(sizeof(struct rdp_rsp_payload) != 336);
8220 	BUILD_BUG_ON(sizeof(struct sns_cmd_pkt) != 2064);
8221 	BUILD_BUG_ON(sizeof(struct sts_entry_24xx) != 64);
8222 	BUILD_BUG_ON(sizeof(struct tsk_mgmt_entry) != 64);
8223 	BUILD_BUG_ON(sizeof(struct tsk_mgmt_entry_fx00) != 64);
8224 	BUILD_BUG_ON(sizeof(struct verify_chip_entry_84xx) != 64);
8225 	BUILD_BUG_ON(sizeof(struct verify_chip_rsp_84xx) != 52);
8226 	BUILD_BUG_ON(sizeof(struct vf_evfp_entry_24xx) != 56);
8227 	BUILD_BUG_ON(sizeof(struct vp_config_entry_24xx) != 64);
8228 	BUILD_BUG_ON(sizeof(struct vp_ctrl_entry_24xx) != 64);
8229 	BUILD_BUG_ON(sizeof(struct vp_rpt_id_entry_24xx) != 64);
8230 	BUILD_BUG_ON(sizeof(sts21_entry_t) != 64);
8231 	BUILD_BUG_ON(sizeof(sts22_entry_t) != 64);
8232 	BUILD_BUG_ON(sizeof(sts_cont_entry_t) != 64);
8233 	BUILD_BUG_ON(sizeof(sts_entry_t) != 64);
8234 	BUILD_BUG_ON(sizeof(sw_info_t) != 32);
8235 	BUILD_BUG_ON(sizeof(target_id_t) != 2);
8236 
8237 	qla_trace_init();
8238 
8239 	/* Allocate cache for SRBs. */
8240 	srb_cachep = kmem_cache_create("qla2xxx_srbs", sizeof(srb_t), 0,
8241 	    SLAB_HWCACHE_ALIGN, NULL);
8242 	if (srb_cachep == NULL) {
8243 		ql_log(ql_log_fatal, NULL, 0x0001,
8244 		    "Unable to allocate SRB cache...Failing load!.\n");
8245 		return -ENOMEM;
8246 	}
8247 
8248 	/* Initialize target kmem_cache and mem_pools */
8249 	ret = qlt_init();
8250 	if (ret < 0) {
8251 		goto destroy_cache;
8252 	} else if (ret > 0) {
8253 		/*
8254 		 * If initiator mode is explictly disabled by qlt_init(),
8255 		 * prevent scsi_transport_fc.c:fc_scsi_scan_rport() from
8256 		 * performing scsi_scan_target() during LOOP UP event.
8257 		 */
8258 		qla2xxx_transport_functions.disable_target_scan = 1;
8259 		qla2xxx_transport_vport_functions.disable_target_scan = 1;
8260 	}
8261 
8262 	/* Derive version string. */
8263 	strcpy(qla2x00_version_str, QLA2XXX_VERSION);
8264 	if (ql2xextended_error_logging)
8265 		strcat(qla2x00_version_str, "-debug");
8266 	if (ql2xextended_error_logging == 1)
8267 		ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK;
8268 
8269 	qla2xxx_transport_template =
8270 	    fc_attach_transport(&qla2xxx_transport_functions);
8271 	if (!qla2xxx_transport_template) {
8272 		ql_log(ql_log_fatal, NULL, 0x0002,
8273 		    "fc_attach_transport failed...Failing load!.\n");
8274 		ret = -ENODEV;
8275 		goto qlt_exit;
8276 	}
8277 
8278 	apidev_major = register_chrdev(0, QLA2XXX_APIDEV, &apidev_fops);
8279 	if (apidev_major < 0) {
8280 		ql_log(ql_log_fatal, NULL, 0x0003,
8281 		    "Unable to register char device %s.\n", QLA2XXX_APIDEV);
8282 	}
8283 
8284 	qla2xxx_transport_vport_template =
8285 	    fc_attach_transport(&qla2xxx_transport_vport_functions);
8286 	if (!qla2xxx_transport_vport_template) {
8287 		ql_log(ql_log_fatal, NULL, 0x0004,
8288 		    "fc_attach_transport vport failed...Failing load!.\n");
8289 		ret = -ENODEV;
8290 		goto unreg_chrdev;
8291 	}
8292 	ql_log(ql_log_info, NULL, 0x0005,
8293 	    "QLogic Fibre Channel HBA Driver: %s.\n",
8294 	    qla2x00_version_str);
8295 	ret = pci_register_driver(&qla2xxx_pci_driver);
8296 	if (ret) {
8297 		ql_log(ql_log_fatal, NULL, 0x0006,
8298 		    "pci_register_driver failed...ret=%d Failing load!.\n",
8299 		    ret);
8300 		goto release_vport_transport;
8301 	}
8302 	return ret;
8303 
8304 release_vport_transport:
8305 	fc_release_transport(qla2xxx_transport_vport_template);
8306 
8307 unreg_chrdev:
8308 	if (apidev_major >= 0)
8309 		unregister_chrdev(apidev_major, QLA2XXX_APIDEV);
8310 	fc_release_transport(qla2xxx_transport_template);
8311 
8312 qlt_exit:
8313 	qlt_exit();
8314 
8315 destroy_cache:
8316 	kmem_cache_destroy(srb_cachep);
8317 
8318 	qla_trace_uninit();
8319 	return ret;
8320 }
8321 
8322 /**
8323  * qla2x00_module_exit - Module cleanup.
8324  **/
8325 static void __exit
8326 qla2x00_module_exit(void)
8327 {
8328 	pci_unregister_driver(&qla2xxx_pci_driver);
8329 	qla2x00_release_firmware();
8330 	kmem_cache_destroy(ctx_cachep);
8331 	fc_release_transport(qla2xxx_transport_vport_template);
8332 	if (apidev_major >= 0)
8333 		unregister_chrdev(apidev_major, QLA2XXX_APIDEV);
8334 	fc_release_transport(qla2xxx_transport_template);
8335 	qlt_exit();
8336 	kmem_cache_destroy(srb_cachep);
8337 	qla_trace_uninit();
8338 }
8339 
8340 module_init(qla2x00_module_init);
8341 module_exit(qla2x00_module_exit);
8342 
8343 MODULE_AUTHOR("QLogic Corporation");
8344 MODULE_DESCRIPTION("QLogic Fibre Channel HBA Driver");
8345 MODULE_LICENSE("GPL");
8346 MODULE_FIRMWARE(FW_FILE_ISP21XX);
8347 MODULE_FIRMWARE(FW_FILE_ISP22XX);
8348 MODULE_FIRMWARE(FW_FILE_ISP2300);
8349 MODULE_FIRMWARE(FW_FILE_ISP2322);
8350 MODULE_FIRMWARE(FW_FILE_ISP24XX);
8351 MODULE_FIRMWARE(FW_FILE_ISP25XX);
8352