xref: /openbmc/linux/drivers/scsi/lpfc/lpfc_init.c (revision d70705e1)
1 /*******************************************************************
2  * This file is part of the Emulex Linux Device Driver for         *
3  * Fibre Channel Host Bus Adapters.                                *
4  * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
5  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.  *
6  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
7  * EMULEX and SLI are trademarks of Emulex.                        *
8  * www.broadcom.com                                                *
9  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
10  *                                                                 *
11  * This program is free software; you can redistribute it and/or   *
12  * modify it under the terms of version 2 of the GNU General       *
13  * Public License as published by the Free Software Foundation.    *
14  * This program is distributed in the hope that it will be useful. *
15  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
16  * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
17  * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
18  * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
19  * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
20  * more details, a copy of which can be found in the file COPYING  *
21  * included with this package.                                     *
22  *******************************************************************/
23 
24 #include <linux/blkdev.h>
25 #include <linux/delay.h>
26 #include <linux/dma-mapping.h>
27 #include <linux/idr.h>
28 #include <linux/interrupt.h>
29 #include <linux/module.h>
30 #include <linux/kthread.h>
31 #include <linux/pci.h>
32 #include <linux/spinlock.h>
33 #include <linux/ctype.h>
34 #include <linux/aer.h>
35 #include <linux/slab.h>
36 #include <linux/firmware.h>
37 #include <linux/miscdevice.h>
38 #include <linux/percpu.h>
39 #include <linux/msi.h>
40 #include <linux/irq.h>
41 #include <linux/bitops.h>
42 #include <linux/crash_dump.h>
43 #include <linux/cpu.h>
44 #include <linux/cpuhotplug.h>
45 
46 #include <scsi/scsi.h>
47 #include <scsi/scsi_device.h>
48 #include <scsi/scsi_host.h>
49 #include <scsi/scsi_transport_fc.h>
50 #include <scsi/scsi_tcq.h>
51 #include <scsi/fc/fc_fs.h>
52 
53 #include "lpfc_hw4.h"
54 #include "lpfc_hw.h"
55 #include "lpfc_sli.h"
56 #include "lpfc_sli4.h"
57 #include "lpfc_nl.h"
58 #include "lpfc_disc.h"
59 #include "lpfc.h"
60 #include "lpfc_scsi.h"
61 #include "lpfc_nvme.h"
62 #include "lpfc_logmsg.h"
63 #include "lpfc_crtn.h"
64 #include "lpfc_vport.h"
65 #include "lpfc_version.h"
66 #include "lpfc_ids.h"
67 
68 static enum cpuhp_state lpfc_cpuhp_state;
69 /* Used when mapping IRQ vectors in a driver centric manner */
70 static uint32_t lpfc_present_cpu;
71 
72 static void __lpfc_cpuhp_remove(struct lpfc_hba *phba);
73 static void lpfc_cpuhp_remove(struct lpfc_hba *phba);
74 static void lpfc_cpuhp_add(struct lpfc_hba *phba);
75 static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
76 static int lpfc_post_rcv_buf(struct lpfc_hba *);
77 static int lpfc_sli4_queue_verify(struct lpfc_hba *);
78 static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
79 static int lpfc_setup_endian_order(struct lpfc_hba *);
80 static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
81 static void lpfc_free_els_sgl_list(struct lpfc_hba *);
82 static void lpfc_free_nvmet_sgl_list(struct lpfc_hba *);
83 static void lpfc_init_sgl_list(struct lpfc_hba *);
84 static int lpfc_init_active_sgl_array(struct lpfc_hba *);
85 static void lpfc_free_active_sgl(struct lpfc_hba *);
86 static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
87 static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
88 static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
89 static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
90 static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
91 static void lpfc_sli4_disable_intr(struct lpfc_hba *);
92 static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t);
93 static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);
94 static uint16_t lpfc_find_cpu_handle(struct lpfc_hba *, uint16_t, int);
95 static void lpfc_setup_bg(struct lpfc_hba *, struct Scsi_Host *);
96 static int lpfc_sli4_cgn_parm_chg_evt(struct lpfc_hba *);
97 
98 static struct scsi_transport_template *lpfc_transport_template = NULL;
99 static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
100 static DEFINE_IDR(lpfc_hba_index);
101 #define LPFC_NVMET_BUF_POST 254
102 static int lpfc_vmid_res_alloc(struct lpfc_hba *phba, struct lpfc_vport *vport);
103 
104 /**
105  * lpfc_config_port_prep - Perform lpfc initialization prior to config port
106  * @phba: pointer to lpfc hba data structure.
107  *
108  * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
109  * mailbox command. It retrieves the revision information from the HBA and
110  * collects the Vital Product Data (VPD) about the HBA for preparing the
111  * configuration of the HBA.
112  *
113  * Return codes:
114  *   0 - success.
115  *   -ERESTART - requests the SLI layer to reset the HBA and try again.
116  *   Any other value - indicates an error.
117  **/
118 int
119 lpfc_config_port_prep(struct lpfc_hba *phba)
120 {
121 	lpfc_vpd_t *vp = &phba->vpd;
122 	int i = 0, rc;
123 	LPFC_MBOXQ_t *pmb;
124 	MAILBOX_t *mb;
125 	char *lpfc_vpd_data = NULL;
126 	uint16_t offset = 0;
127 	static char licensed[56] =
128 		    "key unlock for use with gnu public licensed code only\0";
129 	static int init_key = 1;
130 
131 	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
132 	if (!pmb) {
133 		phba->link_state = LPFC_HBA_ERROR;
134 		return -ENOMEM;
135 	}
136 
137 	mb = &pmb->u.mb;
138 	phba->link_state = LPFC_INIT_MBX_CMDS;
139 
140 	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
141 		if (init_key) {
142 			uint32_t *ptext = (uint32_t *) licensed;
143 
144 			for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
145 				*ptext = cpu_to_be32(*ptext);
146 			init_key = 0;
147 		}
148 
149 		lpfc_read_nv(phba, pmb);
150 		memset((char*)mb->un.varRDnvp.rsvd3, 0,
151 			sizeof (mb->un.varRDnvp.rsvd3));
152 		memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
153 			 sizeof (licensed));
154 
155 		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
156 
157 		if (rc != MBX_SUCCESS) {
158 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
159 					"0324 Config Port initialization "
160 					"error, mbxCmd x%x READ_NVPARM, "
161 					"mbxStatus x%x\n",
162 					mb->mbxCommand, mb->mbxStatus);
163 			mempool_free(pmb, phba->mbox_mem_pool);
164 			return -ERESTART;
165 		}
166 		memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
167 		       sizeof(phba->wwnn));
168 		memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
169 		       sizeof(phba->wwpn));
170 	}
171 
172 	/*
173 	 * Clear all option bits except LPFC_SLI3_BG_ENABLED,
174 	 * which was already set in lpfc_get_cfgparam()
175 	 */
176 	phba->sli3_options &= (uint32_t)LPFC_SLI3_BG_ENABLED;
177 
178 	/* Setup and issue mailbox READ REV command */
179 	lpfc_read_rev(phba, pmb);
180 	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
181 	if (rc != MBX_SUCCESS) {
182 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
183 				"0439 Adapter failed to init, mbxCmd x%x "
184 				"READ_REV, mbxStatus x%x\n",
185 				mb->mbxCommand, mb->mbxStatus);
186 		mempool_free( pmb, phba->mbox_mem_pool);
187 		return -ERESTART;
188 	}
189 
190 
191 	/*
192 	 * The value of rr must be 1 since the driver set the cv field to 1.
193 	 * This setting requires the FW to set all revision fields.
194 	 */
195 	if (mb->un.varRdRev.rr == 0) {
196 		vp->rev.rBit = 0;
197 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
198 				"0440 Adapter failed to init, READ_REV has "
199 				"missing revision information.\n");
200 		mempool_free(pmb, phba->mbox_mem_pool);
201 		return -ERESTART;
202 	}
203 
204 	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
205 		mempool_free(pmb, phba->mbox_mem_pool);
206 		return -EINVAL;
207 	}
208 
209 	/* Save information as VPD data */
210 	vp->rev.rBit = 1;
211 	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
212 	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
213 	memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
214 	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
215 	memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
216 	vp->rev.biuRev = mb->un.varRdRev.biuRev;
217 	vp->rev.smRev = mb->un.varRdRev.smRev;
218 	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
219 	vp->rev.endecRev = mb->un.varRdRev.endecRev;
220 	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
221 	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
222 	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
223 	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
224 	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
225 	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;
226 
227 	/* If the sli feature level is less then 9, we must
228 	 * tear down all RPIs and VPIs on link down if NPIV
229 	 * is enabled.
230 	 */
231 	if (vp->rev.feaLevelHigh < 9)
232 		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;
233 
234 	if (lpfc_is_LC_HBA(phba->pcidev->device))
235 		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
236 						sizeof (phba->RandomData));
237 
238 	/* Get adapter VPD information */
239 	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
240 	if (!lpfc_vpd_data)
241 		goto out_free_mbox;
242 	do {
243 		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
244 		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
245 
246 		if (rc != MBX_SUCCESS) {
247 			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
248 					"0441 VPD not present on adapter, "
249 					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
250 					mb->mbxCommand, mb->mbxStatus);
251 			mb->un.varDmp.word_cnt = 0;
252 		}
253 		/* dump mem may return a zero when finished or we got a
254 		 * mailbox error, either way we are done.
255 		 */
256 		if (mb->un.varDmp.word_cnt == 0)
257 			break;
258 
259 		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
260 			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
261 		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
262 				      lpfc_vpd_data + offset,
263 				      mb->un.varDmp.word_cnt);
264 		offset += mb->un.varDmp.word_cnt;
265 	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
266 
267 	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);
268 
269 	kfree(lpfc_vpd_data);
270 out_free_mbox:
271 	mempool_free(pmb, phba->mbox_mem_pool);
272 	return 0;
273 }
274 
275 /**
276  * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
277  * @phba: pointer to lpfc hba data structure.
278  * @pmboxq: pointer to the driver internal queue element for mailbox command.
279  *
280  * This is the completion handler for driver's configuring asynchronous event
281  * mailbox command to the device. If the mailbox command returns successfully,
282  * it will set internal async event support flag to 1; otherwise, it will
283  * set internal async event support flag to 0.
284  **/
285 static void
286 lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
287 {
288 	if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
289 		phba->temp_sensor_support = 1;
290 	else
291 		phba->temp_sensor_support = 0;
292 	mempool_free(pmboxq, phba->mbox_mem_pool);
293 	return;
294 }
295 
296 /**
297  * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
298  * @phba: pointer to lpfc hba data structure.
299  * @pmboxq: pointer to the driver internal queue element for mailbox command.
300  *
301  * This is the completion handler for dump mailbox command for getting
302  * wake up parameters. When this command complete, the response contain
303  * Option rom version of the HBA. This function translate the version number
304  * into a human readable string and store it in OptionROMVersion.
305  **/
306 static void
307 lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
308 {
309 	struct prog_id *prg;
310 	uint32_t prog_id_word;
311 	char dist = ' ';
312 	/* character array used for decoding dist type. */
313 	char dist_char[] = "nabx";
314 
315 	if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
316 		mempool_free(pmboxq, phba->mbox_mem_pool);
317 		return;
318 	}
319 
320 	prg = (struct prog_id *) &prog_id_word;
321 
322 	/* word 7 contain option rom version */
323 	prog_id_word = pmboxq->u.mb.un.varWords[7];
324 
325 	/* Decode the Option rom version word to a readable string */
326 	if (prg->dist < 4)
327 		dist = dist_char[prg->dist];
328 
329 	if ((prg->dist == 3) && (prg->num == 0))
330 		snprintf(phba->OptionROMVersion, 32, "%d.%d%d",
331 			prg->ver, prg->rev, prg->lev);
332 	else
333 		snprintf(phba->OptionROMVersion, 32, "%d.%d%d%c%d",
334 			prg->ver, prg->rev, prg->lev,
335 			dist, prg->num);
336 	mempool_free(pmboxq, phba->mbox_mem_pool);
337 	return;
338 }
339 
340 /**
341  * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname,
342  *	cfg_soft_wwnn, cfg_soft_wwpn
343  * @vport: pointer to lpfc vport data structure.
344  *
345  *
346  * Return codes
347  *   None.
348  **/
349 void
350 lpfc_update_vport_wwn(struct lpfc_vport *vport)
351 {
352 	uint8_t vvvl = vport->fc_sparam.cmn.valid_vendor_ver_level;
353 	u32 *fawwpn_key = (u32 *)&vport->fc_sparam.un.vendorVersion[0];
354 
355 	/* If the soft name exists then update it using the service params */
356 	if (vport->phba->cfg_soft_wwnn)
357 		u64_to_wwn(vport->phba->cfg_soft_wwnn,
358 			   vport->fc_sparam.nodeName.u.wwn);
359 	if (vport->phba->cfg_soft_wwpn)
360 		u64_to_wwn(vport->phba->cfg_soft_wwpn,
361 			   vport->fc_sparam.portName.u.wwn);
362 
363 	/*
364 	 * If the name is empty or there exists a soft name
365 	 * then copy the service params name, otherwise use the fc name
366 	 */
367 	if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn)
368 		memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
369 			sizeof(struct lpfc_name));
370 	else
371 		memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
372 			sizeof(struct lpfc_name));
373 
374 	/*
375 	 * If the port name has changed, then set the Param changes flag
376 	 * to unreg the login
377 	 */
378 	if (vport->fc_portname.u.wwn[0] != 0 &&
379 		memcmp(&vport->fc_portname, &vport->fc_sparam.portName,
380 			sizeof(struct lpfc_name)))
381 		vport->vport_flag |= FAWWPN_PARAM_CHG;
382 
383 	if (vport->fc_portname.u.wwn[0] == 0 ||
384 	    vport->phba->cfg_soft_wwpn ||
385 	    (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) ||
386 	    vport->vport_flag & FAWWPN_SET) {
387 		memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
388 			sizeof(struct lpfc_name));
389 		vport->vport_flag &= ~FAWWPN_SET;
390 		if (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR)
391 			vport->vport_flag |= FAWWPN_SET;
392 	}
393 	else
394 		memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
395 			sizeof(struct lpfc_name));
396 }
397 
398 /**
399  * lpfc_config_port_post - Perform lpfc initialization after config port
400  * @phba: pointer to lpfc hba data structure.
401  *
402  * This routine will do LPFC initialization after the CONFIG_PORT mailbox
403  * command call. It performs all internal resource and state setups on the
404  * port: post IOCB buffers, enable appropriate host interrupt attentions,
405  * ELS ring timers, etc.
406  *
407  * Return codes
408  *   0 - success.
409  *   Any other value - error.
410  **/
411 int
412 lpfc_config_port_post(struct lpfc_hba *phba)
413 {
414 	struct lpfc_vport *vport = phba->pport;
415 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
416 	LPFC_MBOXQ_t *pmb;
417 	MAILBOX_t *mb;
418 	struct lpfc_dmabuf *mp;
419 	struct lpfc_sli *psli = &phba->sli;
420 	uint32_t status, timeout;
421 	int i, j;
422 	int rc;
423 
424 	spin_lock_irq(&phba->hbalock);
425 	/*
426 	 * If the Config port completed correctly the HBA is not
427 	 * over heated any more.
428 	 */
429 	if (phba->over_temp_state == HBA_OVER_TEMP)
430 		phba->over_temp_state = HBA_NORMAL_TEMP;
431 	spin_unlock_irq(&phba->hbalock);
432 
433 	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
434 	if (!pmb) {
435 		phba->link_state = LPFC_HBA_ERROR;
436 		return -ENOMEM;
437 	}
438 	mb = &pmb->u.mb;
439 
440 	/* Get login parameters for NID.  */
441 	rc = lpfc_read_sparam(phba, pmb, 0);
442 	if (rc) {
443 		mempool_free(pmb, phba->mbox_mem_pool);
444 		return -ENOMEM;
445 	}
446 
447 	pmb->vport = vport;
448 	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
449 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
450 				"0448 Adapter failed init, mbxCmd x%x "
451 				"READ_SPARM mbxStatus x%x\n",
452 				mb->mbxCommand, mb->mbxStatus);
453 		phba->link_state = LPFC_HBA_ERROR;
454 		mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
455 		mempool_free(pmb, phba->mbox_mem_pool);
456 		lpfc_mbuf_free(phba, mp->virt, mp->phys);
457 		kfree(mp);
458 		return -EIO;
459 	}
460 
461 	mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
462 
463 	memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
464 	lpfc_mbuf_free(phba, mp->virt, mp->phys);
465 	kfree(mp);
466 	pmb->ctx_buf = NULL;
467 	lpfc_update_vport_wwn(vport);
468 
469 	/* Update the fc_host data structures with new wwn. */
470 	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
471 	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
472 	fc_host_max_npiv_vports(shost) = phba->max_vpi;
473 
474 	/* If no serial number in VPD data, use low 6 bytes of WWNN */
475 	/* This should be consolidated into parse_vpd ? - mr */
476 	if (phba->SerialNumber[0] == 0) {
477 		uint8_t *outptr;
478 
479 		outptr = &vport->fc_nodename.u.s.IEEE[0];
480 		for (i = 0; i < 12; i++) {
481 			status = *outptr++;
482 			j = ((status & 0xf0) >> 4);
483 			if (j <= 9)
484 				phba->SerialNumber[i] =
485 				    (char)((uint8_t) 0x30 + (uint8_t) j);
486 			else
487 				phba->SerialNumber[i] =
488 				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
489 			i++;
490 			j = (status & 0xf);
491 			if (j <= 9)
492 				phba->SerialNumber[i] =
493 				    (char)((uint8_t) 0x30 + (uint8_t) j);
494 			else
495 				phba->SerialNumber[i] =
496 				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
497 		}
498 	}
499 
500 	lpfc_read_config(phba, pmb);
501 	pmb->vport = vport;
502 	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
503 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
504 				"0453 Adapter failed to init, mbxCmd x%x "
505 				"READ_CONFIG, mbxStatus x%x\n",
506 				mb->mbxCommand, mb->mbxStatus);
507 		phba->link_state = LPFC_HBA_ERROR;
508 		mempool_free( pmb, phba->mbox_mem_pool);
509 		return -EIO;
510 	}
511 
512 	/* Check if the port is disabled */
513 	lpfc_sli_read_link_ste(phba);
514 
515 	/* Reset the DFT_HBA_Q_DEPTH to the max xri  */
516 	if (phba->cfg_hba_queue_depth > mb->un.varRdConfig.max_xri) {
517 		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
518 				"3359 HBA queue depth changed from %d to %d\n",
519 				phba->cfg_hba_queue_depth,
520 				mb->un.varRdConfig.max_xri);
521 		phba->cfg_hba_queue_depth = mb->un.varRdConfig.max_xri;
522 	}
523 
524 	phba->lmt = mb->un.varRdConfig.lmt;
525 
526 	/* Get the default values for Model Name and Description */
527 	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
528 
529 	phba->link_state = LPFC_LINK_DOWN;
530 
531 	/* Only process IOCBs on ELS ring till hba_state is READY */
532 	if (psli->sli3_ring[LPFC_EXTRA_RING].sli.sli3.cmdringaddr)
533 		psli->sli3_ring[LPFC_EXTRA_RING].flag |= LPFC_STOP_IOCB_EVENT;
534 	if (psli->sli3_ring[LPFC_FCP_RING].sli.sli3.cmdringaddr)
535 		psli->sli3_ring[LPFC_FCP_RING].flag |= LPFC_STOP_IOCB_EVENT;
536 
537 	/* Post receive buffers for desired rings */
538 	if (phba->sli_rev != 3)
539 		lpfc_post_rcv_buf(phba);
540 
541 	/*
542 	 * Configure HBA MSI-X attention conditions to messages if MSI-X mode
543 	 */
544 	if (phba->intr_type == MSIX) {
545 		rc = lpfc_config_msi(phba, pmb);
546 		if (rc) {
547 			mempool_free(pmb, phba->mbox_mem_pool);
548 			return -EIO;
549 		}
550 		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
551 		if (rc != MBX_SUCCESS) {
552 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
553 					"0352 Config MSI mailbox command "
554 					"failed, mbxCmd x%x, mbxStatus x%x\n",
555 					pmb->u.mb.mbxCommand,
556 					pmb->u.mb.mbxStatus);
557 			mempool_free(pmb, phba->mbox_mem_pool);
558 			return -EIO;
559 		}
560 	}
561 
562 	spin_lock_irq(&phba->hbalock);
563 	/* Initialize ERATT handling flag */
564 	phba->hba_flag &= ~HBA_ERATT_HANDLED;
565 
566 	/* Enable appropriate host interrupts */
567 	if (lpfc_readl(phba->HCregaddr, &status)) {
568 		spin_unlock_irq(&phba->hbalock);
569 		return -EIO;
570 	}
571 	status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
572 	if (psli->num_rings > 0)
573 		status |= HC_R0INT_ENA;
574 	if (psli->num_rings > 1)
575 		status |= HC_R1INT_ENA;
576 	if (psli->num_rings > 2)
577 		status |= HC_R2INT_ENA;
578 	if (psli->num_rings > 3)
579 		status |= HC_R3INT_ENA;
580 
581 	if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
582 	    (phba->cfg_poll & DISABLE_FCP_RING_INT))
583 		status &= ~(HC_R0INT_ENA);
584 
585 	writel(status, phba->HCregaddr);
586 	readl(phba->HCregaddr); /* flush */
587 	spin_unlock_irq(&phba->hbalock);
588 
589 	/* Set up ring-0 (ELS) timer */
590 	timeout = phba->fc_ratov * 2;
591 	mod_timer(&vport->els_tmofunc,
592 		  jiffies + msecs_to_jiffies(1000 * timeout));
593 	/* Set up heart beat (HB) timer */
594 	mod_timer(&phba->hb_tmofunc,
595 		  jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
596 	phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
597 	phba->last_completion_time = jiffies;
598 	/* Set up error attention (ERATT) polling timer */
599 	mod_timer(&phba->eratt_poll,
600 		  jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
601 
602 	if (phba->hba_flag & LINK_DISABLED) {
603 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
604 				"2598 Adapter Link is disabled.\n");
605 		lpfc_down_link(phba, pmb);
606 		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
607 		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
608 		if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
609 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
610 					"2599 Adapter failed to issue DOWN_LINK"
611 					" mbox command rc 0x%x\n", rc);
612 
613 			mempool_free(pmb, phba->mbox_mem_pool);
614 			return -EIO;
615 		}
616 	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
617 		mempool_free(pmb, phba->mbox_mem_pool);
618 		rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
619 		if (rc)
620 			return rc;
621 	}
622 	/* MBOX buffer will be freed in mbox compl */
623 	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
624 	if (!pmb) {
625 		phba->link_state = LPFC_HBA_ERROR;
626 		return -ENOMEM;
627 	}
628 
629 	lpfc_config_async(phba, pmb, LPFC_ELS_RING);
630 	pmb->mbox_cmpl = lpfc_config_async_cmpl;
631 	pmb->vport = phba->pport;
632 	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
633 
634 	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
635 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
636 				"0456 Adapter failed to issue "
637 				"ASYNCEVT_ENABLE mbox status x%x\n",
638 				rc);
639 		mempool_free(pmb, phba->mbox_mem_pool);
640 	}
641 
642 	/* Get Option rom version */
643 	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
644 	if (!pmb) {
645 		phba->link_state = LPFC_HBA_ERROR;
646 		return -ENOMEM;
647 	}
648 
649 	lpfc_dump_wakeup_param(phba, pmb);
650 	pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
651 	pmb->vport = phba->pport;
652 	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
653 
654 	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
655 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
656 				"0435 Adapter failed "
657 				"to get Option ROM version status x%x\n", rc);
658 		mempool_free(pmb, phba->mbox_mem_pool);
659 	}
660 
661 	return 0;
662 }
663 
664 /**
665  * lpfc_hba_init_link - Initialize the FC link
666  * @phba: pointer to lpfc hba data structure.
667  * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
668  *
669  * This routine will issue the INIT_LINK mailbox command call.
670  * It is available to other drivers through the lpfc_hba data
671  * structure for use as a delayed link up mechanism with the
672  * module parameter lpfc_suppress_link_up.
673  *
674  * Return code
675  *		0 - success
676  *		Any other value - error
677  **/
678 static int
679 lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
680 {
681 	return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag);
682 }
683 
684 /**
685  * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology
686  * @phba: pointer to lpfc hba data structure.
687  * @fc_topology: desired fc topology.
688  * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
689  *
690  * This routine will issue the INIT_LINK mailbox command call.
691  * It is available to other drivers through the lpfc_hba data
692  * structure for use as a delayed link up mechanism with the
693  * module parameter lpfc_suppress_link_up.
694  *
695  * Return code
696  *              0 - success
697  *              Any other value - error
698  **/
699 int
700 lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology,
701 			       uint32_t flag)
702 {
703 	struct lpfc_vport *vport = phba->pport;
704 	LPFC_MBOXQ_t *pmb;
705 	MAILBOX_t *mb;
706 	int rc;
707 
708 	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
709 	if (!pmb) {
710 		phba->link_state = LPFC_HBA_ERROR;
711 		return -ENOMEM;
712 	}
713 	mb = &pmb->u.mb;
714 	pmb->vport = vport;
715 
716 	if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) ||
717 	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) &&
718 	     !(phba->lmt & LMT_1Gb)) ||
719 	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) &&
720 	     !(phba->lmt & LMT_2Gb)) ||
721 	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) &&
722 	     !(phba->lmt & LMT_4Gb)) ||
723 	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) &&
724 	     !(phba->lmt & LMT_8Gb)) ||
725 	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) &&
726 	     !(phba->lmt & LMT_10Gb)) ||
727 	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) &&
728 	     !(phba->lmt & LMT_16Gb)) ||
729 	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_32G) &&
730 	     !(phba->lmt & LMT_32Gb)) ||
731 	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_64G) &&
732 	     !(phba->lmt & LMT_64Gb))) {
733 		/* Reset link speed to auto */
734 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
735 				"1302 Invalid speed for this board:%d "
736 				"Reset link speed to auto.\n",
737 				phba->cfg_link_speed);
738 			phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
739 	}
740 	lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed);
741 	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
742 	if (phba->sli_rev < LPFC_SLI_REV4)
743 		lpfc_set_loopback_flag(phba);
744 	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
745 	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
746 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
747 				"0498 Adapter failed to init, mbxCmd x%x "
748 				"INIT_LINK, mbxStatus x%x\n",
749 				mb->mbxCommand, mb->mbxStatus);
750 		if (phba->sli_rev <= LPFC_SLI_REV3) {
751 			/* Clear all interrupt enable conditions */
752 			writel(0, phba->HCregaddr);
753 			readl(phba->HCregaddr); /* flush */
754 			/* Clear all pending interrupts */
755 			writel(0xffffffff, phba->HAregaddr);
756 			readl(phba->HAregaddr); /* flush */
757 		}
758 		phba->link_state = LPFC_HBA_ERROR;
759 		if (rc != MBX_BUSY || flag == MBX_POLL)
760 			mempool_free(pmb, phba->mbox_mem_pool);
761 		return -EIO;
762 	}
763 	phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
764 	if (flag == MBX_POLL)
765 		mempool_free(pmb, phba->mbox_mem_pool);
766 
767 	return 0;
768 }
769 
770 /**
771  * lpfc_hba_down_link - this routine downs the FC link
772  * @phba: pointer to lpfc hba data structure.
773  * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
774  *
775  * This routine will issue the DOWN_LINK mailbox command call.
776  * It is available to other drivers through the lpfc_hba data
777  * structure for use to stop the link.
778  *
779  * Return code
780  *		0 - success
781  *		Any other value - error
782  **/
783 static int
784 lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
785 {
786 	LPFC_MBOXQ_t *pmb;
787 	int rc;
788 
789 	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
790 	if (!pmb) {
791 		phba->link_state = LPFC_HBA_ERROR;
792 		return -ENOMEM;
793 	}
794 
795 	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
796 			"0491 Adapter Link is disabled.\n");
797 	lpfc_down_link(phba, pmb);
798 	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
799 	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
800 	if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
801 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
802 				"2522 Adapter failed to issue DOWN_LINK"
803 				" mbox command rc 0x%x\n", rc);
804 
805 		mempool_free(pmb, phba->mbox_mem_pool);
806 		return -EIO;
807 	}
808 	if (flag == MBX_POLL)
809 		mempool_free(pmb, phba->mbox_mem_pool);
810 
811 	return 0;
812 }
813 
814 /**
815  * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
816  * @phba: pointer to lpfc HBA data structure.
817  *
818  * This routine will do LPFC uninitialization before the HBA is reset when
819  * bringing down the SLI Layer.
820  *
821  * Return codes
822  *   0 - success.
823  *   Any other value - error.
824  **/
825 int
826 lpfc_hba_down_prep(struct lpfc_hba *phba)
827 {
828 	struct lpfc_vport **vports;
829 	int i;
830 
831 	if (phba->sli_rev <= LPFC_SLI_REV3) {
832 		/* Disable interrupts */
833 		writel(0, phba->HCregaddr);
834 		readl(phba->HCregaddr); /* flush */
835 	}
836 
837 	if (phba->pport->load_flag & FC_UNLOADING)
838 		lpfc_cleanup_discovery_resources(phba->pport);
839 	else {
840 		vports = lpfc_create_vport_work_array(phba);
841 		if (vports != NULL)
842 			for (i = 0; i <= phba->max_vports &&
843 				vports[i] != NULL; i++)
844 				lpfc_cleanup_discovery_resources(vports[i]);
845 		lpfc_destroy_vport_work_array(phba, vports);
846 	}
847 	return 0;
848 }
849 
850 /**
851  * lpfc_sli4_free_sp_events - Cleanup sp_queue_events to free
852  * rspiocb which got deferred
853  *
854  * @phba: pointer to lpfc HBA data structure.
855  *
856  * This routine will cleanup completed slow path events after HBA is reset
857  * when bringing down the SLI Layer.
858  *
859  *
860  * Return codes
861  *   void.
862  **/
863 static void
864 lpfc_sli4_free_sp_events(struct lpfc_hba *phba)
865 {
866 	struct lpfc_iocbq *rspiocbq;
867 	struct hbq_dmabuf *dmabuf;
868 	struct lpfc_cq_event *cq_event;
869 
870 	spin_lock_irq(&phba->hbalock);
871 	phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
872 	spin_unlock_irq(&phba->hbalock);
873 
874 	while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
875 		/* Get the response iocb from the head of work queue */
876 		spin_lock_irq(&phba->hbalock);
877 		list_remove_head(&phba->sli4_hba.sp_queue_event,
878 				 cq_event, struct lpfc_cq_event, list);
879 		spin_unlock_irq(&phba->hbalock);
880 
881 		switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
882 		case CQE_CODE_COMPL_WQE:
883 			rspiocbq = container_of(cq_event, struct lpfc_iocbq,
884 						 cq_event);
885 			lpfc_sli_release_iocbq(phba, rspiocbq);
886 			break;
887 		case CQE_CODE_RECEIVE:
888 		case CQE_CODE_RECEIVE_V1:
889 			dmabuf = container_of(cq_event, struct hbq_dmabuf,
890 					      cq_event);
891 			lpfc_in_buf_free(phba, &dmabuf->dbuf);
892 		}
893 	}
894 }
895 
896 /**
897  * lpfc_hba_free_post_buf - Perform lpfc uninitialization after HBA reset
898  * @phba: pointer to lpfc HBA data structure.
899  *
900  * This routine will cleanup posted ELS buffers after the HBA is reset
901  * when bringing down the SLI Layer.
902  *
903  *
904  * Return codes
905  *   void.
906  **/
907 static void
908 lpfc_hba_free_post_buf(struct lpfc_hba *phba)
909 {
910 	struct lpfc_sli *psli = &phba->sli;
911 	struct lpfc_sli_ring *pring;
912 	struct lpfc_dmabuf *mp, *next_mp;
913 	LIST_HEAD(buflist);
914 	int count;
915 
916 	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
917 		lpfc_sli_hbqbuf_free_all(phba);
918 	else {
919 		/* Cleanup preposted buffers on the ELS ring */
920 		pring = &psli->sli3_ring[LPFC_ELS_RING];
921 		spin_lock_irq(&phba->hbalock);
922 		list_splice_init(&pring->postbufq, &buflist);
923 		spin_unlock_irq(&phba->hbalock);
924 
925 		count = 0;
926 		list_for_each_entry_safe(mp, next_mp, &buflist, list) {
927 			list_del(&mp->list);
928 			count++;
929 			lpfc_mbuf_free(phba, mp->virt, mp->phys);
930 			kfree(mp);
931 		}
932 
933 		spin_lock_irq(&phba->hbalock);
934 		pring->postbufq_cnt -= count;
935 		spin_unlock_irq(&phba->hbalock);
936 	}
937 }
938 
939 /**
940  * lpfc_hba_clean_txcmplq - Perform lpfc uninitialization after HBA reset
941  * @phba: pointer to lpfc HBA data structure.
942  *
943  * This routine will cleanup the txcmplq after the HBA is reset when bringing
944  * down the SLI Layer.
945  *
946  * Return codes
947  *   void
948  **/
949 static void
950 lpfc_hba_clean_txcmplq(struct lpfc_hba *phba)
951 {
952 	struct lpfc_sli *psli = &phba->sli;
953 	struct lpfc_queue *qp = NULL;
954 	struct lpfc_sli_ring *pring;
955 	LIST_HEAD(completions);
956 	int i;
957 	struct lpfc_iocbq *piocb, *next_iocb;
958 
959 	if (phba->sli_rev != LPFC_SLI_REV4) {
960 		for (i = 0; i < psli->num_rings; i++) {
961 			pring = &psli->sli3_ring[i];
962 			spin_lock_irq(&phba->hbalock);
963 			/* At this point in time the HBA is either reset or DOA
964 			 * Nothing should be on txcmplq as it will
965 			 * NEVER complete.
966 			 */
967 			list_splice_init(&pring->txcmplq, &completions);
968 			pring->txcmplq_cnt = 0;
969 			spin_unlock_irq(&phba->hbalock);
970 
971 			lpfc_sli_abort_iocb_ring(phba, pring);
972 		}
973 		/* Cancel all the IOCBs from the completions list */
974 		lpfc_sli_cancel_iocbs(phba, &completions,
975 				      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
976 		return;
977 	}
978 	list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
979 		pring = qp->pring;
980 		if (!pring)
981 			continue;
982 		spin_lock_irq(&pring->ring_lock);
983 		list_for_each_entry_safe(piocb, next_iocb,
984 					 &pring->txcmplq, list)
985 			piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
986 		list_splice_init(&pring->txcmplq, &completions);
987 		pring->txcmplq_cnt = 0;
988 		spin_unlock_irq(&pring->ring_lock);
989 		lpfc_sli_abort_iocb_ring(phba, pring);
990 	}
991 	/* Cancel all the IOCBs from the completions list */
992 	lpfc_sli_cancel_iocbs(phba, &completions,
993 			      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
994 }
995 
996 /**
997  * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
998  * @phba: pointer to lpfc HBA data structure.
999  *
1000  * This routine will do uninitialization after the HBA is reset when bring
1001  * down the SLI Layer.
1002  *
1003  * Return codes
1004  *   0 - success.
1005  *   Any other value - error.
1006  **/
1007 static int
1008 lpfc_hba_down_post_s3(struct lpfc_hba *phba)
1009 {
1010 	lpfc_hba_free_post_buf(phba);
1011 	lpfc_hba_clean_txcmplq(phba);
1012 	return 0;
1013 }
1014 
1015 /**
1016  * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
1017  * @phba: pointer to lpfc HBA data structure.
1018  *
1019  * This routine will do uninitialization after the HBA is reset when bring
1020  * down the SLI Layer.
1021  *
1022  * Return codes
1023  *   0 - success.
1024  *   Any other value - error.
1025  **/
1026 static int
1027 lpfc_hba_down_post_s4(struct lpfc_hba *phba)
1028 {
1029 	struct lpfc_io_buf *psb, *psb_next;
1030 	struct lpfc_async_xchg_ctx *ctxp, *ctxp_next;
1031 	struct lpfc_sli4_hdw_queue *qp;
1032 	LIST_HEAD(aborts);
1033 	LIST_HEAD(nvme_aborts);
1034 	LIST_HEAD(nvmet_aborts);
1035 	struct lpfc_sglq *sglq_entry = NULL;
1036 	int cnt, idx;
1037 
1038 
1039 	lpfc_sli_hbqbuf_free_all(phba);
1040 	lpfc_hba_clean_txcmplq(phba);
1041 
1042 	/* At this point in time the HBA is either reset or DOA. Either
1043 	 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
1044 	 * on the lpfc_els_sgl_list so that it can either be freed if the
1045 	 * driver is unloading or reposted if the driver is restarting
1046 	 * the port.
1047 	 */
1048 
1049 	/* sgl_list_lock required because worker thread uses this
1050 	 * list.
1051 	 */
1052 	spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
1053 	list_for_each_entry(sglq_entry,
1054 		&phba->sli4_hba.lpfc_abts_els_sgl_list, list)
1055 		sglq_entry->state = SGL_FREED;
1056 
1057 	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
1058 			&phba->sli4_hba.lpfc_els_sgl_list);
1059 
1060 
1061 	spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
1062 
1063 	/* abts_xxxx_buf_list_lock required because worker thread uses this
1064 	 * list.
1065 	 */
1066 	spin_lock_irq(&phba->hbalock);
1067 	cnt = 0;
1068 	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
1069 		qp = &phba->sli4_hba.hdwq[idx];
1070 
1071 		spin_lock(&qp->abts_io_buf_list_lock);
1072 		list_splice_init(&qp->lpfc_abts_io_buf_list,
1073 				 &aborts);
1074 
1075 		list_for_each_entry_safe(psb, psb_next, &aborts, list) {
1076 			psb->pCmd = NULL;
1077 			psb->status = IOSTAT_SUCCESS;
1078 			cnt++;
1079 		}
1080 		spin_lock(&qp->io_buf_list_put_lock);
1081 		list_splice_init(&aborts, &qp->lpfc_io_buf_list_put);
1082 		qp->put_io_bufs += qp->abts_scsi_io_bufs;
1083 		qp->put_io_bufs += qp->abts_nvme_io_bufs;
1084 		qp->abts_scsi_io_bufs = 0;
1085 		qp->abts_nvme_io_bufs = 0;
1086 		spin_unlock(&qp->io_buf_list_put_lock);
1087 		spin_unlock(&qp->abts_io_buf_list_lock);
1088 	}
1089 	spin_unlock_irq(&phba->hbalock);
1090 
1091 	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
1092 		spin_lock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1093 		list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
1094 				 &nvmet_aborts);
1095 		spin_unlock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1096 		list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) {
1097 			ctxp->flag &= ~(LPFC_NVME_XBUSY | LPFC_NVME_ABORT_OP);
1098 			lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
1099 		}
1100 	}
1101 
1102 	lpfc_sli4_free_sp_events(phba);
1103 	return cnt;
1104 }
1105 
1106 /**
1107  * lpfc_hba_down_post - Wrapper func for hba down post routine
1108  * @phba: pointer to lpfc HBA data structure.
1109  *
1110  * This routine wraps the actual SLI3 or SLI4 routine for performing
1111  * uninitialization after the HBA is reset when bring down the SLI Layer.
1112  *
1113  * Return codes
1114  *   0 - success.
1115  *   Any other value - error.
1116  **/
1117 int
1118 lpfc_hba_down_post(struct lpfc_hba *phba)
1119 {
1120 	return (*phba->lpfc_hba_down_post)(phba);
1121 }
1122 
1123 /**
1124  * lpfc_hb_timeout - The HBA-timer timeout handler
1125  * @t: timer context used to obtain the pointer to lpfc hba data structure.
1126  *
1127  * This is the HBA-timer timeout handler registered to the lpfc driver. When
1128  * this timer fires, a HBA timeout event shall be posted to the lpfc driver
1129  * work-port-events bitmap and the worker thread is notified. This timeout
1130  * event will be used by the worker thread to invoke the actual timeout
1131  * handler routine, lpfc_hb_timeout_handler. Any periodical operations will
1132  * be performed in the timeout handler and the HBA timeout event bit shall
1133  * be cleared by the worker thread after it has taken the event bitmap out.
1134  **/
1135 static void
1136 lpfc_hb_timeout(struct timer_list *t)
1137 {
1138 	struct lpfc_hba *phba;
1139 	uint32_t tmo_posted;
1140 	unsigned long iflag;
1141 
1142 	phba = from_timer(phba, t, hb_tmofunc);
1143 
1144 	/* Check for heart beat timeout conditions */
1145 	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
1146 	tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
1147 	if (!tmo_posted)
1148 		phba->pport->work_port_events |= WORKER_HB_TMO;
1149 	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
1150 
1151 	/* Tell the worker thread there is work to do */
1152 	if (!tmo_posted)
1153 		lpfc_worker_wake_up(phba);
1154 	return;
1155 }
1156 
1157 /**
1158  * lpfc_rrq_timeout - The RRQ-timer timeout handler
1159  * @t: timer context used to obtain the pointer to lpfc hba data structure.
1160  *
1161  * This is the RRQ-timer timeout handler registered to the lpfc driver. When
1162  * this timer fires, a RRQ timeout event shall be posted to the lpfc driver
1163  * work-port-events bitmap and the worker thread is notified. This timeout
1164  * event will be used by the worker thread to invoke the actual timeout
1165  * handler routine, lpfc_rrq_handler. Any periodical operations will
1166  * be performed in the timeout handler and the RRQ timeout event bit shall
1167  * be cleared by the worker thread after it has taken the event bitmap out.
1168  **/
1169 static void
1170 lpfc_rrq_timeout(struct timer_list *t)
1171 {
1172 	struct lpfc_hba *phba;
1173 	unsigned long iflag;
1174 
1175 	phba = from_timer(phba, t, rrq_tmr);
1176 	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
1177 	if (!(phba->pport->load_flag & FC_UNLOADING))
1178 		phba->hba_flag |= HBA_RRQ_ACTIVE;
1179 	else
1180 		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
1181 	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
1182 
1183 	if (!(phba->pport->load_flag & FC_UNLOADING))
1184 		lpfc_worker_wake_up(phba);
1185 }
1186 
1187 /**
1188  * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
1189  * @phba: pointer to lpfc hba data structure.
1190  * @pmboxq: pointer to the driver internal queue element for mailbox command.
1191  *
1192  * This is the callback function to the lpfc heart-beat mailbox command.
1193  * If configured, the lpfc driver issues the heart-beat mailbox command to
1194  * the HBA every LPFC_HB_MBOX_INTERVAL (current 5) seconds. At the time the
1195  * heart-beat mailbox command is issued, the driver shall set up heart-beat
1196  * timeout timer to LPFC_HB_MBOX_TIMEOUT (current 30) seconds and marks
1197  * heart-beat outstanding state. Once the mailbox command comes back and
1198  * no error conditions detected, the heart-beat mailbox command timer is
1199  * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding
1200  * state is cleared for the next heart-beat. If the timer expired with the
1201  * heart-beat outstanding state set, the driver will put the HBA offline.
1202  **/
1203 static void
1204 lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
1205 {
1206 	unsigned long drvr_flag;
1207 
1208 	spin_lock_irqsave(&phba->hbalock, drvr_flag);
1209 	phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
1210 	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
1211 
1212 	/* Check and reset heart-beat timer if necessary */
1213 	mempool_free(pmboxq, phba->mbox_mem_pool);
1214 	if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
1215 		!(phba->link_state == LPFC_HBA_ERROR) &&
1216 		!(phba->pport->load_flag & FC_UNLOADING))
1217 		mod_timer(&phba->hb_tmofunc,
1218 			  jiffies +
1219 			  msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
1220 	return;
1221 }
1222 
1223 /*
1224  * lpfc_idle_stat_delay_work - idle_stat tracking
1225  *
1226  * This routine tracks per-cq idle_stat and determines polling decisions.
1227  *
1228  * Return codes:
1229  *   None
1230  **/
1231 static void
1232 lpfc_idle_stat_delay_work(struct work_struct *work)
1233 {
1234 	struct lpfc_hba *phba = container_of(to_delayed_work(work),
1235 					     struct lpfc_hba,
1236 					     idle_stat_delay_work);
1237 	struct lpfc_queue *cq;
1238 	struct lpfc_sli4_hdw_queue *hdwq;
1239 	struct lpfc_idle_stat *idle_stat;
1240 	u32 i, idle_percent;
1241 	u64 wall, wall_idle, diff_wall, diff_idle, busy_time;
1242 
1243 	if (phba->pport->load_flag & FC_UNLOADING)
1244 		return;
1245 
1246 	if (phba->link_state == LPFC_HBA_ERROR ||
1247 	    phba->pport->fc_flag & FC_OFFLINE_MODE ||
1248 	    phba->cmf_active_mode != LPFC_CFG_OFF)
1249 		goto requeue;
1250 
1251 	for_each_present_cpu(i) {
1252 		hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq];
1253 		cq = hdwq->io_cq;
1254 
1255 		/* Skip if we've already handled this cq's primary CPU */
1256 		if (cq->chann != i)
1257 			continue;
1258 
1259 		idle_stat = &phba->sli4_hba.idle_stat[i];
1260 
1261 		/* get_cpu_idle_time returns values as running counters. Thus,
1262 		 * to know the amount for this period, the prior counter values
1263 		 * need to be subtracted from the current counter values.
1264 		 * From there, the idle time stat can be calculated as a
1265 		 * percentage of 100 - the sum of the other consumption times.
1266 		 */
1267 		wall_idle = get_cpu_idle_time(i, &wall, 1);
1268 		diff_idle = wall_idle - idle_stat->prev_idle;
1269 		diff_wall = wall - idle_stat->prev_wall;
1270 
1271 		if (diff_wall <= diff_idle)
1272 			busy_time = 0;
1273 		else
1274 			busy_time = diff_wall - diff_idle;
1275 
1276 		idle_percent = div64_u64(100 * busy_time, diff_wall);
1277 		idle_percent = 100 - idle_percent;
1278 
1279 		if (idle_percent < 15)
1280 			cq->poll_mode = LPFC_QUEUE_WORK;
1281 		else
1282 			cq->poll_mode = LPFC_IRQ_POLL;
1283 
1284 		idle_stat->prev_idle = wall_idle;
1285 		idle_stat->prev_wall = wall;
1286 	}
1287 
1288 requeue:
1289 	schedule_delayed_work(&phba->idle_stat_delay_work,
1290 			      msecs_to_jiffies(LPFC_IDLE_STAT_DELAY));
1291 }
1292 
1293 static void
1294 lpfc_hb_eq_delay_work(struct work_struct *work)
1295 {
1296 	struct lpfc_hba *phba = container_of(to_delayed_work(work),
1297 					     struct lpfc_hba, eq_delay_work);
1298 	struct lpfc_eq_intr_info *eqi, *eqi_new;
1299 	struct lpfc_queue *eq, *eq_next;
1300 	unsigned char *ena_delay = NULL;
1301 	uint32_t usdelay;
1302 	int i;
1303 
1304 	if (!phba->cfg_auto_imax || phba->pport->load_flag & FC_UNLOADING)
1305 		return;
1306 
1307 	if (phba->link_state == LPFC_HBA_ERROR ||
1308 	    phba->pport->fc_flag & FC_OFFLINE_MODE)
1309 		goto requeue;
1310 
1311 	ena_delay = kcalloc(phba->sli4_hba.num_possible_cpu, sizeof(*ena_delay),
1312 			    GFP_KERNEL);
1313 	if (!ena_delay)
1314 		goto requeue;
1315 
1316 	for (i = 0; i < phba->cfg_irq_chann; i++) {
1317 		/* Get the EQ corresponding to the IRQ vector */
1318 		eq = phba->sli4_hba.hba_eq_hdl[i].eq;
1319 		if (!eq)
1320 			continue;
1321 		if (eq->q_mode || eq->q_flag & HBA_EQ_DELAY_CHK) {
1322 			eq->q_flag &= ~HBA_EQ_DELAY_CHK;
1323 			ena_delay[eq->last_cpu] = 1;
1324 		}
1325 	}
1326 
1327 	for_each_present_cpu(i) {
1328 		eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i);
1329 		if (ena_delay[i]) {
1330 			usdelay = (eqi->icnt >> 10) * LPFC_EQ_DELAY_STEP;
1331 			if (usdelay > LPFC_MAX_AUTO_EQ_DELAY)
1332 				usdelay = LPFC_MAX_AUTO_EQ_DELAY;
1333 		} else {
1334 			usdelay = 0;
1335 		}
1336 
1337 		eqi->icnt = 0;
1338 
1339 		list_for_each_entry_safe(eq, eq_next, &eqi->list, cpu_list) {
1340 			if (unlikely(eq->last_cpu != i)) {
1341 				eqi_new = per_cpu_ptr(phba->sli4_hba.eq_info,
1342 						      eq->last_cpu);
1343 				list_move_tail(&eq->cpu_list, &eqi_new->list);
1344 				continue;
1345 			}
1346 			if (usdelay != eq->q_mode)
1347 				lpfc_modify_hba_eq_delay(phba, eq->hdwq, 1,
1348 							 usdelay);
1349 		}
1350 	}
1351 
1352 	kfree(ena_delay);
1353 
1354 requeue:
1355 	queue_delayed_work(phba->wq, &phba->eq_delay_work,
1356 			   msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
1357 }
1358 
1359 /**
1360  * lpfc_hb_mxp_handler - Multi-XRI pools handler to adjust XRI distribution
1361  * @phba: pointer to lpfc hba data structure.
1362  *
1363  * For each heartbeat, this routine does some heuristic methods to adjust
1364  * XRI distribution. The goal is to fully utilize free XRIs.
1365  **/
1366 static void lpfc_hb_mxp_handler(struct lpfc_hba *phba)
1367 {
1368 	u32 i;
1369 	u32 hwq_count;
1370 
1371 	hwq_count = phba->cfg_hdw_queue;
1372 	for (i = 0; i < hwq_count; i++) {
1373 		/* Adjust XRIs in private pool */
1374 		lpfc_adjust_pvt_pool_count(phba, i);
1375 
1376 		/* Adjust high watermark */
1377 		lpfc_adjust_high_watermark(phba, i);
1378 
1379 #ifdef LPFC_MXP_STAT
1380 		/* Snapshot pbl, pvt and busy count */
1381 		lpfc_snapshot_mxp(phba, i);
1382 #endif
1383 	}
1384 }
1385 
1386 /**
1387  * lpfc_issue_hb_mbox - Issues heart-beat mailbox command
1388  * @phba: pointer to lpfc hba data structure.
1389  *
1390  * If a HB mbox is not already in progrees, this routine will allocate
1391  * a LPFC_MBOXQ_t, populate it with a MBX_HEARTBEAT (0x31) command,
1392  * and issue it. The HBA_HBEAT_INP flag means the command is in progress.
1393  **/
1394 int
1395 lpfc_issue_hb_mbox(struct lpfc_hba *phba)
1396 {
1397 	LPFC_MBOXQ_t *pmboxq;
1398 	int retval;
1399 
1400 	/* Is a Heartbeat mbox already in progress */
1401 	if (phba->hba_flag & HBA_HBEAT_INP)
1402 		return 0;
1403 
1404 	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1405 	if (!pmboxq)
1406 		return -ENOMEM;
1407 
1408 	lpfc_heart_beat(phba, pmboxq);
1409 	pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
1410 	pmboxq->vport = phba->pport;
1411 	retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
1412 
1413 	if (retval != MBX_BUSY && retval != MBX_SUCCESS) {
1414 		mempool_free(pmboxq, phba->mbox_mem_pool);
1415 		return -ENXIO;
1416 	}
1417 	phba->hba_flag |= HBA_HBEAT_INP;
1418 
1419 	return 0;
1420 }
1421 
1422 /**
1423  * lpfc_issue_hb_tmo - Signals heartbeat timer to issue mbox command
1424  * @phba: pointer to lpfc hba data structure.
1425  *
1426  * The heartbeat timer (every 5 sec) will fire. If the HBA_HBEAT_TMO
1427  * flag is set, it will force a MBX_HEARTBEAT mbox command, regardless
1428  * of the value of lpfc_enable_hba_heartbeat.
1429  * If lpfc_enable_hba_heartbeat is set, the timeout routine will always
1430  * try to issue a MBX_HEARTBEAT mbox command.
1431  **/
1432 void
1433 lpfc_issue_hb_tmo(struct lpfc_hba *phba)
1434 {
1435 	if (phba->cfg_enable_hba_heartbeat)
1436 		return;
1437 	phba->hba_flag |= HBA_HBEAT_TMO;
1438 }
1439 
1440 /**
1441  * lpfc_hb_timeout_handler - The HBA-timer timeout handler
1442  * @phba: pointer to lpfc hba data structure.
1443  *
1444  * This is the actual HBA-timer timeout handler to be invoked by the worker
1445  * thread whenever the HBA timer fired and HBA-timeout event posted. This
1446  * handler performs any periodic operations needed for the device. If such
1447  * periodic event has already been attended to either in the interrupt handler
1448  * or by processing slow-ring or fast-ring events within the HBA-timer
1449  * timeout window (LPFC_HB_MBOX_INTERVAL), this handler just simply resets
1450  * the timer for the next timeout period. If lpfc heart-beat mailbox command
1451  * is configured and there is no heart-beat mailbox command outstanding, a
1452  * heart-beat mailbox is issued and timer set properly. Otherwise, if there
1453  * has been a heart-beat mailbox command outstanding, the HBA shall be put
1454  * to offline.
1455  **/
1456 void
1457 lpfc_hb_timeout_handler(struct lpfc_hba *phba)
1458 {
1459 	struct lpfc_vport **vports;
1460 	struct lpfc_dmabuf *buf_ptr;
1461 	int retval = 0;
1462 	int i, tmo;
1463 	struct lpfc_sli *psli = &phba->sli;
1464 	LIST_HEAD(completions);
1465 
1466 	if (phba->cfg_xri_rebalancing) {
1467 		/* Multi-XRI pools handler */
1468 		lpfc_hb_mxp_handler(phba);
1469 	}
1470 
1471 	vports = lpfc_create_vport_work_array(phba);
1472 	if (vports != NULL)
1473 		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
1474 			lpfc_rcv_seq_check_edtov(vports[i]);
1475 			lpfc_fdmi_change_check(vports[i]);
1476 		}
1477 	lpfc_destroy_vport_work_array(phba, vports);
1478 
1479 	if ((phba->link_state == LPFC_HBA_ERROR) ||
1480 		(phba->pport->load_flag & FC_UNLOADING) ||
1481 		(phba->pport->fc_flag & FC_OFFLINE_MODE))
1482 		return;
1483 
1484 	if (phba->elsbuf_cnt &&
1485 		(phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
1486 		spin_lock_irq(&phba->hbalock);
1487 		list_splice_init(&phba->elsbuf, &completions);
1488 		phba->elsbuf_cnt = 0;
1489 		phba->elsbuf_prev_cnt = 0;
1490 		spin_unlock_irq(&phba->hbalock);
1491 
1492 		while (!list_empty(&completions)) {
1493 			list_remove_head(&completions, buf_ptr,
1494 				struct lpfc_dmabuf, list);
1495 			lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
1496 			kfree(buf_ptr);
1497 		}
1498 	}
1499 	phba->elsbuf_prev_cnt = phba->elsbuf_cnt;
1500 
1501 	/* If there is no heart beat outstanding, issue a heartbeat command */
1502 	if (phba->cfg_enable_hba_heartbeat) {
1503 		/* If IOs are completing, no need to issue a MBX_HEARTBEAT */
1504 		spin_lock_irq(&phba->pport->work_port_lock);
1505 		if (time_after(phba->last_completion_time +
1506 				msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL),
1507 				jiffies)) {
1508 			spin_unlock_irq(&phba->pport->work_port_lock);
1509 			if (phba->hba_flag & HBA_HBEAT_INP)
1510 				tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
1511 			else
1512 				tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
1513 			goto out;
1514 		}
1515 		spin_unlock_irq(&phba->pport->work_port_lock);
1516 
1517 		/* Check if a MBX_HEARTBEAT is already in progress */
1518 		if (phba->hba_flag & HBA_HBEAT_INP) {
1519 			/*
1520 			 * If heart beat timeout called with HBA_HBEAT_INP set
1521 			 * we need to give the hb mailbox cmd a chance to
1522 			 * complete or TMO.
1523 			 */
1524 			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
1525 				"0459 Adapter heartbeat still outstanding: "
1526 				"last compl time was %d ms.\n",
1527 				jiffies_to_msecs(jiffies
1528 					 - phba->last_completion_time));
1529 			tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
1530 		} else {
1531 			if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) &&
1532 				(list_empty(&psli->mboxq))) {
1533 
1534 				retval = lpfc_issue_hb_mbox(phba);
1535 				if (retval) {
1536 					tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
1537 					goto out;
1538 				}
1539 				phba->skipped_hb = 0;
1540 			} else if (time_before_eq(phba->last_completion_time,
1541 					phba->skipped_hb)) {
1542 				lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
1543 					"2857 Last completion time not "
1544 					" updated in %d ms\n",
1545 					jiffies_to_msecs(jiffies
1546 						 - phba->last_completion_time));
1547 			} else
1548 				phba->skipped_hb = jiffies;
1549 
1550 			tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
1551 			goto out;
1552 		}
1553 	} else {
1554 		/* Check to see if we want to force a MBX_HEARTBEAT */
1555 		if (phba->hba_flag & HBA_HBEAT_TMO) {
1556 			retval = lpfc_issue_hb_mbox(phba);
1557 			if (retval)
1558 				tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
1559 			else
1560 				tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
1561 			goto out;
1562 		}
1563 		tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
1564 	}
1565 out:
1566 	mod_timer(&phba->hb_tmofunc, jiffies + msecs_to_jiffies(tmo));
1567 }
1568 
1569 /**
1570  * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
1571  * @phba: pointer to lpfc hba data structure.
1572  *
1573  * This routine is called to bring the HBA offline when HBA hardware error
1574  * other than Port Error 6 has been detected.
1575  **/
1576 static void
1577 lpfc_offline_eratt(struct lpfc_hba *phba)
1578 {
1579 	struct lpfc_sli   *psli = &phba->sli;
1580 
1581 	spin_lock_irq(&phba->hbalock);
1582 	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1583 	spin_unlock_irq(&phba->hbalock);
1584 	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
1585 
1586 	lpfc_offline(phba);
1587 	lpfc_reset_barrier(phba);
1588 	spin_lock_irq(&phba->hbalock);
1589 	lpfc_sli_brdreset(phba);
1590 	spin_unlock_irq(&phba->hbalock);
1591 	lpfc_hba_down_post(phba);
1592 	lpfc_sli_brdready(phba, HS_MBRDY);
1593 	lpfc_unblock_mgmt_io(phba);
1594 	phba->link_state = LPFC_HBA_ERROR;
1595 	return;
1596 }
1597 
1598 /**
1599  * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
1600  * @phba: pointer to lpfc hba data structure.
1601  *
1602  * This routine is called to bring a SLI4 HBA offline when HBA hardware error
1603  * other than Port Error 6 has been detected.
1604  **/
1605 void
1606 lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
1607 {
1608 	spin_lock_irq(&phba->hbalock);
1609 	if (phba->link_state == LPFC_HBA_ERROR &&
1610 	    phba->hba_flag & HBA_PCI_ERR) {
1611 		spin_unlock_irq(&phba->hbalock);
1612 		return;
1613 	}
1614 	phba->link_state = LPFC_HBA_ERROR;
1615 	spin_unlock_irq(&phba->hbalock);
1616 
1617 	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
1618 	lpfc_sli_flush_io_rings(phba);
1619 	lpfc_offline(phba);
1620 	lpfc_hba_down_post(phba);
1621 	lpfc_unblock_mgmt_io(phba);
1622 }
1623 
1624 /**
1625  * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
1626  * @phba: pointer to lpfc hba data structure.
1627  *
1628  * This routine is invoked to handle the deferred HBA hardware error
1629  * conditions. This type of error is indicated by HBA by setting ER1
1630  * and another ER bit in the host status register. The driver will
1631  * wait until the ER1 bit clears before handling the error condition.
1632  **/
1633 static void
1634 lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
1635 {
1636 	uint32_t old_host_status = phba->work_hs;
1637 	struct lpfc_sli *psli = &phba->sli;
1638 
1639 	/* If the pci channel is offline, ignore possible errors,
1640 	 * since we cannot communicate with the pci card anyway.
1641 	 */
1642 	if (pci_channel_offline(phba->pcidev)) {
1643 		spin_lock_irq(&phba->hbalock);
1644 		phba->hba_flag &= ~DEFER_ERATT;
1645 		spin_unlock_irq(&phba->hbalock);
1646 		return;
1647 	}
1648 
1649 	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1650 			"0479 Deferred Adapter Hardware Error "
1651 			"Data: x%x x%x x%x\n",
1652 			phba->work_hs, phba->work_status[0],
1653 			phba->work_status[1]);
1654 
1655 	spin_lock_irq(&phba->hbalock);
1656 	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1657 	spin_unlock_irq(&phba->hbalock);
1658 
1659 
1660 	/*
1661 	 * Firmware stops when it triggred erratt. That could cause the I/Os
1662 	 * dropped by the firmware. Error iocb (I/O) on txcmplq and let the
1663 	 * SCSI layer retry it after re-establishing link.
1664 	 */
1665 	lpfc_sli_abort_fcp_rings(phba);
1666 
1667 	/*
1668 	 * There was a firmware error. Take the hba offline and then
1669 	 * attempt to restart it.
1670 	 */
1671 	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
1672 	lpfc_offline(phba);
1673 
1674 	/* Wait for the ER1 bit to clear.*/
1675 	while (phba->work_hs & HS_FFER1) {
1676 		msleep(100);
1677 		if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
1678 			phba->work_hs = UNPLUG_ERR ;
1679 			break;
1680 		}
1681 		/* If driver is unloading let the worker thread continue */
1682 		if (phba->pport->load_flag & FC_UNLOADING) {
1683 			phba->work_hs = 0;
1684 			break;
1685 		}
1686 	}
1687 
1688 	/*
1689 	 * This is to ptrotect against a race condition in which
1690 	 * first write to the host attention register clear the
1691 	 * host status register.
1692 	 */
1693 	if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
1694 		phba->work_hs = old_host_status & ~HS_FFER1;
1695 
1696 	spin_lock_irq(&phba->hbalock);
1697 	phba->hba_flag &= ~DEFER_ERATT;
1698 	spin_unlock_irq(&phba->hbalock);
1699 	phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
1700 	phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
1701 }
1702 
1703 static void
1704 lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
1705 {
1706 	struct lpfc_board_event_header board_event;
1707 	struct Scsi_Host *shost;
1708 
1709 	board_event.event_type = FC_REG_BOARD_EVENT;
1710 	board_event.subcategory = LPFC_EVENT_PORTINTERR;
1711 	shost = lpfc_shost_from_vport(phba->pport);
1712 	fc_host_post_vendor_event(shost, fc_get_event_number(),
1713 				  sizeof(board_event),
1714 				  (char *) &board_event,
1715 				  LPFC_NL_VENDOR_ID);
1716 }
1717 
1718 /**
1719  * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
1720  * @phba: pointer to lpfc hba data structure.
1721  *
1722  * This routine is invoked to handle the following HBA hardware error
1723  * conditions:
1724  * 1 - HBA error attention interrupt
1725  * 2 - DMA ring index out of range
1726  * 3 - Mailbox command came back as unknown
1727  **/
1728 static void
1729 lpfc_handle_eratt_s3(struct lpfc_hba *phba)
1730 {
1731 	struct lpfc_vport *vport = phba->pport;
1732 	struct lpfc_sli   *psli = &phba->sli;
1733 	uint32_t event_data;
1734 	unsigned long temperature;
1735 	struct temp_event temp_event_data;
1736 	struct Scsi_Host  *shost;
1737 
1738 	/* If the pci channel is offline, ignore possible errors,
1739 	 * since we cannot communicate with the pci card anyway.
1740 	 */
1741 	if (pci_channel_offline(phba->pcidev)) {
1742 		spin_lock_irq(&phba->hbalock);
1743 		phba->hba_flag &= ~DEFER_ERATT;
1744 		spin_unlock_irq(&phba->hbalock);
1745 		return;
1746 	}
1747 
1748 	/* If resets are disabled then leave the HBA alone and return */
1749 	if (!phba->cfg_enable_hba_reset)
1750 		return;
1751 
1752 	/* Send an internal error event to mgmt application */
1753 	lpfc_board_errevt_to_mgmt(phba);
1754 
1755 	if (phba->hba_flag & DEFER_ERATT)
1756 		lpfc_handle_deferred_eratt(phba);
1757 
1758 	if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
1759 		if (phba->work_hs & HS_FFER6)
1760 			/* Re-establishing Link */
1761 			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
1762 					"1301 Re-establishing Link "
1763 					"Data: x%x x%x x%x\n",
1764 					phba->work_hs, phba->work_status[0],
1765 					phba->work_status[1]);
1766 		if (phba->work_hs & HS_FFER8)
1767 			/* Device Zeroization */
1768 			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
1769 					"2861 Host Authentication device "
1770 					"zeroization Data:x%x x%x x%x\n",
1771 					phba->work_hs, phba->work_status[0],
1772 					phba->work_status[1]);
1773 
1774 		spin_lock_irq(&phba->hbalock);
1775 		psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1776 		spin_unlock_irq(&phba->hbalock);
1777 
1778 		/*
1779 		* Firmware stops when it triggled erratt with HS_FFER6.
1780 		* That could cause the I/Os dropped by the firmware.
1781 		* Error iocb (I/O) on txcmplq and let the SCSI layer
1782 		* retry it after re-establishing link.
1783 		*/
1784 		lpfc_sli_abort_fcp_rings(phba);
1785 
1786 		/*
1787 		 * There was a firmware error.  Take the hba offline and then
1788 		 * attempt to restart it.
1789 		 */
1790 		lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
1791 		lpfc_offline(phba);
1792 		lpfc_sli_brdrestart(phba);
1793 		if (lpfc_online(phba) == 0) {	/* Initialize the HBA */
1794 			lpfc_unblock_mgmt_io(phba);
1795 			return;
1796 		}
1797 		lpfc_unblock_mgmt_io(phba);
1798 	} else if (phba->work_hs & HS_CRIT_TEMP) {
1799 		temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
1800 		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
1801 		temp_event_data.event_code = LPFC_CRIT_TEMP;
1802 		temp_event_data.data = (uint32_t)temperature;
1803 
1804 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1805 				"0406 Adapter maximum temperature exceeded "
1806 				"(%ld), taking this port offline "
1807 				"Data: x%x x%x x%x\n",
1808 				temperature, phba->work_hs,
1809 				phba->work_status[0], phba->work_status[1]);
1810 
1811 		shost = lpfc_shost_from_vport(phba->pport);
1812 		fc_host_post_vendor_event(shost, fc_get_event_number(),
1813 					  sizeof(temp_event_data),
1814 					  (char *) &temp_event_data,
1815 					  SCSI_NL_VID_TYPE_PCI
1816 					  | PCI_VENDOR_ID_EMULEX);
1817 
1818 		spin_lock_irq(&phba->hbalock);
1819 		phba->over_temp_state = HBA_OVER_TEMP;
1820 		spin_unlock_irq(&phba->hbalock);
1821 		lpfc_offline_eratt(phba);
1822 
1823 	} else {
1824 		/* The if clause above forces this code path when the status
1825 		 * failure is a value other than FFER6. Do not call the offline
1826 		 * twice. This is the adapter hardware error path.
1827 		 */
1828 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1829 				"0457 Adapter Hardware Error "
1830 				"Data: x%x x%x x%x\n",
1831 				phba->work_hs,
1832 				phba->work_status[0], phba->work_status[1]);
1833 
1834 		event_data = FC_REG_DUMP_EVENT;
1835 		shost = lpfc_shost_from_vport(vport);
1836 		fc_host_post_vendor_event(shost, fc_get_event_number(),
1837 				sizeof(event_data), (char *) &event_data,
1838 				SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
1839 
1840 		lpfc_offline_eratt(phba);
1841 	}
1842 	return;
1843 }
1844 
1845 /**
1846  * lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg
1847  * @phba: pointer to lpfc hba data structure.
1848  * @mbx_action: flag for mailbox shutdown action.
1849  * @en_rn_msg: send reset/port recovery message.
1850  * This routine is invoked to perform an SLI4 port PCI function reset in
1851  * response to port status register polling attention. It waits for port
1852  * status register (ERR, RDY, RN) bits before proceeding with function reset.
1853  * During this process, interrupt vectors are freed and later requested
1854  * for handling possible port resource change.
1855  **/
1856 static int
1857 lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
1858 			    bool en_rn_msg)
1859 {
1860 	int rc;
1861 	uint32_t intr_mode;
1862 	LPFC_MBOXQ_t *mboxq;
1863 
1864 	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
1865 	    LPFC_SLI_INTF_IF_TYPE_2) {
1866 		/*
1867 		 * On error status condition, driver need to wait for port
1868 		 * ready before performing reset.
1869 		 */
1870 		rc = lpfc_sli4_pdev_status_reg_wait(phba);
1871 		if (rc)
1872 			return rc;
1873 	}
1874 
1875 	/* need reset: attempt for port recovery */
1876 	if (en_rn_msg)
1877 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1878 				"2887 Reset Needed: Attempting Port "
1879 				"Recovery...\n");
1880 
1881 	/* If we are no wait, the HBA has been reset and is not
1882 	 * functional, thus we should clear
1883 	 * (LPFC_SLI_ACTIVE | LPFC_SLI_MBOX_ACTIVE) flags.
1884 	 */
1885 	if (mbx_action == LPFC_MBX_NO_WAIT) {
1886 		spin_lock_irq(&phba->hbalock);
1887 		phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
1888 		if (phba->sli.mbox_active) {
1889 			mboxq = phba->sli.mbox_active;
1890 			mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
1891 			__lpfc_mbox_cmpl_put(phba, mboxq);
1892 			phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
1893 			phba->sli.mbox_active = NULL;
1894 		}
1895 		spin_unlock_irq(&phba->hbalock);
1896 	}
1897 
1898 	lpfc_offline_prep(phba, mbx_action);
1899 	lpfc_sli_flush_io_rings(phba);
1900 	lpfc_offline(phba);
1901 	/* release interrupt for possible resource change */
1902 	lpfc_sli4_disable_intr(phba);
1903 	rc = lpfc_sli_brdrestart(phba);
1904 	if (rc) {
1905 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1906 				"6309 Failed to restart board\n");
1907 		return rc;
1908 	}
1909 	/* request and enable interrupt */
1910 	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
1911 	if (intr_mode == LPFC_INTR_ERROR) {
1912 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1913 				"3175 Failed to enable interrupt\n");
1914 		return -EIO;
1915 	}
1916 	phba->intr_mode = intr_mode;
1917 	rc = lpfc_online(phba);
1918 	if (rc == 0)
1919 		lpfc_unblock_mgmt_io(phba);
1920 
1921 	return rc;
1922 }
1923 
1924 /**
1925  * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
1926  * @phba: pointer to lpfc hba data structure.
1927  *
1928  * This routine is invoked to handle the SLI4 HBA hardware error attention
1929  * conditions.
1930  **/
1931 static void
1932 lpfc_handle_eratt_s4(struct lpfc_hba *phba)
1933 {
1934 	struct lpfc_vport *vport = phba->pport;
1935 	uint32_t event_data;
1936 	struct Scsi_Host *shost;
1937 	uint32_t if_type;
1938 	struct lpfc_register portstat_reg = {0};
1939 	uint32_t reg_err1, reg_err2;
1940 	uint32_t uerrlo_reg, uemasklo_reg;
1941 	uint32_t smphr_port_status = 0, pci_rd_rc1, pci_rd_rc2;
1942 	bool en_rn_msg = true;
1943 	struct temp_event temp_event_data;
1944 	struct lpfc_register portsmphr_reg;
1945 	int rc, i;
1946 
1947 	/* If the pci channel is offline, ignore possible errors, since
1948 	 * we cannot communicate with the pci card anyway.
1949 	 */
1950 	if (pci_channel_offline(phba->pcidev)) {
1951 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1952 				"3166 pci channel is offline\n");
1953 		return;
1954 	}
1955 
1956 	memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
1957 	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
1958 	switch (if_type) {
1959 	case LPFC_SLI_INTF_IF_TYPE_0:
1960 		pci_rd_rc1 = lpfc_readl(
1961 				phba->sli4_hba.u.if_type0.UERRLOregaddr,
1962 				&uerrlo_reg);
1963 		pci_rd_rc2 = lpfc_readl(
1964 				phba->sli4_hba.u.if_type0.UEMASKLOregaddr,
1965 				&uemasklo_reg);
1966 		/* consider PCI bus read error as pci_channel_offline */
1967 		if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO)
1968 			return;
1969 		if (!(phba->hba_flag & HBA_RECOVERABLE_UE)) {
1970 			lpfc_sli4_offline_eratt(phba);
1971 			return;
1972 		}
1973 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1974 				"7623 Checking UE recoverable");
1975 
1976 		for (i = 0; i < phba->sli4_hba.ue_to_sr / 1000; i++) {
1977 			if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
1978 				       &portsmphr_reg.word0))
1979 				continue;
1980 
1981 			smphr_port_status = bf_get(lpfc_port_smphr_port_status,
1982 						   &portsmphr_reg);
1983 			if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
1984 			    LPFC_PORT_SEM_UE_RECOVERABLE)
1985 				break;
1986 			/*Sleep for 1Sec, before checking SEMAPHORE */
1987 			msleep(1000);
1988 		}
1989 
1990 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1991 				"4827 smphr_port_status x%x : Waited %dSec",
1992 				smphr_port_status, i);
1993 
1994 		/* Recoverable UE, reset the HBA device */
1995 		if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
1996 		    LPFC_PORT_SEM_UE_RECOVERABLE) {
1997 			for (i = 0; i < 20; i++) {
1998 				msleep(1000);
1999 				if (!lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
2000 				    &portsmphr_reg.word0) &&
2001 				    (LPFC_POST_STAGE_PORT_READY ==
2002 				     bf_get(lpfc_port_smphr_port_status,
2003 				     &portsmphr_reg))) {
2004 					rc = lpfc_sli4_port_sta_fn_reset(phba,
2005 						LPFC_MBX_NO_WAIT, en_rn_msg);
2006 					if (rc == 0)
2007 						return;
2008 					lpfc_printf_log(phba, KERN_ERR,
2009 						LOG_TRACE_EVENT,
2010 						"4215 Failed to recover UE");
2011 					break;
2012 				}
2013 			}
2014 		}
2015 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2016 				"7624 Firmware not ready: Failing UE recovery,"
2017 				" waited %dSec", i);
2018 		phba->link_state = LPFC_HBA_ERROR;
2019 		break;
2020 
2021 	case LPFC_SLI_INTF_IF_TYPE_2:
2022 	case LPFC_SLI_INTF_IF_TYPE_6:
2023 		pci_rd_rc1 = lpfc_readl(
2024 				phba->sli4_hba.u.if_type2.STATUSregaddr,
2025 				&portstat_reg.word0);
2026 		/* consider PCI bus read error as pci_channel_offline */
2027 		if (pci_rd_rc1 == -EIO) {
2028 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2029 				"3151 PCI bus read access failure: x%x\n",
2030 				readl(phba->sli4_hba.u.if_type2.STATUSregaddr));
2031 			lpfc_sli4_offline_eratt(phba);
2032 			return;
2033 		}
2034 		reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
2035 		reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
2036 		if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
2037 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2038 					"2889 Port Overtemperature event, "
2039 					"taking port offline Data: x%x x%x\n",
2040 					reg_err1, reg_err2);
2041 
2042 			phba->sfp_alarm |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
2043 			temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
2044 			temp_event_data.event_code = LPFC_CRIT_TEMP;
2045 			temp_event_data.data = 0xFFFFFFFF;
2046 
2047 			shost = lpfc_shost_from_vport(phba->pport);
2048 			fc_host_post_vendor_event(shost, fc_get_event_number(),
2049 						  sizeof(temp_event_data),
2050 						  (char *)&temp_event_data,
2051 						  SCSI_NL_VID_TYPE_PCI
2052 						  | PCI_VENDOR_ID_EMULEX);
2053 
2054 			spin_lock_irq(&phba->hbalock);
2055 			phba->over_temp_state = HBA_OVER_TEMP;
2056 			spin_unlock_irq(&phba->hbalock);
2057 			lpfc_sli4_offline_eratt(phba);
2058 			return;
2059 		}
2060 		if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
2061 		    reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) {
2062 			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2063 					"3143 Port Down: Firmware Update "
2064 					"Detected\n");
2065 			en_rn_msg = false;
2066 		} else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
2067 			 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
2068 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2069 					"3144 Port Down: Debug Dump\n");
2070 		else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
2071 			 reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON)
2072 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2073 					"3145 Port Down: Provisioning\n");
2074 
2075 		/* If resets are disabled then leave the HBA alone and return */
2076 		if (!phba->cfg_enable_hba_reset)
2077 			return;
2078 
2079 		/* Check port status register for function reset */
2080 		rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT,
2081 				en_rn_msg);
2082 		if (rc == 0) {
2083 			/* don't report event on forced debug dump */
2084 			if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
2085 			    reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
2086 				return;
2087 			else
2088 				break;
2089 		}
2090 		/* fall through for not able to recover */
2091 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2092 				"3152 Unrecoverable error\n");
2093 		phba->link_state = LPFC_HBA_ERROR;
2094 		break;
2095 	case LPFC_SLI_INTF_IF_TYPE_1:
2096 	default:
2097 		break;
2098 	}
2099 	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2100 			"3123 Report dump event to upper layer\n");
2101 	/* Send an internal error event to mgmt application */
2102 	lpfc_board_errevt_to_mgmt(phba);
2103 
2104 	event_data = FC_REG_DUMP_EVENT;
2105 	shost = lpfc_shost_from_vport(vport);
2106 	fc_host_post_vendor_event(shost, fc_get_event_number(),
2107 				  sizeof(event_data), (char *) &event_data,
2108 				  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
2109 }
2110 
2111 /**
2112  * lpfc_handle_eratt - Wrapper func for handling hba error attention
2113  * @phba: pointer to lpfc HBA data structure.
2114  *
2115  * This routine wraps the actual SLI3 or SLI4 hba error attention handling
2116  * routine from the API jump table function pointer from the lpfc_hba struct.
2117  *
2118  * Return codes
2119  *   0 - success.
2120  *   Any other value - error.
2121  **/
2122 void
2123 lpfc_handle_eratt(struct lpfc_hba *phba)
2124 {
2125 	(*phba->lpfc_handle_eratt)(phba);
2126 }
2127 
2128 /**
2129  * lpfc_handle_latt - The HBA link event handler
2130  * @phba: pointer to lpfc hba data structure.
2131  *
2132  * This routine is invoked from the worker thread to handle a HBA host
2133  * attention link event. SLI3 only.
2134  **/
2135 void
2136 lpfc_handle_latt(struct lpfc_hba *phba)
2137 {
2138 	struct lpfc_vport *vport = phba->pport;
2139 	struct lpfc_sli   *psli = &phba->sli;
2140 	LPFC_MBOXQ_t *pmb;
2141 	volatile uint32_t control;
2142 	struct lpfc_dmabuf *mp;
2143 	int rc = 0;
2144 
2145 	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2146 	if (!pmb) {
2147 		rc = 1;
2148 		goto lpfc_handle_latt_err_exit;
2149 	}
2150 
2151 	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2152 	if (!mp) {
2153 		rc = 2;
2154 		goto lpfc_handle_latt_free_pmb;
2155 	}
2156 
2157 	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
2158 	if (!mp->virt) {
2159 		rc = 3;
2160 		goto lpfc_handle_latt_free_mp;
2161 	}
2162 
2163 	/* Cleanup any outstanding ELS commands */
2164 	lpfc_els_flush_all_cmd(phba);
2165 
2166 	psli->slistat.link_event++;
2167 	lpfc_read_topology(phba, pmb, mp);
2168 	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
2169 	pmb->vport = vport;
2170 	/* Block ELS IOCBs until we have processed this mbox command */
2171 	phba->sli.sli3_ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
2172 	rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT);
2173 	if (rc == MBX_NOT_FINISHED) {
2174 		rc = 4;
2175 		goto lpfc_handle_latt_free_mbuf;
2176 	}
2177 
2178 	/* Clear Link Attention in HA REG */
2179 	spin_lock_irq(&phba->hbalock);
2180 	writel(HA_LATT, phba->HAregaddr);
2181 	readl(phba->HAregaddr); /* flush */
2182 	spin_unlock_irq(&phba->hbalock);
2183 
2184 	return;
2185 
2186 lpfc_handle_latt_free_mbuf:
2187 	phba->sli.sli3_ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
2188 	lpfc_mbuf_free(phba, mp->virt, mp->phys);
2189 lpfc_handle_latt_free_mp:
2190 	kfree(mp);
2191 lpfc_handle_latt_free_pmb:
2192 	mempool_free(pmb, phba->mbox_mem_pool);
2193 lpfc_handle_latt_err_exit:
2194 	/* Enable Link attention interrupts */
2195 	spin_lock_irq(&phba->hbalock);
2196 	psli->sli_flag |= LPFC_PROCESS_LA;
2197 	control = readl(phba->HCregaddr);
2198 	control |= HC_LAINT_ENA;
2199 	writel(control, phba->HCregaddr);
2200 	readl(phba->HCregaddr); /* flush */
2201 
2202 	/* Clear Link Attention in HA REG */
2203 	writel(HA_LATT, phba->HAregaddr);
2204 	readl(phba->HAregaddr); /* flush */
2205 	spin_unlock_irq(&phba->hbalock);
2206 	lpfc_linkdown(phba);
2207 	phba->link_state = LPFC_HBA_ERROR;
2208 
2209 	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2210 			"0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);
2211 
2212 	return;
2213 }
2214 
2215 /**
2216  * lpfc_parse_vpd - Parse VPD (Vital Product Data)
2217  * @phba: pointer to lpfc hba data structure.
2218  * @vpd: pointer to the vital product data.
2219  * @len: length of the vital product data in bytes.
2220  *
2221  * This routine parses the Vital Product Data (VPD). The VPD is treated as
2222  * an array of characters. In this routine, the ModelName, ProgramType, and
2223  * ModelDesc, etc. fields of the phba data structure will be populated.
2224  *
2225  * Return codes
2226  *   0 - pointer to the VPD passed in is NULL
2227  *   1 - success
2228  **/
2229 int
2230 lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
2231 {
2232 	uint8_t lenlo, lenhi;
2233 	int Length;
2234 	int i, j;
2235 	int finished = 0;
2236 	int index = 0;
2237 
2238 	if (!vpd)
2239 		return 0;
2240 
2241 	/* Vital Product */
2242 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2243 			"0455 Vital Product Data: x%x x%x x%x x%x\n",
2244 			(uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
2245 			(uint32_t) vpd[3]);
2246 	while (!finished && (index < (len - 4))) {
2247 		switch (vpd[index]) {
2248 		case 0x82:
2249 		case 0x91:
2250 			index += 1;
2251 			lenlo = vpd[index];
2252 			index += 1;
2253 			lenhi = vpd[index];
2254 			index += 1;
2255 			i = ((((unsigned short)lenhi) << 8) + lenlo);
2256 			index += i;
2257 			break;
2258 		case 0x90:
2259 			index += 1;
2260 			lenlo = vpd[index];
2261 			index += 1;
2262 			lenhi = vpd[index];
2263 			index += 1;
2264 			Length = ((((unsigned short)lenhi) << 8) + lenlo);
2265 			if (Length > len - index)
2266 				Length = len - index;
2267 			while (Length > 0) {
2268 			/* Look for Serial Number */
2269 			if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
2270 				index += 2;
2271 				i = vpd[index];
2272 				index += 1;
2273 				j = 0;
2274 				Length -= (3+i);
2275 				while(i--) {
2276 					phba->SerialNumber[j++] = vpd[index++];
2277 					if (j == 31)
2278 						break;
2279 				}
2280 				phba->SerialNumber[j] = 0;
2281 				continue;
2282 			}
2283 			else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
2284 				phba->vpd_flag |= VPD_MODEL_DESC;
2285 				index += 2;
2286 				i = vpd[index];
2287 				index += 1;
2288 				j = 0;
2289 				Length -= (3+i);
2290 				while(i--) {
2291 					phba->ModelDesc[j++] = vpd[index++];
2292 					if (j == 255)
2293 						break;
2294 				}
2295 				phba->ModelDesc[j] = 0;
2296 				continue;
2297 			}
2298 			else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
2299 				phba->vpd_flag |= VPD_MODEL_NAME;
2300 				index += 2;
2301 				i = vpd[index];
2302 				index += 1;
2303 				j = 0;
2304 				Length -= (3+i);
2305 				while(i--) {
2306 					phba->ModelName[j++] = vpd[index++];
2307 					if (j == 79)
2308 						break;
2309 				}
2310 				phba->ModelName[j] = 0;
2311 				continue;
2312 			}
2313 			else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
2314 				phba->vpd_flag |= VPD_PROGRAM_TYPE;
2315 				index += 2;
2316 				i = vpd[index];
2317 				index += 1;
2318 				j = 0;
2319 				Length -= (3+i);
2320 				while(i--) {
2321 					phba->ProgramType[j++] = vpd[index++];
2322 					if (j == 255)
2323 						break;
2324 				}
2325 				phba->ProgramType[j] = 0;
2326 				continue;
2327 			}
2328 			else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
2329 				phba->vpd_flag |= VPD_PORT;
2330 				index += 2;
2331 				i = vpd[index];
2332 				index += 1;
2333 				j = 0;
2334 				Length -= (3+i);
2335 				while(i--) {
2336 					if ((phba->sli_rev == LPFC_SLI_REV4) &&
2337 					    (phba->sli4_hba.pport_name_sta ==
2338 					     LPFC_SLI4_PPNAME_GET)) {
2339 						j++;
2340 						index++;
2341 					} else
2342 						phba->Port[j++] = vpd[index++];
2343 					if (j == 19)
2344 						break;
2345 				}
2346 				if ((phba->sli_rev != LPFC_SLI_REV4) ||
2347 				    (phba->sli4_hba.pport_name_sta ==
2348 				     LPFC_SLI4_PPNAME_NON))
2349 					phba->Port[j] = 0;
2350 				continue;
2351 			}
2352 			else {
2353 				index += 2;
2354 				i = vpd[index];
2355 				index += 1;
2356 				index += i;
2357 				Length -= (3 + i);
2358 			}
2359 		}
2360 		finished = 0;
2361 		break;
2362 		case 0x78:
2363 			finished = 1;
2364 			break;
2365 		default:
2366 			index ++;
2367 			break;
2368 		}
2369 	}
2370 
2371 	return(1);
2372 }
2373 
2374 /**
2375  * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
2376  * @phba: pointer to lpfc hba data structure.
2377  * @mdp: pointer to the data structure to hold the derived model name.
2378  * @descp: pointer to the data structure to hold the derived description.
2379  *
2380  * This routine retrieves HBA's description based on its registered PCI device
2381  * ID. The @descp passed into this function points to an array of 256 chars. It
2382  * shall be returned with the model name, maximum speed, and the host bus type.
2383  * The @mdp passed into this function points to an array of 80 chars. When the
2384  * function returns, the @mdp will be filled with the model name.
2385  **/
2386 static void
2387 lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
2388 {
2389 	lpfc_vpd_t *vp;
2390 	uint16_t dev_id = phba->pcidev->device;
2391 	int max_speed;
2392 	int GE = 0;
2393 	int oneConnect = 0; /* default is not a oneConnect */
2394 	struct {
2395 		char *name;
2396 		char *bus;
2397 		char *function;
2398 	} m = {"<Unknown>", "", ""};
2399 
2400 	if (mdp && mdp[0] != '\0'
2401 		&& descp && descp[0] != '\0')
2402 		return;
2403 
2404 	if (phba->lmt & LMT_64Gb)
2405 		max_speed = 64;
2406 	else if (phba->lmt & LMT_32Gb)
2407 		max_speed = 32;
2408 	else if (phba->lmt & LMT_16Gb)
2409 		max_speed = 16;
2410 	else if (phba->lmt & LMT_10Gb)
2411 		max_speed = 10;
2412 	else if (phba->lmt & LMT_8Gb)
2413 		max_speed = 8;
2414 	else if (phba->lmt & LMT_4Gb)
2415 		max_speed = 4;
2416 	else if (phba->lmt & LMT_2Gb)
2417 		max_speed = 2;
2418 	else if (phba->lmt & LMT_1Gb)
2419 		max_speed = 1;
2420 	else
2421 		max_speed = 0;
2422 
2423 	vp = &phba->vpd;
2424 
2425 	switch (dev_id) {
2426 	case PCI_DEVICE_ID_FIREFLY:
2427 		m = (typeof(m)){"LP6000", "PCI",
2428 				"Obsolete, Unsupported Fibre Channel Adapter"};
2429 		break;
2430 	case PCI_DEVICE_ID_SUPERFLY:
2431 		if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
2432 			m = (typeof(m)){"LP7000", "PCI", ""};
2433 		else
2434 			m = (typeof(m)){"LP7000E", "PCI", ""};
2435 		m.function = "Obsolete, Unsupported Fibre Channel Adapter";
2436 		break;
2437 	case PCI_DEVICE_ID_DRAGONFLY:
2438 		m = (typeof(m)){"LP8000", "PCI",
2439 				"Obsolete, Unsupported Fibre Channel Adapter"};
2440 		break;
2441 	case PCI_DEVICE_ID_CENTAUR:
2442 		if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
2443 			m = (typeof(m)){"LP9002", "PCI", ""};
2444 		else
2445 			m = (typeof(m)){"LP9000", "PCI", ""};
2446 		m.function = "Obsolete, Unsupported Fibre Channel Adapter";
2447 		break;
2448 	case PCI_DEVICE_ID_RFLY:
2449 		m = (typeof(m)){"LP952", "PCI",
2450 				"Obsolete, Unsupported Fibre Channel Adapter"};
2451 		break;
2452 	case PCI_DEVICE_ID_PEGASUS:
2453 		m = (typeof(m)){"LP9802", "PCI-X",
2454 				"Obsolete, Unsupported Fibre Channel Adapter"};
2455 		break;
2456 	case PCI_DEVICE_ID_THOR:
2457 		m = (typeof(m)){"LP10000", "PCI-X",
2458 				"Obsolete, Unsupported Fibre Channel Adapter"};
2459 		break;
2460 	case PCI_DEVICE_ID_VIPER:
2461 		m = (typeof(m)){"LPX1000",  "PCI-X",
2462 				"Obsolete, Unsupported Fibre Channel Adapter"};
2463 		break;
2464 	case PCI_DEVICE_ID_PFLY:
2465 		m = (typeof(m)){"LP982", "PCI-X",
2466 				"Obsolete, Unsupported Fibre Channel Adapter"};
2467 		break;
2468 	case PCI_DEVICE_ID_TFLY:
2469 		m = (typeof(m)){"LP1050", "PCI-X",
2470 				"Obsolete, Unsupported Fibre Channel Adapter"};
2471 		break;
2472 	case PCI_DEVICE_ID_HELIOS:
2473 		m = (typeof(m)){"LP11000", "PCI-X2",
2474 				"Obsolete, Unsupported Fibre Channel Adapter"};
2475 		break;
2476 	case PCI_DEVICE_ID_HELIOS_SCSP:
2477 		m = (typeof(m)){"LP11000-SP", "PCI-X2",
2478 				"Obsolete, Unsupported Fibre Channel Adapter"};
2479 		break;
2480 	case PCI_DEVICE_ID_HELIOS_DCSP:
2481 		m = (typeof(m)){"LP11002-SP",  "PCI-X2",
2482 				"Obsolete, Unsupported Fibre Channel Adapter"};
2483 		break;
2484 	case PCI_DEVICE_ID_NEPTUNE:
2485 		m = (typeof(m)){"LPe1000", "PCIe",
2486 				"Obsolete, Unsupported Fibre Channel Adapter"};
2487 		break;
2488 	case PCI_DEVICE_ID_NEPTUNE_SCSP:
2489 		m = (typeof(m)){"LPe1000-SP", "PCIe",
2490 				"Obsolete, Unsupported Fibre Channel Adapter"};
2491 		break;
2492 	case PCI_DEVICE_ID_NEPTUNE_DCSP:
2493 		m = (typeof(m)){"LPe1002-SP", "PCIe",
2494 				"Obsolete, Unsupported Fibre Channel Adapter"};
2495 		break;
2496 	case PCI_DEVICE_ID_BMID:
2497 		m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
2498 		break;
2499 	case PCI_DEVICE_ID_BSMB:
2500 		m = (typeof(m)){"LP111", "PCI-X2",
2501 				"Obsolete, Unsupported Fibre Channel Adapter"};
2502 		break;
2503 	case PCI_DEVICE_ID_ZEPHYR:
2504 		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
2505 		break;
2506 	case PCI_DEVICE_ID_ZEPHYR_SCSP:
2507 		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
2508 		break;
2509 	case PCI_DEVICE_ID_ZEPHYR_DCSP:
2510 		m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"};
2511 		GE = 1;
2512 		break;
2513 	case PCI_DEVICE_ID_ZMID:
2514 		m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"};
2515 		break;
2516 	case PCI_DEVICE_ID_ZSMB:
2517 		m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
2518 		break;
2519 	case PCI_DEVICE_ID_LP101:
2520 		m = (typeof(m)){"LP101", "PCI-X",
2521 				"Obsolete, Unsupported Fibre Channel Adapter"};
2522 		break;
2523 	case PCI_DEVICE_ID_LP10000S:
2524 		m = (typeof(m)){"LP10000-S", "PCI",
2525 				"Obsolete, Unsupported Fibre Channel Adapter"};
2526 		break;
2527 	case PCI_DEVICE_ID_LP11000S:
2528 		m = (typeof(m)){"LP11000-S", "PCI-X2",
2529 				"Obsolete, Unsupported Fibre Channel Adapter"};
2530 		break;
2531 	case PCI_DEVICE_ID_LPE11000S:
2532 		m = (typeof(m)){"LPe11000-S", "PCIe",
2533 				"Obsolete, Unsupported Fibre Channel Adapter"};
2534 		break;
2535 	case PCI_DEVICE_ID_SAT:
2536 		m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
2537 		break;
2538 	case PCI_DEVICE_ID_SAT_MID:
2539 		m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"};
2540 		break;
2541 	case PCI_DEVICE_ID_SAT_SMB:
2542 		m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"};
2543 		break;
2544 	case PCI_DEVICE_ID_SAT_DCSP:
2545 		m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
2546 		break;
2547 	case PCI_DEVICE_ID_SAT_SCSP:
2548 		m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
2549 		break;
2550 	case PCI_DEVICE_ID_SAT_S:
2551 		m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
2552 		break;
2553 	case PCI_DEVICE_ID_HORNET:
2554 		m = (typeof(m)){"LP21000", "PCIe",
2555 				"Obsolete, Unsupported FCoE Adapter"};
2556 		GE = 1;
2557 		break;
2558 	case PCI_DEVICE_ID_PROTEUS_VF:
2559 		m = (typeof(m)){"LPev12000", "PCIe IOV",
2560 				"Obsolete, Unsupported Fibre Channel Adapter"};
2561 		break;
2562 	case PCI_DEVICE_ID_PROTEUS_PF:
2563 		m = (typeof(m)){"LPev12000", "PCIe IOV",
2564 				"Obsolete, Unsupported Fibre Channel Adapter"};
2565 		break;
2566 	case PCI_DEVICE_ID_PROTEUS_S:
2567 		m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
2568 				"Obsolete, Unsupported Fibre Channel Adapter"};
2569 		break;
2570 	case PCI_DEVICE_ID_TIGERSHARK:
2571 		oneConnect = 1;
2572 		m = (typeof(m)){"OCe10100", "PCIe", "FCoE"};
2573 		break;
2574 	case PCI_DEVICE_ID_TOMCAT:
2575 		oneConnect = 1;
2576 		m = (typeof(m)){"OCe11100", "PCIe", "FCoE"};
2577 		break;
2578 	case PCI_DEVICE_ID_FALCON:
2579 		m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
2580 				"EmulexSecure Fibre"};
2581 		break;
2582 	case PCI_DEVICE_ID_BALIUS:
2583 		m = (typeof(m)){"LPVe12002", "PCIe Shared I/O",
2584 				"Obsolete, Unsupported Fibre Channel Adapter"};
2585 		break;
2586 	case PCI_DEVICE_ID_LANCER_FC:
2587 		m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"};
2588 		break;
2589 	case PCI_DEVICE_ID_LANCER_FC_VF:
2590 		m = (typeof(m)){"LPe16000", "PCIe",
2591 				"Obsolete, Unsupported Fibre Channel Adapter"};
2592 		break;
2593 	case PCI_DEVICE_ID_LANCER_FCOE:
2594 		oneConnect = 1;
2595 		m = (typeof(m)){"OCe15100", "PCIe", "FCoE"};
2596 		break;
2597 	case PCI_DEVICE_ID_LANCER_FCOE_VF:
2598 		oneConnect = 1;
2599 		m = (typeof(m)){"OCe15100", "PCIe",
2600 				"Obsolete, Unsupported FCoE"};
2601 		break;
2602 	case PCI_DEVICE_ID_LANCER_G6_FC:
2603 		m = (typeof(m)){"LPe32000", "PCIe", "Fibre Channel Adapter"};
2604 		break;
2605 	case PCI_DEVICE_ID_LANCER_G7_FC:
2606 		m = (typeof(m)){"LPe36000", "PCIe", "Fibre Channel Adapter"};
2607 		break;
2608 	case PCI_DEVICE_ID_LANCER_G7P_FC:
2609 		m = (typeof(m)){"LPe38000", "PCIe", "Fibre Channel Adapter"};
2610 		break;
2611 	case PCI_DEVICE_ID_SKYHAWK:
2612 	case PCI_DEVICE_ID_SKYHAWK_VF:
2613 		oneConnect = 1;
2614 		m = (typeof(m)){"OCe14000", "PCIe", "FCoE"};
2615 		break;
2616 	default:
2617 		m = (typeof(m)){"Unknown", "", ""};
2618 		break;
2619 	}
2620 
2621 	if (mdp && mdp[0] == '\0')
2622 		snprintf(mdp, 79,"%s", m.name);
2623 	/*
2624 	 * oneConnect hba requires special processing, they are all initiators
2625 	 * and we put the port number on the end
2626 	 */
2627 	if (descp && descp[0] == '\0') {
2628 		if (oneConnect)
2629 			snprintf(descp, 255,
2630 				"Emulex OneConnect %s, %s Initiator %s",
2631 				m.name, m.function,
2632 				phba->Port);
2633 		else if (max_speed == 0)
2634 			snprintf(descp, 255,
2635 				"Emulex %s %s %s",
2636 				m.name, m.bus, m.function);
2637 		else
2638 			snprintf(descp, 255,
2639 				"Emulex %s %d%s %s %s",
2640 				m.name, max_speed, (GE) ? "GE" : "Gb",
2641 				m.bus, m.function);
2642 	}
2643 }
2644 
2645 /**
2646  * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to a IOCB ring
2647  * @phba: pointer to lpfc hba data structure.
2648  * @pring: pointer to a IOCB ring.
2649  * @cnt: the number of IOCBs to be posted to the IOCB ring.
2650  *
2651  * This routine posts a given number of IOCBs with the associated DMA buffer
2652  * descriptors specified by the cnt argument to the given IOCB ring.
2653  *
2654  * Return codes
2655  *   The number of IOCBs NOT able to be posted to the IOCB ring.
2656  **/
2657 int
2658 lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
2659 {
2660 	IOCB_t *icmd;
2661 	struct lpfc_iocbq *iocb;
2662 	struct lpfc_dmabuf *mp1, *mp2;
2663 
2664 	cnt += pring->missbufcnt;
2665 
2666 	/* While there are buffers to post */
2667 	while (cnt > 0) {
2668 		/* Allocate buffer for  command iocb */
2669 		iocb = lpfc_sli_get_iocbq(phba);
2670 		if (iocb == NULL) {
2671 			pring->missbufcnt = cnt;
2672 			return cnt;
2673 		}
2674 		icmd = &iocb->iocb;
2675 
2676 		/* 2 buffers can be posted per command */
2677 		/* Allocate buffer to post */
2678 		mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
2679 		if (mp1)
2680 		    mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
2681 		if (!mp1 || !mp1->virt) {
2682 			kfree(mp1);
2683 			lpfc_sli_release_iocbq(phba, iocb);
2684 			pring->missbufcnt = cnt;
2685 			return cnt;
2686 		}
2687 
2688 		INIT_LIST_HEAD(&mp1->list);
2689 		/* Allocate buffer to post */
2690 		if (cnt > 1) {
2691 			mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
2692 			if (mp2)
2693 				mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
2694 							    &mp2->phys);
2695 			if (!mp2 || !mp2->virt) {
2696 				kfree(mp2);
2697 				lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2698 				kfree(mp1);
2699 				lpfc_sli_release_iocbq(phba, iocb);
2700 				pring->missbufcnt = cnt;
2701 				return cnt;
2702 			}
2703 
2704 			INIT_LIST_HEAD(&mp2->list);
2705 		} else {
2706 			mp2 = NULL;
2707 		}
2708 
2709 		icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
2710 		icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
2711 		icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
2712 		icmd->ulpBdeCount = 1;
2713 		cnt--;
2714 		if (mp2) {
2715 			icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
2716 			icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
2717 			icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
2718 			cnt--;
2719 			icmd->ulpBdeCount = 2;
2720 		}
2721 
2722 		icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
2723 		icmd->ulpLe = 1;
2724 
2725 		if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
2726 		    IOCB_ERROR) {
2727 			lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2728 			kfree(mp1);
2729 			cnt++;
2730 			if (mp2) {
2731 				lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
2732 				kfree(mp2);
2733 				cnt++;
2734 			}
2735 			lpfc_sli_release_iocbq(phba, iocb);
2736 			pring->missbufcnt = cnt;
2737 			return cnt;
2738 		}
2739 		lpfc_sli_ringpostbuf_put(phba, pring, mp1);
2740 		if (mp2)
2741 			lpfc_sli_ringpostbuf_put(phba, pring, mp2);
2742 	}
2743 	pring->missbufcnt = 0;
2744 	return 0;
2745 }
2746 
2747 /**
2748  * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring
2749  * @phba: pointer to lpfc hba data structure.
2750  *
2751  * This routine posts initial receive IOCB buffers to the ELS ring. The
2752  * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is
2753  * set to 64 IOCBs. SLI3 only.
2754  *
2755  * Return codes
2756  *   0 - success (currently always success)
2757  **/
2758 static int
2759 lpfc_post_rcv_buf(struct lpfc_hba *phba)
2760 {
2761 	struct lpfc_sli *psli = &phba->sli;
2762 
2763 	/* Ring 0, ELS / CT buffers */
2764 	lpfc_post_buffer(phba, &psli->sli3_ring[LPFC_ELS_RING], LPFC_BUF_RING0);
2765 	/* Ring 2 - FCP no buffers needed */
2766 
2767 	return 0;
2768 }
2769 
2770 #define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
2771 
2772 /**
2773  * lpfc_sha_init - Set up initial array of hash table entries
2774  * @HashResultPointer: pointer to an array as hash table.
2775  *
2776  * This routine sets up the initial values to the array of hash table entries
2777  * for the LC HBAs.
2778  **/
2779 static void
2780 lpfc_sha_init(uint32_t * HashResultPointer)
2781 {
2782 	HashResultPointer[0] = 0x67452301;
2783 	HashResultPointer[1] = 0xEFCDAB89;
2784 	HashResultPointer[2] = 0x98BADCFE;
2785 	HashResultPointer[3] = 0x10325476;
2786 	HashResultPointer[4] = 0xC3D2E1F0;
2787 }
2788 
2789 /**
2790  * lpfc_sha_iterate - Iterate initial hash table with the working hash table
2791  * @HashResultPointer: pointer to an initial/result hash table.
2792  * @HashWorkingPointer: pointer to an working hash table.
2793  *
2794  * This routine iterates an initial hash table pointed by @HashResultPointer
2795  * with the values from the working hash table pointeed by @HashWorkingPointer.
2796  * The results are putting back to the initial hash table, returned through
2797  * the @HashResultPointer as the result hash table.
2798  **/
2799 static void
2800 lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
2801 {
2802 	int t;
2803 	uint32_t TEMP;
2804 	uint32_t A, B, C, D, E;
2805 	t = 16;
2806 	do {
2807 		HashWorkingPointer[t] =
2808 		    S(1,
2809 		      HashWorkingPointer[t - 3] ^ HashWorkingPointer[t -
2810 								     8] ^
2811 		      HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
2812 	} while (++t <= 79);
2813 	t = 0;
2814 	A = HashResultPointer[0];
2815 	B = HashResultPointer[1];
2816 	C = HashResultPointer[2];
2817 	D = HashResultPointer[3];
2818 	E = HashResultPointer[4];
2819 
2820 	do {
2821 		if (t < 20) {
2822 			TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
2823 		} else if (t < 40) {
2824 			TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
2825 		} else if (t < 60) {
2826 			TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
2827 		} else {
2828 			TEMP = (B ^ C ^ D) + 0xCA62C1D6;
2829 		}
2830 		TEMP += S(5, A) + E + HashWorkingPointer[t];
2831 		E = D;
2832 		D = C;
2833 		C = S(30, B);
2834 		B = A;
2835 		A = TEMP;
2836 	} while (++t <= 79);
2837 
2838 	HashResultPointer[0] += A;
2839 	HashResultPointer[1] += B;
2840 	HashResultPointer[2] += C;
2841 	HashResultPointer[3] += D;
2842 	HashResultPointer[4] += E;
2843 
2844 }
2845 
2846 /**
2847  * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
2848  * @RandomChallenge: pointer to the entry of host challenge random number array.
2849  * @HashWorking: pointer to the entry of the working hash array.
2850  *
2851  * This routine calculates the working hash array referred by @HashWorking
2852  * from the challenge random numbers associated with the host, referred by
2853  * @RandomChallenge. The result is put into the entry of the working hash
2854  * array and returned by reference through @HashWorking.
2855  **/
2856 static void
2857 lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
2858 {
2859 	*HashWorking = (*RandomChallenge ^ *HashWorking);
2860 }
2861 
2862 /**
2863  * lpfc_hba_init - Perform special handling for LC HBA initialization
2864  * @phba: pointer to lpfc hba data structure.
2865  * @hbainit: pointer to an array of unsigned 32-bit integers.
2866  *
2867  * This routine performs the special handling for LC HBA initialization.
2868  **/
2869 void
2870 lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
2871 {
2872 	int t;
2873 	uint32_t *HashWorking;
2874 	uint32_t *pwwnn = (uint32_t *) phba->wwnn;
2875 
2876 	HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
2877 	if (!HashWorking)
2878 		return;
2879 
2880 	HashWorking[0] = HashWorking[78] = *pwwnn++;
2881 	HashWorking[1] = HashWorking[79] = *pwwnn;
2882 
2883 	for (t = 0; t < 7; t++)
2884 		lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
2885 
2886 	lpfc_sha_init(hbainit);
2887 	lpfc_sha_iterate(hbainit, HashWorking);
2888 	kfree(HashWorking);
2889 }
2890 
2891 /**
2892  * lpfc_cleanup - Performs vport cleanups before deleting a vport
2893  * @vport: pointer to a virtual N_Port data structure.
2894  *
2895  * This routine performs the necessary cleanups before deleting the @vport.
2896  * It invokes the discovery state machine to perform necessary state
2897  * transitions and to release the ndlps associated with the @vport. Note,
2898  * the physical port is treated as @vport 0.
2899  **/
2900 void
2901 lpfc_cleanup(struct lpfc_vport *vport)
2902 {
2903 	struct lpfc_hba   *phba = vport->phba;
2904 	struct lpfc_nodelist *ndlp, *next_ndlp;
2905 	int i = 0;
2906 
2907 	if (phba->link_state > LPFC_LINK_DOWN)
2908 		lpfc_port_link_failure(vport);
2909 
2910 	/* Clean up VMID resources */
2911 	if (lpfc_is_vmid_enabled(phba))
2912 		lpfc_vmid_vport_cleanup(vport);
2913 
2914 	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
2915 		if (vport->port_type != LPFC_PHYSICAL_PORT &&
2916 		    ndlp->nlp_DID == Fabric_DID) {
2917 			/* Just free up ndlp with Fabric_DID for vports */
2918 			lpfc_nlp_put(ndlp);
2919 			continue;
2920 		}
2921 
2922 		if (ndlp->nlp_DID == Fabric_Cntl_DID &&
2923 		    ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
2924 			lpfc_nlp_put(ndlp);
2925 			continue;
2926 		}
2927 
2928 		/* Fabric Ports not in UNMAPPED state are cleaned up in the
2929 		 * DEVICE_RM event.
2930 		 */
2931 		if (ndlp->nlp_type & NLP_FABRIC &&
2932 		    ndlp->nlp_state == NLP_STE_UNMAPPED_NODE)
2933 			lpfc_disc_state_machine(vport, ndlp, NULL,
2934 					NLP_EVT_DEVICE_RECOVERY);
2935 
2936 		if (!(ndlp->fc4_xpt_flags & (NVME_XPT_REGD|SCSI_XPT_REGD)))
2937 			lpfc_disc_state_machine(vport, ndlp, NULL,
2938 					NLP_EVT_DEVICE_RM);
2939 	}
2940 
2941 	/* At this point, ALL ndlp's should be gone
2942 	 * because of the previous NLP_EVT_DEVICE_RM.
2943 	 * Lets wait for this to happen, if needed.
2944 	 */
2945 	while (!list_empty(&vport->fc_nodes)) {
2946 		if (i++ > 3000) {
2947 			lpfc_printf_vlog(vport, KERN_ERR,
2948 					 LOG_TRACE_EVENT,
2949 				"0233 Nodelist not empty\n");
2950 			list_for_each_entry_safe(ndlp, next_ndlp,
2951 						&vport->fc_nodes, nlp_listp) {
2952 				lpfc_printf_vlog(ndlp->vport, KERN_ERR,
2953 						 LOG_TRACE_EVENT,
2954 						 "0282 did:x%x ndlp:x%px "
2955 						 "refcnt:%d xflags x%x nflag x%x\n",
2956 						 ndlp->nlp_DID, (void *)ndlp,
2957 						 kref_read(&ndlp->kref),
2958 						 ndlp->fc4_xpt_flags,
2959 						 ndlp->nlp_flag);
2960 			}
2961 			break;
2962 		}
2963 
2964 		/* Wait for any activity on ndlps to settle */
2965 		msleep(10);
2966 	}
2967 	lpfc_cleanup_vports_rrqs(vport, NULL);
2968 }
2969 
2970 /**
2971  * lpfc_stop_vport_timers - Stop all the timers associated with a vport
2972  * @vport: pointer to a virtual N_Port data structure.
2973  *
2974  * This routine stops all the timers associated with a @vport. This function
2975  * is invoked before disabling or deleting a @vport. Note that the physical
2976  * port is treated as @vport 0.
2977  **/
2978 void
2979 lpfc_stop_vport_timers(struct lpfc_vport *vport)
2980 {
2981 	del_timer_sync(&vport->els_tmofunc);
2982 	del_timer_sync(&vport->delayed_disc_tmo);
2983 	lpfc_can_disctmo(vport);
2984 	return;
2985 }
2986 
2987 /**
2988  * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2989  * @phba: pointer to lpfc hba data structure.
2990  *
2991  * This routine stops the SLI4 FCF rediscover wait timer if it's on. The
2992  * caller of this routine should already hold the host lock.
2993  **/
2994 void
2995 __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2996 {
2997 	/* Clear pending FCF rediscovery wait flag */
2998 	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
2999 
3000 	/* Now, try to stop the timer */
3001 	del_timer(&phba->fcf.redisc_wait);
3002 }
3003 
3004 /**
3005  * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
3006  * @phba: pointer to lpfc hba data structure.
3007  *
3008  * This routine stops the SLI4 FCF rediscover wait timer if it's on. It
3009  * checks whether the FCF rediscovery wait timer is pending with the host
3010  * lock held before proceeding with disabling the timer and clearing the
3011  * wait timer pendig flag.
3012  **/
3013 void
3014 lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
3015 {
3016 	spin_lock_irq(&phba->hbalock);
3017 	if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
3018 		/* FCF rediscovery timer already fired or stopped */
3019 		spin_unlock_irq(&phba->hbalock);
3020 		return;
3021 	}
3022 	__lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
3023 	/* Clear failover in progress flags */
3024 	phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
3025 	spin_unlock_irq(&phba->hbalock);
3026 }
3027 
3028 /**
3029  * lpfc_cmf_stop - Stop CMF processing
3030  * @phba: pointer to lpfc hba data structure.
3031  *
3032  * This is called when the link goes down or if CMF mode is turned OFF.
3033  * It is also called when going offline or unloaded just before the
3034  * congestion info buffer is unregistered.
3035  **/
3036 void
3037 lpfc_cmf_stop(struct lpfc_hba *phba)
3038 {
3039 	int cpu;
3040 	struct lpfc_cgn_stat *cgs;
3041 
3042 	/* We only do something if CMF is enabled */
3043 	if (!phba->sli4_hba.pc_sli4_params.cmf)
3044 		return;
3045 
3046 	lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
3047 			"6221 Stop CMF / Cancel Timer\n");
3048 
3049 	/* Cancel the CMF timer */
3050 	hrtimer_cancel(&phba->cmf_timer);
3051 
3052 	/* Zero CMF counters */
3053 	atomic_set(&phba->cmf_busy, 0);
3054 	for_each_present_cpu(cpu) {
3055 		cgs = per_cpu_ptr(phba->cmf_stat, cpu);
3056 		atomic64_set(&cgs->total_bytes, 0);
3057 		atomic64_set(&cgs->rcv_bytes, 0);
3058 		atomic_set(&cgs->rx_io_cnt, 0);
3059 		atomic64_set(&cgs->rx_latency, 0);
3060 	}
3061 	atomic_set(&phba->cmf_bw_wait, 0);
3062 
3063 	/* Resume any blocked IO - Queue unblock on workqueue */
3064 	queue_work(phba->wq, &phba->unblock_request_work);
3065 }
3066 
3067 static inline uint64_t
3068 lpfc_get_max_line_rate(struct lpfc_hba *phba)
3069 {
3070 	uint64_t rate = lpfc_sli_port_speed_get(phba);
3071 
3072 	return ((((unsigned long)rate) * 1024 * 1024) / 10);
3073 }
3074 
3075 void
3076 lpfc_cmf_signal_init(struct lpfc_hba *phba)
3077 {
3078 	lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
3079 			"6223 Signal CMF init\n");
3080 
3081 	/* Use the new fc_linkspeed to recalculate */
3082 	phba->cmf_interval_rate = LPFC_CMF_INTERVAL;
3083 	phba->cmf_max_line_rate = lpfc_get_max_line_rate(phba);
3084 	phba->cmf_link_byte_count = div_u64(phba->cmf_max_line_rate *
3085 					    phba->cmf_interval_rate, 1000);
3086 	phba->cmf_max_bytes_per_interval = phba->cmf_link_byte_count;
3087 
3088 	/* This is a signal to firmware to sync up CMF BW with link speed */
3089 	lpfc_issue_cmf_sync_wqe(phba, 0, 0);
3090 }
3091 
3092 /**
3093  * lpfc_cmf_start - Start CMF processing
3094  * @phba: pointer to lpfc hba data structure.
3095  *
3096  * This is called when the link comes up or if CMF mode is turned OFF
3097  * to Monitor or Managed.
3098  **/
3099 void
3100 lpfc_cmf_start(struct lpfc_hba *phba)
3101 {
3102 	struct lpfc_cgn_stat *cgs;
3103 	int cpu;
3104 
3105 	/* We only do something if CMF is enabled */
3106 	if (!phba->sli4_hba.pc_sli4_params.cmf ||
3107 	    phba->cmf_active_mode == LPFC_CFG_OFF)
3108 		return;
3109 
3110 	/* Reinitialize congestion buffer info */
3111 	lpfc_init_congestion_buf(phba);
3112 
3113 	atomic_set(&phba->cgn_fabric_warn_cnt, 0);
3114 	atomic_set(&phba->cgn_fabric_alarm_cnt, 0);
3115 	atomic_set(&phba->cgn_sync_alarm_cnt, 0);
3116 	atomic_set(&phba->cgn_sync_warn_cnt, 0);
3117 
3118 	atomic_set(&phba->cmf_busy, 0);
3119 	for_each_present_cpu(cpu) {
3120 		cgs = per_cpu_ptr(phba->cmf_stat, cpu);
3121 		atomic64_set(&cgs->total_bytes, 0);
3122 		atomic64_set(&cgs->rcv_bytes, 0);
3123 		atomic_set(&cgs->rx_io_cnt, 0);
3124 		atomic64_set(&cgs->rx_latency, 0);
3125 	}
3126 	phba->cmf_latency.tv_sec = 0;
3127 	phba->cmf_latency.tv_nsec = 0;
3128 
3129 	lpfc_cmf_signal_init(phba);
3130 
3131 	lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
3132 			"6222 Start CMF / Timer\n");
3133 
3134 	phba->cmf_timer_cnt = 0;
3135 	hrtimer_start(&phba->cmf_timer,
3136 		      ktime_set(0, LPFC_CMF_INTERVAL * 1000000),
3137 		      HRTIMER_MODE_REL);
3138 	/* Setup for latency check in IO cmpl routines */
3139 	ktime_get_real_ts64(&phba->cmf_latency);
3140 
3141 	atomic_set(&phba->cmf_bw_wait, 0);
3142 	atomic_set(&phba->cmf_stop_io, 0);
3143 }
3144 
3145 /**
3146  * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
3147  * @phba: pointer to lpfc hba data structure.
3148  *
3149  * This routine stops all the timers associated with a HBA. This function is
3150  * invoked before either putting a HBA offline or unloading the driver.
3151  **/
3152 void
3153 lpfc_stop_hba_timers(struct lpfc_hba *phba)
3154 {
3155 	if (phba->pport)
3156 		lpfc_stop_vport_timers(phba->pport);
3157 	cancel_delayed_work_sync(&phba->eq_delay_work);
3158 	cancel_delayed_work_sync(&phba->idle_stat_delay_work);
3159 	del_timer_sync(&phba->sli.mbox_tmo);
3160 	del_timer_sync(&phba->fabric_block_timer);
3161 	del_timer_sync(&phba->eratt_poll);
3162 	del_timer_sync(&phba->hb_tmofunc);
3163 	if (phba->sli_rev == LPFC_SLI_REV4) {
3164 		del_timer_sync(&phba->rrq_tmr);
3165 		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
3166 	}
3167 	phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
3168 
3169 	switch (phba->pci_dev_grp) {
3170 	case LPFC_PCI_DEV_LP:
3171 		/* Stop any LightPulse device specific driver timers */
3172 		del_timer_sync(&phba->fcp_poll_timer);
3173 		break;
3174 	case LPFC_PCI_DEV_OC:
3175 		/* Stop any OneConnect device specific driver timers */
3176 		lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
3177 		break;
3178 	default:
3179 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3180 				"0297 Invalid device group (x%x)\n",
3181 				phba->pci_dev_grp);
3182 		break;
3183 	}
3184 	return;
3185 }
3186 
3187 /**
3188  * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked
3189  * @phba: pointer to lpfc hba data structure.
3190  * @mbx_action: flag for mailbox no wait action.
3191  *
3192  * This routine marks a HBA's management interface as blocked. Once the HBA's
3193  * management interface is marked as blocked, all the user space access to
3194  * the HBA, whether they are from sysfs interface or libdfc interface will
3195  * all be blocked. The HBA is set to block the management interface when the
3196  * driver prepares the HBA interface for online or offline.
3197  **/
3198 static void
3199 lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action)
3200 {
3201 	unsigned long iflag;
3202 	uint8_t actcmd = MBX_HEARTBEAT;
3203 	unsigned long timeout;
3204 
3205 	spin_lock_irqsave(&phba->hbalock, iflag);
3206 	phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
3207 	spin_unlock_irqrestore(&phba->hbalock, iflag);
3208 	if (mbx_action == LPFC_MBX_NO_WAIT)
3209 		return;
3210 	timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
3211 	spin_lock_irqsave(&phba->hbalock, iflag);
3212 	if (phba->sli.mbox_active) {
3213 		actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
3214 		/* Determine how long we might wait for the active mailbox
3215 		 * command to be gracefully completed by firmware.
3216 		 */
3217 		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
3218 				phba->sli.mbox_active) * 1000) + jiffies;
3219 	}
3220 	spin_unlock_irqrestore(&phba->hbalock, iflag);
3221 
3222 	/* Wait for the outstnading mailbox command to complete */
3223 	while (phba->sli.mbox_active) {
3224 		/* Check active mailbox complete status every 2ms */
3225 		msleep(2);
3226 		if (time_after(jiffies, timeout)) {
3227 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3228 					"2813 Mgmt IO is Blocked %x "
3229 					"- mbox cmd %x still active\n",
3230 					phba->sli.sli_flag, actcmd);
3231 			break;
3232 		}
3233 	}
3234 }
3235 
3236 /**
3237  * lpfc_sli4_node_prep - Assign RPIs for active nodes.
3238  * @phba: pointer to lpfc hba data structure.
3239  *
3240  * Allocate RPIs for all active remote nodes. This is needed whenever
3241  * an SLI4 adapter is reset and the driver is not unloading. Its purpose
3242  * is to fixup the temporary rpi assignments.
3243  **/
3244 void
3245 lpfc_sli4_node_prep(struct lpfc_hba *phba)
3246 {
3247 	struct lpfc_nodelist  *ndlp, *next_ndlp;
3248 	struct lpfc_vport **vports;
3249 	int i, rpi;
3250 
3251 	if (phba->sli_rev != LPFC_SLI_REV4)
3252 		return;
3253 
3254 	vports = lpfc_create_vport_work_array(phba);
3255 	if (vports == NULL)
3256 		return;
3257 
3258 	for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3259 		if (vports[i]->load_flag & FC_UNLOADING)
3260 			continue;
3261 
3262 		list_for_each_entry_safe(ndlp, next_ndlp,
3263 					 &vports[i]->fc_nodes,
3264 					 nlp_listp) {
3265 			rpi = lpfc_sli4_alloc_rpi(phba);
3266 			if (rpi == LPFC_RPI_ALLOC_ERROR) {
3267 				/* TODO print log? */
3268 				continue;
3269 			}
3270 			ndlp->nlp_rpi = rpi;
3271 			lpfc_printf_vlog(ndlp->vport, KERN_INFO,
3272 					 LOG_NODE | LOG_DISCOVERY,
3273 					 "0009 Assign RPI x%x to ndlp x%px "
3274 					 "DID:x%06x flg:x%x\n",
3275 					 ndlp->nlp_rpi, ndlp, ndlp->nlp_DID,
3276 					 ndlp->nlp_flag);
3277 		}
3278 	}
3279 	lpfc_destroy_vport_work_array(phba, vports);
3280 }
3281 
3282 /**
3283  * lpfc_create_expedite_pool - create expedite pool
3284  * @phba: pointer to lpfc hba data structure.
3285  *
3286  * This routine moves a batch of XRIs from lpfc_io_buf_list_put of HWQ 0
3287  * to expedite pool. Mark them as expedite.
3288  **/
3289 static void lpfc_create_expedite_pool(struct lpfc_hba *phba)
3290 {
3291 	struct lpfc_sli4_hdw_queue *qp;
3292 	struct lpfc_io_buf *lpfc_ncmd;
3293 	struct lpfc_io_buf *lpfc_ncmd_next;
3294 	struct lpfc_epd_pool *epd_pool;
3295 	unsigned long iflag;
3296 
3297 	epd_pool = &phba->epd_pool;
3298 	qp = &phba->sli4_hba.hdwq[0];
3299 
3300 	spin_lock_init(&epd_pool->lock);
3301 	spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3302 	spin_lock(&epd_pool->lock);
3303 	INIT_LIST_HEAD(&epd_pool->list);
3304 	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3305 				 &qp->lpfc_io_buf_list_put, list) {
3306 		list_move_tail(&lpfc_ncmd->list, &epd_pool->list);
3307 		lpfc_ncmd->expedite = true;
3308 		qp->put_io_bufs--;
3309 		epd_pool->count++;
3310 		if (epd_pool->count >= XRI_BATCH)
3311 			break;
3312 	}
3313 	spin_unlock(&epd_pool->lock);
3314 	spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3315 }
3316 
3317 /**
3318  * lpfc_destroy_expedite_pool - destroy expedite pool
3319  * @phba: pointer to lpfc hba data structure.
3320  *
3321  * This routine returns XRIs from expedite pool to lpfc_io_buf_list_put
3322  * of HWQ 0. Clear the mark.
3323  **/
3324 static void lpfc_destroy_expedite_pool(struct lpfc_hba *phba)
3325 {
3326 	struct lpfc_sli4_hdw_queue *qp;
3327 	struct lpfc_io_buf *lpfc_ncmd;
3328 	struct lpfc_io_buf *lpfc_ncmd_next;
3329 	struct lpfc_epd_pool *epd_pool;
3330 	unsigned long iflag;
3331 
3332 	epd_pool = &phba->epd_pool;
3333 	qp = &phba->sli4_hba.hdwq[0];
3334 
3335 	spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3336 	spin_lock(&epd_pool->lock);
3337 	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3338 				 &epd_pool->list, list) {
3339 		list_move_tail(&lpfc_ncmd->list,
3340 			       &qp->lpfc_io_buf_list_put);
3341 		lpfc_ncmd->flags = false;
3342 		qp->put_io_bufs++;
3343 		epd_pool->count--;
3344 	}
3345 	spin_unlock(&epd_pool->lock);
3346 	spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3347 }
3348 
3349 /**
3350  * lpfc_create_multixri_pools - create multi-XRI pools
3351  * @phba: pointer to lpfc hba data structure.
3352  *
3353  * This routine initialize public, private per HWQ. Then, move XRIs from
3354  * lpfc_io_buf_list_put to public pool. High and low watermark are also
3355  * Initialized.
3356  **/
3357 void lpfc_create_multixri_pools(struct lpfc_hba *phba)
3358 {
3359 	u32 i, j;
3360 	u32 hwq_count;
3361 	u32 count_per_hwq;
3362 	struct lpfc_io_buf *lpfc_ncmd;
3363 	struct lpfc_io_buf *lpfc_ncmd_next;
3364 	unsigned long iflag;
3365 	struct lpfc_sli4_hdw_queue *qp;
3366 	struct lpfc_multixri_pool *multixri_pool;
3367 	struct lpfc_pbl_pool *pbl_pool;
3368 	struct lpfc_pvt_pool *pvt_pool;
3369 
3370 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3371 			"1234 num_hdw_queue=%d num_present_cpu=%d common_xri_cnt=%d\n",
3372 			phba->cfg_hdw_queue, phba->sli4_hba.num_present_cpu,
3373 			phba->sli4_hba.io_xri_cnt);
3374 
3375 	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3376 		lpfc_create_expedite_pool(phba);
3377 
3378 	hwq_count = phba->cfg_hdw_queue;
3379 	count_per_hwq = phba->sli4_hba.io_xri_cnt / hwq_count;
3380 
3381 	for (i = 0; i < hwq_count; i++) {
3382 		multixri_pool = kzalloc(sizeof(*multixri_pool), GFP_KERNEL);
3383 
3384 		if (!multixri_pool) {
3385 			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3386 					"1238 Failed to allocate memory for "
3387 					"multixri_pool\n");
3388 
3389 			if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3390 				lpfc_destroy_expedite_pool(phba);
3391 
3392 			j = 0;
3393 			while (j < i) {
3394 				qp = &phba->sli4_hba.hdwq[j];
3395 				kfree(qp->p_multixri_pool);
3396 				j++;
3397 			}
3398 			phba->cfg_xri_rebalancing = 0;
3399 			return;
3400 		}
3401 
3402 		qp = &phba->sli4_hba.hdwq[i];
3403 		qp->p_multixri_pool = multixri_pool;
3404 
3405 		multixri_pool->xri_limit = count_per_hwq;
3406 		multixri_pool->rrb_next_hwqid = i;
3407 
3408 		/* Deal with public free xri pool */
3409 		pbl_pool = &multixri_pool->pbl_pool;
3410 		spin_lock_init(&pbl_pool->lock);
3411 		spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3412 		spin_lock(&pbl_pool->lock);
3413 		INIT_LIST_HEAD(&pbl_pool->list);
3414 		list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3415 					 &qp->lpfc_io_buf_list_put, list) {
3416 			list_move_tail(&lpfc_ncmd->list, &pbl_pool->list);
3417 			qp->put_io_bufs--;
3418 			pbl_pool->count++;
3419 		}
3420 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3421 				"1235 Moved %d buffers from PUT list over to pbl_pool[%d]\n",
3422 				pbl_pool->count, i);
3423 		spin_unlock(&pbl_pool->lock);
3424 		spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3425 
3426 		/* Deal with private free xri pool */
3427 		pvt_pool = &multixri_pool->pvt_pool;
3428 		pvt_pool->high_watermark = multixri_pool->xri_limit / 2;
3429 		pvt_pool->low_watermark = XRI_BATCH;
3430 		spin_lock_init(&pvt_pool->lock);
3431 		spin_lock_irqsave(&pvt_pool->lock, iflag);
3432 		INIT_LIST_HEAD(&pvt_pool->list);
3433 		pvt_pool->count = 0;
3434 		spin_unlock_irqrestore(&pvt_pool->lock, iflag);
3435 	}
3436 }
3437 
3438 /**
3439  * lpfc_destroy_multixri_pools - destroy multi-XRI pools
3440  * @phba: pointer to lpfc hba data structure.
3441  *
3442  * This routine returns XRIs from public/private to lpfc_io_buf_list_put.
3443  **/
3444 static void lpfc_destroy_multixri_pools(struct lpfc_hba *phba)
3445 {
3446 	u32 i;
3447 	u32 hwq_count;
3448 	struct lpfc_io_buf *lpfc_ncmd;
3449 	struct lpfc_io_buf *lpfc_ncmd_next;
3450 	unsigned long iflag;
3451 	struct lpfc_sli4_hdw_queue *qp;
3452 	struct lpfc_multixri_pool *multixri_pool;
3453 	struct lpfc_pbl_pool *pbl_pool;
3454 	struct lpfc_pvt_pool *pvt_pool;
3455 
3456 	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3457 		lpfc_destroy_expedite_pool(phba);
3458 
3459 	if (!(phba->pport->load_flag & FC_UNLOADING))
3460 		lpfc_sli_flush_io_rings(phba);
3461 
3462 	hwq_count = phba->cfg_hdw_queue;
3463 
3464 	for (i = 0; i < hwq_count; i++) {
3465 		qp = &phba->sli4_hba.hdwq[i];
3466 		multixri_pool = qp->p_multixri_pool;
3467 		if (!multixri_pool)
3468 			continue;
3469 
3470 		qp->p_multixri_pool = NULL;
3471 
3472 		spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3473 
3474 		/* Deal with public free xri pool */
3475 		pbl_pool = &multixri_pool->pbl_pool;
3476 		spin_lock(&pbl_pool->lock);
3477 
3478 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3479 				"1236 Moving %d buffers from pbl_pool[%d] TO PUT list\n",
3480 				pbl_pool->count, i);
3481 
3482 		list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3483 					 &pbl_pool->list, list) {
3484 			list_move_tail(&lpfc_ncmd->list,
3485 				       &qp->lpfc_io_buf_list_put);
3486 			qp->put_io_bufs++;
3487 			pbl_pool->count--;
3488 		}
3489 
3490 		INIT_LIST_HEAD(&pbl_pool->list);
3491 		pbl_pool->count = 0;
3492 
3493 		spin_unlock(&pbl_pool->lock);
3494 
3495 		/* Deal with private free xri pool */
3496 		pvt_pool = &multixri_pool->pvt_pool;
3497 		spin_lock(&pvt_pool->lock);
3498 
3499 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3500 				"1237 Moving %d buffers from pvt_pool[%d] TO PUT list\n",
3501 				pvt_pool->count, i);
3502 
3503 		list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3504 					 &pvt_pool->list, list) {
3505 			list_move_tail(&lpfc_ncmd->list,
3506 				       &qp->lpfc_io_buf_list_put);
3507 			qp->put_io_bufs++;
3508 			pvt_pool->count--;
3509 		}
3510 
3511 		INIT_LIST_HEAD(&pvt_pool->list);
3512 		pvt_pool->count = 0;
3513 
3514 		spin_unlock(&pvt_pool->lock);
3515 		spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3516 
3517 		kfree(multixri_pool);
3518 	}
3519 }
3520 
3521 /**
3522  * lpfc_online - Initialize and bring a HBA online
3523  * @phba: pointer to lpfc hba data structure.
3524  *
3525  * This routine initializes the HBA and brings a HBA online. During this
3526  * process, the management interface is blocked to prevent user space access
3527  * to the HBA interfering with the driver initialization.
3528  *
3529  * Return codes
3530  *   0 - successful
3531  *   1 - failed
3532  **/
3533 int
3534 lpfc_online(struct lpfc_hba *phba)
3535 {
3536 	struct lpfc_vport *vport;
3537 	struct lpfc_vport **vports;
3538 	int i, error = 0;
3539 	bool vpis_cleared = false;
3540 
3541 	if (!phba)
3542 		return 0;
3543 	vport = phba->pport;
3544 
3545 	if (!(vport->fc_flag & FC_OFFLINE_MODE))
3546 		return 0;
3547 
3548 	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
3549 			"0458 Bring Adapter online\n");
3550 
3551 	lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
3552 
3553 	if (phba->sli_rev == LPFC_SLI_REV4) {
3554 		if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
3555 			lpfc_unblock_mgmt_io(phba);
3556 			return 1;
3557 		}
3558 		spin_lock_irq(&phba->hbalock);
3559 		if (!phba->sli4_hba.max_cfg_param.vpi_used)
3560 			vpis_cleared = true;
3561 		spin_unlock_irq(&phba->hbalock);
3562 
3563 		/* Reestablish the local initiator port.
3564 		 * The offline process destroyed the previous lport.
3565 		 */
3566 		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME &&
3567 				!phba->nvmet_support) {
3568 			error = lpfc_nvme_create_localport(phba->pport);
3569 			if (error)
3570 				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3571 					"6132 NVME restore reg failed "
3572 					"on nvmei error x%x\n", error);
3573 		}
3574 	} else {
3575 		lpfc_sli_queue_init(phba);
3576 		if (lpfc_sli_hba_setup(phba)) {	/* Initialize SLI2/SLI3 HBA */
3577 			lpfc_unblock_mgmt_io(phba);
3578 			return 1;
3579 		}
3580 	}
3581 
3582 	vports = lpfc_create_vport_work_array(phba);
3583 	if (vports != NULL) {
3584 		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3585 			struct Scsi_Host *shost;
3586 			shost = lpfc_shost_from_vport(vports[i]);
3587 			spin_lock_irq(shost->host_lock);
3588 			vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
3589 			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
3590 				vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
3591 			if (phba->sli_rev == LPFC_SLI_REV4) {
3592 				vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
3593 				if ((vpis_cleared) &&
3594 				    (vports[i]->port_type !=
3595 					LPFC_PHYSICAL_PORT))
3596 					vports[i]->vpi = 0;
3597 			}
3598 			spin_unlock_irq(shost->host_lock);
3599 		}
3600 	}
3601 	lpfc_destroy_vport_work_array(phba, vports);
3602 
3603 	if (phba->cfg_xri_rebalancing)
3604 		lpfc_create_multixri_pools(phba);
3605 
3606 	lpfc_cpuhp_add(phba);
3607 
3608 	lpfc_unblock_mgmt_io(phba);
3609 	return 0;
3610 }
3611 
3612 /**
3613  * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked
3614  * @phba: pointer to lpfc hba data structure.
3615  *
3616  * This routine marks a HBA's management interface as not blocked. Once the
3617  * HBA's management interface is marked as not blocked, all the user space
3618  * access to the HBA, whether they are from sysfs interface or libdfc
3619  * interface will be allowed. The HBA is set to block the management interface
3620  * when the driver prepares the HBA interface for online or offline and then
3621  * set to unblock the management interface afterwards.
3622  **/
3623 void
3624 lpfc_unblock_mgmt_io(struct lpfc_hba * phba)
3625 {
3626 	unsigned long iflag;
3627 
3628 	spin_lock_irqsave(&phba->hbalock, iflag);
3629 	phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
3630 	spin_unlock_irqrestore(&phba->hbalock, iflag);
3631 }
3632 
3633 /**
3634  * lpfc_offline_prep - Prepare a HBA to be brought offline
3635  * @phba: pointer to lpfc hba data structure.
3636  * @mbx_action: flag for mailbox shutdown action.
3637  *
3638  * This routine is invoked to prepare a HBA to be brought offline. It performs
3639  * unregistration login to all the nodes on all vports and flushes the mailbox
3640  * queue to make it ready to be brought offline.
3641  **/
3642 void
3643 lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
3644 {
3645 	struct lpfc_vport *vport = phba->pport;
3646 	struct lpfc_nodelist  *ndlp, *next_ndlp;
3647 	struct lpfc_vport **vports;
3648 	struct Scsi_Host *shost;
3649 	int i;
3650 	int offline = 0;
3651 
3652 	if (vport->fc_flag & FC_OFFLINE_MODE)
3653 		return;
3654 
3655 	lpfc_block_mgmt_io(phba, mbx_action);
3656 
3657 	lpfc_linkdown(phba);
3658 
3659 	offline =  pci_channel_offline(phba->pcidev);
3660 
3661 	/* Issue an unreg_login to all nodes on all vports */
3662 	vports = lpfc_create_vport_work_array(phba);
3663 	if (vports != NULL) {
3664 		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3665 			if (vports[i]->load_flag & FC_UNLOADING)
3666 				continue;
3667 			shost = lpfc_shost_from_vport(vports[i]);
3668 			spin_lock_irq(shost->host_lock);
3669 			vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
3670 			vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
3671 			vports[i]->fc_flag &= ~FC_VFI_REGISTERED;
3672 			spin_unlock_irq(shost->host_lock);
3673 
3674 			shost =	lpfc_shost_from_vport(vports[i]);
3675 			list_for_each_entry_safe(ndlp, next_ndlp,
3676 						 &vports[i]->fc_nodes,
3677 						 nlp_listp) {
3678 
3679 				spin_lock_irq(&ndlp->lock);
3680 				ndlp->nlp_flag &= ~NLP_NPR_ADISC;
3681 				spin_unlock_irq(&ndlp->lock);
3682 
3683 				if (offline) {
3684 					spin_lock_irq(&ndlp->lock);
3685 					ndlp->nlp_flag &= ~(NLP_UNREG_INP |
3686 							    NLP_RPI_REGISTERED);
3687 					spin_unlock_irq(&ndlp->lock);
3688 				} else {
3689 					lpfc_unreg_rpi(vports[i], ndlp);
3690 				}
3691 				/*
3692 				 * Whenever an SLI4 port goes offline, free the
3693 				 * RPI. Get a new RPI when the adapter port
3694 				 * comes back online.
3695 				 */
3696 				if (phba->sli_rev == LPFC_SLI_REV4) {
3697 					lpfc_printf_vlog(vports[i], KERN_INFO,
3698 						 LOG_NODE | LOG_DISCOVERY,
3699 						 "0011 Free RPI x%x on "
3700 						 "ndlp: x%px did x%x\n",
3701 						 ndlp->nlp_rpi, ndlp,
3702 						 ndlp->nlp_DID);
3703 					lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
3704 					ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
3705 				}
3706 
3707 				if (ndlp->nlp_type & NLP_FABRIC) {
3708 					lpfc_disc_state_machine(vports[i], ndlp,
3709 						NULL, NLP_EVT_DEVICE_RECOVERY);
3710 
3711 					/* Don't remove the node unless the node
3712 					 * has been unregistered with the
3713 					 * transport, and we're not in recovery
3714 					 * before dev_loss_tmo triggered.
3715 					 * Otherwise, let dev_loss take care of
3716 					 * the node.
3717 					 */
3718 					if (!(ndlp->save_flags &
3719 					      NLP_IN_RECOV_POST_DEV_LOSS) &&
3720 					    !(ndlp->fc4_xpt_flags &
3721 					      (NVME_XPT_REGD | SCSI_XPT_REGD)))
3722 						lpfc_disc_state_machine
3723 							(vports[i], ndlp,
3724 							 NULL,
3725 							 NLP_EVT_DEVICE_RM);
3726 				}
3727 			}
3728 		}
3729 	}
3730 	lpfc_destroy_vport_work_array(phba, vports);
3731 
3732 	lpfc_sli_mbox_sys_shutdown(phba, mbx_action);
3733 
3734 	if (phba->wq)
3735 		flush_workqueue(phba->wq);
3736 }
3737 
3738 /**
3739  * lpfc_offline - Bring a HBA offline
3740  * @phba: pointer to lpfc hba data structure.
3741  *
3742  * This routine actually brings a HBA offline. It stops all the timers
3743  * associated with the HBA, brings down the SLI layer, and eventually
3744  * marks the HBA as in offline state for the upper layer protocol.
3745  **/
3746 void
3747 lpfc_offline(struct lpfc_hba *phba)
3748 {
3749 	struct Scsi_Host  *shost;
3750 	struct lpfc_vport **vports;
3751 	int i;
3752 
3753 	if (phba->pport->fc_flag & FC_OFFLINE_MODE)
3754 		return;
3755 
3756 	/* stop port and all timers associated with this hba */
3757 	lpfc_stop_port(phba);
3758 
3759 	/* Tear down the local and target port registrations.  The
3760 	 * nvme transports need to cleanup.
3761 	 */
3762 	lpfc_nvmet_destroy_targetport(phba);
3763 	lpfc_nvme_destroy_localport(phba->pport);
3764 
3765 	vports = lpfc_create_vport_work_array(phba);
3766 	if (vports != NULL)
3767 		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
3768 			lpfc_stop_vport_timers(vports[i]);
3769 	lpfc_destroy_vport_work_array(phba, vports);
3770 	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
3771 			"0460 Bring Adapter offline\n");
3772 	/* Bring down the SLI Layer and cleanup.  The HBA is offline
3773 	   now.  */
3774 	lpfc_sli_hba_down(phba);
3775 	spin_lock_irq(&phba->hbalock);
3776 	phba->work_ha = 0;
3777 	spin_unlock_irq(&phba->hbalock);
3778 	vports = lpfc_create_vport_work_array(phba);
3779 	if (vports != NULL)
3780 		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3781 			shost = lpfc_shost_from_vport(vports[i]);
3782 			spin_lock_irq(shost->host_lock);
3783 			vports[i]->work_port_events = 0;
3784 			vports[i]->fc_flag |= FC_OFFLINE_MODE;
3785 			spin_unlock_irq(shost->host_lock);
3786 		}
3787 	lpfc_destroy_vport_work_array(phba, vports);
3788 	/* If OFFLINE flag is clear (i.e. unloading), cpuhp removal is handled
3789 	 * in hba_unset
3790 	 */
3791 	if (phba->pport->fc_flag & FC_OFFLINE_MODE)
3792 		__lpfc_cpuhp_remove(phba);
3793 
3794 	if (phba->cfg_xri_rebalancing)
3795 		lpfc_destroy_multixri_pools(phba);
3796 }
3797 
3798 /**
3799  * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
3800  * @phba: pointer to lpfc hba data structure.
3801  *
3802  * This routine is to free all the SCSI buffers and IOCBs from the driver
3803  * list back to kernel. It is called from lpfc_pci_remove_one to free
3804  * the internal resources before the device is removed from the system.
3805  **/
3806 static void
3807 lpfc_scsi_free(struct lpfc_hba *phba)
3808 {
3809 	struct lpfc_io_buf *sb, *sb_next;
3810 
3811 	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
3812 		return;
3813 
3814 	spin_lock_irq(&phba->hbalock);
3815 
3816 	/* Release all the lpfc_scsi_bufs maintained by this host. */
3817 
3818 	spin_lock(&phba->scsi_buf_list_put_lock);
3819 	list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put,
3820 				 list) {
3821 		list_del(&sb->list);
3822 		dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
3823 			      sb->dma_handle);
3824 		kfree(sb);
3825 		phba->total_scsi_bufs--;
3826 	}
3827 	spin_unlock(&phba->scsi_buf_list_put_lock);
3828 
3829 	spin_lock(&phba->scsi_buf_list_get_lock);
3830 	list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get,
3831 				 list) {
3832 		list_del(&sb->list);
3833 		dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
3834 			      sb->dma_handle);
3835 		kfree(sb);
3836 		phba->total_scsi_bufs--;
3837 	}
3838 	spin_unlock(&phba->scsi_buf_list_get_lock);
3839 	spin_unlock_irq(&phba->hbalock);
3840 }
3841 
3842 /**
3843  * lpfc_io_free - Free all the IO buffers and IOCBs from driver lists
3844  * @phba: pointer to lpfc hba data structure.
3845  *
3846  * This routine is to free all the IO buffers and IOCBs from the driver
3847  * list back to kernel. It is called from lpfc_pci_remove_one to free
3848  * the internal resources before the device is removed from the system.
3849  **/
3850 void
3851 lpfc_io_free(struct lpfc_hba *phba)
3852 {
3853 	struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;
3854 	struct lpfc_sli4_hdw_queue *qp;
3855 	int idx;
3856 
3857 	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
3858 		qp = &phba->sli4_hba.hdwq[idx];
3859 		/* Release all the lpfc_nvme_bufs maintained by this host. */
3860 		spin_lock(&qp->io_buf_list_put_lock);
3861 		list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3862 					 &qp->lpfc_io_buf_list_put,
3863 					 list) {
3864 			list_del(&lpfc_ncmd->list);
3865 			qp->put_io_bufs--;
3866 			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
3867 				      lpfc_ncmd->data, lpfc_ncmd->dma_handle);
3868 			if (phba->cfg_xpsgl && !phba->nvmet_support)
3869 				lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
3870 			lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
3871 			kfree(lpfc_ncmd);
3872 			qp->total_io_bufs--;
3873 		}
3874 		spin_unlock(&qp->io_buf_list_put_lock);
3875 
3876 		spin_lock(&qp->io_buf_list_get_lock);
3877 		list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3878 					 &qp->lpfc_io_buf_list_get,
3879 					 list) {
3880 			list_del(&lpfc_ncmd->list);
3881 			qp->get_io_bufs--;
3882 			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
3883 				      lpfc_ncmd->data, lpfc_ncmd->dma_handle);
3884 			if (phba->cfg_xpsgl && !phba->nvmet_support)
3885 				lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
3886 			lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
3887 			kfree(lpfc_ncmd);
3888 			qp->total_io_bufs--;
3889 		}
3890 		spin_unlock(&qp->io_buf_list_get_lock);
3891 	}
3892 }
3893 
3894 /**
3895  * lpfc_sli4_els_sgl_update - update ELS xri-sgl sizing and mapping
3896  * @phba: pointer to lpfc hba data structure.
3897  *
3898  * This routine first calculates the sizes of the current els and allocated
3899  * scsi sgl lists, and then goes through all sgls to updates the physical
3900  * XRIs assigned due to port function reset. During port initialization, the
3901  * current els and allocated scsi sgl lists are 0s.
3902  *
3903  * Return codes
3904  *   0 - successful (for now, it always returns 0)
3905  **/
3906 int
3907 lpfc_sli4_els_sgl_update(struct lpfc_hba *phba)
3908 {
3909 	struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
3910 	uint16_t i, lxri, xri_cnt, els_xri_cnt;
3911 	LIST_HEAD(els_sgl_list);
3912 	int rc;
3913 
3914 	/*
3915 	 * update on pci function's els xri-sgl list
3916 	 */
3917 	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
3918 
3919 	if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) {
3920 		/* els xri-sgl expanded */
3921 		xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt;
3922 		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3923 				"3157 ELS xri-sgl count increased from "
3924 				"%d to %d\n", phba->sli4_hba.els_xri_cnt,
3925 				els_xri_cnt);
3926 		/* allocate the additional els sgls */
3927 		for (i = 0; i < xri_cnt; i++) {
3928 			sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
3929 					     GFP_KERNEL);
3930 			if (sglq_entry == NULL) {
3931 				lpfc_printf_log(phba, KERN_ERR,
3932 						LOG_TRACE_EVENT,
3933 						"2562 Failure to allocate an "
3934 						"ELS sgl entry:%d\n", i);
3935 				rc = -ENOMEM;
3936 				goto out_free_mem;
3937 			}
3938 			sglq_entry->buff_type = GEN_BUFF_TYPE;
3939 			sglq_entry->virt = lpfc_mbuf_alloc(phba, 0,
3940 							   &sglq_entry->phys);
3941 			if (sglq_entry->virt == NULL) {
3942 				kfree(sglq_entry);
3943 				lpfc_printf_log(phba, KERN_ERR,
3944 						LOG_TRACE_EVENT,
3945 						"2563 Failure to allocate an "
3946 						"ELS mbuf:%d\n", i);
3947 				rc = -ENOMEM;
3948 				goto out_free_mem;
3949 			}
3950 			sglq_entry->sgl = sglq_entry->virt;
3951 			memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
3952 			sglq_entry->state = SGL_FREED;
3953 			list_add_tail(&sglq_entry->list, &els_sgl_list);
3954 		}
3955 		spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
3956 		list_splice_init(&els_sgl_list,
3957 				 &phba->sli4_hba.lpfc_els_sgl_list);
3958 		spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
3959 	} else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) {
3960 		/* els xri-sgl shrinked */
3961 		xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt;
3962 		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3963 				"3158 ELS xri-sgl count decreased from "
3964 				"%d to %d\n", phba->sli4_hba.els_xri_cnt,
3965 				els_xri_cnt);
3966 		spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
3967 		list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list,
3968 				 &els_sgl_list);
3969 		/* release extra els sgls from list */
3970 		for (i = 0; i < xri_cnt; i++) {
3971 			list_remove_head(&els_sgl_list,
3972 					 sglq_entry, struct lpfc_sglq, list);
3973 			if (sglq_entry) {
3974 				__lpfc_mbuf_free(phba, sglq_entry->virt,
3975 						 sglq_entry->phys);
3976 				kfree(sglq_entry);
3977 			}
3978 		}
3979 		list_splice_init(&els_sgl_list,
3980 				 &phba->sli4_hba.lpfc_els_sgl_list);
3981 		spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
3982 	} else
3983 		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3984 				"3163 ELS xri-sgl count unchanged: %d\n",
3985 				els_xri_cnt);
3986 	phba->sli4_hba.els_xri_cnt = els_xri_cnt;
3987 
3988 	/* update xris to els sgls on the list */
3989 	sglq_entry = NULL;
3990 	sglq_entry_next = NULL;
3991 	list_for_each_entry_safe(sglq_entry, sglq_entry_next,
3992 				 &phba->sli4_hba.lpfc_els_sgl_list, list) {
3993 		lxri = lpfc_sli4_next_xritag(phba);
3994 		if (lxri == NO_XRI) {
3995 			lpfc_printf_log(phba, KERN_ERR,
3996 					LOG_TRACE_EVENT,
3997 					"2400 Failed to allocate xri for "
3998 					"ELS sgl\n");
3999 			rc = -ENOMEM;
4000 			goto out_free_mem;
4001 		}
4002 		sglq_entry->sli4_lxritag = lxri;
4003 		sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4004 	}
4005 	return 0;
4006 
4007 out_free_mem:
4008 	lpfc_free_els_sgl_list(phba);
4009 	return rc;
4010 }
4011 
4012 /**
4013  * lpfc_sli4_nvmet_sgl_update - update xri-sgl sizing and mapping
4014  * @phba: pointer to lpfc hba data structure.
4015  *
4016  * This routine first calculates the sizes of the current els and allocated
4017  * scsi sgl lists, and then goes through all sgls to updates the physical
4018  * XRIs assigned due to port function reset. During port initialization, the
4019  * current els and allocated scsi sgl lists are 0s.
4020  *
4021  * Return codes
4022  *   0 - successful (for now, it always returns 0)
4023  **/
4024 int
4025 lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba)
4026 {
4027 	struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
4028 	uint16_t i, lxri, xri_cnt, els_xri_cnt;
4029 	uint16_t nvmet_xri_cnt;
4030 	LIST_HEAD(nvmet_sgl_list);
4031 	int rc;
4032 
4033 	/*
4034 	 * update on pci function's nvmet xri-sgl list
4035 	 */
4036 	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
4037 
4038 	/* For NVMET, ALL remaining XRIs are dedicated for IO processing */
4039 	nvmet_xri_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
4040 	if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) {
4041 		/* els xri-sgl expanded */
4042 		xri_cnt = nvmet_xri_cnt - phba->sli4_hba.nvmet_xri_cnt;
4043 		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4044 				"6302 NVMET xri-sgl cnt grew from %d to %d\n",
4045 				phba->sli4_hba.nvmet_xri_cnt, nvmet_xri_cnt);
4046 		/* allocate the additional nvmet sgls */
4047 		for (i = 0; i < xri_cnt; i++) {
4048 			sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
4049 					     GFP_KERNEL);
4050 			if (sglq_entry == NULL) {
4051 				lpfc_printf_log(phba, KERN_ERR,
4052 						LOG_TRACE_EVENT,
4053 						"6303 Failure to allocate an "
4054 						"NVMET sgl entry:%d\n", i);
4055 				rc = -ENOMEM;
4056 				goto out_free_mem;
4057 			}
4058 			sglq_entry->buff_type = NVMET_BUFF_TYPE;
4059 			sglq_entry->virt = lpfc_nvmet_buf_alloc(phba, 0,
4060 							   &sglq_entry->phys);
4061 			if (sglq_entry->virt == NULL) {
4062 				kfree(sglq_entry);
4063 				lpfc_printf_log(phba, KERN_ERR,
4064 						LOG_TRACE_EVENT,
4065 						"6304 Failure to allocate an "
4066 						"NVMET buf:%d\n", i);
4067 				rc = -ENOMEM;
4068 				goto out_free_mem;
4069 			}
4070 			sglq_entry->sgl = sglq_entry->virt;
4071 			memset(sglq_entry->sgl, 0,
4072 			       phba->cfg_sg_dma_buf_size);
4073 			sglq_entry->state = SGL_FREED;
4074 			list_add_tail(&sglq_entry->list, &nvmet_sgl_list);
4075 		}
4076 		spin_lock_irq(&phba->hbalock);
4077 		spin_lock(&phba->sli4_hba.sgl_list_lock);
4078 		list_splice_init(&nvmet_sgl_list,
4079 				 &phba->sli4_hba.lpfc_nvmet_sgl_list);
4080 		spin_unlock(&phba->sli4_hba.sgl_list_lock);
4081 		spin_unlock_irq(&phba->hbalock);
4082 	} else if (nvmet_xri_cnt < phba->sli4_hba.nvmet_xri_cnt) {
4083 		/* nvmet xri-sgl shrunk */
4084 		xri_cnt = phba->sli4_hba.nvmet_xri_cnt - nvmet_xri_cnt;
4085 		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4086 				"6305 NVMET xri-sgl count decreased from "
4087 				"%d to %d\n", phba->sli4_hba.nvmet_xri_cnt,
4088 				nvmet_xri_cnt);
4089 		spin_lock_irq(&phba->hbalock);
4090 		spin_lock(&phba->sli4_hba.sgl_list_lock);
4091 		list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list,
4092 				 &nvmet_sgl_list);
4093 		/* release extra nvmet sgls from list */
4094 		for (i = 0; i < xri_cnt; i++) {
4095 			list_remove_head(&nvmet_sgl_list,
4096 					 sglq_entry, struct lpfc_sglq, list);
4097 			if (sglq_entry) {
4098 				lpfc_nvmet_buf_free(phba, sglq_entry->virt,
4099 						    sglq_entry->phys);
4100 				kfree(sglq_entry);
4101 			}
4102 		}
4103 		list_splice_init(&nvmet_sgl_list,
4104 				 &phba->sli4_hba.lpfc_nvmet_sgl_list);
4105 		spin_unlock(&phba->sli4_hba.sgl_list_lock);
4106 		spin_unlock_irq(&phba->hbalock);
4107 	} else
4108 		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4109 				"6306 NVMET xri-sgl count unchanged: %d\n",
4110 				nvmet_xri_cnt);
4111 	phba->sli4_hba.nvmet_xri_cnt = nvmet_xri_cnt;
4112 
4113 	/* update xris to nvmet sgls on the list */
4114 	sglq_entry = NULL;
4115 	sglq_entry_next = NULL;
4116 	list_for_each_entry_safe(sglq_entry, sglq_entry_next,
4117 				 &phba->sli4_hba.lpfc_nvmet_sgl_list, list) {
4118 		lxri = lpfc_sli4_next_xritag(phba);
4119 		if (lxri == NO_XRI) {
4120 			lpfc_printf_log(phba, KERN_ERR,
4121 					LOG_TRACE_EVENT,
4122 					"6307 Failed to allocate xri for "
4123 					"NVMET sgl\n");
4124 			rc = -ENOMEM;
4125 			goto out_free_mem;
4126 		}
4127 		sglq_entry->sli4_lxritag = lxri;
4128 		sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4129 	}
4130 	return 0;
4131 
4132 out_free_mem:
4133 	lpfc_free_nvmet_sgl_list(phba);
4134 	return rc;
4135 }
4136 
4137 int
4138 lpfc_io_buf_flush(struct lpfc_hba *phba, struct list_head *cbuf)
4139 {
4140 	LIST_HEAD(blist);
4141 	struct lpfc_sli4_hdw_queue *qp;
4142 	struct lpfc_io_buf *lpfc_cmd;
4143 	struct lpfc_io_buf *iobufp, *prev_iobufp;
4144 	int idx, cnt, xri, inserted;
4145 
4146 	cnt = 0;
4147 	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
4148 		qp = &phba->sli4_hba.hdwq[idx];
4149 		spin_lock_irq(&qp->io_buf_list_get_lock);
4150 		spin_lock(&qp->io_buf_list_put_lock);
4151 
4152 		/* Take everything off the get and put lists */
4153 		list_splice_init(&qp->lpfc_io_buf_list_get, &blist);
4154 		list_splice(&qp->lpfc_io_buf_list_put, &blist);
4155 		INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get);
4156 		INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
4157 		cnt += qp->get_io_bufs + qp->put_io_bufs;
4158 		qp->get_io_bufs = 0;
4159 		qp->put_io_bufs = 0;
4160 		qp->total_io_bufs = 0;
4161 		spin_unlock(&qp->io_buf_list_put_lock);
4162 		spin_unlock_irq(&qp->io_buf_list_get_lock);
4163 	}
4164 
4165 	/*
4166 	 * Take IO buffers off blist and put on cbuf sorted by XRI.
4167 	 * This is because POST_SGL takes a sequential range of XRIs
4168 	 * to post to the firmware.
4169 	 */
4170 	for (idx = 0; idx < cnt; idx++) {
4171 		list_remove_head(&blist, lpfc_cmd, struct lpfc_io_buf, list);
4172 		if (!lpfc_cmd)
4173 			return cnt;
4174 		if (idx == 0) {
4175 			list_add_tail(&lpfc_cmd->list, cbuf);
4176 			continue;
4177 		}
4178 		xri = lpfc_cmd->cur_iocbq.sli4_xritag;
4179 		inserted = 0;
4180 		prev_iobufp = NULL;
4181 		list_for_each_entry(iobufp, cbuf, list) {
4182 			if (xri < iobufp->cur_iocbq.sli4_xritag) {
4183 				if (prev_iobufp)
4184 					list_add(&lpfc_cmd->list,
4185 						 &prev_iobufp->list);
4186 				else
4187 					list_add(&lpfc_cmd->list, cbuf);
4188 				inserted = 1;
4189 				break;
4190 			}
4191 			prev_iobufp = iobufp;
4192 		}
4193 		if (!inserted)
4194 			list_add_tail(&lpfc_cmd->list, cbuf);
4195 	}
4196 	return cnt;
4197 }
4198 
4199 int
4200 lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf)
4201 {
4202 	struct lpfc_sli4_hdw_queue *qp;
4203 	struct lpfc_io_buf *lpfc_cmd;
4204 	int idx, cnt;
4205 
4206 	qp = phba->sli4_hba.hdwq;
4207 	cnt = 0;
4208 	while (!list_empty(cbuf)) {
4209 		for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
4210 			list_remove_head(cbuf, lpfc_cmd,
4211 					 struct lpfc_io_buf, list);
4212 			if (!lpfc_cmd)
4213 				return cnt;
4214 			cnt++;
4215 			qp = &phba->sli4_hba.hdwq[idx];
4216 			lpfc_cmd->hdwq_no = idx;
4217 			lpfc_cmd->hdwq = qp;
4218 			lpfc_cmd->cur_iocbq.wqe_cmpl = NULL;
4219 			lpfc_cmd->cur_iocbq.iocb_cmpl = NULL;
4220 			spin_lock(&qp->io_buf_list_put_lock);
4221 			list_add_tail(&lpfc_cmd->list,
4222 				      &qp->lpfc_io_buf_list_put);
4223 			qp->put_io_bufs++;
4224 			qp->total_io_bufs++;
4225 			spin_unlock(&qp->io_buf_list_put_lock);
4226 		}
4227 	}
4228 	return cnt;
4229 }
4230 
4231 /**
4232  * lpfc_sli4_io_sgl_update - update xri-sgl sizing and mapping
4233  * @phba: pointer to lpfc hba data structure.
4234  *
4235  * This routine first calculates the sizes of the current els and allocated
4236  * scsi sgl lists, and then goes through all sgls to updates the physical
4237  * XRIs assigned due to port function reset. During port initialization, the
4238  * current els and allocated scsi sgl lists are 0s.
4239  *
4240  * Return codes
4241  *   0 - successful (for now, it always returns 0)
4242  **/
4243 int
4244 lpfc_sli4_io_sgl_update(struct lpfc_hba *phba)
4245 {
4246 	struct lpfc_io_buf *lpfc_ncmd = NULL, *lpfc_ncmd_next = NULL;
4247 	uint16_t i, lxri, els_xri_cnt;
4248 	uint16_t io_xri_cnt, io_xri_max;
4249 	LIST_HEAD(io_sgl_list);
4250 	int rc, cnt;
4251 
4252 	/*
4253 	 * update on pci function's allocated nvme xri-sgl list
4254 	 */
4255 
4256 	/* maximum number of xris available for nvme buffers */
4257 	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
4258 	io_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
4259 	phba->sli4_hba.io_xri_max = io_xri_max;
4260 
4261 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4262 			"6074 Current allocated XRI sgl count:%d, "
4263 			"maximum XRI count:%d\n",
4264 			phba->sli4_hba.io_xri_cnt,
4265 			phba->sli4_hba.io_xri_max);
4266 
4267 	cnt = lpfc_io_buf_flush(phba, &io_sgl_list);
4268 
4269 	if (phba->sli4_hba.io_xri_cnt > phba->sli4_hba.io_xri_max) {
4270 		/* max nvme xri shrunk below the allocated nvme buffers */
4271 		io_xri_cnt = phba->sli4_hba.io_xri_cnt -
4272 					phba->sli4_hba.io_xri_max;
4273 		/* release the extra allocated nvme buffers */
4274 		for (i = 0; i < io_xri_cnt; i++) {
4275 			list_remove_head(&io_sgl_list, lpfc_ncmd,
4276 					 struct lpfc_io_buf, list);
4277 			if (lpfc_ncmd) {
4278 				dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4279 					      lpfc_ncmd->data,
4280 					      lpfc_ncmd->dma_handle);
4281 				kfree(lpfc_ncmd);
4282 			}
4283 		}
4284 		phba->sli4_hba.io_xri_cnt -= io_xri_cnt;
4285 	}
4286 
4287 	/* update xris associated to remaining allocated nvme buffers */
4288 	lpfc_ncmd = NULL;
4289 	lpfc_ncmd_next = NULL;
4290 	phba->sli4_hba.io_xri_cnt = cnt;
4291 	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
4292 				 &io_sgl_list, list) {
4293 		lxri = lpfc_sli4_next_xritag(phba);
4294 		if (lxri == NO_XRI) {
4295 			lpfc_printf_log(phba, KERN_ERR,
4296 					LOG_TRACE_EVENT,
4297 					"6075 Failed to allocate xri for "
4298 					"nvme buffer\n");
4299 			rc = -ENOMEM;
4300 			goto out_free_mem;
4301 		}
4302 		lpfc_ncmd->cur_iocbq.sli4_lxritag = lxri;
4303 		lpfc_ncmd->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4304 	}
4305 	cnt = lpfc_io_buf_replenish(phba, &io_sgl_list);
4306 	return 0;
4307 
4308 out_free_mem:
4309 	lpfc_io_free(phba);
4310 	return rc;
4311 }
4312 
4313 /**
4314  * lpfc_new_io_buf - IO buffer allocator for HBA with SLI4 IF spec
4315  * @phba: Pointer to lpfc hba data structure.
4316  * @num_to_alloc: The requested number of buffers to allocate.
4317  *
4318  * This routine allocates nvme buffers for device with SLI-4 interface spec,
4319  * the nvme buffer contains all the necessary information needed to initiate
4320  * an I/O. After allocating up to @num_to_allocate IO buffers and put
4321  * them on a list, it post them to the port by using SGL block post.
4322  *
4323  * Return codes:
4324  *   int - number of IO buffers that were allocated and posted.
4325  *   0 = failure, less than num_to_alloc is a partial failure.
4326  **/
4327 int
4328 lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc)
4329 {
4330 	struct lpfc_io_buf *lpfc_ncmd;
4331 	struct lpfc_iocbq *pwqeq;
4332 	uint16_t iotag, lxri = 0;
4333 	int bcnt, num_posted;
4334 	LIST_HEAD(prep_nblist);
4335 	LIST_HEAD(post_nblist);
4336 	LIST_HEAD(nvme_nblist);
4337 
4338 	phba->sli4_hba.io_xri_cnt = 0;
4339 	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
4340 		lpfc_ncmd = kzalloc(sizeof(*lpfc_ncmd), GFP_KERNEL);
4341 		if (!lpfc_ncmd)
4342 			break;
4343 		/*
4344 		 * Get memory from the pci pool to map the virt space to
4345 		 * pci bus space for an I/O. The DMA buffer includes the
4346 		 * number of SGE's necessary to support the sg_tablesize.
4347 		 */
4348 		lpfc_ncmd->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
4349 						  GFP_KERNEL,
4350 						  &lpfc_ncmd->dma_handle);
4351 		if (!lpfc_ncmd->data) {
4352 			kfree(lpfc_ncmd);
4353 			break;
4354 		}
4355 
4356 		if (phba->cfg_xpsgl && !phba->nvmet_support) {
4357 			INIT_LIST_HEAD(&lpfc_ncmd->dma_sgl_xtra_list);
4358 		} else {
4359 			/*
4360 			 * 4K Page alignment is CRITICAL to BlockGuard, double
4361 			 * check to be sure.
4362 			 */
4363 			if ((phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
4364 			    (((unsigned long)(lpfc_ncmd->data) &
4365 			    (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) {
4366 				lpfc_printf_log(phba, KERN_ERR,
4367 						LOG_TRACE_EVENT,
4368 						"3369 Memory alignment err: "
4369 						"addr=%lx\n",
4370 						(unsigned long)lpfc_ncmd->data);
4371 				dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4372 					      lpfc_ncmd->data,
4373 					      lpfc_ncmd->dma_handle);
4374 				kfree(lpfc_ncmd);
4375 				break;
4376 			}
4377 		}
4378 
4379 		INIT_LIST_HEAD(&lpfc_ncmd->dma_cmd_rsp_list);
4380 
4381 		lxri = lpfc_sli4_next_xritag(phba);
4382 		if (lxri == NO_XRI) {
4383 			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4384 				      lpfc_ncmd->data, lpfc_ncmd->dma_handle);
4385 			kfree(lpfc_ncmd);
4386 			break;
4387 		}
4388 		pwqeq = &lpfc_ncmd->cur_iocbq;
4389 
4390 		/* Allocate iotag for lpfc_ncmd->cur_iocbq. */
4391 		iotag = lpfc_sli_next_iotag(phba, pwqeq);
4392 		if (iotag == 0) {
4393 			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4394 				      lpfc_ncmd->data, lpfc_ncmd->dma_handle);
4395 			kfree(lpfc_ncmd);
4396 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4397 					"6121 Failed to allocate IOTAG for"
4398 					" XRI:0x%x\n", lxri);
4399 			lpfc_sli4_free_xri(phba, lxri);
4400 			break;
4401 		}
4402 		pwqeq->sli4_lxritag = lxri;
4403 		pwqeq->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4404 		pwqeq->context1 = lpfc_ncmd;
4405 
4406 		/* Initialize local short-hand pointers. */
4407 		lpfc_ncmd->dma_sgl = lpfc_ncmd->data;
4408 		lpfc_ncmd->dma_phys_sgl = lpfc_ncmd->dma_handle;
4409 		lpfc_ncmd->cur_iocbq.context1 = lpfc_ncmd;
4410 		spin_lock_init(&lpfc_ncmd->buf_lock);
4411 
4412 		/* add the nvme buffer to a post list */
4413 		list_add_tail(&lpfc_ncmd->list, &post_nblist);
4414 		phba->sli4_hba.io_xri_cnt++;
4415 	}
4416 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
4417 			"6114 Allocate %d out of %d requested new NVME "
4418 			"buffers\n", bcnt, num_to_alloc);
4419 
4420 	/* post the list of nvme buffer sgls to port if available */
4421 	if (!list_empty(&post_nblist))
4422 		num_posted = lpfc_sli4_post_io_sgl_list(
4423 				phba, &post_nblist, bcnt);
4424 	else
4425 		num_posted = 0;
4426 
4427 	return num_posted;
4428 }
4429 
4430 static uint64_t
4431 lpfc_get_wwpn(struct lpfc_hba *phba)
4432 {
4433 	uint64_t wwn;
4434 	int rc;
4435 	LPFC_MBOXQ_t *mboxq;
4436 	MAILBOX_t *mb;
4437 
4438 	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
4439 						GFP_KERNEL);
4440 	if (!mboxq)
4441 		return (uint64_t)-1;
4442 
4443 	/* First get WWN of HBA instance */
4444 	lpfc_read_nv(phba, mboxq);
4445 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4446 	if (rc != MBX_SUCCESS) {
4447 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4448 				"6019 Mailbox failed , mbxCmd x%x "
4449 				"READ_NV, mbxStatus x%x\n",
4450 				bf_get(lpfc_mqe_command, &mboxq->u.mqe),
4451 				bf_get(lpfc_mqe_status, &mboxq->u.mqe));
4452 		mempool_free(mboxq, phba->mbox_mem_pool);
4453 		return (uint64_t) -1;
4454 	}
4455 	mb = &mboxq->u.mb;
4456 	memcpy(&wwn, (char *)mb->un.varRDnvp.portname, sizeof(uint64_t));
4457 	/* wwn is WWPN of HBA instance */
4458 	mempool_free(mboxq, phba->mbox_mem_pool);
4459 	if (phba->sli_rev == LPFC_SLI_REV4)
4460 		return be64_to_cpu(wwn);
4461 	else
4462 		return rol64(wwn, 32);
4463 }
4464 
4465 /**
4466  * lpfc_vmid_res_alloc - Allocates resources for VMID
4467  * @phba: pointer to lpfc hba data structure.
4468  * @vport: pointer to vport data structure
4469  *
4470  * This routine allocated the resources needed for the VMID.
4471  *
4472  * Return codes
4473  *	0 on Success
4474  *	Non-0 on Failure
4475  */
4476 static int
4477 lpfc_vmid_res_alloc(struct lpfc_hba *phba, struct lpfc_vport *vport)
4478 {
4479 	/* VMID feature is supported only on SLI4 */
4480 	if (phba->sli_rev == LPFC_SLI_REV3) {
4481 		phba->cfg_vmid_app_header = 0;
4482 		phba->cfg_vmid_priority_tagging = 0;
4483 	}
4484 
4485 	if (lpfc_is_vmid_enabled(phba)) {
4486 		vport->vmid =
4487 		    kcalloc(phba->cfg_max_vmid, sizeof(struct lpfc_vmid),
4488 			    GFP_KERNEL);
4489 		if (!vport->vmid)
4490 			return -ENOMEM;
4491 
4492 		rwlock_init(&vport->vmid_lock);
4493 
4494 		/* Set the VMID parameters for the vport */
4495 		vport->vmid_priority_tagging = phba->cfg_vmid_priority_tagging;
4496 		vport->vmid_inactivity_timeout =
4497 		    phba->cfg_vmid_inactivity_timeout;
4498 		vport->max_vmid = phba->cfg_max_vmid;
4499 		vport->cur_vmid_cnt = 0;
4500 
4501 		vport->vmid_priority_range = bitmap_zalloc
4502 			(LPFC_VMID_MAX_PRIORITY_RANGE, GFP_KERNEL);
4503 
4504 		if (!vport->vmid_priority_range) {
4505 			kfree(vport->vmid);
4506 			return -ENOMEM;
4507 		}
4508 
4509 		hash_init(vport->hash_table);
4510 	}
4511 	return 0;
4512 }
4513 
4514 /**
4515  * lpfc_create_port - Create an FC port
4516  * @phba: pointer to lpfc hba data structure.
4517  * @instance: a unique integer ID to this FC port.
4518  * @dev: pointer to the device data structure.
4519  *
4520  * This routine creates a FC port for the upper layer protocol. The FC port
4521  * can be created on top of either a physical port or a virtual port provided
4522  * by the HBA. This routine also allocates a SCSI host data structure (shost)
4523  * and associates the FC port created before adding the shost into the SCSI
4524  * layer.
4525  *
4526  * Return codes
4527  *   @vport - pointer to the virtual N_Port data structure.
4528  *   NULL - port create failed.
4529  **/
4530 struct lpfc_vport *
4531 lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
4532 {
4533 	struct lpfc_vport *vport;
4534 	struct Scsi_Host  *shost = NULL;
4535 	struct scsi_host_template *template;
4536 	int error = 0;
4537 	int i;
4538 	uint64_t wwn;
4539 	bool use_no_reset_hba = false;
4540 	int rc;
4541 
4542 	if (lpfc_no_hba_reset_cnt) {
4543 		if (phba->sli_rev < LPFC_SLI_REV4 &&
4544 		    dev == &phba->pcidev->dev) {
4545 			/* Reset the port first */
4546 			lpfc_sli_brdrestart(phba);
4547 			rc = lpfc_sli_chipset_init(phba);
4548 			if (rc)
4549 				return NULL;
4550 		}
4551 		wwn = lpfc_get_wwpn(phba);
4552 	}
4553 
4554 	for (i = 0; i < lpfc_no_hba_reset_cnt; i++) {
4555 		if (wwn == lpfc_no_hba_reset[i]) {
4556 			lpfc_printf_log(phba, KERN_ERR,
4557 					LOG_TRACE_EVENT,
4558 					"6020 Setting use_no_reset port=%llx\n",
4559 					wwn);
4560 			use_no_reset_hba = true;
4561 			break;
4562 		}
4563 	}
4564 
4565 	/* Seed template for SCSI host registration */
4566 	if (dev == &phba->pcidev->dev) {
4567 		template = &phba->port_template;
4568 
4569 		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
4570 			/* Seed physical port template */
4571 			memcpy(template, &lpfc_template, sizeof(*template));
4572 
4573 			if (use_no_reset_hba)
4574 				/* template is for a no reset SCSI Host */
4575 				template->eh_host_reset_handler = NULL;
4576 
4577 			/* Template for all vports this physical port creates */
4578 			memcpy(&phba->vport_template, &lpfc_template,
4579 			       sizeof(*template));
4580 			phba->vport_template.shost_attrs = lpfc_vport_attrs;
4581 			phba->vport_template.eh_bus_reset_handler = NULL;
4582 			phba->vport_template.eh_host_reset_handler = NULL;
4583 			phba->vport_template.vendor_id = 0;
4584 
4585 			/* Initialize the host templates with updated value */
4586 			if (phba->sli_rev == LPFC_SLI_REV4) {
4587 				template->sg_tablesize = phba->cfg_scsi_seg_cnt;
4588 				phba->vport_template.sg_tablesize =
4589 					phba->cfg_scsi_seg_cnt;
4590 			} else {
4591 				template->sg_tablesize = phba->cfg_sg_seg_cnt;
4592 				phba->vport_template.sg_tablesize =
4593 					phba->cfg_sg_seg_cnt;
4594 			}
4595 
4596 		} else {
4597 			/* NVMET is for physical port only */
4598 			memcpy(template, &lpfc_template_nvme,
4599 			       sizeof(*template));
4600 		}
4601 	} else {
4602 		template = &phba->vport_template;
4603 	}
4604 
4605 	shost = scsi_host_alloc(template, sizeof(struct lpfc_vport));
4606 	if (!shost)
4607 		goto out;
4608 
4609 	vport = (struct lpfc_vport *) shost->hostdata;
4610 	vport->phba = phba;
4611 	vport->load_flag |= FC_LOADING;
4612 	vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
4613 	vport->fc_rscn_flush = 0;
4614 	lpfc_get_vport_cfgparam(vport);
4615 
4616 	/* Adjust value in vport */
4617 	vport->cfg_enable_fc4_type = phba->cfg_enable_fc4_type;
4618 
4619 	shost->unique_id = instance;
4620 	shost->max_id = LPFC_MAX_TARGET;
4621 	shost->max_lun = vport->cfg_max_luns;
4622 	shost->this_id = -1;
4623 	shost->max_cmd_len = 16;
4624 
4625 	if (phba->sli_rev == LPFC_SLI_REV4) {
4626 		if (!phba->cfg_fcp_mq_threshold ||
4627 		    phba->cfg_fcp_mq_threshold > phba->cfg_hdw_queue)
4628 			phba->cfg_fcp_mq_threshold = phba->cfg_hdw_queue;
4629 
4630 		shost->nr_hw_queues = min_t(int, 2 * num_possible_nodes(),
4631 					    phba->cfg_fcp_mq_threshold);
4632 
4633 		shost->dma_boundary =
4634 			phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
4635 
4636 		if (phba->cfg_xpsgl && !phba->nvmet_support)
4637 			shost->sg_tablesize = LPFC_MAX_SG_TABLESIZE;
4638 		else
4639 			shost->sg_tablesize = phba->cfg_scsi_seg_cnt;
4640 	} else
4641 		/* SLI-3 has a limited number of hardware queues (3),
4642 		 * thus there is only one for FCP processing.
4643 		 */
4644 		shost->nr_hw_queues = 1;
4645 
4646 	/*
4647 	 * Set initial can_queue value since 0 is no longer supported and
4648 	 * scsi_add_host will fail. This will be adjusted later based on the
4649 	 * max xri value determined in hba setup.
4650 	 */
4651 	shost->can_queue = phba->cfg_hba_queue_depth - 10;
4652 	if (dev != &phba->pcidev->dev) {
4653 		shost->transportt = lpfc_vport_transport_template;
4654 		vport->port_type = LPFC_NPIV_PORT;
4655 	} else {
4656 		shost->transportt = lpfc_transport_template;
4657 		vport->port_type = LPFC_PHYSICAL_PORT;
4658 	}
4659 
4660 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
4661 			"9081 CreatePort TMPLATE type %x TBLsize %d "
4662 			"SEGcnt %d/%d\n",
4663 			vport->port_type, shost->sg_tablesize,
4664 			phba->cfg_scsi_seg_cnt, phba->cfg_sg_seg_cnt);
4665 
4666 	/* Allocate the resources for VMID */
4667 	rc = lpfc_vmid_res_alloc(phba, vport);
4668 
4669 	if (rc)
4670 		goto out_put_shost;
4671 
4672 	/* Initialize all internally managed lists. */
4673 	INIT_LIST_HEAD(&vport->fc_nodes);
4674 	INIT_LIST_HEAD(&vport->rcv_buffer_list);
4675 	spin_lock_init(&vport->work_port_lock);
4676 
4677 	timer_setup(&vport->fc_disctmo, lpfc_disc_timeout, 0);
4678 
4679 	timer_setup(&vport->els_tmofunc, lpfc_els_timeout, 0);
4680 
4681 	timer_setup(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo, 0);
4682 
4683 	if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
4684 		lpfc_setup_bg(phba, shost);
4685 
4686 	error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
4687 	if (error)
4688 		goto out_free_vmid;
4689 
4690 	spin_lock_irq(&phba->port_list_lock);
4691 	list_add_tail(&vport->listentry, &phba->port_list);
4692 	spin_unlock_irq(&phba->port_list_lock);
4693 	return vport;
4694 
4695 out_free_vmid:
4696 	kfree(vport->vmid);
4697 	bitmap_free(vport->vmid_priority_range);
4698 out_put_shost:
4699 	scsi_host_put(shost);
4700 out:
4701 	return NULL;
4702 }
4703 
4704 /**
4705  * destroy_port -  destroy an FC port
4706  * @vport: pointer to an lpfc virtual N_Port data structure.
4707  *
4708  * This routine destroys a FC port from the upper layer protocol. All the
4709  * resources associated with the port are released.
4710  **/
4711 void
4712 destroy_port(struct lpfc_vport *vport)
4713 {
4714 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4715 	struct lpfc_hba  *phba = vport->phba;
4716 
4717 	lpfc_debugfs_terminate(vport);
4718 	fc_remove_host(shost);
4719 	scsi_remove_host(shost);
4720 
4721 	spin_lock_irq(&phba->port_list_lock);
4722 	list_del_init(&vport->listentry);
4723 	spin_unlock_irq(&phba->port_list_lock);
4724 
4725 	lpfc_cleanup(vport);
4726 	return;
4727 }
4728 
4729 /**
4730  * lpfc_get_instance - Get a unique integer ID
4731  *
4732  * This routine allocates a unique integer ID from lpfc_hba_index pool. It
4733  * uses the kernel idr facility to perform the task.
4734  *
4735  * Return codes:
4736  *   instance - a unique integer ID allocated as the new instance.
4737  *   -1 - lpfc get instance failed.
4738  **/
4739 int
4740 lpfc_get_instance(void)
4741 {
4742 	int ret;
4743 
4744 	ret = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL);
4745 	return ret < 0 ? -1 : ret;
4746 }
4747 
4748 /**
4749  * lpfc_scan_finished - method for SCSI layer to detect whether scan is done
4750  * @shost: pointer to SCSI host data structure.
4751  * @time: elapsed time of the scan in jiffies.
4752  *
4753  * This routine is called by the SCSI layer with a SCSI host to determine
4754  * whether the scan host is finished.
4755  *
4756  * Note: there is no scan_start function as adapter initialization will have
4757  * asynchronously kicked off the link initialization.
4758  *
4759  * Return codes
4760  *   0 - SCSI host scan is not over yet.
4761  *   1 - SCSI host scan is over.
4762  **/
4763 int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
4764 {
4765 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4766 	struct lpfc_hba   *phba = vport->phba;
4767 	int stat = 0;
4768 
4769 	spin_lock_irq(shost->host_lock);
4770 
4771 	if (vport->load_flag & FC_UNLOADING) {
4772 		stat = 1;
4773 		goto finished;
4774 	}
4775 	if (time >= msecs_to_jiffies(30 * 1000)) {
4776 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4777 				"0461 Scanning longer than 30 "
4778 				"seconds.  Continuing initialization\n");
4779 		stat = 1;
4780 		goto finished;
4781 	}
4782 	if (time >= msecs_to_jiffies(15 * 1000) &&
4783 	    phba->link_state <= LPFC_LINK_DOWN) {
4784 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4785 				"0465 Link down longer than 15 "
4786 				"seconds.  Continuing initialization\n");
4787 		stat = 1;
4788 		goto finished;
4789 	}
4790 
4791 	if (vport->port_state != LPFC_VPORT_READY)
4792 		goto finished;
4793 	if (vport->num_disc_nodes || vport->fc_prli_sent)
4794 		goto finished;
4795 	if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000))
4796 		goto finished;
4797 	if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
4798 		goto finished;
4799 
4800 	stat = 1;
4801 
4802 finished:
4803 	spin_unlock_irq(shost->host_lock);
4804 	return stat;
4805 }
4806 
4807 static void lpfc_host_supported_speeds_set(struct Scsi_Host *shost)
4808 {
4809 	struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
4810 	struct lpfc_hba   *phba = vport->phba;
4811 
4812 	fc_host_supported_speeds(shost) = 0;
4813 	/*
4814 	 * Avoid reporting supported link speed for FCoE as it can't be
4815 	 * controlled via FCoE.
4816 	 */
4817 	if (phba->hba_flag & HBA_FCOE_MODE)
4818 		return;
4819 
4820 	if (phba->lmt & LMT_256Gb)
4821 		fc_host_supported_speeds(shost) |= FC_PORTSPEED_256GBIT;
4822 	if (phba->lmt & LMT_128Gb)
4823 		fc_host_supported_speeds(shost) |= FC_PORTSPEED_128GBIT;
4824 	if (phba->lmt & LMT_64Gb)
4825 		fc_host_supported_speeds(shost) |= FC_PORTSPEED_64GBIT;
4826 	if (phba->lmt & LMT_32Gb)
4827 		fc_host_supported_speeds(shost) |= FC_PORTSPEED_32GBIT;
4828 	if (phba->lmt & LMT_16Gb)
4829 		fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT;
4830 	if (phba->lmt & LMT_10Gb)
4831 		fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
4832 	if (phba->lmt & LMT_8Gb)
4833 		fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
4834 	if (phba->lmt & LMT_4Gb)
4835 		fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
4836 	if (phba->lmt & LMT_2Gb)
4837 		fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
4838 	if (phba->lmt & LMT_1Gb)
4839 		fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
4840 }
4841 
4842 /**
4843  * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port
4844  * @shost: pointer to SCSI host data structure.
4845  *
4846  * This routine initializes a given SCSI host attributes on a FC port. The
4847  * SCSI host can be either on top of a physical port or a virtual port.
4848  **/
4849 void lpfc_host_attrib_init(struct Scsi_Host *shost)
4850 {
4851 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4852 	struct lpfc_hba   *phba = vport->phba;
4853 	/*
4854 	 * Set fixed host attributes.  Must done after lpfc_sli_hba_setup().
4855 	 */
4856 
4857 	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
4858 	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
4859 	fc_host_supported_classes(shost) = FC_COS_CLASS3;
4860 
4861 	memset(fc_host_supported_fc4s(shost), 0,
4862 	       sizeof(fc_host_supported_fc4s(shost)));
4863 	fc_host_supported_fc4s(shost)[2] = 1;
4864 	fc_host_supported_fc4s(shost)[7] = 1;
4865 
4866 	lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
4867 				 sizeof fc_host_symbolic_name(shost));
4868 
4869 	lpfc_host_supported_speeds_set(shost);
4870 
4871 	fc_host_maxframe_size(shost) =
4872 		(((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
4873 		(uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;
4874 
4875 	fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo;
4876 
4877 	/* This value is also unchanging */
4878 	memset(fc_host_active_fc4s(shost), 0,
4879 	       sizeof(fc_host_active_fc4s(shost)));
4880 	fc_host_active_fc4s(shost)[2] = 1;
4881 	fc_host_active_fc4s(shost)[7] = 1;
4882 
4883 	fc_host_max_npiv_vports(shost) = phba->max_vpi;
4884 	spin_lock_irq(shost->host_lock);
4885 	vport->load_flag &= ~FC_LOADING;
4886 	spin_unlock_irq(shost->host_lock);
4887 }
4888 
4889 /**
4890  * lpfc_stop_port_s3 - Stop SLI3 device port
4891  * @phba: pointer to lpfc hba data structure.
4892  *
4893  * This routine is invoked to stop an SLI3 device port, it stops the device
4894  * from generating interrupts and stops the device driver's timers for the
4895  * device.
4896  **/
4897 static void
4898 lpfc_stop_port_s3(struct lpfc_hba *phba)
4899 {
4900 	/* Clear all interrupt enable conditions */
4901 	writel(0, phba->HCregaddr);
4902 	readl(phba->HCregaddr); /* flush */
4903 	/* Clear all pending interrupts */
4904 	writel(0xffffffff, phba->HAregaddr);
4905 	readl(phba->HAregaddr); /* flush */
4906 
4907 	/* Reset some HBA SLI setup states */
4908 	lpfc_stop_hba_timers(phba);
4909 	phba->pport->work_port_events = 0;
4910 }
4911 
4912 /**
4913  * lpfc_stop_port_s4 - Stop SLI4 device port
4914  * @phba: pointer to lpfc hba data structure.
4915  *
4916  * This routine is invoked to stop an SLI4 device port, it stops the device
4917  * from generating interrupts and stops the device driver's timers for the
4918  * device.
4919  **/
4920 static void
4921 lpfc_stop_port_s4(struct lpfc_hba *phba)
4922 {
4923 	/* Reset some HBA SLI4 setup states */
4924 	lpfc_stop_hba_timers(phba);
4925 	if (phba->pport)
4926 		phba->pport->work_port_events = 0;
4927 	phba->sli4_hba.intr_enable = 0;
4928 }
4929 
4930 /**
4931  * lpfc_stop_port - Wrapper function for stopping hba port
4932  * @phba: Pointer to HBA context object.
4933  *
4934  * This routine wraps the actual SLI3 or SLI4 hba stop port routine from
4935  * the API jump table function pointer from the lpfc_hba struct.
4936  **/
4937 void
4938 lpfc_stop_port(struct lpfc_hba *phba)
4939 {
4940 	phba->lpfc_stop_port(phba);
4941 
4942 	if (phba->wq)
4943 		flush_workqueue(phba->wq);
4944 }
4945 
4946 /**
4947  * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer
4948  * @phba: Pointer to hba for which this call is being executed.
4949  *
4950  * This routine starts the timer waiting for the FCF rediscovery to complete.
4951  **/
4952 void
4953 lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
4954 {
4955 	unsigned long fcf_redisc_wait_tmo =
4956 		(jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
4957 	/* Start fcf rediscovery wait period timer */
4958 	mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
4959 	spin_lock_irq(&phba->hbalock);
4960 	/* Allow action to new fcf asynchronous event */
4961 	phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
4962 	/* Mark the FCF rediscovery pending state */
4963 	phba->fcf.fcf_flag |= FCF_REDISC_PEND;
4964 	spin_unlock_irq(&phba->hbalock);
4965 }
4966 
4967 /**
4968  * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
4969  * @t: Timer context used to obtain the pointer to lpfc hba data structure.
4970  *
4971  * This routine is invoked when waiting for FCF table rediscover has been
4972  * timed out. If new FCF record(s) has (have) been discovered during the
4973  * wait period, a new FCF event shall be added to the FCOE async event
4974  * list, and then worker thread shall be waked up for processing from the
4975  * worker thread context.
4976  **/
4977 static void
4978 lpfc_sli4_fcf_redisc_wait_tmo(struct timer_list *t)
4979 {
4980 	struct lpfc_hba *phba = from_timer(phba, t, fcf.redisc_wait);
4981 
4982 	/* Don't send FCF rediscovery event if timer cancelled */
4983 	spin_lock_irq(&phba->hbalock);
4984 	if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
4985 		spin_unlock_irq(&phba->hbalock);
4986 		return;
4987 	}
4988 	/* Clear FCF rediscovery timer pending flag */
4989 	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
4990 	/* FCF rediscovery event to worker thread */
4991 	phba->fcf.fcf_flag |= FCF_REDISC_EVT;
4992 	spin_unlock_irq(&phba->hbalock);
4993 	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
4994 			"2776 FCF rediscover quiescent timer expired\n");
4995 	/* wake up worker thread */
4996 	lpfc_worker_wake_up(phba);
4997 }
4998 
4999 /**
5000  * lpfc_vmid_poll - VMID timeout detection
5001  * @t: Timer context used to obtain the pointer to lpfc hba data structure.
5002  *
5003  * This routine is invoked when there is no I/O on by a VM for the specified
5004  * amount of time. When this situation is detected, the VMID has to be
5005  * deregistered from the switch and all the local resources freed. The VMID
5006  * will be reassigned to the VM once the I/O begins.
5007  **/
5008 static void
5009 lpfc_vmid_poll(struct timer_list *t)
5010 {
5011 	struct lpfc_hba *phba = from_timer(phba, t, inactive_vmid_poll);
5012 	u32 wake_up = 0;
5013 
5014 	/* check if there is a need to issue QFPA */
5015 	if (phba->pport->vmid_priority_tagging) {
5016 		wake_up = 1;
5017 		phba->pport->work_port_events |= WORKER_CHECK_VMID_ISSUE_QFPA;
5018 	}
5019 
5020 	/* Is the vmid inactivity timer enabled */
5021 	if (phba->pport->vmid_inactivity_timeout ||
5022 	    phba->pport->load_flag & FC_DEREGISTER_ALL_APP_ID) {
5023 		wake_up = 1;
5024 		phba->pport->work_port_events |= WORKER_CHECK_INACTIVE_VMID;
5025 	}
5026 
5027 	if (wake_up)
5028 		lpfc_worker_wake_up(phba);
5029 
5030 	/* restart the timer for the next iteration */
5031 	mod_timer(&phba->inactive_vmid_poll, jiffies + msecs_to_jiffies(1000 *
5032 							LPFC_VMID_TIMER));
5033 }
5034 
5035 /**
5036  * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
5037  * @phba: pointer to lpfc hba data structure.
5038  * @acqe_link: pointer to the async link completion queue entry.
5039  *
5040  * This routine is to parse the SLI4 link-attention link fault code.
5041  **/
5042 static void
5043 lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
5044 			   struct lpfc_acqe_link *acqe_link)
5045 {
5046 	switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
5047 	case LPFC_ASYNC_LINK_FAULT_NONE:
5048 	case LPFC_ASYNC_LINK_FAULT_LOCAL:
5049 	case LPFC_ASYNC_LINK_FAULT_REMOTE:
5050 	case LPFC_ASYNC_LINK_FAULT_LR_LRR:
5051 		break;
5052 	default:
5053 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5054 				"0398 Unknown link fault code: x%x\n",
5055 				bf_get(lpfc_acqe_link_fault, acqe_link));
5056 		break;
5057 	}
5058 }
5059 
5060 /**
5061  * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
5062  * @phba: pointer to lpfc hba data structure.
5063  * @acqe_link: pointer to the async link completion queue entry.
5064  *
5065  * This routine is to parse the SLI4 link attention type and translate it
5066  * into the base driver's link attention type coding.
5067  *
5068  * Return: Link attention type in terms of base driver's coding.
5069  **/
5070 static uint8_t
5071 lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
5072 			  struct lpfc_acqe_link *acqe_link)
5073 {
5074 	uint8_t att_type;
5075 
5076 	switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
5077 	case LPFC_ASYNC_LINK_STATUS_DOWN:
5078 	case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
5079 		att_type = LPFC_ATT_LINK_DOWN;
5080 		break;
5081 	case LPFC_ASYNC_LINK_STATUS_UP:
5082 		/* Ignore physical link up events - wait for logical link up */
5083 		att_type = LPFC_ATT_RESERVED;
5084 		break;
5085 	case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
5086 		att_type = LPFC_ATT_LINK_UP;
5087 		break;
5088 	default:
5089 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5090 				"0399 Invalid link attention type: x%x\n",
5091 				bf_get(lpfc_acqe_link_status, acqe_link));
5092 		att_type = LPFC_ATT_RESERVED;
5093 		break;
5094 	}
5095 	return att_type;
5096 }
5097 
5098 /**
5099  * lpfc_sli_port_speed_get - Get sli3 link speed code to link speed
5100  * @phba: pointer to lpfc hba data structure.
5101  *
5102  * This routine is to get an SLI3 FC port's link speed in Mbps.
5103  *
5104  * Return: link speed in terms of Mbps.
5105  **/
5106 uint32_t
5107 lpfc_sli_port_speed_get(struct lpfc_hba *phba)
5108 {
5109 	uint32_t link_speed;
5110 
5111 	if (!lpfc_is_link_up(phba))
5112 		return 0;
5113 
5114 	if (phba->sli_rev <= LPFC_SLI_REV3) {
5115 		switch (phba->fc_linkspeed) {
5116 		case LPFC_LINK_SPEED_1GHZ:
5117 			link_speed = 1000;
5118 			break;
5119 		case LPFC_LINK_SPEED_2GHZ:
5120 			link_speed = 2000;
5121 			break;
5122 		case LPFC_LINK_SPEED_4GHZ:
5123 			link_speed = 4000;
5124 			break;
5125 		case LPFC_LINK_SPEED_8GHZ:
5126 			link_speed = 8000;
5127 			break;
5128 		case LPFC_LINK_SPEED_10GHZ:
5129 			link_speed = 10000;
5130 			break;
5131 		case LPFC_LINK_SPEED_16GHZ:
5132 			link_speed = 16000;
5133 			break;
5134 		default:
5135 			link_speed = 0;
5136 		}
5137 	} else {
5138 		if (phba->sli4_hba.link_state.logical_speed)
5139 			link_speed =
5140 			      phba->sli4_hba.link_state.logical_speed;
5141 		else
5142 			link_speed = phba->sli4_hba.link_state.speed;
5143 	}
5144 	return link_speed;
5145 }
5146 
5147 /**
5148  * lpfc_sli4_port_speed_parse - Parse async evt link speed code to link speed
5149  * @phba: pointer to lpfc hba data structure.
5150  * @evt_code: asynchronous event code.
5151  * @speed_code: asynchronous event link speed code.
5152  *
5153  * This routine is to parse the giving SLI4 async event link speed code into
5154  * value of Mbps for the link speed.
5155  *
5156  * Return: link speed in terms of Mbps.
5157  **/
5158 static uint32_t
5159 lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code,
5160 			   uint8_t speed_code)
5161 {
5162 	uint32_t port_speed;
5163 
5164 	switch (evt_code) {
5165 	case LPFC_TRAILER_CODE_LINK:
5166 		switch (speed_code) {
5167 		case LPFC_ASYNC_LINK_SPEED_ZERO:
5168 			port_speed = 0;
5169 			break;
5170 		case LPFC_ASYNC_LINK_SPEED_10MBPS:
5171 			port_speed = 10;
5172 			break;
5173 		case LPFC_ASYNC_LINK_SPEED_100MBPS:
5174 			port_speed = 100;
5175 			break;
5176 		case LPFC_ASYNC_LINK_SPEED_1GBPS:
5177 			port_speed = 1000;
5178 			break;
5179 		case LPFC_ASYNC_LINK_SPEED_10GBPS:
5180 			port_speed = 10000;
5181 			break;
5182 		case LPFC_ASYNC_LINK_SPEED_20GBPS:
5183 			port_speed = 20000;
5184 			break;
5185 		case LPFC_ASYNC_LINK_SPEED_25GBPS:
5186 			port_speed = 25000;
5187 			break;
5188 		case LPFC_ASYNC_LINK_SPEED_40GBPS:
5189 			port_speed = 40000;
5190 			break;
5191 		case LPFC_ASYNC_LINK_SPEED_100GBPS:
5192 			port_speed = 100000;
5193 			break;
5194 		default:
5195 			port_speed = 0;
5196 		}
5197 		break;
5198 	case LPFC_TRAILER_CODE_FC:
5199 		switch (speed_code) {
5200 		case LPFC_FC_LA_SPEED_UNKNOWN:
5201 			port_speed = 0;
5202 			break;
5203 		case LPFC_FC_LA_SPEED_1G:
5204 			port_speed = 1000;
5205 			break;
5206 		case LPFC_FC_LA_SPEED_2G:
5207 			port_speed = 2000;
5208 			break;
5209 		case LPFC_FC_LA_SPEED_4G:
5210 			port_speed = 4000;
5211 			break;
5212 		case LPFC_FC_LA_SPEED_8G:
5213 			port_speed = 8000;
5214 			break;
5215 		case LPFC_FC_LA_SPEED_10G:
5216 			port_speed = 10000;
5217 			break;
5218 		case LPFC_FC_LA_SPEED_16G:
5219 			port_speed = 16000;
5220 			break;
5221 		case LPFC_FC_LA_SPEED_32G:
5222 			port_speed = 32000;
5223 			break;
5224 		case LPFC_FC_LA_SPEED_64G:
5225 			port_speed = 64000;
5226 			break;
5227 		case LPFC_FC_LA_SPEED_128G:
5228 			port_speed = 128000;
5229 			break;
5230 		case LPFC_FC_LA_SPEED_256G:
5231 			port_speed = 256000;
5232 			break;
5233 		default:
5234 			port_speed = 0;
5235 		}
5236 		break;
5237 	default:
5238 		port_speed = 0;
5239 	}
5240 	return port_speed;
5241 }
5242 
5243 /**
5244  * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event
5245  * @phba: pointer to lpfc hba data structure.
5246  * @acqe_link: pointer to the async link completion queue entry.
5247  *
5248  * This routine is to handle the SLI4 asynchronous FCoE link event.
5249  **/
5250 static void
5251 lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
5252 			 struct lpfc_acqe_link *acqe_link)
5253 {
5254 	struct lpfc_dmabuf *mp;
5255 	LPFC_MBOXQ_t *pmb;
5256 	MAILBOX_t *mb;
5257 	struct lpfc_mbx_read_top *la;
5258 	uint8_t att_type;
5259 	int rc;
5260 
5261 	att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
5262 	if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP)
5263 		return;
5264 	phba->fcoe_eventtag = acqe_link->event_tag;
5265 	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5266 	if (!pmb) {
5267 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5268 				"0395 The mboxq allocation failed\n");
5269 		return;
5270 	}
5271 	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5272 	if (!mp) {
5273 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5274 				"0396 The lpfc_dmabuf allocation failed\n");
5275 		goto out_free_pmb;
5276 	}
5277 	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
5278 	if (!mp->virt) {
5279 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5280 				"0397 The mbuf allocation failed\n");
5281 		goto out_free_dmabuf;
5282 	}
5283 
5284 	/* Cleanup any outstanding ELS commands */
5285 	lpfc_els_flush_all_cmd(phba);
5286 
5287 	/* Block ELS IOCBs until we have done process link event */
5288 	phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;
5289 
5290 	/* Update link event statistics */
5291 	phba->sli.slistat.link_event++;
5292 
5293 	/* Create lpfc_handle_latt mailbox command from link ACQE */
5294 	lpfc_read_topology(phba, pmb, mp);
5295 	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
5296 	pmb->vport = phba->pport;
5297 
5298 	/* Keep the link status for extra SLI4 state machine reference */
5299 	phba->sli4_hba.link_state.speed =
5300 			lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_LINK,
5301 				bf_get(lpfc_acqe_link_speed, acqe_link));
5302 	phba->sli4_hba.link_state.duplex =
5303 				bf_get(lpfc_acqe_link_duplex, acqe_link);
5304 	phba->sli4_hba.link_state.status =
5305 				bf_get(lpfc_acqe_link_status, acqe_link);
5306 	phba->sli4_hba.link_state.type =
5307 				bf_get(lpfc_acqe_link_type, acqe_link);
5308 	phba->sli4_hba.link_state.number =
5309 				bf_get(lpfc_acqe_link_number, acqe_link);
5310 	phba->sli4_hba.link_state.fault =
5311 				bf_get(lpfc_acqe_link_fault, acqe_link);
5312 	phba->sli4_hba.link_state.logical_speed =
5313 			bf_get(lpfc_acqe_logical_link_speed, acqe_link) * 10;
5314 
5315 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5316 			"2900 Async FC/FCoE Link event - Speed:%dGBit "
5317 			"duplex:x%x LA Type:x%x Port Type:%d Port Number:%d "
5318 			"Logical speed:%dMbps Fault:%d\n",
5319 			phba->sli4_hba.link_state.speed,
5320 			phba->sli4_hba.link_state.topology,
5321 			phba->sli4_hba.link_state.status,
5322 			phba->sli4_hba.link_state.type,
5323 			phba->sli4_hba.link_state.number,
5324 			phba->sli4_hba.link_state.logical_speed,
5325 			phba->sli4_hba.link_state.fault);
5326 	/*
5327 	 * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch
5328 	 * topology info. Note: Optional for non FC-AL ports.
5329 	 */
5330 	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
5331 		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
5332 		if (rc == MBX_NOT_FINISHED) {
5333 			lpfc_mbuf_free(phba, mp->virt, mp->phys);
5334 			goto out_free_dmabuf;
5335 		}
5336 		return;
5337 	}
5338 	/*
5339 	 * For FCoE Mode: fill in all the topology information we need and call
5340 	 * the READ_TOPOLOGY completion routine to continue without actually
5341 	 * sending the READ_TOPOLOGY mailbox command to the port.
5342 	 */
5343 	/* Initialize completion status */
5344 	mb = &pmb->u.mb;
5345 	mb->mbxStatus = MBX_SUCCESS;
5346 
5347 	/* Parse port fault information field */
5348 	lpfc_sli4_parse_latt_fault(phba, acqe_link);
5349 
5350 	/* Parse and translate link attention fields */
5351 	la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
5352 	la->eventTag = acqe_link->event_tag;
5353 	bf_set(lpfc_mbx_read_top_att_type, la, att_type);
5354 	bf_set(lpfc_mbx_read_top_link_spd, la,
5355 	       (bf_get(lpfc_acqe_link_speed, acqe_link)));
5356 
5357 	/* Fake the the following irrelvant fields */
5358 	bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT);
5359 	bf_set(lpfc_mbx_read_top_alpa_granted, la, 0);
5360 	bf_set(lpfc_mbx_read_top_il, la, 0);
5361 	bf_set(lpfc_mbx_read_top_pb, la, 0);
5362 	bf_set(lpfc_mbx_read_top_fa, la, 0);
5363 	bf_set(lpfc_mbx_read_top_mm, la, 0);
5364 
5365 	/* Invoke the lpfc_handle_latt mailbox command callback function */
5366 	lpfc_mbx_cmpl_read_topology(phba, pmb);
5367 
5368 	return;
5369 
5370 out_free_dmabuf:
5371 	kfree(mp);
5372 out_free_pmb:
5373 	mempool_free(pmb, phba->mbox_mem_pool);
5374 }
5375 
5376 /**
5377  * lpfc_async_link_speed_to_read_top - Parse async evt link speed code to read
5378  * topology.
5379  * @phba: pointer to lpfc hba data structure.
5380  * @speed_code: asynchronous event link speed code.
5381  *
5382  * This routine is to parse the giving SLI4 async event link speed code into
5383  * value of Read topology link speed.
5384  *
5385  * Return: link speed in terms of Read topology.
5386  **/
5387 static uint8_t
5388 lpfc_async_link_speed_to_read_top(struct lpfc_hba *phba, uint8_t speed_code)
5389 {
5390 	uint8_t port_speed;
5391 
5392 	switch (speed_code) {
5393 	case LPFC_FC_LA_SPEED_1G:
5394 		port_speed = LPFC_LINK_SPEED_1GHZ;
5395 		break;
5396 	case LPFC_FC_LA_SPEED_2G:
5397 		port_speed = LPFC_LINK_SPEED_2GHZ;
5398 		break;
5399 	case LPFC_FC_LA_SPEED_4G:
5400 		port_speed = LPFC_LINK_SPEED_4GHZ;
5401 		break;
5402 	case LPFC_FC_LA_SPEED_8G:
5403 		port_speed = LPFC_LINK_SPEED_8GHZ;
5404 		break;
5405 	case LPFC_FC_LA_SPEED_16G:
5406 		port_speed = LPFC_LINK_SPEED_16GHZ;
5407 		break;
5408 	case LPFC_FC_LA_SPEED_32G:
5409 		port_speed = LPFC_LINK_SPEED_32GHZ;
5410 		break;
5411 	case LPFC_FC_LA_SPEED_64G:
5412 		port_speed = LPFC_LINK_SPEED_64GHZ;
5413 		break;
5414 	case LPFC_FC_LA_SPEED_128G:
5415 		port_speed = LPFC_LINK_SPEED_128GHZ;
5416 		break;
5417 	case LPFC_FC_LA_SPEED_256G:
5418 		port_speed = LPFC_LINK_SPEED_256GHZ;
5419 		break;
5420 	default:
5421 		port_speed = 0;
5422 		break;
5423 	}
5424 
5425 	return port_speed;
5426 }
5427 
5428 void
5429 lpfc_cgn_dump_rxmonitor(struct lpfc_hba *phba)
5430 {
5431 	struct rxtable_entry *entry;
5432 	int cnt = 0, head, tail, last, start;
5433 
5434 	head = atomic_read(&phba->rxtable_idx_head);
5435 	tail = atomic_read(&phba->rxtable_idx_tail);
5436 	if (!phba->rxtable || head == tail) {
5437 		lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT,
5438 				"4411 Rxtable is empty\n");
5439 		return;
5440 	}
5441 	last = tail;
5442 	start = head;
5443 
5444 	/* Display the last LPFC_MAX_RXMONITOR_DUMP entries from the rxtable */
5445 	while (start != last) {
5446 		if (start)
5447 			start--;
5448 		else
5449 			start = LPFC_MAX_RXMONITOR_ENTRY - 1;
5450 		entry = &phba->rxtable[start];
5451 		lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
5452 				"4410 %02d: MBPI %lld Xmit %lld Cmpl %lld "
5453 				"Lat %lld ASz %lld Info %02d BWUtil %d "
5454 				"Int %d slot %d\n",
5455 				cnt, entry->max_bytes_per_interval,
5456 				entry->total_bytes, entry->rcv_bytes,
5457 				entry->avg_io_latency, entry->avg_io_size,
5458 				entry->cmf_info, entry->timer_utilization,
5459 				entry->timer_interval, start);
5460 		cnt++;
5461 		if (cnt >= LPFC_MAX_RXMONITOR_DUMP)
5462 			return;
5463 	}
5464 }
5465 
5466 /**
5467  * lpfc_cgn_update_stat - Save data into congestion stats buffer
5468  * @phba: pointer to lpfc hba data structure.
5469  * @dtag: FPIN descriptor received
5470  *
5471  * Increment the FPIN received counter/time when it happens.
5472  */
5473 void
5474 lpfc_cgn_update_stat(struct lpfc_hba *phba, uint32_t dtag)
5475 {
5476 	struct lpfc_cgn_info *cp;
5477 	struct tm broken;
5478 	struct timespec64 cur_time;
5479 	u32 cnt;
5480 	u16 value;
5481 
5482 	/* Make sure we have a congestion info buffer */
5483 	if (!phba->cgn_i)
5484 		return;
5485 	cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
5486 	ktime_get_real_ts64(&cur_time);
5487 	time64_to_tm(cur_time.tv_sec, 0, &broken);
5488 
5489 	/* Update congestion statistics */
5490 	switch (dtag) {
5491 	case ELS_DTAG_LNK_INTEGRITY:
5492 		cnt = le32_to_cpu(cp->link_integ_notification);
5493 		cnt++;
5494 		cp->link_integ_notification = cpu_to_le32(cnt);
5495 
5496 		cp->cgn_stat_lnk_month = broken.tm_mon + 1;
5497 		cp->cgn_stat_lnk_day = broken.tm_mday;
5498 		cp->cgn_stat_lnk_year = broken.tm_year - 100;
5499 		cp->cgn_stat_lnk_hour = broken.tm_hour;
5500 		cp->cgn_stat_lnk_min = broken.tm_min;
5501 		cp->cgn_stat_lnk_sec = broken.tm_sec;
5502 		break;
5503 	case ELS_DTAG_DELIVERY:
5504 		cnt = le32_to_cpu(cp->delivery_notification);
5505 		cnt++;
5506 		cp->delivery_notification = cpu_to_le32(cnt);
5507 
5508 		cp->cgn_stat_del_month = broken.tm_mon + 1;
5509 		cp->cgn_stat_del_day = broken.tm_mday;
5510 		cp->cgn_stat_del_year = broken.tm_year - 100;
5511 		cp->cgn_stat_del_hour = broken.tm_hour;
5512 		cp->cgn_stat_del_min = broken.tm_min;
5513 		cp->cgn_stat_del_sec = broken.tm_sec;
5514 		break;
5515 	case ELS_DTAG_PEER_CONGEST:
5516 		cnt = le32_to_cpu(cp->cgn_peer_notification);
5517 		cnt++;
5518 		cp->cgn_peer_notification = cpu_to_le32(cnt);
5519 
5520 		cp->cgn_stat_peer_month = broken.tm_mon + 1;
5521 		cp->cgn_stat_peer_day = broken.tm_mday;
5522 		cp->cgn_stat_peer_year = broken.tm_year - 100;
5523 		cp->cgn_stat_peer_hour = broken.tm_hour;
5524 		cp->cgn_stat_peer_min = broken.tm_min;
5525 		cp->cgn_stat_peer_sec = broken.tm_sec;
5526 		break;
5527 	case ELS_DTAG_CONGESTION:
5528 		cnt = le32_to_cpu(cp->cgn_notification);
5529 		cnt++;
5530 		cp->cgn_notification = cpu_to_le32(cnt);
5531 
5532 		cp->cgn_stat_cgn_month = broken.tm_mon + 1;
5533 		cp->cgn_stat_cgn_day = broken.tm_mday;
5534 		cp->cgn_stat_cgn_year = broken.tm_year - 100;
5535 		cp->cgn_stat_cgn_hour = broken.tm_hour;
5536 		cp->cgn_stat_cgn_min = broken.tm_min;
5537 		cp->cgn_stat_cgn_sec = broken.tm_sec;
5538 	}
5539 	if (phba->cgn_fpin_frequency &&
5540 	    phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) {
5541 		value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency;
5542 		cp->cgn_stat_npm = value;
5543 	}
5544 	value = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ,
5545 				    LPFC_CGN_CRC32_SEED);
5546 	cp->cgn_info_crc = cpu_to_le32(value);
5547 }
5548 
5549 /**
5550  * lpfc_cgn_save_evt_cnt - Save data into registered congestion buffer
5551  * @phba: pointer to lpfc hba data structure.
5552  *
5553  * Save the congestion event data every minute.
5554  * On the hour collapse all the minute data into hour data. Every day
5555  * collapse all the hour data into daily data. Separate driver
5556  * and fabrc congestion event counters that will be saved out
5557  * to the registered congestion buffer every minute.
5558  */
5559 static void
5560 lpfc_cgn_save_evt_cnt(struct lpfc_hba *phba)
5561 {
5562 	struct lpfc_cgn_info *cp;
5563 	struct tm broken;
5564 	struct timespec64 cur_time;
5565 	uint32_t i, index;
5566 	uint16_t value, mvalue;
5567 	uint64_t bps;
5568 	uint32_t mbps;
5569 	uint32_t dvalue, wvalue, lvalue, avalue;
5570 	uint64_t latsum;
5571 	__le16 *ptr;
5572 	__le32 *lptr;
5573 	__le16 *mptr;
5574 
5575 	/* Make sure we have a congestion info buffer */
5576 	if (!phba->cgn_i)
5577 		return;
5578 	cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
5579 
5580 	if (time_before(jiffies, phba->cgn_evt_timestamp))
5581 		return;
5582 	phba->cgn_evt_timestamp = jiffies +
5583 			msecs_to_jiffies(LPFC_CGN_TIMER_TO_MIN);
5584 	phba->cgn_evt_minute++;
5585 
5586 	/* We should get to this point in the routine on 1 minute intervals */
5587 
5588 	ktime_get_real_ts64(&cur_time);
5589 	time64_to_tm(cur_time.tv_sec, 0, &broken);
5590 
5591 	if (phba->cgn_fpin_frequency &&
5592 	    phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) {
5593 		value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency;
5594 		cp->cgn_stat_npm = value;
5595 	}
5596 
5597 	/* Read and clear the latency counters for this minute */
5598 	lvalue = atomic_read(&phba->cgn_latency_evt_cnt);
5599 	latsum = atomic64_read(&phba->cgn_latency_evt);
5600 	atomic_set(&phba->cgn_latency_evt_cnt, 0);
5601 	atomic64_set(&phba->cgn_latency_evt, 0);
5602 
5603 	/* We need to store MB/sec bandwidth in the congestion information.
5604 	 * block_cnt is count of 512 byte blocks for the entire minute,
5605 	 * bps will get bytes per sec before finally converting to MB/sec.
5606 	 */
5607 	bps = div_u64(phba->rx_block_cnt, LPFC_SEC_MIN) * 512;
5608 	phba->rx_block_cnt = 0;
5609 	mvalue = bps / (1024 * 1024); /* convert to MB/sec */
5610 
5611 	/* Every minute */
5612 	/* cgn parameters */
5613 	cp->cgn_info_mode = phba->cgn_p.cgn_param_mode;
5614 	cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0;
5615 	cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1;
5616 	cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2;
5617 
5618 	/* Fill in default LUN qdepth */
5619 	value = (uint16_t)(phba->pport->cfg_lun_queue_depth);
5620 	cp->cgn_lunq = cpu_to_le16(value);
5621 
5622 	/* Record congestion buffer info - every minute
5623 	 * cgn_driver_evt_cnt (Driver events)
5624 	 * cgn_fabric_warn_cnt (Congestion Warnings)
5625 	 * cgn_latency_evt_cnt / cgn_latency_evt (IO Latency)
5626 	 * cgn_fabric_alarm_cnt (Congestion Alarms)
5627 	 */
5628 	index = ++cp->cgn_index_minute;
5629 	if (cp->cgn_index_minute == LPFC_MIN_HOUR) {
5630 		cp->cgn_index_minute = 0;
5631 		index = 0;
5632 	}
5633 
5634 	/* Get the number of driver events in this sample and reset counter */
5635 	dvalue = atomic_read(&phba->cgn_driver_evt_cnt);
5636 	atomic_set(&phba->cgn_driver_evt_cnt, 0);
5637 
5638 	/* Get the number of warning events - FPIN and Signal for this minute */
5639 	wvalue = 0;
5640 	if ((phba->cgn_reg_fpin & LPFC_CGN_FPIN_WARN) ||
5641 	    phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY ||
5642 	    phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM)
5643 		wvalue = atomic_read(&phba->cgn_fabric_warn_cnt);
5644 	atomic_set(&phba->cgn_fabric_warn_cnt, 0);
5645 
5646 	/* Get the number of alarm events - FPIN and Signal for this minute */
5647 	avalue = 0;
5648 	if ((phba->cgn_reg_fpin & LPFC_CGN_FPIN_ALARM) ||
5649 	    phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM)
5650 		avalue = atomic_read(&phba->cgn_fabric_alarm_cnt);
5651 	atomic_set(&phba->cgn_fabric_alarm_cnt, 0);
5652 
5653 	/* Collect the driver, warning, alarm and latency counts for this
5654 	 * minute into the driver congestion buffer.
5655 	 */
5656 	ptr = &cp->cgn_drvr_min[index];
5657 	value = (uint16_t)dvalue;
5658 	*ptr = cpu_to_le16(value);
5659 
5660 	ptr = &cp->cgn_warn_min[index];
5661 	value = (uint16_t)wvalue;
5662 	*ptr = cpu_to_le16(value);
5663 
5664 	ptr = &cp->cgn_alarm_min[index];
5665 	value = (uint16_t)avalue;
5666 	*ptr = cpu_to_le16(value);
5667 
5668 	lptr = &cp->cgn_latency_min[index];
5669 	if (lvalue) {
5670 		lvalue = (uint32_t)div_u64(latsum, lvalue);
5671 		*lptr = cpu_to_le32(lvalue);
5672 	} else {
5673 		*lptr = 0;
5674 	}
5675 
5676 	/* Collect the bandwidth value into the driver's congesion buffer. */
5677 	mptr = &cp->cgn_bw_min[index];
5678 	*mptr = cpu_to_le16(mvalue);
5679 
5680 	lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
5681 			"2418 Congestion Info - minute (%d): %d %d %d %d %d\n",
5682 			index, dvalue, wvalue, *lptr, mvalue, avalue);
5683 
5684 	/* Every hour */
5685 	if ((phba->cgn_evt_minute % LPFC_MIN_HOUR) == 0) {
5686 		/* Record congestion buffer info - every hour
5687 		 * Collapse all minutes into an hour
5688 		 */
5689 		index = ++cp->cgn_index_hour;
5690 		if (cp->cgn_index_hour == LPFC_HOUR_DAY) {
5691 			cp->cgn_index_hour = 0;
5692 			index = 0;
5693 		}
5694 
5695 		dvalue = 0;
5696 		wvalue = 0;
5697 		lvalue = 0;
5698 		avalue = 0;
5699 		mvalue = 0;
5700 		mbps = 0;
5701 		for (i = 0; i < LPFC_MIN_HOUR; i++) {
5702 			dvalue += le16_to_cpu(cp->cgn_drvr_min[i]);
5703 			wvalue += le16_to_cpu(cp->cgn_warn_min[i]);
5704 			lvalue += le32_to_cpu(cp->cgn_latency_min[i]);
5705 			mbps += le16_to_cpu(cp->cgn_bw_min[i]);
5706 			avalue += le16_to_cpu(cp->cgn_alarm_min[i]);
5707 		}
5708 		if (lvalue)		/* Avg of latency averages */
5709 			lvalue /= LPFC_MIN_HOUR;
5710 		if (mbps)		/* Avg of Bandwidth averages */
5711 			mvalue = mbps / LPFC_MIN_HOUR;
5712 
5713 		lptr = &cp->cgn_drvr_hr[index];
5714 		*lptr = cpu_to_le32(dvalue);
5715 		lptr = &cp->cgn_warn_hr[index];
5716 		*lptr = cpu_to_le32(wvalue);
5717 		lptr = &cp->cgn_latency_hr[index];
5718 		*lptr = cpu_to_le32(lvalue);
5719 		mptr = &cp->cgn_bw_hr[index];
5720 		*mptr = cpu_to_le16(mvalue);
5721 		lptr = &cp->cgn_alarm_hr[index];
5722 		*lptr = cpu_to_le32(avalue);
5723 
5724 		lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
5725 				"2419 Congestion Info - hour "
5726 				"(%d): %d %d %d %d %d\n",
5727 				index, dvalue, wvalue, lvalue, mvalue, avalue);
5728 	}
5729 
5730 	/* Every day */
5731 	if ((phba->cgn_evt_minute % LPFC_MIN_DAY) == 0) {
5732 		/* Record congestion buffer info - every hour
5733 		 * Collapse all hours into a day. Rotate days
5734 		 * after LPFC_MAX_CGN_DAYS.
5735 		 */
5736 		index = ++cp->cgn_index_day;
5737 		if (cp->cgn_index_day == LPFC_MAX_CGN_DAYS) {
5738 			cp->cgn_index_day = 0;
5739 			index = 0;
5740 		}
5741 
5742 		/* Anytime we overwrite daily index 0, after we wrap,
5743 		 * we will be overwriting the oldest day, so we must
5744 		 * update the congestion data start time for that day.
5745 		 * That start time should have previously been saved after
5746 		 * we wrote the last days worth of data.
5747 		 */
5748 		if ((phba->hba_flag & HBA_CGN_DAY_WRAP) && index == 0) {
5749 			time64_to_tm(phba->cgn_daily_ts.tv_sec, 0, &broken);
5750 
5751 			cp->cgn_info_month = broken.tm_mon + 1;
5752 			cp->cgn_info_day = broken.tm_mday;
5753 			cp->cgn_info_year = broken.tm_year - 100;
5754 			cp->cgn_info_hour = broken.tm_hour;
5755 			cp->cgn_info_minute = broken.tm_min;
5756 			cp->cgn_info_second = broken.tm_sec;
5757 
5758 			lpfc_printf_log
5759 				(phba, KERN_INFO, LOG_CGN_MGMT,
5760 				"2646 CGNInfo idx0 Start Time: "
5761 				"%d/%d/%d %d:%d:%d\n",
5762 				cp->cgn_info_day, cp->cgn_info_month,
5763 				cp->cgn_info_year, cp->cgn_info_hour,
5764 				cp->cgn_info_minute, cp->cgn_info_second);
5765 		}
5766 
5767 		dvalue = 0;
5768 		wvalue = 0;
5769 		lvalue = 0;
5770 		mvalue = 0;
5771 		mbps = 0;
5772 		avalue = 0;
5773 		for (i = 0; i < LPFC_HOUR_DAY; i++) {
5774 			dvalue += le32_to_cpu(cp->cgn_drvr_hr[i]);
5775 			wvalue += le32_to_cpu(cp->cgn_warn_hr[i]);
5776 			lvalue += le32_to_cpu(cp->cgn_latency_hr[i]);
5777 			mbps += le16_to_cpu(cp->cgn_bw_hr[i]);
5778 			avalue += le32_to_cpu(cp->cgn_alarm_hr[i]);
5779 		}
5780 		if (lvalue)		/* Avg of latency averages */
5781 			lvalue /= LPFC_HOUR_DAY;
5782 		if (mbps)		/* Avg of Bandwidth averages */
5783 			mvalue = mbps / LPFC_HOUR_DAY;
5784 
5785 		lptr = &cp->cgn_drvr_day[index];
5786 		*lptr = cpu_to_le32(dvalue);
5787 		lptr = &cp->cgn_warn_day[index];
5788 		*lptr = cpu_to_le32(wvalue);
5789 		lptr = &cp->cgn_latency_day[index];
5790 		*lptr = cpu_to_le32(lvalue);
5791 		mptr = &cp->cgn_bw_day[index];
5792 		*mptr = cpu_to_le16(mvalue);
5793 		lptr = &cp->cgn_alarm_day[index];
5794 		*lptr = cpu_to_le32(avalue);
5795 
5796 		lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
5797 				"2420 Congestion Info - daily (%d): "
5798 				"%d %d %d %d %d\n",
5799 				index, dvalue, wvalue, lvalue, mvalue, avalue);
5800 
5801 		/* We just wrote LPFC_MAX_CGN_DAYS of data,
5802 		 * so we are wrapped on any data after this.
5803 		 * Save this as the start time for the next day.
5804 		 */
5805 		if (index == (LPFC_MAX_CGN_DAYS - 1)) {
5806 			phba->hba_flag |= HBA_CGN_DAY_WRAP;
5807 			ktime_get_real_ts64(&phba->cgn_daily_ts);
5808 		}
5809 	}
5810 
5811 	/* Use the frequency found in the last rcv'ed FPIN */
5812 	value = phba->cgn_fpin_frequency;
5813 	cp->cgn_warn_freq = cpu_to_le16(value);
5814 	cp->cgn_alarm_freq = cpu_to_le16(value);
5815 
5816 	lvalue = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ,
5817 				     LPFC_CGN_CRC32_SEED);
5818 	cp->cgn_info_crc = cpu_to_le32(lvalue);
5819 }
5820 
5821 /**
5822  * lpfc_calc_cmf_latency - latency from start of rxate timer interval
5823  * @phba: The Hba for which this call is being executed.
5824  *
5825  * The routine calculates the latency from the beginning of the CMF timer
5826  * interval to the current point in time. It is called from IO completion
5827  * when we exceed our Bandwidth limitation for the time interval.
5828  */
5829 uint32_t
5830 lpfc_calc_cmf_latency(struct lpfc_hba *phba)
5831 {
5832 	struct timespec64 cmpl_time;
5833 	uint32_t msec = 0;
5834 
5835 	ktime_get_real_ts64(&cmpl_time);
5836 
5837 	/* This routine works on a ms granularity so sec and usec are
5838 	 * converted accordingly.
5839 	 */
5840 	if (cmpl_time.tv_sec == phba->cmf_latency.tv_sec) {
5841 		msec = (cmpl_time.tv_nsec - phba->cmf_latency.tv_nsec) /
5842 			NSEC_PER_MSEC;
5843 	} else {
5844 		if (cmpl_time.tv_nsec >= phba->cmf_latency.tv_nsec) {
5845 			msec = (cmpl_time.tv_sec -
5846 				phba->cmf_latency.tv_sec) * MSEC_PER_SEC;
5847 			msec += ((cmpl_time.tv_nsec -
5848 				  phba->cmf_latency.tv_nsec) / NSEC_PER_MSEC);
5849 		} else {
5850 			msec = (cmpl_time.tv_sec - phba->cmf_latency.tv_sec -
5851 				1) * MSEC_PER_SEC;
5852 			msec += (((NSEC_PER_SEC - phba->cmf_latency.tv_nsec) +
5853 				 cmpl_time.tv_nsec) / NSEC_PER_MSEC);
5854 		}
5855 	}
5856 	return msec;
5857 }
5858 
5859 /**
5860  * lpfc_cmf_timer -  This is the timer function for one congestion
5861  * rate interval.
5862  * @timer: Pointer to the high resolution timer that expired
5863  */
5864 static enum hrtimer_restart
5865 lpfc_cmf_timer(struct hrtimer *timer)
5866 {
5867 	struct lpfc_hba *phba = container_of(timer, struct lpfc_hba,
5868 					     cmf_timer);
5869 	struct rxtable_entry *entry;
5870 	uint32_t io_cnt;
5871 	uint32_t head, tail;
5872 	uint32_t busy, max_read;
5873 	uint64_t total, rcv, lat, mbpi, extra, cnt;
5874 	int timer_interval = LPFC_CMF_INTERVAL;
5875 	uint32_t ms;
5876 	struct lpfc_cgn_stat *cgs;
5877 	int cpu;
5878 
5879 	/* Only restart the timer if congestion mgmt is on */
5880 	if (phba->cmf_active_mode == LPFC_CFG_OFF ||
5881 	    !phba->cmf_latency.tv_sec) {
5882 		lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
5883 				"6224 CMF timer exit: %d %lld\n",
5884 				phba->cmf_active_mode,
5885 				(uint64_t)phba->cmf_latency.tv_sec);
5886 		return HRTIMER_NORESTART;
5887 	}
5888 
5889 	/* If pport is not ready yet, just exit and wait for
5890 	 * the next timer cycle to hit.
5891 	 */
5892 	if (!phba->pport)
5893 		goto skip;
5894 
5895 	/* Do not block SCSI IO while in the timer routine since
5896 	 * total_bytes will be cleared
5897 	 */
5898 	atomic_set(&phba->cmf_stop_io, 1);
5899 
5900 	/* First we need to calculate the actual ms between
5901 	 * the last timer interrupt and this one. We ask for
5902 	 * LPFC_CMF_INTERVAL, however the actual time may
5903 	 * vary depending on system overhead.
5904 	 */
5905 	ms = lpfc_calc_cmf_latency(phba);
5906 
5907 
5908 	/* Immediately after we calculate the time since the last
5909 	 * timer interrupt, set the start time for the next
5910 	 * interrupt
5911 	 */
5912 	ktime_get_real_ts64(&phba->cmf_latency);
5913 
5914 	phba->cmf_link_byte_count =
5915 		div_u64(phba->cmf_max_line_rate * LPFC_CMF_INTERVAL, 1000);
5916 
5917 	/* Collect all the stats from the prior timer interval */
5918 	total = 0;
5919 	io_cnt = 0;
5920 	lat = 0;
5921 	rcv = 0;
5922 	for_each_present_cpu(cpu) {
5923 		cgs = per_cpu_ptr(phba->cmf_stat, cpu);
5924 		total += atomic64_xchg(&cgs->total_bytes, 0);
5925 		io_cnt += atomic_xchg(&cgs->rx_io_cnt, 0);
5926 		lat += atomic64_xchg(&cgs->rx_latency, 0);
5927 		rcv += atomic64_xchg(&cgs->rcv_bytes, 0);
5928 	}
5929 
5930 	/* Before we issue another CMF_SYNC_WQE, retrieve the BW
5931 	 * returned from the last CMF_SYNC_WQE issued, from
5932 	 * cmf_last_sync_bw. This will be the target BW for
5933 	 * this next timer interval.
5934 	 */
5935 	if (phba->cmf_active_mode == LPFC_CFG_MANAGED &&
5936 	    phba->link_state != LPFC_LINK_DOWN &&
5937 	    phba->hba_flag & HBA_SETUP) {
5938 		mbpi = phba->cmf_last_sync_bw;
5939 		phba->cmf_last_sync_bw = 0;
5940 		extra = 0;
5941 
5942 		/* Calculate any extra bytes needed to account for the
5943 		 * timer accuracy. If we are less than LPFC_CMF_INTERVAL
5944 		 * calculate the adjustment needed for total to reflect
5945 		 * a full LPFC_CMF_INTERVAL.
5946 		 */
5947 		if (ms && ms < LPFC_CMF_INTERVAL) {
5948 			cnt = div_u64(total, ms); /* bytes per ms */
5949 			cnt *= LPFC_CMF_INTERVAL; /* what total should be */
5950 			if (cnt > mbpi)
5951 				cnt = mbpi;
5952 			extra = cnt - total;
5953 		}
5954 		lpfc_issue_cmf_sync_wqe(phba, LPFC_CMF_INTERVAL, total + extra);
5955 	} else {
5956 		/* For Monitor mode or link down we want mbpi
5957 		 * to be the full link speed
5958 		 */
5959 		mbpi = phba->cmf_link_byte_count;
5960 		extra = 0;
5961 	}
5962 	phba->cmf_timer_cnt++;
5963 
5964 	if (io_cnt) {
5965 		/* Update congestion info buffer latency in us */
5966 		atomic_add(io_cnt, &phba->cgn_latency_evt_cnt);
5967 		atomic64_add(lat, &phba->cgn_latency_evt);
5968 	}
5969 	busy = atomic_xchg(&phba->cmf_busy, 0);
5970 	max_read = atomic_xchg(&phba->rx_max_read_cnt, 0);
5971 
5972 	/* Calculate MBPI for the next timer interval */
5973 	if (mbpi) {
5974 		if (mbpi > phba->cmf_link_byte_count ||
5975 		    phba->cmf_active_mode == LPFC_CFG_MONITOR)
5976 			mbpi = phba->cmf_link_byte_count;
5977 
5978 		/* Change max_bytes_per_interval to what the prior
5979 		 * CMF_SYNC_WQE cmpl indicated.
5980 		 */
5981 		if (mbpi != phba->cmf_max_bytes_per_interval)
5982 			phba->cmf_max_bytes_per_interval = mbpi;
5983 	}
5984 
5985 	/* Save rxmonitor information for debug */
5986 	if (phba->rxtable) {
5987 		head = atomic_xchg(&phba->rxtable_idx_head,
5988 				   LPFC_RXMONITOR_TABLE_IN_USE);
5989 		entry = &phba->rxtable[head];
5990 		entry->total_bytes = total;
5991 		entry->cmf_bytes = total + extra;
5992 		entry->rcv_bytes = rcv;
5993 		entry->cmf_busy = busy;
5994 		entry->cmf_info = phba->cmf_active_info;
5995 		if (io_cnt) {
5996 			entry->avg_io_latency = div_u64(lat, io_cnt);
5997 			entry->avg_io_size = div_u64(rcv, io_cnt);
5998 		} else {
5999 			entry->avg_io_latency = 0;
6000 			entry->avg_io_size = 0;
6001 		}
6002 		entry->max_read_cnt = max_read;
6003 		entry->io_cnt = io_cnt;
6004 		entry->max_bytes_per_interval = mbpi;
6005 		if (phba->cmf_active_mode == LPFC_CFG_MANAGED)
6006 			entry->timer_utilization = phba->cmf_last_ts;
6007 		else
6008 			entry->timer_utilization = ms;
6009 		entry->timer_interval = ms;
6010 		phba->cmf_last_ts = 0;
6011 
6012 		/* Increment rxtable index */
6013 		head = (head + 1) % LPFC_MAX_RXMONITOR_ENTRY;
6014 		tail = atomic_read(&phba->rxtable_idx_tail);
6015 		if (head == tail) {
6016 			tail = (tail + 1) % LPFC_MAX_RXMONITOR_ENTRY;
6017 			atomic_set(&phba->rxtable_idx_tail, tail);
6018 		}
6019 		atomic_set(&phba->rxtable_idx_head, head);
6020 	}
6021 
6022 	if (phba->cmf_active_mode == LPFC_CFG_MONITOR) {
6023 		/* If Monitor mode, check if we are oversubscribed
6024 		 * against the full line rate.
6025 		 */
6026 		if (mbpi && total > mbpi)
6027 			atomic_inc(&phba->cgn_driver_evt_cnt);
6028 	}
6029 	phba->rx_block_cnt += div_u64(rcv, 512);  /* save 512 byte block cnt */
6030 
6031 	/* Each minute save Fabric and Driver congestion information */
6032 	lpfc_cgn_save_evt_cnt(phba);
6033 
6034 	/* Since we need to call lpfc_cgn_save_evt_cnt every minute, on the
6035 	 * minute, adjust our next timer interval, if needed, to ensure a
6036 	 * 1 minute granularity when we get the next timer interrupt.
6037 	 */
6038 	if (time_after(jiffies + msecs_to_jiffies(LPFC_CMF_INTERVAL),
6039 		       phba->cgn_evt_timestamp)) {
6040 		timer_interval = jiffies_to_msecs(phba->cgn_evt_timestamp -
6041 						  jiffies);
6042 		if (timer_interval <= 0)
6043 			timer_interval = LPFC_CMF_INTERVAL;
6044 
6045 		/* If we adjust timer_interval, max_bytes_per_interval
6046 		 * needs to be adjusted as well.
6047 		 */
6048 		phba->cmf_link_byte_count = div_u64(phba->cmf_max_line_rate *
6049 						    timer_interval, 1000);
6050 		if (phba->cmf_active_mode == LPFC_CFG_MONITOR)
6051 			phba->cmf_max_bytes_per_interval =
6052 				phba->cmf_link_byte_count;
6053 	}
6054 
6055 	/* Since total_bytes has already been zero'ed, its okay to unblock
6056 	 * after max_bytes_per_interval is setup.
6057 	 */
6058 	if (atomic_xchg(&phba->cmf_bw_wait, 0))
6059 		queue_work(phba->wq, &phba->unblock_request_work);
6060 
6061 	/* SCSI IO is now unblocked */
6062 	atomic_set(&phba->cmf_stop_io, 0);
6063 
6064 skip:
6065 	hrtimer_forward_now(timer,
6066 			    ktime_set(0, timer_interval * NSEC_PER_MSEC));
6067 	return HRTIMER_RESTART;
6068 }
6069 
6070 #define trunk_link_status(__idx)\
6071 	bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
6072 	       ((phba->trunk_link.link##__idx.state == LPFC_LINK_UP) ?\
6073 		"Link up" : "Link down") : "NA"
6074 /* Did port __idx reported an error */
6075 #define trunk_port_fault(__idx)\
6076 	bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
6077 	       (port_fault & (1 << __idx) ? "YES" : "NO") : "NA"
6078 
6079 static void
6080 lpfc_update_trunk_link_status(struct lpfc_hba *phba,
6081 			      struct lpfc_acqe_fc_la *acqe_fc)
6082 {
6083 	uint8_t port_fault = bf_get(lpfc_acqe_fc_la_trunk_linkmask, acqe_fc);
6084 	uint8_t err = bf_get(lpfc_acqe_fc_la_trunk_fault, acqe_fc);
6085 
6086 	phba->sli4_hba.link_state.speed =
6087 		lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
6088 				bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
6089 
6090 	phba->sli4_hba.link_state.logical_speed =
6091 				bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
6092 	/* We got FC link speed, convert to fc_linkspeed (READ_TOPOLOGY) */
6093 	phba->fc_linkspeed =
6094 		 lpfc_async_link_speed_to_read_top(
6095 				phba,
6096 				bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
6097 
6098 	if (bf_get(lpfc_acqe_fc_la_trunk_config_port0, acqe_fc)) {
6099 		phba->trunk_link.link0.state =
6100 			bf_get(lpfc_acqe_fc_la_trunk_link_status_port0, acqe_fc)
6101 			? LPFC_LINK_UP : LPFC_LINK_DOWN;
6102 		phba->trunk_link.link0.fault = port_fault & 0x1 ? err : 0;
6103 	}
6104 	if (bf_get(lpfc_acqe_fc_la_trunk_config_port1, acqe_fc)) {
6105 		phba->trunk_link.link1.state =
6106 			bf_get(lpfc_acqe_fc_la_trunk_link_status_port1, acqe_fc)
6107 			? LPFC_LINK_UP : LPFC_LINK_DOWN;
6108 		phba->trunk_link.link1.fault = port_fault & 0x2 ? err : 0;
6109 	}
6110 	if (bf_get(lpfc_acqe_fc_la_trunk_config_port2, acqe_fc)) {
6111 		phba->trunk_link.link2.state =
6112 			bf_get(lpfc_acqe_fc_la_trunk_link_status_port2, acqe_fc)
6113 			? LPFC_LINK_UP : LPFC_LINK_DOWN;
6114 		phba->trunk_link.link2.fault = port_fault & 0x4 ? err : 0;
6115 	}
6116 	if (bf_get(lpfc_acqe_fc_la_trunk_config_port3, acqe_fc)) {
6117 		phba->trunk_link.link3.state =
6118 			bf_get(lpfc_acqe_fc_la_trunk_link_status_port3, acqe_fc)
6119 			? LPFC_LINK_UP : LPFC_LINK_DOWN;
6120 		phba->trunk_link.link3.fault = port_fault & 0x8 ? err : 0;
6121 	}
6122 
6123 	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6124 			"2910 Async FC Trunking Event - Speed:%d\n"
6125 			"\tLogical speed:%d "
6126 			"port0: %s port1: %s port2: %s port3: %s\n",
6127 			phba->sli4_hba.link_state.speed,
6128 			phba->sli4_hba.link_state.logical_speed,
6129 			trunk_link_status(0), trunk_link_status(1),
6130 			trunk_link_status(2), trunk_link_status(3));
6131 
6132 	if (phba->cmf_active_mode != LPFC_CFG_OFF)
6133 		lpfc_cmf_signal_init(phba);
6134 
6135 	if (port_fault)
6136 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6137 				"3202 trunk error:0x%x (%s) seen on port0:%s "
6138 				/*
6139 				 * SLI-4: We have only 0xA error codes
6140 				 * defined as of now. print an appropriate
6141 				 * message in case driver needs to be updated.
6142 				 */
6143 				"port1:%s port2:%s port3:%s\n", err, err > 0xA ?
6144 				"UNDEFINED. update driver." : trunk_errmsg[err],
6145 				trunk_port_fault(0), trunk_port_fault(1),
6146 				trunk_port_fault(2), trunk_port_fault(3));
6147 }
6148 
6149 
6150 /**
6151  * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event
6152  * @phba: pointer to lpfc hba data structure.
6153  * @acqe_fc: pointer to the async fc completion queue entry.
6154  *
6155  * This routine is to handle the SLI4 asynchronous FC event. It will simply log
6156  * that the event was received and then issue a read_topology mailbox command so
6157  * that the rest of the driver will treat it the same as SLI3.
6158  **/
6159 static void
6160 lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
6161 {
6162 	struct lpfc_dmabuf *mp;
6163 	LPFC_MBOXQ_t *pmb;
6164 	MAILBOX_t *mb;
6165 	struct lpfc_mbx_read_top *la;
6166 	int rc;
6167 
6168 	if (bf_get(lpfc_trailer_type, acqe_fc) !=
6169 	    LPFC_FC_LA_EVENT_TYPE_FC_LINK) {
6170 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6171 				"2895 Non FC link Event detected.(%d)\n",
6172 				bf_get(lpfc_trailer_type, acqe_fc));
6173 		return;
6174 	}
6175 
6176 	if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) ==
6177 	    LPFC_FC_LA_TYPE_TRUNKING_EVENT) {
6178 		lpfc_update_trunk_link_status(phba, acqe_fc);
6179 		return;
6180 	}
6181 
6182 	/* Keep the link status for extra SLI4 state machine reference */
6183 	phba->sli4_hba.link_state.speed =
6184 			lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
6185 				bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
6186 	phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL;
6187 	phba->sli4_hba.link_state.topology =
6188 				bf_get(lpfc_acqe_fc_la_topology, acqe_fc);
6189 	phba->sli4_hba.link_state.status =
6190 				bf_get(lpfc_acqe_fc_la_att_type, acqe_fc);
6191 	phba->sli4_hba.link_state.type =
6192 				bf_get(lpfc_acqe_fc_la_port_type, acqe_fc);
6193 	phba->sli4_hba.link_state.number =
6194 				bf_get(lpfc_acqe_fc_la_port_number, acqe_fc);
6195 	phba->sli4_hba.link_state.fault =
6196 				bf_get(lpfc_acqe_link_fault, acqe_fc);
6197 
6198 	if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) ==
6199 	    LPFC_FC_LA_TYPE_LINK_DOWN)
6200 		phba->sli4_hba.link_state.logical_speed = 0;
6201 	else if	(!phba->sli4_hba.conf_trunk)
6202 		phba->sli4_hba.link_state.logical_speed =
6203 				bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
6204 
6205 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6206 			"2896 Async FC event - Speed:%dGBaud Topology:x%x "
6207 			"LA Type:x%x Port Type:%d Port Number:%d Logical speed:"
6208 			"%dMbps Fault:%d\n",
6209 			phba->sli4_hba.link_state.speed,
6210 			phba->sli4_hba.link_state.topology,
6211 			phba->sli4_hba.link_state.status,
6212 			phba->sli4_hba.link_state.type,
6213 			phba->sli4_hba.link_state.number,
6214 			phba->sli4_hba.link_state.logical_speed,
6215 			phba->sli4_hba.link_state.fault);
6216 	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6217 	if (!pmb) {
6218 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6219 				"2897 The mboxq allocation failed\n");
6220 		return;
6221 	}
6222 	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
6223 	if (!mp) {
6224 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6225 				"2898 The lpfc_dmabuf allocation failed\n");
6226 		goto out_free_pmb;
6227 	}
6228 	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
6229 	if (!mp->virt) {
6230 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6231 				"2899 The mbuf allocation failed\n");
6232 		goto out_free_dmabuf;
6233 	}
6234 
6235 	/* Cleanup any outstanding ELS commands */
6236 	lpfc_els_flush_all_cmd(phba);
6237 
6238 	/* Block ELS IOCBs until we have done process link event */
6239 	phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;
6240 
6241 	/* Update link event statistics */
6242 	phba->sli.slistat.link_event++;
6243 
6244 	/* Create lpfc_handle_latt mailbox command from link ACQE */
6245 	lpfc_read_topology(phba, pmb, mp);
6246 	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
6247 	pmb->vport = phba->pport;
6248 
6249 	if (phba->sli4_hba.link_state.status != LPFC_FC_LA_TYPE_LINK_UP) {
6250 		phba->link_flag &= ~(LS_MDS_LINK_DOWN | LS_MDS_LOOPBACK);
6251 
6252 		switch (phba->sli4_hba.link_state.status) {
6253 		case LPFC_FC_LA_TYPE_MDS_LINK_DOWN:
6254 			phba->link_flag |= LS_MDS_LINK_DOWN;
6255 			break;
6256 		case LPFC_FC_LA_TYPE_MDS_LOOPBACK:
6257 			phba->link_flag |= LS_MDS_LOOPBACK;
6258 			break;
6259 		default:
6260 			break;
6261 		}
6262 
6263 		/* Initialize completion status */
6264 		mb = &pmb->u.mb;
6265 		mb->mbxStatus = MBX_SUCCESS;
6266 
6267 		/* Parse port fault information field */
6268 		lpfc_sli4_parse_latt_fault(phba, (void *)acqe_fc);
6269 
6270 		/* Parse and translate link attention fields */
6271 		la = (struct lpfc_mbx_read_top *)&pmb->u.mb.un.varReadTop;
6272 		la->eventTag = acqe_fc->event_tag;
6273 
6274 		if (phba->sli4_hba.link_state.status ==
6275 		    LPFC_FC_LA_TYPE_UNEXP_WWPN) {
6276 			bf_set(lpfc_mbx_read_top_att_type, la,
6277 			       LPFC_FC_LA_TYPE_UNEXP_WWPN);
6278 		} else {
6279 			bf_set(lpfc_mbx_read_top_att_type, la,
6280 			       LPFC_FC_LA_TYPE_LINK_DOWN);
6281 		}
6282 		/* Invoke the mailbox command callback function */
6283 		lpfc_mbx_cmpl_read_topology(phba, pmb);
6284 
6285 		return;
6286 	}
6287 
6288 	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
6289 	if (rc == MBX_NOT_FINISHED) {
6290 		lpfc_mbuf_free(phba, mp->virt, mp->phys);
6291 		goto out_free_dmabuf;
6292 	}
6293 	return;
6294 
6295 out_free_dmabuf:
6296 	kfree(mp);
6297 out_free_pmb:
6298 	mempool_free(pmb, phba->mbox_mem_pool);
6299 }
6300 
6301 /**
6302  * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event
6303  * @phba: pointer to lpfc hba data structure.
6304  * @acqe_sli: pointer to the async SLI completion queue entry.
6305  *
6306  * This routine is to handle the SLI4 asynchronous SLI events.
6307  **/
6308 static void
6309 lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
6310 {
6311 	char port_name;
6312 	char message[128];
6313 	uint8_t status;
6314 	uint8_t evt_type;
6315 	uint8_t operational = 0;
6316 	struct temp_event temp_event_data;
6317 	struct lpfc_acqe_misconfigured_event *misconfigured;
6318 	struct lpfc_acqe_cgn_signal *cgn_signal;
6319 	struct Scsi_Host  *shost;
6320 	struct lpfc_vport **vports;
6321 	int rc, i, cnt;
6322 
6323 	evt_type = bf_get(lpfc_trailer_type, acqe_sli);
6324 
6325 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6326 			"2901 Async SLI event - Type:%d, Event Data: x%08x "
6327 			"x%08x x%08x x%08x\n", evt_type,
6328 			acqe_sli->event_data1, acqe_sli->event_data2,
6329 			acqe_sli->reserved, acqe_sli->trailer);
6330 
6331 	port_name = phba->Port[0];
6332 	if (port_name == 0x00)
6333 		port_name = '?'; /* get port name is empty */
6334 
6335 	switch (evt_type) {
6336 	case LPFC_SLI_EVENT_TYPE_OVER_TEMP:
6337 		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
6338 		temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
6339 		temp_event_data.data = (uint32_t)acqe_sli->event_data1;
6340 
6341 		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
6342 				"3190 Over Temperature:%d Celsius- Port Name %c\n",
6343 				acqe_sli->event_data1, port_name);
6344 
6345 		phba->sfp_warning |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
6346 		shost = lpfc_shost_from_vport(phba->pport);
6347 		fc_host_post_vendor_event(shost, fc_get_event_number(),
6348 					  sizeof(temp_event_data),
6349 					  (char *)&temp_event_data,
6350 					  SCSI_NL_VID_TYPE_PCI
6351 					  | PCI_VENDOR_ID_EMULEX);
6352 		break;
6353 	case LPFC_SLI_EVENT_TYPE_NORM_TEMP:
6354 		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
6355 		temp_event_data.event_code = LPFC_NORMAL_TEMP;
6356 		temp_event_data.data = (uint32_t)acqe_sli->event_data1;
6357 
6358 		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6359 				"3191 Normal Temperature:%d Celsius - Port Name %c\n",
6360 				acqe_sli->event_data1, port_name);
6361 
6362 		shost = lpfc_shost_from_vport(phba->pport);
6363 		fc_host_post_vendor_event(shost, fc_get_event_number(),
6364 					  sizeof(temp_event_data),
6365 					  (char *)&temp_event_data,
6366 					  SCSI_NL_VID_TYPE_PCI
6367 					  | PCI_VENDOR_ID_EMULEX);
6368 		break;
6369 	case LPFC_SLI_EVENT_TYPE_MISCONFIGURED:
6370 		misconfigured = (struct lpfc_acqe_misconfigured_event *)
6371 					&acqe_sli->event_data1;
6372 
6373 		/* fetch the status for this port */
6374 		switch (phba->sli4_hba.lnk_info.lnk_no) {
6375 		case LPFC_LINK_NUMBER_0:
6376 			status = bf_get(lpfc_sli_misconfigured_port0_state,
6377 					&misconfigured->theEvent);
6378 			operational = bf_get(lpfc_sli_misconfigured_port0_op,
6379 					&misconfigured->theEvent);
6380 			break;
6381 		case LPFC_LINK_NUMBER_1:
6382 			status = bf_get(lpfc_sli_misconfigured_port1_state,
6383 					&misconfigured->theEvent);
6384 			operational = bf_get(lpfc_sli_misconfigured_port1_op,
6385 					&misconfigured->theEvent);
6386 			break;
6387 		case LPFC_LINK_NUMBER_2:
6388 			status = bf_get(lpfc_sli_misconfigured_port2_state,
6389 					&misconfigured->theEvent);
6390 			operational = bf_get(lpfc_sli_misconfigured_port2_op,
6391 					&misconfigured->theEvent);
6392 			break;
6393 		case LPFC_LINK_NUMBER_3:
6394 			status = bf_get(lpfc_sli_misconfigured_port3_state,
6395 					&misconfigured->theEvent);
6396 			operational = bf_get(lpfc_sli_misconfigured_port3_op,
6397 					&misconfigured->theEvent);
6398 			break;
6399 		default:
6400 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6401 					"3296 "
6402 					"LPFC_SLI_EVENT_TYPE_MISCONFIGURED "
6403 					"event: Invalid link %d",
6404 					phba->sli4_hba.lnk_info.lnk_no);
6405 			return;
6406 		}
6407 
6408 		/* Skip if optic state unchanged */
6409 		if (phba->sli4_hba.lnk_info.optic_state == status)
6410 			return;
6411 
6412 		switch (status) {
6413 		case LPFC_SLI_EVENT_STATUS_VALID:
6414 			sprintf(message, "Physical Link is functional");
6415 			break;
6416 		case LPFC_SLI_EVENT_STATUS_NOT_PRESENT:
6417 			sprintf(message, "Optics faulted/incorrectly "
6418 				"installed/not installed - Reseat optics, "
6419 				"if issue not resolved, replace.");
6420 			break;
6421 		case LPFC_SLI_EVENT_STATUS_WRONG_TYPE:
6422 			sprintf(message,
6423 				"Optics of two types installed - Remove one "
6424 				"optic or install matching pair of optics.");
6425 			break;
6426 		case LPFC_SLI_EVENT_STATUS_UNSUPPORTED:
6427 			sprintf(message, "Incompatible optics - Replace with "
6428 				"compatible optics for card to function.");
6429 			break;
6430 		case LPFC_SLI_EVENT_STATUS_UNQUALIFIED:
6431 			sprintf(message, "Unqualified optics - Replace with "
6432 				"Avago optics for Warranty and Technical "
6433 				"Support - Link is%s operational",
6434 				(operational) ? " not" : "");
6435 			break;
6436 		case LPFC_SLI_EVENT_STATUS_UNCERTIFIED:
6437 			sprintf(message, "Uncertified optics - Replace with "
6438 				"Avago-certified optics to enable link "
6439 				"operation - Link is%s operational",
6440 				(operational) ? " not" : "");
6441 			break;
6442 		default:
6443 			/* firmware is reporting a status we don't know about */
6444 			sprintf(message, "Unknown event status x%02x", status);
6445 			break;
6446 		}
6447 
6448 		/* Issue READ_CONFIG mbox command to refresh supported speeds */
6449 		rc = lpfc_sli4_read_config(phba);
6450 		if (rc) {
6451 			phba->lmt = 0;
6452 			lpfc_printf_log(phba, KERN_ERR,
6453 					LOG_TRACE_EVENT,
6454 					"3194 Unable to retrieve supported "
6455 					"speeds, rc = 0x%x\n", rc);
6456 		}
6457 		vports = lpfc_create_vport_work_array(phba);
6458 		if (vports != NULL) {
6459 			for (i = 0; i <= phba->max_vports && vports[i] != NULL;
6460 					i++) {
6461 				shost = lpfc_shost_from_vport(vports[i]);
6462 				lpfc_host_supported_speeds_set(shost);
6463 			}
6464 		}
6465 		lpfc_destroy_vport_work_array(phba, vports);
6466 
6467 		phba->sli4_hba.lnk_info.optic_state = status;
6468 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6469 				"3176 Port Name %c %s\n", port_name, message);
6470 		break;
6471 	case LPFC_SLI_EVENT_TYPE_REMOTE_DPORT:
6472 		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6473 				"3192 Remote DPort Test Initiated - "
6474 				"Event Data1:x%08x Event Data2: x%08x\n",
6475 				acqe_sli->event_data1, acqe_sli->event_data2);
6476 		break;
6477 	case LPFC_SLI_EVENT_TYPE_PORT_PARAMS_CHG:
6478 		/* Call FW to obtain active parms */
6479 		lpfc_sli4_cgn_parm_chg_evt(phba);
6480 		break;
6481 	case LPFC_SLI_EVENT_TYPE_MISCONF_FAWWN:
6482 		/* Misconfigured WWN. Reports that the SLI Port is configured
6483 		 * to use FA-WWN, but the attached device doesn’t support it.
6484 		 * No driver action is required.
6485 		 * Event Data1 - N.A, Event Data2 - N.A
6486 		 */
6487 		lpfc_log_msg(phba, KERN_WARNING, LOG_SLI,
6488 			     "2699 Misconfigured FA-WWN - Attached device does "
6489 			     "not support FA-WWN\n");
6490 		break;
6491 	case LPFC_SLI_EVENT_TYPE_EEPROM_FAILURE:
6492 		/* EEPROM failure. No driver action is required */
6493 		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
6494 			     "2518 EEPROM failure - "
6495 			     "Event Data1: x%08x Event Data2: x%08x\n",
6496 			     acqe_sli->event_data1, acqe_sli->event_data2);
6497 		break;
6498 	case LPFC_SLI_EVENT_TYPE_CGN_SIGNAL:
6499 		if (phba->cmf_active_mode == LPFC_CFG_OFF)
6500 			break;
6501 		cgn_signal = (struct lpfc_acqe_cgn_signal *)
6502 					&acqe_sli->event_data1;
6503 		phba->cgn_acqe_cnt++;
6504 
6505 		cnt = bf_get(lpfc_warn_acqe, cgn_signal);
6506 		atomic64_add(cnt, &phba->cgn_acqe_stat.warn);
6507 		atomic64_add(cgn_signal->alarm_cnt, &phba->cgn_acqe_stat.alarm);
6508 
6509 		/* no threshold for CMF, even 1 signal will trigger an event */
6510 
6511 		/* Alarm overrides warning, so check that first */
6512 		if (cgn_signal->alarm_cnt) {
6513 			if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
6514 				/* Keep track of alarm cnt for CMF_SYNC_WQE */
6515 				atomic_add(cgn_signal->alarm_cnt,
6516 					   &phba->cgn_sync_alarm_cnt);
6517 			}
6518 		} else if (cnt) {
6519 			/* signal action needs to be taken */
6520 			if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY ||
6521 			    phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
6522 				/* Keep track of warning cnt for CMF_SYNC_WQE */
6523 				atomic_add(cnt, &phba->cgn_sync_warn_cnt);
6524 			}
6525 		}
6526 		break;
6527 	default:
6528 		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6529 				"3193 Unrecognized SLI event, type: 0x%x",
6530 				evt_type);
6531 		break;
6532 	}
6533 }
6534 
6535 /**
6536  * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport
6537  * @vport: pointer to vport data structure.
6538  *
6539  * This routine is to perform Clear Virtual Link (CVL) on a vport in
6540  * response to a CVL event.
6541  *
6542  * Return the pointer to the ndlp with the vport if successful, otherwise
6543  * return NULL.
6544  **/
6545 static struct lpfc_nodelist *
6546 lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
6547 {
6548 	struct lpfc_nodelist *ndlp;
6549 	struct Scsi_Host *shost;
6550 	struct lpfc_hba *phba;
6551 
6552 	if (!vport)
6553 		return NULL;
6554 	phba = vport->phba;
6555 	if (!phba)
6556 		return NULL;
6557 	ndlp = lpfc_findnode_did(vport, Fabric_DID);
6558 	if (!ndlp) {
6559 		/* Cannot find existing Fabric ndlp, so allocate a new one */
6560 		ndlp = lpfc_nlp_init(vport, Fabric_DID);
6561 		if (!ndlp)
6562 			return 0;
6563 		/* Set the node type */
6564 		ndlp->nlp_type |= NLP_FABRIC;
6565 		/* Put ndlp onto node list */
6566 		lpfc_enqueue_node(vport, ndlp);
6567 	}
6568 	if ((phba->pport->port_state < LPFC_FLOGI) &&
6569 		(phba->pport->port_state != LPFC_VPORT_FAILED))
6570 		return NULL;
6571 	/* If virtual link is not yet instantiated ignore CVL */
6572 	if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC)
6573 		&& (vport->port_state != LPFC_VPORT_FAILED))
6574 		return NULL;
6575 	shost = lpfc_shost_from_vport(vport);
6576 	if (!shost)
6577 		return NULL;
6578 	lpfc_linkdown_port(vport);
6579 	lpfc_cleanup_pending_mbox(vport);
6580 	spin_lock_irq(shost->host_lock);
6581 	vport->fc_flag |= FC_VPORT_CVL_RCVD;
6582 	spin_unlock_irq(shost->host_lock);
6583 
6584 	return ndlp;
6585 }
6586 
6587 /**
6588  * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
6589  * @phba: pointer to lpfc hba data structure.
6590  *
6591  * This routine is to perform Clear Virtual Link (CVL) on all vports in
6592  * response to a FCF dead event.
6593  **/
6594 static void
6595 lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
6596 {
6597 	struct lpfc_vport **vports;
6598 	int i;
6599 
6600 	vports = lpfc_create_vport_work_array(phba);
6601 	if (vports)
6602 		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
6603 			lpfc_sli4_perform_vport_cvl(vports[i]);
6604 	lpfc_destroy_vport_work_array(phba, vports);
6605 }
6606 
6607 /**
6608  * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event
6609  * @phba: pointer to lpfc hba data structure.
6610  * @acqe_fip: pointer to the async fcoe completion queue entry.
6611  *
6612  * This routine is to handle the SLI4 asynchronous fcoe event.
6613  **/
6614 static void
6615 lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
6616 			struct lpfc_acqe_fip *acqe_fip)
6617 {
6618 	uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip);
6619 	int rc;
6620 	struct lpfc_vport *vport;
6621 	struct lpfc_nodelist *ndlp;
6622 	int active_vlink_present;
6623 	struct lpfc_vport **vports;
6624 	int i;
6625 
6626 	phba->fc_eventTag = acqe_fip->event_tag;
6627 	phba->fcoe_eventtag = acqe_fip->event_tag;
6628 	switch (event_type) {
6629 	case LPFC_FIP_EVENT_TYPE_NEW_FCF:
6630 	case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD:
6631 		if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF)
6632 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6633 					"2546 New FCF event, evt_tag:x%x, "
6634 					"index:x%x\n",
6635 					acqe_fip->event_tag,
6636 					acqe_fip->index);
6637 		else
6638 			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |
6639 					LOG_DISCOVERY,
6640 					"2788 FCF param modified event, "
6641 					"evt_tag:x%x, index:x%x\n",
6642 					acqe_fip->event_tag,
6643 					acqe_fip->index);
6644 		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
6645 			/*
6646 			 * During period of FCF discovery, read the FCF
6647 			 * table record indexed by the event to update
6648 			 * FCF roundrobin failover eligible FCF bmask.
6649 			 */
6650 			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
6651 					LOG_DISCOVERY,
6652 					"2779 Read FCF (x%x) for updating "
6653 					"roundrobin FCF failover bmask\n",
6654 					acqe_fip->index);
6655 			rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index);
6656 		}
6657 
6658 		/* If the FCF discovery is in progress, do nothing. */
6659 		spin_lock_irq(&phba->hbalock);
6660 		if (phba->hba_flag & FCF_TS_INPROG) {
6661 			spin_unlock_irq(&phba->hbalock);
6662 			break;
6663 		}
6664 		/* If fast FCF failover rescan event is pending, do nothing */
6665 		if (phba->fcf.fcf_flag & (FCF_REDISC_EVT | FCF_REDISC_PEND)) {
6666 			spin_unlock_irq(&phba->hbalock);
6667 			break;
6668 		}
6669 
6670 		/* If the FCF has been in discovered state, do nothing. */
6671 		if (phba->fcf.fcf_flag & FCF_SCAN_DONE) {
6672 			spin_unlock_irq(&phba->hbalock);
6673 			break;
6674 		}
6675 		spin_unlock_irq(&phba->hbalock);
6676 
6677 		/* Otherwise, scan the entire FCF table and re-discover SAN */
6678 		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
6679 				"2770 Start FCF table scan per async FCF "
6680 				"event, evt_tag:x%x, index:x%x\n",
6681 				acqe_fip->event_tag, acqe_fip->index);
6682 		rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
6683 						     LPFC_FCOE_FCF_GET_FIRST);
6684 		if (rc)
6685 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6686 					"2547 Issue FCF scan read FCF mailbox "
6687 					"command failed (x%x)\n", rc);
6688 		break;
6689 
6690 	case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL:
6691 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6692 				"2548 FCF Table full count 0x%x tag 0x%x\n",
6693 				bf_get(lpfc_acqe_fip_fcf_count, acqe_fip),
6694 				acqe_fip->event_tag);
6695 		break;
6696 
6697 	case LPFC_FIP_EVENT_TYPE_FCF_DEAD:
6698 		phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
6699 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6700 				"2549 FCF (x%x) disconnected from network, "
6701 				 "tag:x%x\n", acqe_fip->index,
6702 				 acqe_fip->event_tag);
6703 		/*
6704 		 * If we are in the middle of FCF failover process, clear
6705 		 * the corresponding FCF bit in the roundrobin bitmap.
6706 		 */
6707 		spin_lock_irq(&phba->hbalock);
6708 		if ((phba->fcf.fcf_flag & FCF_DISCOVERY) &&
6709 		    (phba->fcf.current_rec.fcf_indx != acqe_fip->index)) {
6710 			spin_unlock_irq(&phba->hbalock);
6711 			/* Update FLOGI FCF failover eligible FCF bmask */
6712 			lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index);
6713 			break;
6714 		}
6715 		spin_unlock_irq(&phba->hbalock);
6716 
6717 		/* If the event is not for currently used fcf do nothing */
6718 		if (phba->fcf.current_rec.fcf_indx != acqe_fip->index)
6719 			break;
6720 
6721 		/*
6722 		 * Otherwise, request the port to rediscover the entire FCF
6723 		 * table for a fast recovery from case that the current FCF
6724 		 * is no longer valid as we are not in the middle of FCF
6725 		 * failover process already.
6726 		 */
6727 		spin_lock_irq(&phba->hbalock);
6728 		/* Mark the fast failover process in progress */
6729 		phba->fcf.fcf_flag |= FCF_DEAD_DISC;
6730 		spin_unlock_irq(&phba->hbalock);
6731 
6732 		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
6733 				"2771 Start FCF fast failover process due to "
6734 				"FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
6735 				"\n", acqe_fip->event_tag, acqe_fip->index);
6736 		rc = lpfc_sli4_redisc_fcf_table(phba);
6737 		if (rc) {
6738 			lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
6739 					LOG_TRACE_EVENT,
6740 					"2772 Issue FCF rediscover mailbox "
6741 					"command failed, fail through to FCF "
6742 					"dead event\n");
6743 			spin_lock_irq(&phba->hbalock);
6744 			phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
6745 			spin_unlock_irq(&phba->hbalock);
6746 			/*
6747 			 * Last resort will fail over by treating this
6748 			 * as a link down to FCF registration.
6749 			 */
6750 			lpfc_sli4_fcf_dead_failthrough(phba);
6751 		} else {
6752 			/* Reset FCF roundrobin bmask for new discovery */
6753 			lpfc_sli4_clear_fcf_rr_bmask(phba);
6754 			/*
6755 			 * Handling fast FCF failover to a DEAD FCF event is
6756 			 * considered equalivant to receiving CVL to all vports.
6757 			 */
6758 			lpfc_sli4_perform_all_vport_cvl(phba);
6759 		}
6760 		break;
6761 	case LPFC_FIP_EVENT_TYPE_CVL:
6762 		phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
6763 		lpfc_printf_log(phba, KERN_ERR,
6764 				LOG_TRACE_EVENT,
6765 			"2718 Clear Virtual Link Received for VPI 0x%x"
6766 			" tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);
6767 
6768 		vport = lpfc_find_vport_by_vpid(phba,
6769 						acqe_fip->index);
6770 		ndlp = lpfc_sli4_perform_vport_cvl(vport);
6771 		if (!ndlp)
6772 			break;
6773 		active_vlink_present = 0;
6774 
6775 		vports = lpfc_create_vport_work_array(phba);
6776 		if (vports) {
6777 			for (i = 0; i <= phba->max_vports && vports[i] != NULL;
6778 					i++) {
6779 				if ((!(vports[i]->fc_flag &
6780 					FC_VPORT_CVL_RCVD)) &&
6781 					(vports[i]->port_state > LPFC_FDISC)) {
6782 					active_vlink_present = 1;
6783 					break;
6784 				}
6785 			}
6786 			lpfc_destroy_vport_work_array(phba, vports);
6787 		}
6788 
6789 		/*
6790 		 * Don't re-instantiate if vport is marked for deletion.
6791 		 * If we are here first then vport_delete is going to wait
6792 		 * for discovery to complete.
6793 		 */
6794 		if (!(vport->load_flag & FC_UNLOADING) &&
6795 					active_vlink_present) {
6796 			/*
6797 			 * If there are other active VLinks present,
6798 			 * re-instantiate the Vlink using FDISC.
6799 			 */
6800 			mod_timer(&ndlp->nlp_delayfunc,
6801 				  jiffies + msecs_to_jiffies(1000));
6802 			spin_lock_irq(&ndlp->lock);
6803 			ndlp->nlp_flag |= NLP_DELAY_TMO;
6804 			spin_unlock_irq(&ndlp->lock);
6805 			ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
6806 			vport->port_state = LPFC_FDISC;
6807 		} else {
6808 			/*
6809 			 * Otherwise, we request port to rediscover
6810 			 * the entire FCF table for a fast recovery
6811 			 * from possible case that the current FCF
6812 			 * is no longer valid if we are not already
6813 			 * in the FCF failover process.
6814 			 */
6815 			spin_lock_irq(&phba->hbalock);
6816 			if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
6817 				spin_unlock_irq(&phba->hbalock);
6818 				break;
6819 			}
6820 			/* Mark the fast failover process in progress */
6821 			phba->fcf.fcf_flag |= FCF_ACVL_DISC;
6822 			spin_unlock_irq(&phba->hbalock);
6823 			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
6824 					LOG_DISCOVERY,
6825 					"2773 Start FCF failover per CVL, "
6826 					"evt_tag:x%x\n", acqe_fip->event_tag);
6827 			rc = lpfc_sli4_redisc_fcf_table(phba);
6828 			if (rc) {
6829 				lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
6830 						LOG_TRACE_EVENT,
6831 						"2774 Issue FCF rediscover "
6832 						"mailbox command failed, "
6833 						"through to CVL event\n");
6834 				spin_lock_irq(&phba->hbalock);
6835 				phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
6836 				spin_unlock_irq(&phba->hbalock);
6837 				/*
6838 				 * Last resort will be re-try on the
6839 				 * the current registered FCF entry.
6840 				 */
6841 				lpfc_retry_pport_discovery(phba);
6842 			} else
6843 				/*
6844 				 * Reset FCF roundrobin bmask for new
6845 				 * discovery.
6846 				 */
6847 				lpfc_sli4_clear_fcf_rr_bmask(phba);
6848 		}
6849 		break;
6850 	default:
6851 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6852 				"0288 Unknown FCoE event type 0x%x event tag "
6853 				"0x%x\n", event_type, acqe_fip->event_tag);
6854 		break;
6855 	}
6856 }
6857 
6858 /**
6859  * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
6860  * @phba: pointer to lpfc hba data structure.
6861  * @acqe_dcbx: pointer to the async dcbx completion queue entry.
6862  *
6863  * This routine is to handle the SLI4 asynchronous dcbx event.
6864  **/
6865 static void
6866 lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
6867 			 struct lpfc_acqe_dcbx *acqe_dcbx)
6868 {
6869 	phba->fc_eventTag = acqe_dcbx->event_tag;
6870 	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6871 			"0290 The SLI4 DCBX asynchronous event is not "
6872 			"handled yet\n");
6873 }
6874 
6875 /**
6876  * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event
6877  * @phba: pointer to lpfc hba data structure.
6878  * @acqe_grp5: pointer to the async grp5 completion queue entry.
6879  *
6880  * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event
6881  * is an asynchronous notified of a logical link speed change.  The Port
6882  * reports the logical link speed in units of 10Mbps.
6883  **/
6884 static void
6885 lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
6886 			 struct lpfc_acqe_grp5 *acqe_grp5)
6887 {
6888 	uint16_t prev_ll_spd;
6889 
6890 	phba->fc_eventTag = acqe_grp5->event_tag;
6891 	phba->fcoe_eventtag = acqe_grp5->event_tag;
6892 	prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
6893 	phba->sli4_hba.link_state.logical_speed =
6894 		(bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)) * 10;
6895 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6896 			"2789 GRP5 Async Event: Updating logical link speed "
6897 			"from %dMbps to %dMbps\n", prev_ll_spd,
6898 			phba->sli4_hba.link_state.logical_speed);
6899 }
6900 
6901 /**
6902  * lpfc_sli4_async_cmstat_evt - Process the asynchronous cmstat event
6903  * @phba: pointer to lpfc hba data structure.
6904  *
6905  * This routine is to handle the SLI4 asynchronous cmstat event. A cmstat event
6906  * is an asynchronous notification of a request to reset CM stats.
6907  **/
6908 static void
6909 lpfc_sli4_async_cmstat_evt(struct lpfc_hba *phba)
6910 {
6911 	if (!phba->cgn_i)
6912 		return;
6913 	lpfc_init_congestion_stat(phba);
6914 }
6915 
6916 /**
6917  * lpfc_cgn_params_val - Validate FW congestion parameters.
6918  * @phba: pointer to lpfc hba data structure.
6919  * @p_cfg_param: pointer to FW provided congestion parameters.
6920  *
6921  * This routine validates the congestion parameters passed
6922  * by the FW to the driver via an ACQE event.
6923  **/
6924 static void
6925 lpfc_cgn_params_val(struct lpfc_hba *phba, struct lpfc_cgn_param *p_cfg_param)
6926 {
6927 	spin_lock_irq(&phba->hbalock);
6928 
6929 	if (!lpfc_rangecheck(p_cfg_param->cgn_param_mode, LPFC_CFG_OFF,
6930 			     LPFC_CFG_MONITOR)) {
6931 		lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT,
6932 				"6225 CMF mode param out of range: %d\n",
6933 				 p_cfg_param->cgn_param_mode);
6934 		p_cfg_param->cgn_param_mode = LPFC_CFG_OFF;
6935 	}
6936 
6937 	spin_unlock_irq(&phba->hbalock);
6938 }
6939 
6940 /**
6941  * lpfc_cgn_params_parse - Process a FW cong parm change event
6942  * @phba: pointer to lpfc hba data structure.
6943  * @p_cgn_param: pointer to a data buffer with the FW cong params.
6944  * @len: the size of pdata in bytes.
6945  *
6946  * This routine validates the congestion management buffer signature
6947  * from the FW, validates the contents and makes corrections for
6948  * valid, in-range values.  If the signature magic is correct and
6949  * after parameter validation, the contents are copied to the driver's
6950  * @phba structure. If the magic is incorrect, an error message is
6951  * logged.
6952  **/
6953 static void
6954 lpfc_cgn_params_parse(struct lpfc_hba *phba,
6955 		      struct lpfc_cgn_param *p_cgn_param, uint32_t len)
6956 {
6957 	struct lpfc_cgn_info *cp;
6958 	uint32_t crc, oldmode;
6959 
6960 	/* Make sure the FW has encoded the correct magic number to
6961 	 * validate the congestion parameter in FW memory.
6962 	 */
6963 	if (p_cgn_param->cgn_param_magic == LPFC_CFG_PARAM_MAGIC_NUM) {
6964 		lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT,
6965 				"4668 FW cgn parm buffer data: "
6966 				"magic 0x%x version %d mode %d "
6967 				"level0 %d level1 %d "
6968 				"level2 %d byte13 %d "
6969 				"byte14 %d byte15 %d "
6970 				"byte11 %d byte12 %d activeMode %d\n",
6971 				p_cgn_param->cgn_param_magic,
6972 				p_cgn_param->cgn_param_version,
6973 				p_cgn_param->cgn_param_mode,
6974 				p_cgn_param->cgn_param_level0,
6975 				p_cgn_param->cgn_param_level1,
6976 				p_cgn_param->cgn_param_level2,
6977 				p_cgn_param->byte13,
6978 				p_cgn_param->byte14,
6979 				p_cgn_param->byte15,
6980 				p_cgn_param->byte11,
6981 				p_cgn_param->byte12,
6982 				phba->cmf_active_mode);
6983 
6984 		oldmode = phba->cmf_active_mode;
6985 
6986 		/* Any parameters out of range are corrected to defaults
6987 		 * by this routine.  No need to fail.
6988 		 */
6989 		lpfc_cgn_params_val(phba, p_cgn_param);
6990 
6991 		/* Parameters are verified, move them into driver storage */
6992 		spin_lock_irq(&phba->hbalock);
6993 		memcpy(&phba->cgn_p, p_cgn_param,
6994 		       sizeof(struct lpfc_cgn_param));
6995 
6996 		/* Update parameters in congestion info buffer now */
6997 		if (phba->cgn_i) {
6998 			cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
6999 			cp->cgn_info_mode = phba->cgn_p.cgn_param_mode;
7000 			cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0;
7001 			cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1;
7002 			cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2;
7003 			crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ,
7004 						  LPFC_CGN_CRC32_SEED);
7005 			cp->cgn_info_crc = cpu_to_le32(crc);
7006 		}
7007 		spin_unlock_irq(&phba->hbalock);
7008 
7009 		phba->cmf_active_mode = phba->cgn_p.cgn_param_mode;
7010 
7011 		switch (oldmode) {
7012 		case LPFC_CFG_OFF:
7013 			if (phba->cgn_p.cgn_param_mode != LPFC_CFG_OFF) {
7014 				/* Turning CMF on */
7015 				lpfc_cmf_start(phba);
7016 
7017 				if (phba->link_state >= LPFC_LINK_UP) {
7018 					phba->cgn_reg_fpin =
7019 						phba->cgn_init_reg_fpin;
7020 					phba->cgn_reg_signal =
7021 						phba->cgn_init_reg_signal;
7022 					lpfc_issue_els_edc(phba->pport, 0);
7023 				}
7024 			}
7025 			break;
7026 		case LPFC_CFG_MANAGED:
7027 			switch (phba->cgn_p.cgn_param_mode) {
7028 			case LPFC_CFG_OFF:
7029 				/* Turning CMF off */
7030 				lpfc_cmf_stop(phba);
7031 				if (phba->link_state >= LPFC_LINK_UP)
7032 					lpfc_issue_els_edc(phba->pport, 0);
7033 				break;
7034 			case LPFC_CFG_MONITOR:
7035 				lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
7036 						"4661 Switch from MANAGED to "
7037 						"`MONITOR mode\n");
7038 				phba->cmf_max_bytes_per_interval =
7039 					phba->cmf_link_byte_count;
7040 
7041 				/* Resume blocked IO - unblock on workqueue */
7042 				queue_work(phba->wq,
7043 					   &phba->unblock_request_work);
7044 				break;
7045 			}
7046 			break;
7047 		case LPFC_CFG_MONITOR:
7048 			switch (phba->cgn_p.cgn_param_mode) {
7049 			case LPFC_CFG_OFF:
7050 				/* Turning CMF off */
7051 				lpfc_cmf_stop(phba);
7052 				if (phba->link_state >= LPFC_LINK_UP)
7053 					lpfc_issue_els_edc(phba->pport, 0);
7054 				break;
7055 			case LPFC_CFG_MANAGED:
7056 				lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
7057 						"4662 Switch from MONITOR to "
7058 						"MANAGED mode\n");
7059 				lpfc_cmf_signal_init(phba);
7060 				break;
7061 			}
7062 			break;
7063 		}
7064 	} else {
7065 		lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
7066 				"4669 FW cgn parm buf wrong magic 0x%x "
7067 				"version %d\n", p_cgn_param->cgn_param_magic,
7068 				p_cgn_param->cgn_param_version);
7069 	}
7070 }
7071 
7072 /**
7073  * lpfc_sli4_cgn_params_read - Read and Validate FW congestion parameters.
7074  * @phba: pointer to lpfc hba data structure.
7075  *
7076  * This routine issues a read_object mailbox command to
7077  * get the congestion management parameters from the FW
7078  * parses it and updates the driver maintained values.
7079  *
7080  * Returns
7081  *  0     if the object was empty
7082  *  -Eval if an error was encountered
7083  *  Count if bytes were read from object
7084  **/
7085 int
7086 lpfc_sli4_cgn_params_read(struct lpfc_hba *phba)
7087 {
7088 	int ret = 0;
7089 	struct lpfc_cgn_param *p_cgn_param = NULL;
7090 	u32 *pdata = NULL;
7091 	u32 len = 0;
7092 
7093 	/* Find out if the FW has a new set of congestion parameters. */
7094 	len = sizeof(struct lpfc_cgn_param);
7095 	pdata = kzalloc(len, GFP_KERNEL);
7096 	ret = lpfc_read_object(phba, (char *)LPFC_PORT_CFG_NAME,
7097 			       pdata, len);
7098 
7099 	/* 0 means no data.  A negative means error.  A positive means
7100 	 * bytes were copied.
7101 	 */
7102 	if (!ret) {
7103 		lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
7104 				"4670 CGN RD OBJ returns no data\n");
7105 		goto rd_obj_err;
7106 	} else if (ret < 0) {
7107 		/* Some error.  Just exit and return it to the caller.*/
7108 		goto rd_obj_err;
7109 	}
7110 
7111 	lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT,
7112 			"6234 READ CGN PARAMS Successful %d\n", len);
7113 
7114 	/* Parse data pointer over len and update the phba congestion
7115 	 * parameters with values passed back.  The receive rate values
7116 	 * may have been altered in FW, but take no action here.
7117 	 */
7118 	p_cgn_param = (struct lpfc_cgn_param *)pdata;
7119 	lpfc_cgn_params_parse(phba, p_cgn_param, len);
7120 
7121  rd_obj_err:
7122 	kfree(pdata);
7123 	return ret;
7124 }
7125 
7126 /**
7127  * lpfc_sli4_cgn_parm_chg_evt - Process a FW congestion param change event
7128  * @phba: pointer to lpfc hba data structure.
7129  *
7130  * The FW generated Async ACQE SLI event calls this routine when
7131  * the event type is an SLI Internal Port Event and the Event Code
7132  * indicates a change to the FW maintained congestion parameters.
7133  *
7134  * This routine executes a Read_Object mailbox call to obtain the
7135  * current congestion parameters maintained in FW and corrects
7136  * the driver's active congestion parameters.
7137  *
7138  * The acqe event is not passed because there is no further data
7139  * required.
7140  *
7141  * Returns nonzero error if event processing encountered an error.
7142  * Zero otherwise for success.
7143  **/
7144 static int
7145 lpfc_sli4_cgn_parm_chg_evt(struct lpfc_hba *phba)
7146 {
7147 	int ret = 0;
7148 
7149 	if (!phba->sli4_hba.pc_sli4_params.cmf) {
7150 		lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
7151 				"4664 Cgn Evt when E2E off. Drop event\n");
7152 		return -EACCES;
7153 	}
7154 
7155 	/* If the event is claiming an empty object, it's ok.  A write
7156 	 * could have cleared it.  Only error is a negative return
7157 	 * status.
7158 	 */
7159 	ret = lpfc_sli4_cgn_params_read(phba);
7160 	if (ret < 0) {
7161 		lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
7162 				"4667 Error reading Cgn Params (%d)\n",
7163 				ret);
7164 	} else if (!ret) {
7165 		lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
7166 				"4673 CGN Event empty object.\n");
7167 	}
7168 	return ret;
7169 }
7170 
7171 /**
7172  * lpfc_sli4_async_event_proc - Process all the pending asynchronous event
7173  * @phba: pointer to lpfc hba data structure.
7174  *
7175  * This routine is invoked by the worker thread to process all the pending
7176  * SLI4 asynchronous events.
7177  **/
7178 void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
7179 {
7180 	struct lpfc_cq_event *cq_event;
7181 	unsigned long iflags;
7182 
7183 	/* First, declare the async event has been handled */
7184 	spin_lock_irqsave(&phba->hbalock, iflags);
7185 	phba->hba_flag &= ~ASYNC_EVENT;
7186 	spin_unlock_irqrestore(&phba->hbalock, iflags);
7187 
7188 	/* Now, handle all the async events */
7189 	spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
7190 	while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
7191 		list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
7192 				 cq_event, struct lpfc_cq_event, list);
7193 		spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock,
7194 				       iflags);
7195 
7196 		/* Process the asynchronous event */
7197 		switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
7198 		case LPFC_TRAILER_CODE_LINK:
7199 			lpfc_sli4_async_link_evt(phba,
7200 						 &cq_event->cqe.acqe_link);
7201 			break;
7202 		case LPFC_TRAILER_CODE_FCOE:
7203 			lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip);
7204 			break;
7205 		case LPFC_TRAILER_CODE_DCBX:
7206 			lpfc_sli4_async_dcbx_evt(phba,
7207 						 &cq_event->cqe.acqe_dcbx);
7208 			break;
7209 		case LPFC_TRAILER_CODE_GRP5:
7210 			lpfc_sli4_async_grp5_evt(phba,
7211 						 &cq_event->cqe.acqe_grp5);
7212 			break;
7213 		case LPFC_TRAILER_CODE_FC:
7214 			lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc);
7215 			break;
7216 		case LPFC_TRAILER_CODE_SLI:
7217 			lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli);
7218 			break;
7219 		case LPFC_TRAILER_CODE_CMSTAT:
7220 			lpfc_sli4_async_cmstat_evt(phba);
7221 			break;
7222 		default:
7223 			lpfc_printf_log(phba, KERN_ERR,
7224 					LOG_TRACE_EVENT,
7225 					"1804 Invalid asynchronous event code: "
7226 					"x%x\n", bf_get(lpfc_trailer_code,
7227 					&cq_event->cqe.mcqe_cmpl));
7228 			break;
7229 		}
7230 
7231 		/* Free the completion event processed to the free pool */
7232 		lpfc_sli4_cq_event_release(phba, cq_event);
7233 		spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
7234 	}
7235 	spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
7236 }
7237 
7238 /**
7239  * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
7240  * @phba: pointer to lpfc hba data structure.
7241  *
7242  * This routine is invoked by the worker thread to process FCF table
7243  * rediscovery pending completion event.
7244  **/
7245 void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
7246 {
7247 	int rc;
7248 
7249 	spin_lock_irq(&phba->hbalock);
7250 	/* Clear FCF rediscovery timeout event */
7251 	phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
7252 	/* Clear driver fast failover FCF record flag */
7253 	phba->fcf.failover_rec.flag = 0;
7254 	/* Set state for FCF fast failover */
7255 	phba->fcf.fcf_flag |= FCF_REDISC_FOV;
7256 	spin_unlock_irq(&phba->hbalock);
7257 
7258 	/* Scan FCF table from the first entry to re-discover SAN */
7259 	lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
7260 			"2777 Start post-quiescent FCF table scan\n");
7261 	rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
7262 	if (rc)
7263 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7264 				"2747 Issue FCF scan read FCF mailbox "
7265 				"command failed 0x%x\n", rc);
7266 }
7267 
7268 /**
7269  * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
7270  * @phba: pointer to lpfc hba data structure.
7271  * @dev_grp: The HBA PCI-Device group number.
7272  *
7273  * This routine is invoked to set up the per HBA PCI-Device group function
7274  * API jump table entries.
7275  *
7276  * Return: 0 if success, otherwise -ENODEV
7277  **/
7278 int
7279 lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
7280 {
7281 	int rc;
7282 
7283 	/* Set up lpfc PCI-device group */
7284 	phba->pci_dev_grp = dev_grp;
7285 
7286 	/* The LPFC_PCI_DEV_OC uses SLI4 */
7287 	if (dev_grp == LPFC_PCI_DEV_OC)
7288 		phba->sli_rev = LPFC_SLI_REV4;
7289 
7290 	/* Set up device INIT API function jump table */
7291 	rc = lpfc_init_api_table_setup(phba, dev_grp);
7292 	if (rc)
7293 		return -ENODEV;
7294 	/* Set up SCSI API function jump table */
7295 	rc = lpfc_scsi_api_table_setup(phba, dev_grp);
7296 	if (rc)
7297 		return -ENODEV;
7298 	/* Set up SLI API function jump table */
7299 	rc = lpfc_sli_api_table_setup(phba, dev_grp);
7300 	if (rc)
7301 		return -ENODEV;
7302 	/* Set up MBOX API function jump table */
7303 	rc = lpfc_mbox_api_table_setup(phba, dev_grp);
7304 	if (rc)
7305 		return -ENODEV;
7306 
7307 	return 0;
7308 }
7309 
7310 /**
7311  * lpfc_log_intr_mode - Log the active interrupt mode
7312  * @phba: pointer to lpfc hba data structure.
7313  * @intr_mode: active interrupt mode adopted.
7314  *
7315  * This routine it invoked to log the currently used active interrupt mode
7316  * to the device.
7317  **/
7318 static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
7319 {
7320 	switch (intr_mode) {
7321 	case 0:
7322 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7323 				"0470 Enable INTx interrupt mode.\n");
7324 		break;
7325 	case 1:
7326 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7327 				"0481 Enabled MSI interrupt mode.\n");
7328 		break;
7329 	case 2:
7330 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7331 				"0480 Enabled MSI-X interrupt mode.\n");
7332 		break;
7333 	default:
7334 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7335 				"0482 Illegal interrupt mode.\n");
7336 		break;
7337 	}
7338 	return;
7339 }
7340 
7341 /**
7342  * lpfc_enable_pci_dev - Enable a generic PCI device.
7343  * @phba: pointer to lpfc hba data structure.
7344  *
7345  * This routine is invoked to enable the PCI device that is common to all
7346  * PCI devices.
7347  *
7348  * Return codes
7349  * 	0 - successful
7350  * 	other values - error
7351  **/
7352 static int
7353 lpfc_enable_pci_dev(struct lpfc_hba *phba)
7354 {
7355 	struct pci_dev *pdev;
7356 
7357 	/* Obtain PCI device reference */
7358 	if (!phba->pcidev)
7359 		goto out_error;
7360 	else
7361 		pdev = phba->pcidev;
7362 	/* Enable PCI device */
7363 	if (pci_enable_device_mem(pdev))
7364 		goto out_error;
7365 	/* Request PCI resource for the device */
7366 	if (pci_request_mem_regions(pdev, LPFC_DRIVER_NAME))
7367 		goto out_disable_device;
7368 	/* Set up device as PCI master and save state for EEH */
7369 	pci_set_master(pdev);
7370 	pci_try_set_mwi(pdev);
7371 	pci_save_state(pdev);
7372 
7373 	/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
7374 	if (pci_is_pcie(pdev))
7375 		pdev->needs_freset = 1;
7376 
7377 	return 0;
7378 
7379 out_disable_device:
7380 	pci_disable_device(pdev);
7381 out_error:
7382 	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7383 			"1401 Failed to enable pci device\n");
7384 	return -ENODEV;
7385 }
7386 
7387 /**
7388  * lpfc_disable_pci_dev - Disable a generic PCI device.
7389  * @phba: pointer to lpfc hba data structure.
7390  *
7391  * This routine is invoked to disable the PCI device that is common to all
7392  * PCI devices.
7393  **/
7394 static void
7395 lpfc_disable_pci_dev(struct lpfc_hba *phba)
7396 {
7397 	struct pci_dev *pdev;
7398 
7399 	/* Obtain PCI device reference */
7400 	if (!phba->pcidev)
7401 		return;
7402 	else
7403 		pdev = phba->pcidev;
7404 	/* Release PCI resource and disable PCI device */
7405 	pci_release_mem_regions(pdev);
7406 	pci_disable_device(pdev);
7407 
7408 	return;
7409 }
7410 
7411 /**
7412  * lpfc_reset_hba - Reset a hba
7413  * @phba: pointer to lpfc hba data structure.
7414  *
7415  * This routine is invoked to reset a hba device. It brings the HBA
7416  * offline, performs a board restart, and then brings the board back
7417  * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up
7418  * on outstanding mailbox commands.
7419  **/
7420 void
7421 lpfc_reset_hba(struct lpfc_hba *phba)
7422 {
7423 	/* If resets are disabled then set error state and return. */
7424 	if (!phba->cfg_enable_hba_reset) {
7425 		phba->link_state = LPFC_HBA_ERROR;
7426 		return;
7427 	}
7428 
7429 	/* If not LPFC_SLI_ACTIVE, force all IO to be flushed */
7430 	if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) {
7431 		lpfc_offline_prep(phba, LPFC_MBX_WAIT);
7432 	} else {
7433 		lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
7434 		lpfc_sli_flush_io_rings(phba);
7435 	}
7436 	lpfc_offline(phba);
7437 	lpfc_sli_brdrestart(phba);
7438 	lpfc_online(phba);
7439 	lpfc_unblock_mgmt_io(phba);
7440 }
7441 
7442 /**
7443  * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions
7444  * @phba: pointer to lpfc hba data structure.
7445  *
7446  * This function enables the PCI SR-IOV virtual functions to a physical
7447  * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to
7448  * enable the number of virtual functions to the physical function. As
7449  * not all devices support SR-IOV, the return code from the pci_enable_sriov()
7450  * API call does not considered as an error condition for most of the device.
7451  **/
7452 uint16_t
7453 lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba)
7454 {
7455 	struct pci_dev *pdev = phba->pcidev;
7456 	uint16_t nr_virtfn;
7457 	int pos;
7458 
7459 	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
7460 	if (pos == 0)
7461 		return 0;
7462 
7463 	pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn);
7464 	return nr_virtfn;
7465 }
7466 
7467 /**
7468  * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions
7469  * @phba: pointer to lpfc hba data structure.
7470  * @nr_vfn: number of virtual functions to be enabled.
7471  *
7472  * This function enables the PCI SR-IOV virtual functions to a physical
7473  * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to
7474  * enable the number of virtual functions to the physical function. As
7475  * not all devices support SR-IOV, the return code from the pci_enable_sriov()
7476  * API call does not considered as an error condition for most of the device.
7477  **/
7478 int
7479 lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
7480 {
7481 	struct pci_dev *pdev = phba->pcidev;
7482 	uint16_t max_nr_vfn;
7483 	int rc;
7484 
7485 	max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba);
7486 	if (nr_vfn > max_nr_vfn) {
7487 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7488 				"3057 Requested vfs (%d) greater than "
7489 				"supported vfs (%d)", nr_vfn, max_nr_vfn);
7490 		return -EINVAL;
7491 	}
7492 
7493 	rc = pci_enable_sriov(pdev, nr_vfn);
7494 	if (rc) {
7495 		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7496 				"2806 Failed to enable sriov on this device "
7497 				"with vfn number nr_vf:%d, rc:%d\n",
7498 				nr_vfn, rc);
7499 	} else
7500 		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7501 				"2807 Successful enable sriov on this device "
7502 				"with vfn number nr_vf:%d\n", nr_vfn);
7503 	return rc;
7504 }
7505 
7506 static void
7507 lpfc_unblock_requests_work(struct work_struct *work)
7508 {
7509 	struct lpfc_hba *phba = container_of(work, struct lpfc_hba,
7510 					     unblock_request_work);
7511 
7512 	lpfc_unblock_requests(phba);
7513 }
7514 
7515 /**
7516  * lpfc_setup_driver_resource_phase1 - Phase1 etup driver internal resources.
7517  * @phba: pointer to lpfc hba data structure.
7518  *
7519  * This routine is invoked to set up the driver internal resources before the
7520  * device specific resource setup to support the HBA device it attached to.
7521  *
7522  * Return codes
7523  *	0 - successful
7524  *	other values - error
7525  **/
7526 static int
7527 lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
7528 {
7529 	struct lpfc_sli *psli = &phba->sli;
7530 
7531 	/*
7532 	 * Driver resources common to all SLI revisions
7533 	 */
7534 	atomic_set(&phba->fast_event_count, 0);
7535 	atomic_set(&phba->dbg_log_idx, 0);
7536 	atomic_set(&phba->dbg_log_cnt, 0);
7537 	atomic_set(&phba->dbg_log_dmping, 0);
7538 	spin_lock_init(&phba->hbalock);
7539 
7540 	/* Initialize port_list spinlock */
7541 	spin_lock_init(&phba->port_list_lock);
7542 	INIT_LIST_HEAD(&phba->port_list);
7543 
7544 	INIT_LIST_HEAD(&phba->work_list);
7545 	init_waitqueue_head(&phba->wait_4_mlo_m_q);
7546 
7547 	/* Initialize the wait queue head for the kernel thread */
7548 	init_waitqueue_head(&phba->work_waitq);
7549 
7550 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7551 			"1403 Protocols supported %s %s %s\n",
7552 			((phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) ?
7553 				"SCSI" : " "),
7554 			((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) ?
7555 				"NVME" : " "),
7556 			(phba->nvmet_support ? "NVMET" : " "));
7557 
7558 	/* Initialize the IO buffer list used by driver for SLI3 SCSI */
7559 	spin_lock_init(&phba->scsi_buf_list_get_lock);
7560 	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get);
7561 	spin_lock_init(&phba->scsi_buf_list_put_lock);
7562 	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
7563 
7564 	/* Initialize the fabric iocb list */
7565 	INIT_LIST_HEAD(&phba->fabric_iocb_list);
7566 
7567 	/* Initialize list to save ELS buffers */
7568 	INIT_LIST_HEAD(&phba->elsbuf);
7569 
7570 	/* Initialize FCF connection rec list */
7571 	INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
7572 
7573 	/* Initialize OAS configuration list */
7574 	spin_lock_init(&phba->devicelock);
7575 	INIT_LIST_HEAD(&phba->luns);
7576 
7577 	/* MBOX heartbeat timer */
7578 	timer_setup(&psli->mbox_tmo, lpfc_mbox_timeout, 0);
7579 	/* Fabric block timer */
7580 	timer_setup(&phba->fabric_block_timer, lpfc_fabric_block_timeout, 0);
7581 	/* EA polling mode timer */
7582 	timer_setup(&phba->eratt_poll, lpfc_poll_eratt, 0);
7583 	/* Heartbeat timer */
7584 	timer_setup(&phba->hb_tmofunc, lpfc_hb_timeout, 0);
7585 
7586 	INIT_DELAYED_WORK(&phba->eq_delay_work, lpfc_hb_eq_delay_work);
7587 
7588 	INIT_DELAYED_WORK(&phba->idle_stat_delay_work,
7589 			  lpfc_idle_stat_delay_work);
7590 	INIT_WORK(&phba->unblock_request_work, lpfc_unblock_requests_work);
7591 	return 0;
7592 }
7593 
7594 /**
7595  * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev
7596  * @phba: pointer to lpfc hba data structure.
7597  *
7598  * This routine is invoked to set up the driver internal resources specific to
7599  * support the SLI-3 HBA device it attached to.
7600  *
7601  * Return codes
7602  * 0 - successful
7603  * other values - error
7604  **/
7605 static int
7606 lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
7607 {
7608 	int rc, entry_sz;
7609 
7610 	/*
7611 	 * Initialize timers used by driver
7612 	 */
7613 
7614 	/* FCP polling mode timer */
7615 	timer_setup(&phba->fcp_poll_timer, lpfc_poll_timeout, 0);
7616 
7617 	/* Host attention work mask setup */
7618 	phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
7619 	phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
7620 
7621 	/* Get all the module params for configuring this host */
7622 	lpfc_get_cfgparam(phba);
7623 	/* Set up phase-1 common device driver resources */
7624 
7625 	rc = lpfc_setup_driver_resource_phase1(phba);
7626 	if (rc)
7627 		return -ENODEV;
7628 
7629 	if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
7630 		phba->menlo_flag |= HBA_MENLO_SUPPORT;
7631 		/* check for menlo minimum sg count */
7632 		if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT)
7633 			phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
7634 	}
7635 
7636 	if (!phba->sli.sli3_ring)
7637 		phba->sli.sli3_ring = kcalloc(LPFC_SLI3_MAX_RING,
7638 					      sizeof(struct lpfc_sli_ring),
7639 					      GFP_KERNEL);
7640 	if (!phba->sli.sli3_ring)
7641 		return -ENOMEM;
7642 
7643 	/*
7644 	 * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size
7645 	 * used to create the sg_dma_buf_pool must be dynamically calculated.
7646 	 */
7647 
7648 	if (phba->sli_rev == LPFC_SLI_REV4)
7649 		entry_sz = sizeof(struct sli4_sge);
7650 	else
7651 		entry_sz = sizeof(struct ulp_bde64);
7652 
7653 	/* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */
7654 	if (phba->cfg_enable_bg) {
7655 		/*
7656 		 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd,
7657 		 * the FCP rsp, and a BDE for each. Sice we have no control
7658 		 * over how many protection data segments the SCSI Layer
7659 		 * will hand us (ie: there could be one for every block
7660 		 * in the IO), we just allocate enough BDEs to accomidate
7661 		 * our max amount and we need to limit lpfc_sg_seg_cnt to
7662 		 * minimize the risk of running out.
7663 		 */
7664 		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
7665 			sizeof(struct fcp_rsp) +
7666 			(LPFC_MAX_SG_SEG_CNT * entry_sz);
7667 
7668 		if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF)
7669 			phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF;
7670 
7671 		/* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */
7672 		phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT;
7673 	} else {
7674 		/*
7675 		 * The scsi_buf for a regular I/O will hold the FCP cmnd,
7676 		 * the FCP rsp, a BDE for each, and a BDE for up to
7677 		 * cfg_sg_seg_cnt data segments.
7678 		 */
7679 		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
7680 			sizeof(struct fcp_rsp) +
7681 			((phba->cfg_sg_seg_cnt + 2) * entry_sz);
7682 
7683 		/* Total BDEs in BPL for scsi_sg_list */
7684 		phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
7685 	}
7686 
7687 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
7688 			"9088 INIT sg_tablesize:%d dmabuf_size:%d total_bde:%d\n",
7689 			phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
7690 			phba->cfg_total_seg_cnt);
7691 
7692 	phba->max_vpi = LPFC_MAX_VPI;
7693 	/* This will be set to correct value after config_port mbox */
7694 	phba->max_vports = 0;
7695 
7696 	/*
7697 	 * Initialize the SLI Layer to run with lpfc HBAs.
7698 	 */
7699 	lpfc_sli_setup(phba);
7700 	lpfc_sli_queue_init(phba);
7701 
7702 	/* Allocate device driver memory */
7703 	if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
7704 		return -ENOMEM;
7705 
7706 	phba->lpfc_sg_dma_buf_pool =
7707 		dma_pool_create("lpfc_sg_dma_buf_pool",
7708 				&phba->pcidev->dev, phba->cfg_sg_dma_buf_size,
7709 				BPL_ALIGN_SZ, 0);
7710 
7711 	if (!phba->lpfc_sg_dma_buf_pool)
7712 		goto fail_free_mem;
7713 
7714 	phba->lpfc_cmd_rsp_buf_pool =
7715 			dma_pool_create("lpfc_cmd_rsp_buf_pool",
7716 					&phba->pcidev->dev,
7717 					sizeof(struct fcp_cmnd) +
7718 					sizeof(struct fcp_rsp),
7719 					BPL_ALIGN_SZ, 0);
7720 
7721 	if (!phba->lpfc_cmd_rsp_buf_pool)
7722 		goto fail_free_dma_buf_pool;
7723 
7724 	/*
7725 	 * Enable sr-iov virtual functions if supported and configured
7726 	 * through the module parameter.
7727 	 */
7728 	if (phba->cfg_sriov_nr_virtfn > 0) {
7729 		rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
7730 						 phba->cfg_sriov_nr_virtfn);
7731 		if (rc) {
7732 			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7733 					"2808 Requested number of SR-IOV "
7734 					"virtual functions (%d) is not "
7735 					"supported\n",
7736 					phba->cfg_sriov_nr_virtfn);
7737 			phba->cfg_sriov_nr_virtfn = 0;
7738 		}
7739 	}
7740 
7741 	return 0;
7742 
7743 fail_free_dma_buf_pool:
7744 	dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
7745 	phba->lpfc_sg_dma_buf_pool = NULL;
7746 fail_free_mem:
7747 	lpfc_mem_free(phba);
7748 	return -ENOMEM;
7749 }
7750 
7751 /**
7752  * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
7753  * @phba: pointer to lpfc hba data structure.
7754  *
7755  * This routine is invoked to unset the driver internal resources set up
7756  * specific for supporting the SLI-3 HBA device it attached to.
7757  **/
7758 static void
7759 lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
7760 {
7761 	/* Free device driver memory allocated */
7762 	lpfc_mem_free_all(phba);
7763 
7764 	return;
7765 }
7766 
7767 /**
7768  * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
7769  * @phba: pointer to lpfc hba data structure.
7770  *
7771  * This routine is invoked to set up the driver internal resources specific to
7772  * support the SLI-4 HBA device it attached to.
7773  *
7774  * Return codes
7775  * 	0 - successful
7776  * 	other values - error
7777  **/
7778 static int
7779 lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
7780 {
7781 	LPFC_MBOXQ_t *mboxq;
7782 	MAILBOX_t *mb;
7783 	int rc, i, max_buf_size;
7784 	int longs;
7785 	int extra;
7786 	uint64_t wwn;
7787 	u32 if_type;
7788 	u32 if_fam;
7789 
7790 	phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
7791 	phba->sli4_hba.num_possible_cpu = cpumask_last(cpu_possible_mask) + 1;
7792 	phba->sli4_hba.curr_disp_cpu = 0;
7793 
7794 	/* Get all the module params for configuring this host */
7795 	lpfc_get_cfgparam(phba);
7796 
7797 	/* Set up phase-1 common device driver resources */
7798 	rc = lpfc_setup_driver_resource_phase1(phba);
7799 	if (rc)
7800 		return -ENODEV;
7801 
7802 	/* Before proceed, wait for POST done and device ready */
7803 	rc = lpfc_sli4_post_status_check(phba);
7804 	if (rc)
7805 		return -ENODEV;
7806 
7807 	/* Allocate all driver workqueues here */
7808 
7809 	/* The lpfc_wq workqueue for deferred irq use */
7810 	phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0);
7811 
7812 	/*
7813 	 * Initialize timers used by driver
7814 	 */
7815 
7816 	timer_setup(&phba->rrq_tmr, lpfc_rrq_timeout, 0);
7817 
7818 	/* FCF rediscover timer */
7819 	timer_setup(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo, 0);
7820 
7821 	/* CMF congestion timer */
7822 	hrtimer_init(&phba->cmf_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
7823 	phba->cmf_timer.function = lpfc_cmf_timer;
7824 
7825 	/*
7826 	 * Control structure for handling external multi-buffer mailbox
7827 	 * command pass-through.
7828 	 */
7829 	memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0,
7830 		sizeof(struct lpfc_mbox_ext_buf_ctx));
7831 	INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
7832 
7833 	phba->max_vpi = LPFC_MAX_VPI;
7834 
7835 	/* This will be set to correct value after the read_config mbox */
7836 	phba->max_vports = 0;
7837 
7838 	/* Program the default value of vlan_id and fc_map */
7839 	phba->valid_vlan = 0;
7840 	phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
7841 	phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
7842 	phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
7843 
7844 	/*
7845 	 * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands
7846 	 * we will associate a new ring, for each EQ/CQ/WQ tuple.
7847 	 * The WQ create will allocate the ring.
7848 	 */
7849 
7850 	/* Initialize buffer queue management fields */
7851 	INIT_LIST_HEAD(&phba->hbqs[LPFC_ELS_HBQ].hbq_buffer_list);
7852 	phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
7853 	phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
7854 
7855 	/* for VMID idle timeout if VMID is enabled */
7856 	if (lpfc_is_vmid_enabled(phba))
7857 		timer_setup(&phba->inactive_vmid_poll, lpfc_vmid_poll, 0);
7858 
7859 	/*
7860 	 * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
7861 	 */
7862 	/* Initialize the Abort buffer list used by driver */
7863 	spin_lock_init(&phba->sli4_hba.abts_io_buf_list_lock);
7864 	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_io_buf_list);
7865 
7866 	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
7867 		/* Initialize the Abort nvme buffer list used by driver */
7868 		spin_lock_init(&phba->sli4_hba.abts_nvmet_buf_list_lock);
7869 		INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
7870 		INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list);
7871 		spin_lock_init(&phba->sli4_hba.t_active_list_lock);
7872 		INIT_LIST_HEAD(&phba->sli4_hba.t_active_ctx_list);
7873 	}
7874 
7875 	/* This abort list used by worker thread */
7876 	spin_lock_init(&phba->sli4_hba.sgl_list_lock);
7877 	spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock);
7878 	spin_lock_init(&phba->sli4_hba.asynce_list_lock);
7879 	spin_lock_init(&phba->sli4_hba.els_xri_abrt_list_lock);
7880 
7881 	/*
7882 	 * Initialize driver internal slow-path work queues
7883 	 */
7884 
7885 	/* Driver internel slow-path CQ Event pool */
7886 	INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
7887 	/* Response IOCB work queue list */
7888 	INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
7889 	/* Asynchronous event CQ Event work queue list */
7890 	INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
7891 	/* Slow-path XRI aborted CQ Event work queue list */
7892 	INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
7893 	/* Receive queue CQ Event work queue list */
7894 	INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
7895 
7896 	/* Initialize extent block lists. */
7897 	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list);
7898 	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list);
7899 	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list);
7900 	INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list);
7901 
7902 	/* Initialize mboxq lists. If the early init routines fail
7903 	 * these lists need to be correctly initialized.
7904 	 */
7905 	INIT_LIST_HEAD(&phba->sli.mboxq);
7906 	INIT_LIST_HEAD(&phba->sli.mboxq_cmpl);
7907 
7908 	/* initialize optic_state to 0xFF */
7909 	phba->sli4_hba.lnk_info.optic_state = 0xff;
7910 
7911 	/* Allocate device driver memory */
7912 	rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
7913 	if (rc)
7914 		goto out_destroy_workqueue;
7915 
7916 	/* IF Type 2 ports get initialized now. */
7917 	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
7918 	    LPFC_SLI_INTF_IF_TYPE_2) {
7919 		rc = lpfc_pci_function_reset(phba);
7920 		if (unlikely(rc)) {
7921 			rc = -ENODEV;
7922 			goto out_free_mem;
7923 		}
7924 		phba->temp_sensor_support = 1;
7925 	}
7926 
7927 	/* Create the bootstrap mailbox command */
7928 	rc = lpfc_create_bootstrap_mbox(phba);
7929 	if (unlikely(rc))
7930 		goto out_free_mem;
7931 
7932 	/* Set up the host's endian order with the device. */
7933 	rc = lpfc_setup_endian_order(phba);
7934 	if (unlikely(rc))
7935 		goto out_free_bsmbx;
7936 
7937 	/* Set up the hba's configuration parameters. */
7938 	rc = lpfc_sli4_read_config(phba);
7939 	if (unlikely(rc))
7940 		goto out_free_bsmbx;
7941 	rc = lpfc_mem_alloc_active_rrq_pool_s4(phba);
7942 	if (unlikely(rc))
7943 		goto out_free_bsmbx;
7944 
7945 	/* IF Type 0 ports get initialized now. */
7946 	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
7947 	    LPFC_SLI_INTF_IF_TYPE_0) {
7948 		rc = lpfc_pci_function_reset(phba);
7949 		if (unlikely(rc))
7950 			goto out_free_bsmbx;
7951 	}
7952 
7953 	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
7954 						       GFP_KERNEL);
7955 	if (!mboxq) {
7956 		rc = -ENOMEM;
7957 		goto out_free_bsmbx;
7958 	}
7959 
7960 	/* Check for NVMET being configured */
7961 	phba->nvmet_support = 0;
7962 	if (lpfc_enable_nvmet_cnt) {
7963 
7964 		/* First get WWN of HBA instance */
7965 		lpfc_read_nv(phba, mboxq);
7966 		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7967 		if (rc != MBX_SUCCESS) {
7968 			lpfc_printf_log(phba, KERN_ERR,
7969 					LOG_TRACE_EVENT,
7970 					"6016 Mailbox failed , mbxCmd x%x "
7971 					"READ_NV, mbxStatus x%x\n",
7972 					bf_get(lpfc_mqe_command, &mboxq->u.mqe),
7973 					bf_get(lpfc_mqe_status, &mboxq->u.mqe));
7974 			mempool_free(mboxq, phba->mbox_mem_pool);
7975 			rc = -EIO;
7976 			goto out_free_bsmbx;
7977 		}
7978 		mb = &mboxq->u.mb;
7979 		memcpy(&wwn, (char *)mb->un.varRDnvp.nodename,
7980 		       sizeof(uint64_t));
7981 		wwn = cpu_to_be64(wwn);
7982 		phba->sli4_hba.wwnn.u.name = wwn;
7983 		memcpy(&wwn, (char *)mb->un.varRDnvp.portname,
7984 		       sizeof(uint64_t));
7985 		/* wwn is WWPN of HBA instance */
7986 		wwn = cpu_to_be64(wwn);
7987 		phba->sli4_hba.wwpn.u.name = wwn;
7988 
7989 		/* Check to see if it matches any module parameter */
7990 		for (i = 0; i < lpfc_enable_nvmet_cnt; i++) {
7991 			if (wwn == lpfc_enable_nvmet[i]) {
7992 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
7993 				if (lpfc_nvmet_mem_alloc(phba))
7994 					break;
7995 
7996 				phba->nvmet_support = 1; /* a match */
7997 
7998 				lpfc_printf_log(phba, KERN_ERR,
7999 						LOG_TRACE_EVENT,
8000 						"6017 NVME Target %016llx\n",
8001 						wwn);
8002 #else
8003 				lpfc_printf_log(phba, KERN_ERR,
8004 						LOG_TRACE_EVENT,
8005 						"6021 Can't enable NVME Target."
8006 						" NVME_TARGET_FC infrastructure"
8007 						" is not in kernel\n");
8008 #endif
8009 				/* Not supported for NVMET */
8010 				phba->cfg_xri_rebalancing = 0;
8011 				if (phba->irq_chann_mode == NHT_MODE) {
8012 					phba->cfg_irq_chann =
8013 						phba->sli4_hba.num_present_cpu;
8014 					phba->cfg_hdw_queue =
8015 						phba->sli4_hba.num_present_cpu;
8016 					phba->irq_chann_mode = NORMAL_MODE;
8017 				}
8018 				break;
8019 			}
8020 		}
8021 	}
8022 
8023 	lpfc_nvme_mod_param_dep(phba);
8024 
8025 	/*
8026 	 * Get sli4 parameters that override parameters from Port capabilities.
8027 	 * If this call fails, it isn't critical unless the SLI4 parameters come
8028 	 * back in conflict.
8029 	 */
8030 	rc = lpfc_get_sli4_parameters(phba, mboxq);
8031 	if (rc) {
8032 		if_type = bf_get(lpfc_sli_intf_if_type,
8033 				 &phba->sli4_hba.sli_intf);
8034 		if_fam = bf_get(lpfc_sli_intf_sli_family,
8035 				&phba->sli4_hba.sli_intf);
8036 		if (phba->sli4_hba.extents_in_use &&
8037 		    phba->sli4_hba.rpi_hdrs_in_use) {
8038 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8039 					"2999 Unsupported SLI4 Parameters "
8040 					"Extents and RPI headers enabled.\n");
8041 			if (if_type == LPFC_SLI_INTF_IF_TYPE_0 &&
8042 			    if_fam ==  LPFC_SLI_INTF_FAMILY_BE2) {
8043 				mempool_free(mboxq, phba->mbox_mem_pool);
8044 				rc = -EIO;
8045 				goto out_free_bsmbx;
8046 			}
8047 		}
8048 		if (!(if_type == LPFC_SLI_INTF_IF_TYPE_0 &&
8049 		      if_fam == LPFC_SLI_INTF_FAMILY_BE2)) {
8050 			mempool_free(mboxq, phba->mbox_mem_pool);
8051 			rc = -EIO;
8052 			goto out_free_bsmbx;
8053 		}
8054 	}
8055 
8056 	/*
8057 	 * 1 for cmd, 1 for rsp, NVME adds an extra one
8058 	 * for boundary conditions in its max_sgl_segment template.
8059 	 */
8060 	extra = 2;
8061 	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
8062 		extra++;
8063 
8064 	/*
8065 	 * It doesn't matter what family our adapter is in, we are
8066 	 * limited to 2 Pages, 512 SGEs, for our SGL.
8067 	 * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp
8068 	 */
8069 	max_buf_size = (2 * SLI4_PAGE_SIZE);
8070 
8071 	/*
8072 	 * Since lpfc_sg_seg_cnt is module param, the sg_dma_buf_size
8073 	 * used to create the sg_dma_buf_pool must be calculated.
8074 	 */
8075 	if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
8076 		/* Both cfg_enable_bg and cfg_external_dif code paths */
8077 
8078 		/*
8079 		 * The scsi_buf for a T10-DIF I/O holds the FCP cmnd,
8080 		 * the FCP rsp, and a SGE. Sice we have no control
8081 		 * over how many protection segments the SCSI Layer
8082 		 * will hand us (ie: there could be one for every block
8083 		 * in the IO), just allocate enough SGEs to accomidate
8084 		 * our max amount and we need to limit lpfc_sg_seg_cnt
8085 		 * to minimize the risk of running out.
8086 		 */
8087 		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
8088 				sizeof(struct fcp_rsp) + max_buf_size;
8089 
8090 		/* Total SGEs for scsi_sg_list and scsi_sg_prot_list */
8091 		phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT;
8092 
8093 		/*
8094 		 * If supporting DIF, reduce the seg count for scsi to
8095 		 * allow room for the DIF sges.
8096 		 */
8097 		if (phba->cfg_enable_bg &&
8098 		    phba->cfg_sg_seg_cnt > LPFC_MAX_BG_SLI4_SEG_CNT_DIF)
8099 			phba->cfg_scsi_seg_cnt = LPFC_MAX_BG_SLI4_SEG_CNT_DIF;
8100 		else
8101 			phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
8102 
8103 	} else {
8104 		/*
8105 		 * The scsi_buf for a regular I/O holds the FCP cmnd,
8106 		 * the FCP rsp, a SGE for each, and a SGE for up to
8107 		 * cfg_sg_seg_cnt data segments.
8108 		 */
8109 		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
8110 				sizeof(struct fcp_rsp) +
8111 				((phba->cfg_sg_seg_cnt + extra) *
8112 				sizeof(struct sli4_sge));
8113 
8114 		/* Total SGEs for scsi_sg_list */
8115 		phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra;
8116 		phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
8117 
8118 		/*
8119 		 * NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only
8120 		 * need to post 1 page for the SGL.
8121 		 */
8122 	}
8123 
8124 	if (phba->cfg_xpsgl && !phba->nvmet_support)
8125 		phba->cfg_sg_dma_buf_size = LPFC_DEFAULT_XPSGL_SIZE;
8126 	else if (phba->cfg_sg_dma_buf_size  <= LPFC_MIN_SG_SLI4_BUF_SZ)
8127 		phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
8128 	else
8129 		phba->cfg_sg_dma_buf_size =
8130 				SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size);
8131 
8132 	phba->border_sge_num = phba->cfg_sg_dma_buf_size /
8133 			       sizeof(struct sli4_sge);
8134 
8135 	/* Limit to LPFC_MAX_NVME_SEG_CNT for NVME. */
8136 	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
8137 		if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
8138 			lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
8139 					"6300 Reducing NVME sg segment "
8140 					"cnt to %d\n",
8141 					LPFC_MAX_NVME_SEG_CNT);
8142 			phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
8143 		} else
8144 			phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
8145 	}
8146 
8147 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
8148 			"9087 sg_seg_cnt:%d dmabuf_size:%d "
8149 			"total:%d scsi:%d nvme:%d\n",
8150 			phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
8151 			phba->cfg_total_seg_cnt,  phba->cfg_scsi_seg_cnt,
8152 			phba->cfg_nvme_seg_cnt);
8153 
8154 	if (phba->cfg_sg_dma_buf_size < SLI4_PAGE_SIZE)
8155 		i = phba->cfg_sg_dma_buf_size;
8156 	else
8157 		i = SLI4_PAGE_SIZE;
8158 
8159 	phba->lpfc_sg_dma_buf_pool =
8160 			dma_pool_create("lpfc_sg_dma_buf_pool",
8161 					&phba->pcidev->dev,
8162 					phba->cfg_sg_dma_buf_size,
8163 					i, 0);
8164 	if (!phba->lpfc_sg_dma_buf_pool)
8165 		goto out_free_bsmbx;
8166 
8167 	phba->lpfc_cmd_rsp_buf_pool =
8168 			dma_pool_create("lpfc_cmd_rsp_buf_pool",
8169 					&phba->pcidev->dev,
8170 					sizeof(struct fcp_cmnd) +
8171 					sizeof(struct fcp_rsp),
8172 					i, 0);
8173 	if (!phba->lpfc_cmd_rsp_buf_pool)
8174 		goto out_free_sg_dma_buf;
8175 
8176 	mempool_free(mboxq, phba->mbox_mem_pool);
8177 
8178 	/* Verify OAS is supported */
8179 	lpfc_sli4_oas_verify(phba);
8180 
8181 	/* Verify RAS support on adapter */
8182 	lpfc_sli4_ras_init(phba);
8183 
8184 	/* Verify all the SLI4 queues */
8185 	rc = lpfc_sli4_queue_verify(phba);
8186 	if (rc)
8187 		goto out_free_cmd_rsp_buf;
8188 
8189 	/* Create driver internal CQE event pool */
8190 	rc = lpfc_sli4_cq_event_pool_create(phba);
8191 	if (rc)
8192 		goto out_free_cmd_rsp_buf;
8193 
8194 	/* Initialize sgl lists per host */
8195 	lpfc_init_sgl_list(phba);
8196 
8197 	/* Allocate and initialize active sgl array */
8198 	rc = lpfc_init_active_sgl_array(phba);
8199 	if (rc) {
8200 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8201 				"1430 Failed to initialize sgl list.\n");
8202 		goto out_destroy_cq_event_pool;
8203 	}
8204 	rc = lpfc_sli4_init_rpi_hdrs(phba);
8205 	if (rc) {
8206 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8207 				"1432 Failed to initialize rpi headers.\n");
8208 		goto out_free_active_sgl;
8209 	}
8210 
8211 	/* Allocate eligible FCF bmask memory for FCF roundrobin failover */
8212 	longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
8213 	phba->fcf.fcf_rr_bmask = kcalloc(longs, sizeof(unsigned long),
8214 					 GFP_KERNEL);
8215 	if (!phba->fcf.fcf_rr_bmask) {
8216 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8217 				"2759 Failed allocate memory for FCF round "
8218 				"robin failover bmask\n");
8219 		rc = -ENOMEM;
8220 		goto out_remove_rpi_hdrs;
8221 	}
8222 
8223 	phba->sli4_hba.hba_eq_hdl = kcalloc(phba->cfg_irq_chann,
8224 					    sizeof(struct lpfc_hba_eq_hdl),
8225 					    GFP_KERNEL);
8226 	if (!phba->sli4_hba.hba_eq_hdl) {
8227 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8228 				"2572 Failed allocate memory for "
8229 				"fast-path per-EQ handle array\n");
8230 		rc = -ENOMEM;
8231 		goto out_free_fcf_rr_bmask;
8232 	}
8233 
8234 	phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_possible_cpu,
8235 					sizeof(struct lpfc_vector_map_info),
8236 					GFP_KERNEL);
8237 	if (!phba->sli4_hba.cpu_map) {
8238 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8239 				"3327 Failed allocate memory for msi-x "
8240 				"interrupt vector mapping\n");
8241 		rc = -ENOMEM;
8242 		goto out_free_hba_eq_hdl;
8243 	}
8244 
8245 	phba->sli4_hba.eq_info = alloc_percpu(struct lpfc_eq_intr_info);
8246 	if (!phba->sli4_hba.eq_info) {
8247 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8248 				"3321 Failed allocation for per_cpu stats\n");
8249 		rc = -ENOMEM;
8250 		goto out_free_hba_cpu_map;
8251 	}
8252 
8253 	phba->sli4_hba.idle_stat = kcalloc(phba->sli4_hba.num_possible_cpu,
8254 					   sizeof(*phba->sli4_hba.idle_stat),
8255 					   GFP_KERNEL);
8256 	if (!phba->sli4_hba.idle_stat) {
8257 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8258 				"3390 Failed allocation for idle_stat\n");
8259 		rc = -ENOMEM;
8260 		goto out_free_hba_eq_info;
8261 	}
8262 
8263 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
8264 	phba->sli4_hba.c_stat = alloc_percpu(struct lpfc_hdwq_stat);
8265 	if (!phba->sli4_hba.c_stat) {
8266 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8267 				"3332 Failed allocating per cpu hdwq stats\n");
8268 		rc = -ENOMEM;
8269 		goto out_free_hba_idle_stat;
8270 	}
8271 #endif
8272 
8273 	phba->cmf_stat = alloc_percpu(struct lpfc_cgn_stat);
8274 	if (!phba->cmf_stat) {
8275 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8276 				"3331 Failed allocating per cpu cgn stats\n");
8277 		rc = -ENOMEM;
8278 		goto out_free_hba_hdwq_info;
8279 	}
8280 
8281 	/*
8282 	 * Enable sr-iov virtual functions if supported and configured
8283 	 * through the module parameter.
8284 	 */
8285 	if (phba->cfg_sriov_nr_virtfn > 0) {
8286 		rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
8287 						 phba->cfg_sriov_nr_virtfn);
8288 		if (rc) {
8289 			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
8290 					"3020 Requested number of SR-IOV "
8291 					"virtual functions (%d) is not "
8292 					"supported\n",
8293 					phba->cfg_sriov_nr_virtfn);
8294 			phba->cfg_sriov_nr_virtfn = 0;
8295 		}
8296 	}
8297 
8298 	return 0;
8299 
8300 out_free_hba_hdwq_info:
8301 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
8302 	free_percpu(phba->sli4_hba.c_stat);
8303 out_free_hba_idle_stat:
8304 #endif
8305 	kfree(phba->sli4_hba.idle_stat);
8306 out_free_hba_eq_info:
8307 	free_percpu(phba->sli4_hba.eq_info);
8308 out_free_hba_cpu_map:
8309 	kfree(phba->sli4_hba.cpu_map);
8310 out_free_hba_eq_hdl:
8311 	kfree(phba->sli4_hba.hba_eq_hdl);
8312 out_free_fcf_rr_bmask:
8313 	kfree(phba->fcf.fcf_rr_bmask);
8314 out_remove_rpi_hdrs:
8315 	lpfc_sli4_remove_rpi_hdrs(phba);
8316 out_free_active_sgl:
8317 	lpfc_free_active_sgl(phba);
8318 out_destroy_cq_event_pool:
8319 	lpfc_sli4_cq_event_pool_destroy(phba);
8320 out_free_cmd_rsp_buf:
8321 	dma_pool_destroy(phba->lpfc_cmd_rsp_buf_pool);
8322 	phba->lpfc_cmd_rsp_buf_pool = NULL;
8323 out_free_sg_dma_buf:
8324 	dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
8325 	phba->lpfc_sg_dma_buf_pool = NULL;
8326 out_free_bsmbx:
8327 	lpfc_destroy_bootstrap_mbox(phba);
8328 out_free_mem:
8329 	lpfc_mem_free(phba);
8330 out_destroy_workqueue:
8331 	destroy_workqueue(phba->wq);
8332 	phba->wq = NULL;
8333 	return rc;
8334 }
8335 
8336 /**
8337  * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
8338  * @phba: pointer to lpfc hba data structure.
8339  *
8340  * This routine is invoked to unset the driver internal resources set up
8341  * specific for supporting the SLI-4 HBA device it attached to.
8342  **/
8343 static void
8344 lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
8345 {
8346 	struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
8347 
8348 	free_percpu(phba->sli4_hba.eq_info);
8349 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
8350 	free_percpu(phba->sli4_hba.c_stat);
8351 #endif
8352 	free_percpu(phba->cmf_stat);
8353 	kfree(phba->sli4_hba.idle_stat);
8354 
8355 	/* Free memory allocated for msi-x interrupt vector to CPU mapping */
8356 	kfree(phba->sli4_hba.cpu_map);
8357 	phba->sli4_hba.num_possible_cpu = 0;
8358 	phba->sli4_hba.num_present_cpu = 0;
8359 	phba->sli4_hba.curr_disp_cpu = 0;
8360 	cpumask_clear(&phba->sli4_hba.irq_aff_mask);
8361 
8362 	/* Free memory allocated for fast-path work queue handles */
8363 	kfree(phba->sli4_hba.hba_eq_hdl);
8364 
8365 	/* Free the allocated rpi headers. */
8366 	lpfc_sli4_remove_rpi_hdrs(phba);
8367 	lpfc_sli4_remove_rpis(phba);
8368 
8369 	/* Free eligible FCF index bmask */
8370 	kfree(phba->fcf.fcf_rr_bmask);
8371 
8372 	/* Free the ELS sgl list */
8373 	lpfc_free_active_sgl(phba);
8374 	lpfc_free_els_sgl_list(phba);
8375 	lpfc_free_nvmet_sgl_list(phba);
8376 
8377 	/* Free the completion queue EQ event pool */
8378 	lpfc_sli4_cq_event_release_all(phba);
8379 	lpfc_sli4_cq_event_pool_destroy(phba);
8380 
8381 	/* Release resource identifiers. */
8382 	lpfc_sli4_dealloc_resource_identifiers(phba);
8383 
8384 	/* Free the bsmbx region. */
8385 	lpfc_destroy_bootstrap_mbox(phba);
8386 
8387 	/* Free the SLI Layer memory with SLI4 HBAs */
8388 	lpfc_mem_free_all(phba);
8389 
8390 	/* Free the current connect table */
8391 	list_for_each_entry_safe(conn_entry, next_conn_entry,
8392 		&phba->fcf_conn_rec_list, list) {
8393 		list_del_init(&conn_entry->list);
8394 		kfree(conn_entry);
8395 	}
8396 
8397 	return;
8398 }
8399 
8400 /**
8401  * lpfc_init_api_table_setup - Set up init api function jump table
8402  * @phba: The hba struct for which this call is being executed.
8403  * @dev_grp: The HBA PCI-Device group number.
8404  *
8405  * This routine sets up the device INIT interface API function jump table
8406  * in @phba struct.
8407  *
8408  * Returns: 0 - success, -ENODEV - failure.
8409  **/
8410 int
8411 lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
8412 {
8413 	phba->lpfc_hba_init_link = lpfc_hba_init_link;
8414 	phba->lpfc_hba_down_link = lpfc_hba_down_link;
8415 	phba->lpfc_selective_reset = lpfc_selective_reset;
8416 	switch (dev_grp) {
8417 	case LPFC_PCI_DEV_LP:
8418 		phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
8419 		phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
8420 		phba->lpfc_stop_port = lpfc_stop_port_s3;
8421 		break;
8422 	case LPFC_PCI_DEV_OC:
8423 		phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
8424 		phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
8425 		phba->lpfc_stop_port = lpfc_stop_port_s4;
8426 		break;
8427 	default:
8428 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8429 				"1431 Invalid HBA PCI-device group: 0x%x\n",
8430 				dev_grp);
8431 		return -ENODEV;
8432 	}
8433 	return 0;
8434 }
8435 
8436 /**
8437  * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
8438  * @phba: pointer to lpfc hba data structure.
8439  *
8440  * This routine is invoked to set up the driver internal resources after the
8441  * device specific resource setup to support the HBA device it attached to.
8442  *
8443  * Return codes
8444  * 	0 - successful
8445  * 	other values - error
8446  **/
8447 static int
8448 lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
8449 {
8450 	int error;
8451 
8452 	/* Startup the kernel thread for this host adapter. */
8453 	phba->worker_thread = kthread_run(lpfc_do_work, phba,
8454 					  "lpfc_worker_%d", phba->brd_no);
8455 	if (IS_ERR(phba->worker_thread)) {
8456 		error = PTR_ERR(phba->worker_thread);
8457 		return error;
8458 	}
8459 
8460 	return 0;
8461 }
8462 
8463 /**
8464  * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
8465  * @phba: pointer to lpfc hba data structure.
8466  *
8467  * This routine is invoked to unset the driver internal resources set up after
8468  * the device specific resource setup for supporting the HBA device it
8469  * attached to.
8470  **/
8471 static void
8472 lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
8473 {
8474 	if (phba->wq) {
8475 		flush_workqueue(phba->wq);
8476 		destroy_workqueue(phba->wq);
8477 		phba->wq = NULL;
8478 	}
8479 
8480 	/* Stop kernel worker thread */
8481 	if (phba->worker_thread)
8482 		kthread_stop(phba->worker_thread);
8483 }
8484 
8485 /**
8486  * lpfc_free_iocb_list - Free iocb list.
8487  * @phba: pointer to lpfc hba data structure.
8488  *
8489  * This routine is invoked to free the driver's IOCB list and memory.
8490  **/
8491 void
8492 lpfc_free_iocb_list(struct lpfc_hba *phba)
8493 {
8494 	struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
8495 
8496 	spin_lock_irq(&phba->hbalock);
8497 	list_for_each_entry_safe(iocbq_entry, iocbq_next,
8498 				 &phba->lpfc_iocb_list, list) {
8499 		list_del(&iocbq_entry->list);
8500 		kfree(iocbq_entry);
8501 		phba->total_iocbq_bufs--;
8502 	}
8503 	spin_unlock_irq(&phba->hbalock);
8504 
8505 	return;
8506 }
8507 
8508 /**
8509  * lpfc_init_iocb_list - Allocate and initialize iocb list.
8510  * @phba: pointer to lpfc hba data structure.
8511  * @iocb_count: number of requested iocbs
8512  *
8513  * This routine is invoked to allocate and initizlize the driver's IOCB
8514  * list and set up the IOCB tag array accordingly.
8515  *
8516  * Return codes
8517  *	0 - successful
8518  *	other values - error
8519  **/
8520 int
8521 lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
8522 {
8523 	struct lpfc_iocbq *iocbq_entry = NULL;
8524 	uint16_t iotag;
8525 	int i;
8526 
8527 	/* Initialize and populate the iocb list per host.  */
8528 	INIT_LIST_HEAD(&phba->lpfc_iocb_list);
8529 	for (i = 0; i < iocb_count; i++) {
8530 		iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
8531 		if (iocbq_entry == NULL) {
8532 			printk(KERN_ERR "%s: only allocated %d iocbs of "
8533 				"expected %d count. Unloading driver.\n",
8534 				__func__, i, iocb_count);
8535 			goto out_free_iocbq;
8536 		}
8537 
8538 		iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
8539 		if (iotag == 0) {
8540 			kfree(iocbq_entry);
8541 			printk(KERN_ERR "%s: failed to allocate IOTAG. "
8542 				"Unloading driver.\n", __func__);
8543 			goto out_free_iocbq;
8544 		}
8545 		iocbq_entry->sli4_lxritag = NO_XRI;
8546 		iocbq_entry->sli4_xritag = NO_XRI;
8547 
8548 		spin_lock_irq(&phba->hbalock);
8549 		list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
8550 		phba->total_iocbq_bufs++;
8551 		spin_unlock_irq(&phba->hbalock);
8552 	}
8553 
8554 	return 0;
8555 
8556 out_free_iocbq:
8557 	lpfc_free_iocb_list(phba);
8558 
8559 	return -ENOMEM;
8560 }
8561 
8562 /**
8563  * lpfc_free_sgl_list - Free a given sgl list.
8564  * @phba: pointer to lpfc hba data structure.
8565  * @sglq_list: pointer to the head of sgl list.
8566  *
8567  * This routine is invoked to free a give sgl list and memory.
8568  **/
8569 void
8570 lpfc_free_sgl_list(struct lpfc_hba *phba, struct list_head *sglq_list)
8571 {
8572 	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
8573 
8574 	list_for_each_entry_safe(sglq_entry, sglq_next, sglq_list, list) {
8575 		list_del(&sglq_entry->list);
8576 		lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
8577 		kfree(sglq_entry);
8578 	}
8579 }
8580 
8581 /**
8582  * lpfc_free_els_sgl_list - Free els sgl list.
8583  * @phba: pointer to lpfc hba data structure.
8584  *
8585  * This routine is invoked to free the driver's els sgl list and memory.
8586  **/
8587 static void
8588 lpfc_free_els_sgl_list(struct lpfc_hba *phba)
8589 {
8590 	LIST_HEAD(sglq_list);
8591 
8592 	/* Retrieve all els sgls from driver list */
8593 	spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
8594 	list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, &sglq_list);
8595 	spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
8596 
8597 	/* Now free the sgl list */
8598 	lpfc_free_sgl_list(phba, &sglq_list);
8599 }
8600 
8601 /**
8602  * lpfc_free_nvmet_sgl_list - Free nvmet sgl list.
8603  * @phba: pointer to lpfc hba data structure.
8604  *
8605  * This routine is invoked to free the driver's nvmet sgl list and memory.
8606  **/
8607 static void
8608 lpfc_free_nvmet_sgl_list(struct lpfc_hba *phba)
8609 {
8610 	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
8611 	LIST_HEAD(sglq_list);
8612 
8613 	/* Retrieve all nvmet sgls from driver list */
8614 	spin_lock_irq(&phba->hbalock);
8615 	spin_lock(&phba->sli4_hba.sgl_list_lock);
8616 	list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, &sglq_list);
8617 	spin_unlock(&phba->sli4_hba.sgl_list_lock);
8618 	spin_unlock_irq(&phba->hbalock);
8619 
8620 	/* Now free the sgl list */
8621 	list_for_each_entry_safe(sglq_entry, sglq_next, &sglq_list, list) {
8622 		list_del(&sglq_entry->list);
8623 		lpfc_nvmet_buf_free(phba, sglq_entry->virt, sglq_entry->phys);
8624 		kfree(sglq_entry);
8625 	}
8626 
8627 	/* Update the nvmet_xri_cnt to reflect no current sgls.
8628 	 * The next initialization cycle sets the count and allocates
8629 	 * the sgls over again.
8630 	 */
8631 	phba->sli4_hba.nvmet_xri_cnt = 0;
8632 }
8633 
8634 /**
8635  * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
8636  * @phba: pointer to lpfc hba data structure.
8637  *
8638  * This routine is invoked to allocate the driver's active sgl memory.
8639  * This array will hold the sglq_entry's for active IOs.
8640  **/
8641 static int
8642 lpfc_init_active_sgl_array(struct lpfc_hba *phba)
8643 {
8644 	int size;
8645 	size = sizeof(struct lpfc_sglq *);
8646 	size *= phba->sli4_hba.max_cfg_param.max_xri;
8647 
8648 	phba->sli4_hba.lpfc_sglq_active_list =
8649 		kzalloc(size, GFP_KERNEL);
8650 	if (!phba->sli4_hba.lpfc_sglq_active_list)
8651 		return -ENOMEM;
8652 	return 0;
8653 }
8654 
8655 /**
8656  * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
8657  * @phba: pointer to lpfc hba data structure.
8658  *
8659  * This routine is invoked to walk through the array of active sglq entries
8660  * and free all of the resources.
8661  * This is just a place holder for now.
8662  **/
8663 static void
8664 lpfc_free_active_sgl(struct lpfc_hba *phba)
8665 {
8666 	kfree(phba->sli4_hba.lpfc_sglq_active_list);
8667 }
8668 
8669 /**
8670  * lpfc_init_sgl_list - Allocate and initialize sgl list.
8671  * @phba: pointer to lpfc hba data structure.
8672  *
8673  * This routine is invoked to allocate and initizlize the driver's sgl
8674  * list and set up the sgl xritag tag array accordingly.
8675  *
8676  **/
8677 static void
8678 lpfc_init_sgl_list(struct lpfc_hba *phba)
8679 {
8680 	/* Initialize and populate the sglq list per host/VF. */
8681 	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_els_sgl_list);
8682 	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
8683 	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_sgl_list);
8684 	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
8685 
8686 	/* els xri-sgl book keeping */
8687 	phba->sli4_hba.els_xri_cnt = 0;
8688 
8689 	/* nvme xri-buffer book keeping */
8690 	phba->sli4_hba.io_xri_cnt = 0;
8691 }
8692 
8693 /**
8694  * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
8695  * @phba: pointer to lpfc hba data structure.
8696  *
8697  * This routine is invoked to post rpi header templates to the
8698  * port for those SLI4 ports that do not support extents.  This routine
8699  * posts a PAGE_SIZE memory region to the port to hold up to
8700  * PAGE_SIZE modulo 64 rpi context headers.  This is an initialization routine
8701  * and should be called only when interrupts are disabled.
8702  *
8703  * Return codes
8704  * 	0 - successful
8705  *	-ERROR - otherwise.
8706  **/
8707 int
8708 lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
8709 {
8710 	int rc = 0;
8711 	struct lpfc_rpi_hdr *rpi_hdr;
8712 
8713 	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
8714 	if (!phba->sli4_hba.rpi_hdrs_in_use)
8715 		return rc;
8716 	if (phba->sli4_hba.extents_in_use)
8717 		return -EIO;
8718 
8719 	rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
8720 	if (!rpi_hdr) {
8721 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8722 				"0391 Error during rpi post operation\n");
8723 		lpfc_sli4_remove_rpis(phba);
8724 		rc = -ENODEV;
8725 	}
8726 
8727 	return rc;
8728 }
8729 
8730 /**
8731  * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
8732  * @phba: pointer to lpfc hba data structure.
8733  *
8734  * This routine is invoked to allocate a single 4KB memory region to
8735  * support rpis and stores them in the phba.  This single region
8736  * provides support for up to 64 rpis.  The region is used globally
8737  * by the device.
8738  *
8739  * Returns:
8740  *   A valid rpi hdr on success.
8741  *   A NULL pointer on any failure.
8742  **/
8743 struct lpfc_rpi_hdr *
8744 lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
8745 {
8746 	uint16_t rpi_limit, curr_rpi_range;
8747 	struct lpfc_dmabuf *dmabuf;
8748 	struct lpfc_rpi_hdr *rpi_hdr;
8749 
8750 	/*
8751 	 * If the SLI4 port supports extents, posting the rpi header isn't
8752 	 * required.  Set the expected maximum count and let the actual value
8753 	 * get set when extents are fully allocated.
8754 	 */
8755 	if (!phba->sli4_hba.rpi_hdrs_in_use)
8756 		return NULL;
8757 	if (phba->sli4_hba.extents_in_use)
8758 		return NULL;
8759 
8760 	/* The limit on the logical index is just the max_rpi count. */
8761 	rpi_limit = phba->sli4_hba.max_cfg_param.max_rpi;
8762 
8763 	spin_lock_irq(&phba->hbalock);
8764 	/*
8765 	 * Establish the starting RPI in this header block.  The starting
8766 	 * rpi is normalized to a zero base because the physical rpi is
8767 	 * port based.
8768 	 */
8769 	curr_rpi_range = phba->sli4_hba.next_rpi;
8770 	spin_unlock_irq(&phba->hbalock);
8771 
8772 	/* Reached full RPI range */
8773 	if (curr_rpi_range == rpi_limit)
8774 		return NULL;
8775 
8776 	/*
8777 	 * First allocate the protocol header region for the port.  The
8778 	 * port expects a 4KB DMA-mapped memory region that is 4K aligned.
8779 	 */
8780 	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
8781 	if (!dmabuf)
8782 		return NULL;
8783 
8784 	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
8785 					  LPFC_HDR_TEMPLATE_SIZE,
8786 					  &dmabuf->phys, GFP_KERNEL);
8787 	if (!dmabuf->virt) {
8788 		rpi_hdr = NULL;
8789 		goto err_free_dmabuf;
8790 	}
8791 
8792 	if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
8793 		rpi_hdr = NULL;
8794 		goto err_free_coherent;
8795 	}
8796 
8797 	/* Save the rpi header data for cleanup later. */
8798 	rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
8799 	if (!rpi_hdr)
8800 		goto err_free_coherent;
8801 
8802 	rpi_hdr->dmabuf = dmabuf;
8803 	rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
8804 	rpi_hdr->page_count = 1;
8805 	spin_lock_irq(&phba->hbalock);
8806 
8807 	/* The rpi_hdr stores the logical index only. */
8808 	rpi_hdr->start_rpi = curr_rpi_range;
8809 	rpi_hdr->next_rpi = phba->sli4_hba.next_rpi + LPFC_RPI_HDR_COUNT;
8810 	list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
8811 
8812 	spin_unlock_irq(&phba->hbalock);
8813 	return rpi_hdr;
8814 
8815  err_free_coherent:
8816 	dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
8817 			  dmabuf->virt, dmabuf->phys);
8818  err_free_dmabuf:
8819 	kfree(dmabuf);
8820 	return NULL;
8821 }
8822 
8823 /**
8824  * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
8825  * @phba: pointer to lpfc hba data structure.
8826  *
8827  * This routine is invoked to remove all memory resources allocated
8828  * to support rpis for SLI4 ports not supporting extents. This routine
8829  * presumes the caller has released all rpis consumed by fabric or port
8830  * logins and is prepared to have the header pages removed.
8831  **/
8832 void
8833 lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
8834 {
8835 	struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
8836 
8837 	if (!phba->sli4_hba.rpi_hdrs_in_use)
8838 		goto exit;
8839 
8840 	list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
8841 				 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
8842 		list_del(&rpi_hdr->list);
8843 		dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
8844 				  rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
8845 		kfree(rpi_hdr->dmabuf);
8846 		kfree(rpi_hdr);
8847 	}
8848  exit:
8849 	/* There are no rpis available to the port now. */
8850 	phba->sli4_hba.next_rpi = 0;
8851 }
8852 
8853 /**
8854  * lpfc_hba_alloc - Allocate driver hba data structure for a device.
8855  * @pdev: pointer to pci device data structure.
8856  *
8857  * This routine is invoked to allocate the driver hba data structure for an
8858  * HBA device. If the allocation is successful, the phba reference to the
8859  * PCI device data structure is set.
8860  *
8861  * Return codes
8862  *      pointer to @phba - successful
8863  *      NULL - error
8864  **/
8865 static struct lpfc_hba *
8866 lpfc_hba_alloc(struct pci_dev *pdev)
8867 {
8868 	struct lpfc_hba *phba;
8869 
8870 	/* Allocate memory for HBA structure */
8871 	phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
8872 	if (!phba) {
8873 		dev_err(&pdev->dev, "failed to allocate hba struct\n");
8874 		return NULL;
8875 	}
8876 
8877 	/* Set reference to PCI device in HBA structure */
8878 	phba->pcidev = pdev;
8879 
8880 	/* Assign an unused board number */
8881 	phba->brd_no = lpfc_get_instance();
8882 	if (phba->brd_no < 0) {
8883 		kfree(phba);
8884 		return NULL;
8885 	}
8886 	phba->eratt_poll_interval = LPFC_ERATT_POLL_INTERVAL;
8887 
8888 	spin_lock_init(&phba->ct_ev_lock);
8889 	INIT_LIST_HEAD(&phba->ct_ev_waiters);
8890 
8891 	return phba;
8892 }
8893 
8894 /**
8895  * lpfc_hba_free - Free driver hba data structure with a device.
8896  * @phba: pointer to lpfc hba data structure.
8897  *
8898  * This routine is invoked to free the driver hba data structure with an
8899  * HBA device.
8900  **/
8901 static void
8902 lpfc_hba_free(struct lpfc_hba *phba)
8903 {
8904 	if (phba->sli_rev == LPFC_SLI_REV4)
8905 		kfree(phba->sli4_hba.hdwq);
8906 
8907 	/* Release the driver assigned board number */
8908 	idr_remove(&lpfc_hba_index, phba->brd_no);
8909 
8910 	/* Free memory allocated with sli3 rings */
8911 	kfree(phba->sli.sli3_ring);
8912 	phba->sli.sli3_ring = NULL;
8913 
8914 	kfree(phba);
8915 	return;
8916 }
8917 
8918 /**
8919  * lpfc_create_shost - Create hba physical port with associated scsi host.
8920  * @phba: pointer to lpfc hba data structure.
8921  *
8922  * This routine is invoked to create HBA physical port and associate a SCSI
8923  * host with it.
8924  *
8925  * Return codes
8926  *      0 - successful
8927  *      other values - error
8928  **/
8929 static int
8930 lpfc_create_shost(struct lpfc_hba *phba)
8931 {
8932 	struct lpfc_vport *vport;
8933 	struct Scsi_Host  *shost;
8934 
8935 	/* Initialize HBA FC structure */
8936 	phba->fc_edtov = FF_DEF_EDTOV;
8937 	phba->fc_ratov = FF_DEF_RATOV;
8938 	phba->fc_altov = FF_DEF_ALTOV;
8939 	phba->fc_arbtov = FF_DEF_ARBTOV;
8940 
8941 	atomic_set(&phba->sdev_cnt, 0);
8942 	vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
8943 	if (!vport)
8944 		return -ENODEV;
8945 
8946 	shost = lpfc_shost_from_vport(vport);
8947 	phba->pport = vport;
8948 
8949 	if (phba->nvmet_support) {
8950 		/* Only 1 vport (pport) will support NVME target */
8951 		phba->targetport = NULL;
8952 		phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME;
8953 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME_DISC,
8954 				"6076 NVME Target Found\n");
8955 	}
8956 
8957 	lpfc_debugfs_initialize(vport);
8958 	/* Put reference to SCSI host to driver's device private data */
8959 	pci_set_drvdata(phba->pcidev, shost);
8960 
8961 	/*
8962 	 * At this point we are fully registered with PSA. In addition,
8963 	 * any initial discovery should be completed.
8964 	 */
8965 	vport->load_flag |= FC_ALLOW_FDMI;
8966 	if (phba->cfg_enable_SmartSAN ||
8967 	    (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) {
8968 
8969 		/* Setup appropriate attribute masks */
8970 		vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
8971 		if (phba->cfg_enable_SmartSAN)
8972 			vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
8973 		else
8974 			vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
8975 	}
8976 	return 0;
8977 }
8978 
8979 /**
8980  * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
8981  * @phba: pointer to lpfc hba data structure.
8982  *
8983  * This routine is invoked to destroy HBA physical port and the associated
8984  * SCSI host.
8985  **/
8986 static void
8987 lpfc_destroy_shost(struct lpfc_hba *phba)
8988 {
8989 	struct lpfc_vport *vport = phba->pport;
8990 
8991 	/* Destroy physical port that associated with the SCSI host */
8992 	destroy_port(vport);
8993 
8994 	return;
8995 }
8996 
8997 /**
8998  * lpfc_setup_bg - Setup Block guard structures and debug areas.
8999  * @phba: pointer to lpfc hba data structure.
9000  * @shost: the shost to be used to detect Block guard settings.
9001  *
9002  * This routine sets up the local Block guard protocol settings for @shost.
9003  * This routine also allocates memory for debugging bg buffers.
9004  **/
9005 static void
9006 lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
9007 {
9008 	uint32_t old_mask;
9009 	uint32_t old_guard;
9010 
9011 	if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
9012 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9013 				"1478 Registering BlockGuard with the "
9014 				"SCSI layer\n");
9015 
9016 		old_mask = phba->cfg_prot_mask;
9017 		old_guard = phba->cfg_prot_guard;
9018 
9019 		/* Only allow supported values */
9020 		phba->cfg_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION |
9021 			SHOST_DIX_TYPE0_PROTECTION |
9022 			SHOST_DIX_TYPE1_PROTECTION);
9023 		phba->cfg_prot_guard &= (SHOST_DIX_GUARD_IP |
9024 					 SHOST_DIX_GUARD_CRC);
9025 
9026 		/* DIF Type 1 protection for profiles AST1/C1 is end to end */
9027 		if (phba->cfg_prot_mask == SHOST_DIX_TYPE1_PROTECTION)
9028 			phba->cfg_prot_mask |= SHOST_DIF_TYPE1_PROTECTION;
9029 
9030 		if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
9031 			if ((old_mask != phba->cfg_prot_mask) ||
9032 				(old_guard != phba->cfg_prot_guard))
9033 				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9034 					"1475 Registering BlockGuard with the "
9035 					"SCSI layer: mask %d  guard %d\n",
9036 					phba->cfg_prot_mask,
9037 					phba->cfg_prot_guard);
9038 
9039 			scsi_host_set_prot(shost, phba->cfg_prot_mask);
9040 			scsi_host_set_guard(shost, phba->cfg_prot_guard);
9041 		} else
9042 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9043 				"1479 Not Registering BlockGuard with the SCSI "
9044 				"layer, Bad protection parameters: %d %d\n",
9045 				old_mask, old_guard);
9046 	}
9047 }
9048 
9049 /**
9050  * lpfc_post_init_setup - Perform necessary device post initialization setup.
9051  * @phba: pointer to lpfc hba data structure.
9052  *
9053  * This routine is invoked to perform all the necessary post initialization
9054  * setup for the device.
9055  **/
9056 static void
9057 lpfc_post_init_setup(struct lpfc_hba *phba)
9058 {
9059 	struct Scsi_Host  *shost;
9060 	struct lpfc_adapter_event_header adapter_event;
9061 
9062 	/* Get the default values for Model Name and Description */
9063 	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
9064 
9065 	/*
9066 	 * hba setup may have changed the hba_queue_depth so we need to
9067 	 * adjust the value of can_queue.
9068 	 */
9069 	shost = pci_get_drvdata(phba->pcidev);
9070 	shost->can_queue = phba->cfg_hba_queue_depth - 10;
9071 
9072 	lpfc_host_attrib_init(shost);
9073 
9074 	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
9075 		spin_lock_irq(shost->host_lock);
9076 		lpfc_poll_start_timer(phba);
9077 		spin_unlock_irq(shost->host_lock);
9078 	}
9079 
9080 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9081 			"0428 Perform SCSI scan\n");
9082 	/* Send board arrival event to upper layer */
9083 	adapter_event.event_type = FC_REG_ADAPTER_EVENT;
9084 	adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
9085 	fc_host_post_vendor_event(shost, fc_get_event_number(),
9086 				  sizeof(adapter_event),
9087 				  (char *) &adapter_event,
9088 				  LPFC_NL_VENDOR_ID);
9089 	return;
9090 }
9091 
9092 /**
9093  * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
9094  * @phba: pointer to lpfc hba data structure.
9095  *
9096  * This routine is invoked to set up the PCI device memory space for device
9097  * with SLI-3 interface spec.
9098  *
9099  * Return codes
9100  * 	0 - successful
9101  * 	other values - error
9102  **/
9103 static int
9104 lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
9105 {
9106 	struct pci_dev *pdev = phba->pcidev;
9107 	unsigned long bar0map_len, bar2map_len;
9108 	int i, hbq_count;
9109 	void *ptr;
9110 	int error;
9111 
9112 	if (!pdev)
9113 		return -ENODEV;
9114 
9115 	/* Set the device DMA mask size */
9116 	error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
9117 	if (error)
9118 		error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
9119 	if (error)
9120 		return error;
9121 	error = -ENODEV;
9122 
9123 	/* Get the bus address of Bar0 and Bar2 and the number of bytes
9124 	 * required by each mapping.
9125 	 */
9126 	phba->pci_bar0_map = pci_resource_start(pdev, 0);
9127 	bar0map_len = pci_resource_len(pdev, 0);
9128 
9129 	phba->pci_bar2_map = pci_resource_start(pdev, 2);
9130 	bar2map_len = pci_resource_len(pdev, 2);
9131 
9132 	/* Map HBA SLIM to a kernel virtual address. */
9133 	phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
9134 	if (!phba->slim_memmap_p) {
9135 		dev_printk(KERN_ERR, &pdev->dev,
9136 			   "ioremap failed for SLIM memory.\n");
9137 		goto out;
9138 	}
9139 
9140 	/* Map HBA Control Registers to a kernel virtual address. */
9141 	phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
9142 	if (!phba->ctrl_regs_memmap_p) {
9143 		dev_printk(KERN_ERR, &pdev->dev,
9144 			   "ioremap failed for HBA control registers.\n");
9145 		goto out_iounmap_slim;
9146 	}
9147 
9148 	/* Allocate memory for SLI-2 structures */
9149 	phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, SLI2_SLIM_SIZE,
9150 					       &phba->slim2p.phys, GFP_KERNEL);
9151 	if (!phba->slim2p.virt)
9152 		goto out_iounmap;
9153 
9154 	phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
9155 	phba->mbox_ext = (phba->slim2p.virt +
9156 		offsetof(struct lpfc_sli2_slim, mbx_ext_words));
9157 	phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
9158 	phba->IOCBs = (phba->slim2p.virt +
9159 		       offsetof(struct lpfc_sli2_slim, IOCBs));
9160 
9161 	phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
9162 						 lpfc_sli_hbq_size(),
9163 						 &phba->hbqslimp.phys,
9164 						 GFP_KERNEL);
9165 	if (!phba->hbqslimp.virt)
9166 		goto out_free_slim;
9167 
9168 	hbq_count = lpfc_sli_hbq_count();
9169 	ptr = phba->hbqslimp.virt;
9170 	for (i = 0; i < hbq_count; ++i) {
9171 		phba->hbqs[i].hbq_virt = ptr;
9172 		INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
9173 		ptr += (lpfc_hbq_defs[i]->entry_count *
9174 			sizeof(struct lpfc_hbq_entry));
9175 	}
9176 	phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
9177 	phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;
9178 
9179 	memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
9180 
9181 	phba->MBslimaddr = phba->slim_memmap_p;
9182 	phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
9183 	phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
9184 	phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
9185 	phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
9186 
9187 	return 0;
9188 
9189 out_free_slim:
9190 	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
9191 			  phba->slim2p.virt, phba->slim2p.phys);
9192 out_iounmap:
9193 	iounmap(phba->ctrl_regs_memmap_p);
9194 out_iounmap_slim:
9195 	iounmap(phba->slim_memmap_p);
9196 out:
9197 	return error;
9198 }
9199 
9200 /**
9201  * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
9202  * @phba: pointer to lpfc hba data structure.
9203  *
9204  * This routine is invoked to unset the PCI device memory space for device
9205  * with SLI-3 interface spec.
9206  **/
9207 static void
9208 lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
9209 {
9210 	struct pci_dev *pdev;
9211 
9212 	/* Obtain PCI device reference */
9213 	if (!phba->pcidev)
9214 		return;
9215 	else
9216 		pdev = phba->pcidev;
9217 
9218 	/* Free coherent DMA memory allocated */
9219 	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
9220 			  phba->hbqslimp.virt, phba->hbqslimp.phys);
9221 	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
9222 			  phba->slim2p.virt, phba->slim2p.phys);
9223 
9224 	/* I/O memory unmap */
9225 	iounmap(phba->ctrl_regs_memmap_p);
9226 	iounmap(phba->slim_memmap_p);
9227 
9228 	return;
9229 }
9230 
9231 /**
9232  * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
9233  * @phba: pointer to lpfc hba data structure.
9234  *
9235  * This routine is invoked to wait for SLI4 device Power On Self Test (POST)
9236  * done and check status.
9237  *
9238  * Return 0 if successful, otherwise -ENODEV.
9239  **/
9240 int
9241 lpfc_sli4_post_status_check(struct lpfc_hba *phba)
9242 {
9243 	struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg;
9244 	struct lpfc_register reg_data;
9245 	int i, port_error = 0;
9246 	uint32_t if_type;
9247 
9248 	memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
9249 	memset(&reg_data, 0, sizeof(reg_data));
9250 	if (!phba->sli4_hba.PSMPHRregaddr)
9251 		return -ENODEV;
9252 
9253 	/* Wait up to 30 seconds for the SLI Port POST done and ready */
9254 	for (i = 0; i < 3000; i++) {
9255 		if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
9256 			&portsmphr_reg.word0) ||
9257 			(bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) {
9258 			/* Port has a fatal POST error, break out */
9259 			port_error = -ENODEV;
9260 			break;
9261 		}
9262 		if (LPFC_POST_STAGE_PORT_READY ==
9263 		    bf_get(lpfc_port_smphr_port_status, &portsmphr_reg))
9264 			break;
9265 		msleep(10);
9266 	}
9267 
9268 	/*
9269 	 * If there was a port error during POST, then don't proceed with
9270 	 * other register reads as the data may not be valid.  Just exit.
9271 	 */
9272 	if (port_error) {
9273 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9274 			"1408 Port Failed POST - portsmphr=0x%x, "
9275 			"perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, "
9276 			"scr2=x%x, hscratch=x%x, pstatus=x%x\n",
9277 			portsmphr_reg.word0,
9278 			bf_get(lpfc_port_smphr_perr, &portsmphr_reg),
9279 			bf_get(lpfc_port_smphr_sfi, &portsmphr_reg),
9280 			bf_get(lpfc_port_smphr_nip, &portsmphr_reg),
9281 			bf_get(lpfc_port_smphr_ipc, &portsmphr_reg),
9282 			bf_get(lpfc_port_smphr_scr1, &portsmphr_reg),
9283 			bf_get(lpfc_port_smphr_scr2, &portsmphr_reg),
9284 			bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg),
9285 			bf_get(lpfc_port_smphr_port_status, &portsmphr_reg));
9286 	} else {
9287 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9288 				"2534 Device Info: SLIFamily=0x%x, "
9289 				"SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, "
9290 				"SLIHint_2=0x%x, FT=0x%x\n",
9291 				bf_get(lpfc_sli_intf_sli_family,
9292 				       &phba->sli4_hba.sli_intf),
9293 				bf_get(lpfc_sli_intf_slirev,
9294 				       &phba->sli4_hba.sli_intf),
9295 				bf_get(lpfc_sli_intf_if_type,
9296 				       &phba->sli4_hba.sli_intf),
9297 				bf_get(lpfc_sli_intf_sli_hint1,
9298 				       &phba->sli4_hba.sli_intf),
9299 				bf_get(lpfc_sli_intf_sli_hint2,
9300 				       &phba->sli4_hba.sli_intf),
9301 				bf_get(lpfc_sli_intf_func_type,
9302 				       &phba->sli4_hba.sli_intf));
9303 		/*
9304 		 * Check for other Port errors during the initialization
9305 		 * process.  Fail the load if the port did not come up
9306 		 * correctly.
9307 		 */
9308 		if_type = bf_get(lpfc_sli_intf_if_type,
9309 				 &phba->sli4_hba.sli_intf);
9310 		switch (if_type) {
9311 		case LPFC_SLI_INTF_IF_TYPE_0:
9312 			phba->sli4_hba.ue_mask_lo =
9313 			      readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr);
9314 			phba->sli4_hba.ue_mask_hi =
9315 			      readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr);
9316 			uerrlo_reg.word0 =
9317 			      readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
9318 			uerrhi_reg.word0 =
9319 				readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
9320 			if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
9321 			    (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
9322 				lpfc_printf_log(phba, KERN_ERR,
9323 						LOG_TRACE_EVENT,
9324 						"1422 Unrecoverable Error "
9325 						"Detected during POST "
9326 						"uerr_lo_reg=0x%x, "
9327 						"uerr_hi_reg=0x%x, "
9328 						"ue_mask_lo_reg=0x%x, "
9329 						"ue_mask_hi_reg=0x%x\n",
9330 						uerrlo_reg.word0,
9331 						uerrhi_reg.word0,
9332 						phba->sli4_hba.ue_mask_lo,
9333 						phba->sli4_hba.ue_mask_hi);
9334 				port_error = -ENODEV;
9335 			}
9336 			break;
9337 		case LPFC_SLI_INTF_IF_TYPE_2:
9338 		case LPFC_SLI_INTF_IF_TYPE_6:
9339 			/* Final checks.  The port status should be clean. */
9340 			if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
9341 				&reg_data.word0) ||
9342 				(bf_get(lpfc_sliport_status_err, &reg_data) &&
9343 				 !bf_get(lpfc_sliport_status_rn, &reg_data))) {
9344 				phba->work_status[0] =
9345 					readl(phba->sli4_hba.u.if_type2.
9346 					      ERR1regaddr);
9347 				phba->work_status[1] =
9348 					readl(phba->sli4_hba.u.if_type2.
9349 					      ERR2regaddr);
9350 				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9351 					"2888 Unrecoverable port error "
9352 					"following POST: port status reg "
9353 					"0x%x, port_smphr reg 0x%x, "
9354 					"error 1=0x%x, error 2=0x%x\n",
9355 					reg_data.word0,
9356 					portsmphr_reg.word0,
9357 					phba->work_status[0],
9358 					phba->work_status[1]);
9359 				port_error = -ENODEV;
9360 			}
9361 			break;
9362 		case LPFC_SLI_INTF_IF_TYPE_1:
9363 		default:
9364 			break;
9365 		}
9366 	}
9367 	return port_error;
9368 }
9369 
9370 /**
9371  * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
9372  * @phba: pointer to lpfc hba data structure.
9373  * @if_type:  The SLI4 interface type getting configured.
9374  *
9375  * This routine is invoked to set up SLI4 BAR0 PCI config space register
9376  * memory map.
9377  **/
9378 static void
9379 lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
9380 {
9381 	switch (if_type) {
9382 	case LPFC_SLI_INTF_IF_TYPE_0:
9383 		phba->sli4_hba.u.if_type0.UERRLOregaddr =
9384 			phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO;
9385 		phba->sli4_hba.u.if_type0.UERRHIregaddr =
9386 			phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI;
9387 		phba->sli4_hba.u.if_type0.UEMASKLOregaddr =
9388 			phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO;
9389 		phba->sli4_hba.u.if_type0.UEMASKHIregaddr =
9390 			phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI;
9391 		phba->sli4_hba.SLIINTFregaddr =
9392 			phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
9393 		break;
9394 	case LPFC_SLI_INTF_IF_TYPE_2:
9395 		phba->sli4_hba.u.if_type2.EQDregaddr =
9396 			phba->sli4_hba.conf_regs_memmap_p +
9397 						LPFC_CTL_PORT_EQ_DELAY_OFFSET;
9398 		phba->sli4_hba.u.if_type2.ERR1regaddr =
9399 			phba->sli4_hba.conf_regs_memmap_p +
9400 						LPFC_CTL_PORT_ER1_OFFSET;
9401 		phba->sli4_hba.u.if_type2.ERR2regaddr =
9402 			phba->sli4_hba.conf_regs_memmap_p +
9403 						LPFC_CTL_PORT_ER2_OFFSET;
9404 		phba->sli4_hba.u.if_type2.CTRLregaddr =
9405 			phba->sli4_hba.conf_regs_memmap_p +
9406 						LPFC_CTL_PORT_CTL_OFFSET;
9407 		phba->sli4_hba.u.if_type2.STATUSregaddr =
9408 			phba->sli4_hba.conf_regs_memmap_p +
9409 						LPFC_CTL_PORT_STA_OFFSET;
9410 		phba->sli4_hba.SLIINTFregaddr =
9411 			phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
9412 		phba->sli4_hba.PSMPHRregaddr =
9413 			phba->sli4_hba.conf_regs_memmap_p +
9414 						LPFC_CTL_PORT_SEM_OFFSET;
9415 		phba->sli4_hba.RQDBregaddr =
9416 			phba->sli4_hba.conf_regs_memmap_p +
9417 						LPFC_ULP0_RQ_DOORBELL;
9418 		phba->sli4_hba.WQDBregaddr =
9419 			phba->sli4_hba.conf_regs_memmap_p +
9420 						LPFC_ULP0_WQ_DOORBELL;
9421 		phba->sli4_hba.CQDBregaddr =
9422 			phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
9423 		phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
9424 		phba->sli4_hba.MQDBregaddr =
9425 			phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL;
9426 		phba->sli4_hba.BMBXregaddr =
9427 			phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
9428 		break;
9429 	case LPFC_SLI_INTF_IF_TYPE_6:
9430 		phba->sli4_hba.u.if_type2.EQDregaddr =
9431 			phba->sli4_hba.conf_regs_memmap_p +
9432 						LPFC_CTL_PORT_EQ_DELAY_OFFSET;
9433 		phba->sli4_hba.u.if_type2.ERR1regaddr =
9434 			phba->sli4_hba.conf_regs_memmap_p +
9435 						LPFC_CTL_PORT_ER1_OFFSET;
9436 		phba->sli4_hba.u.if_type2.ERR2regaddr =
9437 			phba->sli4_hba.conf_regs_memmap_p +
9438 						LPFC_CTL_PORT_ER2_OFFSET;
9439 		phba->sli4_hba.u.if_type2.CTRLregaddr =
9440 			phba->sli4_hba.conf_regs_memmap_p +
9441 						LPFC_CTL_PORT_CTL_OFFSET;
9442 		phba->sli4_hba.u.if_type2.STATUSregaddr =
9443 			phba->sli4_hba.conf_regs_memmap_p +
9444 						LPFC_CTL_PORT_STA_OFFSET;
9445 		phba->sli4_hba.PSMPHRregaddr =
9446 			phba->sli4_hba.conf_regs_memmap_p +
9447 						LPFC_CTL_PORT_SEM_OFFSET;
9448 		phba->sli4_hba.BMBXregaddr =
9449 			phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
9450 		break;
9451 	case LPFC_SLI_INTF_IF_TYPE_1:
9452 	default:
9453 		dev_printk(KERN_ERR, &phba->pcidev->dev,
9454 			   "FATAL - unsupported SLI4 interface type - %d\n",
9455 			   if_type);
9456 		break;
9457 	}
9458 }
9459 
9460 /**
9461  * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
9462  * @phba: pointer to lpfc hba data structure.
9463  * @if_type: sli if type to operate on.
9464  *
9465  * This routine is invoked to set up SLI4 BAR1 register memory map.
9466  **/
9467 static void
9468 lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
9469 {
9470 	switch (if_type) {
9471 	case LPFC_SLI_INTF_IF_TYPE_0:
9472 		phba->sli4_hba.PSMPHRregaddr =
9473 			phba->sli4_hba.ctrl_regs_memmap_p +
9474 			LPFC_SLIPORT_IF0_SMPHR;
9475 		phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
9476 			LPFC_HST_ISR0;
9477 		phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
9478 			LPFC_HST_IMR0;
9479 		phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
9480 			LPFC_HST_ISCR0;
9481 		break;
9482 	case LPFC_SLI_INTF_IF_TYPE_6:
9483 		phba->sli4_hba.RQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9484 			LPFC_IF6_RQ_DOORBELL;
9485 		phba->sli4_hba.WQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9486 			LPFC_IF6_WQ_DOORBELL;
9487 		phba->sli4_hba.CQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9488 			LPFC_IF6_CQ_DOORBELL;
9489 		phba->sli4_hba.EQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9490 			LPFC_IF6_EQ_DOORBELL;
9491 		phba->sli4_hba.MQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9492 			LPFC_IF6_MQ_DOORBELL;
9493 		break;
9494 	case LPFC_SLI_INTF_IF_TYPE_2:
9495 	case LPFC_SLI_INTF_IF_TYPE_1:
9496 	default:
9497 		dev_err(&phba->pcidev->dev,
9498 			   "FATAL - unsupported SLI4 interface type - %d\n",
9499 			   if_type);
9500 		break;
9501 	}
9502 }
9503 
9504 /**
9505  * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
9506  * @phba: pointer to lpfc hba data structure.
9507  * @vf: virtual function number
9508  *
9509  * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
9510  * based on the given viftual function number, @vf.
9511  *
9512  * Return 0 if successful, otherwise -ENODEV.
9513  **/
9514 static int
9515 lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
9516 {
9517 	if (vf > LPFC_VIR_FUNC_MAX)
9518 		return -ENODEV;
9519 
9520 	phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9521 				vf * LPFC_VFR_PAGE_SIZE +
9522 					LPFC_ULP0_RQ_DOORBELL);
9523 	phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9524 				vf * LPFC_VFR_PAGE_SIZE +
9525 					LPFC_ULP0_WQ_DOORBELL);
9526 	phba->sli4_hba.CQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9527 				vf * LPFC_VFR_PAGE_SIZE +
9528 					LPFC_EQCQ_DOORBELL);
9529 	phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
9530 	phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9531 				vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
9532 	phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9533 				vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
9534 	return 0;
9535 }
9536 
9537 /**
9538  * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
9539  * @phba: pointer to lpfc hba data structure.
9540  *
9541  * This routine is invoked to create the bootstrap mailbox
9542  * region consistent with the SLI-4 interface spec.  This
9543  * routine allocates all memory necessary to communicate
9544  * mailbox commands to the port and sets up all alignment
9545  * needs.  No locks are expected to be held when calling
9546  * this routine.
9547  *
9548  * Return codes
9549  * 	0 - successful
9550  * 	-ENOMEM - could not allocated memory.
9551  **/
9552 static int
9553 lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
9554 {
9555 	uint32_t bmbx_size;
9556 	struct lpfc_dmabuf *dmabuf;
9557 	struct dma_address *dma_address;
9558 	uint32_t pa_addr;
9559 	uint64_t phys_addr;
9560 
9561 	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
9562 	if (!dmabuf)
9563 		return -ENOMEM;
9564 
9565 	/*
9566 	 * The bootstrap mailbox region is comprised of 2 parts
9567 	 * plus an alignment restriction of 16 bytes.
9568 	 */
9569 	bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
9570 	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, bmbx_size,
9571 					  &dmabuf->phys, GFP_KERNEL);
9572 	if (!dmabuf->virt) {
9573 		kfree(dmabuf);
9574 		return -ENOMEM;
9575 	}
9576 
9577 	/*
9578 	 * Initialize the bootstrap mailbox pointers now so that the register
9579 	 * operations are simple later.  The mailbox dma address is required
9580 	 * to be 16-byte aligned.  Also align the virtual memory as each
9581 	 * maibox is copied into the bmbx mailbox region before issuing the
9582 	 * command to the port.
9583 	 */
9584 	phba->sli4_hba.bmbx.dmabuf = dmabuf;
9585 	phba->sli4_hba.bmbx.bmbx_size = bmbx_size;
9586 
9587 	phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
9588 					      LPFC_ALIGN_16_BYTE);
9589 	phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
9590 					      LPFC_ALIGN_16_BYTE);
9591 
9592 	/*
9593 	 * Set the high and low physical addresses now.  The SLI4 alignment
9594 	 * requirement is 16 bytes and the mailbox is posted to the port
9595 	 * as two 30-bit addresses.  The other data is a bit marking whether
9596 	 * the 30-bit address is the high or low address.
9597 	 * Upcast bmbx aphys to 64bits so shift instruction compiles
9598 	 * clean on 32 bit machines.
9599 	 */
9600 	dma_address = &phba->sli4_hba.bmbx.dma_address;
9601 	phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
9602 	pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
9603 	dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
9604 					   LPFC_BMBX_BIT1_ADDR_HI);
9605 
9606 	pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
9607 	dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
9608 					   LPFC_BMBX_BIT1_ADDR_LO);
9609 	return 0;
9610 }
9611 
9612 /**
9613  * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
9614  * @phba: pointer to lpfc hba data structure.
9615  *
9616  * This routine is invoked to teardown the bootstrap mailbox
9617  * region and release all host resources. This routine requires
9618  * the caller to ensure all mailbox commands recovered, no
9619  * additional mailbox comands are sent, and interrupts are disabled
9620  * before calling this routine.
9621  *
9622  **/
9623 static void
9624 lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
9625 {
9626 	dma_free_coherent(&phba->pcidev->dev,
9627 			  phba->sli4_hba.bmbx.bmbx_size,
9628 			  phba->sli4_hba.bmbx.dmabuf->virt,
9629 			  phba->sli4_hba.bmbx.dmabuf->phys);
9630 
9631 	kfree(phba->sli4_hba.bmbx.dmabuf);
9632 	memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
9633 }
9634 
9635 static const char * const lpfc_topo_to_str[] = {
9636 	"Loop then P2P",
9637 	"Loopback",
9638 	"P2P Only",
9639 	"Unsupported",
9640 	"Loop Only",
9641 	"Unsupported",
9642 	"P2P then Loop",
9643 };
9644 
9645 #define	LINK_FLAGS_DEF	0x0
9646 #define	LINK_FLAGS_P2P	0x1
9647 #define	LINK_FLAGS_LOOP	0x2
9648 /**
9649  * lpfc_map_topology - Map the topology read from READ_CONFIG
9650  * @phba: pointer to lpfc hba data structure.
9651  * @rd_config: pointer to read config data
9652  *
9653  * This routine is invoked to map the topology values as read
9654  * from the read config mailbox command. If the persistent
9655  * topology feature is supported, the firmware will provide the
9656  * saved topology information to be used in INIT_LINK
9657  **/
9658 static void
9659 lpfc_map_topology(struct lpfc_hba *phba, struct lpfc_mbx_read_config *rd_config)
9660 {
9661 	u8 ptv, tf, pt;
9662 
9663 	ptv = bf_get(lpfc_mbx_rd_conf_ptv, rd_config);
9664 	tf = bf_get(lpfc_mbx_rd_conf_tf, rd_config);
9665 	pt = bf_get(lpfc_mbx_rd_conf_pt, rd_config);
9666 
9667 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9668 			"2027 Read Config Data : ptv:0x%x, tf:0x%x pt:0x%x",
9669 			 ptv, tf, pt);
9670 	if (!ptv) {
9671 		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
9672 				"2019 FW does not support persistent topology "
9673 				"Using driver parameter defined value [%s]",
9674 				lpfc_topo_to_str[phba->cfg_topology]);
9675 		return;
9676 	}
9677 	/* FW supports persistent topology - override module parameter value */
9678 	phba->hba_flag |= HBA_PERSISTENT_TOPO;
9679 
9680 	/* if ASIC_GEN_NUM >= 0xC) */
9681 	if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
9682 		    LPFC_SLI_INTF_IF_TYPE_6) ||
9683 	    (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
9684 		    LPFC_SLI_INTF_FAMILY_G6)) {
9685 		if (!tf) {
9686 			phba->cfg_topology = ((pt == LINK_FLAGS_LOOP)
9687 					? FLAGS_TOPOLOGY_MODE_LOOP
9688 					: FLAGS_TOPOLOGY_MODE_PT_PT);
9689 		} else {
9690 			phba->hba_flag &= ~HBA_PERSISTENT_TOPO;
9691 		}
9692 	} else { /* G5 */
9693 		if (tf) {
9694 			/* If topology failover set - pt is '0' or '1' */
9695 			phba->cfg_topology = (pt ? FLAGS_TOPOLOGY_MODE_PT_LOOP :
9696 					      FLAGS_TOPOLOGY_MODE_LOOP_PT);
9697 		} else {
9698 			phba->cfg_topology = ((pt == LINK_FLAGS_P2P)
9699 					? FLAGS_TOPOLOGY_MODE_PT_PT
9700 					: FLAGS_TOPOLOGY_MODE_LOOP);
9701 		}
9702 	}
9703 	if (phba->hba_flag & HBA_PERSISTENT_TOPO) {
9704 		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9705 				"2020 Using persistent topology value [%s]",
9706 				lpfc_topo_to_str[phba->cfg_topology]);
9707 	} else {
9708 		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
9709 				"2021 Invalid topology values from FW "
9710 				"Using driver parameter defined value [%s]",
9711 				lpfc_topo_to_str[phba->cfg_topology]);
9712 	}
9713 }
9714 
9715 /**
9716  * lpfc_sli4_read_config - Get the config parameters.
9717  * @phba: pointer to lpfc hba data structure.
9718  *
9719  * This routine is invoked to read the configuration parameters from the HBA.
9720  * The configuration parameters are used to set the base and maximum values
9721  * for RPI's XRI's VPI's VFI's and FCFIs. These values also affect the resource
9722  * allocation for the port.
9723  *
9724  * Return codes
9725  * 	0 - successful
9726  * 	-ENOMEM - No available memory
9727  *      -EIO - The mailbox failed to complete successfully.
9728  **/
9729 int
9730 lpfc_sli4_read_config(struct lpfc_hba *phba)
9731 {
9732 	LPFC_MBOXQ_t *pmb;
9733 	struct lpfc_mbx_read_config *rd_config;
9734 	union  lpfc_sli4_cfg_shdr *shdr;
9735 	uint32_t shdr_status, shdr_add_status;
9736 	struct lpfc_mbx_get_func_cfg *get_func_cfg;
9737 	struct lpfc_rsrc_desc_fcfcoe *desc;
9738 	char *pdesc_0;
9739 	uint16_t forced_link_speed;
9740 	uint32_t if_type, qmin;
9741 	int length, i, rc = 0, rc2;
9742 
9743 	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9744 	if (!pmb) {
9745 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9746 				"2011 Unable to allocate memory for issuing "
9747 				"SLI_CONFIG_SPECIAL mailbox command\n");
9748 		return -ENOMEM;
9749 	}
9750 
9751 	lpfc_read_config(phba, pmb);
9752 
9753 	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
9754 	if (rc != MBX_SUCCESS) {
9755 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9756 				"2012 Mailbox failed , mbxCmd x%x "
9757 				"READ_CONFIG, mbxStatus x%x\n",
9758 				bf_get(lpfc_mqe_command, &pmb->u.mqe),
9759 				bf_get(lpfc_mqe_status, &pmb->u.mqe));
9760 		rc = -EIO;
9761 	} else {
9762 		rd_config = &pmb->u.mqe.un.rd_config;
9763 		if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) {
9764 			phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
9765 			phba->sli4_hba.lnk_info.lnk_tp =
9766 				bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config);
9767 			phba->sli4_hba.lnk_info.lnk_no =
9768 				bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config);
9769 			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9770 					"3081 lnk_type:%d, lnk_numb:%d\n",
9771 					phba->sli4_hba.lnk_info.lnk_tp,
9772 					phba->sli4_hba.lnk_info.lnk_no);
9773 		} else
9774 			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
9775 					"3082 Mailbox (x%x) returned ldv:x0\n",
9776 					bf_get(lpfc_mqe_command, &pmb->u.mqe));
9777 		if (bf_get(lpfc_mbx_rd_conf_bbscn_def, rd_config)) {
9778 			phba->bbcredit_support = 1;
9779 			phba->sli4_hba.bbscn_params.word0 = rd_config->word8;
9780 		}
9781 
9782 		phba->sli4_hba.conf_trunk =
9783 			bf_get(lpfc_mbx_rd_conf_trunk, rd_config);
9784 		phba->sli4_hba.extents_in_use =
9785 			bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
9786 		phba->sli4_hba.max_cfg_param.max_xri =
9787 			bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
9788 		/* Reduce resource usage in kdump environment */
9789 		if (is_kdump_kernel() &&
9790 		    phba->sli4_hba.max_cfg_param.max_xri > 512)
9791 			phba->sli4_hba.max_cfg_param.max_xri = 512;
9792 		phba->sli4_hba.max_cfg_param.xri_base =
9793 			bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
9794 		phba->sli4_hba.max_cfg_param.max_vpi =
9795 			bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
9796 		/* Limit the max we support */
9797 		if (phba->sli4_hba.max_cfg_param.max_vpi > LPFC_MAX_VPORTS)
9798 			phba->sli4_hba.max_cfg_param.max_vpi = LPFC_MAX_VPORTS;
9799 		phba->sli4_hba.max_cfg_param.vpi_base =
9800 			bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
9801 		phba->sli4_hba.max_cfg_param.max_rpi =
9802 			bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
9803 		phba->sli4_hba.max_cfg_param.rpi_base =
9804 			bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
9805 		phba->sli4_hba.max_cfg_param.max_vfi =
9806 			bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
9807 		phba->sli4_hba.max_cfg_param.vfi_base =
9808 			bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
9809 		phba->sli4_hba.max_cfg_param.max_fcfi =
9810 			bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
9811 		phba->sli4_hba.max_cfg_param.max_eq =
9812 			bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
9813 		phba->sli4_hba.max_cfg_param.max_rq =
9814 			bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
9815 		phba->sli4_hba.max_cfg_param.max_wq =
9816 			bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
9817 		phba->sli4_hba.max_cfg_param.max_cq =
9818 			bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
9819 		phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
9820 		phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
9821 		phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
9822 		phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
9823 		phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
9824 				(phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
9825 		phba->max_vports = phba->max_vpi;
9826 
9827 		/* Next decide on FPIN or Signal E2E CGN support
9828 		 * For congestion alarms and warnings valid combination are:
9829 		 * 1. FPIN alarms / FPIN warnings
9830 		 * 2. Signal alarms / Signal warnings
9831 		 * 3. FPIN alarms / Signal warnings
9832 		 * 4. Signal alarms / FPIN warnings
9833 		 *
9834 		 * Initialize the adapter frequency to 100 mSecs
9835 		 */
9836 		phba->cgn_reg_fpin = LPFC_CGN_FPIN_BOTH;
9837 		phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
9838 		phba->cgn_sig_freq = lpfc_fabric_cgn_frequency;
9839 
9840 		if (lpfc_use_cgn_signal) {
9841 			if (bf_get(lpfc_mbx_rd_conf_wcs, rd_config)) {
9842 				phba->cgn_reg_signal = EDC_CG_SIG_WARN_ONLY;
9843 				phba->cgn_reg_fpin &= ~LPFC_CGN_FPIN_WARN;
9844 			}
9845 			if (bf_get(lpfc_mbx_rd_conf_acs, rd_config)) {
9846 				/* MUST support both alarm and warning
9847 				 * because EDC does not support alarm alone.
9848 				 */
9849 				if (phba->cgn_reg_signal !=
9850 				    EDC_CG_SIG_WARN_ONLY) {
9851 					/* Must support both or none */
9852 					phba->cgn_reg_fpin = LPFC_CGN_FPIN_BOTH;
9853 					phba->cgn_reg_signal =
9854 						EDC_CG_SIG_NOTSUPPORTED;
9855 				} else {
9856 					phba->cgn_reg_signal =
9857 						EDC_CG_SIG_WARN_ALARM;
9858 					phba->cgn_reg_fpin =
9859 						LPFC_CGN_FPIN_NONE;
9860 				}
9861 			}
9862 		}
9863 
9864 		/* Set the congestion initial signal and fpin values. */
9865 		phba->cgn_init_reg_fpin = phba->cgn_reg_fpin;
9866 		phba->cgn_init_reg_signal = phba->cgn_reg_signal;
9867 
9868 		lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
9869 				"6446 READ_CONFIG reg_sig x%x reg_fpin:x%x\n",
9870 				phba->cgn_reg_signal, phba->cgn_reg_fpin);
9871 
9872 		lpfc_map_topology(phba, rd_config);
9873 		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9874 				"2003 cfg params Extents? %d "
9875 				"XRI(B:%d M:%d), "
9876 				"VPI(B:%d M:%d) "
9877 				"VFI(B:%d M:%d) "
9878 				"RPI(B:%d M:%d) "
9879 				"FCFI:%d EQ:%d CQ:%d WQ:%d RQ:%d lmt:x%x\n",
9880 				phba->sli4_hba.extents_in_use,
9881 				phba->sli4_hba.max_cfg_param.xri_base,
9882 				phba->sli4_hba.max_cfg_param.max_xri,
9883 				phba->sli4_hba.max_cfg_param.vpi_base,
9884 				phba->sli4_hba.max_cfg_param.max_vpi,
9885 				phba->sli4_hba.max_cfg_param.vfi_base,
9886 				phba->sli4_hba.max_cfg_param.max_vfi,
9887 				phba->sli4_hba.max_cfg_param.rpi_base,
9888 				phba->sli4_hba.max_cfg_param.max_rpi,
9889 				phba->sli4_hba.max_cfg_param.max_fcfi,
9890 				phba->sli4_hba.max_cfg_param.max_eq,
9891 				phba->sli4_hba.max_cfg_param.max_cq,
9892 				phba->sli4_hba.max_cfg_param.max_wq,
9893 				phba->sli4_hba.max_cfg_param.max_rq,
9894 				phba->lmt);
9895 
9896 		/*
9897 		 * Calculate queue resources based on how
9898 		 * many WQ/CQ/EQs are available.
9899 		 */
9900 		qmin = phba->sli4_hba.max_cfg_param.max_wq;
9901 		if (phba->sli4_hba.max_cfg_param.max_cq < qmin)
9902 			qmin = phba->sli4_hba.max_cfg_param.max_cq;
9903 		if (phba->sli4_hba.max_cfg_param.max_eq < qmin)
9904 			qmin = phba->sli4_hba.max_cfg_param.max_eq;
9905 		/*
9906 		 * Whats left after this can go toward NVME / FCP.
9907 		 * The minus 4 accounts for ELS, NVME LS, MBOX
9908 		 * plus one extra. When configured for
9909 		 * NVMET, FCP io channel WQs are not created.
9910 		 */
9911 		qmin -= 4;
9912 
9913 		/* Check to see if there is enough for NVME */
9914 		if ((phba->cfg_irq_chann > qmin) ||
9915 		    (phba->cfg_hdw_queue > qmin)) {
9916 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9917 					"2005 Reducing Queues - "
9918 					"FW resource limitation: "
9919 					"WQ %d CQ %d EQ %d: min %d: "
9920 					"IRQ %d HDWQ %d\n",
9921 					phba->sli4_hba.max_cfg_param.max_wq,
9922 					phba->sli4_hba.max_cfg_param.max_cq,
9923 					phba->sli4_hba.max_cfg_param.max_eq,
9924 					qmin, phba->cfg_irq_chann,
9925 					phba->cfg_hdw_queue);
9926 
9927 			if (phba->cfg_irq_chann > qmin)
9928 				phba->cfg_irq_chann = qmin;
9929 			if (phba->cfg_hdw_queue > qmin)
9930 				phba->cfg_hdw_queue = qmin;
9931 		}
9932 	}
9933 
9934 	if (rc)
9935 		goto read_cfg_out;
9936 
9937 	/* Update link speed if forced link speed is supported */
9938 	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
9939 	if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
9940 		forced_link_speed =
9941 			bf_get(lpfc_mbx_rd_conf_link_speed, rd_config);
9942 		if (forced_link_speed) {
9943 			phba->hba_flag |= HBA_FORCED_LINK_SPEED;
9944 
9945 			switch (forced_link_speed) {
9946 			case LINK_SPEED_1G:
9947 				phba->cfg_link_speed =
9948 					LPFC_USER_LINK_SPEED_1G;
9949 				break;
9950 			case LINK_SPEED_2G:
9951 				phba->cfg_link_speed =
9952 					LPFC_USER_LINK_SPEED_2G;
9953 				break;
9954 			case LINK_SPEED_4G:
9955 				phba->cfg_link_speed =
9956 					LPFC_USER_LINK_SPEED_4G;
9957 				break;
9958 			case LINK_SPEED_8G:
9959 				phba->cfg_link_speed =
9960 					LPFC_USER_LINK_SPEED_8G;
9961 				break;
9962 			case LINK_SPEED_10G:
9963 				phba->cfg_link_speed =
9964 					LPFC_USER_LINK_SPEED_10G;
9965 				break;
9966 			case LINK_SPEED_16G:
9967 				phba->cfg_link_speed =
9968 					LPFC_USER_LINK_SPEED_16G;
9969 				break;
9970 			case LINK_SPEED_32G:
9971 				phba->cfg_link_speed =
9972 					LPFC_USER_LINK_SPEED_32G;
9973 				break;
9974 			case LINK_SPEED_64G:
9975 				phba->cfg_link_speed =
9976 					LPFC_USER_LINK_SPEED_64G;
9977 				break;
9978 			case 0xffff:
9979 				phba->cfg_link_speed =
9980 					LPFC_USER_LINK_SPEED_AUTO;
9981 				break;
9982 			default:
9983 				lpfc_printf_log(phba, KERN_ERR,
9984 						LOG_TRACE_EVENT,
9985 						"0047 Unrecognized link "
9986 						"speed : %d\n",
9987 						forced_link_speed);
9988 				phba->cfg_link_speed =
9989 					LPFC_USER_LINK_SPEED_AUTO;
9990 			}
9991 		}
9992 	}
9993 
9994 	/* Reset the DFT_HBA_Q_DEPTH to the max xri  */
9995 	length = phba->sli4_hba.max_cfg_param.max_xri -
9996 			lpfc_sli4_get_els_iocb_cnt(phba);
9997 	if (phba->cfg_hba_queue_depth > length) {
9998 		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
9999 				"3361 HBA queue depth changed from %d to %d\n",
10000 				phba->cfg_hba_queue_depth, length);
10001 		phba->cfg_hba_queue_depth = length;
10002 	}
10003 
10004 	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
10005 	    LPFC_SLI_INTF_IF_TYPE_2)
10006 		goto read_cfg_out;
10007 
10008 	/* get the pf# and vf# for SLI4 if_type 2 port */
10009 	length = (sizeof(struct lpfc_mbx_get_func_cfg) -
10010 		  sizeof(struct lpfc_sli4_cfg_mhdr));
10011 	lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON,
10012 			 LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG,
10013 			 length, LPFC_SLI4_MBX_EMBED);
10014 
10015 	rc2 = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
10016 	shdr = (union lpfc_sli4_cfg_shdr *)
10017 				&pmb->u.mqe.un.sli4_config.header.cfg_shdr;
10018 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10019 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10020 	if (rc2 || shdr_status || shdr_add_status) {
10021 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10022 				"3026 Mailbox failed , mbxCmd x%x "
10023 				"GET_FUNCTION_CONFIG, mbxStatus x%x\n",
10024 				bf_get(lpfc_mqe_command, &pmb->u.mqe),
10025 				bf_get(lpfc_mqe_status, &pmb->u.mqe));
10026 		goto read_cfg_out;
10027 	}
10028 
10029 	/* search for fc_fcoe resrouce descriptor */
10030 	get_func_cfg = &pmb->u.mqe.un.get_func_cfg;
10031 
10032 	pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0];
10033 	desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0;
10034 	length = bf_get(lpfc_rsrc_desc_fcfcoe_length, desc);
10035 	if (length == LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD)
10036 		length = LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH;
10037 	else if (length != LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH)
10038 		goto read_cfg_out;
10039 
10040 	for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
10041 		desc = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc_0 + length * i);
10042 		if (LPFC_RSRC_DESC_TYPE_FCFCOE ==
10043 		    bf_get(lpfc_rsrc_desc_fcfcoe_type, desc)) {
10044 			phba->sli4_hba.iov.pf_number =
10045 				bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc);
10046 			phba->sli4_hba.iov.vf_number =
10047 				bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc);
10048 			break;
10049 		}
10050 	}
10051 
10052 	if (i < LPFC_RSRC_DESC_MAX_NUM)
10053 		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10054 				"3027 GET_FUNCTION_CONFIG: pf_number:%d, "
10055 				"vf_number:%d\n", phba->sli4_hba.iov.pf_number,
10056 				phba->sli4_hba.iov.vf_number);
10057 	else
10058 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10059 				"3028 GET_FUNCTION_CONFIG: failed to find "
10060 				"Resource Descriptor:x%x\n",
10061 				LPFC_RSRC_DESC_TYPE_FCFCOE);
10062 
10063 read_cfg_out:
10064 	mempool_free(pmb, phba->mbox_mem_pool);
10065 	return rc;
10066 }
10067 
10068 /**
10069  * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port.
10070  * @phba: pointer to lpfc hba data structure.
10071  *
10072  * This routine is invoked to setup the port-side endian order when
10073  * the port if_type is 0.  This routine has no function for other
10074  * if_types.
10075  *
10076  * Return codes
10077  * 	0 - successful
10078  * 	-ENOMEM - No available memory
10079  *      -EIO - The mailbox failed to complete successfully.
10080  **/
10081 static int
10082 lpfc_setup_endian_order(struct lpfc_hba *phba)
10083 {
10084 	LPFC_MBOXQ_t *mboxq;
10085 	uint32_t if_type, rc = 0;
10086 	uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
10087 				      HOST_ENDIAN_HIGH_WORD1};
10088 
10089 	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
10090 	switch (if_type) {
10091 	case LPFC_SLI_INTF_IF_TYPE_0:
10092 		mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
10093 						       GFP_KERNEL);
10094 		if (!mboxq) {
10095 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10096 					"0492 Unable to allocate memory for "
10097 					"issuing SLI_CONFIG_SPECIAL mailbox "
10098 					"command\n");
10099 			return -ENOMEM;
10100 		}
10101 
10102 		/*
10103 		 * The SLI4_CONFIG_SPECIAL mailbox command requires the first
10104 		 * two words to contain special data values and no other data.
10105 		 */
10106 		memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
10107 		memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
10108 		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
10109 		if (rc != MBX_SUCCESS) {
10110 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10111 					"0493 SLI_CONFIG_SPECIAL mailbox "
10112 					"failed with status x%x\n",
10113 					rc);
10114 			rc = -EIO;
10115 		}
10116 		mempool_free(mboxq, phba->mbox_mem_pool);
10117 		break;
10118 	case LPFC_SLI_INTF_IF_TYPE_6:
10119 	case LPFC_SLI_INTF_IF_TYPE_2:
10120 	case LPFC_SLI_INTF_IF_TYPE_1:
10121 	default:
10122 		break;
10123 	}
10124 	return rc;
10125 }
10126 
10127 /**
10128  * lpfc_sli4_queue_verify - Verify and update EQ counts
10129  * @phba: pointer to lpfc hba data structure.
10130  *
10131  * This routine is invoked to check the user settable queue counts for EQs.
10132  * After this routine is called the counts will be set to valid values that
10133  * adhere to the constraints of the system's interrupt vectors and the port's
10134  * queue resources.
10135  *
10136  * Return codes
10137  *      0 - successful
10138  *      -ENOMEM - No available memory
10139  **/
10140 static int
10141 lpfc_sli4_queue_verify(struct lpfc_hba *phba)
10142 {
10143 	/*
10144 	 * Sanity check for configured queue parameters against the run-time
10145 	 * device parameters
10146 	 */
10147 
10148 	if (phba->nvmet_support) {
10149 		if (phba->cfg_hdw_queue < phba->cfg_nvmet_mrq)
10150 			phba->cfg_nvmet_mrq = phba->cfg_hdw_queue;
10151 		if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
10152 			phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;
10153 	}
10154 
10155 	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10156 			"2574 IO channels: hdwQ %d IRQ %d MRQ: %d\n",
10157 			phba->cfg_hdw_queue, phba->cfg_irq_chann,
10158 			phba->cfg_nvmet_mrq);
10159 
10160 	/* Get EQ depth from module parameter, fake the default for now */
10161 	phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
10162 	phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
10163 
10164 	/* Get CQ depth from module parameter, fake the default for now */
10165 	phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
10166 	phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
10167 	return 0;
10168 }
10169 
10170 static int
10171 lpfc_alloc_io_wq_cq(struct lpfc_hba *phba, int idx)
10172 {
10173 	struct lpfc_queue *qdesc;
10174 	u32 wqesize;
10175 	int cpu;
10176 
10177 	cpu = lpfc_find_cpu_handle(phba, idx, LPFC_FIND_BY_HDWQ);
10178 	/* Create Fast Path IO CQs */
10179 	if (phba->enab_exp_wqcq_pages)
10180 		/* Increase the CQ size when WQEs contain an embedded cdb */
10181 		qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
10182 					      phba->sli4_hba.cq_esize,
10183 					      LPFC_CQE_EXP_COUNT, cpu);
10184 
10185 	else
10186 		qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10187 					      phba->sli4_hba.cq_esize,
10188 					      phba->sli4_hba.cq_ecount, cpu);
10189 	if (!qdesc) {
10190 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10191 				"0499 Failed allocate fast-path IO CQ (%d)\n",
10192 				idx);
10193 		return 1;
10194 	}
10195 	qdesc->qe_valid = 1;
10196 	qdesc->hdwq = idx;
10197 	qdesc->chann = cpu;
10198 	phba->sli4_hba.hdwq[idx].io_cq = qdesc;
10199 
10200 	/* Create Fast Path IO WQs */
10201 	if (phba->enab_exp_wqcq_pages) {
10202 		/* Increase the WQ size when WQEs contain an embedded cdb */
10203 		wqesize = (phba->fcp_embed_io) ?
10204 			LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
10205 		qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
10206 					      wqesize,
10207 					      LPFC_WQE_EXP_COUNT, cpu);
10208 	} else
10209 		qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10210 					      phba->sli4_hba.wq_esize,
10211 					      phba->sli4_hba.wq_ecount, cpu);
10212 
10213 	if (!qdesc) {
10214 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10215 				"0503 Failed allocate fast-path IO WQ (%d)\n",
10216 				idx);
10217 		return 1;
10218 	}
10219 	qdesc->hdwq = idx;
10220 	qdesc->chann = cpu;
10221 	phba->sli4_hba.hdwq[idx].io_wq = qdesc;
10222 	list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
10223 	return 0;
10224 }
10225 
10226 /**
10227  * lpfc_sli4_queue_create - Create all the SLI4 queues
10228  * @phba: pointer to lpfc hba data structure.
10229  *
10230  * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
10231  * operation. For each SLI4 queue type, the parameters such as queue entry
10232  * count (queue depth) shall be taken from the module parameter. For now,
10233  * we just use some constant number as place holder.
10234  *
10235  * Return codes
10236  *      0 - successful
10237  *      -ENOMEM - No availble memory
10238  *      -EIO - The mailbox failed to complete successfully.
10239  **/
10240 int
10241 lpfc_sli4_queue_create(struct lpfc_hba *phba)
10242 {
10243 	struct lpfc_queue *qdesc;
10244 	int idx, cpu, eqcpu;
10245 	struct lpfc_sli4_hdw_queue *qp;
10246 	struct lpfc_vector_map_info *cpup;
10247 	struct lpfc_vector_map_info *eqcpup;
10248 	struct lpfc_eq_intr_info *eqi;
10249 
10250 	/*
10251 	 * Create HBA Record arrays.
10252 	 * Both NVME and FCP will share that same vectors / EQs
10253 	 */
10254 	phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
10255 	phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
10256 	phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
10257 	phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
10258 	phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
10259 	phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
10260 	phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
10261 	phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
10262 	phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
10263 	phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
10264 
10265 	if (!phba->sli4_hba.hdwq) {
10266 		phba->sli4_hba.hdwq = kcalloc(
10267 			phba->cfg_hdw_queue, sizeof(struct lpfc_sli4_hdw_queue),
10268 			GFP_KERNEL);
10269 		if (!phba->sli4_hba.hdwq) {
10270 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10271 					"6427 Failed allocate memory for "
10272 					"fast-path Hardware Queue array\n");
10273 			goto out_error;
10274 		}
10275 		/* Prepare hardware queues to take IO buffers */
10276 		for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
10277 			qp = &phba->sli4_hba.hdwq[idx];
10278 			spin_lock_init(&qp->io_buf_list_get_lock);
10279 			spin_lock_init(&qp->io_buf_list_put_lock);
10280 			INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get);
10281 			INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
10282 			qp->get_io_bufs = 0;
10283 			qp->put_io_bufs = 0;
10284 			qp->total_io_bufs = 0;
10285 			spin_lock_init(&qp->abts_io_buf_list_lock);
10286 			INIT_LIST_HEAD(&qp->lpfc_abts_io_buf_list);
10287 			qp->abts_scsi_io_bufs = 0;
10288 			qp->abts_nvme_io_bufs = 0;
10289 			INIT_LIST_HEAD(&qp->sgl_list);
10290 			INIT_LIST_HEAD(&qp->cmd_rsp_buf_list);
10291 			spin_lock_init(&qp->hdwq_lock);
10292 		}
10293 	}
10294 
10295 	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
10296 		if (phba->nvmet_support) {
10297 			phba->sli4_hba.nvmet_cqset = kcalloc(
10298 					phba->cfg_nvmet_mrq,
10299 					sizeof(struct lpfc_queue *),
10300 					GFP_KERNEL);
10301 			if (!phba->sli4_hba.nvmet_cqset) {
10302 				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10303 					"3121 Fail allocate memory for "
10304 					"fast-path CQ set array\n");
10305 				goto out_error;
10306 			}
10307 			phba->sli4_hba.nvmet_mrq_hdr = kcalloc(
10308 					phba->cfg_nvmet_mrq,
10309 					sizeof(struct lpfc_queue *),
10310 					GFP_KERNEL);
10311 			if (!phba->sli4_hba.nvmet_mrq_hdr) {
10312 				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10313 					"3122 Fail allocate memory for "
10314 					"fast-path RQ set hdr array\n");
10315 				goto out_error;
10316 			}
10317 			phba->sli4_hba.nvmet_mrq_data = kcalloc(
10318 					phba->cfg_nvmet_mrq,
10319 					sizeof(struct lpfc_queue *),
10320 					GFP_KERNEL);
10321 			if (!phba->sli4_hba.nvmet_mrq_data) {
10322 				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10323 					"3124 Fail allocate memory for "
10324 					"fast-path RQ set data array\n");
10325 				goto out_error;
10326 			}
10327 		}
10328 	}
10329 
10330 	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
10331 
10332 	/* Create HBA Event Queues (EQs) */
10333 	for_each_present_cpu(cpu) {
10334 		/* We only want to create 1 EQ per vector, even though
10335 		 * multiple CPUs might be using that vector. so only
10336 		 * selects the CPUs that are LPFC_CPU_FIRST_IRQ.
10337 		 */
10338 		cpup = &phba->sli4_hba.cpu_map[cpu];
10339 		if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
10340 			continue;
10341 
10342 		/* Get a ptr to the Hardware Queue associated with this CPU */
10343 		qp = &phba->sli4_hba.hdwq[cpup->hdwq];
10344 
10345 		/* Allocate an EQ */
10346 		qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10347 					      phba->sli4_hba.eq_esize,
10348 					      phba->sli4_hba.eq_ecount, cpu);
10349 		if (!qdesc) {
10350 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10351 					"0497 Failed allocate EQ (%d)\n",
10352 					cpup->hdwq);
10353 			goto out_error;
10354 		}
10355 		qdesc->qe_valid = 1;
10356 		qdesc->hdwq = cpup->hdwq;
10357 		qdesc->chann = cpu; /* First CPU this EQ is affinitized to */
10358 		qdesc->last_cpu = qdesc->chann;
10359 
10360 		/* Save the allocated EQ in the Hardware Queue */
10361 		qp->hba_eq = qdesc;
10362 
10363 		eqi = per_cpu_ptr(phba->sli4_hba.eq_info, qdesc->last_cpu);
10364 		list_add(&qdesc->cpu_list, &eqi->list);
10365 	}
10366 
10367 	/* Now we need to populate the other Hardware Queues, that share
10368 	 * an IRQ vector, with the associated EQ ptr.
10369 	 */
10370 	for_each_present_cpu(cpu) {
10371 		cpup = &phba->sli4_hba.cpu_map[cpu];
10372 
10373 		/* Check for EQ already allocated in previous loop */
10374 		if (cpup->flag & LPFC_CPU_FIRST_IRQ)
10375 			continue;
10376 
10377 		/* Check for multiple CPUs per hdwq */
10378 		qp = &phba->sli4_hba.hdwq[cpup->hdwq];
10379 		if (qp->hba_eq)
10380 			continue;
10381 
10382 		/* We need to share an EQ for this hdwq */
10383 		eqcpu = lpfc_find_cpu_handle(phba, cpup->eq, LPFC_FIND_BY_EQ);
10384 		eqcpup = &phba->sli4_hba.cpu_map[eqcpu];
10385 		qp->hba_eq = phba->sli4_hba.hdwq[eqcpup->hdwq].hba_eq;
10386 	}
10387 
10388 	/* Allocate IO Path SLI4 CQ/WQs */
10389 	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
10390 		if (lpfc_alloc_io_wq_cq(phba, idx))
10391 			goto out_error;
10392 	}
10393 
10394 	if (phba->nvmet_support) {
10395 		for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
10396 			cpu = lpfc_find_cpu_handle(phba, idx,
10397 						   LPFC_FIND_BY_HDWQ);
10398 			qdesc = lpfc_sli4_queue_alloc(phba,
10399 						      LPFC_DEFAULT_PAGE_SIZE,
10400 						      phba->sli4_hba.cq_esize,
10401 						      phba->sli4_hba.cq_ecount,
10402 						      cpu);
10403 			if (!qdesc) {
10404 				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10405 						"3142 Failed allocate NVME "
10406 						"CQ Set (%d)\n", idx);
10407 				goto out_error;
10408 			}
10409 			qdesc->qe_valid = 1;
10410 			qdesc->hdwq = idx;
10411 			qdesc->chann = cpu;
10412 			phba->sli4_hba.nvmet_cqset[idx] = qdesc;
10413 		}
10414 	}
10415 
10416 	/*
10417 	 * Create Slow Path Completion Queues (CQs)
10418 	 */
10419 
10420 	cpu = lpfc_find_cpu_handle(phba, 0, LPFC_FIND_BY_EQ);
10421 	/* Create slow-path Mailbox Command Complete Queue */
10422 	qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10423 				      phba->sli4_hba.cq_esize,
10424 				      phba->sli4_hba.cq_ecount, cpu);
10425 	if (!qdesc) {
10426 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10427 				"0500 Failed allocate slow-path mailbox CQ\n");
10428 		goto out_error;
10429 	}
10430 	qdesc->qe_valid = 1;
10431 	phba->sli4_hba.mbx_cq = qdesc;
10432 
10433 	/* Create slow-path ELS Complete Queue */
10434 	qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10435 				      phba->sli4_hba.cq_esize,
10436 				      phba->sli4_hba.cq_ecount, cpu);
10437 	if (!qdesc) {
10438 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10439 				"0501 Failed allocate slow-path ELS CQ\n");
10440 		goto out_error;
10441 	}
10442 	qdesc->qe_valid = 1;
10443 	qdesc->chann = cpu;
10444 	phba->sli4_hba.els_cq = qdesc;
10445 
10446 
10447 	/*
10448 	 * Create Slow Path Work Queues (WQs)
10449 	 */
10450 
10451 	/* Create Mailbox Command Queue */
10452 
10453 	qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10454 				      phba->sli4_hba.mq_esize,
10455 				      phba->sli4_hba.mq_ecount, cpu);
10456 	if (!qdesc) {
10457 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10458 				"0505 Failed allocate slow-path MQ\n");
10459 		goto out_error;
10460 	}
10461 	qdesc->chann = cpu;
10462 	phba->sli4_hba.mbx_wq = qdesc;
10463 
10464 	/*
10465 	 * Create ELS Work Queues
10466 	 */
10467 
10468 	/* Create slow-path ELS Work Queue */
10469 	qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10470 				      phba->sli4_hba.wq_esize,
10471 				      phba->sli4_hba.wq_ecount, cpu);
10472 	if (!qdesc) {
10473 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10474 				"0504 Failed allocate slow-path ELS WQ\n");
10475 		goto out_error;
10476 	}
10477 	qdesc->chann = cpu;
10478 	phba->sli4_hba.els_wq = qdesc;
10479 	list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
10480 
10481 	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
10482 		/* Create NVME LS Complete Queue */
10483 		qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10484 					      phba->sli4_hba.cq_esize,
10485 					      phba->sli4_hba.cq_ecount, cpu);
10486 		if (!qdesc) {
10487 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10488 					"6079 Failed allocate NVME LS CQ\n");
10489 			goto out_error;
10490 		}
10491 		qdesc->chann = cpu;
10492 		qdesc->qe_valid = 1;
10493 		phba->sli4_hba.nvmels_cq = qdesc;
10494 
10495 		/* Create NVME LS Work Queue */
10496 		qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10497 					      phba->sli4_hba.wq_esize,
10498 					      phba->sli4_hba.wq_ecount, cpu);
10499 		if (!qdesc) {
10500 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10501 					"6080 Failed allocate NVME LS WQ\n");
10502 			goto out_error;
10503 		}
10504 		qdesc->chann = cpu;
10505 		phba->sli4_hba.nvmels_wq = qdesc;
10506 		list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
10507 	}
10508 
10509 	/*
10510 	 * Create Receive Queue (RQ)
10511 	 */
10512 
10513 	/* Create Receive Queue for header */
10514 	qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10515 				      phba->sli4_hba.rq_esize,
10516 				      phba->sli4_hba.rq_ecount, cpu);
10517 	if (!qdesc) {
10518 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10519 				"0506 Failed allocate receive HRQ\n");
10520 		goto out_error;
10521 	}
10522 	phba->sli4_hba.hdr_rq = qdesc;
10523 
10524 	/* Create Receive Queue for data */
10525 	qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10526 				      phba->sli4_hba.rq_esize,
10527 				      phba->sli4_hba.rq_ecount, cpu);
10528 	if (!qdesc) {
10529 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10530 				"0507 Failed allocate receive DRQ\n");
10531 		goto out_error;
10532 	}
10533 	phba->sli4_hba.dat_rq = qdesc;
10534 
10535 	if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) &&
10536 	    phba->nvmet_support) {
10537 		for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
10538 			cpu = lpfc_find_cpu_handle(phba, idx,
10539 						   LPFC_FIND_BY_HDWQ);
10540 			/* Create NVMET Receive Queue for header */
10541 			qdesc = lpfc_sli4_queue_alloc(phba,
10542 						      LPFC_DEFAULT_PAGE_SIZE,
10543 						      phba->sli4_hba.rq_esize,
10544 						      LPFC_NVMET_RQE_DEF_COUNT,
10545 						      cpu);
10546 			if (!qdesc) {
10547 				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10548 						"3146 Failed allocate "
10549 						"receive HRQ\n");
10550 				goto out_error;
10551 			}
10552 			qdesc->hdwq = idx;
10553 			phba->sli4_hba.nvmet_mrq_hdr[idx] = qdesc;
10554 
10555 			/* Only needed for header of RQ pair */
10556 			qdesc->rqbp = kzalloc_node(sizeof(*qdesc->rqbp),
10557 						   GFP_KERNEL,
10558 						   cpu_to_node(cpu));
10559 			if (qdesc->rqbp == NULL) {
10560 				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10561 						"6131 Failed allocate "
10562 						"Header RQBP\n");
10563 				goto out_error;
10564 			}
10565 
10566 			/* Put list in known state in case driver load fails. */
10567 			INIT_LIST_HEAD(&qdesc->rqbp->rqb_buffer_list);
10568 
10569 			/* Create NVMET Receive Queue for data */
10570 			qdesc = lpfc_sli4_queue_alloc(phba,
10571 						      LPFC_DEFAULT_PAGE_SIZE,
10572 						      phba->sli4_hba.rq_esize,
10573 						      LPFC_NVMET_RQE_DEF_COUNT,
10574 						      cpu);
10575 			if (!qdesc) {
10576 				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10577 						"3156 Failed allocate "
10578 						"receive DRQ\n");
10579 				goto out_error;
10580 			}
10581 			qdesc->hdwq = idx;
10582 			phba->sli4_hba.nvmet_mrq_data[idx] = qdesc;
10583 		}
10584 	}
10585 
10586 	/* Clear NVME stats */
10587 	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
10588 		for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
10589 			memset(&phba->sli4_hba.hdwq[idx].nvme_cstat, 0,
10590 			       sizeof(phba->sli4_hba.hdwq[idx].nvme_cstat));
10591 		}
10592 	}
10593 
10594 	/* Clear SCSI stats */
10595 	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
10596 		for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
10597 			memset(&phba->sli4_hba.hdwq[idx].scsi_cstat, 0,
10598 			       sizeof(phba->sli4_hba.hdwq[idx].scsi_cstat));
10599 		}
10600 	}
10601 
10602 	return 0;
10603 
10604 out_error:
10605 	lpfc_sli4_queue_destroy(phba);
10606 	return -ENOMEM;
10607 }
10608 
10609 static inline void
10610 __lpfc_sli4_release_queue(struct lpfc_queue **qp)
10611 {
10612 	if (*qp != NULL) {
10613 		lpfc_sli4_queue_free(*qp);
10614 		*qp = NULL;
10615 	}
10616 }
10617 
10618 static inline void
10619 lpfc_sli4_release_queues(struct lpfc_queue ***qs, int max)
10620 {
10621 	int idx;
10622 
10623 	if (*qs == NULL)
10624 		return;
10625 
10626 	for (idx = 0; idx < max; idx++)
10627 		__lpfc_sli4_release_queue(&(*qs)[idx]);
10628 
10629 	kfree(*qs);
10630 	*qs = NULL;
10631 }
10632 
10633 static inline void
10634 lpfc_sli4_release_hdwq(struct lpfc_hba *phba)
10635 {
10636 	struct lpfc_sli4_hdw_queue *hdwq;
10637 	struct lpfc_queue *eq;
10638 	uint32_t idx;
10639 
10640 	hdwq = phba->sli4_hba.hdwq;
10641 
10642 	/* Loop thru all Hardware Queues */
10643 	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
10644 		/* Free the CQ/WQ corresponding to the Hardware Queue */
10645 		lpfc_sli4_queue_free(hdwq[idx].io_cq);
10646 		lpfc_sli4_queue_free(hdwq[idx].io_wq);
10647 		hdwq[idx].hba_eq = NULL;
10648 		hdwq[idx].io_cq = NULL;
10649 		hdwq[idx].io_wq = NULL;
10650 		if (phba->cfg_xpsgl && !phba->nvmet_support)
10651 			lpfc_free_sgl_per_hdwq(phba, &hdwq[idx]);
10652 		lpfc_free_cmd_rsp_buf_per_hdwq(phba, &hdwq[idx]);
10653 	}
10654 	/* Loop thru all IRQ vectors */
10655 	for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
10656 		/* Free the EQ corresponding to the IRQ vector */
10657 		eq = phba->sli4_hba.hba_eq_hdl[idx].eq;
10658 		lpfc_sli4_queue_free(eq);
10659 		phba->sli4_hba.hba_eq_hdl[idx].eq = NULL;
10660 	}
10661 }
10662 
10663 /**
10664  * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
10665  * @phba: pointer to lpfc hba data structure.
10666  *
10667  * This routine is invoked to release all the SLI4 queues with the FCoE HBA
10668  * operation.
10669  *
10670  * Return codes
10671  *      0 - successful
10672  *      -ENOMEM - No available memory
10673  *      -EIO - The mailbox failed to complete successfully.
10674  **/
10675 void
10676 lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
10677 {
10678 	/*
10679 	 * Set FREE_INIT before beginning to free the queues.
10680 	 * Wait until the users of queues to acknowledge to
10681 	 * release queues by clearing FREE_WAIT.
10682 	 */
10683 	spin_lock_irq(&phba->hbalock);
10684 	phba->sli.sli_flag |= LPFC_QUEUE_FREE_INIT;
10685 	while (phba->sli.sli_flag & LPFC_QUEUE_FREE_WAIT) {
10686 		spin_unlock_irq(&phba->hbalock);
10687 		msleep(20);
10688 		spin_lock_irq(&phba->hbalock);
10689 	}
10690 	spin_unlock_irq(&phba->hbalock);
10691 
10692 	lpfc_sli4_cleanup_poll_list(phba);
10693 
10694 	/* Release HBA eqs */
10695 	if (phba->sli4_hba.hdwq)
10696 		lpfc_sli4_release_hdwq(phba);
10697 
10698 	if (phba->nvmet_support) {
10699 		lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset,
10700 					 phba->cfg_nvmet_mrq);
10701 
10702 		lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr,
10703 					 phba->cfg_nvmet_mrq);
10704 		lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data,
10705 					 phba->cfg_nvmet_mrq);
10706 	}
10707 
10708 	/* Release mailbox command work queue */
10709 	__lpfc_sli4_release_queue(&phba->sli4_hba.mbx_wq);
10710 
10711 	/* Release ELS work queue */
10712 	__lpfc_sli4_release_queue(&phba->sli4_hba.els_wq);
10713 
10714 	/* Release ELS work queue */
10715 	__lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_wq);
10716 
10717 	/* Release unsolicited receive queue */
10718 	__lpfc_sli4_release_queue(&phba->sli4_hba.hdr_rq);
10719 	__lpfc_sli4_release_queue(&phba->sli4_hba.dat_rq);
10720 
10721 	/* Release ELS complete queue */
10722 	__lpfc_sli4_release_queue(&phba->sli4_hba.els_cq);
10723 
10724 	/* Release NVME LS complete queue */
10725 	__lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_cq);
10726 
10727 	/* Release mailbox command complete queue */
10728 	__lpfc_sli4_release_queue(&phba->sli4_hba.mbx_cq);
10729 
10730 	/* Everything on this list has been freed */
10731 	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
10732 
10733 	/* Done with freeing the queues */
10734 	spin_lock_irq(&phba->hbalock);
10735 	phba->sli.sli_flag &= ~LPFC_QUEUE_FREE_INIT;
10736 	spin_unlock_irq(&phba->hbalock);
10737 }
10738 
10739 int
10740 lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *rq)
10741 {
10742 	struct lpfc_rqb *rqbp;
10743 	struct lpfc_dmabuf *h_buf;
10744 	struct rqb_dmabuf *rqb_buffer;
10745 
10746 	rqbp = rq->rqbp;
10747 	while (!list_empty(&rqbp->rqb_buffer_list)) {
10748 		list_remove_head(&rqbp->rqb_buffer_list, h_buf,
10749 				 struct lpfc_dmabuf, list);
10750 
10751 		rqb_buffer = container_of(h_buf, struct rqb_dmabuf, hbuf);
10752 		(rqbp->rqb_free_buffer)(phba, rqb_buffer);
10753 		rqbp->buffer_count--;
10754 	}
10755 	return 1;
10756 }
10757 
10758 static int
10759 lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq,
10760 	struct lpfc_queue *cq, struct lpfc_queue *wq, uint16_t *cq_map,
10761 	int qidx, uint32_t qtype)
10762 {
10763 	struct lpfc_sli_ring *pring;
10764 	int rc;
10765 
10766 	if (!eq || !cq || !wq) {
10767 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10768 			"6085 Fast-path %s (%d) not allocated\n",
10769 			((eq) ? ((cq) ? "WQ" : "CQ") : "EQ"), qidx);
10770 		return -ENOMEM;
10771 	}
10772 
10773 	/* create the Cq first */
10774 	rc = lpfc_cq_create(phba, cq, eq,
10775 			(qtype == LPFC_MBOX) ? LPFC_MCQ : LPFC_WCQ, qtype);
10776 	if (rc) {
10777 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10778 				"6086 Failed setup of CQ (%d), rc = 0x%x\n",
10779 				qidx, (uint32_t)rc);
10780 		return rc;
10781 	}
10782 
10783 	if (qtype != LPFC_MBOX) {
10784 		/* Setup cq_map for fast lookup */
10785 		if (cq_map)
10786 			*cq_map = cq->queue_id;
10787 
10788 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10789 			"6087 CQ setup: cq[%d]-id=%d, parent eq[%d]-id=%d\n",
10790 			qidx, cq->queue_id, qidx, eq->queue_id);
10791 
10792 		/* create the wq */
10793 		rc = lpfc_wq_create(phba, wq, cq, qtype);
10794 		if (rc) {
10795 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10796 				"4618 Fail setup fastpath WQ (%d), rc = 0x%x\n",
10797 				qidx, (uint32_t)rc);
10798 			/* no need to tear down cq - caller will do so */
10799 			return rc;
10800 		}
10801 
10802 		/* Bind this CQ/WQ to the NVME ring */
10803 		pring = wq->pring;
10804 		pring->sli.sli4.wqp = (void *)wq;
10805 		cq->pring = pring;
10806 
10807 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10808 			"2593 WQ setup: wq[%d]-id=%d assoc=%d, cq[%d]-id=%d\n",
10809 			qidx, wq->queue_id, wq->assoc_qid, qidx, cq->queue_id);
10810 	} else {
10811 		rc = lpfc_mq_create(phba, wq, cq, LPFC_MBOX);
10812 		if (rc) {
10813 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10814 					"0539 Failed setup of slow-path MQ: "
10815 					"rc = 0x%x\n", rc);
10816 			/* no need to tear down cq - caller will do so */
10817 			return rc;
10818 		}
10819 
10820 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10821 			"2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
10822 			phba->sli4_hba.mbx_wq->queue_id,
10823 			phba->sli4_hba.mbx_cq->queue_id);
10824 	}
10825 
10826 	return 0;
10827 }
10828 
10829 /**
10830  * lpfc_setup_cq_lookup - Setup the CQ lookup table
10831  * @phba: pointer to lpfc hba data structure.
10832  *
10833  * This routine will populate the cq_lookup table by all
10834  * available CQ queue_id's.
10835  **/
10836 static void
10837 lpfc_setup_cq_lookup(struct lpfc_hba *phba)
10838 {
10839 	struct lpfc_queue *eq, *childq;
10840 	int qidx;
10841 
10842 	memset(phba->sli4_hba.cq_lookup, 0,
10843 	       (sizeof(struct lpfc_queue *) * (phba->sli4_hba.cq_max + 1)));
10844 	/* Loop thru all IRQ vectors */
10845 	for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
10846 		/* Get the EQ corresponding to the IRQ vector */
10847 		eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
10848 		if (!eq)
10849 			continue;
10850 		/* Loop through all CQs associated with that EQ */
10851 		list_for_each_entry(childq, &eq->child_list, list) {
10852 			if (childq->queue_id > phba->sli4_hba.cq_max)
10853 				continue;
10854 			if (childq->subtype == LPFC_IO)
10855 				phba->sli4_hba.cq_lookup[childq->queue_id] =
10856 					childq;
10857 		}
10858 	}
10859 }
10860 
10861 /**
10862  * lpfc_sli4_queue_setup - Set up all the SLI4 queues
10863  * @phba: pointer to lpfc hba data structure.
10864  *
10865  * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
10866  * operation.
10867  *
10868  * Return codes
10869  *      0 - successful
10870  *      -ENOMEM - No available memory
10871  *      -EIO - The mailbox failed to complete successfully.
10872  **/
10873 int
10874 lpfc_sli4_queue_setup(struct lpfc_hba *phba)
10875 {
10876 	uint32_t shdr_status, shdr_add_status;
10877 	union lpfc_sli4_cfg_shdr *shdr;
10878 	struct lpfc_vector_map_info *cpup;
10879 	struct lpfc_sli4_hdw_queue *qp;
10880 	LPFC_MBOXQ_t *mboxq;
10881 	int qidx, cpu;
10882 	uint32_t length, usdelay;
10883 	int rc = -ENOMEM;
10884 
10885 	/* Check for dual-ULP support */
10886 	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
10887 	if (!mboxq) {
10888 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10889 				"3249 Unable to allocate memory for "
10890 				"QUERY_FW_CFG mailbox command\n");
10891 		return -ENOMEM;
10892 	}
10893 	length = (sizeof(struct lpfc_mbx_query_fw_config) -
10894 		  sizeof(struct lpfc_sli4_cfg_mhdr));
10895 	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
10896 			 LPFC_MBOX_OPCODE_QUERY_FW_CFG,
10897 			 length, LPFC_SLI4_MBX_EMBED);
10898 
10899 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
10900 
10901 	shdr = (union lpfc_sli4_cfg_shdr *)
10902 			&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
10903 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10904 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10905 	if (shdr_status || shdr_add_status || rc) {
10906 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10907 				"3250 QUERY_FW_CFG mailbox failed with status "
10908 				"x%x add_status x%x, mbx status x%x\n",
10909 				shdr_status, shdr_add_status, rc);
10910 		mempool_free(mboxq, phba->mbox_mem_pool);
10911 		rc = -ENXIO;
10912 		goto out_error;
10913 	}
10914 
10915 	phba->sli4_hba.fw_func_mode =
10916 			mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode;
10917 	phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode;
10918 	phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode;
10919 	phba->sli4_hba.physical_port =
10920 			mboxq->u.mqe.un.query_fw_cfg.rsp.physical_port;
10921 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10922 			"3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, "
10923 			"ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode,
10924 			phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode);
10925 
10926 	mempool_free(mboxq, phba->mbox_mem_pool);
10927 
10928 	/*
10929 	 * Set up HBA Event Queues (EQs)
10930 	 */
10931 	qp = phba->sli4_hba.hdwq;
10932 
10933 	/* Set up HBA event queue */
10934 	if (!qp) {
10935 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10936 				"3147 Fast-path EQs not allocated\n");
10937 		rc = -ENOMEM;
10938 		goto out_error;
10939 	}
10940 
10941 	/* Loop thru all IRQ vectors */
10942 	for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
10943 		/* Create HBA Event Queues (EQs) in order */
10944 		for_each_present_cpu(cpu) {
10945 			cpup = &phba->sli4_hba.cpu_map[cpu];
10946 
10947 			/* Look for the CPU thats using that vector with
10948 			 * LPFC_CPU_FIRST_IRQ set.
10949 			 */
10950 			if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
10951 				continue;
10952 			if (qidx != cpup->eq)
10953 				continue;
10954 
10955 			/* Create an EQ for that vector */
10956 			rc = lpfc_eq_create(phba, qp[cpup->hdwq].hba_eq,
10957 					    phba->cfg_fcp_imax);
10958 			if (rc) {
10959 				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10960 						"0523 Failed setup of fast-path"
10961 						" EQ (%d), rc = 0x%x\n",
10962 						cpup->eq, (uint32_t)rc);
10963 				goto out_destroy;
10964 			}
10965 
10966 			/* Save the EQ for that vector in the hba_eq_hdl */
10967 			phba->sli4_hba.hba_eq_hdl[cpup->eq].eq =
10968 				qp[cpup->hdwq].hba_eq;
10969 
10970 			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10971 					"2584 HBA EQ setup: queue[%d]-id=%d\n",
10972 					cpup->eq,
10973 					qp[cpup->hdwq].hba_eq->queue_id);
10974 		}
10975 	}
10976 
10977 	/* Loop thru all Hardware Queues */
10978 	for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
10979 		cpu = lpfc_find_cpu_handle(phba, qidx, LPFC_FIND_BY_HDWQ);
10980 		cpup = &phba->sli4_hba.cpu_map[cpu];
10981 
10982 		/* Create the CQ/WQ corresponding to the Hardware Queue */
10983 		rc = lpfc_create_wq_cq(phba,
10984 				       phba->sli4_hba.hdwq[cpup->hdwq].hba_eq,
10985 				       qp[qidx].io_cq,
10986 				       qp[qidx].io_wq,
10987 				       &phba->sli4_hba.hdwq[qidx].io_cq_map,
10988 				       qidx,
10989 				       LPFC_IO);
10990 		if (rc) {
10991 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10992 					"0535 Failed to setup fastpath "
10993 					"IO WQ/CQ (%d), rc = 0x%x\n",
10994 					qidx, (uint32_t)rc);
10995 			goto out_destroy;
10996 		}
10997 	}
10998 
10999 	/*
11000 	 * Set up Slow Path Complete Queues (CQs)
11001 	 */
11002 
11003 	/* Set up slow-path MBOX CQ/MQ */
11004 
11005 	if (!phba->sli4_hba.mbx_cq || !phba->sli4_hba.mbx_wq) {
11006 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11007 				"0528 %s not allocated\n",
11008 				phba->sli4_hba.mbx_cq ?
11009 				"Mailbox WQ" : "Mailbox CQ");
11010 		rc = -ENOMEM;
11011 		goto out_destroy;
11012 	}
11013 
11014 	rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
11015 			       phba->sli4_hba.mbx_cq,
11016 			       phba->sli4_hba.mbx_wq,
11017 			       NULL, 0, LPFC_MBOX);
11018 	if (rc) {
11019 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11020 			"0529 Failed setup of mailbox WQ/CQ: rc = 0x%x\n",
11021 			(uint32_t)rc);
11022 		goto out_destroy;
11023 	}
11024 	if (phba->nvmet_support) {
11025 		if (!phba->sli4_hba.nvmet_cqset) {
11026 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11027 					"3165 Fast-path NVME CQ Set "
11028 					"array not allocated\n");
11029 			rc = -ENOMEM;
11030 			goto out_destroy;
11031 		}
11032 		if (phba->cfg_nvmet_mrq > 1) {
11033 			rc = lpfc_cq_create_set(phba,
11034 					phba->sli4_hba.nvmet_cqset,
11035 					qp,
11036 					LPFC_WCQ, LPFC_NVMET);
11037 			if (rc) {
11038 				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11039 						"3164 Failed setup of NVME CQ "
11040 						"Set, rc = 0x%x\n",
11041 						(uint32_t)rc);
11042 				goto out_destroy;
11043 			}
11044 		} else {
11045 			/* Set up NVMET Receive Complete Queue */
11046 			rc = lpfc_cq_create(phba, phba->sli4_hba.nvmet_cqset[0],
11047 					    qp[0].hba_eq,
11048 					    LPFC_WCQ, LPFC_NVMET);
11049 			if (rc) {
11050 				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11051 						"6089 Failed setup NVMET CQ: "
11052 						"rc = 0x%x\n", (uint32_t)rc);
11053 				goto out_destroy;
11054 			}
11055 			phba->sli4_hba.nvmet_cqset[0]->chann = 0;
11056 
11057 			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11058 					"6090 NVMET CQ setup: cq-id=%d, "
11059 					"parent eq-id=%d\n",
11060 					phba->sli4_hba.nvmet_cqset[0]->queue_id,
11061 					qp[0].hba_eq->queue_id);
11062 		}
11063 	}
11064 
11065 	/* Set up slow-path ELS WQ/CQ */
11066 	if (!phba->sli4_hba.els_cq || !phba->sli4_hba.els_wq) {
11067 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11068 				"0530 ELS %s not allocated\n",
11069 				phba->sli4_hba.els_cq ? "WQ" : "CQ");
11070 		rc = -ENOMEM;
11071 		goto out_destroy;
11072 	}
11073 	rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
11074 			       phba->sli4_hba.els_cq,
11075 			       phba->sli4_hba.els_wq,
11076 			       NULL, 0, LPFC_ELS);
11077 	if (rc) {
11078 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11079 				"0525 Failed setup of ELS WQ/CQ: rc = 0x%x\n",
11080 				(uint32_t)rc);
11081 		goto out_destroy;
11082 	}
11083 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11084 			"2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
11085 			phba->sli4_hba.els_wq->queue_id,
11086 			phba->sli4_hba.els_cq->queue_id);
11087 
11088 	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
11089 		/* Set up NVME LS Complete Queue */
11090 		if (!phba->sli4_hba.nvmels_cq || !phba->sli4_hba.nvmels_wq) {
11091 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11092 					"6091 LS %s not allocated\n",
11093 					phba->sli4_hba.nvmels_cq ? "WQ" : "CQ");
11094 			rc = -ENOMEM;
11095 			goto out_destroy;
11096 		}
11097 		rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
11098 				       phba->sli4_hba.nvmels_cq,
11099 				       phba->sli4_hba.nvmels_wq,
11100 				       NULL, 0, LPFC_NVME_LS);
11101 		if (rc) {
11102 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11103 					"0526 Failed setup of NVVME LS WQ/CQ: "
11104 					"rc = 0x%x\n", (uint32_t)rc);
11105 			goto out_destroy;
11106 		}
11107 
11108 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11109 				"6096 ELS WQ setup: wq-id=%d, "
11110 				"parent cq-id=%d\n",
11111 				phba->sli4_hba.nvmels_wq->queue_id,
11112 				phba->sli4_hba.nvmels_cq->queue_id);
11113 	}
11114 
11115 	/*
11116 	 * Create NVMET Receive Queue (RQ)
11117 	 */
11118 	if (phba->nvmet_support) {
11119 		if ((!phba->sli4_hba.nvmet_cqset) ||
11120 		    (!phba->sli4_hba.nvmet_mrq_hdr) ||
11121 		    (!phba->sli4_hba.nvmet_mrq_data)) {
11122 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11123 					"6130 MRQ CQ Queues not "
11124 					"allocated\n");
11125 			rc = -ENOMEM;
11126 			goto out_destroy;
11127 		}
11128 		if (phba->cfg_nvmet_mrq > 1) {
11129 			rc = lpfc_mrq_create(phba,
11130 					     phba->sli4_hba.nvmet_mrq_hdr,
11131 					     phba->sli4_hba.nvmet_mrq_data,
11132 					     phba->sli4_hba.nvmet_cqset,
11133 					     LPFC_NVMET);
11134 			if (rc) {
11135 				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11136 						"6098 Failed setup of NVMET "
11137 						"MRQ: rc = 0x%x\n",
11138 						(uint32_t)rc);
11139 				goto out_destroy;
11140 			}
11141 
11142 		} else {
11143 			rc = lpfc_rq_create(phba,
11144 					    phba->sli4_hba.nvmet_mrq_hdr[0],
11145 					    phba->sli4_hba.nvmet_mrq_data[0],
11146 					    phba->sli4_hba.nvmet_cqset[0],
11147 					    LPFC_NVMET);
11148 			if (rc) {
11149 				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11150 						"6057 Failed setup of NVMET "
11151 						"Receive Queue: rc = 0x%x\n",
11152 						(uint32_t)rc);
11153 				goto out_destroy;
11154 			}
11155 
11156 			lpfc_printf_log(
11157 				phba, KERN_INFO, LOG_INIT,
11158 				"6099 NVMET RQ setup: hdr-rq-id=%d, "
11159 				"dat-rq-id=%d parent cq-id=%d\n",
11160 				phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id,
11161 				phba->sli4_hba.nvmet_mrq_data[0]->queue_id,
11162 				phba->sli4_hba.nvmet_cqset[0]->queue_id);
11163 
11164 		}
11165 	}
11166 
11167 	if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
11168 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11169 				"0540 Receive Queue not allocated\n");
11170 		rc = -ENOMEM;
11171 		goto out_destroy;
11172 	}
11173 
11174 	rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
11175 			    phba->sli4_hba.els_cq, LPFC_USOL);
11176 	if (rc) {
11177 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11178 				"0541 Failed setup of Receive Queue: "
11179 				"rc = 0x%x\n", (uint32_t)rc);
11180 		goto out_destroy;
11181 	}
11182 
11183 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11184 			"2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
11185 			"parent cq-id=%d\n",
11186 			phba->sli4_hba.hdr_rq->queue_id,
11187 			phba->sli4_hba.dat_rq->queue_id,
11188 			phba->sli4_hba.els_cq->queue_id);
11189 
11190 	if (phba->cfg_fcp_imax)
11191 		usdelay = LPFC_SEC_TO_USEC / phba->cfg_fcp_imax;
11192 	else
11193 		usdelay = 0;
11194 
11195 	for (qidx = 0; qidx < phba->cfg_irq_chann;
11196 	     qidx += LPFC_MAX_EQ_DELAY_EQID_CNT)
11197 		lpfc_modify_hba_eq_delay(phba, qidx, LPFC_MAX_EQ_DELAY_EQID_CNT,
11198 					 usdelay);
11199 
11200 	if (phba->sli4_hba.cq_max) {
11201 		kfree(phba->sli4_hba.cq_lookup);
11202 		phba->sli4_hba.cq_lookup = kcalloc((phba->sli4_hba.cq_max + 1),
11203 			sizeof(struct lpfc_queue *), GFP_KERNEL);
11204 		if (!phba->sli4_hba.cq_lookup) {
11205 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11206 					"0549 Failed setup of CQ Lookup table: "
11207 					"size 0x%x\n", phba->sli4_hba.cq_max);
11208 			rc = -ENOMEM;
11209 			goto out_destroy;
11210 		}
11211 		lpfc_setup_cq_lookup(phba);
11212 	}
11213 	return 0;
11214 
11215 out_destroy:
11216 	lpfc_sli4_queue_unset(phba);
11217 out_error:
11218 	return rc;
11219 }
11220 
11221 /**
11222  * lpfc_sli4_queue_unset - Unset all the SLI4 queues
11223  * @phba: pointer to lpfc hba data structure.
11224  *
11225  * This routine is invoked to unset all the SLI4 queues with the FCoE HBA
11226  * operation.
11227  *
11228  * Return codes
11229  *      0 - successful
11230  *      -ENOMEM - No available memory
11231  *      -EIO - The mailbox failed to complete successfully.
11232  **/
11233 void
11234 lpfc_sli4_queue_unset(struct lpfc_hba *phba)
11235 {
11236 	struct lpfc_sli4_hdw_queue *qp;
11237 	struct lpfc_queue *eq;
11238 	int qidx;
11239 
11240 	/* Unset mailbox command work queue */
11241 	if (phba->sli4_hba.mbx_wq)
11242 		lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
11243 
11244 	/* Unset NVME LS work queue */
11245 	if (phba->sli4_hba.nvmels_wq)
11246 		lpfc_wq_destroy(phba, phba->sli4_hba.nvmels_wq);
11247 
11248 	/* Unset ELS work queue */
11249 	if (phba->sli4_hba.els_wq)
11250 		lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
11251 
11252 	/* Unset unsolicited receive queue */
11253 	if (phba->sli4_hba.hdr_rq)
11254 		lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq,
11255 				phba->sli4_hba.dat_rq);
11256 
11257 	/* Unset mailbox command complete queue */
11258 	if (phba->sli4_hba.mbx_cq)
11259 		lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
11260 
11261 	/* Unset ELS complete queue */
11262 	if (phba->sli4_hba.els_cq)
11263 		lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
11264 
11265 	/* Unset NVME LS complete queue */
11266 	if (phba->sli4_hba.nvmels_cq)
11267 		lpfc_cq_destroy(phba, phba->sli4_hba.nvmels_cq);
11268 
11269 	if (phba->nvmet_support) {
11270 		/* Unset NVMET MRQ queue */
11271 		if (phba->sli4_hba.nvmet_mrq_hdr) {
11272 			for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
11273 				lpfc_rq_destroy(
11274 					phba,
11275 					phba->sli4_hba.nvmet_mrq_hdr[qidx],
11276 					phba->sli4_hba.nvmet_mrq_data[qidx]);
11277 		}
11278 
11279 		/* Unset NVMET CQ Set complete queue */
11280 		if (phba->sli4_hba.nvmet_cqset) {
11281 			for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
11282 				lpfc_cq_destroy(
11283 					phba, phba->sli4_hba.nvmet_cqset[qidx]);
11284 		}
11285 	}
11286 
11287 	/* Unset fast-path SLI4 queues */
11288 	if (phba->sli4_hba.hdwq) {
11289 		/* Loop thru all Hardware Queues */
11290 		for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
11291 			/* Destroy the CQ/WQ corresponding to Hardware Queue */
11292 			qp = &phba->sli4_hba.hdwq[qidx];
11293 			lpfc_wq_destroy(phba, qp->io_wq);
11294 			lpfc_cq_destroy(phba, qp->io_cq);
11295 		}
11296 		/* Loop thru all IRQ vectors */
11297 		for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
11298 			/* Destroy the EQ corresponding to the IRQ vector */
11299 			eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
11300 			lpfc_eq_destroy(phba, eq);
11301 		}
11302 	}
11303 
11304 	kfree(phba->sli4_hba.cq_lookup);
11305 	phba->sli4_hba.cq_lookup = NULL;
11306 	phba->sli4_hba.cq_max = 0;
11307 }
11308 
11309 /**
11310  * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
11311  * @phba: pointer to lpfc hba data structure.
11312  *
11313  * This routine is invoked to allocate and set up a pool of completion queue
11314  * events. The body of the completion queue event is a completion queue entry
11315  * CQE. For now, this pool is used for the interrupt service routine to queue
11316  * the following HBA completion queue events for the worker thread to process:
11317  *   - Mailbox asynchronous events
11318  *   - Receive queue completion unsolicited events
11319  * Later, this can be used for all the slow-path events.
11320  *
11321  * Return codes
11322  *      0 - successful
11323  *      -ENOMEM - No available memory
11324  **/
11325 static int
11326 lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
11327 {
11328 	struct lpfc_cq_event *cq_event;
11329 	int i;
11330 
11331 	for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
11332 		cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
11333 		if (!cq_event)
11334 			goto out_pool_create_fail;
11335 		list_add_tail(&cq_event->list,
11336 			      &phba->sli4_hba.sp_cqe_event_pool);
11337 	}
11338 	return 0;
11339 
11340 out_pool_create_fail:
11341 	lpfc_sli4_cq_event_pool_destroy(phba);
11342 	return -ENOMEM;
11343 }
11344 
11345 /**
11346  * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
11347  * @phba: pointer to lpfc hba data structure.
11348  *
11349  * This routine is invoked to free the pool of completion queue events at
11350  * driver unload time. Note that, it is the responsibility of the driver
11351  * cleanup routine to free all the outstanding completion-queue events
11352  * allocated from this pool back into the pool before invoking this routine
11353  * to destroy the pool.
11354  **/
11355 static void
11356 lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
11357 {
11358 	struct lpfc_cq_event *cq_event, *next_cq_event;
11359 
11360 	list_for_each_entry_safe(cq_event, next_cq_event,
11361 				 &phba->sli4_hba.sp_cqe_event_pool, list) {
11362 		list_del(&cq_event->list);
11363 		kfree(cq_event);
11364 	}
11365 }
11366 
11367 /**
11368  * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
11369  * @phba: pointer to lpfc hba data structure.
11370  *
11371  * This routine is the lock free version of the API invoked to allocate a
11372  * completion-queue event from the free pool.
11373  *
11374  * Return: Pointer to the newly allocated completion-queue event if successful
11375  *         NULL otherwise.
11376  **/
11377 struct lpfc_cq_event *
11378 __lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
11379 {
11380 	struct lpfc_cq_event *cq_event = NULL;
11381 
11382 	list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
11383 			 struct lpfc_cq_event, list);
11384 	return cq_event;
11385 }
11386 
11387 /**
11388  * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
11389  * @phba: pointer to lpfc hba data structure.
11390  *
11391  * This routine is the lock version of the API invoked to allocate a
11392  * completion-queue event from the free pool.
11393  *
11394  * Return: Pointer to the newly allocated completion-queue event if successful
11395  *         NULL otherwise.
11396  **/
11397 struct lpfc_cq_event *
11398 lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
11399 {
11400 	struct lpfc_cq_event *cq_event;
11401 	unsigned long iflags;
11402 
11403 	spin_lock_irqsave(&phba->hbalock, iflags);
11404 	cq_event = __lpfc_sli4_cq_event_alloc(phba);
11405 	spin_unlock_irqrestore(&phba->hbalock, iflags);
11406 	return cq_event;
11407 }
11408 
11409 /**
11410  * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
11411  * @phba: pointer to lpfc hba data structure.
11412  * @cq_event: pointer to the completion queue event to be freed.
11413  *
11414  * This routine is the lock free version of the API invoked to release a
11415  * completion-queue event back into the free pool.
11416  **/
11417 void
11418 __lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
11419 			     struct lpfc_cq_event *cq_event)
11420 {
11421 	list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
11422 }
11423 
11424 /**
11425  * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
11426  * @phba: pointer to lpfc hba data structure.
11427  * @cq_event: pointer to the completion queue event to be freed.
11428  *
11429  * This routine is the lock version of the API invoked to release a
11430  * completion-queue event back into the free pool.
11431  **/
11432 void
11433 lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
11434 			   struct lpfc_cq_event *cq_event)
11435 {
11436 	unsigned long iflags;
11437 	spin_lock_irqsave(&phba->hbalock, iflags);
11438 	__lpfc_sli4_cq_event_release(phba, cq_event);
11439 	spin_unlock_irqrestore(&phba->hbalock, iflags);
11440 }
11441 
11442 /**
11443  * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
11444  * @phba: pointer to lpfc hba data structure.
11445  *
11446  * This routine is to free all the pending completion-queue events to the
11447  * back into the free pool for device reset.
11448  **/
11449 static void
11450 lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
11451 {
11452 	LIST_HEAD(cq_event_list);
11453 	struct lpfc_cq_event *cq_event;
11454 	unsigned long iflags;
11455 
11456 	/* Retrieve all the pending WCQEs from pending WCQE lists */
11457 
11458 	/* Pending ELS XRI abort events */
11459 	spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
11460 	list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
11461 			 &cq_event_list);
11462 	spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
11463 
11464 	/* Pending asynnc events */
11465 	spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
11466 	list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
11467 			 &cq_event_list);
11468 	spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
11469 
11470 	while (!list_empty(&cq_event_list)) {
11471 		list_remove_head(&cq_event_list, cq_event,
11472 				 struct lpfc_cq_event, list);
11473 		lpfc_sli4_cq_event_release(phba, cq_event);
11474 	}
11475 }
11476 
11477 /**
11478  * lpfc_pci_function_reset - Reset pci function.
11479  * @phba: pointer to lpfc hba data structure.
11480  *
11481  * This routine is invoked to request a PCI function reset. It will destroys
11482  * all resources assigned to the PCI function which originates this request.
11483  *
11484  * Return codes
11485  *      0 - successful
11486  *      -ENOMEM - No available memory
11487  *      -EIO - The mailbox failed to complete successfully.
11488  **/
11489 int
11490 lpfc_pci_function_reset(struct lpfc_hba *phba)
11491 {
11492 	LPFC_MBOXQ_t *mboxq;
11493 	uint32_t rc = 0, if_type;
11494 	uint32_t shdr_status, shdr_add_status;
11495 	uint32_t rdy_chk;
11496 	uint32_t port_reset = 0;
11497 	union lpfc_sli4_cfg_shdr *shdr;
11498 	struct lpfc_register reg_data;
11499 	uint16_t devid;
11500 
11501 	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
11502 	switch (if_type) {
11503 	case LPFC_SLI_INTF_IF_TYPE_0:
11504 		mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
11505 						       GFP_KERNEL);
11506 		if (!mboxq) {
11507 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11508 					"0494 Unable to allocate memory for "
11509 					"issuing SLI_FUNCTION_RESET mailbox "
11510 					"command\n");
11511 			return -ENOMEM;
11512 		}
11513 
11514 		/* Setup PCI function reset mailbox-ioctl command */
11515 		lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
11516 				 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
11517 				 LPFC_SLI4_MBX_EMBED);
11518 		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
11519 		shdr = (union lpfc_sli4_cfg_shdr *)
11520 			&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
11521 		shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
11522 		shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
11523 					 &shdr->response);
11524 		mempool_free(mboxq, phba->mbox_mem_pool);
11525 		if (shdr_status || shdr_add_status || rc) {
11526 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11527 					"0495 SLI_FUNCTION_RESET mailbox "
11528 					"failed with status x%x add_status x%x,"
11529 					" mbx status x%x\n",
11530 					shdr_status, shdr_add_status, rc);
11531 			rc = -ENXIO;
11532 		}
11533 		break;
11534 	case LPFC_SLI_INTF_IF_TYPE_2:
11535 	case LPFC_SLI_INTF_IF_TYPE_6:
11536 wait:
11537 		/*
11538 		 * Poll the Port Status Register and wait for RDY for
11539 		 * up to 30 seconds. If the port doesn't respond, treat
11540 		 * it as an error.
11541 		 */
11542 		for (rdy_chk = 0; rdy_chk < 1500; rdy_chk++) {
11543 			if (lpfc_readl(phba->sli4_hba.u.if_type2.
11544 				STATUSregaddr, &reg_data.word0)) {
11545 				rc = -ENODEV;
11546 				goto out;
11547 			}
11548 			if (bf_get(lpfc_sliport_status_rdy, &reg_data))
11549 				break;
11550 			msleep(20);
11551 		}
11552 
11553 		if (!bf_get(lpfc_sliport_status_rdy, &reg_data)) {
11554 			phba->work_status[0] = readl(
11555 				phba->sli4_hba.u.if_type2.ERR1regaddr);
11556 			phba->work_status[1] = readl(
11557 				phba->sli4_hba.u.if_type2.ERR2regaddr);
11558 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11559 					"2890 Port not ready, port status reg "
11560 					"0x%x error 1=0x%x, error 2=0x%x\n",
11561 					reg_data.word0,
11562 					phba->work_status[0],
11563 					phba->work_status[1]);
11564 			rc = -ENODEV;
11565 			goto out;
11566 		}
11567 
11568 		if (!port_reset) {
11569 			/*
11570 			 * Reset the port now
11571 			 */
11572 			reg_data.word0 = 0;
11573 			bf_set(lpfc_sliport_ctrl_end, &reg_data,
11574 			       LPFC_SLIPORT_LITTLE_ENDIAN);
11575 			bf_set(lpfc_sliport_ctrl_ip, &reg_data,
11576 			       LPFC_SLIPORT_INIT_PORT);
11577 			writel(reg_data.word0, phba->sli4_hba.u.if_type2.
11578 			       CTRLregaddr);
11579 			/* flush */
11580 			pci_read_config_word(phba->pcidev,
11581 					     PCI_DEVICE_ID, &devid);
11582 
11583 			port_reset = 1;
11584 			msleep(20);
11585 			goto wait;
11586 		} else if (bf_get(lpfc_sliport_status_rn, &reg_data)) {
11587 			rc = -ENODEV;
11588 			goto out;
11589 		}
11590 		break;
11591 
11592 	case LPFC_SLI_INTF_IF_TYPE_1:
11593 	default:
11594 		break;
11595 	}
11596 
11597 out:
11598 	/* Catch the not-ready port failure after a port reset. */
11599 	if (rc) {
11600 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11601 				"3317 HBA not functional: IP Reset Failed "
11602 				"try: echo fw_reset > board_mode\n");
11603 		rc = -ENODEV;
11604 	}
11605 
11606 	return rc;
11607 }
11608 
11609 /**
11610  * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
11611  * @phba: pointer to lpfc hba data structure.
11612  *
11613  * This routine is invoked to set up the PCI device memory space for device
11614  * with SLI-4 interface spec.
11615  *
11616  * Return codes
11617  * 	0 - successful
11618  * 	other values - error
11619  **/
11620 static int
11621 lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
11622 {
11623 	struct pci_dev *pdev = phba->pcidev;
11624 	unsigned long bar0map_len, bar1map_len, bar2map_len;
11625 	int error;
11626 	uint32_t if_type;
11627 
11628 	if (!pdev)
11629 		return -ENODEV;
11630 
11631 	/* Set the device DMA mask size */
11632 	error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
11633 	if (error)
11634 		error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
11635 	if (error)
11636 		return error;
11637 
11638 	/*
11639 	 * The BARs and register set definitions and offset locations are
11640 	 * dependent on the if_type.
11641 	 */
11642 	if (pci_read_config_dword(pdev, LPFC_SLI_INTF,
11643 				  &phba->sli4_hba.sli_intf.word0)) {
11644 		return -ENODEV;
11645 	}
11646 
11647 	/* There is no SLI3 failback for SLI4 devices. */
11648 	if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) !=
11649 	    LPFC_SLI_INTF_VALID) {
11650 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11651 				"2894 SLI_INTF reg contents invalid "
11652 				"sli_intf reg 0x%x\n",
11653 				phba->sli4_hba.sli_intf.word0);
11654 		return -ENODEV;
11655 	}
11656 
11657 	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
11658 	/*
11659 	 * Get the bus address of SLI4 device Bar regions and the
11660 	 * number of bytes required by each mapping. The mapping of the
11661 	 * particular PCI BARs regions is dependent on the type of
11662 	 * SLI4 device.
11663 	 */
11664 	if (pci_resource_start(pdev, PCI_64BIT_BAR0)) {
11665 		phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0);
11666 		bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0);
11667 
11668 		/*
11669 		 * Map SLI4 PCI Config Space Register base to a kernel virtual
11670 		 * addr
11671 		 */
11672 		phba->sli4_hba.conf_regs_memmap_p =
11673 			ioremap(phba->pci_bar0_map, bar0map_len);
11674 		if (!phba->sli4_hba.conf_regs_memmap_p) {
11675 			dev_printk(KERN_ERR, &pdev->dev,
11676 				   "ioremap failed for SLI4 PCI config "
11677 				   "registers.\n");
11678 			return -ENODEV;
11679 		}
11680 		phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p;
11681 		/* Set up BAR0 PCI config space register memory map */
11682 		lpfc_sli4_bar0_register_memmap(phba, if_type);
11683 	} else {
11684 		phba->pci_bar0_map = pci_resource_start(pdev, 1);
11685 		bar0map_len = pci_resource_len(pdev, 1);
11686 		if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
11687 			dev_printk(KERN_ERR, &pdev->dev,
11688 			   "FATAL - No BAR0 mapping for SLI4, if_type 2\n");
11689 			return -ENODEV;
11690 		}
11691 		phba->sli4_hba.conf_regs_memmap_p =
11692 				ioremap(phba->pci_bar0_map, bar0map_len);
11693 		if (!phba->sli4_hba.conf_regs_memmap_p) {
11694 			dev_printk(KERN_ERR, &pdev->dev,
11695 				"ioremap failed for SLI4 PCI config "
11696 				"registers.\n");
11697 			return -ENODEV;
11698 		}
11699 		lpfc_sli4_bar0_register_memmap(phba, if_type);
11700 	}
11701 
11702 	if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
11703 		if (pci_resource_start(pdev, PCI_64BIT_BAR2)) {
11704 			/*
11705 			 * Map SLI4 if type 0 HBA Control Register base to a
11706 			 * kernel virtual address and setup the registers.
11707 			 */
11708 			phba->pci_bar1_map = pci_resource_start(pdev,
11709 								PCI_64BIT_BAR2);
11710 			bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
11711 			phba->sli4_hba.ctrl_regs_memmap_p =
11712 					ioremap(phba->pci_bar1_map,
11713 						bar1map_len);
11714 			if (!phba->sli4_hba.ctrl_regs_memmap_p) {
11715 				dev_err(&pdev->dev,
11716 					   "ioremap failed for SLI4 HBA "
11717 					    "control registers.\n");
11718 				error = -ENOMEM;
11719 				goto out_iounmap_conf;
11720 			}
11721 			phba->pci_bar2_memmap_p =
11722 					 phba->sli4_hba.ctrl_regs_memmap_p;
11723 			lpfc_sli4_bar1_register_memmap(phba, if_type);
11724 		} else {
11725 			error = -ENOMEM;
11726 			goto out_iounmap_conf;
11727 		}
11728 	}
11729 
11730 	if ((if_type == LPFC_SLI_INTF_IF_TYPE_6) &&
11731 	    (pci_resource_start(pdev, PCI_64BIT_BAR2))) {
11732 		/*
11733 		 * Map SLI4 if type 6 HBA Doorbell Register base to a kernel
11734 		 * virtual address and setup the registers.
11735 		 */
11736 		phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
11737 		bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
11738 		phba->sli4_hba.drbl_regs_memmap_p =
11739 				ioremap(phba->pci_bar1_map, bar1map_len);
11740 		if (!phba->sli4_hba.drbl_regs_memmap_p) {
11741 			dev_err(&pdev->dev,
11742 			   "ioremap failed for SLI4 HBA doorbell registers.\n");
11743 			error = -ENOMEM;
11744 			goto out_iounmap_conf;
11745 		}
11746 		phba->pci_bar2_memmap_p = phba->sli4_hba.drbl_regs_memmap_p;
11747 		lpfc_sli4_bar1_register_memmap(phba, if_type);
11748 	}
11749 
11750 	if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
11751 		if (pci_resource_start(pdev, PCI_64BIT_BAR4)) {
11752 			/*
11753 			 * Map SLI4 if type 0 HBA Doorbell Register base to
11754 			 * a kernel virtual address and setup the registers.
11755 			 */
11756 			phba->pci_bar2_map = pci_resource_start(pdev,
11757 								PCI_64BIT_BAR4);
11758 			bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
11759 			phba->sli4_hba.drbl_regs_memmap_p =
11760 					ioremap(phba->pci_bar2_map,
11761 						bar2map_len);
11762 			if (!phba->sli4_hba.drbl_regs_memmap_p) {
11763 				dev_err(&pdev->dev,
11764 					   "ioremap failed for SLI4 HBA"
11765 					   " doorbell registers.\n");
11766 				error = -ENOMEM;
11767 				goto out_iounmap_ctrl;
11768 			}
11769 			phba->pci_bar4_memmap_p =
11770 					phba->sli4_hba.drbl_regs_memmap_p;
11771 			error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
11772 			if (error)
11773 				goto out_iounmap_all;
11774 		} else {
11775 			error = -ENOMEM;
11776 			goto out_iounmap_all;
11777 		}
11778 	}
11779 
11780 	if (if_type == LPFC_SLI_INTF_IF_TYPE_6 &&
11781 	    pci_resource_start(pdev, PCI_64BIT_BAR4)) {
11782 		/*
11783 		 * Map SLI4 if type 6 HBA DPP Register base to a kernel
11784 		 * virtual address and setup the registers.
11785 		 */
11786 		phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
11787 		bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
11788 		phba->sli4_hba.dpp_regs_memmap_p =
11789 				ioremap(phba->pci_bar2_map, bar2map_len);
11790 		if (!phba->sli4_hba.dpp_regs_memmap_p) {
11791 			dev_err(&pdev->dev,
11792 			   "ioremap failed for SLI4 HBA dpp registers.\n");
11793 			error = -ENOMEM;
11794 			goto out_iounmap_ctrl;
11795 		}
11796 		phba->pci_bar4_memmap_p = phba->sli4_hba.dpp_regs_memmap_p;
11797 	}
11798 
11799 	/* Set up the EQ/CQ register handeling functions now */
11800 	switch (if_type) {
11801 	case LPFC_SLI_INTF_IF_TYPE_0:
11802 	case LPFC_SLI_INTF_IF_TYPE_2:
11803 		phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_eq_clr_intr;
11804 		phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_write_eq_db;
11805 		phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_write_cq_db;
11806 		break;
11807 	case LPFC_SLI_INTF_IF_TYPE_6:
11808 		phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_if6_eq_clr_intr;
11809 		phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_if6_write_eq_db;
11810 		phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_if6_write_cq_db;
11811 		break;
11812 	default:
11813 		break;
11814 	}
11815 
11816 	return 0;
11817 
11818 out_iounmap_all:
11819 	iounmap(phba->sli4_hba.drbl_regs_memmap_p);
11820 out_iounmap_ctrl:
11821 	iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
11822 out_iounmap_conf:
11823 	iounmap(phba->sli4_hba.conf_regs_memmap_p);
11824 
11825 	return error;
11826 }
11827 
11828 /**
11829  * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
11830  * @phba: pointer to lpfc hba data structure.
11831  *
11832  * This routine is invoked to unset the PCI device memory space for device
11833  * with SLI-4 interface spec.
11834  **/
11835 static void
11836 lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
11837 {
11838 	uint32_t if_type;
11839 	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
11840 
11841 	switch (if_type) {
11842 	case LPFC_SLI_INTF_IF_TYPE_0:
11843 		iounmap(phba->sli4_hba.drbl_regs_memmap_p);
11844 		iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
11845 		iounmap(phba->sli4_hba.conf_regs_memmap_p);
11846 		break;
11847 	case LPFC_SLI_INTF_IF_TYPE_2:
11848 		iounmap(phba->sli4_hba.conf_regs_memmap_p);
11849 		break;
11850 	case LPFC_SLI_INTF_IF_TYPE_6:
11851 		iounmap(phba->sli4_hba.drbl_regs_memmap_p);
11852 		iounmap(phba->sli4_hba.conf_regs_memmap_p);
11853 		if (phba->sli4_hba.dpp_regs_memmap_p)
11854 			iounmap(phba->sli4_hba.dpp_regs_memmap_p);
11855 		break;
11856 	case LPFC_SLI_INTF_IF_TYPE_1:
11857 	default:
11858 		dev_printk(KERN_ERR, &phba->pcidev->dev,
11859 			   "FATAL - unsupported SLI4 interface type - %d\n",
11860 			   if_type);
11861 		break;
11862 	}
11863 }
11864 
11865 /**
11866  * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
11867  * @phba: pointer to lpfc hba data structure.
11868  *
11869  * This routine is invoked to enable the MSI-X interrupt vectors to device
11870  * with SLI-3 interface specs.
11871  *
11872  * Return codes
11873  *   0 - successful
11874  *   other values - error
11875  **/
11876 static int
11877 lpfc_sli_enable_msix(struct lpfc_hba *phba)
11878 {
11879 	int rc;
11880 	LPFC_MBOXQ_t *pmb;
11881 
11882 	/* Set up MSI-X multi-message vectors */
11883 	rc = pci_alloc_irq_vectors(phba->pcidev,
11884 			LPFC_MSIX_VECTORS, LPFC_MSIX_VECTORS, PCI_IRQ_MSIX);
11885 	if (rc < 0) {
11886 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11887 				"0420 PCI enable MSI-X failed (%d)\n", rc);
11888 		goto vec_fail_out;
11889 	}
11890 
11891 	/*
11892 	 * Assign MSI-X vectors to interrupt handlers
11893 	 */
11894 
11895 	/* vector-0 is associated to slow-path handler */
11896 	rc = request_irq(pci_irq_vector(phba->pcidev, 0),
11897 			 &lpfc_sli_sp_intr_handler, 0,
11898 			 LPFC_SP_DRIVER_HANDLER_NAME, phba);
11899 	if (rc) {
11900 		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
11901 				"0421 MSI-X slow-path request_irq failed "
11902 				"(%d)\n", rc);
11903 		goto msi_fail_out;
11904 	}
11905 
11906 	/* vector-1 is associated to fast-path handler */
11907 	rc = request_irq(pci_irq_vector(phba->pcidev, 1),
11908 			 &lpfc_sli_fp_intr_handler, 0,
11909 			 LPFC_FP_DRIVER_HANDLER_NAME, phba);
11910 
11911 	if (rc) {
11912 		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
11913 				"0429 MSI-X fast-path request_irq failed "
11914 				"(%d)\n", rc);
11915 		goto irq_fail_out;
11916 	}
11917 
11918 	/*
11919 	 * Configure HBA MSI-X attention conditions to messages
11920 	 */
11921 	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
11922 
11923 	if (!pmb) {
11924 		rc = -ENOMEM;
11925 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11926 				"0474 Unable to allocate memory for issuing "
11927 				"MBOX_CONFIG_MSI command\n");
11928 		goto mem_fail_out;
11929 	}
11930 	rc = lpfc_config_msi(phba, pmb);
11931 	if (rc)
11932 		goto mbx_fail_out;
11933 	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
11934 	if (rc != MBX_SUCCESS) {
11935 		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
11936 				"0351 Config MSI mailbox command failed, "
11937 				"mbxCmd x%x, mbxStatus x%x\n",
11938 				pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
11939 		goto mbx_fail_out;
11940 	}
11941 
11942 	/* Free memory allocated for mailbox command */
11943 	mempool_free(pmb, phba->mbox_mem_pool);
11944 	return rc;
11945 
11946 mbx_fail_out:
11947 	/* Free memory allocated for mailbox command */
11948 	mempool_free(pmb, phba->mbox_mem_pool);
11949 
11950 mem_fail_out:
11951 	/* free the irq already requested */
11952 	free_irq(pci_irq_vector(phba->pcidev, 1), phba);
11953 
11954 irq_fail_out:
11955 	/* free the irq already requested */
11956 	free_irq(pci_irq_vector(phba->pcidev, 0), phba);
11957 
11958 msi_fail_out:
11959 	/* Unconfigure MSI-X capability structure */
11960 	pci_free_irq_vectors(phba->pcidev);
11961 
11962 vec_fail_out:
11963 	return rc;
11964 }
11965 
11966 /**
11967  * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
11968  * @phba: pointer to lpfc hba data structure.
11969  *
11970  * This routine is invoked to enable the MSI interrupt mode to device with
11971  * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
11972  * enable the MSI vector. The device driver is responsible for calling the
11973  * request_irq() to register MSI vector with a interrupt the handler, which
11974  * is done in this function.
11975  *
11976  * Return codes
11977  * 	0 - successful
11978  * 	other values - error
11979  */
11980 static int
11981 lpfc_sli_enable_msi(struct lpfc_hba *phba)
11982 {
11983 	int rc;
11984 
11985 	rc = pci_enable_msi(phba->pcidev);
11986 	if (!rc)
11987 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11988 				"0462 PCI enable MSI mode success.\n");
11989 	else {
11990 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11991 				"0471 PCI enable MSI mode failed (%d)\n", rc);
11992 		return rc;
11993 	}
11994 
11995 	rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
11996 			 0, LPFC_DRIVER_NAME, phba);
11997 	if (rc) {
11998 		pci_disable_msi(phba->pcidev);
11999 		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
12000 				"0478 MSI request_irq failed (%d)\n", rc);
12001 	}
12002 	return rc;
12003 }
12004 
12005 /**
12006  * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
12007  * @phba: pointer to lpfc hba data structure.
12008  * @cfg_mode: Interrupt configuration mode (INTx, MSI or MSI-X).
12009  *
12010  * This routine is invoked to enable device interrupt and associate driver's
12011  * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface
12012  * spec. Depends on the interrupt mode configured to the driver, the driver
12013  * will try to fallback from the configured interrupt mode to an interrupt
12014  * mode which is supported by the platform, kernel, and device in the order
12015  * of:
12016  * MSI-X -> MSI -> IRQ.
12017  *
12018  * Return codes
12019  *   0 - successful
12020  *   other values - error
12021  **/
12022 static uint32_t
12023 lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
12024 {
12025 	uint32_t intr_mode = LPFC_INTR_ERROR;
12026 	int retval;
12027 
12028 	/* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
12029 	retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
12030 	if (retval)
12031 		return intr_mode;
12032 	phba->hba_flag &= ~HBA_NEEDS_CFG_PORT;
12033 
12034 	if (cfg_mode == 2) {
12035 		/* Now, try to enable MSI-X interrupt mode */
12036 		retval = lpfc_sli_enable_msix(phba);
12037 		if (!retval) {
12038 			/* Indicate initialization to MSI-X mode */
12039 			phba->intr_type = MSIX;
12040 			intr_mode = 2;
12041 		}
12042 	}
12043 
12044 	/* Fallback to MSI if MSI-X initialization failed */
12045 	if (cfg_mode >= 1 && phba->intr_type == NONE) {
12046 		retval = lpfc_sli_enable_msi(phba);
12047 		if (!retval) {
12048 			/* Indicate initialization to MSI mode */
12049 			phba->intr_type = MSI;
12050 			intr_mode = 1;
12051 		}
12052 	}
12053 
12054 	/* Fallback to INTx if both MSI-X/MSI initalization failed */
12055 	if (phba->intr_type == NONE) {
12056 		retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
12057 				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
12058 		if (!retval) {
12059 			/* Indicate initialization to INTx mode */
12060 			phba->intr_type = INTx;
12061 			intr_mode = 0;
12062 		}
12063 	}
12064 	return intr_mode;
12065 }
12066 
12067 /**
12068  * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
12069  * @phba: pointer to lpfc hba data structure.
12070  *
12071  * This routine is invoked to disable device interrupt and disassociate the
12072  * driver's interrupt handler(s) from interrupt vector(s) to device with
12073  * SLI-3 interface spec. Depending on the interrupt mode, the driver will
12074  * release the interrupt vector(s) for the message signaled interrupt.
12075  **/
12076 static void
12077 lpfc_sli_disable_intr(struct lpfc_hba *phba)
12078 {
12079 	int nr_irqs, i;
12080 
12081 	if (phba->intr_type == MSIX)
12082 		nr_irqs = LPFC_MSIX_VECTORS;
12083 	else
12084 		nr_irqs = 1;
12085 
12086 	for (i = 0; i < nr_irqs; i++)
12087 		free_irq(pci_irq_vector(phba->pcidev, i), phba);
12088 	pci_free_irq_vectors(phba->pcidev);
12089 
12090 	/* Reset interrupt management states */
12091 	phba->intr_type = NONE;
12092 	phba->sli.slistat.sli_intr = 0;
12093 }
12094 
12095 /**
12096  * lpfc_find_cpu_handle - Find the CPU that corresponds to the specified Queue
12097  * @phba: pointer to lpfc hba data structure.
12098  * @id: EQ vector index or Hardware Queue index
12099  * @match: LPFC_FIND_BY_EQ = match by EQ
12100  *         LPFC_FIND_BY_HDWQ = match by Hardware Queue
12101  * Return the CPU that matches the selection criteria
12102  */
12103 static uint16_t
12104 lpfc_find_cpu_handle(struct lpfc_hba *phba, uint16_t id, int match)
12105 {
12106 	struct lpfc_vector_map_info *cpup;
12107 	int cpu;
12108 
12109 	/* Loop through all CPUs */
12110 	for_each_present_cpu(cpu) {
12111 		cpup = &phba->sli4_hba.cpu_map[cpu];
12112 
12113 		/* If we are matching by EQ, there may be multiple CPUs using
12114 		 * using the same vector, so select the one with
12115 		 * LPFC_CPU_FIRST_IRQ set.
12116 		 */
12117 		if ((match == LPFC_FIND_BY_EQ) &&
12118 		    (cpup->flag & LPFC_CPU_FIRST_IRQ) &&
12119 		    (cpup->eq == id))
12120 			return cpu;
12121 
12122 		/* If matching by HDWQ, select the first CPU that matches */
12123 		if ((match == LPFC_FIND_BY_HDWQ) && (cpup->hdwq == id))
12124 			return cpu;
12125 	}
12126 	return 0;
12127 }
12128 
12129 #ifdef CONFIG_X86
12130 /**
12131  * lpfc_find_hyper - Determine if the CPU map entry is hyper-threaded
12132  * @phba: pointer to lpfc hba data structure.
12133  * @cpu: CPU map index
12134  * @phys_id: CPU package physical id
12135  * @core_id: CPU core id
12136  */
12137 static int
12138 lpfc_find_hyper(struct lpfc_hba *phba, int cpu,
12139 		uint16_t phys_id, uint16_t core_id)
12140 {
12141 	struct lpfc_vector_map_info *cpup;
12142 	int idx;
12143 
12144 	for_each_present_cpu(idx) {
12145 		cpup = &phba->sli4_hba.cpu_map[idx];
12146 		/* Does the cpup match the one we are looking for */
12147 		if ((cpup->phys_id == phys_id) &&
12148 		    (cpup->core_id == core_id) &&
12149 		    (cpu != idx))
12150 			return 1;
12151 	}
12152 	return 0;
12153 }
12154 #endif
12155 
12156 /*
12157  * lpfc_assign_eq_map_info - Assigns eq for vector_map structure
12158  * @phba: pointer to lpfc hba data structure.
12159  * @eqidx: index for eq and irq vector
12160  * @flag: flags to set for vector_map structure
12161  * @cpu: cpu used to index vector_map structure
12162  *
12163  * The routine assigns eq info into vector_map structure
12164  */
12165 static inline void
12166 lpfc_assign_eq_map_info(struct lpfc_hba *phba, uint16_t eqidx, uint16_t flag,
12167 			unsigned int cpu)
12168 {
12169 	struct lpfc_vector_map_info *cpup = &phba->sli4_hba.cpu_map[cpu];
12170 	struct lpfc_hba_eq_hdl *eqhdl = lpfc_get_eq_hdl(eqidx);
12171 
12172 	cpup->eq = eqidx;
12173 	cpup->flag |= flag;
12174 
12175 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12176 			"3336 Set Affinity: CPU %d irq %d eq %d flag x%x\n",
12177 			cpu, eqhdl->irq, cpup->eq, cpup->flag);
12178 }
12179 
12180 /**
12181  * lpfc_cpu_map_array_init - Initialize cpu_map structure
12182  * @phba: pointer to lpfc hba data structure.
12183  *
12184  * The routine initializes the cpu_map array structure
12185  */
12186 static void
12187 lpfc_cpu_map_array_init(struct lpfc_hba *phba)
12188 {
12189 	struct lpfc_vector_map_info *cpup;
12190 	struct lpfc_eq_intr_info *eqi;
12191 	int cpu;
12192 
12193 	for_each_possible_cpu(cpu) {
12194 		cpup = &phba->sli4_hba.cpu_map[cpu];
12195 		cpup->phys_id = LPFC_VECTOR_MAP_EMPTY;
12196 		cpup->core_id = LPFC_VECTOR_MAP_EMPTY;
12197 		cpup->hdwq = LPFC_VECTOR_MAP_EMPTY;
12198 		cpup->eq = LPFC_VECTOR_MAP_EMPTY;
12199 		cpup->flag = 0;
12200 		eqi = per_cpu_ptr(phba->sli4_hba.eq_info, cpu);
12201 		INIT_LIST_HEAD(&eqi->list);
12202 		eqi->icnt = 0;
12203 	}
12204 }
12205 
12206 /**
12207  * lpfc_hba_eq_hdl_array_init - Initialize hba_eq_hdl structure
12208  * @phba: pointer to lpfc hba data structure.
12209  *
12210  * The routine initializes the hba_eq_hdl array structure
12211  */
12212 static void
12213 lpfc_hba_eq_hdl_array_init(struct lpfc_hba *phba)
12214 {
12215 	struct lpfc_hba_eq_hdl *eqhdl;
12216 	int i;
12217 
12218 	for (i = 0; i < phba->cfg_irq_chann; i++) {
12219 		eqhdl = lpfc_get_eq_hdl(i);
12220 		eqhdl->irq = LPFC_VECTOR_MAP_EMPTY;
12221 		eqhdl->phba = phba;
12222 	}
12223 }
12224 
12225 /**
12226  * lpfc_cpu_affinity_check - Check vector CPU affinity mappings
12227  * @phba: pointer to lpfc hba data structure.
12228  * @vectors: number of msix vectors allocated.
12229  *
12230  * The routine will figure out the CPU affinity assignment for every
12231  * MSI-X vector allocated for the HBA.
12232  * In addition, the CPU to IO channel mapping will be calculated
12233  * and the phba->sli4_hba.cpu_map array will reflect this.
12234  */
12235 static void
12236 lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
12237 {
12238 	int i, cpu, idx, next_idx, new_cpu, start_cpu, first_cpu;
12239 	int max_phys_id, min_phys_id;
12240 	int max_core_id, min_core_id;
12241 	struct lpfc_vector_map_info *cpup;
12242 	struct lpfc_vector_map_info *new_cpup;
12243 #ifdef CONFIG_X86
12244 	struct cpuinfo_x86 *cpuinfo;
12245 #endif
12246 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
12247 	struct lpfc_hdwq_stat *c_stat;
12248 #endif
12249 
12250 	max_phys_id = 0;
12251 	min_phys_id = LPFC_VECTOR_MAP_EMPTY;
12252 	max_core_id = 0;
12253 	min_core_id = LPFC_VECTOR_MAP_EMPTY;
12254 
12255 	/* Update CPU map with physical id and core id of each CPU */
12256 	for_each_present_cpu(cpu) {
12257 		cpup = &phba->sli4_hba.cpu_map[cpu];
12258 #ifdef CONFIG_X86
12259 		cpuinfo = &cpu_data(cpu);
12260 		cpup->phys_id = cpuinfo->phys_proc_id;
12261 		cpup->core_id = cpuinfo->cpu_core_id;
12262 		if (lpfc_find_hyper(phba, cpu, cpup->phys_id, cpup->core_id))
12263 			cpup->flag |= LPFC_CPU_MAP_HYPER;
12264 #else
12265 		/* No distinction between CPUs for other platforms */
12266 		cpup->phys_id = 0;
12267 		cpup->core_id = cpu;
12268 #endif
12269 
12270 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12271 				"3328 CPU %d physid %d coreid %d flag x%x\n",
12272 				cpu, cpup->phys_id, cpup->core_id, cpup->flag);
12273 
12274 		if (cpup->phys_id > max_phys_id)
12275 			max_phys_id = cpup->phys_id;
12276 		if (cpup->phys_id < min_phys_id)
12277 			min_phys_id = cpup->phys_id;
12278 
12279 		if (cpup->core_id > max_core_id)
12280 			max_core_id = cpup->core_id;
12281 		if (cpup->core_id < min_core_id)
12282 			min_core_id = cpup->core_id;
12283 	}
12284 
12285 	/* After looking at each irq vector assigned to this pcidev, its
12286 	 * possible to see that not ALL CPUs have been accounted for.
12287 	 * Next we will set any unassigned (unaffinitized) cpu map
12288 	 * entries to a IRQ on the same phys_id.
12289 	 */
12290 	first_cpu = cpumask_first(cpu_present_mask);
12291 	start_cpu = first_cpu;
12292 
12293 	for_each_present_cpu(cpu) {
12294 		cpup = &phba->sli4_hba.cpu_map[cpu];
12295 
12296 		/* Is this CPU entry unassigned */
12297 		if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
12298 			/* Mark CPU as IRQ not assigned by the kernel */
12299 			cpup->flag |= LPFC_CPU_MAP_UNASSIGN;
12300 
12301 			/* If so, find a new_cpup thats on the the SAME
12302 			 * phys_id as cpup. start_cpu will start where we
12303 			 * left off so all unassigned entries don't get assgined
12304 			 * the IRQ of the first entry.
12305 			 */
12306 			new_cpu = start_cpu;
12307 			for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
12308 				new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
12309 				if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
12310 				    (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY) &&
12311 				    (new_cpup->phys_id == cpup->phys_id))
12312 					goto found_same;
12313 				new_cpu = cpumask_next(
12314 					new_cpu, cpu_present_mask);
12315 				if (new_cpu == nr_cpumask_bits)
12316 					new_cpu = first_cpu;
12317 			}
12318 			/* At this point, we leave the CPU as unassigned */
12319 			continue;
12320 found_same:
12321 			/* We found a matching phys_id, so copy the IRQ info */
12322 			cpup->eq = new_cpup->eq;
12323 
12324 			/* Bump start_cpu to the next slot to minmize the
12325 			 * chance of having multiple unassigned CPU entries
12326 			 * selecting the same IRQ.
12327 			 */
12328 			start_cpu = cpumask_next(new_cpu, cpu_present_mask);
12329 			if (start_cpu == nr_cpumask_bits)
12330 				start_cpu = first_cpu;
12331 
12332 			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12333 					"3337 Set Affinity: CPU %d "
12334 					"eq %d from peer cpu %d same "
12335 					"phys_id (%d)\n",
12336 					cpu, cpup->eq, new_cpu,
12337 					cpup->phys_id);
12338 		}
12339 	}
12340 
12341 	/* Set any unassigned cpu map entries to a IRQ on any phys_id */
12342 	start_cpu = first_cpu;
12343 
12344 	for_each_present_cpu(cpu) {
12345 		cpup = &phba->sli4_hba.cpu_map[cpu];
12346 
12347 		/* Is this entry unassigned */
12348 		if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
12349 			/* Mark it as IRQ not assigned by the kernel */
12350 			cpup->flag |= LPFC_CPU_MAP_UNASSIGN;
12351 
12352 			/* If so, find a new_cpup thats on ANY phys_id
12353 			 * as the cpup. start_cpu will start where we
12354 			 * left off so all unassigned entries don't get
12355 			 * assigned the IRQ of the first entry.
12356 			 */
12357 			new_cpu = start_cpu;
12358 			for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
12359 				new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
12360 				if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
12361 				    (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY))
12362 					goto found_any;
12363 				new_cpu = cpumask_next(
12364 					new_cpu, cpu_present_mask);
12365 				if (new_cpu == nr_cpumask_bits)
12366 					new_cpu = first_cpu;
12367 			}
12368 			/* We should never leave an entry unassigned */
12369 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12370 					"3339 Set Affinity: CPU %d "
12371 					"eq %d UNASSIGNED\n",
12372 					cpup->hdwq, cpup->eq);
12373 			continue;
12374 found_any:
12375 			/* We found an available entry, copy the IRQ info */
12376 			cpup->eq = new_cpup->eq;
12377 
12378 			/* Bump start_cpu to the next slot to minmize the
12379 			 * chance of having multiple unassigned CPU entries
12380 			 * selecting the same IRQ.
12381 			 */
12382 			start_cpu = cpumask_next(new_cpu, cpu_present_mask);
12383 			if (start_cpu == nr_cpumask_bits)
12384 				start_cpu = first_cpu;
12385 
12386 			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12387 					"3338 Set Affinity: CPU %d "
12388 					"eq %d from peer cpu %d (%d/%d)\n",
12389 					cpu, cpup->eq, new_cpu,
12390 					new_cpup->phys_id, new_cpup->core_id);
12391 		}
12392 	}
12393 
12394 	/* Assign hdwq indices that are unique across all cpus in the map
12395 	 * that are also FIRST_CPUs.
12396 	 */
12397 	idx = 0;
12398 	for_each_present_cpu(cpu) {
12399 		cpup = &phba->sli4_hba.cpu_map[cpu];
12400 
12401 		/* Only FIRST IRQs get a hdwq index assignment. */
12402 		if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
12403 			continue;
12404 
12405 		/* 1 to 1, the first LPFC_CPU_FIRST_IRQ cpus to a unique hdwq */
12406 		cpup->hdwq = idx;
12407 		idx++;
12408 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12409 				"3333 Set Affinity: CPU %d (phys %d core %d): "
12410 				"hdwq %d eq %d flg x%x\n",
12411 				cpu, cpup->phys_id, cpup->core_id,
12412 				cpup->hdwq, cpup->eq, cpup->flag);
12413 	}
12414 	/* Associate a hdwq with each cpu_map entry
12415 	 * This will be 1 to 1 - hdwq to cpu, unless there are less
12416 	 * hardware queues then CPUs. For that case we will just round-robin
12417 	 * the available hardware queues as they get assigned to CPUs.
12418 	 * The next_idx is the idx from the FIRST_CPU loop above to account
12419 	 * for irq_chann < hdwq.  The idx is used for round-robin assignments
12420 	 * and needs to start at 0.
12421 	 */
12422 	next_idx = idx;
12423 	start_cpu = 0;
12424 	idx = 0;
12425 	for_each_present_cpu(cpu) {
12426 		cpup = &phba->sli4_hba.cpu_map[cpu];
12427 
12428 		/* FIRST cpus are already mapped. */
12429 		if (cpup->flag & LPFC_CPU_FIRST_IRQ)
12430 			continue;
12431 
12432 		/* If the cfg_irq_chann < cfg_hdw_queue, set the hdwq
12433 		 * of the unassigned cpus to the next idx so that all
12434 		 * hdw queues are fully utilized.
12435 		 */
12436 		if (next_idx < phba->cfg_hdw_queue) {
12437 			cpup->hdwq = next_idx;
12438 			next_idx++;
12439 			continue;
12440 		}
12441 
12442 		/* Not a First CPU and all hdw_queues are used.  Reuse a
12443 		 * Hardware Queue for another CPU, so be smart about it
12444 		 * and pick one that has its IRQ/EQ mapped to the same phys_id
12445 		 * (CPU package) and core_id.
12446 		 */
12447 		new_cpu = start_cpu;
12448 		for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
12449 			new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
12450 			if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY &&
12451 			    new_cpup->phys_id == cpup->phys_id &&
12452 			    new_cpup->core_id == cpup->core_id) {
12453 				goto found_hdwq;
12454 			}
12455 			new_cpu = cpumask_next(new_cpu, cpu_present_mask);
12456 			if (new_cpu == nr_cpumask_bits)
12457 				new_cpu = first_cpu;
12458 		}
12459 
12460 		/* If we can't match both phys_id and core_id,
12461 		 * settle for just a phys_id match.
12462 		 */
12463 		new_cpu = start_cpu;
12464 		for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
12465 			new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
12466 			if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY &&
12467 			    new_cpup->phys_id == cpup->phys_id)
12468 				goto found_hdwq;
12469 
12470 			new_cpu = cpumask_next(new_cpu, cpu_present_mask);
12471 			if (new_cpu == nr_cpumask_bits)
12472 				new_cpu = first_cpu;
12473 		}
12474 
12475 		/* Otherwise just round robin on cfg_hdw_queue */
12476 		cpup->hdwq = idx % phba->cfg_hdw_queue;
12477 		idx++;
12478 		goto logit;
12479  found_hdwq:
12480 		/* We found an available entry, copy the IRQ info */
12481 		start_cpu = cpumask_next(new_cpu, cpu_present_mask);
12482 		if (start_cpu == nr_cpumask_bits)
12483 			start_cpu = first_cpu;
12484 		cpup->hdwq = new_cpup->hdwq;
12485  logit:
12486 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12487 				"3335 Set Affinity: CPU %d (phys %d core %d): "
12488 				"hdwq %d eq %d flg x%x\n",
12489 				cpu, cpup->phys_id, cpup->core_id,
12490 				cpup->hdwq, cpup->eq, cpup->flag);
12491 	}
12492 
12493 	/*
12494 	 * Initialize the cpu_map slots for not-present cpus in case
12495 	 * a cpu is hot-added. Perform a simple hdwq round robin assignment.
12496 	 */
12497 	idx = 0;
12498 	for_each_possible_cpu(cpu) {
12499 		cpup = &phba->sli4_hba.cpu_map[cpu];
12500 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
12501 		c_stat = per_cpu_ptr(phba->sli4_hba.c_stat, cpu);
12502 		c_stat->hdwq_no = cpup->hdwq;
12503 #endif
12504 		if (cpup->hdwq != LPFC_VECTOR_MAP_EMPTY)
12505 			continue;
12506 
12507 		cpup->hdwq = idx++ % phba->cfg_hdw_queue;
12508 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
12509 		c_stat->hdwq_no = cpup->hdwq;
12510 #endif
12511 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12512 				"3340 Set Affinity: not present "
12513 				"CPU %d hdwq %d\n",
12514 				cpu, cpup->hdwq);
12515 	}
12516 
12517 	/* The cpu_map array will be used later during initialization
12518 	 * when EQ / CQ / WQs are allocated and configured.
12519 	 */
12520 	return;
12521 }
12522 
12523 /**
12524  * lpfc_cpuhp_get_eq
12525  *
12526  * @phba:   pointer to lpfc hba data structure.
12527  * @cpu:    cpu going offline
12528  * @eqlist: eq list to append to
12529  */
12530 static int
12531 lpfc_cpuhp_get_eq(struct lpfc_hba *phba, unsigned int cpu,
12532 		  struct list_head *eqlist)
12533 {
12534 	const struct cpumask *maskp;
12535 	struct lpfc_queue *eq;
12536 	struct cpumask *tmp;
12537 	u16 idx;
12538 
12539 	tmp = kzalloc(cpumask_size(), GFP_KERNEL);
12540 	if (!tmp)
12541 		return -ENOMEM;
12542 
12543 	for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
12544 		maskp = pci_irq_get_affinity(phba->pcidev, idx);
12545 		if (!maskp)
12546 			continue;
12547 		/*
12548 		 * if irq is not affinitized to the cpu going
12549 		 * then we don't need to poll the eq attached
12550 		 * to it.
12551 		 */
12552 		if (!cpumask_and(tmp, maskp, cpumask_of(cpu)))
12553 			continue;
12554 		/* get the cpus that are online and are affini-
12555 		 * tized to this irq vector.  If the count is
12556 		 * more than 1 then cpuhp is not going to shut-
12557 		 * down this vector.  Since this cpu has not
12558 		 * gone offline yet, we need >1.
12559 		 */
12560 		cpumask_and(tmp, maskp, cpu_online_mask);
12561 		if (cpumask_weight(tmp) > 1)
12562 			continue;
12563 
12564 		/* Now that we have an irq to shutdown, get the eq
12565 		 * mapped to this irq.  Note: multiple hdwq's in
12566 		 * the software can share an eq, but eventually
12567 		 * only eq will be mapped to this vector
12568 		 */
12569 		eq = phba->sli4_hba.hba_eq_hdl[idx].eq;
12570 		list_add(&eq->_poll_list, eqlist);
12571 	}
12572 	kfree(tmp);
12573 	return 0;
12574 }
12575 
12576 static void __lpfc_cpuhp_remove(struct lpfc_hba *phba)
12577 {
12578 	if (phba->sli_rev != LPFC_SLI_REV4)
12579 		return;
12580 
12581 	cpuhp_state_remove_instance_nocalls(lpfc_cpuhp_state,
12582 					    &phba->cpuhp);
12583 	/*
12584 	 * unregistering the instance doesn't stop the polling
12585 	 * timer. Wait for the poll timer to retire.
12586 	 */
12587 	synchronize_rcu();
12588 	del_timer_sync(&phba->cpuhp_poll_timer);
12589 }
12590 
12591 static void lpfc_cpuhp_remove(struct lpfc_hba *phba)
12592 {
12593 	if (phba->pport->fc_flag & FC_OFFLINE_MODE)
12594 		return;
12595 
12596 	__lpfc_cpuhp_remove(phba);
12597 }
12598 
12599 static void lpfc_cpuhp_add(struct lpfc_hba *phba)
12600 {
12601 	if (phba->sli_rev != LPFC_SLI_REV4)
12602 		return;
12603 
12604 	rcu_read_lock();
12605 
12606 	if (!list_empty(&phba->poll_list))
12607 		mod_timer(&phba->cpuhp_poll_timer,
12608 			  jiffies + msecs_to_jiffies(LPFC_POLL_HB));
12609 
12610 	rcu_read_unlock();
12611 
12612 	cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state,
12613 					 &phba->cpuhp);
12614 }
12615 
12616 static int __lpfc_cpuhp_checks(struct lpfc_hba *phba, int *retval)
12617 {
12618 	if (phba->pport->load_flag & FC_UNLOADING) {
12619 		*retval = -EAGAIN;
12620 		return true;
12621 	}
12622 
12623 	if (phba->sli_rev != LPFC_SLI_REV4) {
12624 		*retval = 0;
12625 		return true;
12626 	}
12627 
12628 	/* proceed with the hotplug */
12629 	return false;
12630 }
12631 
12632 /**
12633  * lpfc_irq_set_aff - set IRQ affinity
12634  * @eqhdl: EQ handle
12635  * @cpu: cpu to set affinity
12636  *
12637  **/
12638 static inline void
12639 lpfc_irq_set_aff(struct lpfc_hba_eq_hdl *eqhdl, unsigned int cpu)
12640 {
12641 	cpumask_clear(&eqhdl->aff_mask);
12642 	cpumask_set_cpu(cpu, &eqhdl->aff_mask);
12643 	irq_set_status_flags(eqhdl->irq, IRQ_NO_BALANCING);
12644 	irq_set_affinity_hint(eqhdl->irq, &eqhdl->aff_mask);
12645 }
12646 
12647 /**
12648  * lpfc_irq_clear_aff - clear IRQ affinity
12649  * @eqhdl: EQ handle
12650  *
12651  **/
12652 static inline void
12653 lpfc_irq_clear_aff(struct lpfc_hba_eq_hdl *eqhdl)
12654 {
12655 	cpumask_clear(&eqhdl->aff_mask);
12656 	irq_clear_status_flags(eqhdl->irq, IRQ_NO_BALANCING);
12657 }
12658 
12659 /**
12660  * lpfc_irq_rebalance - rebalances IRQ affinity according to cpuhp event
12661  * @phba: pointer to HBA context object.
12662  * @cpu: cpu going offline/online
12663  * @offline: true, cpu is going offline. false, cpu is coming online.
12664  *
12665  * If cpu is going offline, we'll try our best effort to find the next
12666  * online cpu on the phba's original_mask and migrate all offlining IRQ
12667  * affinities.
12668  *
12669  * If cpu is coming online, reaffinitize the IRQ back to the onlining cpu.
12670  *
12671  * Note: Call only if NUMA or NHT mode is enabled, otherwise rely on
12672  *	 PCI_IRQ_AFFINITY to auto-manage IRQ affinity.
12673  *
12674  **/
12675 static void
12676 lpfc_irq_rebalance(struct lpfc_hba *phba, unsigned int cpu, bool offline)
12677 {
12678 	struct lpfc_vector_map_info *cpup;
12679 	struct cpumask *aff_mask;
12680 	unsigned int cpu_select, cpu_next, idx;
12681 	const struct cpumask *orig_mask;
12682 
12683 	if (phba->irq_chann_mode == NORMAL_MODE)
12684 		return;
12685 
12686 	orig_mask = &phba->sli4_hba.irq_aff_mask;
12687 
12688 	if (!cpumask_test_cpu(cpu, orig_mask))
12689 		return;
12690 
12691 	cpup = &phba->sli4_hba.cpu_map[cpu];
12692 
12693 	if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
12694 		return;
12695 
12696 	if (offline) {
12697 		/* Find next online CPU on original mask */
12698 		cpu_next = cpumask_next_wrap(cpu, orig_mask, cpu, true);
12699 		cpu_select = lpfc_next_online_cpu(orig_mask, cpu_next);
12700 
12701 		/* Found a valid CPU */
12702 		if ((cpu_select < nr_cpu_ids) && (cpu_select != cpu)) {
12703 			/* Go through each eqhdl and ensure offlining
12704 			 * cpu aff_mask is migrated
12705 			 */
12706 			for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
12707 				aff_mask = lpfc_get_aff_mask(idx);
12708 
12709 				/* Migrate affinity */
12710 				if (cpumask_test_cpu(cpu, aff_mask))
12711 					lpfc_irq_set_aff(lpfc_get_eq_hdl(idx),
12712 							 cpu_select);
12713 			}
12714 		} else {
12715 			/* Rely on irqbalance if no online CPUs left on NUMA */
12716 			for (idx = 0; idx < phba->cfg_irq_chann; idx++)
12717 				lpfc_irq_clear_aff(lpfc_get_eq_hdl(idx));
12718 		}
12719 	} else {
12720 		/* Migrate affinity back to this CPU */
12721 		lpfc_irq_set_aff(lpfc_get_eq_hdl(cpup->eq), cpu);
12722 	}
12723 }
12724 
12725 static int lpfc_cpu_offline(unsigned int cpu, struct hlist_node *node)
12726 {
12727 	struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp);
12728 	struct lpfc_queue *eq, *next;
12729 	LIST_HEAD(eqlist);
12730 	int retval;
12731 
12732 	if (!phba) {
12733 		WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id());
12734 		return 0;
12735 	}
12736 
12737 	if (__lpfc_cpuhp_checks(phba, &retval))
12738 		return retval;
12739 
12740 	lpfc_irq_rebalance(phba, cpu, true);
12741 
12742 	retval = lpfc_cpuhp_get_eq(phba, cpu, &eqlist);
12743 	if (retval)
12744 		return retval;
12745 
12746 	/* start polling on these eq's */
12747 	list_for_each_entry_safe(eq, next, &eqlist, _poll_list) {
12748 		list_del_init(&eq->_poll_list);
12749 		lpfc_sli4_start_polling(eq);
12750 	}
12751 
12752 	return 0;
12753 }
12754 
12755 static int lpfc_cpu_online(unsigned int cpu, struct hlist_node *node)
12756 {
12757 	struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp);
12758 	struct lpfc_queue *eq, *next;
12759 	unsigned int n;
12760 	int retval;
12761 
12762 	if (!phba) {
12763 		WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id());
12764 		return 0;
12765 	}
12766 
12767 	if (__lpfc_cpuhp_checks(phba, &retval))
12768 		return retval;
12769 
12770 	lpfc_irq_rebalance(phba, cpu, false);
12771 
12772 	list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list) {
12773 		n = lpfc_find_cpu_handle(phba, eq->hdwq, LPFC_FIND_BY_HDWQ);
12774 		if (n == cpu)
12775 			lpfc_sli4_stop_polling(eq);
12776 	}
12777 
12778 	return 0;
12779 }
12780 
12781 /**
12782  * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
12783  * @phba: pointer to lpfc hba data structure.
12784  *
12785  * This routine is invoked to enable the MSI-X interrupt vectors to device
12786  * with SLI-4 interface spec.  It also allocates MSI-X vectors and maps them
12787  * to cpus on the system.
12788  *
12789  * When cfg_irq_numa is enabled, the adapter will only allocate vectors for
12790  * the number of cpus on the same numa node as this adapter.  The vectors are
12791  * allocated without requesting OS affinity mapping.  A vector will be
12792  * allocated and assigned to each online and offline cpu.  If the cpu is
12793  * online, then affinity will be set to that cpu.  If the cpu is offline, then
12794  * affinity will be set to the nearest peer cpu within the numa node that is
12795  * online.  If there are no online cpus within the numa node, affinity is not
12796  * assigned and the OS may do as it pleases. Note: cpu vector affinity mapping
12797  * is consistent with the way cpu online/offline is handled when cfg_irq_numa is
12798  * configured.
12799  *
12800  * If numa mode is not enabled and there is more than 1 vector allocated, then
12801  * the driver relies on the managed irq interface where the OS assigns vector to
12802  * cpu affinity.  The driver will then use that affinity mapping to setup its
12803  * cpu mapping table.
12804  *
12805  * Return codes
12806  * 0 - successful
12807  * other values - error
12808  **/
12809 static int
12810 lpfc_sli4_enable_msix(struct lpfc_hba *phba)
12811 {
12812 	int vectors, rc, index;
12813 	char *name;
12814 	const struct cpumask *aff_mask = NULL;
12815 	unsigned int cpu = 0, cpu_cnt = 0, cpu_select = nr_cpu_ids;
12816 	struct lpfc_vector_map_info *cpup;
12817 	struct lpfc_hba_eq_hdl *eqhdl;
12818 	const struct cpumask *maskp;
12819 	unsigned int flags = PCI_IRQ_MSIX;
12820 
12821 	/* Set up MSI-X multi-message vectors */
12822 	vectors = phba->cfg_irq_chann;
12823 
12824 	if (phba->irq_chann_mode != NORMAL_MODE)
12825 		aff_mask = &phba->sli4_hba.irq_aff_mask;
12826 
12827 	if (aff_mask) {
12828 		cpu_cnt = cpumask_weight(aff_mask);
12829 		vectors = min(phba->cfg_irq_chann, cpu_cnt);
12830 
12831 		/* cpu: iterates over aff_mask including offline or online
12832 		 * cpu_select: iterates over online aff_mask to set affinity
12833 		 */
12834 		cpu = cpumask_first(aff_mask);
12835 		cpu_select = lpfc_next_online_cpu(aff_mask, cpu);
12836 	} else {
12837 		flags |= PCI_IRQ_AFFINITY;
12838 	}
12839 
12840 	rc = pci_alloc_irq_vectors(phba->pcidev, 1, vectors, flags);
12841 	if (rc < 0) {
12842 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12843 				"0484 PCI enable MSI-X failed (%d)\n", rc);
12844 		goto vec_fail_out;
12845 	}
12846 	vectors = rc;
12847 
12848 	/* Assign MSI-X vectors to interrupt handlers */
12849 	for (index = 0; index < vectors; index++) {
12850 		eqhdl = lpfc_get_eq_hdl(index);
12851 		name = eqhdl->handler_name;
12852 		memset(name, 0, LPFC_SLI4_HANDLER_NAME_SZ);
12853 		snprintf(name, LPFC_SLI4_HANDLER_NAME_SZ,
12854 			 LPFC_DRIVER_HANDLER_NAME"%d", index);
12855 
12856 		eqhdl->idx = index;
12857 		rc = request_irq(pci_irq_vector(phba->pcidev, index),
12858 			 &lpfc_sli4_hba_intr_handler, 0,
12859 			 name, eqhdl);
12860 		if (rc) {
12861 			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
12862 					"0486 MSI-X fast-path (%d) "
12863 					"request_irq failed (%d)\n", index, rc);
12864 			goto cfg_fail_out;
12865 		}
12866 
12867 		eqhdl->irq = pci_irq_vector(phba->pcidev, index);
12868 
12869 		if (aff_mask) {
12870 			/* If found a neighboring online cpu, set affinity */
12871 			if (cpu_select < nr_cpu_ids)
12872 				lpfc_irq_set_aff(eqhdl, cpu_select);
12873 
12874 			/* Assign EQ to cpu_map */
12875 			lpfc_assign_eq_map_info(phba, index,
12876 						LPFC_CPU_FIRST_IRQ,
12877 						cpu);
12878 
12879 			/* Iterate to next offline or online cpu in aff_mask */
12880 			cpu = cpumask_next(cpu, aff_mask);
12881 
12882 			/* Find next online cpu in aff_mask to set affinity */
12883 			cpu_select = lpfc_next_online_cpu(aff_mask, cpu);
12884 		} else if (vectors == 1) {
12885 			cpu = cpumask_first(cpu_present_mask);
12886 			lpfc_assign_eq_map_info(phba, index, LPFC_CPU_FIRST_IRQ,
12887 						cpu);
12888 		} else {
12889 			maskp = pci_irq_get_affinity(phba->pcidev, index);
12890 
12891 			/* Loop through all CPUs associated with vector index */
12892 			for_each_cpu_and(cpu, maskp, cpu_present_mask) {
12893 				cpup = &phba->sli4_hba.cpu_map[cpu];
12894 
12895 				/* If this is the first CPU thats assigned to
12896 				 * this vector, set LPFC_CPU_FIRST_IRQ.
12897 				 *
12898 				 * With certain platforms its possible that irq
12899 				 * vectors are affinitized to all the cpu's.
12900 				 * This can result in each cpu_map.eq to be set
12901 				 * to the last vector, resulting in overwrite
12902 				 * of all the previous cpu_map.eq.  Ensure that
12903 				 * each vector receives a place in cpu_map.
12904 				 * Later call to lpfc_cpu_affinity_check will
12905 				 * ensure we are nicely balanced out.
12906 				 */
12907 				if (cpup->eq != LPFC_VECTOR_MAP_EMPTY)
12908 					continue;
12909 				lpfc_assign_eq_map_info(phba, index,
12910 							LPFC_CPU_FIRST_IRQ,
12911 							cpu);
12912 				break;
12913 			}
12914 		}
12915 	}
12916 
12917 	if (vectors != phba->cfg_irq_chann) {
12918 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12919 				"3238 Reducing IO channels to match number of "
12920 				"MSI-X vectors, requested %d got %d\n",
12921 				phba->cfg_irq_chann, vectors);
12922 		if (phba->cfg_irq_chann > vectors)
12923 			phba->cfg_irq_chann = vectors;
12924 	}
12925 
12926 	return rc;
12927 
12928 cfg_fail_out:
12929 	/* free the irq already requested */
12930 	for (--index; index >= 0; index--) {
12931 		eqhdl = lpfc_get_eq_hdl(index);
12932 		lpfc_irq_clear_aff(eqhdl);
12933 		irq_set_affinity_hint(eqhdl->irq, NULL);
12934 		free_irq(eqhdl->irq, eqhdl);
12935 	}
12936 
12937 	/* Unconfigure MSI-X capability structure */
12938 	pci_free_irq_vectors(phba->pcidev);
12939 
12940 vec_fail_out:
12941 	return rc;
12942 }
12943 
12944 /**
12945  * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
12946  * @phba: pointer to lpfc hba data structure.
12947  *
12948  * This routine is invoked to enable the MSI interrupt mode to device with
12949  * SLI-4 interface spec. The kernel function pci_alloc_irq_vectors() is
12950  * called to enable the MSI vector. The device driver is responsible for
12951  * calling the request_irq() to register MSI vector with a interrupt the
12952  * handler, which is done in this function.
12953  *
12954  * Return codes
12955  * 	0 - successful
12956  * 	other values - error
12957  **/
12958 static int
12959 lpfc_sli4_enable_msi(struct lpfc_hba *phba)
12960 {
12961 	int rc, index;
12962 	unsigned int cpu;
12963 	struct lpfc_hba_eq_hdl *eqhdl;
12964 
12965 	rc = pci_alloc_irq_vectors(phba->pcidev, 1, 1,
12966 				   PCI_IRQ_MSI | PCI_IRQ_AFFINITY);
12967 	if (rc > 0)
12968 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12969 				"0487 PCI enable MSI mode success.\n");
12970 	else {
12971 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12972 				"0488 PCI enable MSI mode failed (%d)\n", rc);
12973 		return rc ? rc : -1;
12974 	}
12975 
12976 	rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
12977 			 0, LPFC_DRIVER_NAME, phba);
12978 	if (rc) {
12979 		pci_free_irq_vectors(phba->pcidev);
12980 		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
12981 				"0490 MSI request_irq failed (%d)\n", rc);
12982 		return rc;
12983 	}
12984 
12985 	eqhdl = lpfc_get_eq_hdl(0);
12986 	eqhdl->irq = pci_irq_vector(phba->pcidev, 0);
12987 
12988 	cpu = cpumask_first(cpu_present_mask);
12989 	lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ, cpu);
12990 
12991 	for (index = 0; index < phba->cfg_irq_chann; index++) {
12992 		eqhdl = lpfc_get_eq_hdl(index);
12993 		eqhdl->idx = index;
12994 	}
12995 
12996 	return 0;
12997 }
12998 
12999 /**
13000  * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
13001  * @phba: pointer to lpfc hba data structure.
13002  * @cfg_mode: Interrupt configuration mode (INTx, MSI or MSI-X).
13003  *
13004  * This routine is invoked to enable device interrupt and associate driver's
13005  * interrupt handler(s) to interrupt vector(s) to device with SLI-4
13006  * interface spec. Depends on the interrupt mode configured to the driver,
13007  * the driver will try to fallback from the configured interrupt mode to an
13008  * interrupt mode which is supported by the platform, kernel, and device in
13009  * the order of:
13010  * MSI-X -> MSI -> IRQ.
13011  *
13012  * Return codes
13013  * 	0 - successful
13014  * 	other values - error
13015  **/
13016 static uint32_t
13017 lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
13018 {
13019 	uint32_t intr_mode = LPFC_INTR_ERROR;
13020 	int retval, idx;
13021 
13022 	if (cfg_mode == 2) {
13023 		/* Preparation before conf_msi mbox cmd */
13024 		retval = 0;
13025 		if (!retval) {
13026 			/* Now, try to enable MSI-X interrupt mode */
13027 			retval = lpfc_sli4_enable_msix(phba);
13028 			if (!retval) {
13029 				/* Indicate initialization to MSI-X mode */
13030 				phba->intr_type = MSIX;
13031 				intr_mode = 2;
13032 			}
13033 		}
13034 	}
13035 
13036 	/* Fallback to MSI if MSI-X initialization failed */
13037 	if (cfg_mode >= 1 && phba->intr_type == NONE) {
13038 		retval = lpfc_sli4_enable_msi(phba);
13039 		if (!retval) {
13040 			/* Indicate initialization to MSI mode */
13041 			phba->intr_type = MSI;
13042 			intr_mode = 1;
13043 		}
13044 	}
13045 
13046 	/* Fallback to INTx if both MSI-X/MSI initalization failed */
13047 	if (phba->intr_type == NONE) {
13048 		retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
13049 				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
13050 		if (!retval) {
13051 			struct lpfc_hba_eq_hdl *eqhdl;
13052 			unsigned int cpu;
13053 
13054 			/* Indicate initialization to INTx mode */
13055 			phba->intr_type = INTx;
13056 			intr_mode = 0;
13057 
13058 			eqhdl = lpfc_get_eq_hdl(0);
13059 			eqhdl->irq = pci_irq_vector(phba->pcidev, 0);
13060 
13061 			cpu = cpumask_first(cpu_present_mask);
13062 			lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ,
13063 						cpu);
13064 			for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
13065 				eqhdl = lpfc_get_eq_hdl(idx);
13066 				eqhdl->idx = idx;
13067 			}
13068 		}
13069 	}
13070 	return intr_mode;
13071 }
13072 
13073 /**
13074  * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
13075  * @phba: pointer to lpfc hba data structure.
13076  *
13077  * This routine is invoked to disable device interrupt and disassociate
13078  * the driver's interrupt handler(s) from interrupt vector(s) to device
13079  * with SLI-4 interface spec. Depending on the interrupt mode, the driver
13080  * will release the interrupt vector(s) for the message signaled interrupt.
13081  **/
13082 static void
13083 lpfc_sli4_disable_intr(struct lpfc_hba *phba)
13084 {
13085 	/* Disable the currently initialized interrupt mode */
13086 	if (phba->intr_type == MSIX) {
13087 		int index;
13088 		struct lpfc_hba_eq_hdl *eqhdl;
13089 
13090 		/* Free up MSI-X multi-message vectors */
13091 		for (index = 0; index < phba->cfg_irq_chann; index++) {
13092 			eqhdl = lpfc_get_eq_hdl(index);
13093 			lpfc_irq_clear_aff(eqhdl);
13094 			irq_set_affinity_hint(eqhdl->irq, NULL);
13095 			free_irq(eqhdl->irq, eqhdl);
13096 		}
13097 	} else {
13098 		free_irq(phba->pcidev->irq, phba);
13099 	}
13100 
13101 	pci_free_irq_vectors(phba->pcidev);
13102 
13103 	/* Reset interrupt management states */
13104 	phba->intr_type = NONE;
13105 	phba->sli.slistat.sli_intr = 0;
13106 }
13107 
13108 /**
13109  * lpfc_unset_hba - Unset SLI3 hba device initialization
13110  * @phba: pointer to lpfc hba data structure.
13111  *
13112  * This routine is invoked to unset the HBA device initialization steps to
13113  * a device with SLI-3 interface spec.
13114  **/
13115 static void
13116 lpfc_unset_hba(struct lpfc_hba *phba)
13117 {
13118 	struct lpfc_vport *vport = phba->pport;
13119 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
13120 
13121 	spin_lock_irq(shost->host_lock);
13122 	vport->load_flag |= FC_UNLOADING;
13123 	spin_unlock_irq(shost->host_lock);
13124 
13125 	kfree(phba->vpi_bmask);
13126 	kfree(phba->vpi_ids);
13127 
13128 	lpfc_stop_hba_timers(phba);
13129 
13130 	phba->pport->work_port_events = 0;
13131 
13132 	lpfc_sli_hba_down(phba);
13133 
13134 	lpfc_sli_brdrestart(phba);
13135 
13136 	lpfc_sli_disable_intr(phba);
13137 
13138 	return;
13139 }
13140 
13141 /**
13142  * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy
13143  * @phba: Pointer to HBA context object.
13144  *
13145  * This function is called in the SLI4 code path to wait for completion
13146  * of device's XRIs exchange busy. It will check the XRI exchange busy
13147  * on outstanding FCP and ELS I/Os every 10ms for up to 10 seconds; after
13148  * that, it will check the XRI exchange busy on outstanding FCP and ELS
13149  * I/Os every 30 seconds, log error message, and wait forever. Only when
13150  * all XRI exchange busy complete, the driver unload shall proceed with
13151  * invoking the function reset ioctl mailbox command to the CNA and the
13152  * the rest of the driver unload resource release.
13153  **/
13154 static void
13155 lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
13156 {
13157 	struct lpfc_sli4_hdw_queue *qp;
13158 	int idx, ccnt;
13159 	int wait_time = 0;
13160 	int io_xri_cmpl = 1;
13161 	int nvmet_xri_cmpl = 1;
13162 	int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
13163 
13164 	/* Driver just aborted IOs during the hba_unset process.  Pause
13165 	 * here to give the HBA time to complete the IO and get entries
13166 	 * into the abts lists.
13167 	 */
13168 	msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1 * 5);
13169 
13170 	/* Wait for NVME pending IO to flush back to transport. */
13171 	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
13172 		lpfc_nvme_wait_for_io_drain(phba);
13173 
13174 	ccnt = 0;
13175 	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
13176 		qp = &phba->sli4_hba.hdwq[idx];
13177 		io_xri_cmpl = list_empty(&qp->lpfc_abts_io_buf_list);
13178 		if (!io_xri_cmpl) /* if list is NOT empty */
13179 			ccnt++;
13180 	}
13181 	if (ccnt)
13182 		io_xri_cmpl = 0;
13183 
13184 	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
13185 		nvmet_xri_cmpl =
13186 			list_empty(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
13187 	}
13188 
13189 	while (!els_xri_cmpl || !io_xri_cmpl || !nvmet_xri_cmpl) {
13190 		if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
13191 			if (!nvmet_xri_cmpl)
13192 				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13193 						"6424 NVMET XRI exchange busy "
13194 						"wait time: %d seconds.\n",
13195 						wait_time/1000);
13196 			if (!io_xri_cmpl)
13197 				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13198 						"6100 IO XRI exchange busy "
13199 						"wait time: %d seconds.\n",
13200 						wait_time/1000);
13201 			if (!els_xri_cmpl)
13202 				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13203 						"2878 ELS XRI exchange busy "
13204 						"wait time: %d seconds.\n",
13205 						wait_time/1000);
13206 			msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2);
13207 			wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2;
13208 		} else {
13209 			msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
13210 			wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
13211 		}
13212 
13213 		ccnt = 0;
13214 		for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
13215 			qp = &phba->sli4_hba.hdwq[idx];
13216 			io_xri_cmpl = list_empty(
13217 			    &qp->lpfc_abts_io_buf_list);
13218 			if (!io_xri_cmpl) /* if list is NOT empty */
13219 				ccnt++;
13220 		}
13221 		if (ccnt)
13222 			io_xri_cmpl = 0;
13223 
13224 		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
13225 			nvmet_xri_cmpl = list_empty(
13226 				&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
13227 		}
13228 		els_xri_cmpl =
13229 			list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
13230 
13231 	}
13232 }
13233 
13234 /**
13235  * lpfc_sli4_hba_unset - Unset the fcoe hba
13236  * @phba: Pointer to HBA context object.
13237  *
13238  * This function is called in the SLI4 code path to reset the HBA's FCoE
13239  * function. The caller is not required to hold any lock. This routine
13240  * issues PCI function reset mailbox command to reset the FCoE function.
13241  * At the end of the function, it calls lpfc_hba_down_post function to
13242  * free any pending commands.
13243  **/
13244 static void
13245 lpfc_sli4_hba_unset(struct lpfc_hba *phba)
13246 {
13247 	int wait_cnt = 0;
13248 	LPFC_MBOXQ_t *mboxq;
13249 	struct pci_dev *pdev = phba->pcidev;
13250 
13251 	lpfc_stop_hba_timers(phba);
13252 	hrtimer_cancel(&phba->cmf_timer);
13253 
13254 	if (phba->pport)
13255 		phba->sli4_hba.intr_enable = 0;
13256 
13257 	/*
13258 	 * Gracefully wait out the potential current outstanding asynchronous
13259 	 * mailbox command.
13260 	 */
13261 
13262 	/* First, block any pending async mailbox command from posted */
13263 	spin_lock_irq(&phba->hbalock);
13264 	phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
13265 	spin_unlock_irq(&phba->hbalock);
13266 	/* Now, trying to wait it out if we can */
13267 	while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
13268 		msleep(10);
13269 		if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
13270 			break;
13271 	}
13272 	/* Forcefully release the outstanding mailbox command if timed out */
13273 	if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
13274 		spin_lock_irq(&phba->hbalock);
13275 		mboxq = phba->sli.mbox_active;
13276 		mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
13277 		__lpfc_mbox_cmpl_put(phba, mboxq);
13278 		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
13279 		phba->sli.mbox_active = NULL;
13280 		spin_unlock_irq(&phba->hbalock);
13281 	}
13282 
13283 	/* Abort all iocbs associated with the hba */
13284 	lpfc_sli_hba_iocb_abort(phba);
13285 
13286 	/* Wait for completion of device XRI exchange busy */
13287 	lpfc_sli4_xri_exchange_busy_wait(phba);
13288 
13289 	/* per-phba callback de-registration for hotplug event */
13290 	if (phba->pport)
13291 		lpfc_cpuhp_remove(phba);
13292 
13293 	/* Disable PCI subsystem interrupt */
13294 	lpfc_sli4_disable_intr(phba);
13295 
13296 	/* Disable SR-IOV if enabled */
13297 	if (phba->cfg_sriov_nr_virtfn)
13298 		pci_disable_sriov(pdev);
13299 
13300 	/* Stop kthread signal shall trigger work_done one more time */
13301 	kthread_stop(phba->worker_thread);
13302 
13303 	/* Disable FW logging to host memory */
13304 	lpfc_ras_stop_fwlog(phba);
13305 
13306 	/* Unset the queues shared with the hardware then release all
13307 	 * allocated resources.
13308 	 */
13309 	lpfc_sli4_queue_unset(phba);
13310 	lpfc_sli4_queue_destroy(phba);
13311 
13312 	/* Reset SLI4 HBA FCoE function */
13313 	lpfc_pci_function_reset(phba);
13314 
13315 	/* Free RAS DMA memory */
13316 	if (phba->ras_fwlog.ras_enabled)
13317 		lpfc_sli4_ras_dma_free(phba);
13318 
13319 	/* Stop the SLI4 device port */
13320 	if (phba->pport)
13321 		phba->pport->work_port_events = 0;
13322 }
13323 
13324 static uint32_t
13325 lpfc_cgn_crc32(uint32_t crc, u8 byte)
13326 {
13327 	uint32_t msb = 0;
13328 	uint32_t bit;
13329 
13330 	for (bit = 0; bit < 8; bit++) {
13331 		msb = (crc >> 31) & 1;
13332 		crc <<= 1;
13333 
13334 		if (msb ^ (byte & 1)) {
13335 			crc ^= LPFC_CGN_CRC32_MAGIC_NUMBER;
13336 			crc |= 1;
13337 		}
13338 		byte >>= 1;
13339 	}
13340 	return crc;
13341 }
13342 
13343 static uint32_t
13344 lpfc_cgn_reverse_bits(uint32_t wd)
13345 {
13346 	uint32_t result = 0;
13347 	uint32_t i;
13348 
13349 	for (i = 0; i < 32; i++) {
13350 		result <<= 1;
13351 		result |= (1 & (wd >> i));
13352 	}
13353 	return result;
13354 }
13355 
13356 /*
13357  * The routine corresponds with the algorithm the HBA firmware
13358  * uses to validate the data integrity.
13359  */
13360 uint32_t
13361 lpfc_cgn_calc_crc32(void *ptr, uint32_t byteLen, uint32_t crc)
13362 {
13363 	uint32_t  i;
13364 	uint32_t result;
13365 	uint8_t  *data = (uint8_t *)ptr;
13366 
13367 	for (i = 0; i < byteLen; ++i)
13368 		crc = lpfc_cgn_crc32(crc, data[i]);
13369 
13370 	result = ~lpfc_cgn_reverse_bits(crc);
13371 	return result;
13372 }
13373 
13374 void
13375 lpfc_init_congestion_buf(struct lpfc_hba *phba)
13376 {
13377 	struct lpfc_cgn_info *cp;
13378 	struct timespec64 cmpl_time;
13379 	struct tm broken;
13380 	uint16_t size;
13381 	uint32_t crc;
13382 
13383 	lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
13384 			"6235 INIT Congestion Buffer %p\n", phba->cgn_i);
13385 
13386 	if (!phba->cgn_i)
13387 		return;
13388 	cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
13389 
13390 	atomic_set(&phba->cgn_fabric_warn_cnt, 0);
13391 	atomic_set(&phba->cgn_fabric_alarm_cnt, 0);
13392 	atomic_set(&phba->cgn_sync_alarm_cnt, 0);
13393 	atomic_set(&phba->cgn_sync_warn_cnt, 0);
13394 
13395 	atomic64_set(&phba->cgn_acqe_stat.alarm, 0);
13396 	atomic64_set(&phba->cgn_acqe_stat.warn, 0);
13397 	atomic_set(&phba->cgn_driver_evt_cnt, 0);
13398 	atomic_set(&phba->cgn_latency_evt_cnt, 0);
13399 	atomic64_set(&phba->cgn_latency_evt, 0);
13400 	phba->cgn_evt_minute = 0;
13401 	phba->hba_flag &= ~HBA_CGN_DAY_WRAP;
13402 
13403 	memset(cp, 0xff, LPFC_CGN_DATA_SIZE);
13404 	cp->cgn_info_size = cpu_to_le16(LPFC_CGN_INFO_SZ);
13405 	cp->cgn_info_version = LPFC_CGN_INFO_V3;
13406 
13407 	/* cgn parameters */
13408 	cp->cgn_info_mode = phba->cgn_p.cgn_param_mode;
13409 	cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0;
13410 	cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1;
13411 	cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2;
13412 
13413 	ktime_get_real_ts64(&cmpl_time);
13414 	time64_to_tm(cmpl_time.tv_sec, 0, &broken);
13415 
13416 	cp->cgn_info_month = broken.tm_mon + 1;
13417 	cp->cgn_info_day = broken.tm_mday;
13418 	cp->cgn_info_year = broken.tm_year - 100; /* relative to 2000 */
13419 	cp->cgn_info_hour = broken.tm_hour;
13420 	cp->cgn_info_minute = broken.tm_min;
13421 	cp->cgn_info_second = broken.tm_sec;
13422 
13423 	lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT,
13424 			"2643 CGNInfo Init: Start Time "
13425 			"%d/%d/%d %d:%d:%d\n",
13426 			cp->cgn_info_day, cp->cgn_info_month,
13427 			cp->cgn_info_year, cp->cgn_info_hour,
13428 			cp->cgn_info_minute, cp->cgn_info_second);
13429 
13430 	/* Fill in default LUN qdepth */
13431 	if (phba->pport) {
13432 		size = (uint16_t)(phba->pport->cfg_lun_queue_depth);
13433 		cp->cgn_lunq = cpu_to_le16(size);
13434 	}
13435 
13436 	/* last used Index initialized to 0xff already */
13437 
13438 	cp->cgn_warn_freq = cpu_to_le16(LPFC_FPIN_INIT_FREQ);
13439 	cp->cgn_alarm_freq = cpu_to_le16(LPFC_FPIN_INIT_FREQ);
13440 	crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED);
13441 	cp->cgn_info_crc = cpu_to_le32(crc);
13442 
13443 	phba->cgn_evt_timestamp = jiffies +
13444 		msecs_to_jiffies(LPFC_CGN_TIMER_TO_MIN);
13445 }
13446 
13447 void
13448 lpfc_init_congestion_stat(struct lpfc_hba *phba)
13449 {
13450 	struct lpfc_cgn_info *cp;
13451 	struct timespec64 cmpl_time;
13452 	struct tm broken;
13453 	uint32_t crc;
13454 
13455 	lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
13456 			"6236 INIT Congestion Stat %p\n", phba->cgn_i);
13457 
13458 	if (!phba->cgn_i)
13459 		return;
13460 
13461 	cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
13462 	memset(&cp->cgn_stat_npm, 0, LPFC_CGN_STAT_SIZE);
13463 
13464 	ktime_get_real_ts64(&cmpl_time);
13465 	time64_to_tm(cmpl_time.tv_sec, 0, &broken);
13466 
13467 	cp->cgn_stat_month = broken.tm_mon + 1;
13468 	cp->cgn_stat_day = broken.tm_mday;
13469 	cp->cgn_stat_year = broken.tm_year - 100; /* relative to 2000 */
13470 	cp->cgn_stat_hour = broken.tm_hour;
13471 	cp->cgn_stat_minute = broken.tm_min;
13472 
13473 	lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT,
13474 			"2647 CGNstat Init: Start Time "
13475 			"%d/%d/%d %d:%d\n",
13476 			cp->cgn_stat_day, cp->cgn_stat_month,
13477 			cp->cgn_stat_year, cp->cgn_stat_hour,
13478 			cp->cgn_stat_minute);
13479 
13480 	crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED);
13481 	cp->cgn_info_crc = cpu_to_le32(crc);
13482 }
13483 
13484 /**
13485  * __lpfc_reg_congestion_buf - register congestion info buffer with HBA
13486  * @phba: Pointer to hba context object.
13487  * @reg: flag to determine register or unregister.
13488  */
13489 static int
13490 __lpfc_reg_congestion_buf(struct lpfc_hba *phba, int reg)
13491 {
13492 	struct lpfc_mbx_reg_congestion_buf *reg_congestion_buf;
13493 	union  lpfc_sli4_cfg_shdr *shdr;
13494 	uint32_t shdr_status, shdr_add_status;
13495 	LPFC_MBOXQ_t *mboxq;
13496 	int length, rc;
13497 
13498 	if (!phba->cgn_i)
13499 		return -ENXIO;
13500 
13501 	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
13502 	if (!mboxq) {
13503 		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
13504 				"2641 REG_CONGESTION_BUF mbox allocation fail: "
13505 				"HBA state x%x reg %d\n",
13506 				phba->pport->port_state, reg);
13507 		return -ENOMEM;
13508 	}
13509 
13510 	length = (sizeof(struct lpfc_mbx_reg_congestion_buf) -
13511 		sizeof(struct lpfc_sli4_cfg_mhdr));
13512 	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
13513 			 LPFC_MBOX_OPCODE_REG_CONGESTION_BUF, length,
13514 			 LPFC_SLI4_MBX_EMBED);
13515 	reg_congestion_buf = &mboxq->u.mqe.un.reg_congestion_buf;
13516 	bf_set(lpfc_mbx_reg_cgn_buf_type, reg_congestion_buf, 1);
13517 	if (reg > 0)
13518 		bf_set(lpfc_mbx_reg_cgn_buf_cnt, reg_congestion_buf, 1);
13519 	else
13520 		bf_set(lpfc_mbx_reg_cgn_buf_cnt, reg_congestion_buf, 0);
13521 	reg_congestion_buf->length = sizeof(struct lpfc_cgn_info);
13522 	reg_congestion_buf->addr_lo =
13523 		putPaddrLow(phba->cgn_i->phys);
13524 	reg_congestion_buf->addr_hi =
13525 		putPaddrHigh(phba->cgn_i->phys);
13526 
13527 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
13528 	shdr = (union lpfc_sli4_cfg_shdr *)
13529 		&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
13530 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13531 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
13532 				 &shdr->response);
13533 	mempool_free(mboxq, phba->mbox_mem_pool);
13534 	if (shdr_status || shdr_add_status || rc) {
13535 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13536 				"2642 REG_CONGESTION_BUF mailbox "
13537 				"failed with status x%x add_status x%x,"
13538 				" mbx status x%x reg %d\n",
13539 				shdr_status, shdr_add_status, rc, reg);
13540 		return -ENXIO;
13541 	}
13542 	return 0;
13543 }
13544 
13545 int
13546 lpfc_unreg_congestion_buf(struct lpfc_hba *phba)
13547 {
13548 	lpfc_cmf_stop(phba);
13549 	return __lpfc_reg_congestion_buf(phba, 0);
13550 }
13551 
13552 int
13553 lpfc_reg_congestion_buf(struct lpfc_hba *phba)
13554 {
13555 	return __lpfc_reg_congestion_buf(phba, 1);
13556 }
13557 
13558 /**
13559  * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS.
13560  * @phba: Pointer to HBA context object.
13561  * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
13562  *
13563  * This function is called in the SLI4 code path to read the port's
13564  * sli4 capabilities.
13565  *
13566  * This function may be be called from any context that can block-wait
13567  * for the completion.  The expectation is that this routine is called
13568  * typically from probe_one or from the online routine.
13569  **/
13570 int
13571 lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
13572 {
13573 	int rc;
13574 	struct lpfc_mqe *mqe = &mboxq->u.mqe;
13575 	struct lpfc_pc_sli4_params *sli4_params;
13576 	uint32_t mbox_tmo;
13577 	int length;
13578 	bool exp_wqcq_pages = true;
13579 	struct lpfc_sli4_parameters *mbx_sli4_parameters;
13580 
13581 	/*
13582 	 * By default, the driver assumes the SLI4 port requires RPI
13583 	 * header postings.  The SLI4_PARAM response will correct this
13584 	 * assumption.
13585 	 */
13586 	phba->sli4_hba.rpi_hdrs_in_use = 1;
13587 
13588 	/* Read the port's SLI4 Config Parameters */
13589 	length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
13590 		  sizeof(struct lpfc_sli4_cfg_mhdr));
13591 	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
13592 			 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
13593 			 length, LPFC_SLI4_MBX_EMBED);
13594 	if (!phba->sli4_hba.intr_enable)
13595 		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
13596 	else {
13597 		mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
13598 		rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
13599 	}
13600 	if (unlikely(rc))
13601 		return rc;
13602 	sli4_params = &phba->sli4_hba.pc_sli4_params;
13603 	mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
13604 	sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters);
13605 	sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters);
13606 	sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters);
13607 	sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1,
13608 					     mbx_sli4_parameters);
13609 	sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2,
13610 					     mbx_sli4_parameters);
13611 	if (bf_get(cfg_phwq, mbx_sli4_parameters))
13612 		phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED;
13613 	else
13614 		phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
13615 	sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
13616 	sli4_params->loopbk_scope = bf_get(cfg_loopbk_scope,
13617 					   mbx_sli4_parameters);
13618 	sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters);
13619 	sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
13620 	sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
13621 	sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
13622 	sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters);
13623 	sli4_params->eqav = bf_get(cfg_eqav, mbx_sli4_parameters);
13624 	sli4_params->cqav = bf_get(cfg_cqav, mbx_sli4_parameters);
13625 	sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters);
13626 	sli4_params->bv1s = bf_get(cfg_bv1s, mbx_sli4_parameters);
13627 	sli4_params->pls = bf_get(cfg_pvl, mbx_sli4_parameters);
13628 	sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
13629 					    mbx_sli4_parameters);
13630 	sli4_params->wqpcnt = bf_get(cfg_wqpcnt, mbx_sli4_parameters);
13631 	sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
13632 					   mbx_sli4_parameters);
13633 	phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
13634 	phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);
13635 
13636 	/* Check for Extended Pre-Registered SGL support */
13637 	phba->cfg_xpsgl = bf_get(cfg_xpsgl, mbx_sli4_parameters);
13638 
13639 	/* Check for firmware nvme support */
13640 	rc = (bf_get(cfg_nvme, mbx_sli4_parameters) &&
13641 		     bf_get(cfg_xib, mbx_sli4_parameters));
13642 
13643 	if (rc) {
13644 		/* Save this to indicate the Firmware supports NVME */
13645 		sli4_params->nvme = 1;
13646 
13647 		/* Firmware NVME support, check driver FC4 NVME support */
13648 		if (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP) {
13649 			lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME,
13650 					"6133 Disabling NVME support: "
13651 					"FC4 type not supported: x%x\n",
13652 					phba->cfg_enable_fc4_type);
13653 			goto fcponly;
13654 		}
13655 	} else {
13656 		/* No firmware NVME support, check driver FC4 NVME support */
13657 		sli4_params->nvme = 0;
13658 		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
13659 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME,
13660 					"6101 Disabling NVME support: Not "
13661 					"supported by firmware (%d %d) x%x\n",
13662 					bf_get(cfg_nvme, mbx_sli4_parameters),
13663 					bf_get(cfg_xib, mbx_sli4_parameters),
13664 					phba->cfg_enable_fc4_type);
13665 fcponly:
13666 			phba->nvmet_support = 0;
13667 			phba->cfg_nvmet_mrq = 0;
13668 			phba->cfg_nvme_seg_cnt = 0;
13669 
13670 			/* If no FC4 type support, move to just SCSI support */
13671 			if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
13672 				return -ENODEV;
13673 			phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP;
13674 		}
13675 	}
13676 
13677 	/* If the NVME FC4 type is enabled, scale the sg_seg_cnt to
13678 	 * accommodate 512K and 1M IOs in a single nvme buf.
13679 	 */
13680 	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
13681 		phba->cfg_sg_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
13682 
13683 	/* Enable embedded Payload BDE if support is indicated */
13684 	if (bf_get(cfg_pbde, mbx_sli4_parameters))
13685 		phba->cfg_enable_pbde = 1;
13686 	else
13687 		phba->cfg_enable_pbde = 0;
13688 
13689 	/*
13690 	 * To support Suppress Response feature we must satisfy 3 conditions.
13691 	 * lpfc_suppress_rsp module parameter must be set (default).
13692 	 * In SLI4-Parameters Descriptor:
13693 	 * Extended Inline Buffers (XIB) must be supported.
13694 	 * Suppress Response IU Not Supported (SRIUNS) must NOT be supported
13695 	 * (double negative).
13696 	 */
13697 	if (phba->cfg_suppress_rsp && bf_get(cfg_xib, mbx_sli4_parameters) &&
13698 	    !(bf_get(cfg_nosr, mbx_sli4_parameters)))
13699 		phba->sli.sli_flag |= LPFC_SLI_SUPPRESS_RSP;
13700 	else
13701 		phba->cfg_suppress_rsp = 0;
13702 
13703 	if (bf_get(cfg_eqdr, mbx_sli4_parameters))
13704 		phba->sli.sli_flag |= LPFC_SLI_USE_EQDR;
13705 
13706 	/* Make sure that sge_supp_len can be handled by the driver */
13707 	if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
13708 		sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
13709 
13710 	/*
13711 	 * Check whether the adapter supports an embedded copy of the
13712 	 * FCP CMD IU within the WQE for FCP_Ixxx commands. In order
13713 	 * to use this option, 128-byte WQEs must be used.
13714 	 */
13715 	if (bf_get(cfg_ext_embed_cb, mbx_sli4_parameters))
13716 		phba->fcp_embed_io = 1;
13717 	else
13718 		phba->fcp_embed_io = 0;
13719 
13720 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME,
13721 			"6422 XIB %d PBDE %d: FCP %d NVME %d %d %d\n",
13722 			bf_get(cfg_xib, mbx_sli4_parameters),
13723 			phba->cfg_enable_pbde,
13724 			phba->fcp_embed_io, sli4_params->nvme,
13725 			phba->cfg_nvme_embed_cmd, phba->cfg_suppress_rsp);
13726 
13727 	if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
13728 	    LPFC_SLI_INTF_IF_TYPE_2) &&
13729 	    (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
13730 		 LPFC_SLI_INTF_FAMILY_LNCR_A0))
13731 		exp_wqcq_pages = false;
13732 
13733 	if ((bf_get(cfg_cqpsize, mbx_sli4_parameters) & LPFC_CQ_16K_PAGE_SZ) &&
13734 	    (bf_get(cfg_wqpsize, mbx_sli4_parameters) & LPFC_WQ_16K_PAGE_SZ) &&
13735 	    exp_wqcq_pages &&
13736 	    (sli4_params->wqsize & LPFC_WQ_SZ128_SUPPORT))
13737 		phba->enab_exp_wqcq_pages = 1;
13738 	else
13739 		phba->enab_exp_wqcq_pages = 0;
13740 	/*
13741 	 * Check if the SLI port supports MDS Diagnostics
13742 	 */
13743 	if (bf_get(cfg_mds_diags, mbx_sli4_parameters))
13744 		phba->mds_diags_support = 1;
13745 	else
13746 		phba->mds_diags_support = 0;
13747 
13748 	/*
13749 	 * Check if the SLI port supports NSLER
13750 	 */
13751 	if (bf_get(cfg_nsler, mbx_sli4_parameters))
13752 		phba->nsler = 1;
13753 	else
13754 		phba->nsler = 0;
13755 
13756 	return 0;
13757 }
13758 
13759 /**
13760  * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
13761  * @pdev: pointer to PCI device
13762  * @pid: pointer to PCI device identifier
13763  *
13764  * This routine is to be called to attach a device with SLI-3 interface spec
13765  * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
13766  * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
13767  * information of the device and driver to see if the driver state that it can
13768  * support this kind of device. If the match is successful, the driver core
13769  * invokes this routine. If this routine determines it can claim the HBA, it
13770  * does all the initialization that it needs to do to handle the HBA properly.
13771  *
13772  * Return code
13773  * 	0 - driver can claim the device
13774  * 	negative value - driver can not claim the device
13775  **/
13776 static int
13777 lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
13778 {
13779 	struct lpfc_hba   *phba;
13780 	struct lpfc_vport *vport = NULL;
13781 	struct Scsi_Host  *shost = NULL;
13782 	int error;
13783 	uint32_t cfg_mode, intr_mode;
13784 
13785 	/* Allocate memory for HBA structure */
13786 	phba = lpfc_hba_alloc(pdev);
13787 	if (!phba)
13788 		return -ENOMEM;
13789 
13790 	/* Perform generic PCI device enabling operation */
13791 	error = lpfc_enable_pci_dev(phba);
13792 	if (error)
13793 		goto out_free_phba;
13794 
13795 	/* Set up SLI API function jump table for PCI-device group-0 HBAs */
13796 	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
13797 	if (error)
13798 		goto out_disable_pci_dev;
13799 
13800 	/* Set up SLI-3 specific device PCI memory space */
13801 	error = lpfc_sli_pci_mem_setup(phba);
13802 	if (error) {
13803 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13804 				"1402 Failed to set up pci memory space.\n");
13805 		goto out_disable_pci_dev;
13806 	}
13807 
13808 	/* Set up SLI-3 specific device driver resources */
13809 	error = lpfc_sli_driver_resource_setup(phba);
13810 	if (error) {
13811 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13812 				"1404 Failed to set up driver resource.\n");
13813 		goto out_unset_pci_mem_s3;
13814 	}
13815 
13816 	/* Initialize and populate the iocb list per host */
13817 
13818 	error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
13819 	if (error) {
13820 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13821 				"1405 Failed to initialize iocb list.\n");
13822 		goto out_unset_driver_resource_s3;
13823 	}
13824 
13825 	/* Set up common device driver resources */
13826 	error = lpfc_setup_driver_resource_phase2(phba);
13827 	if (error) {
13828 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13829 				"1406 Failed to set up driver resource.\n");
13830 		goto out_free_iocb_list;
13831 	}
13832 
13833 	/* Get the default values for Model Name and Description */
13834 	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
13835 
13836 	/* Create SCSI host to the physical port */
13837 	error = lpfc_create_shost(phba);
13838 	if (error) {
13839 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13840 				"1407 Failed to create scsi host.\n");
13841 		goto out_unset_driver_resource;
13842 	}
13843 
13844 	/* Configure sysfs attributes */
13845 	vport = phba->pport;
13846 	error = lpfc_alloc_sysfs_attr(vport);
13847 	if (error) {
13848 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13849 				"1476 Failed to allocate sysfs attr\n");
13850 		goto out_destroy_shost;
13851 	}
13852 
13853 	shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
13854 	/* Now, trying to enable interrupt and bring up the device */
13855 	cfg_mode = phba->cfg_use_msi;
13856 	while (true) {
13857 		/* Put device to a known state before enabling interrupt */
13858 		lpfc_stop_port(phba);
13859 		/* Configure and enable interrupt */
13860 		intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
13861 		if (intr_mode == LPFC_INTR_ERROR) {
13862 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13863 					"0431 Failed to enable interrupt.\n");
13864 			error = -ENODEV;
13865 			goto out_free_sysfs_attr;
13866 		}
13867 		/* SLI-3 HBA setup */
13868 		if (lpfc_sli_hba_setup(phba)) {
13869 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13870 					"1477 Failed to set up hba\n");
13871 			error = -ENODEV;
13872 			goto out_remove_device;
13873 		}
13874 
13875 		/* Wait 50ms for the interrupts of previous mailbox commands */
13876 		msleep(50);
13877 		/* Check active interrupts on message signaled interrupts */
13878 		if (intr_mode == 0 ||
13879 		    phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
13880 			/* Log the current active interrupt mode */
13881 			phba->intr_mode = intr_mode;
13882 			lpfc_log_intr_mode(phba, intr_mode);
13883 			break;
13884 		} else {
13885 			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
13886 					"0447 Configure interrupt mode (%d) "
13887 					"failed active interrupt test.\n",
13888 					intr_mode);
13889 			/* Disable the current interrupt mode */
13890 			lpfc_sli_disable_intr(phba);
13891 			/* Try next level of interrupt mode */
13892 			cfg_mode = --intr_mode;
13893 		}
13894 	}
13895 
13896 	/* Perform post initialization setup */
13897 	lpfc_post_init_setup(phba);
13898 
13899 	/* Check if there are static vports to be created. */
13900 	lpfc_create_static_vport(phba);
13901 
13902 	return 0;
13903 
13904 out_remove_device:
13905 	lpfc_unset_hba(phba);
13906 out_free_sysfs_attr:
13907 	lpfc_free_sysfs_attr(vport);
13908 out_destroy_shost:
13909 	lpfc_destroy_shost(phba);
13910 out_unset_driver_resource:
13911 	lpfc_unset_driver_resource_phase2(phba);
13912 out_free_iocb_list:
13913 	lpfc_free_iocb_list(phba);
13914 out_unset_driver_resource_s3:
13915 	lpfc_sli_driver_resource_unset(phba);
13916 out_unset_pci_mem_s3:
13917 	lpfc_sli_pci_mem_unset(phba);
13918 out_disable_pci_dev:
13919 	lpfc_disable_pci_dev(phba);
13920 	if (shost)
13921 		scsi_host_put(shost);
13922 out_free_phba:
13923 	lpfc_hba_free(phba);
13924 	return error;
13925 }
13926 
13927 /**
13928  * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
13929  * @pdev: pointer to PCI device
13930  *
13931  * This routine is to be called to disattach a device with SLI-3 interface
13932  * spec from PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
13933  * removed from PCI bus, it performs all the necessary cleanup for the HBA
13934  * device to be removed from the PCI subsystem properly.
13935  **/
13936 static void
13937 lpfc_pci_remove_one_s3(struct pci_dev *pdev)
13938 {
13939 	struct Scsi_Host  *shost = pci_get_drvdata(pdev);
13940 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
13941 	struct lpfc_vport **vports;
13942 	struct lpfc_hba   *phba = vport->phba;
13943 	int i;
13944 
13945 	spin_lock_irq(&phba->hbalock);
13946 	vport->load_flag |= FC_UNLOADING;
13947 	spin_unlock_irq(&phba->hbalock);
13948 
13949 	lpfc_free_sysfs_attr(vport);
13950 
13951 	/* Release all the vports against this physical port */
13952 	vports = lpfc_create_vport_work_array(phba);
13953 	if (vports != NULL)
13954 		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
13955 			if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
13956 				continue;
13957 			fc_vport_terminate(vports[i]->fc_vport);
13958 		}
13959 	lpfc_destroy_vport_work_array(phba, vports);
13960 
13961 	/* Remove FC host with the physical port */
13962 	fc_remove_host(shost);
13963 	scsi_remove_host(shost);
13964 
13965 	/* Clean up all nodes, mailboxes and IOs. */
13966 	lpfc_cleanup(vport);
13967 
13968 	/*
13969 	 * Bring down the SLI Layer. This step disable all interrupts,
13970 	 * clears the rings, discards all mailbox commands, and resets
13971 	 * the HBA.
13972 	 */
13973 
13974 	/* HBA interrupt will be disabled after this call */
13975 	lpfc_sli_hba_down(phba);
13976 	/* Stop kthread signal shall trigger work_done one more time */
13977 	kthread_stop(phba->worker_thread);
13978 	/* Final cleanup of txcmplq and reset the HBA */
13979 	lpfc_sli_brdrestart(phba);
13980 
13981 	kfree(phba->vpi_bmask);
13982 	kfree(phba->vpi_ids);
13983 
13984 	lpfc_stop_hba_timers(phba);
13985 	spin_lock_irq(&phba->port_list_lock);
13986 	list_del_init(&vport->listentry);
13987 	spin_unlock_irq(&phba->port_list_lock);
13988 
13989 	lpfc_debugfs_terminate(vport);
13990 
13991 	/* Disable SR-IOV if enabled */
13992 	if (phba->cfg_sriov_nr_virtfn)
13993 		pci_disable_sriov(pdev);
13994 
13995 	/* Disable interrupt */
13996 	lpfc_sli_disable_intr(phba);
13997 
13998 	scsi_host_put(shost);
13999 
14000 	/*
14001 	 * Call scsi_free before mem_free since scsi bufs are released to their
14002 	 * corresponding pools here.
14003 	 */
14004 	lpfc_scsi_free(phba);
14005 	lpfc_free_iocb_list(phba);
14006 
14007 	lpfc_mem_free_all(phba);
14008 
14009 	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
14010 			  phba->hbqslimp.virt, phba->hbqslimp.phys);
14011 
14012 	/* Free resources associated with SLI2 interface */
14013 	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
14014 			  phba->slim2p.virt, phba->slim2p.phys);
14015 
14016 	/* unmap adapter SLIM and Control Registers */
14017 	iounmap(phba->ctrl_regs_memmap_p);
14018 	iounmap(phba->slim_memmap_p);
14019 
14020 	lpfc_hba_free(phba);
14021 
14022 	pci_release_mem_regions(pdev);
14023 	pci_disable_device(pdev);
14024 }
14025 
14026 /**
14027  * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
14028  * @dev_d: pointer to device
14029  *
14030  * This routine is to be called from the kernel's PCI subsystem to support
14031  * system Power Management (PM) to device with SLI-3 interface spec. When
14032  * PM invokes this method, it quiesces the device by stopping the driver's
14033  * worker thread for the device, turning off device's interrupt and DMA,
14034  * and bring the device offline. Note that as the driver implements the
14035  * minimum PM requirements to a power-aware driver's PM support for the
14036  * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
14037  * to the suspend() method call will be treated as SUSPEND and the driver will
14038  * fully reinitialize its device during resume() method call, the driver will
14039  * set device to PCI_D3hot state in PCI config space instead of setting it
14040  * according to the @msg provided by the PM.
14041  *
14042  * Return code
14043  * 	0 - driver suspended the device
14044  * 	Error otherwise
14045  **/
14046 static int __maybe_unused
14047 lpfc_pci_suspend_one_s3(struct device *dev_d)
14048 {
14049 	struct Scsi_Host *shost = dev_get_drvdata(dev_d);
14050 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
14051 
14052 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
14053 			"0473 PCI device Power Management suspend.\n");
14054 
14055 	/* Bring down the device */
14056 	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
14057 	lpfc_offline(phba);
14058 	kthread_stop(phba->worker_thread);
14059 
14060 	/* Disable interrupt from device */
14061 	lpfc_sli_disable_intr(phba);
14062 
14063 	return 0;
14064 }
14065 
14066 /**
14067  * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
14068  * @dev_d: pointer to device
14069  *
14070  * This routine is to be called from the kernel's PCI subsystem to support
14071  * system Power Management (PM) to device with SLI-3 interface spec. When PM
14072  * invokes this method, it restores the device's PCI config space state and
14073  * fully reinitializes the device and brings it online. Note that as the
14074  * driver implements the minimum PM requirements to a power-aware driver's
14075  * PM for suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE,
14076  * FREEZE) to the suspend() method call will be treated as SUSPEND and the
14077  * driver will fully reinitialize its device during resume() method call,
14078  * the device will be set to PCI_D0 directly in PCI config space before
14079  * restoring the state.
14080  *
14081  * Return code
14082  * 	0 - driver suspended the device
14083  * 	Error otherwise
14084  **/
14085 static int __maybe_unused
14086 lpfc_pci_resume_one_s3(struct device *dev_d)
14087 {
14088 	struct Scsi_Host *shost = dev_get_drvdata(dev_d);
14089 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
14090 	uint32_t intr_mode;
14091 	int error;
14092 
14093 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
14094 			"0452 PCI device Power Management resume.\n");
14095 
14096 	/* Startup the kernel thread for this host adapter. */
14097 	phba->worker_thread = kthread_run(lpfc_do_work, phba,
14098 					"lpfc_worker_%d", phba->brd_no);
14099 	if (IS_ERR(phba->worker_thread)) {
14100 		error = PTR_ERR(phba->worker_thread);
14101 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14102 				"0434 PM resume failed to start worker "
14103 				"thread: error=x%x.\n", error);
14104 		return error;
14105 	}
14106 
14107 	/* Init cpu_map array */
14108 	lpfc_cpu_map_array_init(phba);
14109 	/* Init hba_eq_hdl array */
14110 	lpfc_hba_eq_hdl_array_init(phba);
14111 	/* Configure and enable interrupt */
14112 	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
14113 	if (intr_mode == LPFC_INTR_ERROR) {
14114 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14115 				"0430 PM resume Failed to enable interrupt\n");
14116 		return -EIO;
14117 	} else
14118 		phba->intr_mode = intr_mode;
14119 
14120 	/* Restart HBA and bring it online */
14121 	lpfc_sli_brdrestart(phba);
14122 	lpfc_online(phba);
14123 
14124 	/* Log the current active interrupt mode */
14125 	lpfc_log_intr_mode(phba, phba->intr_mode);
14126 
14127 	return 0;
14128 }
14129 
14130 /**
14131  * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover
14132  * @phba: pointer to lpfc hba data structure.
14133  *
14134  * This routine is called to prepare the SLI3 device for PCI slot recover. It
14135  * aborts all the outstanding SCSI I/Os to the pci device.
14136  **/
14137 static void
14138 lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
14139 {
14140 	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14141 			"2723 PCI channel I/O abort preparing for recovery\n");
14142 
14143 	/*
14144 	 * There may be errored I/Os through HBA, abort all I/Os on txcmplq
14145 	 * and let the SCSI mid-layer to retry them to recover.
14146 	 */
14147 	lpfc_sli_abort_fcp_rings(phba);
14148 }
14149 
14150 /**
14151  * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset
14152  * @phba: pointer to lpfc hba data structure.
14153  *
14154  * This routine is called to prepare the SLI3 device for PCI slot reset. It
14155  * disables the device interrupt and pci device, and aborts the internal FCP
14156  * pending I/Os.
14157  **/
14158 static void
14159 lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
14160 {
14161 	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14162 			"2710 PCI channel disable preparing for reset\n");
14163 
14164 	/* Block any management I/Os to the device */
14165 	lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
14166 
14167 	/* Block all SCSI devices' I/Os on the host */
14168 	lpfc_scsi_dev_block(phba);
14169 
14170 	/* Flush all driver's outstanding SCSI I/Os as we are to reset */
14171 	lpfc_sli_flush_io_rings(phba);
14172 
14173 	/* stop all timers */
14174 	lpfc_stop_hba_timers(phba);
14175 
14176 	/* Disable interrupt and pci device */
14177 	lpfc_sli_disable_intr(phba);
14178 	pci_disable_device(phba->pcidev);
14179 }
14180 
14181 /**
14182  * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable
14183  * @phba: pointer to lpfc hba data structure.
14184  *
14185  * This routine is called to prepare the SLI3 device for PCI slot permanently
14186  * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
14187  * pending I/Os.
14188  **/
14189 static void
14190 lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
14191 {
14192 	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14193 			"2711 PCI channel permanent disable for failure\n");
14194 	/* Block all SCSI devices' I/Os on the host */
14195 	lpfc_scsi_dev_block(phba);
14196 
14197 	/* stop all timers */
14198 	lpfc_stop_hba_timers(phba);
14199 
14200 	/* Clean up all driver's outstanding SCSI I/Os */
14201 	lpfc_sli_flush_io_rings(phba);
14202 }
14203 
14204 /**
14205  * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
14206  * @pdev: pointer to PCI device.
14207  * @state: the current PCI connection state.
14208  *
14209  * This routine is called from the PCI subsystem for I/O error handling to
14210  * device with SLI-3 interface spec. This function is called by the PCI
14211  * subsystem after a PCI bus error affecting this device has been detected.
14212  * When this function is invoked, it will need to stop all the I/Os and
14213  * interrupt(s) to the device. Once that is done, it will return
14214  * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
14215  * as desired.
14216  *
14217  * Return codes
14218  * 	PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
14219  * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
14220  * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
14221  **/
14222 static pci_ers_result_t
14223 lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
14224 {
14225 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
14226 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
14227 
14228 	switch (state) {
14229 	case pci_channel_io_normal:
14230 		/* Non-fatal error, prepare for recovery */
14231 		lpfc_sli_prep_dev_for_recover(phba);
14232 		return PCI_ERS_RESULT_CAN_RECOVER;
14233 	case pci_channel_io_frozen:
14234 		/* Fatal error, prepare for slot reset */
14235 		lpfc_sli_prep_dev_for_reset(phba);
14236 		return PCI_ERS_RESULT_NEED_RESET;
14237 	case pci_channel_io_perm_failure:
14238 		/* Permanent failure, prepare for device down */
14239 		lpfc_sli_prep_dev_for_perm_failure(phba);
14240 		return PCI_ERS_RESULT_DISCONNECT;
14241 	default:
14242 		/* Unknown state, prepare and request slot reset */
14243 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14244 				"0472 Unknown PCI error state: x%x\n", state);
14245 		lpfc_sli_prep_dev_for_reset(phba);
14246 		return PCI_ERS_RESULT_NEED_RESET;
14247 	}
14248 }
14249 
14250 /**
14251  * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
14252  * @pdev: pointer to PCI device.
14253  *
14254  * This routine is called from the PCI subsystem for error handling to
14255  * device with SLI-3 interface spec. This is called after PCI bus has been
14256  * reset to restart the PCI card from scratch, as if from a cold-boot.
14257  * During the PCI subsystem error recovery, after driver returns
14258  * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
14259  * recovery and then call this routine before calling the .resume method
14260  * to recover the device. This function will initialize the HBA device,
14261  * enable the interrupt, but it will just put the HBA to offline state
14262  * without passing any I/O traffic.
14263  *
14264  * Return codes
14265  * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
14266  * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
14267  */
14268 static pci_ers_result_t
14269 lpfc_io_slot_reset_s3(struct pci_dev *pdev)
14270 {
14271 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
14272 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
14273 	struct lpfc_sli *psli = &phba->sli;
14274 	uint32_t intr_mode;
14275 
14276 	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
14277 	if (pci_enable_device_mem(pdev)) {
14278 		printk(KERN_ERR "lpfc: Cannot re-enable "
14279 			"PCI device after reset.\n");
14280 		return PCI_ERS_RESULT_DISCONNECT;
14281 	}
14282 
14283 	pci_restore_state(pdev);
14284 
14285 	/*
14286 	 * As the new kernel behavior of pci_restore_state() API call clears
14287 	 * device saved_state flag, need to save the restored state again.
14288 	 */
14289 	pci_save_state(pdev);
14290 
14291 	if (pdev->is_busmaster)
14292 		pci_set_master(pdev);
14293 
14294 	spin_lock_irq(&phba->hbalock);
14295 	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
14296 	spin_unlock_irq(&phba->hbalock);
14297 
14298 	/* Configure and enable interrupt */
14299 	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
14300 	if (intr_mode == LPFC_INTR_ERROR) {
14301 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14302 				"0427 Cannot re-enable interrupt after "
14303 				"slot reset.\n");
14304 		return PCI_ERS_RESULT_DISCONNECT;
14305 	} else
14306 		phba->intr_mode = intr_mode;
14307 
14308 	/* Take device offline, it will perform cleanup */
14309 	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
14310 	lpfc_offline(phba);
14311 	lpfc_sli_brdrestart(phba);
14312 
14313 	/* Log the current active interrupt mode */
14314 	lpfc_log_intr_mode(phba, phba->intr_mode);
14315 
14316 	return PCI_ERS_RESULT_RECOVERED;
14317 }
14318 
14319 /**
14320  * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
14321  * @pdev: pointer to PCI device
14322  *
14323  * This routine is called from the PCI subsystem for error handling to device
14324  * with SLI-3 interface spec. It is called when kernel error recovery tells
14325  * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
14326  * error recovery. After this call, traffic can start to flow from this device
14327  * again.
14328  */
14329 static void
14330 lpfc_io_resume_s3(struct pci_dev *pdev)
14331 {
14332 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
14333 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
14334 
14335 	/* Bring device online, it will be no-op for non-fatal error resume */
14336 	lpfc_online(phba);
14337 }
14338 
14339 /**
14340  * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
14341  * @phba: pointer to lpfc hba data structure.
14342  *
14343  * returns the number of ELS/CT IOCBs to reserve
14344  **/
14345 int
14346 lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
14347 {
14348 	int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
14349 
14350 	if (phba->sli_rev == LPFC_SLI_REV4) {
14351 		if (max_xri <= 100)
14352 			return 10;
14353 		else if (max_xri <= 256)
14354 			return 25;
14355 		else if (max_xri <= 512)
14356 			return 50;
14357 		else if (max_xri <= 1024)
14358 			return 100;
14359 		else if (max_xri <= 1536)
14360 			return 150;
14361 		else if (max_xri <= 2048)
14362 			return 200;
14363 		else
14364 			return 250;
14365 	} else
14366 		return 0;
14367 }
14368 
14369 /**
14370  * lpfc_sli4_get_iocb_cnt - Calculate the # of total IOCBs to reserve
14371  * @phba: pointer to lpfc hba data structure.
14372  *
14373  * returns the number of ELS/CT + NVMET IOCBs to reserve
14374  **/
14375 int
14376 lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba)
14377 {
14378 	int max_xri = lpfc_sli4_get_els_iocb_cnt(phba);
14379 
14380 	if (phba->nvmet_support)
14381 		max_xri += LPFC_NVMET_BUF_POST;
14382 	return max_xri;
14383 }
14384 
14385 
14386 static int
14387 lpfc_log_write_firmware_error(struct lpfc_hba *phba, uint32_t offset,
14388 	uint32_t magic_number, uint32_t ftype, uint32_t fid, uint32_t fsize,
14389 	const struct firmware *fw)
14390 {
14391 	int rc;
14392 	u8 sli_family;
14393 
14394 	sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf);
14395 	/* Three cases:  (1) FW was not supported on the detected adapter.
14396 	 * (2) FW update has been locked out administratively.
14397 	 * (3) Some other error during FW update.
14398 	 * In each case, an unmaskable message is written to the console
14399 	 * for admin diagnosis.
14400 	 */
14401 	if (offset == ADD_STATUS_FW_NOT_SUPPORTED ||
14402 	    (sli_family == LPFC_SLI_INTF_FAMILY_G6 &&
14403 	     magic_number != MAGIC_NUMBER_G6) ||
14404 	    (sli_family == LPFC_SLI_INTF_FAMILY_G7 &&
14405 	     magic_number != MAGIC_NUMBER_G7) ||
14406 	    (sli_family == LPFC_SLI_INTF_FAMILY_G7P &&
14407 	     magic_number != MAGIC_NUMBER_G7P)) {
14408 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14409 				"3030 This firmware version is not supported on"
14410 				" this HBA model. Device:%x Magic:%x Type:%x "
14411 				"ID:%x Size %d %zd\n",
14412 				phba->pcidev->device, magic_number, ftype, fid,
14413 				fsize, fw->size);
14414 		rc = -EINVAL;
14415 	} else if (offset == ADD_STATUS_FW_DOWNLOAD_HW_DISABLED) {
14416 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14417 				"3021 Firmware downloads have been prohibited "
14418 				"by a system configuration setting on "
14419 				"Device:%x Magic:%x Type:%x ID:%x Size %d "
14420 				"%zd\n",
14421 				phba->pcidev->device, magic_number, ftype, fid,
14422 				fsize, fw->size);
14423 		rc = -EACCES;
14424 	} else {
14425 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14426 				"3022 FW Download failed. Add Status x%x "
14427 				"Device:%x Magic:%x Type:%x ID:%x Size %d "
14428 				"%zd\n",
14429 				offset, phba->pcidev->device, magic_number,
14430 				ftype, fid, fsize, fw->size);
14431 		rc = -EIO;
14432 	}
14433 	return rc;
14434 }
14435 
14436 /**
14437  * lpfc_write_firmware - attempt to write a firmware image to the port
14438  * @fw: pointer to firmware image returned from request_firmware.
14439  * @context: pointer to firmware image returned from request_firmware.
14440  *
14441  **/
14442 static void
14443 lpfc_write_firmware(const struct firmware *fw, void *context)
14444 {
14445 	struct lpfc_hba *phba = (struct lpfc_hba *)context;
14446 	char fwrev[FW_REV_STR_SIZE];
14447 	struct lpfc_grp_hdr *image;
14448 	struct list_head dma_buffer_list;
14449 	int i, rc = 0;
14450 	struct lpfc_dmabuf *dmabuf, *next;
14451 	uint32_t offset = 0, temp_offset = 0;
14452 	uint32_t magic_number, ftype, fid, fsize;
14453 
14454 	/* It can be null in no-wait mode, sanity check */
14455 	if (!fw) {
14456 		rc = -ENXIO;
14457 		goto out;
14458 	}
14459 	image = (struct lpfc_grp_hdr *)fw->data;
14460 
14461 	magic_number = be32_to_cpu(image->magic_number);
14462 	ftype = bf_get_be32(lpfc_grp_hdr_file_type, image);
14463 	fid = bf_get_be32(lpfc_grp_hdr_id, image);
14464 	fsize = be32_to_cpu(image->size);
14465 
14466 	INIT_LIST_HEAD(&dma_buffer_list);
14467 	lpfc_decode_firmware_rev(phba, fwrev, 1);
14468 	if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) {
14469 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14470 				"3023 Updating Firmware, Current Version:%s "
14471 				"New Version:%s\n",
14472 				fwrev, image->revision);
14473 		for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) {
14474 			dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
14475 					 GFP_KERNEL);
14476 			if (!dmabuf) {
14477 				rc = -ENOMEM;
14478 				goto release_out;
14479 			}
14480 			dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
14481 							  SLI4_PAGE_SIZE,
14482 							  &dmabuf->phys,
14483 							  GFP_KERNEL);
14484 			if (!dmabuf->virt) {
14485 				kfree(dmabuf);
14486 				rc = -ENOMEM;
14487 				goto release_out;
14488 			}
14489 			list_add_tail(&dmabuf->list, &dma_buffer_list);
14490 		}
14491 		while (offset < fw->size) {
14492 			temp_offset = offset;
14493 			list_for_each_entry(dmabuf, &dma_buffer_list, list) {
14494 				if (temp_offset + SLI4_PAGE_SIZE > fw->size) {
14495 					memcpy(dmabuf->virt,
14496 					       fw->data + temp_offset,
14497 					       fw->size - temp_offset);
14498 					temp_offset = fw->size;
14499 					break;
14500 				}
14501 				memcpy(dmabuf->virt, fw->data + temp_offset,
14502 				       SLI4_PAGE_SIZE);
14503 				temp_offset += SLI4_PAGE_SIZE;
14504 			}
14505 			rc = lpfc_wr_object(phba, &dma_buffer_list,
14506 				    (fw->size - offset), &offset);
14507 			if (rc) {
14508 				rc = lpfc_log_write_firmware_error(phba, offset,
14509 								   magic_number,
14510 								   ftype,
14511 								   fid,
14512 								   fsize,
14513 								   fw);
14514 				goto release_out;
14515 			}
14516 		}
14517 		rc = offset;
14518 	} else
14519 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14520 				"3029 Skipped Firmware update, Current "
14521 				"Version:%s New Version:%s\n",
14522 				fwrev, image->revision);
14523 
14524 release_out:
14525 	list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) {
14526 		list_del(&dmabuf->list);
14527 		dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
14528 				  dmabuf->virt, dmabuf->phys);
14529 		kfree(dmabuf);
14530 	}
14531 	release_firmware(fw);
14532 out:
14533 	if (rc < 0)
14534 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14535 				"3062 Firmware update error, status %d.\n", rc);
14536 	else
14537 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14538 				"3024 Firmware update success: size %d.\n", rc);
14539 }
14540 
14541 /**
14542  * lpfc_sli4_request_firmware_update - Request linux generic firmware upgrade
14543  * @phba: pointer to lpfc hba data structure.
14544  * @fw_upgrade: which firmware to update.
14545  *
14546  * This routine is called to perform Linux generic firmware upgrade on device
14547  * that supports such feature.
14548  **/
14549 int
14550 lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade)
14551 {
14552 	uint8_t file_name[ELX_MODEL_NAME_SIZE];
14553 	int ret;
14554 	const struct firmware *fw;
14555 
14556 	/* Only supported on SLI4 interface type 2 for now */
14557 	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
14558 	    LPFC_SLI_INTF_IF_TYPE_2)
14559 		return -EPERM;
14560 
14561 	snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp", phba->ModelName);
14562 
14563 	if (fw_upgrade == INT_FW_UPGRADE) {
14564 		ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT,
14565 					file_name, &phba->pcidev->dev,
14566 					GFP_KERNEL, (void *)phba,
14567 					lpfc_write_firmware);
14568 	} else if (fw_upgrade == RUN_FW_UPGRADE) {
14569 		ret = request_firmware(&fw, file_name, &phba->pcidev->dev);
14570 		if (!ret)
14571 			lpfc_write_firmware(fw, (void *)phba);
14572 	} else {
14573 		ret = -EINVAL;
14574 	}
14575 
14576 	return ret;
14577 }
14578 
14579 /**
14580  * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
14581  * @pdev: pointer to PCI device
14582  * @pid: pointer to PCI device identifier
14583  *
14584  * This routine is called from the kernel's PCI subsystem to device with
14585  * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
14586  * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
14587  * information of the device and driver to see if the driver state that it
14588  * can support this kind of device. If the match is successful, the driver
14589  * core invokes this routine. If this routine determines it can claim the HBA,
14590  * it does all the initialization that it needs to do to handle the HBA
14591  * properly.
14592  *
14593  * Return code
14594  * 	0 - driver can claim the device
14595  * 	negative value - driver can not claim the device
14596  **/
14597 static int
14598 lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
14599 {
14600 	struct lpfc_hba   *phba;
14601 	struct lpfc_vport *vport = NULL;
14602 	struct Scsi_Host  *shost = NULL;
14603 	int error;
14604 	uint32_t cfg_mode, intr_mode;
14605 
14606 	/* Allocate memory for HBA structure */
14607 	phba = lpfc_hba_alloc(pdev);
14608 	if (!phba)
14609 		return -ENOMEM;
14610 
14611 	INIT_LIST_HEAD(&phba->poll_list);
14612 
14613 	/* Perform generic PCI device enabling operation */
14614 	error = lpfc_enable_pci_dev(phba);
14615 	if (error)
14616 		goto out_free_phba;
14617 
14618 	/* Set up SLI API function jump table for PCI-device group-1 HBAs */
14619 	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
14620 	if (error)
14621 		goto out_disable_pci_dev;
14622 
14623 	/* Set up SLI-4 specific device PCI memory space */
14624 	error = lpfc_sli4_pci_mem_setup(phba);
14625 	if (error) {
14626 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14627 				"1410 Failed to set up pci memory space.\n");
14628 		goto out_disable_pci_dev;
14629 	}
14630 
14631 	/* Set up SLI-4 Specific device driver resources */
14632 	error = lpfc_sli4_driver_resource_setup(phba);
14633 	if (error) {
14634 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14635 				"1412 Failed to set up driver resource.\n");
14636 		goto out_unset_pci_mem_s4;
14637 	}
14638 
14639 	INIT_LIST_HEAD(&phba->active_rrq_list);
14640 	INIT_LIST_HEAD(&phba->fcf.fcf_pri_list);
14641 
14642 	/* Set up common device driver resources */
14643 	error = lpfc_setup_driver_resource_phase2(phba);
14644 	if (error) {
14645 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14646 				"1414 Failed to set up driver resource.\n");
14647 		goto out_unset_driver_resource_s4;
14648 	}
14649 
14650 	/* Get the default values for Model Name and Description */
14651 	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
14652 
14653 	/* Now, trying to enable interrupt and bring up the device */
14654 	cfg_mode = phba->cfg_use_msi;
14655 
14656 	/* Put device to a known state before enabling interrupt */
14657 	phba->pport = NULL;
14658 	lpfc_stop_port(phba);
14659 
14660 	/* Init cpu_map array */
14661 	lpfc_cpu_map_array_init(phba);
14662 
14663 	/* Init hba_eq_hdl array */
14664 	lpfc_hba_eq_hdl_array_init(phba);
14665 
14666 	/* Configure and enable interrupt */
14667 	intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
14668 	if (intr_mode == LPFC_INTR_ERROR) {
14669 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14670 				"0426 Failed to enable interrupt.\n");
14671 		error = -ENODEV;
14672 		goto out_unset_driver_resource;
14673 	}
14674 	/* Default to single EQ for non-MSI-X */
14675 	if (phba->intr_type != MSIX) {
14676 		phba->cfg_irq_chann = 1;
14677 		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
14678 			if (phba->nvmet_support)
14679 				phba->cfg_nvmet_mrq = 1;
14680 		}
14681 	}
14682 	lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann);
14683 
14684 	/* Create SCSI host to the physical port */
14685 	error = lpfc_create_shost(phba);
14686 	if (error) {
14687 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14688 				"1415 Failed to create scsi host.\n");
14689 		goto out_disable_intr;
14690 	}
14691 	vport = phba->pport;
14692 	shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
14693 
14694 	/* Configure sysfs attributes */
14695 	error = lpfc_alloc_sysfs_attr(vport);
14696 	if (error) {
14697 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14698 				"1416 Failed to allocate sysfs attr\n");
14699 		goto out_destroy_shost;
14700 	}
14701 
14702 	/* Set up SLI-4 HBA */
14703 	if (lpfc_sli4_hba_setup(phba)) {
14704 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14705 				"1421 Failed to set up hba\n");
14706 		error = -ENODEV;
14707 		goto out_free_sysfs_attr;
14708 	}
14709 
14710 	/* Log the current active interrupt mode */
14711 	phba->intr_mode = intr_mode;
14712 	lpfc_log_intr_mode(phba, intr_mode);
14713 
14714 	/* Perform post initialization setup */
14715 	lpfc_post_init_setup(phba);
14716 
14717 	/* NVME support in FW earlier in the driver load corrects the
14718 	 * FC4 type making a check for nvme_support unnecessary.
14719 	 */
14720 	if (phba->nvmet_support == 0) {
14721 		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
14722 			/* Create NVME binding with nvme_fc_transport. This
14723 			 * ensures the vport is initialized.  If the localport
14724 			 * create fails, it should not unload the driver to
14725 			 * support field issues.
14726 			 */
14727 			error = lpfc_nvme_create_localport(vport);
14728 			if (error) {
14729 				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14730 						"6004 NVME registration "
14731 						"failed, error x%x\n",
14732 						error);
14733 			}
14734 		}
14735 	}
14736 
14737 	/* check for firmware upgrade or downgrade */
14738 	if (phba->cfg_request_firmware_upgrade)
14739 		lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE);
14740 
14741 	/* Check if there are static vports to be created. */
14742 	lpfc_create_static_vport(phba);
14743 
14744 	/* Enable RAS FW log support */
14745 	lpfc_sli4_ras_setup(phba);
14746 
14747 	timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0);
14748 	cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, &phba->cpuhp);
14749 
14750 	return 0;
14751 
14752 out_free_sysfs_attr:
14753 	lpfc_free_sysfs_attr(vport);
14754 out_destroy_shost:
14755 	lpfc_destroy_shost(phba);
14756 out_disable_intr:
14757 	lpfc_sli4_disable_intr(phba);
14758 out_unset_driver_resource:
14759 	lpfc_unset_driver_resource_phase2(phba);
14760 out_unset_driver_resource_s4:
14761 	lpfc_sli4_driver_resource_unset(phba);
14762 out_unset_pci_mem_s4:
14763 	lpfc_sli4_pci_mem_unset(phba);
14764 out_disable_pci_dev:
14765 	lpfc_disable_pci_dev(phba);
14766 	if (shost)
14767 		scsi_host_put(shost);
14768 out_free_phba:
14769 	lpfc_hba_free(phba);
14770 	return error;
14771 }
14772 
14773 /**
14774  * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
14775  * @pdev: pointer to PCI device
14776  *
14777  * This routine is called from the kernel's PCI subsystem to device with
14778  * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
14779  * removed from PCI bus, it performs all the necessary cleanup for the HBA
14780  * device to be removed from the PCI subsystem properly.
14781  **/
14782 static void
14783 lpfc_pci_remove_one_s4(struct pci_dev *pdev)
14784 {
14785 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
14786 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
14787 	struct lpfc_vport **vports;
14788 	struct lpfc_hba *phba = vport->phba;
14789 	int i;
14790 
14791 	/* Mark the device unloading flag */
14792 	spin_lock_irq(&phba->hbalock);
14793 	vport->load_flag |= FC_UNLOADING;
14794 	spin_unlock_irq(&phba->hbalock);
14795 	if (phba->cgn_i)
14796 		lpfc_unreg_congestion_buf(phba);
14797 
14798 	lpfc_free_sysfs_attr(vport);
14799 
14800 	/* Release all the vports against this physical port */
14801 	vports = lpfc_create_vport_work_array(phba);
14802 	if (vports != NULL)
14803 		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
14804 			if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
14805 				continue;
14806 			fc_vport_terminate(vports[i]->fc_vport);
14807 		}
14808 	lpfc_destroy_vport_work_array(phba, vports);
14809 
14810 	/* Remove FC host with the physical port */
14811 	fc_remove_host(shost);
14812 	scsi_remove_host(shost);
14813 
14814 	/* Perform ndlp cleanup on the physical port.  The nvme and nvmet
14815 	 * localports are destroyed after to cleanup all transport memory.
14816 	 */
14817 	lpfc_cleanup(vport);
14818 	lpfc_nvmet_destroy_targetport(phba);
14819 	lpfc_nvme_destroy_localport(vport);
14820 
14821 	/* De-allocate multi-XRI pools */
14822 	if (phba->cfg_xri_rebalancing)
14823 		lpfc_destroy_multixri_pools(phba);
14824 
14825 	/*
14826 	 * Bring down the SLI Layer. This step disables all interrupts,
14827 	 * clears the rings, discards all mailbox commands, and resets
14828 	 * the HBA FCoE function.
14829 	 */
14830 	lpfc_debugfs_terminate(vport);
14831 
14832 	lpfc_stop_hba_timers(phba);
14833 	spin_lock_irq(&phba->port_list_lock);
14834 	list_del_init(&vport->listentry);
14835 	spin_unlock_irq(&phba->port_list_lock);
14836 
14837 	/* Perform scsi free before driver resource_unset since scsi
14838 	 * buffers are released to their corresponding pools here.
14839 	 */
14840 	lpfc_io_free(phba);
14841 	lpfc_free_iocb_list(phba);
14842 	lpfc_sli4_hba_unset(phba);
14843 
14844 	lpfc_unset_driver_resource_phase2(phba);
14845 	lpfc_sli4_driver_resource_unset(phba);
14846 
14847 	/* Unmap adapter Control and Doorbell registers */
14848 	lpfc_sli4_pci_mem_unset(phba);
14849 
14850 	/* Release PCI resources and disable device's PCI function */
14851 	scsi_host_put(shost);
14852 	lpfc_disable_pci_dev(phba);
14853 
14854 	/* Finally, free the driver's device data structure */
14855 	lpfc_hba_free(phba);
14856 
14857 	return;
14858 }
14859 
14860 /**
14861  * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
14862  * @dev_d: pointer to device
14863  *
14864  * This routine is called from the kernel's PCI subsystem to support system
14865  * Power Management (PM) to device with SLI-4 interface spec. When PM invokes
14866  * this method, it quiesces the device by stopping the driver's worker
14867  * thread for the device, turning off device's interrupt and DMA, and bring
14868  * the device offline. Note that as the driver implements the minimum PM
14869  * requirements to a power-aware driver's PM support for suspend/resume -- all
14870  * the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend()
14871  * method call will be treated as SUSPEND and the driver will fully
14872  * reinitialize its device during resume() method call, the driver will set
14873  * device to PCI_D3hot state in PCI config space instead of setting it
14874  * according to the @msg provided by the PM.
14875  *
14876  * Return code
14877  * 	0 - driver suspended the device
14878  * 	Error otherwise
14879  **/
14880 static int __maybe_unused
14881 lpfc_pci_suspend_one_s4(struct device *dev_d)
14882 {
14883 	struct Scsi_Host *shost = dev_get_drvdata(dev_d);
14884 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
14885 
14886 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
14887 			"2843 PCI device Power Management suspend.\n");
14888 
14889 	/* Bring down the device */
14890 	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
14891 	lpfc_offline(phba);
14892 	kthread_stop(phba->worker_thread);
14893 
14894 	/* Disable interrupt from device */
14895 	lpfc_sli4_disable_intr(phba);
14896 	lpfc_sli4_queue_destroy(phba);
14897 
14898 	return 0;
14899 }
14900 
14901 /**
14902  * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
14903  * @dev_d: pointer to device
14904  *
14905  * This routine is called from the kernel's PCI subsystem to support system
14906  * Power Management (PM) to device with SLI-4 interface spac. When PM invokes
14907  * this method, it restores the device's PCI config space state and fully
14908  * reinitializes the device and brings it online. Note that as the driver
14909  * implements the minimum PM requirements to a power-aware driver's PM for
14910  * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
14911  * to the suspend() method call will be treated as SUSPEND and the driver
14912  * will fully reinitialize its device during resume() method call, the device
14913  * will be set to PCI_D0 directly in PCI config space before restoring the
14914  * state.
14915  *
14916  * Return code
14917  * 	0 - driver suspended the device
14918  * 	Error otherwise
14919  **/
14920 static int __maybe_unused
14921 lpfc_pci_resume_one_s4(struct device *dev_d)
14922 {
14923 	struct Scsi_Host *shost = dev_get_drvdata(dev_d);
14924 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
14925 	uint32_t intr_mode;
14926 	int error;
14927 
14928 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
14929 			"0292 PCI device Power Management resume.\n");
14930 
14931 	 /* Startup the kernel thread for this host adapter. */
14932 	phba->worker_thread = kthread_run(lpfc_do_work, phba,
14933 					"lpfc_worker_%d", phba->brd_no);
14934 	if (IS_ERR(phba->worker_thread)) {
14935 		error = PTR_ERR(phba->worker_thread);
14936 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14937 				"0293 PM resume failed to start worker "
14938 				"thread: error=x%x.\n", error);
14939 		return error;
14940 	}
14941 
14942 	/* Configure and enable interrupt */
14943 	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
14944 	if (intr_mode == LPFC_INTR_ERROR) {
14945 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14946 				"0294 PM resume Failed to enable interrupt\n");
14947 		return -EIO;
14948 	} else
14949 		phba->intr_mode = intr_mode;
14950 
14951 	/* Restart HBA and bring it online */
14952 	lpfc_sli_brdrestart(phba);
14953 	lpfc_online(phba);
14954 
14955 	/* Log the current active interrupt mode */
14956 	lpfc_log_intr_mode(phba, phba->intr_mode);
14957 
14958 	return 0;
14959 }
14960 
14961 /**
14962  * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover
14963  * @phba: pointer to lpfc hba data structure.
14964  *
14965  * This routine is called to prepare the SLI4 device for PCI slot recover. It
14966  * aborts all the outstanding SCSI I/Os to the pci device.
14967  **/
14968 static void
14969 lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
14970 {
14971 	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14972 			"2828 PCI channel I/O abort preparing for recovery\n");
14973 	/*
14974 	 * There may be errored I/Os through HBA, abort all I/Os on txcmplq
14975 	 * and let the SCSI mid-layer to retry them to recover.
14976 	 */
14977 	lpfc_sli_abort_fcp_rings(phba);
14978 }
14979 
14980 /**
14981  * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset
14982  * @phba: pointer to lpfc hba data structure.
14983  *
14984  * This routine is called to prepare the SLI4 device for PCI slot reset. It
14985  * disables the device interrupt and pci device, and aborts the internal FCP
14986  * pending I/Os.
14987  **/
14988 static void
14989 lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
14990 {
14991 	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14992 			"2826 PCI channel disable preparing for reset\n");
14993 
14994 	/* Block any management I/Os to the device */
14995 	lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT);
14996 
14997 	/* Block all SCSI devices' I/Os on the host */
14998 	lpfc_scsi_dev_block(phba);
14999 
15000 	/* Flush all driver's outstanding I/Os as we are to reset */
15001 	lpfc_sli_flush_io_rings(phba);
15002 
15003 	/* stop all timers */
15004 	lpfc_stop_hba_timers(phba);
15005 
15006 	/* Disable interrupt and pci device */
15007 	lpfc_sli4_disable_intr(phba);
15008 	lpfc_sli4_queue_destroy(phba);
15009 	pci_disable_device(phba->pcidev);
15010 }
15011 
15012 /**
15013  * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable
15014  * @phba: pointer to lpfc hba data structure.
15015  *
15016  * This routine is called to prepare the SLI4 device for PCI slot permanently
15017  * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
15018  * pending I/Os.
15019  **/
15020 static void
15021 lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
15022 {
15023 	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15024 			"2827 PCI channel permanent disable for failure\n");
15025 
15026 	/* Block all SCSI devices' I/Os on the host */
15027 	lpfc_scsi_dev_block(phba);
15028 
15029 	/* stop all timers */
15030 	lpfc_stop_hba_timers(phba);
15031 
15032 	/* Clean up all driver's outstanding I/Os */
15033 	lpfc_sli_flush_io_rings(phba);
15034 }
15035 
15036 /**
15037  * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
15038  * @pdev: pointer to PCI device.
15039  * @state: the current PCI connection state.
15040  *
15041  * This routine is called from the PCI subsystem for error handling to device
15042  * with SLI-4 interface spec. This function is called by the PCI subsystem
15043  * after a PCI bus error affecting this device has been detected. When this
15044  * function is invoked, it will need to stop all the I/Os and interrupt(s)
15045  * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET
15046  * for the PCI subsystem to perform proper recovery as desired.
15047  *
15048  * Return codes
15049  * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
15050  * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
15051  **/
15052 static pci_ers_result_t
15053 lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
15054 {
15055 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
15056 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15057 
15058 	switch (state) {
15059 	case pci_channel_io_normal:
15060 		/* Non-fatal error, prepare for recovery */
15061 		lpfc_sli4_prep_dev_for_recover(phba);
15062 		return PCI_ERS_RESULT_CAN_RECOVER;
15063 	case pci_channel_io_frozen:
15064 		phba->hba_flag |= HBA_PCI_ERR;
15065 		/* Fatal error, prepare for slot reset */
15066 		lpfc_sli4_prep_dev_for_reset(phba);
15067 		return PCI_ERS_RESULT_NEED_RESET;
15068 	case pci_channel_io_perm_failure:
15069 		phba->hba_flag |= HBA_PCI_ERR;
15070 		/* Permanent failure, prepare for device down */
15071 		lpfc_sli4_prep_dev_for_perm_failure(phba);
15072 		return PCI_ERS_RESULT_DISCONNECT;
15073 	default:
15074 		phba->hba_flag |= HBA_PCI_ERR;
15075 		/* Unknown state, prepare and request slot reset */
15076 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15077 				"2825 Unknown PCI error state: x%x\n", state);
15078 		lpfc_sli4_prep_dev_for_reset(phba);
15079 		return PCI_ERS_RESULT_NEED_RESET;
15080 	}
15081 }
15082 
15083 /**
15084  * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch
15085  * @pdev: pointer to PCI device.
15086  *
15087  * This routine is called from the PCI subsystem for error handling to device
15088  * with SLI-4 interface spec. It is called after PCI bus has been reset to
15089  * restart the PCI card from scratch, as if from a cold-boot. During the
15090  * PCI subsystem error recovery, after the driver returns
15091  * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
15092  * recovery and then call this routine before calling the .resume method to
15093  * recover the device. This function will initialize the HBA device, enable
15094  * the interrupt, but it will just put the HBA to offline state without
15095  * passing any I/O traffic.
15096  *
15097  * Return codes
15098  * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
15099  * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
15100  */
15101 static pci_ers_result_t
15102 lpfc_io_slot_reset_s4(struct pci_dev *pdev)
15103 {
15104 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
15105 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15106 	struct lpfc_sli *psli = &phba->sli;
15107 	uint32_t intr_mode;
15108 
15109 	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
15110 	if (pci_enable_device_mem(pdev)) {
15111 		printk(KERN_ERR "lpfc: Cannot re-enable "
15112 			"PCI device after reset.\n");
15113 		return PCI_ERS_RESULT_DISCONNECT;
15114 	}
15115 
15116 	pci_restore_state(pdev);
15117 
15118 	phba->hba_flag &= ~HBA_PCI_ERR;
15119 	/*
15120 	 * As the new kernel behavior of pci_restore_state() API call clears
15121 	 * device saved_state flag, need to save the restored state again.
15122 	 */
15123 	pci_save_state(pdev);
15124 
15125 	if (pdev->is_busmaster)
15126 		pci_set_master(pdev);
15127 
15128 	spin_lock_irq(&phba->hbalock);
15129 	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
15130 	spin_unlock_irq(&phba->hbalock);
15131 
15132 	/* Init cpu_map array */
15133 	lpfc_cpu_map_array_init(phba);
15134 	/* Configure and enable interrupt */
15135 	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
15136 	if (intr_mode == LPFC_INTR_ERROR) {
15137 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15138 				"2824 Cannot re-enable interrupt after "
15139 				"slot reset.\n");
15140 		return PCI_ERS_RESULT_DISCONNECT;
15141 	} else
15142 		phba->intr_mode = intr_mode;
15143 	lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann);
15144 
15145 	/* Log the current active interrupt mode */
15146 	lpfc_log_intr_mode(phba, phba->intr_mode);
15147 
15148 	return PCI_ERS_RESULT_RECOVERED;
15149 }
15150 
15151 /**
15152  * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
15153  * @pdev: pointer to PCI device
15154  *
15155  * This routine is called from the PCI subsystem for error handling to device
15156  * with SLI-4 interface spec. It is called when kernel error recovery tells
15157  * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
15158  * error recovery. After this call, traffic can start to flow from this device
15159  * again.
15160  **/
15161 static void
15162 lpfc_io_resume_s4(struct pci_dev *pdev)
15163 {
15164 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
15165 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15166 
15167 	/*
15168 	 * In case of slot reset, as function reset is performed through
15169 	 * mailbox command which needs DMA to be enabled, this operation
15170 	 * has to be moved to the io resume phase. Taking device offline
15171 	 * will perform the necessary cleanup.
15172 	 */
15173 	if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
15174 		/* Perform device reset */
15175 		lpfc_offline_prep(phba, LPFC_MBX_WAIT);
15176 		lpfc_offline(phba);
15177 		lpfc_sli_brdrestart(phba);
15178 		/* Bring the device back online */
15179 		lpfc_online(phba);
15180 	}
15181 }
15182 
15183 /**
15184  * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
15185  * @pdev: pointer to PCI device
15186  * @pid: pointer to PCI device identifier
15187  *
15188  * This routine is to be registered to the kernel's PCI subsystem. When an
15189  * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks
15190  * at PCI device-specific information of the device and driver to see if the
15191  * driver state that it can support this kind of device. If the match is
15192  * successful, the driver core invokes this routine. This routine dispatches
15193  * the action to the proper SLI-3 or SLI-4 device probing routine, which will
15194  * do all the initialization that it needs to do to handle the HBA device
15195  * properly.
15196  *
15197  * Return code
15198  * 	0 - driver can claim the device
15199  * 	negative value - driver can not claim the device
15200  **/
15201 static int
15202 lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
15203 {
15204 	int rc;
15205 	struct lpfc_sli_intf intf;
15206 
15207 	if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
15208 		return -ENODEV;
15209 
15210 	if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
15211 	    (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4))
15212 		rc = lpfc_pci_probe_one_s4(pdev, pid);
15213 	else
15214 		rc = lpfc_pci_probe_one_s3(pdev, pid);
15215 
15216 	return rc;
15217 }
15218 
15219 /**
15220  * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
15221  * @pdev: pointer to PCI device
15222  *
15223  * This routine is to be registered to the kernel's PCI subsystem. When an
15224  * Emulex HBA is removed from PCI bus, the driver core invokes this routine.
15225  * This routine dispatches the action to the proper SLI-3 or SLI-4 device
15226  * remove routine, which will perform all the necessary cleanup for the
15227  * device to be removed from the PCI subsystem properly.
15228  **/
15229 static void
15230 lpfc_pci_remove_one(struct pci_dev *pdev)
15231 {
15232 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
15233 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15234 
15235 	switch (phba->pci_dev_grp) {
15236 	case LPFC_PCI_DEV_LP:
15237 		lpfc_pci_remove_one_s3(pdev);
15238 		break;
15239 	case LPFC_PCI_DEV_OC:
15240 		lpfc_pci_remove_one_s4(pdev);
15241 		break;
15242 	default:
15243 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15244 				"1424 Invalid PCI device group: 0x%x\n",
15245 				phba->pci_dev_grp);
15246 		break;
15247 	}
15248 	return;
15249 }
15250 
15251 /**
15252  * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
15253  * @dev: pointer to device
15254  *
15255  * This routine is to be registered to the kernel's PCI subsystem to support
15256  * system Power Management (PM). When PM invokes this method, it dispatches
15257  * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
15258  * suspend the device.
15259  *
15260  * Return code
15261  * 	0 - driver suspended the device
15262  * 	Error otherwise
15263  **/
15264 static int __maybe_unused
15265 lpfc_pci_suspend_one(struct device *dev)
15266 {
15267 	struct Scsi_Host *shost = dev_get_drvdata(dev);
15268 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15269 	int rc = -ENODEV;
15270 
15271 	switch (phba->pci_dev_grp) {
15272 	case LPFC_PCI_DEV_LP:
15273 		rc = lpfc_pci_suspend_one_s3(dev);
15274 		break;
15275 	case LPFC_PCI_DEV_OC:
15276 		rc = lpfc_pci_suspend_one_s4(dev);
15277 		break;
15278 	default:
15279 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15280 				"1425 Invalid PCI device group: 0x%x\n",
15281 				phba->pci_dev_grp);
15282 		break;
15283 	}
15284 	return rc;
15285 }
15286 
15287 /**
15288  * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
15289  * @dev: pointer to device
15290  *
15291  * This routine is to be registered to the kernel's PCI subsystem to support
15292  * system Power Management (PM). When PM invokes this method, it dispatches
15293  * the action to the proper SLI-3 or SLI-4 device resume routine, which will
15294  * resume the device.
15295  *
15296  * Return code
15297  * 	0 - driver suspended the device
15298  * 	Error otherwise
15299  **/
15300 static int __maybe_unused
15301 lpfc_pci_resume_one(struct device *dev)
15302 {
15303 	struct Scsi_Host *shost = dev_get_drvdata(dev);
15304 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15305 	int rc = -ENODEV;
15306 
15307 	switch (phba->pci_dev_grp) {
15308 	case LPFC_PCI_DEV_LP:
15309 		rc = lpfc_pci_resume_one_s3(dev);
15310 		break;
15311 	case LPFC_PCI_DEV_OC:
15312 		rc = lpfc_pci_resume_one_s4(dev);
15313 		break;
15314 	default:
15315 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15316 				"1426 Invalid PCI device group: 0x%x\n",
15317 				phba->pci_dev_grp);
15318 		break;
15319 	}
15320 	return rc;
15321 }
15322 
15323 /**
15324  * lpfc_io_error_detected - lpfc method for handling PCI I/O error
15325  * @pdev: pointer to PCI device.
15326  * @state: the current PCI connection state.
15327  *
15328  * This routine is registered to the PCI subsystem for error handling. This
15329  * function is called by the PCI subsystem after a PCI bus error affecting
15330  * this device has been detected. When this routine is invoked, it dispatches
15331  * the action to the proper SLI-3 or SLI-4 device error detected handling
15332  * routine, which will perform the proper error detected operation.
15333  *
15334  * Return codes
15335  * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
15336  * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
15337  **/
15338 static pci_ers_result_t
15339 lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
15340 {
15341 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
15342 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15343 	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
15344 
15345 	if (phba->link_state == LPFC_HBA_ERROR &&
15346 	    phba->hba_flag & HBA_IOQ_FLUSH)
15347 		return PCI_ERS_RESULT_NEED_RESET;
15348 
15349 	switch (phba->pci_dev_grp) {
15350 	case LPFC_PCI_DEV_LP:
15351 		rc = lpfc_io_error_detected_s3(pdev, state);
15352 		break;
15353 	case LPFC_PCI_DEV_OC:
15354 		rc = lpfc_io_error_detected_s4(pdev, state);
15355 		break;
15356 	default:
15357 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15358 				"1427 Invalid PCI device group: 0x%x\n",
15359 				phba->pci_dev_grp);
15360 		break;
15361 	}
15362 	return rc;
15363 }
15364 
15365 /**
15366  * lpfc_io_slot_reset - lpfc method for restart PCI dev from scratch
15367  * @pdev: pointer to PCI device.
15368  *
15369  * This routine is registered to the PCI subsystem for error handling. This
15370  * function is called after PCI bus has been reset to restart the PCI card
15371  * from scratch, as if from a cold-boot. When this routine is invoked, it
15372  * dispatches the action to the proper SLI-3 or SLI-4 device reset handling
15373  * routine, which will perform the proper device reset.
15374  *
15375  * Return codes
15376  * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
15377  * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
15378  **/
15379 static pci_ers_result_t
15380 lpfc_io_slot_reset(struct pci_dev *pdev)
15381 {
15382 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
15383 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15384 	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
15385 
15386 	switch (phba->pci_dev_grp) {
15387 	case LPFC_PCI_DEV_LP:
15388 		rc = lpfc_io_slot_reset_s3(pdev);
15389 		break;
15390 	case LPFC_PCI_DEV_OC:
15391 		rc = lpfc_io_slot_reset_s4(pdev);
15392 		break;
15393 	default:
15394 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15395 				"1428 Invalid PCI device group: 0x%x\n",
15396 				phba->pci_dev_grp);
15397 		break;
15398 	}
15399 	return rc;
15400 }
15401 
15402 /**
15403  * lpfc_io_resume - lpfc method for resuming PCI I/O operation
15404  * @pdev: pointer to PCI device
15405  *
15406  * This routine is registered to the PCI subsystem for error handling. It
15407  * is called when kernel error recovery tells the lpfc driver that it is
15408  * OK to resume normal PCI operation after PCI bus error recovery. When
15409  * this routine is invoked, it dispatches the action to the proper SLI-3
15410  * or SLI-4 device io_resume routine, which will resume the device operation.
15411  **/
15412 static void
15413 lpfc_io_resume(struct pci_dev *pdev)
15414 {
15415 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
15416 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15417 
15418 	switch (phba->pci_dev_grp) {
15419 	case LPFC_PCI_DEV_LP:
15420 		lpfc_io_resume_s3(pdev);
15421 		break;
15422 	case LPFC_PCI_DEV_OC:
15423 		lpfc_io_resume_s4(pdev);
15424 		break;
15425 	default:
15426 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15427 				"1429 Invalid PCI device group: 0x%x\n",
15428 				phba->pci_dev_grp);
15429 		break;
15430 	}
15431 	return;
15432 }
15433 
15434 /**
15435  * lpfc_sli4_oas_verify - Verify OAS is supported by this adapter
15436  * @phba: pointer to lpfc hba data structure.
15437  *
15438  * This routine checks to see if OAS is supported for this adapter. If
15439  * supported, the configure Flash Optimized Fabric flag is set.  Otherwise,
15440  * the enable oas flag is cleared and the pool created for OAS device data
15441  * is destroyed.
15442  *
15443  **/
15444 static void
15445 lpfc_sli4_oas_verify(struct lpfc_hba *phba)
15446 {
15447 
15448 	if (!phba->cfg_EnableXLane)
15449 		return;
15450 
15451 	if (phba->sli4_hba.pc_sli4_params.oas_supported) {
15452 		phba->cfg_fof = 1;
15453 	} else {
15454 		phba->cfg_fof = 0;
15455 		mempool_destroy(phba->device_data_mem_pool);
15456 		phba->device_data_mem_pool = NULL;
15457 	}
15458 
15459 	return;
15460 }
15461 
15462 /**
15463  * lpfc_sli4_ras_init - Verify RAS-FW log is supported by this adapter
15464  * @phba: pointer to lpfc hba data structure.
15465  *
15466  * This routine checks to see if RAS is supported by the adapter. Check the
15467  * function through which RAS support enablement is to be done.
15468  **/
15469 void
15470 lpfc_sli4_ras_init(struct lpfc_hba *phba)
15471 {
15472 	/* if ASIC_GEN_NUM >= 0xC) */
15473 	if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
15474 		    LPFC_SLI_INTF_IF_TYPE_6) ||
15475 	    (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
15476 		    LPFC_SLI_INTF_FAMILY_G6)) {
15477 		phba->ras_fwlog.ras_hwsupport = true;
15478 		if (phba->cfg_ras_fwlog_func == PCI_FUNC(phba->pcidev->devfn) &&
15479 		    phba->cfg_ras_fwlog_buffsize)
15480 			phba->ras_fwlog.ras_enabled = true;
15481 		else
15482 			phba->ras_fwlog.ras_enabled = false;
15483 	} else {
15484 		phba->ras_fwlog.ras_hwsupport = false;
15485 	}
15486 }
15487 
15488 
15489 MODULE_DEVICE_TABLE(pci, lpfc_id_table);
15490 
15491 static const struct pci_error_handlers lpfc_err_handler = {
15492 	.error_detected = lpfc_io_error_detected,
15493 	.slot_reset = lpfc_io_slot_reset,
15494 	.resume = lpfc_io_resume,
15495 };
15496 
15497 static SIMPLE_DEV_PM_OPS(lpfc_pci_pm_ops_one,
15498 			 lpfc_pci_suspend_one,
15499 			 lpfc_pci_resume_one);
15500 
15501 static struct pci_driver lpfc_driver = {
15502 	.name		= LPFC_DRIVER_NAME,
15503 	.id_table	= lpfc_id_table,
15504 	.probe		= lpfc_pci_probe_one,
15505 	.remove		= lpfc_pci_remove_one,
15506 	.shutdown	= lpfc_pci_remove_one,
15507 	.driver.pm	= &lpfc_pci_pm_ops_one,
15508 	.err_handler    = &lpfc_err_handler,
15509 };
15510 
15511 static const struct file_operations lpfc_mgmt_fop = {
15512 	.owner = THIS_MODULE,
15513 };
15514 
15515 static struct miscdevice lpfc_mgmt_dev = {
15516 	.minor = MISC_DYNAMIC_MINOR,
15517 	.name = "lpfcmgmt",
15518 	.fops = &lpfc_mgmt_fop,
15519 };
15520 
15521 /**
15522  * lpfc_init - lpfc module initialization routine
15523  *
15524  * This routine is to be invoked when the lpfc module is loaded into the
15525  * kernel. The special kernel macro module_init() is used to indicate the
15526  * role of this routine to the kernel as lpfc module entry point.
15527  *
15528  * Return codes
15529  *   0 - successful
15530  *   -ENOMEM - FC attach transport failed
15531  *   all others - failed
15532  */
15533 static int __init
15534 lpfc_init(void)
15535 {
15536 	int error = 0;
15537 
15538 	pr_info(LPFC_MODULE_DESC "\n");
15539 	pr_info(LPFC_COPYRIGHT "\n");
15540 
15541 	error = misc_register(&lpfc_mgmt_dev);
15542 	if (error)
15543 		printk(KERN_ERR "Could not register lpfcmgmt device, "
15544 			"misc_register returned with status %d", error);
15545 
15546 	error = -ENOMEM;
15547 	lpfc_transport_functions.vport_create = lpfc_vport_create;
15548 	lpfc_transport_functions.vport_delete = lpfc_vport_delete;
15549 	lpfc_transport_template =
15550 				fc_attach_transport(&lpfc_transport_functions);
15551 	if (lpfc_transport_template == NULL)
15552 		goto unregister;
15553 	lpfc_vport_transport_template =
15554 		fc_attach_transport(&lpfc_vport_transport_functions);
15555 	if (lpfc_vport_transport_template == NULL) {
15556 		fc_release_transport(lpfc_transport_template);
15557 		goto unregister;
15558 	}
15559 	lpfc_wqe_cmd_template();
15560 	lpfc_nvmet_cmd_template();
15561 
15562 	/* Initialize in case vector mapping is needed */
15563 	lpfc_present_cpu = num_present_cpus();
15564 
15565 	error = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
15566 					"lpfc/sli4:online",
15567 					lpfc_cpu_online, lpfc_cpu_offline);
15568 	if (error < 0)
15569 		goto cpuhp_failure;
15570 	lpfc_cpuhp_state = error;
15571 
15572 	error = pci_register_driver(&lpfc_driver);
15573 	if (error)
15574 		goto unwind;
15575 
15576 	return error;
15577 
15578 unwind:
15579 	cpuhp_remove_multi_state(lpfc_cpuhp_state);
15580 cpuhp_failure:
15581 	fc_release_transport(lpfc_transport_template);
15582 	fc_release_transport(lpfc_vport_transport_template);
15583 unregister:
15584 	misc_deregister(&lpfc_mgmt_dev);
15585 
15586 	return error;
15587 }
15588 
15589 void lpfc_dmp_dbg(struct lpfc_hba *phba)
15590 {
15591 	unsigned int start_idx;
15592 	unsigned int dbg_cnt;
15593 	unsigned int temp_idx;
15594 	int i;
15595 	int j = 0;
15596 	unsigned long rem_nsec;
15597 
15598 	if (atomic_cmpxchg(&phba->dbg_log_dmping, 0, 1) != 0)
15599 		return;
15600 
15601 	start_idx = (unsigned int)atomic_read(&phba->dbg_log_idx) % DBG_LOG_SZ;
15602 	dbg_cnt = (unsigned int)atomic_read(&phba->dbg_log_cnt);
15603 	if (!dbg_cnt)
15604 		goto out;
15605 	temp_idx = start_idx;
15606 	if (dbg_cnt >= DBG_LOG_SZ) {
15607 		dbg_cnt = DBG_LOG_SZ;
15608 		temp_idx -= 1;
15609 	} else {
15610 		if ((start_idx + dbg_cnt) > (DBG_LOG_SZ - 1)) {
15611 			temp_idx = (start_idx + dbg_cnt) % DBG_LOG_SZ;
15612 		} else {
15613 			if (start_idx < dbg_cnt)
15614 				start_idx = DBG_LOG_SZ - (dbg_cnt - start_idx);
15615 			else
15616 				start_idx -= dbg_cnt;
15617 		}
15618 	}
15619 	dev_info(&phba->pcidev->dev, "start %d end %d cnt %d\n",
15620 		 start_idx, temp_idx, dbg_cnt);
15621 
15622 	for (i = 0; i < dbg_cnt; i++) {
15623 		if ((start_idx + i) < DBG_LOG_SZ)
15624 			temp_idx = (start_idx + i) % DBG_LOG_SZ;
15625 		else
15626 			temp_idx = j++;
15627 		rem_nsec = do_div(phba->dbg_log[temp_idx].t_ns, NSEC_PER_SEC);
15628 		dev_info(&phba->pcidev->dev, "%d: [%5lu.%06lu] %s",
15629 			 temp_idx,
15630 			 (unsigned long)phba->dbg_log[temp_idx].t_ns,
15631 			 rem_nsec / 1000,
15632 			 phba->dbg_log[temp_idx].log);
15633 	}
15634 out:
15635 	atomic_set(&phba->dbg_log_cnt, 0);
15636 	atomic_set(&phba->dbg_log_dmping, 0);
15637 }
15638 
15639 __printf(2, 3)
15640 void lpfc_dbg_print(struct lpfc_hba *phba, const char *fmt, ...)
15641 {
15642 	unsigned int idx;
15643 	va_list args;
15644 	int dbg_dmping = atomic_read(&phba->dbg_log_dmping);
15645 	struct va_format vaf;
15646 
15647 
15648 	va_start(args, fmt);
15649 	if (unlikely(dbg_dmping)) {
15650 		vaf.fmt = fmt;
15651 		vaf.va = &args;
15652 		dev_info(&phba->pcidev->dev, "%pV", &vaf);
15653 		va_end(args);
15654 		return;
15655 	}
15656 	idx = (unsigned int)atomic_fetch_add(1, &phba->dbg_log_idx) %
15657 		DBG_LOG_SZ;
15658 
15659 	atomic_inc(&phba->dbg_log_cnt);
15660 
15661 	vscnprintf(phba->dbg_log[idx].log,
15662 		   sizeof(phba->dbg_log[idx].log), fmt, args);
15663 	va_end(args);
15664 
15665 	phba->dbg_log[idx].t_ns = local_clock();
15666 }
15667 
15668 /**
15669  * lpfc_exit - lpfc module removal routine
15670  *
15671  * This routine is invoked when the lpfc module is removed from the kernel.
15672  * The special kernel macro module_exit() is used to indicate the role of
15673  * this routine to the kernel as lpfc module exit point.
15674  */
15675 static void __exit
15676 lpfc_exit(void)
15677 {
15678 	misc_deregister(&lpfc_mgmt_dev);
15679 	pci_unregister_driver(&lpfc_driver);
15680 	cpuhp_remove_multi_state(lpfc_cpuhp_state);
15681 	fc_release_transport(lpfc_transport_template);
15682 	fc_release_transport(lpfc_vport_transport_template);
15683 	idr_destroy(&lpfc_hba_index);
15684 }
15685 
15686 module_init(lpfc_init);
15687 module_exit(lpfc_exit);
15688 MODULE_LICENSE("GPL");
15689 MODULE_DESCRIPTION(LPFC_MODULE_DESC);
15690 MODULE_AUTHOR("Broadcom");
15691 MODULE_VERSION("0:" LPFC_DRIVER_VERSION);
15692