xref: /openbmc/linux/drivers/scsi/lpfc/lpfc_init.c (revision fd589a8f)
1 /*******************************************************************
2  * This file is part of the Emulex Linux Device Driver for         *
3  * Fibre Channel Host Bus Adapters.                                *
4  * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
5  * EMULEX and SLI are trademarks of Emulex.                        *
6  * www.emulex.com                                                  *
7  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
8  *                                                                 *
9  * This program is free software; you can redistribute it and/or   *
10  * modify it under the terms of version 2 of the GNU General       *
11  * Public License as published by the Free Software Foundation.    *
12  * This program is distributed in the hope that it will be useful. *
13  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
14  * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
15  * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
16  * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17  * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
18  * more details, a copy of which can be found in the file COPYING  *
19  * included with this package.                                     *
20  *******************************************************************/
21 
22 #include <linux/blkdev.h>
23 #include <linux/delay.h>
24 #include <linux/dma-mapping.h>
25 #include <linux/idr.h>
26 #include <linux/interrupt.h>
27 #include <linux/kthread.h>
28 #include <linux/pci.h>
29 #include <linux/spinlock.h>
30 #include <linux/ctype.h>
31 
32 #include <scsi/scsi.h>
33 #include <scsi/scsi_device.h>
34 #include <scsi/scsi_host.h>
35 #include <scsi/scsi_transport_fc.h>
36 
37 #include "lpfc_hw4.h"
38 #include "lpfc_hw.h"
39 #include "lpfc_sli.h"
40 #include "lpfc_sli4.h"
41 #include "lpfc_nl.h"
42 #include "lpfc_disc.h"
43 #include "lpfc_scsi.h"
44 #include "lpfc.h"
45 #include "lpfc_logmsg.h"
46 #include "lpfc_crtn.h"
47 #include "lpfc_vport.h"
48 #include "lpfc_version.h"
49 
50 char *_dump_buf_data;
51 unsigned long _dump_buf_data_order;
52 char *_dump_buf_dif;
53 unsigned long _dump_buf_dif_order;
54 spinlock_t _dump_buf_lock;
55 
56 static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
57 static int lpfc_post_rcv_buf(struct lpfc_hba *);
58 static int lpfc_sli4_queue_create(struct lpfc_hba *);
59 static void lpfc_sli4_queue_destroy(struct lpfc_hba *);
60 static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
61 static int lpfc_setup_endian_order(struct lpfc_hba *);
62 static int lpfc_sli4_read_config(struct lpfc_hba *);
63 static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
64 static void lpfc_free_sgl_list(struct lpfc_hba *);
65 static int lpfc_init_sgl_list(struct lpfc_hba *);
66 static int lpfc_init_active_sgl_array(struct lpfc_hba *);
67 static void lpfc_free_active_sgl(struct lpfc_hba *);
68 static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
69 static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
70 static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
71 static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
72 static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
73 
74 static struct scsi_transport_template *lpfc_transport_template = NULL;
75 static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
76 static DEFINE_IDR(lpfc_hba_index);
77 
78 /**
79  * lpfc_config_port_prep - Perform lpfc initialization prior to config port
80  * @phba: pointer to lpfc hba data structure.
81  *
82  * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
83  * mailbox command. It retrieves the revision information from the HBA and
84  * collects the Vital Product Data (VPD) about the HBA for preparing the
85  * configuration of the HBA.
86  *
87  * Return codes:
88  *   0 - success.
89  *   -ERESTART - requests the SLI layer to reset the HBA and try again.
90  *   Any other value - indicates an error.
91  **/
92 int
93 lpfc_config_port_prep(struct lpfc_hba *phba)
94 {
95 	lpfc_vpd_t *vp = &phba->vpd;
96 	int i = 0, rc;
97 	LPFC_MBOXQ_t *pmb;
98 	MAILBOX_t *mb;
99 	char *lpfc_vpd_data = NULL;
100 	uint16_t offset = 0;
101 	static char licensed[56] =
102 		    "key unlock for use with gnu public licensed code only\0";
103 	static int init_key = 1;
104 
105 	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
106 	if (!pmb) {
107 		phba->link_state = LPFC_HBA_ERROR;
108 		return -ENOMEM;
109 	}
110 
111 	mb = &pmb->u.mb;
112 	phba->link_state = LPFC_INIT_MBX_CMDS;
113 
114 	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
115 		if (init_key) {
116 			uint32_t *ptext = (uint32_t *) licensed;
117 
118 			for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
119 				*ptext = cpu_to_be32(*ptext);
120 			init_key = 0;
121 		}
122 
123 		lpfc_read_nv(phba, pmb);
124 		memset((char*)mb->un.varRDnvp.rsvd3, 0,
125 			sizeof (mb->un.varRDnvp.rsvd3));
126 		memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
127 			 sizeof (licensed));
128 
129 		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
130 
131 		if (rc != MBX_SUCCESS) {
132 			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
133 					"0324 Config Port initialization "
134 					"error, mbxCmd x%x READ_NVPARM, "
135 					"mbxStatus x%x\n",
136 					mb->mbxCommand, mb->mbxStatus);
137 			mempool_free(pmb, phba->mbox_mem_pool);
138 			return -ERESTART;
139 		}
140 		memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
141 		       sizeof(phba->wwnn));
142 		memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
143 		       sizeof(phba->wwpn));
144 	}
145 
146 	phba->sli3_options = 0x0;
147 
148 	/* Setup and issue mailbox READ REV command */
149 	lpfc_read_rev(phba, pmb);
150 	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
151 	if (rc != MBX_SUCCESS) {
152 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
153 				"0439 Adapter failed to init, mbxCmd x%x "
154 				"READ_REV, mbxStatus x%x\n",
155 				mb->mbxCommand, mb->mbxStatus);
156 		mempool_free( pmb, phba->mbox_mem_pool);
157 		return -ERESTART;
158 	}
159 
160 
161 	/*
162 	 * The value of rr must be 1 since the driver set the cv field to 1.
163 	 * This setting requires the FW to set all revision fields.
164 	 */
165 	if (mb->un.varRdRev.rr == 0) {
166 		vp->rev.rBit = 0;
167 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
168 				"0440 Adapter failed to init, READ_REV has "
169 				"missing revision information.\n");
170 		mempool_free(pmb, phba->mbox_mem_pool);
171 		return -ERESTART;
172 	}
173 
174 	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
175 		mempool_free(pmb, phba->mbox_mem_pool);
176 		return -EINVAL;
177 	}
178 
179 	/* Save information as VPD data */
180 	vp->rev.rBit = 1;
181 	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
182 	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
183 	memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
184 	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
185 	memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
186 	vp->rev.biuRev = mb->un.varRdRev.biuRev;
187 	vp->rev.smRev = mb->un.varRdRev.smRev;
188 	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
189 	vp->rev.endecRev = mb->un.varRdRev.endecRev;
190 	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
191 	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
192 	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
193 	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
194 	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
195 	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;
196 
197 	/* If the sli feature level is less than 9, we must
198 	 * tear down all RPIs and VPIs on link down if NPIV
199 	 * is enabled.
200 	 */
201 	if (vp->rev.feaLevelHigh < 9)
202 		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;
203 
204 	if (lpfc_is_LC_HBA(phba->pcidev->device))
205 		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
206 						sizeof (phba->RandomData));
207 
208 	/* Get adapter VPD information */
209 	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
210 	if (!lpfc_vpd_data)
211 		goto out_free_mbox;
212 
213 	do {
214 		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
215 		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
216 
217 		if (rc != MBX_SUCCESS) {
218 			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
219 					"0441 VPD not present on adapter, "
220 					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
221 					mb->mbxCommand, mb->mbxStatus);
222 			mb->un.varDmp.word_cnt = 0;
223 		}
224 		/* Dump mem may return a zero when finished, or we got a
225 		 * mailbox error; either way we are done.
226 		 */
227 		if (mb->un.varDmp.word_cnt == 0)
228 			break;
229 		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
230 			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
231 		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
232 				      lpfc_vpd_data + offset,
233 				      mb->un.varDmp.word_cnt);
234 		offset += mb->un.varDmp.word_cnt;
235 	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
236 	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);
237 
238 	kfree(lpfc_vpd_data);
239 out_free_mbox:
240 	mempool_free(pmb, phba->mbox_mem_pool);
241 	return 0;
242 }
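/*
 * A minimal sketch of the polled mailbox pattern used throughout the
 * routine above (illustrative only, not driver code; command-specific
 * setup and error handling are elided):
 *
 *	LPFC_MBOXQ_t *pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	lpfc_read_rev(phba, pmb);                       build the command
 *	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);  issue synchronously
 *	if (rc != MBX_SUCCESS)
 *		handle mb->mbxStatus;                   log and bail out
 *	mempool_free(pmb, phba->mbox_mem_pool);         return to the pool
 */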
243 
244 /**
245  * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
246  * @phba: pointer to lpfc hba data structure.
247  * @pmboxq: pointer to the driver internal queue element for mailbox command.
248  *
249  * This is the completion handler for the driver's configure asynchronous
250  * event mailbox command to the device. If the mailbox command returns
251  * successfully, it sets the internal async event support flag to 1;
252  * otherwise, it sets the flag to 0.
253  **/
254 static void
255 lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
256 {
257 	if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
258 		phba->temp_sensor_support = 1;
259 	else
260 		phba->temp_sensor_support = 0;
261 	mempool_free(pmboxq, phba->mbox_mem_pool);
262 	return;
263 }
264 
265 /**
266  * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
267  * @phba: pointer to lpfc hba data structure.
268  * @pmboxq: pointer to the driver internal queue element for mailbox command.
269  *
270  * This is the completion handler for the dump mailbox command for getting
271  * wake up parameters. When this command completes, the response contains
272  * the option ROM version of the HBA. This function translates the version
273  * number into a human readable string and stores it in OptionROMVersion.
274  **/
275 static void
276 lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
277 {
278 	struct prog_id *prg;
279 	uint32_t prog_id_word;
280 	char dist = ' ';
281 	/* character array used for decoding dist type. */
282 	char dist_char[] = "nabx";
283 
284 	if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
285 		mempool_free(pmboxq, phba->mbox_mem_pool);
286 		return;
287 	}
288 
289 	prg = (struct prog_id *) &prog_id_word;
290 
291 	/* word 7 contains the option ROM version */
292 	prog_id_word = pmboxq->u.mb.un.varWords[7];
293 
294 	/* Decode the Option rom version word to a readable string */
295 	if (prg->dist < 4)
296 		dist = dist_char[prg->dist];
297 
298 	if ((prg->dist == 3) && (prg->num == 0))
299 		sprintf(phba->OptionROMVersion, "%d.%d%d",
300 			prg->ver, prg->rev, prg->lev);
301 	else
302 		sprintf(phba->OptionROMVersion, "%d.%d%d%c%d",
303 			prg->ver, prg->rev, prg->lev,
304 			dist, prg->num);
305 	mempool_free(pmboxq, phba->mbox_mem_pool);
306 	return;
307 }
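/*
 * Worked example of the decoding above, for a hypothetical word 7 value:
 * with prg->ver = 1, prg->rev = 0, prg->lev = 2, prg->dist = 1 and
 * prg->num = 4, dist decodes to dist_char[1] == 'a' and OptionROMVersion
 * becomes "1.02a4". When prg->dist == 3 and prg->num == 0 the trailing
 * dist/num pair is omitted, giving "1.02".
 */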
308 
309 /**
310  * lpfc_config_port_post - Perform lpfc initialization after config port
311  * @phba: pointer to lpfc hba data structure.
312  *
313  * This routine will do LPFC initialization after the CONFIG_PORT mailbox
314  * command call. It performs all internal resource and state setups on the
315  * port: post IOCB buffers, enable appropriate host interrupt attentions,
316  * ELS ring timers, etc.
317  *
318  * Return codes
319  *   0 - success.
320  *   Any other value - error.
321  **/
322 int
323 lpfc_config_port_post(struct lpfc_hba *phba)
324 {
325 	struct lpfc_vport *vport = phba->pport;
326 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
327 	LPFC_MBOXQ_t *pmb;
328 	MAILBOX_t *mb;
329 	struct lpfc_dmabuf *mp;
330 	struct lpfc_sli *psli = &phba->sli;
331 	uint32_t status, timeout;
332 	int i, j;
333 	int rc;
334 
335 	spin_lock_irq(&phba->hbalock);
336 	/*
337 	 * If the Config port completed correctly, the HBA is no
338 	 * longer overheated.
339 	 */
340 	if (phba->over_temp_state == HBA_OVER_TEMP)
341 		phba->over_temp_state = HBA_NORMAL_TEMP;
342 	spin_unlock_irq(&phba->hbalock);
343 
344 	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
345 	if (!pmb) {
346 		phba->link_state = LPFC_HBA_ERROR;
347 		return -ENOMEM;
348 	}
349 	mb = &pmb->u.mb;
350 
351 	/* Get login parameters for NID.  */
352 	lpfc_read_sparam(phba, pmb, 0);
353 	pmb->vport = vport;
354 	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
355 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
356 				"0448 Adapter failed init, mbxCmd x%x "
357 				"READ_SPARM mbxStatus x%x\n",
358 				mb->mbxCommand, mb->mbxStatus);
359 		phba->link_state = LPFC_HBA_ERROR;
360 		mp = (struct lpfc_dmabuf *) pmb->context1;
361 		mempool_free( pmb, phba->mbox_mem_pool);
362 		lpfc_mbuf_free(phba, mp->virt, mp->phys);
363 		kfree(mp);
364 		return -EIO;
365 	}
366 
367 	mp = (struct lpfc_dmabuf *) pmb->context1;
368 
369 	memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
370 	lpfc_mbuf_free(phba, mp->virt, mp->phys);
371 	kfree(mp);
372 	pmb->context1 = NULL;
373 
374 	if (phba->cfg_soft_wwnn)
375 		u64_to_wwn(phba->cfg_soft_wwnn,
376 			   vport->fc_sparam.nodeName.u.wwn);
377 	if (phba->cfg_soft_wwpn)
378 		u64_to_wwn(phba->cfg_soft_wwpn,
379 			   vport->fc_sparam.portName.u.wwn);
380 	memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
381 	       sizeof (struct lpfc_name));
382 	memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
383 	       sizeof (struct lpfc_name));
384 
385 	/* Update the fc_host data structures with new wwn. */
386 	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
387 	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
388 	fc_host_max_npiv_vports(shost) = phba->max_vpi;
389 
390 	/* If no serial number in VPD data, use low 6 bytes of WWNN */
391 	/* This should be consolidated into parse_vpd ? - mr */
392 	if (phba->SerialNumber[0] == 0) {
393 		uint8_t *outptr;
394 
395 		outptr = &vport->fc_nodename.u.s.IEEE[0];
396 		for (i = 0; i < 12; i++) {
397 			status = *outptr++;
398 			j = ((status & 0xf0) >> 4);
399 			if (j <= 9)
400 				phba->SerialNumber[i] =
401 				    (char)((uint8_t) 0x30 + (uint8_t) j);
402 			else
403 				phba->SerialNumber[i] =
404 				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
405 			i++;
406 			j = (status & 0xf);
407 			if (j <= 9)
408 				phba->SerialNumber[i] =
409 				    (char)((uint8_t) 0x30 + (uint8_t) j);
410 			else
411 				phba->SerialNumber[i] =
412 				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
413 		}
414 	}
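	/*
	 * The loop above expands each WWNN byte into two characters: a
	 * nibble value of 0-9 maps to '0'-'9' (0x30 + j) and 10-15 maps
	 * to 'a'-'f' (0x61 + j - 10), i.e. a lowercase hex encoding of
	 * the low 6 bytes of the WWNN.
	 */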
415 
416 	lpfc_read_config(phba, pmb);
417 	pmb->vport = vport;
418 	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
419 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
420 				"0453 Adapter failed to init, mbxCmd x%x "
421 				"READ_CONFIG, mbxStatus x%x\n",
422 				mb->mbxCommand, mb->mbxStatus);
423 		phba->link_state = LPFC_HBA_ERROR;
424 		mempool_free( pmb, phba->mbox_mem_pool);
425 		return -EIO;
426 	}
427 
428 	/* Check if the port is disabled */
429 	lpfc_sli_read_link_ste(phba);
430 
431 	/* Reset the DFT_HBA_Q_DEPTH to the max xri  */
432 	if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1))
433 		phba->cfg_hba_queue_depth =
434 			(mb->un.varRdConfig.max_xri + 1) -
435 					lpfc_sli4_get_els_iocb_cnt(phba);
436 
437 	phba->lmt = mb->un.varRdConfig.lmt;
438 
439 	/* Get the default values for Model Name and Description */
440 	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
441 
442 	if ((phba->cfg_link_speed > LINK_SPEED_10G)
443 	    || ((phba->cfg_link_speed == LINK_SPEED_1G)
444 		&& !(phba->lmt & LMT_1Gb))
445 	    || ((phba->cfg_link_speed == LINK_SPEED_2G)
446 		&& !(phba->lmt & LMT_2Gb))
447 	    || ((phba->cfg_link_speed == LINK_SPEED_4G)
448 		&& !(phba->lmt & LMT_4Gb))
449 	    || ((phba->cfg_link_speed == LINK_SPEED_8G)
450 		&& !(phba->lmt & LMT_8Gb))
451 	    || ((phba->cfg_link_speed == LINK_SPEED_10G)
452 		&& !(phba->lmt & LMT_10Gb))) {
453 		/* Reset link speed to auto */
454 		lpfc_printf_log(phba, KERN_WARNING, LOG_LINK_EVENT,
455 			"1302 Invalid speed for this board: "
456 			"Reset link speed to auto: x%x\n",
457 			phba->cfg_link_speed);
458 		phba->cfg_link_speed = LINK_SPEED_AUTO;
459 	}
460 
461 	phba->link_state = LPFC_LINK_DOWN;
462 
463 	/* Only process IOCBs on ELS ring until hba_state is READY */
464 	if (psli->ring[psli->extra_ring].cmdringaddr)
465 		psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT;
466 	if (psli->ring[psli->fcp_ring].cmdringaddr)
467 		psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT;
468 	if (psli->ring[psli->next_ring].cmdringaddr)
469 		psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT;
470 
471 	/* Post receive buffers for desired rings */
472 	if (phba->sli_rev != 3)
473 		lpfc_post_rcv_buf(phba);
474 
475 	/*
476 	 * Configure HBA MSI-X attention conditions to messages if MSI-X mode
477 	 */
478 	if (phba->intr_type == MSIX) {
479 		rc = lpfc_config_msi(phba, pmb);
480 		if (rc) {
481 			mempool_free(pmb, phba->mbox_mem_pool);
482 			return -EIO;
483 		}
484 		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
485 		if (rc != MBX_SUCCESS) {
486 			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
487 					"0352 Config MSI mailbox command "
488 					"failed, mbxCmd x%x, mbxStatus x%x\n",
489 					pmb->u.mb.mbxCommand,
490 					pmb->u.mb.mbxStatus);
491 			mempool_free(pmb, phba->mbox_mem_pool);
492 			return -EIO;
493 		}
494 	}
495 
496 	spin_lock_irq(&phba->hbalock);
497 	/* Initialize ERATT handling flag */
498 	phba->hba_flag &= ~HBA_ERATT_HANDLED;
499 
500 	/* Enable appropriate host interrupts */
501 	status = readl(phba->HCregaddr);
502 	status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
503 	if (psli->num_rings > 0)
504 		status |= HC_R0INT_ENA;
505 	if (psli->num_rings > 1)
506 		status |= HC_R1INT_ENA;
507 	if (psli->num_rings > 2)
508 		status |= HC_R2INT_ENA;
509 	if (psli->num_rings > 3)
510 		status |= HC_R3INT_ENA;
511 
512 	if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
513 	    (phba->cfg_poll & DISABLE_FCP_RING_INT))
514 		status &= ~(HC_R0INT_ENA);
515 
516 	writel(status, phba->HCregaddr);
517 	readl(phba->HCregaddr); /* flush */
518 	spin_unlock_irq(&phba->hbalock);
519 
520 	/* Set up ring-0 (ELS) timer */
521 	timeout = phba->fc_ratov * 2;
522 	mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
523 	/* Set up heart beat (HB) timer */
524 	mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
525 	phba->hb_outstanding = 0;
526 	phba->last_completion_time = jiffies;
527 	/* Set up error attention (ERATT) polling timer */
528 	mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);
529 
530 	if (phba->hba_flag & LINK_DISABLED) {
531 		lpfc_printf_log(phba,
532 			KERN_ERR, LOG_INIT,
533 			"2598 Adapter Link is disabled.\n");
534 		lpfc_down_link(phba, pmb);
535 		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
536 		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
537 		if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
538 			lpfc_printf_log(phba,
539 			KERN_ERR, LOG_INIT,
540 			"2599 Adapter failed to issue DOWN_LINK"
541 			" mbox command rc 0x%x\n", rc);
542 
543 			mempool_free(pmb, phba->mbox_mem_pool);
544 			return -EIO;
545 		}
546 	} else {
547 		lpfc_init_link(phba, pmb, phba->cfg_topology,
548 			phba->cfg_link_speed);
549 		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
550 		lpfc_set_loopback_flag(phba);
551 		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
552 		if (rc != MBX_SUCCESS) {
553 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
554 				"0454 Adapter failed to init, mbxCmd x%x "
555 				"INIT_LINK, mbxStatus x%x\n",
556 				mb->mbxCommand, mb->mbxStatus);
557 
558 			/* Clear all interrupt enable conditions */
559 			writel(0, phba->HCregaddr);
560 			readl(phba->HCregaddr); /* flush */
561 			/* Clear all pending interrupts */
562 			writel(0xffffffff, phba->HAregaddr);
563 			readl(phba->HAregaddr); /* flush */
564 
565 			phba->link_state = LPFC_HBA_ERROR;
566 			if (rc != MBX_BUSY)
567 				mempool_free(pmb, phba->mbox_mem_pool);
568 			return -EIO;
569 		}
570 	}
571 	/* MBOX buffer will be freed in mbox compl */
572 	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
573 	lpfc_config_async(phba, pmb, LPFC_ELS_RING);
574 	pmb->mbox_cmpl = lpfc_config_async_cmpl;
575 	pmb->vport = phba->pport;
576 	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
577 
578 	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
579 		lpfc_printf_log(phba,
580 				KERN_ERR,
581 				LOG_INIT,
582 				"0456 Adapter failed to issue "
583 				"ASYNCEVT_ENABLE mbox status x%x\n",
584 				rc);
585 		mempool_free(pmb, phba->mbox_mem_pool);
586 	}
587 
588 	/* Get Option rom version */
589 	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
590 	lpfc_dump_wakeup_param(phba, pmb);
591 	pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
592 	pmb->vport = phba->pport;
593 	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
594 
595 	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
596 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed "
597 				"to get Option ROM version status x%x\n", rc);
598 		mempool_free(pmb, phba->mbox_mem_pool);
599 	}
600 
601 	return 0;
602 }
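/*
 * Note on the timer arithmetic in the routine above: mod_timer() takes an
 * absolute expiry time in jiffies, so "jiffies + HZ * timeout" fires about
 * timeout seconds from now. For example (hypothetical value), with
 * fc_ratov = 10 the ELS timer is armed for roughly 2 * 10 = 20 seconds
 * later, independent of the kernel's configured HZ.
 */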
603 
604 /**
605  * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
606  * @phba: pointer to lpfc HBA data structure.
607  *
608  * This routine will do LPFC uninitialization before the HBA is reset when
609  * bringing down the SLI Layer.
610  *
611  * Return codes
612  *   0 - success.
613  *   Any other value - error.
614  **/
615 int
616 lpfc_hba_down_prep(struct lpfc_hba *phba)
617 {
618 	struct lpfc_vport **vports;
619 	int i;
620 
621 	if (phba->sli_rev <= LPFC_SLI_REV3) {
622 		/* Disable interrupts */
623 		writel(0, phba->HCregaddr);
624 		readl(phba->HCregaddr); /* flush */
625 	}
626 
627 	if (phba->pport->load_flag & FC_UNLOADING)
628 		lpfc_cleanup_discovery_resources(phba->pport);
629 	else {
630 		vports = lpfc_create_vport_work_array(phba);
631 		if (vports != NULL)
632 			for (i = 0; i <= phba->max_vports &&
633 				vports[i] != NULL; i++)
634 				lpfc_cleanup_discovery_resources(vports[i]);
635 		lpfc_destroy_vport_work_array(phba, vports);
636 	}
637 	return 0;
638 }
639 
640 /**
641  * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
642  * @phba: pointer to lpfc HBA data structure.
643  *
644  * This routine will do uninitialization after the HBA is reset when bringing
645  * down the SLI layer.
646  *
647  * Return codes
648  *   0 - success.
649  *   Any other value - error.
650  **/
651 static int
652 lpfc_hba_down_post_s3(struct lpfc_hba *phba)
653 {
654 	struct lpfc_sli *psli = &phba->sli;
655 	struct lpfc_sli_ring *pring;
656 	struct lpfc_dmabuf *mp, *next_mp;
657 	LIST_HEAD(completions);
658 	int i;
659 
660 	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
661 		lpfc_sli_hbqbuf_free_all(phba);
662 	else {
663 		/* Cleanup preposted buffers on the ELS ring */
664 		pring = &psli->ring[LPFC_ELS_RING];
665 		list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
666 			list_del(&mp->list);
667 			pring->postbufq_cnt--;
668 			lpfc_mbuf_free(phba, mp->virt, mp->phys);
669 			kfree(mp);
670 		}
671 	}
672 
673 	spin_lock_irq(&phba->hbalock);
674 	for (i = 0; i < psli->num_rings; i++) {
675 		pring = &psli->ring[i];
676 
677 		/* At this point in time the HBA is either reset or DOA. Either
678 		 * way, nothing should be on txcmplq as it will NEVER complete.
679 		 */
680 		list_splice_init(&pring->txcmplq, &completions);
681 		pring->txcmplq_cnt = 0;
682 		spin_unlock_irq(&phba->hbalock);
683 
684 		/* Cancel all the IOCBs from the completions list */
685 		lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
686 				      IOERR_SLI_ABORTED);
687 
688 		lpfc_sli_abort_iocb_ring(phba, pring);
689 		spin_lock_irq(&phba->hbalock);
690 	}
691 	spin_unlock_irq(&phba->hbalock);
692 
693 	return 0;
694 }
695 /**
696  * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
697  * @phba: pointer to lpfc HBA data structure.
698  *
699  * This routine will do uninitialization after the HBA is reset when bringing
700  * down the SLI layer.
701  *
702  * Return codes
703  *   0 - success.
704  *   Any other value - error.
705  **/
706 static int
707 lpfc_hba_down_post_s4(struct lpfc_hba *phba)
708 {
709 	struct lpfc_scsi_buf *psb, *psb_next;
710 	LIST_HEAD(aborts);
711 	int ret;
712 	unsigned long iflag = 0;
713 	ret = lpfc_hba_down_post_s3(phba);
714 	if (ret)
715 		return ret;
716 	/* At this point in time the HBA is either reset or DOA. Either
717 	 * way, nothing should be on lpfc_abts_els_sgl_list; it needs to be
718 	 * on the lpfc_sgl_list so that it can either be freed if the
719 	 * driver is unloading or reposted if the driver is restarting
720 	 * the port.
721 	 */
722 	spin_lock_irq(&phba->hbalock);  /* required for lpfc_sgl_list and */
723 					/* scsl_buf_list */
724 	/* abts_sgl_list_lock required because worker thread uses this
725 	 * list.
726 	 */
727 	spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
728 	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
729 			&phba->sli4_hba.lpfc_sgl_list);
730 	spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
731 	/* abts_scsi_buf_list_lock required because worker thread uses this
732 	 * list.
733 	 */
734 	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
735 	list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list,
736 			&aborts);
737 	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
738 	spin_unlock_irq(&phba->hbalock);
739 
740 	list_for_each_entry_safe(psb, psb_next, &aborts, list) {
741 		psb->pCmd = NULL;
742 		psb->status = IOSTAT_SUCCESS;
743 	}
744 	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
745 	list_splice(&aborts, &phba->lpfc_scsi_buf_list);
746 	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
747 	return 0;
748 }
749 
750 /**
751  * lpfc_hba_down_post - Wrapper func for hba down post routine
752  * @phba: pointer to lpfc HBA data structure.
753  *
754  * This routine wraps the actual SLI3 or SLI4 routine for performing
755  * uninitialization after the HBA is reset when bringing down the SLI layer.
756  *
757  * Return codes
758  *   0 - success.
759  *   Any other value - error.
760  **/
761 int
762 lpfc_hba_down_post(struct lpfc_hba *phba)
763 {
764 	return (*phba->lpfc_hba_down_post)(phba);
765 }
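/*
 * Sketch of the jump-table dispatch used by the wrapper above. The function
 * pointer is assumed to be assigned elsewhere during driver setup based on
 * the SLI revision (illustrative assignment, not the code here):
 *
 *	if (phba->sli_rev == LPFC_SLI_REV4)
 *		phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
 *	else
 *		phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
 */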
766 
767 /**
768  * lpfc_hb_timeout - The HBA-timer timeout handler
769  * @ptr: unsigned long holds the pointer to lpfc hba data structure.
770  *
771  * This is the HBA-timer timeout handler registered to the lpfc driver. When
772  * this timer fires, a HBA timeout event shall be posted to the lpfc driver
773  * work-port-events bitmap and the worker thread is notified. This timeout
774  * event will be used by the worker thread to invoke the actual timeout
775  * handler routine, lpfc_hb_timeout_handler. Any periodic operations will
776  * be performed in the timeout handler and the HBA timeout event bit shall
777  * be cleared by the worker thread after it has taken the event bitmap out.
778  **/
779 static void
780 lpfc_hb_timeout(unsigned long ptr)
781 {
782 	struct lpfc_hba *phba;
783 	uint32_t tmo_posted;
784 	unsigned long iflag;
785 
786 	phba = (struct lpfc_hba *)ptr;
787 
788 	/* Check for heart beat timeout conditions */
789 	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
790 	tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
791 	if (!tmo_posted)
792 		phba->pport->work_port_events |= WORKER_HB_TMO;
793 	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
794 
795 	/* Tell the worker thread there is work to do */
796 	if (!tmo_posted)
797 		lpfc_worker_wake_up(phba);
798 	return;
799 }
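/*
 * Sketch of how a handler like the one above is registered with the kernel
 * timer API of this era; the actual registration happens elsewhere in the
 * driver (illustrative only):
 *
 *	init_timer(&phba->hb_tmofunc);
 *	phba->hb_tmofunc.function = lpfc_hb_timeout;
 *	phba->hb_tmofunc.data = (unsigned long)phba;
 */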
800 
801 /**
802  * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
803  * @phba: pointer to lpfc hba data structure.
804  * @pmboxq: pointer to the driver internal queue element for mailbox command.
805  *
806  * This is the callback function to the lpfc heart-beat mailbox command.
807  * If configured, the lpfc driver issues the heart-beat mailbox command to
808  * the HBA every LPFC_HB_MBOX_INTERVAL (currently 5) seconds. At the time the
809  * heart-beat mailbox command is issued, the driver sets up the heart-beat
810  * timeout timer to LPFC_HB_MBOX_TIMEOUT (currently 30) seconds and marks the
811  * heart-beat outstanding state. Once the mailbox command comes back and no
812  * error conditions are detected, the heart-beat mailbox command timer is
813  * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding
814  * state is cleared for the next heart-beat. If the timer expires with the
815  * heart-beat outstanding state set, the driver will take the HBA offline.
816  **/
817 static void
818 lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
819 {
820 	unsigned long drvr_flag;
821 
822 	spin_lock_irqsave(&phba->hbalock, drvr_flag);
823 	phba->hb_outstanding = 0;
824 	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
825 
826 	/* Check and reset heart-beat timer if necessary */
827 	mempool_free(pmboxq, phba->mbox_mem_pool);
828 	if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
829 		!(phba->link_state == LPFC_HBA_ERROR) &&
830 		!(phba->pport->load_flag & FC_UNLOADING))
831 		mod_timer(&phba->hb_tmofunc,
832 			jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
833 	return;
834 }
835 
836 /**
837  * lpfc_hb_timeout_handler - The HBA-timer timeout handler
838  * @phba: pointer to lpfc hba data structure.
839  *
840  * This is the actual HBA-timer timeout handler to be invoked by the worker
841  * thread whenever the HBA timer fires and an HBA-timeout event is posted.
842  * This handler performs any periodic operations needed for the device. If
843  * such a periodic event has already been attended to, either in the
844  * interrupt handler or by processing slow-ring or fast-ring events within
845  * the HBA-timer timeout window (LPFC_HB_MBOX_INTERVAL), this handler simply
846  * resets the timer for the next timeout period. If the lpfc heart-beat
847  * mailbox command is configured and there is no heart-beat mailbox command
848  * outstanding, a heart-beat mailbox command is issued and the timer is set
849  * properly. Otherwise, if a heart-beat mailbox command has been
850  * outstanding, the HBA is taken offline.
851  **/
852 void
853 lpfc_hb_timeout_handler(struct lpfc_hba *phba)
854 {
855 	LPFC_MBOXQ_t *pmboxq;
856 	struct lpfc_dmabuf *buf_ptr;
857 	int retval;
858 	struct lpfc_sli *psli = &phba->sli;
859 	LIST_HEAD(completions);
860 
861 	if ((phba->link_state == LPFC_HBA_ERROR) ||
862 		(phba->pport->load_flag & FC_UNLOADING) ||
863 		(phba->pport->fc_flag & FC_OFFLINE_MODE))
864 		return;
865 
866 	spin_lock_irq(&phba->pport->work_port_lock);
867 
868 	if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ,
869 		jiffies)) {
870 		spin_unlock_irq(&phba->pport->work_port_lock);
871 		if (!phba->hb_outstanding)
872 			mod_timer(&phba->hb_tmofunc,
873 				jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
874 		else
875 			mod_timer(&phba->hb_tmofunc,
876 				jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
877 		return;
878 	}
879 	spin_unlock_irq(&phba->pport->work_port_lock);
880 
881 	if (phba->elsbuf_cnt &&
882 		(phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
883 		spin_lock_irq(&phba->hbalock);
884 		list_splice_init(&phba->elsbuf, &completions);
885 		phba->elsbuf_cnt = 0;
886 		phba->elsbuf_prev_cnt = 0;
887 		spin_unlock_irq(&phba->hbalock);
888 
889 		while (!list_empty(&completions)) {
890 			list_remove_head(&completions, buf_ptr,
891 				struct lpfc_dmabuf, list);
892 			lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
893 			kfree(buf_ptr);
894 		}
895 	}
896 	phba->elsbuf_prev_cnt = phba->elsbuf_cnt;
897 
898 	/* If there is no heart beat outstanding, issue a heartbeat command */
899 	if (phba->cfg_enable_hba_heartbeat) {
900 		if (!phba->hb_outstanding) {
901 			pmboxq = mempool_alloc(phba->mbox_mem_pool,GFP_KERNEL);
902 			if (!pmboxq) {
903 				mod_timer(&phba->hb_tmofunc,
904 					  jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
905 				return;
906 			}
907 
908 			lpfc_heart_beat(phba, pmboxq);
909 			pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
910 			pmboxq->vport = phba->pport;
911 			retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
912 
913 			if (retval != MBX_BUSY && retval != MBX_SUCCESS) {
914 				mempool_free(pmboxq, phba->mbox_mem_pool);
915 				mod_timer(&phba->hb_tmofunc,
916 					  jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
917 				return;
918 			}
919 			mod_timer(&phba->hb_tmofunc,
920 				  jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
921 			phba->hb_outstanding = 1;
922 			return;
923 		} else {
924 			/*
925 			* If the heart-beat timeout was called with hb_outstanding
926 			* set, we need to take the HBA offline.
927 			*/
928 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
929 					"0459 Adapter heartbeat failure, "
930 					"taking this port offline.\n");
931 
932 			spin_lock_irq(&phba->hbalock);
933 			psli->sli_flag &= ~LPFC_SLI_ACTIVE;
934 			spin_unlock_irq(&phba->hbalock);
935 
936 			lpfc_offline_prep(phba);
937 			lpfc_offline(phba);
938 			lpfc_unblock_mgmt_io(phba);
939 			phba->link_state = LPFC_HBA_ERROR;
940 			lpfc_hba_down_post(phba);
941 		}
942 	}
943 }
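/*
 * Heart-beat timeline implied by the handler above, assuming the current
 * LPFC_HB_MBOX_INTERVAL (5) and LPFC_HB_MBOX_TIMEOUT (30) second values:
 *
 *	t = 0s    heart-beat mailbox issued, hb_outstanding = 1,
 *	          timer re-armed for t + 30s
 *	t < 30s   completion arrives, hb_outstanding cleared, timer
 *	          re-armed for another 5s (see lpfc_hb_mbox_cmpl above)
 *	t = 30s   timer fires with hb_outstanding still set: the port
 *	          is taken offline
 */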
944 
945 /**
946  * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
947  * @phba: pointer to lpfc hba data structure.
948  *
949  * This routine is called to bring the HBA offline when HBA hardware error
950  * other than Port Error 6 has been detected.
951  **/
952 static void
953 lpfc_offline_eratt(struct lpfc_hba *phba)
954 {
955 	struct lpfc_sli   *psli = &phba->sli;
956 
957 	spin_lock_irq(&phba->hbalock);
958 	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
959 	spin_unlock_irq(&phba->hbalock);
960 	lpfc_offline_prep(phba);
961 
962 	lpfc_offline(phba);
963 	lpfc_reset_barrier(phba);
964 	spin_lock_irq(&phba->hbalock);
965 	lpfc_sli_brdreset(phba);
966 	spin_unlock_irq(&phba->hbalock);
967 	lpfc_hba_down_post(phba);
968 	lpfc_sli_brdready(phba, HS_MBRDY);
969 	lpfc_unblock_mgmt_io(phba);
970 	phba->link_state = LPFC_HBA_ERROR;
971 	return;
972 }
973 
974 /**
975  * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
976  * @phba: pointer to lpfc hba data structure.
977  *
978  * This routine is called to bring a SLI4 HBA offline when HBA hardware error
979  * other than Port Error 6 has been detected.
980  **/
981 static void
982 lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
983 {
984 	lpfc_offline_prep(phba);
985 	lpfc_offline(phba);
986 	lpfc_sli4_brdreset(phba);
987 	lpfc_hba_down_post(phba);
988 	lpfc_sli4_post_status_check(phba);
989 	lpfc_unblock_mgmt_io(phba);
990 	phba->link_state = LPFC_HBA_ERROR;
991 }
992 
993 /**
994  * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
995  * @phba: pointer to lpfc hba data structure.
996  *
997  * This routine is invoked to handle the deferred HBA hardware error
998  * conditions. This type of error is indicated by the HBA setting ER1
999  * and another ER bit in the host status register. The driver will
1000  * wait until the ER1 bit clears before handling the error condition.
1001  **/
1002 static void
1003 lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
1004 {
1005 	uint32_t old_host_status = phba->work_hs;
1006 	struct lpfc_sli_ring  *pring;
1007 	struct lpfc_sli *psli = &phba->sli;
1008 
1009 	/* If the pci channel is offline, ignore possible errors,
1010 	 * since we cannot communicate with the pci card anyway.
1011 	 */
1012 	if (pci_channel_offline(phba->pcidev)) {
1013 		spin_lock_irq(&phba->hbalock);
1014 		phba->hba_flag &= ~DEFER_ERATT;
1015 		spin_unlock_irq(&phba->hbalock);
1016 		return;
1017 	}
1018 
1019 	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1020 		"0479 Deferred Adapter Hardware Error "
1021 		"Data: x%x x%x x%x\n",
1022 		phba->work_hs,
1023 		phba->work_status[0], phba->work_status[1]);
1024 
1025 	spin_lock_irq(&phba->hbalock);
1026 	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1027 	spin_unlock_irq(&phba->hbalock);
1028 
1029 
1030 	/*
1031 	 * Firmware stops when it triggers an error attention. That could cause
1032 	 * I/Os to be dropped by the firmware. Error out the IOCBs (I/Os) on the
1033 	 * txcmplq and let the SCSI layer retry them after re-establishing link.
1034 	 */
1035 	pring = &psli->ring[psli->fcp_ring];
1036 	lpfc_sli_abort_iocb_ring(phba, pring);
1037 
1038 	/*
1039 	 * There was a firmware error. Take the hba offline and then
1040 	 * attempt to restart it.
1041 	 */
1042 	lpfc_offline_prep(phba);
1043 	lpfc_offline(phba);
1044 
1045 	/* Wait for the ER1 bit to clear.*/
1046 	while (phba->work_hs & HS_FFER1) {
1047 		msleep(100);
1048 		phba->work_hs = readl(phba->HSregaddr);
1049 		/* If driver is unloading let the worker thread continue */
1050 		if (phba->pport->load_flag & FC_UNLOADING) {
1051 			phba->work_hs = 0;
1052 			break;
1053 		}
1054 	}
1055 
1056 	/*
1057 	 * This is to protect against a race condition in which the
1058 	 * first write to the host attention register clears the
1059 	 * host status register.
1060 	 */
1061 	if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
1062 		phba->work_hs = old_host_status & ~HS_FFER1;
1063 
1064 	spin_lock_irq(&phba->hbalock);
1065 	phba->hba_flag &= ~DEFER_ERATT;
1066 	spin_unlock_irq(&phba->hbalock);
1067 	phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
1068 	phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
1069 }
1070 
1071 static void
1072 lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
1073 {
1074 	struct lpfc_board_event_header board_event;
1075 	struct Scsi_Host *shost;
1076 
1077 	board_event.event_type = FC_REG_BOARD_EVENT;
1078 	board_event.subcategory = LPFC_EVENT_PORTINTERR;
1079 	shost = lpfc_shost_from_vport(phba->pport);
1080 	fc_host_post_vendor_event(shost, fc_get_event_number(),
1081 				  sizeof(board_event),
1082 				  (char *) &board_event,
1083 				  LPFC_NL_VENDOR_ID);
1084 }
1085 
1086 /**
1087  * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
1088  * @phba: pointer to lpfc hba data structure.
1089  *
1090  * This routine is invoked to handle the following HBA hardware error
1091  * conditions:
1092  * 1 - HBA error attention interrupt
1093  * 2 - DMA ring index out of range
1094  * 3 - Mailbox command came back as unknown
1095  **/
1096 static void
1097 lpfc_handle_eratt_s3(struct lpfc_hba *phba)
1098 {
1099 	struct lpfc_vport *vport = phba->pport;
1100 	struct lpfc_sli   *psli = &phba->sli;
1101 	struct lpfc_sli_ring  *pring;
1102 	uint32_t event_data;
1103 	unsigned long temperature;
1104 	struct temp_event temp_event_data;
1105 	struct Scsi_Host  *shost;
1106 
1107 	/* If the pci channel is offline, ignore possible errors,
1108 	 * since we cannot communicate with the pci card anyway.
1109 	 */
1110 	if (pci_channel_offline(phba->pcidev)) {
1111 		spin_lock_irq(&phba->hbalock);
1112 		phba->hba_flag &= ~DEFER_ERATT;
1113 		spin_unlock_irq(&phba->hbalock);
1114 		return;
1115 	}
1116 
1117 	/* If resets are disabled then leave the HBA alone and return */
1118 	if (!phba->cfg_enable_hba_reset)
1119 		return;
1120 
1121 	/* Send an internal error event to mgmt application */
1122 	lpfc_board_errevt_to_mgmt(phba);
1123 
1124 	if (phba->hba_flag & DEFER_ERATT)
1125 		lpfc_handle_deferred_eratt(phba);
1126 
1127 	if (phba->work_hs & HS_FFER6) {
1128 		/* Re-establishing Link */
1129 		lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
1130 				"1301 Re-establishing Link "
1131 				"Data: x%x x%x x%x\n",
1132 				phba->work_hs,
1133 				phba->work_status[0], phba->work_status[1]);
1134 
1135 		spin_lock_irq(&phba->hbalock);
1136 		psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1137 		spin_unlock_irq(&phba->hbalock);
1138 
1139 		/*
1140 		* Firmware stops when it triggers an error attention with HS_FFER6.
1141 		* That could cause I/Os to be dropped by the firmware.
1142 		* Error out the IOCBs (I/Os) on the txcmplq and let the SCSI layer
1143 		* retry them after re-establishing link.
1144 		*/
1145 		pring = &psli->ring[psli->fcp_ring];
1146 		lpfc_sli_abort_iocb_ring(phba, pring);
1147 
1148 		/*
1149 		 * There was a firmware error.  Take the hba offline and then
1150 		 * attempt to restart it.
1151 		 */
1152 		lpfc_offline_prep(phba);
1153 		lpfc_offline(phba);
1154 		lpfc_sli_brdrestart(phba);
1155 		if (lpfc_online(phba) == 0) {	/* Initialize the HBA */
1156 			lpfc_unblock_mgmt_io(phba);
1157 			return;
1158 		}
1159 		lpfc_unblock_mgmt_io(phba);
1160 	} else if (phba->work_hs & HS_CRIT_TEMP) {
1161 		temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
1162 		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
1163 		temp_event_data.event_code = LPFC_CRIT_TEMP;
1164 		temp_event_data.data = (uint32_t)temperature;
1165 
1166 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1167 				"0406 Adapter maximum temperature exceeded "
1168 				"(%ld), taking this port offline "
1169 				"Data: x%x x%x x%x\n",
1170 				temperature, phba->work_hs,
1171 				phba->work_status[0], phba->work_status[1]);
1172 
1173 		shost = lpfc_shost_from_vport(phba->pport);
1174 		fc_host_post_vendor_event(shost, fc_get_event_number(),
1175 					  sizeof(temp_event_data),
1176 					  (char *) &temp_event_data,
1177 					  SCSI_NL_VID_TYPE_PCI
1178 					  | PCI_VENDOR_ID_EMULEX);
1179 
1180 		spin_lock_irq(&phba->hbalock);
1181 		phba->over_temp_state = HBA_OVER_TEMP;
1182 		spin_unlock_irq(&phba->hbalock);
1183 		lpfc_offline_eratt(phba);
1184 
1185 	} else {
1186 		/* The if clause above forces this code path when the status
1187 		 * failure is a value other than FFER6. Do not call the offline
1188 		 * path twice. This is the adapter hardware error path.
1189 		 */
1190 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1191 				"0457 Adapter Hardware Error "
1192 				"Data: x%x x%x x%x\n",
1193 				phba->work_hs,
1194 				phba->work_status[0], phba->work_status[1]);
1195 
1196 		event_data = FC_REG_DUMP_EVENT;
1197 		shost = lpfc_shost_from_vport(vport);
1198 		fc_host_post_vendor_event(shost, fc_get_event_number(),
1199 				sizeof(event_data), (char *) &event_data,
1200 				SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
1201 
1202 		lpfc_offline_eratt(phba);
1203 	}
1204 	return;
1205 }
1206 
1207 /**
1208  * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
1209  * @phba: pointer to lpfc hba data structure.
1210  *
1211  * This routine is invoked to handle the SLI4 HBA hardware error attention
1212  * conditions.
1213  **/
1214 static void
1215 lpfc_handle_eratt_s4(struct lpfc_hba *phba)
1216 {
1217 	struct lpfc_vport *vport = phba->pport;
1218 	uint32_t event_data;
1219 	struct Scsi_Host *shost;
1220 
1221 	/* If the pci channel is offline, ignore possible errors, since
1222 	 * we cannot communicate with the pci card anyway.
1223 	 */
1224 	if (pci_channel_offline(phba->pcidev))
1225 		return;
1226 	/* If resets are disabled then leave the HBA alone and return */
1227 	if (!phba->cfg_enable_hba_reset)
1228 		return;
1229 
1230 	/* Send an internal error event to mgmt application */
1231 	lpfc_board_errevt_to_mgmt(phba);
1232 
1233 	/* For now, the actual action for SLI4 device handling is not
1234 	 * specified yet; just treat it as an adapter hardware failure.
1235 	 */
1236 	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1237 			"0143 SLI4 Adapter Hardware Error Data: x%x x%x\n",
1238 			phba->work_status[0], phba->work_status[1]);
1239 
1240 	event_data = FC_REG_DUMP_EVENT;
1241 	shost = lpfc_shost_from_vport(vport);
1242 	fc_host_post_vendor_event(shost, fc_get_event_number(),
1243 				  sizeof(event_data), (char *) &event_data,
1244 				  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
1245 
1246 	lpfc_sli4_offline_eratt(phba);
1247 }
1248 
1249 /**
1250  * lpfc_handle_eratt - Wrapper func for handling hba error attention
1251  * @phba: pointer to lpfc HBA data structure.
1252  *
1253  * This routine wraps the actual SLI3 or SLI4 hba error attention handling
1254  * routine from the API jump table function pointer from the lpfc_hba struct.
1255  *
1256  * Return codes
1257  *   0 - success.
1258  *   Any other value - error.
1259  **/
1260 void
1261 lpfc_handle_eratt(struct lpfc_hba *phba)
1262 {
1263 	(*phba->lpfc_handle_eratt)(phba);
1264 }
1265 
1266 /**
1267  * lpfc_handle_latt - The HBA link event handler
1268  * @phba: pointer to lpfc hba data structure.
1269  *
1270  * This routine is invoked from the worker thread to handle a HBA host
1271  * attention link event.
1272  **/
1273 void
1274 lpfc_handle_latt(struct lpfc_hba *phba)
1275 {
1276 	struct lpfc_vport *vport = phba->pport;
1277 	struct lpfc_sli   *psli = &phba->sli;
1278 	LPFC_MBOXQ_t *pmb;
1279 	volatile uint32_t control;
1280 	struct lpfc_dmabuf *mp;
1281 	int rc = 0;
1282 
1283 	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1284 	if (!pmb) {
1285 		rc = 1;
1286 		goto lpfc_handle_latt_err_exit;
1287 	}
1288 
1289 	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
1290 	if (!mp) {
1291 		rc = 2;
1292 		goto lpfc_handle_latt_free_pmb;
1293 	}
1294 
1295 	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
1296 	if (!mp->virt) {
1297 		rc = 3;
1298 		goto lpfc_handle_latt_free_mp;
1299 	}
1300 
1301 	/* Cleanup any outstanding ELS commands */
1302 	lpfc_els_flush_all_cmd(phba);
1303 
1304 	psli->slistat.link_event++;
1305 	lpfc_read_la(phba, pmb, mp);
1306 	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_la;
1307 	pmb->vport = vport;
1308 	/* Block ELS IOCBs until we have processed this mbox command */
1309 	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
1310 	rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT);
1311 	if (rc == MBX_NOT_FINISHED) {
1312 		rc = 4;
1313 		goto lpfc_handle_latt_free_mbuf;
1314 	}
1315 
1316 	/* Clear Link Attention in HA REG */
1317 	spin_lock_irq(&phba->hbalock);
1318 	writel(HA_LATT, phba->HAregaddr);
1319 	readl(phba->HAregaddr); /* flush */
1320 	spin_unlock_irq(&phba->hbalock);
1321 
1322 	return;
1323 
1324 lpfc_handle_latt_free_mbuf:
1325 	phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
1326 	lpfc_mbuf_free(phba, mp->virt, mp->phys);
1327 lpfc_handle_latt_free_mp:
1328 	kfree(mp);
1329 lpfc_handle_latt_free_pmb:
1330 	mempool_free(pmb, phba->mbox_mem_pool);
1331 lpfc_handle_latt_err_exit:
1332 	/* Enable Link attention interrupts */
1333 	spin_lock_irq(&phba->hbalock);
1334 	psli->sli_flag |= LPFC_PROCESS_LA;
1335 	control = readl(phba->HCregaddr);
1336 	control |= HC_LAINT_ENA;
1337 	writel(control, phba->HCregaddr);
1338 	readl(phba->HCregaddr); /* flush */
1339 
1340 	/* Clear Link Attention in HA REG */
1341 	writel(HA_LATT, phba->HAregaddr);
1342 	readl(phba->HAregaddr); /* flush */
1343 	spin_unlock_irq(&phba->hbalock);
1344 	lpfc_linkdown(phba);
1345 	phba->link_state = LPFC_HBA_ERROR;
1346 
1347 	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
1348 		     "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);
1349 
1350 	return;
1351 }
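/*
 * The "Data:%d" value logged on the error path above encodes where the
 * handler failed: 1 = no mailbox buffer, 2 = no lpfc_dmabuf, 3 = no mbuf
 * for the READ_LA payload, 4 = lpfc_sli_issue_mbox() returned
 * MBX_NOT_FINISHED.
 */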
1352 
1353 /**
1354  * lpfc_parse_vpd - Parse VPD (Vital Product Data)
1355  * @phba: pointer to lpfc hba data structure.
1356  * @vpd: pointer to the vital product data.
1357  * @len: length of the vital product data in bytes.
1358  *
1359  * This routine parses the Vital Product Data (VPD). The VPD is treated as
1360  * an array of characters. In this routine, the ModelName, ProgramType, and
1361  * ModelDesc, etc. fields of the phba data structure will be populated.
1362  *
1363  * Return codes
1364  *   0 - pointer to the VPD passed in is NULL
1365  *   1 - success
1366  **/
1367 int
1368 lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
1369 {
1370 	uint8_t lenlo, lenhi;
1371 	int Length;
1372 	int i, j;
1373 	int finished = 0;
1374 	int index = 0;
1375 
1376 	if (!vpd)
1377 		return 0;
1378 
1379 	/* Vital Product */
1380 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
1381 			"0455 Vital Product Data: x%x x%x x%x x%x\n",
1382 			(uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
1383 			(uint32_t) vpd[3]);
1384 	while (!finished && (index < (len - 4))) {
1385 		switch (vpd[index]) {
1386 		case 0x82:
1387 		case 0x91:
1388 			index += 1;
1389 			lenlo = vpd[index];
1390 			index += 1;
1391 			lenhi = vpd[index];
1392 			index += 1;
1393 			i = ((((unsigned short)lenhi) << 8) + lenlo);
1394 			index += i;
1395 			break;
1396 		case 0x90:
1397 			index += 1;
1398 			lenlo = vpd[index];
1399 			index += 1;
1400 			lenhi = vpd[index];
1401 			index += 1;
1402 			Length = ((((unsigned short)lenhi) << 8) + lenlo);
1403 			if (Length > len - index)
1404 				Length = len - index;
1405 			while (Length > 0) {
1406 			/* Look for Serial Number */
1407 			if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
1408 				index += 2;
1409 				i = vpd[index];
1410 				index += 1;
1411 				j = 0;
1412 				Length -= (3+i);
1413 				while(i--) {
1414 					phba->SerialNumber[j++] = vpd[index++];
1415 					if (j == 31)
1416 						break;
1417 				}
1418 				phba->SerialNumber[j] = 0;
1419 				continue;
1420 			}
1421 			else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
1422 				phba->vpd_flag |= VPD_MODEL_DESC;
1423 				index += 2;
1424 				i = vpd[index];
1425 				index += 1;
1426 				j = 0;
1427 				Length -= (3+i);
1428 				while(i--) {
1429 					phba->ModelDesc[j++] = vpd[index++];
1430 					if (j == 255)
1431 						break;
1432 				}
1433 				phba->ModelDesc[j] = 0;
1434 				continue;
1435 			}
1436 			else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
1437 				phba->vpd_flag |= VPD_MODEL_NAME;
1438 				index += 2;
1439 				i = vpd[index];
1440 				index += 1;
1441 				j = 0;
1442 				Length -= (3+i);
1443 				while(i--) {
1444 					phba->ModelName[j++] = vpd[index++];
1445 					if (j == 79)
1446 						break;
1447 				}
1448 				phba->ModelName[j] = 0;
1449 				continue;
1450 			}
1451 			else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
1452 				phba->vpd_flag |= VPD_PROGRAM_TYPE;
1453 				index += 2;
1454 				i = vpd[index];
1455 				index += 1;
1456 				j = 0;
1457 				Length -= (3+i);
1458 				while(i--) {
1459 					phba->ProgramType[j++] = vpd[index++];
1460 					if (j == 255)
1461 						break;
1462 				}
1463 				phba->ProgramType[j] = 0;
1464 				continue;
1465 			}
1466 			else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
1467 				phba->vpd_flag |= VPD_PORT;
1468 				index += 2;
1469 				i = vpd[index];
1470 				index += 1;
1471 				j = 0;
1472 				Length -= (3+i);
1473 				while(i--) {
1474 				phba->Port[j++] = vpd[index++];
1475 				if (j == 19)
1476 					break;
1477 				}
1478 				phba->Port[j] = 0;
1479 				continue;
1480 			}
1481 			else {
1482 				index += 2;
1483 				i = vpd[index];
1484 				index += 1;
1485 				index += i;
1486 				Length -= (3 + i);
1487 			}
1488 		}
1489 		finished = 0;
1490 		break;
1491 		case 0x78:
1492 			finished = 1;
1493 			break;
1494 		default:
1495 			index ++;
1496 			break;
1497 		}
1498 	}
1499 
1500 	return(1);
1501 }
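/*
 * Example of the VPD encoding walked by the parser above, for a
 * hypothetical byte stream: a 0x90 (VPD-R) tag is followed by a
 * little-endian 16-bit length, then keyword records of the form
 * <kw0><kw1><len><data...>, and a 0x78 tag ends the data:
 *
 *	0x90 0x06 0x00  'S' 'N' 0x03 '1' '2' '3'  0x78
 *
 * yields phba->SerialNumber = "123"; "V1" through "V4" records populate
 * ModelDesc, ModelName, ProgramType and Port respectively.
 */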
1502 
1503 /**
1504  * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
1505  * @phba: pointer to lpfc hba data structure.
1506  * @mdp: pointer to the data structure to hold the derived model name.
1507  * @descp: pointer to the data structure to hold the derived description.
1508  *
1509  * This routine retrieves the HBA's description based on its registered PCI device
1510  * ID. The @descp passed into this function points to an array of 256 chars. It
1511  * shall be returned with the model name, maximum speed, and the host bus type.
1512  * The @mdp passed into this function points to an array of 80 chars. When the
1513  * function returns, the @mdp will be filled with the model name.
1514  **/
1515 static void
1516 lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
1517 {
1518 	lpfc_vpd_t *vp;
1519 	uint16_t dev_id = phba->pcidev->device;
1520 	int max_speed;
1521 	int GE = 0;
1522 	int oneConnect = 0; /* default is not a oneConnect */
1523 	struct {
1524 		char * name;
1525 		int    max_speed;
1526 		char * bus;
1527 	} m = {"<Unknown>", 0, ""};
1528 
1529 	if (mdp && mdp[0] != '\0'
1530 		&& descp && descp[0] != '\0')
1531 		return;
1532 
1533 	if (phba->lmt & LMT_10Gb)
1534 		max_speed = 10;
1535 	else if (phba->lmt & LMT_8Gb)
1536 		max_speed = 8;
1537 	else if (phba->lmt & LMT_4Gb)
1538 		max_speed = 4;
1539 	else if (phba->lmt & LMT_2Gb)
1540 		max_speed = 2;
1541 	else
1542 		max_speed = 1;
1543 
1544 	vp = &phba->vpd;
1545 
1546 	switch (dev_id) {
1547 	case PCI_DEVICE_ID_FIREFLY:
1548 		m = (typeof(m)){"LP6000", max_speed, "PCI"};
1549 		break;
1550 	case PCI_DEVICE_ID_SUPERFLY:
1551 		if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
1552 			m = (typeof(m)){"LP7000", max_speed,  "PCI"};
1553 		else
1554 			m = (typeof(m)){"LP7000E", max_speed, "PCI"};
1555 		break;
1556 	case PCI_DEVICE_ID_DRAGONFLY:
1557 		m = (typeof(m)){"LP8000", max_speed, "PCI"};
1558 		break;
1559 	case PCI_DEVICE_ID_CENTAUR:
1560 		if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
1561 			m = (typeof(m)){"LP9002", max_speed, "PCI"};
1562 		else
1563 			m = (typeof(m)){"LP9000", max_speed, "PCI"};
1564 		break;
1565 	case PCI_DEVICE_ID_RFLY:
1566 		m = (typeof(m)){"LP952", max_speed, "PCI"};
1567 		break;
1568 	case PCI_DEVICE_ID_PEGASUS:
1569 		m = (typeof(m)){"LP9802", max_speed, "PCI-X"};
1570 		break;
1571 	case PCI_DEVICE_ID_THOR:
1572 		m = (typeof(m)){"LP10000", max_speed, "PCI-X"};
1573 		break;
1574 	case PCI_DEVICE_ID_VIPER:
1575 		m = (typeof(m)){"LPX1000", max_speed,  "PCI-X"};
1576 		break;
1577 	case PCI_DEVICE_ID_PFLY:
1578 		m = (typeof(m)){"LP982", max_speed, "PCI-X"};
1579 		break;
1580 	case PCI_DEVICE_ID_TFLY:
1581 		m = (typeof(m)){"LP1050", max_speed, "PCI-X"};
1582 		break;
1583 	case PCI_DEVICE_ID_HELIOS:
1584 		m = (typeof(m)){"LP11000", max_speed, "PCI-X2"};
1585 		break;
1586 	case PCI_DEVICE_ID_HELIOS_SCSP:
1587 		m = (typeof(m)){"LP11000-SP", max_speed, "PCI-X2"};
1588 		break;
1589 	case PCI_DEVICE_ID_HELIOS_DCSP:
1590 		m = (typeof(m)){"LP11002-SP", max_speed, "PCI-X2"};
1591 		break;
1592 	case PCI_DEVICE_ID_NEPTUNE:
1593 		m = (typeof(m)){"LPe1000", max_speed, "PCIe"};
1594 		break;
1595 	case PCI_DEVICE_ID_NEPTUNE_SCSP:
1596 		m = (typeof(m)){"LPe1000-SP", max_speed, "PCIe"};
1597 		break;
1598 	case PCI_DEVICE_ID_NEPTUNE_DCSP:
1599 		m = (typeof(m)){"LPe1002-SP", max_speed, "PCIe"};
1600 		break;
1601 	case PCI_DEVICE_ID_BMID:
1602 		m = (typeof(m)){"LP1150", max_speed, "PCI-X2"};
1603 		break;
1604 	case PCI_DEVICE_ID_BSMB:
1605 		m = (typeof(m)){"LP111", max_speed, "PCI-X2"};
1606 		break;
1607 	case PCI_DEVICE_ID_ZEPHYR:
1608 		m = (typeof(m)){"LPe11000", max_speed, "PCIe"};
1609 		break;
1610 	case PCI_DEVICE_ID_ZEPHYR_SCSP:
1611 		m = (typeof(m)){"LPe11000", max_speed, "PCIe"};
1612 		break;
1613 	case PCI_DEVICE_ID_ZEPHYR_DCSP:
1614 		m = (typeof(m)){"LP2105", max_speed, "PCIe"};
1615 		GE = 1;
1616 		break;
1617 	case PCI_DEVICE_ID_ZMID:
1618 		m = (typeof(m)){"LPe1150", max_speed, "PCIe"};
1619 		break;
1620 	case PCI_DEVICE_ID_ZSMB:
1621 		m = (typeof(m)){"LPe111", max_speed, "PCIe"};
1622 		break;
1623 	case PCI_DEVICE_ID_LP101:
1624 		m = (typeof(m)){"LP101", max_speed, "PCI-X"};
1625 		break;
1626 	case PCI_DEVICE_ID_LP10000S:
1627 		m = (typeof(m)){"LP10000-S", max_speed, "PCI"};
1628 		break;
1629 	case PCI_DEVICE_ID_LP11000S:
1630 		m = (typeof(m)){"LP11000-S", max_speed,
1631 			"PCI-X2"};
1632 		break;
1633 	case PCI_DEVICE_ID_LPE11000S:
1634 		m = (typeof(m)){"LPe11000-S", max_speed,
1635 			"PCIe"};
1636 		break;
1637 	case PCI_DEVICE_ID_SAT:
1638 		m = (typeof(m)){"LPe12000", max_speed, "PCIe"};
1639 		break;
1640 	case PCI_DEVICE_ID_SAT_MID:
1641 		m = (typeof(m)){"LPe1250", max_speed, "PCIe"};
1642 		break;
1643 	case PCI_DEVICE_ID_SAT_SMB:
1644 		m = (typeof(m)){"LPe121", max_speed, "PCIe"};
1645 		break;
1646 	case PCI_DEVICE_ID_SAT_DCSP:
1647 		m = (typeof(m)){"LPe12002-SP", max_speed, "PCIe"};
1648 		break;
1649 	case PCI_DEVICE_ID_SAT_SCSP:
1650 		m = (typeof(m)){"LPe12000-SP", max_speed, "PCIe"};
1651 		break;
1652 	case PCI_DEVICE_ID_SAT_S:
1653 		m = (typeof(m)){"LPe12000-S", max_speed, "PCIe"};
1654 		break;
1655 	case PCI_DEVICE_ID_HORNET:
1656 		m = (typeof(m)){"LP21000", max_speed, "PCIe"};
1657 		GE = 1;
1658 		break;
1659 	case PCI_DEVICE_ID_PROTEUS_VF:
1660 		m = (typeof(m)){"LPev12000", max_speed, "PCIe IOV"};
1661 		break;
1662 	case PCI_DEVICE_ID_PROTEUS_PF:
1663 		m = (typeof(m)){"LPev12000", max_speed, "PCIe IOV"};
1664 		break;
1665 	case PCI_DEVICE_ID_PROTEUS_S:
1666 		m = (typeof(m)){"LPemv12002-S", max_speed, "PCIe IOV"};
1667 		break;
1668 	case PCI_DEVICE_ID_TIGERSHARK:
1669 		oneConnect = 1;
1670 		m = (typeof(m)){"OCe10100-F", max_speed, "PCIe"};
1671 		break;
1672 	default:
1673 		m = (typeof(m)){ NULL };
1674 		break;
1675 	}
1676 
1677 	if (mdp && mdp[0] == '\0')
1678 		snprintf(mdp, 79, "%s", m.name);
1679 	/* OneConnect HBAs require special processing: they are all initiators
1680 	 * and we append the port number to the description
1681 	 */
1682 	if (descp && descp[0] == '\0') {
1683 		if (oneConnect)
1684 			snprintf(descp, 255,
1685 				"Emulex OneConnect %s, FCoE Initiator, Port %s",
1686 				m.name,
1687 				phba->Port);
1688 		else
1689 			snprintf(descp, 255,
1690 				"Emulex %s %d%s %s %s",
1691 				m.name, m.max_speed,
1692 				(GE) ? "GE" : "Gb",
1693 				m.bus,
1694 				(GE) ? "FCoE Adapter" :
1695 					"Fibre Channel Adapter");
1696 	}
1697 }
1698 
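/*
 * Example (illustrative only, not part of the driver): assuming max_speed
 * is 4 and GE == 0 for a PCI_DEVICE_ID_ZMID (LPe1150) adapter, the
 * description snprintf above yields:
 *
 *	"Emulex LPe1150 4Gb PCIe Fibre Channel Adapter"
 */
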
1699 /**
1700  * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to an IOCB ring
1701  * @phba: pointer to lpfc hba data structure.
1702  * @pring: pointer to an IOCB ring.
1703  * @cnt: the number of IOCBs to be posted to the IOCB ring.
1704  *
1705  * This routine posts a given number of IOCBs with the associated DMA buffer
1706  * descriptors specified by the cnt argument to the given IOCB ring.
1707  *
1708  * Return codes
1709  *   The number of IOCBs NOT able to be posted to the IOCB ring.
1710  **/
1711 int
1712 lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
1713 {
1714 	IOCB_t *icmd;
1715 	struct lpfc_iocbq *iocb;
1716 	struct lpfc_dmabuf *mp1, *mp2;
1717 
1718 	cnt += pring->missbufcnt;
1719 
1720 	/* While there are buffers to post */
1721 	while (cnt > 0) {
1722 		/* Allocate buffer for command iocb */
1723 		iocb = lpfc_sli_get_iocbq(phba);
1724 		if (iocb == NULL) {
1725 			pring->missbufcnt = cnt;
1726 			return cnt;
1727 		}
1728 		icmd = &iocb->iocb;
1729 
1730 		/* 2 buffers can be posted per command */
1731 		/* Allocate buffer to post */
1732 		mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
1733 		if (mp1)
1734 			mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
1735 		if (!mp1 || !mp1->virt) {
1736 			kfree(mp1);
1737 			lpfc_sli_release_iocbq(phba, iocb);
1738 			pring->missbufcnt = cnt;
1739 			return cnt;
1740 		}
1741 
1742 		INIT_LIST_HEAD(&mp1->list);
1743 		/* Allocate buffer to post */
1744 		if (cnt > 1) {
1745 			mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
1746 			if (mp2)
1747 				mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
1748 							    &mp2->phys);
1749 			if (!mp2 || !mp2->virt) {
1750 				kfree(mp2);
1751 				lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
1752 				kfree(mp1);
1753 				lpfc_sli_release_iocbq(phba, iocb);
1754 				pring->missbufcnt = cnt;
1755 				return cnt;
1756 			}
1757 
1758 			INIT_LIST_HEAD(&mp2->list);
1759 		} else {
1760 			mp2 = NULL;
1761 		}
1762 
1763 		icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
1764 		icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
1765 		icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
1766 		icmd->ulpBdeCount = 1;
1767 		cnt--;
1768 		if (mp2) {
1769 			icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
1770 			icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
1771 			icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
1772 			cnt--;
1773 			icmd->ulpBdeCount = 2;
1774 		}
1775 
1776 		icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
1777 		icmd->ulpLe = 1;
1778 
1779 		if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
1780 		    IOCB_ERROR) {
1781 			lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
1782 			kfree(mp1);
1783 			cnt++;
1784 			if (mp2) {
1785 				lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
1786 				kfree(mp2);
1787 				cnt++;
1788 			}
1789 			lpfc_sli_release_iocbq(phba, iocb);
1790 			pring->missbufcnt = cnt;
1791 			return cnt;
1792 		}
1793 		lpfc_sli_ringpostbuf_put(phba, pring, mp1);
1794 		if (mp2)
1795 			lpfc_sli_ringpostbuf_put(phba, pring, mp2);
1796 	}
1797 	pring->missbufcnt = 0;
1798 	return 0;
1799 }
1800 
1801 /**
1802  * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring
1803  * @phba: pointer to lpfc hba data structure.
1804  *
1805  * This routine posts initial receive IOCB buffers to the ELS ring. The
1806  * number of initial IOCB buffers, specified by LPFC_BUF_RING0, is
1807  * currently set to 64 IOCBs.
1808  *
1809  * Return codes
1810  *   0 - success (currently always success)
1811  **/
1812 static int
1813 lpfc_post_rcv_buf(struct lpfc_hba *phba)
1814 {
1815 	struct lpfc_sli *psli = &phba->sli;
1816 
1817 	/* Ring 0, ELS / CT buffers */
1818 	lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0);
1819 	/* Ring 2 - FCP no buffers needed */
1820 
1821 	return 0;
1822 }
1823 
1824 #define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
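/*
 * S(N, V) rotates the 32-bit value V left by N bits, e.g.
 * S(1, 0x80000000) == 0x00000001. It is the rotation primitive used by
 * the SHA-1 style hash in lpfc_sha_init()/lpfc_sha_iterate() below; the
 * constants there match the standard SHA-1 initial values and round
 * constants.
 */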
1825 
1826 /**
1827  * lpfc_sha_init - Set up initial array of hash table entries
1828  * @HashResultPointer: pointer to an array as hash table.
1829  *
1830  * This routine sets up the initial values to the array of hash table entries
1831  * for the LC HBAs.
1832  **/
1833 static void
1834 lpfc_sha_init(uint32_t * HashResultPointer)
1835 {
1836 	HashResultPointer[0] = 0x67452301;
1837 	HashResultPointer[1] = 0xEFCDAB89;
1838 	HashResultPointer[2] = 0x98BADCFE;
1839 	HashResultPointer[3] = 0x10325476;
1840 	HashResultPointer[4] = 0xC3D2E1F0;
1841 }
1842 
1843 /**
1844  * lpfc_sha_iterate - Iterate initial hash table with the working hash table
1845  * @HashResultPointer: pointer to an initial/result hash table.
1846  * @HashWorkingPointer: pointer to a working hash table.
1847  *
1848  * This routine iterates an initial hash table pointed to by @HashResultPointer
1849  * with the values from the working hash table pointed to by @HashWorkingPointer.
1850  * The results are put back into the initial hash table, returned through
1851  * the @HashResultPointer as the result hash table.
1852  **/
1853 static void
1854 lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
1855 {
1856 	int t;
1857 	uint32_t TEMP;
1858 	uint32_t A, B, C, D, E;
1859 	t = 16;
1860 	do {
1861 		HashWorkingPointer[t] =
1862 		    S(1,
1863 		      HashWorkingPointer[t - 3] ^ HashWorkingPointer[t -
1864 								     8] ^
1865 		      HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
1866 	} while (++t <= 79);
1867 	t = 0;
1868 	A = HashResultPointer[0];
1869 	B = HashResultPointer[1];
1870 	C = HashResultPointer[2];
1871 	D = HashResultPointer[3];
1872 	E = HashResultPointer[4];
1873 
1874 	do {
1875 		if (t < 20) {
1876 			TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
1877 		} else if (t < 40) {
1878 			TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
1879 		} else if (t < 60) {
1880 			TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
1881 		} else {
1882 			TEMP = (B ^ C ^ D) + 0xCA62C1D6;
1883 		}
1884 		TEMP += S(5, A) + E + HashWorkingPointer[t];
1885 		E = D;
1886 		D = C;
1887 		C = S(30, B);
1888 		B = A;
1889 		A = TEMP;
1890 	} while (++t <= 79);
1891 
1892 	HashResultPointer[0] += A;
1893 	HashResultPointer[1] += B;
1894 	HashResultPointer[2] += C;
1895 	HashResultPointer[3] += D;
1896 	HashResultPointer[4] += E;
1897 
1898 }
1899 
1900 /**
1901  * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
1902  * @RandomChallenge: pointer to the entry of host challenge random number array.
1903  * @HashWorking: pointer to the entry of the working hash array.
1904  *
1905  * This routine calculates the working hash array referred to by @HashWorking
1906  * from the challenge random numbers associated with the host, referred to by
1907  * @RandomChallenge. The result is put into the entry of the working hash
1908  * array and returned by reference through @HashWorking.
1909  **/
1910 static void
1911 lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
1912 {
1913 	*HashWorking = (*RandomChallenge ^ *HashWorking);
1914 }
1915 
1916 /**
1917  * lpfc_hba_init - Perform special handling for LC HBA initialization
1918  * @phba: pointer to lpfc hba data structure.
1919  * @hbainit: pointer to an array of unsigned 32-bit integers.
1920  *
1921  * This routine performs the special handling for LC HBA initialization.
1922  **/
1923 void
1924 lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
1925 {
1926 	int t;
1927 	uint32_t *HashWorking;
1928 	uint32_t *pwwnn = (uint32_t *) phba->wwnn;
1929 
1930 	HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
1931 	if (!HashWorking)
1932 		return;
1933 
1934 	HashWorking[0] = HashWorking[78] = *pwwnn++;
1935 	HashWorking[1] = HashWorking[79] = *pwwnn;
1936 
1937 	for (t = 0; t < 7; t++)
1938 		lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
1939 
1940 	lpfc_sha_init(hbainit);
1941 	lpfc_sha_iterate(hbainit, HashWorking);
1942 	kfree(HashWorking);
1943 }
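
/*
 * A minimal sketch of the flow above (illustrative only): the WWNN seeds
 * words 0/1 and 78/79 of the 80-word working block, the first 7 words are
 * XORed with the host challenge (phba->RandomData), and one SHA-1 style
 * compression yields the 5-word result:
 *
 *	uint32_t hbainit[5];
 *	lpfc_hba_init(phba, hbainit);
 *
 * On return, hbainit holds the 160-bit digest used for the LC HBA setup.
 */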
1944 
1945 /**
1946  * lpfc_cleanup - Performs vport cleanups before deleting a vport
1947  * @vport: pointer to a virtual N_Port data structure.
1948  *
1949  * This routine performs the necessary cleanups before deleting the @vport.
1950  * It invokes the discovery state machine to perform necessary state
1951  * transitions and to release the ndlps associated with the @vport. Note,
1952  * the physical port is treated as @vport 0.
1953  **/
1954 void
1955 lpfc_cleanup(struct lpfc_vport *vport)
1956 {
1957 	struct lpfc_hba   *phba = vport->phba;
1958 	struct lpfc_nodelist *ndlp, *next_ndlp;
1959 	int i = 0;
1960 
1961 	if (phba->link_state > LPFC_LINK_DOWN)
1962 		lpfc_port_link_failure(vport);
1963 
1964 	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
1965 		if (!NLP_CHK_NODE_ACT(ndlp)) {
1966 			ndlp = lpfc_enable_node(vport, ndlp,
1967 						NLP_STE_UNUSED_NODE);
1968 			if (!ndlp)
1969 				continue;
1970 			spin_lock_irq(&phba->ndlp_lock);
1971 			NLP_SET_FREE_REQ(ndlp);
1972 			spin_unlock_irq(&phba->ndlp_lock);
1973 			/* Trigger the release of the ndlp memory */
1974 			lpfc_nlp_put(ndlp);
1975 			continue;
1976 		}
1977 		spin_lock_irq(&phba->ndlp_lock);
1978 		if (NLP_CHK_FREE_REQ(ndlp)) {
1979 			/* The ndlp should not be in memory free mode already */
1980 			spin_unlock_irq(&phba->ndlp_lock);
1981 			continue;
1982 		} else
1983 			/* Indicate request for freeing ndlp memory */
1984 			NLP_SET_FREE_REQ(ndlp);
1985 		spin_unlock_irq(&phba->ndlp_lock);
1986 
1987 		if (vport->port_type != LPFC_PHYSICAL_PORT &&
1988 		    ndlp->nlp_DID == Fabric_DID) {
1989 			/* Just free up ndlp with Fabric_DID for vports */
1990 			lpfc_nlp_put(ndlp);
1991 			continue;
1992 		}
1993 
1994 		if (ndlp->nlp_type & NLP_FABRIC)
1995 			lpfc_disc_state_machine(vport, ndlp, NULL,
1996 					NLP_EVT_DEVICE_RECOVERY);
1997 
1998 		lpfc_disc_state_machine(vport, ndlp, NULL,
1999 					     NLP_EVT_DEVICE_RM);
2000 
2001 	}
2002 
2003 	/* At this point, ALL ndlps should be gone because of the
2004 	 * previous NLP_EVT_DEVICE_RM. Let's wait for this to happen
2005 	 * if needed; the loop below polls every 10 ms for ~30 seconds.
2006 	 */
2007 	while (!list_empty(&vport->fc_nodes)) {
2008 		if (i++ > 3000) {
2009 			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
2010 				"0233 Nodelist not empty\n");
2011 			list_for_each_entry_safe(ndlp, next_ndlp,
2012 						&vport->fc_nodes, nlp_listp) {
2013 				lpfc_printf_vlog(ndlp->vport, KERN_ERR,
2014 						LOG_NODE,
2015 						"0282 did:x%x ndlp:x%p "
2016 						"usgmap:x%x refcnt:%d\n",
2017 						ndlp->nlp_DID, (void *)ndlp,
2018 						ndlp->nlp_usg_map,
2019 						atomic_read(
2020 							&ndlp->kref.refcount));
2021 			}
2022 			break;
2023 		}
2024 
2025 		/* Wait for any activity on ndlps to settle */
2026 		msleep(10);
2027 	}
2028 }
2029 
2030 /**
2031  * lpfc_stop_vport_timers - Stop all the timers associated with a vport
2032  * @vport: pointer to a virtual N_Port data structure.
2033  *
2034  * This routine stops all the timers associated with a @vport. This function
2035  * is invoked before disabling or deleting a @vport. Note that the physical
2036  * port is treated as @vport 0.
2037  **/
2038 void
2039 lpfc_stop_vport_timers(struct lpfc_vport *vport)
2040 {
2041 	del_timer_sync(&vport->els_tmofunc);
2042 	del_timer_sync(&vport->fc_fdmitmo);
2043 	lpfc_can_disctmo(vport);
2044 	return;
2045 }
2046 
2047 /**
2048  * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
2049  * @phba: pointer to lpfc hba data structure.
2050  *
2051  * This routine stops all the timers associated with an HBA. This function is
2052  * invoked before either putting an HBA offline or unloading the driver.
2053  **/
2054 void
2055 lpfc_stop_hba_timers(struct lpfc_hba *phba)
2056 {
2057 	lpfc_stop_vport_timers(phba->pport);
2058 	del_timer_sync(&phba->sli.mbox_tmo);
2059 	del_timer_sync(&phba->fabric_block_timer);
2060 	del_timer_sync(&phba->eratt_poll);
2061 	del_timer_sync(&phba->hb_tmofunc);
2062 	phba->hb_outstanding = 0;
2063 
2064 	switch (phba->pci_dev_grp) {
2065 	case LPFC_PCI_DEV_LP:
2066 		/* Stop any LightPulse device specific driver timers */
2067 		del_timer_sync(&phba->fcp_poll_timer);
2068 		break;
2069 	case LPFC_PCI_DEV_OC:
2070 		/* Stop any OneConnect device specific driver timers */
2071 		break;
2072 	default:
2073 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2074 				"0297 Invalid device group (x%x)\n",
2075 				phba->pci_dev_grp);
2076 		break;
2077 	}
2078 	return;
2079 }
2080 
2081 /**
2082  * lpfc_block_mgmt_io - Mark an HBA's management interface as blocked
2083  * @phba: pointer to lpfc hba data structure.
2084  *
2085  * This routine marks an HBA's management interface as blocked. Once the
2086  * HBA's management interface is marked as blocked, all user space access
2087  * to the HBA, whether from the sysfs interface or the libdfc interface,
2088  * is blocked. The HBA is set to block the management interface when the
2089  * driver prepares the HBA interface for going online or offline.
2090  **/
2091 static void
2092 lpfc_block_mgmt_io(struct lpfc_hba * phba)
2093 {
2094 	unsigned long iflag;
2095 
2096 	spin_lock_irqsave(&phba->hbalock, iflag);
2097 	phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
2098 	spin_unlock_irqrestore(&phba->hbalock, iflag);
2099 }
2100 
2101 /**
2102  * lpfc_online - Initialize and bring an HBA online
2103  * @phba: pointer to lpfc hba data structure.
2104  *
2105  * This routine initializes the HBA and brings it online. During this
2106  * process, the management interface is blocked to prevent user space access
2107  * to the HBA from interfering with the driver initialization.
2108  *
2109  * Return codes
2110  *   0 - successful
2111  *   1 - failed
2112  **/
2113 int
2114 lpfc_online(struct lpfc_hba *phba)
2115 {
2116 	struct lpfc_vport *vport;
2117 	struct lpfc_vport **vports;
2118 	int i;
2119 
2120 	if (!phba)
2121 		return 0;
2122 	vport = phba->pport;
2123 
2124 	if (!(vport->fc_flag & FC_OFFLINE_MODE))
2125 		return 0;
2126 
2127 	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2128 			"0458 Bring Adapter online\n");
2129 
2130 	lpfc_block_mgmt_io(phba);
2131 
2132 	if (!lpfc_sli_queue_setup(phba)) {
2133 		lpfc_unblock_mgmt_io(phba);
2134 		return 1;
2135 	}
2136 
2137 	if (phba->sli_rev == LPFC_SLI_REV4) {
2138 		if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
2139 			lpfc_unblock_mgmt_io(phba);
2140 			return 1;
2141 		}
2142 	} else {
2143 		if (lpfc_sli_hba_setup(phba)) {	/* Initialize SLI2/SLI3 HBA */
2144 			lpfc_unblock_mgmt_io(phba);
2145 			return 1;
2146 		}
2147 	}
2148 
2149 	vports = lpfc_create_vport_work_array(phba);
2150 	if (vports != NULL)
2151 		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2152 			struct Scsi_Host *shost;
2153 			shost = lpfc_shost_from_vport(vports[i]);
2154 			spin_lock_irq(shost->host_lock);
2155 			vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
2156 			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
2157 				vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2158 			if (phba->sli_rev == LPFC_SLI_REV4)
2159 				vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
2160 			spin_unlock_irq(shost->host_lock);
2161 		}
2162 	lpfc_destroy_vport_work_array(phba, vports);
2163 
2164 	lpfc_unblock_mgmt_io(phba);
2165 	return 0;
2166 }
2167 
2168 /**
2169  * lpfc_unblock_mgmt_io - Mark an HBA's management interface as not blocked
2170  * @phba: pointer to lpfc hba data structure.
2171  *
2172  * This routine marks an HBA's management interface as not blocked. Once the
2173  * HBA's management interface is marked as not blocked, all user space
2174  * access to the HBA, whether from the sysfs interface or the libdfc
2175  * interface, is allowed. The HBA is set to block the management interface
2176  * when the driver prepares the HBA interface for going online or offline,
2177  * and set to unblock the management interface afterwards.
2178  **/
2179 void
2180 lpfc_unblock_mgmt_io(struct lpfc_hba * phba)
2181 {
2182 	unsigned long iflag;
2183 
2184 	spin_lock_irqsave(&phba->hbalock, iflag);
2185 	phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
2186 	spin_unlock_irqrestore(&phba->hbalock, iflag);
2187 }
2188 
2189 /**
2190  * lpfc_offline_prep - Prepare an HBA to be brought offline
2191  * @phba: pointer to lpfc hba data structure.
2192  *
2193  * This routine is invoked to prepare an HBA to be brought offline. It issues
2194  * an unreg_login for all the nodes on all vports and flushes the mailbox
2195  * queue to make the HBA ready to be brought offline.
2196  **/
2197 void
2198 lpfc_offline_prep(struct lpfc_hba * phba)
2199 {
2200 	struct lpfc_vport *vport = phba->pport;
2201 	struct lpfc_nodelist  *ndlp, *next_ndlp;
2202 	struct lpfc_vport **vports;
2203 	int i;
2204 
2205 	if (vport->fc_flag & FC_OFFLINE_MODE)
2206 		return;
2207 
2208 	lpfc_block_mgmt_io(phba);
2209 
2210 	lpfc_linkdown(phba);
2211 
2212 	/* Issue an unreg_login to all nodes on all vports */
2213 	vports = lpfc_create_vport_work_array(phba);
2214 	if (vports != NULL) {
2215 		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2216 			struct Scsi_Host *shost;
2217 
2218 			if (vports[i]->load_flag & FC_UNLOADING)
2219 				continue;
2220 			vports[i]->vfi_state &= ~LPFC_VFI_REGISTERED;
2221 			shost =	lpfc_shost_from_vport(vports[i]);
2222 			list_for_each_entry_safe(ndlp, next_ndlp,
2223 						 &vports[i]->fc_nodes,
2224 						 nlp_listp) {
2225 				if (!NLP_CHK_NODE_ACT(ndlp))
2226 					continue;
2227 				if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
2228 					continue;
2229 				if (ndlp->nlp_type & NLP_FABRIC) {
2230 					lpfc_disc_state_machine(vports[i], ndlp,
2231 						NULL, NLP_EVT_DEVICE_RECOVERY);
2232 					lpfc_disc_state_machine(vports[i], ndlp,
2233 						NULL, NLP_EVT_DEVICE_RM);
2234 				}
2235 				spin_lock_irq(shost->host_lock);
2236 				ndlp->nlp_flag &= ~NLP_NPR_ADISC;
2237 				spin_unlock_irq(shost->host_lock);
2238 				lpfc_unreg_rpi(vports[i], ndlp);
2239 			}
2240 		}
2241 	}
2242 	lpfc_destroy_vport_work_array(phba, vports);
2243 
2244 	lpfc_sli_mbox_sys_shutdown(phba);
2245 }
2246 
2247 /**
2248  * lpfc_offline - Bring an HBA offline
2249  * @phba: pointer to lpfc hba data structure.
2250  *
2251  * This routine actually brings a HBA offline. It stops all the timers
2252  * associated with the HBA, brings down the SLI layer, and eventually
2253  * marks the HBA as in offline state for the upper layer protocol.
2254  **/
2255 void
2256 lpfc_offline(struct lpfc_hba *phba)
2257 {
2258 	struct Scsi_Host  *shost;
2259 	struct lpfc_vport **vports;
2260 	int i;
2261 
2262 	if (phba->pport->fc_flag & FC_OFFLINE_MODE)
2263 		return;
2264 
2265 	/* stop port and all timers associated with this hba */
2266 	lpfc_stop_port(phba);
2267 	vports = lpfc_create_vport_work_array(phba);
2268 	if (vports != NULL)
2269 		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
2270 			lpfc_stop_vport_timers(vports[i]);
2271 	lpfc_destroy_vport_work_array(phba, vports);
2272 	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2273 			"0460 Bring Adapter offline\n");
2274 	/* Bring down the SLI Layer and cleanup.  The HBA is offline
2275 	/* Bring down the SLI Layer and cleanup. The HBA is offline
2276 	 * now. */
2277 	spin_lock_irq(&phba->hbalock);
2278 	phba->work_ha = 0;
2279 	spin_unlock_irq(&phba->hbalock);
2280 	vports = lpfc_create_vport_work_array(phba);
2281 	if (vports != NULL)
2282 		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2283 			shost = lpfc_shost_from_vport(vports[i]);
2284 			spin_lock_irq(shost->host_lock);
2285 			vports[i]->work_port_events = 0;
2286 			vports[i]->fc_flag |= FC_OFFLINE_MODE;
2287 			spin_unlock_irq(shost->host_lock);
2288 		}
2289 	lpfc_destroy_vport_work_array(phba, vports);
2290 }
2291 
2292 /**
2293  * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
2294  * @phba: pointer to lpfc hba data structure.
2295  *
2296  * This routine is to free all the SCSI buffers and IOCBs from the driver
2297  * lists back to the kernel. It is called from lpfc_pci_remove_one to free
2298  * the internal resources before the device is removed from the system.
2299  *
2300  * Return codes
2301  *   0 - successful (for now, it always returns 0)
2302  **/
2303 static int
2304 lpfc_scsi_free(struct lpfc_hba *phba)
2305 {
2306 	struct lpfc_scsi_buf *sb, *sb_next;
2307 	struct lpfc_iocbq *io, *io_next;
2308 
2309 	spin_lock_irq(&phba->hbalock);
2310 	/* Release all the lpfc_scsi_bufs maintained by this host. */
2311 	list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) {
2312 		list_del(&sb->list);
2313 		pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
2314 			      sb->dma_handle);
2315 		kfree(sb);
2316 		phba->total_scsi_bufs--;
2317 	}
2318 
2319 	/* Release all the lpfc_iocbq entries maintained by this host. */
2320 	list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) {
2321 		list_del(&io->list);
2322 		kfree(io);
2323 		phba->total_iocbq_bufs--;
2324 	}
2325 
2326 	spin_unlock_irq(&phba->hbalock);
2327 
2328 	return 0;
2329 }
2330 
2331 /**
2332  * lpfc_create_port - Create an FC port
2333  * @phba: pointer to lpfc hba data structure.
2334  * @instance: a unique integer ID to this FC port.
2335  * @dev: pointer to the device data structure.
2336  *
2337  * This routine creates an FC port for the upper layer protocol. The FC port
2338  * can be created on top of either a physical port or a virtual port provided
2339  * by the HBA. This routine also allocates a SCSI host data structure (shost)
2340  * and associates it with the FC port before adding the shost to the SCSI
2341  * layer.
2342  *
2343  * Return codes
2344  *   @vport - pointer to the virtual N_Port data structure.
2345  *   NULL - port create failed.
2346  **/
2347 struct lpfc_vport *
2348 lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
2349 {
2350 	struct lpfc_vport *vport;
2351 	struct Scsi_Host  *shost;
2352 	int error = 0;
2353 
2354 	if (dev != &phba->pcidev->dev)
2355 		shost = scsi_host_alloc(&lpfc_vport_template,
2356 					sizeof(struct lpfc_vport));
2357 	else
2358 		shost = scsi_host_alloc(&lpfc_template,
2359 					sizeof(struct lpfc_vport));
2360 	if (!shost)
2361 		goto out;
2362 
2363 	vport = (struct lpfc_vport *) shost->hostdata;
2364 	vport->phba = phba;
2365 	vport->load_flag |= FC_LOADING;
2366 	vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2367 	vport->fc_rscn_flush = 0;
2368 
2369 	lpfc_get_vport_cfgparam(vport);
2370 	shost->unique_id = instance;
2371 	shost->max_id = LPFC_MAX_TARGET;
2372 	shost->max_lun = vport->cfg_max_luns;
2373 	shost->this_id = -1;
2374 	shost->max_cmd_len = 16;
2375 	if (phba->sli_rev == LPFC_SLI_REV4) {
2376 		shost->dma_boundary = LPFC_SLI4_MAX_SEGMENT_SIZE;
2377 		shost->sg_tablesize = phba->cfg_sg_seg_cnt;
2378 	}
2379 
2380 	/*
2381 	 * Set initial can_queue value since 0 is no longer supported and
2382 	 * scsi_add_host will fail. This will be adjusted later based on the
2383 	 * max xri value determined in hba setup.
2384 	 */
2385 	shost->can_queue = phba->cfg_hba_queue_depth - 10;
2386 	if (dev != &phba->pcidev->dev) {
2387 		shost->transportt = lpfc_vport_transport_template;
2388 		vport->port_type = LPFC_NPIV_PORT;
2389 	} else {
2390 		shost->transportt = lpfc_transport_template;
2391 		vport->port_type = LPFC_PHYSICAL_PORT;
2392 	}
2393 
2394 	/* Initialize all internally managed lists. */
2395 	INIT_LIST_HEAD(&vport->fc_nodes);
2396 	INIT_LIST_HEAD(&vport->rcv_buffer_list);
2397 	spin_lock_init(&vport->work_port_lock);
2398 
2399 	init_timer(&vport->fc_disctmo);
2400 	vport->fc_disctmo.function = lpfc_disc_timeout;
2401 	vport->fc_disctmo.data = (unsigned long)vport;
2402 
2403 	init_timer(&vport->fc_fdmitmo);
2404 	vport->fc_fdmitmo.function = lpfc_fdmi_tmo;
2405 	vport->fc_fdmitmo.data = (unsigned long)vport;
2406 
2407 	init_timer(&vport->els_tmofunc);
2408 	vport->els_tmofunc.function = lpfc_els_timeout;
2409 	vport->els_tmofunc.data = (unsigned long)vport;
2410 
2411 	error = scsi_add_host(shost, dev);
2412 	if (error)
2413 		goto out_put_shost;
2414 
2415 	spin_lock_irq(&phba->hbalock);
2416 	list_add_tail(&vport->listentry, &phba->port_list);
2417 	spin_unlock_irq(&phba->hbalock);
2418 	return vport;
2419 
2420 out_put_shost:
2421 	scsi_host_put(shost);
2422 out:
2423 	return NULL;
2424 }
2425 
2426 /**
2427  * destroy_port - Destroy an FC port
2428  * @vport: pointer to an lpfc virtual N_Port data structure.
2429  *
2430  * This routine destroys an FC port from the upper layer protocol. All the
2431  * resources associated with the port are released.
2432  **/
2433 void
2434 destroy_port(struct lpfc_vport *vport)
2435 {
2436 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2437 	struct lpfc_hba  *phba = vport->phba;
2438 
2439 	lpfc_debugfs_terminate(vport);
2440 	fc_remove_host(shost);
2441 	scsi_remove_host(shost);
2442 
2443 	spin_lock_irq(&phba->hbalock);
2444 	list_del_init(&vport->listentry);
2445 	spin_unlock_irq(&phba->hbalock);
2446 
2447 	lpfc_cleanup(vport);
2448 	return;
2449 }
2450 
2451 /**
2452  * lpfc_get_instance - Get a unique integer ID
2453  *
2454  * This routine allocates a unique integer ID from the lpfc_hba_index pool.
2455  * It uses the kernel idr facility to perform the task.
2456  *
2457  * Return codes:
2458  *   instance - a unique integer ID allocated as the new instance.
2459  *   -1 - lpfc get instance failed.
2460  **/
2461 int
2462 lpfc_get_instance(void)
2463 {
2464 	int instance = 0;
2465 
2466 	/* Assign an unused number */
2467 	if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL))
2468 		return -1;
2469 	if (idr_get_new(&lpfc_hba_index, NULL, &instance))
2470 		return -1;
2471 	return instance;
2472 }
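
/*
 * A minimal usage sketch (illustrative; the actual probe path may differ):
 *
 *	int instance = lpfc_get_instance();
 *	if (instance == -1)
 *		return NULL;
 *	vport = lpfc_create_port(phba, instance, &phba->pcidev->dev);
 */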
2473 
2474 /**
2475  * lpfc_scan_finished - method for SCSI layer to detect whether scan is done
2476  * @shost: pointer to SCSI host data structure.
2477  * @time: elapsed time of the scan in jiffies.
2478  *
2479  * This routine is called by the SCSI layer with a SCSI host to determine
2480  * whether the host scan is finished.
2481  *
2482  * Note: there is no scan_start function as adapter initialization will have
2483  * asynchronously kicked off the link initialization.
2484  *
2485  * Return codes
2486  *   0 - SCSI host scan is not over yet.
2487  *   1 - SCSI host scan is over.
2488  **/
2489 int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
2490 {
2491 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2492 	struct lpfc_hba   *phba = vport->phba;
2493 	int stat = 0;
2494 
2495 	spin_lock_irq(shost->host_lock);
2496 
2497 	if (vport->load_flag & FC_UNLOADING) {
2498 		stat = 1;
2499 		goto finished;
2500 	}
2501 	if (time >= 30 * HZ) {
2502 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2503 				"0461 Scanning longer than 30 "
2504 				"seconds.  Continuing initialization\n");
2505 		stat = 1;
2506 		goto finished;
2507 	}
2508 	if (time >= 15 * HZ && phba->link_state <= LPFC_LINK_DOWN) {
2509 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2510 				"0465 Link down longer than 15 "
2511 				"seconds.  Continuing initialization\n");
2512 		stat = 1;
2513 		goto finished;
2514 	}
2515 
2516 	if (vport->port_state != LPFC_VPORT_READY)
2517 		goto finished;
2518 	if (vport->num_disc_nodes || vport->fc_prli_sent)
2519 		goto finished;
2520 	if (vport->fc_map_cnt == 0 && time < 2 * HZ)
2521 		goto finished;
2522 	if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
2523 		goto finished;
2524 
2525 	stat = 1;
2526 
2527 finished:
2528 	spin_unlock_irq(shost->host_lock);
2529 	return stat;
2530 }
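
/*
 * The SCSI midlayer polls lpfc_scan_finished() through the
 * scsi_host_template .scan_finished hook; a sketch of the assumed wiring
 * (the template itself is defined elsewhere in the driver):
 *
 *	static struct scsi_host_template lpfc_template = {
 *		...
 *		.scan_finished = lpfc_scan_finished,
 *	};
 */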
2531 
2532 /**
2533  * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port
2534  * @shost: pointer to SCSI host data structure.
2535  *
2536  * This routine initializes a given SCSI host attributes on a FC port. The
2537  * SCSI host can be either on top of a physical port or a virtual port.
2538  **/
2539 void lpfc_host_attrib_init(struct Scsi_Host *shost)
2540 {
2541 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2542 	struct lpfc_hba   *phba = vport->phba;
2543 	/*
2544 	 * Set fixed host attributes. Must be done after lpfc_sli_hba_setup().
2545 	 */
2546 
2547 	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
2548 	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
2549 	fc_host_supported_classes(shost) = FC_COS_CLASS3;
2550 
2551 	memset(fc_host_supported_fc4s(shost), 0,
2552 	       sizeof(fc_host_supported_fc4s(shost)));
2553 	fc_host_supported_fc4s(shost)[2] = 1;
2554 	fc_host_supported_fc4s(shost)[7] = 1;
2555 
2556 	lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
2557 				 sizeof fc_host_symbolic_name(shost));
2558 
2559 	fc_host_supported_speeds(shost) = 0;
2560 	if (phba->lmt & LMT_10Gb)
2561 		fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
2562 	if (phba->lmt & LMT_8Gb)
2563 		fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
2564 	if (phba->lmt & LMT_4Gb)
2565 		fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
2566 	if (phba->lmt & LMT_2Gb)
2567 		fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
2568 	if (phba->lmt & LMT_1Gb)
2569 		fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
2570 
2571 	fc_host_maxframe_size(shost) =
2572 		(((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
2573 		(uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;
2574 
2575 	/* This value is also unchanging */
2576 	memset(fc_host_active_fc4s(shost), 0,
2577 	       sizeof(fc_host_active_fc4s(shost)));
2578 	fc_host_active_fc4s(shost)[2] = 1;
2579 	fc_host_active_fc4s(shost)[7] = 1;
2580 
2581 	fc_host_max_npiv_vports(shost) = phba->max_vpi;
2582 	spin_lock_irq(shost->host_lock);
2583 	vport->load_flag &= ~FC_LOADING;
2584 	spin_unlock_irq(shost->host_lock);
2585 }
2586 
2587 /**
2588  * lpfc_stop_port_s3 - Stop SLI3 device port
2589  * @phba: pointer to lpfc hba data structure.
2590  *
2591  * This routine is invoked to stop an SLI3 device port; it stops the device
2592  * from generating interrupts and stops the device driver's timers for the
2593  * device.
2594  **/
2595 static void
2596 lpfc_stop_port_s3(struct lpfc_hba *phba)
2597 {
2598 	/* Clear all interrupt enable conditions */
2599 	writel(0, phba->HCregaddr);
2600 	readl(phba->HCregaddr); /* flush */
2601 	/* Clear all pending interrupts */
2602 	writel(0xffffffff, phba->HAregaddr);
2603 	readl(phba->HAregaddr); /* flush */
2604 
2605 	/* Reset some HBA SLI setup states */
2606 	lpfc_stop_hba_timers(phba);
2607 	phba->pport->work_port_events = 0;
2608 }
2609 
2610 /**
2611  * lpfc_stop_port_s4 - Stop SLI4 device port
2612  * @phba: pointer to lpfc hba data structure.
2613  *
2614  * This routine is invoked to stop an SLI4 device port; it stops the device
2615  * from generating interrupts and stops the device driver's timers for the
2616  * device.
2617  **/
2618 static void
2619 lpfc_stop_port_s4(struct lpfc_hba *phba)
2620 {
2621 	/* Reset some HBA SLI4 setup states */
2622 	lpfc_stop_hba_timers(phba);
2623 	phba->pport->work_port_events = 0;
2624 	phba->sli4_hba.intr_enable = 0;
2625 	/* Hard clear it for now, shall have more graceful way to wait later */
2626 	/* Hard clear it for now; a more graceful wait may be added later */
2627 }
2628 
2629 /**
2630  * lpfc_stop_port - Wrapper function for stopping hba port
2631  * @phba: Pointer to HBA context object.
2632  *
2633  * This routine wraps the actual SLI3 or SLI4 hba stop port routine from
2634  * the API jump table function pointer from the lpfc_hba struct.
2635  **/
2636 void
2637 lpfc_stop_port(struct lpfc_hba *phba)
2638 {
2639 	phba->lpfc_stop_port(phba);
2640 }
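
/*
 * The phba->lpfc_stop_port entry is installed per PCI device group during
 * API table setup; a sketch of the assumed assignment:
 *
 *	phba->lpfc_stop_port = (dev_grp == LPFC_PCI_DEV_OC) ?
 *				lpfc_stop_port_s4 : lpfc_stop_port_s3;
 */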
2641 
2642 /**
2643  * lpfc_sli_remove_dflt_fcf - Remove the driver default fcf record from the port.
2644  * @phba: pointer to lpfc hba data structure.
2645  *
2646  * This routine is invoked to remove the driver default fcf record from
2647  * the port.  This routine currently acts on FCF Index 0.
2648  *
2649  **/
2650 void
2651 lpfc_sli_remove_dflt_fcf(struct lpfc_hba *phba)
2652 {
2653 	int rc = 0;
2654 	LPFC_MBOXQ_t *mboxq;
2655 	struct lpfc_mbx_del_fcf_tbl_entry *del_fcf_record;
2656 	uint32_t mbox_tmo, req_len;
2657 	uint32_t shdr_status, shdr_add_status;
2658 
2659 	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2660 	if (!mboxq) {
2661 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2662 			"2020 Failed to allocate mbox for DEL_FCF cmd\n");
2663 		return;
2664 	}
2665 
2666 	req_len = sizeof(struct lpfc_mbx_del_fcf_tbl_entry) -
2667 		  sizeof(struct lpfc_sli4_cfg_mhdr);
2668 	rc = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
2669 			      LPFC_MBOX_OPCODE_FCOE_DELETE_FCF,
2670 			      req_len, LPFC_SLI4_MBX_EMBED);
2671 	/*
2672 	 * In phase 1, there is a single FCF index, 0.  In phase 2, the driver
2673 	 * supports multiple FCF indices.
2674 	 */
2675 	del_fcf_record = &mboxq->u.mqe.un.del_fcf_entry;
2676 	bf_set(lpfc_mbx_del_fcf_tbl_count, del_fcf_record, 1);
2677 	bf_set(lpfc_mbx_del_fcf_tbl_index, del_fcf_record,
2678 	       phba->fcf.fcf_indx);
2679 
2680 	if (!phba->sli4_hba.intr_enable)
2681 		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
2682 	else {
2683 		mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
2684 		rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
2685 	}
2686 	/* The IOCTL status is embedded in the mailbox subheader. */
2687 	shdr_status = bf_get(lpfc_mbox_hdr_status,
2688 			     &del_fcf_record->header.cfg_shdr.response);
2689 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
2690 				 &del_fcf_record->header.cfg_shdr.response);
2691 	if (shdr_status || shdr_add_status || rc != MBX_SUCCESS) {
2692 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2693 				"2516 DEL FCF of default FCF Index failed "
2694 				"mbx status x%x, status x%x add_status x%x\n",
2695 				rc, shdr_status, shdr_add_status);
2696 	}
2697 	if (rc != MBX_TIMEOUT)
2698 		mempool_free(mboxq, phba->mbox_mem_pool);
2699 }
2700 
2701 /**
2702  * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
2703  * @phba: pointer to lpfc hba data structure.
2704  * @acqe_link: pointer to the async link completion queue entry.
2705  *
2706  * This routine is to parse the SLI4 link-attention link fault code and
2707  * translate it into the base driver's read link attention mailbox command
2708  * status.
2709  *
2710  * Return: Link-attention status in terms of base driver's coding.
2711  **/
2712 static uint16_t
2713 lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
2714 			   struct lpfc_acqe_link *acqe_link)
2715 {
2716 	uint16_t latt_fault;
2717 
2718 	switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
2719 	case LPFC_ASYNC_LINK_FAULT_NONE:
2720 	case LPFC_ASYNC_LINK_FAULT_LOCAL:
2721 	case LPFC_ASYNC_LINK_FAULT_REMOTE:
2722 		latt_fault = 0;
2723 		break;
2724 	default:
2725 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2726 				"0398 Invalid link fault code: x%x\n",
2727 				bf_get(lpfc_acqe_link_fault, acqe_link));
2728 		latt_fault = MBXERR_ERROR;
2729 		break;
2730 	}
2731 	return latt_fault;
2732 }
2733 
2734 /**
2735  * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
2736  * @phba: pointer to lpfc hba data structure.
2737  * @acqe_link: pointer to the async link completion queue entry.
2738  *
2739  * This routine is to parse the SLI4 link attention type and translate it
2740  * into the base driver's link attention type coding.
2741  *
2742  * Return: Link attention type in terms of base driver's coding.
2743  **/
2744 static uint8_t
2745 lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
2746 			  struct lpfc_acqe_link *acqe_link)
2747 {
2748 	uint8_t att_type;
2749 
2750 	switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
2751 	case LPFC_ASYNC_LINK_STATUS_DOWN:
2752 	case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
2753 		att_type = AT_LINK_DOWN;
2754 		break;
2755 	case LPFC_ASYNC_LINK_STATUS_UP:
2756 		/* Ignore physical link up events - wait for logical link up */
2757 		att_type = AT_RESERVED;
2758 		break;
2759 	case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
2760 		att_type = AT_LINK_UP;
2761 		break;
2762 	default:
2763 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2764 				"0399 Invalid link attention type: x%x\n",
2765 				bf_get(lpfc_acqe_link_status, acqe_link));
2766 		att_type = AT_RESERVED;
2767 		break;
2768 	}
2769 	return att_type;
2770 }
2771 
2772 /**
2773  * lpfc_sli4_parse_latt_link_speed - Parse sli4 link-attention link speed
2774  * @phba: pointer to lpfc hba data structure.
2775  * @acqe_link: pointer to the async link completion queue entry.
2776  *
2777  * This routine is to parse the SLI4 link-attention link speed and translate
2778  * it into the base driver's link-attention link speed coding.
2779  *
2780  * Return: Link-attention link speed in terms of base driver's coding.
2781  **/
2782 static uint8_t
2783 lpfc_sli4_parse_latt_link_speed(struct lpfc_hba *phba,
2784 				struct lpfc_acqe_link *acqe_link)
2785 {
2786 	uint8_t link_speed;
2787 
2788 	switch (bf_get(lpfc_acqe_link_speed, acqe_link)) {
2789 	case LPFC_ASYNC_LINK_SPEED_ZERO:
2790 		link_speed = LA_UNKNW_LINK;
2791 		break;
2792 	case LPFC_ASYNC_LINK_SPEED_10MBPS:
2793 		link_speed = LA_UNKNW_LINK;
2794 		break;
2795 	case LPFC_ASYNC_LINK_SPEED_100MBPS:
2796 		link_speed = LA_UNKNW_LINK;
2797 		break;
2798 	case LPFC_ASYNC_LINK_SPEED_1GBPS:
2799 		link_speed = LA_1GHZ_LINK;
2800 		break;
2801 	case LPFC_ASYNC_LINK_SPEED_10GBPS:
2802 		link_speed = LA_10GHZ_LINK;
2803 		break;
2804 	default:
2805 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2806 				"0483 Invalid link-attention link speed: x%x\n",
2807 				bf_get(lpfc_acqe_link_speed, acqe_link));
2808 		link_speed = LA_UNKNW_LINK;
2809 		break;
2810 	}
2811 	return link_speed;
2812 }
2813 
2814 /**
2815  * lpfc_sli4_async_link_evt - Process the asynchronous link event
2816  * @phba: pointer to lpfc hba data structure.
2817  * @acqe_link: pointer to the async link completion queue entry.
2818  *
2819  * This routine is to handle the SLI4 asynchronous link event.
2820  **/
2821 static void
2822 lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
2823 			 struct lpfc_acqe_link *acqe_link)
2824 {
2825 	struct lpfc_dmabuf *mp;
2826 	LPFC_MBOXQ_t *pmb;
2827 	MAILBOX_t *mb;
2828 	READ_LA_VAR *la;
2829 	uint8_t att_type;
2830 
2831 	att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
2832 	if (att_type != AT_LINK_DOWN && att_type != AT_LINK_UP)
2833 		return;
2834 	phba->fcoe_eventtag = acqe_link->event_tag;
2835 	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2836 	if (!pmb) {
2837 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2838 				"0395 The mboxq allocation failed\n");
2839 		return;
2840 	}
2841 	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2842 	if (!mp) {
2843 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2844 				"0396 The lpfc_dmabuf allocation failed\n");
2845 		goto out_free_pmb;
2846 	}
2847 	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
2848 	if (!mp->virt) {
2849 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2850 				"0397 The mbuf allocation failed\n");
2851 		goto out_free_dmabuf;
2852 	}
2853 
2854 	/* Cleanup any outstanding ELS commands */
2855 	lpfc_els_flush_all_cmd(phba);
2856 
2857 	/* Block ELS IOCBs until we have done process link event */
2858 	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
2859 
2860 	/* Update link event statistics */
2861 	phba->sli.slistat.link_event++;
2862 
2863 	/* Create pseudo lpfc_handle_latt mailbox command from link ACQE */
2864 	lpfc_read_la(phba, pmb, mp);
2865 	pmb->vport = phba->pport;
2866 
2867 	/* Parse and translate status field */
2868 	mb = &pmb->u.mb;
2869 	mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link);
2870 
2871 	/* Parse and translate link attention fields */
2872 	la = (READ_LA_VAR *) &pmb->u.mb.un.varReadLA;
2873 	la->eventTag = acqe_link->event_tag;
2874 	la->attType = att_type;
2875 	la->UlnkSpeed = lpfc_sli4_parse_latt_link_speed(phba, acqe_link);
2876 
2877 	/* Fake the following irrelevant fields */
2878 	la->topology = TOPOLOGY_PT_PT;
2879 	la->granted_AL_PA = 0;
2880 	la->il = 0;
2881 	la->pb = 0;
2882 	la->fa = 0;
2883 	la->mm = 0;
2884 
2885 	/* Keep the link status for extra SLI4 state machine reference */
2886 	phba->sli4_hba.link_state.speed =
2887 				bf_get(lpfc_acqe_link_speed, acqe_link);
2888 	phba->sli4_hba.link_state.duplex =
2889 				bf_get(lpfc_acqe_link_duplex, acqe_link);
2890 	phba->sli4_hba.link_state.status =
2891 				bf_get(lpfc_acqe_link_status, acqe_link);
2892 	phba->sli4_hba.link_state.physical =
2893 				bf_get(lpfc_acqe_link_physical, acqe_link);
2894 	phba->sli4_hba.link_state.fault =
2895 				bf_get(lpfc_acqe_link_fault, acqe_link);
2896 
2897 	/* Invoke the lpfc_handle_latt mailbox command callback function */
2898 	lpfc_mbx_cmpl_read_la(phba, pmb);
2899 
2900 	return;
2901 
2902 out_free_dmabuf:
2903 	kfree(mp);
2904 out_free_pmb:
2905 	mempool_free(pmb, phba->mbox_mem_pool);
2906 }
2907 
2908 /**
2909  * lpfc_sli4_async_fcoe_evt - Process the asynchronous fcoe event
2910  * @phba: pointer to lpfc hba data structure.
2911  * @acqe_fcoe: pointer to the async fcoe completion queue entry.
2912  *
2913  * This routine is to handle the SLI4 asynchronous fcoe event.
2914  **/
2915 static void
2916 lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
2917 			 struct lpfc_acqe_fcoe *acqe_fcoe)
2918 {
2919 	uint8_t event_type = bf_get(lpfc_acqe_fcoe_event_type, acqe_fcoe);
2920 	int rc;
2921 
2922 	phba->fcoe_eventtag = acqe_fcoe->event_tag;
2923 	switch (event_type) {
2924 	case LPFC_FCOE_EVENT_TYPE_NEW_FCF:
2925 		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2926 			"2546 New FCF found index 0x%x tag 0x%x\n",
2927 			acqe_fcoe->fcf_index,
2928 			acqe_fcoe->event_tag);
2929 		/*
2930 		 * If the current FCF is in the discovered state, or
2931 		 * FCF discovery is in progress, do nothing.
2932 		 */
2933 		spin_lock_irq(&phba->hbalock);
2934 		if ((phba->fcf.fcf_flag & FCF_DISCOVERED) ||
2935 		   (phba->hba_flag & FCF_DISC_INPROGRESS)) {
2936 			spin_unlock_irq(&phba->hbalock);
2937 			break;
2938 		}
2939 		spin_unlock_irq(&phba->hbalock);
2940 
2941 		/* Read the FCF table and re-discover SAN. */
2942 		rc = lpfc_sli4_read_fcf_record(phba,
2943 			LPFC_FCOE_FCF_GET_FIRST);
2944 		if (rc)
2945 			lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2946 				"2547 Read FCF record failed 0x%x\n",
2947 				rc);
2948 		break;
2949 
2950 	case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL:
2951 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2952 			"2548 FCF Table full count 0x%x tag 0x%x\n",
2953 			bf_get(lpfc_acqe_fcoe_fcf_count, acqe_fcoe),
2954 			acqe_fcoe->event_tag);
2955 		break;
2956 
2957 	case LPFC_FCOE_EVENT_TYPE_FCF_DEAD:
2958 		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2959 			"2549 FCF disconnected from network index 0x%x"
2960 			" tag 0x%x\n", acqe_fcoe->fcf_index,
2961 			acqe_fcoe->event_tag);
2962 		/* If the event is not for currently used fcf do nothing */
2963 		if (phba->fcf.fcf_indx != acqe_fcoe->fcf_index)
2964 			break;
2965 		/*
2966 		 * Currently, the driver supports only one FCF - so treat this
2967 		 * as a link down.
2968 		 */
2969 		lpfc_linkdown(phba);
2970 		/* Unregister FCF if no devices connected to it */
2971 		lpfc_unregister_unused_fcf(phba);
2972 		break;
2973 
2974 	default:
2975 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2976 			"0288 Unknown FCoE event type 0x%x event tag "
2977 			"0x%x\n", event_type, acqe_fcoe->event_tag);
2978 		break;
2979 	}
2980 }
2981 
2982 /**
2983  * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
2984  * @phba: pointer to lpfc hba data structure.
2985  * @acqe_dcbx: pointer to the async dcbx completion queue entry.
2986  *
2987  * This routine is to handle the SLI4 asynchronous dcbx event.
2988  **/
2989 static void
2990 lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
2991 			 struct lpfc_acqe_dcbx *acqe_dcbx)
2992 {
2993 	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2994 			"0290 The SLI4 DCBX asynchronous event is not "
2995 			"handled yet\n");
2996 }
2997 
2998 /**
2999  * lpfc_sli4_async_event_proc - Process all the pending asynchronous events
3000  * @phba: pointer to lpfc hba data structure.
3001  *
3002  * This routine is invoked by the worker thread to process all the pending
3003  * SLI4 asynchronous events.
3004  **/
3005 void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
3006 {
3007 	struct lpfc_cq_event *cq_event;
3008 
3009 	/* First, declare the async event has been handled */
3010 	spin_lock_irq(&phba->hbalock);
3011 	phba->hba_flag &= ~ASYNC_EVENT;
3012 	spin_unlock_irq(&phba->hbalock);
3013 	/* Now, handle all the async events */
3014 	while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
3015 		/* Get the first event from the head of the event queue */
3016 		spin_lock_irq(&phba->hbalock);
3017 		list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
3018 				 cq_event, struct lpfc_cq_event, list);
3019 		spin_unlock_irq(&phba->hbalock);
3020 		/* Process the asynchronous event */
3021 		switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
3022 		case LPFC_TRAILER_CODE_LINK:
3023 			lpfc_sli4_async_link_evt(phba,
3024 						 &cq_event->cqe.acqe_link);
3025 			break;
3026 		case LPFC_TRAILER_CODE_FCOE:
3027 			lpfc_sli4_async_fcoe_evt(phba,
3028 						 &cq_event->cqe.acqe_fcoe);
3029 			break;
3030 		case LPFC_TRAILER_CODE_DCBX:
3031 			lpfc_sli4_async_dcbx_evt(phba,
3032 						 &cq_event->cqe.acqe_dcbx);
3033 			break;
3034 		default:
3035 			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3036 					"1804 Invalid asynchronous event code: "
3037 					"x%x\n", bf_get(lpfc_trailer_code,
3038 					&cq_event->cqe.mcqe_cmpl));
3039 			break;
3040 		}
3041 		/* Free the completion event processed to the free pool */
3042 		lpfc_sli4_cq_event_release(phba, cq_event);
3043 	}
3044 }
3045 
3046 /**
3047  * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
3048  * @phba: pointer to lpfc hba data structure.
3049  * @dev_grp: The HBA PCI-Device group number.
3050  *
3051  * This routine is invoked to set up the per HBA PCI-Device group function
3052  * API jump table entries.
3053  *
3054  * Return: 0 if success, otherwise -ENODEV
3055  **/
3056 int
3057 lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
3058 {
3059 	int rc;
3060 
3061 	/* Set up lpfc PCI-device group */
3062 	phba->pci_dev_grp = dev_grp;
3063 
3064 	/* The LPFC_PCI_DEV_OC uses SLI4 */
3065 	if (dev_grp == LPFC_PCI_DEV_OC)
3066 		phba->sli_rev = LPFC_SLI_REV4;
3067 
3068 	/* Set up device INIT API function jump table */
3069 	rc = lpfc_init_api_table_setup(phba, dev_grp);
3070 	if (rc)
3071 		return -ENODEV;
3072 	/* Set up SCSI API function jump table */
3073 	rc = lpfc_scsi_api_table_setup(phba, dev_grp);
3074 	if (rc)
3075 		return -ENODEV;
3076 	/* Set up SLI API function jump table */
3077 	rc = lpfc_sli_api_table_setup(phba, dev_grp);
3078 	if (rc)
3079 		return -ENODEV;
3080 	/* Set up MBOX API function jump table */
3081 	rc = lpfc_mbox_api_table_setup(phba, dev_grp);
3082 	if (rc)
3083 		return -ENODEV;
3084 
3085 	return 0;
3086 }
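
/*
 * Typical call (illustrative): the PCI probe path selects the device
 * group for the probed device and installs the matching jump tables, e.g.
 *
 *	rc = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
 *	if (rc)
 *		return rc;
 */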
3087 
3088 /**
3089  * lpfc_log_intr_mode - Log the active interrupt mode
3090  * @phba: pointer to lpfc hba data structure.
3091  * @intr_mode: active interrupt mode adopted.
3092  *
3093  * This routine is invoked to log the currently used active interrupt mode
3094  * to the device.
3095  **/
3096 static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
3097 {
3098 	switch (intr_mode) {
3099 	case 0:
3100 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3101 				"0470 Enabled INTx interrupt mode.\n");
3102 		break;
3103 	case 1:
3104 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3105 				"0481 Enabled MSI interrupt mode.\n");
3106 		break;
3107 	case 2:
3108 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3109 				"0480 Enabled MSI-X interrupt mode.\n");
3110 		break;
3111 	default:
3112 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3113 				"0482 Illegal interrupt mode.\n");
3114 		break;
3115 	}
3116 	return;
3117 }
3118 
3119 /**
3120  * lpfc_enable_pci_dev - Enable a generic PCI device.
3121  * @phba: pointer to lpfc hba data structure.
3122  *
3123  * This routine is invoked to enable the PCI device that is common to all
3124  * PCI devices.
3125  *
3126  * Return codes
3127  * 	0 - successful
3128  * 	other values - error
3129  **/
3130 static int
3131 lpfc_enable_pci_dev(struct lpfc_hba *phba)
3132 {
3133 	struct pci_dev *pdev;
3134 	int bars;
3135 
3136 	/* Obtain PCI device reference */
3137 	if (!phba->pcidev)
3138 		goto out_error;
3139 	else
3140 		pdev = phba->pcidev;
3141 	/* Select PCI BARs */
3142 	bars = pci_select_bars(pdev, IORESOURCE_MEM);
3143 	/* Enable PCI device */
3144 	if (pci_enable_device_mem(pdev))
3145 		goto out_error;
3146 	/* Request PCI resource for the device */
3147 	if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME))
3148 		goto out_disable_device;
3149 	/* Set up device as PCI master and save state for EEH */
3150 	pci_set_master(pdev);
3151 	pci_try_set_mwi(pdev);
3152 	pci_save_state(pdev);
3153 
3154 	return 0;
3155 
3156 out_disable_device:
3157 	pci_disable_device(pdev);
3158 out_error:
3159 	return -ENODEV;
3160 }
3161 
3162 /**
3163  * lpfc_disable_pci_dev - Disable a generic PCI device.
3164  * @phba: pointer to lpfc hba data structure.
3165  *
3166  * This routine is invoked to disable the PCI device that is common to all
3167  * PCI devices.
3168  **/
3169 static void
3170 lpfc_disable_pci_dev(struct lpfc_hba *phba)
3171 {
3172 	struct pci_dev *pdev;
3173 	int bars;
3174 
3175 	/* Obtain PCI device reference */
3176 	if (!phba->pcidev)
3177 		return;
3178 	else
3179 		pdev = phba->pcidev;
3180 	/* Select PCI BARs */
3181 	bars = pci_select_bars(pdev, IORESOURCE_MEM);
3182 	/* Release PCI resource and disable PCI device */
3183 	pci_release_selected_regions(pdev, bars);
3184 	pci_disable_device(pdev);
3185 	/* Null out PCI private reference to driver */
3186 	pci_set_drvdata(pdev, NULL);
3187 
3188 	return;
3189 }
3190 
3191 /**
3192  * lpfc_reset_hba - Reset a hba
3193  * @phba: pointer to lpfc hba data structure.
3194  *
3195  * This routine is invoked to reset a hba device. It brings the HBA
3196  * offline, performs a board restart, and then brings the board back
3197  * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up
3198  * outstanding mailbox commands.
3199  **/
3200 void
3201 lpfc_reset_hba(struct lpfc_hba *phba)
3202 {
3203 	/* If resets are disabled then set error state and return. */
3204 	if (!phba->cfg_enable_hba_reset) {
3205 		phba->link_state = LPFC_HBA_ERROR;
3206 		return;
3207 	}
3208 	lpfc_offline_prep(phba);
3209 	lpfc_offline(phba);
3210 	lpfc_sli_brdrestart(phba);
3211 	lpfc_online(phba);
3212 	lpfc_unblock_mgmt_io(phba);
3213 }
3214 
3215 /**
3216  * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev.
3217  * @phba: pointer to lpfc hba data structure.
3218  *
3219  * This routine is invoked to set up the driver internal resources specific to
3220  * support the SLI-3 HBA device it attached to.
3221  * support the SLI-3 HBA device it is attached to.
3222  * Return codes
3223  * 	0 - successful
3224  * 	other values - error
3225  **/
3226 static int
3227 lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
3228 {
3229 	struct lpfc_sli *psli;
3230 
3231 	/*
3232 	 * Initialize timers used by driver
3233 	 */
3234 
3235 	/* Heartbeat timer */
3236 	init_timer(&phba->hb_tmofunc);
3237 	phba->hb_tmofunc.function = lpfc_hb_timeout;
3238 	phba->hb_tmofunc.data = (unsigned long)phba;
3239 
3240 	psli = &phba->sli;
3241 	/* MBOX heartbeat timer */
3242 	init_timer(&psli->mbox_tmo);
3243 	psli->mbox_tmo.function = lpfc_mbox_timeout;
3244 	psli->mbox_tmo.data = (unsigned long) phba;
3245 	/* FCP polling mode timer */
3246 	init_timer(&phba->fcp_poll_timer);
3247 	phba->fcp_poll_timer.function = lpfc_poll_timeout;
3248 	phba->fcp_poll_timer.data = (unsigned long) phba;
3249 	/* Fabric block timer */
3250 	init_timer(&phba->fabric_block_timer);
3251 	phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
3252 	phba->fabric_block_timer.data = (unsigned long) phba;
3253 	/* EA polling mode timer */
3254 	init_timer(&phba->eratt_poll);
3255 	phba->eratt_poll.function = lpfc_poll_eratt;
3256 	phba->eratt_poll.data = (unsigned long) phba;
3257 
3258 	/* Host attention work mask setup */
3259 	phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
3260 	phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
3261 
3262 	/* Get all the module params for configuring this host */
3263 	lpfc_get_cfgparam(phba);
3264 	/*
3265 	 * Since the sg_tablesize is a module parameter, the sg_dma_buf_size
3266 	 * used to create the sg_dma_buf_pool must be dynamically calculated.
3267 	 * 2 segments are added since the IOCB needs a command and response bde.
3268 	 */
3269 	phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
3270 		sizeof(struct fcp_rsp) +
3271 			((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
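	/*
	 * Worked example (illustrative, assuming a cfg_sg_seg_cnt of 64 and
	 * the sizes noted in the SLI-4 comment later in this file - fcp_cmnd
	 * 32 bytes, fcp_rsp 160 bytes - with a 12-byte ulp_bde64):
	 * 32 + 160 + (64 + 2) * 12 = 984 bytes per DMA buffer.
	 */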
3272 
3273 	if (phba->cfg_enable_bg) {
3274 		phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT;
3275 		phba->cfg_sg_dma_buf_size +=
3276 			phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64);
3277 	}
3278 
3279 	/* Also reinitialize the host templates with new values. */
3280 	lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
3281 	lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
3282 
3283 	phba->max_vpi = LPFC_MAX_VPI;
3284 	/* This will be set to correct value after config_port mbox */
3285 	phba->max_vports = 0;
3286 
3287 	/*
3288 	 * Initialize the SLI Layer to run with lpfc HBAs.
3289 	 */
3290 	lpfc_sli_setup(phba);
3291 	lpfc_sli_queue_setup(phba);
3292 
3293 	/* Allocate device driver memory */
3294 	if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
3295 		return -ENOMEM;
3296 
3297 	return 0;
3298 }
3299 
3300 /**
3301  * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
3302  * @phba: pointer to lpfc hba data structure.
3303  *
3304  * This routine is invoked to unset the driver internal resources set up
3305  * specific for supporting the SLI-3 HBA device it is attached to.
3306  **/
3307 static void
3308 lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
3309 {
3310 	/* Free device driver memory allocated */
3311 	lpfc_mem_free_all(phba);
3312 
3313 	return;
3314 }
3315 
3316 /**
3317  * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
3318  * @phba: pointer to lpfc hba data structure.
3319  *
3320  * This routine is invoked to set up the driver internal resources specific to
3321  * support the SLI-4 HBA device it is attached to.
3322  *
3323  * Return codes
3324  * 	0 - successful
3325  * 	other values - error
3326  **/
3327 static int
3328 lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
3329 {
3330 	struct lpfc_sli *psli;
3331 	int rc;
3332 	int i, hbq_count;
3333 
3334 	/* Before proceeding, wait for POST to complete and the device to be ready */
3335 	rc = lpfc_sli4_post_status_check(phba);
3336 	if (rc)
3337 		return -ENODEV;
3338 
3339 	/*
3340 	 * Initialize timers used by driver
3341 	 */
3342 
3343 	/* Heartbeat timer */
3344 	init_timer(&phba->hb_tmofunc);
3345 	phba->hb_tmofunc.function = lpfc_hb_timeout;
3346 	phba->hb_tmofunc.data = (unsigned long)phba;
3347 
3348 	psli = &phba->sli;
3349 	/* MBOX heartbeat timer */
3350 	init_timer(&psli->mbox_tmo);
3351 	psli->mbox_tmo.function = lpfc_mbox_timeout;
3352 	psli->mbox_tmo.data = (unsigned long) phba;
3353 	/* Fabric block timer */
3354 	init_timer(&phba->fabric_block_timer);
3355 	phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
3356 	phba->fabric_block_timer.data = (unsigned long) phba;
3357 	/* EA polling mode timer */
3358 	init_timer(&phba->eratt_poll);
3359 	phba->eratt_poll.function = lpfc_poll_eratt;
3360 	phba->eratt_poll.data = (unsigned long) phba;
3361 	/*
3362 	 * We need to do a READ_CONFIG mailbox command here before
3363 	 * calling lpfc_get_cfgparam. For VFs this will report the
3364 	 * MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings.
3365 	 * All of the resources allocated for this port are tied to these
3366 	 * values.
3367 	 */
3368 	/* Get all the module params for configuring this host */
3369 	lpfc_get_cfgparam(phba);
3370 	phba->max_vpi = LPFC_MAX_VPI;
3371 	/* This will be set to the correct value after the read_config mbox */
3372 	phba->max_vports = 0;
3373 
3374 	/* Program the default value of vlan_id and fc_map */
3375 	phba->valid_vlan = 0;
3376 	phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
3377 	phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
3378 	phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
3379 
3380 	/*
3381 	 * Since the sg_tablesize is a module parameter, the sg_dma_buf_size
3382 	 * used to create the sg_dma_buf_pool must be dynamically calculated.
3383 	 * 2 segments are added since the IOCB needs a command and response bde.
3384 	 * To ensure that the scsi sgl does not cross a 4k page boundary, only
3385 	 * sgl sizes of 1k, 2k, 4k, and 8k are supported.
3386 	 * Table of sgl sizes and seg_cnt:
3387 	 * sgl size, 	sg_seg_cnt	total seg
3388 	 * 1k		50		52
3389 	 * 2k		114		116
3390 	 * 4k		242		244
3391 	 * 8k		498		500
3392 	 * cmd(32) + rsp(160) + (52 * sizeof(sli4_sge)) = 1024
3393 	 * cmd(32) + rsp(160) + (116 * sizeof(sli4_sge)) = 2048
3394 	 * cmd(32) + rsp(160) + (244 * sizeof(sli4_sge)) = 4096
3395 	 * cmd(32) + rsp(160) + (500 * sizeof(sli4_sge)) = 8192
3396 	 */
3397 	if (phba->cfg_sg_seg_cnt <= LPFC_DEFAULT_SG_SEG_CNT)
3398 		phba->cfg_sg_seg_cnt = 50;
3399 	else if (phba->cfg_sg_seg_cnt <= 114)
3400 		phba->cfg_sg_seg_cnt = 114;
3401 	else if (phba->cfg_sg_seg_cnt <= 242)
3402 		phba->cfg_sg_seg_cnt = 242;
3403 	else
3404 		phba->cfg_sg_seg_cnt = 498;
3405 
3406 	phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd)
3407 					+ sizeof(struct fcp_rsp);
3408 	phba->cfg_sg_dma_buf_size +=
3409 		((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge));
3410 
3411 	/* Initialize buffer queue management fields */
3412 	hbq_count = lpfc_sli_hbq_count();
3413 	for (i = 0; i < hbq_count; ++i)
3414 		INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
3415 	INIT_LIST_HEAD(&phba->rb_pend_list);
3416 	phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
3417 	phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
3418 
3419 	/*
3420 	 * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
3421 	 */
3422 	/* Initialize the Abort scsi buffer list used by driver */
3423 	spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock);
3424 	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
3425 	/* This abort list is used by the worker thread */
3426 	spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock);
3427 
3428 	/*
3429 	 * Initialize driver internal slow-path work queues
3430 	 */
3431 
3432 	/* Driver internal slow-path CQ Event pool */
3433 	INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
3434 	/* Response IOCB work queue list */
3435 	INIT_LIST_HEAD(&phba->sli4_hba.sp_rspiocb_work_queue);
3436 	/* Asynchronous event CQ Event work queue list */
3437 	INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
3438 	/* Fast-path XRI aborted CQ Event work queue list */
3439 	INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
3440 	/* Slow-path XRI aborted CQ Event work queue list */
3441 	INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
3442 	/* Receive queue CQ Event work queue list */
3443 	INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
3444 
3445 	/* Initialize the driver internal SLI layer lists. */
3446 	lpfc_sli_setup(phba);
3447 	lpfc_sli_queue_setup(phba);
3448 
3449 	/* Allocate device driver memory */
3450 	rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
3451 	if (rc)
3452 		return -ENOMEM;
3453 
3454 	/* Create the bootstrap mailbox command */
3455 	rc = lpfc_create_bootstrap_mbox(phba);
3456 	if (unlikely(rc))
3457 		goto out_free_mem;
3458 
3459 	/* Set up the host's endian order with the device. */
3460 	rc = lpfc_setup_endian_order(phba);
3461 	if (unlikely(rc))
3462 		goto out_free_bsmbx;
3463 
3464 	/* Set up the hba's configuration parameters. */
3465 	rc = lpfc_sli4_read_config(phba);
3466 	if (unlikely(rc))
3467 		goto out_free_bsmbx;
3468 
3469 	/* Perform a function reset */
3470 	rc = lpfc_pci_function_reset(phba);
3471 	if (unlikely(rc))
3472 		goto out_free_bsmbx;
3473 
3474 	/* Create all the SLI4 queues */
3475 	rc = lpfc_sli4_queue_create(phba);
3476 	if (rc)
3477 		goto out_free_bsmbx;
3478 
3479 	/* Create driver internal CQE event pool */
3480 	rc = lpfc_sli4_cq_event_pool_create(phba);
3481 	if (rc)
3482 		goto out_destroy_queue;
3483 
3484 	/* Initialize and populate the iocb list per host */
3485 	rc = lpfc_init_sgl_list(phba);
3486 	if (rc) {
3487 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3488 				"1400 Failed to initialize sgl list.\n");
3489 		goto out_destroy_cq_event_pool;
3490 	}
3491 	rc = lpfc_init_active_sgl_array(phba);
3492 	if (rc) {
3493 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3494 				"1430 Failed to initialize active sgl array.\n");
3495 		goto out_free_sgl_list;
3496 	}
3497 
3498 	rc = lpfc_sli4_init_rpi_hdrs(phba);
3499 	if (rc) {
3500 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3501 				"1432 Failed to initialize rpi headers.\n");
3502 		goto out_free_active_sgl;
3503 	}
3504 
3505 	phba->sli4_hba.fcp_eq_hdl = kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
3506 				    phba->cfg_fcp_eq_count), GFP_KERNEL);
3507 	if (!phba->sli4_hba.fcp_eq_hdl) {
3508 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3509 				"2572 Failed allocate memory for fast-path "
3510 				"per-EQ handle array\n");
3511 		goto out_remove_rpi_hdrs;
3512 	}
3513 
3514 	phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
3515 				      phba->sli4_hba.cfg_eqn), GFP_KERNEL);
3516 	if (!phba->sli4_hba.msix_entries) {
3517 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3518 				"2573 Failed allocate memory for msi-x "
3519 				"interrupt vector entries\n");
3520 		goto out_free_fcp_eq_hdl;
3521 	}
3522 
3523 	return rc;
3524 
3525 out_free_fcp_eq_hdl:
3526 	kfree(phba->sli4_hba.fcp_eq_hdl);
3527 out_remove_rpi_hdrs:
3528 	lpfc_sli4_remove_rpi_hdrs(phba);
3529 out_free_active_sgl:
3530 	lpfc_free_active_sgl(phba);
3531 out_free_sgl_list:
3532 	lpfc_free_sgl_list(phba);
3533 out_destroy_cq_event_pool:
3534 	lpfc_sli4_cq_event_pool_destroy(phba);
3535 out_destroy_queue:
3536 	lpfc_sli4_queue_destroy(phba);
3537 out_free_bsmbx:
3538 	lpfc_destroy_bootstrap_mbox(phba);
3539 out_free_mem:
3540 	lpfc_mem_free(phba);
3541 	return rc;
3542 }
3543 
3544 /**
3545  * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
3546  * @phba: pointer to lpfc hba data structure.
3547  *
3548  * This routine is invoked to unset the driver internal resources set up
3549  * specific to supporting the SLI-4 HBA device it is attached to.
3550  **/
3551 static void
3552 lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
3553 {
3554 	struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
3555 
3556 	/* unregister default FCFI from the HBA */
3557 	lpfc_sli4_fcfi_unreg(phba, phba->fcf.fcfi);
3558 
3559 	/* Free the default FCR table */
3560 	lpfc_sli_remove_dflt_fcf(phba);
3561 
3562 	/* Free memory allocated for msi-x interrupt vector entries */
3563 	kfree(phba->sli4_hba.msix_entries);
3564 
3565 	/* Free memory allocated for fast-path work queue handles */
3566 	kfree(phba->sli4_hba.fcp_eq_hdl);
3567 
3568 	/* Free the allocated rpi headers. */
3569 	lpfc_sli4_remove_rpi_hdrs(phba);
3570 	lpfc_sli4_remove_rpis(phba);
3571 
3572 	/* Free the ELS sgl list */
3573 	lpfc_free_active_sgl(phba);
3574 	lpfc_free_sgl_list(phba);
3575 
3576 	/* Free the SCSI sgl management array */
3577 	kfree(phba->sli4_hba.lpfc_scsi_psb_array);
3578 
3579 	/* Free the SLI4 queues */
3580 	lpfc_sli4_queue_destroy(phba);
3581 
3582 	/* Free the completion queue EQ event pool */
3583 	lpfc_sli4_cq_event_release_all(phba);
3584 	lpfc_sli4_cq_event_pool_destroy(phba);
3585 
3586 	/* Reset SLI4 HBA FCoE function */
3587 	lpfc_pci_function_reset(phba);
3588 
3589 	/* Free the bsmbx region. */
3590 	lpfc_destroy_bootstrap_mbox(phba);
3591 
3592 	/* Free the SLI Layer memory with SLI4 HBAs */
3593 	lpfc_mem_free_all(phba);
3594 
3595 	/* Free the current connect table */
3596 	list_for_each_entry_safe(conn_entry, next_conn_entry,
3597 		&phba->fcf_conn_rec_list, list)
3598 		kfree(conn_entry);
3599 
3600 	return;
3601 }
3602 
3603 /**
3604  * lpfc_init_api_table_setup - Set up init api function jump table
3605  * @phba: The hba struct for which this call is being executed.
3606  * @dev_grp: The HBA PCI-Device group number.
3607  *
3608  * This routine sets up the device INIT interface API function jump table
3609  * in @phba struct.
3610  *
3611  * Returns: 0 - success, -ENODEV - failure.
3612  **/
3613 int
3614 lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
3615 {
3616 	switch (dev_grp) {
3617 	case LPFC_PCI_DEV_LP:
3618 		phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
3619 		phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
3620 		phba->lpfc_stop_port = lpfc_stop_port_s3;
3621 		break;
3622 	case LPFC_PCI_DEV_OC:
3623 		phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
3624 		phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
3625 		phba->lpfc_stop_port = lpfc_stop_port_s4;
3626 		break;
3627 	default:
3628 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3629 				"1431 Invalid HBA PCI-device group: 0x%x\n",
3630 				dev_grp);
3631 		return -ENODEV;
3632 		break;
3633 	}
3634 	return 0;
3635 }
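/*
 * Dispatch sketch (editorial illustration, not driver code): once this
 * jump table is populated, SLI-revision-agnostic code can call through
 * the function pointers instead of branching on dev_grp again:
 *
 *	if (lpfc_init_api_table_setup(phba, dev_grp))
 *		return -ENODEV;
 *	phba->lpfc_stop_port(phba);	resolves to the _s3 or _s4 variant
 */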
3636 
3637 /**
3638  * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources.
3639  * @phba: pointer to lpfc hba data structure.
3640  *
3641  * This routine is invoked to set up the driver internal resources before the
3642  * device specific resource setup to support the HBA device it is attached to.
3643  *
3644  * Return codes
3645  *	0 - successful
3646  *	other values - error
3647  **/
3648 static int
3649 lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
3650 {
3651 	/*
3652 	 * Driver resources common to all SLI revisions
3653 	 */
3654 	atomic_set(&phba->fast_event_count, 0);
3655 	spin_lock_init(&phba->hbalock);
3656 
3657 	/* Initialize ndlp management spinlock */
3658 	spin_lock_init(&phba->ndlp_lock);
3659 
3660 	INIT_LIST_HEAD(&phba->port_list);
3661 	INIT_LIST_HEAD(&phba->work_list);
3662 	init_waitqueue_head(&phba->wait_4_mlo_m_q);
3663 
3664 	/* Initialize the wait queue head for the kernel thread */
3665 	init_waitqueue_head(&phba->work_waitq);
3666 
3667 	/* Initialize the scsi buffer list used by driver for scsi IO */
3668 	spin_lock_init(&phba->scsi_buf_list_lock);
3669 	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list);
3670 
3671 	/* Initialize the fabric iocb list */
3672 	INIT_LIST_HEAD(&phba->fabric_iocb_list);
3673 
3674 	/* Initialize list to save ELS buffers */
3675 	INIT_LIST_HEAD(&phba->elsbuf);
3676 
3677 	/* Initialize FCF connection rec list */
3678 	INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
3679 
3680 	return 0;
3681 }
3682 
3683 /**
3684  * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
3685  * @phba: pointer to lpfc hba data structure.
3686  *
3687  * This routine is invoked to set up the driver internal resources after the
3688  * device specific resource setup to support the HBA device it is attached to.
3689  *
3690  * Return codes
3691  * 	0 - successful
3692  * 	other values - error
3693  **/
3694 static int
3695 lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
3696 {
3697 	int error;
3698 
3699 	/* Startup the kernel thread for this host adapter. */
3700 	phba->worker_thread = kthread_run(lpfc_do_work, phba,
3701 					  "lpfc_worker_%d", phba->brd_no);
3702 	if (IS_ERR(phba->worker_thread)) {
3703 		error = PTR_ERR(phba->worker_thread);
3704 		return error;
3705 	}
3706 
3707 	return 0;
3708 }
3709 
3710 /**
3711  * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
3712  * @phba: pointer to lpfc hba data structure.
3713  *
3714  * This routine is invoked to unset the driver internal resources set up after
3715  * the device specific resource setup for supporting the HBA device it is
3716  * attached to.
3717  **/
3718 static void
3719 lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
3720 {
3721 	/* Stop kernel worker thread */
3722 	kthread_stop(phba->worker_thread);
3723 }
3724 
3725 /**
3726  * lpfc_free_iocb_list - Free iocb list.
3727  * @phba: pointer to lpfc hba data structure.
3728  *
3729  * This routine is invoked to free the driver's IOCB list and memory.
3730  **/
3731 static void
3732 lpfc_free_iocb_list(struct lpfc_hba *phba)
3733 {
3734 	struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
3735 
3736 	spin_lock_irq(&phba->hbalock);
3737 	list_for_each_entry_safe(iocbq_entry, iocbq_next,
3738 				 &phba->lpfc_iocb_list, list) {
3739 		list_del(&iocbq_entry->list);
3740 		kfree(iocbq_entry);
3741 		phba->total_iocbq_bufs--;
3742 	}
3743 	spin_unlock_irq(&phba->hbalock);
3744 
3745 	return;
3746 }
3747 
3748 /**
3749  * lpfc_init_iocb_list - Allocate and initialize iocb list.
3750  * @phba: pointer to lpfc hba data structure.
3751  *
3752  * This routine is invoked to allocate and initialize the driver's IOCB
3753  * list and set up the IOCB tag array accordingly.
3754  *
3755  * Return codes
3756  *	0 - successful
3757  *	other values - error
3758  **/
3759 static int
3760 lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
3761 {
3762 	struct lpfc_iocbq *iocbq_entry = NULL;
3763 	uint16_t iotag;
3764 	int i;
3765 
3766 	/* Initialize and populate the iocb list per host.  */
3767 	INIT_LIST_HEAD(&phba->lpfc_iocb_list);
3768 	for (i = 0; i < iocb_count; i++) {
3769 		iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
3770 		if (iocbq_entry == NULL) {
3771 			printk(KERN_ERR "%s: only allocated %d iocbs of "
3772 				"expected %d count. Unloading driver.\n",
3773 				__func__, i, iocb_count);
3774 			goto out_free_iocbq;
3775 		}
3776 
3777 		iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
3778 		if (iotag == 0) {
3779 			kfree(iocbq_entry);
3780 			printk(KERN_ERR "%s: failed to allocate IOTAG. "
3781 				"Unloading driver.\n", __func__);
3782 			goto out_free_iocbq;
3783 		}
3784 		iocbq_entry->sli4_xritag = NO_XRI;
3785 
3786 		spin_lock_irq(&phba->hbalock);
3787 		list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
3788 		phba->total_iocbq_bufs++;
3789 		spin_unlock_irq(&phba->hbalock);
3790 	}
3791 
3792 	return 0;
3793 
3794 out_free_iocbq:
3795 	lpfc_free_iocb_list(phba);
3796 
3797 	return -ENOMEM;
3798 }
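/*
 * Usage sketch (an assumption about the callers, shown for illustration
 * only): an SLI-3 probe path would size the list with the fixed
 * LPFC_IOCB_LIST_CNT, while an SLI-4 path would derive the count from
 * the XRI resources reported by READ_CONFIG:
 *
 *	error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
 *	if (error)
 *		return error;
 */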
3799 
3800 /**
3801  * lpfc_free_sgl_list - Free sgl list.
3802  * @phba: pointer to lpfc hba data structure.
3803  *
3804  * This routine is invoked to free the driver's sgl list and memory.
3805  **/
3806 static void
3807 lpfc_free_sgl_list(struct lpfc_hba *phba)
3808 {
3809 	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
3810 	LIST_HEAD(sglq_list);
3811 	int rc = 0;
3812 
3813 	spin_lock_irq(&phba->hbalock);
3814 	list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list);
3815 	spin_unlock_irq(&phba->hbalock);
3816 
3817 	list_for_each_entry_safe(sglq_entry, sglq_next,
3818 				 &sglq_list, list) {
3819 		list_del(&sglq_entry->list);
3820 		lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
3821 		kfree(sglq_entry);
3822 		phba->sli4_hba.total_sglq_bufs--;
3823 	}
3824 	rc = lpfc_sli4_remove_all_sgl_pages(phba);
3825 	if (rc) {
3826 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3827 			"2005 Unable to deregister pages from HBA: %x", rc);
3828 	}
3829 	kfree(phba->sli4_hba.lpfc_els_sgl_array);
3830 }
3831 
3832 /**
3833  * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
3834  * @phba: pointer to lpfc hba data structure.
3835  *
3836  * This routine is invoked to allocate the driver's active sgl memory.
3837  * This array will hold the sglq_entry's for active IOs.
3838  **/
3839 static int
3840 lpfc_init_active_sgl_array(struct lpfc_hba *phba)
3841 {
3842 	int size;
3843 	size = sizeof(struct lpfc_sglq *);
3844 	size *= phba->sli4_hba.max_cfg_param.max_xri;
3845 
3846 	phba->sli4_hba.lpfc_sglq_active_list =
3847 		kzalloc(size, GFP_KERNEL);
3848 	if (!phba->sli4_hba.lpfc_sglq_active_list)
3849 		return -ENOMEM;
3850 	return 0;
3851 }
3852 
3853 /**
3854  * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
3855  * @phba: pointer to lpfc hba data structure.
3856  *
3857  * This routine is invoked to walk through the array of active sglq entries
3858  * and free all of the resources.
3859  * This is just a placeholder for now.
3860  **/
3861 static void
3862 lpfc_free_active_sgl(struct lpfc_hba *phba)
3863 {
3864 	kfree(phba->sli4_hba.lpfc_sglq_active_list);
3865 }
3866 
3867 /**
3868  * lpfc_init_sgl_list - Allocate and initialize sgl list.
3869  * @phba: pointer to lpfc hba data structure.
3870  *
3871  * This routine is invoked to allocate and initialize the driver's sgl
3872  * list and set up the sgl xritag tag array accordingly.
3873  *
3874  * Return codes
3875  *	0 - successful
3876  *	other values - error
3877  **/
3878 static int
3879 lpfc_init_sgl_list(struct lpfc_hba *phba)
3880 {
3881 	struct lpfc_sglq *sglq_entry = NULL;
3882 	int i;
3883 	int els_xri_cnt;
3884 
3885 	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
3886 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3887 				"2400 lpfc_init_sgl_list els %d.\n",
3888 				els_xri_cnt);
3889 	/* Initialize and populate the sglq list per host/VF. */
3890 	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list);
3891 	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
3892 
3893 	/* Sanity check on XRI management */
3894 	if (phba->sli4_hba.max_cfg_param.max_xri <= els_xri_cnt) {
3895 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3896 				"2562 No room left for SCSI XRI allocation: "
3897 				"max_xri=%d, els_xri=%d\n",
3898 				phba->sli4_hba.max_cfg_param.max_xri,
3899 				els_xri_cnt);
3900 		return -ENOMEM;
3901 	}
3902 
3903 	/* Allocate memory for the ELS XRI management array */
3904 	phba->sli4_hba.lpfc_els_sgl_array =
3905 			kzalloc((sizeof(struct lpfc_sglq *) * els_xri_cnt),
3906 			GFP_KERNEL);
3907 
3908 	if (!phba->sli4_hba.lpfc_els_sgl_array) {
3909 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3910 				"2401 Failed to allocate memory for ELS "
3911 				"XRI management array of size %d.\n",
3912 				els_xri_cnt);
3913 		return -ENOMEM;
3914 	}
3915 
3916 	/* Reserve the remaining XRIs for SCSI and size the management array */
3917 	phba->sli4_hba.scsi_xri_max =
3918 			phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
3919 	phba->sli4_hba.scsi_xri_cnt = 0;
3920 
3921 	phba->sli4_hba.lpfc_scsi_psb_array =
3922 			kzalloc((sizeof(struct lpfc_scsi_buf *) *
3923 			phba->sli4_hba.scsi_xri_max), GFP_KERNEL);
3924 
3925 	if (!phba->sli4_hba.lpfc_scsi_psb_array) {
3926 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3927 				"2563 Failed to allocate memory for SCSI "
3928 				"XRI management array of size %d.\n",
3929 				phba->sli4_hba.scsi_xri_max);
3930 		kfree(phba->sli4_hba.lpfc_els_sgl_array);
3931 		return -ENOMEM;
3932 	}
3933 
3934 	for (i = 0; i < els_xri_cnt; i++) {
3935 		sglq_entry = kzalloc(sizeof(struct lpfc_sglq), GFP_KERNEL);
3936 		if (sglq_entry == NULL) {
3937 			printk(KERN_ERR "%s: only allocated %d sgls of "
3938 				"expected %d count. Unloading driver.\n",
3939 				__func__, i, els_xri_cnt);
3940 			goto out_free_mem;
3941 		}
3942 
3943 		sglq_entry->sli4_xritag = lpfc_sli4_next_xritag(phba);
3944 		if (sglq_entry->sli4_xritag == NO_XRI) {
3945 			kfree(sglq_entry);
3946 			printk(KERN_ERR "%s: failed to allocate XRI. "
3947 				"Unloading driver.\n", __func__);
3948 			goto out_free_mem;
3949 		}
3950 		sglq_entry->buff_type = GEN_BUFF_TYPE;
3951 		sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, &sglq_entry->phys);
3952 		if (sglq_entry->virt == NULL) {
3953 			kfree(sglq_entry);
3954 			printk(KERN_ERR "%s: failed to allocate mbuf. "
3955 				"Unloading driver.\n", __func__);
3956 			goto out_free_mem;
3957 		}
3958 		sglq_entry->sgl = sglq_entry->virt;
3959 		memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
3960 
3961 		/* The list order is used by later block SGL registration */
3962 		spin_lock_irq(&phba->hbalock);
3963 		list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list);
3964 		phba->sli4_hba.lpfc_els_sgl_array[i] = sglq_entry;
3965 		phba->sli4_hba.total_sglq_bufs++;
3966 		spin_unlock_irq(&phba->hbalock);
3967 	}
3968 	return 0;
3969 
3970 out_free_mem:
3971 	kfree(phba->sli4_hba.lpfc_scsi_psb_array);
3972 	lpfc_free_sgl_list(phba);
3973 	return -ENOMEM;
3974 }
3975 
3976 /**
3977  * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
3978  * @phba: pointer to lpfc hba data structure.
3979  *
3980  * This routine is invoked to post rpi header templates to the
3981  * HBA consistent with the SLI-4 interface spec.  This routine
3982  * posts a PAGE_SIZE memory region to the port to hold up to
3983  * PAGE_SIZE modulo 64 rpi context headers.
3984  * PAGE_SIZE / 64 rpi context headers.
3985  * called only from probe or lpfc_online when interrupts are not
3986  * enabled and the driver is reinitializing the device.
3987  *
3988  * Return codes
3989  * 	0 - successful
3990  * 	ENOMEM - No available memory
3991  *      EIO - The mailbox failed to complete successfully.
3992  **/
3993 int
3994 lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
3995 {
3996 	int rc = 0;
3997 	int longs;
3998 	uint16_t rpi_count;
3999 	struct lpfc_rpi_hdr *rpi_hdr;
4000 
4001 	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
4002 
4003 	/*
4004 	 * Provision an rpi bitmask range for discovery. The bitmask must
4005 	 * cover every rpi the port can assign, up to rpi_base + max_rpi - 1.
4006 	 */
4007 	rpi_count = phba->sli4_hba.max_cfg_param.rpi_base +
4008 		    phba->sli4_hba.max_cfg_param.max_rpi - 1;
4009 
4010 	longs = ((rpi_count) + BITS_PER_LONG - 1) / BITS_PER_LONG;
4011 	phba->sli4_hba.rpi_bmask = kzalloc(longs * sizeof(unsigned long),
4012 					   GFP_KERNEL);
4013 	if (!phba->sli4_hba.rpi_bmask)
4014 		return -ENOMEM;
4015 
4016 	rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
4017 	if (!rpi_hdr) {
4018 		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4019 				"0391 Error during rpi post operation\n");
4020 		lpfc_sli4_remove_rpis(phba);
4021 		rc = -ENODEV;
4022 	}
4023 
4024 	return rc;
4025 }
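/*
 * Bitmask sizing example (illustrative; assumes a host where
 * BITS_PER_LONG == 64): with rpi_base = 0 and max_rpi = 4096 the code
 * above provisions rpi_count = 4095 bits, so
 *   longs = (4095 + 63) / 64 = 64
 * and the bitmask occupies 64 * sizeof(unsigned long) = 512 bytes.
 */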
4026 
4027 /**
4028  * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
4029  * @phba: pointer to lpfc hba data structure.
4030  *
4031  * This routine is invoked to allocate a single 4KB memory region to
4032  * support rpis and store it in the phba.  This single region
4033  * provides support for up to 64 rpis.  The region is used globally
4034  * by the device.
4035  *
4036  * Returns:
4037  *   A valid rpi hdr on success.
4038  *   A NULL pointer on any failure.
4039  **/
4040 struct lpfc_rpi_hdr *
4041 lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
4042 {
4043 	uint16_t rpi_limit, curr_rpi_range;
4044 	struct lpfc_dmabuf *dmabuf;
4045 	struct lpfc_rpi_hdr *rpi_hdr;
4046 
4047 	rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base +
4048 		    phba->sli4_hba.max_cfg_param.max_rpi - 1;
4049 
4050 	spin_lock_irq(&phba->hbalock);
4051 	curr_rpi_range = phba->sli4_hba.next_rpi;
4052 	spin_unlock_irq(&phba->hbalock);
4053 
4054 	/*
4055 	 * The port has a limited number of rpis. The increment here
4056 	 * is LPFC_RPI_HDR_COUNT - 1 to account for the starting value
4057 	 * and to allow the full max_rpi range per port.
4058 	 */
4059 	if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit)
4060 		return NULL;
4061 
4062 	/*
4063 	 * First allocate the protocol header region for the port.  The
4064 	 * port expects a 4KB DMA-mapped memory region that is 4K aligned.
4065 	 */
4066 	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4067 	if (!dmabuf)
4068 		return NULL;
4069 
4070 	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
4071 					  LPFC_HDR_TEMPLATE_SIZE,
4072 					  &dmabuf->phys,
4073 					  GFP_KERNEL);
4074 	if (!dmabuf->virt) {
4075 		rpi_hdr = NULL;
4076 		goto err_free_dmabuf;
4077 	}
4078 
4079 	memset(dmabuf->virt, 0, LPFC_HDR_TEMPLATE_SIZE);
4080 	if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
4081 		rpi_hdr = NULL;
4082 		goto err_free_coherent;
4083 	}
4084 
4085 	/* Save the rpi header data for cleanup later. */
4086 	rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
4087 	if (!rpi_hdr)
4088 		goto err_free_coherent;
4089 
4090 	rpi_hdr->dmabuf = dmabuf;
4091 	rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
4092 	rpi_hdr->page_count = 1;
4093 	spin_lock_irq(&phba->hbalock);
4094 	rpi_hdr->start_rpi = phba->sli4_hba.next_rpi;
4095 	list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
4096 
4097 	/*
4098 	 * The next_rpi stores the next modulo-64 rpi value to post
4099 	 * in any subsequent rpi memory region postings.
4100 	 */
4101 	phba->sli4_hba.next_rpi += LPFC_RPI_HDR_COUNT;
4102 	spin_unlock_irq(&phba->hbalock);
4103 	return rpi_hdr;
4104 
4105  err_free_coherent:
4106 	dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
4107 			  dmabuf->virt, dmabuf->phys);
4108  err_free_dmabuf:
4109 	kfree(dmabuf);
4110 	return NULL;
4111 }
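/*
 * Usage sketch (one plausible calling pattern, shown for illustration
 * only): a caller that wants to cover the entire rpi range can post
 * headers back-to-back, treating the NULL return above as the
 * window-exhausted condition:
 *
 *	struct lpfc_rpi_hdr *rpi_hdr;
 *
 *	do {
 *		rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
 *	} while (rpi_hdr);
 */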
4112 
4113 /**
4114  * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
4115  * @phba: pointer to lpfc hba data structure.
4116  *
4117  * This routine is invoked to remove all memory resources allocated
4118  * to support rpis. This routine presumes the caller has released all
4119  * rpis consumed by fabric or port logins and is prepared to have
4120  * the header pages removed.
4121  **/
4122 void
4123 lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
4124 {
4125 	struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
4126 
4127 	list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
4128 				 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
4129 		list_del(&rpi_hdr->list);
4130 		dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
4131 				  rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
4132 		kfree(rpi_hdr->dmabuf);
4133 		kfree(rpi_hdr);
4134 	}
4135 
4136 	phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
4137 	/* Clear the whole rpi bitmask, not just its first word */
	memset(phba->sli4_hba.rpi_bmask, 0,
	       BITS_TO_LONGS(phba->sli4_hba.max_cfg_param.rpi_base +
			     phba->sli4_hba.max_cfg_param.max_rpi - 1) *
	       sizeof(unsigned long));
4138 }
4139 
4140 /**
4141  * lpfc_hba_alloc - Allocate driver hba data structure for a device.
4142  * @pdev: pointer to pci device data structure.
4143  *
4144  * This routine is invoked to allocate the driver hba data structure for an
4145  * HBA device. If the allocation is successful, the phba reference to the
4146  * PCI device data structure is set.
4147  *
4148  * Return codes
4149  *      pointer to @phba - successful
4150  *      NULL - error
4151  **/
4152 static struct lpfc_hba *
4153 lpfc_hba_alloc(struct pci_dev *pdev)
4154 {
4155 	struct lpfc_hba *phba;
4156 
4157 	/* Allocate memory for HBA structure */
4158 	phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
4159 	if (!phba) {
4160 		dev_err(&pdev->dev, "failed to allocate hba struct\n");
4161 		return NULL;
4162 	}
4163 
4164 	/* Set reference to PCI device in HBA structure */
4165 	phba->pcidev = pdev;
4166 
4167 	/* Assign an unused board number */
4168 	phba->brd_no = lpfc_get_instance();
4169 	if (phba->brd_no < 0) {
4170 		kfree(phba);
4171 		return NULL;
4172 	}
4173 
4174 	mutex_init(&phba->ct_event_mutex);
4175 	INIT_LIST_HEAD(&phba->ct_ev_waiters);
4176 
4177 	return phba;
4178 }
4179 
4180 /**
4181  * lpfc_hba_free - Free driver hba data structure with a device.
4182  * @phba: pointer to lpfc hba data structure.
4183  *
4184  * This routine is invoked to free the driver hba data structure with an
4185  * HBA device.
4186  **/
4187 static void
4188 lpfc_hba_free(struct lpfc_hba *phba)
4189 {
4190 	/* Release the driver assigned board number */
4191 	idr_remove(&lpfc_hba_index, phba->brd_no);
4192 
4193 	kfree(phba);
4194 	return;
4195 }
4196 
4197 /**
4198  * lpfc_create_shost - Create hba physical port with associated scsi host.
4199  * @phba: pointer to lpfc hba data structure.
4200  *
4201  * This routine is invoked to create HBA physical port and associate a SCSI
4202  * host with it.
4203  *
4204  * Return codes
4205  *      0 - successful
4206  *      other values - error
4207  **/
4208 static int
4209 lpfc_create_shost(struct lpfc_hba *phba)
4210 {
4211 	struct lpfc_vport *vport;
4212 	struct Scsi_Host  *shost;
4213 
4214 	/* Initialize HBA FC structure */
4215 	phba->fc_edtov = FF_DEF_EDTOV;
4216 	phba->fc_ratov = FF_DEF_RATOV;
4217 	phba->fc_altov = FF_DEF_ALTOV;
4218 	phba->fc_arbtov = FF_DEF_ARBTOV;
4219 
4220 	vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
4221 	if (!vport)
4222 		return -ENODEV;
4223 
4224 	shost = lpfc_shost_from_vport(vport);
4225 	phba->pport = vport;
4226 	lpfc_debugfs_initialize(vport);
4227 	/* Put reference to SCSI host to driver's device private data */
4228 	pci_set_drvdata(phba->pcidev, shost);
4229 
4230 	return 0;
4231 }
4232 
4233 /**
4234  * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
4235  * @phba: pointer to lpfc hba data structure.
4236  *
4237  * This routine is invoked to destroy HBA physical port and the associated
4238  * SCSI host.
4239  **/
4240 static void
4241 lpfc_destroy_shost(struct lpfc_hba *phba)
4242 {
4243 	struct lpfc_vport *vport = phba->pport;
4244 
4245 	/* Destroy physical port that associated with the SCSI host */
4246 	destroy_port(vport);
4247 
4248 	return;
4249 }
4250 
4251 /**
4252  * lpfc_setup_bg - Setup Block guard structures and debug areas.
4253  * @phba: pointer to lpfc hba data structure.
4254  * @shost: the shost to be used to detect Block guard settings.
4255  *
4256  * This routine sets up the local Block guard protocol settings for @shost.
4257  * This routine also allocates memory for debugging bg buffers.
4258  **/
4259 static void
4260 lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
4261 {
4262 	int pagecnt = 10;
4263 	if (lpfc_prot_mask && lpfc_prot_guard) {
4264 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4265 				"1478 Registering BlockGuard with the "
4266 				"SCSI layer\n");
4267 		scsi_host_set_prot(shost, lpfc_prot_mask);
4268 		scsi_host_set_guard(shost, lpfc_prot_guard);
4269 	}
4270 	if (!_dump_buf_data) {
4271 		while (pagecnt) {
4272 			spin_lock_init(&_dump_buf_lock);
4273 			_dump_buf_data =
4274 				(char *) __get_free_pages(GFP_KERNEL, pagecnt);
4275 			if (_dump_buf_data) {
4276 				printk(KERN_ERR "BLKGRD allocated %d pages for "
4277 				       "_dump_buf_data at 0x%p\n",
4278 				       (1 << pagecnt), _dump_buf_data);
4279 				_dump_buf_data_order = pagecnt;
4280 				memset(_dump_buf_data, 0,
4281 				       ((1 << PAGE_SHIFT) << pagecnt));
4282 				break;
4283 			} else
4284 				--pagecnt;
4285 		}
4286 		if (!_dump_buf_data_order)
4287 			printk(KERN_ERR "BLKGRD ERROR unable to allocate "
4288 			       "memory for hexdump\n");
4289 	} else
4290 		printk(KERN_ERR "BLKGRD already allocated _dump_buf_data=0x%p"
4291 		       "\n", _dump_buf_data);
4292 	if (!_dump_buf_dif) {
4293 		while (pagecnt) {
4294 			_dump_buf_dif =
4295 				(char *) __get_free_pages(GFP_KERNEL, pagecnt);
4296 			if (_dump_buf_dif) {
4297 				printk(KERN_ERR "BLKGRD allocated %d pages for "
4298 				       "_dump_buf_dif at 0x%p\n",
4299 				       (1 << pagecnt), _dump_buf_dif);
4300 				_dump_buf_dif_order = pagecnt;
4301 				memset(_dump_buf_dif, 0,
4302 				       ((1 << PAGE_SHIFT) << pagecnt));
4303 				break;
4304 			} else
4305 				--pagecnt;
4306 		}
4307 		if (!_dump_buf_dif_order)
4308 			printk(KERN_ERR "BLKGRD ERROR unable to allocate "
4309 			       "memory for hexdump\n");
4310 	} else
4311 		printk(KERN_ERR "BLKGRD already allocated _dump_buf_dif=0x%p\n",
4312 		       _dump_buf_dif);
4313 }
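/*
 * Order math for the allocations above (assuming 4 KB pages):
 * __get_free_pages() takes an allocation order, so the initial pagecnt
 * of 10 requests 2^10 = 1024 contiguous pages (4 MB); the "%d pages"
 * printed above is likewise (1 << pagecnt), and each retry halves the
 * request by dropping the order by one.
 */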
4314 
4315 /**
4316  * lpfc_post_init_setup - Perform necessary device post initialization setup.
4317  * @phba: pointer to lpfc hba data structure.
4318  *
4319  * This routine is invoked to perform all the necessary post initialization
4320  * setup for the device.
4321  **/
4322 static void
4323 lpfc_post_init_setup(struct lpfc_hba *phba)
4324 {
4325 	struct Scsi_Host  *shost;
4326 	struct lpfc_adapter_event_header adapter_event;
4327 
4328 	/* Get the default values for Model Name and Description */
4329 	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
4330 
4331 	/*
4332 	 * hba setup may have changed the hba_queue_depth so we need to
4333 	 * adjust the value of can_queue.
4334 	 */
4335 	shost = pci_get_drvdata(phba->pcidev);
4336 	shost->can_queue = phba->cfg_hba_queue_depth - 10;
4337 	if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
4338 		lpfc_setup_bg(phba, shost);
4339 
4340 	lpfc_host_attrib_init(shost);
4341 
4342 	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
4343 		spin_lock_irq(shost->host_lock);
4344 		lpfc_poll_start_timer(phba);
4345 		spin_unlock_irq(shost->host_lock);
4346 	}
4347 
4348 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4349 			"0428 Perform SCSI scan\n");
4350 	/* Send board arrival event to upper layer */
4351 	adapter_event.event_type = FC_REG_ADAPTER_EVENT;
4352 	adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
4353 	fc_host_post_vendor_event(shost, fc_get_event_number(),
4354 				  sizeof(adapter_event),
4355 				  (char *) &adapter_event,
4356 				  LPFC_NL_VENDOR_ID);
4357 	return;
4358 }
4359 
4360 /**
4361  * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
4362  * @phba: pointer to lpfc hba data structure.
4363  *
4364  * This routine is invoked to set up the PCI device memory space for device
4365  * with SLI-3 interface spec.
4366  *
4367  * Return codes
4368  * 	0 - successful
4369  * 	other values - error
4370  **/
4371 static int
4372 lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
4373 {
4374 	struct pci_dev *pdev;
4375 	unsigned long bar0map_len, bar2map_len;
4376 	int i, hbq_count;
4377 	void *ptr;
4378 	int error = -ENODEV;
4379 
4380 	/* Obtain PCI device reference */
4381 	if (!phba->pcidev)
4382 		return error;
4383 	else
4384 		pdev = phba->pcidev;
4385 
4386 	/* Set the device DMA mask size */
4387 	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)
4388 		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
4389 			return error;
4390 
4391 	/* Get the bus address of Bar0 and Bar2 and the number of bytes
4392 	 * required by each mapping.
4393 	 */
4394 	phba->pci_bar0_map = pci_resource_start(pdev, 0);
4395 	bar0map_len = pci_resource_len(pdev, 0);
4396 
4397 	phba->pci_bar2_map = pci_resource_start(pdev, 2);
4398 	bar2map_len = pci_resource_len(pdev, 2);
4399 
4400 	/* Map HBA SLIM to a kernel virtual address. */
4401 	phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
4402 	if (!phba->slim_memmap_p) {
4403 		dev_printk(KERN_ERR, &pdev->dev,
4404 			   "ioremap failed for SLIM memory.\n");
4405 		goto out;
4406 	}
4407 
4408 	/* Map HBA Control Registers to a kernel virtual address. */
4409 	phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
4410 	if (!phba->ctrl_regs_memmap_p) {
4411 		dev_printk(KERN_ERR, &pdev->dev,
4412 			   "ioremap failed for HBA control registers.\n");
4413 		goto out_iounmap_slim;
4414 	}
4415 
4416 	/* Allocate memory for SLI-2 structures */
4417 	phba->slim2p.virt = dma_alloc_coherent(&pdev->dev,
4418 					       SLI2_SLIM_SIZE,
4419 					       &phba->slim2p.phys,
4420 					       GFP_KERNEL);
4421 	if (!phba->slim2p.virt)
4422 		goto out_iounmap;
4423 
4424 	memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE);
4425 	phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
4426 	phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
4427 	phba->IOCBs = (phba->slim2p.virt +
4428 		       offsetof(struct lpfc_sli2_slim, IOCBs));
4429 
4430 	phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
4431 						 lpfc_sli_hbq_size(),
4432 						 &phba->hbqslimp.phys,
4433 						 GFP_KERNEL);
4434 	if (!phba->hbqslimp.virt)
4435 		goto out_free_slim;
4436 
4437 	hbq_count = lpfc_sli_hbq_count();
4438 	ptr = phba->hbqslimp.virt;
4439 	for (i = 0; i < hbq_count; ++i) {
4440 		phba->hbqs[i].hbq_virt = ptr;
4441 		INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
4442 		ptr += (lpfc_hbq_defs[i]->entry_count *
4443 			sizeof(struct lpfc_hbq_entry));
4444 	}
4445 	phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
4446 	phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;
4447 
4448 	memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
4449 
4450 	INIT_LIST_HEAD(&phba->rb_pend_list);
4451 
4452 	phba->MBslimaddr = phba->slim_memmap_p;
4453 	phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
4454 	phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
4455 	phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
4456 	phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
4457 
4458 	return 0;
4459 
4460 out_free_slim:
4461 	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
4462 			  phba->slim2p.virt, phba->slim2p.phys);
4463 out_iounmap:
4464 	iounmap(phba->ctrl_regs_memmap_p);
4465 out_iounmap_slim:
4466 	iounmap(phba->slim_memmap_p);
4467 out:
4468 	return error;
4469 }
4470 
4471 /**
4472  * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
4473  * @phba: pointer to lpfc hba data structure.
4474  *
4475  * This routine is invoked to unset the PCI device memory space for device
4476  * with SLI-3 interface spec.
4477  **/
4478 static void
4479 lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
4480 {
4481 	struct pci_dev *pdev;
4482 
4483 	/* Obtain PCI device reference */
4484 	if (!phba->pcidev)
4485 		return;
4486 	else
4487 		pdev = phba->pcidev;
4488 
4489 	/* Free coherent DMA memory allocated */
4490 	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
4491 			  phba->hbqslimp.virt, phba->hbqslimp.phys);
4492 	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
4493 			  phba->slim2p.virt, phba->slim2p.phys);
4494 
4495 	/* I/O memory unmap */
4496 	iounmap(phba->ctrl_regs_memmap_p);
4497 	iounmap(phba->slim_memmap_p);
4498 
4499 	return;
4500 }
4501 
4502 /**
4503  * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
4504  * @phba: pointer to lpfc hba data structure.
4505  *
4506  * This routine is invoked to wait for SLI4 device Power On Self Test (POST)
4507  * done and check status.
4508  *
4509  * Return 0 if successful, otherwise -ENODEV.
4510  **/
4511 int
4512 lpfc_sli4_post_status_check(struct lpfc_hba *phba)
4513 {
4514 	struct lpfc_register sta_reg, uerrlo_reg, uerrhi_reg, scratchpad;
4515 	uint32_t onlnreg0, onlnreg1;
4516 	int i, port_error = -ENODEV;
4517 
4518 	if (!phba->sli4_hba.STAregaddr)
4519 		return -ENODEV;
4520 
4521 	/* Wait up to 30 seconds for the SLI port to complete POST and be ready */
4522 	for (i = 0; i < 3000; i++) {
4523 		sta_reg.word0 = readl(phba->sli4_hba.STAregaddr);
4524 		/* Encounter fatal POST error, break out */
4525 		if (bf_get(lpfc_hst_state_perr, &sta_reg)) {
4526 			port_error = -ENODEV;
4527 			break;
4528 		}
4529 		if (LPFC_POST_STAGE_ARMFW_READY ==
4530 		    bf_get(lpfc_hst_state_port_status, &sta_reg)) {
4531 			port_error = 0;
4532 			break;
4533 		}
4534 		msleep(10);
4535 	}
4536 
4537 	if (port_error)
4538 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4539 			"1408 Failure HBA POST Status: sta_reg=0x%x, "
4540 			"perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, xrom=x%x, "
4541 			"dl=x%x, pstatus=x%x\n", sta_reg.word0,
4542 			bf_get(lpfc_hst_state_perr, &sta_reg),
4543 			bf_get(lpfc_hst_state_sfi, &sta_reg),
4544 			bf_get(lpfc_hst_state_nip, &sta_reg),
4545 			bf_get(lpfc_hst_state_ipc, &sta_reg),
4546 			bf_get(lpfc_hst_state_xrom, &sta_reg),
4547 			bf_get(lpfc_hst_state_dl, &sta_reg),
4548 			bf_get(lpfc_hst_state_port_status, &sta_reg));
4549 
4550 	/* Log device information */
4551 	scratchpad.word0 =  readl(phba->sli4_hba.SCRATCHPADregaddr);
4552 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4553 			"2534 Device Info: ChipType=0x%x, SliRev=0x%x, "
4554 			"FeatureL1=0x%x, FeatureL2=0x%x\n",
4555 			bf_get(lpfc_scratchpad_chiptype, &scratchpad),
4556 			bf_get(lpfc_scratchpad_slirev, &scratchpad),
4557 			bf_get(lpfc_scratchpad_featurelevel1, &scratchpad),
4558 			bf_get(lpfc_scratchpad_featurelevel2, &scratchpad));
4559 
4560 	/* On an unrecoverable error, log the error message and return error */
4561 	onlnreg0 = readl(phba->sli4_hba.ONLINE0regaddr);
4562 	onlnreg1 = readl(phba->sli4_hba.ONLINE1regaddr);
4563 	if ((onlnreg0 != LPFC_ONLINE_NERR) || (onlnreg1 != LPFC_ONLINE_NERR)) {
4564 		uerrlo_reg.word0 = readl(phba->sli4_hba.UERRLOregaddr);
4565 		uerrhi_reg.word0 = readl(phba->sli4_hba.UERRHIregaddr);
4566 		if (uerrlo_reg.word0 || uerrhi_reg.word0) {
4567 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4568 					"1422 HBA Unrecoverable error: "
4569 					"uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
4570 					"online0_reg=0x%x, online1_reg=0x%x\n",
4571 					uerrlo_reg.word0, uerrhi_reg.word0,
4572 					onlnreg0, onlnreg1);
4573 		}
4574 		return -ENODEV;
4575 	}
4576 
4577 	return port_error;
4578 }
4579 
4580 /**
4581  * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
4582  * @phba: pointer to lpfc hba data structure.
4583  *
4584  * This routine is invoked to set up SLI4 BAR0 PCI config space register
4585  * memory map.
4586  **/
4587 static void
4588 lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba)
4589 {
4590 	phba->sli4_hba.UERRLOregaddr = phba->sli4_hba.conf_regs_memmap_p +
4591 					LPFC_UERR_STATUS_LO;
4592 	phba->sli4_hba.UERRHIregaddr = phba->sli4_hba.conf_regs_memmap_p +
4593 					LPFC_UERR_STATUS_HI;
4594 	phba->sli4_hba.ONLINE0regaddr = phba->sli4_hba.conf_regs_memmap_p +
4595 					LPFC_ONLINE0;
4596 	phba->sli4_hba.ONLINE1regaddr = phba->sli4_hba.conf_regs_memmap_p +
4597 					LPFC_ONLINE1;
4598 	phba->sli4_hba.SCRATCHPADregaddr = phba->sli4_hba.conf_regs_memmap_p +
4599 					LPFC_SCRATCHPAD;
4600 }
4601 
4602 /**
4603  * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
4604  * @phba: pointer to lpfc hba data structure.
4605  *
4606  * This routine is invoked to set up SLI4 BAR1 control status register (CSR)
4607  * memory map.
4608  **/
4609 static void
4610 lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba)
4611 {
4612 
4613 	phba->sli4_hba.STAregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
4614 				    LPFC_HST_STATE;
4615 	phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
4616 				    LPFC_HST_ISR0;
4617 	phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
4618 				    LPFC_HST_IMR0;
4619 	phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
4620 				     LPFC_HST_ISCR0;
4621 	return;
4622 }
4623 
4624 /**
4625  * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
4626  * @phba: pointer to lpfc hba data structure.
4627  * @vf: virtual function number
4628  *
4629  * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
4630  * based on the given virtual function number, @vf.
4631  *
4632  * Return 0 if successful, otherwise -ENODEV.
4633  **/
4634 static int
4635 lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
4636 {
4637 	if (vf > LPFC_VIR_FUNC_MAX)
4638 		return -ENODEV;
4639 
4640 	phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
4641 				vf * LPFC_VFR_PAGE_SIZE + LPFC_RQ_DOORBELL);
4642 	phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
4643 				vf * LPFC_VFR_PAGE_SIZE + LPFC_WQ_DOORBELL);
4644 	phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
4645 				vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL);
4646 	phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
4647 				vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
4648 	phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
4649 				vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
4650 	return 0;
4651 }
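/*
 * Address example (illustrative; assumes LPFC_VFR_PAGE_SIZE is 4 KB):
 * for vf = 2 the RQ doorbell resolves to
 *   drbl_regs_memmap_p + 2 * 4096 + LPFC_RQ_DOORBELL
 * i.e. each virtual function owns a page-sized doorbell window of its
 * own within the BAR2 mapping.
 */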
4652 
4653 /**
4654  * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
4655  * @phba: pointer to lpfc hba data structure.
4656  *
4657  * This routine is invoked to create the bootstrap mailbox
4658  * region consistent with the SLI-4 interface spec.  This
4659  * routine allocates all memory necessary to communicate
4660  * mailbox commands to the port and sets up all alignment
4661  * needs.  No locks are expected to be held when calling
4662  * this routine.
4663  *
4664  * Return codes
4665  * 	0 - successful
4666  * 	ENOMEM - could not allocate memory.
4667  **/
4668 static int
4669 lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
4670 {
4671 	uint32_t bmbx_size;
4672 	struct lpfc_dmabuf *dmabuf;
4673 	struct dma_address *dma_address;
4674 	uint32_t pa_addr;
4675 	uint64_t phys_addr;
4676 
4677 	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4678 	if (!dmabuf)
4679 		return -ENOMEM;
4680 
4681 	/*
4682 	 * The bootstrap mailbox region comprises two parts
4683 	 * plus an alignment restriction of 16 bytes.
4684 	 */
4685 	bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
4686 	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
4687 					  bmbx_size,
4688 					  &dmabuf->phys,
4689 					  GFP_KERNEL);
4690 	if (!dmabuf->virt) {
4691 		kfree(dmabuf);
4692 		return -ENOMEM;
4693 	}
4694 	memset(dmabuf->virt, 0, bmbx_size);
4695 
4696 	/*
4697 	 * Initialize the bootstrap mailbox pointers now so that the register
4698 	 * operations are simple later.  The mailbox dma address is required
4699 	 * to be 16-byte aligned.  Also align the virtual memory as each
4700 	 * mailbox is copied into the bmbx mailbox region before issuing the
4701 	 * command to the port.
4702 	 */
4703 	phba->sli4_hba.bmbx.dmabuf = dmabuf;
4704 	phba->sli4_hba.bmbx.bmbx_size = bmbx_size;
4705 
4706 	phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
4707 					      LPFC_ALIGN_16_BYTE);
4708 	phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
4709 					      LPFC_ALIGN_16_BYTE);
4710 
4711 	/*
4712 	 * Set the high and low physical addresses now.  The SLI4 alignment
4713 	 * requirement is 16 bytes and the mailbox is posted to the port
4714 	 * as two 30-bit addresses.  The other data is a bit marking whether
4715 	 * the 30-bit address is the high or low address.
4716 	 * Upcast bmbx aphys to 64bits so shift instruction compiles
4717 	 * clean on 32 bit machines.
4718 	 */
4719 	dma_address = &phba->sli4_hba.bmbx.dma_address;
4720 	phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
4721 	pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
4722 	dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
4723 					   LPFC_BMBX_BIT1_ADDR_HI);
4724 
4725 	pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
4726 	dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
4727 					   LPFC_BMBX_BIT1_ADDR_LO);
4728 	return 0;
4729 }
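/*
 * Worked example of the address split above (hypothetical 16-byte
 * aligned address aphys = 0x0000000123456780):
 *   addr_lo = (((aphys >> 4)  & 0x3fffffff) << 2) | LPFC_BMBX_BIT1_ADDR_LO
 *   addr_hi = (((aphys >> 34) & 0x3fffffff) << 2) | LPFC_BMBX_BIT1_ADDR_HI
 * yields 0x12345678 and 0x0 as the 30-bit fields, so the two registers
 * together carry bits 63:4 of the aligned physical address while bit 0
 * of each word flags whether it is the high or the low half.
 */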
4730 
4731 /**
4732  * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
4733  * @phba: pointer to lpfc hba data structure.
4734  *
4735  * This routine is invoked to teardown the bootstrap mailbox
4736  * region and release all host resources. This routine requires
4737  * the caller to ensure all mailbox commands recovered, no
4738  * the caller to ensure all mailbox commands have been recovered, no
4739  * additional mailbox commands are sent, and interrupts are disabled
4740  *
4741  **/
4742 static void
4743 lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
4744 {
4745 	dma_free_coherent(&phba->pcidev->dev,
4746 			  phba->sli4_hba.bmbx.bmbx_size,
4747 			  phba->sli4_hba.bmbx.dmabuf->virt,
4748 			  phba->sli4_hba.bmbx.dmabuf->phys);
4749 
4750 	kfree(phba->sli4_hba.bmbx.dmabuf);
4751 	memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
4752 }
4753 
4754 /**
4755  * lpfc_sli4_read_config - Get the config parameters.
4756  * @phba: pointer to lpfc hba data structure.
4757  *
4758  * This routine is invoked to read the configuration parameters from the HBA.
4759  * The configuration parameters are used to set the base and maximum values
4760  * for RPIs, XRIs, VPIs, VFIs, and FCFIs. These values also affect the resource
4761  * allocation for the port.
4762  *
4763  * Return codes
4764  * 	0 - successful
4765  * 	ENOMEM - No available memory
4766  *      EIO - The mailbox failed to complete successfully.
4767  **/
4768 static int
4769 lpfc_sli4_read_config(struct lpfc_hba *phba)
4770 {
4771 	LPFC_MBOXQ_t *pmb;
4772 	struct lpfc_mbx_read_config *rd_config;
4773 	uint32_t rc = 0;
4774 
4775 	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4776 	if (!pmb) {
4777 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4778 				"2011 Unable to allocate memory for issuing "
4779 				"SLI_CONFIG_SPECIAL mailbox command\n");
4780 		return -ENOMEM;
4781 	}
4782 
4783 	lpfc_read_config(phba, pmb);
4784 
4785 	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
4786 	if (rc != MBX_SUCCESS) {
4787 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4788 			"2012 Mailbox failed, mbxCmd x%x "
4789 			"READ_CONFIG, mbxStatus x%x\n",
4790 			bf_get(lpfc_mqe_command, &pmb->u.mqe),
4791 			bf_get(lpfc_mqe_status, &pmb->u.mqe));
4792 		rc = -EIO;
4793 	} else {
4794 		rd_config = &pmb->u.mqe.un.rd_config;
4795 		phba->sli4_hba.max_cfg_param.max_xri =
4796 			bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
4797 		phba->sli4_hba.max_cfg_param.xri_base =
4798 			bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
4799 		phba->sli4_hba.max_cfg_param.max_vpi =
4800 			bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
4801 		phba->sli4_hba.max_cfg_param.vpi_base =
4802 			bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
4803 		phba->sli4_hba.max_cfg_param.max_rpi =
4804 			bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
4805 		phba->sli4_hba.max_cfg_param.rpi_base =
4806 			bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
4807 		phba->sli4_hba.max_cfg_param.max_vfi =
4808 			bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
4809 		phba->sli4_hba.max_cfg_param.vfi_base =
4810 			bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
4811 		phba->sli4_hba.max_cfg_param.max_fcfi =
4812 			bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
4813 		phba->sli4_hba.max_cfg_param.fcfi_base =
4814 			bf_get(lpfc_mbx_rd_conf_fcfi_base, rd_config);
4815 		phba->sli4_hba.max_cfg_param.max_eq =
4816 			bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
4817 		phba->sli4_hba.max_cfg_param.max_rq =
4818 			bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
4819 		phba->sli4_hba.max_cfg_param.max_wq =
4820 			bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
4821 		phba->sli4_hba.max_cfg_param.max_cq =
4822 			bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
4823 		phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
4824 		phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
4825 		phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
4826 		phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
4827 		phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
4828 		phba->max_vpi = phba->sli4_hba.max_cfg_param.max_vpi;
4829 		phba->max_vports = phba->max_vpi;
4830 		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4831 				"2003 cfg params XRI(B:%d M:%d), "
4832 				"VPI(B:%d M:%d) "
4833 				"VFI(B:%d M:%d) "
4834 				"RPI(B:%d M:%d) "
4835 				"FCFI(B:%d M:%d)\n",
4836 				phba->sli4_hba.max_cfg_param.xri_base,
4837 				phba->sli4_hba.max_cfg_param.max_xri,
4838 				phba->sli4_hba.max_cfg_param.vpi_base,
4839 				phba->sli4_hba.max_cfg_param.max_vpi,
4840 				phba->sli4_hba.max_cfg_param.vfi_base,
4841 				phba->sli4_hba.max_cfg_param.max_vfi,
4842 				phba->sli4_hba.max_cfg_param.rpi_base,
4843 				phba->sli4_hba.max_cfg_param.max_rpi,
4844 				phba->sli4_hba.max_cfg_param.fcfi_base,
4845 				phba->sli4_hba.max_cfg_param.max_fcfi);
4846 	}
4847 	mempool_free(pmb, phba->mbox_mem_pool);
4848 
4849 	/* Reset the DFT_HBA_Q_DEPTH to the max xri */
4850 	if (phba->cfg_hba_queue_depth > (phba->sli4_hba.max_cfg_param.max_xri))
4851 		phba->cfg_hba_queue_depth =
4852 				phba->sli4_hba.max_cfg_param.max_xri;
4853 	return rc;
4854 }
4855 
4856 /**
4857  * lpfc_dev_endian_order_setup - Notify the port of the host's endian order.
4858  * @phba: pointer to lpfc hba data structure.
4859  *
4860  * This routine is invoked to setup the host-side endian order to the
4861  * HBA consistent with the SLI-4 interface spec.
4862  *
4863  * Return codes
4864  * 	0 - successful
4865  * 	ENOMEM - No available memory
4866  *      EIO - The mailbox failed to complete successfully.
4867  **/
4868 static int
4869 lpfc_setup_endian_order(struct lpfc_hba *phba)
4870 {
4871 	LPFC_MBOXQ_t *mboxq;
4872 	uint32_t rc = 0;
4873 	uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
4874 				      HOST_ENDIAN_HIGH_WORD1};
4875 
4876 	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4877 	if (!mboxq) {
4878 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4879 				"0492 Unable to allocate memory for issuing "
4880 				"SLI_CONFIG_SPECIAL mailbox command\n");
4881 		return -ENOMEM;
4882 	}
4883 
4884 	/*
4885 	 * The SLI4_CONFIG_SPECIAL mailbox command requires the first two
4886 	 * words to contain special data values and no other data.
4887 	 */
4888 	memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
4889 	memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
4890 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4891 	if (rc != MBX_SUCCESS) {
4892 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4893 				"0493 SLI_CONFIG_SPECIAL mailbox failed with "
4894 				"status x%x\n",
4895 				rc);
4896 		rc = -EIO;
4897 	}
4898 
4899 	mempool_free(mboxq, phba->mbox_mem_pool);
4900 	return rc;
4901 }
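/*
 * Byte-order illustration (a generic example, not the actual magic
 * values): a 32-bit word written as 0x11223344 by a little-endian host
 * reads back as 0x44332211 under the opposite byte order; the port
 * applies the same comparison to the two special words above to learn
 * the host's endianness for all subsequent mailbox traffic.
 */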
4902 
4903 /**
4904  * lpfc_sli4_queue_create - Create all the SLI4 queues
4905  * @phba: pointer to lpfc hba data structure.
4906  *
4907  * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
4908  * operation. For each SLI4 queue type, parameters such as queue entry
4909  * count (queue depth) shall be taken from the module parameter. For now,
4910  * we just use a constant number as a placeholder.
4911  *
4912  * Return codes
4913  *      0 - successful
4914  *      ENOMEM - No available memory
4915  *      EIO - The mailbox failed to complete successfully.
4916  **/
4917 static int
4918 lpfc_sli4_queue_create(struct lpfc_hba *phba)
4919 {
4920 	struct lpfc_queue *qdesc;
4921 	int fcp_eqidx, fcp_cqidx, fcp_wqidx;
4922 	int cfg_fcp_wq_count;
4923 	int cfg_fcp_eq_count;
4924 
4925 	/*
4926 	 * Sanity check for configured queue parameters against the run-time
4927 	 * device parameters
4928 	 */
4929 
4930 	/* Sanity check on FCP fast-path WQ parameters */
4931 	cfg_fcp_wq_count = phba->cfg_fcp_wq_count;
4932 	if (cfg_fcp_wq_count >
4933 	    (phba->sli4_hba.max_cfg_param.max_wq - LPFC_SP_WQN_DEF)) {
4934 		cfg_fcp_wq_count = phba->sli4_hba.max_cfg_param.max_wq -
4935 				   LPFC_SP_WQN_DEF;
4936 		if (cfg_fcp_wq_count < LPFC_FP_WQN_MIN) {
4937 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4938 					"2581 Not enough WQs (%d) from "
4939 					"the pci function for supporting "
4940 					"FCP WQs (%d)\n",
4941 					phba->sli4_hba.max_cfg_param.max_wq,
4942 					phba->cfg_fcp_wq_count);
4943 			goto out_error;
4944 		}
4945 		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4946 				"2582 Not enough WQs (%d) from the pci "
4947 				"function for supporting the requested "
4948 				"FCP WQs (%d), the actual FCP WQs can "
4949 				"be supported: %d\n",
4950 				phba->sli4_hba.max_cfg_param.max_wq,
4951 				phba->cfg_fcp_wq_count, cfg_fcp_wq_count);
4952 	}
4953 	/* The actual number of FCP work queues adopted */
4954 	phba->cfg_fcp_wq_count = cfg_fcp_wq_count;
4955 
4956 	/* Sanity check on FCP fast-path EQ parameters */
4957 	cfg_fcp_eq_count = phba->cfg_fcp_eq_count;
4958 	if (cfg_fcp_eq_count >
4959 	    (phba->sli4_hba.max_cfg_param.max_eq - LPFC_SP_EQN_DEF)) {
4960 		cfg_fcp_eq_count = phba->sli4_hba.max_cfg_param.max_eq -
4961 				   LPFC_SP_EQN_DEF;
4962 		if (cfg_fcp_eq_count < LPFC_FP_EQN_MIN) {
4963 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4964 					"2574 Not enough EQs (%d) from the "
4965 					"pci function for supporting FCP "
4966 					"EQs (%d)\n",
4967 					phba->sli4_hba.max_cfg_param.max_eq,
4968 					phba->cfg_fcp_eq_count);
4969 			goto out_error;
4970 		}
4971 		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4972 				"2575 Not enough EQs (%d) from the pci "
4973 				"function for supporting the requested "
4974 				"FCP EQs (%d), the actual FCP EQs can "
4975 				"be supported: %d\n",
4976 				phba->sli4_hba.max_cfg_param.max_eq,
4977 				phba->cfg_fcp_eq_count, cfg_fcp_eq_count);
4978 	}
4979 	/* It does not make sense to have more EQs than WQs */
4980 	if (cfg_fcp_eq_count > phba->cfg_fcp_wq_count) {
4981 		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4982 				"2593 The number of FCP EQs (%d) is more "
4983 				"than the number of FCP WQs (%d), limiting "
4984 				"the number of FCP EQs to the number of "
4985 				"WQs (%d)\n", cfg_fcp_eq_count,
4986 				phba->cfg_fcp_wq_count,
4987 				phba->cfg_fcp_wq_count);
4988 		cfg_fcp_eq_count = phba->cfg_fcp_wq_count;
4989 	}
4990 	/* The actual number of FCP event queues adopted */
4991 	phba->cfg_fcp_eq_count = cfg_fcp_eq_count;
4992 	/* The overall number of event queues used */
4993 	phba->sli4_hba.cfg_eqn = phba->cfg_fcp_eq_count + LPFC_SP_EQN_DEF;
4994 
4995 	/*
4996 	 * Create Event Queues (EQs)
4997 	 */
4998 
4999 	/* Get EQ depth from module parameter, fake the default for now */
5000 	phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
5001 	phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
5002 
5003 	/* Create slow path event queue */
5004 	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
5005 				      phba->sli4_hba.eq_ecount);
5006 	if (!qdesc) {
5007 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5008 				"0496 Failed allocate slow-path EQ\n");
5009 		goto out_error;
5010 	}
5011 	phba->sli4_hba.sp_eq = qdesc;
5012 
5013 	/* Create fast-path FCP Event Queue(s) */
5014 	phba->sli4_hba.fp_eq = kzalloc((sizeof(struct lpfc_queue *) *
5015 			       phba->cfg_fcp_eq_count), GFP_KERNEL);
5016 	if (!phba->sli4_hba.fp_eq) {
5017 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5018 				"2576 Failed allocate memory for fast-path "
5019 				"EQ record array\n");
5020 		goto out_free_sp_eq;
5021 	}
5022 	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
5023 		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
5024 					      phba->sli4_hba.eq_ecount);
5025 		if (!qdesc) {
5026 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5027 					"0497 Failed allocate fast-path EQ\n");
5028 			goto out_free_fp_eq;
5029 		}
5030 		phba->sli4_hba.fp_eq[fcp_eqidx] = qdesc;
5031 	}
5032 
5033 	/*
5034 	 * Create Complete Queues (CQs)
5035 	 */
5036 
5037 	/* Get CQ depth from module parameter, fake the default for now */
5038 	phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
5039 	phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
5040 
5041 	/* Create slow-path Mailbox Command Complete Queue */
5042 	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
5043 				      phba->sli4_hba.cq_ecount);
5044 	if (!qdesc) {
5045 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5046 				"0500 Failed allocate slow-path mailbox CQ\n");
5047 		goto out_free_fp_eq;
5048 	}
5049 	phba->sli4_hba.mbx_cq = qdesc;
5050 
5051 	/* Create slow-path ELS Complete Queue */
5052 	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
5053 				      phba->sli4_hba.cq_ecount);
5054 	if (!qdesc) {
5055 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5056 				"0501 Failed allocate slow-path ELS CQ\n");
5057 		goto out_free_mbx_cq;
5058 	}
5059 	phba->sli4_hba.els_cq = qdesc;
5060 
5061 	/* Create slow-path Unsolicited Receive Complete Queue */
5062 	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
5063 				      phba->sli4_hba.cq_ecount);
5064 	if (!qdesc) {
5065 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5066 				"0502 Failed allocate slow-path USOL RX CQ\n");
5067 		goto out_free_els_cq;
5068 	}
5069 	phba->sli4_hba.rxq_cq = qdesc;
5070 
5071 	/* Create fast-path FCP Completion Queue(s), one-to-one with EQs */
5072 	phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) *
5073 				phba->cfg_fcp_eq_count), GFP_KERNEL);
5074 	if (!phba->sli4_hba.fcp_cq) {
5075 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5076 				"2577 Failed allocate memory for fast-path "
5077 				"CQ record array\n");
5078 		goto out_free_rxq_cq;
5079 	}
5080 	for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
5081 		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
5082 					      phba->sli4_hba.cq_ecount);
5083 		if (!qdesc) {
5084 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5085 					"0499 Failed allocate fast-path FCP "
5086 					"CQ (%d)\n", fcp_cqidx);
5087 			goto out_free_fcp_cq;
5088 		}
5089 		phba->sli4_hba.fcp_cq[fcp_cqidx] = qdesc;
5090 	}
5091 
5092 	/* Create Mailbox Command Queue */
5093 	phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
5094 	phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
5095 
5096 	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize,
5097 				      phba->sli4_hba.mq_ecount);
5098 	if (!qdesc) {
5099 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5100 				"0505 Failed allocate slow-path MQ\n");
5101 		goto out_free_fcp_cq;
5102 	}
5103 	phba->sli4_hba.mbx_wq = qdesc;
5104 
5105 	/*
5106 	 * Create all the Work Queues (WQs)
5107 	 */
5108 	phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
5109 	phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
5110 
5111 	/* Create slow-path ELS Work Queue */
5112 	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
5113 				      phba->sli4_hba.wq_ecount);
5114 	if (!qdesc) {
5115 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5116 				"0504 Failed allocate slow-path ELS WQ\n");
5117 		goto out_free_mbx_wq;
5118 	}
5119 	phba->sli4_hba.els_wq = qdesc;
5120 
5121 	/* Create fast-path FCP Work Queue(s) */
5122 	phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) *
5123 				phba->cfg_fcp_wq_count), GFP_KERNEL);
5124 	if (!phba->sli4_hba.fcp_wq) {
5125 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5126 				"2578 Failed allocate memory for fast-path "
5127 				"WQ record array\n");
5128 		goto out_free_els_wq;
5129 	}
5130 	for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
5131 		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
5132 					      phba->sli4_hba.wq_ecount);
5133 		if (!qdesc) {
5134 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5135 					"0503 Failed allocate fast-path FCP "
5136 					"WQ (%d)\n", fcp_wqidx);
5137 			goto out_free_fcp_wq;
5138 		}
5139 		phba->sli4_hba.fcp_wq[fcp_wqidx] = qdesc;
5140 	}
5141 
5142 	/*
5143 	 * Create Receive Queue (RQ)
5144 	 */
5145 	phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
5146 	phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
5147 
5148 	/* Create Receive Queue for header */
5149 	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
5150 				      phba->sli4_hba.rq_ecount);
5151 	if (!qdesc) {
5152 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5153 				"0506 Failed allocate receive HRQ\n");
5154 		goto out_free_fcp_wq;
5155 	}
5156 	phba->sli4_hba.hdr_rq = qdesc;
5157 
5158 	/* Create Receive Queue for data */
5159 	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
5160 				      phba->sli4_hba.rq_ecount);
5161 	if (!qdesc) {
5162 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5163 				"0507 Failed allocate receive DRQ\n");
5164 		goto out_free_hdr_rq;
5165 	}
5166 	phba->sli4_hba.dat_rq = qdesc;
5167 
5168 	return 0;
5169 
5170 out_free_hdr_rq:
5171 	lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
5172 	phba->sli4_hba.hdr_rq = NULL;
5173 out_free_fcp_wq:
5174 	for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) {
5175 		lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_wqidx]);
5176 		phba->sli4_hba.fcp_wq[fcp_wqidx] = NULL;
5177 	}
5178 	kfree(phba->sli4_hba.fcp_wq);
5179 out_free_els_wq:
5180 	lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
5181 	phba->sli4_hba.els_wq = NULL;
5182 out_free_mbx_wq:
5183 	lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
5184 	phba->sli4_hba.mbx_wq = NULL;
5185 out_free_fcp_cq:
5186 	for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) {
5187 		lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_cqidx]);
5188 		phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL;
5189 	}
5190 	kfree(phba->sli4_hba.fcp_cq);
5191 out_free_rxq_cq:
5192 	lpfc_sli4_queue_free(phba->sli4_hba.rxq_cq);
5193 	phba->sli4_hba.rxq_cq = NULL;
5194 out_free_els_cq:
5195 	lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
5196 	phba->sli4_hba.els_cq = NULL;
5197 out_free_mbx_cq:
5198 	lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
5199 	phba->sli4_hba.mbx_cq = NULL;
5200 out_free_fp_eq:
5201 	for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) {
5202 		lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_eqidx]);
5203 		phba->sli4_hba.fp_eq[fcp_eqidx] = NULL;
5204 	}
5205 	kfree(phba->sli4_hba.fp_eq);
5206 out_free_sp_eq:
5207 	lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
5208 	phba->sli4_hba.sp_eq = NULL;
5209 out_error:
5210 	return -ENOMEM;
5211 }
5212 
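/*
 * Editor's illustrative sketch (hypothetical, kept out of the build with
 * "#if 0"): each sanity check in lpfc_sli4_queue_create() above follows the
 * same clamp pattern -- reserve a fixed number of slow-path queues, cap the
 * requested fast-path count at what remains, and fail only when even the
 * minimum cannot be met. The helper name below is invented.
 */
#if 0
static int example_clamp_fp_queue_count(int requested, int max_avail,
					int sp_reserved, int fp_min)
{
	int avail = max_avail - sp_reserved;

	if (requested <= avail)
		return requested;	/* the request fits as-is */
	if (avail < fp_min)
		return -1;		/* cannot meet even the minimum */
	return avail;			/* clamp to what is left over */
}
#endif
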
5213 /**
5214  * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
5215  * @phba: pointer to lpfc hba data structure.
5216  *
5217  * This routine is invoked to release all the SLI4 queues allocated for
5218  * the FCoE HBA operation.
5219  *
5220  * This routine does not return any value.
5224  **/
5225 static void
5226 lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
5227 {
5228 	int fcp_qidx;
5229 
5230 	/* Release mailbox command work queue */
5231 	lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
5232 	phba->sli4_hba.mbx_wq = NULL;
5233 
5234 	/* Release ELS work queue */
5235 	lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
5236 	phba->sli4_hba.els_wq = NULL;
5237 
5238 	/* Release FCP work queue */
5239 	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
5240 		lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]);
5241 	kfree(phba->sli4_hba.fcp_wq);
5242 	phba->sli4_hba.fcp_wq = NULL;
5243 
5244 	/* Release unsolicited receive queue */
5245 	lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
5246 	phba->sli4_hba.hdr_rq = NULL;
5247 	lpfc_sli4_queue_free(phba->sli4_hba.dat_rq);
5248 	phba->sli4_hba.dat_rq = NULL;
5249 
5250 	/* Release unsolicited receive complete queue */
5251 	lpfc_sli4_queue_free(phba->sli4_hba.rxq_cq);
5252 	phba->sli4_hba.rxq_cq = NULL;
5253 
5254 	/* Release ELS complete queue */
5255 	lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
5256 	phba->sli4_hba.els_cq = NULL;
5257 
5258 	/* Release mailbox command complete queue */
5259 	lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
5260 	phba->sli4_hba.mbx_cq = NULL;
5261 
5262 	/* Release FCP response complete queue */
5263 	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
5264 		lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]);
5265 	kfree(phba->sli4_hba.fcp_cq);
5266 	phba->sli4_hba.fcp_cq = NULL;
5267 
5268 	/* Release fast-path event queue */
5269 	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
5270 		lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
5271 	kfree(phba->sli4_hba.fp_eq);
5272 	phba->sli4_hba.fp_eq = NULL;
5273 
5274 	/* Release slow-path event queue */
5275 	lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
5276 	phba->sli4_hba.sp_eq = NULL;
5277 
5278 	return;
5279 }
5280 
5281 /**
5282  * lpfc_sli4_queue_setup - Set up all the SLI4 queues
5283  * @phba: pointer to lpfc hba data structure.
5284  *
5285  * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
5286  * operation.
5287  *
5288  * Return codes
5289  *      0 - successful
5290  *      -ENOMEM - No available memory
5291  *      -EIO - The mailbox failed to complete successfully.
5292  **/
5293 int
5294 lpfc_sli4_queue_setup(struct lpfc_hba *phba)
5295 {
5296 	int rc = -ENOMEM;
5297 	int fcp_eqidx, fcp_cqidx, fcp_wqidx;
5298 	int fcp_cq_index = 0;
5299 
5300 	/*
5301 	 * Set up Event Queues (EQs)
5302 	 */
5303 
5304 	/* Set up slow-path event queue */
5305 	if (!phba->sli4_hba.sp_eq) {
5306 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5307 				"0520 Slow-path EQ not allocated\n");
5308 		goto out_error;
5309 	}
5310 	rc = lpfc_eq_create(phba, phba->sli4_hba.sp_eq,
5311 			    LPFC_SP_DEF_IMAX);
5312 	if (rc) {
5313 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5314 				"0521 Failed setup of slow-path EQ: "
5315 				"rc = 0x%x\n", rc);
5316 		goto out_error;
5317 	}
5318 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5319 			"2583 Slow-path EQ setup: queue-id=%d\n",
5320 			phba->sli4_hba.sp_eq->queue_id);
5321 
5322 	/* Set up fast-path event queue */
5323 	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
5324 		if (!phba->sli4_hba.fp_eq[fcp_eqidx]) {
5325 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5326 					"0522 Fast-path EQ (%d) not "
5327 					"allocated\n", fcp_eqidx);
5328 			goto out_destroy_fp_eq;
5329 		}
5330 		rc = lpfc_eq_create(phba, phba->sli4_hba.fp_eq[fcp_eqidx],
5331 				    phba->cfg_fcp_imax);
5332 		if (rc) {
5333 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5334 					"0523 Failed setup of fast-path EQ "
5335 					"(%d), rc = 0x%x\n", fcp_eqidx, rc);
5336 			goto out_destroy_fp_eq;
5337 		}
5338 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5339 				"2584 Fast-path EQ setup: "
5340 				"queue[%d]-id=%d\n", fcp_eqidx,
5341 				phba->sli4_hba.fp_eq[fcp_eqidx]->queue_id);
5342 	}
5343 
5344 	/*
5345 	 * Set up Complete Queues (CQs)
5346 	 */
5347 
5348 	/* Set up slow-path MBOX Complete Queue as the first CQ */
5349 	if (!phba->sli4_hba.mbx_cq) {
5350 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5351 				"0528 Mailbox CQ not allocated\n");
5352 		goto out_destroy_fp_eq;
5353 	}
5354 	rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq, phba->sli4_hba.sp_eq,
5355 			    LPFC_MCQ, LPFC_MBOX);
5356 	if (rc) {
5357 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5358 				"0529 Failed setup of slow-path mailbox CQ: "
5359 				"rc = 0x%x\n", rc);
5360 		goto out_destroy_fp_eq;
5361 	}
5362 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5363 			"2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n",
5364 			phba->sli4_hba.mbx_cq->queue_id,
5365 			phba->sli4_hba.sp_eq->queue_id);
5366 
5367 	/* Set up slow-path ELS Complete Queue */
5368 	if (!phba->sli4_hba.els_cq) {
5369 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5370 				"0530 ELS CQ not allocated\n");
5371 		goto out_destroy_mbx_cq;
5372 	}
5373 	rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq, phba->sli4_hba.sp_eq,
5374 			    LPFC_WCQ, LPFC_ELS);
5375 	if (rc) {
5376 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5377 				"0531 Failed setup of slow-path ELS CQ: "
5378 				"rc = 0x%x\n", rc);
5379 		goto out_destroy_mbx_cq;
5380 	}
5381 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5382 			"2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n",
5383 			phba->sli4_hba.els_cq->queue_id,
5384 			phba->sli4_hba.sp_eq->queue_id);
5385 
5386 	/* Set up slow-path Unsolicited Receive Complete Queue */
5387 	if (!phba->sli4_hba.rxq_cq) {
5388 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5389 				"0532 USOL RX CQ not allocated\n");
5390 		goto out_destroy_els_cq;
5391 	}
5392 	rc = lpfc_cq_create(phba, phba->sli4_hba.rxq_cq, phba->sli4_hba.sp_eq,
5393 			    LPFC_RCQ, LPFC_USOL);
5394 	if (rc) {
5395 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5396 				"0533 Failed setup of slow-path USOL RX CQ: "
5397 				"rc = 0x%x\n", rc);
5398 		goto out_destroy_els_cq;
5399 	}
5400 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5401 			"2587 USL CQ setup: cq-id=%d, parent eq-id=%d\n",
5402 			phba->sli4_hba.rxq_cq->queue_id,
5403 			phba->sli4_hba.sp_eq->queue_id);
5404 
5405 	/* Set up fast-path FCP Response Complete Queue */
5406 	for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
5407 		if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
5408 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5409 					"0526 Fast-path FCP CQ (%d) not "
5410 					"allocated\n", fcp_cqidx);
5411 			goto out_destroy_fcp_cq;
5412 		}
5413 		rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx],
5414 				    phba->sli4_hba.fp_eq[fcp_cqidx],
5415 				    LPFC_WCQ, LPFC_FCP);
5416 		if (rc) {
5417 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5418 					"0527 Failed setup of fast-path FCP "
5419 					"CQ (%d), rc = 0x%x\n", fcp_cqidx, rc);
5420 			goto out_destroy_fcp_cq;
5421 		}
5422 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5423 				"2588 FCP CQ setup: cq[%d]-id=%d, "
5424 				"parent eq[%d]-id=%d\n",
5425 				fcp_cqidx,
5426 				phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id,
5427 				fcp_cqidx,
5428 				phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id);
5429 	}
5430 
5431 	/*
5432 	 * Set up all the Work Queues (WQs)
5433 	 */
5434 
5435 	/* Set up Mailbox Command Queue */
5436 	if (!phba->sli4_hba.mbx_wq) {
5437 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5438 				"0538 Slow-path MQ not allocated\n");
5439 		goto out_destroy_fcp_cq;
5440 	}
5441 	rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq,
5442 			    phba->sli4_hba.mbx_cq, LPFC_MBOX);
5443 	if (rc) {
5444 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5445 				"0539 Failed setup of slow-path MQ: "
5446 				"rc = 0x%x\n", rc);
5447 		goto out_destroy_fcp_cq;
5448 	}
5449 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5450 			"2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
5451 			phba->sli4_hba.mbx_wq->queue_id,
5452 			phba->sli4_hba.mbx_cq->queue_id);
5453 
5454 	/* Set up slow-path ELS Work Queue */
5455 	if (!phba->sli4_hba.els_wq) {
5456 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5457 				"0536 Slow-path ELS WQ not allocated\n");
5458 		goto out_destroy_mbx_wq;
5459 	}
5460 	rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq,
5461 			    phba->sli4_hba.els_cq, LPFC_ELS);
5462 	if (rc) {
5463 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5464 				"0537 Failed setup of slow-path ELS WQ: "
5465 				"rc = 0x%x\n", rc);
5466 		goto out_destroy_mbx_wq;
5467 	}
5468 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5469 			"2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
5470 			phba->sli4_hba.els_wq->queue_id,
5471 			phba->sli4_hba.els_cq->queue_id);
5472 
5473 	/* Set up fast-path FCP Work Queue */
5474 	for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
5475 		if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) {
5476 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5477 					"0534 Fast-path FCP WQ (%d) not "
5478 					"allocated\n", fcp_wqidx);
5479 			goto out_destroy_fcp_wq;
5480 		}
5481 		rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx],
5482 				    phba->sli4_hba.fcp_cq[fcp_cq_index],
5483 				    LPFC_FCP);
5484 		if (rc) {
5485 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5486 					"0535 Failed setup of fast-path FCP "
5487 					"WQ (%d), rc = 0x%x\n", fcp_wqidx, rc);
5488 			goto out_destroy_fcp_wq;
5489 		}
5490 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5491 				"2591 FCP WQ setup: wq[%d]-id=%d, "
5492 				"parent cq[%d]-id=%d\n",
5493 				fcp_wqidx,
5494 				phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id,
5495 				fcp_cq_index,
5496 				phba->sli4_hba.fcp_cq[fcp_cq_index]->queue_id);
5497 		/* Round robin FCP Work Queue's Completion Queue assignment */
5498 		fcp_cq_index = ((fcp_cq_index + 1) % phba->cfg_fcp_eq_count);
5499 	}
5500 
5501 	/*
5502 	 * Create Receive Queue (RQ)
5503 	 */
5504 	if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
5505 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5506 				"0540 Receive Queue not allocated\n");
5507 		goto out_destroy_fcp_wq;
5508 	}
5509 	rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
5510 			    phba->sli4_hba.rxq_cq, LPFC_USOL);
5511 	if (rc) {
5512 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5513 				"0541 Failed setup of Receive Queue: "
5514 				"rc = 0x%x\n", rc);
5515 		goto out_destroy_fcp_wq;
5516 	}
5517 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5518 			"2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
5519 			"parent cq-id=%d\n",
5520 			phba->sli4_hba.hdr_rq->queue_id,
5521 			phba->sli4_hba.dat_rq->queue_id,
5522 			phba->sli4_hba.rxq_cq->queue_id);
5523 	return 0;
5524 
5525 out_destroy_fcp_wq:
5526 	for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--)
5527 		lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]);
5528 	lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
5529 out_destroy_mbx_wq:
5530 	lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
5531 out_destroy_fcp_cq:
5532 	for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
5533 		lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
5534 	lpfc_cq_destroy(phba, phba->sli4_hba.rxq_cq);
5535 out_destroy_els_cq:
5536 	lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
5537 out_destroy_mbx_cq:
5538 	lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
5539 out_destroy_fp_eq:
5540 	for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--)
5541 		lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_eqidx]);
5542 	lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
5543 out_error:
5544 	return rc;
5545 }
5546 
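/*
 * Editor's illustrative sketch (hypothetical, kept out of the build with
 * "#if 0"): the WQ setup loop above spreads the FCP WQs over the FCP CQs
 * round-robin via "fcp_cq_index = (fcp_cq_index + 1) % cfg_fcp_eq_count",
 * so WQ i always completes onto CQ (i mod cq_count). The helper name below
 * is invented.
 */
#if 0
static int example_wq_to_cq_index(int wq_index, int cq_count)
{
	/* WQ 'wq_index' is parented by CQ 'wq_index % cq_count' */
	return wq_index % cq_count;
}
#endif
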
5547 /**
5548  * lpfc_sli4_queue_unset - Unset all the SLI4 queues
5549  * @phba: pointer to lpfc hba data structure.
5550  *
5551  * This routine is invoked to tear down all the SLI4 queues previously
5552  * set up for the FCoE HBA operation.
5553  *
5554  * This routine does not return any value.
5558  **/
5559 void
5560 lpfc_sli4_queue_unset(struct lpfc_hba *phba)
5561 {
5562 	int fcp_qidx;
5563 
5564 	/* Unset mailbox command work queue */
5565 	lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
5566 	/* Unset ELS work queue */
5567 	lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
5568 	/* Unset unsolicited receive queue */
5569 	lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
5570 	/* Unset FCP work queue */
5571 	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
5572 		lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]);
5573 	/* Unset mailbox command complete queue */
5574 	lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
5575 	/* Unset ELS complete queue */
5576 	lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
5577 	/* Unset unsolicited receive complete queue */
5578 	lpfc_cq_destroy(phba, phba->sli4_hba.rxq_cq);
5579 	/* Unset FCP response complete queue */
5580 	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
5581 		lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
5582 	/* Unset fast-path event queue */
5583 	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
5584 		lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]);
5585 	/* Unset slow-path event queue */
5586 	lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
5587 }
5588 
5589 /**
5590  * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
5591  * @phba: pointer to lpfc hba data structure.
5592  *
5593  * This routine is invoked to allocate and set up a pool of completion queue
5594  * events. The body of a completion queue event is a completion queue entry
5595  * (CQE). For now, this pool is used by the interrupt service routine to queue
5596  * the following HBA completion queue events for the worker thread to process:
5597  *   - Mailbox asynchronous events
5598  *   - Receive queue completion unsolicited events
5599  * Later, this can be used for all the slow-path events.
5600  *
5601  * Return codes
5602  *      0 - successful
5603  *      -ENOMEM - No available memory
5604  **/
5605 static int
5606 lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
5607 {
5608 	struct lpfc_cq_event *cq_event;
5609 	int i;
5610 
5611 	for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
5612 		cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
5613 		if (!cq_event)
5614 			goto out_pool_create_fail;
5615 		list_add_tail(&cq_event->list,
5616 			      &phba->sli4_hba.sp_cqe_event_pool);
5617 	}
5618 	return 0;
5619 
5620 out_pool_create_fail:
5621 	lpfc_sli4_cq_event_pool_destroy(phba);
5622 	return -ENOMEM;
5623 }
5624 
5625 /**
5626  * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
5627  * @phba: pointer to lpfc hba data structure.
5628  *
5629  * This routine is invoked to free the pool of completion queue events at
5630  * driver unload time. Note that it is the responsibility of the driver
5631  * cleanup routine to release all outstanding completion-queue events
5632  * allocated from this pool back into the pool before invoking this routine
5633  * to destroy the pool.
5634  **/
5635 static void
5636 lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
5637 {
5638 	struct lpfc_cq_event *cq_event, *next_cq_event;
5639 
5640 	list_for_each_entry_safe(cq_event, next_cq_event,
5641 				 &phba->sli4_hba.sp_cqe_event_pool, list) {
5642 		list_del(&cq_event->list);
5643 		kfree(cq_event);
5644 	}
5645 }
5646 
5647 /**
5648  * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
5649  * @phba: pointer to lpfc hba data structure.
5650  *
5651  * This routine is the lock-free version of the API invoked to allocate a
5652  * completion-queue event from the free pool.
5653  *
5654  * Return: Pointer to the newly allocated completion-queue event if successful
5655  *         NULL otherwise.
5656  **/
5657 struct lpfc_cq_event *
5658 __lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
5659 {
5660 	struct lpfc_cq_event *cq_event = NULL;
5661 
5662 	list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
5663 			 struct lpfc_cq_event, list);
5664 	return cq_event;
5665 }
5666 
5667 /**
5668  * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
5669  * @phba: pointer to lpfc hba data structure.
5670  *
5671  * This routine is the locking version of the API invoked to allocate a
5672  * completion-queue event from the free pool.
5673  *
5674  * Return: Pointer to the newly allocated completion-queue event if successful
5675  *         NULL otherwise.
5676  **/
5677 struct lpfc_cq_event *
5678 lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
5679 {
5680 	struct lpfc_cq_event *cq_event;
5681 	unsigned long iflags;
5682 
5683 	spin_lock_irqsave(&phba->hbalock, iflags);
5684 	cq_event = __lpfc_sli4_cq_event_alloc(phba);
5685 	spin_unlock_irqrestore(&phba->hbalock, iflags);
5686 	return cq_event;
5687 }
5688 
5689 /**
5690  * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
5691  * @phba: pointer to lpfc hba data structure.
5692  * @cq_event: pointer to the completion queue event to be freed.
5693  *
5694  * This routine is the lock-free version of the API invoked to release a
5695  * completion-queue event back into the free pool.
5696  **/
5697 void
5698 __lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
5699 			     struct lpfc_cq_event *cq_event)
5700 {
5701 	list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
5702 }
5703 
5704 /**
5705  * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
5706  * @phba: pointer to lpfc hba data structure.
5707  * @cq_event: pointer to the completion queue event to be freed.
5708  *
5709  * This routine is the locking version of the API invoked to release a
5710  * completion-queue event back into the free pool.
5711  **/
5712 void
5713 lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
5714 			   struct lpfc_cq_event *cq_event)
5715 {
5716 	unsigned long iflags;
5717 	spin_lock_irqsave(&phba->hbalock, iflags);
5718 	__lpfc_sli4_cq_event_release(phba, cq_event);
5719 	spin_unlock_irqrestore(&phba->hbalock, iflags);
5720 }
5721 
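/*
 * Editor's illustrative sketch (hypothetical, kept out of the build with
 * "#if 0"): a typical consumer of the event pool above -- for instance an
 * interrupt handler -- allocates an event, fills it from the CQE, and
 * queues it for the worker thread, which later returns it with the release
 * call. The function name below is invented.
 */
#if 0
static void example_cq_event_round_trip(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;

	cq_event = lpfc_sli4_cq_event_alloc(phba);
	if (!cq_event)
		return;		/* pool exhausted; drop the event */
	/* copy the CQE payload into the event (field details omitted) */
	/* queue the event on a slow-path work list for the worker thread */
	/* ... later, once the worker thread has consumed the event: */
	lpfc_sli4_cq_event_release(phba, cq_event);
}
#endif
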
5722 /**
5723  * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
5724  * @phba: pointer to lpfc hba data structure.
5725  *
5726  * This routine is invoked to release all the pending completion-queue
5727  * events back into the free pool in preparation for a device reset.
5728  **/
5729 static void
5730 lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
5731 {
5732 	LIST_HEAD(cqelist);
5733 	struct lpfc_cq_event *cqe;
5734 	unsigned long iflags;
5735 
5736 	/* Retrieve all the pending WCQEs from pending WCQE lists */
5737 	spin_lock_irqsave(&phba->hbalock, iflags);
5738 	/* Pending FCP XRI abort events */
5739 	list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
5740 			 &cqelist);
5741 	/* Pending ELS XRI abort events */
5742 	list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
5743 			 &cqelist);
5744 	/* Pending async events */
5745 	list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
5746 			 &cqelist);
5747 	spin_unlock_irqrestore(&phba->hbalock, iflags);
5748 
5749 	while (!list_empty(&cqelist)) {
5750 		list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
5751 		lpfc_sli4_cq_event_release(phba, cqe);
5752 	}
5753 }
5754 
5755 /**
5756  * lpfc_pci_function_reset - Reset pci function.
5757  * @phba: pointer to lpfc hba data structure.
5758  *
5759  * This routine is invoked to request a PCI function reset. It destroys
5760  * all resources assigned to the PCI function that originates this request.
5761  *
5762  * Return codes
5763  *      0 - successful
5764  *      -ENOMEM - No available memory
5765  *      -ENXIO - The mailbox failed to complete successfully.
5766  **/
5767 int
5768 lpfc_pci_function_reset(struct lpfc_hba *phba)
5769 {
5770 	LPFC_MBOXQ_t *mboxq;
5771 	uint32_t rc = 0;
5772 	uint32_t shdr_status, shdr_add_status;
5773 	union lpfc_sli4_cfg_shdr *shdr;
5774 
5775 	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5776 	if (!mboxq) {
5777 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5778 				"0494 Unable to allocate memory for issuing "
5779 				"SLI_FUNCTION_RESET mailbox command\n");
5780 		return -ENOMEM;
5781 	}
5782 
5783 	/* Set up PCI function reset SLI4_CONFIG mailbox-ioctl command */
5784 	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5785 			 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
5786 			 LPFC_SLI4_MBX_EMBED);
5787 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5788 	shdr = (union lpfc_sli4_cfg_shdr *)
5789 		&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
5790 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5791 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5792 	if (rc != MBX_TIMEOUT)
5793 		mempool_free(mboxq, phba->mbox_mem_pool);
5794 	if (shdr_status || shdr_add_status || rc) {
5795 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5796 				"0495 SLI_FUNCTION_RESET mailbox failed with "
5797 				"status x%x add_status x%x, mbx status x%x\n",
5798 				shdr_status, shdr_add_status, rc);
5799 		rc = -ENXIO;
5800 	}
5801 	return rc;
5802 }
5803 
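/*
 * Editor's illustrative sketch (hypothetical, kept out of the build with
 * "#if 0"): an embedded SLI4_CONFIG mailbox result is checked twice above,
 * once for the mailbox transport status (rc) and once for the command's own
 * status words in the config shdr. Condensed, with an invented name:
 */
#if 0
static int example_check_sli4_cfg_status(LPFC_MBOXQ_t *mboxq, int rc)
{
	union lpfc_sli4_cfg_shdr *shdr = (union lpfc_sli4_cfg_shdr *)
		&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;

	/* fail if either the transport or the command itself failed */
	if (rc || bf_get(lpfc_mbox_hdr_status, &shdr->response) ||
	    bf_get(lpfc_mbox_hdr_add_status, &shdr->response))
		return -ENXIO;
	return 0;
}
#endif
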
5804 /**
5805  * lpfc_sli4_send_nop_mbox_cmds - Send sli-4 nop mailbox commands
5806  * @phba: pointer to lpfc hba data structure.
5807  * @cnt: number of nop mailbox commands to send.
5808  *
5809  * This routine is invoked to send @cnt NOP mailbox commands and to wait
5810  * for each command to complete.
5811  *
5812  * Return: the number of NOP mailbox commands completed.
5813  **/
5814 static int
5815 lpfc_sli4_send_nop_mbox_cmds(struct lpfc_hba *phba, uint32_t cnt)
5816 {
5817 	LPFC_MBOXQ_t *mboxq;
5818 	int length, cmdsent;
5819 	uint32_t mbox_tmo;
5820 	uint32_t rc = 0;
5821 	uint32_t shdr_status, shdr_add_status;
5822 	union lpfc_sli4_cfg_shdr *shdr;
5823 
5824 	if (cnt == 0) {
5825 		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5826 				"2518 Requested to send 0 NOP mailbox cmd\n");
5827 		return cnt;
5828 	}
5829 
5830 	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5831 	if (!mboxq) {
5832 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5833 				"2519 Unable to allocate memory for issuing "
5834 				"NOP mailbox command\n");
5835 		return 0;
5836 	}
5837 
5838 	/* Set up NOP SLI4_CONFIG mailbox-ioctl command */
5839 	length = (sizeof(struct lpfc_mbx_nop) -
5840 		  sizeof(struct lpfc_sli4_cfg_mhdr));
5841 	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5842 			 LPFC_MBOX_OPCODE_NOP, length, LPFC_SLI4_MBX_EMBED);
5843 
5844 	mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
5845 	for (cmdsent = 0; cmdsent < cnt; cmdsent++) {
5846 		if (!phba->sli4_hba.intr_enable)
5847 			rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5848 		else
5849 			rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
5850 		if (rc == MBX_TIMEOUT)
5851 			break;
5852 		/* Check return status */
5853 		shdr = (union lpfc_sli4_cfg_shdr *)
5854 			&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
5855 		shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5856 		shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
5857 					 &shdr->response);
5858 		if (shdr_status || shdr_add_status || rc) {
5859 			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5860 					"2520 NOP mailbox command failed "
5861 					"status x%x add_status x%x mbx "
5862 					"status x%x\n", shdr_status,
5863 					shdr_add_status, rc);
5864 			break;
5865 		}
5866 	}
5867 
5868 	if (rc != MBX_TIMEOUT)
5869 		mempool_free(mboxq, phba->mbox_mem_pool);
5870 
5871 	return cmdsent;
5872 }
5873 
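/*
 * Editor's illustrative sketch (hypothetical, kept out of the build with
 * "#if 0"): since the routine above returns the number of NOP commands that
 * actually completed rather than an error code, a caller verifies the count
 * to judge whether the mailbox path is healthy. The function name and the
 * count of 10 below are invented.
 */
#if 0
static int example_verify_mbox_path(struct lpfc_hba *phba)
{
	uint32_t want = 10;

	if (lpfc_sli4_send_nop_mbox_cmds(phba, want) != want)
		return -EIO;	/* some NOPs failed or timed out */
	return 0;
}
#endif
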
5874 /**
5875  * lpfc_sli4_fcfi_unreg - Unregister fcfi from device
5876  * @phba: pointer to lpfc hba data structure.
5877  * @fcfi: fcf index.
5878  *
5879  * This routine is invoked to unregister an FCFI from the device.
5880  **/
5881 void
5882 lpfc_sli4_fcfi_unreg(struct lpfc_hba *phba, uint16_t fcfi)
5883 {
5884 	LPFC_MBOXQ_t *mbox;
5885 	uint32_t mbox_tmo;
5886 	int rc;
5887 	unsigned long flags;
5888 
5889 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5890 
5891 	if (!mbox)
5892 		return;
5893 
5894 	lpfc_unreg_fcfi(mbox, fcfi);
5895 
5896 	if (!phba->sli4_hba.intr_enable)
5897 		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5898 	else {
5899 		mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
5900 		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5901 	}
5902 	if (rc != MBX_TIMEOUT)
5903 		mempool_free(mbox, phba->mbox_mem_pool);
5904 	if (rc != MBX_SUCCESS)
5905 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5906 				"2517 Unregister FCFI command failed "
5907 				"status %d, mbxStatus x%x\n", rc,
5908 				bf_get(lpfc_mqe_status, &mbox->u.mqe));
5909 	else {
5910 		spin_lock_irqsave(&phba->hbalock, flags);
5911 		/* Mark the FCFI is no longer registered */
5912 		phba->fcf.fcf_flag &=
5913 			~(FCF_AVAILABLE | FCF_REGISTERED | FCF_DISCOVERED);
5914 		spin_unlock_irqrestore(&phba->hbalock, flags);
5915 	}
5916 }
5917 
5918 /**
5919  * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
5920  * @phba: pointer to lpfc hba data structure.
5921  *
5922  * This routine is invoked to set up the PCI device memory space for device
5923  * with SLI-4 interface spec.
5924  *
5925  * Return codes
5926  * 	0 - successful
5927  * 	other values - error
5928  **/
5929 static int
5930 lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
5931 {
5932 	struct pci_dev *pdev;
5933 	unsigned long bar0map_len, bar1map_len, bar2map_len;
5934 	int error = -ENODEV;
5935 
5936 	/* Obtain PCI device reference */
5937 	if (!phba->pcidev)
5938 		return error;
5939 	else
5940 		pdev = phba->pcidev;
5941 
5942 	/* Set the device DMA mask size */
5943 	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)
5944 		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
5945 			return error;
5946 
5947 	/* Get the bus address of SLI4 device Bar0, Bar1, and Bar2 and the
5948 	 * number of bytes required by each mapping. These actually map to
5949 	 * PCI BAR regions 1, 2, and 4 of the SLI4 device.
5950 	 */
5951 	phba->pci_bar0_map = pci_resource_start(pdev, LPFC_SLI4_BAR0);
5952 	bar0map_len = pci_resource_len(pdev, LPFC_SLI4_BAR0);
5953 
5954 	phba->pci_bar1_map = pci_resource_start(pdev, LPFC_SLI4_BAR1);
5955 	bar1map_len = pci_resource_len(pdev, LPFC_SLI4_BAR1);
5956 
5957 	phba->pci_bar2_map = pci_resource_start(pdev, LPFC_SLI4_BAR2);
5958 	bar2map_len = pci_resource_len(pdev, LPFC_SLI4_BAR2);
5959 
5960 	/* Map SLI4 PCI Config Space Register base to a kernel virtual addr */
5961 	phba->sli4_hba.conf_regs_memmap_p =
5962 				ioremap(phba->pci_bar0_map, bar0map_len);
5963 	if (!phba->sli4_hba.conf_regs_memmap_p) {
5964 		dev_printk(KERN_ERR, &pdev->dev,
5965 			   "ioremap failed for SLI4 PCI config registers.\n");
5966 		goto out;
5967 	}
5968 
5969 	/* Map SLI4 HBA Control Register base to a kernel virtual address. */
5970 	phba->sli4_hba.ctrl_regs_memmap_p =
5971 				ioremap(phba->pci_bar1_map, bar1map_len);
5972 	if (!phba->sli4_hba.ctrl_regs_memmap_p) {
5973 		dev_printk(KERN_ERR, &pdev->dev,
5974 			   "ioremap failed for SLI4 HBA control registers.\n");
5975 		goto out_iounmap_conf;
5976 	}
5977 
5978 	/* Map SLI4 HBA Doorbell Register base to a kernel virtual address. */
5979 	phba->sli4_hba.drbl_regs_memmap_p =
5980 				ioremap(phba->pci_bar2_map, bar2map_len);
5981 	if (!phba->sli4_hba.drbl_regs_memmap_p) {
5982 		dev_printk(KERN_ERR, &pdev->dev,
5983 			   "ioremap failed for SLI4 HBA doorbell registers.\n");
5984 		goto out_iounmap_ctrl;
5985 	}
5986 
5987 	/* Set up BAR0 PCI config space register memory map */
5988 	lpfc_sli4_bar0_register_memmap(phba);
5989 
5990 	/* Set up BAR1 register memory map */
5991 	lpfc_sli4_bar1_register_memmap(phba);
5992 
5993 	/* Set up BAR2 register memory map */
5994 	error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
5995 	if (error)
5996 		goto out_iounmap_all;
5997 
5998 	return 0;
5999 
6000 out_iounmap_all:
6001 	iounmap(phba->sli4_hba.drbl_regs_memmap_p);
6002 out_iounmap_ctrl:
6003 	iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
6004 out_iounmap_conf:
6005 	iounmap(phba->sli4_hba.conf_regs_memmap_p);
6006 out:
6007 	return error;
6008 }
6009 
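/*
 * Editor's illustrative sketch (hypothetical, kept out of the build with
 * "#if 0"): the error handling above is the standard kernel "goto unwind"
 * idiom -- each successful ioremap() gains an iounmap() label, and a
 * failure jumps to the label that undoes exactly the mappings made so far.
 * Reduced to two mappings with invented names:
 */
#if 0
static int example_goto_unwind(struct pci_dev *pdev)
{
	void __iomem *a, *b;

	a = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
	if (!a)
		goto out;
	b = ioremap(pci_resource_start(pdev, 2), pci_resource_len(pdev, 2));
	if (!b)
		goto out_unmap_a;	/* undo only what already succeeded */
	return 0;			/* keep both mappings, as above */

out_unmap_a:
	iounmap(a);
out:
	return -ENODEV;
}
#endif
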
6010 /**
6011  * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
6012  * @phba: pointer to lpfc hba data structure.
6013  *
6014  * This routine is invoked to unset the PCI device memory space for device
6015  * with SLI-4 interface spec.
6016  **/
6017 static void
6018 lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
6019 {
6020 	struct pci_dev *pdev;
6021 
6022 	/* Obtain PCI device reference */
6023 	if (!phba->pcidev)
6024 		return;
6025 	else
6026 		pdev = phba->pcidev;
6027 
6030 	/* Unmap I/O memory space */
6031 	iounmap(phba->sli4_hba.drbl_regs_memmap_p);
6032 	iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
6033 	iounmap(phba->sli4_hba.conf_regs_memmap_p);
6034 
6035 	return;
6036 }
6037 
6038 /**
6039  * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
6040  * @phba: pointer to lpfc hba data structure.
6041  *
6042  * This routine is invoked to enable the MSI-X interrupt vectors to device
6043  * with SLI-3 interface specs. The kernel function pci_enable_msix() is
6044  * called to enable the MSI-X vectors. Note that pci_enable_msix(), once
6045  * invoked, enables either all or nothing, depending on the current
6046  * availability of PCI vector resources. The device driver is responsible
6047  * for calling the individual request_irq() to register each MSI-X vector
6048  * with an interrupt handler, which is done in this function. Note that
6049  * later, when the device is unloading, the driver should always call
6050  * free_irq() on all MSI-X vectors it has done request_irq() on before
6051  * calling pci_disable_msix(). Failure to do so results in a BUG_ON() and
6052  * the device will be left with MSI-X enabled, leaking its vectors.
6053  *
6054  * Return codes
6055  *   0 - successful
6056  *   other values - error
6057  **/
6058 static int
6059 lpfc_sli_enable_msix(struct lpfc_hba *phba)
6060 {
6061 	int rc, i;
6062 	LPFC_MBOXQ_t *pmb;
6063 
6064 	/* Set up MSI-X multi-message vectors */
6065 	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
6066 		phba->msix_entries[i].entry = i;
6067 
6068 	/* Configure MSI-X capability structure */
6069 	rc = pci_enable_msix(phba->pcidev, phba->msix_entries,
6070 				ARRAY_SIZE(phba->msix_entries));
6071 	if (rc) {
6072 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6073 				"0420 PCI enable MSI-X failed (%d)\n", rc);
6074 		goto msi_fail_out;
6075 	}
6076 	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
6077 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6078 				"0477 MSI-X entry[%d]: vector=x%x "
6079 				"message=%d\n", i,
6080 				phba->msix_entries[i].vector,
6081 				phba->msix_entries[i].entry);
6082 	/*
6083 	 * Assign MSI-X vectors to interrupt handlers
6084 	 */
6085 
6086 	/* vector-0 is associated with the slow-path handler */
6087 	rc = request_irq(phba->msix_entries[0].vector,
6088 			 &lpfc_sli_sp_intr_handler, IRQF_SHARED,
6089 			 LPFC_SP_DRIVER_HANDLER_NAME, phba);
6090 	if (rc) {
6091 		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6092 				"0421 MSI-X slow-path request_irq failed "
6093 				"(%d)\n", rc);
6094 		goto msi_fail_out;
6095 	}
6096 
6097 	/* vector-1 is associated with the fast-path handler */
6098 	rc = request_irq(phba->msix_entries[1].vector,
6099 			 &lpfc_sli_fp_intr_handler, IRQF_SHARED,
6100 			 LPFC_FP_DRIVER_HANDLER_NAME, phba);
6101 
6102 	if (rc) {
6103 		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6104 				"0429 MSI-X fast-path request_irq failed "
6105 				"(%d)\n", rc);
6106 		goto irq_fail_out;
6107 	}
6108 
6109 	/*
6110 	 * Configure HBA MSI-X attention conditions to messages
6111 	 */
6112 	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6113 
6114 	if (!pmb) {
6115 		rc = -ENOMEM;
6116 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6117 				"0474 Unable to allocate memory for issuing "
6118 				"MBOX_CONFIG_MSI command\n");
6119 		goto mem_fail_out;
6120 	}
6121 	rc = lpfc_config_msi(phba, pmb);
6122 	if (rc)
6123 		goto mbx_fail_out;
6124 	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
6125 	if (rc != MBX_SUCCESS) {
6126 		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
6127 				"0351 Config MSI mailbox command failed, "
6128 				"mbxCmd x%x, mbxStatus x%x\n",
6129 				pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
6130 		goto mbx_fail_out;
6131 	}
6132 
6133 	/* Free memory allocated for mailbox command */
6134 	mempool_free(pmb, phba->mbox_mem_pool);
6135 	return rc;
6136 
6137 mbx_fail_out:
6138 	/* Free memory allocated for mailbox command */
6139 	mempool_free(pmb, phba->mbox_mem_pool);
6140 
6141 mem_fail_out:
6142 	/* free the irq already requested */
6143 	free_irq(phba->msix_entries[1].vector, phba);
6144 
6145 irq_fail_out:
6146 	/* free the irq already requested */
6147 	free_irq(phba->msix_entries[0].vector, phba);
6148 
6149 msi_fail_out:
6150 	/* Unconfigure MSI-X capability structure */
6151 	pci_disable_msix(phba->pcidev);
6152 	return rc;
6153 }
6154 
6155 /**
6156  * lpfc_sli_disable_msix - Disable MSI-X interrupt mode on SLI-3 device.
6157  * @phba: pointer to lpfc hba data structure.
6158  *
6159  * This routine is invoked to release the MSI-X vectors and then disable the
6160  * MSI-X interrupt mode to device with SLI-3 interface spec.
6161  **/
6162 static void
6163 lpfc_sli_disable_msix(struct lpfc_hba *phba)
6164 {
6165 	int i;
6166 
6167 	/* Free up MSI-X multi-message vectors */
6168 	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
6169 		free_irq(phba->msix_entries[i].vector, phba);
6170 	/* Disable MSI-X */
6171 	pci_disable_msix(phba->pcidev);
6172 
6173 	return;
6174 }
6175 
6176 /**
6177  * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
6178  * @phba: pointer to lpfc hba data structure.
6179  *
6180  * This routine is invoked to enable the MSI interrupt mode to device with
6181  * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
6182  * enable the MSI vector. The device driver is responsible for calling the
6183  * request_irq() to register the MSI vector with an interrupt handler,
6184  * which is done in this function.
6185  *
6186  * Return codes
6187  * 	0 - successful
6188  * 	other values - error
6189  **/
6190 static int
6191 lpfc_sli_enable_msi(struct lpfc_hba *phba)
6192 {
6193 	int rc;
6194 
6195 	rc = pci_enable_msi(phba->pcidev);
6196 	if (!rc)
6197 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6198 				"0462 PCI enable MSI mode success.\n");
6199 	else {
6200 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6201 				"0471 PCI enable MSI mode failed (%d)\n", rc);
6202 		return rc;
6203 	}
6204 
6205 	rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
6206 			 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
6207 	if (rc) {
6208 		pci_disable_msi(phba->pcidev);
6209 		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6210 				"0478 MSI request_irq failed (%d)\n", rc);
6211 	}
6212 	return rc;
6213 }
6214 
6215 /**
6216  * lpfc_sli_disable_msi - Disable MSI interrupt mode to SLI-3 device.
6217  * @phba: pointer to lpfc hba data structure.
6218  *
6219  * This routine is invoked to disable the MSI interrupt mode to device with
6220  * SLI-3 interface spec. The driver calls free_irq() on the MSI vector it
6221  * has done request_irq() on before calling pci_disable_msi(). Failure to
6222  * do so results in a BUG_ON() and the device will be left with MSI
6223  * enabled, leaking its vector.
6224  **/
6225 static void
6226 lpfc_sli_disable_msi(struct lpfc_hba *phba)
6227 {
6228 	free_irq(phba->pcidev->irq, phba);
6229 	pci_disable_msi(phba->pcidev);
6230 	return;
6231 }
6232 
6233 /**
6234  * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
6235  * @phba: pointer to lpfc hba data structure.
6236  * @cfg_mode: the configured interrupt mode (0 - INTx, 1 - MSI, 2 - MSI-X).
6237  *
6238  * This routine is invoked to enable device interrupts and to associate the
6239  * driver's interrupt handler(s) with interrupt vector(s) on a device with
6240  * the SLI-3 interface spec. Depending on the interrupt mode configured for
6241  * the driver, it will try to fall back from the configured interrupt mode
6242  * to an interrupt mode which is supported by the platform, kernel, and
6243  * device, in the order of: MSI-X -> MSI -> IRQ.
6244  *
6245  * Return codes
6246  *   intr_mode - the interrupt mode that was successfully enabled
6247  *   LPFC_INTR_ERROR - no interrupt mode could be enabled
6248  **/
6249 static uint32_t
6250 lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
6251 {
6252 	uint32_t intr_mode = LPFC_INTR_ERROR;
6253 	int retval;
6254 
6255 	if (cfg_mode == 2) {
6256 		/* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
6257 		retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
6258 		if (!retval) {
6259 			/* Now, try to enable MSI-X interrupt mode */
6260 			retval = lpfc_sli_enable_msix(phba);
6261 			if (!retval) {
6262 				/* Indicate initialization to MSI-X mode */
6263 				phba->intr_type = MSIX;
6264 				intr_mode = 2;
6265 			}
6266 		}
6267 	}
6268 
6269 	/* Fall back to MSI if MSI-X initialization failed */
6270 	if (cfg_mode >= 1 && phba->intr_type == NONE) {
6271 		retval = lpfc_sli_enable_msi(phba);
6272 		if (!retval) {
6273 			/* Indicate initialization to MSI mode */
6274 			phba->intr_type = MSI;
6275 			intr_mode = 1;
6276 		}
6277 	}
6278 
6279 	/* Fall back to INTx if both MSI-X/MSI initialization failed */
6280 	if (phba->intr_type == NONE) {
6281 		retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
6282 				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
6283 		if (!retval) {
6284 			/* Indicate initialization to INTx mode */
6285 			phba->intr_type = INTx;
6286 			intr_mode = 0;
6287 		}
6288 	}
6289 	return intr_mode;
6290 }
6291 
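/*
 * Editor's illustrative sketch (hypothetical, kept out of the build with
 * "#if 0"): the routine above returns the interrupt mode that was actually
 * enabled (2, 1, or 0) rather than a plain error code, so a caller records
 * the achieved mode and fails only when every mode was exhausted. The
 * function name below is invented; cfg_use_msi and intr_mode are assumed
 * fields of the hba structure.
 */
#if 0
static int example_bring_up_interrupts(struct lpfc_hba *phba)
{
	uint32_t intr_mode;

	intr_mode = lpfc_sli_enable_intr(phba, phba->cfg_use_msi);
	if (intr_mode == LPFC_INTR_ERROR)
		return -ENODEV;		/* no usable interrupt mode */
	phba->intr_mode = intr_mode;	/* remember for later re-enable */
	return 0;
}
#endif
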
6292 /**
6293  * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
6294  * @phba: pointer to lpfc hba data structure.
6295  *
6296  * This routine is invoked to disable device interrupt and disassociate the
6297  * driver's interrupt handler(s) from interrupt vector(s) to device with
6298  * SLI-3 interface spec. Depending on the interrupt mode, the driver will
6299  * release the interrupt vector(s) for the message signaled interrupt.
6300  **/
6301 static void
6302 lpfc_sli_disable_intr(struct lpfc_hba *phba)
6303 {
6304 	/* Disable the currently initialized interrupt mode */
6305 	if (phba->intr_type == MSIX)
6306 		lpfc_sli_disable_msix(phba);
6307 	else if (phba->intr_type == MSI)
6308 		lpfc_sli_disable_msi(phba);
6309 	else if (phba->intr_type == INTx)
6310 		free_irq(phba->pcidev->irq, phba);
6311 
6312 	/* Reset interrupt management states */
6313 	phba->intr_type = NONE;
6314 	phba->sli.slistat.sli_intr = 0;
6315 
6316 	return;
6317 }
6318 
6319 /**
6320  * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
6321  * @phba: pointer to lpfc hba data structure.
6322  *
6323  * This routine is invoked to enable the MSI-X interrupt vectors to device
6324  * with SLI-4 interface spec. The kernel function pci_enable_msix() is called
6325  * to enable the MSI-X vectors. Note that pci_enable_msix(), once invoked,
6326  * enables either all or nothing, depending on the current availability of
6327  * PCI vector resources. The device driver is responsible for calling the
6328  * individual request_irq() to register each MSI-X vector with an interrupt
6329  * handler, which is done in this function. Note that later, when the device
6330  * is unloading, the driver should always call free_irq() on all MSI-X
6331  * vectors it has done request_irq() on before calling pci_disable_msix().
6332  * Failure to do so results in a BUG_ON() and the device will be left with
6333  * MSI-X enabled, leaking its vectors.
6334  *
6335  * Return codes
6336  * 0 - successful
6337  * other values - error
6338  **/
6339 static int
6340 lpfc_sli4_enable_msix(struct lpfc_hba *phba)
6341 {
6342 	int rc, index;
6343 
6344 	/* Set up MSI-X multi-message vectors */
6345 	for (index = 0; index < phba->sli4_hba.cfg_eqn; index++)
6346 		phba->sli4_hba.msix_entries[index].entry = index;
6347 
6348 	/* Configure MSI-X capability structure */
6349 	rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries,
6350 			     phba->sli4_hba.cfg_eqn);
6351 	if (rc) {
6352 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6353 				"0484 PCI enable MSI-X failed (%d)\n", rc);
6354 		goto msi_fail_out;
6355 	}
6356 	/* Log MSI-X vector assignment */
6357 	for (index = 0; index < phba->sli4_hba.cfg_eqn; index++)
6358 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6359 				"0489 MSI-X entry[%d]: vector=x%x "
6360 				"message=%d\n", index,
6361 				phba->sli4_hba.msix_entries[index].vector,
6362 				phba->sli4_hba.msix_entries[index].entry);
6363 	/*
6364 	 * Assign MSI-X vectors to interrupt handlers
6365 	 */
6366 
6367 	/* The first vector must be associated with the slow-path handler for MQ */
6368 	rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
6369 			 &lpfc_sli4_sp_intr_handler, IRQF_SHARED,
6370 			 LPFC_SP_DRIVER_HANDLER_NAME, phba);
6371 	if (rc) {
6372 		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6373 				"0485 MSI-X slow-path request_irq failed "
6374 				"(%d)\n", rc);
6375 		goto msi_fail_out;
6376 	}
6377 
6378 	/* The rest of the vector(s) are associated with fast-path handler(s) */
6379 	for (index = 1; index < phba->sli4_hba.cfg_eqn; index++) {
6380 		phba->sli4_hba.fcp_eq_hdl[index - 1].idx = index - 1;
6381 		phba->sli4_hba.fcp_eq_hdl[index - 1].phba = phba;
6382 		rc = request_irq(phba->sli4_hba.msix_entries[index].vector,
6383 				 &lpfc_sli4_fp_intr_handler, IRQF_SHARED,
6384 				 LPFC_FP_DRIVER_HANDLER_NAME,
6385 				 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
6386 		if (rc) {
6387 			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6388 					"0486 MSI-X fast-path (%d) "
6389 					"request_irq failed (%d)\n", index, rc);
6390 			goto cfg_fail_out;
6391 		}
6392 	}
6393 
6394 	return rc;
6395 
6396 cfg_fail_out:
6397 	/* free the irq already requested */
6398 	for (--index; index >= 1; index--)
6399 		free_irq(phba->sli4_hba.msix_entries[index].vector,
6400 			 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
6401 
6402 	/* free the irq already requested */
6403 	free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
6404 
6405 msi_fail_out:
6406 	/* Unconfigure MSI-X capability structure */
6407 	pci_disable_msix(phba->pcidev);
6408 	return rc;
6409 }
6410 
6411 /**
6412  * lpfc_sli4_disable_msix - Disable MSI-X interrupt mode to SLI-4 device
6413  * @phba: pointer to lpfc hba data structure.
6414  *
6415  * This routine is invoked to release the MSI-X vectors and then disable the
6416  * MSI-X interrupt mode to device with SLI-4 interface spec.
6417  **/
6418 static void
6419 lpfc_sli4_disable_msix(struct lpfc_hba *phba)
6420 {
6421 	int index;
6422 
6423 	/* Free up MSI-X multi-message vectors */
6424 	free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
6425 
6426 	for (index = 1; index < phba->sli4_hba.cfg_eqn; index++)
6427 		free_irq(phba->sli4_hba.msix_entries[index].vector,
6428 			 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
6429 	/* Disable MSI-X */
6430 	pci_disable_msix(phba->pcidev);
6431 
6432 	return;
6433 }
6434 
6435 /**
6436  * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
6437  * @phba: pointer to lpfc hba data structure.
6438  *
6439  * This routine is invoked to enable the MSI interrupt mode to device with
6440  * SLI-4 interface spec. The kernel function pci_enable_msi() is called
6441  * to enable the MSI vector. The device driver is responsible for calling
6442  * the request_irq() to register the MSI vector with an interrupt handler,
6443  * which is done in this function.
6444  *
6445  * Return codes
6446  * 	0 - successful
6447  * 	other values - error
6448  **/
6449 static int
6450 lpfc_sli4_enable_msi(struct lpfc_hba *phba)
6451 {
6452 	int rc, index;
6453 
6454 	rc = pci_enable_msi(phba->pcidev);
6455 	if (!rc)
6456 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6457 				"0487 PCI enable MSI mode success.\n");
6458 	else {
6459 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6460 				"0488 PCI enable MSI mode failed (%d)\n", rc);
6461 		return rc;
6462 	}
6463 
6464 	rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
6465 			 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
6466 	if (rc) {
6467 		pci_disable_msi(phba->pcidev);
6468 		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6469 				"0490 MSI request_irq failed (%d)\n", rc);
6470 	}
6471 
6472 	for (index = 0; index < phba->cfg_fcp_eq_count; index++) {
6473 		phba->sli4_hba.fcp_eq_hdl[index].idx = index;
6474 		phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
6475 	}
6476 
6477 	return rc;
6478 }
6479 
6480 /**
6481  * lpfc_sli4_disable_msi - Disable MSI interrupt mode to SLI-4 device
6482  * @phba: pointer to lpfc hba data structure.
6483  *
6484  * This routine is invoked to disable the MSI interrupt mode to device with
6485  * SLI-4 interface spec. The driver calls free_irq() on the MSI vector it
6486  * has done request_irq() on before calling pci_disable_msi(). Failure to
6487  * do so results in a BUG_ON() and the device will be left with MSI
6488  * enabled, leaking its vector.
6489  **/
6490 static void
6491 lpfc_sli4_disable_msi(struct lpfc_hba *phba)
6492 {
6493 	free_irq(phba->pcidev->irq, phba);
6494 	pci_disable_msi(phba->pcidev);
6495 	return;
6496 }
6497 
6498 /**
6499  * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
6500  * @phba: pointer to lpfc hba data structure.
6501  * @cfg_mode: the configured interrupt mode (0 - INTx, 1 - MSI, 2 - MSI-X).
6502  *
6503  * This routine is invoked to enable device interrupts and to associate the
6504  * driver's interrupt handler(s) with interrupt vector(s) on a device with
6505  * the SLI-4 interface spec. Depending on the interrupt mode configured for
6506  * the driver, it will try to fall back from the configured interrupt mode
6507  * to an interrupt mode which is supported by the platform, kernel, and
6508  * device, in the order of: MSI-X -> MSI -> IRQ.
6509  *
6510  * Return codes
6511  * 	intr_mode - the interrupt mode that was successfully enabled
6512  * 	LPFC_INTR_ERROR - no interrupt mode could be enabled
6513  **/
6514 static uint32_t
6515 lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
6516 {
6517 	uint32_t intr_mode = LPFC_INTR_ERROR;
6518 	int retval, index;
6519 
6520 	if (cfg_mode == 2) {
6521 		/* Try to enable MSI-X interrupt mode first */
6522 		retval = lpfc_sli4_enable_msix(phba);
6523 		if (!retval) {
6524 			/* Indicate initialization to MSI-X mode */
6525 			phba->intr_type = MSIX;
6526 			intr_mode = 2;
6527 		}
6528 	}
6533 
6534 	/* Fall back to MSI if MSI-X initialization failed */
6535 	if (cfg_mode >= 1 && phba->intr_type == NONE) {
6536 		retval = lpfc_sli4_enable_msi(phba);
6537 		if (!retval) {
6538 			/* Indicate initialization to MSI mode */
6539 			phba->intr_type = MSI;
6540 			intr_mode = 1;
6541 		}
6542 	}
6543 
6544 	/* Fall back to INTx if both MSI-X/MSI initialization failed */
6545 	if (phba->intr_type == NONE) {
6546 		retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
6547 				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
6548 		if (!retval) {
6549 			/* Indicate initialization to INTx mode */
6550 			phba->intr_type = INTx;
6551 			intr_mode = 0;
6552 			for (index = 0; index < phba->cfg_fcp_eq_count;
6553 			     index++) {
6554 				phba->sli4_hba.fcp_eq_hdl[index].idx = index;
6555 				phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
6556 			}
6557 		}
6558 	}
6559 	return intr_mode;
6560 }
6561 
6562 /**
6563  * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
6564  * @phba: pointer to lpfc hba data structure.
6565  *
6566  * This routine is invoked to disable device interrupt and disassociate
6567  * the driver's interrupt handler(s) from interrupt vector(s) to device
6568  * with SLI-4 interface spec. Depending on the interrupt mode, the driver
6569  * will release the interrupt vector(s) for the message signaled interrupt.
6570  **/
6571 static void
6572 lpfc_sli4_disable_intr(struct lpfc_hba *phba)
6573 {
6574 	/* Disable the currently initialized interrupt mode */
6575 	if (phba->intr_type == MSIX)
6576 		lpfc_sli4_disable_msix(phba);
6577 	else if (phba->intr_type == MSI)
6578 		lpfc_sli4_disable_msi(phba);
6579 	else if (phba->intr_type == INTx)
6580 		free_irq(phba->pcidev->irq, phba);
6581 
6582 	/* Reset interrupt management states */
6583 	phba->intr_type = NONE;
6584 	phba->sli.slistat.sli_intr = 0;
6585 
6586 	return;
6587 }
6588 
6589 /**
6590  * lpfc_unset_hba - Unset SLI3 hba device initialization
6591  * @phba: pointer to lpfc hba data structure.
6592  *
6593  * This routine is invoked to undo the HBA device initialization steps for
6594  * a device with the SLI-3 interface spec.
6595  **/
6596 static void
6597 lpfc_unset_hba(struct lpfc_hba *phba)
6598 {
6599 	struct lpfc_vport *vport = phba->pport;
6600 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
6601 
6602 	spin_lock_irq(shost->host_lock);
6603 	vport->load_flag |= FC_UNLOADING;
6604 	spin_unlock_irq(shost->host_lock);
6605 
6606 	lpfc_stop_hba_timers(phba);
6607 
6608 	phba->pport->work_port_events = 0;
6609 
6610 	lpfc_sli_hba_down(phba);
6611 
6612 	lpfc_sli_brdrestart(phba);
6613 
6614 	lpfc_sli_disable_intr(phba);
6615 
6616 	return;
6617 }
6618 
6619 /**
6620  * lpfc_sli4_unset_hba - Unset SLI4 hba device initialization.
6621  * @phba: pointer to lpfc hba data structure.
6622  *
6623  * This routine is invoked to undo the HBA device initialization steps for
6624  * a device with the SLI-4 interface spec.
6625  **/
6626 static void
6627 lpfc_sli4_unset_hba(struct lpfc_hba *phba)
6628 {
6629 	struct lpfc_vport *vport = phba->pport;
6630 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
6631 
6632 	spin_lock_irq(shost->host_lock);
6633 	vport->load_flag |= FC_UNLOADING;
6634 	spin_unlock_irq(shost->host_lock);
6635 
6636 	phba->pport->work_port_events = 0;
6637 
6638 	lpfc_sli4_hba_down(phba);
6639 
6640 	lpfc_sli4_disable_intr(phba);
6641 
6642 	return;
6643 }
6644 
6645 /**
6646  * lpfc_sli4_hba_unset - Unset the fcoe hba
6647  * @phba: Pointer to HBA context object.
6648  *
6649  * This function is called in the SLI4 code path to reset the HBA's FCoE
6650  * function. The caller is not required to hold any lock. This routine
6651  * issues the PCI function reset mailbox command to reset the FCoE function.
6652  * At the end of the function, it calls the lpfc_hba_down_post function to
6653  * free any pending commands.
6654  **/
6655 static void
6656 lpfc_sli4_hba_unset(struct lpfc_hba *phba)
6657 {
6658 	int wait_cnt = 0;
6659 	LPFC_MBOXQ_t *mboxq;
6660 
6661 	lpfc_stop_hba_timers(phba);
6662 	phba->sli4_hba.intr_enable = 0;
6663 
6664 	/*
6665 	 * Gracefully wait out the potential current outstanding asynchronous
6666 	 * mailbox command.
6667 	 */
6668 
6669 	/* First, block any pending async mailbox command from being posted */
6670 	spin_lock_irq(&phba->hbalock);
6671 	phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
6672 	spin_unlock_irq(&phba->hbalock);
6673 	/* Now, try to wait it out if we can */
6674 	while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
6675 		msleep(10);
6676 		if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
6677 			break;
6678 	}
6679 	/* Forcefully release the outstanding mailbox command if timed out */
6680 	if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
6681 		spin_lock_irq(&phba->hbalock);
6682 		mboxq = phba->sli.mbox_active;
6683 		mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
6684 		__lpfc_mbox_cmpl_put(phba, mboxq);
6685 		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
6686 		phba->sli.mbox_active = NULL;
6687 		spin_unlock_irq(&phba->hbalock);
6688 	}
6689 
6690 	/* Tear down the queues in the HBA */
6691 	lpfc_sli4_queue_unset(phba);
6692 
6693 	/* Disable PCI subsystem interrupt */
6694 	lpfc_sli4_disable_intr(phba);
6695 
6696 	/* Stopping the kthread will trigger work_done one more time */
6697 	kthread_stop(phba->worker_thread);
6698 
6699 	/* Stop the SLI4 device port */
6700 	phba->pport->work_port_events = 0;
6701 }
6702 
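/*
 * The mailbox drain in lpfc_sli4_hba_unset() polls in 10 ms steps for up to
 * LPFC_ACTIVE_MBOX_WAIT_CNT iterations, so the graceful wait is bounded
 * (roughly one second, assuming a wait count on the order of 100) before an
 * outstanding command is forcefully completed with MBX_NOT_FINISHED.
 */
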
6703 /**
6704  * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
6705  * @pdev: pointer to PCI device
6706  * @pid: pointer to PCI device identifier
6707  *
6708  * This routine is to be called to attach a device with the SLI-3 interface
6709  * spec to the PCI subsystem. When an Emulex HBA with the SLI-3 interface
6710  * spec is presented on the PCI bus, the kernel PCI subsystem matches the
6711  * PCI device-specific information of the device against the IDs the driver
6712  * states it can support. If the match is successful, the driver core
6713  * invokes this routine. If this routine determines it can claim the HBA, it
6714  * does all the initialization that it needs to do to handle the HBA properly.
6715  *
6716  * Return code
6717  * 	0 - driver can claim the device
6718  * 	negative value - driver cannot claim the device
6719  **/
6720 static int __devinit
6721 lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
6722 {
6723 	struct lpfc_hba   *phba;
6724 	struct lpfc_vport *vport = NULL;
6725 	int error;
6726 	uint32_t cfg_mode, intr_mode;
6727 
6728 	/* Allocate memory for HBA structure */
6729 	phba = lpfc_hba_alloc(pdev);
6730 	if (!phba)
6731 		return -ENOMEM;
6732 
6733 	/* Perform generic PCI device enabling operation */
6734 	error = lpfc_enable_pci_dev(phba);
6735 	if (error) {
6736 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6737 				"1401 Failed to enable pci device.\n");
6738 		goto out_free_phba;
6739 	}
6740 
6741 	/* Set up SLI API function jump table for PCI-device group-0 HBAs */
6742 	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
6743 	if (error)
6744 		goto out_disable_pci_dev;
6745 
6746 	/* Set up SLI-3 specific device PCI memory space */
6747 	error = lpfc_sli_pci_mem_setup(phba);
6748 	if (error) {
6749 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6750 				"1402 Failed to set up pci memory space.\n");
6751 		goto out_disable_pci_dev;
6752 	}
6753 
6754 	/* Set up phase-1 common device driver resources */
6755 	error = lpfc_setup_driver_resource_phase1(phba);
6756 	if (error) {
6757 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6758 				"1403 Failed to set up driver resource.\n");
6759 		goto out_unset_pci_mem_s3;
6760 	}
6761 
6762 	/* Set up SLI-3 specific device driver resources */
6763 	error = lpfc_sli_driver_resource_setup(phba);
6764 	if (error) {
6765 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6766 				"1404 Failed to set up driver resource.\n");
6767 		goto out_unset_pci_mem_s3;
6768 	}
6769 
6770 	/* Initialize and populate the iocb list per host */
6771 	error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
6772 	if (error) {
6773 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6774 				"1405 Failed to initialize iocb list.\n");
6775 		goto out_unset_driver_resource_s3;
6776 	}
6777 
6778 	/* Set up common device driver resources */
6779 	error = lpfc_setup_driver_resource_phase2(phba);
6780 	if (error) {
6781 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6782 				"1406 Failed to set up driver resource.\n");
6783 		goto out_free_iocb_list;
6784 	}
6785 
6786 	/* Create SCSI host to the physical port */
6787 	error = lpfc_create_shost(phba);
6788 	if (error) {
6789 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6790 				"1407 Failed to create scsi host.\n");
6791 		goto out_unset_driver_resource;
6792 	}
6793 
6794 	/* Configure sysfs attributes */
6795 	vport = phba->pport;
6796 	error = lpfc_alloc_sysfs_attr(vport);
6797 	if (error) {
6798 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6799 				"1476 Failed to allocate sysfs attr\n");
6800 		goto out_destroy_shost;
6801 	}
6802 
6803 	/* Now, try to enable interrupts and bring up the device */
6804 	cfg_mode = phba->cfg_use_msi;
6805 	while (true) {
6806 		/* Put device to a known state before enabling interrupt */
6807 		lpfc_stop_port(phba);
6808 		/* Configure and enable interrupt */
6809 		intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
6810 		if (intr_mode == LPFC_INTR_ERROR) {
6811 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6812 					"0431 Failed to enable interrupt.\n");
6813 			error = -ENODEV;
6814 			goto out_free_sysfs_attr;
6815 		}
6816 		/* SLI-3 HBA setup */
6817 		if (lpfc_sli_hba_setup(phba)) {
6818 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6819 					"1477 Failed to set up hba\n");
6820 			error = -ENODEV;
6821 			goto out_remove_device;
6822 		}
6823 
6824 		/* Wait 50ms for the interrupts of previous mailbox commands */
6825 		msleep(50);
6826 		/* Check active interrupts on message signaled interrupts */
6827 		if (intr_mode == 0 ||
6828 		    phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
6829 			/* Log the current active interrupt mode */
6830 			phba->intr_mode = intr_mode;
6831 			lpfc_log_intr_mode(phba, intr_mode);
6832 			break;
6833 		} else {
6834 			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6835 					"0447 Configure interrupt mode (%d) "
6836 					"failed active interrupt test.\n",
6837 					intr_mode);
6838 			/* Disable the current interrupt mode */
6839 			lpfc_sli_disable_intr(phba);
6840 			/* Try next level of interrupt mode (2=MSI-X, 1=MSI, 0=INTx) */
6841 			cfg_mode = --intr_mode;
6842 		}
6843 	}
6844 
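	/*
	 * The loop above is an "active interrupt" sanity check: after HBA
	 * setup, the driver verifies that the chosen MSI-X/MSI vector has
	 * actually fired (sli_intr counts serviced interrupts). If it has
	 * not, the driver drops down one interrupt mode and retries until
	 * plain INTx, which needs no such test, is reached.
	 */
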
6845 	/* Perform post initialization setup */
6846 	lpfc_post_init_setup(phba);
6847 
6848 	/* Check if there are static vports to be created. */
6849 	lpfc_create_static_vport(phba);
6850 
6851 	return 0;
6852 
6853 out_remove_device:
6854 	lpfc_unset_hba(phba);
6855 out_free_sysfs_attr:
6856 	lpfc_free_sysfs_attr(vport);
6857 out_destroy_shost:
6858 	lpfc_destroy_shost(phba);
6859 out_unset_driver_resource:
6860 	lpfc_unset_driver_resource_phase2(phba);
6861 out_free_iocb_list:
6862 	lpfc_free_iocb_list(phba);
6863 out_unset_driver_resource_s3:
6864 	lpfc_sli_driver_resource_unset(phba);
6865 out_unset_pci_mem_s3:
6866 	lpfc_sli_pci_mem_unset(phba);
6867 out_disable_pci_dev:
6868 	lpfc_disable_pci_dev(phba);
6869 out_free_phba:
6870 	lpfc_hba_free(phba);
6871 	return error;
6872 }
6873 
6874 /**
6875  * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
6876  * @pdev: pointer to PCI device
6877  *
6878  * This routine is to be called to detach a device with the SLI-3 interface
6879  * spec from the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
6880  * removed from PCI bus, it performs all the necessary cleanup for the HBA
6881  * device to be removed from the PCI subsystem properly.
6882  **/
6883 static void __devexit
6884 lpfc_pci_remove_one_s3(struct pci_dev *pdev)
6885 {
6886 	struct Scsi_Host  *shost = pci_get_drvdata(pdev);
6887 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6888 	struct lpfc_vport **vports;
6889 	struct lpfc_hba   *phba = vport->phba;
6890 	int i;
6891 	int bars = pci_select_bars(pdev, IORESOURCE_MEM);
6892 
6893 	spin_lock_irq(&phba->hbalock);
6894 	vport->load_flag |= FC_UNLOADING;
6895 	spin_unlock_irq(&phba->hbalock);
6896 
6897 	lpfc_free_sysfs_attr(vport);
6898 
6899 	/* Release all the vports against this physical port */
6900 	vports = lpfc_create_vport_work_array(phba);
6901 	if (vports != NULL)
6902 		for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
6903 			fc_vport_terminate(vports[i]->fc_vport);
6904 	lpfc_destroy_vport_work_array(phba, vports);
6905 
6906 	/* Remove FC host and then SCSI host with the physical port */
6907 	fc_remove_host(shost);
6908 	scsi_remove_host(shost);
6909 	lpfc_cleanup(vport);
6910 
6911 	/*
6912 	 * Bring down the SLI Layer. This step disables all interrupts,
6913 	 * clears the rings, discards all mailbox commands, and resets
6914 	 * the HBA.
6915 	 */
6916 
6917 	/* HBA interrupt will be disabled after this call */
6918 	lpfc_sli_hba_down(phba);
6919 	/* Stopping the kthread will trigger work_done one more time */
6920 	kthread_stop(phba->worker_thread);
6921 	/* Final cleanup of txcmplq and reset the HBA */
6922 	lpfc_sli_brdrestart(phba);
6923 
6924 	lpfc_stop_hba_timers(phba);
6925 	spin_lock_irq(&phba->hbalock);
6926 	list_del_init(&vport->listentry);
6927 	spin_unlock_irq(&phba->hbalock);
6928 
6929 	lpfc_debugfs_terminate(vport);
6930 
6931 	/* Disable interrupt */
6932 	lpfc_sli_disable_intr(phba);
6933 
6934 	pci_set_drvdata(pdev, NULL);
6935 	scsi_host_put(shost);
6936 
6937 	/*
6938 	 * Call scsi_free before mem_free since scsi bufs are released to their
6939 	 * corresponding pools here.
6940 	 */
6941 	lpfc_scsi_free(phba);
6942 	lpfc_mem_free_all(phba);
6943 
6944 	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
6945 			  phba->hbqslimp.virt, phba->hbqslimp.phys);
6946 
6947 	/* Free resources associated with SLI2 interface */
6948 	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
6949 			  phba->slim2p.virt, phba->slim2p.phys);
6950 
6951 	/* unmap adapter SLIM and Control Registers */
6952 	iounmap(phba->ctrl_regs_memmap_p);
6953 	iounmap(phba->slim_memmap_p);
6954 
6955 	lpfc_hba_free(phba);
6956 
6957 	pci_release_selected_regions(pdev, bars);
6958 	pci_disable_device(pdev);
6959 }
6960 
6961 /**
6962  * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
6963  * @pdev: pointer to PCI device
6964  * @msg: power management message
6965  *
6966  * This routine is to be called from the kernel's PCI subsystem to support
6967  * system Power Management (PM) to a device with the SLI-3 interface spec.
6968  * When PM invokes this method, it quiesces the device by stopping the
6969  * driver's worker thread for the device, turning off the device's interrupt
6970  * and DMA, and bringing the device offline. Note that the driver implements
6971  * only the minimum PM requirements for a power-aware driver: all possible
6972  * PM messages (SUSPEND, HIBERNATE, FREEZE) passed to the suspend() method
6973  * are treated as SUSPEND, and the driver fully reinitializes its device
6974  * during the resume() method call. Therefore, the driver sets the device to
6975  * the PCI_D3hot state in PCI config space instead of setting it according
6976  * to the @msg provided by PM.
6977  *
6978  * Return code
6979  * 	0 - driver suspended the device
6980  * 	Error otherwise
6981  **/
6982 static int
6983 lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
6984 {
6985 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
6986 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
6987 
6988 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6989 			"0473 PCI device Power Management suspend.\n");
6990 
6991 	/* Bring down the device */
6992 	lpfc_offline_prep(phba);
6993 	lpfc_offline(phba);
6994 	kthread_stop(phba->worker_thread);
6995 
6996 	/* Disable interrupt from device */
6997 	lpfc_sli_disable_intr(phba);
6998 
6999 	/* Save device state to PCI config space */
7000 	pci_save_state(pdev);
7001 	pci_set_power_state(pdev, PCI_D3hot);
7002 
7003 	return 0;
7004 }
7005 
7006 /**
7007  * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
7008  * @pdev: pointer to PCI device
7009  *
7010  * This routine is to be called from the kernel's PCI subsystem to support
7011  * system Power Management (PM) to a device with the SLI-3 interface spec.
7012  * When PM invokes this method, it restores the device's PCI config space
7013  * state and fully reinitializes the device and brings it online. Note that
7014  * as the driver implements only the minimum PM requirements for a
7015  * power-aware driver -- all possible PM messages (SUSPEND, HIBERNATE,
7016  * FREEZE) to the suspend() method call are treated as SUSPEND and the
7017  * driver fully reinitializes its device during the resume() method call --
7018  * the device is set to PCI_D0 directly in PCI config space before
7019  * restoring the state.
7020  *
7021  * Return code
7022  * 	0 - driver resumed the device
7023  * 	Error otherwise
7024  **/
7025 static int
7026 lpfc_pci_resume_one_s3(struct pci_dev *pdev)
7027 {
7028 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7029 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7030 	uint32_t intr_mode;
7031 	int error;
7032 
7033 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7034 			"0452 PCI device Power Management resume.\n");
7035 
7036 	/* Restore device state from PCI config space */
7037 	pci_set_power_state(pdev, PCI_D0);
7038 	pci_restore_state(pdev);
7039 	if (pdev->is_busmaster)
7040 		pci_set_master(pdev);
7041 
7042 	/* Startup the kernel thread for this host adapter. */
7043 	phba->worker_thread = kthread_run(lpfc_do_work, phba,
7044 					"lpfc_worker_%d", phba->brd_no);
7045 	if (IS_ERR(phba->worker_thread)) {
7046 		error = PTR_ERR(phba->worker_thread);
7047 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7048 				"0434 PM resume failed to start worker "
7049 				"thread: error=x%x.\n", error);
7050 		return error;
7051 	}
7052 
7053 	/* Configure and enable interrupt */
7054 	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
7055 	if (intr_mode == LPFC_INTR_ERROR) {
7056 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7057 				"0430 PM resume Failed to enable interrupt\n");
7058 		return -EIO;
7059 	} else
7060 		phba->intr_mode = intr_mode;
7061 
7062 	/* Restart HBA and bring it online */
7063 	lpfc_sli_brdrestart(phba);
7064 	lpfc_online(phba);
7065 
7066 	/* Log the current active interrupt mode */
7067 	lpfc_log_intr_mode(phba, phba->intr_mode);
7068 
7069 	return 0;
7070 }
7071 
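/*
 * The three SLI-3 handlers below implement the kernel's PCI error recovery
 * sequence: the core first reports the channel state via .error_detected,
 * resets the slot once PCI_ERS_RESULT_NEED_RESET has been returned, and
 * finally calls .resume so traffic can flow again (see
 * Documentation/PCI/pci-error-recovery.txt).
 */
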
7072 /**
7073  * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
7074  * @pdev: pointer to PCI device.
7075  * @state: the current PCI connection state.
7076  *
7077  * This routine is called from the PCI subsystem for I/O error handling to
7078  * device with SLI-3 interface spec. This function is called by the PCI
7079  * subsystem after a PCI bus error affecting this device has been detected.
7080  * When this function is invoked, it will need to stop all the I/Os and
7081  * interrupt(s) to the device. Once that is done, it will return
7082  * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
7083  * as desired.
7084  *
7085  * Return codes
7086  * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
7087  * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
7088  **/
7089 static pci_ers_result_t
7090 lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
7091 {
7092 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7093 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7094 	struct lpfc_sli *psli = &phba->sli;
7095 	struct lpfc_sli_ring  *pring;
7096 
7097 	if (state == pci_channel_io_perm_failure) {
7098 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7099 				"0472 PCI channel I/O permanent failure\n");
7100 		/* Block all SCSI devices' I/Os on the host */
7101 		lpfc_scsi_dev_block(phba);
7102 		/* Clean up all driver's outstanding SCSI I/Os */
7103 		lpfc_sli_flush_fcp_rings(phba);
7104 		return PCI_ERS_RESULT_DISCONNECT;
7105 	}
7106 
7107 	pci_disable_device(pdev);
7108 	/*
7109 	 * There may be I/Os dropped by the firmware.
7110 	 * Error out the iocbs (I/Os) on the txcmplq and let the SCSI layer
7111 	 * retry them after the link is re-established.
7112 	 */
7113 	pring = &psli->ring[psli->fcp_ring];
7114 	lpfc_sli_abort_iocb_ring(phba, pring);
7115 
7116 	/* Disable interrupt */
7117 	lpfc_sli_disable_intr(phba);
7118 
7119 	/* Request a slot reset. */
7120 	return PCI_ERS_RESULT_NEED_RESET;
7121 }
7122 
7123 /**
7124  * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
7125  * @pdev: pointer to PCI device.
7126  *
7127  * This routine is called from the PCI subsystem for error handling to
7128  * device with SLI-3 interface spec. This is called after PCI bus has been
7129  * reset to restart the PCI card from scratch, as if from a cold-boot.
7130  * During PCI subsystem error recovery, after the driver returns
7131  * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
7132  * recovery and then call this routine before calling the .resume method
7133  * to recover the device. This function will initialize the HBA device and
7134  * enable its interrupt, but it will just leave the HBA in the offline state
7135  * without passing any I/O traffic.
7136  *
7137  * Return codes
7138  * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
7139  * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
7140  */
7141 static pci_ers_result_t
7142 lpfc_io_slot_reset_s3(struct pci_dev *pdev)
7143 {
7144 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7145 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7146 	struct lpfc_sli *psli = &phba->sli;
7147 	uint32_t intr_mode;
7148 
7149 	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
7150 	if (pci_enable_device_mem(pdev)) {
7151 		printk(KERN_ERR "lpfc: Cannot re-enable "
7152 			"PCI device after reset.\n");
7153 		return PCI_ERS_RESULT_DISCONNECT;
7154 	}
7155 
7156 	pci_restore_state(pdev);
7157 	if (pdev->is_busmaster)
7158 		pci_set_master(pdev);
7159 
7160 	spin_lock_irq(&phba->hbalock);
7161 	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
7162 	spin_unlock_irq(&phba->hbalock);
7163 
7164 	/* Configure and enable interrupt */
7165 	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
7166 	if (intr_mode == LPFC_INTR_ERROR) {
7167 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7168 				"0427 Cannot re-enable interrupt after "
7169 				"slot reset.\n");
7170 		return PCI_ERS_RESULT_DISCONNECT;
7171 	} else
7172 		phba->intr_mode = intr_mode;
7173 
7174 	/* Take device offline; this will perform cleanup */
7175 	lpfc_offline(phba);
7176 	lpfc_sli_brdrestart(phba);
7177 
7178 	/* Log the current active interrupt mode */
7179 	lpfc_log_intr_mode(phba, phba->intr_mode);
7180 
7181 	return PCI_ERS_RESULT_RECOVERED;
7182 }
7183 
7184 /**
7185  * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
7186  * @pdev: pointer to PCI device
7187  *
7188  * This routine is called from the PCI subsystem for error handling to device
7189  * with SLI-3 interface spec. It is called when kernel error recovery tells
7190  * the lpfc driver that it is OK to resume normal PCI operation after PCI bus
7191  * error recovery. After this call, traffic can start to flow from this device
7192  * again.
7193  */
7194 static void
7195 lpfc_io_resume_s3(struct pci_dev *pdev)
7196 {
7197 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7198 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7199 
7200 	lpfc_online(phba);
7201 }
7202 
7203 /**
7204  * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
7205  * @phba: pointer to lpfc hba data structure.
7206  *
7207  * Returns the number of ELS/CT IOCBs to reserve.
7208  **/
7209 int
7210 lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
7211 {
7212 	int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
7213 
7214 	if (phba->sli_rev == LPFC_SLI_REV4) {
7215 		if (max_xri <= 100)
7216 			return 4;
7217 		else if (max_xri <= 256)
7218 			return 8;
7219 		else if (max_xri <= 512)
7220 			return 16;
7221 		else if (max_xri <= 1024)
7222 			return 32;
7223 		else
7224 			return 48;
7225 	} else
7226 		return 0;
7227 }
7228 
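/*
 * Worked example for the XRI bucketing above: an SLI-4 function reporting
 * max_xri = 300 falls into the "<= 512" bucket, so 16 IOCBs are reserved
 * for ELS/CT traffic; a non-SLI-4 HBA reserves none.
 */
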
7229 /**
7230  * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
7231  * @pdev: pointer to PCI device
7232  * @pid: pointer to PCI device identifier
7233  *
7234  * This routine is called from the kernel's PCI subsystem to device with
7235  * This routine is called from the kernel's PCI subsystem to attach a device
7236  * with the SLI-4 interface spec. When an Emulex HBA with the SLI-4 interface
7237  * spec is presented on the PCI bus, the kernel PCI subsystem matches the PCI
7238  * device-specific information of the device against the IDs the driver
7239  * states it can support. If the match is successful, the driver
7240  * it does all the initialization that it needs to do to handle the HBA
7241  * properly.
7242  *
7243  * Return code
7244  * 	0 - driver can claim the device
7245  * 	negative value - driver cannot claim the device
7246  **/
7247 static int __devinit
7248 lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
7249 {
7250 	struct lpfc_hba   *phba;
7251 	struct lpfc_vport *vport = NULL;
7252 	int error;
7253 	uint32_t cfg_mode, intr_mode;
7254 	int mcnt;
7255 
7256 	/* Allocate memory for HBA structure */
7257 	phba = lpfc_hba_alloc(pdev);
7258 	if (!phba)
7259 		return -ENOMEM;
7260 
7261 	/* Perform generic PCI device enabling operation */
7262 	error = lpfc_enable_pci_dev(phba);
7263 	if (error) {
7264 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7265 				"1409 Failed to enable pci device.\n");
7266 		goto out_free_phba;
7267 	}
7268 
7269 	/* Set up SLI API function jump table for PCI-device group-1 HBAs */
7270 	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
7271 	if (error)
7272 		goto out_disable_pci_dev;
7273 
7274 	/* Set up SLI-4 specific device PCI memory space */
7275 	error = lpfc_sli4_pci_mem_setup(phba);
7276 	if (error) {
7277 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7278 				"1410 Failed to set up pci memory space.\n");
7279 		goto out_disable_pci_dev;
7280 	}
7281 
7282 	/* Set up phase-1 common device driver resources */
7283 	error = lpfc_setup_driver_resource_phase1(phba);
7284 	if (error) {
7285 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7286 				"1411 Failed to set up driver resource.\n");
7287 		goto out_unset_pci_mem_s4;
7288 	}
7289 
7290 	/* Set up SLI-4 Specific device driver resources */
7291 	error = lpfc_sli4_driver_resource_setup(phba);
7292 	if (error) {
7293 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7294 				"1412 Failed to set up driver resource.\n");
7295 		goto out_unset_pci_mem_s4;
7296 	}
7297 
7298 	/* Initialize and populate the iocb list per host */
7299 	error = lpfc_init_iocb_list(phba,
7300 			phba->sli4_hba.max_cfg_param.max_xri);
7301 	if (error) {
7302 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7303 				"1413 Failed to initialize iocb list.\n");
7304 		goto out_unset_driver_resource_s4;
7305 	}
7306 
7307 	/* Set up common device driver resources */
7308 	error = lpfc_setup_driver_resource_phase2(phba);
7309 	if (error) {
7310 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7311 				"1414 Failed to set up driver resource.\n");
7312 		goto out_free_iocb_list;
7313 	}
7314 
7315 	/* Create SCSI host to the physical port */
7316 	error = lpfc_create_shost(phba);
7317 	if (error) {
7318 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7319 				"1415 Failed to create scsi host.\n");
7320 		goto out_unset_driver_resource;
7321 	}
7322 
7323 	/* Configure sysfs attributes */
7324 	vport = phba->pport;
7325 	error = lpfc_alloc_sysfs_attr(vport);
7326 	if (error) {
7327 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7328 				"1416 Failed to allocate sysfs attr\n");
7329 		goto out_destroy_shost;
7330 	}
7331 
7332 	/* Now, try to enable interrupts and bring up the device */
7333 	cfg_mode = phba->cfg_use_msi;
7334 	while (true) {
7335 		/* Put device to a known state before enabling interrupt */
7336 		lpfc_stop_port(phba);
7337 		/* Configure and enable interrupt */
7338 		intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
7339 		if (intr_mode == LPFC_INTR_ERROR) {
7340 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7341 					"0426 Failed to enable interrupt.\n");
7342 			error = -ENODEV;
7343 			goto out_free_sysfs_attr;
7344 		}
7345 		/* Set up SLI-4 HBA */
7346 		if (lpfc_sli4_hba_setup(phba)) {
7347 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7348 					"1421 Failed to set up hba\n");
7349 			error = -ENODEV;
7350 			goto out_disable_intr;
7351 		}
7352 
7353 		/* Send NOP mbx cmds for non-INTx mode active interrupt test */
7354 		if (intr_mode != 0)
7355 			mcnt = lpfc_sli4_send_nop_mbox_cmds(phba,
7356 							    LPFC_ACT_INTR_CNT);
7357 
7358 		/* Check active interrupts received only for MSI/MSI-X */
7359 		if (intr_mode == 0 ||
7360 		    phba->sli.slistat.sli_intr >= LPFC_ACT_INTR_CNT) {
7361 			/* Log the current active interrupt mode */
7362 			phba->intr_mode = intr_mode;
7363 			lpfc_log_intr_mode(phba, intr_mode);
7364 			break;
7365 		}
7366 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7367 				"0451 Configure interrupt mode (%d) "
7368 				"failed active interrupt test.\n",
7369 				intr_mode);
7370 		/* Unset the previous SLI-4 HBA setup */
7371 		lpfc_sli4_unset_hba(phba);
7372 		/* Try next level of interrupt mode (2=MSI-X, 1=MSI, 0=INTx) */
7373 		cfg_mode = --intr_mode;
7374 	}
7375 
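	/*
	 * Unlike the SLI-3 probe, which simply waits 50 ms for interrupts
	 * from prior mailbox commands, the SLI-4 path above provokes
	 * interrupts explicitly by sending LPFC_ACT_INTR_CNT NOP mailbox
	 * commands before checking sli_intr.
	 */
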
7376 	/* Perform post initialization setup */
7377 	lpfc_post_init_setup(phba);
7378 
7379 	/* Check if there are static vports to be created. */
7380 	lpfc_create_static_vport(phba);
7381 
7382 	return 0;
7383 
7384 out_disable_intr:
7385 	lpfc_sli4_disable_intr(phba);
7386 out_free_sysfs_attr:
7387 	lpfc_free_sysfs_attr(vport);
7388 out_destroy_shost:
7389 	lpfc_destroy_shost(phba);
7390 out_unset_driver_resource:
7391 	lpfc_unset_driver_resource_phase2(phba);
7392 out_free_iocb_list:
7393 	lpfc_free_iocb_list(phba);
7394 out_unset_driver_resource_s4:
7395 	lpfc_sli4_driver_resource_unset(phba);
7396 out_unset_pci_mem_s4:
7397 	lpfc_sli4_pci_mem_unset(phba);
7398 out_disable_pci_dev:
7399 	lpfc_disable_pci_dev(phba);
7400 out_free_phba:
7401 	lpfc_hba_free(phba);
7402 	return error;
7403 }
7404 
7405 /**
7406  * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
7407  * @pdev: pointer to PCI device
7408  *
7409  * This routine is called from the kernel's PCI subsystem to detach a device
7410  * with the SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
7411  * removed from PCI bus, it performs all the necessary cleanup for the HBA
7412  * device to be removed from the PCI subsystem properly.
7413  **/
7414 static void __devexit
7415 lpfc_pci_remove_one_s4(struct pci_dev *pdev)
7416 {
7417 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7418 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
7419 	struct lpfc_vport **vports;
7420 	struct lpfc_hba *phba = vport->phba;
7421 	int i;
7422 
7423 	/* Mark the device unloading flag */
7424 	spin_lock_irq(&phba->hbalock);
7425 	vport->load_flag |= FC_UNLOADING;
7426 	spin_unlock_irq(&phba->hbalock);
7427 
7428 	/* Free the HBA sysfs attributes */
7429 	lpfc_free_sysfs_attr(vport);
7430 
7431 	/* Release all the vports against this physical port */
7432 	vports = lpfc_create_vport_work_array(phba);
7433 	if (vports != NULL)
7434 		for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
7435 			fc_vport_terminate(vports[i]->fc_vport);
7436 	lpfc_destroy_vport_work_array(phba, vports);
7437 
7438 	/* Remove FC host and then SCSI host with the physical port */
7439 	fc_remove_host(shost);
7440 	scsi_remove_host(shost);
7441 
7442 	/* Perform cleanup on the physical port */
7443 	lpfc_cleanup(vport);
7444 
7445 	/*
7446 	 * Bring down the SLI Layer. This step disables all interrupts,
7447 	 * clears the rings, discards all mailbox commands, and resets
7448 	 * the HBA FCoE function.
7449 	 */
7450 	lpfc_debugfs_terminate(vport);
7451 	lpfc_sli4_hba_unset(phba);
7452 
7453 	spin_lock_irq(&phba->hbalock);
7454 	list_del_init(&vport->listentry);
7455 	spin_unlock_irq(&phba->hbalock);
7456 
7457 	/* Call scsi_free before lpfc_sli4_driver_resource_unset since scsi
7458 	 * buffers are released to their corresponding pools here.
7459 	 */
7460 	lpfc_scsi_free(phba);
7461 	lpfc_sli4_driver_resource_unset(phba);
7462 
7463 	/* Unmap adapter Control and Doorbell registers */
7464 	lpfc_sli4_pci_mem_unset(phba);
7465 
7466 	/* Release PCI resources and disable device's PCI function */
7467 	scsi_host_put(shost);
7468 	lpfc_disable_pci_dev(phba);
7469 
7470 	/* Finally, free the driver's device data structure */
7471 	lpfc_hba_free(phba);
7472 
7473 	return;
7474 }
7475 
7476 /**
7477  * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
7478  * @pdev: pointer to PCI device
7479  * @msg: power management message
7480  *
7481  * This routine is called from the kernel's PCI subsystem to support system
7482  * Power Management (PM) to a device with the SLI-4 interface spec. When PM
7483  * invokes this method, it quiesces the device by stopping the driver's
7484  * worker thread for the device, turning off the device's interrupt and DMA,
7485  * and bringing the device offline. Note that the driver implements only the
7486  * minimum PM requirements for a power-aware driver: all possible PM
7487  * messages (SUSPEND, HIBERNATE, FREEZE) passed to the suspend() method are
7488  * treated as SUSPEND, and the driver fully reinitializes its device during
7489  * the resume() method call. Therefore, the driver sets the device to the
7490  * PCI_D3hot state in PCI config space instead of setting it according
7491  * to the @msg provided by PM.
7492  *
7493  * Return code
7494  * 	0 - driver suspended the device
7495  * 	Error otherwise
7496  **/
7497 static int
7498 lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
7499 {
7500 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7501 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7502 
7503 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7504 			"0298 PCI device Power Management suspend.\n");
7505 
7506 	/* Bring down the device */
7507 	lpfc_offline_prep(phba);
7508 	lpfc_offline(phba);
7509 	kthread_stop(phba->worker_thread);
7510 
7511 	/* Disable interrupt from device */
7512 	lpfc_sli4_disable_intr(phba);
7513 
7514 	/* Save device state to PCI config space */
7515 	pci_save_state(pdev);
7516 	pci_set_power_state(pdev, PCI_D3hot);
7517 
7518 	return 0;
7519 }
7520 
7521 /**
7522  * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
7523  * @pdev: pointer to PCI device
7524  *
7525  * This routine is called from the kernel's PCI subsystem to support system
7526  * Power Management (PM) to a device with the SLI-4 interface spec. When PM
7527  * invokes this method, it restores the device's PCI config space state and
7528  * fully reinitializes the device and brings it online. Note that as the
7529  * driver implements only the minimum PM requirements for a power-aware
7530  * driver -- all possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the
7531  * suspend() method call are treated as SUSPEND and the driver fully
7532  * reinitializes its device during the resume() method call -- the device
7533  * is set to PCI_D0 directly in PCI config space before restoring the
7534  * state.
7535  *
7536  * Return code
7537  * 	0 - driver resumed the device
7538  * 	Error otherwise
7539  **/
7540 static int
7541 lpfc_pci_resume_one_s4(struct pci_dev *pdev)
7542 {
7543 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7544 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7545 	uint32_t intr_mode;
7546 	int error;
7547 
7548 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7549 			"0292 PCI device Power Management resume.\n");
7550 
7551 	/* Restore device state from PCI config space */
7552 	pci_set_power_state(pdev, PCI_D0);
7553 	pci_restore_state(pdev);
7554 	if (pdev->is_busmaster)
7555 		pci_set_master(pdev);
7556 
7557 	/* Startup the kernel thread for this host adapter. */
7558 	phba->worker_thread = kthread_run(lpfc_do_work, phba,
7559 					"lpfc_worker_%d", phba->brd_no);
7560 	if (IS_ERR(phba->worker_thread)) {
7561 		error = PTR_ERR(phba->worker_thread);
7562 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7563 				"0293 PM resume failed to start worker "
7564 				"thread: error=x%x.\n", error);
7565 		return error;
7566 	}
7567 
7568 	/* Configure and enable interrupt */
7569 	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
7570 	if (intr_mode == LPFC_INTR_ERROR) {
7571 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7572 				"0294 PM resume Failed to enable interrupt\n");
7573 		return -EIO;
7574 	} else
7575 		phba->intr_mode = intr_mode;
7576 
7577 	/* Restart HBA and bring it online */
7578 	lpfc_sli_brdrestart(phba);
7579 	lpfc_online(phba);
7580 
7581 	/* Log the current active interrupt mode */
7582 	lpfc_log_intr_mode(phba, phba->intr_mode);
7583 
7584 	return 0;
7585 }
7586 
7587 /**
7588  * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
7589  * @pdev: pointer to PCI device.
7590  * @state: the current PCI connection state.
7591  *
7592  * This routine is called from the PCI subsystem for error handling to device
7593  * with SLI-4 interface spec. This function is called by the PCI subsystem
7594  * after a PCI bus error affecting this device has been detected. When this
7595  * function is invoked, it will need to stop all the I/Os and interrupt(s)
7596  * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET
7597  * for the PCI subsystem to perform proper recovery as desired.
7598  *
7599  * Return codes
7600  * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
7601  * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
7602  **/
7603 static pci_ers_result_t
7604 lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
7605 {
7606 	return PCI_ERS_RESULT_NEED_RESET;
7607 }
7608 
7609 /**
7610  * lpfc_io_slot_reset_s4 - Method for restarting PCI SLI-4 device from scratch
7611  * @pdev: pointer to PCI device.
7612  *
7613  * This routine is called from the PCI subsystem for error handling to device
7614  * with SLI-4 interface spec. It is called after PCI bus has been reset to
7615  * restart the PCI card from scratch, as if from a cold-boot. During the
7616  * PCI subsystem error recovery, after the driver returns
7617  * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
7618  * recovery and then call this routine before calling the .resume method to
7619  * recover the device. This function will initialize the HBA device and
7620  * enable its interrupt, but it will just leave the HBA in the offline
7621  * state without passing any I/O traffic.
7622  *
7623  * Return codes
7624  * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
7625  * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
7626  */
7627 static pci_ers_result_t
7628 lpfc_io_slot_reset_s4(struct pci_dev *pdev)
7629 {
7630 	return PCI_ERS_RESULT_RECOVERED;
7631 }
7632 
7633 /**
7634  * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
7635  * @pdev: pointer to PCI device
7636  *
7637  * This routine is called from the PCI subsystem for error handling to device
7638  * with SLI-4 interface spec. It is called when kernel error recovery tells
7639  * the lpfc driver that it is OK to resume normal PCI operation after PCI bus
7640  * error recovery. After this call, traffic can start to flow from this device
7641  * again.
7642  **/
7643 static void
7644 lpfc_io_resume_s4(struct pci_dev *pdev)
7645 {
7646 	return;
7647 }
7648 
7649 /**
7650  * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
7651  * @pdev: pointer to PCI device
7652  * @pid: pointer to PCI device identifier
7653  *
7654  * This routine is to be registered to the kernel's PCI subsystem. When an
7655  * Emulex HBA device is presented on the PCI bus, the kernel PCI subsystem
7656  * matches the PCI device-specific information of the device against the
7657  * IDs the driver states it can support. If the match is
7658  * successful, the driver core invokes this routine. This routine dispatches
7659  * the action to the proper SLI-3 or SLI-4 device probing routine, which will
7660  * do all the initialization that it needs to do to handle the HBA device
7661  * properly.
7662  *
7663  * Return code
7664  * 	0 - driver can claim the device
7665  * 	negative value - driver cannot claim the device
7666  **/
7667 static int __devinit
7668 lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
7669 {
7670 	int rc;
7671 	struct lpfc_sli_intf intf;
7672 
7673 	if (pci_read_config_dword(pdev, LPFC_SLIREV_CONF_WORD, &intf.word0))
7674 		return -ENODEV;
7675 
7676 	if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
7677 		(bf_get(lpfc_sli_intf_rev, &intf) == LPFC_SLIREV_CONF_SLI4))
7678 		rc = lpfc_pci_probe_one_s4(pdev, pid);
7679 	else
7680 		rc = lpfc_pci_probe_one_s3(pdev, pid);
7681 
7682 	return rc;
7683 }
7684 
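/*
 * The dispatch routines below all follow the same pattern: the PCI device
 * group recorded at probe time (LPFC_PCI_DEV_LP for SLI-3, LPFC_PCI_DEV_OC
 * for SLI-4) selects the matching _s3 or _s4 handler via phba->pci_dev_grp.
 */
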
7685 /**
7686  * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
7687  * @pdev: pointer to PCI device
7688  *
7689  * This routine is to be registered to the kernel's PCI subsystem. When an
7690  * Emulex HBA is removed from PCI bus, the driver core invokes this routine.
7691  * This routine dispatches the action to the proper SLI-3 or SLI-4 device
7692  * remove routine, which will perform all the necessary cleanup for the
7693  * device to be removed from the PCI subsystem properly.
7694  **/
7695 static void __devexit
7696 lpfc_pci_remove_one(struct pci_dev *pdev)
7697 {
7698 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7699 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7700 
7701 	switch (phba->pci_dev_grp) {
7702 	case LPFC_PCI_DEV_LP:
7703 		lpfc_pci_remove_one_s3(pdev);
7704 		break;
7705 	case LPFC_PCI_DEV_OC:
7706 		lpfc_pci_remove_one_s4(pdev);
7707 		break;
7708 	default:
7709 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7710 				"1424 Invalid PCI device group: 0x%x\n",
7711 				phba->pci_dev_grp);
7712 		break;
7713 	}
7714 	return;
7715 }
7716 
7717 /**
7718  * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
7719  * @pdev: pointer to PCI device
7720  * @msg: power management message
7721  *
7722  * This routine is to be registered to the kernel's PCI subsystem to support
7723  * system Power Management (PM). When PM invokes this method, it dispatches
7724  * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
7725  * suspend the device.
7726  *
7727  * Return code
7728  * 	0 - driver suspended the device
7729  * 	Error otherwise
7730  **/
7731 static int
7732 lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
7733 {
7734 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7735 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7736 	int rc = -ENODEV;
7737 
7738 	switch (phba->pci_dev_grp) {
7739 	case LPFC_PCI_DEV_LP:
7740 		rc = lpfc_pci_suspend_one_s3(pdev, msg);
7741 		break;
7742 	case LPFC_PCI_DEV_OC:
7743 		rc = lpfc_pci_suspend_one_s4(pdev, msg);
7744 		break;
7745 	default:
7746 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7747 				"1425 Invalid PCI device group: 0x%x\n",
7748 				phba->pci_dev_grp);
7749 		break;
7750 	}
7751 	return rc;
7752 }
7753 
7754 /**
7755  * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
7756  * @pdev: pointer to PCI device
7757  *
7758  * This routine is to be registered to the kernel's PCI subsystem to support
7759  * system Power Management (PM). When PM invokes this method, it dispatches
7760  * the action to the proper SLI-3 or SLI-4 device resume routine, which will
7761  * resume the device.
7762  *
7763  * Return code
7764  * 	0 - driver resumed the device
7765  * 	Error otherwise
7766  **/
7767 static int
7768 lpfc_pci_resume_one(struct pci_dev *pdev)
7769 {
7770 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7771 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7772 	int rc = -ENODEV;
7773 
7774 	switch (phba->pci_dev_grp) {
7775 	case LPFC_PCI_DEV_LP:
7776 		rc = lpfc_pci_resume_one_s3(pdev);
7777 		break;
7778 	case LPFC_PCI_DEV_OC:
7779 		rc = lpfc_pci_resume_one_s4(pdev);
7780 		break;
7781 	default:
7782 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7783 				"1426 Invalid PCI device group: 0x%x\n",
7784 				phba->pci_dev_grp);
7785 		break;
7786 	}
7787 	return rc;
7788 }
7789 
7790 /**
7791  * lpfc_io_error_detected - lpfc method for handling PCI I/O error
7792  * @pdev: pointer to PCI device.
7793  * @state: the current PCI connection state.
7794  *
7795  * This routine is registered to the PCI subsystem for error handling. This
7796  * function is called by the PCI subsystem after a PCI bus error affecting
7797  * this device has been detected. When this routine is invoked, it dispatches
7798  * the action to the proper SLI-3 or SLI-4 device error detected handling
7799  * routine, which will perform the proper error detected operation.
7800  *
7801  * Return codes
7802  * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
7803  * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
7804  **/
7805 static pci_ers_result_t
7806 lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
7807 {
7808 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7809 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7810 	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
7811 
7812 	switch (phba->pci_dev_grp) {
7813 	case LPFC_PCI_DEV_LP:
7814 		rc = lpfc_io_error_detected_s3(pdev, state);
7815 		break;
7816 	case LPFC_PCI_DEV_OC:
7817 		rc = lpfc_io_error_detected_s4(pdev, state);
7818 		break;
7819 	default:
7820 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7821 				"1427 Invalid PCI device group: 0x%x\n",
7822 				phba->pci_dev_grp);
7823 		break;
7824 	}
7825 	return rc;
7826 }
7827 
7828 /**
7829  * lpfc_io_slot_reset - lpfc method for restarting PCI dev from scratch
7830  * @pdev: pointer to PCI device.
7831  *
7832  * This routine is registered to the PCI subsystem for error handling. This
7833  * function is called after PCI bus has been reset to restart the PCI card
7834  * from scratch, as if from a cold-boot. When this routine is invoked, it
7835  * dispatches the action to the proper SLI-3 or SLI-4 device reset handling
7836  * routine, which will perform the proper device reset.
7837  *
7838  * Return codes
7839  * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
7840  * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
7841  **/
7842 static pci_ers_result_t
7843 lpfc_io_slot_reset(struct pci_dev *pdev)
7844 {
7845 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7846 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7847 	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
7848 
7849 	switch (phba->pci_dev_grp) {
7850 	case LPFC_PCI_DEV_LP:
7851 		rc = lpfc_io_slot_reset_s3(pdev);
7852 		break;
7853 	case LPFC_PCI_DEV_OC:
7854 		rc = lpfc_io_slot_reset_s4(pdev);
7855 		break;
7856 	default:
7857 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7858 				"1428 Invalid PCI device group: 0x%x\n",
7859 				phba->pci_dev_grp);
7860 		break;
7861 	}
7862 	return rc;
7863 }
7864 
7865 /**
7866  * lpfc_io_resume - lpfc method for resuming PCI I/O operation
7867  * @pdev: pointer to PCI device
7868  *
7869  * This routine is registered to the PCI subsystem for error handling. It
7870  * is called when kernel error recovery tells the lpfc driver that it is
7871  * OK to resume normal PCI operation after PCI bus error recovery. When
7872  * this routine is invoked, it dispatches the action to the proper SLI-3
7873  * or SLI-4 device io_resume routine, which will resume the device operation.
7874  **/
7875 static void
7876 lpfc_io_resume(struct pci_dev *pdev)
7877 {
7878 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7879 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7880 
7881 	switch (phba->pci_dev_grp) {
7882 	case LPFC_PCI_DEV_LP:
7883 		lpfc_io_resume_s3(pdev);
7884 		break;
7885 	case LPFC_PCI_DEV_OC:
7886 		lpfc_io_resume_s4(pdev);
7887 		break;
7888 	default:
7889 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7890 				"1429 Invalid PCI device group: 0x%x\n",
7891 				phba->pci_dev_grp);
7892 		break;
7893 	}
7894 	return;
7895 }
7896 
7897 static struct pci_device_id lpfc_id_table[] = {
7898 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER,
7899 		PCI_ANY_ID, PCI_ANY_ID, },
7900 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FIREFLY,
7901 		PCI_ANY_ID, PCI_ANY_ID, },
7902 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR,
7903 		PCI_ANY_ID, PCI_ANY_ID, },
7904 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS,
7905 		PCI_ANY_ID, PCI_ANY_ID, },
7906 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR,
7907 		PCI_ANY_ID, PCI_ANY_ID, },
7908 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY,
7909 		PCI_ANY_ID, PCI_ANY_ID, },
7910 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY,
7911 		PCI_ANY_ID, PCI_ANY_ID, },
7912 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY,
7913 		PCI_ANY_ID, PCI_ANY_ID, },
7914 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY,
7915 		PCI_ANY_ID, PCI_ANY_ID, },
7916 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE,
7917 		PCI_ANY_ID, PCI_ANY_ID, },
7918 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_SCSP,
7919 		PCI_ANY_ID, PCI_ANY_ID, },
7920 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_DCSP,
7921 		PCI_ANY_ID, PCI_ANY_ID, },
7922 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS,
7923 		PCI_ANY_ID, PCI_ANY_ID, },
7924 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_SCSP,
7925 		PCI_ANY_ID, PCI_ANY_ID, },
7926 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_DCSP,
7927 		PCI_ANY_ID, PCI_ANY_ID, },
7928 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID,
7929 		PCI_ANY_ID, PCI_ANY_ID, },
7930 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB,
7931 		PCI_ANY_ID, PCI_ANY_ID, },
7932 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR,
7933 		PCI_ANY_ID, PCI_ANY_ID, },
7934 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HORNET,
7935 		PCI_ANY_ID, PCI_ANY_ID, },
7936 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP,
7937 		PCI_ANY_ID, PCI_ANY_ID, },
7938 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP,
7939 		PCI_ANY_ID, PCI_ANY_ID, },
7940 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID,
7941 		PCI_ANY_ID, PCI_ANY_ID, },
7942 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB,
7943 		PCI_ANY_ID, PCI_ANY_ID, },
7944 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY,
7945 		PCI_ANY_ID, PCI_ANY_ID, },
7946 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101,
7947 		PCI_ANY_ID, PCI_ANY_ID, },
7948 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S,
7949 		PCI_ANY_ID, PCI_ANY_ID, },
7950 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP11000S,
7951 		PCI_ANY_ID, PCI_ANY_ID, },
7952 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LPE11000S,
7953 		PCI_ANY_ID, PCI_ANY_ID, },
7954 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT,
7955 		PCI_ANY_ID, PCI_ANY_ID, },
7956 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_MID,
7957 		PCI_ANY_ID, PCI_ANY_ID, },
7958 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SMB,
7959 		PCI_ANY_ID, PCI_ANY_ID, },
7960 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_DCSP,
7961 		PCI_ANY_ID, PCI_ANY_ID, },
7962 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SCSP,
7963 		PCI_ANY_ID, PCI_ANY_ID, },
7964 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_S,
7965 		PCI_ANY_ID, PCI_ANY_ID, },
7966 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_VF,
7967 		PCI_ANY_ID, PCI_ANY_ID, },
7968 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_PF,
7969 		PCI_ANY_ID, PCI_ANY_ID, },
7970 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S,
7971 		PCI_ANY_ID, PCI_ANY_ID, },
7972 	{PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK,
7973 		PCI_ANY_ID, PCI_ANY_ID, },
7974 	{ 0 }
7975 };
7976 
7977 MODULE_DEVICE_TABLE(pci, lpfc_id_table);
7978 
7979 static struct pci_error_handlers lpfc_err_handler = {
7980 	.error_detected = lpfc_io_error_detected,
7981 	.slot_reset = lpfc_io_slot_reset,
7982 	.resume = lpfc_io_resume,
7983 };
7984 
7985 static struct pci_driver lpfc_driver = {
7986 	.name		= LPFC_DRIVER_NAME,
7987 	.id_table	= lpfc_id_table,
7988 	.probe		= lpfc_pci_probe_one,
7989 	.remove		= __devexit_p(lpfc_pci_remove_one),
7990 	.suspend        = lpfc_pci_suspend_one,
7991 	.resume		= lpfc_pci_resume_one,
7992 	.err_handler    = &lpfc_err_handler,
7993 };
7994 
7995 /**
7996  * lpfc_init - lpfc module initialization routine
7997  *
7998  * This routine is to be invoked when the lpfc module is loaded into the
7999  * kernel. The special kernel macro module_init() is used to indicate the
8000  * role of this routine to the kernel as lpfc module entry point.
8001  *
8002  * Return codes
8003  *   0 - successful
8004  *   -ENOMEM - FC attach transport failed
8005  *   all others - failed
8006  */
8007 static int __init
8008 lpfc_init(void)
8009 {
8010 	int error = 0;
8011 
8012 	printk(LPFC_MODULE_DESC "\n");
8013 	printk(LPFC_COPYRIGHT "\n");
8014 
8015 	if (lpfc_enable_npiv) {
8016 		lpfc_transport_functions.vport_create = lpfc_vport_create;
8017 		lpfc_transport_functions.vport_delete = lpfc_vport_delete;
8018 	}
8019 	lpfc_transport_template =
8020 				fc_attach_transport(&lpfc_transport_functions);
8021 	if (lpfc_transport_template == NULL)
8022 		return -ENOMEM;
8023 	if (lpfc_enable_npiv) {
8024 		lpfc_vport_transport_template =
8025 			fc_attach_transport(&lpfc_vport_transport_functions);
8026 		if (lpfc_vport_transport_template == NULL) {
8027 			fc_release_transport(lpfc_transport_template);
8028 			return -ENOMEM;
8029 		}
8030 	}
8031 	error = pci_register_driver(&lpfc_driver);
8032 	if (error) {
8033 		fc_release_transport(lpfc_transport_template);
8034 		if (lpfc_enable_npiv)
8035 			fc_release_transport(lpfc_vport_transport_template);
8036 	}
8037 
8038 	return error;
8039 }
8040 
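/*
 * Usage note (a sketch, not part of this file): the interrupt preference
 * consumed as cfg_use_msi during probe is normally set at module load time
 * through the corresponding module parameter, e.g.:
 *
 *	modprobe lpfc lpfc_use_msi=2
 *
 * assuming the stock lpfc_use_msi parameter (0 = INTx, 1 = MSI, 2 = MSI-X).
 */
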
8041 /**
8042  * lpfc_exit - lpfc module removal routine
8043  *
8044  * This routine is invoked when the lpfc module is removed from the kernel.
8045  * The special kernel macro module_exit() is used to indicate the role of
8046  * this routine to the kernel as lpfc module exit point.
8047  */
8048 static void __exit
8049 lpfc_exit(void)
8050 {
8051 	pci_unregister_driver(&lpfc_driver);
8052 	fc_release_transport(lpfc_transport_template);
8053 	if (lpfc_enable_npiv)
8054 		fc_release_transport(lpfc_vport_transport_template);
8055 	if (_dump_buf_data) {
8056 		printk(KERN_ERR "BLKGRD freeing %lu pages for _dump_buf_data "
8057 				"at 0x%p\n",
8058 				(1L << _dump_buf_data_order), _dump_buf_data);
8059 		free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order);
8060 	}
8061 
8062 	if (_dump_buf_dif) {
8063 		printk(KERN_ERR "BLKGRD freeing %lu pages for _dump_buf_dif "
8064 				"at 0x%p\n",
8065 				(1L << _dump_buf_dif_order), _dump_buf_dif);
8066 		free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order);
8067 	}
8068 }
8069 
8070 module_init(lpfc_init);
8071 module_exit(lpfc_exit);
8072 MODULE_LICENSE("GPL");
8073 MODULE_DESCRIPTION(LPFC_MODULE_DESC);
8074 MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com");
8075 MODULE_VERSION("0:" LPFC_DRIVER_VERSION);
8076