1 /*******************************************************************
2  * This file is part of the Emulex Linux Device Driver for         *
3  * Fibre Channel Host Bus Adapters.                                *
4  * Copyright (C) 2004-2007 Emulex.  All rights reserved.           *
5  * EMULEX and SLI are trademarks of Emulex.                        *
6  * www.emulex.com                                                  *
7  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
8  *                                                                 *
9  * This program is free software; you can redistribute it and/or   *
10  * modify it under the terms of version 2 of the GNU General       *
11  * Public License as published by the Free Software Foundation.    *
12  * This program is distributed in the hope that it will be useful. *
13  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
14  * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
15  * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
16  * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17  * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
18  * more details, a copy of which can be found in the file COPYING  *
19  * included with this package.                                     *
20  *******************************************************************/
21 
22 #include <linux/blkdev.h>
23 #include <linux/delay.h>
24 #include <linux/dma-mapping.h>
25 #include <linux/idr.h>
26 #include <linux/interrupt.h>
27 #include <linux/kthread.h>
28 #include <linux/pci.h>
29 #include <linux/spinlock.h>
30 #include <linux/ctype.h>
31 
32 #include <scsi/scsi.h>
33 #include <scsi/scsi_device.h>
34 #include <scsi/scsi_host.h>
35 #include <scsi/scsi_transport_fc.h>
36 
37 #include "lpfc_hw.h"
38 #include "lpfc_sli.h"
39 #include "lpfc_disc.h"
40 #include "lpfc_scsi.h"
41 #include "lpfc.h"
42 #include "lpfc_logmsg.h"
43 #include "lpfc_crtn.h"
44 #include "lpfc_vport.h"
45 #include "lpfc_version.h"
47 
48 static int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int);
49 static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
50 static int lpfc_post_rcv_buf(struct lpfc_hba *);
51 
52 static struct scsi_transport_template *lpfc_transport_template = NULL;
53 static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
54 static DEFINE_IDR(lpfc_hba_index);
55 
56 
57 
58 extern struct lpfc_hbq_init *lpfc_hbq_defs[];
59 
60 /************************************************************************/
61 /*                                                                      */
62 /*    lpfc_config_port_prep                                             */
63 /*    This routine will do LPFC initialization prior to the             */
64 /*    CONFIG_PORT mailbox command. This will be initialized             */
65 /*    as a SLI layer callback routine.                                  */
66 /*    This routine returns 0 on success or -ERESTART if it wants        */
67 /*    the SLI layer to reset the HBA and try again. Any                 */
68 /*    other return value indicates an error.                            */
69 /*                                                                      */
70 /************************************************************************/
71 int
72 lpfc_config_port_prep(struct lpfc_hba *phba)
73 {
74 	lpfc_vpd_t *vp = &phba->vpd;
75 	int i = 0, rc;
76 	LPFC_MBOXQ_t *pmb;
77 	MAILBOX_t *mb;
78 	char *lpfc_vpd_data = NULL;
79 	uint16_t offset = 0;
80 	static char licensed[56] =
81 		    "key unlock for use with gnu public licensed code only\0";
82 	static int init_key = 1;
83 
84 	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
85 	if (!pmb) {
86 		phba->link_state = LPFC_HBA_ERROR;
87 		return -ENOMEM;
88 	}
89 
90 	mb = &pmb->mb;
91 	phba->link_state = LPFC_INIT_MBX_CMDS;
92 
93 	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
94 		if (init_key) {
95 			uint32_t *ptext = (uint32_t *) licensed;
96 
97 			for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
98 				*ptext = cpu_to_be32(*ptext);
99 			init_key = 0;
100 		}
101 
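		/*
		 * Hand the byte-swapped unlock key above to the adapter: it
		 * is carried in the reserved field of the READ_NVPARM
		 * mailbox issued below.  Only HBAs matching lpfc_is_LC_HBA()
		 * take this path.
		 */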
102 		lpfc_read_nv(phba, pmb);
103 		memset((char*)mb->un.varRDnvp.rsvd3, 0,
104 			sizeof (mb->un.varRDnvp.rsvd3));
105 		memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
106 			 sizeof (licensed));
107 
108 		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
109 
110 		if (rc != MBX_SUCCESS) {
111 			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
112 					"0324 Config Port initialization "
113 					"error, mbxCmd x%x READ_NVPARM, "
114 					"mbxStatus x%x\n",
115 					mb->mbxCommand, mb->mbxStatus);
116 			mempool_free(pmb, phba->mbox_mem_pool);
117 			return -ERESTART;
118 		}
119 		memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
120 		       sizeof(phba->wwnn));
121 		memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
122 		       sizeof(phba->wwpn));
123 	}
124 
125 	phba->sli3_options = 0x0;
126 
127 	/* Setup and issue mailbox READ REV command */
128 	lpfc_read_rev(phba, pmb);
129 	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
130 	if (rc != MBX_SUCCESS) {
131 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
132 				"0439 Adapter failed to init, mbxCmd x%x "
133 				"READ_REV, mbxStatus x%x\n",
134 				mb->mbxCommand, mb->mbxStatus);
135 		mempool_free( pmb, phba->mbox_mem_pool);
136 		return -ERESTART;
137 	}
138 
139 
140 	/*
141 	 * The value of rr must be 1 since the driver set the cv field to 1.
142 	 * This setting requires the FW to set all revision fields.
143 	 */
144 	if (mb->un.varRdRev.rr == 0) {
145 		vp->rev.rBit = 0;
146 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
147 				"0440 Adapter failed to init, READ_REV has "
148 				"missing revision information.\n");
149 		mempool_free(pmb, phba->mbox_mem_pool);
150 		return -ERESTART;
151 	}
152 
	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EINVAL;
	}
155 
156 	/* Save information as VPD data */
157 	vp->rev.rBit = 1;
158 	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
159 	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
160 	memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
161 	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
162 	memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
163 	vp->rev.biuRev = mb->un.varRdRev.biuRev;
164 	vp->rev.smRev = mb->un.varRdRev.smRev;
165 	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
166 	vp->rev.endecRev = mb->un.varRdRev.endecRev;
167 	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
168 	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
169 	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
170 	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
171 	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
172 	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;
173 
174 	/* If the sli feature level is less than 9, we must
175 	 * tear down all RPIs and VPIs on link down if NPIV
176 	 * is enabled.
177 	 */
178 	if (vp->rev.feaLevelHigh < 9)
179 		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;
180 
181 	if (lpfc_is_LC_HBA(phba->pcidev->device))
182 		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
183 						sizeof (phba->RandomData));
184 
185 	/* Get adapter VPD information */
186 	pmb->context2 = kmalloc(DMP_RSP_SIZE, GFP_KERNEL);
187 	if (!pmb->context2)
188 		goto out_free_mbox;
189 	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
190 	if (!lpfc_vpd_data)
191 		goto out_free_context2;
192 
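	/*
	 * Pull the VPD image from the adapter in pieces: each DUMP mailbox
	 * returns another chunk at the current offset, and the loop runs
	 * until the adapter reports no more data or the DMP_VPD_SIZE
	 * buffer is full.
	 */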
193 	do {
194 		lpfc_dump_mem(phba, pmb, offset);
195 		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
196 
197 		if (rc != MBX_SUCCESS) {
198 			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
199 					"0441 VPD not present on adapter, "
200 					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
201 					mb->mbxCommand, mb->mbxStatus);
202 			mb->un.varDmp.word_cnt = 0;
203 		}
204 		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
205 			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
206 		lpfc_sli_pcimem_bcopy(pmb->context2, lpfc_vpd_data + offset,
207 				      mb->un.varDmp.word_cnt);
208 		offset += mb->un.varDmp.word_cnt;
209 	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
210 	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);
211 
212 	kfree(lpfc_vpd_data);
213 out_free_context2:
214 	kfree(pmb->context2);
215 out_free_mbox:
216 	mempool_free(pmb, phba->mbox_mem_pool);
217 	return 0;
218 }
219 
220 /************************************************************************/
221 /*                                                                      */
222 /*    lpfc_config_port_post                                             */
223 /*    This routine will do LPFC initialization after the                */
224 /*    CONFIG_PORT mailbox command. This will be initialized             */
225 /*    as a SLI layer callback routine.                                  */
226 /*    This routine returns 0 on success. Any other return value         */
227 /*    indicates an error.                                               */
228 /*                                                                      */
229 /************************************************************************/
230 int
231 lpfc_config_port_post(struct lpfc_hba *phba)
232 {
233 	struct lpfc_vport *vport = phba->pport;
234 	LPFC_MBOXQ_t *pmb;
235 	MAILBOX_t *mb;
236 	struct lpfc_dmabuf *mp;
237 	struct lpfc_sli *psli = &phba->sli;
238 	uint32_t status, timeout;
239 	int i, j;
240 	int rc;
241 
242 	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
243 	if (!pmb) {
244 		phba->link_state = LPFC_HBA_ERROR;
245 		return -ENOMEM;
246 	}
247 	mb = &pmb->mb;
248 
249 	/* Get login parameters for NID.  */
250 	lpfc_read_sparam(phba, pmb, 0);
251 	pmb->vport = vport;
252 	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
253 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
254 				"0448 Adapter failed init, mbxCmd x%x "
255 				"READ_SPARM mbxStatus x%x\n",
256 				mb->mbxCommand, mb->mbxStatus);
257 		phba->link_state = LPFC_HBA_ERROR;
258 		mp = (struct lpfc_dmabuf *) pmb->context1;
259 		mempool_free( pmb, phba->mbox_mem_pool);
260 		lpfc_mbuf_free(phba, mp->virt, mp->phys);
261 		kfree(mp);
262 		return -EIO;
263 	}
264 
265 	mp = (struct lpfc_dmabuf *) pmb->context1;
266 
267 	memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
268 	lpfc_mbuf_free(phba, mp->virt, mp->phys);
269 	kfree(mp);
270 	pmb->context1 = NULL;
271 
272 	if (phba->cfg_soft_wwnn)
273 		u64_to_wwn(phba->cfg_soft_wwnn,
274 			   vport->fc_sparam.nodeName.u.wwn);
275 	if (phba->cfg_soft_wwpn)
276 		u64_to_wwn(phba->cfg_soft_wwpn,
277 			   vport->fc_sparam.portName.u.wwn);
278 	memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
279 	       sizeof (struct lpfc_name));
280 	memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
281 	       sizeof (struct lpfc_name));
282 	/* If no serial number in VPD data, use low 6 bytes of WWNN */
283 	/* This should be consolidated into parse_vpd ? - mr */
284 	if (phba->SerialNumber[0] == 0) {
285 		uint8_t *outptr;
286 
287 		outptr = &vport->fc_nodename.u.s.IEEE[0];
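		/*
		 * Each IEEE address byte produces two serial-number
		 * characters: nibble values 0-9 become '0'-'9' (0x30 + j)
		 * and 10-15 become 'a'-'f' (0x61 + j - 10).
		 */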
288 		for (i = 0; i < 12; i++) {
289 			status = *outptr++;
290 			j = ((status & 0xf0) >> 4);
291 			if (j <= 9)
292 				phba->SerialNumber[i] =
293 				    (char)((uint8_t) 0x30 + (uint8_t) j);
294 			else
295 				phba->SerialNumber[i] =
296 				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
297 			i++;
298 			j = (status & 0xf);
299 			if (j <= 9)
300 				phba->SerialNumber[i] =
301 				    (char)((uint8_t) 0x30 + (uint8_t) j);
302 			else
303 				phba->SerialNumber[i] =
304 				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
305 		}
306 	}
307 
308 	lpfc_read_config(phba, pmb);
309 	pmb->vport = vport;
310 	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
311 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
312 				"0453 Adapter failed to init, mbxCmd x%x "
313 				"READ_CONFIG, mbxStatus x%x\n",
314 				mb->mbxCommand, mb->mbxStatus);
315 		phba->link_state = LPFC_HBA_ERROR;
316 		mempool_free( pmb, phba->mbox_mem_pool);
317 		return -EIO;
318 	}
319 
320 	/* Reset the DFT_HBA_Q_DEPTH to the max xri  */
321 	if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1))
322 		phba->cfg_hba_queue_depth =
323 			mb->un.varRdConfig.max_xri + 1;
324 
325 	phba->lmt = mb->un.varRdConfig.lmt;
326 
327 	/* Get the default values for Model Name and Description */
328 	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
329 
330 	if ((phba->cfg_link_speed > LINK_SPEED_10G)
331 	    || ((phba->cfg_link_speed == LINK_SPEED_1G)
332 		&& !(phba->lmt & LMT_1Gb))
333 	    || ((phba->cfg_link_speed == LINK_SPEED_2G)
334 		&& !(phba->lmt & LMT_2Gb))
335 	    || ((phba->cfg_link_speed == LINK_SPEED_4G)
336 		&& !(phba->lmt & LMT_4Gb))
337 	    || ((phba->cfg_link_speed == LINK_SPEED_8G)
338 		&& !(phba->lmt & LMT_8Gb))
339 	    || ((phba->cfg_link_speed == LINK_SPEED_10G)
340 		&& !(phba->lmt & LMT_10Gb))) {
341 		/* Reset link speed to auto */
342 		lpfc_printf_log(phba, KERN_WARNING, LOG_LINK_EVENT,
343 			"1302 Invalid speed for this board: "
344 			"Reset link speed to auto: x%x\n",
345 			phba->cfg_link_speed);
346 		phba->cfg_link_speed = LINK_SPEED_AUTO;
347 	}
348 
349 	phba->link_state = LPFC_LINK_DOWN;
350 
351 	/* Only process IOCBs on ring 0 till hba_state is READY */
352 	if (psli->ring[psli->extra_ring].cmdringaddr)
353 		psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT;
354 	if (psli->ring[psli->fcp_ring].cmdringaddr)
355 		psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT;
356 	if (psli->ring[psli->next_ring].cmdringaddr)
357 		psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT;
358 
359 	/* Post receive buffers for desired rings */
360 	if (phba->sli_rev != 3)
361 		lpfc_post_rcv_buf(phba);
362 
363 	/* Enable appropriate host interrupts */
364 	spin_lock_irq(&phba->hbalock);
365 	status = readl(phba->HCregaddr);
366 	status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
367 	if (psli->num_rings > 0)
368 		status |= HC_R0INT_ENA;
369 	if (psli->num_rings > 1)
370 		status |= HC_R1INT_ENA;
371 	if (psli->num_rings > 2)
372 		status |= HC_R2INT_ENA;
373 	if (psli->num_rings > 3)
374 		status |= HC_R3INT_ENA;
375 
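	/*
	 * When FCP ring polling is configured with its interrupt disabled,
	 * mask off the FCP ring completion interrupt here; completions on
	 * that ring are reaped by the FCP polling path instead.
	 */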
376 	if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
377 	    (phba->cfg_poll & DISABLE_FCP_RING_INT))
378 		status &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
379 
380 	writel(status, phba->HCregaddr);
381 	readl(phba->HCregaddr); /* flush */
382 	spin_unlock_irq(&phba->hbalock);
383 
384 	/*
385 	 * Setup the ring 0 (els)  timeout handler
386 	 */
387 	timeout = phba->fc_ratov << 1;
388 	mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
389 	mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
390 	phba->hb_outstanding = 0;
391 	phba->last_completion_time = jiffies;
392 
393 	lpfc_init_link(phba, pmb, phba->cfg_topology, phba->cfg_link_speed);
394 	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
395 	pmb->vport = vport;
396 	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
397 	lpfc_set_loopback_flag(phba);
398 	if (rc != MBX_SUCCESS) {
399 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
400 				"0454 Adapter failed to init, mbxCmd x%x "
401 				"INIT_LINK, mbxStatus x%x\n",
402 				mb->mbxCommand, mb->mbxStatus);
403 
404 		/* Clear all interrupt enable conditions */
405 		writel(0, phba->HCregaddr);
406 		readl(phba->HCregaddr); /* flush */
407 		/* Clear all pending interrupts */
408 		writel(0xffffffff, phba->HAregaddr);
409 		readl(phba->HAregaddr); /* flush */
410 
411 		phba->link_state = LPFC_HBA_ERROR;
412 		if (rc != MBX_BUSY)
413 			mempool_free(pmb, phba->mbox_mem_pool);
414 		return -EIO;
415 	}
416 	/* MBOX buffer will be freed in mbox compl */
417 
418 	return (0);
419 }
420 
421 /************************************************************************/
422 /*                                                                      */
423 /*    lpfc_hba_down_prep                                                */
424 /*    This routine will do LPFC uninitialization before the             */
425 /*    HBA is reset when bringing down the SLI Layer. This will be       */
426 /*    initialized as a SLI layer callback routine.                      */
427 /*    This routine returns 0 on success. Any other return value         */
428 /*    indicates an error.                                               */
429 /*                                                                      */
430 /************************************************************************/
431 int
432 lpfc_hba_down_prep(struct lpfc_hba *phba)
433 {
434 	/* Disable interrupts */
435 	writel(0, phba->HCregaddr);
436 	readl(phba->HCregaddr); /* flush */
437 
438 	lpfc_cleanup_discovery_resources(phba->pport);
439 	return 0;
440 }
441 
442 /************************************************************************/
443 /*                                                                      */
444 /*    lpfc_hba_down_post                                                */
445 /*    This routine will do uninitialization after the HBA is reset      */
446 /*    when bringing down the SLI Layer.                                 */
447 /*    This routine returns 0 on success. Any other return value         */
448 /*    indicates an error.                                               */
449 /*                                                                      */
450 /************************************************************************/
451 int
452 lpfc_hba_down_post(struct lpfc_hba *phba)
453 {
454 	struct lpfc_sli *psli = &phba->sli;
455 	struct lpfc_sli_ring *pring;
456 	struct lpfc_dmabuf *mp, *next_mp;
457 	int i;
458 
459 	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
460 		lpfc_sli_hbqbuf_free_all(phba);
461 	else {
462 		/* Cleanup preposted buffers on the ELS ring */
463 		pring = &psli->ring[LPFC_ELS_RING];
464 		list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
465 			list_del(&mp->list);
466 			pring->postbufq_cnt--;
467 			lpfc_mbuf_free(phba, mp->virt, mp->phys);
468 			kfree(mp);
469 		}
470 	}
471 
472 	for (i = 0; i < psli->num_rings; i++) {
473 		pring = &psli->ring[i];
474 		lpfc_sli_abort_iocb_ring(phba, pring);
475 	}
476 
477 	return 0;
478 }
479 
480 /* HBA heart beat timeout handler */
481 void
482 lpfc_hb_timeout(unsigned long ptr)
483 {
484 	struct lpfc_hba *phba;
485 	unsigned long iflag;
486 
487 	phba = (struct lpfc_hba *)ptr;
488 	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
489 	if (!(phba->pport->work_port_events & WORKER_HB_TMO))
490 		phba->pport->work_port_events |= WORKER_HB_TMO;
491 	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
492 
493 	if (phba->work_wait)
494 		wake_up(phba->work_wait);
495 	return;
496 }
497 
498 static void
499 lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
500 {
501 	unsigned long drvr_flag;
502 
503 	spin_lock_irqsave(&phba->hbalock, drvr_flag);
504 	phba->hb_outstanding = 0;
505 	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
506 
507 	mempool_free(pmboxq, phba->mbox_mem_pool);
508 	if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
509 		!(phba->link_state == LPFC_HBA_ERROR) &&
510 		!(phba->pport->load_flag & FC_UNLOADING))
511 		mod_timer(&phba->hb_tmofunc,
512 			jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
513 	return;
514 }
515 
516 void
517 lpfc_hb_timeout_handler(struct lpfc_hba *phba)
518 {
519 	LPFC_MBOXQ_t *pmboxq;
520 	int retval;
521 	struct lpfc_sli *psli = &phba->sli;
522 
523 	if ((phba->link_state == LPFC_HBA_ERROR) ||
524 		(phba->pport->load_flag & FC_UNLOADING) ||
525 		(phba->pport->fc_flag & FC_OFFLINE_MODE))
526 		return;
527 
528 	spin_lock_irq(&phba->pport->work_port_lock);
529 	/* If the timer is already canceled do nothing */
530 	if (!(phba->pport->work_port_events & WORKER_HB_TMO)) {
531 		spin_unlock_irq(&phba->pport->work_port_lock);
532 		return;
533 	}
534 
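	/*
	 * If a completion has been seen within the last heartbeat interval
	 * the adapter is clearly alive, so skip issuing a heartbeat mailbox
	 * and simply rearm the timer.
	 */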
535 	if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ,
536 		jiffies)) {
537 		spin_unlock_irq(&phba->pport->work_port_lock);
538 		if (!phba->hb_outstanding)
539 			mod_timer(&phba->hb_tmofunc,
540 				jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
541 		else
542 			mod_timer(&phba->hb_tmofunc,
543 				jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
544 		return;
545 	}
546 	spin_unlock_irq(&phba->pport->work_port_lock);
547 
548 	/* If there is no heart beat outstanding, issue a heartbeat command */
549 	if (!phba->hb_outstanding) {
550 		pmboxq = mempool_alloc(phba->mbox_mem_pool,GFP_KERNEL);
551 		if (!pmboxq) {
552 			mod_timer(&phba->hb_tmofunc,
553 				jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
554 			return;
555 		}
556 
557 		lpfc_heart_beat(phba, pmboxq);
558 		pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
559 		pmboxq->vport = phba->pport;
560 		retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
561 
562 		if (retval != MBX_BUSY && retval != MBX_SUCCESS) {
563 			mempool_free(pmboxq, phba->mbox_mem_pool);
564 			mod_timer(&phba->hb_tmofunc,
565 				jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
566 			return;
567 		}
568 		mod_timer(&phba->hb_tmofunc,
569 			jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
570 		phba->hb_outstanding = 1;
571 		return;
572 	} else {
573 		/*
574 		 * If heart beat timeout called with hb_outstanding set we
575 		 * need to take the HBA offline.
576 		 */
577 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
578 				"0459 Adapter heartbeat failure, taking "
579 				"this port offline.\n");
580 
581 		spin_lock_irq(&phba->hbalock);
582 		psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
583 		spin_unlock_irq(&phba->hbalock);
584 
585 		lpfc_offline_prep(phba);
586 		lpfc_offline(phba);
587 		lpfc_unblock_mgmt_io(phba);
588 		phba->link_state = LPFC_HBA_ERROR;
589 		lpfc_hba_down_post(phba);
590 	}
591 }
592 
593 /************************************************************************/
594 /*                                                                      */
595 /*    lpfc_handle_eratt                                                 */
596 /*    This routine will handle processing a Host Attention              */
597 /*    Error Status event. This will be initialized                      */
598 /*    as a SLI layer callback routine.                                  */
599 /*                                                                      */
600 /************************************************************************/
601 void
602 lpfc_handle_eratt(struct lpfc_hba *phba)
603 {
604 	struct lpfc_vport *vport = phba->pport;
605 	struct lpfc_sli   *psli = &phba->sli;
606 	struct lpfc_sli_ring  *pring;
607 	struct lpfc_vport **vports;
608 	uint32_t event_data;
609 	struct Scsi_Host  *shost;
610 	int i;
611 
612 	/* If the pci channel is offline, ignore possible errors,
613 	 * since we cannot communicate with the pci card anyway. */
614 	if (pci_channel_offline(phba->pcidev))
615 		return;
616 
617 	if (phba->work_hs & HS_FFER6 ||
618 	    phba->work_hs & HS_FFER5) {
619 		/* Re-establishing Link */
620 		lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
621 				"1301 Re-establishing Link "
622 				"Data: x%x x%x x%x\n",
623 				phba->work_hs,
624 				phba->work_status[0], phba->work_status[1]);
625 		vports = lpfc_create_vport_work_array(phba);
626 		if (vports != NULL)
627 			for(i = 0;
628 			    i < LPFC_MAX_VPORTS && vports[i] != NULL;
629 			    i++){
630 				shost = lpfc_shost_from_vport(vports[i]);
631 				spin_lock_irq(shost->host_lock);
632 				vports[i]->fc_flag |= FC_ESTABLISH_LINK;
633 				spin_unlock_irq(shost->host_lock);
634 			}
635 		lpfc_destroy_vport_work_array(vports);
636 		spin_lock_irq(&phba->hbalock);
637 		psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
638 		spin_unlock_irq(&phba->hbalock);
639 
640 		/*
641 		 * The firmware stops when it triggers an error attention with
642 		 * HS_FFER6, which can cause I/Os to be dropped by the firmware.
643 		 * Error out the iocbs (I/Os) on the txcmplq so the SCSI layer
644 		 * can retry them after the link is re-established.
645 		 */
646 		pring = &psli->ring[psli->fcp_ring];
647 		lpfc_sli_abort_iocb_ring(phba, pring);
648 
649 
650 		/*
651 		 * There was a firmware error.  Take the hba offline and then
652 		 * attempt to restart it.
653 		 */
654 		lpfc_offline_prep(phba);
655 		lpfc_offline(phba);
656 		lpfc_sli_brdrestart(phba);
657 		if (lpfc_online(phba) == 0) {	/* Initialize the HBA */
658 			mod_timer(&phba->fc_estabtmo, jiffies + HZ * 60);
659 			lpfc_unblock_mgmt_io(phba);
660 			return;
661 		}
662 		lpfc_unblock_mgmt_io(phba);
663 	} else {
664 		/* The if clause above forces this code path when the status
665 		 * failure is a value other than FFER6.  Do not take the HBA
666 		 * offline twice.  This is the adapter hardware error path.
667 		 */
668 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
669 				"0457 Adapter Hardware Error "
670 				"Data: x%x x%x x%x\n",
671 				phba->work_hs,
672 				phba->work_status[0], phba->work_status[1]);
673 
674 		event_data = FC_REG_DUMP_EVENT;
675 		shost = lpfc_shost_from_vport(vport);
676 		fc_host_post_vendor_event(shost, fc_get_event_number(),
677 				sizeof(event_data), (char *) &event_data,
678 				SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
679 
680 		spin_lock_irq(&phba->hbalock);
681 		psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
682 		spin_unlock_irq(&phba->hbalock);
683 		lpfc_offline_prep(phba);
684 		lpfc_offline(phba);
685 		lpfc_unblock_mgmt_io(phba);
686 		phba->link_state = LPFC_HBA_ERROR;
687 		lpfc_hba_down_post(phba);
688 	}
689 }
690 
691 /************************************************************************/
692 /*                                                                      */
693 /*    lpfc_handle_latt                                                  */
694 /*    This routine will handle processing a Host Attention              */
695 /*    Link Status event. This will be initialized                       */
696 /*    as a SLI layer callback routine.                                  */
697 /*                                                                      */
698 /************************************************************************/
699 void
700 lpfc_handle_latt(struct lpfc_hba *phba)
701 {
702 	struct lpfc_vport *vport = phba->pport;
703 	struct lpfc_sli   *psli = &phba->sli;
704 	LPFC_MBOXQ_t *pmb;
705 	volatile uint32_t control;
706 	struct lpfc_dmabuf *mp;
707 	int rc = -ENOMEM;
708 
709 	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
710 	if (!pmb)
711 		goto lpfc_handle_latt_err_exit;
712 
713 	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
714 	if (!mp)
715 		goto lpfc_handle_latt_free_pmb;
716 
717 	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
718 	if (!mp->virt)
719 		goto lpfc_handle_latt_free_mp;
720 
721 	rc = -EIO;
722 
723 	/* Cleanup any outstanding ELS commands */
724 	lpfc_els_flush_all_cmd(phba);
725 
726 	psli->slistat.link_event++;
727 	lpfc_read_la(phba, pmb, mp);
728 	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_la;
729 	pmb->vport = vport;
730 	rc = lpfc_sli_issue_mbox (phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB));
731 	if (rc == MBX_NOT_FINISHED)
732 		goto lpfc_handle_latt_free_mbuf;
733 
734 	/* Clear Link Attention in HA REG */
735 	spin_lock_irq(&phba->hbalock);
736 	writel(HA_LATT, phba->HAregaddr);
737 	readl(phba->HAregaddr); /* flush */
738 	spin_unlock_irq(&phba->hbalock);
739 
740 	return;
741 
742 lpfc_handle_latt_free_mbuf:
743 	lpfc_mbuf_free(phba, mp->virt, mp->phys);
744 lpfc_handle_latt_free_mp:
745 	kfree(mp);
746 lpfc_handle_latt_free_pmb:
747 	mempool_free(pmb, phba->mbox_mem_pool);
748 lpfc_handle_latt_err_exit:
749 	/* Enable Link attention interrupts */
750 	spin_lock_irq(&phba->hbalock);
751 	psli->sli_flag |= LPFC_PROCESS_LA;
752 	control = readl(phba->HCregaddr);
753 	control |= HC_LAINT_ENA;
754 	writel(control, phba->HCregaddr);
755 	readl(phba->HCregaddr); /* flush */
756 
757 	/* Clear Link Attention in HA REG */
758 	writel(HA_LATT, phba->HAregaddr);
759 	readl(phba->HAregaddr); /* flush */
760 	spin_unlock_irq(&phba->hbalock);
761 	lpfc_linkdown(phba);
762 	phba->link_state = LPFC_HBA_ERROR;
763 
764 	/* The other case is an error from issue_mbox */
765 	if (rc == -ENOMEM)
766 		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
767 			        "0300 READ_LA: no buffers\n");
768 
769 	return;
770 }
771 
772 /************************************************************************/
773 /*                                                                      */
774 /*   lpfc_parse_vpd                                                     */
775 /*   This routine will parse the VPD data                               */
776 /*                                                                      */
777 /************************************************************************/
778 static int
779 lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
780 {
781 	uint8_t lenlo, lenhi;
782 	int Length;
783 	int i, j;
784 	int finished = 0;
785 	int index = 0;
786 
787 	if (!vpd)
788 		return 0;
789 
790 	/* Vital Product */
791 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
792 			"0455 Vital Product Data: x%x x%x x%x x%x\n",
793 			(uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
794 			(uint32_t) vpd[3]);
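	/*
	 * Walk the standard PCI VPD resource list: 0x82 (identifier string)
	 * and 0x91 descriptors are skipped, 0x90 is the read-only keyword
	 * area carrying SN and the V1-V4 fields parsed below, and 0x78 is
	 * the end tag.
	 */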
795 	while (!finished && (index < (len - 4))) {
796 		switch (vpd[index]) {
797 		case 0x82:
798 		case 0x91:
799 			index += 1;
800 			lenlo = vpd[index];
801 			index += 1;
802 			lenhi = vpd[index];
803 			index += 1;
804 			i = ((((unsigned short)lenhi) << 8) + lenlo);
805 			index += i;
806 			break;
807 		case 0x90:
808 			index += 1;
809 			lenlo = vpd[index];
810 			index += 1;
811 			lenhi = vpd[index];
812 			index += 1;
813 			Length = ((((unsigned short)lenhi) << 8) + lenlo);
814 			if (Length > len - index)
815 				Length = len - index;
816 			while (Length > 0) {
817 			/* Look for Serial Number */
818 			if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
819 				index += 2;
820 				i = vpd[index];
821 				index += 1;
822 				j = 0;
823 				Length -= (3+i);
824 				while(i--) {
825 					phba->SerialNumber[j++] = vpd[index++];
826 					if (j == 31)
827 						break;
828 				}
829 				phba->SerialNumber[j] = 0;
830 				continue;
831 			}
832 			else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
833 				phba->vpd_flag |= VPD_MODEL_DESC;
834 				index += 2;
835 				i = vpd[index];
836 				index += 1;
837 				j = 0;
838 				Length -= (3+i);
839 				while(i--) {
840 					phba->ModelDesc[j++] = vpd[index++];
841 					if (j == 255)
842 						break;
843 				}
844 				phba->ModelDesc[j] = 0;
845 				continue;
846 			}
847 			else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
848 				phba->vpd_flag |= VPD_MODEL_NAME;
849 				index += 2;
850 				i = vpd[index];
851 				index += 1;
852 				j = 0;
853 				Length -= (3+i);
854 				while(i--) {
855 					phba->ModelName[j++] = vpd[index++];
856 					if (j == 79)
857 						break;
858 				}
859 				phba->ModelName[j] = 0;
860 				continue;
861 			}
862 			else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
863 				phba->vpd_flag |= VPD_PROGRAM_TYPE;
864 				index += 2;
865 				i = vpd[index];
866 				index += 1;
867 				j = 0;
868 				Length -= (3+i);
869 				while(i--) {
870 					phba->ProgramType[j++] = vpd[index++];
871 					if (j == 255)
872 						break;
873 				}
874 				phba->ProgramType[j] = 0;
875 				continue;
876 			}
877 			else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
878 				phba->vpd_flag |= VPD_PORT;
879 				index += 2;
880 				i = vpd[index];
881 				index += 1;
882 				j = 0;
883 				Length -= (3+i);
884 				while(i--) {
885 				phba->Port[j++] = vpd[index++];
886 				if (j == 19)
887 					break;
888 				}
889 				phba->Port[j] = 0;
890 				continue;
891 			}
892 			else {
893 				index += 2;
894 				i = vpd[index];
895 				index += 1;
896 				index += i;
897 				Length -= (3 + i);
898 			}
899 		}
900 		finished = 0;
901 		break;
902 		case 0x78:
903 			finished = 1;
904 			break;
905 		default:
906 			index ++;
907 			break;
908 		}
909 	}
910 
911 	return(1);
912 }
913 
914 static void
915 lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
916 {
917 	lpfc_vpd_t *vp;
918 	uint16_t dev_id = phba->pcidev->device;
919 	int max_speed;
920 	struct {
921 		char * name;
922 		int    max_speed;
923 		char * bus;
924 	} m = {"<Unknown>", 0, ""};
925 
926 	if (mdp && mdp[0] != '\0'
927 		&& descp && descp[0] != '\0')
928 		return;
929 
930 	if (phba->lmt & LMT_10Gb)
931 		max_speed = 10;
932 	else if (phba->lmt & LMT_8Gb)
933 		max_speed = 8;
934 	else if (phba->lmt & LMT_4Gb)
935 		max_speed = 4;
936 	else if (phba->lmt & LMT_2Gb)
937 		max_speed = 2;
938 	else
939 		max_speed = 1;
940 
941 	vp = &phba->vpd;
942 
943 	switch (dev_id) {
944 	case PCI_DEVICE_ID_FIREFLY:
945 		m = (typeof(m)){"LP6000", max_speed, "PCI"};
946 		break;
947 	case PCI_DEVICE_ID_SUPERFLY:
948 		if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
949 			m = (typeof(m)){"LP7000", max_speed,  "PCI"};
950 		else
951 			m = (typeof(m)){"LP7000E", max_speed, "PCI"};
952 		break;
953 	case PCI_DEVICE_ID_DRAGONFLY:
954 		m = (typeof(m)){"LP8000", max_speed, "PCI"};
955 		break;
956 	case PCI_DEVICE_ID_CENTAUR:
957 		if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
958 			m = (typeof(m)){"LP9002", max_speed, "PCI"};
959 		else
960 			m = (typeof(m)){"LP9000", max_speed, "PCI"};
961 		break;
962 	case PCI_DEVICE_ID_RFLY:
963 		m = (typeof(m)){"LP952", max_speed, "PCI"};
964 		break;
965 	case PCI_DEVICE_ID_PEGASUS:
966 		m = (typeof(m)){"LP9802", max_speed, "PCI-X"};
967 		break;
968 	case PCI_DEVICE_ID_THOR:
969 		m = (typeof(m)){"LP10000", max_speed, "PCI-X"};
970 		break;
971 	case PCI_DEVICE_ID_VIPER:
972 		m = (typeof(m)){"LPX1000", max_speed,  "PCI-X"};
973 		break;
974 	case PCI_DEVICE_ID_PFLY:
975 		m = (typeof(m)){"LP982", max_speed, "PCI-X"};
976 		break;
977 	case PCI_DEVICE_ID_TFLY:
978 		m = (typeof(m)){"LP1050", max_speed, "PCI-X"};
979 		break;
980 	case PCI_DEVICE_ID_HELIOS:
981 		m = (typeof(m)){"LP11000", max_speed, "PCI-X2"};
982 		break;
983 	case PCI_DEVICE_ID_HELIOS_SCSP:
984 		m = (typeof(m)){"LP11000-SP", max_speed, "PCI-X2"};
985 		break;
986 	case PCI_DEVICE_ID_HELIOS_DCSP:
987 		m = (typeof(m)){"LP11002-SP", max_speed, "PCI-X2"};
988 		break;
989 	case PCI_DEVICE_ID_NEPTUNE:
990 		m = (typeof(m)){"LPe1000", max_speed, "PCIe"};
991 		break;
992 	case PCI_DEVICE_ID_NEPTUNE_SCSP:
993 		m = (typeof(m)){"LPe1000-SP", max_speed, "PCIe"};
994 		break;
995 	case PCI_DEVICE_ID_NEPTUNE_DCSP:
996 		m = (typeof(m)){"LPe1002-SP", max_speed, "PCIe"};
997 		break;
998 	case PCI_DEVICE_ID_BMID:
999 		m = (typeof(m)){"LP1150", max_speed, "PCI-X2"};
1000 		break;
1001 	case PCI_DEVICE_ID_BSMB:
1002 		m = (typeof(m)){"LP111", max_speed, "PCI-X2"};
1003 		break;
1004 	case PCI_DEVICE_ID_ZEPHYR:
1005 		m = (typeof(m)){"LPe11000", max_speed, "PCIe"};
1006 		break;
1007 	case PCI_DEVICE_ID_ZEPHYR_SCSP:
1008 		m = (typeof(m)){"LPe11000", max_speed, "PCIe"};
1009 		break;
1010 	case PCI_DEVICE_ID_ZEPHYR_DCSP:
1011 		m = (typeof(m)){"LPe11002-SP", max_speed, "PCIe"};
1012 		break;
1013 	case PCI_DEVICE_ID_ZMID:
1014 		m = (typeof(m)){"LPe1150", max_speed, "PCIe"};
1015 		break;
1016 	case PCI_DEVICE_ID_ZSMB:
1017 		m = (typeof(m)){"LPe111", max_speed, "PCIe"};
1018 		break;
1019 	case PCI_DEVICE_ID_LP101:
1020 		m = (typeof(m)){"LP101", max_speed, "PCI-X"};
1021 		break;
1022 	case PCI_DEVICE_ID_LP10000S:
1023 		m = (typeof(m)){"LP10000-S", max_speed, "PCI"};
1024 		break;
1025 	case PCI_DEVICE_ID_LP11000S:
1026 		m = (typeof(m)){"LP11000-S", max_speed,
1027 			"PCI-X2"};
1028 		break;
1029 	case PCI_DEVICE_ID_LPE11000S:
1030 		m = (typeof(m)){"LPe11000-S", max_speed,
1031 			"PCIe"};
1032 		break;
1033 	case PCI_DEVICE_ID_SAT:
1034 		m = (typeof(m)){"LPe12000", max_speed, "PCIe"};
1035 		break;
1036 	case PCI_DEVICE_ID_SAT_MID:
1037 		m = (typeof(m)){"LPe1250", max_speed, "PCIe"};
1038 		break;
1039 	case PCI_DEVICE_ID_SAT_SMB:
1040 		m = (typeof(m)){"LPe121", max_speed, "PCIe"};
1041 		break;
1042 	case PCI_DEVICE_ID_SAT_DCSP:
1043 		m = (typeof(m)){"LPe12002-SP", max_speed, "PCIe"};
1044 		break;
1045 	case PCI_DEVICE_ID_SAT_SCSP:
1046 		m = (typeof(m)){"LPe12000-SP", max_speed, "PCIe"};
1047 		break;
1048 	case PCI_DEVICE_ID_SAT_S:
1049 		m = (typeof(m)){"LPe12000-S", max_speed, "PCIe"};
1050 		break;
1051 	default:
1052 		m = (typeof(m)){ NULL };
1053 		break;
1054 	}
1055 
1056 	if (mdp && mdp[0] == '\0')
1057 		snprintf(mdp, 79,"%s", m.name);
1058 	if (descp && descp[0] == '\0')
1059 		snprintf(descp, 255,
1060 			 "Emulex %s %dGb %s Fibre Channel Adapter",
1061 			 m.name, m.max_speed, m.bus);
1062 }
1063 
1064 /**************************************************/
1065 /*   lpfc_post_buffer                             */
1066 /*                                                */
1067 /*   This routine will post cnt buffers to the    */
1068 /*   ring with the QUE_RING_BUF_CN command. This  */
1069 /*   allows 2 buffers / command to be posted.     */
1070 /*   Returns the number of buffers NOT posted.    */
1071 /**************************************************/
1072 int
1073 lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt,
1074 		 int type)
1075 {
1076 	IOCB_t *icmd;
1077 	struct lpfc_iocbq *iocb;
1078 	struct lpfc_dmabuf *mp1, *mp2;
1079 
1080 	cnt += pring->missbufcnt;
1081 
1082 	/* While there are buffers to post */
1083 	while (cnt > 0) {
1084 		/* Allocate buffer for  command iocb */
1085 		iocb = lpfc_sli_get_iocbq(phba);
1086 		if (iocb == NULL) {
1087 			pring->missbufcnt = cnt;
1088 			return cnt;
1089 		}
1090 		icmd = &iocb->iocb;
1091 
1092 		/* 2 buffers can be posted per command */
1093 		/* Allocate buffer to post */
1094 		mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
1095 		if (mp1)
1096 		    mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
1097 						&mp1->phys);
1098 		if (!mp1 || !mp1->virt) {
1099 			kfree(mp1);
1100 			lpfc_sli_release_iocbq(phba, iocb);
1101 			pring->missbufcnt = cnt;
1102 			return cnt;
1103 		}
1104 
1105 		INIT_LIST_HEAD(&mp1->list);
1106 		/* Allocate buffer to post */
1107 		if (cnt > 1) {
1108 			mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
1109 			if (mp2)
1110 				mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
1111 							    &mp2->phys);
1112 			if (!mp2 || !mp2->virt) {
1113 				kfree(mp2);
1114 				lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
1115 				kfree(mp1);
1116 				lpfc_sli_release_iocbq(phba, iocb);
1117 				pring->missbufcnt = cnt;
1118 				return cnt;
1119 			}
1120 
1121 			INIT_LIST_HEAD(&mp2->list);
1122 		} else {
1123 			mp2 = NULL;
1124 		}
1125 
1126 		icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
1127 		icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
1128 		icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
1129 		icmd->ulpBdeCount = 1;
1130 		cnt--;
1131 		if (mp2) {
1132 			icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
1133 			icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
1134 			icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
1135 			cnt--;
1136 			icmd->ulpBdeCount = 2;
1137 		}
1138 
1139 		icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
1140 		icmd->ulpLe = 1;
1141 
1142 		if (lpfc_sli_issue_iocb(phba, pring, iocb, 0) == IOCB_ERROR) {
1143 			lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
1144 			kfree(mp1);
1145 			cnt++;
1146 			if (mp2) {
1147 				lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
1148 				kfree(mp2);
1149 				cnt++;
1150 			}
1151 			lpfc_sli_release_iocbq(phba, iocb);
1152 			pring->missbufcnt = cnt;
1153 			return cnt;
1154 		}
1155 		lpfc_sli_ringpostbuf_put(phba, pring, mp1);
1156 		if (mp2)
1157 			lpfc_sli_ringpostbuf_put(phba, pring, mp2);
1158 	}
1159 	pring->missbufcnt = 0;
1160 	return 0;
1161 }
1162 
1163 /************************************************************************/
1164 /*                                                                      */
1165 /*   lpfc_post_rcv_buf                                                  */
1166 /*   This routine post initial rcv buffers to the configured rings      */
1167 /*                                                                      */
1168 /************************************************************************/
1169 static int
1170 lpfc_post_rcv_buf(struct lpfc_hba *phba)
1171 {
1172 	struct lpfc_sli *psli = &phba->sli;
1173 
1174 	/* Ring 0, ELS / CT buffers */
1175 	lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0, 1);
1176 	/* Ring 2 - FCP no buffers needed */
1177 
1178 	return 0;
1179 }
1180 
1181 #define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
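/* S(N,V) is the 32-bit left-rotate of V by N bits used by the SHA-1 code. */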
1182 
1183 /************************************************************************/
1184 /*                                                                      */
1185 /*   lpfc_sha_init                                                      */
1186 /*                                                                      */
1187 /************************************************************************/
1188 static void
1189 lpfc_sha_init(uint32_t * HashResultPointer)
1190 {
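	/* Standard SHA-1 initial hash values (FIPS 180-1). */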
1191 	HashResultPointer[0] = 0x67452301;
1192 	HashResultPointer[1] = 0xEFCDAB89;
1193 	HashResultPointer[2] = 0x98BADCFE;
1194 	HashResultPointer[3] = 0x10325476;
1195 	HashResultPointer[4] = 0xC3D2E1F0;
1196 }
1197 
1198 /************************************************************************/
1199 /*                                                                      */
1200 /*   lpfc_sha_iterate                                                   */
1201 /*                                                                      */
1202 /************************************************************************/
1203 static void
1204 lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
1205 {
1206 	int t;
1207 	uint32_t TEMP;
1208 	uint32_t A, B, C, D, E;
1209 	t = 16;
1210 	do {
1211 		HashWorkingPointer[t] =
1212 		    S(1,
1213 		      HashWorkingPointer[t - 3] ^ HashWorkingPointer[t -
1214 								     8] ^
1215 		      HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
1216 	} while (++t <= 79);
1217 	t = 0;
1218 	A = HashResultPointer[0];
1219 	B = HashResultPointer[1];
1220 	C = HashResultPointer[2];
1221 	D = HashResultPointer[3];
1222 	E = HashResultPointer[4];
1223 
1224 	do {
1225 		if (t < 20) {
1226 			TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
1227 		} else if (t < 40) {
1228 			TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
1229 		} else if (t < 60) {
1230 			TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
1231 		} else {
1232 			TEMP = (B ^ C ^ D) + 0xCA62C1D6;
1233 		}
1234 		TEMP += S(5, A) + E + HashWorkingPointer[t];
1235 		E = D;
1236 		D = C;
1237 		C = S(30, B);
1238 		B = A;
1239 		A = TEMP;
1240 	} while (++t <= 79);
1241 
1242 	HashResultPointer[0] += A;
1243 	HashResultPointer[1] += B;
1244 	HashResultPointer[2] += C;
1245 	HashResultPointer[3] += D;
1246 	HashResultPointer[4] += E;
1247 
1248 }
1249 
1250 /************************************************************************/
1251 /*                                                                      */
1252 /*   lpfc_challenge_key                                                 */
1253 /*                                                                      */
1254 /************************************************************************/
1255 static void
1256 lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
1257 {
1258 	*HashWorking = (*RandomChallenge ^ *HashWorking);
1259 }
1260 
1261 /************************************************************************/
1262 /*                                                                      */
1263 /*   lpfc_hba_init                                                      */
1264 /*                                                                      */
1265 /************************************************************************/
1266 void
1267 lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
1268 {
1269 	int t;
1270 	uint32_t *HashWorking;
1271 	uint32_t *pwwnn = (uint32_t *) phba->wwnn;
1272 
1273 	HashWorking = kmalloc(80 * sizeof(uint32_t), GFP_KERNEL);
1274 	if (!HashWorking)
1275 		return;
1276 
1277 	memset(HashWorking, 0, (80 * sizeof(uint32_t)));
1278 	HashWorking[0] = HashWorking[78] = *pwwnn++;
1279 	HashWorking[1] = HashWorking[79] = *pwwnn;
1280 
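	/*
	 * The working block is seeded with the adapter WWNN (mirrored at
	 * words 0/78 and 1/79); the loop below XORs in the adapter's
	 * RandomData challenge words before a single SHA-1 round produces
	 * the hbainit words handed back through the hbainit parameter.
	 */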
1281 	for (t = 0; t < 7; t++)
1282 		lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
1283 
1284 	lpfc_sha_init(hbainit);
1285 	lpfc_sha_iterate(hbainit, HashWorking);
1286 	kfree(HashWorking);
1287 }
1288 
1289 static void
1290 lpfc_cleanup(struct lpfc_vport *vport)
1291 {
1292 	struct lpfc_nodelist *ndlp, *next_ndlp;
1293 
1294 	/* clean up phba - lpfc specific */
1295 	lpfc_can_disctmo(vport);
1296 	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp)
1297 		lpfc_nlp_put(ndlp);
1298 	return;
1299 }
1300 
1301 static void
1302 lpfc_establish_link_tmo(unsigned long ptr)
1303 {
1304 	struct lpfc_hba   *phba = (struct lpfc_hba *) ptr;
1305 	struct lpfc_vport **vports;
1306 	unsigned long iflag;
1307 	int i;
1308 
1309 	/* Re-establishing Link, timer expired */
1310 	lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
1311 			"1300 Re-establishing Link, timer expired "
1312 			"Data: x%x x%x\n",
1313 			phba->pport->fc_flag, phba->pport->port_state);
1314 	vports = lpfc_create_vport_work_array(phba);
1315 	if (vports != NULL)
1316 		for(i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++) {
1317 			struct Scsi_Host *shost;
1318 			shost = lpfc_shost_from_vport(vports[i]);
1319 			spin_lock_irqsave(shost->host_lock, iflag);
1320 			vports[i]->fc_flag &= ~FC_ESTABLISH_LINK;
1321 			spin_unlock_irqrestore(shost->host_lock, iflag);
1322 		}
1323 	lpfc_destroy_vport_work_array(vports);
1324 }
1325 
1326 void
1327 lpfc_stop_vport_timers(struct lpfc_vport *vport)
1328 {
1329 	del_timer_sync(&vport->els_tmofunc);
1330 	del_timer_sync(&vport->fc_fdmitmo);
1331 	lpfc_can_disctmo(vport);
1332 	return;
1333 }
1334 
1335 static void
1336 lpfc_stop_phba_timers(struct lpfc_hba *phba)
1337 {
1338 	del_timer_sync(&phba->fcp_poll_timer);
1339 	del_timer_sync(&phba->fc_estabtmo);
1340 	lpfc_stop_vport_timers(phba->pport);
1341 	del_timer_sync(&phba->sli.mbox_tmo);
1342 	del_timer_sync(&phba->fabric_block_timer);
1343 	phba->hb_outstanding = 0;
1344 	del_timer_sync(&phba->hb_tmofunc);
1345 	return;
1346 }
1347 
1348 int
1349 lpfc_online(struct lpfc_hba *phba)
1350 {
1351 	struct lpfc_vport *vport = phba->pport;
1352 	struct lpfc_vport **vports;
1353 	int i;
1354 
1355 	if (!phba)
1356 		return 0;
1357 
1358 	if (!(vport->fc_flag & FC_OFFLINE_MODE))
1359 		return 0;
1360 
1361 	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
1362 			"0458 Bring Adapter online\n");
1363 
1364 	lpfc_block_mgmt_io(phba);
1365 
1366 	if (!lpfc_sli_queue_setup(phba)) {
1367 		lpfc_unblock_mgmt_io(phba);
1368 		return 1;
1369 	}
1370 
1371 	if (lpfc_sli_hba_setup(phba)) {	/* Initialize the HBA */
1372 		lpfc_unblock_mgmt_io(phba);
1373 		return 1;
1374 	}
1375 
1376 	vports = lpfc_create_vport_work_array(phba);
1377 	if (vports != NULL)
1378 		for(i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++) {
1379 			struct Scsi_Host *shost;
1380 			shost = lpfc_shost_from_vport(vports[i]);
1381 			spin_lock_irq(shost->host_lock);
1382 			vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
1383 			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
1384 				vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
1385 			spin_unlock_irq(shost->host_lock);
1386 		}
1387 	lpfc_destroy_vport_work_array(vports);
1388 
1389 	lpfc_unblock_mgmt_io(phba);
1390 	return 0;
1391 }
1392 
1393 void
1394 lpfc_block_mgmt_io(struct lpfc_hba * phba)
1395 {
1396 	unsigned long iflag;
1397 
1398 	spin_lock_irqsave(&phba->hbalock, iflag);
1399 	phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
1400 	spin_unlock_irqrestore(&phba->hbalock, iflag);
1401 }
1402 
1403 void
1404 lpfc_unblock_mgmt_io(struct lpfc_hba * phba)
1405 {
1406 	unsigned long iflag;
1407 
1408 	spin_lock_irqsave(&phba->hbalock, iflag);
1409 	phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
1410 	spin_unlock_irqrestore(&phba->hbalock, iflag);
1411 }
1412 
1413 void
1414 lpfc_offline_prep(struct lpfc_hba * phba)
1415 {
1416 	struct lpfc_vport *vport = phba->pport;
1417 	struct lpfc_nodelist  *ndlp, *next_ndlp;
1418 
1419 	if (vport->fc_flag & FC_OFFLINE_MODE)
1420 		return;
1421 
1422 	lpfc_block_mgmt_io(phba);
1423 
1424 	lpfc_linkdown(phba);
1425 
1426 	/* Issue an unreg_login to all nodes */
1427 	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp)
1428 		if (ndlp->nlp_state != NLP_STE_UNUSED_NODE)
1429 			lpfc_unreg_rpi(vport, ndlp);
1430 
1431 	lpfc_sli_flush_mbox_queue(phba);
1432 }
1433 
1434 void
1435 lpfc_offline(struct lpfc_hba *phba)
1436 {
1437 	struct Scsi_Host  *shost;
1438 	struct lpfc_vport **vports;
1439 	int i;
1440 
1441 	if (phba->pport->fc_flag & FC_OFFLINE_MODE)
1442 		return;
1443 
1444 	/* stop all timers associated with this hba */
1445 	lpfc_stop_phba_timers(phba);
1446 	vports = lpfc_create_vport_work_array(phba);
1447 	if (vports != NULL)
1448 		for(i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++)
1449 			lpfc_stop_vport_timers(vports[i]);
1450 	lpfc_destroy_vport_work_array(vports);
1451 	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
1452 			"0460 Bring Adapter offline\n");
1453 	/* Bring down the SLI Layer and cleanup.  The HBA is offline
1454 	   now.  */
1455 	lpfc_sli_hba_down(phba);
1456 	spin_lock_irq(&phba->hbalock);
1457 	phba->work_ha = 0;
1458 	spin_unlock_irq(&phba->hbalock);
1459 	vports = lpfc_create_vport_work_array(phba);
1460 	if (vports != NULL)
1461 		for(i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++) {
1462 			shost = lpfc_shost_from_vport(vports[i]);
1463 			lpfc_cleanup(vports[i]);
1464 			spin_lock_irq(shost->host_lock);
1465 			vports[i]->work_port_events = 0;
1466 			vports[i]->fc_flag |= FC_OFFLINE_MODE;
1467 			spin_unlock_irq(shost->host_lock);
1468 		}
1469 	lpfc_destroy_vport_work_array(vports);
1470 }
1471 
1472 /******************************************************************************
1473 * Function name: lpfc_scsi_free
1474 *
1475 * Description: Called from lpfc_pci_remove_one free internal driver resources
1476 *
1477 ******************************************************************************/
1478 static int
1479 lpfc_scsi_free(struct lpfc_hba *phba)
1480 {
1481 	struct lpfc_scsi_buf *sb, *sb_next;
1482 	struct lpfc_iocbq *io, *io_next;
1483 
1484 	spin_lock_irq(&phba->hbalock);
1485 	/* Release all the lpfc_scsi_bufs maintained by this host. */
1486 	list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) {
1487 		list_del(&sb->list);
1488 		pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
1489 			      sb->dma_handle);
1490 		kfree(sb);
1491 		phba->total_scsi_bufs--;
1492 	}
1493 
1494 	/* Release all the lpfc_iocbq entries maintained by this host. */
1495 	list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) {
1496 		list_del(&io->list);
1497 		kfree(io);
1498 		phba->total_iocbq_bufs--;
1499 	}
1500 
1501 	spin_unlock_irq(&phba->hbalock);
1502 
1503 	return 0;
1504 }
1505 
1506 struct lpfc_vport *
1507 lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
1508 {
1509 	struct lpfc_vport *vport;
1510 	struct Scsi_Host  *shost;
1511 	int error = 0;
1512 
1513 	if (dev != &phba->pcidev->dev)
1514 		shost = scsi_host_alloc(&lpfc_vport_template,
1515 					sizeof(struct lpfc_vport));
1516 	else
1517 		shost = scsi_host_alloc(&lpfc_template,
1518 					sizeof(struct lpfc_vport));
1519 	if (!shost)
1520 		goto out;
1521 
1522 	vport = (struct lpfc_vport *) shost->hostdata;
1523 	vport->phba = phba;
1524 
1525 	vport->load_flag |= FC_LOADING;
1526 	vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
1527 
1528 	lpfc_get_vport_cfgparam(vport);
1529 	shost->unique_id = instance;
1530 	shost->max_id = LPFC_MAX_TARGET;
1531 	shost->max_lun = vport->cfg_max_luns;
1532 	shost->this_id = -1;
1533 	shost->max_cmd_len = 16;
1534 	/*
1535 	 * Set initial can_queue value since 0 is no longer supported and
1536 	 * scsi_add_host will fail. This will be adjusted later based on the
1537 	 * max xri value determined in hba setup.
1538 	 */
1539 	shost->can_queue = phba->cfg_hba_queue_depth - 10;
1540 	if (dev != &phba->pcidev->dev) {
1541 		shost->transportt = lpfc_vport_transport_template;
1542 		vport->port_type = LPFC_NPIV_PORT;
1543 	} else {
1544 		shost->transportt = lpfc_transport_template;
1545 		vport->port_type = LPFC_PHYSICAL_PORT;
1546 	}
1547 
1548 	/* Initialize all internally managed lists. */
1549 	INIT_LIST_HEAD(&vport->fc_nodes);
1550 	spin_lock_init(&vport->work_port_lock);
1551 
1552 	init_timer(&vport->fc_disctmo);
1553 	vport->fc_disctmo.function = lpfc_disc_timeout;
1554 	vport->fc_disctmo.data = (unsigned long)vport;
1555 
1556 	init_timer(&vport->fc_fdmitmo);
1557 	vport->fc_fdmitmo.function = lpfc_fdmi_tmo;
1558 	vport->fc_fdmitmo.data = (unsigned long)vport;
1559 
1560 	init_timer(&vport->els_tmofunc);
1561 	vport->els_tmofunc.function = lpfc_els_timeout;
1562 	vport->els_tmofunc.data = (unsigned long)vport;
1563 
1564 	error = scsi_add_host(shost, dev);
1565 	if (error)
1566 		goto out_put_shost;
1567 
1568 	spin_lock_irq(&phba->hbalock);
1569 	list_add_tail(&vport->listentry, &phba->port_list);
1570 	spin_unlock_irq(&phba->hbalock);
1571 	return vport;
1572 
1573 out_put_shost:
1574 	scsi_host_put(shost);
1575 out:
1576 	return NULL;
1577 }
1578 
1579 void
1580 destroy_port(struct lpfc_vport *vport)
1581 {
1582 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1583 	struct lpfc_hba  *phba = vport->phba;
1584 
1585 	kfree(vport->vname);
1586 
1587 	lpfc_debugfs_terminate(vport);
1588 	fc_remove_host(shost);
1589 	scsi_remove_host(shost);
1590 
1591 	spin_lock_irq(&phba->hbalock);
1592 	list_del_init(&vport->listentry);
1593 	spin_unlock_irq(&phba->hbalock);
1594 
1595 	lpfc_cleanup(vport);
1596 	return;
1597 }
1598 
1599 int
1600 lpfc_get_instance(void)
1601 {
1602 	int instance = 0;
1603 
1604 	/* Assign an unused number */
1605 	if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL))
1606 		return -1;
1607 	if (idr_get_new(&lpfc_hba_index, NULL, &instance))
1608 		return -1;
1609 	return instance;
1610 }
1611 
1612 /*
1613  * Note: there is no scan_start function as adapter initialization
1614  * will have asynchronously kicked off the link initialization.
1615  */
1616 
1617 int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
1618 {
1619 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1620 	struct lpfc_hba   *phba = vport->phba;
1621 	int stat = 0;
1622 
1623 	spin_lock_irq(shost->host_lock);
1624 
1625 	if (vport->load_flag & FC_UNLOADING) {
1626 		stat = 1;
1627 		goto finished;
1628 	}
1629 	if (time >= 30 * HZ) {
1630 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
1631 				"0461 Scanning longer than 30 "
1632 				"seconds.  Continuing initialization\n");
1633 		stat = 1;
1634 		goto finished;
1635 	}
1636 	if (time >= 15 * HZ && phba->link_state <= LPFC_LINK_DOWN) {
1637 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
1638 				"0465 Link down longer than 15 "
1639 				"seconds.  Continuing initialization\n");
1640 		stat = 1;
1641 		goto finished;
1642 	}
1643 
1644 	if (vport->port_state != LPFC_VPORT_READY)
1645 		goto finished;
1646 	if (vport->num_disc_nodes || vport->fc_prli_sent)
1647 		goto finished;
1648 	if (vport->fc_map_cnt == 0 && time < 2 * HZ)
1649 		goto finished;
1650 	if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
1651 		goto finished;
1652 
1653 	stat = 1;
1654 
1655 finished:
1656 	spin_unlock_irq(shost->host_lock);
1657 	return stat;
1658 }
1659 
1660 void lpfc_host_attrib_init(struct Scsi_Host *shost)
1661 {
1662 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1663 	struct lpfc_hba   *phba = vport->phba;
1664 	/*
1665 	 * Set fixed host attributes.  Must be done after lpfc_sli_hba_setup().
1666 	 */
1667 
1668 	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
1669 	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
1670 	fc_host_supported_classes(shost) = FC_COS_CLASS3;
1671 
1672 	memset(fc_host_supported_fc4s(shost), 0,
1673 	       sizeof(fc_host_supported_fc4s(shost)));
1674 	fc_host_supported_fc4s(shost)[2] = 1;
1675 	fc_host_supported_fc4s(shost)[7] = 1;
1676 
1677 	lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
1678 				 sizeof fc_host_symbolic_name(shost));
1679 
1680 	fc_host_supported_speeds(shost) = 0;
1681 	if (phba->lmt & LMT_10Gb)
1682 		fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
1683 	if (phba->lmt & LMT_4Gb)
1684 		fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
1685 	if (phba->lmt & LMT_2Gb)
1686 		fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
1687 	if (phba->lmt & LMT_1Gb)
1688 		fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
1689 
1690 	fc_host_maxframe_size(shost) =
1691 		(((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
1692 		(uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;
1693 
1694 	/* This value is also unchanging */
1695 	memset(fc_host_active_fc4s(shost), 0,
1696 	       sizeof(fc_host_active_fc4s(shost)));
1697 	fc_host_active_fc4s(shost)[2] = 1;
1698 	fc_host_active_fc4s(shost)[7] = 1;
1699 
1700 	fc_host_max_npiv_vports(shost) = phba->max_vpi;
1701 	spin_lock_irq(shost->host_lock);
1702 	vport->load_flag &= ~FC_LOADING;
1703 	spin_unlock_irq(shost->host_lock);
1704 }
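
/*
 * The attributes set in lpfc_host_attrib_init() above are the fixed FC
 * transport class attributes for this host: node/port WWNs, Class 3
 * service, the supported and active FC-4 type bitmaps, the symbolic node
 * name, the supported link speeds derived from the link module type
 * (phba->lmt), the maximum frame size taken from the buffer-to-buffer
 * receive size in the service parameters, and the maximum number of NPIV
 * vports.  Clearing FC_LOADING at the end marks the host as fully
 * initialized.
 */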
1705 
1706 static int __devinit
1707 lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
1708 {
1709 	struct lpfc_vport *vport = NULL;
1710 	struct lpfc_hba   *phba;
1711 	struct lpfc_sli   *psli;
1712 	struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
1713 	struct Scsi_Host  *shost = NULL;
1714 	void *ptr;
1715 	unsigned long bar0map_len, bar2map_len;
1716 	int error = -ENODEV;
1717 	int  i, hbq_count;
1718 	uint16_t iotag;
1719 
1720 	if (pci_enable_device(pdev))
1721 		goto out;
1722 	if (pci_request_regions(pdev, LPFC_DRIVER_NAME))
1723 		goto out_disable_device;
1724 
1725 	phba = kzalloc(sizeof (struct lpfc_hba), GFP_KERNEL);
1726 	if (!phba)
1727 		goto out_release_regions;
1728 
1729 	spin_lock_init(&phba->hbalock);
1730 
1731 	phba->pcidev = pdev;
1732 
1733 	/* Assign an unused board number */
1734 	if ((phba->brd_no = lpfc_get_instance()) < 0)
1735 		goto out_free_phba;
1736 
1737 	INIT_LIST_HEAD(&phba->port_list);
1738 	/*
1739 	 * Get all the module params for configuring this host and then
1740 	 * establish the host.
1741 	 */
1742 	lpfc_get_cfgparam(phba);
1743 	phba->max_vpi = LPFC_MAX_VPI;
1744 
1745 	/* Initialize timers used by driver */
1746 	init_timer(&phba->fc_estabtmo);
1747 	phba->fc_estabtmo.function = lpfc_establish_link_tmo;
1748 	phba->fc_estabtmo.data = (unsigned long)phba;
1749 
1750 	init_timer(&phba->hb_tmofunc);
1751 	phba->hb_tmofunc.function = lpfc_hb_timeout;
1752 	phba->hb_tmofunc.data = (unsigned long)phba;
1753 
1754 	psli = &phba->sli;
1755 	init_timer(&psli->mbox_tmo);
1756 	psli->mbox_tmo.function = lpfc_mbox_timeout;
1757 	psli->mbox_tmo.data = (unsigned long) phba;
1758 	init_timer(&phba->fcp_poll_timer);
1759 	phba->fcp_poll_timer.function = lpfc_poll_timeout;
1760 	phba->fcp_poll_timer.data = (unsigned long) phba;
1761 	init_timer(&phba->fabric_block_timer);
1762 	phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
1763 	phba->fabric_block_timer.data = (unsigned long) phba;
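
	/*
	 * Note: the timers above use the legacy init_timer()/.function/.data
	 * pattern.  On kernels that provide timer_setup() (v4.15 and later),
	 * and assuming the callbacks were converted to take a
	 * struct timer_list *, the heartbeat timer setup would look roughly
	 * like:
	 *
	 *	timer_setup(&phba->hb_tmofunc, lpfc_hb_timeout, 0);
	 *
	 * That is only a sketch of the newer API, not what this revision uses.
	 */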
1764 
1765 	pci_set_master(pdev);
1766 	pci_try_set_mwi(pdev);
1767 
1768 	if (pci_set_dma_mask(phba->pcidev, DMA_64BIT_MASK) != 0)
1769 		if (pci_set_dma_mask(phba->pcidev, DMA_32BIT_MASK) != 0)
1770 			goto out_idr_remove;
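
	/*
	 * DMA configuration: prefer a 64-bit DMA mask and fall back to
	 * 32-bit; if neither can be set, the probe fails and unwinds through
	 * out_idr_remove.
	 */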
1771 
1772 	/*
1773 	 * Get the bus address of Bar0 and Bar2 and the number of bytes
1774 	 * required by each mapping.
1775 	 */
1776 	phba->pci_bar0_map = pci_resource_start(phba->pcidev, 0);
1777 	bar0map_len        = pci_resource_len(phba->pcidev, 0);
1778 
1779 	phba->pci_bar2_map = pci_resource_start(phba->pcidev, 2);
1780 	bar2map_len        = pci_resource_len(phba->pcidev, 2);
1781 
1782 	/* Map HBA SLIM to a kernel virtual address. */
1783 	phba->slim_memmap_p      = ioremap(phba->pci_bar0_map, bar0map_len);
1784 	if (!phba->slim_memmap_p) {
1785 		error = -ENODEV;
1786 		dev_printk(KERN_ERR, &pdev->dev,
1787 			   "ioremap failed for SLIM memory.\n");
1788 		goto out_idr_remove;
1789 	}
1790 
1791 	/* Map HBA Control Registers to a kernel virtual address. */
1792 	phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
1793 	if (!phba->ctrl_regs_memmap_p) {
1794 		error = -ENODEV;
1795 		dev_printk(KERN_ERR, &pdev->dev,
1796 			   "ioremap failed for HBA control registers.\n");
1797 		goto out_iounmap_slim;
1798 	}
1799 
1800 	/* Allocate memory for SLI-2 structures */
1801 	phba->slim2p = dma_alloc_coherent(&phba->pcidev->dev, SLI2_SLIM_SIZE,
1802 					  &phba->slim2p_mapping, GFP_KERNEL);
1803 	if (!phba->slim2p)
1804 		goto out_iounmap;
1805 
1806 	memset(phba->slim2p, 0, SLI2_SLIM_SIZE);
1807 
1808 	phba->hbqslimp.virt = dma_alloc_coherent(&phba->pcidev->dev,
1809 						 lpfc_sli_hbq_size(),
1810 						 &phba->hbqslimp.phys,
1811 						 GFP_KERNEL);
1812 	if (!phba->hbqslimp.virt)
1813 		goto out_free_slim;
1814 
1815 	hbq_count = lpfc_sli_hbq_count();
1816 	ptr = phba->hbqslimp.virt;
1817 	for (i = 0; i < hbq_count; ++i) {
1818 		phba->hbqs[i].hbq_virt = ptr;
1819 		INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
1820 		ptr += (lpfc_hbq_defs[i]->entry_count *
1821 			sizeof(struct lpfc_hbq_entry));
1822 	}
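
	/*
	 * The single coherent HBQ SLIM allocation above is carved into one
	 * contiguous entry array per host buffer queue, sized by each
	 * queue's entry_count from lpfc_hbq_defs[].
	 */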
1823 	phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
1824 	phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer  = lpfc_els_hbq_free;
1825 
1826 	memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
1827 
1828 	/* Initialize the SLI Layer to run with lpfc HBAs. */
1829 	lpfc_sli_setup(phba);
1830 	lpfc_sli_queue_setup(phba);
1831 
1832 	error = lpfc_mem_alloc(phba);
1833 	if (error)
1834 		goto out_free_hbqslimp;
1835 
1836 	/* Initialize and populate the iocb list per host.  */
1837 	INIT_LIST_HEAD(&phba->lpfc_iocb_list);
1838 	for (i = 0; i < LPFC_IOCB_LIST_CNT; i++) {
1839 		iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
1840 		if (iocbq_entry == NULL) {
1841 			printk(KERN_ERR "%s: only allocated %d iocbs of "
1842 				"expected %d count. Unloading driver.\n",
1843 				__FUNCTION__, i, LPFC_IOCB_LIST_CNT);
1844 			error = -ENOMEM;
1845 			goto out_free_iocbq;
1846 		}
1847 
1848 		iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
1849 		if (iotag == 0) {
1850 			kfree(iocbq_entry);
1851 			printk(KERN_ERR "%s: failed to allocate IOTAG. "
1852 			       "Unloading driver.\n",
1853 				__FUNCTION__);
1854 			error = -ENOMEM;
1855 			goto out_free_iocbq;
1856 		}
1857 
1858 		spin_lock_irq(&phba->hbalock);
1859 		list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
1860 		phba->total_iocbq_bufs++;
1861 		spin_unlock_irq(&phba->hbalock);
1862 	}
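
	/*
	 * The loop above preallocates LPFC_IOCB_LIST_CNT iocbs, assigns each
	 * one an iotag via lpfc_sli_next_iotag(), and queues it on
	 * lpfc_iocb_list under hbalock; an allocation or iotag failure
	 * aborts the probe through out_free_iocbq.
	 */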
1863 
1864 	/* Initialize HBA structure */
1865 	phba->fc_edtov = FF_DEF_EDTOV;
1866 	phba->fc_ratov = FF_DEF_RATOV;
1867 	phba->fc_altov = FF_DEF_ALTOV;
1868 	phba->fc_arbtov = FF_DEF_ARBTOV;
1869 
1870 	INIT_LIST_HEAD(&phba->work_list);
1871 	phba->work_ha_mask = (HA_ERATT|HA_MBATT|HA_LATT);
1872 	phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
1873 
1874 	/* Startup the kernel thread for this host adapter. */
1875 	phba->worker_thread = kthread_run(lpfc_do_work, phba,
1876 				       "lpfc_worker_%d", phba->brd_no);
1877 	if (IS_ERR(phba->worker_thread)) {
1878 		error = PTR_ERR(phba->worker_thread);
1879 		goto out_free_iocbq;
1880 	}
1881 
1882 	/* Initialize the list of scsi buffers used by driver for scsi IO. */
1883 	spin_lock_init(&phba->scsi_buf_list_lock);
1884 	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list);
1885 
1886 	/* Initialize list of fabric iocbs */
1887 	INIT_LIST_HEAD(&phba->fabric_iocb_list);
1888 
1889 	vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
1890 	if (!vport)
1891 		goto out_kthread_stop;
1892 
1893 	shost = lpfc_shost_from_vport(vport);
1894 	phba->pport = vport;
1895 	lpfc_debugfs_initialize(vport);
1896 
1897 	pci_set_drvdata(pdev, shost);
1898 
1899 	if (phba->cfg_use_msi) {
1900 		error = pci_enable_msi(phba->pcidev);
1901 		if (!error)
1902 			phba->using_msi = 1;
1903 		else
1904 			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
1905 					"0452 Enable MSI failed, continuing "
1906 					"with IRQ\n");
1907 	}
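
	/*
	 * MSI is optional: if pci_enable_msi() fails, the driver simply
	 * continues with legacy INTx.  Either way the interrupt line is
	 * requested below with IRQF_SHARED.
	 */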
1908 
1909 	error =	request_irq(phba->pcidev->irq, lpfc_intr_handler, IRQF_SHARED,
1910 			    LPFC_DRIVER_NAME, phba);
1911 	if (error) {
1912 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1913 			"0451 Enable interrupt handler failed\n");
1914 		goto out_disable_msi;
1915 	}
1916 
1917 	phba->MBslimaddr = phba->slim_memmap_p;
1918 	phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
1919 	phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
1920 	phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
1921 	phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
1922 
1923 	if (lpfc_alloc_sysfs_attr(vport))
1924 		goto out_free_irq;
1925 
1926 	if (lpfc_sli_hba_setup(phba))
1927 		goto out_remove_device;
1928 
1929 	/*
1930 	 * hba setup may have changed the hba_queue_depth so we need to adjust
1931 	 * the value of can_queue.
1932 	 */
1933 	shost->can_queue = phba->cfg_hba_queue_depth - 10;
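
	/*
	 * The value 10 subtracted above appears to reserve a handful of HBA
	 * queue slots for driver-internal commands so the midlayer cannot
	 * consume the full hardware queue depth (an interpretation; the
	 * reason is not stated in this file).
	 */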
1934 
1935 	lpfc_host_attrib_init(shost);
1936 
1937 	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
1938 		spin_lock_irq(shost->host_lock);
1939 		lpfc_poll_start_timer(phba);
1940 		spin_unlock_irq(shost->host_lock);
1941 	}
1942 
1943 	scsi_scan_host(shost);
1944 
1945 	return 0;
1946 
1947 out_remove_device:
1948 	lpfc_free_sysfs_attr(vport);
1949 	spin_lock_irq(shost->host_lock);
1950 	vport->load_flag |= FC_UNLOADING;
1951 	spin_unlock_irq(shost->host_lock);
1952 out_free_irq:
1953 	lpfc_stop_phba_timers(phba);
1954 	phba->pport->work_port_events = 0;
1955 	free_irq(phba->pcidev->irq, phba);
1956 out_disable_msi:
1957 	if (phba->using_msi)
1958 		pci_disable_msi(phba->pcidev);
1959 	destroy_port(vport);
1960 out_kthread_stop:
1961 	kthread_stop(phba->worker_thread);
1962 out_free_iocbq:
1963 	list_for_each_entry_safe(iocbq_entry, iocbq_next,
1964 						&phba->lpfc_iocb_list, list) {
1965 		kfree(iocbq_entry);
1966 		phba->total_iocbq_bufs--;
1967 	}
1968 	lpfc_mem_free(phba);
1969 out_free_hbqslimp:
1970 	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), phba->hbqslimp.virt,
1971 			  phba->hbqslimp.phys);
1972 out_free_slim:
1973 	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, phba->slim2p,
1974 							phba->slim2p_mapping);
1975 out_iounmap:
1976 	iounmap(phba->ctrl_regs_memmap_p);
1977 out_iounmap_slim:
1978 	iounmap(phba->slim_memmap_p);
1979 out_idr_remove:
1980 	idr_remove(&lpfc_hba_index, phba->brd_no);
1981 out_free_phba:
1982 	kfree(phba);
1983 out_release_regions:
1984 	pci_release_regions(pdev);
1985 out_disable_device:
1986 	pci_disable_device(pdev);
1987 out:
1988 	pci_set_drvdata(pdev, NULL);
1989 	if (shost)
1990 		scsi_host_put(shost);
1991 	return error;
1992 }
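
/*
 * The out_* labels in lpfc_pci_probe_one() above unwind the probe in
 * reverse order of acquisition: sysfs attributes, timers and the IRQ, MSI
 * and the vport, the worker thread, the preallocated iocbs and memory
 * pools, the HBQ and SLI-2 coherent buffers, the BAR mappings, the board
 * number, and finally the PCI regions and device; the final out label also
 * clears drvdata and drops any Scsi_Host reference that was taken.
 */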
1993 
1994 static void __devexit
1995 lpfc_pci_remove_one(struct pci_dev *pdev)
1996 {
1997 	struct Scsi_Host  *shost = pci_get_drvdata(pdev);
1998 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1999 	struct lpfc_hba   *phba = vport->phba;
2000 	spin_lock_irq(&phba->hbalock);
2001 	vport->load_flag |= FC_UNLOADING;
2002 	spin_unlock_irq(&phba->hbalock);
2003 
2004 	kfree(vport->vname);
2005 	lpfc_free_sysfs_attr(vport);
2006 
2007 	fc_remove_host(shost);
2008 	scsi_remove_host(shost);
2009 	/*
2010 	 * Bring down the SLI Layer. This step disables all interrupts,
2011 	 * clears the rings, discards all mailbox commands, and resets
2012 	 * the HBA.
2013 	 */
2014 	lpfc_sli_hba_down(phba);
2015 	lpfc_sli_brdrestart(phba);
2016 
2017 	lpfc_stop_phba_timers(phba);
2018 	spin_lock_irq(&phba->hbalock);
2019 	list_del_init(&vport->listentry);
2020 	spin_unlock_irq(&phba->hbalock);
2021 
2022 	lpfc_debugfs_terminate(vport);
2023 	lpfc_cleanup(vport);
2024 
2025 	kthread_stop(phba->worker_thread);
2026 
2027 	/* Release the irq reservation */
2028 	free_irq(phba->pcidev->irq, phba);
2029 	if (phba->using_msi)
2030 		pci_disable_msi(phba->pcidev);
2031 
2032 	pci_set_drvdata(pdev, NULL);
2033 	scsi_host_put(shost);
2034 
2035 	/*
2036 	 * Call lpfc_scsi_free() before lpfc_mem_free() since the scsi buffers
2037 	 * are released to their corresponding pools here.
2038 	 */
2039 	lpfc_scsi_free(phba);
2040 	lpfc_mem_free(phba);
2041 
2042 	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), phba->hbqslimp.virt,
2043 			  phba->hbqslimp.phys);
2044 
2045 	/* Free resources associated with SLI2 interface */
2046 	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
2047 			  phba->slim2p, phba->slim2p_mapping);
2048 
2049 	/* unmap adapter SLIM and Control Registers */
2050 	iounmap(phba->ctrl_regs_memmap_p);
2051 	iounmap(phba->slim_memmap_p);
2052 
2053 	idr_remove(&lpfc_hba_index, phba->brd_no);
2054 
2055 	kfree(phba);
2056 
2057 	pci_release_regions(pdev);
2058 	pci_disable_device(pdev);
2059 }
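
/*
 * The removal path above mirrors the probe in reverse: mark the port
 * unloading, detach the host from the FC transport and the SCSI midlayer,
 * quiesce and restart the SLI layer, stop the timers and the worker
 * thread, release the IRQ and MSI, free the scsi buffers and memory pools,
 * free the HBQ and SLI-2 coherent buffers, unmap the BARs, release the
 * board number, and give the PCI regions and device back.
 */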
2060 
2061 /**
2062  * lpfc_io_error_detected - called when PCI error is detected
2063  * @pdev: Pointer to PCI device
2064  * @state: The current PCI connection state
2065  *
2066  * This function is called after a PCI bus error affecting
2067  * this device has been detected.
2068  */
2069 static pci_ers_result_t lpfc_io_error_detected(struct pci_dev *pdev,
2070 				pci_channel_state_t state)
2071 {
2072 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
2073 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
2074 	struct lpfc_sli *psli = &phba->sli;
2075 	struct lpfc_sli_ring  *pring;
2076 
2077 	if (state == pci_channel_io_perm_failure)
2078 		return PCI_ERS_RESULT_DISCONNECT;
2079 
2080 	pci_disable_device(pdev);
2081 	/*
2082 	 * There may be I/Os dropped by the firmware.
2083 	 * Error out the iocbs (I/Os) on the txcmplq and let the SCSI layer
2084 	 * retry them after the link is re-established.
2085 	 */
2086 	pring = &psli->ring[psli->fcp_ring];
2087 	lpfc_sli_abort_iocb_ring(phba, pring);
2088 
2089 	/* Release the irq reservation */
2090 	free_irq(phba->pcidev->irq, phba);
2091 	if (phba->using_msi)
2092 		pci_disable_msi(phba->pcidev);
2093 
2094 	/* Request a slot reset. */
2095 	return PCI_ERS_RESULT_NEED_RESET;
2096 }
2097 
2098 /**
2099  * lpfc_io_slot_reset - called after the pci bus has been reset.
2100  * @pdev: Pointer to PCI device
2101  *
2102  * Restart the card from scratch, as if from a cold-boot.
2103  */
2104 static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev)
2105 {
2106 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
2107 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
2108 	struct lpfc_sli *psli = &phba->sli;
2109 	int bars = pci_select_bars(pdev, IORESOURCE_MEM);
2110 
2111 	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
2112 	if (pci_enable_device_bars(pdev, bars)) {
2113 		printk(KERN_ERR "lpfc: Cannot re-enable "
2114 			"PCI device after reset.\n");
2115 		return PCI_ERS_RESULT_DISCONNECT;
2116 	}
2117 
2118 	pci_set_master(pdev);
2119 
2120 	/* Re-establishing Link */
2121 	spin_lock_irq(shost->host_lock);
2122 	phba->pport->fc_flag |= FC_ESTABLISH_LINK;
2123 	spin_unlock_irq(shost->host_lock);
2124 
2125 	spin_lock_irq(&phba->hbalock);
2126 	psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
2127 	spin_unlock_irq(&phba->hbalock);
2128 
2129 
2130 	/* Take device offline; this will perform cleanup */
2131 	lpfc_offline(phba);
2132 	lpfc_sli_brdrestart(phba);
2133 
2134 	return PCI_ERS_RESULT_RECOVERED;
2135 }
2136 
2137 /**
2138  * lpfc_io_resume - called when traffic can start flowing again.
2139  * @pdev: Pointer to PCI device
2140  *
2141  * This callback is called when the error recovery driver tells us that
2142  * it's OK to resume normal operation.
2143  */
2144 static void lpfc_io_resume(struct pci_dev *pdev)
2145 {
2146 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
2147 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
2148 
2149 	if (lpfc_online(phba) == 0) {
2150 		mod_timer(&phba->fc_estabtmo, jiffies + HZ * 60);
2151 	}
2152 }
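
/*
 * Together, the three callbacks above implement PCI error recovery:
 * lpfc_io_error_detected() aborts outstanding FCP iocbs and releases the
 * interrupt, lpfc_io_slot_reset() re-enables the device and takes the port
 * offline with a board restart, and lpfc_io_resume() brings the port back
 * online and arms the link-establish timer.
 */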
2153 
2154 static struct pci_device_id lpfc_id_table[] = {
2155 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER,
2156 		PCI_ANY_ID, PCI_ANY_ID, },
2157 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FIREFLY,
2158 		PCI_ANY_ID, PCI_ANY_ID, },
2159 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR,
2160 		PCI_ANY_ID, PCI_ANY_ID, },
2161 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS,
2162 		PCI_ANY_ID, PCI_ANY_ID, },
2163 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR,
2164 		PCI_ANY_ID, PCI_ANY_ID, },
2165 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY,
2166 		PCI_ANY_ID, PCI_ANY_ID, },
2167 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY,
2168 		PCI_ANY_ID, PCI_ANY_ID, },
2169 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY,
2170 		PCI_ANY_ID, PCI_ANY_ID, },
2171 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY,
2172 		PCI_ANY_ID, PCI_ANY_ID, },
2173 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE,
2174 		PCI_ANY_ID, PCI_ANY_ID, },
2175 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_SCSP,
2176 		PCI_ANY_ID, PCI_ANY_ID, },
2177 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_DCSP,
2178 		PCI_ANY_ID, PCI_ANY_ID, },
2179 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS,
2180 		PCI_ANY_ID, PCI_ANY_ID, },
2181 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_SCSP,
2182 		PCI_ANY_ID, PCI_ANY_ID, },
2183 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_DCSP,
2184 		PCI_ANY_ID, PCI_ANY_ID, },
2185 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID,
2186 		PCI_ANY_ID, PCI_ANY_ID, },
2187 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB,
2188 		PCI_ANY_ID, PCI_ANY_ID, },
2189 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR,
2190 		PCI_ANY_ID, PCI_ANY_ID, },
2191 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP,
2192 		PCI_ANY_ID, PCI_ANY_ID, },
2193 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP,
2194 		PCI_ANY_ID, PCI_ANY_ID, },
2195 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID,
2196 		PCI_ANY_ID, PCI_ANY_ID, },
2197 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB,
2198 		PCI_ANY_ID, PCI_ANY_ID, },
2199 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY,
2200 		PCI_ANY_ID, PCI_ANY_ID, },
2201 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101,
2202 		PCI_ANY_ID, PCI_ANY_ID, },
2203 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S,
2204 		PCI_ANY_ID, PCI_ANY_ID, },
2205 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP11000S,
2206 		PCI_ANY_ID, PCI_ANY_ID, },
2207 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LPE11000S,
2208 		PCI_ANY_ID, PCI_ANY_ID, },
2209 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT,
2210 		PCI_ANY_ID, PCI_ANY_ID, },
2211 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_MID,
2212 		PCI_ANY_ID, PCI_ANY_ID, },
2213 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SMB,
2214 		PCI_ANY_ID, PCI_ANY_ID, },
2215 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_DCSP,
2216 		PCI_ANY_ID, PCI_ANY_ID, },
2217 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SCSP,
2218 		PCI_ANY_ID, PCI_ANY_ID, },
2219 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_S,
2220 		PCI_ANY_ID, PCI_ANY_ID, },
2221 	{ 0 }
2222 };
2223 
2224 MODULE_DEVICE_TABLE(pci, lpfc_id_table);
2225 
2226 static struct pci_error_handlers lpfc_err_handler = {
2227 	.error_detected = lpfc_io_error_detected,
2228 	.slot_reset = lpfc_io_slot_reset,
2229 	.resume = lpfc_io_resume,
2230 };
2231 
2232 static struct pci_driver lpfc_driver = {
2233 	.name		= LPFC_DRIVER_NAME,
2234 	.id_table	= lpfc_id_table,
2235 	.probe		= lpfc_pci_probe_one,
2236 	.remove		= __devexit_p(lpfc_pci_remove_one),
2237 	.err_handler    = &lpfc_err_handler,
2238 };
2239 
2240 static int __init
2241 lpfc_init(void)
2242 {
2243 	int error = 0;
2244 
2245 	printk(LPFC_MODULE_DESC "\n");
2246 	printk(LPFC_COPYRIGHT "\n");
2247 
2248 	lpfc_transport_template =
2249 				fc_attach_transport(&lpfc_transport_functions);
2250 	lpfc_vport_transport_template =
2251 			fc_attach_transport(&lpfc_vport_transport_functions);
2252 	if (!lpfc_transport_template || !lpfc_vport_transport_template)
2253 		return -ENOMEM;
2254 	error = pci_register_driver(&lpfc_driver);
2255 	if (error) {
2256 		fc_release_transport(lpfc_transport_template);
2257 		fc_release_transport(lpfc_vport_transport_template);
2258 	}
2259 
2260 	return error;
2261 }
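
/*
 * Note on lpfc_init() above: both the physical-port and the vport FC
 * transport templates must attach before the PCI driver is registered,
 * and both are released if registration fails.  If only one of the two
 * fc_attach_transport() calls succeeds, the successful template is not
 * released before returning -ENOMEM.
 */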
2262 
2263 static void __exit
2264 lpfc_exit(void)
2265 {
2266 	pci_unregister_driver(&lpfc_driver);
2267 	fc_release_transport(lpfc_transport_template);
2268 	fc_release_transport(lpfc_vport_transport_template);
2269 }
2270 
2271 module_init(lpfc_init);
2272 module_exit(lpfc_exit);
2273 MODULE_LICENSE("GPL");
2274 MODULE_DESCRIPTION(LPFC_MODULE_DESC);
2275 MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com");
2276 MODULE_VERSION("0:" LPFC_DRIVER_VERSION);
2277