xref: /openbmc/linux/drivers/scsi/lpfc/lpfc_hbadisc.c (revision 643d1f7f)
1 /*******************************************************************
2  * This file is part of the Emulex Linux Device Driver for         *
3  * Fibre Channel Host Bus Adapters.                                *
4  * Copyright (C) 2004-2007 Emulex.  All rights reserved.           *
5  * EMULEX and SLI are trademarks of Emulex.                        *
6  * www.emulex.com                                                  *
7  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
8  *                                                                 *
9  * This program is free software; you can redistribute it and/or   *
10  * modify it under the terms of version 2 of the GNU General       *
11  * Public License as published by the Free Software Foundation.    *
12  * This program is distributed in the hope that it will be useful. *
13  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
14  * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
15  * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
16  * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17  * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
18  * more details, a copy of which can be found in the file COPYING  *
19  * included with this package.                                     *
20  *******************************************************************/
21 
22 #include <linux/blkdev.h>
23 #include <linux/pci.h>
24 #include <linux/kthread.h>
25 #include <linux/interrupt.h>
26 
27 #include <scsi/scsi.h>
28 #include <scsi/scsi_device.h>
29 #include <scsi/scsi_host.h>
30 #include <scsi/scsi_transport_fc.h>
31 
32 #include "lpfc_hw.h"
33 #include "lpfc_disc.h"
34 #include "lpfc_sli.h"
35 #include "lpfc_scsi.h"
36 #include "lpfc.h"
37 #include "lpfc_logmsg.h"
38 #include "lpfc_crtn.h"
39 #include "lpfc_vport.h"
40 #include "lpfc_debugfs.h"
41 
42 /* AlpaArray: valid AL_PAs in loop-map priority order, used to assign
 * SCSI IDs for scan-down and bind_method */
43 static uint8_t lpfcAlpaArray[] = {
44 	0xEF, 0xE8, 0xE4, 0xE2, 0xE1, 0xE0, 0xDC, 0xDA, 0xD9, 0xD6,
45 	0xD5, 0xD4, 0xD3, 0xD2, 0xD1, 0xCE, 0xCD, 0xCC, 0xCB, 0xCA,
46 	0xC9, 0xC7, 0xC6, 0xC5, 0xC3, 0xBC, 0xBA, 0xB9, 0xB6, 0xB5,
47 	0xB4, 0xB3, 0xB2, 0xB1, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9,
48 	0xA7, 0xA6, 0xA5, 0xA3, 0x9F, 0x9E, 0x9D, 0x9B, 0x98, 0x97,
49 	0x90, 0x8F, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7C, 0x7A, 0x79,
50 	0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6E, 0x6D, 0x6C, 0x6B,
51 	0x6A, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5C, 0x5A, 0x59, 0x56,
52 	0x55, 0x54, 0x53, 0x52, 0x51, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A,
53 	0x49, 0x47, 0x46, 0x45, 0x43, 0x3C, 0x3A, 0x39, 0x36, 0x35,
54 	0x34, 0x33, 0x32, 0x31, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29,
55 	0x27, 0x26, 0x25, 0x23, 0x1F, 0x1E, 0x1D, 0x1B, 0x18, 0x17,
56 	0x10, 0x0F, 0x08, 0x04, 0x02, 0x01
57 };
58 
59 static void lpfc_disc_timeout_handler(struct lpfc_vport *);
60 static void lpfc_disc_flush_list(struct lpfc_vport *vport);
61 
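/*
 * Called by the fc transport to terminate I/O for a remote port. If the
 * node is known, abort all FCP IOCBs queued for its SCSI target ID and
 * unblock the scsi target so outstanding commands can complete.
 */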
62 void
63 lpfc_terminate_rport_io(struct fc_rport *rport)
64 {
65 	struct lpfc_rport_data *rdata;
66 	struct lpfc_nodelist *ndlp;
67 	struct lpfc_hba *phba;
68 
69 	rdata = rport->dd_data;
70 	ndlp = rdata->pnode;
71 
72 	if (!ndlp) {
73 		if (rport->roles & FC_RPORT_ROLE_FCP_TARGET)
74 			printk(KERN_ERR "Cannot find remote node"
75 			       " to terminate I/O Data x%x\n",
76 			       rport->port_id);
77 		return;
78 	}
79 
80 	phba  = ndlp->vport->phba;
81 
82 	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
83 		"rport terminate: sid:x%x did:x%x flg:x%x",
84 		ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);
85 
86 	if (ndlp->nlp_sid != NLP_NO_SID) {
87 		lpfc_sli_abort_iocb(ndlp->vport,
88 			&phba->sli.ring[phba->sli.fcp_ring],
89 			ndlp->nlp_sid, 0, LPFC_CTX_TGT);
90 	}
91 
92 	/*
93 	 * A device is normally blocked for rediscovery and unblocked when
94 	 * devloss timeout happens.  In case a vport is removed or the driver
95 	 * is unloaded before devloss timeout happens, we need to unblock here.
96 	 */
97 	scsi_target_unblock(&rport->dev);
98 	return;
99 }
100 
101 /*
102  * This function will be called when dev_loss_tmo fire.
103  * This function will be called when dev_loss_tmo fires.
104 void
105 lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
106 {
107 	struct lpfc_rport_data *rdata;
108 	struct lpfc_nodelist *ndlp;
109 	struct lpfc_vport *vport;
110 	struct lpfc_hba   *phba;
111 	struct lpfc_work_evt *evtp;
112 	int  put_node;
113 	int  put_rport;
114 
115 	rdata = rport->dd_data;
116 	ndlp = rdata->pnode;
117 	if (!ndlp)
118 		return;
119 
120 	vport = ndlp->vport;
121 	phba  = vport->phba;
122 
123 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
124 		"rport devlosscb: sid:x%x did:x%x flg:x%x",
125 		ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);
126 
127 	/* Don't defer this if we are in the process of deleting the vport
128 	 * or unloading the driver. The unload will clean up the node
129 	 * appropriately; we just need to clean up the ndlp rport info here.
130 	 */
131 	if (vport->load_flag & FC_UNLOADING) {
132 		put_node = rdata->pnode != NULL;
133 		put_rport = ndlp->rport != NULL;
134 		rdata->pnode = NULL;
135 		ndlp->rport = NULL;
136 		if (put_node)
137 			lpfc_nlp_put(ndlp);
138 		if (put_rport)
139 			put_device(&rport->dev);
140 		return;
141 	}
142 
143 	if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
144 		return;
145 
146 	evtp = &ndlp->dev_loss_evt;
147 
148 	if (!list_empty(&evtp->evt_listp))
149 		return;
150 
151 	spin_lock_irq(&phba->hbalock);
152 	/* We need to hold the node by incrementing the reference
153 	 * count until this queued work is done
154 	 */
155 	evtp->evt_arg1  = lpfc_nlp_get(ndlp);
156 	evtp->evt       = LPFC_EVT_DEV_LOSS;
157 	list_add_tail(&evtp->evt_listp, &phba->work_list);
158 	if (phba->work_wait)
159 		wake_up(phba->work_wait);
160 
161 	spin_unlock_irq(&phba->hbalock);
162 
163 	return;
164 }
165 
166 /*
167  * This function is called from the worker thread when dev_loss_tmo
168  * expires.
169  */
170 static void
171 lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
172 {
173 	struct lpfc_rport_data *rdata;
174 	struct fc_rport   *rport;
175 	struct lpfc_vport *vport;
176 	struct lpfc_hba   *phba;
177 	uint8_t *name;
178 	int  put_node;
179 	int  put_rport;
180 	int warn_on = 0;
181 
182 	rport = ndlp->rport;
183 
184 	if (!rport)
185 		return;
186 
187 	rdata = rport->dd_data;
188 	name = (uint8_t *) &ndlp->nlp_portname;
189 	vport = ndlp->vport;
190 	phba  = vport->phba;
191 
192 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
193 		"rport devlosstmo:did:x%x type:x%x id:x%x",
194 		ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id);
195 
196 	/* Don't defer this if we are in the process of deleting the vport
197 	 * or unloading the driver. The unload will clean up the node
198 	 * appropriately; we just need to clean up the ndlp rport info here.
199 	 */
200 	if (vport->load_flag & FC_UNLOADING) {
201 		if (ndlp->nlp_sid != NLP_NO_SID) {
202 			/* flush the target */
203 			lpfc_sli_abort_iocb(vport,
204 					&phba->sli.ring[phba->sli.fcp_ring],
205 					ndlp->nlp_sid, 0, LPFC_CTX_TGT);
206 		}
207 		put_node = rdata->pnode != NULL;
208 		put_rport = ndlp->rport != NULL;
209 		rdata->pnode = NULL;
210 		ndlp->rport = NULL;
211 		if (put_node)
212 			lpfc_nlp_put(ndlp);
213 		if (put_rport)
214 			put_device(&rport->dev);
215 		return;
216 	}
217 
218 	if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
219 		return;
220 
221 	if (ndlp->nlp_type & NLP_FABRIC) {
222 		/* We will clean up these Nodes in linkup */
223 		put_node = rdata->pnode != NULL;
224 		put_rport = ndlp->rport != NULL;
225 		rdata->pnode = NULL;
226 		ndlp->rport = NULL;
227 		if (put_node)
228 			lpfc_nlp_put(ndlp);
229 		if (put_rport)
230 			put_device(&rport->dev);
231 		return;
232 	}
233 
234 	if (ndlp->nlp_sid != NLP_NO_SID) {
235 		warn_on = 1;
236 		/* flush the target */
237 		lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
238 				    ndlp->nlp_sid, 0, LPFC_CTX_TGT);
239 	}
240 	if (vport->load_flag & FC_UNLOADING)
241 		warn_on = 0;
242 
243 	if (warn_on) {
244 		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
245 				 "0203 Devloss timeout on "
246 				 "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
247 				 "NPort x%x Data: x%x x%x x%x\n",
248 				 *name, *(name+1), *(name+2), *(name+3),
249 				 *(name+4), *(name+5), *(name+6), *(name+7),
250 				 ndlp->nlp_DID, ndlp->nlp_flag,
251 				 ndlp->nlp_state, ndlp->nlp_rpi);
252 	} else {
253 		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
254 				 "0204 Devloss timeout on "
255 				 "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
256 				 "NPort x%x Data: x%x x%x x%x\n",
257 				 *name, *(name+1), *(name+2), *(name+3),
258 				 *(name+4), *(name+5), *(name+6), *(name+7),
259 				 ndlp->nlp_DID, ndlp->nlp_flag,
260 				 ndlp->nlp_state, ndlp->nlp_rpi);
261 	}
262 
263 	put_node = rdata->pnode != NULL;
264 	put_rport = ndlp->rport != NULL;
265 	rdata->pnode = NULL;
266 	ndlp->rport = NULL;
267 	if (put_node)
268 		lpfc_nlp_put(ndlp);
269 	if (put_rport)
270 		put_device(&rport->dev);
271 
272 	if (!(vport->load_flag & FC_UNLOADING) &&
273 	    !(ndlp->nlp_flag & NLP_DELAY_TMO) &&
274 	    !(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
275 	    (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)) {
276 		lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
277 	}
278 }
279 
281 void
282 lpfc_worker_wake_up(struct lpfc_hba *phba)
283 {
284 	wake_up(phba->work_wait);
285 	return;
286 }
287 
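/*
 * Drain the HBA work_list and dispatch each queued event (ELS retry,
 * dev_loss, online/offline prep, offline, warm start, kill). The hbalock
 * is dropped while each event is handled. Events embedded in an ndlp
 * (ELS retry, dev_loss) are not freed here; all others are kfree'd.
 */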
288 static void
289 lpfc_work_list_done(struct lpfc_hba *phba)
290 {
291 	struct lpfc_work_evt  *evtp = NULL;
292 	struct lpfc_nodelist  *ndlp;
293 	int free_evt;
294 
295 	spin_lock_irq(&phba->hbalock);
296 	while (!list_empty(&phba->work_list)) {
297 		list_remove_head((&phba->work_list), evtp, typeof(*evtp),
298 				 evt_listp);
299 		spin_unlock_irq(&phba->hbalock);
300 		free_evt = 1;
301 		switch (evtp->evt) {
302 		case LPFC_EVT_ELS_RETRY:
303 			ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
304 			lpfc_els_retry_delay_handler(ndlp);
305 			free_evt = 0; /* evt is part of ndlp */
306 			/* decrement the node reference count held
307 			 * for this queued work
308 			 */
309 			lpfc_nlp_put(ndlp);
310 			break;
311 		case LPFC_EVT_DEV_LOSS:
312 			ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
313 			lpfc_dev_loss_tmo_handler(ndlp);
314 			free_evt = 0;
315 			/* decrement the node reference count held for
316 			 * this queued work
317 			 */
318 			lpfc_nlp_put(ndlp);
319 			break;
320 		case LPFC_EVT_ONLINE:
321 			if (phba->link_state < LPFC_LINK_DOWN)
322 				*(int *) (evtp->evt_arg1) = lpfc_online(phba);
323 			else
324 				*(int *) (evtp->evt_arg1) = 0;
325 			complete((struct completion *)(evtp->evt_arg2));
326 			break;
327 		case LPFC_EVT_OFFLINE_PREP:
328 			if (phba->link_state >= LPFC_LINK_DOWN)
329 				lpfc_offline_prep(phba);
330 			*(int *)(evtp->evt_arg1) = 0;
331 			complete((struct completion *)(evtp->evt_arg2));
332 			break;
333 		case LPFC_EVT_OFFLINE:
334 			lpfc_offline(phba);
335 			lpfc_sli_brdrestart(phba);
336 			*(int *)(evtp->evt_arg1) =
337 				lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY);
338 			lpfc_unblock_mgmt_io(phba);
339 			complete((struct completion *)(evtp->evt_arg2));
340 			break;
341 		case LPFC_EVT_WARM_START:
342 			lpfc_offline(phba);
343 			lpfc_reset_barrier(phba);
344 			lpfc_sli_brdreset(phba);
345 			lpfc_hba_down_post(phba);
346 			*(int *)(evtp->evt_arg1) =
347 				lpfc_sli_brdready(phba, HS_MBRDY);
348 			lpfc_unblock_mgmt_io(phba);
349 			complete((struct completion *)(evtp->evt_arg2));
350 			break;
351 		case LPFC_EVT_KILL:
352 			lpfc_offline(phba);
353 			*(int *)(evtp->evt_arg1)
354 				= (phba->pport->stopped)
355 				        ? 0 : lpfc_sli_brdkill(phba);
356 			lpfc_unblock_mgmt_io(phba);
357 			complete((struct completion *)(evtp->evt_arg2));
358 			break;
359 		}
360 		if (free_evt)
361 			kfree(evtp);
362 		spin_lock_irq(&phba->hbalock);
363 	}
364 	spin_unlock_irq(&phba->hbalock);
365 
366 }
367 
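/*
 * Worker thread core: handle any host attention conditions captured in
 * work_ha (error, mailbox, link attention), run the per-vport timeout
 * and queue ramp handlers flagged in work_port_events, service slow
 * (ELS) ring events, then drain the event list via lpfc_work_list_done().
 */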
368 static void
369 lpfc_work_done(struct lpfc_hba *phba)
370 {
371 	struct lpfc_sli_ring *pring;
372 	uint32_t ha_copy, status, control, work_port_events;
373 	struct lpfc_vport **vports;
374 	struct lpfc_vport *vport;
375 	int i;
376 
377 	spin_lock_irq(&phba->hbalock);
378 	ha_copy = phba->work_ha;
379 	phba->work_ha = 0;
380 	spin_unlock_irq(&phba->hbalock);
381 
382 	if (ha_copy & HA_ERATT)
383 		lpfc_handle_eratt(phba);
384 
385 	if (ha_copy & HA_MBATT)
386 		lpfc_sli_handle_mb_event(phba);
387 
388 	if (ha_copy & HA_LATT)
389 		lpfc_handle_latt(phba);
390 	vports = lpfc_create_vport_work_array(phba);
391 	if (vports != NULL)
392 		for(i = 0; i <= phba->max_vpi; i++) {
393 			/*
394 			 * We could have no vports in array if unloading, so if
395 			 * this happens then just use the pport
396 			 */
397 			if (vports[i] == NULL && i == 0)
398 				vport = phba->pport;
399 			else
400 				vport = vports[i];
401 			if (vport == NULL)
402 				break;
403 			work_port_events = vport->work_port_events;
404 			if (work_port_events & WORKER_DISC_TMO)
405 				lpfc_disc_timeout_handler(vport);
406 			if (work_port_events & WORKER_ELS_TMO)
407 				lpfc_els_timeout_handler(vport);
408 			if (work_port_events & WORKER_HB_TMO)
409 				lpfc_hb_timeout_handler(phba);
410 			if (work_port_events & WORKER_MBOX_TMO)
411 				lpfc_mbox_timeout_handler(phba);
412 			if (work_port_events & WORKER_FABRIC_BLOCK_TMO)
413 				lpfc_unblock_fabric_iocbs(phba);
414 			if (work_port_events & WORKER_FDMI_TMO)
415 				lpfc_fdmi_timeout_handler(vport);
416 			if (work_port_events & WORKER_RAMP_DOWN_QUEUE)
417 				lpfc_ramp_down_queue_handler(phba);
418 			if (work_port_events & WORKER_RAMP_UP_QUEUE)
419 				lpfc_ramp_up_queue_handler(phba);
420 			spin_lock_irq(&vport->work_port_lock);
421 			vport->work_port_events &= ~work_port_events;
422 			spin_unlock_irq(&vport->work_port_lock);
423 		}
424 	lpfc_destroy_vport_work_array(phba, vports);
425 
426 	pring = &phba->sli.ring[LPFC_ELS_RING];
427 	status = (ha_copy & (HA_RXMASK  << (4*LPFC_ELS_RING)));
428 	status >>= (4*LPFC_ELS_RING);
429 	if ((status & HA_RXMASK)
430 		|| (pring->flag & LPFC_DEFERRED_RING_EVENT)) {
431 		if (pring->flag & LPFC_STOP_IOCB_EVENT) {
432 			pring->flag |= LPFC_DEFERRED_RING_EVENT;
433 		} else {
434 			lpfc_sli_handle_slow_ring_event(phba, pring,
435 							(status &
436 							 HA_RXMASK));
437 			pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
438 		}
439 		/*
440 		 * Turn on Ring interrupts
441 		 */
442 		spin_lock_irq(&phba->hbalock);
443 		control = readl(phba->HCregaddr);
444 		if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) {
445 			lpfc_debugfs_slow_ring_trc(phba,
446 				"WRK Enable ring: cntl:x%x hacopy:x%x",
447 				control, ha_copy, 0);
448 
449 			control |= (HC_R0INT_ENA << LPFC_ELS_RING);
450 			writel(control, phba->HCregaddr);
451 			readl(phba->HCregaddr); /* flush */
452 		}
453 		else {
454 			lpfc_debugfs_slow_ring_trc(phba,
455 				"WRK Ring ok:     cntl:x%x hacopy:x%x",
456 				control, ha_copy, 0);
457 		}
458 		spin_unlock_irq(&phba->hbalock);
459 	}
460 	lpfc_work_list_done(phba);
461 }
462 
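/*
 * Wait condition for the worker thread: returns 1 when any vport has
 * pending port events, the HBA has pending attention or queued events,
 * a deferred ring event is flagged, or the thread should stop. Also
 * tracks consecutive busy wakeups in work_found so lpfc_do_work() knows
 * when to yield the CPU.
 */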
463 static int
464 check_work_wait_done(struct lpfc_hba *phba)
465 {
466 	struct lpfc_vport *vport;
467 	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
468 	int rc = 0;
469 
470 	spin_lock_irq(&phba->hbalock);
471 	list_for_each_entry(vport, &phba->port_list, listentry) {
472 		if (vport->work_port_events) {
473 			rc = 1;
474 			break;
475 		}
476 	}
477 	if (rc || phba->work_ha || (!list_empty(&phba->work_list)) ||
478 	    kthread_should_stop() || pring->flag & LPFC_DEFERRED_RING_EVENT) {
479 		rc = 1;
480 		phba->work_found++;
481 	} else
482 		phba->work_found = 0;
483 	spin_unlock_irq(&phba->hbalock);
484 	return rc;
485 }
486 
487 
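/*
 * Main loop of the lpfc worker kernel thread: sleep until
 * check_work_wait_done() reports work, then process it with
 * lpfc_work_done(), yielding periodically to avoid soft lockups.
 */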
488 int
489 lpfc_do_work(void *p)
490 {
491 	struct lpfc_hba *phba = p;
492 	int rc;
493 	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(work_waitq);
494 
495 	set_user_nice(current, -20);
496 	phba->work_wait = &work_waitq;
497 	phba->work_found = 0;
498 
499 	while (1) {
500 
501 		rc = wait_event_interruptible(work_waitq,
502 					      check_work_wait_done(phba));
503 
504 		BUG_ON(rc);
505 
506 		if (kthread_should_stop())
507 			break;
508 
509 		lpfc_work_done(phba);
510 
511 		/* If there is a lot of slow ring work, like during link up,
512 		 * check_work_wait_done() may cause this thread to not give
513 		 * up the CPU for very long periods of time. This may cause
514 		 * soft lockups or other problems. To avoid these situations
515 		 * give up the CPU here after LPFC_MAX_WORKER_ITERATION
516 		 * consecutive iterations.
517 		 */
518 		if (phba->work_found >= LPFC_MAX_WORKER_ITERATION) {
519 			phba->work_found = 0;
520 			schedule();
521 		}
522 	}
523 	phba->work_wait = NULL;
524 	return 0;
525 }
526 
527 /*
528  * This is only called to handle FC worker events. Since this is a rare
529  * occurrence, we allocate a struct lpfc_work_evt structure here instead of
530  * embedding it in the IOCB.
531  */
532 int
533 lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2,
534 		      uint32_t evt)
535 {
536 	struct lpfc_work_evt  *evtp;
537 	unsigned long flags;
538 
539 	/*
540 	 * All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events will
541 	 * be queued to the worker thread for processing.
542 	 */
543 	evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_ATOMIC);
544 	if (!evtp)
545 		return 0;
546 
547 	evtp->evt_arg1  = arg1;
548 	evtp->evt_arg2  = arg2;
549 	evtp->evt       = evt;
550 
551 	spin_lock_irqsave(&phba->hbalock, flags);
552 	list_add_tail(&evtp->evt_listp, &phba->work_list);
553 	if (phba->work_wait)
554 		lpfc_worker_wake_up(phba);
555 	spin_unlock_irqrestore(&phba->hbalock, flags);
556 
557 	return 1;
558 }
559 
560 void
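/*
 * Walk the vport's node list after a link failure: unregister RPIs where
 * needed and run each node through the discovery state machine with
 * either DEVICE_RM (remove != 0) or DEVICE_RECOVERY.
 */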
561 lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
562 {
563 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
564 	struct lpfc_hba  *phba = vport->phba;
565 	struct lpfc_nodelist *ndlp, *next_ndlp;
566 	int  rc;
567 
568 	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
569 		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
570 			continue;
571 
572 		if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) ||
573 			((vport->port_type == LPFC_NPIV_PORT) &&
574 			(ndlp->nlp_DID == NameServer_DID)))
575 			lpfc_unreg_rpi(vport, ndlp);
576 
577 		/* Leave Fabric nodes alone on link down */
578 		if (!remove && ndlp->nlp_type & NLP_FABRIC)
579 			continue;
580 		rc = lpfc_disc_state_machine(vport, ndlp, NULL,
581 					     remove
582 					     ? NLP_EVT_DEVICE_RM
583 					     : NLP_EVT_DEVICE_RECOVERY);
584 	}
585 	if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) {
586 		lpfc_mbx_unreg_vpi(vport);
587 		spin_lock_irq(shost->host_lock);
588 		vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
589 		spin_unlock_irq(shost->host_lock);
590 	}
591 }
592 
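/*
 * Per-vport link failure handling: flush outstanding RSCN and ELS
 * activity, recover or remove the vport's nodes, and stop the discovery
 * timer.
 */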
593 void
594 lpfc_port_link_failure(struct lpfc_vport *vport)
595 {
596 	/* Cleanup any outstanding RSCN activity */
597 	lpfc_els_flush_rscn(vport);
598 
599 	/* Cleanup any outstanding ELS commands */
600 	lpfc_els_flush_cmd(vport);
601 
602 	lpfc_cleanup_rpis(vport, 0);
603 
604 	/* Turn off discovery timer if it's running */
605 	lpfc_can_disctmo(vport);
606 }
607 
608 static void
609 lpfc_linkdown_port(struct lpfc_vport *vport)
610 {
611 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
612 
613 	fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKDOWN, 0);
614 
615 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
616 		"Link Down:       state:x%x rtry:x%x flg:x%x",
617 		vport->port_state, vport->fc_ns_retry, vport->fc_flag);
618 
619 	lpfc_port_link_failure(vport);
620 
621 }
622 
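/*
 * HBA-wide link down handling: mark the link down, issue a LINK DOWN
 * event to every vport, unregister any firmware default RPIs, and clear
 * the pt2pt state so myDID is renegotiated on the next link up.
 */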
623 int
624 lpfc_linkdown(struct lpfc_hba *phba)
625 {
626 	struct lpfc_vport *vport = phba->pport;
627 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
628 	struct lpfc_vport **vports;
629 	LPFC_MBOXQ_t          *mb;
630 	int i;
631 
632 	if (phba->link_state == LPFC_LINK_DOWN) {
633 		return 0;
634 	}
635 	spin_lock_irq(&phba->hbalock);
636 	if (phba->link_state > LPFC_LINK_DOWN) {
637 		phba->link_state = LPFC_LINK_DOWN;
638 		phba->pport->fc_flag &= ~FC_LBIT;
639 	}
640 	spin_unlock_irq(&phba->hbalock);
641 	vports = lpfc_create_vport_work_array(phba);
642 	if (vports != NULL)
643 		for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
644 			/* Issue a LINK DOWN event to all nodes */
645 			lpfc_linkdown_port(vports[i]);
646 		}
647 	lpfc_destroy_vport_work_array(phba, vports);
648 	/* Clean up any firmware default rpi's */
649 	mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
650 	if (mb) {
651 		lpfc_unreg_did(phba, 0xffff, 0xffffffff, mb);
652 		mb->vport = vport;
653 		mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
654 		if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
655 		    == MBX_NOT_FINISHED) {
656 			mempool_free(mb, phba->mbox_mem_pool);
657 		}
658 	}
659 
660 	/* Setup myDID for link up if we are in pt2pt mode */
661 	if (phba->pport->fc_flag & FC_PT2PT) {
662 		phba->pport->fc_myDID = 0;
663 		mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
664 		if (mb) {
665 			lpfc_config_link(phba, mb);
666 			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
667 			mb->vport = vport;
668 			if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
669 			    == MBX_NOT_FINISHED) {
670 				mempool_free(mb, phba->mbox_mem_pool);
671 			}
672 		}
673 		spin_lock_irq(shost->host_lock);
674 		phba->pport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI);
675 		spin_unlock_irq(shost->host_lock);
676 	}
677 
678 	return 0;
679 }
680 
681 static void
682 lpfc_linkup_cleanup_nodes(struct lpfc_vport *vport)
683 {
684 	struct lpfc_nodelist *ndlp;
685 
686 	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
687 		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
688 			continue;
689 
690 		if (ndlp->nlp_type & NLP_FABRIC) {
691 			/* On link up it's safe to clean up the ndlp
692 			 * from Fabric connections.
693 			 */
694 			if (ndlp->nlp_DID != Fabric_DID)
695 				lpfc_unreg_rpi(vport, ndlp);
696 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
697 		} else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
698 			/* Fail outstanding I/O now since the device is
699 			 * marked for PLOGI.
700 			 */
701 			lpfc_unreg_rpi(vport, ndlp);
702 		}
703 	}
704 }
705 
706 static void
707 lpfc_linkup_port(struct lpfc_vport *vport)
708 {
709 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
710 	struct lpfc_hba  *phba = vport->phba;
711 
712 	if ((vport->load_flag & FC_UNLOADING) != 0)
713 		return;
714 
715 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
716 		"Link Up:         top:x%x speed:x%x flg:x%x",
717 		phba->fc_topology, phba->fc_linkspeed, phba->link_flag);
718 
719 	/* If NPIV is not enabled, only bring the physical port up */
720 	if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
721 		(vport != phba->pport))
722 		return;
723 
724 	fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKUP, 0);
725 
726 	spin_lock_irq(shost->host_lock);
727 	vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
728 			    FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY);
729 	vport->fc_flag |= FC_NDISC_ACTIVE;
730 	vport->fc_ns_retry = 0;
731 	spin_unlock_irq(shost->host_lock);
732 
733 	if (vport->fc_flag & FC_LBIT)
734 		lpfc_linkup_cleanup_nodes(vport);
735 
736 }
737 
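/*
 * HBA-wide link up handling: unblock fabric IOCBs, run
 * lpfc_linkup_port() for every vport and, with NPIV enabled, issue
 * CLEAR_LA on the physical port.
 */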
738 static int
739 lpfc_linkup(struct lpfc_hba *phba)
740 {
741 	struct lpfc_vport **vports;
742 	int i;
743 
744 	phba->link_state = LPFC_LINK_UP;
745 
746 	/* Unblock fabric iocbs if they are blocked */
747 	clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
748 	del_timer_sync(&phba->fabric_block_timer);
749 
750 	vports = lpfc_create_vport_work_array(phba);
751 	if (vports != NULL)
752 		for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++)
753 			lpfc_linkup_port(vports[i]);
754 	lpfc_destroy_vport_work_array(phba, vports);
755 	if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
756 		lpfc_issue_clear_la(phba, phba->pport);
757 
758 	return 0;
759 }
760 
761 /*
762  * This routine handles processing a CLEAR_LA mailbox
763  * command upon completion. It is set up in the LPFC_MBOXQ
764  * as the completion routine when the command is
765  * handed off to the SLI layer.
766  */
767 static void
768 lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
769 {
770 	struct lpfc_vport *vport = pmb->vport;
771 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
772 	struct lpfc_sli   *psli = &phba->sli;
773 	MAILBOX_t *mb = &pmb->mb;
774 	uint32_t control;
775 
776 	/* Since we don't do discovery right now, turn these off here */
777 	psli->ring[psli->extra_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
778 	psli->ring[psli->fcp_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
779 	psli->ring[psli->next_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
780 
781 	/* Check for error */
782 	if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) {
783 		/* CLEAR_LA mbox error <mbxStatus> state <hba_state> */
784 		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
785 				 "0320 CLEAR_LA mbxStatus error x%x hba "
786 				 "state x%x\n",
787 				 mb->mbxStatus, vport->port_state);
788 		phba->link_state = LPFC_HBA_ERROR;
789 		goto out;
790 	}
791 
792 	if (vport->port_type == LPFC_PHYSICAL_PORT)
793 		phba->link_state = LPFC_HBA_READY;
794 
795 	spin_lock_irq(&phba->hbalock);
796 	psli->sli_flag |= LPFC_PROCESS_LA;
797 	control = readl(phba->HCregaddr);
798 	control |= HC_LAINT_ENA;
799 	writel(control, phba->HCregaddr);
800 	readl(phba->HCregaddr); /* flush */
801 	spin_unlock_irq(&phba->hbalock);
	mempool_free(pmb, phba->mbox_mem_pool);
802 	return;

	/* NOTE: the discovery code below is unreachable because of the
	 * early return above (we don't do discovery from this completion);
	 * the "out" error path below still frees pmb itself.
	 */
804 	vport->num_disc_nodes = 0;
805 	/* go thru NPR nodes and issue ELS PLOGIs */
806 	if (vport->fc_npr_cnt)
807 		lpfc_els_disc_plogi(vport);
808 
809 	if (!vport->num_disc_nodes) {
810 		spin_lock_irq(shost->host_lock);
811 		vport->fc_flag &= ~FC_NDISC_ACTIVE;
812 		spin_unlock_irq(shost->host_lock);
813 	}
814 
815 	vport->port_state = LPFC_VPORT_READY;
816 
817 out:
818 	/* Device Discovery completes */
819 	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
820 			 "0225 Device Discovery completes\n");
821 	mempool_free(pmb, phba->mbox_mem_pool);
822 
823 	spin_lock_irq(shost->host_lock);
824 	vport->fc_flag &= ~(FC_ABORT_DISCOVERY | FC_ESTABLISH_LINK);
825 	spin_unlock_irq(shost->host_lock);
826 
827 	del_timer_sync(&phba->fc_estabtmo);
828 
829 	lpfc_can_disctmo(vport);
830 
831 	/* turn on Link Attention interrupts */
832 
833 	spin_lock_irq(&phba->hbalock);
834 	psli->sli_flag |= LPFC_PROCESS_LA;
835 	control = readl(phba->HCregaddr);
836 	control |= HC_LAINT_ENA;
837 	writel(control, phba->HCregaddr);
838 	readl(phba->HCregaddr); /* flush */
839 	spin_unlock_irq(&phba->hbalock);
840 
841 	return;
842 }
843 
844 
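/*
 * Completion handler for the CONFIG_LINK mailbox command. On success,
 * either wait for FAN (public loop without the link bit set) or start
 * discovery by issuing the initial FLOGI; on mailbox error, take the
 * link down and issue CLEAR_LA.
 */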
845 static void
846 lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
847 {
848 	struct lpfc_vport *vport = pmb->vport;
849 
850 	if (pmb->mb.mbxStatus)
851 		goto out;
852 
853 	mempool_free(pmb, phba->mbox_mem_pool);
854 
855 	if (phba->fc_topology == TOPOLOGY_LOOP &&
856 	    vport->fc_flag & FC_PUBLIC_LOOP &&
857 	    !(vport->fc_flag & FC_LBIT)) {
858 		/* Need to wait for FAN - use discovery timer
859 		 * for timeout.  port_state is identically
860 		 * LPFC_LOCAL_CFG_LINK while waiting for FAN
861 		 */
862 		lpfc_set_disctmo(vport);
863 		return;
864 	}
865 
866 	/* Start discovery by sending a FLOGI. port_state is identically
867 	 * LPFC_FLOGI while waiting for FLOGI cmpl
868 	 */
869 	if (vport->port_state != LPFC_FLOGI) {
870 		lpfc_initial_flogi(vport);
871 	}
872 	return;
873 
874 out:
875 	lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
876 			 "0306 CONFIG_LINK mbxStatus error x%x "
877 			 "HBA state x%x\n",
878 			 pmb->mb.mbxStatus, vport->port_state);
879 	mempool_free(pmb, phba->mbox_mem_pool);
880 
881 	lpfc_linkdown(phba);
882 
883 	lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
884 			 "0200 CONFIG_LINK bad hba state x%x\n",
885 			 vport->port_state);
886 
887 	lpfc_issue_clear_la(phba, vport);
888 	return;
889 }
890 
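/*
 * Completion handler for the READ_SPARAM mailbox command. Copies the
 * returned service parameters into the vport, applies any soft
 * WWNN/WWPN overrides, and records the node and port names.
 */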
891 static void
892 lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
893 {
894 	MAILBOX_t *mb = &pmb->mb;
895 	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1;
896 	struct lpfc_vport  *vport = pmb->vport;
897 
899 	/* Check for error */
900 	if (mb->mbxStatus) {
901 		/* READ_SPARAM mbox error <mbxStatus> state <hba_state> */
902 		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
903 				 "0319 READ_SPARAM mbxStatus error x%x "
904 				 "hba state x%x>\n",
905 				 mb->mbxStatus, vport->port_state);
906 		lpfc_linkdown(phba);
907 		goto out;
908 	}
909 
910 	memcpy((uint8_t *) &vport->fc_sparam, (uint8_t *) mp->virt,
911 	       sizeof (struct serv_parm));
912 	if (phba->cfg_soft_wwnn)
913 		u64_to_wwn(phba->cfg_soft_wwnn,
914 			   vport->fc_sparam.nodeName.u.wwn);
915 	if (phba->cfg_soft_wwpn)
916 		u64_to_wwn(phba->cfg_soft_wwpn,
917 			   vport->fc_sparam.portName.u.wwn);
918 	memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
919 	       sizeof(vport->fc_nodename));
920 	memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
921 	       sizeof(vport->fc_portname));
922 	if (vport->port_type == LPFC_PHYSICAL_PORT) {
923 		memcpy(&phba->wwnn, &vport->fc_nodename, sizeof(phba->wwnn));
924 		memcpy(&phba->wwpn, &vport->fc_portname, sizeof(phba->wwpn));
925 	}
926 
927 	lpfc_mbuf_free(phba, mp->virt, mp->phys);
928 	kfree(mp);
929 	mempool_free(pmb, phba->mbox_mem_pool);
930 	return;
931 
932 out:
933 	pmb->context1 = NULL;
934 	lpfc_mbuf_free(phba, mp->virt, mp->phys);
935 	kfree(mp);
936 	lpfc_issue_clear_la(phba, vport);
937 	mempool_free(pmb, phba->mbox_mem_pool);
938 	return;
939 }
940 
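/*
 * Process a link attention link-up event: record link speed and
 * topology, set up the loop map and assigned AL_PA (loop) or NPIV
 * support (fabric), then issue READ_SPARAM and CONFIG_LINK to continue
 * bringing the link up.
 */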
941 static void
942 lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
943 {
944 	struct lpfc_vport *vport = phba->pport;
945 	LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox;
946 	int i;
947 	struct lpfc_dmabuf *mp;
948 	int rc;
949 
950 	sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
951 	cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
952 
953 	spin_lock_irq(&phba->hbalock);
954 	switch (la->UlnkSpeed) {
955 	case LA_1GHZ_LINK:
956 		phba->fc_linkspeed = LA_1GHZ_LINK;
957 		break;
958 	case LA_2GHZ_LINK:
959 		phba->fc_linkspeed = LA_2GHZ_LINK;
960 		break;
961 	case LA_4GHZ_LINK:
962 		phba->fc_linkspeed = LA_4GHZ_LINK;
963 		break;
964 	case LA_8GHZ_LINK:
965 		phba->fc_linkspeed = LA_8GHZ_LINK;
966 		break;
967 	default:
968 		phba->fc_linkspeed = LA_UNKNW_LINK;
969 		break;
970 	}
971 
972 	phba->fc_topology = la->topology;
973 	phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
974 
975 	if (phba->fc_topology == TOPOLOGY_LOOP) {
976 		phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
977 
978 		/* Get Loop Map information */
979 		if (la->il)
980 			vport->fc_flag |= FC_LBIT;
981 
982 		vport->fc_myDID = la->granted_AL_PA;
983 		i = la->un.lilpBde64.tus.f.bdeSize;
984 
985 		if (i == 0) {
986 			phba->alpa_map[0] = 0;
987 		} else {
988 			if (vport->cfg_log_verbose & LOG_LINK_EVENT) {
989 				int numalpa, j, k;
990 				union {
991 					uint8_t pamap[16];
992 					struct {
993 						uint32_t wd1;
994 						uint32_t wd2;
995 						uint32_t wd3;
996 						uint32_t wd4;
997 					} pa;
998 				} un;
999 				numalpa = phba->alpa_map[0];
1000 				j = 0;
1001 				while (j < numalpa) {
1002 					memset(un.pamap, 0, 16);
1003 					for (k = 1; j < numalpa; k++) {
1004 						un.pamap[k - 1] =
1005 							phba->alpa_map[j + 1];
1006 						j++;
1007 						if (k == 16)
1008 							break;
1009 					}
1010 					/* Link Up Event ALPA map */
1011 					lpfc_printf_log(phba,
1012 							KERN_WARNING,
1013 							LOG_LINK_EVENT,
1014 							"1304 Link Up Event "
1015 							"ALPA map Data: x%x "
1016 							"x%x x%x x%x\n",
1017 							un.pa.wd1, un.pa.wd2,
1018 							un.pa.wd3, un.pa.wd4);
1019 				}
1020 			}
1021 		}
1022 	} else {
1023 		if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) {
1024 			if (phba->max_vpi && phba->cfg_enable_npiv &&
1025 			   (phba->sli_rev == 3))
1026 				phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
1027 		}
1028 		vport->fc_myDID = phba->fc_pref_DID;
1029 		vport->fc_flag |= FC_LBIT;
1030 	}
1031 	spin_unlock_irq(&phba->hbalock);
1032 
1033 	lpfc_linkup(phba);
1034 	if (sparam_mbox) {
1035 		lpfc_read_sparam(phba, sparam_mbox, 0);
1036 		sparam_mbox->vport = vport;
1037 		sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
1038 		rc = lpfc_sli_issue_mbox(phba, sparam_mbox, MBX_NOWAIT);
1039 		if (rc == MBX_NOT_FINISHED) {
1040 			mp = (struct lpfc_dmabuf *) sparam_mbox->context1;
1041 			lpfc_mbuf_free(phba, mp->virt, mp->phys);
1042 			kfree(mp);
1043 			mempool_free(sparam_mbox, phba->mbox_mem_pool);
1044 			if (cfglink_mbox)
1045 				mempool_free(cfglink_mbox, phba->mbox_mem_pool);
1046 			goto out;
1047 		}
1048 	}
1049 
1050 	if (cfglink_mbox) {
1051 		vport->port_state = LPFC_LOCAL_CFG_LINK;
1052 		lpfc_config_link(phba, cfglink_mbox);
1053 		cfglink_mbox->vport = vport;
1054 		cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
1055 		rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT);
1056 		if (rc != MBX_NOT_FINISHED)
1057 			return;
1058 		mempool_free(cfglink_mbox, phba->mbox_mem_pool);
1059 	}
1060 out:
1061 	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1062 	lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
1063 			 "0263 Discovery Mailbox error: state: 0x%x : %p %p\n",
1064 			 vport->port_state, sparam_mbox, cfglink_mbox);
1065 	lpfc_issue_clear_la(phba, vport);
1066 	return;
1067 }
1068 
1069 static void
1070 lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
1071 {
1072 	uint32_t control;
1073 	struct lpfc_sli *psli = &phba->sli;
1074 
1075 	lpfc_linkdown(phba);
1076 
1077 	/* turn on Link Attention interrupts - no CLEAR_LA needed */
1078 	spin_lock_irq(&phba->hbalock);
1079 	psli->sli_flag |= LPFC_PROCESS_LA;
1080 	control = readl(phba->HCregaddr);
1081 	control |= HC_LAINT_ENA;
1082 	writel(control, phba->HCregaddr);
1083 	readl(phba->HCregaddr); /* flush */
1084 	spin_unlock_irq(&phba->hbalock);
1085 }
1086 
1087 /*
1088  * This routine handles processing a READ_LA mailbox
1089  * command upon completion. It is set up in the LPFC_MBOXQ
1090  * as the completion routine when the command is
1091  * handed off to the SLI layer.
1092  */
1093 void
1094 lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1095 {
1096 	struct lpfc_vport *vport = pmb->vport;
1097 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
1098 	READ_LA_VAR *la;
1099 	MAILBOX_t *mb = &pmb->mb;
1100 	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
1101 
1102 	/* Check for error */
1103 	if (mb->mbxStatus) {
1104 		lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
1105 				"1307 READ_LA mbox error x%x state x%x\n",
1106 				mb->mbxStatus, vport->port_state);
1107 		lpfc_mbx_issue_link_down(phba);
1108 		phba->link_state = LPFC_HBA_ERROR;
1109 		goto lpfc_mbx_cmpl_read_la_free_mbuf;
1110 	}
1111 
1112 	la = (READ_LA_VAR *) &pmb->mb.un.varReadLA;
1113 
1114 	memcpy(&phba->alpa_map[0], mp->virt, 128);
1115 
1116 	spin_lock_irq(shost->host_lock);
1117 	if (la->pb)
1118 		vport->fc_flag |= FC_BYPASSED_MODE;
1119 	else
1120 		vport->fc_flag &= ~FC_BYPASSED_MODE;
1121 	spin_unlock_irq(shost->host_lock);
1122 
1123 	if (((phba->fc_eventTag + 1) < la->eventTag) ||
1124 	    (phba->fc_eventTag == la->eventTag)) {
1125 		phba->fc_stat.LinkMultiEvent++;
1126 		if (la->attType == AT_LINK_UP)
1127 			if (phba->fc_eventTag != 0)
1128 				lpfc_linkdown(phba);
1129 	}
1130 
1131 	phba->fc_eventTag = la->eventTag;
1132 
1133 	if (la->attType == AT_LINK_UP) {
1134 		phba->fc_stat.LinkUp++;
1135 		if (phba->link_flag & LS_LOOPBACK_MODE) {
1136 			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
1137 					"1306 Link Up Event in loop back mode "
1138 					"x%x received Data: x%x x%x x%x x%x\n",
1139 					la->eventTag, phba->fc_eventTag,
1140 					la->granted_AL_PA, la->UlnkSpeed,
1141 					phba->alpa_map[0]);
1142 		} else {
1143 			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
1144 					"1303 Link Up Event x%x received "
1145 					"Data: x%x x%x x%x x%x\n",
1146 					la->eventTag, phba->fc_eventTag,
1147 					la->granted_AL_PA, la->UlnkSpeed,
1148 					phba->alpa_map[0]);
1149 		}
1150 		lpfc_mbx_process_link_up(phba, la);
1151 	} else {
1152 		phba->fc_stat.LinkDown++;
1153 		lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
1154 				"1305 Link Down Event x%x received "
1155 				"Data: x%x x%x x%x\n",
1156 				la->eventTag, phba->fc_eventTag,
1157 				phba->pport->port_state, vport->fc_flag);
1158 		lpfc_mbx_issue_link_down(phba);
1159 	}
1160 
1161 lpfc_mbx_cmpl_read_la_free_mbuf:
1162 	lpfc_mbuf_free(phba, mp->virt, mp->phys);
1163 	kfree(mp);
1164 	mempool_free(pmb, phba->mbox_mem_pool);
1165 	return;
1166 }
1167 
1168 /*
1169  * This routine handles processing a REG_LOGIN mailbox
1170  * command upon completion. It is set up in the LPFC_MBOXQ
1171  * as the completion routine when the command is
1172  * handed off to the SLI layer.
1173  */
1174 void
1175 lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1176 {
1177 	struct lpfc_vport  *vport = pmb->vport;
1178 	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
1179 	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
1180 
1181 	pmb->context1 = NULL;
1182 
1183 	/* Good status, call state machine */
1184 	lpfc_disc_state_machine(vport, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN);
1185 	lpfc_mbuf_free(phba, mp->virt, mp->phys);
1186 	kfree(mp);
1187 	mempool_free(pmb, phba->mbox_mem_pool);
1188 	/* decrement the node reference count held for this callback
1189 	 * function.
1190 	 */
1191 	lpfc_nlp_put(ndlp);
1192 
1193 	return;
1194 }
1195 
1196 static void
1197 lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1198 {
1199 	MAILBOX_t *mb = &pmb->mb;
1200 	struct lpfc_vport *vport = pmb->vport;
1201 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
1202 
1203 	switch (mb->mbxStatus) {
1204 	case 0x0011:
1205 	case 0x0020:
1206 	case 0x9700:
1207 		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
1208 				 "0911 cmpl_unreg_vpi, mb status = 0x%x\n",
1209 				 mb->mbxStatus);
1210 		break;
1211 	}
1212 	vport->unreg_vpi_cmpl = VPORT_OK;
1213 	mempool_free(pmb, phba->mbox_mem_pool);
1214 	/*
1215 	 * This shost reference might have been taken at the beginning of
1216 	 * lpfc_vport_delete()
1217 	 */
1218 	if (vport->load_flag & FC_UNLOADING)
1219 		scsi_host_put(shost);
1220 }
1221 
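/*
 * Issue an UNREG_VPI mailbox command for this vport; completion is
 * handled by lpfc_mbx_cmpl_unreg_vpi() above.
 */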
1222 void
1223 lpfc_mbx_unreg_vpi(struct lpfc_vport *vport)
1224 {
1225 	struct lpfc_hba  *phba = vport->phba;
1226 	LPFC_MBOXQ_t *mbox;
1227 	int rc;
1228 
1229 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1230 	if (!mbox)
1231 		return;
1232 
1233 	lpfc_unreg_vpi(phba, vport->vpi, mbox);
1234 	mbox->vport = vport;
1235 	mbox->mbox_cmpl = lpfc_mbx_cmpl_unreg_vpi;
1236 	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
1237 	if (rc == MBX_NOT_FINISHED) {
1238 		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
1239 				 "1800 Could not issue unreg_vpi\n");
1240 		mempool_free(mbox, phba->mbox_mem_pool);
1241 		vport->unreg_vpi_cmpl = VPORT_ERROR;
1242 	}
1243 }
1244 
1245 static void
1246 lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1247 {
1248 	struct lpfc_vport *vport = pmb->vport;
1249 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
1250 	MAILBOX_t *mb = &pmb->mb;
1251 
1252 	switch (mb->mbxStatus) {
1253 	case 0x0011:
1254 	case 0x9601:
1255 	case 0x9602:
1256 		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
1257 				 "0912 cmpl_reg_vpi, mb status = 0x%x\n",
1258 				 mb->mbxStatus);
1259 		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1260 		spin_lock_irq(shost->host_lock);
1261 		vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
1262 		spin_unlock_irq(shost->host_lock);
1263 		vport->fc_myDID = 0;
1264 		goto out;
1265 	}
1266 
1267 	vport->num_disc_nodes = 0;
1268 	/* go thru NPR list and issue ELS PLOGIs */
1269 	if (vport->fc_npr_cnt)
1270 		lpfc_els_disc_plogi(vport);
1271 
1272 	if (!vport->num_disc_nodes) {
1273 		spin_lock_irq(shost->host_lock);
1274 		vport->fc_flag &= ~FC_NDISC_ACTIVE;
1275 		spin_unlock_irq(shost->host_lock);
1276 		lpfc_can_disctmo(vport);
1277 	}
1278 	vport->port_state = LPFC_VPORT_READY;
1279 
1280 out:
1281 	mempool_free(pmb, phba->mbox_mem_pool);
1282 	return;
1283 }
1284 
1285 /*
1286  * This routine handles processing a Fabric REG_LOGIN mailbox
1287  * command upon completion. It is set up in the LPFC_MBOXQ
1288  * as the completion routine when the command is
1289  * handed off to the SLI layer.
1290  */
1291 void
1292 lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1293 {
1294 	struct lpfc_vport *vport = pmb->vport;
1295 	MAILBOX_t *mb = &pmb->mb;
1296 	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
1297 	struct lpfc_nodelist *ndlp;
1298 	struct lpfc_vport **vports;
1299 	int i;
1300 
1301 	ndlp = (struct lpfc_nodelist *) pmb->context2;
1302 	pmb->context1 = NULL;
1303 	pmb->context2 = NULL;
1304 	if (mb->mbxStatus) {
1305 		lpfc_mbuf_free(phba, mp->virt, mp->phys);
1306 		kfree(mp);
1307 		mempool_free(pmb, phba->mbox_mem_pool);
1308 		lpfc_nlp_put(ndlp);
1309 
1310 		if (phba->fc_topology == TOPOLOGY_LOOP) {
1311 			/* FLOGI failed, use loop map to make discovery list */
1312 			lpfc_disc_list_loopmap(vport);
1313 
1314 			/* Start discovery */
1315 			lpfc_disc_start(vport);
1316 			return;
1317 		}
1318 
1319 		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1320 		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
1321 				 "0258 Register Fabric login error: 0x%x\n",
1322 				 mb->mbxStatus);
1323 		return;
1324 	}
1325 
1326 	ndlp->nlp_rpi = mb->un.varWords[0];
1327 	ndlp->nlp_type |= NLP_FABRIC;
1328 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1329 
1330 	lpfc_nlp_put(ndlp);	/* Drop the reference from the mbox */
1331 
1332 	if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
1333 		vports = lpfc_create_vport_work_array(phba);
1334 		if (vports != NULL)
1335 			for(i = 0;
1336 			    i <= phba->max_vpi && vports[i] != NULL;
1337 			    i++) {
1338 				if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
1339 					continue;
1340 				if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
1341 					lpfc_initial_fdisc(vports[i]);
1342 				else if (phba->sli3_options &
1343 						LPFC_SLI3_NPIV_ENABLED) {
1344 					lpfc_vport_set_state(vports[i],
1345 						FC_VPORT_NO_FABRIC_SUPP);
1346 					lpfc_printf_vlog(vport, KERN_ERR,
1347 							 LOG_ELS,
1348 							"0259 No NPIV "
1349 							"Fabric support\n");
1350 				}
1351 			}
1352 		lpfc_destroy_vport_work_array(phba, vports);
1353 		lpfc_do_scr_ns_plogi(phba, vport);
1354 	}
1355 
1356 	lpfc_mbuf_free(phba, mp->virt, mp->phys);
1357 	kfree(mp);
1358 	mempool_free(pmb, phba->mbox_mem_pool);
1359 	return;
1360 }
1361 
1362 /*
1363  * This routine handles processing a NameServer REG_LOGIN mailbox
1364  * command upon completion. It is set up in the LPFC_MBOXQ
1365  * as the completion routine when the command is
1366  * handed off to the SLI layer.
1367  */
1368 void
1369 lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1370 {
1371 	MAILBOX_t *mb = &pmb->mb;
1372 	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
1373 	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
1374 	struct lpfc_vport *vport = pmb->vport;
1375 
1376 	if (mb->mbxStatus) {
		/* Note: "out" is also reached via the goto below when the
		 * NameServer GID_FT query cannot be issued.
		 */
1377 out:
1378 		/* decrement the node reference count held for this
1379 		 * callback function.
1380 		 */
1381 		lpfc_nlp_put(ndlp);
1382 		lpfc_mbuf_free(phba, mp->virt, mp->phys);
1383 		kfree(mp);
1384 		mempool_free(pmb, phba->mbox_mem_pool);
1385 
1386 		/* If no other thread is using the ndlp, free it */
1387 		lpfc_nlp_not_used(ndlp);
1388 
1389 		if (phba->fc_topology == TOPOLOGY_LOOP) {
1390 			/*
1391 			 * RegLogin failed, use loop map to make discovery
1392 			 * list
1393 			 */
1394 			lpfc_disc_list_loopmap(vport);
1395 
1396 			/* Start discovery */
1397 			lpfc_disc_start(vport);
1398 			return;
1399 		}
1400 		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1401 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1402 				 "0260 Register NameServer error: 0x%x\n",
1403 				 mb->mbxStatus);
1404 		return;
1405 	}
1406 
1407 	pmb->context1 = NULL;
1408 
1409 	ndlp->nlp_rpi = mb->un.varWords[0];
1410 	ndlp->nlp_type |= NLP_FABRIC;
1411 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1412 
1413 	if (vport->port_state < LPFC_VPORT_READY) {
1414 		/* Link up discovery requires Fabric registration. */
1415 		lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, 0); /* Do this first! */
1416 		lpfc_ns_cmd(vport, SLI_CTNS_RNN_ID, 0, 0);
1417 		lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0);
1418 		lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
1419 		lpfc_ns_cmd(vport, SLI_CTNS_RFT_ID, 0, 0);
1420 
1421 		/* Issue SCR just before NameServer GID_FT Query */
1422 		lpfc_issue_els_scr(vport, SCR_DID, 0);
1423 	}
1424 
1425 	vport->fc_ns_retry = 0;
1426 	/* Good status, issue CT Request to NameServer */
1427 	if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0)) {
1428 		/* Cannot issue NameServer Query, so finish up discovery */
1429 		goto out;
1430 	}
1431 
1432 	/* decrement the node reference count held for this
1433 	 * callback function.
1434 	 */
1435 	lpfc_nlp_put(ndlp);
1436 	lpfc_mbuf_free(phba, mp->virt, mp->phys);
1437 	kfree(mp);
1438 	mempool_free(pmb, phba->mbox_mem_pool);
1439 
1440 	return;
1441 }
1442 
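/*
 * Register (or re-register) an ndlp with the fc transport as a remote
 * port, fill in the static port data, and report any FCP target or
 * initiator roles so attached scsi devices get unblocked.
 */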
1443 static void
1444 lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1445 {
1446 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1447 	struct fc_rport  *rport;
1448 	struct lpfc_rport_data *rdata;
1449 	struct fc_rport_identifiers rport_ids;
1450 	struct lpfc_hba  *phba = vport->phba;
1451 
1452 	/* Remote port has reappeared. Re-register w/ FC transport */
1453 	rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
1454 	rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
1455 	rport_ids.port_id = ndlp->nlp_DID;
1456 	rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
1457 
1458 	/*
1459 	 * We leave our node pointer in rport->dd_data when we unregister an
1460 	 * FCP target port.  But fc_remote_port_add zeros the space to which
1461 	 * rport->dd_data points.  So, if we're reusing a previously
1462 	 * registered port, drop the reference that we took the last time we
1463 	 * registered the port.
1464 	 */
1465 	if (ndlp->rport && ndlp->rport->dd_data &&
1466 	    ((struct lpfc_rport_data *) ndlp->rport->dd_data)->pnode == ndlp) {
1467 		lpfc_nlp_put(ndlp);
1468 	}
1469 
1470 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
1471 		"rport add:       did:x%x flg:x%x type x%x",
1472 		ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
1473 
1474 	ndlp->rport = rport = fc_remote_port_add(shost, 0, &rport_ids);
1475 	if (!rport || !get_device(&rport->dev)) {
1476 		dev_printk(KERN_WARNING, &phba->pcidev->dev,
1477 			   "Warning: fc_remote_port_add failed\n");
1478 		return;
1479 	}
1480 
1481 	/* initialize static port data */
1482 	rport->maxframe_size = ndlp->nlp_maxframe;
1483 	rport->supported_classes = ndlp->nlp_class_sup;
1484 	rdata = rport->dd_data;
1485 	rdata->pnode = lpfc_nlp_get(ndlp);
1486 
1487 	if (ndlp->nlp_type & NLP_FCP_TARGET)
1488 		rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
1489 	if (ndlp->nlp_type & NLP_FCP_INITIATOR)
1490 		rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
1491 
1493 	if (rport_ids.roles !=  FC_RPORT_ROLE_UNKNOWN)
1494 		fc_remote_port_rolechg(rport, rport_ids.roles);
1495 
1496 	if ((rport->scsi_target_id != -1) &&
1497 	    (rport->scsi_target_id < LPFC_MAX_TARGET)) {
1498 		ndlp->nlp_sid = rport->scsi_target_id;
1499 	}
1500 	return;
1501 }
1502 
1503 static void
1504 lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp)
1505 {
1506 	struct fc_rport *rport = ndlp->rport;
1507 
1508 	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
1509 		"rport delete:    did:x%x flg:x%x type x%x",
1510 		ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
1511 
1512 	fc_remote_port_delete(rport);
1513 
1514 	return;
1515 }
1516 
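/*
 * Adjust the vport's count of nodes in the given discovery state by
 * count (+1 or -1), under the host lock.
 */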
1517 static void
1518 lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count)
1519 {
1520 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1521 
1522 	spin_lock_irq(shost->host_lock);
1523 	switch (state) {
1524 	case NLP_STE_UNUSED_NODE:
1525 		vport->fc_unused_cnt += count;
1526 		break;
1527 	case NLP_STE_PLOGI_ISSUE:
1528 		vport->fc_plogi_cnt += count;
1529 		break;
1530 	case NLP_STE_ADISC_ISSUE:
1531 		vport->fc_adisc_cnt += count;
1532 		break;
1533 	case NLP_STE_REG_LOGIN_ISSUE:
1534 		vport->fc_reglogin_cnt += count;
1535 		break;
1536 	case NLP_STE_PRLI_ISSUE:
1537 		vport->fc_prli_cnt += count;
1538 		break;
1539 	case NLP_STE_UNMAPPED_NODE:
1540 		vport->fc_unmap_cnt += count;
1541 		break;
1542 	case NLP_STE_MAPPED_NODE:
1543 		vport->fc_map_cnt += count;
1544 		break;
1545 	case NLP_STE_NPR_NODE:
1546 		vport->fc_npr_cnt += count;
1547 		break;
1548 	}
1549 	spin_unlock_irq(shost->host_lock);
1550 }
1551 
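/*
 * Apply the side effects of a node state transition: fix up nlp_type
 * and flags, unregister the remote port when leaving a mapped or
 * unmapped state, register it when entering one, and fall back to the
 * unmapped state if transport registration failed or returned an
 * unusable SCSI target id.
 */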
1552 static void
1553 lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1554 		       int old_state, int new_state)
1555 {
1556 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1557 
1558 	if (new_state == NLP_STE_UNMAPPED_NODE) {
1559 		ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
1560 		ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
1561 		ndlp->nlp_type |= NLP_FC_NODE;
1562 	}
1563 	if (new_state == NLP_STE_MAPPED_NODE)
1564 		ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
1565 	if (new_state == NLP_STE_NPR_NODE)
1566 		ndlp->nlp_flag &= ~NLP_RCV_PLOGI;
1567 
1568 	/* Transport interface */
1569 	if (ndlp->rport && (old_state == NLP_STE_MAPPED_NODE ||
1570 			    old_state == NLP_STE_UNMAPPED_NODE)) {
1571 		vport->phba->nport_event_cnt++;
1572 		lpfc_unregister_remote_port(ndlp);
1573 	}
1574 
1575 	if (new_state ==  NLP_STE_MAPPED_NODE ||
1576 	    new_state == NLP_STE_UNMAPPED_NODE) {
1577 		vport->phba->nport_event_cnt++;
1578 		/*
1579 		 * Tell the fc transport about the port, if we haven't
1580 		 * already. If we have, and it's a scsi entity, be
1581 		 * sure to unblock any attached scsi devices
1582 		 */
1583 		lpfc_register_remote_port(vport, ndlp);
1584 	}
1585 	/*
1586 	 * if we added to Mapped list, but the remote port
1587 	 * registration failed or assigned a target id outside
1588 	 * our presentable range - move the node to the
1589 	 * Unmapped List
1590 	 */
1591 	if (new_state == NLP_STE_MAPPED_NODE &&
1592 	    (!ndlp->rport ||
1593 	     ndlp->rport->scsi_target_id == -1 ||
1594 	     ndlp->rport->scsi_target_id >= LPFC_MAX_TARGET)) {
1595 		spin_lock_irq(shost->host_lock);
1596 		ndlp->nlp_flag |= NLP_TGT_NO_SCSIID;
1597 		spin_unlock_irq(shost->host_lock);
1598 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1599 	}
1600 }
1601 
1602 static char *
1603 lpfc_nlp_state_name(char *buffer, size_t size, int state)
1604 {
1605 	static char *states[] = {
1606 		[NLP_STE_UNUSED_NODE] = "UNUSED",
1607 		[NLP_STE_PLOGI_ISSUE] = "PLOGI",
1608 		[NLP_STE_ADISC_ISSUE] = "ADISC",
1609 		[NLP_STE_REG_LOGIN_ISSUE] = "REGLOGIN",
1610 		[NLP_STE_PRLI_ISSUE] = "PRLI",
1611 		[NLP_STE_UNMAPPED_NODE] = "UNMAPPED",
1612 		[NLP_STE_MAPPED_NODE] = "MAPPED",
1613 		[NLP_STE_NPR_NODE] = "NPR",
1614 	};
1615 
1616 	if (state < NLP_STE_MAX_STATE && states[state])
1617 		strlcpy(buffer, states[state], size);
1618 	else
1619 		snprintf(buffer, size, "unknown (%d)", state);
1620 	return buffer;
1621 }
1622 
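/*
 * Move an ndlp to a new discovery state: log the transition, cancel a
 * pending retry delay when leaving NPR, add the node to the vport list
 * if needed, update the state counters, and run the transport cleanup
 * via lpfc_nlp_state_cleanup().
 */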
1623 void
1624 lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1625 		   int state)
1626 {
1627 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1628 	int  old_state = ndlp->nlp_state;
1629 	char name1[16], name2[16];
1630 
1631 	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
1632 			 "0904 NPort state transition x%06x, %s -> %s\n",
1633 			 ndlp->nlp_DID,
1634 			 lpfc_nlp_state_name(name1, sizeof(name1), old_state),
1635 			 lpfc_nlp_state_name(name2, sizeof(name2), state));
1636 
1637 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
1638 		"node statechg    did:x%x old:%d ste:%d",
1639 		ndlp->nlp_DID, old_state, state);
1640 
1641 	if (old_state == NLP_STE_NPR_NODE &&
1642 	    (ndlp->nlp_flag & NLP_DELAY_TMO) != 0 &&
1643 	    state != NLP_STE_NPR_NODE)
1644 		lpfc_cancel_retry_delay_tmo(vport, ndlp);
1645 	if (old_state == NLP_STE_UNMAPPED_NODE) {
1646 		ndlp->nlp_flag &= ~NLP_TGT_NO_SCSIID;
1647 		ndlp->nlp_type &= ~NLP_FC_NODE;
1648 	}
1649 
1650 	if (list_empty(&ndlp->nlp_listp)) {
1651 		spin_lock_irq(shost->host_lock);
1652 		list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
1653 		spin_unlock_irq(shost->host_lock);
1654 	} else if (old_state)
1655 		lpfc_nlp_counters(vport, old_state, -1);
1656 
1657 	ndlp->nlp_state = state;
1658 	lpfc_nlp_counters(vport, state, 1);
1659 	lpfc_nlp_state_cleanup(vport, ndlp, old_state, state);
1660 }
1661 
1662 void
1663 lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1664 {
1665 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1666 
1667 	if ((ndlp->nlp_flag & NLP_DELAY_TMO) != 0)
1668 		lpfc_cancel_retry_delay_tmo(vport, ndlp);
1669 	if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
1670 		lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
1671 	spin_lock_irq(shost->host_lock);
1672 	list_del_init(&ndlp->nlp_listp);
1673 	spin_unlock_irq(shost->host_lock);
1674 	lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
1675 			       NLP_STE_UNUSED_NODE);
1676 }
1677 
1678 void
1679 lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1680 {
1681 	/*
1682 	 * Use of lpfc_drop_node and UNUSED list: lpfc_drop_node should
1683 	 * be used if we wish to issue the "last" lpfc_nlp_put() to remove
1684 	 * the ndlp from the vport. The ndlp stays marked as UNUSED on the
1685 	 * list until ALL other outstanding threads have completed. We check
1686 	 * that the ndlp is not already in the UNUSED state before we proceed.
1687 	 */
1688 	if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
1689 		return;
1690 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
1691 	lpfc_nlp_put(ndlp);
1692 	return;
1693 }
1694 
1695 /*
1696  * Start / ReStart rescue timer for Discovery / RSCN handling
1697  */
1698 void
1699 lpfc_set_disctmo(struct lpfc_vport *vport)
1700 {
1701 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1702 	struct lpfc_hba  *phba = vport->phba;
1703 	uint32_t tmo;
1704 
1705 	if (vport->port_state == LPFC_LOCAL_CFG_LINK) {
1706 		/* For FAN, timeout should be greater than edtov */
1707 		tmo = (((phba->fc_edtov + 999) / 1000) + 1);
1708 	} else {
1709 		/* Normal discovery timeout should be greater than the ELS/CT timeout.
1710 		 * FC spec states we need 3 * ratov for CT requests
1711 		 */
1712 		tmo = ((phba->fc_ratov * 3) + 3);
1713 	}
1714 
1716 	if (!timer_pending(&vport->fc_disctmo)) {
1717 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1718 			"set disc timer:  tmo:x%x state:x%x flg:x%x",
1719 			tmo, vport->port_state, vport->fc_flag);
1720 	}
1721 
1722 	mod_timer(&vport->fc_disctmo, jiffies + HZ * tmo);
1723 	spin_lock_irq(shost->host_lock);
1724 	vport->fc_flag |= FC_DISC_TMO;
1725 	spin_unlock_irq(shost->host_lock);
1726 
1727 	/* Start Discovery Timer state <hba_state> */
1728 	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
1729 			 "0247 Start Discovery Timer state x%x "
1730 			 "Data: x%x x%lx x%x x%x\n",
1731 			 vport->port_state, tmo,
1732 			 (unsigned long)&vport->fc_disctmo, vport->fc_plogi_cnt,
1733 			 vport->fc_adisc_cnt);
1734 
1735 	return;
1736 }
1737 
1738 /*
1739  * Cancel rescue timer for Discovery / RSCN handling
1740  */
1741 int
1742 lpfc_can_disctmo(struct lpfc_vport *vport)
1743 {
1744 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1745 	unsigned long iflags;
1746 
1747 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1748 		"can disc timer:  state:x%x rtry:x%x flg:x%x",
1749 		vport->port_state, vport->fc_ns_retry, vport->fc_flag);
1750 
1751 	/* Turn off discovery timer if it's running */
1752 	if (vport->fc_flag & FC_DISC_TMO) {
1753 		spin_lock_irqsave(shost->host_lock, iflags);
1754 		vport->fc_flag &= ~FC_DISC_TMO;
1755 		spin_unlock_irqrestore(shost->host_lock, iflags);
1756 		del_timer_sync(&vport->fc_disctmo);
1757 		spin_lock_irqsave(&vport->work_port_lock, iflags);
1758 		vport->work_port_events &= ~WORKER_DISC_TMO;
1759 		spin_unlock_irqrestore(&vport->work_port_lock, iflags);
1760 	}
1761 
1762 	/* Cancel Discovery Timer state <hba_state> */
1763 	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
1764 			 "0248 Cancel Discovery Timer state x%x "
1765 			 "Data: x%x x%x x%x\n",
1766 			 vport->port_state, vport->fc_flag,
1767 			 vport->fc_plogi_cnt, vport->fc_adisc_cnt);
1768 	return 0;
1769 }
1770 
1771 /*
1772  * Check specified ring for outstanding IOCB on the SLI queue
1773  * Return true if iocb matches the specified nport
1774  */
1775 int
1776 lpfc_check_sli_ndlp(struct lpfc_hba *phba,
1777 		    struct lpfc_sli_ring *pring,
1778 		    struct lpfc_iocbq *iocb,
1779 		    struct lpfc_nodelist *ndlp)
1780 {
1781 	struct lpfc_sli *psli = &phba->sli;
1782 	IOCB_t *icmd = &iocb->iocb;
1783 	struct lpfc_vport    *vport = ndlp->vport;
1784 
1785 	if (iocb->vport != vport)
1786 		return 0;
1787 
1788 	if (pring->ringno == LPFC_ELS_RING) {
1789 		switch (icmd->ulpCommand) {
1790 		case CMD_GEN_REQUEST64_CR:
1791 			if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi)
1792 				return 1;
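			/* fall through - no break: a GEN_REQUEST that
			 * misses on the RPI is also checked against
			 * the ELS matches below
			 */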
1793 		case CMD_ELS_REQUEST64_CR:
1794 			if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID)
1795 				return 1;
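			/* fall through - no break: also try the ELS
			 * response match below
			 */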
1796 		case CMD_XMIT_ELS_RSP64_CX:
1797 			if (iocb->context1 == (uint8_t *) ndlp)
1798 				return 1;
1799 		}
1800 	} else if (pring->ringno == psli->extra_ring) {
1801 
1802 	} else if (pring->ringno == psli->fcp_ring) {
1803 		/* Skip match check if waiting to relogin to FCP target */
1804 		if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
1805 		    (ndlp->nlp_flag & NLP_DELAY_TMO)) {
1806 			return 0;
1807 		}
1808 		if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) {
1809 			return 1;
1810 		}
1811 	} else if (pring->ringno == psli->next_ring) {
1812 
1813 	}
1814 	return 0;
1815 }
1816 
1817 /*
1818  * Free resources / clean up outstanding I/Os
1819  * associated with nlp_rpi in the LPFC_NODELIST entry.
1820  */
1821 static int
1822 lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
1823 {
1824 	LIST_HEAD(completions);
1825 	struct lpfc_sli *psli;
1826 	struct lpfc_sli_ring *pring;
1827 	struct lpfc_iocbq *iocb, *next_iocb;
1828 	IOCB_t *icmd;
1829 	uint32_t rpi, i;
1830 
1831 	lpfc_fabric_abort_nport(ndlp);
1832 
1833 	/*
1834 	 * Everything that matches on txcmplq will be returned
1835 	 * by firmware with a no rpi error.
1836 	 */
1837 	psli = &phba->sli;
1838 	rpi = ndlp->nlp_rpi;
1839 	if (rpi) {
1840 		/* Now process each ring */
1841 		for (i = 0; i < psli->num_rings; i++) {
1842 			pring = &psli->ring[i];
1843 
1844 			spin_lock_irq(&phba->hbalock);
1845 			list_for_each_entry_safe(iocb, next_iocb, &pring->txq,
1846 						 list) {
1847 				/*
1848 				 * Check to see if iocb matches the nport we are
1849 				 * looking for
1850 				 */
1851 				if ((lpfc_check_sli_ndlp(phba, pring, iocb,
1852 							 ndlp))) {
1853 					/* It matches, so dequeue it and call
1854 					   its completion with an error */
1855 					list_move_tail(&iocb->list,
1856 						       &completions);
1857 					pring->txq_cnt--;
1858 				}
1859 			}
1860 			spin_unlock_irq(&phba->hbalock);
1861 		}
1862 	}
1863 
1864 	while (!list_empty(&completions)) {
1865 		iocb = list_get_first(&completions, struct lpfc_iocbq, list);
1866 		list_del_init(&iocb->list);
1867 
1868 		if (!iocb->iocb_cmpl)
1869 			lpfc_sli_release_iocbq(phba, iocb);
1870 		else {
1871 			icmd = &iocb->iocb;
1872 			icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
1873 			icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
1874 			(iocb->iocb_cmpl)(phba, iocb, iocb);
1875 		}
1876 	}
1877 
1878 	return 0;
1879 }
1880 
1881 /*
1882  * Free rpi associated with LPFC_NODELIST entry.
1883  * This routine is called from lpfc_freenode(), when we are removing
1884  * a LPFC_NODELIST entry. It is also called if the driver initiates a
1885  * LOGO that completes successfully, and we are waiting to PLOGI back
1886  * to the remote NPort. In addition, it is called after we receive
1887  * an unsolicited ELS cmd, send back a rsp, the rsp completes and
1888  * we are waiting to PLOGI back to the remote NPort.
1889  */
1890 int
1891 lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1892 {
1893 	struct lpfc_hba *phba = vport->phba;
1894 	LPFC_MBOXQ_t    *mbox;
1895 	int rc;
1896 
1897 	if (ndlp->nlp_rpi) {
1898 		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1899 		if (mbox) {
1900 			lpfc_unreg_login(phba, vport->vpi, ndlp->nlp_rpi, mbox);
1901 			mbox->vport = vport;
1902 			mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1903 			rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
1904 			if (rc == MBX_NOT_FINISHED)
1905 				mempool_free(mbox, phba->mbox_mem_pool);
1906 		}
1907 		lpfc_no_rpi(phba, ndlp);
1908 		ndlp->nlp_rpi = 0;
1909 		return 1;
1910 	}
1911 	return 0;
1912 }
1913 
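/*
 * Unregister every login (RPI) held by this vport with a single mailbox
 * command; an rpi value of 0xffff appears to serve as the "unregister
 * all RPIs" wildcard for UNREG_LOGIN.
 */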
1914 void
1915 lpfc_unreg_all_rpis(struct lpfc_vport *vport)
1916 {
1917 	struct lpfc_hba  *phba  = vport->phba;
1918 	LPFC_MBOXQ_t     *mbox;
1919 	int rc;
1920 
1921 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1922 	if (mbox) {
1923 		lpfc_unreg_login(phba, vport->vpi, 0xffff, mbox);
1924 		mbox->vport = vport;
1925 		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1926 		mbox->context1 = NULL;
1927 		rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
1928 		if (rc == MBX_NOT_FINISHED) {
1929 			mempool_free(mbox, phba->mbox_mem_pool);
1930 		}
1931 	}
1932 }
1933 
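/*
 * Same idea as lpfc_unreg_all_rpis() above, but for the default RPIs the
 * firmware allocates for unsolicited logins; here 0xffffffff appears to
 * serve as the "unregister all default RPIs" wildcard for UNREG_DID.
 */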
1934 void
1935 lpfc_unreg_default_rpis(struct lpfc_vport *vport)
1936 {
1937 	struct lpfc_hba  *phba  = vport->phba;
1938 	LPFC_MBOXQ_t     *mbox;
1939 	int rc;
1940 
1941 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1942 	if (mbox) {
1943 		lpfc_unreg_did(phba, vport->vpi, 0xffffffff, mbox);
1944 		mbox->vport = vport;
1945 		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1946 		mbox->context1 = NULL;
1947 		rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
1948 		if (rc == MBX_NOT_FINISHED) {
1949 			lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
1950 					 "1815 Could not issue "
1951 					 "unreg_did (default rpis)\n");
1952 			mempool_free(mbox, phba->mbox_mem_pool);
1953 		}
1954 	}
1955 }
1956 
1957 /*
1958  * Free resources associated with LPFC_NODELIST entry
1959  * so it can be freed.
1960  */
1961 static int
1962 lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1963 {
1964 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1965 	struct lpfc_hba  *phba = vport->phba;
1966 	LPFC_MBOXQ_t *mb, *nextmb;
1967 	struct lpfc_dmabuf *mp;
1968 
1969 	/* Cleanup node for NPort <nlp_DID> */
1970 	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
1971 			 "0900 Cleanup node for NPort x%x "
1972 			 "Data: x%x x%x x%x\n",
1973 			 ndlp->nlp_DID, ndlp->nlp_flag,
1974 			 ndlp->nlp_state, ndlp->nlp_rpi);
1975 	lpfc_dequeue_node(vport, ndlp);
1976 
1977 	/* cleanup any ndlp on mbox q waiting for reglogin cmpl */
1978 	if ((mb = phba->sli.mbox_active)) {
1979 		if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
1980 		   (ndlp == (struct lpfc_nodelist *) mb->context2)) {
1981 			mb->context2 = NULL;
1982 			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1983 		}
1984 	}
1985 
1986 	spin_lock_irq(&phba->hbalock);
1987 	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
1988 		if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
1989 		    (ndlp == (struct lpfc_nodelist *) mb->context2)) {
1990 			mp = (struct lpfc_dmabuf *) (mb->context1);
1991 			if (mp) {
1992 				__lpfc_mbuf_free(phba, mp->virt, mp->phys);
1993 				kfree(mp);
1994 			}
1995 			list_del(&mb->list);
1996 			mempool_free(mb, phba->mbox_mem_pool);
1997 			lpfc_nlp_put(ndlp);
1998 		}
1999 	}
2000 	spin_unlock_irq(&phba->hbalock);
2001 
2002 	lpfc_els_abort(phba, ndlp);
2003 	spin_lock_irq(shost->host_lock);
2004 	ndlp->nlp_flag &= ~NLP_DELAY_TMO;
2005 	spin_unlock_irq(shost->host_lock);
2006 
2007 	ndlp->nlp_last_elscmd = 0;
2008 	del_timer_sync(&ndlp->nlp_delayfunc);
2009 
2010 	if (!list_empty(&ndlp->els_retry_evt.evt_listp))
2011 		list_del_init(&ndlp->els_retry_evt.evt_listp);
2012 	if (!list_empty(&ndlp->dev_loss_evt.evt_listp))
2013 		list_del_init(&ndlp->dev_loss_evt.evt_listp);
2014 
2015 	lpfc_unreg_rpi(vport, ndlp);
2016 
2017 	return 0;
2018 }
2019 
2020 /*
2021  * Check to see if we can free the nlp back to the freelist.
2022  * If we are in the middle of using the nlp in the discovery state
2023  * machine, defer the free till we reach the end of the state machine.
2024  */
2025 static void
2026 lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2027 {
2028 	struct lpfc_hba  *phba = vport->phba;
2029 	struct lpfc_rport_data *rdata;
2030 	LPFC_MBOXQ_t *mbox;
2031 	int rc;
2032 
2033 	if (ndlp->nlp_flag & NLP_DELAY_TMO) {
2034 		lpfc_cancel_retry_delay_tmo(vport, ndlp);
2035 	}
2036 
2037 	if (ndlp->nlp_flag & NLP_DEFER_RM && !ndlp->nlp_rpi) {
2038 		/* For this case we need to cleanup the default rpi
2039 		 * allocated by the firmware.
2040 		 */
2041 		if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))
2042 			!= NULL) {
2043 			rc = lpfc_reg_login(phba, vport->vpi, ndlp->nlp_DID,
2044 			    (uint8_t *) &vport->fc_sparam, mbox, 0);
2045 			if (rc) {
2046 				mempool_free(mbox, phba->mbox_mem_pool);
2047 			}
2048 			else {
2049 				mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
2050 				mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
2051 				mbox->vport = vport;
2052 				mbox->context2 = NULL;
2053 				rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
2054 				if (rc == MBX_NOT_FINISHED) {
2055 					mempool_free(mbox, phba->mbox_mem_pool);
2056 				}
2057 			}
2058 		}
2059 	}
2060 
2061 	lpfc_cleanup_node(vport, ndlp);
2062 
2063 	/*
2064 	 * We can get here with a non-NULL ndlp->rport because when we
2065 	 * unregister a rport we don't break the rport/node linkage.  So if we
2066 	 * do, make sure we don't leave any dangling pointers behind.
2067 	 */
2068 	if (ndlp->rport) {
2069 		rdata = ndlp->rport->dd_data;
2070 		rdata->pnode = NULL;
2071 		ndlp->rport = NULL;
2072 	}
2073 }
2074 
2075 static int
2076 lpfc_matchdid(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2077 	      uint32_t did)
2078 {
2079 	D_ID mydid, ndlpdid, matchdid;
2080 
2081 	if (did == Bcast_DID)
2082 		return 0;
2083 
2084 	if (ndlp->nlp_DID == 0) {
2085 		return 0;
2086 	}
2087 
2088 	/* First check for Direct match */
2089 	if (ndlp->nlp_DID == did)
2090 		return 1;
2091 
2092 	/* Next check for area/domain identically equals 0 match */
2093 	mydid.un.word = vport->fc_myDID;
2094 	if ((mydid.un.b.domain == 0) && (mydid.un.b.area == 0)) {
2095 		return 0;
2096 	}
2097 
2098 	matchdid.un.word = did;
2099 	ndlpdid.un.word = ndlp->nlp_DID;
2100 	if (matchdid.un.b.id == ndlpdid.un.b.id) {
2101 		if ((mydid.un.b.domain == matchdid.un.b.domain) &&
2102 		    (mydid.un.b.area == matchdid.un.b.area)) {
2103 			if ((ndlpdid.un.b.domain == 0) &&
2104 			    (ndlpdid.un.b.area == 0)) {
2105 				if (ndlpdid.un.b.id)
2106 					return 1;
2107 			}
2108 			return 0;
2109 		}
2110 
2111 		matchdid.un.word = ndlp->nlp_DID;
2112 		if ((mydid.un.b.domain == ndlpdid.un.b.domain) &&
2113 		    (mydid.un.b.area == ndlpdid.un.b.area)) {
2114 			if ((matchdid.un.b.domain == 0) &&
2115 			    (matchdid.un.b.area == 0)) {
2116 				if (matchdid.un.b.id)
2117 					return 1;
2118 			}
2119 		}
2120 	}
2121 	return 0;
2122 }
2123 
2124 /* Search for a nodelist entry */
2125 static struct lpfc_nodelist *
2126 __lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
2127 {
2128 	struct lpfc_nodelist *ndlp;
2129 	uint32_t data1;
2130 
2131 	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
2132 		if (lpfc_matchdid(vport, ndlp, did)) {
2133 			data1 = (((uint32_t) ndlp->nlp_state << 24) |
2134 				 ((uint32_t) ndlp->nlp_xri << 16) |
2135 				 ((uint32_t) ndlp->nlp_type << 8) |
2136 				 ((uint32_t) ndlp->nlp_rpi & 0xff));
2137 			lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
2138 					 "0929 FIND node DID "
2139 					 "Data: x%p x%x x%x x%x\n",
2140 					 ndlp, ndlp->nlp_DID,
2141 					 ndlp->nlp_flag, data1);
2142 			return ndlp;
2143 		}
2144 	}
2145 
2146 	/* FIND node did <did> NOT FOUND */
2147 	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
2148 			 "0932 FIND node did x%x NOT FOUND.\n", did);
2149 	return NULL;
2150 }
2151 
2152 struct lpfc_nodelist *
2153 lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
2154 {
2155 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2156 	struct lpfc_nodelist *ndlp;
2157 
2158 	spin_lock_irq(shost->host_lock);
2159 	ndlp = __lpfc_findnode_did(vport, did);
2160 	spin_unlock_irq(shost->host_lock);
2161 	return ndlp;
2162 }
2163 
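/*
 * Set up an ndlp for discovery of the given did: allocate and initialize
 * one if none exists yet, mark it NLP_NPR_2B_DISC, and return it. Returns
 * NULL when the did does not need to be discovered, e.g. because it is
 * outside the current RSCN payload, or because a PLOGI was already
 * received from (or discovery is already in progress against) that NPort.
 */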
2164 struct lpfc_nodelist *
2165 lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
2166 {
2167 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2168 	struct lpfc_nodelist *ndlp;
2169 
2170 	ndlp = lpfc_findnode_did(vport, did);
2171 	if (!ndlp) {
2172 		if ((vport->fc_flag & FC_RSCN_MODE) != 0 &&
2173 		    lpfc_rscn_payload_check(vport, did) == 0)
2174 			return NULL;
2175 		ndlp = (struct lpfc_nodelist *)
2176 		     mempool_alloc(vport->phba->nlp_mem_pool, GFP_KERNEL);
2177 		if (!ndlp)
2178 			return NULL;
2179 		lpfc_nlp_init(vport, ndlp, did);
2180 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
2181 		spin_lock_irq(shost->host_lock);
2182 		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2183 		spin_unlock_irq(shost->host_lock);
2184 		return ndlp;
2185 	}
2186 	if (vport->fc_flag & FC_RSCN_MODE) {
2187 		if (lpfc_rscn_payload_check(vport, did)) {
2188 			/* If we've already received a PLOGI from this NPort
2189 			 * we don't need to try to discover it again.
2190 			 */
2191 			if (ndlp->nlp_flag & NLP_RCV_PLOGI)
2192 				return NULL;
2193 
2194 			spin_lock_irq(shost->host_lock);
2195 			ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2196 			spin_unlock_irq(shost->host_lock);
2197 
2198 			/* Since this node is marked for discovery,
2199 			 * delay timeout is not needed.
2200 			 */
2201 			if (ndlp->nlp_flag & NLP_DELAY_TMO)
2202 				lpfc_cancel_retry_delay_tmo(vport, ndlp);
2203 		} else
2204 			ndlp = NULL;
2205 	} else {
2206 		/* If we've already received a PLOGI from this NPort,
2207 		 * or we are already in the process of discovery on it,
2208 		 * we don't need to try to discover it again.
2209 		 */
2210 		if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE ||
2211 		    ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
2212 		    ndlp->nlp_flag & NLP_RCV_PLOGI)
2213 			return NULL;
2214 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
2215 		spin_lock_irq(shost->host_lock);
2216 		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2217 		spin_unlock_irq(shost->host_lock);
2218 	}
2219 	return ndlp;
2220 }
2221 
2222 /* Build a list of nodes to discover based on the loopmap */
2223 void
2224 lpfc_disc_list_loopmap(struct lpfc_vport *vport)
2225 {
2226 	struct lpfc_hba  *phba = vport->phba;
2227 	int j;
2228 	uint32_t alpa, index;
2229 
2230 	if (!lpfc_is_link_up(phba))
2231 		return;
2232 
2233 	if (phba->fc_topology != TOPOLOGY_LOOP)
2234 		return;
2235 
2236 	/* Check for loop map present or not */
2237 	if (phba->alpa_map[0]) {
2238 		for (j = 1; j <= phba->alpa_map[0]; j++) {
2239 			alpa = phba->alpa_map[j];
2240 			if (((vport->fc_myDID & 0xff) == alpa) || (alpa == 0))
2241 				continue;
2242 			lpfc_setup_disc_node(vport, alpa);
2243 		}
2244 	} else {
2245 		/* No alpamap, so try all alpa's */
2246 		for (j = 0; j < FC_MAXLOOP; j++) {
2247 			/* If cfg_scan_down is set, start from highest
2248 			 * ALPA (0xef) to lowest (0x1).
2249 			 */
2250 			if (vport->cfg_scan_down)
2251 				index = j;
2252 			else
2253 				index = FC_MAXLOOP - j - 1;
2254 			alpa = lpfcAlpaArray[index];
2255 			if ((vport->fc_myDID & 0xff) == alpa)
2256 				continue;
2257 			lpfc_setup_disc_node(vport, alpa);
2258 		}
2259 	}
2260 	return;
2261 }
2262 
2263 void
2264 lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport)
2265 {
2266 	LPFC_MBOXQ_t *mbox;
2267 	struct lpfc_sli *psli = &phba->sli;
2268 	struct lpfc_sli_ring *extra_ring = &psli->ring[psli->extra_ring];
2269 	struct lpfc_sli_ring *fcp_ring   = &psli->ring[psli->fcp_ring];
2270 	struct lpfc_sli_ring *next_ring  = &psli->ring[psli->next_ring];
2271 	int  rc;
2272 
2273 	/*
2274 	 * If it's not a physical port, or if we have already sent
2275 	 * clear_la, then don't send it again.
2276 	 */
2277 	if ((phba->link_state >= LPFC_CLEAR_LA) ||
2278 	    (vport->port_type != LPFC_PHYSICAL_PORT))
2279 		return;
2280 
2281 			/* Link up discovery */
2282 	if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL)) != NULL) {
2283 		phba->link_state = LPFC_CLEAR_LA;
2284 		lpfc_clear_la(phba, mbox);
2285 		mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
2286 		mbox->vport = vport;
2287 		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
2288 		if (rc == MBX_NOT_FINISHED) {
2289 			mempool_free(mbox, phba->mbox_mem_pool);
2290 			lpfc_disc_flush_list(vport);
2291 			extra_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
2292 			fcp_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
2293 			next_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
2294 			phba->link_state = LPFC_HBA_ERROR;
2295 		}
2296 	}
2297 }
2298 
2299 /* Reg_vpi to tell firmware to resume normal operations */
2300 void
2301 lpfc_issue_reg_vpi(struct lpfc_hba *phba, struct lpfc_vport *vport)
2302 {
2303 	LPFC_MBOXQ_t *regvpimbox;
2304 
2305 	regvpimbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2306 	if (regvpimbox) {
2307 		lpfc_reg_vpi(phba, vport->vpi, vport->fc_myDID, regvpimbox);
2308 		regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi;
2309 		regvpimbox->vport = vport;
2310 		if (lpfc_sli_issue_mbox(phba, regvpimbox, MBX_NOWAIT)
2311 					== MBX_NOT_FINISHED) {
2312 			mempool_free(regvpimbox, phba->mbox_mem_pool);
2313 		}
2314 	}
2315 }
2316 
2317 /* Start Link up / RSCN discovery on NPR nodes */
2318 void
2319 lpfc_disc_start(struct lpfc_vport *vport)
2320 {
2321 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2322 	struct lpfc_hba  *phba = vport->phba;
2323 	uint32_t num_sent;
2324 	uint32_t clear_la_pending;
2325 	int did_changed;
2326 
2327 	if (!lpfc_is_link_up(phba))
2328 		return;
2329 
2330 	if (phba->link_state == LPFC_CLEAR_LA)
2331 		clear_la_pending = 1;
2332 	else
2333 		clear_la_pending = 0;
2334 
2335 	if (vport->port_state < LPFC_VPORT_READY)
2336 		vport->port_state = LPFC_DISC_AUTH;
2337 
2338 	lpfc_set_disctmo(vport);
2339 
2340 	if (vport->fc_prevDID == vport->fc_myDID)
2341 		did_changed = 0;
2342 	else
2343 		did_changed = 1;
2344 
2345 	vport->fc_prevDID = vport->fc_myDID;
2346 	vport->num_disc_nodes = 0;
2347 
2348 	/* Start Discovery state <hba_state> */
2349 	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
2350 			 "0202 Start Discovery hba state x%x "
2351 			 "Data: x%x x%x x%x\n",
2352 			 vport->port_state, vport->fc_flag, vport->fc_plogi_cnt,
2353 			 vport->fc_adisc_cnt);
2354 
2355 	/* First do ADISCs - if any */
2356 	num_sent = lpfc_els_disc_adisc(vport);
2357 
2358 	if (num_sent)
2359 		return;
2360 
2361 	/*
2362 	 * For SLI3, cmpl_reg_vpi will set port_state to READY, and
2363 	 * continue discovery.
2364 	 */
2365 	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
2366 	    !(vport->fc_flag & FC_RSCN_MODE)) {
2367 		lpfc_issue_reg_vpi(phba, vport);
2368 		return;
2369 	}
2370 
2371 	/*
2372 	 * For SLI2, we need to set port_state to READY and continue
2373 	 * discovery.
2374 	 */
2375 	if (vport->port_state < LPFC_VPORT_READY && !clear_la_pending) {
2376 		/* If we get here, there is nothing to ADISC */
2377 		if (vport->port_type == LPFC_PHYSICAL_PORT)
2378 			lpfc_issue_clear_la(phba, vport);
2379 
2380 		if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
2381 			vport->num_disc_nodes = 0;
2382 			/* go thru NPR nodes and issue ELS PLOGIs */
2383 			if (vport->fc_npr_cnt)
2384 				lpfc_els_disc_plogi(vport);
2385 
2386 			if (!vport->num_disc_nodes) {
2387 				spin_lock_irq(shost->host_lock);
2388 				vport->fc_flag &= ~FC_NDISC_ACTIVE;
2389 				spin_unlock_irq(shost->host_lock);
2390 				lpfc_can_disctmo(vport);
2391 			}
2392 		}
2393 		vport->port_state = LPFC_VPORT_READY;
2394 	} else {
2395 		/* Next do PLOGIs - if any */
2396 		num_sent = lpfc_els_disc_plogi(vport);
2397 
2398 		if (num_sent)
2399 			return;
2400 
2401 		if (vport->fc_flag & FC_RSCN_MODE) {
2402 			/* Check to see if more RSCNs came in while we
2403 			 * were processing this one.
2404 			 */
2405 			if ((vport->fc_rscn_id_cnt == 0) &&
2406 			    (!(vport->fc_flag & FC_RSCN_DISCOVERY))) {
2407 				spin_lock_irq(shost->host_lock);
2408 				vport->fc_flag &= ~FC_RSCN_MODE;
2409 				spin_unlock_irq(shost->host_lock);
2410 				lpfc_can_disctmo(vport);
2411 			} else
2412 				lpfc_els_handle_rscn(vport);
2413 		}
2414 	}
2415 	return;
2416 }
2417 
2418 /*
2419  *  Ignore completion for all IOCBs on the tx and txcmpl queues for the
2420  *  ELS ring that match the specified nodelist.
2421  */
2422 static void
2423 lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
2424 {
2425 	LIST_HEAD(completions);
2426 	struct lpfc_sli *psli;
2427 	IOCB_t     *icmd;
2428 	struct lpfc_iocbq    *iocb, *next_iocb;
2429 	struct lpfc_sli_ring *pring;
2430 
2431 	psli = &phba->sli;
2432 	pring = &psli->ring[LPFC_ELS_RING];
2433 
2434 	/* Error out any matching iocb on the txq or txcmplq.
2435 	 * First check the txq.
2436 	 */
2437 	spin_lock_irq(&phba->hbalock);
2438 	list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
2439 		if (iocb->context1 != ndlp) {
2440 			continue;
2441 		}
2442 		icmd = &iocb->iocb;
2443 		if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) ||
2444 		    (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {
2445 
2446 			list_move_tail(&iocb->list, &completions);
2447 			pring->txq_cnt--;
2448 		}
2449 	}
2450 
2451 	/* Next check the txcmplq */
2452 	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
2453 		if (iocb->context1 != ndlp) {
2454 			continue;
2455 		}
2456 		icmd = &iocb->iocb;
2457 		if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR ||
2458 		    icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX) {
2459 			lpfc_sli_issue_abort_iotag(phba, pring, iocb);
2460 		}
2461 	}
2462 	spin_unlock_irq(&phba->hbalock);
2463 
2464 	while (!list_empty(&completions)) {
2465 		iocb = list_get_first(&completions, struct lpfc_iocbq, list);
2466 		list_del_init(&iocb->list);
2467 
2468 		if (!iocb->iocb_cmpl)
2469 			lpfc_sli_release_iocbq(phba, iocb);
2470 		else {
2471 			icmd = &iocb->iocb;
2472 			icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
2473 			icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
2474 			(iocb->iocb_cmpl) (phba, iocb, iocb);
2475 		}
2476 	}
2477 }
2478 
2479 static void
2480 lpfc_disc_flush_list(struct lpfc_vport *vport)
2481 {
2482 	struct lpfc_nodelist *ndlp, *next_ndlp;
2483 	struct lpfc_hba *phba = vport->phba;
2484 
2485 	if (vport->fc_plogi_cnt || vport->fc_adisc_cnt) {
2486 		list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
2487 					 nlp_listp) {
2488 			if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
2489 			    ndlp->nlp_state == NLP_STE_ADISC_ISSUE) {
2490 				lpfc_free_tx(phba, ndlp);
2491 			}
2492 		}
2493 	}
2494 }
2495 
2496 void
2497 lpfc_cleanup_discovery_resources(struct lpfc_vport *vport)
2498 {
2499 	lpfc_els_flush_rscn(vport);
2500 	lpfc_els_flush_cmd(vport);
2501 	lpfc_disc_flush_list(vport);
2502 }
2503 
2504 /*****************************************************************************/
2505 /*
2506  * NAME:     lpfc_disc_timeout
2507  *
2508  * FUNCTION: Fibre Channel driver discovery timeout routine.
2509  *
2510  * EXECUTION ENVIRONMENT: interrupt only
2511  *
2512  * CALLED FROM:
2513  *      Timer function
2514  *
2515  * RETURNS:
2516  *      none
2517  */
2518 /*****************************************************************************/
2519 void
2520 lpfc_disc_timeout(unsigned long ptr)
2521 {
2522 	struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
2523 	struct lpfc_hba   *phba = vport->phba;
2524 	unsigned long flags = 0;
2525 
2526 	if (unlikely(!phba))
2527 		return;
2528 
2529 	if ((vport->work_port_events & WORKER_DISC_TMO) == 0) {
2530 		spin_lock_irqsave(&vport->work_port_lock, flags);
2531 		vport->work_port_events |= WORKER_DISC_TMO;
2532 		spin_unlock_irqrestore(&vport->work_port_lock, flags);
2533 
2534 		spin_lock_irqsave(&phba->hbalock, flags);
2535 		if (phba->work_wait)
2536 			lpfc_worker_wake_up(phba);
2537 		spin_unlock_irqrestore(&phba->hbalock, flags);
2538 	}
2539 	return;
2540 }
2541 
2542 static void
2543 lpfc_disc_timeout_handler(struct lpfc_vport *vport)
2544 {
2545 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2546 	struct lpfc_hba  *phba = vport->phba;
2547 	struct lpfc_sli  *psli = &phba->sli;
2548 	struct lpfc_nodelist *ndlp, *next_ndlp;
2549 	LPFC_MBOXQ_t *initlinkmbox;
2550 	int rc, clrlaerr = 0;
2551 
2552 	if (!(vport->fc_flag & FC_DISC_TMO))
2553 		return;
2554 
2555 	spin_lock_irq(shost->host_lock);
2556 	vport->fc_flag &= ~FC_DISC_TMO;
2557 	spin_unlock_irq(shost->host_lock);
2558 
2559 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2560 		"disc timeout:    state:x%x rtry:x%x flg:x%x",
2561 		vport->port_state, vport->fc_ns_retry, vport->fc_flag);
2562 
2563 	switch (vport->port_state) {
2564 
2565 	case LPFC_LOCAL_CFG_LINK:
2566 	/* port_state is identically LPFC_LOCAL_CFG_LINK while waiting for
2567 	 * FAN
2568 	 */
2569 				/* FAN timeout */
2570 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY,
2571 				 "0221 FAN timeout\n");
2572 		/* Start discovery by sending FLOGI, clean up old rpis */
2573 		list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
2574 					 nlp_listp) {
2575 			if (ndlp->nlp_state != NLP_STE_NPR_NODE)
2576 				continue;
2577 			if (ndlp->nlp_type & NLP_FABRIC) {
2578 				/* Clean up the ndlp on Fabric connections */
2579 				lpfc_drop_node(vport, ndlp);
2580 
2581 			} else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
2582 				/* Fail outstanding IO now since device
2583 				 * is marked for PLOGI.
2584 				 */
2585 				lpfc_unreg_rpi(vport, ndlp);
2586 			}
2587 		}
2588 		if (vport->port_state != LPFC_FLOGI) {
2589 			lpfc_initial_flogi(vport);
2590 			return;
2591 		}
2592 		break;
2593 
2594 	case LPFC_FDISC:
2595 	case LPFC_FLOGI:
2596 	/* port_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */
2597 		/* Initial FLOGI timeout */
2598 		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
2599 				 "0222 Initial %s timeout\n",
2600 				 vport->vpi ? "FDISC" : "FLOGI");
2601 
2602 		/* Assume no Fabric and go on with discovery.
2603 		 * Check for outstanding ELS FLOGI to abort.
2604 		 */
2605 
2606 		/* FLOGI failed, so just use loop map to make discovery list */
2607 		lpfc_disc_list_loopmap(vport);
2608 
2609 		/* Start discovery */
2610 		lpfc_disc_start(vport);
2611 		break;
2612 
2613 	case LPFC_FABRIC_CFG_LINK:
2614 	/* port_state is identically LPFC_FABRIC_CFG_LINK while waiting for
2615 	   NameServer login */
2616 		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
2617 				 "0223 Timeout while waiting for "
2618 				 "NameServer login\n");
2619 		/* Next look for NameServer ndlp */
2620 		ndlp = lpfc_findnode_did(vport, NameServer_DID);
2621 		if (ndlp)
2622 			lpfc_els_abort(phba, ndlp);
2623 
2624 		/* ReStart discovery */
2625 		goto restart_disc;
2626 
2627 	case LPFC_NS_QRY:
2628 	/* Check for wait for NameServer Rsp timeout */
2629 		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
2630 				 "0224 NameServer Query timeout "
2631 				 "Data: x%x x%x\n",
2632 				 vport->fc_ns_retry, LPFC_MAX_NS_RETRY);
2633 
2634 		if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
2635 			/* Try it one more time */
2636 			vport->fc_ns_retry++;
2637 			rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT,
2638 					 vport->fc_ns_retry, 0);
2639 			if (rc == 0)
2640 				break;
2641 		}
2642 		vport->fc_ns_retry = 0;
2643 
2644 restart_disc:
2645 		/*
2646 		 * Discovery is over.
2647 		 * set port_state to LPFC_VPORT_READY if SLI2.
2648 		 * cmpl_reg_vpi will set port_state to READY for SLI3.
2649 		 */
2650 		if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
2651 			lpfc_issue_reg_vpi(phba, vport);
2652 		else  {	/* NPIV Not enabled */
2653 			lpfc_issue_clear_la(phba, vport);
2654 			vport->port_state = LPFC_VPORT_READY;
2655 		}
2656 
2657 		/* Setup and issue mailbox INITIALIZE LINK command */
2658 		initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2659 		if (!initlinkmbox) {
2660 			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
2661 					 "0206 Device Discovery "
2662 					 "completion error\n");
2663 			phba->link_state = LPFC_HBA_ERROR;
2664 			break;
2665 		}
2666 
2667 		lpfc_linkdown(phba);
2668 		lpfc_init_link(phba, initlinkmbox, phba->cfg_topology,
2669 			       phba->cfg_link_speed);
2670 		initlinkmbox->mb.un.varInitLnk.lipsr_AL_PA = 0;
2671 		initlinkmbox->vport = vport;
2672 		initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
2673 		rc = lpfc_sli_issue_mbox(phba, initlinkmbox, MBX_NOWAIT);
2674 		lpfc_set_loopback_flag(phba);
2675 		if (rc == MBX_NOT_FINISHED)
2676 			mempool_free(initlinkmbox, phba->mbox_mem_pool);
2677 
2678 		break;
2679 
2680 	case LPFC_DISC_AUTH:
2681 	/* Node Authentication timeout */
2682 		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
2683 				 "0227 Node Authentication timeout\n");
2684 		lpfc_disc_flush_list(vport);
2685 
2686 		/*
2687 		 * set port_state to LPFC_VPORT_READY if SLI2.
2688 		 * cmpl_reg_vpi will set port_state to READY for SLI3.
2689 		 */
2690 		if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
2691 			lpfc_issue_reg_vpi(phba, vport);
2692 		else {	/* NPIV Not enabled */
2693 			lpfc_issue_clear_la(phba, vport);
2694 			vport->port_state = LPFC_VPORT_READY;
2695 		}
2696 		break;
2697 
2698 	case LPFC_VPORT_READY:
2699 		if (vport->fc_flag & FC_RSCN_MODE) {
2700 			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
2701 					 "0231 RSCN timeout Data: x%x "
2702 					 "x%x\n",
2703 					 vport->fc_ns_retry, LPFC_MAX_NS_RETRY);
2704 
2705 			/* Cleanup any outstanding ELS commands */
2706 			lpfc_els_flush_cmd(vport);
2707 
2708 			lpfc_els_flush_rscn(vport);
2709 			lpfc_disc_flush_list(vport);
2710 		}
2711 		break;
2712 
2713 	default:
2714 		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
2715 				 "0229 Unexpected discovery timeout, "
2716 				 "vport State x%x\n", vport->port_state);
2717 		break;
2718 	}
2719 
2720 	switch (phba->link_state) {
2721 	case LPFC_CLEAR_LA:
2722 				/* CLEAR LA timeout */
2723 		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
2724 				 "0228 CLEAR LA timeout\n");
2725 		clrlaerr = 1;
2726 		break;
2727 
2728 	case LPFC_LINK_UP:
2729 		lpfc_issue_clear_la(phba, vport);
2730 		/* Drop thru */
2731 	case LPFC_LINK_UNKNOWN:
2732 	case LPFC_WARM_START:
2733 	case LPFC_INIT_START:
2734 	case LPFC_INIT_MBX_CMDS:
2735 	case LPFC_LINK_DOWN:
2736 	case LPFC_HBA_ERROR:
2737 		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
2738 				 "0230 Unexpected timeout, hba link "
2739 				 "state x%x\n", phba->link_state);
2740 		clrlaerr = 1;
2741 		break;
2742 
2743 	case LPFC_HBA_READY:
2744 		break;
2745 	}
2746 
2747 	if (clrlaerr) {
2748 		lpfc_disc_flush_list(vport);
2749 		psli->ring[(psli->extra_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
2750 		psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
2751 		psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
2752 		vport->port_state = LPFC_VPORT_READY;
2753 	}
2754 
2755 	return;
2756 }
2757 
2758 /*
2759  * This routine handles processing a FDMI REG_LOGIN mailbox
2760  * command upon completion. It is set up in the LPFC_MBOXQ
2761  * as the completion routine when the command is
2762  * handed off to the SLI layer.
2763  */
2764 void
2765 lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2766 {
2767 	MAILBOX_t *mb = &pmb->mb;
2768 	struct lpfc_dmabuf   *mp = (struct lpfc_dmabuf *) (pmb->context1);
2769 	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
2770 	struct lpfc_vport    *vport = pmb->vport;
2771 
2772 	pmb->context1 = NULL;
2773 
2774 	ndlp->nlp_rpi = mb->un.varWords[0];
2775 	ndlp->nlp_type |= NLP_FABRIC;
2776 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
2777 
2778 	/*
2779 	 * Start issuing the Fabric-Device Management Interface (FDMI) command
2780 	 * to 0xfffffa (the FDMI well-known port), or delay issuing the FDMI
2781 	 * command if fdmi-on=2 (supporting RPA/hostname).
2782 	 */
2783 
2784 	if (vport->cfg_fdmi_on == 1)
2785 		lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA);
2786 	else
2787 		mod_timer(&vport->fc_fdmitmo, jiffies + HZ * 60);
2788 
2789 	/* decrement the node reference count held for this callback
2790 	 * function.
2791 	 */
2792 	lpfc_nlp_put(ndlp);
2793 	lpfc_mbuf_free(phba, mp->virt, mp->phys);
2794 	kfree(mp);
2795 	mempool_free(pmb, phba->mbox_mem_pool);
2796 
2797 	return;
2798 }
2799 
2800 static int
2801 lpfc_filter_by_rpi(struct lpfc_nodelist *ndlp, void *param)
2802 {
2803 	uint16_t *rpi = param;
2804 
2805 	return ndlp->nlp_rpi == *rpi;
2806 }
2807 
2808 static int
2809 lpfc_filter_by_wwpn(struct lpfc_nodelist *ndlp, void *param)
2810 {
2811 	return memcmp(&ndlp->nlp_portname, param,
2812 		      sizeof(ndlp->nlp_portname)) == 0;
2813 }
2814 
2815 static struct lpfc_nodelist *
2816 __lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
2817 {
2818 	struct lpfc_nodelist *ndlp;
2819 
2820 	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
2821 		if (filter(ndlp, param))
2822 			return ndlp;
2823 	}
2824 	return NULL;
2825 }
2826 
2827 #if 0
2828 /*
2829  * Search node lists for a remote port matching filter criteria
2830  * Caller needs to hold host_lock before calling this routine.
2831  */
2832 struct lpfc_nodelist *
2833 lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
2834 {
2835 	struct Scsi_Host     *shost = lpfc_shost_from_vport(vport);
2836 	struct lpfc_nodelist *ndlp;
2837 
2838 	spin_lock_irq(shost->host_lock);
2839 	ndlp = __lpfc_find_node(vport, filter, param);
2840 	spin_unlock_irq(shost->host_lock);
2841 	return ndlp;
2842 }
2843 #endif  /*  0  */
2844 
2845 /*
2846  * This routine looks up the ndlp list for the given RPI. If the rpi is
2847  * found, it returns the node list element pointer, else it returns NULL.
2848  */
2849 struct lpfc_nodelist *
2850 __lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
2851 {
2852 	return __lpfc_find_node(vport, lpfc_filter_by_rpi, &rpi);
2853 }
2854 
2855 #if 0
2856 struct lpfc_nodelist *
2857 lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
2858 {
2859 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2860 	struct lpfc_nodelist *ndlp;
2861 
2862 	spin_lock_irq(shost->host_lock);
2863 	ndlp = __lpfc_findnode_rpi(vport, rpi);
2864 	spin_unlock_irq(shost->host_lock);
2865 	return ndlp;
2866 }
2867 #endif  /*  0  */
2868 
2869 /*
2870  * This routine looks up the ndlp list for the given WWPN. If the WWPN is
2871  * found, it returns the node list element pointer, else it returns NULL.
2872  */
2873 struct lpfc_nodelist *
2874 lpfc_findnode_wwpn(struct lpfc_vport *vport, struct lpfc_name *wwpn)
2875 {
2876 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2877 	struct lpfc_nodelist *ndlp;
2878 
2879 	spin_lock_irq(shost->host_lock);
2880 	ndlp = __lpfc_find_node(vport, lpfc_filter_by_wwpn, wwpn);
2881 	spin_unlock_irq(shost->host_lock);
2882 	return ndlp;
2883 }
2884 
2885 void
2886 lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2887 	      uint32_t did)
2888 {
2889 	memset(ndlp, 0, sizeof (struct lpfc_nodelist));
2890 	INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
2891 	INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
2892 	init_timer(&ndlp->nlp_delayfunc);
2893 	ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
2894 	ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
2895 	ndlp->nlp_DID = did;
2896 	ndlp->vport = vport;
2897 	ndlp->nlp_sid = NLP_NO_SID;
2898 	INIT_LIST_HEAD(&ndlp->nlp_listp);
2899 	kref_init(&ndlp->kref);
2900 
2901 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
2902 		"node init:       did:x%x",
2903 		ndlp->nlp_DID, 0, 0);
2904 
2905 	return;
2906 }
2907 
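/* A minimal sketch of the expected allocation pattern (see
 * lpfc_setup_disc_node() above for a real call site):
 *
 *	ndlp = mempool_alloc(vport->phba->nlp_mem_pool, GFP_KERNEL);
 *	if (ndlp) {
 *		lpfc_nlp_init(vport, ndlp, did);
 *		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
 *	}
 *
 * kref_init() above starts the reference count at 1; the matching "last"
 * lpfc_nlp_put() is normally issued through lpfc_drop_node().
 */
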
2908 /* This routine releases all resources associated with a specific NPort's ndlp
2909  * and mempool_free's the nodelist.
2910  */
2911 static void
2912 lpfc_nlp_release(struct kref *kref)
2913 {
2914 	struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist,
2915 						  kref);
2916 
2917 	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
2918 		"node release:    did:x%x flg:x%x type:x%x",
2919 		ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
2920 
2921 	lpfc_nlp_remove(ndlp->vport, ndlp);
2922 	mempool_free(ndlp, ndlp->vport->phba->nlp_mem_pool);
2923 }
2924 
2925 /* This routine bumps the reference count for a ndlp structure to ensure
2926  * that one discovery thread won't free a ndlp while another discovery thread
2927  * is using it.
2928  */
2929 struct lpfc_nodelist *
2930 lpfc_nlp_get(struct lpfc_nodelist *ndlp)
2931 {
2932 	if (ndlp) {
2933 		lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
2934 			"node get:        did:x%x flg:x%x refcnt:x%x",
2935 			ndlp->nlp_DID, ndlp->nlp_flag,
2936 			atomic_read(&ndlp->kref.refcount));
2937 		kref_get(&ndlp->kref);
2938 	}
2939 	return ndlp;
2940 }
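
/* A hypothetical sketch of the intended get/put discipline: code that
 * hands an ndlp to asynchronous work takes a reference first and drops
 * it from the completion path, e.g.:
 *
 *	mbox->context2 = lpfc_nlp_get(ndlp);
 *	...
 *	lpfc_nlp_put(ndlp);	(in the mailbox completion handler)
 */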
2941 
2942 
2943 /* This routine decrements the reference count for a ndlp structure. If the
2944  * count goes to 0, this indicates that the associated nodelist should be freed.
2945  */
2946 int
2947 lpfc_nlp_put(struct lpfc_nodelist *ndlp)
2948 {
2949 	if (ndlp) {
2950 		lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
2951 		"node put:        did:x%x flg:x%x refcnt:x%x",
2952 			ndlp->nlp_DID, ndlp->nlp_flag,
2953 			atomic_read(&ndlp->kref.refcount));
2954 	}
2955 	return ndlp ? kref_put(&ndlp->kref, lpfc_nlp_release) : 0;
2956 }
2957 
2958 /* This routine frees the specified nodelist if it is not in use
2959  * by any other discovery thread. This routine returns 1 if the ndlp
2960  * is not being used by anyone and has been freed. A return value of
2961  * 0 indicates it is being used by another discovery thread and the
2962  * refcount is left unchanged.
2963  */
2964 int
2965 lpfc_nlp_not_used(struct lpfc_nodelist *ndlp)
2966 {
2967 	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
2968 		"node not used:   did:x%x flg:x%x refcnt:x%x",
2969 		ndlp->nlp_DID, ndlp->nlp_flag,
2970 		atomic_read(&ndlp->kref.refcount));
2971 
2972 	if (atomic_read(&ndlp->kref.refcount) == 1) {
2973 		lpfc_nlp_put(ndlp);
2974 		return 1;
2975 	}
2976 	return 0;
2977 }
2978 
2979