1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
4  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
5  */
6 
7 /*
8  * This file implements remote node state machines for:
9  * - Fabric logins.
10  * - Fabric controller events.
11  * - Name/directory services interaction.
12  * - Point-to-point logins.
13  */
14 
15 /*
16  * fabric_sm Node State Machine: Fabric States
17  * ns_sm Node State Machine: Name/Directory Services States
18  * p2p_sm Node State Machine: Point-to-Point Node States
19  */
20 
21 #include "efc.h"
22 
23 static void
24 efc_fabric_initiate_shutdown(struct efc_node *node)
25 {
26 	struct efc *efc = node->efc;
27 
28 	node->els_io_enabled = false;
29 
30 	if (node->attached) {
31 		int rc;
32 
33 		/* issue hw node free; don't care if succeeds right away
34 		 * or sometime later, will check node->attached later in
35 		 * shutdown process
36 		 */
37 		rc = efc_cmd_node_detach(efc, &node->rnode);
38 		if (rc < 0) {
39 			node_printf(node, "Failed freeing HW node, rc=%d\n",
40 				    rc);
41 		}
42 	}
43 	/*
44 	 * node has either been detached or is in the process of being detached,
45 	 * call common node's initiate cleanup function
46 	 */
47 	efc_node_initiate_cleanup(node);
48 }
49 
50 static void
51 __efc_fabric_common(const char *funcname, struct efc_sm_ctx *ctx,
52 		    enum efc_sm_event evt, void *arg)
53 {
54 	struct efc_node *node = NULL;
55 
56 	node = ctx->app;
57 
58 	switch (evt) {
59 	case EFC_EVT_DOMAIN_ATTACH_OK:
60 		break;
61 	case EFC_EVT_SHUTDOWN:
62 		node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
63 		efc_fabric_initiate_shutdown(node);
64 		break;
65 
66 	default:
67 		/* call default event handler common to all nodes */
68 		__efc_node_common(funcname, ctx, evt, arg);
69 	}
70 }
71 
72 void
73 __efc_fabric_init(struct efc_sm_ctx *ctx, enum efc_sm_event evt,
74 		  void *arg)
75 {
76 	struct efc_node *node = ctx->app;
77 	struct efc *efc = node->efc;
78 
79 	efc_node_evt_set(ctx, evt, __func__);
80 
81 	node_sm_trace();
82 
83 	switch (evt) {
84 	case EFC_EVT_REENTER:
85 		efc_log_debug(efc, ">>> reenter !!\n");
86 		fallthrough;
87 
88 	case EFC_EVT_ENTER:
89 		/* send FLOGI */
90 		efc_send_flogi(node);
91 		efc_node_transition(node, __efc_fabric_flogi_wait_rsp, NULL);
92 		break;
93 
94 	default:
95 		__efc_fabric_common(__func__, ctx, evt, arg);
96 	}
97 }
98 
99 void
100 efc_fabric_set_topology(struct efc_node *node,
101 			enum efc_nport_topology topology)
102 {
103 	node->nport->topology = topology;
104 }
105 
106 void
107 efc_fabric_notify_topology(struct efc_node *node)
108 {
109 	struct efc_node *tmp_node;
110 	enum efc_nport_topology topology = node->nport->topology;
111 	unsigned long index;
112 
113 	/*
114 	 * now loop through the nodes in the nport
115 	 * and send topology notification
116 	 */
117 	xa_for_each(&node->nport->lookup, index, tmp_node) {
118 		if (tmp_node != node) {
119 			efc_node_post_event(tmp_node,
120 					    EFC_EVT_NPORT_TOPOLOGY_NOTIFY,
121 					    (void *)topology);
122 		}
123 	}
124 }
125 
126 static bool efc_rnode_is_nport(struct fc_els_flogi *rsp)
127 {
128 	return !(ntohs(rsp->fl_csp.sp_features) & FC_SP_FT_FPORT);
129 }
130 
/*
 * Handle the FLOGI response: on success, pick fabric vs point-to-point
 * topology and attach the domain (or elect a p2p winner); on failure,
 * shut down the owning nport.
 */
void
__efc_fabric_flogi_wait_rsp(struct efc_sm_ctx *ctx,
			    enum efc_sm_event evt, void *arg)
{
	struct efc_node_cb *cbdata = arg;
	struct efc_node *node = ctx->app;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	switch (evt) {
	case EFC_EVT_SRRS_ELS_REQ_OK: {
		/* bail if this completion is not for our FLOGI */
		if (efc_node_check_els_req(ctx, evt, arg, ELS_FLOGI,
					   __efc_fabric_common, __func__)) {
			return;
		}
		WARN_ON(!node->els_req_cnt);
		node->els_req_cnt--;

		/*
		 * Save the login service parameters; the p2p winner
		 * election (efc_rnode_is_winner()) reads them later.
		 */
		memcpy(node->nport->domain->flogi_service_params,
		       cbdata->els_rsp.virt,
		       sizeof(struct fc_els_flogi));

		/* Check to see if the fabric is an F_PORT or and N_PORT */
		if (!efc_rnode_is_nport(cbdata->els_rsp.virt)) {
			/* sm: if not nport / efc_domain_attach */
			/* ext_status has the fc_id, attach domain */
			efc_fabric_set_topology(node, EFC_NPORT_TOPO_FABRIC);
			efc_fabric_notify_topology(node);
			WARN_ON(node->nport->domain->attached);
			efc_domain_attach(node->nport->domain,
					  cbdata->ext_status);
			efc_node_transition(node,
					    __efc_fabric_wait_domain_attach,
					    NULL);
			break;
		}

		/*  sm: if nport and p2p_winner / efc_domain_attach */
		efc_fabric_set_topology(node, EFC_NPORT_TOPO_P2P);
		if (efc_p2p_setup(node->nport)) {
			node_printf(node,
				    "p2p setup failed, shutting down node\n");
			node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
			efc_fabric_initiate_shutdown(node);
			break;
		}

		if (node->nport->p2p_winner) {
			efc_node_transition(node,
					    __efc_p2p_wait_domain_attach,
					     NULL);
			if (node->nport->domain->attached &&
			    !node->nport->domain->domain_notify_pend) {
				/*
				 * already attached,
				 * just send ATTACH_OK
				 */
				node_printf(node,
					    "p2p winner, domain already attached\n");
				efc_node_post_event(node,
						    EFC_EVT_DOMAIN_ATTACH_OK,
						    NULL);
			}
		} else {
			/*
			 * peer is p2p winner;
			 * PLOGI will be received on the
			 * remote SID=1 node;
			 * this node has served its purpose
			 */
			node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
			efc_fabric_initiate_shutdown(node);
		}

		break;
	}

	case EFC_EVT_ELS_REQ_ABORTED:
	case EFC_EVT_SRRS_ELS_REQ_RJT:
	case EFC_EVT_SRRS_ELS_REQ_FAIL: {
		struct efc_nport *nport = node->nport;
		/*
		 * with these errors, we have no recovery,
		 * so shutdown the nport, leave the link
		 * up and the domain ready
		 */
		if (efc_node_check_els_req(ctx, evt, arg, ELS_FLOGI,
					   __efc_fabric_common, __func__)) {
			return;
		}
		node_printf(node,
			    "FLOGI failed evt=%s, shutting down nport [%s]\n",
			    efc_sm_event_name(evt), nport->display_name);
		WARN_ON(!node->els_req_cnt);
		node->els_req_cnt--;
		efc_sm_post_event(&nport->sm, EFC_EVT_SHUTDOWN, NULL);
		break;
	}

	default:
		__efc_fabric_common(__func__, ctx, evt, arg);
	}
}
236 
237 void
238 __efc_vport_fabric_init(struct efc_sm_ctx *ctx,
239 			enum efc_sm_event evt, void *arg)
240 {
241 	struct efc_node *node = ctx->app;
242 
243 	efc_node_evt_set(ctx, evt, __func__);
244 
245 	node_sm_trace();
246 
247 	switch (evt) {
248 	case EFC_EVT_ENTER:
249 		/* sm: / send FDISC */
250 		efc_send_fdisc(node);
251 		efc_node_transition(node, __efc_fabric_fdisc_wait_rsp, NULL);
252 		break;
253 
254 	default:
255 		__efc_fabric_common(__func__, ctx, evt, arg);
256 	}
257 }
258 
/*
 * Handle the FDISC response for a virtual port: on success attach the
 * nport with the assigned fc_id; on reject/failure shut the nport down.
 */
void
__efc_fabric_fdisc_wait_rsp(struct efc_sm_ctx *ctx,
			    enum efc_sm_event evt, void *arg)
{
	struct efc_node_cb *cbdata = arg;
	struct efc_node *node = ctx->app;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	switch (evt) {
	case EFC_EVT_SRRS_ELS_REQ_OK: {
		/* fc_id is in ext_status */
		if (efc_node_check_els_req(ctx, evt, arg, ELS_FDISC,
					   __efc_fabric_common, __func__)) {
			return;
		}

		WARN_ON(!node->els_req_cnt);
		node->els_req_cnt--;
		/* sm: / efc_nport_attach */
		efc_nport_attach(node->nport, cbdata->ext_status);
		efc_node_transition(node, __efc_fabric_wait_domain_attach,
				    NULL);
		break;
	}

	case EFC_EVT_SRRS_ELS_REQ_RJT:
	case EFC_EVT_SRRS_ELS_REQ_FAIL: {
		/* no recovery for a failed FDISC; tear the nport down */
		if (efc_node_check_els_req(ctx, evt, arg, ELS_FDISC,
					   __efc_fabric_common, __func__)) {
			return;
		}
		WARN_ON(!node->els_req_cnt);
		node->els_req_cnt--;
		efc_log_err(node->efc, "FDISC failed, shutting down nport\n");
		/* sm: / shutdown nport */
		efc_sm_post_event(&node->nport->sm, EFC_EVT_SHUTDOWN, NULL);
		break;
	}

	default:
		__efc_fabric_common(__func__, ctx, evt, arg);
	}
}
305 
306 static int
307 efc_start_ns_node(struct efc_nport *nport)
308 {
309 	struct efc_node *ns;
310 
311 	/* Instantiate a name services node */
312 	ns = efc_node_find(nport, FC_FID_DIR_SERV);
313 	if (!ns) {
314 		ns = efc_node_alloc(nport, FC_FID_DIR_SERV, false, false);
315 		if (!ns)
316 			return -EIO;
317 	}
318 	/*
319 	 * for found ns, should we be transitioning from here?
320 	 * breaks transition only
321 	 *  1. from within state machine or
322 	 *  2. if after alloc
323 	 */
324 	if (ns->efc->nodedb_mask & EFC_NODEDB_PAUSE_NAMESERVER)
325 		efc_node_pause(ns, __efc_ns_init);
326 	else
327 		efc_node_transition(ns, __efc_ns_init, NULL);
328 	return 0;
329 }
330 
331 static int
332 efc_start_fabctl_node(struct efc_nport *nport)
333 {
334 	struct efc_node *fabctl;
335 
336 	fabctl = efc_node_find(nport, FC_FID_FCTRL);
337 	if (!fabctl) {
338 		fabctl = efc_node_alloc(nport, FC_FID_FCTRL,
339 					false, false);
340 		if (!fabctl)
341 			return -EIO;
342 	}
343 	/*
344 	 * for found ns, should we be transitioning from here?
345 	 * breaks transition only
346 	 *  1. from within state machine or
347 	 *  2. if after alloc
348 	 */
349 	efc_node_transition(fabctl, __efc_fabctl_init, NULL);
350 	return 0;
351 }
352 
/*
 * Hold frames until the domain/nport attach completes, then start the
 * name-services node (and, if RSCN is enabled, the fabric-controller
 * node) and go idle.
 */
void
__efc_fabric_wait_domain_attach(struct efc_sm_ctx *ctx,
				enum efc_sm_event evt, void *arg)
{
	struct efc_node *node = ctx->app;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	switch (evt) {
	case EFC_EVT_ENTER:
		/* park inbound frames until the attach settles */
		efc_node_hold_frames(node);
		break;

	case EFC_EVT_EXIT:
		efc_node_accept_frames(node);
		break;
	case EFC_EVT_DOMAIN_ATTACH_OK:
	case EFC_EVT_NPORT_ATTACH_OK: {
		int rc;

		/* bring up directory services (sends PLOGI to 0xFFFFFC) */
		rc = efc_start_ns_node(node->nport);
		if (rc)
			return;

		/* sm: if enable_ini / start fabctl node */
		/* Instantiate the fabric controller (sends SCR) */
		if (node->nport->enable_rscn) {
			rc = efc_start_fabctl_node(node->nport);
			if (rc)
				return;
		}
		efc_node_transition(node, __efc_fabric_idle, NULL);
		break;
	}
	default:
		__efc_fabric_common(__func__, ctx, evt, arg);
	}
}
393 
394 void
395 __efc_fabric_idle(struct efc_sm_ctx *ctx, enum efc_sm_event evt,
396 		  void *arg)
397 {
398 	struct efc_node *node = ctx->app;
399 
400 	efc_node_evt_set(ctx, evt, __func__);
401 
402 	node_sm_trace();
403 
404 	switch (evt) {
405 	case EFC_EVT_DOMAIN_ATTACH_OK:
406 		break;
407 	default:
408 		__efc_fabric_common(__func__, ctx, evt, arg);
409 	}
410 }
411 
412 void
413 __efc_ns_init(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg)
414 {
415 	struct efc_node *node = ctx->app;
416 
417 	efc_node_evt_set(ctx, evt, __func__);
418 
419 	node_sm_trace();
420 
421 	switch (evt) {
422 	case EFC_EVT_ENTER:
423 		/* sm: / send PLOGI */
424 		efc_send_plogi(node);
425 		efc_node_transition(node, __efc_ns_plogi_wait_rsp, NULL);
426 		break;
427 	default:
428 		__efc_fabric_common(__func__, ctx, evt, arg);
429 	}
430 }
431 
/*
 * Handle the PLOGI response from the directory server: save the service
 * parameters and kick off the HW node attach.
 */
void
__efc_ns_plogi_wait_rsp(struct efc_sm_ctx *ctx,
			enum efc_sm_event evt, void *arg)
{
	struct efc_node_cb *cbdata = arg;
	struct efc_node *node = ctx->app;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	switch (evt) {
	case EFC_EVT_SRRS_ELS_REQ_OK: {
		int rc;

		/* Save service parameters */
		if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI,
					   __efc_fabric_common, __func__)) {
			return;
		}
		WARN_ON(!node->els_req_cnt);
		node->els_req_cnt--;
		/* sm: / save sparams, efc_node_attach */
		efc_node_save_sparms(node, cbdata->els_rsp.virt);
		rc = efc_node_attach(node);
		/*
		 * Transition first so the wait state sees either the async
		 * attach completion or the failure event posted below.
		 */
		efc_node_transition(node, __efc_ns_wait_node_attach, NULL);
		if (rc < 0)
			efc_node_post_event(node, EFC_EVT_NODE_ATTACH_FAIL,
					    NULL);
		break;
	}
	default:
		__efc_fabric_common(__func__, ctx, evt, arg);
	}
}
467 
/*
 * Wait for the HW node attach to the directory server to complete, then
 * begin name-server registration with RFT_ID.
 */
void
__efc_ns_wait_node_attach(struct efc_sm_ctx *ctx,
			  enum efc_sm_event evt, void *arg)
{
	struct efc_node *node = ctx->app;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	switch (evt) {
	case EFC_EVT_ENTER:
		/* park inbound frames until the attach settles */
		efc_node_hold_frames(node);
		break;

	case EFC_EVT_EXIT:
		efc_node_accept_frames(node);
		break;

	case EFC_EVT_NODE_ATTACH_OK:
		node->attached = true;
		/* sm: / send RFTID */
		efc_ns_send_rftid(node);
		efc_node_transition(node, __efc_ns_rftid_wait_rsp, NULL);
		break;

	case EFC_EVT_NODE_ATTACH_FAIL:
		/* node attach failed, shutdown the node */
		node->attached = false;
		node_printf(node, "Node attach failed\n");
		node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
		efc_fabric_initiate_shutdown(node);
		break;

	case EFC_EVT_SHUTDOWN:
		/* defer shutdown until the in-flight attach resolves */
		node_printf(node, "Shutdown event received\n");
		node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
		efc_node_transition(node,
				    __efc_fabric_wait_attach_evt_shutdown,
				     NULL);
		break;

	/*
	 * if receive RSCN just ignore,
	 * we haven't sent GID_PT yet (ACC sent by fabctl node)
	 */
	case EFC_EVT_RSCN_RCVD:
		break;

	default:
		__efc_fabric_common(__func__, ctx, evt, arg);
	}
}
521 
522 void
523 __efc_fabric_wait_attach_evt_shutdown(struct efc_sm_ctx *ctx,
524 				      enum efc_sm_event evt, void *arg)
525 {
526 	struct efc_node *node = ctx->app;
527 
528 	efc_node_evt_set(ctx, evt, __func__);
529 
530 	node_sm_trace();
531 
532 	switch (evt) {
533 	case EFC_EVT_ENTER:
534 		efc_node_hold_frames(node);
535 		break;
536 
537 	case EFC_EVT_EXIT:
538 		efc_node_accept_frames(node);
539 		break;
540 
541 	/* wait for any of these attach events and then shutdown */
542 	case EFC_EVT_NODE_ATTACH_OK:
543 		node->attached = true;
544 		node_printf(node, "Attach evt=%s, proceed to shutdown\n",
545 			    efc_sm_event_name(evt));
546 		efc_fabric_initiate_shutdown(node);
547 		break;
548 
549 	case EFC_EVT_NODE_ATTACH_FAIL:
550 		node->attached = false;
551 		node_printf(node, "Attach evt=%s, proceed to shutdown\n",
552 			    efc_sm_event_name(evt));
553 		efc_fabric_initiate_shutdown(node);
554 		break;
555 
556 	/* ignore shutdown event as we're already in shutdown path */
557 	case EFC_EVT_SHUTDOWN:
558 		node_printf(node, "Shutdown event received\n");
559 		break;
560 
561 	default:
562 		__efc_fabric_common(__func__, ctx, evt, arg);
563 	}
564 }
565 
/*
 * Wait for the RFT_ID (register FC-4 types) response, then register FC-4
 * features via RFF_ID.
 */
void
__efc_ns_rftid_wait_rsp(struct efc_sm_ctx *ctx,
			enum efc_sm_event evt, void *arg)
{
	struct efc_node *node = ctx->app;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	switch (evt) {
	case EFC_EVT_SRRS_ELS_REQ_OK:
		if (efc_node_check_ns_req(ctx, evt, arg, FC_NS_RFT_ID,
					  __efc_fabric_common, __func__)) {
			return;
		}
		WARN_ON(!node->els_req_cnt);
		node->els_req_cnt--;
		/* sm: / send RFFID */
		efc_ns_send_rffid(node);
		efc_node_transition(node, __efc_ns_rffid_wait_rsp, NULL);
		break;

	/*
	 * if receive RSCN just ignore,
	 * we haven't sent GID_PT yet (ACC sent by fabctl node)
	 */
	case EFC_EVT_RSCN_RCVD:
		break;

	default:
		__efc_fabric_common(__func__, ctx, evt, arg);
	}
}
600 
void
__efc_ns_rffid_wait_rsp(struct efc_sm_ctx *ctx,
			enum efc_sm_event evt, void *arg)
{
	struct efc_node *node = ctx->app;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	/*
	 * Waits for an RFFID response event;
	 * if rscn enabled, a GIDPT name services request is issued.
	 */
	switch (evt) {
	case EFC_EVT_SRRS_ELS_REQ_OK:	{
		if (efc_node_check_ns_req(ctx, evt, arg, FC_NS_RFF_ID,
					  __efc_fabric_common, __func__)) {
			return;
		}
		WARN_ON(!node->els_req_cnt);
		node->els_req_cnt--;
		if (node->nport->enable_rscn) {
			/* sm: if enable_rscn / send GIDPT */
			efc_ns_send_gidpt(node);

			efc_node_transition(node, __efc_ns_gidpt_wait_rsp,
					    NULL);
		} else {
			/* if 'T' only, we're done, go to idle */
			efc_node_transition(node, __efc_ns_idle, NULL);
		}
		break;
	}
	/*
	 * if receive RSCN just ignore,
	 * we haven't sent GID_PT yet (ACC sent by fabctl node)
	 */
	case EFC_EVT_RSCN_RCVD:
		break;

	default:
		__efc_fabric_common(__func__, ctx, evt, arg);
	}
}
646 
647 static int
648 efc_process_gidpt_payload(struct efc_node *node,
649 			  void *data, u32 gidpt_len)
650 {
651 	u32 i, j;
652 	struct efc_node *newnode;
653 	struct efc_nport *nport = node->nport;
654 	struct efc *efc = node->efc;
655 	u32 port_id = 0, port_count, plist_count;
656 	struct efc_node *n;
657 	struct efc_node **active_nodes;
658 	int residual;
659 	struct {
660 		struct fc_ct_hdr hdr;
661 		struct fc_gid_pn_resp pn_rsp;
662 	} *rsp;
663 	struct fc_gid_pn_resp *gidpt;
664 	unsigned long index;
665 
666 	rsp = data;
667 	gidpt = &rsp->pn_rsp;
668 	residual = be16_to_cpu(rsp->hdr.ct_mr_size);
669 
670 	if (residual != 0)
671 		efc_log_debug(node->efc, "residual is %u words\n", residual);
672 
673 	if (be16_to_cpu(rsp->hdr.ct_cmd) == FC_FS_RJT) {
674 		node_printf(node,
675 			    "GIDPT request failed: rsn x%x rsn_expl x%x\n",
676 			    rsp->hdr.ct_reason, rsp->hdr.ct_explan);
677 		return -EIO;
678 	}
679 
680 	plist_count = (gidpt_len - sizeof(struct fc_ct_hdr)) / sizeof(*gidpt);
681 
682 	/* Count the number of nodes */
683 	port_count = 0;
684 	xa_for_each(&nport->lookup, index, n) {
685 		port_count++;
686 	}
687 
688 	/* Allocate a buffer for all nodes */
689 	active_nodes = kzalloc(port_count * sizeof(*active_nodes), GFP_ATOMIC);
690 	if (!active_nodes) {
691 		node_printf(node, "efc_malloc failed\n");
692 		return -EIO;
693 	}
694 
695 	/* Fill buffer with fc_id of active nodes */
696 	i = 0;
697 	xa_for_each(&nport->lookup, index, n) {
698 		port_id = n->rnode.fc_id;
699 		switch (port_id) {
700 		case FC_FID_FLOGI:
701 		case FC_FID_FCTRL:
702 		case FC_FID_DIR_SERV:
703 			break;
704 		default:
705 			if (port_id != FC_FID_DOM_MGR)
706 				active_nodes[i++] = n;
707 			break;
708 		}
709 	}
710 
711 	/* update the active nodes buffer */
712 	for (i = 0; i < plist_count; i++) {
713 		hton24(gidpt[i].fp_fid, port_id);
714 
715 		for (j = 0; j < port_count; j++) {
716 			if (active_nodes[j] &&
717 			    port_id == active_nodes[j]->rnode.fc_id) {
718 				active_nodes[j] = NULL;
719 			}
720 		}
721 
722 		if (gidpt[i].fp_resvd & FC_NS_FID_LAST)
723 			break;
724 	}
725 
726 	/* Those remaining in the active_nodes[] are now gone ! */
727 	for (i = 0; i < port_count; i++) {
728 		/*
729 		 * if we're an initiator and the remote node
730 		 * is a target, then post the node missing event.
731 		 * if we're target and we have enabled
732 		 * target RSCN, then post the node missing event.
733 		 */
734 		if (!active_nodes[i])
735 			continue;
736 
737 		if ((node->nport->enable_ini && active_nodes[i]->targ) ||
738 		    (node->nport->enable_tgt && enable_target_rscn(efc))) {
739 			efc_node_post_event(active_nodes[i],
740 					    EFC_EVT_NODE_MISSING, NULL);
741 		} else {
742 			node_printf(node,
743 				    "GID_PT: skipping non-tgt port_id x%06x\n",
744 				    active_nodes[i]->rnode.fc_id);
745 		}
746 	}
747 	kfree(active_nodes);
748 
749 	for (i = 0; i < plist_count; i++) {
750 		hton24(gidpt[i].fp_fid, port_id);
751 
752 		/* Don't create node for ourselves */
753 		if (port_id == node->rnode.nport->fc_id) {
754 			if (gidpt[i].fp_resvd & FC_NS_FID_LAST)
755 				break;
756 			continue;
757 		}
758 
759 		newnode = efc_node_find(nport, port_id);
760 		if (!newnode) {
761 			if (!node->nport->enable_ini)
762 				continue;
763 
764 			newnode = efc_node_alloc(nport, port_id, false, false);
765 			if (!newnode) {
766 				efc_log_err(efc, "efc_node_alloc() failed\n");
767 				return -EIO;
768 			}
769 			/*
770 			 * send PLOGI automatically
771 			 * if initiator
772 			 */
773 			efc_node_init_device(newnode, true);
774 		}
775 
776 		if (node->nport->enable_ini && newnode->targ) {
777 			efc_node_post_event(newnode, EFC_EVT_NODE_REFOUND,
778 					    NULL);
779 		}
780 
781 		if (gidpt[i].fp_resvd & FC_NS_FID_LAST)
782 			break;
783 	}
784 	return 0;
785 }
786 
void
__efc_ns_gidpt_wait_rsp(struct efc_sm_ctx *ctx,
			enum efc_sm_event evt, void *arg)
{
	struct efc_node_cb *cbdata = arg;
	struct efc_node *node = ctx->app;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();
	/*
	 * Wait for a GIDPT response from the name server. Process the FC_IDs
	 * that are reported by creating new remote ports, as needed.
	 */

	switch (evt) {
	case EFC_EVT_SRRS_ELS_REQ_OK:	{
		if (efc_node_check_ns_req(ctx, evt, arg, FC_NS_GID_PT,
					  __efc_fabric_common, __func__)) {
			return;
		}
		WARN_ON(!node->els_req_cnt);
		node->els_req_cnt--;
		/* sm: / process GIDPT payload */
		/* best effort: a payload parse error is not fatal here */
		efc_process_gidpt_payload(node, cbdata->els_rsp.virt,
					  cbdata->els_rsp.len);
		efc_node_transition(node, __efc_ns_idle, NULL);
		break;
	}

	case EFC_EVT_SRRS_ELS_REQ_FAIL:	{
		/* not much we can do; will retry with the next RSCN */
		node_printf(node, "GID_PT failed to complete\n");
		WARN_ON(!node->els_req_cnt);
		node->els_req_cnt--;
		efc_node_transition(node, __efc_ns_idle, NULL);
		break;
	}

	/* if receive RSCN here, queue up another discovery processing */
	case EFC_EVT_RSCN_RCVD: {
		/* __efc_ns_idle re-runs discovery when it sees this flag */
		node_printf(node, "RSCN received during GID_PT processing\n");
		node->rscn_pending = true;
		break;
	}

	default:
		__efc_fabric_common(__func__, ctx, evt, arg);
	}
}
837 
void
__efc_ns_idle(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg)
{
	struct efc_node *node = ctx->app;
	struct efc *efc = node->efc;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	/*
	 * Wait for RSCN received events (posted from the fabric controller)
	 * and restart the GIDPT name services query and processing.
	 */

	switch (evt) {
	case EFC_EVT_ENTER:
		if (!node->rscn_pending)
			break;

		/* an RSCN arrived during the previous GID_PT; re-discover */
		node_printf(node, "RSCN pending, restart discovery\n");
		node->rscn_pending = false;
		fallthrough;

	case EFC_EVT_RSCN_RCVD: {
		/* sm: / send GIDPT */
		/*
		 * If target RSCN processing is enabled,
		 * and this is target only (not initiator),
		 * and tgt_rscn_delay is non-zero,
		 * then we delay issuing the GID_PT
		 */
		if (efc->tgt_rscn_delay_msec != 0 &&
		    !node->nport->enable_ini && node->nport->enable_tgt &&
		    enable_target_rscn(efc)) {
			efc_node_transition(node, __efc_ns_gidpt_delay, NULL);
		} else {
			efc_ns_send_gidpt(node);
			efc_node_transition(node, __efc_ns_gidpt_wait_rsp,
					    NULL);
		}
		break;
	}

	default:
		__efc_fabric_common(__func__, ctx, evt, arg);
	}
}
886 
887 static void
888 gidpt_delay_timer_cb(struct timer_list *t)
889 {
890 	struct efc_node *node = from_timer(node, t, gidpt_delay_timer);
891 
892 	del_timer(&node->gidpt_delay_timer);
893 
894 	efc_node_post_event(node, EFC_EVT_GIDPT_DELAY_EXPIRED, NULL);
895 }
896 
/*
 * Rate-limit GID_PT queries for target-only ports: arm a one-shot timer
 * and issue the query when it expires.
 */
void
__efc_ns_gidpt_delay(struct efc_sm_ctx *ctx,
		     enum efc_sm_event evt, void *arg)
{
	struct efc_node *node = ctx->app;
	struct efc *efc = node->efc;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	switch (evt) {
	case EFC_EVT_ENTER: {
		u64 delay_msec, tmp;

		/*
		 * Compute the delay time.
		 * Set to tgt_rscn_delay, if the time since last GIDPT
		 * is less than tgt_rscn_period, then use tgt_rscn_period.
		 */
		delay_msec = efc->tgt_rscn_delay_msec;
		tmp = jiffies_to_msecs(jiffies) - node->time_last_gidpt_msec;
		if (tmp < efc->tgt_rscn_period_msec)
			delay_msec = efc->tgt_rscn_period_msec;

		/* re-initialized on each entry; timer fires exactly once */
		timer_setup(&node->gidpt_delay_timer, &gidpt_delay_timer_cb,
			    0);
		mod_timer(&node->gidpt_delay_timer,
			  jiffies + msecs_to_jiffies(delay_msec));

		break;
	}

	case EFC_EVT_GIDPT_DELAY_EXPIRED:
		node->time_last_gidpt_msec = jiffies_to_msecs(jiffies);

		efc_ns_send_gidpt(node);
		efc_node_transition(node, __efc_ns_gidpt_wait_rsp, NULL);
		break;

	case EFC_EVT_RSCN_RCVD: {
		/* coalesced: the pending GID_PT will pick up these changes */
		efc_log_debug(efc,
			      "RSCN received while in GIDPT delay - no action\n");
		break;
	}

	default:
		__efc_fabric_common(__func__, ctx, evt, arg);
	}
}
947 
948 void
949 __efc_fabctl_init(struct efc_sm_ctx *ctx,
950 		  enum efc_sm_event evt, void *arg)
951 {
952 	struct efc_node *node = ctx->app;
953 
954 	node_sm_trace();
955 
956 	switch (evt) {
957 	case EFC_EVT_ENTER:
958 		/* no need to login to fabric controller, just send SCR */
959 		efc_send_scr(node);
960 		efc_node_transition(node, __efc_fabctl_wait_scr_rsp, NULL);
961 		break;
962 
963 	case EFC_EVT_NODE_ATTACH_OK:
964 		node->attached = true;
965 		break;
966 
967 	default:
968 		__efc_fabric_common(__func__, ctx, evt, arg);
969 	}
970 }
971 
972 void
973 __efc_fabctl_wait_scr_rsp(struct efc_sm_ctx *ctx,
974 			  enum efc_sm_event evt, void *arg)
975 {
976 	struct efc_node *node = ctx->app;
977 
978 	efc_node_evt_set(ctx, evt, __func__);
979 
980 	node_sm_trace();
981 
982 	/*
983 	 * Fabric controller node state machine:
984 	 * Wait for an SCR response from the fabric controller.
985 	 */
986 	switch (evt) {
987 	case EFC_EVT_SRRS_ELS_REQ_OK:
988 		if (efc_node_check_els_req(ctx, evt, arg, ELS_SCR,
989 					   __efc_fabric_common, __func__)) {
990 			return;
991 		}
992 		WARN_ON(!node->els_req_cnt);
993 		node->els_req_cnt--;
994 		efc_node_transition(node, __efc_fabctl_ready, NULL);
995 		break;
996 
997 	default:
998 		__efc_fabric_common(__func__, ctx, evt, arg);
999 	}
1000 }
1001 
1002 static void
1003 efc_process_rscn(struct efc_node *node, struct efc_node_cb *cbdata)
1004 {
1005 	struct efc *efc = node->efc;
1006 	struct efc_nport *nport = node->nport;
1007 	struct efc_node *ns;
1008 
1009 	/* Forward this event to the name-services node */
1010 	ns = efc_node_find(nport, FC_FID_DIR_SERV);
1011 	if (ns)
1012 		efc_node_post_event(ns, EFC_EVT_RSCN_RCVD, cbdata);
1013 	else
1014 		efc_log_warn(efc, "can't find name server node\n");
1015 }
1016 
void
__efc_fabctl_ready(struct efc_sm_ctx *ctx,
		   enum efc_sm_event evt, void *arg)
{
	struct efc_node_cb *cbdata = arg;
	struct efc_node *node = ctx->app;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	/*
	 * Fabric controller node state machine: Ready.
	 * In this state, the fabric controller sends a RSCN, which is received
	 * by this node and is forwarded to the name services node object; and
	 * the RSCN LS_ACC is sent.
	 */
	switch (evt) {
	case EFC_EVT_RSCN_RCVD: {
		struct fc_frame_header *hdr = cbdata->header->dma.virt;

		/*
		 * sm: / process RSCN (forward to name services node),
		 * send LS_ACC
		 */
		efc_process_rscn(node, cbdata);
		/* ox_id from the received frame is echoed in the LS_ACC */
		efc_send_ls_acc(node, be16_to_cpu(hdr->fh_ox_id));
		efc_node_transition(node, __efc_fabctl_wait_ls_acc_cmpl,
				    NULL);
		break;
	}

	default:
		__efc_fabric_common(__func__, ctx, evt, arg);
	}
}
1053 
1054 void
1055 __efc_fabctl_wait_ls_acc_cmpl(struct efc_sm_ctx *ctx,
1056 			      enum efc_sm_event evt, void *arg)
1057 {
1058 	struct efc_node *node = ctx->app;
1059 
1060 	efc_node_evt_set(ctx, evt, __func__);
1061 
1062 	node_sm_trace();
1063 
1064 	switch (evt) {
1065 	case EFC_EVT_ENTER:
1066 		efc_node_hold_frames(node);
1067 		break;
1068 
1069 	case EFC_EVT_EXIT:
1070 		efc_node_accept_frames(node);
1071 		break;
1072 
1073 	case EFC_EVT_SRRS_ELS_CMPL_OK:
1074 		WARN_ON(!node->els_cmpl_cnt);
1075 		node->els_cmpl_cnt--;
1076 		efc_node_transition(node, __efc_fabctl_ready, NULL);
1077 		break;
1078 
1079 	default:
1080 		__efc_fabric_common(__func__, ctx, evt, arg);
1081 	}
1082 }
1083 
1084 static uint64_t
1085 efc_get_wwpn(struct fc_els_flogi *sp)
1086 {
1087 	return be64_to_cpu(sp->fl_wwnn);
1088 }
1089 
/*
 * Point-to-point winner election: compare the remote port's name from the
 * saved FLOGI service parameters against this nport's WWPN.
 *
 * Returns 1 if the remote port wins (name compares greater), 0 if the
 * local port wins, -1 if the names are identical (configuration error).
 *
 * NOTE(review): the remote value comes from efc_get_wwpn(); verify it
 * extracts the field (WWPN vs WWNN) this election is meant to compare -
 * the debug log below prints fl_wwpn.
 */
static int
efc_rnode_is_winner(struct efc_nport *nport)
{
	struct fc_els_flogi *remote_sp;
	u64 remote_wwpn;
	u64 local_wwpn = nport->wwpn;
	/* always zero here; XOR below is a no-op (debug/test hook?) */
	u64 wwn_bump = 0;

	remote_sp = (struct fc_els_flogi *)nport->domain->flogi_service_params;
	remote_wwpn = efc_get_wwpn(remote_sp);

	local_wwpn ^= wwn_bump;

	efc_log_debug(nport->efc, "r: %llx\n",
		      be64_to_cpu(remote_sp->fl_wwpn));
	efc_log_debug(nport->efc, "l: %llx\n", local_wwpn);

	if (remote_wwpn == local_wwpn) {
		efc_log_warn(nport->efc,
			     "WWPN of remote node [%08x %08x] matches local WWPN\n",
			     (u32)(local_wwpn >> 32ll),
			     (u32)local_wwpn);
		return -1;
	}

	return (remote_wwpn > local_wwpn);
}
1117 
/*
 * p2p winner: once the domain attach completes, spin up the node that
 * will PLOGI the peer, then retire this transient FLOGI node.
 */
void
__efc_p2p_wait_domain_attach(struct efc_sm_ctx *ctx,
			     enum efc_sm_event evt, void *arg)
{
	struct efc_node *node = ctx->app;
	struct efc *efc = node->efc;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	switch (evt) {
	case EFC_EVT_ENTER:
		efc_node_hold_frames(node);
		break;

	case EFC_EVT_EXIT:
		efc_node_accept_frames(node);
		break;

	case EFC_EVT_DOMAIN_ATTACH_OK: {
		struct efc_nport *nport = node->nport;
		struct efc_node *rnode;

		/*
		 * this transient node (SID=0 (recv'd FLOGI)
		 * or DID=fabric (sent FLOGI))
		 * is the p2p winner, will use a separate node
		 * to send PLOGI to peer
		 */
		WARN_ON(!node->nport->p2p_winner);

		rnode = efc_node_find(nport, node->nport->p2p_remote_port_id);
		if (rnode) {
			/*
			 * the "other" transient p2p node has
			 * already kicked off the
			 * new node from which PLOGI is sent
			 */
			node_printf(node,
				    "Node with fc_id x%x already exists\n",
				    rnode->rnode.fc_id);
		} else {
			/*
			 * create new node (SID=1, DID=2)
			 * from which to send PLOGI
			 */
			rnode = efc_node_alloc(nport,
					       nport->p2p_remote_port_id,
						false, false);
			if (!rnode) {
				efc_log_err(efc, "node alloc failed\n");
				return;
			}

			efc_fabric_notify_topology(node);
			/* sm: / allocate p2p remote node */
			efc_node_transition(rnode, __efc_p2p_rnode_init,
					    NULL);
		}

		/*
		 * the transient node (SID=0 or DID=fabric)
		 * has served its purpose
		 */
		if (node->rnode.fc_id == 0) {
			/*
			 * if this is the SID=0 node,
			 * move to the init state in case peer
			 * has restarted FLOGI discovery and FLOGI is pending
			 */
			/* don't send PLOGI on efc_d_init entry */
			efc_node_init_device(node, false);
		} else {
			/*
			 * if this is the DID=fabric node
			 * (we initiated FLOGI), shut it down
			 */
			node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
			efc_fabric_initiate_shutdown(node);
		}
		break;
	}

	default:
		__efc_fabric_common(__func__, ctx, evt, arg);
	}
}
1206 
1207 void
1208 __efc_p2p_rnode_init(struct efc_sm_ctx *ctx,
1209 		     enum efc_sm_event evt, void *arg)
1210 {
1211 	struct efc_node_cb *cbdata = arg;
1212 	struct efc_node *node = ctx->app;
1213 
1214 	efc_node_evt_set(ctx, evt, __func__);
1215 
1216 	node_sm_trace();
1217 
1218 	switch (evt) {
1219 	case EFC_EVT_ENTER:
1220 		/* sm: / send PLOGI */
1221 		efc_send_plogi(node);
1222 		efc_node_transition(node, __efc_p2p_wait_plogi_rsp, NULL);
1223 		break;
1224 
1225 	case EFC_EVT_ABTS_RCVD:
1226 		/* sm: send BA_ACC */
1227 		efc_send_bls_acc(node, cbdata->header->dma.virt);
1228 
1229 		break;
1230 
1231 	default:
1232 		__efc_fabric_common(__func__, ctx, evt, arg);
1233 	}
1234 }
1235 
/**
 * __efc_p2p_wait_flogi_acc_cmpl() - Wait for the FLOGI LS_ACC send to
 * complete, then either attach the domain (p2p winner) or recycle the node.
 * @ctx: remote node state machine context.
 * @evt: event to process.
 * @arg: per event optional argument (struct efc_node_cb for ELS/ABTS events).
 */
void
__efc_p2p_wait_flogi_acc_cmpl(struct efc_sm_ctx *ctx,
			      enum efc_sm_event evt, void *arg)
{
	struct efc_node_cb *cbdata = arg;
	struct efc_node *node = ctx->app;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	switch (evt) {
	case EFC_EVT_ENTER:
		/* quiesce unsolicited frames until the LS_ACC completes */
		efc_node_hold_frames(node);
		break;

	case EFC_EVT_EXIT:
		efc_node_accept_frames(node);
		break;

	case EFC_EVT_SRRS_ELS_CMPL_OK:
		/* FLOGI LS_ACC send completed */
		WARN_ON(!node->els_cmpl_cnt);
		node->els_cmpl_cnt--;

		/* sm: if p2p_winner / domain_attach */
		if (node->nport->p2p_winner) {
			/*
			 * transition first so the DOMAIN_ATTACH_OK posted
			 * below (or later by the attach completion) is
			 * handled in the new state
			 */
			efc_node_transition(node,
					    __efc_p2p_wait_domain_attach,
					NULL);
			if (!node->nport->domain->attached) {
				node_printf(node, "Domain not attached\n");
				efc_domain_attach(node->nport->domain,
						  node->nport->p2p_port_id);
			} else {
				node_printf(node, "Domain already attached\n");
				efc_node_post_event(node,
						    EFC_EVT_DOMAIN_ATTACH_OK,
						    NULL);
			}
		} else {
			/* this node has served its purpose;
			 * we'll expect a PLOGI on a separate
			 * node (remote SID=0x1); return this node
			 * to init state in case peer
			 * restarts discovery -- it may already
			 * have (pending frames may exist).
			 */
			/* don't send PLOGI on efc_d_init entry */
			efc_node_init_device(node, false);
		}
		break;

	case EFC_EVT_SRRS_ELS_CMPL_FAIL:
		/*
		 * LS_ACC failed, possibly due to link down;
		 * shutdown node and wait
		 * for FLOGI discovery to restart
		 */
		node_printf(node, "FLOGI LS_ACC failed, shutting down\n");
		WARN_ON(!node->els_cmpl_cnt);
		node->els_cmpl_cnt--;
		node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
		efc_fabric_initiate_shutdown(node);
		break;

	case EFC_EVT_ABTS_RCVD: {
		/* sm: / send BA_ACC */
		efc_send_bls_acc(node, cbdata->header->dma.virt);
		break;
	}

	default:
		__efc_fabric_common(__func__, ctx, evt, arg);
	}
}
1311 
/**
 * __efc_p2p_wait_plogi_rsp() - Wait for the response to the PLOGI this
 * node sent to the p2p peer.
 * @ctx: remote node state machine context.
 * @evt: event to process.
 * @arg: per event optional argument (struct efc_node_cb for ELS events).
 */
void
__efc_p2p_wait_plogi_rsp(struct efc_sm_ctx *ctx,
			 enum efc_sm_event evt, void *arg)
{
	struct efc_node_cb *cbdata = arg;
	struct efc_node *node = ctx->app;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	switch (evt) {
	case EFC_EVT_SRRS_ELS_REQ_OK: {	/* PLOGI response received */
		int rc;

		/* ignore if the completion is not for our PLOGI */
		if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI,
					   __efc_fabric_common, __func__)) {
			return;
		}
		WARN_ON(!node->els_req_cnt);
		node->els_req_cnt--;
		/* sm: / save sparams, efc_node_attach */
		efc_node_save_sparms(node, cbdata->els_rsp.virt);
		rc = efc_node_attach(node);
		/* transition first so ATTACH_FAIL lands in the new state */
		efc_node_transition(node, __efc_p2p_wait_node_attach, NULL);
		if (rc < 0)
			efc_node_post_event(node, EFC_EVT_NODE_ATTACH_FAIL,
					    NULL);
		break;
	}
	case EFC_EVT_SRRS_ELS_REQ_FAIL: {
		if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI,
					   __efc_fabric_common, __func__)) {
			return;
		}
		node_printf(node, "PLOGI failed, shutting down\n");
		WARN_ON(!node->els_req_cnt);
		node->els_req_cnt--;
		node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
		efc_fabric_initiate_shutdown(node);
		break;
	}

	case EFC_EVT_PLOGI_RCVD: {
		struct fc_frame_header *hdr = cbdata->header->dma.virt;
		/* if we're in external loopback mode, just send LS_ACC */
		if (node->efc->external_loopback) {
			efc_send_plogi_acc(node, be16_to_cpu(hdr->fh_ox_id));
		} else {
			/*
			 * if this isn't external loopback,
			 * pass to default handler
			 */
			__efc_fabric_common(__func__, ctx, evt, arg);
		}
		break;
	}
	case EFC_EVT_PRLI_RCVD:
		/* I, or I+T */
		/* sent PLOGI and before completion was seen, received the
		 * PRLI from the remote node (WCQEs and RCQEs come in on
		 * different queues and order of processing cannot be assumed)
		 * Save OXID so PRLI can be sent after the attach and continue
		 * to wait for PLOGI response
		 */
		efc_process_prli_payload(node, cbdata->payload->dma.virt);
		efc_send_ls_acc_after_attach(node,
					     cbdata->header->dma.virt,
					     EFC_NODE_SEND_LS_ACC_PRLI);
		efc_node_transition(node, __efc_p2p_wait_plogi_rsp_recvd_prli,
				    NULL);
		break;
	default:
		__efc_fabric_common(__func__, ctx, evt, arg);
	}
}
1388 
/**
 * __efc_p2p_wait_plogi_rsp_recvd_prli() - Wait for the PLOGI response
 * after a PRLI has already been received from the peer.
 * @ctx: remote node state machine context.
 * @evt: event to process.
 * @arg: per event optional argument (struct efc_node_cb for ELS events).
 */
void
__efc_p2p_wait_plogi_rsp_recvd_prli(struct efc_sm_ctx *ctx,
				    enum efc_sm_event evt, void *arg)
{
	struct efc_node_cb *cbdata = arg;
	struct efc_node *node = ctx->app;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	switch (evt) {
	case EFC_EVT_ENTER:
		/*
		 * Since we've received a PRLI, we have a port login and will
		 * just need to wait for the PLOGI response to do the node
		 * attach and then we can send the LS_ACC for the PRLI. During
		 * this time we may receive FCP_CMNDs (which is possible since
		 * we've already sent a PRLI and our peer may have accepted).
		 * At this time, we are not waiting on any other unsolicited
		 * frames to continue with the login process. Thus, it will not
		 * hurt to hold frames here.
		 */
		efc_node_hold_frames(node);
		break;

	case EFC_EVT_EXIT:
		efc_node_accept_frames(node);
		break;

	case EFC_EVT_SRRS_ELS_REQ_OK: {	/* PLOGI response received */
		int rc;

		/* Completion from PLOGI sent */
		if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI,
					   __efc_fabric_common, __func__)) {
			return;
		}
		WARN_ON(!node->els_req_cnt);
		node->els_req_cnt--;
		/* sm: / save sparams, efc_node_attach */
		efc_node_save_sparms(node, cbdata->els_rsp.virt);
		rc = efc_node_attach(node);
		/* transition first so ATTACH_FAIL lands in the new state */
		efc_node_transition(node, __efc_p2p_wait_node_attach, NULL);
		if (rc < 0)
			efc_node_post_event(node, EFC_EVT_NODE_ATTACH_FAIL,
					    NULL);
		break;
	}
	case EFC_EVT_SRRS_ELS_REQ_FAIL:	/* PLOGI response received */
	case EFC_EVT_SRRS_ELS_REQ_RJT:
		/* PLOGI failed, shutdown the node */
		if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI,
					   __efc_fabric_common, __func__)) {
			return;
		}
		WARN_ON(!node->els_req_cnt);
		node->els_req_cnt--;
		node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
		efc_fabric_initiate_shutdown(node);
		break;

	default:
		__efc_fabric_common(__func__, ctx, evt, arg);
	}
}
1456 
/**
 * __efc_p2p_wait_node_attach() - Wait for the HW node attach to complete,
 * then send any deferred LS_ACC and move to the logged-in state.
 * @ctx: remote node state machine context.
 * @evt: event to process.
 * @arg: per event optional argument (struct efc_node_cb for ELS events).
 */
void
__efc_p2p_wait_node_attach(struct efc_sm_ctx *ctx,
			   enum efc_sm_event evt, void *arg)
{
	struct efc_node_cb *cbdata = arg;
	struct efc_node *node = ctx->app;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	switch (evt) {
	case EFC_EVT_ENTER:
		/* quiesce unsolicited frames until the attach completes */
		efc_node_hold_frames(node);
		break;

	case EFC_EVT_EXIT:
		efc_node_accept_frames(node);
		break;

	case EFC_EVT_NODE_ATTACH_OK:
		node->attached = true;
		/* dispatch on any LS_ACC deferred while unattached */
		switch (node->send_ls_acc) {
		case EFC_NODE_SEND_LS_ACC_PRLI: {
			/* send the LS_ACC for the PRLI saved earlier */
			efc_d_send_prli_rsp(node->ls_acc_io,
					    node->ls_acc_oxid);
			node->send_ls_acc = EFC_NODE_SEND_LS_ACC_NONE;
			node->ls_acc_io = NULL;
			break;
		}
		case EFC_NODE_SEND_LS_ACC_PLOGI: /* Can't happen in P2P */
		case EFC_NODE_SEND_LS_ACC_NONE:
		default:
			/* Normal case for I */
			/* sm: send_plogi_acc is not set / send PLOGI acc */
			efc_node_transition(node, __efc_d_port_logged_in,
					    NULL);
			break;
		}
		break;

	case EFC_EVT_NODE_ATTACH_FAIL:
		/* node attach failed, shutdown the node */
		node->attached = false;
		node_printf(node, "Node attach failed\n");
		node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
		efc_fabric_initiate_shutdown(node);
		break;

	case EFC_EVT_SHUTDOWN:
		/* defer the shutdown until the attach event arrives */
		node_printf(node, "%s received\n", efc_sm_event_name(evt));
		node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
		efc_node_transition(node,
				    __efc_fabric_wait_attach_evt_shutdown,
				     NULL);
		break;
	case EFC_EVT_PRLI_RCVD:
		/* save the OXID; the LS_ACC is sent once attach completes */
		node_printf(node, "%s: PRLI received before node is attached\n",
			    efc_sm_event_name(evt));
		efc_process_prli_payload(node, cbdata->payload->dma.virt);
		efc_send_ls_acc_after_attach(node,
					     cbdata->header->dma.virt,
				EFC_NODE_SEND_LS_ACC_PRLI);
		break;

	default:
		__efc_fabric_common(__func__, ctx, evt, arg);
	}
}
1526 
1527 int
1528 efc_p2p_setup(struct efc_nport *nport)
1529 {
1530 	struct efc *efc = nport->efc;
1531 	int rnode_winner;
1532 
1533 	rnode_winner = efc_rnode_is_winner(nport);
1534 
1535 	/* set nport flags to indicate p2p "winner" */
1536 	if (rnode_winner == 1) {
1537 		nport->p2p_remote_port_id = 0;
1538 		nport->p2p_port_id = 0;
1539 		nport->p2p_winner = false;
1540 	} else if (rnode_winner == 0) {
1541 		nport->p2p_remote_port_id = 2;
1542 		nport->p2p_port_id = 1;
1543 		nport->p2p_winner = true;
1544 	} else {
1545 		/* no winner; only okay if external loopback enabled */
1546 		if (nport->efc->external_loopback) {
1547 			/*
1548 			 * External loopback mode enabled;
1549 			 * local nport and remote node
1550 			 * will be registered with an NPortID = 1;
1551 			 */
1552 			efc_log_debug(efc,
1553 				      "External loopback mode enabled\n");
1554 			nport->p2p_remote_port_id = 1;
1555 			nport->p2p_port_id = 1;
1556 			nport->p2p_winner = true;
1557 		} else {
1558 			efc_log_warn(efc,
1559 				     "failed to determine p2p winner\n");
1560 			return rnode_winner;
1561 		}
1562 	}
1563 	return 0;
1564 }
1565