xref: /openbmc/linux/drivers/scsi/libfc/fc_rport.c (revision b4a9c7ed)
1 /*
2  * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms and conditions of the GNU General Public License,
6  * version 2, as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope it will be useful, but WITHOUT
9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
11  * more details.
12  *
13  * You should have received a copy of the GNU General Public License along with
14  * this program; if not, write to the Free Software Foundation, Inc.,
15  * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16  *
17  * Maintained at www.Open-FCoE.org
18  */
19 
20 /*
21  * RPORT GENERAL INFO
22  *
23  * This file contains all processing regarding fc_rports. It contains the
24  * rport state machine and does all rport interaction with the transport class.
25  * There should be no other places in libfc that interact directly with the
26  * transport class in regards to adding and deleting rports.
27  *
28  * fc_rport's represent N_Port's within the fabric.
29  */
30 
31 /*
32  * RPORT LOCKING
33  *
34  * The rport should never hold the rport mutex and then attempt to acquire
35  * either the lport or disc mutexes. The rport's mutex is considered lesser
36  * than both the lport's mutex and the disc mutex. Refer to fc_lport.c for
 * more comments on the hierarchy.
38  *
39  * The locking strategy is similar to the lport's strategy. The lock protects
40  * the rport's states and is held and released by the entry points to the rport
41  * block. All _enter_* functions correspond to rport states and expect the rport
 * mutex to be locked before calling them. This means that rports only handle
 * one request or response at a time; since they're not critical for the I/O
 * path, this potential over-use of the mutex is acceptable.
45  */
46 
47 #include <linux/kernel.h>
48 #include <linux/spinlock.h>
49 #include <linux/interrupt.h>
50 #include <linux/rcupdate.h>
51 #include <linux/timer.h>
52 #include <linux/workqueue.h>
53 #include <asm/unaligned.h>
54 
55 #include <scsi/libfc.h>
56 #include <scsi/fc_encode.h>
57 
/* Workqueue on which all rport state-change events are processed. */
struct workqueue_struct *rport_event_queue;
59 
60 static void fc_rport_enter_plogi(struct fc_rport_priv *);
61 static void fc_rport_enter_prli(struct fc_rport_priv *);
62 static void fc_rport_enter_rtv(struct fc_rport_priv *);
63 static void fc_rport_enter_ready(struct fc_rport_priv *);
64 static void fc_rport_enter_logo(struct fc_rport_priv *);
65 static void fc_rport_enter_adisc(struct fc_rport_priv *);
66 
67 static void fc_rport_recv_plogi_req(struct fc_lport *,
68 				    struct fc_seq *, struct fc_frame *);
69 static void fc_rport_recv_prli_req(struct fc_rport_priv *,
70 				   struct fc_seq *, struct fc_frame *);
71 static void fc_rport_recv_prlo_req(struct fc_rport_priv *,
72 				   struct fc_seq *, struct fc_frame *);
73 static void fc_rport_recv_logo_req(struct fc_lport *,
74 				   struct fc_seq *, struct fc_frame *);
75 static void fc_rport_timeout(struct work_struct *);
76 static void fc_rport_error(struct fc_rport_priv *, struct fc_frame *);
77 static void fc_rport_error_retry(struct fc_rport_priv *, struct fc_frame *);
78 static void fc_rport_work(struct work_struct *);
79 
/*
 * Human-readable names for each rport state, indexed by enum fc_rport_state.
 * Used only for debug logging via fc_rport_state().
 */
static const char *fc_rport_state_names[] = {
	[RPORT_ST_INIT] = "Init",
	[RPORT_ST_PLOGI] = "PLOGI",
	[RPORT_ST_PRLI] = "PRLI",
	[RPORT_ST_RTV] = "RTV",
	[RPORT_ST_READY] = "Ready",
	[RPORT_ST_LOGO] = "LOGO",
	[RPORT_ST_ADISC] = "ADISC",
	[RPORT_ST_DELETE] = "Delete",
	[RPORT_ST_RESTART] = "Restart",
};
91 
92 /**
93  * fc_rport_lookup() - lookup a remote port by port_id
94  * @lport: Fibre Channel host port instance
95  * @port_id: remote port port_id to match
96  */
97 static struct fc_rport_priv *fc_rport_lookup(const struct fc_lport *lport,
98 					     u32 port_id)
99 {
100 	struct fc_rport_priv *rdata;
101 
102 	list_for_each_entry(rdata, &lport->disc.rports, peers)
103 		if (rdata->ids.port_id == port_id)
104 			return rdata;
105 	return NULL;
106 }
107 
/**
 * fc_rport_create() - Create a new remote port
 * @lport:   The local port that the new remote port is for
 * @port_id: The port ID for the new remote port
 *
 * Returns the existing rport private data if @port_id is already known,
 * a newly allocated and initialized one otherwise, or NULL on allocation
 * failure.
 *
 * Locking note:  must be called with the disc_mutex held.
 */
static struct fc_rport_priv *fc_rport_create(struct fc_lport *lport,
					     u32 port_id)
{
	struct fc_rport_priv *rdata;

	/* Reuse an existing entry for this port ID if one exists. */
	rdata = lport->tt.rport_lookup(lport, port_id);
	if (rdata)
		return rdata;

	rdata = kzalloc(sizeof(*rdata), GFP_KERNEL);
	if (!rdata)
		return NULL;

	/* WWNs are unknown until PLOGI completes; -1 marks them unset. */
	rdata->ids.node_name = -1;
	rdata->ids.port_name = -1;
	rdata->ids.port_id = port_id;
	rdata->ids.roles = FC_RPORT_ROLE_UNKNOWN;

	kref_init(&rdata->kref);
	mutex_init(&rdata->rp_mutex);
	rdata->local_port = lport;
	rdata->rp_state = RPORT_ST_INIT;
	rdata->event = RPORT_EV_NONE;
	rdata->flags = FC_RP_FLAGS_REC_SUPPORTED;
	/* Start with the local port's timeout values; PLOGI/RTV may revise. */
	rdata->e_d_tov = lport->e_d_tov;
	rdata->r_a_tov = lport->r_a_tov;
	rdata->maxframe_size = FC_MIN_MAX_PAYLOAD;
	INIT_DELAYED_WORK(&rdata->retry_work, fc_rport_timeout);
	INIT_WORK(&rdata->event_work, fc_rport_work);
	/* The directory server rport is not kept on the discovery list. */
	if (port_id != FC_FID_DIR_SERV)
		list_add(&rdata->peers, &lport->disc.rports);
	return rdata;
}
148 
149 /**
150  * fc_rport_destroy() - free a remote port after last reference is released.
151  * @kref: pointer to kref inside struct fc_rport_priv
152  */
153 static void fc_rport_destroy(struct kref *kref)
154 {
155 	struct fc_rport_priv *rdata;
156 
157 	rdata = container_of(kref, struct fc_rport_priv, kref);
158 	kfree(rdata);
159 }
160 
161 /**
162  * fc_rport_state() - return a string for the state the rport is in
163  * @rdata: remote port private data
164  */
165 static const char *fc_rport_state(struct fc_rport_priv *rdata)
166 {
167 	const char *cp;
168 
169 	cp = fc_rport_state_names[rdata->rp_state];
170 	if (!cp)
171 		cp = "Unknown";
172 	return cp;
173 }
174 
175 /**
176  * fc_set_rport_loss_tmo() - Set the remote port loss timeout in seconds.
177  * @rport: Pointer to Fibre Channel remote port structure
178  * @timeout: timeout in seconds
179  */
180 void fc_set_rport_loss_tmo(struct fc_rport *rport, u32 timeout)
181 {
182 	if (timeout)
183 		rport->dev_loss_tmo = timeout + 5;
184 	else
185 		rport->dev_loss_tmo = 30;
186 }
187 EXPORT_SYMBOL(fc_set_rport_loss_tmo);
188 
189 /**
190  * fc_plogi_get_maxframe() - Get max payload from the common service parameters
191  * @flp: FLOGI payload structure
192  * @maxval: upper limit, may be less than what is in the service parameters
193  */
194 static unsigned int fc_plogi_get_maxframe(struct fc_els_flogi *flp,
195 					  unsigned int maxval)
196 {
197 	unsigned int mfs;
198 
199 	/*
200 	 * Get max payload from the common service parameters and the
201 	 * class 3 receive data field size.
202 	 */
203 	mfs = ntohs(flp->fl_csp.sp_bb_data) & FC_SP_BB_DATA_MASK;
204 	if (mfs >= FC_SP_MIN_MAX_PAYLOAD && mfs < maxval)
205 		maxval = mfs;
206 	mfs = ntohs(flp->fl_cssp[3 - 1].cp_rdfs);
207 	if (mfs >= FC_SP_MIN_MAX_PAYLOAD && mfs < maxval)
208 		maxval = mfs;
209 	return maxval;
210 }
211 
212 /**
213  * fc_rport_state_enter() - Change the rport's state
214  * @rdata: The rport whose state should change
215  * @new: The new state of the rport
216  *
217  * Locking Note: Called with the rport lock held
218  */
219 static void fc_rport_state_enter(struct fc_rport_priv *rdata,
220 				 enum fc_rport_state new)
221 {
222 	if (rdata->rp_state != new)
223 		rdata->retries = 0;
224 	rdata->rp_state = new;
225 }
226 
/**
 * fc_rport_work() - Handler for remote port events queued on rport_event_queue
 * @work: The event_work member of the fc_rport_priv
 *
 * Delivers the pending event (READY, FAILED, LOGO or STOP) to the upper
 * layer via rport_ops->event_callback() and performs the matching
 * fc_remote_port_add()/fc_remote_port_delete() transport-class calls.
 *
 * Locking Note: Runs without rp_mutex held; takes and drops it around
 * accesses to rdata, and never holds it across calls into the transport
 * class or the event callback.
 */
static void fc_rport_work(struct work_struct *work)
{
	u32 port_id;
	struct fc_rport_priv *rdata =
		container_of(work, struct fc_rport_priv, event_work);
	struct fc_rport_libfc_priv *rp;
	enum fc_rport_event event;
	struct fc_lport *lport = rdata->local_port;
	struct fc_rport_operations *rport_ops;
	struct fc_rport_identifiers ids;
	struct fc_rport *rport;
	int restart = 0;

	mutex_lock(&rdata->rp_mutex);
	event = rdata->event;
	rport_ops = rdata->ops;
	rport = rdata->rport;

	FC_RPORT_DBG(rdata, "work event %u\n", event);

	switch (event) {
	case RPORT_EV_READY:
		/* Snapshot ids while locked; transport calls are made unlocked. */
		ids = rdata->ids;
		rdata->event = RPORT_EV_NONE;
		/* Hold a reference across the unlocked section below. */
		kref_get(&rdata->kref);
		mutex_unlock(&rdata->rp_mutex);

		if (!rport)
			rport = fc_remote_port_add(lport->host, 0, &ids);
		if (!rport) {
			FC_RPORT_DBG(rdata, "Failed to add the rport\n");
			lport->tt.rport_logoff(rdata);
			kref_put(&rdata->kref, lport->tt.rport_destroy);
			return;
		}
		mutex_lock(&rdata->rp_mutex);
		if (rdata->rport)
			FC_RPORT_DBG(rdata, "rport already allocated\n");
		rdata->rport = rport;
		rport->maxframe_size = rdata->maxframe_size;
		rport->supported_classes = rdata->supported_classes;

		/* Mirror negotiated parameters into the transport's dd_data. */
		rp = rport->dd_data;
		rp->local_port = lport;
		rp->rp_state = rdata->rp_state;
		rp->flags = rdata->flags;
		rp->e_d_tov = rdata->e_d_tov;
		rp->r_a_tov = rdata->r_a_tov;
		mutex_unlock(&rdata->rp_mutex);

		if (rport_ops && rport_ops->event_callback) {
			FC_RPORT_DBG(rdata, "callback ev %d\n", event);
			rport_ops->event_callback(lport, rdata, event);
		}
		kref_put(&rdata->kref, lport->tt.rport_destroy);
		break;

	case RPORT_EV_FAILED:
	case RPORT_EV_LOGO:
	case RPORT_EV_STOP:
		port_id = rdata->ids.port_id;
		mutex_unlock(&rdata->rp_mutex);

		if (port_id != FC_FID_DIR_SERV) {
			/*
			 * We must drop rp_mutex before taking disc_mutex.
			 * Re-evaluate state to allow for restart.
			 * A transition to RESTART state must only happen
			 * while disc_mutex is held and rdata is on the list.
			 */
			mutex_lock(&lport->disc.disc_mutex);
			mutex_lock(&rdata->rp_mutex);
			if (rdata->rp_state == RPORT_ST_RESTART)
				restart = 1;
			else
				list_del(&rdata->peers);
			mutex_unlock(&rdata->rp_mutex);
			mutex_unlock(&lport->disc.disc_mutex);
		}

		if (rport_ops && rport_ops->event_callback) {
			FC_RPORT_DBG(rdata, "callback ev %d\n", event);
			rport_ops->event_callback(lport, rdata, event);
		}
		/* No retry may be pending once the rport is torn down. */
		cancel_delayed_work_sync(&rdata->retry_work);

		/*
		 * Reset any outstanding exchanges before freeing rport.
		 */
		lport->tt.exch_mgr_reset(lport, 0, port_id);
		lport->tt.exch_mgr_reset(lport, port_id, 0);

		if (rport) {
			rp = rport->dd_data;
			rp->rp_state = RPORT_ST_DELETE;
			mutex_lock(&rdata->rp_mutex);
			rdata->rport = NULL;
			mutex_unlock(&rdata->rp_mutex);
			fc_remote_port_delete(rport);
		}
		if (restart) {
			/* Relogin was requested while deleting; start over. */
			mutex_lock(&rdata->rp_mutex);
			FC_RPORT_DBG(rdata, "work restart\n");
			fc_rport_enter_plogi(rdata);
			mutex_unlock(&rdata->rp_mutex);
		} else
			kref_put(&rdata->kref, lport->tt.rport_destroy);
		break;

	default:
		mutex_unlock(&rdata->rp_mutex);
		break;
	}
}
341 
/**
 * fc_rport_login() - Start the remote port login state machine
 * @rdata: private remote port
 *
 * Locking Note: Called without the rport lock held. This
 * function will hold the rport lock, call an _enter_*
 * function and then unlock the rport.
 *
 * This indicates the intent to be logged into the remote port.
 * If it appears we are already logged in, ADISC is used to verify
 * the setup.
 *
 * Returns 0 unconditionally.
 */
int fc_rport_login(struct fc_rport_priv *rdata)
{
	mutex_lock(&rdata->rp_mutex);

	switch (rdata->rp_state) {
	case RPORT_ST_READY:
		/* Already logged in: verify the login with ADISC. */
		FC_RPORT_DBG(rdata, "ADISC port\n");
		fc_rport_enter_adisc(rdata);
		break;
	case RPORT_ST_RESTART:
		/* Restart already scheduled; nothing more to do. */
		break;
	case RPORT_ST_DELETE:
		/* Deletion in progress; flag for relogin once it completes. */
		FC_RPORT_DBG(rdata, "Restart deleted port\n");
		fc_rport_state_enter(rdata, RPORT_ST_RESTART);
		break;
	default:
		FC_RPORT_DBG(rdata, "Login to port\n");
		fc_rport_enter_plogi(rdata);
		break;
	}
	mutex_unlock(&rdata->rp_mutex);

	return 0;
}
378 
/**
 * fc_rport_enter_delete() - schedule a remote port to be deleted.
 * @rdata: private remote port
 * @event: event to report as the reason for deletion
 *
 * Locking Note: Called with the rport lock held.
 *
 * Allow state change into DELETE only once.
 *
 * Call queue_work only if there's no event already pending.
 * Set the new event so that the old pending event will not occur.
 * Since we have the mutex, even if fc_rport_work() is already started,
 * it'll see the new event.
 */
static void fc_rport_enter_delete(struct fc_rport_priv *rdata,
				  enum fc_rport_event event)
{
	if (rdata->rp_state == RPORT_ST_DELETE)
		return;

	FC_RPORT_DBG(rdata, "Delete port\n");

	fc_rport_state_enter(rdata, RPORT_ST_DELETE);

	/* Queue first, then overwrite the event (see comment above). */
	if (rdata->event == RPORT_EV_NONE)
		queue_work(rport_event_queue, &rdata->event_work);
	rdata->event = event;
}
407 
/**
 * fc_rport_logoff() - Logoff and remove an rport
 * @rdata: private remote port
 *
 * Sends a LOGO to the peer (unless a restart is pending) and schedules
 * deletion with the STOP event.
 *
 * Locking Note: Called without the rport lock held. This
 * function will hold the rport lock, call an _enter_*
 * function and then unlock the rport.
 *
 * Returns 0 unconditionally.
 */
int fc_rport_logoff(struct fc_rport_priv *rdata)
{
	mutex_lock(&rdata->rp_mutex);

	FC_RPORT_DBG(rdata, "Remove port\n");

	if (rdata->rp_state == RPORT_ST_DELETE) {
		/* Deletion already scheduled; avoid queuing it twice. */
		FC_RPORT_DBG(rdata, "Port in Delete state, not removing\n");
		goto out;
	}

	if (rdata->rp_state == RPORT_ST_RESTART)
		/* Restart pending: delete without sending LOGO. */
		FC_RPORT_DBG(rdata, "Port in Restart state, deleting\n");
	else
		fc_rport_enter_logo(rdata);

	/*
	 * Change the state to Delete so that we discard
	 * the response.
	 */
	fc_rport_enter_delete(rdata, RPORT_EV_STOP);
out:
	mutex_unlock(&rdata->rp_mutex);
	return 0;
}
441 
/**
 * fc_rport_enter_ready() - The rport is ready
 * @rdata: private remote port
 *
 * Enters the READY state and queues the READY event for fc_rport_work().
 *
 * Locking Note: The rport lock is expected to be held before calling
 * this routine.
 */
static void fc_rport_enter_ready(struct fc_rport_priv *rdata)
{
	fc_rport_state_enter(rdata, RPORT_ST_READY);

	FC_RPORT_DBG(rdata, "Port is Ready\n");

	/*
	 * Queue work only if no event is pending, then set the event;
	 * any previously pending event is superseded (same pattern as
	 * fc_rport_enter_delete()).
	 */
	if (rdata->event == RPORT_EV_NONE)
		queue_work(rport_event_queue, &rdata->event_work);
	rdata->event = RPORT_EV_READY;
}
459 
/**
 * fc_rport_timeout() - Handler for the retry_work timer.
 * @work: The work struct of the fc_rport_priv
 *
 * Re-issues the request for whichever login-sequence state the rport is
 * in; terminal and idle states are left untouched.
 *
 * Locking Note: Called without the rport lock held. This
 * function will hold the rport lock, call an _enter_*
 * function and then unlock the rport.
 */
static void fc_rport_timeout(struct work_struct *work)
{
	struct fc_rport_priv *rdata =
		container_of(work, struct fc_rport_priv, retry_work.work);

	mutex_lock(&rdata->rp_mutex);

	switch (rdata->rp_state) {
	case RPORT_ST_PLOGI:
		fc_rport_enter_plogi(rdata);
		break;
	case RPORT_ST_PRLI:
		fc_rport_enter_prli(rdata);
		break;
	case RPORT_ST_RTV:
		fc_rport_enter_rtv(rdata);
		break;
	case RPORT_ST_LOGO:
		fc_rport_enter_logo(rdata);
		break;
	case RPORT_ST_ADISC:
		fc_rport_enter_adisc(rdata);
		break;
	case RPORT_ST_READY:
	case RPORT_ST_INIT:
	case RPORT_ST_DELETE:
	case RPORT_ST_RESTART:
		/* No request outstanding in these states; nothing to retry. */
		break;
	}

	mutex_unlock(&rdata->rp_mutex);
}
500 
/**
 * fc_rport_error() - Error handler, called once retries have been exhausted
 * @rdata: private remote port
 * @fp: The frame pointer (may be an ERR_PTR error code rather than a frame)
 *
 * Resolves the failure according to the current state: PLOGI/LOGO errors
 * delete the rport, an RTV error is tolerated (go READY anyway), and
 * PRLI/ADISC errors trigger a LOGO.
 *
 * Locking Note: The rport lock is expected to be held before
 * calling this routine
 */
static void fc_rport_error(struct fc_rport_priv *rdata, struct fc_frame *fp)
{
	FC_RPORT_DBG(rdata, "Error %ld in state %s, retries %d\n",
		     IS_ERR(fp) ? -PTR_ERR(fp) : 0,
		     fc_rport_state(rdata), rdata->retries);

	switch (rdata->rp_state) {
	case RPORT_ST_PLOGI:
	case RPORT_ST_LOGO:
		fc_rport_enter_delete(rdata, RPORT_EV_FAILED);
		break;
	case RPORT_ST_RTV:
		/* Many targets don't implement RTV; proceed to READY. */
		fc_rport_enter_ready(rdata);
		break;
	case RPORT_ST_PRLI:
	case RPORT_ST_ADISC:
		fc_rport_enter_logo(rdata);
		break;
	case RPORT_ST_DELETE:
	case RPORT_ST_RESTART:
	case RPORT_ST_READY:
	case RPORT_ST_INIT:
		/* No request in flight; ignore. */
		break;
	}
}
534 
/**
 * fc_rport_error_retry() - Error handler when retries are desired
 * @rdata: private remote port data
 * @fp: The frame pointer (may be an ERR_PTR error code rather than a frame)
 *
 * If the error was an exchange timeout retry immediately,
 * otherwise wait for E_D_TOV.
 *
 * Locking Note: The rport lock is expected to be held before
 * calling this routine
 */
static void fc_rport_error_retry(struct fc_rport_priv *rdata,
				 struct fc_frame *fp)
{
	unsigned long delay = FC_DEF_E_D_TOV;

	/* make sure this isn't an FC_EX_CLOSED error, never retry those */
	if (PTR_ERR(fp) == -FC_EX_CLOSED)
		return fc_rport_error(rdata, fp);

	if (rdata->retries < rdata->local_port->max_rport_retry_count) {
		FC_RPORT_DBG(rdata, "Error %ld in state %s, retrying\n",
			     PTR_ERR(fp), fc_rport_state(rdata));
		rdata->retries++;
		/* no additional delay on exchange timeouts */
		if (PTR_ERR(fp) == -FC_EX_TIMEOUT)
			delay = 0;
		schedule_delayed_work(&rdata->retry_work, delay);
		return;
	}

	/* Retries exhausted; fall through to the terminal error handler. */
	return fc_rport_error(rdata, fp);
}
568 
/**
 * fc_rport_plogi_resp() - Handle incoming ELS PLOGI response
 * @sp: current sequence in the PLOGI exchange
 * @fp: response frame, or an ERR_PTR error code
 * @rdata_arg: private remote port data
 *
 * On LS_ACC, records the peer's WWPN/WWNN, E_D_TOV, sequence limits and
 * max frame size, then proceeds to PRLI. Anything else is retried via
 * fc_rport_error_retry().
 *
 * Locking Note: This function will be called without the rport lock
 * held, but it will lock, call an _enter_* function or fc_rport_error
 * and then unlock the rport.
 */
static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp,
				void *rdata_arg)
{
	struct fc_rport_priv *rdata = rdata_arg;
	struct fc_lport *lport = rdata->local_port;
	struct fc_els_flogi *plp = NULL;
	unsigned int tov;
	u16 csp_seq;
	u16 cssp_seq;
	u8 op;

	mutex_lock(&rdata->rp_mutex);

	FC_RPORT_DBG(rdata, "Received a PLOGI %s\n", fc_els_resp_type(fp));

	if (rdata->rp_state != RPORT_ST_PLOGI) {
		/* Stale response (state moved on); discard it. */
		FC_RPORT_DBG(rdata, "Received a PLOGI response, but in state "
			     "%s\n", fc_rport_state(rdata));
		if (IS_ERR(fp))
			goto err;
		goto out;
	}

	if (IS_ERR(fp)) {
		/* fp is an error code, not a frame; do not free it. */
		fc_rport_error_retry(rdata, fp);
		goto err;
	}

	op = fc_frame_payload_op(fp);
	if (op == ELS_LS_ACC &&
	    (plp = fc_frame_payload_get(fp, sizeof(*plp))) != NULL) {
		rdata->ids.port_name = get_unaligned_be64(&plp->fl_wwpn);
		rdata->ids.node_name = get_unaligned_be64(&plp->fl_wwnn);

		/* FC_SP_FT_EDTR means the peer's E_D_TOV is in nanoseconds. */
		tov = ntohl(plp->fl_csp.sp_e_d_tov);
		if (ntohs(plp->fl_csp.sp_features) & FC_SP_FT_EDTR)
			tov /= 1000;
		if (tov > rdata->e_d_tov)
			rdata->e_d_tov = tov;
		/* Use the lower of the total and class-3 concurrent seq limits. */
		csp_seq = ntohs(plp->fl_csp.sp_tot_seq);
		cssp_seq = ntohs(plp->fl_cssp[3 - 1].cp_con_seq);
		if (cssp_seq < csp_seq)
			csp_seq = cssp_seq;
		rdata->max_seq = csp_seq;
		rdata->maxframe_size = fc_plogi_get_maxframe(plp, lport->mfs);
		fc_rport_enter_prli(rdata);
	} else
		fc_rport_error_retry(rdata, fp);

out:
	fc_frame_free(fp);
err:
	mutex_unlock(&rdata->rp_mutex);
	/* Drop the reference taken when the PLOGI was sent. */
	kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
}
634 
/**
 * fc_rport_enter_plogi() - Send Port Login (PLOGI) request to peer
 * @rdata: private remote port data
 *
 * Enters the PLOGI state and sends the request; on send or allocation
 * failure, schedules a retry via fc_rport_error_retry().
 *
 * Locking Note: The rport lock is expected to be held before calling
 * this routine.
 */
static void fc_rport_enter_plogi(struct fc_rport_priv *rdata)
{
	struct fc_lport *lport = rdata->local_port;
	struct fc_frame *fp;

	FC_RPORT_DBG(rdata, "Port entered PLOGI state from %s state\n",
		     fc_rport_state(rdata));

	fc_rport_state_enter(rdata, RPORT_ST_PLOGI);

	/* Reset negotiated values; the PLOGI response will re-establish them. */
	rdata->maxframe_size = FC_MIN_MAX_PAYLOAD;
	fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
	if (!fp) {
		fc_rport_error_retry(rdata, fp);
		return;
	}
	rdata->e_d_tov = lport->e_d_tov;

	if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_PLOGI,
				  fc_rport_plogi_resp, rdata, lport->e_d_tov))
		fc_rport_error_retry(rdata, NULL);
	else
		/* Reference held for the response handler; dropped there. */
		kref_get(&rdata->kref);
}
666 
/**
 * fc_rport_prli_resp() - Process Login (PRLI) response handler
 * @sp: current sequence in the PRLI exchange
 * @fp: response frame, or an ERR_PTR error code
 * @rdata_arg: private remote port data
 *
 * On LS_ACC, extracts the FCP service parameters (retry support,
 * initiator/target roles) and proceeds to RTV; otherwise the rport is
 * deleted with a FAILED event.
 *
 * Locking Note: This function will be called without the rport lock
 * held, but it will lock, call an _enter_* function or fc_rport_error
 * and then unlock the rport.
 */
static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
			       void *rdata_arg)
{
	struct fc_rport_priv *rdata = rdata_arg;
	struct {
		struct fc_els_prli prli;
		struct fc_els_spp spp;
	} *pp;
	u32 roles = FC_RPORT_ROLE_UNKNOWN;
	u32 fcp_parm = 0;
	u8 op;

	mutex_lock(&rdata->rp_mutex);

	FC_RPORT_DBG(rdata, "Received a PRLI %s\n", fc_els_resp_type(fp));

	if (rdata->rp_state != RPORT_ST_PRLI) {
		/* Stale response (state moved on); discard it. */
		FC_RPORT_DBG(rdata, "Received a PRLI response, but in state "
			     "%s\n", fc_rport_state(rdata));
		if (IS_ERR(fp))
			goto err;
		goto out;
	}

	if (IS_ERR(fp)) {
		fc_rport_error_retry(rdata, fp);
		goto err;
	}

	/* reinitialize remote port roles */
	rdata->ids.roles = FC_RPORT_ROLE_UNKNOWN;

	op = fc_frame_payload_op(fp);
	if (op == ELS_LS_ACC) {
		pp = fc_frame_payload_get(fp, sizeof(*pp));
		if (pp && pp->prli.prli_spp_len >= sizeof(pp->spp)) {
			fcp_parm = ntohl(pp->spp.spp_params);
			if (fcp_parm & FCP_SPPF_RETRY)
				rdata->flags |= FC_RP_FLAGS_RETRY;
		}

		rdata->supported_classes = FC_COS_CLASS3;
		if (fcp_parm & FCP_SPPF_INIT_FCN)
			roles |= FC_RPORT_ROLE_FCP_INITIATOR;
		if (fcp_parm & FCP_SPPF_TARG_FCN)
			roles |= FC_RPORT_ROLE_FCP_TARGET;

		rdata->ids.roles = roles;
		fc_rport_enter_rtv(rdata);

	} else {
		FC_RPORT_DBG(rdata, "Bad ELS response for PRLI command\n");
		fc_rport_enter_delete(rdata, RPORT_EV_FAILED);
	}

out:
	fc_frame_free(fp);
err:
	mutex_unlock(&rdata->rp_mutex);
	/* Drop the reference taken when the PRLI was sent. */
	kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
}
738 
/**
 * fc_rport_logo_resp() - Logout (LOGO) response handler
 * @sp: current sequence in the LOGO exchange
 * @fp: response frame, or an ERR_PTR error code
 * @rdata_arg: private remote port data
 *
 * Regardless of the response opcode, the rport is scheduled for deletion
 * with a LOGO event; a bad opcode is only logged.
 *
 * Locking Note: This function will be called without the rport lock
 * held, but it will lock, call an _enter_* function or fc_rport_error
 * and then unlock the rport.
 */
static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
			       void *rdata_arg)
{
	struct fc_rport_priv *rdata = rdata_arg;
	u8 op;

	mutex_lock(&rdata->rp_mutex);

	FC_RPORT_DBG(rdata, "Received a LOGO %s\n", fc_els_resp_type(fp));

	if (rdata->rp_state != RPORT_ST_LOGO) {
		/* Stale response (state moved on); discard it. */
		FC_RPORT_DBG(rdata, "Received a LOGO response, but in state "
			     "%s\n", fc_rport_state(rdata));
		if (IS_ERR(fp))
			goto err;
		goto out;
	}

	if (IS_ERR(fp)) {
		fc_rport_error_retry(rdata, fp);
		goto err;
	}

	op = fc_frame_payload_op(fp);
	if (op != ELS_LS_ACC)
		FC_RPORT_DBG(rdata, "Bad ELS response op %x for LOGO command\n",
			     op);
	fc_rport_enter_delete(rdata, RPORT_EV_LOGO);

out:
	fc_frame_free(fp);
err:
	mutex_unlock(&rdata->rp_mutex);
	/* Drop the reference taken when the LOGO was sent. */
	kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
}
784 
/**
 * fc_rport_enter_prli() - Send Process Login (PRLI) request to peer
 * @rdata: private remote port data
 *
 * Well-known-address rports (port_id >= FC_FID_DOM_MGR) skip PRLI/RTV
 * and go straight to READY.
 *
 * Locking Note: The rport lock is expected to be held before calling
 * this routine.
 */
static void fc_rport_enter_prli(struct fc_rport_priv *rdata)
{
	struct fc_lport *lport = rdata->local_port;
	struct {
		struct fc_els_prli prli;
		struct fc_els_spp spp;
	} *pp;
	struct fc_frame *fp;

	/*
	 * If the rport is one of the well known addresses
	 * we skip PRLI and RTV and go straight to READY.
	 */
	if (rdata->ids.port_id >= FC_FID_DOM_MGR) {
		fc_rport_enter_ready(rdata);
		return;
	}

	FC_RPORT_DBG(rdata, "Port entered PRLI state from %s state\n",
		     fc_rport_state(rdata));

	fc_rport_state_enter(rdata, RPORT_ST_PRLI);

	fp = fc_frame_alloc(lport, sizeof(*pp));
	if (!fp) {
		fc_rport_error_retry(rdata, fp);
		return;
	}

	if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_PRLI,
				  fc_rport_prli_resp, rdata, lport->e_d_tov))
		fc_rport_error_retry(rdata, NULL);
	else
		/* Reference held for the response handler; dropped there. */
		kref_get(&rdata->kref);
}
827 
/**
 * fc_rport_rtv_resp() - Request Timeout Value response handler
 * @sp: current sequence in the RTV exchange
 * @fp: response frame, or an ERR_PTR error code
 * @rdata_arg: private remote port data
 *
 * Many targets don't seem to support this.  On LS_ACC, updates the
 * rport's R_A_TOV and E_D_TOV from the response; in every non-error
 * case the rport then enters READY.
 *
 * Locking Note: This function will be called without the rport lock
 * held, but it will lock, call an _enter_* function or fc_rport_error
 * and then unlock the rport.
 */
static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp,
			      void *rdata_arg)
{
	struct fc_rport_priv *rdata = rdata_arg;
	u8 op;

	mutex_lock(&rdata->rp_mutex);

	FC_RPORT_DBG(rdata, "Received a RTV %s\n", fc_els_resp_type(fp));

	if (rdata->rp_state != RPORT_ST_RTV) {
		/* Stale response (state moved on); discard it. */
		FC_RPORT_DBG(rdata, "Received a RTV response, but in state "
			     "%s\n", fc_rport_state(rdata));
		if (IS_ERR(fp))
			goto err;
		goto out;
	}

	if (IS_ERR(fp)) {
		/* RTV failures are terminal for the exchange but not retried. */
		fc_rport_error(rdata, fp);
		goto err;
	}

	op = fc_frame_payload_op(fp);
	if (op == ELS_LS_ACC) {
		struct fc_els_rtv_acc *rtv;
		u32 toq;
		u32 tov;

		rtv = fc_frame_payload_get(fp, sizeof(*rtv));
		if (rtv) {
			toq = ntohl(rtv->rtv_toq);
			tov = ntohl(rtv->rtv_r_a_tov);
			if (tov == 0)
				tov = 1;
			rdata->r_a_tov = tov;
			tov = ntohl(rtv->rtv_e_d_tov);
			/* FC_ELS_RTV_EDRES: E_D_TOV is in nanoseconds. */
			if (toq & FC_ELS_RTV_EDRES)
				tov /= 1000000;
			if (tov == 0)
				tov = 1;
			rdata->e_d_tov = tov;
		}
	}

	fc_rport_enter_ready(rdata);

out:
	fc_frame_free(fp);
err:
	mutex_unlock(&rdata->rp_mutex);
	/* Drop the reference taken when the RTV was sent. */
	kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
}
893 
/**
 * fc_rport_enter_rtv() - Send Request Timeout Value (RTV) request to peer
 * @rdata: private remote port data
 *
 * Enters the RTV state and sends the request; on send or allocation
 * failure, schedules a retry via fc_rport_error_retry().
 *
 * Locking Note: The rport lock is expected to be held before calling
 * this routine.
 */
static void fc_rport_enter_rtv(struct fc_rport_priv *rdata)
{
	struct fc_frame *fp;
	struct fc_lport *lport = rdata->local_port;

	FC_RPORT_DBG(rdata, "Port entered RTV state from %s state\n",
		     fc_rport_state(rdata));

	fc_rport_state_enter(rdata, RPORT_ST_RTV);

	fp = fc_frame_alloc(lport, sizeof(struct fc_els_rtv));
	if (!fp) {
		fc_rport_error_retry(rdata, fp);
		return;
	}

	if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_RTV,
				     fc_rport_rtv_resp, rdata, lport->e_d_tov))
		fc_rport_error_retry(rdata, NULL);
	else
		/* Reference held for the response handler; dropped there. */
		kref_get(&rdata->kref);
}
923 
/**
 * fc_rport_enter_logo() - Send Logout (LOGO) request to peer
 * @rdata: private remote port data
 *
 * Enters the LOGO state and sends the request; on send or allocation
 * failure, schedules a retry via fc_rport_error_retry().
 *
 * Locking Note: The rport lock is expected to be held before calling
 * this routine.
 */
static void fc_rport_enter_logo(struct fc_rport_priv *rdata)
{
	struct fc_lport *lport = rdata->local_port;
	struct fc_frame *fp;

	FC_RPORT_DBG(rdata, "Port entered LOGO state from %s state\n",
		     fc_rport_state(rdata));

	fc_rport_state_enter(rdata, RPORT_ST_LOGO);

	fp = fc_frame_alloc(lport, sizeof(struct fc_els_logo));
	if (!fp) {
		fc_rport_error_retry(rdata, fp);
		return;
	}

	if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_LOGO,
				  fc_rport_logo_resp, rdata, lport->e_d_tov))
		fc_rport_error_retry(rdata, NULL);
	else
		/* Reference held for the response handler; dropped there. */
		kref_get(&rdata->kref);
}
953 
/**
 * fc_rport_adisc_resp() - Address Discovery response handler
 * @sp: current sequence in the ADISC exchange
 * @fp: response frame, or an ERR_PTR error code
 * @rdata_arg: remote port private.
 *
 * Verifies the peer still has the same port ID, WWPN and WWNN. On any
 * mismatch or rejection, falls back to PLOGI; otherwise returns to READY.
 *
 * Locking Note: This function will be called without the rport lock
 * held, but it will lock, call an _enter_* function or fc_rport_error
 * and then unlock the rport.
 */
static void fc_rport_adisc_resp(struct fc_seq *sp, struct fc_frame *fp,
			      void *rdata_arg)
{
	struct fc_rport_priv *rdata = rdata_arg;
	struct fc_els_adisc *adisc;
	u8 op;

	mutex_lock(&rdata->rp_mutex);

	FC_RPORT_DBG(rdata, "Received a ADISC response\n");

	if (rdata->rp_state != RPORT_ST_ADISC) {
		/* Stale response (state moved on); discard it. */
		FC_RPORT_DBG(rdata, "Received a ADISC resp but in state %s\n",
			     fc_rport_state(rdata));
		if (IS_ERR(fp))
			goto err;
		goto out;
	}

	if (IS_ERR(fp)) {
		fc_rport_error(rdata, fp);
		goto err;
	}

	/*
	 * If address verification failed.  Consider us logged out of the rport.
	 * Since the rport is still in discovery, we want to be
	 * logged in, so go to PLOGI state.  Otherwise, go back to READY.
	 */
	op = fc_frame_payload_op(fp);
	adisc = fc_frame_payload_get(fp, sizeof(*adisc));
	if (op != ELS_LS_ACC || !adisc ||
	    ntoh24(adisc->adisc_port_id) != rdata->ids.port_id ||
	    get_unaligned_be64(&adisc->adisc_wwpn) != rdata->ids.port_name ||
	    get_unaligned_be64(&adisc->adisc_wwnn) != rdata->ids.node_name) {
		FC_RPORT_DBG(rdata, "ADISC error or mismatch\n");
		fc_rport_enter_plogi(rdata);
	} else {
		FC_RPORT_DBG(rdata, "ADISC OK\n");
		fc_rport_enter_ready(rdata);
	}
out:
	fc_frame_free(fp);
err:
	mutex_unlock(&rdata->rp_mutex);
	/* Drop the reference taken when the ADISC was sent. */
	kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
}
1011 
/**
 * fc_rport_enter_adisc() - Send Address Discover (ADISC) request to peer
 * @rdata: remote port private data
 *
 * Enters the ADISC state and sends the request; on send or allocation
 * failure, schedules a retry via fc_rport_error_retry().
 *
 * Locking Note: The rport lock is expected to be held before calling
 * this routine.
 */
static void fc_rport_enter_adisc(struct fc_rport_priv *rdata)
{
	struct fc_lport *lport = rdata->local_port;
	struct fc_frame *fp;

	FC_RPORT_DBG(rdata, "sending ADISC from %s state\n",
		     fc_rport_state(rdata));

	fc_rport_state_enter(rdata, RPORT_ST_ADISC);

	fp = fc_frame_alloc(lport, sizeof(struct fc_els_adisc));
	if (!fp) {
		fc_rport_error_retry(rdata, fp);
		return;
	}
	if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_ADISC,
				  fc_rport_adisc_resp, rdata, lport->e_d_tov))
		fc_rport_error_retry(rdata, NULL);
	else
		/* Reference held for the response handler; dropped there. */
		kref_get(&rdata->kref);
}
1040 
/**
 * fc_rport_recv_adisc_req() - Handle incoming Address Discovery (ADISC) Request
 * @rdata: remote port private
 * @sp: current sequence in the ADISC exchange
 * @in_fp: ADISC request frame
 *
 * Validates the request length, then replies with an LS_ACC carrying
 * our own ADISC parameters.  The request frame is always freed here.
 *
 * Locking Note:  Called with the lport and rport locks held.
 */
static void fc_rport_recv_adisc_req(struct fc_rport_priv *rdata,
				    struct fc_seq *sp, struct fc_frame *in_fp)
{
	struct fc_lport *lport = rdata->local_port;
	struct fc_frame *fp;
	struct fc_exch *ep = fc_seq_exch(sp);
	struct fc_els_adisc *adisc;
	struct fc_seq_els_data rjt_data;
	u32 f_ctl;

	FC_RPORT_DBG(rdata, "Received ADISC request\n");

	/* Reject requests too short to hold a full ADISC payload. */
	adisc = fc_frame_payload_get(in_fp, sizeof(*adisc));
	if (!adisc) {
		rjt_data.fp = NULL;
		rjt_data.reason = ELS_RJT_PROT;
		rjt_data.explan = ELS_EXPL_INV_LEN;
		lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
		goto drop;
	}

	/*
	 * Build the accept: fill in our ADISC parameters, then overwrite
	 * the command byte with LS_ACC before sending the reply.
	 */
	fp = fc_frame_alloc(lport, sizeof(*adisc));
	if (!fp)
		goto drop;	/* no memory: drop; originator will retry */
	fc_adisc_fill(lport, fp);
	adisc = fc_frame_payload_get(fp, sizeof(*adisc));
	adisc->adisc_cmd = ELS_LS_ACC;
	sp = lport->tt.seq_start_next(sp);
	f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT;
	fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
		       FC_TYPE_ELS, f_ctl, 0);
	lport->tt.seq_send(lport, sp, fp);
drop:
	fc_frame_free(in_fp);
}
1084 
/**
 * fc_rport_recv_els_req() - handle a validated ELS request.
 * @lport: Fibre Channel local port
 * @sp: current sequence in the request exchange
 * @fp: ELS request frame
 *
 * Handle incoming ELS requests that require port login.
 * The ELS opcode has already been validated by the caller.
 *
 * Locking Note: Called with the lport lock held.
 */
static void fc_rport_recv_els_req(struct fc_lport *lport,
				  struct fc_seq *sp, struct fc_frame *fp)
{
	struct fc_rport_priv *rdata;
	struct fc_frame_header *fh;
	struct fc_seq_els_data els_data;

	/* Default reject data, used when no logged-in rport is found. */
	els_data.fp = NULL;
	els_data.reason = ELS_RJT_UNAB;
	els_data.explan = ELS_EXPL_PLOGI_REQD;

	fh = fc_frame_header_get(fp);

	/*
	 * Look up the rport by the frame's source ID.  Take the rport
	 * lock before dropping the disc mutex so the rport cannot go
	 * away between lookup and the state check below.
	 */
	mutex_lock(&lport->disc.disc_mutex);
	rdata = lport->tt.rport_lookup(lport, ntoh24(fh->fh_s_id));
	if (!rdata) {
		mutex_unlock(&lport->disc.disc_mutex);
		goto reject;
	}
	mutex_lock(&rdata->rp_mutex);
	mutex_unlock(&lport->disc.disc_mutex);

	/* These requests are only valid once PLOGI has completed. */
	switch (rdata->rp_state) {
	case RPORT_ST_PRLI:
	case RPORT_ST_RTV:
	case RPORT_ST_READY:
	case RPORT_ST_ADISC:
		break;
	default:
		mutex_unlock(&rdata->rp_mutex);
		goto reject;
	}

	switch (fc_frame_payload_op(fp)) {
	case ELS_PRLI:
		fc_rport_recv_prli_req(rdata, sp, fp);
		break;
	case ELS_PRLO:
		fc_rport_recv_prlo_req(rdata, sp, fp);
		break;
	case ELS_ADISC:
		fc_rport_recv_adisc_req(rdata, sp, fp);
		break;
	case ELS_RRQ:
		els_data.fp = fp;
		lport->tt.seq_els_rsp_send(sp, ELS_RRQ, &els_data);
		break;
	case ELS_REC:
		els_data.fp = fp;
		lport->tt.seq_els_rsp_send(sp, ELS_REC, &els_data);
		break;
	default:
		fc_frame_free(fp);	/* can't happen */
		break;
	}

	mutex_unlock(&rdata->rp_mutex);
	return;

reject:
	/* No login with this port: reject with "PLOGI required". */
	lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &els_data);
	fc_frame_free(fp);
}
1159 
1160 /**
1161  * fc_rport_recv_req() - Handle a received ELS request from a rport
1162  * @sp: current sequence in the PLOGI exchange
1163  * @fp: response frame
1164  * @lport: Fibre Channel local port
1165  *
1166  * Locking Note: Called with the lport lock held.
1167  */
1168 void fc_rport_recv_req(struct fc_seq *sp, struct fc_frame *fp,
1169 		       struct fc_lport *lport)
1170 {
1171 	struct fc_seq_els_data els_data;
1172 
1173 	/*
1174 	 * Handle PLOGI and LOGO requests separately, since they
1175 	 * don't require prior login.
1176 	 * Check for unsupported opcodes first and reject them.
1177 	 * For some ops, it would be incorrect to reject with "PLOGI required".
1178 	 */
1179 	switch (fc_frame_payload_op(fp)) {
1180 	case ELS_PLOGI:
1181 		fc_rport_recv_plogi_req(lport, sp, fp);
1182 		break;
1183 	case ELS_LOGO:
1184 		fc_rport_recv_logo_req(lport, sp, fp);
1185 		break;
1186 	case ELS_PRLI:
1187 	case ELS_PRLO:
1188 	case ELS_ADISC:
1189 	case ELS_RRQ:
1190 	case ELS_REC:
1191 		fc_rport_recv_els_req(lport, sp, fp);
1192 		break;
1193 	default:
1194 		fc_frame_free(fp);
1195 		els_data.fp = NULL;
1196 		els_data.reason = ELS_RJT_UNSUP;
1197 		els_data.explan = ELS_EXPL_NONE;
1198 		lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &els_data);
1199 		break;
1200 	}
1201 }
1202 
/**
 * fc_rport_recv_plogi_req() - Handle incoming Port Login (PLOGI) request
 * @lport: local port
 * @sp: current sequence in the PLOGI exchange
 * @rx_fp: PLOGI request frame
 *
 * Locking Note: Called with the lport lock held.  The disc mutex and
 * the rport lock are acquired and released internally.
 */
static void fc_rport_recv_plogi_req(struct fc_lport *lport,
				    struct fc_seq *sp, struct fc_frame *rx_fp)
{
	struct fc_disc *disc;
	struct fc_rport_priv *rdata;
	struct fc_frame *fp = rx_fp;
	struct fc_exch *ep;
	struct fc_frame_header *fh;
	struct fc_els_flogi *pl;
	struct fc_seq_els_data rjt_data;
	u32 sid, f_ctl;

	rjt_data.fp = NULL;
	fh = fc_frame_header_get(fp);
	sid = ntoh24(fh->fh_s_id);

	FC_RPORT_ID_DBG(lport, sid, "Received PLOGI request\n");

	/* The frame must be long enough to hold the full PLOGI payload. */
	pl = fc_frame_payload_get(fp, sizeof(*pl));
	if (!pl) {
		FC_RPORT_ID_DBG(lport, sid, "Received PLOGI too short\n");
		rjt_data.reason = ELS_RJT_PROT;
		rjt_data.explan = ELS_EXPL_INV_LEN;
		goto reject;
	}

	/*
	 * Find or create the rport for the source ID.  Lock the rport
	 * before dropping the disc mutex so it can't disappear between
	 * creation and the state check below.
	 */
	disc = &lport->disc;
	mutex_lock(&disc->disc_mutex);
	rdata = lport->tt.rport_create(lport, sid);
	if (!rdata) {
		mutex_unlock(&disc->disc_mutex);
		rjt_data.reason = ELS_RJT_UNAB;
		rjt_data.explan = ELS_EXPL_INSUF_RES;
		goto reject;
	}

	mutex_lock(&rdata->rp_mutex);
	mutex_unlock(&disc->disc_mutex);

	/* Record the peer's port and node names from the PLOGI payload. */
	rdata->ids.port_name = get_unaligned_be64(&pl->fl_wwpn);
	rdata->ids.node_name = get_unaligned_be64(&pl->fl_wwnn);

	/*
	 * If the rport was just created, possibly due to the incoming PLOGI,
	 * set the state appropriately and accept the PLOGI.
	 *
	 * If we had also sent a PLOGI, and if the received PLOGI is from a
	 * higher WWPN, we accept it, otherwise an LS_RJT is sent with reason
	 * "command already in progress".
	 *
	 * XXX TBD: If the session was ready before, the PLOGI should result in
	 * all outstanding exchanges being reset.
	 */
	switch (rdata->rp_state) {
	case RPORT_ST_INIT:
		FC_RPORT_DBG(rdata, "Received PLOGI in INIT state\n");
		break;
	case RPORT_ST_PLOGI:
		FC_RPORT_DBG(rdata, "Received PLOGI in PLOGI state\n");
		/* Simultaneous PLOGI: the lower WWPN loses the tiebreak. */
		if (rdata->ids.port_name < lport->wwpn) {
			mutex_unlock(&rdata->rp_mutex);
			rjt_data.reason = ELS_RJT_INPROG;
			rjt_data.explan = ELS_EXPL_NONE;
			goto reject;
		}
		break;
	case RPORT_ST_PRLI:
	case RPORT_ST_RTV:
	case RPORT_ST_READY:
	case RPORT_ST_ADISC:
		FC_RPORT_DBG(rdata, "Received PLOGI in logged-in state %d "
			     "- ignored for now\n", rdata->rp_state);
		/* XXX TBD - should reset */
		break;
	case RPORT_ST_DELETE:
	case RPORT_ST_LOGO:
	case RPORT_ST_RESTART:
		FC_RPORT_DBG(rdata, "Received PLOGI in state %s - send busy\n",
			     fc_rport_state(rdata));
		mutex_unlock(&rdata->rp_mutex);
		rjt_data.reason = ELS_RJT_BUSY;
		rjt_data.explan = ELS_EXPL_NONE;
		goto reject;
	}

	/*
	 * Get session payload size from incoming PLOGI.
	 */
	rdata->maxframe_size = fc_plogi_get_maxframe(pl, lport->mfs);
	fc_frame_free(rx_fp);

	/*
	 * Send LS_ACC.	 If this fails, the originator should retry.
	 */
	sp = lport->tt.seq_start_next(sp);
	if (!sp)
		goto out;
	fp = fc_frame_alloc(lport, sizeof(*pl));
	if (!fp)
		goto out;

	fc_plogi_fill(lport, fp, ELS_LS_ACC);
	f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT;
	ep = fc_seq_exch(sp);
	fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
		       FC_TYPE_ELS, f_ctl, 0);
	lport->tt.seq_send(lport, sp, fp);
	/* PLOGI accepted: continue the login sequence with PRLI. */
	fc_rport_enter_prli(rdata);
out:
	mutex_unlock(&rdata->rp_mutex);
	return;

reject:
	lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
	fc_frame_free(fp);
}
1327 
1328 /**
1329  * fc_rport_recv_prli_req() - Handle incoming Process Login (PRLI) request
1330  * @rdata: private remote port data
1331  * @sp: current sequence in the PRLI exchange
1332  * @fp: PRLI request frame
1333  *
1334  * Locking Note: The rport lock is exected to be held before calling
1335  * this function.
1336  */
1337 static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata,
1338 				   struct fc_seq *sp, struct fc_frame *rx_fp)
1339 {
1340 	struct fc_lport *lport = rdata->local_port;
1341 	struct fc_exch *ep;
1342 	struct fc_frame *fp;
1343 	struct fc_frame_header *fh;
1344 	struct {
1345 		struct fc_els_prli prli;
1346 		struct fc_els_spp spp;
1347 	} *pp;
1348 	struct fc_els_spp *rspp;	/* request service param page */
1349 	struct fc_els_spp *spp;	/* response spp */
1350 	unsigned int len;
1351 	unsigned int plen;
1352 	enum fc_els_rjt_reason reason = ELS_RJT_UNAB;
1353 	enum fc_els_rjt_explan explan = ELS_EXPL_NONE;
1354 	enum fc_els_spp_resp resp;
1355 	struct fc_seq_els_data rjt_data;
1356 	u32 f_ctl;
1357 	u32 fcp_parm;
1358 	u32 roles = FC_RPORT_ROLE_UNKNOWN;
1359 	rjt_data.fp = NULL;
1360 
1361 	fh = fc_frame_header_get(rx_fp);
1362 
1363 	FC_RPORT_DBG(rdata, "Received PRLI request while in state %s\n",
1364 		     fc_rport_state(rdata));
1365 
1366 	switch (rdata->rp_state) {
1367 	case RPORT_ST_PRLI:
1368 	case RPORT_ST_RTV:
1369 	case RPORT_ST_READY:
1370 	case RPORT_ST_ADISC:
1371 		reason = ELS_RJT_NONE;
1372 		break;
1373 	default:
1374 		fc_frame_free(rx_fp);
1375 		return;
1376 		break;
1377 	}
1378 	len = fr_len(rx_fp) - sizeof(*fh);
1379 	pp = fc_frame_payload_get(rx_fp, sizeof(*pp));
1380 	if (pp == NULL) {
1381 		reason = ELS_RJT_PROT;
1382 		explan = ELS_EXPL_INV_LEN;
1383 	} else {
1384 		plen = ntohs(pp->prli.prli_len);
1385 		if ((plen % 4) != 0 || plen > len) {
1386 			reason = ELS_RJT_PROT;
1387 			explan = ELS_EXPL_INV_LEN;
1388 		} else if (plen < len) {
1389 			len = plen;
1390 		}
1391 		plen = pp->prli.prli_spp_len;
1392 		if ((plen % 4) != 0 || plen < sizeof(*spp) ||
1393 		    plen > len || len < sizeof(*pp)) {
1394 			reason = ELS_RJT_PROT;
1395 			explan = ELS_EXPL_INV_LEN;
1396 		}
1397 		rspp = &pp->spp;
1398 	}
1399 	if (reason != ELS_RJT_NONE ||
1400 	    (fp = fc_frame_alloc(lport, len)) == NULL) {
1401 		rjt_data.reason = reason;
1402 		rjt_data.explan = explan;
1403 		lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
1404 	} else {
1405 		sp = lport->tt.seq_start_next(sp);
1406 		WARN_ON(!sp);
1407 		pp = fc_frame_payload_get(fp, len);
1408 		WARN_ON(!pp);
1409 		memset(pp, 0, len);
1410 		pp->prli.prli_cmd = ELS_LS_ACC;
1411 		pp->prli.prli_spp_len = plen;
1412 		pp->prli.prli_len = htons(len);
1413 		len -= sizeof(struct fc_els_prli);
1414 
1415 		/* reinitialize remote port roles */
1416 		rdata->ids.roles = FC_RPORT_ROLE_UNKNOWN;
1417 
1418 		/*
1419 		 * Go through all the service parameter pages and build
1420 		 * response.  If plen indicates longer SPP than standard,
1421 		 * use that.  The entire response has been pre-cleared above.
1422 		 */
1423 		spp = &pp->spp;
1424 		while (len >= plen) {
1425 			spp->spp_type = rspp->spp_type;
1426 			spp->spp_type_ext = rspp->spp_type_ext;
1427 			spp->spp_flags = rspp->spp_flags & FC_SPP_EST_IMG_PAIR;
1428 			resp = FC_SPP_RESP_ACK;
1429 			if (rspp->spp_flags & FC_SPP_RPA_VAL)
1430 				resp = FC_SPP_RESP_NO_PA;
1431 			switch (rspp->spp_type) {
1432 			case 0:	/* common to all FC-4 types */
1433 				break;
1434 			case FC_TYPE_FCP:
1435 				fcp_parm = ntohl(rspp->spp_params);
1436 				if (fcp_parm & FCP_SPPF_RETRY)
1437 					rdata->flags |= FC_RP_FLAGS_RETRY;
1438 				rdata->supported_classes = FC_COS_CLASS3;
1439 				if (fcp_parm & FCP_SPPF_INIT_FCN)
1440 					roles |= FC_RPORT_ROLE_FCP_INITIATOR;
1441 				if (fcp_parm & FCP_SPPF_TARG_FCN)
1442 					roles |= FC_RPORT_ROLE_FCP_TARGET;
1443 				rdata->ids.roles = roles;
1444 
1445 				spp->spp_params =
1446 					htonl(lport->service_params);
1447 				break;
1448 			default:
1449 				resp = FC_SPP_RESP_INVL;
1450 				break;
1451 			}
1452 			spp->spp_flags |= resp;
1453 			len -= plen;
1454 			rspp = (struct fc_els_spp *)((char *)rspp + plen);
1455 			spp = (struct fc_els_spp *)((char *)spp + plen);
1456 		}
1457 
1458 		/*
1459 		 * Send LS_ACC.	 If this fails, the originator should retry.
1460 		 */
1461 		f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ;
1462 		f_ctl |= FC_FC_END_SEQ | FC_FC_SEQ_INIT;
1463 		ep = fc_seq_exch(sp);
1464 		fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
1465 			       FC_TYPE_ELS, f_ctl, 0);
1466 		lport->tt.seq_send(lport, sp, fp);
1467 
1468 		/*
1469 		 * Get lock and re-check state.
1470 		 */
1471 		switch (rdata->rp_state) {
1472 		case RPORT_ST_PRLI:
1473 			fc_rport_enter_ready(rdata);
1474 			break;
1475 		case RPORT_ST_READY:
1476 		case RPORT_ST_ADISC:
1477 			break;
1478 		default:
1479 			break;
1480 		}
1481 	}
1482 	fc_frame_free(rx_fp);
1483 }
1484 
1485 /**
1486  * fc_rport_recv_prlo_req() - Handle incoming Process Logout (PRLO) request
1487  * @rdata: private remote port data
1488  * @sp: current sequence in the PRLO exchange
1489  * @fp: PRLO request frame
1490  *
1491  * Locking Note: The rport lock is exected to be held before calling
1492  * this function.
1493  */
1494 static void fc_rport_recv_prlo_req(struct fc_rport_priv *rdata,
1495 				   struct fc_seq *sp,
1496 				   struct fc_frame *fp)
1497 {
1498 	struct fc_lport *lport = rdata->local_port;
1499 
1500 	struct fc_frame_header *fh;
1501 	struct fc_seq_els_data rjt_data;
1502 
1503 	fh = fc_frame_header_get(fp);
1504 
1505 	FC_RPORT_DBG(rdata, "Received PRLO request while in state %s\n",
1506 		     fc_rport_state(rdata));
1507 
1508 	rjt_data.fp = NULL;
1509 	rjt_data.reason = ELS_RJT_UNAB;
1510 	rjt_data.explan = ELS_EXPL_NONE;
1511 	lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
1512 	fc_frame_free(fp);
1513 }
1514 
/**
 * fc_rport_recv_logo_req() - Handle incoming Logout (LOGO) request
 * @lport: local port.
 * @sp: current sequence in the LOGO exchange
 * @fp: LOGO request frame
 *
 * Locking Note: Called with the lport lock held.  The disc mutex and
 * the rport lock are acquired and released internally.
 */
static void fc_rport_recv_logo_req(struct fc_lport *lport,
				   struct fc_seq *sp,
				   struct fc_frame *fp)
{
	struct fc_frame_header *fh;
	struct fc_rport_priv *rdata;
	u32 sid;

	/* Accept the LOGO whether or not we know the originating port. */
	lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);

	fh = fc_frame_header_get(fp);
	sid = ntoh24(fh->fh_s_id);

	mutex_lock(&lport->disc.disc_mutex);
	rdata = lport->tt.rport_lookup(lport, sid);
	if (rdata) {
		mutex_lock(&rdata->rp_mutex);
		FC_RPORT_DBG(rdata, "Received LOGO request while in state %s\n",
			     fc_rport_state(rdata));

		/* Tear down the login and schedule the LOGO event. */
		fc_rport_enter_delete(rdata, RPORT_EV_LOGO);

		/*
		 * If the remote port was created due to discovery, set state
		 * to log back in.  It may have seen a stale RSCN about us.
		 */
		if (rdata->disc_id)
			fc_rport_state_enter(rdata, RPORT_ST_RESTART);
		mutex_unlock(&rdata->rp_mutex);
	} else
		FC_RPORT_ID_DBG(lport, sid,
				"Received LOGO from non-logged-in port\n");
	mutex_unlock(&lport->disc.disc_mutex);
	fc_frame_free(fp);
}
1559 
/**
 * fc_rport_flush_queue() - Flush the rport event workqueue
 *
 * Waits until all work items pending on rport_event_queue complete.
 */
static void fc_rport_flush_queue(void)
{
	flush_workqueue(rport_event_queue);
}
1564 
/**
 * fc_rport_init() - Initialize the remote port layer for a local port
 * @lport: Fibre Channel local port
 *
 * Installs the libfc default remote-port operations into the local
 * port's transport template (tt), for any entries the low-level
 * driver has not already set.
 *
 * Returns 0 always.
 */
int fc_rport_init(struct fc_lport *lport)
{
	if (!lport->tt.rport_lookup)
		lport->tt.rport_lookup = fc_rport_lookup;

	if (!lport->tt.rport_create)
		lport->tt.rport_create = fc_rport_create;

	if (!lport->tt.rport_login)
		lport->tt.rport_login = fc_rport_login;

	if (!lport->tt.rport_logoff)
		lport->tt.rport_logoff = fc_rport_logoff;

	if (!lport->tt.rport_recv_req)
		lport->tt.rport_recv_req = fc_rport_recv_req;

	if (!lport->tt.rport_flush_queue)
		lport->tt.rport_flush_queue = fc_rport_flush_queue;

	if (!lport->tt.rport_destroy)
		lport->tt.rport_destroy = fc_rport_destroy;

	return 0;
}
EXPORT_SYMBOL(fc_rport_init);
1591 
/**
 * fc_setup_rport() - Create the rport event workqueue
 *
 * Returns 0 on success, -ENOMEM if the workqueue cannot be allocated.
 */
int fc_setup_rport(void)
{
	rport_event_queue = create_singlethread_workqueue("fc_rport_eq");
	if (!rport_event_queue)
		return -ENOMEM;
	return 0;
}
1599 
/**
 * fc_destroy_rport() - Destroy the rport event workqueue
 */
void fc_destroy_rport(void)
{
	destroy_workqueue(rport_event_queue);
}
1604 
/**
 * fc_rport_terminate_io() - Stop all outstanding I/O with a remote port
 * @rport: FC transport remote port
 *
 * Resets the exchange manager for exchanges involving the rport's
 * port ID in both directions (as destination and as source).
 */
void fc_rport_terminate_io(struct fc_rport *rport)
{
	struct fc_rport_libfc_priv *rp = rport->dd_data;
	struct fc_lport *lport = rp->local_port;

	lport->tt.exch_mgr_reset(lport, 0, rport->port_id);
	lport->tt.exch_mgr_reset(lport, rport->port_id, 0);
}
EXPORT_SYMBOL(fc_rport_terminate_io);
1614