xref: /openbmc/linux/drivers/scsi/libfc/fc_rport.c (revision feab4ae7)
1 /*
2  * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms and conditions of the GNU General Public License,
6  * version 2, as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope it will be useful, but WITHOUT
9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
11  * more details.
12  *
13  * You should have received a copy of the GNU General Public License along with
14  * this program; if not, write to the Free Software Foundation, Inc.,
15  * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16  *
17  * Maintained at www.Open-FCoE.org
18  */
19 
20 /*
21  * RPORT GENERAL INFO
22  *
23  * This file contains all processing regarding fc_rports. It contains the
24  * rport state machine and does all rport interaction with the transport class.
25  * There should be no other places in libfc that interact directly with the
26  * transport class in regards to adding and deleting rports.
27  *
28  * fc_rport's represent N_Port's within the fabric.
29  */
30 
31 /*
32  * RPORT LOCKING
33  *
34  * The rport should never hold the rport mutex and then attempt to acquire
35  * either the lport or disc mutexes. The rport's mutex is considered lesser
36  * than both the lport's mutex and the disc mutex. Refer to fc_lport.c for
37  * more comments on the hierarchy.
38  *
39  * The locking strategy is similar to the lport's strategy. The lock protects
40  * the rport's states and is held and released by the entry points to the rport
41  * block. All _enter_* functions correspond to rport states and expect the rport
42  * mutex to be locked before calling them. This means that rports only handle
43  * one request or response at a time, since they're not critical for the I/O
44  * path this potential over-use of the mutex is acceptable.
45  */
46 
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/rcupdate.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <asm/unaligned.h>

#include <scsi/libfc.h>
#include <scsi/fc_encode.h>
57 
58 struct workqueue_struct *rport_event_queue;
59 
60 static void fc_rport_enter_plogi(struct fc_rport_priv *);
61 static void fc_rport_enter_prli(struct fc_rport_priv *);
62 static void fc_rport_enter_rtv(struct fc_rport_priv *);
63 static void fc_rport_enter_ready(struct fc_rport_priv *);
64 static void fc_rport_enter_logo(struct fc_rport_priv *);
65 
66 static void fc_rport_recv_plogi_req(struct fc_lport *,
67 				    struct fc_seq *, struct fc_frame *);
68 static void fc_rport_recv_prli_req(struct fc_rport_priv *,
69 				   struct fc_seq *, struct fc_frame *);
70 static void fc_rport_recv_prlo_req(struct fc_rport_priv *,
71 				   struct fc_seq *, struct fc_frame *);
72 static void fc_rport_recv_logo_req(struct fc_lport *,
73 				   struct fc_seq *, struct fc_frame *);
74 static void fc_rport_timeout(struct work_struct *);
75 static void fc_rport_error(struct fc_rport_priv *, struct fc_frame *);
76 static void fc_rport_error_retry(struct fc_rport_priv *, struct fc_frame *);
77 static void fc_rport_work(struct work_struct *);
78 
/* printable state names, indexed by enum fc_rport_state */
static const char *fc_rport_state_names[] = {
	[RPORT_ST_INIT] = "Init",
	[RPORT_ST_PLOGI] = "PLOGI",
	[RPORT_ST_PRLI] = "PRLI",
	[RPORT_ST_RTV] = "RTV",
	[RPORT_ST_READY] = "Ready",
	[RPORT_ST_LOGO] = "LOGO",
	[RPORT_ST_DELETE] = "Delete",
};
88 
89 /**
90  * fc_rport_lookup() - lookup a remote port by port_id
91  * @lport: Fibre Channel host port instance
92  * @port_id: remote port port_id to match
93  */
94 static struct fc_rport_priv *fc_rport_lookup(const struct fc_lport *lport,
95 					     u32 port_id)
96 {
97 	struct fc_rport_priv *rdata;
98 
99 	list_for_each_entry(rdata, &lport->disc.rports, peers)
100 		if (rdata->ids.port_id == port_id &&
101 		    rdata->rp_state != RPORT_ST_DELETE)
102 			return rdata;
103 	return NULL;
104 }
105 
/**
 * fc_rport_create() - Create a new remote port
 * @lport:   The local port that the new remote port is for
 * @port_id: The port ID for the new remote port
 *
 * Returns an existing rport if one is already known for @port_id,
 * otherwise allocates and initializes a new one.  Returns NULL on
 * allocation failure.
 *
 * Locking note:  must be called with the disc_mutex held.
 */
static struct fc_rport_priv *fc_rport_create(struct fc_lport *lport,
					     u32 port_id)
{
	struct fc_rport_priv *rdata;

	rdata = lport->tt.rport_lookup(lport, port_id);
	if (rdata)
		return rdata;

	rdata = kzalloc(sizeof(*rdata), GFP_KERNEL);
	if (!rdata)
		return NULL;

	/* WWNN/WWPN are unknown until the PLOGI response fills them in */
	rdata->ids.node_name = -1;
	rdata->ids.port_name = -1;
	rdata->ids.port_id = port_id;
	rdata->ids.roles = FC_RPORT_ROLE_UNKNOWN;

	kref_init(&rdata->kref);
	mutex_init(&rdata->rp_mutex);
	rdata->local_port = lport;
	rdata->rp_state = RPORT_ST_INIT;
	rdata->event = RPORT_EV_NONE;
	rdata->flags = FC_RP_FLAGS_REC_SUPPORTED;
	rdata->e_d_tov = lport->e_d_tov;
	rdata->r_a_tov = lport->r_a_tov;
	rdata->maxframe_size = FC_MIN_MAX_PAYLOAD;
	INIT_DELAYED_WORK(&rdata->retry_work, fc_rport_timeout);
	INIT_WORK(&rdata->event_work, fc_rport_work);
	/* the directory server rport is not tracked on the discovery list */
	if (port_id != FC_FID_DIR_SERV)
		list_add(&rdata->peers, &lport->disc.rports);
	return rdata;
}
146 
147 /**
148  * fc_rport_destroy() - free a remote port after last reference is released.
149  * @kref: pointer to kref inside struct fc_rport_priv
150  */
151 static void fc_rport_destroy(struct kref *kref)
152 {
153 	struct fc_rport_priv *rdata;
154 
155 	rdata = container_of(kref, struct fc_rport_priv, kref);
156 	kfree(rdata);
157 }
158 
159 /**
160  * fc_rport_state() - return a string for the state the rport is in
161  * @rdata: remote port private data
162  */
163 static const char *fc_rport_state(struct fc_rport_priv *rdata)
164 {
165 	const char *cp;
166 
167 	cp = fc_rport_state_names[rdata->rp_state];
168 	if (!cp)
169 		cp = "Unknown";
170 	return cp;
171 }
172 
173 /**
174  * fc_set_rport_loss_tmo() - Set the remote port loss timeout in seconds.
175  * @rport: Pointer to Fibre Channel remote port structure
176  * @timeout: timeout in seconds
177  */
178 void fc_set_rport_loss_tmo(struct fc_rport *rport, u32 timeout)
179 {
180 	if (timeout)
181 		rport->dev_loss_tmo = timeout + 5;
182 	else
183 		rport->dev_loss_tmo = 30;
184 }
185 EXPORT_SYMBOL(fc_set_rport_loss_tmo);
186 
187 /**
188  * fc_plogi_get_maxframe() - Get max payload from the common service parameters
189  * @flp: FLOGI payload structure
190  * @maxval: upper limit, may be less than what is in the service parameters
191  */
192 static unsigned int fc_plogi_get_maxframe(struct fc_els_flogi *flp,
193 					  unsigned int maxval)
194 {
195 	unsigned int mfs;
196 
197 	/*
198 	 * Get max payload from the common service parameters and the
199 	 * class 3 receive data field size.
200 	 */
201 	mfs = ntohs(flp->fl_csp.sp_bb_data) & FC_SP_BB_DATA_MASK;
202 	if (mfs >= FC_SP_MIN_MAX_PAYLOAD && mfs < maxval)
203 		maxval = mfs;
204 	mfs = ntohs(flp->fl_cssp[3 - 1].cp_rdfs);
205 	if (mfs >= FC_SP_MIN_MAX_PAYLOAD && mfs < maxval)
206 		maxval = mfs;
207 	return maxval;
208 }
209 
/**
 * fc_rport_state_enter() - Change the rport's state
 * @rdata: The rport whose state should change
 * @new: The new state of the rport
 *
 * Locking Note: Called with the rport lock held
 */
static void fc_rport_state_enter(struct fc_rport_priv *rdata,
				 enum fc_rport_state new)
{
	/* entering a different state restarts the retry counter */
	if (rdata->rp_state != new)
		rdata->retries = 0;
	rdata->rp_state = new;
}
224 
/**
 * fc_rport_work() - Handler for rport events queued on rport_event_queue
 * @work: the event_work struct embedded in the fc_rport_priv
 *
 * Reads the pending event under the rport mutex, then performs the
 * fc_remote_port_add()/fc_remote_port_delete() transport-class work and
 * the owner's event_callback with the mutex dropped.
 */
static void fc_rport_work(struct work_struct *work)
{
	u32 port_id;
	struct fc_rport_priv *rdata =
		container_of(work, struct fc_rport_priv, event_work);
	struct fc_rport_libfc_priv *rp;
	enum fc_rport_event event;
	struct fc_lport *lport = rdata->local_port;
	struct fc_rport_operations *rport_ops;
	struct fc_rport_identifiers ids;
	struct fc_rport *rport;

	mutex_lock(&rdata->rp_mutex);
	event = rdata->event;
	rport_ops = rdata->ops;
	rport = rdata->rport;

	FC_RPORT_DBG(rdata, "work event %u\n", event);

	switch (event) {
	case RPORT_EV_READY:
		ids = rdata->ids;
		rdata->event = RPORT_EV_NONE;
		/* hold rdata across the region where the mutex is dropped */
		kref_get(&rdata->kref);
		mutex_unlock(&rdata->rp_mutex);

		if (!rport)
			rport = fc_remote_port_add(lport->host, 0, &ids);
		if (!rport) {
			FC_RPORT_DBG(rdata, "Failed to add the rport\n");
			lport->tt.rport_logoff(rdata);
			kref_put(&rdata->kref, lport->tt.rport_destroy);
			return;
		}
		mutex_lock(&rdata->rp_mutex);
		if (rdata->rport)
			FC_RPORT_DBG(rdata, "rport already allocated\n");
		rdata->rport = rport;
		rport->maxframe_size = rdata->maxframe_size;
		rport->supported_classes = rdata->supported_classes;

		/* mirror the negotiated parameters into the transport rport */
		rp = rport->dd_data;
		rp->local_port = lport;
		rp->rp_state = rdata->rp_state;
		rp->flags = rdata->flags;
		rp->e_d_tov = rdata->e_d_tov;
		rp->r_a_tov = rdata->r_a_tov;
		mutex_unlock(&rdata->rp_mutex);

		if (rport_ops && rport_ops->event_callback) {
			FC_RPORT_DBG(rdata, "callback ev %d\n", event);
			rport_ops->event_callback(lport, rdata, event);
		}
		kref_put(&rdata->kref, lport->tt.rport_destroy);
		break;

	case RPORT_EV_FAILED:
	case RPORT_EV_LOGO:
	case RPORT_EV_STOP:
		port_id = rdata->ids.port_id;
		mutex_unlock(&rdata->rp_mutex);

		/* the directory server rport is never on the discovery list */
		if (port_id != FC_FID_DIR_SERV) {
			mutex_lock(&lport->disc.disc_mutex);
			list_del(&rdata->peers);
			mutex_unlock(&lport->disc.disc_mutex);
		}

		if (rport_ops && rport_ops->event_callback) {
			FC_RPORT_DBG(rdata, "callback ev %d\n", event);
			rport_ops->event_callback(lport, rdata, event);
		}
		cancel_delayed_work_sync(&rdata->retry_work);

		/*
		 * Reset any outstanding exchanges before freeing rport.
		 */
		lport->tt.exch_mgr_reset(lport, 0, port_id);
		lport->tt.exch_mgr_reset(lport, port_id, 0);

		if (rport) {
			rp = rport->dd_data;
			rp->rp_state = RPORT_ST_DELETE;
			mutex_lock(&rdata->rp_mutex);
			rdata->rport = NULL;
			mutex_unlock(&rdata->rp_mutex);
			fc_remote_port_delete(rport);
		}
		/* may free rdata if this was the last reference */
		kref_put(&rdata->kref, lport->tt.rport_destroy);
		break;

	default:
		mutex_unlock(&rdata->rp_mutex);
		break;
	}
}
321 
/**
 * fc_rport_login() - Start the remote port login state machine
 * @rdata: private remote port
 *
 * Kicks off login by sending PLOGI; the rest of the state machine
 * advances from the ELS response handlers.
 *
 * Locking Note: Called without the rport lock held. This
 * function will hold the rport lock, call an _enter_*
 * function and then unlock the rport.
 *
 * Returns 0 always.
 */
int fc_rport_login(struct fc_rport_priv *rdata)
{
	mutex_lock(&rdata->rp_mutex);

	FC_RPORT_DBG(rdata, "Login to port\n");

	fc_rport_enter_plogi(rdata);

	mutex_unlock(&rdata->rp_mutex);

	return 0;
}
342 
/**
 * fc_rport_enter_delete() - schedule a remote port to be deleted.
 * @rdata: private remote port
 * @event: event to report as the reason for deletion
 *
 * Locking Note: Called with the rport lock held.
 *
 * Allow state change into DELETE only once.
 *
 * Call queue_work only if there's no event already pending.
 * Set the new event so that the old pending event will not occur.
 * Since we have the mutex, even if fc_rport_work() is already started,
 * it'll see the new event.
 */
static void fc_rport_enter_delete(struct fc_rport_priv *rdata,
				  enum fc_rport_event event)
{
	/* DELETE is terminal; ignore repeated requests */
	if (rdata->rp_state == RPORT_ST_DELETE)
		return;

	FC_RPORT_DBG(rdata, "Delete port\n");

	fc_rport_state_enter(rdata, RPORT_ST_DELETE);

	/* queue the work only once; the latest event value wins */
	if (rdata->event == RPORT_EV_NONE)
		queue_work(rport_event_queue, &rdata->event_work);
	rdata->event = event;
}
371 
372 /**
373  * fc_rport_logoff() - Logoff and remove an rport
374  * @rdata: private remote port
375  *
376  * Locking Note: Called without the rport lock held. This
377  * function will hold the rport lock, call an _enter_*
378  * function and then unlock the rport.
379  */
380 int fc_rport_logoff(struct fc_rport_priv *rdata)
381 {
382 	mutex_lock(&rdata->rp_mutex);
383 
384 	FC_RPORT_DBG(rdata, "Remove port\n");
385 
386 	if (rdata->rp_state == RPORT_ST_DELETE) {
387 		FC_RPORT_DBG(rdata, "Port in Delete state, not removing\n");
388 		mutex_unlock(&rdata->rp_mutex);
389 		goto out;
390 	}
391 
392 	fc_rport_enter_logo(rdata);
393 
394 	/*
395 	 * Change the state to Delete so that we discard
396 	 * the response.
397 	 */
398 	fc_rport_enter_delete(rdata, RPORT_EV_STOP);
399 	mutex_unlock(&rdata->rp_mutex);
400 
401 out:
402 	return 0;
403 }
404 
/**
 * fc_rport_enter_ready() - The rport is ready
 * @rdata: private remote port
 *
 * Transitions to READY and schedules fc_rport_work() to report the
 * event, unless the work item is already queued for an earlier event
 * (in which case it will see RPORT_EV_READY instead).
 *
 * Locking Note: The rport lock is expected to be held before calling
 * this routine.
 */
static void fc_rport_enter_ready(struct fc_rport_priv *rdata)
{
	fc_rport_state_enter(rdata, RPORT_ST_READY);

	FC_RPORT_DBG(rdata, "Port is Ready\n");

	/* queue the work only once; the latest event value wins */
	if (rdata->event == RPORT_EV_NONE)
		queue_work(rport_event_queue, &rdata->event_work);
	rdata->event = RPORT_EV_READY;
}
422 
/**
 * fc_rport_timeout() - Handler for the retry_work timer.
 * @work: The work struct of the fc_rport_priv
 *
 * Re-issues the request for whatever login state the rport is still
 * in; quiescent states (READY/INIT/DELETE) have nothing to retry.
 *
 * Locking Note: Called without the rport lock held. This
 * function will hold the rport lock, call an _enter_*
 * function and then unlock the rport.
 */
static void fc_rport_timeout(struct work_struct *work)
{
	struct fc_rport_priv *rdata =
		container_of(work, struct fc_rport_priv, retry_work.work);

	mutex_lock(&rdata->rp_mutex);

	switch (rdata->rp_state) {
	case RPORT_ST_PLOGI:
		fc_rport_enter_plogi(rdata);
		break;
	case RPORT_ST_PRLI:
		fc_rport_enter_prli(rdata);
		break;
	case RPORT_ST_RTV:
		fc_rport_enter_rtv(rdata);
		break;
	case RPORT_ST_LOGO:
		fc_rport_enter_logo(rdata);
		break;
	case RPORT_ST_READY:
	case RPORT_ST_INIT:
	case RPORT_ST_DELETE:
		/* no outstanding request to retry in these states */
		break;
	}

	mutex_unlock(&rdata->rp_mutex);
}
459 
/**
 * fc_rport_error() - Error handler, called once retries have been exhausted
 * @rdata: private remote port
 * @fp: The frame pointer, or an ERR_PTR() error code
 *
 * Failures in PLOGI/PRLI/LOGO tear the rport down.  An RTV failure is
 * non-fatal (see fc_rport_rtv_resp: many targets don't support RTV),
 * so the rport is marked ready anyway.
 *
 * Locking Note: The rport lock is expected to be held before
 * calling this routine
 */
static void fc_rport_error(struct fc_rport_priv *rdata, struct fc_frame *fp)
{
	FC_RPORT_DBG(rdata, "Error %ld in state %s, retries %d\n",
		     IS_ERR(fp) ? -PTR_ERR(fp) : 0,
		     fc_rport_state(rdata), rdata->retries);

	switch (rdata->rp_state) {
	case RPORT_ST_PLOGI:
	case RPORT_ST_PRLI:
	case RPORT_ST_LOGO:
		/* login could not complete; schedule teardown */
		fc_rport_enter_delete(rdata, RPORT_EV_FAILED);
		break;
	case RPORT_ST_RTV:
		fc_rport_enter_ready(rdata);
		break;
	case RPORT_ST_DELETE:
	case RPORT_ST_READY:
	case RPORT_ST_INIT:
		/* nothing outstanding in these states */
		break;
	}
}
489 
490 /**
491  * fc_rport_error_retry() - Error handler when retries are desired
492  * @rdata: private remote port data
493  * @fp: The frame pointer
494  *
495  * If the error was an exchange timeout retry immediately,
496  * otherwise wait for E_D_TOV.
497  *
498  * Locking Note: The rport lock is expected to be held before
499  * calling this routine
500  */
501 static void fc_rport_error_retry(struct fc_rport_priv *rdata,
502 				 struct fc_frame *fp)
503 {
504 	unsigned long delay = FC_DEF_E_D_TOV;
505 
506 	/* make sure this isn't an FC_EX_CLOSED error, never retry those */
507 	if (PTR_ERR(fp) == -FC_EX_CLOSED)
508 		return fc_rport_error(rdata, fp);
509 
510 	if (rdata->retries < rdata->local_port->max_rport_retry_count) {
511 		FC_RPORT_DBG(rdata, "Error %ld in state %s, retrying\n",
512 			     PTR_ERR(fp), fc_rport_state(rdata));
513 		rdata->retries++;
514 		/* no additional delay on exchange timeouts */
515 		if (PTR_ERR(fp) == -FC_EX_TIMEOUT)
516 			delay = 0;
517 		schedule_delayed_work(&rdata->retry_work, delay);
518 		return;
519 	}
520 
521 	return fc_rport_error(rdata, fp);
522 }
523 
/**
 * fc_rport_plogi_resp() - Handle incoming ELS PLOGI response
 * @sp: current sequence in the PLOGI exchange
 * @fp: response frame, or an ERR_PTR() error code
 * @rdata_arg: private remote port data
 *
 * On LS_ACC, records the peer's WWPN/WWNN and service parameters and
 * advances to PRLI; anything else goes down the retry/error path.
 *
 * Locking Note: This function will be called without the rport lock
 * held, but it will lock, call an _enter_* function or fc_rport_error
 * and then unlock the rport.
 */
static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp,
				void *rdata_arg)
{
	struct fc_rport_priv *rdata = rdata_arg;
	struct fc_lport *lport = rdata->local_port;
	struct fc_els_flogi *plp = NULL;
	unsigned int tov;
	u16 csp_seq;
	u16 cssp_seq;
	u8 op;

	mutex_lock(&rdata->rp_mutex);

	FC_RPORT_DBG(rdata, "Received a PLOGI %s\n", fc_els_resp_type(fp));

	/* a late response after a state change is simply discarded */
	if (rdata->rp_state != RPORT_ST_PLOGI) {
		FC_RPORT_DBG(rdata, "Received a PLOGI response, but in state "
			     "%s\n", fc_rport_state(rdata));
		if (IS_ERR(fp))
			goto err;
		goto out;
	}

	if (IS_ERR(fp)) {
		fc_rport_error_retry(rdata, fp);
		goto err;
	}

	op = fc_frame_payload_op(fp);
	if (op == ELS_LS_ACC &&
	    (plp = fc_frame_payload_get(fp, sizeof(*plp))) != NULL) {
		rdata->ids.port_name = get_unaligned_be64(&plp->fl_wwpn);
		rdata->ids.node_name = get_unaligned_be64(&plp->fl_wwnn);

		/* scale E_D_TOV down when the peer used the finer resolution */
		tov = ntohl(plp->fl_csp.sp_e_d_tov);
		if (ntohs(plp->fl_csp.sp_features) & FC_SP_FT_EDTR)
			tov /= 1000;
		if (tov > rdata->e_d_tov)
			rdata->e_d_tov = tov;
		/* max concurrent sequences: lesser of common and class 3 */
		csp_seq = ntohs(plp->fl_csp.sp_tot_seq);
		cssp_seq = ntohs(plp->fl_cssp[3 - 1].cp_con_seq);
		if (cssp_seq < csp_seq)
			csp_seq = cssp_seq;
		rdata->max_seq = csp_seq;
		rdata->maxframe_size = fc_plogi_get_maxframe(plp, lport->mfs);
		fc_rport_enter_prli(rdata);
	} else
		fc_rport_error_retry(rdata, fp);

out:
	fc_frame_free(fp);
err:
	mutex_unlock(&rdata->rp_mutex);
	/* drop the reference taken when the PLOGI was sent */
	kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
}
589 
/**
 * fc_rport_enter_plogi() - Send Port Login (PLOGI) request to peer
 * @rdata: private remote port data
 *
 * Locking Note: The rport lock is expected to be held before calling
 * this routine.
 */
static void fc_rport_enter_plogi(struct fc_rport_priv *rdata)
{
	struct fc_lport *lport = rdata->local_port;
	struct fc_frame *fp;

	FC_RPORT_DBG(rdata, "Port entered PLOGI state from %s state\n",
		     fc_rport_state(rdata));

	fc_rport_state_enter(rdata, RPORT_ST_PLOGI);

	/* start from the minimum; the PLOGI response renegotiates it */
	rdata->maxframe_size = FC_MIN_MAX_PAYLOAD;
	fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
	if (!fp) {
		fc_rport_error_retry(rdata, fp);
		return;
	}
	rdata->e_d_tov = lport->e_d_tov;

	/*
	 * NOTE(review): the kref backing the response callback is taken
	 * only after elsct_send() succeeds -- confirm the response cannot
	 * be delivered before kref_get() runs.
	 */
	if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_PLOGI,
				  fc_rport_plogi_resp, rdata, lport->e_d_tov))
		fc_rport_error_retry(rdata, fp);
	else
		kref_get(&rdata->kref);
}
621 
/**
 * fc_rport_prli_resp() - Process Login (PRLI) response handler
 * @sp: current sequence in the PRLI exchange
 * @fp: response frame, or an ERR_PTR() error code
 * @rdata_arg: private remote port data
 *
 * On LS_ACC, extracts the FCP service-parameter page to determine the
 * peer's initiator/target roles and retry support, then advances to
 * RTV.  A bad response tears the rport down.
 *
 * Locking Note: This function will be called without the rport lock
 * held, but it will lock, call an _enter_* function or fc_rport_error
 * and then unlock the rport.
 */
static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
			       void *rdata_arg)
{
	struct fc_rport_priv *rdata = rdata_arg;
	struct {
		struct fc_els_prli prli;
		struct fc_els_spp spp;
	} *pp;
	u32 roles = FC_RPORT_ROLE_UNKNOWN;
	u32 fcp_parm = 0;
	u8 op;

	mutex_lock(&rdata->rp_mutex);

	FC_RPORT_DBG(rdata, "Received a PRLI %s\n", fc_els_resp_type(fp));

	/* a late response after a state change is simply discarded */
	if (rdata->rp_state != RPORT_ST_PRLI) {
		FC_RPORT_DBG(rdata, "Received a PRLI response, but in state "
			     "%s\n", fc_rport_state(rdata));
		if (IS_ERR(fp))
			goto err;
		goto out;
	}

	if (IS_ERR(fp)) {
		fc_rport_error_retry(rdata, fp);
		goto err;
	}

	/* reinitialize remote port roles */
	rdata->ids.roles = FC_RPORT_ROLE_UNKNOWN;

	op = fc_frame_payload_op(fp);
	if (op == ELS_LS_ACC) {
		pp = fc_frame_payload_get(fp, sizeof(*pp));
		if (pp && pp->prli.prli_spp_len >= sizeof(pp->spp)) {
			fcp_parm = ntohl(pp->spp.spp_params);
			if (fcp_parm & FCP_SPPF_RETRY)
				rdata->flags |= FC_RP_FLAGS_RETRY;
		}

		rdata->supported_classes = FC_COS_CLASS3;
		if (fcp_parm & FCP_SPPF_INIT_FCN)
			roles |= FC_RPORT_ROLE_FCP_INITIATOR;
		if (fcp_parm & FCP_SPPF_TARG_FCN)
			roles |= FC_RPORT_ROLE_FCP_TARGET;

		rdata->ids.roles = roles;
		fc_rport_enter_rtv(rdata);

	} else {
		FC_RPORT_DBG(rdata, "Bad ELS response for PRLI command\n");
		fc_rport_enter_delete(rdata, RPORT_EV_FAILED);
	}

out:
	fc_frame_free(fp);
err:
	mutex_unlock(&rdata->rp_mutex);
	/* drop the reference taken when the PRLI was sent */
	kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
}
693 
/**
 * fc_rport_logo_resp() - Logout (LOGO) response handler
 * @sp: current sequence in the LOGO exchange
 * @fp: response frame, or an ERR_PTR() error code
 * @rdata_arg: private remote port data
 *
 * Locking Note: This function will be called without the rport lock
 * held, but it will lock, call an _enter_* function or fc_rport_error
 * and then unlock the rport.
 */
static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
			       void *rdata_arg)
{
	struct fc_rport_priv *rdata = rdata_arg;
	u8 op;

	mutex_lock(&rdata->rp_mutex);

	FC_RPORT_DBG(rdata, "Received a LOGO %s\n", fc_els_resp_type(fp));

	/*
	 * fc_rport_logoff() moves the rport to DELETE right after sending
	 * the LOGO, so a response normally lands here and is discarded.
	 */
	if (rdata->rp_state != RPORT_ST_LOGO) {
		FC_RPORT_DBG(rdata, "Received a LOGO response, but in state "
			     "%s\n", fc_rport_state(rdata));
		if (IS_ERR(fp))
			goto err;
		goto out;
	}

	if (IS_ERR(fp)) {
		fc_rport_error_retry(rdata, fp);
		goto err;
	}

	op = fc_frame_payload_op(fp);
	if (op == ELS_LS_ACC) {
		/*
		 * NOTE(review): entering RTV after a successful LOGO looks
		 * suspect -- confirm this path is ever reachable given the
		 * state check above.
		 */
		fc_rport_enter_rtv(rdata);
	} else {
		FC_RPORT_DBG(rdata, "Bad ELS response for LOGO command\n");
		fc_rport_enter_delete(rdata, RPORT_EV_LOGO);
	}

out:
	fc_frame_free(fp);
err:
	mutex_unlock(&rdata->rp_mutex);
	/* drop the reference taken when the LOGO was sent */
	kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
}
741 
/**
 * fc_rport_enter_prli() - Send Process Login (PRLI) request to peer
 * @rdata: private remote port data
 *
 * Locking Note: The rport lock is expected to be held before calling
 * this routine.
 */
static void fc_rport_enter_prli(struct fc_rport_priv *rdata)
{
	struct fc_lport *lport = rdata->local_port;
	struct {
		struct fc_els_prli prli;
		struct fc_els_spp spp;
	} *pp;
	struct fc_frame *fp;

	/*
	 * If the rport is one of the well known addresses
	 * we skip PRLI and RTV and go straight to READY.
	 */
	if (rdata->ids.port_id >= FC_FID_DOM_MGR) {
		fc_rport_enter_ready(rdata);
		return;
	}

	FC_RPORT_DBG(rdata, "Port entered PRLI state from %s state\n",
		     fc_rport_state(rdata));

	fc_rport_state_enter(rdata, RPORT_ST_PRLI);

	fp = fc_frame_alloc(lport, sizeof(*pp));
	if (!fp) {
		/* NULL fp is a retryable (allocation) failure */
		fc_rport_error_retry(rdata, fp);
		return;
	}

	/* hold a reference for the response callback on successful send */
	if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_PRLI,
				  fc_rport_prli_resp, rdata, lport->e_d_tov))
		fc_rport_error_retry(rdata, fp);
	else
		kref_get(&rdata->kref);
}
784 
/**
 * fc_rport_rtv_resp() - Request Timeout Value (RTV) response handler
 * @sp: current sequence in the RTV exchange
 * @fp: response frame, or an ERR_PTR() error code
 * @rdata_arg: private remote port data
 *
 * Many targets don't seem to support this, so the rport reaches READY
 * whether or not the response carries usable timeout values.
 *
 * Locking Note: This function will be called without the rport lock
 * held, but it will lock, call an _enter_* function or fc_rport_error
 * and then unlock the rport.
 */
static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp,
			      void *rdata_arg)
{
	struct fc_rport_priv *rdata = rdata_arg;
	u8 op;

	mutex_lock(&rdata->rp_mutex);

	FC_RPORT_DBG(rdata, "Received a RTV %s\n", fc_els_resp_type(fp));

	/* a late response after a state change is simply discarded */
	if (rdata->rp_state != RPORT_ST_RTV) {
		FC_RPORT_DBG(rdata, "Received a RTV response, but in state "
			     "%s\n", fc_rport_state(rdata));
		if (IS_ERR(fp))
			goto err;
		goto out;
	}

	if (IS_ERR(fp)) {
		/* note: no retry here, unlike the other response handlers */
		fc_rport_error(rdata, fp);
		goto err;
	}

	op = fc_frame_payload_op(fp);
	if (op == ELS_LS_ACC) {
		struct fc_els_rtv_acc *rtv;
		u32 toq;
		u32 tov;

		rtv = fc_frame_payload_get(fp, sizeof(*rtv));
		if (rtv) {
			toq = ntohl(rtv->rtv_toq);
			tov = ntohl(rtv->rtv_r_a_tov);
			if (tov == 0)
				tov = 1;	/* never allow a zero timeout */
			rdata->r_a_tov = tov;
			tov = ntohl(rtv->rtv_e_d_tov);
			/* scale down when the qualifier flags finer resolution */
			if (toq & FC_ELS_RTV_EDRES)
				tov /= 1000000;
			if (tov == 0)
				tov = 1;
			rdata->e_d_tov = tov;
		}
	}

	/* proceed to READY even on LS_RJT or a short payload */
	fc_rport_enter_ready(rdata);

out:
	fc_frame_free(fp);
err:
	mutex_unlock(&rdata->rp_mutex);
	/* drop the reference taken when the RTV was sent */
	kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
}
850 
/**
 * fc_rport_enter_rtv() - Send Request Timeout Value (RTV) request to peer
 * @rdata: private remote port data
 *
 * Locking Note: The rport lock is expected to be held before calling
 * this routine.
 */
static void fc_rport_enter_rtv(struct fc_rport_priv *rdata)
{
	struct fc_frame *fp;
	struct fc_lport *lport = rdata->local_port;

	FC_RPORT_DBG(rdata, "Port entered RTV state from %s state\n",
		     fc_rport_state(rdata));

	fc_rport_state_enter(rdata, RPORT_ST_RTV);

	fp = fc_frame_alloc(lport, sizeof(struct fc_els_rtv));
	if (!fp) {
		/* NULL fp is a retryable (allocation) failure */
		fc_rport_error_retry(rdata, fp);
		return;
	}

	/* hold a reference for the response callback on successful send */
	if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_RTV,
				     fc_rport_rtv_resp, rdata, lport->e_d_tov))
		fc_rport_error_retry(rdata, fp);
	else
		kref_get(&rdata->kref);
}
880 
/**
 * fc_rport_enter_logo() - Send Logout (LOGO) request to peer
 * @rdata: private remote port data
 *
 * Locking Note: The rport lock is expected to be held before calling
 * this routine.
 */
static void fc_rport_enter_logo(struct fc_rport_priv *rdata)
{
	struct fc_lport *lport = rdata->local_port;
	struct fc_frame *fp;

	FC_RPORT_DBG(rdata, "Port entered LOGO state from %s state\n",
		     fc_rport_state(rdata));

	fc_rport_state_enter(rdata, RPORT_ST_LOGO);

	fp = fc_frame_alloc(lport, sizeof(struct fc_els_logo));
	if (!fp) {
		/* NULL fp is a retryable (allocation) failure */
		fc_rport_error_retry(rdata, fp);
		return;
	}

	/* hold a reference for the response callback on successful send */
	if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_LOGO,
				  fc_rport_logo_resp, rdata, lport->e_d_tov))
		fc_rport_error_retry(rdata, fp);
	else
		kref_get(&rdata->kref);
}
910 
/**
 * fc_rport_recv_els_req() - handle a validated ELS request.
 * @lport: Fibre Channel local port
 * @sp: current sequence in the exchange
 * @fp: request frame
 *
 * Handle incoming ELS requests that require port login.
 * The ELS opcode has already been validated by the caller.
 *
 * Locking Note: Called with the lport lock held.
 */
static void fc_rport_recv_els_req(struct fc_lport *lport,
				  struct fc_seq *sp, struct fc_frame *fp)
{
	struct fc_rport_priv *rdata;
	struct fc_frame_header *fh;
	struct fc_seq_els_data els_data;

	/* default reject: these ELS ops require a prior PLOGI */
	els_data.fp = NULL;
	els_data.reason = ELS_RJT_UNAB;
	els_data.explan = ELS_EXPL_PLOGI_REQD;

	fh = fc_frame_header_get(fp);

	mutex_lock(&lport->disc.disc_mutex);
	rdata = lport->tt.rport_lookup(lport, ntoh24(fh->fh_s_id));
	if (!rdata) {
		mutex_unlock(&lport->disc.disc_mutex);
		goto reject;
	}
	/* take the rport lock before dropping disc_mutex so rdata stays valid */
	mutex_lock(&rdata->rp_mutex);
	mutex_unlock(&lport->disc.disc_mutex);

	/* only rports that completed PLOGI may issue these requests */
	switch (rdata->rp_state) {
	case RPORT_ST_PRLI:
	case RPORT_ST_RTV:
	case RPORT_ST_READY:
		break;
	default:
		mutex_unlock(&rdata->rp_mutex);
		goto reject;
	}

	switch (fc_frame_payload_op(fp)) {
	case ELS_PRLI:
		fc_rport_recv_prli_req(rdata, sp, fp);
		break;
	case ELS_PRLO:
		fc_rport_recv_prlo_req(rdata, sp, fp);
		break;
	case ELS_RRQ:
		els_data.fp = fp;
		lport->tt.seq_els_rsp_send(sp, ELS_RRQ, &els_data);
		break;
	case ELS_REC:
		els_data.fp = fp;
		lport->tt.seq_els_rsp_send(sp, ELS_REC, &els_data);
		break;
	default:
		fc_frame_free(fp);	/* can't happen */
		break;
	}

	mutex_unlock(&rdata->rp_mutex);
	return;

reject:
	lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &els_data);
	fc_frame_free(fp);
}
981 
/**
 * fc_rport_recv_req() - Handle a received ELS request from a rport
 * @sp: current sequence in the exchange
 * @fp: request frame
 * @lport: Fibre Channel local port
 *
 * Top-level dispatcher for rport-directed ELS requests.
 *
 * Locking Note: Called with the lport lock held.
 */
void fc_rport_recv_req(struct fc_seq *sp, struct fc_frame *fp,
		       struct fc_lport *lport)
{
	struct fc_seq_els_data els_data;

	/*
	 * Handle PLOGI and LOGO requests separately, since they
	 * don't require prior login.
	 * Check for unsupported opcodes first and reject them.
	 * For some ops, it would be incorrect to reject with "PLOGI required".
	 */
	switch (fc_frame_payload_op(fp)) {
	case ELS_PLOGI:
		fc_rport_recv_plogi_req(lport, sp, fp);
		break;
	case ELS_LOGO:
		fc_rport_recv_logo_req(lport, sp, fp);
		break;
	case ELS_PRLI:
	case ELS_PRLO:
	case ELS_RRQ:
	case ELS_REC:
		/* these need a prior login; validated in the handler */
		fc_rport_recv_els_req(lport, sp, fp);
		break;
	default:
		/* unsupported opcode: reject without claiming PLOGI required */
		fc_frame_free(fp);
		els_data.fp = NULL;
		els_data.reason = ELS_RJT_UNSUP;
		els_data.explan = ELS_EXPL_NONE;
		lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &els_data);
		break;
	}
}
1023 
1024 /**
1025  * fc_rport_recv_plogi_req() - Handle incoming Port Login (PLOGI) request
1026  * @lport: local port
1027  * @sp: current sequence in the PLOGI exchange
1028  * @fp: PLOGI request frame
1029  *
1030  * Locking Note: The rport lock is held before calling this function.
1031  */
1032 static void fc_rport_recv_plogi_req(struct fc_lport *lport,
1033 				    struct fc_seq *sp, struct fc_frame *rx_fp)
1034 {
1035 	struct fc_disc *disc;
1036 	struct fc_rport_priv *rdata;
1037 	struct fc_frame *fp = rx_fp;
1038 	struct fc_exch *ep;
1039 	struct fc_frame_header *fh;
1040 	struct fc_els_flogi *pl;
1041 	struct fc_seq_els_data rjt_data;
1042 	u32 sid, f_ctl;
1043 
1044 	rjt_data.fp = NULL;
1045 	fh = fc_frame_header_get(fp);
1046 	sid = ntoh24(fh->fh_s_id);
1047 
1048 	FC_RPORT_ID_DBG(lport, sid, "Received PLOGI request\n");
1049 
1050 	pl = fc_frame_payload_get(fp, sizeof(*pl));
1051 	if (!pl) {
1052 		FC_RPORT_ID_DBG(lport, sid, "Received PLOGI too short\n");
1053 		rjt_data.reason = ELS_RJT_PROT;
1054 		rjt_data.explan = ELS_EXPL_INV_LEN;
1055 		goto reject;
1056 	}
1057 
1058 	disc = &lport->disc;
1059 	mutex_lock(&disc->disc_mutex);
1060 	rdata = lport->tt.rport_create(lport, sid);
1061 	if (!rdata) {
1062 		mutex_unlock(&disc->disc_mutex);
1063 		rjt_data.reason = ELS_RJT_UNAB;
1064 		rjt_data.explan = ELS_EXPL_INSUF_RES;
1065 		goto reject;
1066 	}
1067 
1068 	mutex_lock(&rdata->rp_mutex);
1069 	mutex_unlock(&disc->disc_mutex);
1070 
1071 	rdata->ids.port_name = get_unaligned_be64(&pl->fl_wwpn);
1072 	rdata->ids.node_name = get_unaligned_be64(&pl->fl_wwnn);
1073 
1074 	/*
1075 	 * If the rport was just created, possibly due to the incoming PLOGI,
1076 	 * set the state appropriately and accept the PLOGI.
1077 	 *
1078 	 * If we had also sent a PLOGI, and if the received PLOGI is from a
1079 	 * higher WWPN, we accept it, otherwise an LS_RJT is sent with reason
1080 	 * "command already in progress".
1081 	 *
1082 	 * XXX TBD: If the session was ready before, the PLOGI should result in
1083 	 * all outstanding exchanges being reset.
1084 	 */
1085 	switch (rdata->rp_state) {
1086 	case RPORT_ST_INIT:
1087 		FC_RPORT_DBG(rdata, "Received PLOGI in INIT state\n");
1088 		break;
1089 	case RPORT_ST_PLOGI:
1090 		FC_RPORT_DBG(rdata, "Received PLOGI in PLOGI state\n");
1091 		if (rdata->ids.port_name < lport->wwpn) {
1092 			mutex_unlock(&rdata->rp_mutex);
1093 			rjt_data.reason = ELS_RJT_INPROG;
1094 			rjt_data.explan = ELS_EXPL_NONE;
1095 			goto reject;
1096 		}
1097 		break;
1098 	case RPORT_ST_PRLI:
1099 	case RPORT_ST_READY:
1100 		break;
1101 	case RPORT_ST_DELETE:
1102 	default:
1103 		FC_RPORT_DBG(rdata, "Received PLOGI in unexpected state %d\n",
1104 			     rdata->rp_state);
1105 		fc_frame_free(rx_fp);
1106 		goto out;
1107 	}
1108 
1109 	/*
1110 	 * Get session payload size from incoming PLOGI.
1111 	 */
1112 	rdata->maxframe_size = fc_plogi_get_maxframe(pl, lport->mfs);
1113 	fc_frame_free(rx_fp);
1114 
1115 	/*
1116 	 * Send LS_ACC.	 If this fails, the originator should retry.
1117 	 */
1118 	sp = lport->tt.seq_start_next(sp);
1119 	if (!sp)
1120 		goto out;
1121 	fp = fc_frame_alloc(lport, sizeof(*pl));
1122 	if (!fp)
1123 		goto out;
1124 
1125 	fc_plogi_fill(lport, fp, ELS_LS_ACC);
1126 	f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT;
1127 	ep = fc_seq_exch(sp);
1128 	fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
1129 		       FC_TYPE_ELS, f_ctl, 0);
1130 	lport->tt.seq_send(lport, sp, fp);
1131 	fc_rport_enter_prli(rdata);
1132 out:
1133 	mutex_unlock(&rdata->rp_mutex);
1134 	return;
1135 
1136 reject:
1137 	lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
1138 	fc_frame_free(fp);
1139 }
1140 
1141 /**
1142  * fc_rport_recv_prli_req() - Handle incoming Process Login (PRLI) request
1143  * @rdata: private remote port data
1144  * @sp: current sequence in the PRLI exchange
1145  * @fp: PRLI request frame
1146  *
1147  * Locking Note: The rport lock is exected to be held before calling
1148  * this function.
1149  */
1150 static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata,
1151 				   struct fc_seq *sp, struct fc_frame *rx_fp)
1152 {
1153 	struct fc_lport *lport = rdata->local_port;
1154 	struct fc_exch *ep;
1155 	struct fc_frame *fp;
1156 	struct fc_frame_header *fh;
1157 	struct {
1158 		struct fc_els_prli prli;
1159 		struct fc_els_spp spp;
1160 	} *pp;
1161 	struct fc_els_spp *rspp;	/* request service param page */
1162 	struct fc_els_spp *spp;	/* response spp */
1163 	unsigned int len;
1164 	unsigned int plen;
1165 	enum fc_els_rjt_reason reason = ELS_RJT_UNAB;
1166 	enum fc_els_rjt_explan explan = ELS_EXPL_NONE;
1167 	enum fc_els_spp_resp resp;
1168 	struct fc_seq_els_data rjt_data;
1169 	u32 f_ctl;
1170 	u32 fcp_parm;
1171 	u32 roles = FC_RPORT_ROLE_UNKNOWN;
1172 	rjt_data.fp = NULL;
1173 
1174 	fh = fc_frame_header_get(rx_fp);
1175 
1176 	FC_RPORT_DBG(rdata, "Received PRLI request while in state %s\n",
1177 		     fc_rport_state(rdata));
1178 
1179 	switch (rdata->rp_state) {
1180 	case RPORT_ST_PRLI:
1181 	case RPORT_ST_RTV:
1182 	case RPORT_ST_READY:
1183 		reason = ELS_RJT_NONE;
1184 		break;
1185 	default:
1186 		fc_frame_free(rx_fp);
1187 		return;
1188 		break;
1189 	}
1190 	len = fr_len(rx_fp) - sizeof(*fh);
1191 	pp = fc_frame_payload_get(rx_fp, sizeof(*pp));
1192 	if (pp == NULL) {
1193 		reason = ELS_RJT_PROT;
1194 		explan = ELS_EXPL_INV_LEN;
1195 	} else {
1196 		plen = ntohs(pp->prli.prli_len);
1197 		if ((plen % 4) != 0 || plen > len) {
1198 			reason = ELS_RJT_PROT;
1199 			explan = ELS_EXPL_INV_LEN;
1200 		} else if (plen < len) {
1201 			len = plen;
1202 		}
1203 		plen = pp->prli.prli_spp_len;
1204 		if ((plen % 4) != 0 || plen < sizeof(*spp) ||
1205 		    plen > len || len < sizeof(*pp)) {
1206 			reason = ELS_RJT_PROT;
1207 			explan = ELS_EXPL_INV_LEN;
1208 		}
1209 		rspp = &pp->spp;
1210 	}
1211 	if (reason != ELS_RJT_NONE ||
1212 	    (fp = fc_frame_alloc(lport, len)) == NULL) {
1213 		rjt_data.reason = reason;
1214 		rjt_data.explan = explan;
1215 		lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
1216 	} else {
1217 		sp = lport->tt.seq_start_next(sp);
1218 		WARN_ON(!sp);
1219 		pp = fc_frame_payload_get(fp, len);
1220 		WARN_ON(!pp);
1221 		memset(pp, 0, len);
1222 		pp->prli.prli_cmd = ELS_LS_ACC;
1223 		pp->prli.prli_spp_len = plen;
1224 		pp->prli.prli_len = htons(len);
1225 		len -= sizeof(struct fc_els_prli);
1226 
1227 		/* reinitialize remote port roles */
1228 		rdata->ids.roles = FC_RPORT_ROLE_UNKNOWN;
1229 
1230 		/*
1231 		 * Go through all the service parameter pages and build
1232 		 * response.  If plen indicates longer SPP than standard,
1233 		 * use that.  The entire response has been pre-cleared above.
1234 		 */
1235 		spp = &pp->spp;
1236 		while (len >= plen) {
1237 			spp->spp_type = rspp->spp_type;
1238 			spp->spp_type_ext = rspp->spp_type_ext;
1239 			spp->spp_flags = rspp->spp_flags & FC_SPP_EST_IMG_PAIR;
1240 			resp = FC_SPP_RESP_ACK;
1241 			if (rspp->spp_flags & FC_SPP_RPA_VAL)
1242 				resp = FC_SPP_RESP_NO_PA;
1243 			switch (rspp->spp_type) {
1244 			case 0:	/* common to all FC-4 types */
1245 				break;
1246 			case FC_TYPE_FCP:
1247 				fcp_parm = ntohl(rspp->spp_params);
1248 				if (fcp_parm * FCP_SPPF_RETRY)
1249 					rdata->flags |= FC_RP_FLAGS_RETRY;
1250 				rdata->supported_classes = FC_COS_CLASS3;
1251 				if (fcp_parm & FCP_SPPF_INIT_FCN)
1252 					roles |= FC_RPORT_ROLE_FCP_INITIATOR;
1253 				if (fcp_parm & FCP_SPPF_TARG_FCN)
1254 					roles |= FC_RPORT_ROLE_FCP_TARGET;
1255 				rdata->ids.roles = roles;
1256 
1257 				spp->spp_params =
1258 					htonl(lport->service_params);
1259 				break;
1260 			default:
1261 				resp = FC_SPP_RESP_INVL;
1262 				break;
1263 			}
1264 			spp->spp_flags |= resp;
1265 			len -= plen;
1266 			rspp = (struct fc_els_spp *)((char *)rspp + plen);
1267 			spp = (struct fc_els_spp *)((char *)spp + plen);
1268 		}
1269 
1270 		/*
1271 		 * Send LS_ACC.	 If this fails, the originator should retry.
1272 		 */
1273 		f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ;
1274 		f_ctl |= FC_FC_END_SEQ | FC_FC_SEQ_INIT;
1275 		ep = fc_seq_exch(sp);
1276 		fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
1277 			       FC_TYPE_ELS, f_ctl, 0);
1278 		lport->tt.seq_send(lport, sp, fp);
1279 
1280 		/*
1281 		 * Get lock and re-check state.
1282 		 */
1283 		switch (rdata->rp_state) {
1284 		case RPORT_ST_PRLI:
1285 			fc_rport_enter_ready(rdata);
1286 			break;
1287 		case RPORT_ST_READY:
1288 			break;
1289 		default:
1290 			break;
1291 		}
1292 	}
1293 	fc_frame_free(rx_fp);
1294 }
1295 
1296 /**
1297  * fc_rport_recv_prlo_req() - Handle incoming Process Logout (PRLO) request
1298  * @rdata: private remote port data
1299  * @sp: current sequence in the PRLO exchange
1300  * @fp: PRLO request frame
1301  *
1302  * Locking Note: The rport lock is exected to be held before calling
1303  * this function.
1304  */
1305 static void fc_rport_recv_prlo_req(struct fc_rport_priv *rdata,
1306 				   struct fc_seq *sp,
1307 				   struct fc_frame *fp)
1308 {
1309 	struct fc_lport *lport = rdata->local_port;
1310 
1311 	struct fc_frame_header *fh;
1312 	struct fc_seq_els_data rjt_data;
1313 
1314 	fh = fc_frame_header_get(fp);
1315 
1316 	FC_RPORT_DBG(rdata, "Received PRLO request while in state %s\n",
1317 		     fc_rport_state(rdata));
1318 
1319 	rjt_data.fp = NULL;
1320 	rjt_data.reason = ELS_RJT_UNAB;
1321 	rjt_data.explan = ELS_EXPL_NONE;
1322 	lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
1323 	fc_frame_free(fp);
1324 }
1325 
1326 /**
1327  * fc_rport_recv_logo_req() - Handle incoming Logout (LOGO) request
1328  * @lport: local port.
1329  * @sp: current sequence in the LOGO exchange
1330  * @fp: LOGO request frame
1331  *
1332  * Locking Note: The rport lock is exected to be held before calling
1333  * this function.
1334  */
1335 static void fc_rport_recv_logo_req(struct fc_lport *lport,
1336 				   struct fc_seq *sp,
1337 				   struct fc_frame *fp)
1338 {
1339 	struct fc_frame_header *fh;
1340 	struct fc_rport_priv *rdata;
1341 	u32 sid;
1342 
1343 	lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
1344 
1345 	fh = fc_frame_header_get(fp);
1346 	sid = ntoh24(fh->fh_s_id);
1347 
1348 	mutex_lock(&lport->disc.disc_mutex);
1349 	rdata = lport->tt.rport_lookup(lport, sid);
1350 	if (rdata) {
1351 		mutex_lock(&rdata->rp_mutex);
1352 		FC_RPORT_DBG(rdata, "Received LOGO request while in state %s\n",
1353 			     fc_rport_state(rdata));
1354 
1355 		/*
1356 		 * If the remote port was created due to discovery,
1357 		 * log back in.  It may have seen a stale RSCN about us.
1358 		 */
1359 		if (rdata->rp_state != RPORT_ST_DELETE && rdata->disc_id)
1360 			fc_rport_enter_plogi(rdata);
1361 		else
1362 			fc_rport_enter_delete(rdata, RPORT_EV_LOGO);
1363 		mutex_unlock(&rdata->rp_mutex);
1364 	} else
1365 		FC_RPORT_ID_DBG(lport, sid,
1366 				"Received LOGO from non-logged-in port\n");
1367 	mutex_unlock(&lport->disc.disc_mutex);
1368 	fc_frame_free(fp);
1369 }
1370 
/**
 * fc_rport_flush_queue() - Flush the rport event workqueue
 *
 * Blocks until all rport work items queued so far have completed.
 */
static void fc_rport_flush_queue(void)
{
	flush_workqueue(rport_event_queue);
}
1375 
/**
 * fc_rport_init() - Initialize the rport layer for a local port
 * @lport: local port whose transport template is being filled in
 *
 * Fills in any rport-related transport template entry points that the
 * low-level driver has not already overridden.  Always returns zero.
 */
int fc_rport_init(struct fc_lport *lport)
{
	if (!lport->tt.rport_lookup)
		lport->tt.rport_lookup = fc_rport_lookup;

	if (!lport->tt.rport_create)
		lport->tt.rport_create = fc_rport_create;

	if (!lport->tt.rport_login)
		lport->tt.rport_login = fc_rport_login;

	if (!lport->tt.rport_logoff)
		lport->tt.rport_logoff = fc_rport_logoff;

	if (!lport->tt.rport_recv_req)
		lport->tt.rport_recv_req = fc_rport_recv_req;

	if (!lport->tt.rport_flush_queue)
		lport->tt.rport_flush_queue = fc_rport_flush_queue;

	if (!lport->tt.rport_destroy)
		lport->tt.rport_destroy = fc_rport_destroy;

	return 0;
}
EXPORT_SYMBOL(fc_rport_init);
1402 
1403 int fc_setup_rport(void)
1404 {
1405 	rport_event_queue = create_singlethread_workqueue("fc_rport_eq");
1406 	if (!rport_event_queue)
1407 		return -ENOMEM;
1408 	return 0;
1409 }
1410 EXPORT_SYMBOL(fc_setup_rport);
1411 
/**
 * fc_destroy_rport() - Tear down the global rport event workqueue
 *
 * Flushes and destroys the workqueue created by fc_setup_rport().
 */
void fc_destroy_rport(void)
{
	destroy_workqueue(rport_event_queue);
}
EXPORT_SYMBOL(fc_destroy_rport);
1417 
1418 void fc_rport_terminate_io(struct fc_rport *rport)
1419 {
1420 	struct fc_rport_libfc_priv *rp = rport->dd_data;
1421 	struct fc_lport *lport = rp->local_port;
1422 
1423 	lport->tt.exch_mgr_reset(lport, 0, rport->port_id);
1424 	lport->tt.exch_mgr_reset(lport, rport->port_id, 0);
1425 }
1426 EXPORT_SYMBOL(fc_rport_terminate_io);
1427