xref: /openbmc/linux/drivers/scsi/libfc/fc_rport.c (revision 370c3bd0)
1 /*
2  * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms and conditions of the GNU General Public License,
6  * version 2, as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope it will be useful, but WITHOUT
9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
11  * more details.
12  *
13  * You should have received a copy of the GNU General Public License along with
14  * this program; if not, write to the Free Software Foundation, Inc.,
15  * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16  *
17  * Maintained at www.Open-FCoE.org
18  */
19 
20 /*
21  * RPORT GENERAL INFO
22  *
23  * This file contains all processing regarding fc_rports. It contains the
24  * rport state machine and does all rport interaction with the transport class.
25  * There should be no other places in libfc that interact directly with the
26  * transport class in regards to adding and deleting rports.
27  *
28  * fc_rport's represent N_Port's within the fabric.
29  */
30 
31 /*
32  * RPORT LOCKING
33  *
34  * The rport should never hold the rport mutex and then attempt to acquire
35  * either the lport or disc mutexes. The rport's mutex is considered lesser
36  * than both the lport's mutex and the disc mutex. Refer to fc_lport.c for
37  * more comments on the hierarchy.
38  *
39  * The locking strategy is similar to the lport's strategy. The lock protects
40  * the rport's states and is held and released by the entry points to the rport
41  * block. All _enter_* functions correspond to rport states and expect the rport
42  * mutex to be locked before calling them. This means that rports only handle
43  * one request or response at a time, since they're not critical for the I/O
44  * path this potential over-use of the mutex is acceptable.
45  */
46 
47 #include <linux/kernel.h>
48 #include <linux/spinlock.h>
49 #include <linux/interrupt.h>
50 #include <linux/rcupdate.h>
51 #include <linux/timer.h>
52 #include <linux/workqueue.h>
53 #include <asm/unaligned.h>
54 
55 #include <scsi/libfc.h>
56 #include <scsi/fc_encode.h>
57 
58 struct workqueue_struct *rport_event_queue;
59 
60 static void fc_rport_enter_plogi(struct fc_rport_priv *);
61 static void fc_rport_enter_prli(struct fc_rport_priv *);
62 static void fc_rport_enter_rtv(struct fc_rport_priv *);
63 static void fc_rport_enter_ready(struct fc_rport_priv *);
64 static void fc_rport_enter_logo(struct fc_rport_priv *);
65 static void fc_rport_enter_adisc(struct fc_rport_priv *);
66 
67 static void fc_rport_recv_plogi_req(struct fc_lport *,
68 				    struct fc_seq *, struct fc_frame *);
69 static void fc_rport_recv_prli_req(struct fc_rport_priv *,
70 				   struct fc_seq *, struct fc_frame *);
71 static void fc_rport_recv_prlo_req(struct fc_rport_priv *,
72 				   struct fc_seq *, struct fc_frame *);
73 static void fc_rport_recv_logo_req(struct fc_lport *,
74 				   struct fc_seq *, struct fc_frame *);
75 static void fc_rport_timeout(struct work_struct *);
76 static void fc_rport_error(struct fc_rport_priv *, struct fc_frame *);
77 static void fc_rport_error_retry(struct fc_rport_priv *, struct fc_frame *);
78 static void fc_rport_work(struct work_struct *);
79 
/* Human-readable names for each enum fc_rport_state, used by debug output. */
static const char *fc_rport_state_names[] = {
	[RPORT_ST_INIT] = "Init",
	[RPORT_ST_PLOGI] = "PLOGI",
	[RPORT_ST_PRLI] = "PRLI",
	[RPORT_ST_RTV] = "RTV",
	[RPORT_ST_READY] = "Ready",
	[RPORT_ST_LOGO] = "LOGO",
	[RPORT_ST_ADISC] = "ADISC",
	[RPORT_ST_DELETE] = "Delete",
};
90 
91 /**
92  * fc_rport_lookup() - lookup a remote port by port_id
93  * @lport: Fibre Channel host port instance
94  * @port_id: remote port port_id to match
95  */
96 static struct fc_rport_priv *fc_rport_lookup(const struct fc_lport *lport,
97 					     u32 port_id)
98 {
99 	struct fc_rport_priv *rdata;
100 
101 	list_for_each_entry(rdata, &lport->disc.rports, peers)
102 		if (rdata->ids.port_id == port_id &&
103 		    rdata->rp_state != RPORT_ST_DELETE)
104 			return rdata;
105 	return NULL;
106 }
107 
/**
 * fc_rport_create() - Create a new remote port
 * @lport:   The local port that the new remote port is for
 * @port_id: The port ID for the new remote port
 *
 * Returns an existing remote port for @port_id if one is already
 * known, otherwise allocates and initializes a new one, inheriting
 * default timeout values from the local port.  Returns NULL only on
 * allocation failure.
 *
 * Locking note:  must be called with the disc_mutex held.
 */
static struct fc_rport_priv *fc_rport_create(struct fc_lport *lport,
					     u32 port_id)
{
	struct fc_rport_priv *rdata;

	/* Reuse an existing, non-deleted rport if one is already known. */
	rdata = lport->tt.rport_lookup(lport, port_id);
	if (rdata)
		return rdata;

	rdata = kzalloc(sizeof(*rdata), GFP_KERNEL);
	if (!rdata)
		return NULL;

	/* WWNs are unknown until PLOGI completes; -1 marks them invalid. */
	rdata->ids.node_name = -1;
	rdata->ids.port_name = -1;
	rdata->ids.port_id = port_id;
	rdata->ids.roles = FC_RPORT_ROLE_UNKNOWN;

	kref_init(&rdata->kref);
	mutex_init(&rdata->rp_mutex);
	rdata->local_port = lport;
	rdata->rp_state = RPORT_ST_INIT;
	rdata->event = RPORT_EV_NONE;
	rdata->flags = FC_RP_FLAGS_REC_SUPPORTED;
	rdata->e_d_tov = lport->e_d_tov;
	rdata->r_a_tov = lport->r_a_tov;
	rdata->maxframe_size = FC_MIN_MAX_PAYLOAD;
	INIT_DELAYED_WORK(&rdata->retry_work, fc_rport_timeout);
	INIT_WORK(&rdata->event_work, fc_rport_work);
	/* The directory server is not tracked on the discovery list. */
	if (port_id != FC_FID_DIR_SERV)
		list_add(&rdata->peers, &lport->disc.rports);
	return rdata;
}
148 
149 /**
150  * fc_rport_destroy() - free a remote port after last reference is released.
151  * @kref: pointer to kref inside struct fc_rport_priv
152  */
153 static void fc_rport_destroy(struct kref *kref)
154 {
155 	struct fc_rport_priv *rdata;
156 
157 	rdata = container_of(kref, struct fc_rport_priv, kref);
158 	kfree(rdata);
159 }
160 
161 /**
162  * fc_rport_state() - return a string for the state the rport is in
163  * @rdata: remote port private data
164  */
165 static const char *fc_rport_state(struct fc_rport_priv *rdata)
166 {
167 	const char *cp;
168 
169 	cp = fc_rport_state_names[rdata->rp_state];
170 	if (!cp)
171 		cp = "Unknown";
172 	return cp;
173 }
174 
175 /**
176  * fc_set_rport_loss_tmo() - Set the remote port loss timeout in seconds.
177  * @rport: Pointer to Fibre Channel remote port structure
178  * @timeout: timeout in seconds
179  */
180 void fc_set_rport_loss_tmo(struct fc_rport *rport, u32 timeout)
181 {
182 	if (timeout)
183 		rport->dev_loss_tmo = timeout + 5;
184 	else
185 		rport->dev_loss_tmo = 30;
186 }
187 EXPORT_SYMBOL(fc_set_rport_loss_tmo);
188 
189 /**
190  * fc_plogi_get_maxframe() - Get max payload from the common service parameters
191  * @flp: FLOGI payload structure
192  * @maxval: upper limit, may be less than what is in the service parameters
193  */
194 static unsigned int fc_plogi_get_maxframe(struct fc_els_flogi *flp,
195 					  unsigned int maxval)
196 {
197 	unsigned int mfs;
198 
199 	/*
200 	 * Get max payload from the common service parameters and the
201 	 * class 3 receive data field size.
202 	 */
203 	mfs = ntohs(flp->fl_csp.sp_bb_data) & FC_SP_BB_DATA_MASK;
204 	if (mfs >= FC_SP_MIN_MAX_PAYLOAD && mfs < maxval)
205 		maxval = mfs;
206 	mfs = ntohs(flp->fl_cssp[3 - 1].cp_rdfs);
207 	if (mfs >= FC_SP_MIN_MAX_PAYLOAD && mfs < maxval)
208 		maxval = mfs;
209 	return maxval;
210 }
211 
212 /**
213  * fc_rport_state_enter() - Change the rport's state
214  * @rdata: The rport whose state should change
215  * @new: The new state of the rport
216  *
217  * Locking Note: Called with the rport lock held
218  */
219 static void fc_rport_state_enter(struct fc_rport_priv *rdata,
220 				 enum fc_rport_state new)
221 {
222 	if (rdata->rp_state != new)
223 		rdata->retries = 0;
224 	rdata->rp_state = new;
225 }
226 
/*
 * fc_rport_work() - handler for rport events queued on rport_event_queue.
 * @work: embedded event_work of the struct fc_rport_priv
 *
 * Runs in process context.  The pending event is read under the rport
 * mutex, which is then dropped before calling into the fc_transport
 * class or the upper-layer event callback, per the locking rules at
 * the top of this file.
 */
static void fc_rport_work(struct work_struct *work)
{
	u32 port_id;
	struct fc_rport_priv *rdata =
		container_of(work, struct fc_rport_priv, event_work);
	struct fc_rport_libfc_priv *rp;
	enum fc_rport_event event;
	struct fc_lport *lport = rdata->local_port;
	struct fc_rport_operations *rport_ops;
	struct fc_rport_identifiers ids;
	struct fc_rport *rport;

	mutex_lock(&rdata->rp_mutex);
	event = rdata->event;
	rport_ops = rdata->ops;
	rport = rdata->rport;

	FC_RPORT_DBG(rdata, "work event %u\n", event);

	switch (event) {
	case RPORT_EV_READY:
		/* Snapshot ids so they can be used after dropping the mutex. */
		ids = rdata->ids;
		rdata->event = RPORT_EV_NONE;
		kref_get(&rdata->kref);	/* balanced by the kref_put below */
		mutex_unlock(&rdata->rp_mutex);

		/* Register with the fc_transport class if not done yet. */
		if (!rport)
			rport = fc_remote_port_add(lport->host, 0, &ids);
		if (!rport) {
			FC_RPORT_DBG(rdata, "Failed to add the rport\n");
			lport->tt.rport_logoff(rdata);
			kref_put(&rdata->kref, lport->tt.rport_destroy);
			return;
		}
		/* Mirror negotiated parameters into the transport rport. */
		mutex_lock(&rdata->rp_mutex);
		if (rdata->rport)
			FC_RPORT_DBG(rdata, "rport already allocated\n");
		rdata->rport = rport;
		rport->maxframe_size = rdata->maxframe_size;
		rport->supported_classes = rdata->supported_classes;

		rp = rport->dd_data;
		rp->local_port = lport;
		rp->rp_state = rdata->rp_state;
		rp->flags = rdata->flags;
		rp->e_d_tov = rdata->e_d_tov;
		rp->r_a_tov = rdata->r_a_tov;
		mutex_unlock(&rdata->rp_mutex);

		/* Notify the upper layer, if one registered a callback. */
		if (rport_ops && rport_ops->event_callback) {
			FC_RPORT_DBG(rdata, "callback ev %d\n", event);
			rport_ops->event_callback(lport, rdata, event);
		}
		kref_put(&rdata->kref, lport->tt.rport_destroy);
		break;

	case RPORT_EV_FAILED:
	case RPORT_EV_LOGO:
	case RPORT_EV_STOP:
		port_id = rdata->ids.port_id;
		mutex_unlock(&rdata->rp_mutex);

		/* Unlink from discovery; the dir server was never linked. */
		if (port_id != FC_FID_DIR_SERV) {
			mutex_lock(&lport->disc.disc_mutex);
			list_del(&rdata->peers);
			mutex_unlock(&lport->disc.disc_mutex);
		}

		if (rport_ops && rport_ops->event_callback) {
			FC_RPORT_DBG(rdata, "callback ev %d\n", event);
			rport_ops->event_callback(lport, rdata, event);
		}
		cancel_delayed_work_sync(&rdata->retry_work);

		/*
		 * Reset any outstanding exchanges before freeing rport.
		 */
		lport->tt.exch_mgr_reset(lport, 0, port_id);
		lport->tt.exch_mgr_reset(lport, port_id, 0);

		if (rport) {
			rp = rport->dd_data;
			rp->rp_state = RPORT_ST_DELETE;
			mutex_lock(&rdata->rp_mutex);
			rdata->rport = NULL;
			mutex_unlock(&rdata->rp_mutex);
			fc_remote_port_delete(rport);
		}
		/* Drop a reference; this may free rdata. */
		kref_put(&rdata->kref, lport->tt.rport_destroy);
		break;

	default:
		mutex_unlock(&rdata->rp_mutex);
		break;
	}
}
323 
324 /**
325  * fc_rport_login() - Start the remote port login state machine
326  * @rdata: private remote port
327  *
328  * Locking Note: Called without the rport lock held. This
329  * function will hold the rport lock, call an _enter_*
330  * function and then unlock the rport.
331  *
332  * This indicates the intent to be logged into the remote port.
333  * If it appears we are already logged in, ADISC is used to verify
334  * the setup.
335  */
336 int fc_rport_login(struct fc_rport_priv *rdata)
337 {
338 	mutex_lock(&rdata->rp_mutex);
339 
340 	switch (rdata->rp_state) {
341 	case RPORT_ST_READY:
342 		FC_RPORT_DBG(rdata, "ADISC port\n");
343 		fc_rport_enter_adisc(rdata);
344 		break;
345 	default:
346 		FC_RPORT_DBG(rdata, "Login to port\n");
347 		fc_rport_enter_plogi(rdata);
348 		break;
349 	}
350 	mutex_unlock(&rdata->rp_mutex);
351 
352 	return 0;
353 }
354 
/**
 * fc_rport_enter_delete() - schedule a remote port to be deleted.
 * @rdata: private remote port
 * @event: event to report as the reason for deletion
 *
 * Locking Note: Called with the rport lock held.
 *
 * Allow state change into DELETE only once.
 *
 * Call queue_work only if there's no event already pending.
 * Set the new event so that the old pending event will not occur.
 * Since we have the mutex, even if fc_rport_work() is already started,
 * it'll see the new event.
 */
static void fc_rport_enter_delete(struct fc_rport_priv *rdata,
				  enum fc_rport_event event)
{
	/* Deletion already in progress: nothing more to do. */
	if (rdata->rp_state == RPORT_ST_DELETE)
		return;

	FC_RPORT_DBG(rdata, "Delete port\n");

	fc_rport_state_enter(rdata, RPORT_ST_DELETE);

	/*
	 * Queue only when no event is pending; the queued (or already
	 * running) work item will pick up the event set below because
	 * we hold the rport mutex.
	 */
	if (rdata->event == RPORT_EV_NONE)
		queue_work(rport_event_queue, &rdata->event_work);
	rdata->event = event;
}
383 
/**
 * fc_rport_logoff() - Logoff and remove an rport
 * @rdata: private remote port
 *
 * Sends a LOGO to the peer (unless deletion is already in progress)
 * and schedules the rport for deletion.  Always returns 0.
 *
 * Locking Note: Called without the rport lock held. This
 * function will hold the rport lock, call an _enter_*
 * function and then unlock the rport.
 */
int fc_rport_logoff(struct fc_rport_priv *rdata)
{
	mutex_lock(&rdata->rp_mutex);

	FC_RPORT_DBG(rdata, "Remove port\n");

	/* Already queued for deletion; don't send another LOGO. */
	if (rdata->rp_state == RPORT_ST_DELETE) {
		FC_RPORT_DBG(rdata, "Port in Delete state, not removing\n");
		mutex_unlock(&rdata->rp_mutex);
		goto out;
	}

	fc_rport_enter_logo(rdata);

	/*
	 * Change the state to Delete so that we discard
	 * the response.
	 */
	fc_rport_enter_delete(rdata, RPORT_EV_STOP);
	mutex_unlock(&rdata->rp_mutex);

out:
	return 0;
}
416 
/**
 * fc_rport_enter_ready() - The rport is ready
 * @rdata: private remote port
 *
 * Enters the READY state and schedules fc_rport_work() to report the
 * RPORT_EV_READY event.
 *
 * Locking Note: The rport lock is expected to be held before calling
 * this routine.
 */
static void fc_rport_enter_ready(struct fc_rport_priv *rdata)
{
	fc_rport_state_enter(rdata, RPORT_ST_READY);

	FC_RPORT_DBG(rdata, "Port is Ready\n");

	/*
	 * Queue only when idle; a queued/running work item will pick up
	 * the READY event set below since we hold the rport mutex.
	 */
	if (rdata->event == RPORT_EV_NONE)
		queue_work(rport_event_queue, &rdata->event_work);
	rdata->event = RPORT_EV_READY;
}
434 
/**
 * fc_rport_timeout() - Handler for the retry_work timer.
 * @work: The work struct of the fc_rport_priv
 *
 * Re-issues the request for whichever login step was pending when the
 * retry delay expired; terminal/idle states need no action.
 *
 * Locking Note: Called without the rport lock held. This
 * function will hold the rport lock, call an _enter_*
 * function and then unlock the rport.
 */
static void fc_rport_timeout(struct work_struct *work)
{
	struct fc_rport_priv *rdata =
		container_of(work, struct fc_rport_priv, retry_work.work);

	mutex_lock(&rdata->rp_mutex);

	switch (rdata->rp_state) {
	case RPORT_ST_PLOGI:
		fc_rport_enter_plogi(rdata);
		break;
	case RPORT_ST_PRLI:
		fc_rport_enter_prli(rdata);
		break;
	case RPORT_ST_RTV:
		fc_rport_enter_rtv(rdata);
		break;
	case RPORT_ST_LOGO:
		fc_rport_enter_logo(rdata);
		break;
	case RPORT_ST_ADISC:
		fc_rport_enter_adisc(rdata);
		break;
	case RPORT_ST_READY:
	case RPORT_ST_INIT:
	case RPORT_ST_DELETE:
		/* No request outstanding in these states. */
		break;
	}

	mutex_unlock(&rdata->rp_mutex);
}
474 
/**
 * fc_rport_error() - Error handler, called once retries have been exhausted
 * @rdata: private remote port
 * @fp: The frame pointer (may be an ERR_PTR encoding the error)
 *
 * The recovery action depends on which step failed: a failed PLOGI or
 * LOGO deletes the port, a failed RTV is tolerated (go READY anyway),
 * and a failed PRLI or ADISC logs the port out.
 *
 * Locking Note: The rport lock is expected to be held before
 * calling this routine
 */
static void fc_rport_error(struct fc_rport_priv *rdata, struct fc_frame *fp)
{
	FC_RPORT_DBG(rdata, "Error %ld in state %s, retries %d\n",
		     IS_ERR(fp) ? -PTR_ERR(fp) : 0,
		     fc_rport_state(rdata), rdata->retries);

	switch (rdata->rp_state) {
	case RPORT_ST_PLOGI:
	case RPORT_ST_LOGO:
		fc_rport_enter_delete(rdata, RPORT_EV_FAILED);
		break;
	case RPORT_ST_RTV:
		/* RTV is optional; many targets don't support it. */
		fc_rport_enter_ready(rdata);
		break;
	case RPORT_ST_PRLI:
	case RPORT_ST_ADISC:
		fc_rport_enter_logo(rdata);
		break;
	case RPORT_ST_DELETE:
	case RPORT_ST_READY:
	case RPORT_ST_INIT:
		/* Nothing in flight; ignore. */
		break;
	}
}
507 
508 /**
509  * fc_rport_error_retry() - Error handler when retries are desired
510  * @rdata: private remote port data
511  * @fp: The frame pointer
512  *
513  * If the error was an exchange timeout retry immediately,
514  * otherwise wait for E_D_TOV.
515  *
516  * Locking Note: The rport lock is expected to be held before
517  * calling this routine
518  */
519 static void fc_rport_error_retry(struct fc_rport_priv *rdata,
520 				 struct fc_frame *fp)
521 {
522 	unsigned long delay = FC_DEF_E_D_TOV;
523 
524 	/* make sure this isn't an FC_EX_CLOSED error, never retry those */
525 	if (PTR_ERR(fp) == -FC_EX_CLOSED)
526 		return fc_rport_error(rdata, fp);
527 
528 	if (rdata->retries < rdata->local_port->max_rport_retry_count) {
529 		FC_RPORT_DBG(rdata, "Error %ld in state %s, retrying\n",
530 			     PTR_ERR(fp), fc_rport_state(rdata));
531 		rdata->retries++;
532 		/* no additional delay on exchange timeouts */
533 		if (PTR_ERR(fp) == -FC_EX_TIMEOUT)
534 			delay = 0;
535 		schedule_delayed_work(&rdata->retry_work, delay);
536 		return;
537 	}
538 
539 	return fc_rport_error(rdata, fp);
540 }
541 
/**
 * fc_rport_plogi_resp() - Handle incoming ELS PLOGI response
 * @sp: current sequence in the PLOGI exchange
 * @fp: response frame, or an ERR_PTR on exchange error
 * @rdata_arg: private remote port data
 *
 * On LS_ACC, records the peer's WWPN/WWNN, E_D_TOV, maximum concurrent
 * sequence count and max frame size, then advances to PRLI.  Anything
 * else goes through the retry path.
 *
 * Locking Note: This function will be called without the rport lock
 * held, but it will lock, call an _enter_* function or fc_rport_error
 * and then unlock the rport.
 */
static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp,
				void *rdata_arg)
{
	struct fc_rport_priv *rdata = rdata_arg;
	struct fc_lport *lport = rdata->local_port;
	struct fc_els_flogi *plp = NULL;
	unsigned int tov;
	u16 csp_seq;
	u16 cssp_seq;
	u8 op;

	mutex_lock(&rdata->rp_mutex);

	FC_RPORT_DBG(rdata, "Received a PLOGI %s\n", fc_els_resp_type(fp));

	/* Discard a response that arrives after leaving the PLOGI state. */
	if (rdata->rp_state != RPORT_ST_PLOGI) {
		FC_RPORT_DBG(rdata, "Received a PLOGI response, but in state "
			     "%s\n", fc_rport_state(rdata));
		if (IS_ERR(fp))
			goto err;	/* ERR_PTRs must not be freed */
		goto out;
	}

	if (IS_ERR(fp)) {
		fc_rport_error_retry(rdata, fp);
		goto err;
	}

	op = fc_frame_payload_op(fp);
	if (op == ELS_LS_ACC &&
	    (plp = fc_frame_payload_get(fp, sizeof(*plp))) != NULL) {
		rdata->ids.port_name = get_unaligned_be64(&plp->fl_wwpn);
		rdata->ids.node_name = get_unaligned_be64(&plp->fl_wwnn);

		/* Scale E_D_TOV down when the resolution feature bit is set. */
		tov = ntohl(plp->fl_csp.sp_e_d_tov);
		if (ntohs(plp->fl_csp.sp_features) & FC_SP_FT_EDTR)
			tov /= 1000;
		if (tov > rdata->e_d_tov)
			rdata->e_d_tov = tov;
		/* Max sequences: lesser of common and class-3 values. */
		csp_seq = ntohs(plp->fl_csp.sp_tot_seq);
		cssp_seq = ntohs(plp->fl_cssp[3 - 1].cp_con_seq);
		if (cssp_seq < csp_seq)
			csp_seq = cssp_seq;
		rdata->max_seq = csp_seq;
		rdata->maxframe_size = fc_plogi_get_maxframe(plp, lport->mfs);
		fc_rport_enter_prli(rdata);
	} else
		fc_rport_error_retry(rdata, fp);

out:
	fc_frame_free(fp);
err:
	mutex_unlock(&rdata->rp_mutex);
	/* Release the reference taken when the PLOGI was sent. */
	kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
}
607 
608 /**
609  * fc_rport_enter_plogi() - Send Port Login (PLOGI) request to peer
610  * @rdata: private remote port data
611  *
612  * Locking Note: The rport lock is expected to be held before calling
613  * this routine.
614  */
615 static void fc_rport_enter_plogi(struct fc_rport_priv *rdata)
616 {
617 	struct fc_lport *lport = rdata->local_port;
618 	struct fc_frame *fp;
619 
620 	FC_RPORT_DBG(rdata, "Port entered PLOGI state from %s state\n",
621 		     fc_rport_state(rdata));
622 
623 	fc_rport_state_enter(rdata, RPORT_ST_PLOGI);
624 
625 	rdata->maxframe_size = FC_MIN_MAX_PAYLOAD;
626 	fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
627 	if (!fp) {
628 		fc_rport_error_retry(rdata, fp);
629 		return;
630 	}
631 	rdata->e_d_tov = lport->e_d_tov;
632 
633 	if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_PLOGI,
634 				  fc_rport_plogi_resp, rdata, lport->e_d_tov))
635 		fc_rport_error_retry(rdata, fp);
636 	else
637 		kref_get(&rdata->kref);
638 }
639 
/**
 * fc_rport_prli_resp() - Process Login (PRLI) response handler
 * @sp: current sequence in the PRLI exchange
 * @fp: response frame, or an ERR_PTR on exchange error
 * @rdata_arg: private remote port data
 *
 * On LS_ACC, extracts the FCP service parameters to determine the
 * peer's initiator/target roles and retry support, then advances to
 * RTV.  A rejected PRLI deletes the port.
 *
 * Locking Note: This function will be called without the rport lock
 * held, but it will lock, call an _enter_* function or fc_rport_error
 * and then unlock the rport.
 */
static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
			       void *rdata_arg)
{
	struct fc_rport_priv *rdata = rdata_arg;
	struct {
		struct fc_els_prli prli;
		struct fc_els_spp spp;
	} *pp;
	u32 roles = FC_RPORT_ROLE_UNKNOWN;
	u32 fcp_parm = 0;
	u8 op;

	mutex_lock(&rdata->rp_mutex);

	FC_RPORT_DBG(rdata, "Received a PRLI %s\n", fc_els_resp_type(fp));

	/* Discard a response that arrives after leaving the PRLI state. */
	if (rdata->rp_state != RPORT_ST_PRLI) {
		FC_RPORT_DBG(rdata, "Received a PRLI response, but in state "
			     "%s\n", fc_rport_state(rdata));
		if (IS_ERR(fp))
			goto err;	/* ERR_PTRs must not be freed */
		goto out;
	}

	if (IS_ERR(fp)) {
		fc_rport_error_retry(rdata, fp);
		goto err;
	}

	/* reinitialize remote port roles */
	rdata->ids.roles = FC_RPORT_ROLE_UNKNOWN;

	op = fc_frame_payload_op(fp);
	if (op == ELS_LS_ACC) {
		pp = fc_frame_payload_get(fp, sizeof(*pp));
		/* Only trust the service page if it is fully present. */
		if (pp && pp->prli.prli_spp_len >= sizeof(pp->spp)) {
			fcp_parm = ntohl(pp->spp.spp_params);
			if (fcp_parm & FCP_SPPF_RETRY)
				rdata->flags |= FC_RP_FLAGS_RETRY;
		}

		rdata->supported_classes = FC_COS_CLASS3;
		if (fcp_parm & FCP_SPPF_INIT_FCN)
			roles |= FC_RPORT_ROLE_FCP_INITIATOR;
		if (fcp_parm & FCP_SPPF_TARG_FCN)
			roles |= FC_RPORT_ROLE_FCP_TARGET;

		rdata->ids.roles = roles;
		fc_rport_enter_rtv(rdata);

	} else {
		FC_RPORT_DBG(rdata, "Bad ELS response for PRLI command\n");
		fc_rport_enter_delete(rdata, RPORT_EV_FAILED);
	}

out:
	fc_frame_free(fp);
err:
	mutex_unlock(&rdata->rp_mutex);
	/* Release the reference taken when the PRLI was sent. */
	kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
}
711 
/**
 * fc_rport_logo_resp() - Logout (LOGO) response handler
 * @sp: current sequence in the LOGO exchange
 * @fp: response frame, or an ERR_PTR on exchange error
 * @rdata_arg: private remote port data
 *
 * Any non-error response, accepted or not, completes the logout and
 * schedules the port for deletion.
 *
 * Locking Note: This function will be called without the rport lock
 * held, but it will lock, call an _enter_* function or fc_rport_error
 * and then unlock the rport.
 */
static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
			       void *rdata_arg)
{
	struct fc_rport_priv *rdata = rdata_arg;
	u8 op;

	mutex_lock(&rdata->rp_mutex);

	FC_RPORT_DBG(rdata, "Received a LOGO %s\n", fc_els_resp_type(fp));

	/* Discard a response that arrives after leaving the LOGO state. */
	if (rdata->rp_state != RPORT_ST_LOGO) {
		FC_RPORT_DBG(rdata, "Received a LOGO response, but in state "
			     "%s\n", fc_rport_state(rdata));
		if (IS_ERR(fp))
			goto err;	/* ERR_PTRs must not be freed */
		goto out;
	}

	if (IS_ERR(fp)) {
		fc_rport_error_retry(rdata, fp);
		goto err;
	}

	/* A rejection is only logged; deletion proceeds regardless. */
	op = fc_frame_payload_op(fp);
	if (op != ELS_LS_ACC)
		FC_RPORT_DBG(rdata, "Bad ELS response op %x for LOGO command\n",
			     op);
	fc_rport_enter_delete(rdata, RPORT_EV_LOGO);

out:
	fc_frame_free(fp);
err:
	mutex_unlock(&rdata->rp_mutex);
	/* Release the reference taken when the LOGO was sent. */
	kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
}
757 
758 /**
759  * fc_rport_enter_prli() - Send Process Login (PRLI) request to peer
760  * @rdata: private remote port data
761  *
762  * Locking Note: The rport lock is expected to be held before calling
763  * this routine.
764  */
765 static void fc_rport_enter_prli(struct fc_rport_priv *rdata)
766 {
767 	struct fc_lport *lport = rdata->local_port;
768 	struct {
769 		struct fc_els_prli prli;
770 		struct fc_els_spp spp;
771 	} *pp;
772 	struct fc_frame *fp;
773 
774 	/*
775 	 * If the rport is one of the well known addresses
776 	 * we skip PRLI and RTV and go straight to READY.
777 	 */
778 	if (rdata->ids.port_id >= FC_FID_DOM_MGR) {
779 		fc_rport_enter_ready(rdata);
780 		return;
781 	}
782 
783 	FC_RPORT_DBG(rdata, "Port entered PRLI state from %s state\n",
784 		     fc_rport_state(rdata));
785 
786 	fc_rport_state_enter(rdata, RPORT_ST_PRLI);
787 
788 	fp = fc_frame_alloc(lport, sizeof(*pp));
789 	if (!fp) {
790 		fc_rport_error_retry(rdata, fp);
791 		return;
792 	}
793 
794 	if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_PRLI,
795 				  fc_rport_prli_resp, rdata, lport->e_d_tov))
796 		fc_rport_error_retry(rdata, fp);
797 	else
798 		kref_get(&rdata->kref);
799 }
800 
/**
 * fc_rport_rtv_resp() - Request Timeout Value response handler
 * @sp: current sequence in the RTV exchange
 * @fp: response frame, or an ERR_PTR on exchange error
 * @rdata_arg: private remote port data
 *
 * Many targets don't seem to support this.  On LS_ACC the peer's
 * R_A_TOV and E_D_TOV values replace ours; in every non-error case the
 * port proceeds to READY.
 *
 * Locking Note: This function will be called without the rport lock
 * held, but it will lock, call an _enter_* function or fc_rport_error
 * and then unlock the rport.
 */
static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp,
			      void *rdata_arg)
{
	struct fc_rport_priv *rdata = rdata_arg;
	u8 op;

	mutex_lock(&rdata->rp_mutex);

	FC_RPORT_DBG(rdata, "Received a RTV %s\n", fc_els_resp_type(fp));

	/* Discard a response that arrives after leaving the RTV state. */
	if (rdata->rp_state != RPORT_ST_RTV) {
		FC_RPORT_DBG(rdata, "Received a RTV response, but in state "
			     "%s\n", fc_rport_state(rdata));
		if (IS_ERR(fp))
			goto err;	/* ERR_PTRs must not be freed */
		goto out;
	}

	if (IS_ERR(fp)) {
		/* RTV failures are not retried; fc_rport_error goes READY. */
		fc_rport_error(rdata, fp);
		goto err;
	}

	op = fc_frame_payload_op(fp);
	if (op == ELS_LS_ACC) {
		struct fc_els_rtv_acc *rtv;
		u32 toq;
		u32 tov;

		rtv = fc_frame_payload_get(fp, sizeof(*rtv));
		if (rtv) {
			toq = ntohl(rtv->rtv_toq);
			tov = ntohl(rtv->rtv_r_a_tov);
			if (tov == 0)
				tov = 1;	/* never allow a zero timeout */
			rdata->r_a_tov = tov;
			tov = ntohl(rtv->rtv_e_d_tov);
			/* Scale down when the resolution qualifier is set. */
			if (toq & FC_ELS_RTV_EDRES)
				tov /= 1000000;
			if (tov == 0)
				tov = 1;
			rdata->e_d_tov = tov;
		}
	}

	fc_rport_enter_ready(rdata);

out:
	fc_frame_free(fp);
err:
	mutex_unlock(&rdata->rp_mutex);
	/* Release the reference taken when the RTV was sent. */
	kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
}
866 
867 /**
868  * fc_rport_enter_rtv() - Send Request Timeout Value (RTV) request to peer
869  * @rdata: private remote port data
870  *
871  * Locking Note: The rport lock is expected to be held before calling
872  * this routine.
873  */
874 static void fc_rport_enter_rtv(struct fc_rport_priv *rdata)
875 {
876 	struct fc_frame *fp;
877 	struct fc_lport *lport = rdata->local_port;
878 
879 	FC_RPORT_DBG(rdata, "Port entered RTV state from %s state\n",
880 		     fc_rport_state(rdata));
881 
882 	fc_rport_state_enter(rdata, RPORT_ST_RTV);
883 
884 	fp = fc_frame_alloc(lport, sizeof(struct fc_els_rtv));
885 	if (!fp) {
886 		fc_rport_error_retry(rdata, fp);
887 		return;
888 	}
889 
890 	if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_RTV,
891 				     fc_rport_rtv_resp, rdata, lport->e_d_tov))
892 		fc_rport_error_retry(rdata, fp);
893 	else
894 		kref_get(&rdata->kref);
895 }
896 
897 /**
898  * fc_rport_enter_logo() - Send Logout (LOGO) request to peer
899  * @rdata: private remote port data
900  *
901  * Locking Note: The rport lock is expected to be held before calling
902  * this routine.
903  */
904 static void fc_rport_enter_logo(struct fc_rport_priv *rdata)
905 {
906 	struct fc_lport *lport = rdata->local_port;
907 	struct fc_frame *fp;
908 
909 	FC_RPORT_DBG(rdata, "Port entered LOGO state from %s state\n",
910 		     fc_rport_state(rdata));
911 
912 	fc_rport_state_enter(rdata, RPORT_ST_LOGO);
913 
914 	fp = fc_frame_alloc(lport, sizeof(struct fc_els_logo));
915 	if (!fp) {
916 		fc_rport_error_retry(rdata, fp);
917 		return;
918 	}
919 
920 	if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_LOGO,
921 				  fc_rport_logo_resp, rdata, lport->e_d_tov))
922 		fc_rport_error_retry(rdata, fp);
923 	else
924 		kref_get(&rdata->kref);
925 }
926 
/**
 * fc_rport_adisc_resp() - Address Discovery response handler
 * @sp: current sequence in the ADISC exchange
 * @fp: response frame, or an ERR_PTR on exchange error
 * @rdata_arg: remote port private.
 *
 * Verifies that the peer still has the same port ID, WWPN and WWNN.
 * On any mismatch we re-login with PLOGI; on a match we go READY.
 *
 * Locking Note: This function will be called without the rport lock
 * held, but it will lock, call an _enter_* function or fc_rport_error
 * and then unlock the rport.
 */
static void fc_rport_adisc_resp(struct fc_seq *sp, struct fc_frame *fp,
			      void *rdata_arg)
{
	struct fc_rport_priv *rdata = rdata_arg;
	struct fc_els_adisc *adisc;
	u8 op;

	mutex_lock(&rdata->rp_mutex);

	FC_RPORT_DBG(rdata, "Received a ADISC response\n");

	/* Discard a response that arrives after leaving the ADISC state. */
	if (rdata->rp_state != RPORT_ST_ADISC) {
		FC_RPORT_DBG(rdata, "Received a ADISC resp but in state %s\n",
			     fc_rport_state(rdata));
		if (IS_ERR(fp))
			goto err;	/* ERR_PTRs must not be freed */
		goto out;
	}

	if (IS_ERR(fp)) {
		fc_rport_error(rdata, fp);
		goto err;
	}

	/*
	 * If address verification failed.  Consider us logged out of the rport.
	 * Since the rport is still in discovery, we want to be
	 * logged in, so go to PLOGI state.  Otherwise, go back to READY.
	 */
	op = fc_frame_payload_op(fp);
	adisc = fc_frame_payload_get(fp, sizeof(*adisc));
	if (op != ELS_LS_ACC || !adisc ||
	    ntoh24(adisc->adisc_port_id) != rdata->ids.port_id ||
	    get_unaligned_be64(&adisc->adisc_wwpn) != rdata->ids.port_name ||
	    get_unaligned_be64(&adisc->adisc_wwnn) != rdata->ids.node_name) {
		FC_RPORT_DBG(rdata, "ADISC error or mismatch\n");
		fc_rport_enter_plogi(rdata);
	} else {
		FC_RPORT_DBG(rdata, "ADISC OK\n");
		fc_rport_enter_ready(rdata);
	}
out:
	fc_frame_free(fp);
err:
	mutex_unlock(&rdata->rp_mutex);
	/* Release the reference taken when the ADISC was sent. */
	kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
}
984 
985 /**
986  * fc_rport_enter_adisc() - Send Address Discover (ADISC) request to peer
987  * @rdata: remote port private data
988  *
989  * Locking Note: The rport lock is expected to be held before calling
990  * this routine.
991  */
992 static void fc_rport_enter_adisc(struct fc_rport_priv *rdata)
993 {
994 	struct fc_lport *lport = rdata->local_port;
995 	struct fc_frame *fp;
996 
997 	FC_RPORT_DBG(rdata, "sending ADISC from %s state\n",
998 		     fc_rport_state(rdata));
999 
1000 	fc_rport_state_enter(rdata, RPORT_ST_ADISC);
1001 
1002 	fp = fc_frame_alloc(lport, sizeof(struct fc_els_adisc));
1003 	if (!fp) {
1004 		fc_rport_error_retry(rdata, fp);
1005 		return;
1006 	}
1007 	if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_ADISC,
1008 				  fc_rport_adisc_resp, rdata, lport->e_d_tov))
1009 		fc_rport_error_retry(rdata, fp);
1010 	else
1011 		kref_get(&rdata->kref);
1012 }
1013 
/**
 * fc_rport_recv_els_req() - handle a validated ELS request.
 * @lport: Fibre Channel local port
 * @sp: current sequence in the exchange
 * @fp: request frame
 *
 * Handle incoming ELS requests that require port login.
 * The ELS opcode has already been validated by the caller.
 * Requests from unknown or not-logged-in ports are rejected with
 * LS_RJT / "PLOGI required".
 *
 * Locking Note: Called with the lport lock held.
 */
static void fc_rport_recv_els_req(struct fc_lport *lport,
				  struct fc_seq *sp, struct fc_frame *fp)
{
	struct fc_rport_priv *rdata;
	struct fc_frame_header *fh;
	struct fc_seq_els_data els_data;

	/* Default reply if we have to reject: unable, PLOGI required. */
	els_data.fp = NULL;
	els_data.reason = ELS_RJT_UNAB;
	els_data.explan = ELS_EXPL_PLOGI_REQD;

	fh = fc_frame_header_get(fp);

	/*
	 * Look the sender up under disc_mutex, then hand off to the
	 * rport mutex before releasing disc_mutex (lock ordering:
	 * disc_mutex outranks rp_mutex, see header comment).
	 */
	mutex_lock(&lport->disc.disc_mutex);
	rdata = lport->tt.rport_lookup(lport, ntoh24(fh->fh_s_id));
	if (!rdata) {
		mutex_unlock(&lport->disc.disc_mutex);
		goto reject;
	}
	mutex_lock(&rdata->rp_mutex);
	mutex_unlock(&lport->disc.disc_mutex);

	/* Only ports at/after PRLI may issue these ELS requests. */
	switch (rdata->rp_state) {
	case RPORT_ST_PRLI:
	case RPORT_ST_RTV:
	case RPORT_ST_READY:
	case RPORT_ST_ADISC:
		break;
	default:
		mutex_unlock(&rdata->rp_mutex);
		goto reject;
	}

	switch (fc_frame_payload_op(fp)) {
	case ELS_PRLI:
		fc_rport_recv_prli_req(rdata, sp, fp);
		break;
	case ELS_PRLO:
		fc_rport_recv_prlo_req(rdata, sp, fp);
		break;
	case ELS_RRQ:
		els_data.fp = fp;
		lport->tt.seq_els_rsp_send(sp, ELS_RRQ, &els_data);
		break;
	case ELS_REC:
		els_data.fp = fp;
		lport->tt.seq_els_rsp_send(sp, ELS_REC, &els_data);
		break;
	default:
		fc_frame_free(fp);	/* can't happen */
		break;
	}

	mutex_unlock(&rdata->rp_mutex);
	return;

reject:
	lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &els_data);
	fc_frame_free(fp);
}
1085 
1086 /**
1087  * fc_rport_recv_req() - Handle a received ELS request from a rport
1088  * @sp: current sequence in the PLOGI exchange
1089  * @fp: response frame
1090  * @lport: Fibre Channel local port
1091  *
1092  * Locking Note: Called with the lport lock held.
1093  */
1094 void fc_rport_recv_req(struct fc_seq *sp, struct fc_frame *fp,
1095 		       struct fc_lport *lport)
1096 {
1097 	struct fc_seq_els_data els_data;
1098 
1099 	/*
1100 	 * Handle PLOGI and LOGO requests separately, since they
1101 	 * don't require prior login.
1102 	 * Check for unsupported opcodes first and reject them.
1103 	 * For some ops, it would be incorrect to reject with "PLOGI required".
1104 	 */
1105 	switch (fc_frame_payload_op(fp)) {
1106 	case ELS_PLOGI:
1107 		fc_rport_recv_plogi_req(lport, sp, fp);
1108 		break;
1109 	case ELS_LOGO:
1110 		fc_rport_recv_logo_req(lport, sp, fp);
1111 		break;
1112 	case ELS_PRLI:
1113 	case ELS_PRLO:
1114 	case ELS_RRQ:
1115 	case ELS_REC:
1116 		fc_rport_recv_els_req(lport, sp, fp);
1117 		break;
1118 	default:
1119 		fc_frame_free(fp);
1120 		els_data.fp = NULL;
1121 		els_data.reason = ELS_RJT_UNSUP;
1122 		els_data.explan = ELS_EXPL_NONE;
1123 		lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &els_data);
1124 		break;
1125 	}
1126 }
1127 
1128 /**
1129  * fc_rport_recv_plogi_req() - Handle incoming Port Login (PLOGI) request
1130  * @lport: local port
1131  * @sp: current sequence in the PLOGI exchange
1132  * @fp: PLOGI request frame
1133  *
1134  * Locking Note: The rport lock is held before calling this function.
1135  */
1136 static void fc_rport_recv_plogi_req(struct fc_lport *lport,
1137 				    struct fc_seq *sp, struct fc_frame *rx_fp)
1138 {
1139 	struct fc_disc *disc;
1140 	struct fc_rport_priv *rdata;
1141 	struct fc_frame *fp = rx_fp;
1142 	struct fc_exch *ep;
1143 	struct fc_frame_header *fh;
1144 	struct fc_els_flogi *pl;
1145 	struct fc_seq_els_data rjt_data;
1146 	u32 sid, f_ctl;
1147 
1148 	rjt_data.fp = NULL;
1149 	fh = fc_frame_header_get(fp);
1150 	sid = ntoh24(fh->fh_s_id);
1151 
1152 	FC_RPORT_ID_DBG(lport, sid, "Received PLOGI request\n");
1153 
1154 	pl = fc_frame_payload_get(fp, sizeof(*pl));
1155 	if (!pl) {
1156 		FC_RPORT_ID_DBG(lport, sid, "Received PLOGI too short\n");
1157 		rjt_data.reason = ELS_RJT_PROT;
1158 		rjt_data.explan = ELS_EXPL_INV_LEN;
1159 		goto reject;
1160 	}
1161 
1162 	disc = &lport->disc;
1163 	mutex_lock(&disc->disc_mutex);
1164 	rdata = lport->tt.rport_create(lport, sid);
1165 	if (!rdata) {
1166 		mutex_unlock(&disc->disc_mutex);
1167 		rjt_data.reason = ELS_RJT_UNAB;
1168 		rjt_data.explan = ELS_EXPL_INSUF_RES;
1169 		goto reject;
1170 	}
1171 
1172 	mutex_lock(&rdata->rp_mutex);
1173 	mutex_unlock(&disc->disc_mutex);
1174 
1175 	rdata->ids.port_name = get_unaligned_be64(&pl->fl_wwpn);
1176 	rdata->ids.node_name = get_unaligned_be64(&pl->fl_wwnn);
1177 
1178 	/*
1179 	 * If the rport was just created, possibly due to the incoming PLOGI,
1180 	 * set the state appropriately and accept the PLOGI.
1181 	 *
1182 	 * If we had also sent a PLOGI, and if the received PLOGI is from a
1183 	 * higher WWPN, we accept it, otherwise an LS_RJT is sent with reason
1184 	 * "command already in progress".
1185 	 *
1186 	 * XXX TBD: If the session was ready before, the PLOGI should result in
1187 	 * all outstanding exchanges being reset.
1188 	 */
1189 	switch (rdata->rp_state) {
1190 	case RPORT_ST_INIT:
1191 		FC_RPORT_DBG(rdata, "Received PLOGI in INIT state\n");
1192 		break;
1193 	case RPORT_ST_PLOGI:
1194 		FC_RPORT_DBG(rdata, "Received PLOGI in PLOGI state\n");
1195 		if (rdata->ids.port_name < lport->wwpn) {
1196 			mutex_unlock(&rdata->rp_mutex);
1197 			rjt_data.reason = ELS_RJT_INPROG;
1198 			rjt_data.explan = ELS_EXPL_NONE;
1199 			goto reject;
1200 		}
1201 		break;
1202 	case RPORT_ST_PRLI:
1203 	case RPORT_ST_READY:
1204 	case RPORT_ST_ADISC:
1205 		FC_RPORT_DBG(rdata, "Received PLOGI in logged-in state %d "
1206 			     "- ignored for now\n", rdata->rp_state);
1207 		/* XXX TBD - should reset */
1208 		break;
1209 	case RPORT_ST_DELETE:
1210 	default:
1211 		FC_RPORT_DBG(rdata, "Received PLOGI in unexpected state %d\n",
1212 			     rdata->rp_state);
1213 		fc_frame_free(rx_fp);
1214 		goto out;
1215 	}
1216 
1217 	/*
1218 	 * Get session payload size from incoming PLOGI.
1219 	 */
1220 	rdata->maxframe_size = fc_plogi_get_maxframe(pl, lport->mfs);
1221 	fc_frame_free(rx_fp);
1222 
1223 	/*
1224 	 * Send LS_ACC.	 If this fails, the originator should retry.
1225 	 */
1226 	sp = lport->tt.seq_start_next(sp);
1227 	if (!sp)
1228 		goto out;
1229 	fp = fc_frame_alloc(lport, sizeof(*pl));
1230 	if (!fp)
1231 		goto out;
1232 
1233 	fc_plogi_fill(lport, fp, ELS_LS_ACC);
1234 	f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT;
1235 	ep = fc_seq_exch(sp);
1236 	fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
1237 		       FC_TYPE_ELS, f_ctl, 0);
1238 	lport->tt.seq_send(lport, sp, fp);
1239 	fc_rport_enter_prli(rdata);
1240 out:
1241 	mutex_unlock(&rdata->rp_mutex);
1242 	return;
1243 
1244 reject:
1245 	lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
1246 	fc_frame_free(fp);
1247 }
1248 
1249 /**
1250  * fc_rport_recv_prli_req() - Handle incoming Process Login (PRLI) request
1251  * @rdata: private remote port data
1252  * @sp: current sequence in the PRLI exchange
1253  * @fp: PRLI request frame
1254  *
1255  * Locking Note: The rport lock is exected to be held before calling
1256  * this function.
1257  */
1258 static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata,
1259 				   struct fc_seq *sp, struct fc_frame *rx_fp)
1260 {
1261 	struct fc_lport *lport = rdata->local_port;
1262 	struct fc_exch *ep;
1263 	struct fc_frame *fp;
1264 	struct fc_frame_header *fh;
1265 	struct {
1266 		struct fc_els_prli prli;
1267 		struct fc_els_spp spp;
1268 	} *pp;
1269 	struct fc_els_spp *rspp;	/* request service param page */
1270 	struct fc_els_spp *spp;	/* response spp */
1271 	unsigned int len;
1272 	unsigned int plen;
1273 	enum fc_els_rjt_reason reason = ELS_RJT_UNAB;
1274 	enum fc_els_rjt_explan explan = ELS_EXPL_NONE;
1275 	enum fc_els_spp_resp resp;
1276 	struct fc_seq_els_data rjt_data;
1277 	u32 f_ctl;
1278 	u32 fcp_parm;
1279 	u32 roles = FC_RPORT_ROLE_UNKNOWN;
1280 	rjt_data.fp = NULL;
1281 
1282 	fh = fc_frame_header_get(rx_fp);
1283 
1284 	FC_RPORT_DBG(rdata, "Received PRLI request while in state %s\n",
1285 		     fc_rport_state(rdata));
1286 
1287 	switch (rdata->rp_state) {
1288 	case RPORT_ST_PRLI:
1289 	case RPORT_ST_RTV:
1290 	case RPORT_ST_READY:
1291 	case RPORT_ST_ADISC:
1292 		reason = ELS_RJT_NONE;
1293 		break;
1294 	default:
1295 		fc_frame_free(rx_fp);
1296 		return;
1297 		break;
1298 	}
1299 	len = fr_len(rx_fp) - sizeof(*fh);
1300 	pp = fc_frame_payload_get(rx_fp, sizeof(*pp));
1301 	if (pp == NULL) {
1302 		reason = ELS_RJT_PROT;
1303 		explan = ELS_EXPL_INV_LEN;
1304 	} else {
1305 		plen = ntohs(pp->prli.prli_len);
1306 		if ((plen % 4) != 0 || plen > len) {
1307 			reason = ELS_RJT_PROT;
1308 			explan = ELS_EXPL_INV_LEN;
1309 		} else if (plen < len) {
1310 			len = plen;
1311 		}
1312 		plen = pp->prli.prli_spp_len;
1313 		if ((plen % 4) != 0 || plen < sizeof(*spp) ||
1314 		    plen > len || len < sizeof(*pp)) {
1315 			reason = ELS_RJT_PROT;
1316 			explan = ELS_EXPL_INV_LEN;
1317 		}
1318 		rspp = &pp->spp;
1319 	}
1320 	if (reason != ELS_RJT_NONE ||
1321 	    (fp = fc_frame_alloc(lport, len)) == NULL) {
1322 		rjt_data.reason = reason;
1323 		rjt_data.explan = explan;
1324 		lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
1325 	} else {
1326 		sp = lport->tt.seq_start_next(sp);
1327 		WARN_ON(!sp);
1328 		pp = fc_frame_payload_get(fp, len);
1329 		WARN_ON(!pp);
1330 		memset(pp, 0, len);
1331 		pp->prli.prli_cmd = ELS_LS_ACC;
1332 		pp->prli.prli_spp_len = plen;
1333 		pp->prli.prli_len = htons(len);
1334 		len -= sizeof(struct fc_els_prli);
1335 
1336 		/* reinitialize remote port roles */
1337 		rdata->ids.roles = FC_RPORT_ROLE_UNKNOWN;
1338 
1339 		/*
1340 		 * Go through all the service parameter pages and build
1341 		 * response.  If plen indicates longer SPP than standard,
1342 		 * use that.  The entire response has been pre-cleared above.
1343 		 */
1344 		spp = &pp->spp;
1345 		while (len >= plen) {
1346 			spp->spp_type = rspp->spp_type;
1347 			spp->spp_type_ext = rspp->spp_type_ext;
1348 			spp->spp_flags = rspp->spp_flags & FC_SPP_EST_IMG_PAIR;
1349 			resp = FC_SPP_RESP_ACK;
1350 			if (rspp->spp_flags & FC_SPP_RPA_VAL)
1351 				resp = FC_SPP_RESP_NO_PA;
1352 			switch (rspp->spp_type) {
1353 			case 0:	/* common to all FC-4 types */
1354 				break;
1355 			case FC_TYPE_FCP:
1356 				fcp_parm = ntohl(rspp->spp_params);
1357 				if (fcp_parm * FCP_SPPF_RETRY)
1358 					rdata->flags |= FC_RP_FLAGS_RETRY;
1359 				rdata->supported_classes = FC_COS_CLASS3;
1360 				if (fcp_parm & FCP_SPPF_INIT_FCN)
1361 					roles |= FC_RPORT_ROLE_FCP_INITIATOR;
1362 				if (fcp_parm & FCP_SPPF_TARG_FCN)
1363 					roles |= FC_RPORT_ROLE_FCP_TARGET;
1364 				rdata->ids.roles = roles;
1365 
1366 				spp->spp_params =
1367 					htonl(lport->service_params);
1368 				break;
1369 			default:
1370 				resp = FC_SPP_RESP_INVL;
1371 				break;
1372 			}
1373 			spp->spp_flags |= resp;
1374 			len -= plen;
1375 			rspp = (struct fc_els_spp *)((char *)rspp + plen);
1376 			spp = (struct fc_els_spp *)((char *)spp + plen);
1377 		}
1378 
1379 		/*
1380 		 * Send LS_ACC.	 If this fails, the originator should retry.
1381 		 */
1382 		f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ;
1383 		f_ctl |= FC_FC_END_SEQ | FC_FC_SEQ_INIT;
1384 		ep = fc_seq_exch(sp);
1385 		fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
1386 			       FC_TYPE_ELS, f_ctl, 0);
1387 		lport->tt.seq_send(lport, sp, fp);
1388 
1389 		/*
1390 		 * Get lock and re-check state.
1391 		 */
1392 		switch (rdata->rp_state) {
1393 		case RPORT_ST_PRLI:
1394 			fc_rport_enter_ready(rdata);
1395 			break;
1396 		case RPORT_ST_READY:
1397 		case RPORT_ST_ADISC:
1398 			break;
1399 		default:
1400 			break;
1401 		}
1402 	}
1403 	fc_frame_free(rx_fp);
1404 }
1405 
1406 /**
1407  * fc_rport_recv_prlo_req() - Handle incoming Process Logout (PRLO) request
1408  * @rdata: private remote port data
1409  * @sp: current sequence in the PRLO exchange
1410  * @fp: PRLO request frame
1411  *
1412  * Locking Note: The rport lock is exected to be held before calling
1413  * this function.
1414  */
1415 static void fc_rport_recv_prlo_req(struct fc_rport_priv *rdata,
1416 				   struct fc_seq *sp,
1417 				   struct fc_frame *fp)
1418 {
1419 	struct fc_lport *lport = rdata->local_port;
1420 
1421 	struct fc_frame_header *fh;
1422 	struct fc_seq_els_data rjt_data;
1423 
1424 	fh = fc_frame_header_get(fp);
1425 
1426 	FC_RPORT_DBG(rdata, "Received PRLO request while in state %s\n",
1427 		     fc_rport_state(rdata));
1428 
1429 	rjt_data.fp = NULL;
1430 	rjt_data.reason = ELS_RJT_UNAB;
1431 	rjt_data.explan = ELS_EXPL_NONE;
1432 	lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
1433 	fc_frame_free(fp);
1434 }
1435 
1436 /**
1437  * fc_rport_recv_logo_req() - Handle incoming Logout (LOGO) request
1438  * @lport: local port.
1439  * @sp: current sequence in the LOGO exchange
1440  * @fp: LOGO request frame
1441  *
1442  * Locking Note: The rport lock is exected to be held before calling
1443  * this function.
1444  */
1445 static void fc_rport_recv_logo_req(struct fc_lport *lport,
1446 				   struct fc_seq *sp,
1447 				   struct fc_frame *fp)
1448 {
1449 	struct fc_frame_header *fh;
1450 	struct fc_rport_priv *rdata;
1451 	u32 sid;
1452 
1453 	lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
1454 
1455 	fh = fc_frame_header_get(fp);
1456 	sid = ntoh24(fh->fh_s_id);
1457 
1458 	mutex_lock(&lport->disc.disc_mutex);
1459 	rdata = lport->tt.rport_lookup(lport, sid);
1460 	if (rdata) {
1461 		mutex_lock(&rdata->rp_mutex);
1462 		FC_RPORT_DBG(rdata, "Received LOGO request while in state %s\n",
1463 			     fc_rport_state(rdata));
1464 
1465 		/*
1466 		 * If the remote port was created due to discovery,
1467 		 * log back in.  It may have seen a stale RSCN about us.
1468 		 */
1469 		if (rdata->rp_state != RPORT_ST_DELETE && rdata->disc_id)
1470 			fc_rport_enter_plogi(rdata);
1471 		else
1472 			fc_rport_enter_delete(rdata, RPORT_EV_LOGO);
1473 		mutex_unlock(&rdata->rp_mutex);
1474 	} else
1475 		FC_RPORT_ID_DBG(lport, sid,
1476 				"Received LOGO from non-logged-in port\n");
1477 	mutex_unlock(&lport->disc.disc_mutex);
1478 	fc_frame_free(fp);
1479 }
1480 
/**
 * fc_rport_flush_queue() - Flush the rport event workqueue
 *
 * Blocks until all rport work items queued so far have completed.
 */
static void fc_rport_flush_queue(void)
{
	flush_workqueue(rport_event_queue);
}
1485 
1486 int fc_rport_init(struct fc_lport *lport)
1487 {
1488 	if (!lport->tt.rport_lookup)
1489 		lport->tt.rport_lookup = fc_rport_lookup;
1490 
1491 	if (!lport->tt.rport_create)
1492 		lport->tt.rport_create = fc_rport_create;
1493 
1494 	if (!lport->tt.rport_login)
1495 		lport->tt.rport_login = fc_rport_login;
1496 
1497 	if (!lport->tt.rport_logoff)
1498 		lport->tt.rport_logoff = fc_rport_logoff;
1499 
1500 	if (!lport->tt.rport_recv_req)
1501 		lport->tt.rport_recv_req = fc_rport_recv_req;
1502 
1503 	if (!lport->tt.rport_flush_queue)
1504 		lport->tt.rport_flush_queue = fc_rport_flush_queue;
1505 
1506 	if (!lport->tt.rport_destroy)
1507 		lport->tt.rport_destroy = fc_rport_destroy;
1508 
1509 	return 0;
1510 }
1511 EXPORT_SYMBOL(fc_rport_init);
1512 
1513 int fc_setup_rport(void)
1514 {
1515 	rport_event_queue = create_singlethread_workqueue("fc_rport_eq");
1516 	if (!rport_event_queue)
1517 		return -ENOMEM;
1518 	return 0;
1519 }
1520 EXPORT_SYMBOL(fc_setup_rport);
1521 
/**
 * fc_destroy_rport() - Tear down the rport event workqueue
 *
 * Counterpart of fc_setup_rport().
 */
void fc_destroy_rport(void)
{
	destroy_workqueue(rport_event_queue);
}
EXPORT_SYMBOL(fc_destroy_rport);
1527 
/**
 * fc_rport_terminate_io() - Stop outstanding I/O for a remote port
 * @rport: FC transport remote port
 *
 * Resets the exchange manager twice, once with the remote port ID in
 * each position; presumably this covers exchanges in both directions
 * (local-originated and remote-originated) — confirm against the
 * exch_mgr_reset() implementation in fc_exch.c.
 */
void fc_rport_terminate_io(struct fc_rport *rport)
{
	struct fc_rport_libfc_priv *rp = rport->dd_data;
	struct fc_lport *lport = rp->local_port;

	lport->tt.exch_mgr_reset(lport, 0, rport->port_id);
	lport->tt.exch_mgr_reset(lport, rport->port_id, 0);
}
EXPORT_SYMBOL(fc_rport_terminate_io);
1537