xref: /openbmc/linux/drivers/scsi/libfc/fc_lport.c (revision b5cbf083)
1 /*
2  * Copyright(c) 2007 Intel Corporation. All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms and conditions of the GNU General Public License,
6  * version 2, as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope it will be useful, but WITHOUT
9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
11  * more details.
12  *
13  * You should have received a copy of the GNU General Public License along with
14  * this program; if not, write to the Free Software Foundation, Inc.,
15  * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16  *
17  * Maintained at www.Open-FCoE.org
18  */
19 
20 /*
21  * PORT LOCKING NOTES
22  *
23  * These comments only apply to the 'port code' which consists of the lport,
24  * disc and rport blocks.
25  *
26  * MOTIVATION
27  *
28  * The lport, disc and rport blocks all have mutexes that are used to protect
29  * those objects. The main motivation for these locks is to prevent an
30  * lport reset from happening just before we send a frame. In that scenario the
31  * lport's FID would get set to zero and then we'd send a frame with an
32  * invalid SID. We also need to ensure that states don't change unexpectedly
33  * while processing another state.
34  *
35  * HIERARCHY
36  *
37  * The following hierarchy defines the locking rules. A greater lock
38  * may be held before acquiring a lesser lock, but a lesser lock should never
39  * be held while attempting to acquire a greater lock. Here is the
40  * hierarchy (an illustrative sketch follows these notes):
41  * lport > disc, lport > rport, disc > rport
42  *
43  * CALLBACKS
44  *
45  * The callbacks cause complications with this scheme. There is a callback
46  * from the rport (to either lport or disc) and a callback from disc
47  * (to the lport).
48  *
49  * As rports exit the rport state machine a callback is made to the owner of
50  * the rport to notify success or failure. Since the callback is likely to
51  * cause the lport or disc to grab its lock, we cannot hold the rport lock
52  * while making the callback. To ensure that the rport is not freed while
53  * processing the callback, the rport callbacks are serialized through a
54  * single-threaded workqueue. An rport is never freed while in a
55  * callback handler because no other rport work in this queue can be executed
56  * at the same time.
57  *
58  * When discovery succeeds or fails a callback is made to the lport as
59  * notification. Currently, successful discovery causes the lport to take no
60  * action. A failure will cause the lport to reset. There is likely a circular
61  * locking problem with this implementation.
62  */
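/*
 * Editorial illustration (guarded out, never compiled): a minimal sketch of
 * how the hierarchy above translates into lock acquisition order. The
 * disc_mutex and rp_mutex field names are assumed from the companion disc
 * and rport blocks; the function name is made up purely for the example.
 */
#if 0
static void fc_lport_lock_order_sketch(struct fc_lport *lport,
				       struct fc_disc *disc,
				       struct fc_rport_priv *rdata)
{
	mutex_lock(&lport->lp_mutex);	/* greatest lock is taken first */
	mutex_lock(&disc->disc_mutex);	/* lport > disc is allowed */
	mutex_lock(&rdata->rp_mutex);	/* disc > rport is allowed */

	/* ... work on all three objects ... */

	mutex_unlock(&rdata->rp_mutex);	/* release in the reverse order */
	mutex_unlock(&disc->disc_mutex);
	mutex_unlock(&lport->lp_mutex);

	/*
	 * The reverse order is never legal: holding rp_mutex while
	 * attempting to take lp_mutex can deadlock against the paths
	 * that follow the hierarchy.
	 */
}
#endif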
63 
64 /*
65  * LPORT LOCKING
66  *
67  * The critical sections protected by the lport's mutex are quite broad and
68  * may be improved upon in the future. The lport code and its locking don't
69  * influence the I/O path, so excessive locking doesn't penalize I/O
70  * performance.
71  *
72  * The strategy is to lock whenever processing a request or response. Note
73  * that every _enter_* function corresponds to a state change. They generally
74  * change the lport's state and then send a request out on the wire. We lock
75  * before calling any of these functions to protect that state change. This
76  * means that the entry points into the lport block manage the locks, so
77  * the state machine can transition between states (i.e. _enter_* functions)
78  * while always staying protected (see the sketch after these notes).
79  *
80  * When handling responses we also hold the lport mutex broadly. When the
81  * lport receives the response frame it locks the mutex and then calls the
82  * appropriate handler for the particular response. Generally a response will
83  * trigger a state change and so the lock must already be held.
84  *
85  * Retries also have to consider the locking. The retries occur from a work
86  * context and the work function will lock the lport and then retry the state
87  * (i.e. _enter_* function).
88  */
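/*
 * Editorial illustration (guarded out, never compiled): the entry-point
 * locking pattern described above. The handler name here is made up;
 * real examples are fc_lport_recv_req() and fc_lport_timeout() below.
 */
#if 0
static void fc_lport_entry_point_sketch(struct fc_lport *lport,
					struct fc_frame *fp)
{
	mutex_lock(&lport->lp_mutex);	/* lock once, at the entry point */

	/*
	 * _enter_* style helpers are called with lp_mutex already held;
	 * they may change lport->state and send a frame on the wire
	 * without touching the lock themselves.
	 */
	fc_lport_enter_reset(lport);

	mutex_unlock(&lport->lp_mutex);
	fc_frame_free(fp);
}
#endif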
89 
90 #include <linux/timer.h>
91 #include <asm/unaligned.h>
92 
93 #include <scsi/fc/fc_gs.h>
94 
95 #include <scsi/libfc.h>
96 #include <scsi/fc_encode.h>
97 
98 /* Fabric IDs to use for point-to-point mode, chosen on whims. */
99 #define FC_LOCAL_PTP_FID_LO   0x010101
100 #define FC_LOCAL_PTP_FID_HI   0x010102
101 
102 #define	DNS_DELAY	      3 /* Discovery delay after RSCN (in seconds) */
103 
104 static void fc_lport_error(struct fc_lport *, struct fc_frame *);
105 
106 static void fc_lport_enter_reset(struct fc_lport *);
107 static void fc_lport_enter_flogi(struct fc_lport *);
108 static void fc_lport_enter_dns(struct fc_lport *);
109 static void fc_lport_enter_rpn_id(struct fc_lport *);
110 static void fc_lport_enter_rft_id(struct fc_lport *);
111 static void fc_lport_enter_scr(struct fc_lport *);
112 static void fc_lport_enter_ready(struct fc_lport *);
113 static void fc_lport_enter_logo(struct fc_lport *);
114 
115 static const char *fc_lport_state_names[] = {
116 	[LPORT_ST_DISABLED] = "disabled",
117 	[LPORT_ST_FLOGI] =    "FLOGI",
118 	[LPORT_ST_DNS] =      "dNS",
119 	[LPORT_ST_RPN_ID] =   "RPN_ID",
120 	[LPORT_ST_RFT_ID] =   "RFT_ID",
121 	[LPORT_ST_SCR] =      "SCR",
122 	[LPORT_ST_READY] =    "Ready",
123 	[LPORT_ST_LOGO] =     "LOGO",
124 	[LPORT_ST_RESET] =    "reset",
125 };
126 
127 static int fc_frame_drop(struct fc_lport *lport, struct fc_frame *fp)
128 {
129 	fc_frame_free(fp);
130 	return 0;
131 }
132 
133 /**
134  * fc_lport_rport_callback() - Event handler for rport events
135  * @lport: The lport which is receiving the event
136  * @rdata: private remote port data
137  * @event: The event that occurred
138  *
139  * Locking Note: The rport lock should not be held when calling
140  *		 this function.
141  */
142 static void fc_lport_rport_callback(struct fc_lport *lport,
143 				    struct fc_rport_priv *rdata,
144 				    enum fc_rport_event event)
145 {
146 	FC_LPORT_DBG(lport, "Received a %d event for port (%6x)\n", event,
147 		     rdata->ids.port_id);
148 
149 	mutex_lock(&lport->lp_mutex);
150 	switch (event) {
151 	case RPORT_EV_READY:
152 		if (lport->state == LPORT_ST_DNS) {
153 			lport->dns_rp = rdata;
154 			fc_lport_enter_rpn_id(lport);
155 		} else {
156 			FC_LPORT_DBG(lport, "Received a READY event "
157 				     "on port (%6x) for the directory "
158 				     "server, but the lport is not "
159 				     "in the DNS state, it's in the "
160 				     "%d state", rdata->ids.port_id,
161 				     lport->state);
162 			lport->tt.rport_logoff(rdata);
163 		}
164 		break;
165 	case RPORT_EV_LOGO:
166 	case RPORT_EV_FAILED:
167 	case RPORT_EV_STOP:
168 		lport->dns_rp = NULL;
169 		break;
170 	case RPORT_EV_NONE:
171 		break;
172 	}
173 	mutex_unlock(&lport->lp_mutex);
174 }
175 
176 /**
177  * fc_lport_state() - Return a string which represents the lport's state
178  * @lport: The lport whose state is to be converted to a string
179  */
180 static const char *fc_lport_state(struct fc_lport *lport)
181 {
182 	const char *cp;
183 
184 	cp = fc_lport_state_names[lport->state];
185 	if (!cp)
186 		cp = "unknown";
187 	return cp;
188 }
189 
190 /**
191  * fc_lport_ptp_setup() - Create an rport for point-to-point mode
192  * @lport: The lport to attach the ptp rport to
193  * @remote_fid: The FID of the ptp rport
194  * @remote_wwpn: The WWPN of the ptp rport
195  * @remote_wwnn: The WWNN of the ptp rport
196  */
197 static void fc_lport_ptp_setup(struct fc_lport *lport,
198 			       u32 remote_fid, u64 remote_wwpn,
199 			       u64 remote_wwnn)
200 {
201 	struct fc_rport_identifiers ids;
202 
203 	ids.port_id = remote_fid;
204 	ids.port_name = remote_wwpn;
205 	ids.node_name = remote_wwnn;
206 	ids.roles = FC_RPORT_ROLE_UNKNOWN;
207 
208 	if (lport->ptp_rp) {
209 		lport->tt.rport_logoff(lport->ptp_rp);
210 		lport->ptp_rp = NULL;
211 	}
212 
213 	lport->ptp_rp = lport->tt.rport_create(lport, &ids);
214 
215 	lport->tt.rport_login(lport->ptp_rp);
216 
217 	fc_lport_enter_ready(lport);
218 }
219 
220 void fc_get_host_port_type(struct Scsi_Host *shost)
221 {
222 	/* TODO - currently just NPORT */
223 	fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
224 }
225 EXPORT_SYMBOL(fc_get_host_port_type);
226 
227 void fc_get_host_port_state(struct Scsi_Host *shost)
228 {
229 	struct fc_lport *lp = shost_priv(shost);
230 
231 	if (lp->link_up)
232 		fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
233 	else
234 		fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
235 }
236 EXPORT_SYMBOL(fc_get_host_port_state);
237 
238 void fc_get_host_speed(struct Scsi_Host *shost)
239 {
240 	struct fc_lport *lport = shost_priv(shost);
241 
242 	fc_host_speed(shost) = lport->link_speed;
243 }
244 EXPORT_SYMBOL(fc_get_host_speed);
245 
246 struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *shost)
247 {
248 	struct fc_host_statistics *fcoe_stats;
249 	struct fc_lport *lp = shost_priv(shost);
250 	struct timespec v0, v1;
251 	unsigned int cpu;
252 
253 	fcoe_stats = &lp->host_stats;
254 	memset(fcoe_stats, 0, sizeof(struct fc_host_statistics));
255 
256 	jiffies_to_timespec(jiffies, &v0);
257 	jiffies_to_timespec(lp->boot_time, &v1);
258 	fcoe_stats->seconds_since_last_reset = (v0.tv_sec - v1.tv_sec);
259 
260 	for_each_possible_cpu(cpu) {
261 		struct fcoe_dev_stats *stats;
262 
263 		stats = per_cpu_ptr(lp->dev_stats, cpu);
264 
265 		fcoe_stats->tx_frames += stats->TxFrames;
266 		fcoe_stats->tx_words += stats->TxWords;
267 		fcoe_stats->rx_frames += stats->RxFrames;
268 		fcoe_stats->rx_words += stats->RxWords;
269 		fcoe_stats->error_frames += stats->ErrorFrames;
270 		fcoe_stats->invalid_crc_count += stats->InvalidCRCCount;
271 		fcoe_stats->fcp_input_requests += stats->InputRequests;
272 		fcoe_stats->fcp_output_requests += stats->OutputRequests;
273 		fcoe_stats->fcp_control_requests += stats->ControlRequests;
274 		fcoe_stats->fcp_input_megabytes += stats->InputMegabytes;
275 		fcoe_stats->fcp_output_megabytes += stats->OutputMegabytes;
276 		fcoe_stats->link_failure_count += stats->LinkFailureCount;
277 	}
278 	fcoe_stats->lip_count = -1;
279 	fcoe_stats->nos_count = -1;
280 	fcoe_stats->loss_of_sync_count = -1;
281 	fcoe_stats->loss_of_signal_count = -1;
282 	fcoe_stats->prim_seq_protocol_err_count = -1;
283 	fcoe_stats->dumped_frames = -1;
284 	return fcoe_stats;
285 }
286 EXPORT_SYMBOL(fc_get_host_stats);
287 
288 /*
289  * Fill in FLOGI command for request.
290  */
291 static void
292 fc_lport_flogi_fill(struct fc_lport *lport, struct fc_els_flogi *flogi,
293 		    unsigned int op)
294 {
295 	struct fc_els_csp *sp;
296 	struct fc_els_cssp *cp;
297 
298 	memset(flogi, 0, sizeof(*flogi));
299 	flogi->fl_cmd = (u8) op;
300 	put_unaligned_be64(lport->wwpn, &flogi->fl_wwpn);
301 	put_unaligned_be64(lport->wwnn, &flogi->fl_wwnn);
302 	sp = &flogi->fl_csp;
303 	sp->sp_hi_ver = 0x20;
304 	sp->sp_lo_ver = 0x20;
305 	sp->sp_bb_cred = htons(10);	/* this gets set by gateway */
306 	sp->sp_bb_data = htons((u16) lport->mfs);
307 	cp = &flogi->fl_cssp[3 - 1];	/* class 3 parameters */
308 	cp->cp_class = htons(FC_CPC_VALID | FC_CPC_SEQ);
309 	if (op != ELS_FLOGI) {
310 		sp->sp_features = htons(FC_SP_FT_CIRO);
311 		sp->sp_tot_seq = htons(255);	/* seq. we accept */
312 		sp->sp_rel_off = htons(0x1f);
313 		sp->sp_e_d_tov = htonl(lport->e_d_tov);
314 
315 		cp->cp_rdfs = htons((u16) lport->mfs);
316 		cp->cp_con_seq = htons(255);
317 		cp->cp_open_seq = 1;
318 	}
319 }
320 
321 /*
322  * Add a supported FC-4 type.
323  */
324 static void fc_lport_add_fc4_type(struct fc_lport *lport, enum fc_fh_type type)
325 {
326 	__be32 *mp;
327 
328 	mp = &lport->fcts.ff_type_map[type / FC_NS_BPW];
329 	*mp = htonl(ntohl(*mp) | 1UL << (type % FC_NS_BPW));
330 }
331 
332 /**
333  * fc_lport_recv_rlir_req() - Handle received Registered Link Incident Report.
334  * @lport: Fibre Channel local port receiving the RLIR
335  * @sp: current sequence in the RLIR exchange
336  * @fp: RLIR request frame
337  *
338  * Locking Note: The lport lock is expected to be held before calling
339  * this function.
340  */
341 static void fc_lport_recv_rlir_req(struct fc_seq *sp, struct fc_frame *fp,
342 				   struct fc_lport *lport)
343 {
344 	FC_LPORT_DBG(lport, "Received RLIR request while in state %s\n",
345 		     fc_lport_state(lport));
346 
347 	lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
348 	fc_frame_free(fp);
349 }
350 
351 /**
352  * fc_lport_recv_echo_req() - Handle received ECHO request
353  * @lport: Fibre Channel local port receiving the ECHO
354  * @sp: current sequence in the ECHO exchange
355  * @fp: ECHO request frame
356  *
357  * Locking Note: The lport lock is expected to be held before calling
358  * this function.
359  */
360 static void fc_lport_recv_echo_req(struct fc_seq *sp, struct fc_frame *in_fp,
361 				   struct fc_lport *lport)
362 {
363 	struct fc_frame *fp;
364 	struct fc_exch *ep = fc_seq_exch(sp);
365 	unsigned int len;
366 	void *pp;
367 	void *dp;
368 	u32 f_ctl;
369 
370 	FC_LPORT_DBG(lport, "Received ECHO request while in state %s\n",
371 		     fc_lport_state(lport));
372 
373 	len = fr_len(in_fp) - sizeof(struct fc_frame_header);
374 	pp = fc_frame_payload_get(in_fp, len);
375 
376 	if (len < sizeof(__be32))
377 		len = sizeof(__be32);
378 
379 	fp = fc_frame_alloc(lport, len);
380 	if (fp) {
381 		dp = fc_frame_payload_get(fp, len);
382 		memcpy(dp, pp, len);
383 		*((u32 *)dp) = htonl(ELS_LS_ACC << 24);
384 		sp = lport->tt.seq_start_next(sp);
385 		f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ;
386 		fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
387 			       FC_TYPE_ELS, f_ctl, 0);
388 		lport->tt.seq_send(lport, sp, fp);
389 	}
390 	fc_frame_free(in_fp);
391 }
392 
393 /**
394  * fc_lport_recv_rnid_req() - Handle received Request Node ID data request
395  * @lport: Fibre Channel local port receiving the RNID
396  * @sp: current sequence in the RNID exchange
397  * @fp: RNID request frame
398  *
399  * Locking Note: The lport lock is expected to be held before calling
400  * this function.
401  */
402 static void fc_lport_recv_rnid_req(struct fc_seq *sp, struct fc_frame *in_fp,
403 				   struct fc_lport *lport)
404 {
405 	struct fc_frame *fp;
406 	struct fc_exch *ep = fc_seq_exch(sp);
407 	struct fc_els_rnid *req;
408 	struct {
409 		struct fc_els_rnid_resp rnid;
410 		struct fc_els_rnid_cid cid;
411 		struct fc_els_rnid_gen gen;
412 	} *rp;
413 	struct fc_seq_els_data rjt_data;
414 	u8 fmt;
415 	size_t len;
416 	u32 f_ctl;
417 
418 	FC_LPORT_DBG(lport, "Received RNID request while in state %s\n",
419 		     fc_lport_state(lport));
420 
421 	req = fc_frame_payload_get(in_fp, sizeof(*req));
422 	if (!req) {
423 		rjt_data.fp = NULL;
424 		rjt_data.reason = ELS_RJT_LOGIC;
425 		rjt_data.explan = ELS_EXPL_NONE;
426 		lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
427 	} else {
428 		fmt = req->rnid_fmt;
429 		len = sizeof(*rp);
430 		if (fmt != ELS_RNIDF_GEN ||
431 		    ntohl(lport->rnid_gen.rnid_atype) == 0) {
432 			fmt = ELS_RNIDF_NONE;	/* nothing to provide */
433 			len -= sizeof(rp->gen);
434 		}
435 		fp = fc_frame_alloc(lport, len);
436 		if (fp) {
437 			rp = fc_frame_payload_get(fp, len);
438 			memset(rp, 0, len);
439 			rp->rnid.rnid_cmd = ELS_LS_ACC;
440 			rp->rnid.rnid_fmt = fmt;
441 			rp->rnid.rnid_cid_len = sizeof(rp->cid);
442 			rp->cid.rnid_wwpn = htonll(lport->wwpn);
443 			rp->cid.rnid_wwnn = htonll(lport->wwnn);
444 			if (fmt == ELS_RNIDF_GEN) {
445 				rp->rnid.rnid_sid_len = sizeof(rp->gen);
446 				memcpy(&rp->gen, &lport->rnid_gen,
447 				       sizeof(rp->gen));
448 			}
449 			sp = lport->tt.seq_start_next(sp);
450 			f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ;
451 			f_ctl |= FC_FC_END_SEQ | FC_FC_SEQ_INIT;
452 			fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
453 				       FC_TYPE_ELS, f_ctl, 0);
454 			lport->tt.seq_send(lport, sp, fp);
455 		}
456 	}
457 	fc_frame_free(in_fp);
458 }
459 
460 /**
461  * fc_lport_recv_adisc_req() - Handle received Address Discovery Request
462  * @lport: Fibre Channel local port receiving the ADISC
463  * @sp: current sequence in the ADISC exchange
464  * @fp: ADISC request frame
465  *
466  * Locking Note: The lport lock is expected to be held before calling
467  * this function.
468  */
469 static void fc_lport_recv_adisc_req(struct fc_seq *sp, struct fc_frame *in_fp,
470 				    struct fc_lport *lport)
471 {
472 	struct fc_frame *fp;
473 	struct fc_exch *ep = fc_seq_exch(sp);
474 	struct fc_els_adisc *req, *rp;
475 	struct fc_seq_els_data rjt_data;
476 	size_t len;
477 	u32 f_ctl;
478 
479 	FC_LPORT_DBG(lport, "Received ADISC request while in state %s\n",
480 		     fc_lport_state(lport));
481 
482 	req = fc_frame_payload_get(in_fp, sizeof(*req));
483 	if (!req) {
484 		rjt_data.fp = NULL;
485 		rjt_data.reason = ELS_RJT_LOGIC;
486 		rjt_data.explan = ELS_EXPL_NONE;
487 		lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
488 	} else {
489 		len = sizeof(*rp);
490 		fp = fc_frame_alloc(lport, len);
491 		if (fp) {
492 			rp = fc_frame_payload_get(fp, len);
493 			memset(rp, 0, len);
494 			rp->adisc_cmd = ELS_LS_ACC;
495 			rp->adisc_wwpn = htonll(lport->wwpn);
496 			rp->adisc_wwnn = htonll(lport->wwnn);
497 			hton24(rp->adisc_port_id,
498 			       fc_host_port_id(lport->host));
499 			sp = lport->tt.seq_start_next(sp);
500 			f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ;
501 			f_ctl |= FC_FC_END_SEQ | FC_FC_SEQ_INIT;
502 			fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
503 				       FC_TYPE_ELS, f_ctl, 0);
504 			lport->tt.seq_send(lport, sp, fp);
505 		}
506 	}
507 	fc_frame_free(in_fp);
508 }
509 
510 /**
511  * fc_lport_recv_logo_req() - Handle received fabric LOGO request
512  * @lport: Fibre Channel local port receiving the LOGO
513  * @sp: current sequence in the LOGO exchange
514  * @fp: LOGO request frame
515  *
516  * Locking Note: The lport lock is expected to be held before calling
517  * this function.
518  */
519 static void fc_lport_recv_logo_req(struct fc_seq *sp, struct fc_frame *fp,
520 				   struct fc_lport *lport)
521 {
522 	lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
523 	fc_lport_enter_reset(lport);
524 	fc_frame_free(fp);
525 }
526 
527 /**
528  * fc_fabric_login() - Start the lport state machine
529  * @lport: The lport that should log into the fabric
530  *
531  * Locking Note: This function should not be called
532  *		 with the lport lock held.
533  */
534 int fc_fabric_login(struct fc_lport *lport)
535 {
536 	int rc = -1;
537 
538 	mutex_lock(&lport->lp_mutex);
539 	if (lport->state == LPORT_ST_DISABLED) {
540 		fc_lport_enter_reset(lport);
541 		rc = 0;
542 	}
543 	mutex_unlock(&lport->lp_mutex);
544 
545 	return rc;
546 }
547 EXPORT_SYMBOL(fc_fabric_login);
548 
549 /**
550  * fc_linkup() - Handler for transport linkup events
551  * @lport: The lport whose link is up
552  */
553 void fc_linkup(struct fc_lport *lport)
554 {
555 	printk(KERN_INFO "libfc: Link up on port (%6x)\n",
556 	       fc_host_port_id(lport->host));
557 
558 	mutex_lock(&lport->lp_mutex);
559 	if (!lport->link_up) {
560 		lport->link_up = 1;
561 
562 		if (lport->state == LPORT_ST_RESET)
563 			fc_lport_enter_flogi(lport);
564 	}
565 	mutex_unlock(&lport->lp_mutex);
566 }
567 EXPORT_SYMBOL(fc_linkup);
568 
569 /**
570  * fc_linkdown() - Handler for transport linkdown events
571  * @lport: The lport whose link is down
572  */
573 void fc_linkdown(struct fc_lport *lport)
574 {
575 	mutex_lock(&lport->lp_mutex);
576 	printk(KERN_INFO "libfc: Link down on port (%6x)\n",
577 	       fc_host_port_id(lport->host));
578 
579 	if (lport->link_up) {
580 		lport->link_up = 0;
581 		fc_lport_enter_reset(lport);
582 		lport->tt.fcp_cleanup(lport);
583 	}
584 	mutex_unlock(&lport->lp_mutex);
585 }
586 EXPORT_SYMBOL(fc_linkdown);
587 
588 /**
589  * fc_fabric_logoff() - Logout of the fabric
590  * @lport:	      fc_lport pointer to logoff the fabric
591  *
592  * Return value:
593  *	0 for success, -1 for failure
594  */
595 int fc_fabric_logoff(struct fc_lport *lport)
596 {
597 	lport->tt.disc_stop_final(lport);
598 	mutex_lock(&lport->lp_mutex);
599 	if (lport->dns_rp)
600 		lport->tt.rport_logoff(lport->dns_rp);
601 	mutex_unlock(&lport->lp_mutex);
602 	lport->tt.rport_flush_queue();
603 	mutex_lock(&lport->lp_mutex);
604 	fc_lport_enter_logo(lport);
605 	mutex_unlock(&lport->lp_mutex);
606 	cancel_delayed_work_sync(&lport->retry_work);
607 	return 0;
608 }
609 EXPORT_SYMBOL(fc_fabric_logoff);
610 
611 /**
612  * fc_lport_destroy() - unregister a fc_lport
613  * @lport:	      fc_lport pointer to unregister
614  *
615  * Return value:
616  *	0 for success
617  * Note:
618  * Exit routine for an fc_lport instance:
619  * cleans up all the allocated memory
620  * and frees up other system resources.
621  *
622  */
623 int fc_lport_destroy(struct fc_lport *lport)
624 {
625 	mutex_lock(&lport->lp_mutex);
626 	lport->state = LPORT_ST_DISABLED;
627 	lport->link_up = 0;
628 	lport->tt.frame_send = fc_frame_drop;
629 	mutex_unlock(&lport->lp_mutex);
630 
631 	lport->tt.fcp_abort_io(lport);
632 	lport->tt.disc_stop_final(lport);
633 	lport->tt.exch_mgr_reset(lport, 0, 0);
634 	return 0;
635 }
636 EXPORT_SYMBOL(fc_lport_destroy);
637 
638 /**
639  * fc_set_mfs() - sets up the mfs for the corresponding fc_lport
640  * @lport: fc_lport pointer whose mfs is to be set
641  * @mfs: the new mfs for fc_lport
642  *
643  * Set mfs for the given fc_lport to the new mfs.
644  *
645  * Return: 0 for success
646  */
647 int fc_set_mfs(struct fc_lport *lport, u32 mfs)
648 {
649 	unsigned int old_mfs;
650 	int rc = -EINVAL;
651 
652 	mutex_lock(&lport->lp_mutex);
653 
654 	old_mfs = lport->mfs;
655 
656 	if (mfs >= FC_MIN_MAX_FRAME) {
657 		mfs &= ~3;
658 		if (mfs > FC_MAX_FRAME)
659 			mfs = FC_MAX_FRAME;
660 		mfs -= sizeof(struct fc_frame_header);
661 		lport->mfs = mfs;
662 		rc = 0;
663 	}
664 
665 	if (!rc && mfs < old_mfs)
666 		fc_lport_enter_reset(lport);
667 
668 	mutex_unlock(&lport->lp_mutex);
669 
670 	return rc;
671 }
672 EXPORT_SYMBOL(fc_set_mfs);
673 
674 /**
675  * fc_lport_disc_callback() - Callback for discovery events
676  * @lport: FC local port
677  * @event: The discovery event
678  */
679 void fc_lport_disc_callback(struct fc_lport *lport, enum fc_disc_event event)
680 {
681 	switch (event) {
682 	case DISC_EV_SUCCESS:
683 		FC_LPORT_DBG(lport, "Discovery succeeded\n");
684 		break;
685 	case DISC_EV_FAILED:
686 		printk(KERN_ERR "libfc: Discovery failed for port (%6x)\n",
687 		       fc_host_port_id(lport->host));
688 		mutex_lock(&lport->lp_mutex);
689 		fc_lport_enter_reset(lport);
690 		mutex_unlock(&lport->lp_mutex);
691 		break;
692 	case DISC_EV_NONE:
693 		WARN_ON(1);
694 		break;
695 	}
696 }
697 
698 /**
699  * fc_lport_enter_ready() - Enter the ready state and start discovery
700  * @lport: Fibre Channel local port that is ready
701  *
702  * Locking Note: The lport lock is expected to be held before calling
703  * this routine.
704  */
705 static void fc_lport_enter_ready(struct fc_lport *lport)
706 {
707 	FC_LPORT_DBG(lport, "Entered READY from state %s\n",
708 		     fc_lport_state(lport));
709 
710 	fc_lport_state_enter(lport, LPORT_ST_READY);
711 
712 	lport->tt.disc_start(fc_lport_disc_callback, lport);
713 }
714 
715 /**
716  * fc_lport_recv_flogi_req() - Receive a FLOGI request
717  * @sp_in: The sequence the FLOGI is on
718  * @rx_fp: The frame the FLOGI is in
719  * @lport: The lport that received the request
720  *
721  * A received FLOGI request indicates a point-to-point connection.
722  * Accept it with the common service parameters indicating our N port.
723  * Set up to do a PLOGI if we have the higher-number WWPN.
724  *
725  * Locking Note: The lport lock is expected to be held before calling
726  * this function.
727  */
728 static void fc_lport_recv_flogi_req(struct fc_seq *sp_in,
729 				    struct fc_frame *rx_fp,
730 				    struct fc_lport *lport)
731 {
732 	struct fc_frame *fp;
733 	struct fc_frame_header *fh;
734 	struct fc_seq *sp;
735 	struct fc_exch *ep;
736 	struct fc_els_flogi *flp;
737 	struct fc_els_flogi *new_flp;
738 	u64 remote_wwpn;
739 	u32 remote_fid;
740 	u32 local_fid;
741 	u32 f_ctl;
742 
743 	FC_LPORT_DBG(lport, "Received FLOGI request while in state %s\n",
744 		     fc_lport_state(lport));
745 
746 	fh = fc_frame_header_get(rx_fp);
747 	remote_fid = ntoh24(fh->fh_s_id);
748 	flp = fc_frame_payload_get(rx_fp, sizeof(*flp));
749 	if (!flp)
750 		goto out;
751 	remote_wwpn = get_unaligned_be64(&flp->fl_wwpn);
752 	if (remote_wwpn == lport->wwpn) {
753 		printk(KERN_WARNING "libfc: Received FLOGI from port "
754 		       "with same WWPN %llx\n", remote_wwpn);
755 		goto out;
756 	}
757 	FC_LPORT_DBG(lport, "FLOGI from port WWPN %llx\n", remote_wwpn);
758 
759 	/*
760 	 * XXX what is the right thing to do for FIDs?
761 	 * The originator might expect our S_ID to be 0xfffffe.
762 	 * But if so, both of us could end up with the same FID.
763 	 */
764 	local_fid = FC_LOCAL_PTP_FID_LO;
765 	if (remote_wwpn < lport->wwpn) {
766 		local_fid = FC_LOCAL_PTP_FID_HI;
767 		if (!remote_fid || remote_fid == local_fid)
768 			remote_fid = FC_LOCAL_PTP_FID_LO;
769 	} else if (!remote_fid) {
770 		remote_fid = FC_LOCAL_PTP_FID_HI;
771 	}
772 
773 	fc_host_port_id(lport->host) = local_fid;
774 
775 	fp = fc_frame_alloc(lport, sizeof(*flp));
776 	if (fp) {
777 		sp = lport->tt.seq_start_next(fr_seq(rx_fp));
778 		new_flp = fc_frame_payload_get(fp, sizeof(*flp));
779 		fc_lport_flogi_fill(lport, new_flp, ELS_FLOGI);
780 		new_flp->fl_cmd = (u8) ELS_LS_ACC;
781 
782 		/*
783 		 * Send the response.  If this fails, the originator should
784 		 * repeat the sequence.
785 		 */
786 		f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ;
787 		ep = fc_seq_exch(sp);
788 		fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
789 			       FC_TYPE_ELS, f_ctl, 0);
790 		lport->tt.seq_send(lport, sp, fp);
791 
792 	} else {
793 		fc_lport_error(lport, fp);
794 	}
795 	fc_lport_ptp_setup(lport, remote_fid, remote_wwpn,
796 			   get_unaligned_be64(&flp->fl_wwnn));
797 
798 	lport->tt.disc_start(fc_lport_disc_callback, lport);
799 
800 out:
801 	sp = fr_seq(rx_fp);
802 	fc_frame_free(rx_fp);
803 }
804 
805 /**
806  * fc_lport_recv_req() - The generic lport request handler
807  * @lport: The lport that received the request
808  * @sp: The sequence the request is on
809  * @fp: The frame the request is in
810  *
811  * This function will see if the lport handles the request or
812  * if an rport should handle the request.
813  *
814  * Locking Note: This function should not be called with the lport
815  *		 lock held because it will grab the lock.
816  */
817 static void fc_lport_recv_req(struct fc_lport *lport, struct fc_seq *sp,
818 			      struct fc_frame *fp)
819 {
820 	struct fc_frame_header *fh = fc_frame_header_get(fp);
821 	void (*recv) (struct fc_seq *, struct fc_frame *, struct fc_lport *);
822 	struct fc_rport_priv *rdata;
823 	u32 s_id;
824 	u32 d_id;
825 	struct fc_seq_els_data rjt_data;
826 
827 	mutex_lock(&lport->lp_mutex);
828 
829 	/*
830 	 * Handle special ELS cases like FLOGI, LOGO, and
831 	 * RSCN here.  These don't require a session.
832 	 * Even if we had a session, it might not be ready.
833 	 */
834 	if (!lport->link_up)
835 		fc_frame_free(fp);
836 	else if (fh->fh_type == FC_TYPE_ELS &&
837 		 fh->fh_r_ctl == FC_RCTL_ELS_REQ) {
838 		/*
839 		 * Check opcode.
840 		 */
841 		recv = NULL;
842 		switch (fc_frame_payload_op(fp)) {
843 		case ELS_FLOGI:
844 			recv = fc_lport_recv_flogi_req;
845 			break;
846 		case ELS_LOGO:
847 			fh = fc_frame_header_get(fp);
848 			if (ntoh24(fh->fh_s_id) == FC_FID_FLOGI)
849 				recv = fc_lport_recv_logo_req;
850 			break;
851 		case ELS_RSCN:
852 			recv = lport->tt.disc_recv_req;
853 			break;
854 		case ELS_ECHO:
855 			recv = fc_lport_recv_echo_req;
856 			break;
857 		case ELS_RLIR:
858 			recv = fc_lport_recv_rlir_req;
859 			break;
860 		case ELS_RNID:
861 			recv = fc_lport_recv_rnid_req;
862 			break;
863 		case ELS_ADISC:
864 			recv = fc_lport_recv_adisc_req;
865 			break;
866 		}
867 
868 		if (recv)
869 			recv(sp, fp, lport);
870 		else {
871 			/*
872 			 * Find session.
873 			 * If this is a new incoming PLOGI, we won't find it.
874 			 */
875 			s_id = ntoh24(fh->fh_s_id);
876 			d_id = ntoh24(fh->fh_d_id);
877 
878 			rdata = lport->tt.rport_lookup(lport, s_id);
879 			if (rdata)
880 				lport->tt.rport_recv_req(sp, fp, rdata);
881 			else {
882 				rjt_data.fp = NULL;
883 				rjt_data.reason = ELS_RJT_UNAB;
884 				rjt_data.explan = ELS_EXPL_NONE;
885 				lport->tt.seq_els_rsp_send(sp,
886 							   ELS_LS_RJT,
887 							   &rjt_data);
888 				fc_frame_free(fp);
889 			}
890 		}
891 	} else {
892 		FC_LPORT_DBG(lport, "dropping invalid frame (eof %x)\n",
893 			     fr_eof(fp));
894 		fc_frame_free(fp);
895 	}
896 	mutex_unlock(&lport->lp_mutex);
897 
898 	/*
899 	 *  The common exch_done for all requests may not be good
900 	 *  if any request requires a longer hold on the exchange. XXX
901 	 */
902 	lport->tt.exch_done(sp);
903 }
904 
905 /**
906  * fc_lport_reset() - Reset an lport
907  * @lport: The lport which should be reset
908  *
909  * Locking Note: This function should not be called with the
910  *		 lport lock held.
911  */
912 int fc_lport_reset(struct fc_lport *lport)
913 {
914 	cancel_delayed_work_sync(&lport->retry_work);
915 	mutex_lock(&lport->lp_mutex);
916 	fc_lport_enter_reset(lport);
917 	mutex_unlock(&lport->lp_mutex);
918 	return 0;
919 }
920 EXPORT_SYMBOL(fc_lport_reset);
921 
922 /**
923  * fc_lport_reset_locked() - Reset the local port
924  * @lport: Fibre Channel local port to be reset
925  *
926  * Locking Note: The lport lock is expected to be held before calling
927  * this routine.
928  */
929 static void fc_lport_reset_locked(struct fc_lport *lport)
930 {
931 	if (lport->dns_rp)
932 		lport->tt.rport_logoff(lport->dns_rp);
933 
934 	if (lport->ptp_rp) {
935 		lport->tt.rport_logoff(lport->ptp_rp);
936 		lport->ptp_rp = NULL;
937 	}
938 
939 	lport->tt.disc_stop(lport);
940 
941 	lport->tt.exch_mgr_reset(lport, 0, 0);
942 	fc_host_fabric_name(lport->host) = 0;
943 	fc_host_port_id(lport->host) = 0;
944 }
945 
946 /**
947  * fc_lport_enter_reset() - Reset the local port
948  * @lport: Fibre Channel local port to be reset
949  *
950  * Locking Note: The lport lock is expected to be held before calling
951  * this routine.
952  */
953 static void fc_lport_enter_reset(struct fc_lport *lport)
954 {
955 	FC_LPORT_DBG(lport, "Entered RESET state from %s state\n",
956 		     fc_lport_state(lport));
957 
958 	fc_lport_state_enter(lport, LPORT_ST_RESET);
959 	fc_lport_reset_locked(lport);
960 	if (lport->link_up)
961 		fc_lport_enter_flogi(lport);
962 }
963 
964 /**
965  * fc_lport_enter_disabled() - disable the local port
966  * @lport: Fibre Channel local port to be reset
967  *
968  * Locking Note: The lport lock is expected to be held before calling
969  * this routine.
970  */
971 static void fc_lport_enter_disabled(struct fc_lport *lport)
972 {
973 	FC_LPORT_DBG(lport, "Entered disabled state from %s state\n",
974 		     fc_lport_state(lport));
975 
976 	fc_lport_state_enter(lport, LPORT_ST_DISABLED);
977 	fc_lport_reset_locked(lport);
978 }
979 
980 /**
981  * fc_lport_error() - Handler for any errors
982  * @lport: The fc_lport object
983  * @fp: The frame pointer
984  *
985  * If the error was caused by a resource allocation failure
986  * then wait for half a second and retry, otherwise retry
987  * after the e_d_tov time.
988  */
989 static void fc_lport_error(struct fc_lport *lport, struct fc_frame *fp)
990 {
991 	unsigned long delay = 0;
992 	FC_LPORT_DBG(lport, "Error %ld in state %s, retries %d\n",
993 		     PTR_ERR(fp), fc_lport_state(lport),
994 		     lport->retry_count);
995 
996 	if (!fp || PTR_ERR(fp) == -FC_EX_TIMEOUT) {
997 		/*
998 		 * Memory allocation failure, or the exchange timed out.
999 		 *  Retry after delay
1000 		 */
1001 		if (lport->retry_count < lport->max_retry_count) {
1002 			lport->retry_count++;
1003 			if (!fp)
1004 				delay = msecs_to_jiffies(500);
1005 			else
1006 				delay =	msecs_to_jiffies(lport->e_d_tov);
1007 
1008 			schedule_delayed_work(&lport->retry_work, delay);
1009 		} else {
1010 			switch (lport->state) {
1011 			case LPORT_ST_DISABLED:
1012 			case LPORT_ST_READY:
1013 			case LPORT_ST_RESET:
1014 			case LPORT_ST_RPN_ID:
1015 			case LPORT_ST_RFT_ID:
1016 			case LPORT_ST_SCR:
1017 			case LPORT_ST_DNS:
1018 			case LPORT_ST_FLOGI:
1019 			case LPORT_ST_LOGO:
1020 				fc_lport_enter_reset(lport);
1021 				break;
1022 			}
1023 		}
1024 	}
1025 }
1026 
1027 /**
1028  * fc_lport_rft_id_resp() - Handle response to Register Fibre
1029  *			    Channel Types by ID (RFT_ID) request
1030  * @sp: current sequence in RFT_ID exchange
1031  * @fp: response frame
1032  * @lp_arg: Fibre Channel host port instance
1033  *
1034  * Locking Note: This function will be called without the lport lock
1035  * held, but it will lock, call an _enter_* function or fc_lport_error
1036  * and then unlock the lport.
1037  */
1038 static void fc_lport_rft_id_resp(struct fc_seq *sp, struct fc_frame *fp,
1039 				 void *lp_arg)
1040 {
1041 	struct fc_lport *lport = lp_arg;
1042 	struct fc_frame_header *fh;
1043 	struct fc_ct_hdr *ct;
1044 
1045 	if (fp == ERR_PTR(-FC_EX_CLOSED))
1046 		return;
1047 
1048 	mutex_lock(&lport->lp_mutex);
1049 
1050 	FC_LPORT_DBG(lport, "Received a RFT_ID response\n");
1051 
1052 	if (lport->state != LPORT_ST_RFT_ID) {
1053 		FC_LPORT_DBG(lport, "Received a RFT_ID response, but in state "
1054 			     "%s\n", fc_lport_state(lport));
1055 		if (IS_ERR(fp))
1056 			goto err;
1057 		goto out;
1058 	}
1059 
1060 	if (IS_ERR(fp)) {
1061 		fc_lport_error(lport, fp);
1062 		goto err;
1063 	}
1064 
1065 	fh = fc_frame_header_get(fp);
1066 	ct = fc_frame_payload_get(fp, sizeof(*ct));
1067 
1068 	if (fh && ct && fh->fh_type == FC_TYPE_CT &&
1069 	    ct->ct_fs_type == FC_FST_DIR &&
1070 	    ct->ct_fs_subtype == FC_NS_SUBTYPE &&
1071 	    ntohs(ct->ct_cmd) == FC_FS_ACC)
1072 		fc_lport_enter_scr(lport);
1073 	else
1074 		fc_lport_error(lport, fp);
1075 out:
1076 	fc_frame_free(fp);
1077 err:
1078 	mutex_unlock(&lport->lp_mutex);
1079 }
1080 
1081 /**
1082  * fc_lport_rpn_id_resp() - Handle response to Register Port
1083  *			    Name by ID (RPN_ID) request
1084  * @sp: current sequence in RPN_ID exchange
1085  * @fp: response frame
1086  * @lp_arg: Fibre Channel host port instance
1087  *
1088  * Locking Note: This function will be called without the lport lock
1089  * held, but it will lock, call an _enter_* function or fc_lport_error
1090  * and then unlock the lport.
1091  */
1092 static void fc_lport_rpn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
1093 				 void *lp_arg)
1094 {
1095 	struct fc_lport *lport = lp_arg;
1096 	struct fc_frame_header *fh;
1097 	struct fc_ct_hdr *ct;
1098 
1099 	if (fp == ERR_PTR(-FC_EX_CLOSED))
1100 		return;
1101 
1102 	mutex_lock(&lport->lp_mutex);
1103 
1104 	FC_LPORT_DBG(lport, "Received a RPN_ID response\n");
1105 
1106 	if (lport->state != LPORT_ST_RPN_ID) {
1107 		FC_LPORT_DBG(lport, "Received a RPN_ID response, but in state "
1108 			     "%s\n", fc_lport_state(lport));
1109 		if (IS_ERR(fp))
1110 			goto err;
1111 		goto out;
1112 	}
1113 
1114 	if (IS_ERR(fp)) {
1115 		fc_lport_error(lport, fp);
1116 		goto err;
1117 	}
1118 
1119 	fh = fc_frame_header_get(fp);
1120 	ct = fc_frame_payload_get(fp, sizeof(*ct));
1121 	if (fh && ct && fh->fh_type == FC_TYPE_CT &&
1122 	    ct->ct_fs_type == FC_FST_DIR &&
1123 	    ct->ct_fs_subtype == FC_NS_SUBTYPE &&
1124 	    ntohs(ct->ct_cmd) == FC_FS_ACC)
1125 		fc_lport_enter_rft_id(lport);
1126 	else
1127 		fc_lport_error(lport, fp);
1128 
1129 out:
1130 	fc_frame_free(fp);
1131 err:
1132 	mutex_unlock(&lport->lp_mutex);
1133 }
1134 
1135 /**
1136  * fc_lport_scr_resp() - Handle response to State Change Register (SCR) request
1137  * @sp: current sequence in SCR exchange
1138  * @fp: response frame
1139  * @lp_arg: Fibre Channel local port instance that sent the registration request
1140  *
1141  * Locking Note: This function will be called without the lport lock
1142  * held, but it will lock, call an _enter_* function or fc_lport_error
1143  * and then unlock the lport.
1144  */
1145 static void fc_lport_scr_resp(struct fc_seq *sp, struct fc_frame *fp,
1146 			      void *lp_arg)
1147 {
1148 	struct fc_lport *lport = lp_arg;
1149 	u8 op;
1150 
1151 	if (fp == ERR_PTR(-FC_EX_CLOSED))
1152 		return;
1153 
1154 	mutex_lock(&lport->lp_mutex);
1155 
1156 	FC_LPORT_DBG(lport, "Received a SCR response\n");
1157 
1158 	if (lport->state != LPORT_ST_SCR) {
1159 		FC_LPORT_DBG(lport, "Received a SCR response, but in state "
1160 			     "%s\n", fc_lport_state(lport));
1161 		if (IS_ERR(fp))
1162 			goto err;
1163 		goto out;
1164 	}
1165 
1166 	if (IS_ERR(fp)) {
1167 		fc_lport_error(lport, fp);
1168 		goto err;
1169 	}
1170 
1171 	op = fc_frame_payload_op(fp);
1172 	if (op == ELS_LS_ACC)
1173 		fc_lport_enter_ready(lport);
1174 	else
1175 		fc_lport_error(lport, fp);
1176 
1177 out:
1178 	fc_frame_free(fp);
1179 err:
1180 	mutex_unlock(&lport->lp_mutex);
1181 }
1182 
1183 /**
1184  * fc_lport_enter_scr() - Send a State Change Register (SCR) request
1185  * @lport: Fibre Channel local port to register for state changes
1186  *
1187  * Locking Note: The lport lock is expected to be held before calling
1188  * this routine.
1189  */
1190 static void fc_lport_enter_scr(struct fc_lport *lport)
1191 {
1192 	struct fc_frame *fp;
1193 
1194 	FC_LPORT_DBG(lport, "Entered SCR state from %s state\n",
1195 		     fc_lport_state(lport));
1196 
1197 	fc_lport_state_enter(lport, LPORT_ST_SCR);
1198 
1199 	fp = fc_frame_alloc(lport, sizeof(struct fc_els_scr));
1200 	if (!fp) {
1201 		fc_lport_error(lport, fp);
1202 		return;
1203 	}
1204 
1205 	if (!lport->tt.elsct_send(lport, FC_FID_FCTRL, fp, ELS_SCR,
1206 				  fc_lport_scr_resp, lport, lport->e_d_tov))
1207 		fc_lport_error(lport, fp);
1208 }
1209 
1210 /**
1211  * fc_lport_enter_rft_id() - Register FC4-types with the name server
1212  * @lport: Fibre Channel local port to register
1213  *
1214  * Locking Note: The lport lock is expected to be held before calling
1215  * this routine.
1216  */
1217 static void fc_lport_enter_rft_id(struct fc_lport *lport)
1218 {
1219 	struct fc_frame *fp;
1220 	struct fc_ns_fts *lps;
1221 	int i;
1222 
1223 	FC_LPORT_DBG(lport, "Entered RFT_ID state from %s state\n",
1224 		     fc_lport_state(lport));
1225 
1226 	fc_lport_state_enter(lport, LPORT_ST_RFT_ID);
1227 
1228 	lps = &lport->fcts;
1229 	i = sizeof(lps->ff_type_map) / sizeof(lps->ff_type_map[0]);
1230 	while (--i >= 0)
1231 		if (ntohl(lps->ff_type_map[i]) != 0)
1232 			break;
1233 	if (i < 0) {
1234 		/* nothing to register, move on to SCR */
1235 		fc_lport_enter_scr(lport);
1236 		return;
1237 	}
1238 
1239 	fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) +
1240 			    sizeof(struct fc_ns_rft));
1241 	if (!fp) {
1242 		fc_lport_error(lport, fp);
1243 		return;
1244 	}
1245 
1246 	if (!lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, FC_NS_RFT_ID,
1247 				  fc_lport_rft_id_resp,
1248 				  lport, lport->e_d_tov))
1249 		fc_lport_error(lport, fp);
1250 }
1251 
1252 /**
1253  * fc_lport_enter_rpn_id() - Register port name with the name server
1254  * @lport: Fibre Channel local port to register
1255  *
1256  * Locking Note: The lport lock is expected to be held before calling
1257  * this routine.
1258  */
1259 static void fc_lport_enter_rpn_id(struct fc_lport *lport)
1260 {
1261 	struct fc_frame *fp;
1262 
1263 	FC_LPORT_DBG(lport, "Entered RPN_ID state from %s state\n",
1264 		     fc_lport_state(lport));
1265 
1266 	fc_lport_state_enter(lport, LPORT_ST_RPN_ID);
1267 
1268 	fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) +
1269 			    sizeof(struct fc_ns_rn_id));
1270 	if (!fp) {
1271 		fc_lport_error(lport, fp);
1272 		return;
1273 	}
1274 
1275 	if (!lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, FC_NS_RPN_ID,
1276 				  fc_lport_rpn_id_resp,
1277 				  lport, lport->e_d_tov))
1278 		fc_lport_error(lport, fp);
1279 }
1280 
1281 static struct fc_rport_operations fc_lport_rport_ops = {
1282 	.event_callback = fc_lport_rport_callback,
1283 };
1284 
1285 /**
1286  * fc_lport_enter_dns() - Create an rport to the name server
1287  * @lport: Fibre Channel local port requesting an rport for the name server
1288  *
1289  * Locking Note: The lport lock is expected to be held before calling
1290  * this routine.
1291  */
1292 static void fc_lport_enter_dns(struct fc_lport *lport)
1293 {
1294 	struct fc_rport_priv *rdata;
1295 	struct fc_rport_identifiers ids;
1296 
1297 	ids.port_id = FC_FID_DIR_SERV;
1298 	ids.port_name = -1;
1299 	ids.node_name = -1;
1300 	ids.roles = FC_RPORT_ROLE_UNKNOWN;
1301 
1302 	FC_LPORT_DBG(lport, "Entered DNS state from %s state\n",
1303 		     fc_lport_state(lport));
1304 
1305 	fc_lport_state_enter(lport, LPORT_ST_DNS);
1306 
1307 	rdata = lport->tt.rport_create(lport, &ids);
1308 	if (!rdata)
1309 		goto err;
1310 
1311 	rdata->ops = &fc_lport_rport_ops;
1312 	lport->tt.rport_login(rdata);
1313 	return;
1314 
1315 err:
1316 	fc_lport_error(lport, NULL);
1317 }
1318 
1319 /**
1320  * fc_lport_timeout() - Handler for the retry_work timer.
1321  * @work: The work struct of the fc_lport
1322  */
1323 static void fc_lport_timeout(struct work_struct *work)
1324 {
1325 	struct fc_lport *lport =
1326 		container_of(work, struct fc_lport,
1327 			     retry_work.work);
1328 
1329 	mutex_lock(&lport->lp_mutex);
1330 
1331 	switch (lport->state) {
1332 	case LPORT_ST_DISABLED:
1333 	case LPORT_ST_READY:
1334 	case LPORT_ST_RESET:
1335 		WARN_ON(1);
1336 		break;
1337 	case LPORT_ST_FLOGI:
1338 		fc_lport_enter_flogi(lport);
1339 		break;
1340 	case LPORT_ST_DNS:
1341 		fc_lport_enter_dns(lport);
1342 		break;
1343 	case LPORT_ST_RPN_ID:
1344 		fc_lport_enter_rpn_id(lport);
1345 		break;
1346 	case LPORT_ST_RFT_ID:
1347 		fc_lport_enter_rft_id(lport);
1348 		break;
1349 	case LPORT_ST_SCR:
1350 		fc_lport_enter_scr(lport);
1351 		break;
1352 	case LPORT_ST_LOGO:
1353 		fc_lport_enter_logo(lport);
1354 		break;
1355 	}
1356 
1357 	mutex_unlock(&lport->lp_mutex);
1358 }
1359 
1360 /**
1361  * fc_lport_logo_resp() - Handle response to LOGO request
1362  * @sp: current sequence in LOGO exchange
1363  * @fp: response frame
1364  * @lp_arg: Fibre Channel local port instance that sent the LOGO request
1365  *
1366  * Locking Note: This function will be called without the lport lock
1367  * held, but it will lock, call an _enter_* function or fc_lport_error
1368  * and then unlock the lport.
1369  */
1370 static void fc_lport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
1371 			       void *lp_arg)
1372 {
1373 	struct fc_lport *lport = lp_arg;
1374 	u8 op;
1375 
1376 	if (fp == ERR_PTR(-FC_EX_CLOSED))
1377 		return;
1378 
1379 	mutex_lock(&lport->lp_mutex);
1380 
1381 	FC_LPORT_DBG(lport, "Received a LOGO response\n");
1382 
1383 	if (lport->state != LPORT_ST_LOGO) {
1384 		FC_LPORT_DBG(lport, "Received a LOGO response, but in state "
1385 			     "%s\n", fc_lport_state(lport));
1386 		if (IS_ERR(fp))
1387 			goto err;
1388 		goto out;
1389 	}
1390 
1391 	if (IS_ERR(fp)) {
1392 		fc_lport_error(lport, fp);
1393 		goto err;
1394 	}
1395 
1396 	op = fc_frame_payload_op(fp);
1397 	if (op == ELS_LS_ACC)
1398 		fc_lport_enter_disabled(lport);
1399 	else
1400 		fc_lport_error(lport, fp);
1401 
1402 out:
1403 	fc_frame_free(fp);
1404 err:
1405 	mutex_unlock(&lport->lp_mutex);
1406 }
1407 
1408 /**
1409  * fc_lport_enter_logo() - Logout of the fabric
1410  * @lport: Fibre Channel local port to be logged out
1411  *
1412  * Locking Note: The lport lock is expected to be held before calling
1413  * this routine.
1414  */
1415 static void fc_lport_enter_logo(struct fc_lport *lport)
1416 {
1417 	struct fc_frame *fp;
1418 	struct fc_els_logo *logo;
1419 
1420 	FC_LPORT_DBG(lport, "Entered LOGO state from %s state\n",
1421 		     fc_lport_state(lport));
1422 
1423 	fc_lport_state_enter(lport, LPORT_ST_LOGO);
1424 
1425 	fp = fc_frame_alloc(lport, sizeof(*logo));
1426 	if (!fp) {
1427 		fc_lport_error(lport, fp);
1428 		return;
1429 	}
1430 
1431 	if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp, ELS_LOGO,
1432 				  fc_lport_logo_resp, lport, lport->e_d_tov))
1433 		fc_lport_error(lport, fp);
1434 }
1435 
1436 /**
1437  * fc_lport_flogi_resp() - Handle response to FLOGI request
1438  * @sp: current sequence in FLOGI exchange
1439  * @fp: response frame
1440  * @lp_arg: Fibre Channel local port instance that sent the FLOGI request
1441  *
1442  * Locking Note: This function will be called without the lport lock
1443  * held, but it will lock, call an _enter_* function or fc_lport_error
1444  * and then unlock the lport.
1445  */
1446 static void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
1447 				void *lp_arg)
1448 {
1449 	struct fc_lport *lport = lp_arg;
1450 	struct fc_frame_header *fh;
1451 	struct fc_els_flogi *flp;
1452 	u32 did;
1453 	u16 csp_flags;
1454 	unsigned int r_a_tov;
1455 	unsigned int e_d_tov;
1456 	u16 mfs;
1457 
1458 	if (fp == ERR_PTR(-FC_EX_CLOSED))
1459 		return;
1460 
1461 	mutex_lock(&lport->lp_mutex);
1462 
1463 	FC_LPORT_DBG(lport, "Received a FLOGI response\n");
1464 
1465 	if (lport->state != LPORT_ST_FLOGI) {
1466 		FC_LPORT_DBG(lport, "Received a FLOGI response, but in state "
1467 			     "%s\n", fc_lport_state(lport));
1468 		if (IS_ERR(fp))
1469 			goto err;
1470 		goto out;
1471 	}
1472 
1473 	if (IS_ERR(fp)) {
1474 		fc_lport_error(lport, fp);
1475 		goto err;
1476 	}
1477 
1478 	fh = fc_frame_header_get(fp);
1479 	did = ntoh24(fh->fh_d_id);
1480 	if (fc_frame_payload_op(fp) == ELS_LS_ACC && did != 0) {
1481 
1482 		printk(KERN_INFO "libfc: Assigned FID (%6x) in FLOGI response\n",
1483 		       did);
1484 		fc_host_port_id(lport->host) = did;
1485 
1486 		flp = fc_frame_payload_get(fp, sizeof(*flp));
1487 		if (flp) {
1488 			mfs = ntohs(flp->fl_csp.sp_bb_data) &
1489 				FC_SP_BB_DATA_MASK;
1490 			if (mfs >= FC_SP_MIN_MAX_PAYLOAD &&
1491 			    mfs < lport->mfs)
1492 				lport->mfs = mfs;
1493 			csp_flags = ntohs(flp->fl_csp.sp_features);
1494 			r_a_tov = ntohl(flp->fl_csp.sp_r_a_tov);
1495 			e_d_tov = ntohl(flp->fl_csp.sp_e_d_tov);
1496 			if (csp_flags & FC_SP_FT_EDTR)
1497 				e_d_tov /= 1000000;
1498 			if ((csp_flags & FC_SP_FT_FPORT) == 0) {
1499 				if (e_d_tov > lport->e_d_tov)
1500 					lport->e_d_tov = e_d_tov;
1501 				lport->r_a_tov = 2 * e_d_tov;
1502 				printk(KERN_INFO "libfc: Port (%6x) entered "
1503 				       "point to point mode\n", did);
1504 				fc_lport_ptp_setup(lport, ntoh24(fh->fh_s_id),
1505 						   get_unaligned_be64(
1506 							   &flp->fl_wwpn),
1507 						   get_unaligned_be64(
1508 							   &flp->fl_wwnn));
1509 			} else {
1510 				lport->e_d_tov = e_d_tov;
1511 				lport->r_a_tov = r_a_tov;
1512 				fc_host_fabric_name(lport->host) =
1513 					get_unaligned_be64(&flp->fl_wwnn);
1514 				fc_lport_enter_dns(lport);
1515 			}
1516 		}
1517 
1518 		if (flp) {
1519 			csp_flags = ntohs(flp->fl_csp.sp_features);
1520 			if ((csp_flags & FC_SP_FT_FPORT) == 0) {
1521 				lport->tt.disc_start(fc_lport_disc_callback,
1522 						     lport);
1523 			}
1524 		}
1525 	} else {
1526 		FC_LPORT_DBG(lport, "Bad FLOGI response\n");
1527 	}
1528 
1529 out:
1530 	fc_frame_free(fp);
1531 err:
1532 	mutex_unlock(&lport->lp_mutex);
1533 }
1534 
1535 /**
1536  * fc_lport_enter_flogi() - Send a FLOGI request to the fabric manager
1537  * @lport: Fibre Channel local port to be logged in to the fabric
1538  *
1539  * Locking Note: The lport lock is expected to be held before calling
1540  * this routine.
1541  */
1542 void fc_lport_enter_flogi(struct fc_lport *lport)
1543 {
1544 	struct fc_frame *fp;
1545 
1546 	FC_LPORT_DBG(lport, "Entered FLOGI state from %s state\n",
1547 		     fc_lport_state(lport));
1548 
1549 	fc_lport_state_enter(lport, LPORT_ST_FLOGI);
1550 
1551 	fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
1552 	if (!fp)
1553 		return fc_lport_error(lport, fp);
1554 
1555 	if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp, ELS_FLOGI,
1556 				  fc_lport_flogi_resp, lport, lport->e_d_tov))
1557 		fc_lport_error(lport, fp);
1558 }
1559 
1560 /* Configure a fc_lport */
1561 int fc_lport_config(struct fc_lport *lport)
1562 {
1563 	INIT_DELAYED_WORK(&lport->retry_work, fc_lport_timeout);
1564 	mutex_init(&lport->lp_mutex);
1565 
1566 	fc_lport_state_enter(lport, LPORT_ST_DISABLED);
1567 
1568 	fc_lport_add_fc4_type(lport, FC_TYPE_FCP);
1569 	fc_lport_add_fc4_type(lport, FC_TYPE_CT);
1570 
1571 	return 0;
1572 }
1573 EXPORT_SYMBOL(fc_lport_config);
1574 
1575 int fc_lport_init(struct fc_lport *lport)
1576 {
1577 	if (!lport->tt.lport_recv)
1578 		lport->tt.lport_recv = fc_lport_recv_req;
1579 
1580 	if (!lport->tt.lport_reset)
1581 		lport->tt.lport_reset = fc_lport_reset;
1582 
1583 	fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
1584 	fc_host_node_name(lport->host) = lport->wwnn;
1585 	fc_host_port_name(lport->host) = lport->wwpn;
1586 	fc_host_supported_classes(lport->host) = FC_COS_CLASS3;
1587 	memset(fc_host_supported_fc4s(lport->host), 0,
1588 	       sizeof(fc_host_supported_fc4s(lport->host)));
1589 	fc_host_supported_fc4s(lport->host)[2] = 1;
1590 	fc_host_supported_fc4s(lport->host)[7] = 1;
1591 
1592 	/* This value is also unchanging */
1593 	memset(fc_host_active_fc4s(lport->host), 0,
1594 	       sizeof(fc_host_active_fc4s(lport->host)));
1595 	fc_host_active_fc4s(lport->host)[2] = 1;
1596 	fc_host_active_fc4s(lport->host)[7] = 1;
1597 	fc_host_maxframe_size(lport->host) = lport->mfs;
1598 	fc_host_supported_speeds(lport->host) = 0;
1599 	if (lport->link_supported_speeds & FC_PORTSPEED_1GBIT)
1600 		fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_1GBIT;
1601 	if (lport->link_supported_speeds & FC_PORTSPEED_10GBIT)
1602 		fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_10GBIT;
1603 
1604 	INIT_LIST_HEAD(&lport->ema_list);
1605 	return 0;
1606 }
1607 EXPORT_SYMBOL(fc_lport_init);
1608