xref: /openbmc/linux/drivers/scsi/libfc/fc_lport.c (revision e8e0929d)
1 /*
2  * Copyright(c) 2007 Intel Corporation. All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms and conditions of the GNU General Public License,
6  * version 2, as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope it will be useful, but WITHOUT
9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
11  * more details.
12  *
13  * You should have received a copy of the GNU General Public License along with
14  * this program; if not, write to the Free Software Foundation, Inc.,
15  * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16  *
17  * Maintained at www.Open-FCoE.org
18  */
19 
20 /*
21  * PORT LOCKING NOTES
22  *
23  * These comments only apply to the 'port code' which consists of the lport,
24  * disc and rport blocks.
25  *
26  * MOTIVATION
27  *
28  * The lport, disc and rport blocks all have mutexes that are used to protect
29  * those objects. The main motivation for these locks is to prevent an lport
30  * reset just before we send a frame. In that scenario the
31  * lport's FID would get set to zero and then we'd send a frame with an
32  * invalid SID. We also need to ensure that states don't change unexpectedly
33  * while processing another state.
34  *
35  * HIERARCHY
36  *
37  * The following hierarchy defines the locking rules. A greater lock
38  * may be held before acquiring a lesser lock, but a lesser lock should never
39  * be held while attempting to acquire a greater lock. Here is the hierarchy:
40  *
41  * lport > disc, lport > rport, disc > rport
42  *
43  * CALLBACKS
44  *
45  * The callbacks cause complications with this scheme. There is a callback
46  * from the rport (to either lport or disc) and a callback from disc
47  * (to the lport).
48  *
49  * As rports exit the rport state machine a callback is made to the owner of
50  * the rport to notify success or failure. Since the callback is likely to
51  * cause the lport or disc to grab its lock we cannot hold the rport lock
52  * while making the callback. To ensure that the rport is not freed while
53  * processing the callback, the rport callbacks are serialized through a
54  * single-threaded workqueue. An rport would never be freed while in a
55  * callback handler because no other rport work in this queue can be executed
56  * at the same time.
57  *
58  * When discovery succeeds or fails a callback is made to the lport as
59  * notification. Currently, successful discovery causes the lport to take no
60  * action. A failure will cause the lport to reset. There is likely a circular
61  * locking problem with this implementation.
62  */
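
/*
 * ILLUSTRATIVE LOCK ORDERING SKETCH
 *
 * A minimal sketch of the hierarchy above, assuming only the mutexes that
 * are visible in this file (lp_mutex on the lport and disc_mutex inside
 * lport->disc). This is not part of the driver, just an illustration:
 *
 *	mutex_lock(&lport->lp_mutex);		// greater lock first
 *	mutex_lock(&lport->disc.disc_mutex);	// then the lesser lock
 *	...
 *	mutex_unlock(&lport->disc.disc_mutex);
 *	mutex_unlock(&lport->lp_mutex);
 *
 * Taking disc_mutex first and then trying to take lp_mutex would violate
 * the hierarchy and risk deadlock against a path that follows the rule.
 */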
63 
64 /*
65  * LPORT LOCKING
66  *
67  * The critical sections protected by the lport's mutex are quite broad and
68  * may be improved upon in the future. The lport code and its locking don't
69  * influence the I/O path, so excessive locking doesn't penalize I/O
70  * performance.
71  *
72  * The strategy is to lock whenever processing a request or response. Note
73  * that every _enter_* function corresponds to a state change. They generally
74  * change the lport's state and then send a request out on the wire. We lock
75  * before calling any of these functions to protect that state change. This
76  * means that the entry points into the lport block manage the locks while
77  * the state machine transitions between states (i.e. the _enter_* functions),
78  * always staying protected.
79  *
80  * When handling responses we also hold the lport mutex broadly. When the
81  * lport receives the response frame it locks the mutex and then calls the
82  * appropriate handler for the particular response. Generally a response will
83  * trigger a state change and so the lock must already be held.
84  *
85  * Retries also have to consider the locking. The retries occur from a work
86  * context and the work function will lock the lport and then retry the state
87  * (i.e. _enter_* function).
88  */
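
/*
 * ENTRY-POINT PATTERN SKETCH
 *
 * A hedged sketch of the strategy described above, modeled on the real
 * entry points further down in this file (e.g. fc_linkup()): the caller
 * takes lp_mutex, the _enter_* routine changes state and sends a frame,
 * and the caller releases the mutex afterwards.
 *
 *	mutex_lock(&lport->lp_mutex);
 *	fc_lport_enter_flogi(lport);	// state change + frame send, lock held
 *	mutex_unlock(&lport->lp_mutex);
 */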
89 
90 #include <linux/timer.h>
91 #include <asm/unaligned.h>
92 
93 #include <scsi/fc/fc_gs.h>
94 
95 #include <scsi/libfc.h>
96 #include <scsi/fc_encode.h>
97 
98 /* Fabric IDs to use for point-to-point mode, chosen on whims. */
99 #define FC_LOCAL_PTP_FID_LO   0x010101
100 #define FC_LOCAL_PTP_FID_HI   0x010102
101 
102 #define	DNS_DELAY	      3 /* Discovery delay after RSCN (in seconds) */
103 
104 static void fc_lport_error(struct fc_lport *, struct fc_frame *);
105 
106 static void fc_lport_enter_reset(struct fc_lport *);
107 static void fc_lport_enter_flogi(struct fc_lport *);
108 static void fc_lport_enter_dns(struct fc_lport *);
109 static void fc_lport_enter_rpn_id(struct fc_lport *);
110 static void fc_lport_enter_rft_id(struct fc_lport *);
111 static void fc_lport_enter_scr(struct fc_lport *);
112 static void fc_lport_enter_ready(struct fc_lport *);
113 static void fc_lport_enter_logo(struct fc_lport *);
114 
115 static const char *fc_lport_state_names[] = {
116 	[LPORT_ST_DISABLED] = "disabled",
117 	[LPORT_ST_FLOGI] =    "FLOGI",
118 	[LPORT_ST_DNS] =      "dNS",
119 	[LPORT_ST_RPN_ID] =   "RPN_ID",
120 	[LPORT_ST_RFT_ID] =   "RFT_ID",
121 	[LPORT_ST_SCR] =      "SCR",
122 	[LPORT_ST_READY] =    "Ready",
123 	[LPORT_ST_LOGO] =     "LOGO",
124 	[LPORT_ST_RESET] =    "reset",
125 };
126 
127 static int fc_frame_drop(struct fc_lport *lport, struct fc_frame *fp)
128 {
129 	fc_frame_free(fp);
130 	return 0;
131 }
132 
133 /**
134  * fc_lport_rport_callback() - Event handler for rport events
135  * @lport: The lport which is receiving the event
136  * @rdata: private remote port data
137  * @event: The event that occurred
138  *
139  * Locking Note: The rport lock should not be held when calling
140  *		 this function.
141  */
142 static void fc_lport_rport_callback(struct fc_lport *lport,
143 				    struct fc_rport_priv *rdata,
144 				    enum fc_rport_event event)
145 {
146 	FC_LPORT_DBG(lport, "Received a %d event for port (%6x)\n", event,
147 		     rdata->ids.port_id);
148 
149 	mutex_lock(&lport->lp_mutex);
150 	switch (event) {
151 	case RPORT_EV_READY:
152 		if (lport->state == LPORT_ST_DNS) {
153 			lport->dns_rp = rdata;
154 			fc_lport_enter_rpn_id(lport);
155 		} else {
156 			FC_LPORT_DBG(lport, "Received a READY event "
157 				     "on port (%6x) for the directory "
158 				     "server, but the lport is not "
159 				     "in the DNS state, it's in the "
160 				     "%d state", rdata->ids.port_id,
161 				     lport->state);
162 			lport->tt.rport_logoff(rdata);
163 		}
164 		break;
165 	case RPORT_EV_LOGO:
166 	case RPORT_EV_FAILED:
167 	case RPORT_EV_STOP:
168 		lport->dns_rp = NULL;
169 		break;
170 	case RPORT_EV_NONE:
171 		break;
172 	}
173 	mutex_unlock(&lport->lp_mutex);
174 }
175 
176 /**
177  * fc_lport_state() - Return a string which represents the lport's state
178  * @lport: The lport whose state is to be converted to a string
179  */
180 static const char *fc_lport_state(struct fc_lport *lport)
181 {
182 	const char *cp;
183 
184 	cp = fc_lport_state_names[lport->state];
185 	if (!cp)
186 		cp = "unknown";
187 	return cp;
188 }
189 
190 /**
191  * fc_lport_ptp_setup() - Create an rport for point-to-point mode
192  * @lport: The lport to attach the ptp rport to
193  * @remote_fid: The FID of the ptp rport
194  * @remote_wwpn: The WWPN of the ptp rport
195  * @remote_wwnn: The WWNN of the ptp rport
196  */
197 static void fc_lport_ptp_setup(struct fc_lport *lport,
198 			       u32 remote_fid, u64 remote_wwpn,
199 			       u64 remote_wwnn)
200 {
201 	mutex_lock(&lport->disc.disc_mutex);
202 	if (lport->ptp_rp)
203 		lport->tt.rport_logoff(lport->ptp_rp);
204 	lport->ptp_rp = lport->tt.rport_create(lport, remote_fid);
205 	lport->ptp_rp->ids.port_name = remote_wwpn;
206 	lport->ptp_rp->ids.node_name = remote_wwnn;
207 	mutex_unlock(&lport->disc.disc_mutex);
208 
209 	lport->tt.rport_login(lport->ptp_rp);
210 
211 	fc_lport_enter_ready(lport);
212 }
213 
214 void fc_get_host_port_type(struct Scsi_Host *shost)
215 {
216 	/* TODO - currently just NPORT */
217 	fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
218 }
219 EXPORT_SYMBOL(fc_get_host_port_type);
220 
221 void fc_get_host_port_state(struct Scsi_Host *shost)
222 {
223 	struct fc_lport *lp = shost_priv(shost);
224 
225 	if (lp->link_up)
226 		fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
227 	else
228 		fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
229 }
230 EXPORT_SYMBOL(fc_get_host_port_state);
231 
232 void fc_get_host_speed(struct Scsi_Host *shost)
233 {
234 	struct fc_lport *lport = shost_priv(shost);
235 
236 	fc_host_speed(shost) = lport->link_speed;
237 }
238 EXPORT_SYMBOL(fc_get_host_speed);
239 
240 struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *shost)
241 {
242 	struct fc_host_statistics *fcoe_stats;
243 	struct fc_lport *lp = shost_priv(shost);
244 	struct timespec v0, v1;
245 	unsigned int cpu;
246 
247 	fcoe_stats = &lp->host_stats;
248 	memset(fcoe_stats, 0, sizeof(struct fc_host_statistics));
249 
250 	jiffies_to_timespec(jiffies, &v0);
251 	jiffies_to_timespec(lp->boot_time, &v1);
252 	fcoe_stats->seconds_since_last_reset = (v0.tv_sec - v1.tv_sec);
253 
254 	for_each_possible_cpu(cpu) {
255 		struct fcoe_dev_stats *stats;
256 
257 		stats = per_cpu_ptr(lp->dev_stats, cpu);
258 
259 		fcoe_stats->tx_frames += stats->TxFrames;
260 		fcoe_stats->tx_words += stats->TxWords;
261 		fcoe_stats->rx_frames += stats->RxFrames;
262 		fcoe_stats->rx_words += stats->RxWords;
263 		fcoe_stats->error_frames += stats->ErrorFrames;
264 		fcoe_stats->invalid_crc_count += stats->InvalidCRCCount;
265 		fcoe_stats->fcp_input_requests += stats->InputRequests;
266 		fcoe_stats->fcp_output_requests += stats->OutputRequests;
267 		fcoe_stats->fcp_control_requests += stats->ControlRequests;
268 		fcoe_stats->fcp_input_megabytes += stats->InputMegabytes;
269 		fcoe_stats->fcp_output_megabytes += stats->OutputMegabytes;
270 		fcoe_stats->link_failure_count += stats->LinkFailureCount;
271 	}
272 	fcoe_stats->lip_count = -1;
273 	fcoe_stats->nos_count = -1;
274 	fcoe_stats->loss_of_sync_count = -1;
275 	fcoe_stats->loss_of_signal_count = -1;
276 	fcoe_stats->prim_seq_protocol_err_count = -1;
277 	fcoe_stats->dumped_frames = -1;
278 	return fcoe_stats;
279 }
280 EXPORT_SYMBOL(fc_get_host_stats);
281 
282 /*
283  * Fill in FLOGI command for request.
284  */
285 static void
286 fc_lport_flogi_fill(struct fc_lport *lport, struct fc_els_flogi *flogi,
287 		    unsigned int op)
288 {
289 	struct fc_els_csp *sp;
290 	struct fc_els_cssp *cp;
291 
292 	memset(flogi, 0, sizeof(*flogi));
293 	flogi->fl_cmd = (u8) op;
294 	put_unaligned_be64(lport->wwpn, &flogi->fl_wwpn);
295 	put_unaligned_be64(lport->wwnn, &flogi->fl_wwnn);
296 	sp = &flogi->fl_csp;
297 	sp->sp_hi_ver = 0x20;
298 	sp->sp_lo_ver = 0x20;
299 	sp->sp_bb_cred = htons(10);	/* this gets set by gateway */
300 	sp->sp_bb_data = htons((u16) lport->mfs);
301 	cp = &flogi->fl_cssp[3 - 1];	/* class 3 parameters */
302 	cp->cp_class = htons(FC_CPC_VALID | FC_CPC_SEQ);
303 	if (op != ELS_FLOGI) {
304 		sp->sp_features = htons(FC_SP_FT_CIRO);
305 		sp->sp_tot_seq = htons(255);	/* seq. we accept */
306 		sp->sp_rel_off = htons(0x1f);
307 		sp->sp_e_d_tov = htonl(lport->e_d_tov);
308 
309 		cp->cp_rdfs = htons((u16) lport->mfs);
310 		cp->cp_con_seq = htons(255);
311 		cp->cp_open_seq = 1;
312 	}
313 }
314 
315 /*
316  * Add a supported FC-4 type.
317  */
318 static void fc_lport_add_fc4_type(struct fc_lport *lport, enum fc_fh_type type)
319 {
320 	__be32 *mp;
321 
322 	mp = &lport->fcts.ff_type_map[type / FC_NS_BPW];
323 	*mp = htonl(ntohl(*mp) | 1UL << (type % FC_NS_BPW));
324 }
325 
326 /**
327  * fc_lport_recv_rlir_req() - Handle received Registered Link Incident Report.
328  * @lport: Fibre Channel local port receiving the RLIR
329  * @sp: current sequence in the RLIR exchange
330  * @fp: RLIR request frame
331  *
332  * Locking Note: The lport lock is expected to be held before calling
333  * this function.
334  */
335 static void fc_lport_recv_rlir_req(struct fc_seq *sp, struct fc_frame *fp,
336 				   struct fc_lport *lport)
337 {
338 	FC_LPORT_DBG(lport, "Received RLIR request while in state %s\n",
339 		     fc_lport_state(lport));
340 
341 	lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
342 	fc_frame_free(fp);
343 }
344 
345 /**
346  * fc_lport_recv_echo_req() - Handle received ECHO request
347  * @lport: Fibre Channel local port receiving the ECHO
348  * @sp: current sequence in the ECHO exchange
349  * @fp: ECHO request frame
350  *
351  * Locking Note: The lport lock is expected to be held before calling
352  * this function.
353  */
354 static void fc_lport_recv_echo_req(struct fc_seq *sp, struct fc_frame *in_fp,
355 				   struct fc_lport *lport)
356 {
357 	struct fc_frame *fp;
358 	struct fc_exch *ep = fc_seq_exch(sp);
359 	unsigned int len;
360 	void *pp;
361 	void *dp;
362 	u32 f_ctl;
363 
364 	FC_LPORT_DBG(lport, "Received ECHO request while in state %s\n",
365 		     fc_lport_state(lport));
366 
367 	len = fr_len(in_fp) - sizeof(struct fc_frame_header);
368 	pp = fc_frame_payload_get(in_fp, len);
369 
370 	if (len < sizeof(__be32))
371 		len = sizeof(__be32);
372 
373 	fp = fc_frame_alloc(lport, len);
374 	if (fp) {
375 		dp = fc_frame_payload_get(fp, len);
376 		memcpy(dp, pp, len);
377 		*((u32 *)dp) = htonl(ELS_LS_ACC << 24);
378 		sp = lport->tt.seq_start_next(sp);
379 		f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ;
380 		fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
381 			       FC_TYPE_ELS, f_ctl, 0);
382 		lport->tt.seq_send(lport, sp, fp);
383 	}
384 	fc_frame_free(in_fp);
385 }
386 
387 /**
388  * fc_lport_recv_rnid_req() - Handle received Request Node ID data request
389  * @lport: Fibre Channel local port receiving the RNID
390  * @sp: current sequence in the RNID exchange
391  * @fp: RNID request frame
392  *
393  * Locking Note: The lport lock is expected to be held before calling
394  * this function.
395  */
396 static void fc_lport_recv_rnid_req(struct fc_seq *sp, struct fc_frame *in_fp,
397 				   struct fc_lport *lport)
398 {
399 	struct fc_frame *fp;
400 	struct fc_exch *ep = fc_seq_exch(sp);
401 	struct fc_els_rnid *req;
402 	struct {
403 		struct fc_els_rnid_resp rnid;
404 		struct fc_els_rnid_cid cid;
405 		struct fc_els_rnid_gen gen;
406 	} *rp;
407 	struct fc_seq_els_data rjt_data;
408 	u8 fmt;
409 	size_t len;
410 	u32 f_ctl;
411 
412 	FC_LPORT_DBG(lport, "Received RNID request while in state %s\n",
413 		     fc_lport_state(lport));
414 
415 	req = fc_frame_payload_get(in_fp, sizeof(*req));
416 	if (!req) {
417 		rjt_data.fp = NULL;
418 		rjt_data.reason = ELS_RJT_LOGIC;
419 		rjt_data.explan = ELS_EXPL_NONE;
420 		lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
421 	} else {
422 		fmt = req->rnid_fmt;
423 		len = sizeof(*rp);
424 		if (fmt != ELS_RNIDF_GEN ||
425 		    ntohl(lport->rnid_gen.rnid_atype) == 0) {
426 			fmt = ELS_RNIDF_NONE;	/* nothing to provide */
427 			len -= sizeof(rp->gen);
428 		}
429 		fp = fc_frame_alloc(lport, len);
430 		if (fp) {
431 			rp = fc_frame_payload_get(fp, len);
432 			memset(rp, 0, len);
433 			rp->rnid.rnid_cmd = ELS_LS_ACC;
434 			rp->rnid.rnid_fmt = fmt;
435 			rp->rnid.rnid_cid_len = sizeof(rp->cid);
436 			rp->cid.rnid_wwpn = htonll(lport->wwpn);
437 			rp->cid.rnid_wwnn = htonll(lport->wwnn);
438 			if (fmt == ELS_RNIDF_GEN) {
439 				rp->rnid.rnid_sid_len = sizeof(rp->gen);
440 				memcpy(&rp->gen, &lport->rnid_gen,
441 				       sizeof(rp->gen));
442 			}
443 			sp = lport->tt.seq_start_next(sp);
444 			f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ;
445 			f_ctl |= FC_FC_END_SEQ | FC_FC_SEQ_INIT;
446 			fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
447 				       FC_TYPE_ELS, f_ctl, 0);
448 			lport->tt.seq_send(lport, sp, fp);
449 		}
450 	}
451 	fc_frame_free(in_fp);
452 }
453 
454 /**
455  * fc_lport_recv_logo_req() - Handle received fabric LOGO request
456  * @lport: Fibre Channel local port receiving the LOGO
457  * @sp: current sequence in the LOGO exchange
458  * @fp: LOGO request frame
459  *
460  * Locking Note: The lport lock is expected to be held before calling
461  * this function.
462  */
463 static void fc_lport_recv_logo_req(struct fc_seq *sp, struct fc_frame *fp,
464 				   struct fc_lport *lport)
465 {
466 	lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
467 	fc_lport_enter_reset(lport);
468 	fc_frame_free(fp);
469 }
470 
471 /**
472  * fc_fabric_login() - Start the lport state machine
473  * @lport: The lport that should log into the fabric
474  *
475  * Locking Note: This function should not be called
476  *		 with the lport lock held.
477  */
478 int fc_fabric_login(struct fc_lport *lport)
479 {
480 	int rc = -1;
481 
482 	mutex_lock(&lport->lp_mutex);
483 	if (lport->state == LPORT_ST_DISABLED) {
484 		fc_lport_enter_reset(lport);
485 		rc = 0;
486 	}
487 	mutex_unlock(&lport->lp_mutex);
488 
489 	return rc;
490 }
491 EXPORT_SYMBOL(fc_fabric_login);
492 
493 /**
494  * fc_linkup() - Handler for transport linkup events
495  * @lport: The lport whose link is up
496  */
497 void fc_linkup(struct fc_lport *lport)
498 {
499 	printk(KERN_INFO "libfc: Link up on port (%6x)\n",
500 	       fc_host_port_id(lport->host));
501 
502 	mutex_lock(&lport->lp_mutex);
503 	if (!lport->link_up) {
504 		lport->link_up = 1;
505 
506 		if (lport->state == LPORT_ST_RESET)
507 			fc_lport_enter_flogi(lport);
508 	}
509 	mutex_unlock(&lport->lp_mutex);
510 }
511 EXPORT_SYMBOL(fc_linkup);
512 
513 /**
514  * fc_linkdown() - Handler for transport linkdown events
515  * @lport: The lport whose link is down
516  */
517 void fc_linkdown(struct fc_lport *lport)
518 {
519 	mutex_lock(&lport->lp_mutex);
520 	printk(KERN_INFO "libfc: Link down on port (%6x)\n",
521 	       fc_host_port_id(lport->host));
522 
523 	if (lport->link_up) {
524 		lport->link_up = 0;
525 		fc_lport_enter_reset(lport);
526 		lport->tt.fcp_cleanup(lport);
527 	}
528 	mutex_unlock(&lport->lp_mutex);
529 }
530 EXPORT_SYMBOL(fc_linkdown);
531 
532 /**
533  * fc_fabric_logoff() - Logout of the fabric
534  * @lport:	      fc_lport pointer to logoff the fabric
535  *
536  * Return value:
537  *	0 for success, -1 for failure
538  */
539 int fc_fabric_logoff(struct fc_lport *lport)
540 {
541 	lport->tt.disc_stop_final(lport);
542 	mutex_lock(&lport->lp_mutex);
543 	if (lport->dns_rp)
544 		lport->tt.rport_logoff(lport->dns_rp);
545 	mutex_unlock(&lport->lp_mutex);
546 	lport->tt.rport_flush_queue();
547 	mutex_lock(&lport->lp_mutex);
548 	fc_lport_enter_logo(lport);
549 	mutex_unlock(&lport->lp_mutex);
550 	cancel_delayed_work_sync(&lport->retry_work);
551 	return 0;
552 }
553 EXPORT_SYMBOL(fc_fabric_logoff);
554 
555 /**
556  * fc_lport_destroy() - unregister a fc_lport
557  * @lport:	      fc_lport pointer to unregister
558  *
559  * Return value:
560  *	None
561  * Note:
562  * exit routine for fc_lport instance
563  * clean-up all the allocated memory
564  * and free up other system resources.
565  *
566  */
567 int fc_lport_destroy(struct fc_lport *lport)
568 {
569 	mutex_lock(&lport->lp_mutex);
570 	lport->state = LPORT_ST_DISABLED;
571 	lport->link_up = 0;
572 	lport->tt.frame_send = fc_frame_drop;
573 	mutex_unlock(&lport->lp_mutex);
574 
575 	lport->tt.fcp_abort_io(lport);
576 	lport->tt.disc_stop_final(lport);
577 	lport->tt.exch_mgr_reset(lport, 0, 0);
578 	return 0;
579 }
580 EXPORT_SYMBOL(fc_lport_destroy);
581 
582 /**
583  * fc_set_mfs() - sets up the mfs for the corresponding fc_lport
584  * @lport: fc_lport pointer to set mfs for
585  * @mfs: the new mfs for fc_lport
586  *
587  * Set mfs for the given fc_lport to the new mfs.
588  *
589  * Return: 0 for success
590  */
591 int fc_set_mfs(struct fc_lport *lport, u32 mfs)
592 {
593 	unsigned int old_mfs;
594 	int rc = -EINVAL;
595 
596 	mutex_lock(&lport->lp_mutex);
597 
598 	old_mfs = lport->mfs;
599 
600 	if (mfs >= FC_MIN_MAX_FRAME) {
601 		mfs &= ~3;
602 		if (mfs > FC_MAX_FRAME)
603 			mfs = FC_MAX_FRAME;
604 		mfs -= sizeof(struct fc_frame_header);
605 		lport->mfs = mfs;
606 		rc = 0;
607 	}
608 
609 	if (!rc && mfs < old_mfs)
610 		fc_lport_enter_reset(lport);
611 
612 	mutex_unlock(&lport->lp_mutex);
613 
614 	return rc;
615 }
616 EXPORT_SYMBOL(fc_set_mfs);
617 
618 /**
619  * fc_lport_disc_callback() - Callback for discovery events
620  * @lport: FC local port
621  * @event: The discovery event
622  */
623 void fc_lport_disc_callback(struct fc_lport *lport, enum fc_disc_event event)
624 {
625 	switch (event) {
626 	case DISC_EV_SUCCESS:
627 		FC_LPORT_DBG(lport, "Discovery succeeded\n");
628 		break;
629 	case DISC_EV_FAILED:
630 		printk(KERN_ERR "libfc: Discovery failed for port (%6x)\n",
631 		       fc_host_port_id(lport->host));
632 		mutex_lock(&lport->lp_mutex);
633 		fc_lport_enter_reset(lport);
634 		mutex_unlock(&lport->lp_mutex);
635 		break;
636 	case DISC_EV_NONE:
637 		WARN_ON(1);
638 		break;
639 	}
640 }
641 
642 /**
643  * fc_lport_enter_ready() - Enter the ready state and start discovery
644  * @lport: Fibre Channel local port that is ready
645  *
646  * Locking Note: The lport lock is expected to be held before calling
647  * this routine.
648  */
649 static void fc_lport_enter_ready(struct fc_lport *lport)
650 {
651 	FC_LPORT_DBG(lport, "Entered READY from state %s\n",
652 		     fc_lport_state(lport));
653 
654 	fc_lport_state_enter(lport, LPORT_ST_READY);
655 
656 	if (!lport->ptp_rp)
657 		lport->tt.disc_start(fc_lport_disc_callback, lport);
658 }
659 
660 /**
661  * fc_lport_recv_flogi_req() - Receive a FLOGI request
662  * @sp_in: The sequence the FLOGI is on
663  * @rx_fp: The frame the FLOGI is in
664  * @lport: The lport that received the request
665  *
666  * A received FLOGI request indicates a point-to-point connection.
667  * Accept it with the common service parameters indicating our N port.
668  * Set up to do a PLOGI if we have the higher-number WWPN.
669  *
670  * Locking Note: The lport lock is expected to be held before calling
671  * this function.
672  */
673 static void fc_lport_recv_flogi_req(struct fc_seq *sp_in,
674 				    struct fc_frame *rx_fp,
675 				    struct fc_lport *lport)
676 {
677 	struct fc_frame *fp;
678 	struct fc_frame_header *fh;
679 	struct fc_seq *sp;
680 	struct fc_exch *ep;
681 	struct fc_els_flogi *flp;
682 	struct fc_els_flogi *new_flp;
683 	u64 remote_wwpn;
684 	u32 remote_fid;
685 	u32 local_fid;
686 	u32 f_ctl;
687 
688 	FC_LPORT_DBG(lport, "Received FLOGI request while in state %s\n",
689 		     fc_lport_state(lport));
690 
691 	fh = fc_frame_header_get(rx_fp);
692 	remote_fid = ntoh24(fh->fh_s_id);
693 	flp = fc_frame_payload_get(rx_fp, sizeof(*flp));
694 	if (!flp)
695 		goto out;
696 	remote_wwpn = get_unaligned_be64(&flp->fl_wwpn);
697 	if (remote_wwpn == lport->wwpn) {
698 		printk(KERN_WARNING "libfc: Received FLOGI from port "
699 		       "with same WWPN %llx\n", remote_wwpn);
700 		goto out;
701 	}
702 	FC_LPORT_DBG(lport, "FLOGI from port WWPN %llx\n", remote_wwpn);
703 
704 	/*
705 	 * XXX what is the right thing to do for FIDs?
706 	 * The originator might expect our S_ID to be 0xfffffe.
707 	 * But if so, both of us could end up with the same FID.
708 	 */
709 	local_fid = FC_LOCAL_PTP_FID_LO;
710 	if (remote_wwpn < lport->wwpn) {
711 		local_fid = FC_LOCAL_PTP_FID_HI;
712 		if (!remote_fid || remote_fid == local_fid)
713 			remote_fid = FC_LOCAL_PTP_FID_LO;
714 	} else if (!remote_fid) {
715 		remote_fid = FC_LOCAL_PTP_FID_HI;
716 	}
717 
718 	fc_host_port_id(lport->host) = local_fid;
719 
720 	fp = fc_frame_alloc(lport, sizeof(*flp));
721 	if (fp) {
722 		sp = lport->tt.seq_start_next(fr_seq(rx_fp));
723 		new_flp = fc_frame_payload_get(fp, sizeof(*flp));
724 		fc_lport_flogi_fill(lport, new_flp, ELS_FLOGI);
725 		new_flp->fl_cmd = (u8) ELS_LS_ACC;
726 
727 		/*
728 		 * Send the response.  If this fails, the originator should
729 		 * repeat the sequence.
730 		 */
731 		f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ;
732 		ep = fc_seq_exch(sp);
733 		fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
734 			       FC_TYPE_ELS, f_ctl, 0);
735 		lport->tt.seq_send(lport, sp, fp);
736 
737 	} else {
738 		fc_lport_error(lport, fp);
739 	}
740 	fc_lport_ptp_setup(lport, remote_fid, remote_wwpn,
741 			   get_unaligned_be64(&flp->fl_wwnn));
742 
743 out:
744 	sp = fr_seq(rx_fp);
745 	fc_frame_free(rx_fp);
746 }
747 
748 /**
749  * fc_lport_recv_req() - The generic lport request handler
750  * @lport: The lport that received the request
751  * @sp: The sequence the request is on
752  * @fp: The frame the request is in
753  *
754  * This function will see if the lport handles the request or
755  * if an rport should handle the request.
756  *
757  * Locking Note: This function should not be called with the lport
758  *		 lock held because it will grab the lock.
759  */
760 static void fc_lport_recv_req(struct fc_lport *lport, struct fc_seq *sp,
761 			      struct fc_frame *fp)
762 {
763 	struct fc_frame_header *fh = fc_frame_header_get(fp);
764 	void (*recv) (struct fc_seq *, struct fc_frame *, struct fc_lport *);
765 
766 	mutex_lock(&lport->lp_mutex);
767 
768 	/*
769 	 * Handle special ELS cases like FLOGI, LOGO, and
770 	 * RSCN here.  These don't require a session.
771 	 * Even if we had a session, it might not be ready.
772 	 */
773 	if (!lport->link_up)
774 		fc_frame_free(fp);
775 	else if (fh->fh_type == FC_TYPE_ELS &&
776 		 fh->fh_r_ctl == FC_RCTL_ELS_REQ) {
777 		/*
778 		 * Check opcode.
779 		 */
780 		recv = lport->tt.rport_recv_req;
781 		switch (fc_frame_payload_op(fp)) {
782 		case ELS_FLOGI:
783 			recv = fc_lport_recv_flogi_req;
784 			break;
785 		case ELS_LOGO:
786 			fh = fc_frame_header_get(fp);
787 			if (ntoh24(fh->fh_s_id) == FC_FID_FLOGI)
788 				recv = fc_lport_recv_logo_req;
789 			break;
790 		case ELS_RSCN:
791 			recv = lport->tt.disc_recv_req;
792 			break;
793 		case ELS_ECHO:
794 			recv = fc_lport_recv_echo_req;
795 			break;
796 		case ELS_RLIR:
797 			recv = fc_lport_recv_rlir_req;
798 			break;
799 		case ELS_RNID:
800 			recv = fc_lport_recv_rnid_req;
801 			break;
802 		}
803 
804 		recv(sp, fp, lport);
805 	} else {
806 		FC_LPORT_DBG(lport, "dropping invalid frame (eof %x)\n",
807 			     fr_eof(fp));
808 		fc_frame_free(fp);
809 	}
810 	mutex_unlock(&lport->lp_mutex);
811 
812 	/*
813 	 *  The common exch_done for all requests may not be good
814 	 *  if any request requires a longer hold on the exchange. XXX
815 	 */
816 	lport->tt.exch_done(sp);
817 }
818 
819 /**
820  * fc_lport_reset() - Reset an lport
821  * @lport: The lport which should be reset
822  *
823  * Locking Note: This function should not be called with the
824  *		 lport lock held.
825  */
826 int fc_lport_reset(struct fc_lport *lport)
827 {
828 	cancel_delayed_work_sync(&lport->retry_work);
829 	mutex_lock(&lport->lp_mutex);
830 	fc_lport_enter_reset(lport);
831 	mutex_unlock(&lport->lp_mutex);
832 	return 0;
833 }
834 EXPORT_SYMBOL(fc_lport_reset);
835 
836 /**
837  * fc_lport_reset_locked() - Reset the local port
838  * @lport: Fibre Channel local port to be reset
839  *
840  * Locking Note: The lport lock is expected to be held before calling
841  * this routine.
842  */
843 static void fc_lport_reset_locked(struct fc_lport *lport)
844 {
845 	if (lport->dns_rp)
846 		lport->tt.rport_logoff(lport->dns_rp);
847 
848 	lport->ptp_rp = NULL;
849 
850 	lport->tt.disc_stop(lport);
851 
852 	lport->tt.exch_mgr_reset(lport, 0, 0);
853 	fc_host_fabric_name(lport->host) = 0;
854 	fc_host_port_id(lport->host) = 0;
855 }
856 
857 /**
858  * fc_lport_enter_reset() - Reset the local port
859  * @lport: Fibre Channel local port to be reset
860  *
861  * Locking Note: The lport lock is expected to be held before calling
862  * this routine.
863  */
864 static void fc_lport_enter_reset(struct fc_lport *lport)
865 {
866 	FC_LPORT_DBG(lport, "Entered RESET state from %s state\n",
867 		     fc_lport_state(lport));
868 
869 	fc_lport_state_enter(lport, LPORT_ST_RESET);
870 	fc_lport_reset_locked(lport);
871 	if (lport->link_up)
872 		fc_lport_enter_flogi(lport);
873 }
874 
875 /**
876  * fc_lport_enter_disabled() - disable the local port
877  * @lport: Fibre Channel local port to be reset
878  *
879  * Locking Note: The lport lock is expected to be held before calling
880  * this routine.
881  */
882 static void fc_lport_enter_disabled(struct fc_lport *lport)
883 {
884 	FC_LPORT_DBG(lport, "Entered disabled state from %s state\n",
885 		     fc_lport_state(lport));
886 
887 	fc_lport_state_enter(lport, LPORT_ST_DISABLED);
888 	fc_lport_reset_locked(lport);
889 }
890 
891 /**
892  * fc_lport_error() - Handler for any errors
893  * @lport: The fc_lport object
894  * @fp: The frame pointer
895  *
896  * If the error was caused by a resource allocation failure
897  * then wait for half a second and retry, otherwise retry
898  * after the e_d_tov time.
899  */
900 static void fc_lport_error(struct fc_lport *lport, struct fc_frame *fp)
901 {
902 	unsigned long delay = 0;
903 	FC_LPORT_DBG(lport, "Error %ld in state %s, retries %d\n",
904 		     PTR_ERR(fp), fc_lport_state(lport),
905 		     lport->retry_count);
906 
907 	if (!fp || PTR_ERR(fp) == -FC_EX_TIMEOUT) {
908 		/*
909 		 * Memory allocation failure, or the exchange timed out.
910 		 *  Retry after delay
911 		 */
912 		if (lport->retry_count < lport->max_retry_count) {
913 			lport->retry_count++;
914 			if (!fp)
915 				delay = msecs_to_jiffies(500);
916 			else
917 				delay =	msecs_to_jiffies(lport->e_d_tov);
918 
919 			schedule_delayed_work(&lport->retry_work, delay);
920 		} else {
921 			switch (lport->state) {
922 			case LPORT_ST_DISABLED:
923 			case LPORT_ST_READY:
924 			case LPORT_ST_RESET:
925 			case LPORT_ST_RPN_ID:
926 			case LPORT_ST_RFT_ID:
927 			case LPORT_ST_SCR:
928 			case LPORT_ST_DNS:
929 			case LPORT_ST_FLOGI:
930 			case LPORT_ST_LOGO:
931 				fc_lport_enter_reset(lport);
932 				break;
933 			}
934 		}
935 	}
936 }
937 
938 /**
939  * fc_lport_rft_id_resp() - Handle response to Register Fibre
940  *			    Channel Types by ID (RFT_ID) request
941  * @sp: current sequence in RFT_ID exchange
942  * @fp: response frame
943  * @lp_arg: Fibre Channel host port instance
944  *
945  * Locking Note: This function will be called without the lport lock
946  * held, but it will lock, call an _enter_* function or fc_lport_error
947  * and then unlock the lport.
948  */
949 static void fc_lport_rft_id_resp(struct fc_seq *sp, struct fc_frame *fp,
950 				 void *lp_arg)
951 {
952 	struct fc_lport *lport = lp_arg;
953 	struct fc_frame_header *fh;
954 	struct fc_ct_hdr *ct;
955 
956 	FC_LPORT_DBG(lport, "Received a RFT_ID %s\n", fc_els_resp_type(fp));
957 
958 	if (fp == ERR_PTR(-FC_EX_CLOSED))
959 		return;
960 
961 	mutex_lock(&lport->lp_mutex);
962 
963 	if (lport->state != LPORT_ST_RFT_ID) {
964 		FC_LPORT_DBG(lport, "Received a RFT_ID response, but in state "
965 			     "%s\n", fc_lport_state(lport));
966 		if (IS_ERR(fp))
967 			goto err;
968 		goto out;
969 	}
970 
971 	if (IS_ERR(fp)) {
972 		fc_lport_error(lport, fp);
973 		goto err;
974 	}
975 
976 	fh = fc_frame_header_get(fp);
977 	ct = fc_frame_payload_get(fp, sizeof(*ct));
978 
979 	if (fh && ct && fh->fh_type == FC_TYPE_CT &&
980 	    ct->ct_fs_type == FC_FST_DIR &&
981 	    ct->ct_fs_subtype == FC_NS_SUBTYPE &&
982 	    ntohs(ct->ct_cmd) == FC_FS_ACC)
983 		fc_lport_enter_scr(lport);
984 	else
985 		fc_lport_error(lport, fp);
986 out:
987 	fc_frame_free(fp);
988 err:
989 	mutex_unlock(&lport->lp_mutex);
990 }
991 
992 /**
993  * fc_lport_rpn_id_resp() - Handle response to Register Port
994  *			    Name by ID (RPN_ID) request
995  * @sp: current sequence in RPN_ID exchange
996  * @fp: response frame
997  * @lp_arg: Fibre Channel host port instance
998  *
999  * Locking Note: This function will be called without the lport lock
1000  * held, but it will lock, call an _enter_* function or fc_lport_error
1001  * and then unlock the lport.
1002  */
1003 static void fc_lport_rpn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
1004 				 void *lp_arg)
1005 {
1006 	struct fc_lport *lport = lp_arg;
1007 	struct fc_frame_header *fh;
1008 	struct fc_ct_hdr *ct;
1009 
1010 	FC_LPORT_DBG(lport, "Received a RPN_ID %s\n", fc_els_resp_type(fp));
1011 
1012 	if (fp == ERR_PTR(-FC_EX_CLOSED))
1013 		return;
1014 
1015 	mutex_lock(&lport->lp_mutex);
1016 
1017 	if (lport->state != LPORT_ST_RPN_ID) {
1018 		FC_LPORT_DBG(lport, "Received a RPN_ID response, but in state "
1019 			     "%s\n", fc_lport_state(lport));
1020 		if (IS_ERR(fp))
1021 			goto err;
1022 		goto out;
1023 	}
1024 
1025 	if (IS_ERR(fp)) {
1026 		fc_lport_error(lport, fp);
1027 		goto err;
1028 	}
1029 
1030 	fh = fc_frame_header_get(fp);
1031 	ct = fc_frame_payload_get(fp, sizeof(*ct));
1032 	if (fh && ct && fh->fh_type == FC_TYPE_CT &&
1033 	    ct->ct_fs_type == FC_FST_DIR &&
1034 	    ct->ct_fs_subtype == FC_NS_SUBTYPE &&
1035 	    ntohs(ct->ct_cmd) == FC_FS_ACC)
1036 		fc_lport_enter_rft_id(lport);
1037 	else
1038 		fc_lport_error(lport, fp);
1039 
1040 out:
1041 	fc_frame_free(fp);
1042 err:
1043 	mutex_unlock(&lport->lp_mutex);
1044 }
1045 
1046 /**
1047  * fc_lport_scr_resp() - Handle response to State Change Register (SCR) request
1048  * @sp: current sequence in SCR exchange
1049  * @fp: response frame
1050  * @lp_arg: Fibre Channel local port instance that sent the registration request
1051  *
1052  * Locking Note: This function will be called without the lport lock
1053  * held, but it will lock, call an _enter_* function or fc_lport_error
1054  * and then unlock the lport.
1055  */
1056 static void fc_lport_scr_resp(struct fc_seq *sp, struct fc_frame *fp,
1057 			      void *lp_arg)
1058 {
1059 	struct fc_lport *lport = lp_arg;
1060 	u8 op;
1061 
1062 	FC_LPORT_DBG(lport, "Received a SCR %s\n", fc_els_resp_type(fp));
1063 
1064 	if (fp == ERR_PTR(-FC_EX_CLOSED))
1065 		return;
1066 
1067 	mutex_lock(&lport->lp_mutex);
1068 
1069 	if (lport->state != LPORT_ST_SCR) {
1070 		FC_LPORT_DBG(lport, "Received a SCR response, but in state "
1071 			     "%s\n", fc_lport_state(lport));
1072 		if (IS_ERR(fp))
1073 			goto err;
1074 		goto out;
1075 	}
1076 
1077 	if (IS_ERR(fp)) {
1078 		fc_lport_error(lport, fp);
1079 		goto err;
1080 	}
1081 
1082 	op = fc_frame_payload_op(fp);
1083 	if (op == ELS_LS_ACC)
1084 		fc_lport_enter_ready(lport);
1085 	else
1086 		fc_lport_error(lport, fp);
1087 
1088 out:
1089 	fc_frame_free(fp);
1090 err:
1091 	mutex_unlock(&lport->lp_mutex);
1092 }
1093 
1094 /**
1095  * fc_lport_enter_scr() - Send a State Change Register (SCR) request
1096  * @lport: Fibre Channel local port to register for state changes
1097  *
1098  * Locking Note: The lport lock is expected to be held before calling
1099  * this routine.
1100  */
1101 static void fc_lport_enter_scr(struct fc_lport *lport)
1102 {
1103 	struct fc_frame *fp;
1104 
1105 	FC_LPORT_DBG(lport, "Entered SCR state from %s state\n",
1106 		     fc_lport_state(lport));
1107 
1108 	fc_lport_state_enter(lport, LPORT_ST_SCR);
1109 
1110 	fp = fc_frame_alloc(lport, sizeof(struct fc_els_scr));
1111 	if (!fp) {
1112 		fc_lport_error(lport, fp);
1113 		return;
1114 	}
1115 
1116 	if (!lport->tt.elsct_send(lport, FC_FID_FCTRL, fp, ELS_SCR,
1117 				  fc_lport_scr_resp, lport, lport->e_d_tov))
1118 		fc_lport_error(lport, fp);
1119 }
1120 
1121 /**
1122  * fc_lport_enter_rft_id() - Register FC4-types with the name server
1123  * @lport: Fibre Channel local port to register
1124  *
1125  * Locking Note: The lport lock is expected to be held before calling
1126  * this routine.
1127  */
1128 static void fc_lport_enter_rft_id(struct fc_lport *lport)
1129 {
1130 	struct fc_frame *fp;
1131 	struct fc_ns_fts *lps;
1132 	int i;
1133 
1134 	FC_LPORT_DBG(lport, "Entered RFT_ID state from %s state\n",
1135 		     fc_lport_state(lport));
1136 
1137 	fc_lport_state_enter(lport, LPORT_ST_RFT_ID);
1138 
1139 	lps = &lport->fcts;
1140 	i = sizeof(lps->ff_type_map) / sizeof(lps->ff_type_map[0]);
1141 	while (--i >= 0)
1142 		if (ntohl(lps->ff_type_map[i]) != 0)
1143 			break;
1144 	if (i < 0) {
1145 		/* nothing to register, move on to SCR */
1146 		fc_lport_enter_scr(lport);
1147 		return;
1148 	}
1149 
1150 	fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) +
1151 			    sizeof(struct fc_ns_rft));
1152 	if (!fp) {
1153 		fc_lport_error(lport, fp);
1154 		return;
1155 	}
1156 
1157 	if (!lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, FC_NS_RFT_ID,
1158 				  fc_lport_rft_id_resp,
1159 				  lport, lport->e_d_tov))
1160 		fc_lport_error(lport, fp);
1161 }
1162 
1163 /**
1164  * fc_lport_enter_rpn_id() - Register port name with the name server
1165  * @lport: Fibre Channel local port to register
1166  *
1167  * Locking Note: The lport lock is expected to be held before calling
1168  * this routine.
1169  */
1170 static void fc_lport_enter_rpn_id(struct fc_lport *lport)
1171 {
1172 	struct fc_frame *fp;
1173 
1174 	FC_LPORT_DBG(lport, "Entered RPN_ID state from %s state\n",
1175 		     fc_lport_state(lport));
1176 
1177 	fc_lport_state_enter(lport, LPORT_ST_RPN_ID);
1178 
1179 	fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) +
1180 			    sizeof(struct fc_ns_rn_id));
1181 	if (!fp) {
1182 		fc_lport_error(lport, fp);
1183 		return;
1184 	}
1185 
1186 	if (!lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, FC_NS_RPN_ID,
1187 				  fc_lport_rpn_id_resp,
1188 				  lport, lport->e_d_tov))
1189 		fc_lport_error(lport, fp);
1190 }
1191 
1192 static struct fc_rport_operations fc_lport_rport_ops = {
1193 	.event_callback = fc_lport_rport_callback,
1194 };
1195 
1196 /**
1197  * fc_lport_enter_dns() - Create an rport to the name server
1198  * @lport: Fibre Channel local port requesting a rport for the name server
1199  *
1200  * Locking Note: The lport lock is expected to be held before calling
1201  * this routine.
1202  */
1203 static void fc_lport_enter_dns(struct fc_lport *lport)
1204 {
1205 	struct fc_rport_priv *rdata;
1206 
1207 	FC_LPORT_DBG(lport, "Entered DNS state from %s state\n",
1208 		     fc_lport_state(lport));
1209 
1210 	fc_lport_state_enter(lport, LPORT_ST_DNS);
1211 
1212 	mutex_lock(&lport->disc.disc_mutex);
1213 	rdata = lport->tt.rport_create(lport, FC_FID_DIR_SERV);
1214 	mutex_unlock(&lport->disc.disc_mutex);
1215 	if (!rdata)
1216 		goto err;
1217 
1218 	rdata->ops = &fc_lport_rport_ops;
1219 	lport->tt.rport_login(rdata);
1220 	return;
1221 
1222 err:
1223 	fc_lport_error(lport, NULL);
1224 }
1225 
1226 /**
1227  * fc_lport_timeout() - Handler for the retry_work timer.
1228  * @work: The work struct of the fc_lport
1229  */
1230 static void fc_lport_timeout(struct work_struct *work)
1231 {
1232 	struct fc_lport *lport =
1233 		container_of(work, struct fc_lport,
1234 			     retry_work.work);
1235 
1236 	mutex_lock(&lport->lp_mutex);
1237 
1238 	switch (lport->state) {
1239 	case LPORT_ST_DISABLED:
1240 	case LPORT_ST_READY:
1241 	case LPORT_ST_RESET:
1242 		WARN_ON(1);
1243 		break;
1244 	case LPORT_ST_FLOGI:
1245 		fc_lport_enter_flogi(lport);
1246 		break;
1247 	case LPORT_ST_DNS:
1248 		fc_lport_enter_dns(lport);
1249 		break;
1250 	case LPORT_ST_RPN_ID:
1251 		fc_lport_enter_rpn_id(lport);
1252 		break;
1253 	case LPORT_ST_RFT_ID:
1254 		fc_lport_enter_rft_id(lport);
1255 		break;
1256 	case LPORT_ST_SCR:
1257 		fc_lport_enter_scr(lport);
1258 		break;
1259 	case LPORT_ST_LOGO:
1260 		fc_lport_enter_logo(lport);
1261 		break;
1262 	}
1263 
1264 	mutex_unlock(&lport->lp_mutex);
1265 }
1266 
1267 /**
1268  * fc_lport_logo_resp() - Handle response to LOGO request
1269  * @sp: current sequence in LOGO exchange
1270  * @fp: response frame
1271  * @lp_arg: Fibre Channel local port instance that sent the LOGO request
1272  *
1273  * Locking Note: This function will be called without the lport lock
1274  * held, but it will lock, call an _enter_* function or fc_lport_error
1275  * and then unlock the lport.
1276  */
1277 static void fc_lport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
1278 			       void *lp_arg)
1279 {
1280 	struct fc_lport *lport = lp_arg;
1281 	u8 op;
1282 
1283 	FC_LPORT_DBG(lport, "Received a LOGO %s\n", fc_els_resp_type(fp));
1284 
1285 	if (fp == ERR_PTR(-FC_EX_CLOSED))
1286 		return;
1287 
1288 	mutex_lock(&lport->lp_mutex);
1289 
1290 	if (lport->state != LPORT_ST_LOGO) {
1291 		FC_LPORT_DBG(lport, "Received a LOGO response, but in state "
1292 			     "%s\n", fc_lport_state(lport));
1293 		if (IS_ERR(fp))
1294 			goto err;
1295 		goto out;
1296 	}
1297 
1298 	if (IS_ERR(fp)) {
1299 		fc_lport_error(lport, fp);
1300 		goto err;
1301 	}
1302 
1303 	op = fc_frame_payload_op(fp);
1304 	if (op == ELS_LS_ACC)
1305 		fc_lport_enter_disabled(lport);
1306 	else
1307 		fc_lport_error(lport, fp);
1308 
1309 out:
1310 	fc_frame_free(fp);
1311 err:
1312 	mutex_unlock(&lport->lp_mutex);
1313 }
1314 
1315 /**
1316  * fc_lport_enter_logo() - Logout of the fabric
1317  * @lport: Fibre Channel local port to be logged out
1318  *
1319  * Locking Note: The lport lock is expected to be held before calling
1320  * this routine.
1321  */
1322 static void fc_lport_enter_logo(struct fc_lport *lport)
1323 {
1324 	struct fc_frame *fp;
1325 	struct fc_els_logo *logo;
1326 
1327 	FC_LPORT_DBG(lport, "Entered LOGO state from %s state\n",
1328 		     fc_lport_state(lport));
1329 
1330 	fc_lport_state_enter(lport, LPORT_ST_LOGO);
1331 
1332 	fp = fc_frame_alloc(lport, sizeof(*logo));
1333 	if (!fp) {
1334 		fc_lport_error(lport, fp);
1335 		return;
1336 	}
1337 
1338 	if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp, ELS_LOGO,
1339 				  fc_lport_logo_resp, lport, lport->e_d_tov))
1340 		fc_lport_error(lport, fp);
1341 }
1342 
1343 /**
1344  * fc_lport_flogi_resp() - Handle response to FLOGI request
1345  * @sp: current sequence in FLOGI exchange
1346  * @fp: response frame
1347  * @lp_arg: Fibre Channel local port instance that sent the FLOGI request
1348  *
1349  * Locking Note: This function will be called without the lport lock
1350  * held, but it will lock, call an _enter_* function or fc_lport_error
1351  * and then unlock the lport.
1352  */
1353 static void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
1354 				void *lp_arg)
1355 {
1356 	struct fc_lport *lport = lp_arg;
1357 	struct fc_frame_header *fh;
1358 	struct fc_els_flogi *flp;
1359 	u32 did;
1360 	u16 csp_flags;
1361 	unsigned int r_a_tov;
1362 	unsigned int e_d_tov;
1363 	u16 mfs;
1364 
1365 	FC_LPORT_DBG(lport, "Received a FLOGI %s\n", fc_els_resp_type(fp));
1366 
1367 	if (fp == ERR_PTR(-FC_EX_CLOSED))
1368 		return;
1369 
1370 	mutex_lock(&lport->lp_mutex);
1371 
1372 	if (lport->state != LPORT_ST_FLOGI) {
1373 		FC_LPORT_DBG(lport, "Received a FLOGI response, but in state "
1374 			     "%s\n", fc_lport_state(lport));
1375 		if (IS_ERR(fp))
1376 			goto err;
1377 		goto out;
1378 	}
1379 
1380 	if (IS_ERR(fp)) {
1381 		fc_lport_error(lport, fp);
1382 		goto err;
1383 	}
1384 
1385 	fh = fc_frame_header_get(fp);
1386 	did = ntoh24(fh->fh_d_id);
1387 	if (fc_frame_payload_op(fp) == ELS_LS_ACC && did != 0) {
1388 
1389 		printk(KERN_INFO "libfc: Assigned FID (%6x) in FLOGI response\n",
1390 		       did);
1391 		fc_host_port_id(lport->host) = did;
1392 
1393 		flp = fc_frame_payload_get(fp, sizeof(*flp));
1394 		if (flp) {
1395 			mfs = ntohs(flp->fl_csp.sp_bb_data) &
1396 				FC_SP_BB_DATA_MASK;
1397 			if (mfs >= FC_SP_MIN_MAX_PAYLOAD &&
1398 			    mfs < lport->mfs)
1399 				lport->mfs = mfs;
1400 			csp_flags = ntohs(flp->fl_csp.sp_features);
1401 			r_a_tov = ntohl(flp->fl_csp.sp_r_a_tov);
1402 			e_d_tov = ntohl(flp->fl_csp.sp_e_d_tov);
1403 			if (csp_flags & FC_SP_FT_EDTR)
1404 				e_d_tov /= 1000000;
1405 			if ((csp_flags & FC_SP_FT_FPORT) == 0) {
1406 				if (e_d_tov > lport->e_d_tov)
1407 					lport->e_d_tov = e_d_tov;
1408 				lport->r_a_tov = 2 * e_d_tov;
1409 				printk(KERN_INFO "libfc: Port (%6x) entered "
1410 				       "point to point mode\n", did);
1411 				fc_lport_ptp_setup(lport, ntoh24(fh->fh_s_id),
1412 						   get_unaligned_be64(
1413 							   &flp->fl_wwpn),
1414 						   get_unaligned_be64(
1415 							   &flp->fl_wwnn));
1416 			} else {
1417 				lport->e_d_tov = e_d_tov;
1418 				lport->r_a_tov = r_a_tov;
1419 				fc_host_fabric_name(lport->host) =
1420 					get_unaligned_be64(&flp->fl_wwnn);
1421 				fc_lport_enter_dns(lport);
1422 			}
1423 		}
1424 	} else {
1425 		FC_LPORT_DBG(lport, "Bad FLOGI response\n");
1426 	}
1427 
1428 out:
1429 	fc_frame_free(fp);
1430 err:
1431 	mutex_unlock(&lport->lp_mutex);
1432 }
1433 
1434 /**
1435  * fc_lport_enter_flogi() - Send a FLOGI request to the fabric manager
1436  * @lport: Fibre Channel local port to be logged in to the fabric
1437  *
1438  * Locking Note: The lport lock is expected to be held before calling
1439  * this routine.
1440  */
1441 void fc_lport_enter_flogi(struct fc_lport *lport)
1442 {
1443 	struct fc_frame *fp;
1444 
1445 	FC_LPORT_DBG(lport, "Entered FLOGI state from %s state\n",
1446 		     fc_lport_state(lport));
1447 
1448 	fc_lport_state_enter(lport, LPORT_ST_FLOGI);
1449 
1450 	fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
1451 	if (!fp)
1452 		return fc_lport_error(lport, fp);
1453 
1454 	if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp, ELS_FLOGI,
1455 				  fc_lport_flogi_resp, lport, lport->e_d_tov))
1456 		fc_lport_error(lport, fp);
1457 }
1458 
1459 /* Configure a fc_lport */
1460 int fc_lport_config(struct fc_lport *lport)
1461 {
1462 	INIT_DELAYED_WORK(&lport->retry_work, fc_lport_timeout);
1463 	mutex_init(&lport->lp_mutex);
1464 
1465 	fc_lport_state_enter(lport, LPORT_ST_DISABLED);
1466 
1467 	fc_lport_add_fc4_type(lport, FC_TYPE_FCP);
1468 	fc_lport_add_fc4_type(lport, FC_TYPE_CT);
1469 
1470 	return 0;
1471 }
1472 EXPORT_SYMBOL(fc_lport_config);
1473 
1474 int fc_lport_init(struct fc_lport *lport)
1475 {
1476 	if (!lport->tt.lport_recv)
1477 		lport->tt.lport_recv = fc_lport_recv_req;
1478 
1479 	if (!lport->tt.lport_reset)
1480 		lport->tt.lport_reset = fc_lport_reset;
1481 
1482 	fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
1483 	fc_host_node_name(lport->host) = lport->wwnn;
1484 	fc_host_port_name(lport->host) = lport->wwpn;
1485 	fc_host_supported_classes(lport->host) = FC_COS_CLASS3;
1486 	memset(fc_host_supported_fc4s(lport->host), 0,
1487 	       sizeof(fc_host_supported_fc4s(lport->host)));
1488 	fc_host_supported_fc4s(lport->host)[2] = 1;
1489 	fc_host_supported_fc4s(lport->host)[7] = 1;
1490 
1491 	/* This value is also unchanging */
1492 	memset(fc_host_active_fc4s(lport->host), 0,
1493 	       sizeof(fc_host_active_fc4s(lport->host)));
1494 	fc_host_active_fc4s(lport->host)[2] = 1;
1495 	fc_host_active_fc4s(lport->host)[7] = 1;
1496 	fc_host_maxframe_size(lport->host) = lport->mfs;
1497 	fc_host_supported_speeds(lport->host) = 0;
1498 	if (lport->link_supported_speeds & FC_PORTSPEED_1GBIT)
1499 		fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_1GBIT;
1500 	if (lport->link_supported_speeds & FC_PORTSPEED_10GBIT)
1501 		fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_10GBIT;
1502 
1503 	INIT_LIST_HEAD(&lport->ema_list);
1504 	return 0;
1505 }
1506 EXPORT_SYMBOL(fc_lport_init);
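
/*
 * USAGE SKETCH (not part of this file)
 *
 * A rough, hedged outline of how a lower-level driver might drive the
 * exported entry points above; the allocation of the Scsi_Host/fc_lport,
 * the transport template setup and the WWN assignment are assumed to have
 * been done by the caller and are elided here:
 *
 *	fc_lport_config(lport);		// mutex, retry work, FC-4 types
 *	fc_lport_init(lport);		// fc_host attributes, default handlers
 *	fc_fabric_login(lport);		// arm the state machine (DISABLED -> RESET)
 *	fc_linkup(lport);		// link up: kicks off FLOGI when in RESET
 *	...
 *	fc_fabric_logoff(lport);	// LOGO from the fabric on teardown
 *	fc_lport_destroy(lport);	// final cleanup
 */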
1507