xref: /openbmc/linux/drivers/scsi/libfc/fc_lport.c (revision 24f089e2)
1 /*
2  * Copyright(c) 2007 Intel Corporation. All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms and conditions of the GNU General Public License,
6  * version 2, as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope it will be useful, but WITHOUT
9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
11  * more details.
12  *
13  * You should have received a copy of the GNU General Public License along with
14  * this program; if not, write to the Free Software Foundation, Inc.,
15  * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16  *
17  * Maintained at www.Open-FCoE.org
18  */
19 
20 /*
21  * PORT LOCKING NOTES
22  *
23  * These comments only apply to the 'port code' which consists of the lport,
24  * disc and rport blocks.
25  *
26  * MOTIVATION
27  *
28  * The lport, disc and rport blocks all have mutexes that are used to protect
29  * those objects. The main motivation for these locks is to prevent from
30  * having an lport reset just before we send a frame. In that scenario the
31  * lport's FID would get set to zero and then we'd send a frame with an
32  * invalid SID. We also need to ensure that states don't change unexpectedly
33  * while processing another state.
34  *
35  * HIERARCHY
36  *
37  * The following hierarchy defines the locking rules. A greater lock
38  * may be held before acquiring a lesser lock, but a lesser lock should never
39  * be held while attempting to acquire a greater lock. Here is the hierarchy-
40  *
41  * lport > disc, lport > rport, disc > rport
42  *
43  * CALLBACKS
44  *
45  * The callbacks cause complications with this scheme. There is a callback
46  * from the rport (to either lport or disc) and a callback from disc
47  * (to the lport).
48  *
49  * As rports exit the rport state machine a callback is made to the owner of
50  * the rport to notify success or failure. Since the callback is likely to
51  * cause the lport or disc to grab its lock we cannot hold the rport lock
52  * while making the callback. To ensure that the rport is not free'd while
53  * processing the callback the rport callbacks are serialized through a
54  * single-threaded workqueue. An rport would never be free'd while in a
55  * callback handler because no other rport work in this queue can be executed
56  * at the same time.
57  *
58  * When discovery succeeds or fails a callback is made to the lport as
59  * notification. Currently, successful discovery causes the lport to take no
60  * action. A failure will cause the lport to reset. There is likely a circular
61  * locking problem with this implementation.
62  */
63 
64 /*
65  * LPORT LOCKING
66  *
67  * The critical sections protected by the lport's mutex are quite broad and
68  * may be improved upon in the future. The lport code and its locking doesn't
69  * influence the I/O path, so excessive locking doesn't penalize I/O
70  * performance.
71  *
72  * The strategy is to lock whenever processing a request or response. Note
73  * that every _enter_* function corresponds to a state change. They generally
74  * change the lports state and then send a request out on the wire. We lock
75  * before calling any of these functions to protect that state change. This
76  * means that the entry points into the lport block manage the locks while
77  * the state machine can transition between states (i.e. _enter_* functions)
78  * while always staying protected.
79  *
80  * When handling responses we also hold the lport mutex broadly. When the
81  * lport receives the response frame it locks the mutex and then calls the
82  * appropriate handler for the particular response. Generally a response will
83  * trigger a state change and so the lock must already be held.
84  *
85  * Retries also have to consider the locking. The retries occur from a work
86  * context and the work function will lock the lport and then retry the state
87  * (i.e. _enter_* function).
88  */
89 
90 #include <linux/timer.h>
91 #include <linux/slab.h>
92 #include <asm/unaligned.h>
93 
94 #include <scsi/fc/fc_gs.h>
95 
96 #include <scsi/libfc.h>
97 #include <scsi/fc_encode.h>
98 #include <linux/scatterlist.h>
99 
100 #include "fc_libfc.h"
101 
102 /* Fabric IDs to use for point-to-point mode, chosen on whims. */
103 #define FC_LOCAL_PTP_FID_LO   0x010101
104 #define FC_LOCAL_PTP_FID_HI   0x010102
105 
106 #define	DNS_DELAY	      3 /* Discovery delay after RSCN (in seconds)*/
107 
108 static void fc_lport_error(struct fc_lport *, struct fc_frame *);
109 
110 static void fc_lport_enter_reset(struct fc_lport *);
111 static void fc_lport_enter_flogi(struct fc_lport *);
112 static void fc_lport_enter_dns(struct fc_lport *);
113 static void fc_lport_enter_ns(struct fc_lport *, enum fc_lport_state);
114 static void fc_lport_enter_scr(struct fc_lport *);
115 static void fc_lport_enter_ready(struct fc_lport *);
116 static void fc_lport_enter_logo(struct fc_lport *);
117 
/* Human-readable names for each lport state, indexed by enum fc_lport_state. */
static const char *fc_lport_state_names[] = {
	[LPORT_ST_DISABLED] = "disabled",
	[LPORT_ST_FLOGI] =    "FLOGI",
	[LPORT_ST_DNS] =      "dNS",
	[LPORT_ST_RNN_ID] =   "RNN_ID",
	[LPORT_ST_RSNN_NN] =  "RSNN_NN",
	[LPORT_ST_RSPN_ID] =  "RSPN_ID",
	[LPORT_ST_RFT_ID] =   "RFT_ID",
	[LPORT_ST_RFF_ID] =   "RFF_ID",
	[LPORT_ST_SCR] =      "SCR",
	[LPORT_ST_READY] =    "Ready",
	[LPORT_ST_LOGO] =     "LOGO",
	[LPORT_ST_RESET] =    "reset",
};
132 
133 /**
134  * struct fc_bsg_info - FC Passthrough management structure
135  * @job:      The passthrough job
136  * @lport:    The local port to pass through a command
137  * @rsp_code: The expected response code
138  * @sg:	      job->reply_payload.sg_list
139  * @nents:    job->reply_payload.sg_cnt
140  * @offset:   The offset into the response data
141  */
struct fc_bsg_info {
	struct fc_bsg_job *job;		/* the passthrough job */
	struct fc_lport *lport;		/* local port the command goes through */
	u16 rsp_code;			/* expected response code */
	struct scatterlist *sg;		/* job->reply_payload.sg_list */
	u32 nents;			/* job->reply_payload.sg_cnt */
	size_t offset;			/* current offset into the response data */
};
150 
151 /**
152  * fc_frame_drop() - Dummy frame handler
153  * @lport: The local port the frame was received on
154  * @fp:	   The received frame
155  */
static int fc_frame_drop(struct fc_lport *lport, struct fc_frame *fp)
{
	/* Placeholder frame_send handler: unconditionally discard the frame. */
	fc_frame_free(fp);
	return 0;
}
161 
162 /**
163  * fc_lport_rport_callback() - Event handler for rport events
164  * @lport: The lport which is receiving the event
165  * @rdata: private remote port data
166  * @event: The event that occurred
167  *
168  * Locking Note: The rport lock should not be held when calling
169  *		 this function.
170  */
static void fc_lport_rport_callback(struct fc_lport *lport,
				    struct fc_rport_priv *rdata,
				    enum fc_rport_event event)
{
	FC_LPORT_DBG(lport, "Received a %d event for port (%6.6x)\n", event,
		     rdata->ids.port_id);

	mutex_lock(&lport->lp_mutex);
	switch (event) {
	case RPORT_EV_READY:
		if (lport->state == LPORT_ST_DNS) {
			/* The directory server rport logged in: cache it and
			 * begin name-server registration with RNN_ID. */
			lport->dns_rdata = rdata;
			fc_lport_enter_ns(lport, LPORT_ST_RNN_ID);
		} else {
			FC_LPORT_DBG(lport, "Received an READY event "
				     "on port (%6.6x) for the directory "
				     "server, but the lport is not "
				     "in the DNS state, it's in the "
				     "%d state", rdata->ids.port_id,
				     lport->state);
			/* Stale READY event: log the rport back off. */
			lport->tt.rport_logoff(rdata);
		}
		break;
	case RPORT_EV_LOGO:
	case RPORT_EV_FAILED:
	case RPORT_EV_STOP:
		/* The directory server session is gone; drop the cached
		 * pointer so it is not used again. */
		lport->dns_rdata = NULL;
		break;
	case RPORT_EV_NONE:
		break;
	}
	mutex_unlock(&lport->lp_mutex);
}
204 
205 /**
206  * fc_lport_state() - Return a string which represents the lport's state
207  * @lport: The lport whose state is to converted to a string
208  */
209 static const char *fc_lport_state(struct fc_lport *lport)
210 {
211 	const char *cp;
212 
213 	cp = fc_lport_state_names[lport->state];
214 	if (!cp)
215 		cp = "unknown";
216 	return cp;
217 }
218 
219 /**
220  * fc_lport_ptp_setup() - Create an rport for point-to-point mode
221  * @lport:	 The lport to attach the ptp rport to
222  * @remote_fid:	 The FID of the ptp rport
223  * @remote_wwpn: The WWPN of the ptp rport
224  * @remote_wwnn: The WWNN of the ptp rport
225  */
static void fc_lport_ptp_setup(struct fc_lport *lport,
			       u32 remote_fid, u64 remote_wwpn,
			       u64 remote_wwnn)
{
	/* Tear down any previous point-to-point rport and create a fresh one,
	 * all under the discovery mutex (disc > rport in the lock hierarchy). */
	mutex_lock(&lport->disc.disc_mutex);
	if (lport->ptp_rdata) {
		lport->tt.rport_logoff(lport->ptp_rdata);
		/* Drop the reference taken when this ptp rport was created. */
		kref_put(&lport->ptp_rdata->kref, lport->tt.rport_destroy);
	}
	lport->ptp_rdata = lport->tt.rport_create(lport, remote_fid);
	/* NOTE(review): rport_create()'s result is not NULL-checked before
	 * kref_get(); presumably allocation failure cannot happen here —
	 * verify against the rport_create implementation. */
	kref_get(&lport->ptp_rdata->kref);
	lport->ptp_rdata->ids.port_name = remote_wwpn;
	lport->ptp_rdata->ids.node_name = remote_wwnn;
	mutex_unlock(&lport->disc.disc_mutex);

	/* Kick off PLOGI to the peer, then mark the lport ready. */
	lport->tt.rport_login(lport->ptp_rdata);

	fc_lport_enter_ready(lport);
}
245 
246 /**
247  * fc_get_host_port_state() - Return the port state of the given Scsi_Host
248  * @shost:  The SCSI host whose port state is to be determined
249  */
250 void fc_get_host_port_state(struct Scsi_Host *shost)
251 {
252 	struct fc_lport *lport = shost_priv(shost);
253 
254 	mutex_lock(&lport->lp_mutex);
255 	if (!lport->link_up)
256 		fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
257 	else
258 		switch (lport->state) {
259 		case LPORT_ST_READY:
260 			fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
261 			break;
262 		default:
263 			fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
264 		}
265 	mutex_unlock(&lport->lp_mutex);
266 }
267 EXPORT_SYMBOL(fc_get_host_port_state);
268 
269 /**
270  * fc_get_host_speed() - Return the speed of the given Scsi_Host
271  * @shost: The SCSI host whose port speed is to be determined
272  */
void fc_get_host_speed(struct Scsi_Host *shost)
{
	struct fc_lport *lport = shost_priv(shost);

	/* Report the lport's cached link speed; no locking is taken here. */
	fc_host_speed(shost) = lport->link_speed;
}
EXPORT_SYMBOL(fc_get_host_speed);
280 
281 /**
282  * fc_get_host_stats() - Return the Scsi_Host's statistics
283  * @shost: The SCSI host whose statistics are to be returned
284  */
struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *shost)
{
	struct fc_host_statistics *fcoe_stats;
	struct fc_lport *lport = shost_priv(shost);
	struct timespec v0, v1;
	unsigned int cpu;

	/* Reuse the per-lport stats buffer; zero it before re-aggregating. */
	fcoe_stats = &lport->host_stats;
	memset(fcoe_stats, 0, sizeof(struct fc_host_statistics));

	/* Seconds since the lport was brought up, from the jiffies delta. */
	jiffies_to_timespec(jiffies, &v0);
	jiffies_to_timespec(lport->boot_time, &v1);
	fcoe_stats->seconds_since_last_reset = (v0.tv_sec - v1.tv_sec);

	/*
	 * Sum the per-CPU counters.  The counters are updated locklessly on
	 * each CPU, so the totals are a best-effort snapshot.
	 */
	for_each_possible_cpu(cpu) {
		struct fcoe_dev_stats *stats;

		stats = per_cpu_ptr(lport->dev_stats, cpu);

		fcoe_stats->tx_frames += stats->TxFrames;
		fcoe_stats->tx_words += stats->TxWords;
		fcoe_stats->rx_frames += stats->RxFrames;
		fcoe_stats->rx_words += stats->RxWords;
		fcoe_stats->error_frames += stats->ErrorFrames;
		fcoe_stats->invalid_crc_count += stats->InvalidCRCCount;
		fcoe_stats->fcp_input_requests += stats->InputRequests;
		fcoe_stats->fcp_output_requests += stats->OutputRequests;
		fcoe_stats->fcp_control_requests += stats->ControlRequests;
		fcoe_stats->fcp_input_megabytes += stats->InputMegabytes;
		fcoe_stats->fcp_output_megabytes += stats->OutputMegabytes;
		fcoe_stats->link_failure_count += stats->LinkFailureCount;
	}
	/* Counters that libfc does not maintain are reported as -1 (unknown). */
	fcoe_stats->lip_count = -1;
	fcoe_stats->nos_count = -1;
	fcoe_stats->loss_of_sync_count = -1;
	fcoe_stats->loss_of_signal_count = -1;
	fcoe_stats->prim_seq_protocol_err_count = -1;
	fcoe_stats->dumped_frames = -1;
	return fcoe_stats;
}
EXPORT_SYMBOL(fc_get_host_stats);
326 
327 /**
328  * fc_lport_flogi_fill() - Fill in FLOGI command for request
329  * @lport: The local port the FLOGI is for
330  * @flogi: The FLOGI command
331  * @op:	   The opcode
332  */
static void fc_lport_flogi_fill(struct fc_lport *lport,
				struct fc_els_flogi *flogi,
				unsigned int op)
{
	struct fc_els_csp *sp;
	struct fc_els_cssp *cp;

	/* Zero the whole payload, then fill in command, WWPN/WWNN and the
	 * common service parameters. */
	memset(flogi, 0, sizeof(*flogi));
	flogi->fl_cmd = (u8) op;
	put_unaligned_be64(lport->wwpn, &flogi->fl_wwpn);
	put_unaligned_be64(lport->wwnn, &flogi->fl_wwnn);
	sp = &flogi->fl_csp;
	sp->sp_hi_ver = 0x20;
	sp->sp_lo_ver = 0x20;
	sp->sp_bb_cred = htons(10);	/* this gets set by gateway */
	sp->sp_bb_data = htons((u16) lport->mfs);
	cp = &flogi->fl_cssp[3 - 1];	/* class 3 parameters */
	cp->cp_class = htons(FC_CPC_VALID | FC_CPC_SEQ);
	/* Non-FLOGI requests (e.g. PLOGI) carry extra service parameters. */
	if (op != ELS_FLOGI) {
		sp->sp_features = htons(FC_SP_FT_CIRO);
		sp->sp_tot_seq = htons(255);	/* seq. we accept */
		sp->sp_rel_off = htons(0x1f);
		sp->sp_e_d_tov = htonl(lport->e_d_tov);

		cp->cp_rdfs = htons((u16) lport->mfs);
		cp->cp_con_seq = htons(255);
		cp->cp_open_seq = 1;
	}
}
362 
363 /**
364  * fc_lport_add_fc4_type() - Add a supported FC-4 type to a local port
365  * @lport: The local port to add a new FC-4 type to
366  * @type:  The new FC-4 type
367  */
368 static void fc_lport_add_fc4_type(struct fc_lport *lport, enum fc_fh_type type)
369 {
370 	__be32 *mp;
371 
372 	mp = &lport->fcts.ff_type_map[type / FC_NS_BPW];
373 	*mp = htonl(ntohl(*mp) | 1UL << (type % FC_NS_BPW));
374 }
375 
376 /**
377  * fc_lport_recv_rlir_req() - Handle received Registered Link Incident Report.
378  * @sp:	   The sequence in the RLIR exchange
379  * @fp:	   The RLIR request frame
380  * @lport: Fibre Channel local port receiving the RLIR
381  *
382  * Locking Note: The lport lock is expected to be held before calling
383  * this function.
384  */
static void fc_lport_recv_rlir_req(struct fc_seq *sp, struct fc_frame *fp,
				   struct fc_lport *lport)
{
	FC_LPORT_DBG(lport, "Received RLIR request while in state %s\n",
		     fc_lport_state(lport));

	/* RLIR is simply accepted; no payload is returned. */
	lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
	fc_frame_free(fp);
}
394 
395 /**
396  * fc_lport_recv_echo_req() - Handle received ECHO request
397  * @sp:	   The sequence in the ECHO exchange
398  * @fp:	   ECHO request frame
399  * @lport: The local port receiving the ECHO
400  *
401  * Locking Note: The lport lock is expected to be held before calling
402  * this function.
403  */
static void fc_lport_recv_echo_req(struct fc_seq *sp, struct fc_frame *in_fp,
				   struct fc_lport *lport)
{
	struct fc_frame *fp;
	unsigned int len;
	void *pp;
	void *dp;

	FC_LPORT_DBG(lport, "Received ECHO request while in state %s\n",
		     fc_lport_state(lport));

	/* Payload length excludes the FC frame header. */
	len = fr_len(in_fp) - sizeof(struct fc_frame_header);
	pp = fc_frame_payload_get(in_fp, len);

	/* The reply needs at least one word to hold the LS_ACC code. */
	if (len < sizeof(__be32))
		len = sizeof(__be32);

	fp = fc_frame_alloc(lport, len);
	if (fp) {
		/* Echo the payload back, overwriting the first word with
		 * the ELS accept code. */
		dp = fc_frame_payload_get(fp, len);
		memcpy(dp, pp, len);
		*((__be32 *)dp) = htonl(ELS_LS_ACC << 24);
		fc_fill_reply_hdr(fp, in_fp, FC_RCTL_ELS_REP, 0);
		lport->tt.frame_send(lport, fp);
	}
	/* On allocation failure the ECHO is silently dropped; the
	 * originator is expected to retry. */
	fc_frame_free(in_fp);
}
431 
432 /**
433  * fc_lport_recv_rnid_req() - Handle received Request Node ID data request
434  * @sp:	   The sequence in the RNID exchange
435  * @fp:	   The RNID request frame
436  * @lport: The local port receiving the RNID
437  *
438  * Locking Note: The lport lock is expected to be held before calling
439  * this function.
440  */
static void fc_lport_recv_rnid_req(struct fc_seq *sp, struct fc_frame *in_fp,
				   struct fc_lport *lport)
{
	struct fc_frame *fp;
	struct fc_els_rnid *req;
	/* Response layout: header + common ID data + optional general data. */
	struct {
		struct fc_els_rnid_resp rnid;
		struct fc_els_rnid_cid cid;
		struct fc_els_rnid_gen gen;
	} *rp;
	struct fc_seq_els_data rjt_data;
	u8 fmt;
	size_t len;

	FC_LPORT_DBG(lport, "Received RNID request while in state %s\n",
		     fc_lport_state(lport));

	req = fc_frame_payload_get(in_fp, sizeof(*req));
	if (!req) {
		/* Payload too short to be a valid RNID: reject it. */
		rjt_data.fp = NULL;
		rjt_data.reason = ELS_RJT_LOGIC;
		rjt_data.explan = ELS_EXPL_NONE;
		lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
	} else {
		fmt = req->rnid_fmt;
		len = sizeof(*rp);
		/* Only supply the general topology data when it was
		 * requested and we actually have it configured. */
		if (fmt != ELS_RNIDF_GEN ||
		    ntohl(lport->rnid_gen.rnid_atype) == 0) {
			fmt = ELS_RNIDF_NONE;	/* nothing to provide */
			len -= sizeof(rp->gen);
		}
		fp = fc_frame_alloc(lport, len);
		if (fp) {
			rp = fc_frame_payload_get(fp, len);
			memset(rp, 0, len);
			rp->rnid.rnid_cmd = ELS_LS_ACC;
			rp->rnid.rnid_fmt = fmt;
			rp->rnid.rnid_cid_len = sizeof(rp->cid);
			rp->cid.rnid_wwpn = htonll(lport->wwpn);
			rp->cid.rnid_wwnn = htonll(lport->wwnn);
			if (fmt == ELS_RNIDF_GEN) {
				rp->rnid.rnid_sid_len = sizeof(rp->gen);
				memcpy(&rp->gen, &lport->rnid_gen,
				       sizeof(rp->gen));
			}
			fc_fill_reply_hdr(fp, in_fp, FC_RCTL_ELS_REP, 0);
			lport->tt.frame_send(lport, fp);
		}
	}
	fc_frame_free(in_fp);
}
492 
493 /**
494  * fc_lport_recv_logo_req() - Handle received fabric LOGO request
495  * @sp:	   The sequence in the LOGO exchange
496  * @fp:	   The LOGO request frame
497  * @lport: The local port receiving the LOGO
498  *
499  * Locking Note: The lport lock is expected to be held before calling
500  * this function.
501  */
static void fc_lport_recv_logo_req(struct fc_seq *sp, struct fc_frame *fp,
				   struct fc_lport *lport)
{
	/* Accept the fabric LOGO, then reset the lport state machine. */
	lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
	fc_lport_enter_reset(lport);
	fc_frame_free(fp);
}
509 
510 /**
511  * fc_fabric_login() - Start the lport state machine
512  * @lport: The local port that should log into the fabric
513  *
514  * Locking Note: This function should not be called
515  *		 with the lport lock held.
516  */
517 int fc_fabric_login(struct fc_lport *lport)
518 {
519 	int rc = -1;
520 
521 	mutex_lock(&lport->lp_mutex);
522 	if (lport->state == LPORT_ST_DISABLED ||
523 	    lport->state == LPORT_ST_LOGO) {
524 		fc_lport_state_enter(lport, LPORT_ST_RESET);
525 		fc_lport_enter_reset(lport);
526 		rc = 0;
527 	}
528 	mutex_unlock(&lport->lp_mutex);
529 
530 	return rc;
531 }
532 EXPORT_SYMBOL(fc_fabric_login);
533 
534 /**
535  * __fc_linkup() - Handler for transport linkup events
536  * @lport: The lport whose link is up
537  *
538  * Locking: must be called with the lp_mutex held
539  */
540 void __fc_linkup(struct fc_lport *lport)
541 {
542 	if (!lport->link_up) {
543 		lport->link_up = 1;
544 
545 		if (lport->state == LPORT_ST_RESET)
546 			fc_lport_enter_flogi(lport);
547 	}
548 }
549 
550 /**
551  * fc_linkup() - Handler for transport linkup events
552  * @lport: The local port whose link is up
553  */
void fc_linkup(struct fc_lport *lport)
{
	printk(KERN_INFO "host%d: libfc: Link up on port (%6.6x)\n",
	       lport->host->host_no, lport->port_id);

	/* Take the lport lock and delegate to the locked helper. */
	mutex_lock(&lport->lp_mutex);
	__fc_linkup(lport);
	mutex_unlock(&lport->lp_mutex);
}
EXPORT_SYMBOL(fc_linkup);
564 
565 /**
566  * __fc_linkdown() - Handler for transport linkdown events
567  * @lport: The lport whose link is down
568  *
569  * Locking: must be called with the lp_mutex held
570  */
571 void __fc_linkdown(struct fc_lport *lport)
572 {
573 	if (lport->link_up) {
574 		lport->link_up = 0;
575 		fc_lport_enter_reset(lport);
576 		lport->tt.fcp_cleanup(lport);
577 	}
578 }
579 
580 /**
581  * fc_linkdown() - Handler for transport linkdown events
582  * @lport: The local port whose link is down
583  */
void fc_linkdown(struct fc_lport *lport)
{
	printk(KERN_INFO "host%d: libfc: Link down on port (%6.6x)\n",
	       lport->host->host_no, lport->port_id);

	/* Take the lport lock and delegate to the locked helper. */
	mutex_lock(&lport->lp_mutex);
	__fc_linkdown(lport);
	mutex_unlock(&lport->lp_mutex);
}
EXPORT_SYMBOL(fc_linkdown);
594 
595 /**
596  * fc_fabric_logoff() - Logout of the fabric
597  * @lport: The local port to logoff the fabric
598  *
599  * Return value:
600  *	0 for success, -1 for failure
601  */
int fc_fabric_logoff(struct fc_lport *lport)
{
	/* Stop discovery first, without holding the lport lock
	 * (lport > disc in the lock hierarchy). */
	lport->tt.disc_stop_final(lport);
	mutex_lock(&lport->lp_mutex);
	if (lport->dns_rdata)
		lport->tt.rport_logoff(lport->dns_rdata);
	mutex_unlock(&lport->lp_mutex);
	/* Drop the lock while flushing so queued rport work (which may
	 * need the lport lock via callbacks) can complete. */
	lport->tt.rport_flush_queue();
	mutex_lock(&lport->lp_mutex);
	fc_lport_enter_logo(lport);
	mutex_unlock(&lport->lp_mutex);
	/* Make sure no retry fires after we have logged out. */
	cancel_delayed_work_sync(&lport->retry_work);
	return 0;
}
EXPORT_SYMBOL(fc_fabric_logoff);
617 
618 /**
619  * fc_lport_destroy() - Unregister a fc_lport
620  * @lport: The local port to unregister
621  *
622  * Note:
623  * exit routine for fc_lport instance
624  * clean-up all the allocated memory
625  * and free up other system resources.
626  *
627  */
int fc_lport_destroy(struct fc_lport *lport)
{
	/* Disable the port and replace the send handler with a dropper so
	 * no further frames leave after teardown starts. */
	mutex_lock(&lport->lp_mutex);
	lport->state = LPORT_ST_DISABLED;
	lport->link_up = 0;
	lport->tt.frame_send = fc_frame_drop;
	mutex_unlock(&lport->lp_mutex);

	/* Abort outstanding I/O, stop discovery, and reset all exchanges. */
	lport->tt.fcp_abort_io(lport);
	lport->tt.disc_stop_final(lport);
	lport->tt.exch_mgr_reset(lport, 0, 0);
	return 0;
}
EXPORT_SYMBOL(fc_lport_destroy);
642 
643 /**
644  * fc_set_mfs() - Set the maximum frame size for a local port
645  * @lport: The local port to set the MFS for
646  * @mfs:   The new MFS
647  */
648 int fc_set_mfs(struct fc_lport *lport, u32 mfs)
649 {
650 	unsigned int old_mfs;
651 	int rc = -EINVAL;
652 
653 	mutex_lock(&lport->lp_mutex);
654 
655 	old_mfs = lport->mfs;
656 
657 	if (mfs >= FC_MIN_MAX_FRAME) {
658 		mfs &= ~3;
659 		if (mfs > FC_MAX_FRAME)
660 			mfs = FC_MAX_FRAME;
661 		mfs -= sizeof(struct fc_frame_header);
662 		lport->mfs = mfs;
663 		rc = 0;
664 	}
665 
666 	if (!rc && mfs < old_mfs)
667 		fc_lport_enter_reset(lport);
668 
669 	mutex_unlock(&lport->lp_mutex);
670 
671 	return rc;
672 }
673 EXPORT_SYMBOL(fc_set_mfs);
674 
675 /**
676  * fc_lport_disc_callback() - Callback for discovery events
677  * @lport: The local port receiving the event
678  * @event: The discovery event
679  */
void fc_lport_disc_callback(struct fc_lport *lport, enum fc_disc_event event)
{
	switch (event) {
	case DISC_EV_SUCCESS:
		/* Successful discovery requires no lport action. */
		FC_LPORT_DBG(lport, "Discovery succeeded\n");
		break;
	case DISC_EV_FAILED:
		/* Failed discovery resets the lport; see the locking notes
		 * at the top of this file about possible circular locking. */
		printk(KERN_ERR "host%d: libfc: "
		       "Discovery failed for port (%6.6x)\n",
		       lport->host->host_no, lport->port_id);
		mutex_lock(&lport->lp_mutex);
		fc_lport_enter_reset(lport);
		mutex_unlock(&lport->lp_mutex);
		break;
	case DISC_EV_NONE:
		/* Should never be delivered. */
		WARN_ON(1);
		break;
	}
}
699 
700 /**
701  * fc_lport_enter_ready() - Enter the ready state and start discovery
702  * @lport: The local port that is ready
703  *
704  * Locking Note: The lport lock is expected to be held before calling
705  * this routine.
706  */
static void fc_lport_enter_ready(struct fc_lport *lport)
{
	FC_LPORT_DBG(lport, "Entered READY from state %s\n",
		     fc_lport_state(lport));

	fc_lport_state_enter(lport, LPORT_ST_READY);
	if (lport->vport)
		fc_vport_set_state(lport->vport, FC_VPORT_ACTIVE);
	fc_vports_linkchange(lport);

	/* In point-to-point mode there is only one peer, so fabric
	 * discovery is skipped. */
	if (!lport->ptp_rdata)
		lport->tt.disc_start(fc_lport_disc_callback, lport);
}
720 
721 /**
722  * fc_lport_set_port_id() - set the local port Port ID
723  * @lport: The local port which will have its Port ID set.
724  * @port_id: The new port ID.
725  * @fp: The frame containing the incoming request, or NULL.
726  *
727  * Locking Note: The lport lock is expected to be held before calling
728  * this function.
729  */
static void fc_lport_set_port_id(struct fc_lport *lport, u32 port_id,
				 struct fc_frame *fp)
{
	if (port_id)
		printk(KERN_INFO "host%d: Assigned Port ID %6.6x\n",
		       lport->host->host_no, port_id);

	lport->port_id = port_id;

	/* Update the fc_host */
	fc_host_port_id(lport->host) = port_id;

	/* Give the LLD a chance to react to the new port ID, passing along
	 * the triggering frame (may be NULL). */
	if (lport->tt.lport_set_port_id)
		lport->tt.lport_set_port_id(lport, port_id, fp);
}
745 
746 /**
747  * fc_lport_set_local_id() - set the local port Port ID for point-to-multipoint
748  * @lport: The local port which will have its Port ID set.
749  * @port_id: The new port ID.
750  *
751  * Called by the lower-level driver when transport sets the local port_id.
752  * This is used in VN_port to VN_port mode for FCoE, and causes FLOGI and
753  * discovery to be skipped.
754  */
755 void fc_lport_set_local_id(struct fc_lport *lport, u32 port_id)
756 {
757 	mutex_lock(&lport->lp_mutex);
758 
759 	fc_lport_set_port_id(lport, port_id, NULL);
760 
761 	switch (lport->state) {
762 	case LPORT_ST_RESET:
763 	case LPORT_ST_FLOGI:
764 		if (port_id)
765 			fc_lport_enter_ready(lport);
766 		break;
767 	default:
768 		break;
769 	}
770 	mutex_unlock(&lport->lp_mutex);
771 }
772 EXPORT_SYMBOL(fc_lport_set_local_id);
773 
774 /**
775  * fc_lport_recv_flogi_req() - Receive a FLOGI request
776  * @sp_in: The sequence the FLOGI is on
777  * @rx_fp: The FLOGI frame
778  * @lport: The local port that received the request
779  *
780  * A received FLOGI request indicates a point-to-point connection.
781  * Accept it with the common service parameters indicating our N port.
782  * Set up to do a PLOGI if we have the higher-number WWPN.
783  *
784  * Locking Note: The lport lock is expected to be held before calling
785  * this function.
786  */
static void fc_lport_recv_flogi_req(struct fc_seq *sp_in,
				    struct fc_frame *rx_fp,
				    struct fc_lport *lport)
{
	struct fc_frame *fp;
	struct fc_frame_header *fh;
	struct fc_seq *sp;
	struct fc_els_flogi *flp;
	struct fc_els_flogi *new_flp;
	u64 remote_wwpn;
	u32 remote_fid;
	u32 local_fid;

	FC_LPORT_DBG(lport, "Received FLOGI request while in state %s\n",
		     fc_lport_state(lport));

	remote_fid = fc_frame_sid(rx_fp);
	flp = fc_frame_payload_get(rx_fp, sizeof(*flp));
	if (!flp)
		goto out;
	remote_wwpn = get_unaligned_be64(&flp->fl_wwpn);
	/* A peer claiming our own WWPN indicates a misconfiguration. */
	if (remote_wwpn == lport->wwpn) {
		printk(KERN_WARNING "host%d: libfc: Received FLOGI from port "
		       "with same WWPN %16.16llx\n",
		       lport->host->host_no, remote_wwpn);
		goto out;
	}
	FC_LPORT_DBG(lport, "FLOGI from port WWPN %16.16llx\n", remote_wwpn);

	/*
	 * XXX what is the right thing to do for FIDs?
	 * The originator might expect our S_ID to be 0xfffffe.
	 * But if so, both of us could end up with the same FID.
	 */
	/* The higher WWPN takes the HI FID; pick a distinct FID for the
	 * peer if it did not supply one (or supplied ours). */
	local_fid = FC_LOCAL_PTP_FID_LO;
	if (remote_wwpn < lport->wwpn) {
		local_fid = FC_LOCAL_PTP_FID_HI;
		if (!remote_fid || remote_fid == local_fid)
			remote_fid = FC_LOCAL_PTP_FID_LO;
	} else if (!remote_fid) {
		remote_fid = FC_LOCAL_PTP_FID_HI;
	}

	fc_lport_set_port_id(lport, local_fid, rx_fp);

	fp = fc_frame_alloc(lport, sizeof(*flp));
	if (fp) {
		new_flp = fc_frame_payload_get(fp, sizeof(*flp));
		fc_lport_flogi_fill(lport, new_flp, ELS_FLOGI);
		new_flp->fl_cmd = (u8) ELS_LS_ACC;

		/*
		 * Send the response.  If this fails, the originator should
		 * repeat the sequence.
		 */
		fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
		fh = fc_frame_header_get(fp);
		/* Use the freshly negotiated point-to-point FIDs. */
		hton24(fh->fh_s_id, local_fid);
		hton24(fh->fh_d_id, remote_fid);
		lport->tt.frame_send(lport, fp);

	} else {
		fc_lport_error(lport, fp);
	}
	/* Establish the point-to-point rport for the peer. */
	fc_lport_ptp_setup(lport, remote_fid, remote_wwpn,
			   get_unaligned_be64(&flp->fl_wwnn));

out:
	/* NOTE(review): sp is assigned but apparently unused here —
	 * presumably a leftover; verify against callers of fr_seq(). */
	sp = fr_seq(rx_fp);
	fc_frame_free(rx_fp);
}
858 
859 /**
860  * fc_lport_recv_req() - The generic lport request handler
861  * @lport: The local port that received the request
862  * @sp:	   The sequence the request is on
863  * @fp:	   The request frame
864  *
865  * This function will see if the lport handles the request or
866  * if an rport should handle the request.
867  *
868  * Locking Note: This function should not be called with the lport
869  *		 lock held because it will grab the lock.
870  */
static void fc_lport_recv_req(struct fc_lport *lport, struct fc_seq *sp,
			      struct fc_frame *fp)
{
	struct fc_frame_header *fh = fc_frame_header_get(fp);
	/* Handler selected for this request; defaults to the rport layer. */
	void (*recv) (struct fc_seq *, struct fc_frame *, struct fc_lport *);

	mutex_lock(&lport->lp_mutex);

	/*
	 * Handle special ELS cases like FLOGI, LOGO, and
	 * RSCN here.  These don't require a session.
	 * Even if we had a session, it might not be ready.
	 */
	if (!lport->link_up)
		fc_frame_free(fp);
	else if (fh->fh_type == FC_TYPE_ELS &&
		 fh->fh_r_ctl == FC_RCTL_ELS_REQ) {
		/*
		 * Check opcode.
		 */
		recv = lport->tt.rport_recv_req;
		switch (fc_frame_payload_op(fp)) {
		case ELS_FLOGI:
			/* FLOGI from a peer only matters in PTP mode. */
			if (!lport->point_to_multipoint)
				recv = fc_lport_recv_flogi_req;
			break;
		case ELS_LOGO:
			/* Only a fabric-controller LOGO is handled here. */
			if (fc_frame_sid(fp) == FC_FID_FLOGI)
				recv = fc_lport_recv_logo_req;
			break;
		case ELS_RSCN:
			recv = lport->tt.disc_recv_req;
			break;
		case ELS_ECHO:
			recv = fc_lport_recv_echo_req;
			break;
		case ELS_RLIR:
			recv = fc_lport_recv_rlir_req;
			break;
		case ELS_RNID:
			recv = fc_lport_recv_rnid_req;
			break;
		}

		/* The chosen handler consumes the frame. */
		recv(sp, fp, lport);
	} else {
		FC_LPORT_DBG(lport, "dropping invalid frame (eof %x)\n",
			     fr_eof(fp));
		fc_frame_free(fp);
	}
	mutex_unlock(&lport->lp_mutex);

	/*
	 *  The common exch_done for all request may not be good
	 *  if any request requires longer hold on exhange. XXX
	 */
	lport->tt.exch_done(sp);
}
929 
930 /**
931  * fc_lport_reset() - Reset a local port
932  * @lport: The local port which should be reset
933  *
934  * Locking Note: This functions should not be called with the
935  *		 lport lock held.
936  */
int fc_lport_reset(struct fc_lport *lport)
{
	/* Stop any pending retry before resetting, outside the lock. */
	cancel_delayed_work_sync(&lport->retry_work);
	mutex_lock(&lport->lp_mutex);
	fc_lport_enter_reset(lport);
	mutex_unlock(&lport->lp_mutex);
	return 0;
}
EXPORT_SYMBOL(fc_lport_reset);
946 
947 /**
948  * fc_lport_reset_locked() - Reset the local port w/ the lport lock held
949  * @lport: The local port to be reset
950  *
951  * Locking Note: The lport lock is expected to be held before calling
952  * this routine.
953  */
static void fc_lport_reset_locked(struct fc_lport *lport)
{
	/* Log out the directory server and any point-to-point peer. */
	if (lport->dns_rdata)
		lport->tt.rport_logoff(lport->dns_rdata);

	if (lport->ptp_rdata) {
		lport->tt.rport_logoff(lport->ptp_rdata);
		/* Drop the reference taken in fc_lport_ptp_setup(). */
		kref_put(&lport->ptp_rdata->kref, lport->tt.rport_destroy);
		lport->ptp_rdata = NULL;
	}

	lport->tt.disc_stop(lport);

	/* Reset all exchanges and clear the cached fabric identity. */
	lport->tt.exch_mgr_reset(lport, 0, 0);
	fc_host_fabric_name(lport->host) = 0;

	/* In point-to-multipoint mode the port ID survives a reset while
	 * the link stays up; otherwise it is cleared. */
	if (lport->port_id && (!lport->point_to_multipoint || !lport->link_up))
		fc_lport_set_port_id(lport, 0, NULL);
}
973 
974 /**
975  * fc_lport_enter_reset() - Reset the local port
976  * @lport: The local port to be reset
977  *
978  * Locking Note: The lport lock is expected to be held before calling
979  * this routine.
980  */
static void fc_lport_enter_reset(struct fc_lport *lport)
{
	FC_LPORT_DBG(lport, "Entered RESET state from %s state\n",
		     fc_lport_state(lport));

	/* A disabled or logged-off port must not be restarted by a reset. */
	if (lport->state == LPORT_ST_DISABLED || lport->state == LPORT_ST_LOGO)
		return;

	if (lport->vport) {
		if (lport->link_up)
			fc_vport_set_state(lport->vport, FC_VPORT_INITIALIZING);
		else
			fc_vport_set_state(lport->vport, FC_VPORT_LINKDOWN);
	}
	fc_lport_state_enter(lport, LPORT_ST_RESET);
	fc_vports_linkchange(lport);
	fc_lport_reset_locked(lport);
	/* Restart fabric login immediately if the link is still up. */
	if (lport->link_up)
		fc_lport_enter_flogi(lport);
}
1001 
1002 /**
1003  * fc_lport_enter_disabled() - Disable the local port
1004  * @lport: The local port to be reset
1005  *
1006  * Locking Note: The lport lock is expected to be held before calling
1007  * this routine.
1008  */
1009 static void fc_lport_enter_disabled(struct fc_lport *lport)
1010 {
1011 	FC_LPORT_DBG(lport, "Entered disabled state from %s state\n",
1012 		     fc_lport_state(lport));
1013 
1014 	fc_lport_state_enter(lport, LPORT_ST_DISABLED);
1015 	fc_vports_linkchange(lport);
1016 	fc_lport_reset_locked(lport);
1017 }
1018 
1019 /**
1020  * fc_lport_error() - Handler for any errors
1021  * @lport: The local port that the error was on
1022  * @fp:	   The error code encoded in a frame pointer
1023  *
1024  * If the error was caused by a resource allocation failure
1025  * then wait for half a second and retry, otherwise retry
1026  * after the e_d_tov time.
1027  */
1028 static void fc_lport_error(struct fc_lport *lport, struct fc_frame *fp)
1029 {
1030 	unsigned long delay = 0;
1031 	FC_LPORT_DBG(lport, "Error %ld in state %s, retries %d\n",
1032 		     PTR_ERR(fp), fc_lport_state(lport),
1033 		     lport->retry_count);
1034 
1035 	if (!fp || PTR_ERR(fp) == -FC_EX_TIMEOUT) {
1036 		/*
1037 		 * Memory allocation failure, or the exchange timed out.
1038 		 *  Retry after delay
1039 		 */
1040 		if (lport->retry_count < lport->max_retry_count) {
1041 			lport->retry_count++;
1042 			if (!fp)
1043 				delay = msecs_to_jiffies(500);
1044 			else
1045 				delay =	msecs_to_jiffies(lport->e_d_tov);
1046 
1047 			schedule_delayed_work(&lport->retry_work, delay);
1048 		} else {
1049 			switch (lport->state) {
1050 			case LPORT_ST_DISABLED:
1051 			case LPORT_ST_READY:
1052 			case LPORT_ST_RESET:
1053 			case LPORT_ST_RNN_ID:
1054 			case LPORT_ST_RSNN_NN:
1055 			case LPORT_ST_RSPN_ID:
1056 			case LPORT_ST_RFT_ID:
1057 			case LPORT_ST_RFF_ID:
1058 			case LPORT_ST_SCR:
1059 			case LPORT_ST_DNS:
1060 			case LPORT_ST_FLOGI:
1061 			case LPORT_ST_LOGO:
1062 				fc_lport_enter_reset(lport);
1063 				break;
1064 			}
1065 		}
1066 	}
1067 }
1068 
1069 /**
1070  * fc_lport_ns_resp() - Handle response to a name server
1071  *			registration exchange
1072  * @sp:	    current sequence in exchange
1073  * @fp:	    response frame
1074  * @lp_arg: Fibre Channel host port instance
1075  *
1076  * Locking Note: This function will be called without the lport lock
1077  * held, but it will lock, call an _enter_* function or fc_lport_error()
1078  * and then unlock the lport.
1079  */
1080 static void fc_lport_ns_resp(struct fc_seq *sp, struct fc_frame *fp,
1081 			     void *lp_arg)
1082 {
1083 	struct fc_lport *lport = lp_arg;
1084 	struct fc_frame_header *fh;
1085 	struct fc_ct_hdr *ct;
1086 
1087 	FC_LPORT_DBG(lport, "Received a ns %s\n", fc_els_resp_type(fp));
1088 
1089 	if (fp == ERR_PTR(-FC_EX_CLOSED))
1090 		return;
1091 
1092 	mutex_lock(&lport->lp_mutex);
1093 
1094 	if (lport->state < LPORT_ST_RNN_ID || lport->state > LPORT_ST_RFF_ID) {
1095 		FC_LPORT_DBG(lport, "Received a name server response, "
1096 			     "but in state %s\n", fc_lport_state(lport));
1097 		if (IS_ERR(fp))
1098 			goto err;
1099 		goto out;
1100 	}
1101 
1102 	if (IS_ERR(fp)) {
1103 		fc_lport_error(lport, fp);
1104 		goto err;
1105 	}
1106 
1107 	fh = fc_frame_header_get(fp);
1108 	ct = fc_frame_payload_get(fp, sizeof(*ct));
1109 
1110 	if (fh && ct && fh->fh_type == FC_TYPE_CT &&
1111 	    ct->ct_fs_type == FC_FST_DIR &&
1112 	    ct->ct_fs_subtype == FC_NS_SUBTYPE &&
1113 	    ntohs(ct->ct_cmd) == FC_FS_ACC)
1114 		switch (lport->state) {
1115 		case LPORT_ST_RNN_ID:
1116 			fc_lport_enter_ns(lport, LPORT_ST_RSNN_NN);
1117 			break;
1118 		case LPORT_ST_RSNN_NN:
1119 			fc_lport_enter_ns(lport, LPORT_ST_RSPN_ID);
1120 			break;
1121 		case LPORT_ST_RSPN_ID:
1122 			fc_lport_enter_ns(lport, LPORT_ST_RFT_ID);
1123 			break;
1124 		case LPORT_ST_RFT_ID:
1125 			fc_lport_enter_ns(lport, LPORT_ST_RFF_ID);
1126 			break;
1127 		case LPORT_ST_RFF_ID:
1128 			fc_lport_enter_scr(lport);
1129 			break;
1130 		default:
1131 			/* should have already been caught by state checks */
1132 			break;
1133 		}
1134 	else
1135 		fc_lport_error(lport, fp);
1136 out:
1137 	fc_frame_free(fp);
1138 err:
1139 	mutex_unlock(&lport->lp_mutex);
1140 }
1141 
1142 /**
1143  * fc_lport_scr_resp() - Handle response to State Change Register (SCR) request
1144  * @sp:	    current sequence in SCR exchange
1145  * @fp:	    response frame
1146  * @lp_arg: Fibre Channel lport port instance that sent the registration request
1147  *
1148  * Locking Note: This function will be called without the lport lock
1149  * held, but it will lock, call an _enter_* function or fc_lport_error
1150  * and then unlock the lport.
1151  */
1152 static void fc_lport_scr_resp(struct fc_seq *sp, struct fc_frame *fp,
1153 			      void *lp_arg)
1154 {
1155 	struct fc_lport *lport = lp_arg;
1156 	u8 op;
1157 
1158 	FC_LPORT_DBG(lport, "Received a SCR %s\n", fc_els_resp_type(fp));
1159 
1160 	if (fp == ERR_PTR(-FC_EX_CLOSED))
1161 		return;
1162 
1163 	mutex_lock(&lport->lp_mutex);
1164 
1165 	if (lport->state != LPORT_ST_SCR) {
1166 		FC_LPORT_DBG(lport, "Received a SCR response, but in state "
1167 			     "%s\n", fc_lport_state(lport));
1168 		if (IS_ERR(fp))
1169 			goto err;
1170 		goto out;
1171 	}
1172 
1173 	if (IS_ERR(fp)) {
1174 		fc_lport_error(lport, fp);
1175 		goto err;
1176 	}
1177 
1178 	op = fc_frame_payload_op(fp);
1179 	if (op == ELS_LS_ACC)
1180 		fc_lport_enter_ready(lport);
1181 	else
1182 		fc_lport_error(lport, fp);
1183 
1184 out:
1185 	fc_frame_free(fp);
1186 err:
1187 	mutex_unlock(&lport->lp_mutex);
1188 }
1189 
1190 /**
1191  * fc_lport_enter_scr() - Send a SCR (State Change Register) request
1192  * @lport: The local port to register for state changes
1193  *
1194  * Locking Note: The lport lock is expected to be held before calling
1195  * this routine.
1196  */
1197 static void fc_lport_enter_scr(struct fc_lport *lport)
1198 {
1199 	struct fc_frame *fp;
1200 
1201 	FC_LPORT_DBG(lport, "Entered SCR state from %s state\n",
1202 		     fc_lport_state(lport));
1203 
1204 	fc_lport_state_enter(lport, LPORT_ST_SCR);
1205 
1206 	fp = fc_frame_alloc(lport, sizeof(struct fc_els_scr));
1207 	if (!fp) {
1208 		fc_lport_error(lport, fp);
1209 		return;
1210 	}
1211 
1212 	if (!lport->tt.elsct_send(lport, FC_FID_FCTRL, fp, ELS_SCR,
1213 				  fc_lport_scr_resp, lport,
1214 				  2 * lport->r_a_tov))
1215 		fc_lport_error(lport, NULL);
1216 }
1217 
1218 /**
1219  * fc_lport_enter_ns() - register some object with the name server
1220  * @lport: Fibre Channel local port to register
1221  *
1222  * Locking Note: The lport lock is expected to be held before calling
1223  * this routine.
1224  */
1225 static void fc_lport_enter_ns(struct fc_lport *lport, enum fc_lport_state state)
1226 {
1227 	struct fc_frame *fp;
1228 	enum fc_ns_req cmd;
1229 	int size = sizeof(struct fc_ct_hdr);
1230 	size_t len;
1231 
1232 	FC_LPORT_DBG(lport, "Entered %s state from %s state\n",
1233 		     fc_lport_state_names[state],
1234 		     fc_lport_state(lport));
1235 
1236 	fc_lport_state_enter(lport, state);
1237 
1238 	switch (state) {
1239 	case LPORT_ST_RNN_ID:
1240 		cmd = FC_NS_RNN_ID;
1241 		size += sizeof(struct fc_ns_rn_id);
1242 		break;
1243 	case LPORT_ST_RSNN_NN:
1244 		len = strnlen(fc_host_symbolic_name(lport->host), 255);
1245 		/* if there is no symbolic name, skip to RFT_ID */
1246 		if (!len)
1247 			return fc_lport_enter_ns(lport, LPORT_ST_RFT_ID);
1248 		cmd = FC_NS_RSNN_NN;
1249 		size += sizeof(struct fc_ns_rsnn) + len;
1250 		break;
1251 	case LPORT_ST_RSPN_ID:
1252 		len = strnlen(fc_host_symbolic_name(lport->host), 255);
1253 		/* if there is no symbolic name, skip to RFT_ID */
1254 		if (!len)
1255 			return fc_lport_enter_ns(lport, LPORT_ST_RFT_ID);
1256 		cmd = FC_NS_RSPN_ID;
1257 		size += sizeof(struct fc_ns_rspn) + len;
1258 		break;
1259 	case LPORT_ST_RFT_ID:
1260 		cmd = FC_NS_RFT_ID;
1261 		size += sizeof(struct fc_ns_rft);
1262 		break;
1263 	case LPORT_ST_RFF_ID:
1264 		cmd = FC_NS_RFF_ID;
1265 		size += sizeof(struct fc_ns_rff_id);
1266 		break;
1267 	default:
1268 		fc_lport_error(lport, NULL);
1269 		return;
1270 	}
1271 
1272 	fp = fc_frame_alloc(lport, size);
1273 	if (!fp) {
1274 		fc_lport_error(lport, fp);
1275 		return;
1276 	}
1277 
1278 	if (!lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, cmd,
1279 				  fc_lport_ns_resp,
1280 				  lport, 3 * lport->r_a_tov))
1281 		fc_lport_error(lport, fp);
1282 }
1283 
/* Event callbacks for rports created by the lport (e.g. the dNS rport) */
static struct fc_rport_operations fc_lport_rport_ops = {
	.event_callback = fc_lport_rport_callback,
};
1287 
1288 /**
1289  * fc_rport_enter_dns() - Create a fc_rport for the name server
1290  * @lport: The local port requesting a remote port for the name server
1291  *
1292  * Locking Note: The lport lock is expected to be held before calling
1293  * this routine.
1294  */
1295 static void fc_lport_enter_dns(struct fc_lport *lport)
1296 {
1297 	struct fc_rport_priv *rdata;
1298 
1299 	FC_LPORT_DBG(lport, "Entered DNS state from %s state\n",
1300 		     fc_lport_state(lport));
1301 
1302 	fc_lport_state_enter(lport, LPORT_ST_DNS);
1303 
1304 	mutex_lock(&lport->disc.disc_mutex);
1305 	rdata = lport->tt.rport_create(lport, FC_FID_DIR_SERV);
1306 	mutex_unlock(&lport->disc.disc_mutex);
1307 	if (!rdata)
1308 		goto err;
1309 
1310 	rdata->ops = &fc_lport_rport_ops;
1311 	lport->tt.rport_login(rdata);
1312 	return;
1313 
1314 err:
1315 	fc_lport_error(lport, NULL);
1316 }
1317 
1318 /**
1319  * fc_lport_timeout() - Handler for the retry_work timer
1320  * @work: The work struct of the local port
1321  */
1322 static void fc_lport_timeout(struct work_struct *work)
1323 {
1324 	struct fc_lport *lport =
1325 		container_of(work, struct fc_lport,
1326 			     retry_work.work);
1327 
1328 	mutex_lock(&lport->lp_mutex);
1329 
1330 	switch (lport->state) {
1331 	case LPORT_ST_DISABLED:
1332 		WARN_ON(1);
1333 		break;
1334 	case LPORT_ST_READY:
1335 		WARN_ON(1);
1336 		break;
1337 	case LPORT_ST_RESET:
1338 		break;
1339 	case LPORT_ST_FLOGI:
1340 		fc_lport_enter_flogi(lport);
1341 		break;
1342 	case LPORT_ST_DNS:
1343 		fc_lport_enter_dns(lport);
1344 		break;
1345 	case LPORT_ST_RNN_ID:
1346 	case LPORT_ST_RSNN_NN:
1347 	case LPORT_ST_RSPN_ID:
1348 	case LPORT_ST_RFT_ID:
1349 	case LPORT_ST_RFF_ID:
1350 		fc_lport_enter_ns(lport, lport->state);
1351 		break;
1352 	case LPORT_ST_SCR:
1353 		fc_lport_enter_scr(lport);
1354 		break;
1355 	case LPORT_ST_LOGO:
1356 		fc_lport_enter_logo(lport);
1357 		break;
1358 	}
1359 
1360 	mutex_unlock(&lport->lp_mutex);
1361 }
1362 
1363 /**
1364  * fc_lport_logo_resp() - Handle response to LOGO request
1365  * @sp:	    The sequence that the LOGO was on
1366  * @fp:	    The LOGO frame
1367  * @lp_arg: The lport port that received the LOGO request
1368  *
1369  * Locking Note: This function will be called without the lport lock
1370  * held, but it will lock, call an _enter_* function or fc_lport_error()
1371  * and then unlock the lport.
1372  */
1373 void fc_lport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
1374 			void *lp_arg)
1375 {
1376 	struct fc_lport *lport = lp_arg;
1377 	u8 op;
1378 
1379 	FC_LPORT_DBG(lport, "Received a LOGO %s\n", fc_els_resp_type(fp));
1380 
1381 	if (fp == ERR_PTR(-FC_EX_CLOSED))
1382 		return;
1383 
1384 	mutex_lock(&lport->lp_mutex);
1385 
1386 	if (lport->state != LPORT_ST_LOGO) {
1387 		FC_LPORT_DBG(lport, "Received a LOGO response, but in state "
1388 			     "%s\n", fc_lport_state(lport));
1389 		if (IS_ERR(fp))
1390 			goto err;
1391 		goto out;
1392 	}
1393 
1394 	if (IS_ERR(fp)) {
1395 		fc_lport_error(lport, fp);
1396 		goto err;
1397 	}
1398 
1399 	op = fc_frame_payload_op(fp);
1400 	if (op == ELS_LS_ACC)
1401 		fc_lport_enter_disabled(lport);
1402 	else
1403 		fc_lport_error(lport, fp);
1404 
1405 out:
1406 	fc_frame_free(fp);
1407 err:
1408 	mutex_unlock(&lport->lp_mutex);
1409 }
1410 EXPORT_SYMBOL(fc_lport_logo_resp);
1411 
1412 /**
1413  * fc_rport_enter_logo() - Logout of the fabric
1414  * @lport: The local port to be logged out
1415  *
1416  * Locking Note: The lport lock is expected to be held before calling
1417  * this routine.
1418  */
1419 static void fc_lport_enter_logo(struct fc_lport *lport)
1420 {
1421 	struct fc_frame *fp;
1422 	struct fc_els_logo *logo;
1423 
1424 	FC_LPORT_DBG(lport, "Entered LOGO state from %s state\n",
1425 		     fc_lport_state(lport));
1426 
1427 	fc_lport_state_enter(lport, LPORT_ST_LOGO);
1428 	fc_vports_linkchange(lport);
1429 
1430 	fp = fc_frame_alloc(lport, sizeof(*logo));
1431 	if (!fp) {
1432 		fc_lport_error(lport, fp);
1433 		return;
1434 	}
1435 
1436 	if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp, ELS_LOGO,
1437 				  fc_lport_logo_resp, lport,
1438 				  2 * lport->r_a_tov))
1439 		fc_lport_error(lport, NULL);
1440 }
1441 
1442 /**
1443  * fc_lport_flogi_resp() - Handle response to FLOGI request
1444  * @sp:	    The sequence that the FLOGI was on
1445  * @fp:	    The FLOGI response frame
1446  * @lp_arg: The lport port that received the FLOGI response
1447  *
1448  * Locking Note: This function will be called without the lport lock
1449  * held, but it will lock, call an _enter_* function or fc_lport_error()
1450  * and then unlock the lport.
1451  */
1452 void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
1453 			 void *lp_arg)
1454 {
1455 	struct fc_lport *lport = lp_arg;
1456 	struct fc_els_flogi *flp;
1457 	u32 did;
1458 	u16 csp_flags;
1459 	unsigned int r_a_tov;
1460 	unsigned int e_d_tov;
1461 	u16 mfs;
1462 
1463 	FC_LPORT_DBG(lport, "Received a FLOGI %s\n", fc_els_resp_type(fp));
1464 
1465 	if (fp == ERR_PTR(-FC_EX_CLOSED))
1466 		return;
1467 
1468 	mutex_lock(&lport->lp_mutex);
1469 
1470 	if (lport->state != LPORT_ST_FLOGI) {
1471 		FC_LPORT_DBG(lport, "Received a FLOGI response, but in state "
1472 			     "%s\n", fc_lport_state(lport));
1473 		if (IS_ERR(fp))
1474 			goto err;
1475 		goto out;
1476 	}
1477 
1478 	if (IS_ERR(fp)) {
1479 		fc_lport_error(lport, fp);
1480 		goto err;
1481 	}
1482 
1483 	did = fc_frame_did(fp);
1484 	if (fc_frame_payload_op(fp) == ELS_LS_ACC && did != 0) {
1485 		flp = fc_frame_payload_get(fp, sizeof(*flp));
1486 		if (flp) {
1487 			mfs = ntohs(flp->fl_csp.sp_bb_data) &
1488 				FC_SP_BB_DATA_MASK;
1489 			if (mfs >= FC_SP_MIN_MAX_PAYLOAD &&
1490 			    mfs < lport->mfs)
1491 				lport->mfs = mfs;
1492 			csp_flags = ntohs(flp->fl_csp.sp_features);
1493 			r_a_tov = ntohl(flp->fl_csp.sp_r_a_tov);
1494 			e_d_tov = ntohl(flp->fl_csp.sp_e_d_tov);
1495 			if (csp_flags & FC_SP_FT_EDTR)
1496 				e_d_tov /= 1000000;
1497 
1498 			lport->npiv_enabled = !!(csp_flags & FC_SP_FT_NPIV_ACC);
1499 
1500 			if ((csp_flags & FC_SP_FT_FPORT) == 0) {
1501 				if (e_d_tov > lport->e_d_tov)
1502 					lport->e_d_tov = e_d_tov;
1503 				lport->r_a_tov = 2 * e_d_tov;
1504 				fc_lport_set_port_id(lport, did, fp);
1505 				printk(KERN_INFO "host%d: libfc: "
1506 				       "Port (%6.6x) entered "
1507 				       "point-to-point mode\n",
1508 				       lport->host->host_no, did);
1509 				fc_lport_ptp_setup(lport, fc_frame_sid(fp),
1510 						   get_unaligned_be64(
1511 							   &flp->fl_wwpn),
1512 						   get_unaligned_be64(
1513 							   &flp->fl_wwnn));
1514 			} else {
1515 				lport->e_d_tov = e_d_tov;
1516 				lport->r_a_tov = r_a_tov;
1517 				fc_host_fabric_name(lport->host) =
1518 					get_unaligned_be64(&flp->fl_wwnn);
1519 				fc_lport_set_port_id(lport, did, fp);
1520 				fc_lport_enter_dns(lport);
1521 			}
1522 		}
1523 	} else {
1524 		FC_LPORT_DBG(lport, "Bad FLOGI response\n");
1525 	}
1526 
1527 out:
1528 	fc_frame_free(fp);
1529 err:
1530 	mutex_unlock(&lport->lp_mutex);
1531 }
1532 EXPORT_SYMBOL(fc_lport_flogi_resp);
1533 
1534 /**
1535  * fc_rport_enter_flogi() - Send a FLOGI request to the fabric manager
1536  * @lport: Fibre Channel local port to be logged in to the fabric
1537  *
1538  * Locking Note: The lport lock is expected to be held before calling
1539  * this routine.
1540  */
1541 void fc_lport_enter_flogi(struct fc_lport *lport)
1542 {
1543 	struct fc_frame *fp;
1544 
1545 	FC_LPORT_DBG(lport, "Entered FLOGI state from %s state\n",
1546 		     fc_lport_state(lport));
1547 
1548 	fc_lport_state_enter(lport, LPORT_ST_FLOGI);
1549 
1550 	if (lport->point_to_multipoint) {
1551 		if (lport->port_id)
1552 			fc_lport_enter_ready(lport);
1553 		return;
1554 	}
1555 
1556 	fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
1557 	if (!fp)
1558 		return fc_lport_error(lport, fp);
1559 
1560 	if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp,
1561 				  lport->vport ? ELS_FDISC : ELS_FLOGI,
1562 				  fc_lport_flogi_resp, lport,
1563 				  lport->vport ? 2 * lport->r_a_tov :
1564 				  lport->e_d_tov))
1565 		fc_lport_error(lport, NULL);
1566 }
1567 
1568 /**
1569  * fc_lport_config() - Configure a fc_lport
1570  * @lport: The local port to be configured
1571  */
1572 int fc_lport_config(struct fc_lport *lport)
1573 {
1574 	INIT_DELAYED_WORK(&lport->retry_work, fc_lport_timeout);
1575 	mutex_init(&lport->lp_mutex);
1576 
1577 	fc_lport_state_enter(lport, LPORT_ST_DISABLED);
1578 
1579 	fc_lport_add_fc4_type(lport, FC_TYPE_FCP);
1580 	fc_lport_add_fc4_type(lport, FC_TYPE_CT);
1581 
1582 	return 0;
1583 }
1584 EXPORT_SYMBOL(fc_lport_config);
1585 
1586 /**
1587  * fc_lport_init() - Initialize the lport layer for a local port
1588  * @lport: The local port to initialize the exchange layer for
1589  */
1590 int fc_lport_init(struct fc_lport *lport)
1591 {
1592 	if (!lport->tt.lport_recv)
1593 		lport->tt.lport_recv = fc_lport_recv_req;
1594 
1595 	if (!lport->tt.lport_reset)
1596 		lport->tt.lport_reset = fc_lport_reset;
1597 
1598 	fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
1599 	fc_host_node_name(lport->host) = lport->wwnn;
1600 	fc_host_port_name(lport->host) = lport->wwpn;
1601 	fc_host_supported_classes(lport->host) = FC_COS_CLASS3;
1602 	memset(fc_host_supported_fc4s(lport->host), 0,
1603 	       sizeof(fc_host_supported_fc4s(lport->host)));
1604 	fc_host_supported_fc4s(lport->host)[2] = 1;
1605 	fc_host_supported_fc4s(lport->host)[7] = 1;
1606 
1607 	/* This value is also unchanging */
1608 	memset(fc_host_active_fc4s(lport->host), 0,
1609 	       sizeof(fc_host_active_fc4s(lport->host)));
1610 	fc_host_active_fc4s(lport->host)[2] = 1;
1611 	fc_host_active_fc4s(lport->host)[7] = 1;
1612 	fc_host_maxframe_size(lport->host) = lport->mfs;
1613 	fc_host_supported_speeds(lport->host) = 0;
1614 	if (lport->link_supported_speeds & FC_PORTSPEED_1GBIT)
1615 		fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_1GBIT;
1616 	if (lport->link_supported_speeds & FC_PORTSPEED_10GBIT)
1617 		fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_10GBIT;
1618 
1619 	return 0;
1620 }
1621 EXPORT_SYMBOL(fc_lport_init);
1622 
1623 /**
1624  * fc_lport_bsg_resp() - The common response handler for FC Passthrough requests
1625  * @sp:	      The sequence for the FC Passthrough response
1626  * @fp:	      The response frame
1627  * @info_arg: The BSG info that the response is for
1628  */
1629 static void fc_lport_bsg_resp(struct fc_seq *sp, struct fc_frame *fp,
1630 			      void *info_arg)
1631 {
1632 	struct fc_bsg_info *info = info_arg;
1633 	struct fc_bsg_job *job = info->job;
1634 	struct fc_lport *lport = info->lport;
1635 	struct fc_frame_header *fh;
1636 	size_t len;
1637 	void *buf;
1638 
1639 	if (IS_ERR(fp)) {
1640 		job->reply->result = (PTR_ERR(fp) == -FC_EX_CLOSED) ?
1641 			-ECONNABORTED : -ETIMEDOUT;
1642 		job->reply_len = sizeof(uint32_t);
1643 		job->state_flags |= FC_RQST_STATE_DONE;
1644 		job->job_done(job);
1645 		kfree(info);
1646 		return;
1647 	}
1648 
1649 	mutex_lock(&lport->lp_mutex);
1650 	fh = fc_frame_header_get(fp);
1651 	len = fr_len(fp) - sizeof(*fh);
1652 	buf = fc_frame_payload_get(fp, 0);
1653 
1654 	if (fr_sof(fp) == FC_SOF_I3 && !ntohs(fh->fh_seq_cnt)) {
1655 		/* Get the response code from the first frame payload */
1656 		unsigned short cmd = (info->rsp_code == FC_FS_ACC) ?
1657 			ntohs(((struct fc_ct_hdr *)buf)->ct_cmd) :
1658 			(unsigned short)fc_frame_payload_op(fp);
1659 
1660 		/* Save the reply status of the job */
1661 		job->reply->reply_data.ctels_reply.status =
1662 			(cmd == info->rsp_code) ?
1663 			FC_CTELS_STATUS_OK : FC_CTELS_STATUS_REJECT;
1664 	}
1665 
1666 	job->reply->reply_payload_rcv_len +=
1667 		fc_copy_buffer_to_sglist(buf, len, info->sg, &info->nents,
1668 					 &info->offset, KM_BIO_SRC_IRQ, NULL);
1669 
1670 	if (fr_eof(fp) == FC_EOF_T &&
1671 	    (ntoh24(fh->fh_f_ctl) & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) ==
1672 	    (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) {
1673 		if (job->reply->reply_payload_rcv_len >
1674 		    job->reply_payload.payload_len)
1675 			job->reply->reply_payload_rcv_len =
1676 				job->reply_payload.payload_len;
1677 		job->reply->result = 0;
1678 		job->state_flags |= FC_RQST_STATE_DONE;
1679 		job->job_done(job);
1680 		kfree(info);
1681 	}
1682 	fc_frame_free(fp);
1683 	mutex_unlock(&lport->lp_mutex);
1684 }
1685 
1686 /**
1687  * fc_lport_els_request() - Send ELS passthrough request
1688  * @job:   The BSG Passthrough job
1689  * @lport: The local port sending the request
1690  * @did:   The destination port id
1691  *
1692  * Locking Note: The lport lock is expected to be held before calling
1693  * this routine.
1694  */
1695 static int fc_lport_els_request(struct fc_bsg_job *job,
1696 				struct fc_lport *lport,
1697 				u32 did, u32 tov)
1698 {
1699 	struct fc_bsg_info *info;
1700 	struct fc_frame *fp;
1701 	struct fc_frame_header *fh;
1702 	char *pp;
1703 	int len;
1704 
1705 	fp = fc_frame_alloc(lport, job->request_payload.payload_len);
1706 	if (!fp)
1707 		return -ENOMEM;
1708 
1709 	len = job->request_payload.payload_len;
1710 	pp = fc_frame_payload_get(fp, len);
1711 
1712 	sg_copy_to_buffer(job->request_payload.sg_list,
1713 			  job->request_payload.sg_cnt,
1714 			  pp, len);
1715 
1716 	fh = fc_frame_header_get(fp);
1717 	fh->fh_r_ctl = FC_RCTL_ELS_REQ;
1718 	hton24(fh->fh_d_id, did);
1719 	hton24(fh->fh_s_id, lport->port_id);
1720 	fh->fh_type = FC_TYPE_ELS;
1721 	hton24(fh->fh_f_ctl, FC_FCTL_REQ);
1722 	fh->fh_cs_ctl = 0;
1723 	fh->fh_df_ctl = 0;
1724 	fh->fh_parm_offset = 0;
1725 
1726 	info = kzalloc(sizeof(struct fc_bsg_info), GFP_KERNEL);
1727 	if (!info) {
1728 		fc_frame_free(fp);
1729 		return -ENOMEM;
1730 	}
1731 
1732 	info->job = job;
1733 	info->lport = lport;
1734 	info->rsp_code = ELS_LS_ACC;
1735 	info->nents = job->reply_payload.sg_cnt;
1736 	info->sg = job->reply_payload.sg_list;
1737 
1738 	if (!lport->tt.exch_seq_send(lport, fp, fc_lport_bsg_resp,
1739 				     NULL, info, tov))
1740 		return -ECOMM;
1741 	return 0;
1742 }
1743 
1744 /**
1745  * fc_lport_ct_request() - Send CT Passthrough request
1746  * @job:   The BSG Passthrough job
1747  * @lport: The local port sending the request
1748  * @did:   The destination FC-ID
1749  * @tov:   The timeout period to wait for the response
1750  *
1751  * Locking Note: The lport lock is expected to be held before calling
1752  * this routine.
1753  */
1754 static int fc_lport_ct_request(struct fc_bsg_job *job,
1755 			       struct fc_lport *lport, u32 did, u32 tov)
1756 {
1757 	struct fc_bsg_info *info;
1758 	struct fc_frame *fp;
1759 	struct fc_frame_header *fh;
1760 	struct fc_ct_req *ct;
1761 	size_t len;
1762 
1763 	fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) +
1764 			    job->request_payload.payload_len);
1765 	if (!fp)
1766 		return -ENOMEM;
1767 
1768 	len = job->request_payload.payload_len;
1769 	ct = fc_frame_payload_get(fp, len);
1770 
1771 	sg_copy_to_buffer(job->request_payload.sg_list,
1772 			  job->request_payload.sg_cnt,
1773 			  ct, len);
1774 
1775 	fh = fc_frame_header_get(fp);
1776 	fh->fh_r_ctl = FC_RCTL_DD_UNSOL_CTL;
1777 	hton24(fh->fh_d_id, did);
1778 	hton24(fh->fh_s_id, lport->port_id);
1779 	fh->fh_type = FC_TYPE_CT;
1780 	hton24(fh->fh_f_ctl, FC_FCTL_REQ);
1781 	fh->fh_cs_ctl = 0;
1782 	fh->fh_df_ctl = 0;
1783 	fh->fh_parm_offset = 0;
1784 
1785 	info = kzalloc(sizeof(struct fc_bsg_info), GFP_KERNEL);
1786 	if (!info) {
1787 		fc_frame_free(fp);
1788 		return -ENOMEM;
1789 	}
1790 
1791 	info->job = job;
1792 	info->lport = lport;
1793 	info->rsp_code = FC_FS_ACC;
1794 	info->nents = job->reply_payload.sg_cnt;
1795 	info->sg = job->reply_payload.sg_list;
1796 
1797 	if (!lport->tt.exch_seq_send(lport, fp, fc_lport_bsg_resp,
1798 				     NULL, info, tov))
1799 		return -ECOMM;
1800 	return 0;
1801 }
1802 
1803 /**
1804  * fc_lport_bsg_request() - The common entry point for sending
1805  *			    FC Passthrough requests
1806  * @job: The BSG passthrough job
1807  */
1808 int fc_lport_bsg_request(struct fc_bsg_job *job)
1809 {
1810 	struct request *rsp = job->req->next_rq;
1811 	struct Scsi_Host *shost = job->shost;
1812 	struct fc_lport *lport = shost_priv(shost);
1813 	struct fc_rport *rport;
1814 	struct fc_rport_priv *rdata;
1815 	int rc = -EINVAL;
1816 	u32 did;
1817 
1818 	job->reply->reply_payload_rcv_len = 0;
1819 	if (rsp)
1820 		rsp->resid_len = job->reply_payload.payload_len;
1821 
1822 	mutex_lock(&lport->lp_mutex);
1823 
1824 	switch (job->request->msgcode) {
1825 	case FC_BSG_RPT_ELS:
1826 		rport = job->rport;
1827 		if (!rport)
1828 			break;
1829 
1830 		rdata = rport->dd_data;
1831 		rc = fc_lport_els_request(job, lport, rport->port_id,
1832 					  rdata->e_d_tov);
1833 		break;
1834 
1835 	case FC_BSG_RPT_CT:
1836 		rport = job->rport;
1837 		if (!rport)
1838 			break;
1839 
1840 		rdata = rport->dd_data;
1841 		rc = fc_lport_ct_request(job, lport, rport->port_id,
1842 					 rdata->e_d_tov);
1843 		break;
1844 
1845 	case FC_BSG_HST_CT:
1846 		did = ntoh24(job->request->rqst_data.h_ct.port_id);
1847 		if (did == FC_FID_DIR_SERV)
1848 			rdata = lport->dns_rdata;
1849 		else
1850 			rdata = lport->tt.rport_lookup(lport, did);
1851 
1852 		if (!rdata)
1853 			break;
1854 
1855 		rc = fc_lport_ct_request(job, lport, did, rdata->e_d_tov);
1856 		break;
1857 
1858 	case FC_BSG_HST_ELS_NOLOGIN:
1859 		did = ntoh24(job->request->rqst_data.h_els.port_id);
1860 		rc = fc_lport_els_request(job, lport, did, lport->e_d_tov);
1861 		break;
1862 	}
1863 
1864 	mutex_unlock(&lport->lp_mutex);
1865 	return rc;
1866 }
1867 EXPORT_SYMBOL(fc_lport_bsg_request);
1868