/* xref: /openbmc/linux/drivers/scsi/libfc/fc_disc.c (revision 29d898e9) */
/*
 * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Maintained at www.Open-FCoE.org
 */

/*
 * Target Discovery
 *
 * This block discovers all FC-4 remote ports, including FCP initiators. It
 * also handles RSCN events and re-discovery if necessary.
 */

/*
 * DISC LOCKING
 *
 * The disc mutex can be locked when acquiring rport locks, but may not
 * be held when acquiring the lport lock. Refer to fc_lport.c for more
 * details.
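 *
 * In lock-ordering terms: the lport lock (when needed) is taken before the
 * disc mutex, and the disc mutex may be held while individual rport locks
 * are taken.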
 */

#include <linux/timer.h>
#include <linux/err.h>
#include <asm/unaligned.h>

#include <scsi/fc/fc_gs.h>

#include <scsi/libfc.h>

#define FC_DISC_RETRY_LIMIT	3	/* max retries */
#define FC_DISC_RETRY_DELAY	500UL	/* (msecs) delay */

static void fc_disc_gpn_ft_req(struct fc_disc *);
static void fc_disc_gpn_ft_resp(struct fc_seq *, struct fc_frame *, void *);
static void fc_disc_done(struct fc_disc *, enum fc_disc_event);
static void fc_disc_timeout(struct work_struct *);
static void fc_disc_single(struct fc_disc *, struct fc_disc_port *);
static void fc_disc_restart(struct fc_disc *);

/**
 * fc_disc_stop_rports() - delete all the remote ports associated with the lport
 * @disc: The discovery job to stop rports on
 *
 * Locking Note: This function expects that the lport mutex is locked before
 * calling it.
 */
void fc_disc_stop_rports(struct fc_disc *disc)
{
	struct fc_lport *lport;
	struct fc_rport_priv *rdata, *next;

	lport = disc->lport;

	mutex_lock(&disc->disc_mutex);
	list_for_each_entry_safe(rdata, next, &disc->rports, peers)
		lport->tt.rport_logoff(rdata);
	mutex_unlock(&disc->disc_mutex);
}

/**
 * fc_disc_recv_rscn_req() - Handle Registered State Change Notification (RSCN)
 * @sp: Current sequence of the RSCN exchange
 * @fp: RSCN Frame
 * @disc: The discovery context
 *
 * Locking Note: This function expects that the disc_mutex is locked
 *		 before it is called.
 */
static void fc_disc_recv_rscn_req(struct fc_seq *sp, struct fc_frame *fp,
				  struct fc_disc *disc)
{
	struct fc_lport *lport;
	struct fc_rport_priv *rdata;
	struct fc_els_rscn *rp;
	struct fc_els_rscn_page *pp;
	struct fc_seq_els_data rjt_data;
	unsigned int len;
	int redisc = 0;
	enum fc_els_rscn_ev_qual ev_qual;
	enum fc_els_rscn_addr_fmt fmt;
	LIST_HEAD(disc_ports);
	struct fc_disc_port *dp, *next;

	lport = disc->lport;

	FC_DISC_DBG(disc, "Received an RSCN event\n");

	/* make sure the frame contains an RSCN message */
	rp = fc_frame_payload_get(fp, sizeof(*rp));
	if (!rp)
		goto reject;
	/* make sure the page length is as expected (4 bytes) */
	if (rp->rscn_page_len != sizeof(*pp))
		goto reject;
	/* get the RSCN payload length */
	len = ntohs(rp->rscn_plen);
	if (len < sizeof(*rp))
		goto reject;
	/* make sure the frame contains the expected payload */
	rp = fc_frame_payload_get(fp, len);
	if (!rp)
		goto reject;
	/* payload must be a multiple of the RSCN page size */
	len -= sizeof(*rp);
	if (len % sizeof(*pp))
		goto reject;

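	/*
	 * Walk the RSCN pages: each page carries an event qualifier, an
	 * address format and the affected fabric ID.  The event qualifier
	 * is extracted but not otherwise used here.
	 */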
	for (pp = (void *)(rp + 1); len > 0; len -= sizeof(*pp), pp++) {
		ev_qual = pp->rscn_page_flags >> ELS_RSCN_EV_QUAL_BIT;
		ev_qual &= ELS_RSCN_EV_QUAL_MASK;
		fmt = pp->rscn_page_flags >> ELS_RSCN_ADDR_FMT_BIT;
		fmt &= ELS_RSCN_ADDR_FMT_MASK;
		/*
		 * if we get an address format other than port
		 * (area, domain, fabric), then do a full discovery
		 */
		switch (fmt) {
		case ELS_ADDR_FMT_PORT:
			FC_DISC_DBG(disc, "Port address format for port "
				    "(%6x)\n", ntoh24(pp->rscn_fid));
			dp = kzalloc(sizeof(*dp), GFP_KERNEL);
			if (!dp) {
				redisc = 1;
				break;
			}
			dp->lp = lport;
			dp->ids.port_id = ntoh24(pp->rscn_fid);
			dp->ids.port_name = -1;
			dp->ids.node_name = -1;
			dp->ids.roles = FC_RPORT_ROLE_UNKNOWN;
			list_add_tail(&dp->peers, &disc_ports);
			break;
		case ELS_ADDR_FMT_AREA:
		case ELS_ADDR_FMT_DOM:
		case ELS_ADDR_FMT_FAB:
		default:
			FC_DISC_DBG(disc, "Address format is (%d)\n", fmt);
			redisc = 1;
			break;
		}
	}
	lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
	if (redisc) {
		FC_DISC_DBG(disc, "RSCN received: rediscovering\n");
		fc_disc_restart(disc);
	} else {
		FC_DISC_DBG(disc, "RSCN received: not rediscovering. "
			    "redisc %d state %d in_prog %d\n",
			    redisc, lport->state, disc->pending);
		list_for_each_entry_safe(dp, next, &disc_ports, peers) {
			list_del(&dp->peers);
			rdata = lport->tt.rport_lookup(lport, dp->ids.port_id);
			if (rdata) {
				lport->tt.rport_logoff(rdata);
			}
			fc_disc_single(disc, dp);
		}
	}
	fc_frame_free(fp);
	return;
reject:
	FC_DISC_DBG(disc, "Received a bad RSCN frame\n");
	rjt_data.fp = NULL;
	rjt_data.reason = ELS_RJT_LOGIC;
	rjt_data.explan = ELS_EXPL_NONE;
	lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
	fc_frame_free(fp);
}

/**
 * fc_disc_recv_req() - Handle incoming requests
 * @sp: Current sequence of the request exchange
 * @fp: The frame
 * @lport: The FC local port
 *
 * Locking Note: This function is called from the EM and will lock
 *		 the disc_mutex before calling the handler for the
 *		 request.
 */
static void fc_disc_recv_req(struct fc_seq *sp, struct fc_frame *fp,
			     struct fc_lport *lport)
{
	u8 op;
	struct fc_disc *disc = &lport->disc;

	op = fc_frame_payload_op(fp);
	switch (op) {
	case ELS_RSCN:
		mutex_lock(&disc->disc_mutex);
		fc_disc_recv_rscn_req(sp, fp, disc);
		mutex_unlock(&disc->disc_mutex);
		break;
	default:
		FC_DISC_DBG(disc, "Received an unsupported request, "
			    "the opcode is (%x)\n", op);
		break;
	}
}

/**
 * fc_disc_restart() - Restart discovery
 * @disc: FC discovery context
 *
 * Locking Note: This function expects that the disc mutex
 *		 is already locked.
 */
static void fc_disc_restart(struct fc_disc *disc)
{
	FC_DISC_DBG(disc, "Restarting discovery\n");

	disc->requested = 1;
	if (disc->pending)
		return;

	/*
	 * Advance disc_id.  This is an arbitrary non-zero number that will
	 * match the value in the fc_rport_priv after discovery for all
	 * freshly-discovered remote ports.  Avoid wrapping to zero.
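	 * Adding 2 and ORing in the low bit keeps disc_id odd, so it can
	 * never become zero even when the addition eventually wraps.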
	 */
	disc->disc_id = (disc->disc_id + 2) | 1;
	disc->retry_count = 0;
	fc_disc_gpn_ft_req(disc);
}

/**
 * fc_disc_start() - Fibre Channel Target discovery
 * @disc_callback: function to be called when discovery is complete
 * @lport: FC local port
 */
static void fc_disc_start(void (*disc_callback)(struct fc_lport *,
						enum fc_disc_event),
			  struct fc_lport *lport)
{
	struct fc_disc *disc = &lport->disc;

	/*
	 * At this point we may have a new disc job or an existing
	 * one. Either way, let's lock when we make changes to it
	 * and send the GPN_FT request.
	 */
	mutex_lock(&disc->disc_mutex);
	disc->disc_callback = disc_callback;
	fc_disc_restart(disc);
	mutex_unlock(&disc->disc_mutex);
}

/**
 * fc_disc_done() - Discovery has been completed
 * @disc: FC discovery context
 * @event: discovery completion status
 *
 * Locking Note: This function expects that the disc mutex is locked before
 * it is called. The discovery callback is then made with the lock released,
 * and the lock is re-taken before returning from this function.
 */
static void fc_disc_done(struct fc_disc *disc, enum fc_disc_event event)
{
	struct fc_lport *lport = disc->lport;
	struct fc_rport_priv *rdata;

	FC_DISC_DBG(disc, "Discovery complete\n");

	disc->pending = 0;
	if (disc->requested) {
		fc_disc_restart(disc);
		return;
	}

	/*
	 * Go through all remote ports.  If they were found in the latest
	 * discovery, reverify or log them in.  Otherwise, log them out.
	 * Skip ports which were never discovered.  These are the dNS port
	 * and ports which were created by PLOGI.
	 */
	list_for_each_entry(rdata, &disc->rports, peers) {
		if (!rdata->disc_id)
			continue;
		if (rdata->disc_id == disc->disc_id)
			lport->tt.rport_login(rdata);
		else
			lport->tt.rport_logoff(rdata);
	}

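	/*
	 * Drop the mutex across the callback: the callback may need the
	 * lport lock, which must not be acquired while the disc mutex is
	 * held (see the DISC LOCKING note above).
	 */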
	mutex_unlock(&disc->disc_mutex);
	disc->disc_callback(lport, event);
	mutex_lock(&disc->disc_mutex);
}

/**
 * fc_disc_error() - Handle error on dNS request
 * @disc: FC discovery context
 * @fp: The frame pointer
 */
static void fc_disc_error(struct fc_disc *disc, struct fc_frame *fp)
{
	struct fc_lport *lport = disc->lport;
	unsigned long delay = 0;

	FC_DISC_DBG(disc, "Error %ld, retries %d/%d\n",
		    PTR_ERR(fp), disc->retry_count,
		    FC_DISC_RETRY_LIMIT);

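	/*
	 * Only frame allocation failures (fp == NULL) and exchange timeouts
	 * are retried here; other exchange errors fall through without
	 * scheduling a retry.
	 */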
	if (!fp || PTR_ERR(fp) == -FC_EX_TIMEOUT) {
		/*
		 * Memory allocation failure or the exchange timed out;
		 * retry after a delay.
		 */
		if (disc->retry_count < FC_DISC_RETRY_LIMIT) {
			/* go ahead and retry */
			if (!fp)
				delay = msecs_to_jiffies(FC_DISC_RETRY_DELAY);
			else {
				delay = msecs_to_jiffies(lport->e_d_tov);

				/* use a shorter delay for the first retry */
				if (!disc->retry_count)
					delay /= 4;
			}
			disc->retry_count++;
			schedule_delayed_work(&disc->disc_work, delay);
		} else
			fc_disc_done(disc, DISC_EV_FAILED);
	}
}

/**
 * fc_disc_gpn_ft_req() - Send Get Port Names by FC-4 type (GPN_FT) request
 * @disc: FC discovery context
 *
 * Locking Note: This function expects that the disc_mutex is locked
 *		 before it is called.
 */
static void fc_disc_gpn_ft_req(struct fc_disc *disc)
{
	struct fc_frame *fp;
	struct fc_lport *lport = disc->lport;

	WARN_ON(!fc_lport_test_ready(lport));

	disc->pending = 1;
	disc->requested = 0;

	disc->buf_len = 0;
	disc->seq_count = 0;
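	/*
	 * Note: the GPN_FT request payload has the same layout as GID_FT
	 * (scope fields plus the FC-4 type), which is why the frame is
	 * sized using struct fc_ns_gid_ft below.
	 */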
	fp = fc_frame_alloc(lport,
			    sizeof(struct fc_ct_hdr) +
			    sizeof(struct fc_ns_gid_ft));
	if (!fp)
		goto err;

	if (lport->tt.elsct_send(lport, 0, fp,
				 FC_NS_GPN_FT,
				 fc_disc_gpn_ft_resp,
				 disc, lport->e_d_tov))
		return;
err:
	fc_disc_error(disc, fp);
}

/**
 * fc_disc_gpn_ft_parse() - Parse the body of the dNS GPN_FT response.
 * @disc: FC discovery context
 * @buf: GPN_FT response buffer
 * @len: size of response buffer
 *
 * Goes through the list of IDs and names resulting from a request.
 */
static int fc_disc_gpn_ft_parse(struct fc_disc *disc, void *buf, size_t len)
{
	struct fc_lport *lport;
	struct fc_gpn_ft_resp *np;
	char *bp;
	size_t plen;
	size_t tlen;
	int error = 0;
	struct fc_rport_identifiers ids;
	struct fc_rport_priv *rdata;

	lport = disc->lport;
	disc->seq_count++;

	/*
	 * Handle partial name record left over from previous call.
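	 * Each name record is a fixed-size struct fc_gpn_ft_resp, so a record
	 * may be split across two response frames; any leading fragment is
	 * stashed in disc->partial_buf until the remainder arrives.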
	 */
	bp = buf;
	plen = len;
	np = (struct fc_gpn_ft_resp *)bp;
	tlen = disc->buf_len;
	disc->buf_len = 0;
	if (tlen) {
		WARN_ON(tlen >= sizeof(*np));
		plen = sizeof(*np) - tlen;
		WARN_ON(plen <= 0);
		WARN_ON(plen >= sizeof(*np));
		if (plen > len)
			plen = len;
		np = &disc->partial_buf;
		memcpy((char *)np + tlen, bp, plen);

		/*
		 * Set bp so that the loop below will advance it to the
		 * first valid full name element.
		 */
		bp -= tlen;
		len += tlen;
		plen += tlen;
		disc->buf_len = (unsigned char) plen;
		if (plen == sizeof(*np))
			disc->buf_len = 0;
	}

	/*
	 * Handle full name records, including the one filled from above.
	 * Normally, np == bp and plen == len, but in the partial case above,
	 * bp and len describe the overall buffer, while np and plen describe
	 * the partial buffer, which should now usually be full.
	 * After the first time through the loop, things return to "normal".
	 */
	while (plen >= sizeof(*np)) {
		ids.port_id = ntoh24(np->fp_fid);
		ids.port_name = ntohll(np->fp_wwpn);
		ids.node_name = -1;
		ids.roles = FC_RPORT_ROLE_UNKNOWN;

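		/*
		 * Skip the local port's own entry; everything else is
		 * recorded as a remote port for this discovery pass.
		 */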
		if (ids.port_id != fc_host_port_id(lport->host) &&
		    ids.port_name != lport->wwpn) {
			rdata = lport->tt.rport_create(lport, &ids);
			if (rdata)
				rdata->disc_id = disc->disc_id;
			else {
				printk(KERN_WARNING "libfc: Failed to allocate "
				       "memory for the newly discovered port "
				       "(%6x)\n", ids.port_id);
				error = -ENOMEM;
			}
		}

		if (np->fp_flags & FC_NS_FID_LAST) {
			fc_disc_done(disc, DISC_EV_SUCCESS);
			len = 0;
			break;
		}
		len -= sizeof(*np);
		bp += sizeof(*np);
		np = (struct fc_gpn_ft_resp *)bp;
		plen = len;
	}

	/*
	 * Save any partial record at the end of the buffer for next time.
	 */
	if (error == 0 && len > 0 && len < sizeof(*np)) {
		if (np != &disc->partial_buf) {
			FC_DISC_DBG(disc, "Partial buffer remains "
				    "for discovery\n");
			memcpy(&disc->partial_buf, np, len);
		}
		disc->buf_len = (unsigned char) len;
	}
	return error;
}

/**
 * fc_disc_timeout() - Retry handler for the disc component
 * @work: Delayed work item embedded in the fc_disc that needs a retry
 *
 * Handle the retry of a GPN_FT request that previously failed, e.g. when
 * a frame could not be allocated or the exchange timed out.
 */
static void fc_disc_timeout(struct work_struct *work)
{
	struct fc_disc *disc = container_of(work,
					    struct fc_disc,
					    disc_work.work);
	mutex_lock(&disc->disc_mutex);
	fc_disc_gpn_ft_req(disc);
	mutex_unlock(&disc->disc_mutex);
}

/**
 * fc_disc_gpn_ft_resp() - Handle a response frame from Get Port Names (GPN_FT)
 * @sp: Current sequence of GPN_FT exchange
 * @fp: response frame
 * @disc_arg: The discovery context
 *
 * Locking Note: This function is called without the disc mutex held, and
 *		 should do all its processing with the mutex held.
 */
static void fc_disc_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp,
				void *disc_arg)
{
	struct fc_disc *disc = disc_arg;
	struct fc_ct_hdr *cp;
	struct fc_frame_header *fh;
	enum fc_disc_event event = DISC_EV_NONE;
	unsigned int seq_cnt;
	unsigned int len;
	int error = 0;

	mutex_lock(&disc->disc_mutex);
	FC_DISC_DBG(disc, "Received a GPN_FT response\n");

	if (IS_ERR(fp)) {
		fc_disc_error(disc, fp);
		mutex_unlock(&disc->disc_mutex);
		return;
	}

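	/*
	 * The first frame of the response (class 3 initiate SOF, seq_cnt 0)
	 * starts with the CT header; continuation frames (normal SOF) carry
	 * only name records and must arrive in sequence-count order.
	 */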
	WARN_ON(!fc_frame_is_linear(fp));	/* buffer must be contiguous */
	fh = fc_frame_header_get(fp);
	len = fr_len(fp) - sizeof(*fh);
	seq_cnt = ntohs(fh->fh_seq_cnt);
	if (fr_sof(fp) == FC_SOF_I3 && seq_cnt == 0 && disc->seq_count == 0) {
		cp = fc_frame_payload_get(fp, sizeof(*cp));
		if (!cp) {
			FC_DISC_DBG(disc, "GPN_FT response too short, len %d\n",
				    fr_len(fp));
			event = DISC_EV_FAILED;
		} else if (ntohs(cp->ct_cmd) == FC_FS_ACC) {

			/* Accepted, parse the response. */
			len -= sizeof(*cp);
			error = fc_disc_gpn_ft_parse(disc, cp + 1, len);
		} else if (ntohs(cp->ct_cmd) == FC_FS_RJT) {
			FC_DISC_DBG(disc, "GPN_FT rejected reason %x exp %x "
				    "(check zoning)\n", cp->ct_reason,
				    cp->ct_explan);
			event = DISC_EV_FAILED;
			if (cp->ct_reason == FC_FS_RJT_UNABL &&
			    cp->ct_explan == FC_FS_EXP_FTNR)
				event = DISC_EV_SUCCESS;
		} else {
			FC_DISC_DBG(disc, "GPN_FT unexpected response code "
				    "%x\n", ntohs(cp->ct_cmd));
			event = DISC_EV_FAILED;
		}
	} else if (fr_sof(fp) == FC_SOF_N3 && seq_cnt == disc->seq_count) {
		error = fc_disc_gpn_ft_parse(disc, fh + 1, len);
	} else {
		FC_DISC_DBG(disc, "GPN_FT unexpected frame - out of sequence? "
			    "seq_cnt %x expected %x sof %x eof %x\n",
			    seq_cnt, disc->seq_count, fr_sof(fp), fr_eof(fp));
		event = DISC_EV_FAILED;
	}
	if (error)
		fc_disc_error(disc, fp);
	else if (event != DISC_EV_NONE)
		fc_disc_done(disc, event);
	fc_frame_free(fp);
	mutex_unlock(&disc->disc_mutex);
}

/**
 * fc_disc_single() - Discover the directory information for a single target
 * @disc: FC discovery context
 * @dp: The port to rediscover
 *
 * Locking Note: This function expects that the disc_mutex is locked
 *		 before it is called.
 */
static void fc_disc_single(struct fc_disc *disc, struct fc_disc_port *dp)
{
	struct fc_lport *lport;
	struct fc_rport_priv *rdata;

	lport = disc->lport;

	if (dp->ids.port_id == fc_host_port_id(lport->host))
		goto out;

	rdata = lport->tt.rport_create(lport, &dp->ids);
	if (rdata) {
		rdata->disc_id = disc->disc_id;
		lport->tt.rport_login(rdata);
	}
out:
	kfree(dp);
}

/**
 * fc_disc_stop() - Stop discovery for a given lport
 * @lport: The lport that discovery should stop for
 */
void fc_disc_stop(struct fc_lport *lport)
{
	struct fc_disc *disc = &lport->disc;

	if (disc) {
		cancel_delayed_work_sync(&disc->disc_work);
		fc_disc_stop_rports(disc);
	}
}

/**
 * fc_disc_stop_final() - Stop discovery for a given lport
 * @lport: The lport that discovery should stop for
 *
 * This function will block until discovery has been
 * completely stopped and all rports have been deleted.
 */
void fc_disc_stop_final(struct fc_lport *lport)
{
	fc_disc_stop(lport);
	lport->tt.rport_flush_queue();
}

/**
 * fc_disc_init() - Initialize the discovery block
 * @lport: FC local port
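 *
 * Fill in the discovery-related transport template hooks (disc_start,
 * disc_stop, disc_stop_final and disc_recv_req) for any that the LLD has
 * not already provided, and set up the embedded discovery state.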
 */
int fc_disc_init(struct fc_lport *lport)
{
	struct fc_disc *disc;

	if (!lport->tt.disc_start)
		lport->tt.disc_start = fc_disc_start;

	if (!lport->tt.disc_stop)
		lport->tt.disc_stop = fc_disc_stop;

	if (!lport->tt.disc_stop_final)
		lport->tt.disc_stop_final = fc_disc_stop_final;

	if (!lport->tt.disc_recv_req)
		lport->tt.disc_recv_req = fc_disc_recv_req;

	disc = &lport->disc;
	INIT_DELAYED_WORK(&disc->disc_work, fc_disc_timeout);
	mutex_init(&disc->disc_mutex);
	INIT_LIST_HEAD(&disc->rports);

	disc->lport = lport;

	return 0;
}
EXPORT_SYMBOL(fc_disc_init);