/*
 * Copyright (c) 2016 Avago Technologies.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful.
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
 * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
 * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO
 * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
 * See the GNU General Public License for more details, a copy of which
 * can be found in the file COPYING included with this package
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/parser.h>
#include <uapi/scsi/fc/fc_fs.h>

#include "../host/nvme.h"
#include "../target/nvmet.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>

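/*
 * Option flags/values accepted by the fcloop "ctl" sysfs attributes
 * (add_local_port, add_remote_port, add_target_port and their del_
 * counterparts).
 */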
enum {
	NVMF_OPT_ERR		= 0,
	NVMF_OPT_WWNN		= 1 << 0,
	NVMF_OPT_WWPN		= 1 << 1,
	NVMF_OPT_ROLES		= 1 << 2,
	NVMF_OPT_FCADDR		= 1 << 3,
	NVMF_OPT_LPWWNN		= 1 << 4,
	NVMF_OPT_LPWWPN		= 1 << 5,
};

struct fcloop_ctrl_options {
	int			mask;
	u64			wwnn;
	u64			wwpn;
	u32			roles;
	u32			fcaddr;
	u64			lpwwnn;
	u64			lpwwpn;
};

static const match_table_t opt_tokens = {
	{ NVMF_OPT_WWNN,	"wwnn=%s"	},
	{ NVMF_OPT_WWPN,	"wwpn=%s"	},
	{ NVMF_OPT_ROLES,	"roles=%d"	},
	{ NVMF_OPT_FCADDR,	"fcaddr=%x"	},
	{ NVMF_OPT_LPWWNN,	"lpwwnn=%s"	},
	{ NVMF_OPT_LPWWPN,	"lpwwpn=%s"	},
	{ NVMF_OPT_ERR,		NULL		}
};

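/*
 * Parse a comma-separated "option=value" string, as written to one of
 * the sysfs attributes, into a struct fcloop_ctrl_options.
 */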
static int
fcloop_parse_options(struct fcloop_ctrl_options *opts,
		const char *buf)
{
	substring_t args[MAX_OPT_ARGS];
	char *options, *o, *p;
	int token, ret = 0;
	u64 token64;

	options = o = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	while ((p = strsep(&o, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, opt_tokens, args);
		opts->mask |= token;
		switch (token) {
		case NVMF_OPT_WWNN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->wwnn = token64;
			break;
		case NVMF_OPT_WWPN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->wwpn = token64;
			break;
		case NVMF_OPT_ROLES:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->roles = token;
			break;
		case NVMF_OPT_FCADDR:
			if (match_hex(args, &token)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->fcaddr = token;
			break;
		case NVMF_OPT_LPWWNN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->lpwwnn = token64;
			break;
		case NVMF_OPT_LPWWPN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->lpwwpn = token64;
			break;
		default:
			pr_warn("unknown parameter or missing value '%s'\n", p);
			ret = -EINVAL;
			goto out_free_options;
		}
	}

out_free_options:
	kfree(options);
	return ret;
}

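/*
 * Parse only the wwnn/wwpn pair that names an existing port; both
 * values must be supplied.
 */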
static int
fcloop_parse_nm_options(struct device *dev, u64 *nname, u64 *pname,
		const char *buf)
{
	substring_t args[MAX_OPT_ARGS];
	char *options, *o, *p;
	int token, ret = 0;
	u64 token64;

	*nname = -1;
	*pname = -1;

	options = o = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	while ((p = strsep(&o, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, opt_tokens, args);
		switch (token) {
		case NVMF_OPT_WWNN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			*nname = token64;
			break;
		case NVMF_OPT_WWPN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			*pname = token64;
			break;
		default:
			pr_warn("unknown parameter or missing value '%s'\n", p);
			ret = -EINVAL;
			goto out_free_options;
		}
	}

out_free_options:
	kfree(options);

	if (!ret) {
		if (*nname == -1)
			return -EINVAL;
		if (*pname == -1)
			return -EINVAL;
	}

	return ret;
}


#define LPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN)

#define RPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN |  \
			 NVMF_OPT_LPWWNN | NVMF_OPT_LPWWPN)

#define TGTPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN)

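/*
 * All fcloop local ports and nports are tracked on these lists,
 * protected by fcloop_lock.
 */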
static DEFINE_SPINLOCK(fcloop_lock);
static LIST_HEAD(fcloop_lports);
static LIST_HEAD(fcloop_nports);

struct fcloop_lport {
	struct nvme_fc_local_port *localport;
	struct list_head lport_list;
	struct completion unreg_done;
};

struct fcloop_lport_priv {
	struct fcloop_lport *lport;
};

struct fcloop_rport {
	struct nvme_fc_remote_port *remoteport;
	struct nvmet_fc_target_port *targetport;
	struct fcloop_nport *nport;
	struct fcloop_lport *lport;
};

struct fcloop_tport {
	struct nvmet_fc_target_port *targetport;
	struct nvme_fc_remote_port *remoteport;
	struct fcloop_nport *nport;
	struct fcloop_lport *lport;
};

struct fcloop_nport {
	struct fcloop_rport *rport;
	struct fcloop_tport *tport;
	struct fcloop_lport *lport;
	struct list_head nport_list;
	struct kref ref;
	u64 node_name;
	u64 port_name;
	u32 port_role;
	u32 port_id;
};

struct fcloop_lsreq {
	struct fcloop_tport		*tport;
	struct nvmefc_ls_req		*lsreq;
	struct work_struct		work;
	struct nvmefc_tgt_ls_req	tgt_ls_req;
	int				status;
};

enum {
	INI_IO_START		= 0,
	INI_IO_ACTIVE		= 1,
	INI_IO_ABORTED		= 2,
	INI_IO_COMPLETED	= 3,
};

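/*
 * Per-I/O context shared by the initiator and target sides of the
 * loopback.  reqlock guards fcpreq/inistate/active/aborted; the kref
 * is held by the original I/O and by any in-flight abort work.
 */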
struct fcloop_fcpreq {
	struct fcloop_tport		*tport;
	struct nvmefc_fcp_req		*fcpreq;
	spinlock_t			reqlock;
	u16				status;
	u32				inistate;
	bool				active;
	bool				aborted;
	struct kref			ref;
	struct work_struct		fcp_rcv_work;
	struct work_struct		abort_rcv_work;
	struct work_struct		tio_done_work;
	struct nvmefc_tgt_fcp_req	tgt_fcp_req;
};

struct fcloop_ini_fcpreq {
	struct nvmefc_fcp_req		*fcpreq;
	struct fcloop_fcpreq		*tfcp_req;
	spinlock_t			inilock;
};

static inline struct fcloop_lsreq *
tgt_ls_req_to_lsreq(struct nvmefc_tgt_ls_req *tgt_lsreq)
{
	return container_of(tgt_lsreq, struct fcloop_lsreq, tgt_ls_req);
}

static inline struct fcloop_fcpreq *
tgt_fcp_req_to_fcpreq(struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	return container_of(tgt_fcpreq, struct fcloop_fcpreq, tgt_fcp_req);
}

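/*
 * Queue hooks for the host side.  fcloop has no real hardware queues,
 * so creation just hands back the localport as the queue handle.
 */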
static int
fcloop_create_queue(struct nvme_fc_local_port *localport,
			unsigned int qidx, u16 qsize,
			void **handle)
{
	*handle = localport;
	return 0;
}

static void
fcloop_delete_queue(struct nvme_fc_local_port *localport,
			unsigned int idx, void *handle)
{
}

/*
 * Transmit of LS RSP done (e.g. buffers all set). Call back up
 * initiator "done" flows.
 */
static void
fcloop_tgt_lsrqst_done_work(struct work_struct *work)
{
	struct fcloop_lsreq *tls_req =
		container_of(work, struct fcloop_lsreq, work);
	struct fcloop_tport *tport = tls_req->tport;
	struct nvmefc_ls_req *lsreq = tls_req->lsreq;

	if (tport->remoteport)
		lsreq->done(lsreq, tls_req->status);
}

static int
fcloop_ls_req(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			struct nvmefc_ls_req *lsreq)
{
	struct fcloop_lsreq *tls_req = lsreq->private;
	struct fcloop_rport *rport = remoteport->private;
	int ret = 0;

	tls_req->lsreq = lsreq;
	INIT_WORK(&tls_req->work, fcloop_tgt_lsrqst_done_work);

	if (!rport->targetport) {
		tls_req->status = -ECONNREFUSED;
		schedule_work(&tls_req->work);
		return ret;
	}

	tls_req->status = 0;
	tls_req->tport = rport->targetport->private;
	ret = nvmet_fc_rcv_ls_req(rport->targetport, &tls_req->tgt_ls_req,
				 lsreq->rqstaddr, lsreq->rqstlen);

	return ret;
}

static int
fcloop_xmt_ls_rsp(struct nvmet_fc_target_port *tport,
			struct nvmefc_tgt_ls_req *tgt_lsreq)
{
	struct fcloop_lsreq *tls_req = tgt_ls_req_to_lsreq(tgt_lsreq);
	struct nvmefc_ls_req *lsreq = tls_req->lsreq;

	memcpy(lsreq->rspaddr, tgt_lsreq->rspbuf,
		((lsreq->rsplen < tgt_lsreq->rsplen) ?
				lsreq->rsplen : tgt_lsreq->rsplen));
	tgt_lsreq->done(tgt_lsreq);

	schedule_work(&tls_req->work);

	return 0;
}

static void
fcloop_tfcp_req_free(struct kref *ref)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(ref, struct fcloop_fcpreq, ref);

	kfree(tfcp_req);
}

static void
fcloop_tfcp_req_put(struct fcloop_fcpreq *tfcp_req)
{
	kref_put(&tfcp_req->ref, fcloop_tfcp_req_free);
}

static int
fcloop_tfcp_req_get(struct fcloop_fcpreq *tfcp_req)
{
	return kref_get_unless_zero(&tfcp_req->ref);
}

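/*
 * Complete the initiator-side request, if it still exists, and drop
 * the original I/O's reference on the shared context.
 */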
static void
fcloop_call_host_done(struct nvmefc_fcp_req *fcpreq,
			struct fcloop_fcpreq *tfcp_req, int status)
{
	struct fcloop_ini_fcpreq *inireq = NULL;

	if (fcpreq) {
		inireq = fcpreq->private;
		spin_lock(&inireq->inilock);
		inireq->tfcp_req = NULL;
		spin_unlock(&inireq->inilock);

		fcpreq->status = status;
		fcpreq->done(fcpreq);
	}

	/* release original io reference on tgt struct */
	fcloop_tfcp_req_put(tfcp_req);
}

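/*
 * Work item: deliver a queued FCP command to the target transport,
 * unless the initiator aborted it before it ever started.
 */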
static void
fcloop_fcp_recv_work(struct work_struct *work)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(work, struct fcloop_fcpreq, fcp_rcv_work);
	struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq;
	int ret = 0;
	bool aborted = false;

	spin_lock(&tfcp_req->reqlock);
	switch (tfcp_req->inistate) {
	case INI_IO_START:
		tfcp_req->inistate = INI_IO_ACTIVE;
		break;
	case INI_IO_ABORTED:
		aborted = true;
		break;
	default:
		spin_unlock(&tfcp_req->reqlock);
		WARN_ON(1);
		return;
	}
	spin_unlock(&tfcp_req->reqlock);

	if (unlikely(aborted))
		ret = -ECANCELED;
	else
		ret = nvmet_fc_rcv_fcp_req(tfcp_req->tport->targetport,
				&tfcp_req->tgt_fcp_req,
				fcpreq->cmdaddr, fcpreq->cmdlen);
	if (ret)
		fcloop_call_host_done(fcpreq, tfcp_req, ret);

	return;
}

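/*
 * Work item: deliver an abort to the target transport.  If the I/O
 * already completed, only the abort path's reference is dropped.
 */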
static void
fcloop_fcp_abort_recv_work(struct work_struct *work)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(work, struct fcloop_fcpreq, abort_rcv_work);
	struct nvmefc_fcp_req *fcpreq;
	bool completed = false;

	spin_lock(&tfcp_req->reqlock);
	fcpreq = tfcp_req->fcpreq;
	switch (tfcp_req->inistate) {
	case INI_IO_ABORTED:
		break;
	case INI_IO_COMPLETED:
		completed = true;
		break;
	default:
		spin_unlock(&tfcp_req->reqlock);
		WARN_ON(1);
		return;
	}
	spin_unlock(&tfcp_req->reqlock);

	if (unlikely(completed)) {
		/* remove reference taken in original abort downcall */
		fcloop_tfcp_req_put(tfcp_req);
		return;
	}

	if (tfcp_req->tport->targetport)
		nvmet_fc_rcv_fcp_abort(tfcp_req->tport->targetport,
					&tfcp_req->tgt_fcp_req);

	spin_lock(&tfcp_req->reqlock);
	tfcp_req->fcpreq = NULL;
	spin_unlock(&tfcp_req->reqlock);

	fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED);
	/* call_host_done releases reference for abort downcall */
}

/*
 * FCP IO operation done by target completion.
 * Call back up initiator "done" flows.
 */
static void
fcloop_tgt_fcprqst_done_work(struct work_struct *work)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(work, struct fcloop_fcpreq, tio_done_work);
	struct nvmefc_fcp_req *fcpreq;

	spin_lock(&tfcp_req->reqlock);
	fcpreq = tfcp_req->fcpreq;
	tfcp_req->inistate = INI_IO_COMPLETED;
	spin_unlock(&tfcp_req->reqlock);

	fcloop_call_host_done(fcpreq, tfcp_req, tfcp_req->status);
}

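/*
 * Host-side FCP entry point: allocate the shared loopback context and
 * hand the command to the target side via a work item.
 */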
static int
fcloop_fcp_req(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			void *hw_queue_handle,
			struct nvmefc_fcp_req *fcpreq)
{
	struct fcloop_rport *rport = remoteport->private;
	struct fcloop_ini_fcpreq *inireq = fcpreq->private;
	struct fcloop_fcpreq *tfcp_req;

	if (!rport->targetport)
		return -ECONNREFUSED;

	tfcp_req = kzalloc(sizeof(*tfcp_req), GFP_KERNEL);
	if (!tfcp_req)
		return -ENOMEM;

	inireq->fcpreq = fcpreq;
	inireq->tfcp_req = tfcp_req;
	spin_lock_init(&inireq->inilock);

	tfcp_req->fcpreq = fcpreq;
	tfcp_req->tport = rport->targetport->private;
	tfcp_req->inistate = INI_IO_START;
	spin_lock_init(&tfcp_req->reqlock);
	INIT_WORK(&tfcp_req->fcp_rcv_work, fcloop_fcp_recv_work);
	INIT_WORK(&tfcp_req->abort_rcv_work, fcloop_fcp_abort_recv_work);
	INIT_WORK(&tfcp_req->tio_done_work, fcloop_tgt_fcprqst_done_work);
	kref_init(&tfcp_req->ref);

	schedule_work(&tfcp_req->fcp_rcv_work);

	return 0;
}

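/*
 * Copy "length" bytes between the target-side scatterlist (data_sg)
 * and the initiator-side scatterlist (io_sg), starting "offset" bytes
 * into io_sg.  WRITEDATA copies host->target; reads copy target->host.
 */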
static void
fcloop_fcp_copy_data(u8 op, struct scatterlist *data_sg,
			struct scatterlist *io_sg, u32 offset, u32 length)
{
	void *data_p, *io_p;
	u32 data_len, io_len, tlen;

	io_p = sg_virt(io_sg);
	io_len = io_sg->length;

	for ( ; offset; ) {
		tlen = min_t(u32, offset, io_len);
		offset -= tlen;
		io_len -= tlen;
		if (!io_len) {
			io_sg = sg_next(io_sg);
			io_p = sg_virt(io_sg);
			io_len = io_sg->length;
		} else
			io_p += tlen;
	}

	data_p = sg_virt(data_sg);
	data_len = data_sg->length;

	for ( ; length; ) {
		tlen = min_t(u32, io_len, data_len);
		tlen = min_t(u32, tlen, length);

		if (op == NVMET_FCOP_WRITEDATA)
			memcpy(data_p, io_p, tlen);
		else
			memcpy(io_p, data_p, tlen);

		length -= tlen;

		io_len -= tlen;
		if ((!io_len) && (length)) {
			io_sg = sg_next(io_sg);
			io_p = sg_virt(io_sg);
			io_len = io_sg->length;
		} else
			io_p += tlen;

		data_len -= tlen;
		if ((!data_len) && (length)) {
			data_sg = sg_next(data_sg);
			data_p = sg_virt(data_sg);
			data_len = data_sg->length;
		} else
			data_p += tlen;
	}
}

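/*
 * Target-side data/response operation: move the data between the two
 * scatterlists and copy the FCP response back to the initiator
 * request, honouring aborts raced in from either side.
 */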
static int
fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
	struct nvmefc_fcp_req *fcpreq;
	u32 rsplen = 0, xfrlen = 0;
	int fcp_err = 0, active, aborted;
	u8 op = tgt_fcpreq->op;

	spin_lock(&tfcp_req->reqlock);
	fcpreq = tfcp_req->fcpreq;
	active = tfcp_req->active;
	aborted = tfcp_req->aborted;
	tfcp_req->active = true;
	spin_unlock(&tfcp_req->reqlock);

	if (unlikely(active))
		/* illegal - call while i/o active */
		return -EALREADY;

	if (unlikely(aborted)) {
		/* target transport has aborted i/o prior */
		spin_lock(&tfcp_req->reqlock);
		tfcp_req->active = false;
		spin_unlock(&tfcp_req->reqlock);
		tgt_fcpreq->transferred_length = 0;
		tgt_fcpreq->fcp_error = -ECANCELED;
		tgt_fcpreq->done(tgt_fcpreq);
		return 0;
	}

	/*
	 * if fcpreq is NULL, the I/O has been aborted (from
	 * initiator side). For the target side, act as if all is well
	 * but don't actually move data.
	 */

	switch (op) {
	case NVMET_FCOP_WRITEDATA:
		xfrlen = tgt_fcpreq->transfer_length;
		if (fcpreq) {
			fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
					fcpreq->first_sgl, tgt_fcpreq->offset,
					xfrlen);
			fcpreq->transferred_length += xfrlen;
		}
		break;

	case NVMET_FCOP_READDATA:
	case NVMET_FCOP_READDATA_RSP:
		xfrlen = tgt_fcpreq->transfer_length;
		if (fcpreq) {
			fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
					fcpreq->first_sgl, tgt_fcpreq->offset,
					xfrlen);
			fcpreq->transferred_length += xfrlen;
		}
		if (op == NVMET_FCOP_READDATA)
			break;

		/* Fall-Thru to RSP handling */

	case NVMET_FCOP_RSP:
		if (fcpreq) {
			rsplen = ((fcpreq->rsplen < tgt_fcpreq->rsplen) ?
					fcpreq->rsplen : tgt_fcpreq->rsplen);
			memcpy(fcpreq->rspaddr, tgt_fcpreq->rspaddr, rsplen);
			if (rsplen < tgt_fcpreq->rsplen)
				fcp_err = -E2BIG;
			fcpreq->rcv_rsplen = rsplen;
			fcpreq->status = 0;
		}
		tfcp_req->status = 0;
		break;

	default:
		fcp_err = -EINVAL;
		break;
	}

	spin_lock(&tfcp_req->reqlock);
	tfcp_req->active = false;
	spin_unlock(&tfcp_req->reqlock);

	tgt_fcpreq->transferred_length = xfrlen;
	tgt_fcpreq->fcp_error = fcp_err;
	tgt_fcpreq->done(tgt_fcpreq);

	return 0;
}

static void
fcloop_tgt_fcp_abort(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);

	/*
	 * mark aborted only in case there were 2 threads in transport
	 * (one doing io, other doing abort) and only kills ops posted
	 * after the abort request
	 */
	spin_lock(&tfcp_req->reqlock);
	tfcp_req->aborted = true;
	spin_unlock(&tfcp_req->reqlock);

	tfcp_req->status = NVME_SC_INTERNAL;

	/*
	 * nothing more to do. If io wasn't active, the transport should
	 * immediately call the req_release. If it was active, the op
	 * will complete, and the lldd should call req_release.
	 */
}

static void
fcloop_fcp_req_release(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);

	schedule_work(&tfcp_req->tio_done_work);
}

static void
fcloop_ls_abort(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
				struct nvmefc_ls_req *lsreq)
{
}

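/*
 * Host-side abort: detach the initiator request from the shared
 * context and, if the I/O is still in flight, schedule delivery of
 * the abort to the target side.
 */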
static void
fcloop_fcp_abort(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			void *hw_queue_handle,
			struct nvmefc_fcp_req *fcpreq)
{
	struct fcloop_ini_fcpreq *inireq = fcpreq->private;
	struct fcloop_fcpreq *tfcp_req;
	bool abortio = true;

	spin_lock(&inireq->inilock);
	tfcp_req = inireq->tfcp_req;
	if (tfcp_req)
		fcloop_tfcp_req_get(tfcp_req);
	spin_unlock(&inireq->inilock);

	if (!tfcp_req)
		/* abort has already been called */
		return;

	/* break initiator/target relationship for io */
	spin_lock(&tfcp_req->reqlock);
	switch (tfcp_req->inistate) {
	case INI_IO_START:
	case INI_IO_ACTIVE:
		tfcp_req->inistate = INI_IO_ABORTED;
		break;
	case INI_IO_COMPLETED:
		abortio = false;
		break;
	default:
		spin_unlock(&tfcp_req->reqlock);
		WARN_ON(1);
		return;
	}
	spin_unlock(&tfcp_req->reqlock);

	if (abortio)
		/* leave the reference while the work item is scheduled */
		WARN_ON(!schedule_work(&tfcp_req->abort_rcv_work));
	else {
		/*
		 * as the io has already had the done callback made,
		 * nothing more to do. So release the reference taken above
		 */
		fcloop_tfcp_req_put(tfcp_req);
	}
}

static void
fcloop_nport_free(struct kref *ref)
{
	struct fcloop_nport *nport =
		container_of(ref, struct fcloop_nport, ref);
	unsigned long flags;

	spin_lock_irqsave(&fcloop_lock, flags);
	list_del(&nport->nport_list);
	spin_unlock_irqrestore(&fcloop_lock, flags);

	kfree(nport);
}

static void
fcloop_nport_put(struct fcloop_nport *nport)
{
	kref_put(&nport->ref, fcloop_nport_free);
}

static int
fcloop_nport_get(struct fcloop_nport *nport)
{
	return kref_get_unless_zero(&nport->ref);
}

static void
fcloop_localport_delete(struct nvme_fc_local_port *localport)
{
	struct fcloop_lport_priv *lport_priv = localport->private;
	struct fcloop_lport *lport = lport_priv->lport;

	/* release any threads waiting for the unreg to complete */
	complete(&lport->unreg_done);
}

static void
fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport)
{
	struct fcloop_rport *rport = remoteport->private;

	fcloop_nport_put(rport->nport);
}

static void
fcloop_targetport_delete(struct nvmet_fc_target_port *targetport)
{
	struct fcloop_tport *tport = targetport->private;

	fcloop_nport_put(tport->nport);
}

#define	FCLOOP_HW_QUEUES		4
#define	FCLOOP_SGL_SEGS			256
#define FCLOOP_DMABOUND_4G		0xFFFFFFFF

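/*
 * Host and target transport templates.  Queue count, segment limits
 * and the DMA boundary are fixed, loopback-only values.
 */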
static struct nvme_fc_port_template fctemplate = {
	.localport_delete	= fcloop_localport_delete,
	.remoteport_delete	= fcloop_remoteport_delete,
	.create_queue		= fcloop_create_queue,
	.delete_queue		= fcloop_delete_queue,
	.ls_req			= fcloop_ls_req,
	.fcp_io			= fcloop_fcp_req,
	.ls_abort		= fcloop_ls_abort,
	.fcp_abort		= fcloop_fcp_abort,
	.max_hw_queues		= FCLOOP_HW_QUEUES,
	.max_sgl_segments	= FCLOOP_SGL_SEGS,
	.max_dif_sgl_segments	= FCLOOP_SGL_SEGS,
	.dma_boundary		= FCLOOP_DMABOUND_4G,
	/* sizes of additional private data for data structures */
	.local_priv_sz		= sizeof(struct fcloop_lport_priv),
	.remote_priv_sz		= sizeof(struct fcloop_rport),
	.lsrqst_priv_sz		= sizeof(struct fcloop_lsreq),
	.fcprqst_priv_sz	= sizeof(struct fcloop_ini_fcpreq),
};

static struct nvmet_fc_target_template tgttemplate = {
	.targetport_delete	= fcloop_targetport_delete,
	.xmt_ls_rsp		= fcloop_xmt_ls_rsp,
	.fcp_op			= fcloop_fcp_op,
	.fcp_abort		= fcloop_tgt_fcp_abort,
	.fcp_req_release	= fcloop_fcp_req_release,
	.max_hw_queues		= FCLOOP_HW_QUEUES,
	.max_sgl_segments	= FCLOOP_SGL_SEGS,
	.max_dif_sgl_segments	= FCLOOP_SGL_SEGS,
	.dma_boundary		= FCLOOP_DMABOUND_4G,
	/* optional features */
	.target_features	= 0,
	/* sizes of additional private data for data structures */
	.target_priv_sz		= sizeof(struct fcloop_tport),
};

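/*
 * sysfs: add_local_port -- parse wwnn/wwpn (plus optional roles and
 * fcaddr) and register an NVMe FC local port served by fcloop.
 */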
static ssize_t
fcloop_create_local_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvme_fc_port_info pinfo;
	struct fcloop_ctrl_options *opts;
	struct nvme_fc_local_port *localport;
	struct fcloop_lport *lport;
	struct fcloop_lport_priv *lport_priv;
	unsigned long flags;
	int ret = -ENOMEM;

	lport = kzalloc(sizeof(*lport), GFP_KERNEL);
	if (!lport)
		return -ENOMEM;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		goto out_free_lport;

	ret = fcloop_parse_options(opts, buf);
	if (ret)
		goto out_free_opts;

	/* everything there ? */
	if ((opts->mask & LPORT_OPTS) != LPORT_OPTS) {
		ret = -EINVAL;
		goto out_free_opts;
	}

	memset(&pinfo, 0, sizeof(pinfo));
	pinfo.node_name = opts->wwnn;
	pinfo.port_name = opts->wwpn;
	pinfo.port_role = opts->roles;
	pinfo.port_id = opts->fcaddr;

	ret = nvme_fc_register_localport(&pinfo, &fctemplate, NULL, &localport);
	if (!ret) {
		/* success */
		lport_priv = localport->private;
		lport_priv->lport = lport;

		lport->localport = localport;
		INIT_LIST_HEAD(&lport->lport_list);

		spin_lock_irqsave(&fcloop_lock, flags);
		list_add_tail(&lport->lport_list, &fcloop_lports);
		spin_unlock_irqrestore(&fcloop_lock, flags);
	}

out_free_opts:
	kfree(opts);
out_free_lport:
	/* free only if we're going to fail */
	if (ret)
		kfree(lport);

	return ret ? ret : count;
}


static void
__unlink_local_port(struct fcloop_lport *lport)
{
	list_del(&lport->lport_list);
}

static int
__wait_localport_unreg(struct fcloop_lport *lport)
{
	int ret;

	init_completion(&lport->unreg_done);

	ret = nvme_fc_unregister_localport(lport->localport);

	wait_for_completion(&lport->unreg_done);

	kfree(lport);

	return ret;
}


static ssize_t
fcloop_delete_local_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct fcloop_lport *tlport, *lport = NULL;
	u64 nodename, portname;
	unsigned long flags;
	int ret;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
	if (ret)
		return ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tlport, &fcloop_lports, lport_list) {
		if (tlport->localport->node_name == nodename &&
		    tlport->localport->port_name == portname) {
			lport = tlport;
			__unlink_local_port(lport);
			break;
		}
	}
	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (!lport)
		return -ENOENT;

	ret = __wait_localport_unreg(lport);

	return ret ? ret : count;
}

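/*
 * Allocate, or reuse, the nport that ties a remote/target port pair
 * together.  The wwnn/wwpn must not collide with a local port, and a
 * remote port additionally requires that the named local port exist.
 */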
static struct fcloop_nport *
fcloop_alloc_nport(const char *buf, size_t count, bool remoteport)
{
	struct fcloop_nport *newnport, *nport = NULL;
	struct fcloop_lport *tmplport, *lport = NULL;
	struct fcloop_ctrl_options *opts;
	unsigned long flags;
	u32 opts_mask = (remoteport) ? RPORT_OPTS : TGTPORT_OPTS;
	int ret;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		return NULL;

	ret = fcloop_parse_options(opts, buf);
	if (ret)
		goto out_free_opts;

	/* everything there ? */
	if ((opts->mask & opts_mask) != opts_mask) {
		ret = -EINVAL;
		goto out_free_opts;
	}

	newnport = kzalloc(sizeof(*newnport), GFP_KERNEL);
	if (!newnport)
		goto out_free_opts;

	INIT_LIST_HEAD(&newnport->nport_list);
	newnport->node_name = opts->wwnn;
	newnport->port_name = opts->wwpn;
	if (opts->mask & NVMF_OPT_ROLES)
		newnport->port_role = opts->roles;
	if (opts->mask & NVMF_OPT_FCADDR)
		newnport->port_id = opts->fcaddr;
	kref_init(&newnport->ref);

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tmplport, &fcloop_lports, lport_list) {
		if (tmplport->localport->node_name == opts->wwnn &&
		    tmplport->localport->port_name == opts->wwpn)
			goto out_invalid_opts;

		if (tmplport->localport->node_name == opts->lpwwnn &&
		    tmplport->localport->port_name == opts->lpwwpn)
			lport = tmplport;
	}

	if (remoteport) {
		if (!lport)
			goto out_invalid_opts;
		newnport->lport = lport;
	}

	list_for_each_entry(nport, &fcloop_nports, nport_list) {
		if (nport->node_name == opts->wwnn &&
		    nport->port_name == opts->wwpn) {
			if ((remoteport && nport->rport) ||
			    (!remoteport && nport->tport)) {
				nport = NULL;
				goto out_invalid_opts;
			}

			fcloop_nport_get(nport);

			spin_unlock_irqrestore(&fcloop_lock, flags);

			if (remoteport)
				nport->lport = lport;
			if (opts->mask & NVMF_OPT_ROLES)
				nport->port_role = opts->roles;
			if (opts->mask & NVMF_OPT_FCADDR)
				nport->port_id = opts->fcaddr;
			goto out_free_newnport;
		}
	}

	list_add_tail(&newnport->nport_list, &fcloop_nports);

	spin_unlock_irqrestore(&fcloop_lock, flags);

	kfree(opts);
	return newnport;

out_invalid_opts:
	spin_unlock_irqrestore(&fcloop_lock, flags);
out_free_newnport:
	kfree(newnport);
out_free_opts:
	kfree(opts);
	return nport;
}

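/*
 * sysfs: add_remote_port -- register an NVMe FC remote port and, if a
 * matching target port already exists, cross-link the two ends.
 */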
static ssize_t
fcloop_create_remote_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvme_fc_remote_port *remoteport;
	struct fcloop_nport *nport;
	struct fcloop_rport *rport;
	struct nvme_fc_port_info pinfo;
	int ret;

	nport = fcloop_alloc_nport(buf, count, true);
	if (!nport)
		return -EIO;

	memset(&pinfo, 0, sizeof(pinfo));
	pinfo.node_name = nport->node_name;
	pinfo.port_name = nport->port_name;
	pinfo.port_role = nport->port_role;
	pinfo.port_id = nport->port_id;

	ret = nvme_fc_register_remoteport(nport->lport->localport,
						&pinfo, &remoteport);
	if (ret || !remoteport) {
		fcloop_nport_put(nport);
		return ret;
	}

	/* success */
	rport = remoteport->private;
	rport->remoteport = remoteport;
	rport->targetport = (nport->tport) ? nport->tport->targetport : NULL;
	if (nport->tport) {
		nport->tport->remoteport = remoteport;
		nport->tport->lport = nport->lport;
	}
	rport->nport = nport;
	rport->lport = nport->lport;
	nport->rport = rport;

	return count;
}


static struct fcloop_rport *
__unlink_remote_port(struct fcloop_nport *nport)
{
	struct fcloop_rport *rport = nport->rport;

	if (rport && nport->tport)
		nport->tport->remoteport = NULL;
	nport->rport = NULL;

	return rport;
}

static int
__remoteport_unreg(struct fcloop_nport *nport, struct fcloop_rport *rport)
{
	if (!rport)
		return -EALREADY;

	return nvme_fc_unregister_remoteport(rport->remoteport);
}

static ssize_t
fcloop_delete_remote_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct fcloop_nport *nport = NULL, *tmpport;
	struct fcloop_rport *rport;
	u64 nodename, portname;
	unsigned long flags;
	int ret;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
	if (ret)
		return ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
		if (tmpport->node_name == nodename &&
		    tmpport->port_name == portname && tmpport->rport) {
			nport = tmpport;
			rport = __unlink_remote_port(nport);
			break;
		}
	}

	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (!nport)
		return -ENOENT;

	ret = __remoteport_unreg(nport, rport);

	return ret ? ret : count;
}

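/*
 * sysfs: add_target_port -- register an NVMe FC target port and, if a
 * matching remote port already exists, cross-link the two ends.
 */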
static ssize_t
fcloop_create_target_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvmet_fc_target_port *targetport;
	struct fcloop_nport *nport;
	struct fcloop_tport *tport;
	struct nvmet_fc_port_info tinfo;
	int ret;

	nport = fcloop_alloc_nport(buf, count, false);
	if (!nport)
		return -EIO;

	tinfo.node_name = nport->node_name;
	tinfo.port_name = nport->port_name;
	tinfo.port_id = nport->port_id;

	ret = nvmet_fc_register_targetport(&tinfo, &tgttemplate, NULL,
						&targetport);
	if (ret) {
		fcloop_nport_put(nport);
		return ret;
	}

	/* success */
	tport = targetport->private;
	tport->targetport = targetport;
	tport->remoteport = (nport->rport) ? nport->rport->remoteport : NULL;
	if (nport->rport)
		nport->rport->targetport = targetport;
	tport->nport = nport;
	tport->lport = nport->lport;
	nport->tport = tport;

	return count;
}


static struct fcloop_tport *
__unlink_target_port(struct fcloop_nport *nport)
{
	struct fcloop_tport *tport = nport->tport;

	if (tport && nport->rport)
		nport->rport->targetport = NULL;
	nport->tport = NULL;

	return tport;
}

static int
__targetport_unreg(struct fcloop_nport *nport, struct fcloop_tport *tport)
{
	if (!tport)
		return -EALREADY;

	return nvmet_fc_unregister_targetport(tport->targetport);
}

static ssize_t
fcloop_delete_target_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct fcloop_nport *nport = NULL, *tmpport;
	struct fcloop_tport *tport = NULL;
	u64 nodename, portname;
	unsigned long flags;
	int ret;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
	if (ret)
		return ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
		if (tmpport->node_name == nodename &&
		    tmpport->port_name == portname && tmpport->tport) {
			nport = tmpport;
			tport = __unlink_target_port(nport);
			break;
		}
	}

	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (!nport)
		return -ENOENT;

	ret = __targetport_unreg(nport, tport);

	return ret ? ret : count;
}


static DEVICE_ATTR(add_local_port, 0200, NULL, fcloop_create_local_port);
static DEVICE_ATTR(del_local_port, 0200, NULL, fcloop_delete_local_port);
static DEVICE_ATTR(add_remote_port, 0200, NULL, fcloop_create_remote_port);
static DEVICE_ATTR(del_remote_port, 0200, NULL, fcloop_delete_remote_port);
static DEVICE_ATTR(add_target_port, 0200, NULL, fcloop_create_target_port);
static DEVICE_ATTR(del_target_port, 0200, NULL, fcloop_delete_target_port);

static struct attribute *fcloop_dev_attrs[] = {
	&dev_attr_add_local_port.attr,
	&dev_attr_del_local_port.attr,
	&dev_attr_add_remote_port.attr,
	&dev_attr_del_remote_port.attr,
	&dev_attr_add_target_port.attr,
	&dev_attr_del_target_port.attr,
	NULL
};

static struct attribute_group fcloop_dev_attrs_group = {
	.attrs		= fcloop_dev_attrs,
};

static const struct attribute_group *fcloop_dev_attr_groups[] = {
	&fcloop_dev_attrs_group,
	NULL,
};

static struct class *fcloop_class;
static struct device *fcloop_device;

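/*
 * Create the "fcloop" class and its "ctl" device at load time; on
 * unload, tear down any ports that are still registered.
 */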
static int __init fcloop_init(void)
{
	int ret;

	fcloop_class = class_create(THIS_MODULE, "fcloop");
	if (IS_ERR(fcloop_class)) {
		pr_err("couldn't register class fcloop\n");
		ret = PTR_ERR(fcloop_class);
		return ret;
	}

	fcloop_device = device_create_with_groups(
				fcloop_class, NULL, MKDEV(0, 0), NULL,
				fcloop_dev_attr_groups, "ctl");
	if (IS_ERR(fcloop_device)) {
		pr_err("couldn't create ctl device!\n");
		ret = PTR_ERR(fcloop_device);
		goto out_destroy_class;
	}

	get_device(fcloop_device);

	return 0;

out_destroy_class:
	class_destroy(fcloop_class);
	return ret;
}

static void __exit fcloop_exit(void)
{
	struct fcloop_lport *lport;
	struct fcloop_nport *nport;
	struct fcloop_tport *tport;
	struct fcloop_rport *rport;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	for (;;) {
		nport = list_first_entry_or_null(&fcloop_nports,
						typeof(*nport), nport_list);
		if (!nport)
			break;

		tport = __unlink_target_port(nport);
		rport = __unlink_remote_port(nport);

		spin_unlock_irqrestore(&fcloop_lock, flags);

		ret = __targetport_unreg(nport, tport);
		if (ret)
			pr_warn("%s: Failed deleting target port\n", __func__);

		ret = __remoteport_unreg(nport, rport);
		if (ret)
			pr_warn("%s: Failed deleting remote port\n", __func__);

		spin_lock_irqsave(&fcloop_lock, flags);
	}

	for (;;) {
		lport = list_first_entry_or_null(&fcloop_lports,
						typeof(*lport), lport_list);
		if (!lport)
			break;

		__unlink_local_port(lport);

		spin_unlock_irqrestore(&fcloop_lock, flags);

		ret = __wait_localport_unreg(lport);
		if (ret)
			pr_warn("%s: Failed deleting local port\n", __func__);

		spin_lock_irqsave(&fcloop_lock, flags);
	}

	spin_unlock_irqrestore(&fcloop_lock, flags);

	put_device(fcloop_device);

	device_destroy(fcloop_class, MKDEV(0, 0));
	class_destroy(fcloop_class);
}

module_init(fcloop_init);
module_exit(fcloop_exit);

MODULE_LICENSE("GPL v2");