// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016 Avago Technologies.  All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/parser.h>
#include <uapi/scsi/fc/fc_fs.h>

#include "../host/nvme.h"
#include "../target/nvmet.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>


enum {
	NVMF_OPT_ERR		= 0,
	NVMF_OPT_WWNN		= 1 << 0,
	NVMF_OPT_WWPN		= 1 << 1,
	NVMF_OPT_ROLES		= 1 << 2,
	NVMF_OPT_FCADDR		= 1 << 3,
	NVMF_OPT_LPWWNN		= 1 << 4,
	NVMF_OPT_LPWWPN		= 1 << 5,
};

struct fcloop_ctrl_options {
	int			mask;
	u64			wwnn;
	u64			wwpn;
	u32			roles;
	u32			fcaddr;
	u64			lpwwnn;
	u64			lpwwpn;
};

static const match_table_t opt_tokens = {
	{ NVMF_OPT_WWNN,	"wwnn=%s"	},
	{ NVMF_OPT_WWPN,	"wwpn=%s"	},
	{ NVMF_OPT_ROLES,	"roles=%d"	},
	{ NVMF_OPT_FCADDR,	"fcaddr=%x"	},
	{ NVMF_OPT_LPWWNN,	"lpwwnn=%s"	},
	{ NVMF_OPT_LPWWPN,	"lpwwpn=%s"	},
	{ NVMF_OPT_ERR,		NULL		}
};

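/*
 * Verify that a name token is "0x" followed by the full 16 hex digits
 * of a wwnn/wwpn before it is parsed as a u64.
 */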
static int fcloop_verify_addr(substring_t *s)
{
	size_t blen = s->to - s->from + 1;

	if (strnlen(s->from, blen) != NVME_FC_TRADDR_HEXNAMELEN + 2 ||
	    strncmp(s->from, "0x", 2))
		return -EINVAL;

	return 0;
}

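/*
 * Parse a comma/newline separated option string ("wwnn=...,wwpn=...,...")
 * into a struct fcloop_ctrl_options, recording which options were seen
 * in opts->mask.
 */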
static int
fcloop_parse_options(struct fcloop_ctrl_options *opts,
		const char *buf)
{
	substring_t args[MAX_OPT_ARGS];
	char *options, *o, *p;
	int token, ret = 0;
	u64 token64;

	options = o = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	while ((p = strsep(&o, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, opt_tokens, args);
		opts->mask |= token;
		switch (token) {
		case NVMF_OPT_WWNN:
			if (fcloop_verify_addr(args) ||
			    match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->wwnn = token64;
			break;
		case NVMF_OPT_WWPN:
			if (fcloop_verify_addr(args) ||
			    match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->wwpn = token64;
			break;
		case NVMF_OPT_ROLES:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->roles = token;
			break;
		case NVMF_OPT_FCADDR:
			if (match_hex(args, &token)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->fcaddr = token;
			break;
		case NVMF_OPT_LPWWNN:
			if (fcloop_verify_addr(args) ||
			    match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->lpwwnn = token64;
			break;
		case NVMF_OPT_LPWWPN:
			if (fcloop_verify_addr(args) ||
			    match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->lpwwpn = token64;
			break;
		default:
			pr_warn("unknown parameter or missing value '%s'\n", p);
			ret = -EINVAL;
			goto out_free_options;
		}
	}

out_free_options:
	kfree(options);
	return ret;
}


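/*
 * Like fcloop_parse_options(), but only accepts wwnn/wwpn and requires
 * both to be present; used by the del_* sysfs handlers.
 */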
static int
fcloop_parse_nm_options(struct device *dev, u64 *nname, u64 *pname,
		const char *buf)
{
	substring_t args[MAX_OPT_ARGS];
	char *options, *o, *p;
	int token, ret = 0;
	u64 token64;

	*nname = -1;
	*pname = -1;

	options = o = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	while ((p = strsep(&o, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, opt_tokens, args);
		switch (token) {
		case NVMF_OPT_WWNN:
			if (fcloop_verify_addr(args) ||
			    match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			*nname = token64;
			break;
		case NVMF_OPT_WWPN:
			if (fcloop_verify_addr(args) ||
			    match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			*pname = token64;
			break;
		default:
			pr_warn("unknown parameter or missing value '%s'\n", p);
			ret = -EINVAL;
			goto out_free_options;
		}
	}

out_free_options:
	kfree(options);

	if (!ret) {
		if (*nname == -1)
			return -EINVAL;
		if (*pname == -1)
			return -EINVAL;
	}

	return ret;
}


#define LPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN)

#define RPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN |  \
			 NVMF_OPT_LPWWNN | NVMF_OPT_LPWWPN)

#define TGTPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN)


static DEFINE_SPINLOCK(fcloop_lock);
static LIST_HEAD(fcloop_lports);
static LIST_HEAD(fcloop_nports);

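/*
 * An fcloop_lport wraps a host-side nvme_fc_local_port. An fcloop_nport
 * describes one emulated fabric node (wwnn/wwpn): the host's view of it
 * is an fcloop_rport (nvme_fc_remote_port) and the target's side is an
 * fcloop_tport (nvmet_fc_target_port). Both port types keep a list of
 * pending LS requests plus a work item that completes them.
 */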
struct fcloop_lport {
	struct nvme_fc_local_port *localport;
	struct list_head lport_list;
	struct completion unreg_done;
};

struct fcloop_lport_priv {
	struct fcloop_lport *lport;
};

struct fcloop_rport {
	struct nvme_fc_remote_port	*remoteport;
	struct nvmet_fc_target_port	*targetport;
	struct fcloop_nport		*nport;
	struct fcloop_lport		*lport;
	spinlock_t			lock;
	struct list_head		ls_list;
	struct work_struct		ls_work;
};

struct fcloop_tport {
	struct nvmet_fc_target_port	*targetport;
	struct nvme_fc_remote_port	*remoteport;
	struct fcloop_nport		*nport;
	struct fcloop_lport		*lport;
	spinlock_t			lock;
	struct list_head		ls_list;
	struct work_struct		ls_work;
};

struct fcloop_nport {
	struct fcloop_rport *rport;
	struct fcloop_tport *tport;
	struct fcloop_lport *lport;
	struct list_head nport_list;
	struct kref ref;
	u64 node_name;
	u64 port_name;
	u32 port_role;
	u32 port_id;
};

struct fcloop_lsreq {
	struct nvmefc_ls_req		*lsreq;
	struct nvmefc_ls_rsp		ls_rsp;
	int				lsdir;	/* H2T or T2H */
	int				status;
	struct list_head		ls_list; /* fcloop_rport->ls_list */
};

struct fcloop_rscn {
	struct fcloop_tport		*tport;
	struct work_struct		work;
};

enum {
	INI_IO_START		= 0,
	INI_IO_ACTIVE		= 1,
	INI_IO_ABORTED		= 2,
	INI_IO_COMPLETED	= 3,
};

struct fcloop_fcpreq {
	struct fcloop_tport		*tport;
	struct nvmefc_fcp_req		*fcpreq;
	spinlock_t			reqlock;
	u16				status;
	u32				inistate;
	bool				active;
	bool				aborted;
	struct kref			ref;
	struct work_struct		fcp_rcv_work;
	struct work_struct		abort_rcv_work;
	struct work_struct		tio_done_work;
	struct nvmefc_tgt_fcp_req	tgt_fcp_req;
};

struct fcloop_ini_fcpreq {
	struct nvmefc_fcp_req		*fcpreq;
	struct fcloop_fcpreq		*tfcp_req;
	spinlock_t			inilock;
};

static inline struct fcloop_lsreq *
ls_rsp_to_lsreq(struct nvmefc_ls_rsp *lsrsp)
{
	return container_of(lsrsp, struct fcloop_lsreq, ls_rsp);
}

static inline struct fcloop_fcpreq *
tgt_fcp_req_to_fcpreq(struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	return container_of(tgt_fcpreq, struct fcloop_fcpreq, tgt_fcp_req);
}


static int
fcloop_create_queue(struct nvme_fc_local_port *localport,
			unsigned int qidx, u16 qsize,
			void **handle)
{
	*handle = localport;
	return 0;
}

static void
fcloop_delete_queue(struct nvme_fc_local_port *localport,
			unsigned int idx, void *handle)
{
}

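/*
 * Complete all LS requests queued on the rport. Each done() callback is
 * invoked with rport->lock dropped, since the callee may free the request.
 */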
static void
fcloop_rport_lsrqst_work(struct work_struct *work)
{
	struct fcloop_rport *rport =
		container_of(work, struct fcloop_rport, ls_work);
	struct fcloop_lsreq *tls_req;

	spin_lock(&rport->lock);
	for (;;) {
		tls_req = list_first_entry_or_null(&rport->ls_list,
				struct fcloop_lsreq, ls_list);
		if (!tls_req)
			break;

		list_del(&tls_req->ls_list);
		spin_unlock(&rport->lock);

		tls_req->lsreq->done(tls_req->lsreq, tls_req->status);
		/*
		 * callee may free memory containing tls_req.
		 * do not reference lsreq after this.
		 */

		spin_lock(&rport->lock);
	}
	spin_unlock(&rport->lock);
}

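/*
 * Host-to-target LS request: hand the request to the target transport,
 * or fail it with -ECONNREFUSED via the ls_work path if no target port
 * is attached.
 */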
static int
fcloop_h2t_ls_req(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			struct nvmefc_ls_req *lsreq)
{
	struct fcloop_lsreq *tls_req = lsreq->private;
	struct fcloop_rport *rport = remoteport->private;
	int ret = 0;

	tls_req->lsreq = lsreq;
	INIT_LIST_HEAD(&tls_req->ls_list);

	if (!rport->targetport) {
		tls_req->status = -ECONNREFUSED;
		spin_lock(&rport->lock);
		list_add_tail(&tls_req->ls_list, &rport->ls_list);
		spin_unlock(&rport->lock);
		queue_work(nvmet_wq, &rport->ls_work);
		return ret;
	}

	tls_req->status = 0;
	ret = nvmet_fc_rcv_ls_req(rport->targetport, rport,
				  &tls_req->ls_rsp,
				  lsreq->rqstaddr, lsreq->rqstlen);

	return ret;
}

static int
fcloop_h2t_xmt_ls_rsp(struct nvmet_fc_target_port *targetport,
			struct nvmefc_ls_rsp *lsrsp)
{
	struct fcloop_lsreq *tls_req = ls_rsp_to_lsreq(lsrsp);
	struct nvmefc_ls_req *lsreq = tls_req->lsreq;
	struct fcloop_tport *tport = targetport->private;
	struct nvme_fc_remote_port *remoteport = tport->remoteport;
	struct fcloop_rport *rport;

	memcpy(lsreq->rspaddr, lsrsp->rspbuf,
		((lsreq->rsplen < lsrsp->rsplen) ?
				lsreq->rsplen : lsrsp->rsplen));

	lsrsp->done(lsrsp);

	if (remoteport) {
		rport = remoteport->private;
		spin_lock(&rport->lock);
		list_add_tail(&tls_req->ls_list, &rport->ls_list);
		spin_unlock(&rport->lock);
		queue_work(nvmet_wq, &rport->ls_work);
	}

	return 0;
}

static void
fcloop_tport_lsrqst_work(struct work_struct *work)
{
	struct fcloop_tport *tport =
		container_of(work, struct fcloop_tport, ls_work);
	struct fcloop_lsreq *tls_req;

	spin_lock(&tport->lock);
	for (;;) {
		tls_req = list_first_entry_or_null(&tport->ls_list,
				struct fcloop_lsreq, ls_list);
		if (!tls_req)
			break;

		list_del(&tls_req->ls_list);
		spin_unlock(&tport->lock);

		tls_req->lsreq->done(tls_req->lsreq, tls_req->status);
		/*
		 * callee may free memory containing tls_req.
		 * do not reference lsreq after this.
		 */

		spin_lock(&tport->lock);
	}
	spin_unlock(&tport->lock);
}

static int
fcloop_t2h_ls_req(struct nvmet_fc_target_port *targetport, void *hosthandle,
			struct nvmefc_ls_req *lsreq)
{
	struct fcloop_lsreq *tls_req = lsreq->private;
	struct fcloop_tport *tport = targetport->private;
	int ret = 0;

	/*
	 * hosthandle should be the dst.rport value.
	 * hosthandle ignored as fcloop currently is
	 * 1:1 tgtport vs remoteport
	 */
	tls_req->lsreq = lsreq;
	INIT_LIST_HEAD(&tls_req->ls_list);

	if (!tport->remoteport) {
		tls_req->status = -ECONNREFUSED;
		spin_lock(&tport->lock);
		list_add_tail(&tls_req->ls_list, &tport->ls_list);
		spin_unlock(&tport->lock);
		queue_work(nvmet_wq, &tport->ls_work);
		return ret;
	}

	tls_req->status = 0;
	ret = nvme_fc_rcv_ls_req(tport->remoteport, &tls_req->ls_rsp,
				 lsreq->rqstaddr, lsreq->rqstlen);

	return ret;
}

static int
fcloop_t2h_xmt_ls_rsp(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			struct nvmefc_ls_rsp *lsrsp)
{
	struct fcloop_lsreq *tls_req = ls_rsp_to_lsreq(lsrsp);
	struct nvmefc_ls_req *lsreq = tls_req->lsreq;
	struct fcloop_rport *rport = remoteport->private;
	struct nvmet_fc_target_port *targetport = rport->targetport;
	struct fcloop_tport *tport;

	memcpy(lsreq->rspaddr, lsrsp->rspbuf,
		((lsreq->rsplen < lsrsp->rsplen) ?
				lsreq->rsplen : lsrsp->rsplen));
	lsrsp->done(lsrsp);

	if (targetport) {
		tport = targetport->private;
		spin_lock(&tport->lock);
		list_add_tail(&tls_req->ls_list, &tport->ls_list);
		spin_unlock(&tport->lock);
		queue_work(nvmet_wq, &tport->ls_work);
	}

	return 0;
}

static void
fcloop_t2h_host_release(void *hosthandle)
{
	/* host handle ignored for now */
}

/*
 * Simulate reception of an RSCN and convert it into an initiator
 * transport call to rescan a remote port.
 */
static void
fcloop_tgt_rscn_work(struct work_struct *work)
{
	struct fcloop_rscn *tgt_rscn =
		container_of(work, struct fcloop_rscn, work);
	struct fcloop_tport *tport = tgt_rscn->tport;

	if (tport->remoteport)
		nvme_fc_rescan_remoteport(tport->remoteport);
	kfree(tgt_rscn);
}

static void
fcloop_tgt_discovery_evt(struct nvmet_fc_target_port *tgtport)
{
	struct fcloop_rscn *tgt_rscn;

	tgt_rscn = kzalloc(sizeof(*tgt_rscn), GFP_KERNEL);
	if (!tgt_rscn)
		return;

	tgt_rscn->tport = tgtport->private;
	INIT_WORK(&tgt_rscn->work, fcloop_tgt_rscn_work);

	queue_work(nvmet_wq, &tgt_rscn->work);
}

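/*
 * Reference counting for the fcloop_fcpreq context shared between the
 * host and target sides of an I/O.
 */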
static void
fcloop_tfcp_req_free(struct kref *ref)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(ref, struct fcloop_fcpreq, ref);

	kfree(tfcp_req);
}

static void
fcloop_tfcp_req_put(struct fcloop_fcpreq *tfcp_req)
{
	kref_put(&tfcp_req->ref, fcloop_tfcp_req_free);
}

static int
fcloop_tfcp_req_get(struct fcloop_fcpreq *tfcp_req)
{
	return kref_get_unless_zero(&tfcp_req->ref);
}

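/*
 * Complete an I/O back to the host side (if fcpreq is still attached)
 * and drop the original reference on the shared request context.
 */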
static void
fcloop_call_host_done(struct nvmefc_fcp_req *fcpreq,
			struct fcloop_fcpreq *tfcp_req, int status)
{
	struct fcloop_ini_fcpreq *inireq = NULL;

	if (fcpreq) {
		inireq = fcpreq->private;
		spin_lock(&inireq->inilock);
		inireq->tfcp_req = NULL;
		spin_unlock(&inireq->inilock);

		fcpreq->status = status;
		fcpreq->done(fcpreq);
	}

	/* release original io reference on tgt struct */
	fcloop_tfcp_req_put(tfcp_req);
}

static bool drop_fabric_opcode;
#define DROP_OPCODE_MASK	0x00FF
/* fabrics opcode will have a bit set above 1st byte */
static int drop_opcode = -1;
static int drop_instance;
static int drop_amount;
static int drop_current_cnt;

/*
 * Inspect the io and determine whether it should be dropped.
 * Returns:
 *  0 if the io is passed through
 *  1 if the io was dropped
 */
static int check_for_drop(struct fcloop_fcpreq *tfcp_req)
{
	struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq;
	struct nvme_fc_cmd_iu *cmdiu = fcpreq->cmdaddr;
	struct nvme_command *sqe = &cmdiu->sqe;

	if (drop_opcode == -1)
		return 0;

	pr_info("%s: seq opcd x%02x fctype x%02x: drop F %s op x%02x "
		"inst %d start %d amt %d\n",
		__func__, sqe->common.opcode, sqe->fabrics.fctype,
		drop_fabric_opcode ? "y" : "n",
		drop_opcode, drop_current_cnt, drop_instance, drop_amount);

	if ((drop_fabric_opcode &&
	     (sqe->common.opcode != nvme_fabrics_command ||
	      sqe->fabrics.fctype != drop_opcode)) ||
	    (!drop_fabric_opcode && sqe->common.opcode != drop_opcode))
		return 0;

	if (++drop_current_cnt >= drop_instance) {
		if (drop_current_cnt >= drop_instance + drop_amount)
			drop_opcode = -1;
		return 1;
	}

	return 0;
}

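/*
 * Work item that delivers a host-issued FCP command to the target
 * transport, honoring any abort that raced in and the drop-injection
 * settings from set_cmd_drop.
 */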
static void
fcloop_fcp_recv_work(struct work_struct *work)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(work, struct fcloop_fcpreq, fcp_rcv_work);
	struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq;
	int ret = 0;
	bool aborted = false;

	spin_lock_irq(&tfcp_req->reqlock);
	switch (tfcp_req->inistate) {
	case INI_IO_START:
		tfcp_req->inistate = INI_IO_ACTIVE;
		break;
	case INI_IO_ABORTED:
		aborted = true;
		break;
	default:
		spin_unlock_irq(&tfcp_req->reqlock);
		WARN_ON(1);
		return;
	}
	spin_unlock_irq(&tfcp_req->reqlock);

	if (unlikely(aborted))
		ret = -ECANCELED;
	else {
		if (likely(!check_for_drop(tfcp_req)))
			ret = nvmet_fc_rcv_fcp_req(tfcp_req->tport->targetport,
				&tfcp_req->tgt_fcp_req,
				fcpreq->cmdaddr, fcpreq->cmdlen);
		else
			pr_info("%s: dropped command ********\n", __func__);
	}
	if (ret)
		fcloop_call_host_done(fcpreq, tfcp_req, ret);
}

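/*
 * Work item that propagates a host-side abort to the target transport,
 * then completes the I/O back to the host with -ECANCELED.
 */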
static void
fcloop_fcp_abort_recv_work(struct work_struct *work)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(work, struct fcloop_fcpreq, abort_rcv_work);
	struct nvmefc_fcp_req *fcpreq;
	bool completed = false;

	spin_lock_irq(&tfcp_req->reqlock);
	fcpreq = tfcp_req->fcpreq;
	switch (tfcp_req->inistate) {
	case INI_IO_ABORTED:
		break;
	case INI_IO_COMPLETED:
		completed = true;
		break;
	default:
		spin_unlock_irq(&tfcp_req->reqlock);
		WARN_ON(1);
		return;
	}
	spin_unlock_irq(&tfcp_req->reqlock);

	if (unlikely(completed)) {
		/* remove reference taken in original abort downcall */
		fcloop_tfcp_req_put(tfcp_req);
		return;
	}

	if (tfcp_req->tport->targetport)
		nvmet_fc_rcv_fcp_abort(tfcp_req->tport->targetport,
					&tfcp_req->tgt_fcp_req);

	spin_lock_irq(&tfcp_req->reqlock);
	tfcp_req->fcpreq = NULL;
	spin_unlock_irq(&tfcp_req->reqlock);

	fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED);
	/* call_host_done releases reference for abort downcall */
}

/*
 * FCP IO operation done by target completion.
 * Call back up the initiator "done" flows.
 */
static void
fcloop_tgt_fcprqst_done_work(struct work_struct *work)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(work, struct fcloop_fcpreq, tio_done_work);
	struct nvmefc_fcp_req *fcpreq;

	spin_lock_irq(&tfcp_req->reqlock);
	fcpreq = tfcp_req->fcpreq;
	tfcp_req->inistate = INI_IO_COMPLETED;
	spin_unlock_irq(&tfcp_req->reqlock);

	fcloop_call_host_done(fcpreq, tfcp_req, tfcp_req->status);
}


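/*
 * Host fcp_io entry point: allocate the shared request context, link it
 * to the initiator request, and queue the receive work that will present
 * the command to the target transport.
 */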
static int
fcloop_fcp_req(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			void *hw_queue_handle,
			struct nvmefc_fcp_req *fcpreq)
{
	struct fcloop_rport *rport = remoteport->private;
	struct fcloop_ini_fcpreq *inireq = fcpreq->private;
	struct fcloop_fcpreq *tfcp_req;

	if (!rport->targetport)
		return -ECONNREFUSED;

	tfcp_req = kzalloc(sizeof(*tfcp_req), GFP_ATOMIC);
	if (!tfcp_req)
		return -ENOMEM;

	inireq->fcpreq = fcpreq;
	inireq->tfcp_req = tfcp_req;
	spin_lock_init(&inireq->inilock);

	tfcp_req->fcpreq = fcpreq;
	tfcp_req->tport = rport->targetport->private;
	tfcp_req->inistate = INI_IO_START;
	spin_lock_init(&tfcp_req->reqlock);
	INIT_WORK(&tfcp_req->fcp_rcv_work, fcloop_fcp_recv_work);
	INIT_WORK(&tfcp_req->abort_rcv_work, fcloop_fcp_abort_recv_work);
	INIT_WORK(&tfcp_req->tio_done_work, fcloop_tgt_fcprqst_done_work);
	kref_init(&tfcp_req->ref);

	queue_work(nvmet_wq, &tfcp_req->fcp_rcv_work);

	return 0;
}

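/*
 * Copy 'length' bytes between the target-side scatterlist (data_sg) and
 * the initiator-side scatterlist (io_sg), starting 'offset' bytes into
 * the initiator buffer. Direction is chosen by op: WRITEDATA copies into
 * data_sg, otherwise out of it.
 */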
static void
fcloop_fcp_copy_data(u8 op, struct scatterlist *data_sg,
			struct scatterlist *io_sg, u32 offset, u32 length)
{
	void *data_p, *io_p;
	u32 data_len, io_len, tlen;

	io_p = sg_virt(io_sg);
	io_len = io_sg->length;

	for ( ; offset; ) {
		tlen = min_t(u32, offset, io_len);
		offset -= tlen;
		io_len -= tlen;
		if (!io_len) {
			io_sg = sg_next(io_sg);
			io_p = sg_virt(io_sg);
			io_len = io_sg->length;
		} else
			io_p += tlen;
	}

	data_p = sg_virt(data_sg);
	data_len = data_sg->length;

	for ( ; length; ) {
		tlen = min_t(u32, io_len, data_len);
		tlen = min_t(u32, tlen, length);

		if (op == NVMET_FCOP_WRITEDATA)
			memcpy(data_p, io_p, tlen);
		else
			memcpy(io_p, data_p, tlen);

		length -= tlen;

		io_len -= tlen;
		if ((!io_len) && (length)) {
			io_sg = sg_next(io_sg);
			io_p = sg_virt(io_sg);
			io_len = io_sg->length;
		} else
			io_p += tlen;

		data_len -= tlen;
		if ((!data_len) && (length)) {
			data_sg = sg_next(data_sg);
			data_p = sg_virt(data_sg);
			data_len = data_sg->length;
		} else
			data_p += tlen;
	}
}

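/*
 * Target fcp_op entry point: emulate the data movement and response
 * phases of an FCP exchange against the initiator-side request.
 */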
static int
fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
	struct nvmefc_fcp_req *fcpreq;
	u32 rsplen = 0, xfrlen = 0;
	int fcp_err = 0, active, aborted;
	u8 op = tgt_fcpreq->op;

	spin_lock_irq(&tfcp_req->reqlock);
	fcpreq = tfcp_req->fcpreq;
	active = tfcp_req->active;
	aborted = tfcp_req->aborted;
	tfcp_req->active = true;
	spin_unlock_irq(&tfcp_req->reqlock);

	if (unlikely(active))
		/* illegal - call while i/o active */
		return -EALREADY;

	if (unlikely(aborted)) {
		/* target transport has aborted i/o prior */
		spin_lock_irq(&tfcp_req->reqlock);
		tfcp_req->active = false;
		spin_unlock_irq(&tfcp_req->reqlock);
		tgt_fcpreq->transferred_length = 0;
		tgt_fcpreq->fcp_error = -ECANCELED;
		tgt_fcpreq->done(tgt_fcpreq);
		return 0;
	}

	/*
	 * if fcpreq is NULL, the I/O has been aborted (from
	 * initiator side). For the target side, act as if all is well
	 * but don't actually move data.
	 */

	switch (op) {
	case NVMET_FCOP_WRITEDATA:
		xfrlen = tgt_fcpreq->transfer_length;
		if (fcpreq) {
			fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
					fcpreq->first_sgl, tgt_fcpreq->offset,
					xfrlen);
			fcpreq->transferred_length += xfrlen;
		}
		break;

	case NVMET_FCOP_READDATA:
	case NVMET_FCOP_READDATA_RSP:
		xfrlen = tgt_fcpreq->transfer_length;
		if (fcpreq) {
			fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
					fcpreq->first_sgl, tgt_fcpreq->offset,
					xfrlen);
			fcpreq->transferred_length += xfrlen;
		}
		if (op == NVMET_FCOP_READDATA)
			break;

		/* Fall-through to RSP handling */
		fallthrough;

	case NVMET_FCOP_RSP:
		if (fcpreq) {
			rsplen = ((fcpreq->rsplen < tgt_fcpreq->rsplen) ?
					fcpreq->rsplen : tgt_fcpreq->rsplen);
			memcpy(fcpreq->rspaddr, tgt_fcpreq->rspaddr, rsplen);
			if (rsplen < tgt_fcpreq->rsplen)
				fcp_err = -E2BIG;
			fcpreq->rcv_rsplen = rsplen;
			fcpreq->status = 0;
		}
		tfcp_req->status = 0;
		break;

	default:
		fcp_err = -EINVAL;
		break;
	}

	spin_lock_irq(&tfcp_req->reqlock);
	tfcp_req->active = false;
	spin_unlock_irq(&tfcp_req->reqlock);

	tgt_fcpreq->transferred_length = xfrlen;
	tgt_fcpreq->fcp_error = fcp_err;
	tgt_fcpreq->done(tgt_fcpreq);

	return 0;
}

static void
fcloop_tgt_fcp_abort(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);

	/*
	 * mark aborted only in case there were 2 threads in transport
	 * (one doing io, other doing abort) and only kills ops posted
	 * after the abort request
	 */
	spin_lock_irq(&tfcp_req->reqlock);
	tfcp_req->aborted = true;
	spin_unlock_irq(&tfcp_req->reqlock);

	tfcp_req->status = NVME_SC_INTERNAL;

	/*
	 * nothing more to do. If io wasn't active, the transport should
	 * immediately call the req_release. If it was active, the op
	 * will complete, and the lldd should call req_release.
	 */
}

static void
fcloop_fcp_req_release(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);

	queue_work(nvmet_wq, &tfcp_req->tio_done_work);
}

static void
fcloop_h2t_ls_abort(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
				struct nvmefc_ls_req *lsreq)
{
}

static void
fcloop_t2h_ls_abort(struct nvmet_fc_target_port *targetport,
			void *hosthandle, struct nvmefc_ls_req *lsreq)
{
}

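/*
 * Host-initiated abort of an outstanding FCP I/O: break the
 * initiator/target linkage and, unless the I/O already completed,
 * queue the abort work item.
 */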
static void
fcloop_fcp_abort(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			void *hw_queue_handle,
			struct nvmefc_fcp_req *fcpreq)
{
	struct fcloop_ini_fcpreq *inireq = fcpreq->private;
	struct fcloop_fcpreq *tfcp_req;
	bool abortio = true;

	spin_lock(&inireq->inilock);
	tfcp_req = inireq->tfcp_req;
	if (tfcp_req)
		fcloop_tfcp_req_get(tfcp_req);
	spin_unlock(&inireq->inilock);

	if (!tfcp_req)
		/* abort has already been called */
		return;

	/* break initiator/target relationship for io */
	spin_lock_irq(&tfcp_req->reqlock);
	switch (tfcp_req->inistate) {
	case INI_IO_START:
	case INI_IO_ACTIVE:
		tfcp_req->inistate = INI_IO_ABORTED;
		break;
	case INI_IO_COMPLETED:
		abortio = false;
		break;
	default:
		spin_unlock_irq(&tfcp_req->reqlock);
		WARN_ON(1);
		return;
	}
	spin_unlock_irq(&tfcp_req->reqlock);

	if (abortio)
		/* leave the reference while the work item is scheduled */
		WARN_ON(!queue_work(nvmet_wq, &tfcp_req->abort_rcv_work));
	else {
		/*
		 * as the io has already had the done callback made,
		 * nothing more to do. So release the reference taken above
		 */
		fcloop_tfcp_req_put(tfcp_req);
	}
}

static void
fcloop_nport_free(struct kref *ref)
{
	struct fcloop_nport *nport =
		container_of(ref, struct fcloop_nport, ref);
	unsigned long flags;

	spin_lock_irqsave(&fcloop_lock, flags);
	list_del(&nport->nport_list);
	spin_unlock_irqrestore(&fcloop_lock, flags);

	kfree(nport);
}

static void
fcloop_nport_put(struct fcloop_nport *nport)
{
	kref_put(&nport->ref, fcloop_nport_free);
}

static int
fcloop_nport_get(struct fcloop_nport *nport)
{
	return kref_get_unless_zero(&nport->ref);
}

static void
fcloop_localport_delete(struct nvme_fc_local_port *localport)
{
	struct fcloop_lport_priv *lport_priv = localport->private;
	struct fcloop_lport *lport = lport_priv->lport;

	/* release any threads waiting for the unreg to complete */
	complete(&lport->unreg_done);
}

static void
fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport)
{
	struct fcloop_rport *rport = remoteport->private;

	flush_work(&rport->ls_work);
	fcloop_nport_put(rport->nport);
}

static void
fcloop_targetport_delete(struct nvmet_fc_target_port *targetport)
{
	struct fcloop_tport *tport = targetport->private;

	flush_work(&tport->ls_work);
	fcloop_nport_put(tport->nport);
}

#define	FCLOOP_HW_QUEUES		4
#define	FCLOOP_SGL_SEGS			256
#define FCLOOP_DMABOUND_4G		0xFFFFFFFF

static struct nvme_fc_port_template fctemplate = {
	.localport_delete	= fcloop_localport_delete,
	.remoteport_delete	= fcloop_remoteport_delete,
	.create_queue		= fcloop_create_queue,
	.delete_queue		= fcloop_delete_queue,
	.ls_req			= fcloop_h2t_ls_req,
	.fcp_io			= fcloop_fcp_req,
	.ls_abort		= fcloop_h2t_ls_abort,
	.fcp_abort		= fcloop_fcp_abort,
	.xmt_ls_rsp		= fcloop_t2h_xmt_ls_rsp,
	.max_hw_queues		= FCLOOP_HW_QUEUES,
	.max_sgl_segments	= FCLOOP_SGL_SEGS,
	.max_dif_sgl_segments	= FCLOOP_SGL_SEGS,
	.dma_boundary		= FCLOOP_DMABOUND_4G,
	/* sizes of additional private data for data structures */
	.local_priv_sz		= sizeof(struct fcloop_lport_priv),
	.remote_priv_sz		= sizeof(struct fcloop_rport),
	.lsrqst_priv_sz		= sizeof(struct fcloop_lsreq),
	.fcprqst_priv_sz	= sizeof(struct fcloop_ini_fcpreq),
};

static struct nvmet_fc_target_template tgttemplate = {
	.targetport_delete	= fcloop_targetport_delete,
	.xmt_ls_rsp		= fcloop_h2t_xmt_ls_rsp,
	.fcp_op			= fcloop_fcp_op,
	.fcp_abort		= fcloop_tgt_fcp_abort,
	.fcp_req_release	= fcloop_fcp_req_release,
	.discovery_event	= fcloop_tgt_discovery_evt,
	.ls_req			= fcloop_t2h_ls_req,
	.ls_abort		= fcloop_t2h_ls_abort,
	.host_release		= fcloop_t2h_host_release,
	.max_hw_queues		= FCLOOP_HW_QUEUES,
	.max_sgl_segments	= FCLOOP_SGL_SEGS,
	.max_dif_sgl_segments	= FCLOOP_SGL_SEGS,
	.dma_boundary		= FCLOOP_DMABOUND_4G,
	/* optional features */
	.target_features	= 0,
	/* sizes of additional private data for data structures */
	.target_priv_sz		= sizeof(struct fcloop_tport),
	.lsrqst_priv_sz		= sizeof(struct fcloop_lsreq),
};

static ssize_t
fcloop_create_local_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvme_fc_port_info pinfo;
	struct fcloop_ctrl_options *opts;
	struct nvme_fc_local_port *localport;
	struct fcloop_lport *lport;
	struct fcloop_lport_priv *lport_priv;
	unsigned long flags;
	int ret = -ENOMEM;

	lport = kzalloc(sizeof(*lport), GFP_KERNEL);
	if (!lport)
		return -ENOMEM;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		goto out_free_lport;

	ret = fcloop_parse_options(opts, buf);
	if (ret)
		goto out_free_opts;

	/* everything there? */
	if ((opts->mask & LPORT_OPTS) != LPORT_OPTS) {
		ret = -EINVAL;
		goto out_free_opts;
	}

	memset(&pinfo, 0, sizeof(pinfo));
	pinfo.node_name = opts->wwnn;
	pinfo.port_name = opts->wwpn;
	pinfo.port_role = opts->roles;
	pinfo.port_id = opts->fcaddr;

	ret = nvme_fc_register_localport(&pinfo, &fctemplate, NULL, &localport);
	if (!ret) {
		/* success */
		lport_priv = localport->private;
		lport_priv->lport = lport;

		lport->localport = localport;
		INIT_LIST_HEAD(&lport->lport_list);

		spin_lock_irqsave(&fcloop_lock, flags);
		list_add_tail(&lport->lport_list, &fcloop_lports);
		spin_unlock_irqrestore(&fcloop_lock, flags);
	}

out_free_opts:
	kfree(opts);
out_free_lport:
	/* free only if we're going to fail */
	if (ret)
		kfree(lport);

	return ret ? ret : count;
}


static void
__unlink_local_port(struct fcloop_lport *lport)
{
	list_del(&lport->lport_list);
}

static int
__wait_localport_unreg(struct fcloop_lport *lport)
{
	int ret;

	init_completion(&lport->unreg_done);

	ret = nvme_fc_unregister_localport(lport->localport);

	wait_for_completion(&lport->unreg_done);

	kfree(lport);

	return ret;
}


static ssize_t
fcloop_delete_local_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct fcloop_lport *tlport, *lport = NULL;
	u64 nodename, portname;
	unsigned long flags;
	int ret;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
	if (ret)
		return ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tlport, &fcloop_lports, lport_list) {
		if (tlport->localport->node_name == nodename &&
		    tlport->localport->port_name == portname) {
			lport = tlport;
			__unlink_local_port(lport);
			break;
		}
	}
	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (!lport)
		return -ENOENT;

	ret = __wait_localport_unreg(lport);

	return ret ? ret : count;
}

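/*
 * Allocate (or reuse) the fcloop_nport for a wwnn/wwpn pair. For remote
 * ports the lpwwnn/lpwwpn options must name an existing local port; a
 * wwnn/wwpn that collides with a local port is rejected.
 */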
static struct fcloop_nport *
fcloop_alloc_nport(const char *buf, size_t count, bool remoteport)
{
	struct fcloop_nport *newnport, *nport = NULL;
	struct fcloop_lport *tmplport, *lport = NULL;
	struct fcloop_ctrl_options *opts;
	unsigned long flags;
	u32 opts_mask = (remoteport) ? RPORT_OPTS : TGTPORT_OPTS;
	int ret;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		return NULL;

	ret = fcloop_parse_options(opts, buf);
	if (ret)
		goto out_free_opts;

	/* everything there? */
	if ((opts->mask & opts_mask) != opts_mask) {
		ret = -EINVAL;
		goto out_free_opts;
	}

	newnport = kzalloc(sizeof(*newnport), GFP_KERNEL);
	if (!newnport)
		goto out_free_opts;

	INIT_LIST_HEAD(&newnport->nport_list);
	newnport->node_name = opts->wwnn;
	newnport->port_name = opts->wwpn;
	if (opts->mask & NVMF_OPT_ROLES)
		newnport->port_role = opts->roles;
	if (opts->mask & NVMF_OPT_FCADDR)
		newnport->port_id = opts->fcaddr;
	kref_init(&newnport->ref);

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tmplport, &fcloop_lports, lport_list) {
		if (tmplport->localport->node_name == opts->wwnn &&
		    tmplport->localport->port_name == opts->wwpn)
			goto out_invalid_opts;

		if (tmplport->localport->node_name == opts->lpwwnn &&
		    tmplport->localport->port_name == opts->lpwwpn)
			lport = tmplport;
	}

	if (remoteport) {
		if (!lport)
			goto out_invalid_opts;
		newnport->lport = lport;
	}

	list_for_each_entry(nport, &fcloop_nports, nport_list) {
		if (nport->node_name == opts->wwnn &&
		    nport->port_name == opts->wwpn) {
			if ((remoteport && nport->rport) ||
			    (!remoteport && nport->tport)) {
				nport = NULL;
				goto out_invalid_opts;
			}

			fcloop_nport_get(nport);

			spin_unlock_irqrestore(&fcloop_lock, flags);

			if (remoteport)
				nport->lport = lport;
			if (opts->mask & NVMF_OPT_ROLES)
				nport->port_role = opts->roles;
			if (opts->mask & NVMF_OPT_FCADDR)
				nport->port_id = opts->fcaddr;
			goto out_free_newnport;
		}
	}

	list_add_tail(&newnport->nport_list, &fcloop_nports);

	spin_unlock_irqrestore(&fcloop_lock, flags);

	kfree(opts);
	return newnport;

out_invalid_opts:
	spin_unlock_irqrestore(&fcloop_lock, flags);
out_free_newnport:
	kfree(newnport);
out_free_opts:
	kfree(opts);
	return nport;
}

static ssize_t
fcloop_create_remote_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvme_fc_remote_port *remoteport;
	struct fcloop_nport *nport;
	struct fcloop_rport *rport;
	struct nvme_fc_port_info pinfo;
	int ret;

	nport = fcloop_alloc_nport(buf, count, true);
	if (!nport)
		return -EIO;

	memset(&pinfo, 0, sizeof(pinfo));
	pinfo.node_name = nport->node_name;
	pinfo.port_name = nport->port_name;
	pinfo.port_role = nport->port_role;
	pinfo.port_id = nport->port_id;

	ret = nvme_fc_register_remoteport(nport->lport->localport,
						&pinfo, &remoteport);
	if (ret || !remoteport) {
		fcloop_nport_put(nport);
		return ret;
	}

	/* success */
	rport = remoteport->private;
	rport->remoteport = remoteport;
	rport->targetport = (nport->tport) ? nport->tport->targetport : NULL;
	if (nport->tport) {
		nport->tport->remoteport = remoteport;
		nport->tport->lport = nport->lport;
	}
	rport->nport = nport;
	rport->lport = nport->lport;
	nport->rport = rport;
	spin_lock_init(&rport->lock);
	INIT_WORK(&rport->ls_work, fcloop_rport_lsrqst_work);
	INIT_LIST_HEAD(&rport->ls_list);

	return count;
}


static struct fcloop_rport *
__unlink_remote_port(struct fcloop_nport *nport)
{
	struct fcloop_rport *rport = nport->rport;

	if (rport && nport->tport)
		nport->tport->remoteport = NULL;
	nport->rport = NULL;

	return rport;
}

static int
__remoteport_unreg(struct fcloop_nport *nport, struct fcloop_rport *rport)
{
	if (!rport)
		return -EALREADY;

	return nvme_fc_unregister_remoteport(rport->remoteport);
}

static ssize_t
fcloop_delete_remote_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct fcloop_nport *nport = NULL, *tmpport;
	struct fcloop_rport *rport = NULL;
	u64 nodename, portname;
	unsigned long flags;
	int ret;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
	if (ret)
		return ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
		if (tmpport->node_name == nodename &&
		    tmpport->port_name == portname && tmpport->rport) {
			nport = tmpport;
			rport = __unlink_remote_port(nport);
			break;
		}
	}

	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (!nport)
		return -ENOENT;

	ret = __remoteport_unreg(nport, rport);

	return ret ? ret : count;
}

static ssize_t
fcloop_create_target_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvmet_fc_target_port *targetport;
	struct fcloop_nport *nport;
	struct fcloop_tport *tport;
	struct nvmet_fc_port_info tinfo;
	int ret;

	nport = fcloop_alloc_nport(buf, count, false);
	if (!nport)
		return -EIO;

	tinfo.node_name = nport->node_name;
	tinfo.port_name = nport->port_name;
	tinfo.port_id = nport->port_id;

	ret = nvmet_fc_register_targetport(&tinfo, &tgttemplate, NULL,
						&targetport);
	if (ret) {
		fcloop_nport_put(nport);
		return ret;
	}

	/* success */
	tport = targetport->private;
	tport->targetport = targetport;
	tport->remoteport = (nport->rport) ? nport->rport->remoteport : NULL;
	if (nport->rport)
		nport->rport->targetport = targetport;
	tport->nport = nport;
	tport->lport = nport->lport;
	nport->tport = tport;
	spin_lock_init(&tport->lock);
	INIT_WORK(&tport->ls_work, fcloop_tport_lsrqst_work);
	INIT_LIST_HEAD(&tport->ls_list);

	return count;
}


static struct fcloop_tport *
__unlink_target_port(struct fcloop_nport *nport)
{
	struct fcloop_tport *tport = nport->tport;

	if (tport && nport->rport)
		nport->rport->targetport = NULL;
	nport->tport = NULL;

	return tport;
}

static int
__targetport_unreg(struct fcloop_nport *nport, struct fcloop_tport *tport)
{
	if (!tport)
		return -EALREADY;

	return nvmet_fc_unregister_targetport(tport->targetport);
}

static ssize_t
fcloop_delete_target_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct fcloop_nport *nport = NULL, *tmpport;
	struct fcloop_tport *tport = NULL;
	u64 nodename, portname;
	unsigned long flags;
	int ret;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
	if (ret)
		return ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
		if (tmpport->node_name == nodename &&
		    tmpport->port_name == portname && tmpport->tport) {
			nport = tmpport;
			tport = __unlink_target_port(nport);
			break;
		}
	}

	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (!nport)
		return -ENOENT;

	ret = __targetport_unreg(nport, tport);

	return ret ? ret : count;
}

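/*
 * sysfs set_cmd_drop: input is "<opcode>:<starting>:<amount>" with the
 * opcode in hex and the other two fields in decimal. An opcode with a
 * bit set above the low byte selects a fabrics command type instead of
 * a regular opcode.
 */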
static ssize_t
fcloop_set_cmd_drop(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	unsigned int opcode;
	int starting, amount;

	if (sscanf(buf, "%x:%d:%d", &opcode, &starting, &amount) != 3)
		return -EBADRQC;

	drop_current_cnt = 0;
	drop_fabric_opcode = (opcode & ~DROP_OPCODE_MASK) ? true : false;
	drop_opcode = (opcode & DROP_OPCODE_MASK);
	drop_instance = starting;
	/* the check to drop routine uses instance + count to know when
	 * to end. Thus, if dropping 1 instance, count should be 0.
	 * so subtract 1 from the count.
	 */
	drop_amount = amount - 1;

	pr_info("%s: DROP: Starting at instance %d of%s opcode x%x drop +%d "
		"instances\n",
		__func__, drop_instance, drop_fabric_opcode ? " fabric" : "",
		drop_opcode, drop_amount);

	return count;
}


static DEVICE_ATTR(add_local_port, 0200, NULL, fcloop_create_local_port);
static DEVICE_ATTR(del_local_port, 0200, NULL, fcloop_delete_local_port);
static DEVICE_ATTR(add_remote_port, 0200, NULL, fcloop_create_remote_port);
static DEVICE_ATTR(del_remote_port, 0200, NULL, fcloop_delete_remote_port);
static DEVICE_ATTR(add_target_port, 0200, NULL, fcloop_create_target_port);
static DEVICE_ATTR(del_target_port, 0200, NULL, fcloop_delete_target_port);
static DEVICE_ATTR(set_cmd_drop, 0200, NULL, fcloop_set_cmd_drop);

static struct attribute *fcloop_dev_attrs[] = {
	&dev_attr_add_local_port.attr,
	&dev_attr_del_local_port.attr,
	&dev_attr_add_remote_port.attr,
	&dev_attr_del_remote_port.attr,
	&dev_attr_add_target_port.attr,
	&dev_attr_del_target_port.attr,
	&dev_attr_set_cmd_drop.attr,
	NULL
};

static const struct attribute_group fcloop_dev_attrs_group = {
	.attrs		= fcloop_dev_attrs,
};

static const struct attribute_group *fcloop_dev_attr_groups[] = {
	&fcloop_dev_attrs_group,
	NULL,
};

static struct class *fcloop_class;
static struct device *fcloop_device;


static int __init fcloop_init(void)
{
	int ret;

	fcloop_class = class_create(THIS_MODULE, "fcloop");
	if (IS_ERR(fcloop_class)) {
		pr_err("couldn't register class fcloop\n");
		ret = PTR_ERR(fcloop_class);
		return ret;
	}

	fcloop_device = device_create_with_groups(
				fcloop_class, NULL, MKDEV(0, 0), NULL,
				fcloop_dev_attr_groups, "ctl");
	if (IS_ERR(fcloop_device)) {
		pr_err("couldn't create ctl device!\n");
		ret = PTR_ERR(fcloop_device);
		goto out_destroy_class;
	}

	get_device(fcloop_device);

	return 0;

out_destroy_class:
	class_destroy(fcloop_class);
	return ret;
}

static void __exit fcloop_exit(void)
{
	struct fcloop_lport *lport = NULL;
	struct fcloop_nport *nport = NULL;
	struct fcloop_tport *tport;
	struct fcloop_rport *rport;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	for (;;) {
		nport = list_first_entry_or_null(&fcloop_nports,
						typeof(*nport), nport_list);
		if (!nport)
			break;

		tport = __unlink_target_port(nport);
		rport = __unlink_remote_port(nport);

		spin_unlock_irqrestore(&fcloop_lock, flags);

		ret = __targetport_unreg(nport, tport);
		if (ret)
			pr_warn("%s: Failed deleting target port\n", __func__);

		ret = __remoteport_unreg(nport, rport);
		if (ret)
			pr_warn("%s: Failed deleting remote port\n", __func__);

		spin_lock_irqsave(&fcloop_lock, flags);
	}

	for (;;) {
		lport = list_first_entry_or_null(&fcloop_lports,
						typeof(*lport), lport_list);
		if (!lport)
			break;

		__unlink_local_port(lport);

		spin_unlock_irqrestore(&fcloop_lock, flags);

		ret = __wait_localport_unreg(lport);
		if (ret)
			pr_warn("%s: Failed deleting local port\n", __func__);

		spin_lock_irqsave(&fcloop_lock, flags);
	}

	spin_unlock_irqrestore(&fcloop_lock, flags);

	put_device(fcloop_device);

	device_destroy(fcloop_class, MKDEV(0, 0));
	class_destroy(fcloop_class);
}

module_init(fcloop_init);
module_exit(fcloop_exit);

MODULE_LICENSE("GPL v2");