xref: /openbmc/linux/drivers/nvme/target/fcloop.c (revision 711aab1d)
/*
 * Copyright (c) 2016 Avago Technologies.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful.
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
 * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
 * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO
 * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
 * See the GNU General Public License for more details, a copy of which
 * can be found in the file COPYING included with this package
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/parser.h>
#include <uapi/scsi/fc/fc_fs.h>

#include "../host/nvme.h"
#include "../target/nvmet.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>


enum {
	NVMF_OPT_ERR		= 0,
	NVMF_OPT_WWNN		= 1 << 0,
	NVMF_OPT_WWPN		= 1 << 1,
	NVMF_OPT_ROLES		= 1 << 2,
	NVMF_OPT_FCADDR		= 1 << 3,
	NVMF_OPT_LPWWNN		= 1 << 4,
	NVMF_OPT_LPWWPN		= 1 << 5,
};

struct fcloop_ctrl_options {
	int			mask;
	u64			wwnn;
	u64			wwpn;
	u32			roles;
	u32			fcaddr;
	u64			lpwwnn;
	u64			lpwwpn;
};

static const match_table_t opt_tokens = {
	{ NVMF_OPT_WWNN,	"wwnn=%s"	},
	{ NVMF_OPT_WWPN,	"wwpn=%s"	},
	{ NVMF_OPT_ROLES,	"roles=%d"	},
	{ NVMF_OPT_FCADDR,	"fcaddr=%x"	},
	{ NVMF_OPT_LPWWNN,	"lpwwnn=%s"	},
	{ NVMF_OPT_LPWWPN,	"lpwwpn=%s"	},
	{ NVMF_OPT_ERR,		NULL		}
};

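/*
 * Parse a comma-separated "key=value" option string (as written to the
 * sysfs attributes below) into a fcloop_ctrl_options structure. Each
 * recognized token sets the corresponding bit in opts->mask so callers
 * can verify that all required options were supplied.
 */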
static int
fcloop_parse_options(struct fcloop_ctrl_options *opts,
		const char *buf)
{
	substring_t args[MAX_OPT_ARGS];
	char *options, *o, *p;
	int token, ret = 0;
	u64 token64;

	options = o = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	while ((p = strsep(&o, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, opt_tokens, args);
		opts->mask |= token;
		switch (token) {
		case NVMF_OPT_WWNN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->wwnn = token64;
			break;
		case NVMF_OPT_WWPN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->wwpn = token64;
			break;
		case NVMF_OPT_ROLES:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->roles = token;
			break;
		case NVMF_OPT_FCADDR:
			if (match_hex(args, &token)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->fcaddr = token;
			break;
		case NVMF_OPT_LPWWNN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->lpwwnn = token64;
			break;
		case NVMF_OPT_LPWWPN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->lpwwpn = token64;
			break;
		default:
			pr_warn("unknown parameter or missing value '%s'\n", p);
			ret = -EINVAL;
			goto out_free_options;
		}
	}

out_free_options:
	kfree(options);
	return ret;
}


static int
fcloop_parse_nm_options(struct device *dev, u64 *nname, u64 *pname,
		const char *buf)
{
	substring_t args[MAX_OPT_ARGS];
	char *options, *o, *p;
	int token, ret = 0;
	u64 token64;

	*nname = -1;
	*pname = -1;

	options = o = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	while ((p = strsep(&o, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, opt_tokens, args);
		switch (token) {
		case NVMF_OPT_WWNN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			*nname = token64;
			break;
		case NVMF_OPT_WWPN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			*pname = token64;
			break;
		default:
			pr_warn("unknown parameter or missing value '%s'\n", p);
			ret = -EINVAL;
			goto out_free_options;
		}
	}

out_free_options:
	kfree(options);

	if (!ret) {
		if (*nname == -1)
			return -EINVAL;
		if (*pname == -1)
			return -EINVAL;
	}

	return ret;
}


#define LPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN)

#define RPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN |  \
			 NVMF_OPT_LPWWNN | NVMF_OPT_LPWWPN)

#define TGTPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN)


static DEFINE_SPINLOCK(fcloop_lock);
static LIST_HEAD(fcloop_lports);
static LIST_HEAD(fcloop_nports);

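/*
 * Port bookkeeping: an fcloop_lport wraps an NVMe-FC local port, and an
 * fcloop_nport ties together the remote-port (initiator side, rport) and
 * target-port (target side, tport) views of the same node/port name pair.
 * The rport/tport cross-pointers are what "loop back" traffic from the
 * host FC transport into the target FC transport in memory.
 */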
struct fcloop_lport {
	struct nvme_fc_local_port *localport;
	struct list_head lport_list;
	struct completion unreg_done;
};

struct fcloop_rport {
	struct nvme_fc_remote_port *remoteport;
	struct nvmet_fc_target_port *targetport;
	struct fcloop_nport *nport;
	struct fcloop_lport *lport;
};

struct fcloop_tport {
	struct nvmet_fc_target_port *targetport;
	struct nvme_fc_remote_port *remoteport;
	struct fcloop_nport *nport;
	struct fcloop_lport *lport;
};

struct fcloop_nport {
	struct fcloop_rport *rport;
	struct fcloop_tport *tport;
	struct fcloop_lport *lport;
	struct list_head nport_list;
	struct kref ref;
	struct completion rport_unreg_done;
	struct completion tport_unreg_done;
	u64 node_name;
	u64 port_name;
	u32 port_role;
	u32 port_id;
};

struct fcloop_lsreq {
	struct fcloop_tport		*tport;
	struct nvmefc_ls_req		*lsreq;
	struct work_struct		work;
	struct nvmefc_tgt_ls_req	tgt_ls_req;
	int				status;
};

struct fcloop_fcpreq {
	struct fcloop_tport		*tport;
	struct nvmefc_fcp_req		*fcpreq;
	spinlock_t			reqlock;
	u16				status;
	bool				active;
	bool				aborted;
	struct work_struct		work;
	struct nvmefc_tgt_fcp_req	tgt_fcp_req;
};

struct fcloop_ini_fcpreq {
	struct nvmefc_fcp_req		*fcpreq;
	struct fcloop_fcpreq		*tfcp_req;
	struct work_struct		iniwork;
};

static inline struct fcloop_lsreq *
tgt_ls_req_to_lsreq(struct nvmefc_tgt_ls_req *tgt_lsreq)
{
	return container_of(tgt_lsreq, struct fcloop_lsreq, tgt_ls_req);
}

static inline struct fcloop_fcpreq *
tgt_fcp_req_to_fcpreq(struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	return container_of(tgt_fcpreq, struct fcloop_fcpreq, tgt_fcp_req);
}


static int
fcloop_create_queue(struct nvme_fc_local_port *localport,
			unsigned int qidx, u16 qsize,
			void **handle)
{
	*handle = localport;
	return 0;
}

static void
fcloop_delete_queue(struct nvme_fc_local_port *localport,
			unsigned int idx, void *handle)
{
}


/*
 * Transmit of the LS RSP is done (e.g. buffers all set); call back up
 * the initiator's "done" flows.
 */
static void
fcloop_tgt_lsrqst_done_work(struct work_struct *work)
{
	struct fcloop_lsreq *tls_req =
		container_of(work, struct fcloop_lsreq, work);
	struct fcloop_tport *tport = tls_req->tport;
	struct nvmefc_ls_req *lsreq = tls_req->lsreq;

	if (tport->remoteport)
		lsreq->done(lsreq, tls_req->status);
}

static int
fcloop_ls_req(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			struct nvmefc_ls_req *lsreq)
{
	struct fcloop_lsreq *tls_req = lsreq->private;
	struct fcloop_rport *rport = remoteport->private;
	int ret = 0;

	tls_req->lsreq = lsreq;
	INIT_WORK(&tls_req->work, fcloop_tgt_lsrqst_done_work);

	if (!rport->targetport) {
		tls_req->status = -ECONNREFUSED;
		schedule_work(&tls_req->work);
		return ret;
	}

	tls_req->status = 0;
	tls_req->tport = rport->targetport->private;
	ret = nvmet_fc_rcv_ls_req(rport->targetport, &tls_req->tgt_ls_req,
				 lsreq->rqstaddr, lsreq->rqstlen);

	return ret;
}

static int
fcloop_xmt_ls_rsp(struct nvmet_fc_target_port *tport,
			struct nvmefc_tgt_ls_req *tgt_lsreq)
{
	struct fcloop_lsreq *tls_req = tgt_ls_req_to_lsreq(tgt_lsreq);
	struct nvmefc_ls_req *lsreq = tls_req->lsreq;

	memcpy(lsreq->rspaddr, tgt_lsreq->rspbuf,
		((lsreq->rsplen < tgt_lsreq->rsplen) ?
				lsreq->rsplen : tgt_lsreq->rsplen));
	tgt_lsreq->done(tgt_lsreq);

	schedule_work(&tls_req->work);

	return 0;
}

/*
 * FCP I/O operation done due to an initiator abort; call back up the
 * initiator's "done" flows.
 */
static void
fcloop_tgt_fcprqst_ini_done_work(struct work_struct *work)
{
	struct fcloop_ini_fcpreq *inireq =
		container_of(work, struct fcloop_ini_fcpreq, iniwork);

	inireq->fcpreq->done(inireq->fcpreq);
}

/*
 * FCP I/O operation done by target completion; call back up the
 * initiator's "done" flows.
 */
static void
fcloop_tgt_fcprqst_done_work(struct work_struct *work)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(work, struct fcloop_fcpreq, work);
	struct fcloop_tport *tport = tfcp_req->tport;
	struct nvmefc_fcp_req *fcpreq;

	spin_lock(&tfcp_req->reqlock);
	fcpreq = tfcp_req->fcpreq;
	spin_unlock(&tfcp_req->reqlock);

	if (tport->remoteport && fcpreq) {
		fcpreq->status = tfcp_req->status;
		fcpreq->done(fcpreq);
	}

	kfree(tfcp_req);
}

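/*
 * Initiator entry point for FCP commands: instead of putting a frame on
 * the wire, allocate a target-side request and hand the command buffer
 * straight to the target FC transport on the looped-back target port.
 */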
static int
fcloop_fcp_req(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			void *hw_queue_handle,
			struct nvmefc_fcp_req *fcpreq)
{
	struct fcloop_rport *rport = remoteport->private;
	struct fcloop_ini_fcpreq *inireq = fcpreq->private;
	struct fcloop_fcpreq *tfcp_req;
	int ret = 0;

	if (!rport->targetport)
		return -ECONNREFUSED;

	tfcp_req = kzalloc(sizeof(*tfcp_req), GFP_KERNEL);
	if (!tfcp_req)
		return -ENOMEM;

	inireq->fcpreq = fcpreq;
	inireq->tfcp_req = tfcp_req;
	INIT_WORK(&inireq->iniwork, fcloop_tgt_fcprqst_ini_done_work);
	tfcp_req->fcpreq = fcpreq;
	tfcp_req->tport = rport->targetport->private;
	spin_lock_init(&tfcp_req->reqlock);
	INIT_WORK(&tfcp_req->work, fcloop_tgt_fcprqst_done_work);

	ret = nvmet_fc_rcv_fcp_req(rport->targetport, &tfcp_req->tgt_fcp_req,
				 fcpreq->cmdaddr, fcpreq->cmdlen);

	return ret;
}

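/*
 * Copy data between the target-side scatterlist (data_sg) and the
 * initiator-side scatterlist (io_sg): first skip "offset" bytes into
 * io_sg, then copy "length" bytes, advancing through both lists one
 * segment at a time. Direction follows the FCP op: writes copy from
 * io_sg into data_sg, reads copy the other way.
 */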
static void
fcloop_fcp_copy_data(u8 op, struct scatterlist *data_sg,
			struct scatterlist *io_sg, u32 offset, u32 length)
{
	void *data_p, *io_p;
	u32 data_len, io_len, tlen;

	io_p = sg_virt(io_sg);
	io_len = io_sg->length;

	for ( ; offset; ) {
		tlen = min_t(u32, offset, io_len);
		offset -= tlen;
		io_len -= tlen;
		if (!io_len) {
			io_sg = sg_next(io_sg);
			io_p = sg_virt(io_sg);
			io_len = io_sg->length;
		} else
			io_p += tlen;
	}

	data_p = sg_virt(data_sg);
	data_len = data_sg->length;

	for ( ; length; ) {
		tlen = min_t(u32, io_len, data_len);
		tlen = min_t(u32, tlen, length);

		if (op == NVMET_FCOP_WRITEDATA)
			memcpy(data_p, io_p, tlen);
		else
			memcpy(io_p, data_p, tlen);

		length -= tlen;

		io_len -= tlen;
		if ((!io_len) && (length)) {
			io_sg = sg_next(io_sg);
			io_p = sg_virt(io_sg);
			io_len = io_sg->length;
		} else
			io_p += tlen;

		data_len -= tlen;
		if ((!data_len) && (length)) {
			data_sg = sg_next(data_sg);
			data_p = sg_virt(data_sg);
			data_len = data_sg->length;
		} else
			data_p += tlen;
	}
}

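/*
 * Target entry point for data-transfer and response ops. Rather than
 * using DMA hardware, WRITEDATA/READDATA ops are serviced with a memcpy
 * between the two scatterlists, and RSP ops copy the response buffer to
 * the initiator request. If the initiator request has already been
 * aborted (fcpreq == NULL), complete the op without moving data.
 */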
static int
fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
	struct nvmefc_fcp_req *fcpreq;
	u32 rsplen = 0, xfrlen = 0;
	int fcp_err = 0, active, aborted;
	u8 op = tgt_fcpreq->op;

	spin_lock(&tfcp_req->reqlock);
	fcpreq = tfcp_req->fcpreq;
	active = tfcp_req->active;
	aborted = tfcp_req->aborted;
	tfcp_req->active = true;
	spin_unlock(&tfcp_req->reqlock);

	if (unlikely(active))
		/* illegal - call while i/o active */
		return -EALREADY;

	if (unlikely(aborted)) {
		/* target transport has aborted i/o prior */
		spin_lock(&tfcp_req->reqlock);
		tfcp_req->active = false;
		spin_unlock(&tfcp_req->reqlock);
		tgt_fcpreq->transferred_length = 0;
		tgt_fcpreq->fcp_error = -ECANCELED;
		tgt_fcpreq->done(tgt_fcpreq);
		return 0;
	}

	/*
	 * if fcpreq is NULL, the I/O has been aborted (from
	 * initiator side). For the target side, act as if all is well
	 * but don't actually move data.
	 */

	switch (op) {
	case NVMET_FCOP_WRITEDATA:
		xfrlen = tgt_fcpreq->transfer_length;
		if (fcpreq) {
			fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
					fcpreq->first_sgl, tgt_fcpreq->offset,
					xfrlen);
			fcpreq->transferred_length += xfrlen;
		}
		break;

	case NVMET_FCOP_READDATA:
	case NVMET_FCOP_READDATA_RSP:
		xfrlen = tgt_fcpreq->transfer_length;
		if (fcpreq) {
			fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
					fcpreq->first_sgl, tgt_fcpreq->offset,
					xfrlen);
			fcpreq->transferred_length += xfrlen;
		}
		if (op == NVMET_FCOP_READDATA)
			break;

		/* Fall-Thru to RSP handling */

	case NVMET_FCOP_RSP:
		if (fcpreq) {
			rsplen = ((fcpreq->rsplen < tgt_fcpreq->rsplen) ?
					fcpreq->rsplen : tgt_fcpreq->rsplen);
			memcpy(fcpreq->rspaddr, tgt_fcpreq->rspaddr, rsplen);
			if (rsplen < tgt_fcpreq->rsplen)
				fcp_err = -E2BIG;
			fcpreq->rcv_rsplen = rsplen;
			fcpreq->status = 0;
		}
		tfcp_req->status = 0;
		break;

	default:
		fcp_err = -EINVAL;
		break;
	}

	spin_lock(&tfcp_req->reqlock);
	tfcp_req->active = false;
	spin_unlock(&tfcp_req->reqlock);

	tgt_fcpreq->transferred_length = xfrlen;
	tgt_fcpreq->fcp_error = fcp_err;
	tgt_fcpreq->done(tgt_fcpreq);

	return 0;
}

static void
fcloop_tgt_fcp_abort(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);

	/*
	 * Mark the io aborted. This covers the case where two threads
	 * were in the transport (one doing the io, the other the abort);
	 * it only kills ops posted after the abort request.
	 */
	spin_lock(&tfcp_req->reqlock);
	tfcp_req->aborted = true;
	spin_unlock(&tfcp_req->reqlock);

	tfcp_req->status = NVME_SC_FC_TRANSPORT_ABORTED;

	/*
	 * nothing more to do. If io wasn't active, the transport should
	 * immediately call the req_release. If it was active, the op
	 * will complete, and the lldd should call req_release.
	 */
}

static void
fcloop_fcp_req_release(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);

	schedule_work(&tfcp_req->work);
}

static void
fcloop_ls_abort(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
				struct nvmefc_ls_req *lsreq)
{
}

static void
fcloop_fcp_abort(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			void *hw_queue_handle,
			struct nvmefc_fcp_req *fcpreq)
{
	struct fcloop_rport *rport = remoteport->private;
	struct fcloop_ini_fcpreq *inireq = fcpreq->private;
	struct fcloop_fcpreq *tfcp_req = inireq->tfcp_req;

	if (!tfcp_req)
		/* abort has already been called */
		return;

	if (rport->targetport)
		nvmet_fc_rcv_fcp_abort(rport->targetport,
					&tfcp_req->tgt_fcp_req);

	/* break initiator/target relationship for io */
	spin_lock(&tfcp_req->reqlock);
	inireq->tfcp_req = NULL;
	tfcp_req->fcpreq = NULL;
	spin_unlock(&tfcp_req->reqlock);

	/* post the aborted io completion */
	fcpreq->status = -ECANCELED;
	schedule_work(&inireq->iniwork);
}

static void
fcloop_localport_delete(struct nvme_fc_local_port *localport)
{
	struct fcloop_lport *lport = localport->private;

	/* release any threads waiting for the unreg to complete */
	complete(&lport->unreg_done);
}

static void
fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport)
{
	struct fcloop_rport *rport = remoteport->private;

	/* release any threads waiting for the unreg to complete */
	complete(&rport->nport->rport_unreg_done);
}

static void
fcloop_targetport_delete(struct nvmet_fc_target_port *targetport)
{
	struct fcloop_tport *tport = targetport->private;

	/* release any threads waiting for the unreg to complete */
	complete(&tport->nport->tport_unreg_done);
}

#define	FCLOOP_HW_QUEUES		4
#define	FCLOOP_SGL_SEGS			256
#define FCLOOP_DMABOUND_4G		0xFFFFFFFF

static struct nvme_fc_port_template fctemplate = {
	.localport_delete	= fcloop_localport_delete,
	.remoteport_delete	= fcloop_remoteport_delete,
	.create_queue		= fcloop_create_queue,
	.delete_queue		= fcloop_delete_queue,
	.ls_req			= fcloop_ls_req,
	.fcp_io			= fcloop_fcp_req,
	.ls_abort		= fcloop_ls_abort,
	.fcp_abort		= fcloop_fcp_abort,
	.max_hw_queues		= FCLOOP_HW_QUEUES,
	.max_sgl_segments	= FCLOOP_SGL_SEGS,
	.max_dif_sgl_segments	= FCLOOP_SGL_SEGS,
	.dma_boundary		= FCLOOP_DMABOUND_4G,
	/* sizes of additional private data for data structures */
	.local_priv_sz		= sizeof(struct fcloop_lport),
	.remote_priv_sz		= sizeof(struct fcloop_rport),
	.lsrqst_priv_sz		= sizeof(struct fcloop_lsreq),
	.fcprqst_priv_sz	= sizeof(struct fcloop_ini_fcpreq),
};

static struct nvmet_fc_target_template tgttemplate = {
	.targetport_delete	= fcloop_targetport_delete,
	.xmt_ls_rsp		= fcloop_xmt_ls_rsp,
	.fcp_op			= fcloop_fcp_op,
	.fcp_abort		= fcloop_tgt_fcp_abort,
	.fcp_req_release	= fcloop_fcp_req_release,
	.max_hw_queues		= FCLOOP_HW_QUEUES,
	.max_sgl_segments	= FCLOOP_SGL_SEGS,
	.max_dif_sgl_segments	= FCLOOP_SGL_SEGS,
	.dma_boundary		= FCLOOP_DMABOUND_4G,
	/* optional features */
	.target_features	= NVMET_FCTGTFEAT_CMD_IN_ISR |
				  NVMET_FCTGTFEAT_OPDONE_IN_ISR,
	/* sizes of additional private data for data structures */
	.target_priv_sz		= sizeof(struct fcloop_tport),
};

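/*
 * sysfs interface: writes to the attributes on the fcloop "ctl" device
 * (created in fcloop_init below) create and delete the emulated local,
 * remote, and target ports. The written buffer carries the option string
 * parsed by fcloop_parse_options/fcloop_parse_nm_options above.
 */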
static ssize_t
fcloop_create_local_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvme_fc_port_info pinfo;
	struct fcloop_ctrl_options *opts;
	struct nvme_fc_local_port *localport;
	struct fcloop_lport *lport;
	int ret;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	ret = fcloop_parse_options(opts, buf);
	if (ret)
		goto out_free_opts;

	/* everything there? */
	if ((opts->mask & LPORT_OPTS) != LPORT_OPTS) {
		ret = -EINVAL;
		goto out_free_opts;
	}

	pinfo.node_name = opts->wwnn;
	pinfo.port_name = opts->wwpn;
	pinfo.port_role = opts->roles;
	pinfo.port_id = opts->fcaddr;

	ret = nvme_fc_register_localport(&pinfo, &fctemplate, NULL, &localport);
	if (!ret) {
		unsigned long flags;

		/* success */
		lport = localport->private;
		lport->localport = localport;
		INIT_LIST_HEAD(&lport->lport_list);

		spin_lock_irqsave(&fcloop_lock, flags);
		list_add_tail(&lport->lport_list, &fcloop_lports);
		spin_unlock_irqrestore(&fcloop_lock, flags);

		/* mark all of the input buffer consumed */
		ret = count;
	}

out_free_opts:
	kfree(opts);
	return ret ? ret : count;
}


static void
__unlink_local_port(struct fcloop_lport *lport)
{
	list_del(&lport->lport_list);
}

static int
__wait_localport_unreg(struct fcloop_lport *lport)
{
	int ret;

	init_completion(&lport->unreg_done);

	ret = nvme_fc_unregister_localport(lport->localport);

	wait_for_completion(&lport->unreg_done);

	return ret;
}


static ssize_t
fcloop_delete_local_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct fcloop_lport *tlport, *lport = NULL;
	u64 nodename, portname;
	unsigned long flags;
	int ret;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
	if (ret)
		return ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tlport, &fcloop_lports, lport_list) {
		if (tlport->localport->node_name == nodename &&
		    tlport->localport->port_name == portname) {
			lport = tlport;
			__unlink_local_port(lport);
			break;
		}
	}
	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (!lport)
		return -ENOENT;

	ret = __wait_localport_unreg(lport);

	return ret ? ret : count;
}

static void
fcloop_nport_free(struct kref *ref)
{
	struct fcloop_nport *nport =
		container_of(ref, struct fcloop_nport, ref);
	unsigned long flags;

	spin_lock_irqsave(&fcloop_lock, flags);
	list_del(&nport->nport_list);
	spin_unlock_irqrestore(&fcloop_lock, flags);

	kfree(nport);
}

static void
fcloop_nport_put(struct fcloop_nport *nport)
{
	kref_put(&nport->ref, fcloop_nport_free);
}

static int
fcloop_nport_get(struct fcloop_nport *nport)
{
	return kref_get_unless_zero(&nport->ref);
}

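/*
 * Allocate (or reuse) the fcloop_nport for a remote- or target-port
 * create request. If an nport with the same node/port name already
 * exists, take a reference and update its role/address; otherwise a new
 * nport is added to fcloop_nports. For remote ports, the named local
 * port (lpwwnn/lpwwpn) must already exist.
 */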
static struct fcloop_nport *
fcloop_alloc_nport(const char *buf, size_t count, bool remoteport)
{
	struct fcloop_nport *newnport, *nport = NULL;
	struct fcloop_lport *tmplport, *lport = NULL;
	struct fcloop_ctrl_options *opts;
	unsigned long flags;
	u32 opts_mask = (remoteport) ? RPORT_OPTS : TGTPORT_OPTS;
	int ret;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		return NULL;

	ret = fcloop_parse_options(opts, buf);
	if (ret)
		goto out_free_opts;

	/* everything there? */
	if ((opts->mask & opts_mask) != opts_mask) {
		ret = -EINVAL;
		goto out_free_opts;
	}

	newnport = kzalloc(sizeof(*newnport), GFP_KERNEL);
	if (!newnport)
		goto out_free_opts;

	INIT_LIST_HEAD(&newnport->nport_list);
	newnport->node_name = opts->wwnn;
	newnport->port_name = opts->wwpn;
	if (opts->mask & NVMF_OPT_ROLES)
		newnport->port_role = opts->roles;
	if (opts->mask & NVMF_OPT_FCADDR)
		newnport->port_id = opts->fcaddr;
	kref_init(&newnport->ref);

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tmplport, &fcloop_lports, lport_list) {
		if (tmplport->localport->node_name == opts->wwnn &&
		    tmplport->localport->port_name == opts->wwpn)
			goto out_invalid_opts;

		if (tmplport->localport->node_name == opts->lpwwnn &&
		    tmplport->localport->port_name == opts->lpwwpn)
			lport = tmplport;
	}

	if (remoteport) {
		if (!lport)
			goto out_invalid_opts;
		newnport->lport = lport;
	}

	list_for_each_entry(nport, &fcloop_nports, nport_list) {
		if (nport->node_name == opts->wwnn &&
		    nport->port_name == opts->wwpn) {
			if ((remoteport && nport->rport) ||
			    (!remoteport && nport->tport)) {
				nport = NULL;
				goto out_invalid_opts;
			}

			fcloop_nport_get(nport);

			spin_unlock_irqrestore(&fcloop_lock, flags);

			if (remoteport)
				nport->lport = lport;
			if (opts->mask & NVMF_OPT_ROLES)
				nport->port_role = opts->roles;
			if (opts->mask & NVMF_OPT_FCADDR)
				nport->port_id = opts->fcaddr;
			goto out_free_newnport;
		}
	}

	list_add_tail(&newnport->nport_list, &fcloop_nports);

	spin_unlock_irqrestore(&fcloop_lock, flags);

	kfree(opts);
	return newnport;

out_invalid_opts:
	spin_unlock_irqrestore(&fcloop_lock, flags);
out_free_newnport:
	kfree(newnport);
out_free_opts:
	kfree(opts);
	return nport;
}

static ssize_t
fcloop_create_remote_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvme_fc_remote_port *remoteport;
	struct fcloop_nport *nport;
	struct fcloop_rport *rport;
	struct nvme_fc_port_info pinfo;
	int ret;

	nport = fcloop_alloc_nport(buf, count, true);
	if (!nport)
		return -EIO;

	pinfo.node_name = nport->node_name;
	pinfo.port_name = nport->port_name;
	pinfo.port_role = nport->port_role;
	pinfo.port_id = nport->port_id;

	ret = nvme_fc_register_remoteport(nport->lport->localport,
						&pinfo, &remoteport);
	if (ret || !remoteport) {
		fcloop_nport_put(nport);
		return ret;
	}

	/* success */
	rport = remoteport->private;
	rport->remoteport = remoteport;
	rport->targetport = (nport->tport) ? nport->tport->targetport : NULL;
	if (nport->tport) {
		nport->tport->remoteport = remoteport;
		nport->tport->lport = nport->lport;
	}
	rport->nport = nport;
	rport->lport = nport->lport;
	nport->rport = rport;

	return count;
}


static struct fcloop_rport *
__unlink_remote_port(struct fcloop_nport *nport)
{
	struct fcloop_rport *rport = nport->rport;

	if (rport && nport->tport)
		nport->tport->remoteport = NULL;
	nport->rport = NULL;

	return rport;
}

static int
__wait_remoteport_unreg(struct fcloop_nport *nport, struct fcloop_rport *rport)
{
	int ret;

	if (!rport)
		return -EALREADY;

	init_completion(&nport->rport_unreg_done);

	ret = nvme_fc_unregister_remoteport(rport->remoteport);
	if (ret)
		return ret;

	wait_for_completion(&nport->rport_unreg_done);

	fcloop_nport_put(nport);

	return ret;
}

static ssize_t
fcloop_delete_remote_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct fcloop_nport *nport = NULL, *tmpport;
	struct fcloop_rport *rport = NULL;
	u64 nodename, portname;
	unsigned long flags;
	int ret;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
	if (ret)
		return ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
		if (tmpport->node_name == nodename &&
		    tmpport->port_name == portname && tmpport->rport) {
			nport = tmpport;
			rport = __unlink_remote_port(nport);
			break;
		}
	}

	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (!nport)
		return -ENOENT;

	ret = __wait_remoteport_unreg(nport, rport);

	return ret ? ret : count;
}

static ssize_t
fcloop_create_target_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvmet_fc_target_port *targetport;
	struct fcloop_nport *nport;
	struct fcloop_tport *tport;
	struct nvmet_fc_port_info tinfo;
	int ret;

	nport = fcloop_alloc_nport(buf, count, false);
	if (!nport)
		return -EIO;

	tinfo.node_name = nport->node_name;
	tinfo.port_name = nport->port_name;
	tinfo.port_id = nport->port_id;

	ret = nvmet_fc_register_targetport(&tinfo, &tgttemplate, NULL,
						&targetport);
	if (ret) {
		fcloop_nport_put(nport);
		return ret;
	}

	/* success */
	tport = targetport->private;
	tport->targetport = targetport;
	tport->remoteport = (nport->rport) ? nport->rport->remoteport : NULL;
	if (nport->rport)
		nport->rport->targetport = targetport;
	tport->nport = nport;
	tport->lport = nport->lport;
	nport->tport = tport;

	return count;
}


static struct fcloop_tport *
__unlink_target_port(struct fcloop_nport *nport)
{
	struct fcloop_tport *tport = nport->tport;

	if (tport && nport->rport)
		nport->rport->targetport = NULL;
	nport->tport = NULL;

	return tport;
}

static int
__wait_targetport_unreg(struct fcloop_nport *nport, struct fcloop_tport *tport)
{
	int ret;

	if (!tport)
		return -EALREADY;

	init_completion(&nport->tport_unreg_done);

	ret = nvmet_fc_unregister_targetport(tport->targetport);
	if (ret)
		return ret;

	wait_for_completion(&nport->tport_unreg_done);

	fcloop_nport_put(nport);

	return ret;
}

static ssize_t
fcloop_delete_target_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct fcloop_nport *nport = NULL, *tmpport;
	struct fcloop_tport *tport;
	u64 nodename, portname;
	unsigned long flags;
	int ret;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
	if (ret)
		return ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
		if (tmpport->node_name == nodename &&
		    tmpport->port_name == portname && tmpport->tport) {
			nport = tmpport;
			tport = __unlink_target_port(nport);
			break;
		}
	}

	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (!nport)
		return -ENOENT;

	ret = __wait_targetport_unreg(nport, tport);

	return ret ? ret : count;
}


static DEVICE_ATTR(add_local_port, 0200, NULL, fcloop_create_local_port);
static DEVICE_ATTR(del_local_port, 0200, NULL, fcloop_delete_local_port);
static DEVICE_ATTR(add_remote_port, 0200, NULL, fcloop_create_remote_port);
static DEVICE_ATTR(del_remote_port, 0200, NULL, fcloop_delete_remote_port);
static DEVICE_ATTR(add_target_port, 0200, NULL, fcloop_create_target_port);
static DEVICE_ATTR(del_target_port, 0200, NULL, fcloop_delete_target_port);

static struct attribute *fcloop_dev_attrs[] = {
	&dev_attr_add_local_port.attr,
	&dev_attr_del_local_port.attr,
	&dev_attr_add_remote_port.attr,
	&dev_attr_del_remote_port.attr,
	&dev_attr_add_target_port.attr,
	&dev_attr_del_target_port.attr,
	NULL
};

static struct attribute_group fcloop_dev_attrs_group = {
	.attrs		= fcloop_dev_attrs,
};

static const struct attribute_group *fcloop_dev_attr_groups[] = {
	&fcloop_dev_attrs_group,
	NULL,
};

static struct class *fcloop_class;
static struct device *fcloop_device;


static int __init fcloop_init(void)
{
	int ret;

	fcloop_class = class_create(THIS_MODULE, "fcloop");
	if (IS_ERR(fcloop_class)) {
		pr_err("couldn't register class fcloop\n");
		ret = PTR_ERR(fcloop_class);
		return ret;
	}

	fcloop_device = device_create_with_groups(
				fcloop_class, NULL, MKDEV(0, 0), NULL,
				fcloop_dev_attr_groups, "ctl");
	if (IS_ERR(fcloop_device)) {
		pr_err("couldn't create ctl device!\n");
		ret = PTR_ERR(fcloop_device);
		goto out_destroy_class;
	}

	get_device(fcloop_device);

	return 0;

out_destroy_class:
	class_destroy(fcloop_class);
	return ret;
}

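/*
 * Module teardown: unlink and unregister every remaining nport (target
 * side first, then remote side) and local port, dropping fcloop_lock
 * around each blocking unregister/wait, before tearing down the sysfs
 * ctl device and class.
 */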
static void __exit fcloop_exit(void)
{
	struct fcloop_lport *lport;
	struct fcloop_nport *nport;
	struct fcloop_tport *tport;
	struct fcloop_rport *rport;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	for (;;) {
		nport = list_first_entry_or_null(&fcloop_nports,
						typeof(*nport), nport_list);
		if (!nport)
			break;

		tport = __unlink_target_port(nport);
		rport = __unlink_remote_port(nport);

		spin_unlock_irqrestore(&fcloop_lock, flags);

		ret = __wait_targetport_unreg(nport, tport);
		if (ret)
			pr_warn("%s: Failed deleting target port\n", __func__);

		ret = __wait_remoteport_unreg(nport, rport);
		if (ret)
			pr_warn("%s: Failed deleting remote port\n", __func__);

		spin_lock_irqsave(&fcloop_lock, flags);
	}

	for (;;) {
		lport = list_first_entry_or_null(&fcloop_lports,
						typeof(*lport), lport_list);
		if (!lport)
			break;

		__unlink_local_port(lport);

		spin_unlock_irqrestore(&fcloop_lock, flags);

		ret = __wait_localport_unreg(lport);
		if (ret)
			pr_warn("%s: Failed deleting local port\n", __func__);

		spin_lock_irqsave(&fcloop_lock, flags);
	}

	spin_unlock_irqrestore(&fcloop_lock, flags);

	put_device(fcloop_device);

	device_destroy(fcloop_class, MKDEV(0, 0));
	class_destroy(fcloop_class);
}

module_init(fcloop_init);
module_exit(fcloop_exit);

MODULE_LICENSE("GPL v2");