// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/nvme/target/fcloop.c (revision b7019ac5)
 * Copyright (c) 2016 Avago Technologies.  All rights reserved.
 */
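
/*
 * fcloop: a software loopback "fabric" for NVMe over Fibre Channel.
 * The module registers both a host-side (initiator) port template and
 * a target-side port template with the FC transports, and wires LS and
 * FCP requests issued on the host side directly into the target side,
 * so both stacks can be exercised without FC hardware.  Ports are
 * created and deleted through sysfs attributes on the "ctl" device of
 * the "fcloop" class (see fcloop_init() and the DEVICE_ATTR
 * definitions below).
 */
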
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/parser.h>
#include <uapi/scsi/fc/fc_fs.h>

#include "../host/nvme.h"
#include "../target/nvmet.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>


enum {
	NVMF_OPT_ERR		= 0,
	NVMF_OPT_WWNN		= 1 << 0,
	NVMF_OPT_WWPN		= 1 << 1,
	NVMF_OPT_ROLES		= 1 << 2,
	NVMF_OPT_FCADDR		= 1 << 3,
	NVMF_OPT_LPWWNN		= 1 << 4,
	NVMF_OPT_LPWWPN		= 1 << 5,
};

struct fcloop_ctrl_options {
	int			mask;
	u64			wwnn;
	u64			wwpn;
	u32			roles;
	u32			fcaddr;
	u64			lpwwnn;
	u64			lpwwpn;
};

static const match_table_t opt_tokens = {
	{ NVMF_OPT_WWNN,	"wwnn=%s"	},
	{ NVMF_OPT_WWPN,	"wwpn=%s"	},
	{ NVMF_OPT_ROLES,	"roles=%d"	},
	{ NVMF_OPT_FCADDR,	"fcaddr=%x"	},
	{ NVMF_OPT_LPWWNN,	"lpwwnn=%s"	},
	{ NVMF_OPT_LPWWPN,	"lpwwpn=%s"	},
	{ NVMF_OPT_ERR,		NULL		}
};

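/*
 * Option strings are comma-separated "name=value" lists, e.g.
 * "wwnn=<64-bit name>,wwpn=<64-bit name>,roles=<int>,fcaddr=<hex>".
 * Values are parsed with the standard match_int/match_u64/match_hex
 * helpers from <linux/parser.h>.
 */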
static int
fcloop_parse_options(struct fcloop_ctrl_options *opts,
		const char *buf)
{
	substring_t args[MAX_OPT_ARGS];
	char *options, *o, *p;
	int token, ret = 0;
	u64 token64;

	options = o = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	while ((p = strsep(&o, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, opt_tokens, args);
		opts->mask |= token;
		switch (token) {
		case NVMF_OPT_WWNN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->wwnn = token64;
			break;
		case NVMF_OPT_WWPN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->wwpn = token64;
			break;
		case NVMF_OPT_ROLES:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->roles = token;
			break;
		case NVMF_OPT_FCADDR:
			if (match_hex(args, &token)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->fcaddr = token;
			break;
		case NVMF_OPT_LPWWNN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->lpwwnn = token64;
			break;
		case NVMF_OPT_LPWWPN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->lpwwpn = token64;
			break;
		default:
			pr_warn("unknown parameter or missing value '%s'\n", p);
			ret = -EINVAL;
			goto out_free_options;
		}
	}

out_free_options:
	kfree(options);
	return ret;
}


static int
fcloop_parse_nm_options(struct device *dev, u64 *nname, u64 *pname,
		const char *buf)
{
	substring_t args[MAX_OPT_ARGS];
	char *options, *o, *p;
	int token, ret = 0;
	u64 token64;

	*nname = -1;
	*pname = -1;

	options = o = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	while ((p = strsep(&o, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, opt_tokens, args);
		switch (token) {
		case NVMF_OPT_WWNN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			*nname = token64;
			break;
		case NVMF_OPT_WWPN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			*pname = token64;
			break;
		default:
			pr_warn("unknown parameter or missing value '%s'\n", p);
			ret = -EINVAL;
			goto out_free_options;
		}
	}

out_free_options:
	kfree(options);

	if (!ret) {
		if (*nname == -1)
			return -EINVAL;
		if (*pname == -1)
			return -EINVAL;
	}

	return ret;
}


#define LPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN)

#define RPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN |  \
			 NVMF_OPT_LPWWNN | NVMF_OPT_LPWWPN)

#define TGTPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN)


static DEFINE_SPINLOCK(fcloop_lock);
static LIST_HEAD(fcloop_lports);
static LIST_HEAD(fcloop_nports);

struct fcloop_lport {
	struct nvme_fc_local_port *localport;
	struct list_head lport_list;
	struct completion unreg_done;
};

struct fcloop_lport_priv {
	struct fcloop_lport *lport;
};

struct fcloop_rport {
	struct nvme_fc_remote_port *remoteport;
	struct nvmet_fc_target_port *targetport;
	struct fcloop_nport *nport;
	struct fcloop_lport *lport;
};

struct fcloop_tport {
	struct nvmet_fc_target_port *targetport;
	struct nvme_fc_remote_port *remoteport;
	struct fcloop_nport *nport;
	struct fcloop_lport *lport;
};

struct fcloop_nport {
	struct fcloop_rport *rport;
	struct fcloop_tport *tport;
	struct fcloop_lport *lport;
	struct list_head nport_list;
	struct kref ref;
	u64 node_name;
	u64 port_name;
	u32 port_role;
	u32 port_id;
};

struct fcloop_lsreq {
	struct fcloop_tport		*tport;
	struct nvmefc_ls_req		*lsreq;
	struct work_struct		work;
	struct nvmefc_tgt_ls_req	tgt_ls_req;
	int				status;
};

enum {
	INI_IO_START		= 0,
	INI_IO_ACTIVE		= 1,
	INI_IO_ABORTED		= 2,
	INI_IO_COMPLETED	= 3,
};

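/*
 * The INI_IO_* values above track the initiator side of a looped-back
 * FCP request (the inistate field of struct fcloop_fcpreq below):
 * START when the request is first queued, ACTIVE once the receive work
 * has handed it to the target transport, ABORTED if the host aborts it
 * before completion, and COMPLETED once the initiator "done" callback
 * has been made.
 */
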
struct fcloop_fcpreq {
	struct fcloop_tport		*tport;
	struct nvmefc_fcp_req		*fcpreq;
	spinlock_t			reqlock;
	u16				status;
	u32				inistate;
	bool				active;
	bool				aborted;
	struct kref			ref;
	struct work_struct		fcp_rcv_work;
	struct work_struct		abort_rcv_work;
	struct work_struct		tio_done_work;
	struct nvmefc_tgt_fcp_req	tgt_fcp_req;
};

struct fcloop_ini_fcpreq {
	struct nvmefc_fcp_req		*fcpreq;
	struct fcloop_fcpreq		*tfcp_req;
	spinlock_t			inilock;
};

static inline struct fcloop_lsreq *
tgt_ls_req_to_lsreq(struct nvmefc_tgt_ls_req *tgt_lsreq)
{
	return container_of(tgt_lsreq, struct fcloop_lsreq, tgt_ls_req);
}

static inline struct fcloop_fcpreq *
tgt_fcp_req_to_fcpreq(struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	return container_of(tgt_fcpreq, struct fcloop_fcpreq, tgt_fcp_req);
}


static int
fcloop_create_queue(struct nvme_fc_local_port *localport,
			unsigned int qidx, u16 qsize,
			void **handle)
{
	*handle = localport;
	return 0;
}

static void
fcloop_delete_queue(struct nvme_fc_local_port *localport,
			unsigned int idx, void *handle)
{
}


/*
 * Transmit of the LS RSP is done (e.g. buffers are all set).  Call
 * back up the initiator's "done" flow.
 */
static void
fcloop_tgt_lsrqst_done_work(struct work_struct *work)
{
	struct fcloop_lsreq *tls_req =
		container_of(work, struct fcloop_lsreq, work);
	struct fcloop_tport *tport = tls_req->tport;
	struct nvmefc_ls_req *lsreq = tls_req->lsreq;

	if (!tport || tport->remoteport)
		lsreq->done(lsreq, tls_req->status);
}

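/*
 * LS request path: the host-side ls_req downcall is looped straight
 * into the target transport via nvmet_fc_rcv_ls_req().  If no target
 * port is paired with the remote port, the request is instead failed
 * asynchronously with -ECONNREFUSED through the work item above.
 */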
static int
fcloop_ls_req(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			struct nvmefc_ls_req *lsreq)
{
	struct fcloop_lsreq *tls_req = lsreq->private;
	struct fcloop_rport *rport = remoteport->private;
	int ret = 0;

	tls_req->lsreq = lsreq;
	INIT_WORK(&tls_req->work, fcloop_tgt_lsrqst_done_work);

	if (!rport->targetport) {
		tls_req->status = -ECONNREFUSED;
		tls_req->tport = NULL;
		schedule_work(&tls_req->work);
		return ret;
	}

	tls_req->status = 0;
	tls_req->tport = rport->targetport->private;
	ret = nvmet_fc_rcv_ls_req(rport->targetport, &tls_req->tgt_ls_req,
				 lsreq->rqstaddr, lsreq->rqstlen);

	return ret;
}

static int
fcloop_xmt_ls_rsp(struct nvmet_fc_target_port *tport,
			struct nvmefc_tgt_ls_req *tgt_lsreq)
{
	struct fcloop_lsreq *tls_req = tgt_ls_req_to_lsreq(tgt_lsreq);
	struct nvmefc_ls_req *lsreq = tls_req->lsreq;

	memcpy(lsreq->rspaddr, tgt_lsreq->rspbuf,
		((lsreq->rsplen < tgt_lsreq->rsplen) ?
				lsreq->rsplen : tgt_lsreq->rsplen));
	tgt_lsreq->done(tgt_lsreq);

	schedule_work(&tls_req->work);

	return 0;
}

static void
fcloop_tfcp_req_free(struct kref *ref)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(ref, struct fcloop_fcpreq, ref);

	kfree(tfcp_req);
}

static void
fcloop_tfcp_req_put(struct fcloop_fcpreq *tfcp_req)
{
	kref_put(&tfcp_req->ref, fcloop_tfcp_req_free);
}

static int
fcloop_tfcp_req_get(struct fcloop_fcpreq *tfcp_req)
{
	return kref_get_unless_zero(&tfcp_req->ref);
}

static void
fcloop_call_host_done(struct nvmefc_fcp_req *fcpreq,
			struct fcloop_fcpreq *tfcp_req, int status)
{
	struct fcloop_ini_fcpreq *inireq = NULL;

	if (fcpreq) {
		inireq = fcpreq->private;
		spin_lock(&inireq->inilock);
		inireq->tfcp_req = NULL;
		spin_unlock(&inireq->inilock);

		fcpreq->status = status;
		fcpreq->done(fcpreq);
	}

	/* release original io reference on tgt struct */
	fcloop_tfcp_req_put(tfcp_req);
}

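/*
 * Reference counting on struct fcloop_fcpreq: the original I/O holds
 * one reference (taken by kref_init() in fcloop_fcp_req()), and an
 * abort downcall takes a second one while its work item is
 * outstanding.  fcloop_call_host_done() above drops one reference per
 * call: the original I/O reference on the normal completion path, or
 * the abort downcall's reference on the abort path.
 */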
static void
fcloop_fcp_recv_work(struct work_struct *work)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(work, struct fcloop_fcpreq, fcp_rcv_work);
	struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq;
	int ret = 0;
	bool aborted = false;

	spin_lock(&tfcp_req->reqlock);
	switch (tfcp_req->inistate) {
	case INI_IO_START:
		tfcp_req->inistate = INI_IO_ACTIVE;
		break;
	case INI_IO_ABORTED:
		aborted = true;
		break;
	default:
		spin_unlock(&tfcp_req->reqlock);
		WARN_ON(1);
		return;
	}
	spin_unlock(&tfcp_req->reqlock);

	if (unlikely(aborted))
		ret = -ECANCELED;
	else
		ret = nvmet_fc_rcv_fcp_req(tfcp_req->tport->targetport,
				&tfcp_req->tgt_fcp_req,
				fcpreq->cmdaddr, fcpreq->cmdlen);
	if (ret)
		fcloop_call_host_done(fcpreq, tfcp_req, ret);
}

static void
fcloop_fcp_abort_recv_work(struct work_struct *work)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(work, struct fcloop_fcpreq, abort_rcv_work);
	struct nvmefc_fcp_req *fcpreq;
	bool completed = false;

	spin_lock(&tfcp_req->reqlock);
	fcpreq = tfcp_req->fcpreq;
	switch (tfcp_req->inistate) {
	case INI_IO_ABORTED:
		break;
	case INI_IO_COMPLETED:
		completed = true;
		break;
	default:
		spin_unlock(&tfcp_req->reqlock);
		WARN_ON(1);
		return;
	}
	spin_unlock(&tfcp_req->reqlock);

	if (unlikely(completed)) {
		/* remove reference taken in original abort downcall */
		fcloop_tfcp_req_put(tfcp_req);
		return;
	}

	if (tfcp_req->tport->targetport)
		nvmet_fc_rcv_fcp_abort(tfcp_req->tport->targetport,
					&tfcp_req->tgt_fcp_req);

	spin_lock(&tfcp_req->reqlock);
	tfcp_req->fcpreq = NULL;
	spin_unlock(&tfcp_req->reqlock);

	fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED);
	/* call_host_done releases reference for abort downcall */
}

/*
 * The target has completed the FCP I/O operation.  Call back up the
 * initiator's "done" flow.
 */
static void
fcloop_tgt_fcprqst_done_work(struct work_struct *work)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(work, struct fcloop_fcpreq, tio_done_work);
	struct nvmefc_fcp_req *fcpreq;

	spin_lock(&tfcp_req->reqlock);
	fcpreq = tfcp_req->fcpreq;
	tfcp_req->inistate = INI_IO_COMPLETED;
	spin_unlock(&tfcp_req->reqlock);

	fcloop_call_host_done(fcpreq, tfcp_req, tfcp_req->status);
}


static int
fcloop_fcp_req(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			void *hw_queue_handle,
			struct nvmefc_fcp_req *fcpreq)
{
	struct fcloop_rport *rport = remoteport->private;
	struct fcloop_ini_fcpreq *inireq = fcpreq->private;
	struct fcloop_fcpreq *tfcp_req;

	if (!rport->targetport)
		return -ECONNREFUSED;

	tfcp_req = kzalloc(sizeof(*tfcp_req), GFP_KERNEL);
	if (!tfcp_req)
		return -ENOMEM;

	inireq->fcpreq = fcpreq;
	inireq->tfcp_req = tfcp_req;
	spin_lock_init(&inireq->inilock);

	tfcp_req->fcpreq = fcpreq;
	tfcp_req->tport = rport->targetport->private;
	tfcp_req->inistate = INI_IO_START;
	spin_lock_init(&tfcp_req->reqlock);
	INIT_WORK(&tfcp_req->fcp_rcv_work, fcloop_fcp_recv_work);
	INIT_WORK(&tfcp_req->abort_rcv_work, fcloop_fcp_abort_recv_work);
	INIT_WORK(&tfcp_req->tio_done_work, fcloop_tgt_fcprqst_done_work);
	kref_init(&tfcp_req->ref);

	schedule_work(&tfcp_req->fcp_rcv_work);

	return 0;
}

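/*
 * Copy data between the target-side scatterlist (data_sg) and the
 * initiator-side scatterlist (io_sg), starting "offset" bytes into the
 * initiator list.  The two lists may have different segment
 * geometries, so the copy walks both lists element by element.
 */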
static void
fcloop_fcp_copy_data(u8 op, struct scatterlist *data_sg,
			struct scatterlist *io_sg, u32 offset, u32 length)
{
	void *data_p, *io_p;
	u32 data_len, io_len, tlen;

	io_p = sg_virt(io_sg);
	io_len = io_sg->length;

	for ( ; offset; ) {
		tlen = min_t(u32, offset, io_len);
		offset -= tlen;
		io_len -= tlen;
		if (!io_len) {
			io_sg = sg_next(io_sg);
			io_p = sg_virt(io_sg);
			io_len = io_sg->length;
		} else
			io_p += tlen;
	}

	data_p = sg_virt(data_sg);
	data_len = data_sg->length;

	for ( ; length; ) {
		tlen = min_t(u32, io_len, data_len);
		tlen = min_t(u32, tlen, length);

		if (op == NVMET_FCOP_WRITEDATA)
			memcpy(data_p, io_p, tlen);
		else
			memcpy(io_p, data_p, tlen);

		length -= tlen;

		io_len -= tlen;
		if ((!io_len) && (length)) {
			io_sg = sg_next(io_sg);
			io_p = sg_virt(io_sg);
			io_len = io_sg->length;
		} else
			io_p += tlen;

		data_len -= tlen;
		if ((!data_len) && (length)) {
			data_sg = sg_next(data_sg);
			data_p = sg_virt(data_sg);
			data_len = data_sg->length;
		} else
			data_p += tlen;
	}
}

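/*
 * Target-side data/response operation.  If the initiator-side request
 * has already gone away (fcpreq == NULL after an abort), the op still
 * "succeeds" from the target transport's point of view; only the data
 * movement and the host status updates are skipped.
 */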
static int
fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
	struct nvmefc_fcp_req *fcpreq;
	u32 rsplen = 0, xfrlen = 0;
	int fcp_err = 0, active, aborted;
	u8 op = tgt_fcpreq->op;

	spin_lock(&tfcp_req->reqlock);
	fcpreq = tfcp_req->fcpreq;
	active = tfcp_req->active;
	aborted = tfcp_req->aborted;
	tfcp_req->active = true;
	spin_unlock(&tfcp_req->reqlock);

	if (unlikely(active))
		/* illegal - call while i/o active */
		return -EALREADY;

	if (unlikely(aborted)) {
		/* target transport has aborted i/o prior */
		spin_lock(&tfcp_req->reqlock);
		tfcp_req->active = false;
		spin_unlock(&tfcp_req->reqlock);
		tgt_fcpreq->transferred_length = 0;
		tgt_fcpreq->fcp_error = -ECANCELED;
		tgt_fcpreq->done(tgt_fcpreq);
		return 0;
	}

	/*
	 * if fcpreq is NULL, the I/O has been aborted (from
	 * initiator side). For the target side, act as if all is well
	 * but don't actually move data.
	 */

	switch (op) {
	case NVMET_FCOP_WRITEDATA:
		xfrlen = tgt_fcpreq->transfer_length;
		if (fcpreq) {
			fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
					fcpreq->first_sgl, tgt_fcpreq->offset,
					xfrlen);
			fcpreq->transferred_length += xfrlen;
		}
		break;

	case NVMET_FCOP_READDATA:
	case NVMET_FCOP_READDATA_RSP:
		xfrlen = tgt_fcpreq->transfer_length;
		if (fcpreq) {
			fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
					fcpreq->first_sgl, tgt_fcpreq->offset,
					xfrlen);
			fcpreq->transferred_length += xfrlen;
		}
		if (op == NVMET_FCOP_READDATA)
			break;

		/* Fall-Thru to RSP handling */
		/* FALLTHRU */

	case NVMET_FCOP_RSP:
		if (fcpreq) {
			rsplen = ((fcpreq->rsplen < tgt_fcpreq->rsplen) ?
					fcpreq->rsplen : tgt_fcpreq->rsplen);
			memcpy(fcpreq->rspaddr, tgt_fcpreq->rspaddr, rsplen);
			if (rsplen < tgt_fcpreq->rsplen)
				fcp_err = -E2BIG;
			fcpreq->rcv_rsplen = rsplen;
			fcpreq->status = 0;
		}
		tfcp_req->status = 0;
		break;

	default:
		fcp_err = -EINVAL;
		break;
	}

	spin_lock(&tfcp_req->reqlock);
	tfcp_req->active = false;
	spin_unlock(&tfcp_req->reqlock);

	tgt_fcpreq->transferred_length = xfrlen;
	tgt_fcpreq->fcp_error = fcp_err;
	tgt_fcpreq->done(tgt_fcpreq);

	return 0;
}

static void
fcloop_tgt_fcp_abort(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);

	/*
	 * Mark the request aborted.  This only matters when two threads
	 * are active in the transport (one doing the I/O, the other the
	 * abort), and it only fails ops posted after the abort request.
	 */
	spin_lock(&tfcp_req->reqlock);
	tfcp_req->aborted = true;
	spin_unlock(&tfcp_req->reqlock);

	tfcp_req->status = NVME_SC_INTERNAL;

	/*
	 * nothing more to do. If io wasn't active, the transport should
	 * immediately call the req_release. If it was active, the op
	 * will complete, and the lldd should call req_release.
	 */
}

static void
fcloop_fcp_req_release(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);

	schedule_work(&tfcp_req->tio_done_work);
}

static void
fcloop_ls_abort(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
				struct nvmefc_ls_req *lsreq)
{
}

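/*
 * Host-side FCP abort.  inilock guards the inireq->tfcp_req pointer,
 * so only one of a racing abort and completion can claim the target
 * side of the request; a reference is taken under the lock and handed
 * to the abort work item (or dropped right away if the I/O has already
 * completed).
 */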
static void
fcloop_fcp_abort(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			void *hw_queue_handle,
			struct nvmefc_fcp_req *fcpreq)
{
	struct fcloop_ini_fcpreq *inireq = fcpreq->private;
	struct fcloop_fcpreq *tfcp_req;
	bool abortio = true;

	spin_lock(&inireq->inilock);
	tfcp_req = inireq->tfcp_req;
	if (tfcp_req)
		fcloop_tfcp_req_get(tfcp_req);
	spin_unlock(&inireq->inilock);

	if (!tfcp_req)
		/* abort has already been called */
		return;

	/* break initiator/target relationship for io */
	spin_lock(&tfcp_req->reqlock);
	switch (tfcp_req->inistate) {
	case INI_IO_START:
	case INI_IO_ACTIVE:
		tfcp_req->inistate = INI_IO_ABORTED;
		break;
	case INI_IO_COMPLETED:
		abortio = false;
		break;
	default:
		spin_unlock(&tfcp_req->reqlock);
		WARN_ON(1);
		return;
	}
	spin_unlock(&tfcp_req->reqlock);

	if (abortio)
		/* leave the reference while the work item is scheduled */
		WARN_ON(!schedule_work(&tfcp_req->abort_rcv_work));
	else {
		/*
		 * as the io has already had the done callback made,
		 * nothing more to do. So release the reference taken above
		 */
		fcloop_tfcp_req_put(tfcp_req);
	}
}

static void
fcloop_nport_free(struct kref *ref)
{
	struct fcloop_nport *nport =
		container_of(ref, struct fcloop_nport, ref);
	unsigned long flags;

	spin_lock_irqsave(&fcloop_lock, flags);
	list_del(&nport->nport_list);
	spin_unlock_irqrestore(&fcloop_lock, flags);

	kfree(nport);
}

static void
fcloop_nport_put(struct fcloop_nport *nport)
{
	kref_put(&nport->ref, fcloop_nport_free);
}

static int
fcloop_nport_get(struct fcloop_nport *nport)
{
	return kref_get_unless_zero(&nport->ref);
}

static void
fcloop_localport_delete(struct nvme_fc_local_port *localport)
{
	struct fcloop_lport_priv *lport_priv = localport->private;
	struct fcloop_lport *lport = lport_priv->lport;

	/* release any threads waiting for the unreg to complete */
	complete(&lport->unreg_done);
}

static void
fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport)
{
	struct fcloop_rport *rport = remoteport->private;

	fcloop_nport_put(rport->nport);
}

static void
fcloop_targetport_delete(struct nvmet_fc_target_port *targetport)
{
	struct fcloop_tport *tport = targetport->private;

	fcloop_nport_put(tport->nport);
}

#define	FCLOOP_HW_QUEUES		4
#define	FCLOOP_SGL_SEGS			256
#define FCLOOP_DMABOUND_4G		0xFFFFFFFF

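/*
 * Host-side and target-side port templates.  The *_priv_sz fields ask
 * the transports to allocate the fcloop per-object structures inline
 * with their own port and request structures, so no separate
 * allocations are needed for them.
 */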
static struct nvme_fc_port_template fctemplate = {
	.localport_delete	= fcloop_localport_delete,
	.remoteport_delete	= fcloop_remoteport_delete,
	.create_queue		= fcloop_create_queue,
	.delete_queue		= fcloop_delete_queue,
	.ls_req			= fcloop_ls_req,
	.fcp_io			= fcloop_fcp_req,
	.ls_abort		= fcloop_ls_abort,
	.fcp_abort		= fcloop_fcp_abort,
	.max_hw_queues		= FCLOOP_HW_QUEUES,
	.max_sgl_segments	= FCLOOP_SGL_SEGS,
	.max_dif_sgl_segments	= FCLOOP_SGL_SEGS,
	.dma_boundary		= FCLOOP_DMABOUND_4G,
	/* sizes of additional private data for data structures */
	.local_priv_sz		= sizeof(struct fcloop_lport_priv),
	.remote_priv_sz		= sizeof(struct fcloop_rport),
	.lsrqst_priv_sz		= sizeof(struct fcloop_lsreq),
	.fcprqst_priv_sz	= sizeof(struct fcloop_ini_fcpreq),
};

static struct nvmet_fc_target_template tgttemplate = {
	.targetport_delete	= fcloop_targetport_delete,
	.xmt_ls_rsp		= fcloop_xmt_ls_rsp,
	.fcp_op			= fcloop_fcp_op,
	.fcp_abort		= fcloop_tgt_fcp_abort,
	.fcp_req_release	= fcloop_fcp_req_release,
	.max_hw_queues		= FCLOOP_HW_QUEUES,
	.max_sgl_segments	= FCLOOP_SGL_SEGS,
	.max_dif_sgl_segments	= FCLOOP_SGL_SEGS,
	.dma_boundary		= FCLOOP_DMABOUND_4G,
	/* optional features */
	.target_features	= 0,
	/* sizes of additional private data for data structures */
	.target_priv_sz		= sizeof(struct fcloop_tport),
};

static ssize_t
fcloop_create_local_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvme_fc_port_info pinfo;
	struct fcloop_ctrl_options *opts;
	struct nvme_fc_local_port *localport;
	struct fcloop_lport *lport;
	struct fcloop_lport_priv *lport_priv;
	unsigned long flags;
	int ret = -ENOMEM;

	lport = kzalloc(sizeof(*lport), GFP_KERNEL);
	if (!lport)
		return -ENOMEM;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		goto out_free_lport;

	ret = fcloop_parse_options(opts, buf);
	if (ret)
		goto out_free_opts;

	/* everything there? */
	if ((opts->mask & LPORT_OPTS) != LPORT_OPTS) {
		ret = -EINVAL;
		goto out_free_opts;
	}

	memset(&pinfo, 0, sizeof(pinfo));
	pinfo.node_name = opts->wwnn;
	pinfo.port_name = opts->wwpn;
	pinfo.port_role = opts->roles;
	pinfo.port_id = opts->fcaddr;

	ret = nvme_fc_register_localport(&pinfo, &fctemplate, NULL, &localport);
	if (!ret) {
		/* success */
		lport_priv = localport->private;
		lport_priv->lport = lport;

		lport->localport = localport;
		INIT_LIST_HEAD(&lport->lport_list);

		spin_lock_irqsave(&fcloop_lock, flags);
		list_add_tail(&lport->lport_list, &fcloop_lports);
		spin_unlock_irqrestore(&fcloop_lock, flags);
	}

out_free_opts:
	kfree(opts);
out_free_lport:
	/* free only if we're going to fail */
	if (ret)
		kfree(lport);

	return ret ? ret : count;
}


static void
__unlink_local_port(struct fcloop_lport *lport)
{
	list_del(&lport->lport_list);
}

static int
__wait_localport_unreg(struct fcloop_lport *lport)
{
	int ret;

	init_completion(&lport->unreg_done);

	ret = nvme_fc_unregister_localport(lport->localport);

	wait_for_completion(&lport->unreg_done);

	kfree(lport);

	return ret;
}


static ssize_t
fcloop_delete_local_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct fcloop_lport *tlport, *lport = NULL;
	u64 nodename, portname;
	unsigned long flags;
	int ret;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
	if (ret)
		return ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tlport, &fcloop_lports, lport_list) {
		if (tlport->localport->node_name == nodename &&
		    tlport->localport->port_name == portname) {
			lport = tlport;
			__unlink_local_port(lport);
			break;
		}
	}
	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (!lport)
		return -ENOENT;

	ret = __wait_localport_unreg(lport);

	return ret ? ret : count;
}

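/*
 * An nport pairs the remote-port (host side) and target-port views of
 * the same node_name/port_name.  Allocation either creates a new nport
 * or, if the name is already known, takes a reference on the existing
 * one and fills in the side being added.
 */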
static struct fcloop_nport *
fcloop_alloc_nport(const char *buf, size_t count, bool remoteport)
{
	struct fcloop_nport *newnport, *nport = NULL;
	struct fcloop_lport *tmplport, *lport = NULL;
	struct fcloop_ctrl_options *opts;
	unsigned long flags;
	u32 opts_mask = (remoteport) ? RPORT_OPTS : TGTPORT_OPTS;
	int ret;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		return NULL;

	ret = fcloop_parse_options(opts, buf);
	if (ret)
		goto out_free_opts;

	/* everything there? */
	if ((opts->mask & opts_mask) != opts_mask) {
		ret = -EINVAL;
		goto out_free_opts;
	}

	newnport = kzalloc(sizeof(*newnport), GFP_KERNEL);
	if (!newnport)
		goto out_free_opts;

	INIT_LIST_HEAD(&newnport->nport_list);
	newnport->node_name = opts->wwnn;
	newnport->port_name = opts->wwpn;
	if (opts->mask & NVMF_OPT_ROLES)
		newnport->port_role = opts->roles;
	if (opts->mask & NVMF_OPT_FCADDR)
		newnport->port_id = opts->fcaddr;
	kref_init(&newnport->ref);

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tmplport, &fcloop_lports, lport_list) {
		if (tmplport->localport->node_name == opts->wwnn &&
		    tmplport->localport->port_name == opts->wwpn)
			goto out_invalid_opts;

		if (tmplport->localport->node_name == opts->lpwwnn &&
		    tmplport->localport->port_name == opts->lpwwpn)
			lport = tmplport;
	}

	if (remoteport) {
		if (!lport)
			goto out_invalid_opts;
		newnport->lport = lport;
	}

	list_for_each_entry(nport, &fcloop_nports, nport_list) {
		if (nport->node_name == opts->wwnn &&
		    nport->port_name == opts->wwpn) {
			if ((remoteport && nport->rport) ||
			    (!remoteport && nport->tport)) {
				nport = NULL;
				goto out_invalid_opts;
			}

			fcloop_nport_get(nport);

			spin_unlock_irqrestore(&fcloop_lock, flags);

			if (remoteport)
				nport->lport = lport;
			if (opts->mask & NVMF_OPT_ROLES)
				nport->port_role = opts->roles;
			if (opts->mask & NVMF_OPT_FCADDR)
				nport->port_id = opts->fcaddr;
			goto out_free_newnport;
		}
	}

	list_add_tail(&newnport->nport_list, &fcloop_nports);

	spin_unlock_irqrestore(&fcloop_lock, flags);

	kfree(opts);
	return newnport;

out_invalid_opts:
	spin_unlock_irqrestore(&fcloop_lock, flags);
out_free_newnport:
	kfree(newnport);
out_free_opts:
	kfree(opts);
	return nport;
}

static ssize_t
fcloop_create_remote_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvme_fc_remote_port *remoteport;
	struct fcloop_nport *nport;
	struct fcloop_rport *rport;
	struct nvme_fc_port_info pinfo;
	int ret;

	nport = fcloop_alloc_nport(buf, count, true);
	if (!nport)
		return -EIO;

	memset(&pinfo, 0, sizeof(pinfo));
	pinfo.node_name = nport->node_name;
	pinfo.port_name = nport->port_name;
	pinfo.port_role = nport->port_role;
	pinfo.port_id = nport->port_id;

	ret = nvme_fc_register_remoteport(nport->lport->localport,
						&pinfo, &remoteport);
	if (ret || !remoteport) {
		fcloop_nport_put(nport);
		return ret;
	}

	/* success */
	rport = remoteport->private;
	rport->remoteport = remoteport;
	rport->targetport = (nport->tport) ? nport->tport->targetport : NULL;
	if (nport->tport) {
		nport->tport->remoteport = remoteport;
		nport->tport->lport = nport->lport;
	}
	rport->nport = nport;
	rport->lport = nport->lport;
	nport->rport = rport;

	return count;
}


static struct fcloop_rport *
__unlink_remote_port(struct fcloop_nport *nport)
{
	struct fcloop_rport *rport = nport->rport;

	if (rport && nport->tport)
		nport->tport->remoteport = NULL;
	nport->rport = NULL;

	return rport;
}

static int
__remoteport_unreg(struct fcloop_nport *nport, struct fcloop_rport *rport)
{
	if (!rport)
		return -EALREADY;

	return nvme_fc_unregister_remoteport(rport->remoteport);
}

static ssize_t
fcloop_delete_remote_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct fcloop_nport *nport = NULL, *tmpport;
	struct fcloop_rport *rport = NULL;
	u64 nodename, portname;
	unsigned long flags;
	int ret;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
	if (ret)
		return ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
		if (tmpport->node_name == nodename &&
		    tmpport->port_name == portname && tmpport->rport) {
			nport = tmpport;
			rport = __unlink_remote_port(nport);
			break;
		}
	}

	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (!nport)
		return -ENOENT;

	ret = __remoteport_unreg(nport, rport);

	return ret ? ret : count;
}

static ssize_t
fcloop_create_target_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvmet_fc_target_port *targetport;
	struct fcloop_nport *nport;
	struct fcloop_tport *tport;
	struct nvmet_fc_port_info tinfo;
	int ret;

	nport = fcloop_alloc_nport(buf, count, false);
	if (!nport)
		return -EIO;

	tinfo.node_name = nport->node_name;
	tinfo.port_name = nport->port_name;
	tinfo.port_id = nport->port_id;

	ret = nvmet_fc_register_targetport(&tinfo, &tgttemplate, NULL,
						&targetport);
	if (ret) {
		fcloop_nport_put(nport);
		return ret;
	}

	/* success */
	tport = targetport->private;
	tport->targetport = targetport;
	tport->remoteport = (nport->rport) ? nport->rport->remoteport : NULL;
	if (nport->rport)
		nport->rport->targetport = targetport;
	tport->nport = nport;
	tport->lport = nport->lport;
	nport->tport = tport;

	return count;
}


static struct fcloop_tport *
__unlink_target_port(struct fcloop_nport *nport)
{
	struct fcloop_tport *tport = nport->tport;

	if (tport && nport->rport)
		nport->rport->targetport = NULL;
	nport->tport = NULL;

	return tport;
}

static int
__targetport_unreg(struct fcloop_nport *nport, struct fcloop_tport *tport)
{
	if (!tport)
		return -EALREADY;

	return nvmet_fc_unregister_targetport(tport->targetport);
}

static ssize_t
fcloop_delete_target_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct fcloop_nport *nport = NULL, *tmpport;
	struct fcloop_tport *tport = NULL;
	u64 nodename, portname;
	unsigned long flags;
	int ret;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
	if (ret)
		return ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
		if (tmpport->node_name == nodename &&
		    tmpport->port_name == portname && tmpport->tport) {
			nport = tmpport;
			tport = __unlink_target_port(nport);
			break;
		}
	}

	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (!nport)
		return -ENOENT;

	ret = __targetport_unreg(nport, tport);

	return ret ? ret : count;
}


static DEVICE_ATTR(add_local_port, 0200, NULL, fcloop_create_local_port);
static DEVICE_ATTR(del_local_port, 0200, NULL, fcloop_delete_local_port);
static DEVICE_ATTR(add_remote_port, 0200, NULL, fcloop_create_remote_port);
static DEVICE_ATTR(del_remote_port, 0200, NULL, fcloop_delete_remote_port);
static DEVICE_ATTR(add_target_port, 0200, NULL, fcloop_create_target_port);
static DEVICE_ATTR(del_target_port, 0200, NULL, fcloop_delete_target_port);

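/*
 * The attributes above form the control interface on the "ctl" device.
 * A minimal sketch of wiring up a looped host/target port pair follows;
 * the wwnn/wwpn values are arbitrary examples, and roles=0x3 assumes
 * the FC transport's initiator+target role bits:
 *
 *   echo "wwnn=0x10001000,wwpn=0x20002000" > /sys/class/fcloop/ctl/add_local_port
 *   echo "wwnn=0x10001001,wwpn=0x20002001" > /sys/class/fcloop/ctl/add_target_port
 *   echo "wwnn=0x10001001,wwpn=0x20002001,roles=0x3,lpwwnn=0x10001000,lpwwpn=0x20002000" \
 *       > /sys/class/fcloop/ctl/add_remote_port
 */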
static struct attribute *fcloop_dev_attrs[] = {
	&dev_attr_add_local_port.attr,
	&dev_attr_del_local_port.attr,
	&dev_attr_add_remote_port.attr,
	&dev_attr_del_remote_port.attr,
	&dev_attr_add_target_port.attr,
	&dev_attr_del_target_port.attr,
	NULL
};

static struct attribute_group fcloop_dev_attrs_group = {
	.attrs		= fcloop_dev_attrs,
};

static const struct attribute_group *fcloop_dev_attr_groups[] = {
	&fcloop_dev_attrs_group,
	NULL,
};

static struct class *fcloop_class;
static struct device *fcloop_device;


static int __init fcloop_init(void)
{
	int ret;

	fcloop_class = class_create(THIS_MODULE, "fcloop");
	if (IS_ERR(fcloop_class)) {
		pr_err("couldn't register class fcloop\n");
		ret = PTR_ERR(fcloop_class);
		return ret;
	}

	fcloop_device = device_create_with_groups(
				fcloop_class, NULL, MKDEV(0, 0), NULL,
				fcloop_dev_attr_groups, "ctl");
	if (IS_ERR(fcloop_device)) {
		pr_err("couldn't create ctl device!\n");
		ret = PTR_ERR(fcloop_device);
		goto out_destroy_class;
	}

	get_device(fcloop_device);

	return 0;

out_destroy_class:
	class_destroy(fcloop_class);
	return ret;
}

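/*
 * Module teardown: ports are unlinked from the global lists while
 * holding fcloop_lock, but the unregister calls (which can sleep
 * waiting for the transports) are made with the lock dropped.
 */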
static void __exit fcloop_exit(void)
{
	struct fcloop_lport *lport;
	struct fcloop_nport *nport;
	struct fcloop_tport *tport;
	struct fcloop_rport *rport;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	for (;;) {
		nport = list_first_entry_or_null(&fcloop_nports,
						typeof(*nport), nport_list);
		if (!nport)
			break;

		tport = __unlink_target_port(nport);
		rport = __unlink_remote_port(nport);

		spin_unlock_irqrestore(&fcloop_lock, flags);

		ret = __targetport_unreg(nport, tport);
		if (ret)
			pr_warn("%s: Failed deleting target port\n", __func__);

		ret = __remoteport_unreg(nport, rport);
		if (ret)
			pr_warn("%s: Failed deleting remote port\n", __func__);

		spin_lock_irqsave(&fcloop_lock, flags);
	}

	for (;;) {
		lport = list_first_entry_or_null(&fcloop_lports,
						typeof(*lport), lport_list);
		if (!lport)
			break;

		__unlink_local_port(lport);

		spin_unlock_irqrestore(&fcloop_lock, flags);

		ret = __wait_localport_unreg(lport);
		if (ret)
			pr_warn("%s: Failed deleting local port\n", __func__);

		spin_lock_irqsave(&fcloop_lock, flags);
	}

	spin_unlock_irqrestore(&fcloop_lock, flags);

	put_device(fcloop_device);

	device_destroy(fcloop_class, MKDEV(0, 0));
	class_destroy(fcloop_class);
}

module_init(fcloop_init);
module_exit(fcloop_exit);

MODULE_LICENSE("GPL v2");