xref: /openbmc/linux/drivers/nvme/target/fc.c (revision 020c5260)
1 /*
2  * Copyright (c) 2016 Avago Technologies.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of version 2 of the GNU General Public License as
6  * published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful.
9  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
10  * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
11  * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO
12  * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
13  * See the GNU General Public License for more details, a copy of which
14  * can be found in the file COPYING included with this package
15  *
16  */
17 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18 #include <linux/module.h>
19 #include <linux/slab.h>
20 #include <linux/blk-mq.h>
21 #include <linux/parser.h>
22 #include <linux/random.h>
23 #include <uapi/scsi/fc/fc_fs.h>
24 #include <uapi/scsi/fc/fc_els.h>
25 
26 #include "nvmet.h"
27 #include <linux/nvme-fc-driver.h>
28 #include <linux/nvme-fc.h>
29 
30 
31 /* *************************** Data Structures/Defines ****************** */
32 
33 
34 #define NVMET_LS_CTX_COUNT		4
35 
36 /* for this implementation, assume small single frame rqst/rsp */
37 #define NVME_FC_MAX_LS_BUFFER_SIZE		2048
38 
39 struct nvmet_fc_tgtport;
40 struct nvmet_fc_tgt_assoc;
41 
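/*
 * Per-LS-request context. NVMET_LS_CTX_COUNT of these are preallocated
 * for each target port; each one owns a single-frame request/response
 * buffer pair (NVME_FC_MAX_LS_BUFFER_SIZE bytes each) plus the work item
 * used to process the LS outside of the LLDD's receive context.
 */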
42 struct nvmet_fc_ls_iod {
43 	struct nvmefc_tgt_ls_req	*lsreq;
44 	struct nvmefc_tgt_fcp_req	*fcpreq;	/* only if RS */
45 
46 	struct list_head		ls_list;	/* tgtport->ls_list */
47 
48 	struct nvmet_fc_tgtport		*tgtport;
49 	struct nvmet_fc_tgt_assoc	*assoc;
50 
51 	u8				*rqstbuf;
52 	u8				*rspbuf;
53 	u16				rqstdatalen;
54 	dma_addr_t			rspdma;
55 
56 	struct scatterlist		sg[2];
57 
58 	struct work_struct		work;
59 } __aligned(sizeof(unsigned long long));
60 
61 #define NVMET_FC_MAX_KB_PER_XFR		256
62 
63 enum nvmet_fcp_datadir {
64 	NVMET_FCP_NODATA,
65 	NVMET_FCP_WRITE,
66 	NVMET_FCP_READ,
67 	NVMET_FCP_ABORTED,
68 };
69 
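/*
 * Per-FCP-command context. One of these exists per SQ entry of a target
 * queue; it holds the copied CMD IU and the ERSP IU, the scatterlist
 * built for the data transfer, and the work items used to run the command
 * and its completions outside of ISR context.
 */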
70 struct nvmet_fc_fcp_iod {
71 	struct nvmefc_tgt_fcp_req	*fcpreq;
72 
73 	struct nvme_fc_cmd_iu		cmdiubuf;
74 	struct nvme_fc_ersp_iu		rspiubuf;
75 	dma_addr_t			rspdma;
76 	struct scatterlist		*data_sg;
77 	struct scatterlist		*next_sg;
78 	int				data_sg_cnt;
79 	u32				next_sg_offset;
80 	u32				total_length;
81 	u32				offset;
82 	enum nvmet_fcp_datadir		io_dir;
83 	bool				active;
84 	bool				abort;
85 	bool				aborted;
86 	bool				writedataactive;
87 	spinlock_t			flock;
88 
89 	struct nvmet_req		req;
90 	struct work_struct		work;
91 	struct work_struct		done_work;
92 
93 	struct nvmet_fc_tgtport		*tgtport;
94 	struct nvmet_fc_tgt_queue	*queue;
95 
96 	struct list_head		fcp_list;	/* tgtport->fcp_list */
97 };
98 
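/*
 * One instance per LLDD-registered target port. Anchors the preallocated
 * LS contexts, the list of active associations, and the kref that keeps
 * the structure alive until the last association is gone and the LLDD has
 * unregistered the port.
 */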
99 struct nvmet_fc_tgtport {
100 
101 	struct nvmet_fc_target_port	fc_target_port;
102 
103 	struct list_head		tgt_list; /* nvmet_fc_target_list */
104 	struct device			*dev;	/* dev for dma mapping */
105 	struct nvmet_fc_target_template	*ops;
106 
107 	struct nvmet_fc_ls_iod		*iod;
108 	spinlock_t			lock;
109 	struct list_head		ls_list;
110 	struct list_head		ls_busylist;
111 	struct list_head		assoc_list;
112 	struct ida			assoc_cnt;
113 	struct nvmet_port		*port;
114 	struct kref			ref;
115 };
116 
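/*
 * Per-connection (SQ/CQ pair) context. The fcp_iod array for the queue is
 * allocated immediately after this structure; free contexts sit on
 * fod_list. The per-queue workqueue is used to defer command and
 * completion handling out of ISR context when the LLDD requests it.
 */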
117 struct nvmet_fc_tgt_queue {
118 	bool				ninetypercent;
119 	u16				qid;
120 	u16				sqsize;
121 	u16				ersp_ratio;
122 	__le16				sqhd;
123 	int				cpu;
124 	atomic_t			connected;
125 	atomic_t			sqtail;
126 	atomic_t			zrspcnt;
127 	atomic_t			rsn;
128 	spinlock_t			qlock;
129 	struct nvmet_port		*port;
130 	struct nvmet_cq			nvme_cq;
131 	struct nvmet_sq			nvme_sq;
132 	struct nvmet_fc_tgt_assoc	*assoc;
133 	struct nvmet_fc_fcp_iod		*fod;		/* array of fcp_iods */
134 	struct list_head		fod_list;
135 	struct workqueue_struct		*work_q;
136 	struct kref			ref;
137 } __aligned(sizeof(unsigned long long));
138 
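/*
 * One instance per FC-NVME association. The association_id is the random
 * value described in the Association and Connection IDs note below;
 * queues[] is indexed by qid, with entry 0 being the admin queue created
 * along with the association.
 */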
139 struct nvmet_fc_tgt_assoc {
140 	u64				association_id;
141 	u32				a_id;
142 	struct nvmet_fc_tgtport		*tgtport;
143 	struct list_head		a_list;
144 	struct nvmet_fc_tgt_queue	*queues[NVMET_NR_QUEUES];
145 	struct kref			ref;
146 };
147 
148 
149 static inline int
150 nvmet_fc_iodnum(struct nvmet_fc_ls_iod *iodptr)
151 {
152 	return (iodptr - iodptr->tgtport->iod);
153 }
154 
155 static inline int
156 nvmet_fc_fodnum(struct nvmet_fc_fcp_iod *fodptr)
157 {
158 	return (fodptr - fodptr->queue->fod);
159 }
160 
161 
162 /*
163  * Association and Connection IDs:
164  *
165  * Association ID will have a random number in the upper 6 bytes and zero
166  *   in the lower 2 bytes
167  *
168  * Connection IDs will be Association ID with QID or'd in lower 2 bytes
169  *
170  * note: Association ID = Connection ID for queue 0
171  */
172 #define BYTES_FOR_QID			sizeof(u16)
173 #define BYTES_FOR_QID_SHIFT		(BYTES_FOR_QID * 8)
174 #define NVMET_FC_QUEUEID_MASK		((u64)((1 << BYTES_FOR_QID_SHIFT) - 1))
175 
176 static inline u64
177 nvmet_fc_makeconnid(struct nvmet_fc_tgt_assoc *assoc, u16 qid)
178 {
179 	return (assoc->association_id | qid);
180 }
181 
182 static inline u64
183 nvmet_fc_getassociationid(u64 connectionid)
184 {
185 	return connectionid & ~NVMET_FC_QUEUEID_MASK;
186 }
187 
188 static inline u16
189 nvmet_fc_getqueueid(u64 connectionid)
190 {
191 	return (u16)(connectionid & NVMET_FC_QUEUEID_MASK);
192 }
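
/*
 * Illustrative example (values are made up): if the association was
 * assigned the random id 0x0123456789ab0000, then
 *	nvmet_fc_makeconnid(assoc, 5)                 == 0x0123456789ab0005
 *	nvmet_fc_getassociationid(0x0123456789ab0005) == 0x0123456789ab0000
 *	nvmet_fc_getqueueid(0x0123456789ab0005)       == 5
 * i.e. the low 16 bits carry the queue id and the rest the association.
 */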
193 
194 static inline struct nvmet_fc_tgtport *
195 targetport_to_tgtport(struct nvmet_fc_target_port *targetport)
196 {
197 	return container_of(targetport, struct nvmet_fc_tgtport,
198 				 fc_target_port);
199 }
200 
201 static inline struct nvmet_fc_fcp_iod *
202 nvmet_req_to_fod(struct nvmet_req *nvme_req)
203 {
204 	return container_of(nvme_req, struct nvmet_fc_fcp_iod, req);
205 }
206 
207 
208 /* *************************** Globals **************************** */
209 
210 
211 static DEFINE_SPINLOCK(nvmet_fc_tgtlock);
212 
213 static LIST_HEAD(nvmet_fc_target_list);
214 static DEFINE_IDA(nvmet_fc_tgtport_cnt);
215 
216 
217 static void nvmet_fc_handle_ls_rqst_work(struct work_struct *work);
218 static void nvmet_fc_handle_fcp_rqst_work(struct work_struct *work);
219 static void nvmet_fc_fcp_rqst_op_done_work(struct work_struct *work);
220 static void nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc);
221 static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc);
222 static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue);
223 static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue);
224 static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport);
225 static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);
226 
227 
228 /* *********************** FC-NVME DMA Handling **************************** */
229 
230 /*
231  * The fcloop device passes in a NULL device pointer. Real LLDDs will
232  * pass in a valid device pointer. If NULL is passed to the dma mapping
233  * routines, depending on the platform, it may or may not succeed, and
234  * may crash.
235  *
236  * As such:
237  * Wrap all the dma routines and check the dev pointer.
238  *
239  * For simple mappings (those that return just a dma address), we noop
240  * them, returning a dma address of 0.
241  *
242  * On more complex mappings (dma_map_sg), a pseudo routine fills
243  * in the scatter list, setting all dma addresses to 0.
244  */
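
/*
 * For example (fcloop case, dev == NULL): fc_dma_map_single() below simply
 * returns a dma address of 0 and fc_dma_mapping_error() then reports
 * success, so the LS and FCP paths run unchanged without touching the
 * real DMA API.
 */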
245 
246 static inline dma_addr_t
247 fc_dma_map_single(struct device *dev, void *ptr, size_t size,
248 		enum dma_data_direction dir)
249 {
250 	return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
251 }
252 
253 static inline int
254 fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
255 {
256 	return dev ? dma_mapping_error(dev, dma_addr) : 0;
257 }
258 
259 static inline void
260 fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
261 	enum dma_data_direction dir)
262 {
263 	if (dev)
264 		dma_unmap_single(dev, addr, size, dir);
265 }
266 
267 static inline void
268 fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
269 		enum dma_data_direction dir)
270 {
271 	if (dev)
272 		dma_sync_single_for_cpu(dev, addr, size, dir);
273 }
274 
275 static inline void
276 fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
277 		enum dma_data_direction dir)
278 {
279 	if (dev)
280 		dma_sync_single_for_device(dev, addr, size, dir);
281 }
282 
283 /* pseudo dma_map_sg call */
284 static int
285 fc_map_sg(struct scatterlist *sg, int nents)
286 {
287 	struct scatterlist *s;
288 	int i;
289 
290 	WARN_ON(nents == 0 || sg[0].length == 0);
291 
292 	for_each_sg(sg, s, nents, i) {
293 		s->dma_address = 0L;
294 #ifdef CONFIG_NEED_SG_DMA_LENGTH
295 		s->dma_length = s->length;
296 #endif
297 	}
298 	return nents;
299 }
300 
301 static inline int
302 fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
303 		enum dma_data_direction dir)
304 {
305 	return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
306 }
307 
308 static inline void
309 fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
310 		enum dma_data_direction dir)
311 {
312 	if (dev)
313 		dma_unmap_sg(dev, sg, nents, dir);
314 }
315 
316 
317 /* *********************** FC-NVME Port Management ************************ */
318 
319 
320 static int
321 nvmet_fc_alloc_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
322 {
323 	struct nvmet_fc_ls_iod *iod;
324 	int i;
325 
326 	iod = kcalloc(NVMET_LS_CTX_COUNT, sizeof(struct nvmet_fc_ls_iod),
327 			GFP_KERNEL);
328 	if (!iod)
329 		return -ENOMEM;
330 
331 	tgtport->iod = iod;
332 
333 	for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
334 		INIT_WORK(&iod->work, nvmet_fc_handle_ls_rqst_work);
335 		iod->tgtport = tgtport;
336 		list_add_tail(&iod->ls_list, &tgtport->ls_list);
337 
338 		iod->rqstbuf = kcalloc(2, NVME_FC_MAX_LS_BUFFER_SIZE,
339 			GFP_KERNEL);
340 		if (!iod->rqstbuf)
341 			goto out_fail;
342 
343 		iod->rspbuf = iod->rqstbuf + NVME_FC_MAX_LS_BUFFER_SIZE;
344 
345 		iod->rspdma = fc_dma_map_single(tgtport->dev, iod->rspbuf,
346 						NVME_FC_MAX_LS_BUFFER_SIZE,
347 						DMA_TO_DEVICE);
348 		if (fc_dma_mapping_error(tgtport->dev, iod->rspdma))
349 			goto out_fail;
350 	}
351 
352 	return 0;
353 
354 out_fail:
355 	kfree(iod->rqstbuf);
356 	list_del(&iod->ls_list);
357 	for (iod--, i--; i >= 0; iod--, i--) {
358 		fc_dma_unmap_single(tgtport->dev, iod->rspdma,
359 				NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
360 		kfree(iod->rqstbuf);
361 		list_del(&iod->ls_list);
362 	}
363 
364 	kfree(iod);
365 
366 	return -EFAULT;
367 }
368 
369 static void
370 nvmet_fc_free_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
371 {
372 	struct nvmet_fc_ls_iod *iod = tgtport->iod;
373 	int i;
374 
375 	for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
376 		fc_dma_unmap_single(tgtport->dev,
377 				iod->rspdma, NVME_FC_MAX_LS_BUFFER_SIZE,
378 				DMA_TO_DEVICE);
379 		kfree(iod->rqstbuf);
380 		list_del(&iod->ls_list);
381 	}
382 	kfree(tgtport->iod);
383 }
384 
385 static struct nvmet_fc_ls_iod *
386 nvmet_fc_alloc_ls_iod(struct nvmet_fc_tgtport *tgtport)
387 {
388 	struct nvmet_fc_ls_iod *iod;
389 	unsigned long flags;
390 
391 	spin_lock_irqsave(&tgtport->lock, flags);
392 	iod = list_first_entry_or_null(&tgtport->ls_list,
393 					struct nvmet_fc_ls_iod, ls_list);
394 	if (iod)
395 		list_move_tail(&iod->ls_list, &tgtport->ls_busylist);
396 	spin_unlock_irqrestore(&tgtport->lock, flags);
397 	return iod;
398 }
399 
400 
401 static void
402 nvmet_fc_free_ls_iod(struct nvmet_fc_tgtport *tgtport,
403 			struct nvmet_fc_ls_iod *iod)
404 {
405 	unsigned long flags;
406 
407 	spin_lock_irqsave(&tgtport->lock, flags);
408 	list_move(&iod->ls_list, &tgtport->ls_list);
409 	spin_unlock_irqrestore(&tgtport->lock, flags);
410 }
411 
412 static void
413 nvmet_fc_prep_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
414 				struct nvmet_fc_tgt_queue *queue)
415 {
416 	struct nvmet_fc_fcp_iod *fod = queue->fod;
417 	int i;
418 
419 	for (i = 0; i < queue->sqsize; fod++, i++) {
420 		INIT_WORK(&fod->work, nvmet_fc_handle_fcp_rqst_work);
421 		INIT_WORK(&fod->done_work, nvmet_fc_fcp_rqst_op_done_work);
422 		fod->tgtport = tgtport;
423 		fod->queue = queue;
424 		fod->active = false;
425 		fod->abort = false;
426 		fod->aborted = false;
427 		fod->fcpreq = NULL;
428 		list_add_tail(&fod->fcp_list, &queue->fod_list);
429 		spin_lock_init(&fod->flock);
430 
431 		fod->rspdma = fc_dma_map_single(tgtport->dev, &fod->rspiubuf,
432 					sizeof(fod->rspiubuf), DMA_TO_DEVICE);
433 		if (fc_dma_mapping_error(tgtport->dev, fod->rspdma)) {
434 			list_del(&fod->fcp_list);
435 			for (fod--, i--; i >= 0; fod--, i--) {
436 				fc_dma_unmap_single(tgtport->dev, fod->rspdma,
437 						sizeof(fod->rspiubuf),
438 						DMA_TO_DEVICE);
439 				fod->rspdma = 0L;
440 				list_del(&fod->fcp_list);
441 			}
442 
443 			return;
444 		}
445 	}
446 }
447 
448 static void
449 nvmet_fc_destroy_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
450 				struct nvmet_fc_tgt_queue *queue)
451 {
452 	struct nvmet_fc_fcp_iod *fod = queue->fod;
453 	int i;
454 
455 	for (i = 0; i < queue->sqsize; fod++, i++) {
456 		if (fod->rspdma)
457 			fc_dma_unmap_single(tgtport->dev, fod->rspdma,
458 				sizeof(fod->rspiubuf), DMA_TO_DEVICE);
459 	}
460 }
461 
462 static struct nvmet_fc_fcp_iod *
463 nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue)
464 {
465 	struct nvmet_fc_fcp_iod *fod;
466 	unsigned long flags;
467 
468 	spin_lock_irqsave(&queue->qlock, flags);
469 	fod = list_first_entry_or_null(&queue->fod_list,
470 					struct nvmet_fc_fcp_iod, fcp_list);
471 	if (fod) {
472 		list_del(&fod->fcp_list);
473 		fod->active = true;
474 		/*
475 		 * no queue reference is taken, as it was taken by the
476 		 * queue lookup just prior to the allocation. The iod
477 		 * will "inherit" that reference.
478 		 */
479 	}
480 	spin_unlock_irqrestore(&queue->qlock, flags);
481 	return fod;
482 }
483 
484 
485 static void
486 nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
487 			struct nvmet_fc_fcp_iod *fod)
488 {
489 	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
490 	struct nvmet_fc_tgtport *tgtport = fod->tgtport;
491 	unsigned long flags;
492 
493 	fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma,
494 				sizeof(fod->rspiubuf), DMA_TO_DEVICE);
495 
496 	fcpreq->nvmet_fc_private = NULL;
497 
498 	spin_lock_irqsave(&queue->qlock, flags);
499 	list_add_tail(&fod->fcp_list, &fod->queue->fod_list);
500 	fod->active = false;
501 	fod->abort = false;
502 	fod->aborted = false;
503 	fod->writedataactive = false;
504 	fod->fcpreq = NULL;
505 	spin_unlock_irqrestore(&queue->qlock, flags);
506 
507 	/*
508 	 * release the reference taken at queue lookup and fod allocation
509 	 */
510 	nvmet_fc_tgt_q_put(queue);
511 
512 	tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq);
513 }
514 
515 static int
516 nvmet_fc_queue_to_cpu(struct nvmet_fc_tgtport *tgtport, int qid)
517 {
518 	int cpu, idx, cnt;
519 
520 	if (!(tgtport->ops->target_features &
521 			NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED) ||
522 	    tgtport->ops->max_hw_queues == 1)
523 		return WORK_CPU_UNBOUND;
524 
525 	/* Simple cpu selection based on qid modulo active cpu count */
526 	idx = !qid ? 0 : (qid - 1) % num_active_cpus();
527 
528 	/* find the n'th active cpu */
529 	for (cpu = 0, cnt = 0; ; ) {
530 		if (cpu_active(cpu)) {
531 			if (cnt == idx)
532 				break;
533 			cnt++;
534 		}
535 		cpu = (cpu + 1) % num_possible_cpus();
536 	}
537 
538 	return cpu;
539 }
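
/*
 * Illustrative mapping (assuming 4 active cpus): the admin queue (qid 0)
 * and qid 5 both select active-cpu index 0, while qids 1..4 select
 * indexes 0..3. Ports with a single hw queue, or without the
 * NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED feature, stay WORK_CPU_UNBOUND.
 */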
540 
541 static struct nvmet_fc_tgt_queue *
542 nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
543 			u16 qid, u16 sqsize)
544 {
545 	struct nvmet_fc_tgt_queue *queue;
546 	unsigned long flags;
547 	int ret;
548 
549 	if (qid >= NVMET_NR_QUEUES)
550 		return NULL;
551 
552 	queue = kzalloc((sizeof(*queue) +
553 				(sizeof(struct nvmet_fc_fcp_iod) * sqsize)),
554 				GFP_KERNEL);
555 	if (!queue)
556 		return NULL;
557 
558 	if (!nvmet_fc_tgt_a_get(assoc))
559 		goto out_free_queue;
560 
561 	queue->work_q = alloc_workqueue("ntfc%d.%d.%d", 0, 0,
562 				assoc->tgtport->fc_target_port.port_num,
563 				assoc->a_id, qid);
564 	if (!queue->work_q)
565 		goto out_a_put;
566 
567 	queue->fod = (struct nvmet_fc_fcp_iod *)&queue[1];
568 	queue->qid = qid;
569 	queue->sqsize = sqsize;
570 	queue->assoc = assoc;
571 	queue->port = assoc->tgtport->port;
572 	queue->cpu = nvmet_fc_queue_to_cpu(assoc->tgtport, qid);
573 	INIT_LIST_HEAD(&queue->fod_list);
574 	atomic_set(&queue->connected, 0);
575 	atomic_set(&queue->sqtail, 0);
576 	atomic_set(&queue->rsn, 1);
577 	atomic_set(&queue->zrspcnt, 0);
578 	spin_lock_init(&queue->qlock);
579 	kref_init(&queue->ref);
580 
581 	nvmet_fc_prep_fcp_iodlist(assoc->tgtport, queue);
582 
583 	ret = nvmet_sq_init(&queue->nvme_sq);
584 	if (ret)
585 		goto out_fail_iodlist;
586 
587 	WARN_ON(assoc->queues[qid]);
588 	spin_lock_irqsave(&assoc->tgtport->lock, flags);
589 	assoc->queues[qid] = queue;
590 	spin_unlock_irqrestore(&assoc->tgtport->lock, flags);
591 
592 	return queue;
593 
594 out_fail_iodlist:
595 	nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue);
596 	destroy_workqueue(queue->work_q);
597 out_a_put:
598 	nvmet_fc_tgt_a_put(assoc);
599 out_free_queue:
600 	kfree(queue);
601 	return NULL;
602 }
603 
604 
605 static void
606 nvmet_fc_tgt_queue_free(struct kref *ref)
607 {
608 	struct nvmet_fc_tgt_queue *queue =
609 		container_of(ref, struct nvmet_fc_tgt_queue, ref);
610 	unsigned long flags;
611 
612 	spin_lock_irqsave(&queue->assoc->tgtport->lock, flags);
613 	queue->assoc->queues[queue->qid] = NULL;
614 	spin_unlock_irqrestore(&queue->assoc->tgtport->lock, flags);
615 
616 	nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue);
617 
618 	nvmet_fc_tgt_a_put(queue->assoc);
619 
620 	destroy_workqueue(queue->work_q);
621 
622 	kfree(queue);
623 }
624 
625 static void
626 nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue)
627 {
628 	kref_put(&queue->ref, nvmet_fc_tgt_queue_free);
629 }
630 
631 static int
632 nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue)
633 {
634 	return kref_get_unless_zero(&queue->ref);
635 }
636 
637 
638 static void
639 nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
640 {
641 	struct nvmet_fc_tgtport *tgtport = queue->assoc->tgtport;
642 	struct nvmet_fc_fcp_iod *fod = queue->fod;
643 	unsigned long flags;
644 	int i, writedataactive;
645 	bool disconnect;
646 
647 	disconnect = atomic_xchg(&queue->connected, 0);
648 
649 	spin_lock_irqsave(&queue->qlock, flags);
650 	/* abort outstanding io's */
651 	for (i = 0; i < queue->sqsize; fod++, i++) {
652 		if (fod->active) {
653 			spin_lock(&fod->flock);
654 			fod->abort = true;
655 			writedataactive = fod->writedataactive;
656 			spin_unlock(&fod->flock);
657 			/*
658 			 * only call lldd abort routine if waiting for
659 			 * writedata. other outstanding ops should finish
660 			 * on their own.
661 			 */
662 			if (writedataactive) {
663 				spin_lock(&fod->flock);
664 				fod->aborted = true;
665 				spin_unlock(&fod->flock);
666 				tgtport->ops->fcp_abort(
667 					&tgtport->fc_target_port, fod->fcpreq);
668 			}
669 		}
670 	}
671 	spin_unlock_irqrestore(&queue->qlock, flags);
672 
673 	flush_workqueue(queue->work_q);
674 
675 	if (disconnect)
676 		nvmet_sq_destroy(&queue->nvme_sq);
677 
678 	nvmet_fc_tgt_q_put(queue);
679 }
680 
681 static struct nvmet_fc_tgt_queue *
682 nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport,
683 				u64 connection_id)
684 {
685 	struct nvmet_fc_tgt_assoc *assoc;
686 	struct nvmet_fc_tgt_queue *queue;
687 	u64 association_id = nvmet_fc_getassociationid(connection_id);
688 	u16 qid = nvmet_fc_getqueueid(connection_id);
689 	unsigned long flags;
690 
691 	spin_lock_irqsave(&tgtport->lock, flags);
692 	list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
693 		if (association_id == assoc->association_id) {
694 			queue = assoc->queues[qid];
695 			if (queue &&
696 			    (!atomic_read(&queue->connected) ||
697 			     !nvmet_fc_tgt_q_get(queue)))
698 				queue = NULL;
699 			spin_unlock_irqrestore(&tgtport->lock, flags);
700 			return queue;
701 		}
702 	}
703 	spin_unlock_irqrestore(&tgtport->lock, flags);
704 	return NULL;
705 }
706 
707 static struct nvmet_fc_tgt_assoc *
708 nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport)
709 {
710 	struct nvmet_fc_tgt_assoc *assoc, *tmpassoc;
711 	unsigned long flags;
712 	u64 ran;
713 	int idx;
714 	bool needrandom = true;
715 
716 	assoc = kzalloc(sizeof(*assoc), GFP_KERNEL);
717 	if (!assoc)
718 		return NULL;
719 
720 	idx = ida_simple_get(&tgtport->assoc_cnt, 0, 0, GFP_KERNEL);
721 	if (idx < 0)
722 		goto out_free_assoc;
723 
724 	if (!nvmet_fc_tgtport_get(tgtport))
725 		goto out_ida_put;
726 
727 	assoc->tgtport = tgtport;
728 	assoc->a_id = idx;
729 	INIT_LIST_HEAD(&assoc->a_list);
730 	kref_init(&assoc->ref);
731 
732 	while (needrandom) {
733 		get_random_bytes(&ran, sizeof(ran) - BYTES_FOR_QID);
734 		ran = ran << BYTES_FOR_QID_SHIFT;
735 
736 		spin_lock_irqsave(&tgtport->lock, flags);
737 		needrandom = false;
738 		list_for_each_entry(tmpassoc, &tgtport->assoc_list, a_list)
739 			if (ran == tmpassoc->association_id) {
740 				needrandom = true;
741 				break;
742 			}
743 		if (!needrandom) {
744 			assoc->association_id = ran;
745 			list_add_tail(&assoc->a_list, &tgtport->assoc_list);
746 		}
747 		spin_unlock_irqrestore(&tgtport->lock, flags);
748 	}
749 
750 	return assoc;
751 
752 out_ida_put:
753 	ida_simple_remove(&tgtport->assoc_cnt, idx);
754 out_free_assoc:
755 	kfree(assoc);
756 	return NULL;
757 }
758 
759 static void
760 nvmet_fc_target_assoc_free(struct kref *ref)
761 {
762 	struct nvmet_fc_tgt_assoc *assoc =
763 		container_of(ref, struct nvmet_fc_tgt_assoc, ref);
764 	struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
765 	unsigned long flags;
766 
767 	spin_lock_irqsave(&tgtport->lock, flags);
768 	list_del(&assoc->a_list);
769 	spin_unlock_irqrestore(&tgtport->lock, flags);
770 	ida_simple_remove(&tgtport->assoc_cnt, assoc->a_id);
771 	kfree(assoc);
772 	nvmet_fc_tgtport_put(tgtport);
773 }
774 
775 static void
776 nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc)
777 {
778 	kref_put(&assoc->ref, nvmet_fc_target_assoc_free);
779 }
780 
781 static int
782 nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc)
783 {
784 	return kref_get_unless_zero(&assoc->ref);
785 }
786 
787 static void
788 nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
789 {
790 	struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
791 	struct nvmet_fc_tgt_queue *queue;
792 	unsigned long flags;
793 	int i;
794 
795 	spin_lock_irqsave(&tgtport->lock, flags);
796 	for (i = NVMET_NR_QUEUES - 1; i >= 0; i--) {
797 		queue = assoc->queues[i];
798 		if (queue) {
799 			if (!nvmet_fc_tgt_q_get(queue))
800 				continue;
801 			spin_unlock_irqrestore(&tgtport->lock, flags);
802 			nvmet_fc_delete_target_queue(queue);
803 			nvmet_fc_tgt_q_put(queue);
804 			spin_lock_irqsave(&tgtport->lock, flags);
805 		}
806 	}
807 	spin_unlock_irqrestore(&tgtport->lock, flags);
808 
809 	nvmet_fc_tgt_a_put(assoc);
810 }
811 
812 static struct nvmet_fc_tgt_assoc *
813 nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport *tgtport,
814 				u64 association_id)
815 {
816 	struct nvmet_fc_tgt_assoc *assoc;
817 	struct nvmet_fc_tgt_assoc *ret = NULL;
818 	unsigned long flags;
819 
820 	spin_lock_irqsave(&tgtport->lock, flags);
821 	list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
822 		if (association_id == assoc->association_id) {
823 			ret = assoc;
824 			nvmet_fc_tgt_a_get(assoc);
825 			break;
826 		}
827 	}
828 	spin_unlock_irqrestore(&tgtport->lock, flags);
829 
830 	return ret;
831 }
832 
833 
834 /**
835  * nvmet_fc_register_targetport - transport entry point called by an
836  *                              LLDD to register the existence of a local
837  *                              NVME subsystem FC port.
838  * @pinfo:     pointer to information about the port to be registered
839  * @template:  LLDD entrypoints and operational parameters for the port
840  * @dev:       physical hardware device node the port corresponds to. Will be
841  *             used for DMA mappings
842  * @portptr:   pointer to a target port pointer. Upon success, the routine
843  *             will allocate a nvmet_fc_target_port structure and place its
844  *             address in the target port pointer. Upon failure, the target
845  *             port pointer will be set to NULL.
846  *
847  * Returns:
848  * a completion status. Must be 0 upon success; a negative errno
849  * (ex: -ENXIO) upon failure.
850  */
851 int
852 nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
853 			struct nvmet_fc_target_template *template,
854 			struct device *dev,
855 			struct nvmet_fc_target_port **portptr)
856 {
857 	struct nvmet_fc_tgtport *newrec;
858 	unsigned long flags;
859 	int ret, idx;
860 
861 	if (!template->xmt_ls_rsp || !template->fcp_op ||
862 	    !template->fcp_abort ||
863 	    !template->fcp_req_release || !template->targetport_delete ||
864 	    !template->max_hw_queues || !template->max_sgl_segments ||
865 	    !template->max_dif_sgl_segments || !template->dma_boundary) {
866 		ret = -EINVAL;
867 		goto out_regtgt_failed;
868 	}
869 
870 	newrec = kzalloc((sizeof(*newrec) + template->target_priv_sz),
871 			 GFP_KERNEL);
872 	if (!newrec) {
873 		ret = -ENOMEM;
874 		goto out_regtgt_failed;
875 	}
876 
877 	idx = ida_simple_get(&nvmet_fc_tgtport_cnt, 0, 0, GFP_KERNEL);
878 	if (idx < 0) {
879 		ret = -ENOSPC;
880 		goto out_fail_kfree;
881 	}
882 
883 	if (!get_device(dev) && dev) {
884 		ret = -ENODEV;
885 		goto out_ida_put;
886 	}
887 
888 	newrec->fc_target_port.node_name = pinfo->node_name;
889 	newrec->fc_target_port.port_name = pinfo->port_name;
890 	newrec->fc_target_port.private = &newrec[1];
891 	newrec->fc_target_port.port_id = pinfo->port_id;
892 	newrec->fc_target_port.port_num = idx;
893 	INIT_LIST_HEAD(&newrec->tgt_list);
894 	newrec->dev = dev;
895 	newrec->ops = template;
896 	spin_lock_init(&newrec->lock);
897 	INIT_LIST_HEAD(&newrec->ls_list);
898 	INIT_LIST_HEAD(&newrec->ls_busylist);
899 	INIT_LIST_HEAD(&newrec->assoc_list);
900 	kref_init(&newrec->ref);
901 	ida_init(&newrec->assoc_cnt);
902 
903 	ret = nvmet_fc_alloc_ls_iodlist(newrec);
904 	if (ret) {
905 		ret = -ENOMEM;
906 		goto out_free_newrec;
907 	}
908 
909 	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
910 	list_add_tail(&newrec->tgt_list, &nvmet_fc_target_list);
911 	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
912 
913 	*portptr = &newrec->fc_target_port;
914 	return 0;
915 
916 out_free_newrec:
917 	put_device(dev);
918 out_ida_put:
919 	ida_simple_remove(&nvmet_fc_tgtport_cnt, idx);
920 out_fail_kfree:
921 	kfree(newrec);
922 out_regtgt_failed:
923 	*portptr = NULL;
924 	return ret;
925 }
926 EXPORT_SYMBOL_GPL(nvmet_fc_register_targetport);
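
/*
 * Sketch of LLDD usage (hypothetical names, not part of this file):
 *
 *	struct nvmet_fc_target_port *targetport;
 *	int ret;
 *
 *	ret = nvmet_fc_register_targetport(&pinfo, &my_lldd_tgt_template,
 *					   &pdev->dev, &targetport);
 *	if (ret)
 *		return ret;
 *
 * The LLDD must later balance this with nvmet_fc_unregister_targetport().
 */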
927 
928 
929 static void
930 nvmet_fc_free_tgtport(struct kref *ref)
931 {
932 	struct nvmet_fc_tgtport *tgtport =
933 		container_of(ref, struct nvmet_fc_tgtport, ref);
934 	struct device *dev = tgtport->dev;
935 	unsigned long flags;
936 
937 	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
938 	list_del(&tgtport->tgt_list);
939 	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
940 
941 	nvmet_fc_free_ls_iodlist(tgtport);
942 
943 	/* let the LLDD know we've finished tearing it down */
944 	tgtport->ops->targetport_delete(&tgtport->fc_target_port);
945 
946 	ida_simple_remove(&nvmet_fc_tgtport_cnt,
947 			tgtport->fc_target_port.port_num);
948 
949 	ida_destroy(&tgtport->assoc_cnt);
950 
951 	kfree(tgtport);
952 
953 	put_device(dev);
954 }
955 
956 static void
957 nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport)
958 {
959 	kref_put(&tgtport->ref, nvmet_fc_free_tgtport);
960 }
961 
962 static int
963 nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport)
964 {
965 	return kref_get_unless_zero(&tgtport->ref);
966 }
967 
968 static void
969 __nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
970 {
971 	struct nvmet_fc_tgt_assoc *assoc, *next;
972 	unsigned long flags;
973 
974 	spin_lock_irqsave(&tgtport->lock, flags);
975 	list_for_each_entry_safe(assoc, next,
976 				&tgtport->assoc_list, a_list) {
977 		if (!nvmet_fc_tgt_a_get(assoc))
978 			continue;
979 		spin_unlock_irqrestore(&tgtport->lock, flags);
980 		nvmet_fc_delete_target_assoc(assoc);
981 		nvmet_fc_tgt_a_put(assoc);
982 		spin_lock_irqsave(&tgtport->lock, flags);
983 	}
984 	spin_unlock_irqrestore(&tgtport->lock, flags);
985 }
986 
987 /*
988  * nvmet layer has called to terminate an association
989  */
990 static void
991 nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
992 {
993 	struct nvmet_fc_tgtport *tgtport, *next;
994 	struct nvmet_fc_tgt_assoc *assoc;
995 	struct nvmet_fc_tgt_queue *queue;
996 	unsigned long flags;
997 	bool found_ctrl = false;
998 
999 	/* this is a bit ugly, but don't want to make locks layered */
1000 	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
1001 	list_for_each_entry_safe(tgtport, next, &nvmet_fc_target_list,
1002 			tgt_list) {
1003 		if (!nvmet_fc_tgtport_get(tgtport))
1004 			continue;
1005 		spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1006 
1007 		spin_lock_irqsave(&tgtport->lock, flags);
1008 		list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
1009 			queue = assoc->queues[0];
1010 			if (queue && queue->nvme_sq.ctrl == ctrl) {
1011 				if (nvmet_fc_tgt_a_get(assoc))
1012 					found_ctrl = true;
1013 				break;
1014 			}
1015 		}
1016 		spin_unlock_irqrestore(&tgtport->lock, flags);
1017 
1018 		nvmet_fc_tgtport_put(tgtport);
1019 
1020 		if (found_ctrl) {
1021 			nvmet_fc_delete_target_assoc(assoc);
1022 			nvmet_fc_tgt_a_put(assoc);
1023 			return;
1024 		}
1025 
1026 		spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
1027 	}
1028 	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1029 }
1030 
1031 /**
1032  * nvmet_fc_unregister_targetport - transport entry point called by an
1033  *                              LLDD to deregister/remove a previously
1034  *                              registered local NVME subsystem FC port.
1035  * @target_port: pointer to the (registered) target port that is to be
1036  *               deregistered.
1037  *
1038  * Returns:
1039  * a completion status. Must be 0 upon success; a negative errno
1040  * (ex: -ENXIO) upon failure.
1041  */
1042 int
1043 nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port)
1044 {
1045 	struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
1046 
1047 	/* terminate any outstanding associations */
1048 	__nvmet_fc_free_assocs(tgtport);
1049 
1050 	nvmet_fc_tgtport_put(tgtport);
1051 
1052 	return 0;
1053 }
1054 EXPORT_SYMBOL_GPL(nvmet_fc_unregister_targetport);
1055 
1056 
1057 /* *********************** FC-NVME LS Handling **************************** */
1058 
1059 
1060 static void
1061 nvmet_fc_format_rsp_hdr(void *buf, u8 ls_cmd, __be32 desc_len, u8 rqst_ls_cmd)
1062 {
1063 	struct fcnvme_ls_acc_hdr *acc = buf;
1064 
1065 	acc->w0.ls_cmd = ls_cmd;
1066 	acc->desc_list_len = desc_len;
1067 	acc->rqst.desc_tag = cpu_to_be32(FCNVME_LSDESC_RQST);
1068 	acc->rqst.desc_len =
1069 			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst));
1070 	acc->rqst.w0.ls_cmd = rqst_ls_cmd;
1071 }
1072 
1073 static int
1074 nvmet_fc_format_rjt(void *buf, u16 buflen, u8 ls_cmd,
1075 			u8 reason, u8 explanation, u8 vendor)
1076 {
1077 	struct fcnvme_ls_rjt *rjt = buf;
1078 
1079 	nvmet_fc_format_rsp_hdr(buf, FCNVME_LSDESC_RQST,
1080 			fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_rjt)),
1081 			ls_cmd);
1082 	rjt->rjt.desc_tag = cpu_to_be32(FCNVME_LSDESC_RJT);
1083 	rjt->rjt.desc_len = fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rjt));
1084 	rjt->rjt.reason_code = reason;
1085 	rjt->rjt.reason_explanation = explanation;
1086 	rjt->rjt.vendor = vendor;
1087 
1088 	return sizeof(struct fcnvme_ls_rjt);
1089 }
1090 
1091 /* Validation Error indexes into the string table below */
1092 enum {
1093 	VERR_NO_ERROR		= 0,
1094 	VERR_CR_ASSOC_LEN	= 1,
1095 	VERR_CR_ASSOC_RQST_LEN	= 2,
1096 	VERR_CR_ASSOC_CMD	= 3,
1097 	VERR_CR_ASSOC_CMD_LEN	= 4,
1098 	VERR_ERSP_RATIO		= 5,
1099 	VERR_ASSOC_ALLOC_FAIL	= 6,
1100 	VERR_QUEUE_ALLOC_FAIL	= 7,
1101 	VERR_CR_CONN_LEN	= 8,
1102 	VERR_CR_CONN_RQST_LEN	= 9,
1103 	VERR_ASSOC_ID		= 10,
1104 	VERR_ASSOC_ID_LEN	= 11,
1105 	VERR_NO_ASSOC		= 12,
1106 	VERR_CONN_ID		= 13,
1107 	VERR_CONN_ID_LEN	= 14,
1108 	VERR_NO_CONN		= 15,
1109 	VERR_CR_CONN_CMD	= 16,
1110 	VERR_CR_CONN_CMD_LEN	= 17,
1111 	VERR_DISCONN_LEN	= 18,
1112 	VERR_DISCONN_RQST_LEN	= 19,
1113 	VERR_DISCONN_CMD	= 20,
1114 	VERR_DISCONN_CMD_LEN	= 21,
1115 	VERR_DISCONN_SCOPE	= 22,
1116 	VERR_RS_LEN		= 23,
1117 	VERR_RS_RQST_LEN	= 24,
1118 	VERR_RS_CMD		= 25,
1119 	VERR_RS_CMD_LEN		= 26,
1120 	VERR_RS_RCTL		= 27,
1121 	VERR_RS_RO		= 28,
1122 };
1123 
1124 static char *validation_errors[] = {
1125 	"OK",
1126 	"Bad CR_ASSOC Length",
1127 	"Bad CR_ASSOC Rqst Length",
1128 	"Not CR_ASSOC Cmd",
1129 	"Bad CR_ASSOC Cmd Length",
1130 	"Bad Ersp Ratio",
1131 	"Association Allocation Failed",
1132 	"Queue Allocation Failed",
1133 	"Bad CR_CONN Length",
1134 	"Bad CR_CONN Rqst Length",
1135 	"Not Association ID",
1136 	"Bad Association ID Length",
1137 	"No Association",
1138 	"Not Connection ID",
1139 	"Bad Connection ID Length",
1140 	"No Connection",
1141 	"Not CR_CONN Cmd",
1142 	"Bad CR_CONN Cmd Length",
1143 	"Bad DISCONN Length",
1144 	"Bad DISCONN Rqst Length",
1145 	"Not DISCONN Cmd",
1146 	"Bad DISCONN Cmd Length",
1147 	"Bad Disconnect Scope",
1148 	"Bad RS Length",
1149 	"Bad RS Rqst Length",
1150 	"Not RS Cmd",
1151 	"Bad RS Cmd Length",
1152 	"Bad RS R_CTL",
1153 	"Bad RS Relative Offset",
1154 };
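
/* note: validation_errors[] is indexed by the VERR_* values above */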
1155 
1156 static void
1157 nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport,
1158 			struct nvmet_fc_ls_iod *iod)
1159 {
1160 	struct fcnvme_ls_cr_assoc_rqst *rqst =
1161 				(struct fcnvme_ls_cr_assoc_rqst *)iod->rqstbuf;
1162 	struct fcnvme_ls_cr_assoc_acc *acc =
1163 				(struct fcnvme_ls_cr_assoc_acc *)iod->rspbuf;
1164 	struct nvmet_fc_tgt_queue *queue;
1165 	int ret = 0;
1166 
1167 	memset(acc, 0, sizeof(*acc));
1168 
1169 	if (iod->rqstdatalen < sizeof(struct fcnvme_ls_cr_assoc_rqst))
1170 		ret = VERR_CR_ASSOC_LEN;
1171 	else if (rqst->desc_list_len !=
1172 			fcnvme_lsdesc_len(
1173 				sizeof(struct fcnvme_ls_cr_assoc_rqst)))
1174 		ret = VERR_CR_ASSOC_RQST_LEN;
1175 	else if (rqst->assoc_cmd.desc_tag !=
1176 			cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD))
1177 		ret = VERR_CR_ASSOC_CMD;
1178 	else if (rqst->assoc_cmd.desc_len !=
1179 			fcnvme_lsdesc_len(
1180 				sizeof(struct fcnvme_lsdesc_cr_assoc_cmd)))
1181 		ret = VERR_CR_ASSOC_CMD_LEN;
1182 	else if (!rqst->assoc_cmd.ersp_ratio ||
1183 		 (be16_to_cpu(rqst->assoc_cmd.ersp_ratio) >=
1184 				be16_to_cpu(rqst->assoc_cmd.sqsize)))
1185 		ret = VERR_ERSP_RATIO;
1186 
1187 	else {
1188 		/* new association w/ admin queue */
1189 		iod->assoc = nvmet_fc_alloc_target_assoc(tgtport);
1190 		if (!iod->assoc)
1191 			ret = VERR_ASSOC_ALLOC_FAIL;
1192 		else {
1193 			queue = nvmet_fc_alloc_target_queue(iod->assoc, 0,
1194 					be16_to_cpu(rqst->assoc_cmd.sqsize));
1195 			if (!queue)
1196 				ret = VERR_QUEUE_ALLOC_FAIL;
1197 		}
1198 	}
1199 
1200 	if (ret) {
1201 		dev_err(tgtport->dev,
1202 			"Create Association LS failed: %s\n",
1203 			validation_errors[ret]);
1204 		iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
1205 				NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
1206 				FCNVME_RJT_RC_LOGIC,
1207 				FCNVME_RJT_EXP_NONE, 0);
1208 		return;
1209 	}
1210 
1211 	queue->ersp_ratio = be16_to_cpu(rqst->assoc_cmd.ersp_ratio);
1212 	atomic_set(&queue->connected, 1);
1213 	queue->sqhd = 0;	/* best place to init value */
1214 
1215 	/* format a response */
1216 
1217 	iod->lsreq->rsplen = sizeof(*acc);
1218 
1219 	nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
1220 			fcnvme_lsdesc_len(
1221 				sizeof(struct fcnvme_ls_cr_assoc_acc)),
1222 			FCNVME_LS_CREATE_ASSOCIATION);
1223 	acc->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
1224 	acc->associd.desc_len =
1225 			fcnvme_lsdesc_len(
1226 				sizeof(struct fcnvme_lsdesc_assoc_id));
1227 	acc->associd.association_id =
1228 			cpu_to_be64(nvmet_fc_makeconnid(iod->assoc, 0));
1229 	acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
1230 	acc->connectid.desc_len =
1231 			fcnvme_lsdesc_len(
1232 				sizeof(struct fcnvme_lsdesc_conn_id));
1233 	acc->connectid.connection_id = acc->associd.association_id;
1234 }
1235 
1236 static void
1237 nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport *tgtport,
1238 			struct nvmet_fc_ls_iod *iod)
1239 {
1240 	struct fcnvme_ls_cr_conn_rqst *rqst =
1241 				(struct fcnvme_ls_cr_conn_rqst *)iod->rqstbuf;
1242 	struct fcnvme_ls_cr_conn_acc *acc =
1243 				(struct fcnvme_ls_cr_conn_acc *)iod->rspbuf;
1244 	struct nvmet_fc_tgt_queue *queue;
1245 	int ret = 0;
1246 
1247 	memset(acc, 0, sizeof(*acc));
1248 
1249 	if (iod->rqstdatalen < sizeof(struct fcnvme_ls_cr_conn_rqst))
1250 		ret = VERR_CR_CONN_LEN;
1251 	else if (rqst->desc_list_len !=
1252 			fcnvme_lsdesc_len(
1253 				sizeof(struct fcnvme_ls_cr_conn_rqst)))
1254 		ret = VERR_CR_CONN_RQST_LEN;
1255 	else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
1256 		ret = VERR_ASSOC_ID;
1257 	else if (rqst->associd.desc_len !=
1258 			fcnvme_lsdesc_len(
1259 				sizeof(struct fcnvme_lsdesc_assoc_id)))
1260 		ret = VERR_ASSOC_ID_LEN;
1261 	else if (rqst->connect_cmd.desc_tag !=
1262 			cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD))
1263 		ret = VERR_CR_CONN_CMD;
1264 	else if (rqst->connect_cmd.desc_len !=
1265 			fcnvme_lsdesc_len(
1266 				sizeof(struct fcnvme_lsdesc_cr_conn_cmd)))
1267 		ret = VERR_CR_CONN_CMD_LEN;
1268 	else if (!rqst->connect_cmd.ersp_ratio ||
1269 		 (be16_to_cpu(rqst->connect_cmd.ersp_ratio) >=
1270 				be16_to_cpu(rqst->connect_cmd.sqsize)))
1271 		ret = VERR_ERSP_RATIO;
1272 
1273 	else {
1274 		/* new io queue */
1275 		iod->assoc = nvmet_fc_find_target_assoc(tgtport,
1276 				be64_to_cpu(rqst->associd.association_id));
1277 		if (!iod->assoc)
1278 			ret = VERR_NO_ASSOC;
1279 		else {
1280 			queue = nvmet_fc_alloc_target_queue(iod->assoc,
1281 					be16_to_cpu(rqst->connect_cmd.qid),
1282 					be16_to_cpu(rqst->connect_cmd.sqsize));
1283 			if (!queue)
1284 				ret = VERR_QUEUE_ALLOC_FAIL;
1285 
1286 			/* release get taken in nvmet_fc_find_target_assoc */
1287 			nvmet_fc_tgt_a_put(iod->assoc);
1288 		}
1289 	}
1290 
1291 	if (ret) {
1292 		dev_err(tgtport->dev,
1293 			"Create Connection LS failed: %s\n",
1294 			validation_errors[ret]);
1295 		iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
1296 				NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
1297 				(ret == VERR_NO_ASSOC) ?
1298 					FCNVME_RJT_RC_INV_ASSOC :
1299 					FCNVME_RJT_RC_LOGIC,
1300 				FCNVME_RJT_EXP_NONE, 0);
1301 		return;
1302 	}
1303 
1304 	queue->ersp_ratio = be16_to_cpu(rqst->connect_cmd.ersp_ratio);
1305 	atomic_set(&queue->connected, 1);
1306 	queue->sqhd = 0;	/* best place to init value */
1307 
1308 	/* format a response */
1309 
1310 	iod->lsreq->rsplen = sizeof(*acc);
1311 
1312 	nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
1313 			fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)),
1314 			FCNVME_LS_CREATE_CONNECTION);
1315 	acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
1316 	acc->connectid.desc_len =
1317 			fcnvme_lsdesc_len(
1318 				sizeof(struct fcnvme_lsdesc_conn_id));
1319 	acc->connectid.connection_id =
1320 			cpu_to_be64(nvmet_fc_makeconnid(iod->assoc,
1321 				be16_to_cpu(rqst->connect_cmd.qid)));
1322 }
1323 
1324 static void
1325 nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
1326 			struct nvmet_fc_ls_iod *iod)
1327 {
1328 	struct fcnvme_ls_disconnect_rqst *rqst =
1329 			(struct fcnvme_ls_disconnect_rqst *)iod->rqstbuf;
1330 	struct fcnvme_ls_disconnect_acc *acc =
1331 			(struct fcnvme_ls_disconnect_acc *)iod->rspbuf;
1332 	struct nvmet_fc_tgt_queue *queue = NULL;
1333 	struct nvmet_fc_tgt_assoc *assoc;
1334 	int ret = 0;
1335 	bool del_assoc = false;
1336 
1337 	memset(acc, 0, sizeof(*acc));
1338 
1339 	if (iod->rqstdatalen < sizeof(struct fcnvme_ls_disconnect_rqst))
1340 		ret = VERR_DISCONN_LEN;
1341 	else if (rqst->desc_list_len !=
1342 			fcnvme_lsdesc_len(
1343 				sizeof(struct fcnvme_ls_disconnect_rqst)))
1344 		ret = VERR_DISCONN_RQST_LEN;
1345 	else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
1346 		ret = VERR_ASSOC_ID;
1347 	else if (rqst->associd.desc_len !=
1348 			fcnvme_lsdesc_len(
1349 				sizeof(struct fcnvme_lsdesc_assoc_id)))
1350 		ret = VERR_ASSOC_ID_LEN;
1351 	else if (rqst->discon_cmd.desc_tag !=
1352 			cpu_to_be32(FCNVME_LSDESC_DISCONN_CMD))
1353 		ret = VERR_DISCONN_CMD;
1354 	else if (rqst->discon_cmd.desc_len !=
1355 			fcnvme_lsdesc_len(
1356 				sizeof(struct fcnvme_lsdesc_disconn_cmd)))
1357 		ret = VERR_DISCONN_CMD_LEN;
1358 	else if ((rqst->discon_cmd.scope != FCNVME_DISCONN_ASSOCIATION) &&
1359 			(rqst->discon_cmd.scope != FCNVME_DISCONN_CONNECTION))
1360 		ret = VERR_DISCONN_SCOPE;
1361 	else {
1362 		/* match an active association */
1363 		assoc = nvmet_fc_find_target_assoc(tgtport,
1364 				be64_to_cpu(rqst->associd.association_id));
1365 		iod->assoc = assoc;
1366 		if (assoc) {
1367 			if (rqst->discon_cmd.scope ==
1368 					FCNVME_DISCONN_CONNECTION) {
1369 				queue = nvmet_fc_find_target_queue(tgtport,
1370 						be64_to_cpu(
1371 							rqst->discon_cmd.id));
1372 				if (!queue) {
1373 					nvmet_fc_tgt_a_put(assoc);
1374 					ret = VERR_NO_CONN;
1375 				}
1376 			}
1377 		} else
1378 			ret = VERR_NO_ASSOC;
1379 	}
1380 
1381 	if (ret) {
1382 		dev_err(tgtport->dev,
1383 			"Disconnect LS failed: %s\n",
1384 			validation_errors[ret]);
1385 		iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
1386 				NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
1387 				(ret == VERR_NO_ASSOC) ?
1388 					FCNVME_RJT_RC_INV_ASSOC :
1389 					(ret == VERR_NO_CONN) ?
1390 						FCNVME_RJT_RC_INV_CONN :
1391 						FCNVME_RJT_RC_LOGIC,
1392 				FCNVME_RJT_EXP_NONE, 0);
1393 		return;
1394 	}
1395 
1396 	/* format a response */
1397 
1398 	iod->lsreq->rsplen = sizeof(*acc);
1399 
1400 	nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
1401 			fcnvme_lsdesc_len(
1402 				sizeof(struct fcnvme_ls_disconnect_acc)),
1403 			FCNVME_LS_DISCONNECT);
1404 
1405 
1406 	/* are we to delete a Connection ID (queue) */
1407 	if (queue) {
1408 		int qid = queue->qid;
1409 
1410 		nvmet_fc_delete_target_queue(queue);
1411 
1412 		/* release the get taken by find_target_queue */
1413 		nvmet_fc_tgt_q_put(queue);
1414 
1415 		/* tear association down if the admin queue (qid 0) terminated */
1416 		if (!qid)
1417 			del_assoc = true;
1418 	}
1419 
1420 	/* release get taken in nvmet_fc_find_target_assoc */
1421 	nvmet_fc_tgt_a_put(iod->assoc);
1422 
1423 	if (del_assoc)
1424 		nvmet_fc_delete_target_assoc(iod->assoc);
1425 }
1426 
1427 
1428 /* *********************** NVME Ctrl Routines **************************** */
1429 
1430 
1431 static void nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req);
1432 
1433 static struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops;
1434 
1435 static void
1436 nvmet_fc_xmt_ls_rsp_done(struct nvmefc_tgt_ls_req *lsreq)
1437 {
1438 	struct nvmet_fc_ls_iod *iod = lsreq->nvmet_fc_private;
1439 	struct nvmet_fc_tgtport *tgtport = iod->tgtport;
1440 
1441 	fc_dma_sync_single_for_cpu(tgtport->dev, iod->rspdma,
1442 				NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
1443 	nvmet_fc_free_ls_iod(tgtport, iod);
1444 	nvmet_fc_tgtport_put(tgtport);
1445 }
1446 
1447 static void
1448 nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
1449 				struct nvmet_fc_ls_iod *iod)
1450 {
1451 	int ret;
1452 
1453 	fc_dma_sync_single_for_device(tgtport->dev, iod->rspdma,
1454 				  NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
1455 
1456 	ret = tgtport->ops->xmt_ls_rsp(&tgtport->fc_target_port, iod->lsreq);
1457 	if (ret)
1458 		nvmet_fc_xmt_ls_rsp_done(iod->lsreq);
1459 }
1460 
1461 /*
1462  * Actual processing routine for received FC-NVME LS Requests from the LLD
1463  */
1464 static void
1465 nvmet_fc_handle_ls_rqst(struct nvmet_fc_tgtport *tgtport,
1466 			struct nvmet_fc_ls_iod *iod)
1467 {
1468 	struct fcnvme_ls_rqst_w0 *w0 =
1469 			(struct fcnvme_ls_rqst_w0 *)iod->rqstbuf;
1470 
1471 	iod->lsreq->nvmet_fc_private = iod;
1472 	iod->lsreq->rspbuf = iod->rspbuf;
1473 	iod->lsreq->rspdma = iod->rspdma;
1474 	iod->lsreq->done = nvmet_fc_xmt_ls_rsp_done;
1475 	/* Be preventive. Handlers will later set to a valid length */
1476 	iod->lsreq->rsplen = 0;
1477 
1478 	iod->assoc = NULL;
1479 
1480 	/*
1481 	 * handlers:
1482 	 *   parse request input, execute the request, and format the
1483 	 *   LS response
1484 	 */
1485 	switch (w0->ls_cmd) {
1486 	case FCNVME_LS_CREATE_ASSOCIATION:
1487 		/* Creates Association and initial Admin Queue/Connection */
1488 		nvmet_fc_ls_create_association(tgtport, iod);
1489 		break;
1490 	case FCNVME_LS_CREATE_CONNECTION:
1491 		/* Creates an IO Queue/Connection */
1492 		nvmet_fc_ls_create_connection(tgtport, iod);
1493 		break;
1494 	case FCNVME_LS_DISCONNECT:
1495 		/* Terminate a Queue/Connection or the Association */
1496 		nvmet_fc_ls_disconnect(tgtport, iod);
1497 		break;
1498 	default:
1499 		iod->lsreq->rsplen = nvmet_fc_format_rjt(iod->rspbuf,
1500 				NVME_FC_MAX_LS_BUFFER_SIZE, w0->ls_cmd,
1501 				FCNVME_RJT_RC_INVAL, FCNVME_RJT_EXP_NONE, 0);
1502 	}
1503 
1504 	nvmet_fc_xmt_ls_rsp(tgtport, iod);
1505 }
1506 
1507 /*
1508  * Work context used to defer LS request processing out of the LLDD's
1509  * receive path
1509  */
1510 static void
1511 nvmet_fc_handle_ls_rqst_work(struct work_struct *work)
1512 {
1513 	struct nvmet_fc_ls_iod *iod =
1514 		container_of(work, struct nvmet_fc_ls_iod, work);
1515 	struct nvmet_fc_tgtport *tgtport = iod->tgtport;
1516 
1517 	nvmet_fc_handle_ls_rqst(tgtport, iod);
1518 }
1519 
1520 
1521 /**
1522  * nvmet_fc_rcv_ls_req - transport entry point called by an LLDD
1523  *                       upon the reception of an NVME LS request.
1524  *
1525  * The nvmet-fc layer will copy payload to an internal structure for
1526  * processing.  As such, upon completion of the routine, the LLDD may
1527  * immediately free/reuse the LS request buffer passed in the call.
1528  *
1529  * If this routine returns error, the LLDD should abort the exchange.
1530  *
1531  * @target_port: pointer to the (registered) target port the LS was
1532  *              received on.
1533  * @lsreq:      pointer to a lsreq request structure to be used to reference
1534  *              the exchange corresponding to the LS.
1535  * @lsreqbuf:   pointer to the buffer containing the LS Request
1536  * @lsreqbuf_len: length, in bytes, of the received LS request
1537  */
1538 int
1539 nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port,
1540 			struct nvmefc_tgt_ls_req *lsreq,
1541 			void *lsreqbuf, u32 lsreqbuf_len)
1542 {
1543 	struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
1544 	struct nvmet_fc_ls_iod *iod;
1545 
1546 	if (lsreqbuf_len > NVME_FC_MAX_LS_BUFFER_SIZE)
1547 		return -E2BIG;
1548 
1549 	if (!nvmet_fc_tgtport_get(tgtport))
1550 		return -ESHUTDOWN;
1551 
1552 	iod = nvmet_fc_alloc_ls_iod(tgtport);
1553 	if (!iod) {
1554 		nvmet_fc_tgtport_put(tgtport);
1555 		return -ENOENT;
1556 	}
1557 
1558 	iod->lsreq = lsreq;
1559 	iod->fcpreq = NULL;
1560 	memcpy(iod->rqstbuf, lsreqbuf, lsreqbuf_len);
1561 	iod->rqstdatalen = lsreqbuf_len;
1562 
1563 	schedule_work(&iod->work);
1564 
1565 	return 0;
1566 }
1567 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_ls_req);
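
/*
 * Sketch of LLDD usage (hypothetical names, not part of this file): from
 * its LS receive path an LLDD would typically do
 *
 *	ret = nvmet_fc_rcv_ls_req(lport->targetport, &lsrsp->tgt_ls_req,
 *				  ls_buf, ls_len);
 *
 * and, because the payload is copied by nvmet_fc_rcv_ls_req(), may reuse
 * ls_buf immediately. The formatted response is later handed back through
 * the LLDD's ->xmt_ls_rsp() entry point.
 */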
1568 
1569 
1570 /*
1571  * **********************
1572  * Start of FCP handling
1573  * **********************
1574  */
1575 
1576 static int
1577 nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
1578 {
1579 	struct scatterlist *sg;
1580 	struct page *page;
1581 	unsigned int nent;
1582 	u32 page_len, length;
1583 	int i = 0;
1584 
1585 	length = fod->total_length;
1586 	nent = DIV_ROUND_UP(length, PAGE_SIZE);
1587 	sg = kmalloc_array(nent, sizeof(struct scatterlist), GFP_KERNEL);
1588 	if (!sg)
1589 		goto out;
1590 
1591 	sg_init_table(sg, nent);
1592 
1593 	while (length) {
1594 		page_len = min_t(u32, length, PAGE_SIZE);
1595 
1596 		page = alloc_page(GFP_KERNEL);
1597 		if (!page)
1598 			goto out_free_pages;
1599 
1600 		sg_set_page(&sg[i], page, page_len, 0);
1601 		length -= page_len;
1602 		i++;
1603 	}
1604 
1605 	fod->data_sg = sg;
1606 	fod->data_sg_cnt = nent;
1607 	fod->data_sg_cnt = fc_dma_map_sg(fod->tgtport->dev, sg, nent,
1608 				((fod->io_dir == NVMET_FCP_WRITE) ?
1609 					DMA_FROM_DEVICE : DMA_TO_DEVICE));
1610 				/* note: write from initiator perspective */
1611 
1612 	return 0;
1613 
1614 out_free_pages:
1615 	while (i > 0) {
1616 		i--;
1617 		__free_page(sg_page(&sg[i]));
1618 	}
1619 	kfree(sg);
1620 	fod->data_sg = NULL;
1621 	fod->data_sg_cnt = 0;
1622 out:
1623 	return NVME_SC_INTERNAL;
1624 }
1625 
1626 static void
1627 nvmet_fc_free_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
1628 {
1629 	struct scatterlist *sg;
1630 	int count;
1631 
1632 	if (!fod->data_sg || !fod->data_sg_cnt)
1633 		return;
1634 
1635 	fc_dma_unmap_sg(fod->tgtport->dev, fod->data_sg, fod->data_sg_cnt,
1636 				((fod->io_dir == NVMET_FCP_WRITE) ?
1637 					DMA_FROM_DEVICE : DMA_TO_DEVICE));
1638 	for_each_sg(fod->data_sg, sg, fod->data_sg_cnt, count)
1639 		__free_page(sg_page(sg));
1640 	kfree(fod->data_sg);
1641 	fod->data_sg = NULL;
1642 	fod->data_sg_cnt = 0;
1643 }
1644 
1645 
1646 static bool
1647 queue_90percent_full(struct nvmet_fc_tgt_queue *q, u32 sqhd)
1648 {
1649 	u32 sqtail, used;
1650 
1651 	/* egad, this is ugly. And sqtail is just a best guess */
1652 	sqtail = atomic_read(&q->sqtail) % q->sqsize;
1653 
1654 	used = (sqtail < sqhd) ? (sqtail + q->sqsize - sqhd) : (sqtail - sqhd);
1655 	return ((used * 10) >= (((u32)(q->sqsize - 1) * 9)));
1656 }
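
/*
 * Worked example (illustrative numbers): with sqsize 32, sqhd 4 and a
 * guessed sqtail of 2, used = 2 + 32 - 4 = 30 and 30 * 10 >= 31 * 9, so
 * the queue is treated as 90% (or more) full and an ersp is forced.
 */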
1657 
1658 /*
1659  * Prep RSP payload.
1660  * May be a NVMET_FCOP_RSP or NVMET_FCOP_READDATA_RSP op
1661  */
1662 static void
1663 nvmet_fc_prep_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
1664 				struct nvmet_fc_fcp_iod *fod)
1665 {
1666 	struct nvme_fc_ersp_iu *ersp = &fod->rspiubuf;
1667 	struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
1668 	struct nvme_completion *cqe = &ersp->cqe;
1669 	u32 *cqewd = (u32 *)cqe;
1670 	bool send_ersp = false;
1671 	u32 rsn, rspcnt, xfr_length;
1672 
1673 	if (fod->fcpreq->op == NVMET_FCOP_READDATA_RSP)
1674 		xfr_length = fod->total_length;
1675 	else
1676 		xfr_length = fod->offset;
1677 
1678 	/*
1679 	 * check to see if we can send a 0's rsp.
1680 	 *   Note: to send a 0's response, the NVME-FC host transport will
1681 	 *   recreate the CQE. The host transport knows: sq id, SQHD (last
1682 	 *   seen in an ersp), and command_id. Thus it will create a
1683 	 *   zero-filled CQE with those known fields filled in. Transport
1684 	 *   must send an ersp for any condition where the cqe won't match
1685 	 *   this.
1686 	 *
1687 	 * Here are the FC-NVME mandated cases where we must send an ersp:
1688 	 *  every N responses, where N=ersp_ratio
1689 	 *  force fabric commands to send ersp's (not in FC-NVME but good
1690 	 *    practice)
1691 	 *  normal cmds: any time status is non-zero, or status is zero
1692 	 *     but words 0 or 1 are non-zero.
1693 	 *  the SQ is 90% or more full
1694 	 *  the cmd is a fused command
1695 	 *  transferred data length not equal to cmd iu length
1696 	 */
1697 	rspcnt = atomic_inc_return(&fod->queue->zrspcnt);
1698 	if (!(rspcnt % fod->queue->ersp_ratio) ||
1699 	    sqe->opcode == nvme_fabrics_command ||
1700 	    xfr_length != fod->total_length ||
1701 	    (le16_to_cpu(cqe->status) & 0xFFFE) || cqewd[0] || cqewd[1] ||
1702 	    (sqe->flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND)) ||
1703 	    queue_90percent_full(fod->queue, le16_to_cpu(cqe->sq_head)))
1704 		send_ersp = true;
1705 
1706 	/* re-set the fields */
1707 	fod->fcpreq->rspaddr = ersp;
1708 	fod->fcpreq->rspdma = fod->rspdma;
1709 
1710 	if (!send_ersp) {
1711 		memset(ersp, 0, NVME_FC_SIZEOF_ZEROS_RSP);
1712 		fod->fcpreq->rsplen = NVME_FC_SIZEOF_ZEROS_RSP;
1713 	} else {
1714 		ersp->iu_len = cpu_to_be16(sizeof(*ersp)/sizeof(u32));
1715 		rsn = atomic_inc_return(&fod->queue->rsn);
1716 		ersp->rsn = cpu_to_be32(rsn);
1717 		ersp->xfrd_len = cpu_to_be32(xfr_length);
1718 		fod->fcpreq->rsplen = sizeof(*ersp);
1719 	}
1720 
1721 	fc_dma_sync_single_for_device(tgtport->dev, fod->rspdma,
1722 				  sizeof(fod->rspiubuf), DMA_TO_DEVICE);
1723 }
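
/*
 * For example, with ersp_ratio 8 roughly one of every eight completions
 * is forced to carry a full ERSP IU even when the zero-filled response
 * would otherwise do (keeping the host's view of SQHD/RSN fresh); the
 * remaining clean completions use the short all-zeros response.
 */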
1724 
1725 static void nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq);
1726 
1727 static void
1728 nvmet_fc_abort_op(struct nvmet_fc_tgtport *tgtport,
1729 				struct nvmet_fc_fcp_iod *fod)
1730 {
1731 	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
1732 
1733 	/* data no longer needed */
1734 	nvmet_fc_free_tgt_pgs(fod);
1735 
1736 	/*
1737 	 * if an ABTS was received or we issued the fcp_abort early
1738 	 * don't call abort routine again.
1739 	 */
1740 	/* no need to take lock - lock was taken earlier to get here */
1741 	if (!fod->aborted)
1742 		tgtport->ops->fcp_abort(&tgtport->fc_target_port, fcpreq);
1743 
1744 	nvmet_fc_free_fcp_iod(fod->queue, fod);
1745 }
1746 
1747 static void
1748 nvmet_fc_xmt_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
1749 				struct nvmet_fc_fcp_iod *fod)
1750 {
1751 	int ret;
1752 
1753 	fod->fcpreq->op = NVMET_FCOP_RSP;
1754 	fod->fcpreq->timeout = 0;
1755 
1756 	nvmet_fc_prep_fcp_rsp(tgtport, fod);
1757 
1758 	ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
1759 	if (ret)
1760 		nvmet_fc_abort_op(tgtport, fod);
1761 }
1762 
1763 static void
1764 nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
1765 				struct nvmet_fc_fcp_iod *fod, u8 op)
1766 {
1767 	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
1768 	struct scatterlist *sg, *datasg;
1769 	unsigned long flags;
1770 	u32 tlen, sg_off;
1771 	int ret;
1772 
1773 	fcpreq->op = op;
1774 	fcpreq->offset = fod->offset;
1775 	fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC;
1776 	tlen = min_t(u32, (NVMET_FC_MAX_KB_PER_XFR * 1024),
1777 			(fod->total_length - fod->offset));
1778 	tlen = min_t(u32, tlen, NVME_FC_MAX_SEGMENTS * PAGE_SIZE);
1779 	tlen = min_t(u32, tlen, fod->tgtport->ops->max_sgl_segments
1780 					* PAGE_SIZE);
1781 	fcpreq->transfer_length = tlen;
1782 	fcpreq->transferred_length = 0;
1783 	fcpreq->fcp_error = 0;
1784 	fcpreq->rsplen = 0;
1785 
1786 	fcpreq->sg_cnt = 0;
1787 
1788 	datasg = fod->next_sg;
1789 	sg_off = fod->next_sg_offset;
1790 
1791 	for (sg = fcpreq->sg ; tlen; sg++) {
1792 		*sg = *datasg;
1793 		if (sg_off) {
1794 			sg->offset += sg_off;
1795 			sg->length -= sg_off;
1796 			sg->dma_address += sg_off;
1797 			sg_off = 0;
1798 		}
1799 		if (tlen < sg->length) {
1800 			sg->length = tlen;
1801 			fod->next_sg = datasg;
1802 			fod->next_sg_offset += tlen;
1803 		} else if (tlen == sg->length) {
1804 			fod->next_sg_offset = 0;
1805 			fod->next_sg = sg_next(datasg);
1806 		} else {
1807 			fod->next_sg_offset = 0;
1808 			datasg = sg_next(datasg);
1809 		}
1810 		tlen -= sg->length;
1811 		fcpreq->sg_cnt++;
1812 	}
1813 
1814 	/*
1815 	 * If the last READDATA request: check if LLDD supports
1816 	 * combined xfr with response.
1817 	 */
1818 	if ((op == NVMET_FCOP_READDATA) &&
1819 	    ((fod->offset + fcpreq->transfer_length) == fod->total_length) &&
1820 	    (tgtport->ops->target_features & NVMET_FCTGTFEAT_READDATA_RSP)) {
1821 		fcpreq->op = NVMET_FCOP_READDATA_RSP;
1822 		nvmet_fc_prep_fcp_rsp(tgtport, fod);
1823 	}
1824 
1825 	ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
1826 	if (ret) {
1827 		/*
1828 		 * should be ok to set w/o lock as it's in the thread of
1829 		 * execution (not an async timer routine) and doesn't
1830 		 * contend with any clearing action
1831 		 */
1832 		fod->abort = true;
1833 
1834 		if (op == NVMET_FCOP_WRITEDATA) {
1835 			spin_lock_irqsave(&fod->flock, flags);
1836 			fod->writedataactive = false;
1837 			spin_unlock_irqrestore(&fod->flock, flags);
1838 			nvmet_req_complete(&fod->req,
1839 					NVME_SC_FC_TRANSPORT_ERROR);
1840 		} else /* NVMET_FCOP_READDATA or NVMET_FCOP_READDATA_RSP */ {
1841 			fcpreq->fcp_error = ret;
1842 			fcpreq->transferred_length = 0;
1843 			nvmet_fc_xmt_fcp_op_done(fod->fcpreq);
1844 		}
1845 	}
1846 }
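
/*
 * Illustrative chunking (assuming the LLDD's sgl limits allow full 256 KB
 * transfers): a 1 MB read is carried out as four 256 KB
 * (NVMET_FC_MAX_KB_PER_XFR) NVMET_FCOP_READDATA operations; if the LLDD
 * advertises NVMET_FCTGTFEAT_READDATA_RSP, the final chunk is issued as
 * NVMET_FCOP_READDATA_RSP so the response rides with the last data.
 */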
1847 
1848 static inline bool
1849 __nvmet_fc_fod_op_abort(struct nvmet_fc_fcp_iod *fod, bool abort)
1850 {
1851 	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
1852 	struct nvmet_fc_tgtport *tgtport = fod->tgtport;
1853 
1854 	/* if in the middle of an io and we need to tear down */
1855 	if (abort) {
1856 		if (fcpreq->op == NVMET_FCOP_WRITEDATA) {
1857 			nvmet_req_complete(&fod->req,
1858 					NVME_SC_FC_TRANSPORT_ERROR);
1859 			return true;
1860 		}
1861 
1862 		nvmet_fc_abort_op(tgtport, fod);
1863 		return true;
1864 	}
1865 
1866 	return false;
1867 }
1868 
1869 /*
1870  * actual done handler for FCP operations when completed by the LLDD
1871  */
1872 static void
1873 nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod)
1874 {
1875 	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
1876 	struct nvmet_fc_tgtport *tgtport = fod->tgtport;
1877 	unsigned long flags;
1878 	bool abort;
1879 
1880 	spin_lock_irqsave(&fod->flock, flags);
1881 	abort = fod->abort;
1882 	fod->writedataactive = false;
1883 	spin_unlock_irqrestore(&fod->flock, flags);
1884 
1885 	switch (fcpreq->op) {
1886 
1887 	case NVMET_FCOP_WRITEDATA:
1888 		if (__nvmet_fc_fod_op_abort(fod, abort))
1889 			return;
1890 		if (fcpreq->fcp_error ||
1891 		    fcpreq->transferred_length != fcpreq->transfer_length) {
1892 			spin_lock(&fod->flock);
1893 			fod->abort = true;
1894 			spin_unlock(&fod->flock);
1895 
1896 			nvmet_req_complete(&fod->req,
1897 					NVME_SC_FC_TRANSPORT_ERROR);
1898 			return;
1899 		}
1900 
1901 		fod->offset += fcpreq->transferred_length;
1902 		if (fod->offset != fod->total_length) {
1903 			spin_lock_irqsave(&fod->flock, flags);
1904 			fod->writedataactive = true;
1905 			spin_unlock_irqrestore(&fod->flock, flags);
1906 
1907 			/* transfer the next chunk */
1908 			nvmet_fc_transfer_fcp_data(tgtport, fod,
1909 						NVMET_FCOP_WRITEDATA);
1910 			return;
1911 		}
1912 
1913 		/* data transfer complete, resume with nvmet layer */
1914 
1915 		fod->req.execute(&fod->req);
1916 
1917 		break;
1918 
1919 	case NVMET_FCOP_READDATA:
1920 	case NVMET_FCOP_READDATA_RSP:
1921 		if (__nvmet_fc_fod_op_abort(fod, abort))
1922 			return;
1923 		if (fcpreq->fcp_error ||
1924 		    fcpreq->transferred_length != fcpreq->transfer_length) {
1925 			nvmet_fc_abort_op(tgtport, fod);
1926 			return;
1927 		}
1928 
1929 		/* success */
1930 
1931 		if (fcpreq->op == NVMET_FCOP_READDATA_RSP) {
1932 			/* data no longer needed */
1933 			nvmet_fc_free_tgt_pgs(fod);
1934 			nvmet_fc_free_fcp_iod(fod->queue, fod);
1935 			return;
1936 		}
1937 
1938 		fod->offset += fcpreq->transferred_length;
1939 		if (fod->offset != fod->total_length) {
1940 			/* transfer the next chunk */
1941 			nvmet_fc_transfer_fcp_data(tgtport, fod,
1942 						NVMET_FCOP_READDATA);
1943 			return;
1944 		}
1945 
1946 		/* data transfer complete, send response */
1947 
1948 		/* data no longer needed */
1949 		nvmet_fc_free_tgt_pgs(fod);
1950 
1951 		nvmet_fc_xmt_fcp_rsp(tgtport, fod);
1952 
1953 		break;
1954 
1955 	case NVMET_FCOP_RSP:
1956 		if (__nvmet_fc_fod_op_abort(fod, abort))
1957 			return;
1958 		nvmet_fc_free_fcp_iod(fod->queue, fod);
1959 		break;
1960 
1961 	default:
1962 		break;
1963 	}
1964 }
1965 
1966 static void
1967 nvmet_fc_fcp_rqst_op_done_work(struct work_struct *work)
1968 {
1969 	struct nvmet_fc_fcp_iod *fod =
1970 		container_of(work, struct nvmet_fc_fcp_iod, done_work);
1971 
1972 	nvmet_fc_fod_op_done(fod);
1973 }
1974 
1975 static void
1976 nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq)
1977 {
1978 	struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
1979 	struct nvmet_fc_tgt_queue *queue = fod->queue;
1980 
1981 	if (fod->tgtport->ops->target_features & NVMET_FCTGTFEAT_OPDONE_IN_ISR)
1982 		/* context switch so completion is not in ISR context */
1983 		queue_work_on(queue->cpu, queue->work_q, &fod->done_work);
1984 	else
1985 		nvmet_fc_fod_op_done(fod);
1986 }
1987 
1988 /*
1989  * actual completion handler after execution by the nvmet layer
1990  */
1991 static void
1992 __nvmet_fc_fcp_nvme_cmd_done(struct nvmet_fc_tgtport *tgtport,
1993 			struct nvmet_fc_fcp_iod *fod, int status)
1994 {
1995 	struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
1996 	struct nvme_completion *cqe = &fod->rspiubuf.cqe;
1997 	unsigned long flags;
1998 	bool abort;
1999 
2000 	spin_lock_irqsave(&fod->flock, flags);
2001 	abort = fod->abort;
2002 	spin_unlock_irqrestore(&fod->flock, flags);
2003 
2004 	/* if we have a CQE, snoop the last sq_head value */
2005 	if (!status)
2006 		fod->queue->sqhd = cqe->sq_head;
2007 
2008 	if (abort) {
2009 		nvmet_fc_abort_op(tgtport, fod);
2010 		return;
2011 	}
2012 
2013 	/* if an error occurred handling the cmd after initial parsing */
2014 	if (status) {
2015 		/* fudge up a failed CQE status for our transport error */
2016 		memset(cqe, 0, sizeof(*cqe));
2017 		cqe->sq_head = fod->queue->sqhd;	/* echo last cqe sqhd */
2018 		cqe->sq_id = cpu_to_le16(fod->queue->qid);
2019 		cqe->command_id = sqe->command_id;
2020 		cqe->status = cpu_to_le16(status);
2021 	} else {
2022 
2023 		/*
2024 		 * try to push the data even if the cqe status is non-zero;
2025 		 * some statuses still expect data to be transferred to the
2026 		 * host.
2027 		 */
2028 		if ((fod->io_dir == NVMET_FCP_READ) && (fod->data_sg_cnt)) {
2029 			/* push the data over before sending rsp */
2030 			nvmet_fc_transfer_fcp_data(tgtport, fod,
2031 						NVMET_FCOP_READDATA);
2032 			return;
2033 		}
2034 
2035 		/* writes & no data - fall thru */
2036 	}
2037 
2038 	/* data no longer needed */
2039 	nvmet_fc_free_tgt_pgs(fod);
2040 
2041 	nvmet_fc_xmt_fcp_rsp(tgtport, fod);
2042 }
2043 
2044 
2045 static void
2046 nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req)
2047 {
2048 	struct nvmet_fc_fcp_iod *fod = nvmet_req_to_fod(nvme_req);
2049 	struct nvmet_fc_tgtport *tgtport = fod->tgtport;
2050 
2051 	__nvmet_fc_fcp_nvme_cmd_done(tgtport, fod, 0);
2052 }
2053 
2054 
2055 /*
2056  * Actual processing routine for FC-NVME FCP commands received from the LLDD
2057  */
2058 static void
2059 nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
2060 			struct nvmet_fc_fcp_iod *fod)
2061 {
2062 	struct nvme_fc_cmd_iu *cmdiu = &fod->cmdiubuf;
2063 	int ret;
2064 
2065 	/*
2066 	 * Fused commands are currently not supported in the Linux
2067 	 * implementation.
2068 	 *
2069 	 * As such, the FC transport implementation does not examine
2070 	 * fused command flags, nor does it hold both halves and order
2071 	 * their delivery to the upper layer based on CSN.
2072 	 */
2073 
2074 	fod->fcpreq->done = nvmet_fc_xmt_fcp_op_done;
2075 
2076 	fod->total_length = be32_to_cpu(cmdiu->data_len);
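	/* set the data direction from the IU flags and cross-check it
	 * against the SQE opcode */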
2077 	if (cmdiu->flags & FCNVME_CMD_FLAGS_WRITE) {
2078 		fod->io_dir = NVMET_FCP_WRITE;
2079 		if (!nvme_is_write(&cmdiu->sqe))
2080 			goto transport_error;
2081 	} else if (cmdiu->flags & FCNVME_CMD_FLAGS_READ) {
2082 		fod->io_dir = NVMET_FCP_READ;
2083 		if (nvme_is_write(&cmdiu->sqe))
2084 			goto transport_error;
2085 	} else {
2086 		fod->io_dir = NVMET_FCP_NODATA;
2087 		if (fod->total_length)
2088 			goto transport_error;
2089 	}
2090 
2091 	fod->req.cmd = &fod->cmdiubuf.sqe;
2092 	fod->req.rsp = &fod->rspiubuf.cqe;
2093 	fod->req.port = fod->queue->port;
2094 
2095 	/* ensure nvmet handlers will set cmd handler callback */
2096 	fod->req.execute = NULL;
2097 
2098 	/* clear any response payload */
2099 	memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf));
2100 
2101 	ret = nvmet_req_init(&fod->req,
2102 				&fod->queue->nvme_cq,
2103 				&fod->queue->nvme_sq,
2104 				&nvmet_fc_tgt_fcp_ops);
2105 	if (!ret) {	/* bad SQE content or invalid ctrl state */
2106 		nvmet_fc_abort_op(tgtport, fod);
2107 		return;
2108 	}
2109 
2110 	/* keep a running counter of tail position */
2111 	atomic_inc(&fod->queue->sqtail);
2112 
2113 	fod->data_sg = NULL;
2114 	fod->data_sg_cnt = 0;
2115 	if (fod->total_length) {
2116 		ret = nvmet_fc_alloc_tgt_pgs(fod);
2117 		if (ret) {
2118 			nvmet_req_complete(&fod->req, ret);
2119 			return;
2120 		}
2121 	}
2122 	fod->req.sg = fod->data_sg;
2123 	fod->req.sg_cnt = fod->data_sg_cnt;
2124 	fod->offset = 0;
2125 	fod->next_sg = fod->data_sg;
2126 	fod->next_sg_offset = 0;
2127 
2128 	if (fod->io_dir == NVMET_FCP_WRITE) {
2129 		/* pull the data over before invoking nvmet layer */
2130 		nvmet_fc_transfer_fcp_data(tgtport, fod, NVMET_FCOP_WRITEDATA);
2131 		return;
2132 	}
2133 
2134 	/*
2135 	 * Reads or no data:
2136 	 *
2137 	 * can invoke the nvmet layer now. If read data, cmd completion will
2138 	 * push the data.
2139 	 */
2140 
2141 	fod->req.execute(&fod->req);
2142 
2143 	return;
2144 
2145 transport_error:
2146 	nvmet_fc_abort_op(tgtport, fod);
2147 }
2148 
2149 /*
2150  * Work context wrapper to process a received FC-NVME FCP command from the LLDD
2151  */
2152 static void
2153 nvmet_fc_handle_fcp_rqst_work(struct work_struct *work)
2154 {
2155 	struct nvmet_fc_fcp_iod *fod =
2156 		container_of(work, struct nvmet_fc_fcp_iod, work);
2157 	struct nvmet_fc_tgtport *tgtport = fod->tgtport;
2158 
2159 	nvmet_fc_handle_fcp_rqst(tgtport, fod);
2160 }
2161 
2162 /**
2163  * nvmet_fc_rcv_fcp_req - transport entry point called by an LLDD
2164  *                       upon the reception of an NVME FCP CMD IU.
2165  *
2166  * Pass an FC-NVME FCP CMD IU received from the FC link to the nvmet-fc
2167  * layer for processing.
2168  *
2169  * The nvmet-fc layer will copy cmd payload to an internal structure for
2170  * processing.  As such, upon completion of the routine, the LLDD may
2171  * immediately free/reuse the CMD IU buffer passed in the call.
2172  *
2173  * If this routine returns an error, the LLDD should abort the exchange.
2174  *
2175  * @target_port: pointer to the (registered) target port the FCP CMD IU
2176  *              was received on.
2177  * @fcpreq:     pointer to a fcpreq request structure to be used to reference
2178  *              the FCP exchange corresponding to this command.
2179  * @cmdiubuf:   pointer to the buffer containing the FCP CMD IU
2180  * @cmdiubuf_len: length, in bytes, of the received FCP CMD IU
2181  */
2182 int
2183 nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port,
2184 			struct nvmefc_tgt_fcp_req *fcpreq,
2185 			void *cmdiubuf, u32 cmdiubuf_len)
2186 {
2187 	struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
2188 	struct nvme_fc_cmd_iu *cmdiu = cmdiubuf;
2189 	struct nvmet_fc_tgt_queue *queue;
2190 	struct nvmet_fc_fcp_iod *fod;
2191 
2192 	/* validate iu, so the connection id can be used to find the queue */
2193 	if ((cmdiubuf_len != sizeof(*cmdiu)) ||
2194 			(cmdiu->scsi_id != NVME_CMD_SCSI_ID) ||
2195 			(cmdiu->fc_id != NVME_CMD_FC_ID) ||
2196 			(be16_to_cpu(cmdiu->iu_len) != (sizeof(*cmdiu)/4)))
2197 		return -EIO;
2198 
2199 	queue = nvmet_fc_find_target_queue(tgtport,
2200 				be64_to_cpu(cmdiu->connection_id));
2201 	if (!queue)
2202 		return -ENOTCONN;
2203 
2204 	/*
2205 	 * note: reference taken by find_target_queue.
2206 	 * After successful fod allocation, the fod will inherit the
2207 	 * ownership of that reference and will remove the reference
2208 	 * when the fod is freed.
2209 	 */
2210 
2211 	fod = nvmet_fc_alloc_fcp_iod(queue);
2212 	if (!fod) {
2213 		/* release the queue lookup reference */
2214 		nvmet_fc_tgt_q_put(queue);
2215 		return -ENOENT;
2216 	}
2217 
2218 	fcpreq->nvmet_fc_private = fod;
2219 	fod->fcpreq = fcpreq;
2220 	/*
2221 	 * put all admin cmds on hw queue id 0. All io commands are
2222 	 * mapped to a hw queue as (qid - 1) modulo max_hw_queues.
2223 	 */
2224 	fcpreq->hwqid = queue->qid ?
2225 			((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0;
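	/* e.g. with max_hw_queues == 4, io qids 1,2,3,4,5 map to hwqid 0,1,2,3,0 */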
2226 	memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len);
2227 
2228 	if (tgtport->ops->target_features & NVMET_FCTGTFEAT_CMD_IN_ISR)
2229 		queue_work_on(queue->cpu, queue->work_q, &fod->work);
2230 	else
2231 		nvmet_fc_handle_fcp_rqst(tgtport, fod);
2232 
2233 	return 0;
2234 }
2235 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_req);
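/*
 * Editorial sketch, not part of the original file: one way an LLDD's FCP
 * receive path might hand a CMD IU to nvmet-fc. The LLDD-side names
 * (example_lldd_rcv_fcp_cmd, example_lldd_abort_exchange) are hypothetical.
 */
static void
example_lldd_rcv_fcp_cmd(struct nvmet_fc_target_port *tport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq,
			void *cmdiu, u32 cmdiu_len)
{
	int ret;

	/* nvmet-fc copies the CMD IU, so the buffer may be reused on return */
	ret = nvmet_fc_rcv_fcp_req(tport, tgt_fcpreq, cmdiu, cmdiu_len);
	if (ret)
		/* per the contract above, the LLDD should abort the exchange */
		example_lldd_abort_exchange(tgt_fcpreq);	/* hypothetical */
}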
2236 
2237 /**
2238  * nvmet_fc_rcv_fcp_abort - transport entry point called by an LLDD
2239  *                       upon the reception of an ABTS for an FCP command
2240  *
2241  * Notify the transport that an ABTS has been received for an FCP command
2242  * that had been given to the transport via nvmet_fc_rcv_fcp_req(). The
2243  * LLDD believes the command is still being worked on
2244  * (template_ops->fcp_req_release() has not been called).
2245  *
2246  * The transport will wait for any outstanding work (an op to the LLDD,
2247  * which the LLDD should complete with error due to the ABTS; or the
2248  * completion from the nvmet layer of the nvme command), then will
2249  * stop processing and call the LLDD's fcp_req_release() callback to
2250  * return the i/o context to the LLDD.  The LLDD may send the BA_ACC
2251  * to the ABTS either after return from this function (assuming any
2252  * outstanding op work has been terminated) or upon the callback being
2253  * called.
2254  *
2255  * @target_port: pointer to the (registered) target port the FCP CMD IU
2256  *              was received on.
2257  * @fcpreq:     pointer to the fcpreq request structure that corresponds
2258  *              to the exchange that received the ABTS.
2259  */
2260 void
2261 nvmet_fc_rcv_fcp_abort(struct nvmet_fc_target_port *target_port,
2262 			struct nvmefc_tgt_fcp_req *fcpreq)
2263 {
2264 	struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
2265 	struct nvmet_fc_tgt_queue *queue;
2266 	unsigned long flags;
2267 
2268 	if (!fod || fod->fcpreq != fcpreq)
2269 		/* job appears to have already completed, ignore abort */
2270 		return;
2271 
2272 	queue = fod->queue;
2273 
2274 	spin_lock_irqsave(&queue->qlock, flags);
2275 	if (fod->active) {
2276 		/*
2277 		 * mark as abort. The abort handler, invoked upon completion
2278 		 * of any work, will detect the aborted status and do the
2279 		 * callback.
2280 		 */
2281 		spin_lock(&fod->flock);
2282 		fod->abort = true;
2283 		fod->aborted = true;
2284 		spin_unlock(&fod->flock);
2285 	}
2286 	spin_unlock_irqrestore(&queue->qlock, flags);
2287 }
2288 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_abort);
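/*
 * Editorial sketch, not part of the original file: an LLDD ABTS handler
 * notifying nvmet-fc while it still owns the exchange (fcp_req_release()
 * has not yet been called). example_lldd_rcv_abts is a hypothetical name.
 */
static void
example_lldd_rcv_abts(struct nvmet_fc_target_port *tport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	/* flag the command; completion of outstanding work does the callback */
	nvmet_fc_rcv_fcp_abort(tport, tgt_fcpreq);

	/* BA_ACC may be sent now or once the i/o context is returned */
}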
2289 
2290 enum {
2291 	FCT_TRADDR_ERR		= 0,
2292 	FCT_TRADDR_WWNN		= 1 << 0,
2293 	FCT_TRADDR_WWPN		= 1 << 1,
2294 };
2295 
2296 struct nvmet_fc_traddr {
2297 	u64	nn;
2298 	u64	pn;
2299 };
2300 
2301 static const match_table_t traddr_opt_tokens = {
2302 	{ FCT_TRADDR_WWNN,	"nn-%s"		},
2303 	{ FCT_TRADDR_WWPN,	"pn-%s"		},
2304 	{ FCT_TRADDR_ERR,	NULL		}
2305 };
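/*
 * Editorial note: the traddr string is expected as "nn-<wwnn>:pn-<wwpn>",
 * e.g. "nn-0x20000090fa942779:pn-0x10000090fa942779" (hypothetical values).
 */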
2306 
2307 static int
2308 nvmet_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf)
2309 {
2310 	substring_t args[MAX_OPT_ARGS];
2311 	char *options, *o, *p;
2312 	int token, ret = 0;
2313 	u64 token64;
2314 
2315 	options = o = kstrdup(buf, GFP_KERNEL);
2316 	if (!options)
2317 		return -ENOMEM;
2318 
2319 	while ((p = strsep(&o, ":\n")) != NULL) {
2320 		if (!*p)
2321 			continue;
2322 
2323 		token = match_token(p, traddr_opt_tokens, args);
2324 		switch (token) {
2325 		case FCT_TRADDR_WWNN:
2326 			if (match_u64(args, &token64)) {
2327 				ret = -EINVAL;
2328 				goto out;
2329 			}
2330 			traddr->nn = token64;
2331 			break;
2332 		case FCT_TRADDR_WWPN:
2333 			if (match_u64(args, &token64)) {
2334 				ret = -EINVAL;
2335 				goto out;
2336 			}
2337 			traddr->pn = token64;
2338 			break;
2339 		default:
2340 			pr_warn("unknown traddr token or missing value '%s'\n",
2341 					p);
2342 			ret = -EINVAL;
2343 			goto out;
2344 		}
2345 	}
2346 
2347 out:
2348 	kfree(options);
2349 	return ret;
2350 }
2351 
2352 static int
2353 nvmet_fc_add_port(struct nvmet_port *port)
2354 {
2355 	struct nvmet_fc_tgtport *tgtport;
2356 	struct nvmet_fc_traddr traddr = { 0L, 0L };
2357 	unsigned long flags;
2358 	int ret;
2359 
2360 	/* validate the address info */
2361 	if ((port->disc_addr.trtype != NVMF_TRTYPE_FC) ||
2362 	    (port->disc_addr.adrfam != NVMF_ADDR_FAMILY_FC))
2363 		return -EINVAL;
2364 
2365 	/* map the traddr address info to a target port */
2366 
2367 	ret = nvmet_fc_parse_traddr(&traddr, port->disc_addr.traddr);
2368 	if (ret)
2369 		return ret;
2370 
2371 	ret = -ENXIO;
2372 	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
2373 	list_for_each_entry(tgtport, &nvmet_fc_target_list, tgt_list) {
2374 		if ((tgtport->fc_target_port.node_name == traddr.nn) &&
2375 		    (tgtport->fc_target_port.port_name == traddr.pn)) {
2376 			/* an FC port can map to only one nvmet port id */
2377 			if (!tgtport->port) {
2378 				tgtport->port = port;
2379 				port->priv = tgtport;
2380 				nvmet_fc_tgtport_get(tgtport);
2381 				ret = 0;
2382 			} else
2383 				ret = -EALREADY;
2384 			break;
2385 		}
2386 	}
2387 	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
2388 	return ret;
2389 }
2390 
2391 static void
2392 nvmet_fc_remove_port(struct nvmet_port *port)
2393 {
2394 	struct nvmet_fc_tgtport *tgtport = port->priv;
2395 	unsigned long flags;
2396 
2397 	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
2398 	if (tgtport->port == port) {
2399 		nvmet_fc_tgtport_put(tgtport);
2400 		tgtport->port = NULL;
2401 	}
2402 	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
2403 }
2404 
2405 static struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = {
2406 	.owner			= THIS_MODULE,
2407 	.type			= NVMF_TRTYPE_FC,
2408 	.msdbd			= 1,
2409 	.add_port		= nvmet_fc_add_port,
2410 	.remove_port		= nvmet_fc_remove_port,
2411 	.queue_response		= nvmet_fc_fcp_nvme_cmd_done,
2412 	.delete_ctrl		= nvmet_fc_delete_ctrl,
2413 };
2414 
2415 static int __init nvmet_fc_init_module(void)
2416 {
2417 	return nvmet_register_transport(&nvmet_fc_tgt_fcp_ops);
2418 }
2419 
2420 static void __exit nvmet_fc_exit_module(void)
2421 {
2422 	/* sanity check - all targetports should be removed */
2423 	if (!list_empty(&nvmet_fc_target_list))
2424 		pr_warn("%s: targetport list not empty\n", __func__);
2425 
2426 	nvmet_unregister_transport(&nvmet_fc_tgt_fcp_ops);
2427 
2428 	ida_destroy(&nvmet_fc_tgtport_cnt);
2429 }
2430 
2431 module_init(nvmet_fc_init_module);
2432 module_exit(nvmet_fc_exit_module);
2433 
2434 MODULE_LICENSE("GPL v2");
2435