xref: /openbmc/linux/drivers/nvme/target/fc.c (revision 5d331b7f)
1 /*
2  * Copyright (c) 2016 Avago Technologies.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of version 2 of the GNU General Public License as
6  * published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful.
9  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
10  * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
11  * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO
12  * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
13  * See the GNU General Public License for more details, a copy of which
14  * can be found in the file COPYING included with this package
15  *
16  */
17 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18 #include <linux/module.h>
19 #include <linux/slab.h>
20 #include <linux/blk-mq.h>
21 #include <linux/parser.h>
22 #include <linux/random.h>
23 #include <uapi/scsi/fc/fc_fs.h>
24 #include <uapi/scsi/fc/fc_els.h>
25 
26 #include "nvmet.h"
27 #include <linux/nvme-fc-driver.h>
28 #include <linux/nvme-fc.h>
29 
30 
31 /* *************************** Data Structures/Defines ****************** */
32 
33 
34 #define NVMET_LS_CTX_COUNT		256
35 
36 /* for this implementation, assume small single frame rqst/rsp */
37 #define NVME_FC_MAX_LS_BUFFER_SIZE		2048
38 
39 struct nvmet_fc_tgtport;
40 struct nvmet_fc_tgt_assoc;
41 
42 struct nvmet_fc_ls_iod {
43 	struct nvmefc_tgt_ls_req	*lsreq;
44 	struct nvmefc_tgt_fcp_req	*fcpreq;	/* only if RS */
45 
46 	struct list_head		ls_list;	/* tgtport->ls_list */
47 
48 	struct nvmet_fc_tgtport		*tgtport;
49 	struct nvmet_fc_tgt_assoc	*assoc;
50 
51 	u8				*rqstbuf;
52 	u8				*rspbuf;
53 	u16				rqstdatalen;
54 	dma_addr_t			rspdma;
55 
56 	struct scatterlist		sg[2];
57 
58 	struct work_struct		work;
59 } __aligned(sizeof(unsigned long long));
60 
61 /* desired maximum for a single sequence - if sg list allows it */
62 #define NVMET_FC_MAX_SEQ_LENGTH		(256 * 1024)
63 
64 enum nvmet_fcp_datadir {
65 	NVMET_FCP_NODATA,
66 	NVMET_FCP_WRITE,
67 	NVMET_FCP_READ,
68 	NVMET_FCP_ABORTED,
69 };
70 
71 struct nvmet_fc_fcp_iod {
72 	struct nvmefc_tgt_fcp_req	*fcpreq;
73 
74 	struct nvme_fc_cmd_iu		cmdiubuf;
75 	struct nvme_fc_ersp_iu		rspiubuf;
76 	dma_addr_t			rspdma;
77 	struct scatterlist		*next_sg;
78 	struct scatterlist		*data_sg;
79 	int				data_sg_cnt;
80 	u32				offset;
81 	enum nvmet_fcp_datadir		io_dir;
82 	bool				active;
83 	bool				abort;
84 	bool				aborted;
85 	bool				writedataactive;
86 	spinlock_t			flock;
87 
88 	struct nvmet_req		req;
89 	struct work_struct		work;
90 	struct work_struct		done_work;
91 	struct work_struct		defer_work;
92 
93 	struct nvmet_fc_tgtport		*tgtport;
94 	struct nvmet_fc_tgt_queue	*queue;
95 
96 	struct list_head		fcp_list;	/* tgtport->fcp_list */
97 };
98 
99 struct nvmet_fc_tgtport {
100 
101 	struct nvmet_fc_target_port	fc_target_port;
102 
103 	struct list_head		tgt_list; /* nvmet_fc_target_list */
104 	struct device			*dev;	/* dev for dma mapping */
105 	struct nvmet_fc_target_template	*ops;
106 
107 	struct nvmet_fc_ls_iod		*iod;
108 	spinlock_t			lock;
109 	struct list_head		ls_list;
110 	struct list_head		ls_busylist;
111 	struct list_head		assoc_list;
112 	struct ida			assoc_cnt;
113 	struct nvmet_fc_port_entry	*pe;
114 	struct kref			ref;
115 	u32				max_sg_cnt;
116 };
117 
118 struct nvmet_fc_port_entry {
119 	struct nvmet_fc_tgtport		*tgtport;
120 	struct nvmet_port		*port;
121 	u64				node_name;
122 	u64				port_name;
123 	struct list_head		pe_list;
124 };
125 
126 struct nvmet_fc_defer_fcp_req {
127 	struct list_head		req_list;
128 	struct nvmefc_tgt_fcp_req	*fcp_req;
129 };
130 
131 struct nvmet_fc_tgt_queue {
132 	bool				ninetypercent;
133 	u16				qid;
134 	u16				sqsize;
135 	u16				ersp_ratio;
136 	__le16				sqhd;
137 	int				cpu;
138 	atomic_t			connected;
139 	atomic_t			sqtail;
140 	atomic_t			zrspcnt;
141 	atomic_t			rsn;
142 	spinlock_t			qlock;
143 	struct nvmet_cq			nvme_cq;
144 	struct nvmet_sq			nvme_sq;
145 	struct nvmet_fc_tgt_assoc	*assoc;
146 	struct nvmet_fc_fcp_iod		*fod;		/* array of fcp_iods */
147 	struct list_head		fod_list;
148 	struct list_head		pending_cmd_list;
149 	struct list_head		avail_defer_list;
150 	struct workqueue_struct		*work_q;
151 	struct kref			ref;
152 } __aligned(sizeof(unsigned long long));
153 
154 struct nvmet_fc_tgt_assoc {
155 	u64				association_id;
156 	u32				a_id;
157 	struct nvmet_fc_tgtport		*tgtport;
158 	struct list_head		a_list;
159 	struct nvmet_fc_tgt_queue	*queues[NVMET_NR_QUEUES + 1];
160 	struct kref			ref;
161 	struct work_struct		del_work;
162 };
163 
164 
165 static inline int
166 nvmet_fc_iodnum(struct nvmet_fc_ls_iod *iodptr)
167 {
168 	return (iodptr - iodptr->tgtport->iod);
169 }
170 
171 static inline int
172 nvmet_fc_fodnum(struct nvmet_fc_fcp_iod *fodptr)
173 {
174 	return (fodptr - fodptr->queue->fod);
175 }
176 
177 
178 /*
179  * Association and Connection IDs:
180  *
181  * Association ID will have random number in upper 6 bytes and zero
182  *   in lower 2 bytes
183  *
184  * Connection IDs will be Association ID with QID or'd in lower 2 bytes
185  *
186  * note: Association ID = Connection ID for queue 0
187  */
188 #define BYTES_FOR_QID			sizeof(u16)
189 #define BYTES_FOR_QID_SHIFT		(BYTES_FOR_QID * 8)
190 #define NVMET_FC_QUEUEID_MASK		((u64)((1 << BYTES_FOR_QID_SHIFT) - 1))
191 
192 static inline u64
193 nvmet_fc_makeconnid(struct nvmet_fc_tgt_assoc *assoc, u16 qid)
194 {
195 	return (assoc->association_id | qid);
196 }
197 
198 static inline u64
199 nvmet_fc_getassociationid(u64 connectionid)
200 {
201 	return connectionid & ~NVMET_FC_QUEUEID_MASK;
202 }
203 
204 static inline u16
205 nvmet_fc_getqueueid(u64 connectionid)
206 {
207 	return (u16)(connectionid & NVMET_FC_QUEUEID_MASK);
208 }
209 
210 static inline struct nvmet_fc_tgtport *
211 targetport_to_tgtport(struct nvmet_fc_target_port *targetport)
212 {
213 	return container_of(targetport, struct nvmet_fc_tgtport,
214 				 fc_target_port);
215 }
216 
217 static inline struct nvmet_fc_fcp_iod *
218 nvmet_req_to_fod(struct nvmet_req *nvme_req)
219 {
220 	return container_of(nvme_req, struct nvmet_fc_fcp_iod, req);
221 }
222 
223 
224 /* *************************** Globals **************************** */
225 
226 
227 static DEFINE_SPINLOCK(nvmet_fc_tgtlock);
228 
229 static LIST_HEAD(nvmet_fc_target_list);
230 static DEFINE_IDA(nvmet_fc_tgtport_cnt);
231 static LIST_HEAD(nvmet_fc_portentry_list);
232 
233 
234 static void nvmet_fc_handle_ls_rqst_work(struct work_struct *work);
235 static void nvmet_fc_handle_fcp_rqst_work(struct work_struct *work);
236 static void nvmet_fc_fcp_rqst_op_done_work(struct work_struct *work);
237 static void nvmet_fc_fcp_rqst_op_defer_work(struct work_struct *work);
238 static void nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc);
239 static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc);
240 static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue);
241 static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue);
242 static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport);
243 static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);
244 static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
245 					struct nvmet_fc_fcp_iod *fod);
246 static void nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc);
247 
248 
249 /* *********************** FC-NVME DMA Handling **************************** */
250 
251 /*
252  * The fcloop device passes in a NULL device pointer. Real LLD's will
253  * pass in a valid device pointer. If NULL is passed to the dma mapping
254  * routines, depending on the platform, it may or may not succeed, and
255  * may crash.
256  *
257  * As such:
258  * Wrapper all the dma routines and check the dev pointer.
259  *
260  * If simple mappings (return just a dma address, we'll noop them,
261  * returning a dma address of 0.
262  *
263  * On more complex mappings (dma_map_sg), a pseudo routine fills
264  * in the scatter list, setting all dma addresses to 0.
265  */
266 
267 static inline dma_addr_t
268 fc_dma_map_single(struct device *dev, void *ptr, size_t size,
269 		enum dma_data_direction dir)
270 {
271 	return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
272 }
273 
274 static inline int
275 fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
276 {
277 	return dev ? dma_mapping_error(dev, dma_addr) : 0;
278 }
279 
280 static inline void
281 fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
282 	enum dma_data_direction dir)
283 {
284 	if (dev)
285 		dma_unmap_single(dev, addr, size, dir);
286 }
287 
288 static inline void
289 fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
290 		enum dma_data_direction dir)
291 {
292 	if (dev)
293 		dma_sync_single_for_cpu(dev, addr, size, dir);
294 }
295 
296 static inline void
297 fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
298 		enum dma_data_direction dir)
299 {
300 	if (dev)
301 		dma_sync_single_for_device(dev, addr, size, dir);
302 }
303 
304 /* pseudo dma_map_sg call */
305 static int
306 fc_map_sg(struct scatterlist *sg, int nents)
307 {
308 	struct scatterlist *s;
309 	int i;
310 
311 	WARN_ON(nents == 0 || sg[0].length == 0);
312 
313 	for_each_sg(sg, s, nents, i) {
314 		s->dma_address = 0L;
315 #ifdef CONFIG_NEED_SG_DMA_LENGTH
316 		s->dma_length = s->length;
317 #endif
318 	}
319 	return nents;
320 }
321 
322 static inline int
323 fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
324 		enum dma_data_direction dir)
325 {
326 	return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
327 }
328 
329 static inline void
330 fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
331 		enum dma_data_direction dir)
332 {
333 	if (dev)
334 		dma_unmap_sg(dev, sg, nents, dir);
335 }
336 
337 
338 /* *********************** FC-NVME Port Management ************************ */
339 
340 
341 static int
342 nvmet_fc_alloc_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
343 {
344 	struct nvmet_fc_ls_iod *iod;
345 	int i;
346 
347 	iod = kcalloc(NVMET_LS_CTX_COUNT, sizeof(struct nvmet_fc_ls_iod),
348 			GFP_KERNEL);
349 	if (!iod)
350 		return -ENOMEM;
351 
352 	tgtport->iod = iod;
353 
354 	for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
355 		INIT_WORK(&iod->work, nvmet_fc_handle_ls_rqst_work);
356 		iod->tgtport = tgtport;
357 		list_add_tail(&iod->ls_list, &tgtport->ls_list);
358 
359 		iod->rqstbuf = kcalloc(2, NVME_FC_MAX_LS_BUFFER_SIZE,
360 			GFP_KERNEL);
361 		if (!iod->rqstbuf)
362 			goto out_fail;
363 
364 		iod->rspbuf = iod->rqstbuf + NVME_FC_MAX_LS_BUFFER_SIZE;
365 
366 		iod->rspdma = fc_dma_map_single(tgtport->dev, iod->rspbuf,
367 						NVME_FC_MAX_LS_BUFFER_SIZE,
368 						DMA_TO_DEVICE);
369 		if (fc_dma_mapping_error(tgtport->dev, iod->rspdma))
370 			goto out_fail;
371 	}
372 
373 	return 0;
374 
375 out_fail:
376 	kfree(iod->rqstbuf);
377 	list_del(&iod->ls_list);
378 	for (iod--, i--; i >= 0; iod--, i--) {
379 		fc_dma_unmap_single(tgtport->dev, iod->rspdma,
380 				NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
381 		kfree(iod->rqstbuf);
382 		list_del(&iod->ls_list);
383 	}
384 
385 	kfree(iod);
386 
387 	return -EFAULT;
388 }
389 
390 static void
391 nvmet_fc_free_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
392 {
393 	struct nvmet_fc_ls_iod *iod = tgtport->iod;
394 	int i;
395 
396 	for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
397 		fc_dma_unmap_single(tgtport->dev,
398 				iod->rspdma, NVME_FC_MAX_LS_BUFFER_SIZE,
399 				DMA_TO_DEVICE);
400 		kfree(iod->rqstbuf);
401 		list_del(&iod->ls_list);
402 	}
403 	kfree(tgtport->iod);
404 }
405 
406 static struct nvmet_fc_ls_iod *
407 nvmet_fc_alloc_ls_iod(struct nvmet_fc_tgtport *tgtport)
408 {
409 	struct nvmet_fc_ls_iod *iod;
410 	unsigned long flags;
411 
412 	spin_lock_irqsave(&tgtport->lock, flags);
413 	iod = list_first_entry_or_null(&tgtport->ls_list,
414 					struct nvmet_fc_ls_iod, ls_list);
415 	if (iod)
416 		list_move_tail(&iod->ls_list, &tgtport->ls_busylist);
417 	spin_unlock_irqrestore(&tgtport->lock, flags);
418 	return iod;
419 }
420 
421 
422 static void
423 nvmet_fc_free_ls_iod(struct nvmet_fc_tgtport *tgtport,
424 			struct nvmet_fc_ls_iod *iod)
425 {
426 	unsigned long flags;
427 
428 	spin_lock_irqsave(&tgtport->lock, flags);
429 	list_move(&iod->ls_list, &tgtport->ls_list);
430 	spin_unlock_irqrestore(&tgtport->lock, flags);
431 }
432 
433 static void
434 nvmet_fc_prep_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
435 				struct nvmet_fc_tgt_queue *queue)
436 {
437 	struct nvmet_fc_fcp_iod *fod = queue->fod;
438 	int i;
439 
440 	for (i = 0; i < queue->sqsize; fod++, i++) {
441 		INIT_WORK(&fod->work, nvmet_fc_handle_fcp_rqst_work);
442 		INIT_WORK(&fod->done_work, nvmet_fc_fcp_rqst_op_done_work);
443 		INIT_WORK(&fod->defer_work, nvmet_fc_fcp_rqst_op_defer_work);
444 		fod->tgtport = tgtport;
445 		fod->queue = queue;
446 		fod->active = false;
447 		fod->abort = false;
448 		fod->aborted = false;
449 		fod->fcpreq = NULL;
450 		list_add_tail(&fod->fcp_list, &queue->fod_list);
451 		spin_lock_init(&fod->flock);
452 
453 		fod->rspdma = fc_dma_map_single(tgtport->dev, &fod->rspiubuf,
454 					sizeof(fod->rspiubuf), DMA_TO_DEVICE);
455 		if (fc_dma_mapping_error(tgtport->dev, fod->rspdma)) {
456 			list_del(&fod->fcp_list);
457 			for (fod--, i--; i >= 0; fod--, i--) {
458 				fc_dma_unmap_single(tgtport->dev, fod->rspdma,
459 						sizeof(fod->rspiubuf),
460 						DMA_TO_DEVICE);
461 				fod->rspdma = 0L;
462 				list_del(&fod->fcp_list);
463 			}
464 
465 			return;
466 		}
467 	}
468 }
469 
470 static void
471 nvmet_fc_destroy_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
472 				struct nvmet_fc_tgt_queue *queue)
473 {
474 	struct nvmet_fc_fcp_iod *fod = queue->fod;
475 	int i;
476 
477 	for (i = 0; i < queue->sqsize; fod++, i++) {
478 		if (fod->rspdma)
479 			fc_dma_unmap_single(tgtport->dev, fod->rspdma,
480 				sizeof(fod->rspiubuf), DMA_TO_DEVICE);
481 	}
482 }
483 
484 static struct nvmet_fc_fcp_iod *
485 nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue)
486 {
487 	struct nvmet_fc_fcp_iod *fod;
488 
489 	lockdep_assert_held(&queue->qlock);
490 
491 	fod = list_first_entry_or_null(&queue->fod_list,
492 					struct nvmet_fc_fcp_iod, fcp_list);
493 	if (fod) {
494 		list_del(&fod->fcp_list);
495 		fod->active = true;
496 		/*
497 		 * no queue reference is taken, as it was taken by the
498 		 * queue lookup just prior to the allocation. The iod
499 		 * will "inherit" that reference.
500 		 */
501 	}
502 	return fod;
503 }
504 
505 
506 static void
507 nvmet_fc_queue_fcp_req(struct nvmet_fc_tgtport *tgtport,
508 		       struct nvmet_fc_tgt_queue *queue,
509 		       struct nvmefc_tgt_fcp_req *fcpreq)
510 {
511 	struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
512 
513 	/*
514 	 * put all admin cmds on hw queue id 0. All io commands go to
515 	 * the respective hw queue based on a modulo basis
516 	 */
517 	fcpreq->hwqid = queue->qid ?
518 			((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0;
519 
520 	if (tgtport->ops->target_features & NVMET_FCTGTFEAT_CMD_IN_ISR)
521 		queue_work_on(queue->cpu, queue->work_q, &fod->work);
522 	else
523 		nvmet_fc_handle_fcp_rqst(tgtport, fod);
524 }
525 
526 static void
527 nvmet_fc_fcp_rqst_op_defer_work(struct work_struct *work)
528 {
529 	struct nvmet_fc_fcp_iod *fod =
530 		container_of(work, struct nvmet_fc_fcp_iod, defer_work);
531 
532 	/* Submit deferred IO for processing */
533 	nvmet_fc_queue_fcp_req(fod->tgtport, fod->queue, fod->fcpreq);
534 
535 }
536 
537 static void
538 nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
539 			struct nvmet_fc_fcp_iod *fod)
540 {
541 	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
542 	struct nvmet_fc_tgtport *tgtport = fod->tgtport;
543 	struct nvmet_fc_defer_fcp_req *deferfcp;
544 	unsigned long flags;
545 
546 	fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma,
547 				sizeof(fod->rspiubuf), DMA_TO_DEVICE);
548 
549 	fcpreq->nvmet_fc_private = NULL;
550 
551 	fod->active = false;
552 	fod->abort = false;
553 	fod->aborted = false;
554 	fod->writedataactive = false;
555 	fod->fcpreq = NULL;
556 
557 	tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq);
558 
559 	/* release the queue lookup reference on the completed IO */
560 	nvmet_fc_tgt_q_put(queue);
561 
562 	spin_lock_irqsave(&queue->qlock, flags);
563 	deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
564 				struct nvmet_fc_defer_fcp_req, req_list);
565 	if (!deferfcp) {
566 		list_add_tail(&fod->fcp_list, &fod->queue->fod_list);
567 		spin_unlock_irqrestore(&queue->qlock, flags);
568 		return;
569 	}
570 
571 	/* Re-use the fod for the next pending cmd that was deferred */
572 	list_del(&deferfcp->req_list);
573 
574 	fcpreq = deferfcp->fcp_req;
575 
576 	/* deferfcp can be reused for another IO at a later date */
577 	list_add_tail(&deferfcp->req_list, &queue->avail_defer_list);
578 
579 	spin_unlock_irqrestore(&queue->qlock, flags);
580 
581 	/* Save NVME CMD IO in fod */
582 	memcpy(&fod->cmdiubuf, fcpreq->rspaddr, fcpreq->rsplen);
583 
584 	/* Setup new fcpreq to be processed */
585 	fcpreq->rspaddr = NULL;
586 	fcpreq->rsplen  = 0;
587 	fcpreq->nvmet_fc_private = fod;
588 	fod->fcpreq = fcpreq;
589 	fod->active = true;
590 
591 	/* inform LLDD IO is now being processed */
592 	tgtport->ops->defer_rcv(&tgtport->fc_target_port, fcpreq);
593 
594 	/*
595 	 * Leave the queue lookup get reference taken when
596 	 * fod was originally allocated.
597 	 */
598 
599 	queue_work(queue->work_q, &fod->defer_work);
600 }
601 
602 static int
603 nvmet_fc_queue_to_cpu(struct nvmet_fc_tgtport *tgtport, int qid)
604 {
605 	int cpu, idx, cnt;
606 
607 	if (tgtport->ops->max_hw_queues == 1)
608 		return WORK_CPU_UNBOUND;
609 
610 	/* Simple cpu selection based on qid modulo active cpu count */
611 	idx = !qid ? 0 : (qid - 1) % num_active_cpus();
612 
613 	/* find the n'th active cpu */
614 	for (cpu = 0, cnt = 0; ; ) {
615 		if (cpu_active(cpu)) {
616 			if (cnt == idx)
617 				break;
618 			cnt++;
619 		}
620 		cpu = (cpu + 1) % num_possible_cpus();
621 	}
622 
623 	return cpu;
624 }
625 
626 static struct nvmet_fc_tgt_queue *
627 nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
628 			u16 qid, u16 sqsize)
629 {
630 	struct nvmet_fc_tgt_queue *queue;
631 	unsigned long flags;
632 	int ret;
633 
634 	if (qid > NVMET_NR_QUEUES)
635 		return NULL;
636 
637 	queue = kzalloc((sizeof(*queue) +
638 				(sizeof(struct nvmet_fc_fcp_iod) * sqsize)),
639 				GFP_KERNEL);
640 	if (!queue)
641 		return NULL;
642 
643 	if (!nvmet_fc_tgt_a_get(assoc))
644 		goto out_free_queue;
645 
646 	queue->work_q = alloc_workqueue("ntfc%d.%d.%d", 0, 0,
647 				assoc->tgtport->fc_target_port.port_num,
648 				assoc->a_id, qid);
649 	if (!queue->work_q)
650 		goto out_a_put;
651 
652 	queue->fod = (struct nvmet_fc_fcp_iod *)&queue[1];
653 	queue->qid = qid;
654 	queue->sqsize = sqsize;
655 	queue->assoc = assoc;
656 	queue->cpu = nvmet_fc_queue_to_cpu(assoc->tgtport, qid);
657 	INIT_LIST_HEAD(&queue->fod_list);
658 	INIT_LIST_HEAD(&queue->avail_defer_list);
659 	INIT_LIST_HEAD(&queue->pending_cmd_list);
660 	atomic_set(&queue->connected, 0);
661 	atomic_set(&queue->sqtail, 0);
662 	atomic_set(&queue->rsn, 1);
663 	atomic_set(&queue->zrspcnt, 0);
664 	spin_lock_init(&queue->qlock);
665 	kref_init(&queue->ref);
666 
667 	nvmet_fc_prep_fcp_iodlist(assoc->tgtport, queue);
668 
669 	ret = nvmet_sq_init(&queue->nvme_sq);
670 	if (ret)
671 		goto out_fail_iodlist;
672 
673 	WARN_ON(assoc->queues[qid]);
674 	spin_lock_irqsave(&assoc->tgtport->lock, flags);
675 	assoc->queues[qid] = queue;
676 	spin_unlock_irqrestore(&assoc->tgtport->lock, flags);
677 
678 	return queue;
679 
680 out_fail_iodlist:
681 	nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue);
682 	destroy_workqueue(queue->work_q);
683 out_a_put:
684 	nvmet_fc_tgt_a_put(assoc);
685 out_free_queue:
686 	kfree(queue);
687 	return NULL;
688 }
689 
690 
691 static void
692 nvmet_fc_tgt_queue_free(struct kref *ref)
693 {
694 	struct nvmet_fc_tgt_queue *queue =
695 		container_of(ref, struct nvmet_fc_tgt_queue, ref);
696 	unsigned long flags;
697 
698 	spin_lock_irqsave(&queue->assoc->tgtport->lock, flags);
699 	queue->assoc->queues[queue->qid] = NULL;
700 	spin_unlock_irqrestore(&queue->assoc->tgtport->lock, flags);
701 
702 	nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue);
703 
704 	nvmet_fc_tgt_a_put(queue->assoc);
705 
706 	destroy_workqueue(queue->work_q);
707 
708 	kfree(queue);
709 }
710 
711 static void
712 nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue)
713 {
714 	kref_put(&queue->ref, nvmet_fc_tgt_queue_free);
715 }
716 
717 static int
718 nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue)
719 {
720 	return kref_get_unless_zero(&queue->ref);
721 }
722 
723 
724 static void
725 nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
726 {
727 	struct nvmet_fc_tgtport *tgtport = queue->assoc->tgtport;
728 	struct nvmet_fc_fcp_iod *fod = queue->fod;
729 	struct nvmet_fc_defer_fcp_req *deferfcp, *tempptr;
730 	unsigned long flags;
731 	int i, writedataactive;
732 	bool disconnect;
733 
734 	disconnect = atomic_xchg(&queue->connected, 0);
735 
736 	spin_lock_irqsave(&queue->qlock, flags);
737 	/* about outstanding io's */
738 	for (i = 0; i < queue->sqsize; fod++, i++) {
739 		if (fod->active) {
740 			spin_lock(&fod->flock);
741 			fod->abort = true;
742 			writedataactive = fod->writedataactive;
743 			spin_unlock(&fod->flock);
744 			/*
745 			 * only call lldd abort routine if waiting for
746 			 * writedata. other outstanding ops should finish
747 			 * on their own.
748 			 */
749 			if (writedataactive) {
750 				spin_lock(&fod->flock);
751 				fod->aborted = true;
752 				spin_unlock(&fod->flock);
753 				tgtport->ops->fcp_abort(
754 					&tgtport->fc_target_port, fod->fcpreq);
755 			}
756 		}
757 	}
758 
759 	/* Cleanup defer'ed IOs in queue */
760 	list_for_each_entry_safe(deferfcp, tempptr, &queue->avail_defer_list,
761 				req_list) {
762 		list_del(&deferfcp->req_list);
763 		kfree(deferfcp);
764 	}
765 
766 	for (;;) {
767 		deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
768 				struct nvmet_fc_defer_fcp_req, req_list);
769 		if (!deferfcp)
770 			break;
771 
772 		list_del(&deferfcp->req_list);
773 		spin_unlock_irqrestore(&queue->qlock, flags);
774 
775 		tgtport->ops->defer_rcv(&tgtport->fc_target_port,
776 				deferfcp->fcp_req);
777 
778 		tgtport->ops->fcp_abort(&tgtport->fc_target_port,
779 				deferfcp->fcp_req);
780 
781 		tgtport->ops->fcp_req_release(&tgtport->fc_target_port,
782 				deferfcp->fcp_req);
783 
784 		/* release the queue lookup reference */
785 		nvmet_fc_tgt_q_put(queue);
786 
787 		kfree(deferfcp);
788 
789 		spin_lock_irqsave(&queue->qlock, flags);
790 	}
791 	spin_unlock_irqrestore(&queue->qlock, flags);
792 
793 	flush_workqueue(queue->work_q);
794 
795 	if (disconnect)
796 		nvmet_sq_destroy(&queue->nvme_sq);
797 
798 	nvmet_fc_tgt_q_put(queue);
799 }
800 
801 static struct nvmet_fc_tgt_queue *
802 nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport,
803 				u64 connection_id)
804 {
805 	struct nvmet_fc_tgt_assoc *assoc;
806 	struct nvmet_fc_tgt_queue *queue;
807 	u64 association_id = nvmet_fc_getassociationid(connection_id);
808 	u16 qid = nvmet_fc_getqueueid(connection_id);
809 	unsigned long flags;
810 
811 	if (qid > NVMET_NR_QUEUES)
812 		return NULL;
813 
814 	spin_lock_irqsave(&tgtport->lock, flags);
815 	list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
816 		if (association_id == assoc->association_id) {
817 			queue = assoc->queues[qid];
818 			if (queue &&
819 			    (!atomic_read(&queue->connected) ||
820 			     !nvmet_fc_tgt_q_get(queue)))
821 				queue = NULL;
822 			spin_unlock_irqrestore(&tgtport->lock, flags);
823 			return queue;
824 		}
825 	}
826 	spin_unlock_irqrestore(&tgtport->lock, flags);
827 	return NULL;
828 }
829 
830 static void
831 nvmet_fc_delete_assoc(struct work_struct *work)
832 {
833 	struct nvmet_fc_tgt_assoc *assoc =
834 		container_of(work, struct nvmet_fc_tgt_assoc, del_work);
835 
836 	nvmet_fc_delete_target_assoc(assoc);
837 	nvmet_fc_tgt_a_put(assoc);
838 }
839 
840 static struct nvmet_fc_tgt_assoc *
841 nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport)
842 {
843 	struct nvmet_fc_tgt_assoc *assoc, *tmpassoc;
844 	unsigned long flags;
845 	u64 ran;
846 	int idx;
847 	bool needrandom = true;
848 
849 	assoc = kzalloc(sizeof(*assoc), GFP_KERNEL);
850 	if (!assoc)
851 		return NULL;
852 
853 	idx = ida_simple_get(&tgtport->assoc_cnt, 0, 0, GFP_KERNEL);
854 	if (idx < 0)
855 		goto out_free_assoc;
856 
857 	if (!nvmet_fc_tgtport_get(tgtport))
858 		goto out_ida_put;
859 
860 	assoc->tgtport = tgtport;
861 	assoc->a_id = idx;
862 	INIT_LIST_HEAD(&assoc->a_list);
863 	kref_init(&assoc->ref);
864 	INIT_WORK(&assoc->del_work, nvmet_fc_delete_assoc);
865 
866 	while (needrandom) {
867 		get_random_bytes(&ran, sizeof(ran) - BYTES_FOR_QID);
868 		ran = ran << BYTES_FOR_QID_SHIFT;
869 
870 		spin_lock_irqsave(&tgtport->lock, flags);
871 		needrandom = false;
872 		list_for_each_entry(tmpassoc, &tgtport->assoc_list, a_list)
873 			if (ran == tmpassoc->association_id) {
874 				needrandom = true;
875 				break;
876 			}
877 		if (!needrandom) {
878 			assoc->association_id = ran;
879 			list_add_tail(&assoc->a_list, &tgtport->assoc_list);
880 		}
881 		spin_unlock_irqrestore(&tgtport->lock, flags);
882 	}
883 
884 	return assoc;
885 
886 out_ida_put:
887 	ida_simple_remove(&tgtport->assoc_cnt, idx);
888 out_free_assoc:
889 	kfree(assoc);
890 	return NULL;
891 }
892 
893 static void
894 nvmet_fc_target_assoc_free(struct kref *ref)
895 {
896 	struct nvmet_fc_tgt_assoc *assoc =
897 		container_of(ref, struct nvmet_fc_tgt_assoc, ref);
898 	struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
899 	unsigned long flags;
900 
901 	spin_lock_irqsave(&tgtport->lock, flags);
902 	list_del(&assoc->a_list);
903 	spin_unlock_irqrestore(&tgtport->lock, flags);
904 	ida_simple_remove(&tgtport->assoc_cnt, assoc->a_id);
905 	kfree(assoc);
906 	nvmet_fc_tgtport_put(tgtport);
907 }
908 
909 static void
910 nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc)
911 {
912 	kref_put(&assoc->ref, nvmet_fc_target_assoc_free);
913 }
914 
915 static int
916 nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc)
917 {
918 	return kref_get_unless_zero(&assoc->ref);
919 }
920 
921 static void
922 nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
923 {
924 	struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
925 	struct nvmet_fc_tgt_queue *queue;
926 	unsigned long flags;
927 	int i;
928 
929 	spin_lock_irqsave(&tgtport->lock, flags);
930 	for (i = NVMET_NR_QUEUES; i >= 0; i--) {
931 		queue = assoc->queues[i];
932 		if (queue) {
933 			if (!nvmet_fc_tgt_q_get(queue))
934 				continue;
935 			spin_unlock_irqrestore(&tgtport->lock, flags);
936 			nvmet_fc_delete_target_queue(queue);
937 			nvmet_fc_tgt_q_put(queue);
938 			spin_lock_irqsave(&tgtport->lock, flags);
939 		}
940 	}
941 	spin_unlock_irqrestore(&tgtport->lock, flags);
942 
943 	nvmet_fc_tgt_a_put(assoc);
944 }
945 
946 static struct nvmet_fc_tgt_assoc *
947 nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport *tgtport,
948 				u64 association_id)
949 {
950 	struct nvmet_fc_tgt_assoc *assoc;
951 	struct nvmet_fc_tgt_assoc *ret = NULL;
952 	unsigned long flags;
953 
954 	spin_lock_irqsave(&tgtport->lock, flags);
955 	list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
956 		if (association_id == assoc->association_id) {
957 			ret = assoc;
958 			nvmet_fc_tgt_a_get(assoc);
959 			break;
960 		}
961 	}
962 	spin_unlock_irqrestore(&tgtport->lock, flags);
963 
964 	return ret;
965 }
966 
967 static void
968 nvmet_fc_portentry_bind(struct nvmet_fc_tgtport *tgtport,
969 			struct nvmet_fc_port_entry *pe,
970 			struct nvmet_port *port)
971 {
972 	lockdep_assert_held(&nvmet_fc_tgtlock);
973 
974 	pe->tgtport = tgtport;
975 	tgtport->pe = pe;
976 
977 	pe->port = port;
978 	port->priv = pe;
979 
980 	pe->node_name = tgtport->fc_target_port.node_name;
981 	pe->port_name = tgtport->fc_target_port.port_name;
982 	INIT_LIST_HEAD(&pe->pe_list);
983 
984 	list_add_tail(&pe->pe_list, &nvmet_fc_portentry_list);
985 }
986 
987 static void
988 nvmet_fc_portentry_unbind(struct nvmet_fc_port_entry *pe)
989 {
990 	unsigned long flags;
991 
992 	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
993 	if (pe->tgtport)
994 		pe->tgtport->pe = NULL;
995 	list_del(&pe->pe_list);
996 	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
997 }
998 
999 /*
1000  * called when a targetport deregisters. Breaks the relationship
1001  * with the nvmet port, but leaves the port_entry in place so that
1002  * re-registration can resume operation.
1003  */
1004 static void
1005 nvmet_fc_portentry_unbind_tgt(struct nvmet_fc_tgtport *tgtport)
1006 {
1007 	struct nvmet_fc_port_entry *pe;
1008 	unsigned long flags;
1009 
1010 	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
1011 	pe = tgtport->pe;
1012 	if (pe)
1013 		pe->tgtport = NULL;
1014 	tgtport->pe = NULL;
1015 	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1016 }
1017 
1018 /*
1019  * called when a new targetport is registered. Looks in the
1020  * existing nvmet port_entries to see if the nvmet layer is
1021  * configured for the targetport's wwn's. (the targetport existed,
1022  * nvmet configured, the lldd unregistered the tgtport, and is now
1023  * reregistering the same targetport).  If so, set the nvmet port
1024  * port entry on the targetport.
1025  */
1026 static void
1027 nvmet_fc_portentry_rebind_tgt(struct nvmet_fc_tgtport *tgtport)
1028 {
1029 	struct nvmet_fc_port_entry *pe;
1030 	unsigned long flags;
1031 
1032 	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
1033 	list_for_each_entry(pe, &nvmet_fc_portentry_list, pe_list) {
1034 		if (tgtport->fc_target_port.node_name == pe->node_name &&
1035 		    tgtport->fc_target_port.port_name == pe->port_name) {
1036 			WARN_ON(pe->tgtport);
1037 			tgtport->pe = pe;
1038 			pe->tgtport = tgtport;
1039 			break;
1040 		}
1041 	}
1042 	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1043 }
1044 
1045 /**
1046  * nvme_fc_register_targetport - transport entry point called by an
1047  *                              LLDD to register the existence of a local
1048  *                              NVME subystem FC port.
1049  * @pinfo:     pointer to information about the port to be registered
1050  * @template:  LLDD entrypoints and operational parameters for the port
1051  * @dev:       physical hardware device node port corresponds to. Will be
1052  *             used for DMA mappings
1053  * @portptr:   pointer to a local port pointer. Upon success, the routine
1054  *             will allocate a nvme_fc_local_port structure and place its
1055  *             address in the local port pointer. Upon failure, local port
1056  *             pointer will be set to NULL.
1057  *
1058  * Returns:
1059  * a completion status. Must be 0 upon success; a negative errno
1060  * (ex: -ENXIO) upon failure.
1061  */
1062 int
1063 nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
1064 			struct nvmet_fc_target_template *template,
1065 			struct device *dev,
1066 			struct nvmet_fc_target_port **portptr)
1067 {
1068 	struct nvmet_fc_tgtport *newrec;
1069 	unsigned long flags;
1070 	int ret, idx;
1071 
1072 	if (!template->xmt_ls_rsp || !template->fcp_op ||
1073 	    !template->fcp_abort ||
1074 	    !template->fcp_req_release || !template->targetport_delete ||
1075 	    !template->max_hw_queues || !template->max_sgl_segments ||
1076 	    !template->max_dif_sgl_segments || !template->dma_boundary) {
1077 		ret = -EINVAL;
1078 		goto out_regtgt_failed;
1079 	}
1080 
1081 	newrec = kzalloc((sizeof(*newrec) + template->target_priv_sz),
1082 			 GFP_KERNEL);
1083 	if (!newrec) {
1084 		ret = -ENOMEM;
1085 		goto out_regtgt_failed;
1086 	}
1087 
1088 	idx = ida_simple_get(&nvmet_fc_tgtport_cnt, 0, 0, GFP_KERNEL);
1089 	if (idx < 0) {
1090 		ret = -ENOSPC;
1091 		goto out_fail_kfree;
1092 	}
1093 
1094 	if (!get_device(dev) && dev) {
1095 		ret = -ENODEV;
1096 		goto out_ida_put;
1097 	}
1098 
1099 	newrec->fc_target_port.node_name = pinfo->node_name;
1100 	newrec->fc_target_port.port_name = pinfo->port_name;
1101 	newrec->fc_target_port.private = &newrec[1];
1102 	newrec->fc_target_port.port_id = pinfo->port_id;
1103 	newrec->fc_target_port.port_num = idx;
1104 	INIT_LIST_HEAD(&newrec->tgt_list);
1105 	newrec->dev = dev;
1106 	newrec->ops = template;
1107 	spin_lock_init(&newrec->lock);
1108 	INIT_LIST_HEAD(&newrec->ls_list);
1109 	INIT_LIST_HEAD(&newrec->ls_busylist);
1110 	INIT_LIST_HEAD(&newrec->assoc_list);
1111 	kref_init(&newrec->ref);
1112 	ida_init(&newrec->assoc_cnt);
1113 	newrec->max_sg_cnt = template->max_sgl_segments;
1114 
1115 	ret = nvmet_fc_alloc_ls_iodlist(newrec);
1116 	if (ret) {
1117 		ret = -ENOMEM;
1118 		goto out_free_newrec;
1119 	}
1120 
1121 	nvmet_fc_portentry_rebind_tgt(newrec);
1122 
1123 	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
1124 	list_add_tail(&newrec->tgt_list, &nvmet_fc_target_list);
1125 	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1126 
1127 	*portptr = &newrec->fc_target_port;
1128 	return 0;
1129 
1130 out_free_newrec:
1131 	put_device(dev);
1132 out_ida_put:
1133 	ida_simple_remove(&nvmet_fc_tgtport_cnt, idx);
1134 out_fail_kfree:
1135 	kfree(newrec);
1136 out_regtgt_failed:
1137 	*portptr = NULL;
1138 	return ret;
1139 }
1140 EXPORT_SYMBOL_GPL(nvmet_fc_register_targetport);
1141 
1142 
1143 static void
1144 nvmet_fc_free_tgtport(struct kref *ref)
1145 {
1146 	struct nvmet_fc_tgtport *tgtport =
1147 		container_of(ref, struct nvmet_fc_tgtport, ref);
1148 	struct device *dev = tgtport->dev;
1149 	unsigned long flags;
1150 
1151 	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
1152 	list_del(&tgtport->tgt_list);
1153 	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1154 
1155 	nvmet_fc_free_ls_iodlist(tgtport);
1156 
1157 	/* let the LLDD know we've finished tearing it down */
1158 	tgtport->ops->targetport_delete(&tgtport->fc_target_port);
1159 
1160 	ida_simple_remove(&nvmet_fc_tgtport_cnt,
1161 			tgtport->fc_target_port.port_num);
1162 
1163 	ida_destroy(&tgtport->assoc_cnt);
1164 
1165 	kfree(tgtport);
1166 
1167 	put_device(dev);
1168 }
1169 
1170 static void
1171 nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport)
1172 {
1173 	kref_put(&tgtport->ref, nvmet_fc_free_tgtport);
1174 }
1175 
1176 static int
1177 nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport)
1178 {
1179 	return kref_get_unless_zero(&tgtport->ref);
1180 }
1181 
1182 static void
1183 __nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
1184 {
1185 	struct nvmet_fc_tgt_assoc *assoc, *next;
1186 	unsigned long flags;
1187 
1188 	spin_lock_irqsave(&tgtport->lock, flags);
1189 	list_for_each_entry_safe(assoc, next,
1190 				&tgtport->assoc_list, a_list) {
1191 		if (!nvmet_fc_tgt_a_get(assoc))
1192 			continue;
1193 		spin_unlock_irqrestore(&tgtport->lock, flags);
1194 		nvmet_fc_delete_target_assoc(assoc);
1195 		nvmet_fc_tgt_a_put(assoc);
1196 		spin_lock_irqsave(&tgtport->lock, flags);
1197 	}
1198 	spin_unlock_irqrestore(&tgtport->lock, flags);
1199 }
1200 
1201 /*
1202  * nvmet layer has called to terminate an association
1203  */
1204 static void
1205 nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
1206 {
1207 	struct nvmet_fc_tgtport *tgtport, *next;
1208 	struct nvmet_fc_tgt_assoc *assoc;
1209 	struct nvmet_fc_tgt_queue *queue;
1210 	unsigned long flags;
1211 	bool found_ctrl = false;
1212 
1213 	/* this is a bit ugly, but don't want to make locks layered */
1214 	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
1215 	list_for_each_entry_safe(tgtport, next, &nvmet_fc_target_list,
1216 			tgt_list) {
1217 		if (!nvmet_fc_tgtport_get(tgtport))
1218 			continue;
1219 		spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1220 
1221 		spin_lock_irqsave(&tgtport->lock, flags);
1222 		list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
1223 			queue = assoc->queues[0];
1224 			if (queue && queue->nvme_sq.ctrl == ctrl) {
1225 				if (nvmet_fc_tgt_a_get(assoc))
1226 					found_ctrl = true;
1227 				break;
1228 			}
1229 		}
1230 		spin_unlock_irqrestore(&tgtport->lock, flags);
1231 
1232 		nvmet_fc_tgtport_put(tgtport);
1233 
1234 		if (found_ctrl) {
1235 			schedule_work(&assoc->del_work);
1236 			return;
1237 		}
1238 
1239 		spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
1240 	}
1241 	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1242 }
1243 
1244 /**
1245  * nvme_fc_unregister_targetport - transport entry point called by an
1246  *                              LLDD to deregister/remove a previously
1247  *                              registered a local NVME subsystem FC port.
1248  * @target_port: pointer to the (registered) target port that is to be
1249  *               deregistered.
1250  *
1251  * Returns:
1252  * a completion status. Must be 0 upon success; a negative errno
1253  * (ex: -ENXIO) upon failure.
1254  */
1255 int
1256 nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port)
1257 {
1258 	struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
1259 
1260 	nvmet_fc_portentry_unbind_tgt(tgtport);
1261 
1262 	/* terminate any outstanding associations */
1263 	__nvmet_fc_free_assocs(tgtport);
1264 
1265 	nvmet_fc_tgtport_put(tgtport);
1266 
1267 	return 0;
1268 }
1269 EXPORT_SYMBOL_GPL(nvmet_fc_unregister_targetport);
1270 
1271 
1272 /* *********************** FC-NVME LS Handling **************************** */
1273 
1274 
1275 static void
1276 nvmet_fc_format_rsp_hdr(void *buf, u8 ls_cmd, __be32 desc_len, u8 rqst_ls_cmd)
1277 {
1278 	struct fcnvme_ls_acc_hdr *acc = buf;
1279 
1280 	acc->w0.ls_cmd = ls_cmd;
1281 	acc->desc_list_len = desc_len;
1282 	acc->rqst.desc_tag = cpu_to_be32(FCNVME_LSDESC_RQST);
1283 	acc->rqst.desc_len =
1284 			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst));
1285 	acc->rqst.w0.ls_cmd = rqst_ls_cmd;
1286 }
1287 
1288 static int
1289 nvmet_fc_format_rjt(void *buf, u16 buflen, u8 ls_cmd,
1290 			u8 reason, u8 explanation, u8 vendor)
1291 {
1292 	struct fcnvme_ls_rjt *rjt = buf;
1293 
1294 	nvmet_fc_format_rsp_hdr(buf, FCNVME_LSDESC_RQST,
1295 			fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_rjt)),
1296 			ls_cmd);
1297 	rjt->rjt.desc_tag = cpu_to_be32(FCNVME_LSDESC_RJT);
1298 	rjt->rjt.desc_len = fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rjt));
1299 	rjt->rjt.reason_code = reason;
1300 	rjt->rjt.reason_explanation = explanation;
1301 	rjt->rjt.vendor = vendor;
1302 
1303 	return sizeof(struct fcnvme_ls_rjt);
1304 }
1305 
1306 /* Validation Error indexes into the string table below */
1307 enum {
1308 	VERR_NO_ERROR		= 0,
1309 	VERR_CR_ASSOC_LEN	= 1,
1310 	VERR_CR_ASSOC_RQST_LEN	= 2,
1311 	VERR_CR_ASSOC_CMD	= 3,
1312 	VERR_CR_ASSOC_CMD_LEN	= 4,
1313 	VERR_ERSP_RATIO		= 5,
1314 	VERR_ASSOC_ALLOC_FAIL	= 6,
1315 	VERR_QUEUE_ALLOC_FAIL	= 7,
1316 	VERR_CR_CONN_LEN	= 8,
1317 	VERR_CR_CONN_RQST_LEN	= 9,
1318 	VERR_ASSOC_ID		= 10,
1319 	VERR_ASSOC_ID_LEN	= 11,
1320 	VERR_NO_ASSOC		= 12,
1321 	VERR_CONN_ID		= 13,
1322 	VERR_CONN_ID_LEN	= 14,
1323 	VERR_NO_CONN		= 15,
1324 	VERR_CR_CONN_CMD	= 16,
1325 	VERR_CR_CONN_CMD_LEN	= 17,
1326 	VERR_DISCONN_LEN	= 18,
1327 	VERR_DISCONN_RQST_LEN	= 19,
1328 	VERR_DISCONN_CMD	= 20,
1329 	VERR_DISCONN_CMD_LEN	= 21,
1330 	VERR_DISCONN_SCOPE	= 22,
1331 	VERR_RS_LEN		= 23,
1332 	VERR_RS_RQST_LEN	= 24,
1333 	VERR_RS_CMD		= 25,
1334 	VERR_RS_CMD_LEN		= 26,
1335 	VERR_RS_RCTL		= 27,
1336 	VERR_RS_RO		= 28,
1337 };
1338 
1339 static char *validation_errors[] = {
1340 	"OK",
1341 	"Bad CR_ASSOC Length",
1342 	"Bad CR_ASSOC Rqst Length",
1343 	"Not CR_ASSOC Cmd",
1344 	"Bad CR_ASSOC Cmd Length",
1345 	"Bad Ersp Ratio",
1346 	"Association Allocation Failed",
1347 	"Queue Allocation Failed",
1348 	"Bad CR_CONN Length",
1349 	"Bad CR_CONN Rqst Length",
1350 	"Not Association ID",
1351 	"Bad Association ID Length",
1352 	"No Association",
1353 	"Not Connection ID",
1354 	"Bad Connection ID Length",
1355 	"No Connection",
1356 	"Not CR_CONN Cmd",
1357 	"Bad CR_CONN Cmd Length",
1358 	"Bad DISCONN Length",
1359 	"Bad DISCONN Rqst Length",
1360 	"Not DISCONN Cmd",
1361 	"Bad DISCONN Cmd Length",
1362 	"Bad Disconnect Scope",
1363 	"Bad RS Length",
1364 	"Bad RS Rqst Length",
1365 	"Not RS Cmd",
1366 	"Bad RS Cmd Length",
1367 	"Bad RS R_CTL",
1368 	"Bad RS Relative Offset",
1369 };
1370 
1371 static void
1372 nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport,
1373 			struct nvmet_fc_ls_iod *iod)
1374 {
1375 	struct fcnvme_ls_cr_assoc_rqst *rqst =
1376 				(struct fcnvme_ls_cr_assoc_rqst *)iod->rqstbuf;
1377 	struct fcnvme_ls_cr_assoc_acc *acc =
1378 				(struct fcnvme_ls_cr_assoc_acc *)iod->rspbuf;
1379 	struct nvmet_fc_tgt_queue *queue;
1380 	int ret = 0;
1381 
1382 	memset(acc, 0, sizeof(*acc));
1383 
1384 	/*
1385 	 * FC-NVME spec changes. There are initiators sending different
1386 	 * lengths as padding sizes for Create Association Cmd descriptor
1387 	 * was incorrect.
1388 	 * Accept anything of "minimum" length. Assume format per 1.15
1389 	 * spec (with HOSTID reduced to 16 bytes), ignore how long the
1390 	 * trailing pad length is.
1391 	 */
1392 	if (iod->rqstdatalen < FCNVME_LSDESC_CRA_RQST_MINLEN)
1393 		ret = VERR_CR_ASSOC_LEN;
1394 	else if (be32_to_cpu(rqst->desc_list_len) <
1395 			FCNVME_LSDESC_CRA_RQST_MIN_LISTLEN)
1396 		ret = VERR_CR_ASSOC_RQST_LEN;
1397 	else if (rqst->assoc_cmd.desc_tag !=
1398 			cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD))
1399 		ret = VERR_CR_ASSOC_CMD;
1400 	else if (be32_to_cpu(rqst->assoc_cmd.desc_len) <
1401 			FCNVME_LSDESC_CRA_CMD_DESC_MIN_DESCLEN)
1402 		ret = VERR_CR_ASSOC_CMD_LEN;
1403 	else if (!rqst->assoc_cmd.ersp_ratio ||
1404 		 (be16_to_cpu(rqst->assoc_cmd.ersp_ratio) >=
1405 				be16_to_cpu(rqst->assoc_cmd.sqsize)))
1406 		ret = VERR_ERSP_RATIO;
1407 
1408 	else {
1409 		/* new association w/ admin queue */
1410 		iod->assoc = nvmet_fc_alloc_target_assoc(tgtport);
1411 		if (!iod->assoc)
1412 			ret = VERR_ASSOC_ALLOC_FAIL;
1413 		else {
1414 			queue = nvmet_fc_alloc_target_queue(iod->assoc, 0,
1415 					be16_to_cpu(rqst->assoc_cmd.sqsize));
1416 			if (!queue)
1417 				ret = VERR_QUEUE_ALLOC_FAIL;
1418 		}
1419 	}
1420 
1421 	if (ret) {
1422 		dev_err(tgtport->dev,
1423 			"Create Association LS failed: %s\n",
1424 			validation_errors[ret]);
1425 		iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
1426 				NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
1427 				FCNVME_RJT_RC_LOGIC,
1428 				FCNVME_RJT_EXP_NONE, 0);
1429 		return;
1430 	}
1431 
1432 	queue->ersp_ratio = be16_to_cpu(rqst->assoc_cmd.ersp_ratio);
1433 	atomic_set(&queue->connected, 1);
1434 	queue->sqhd = 0;	/* best place to init value */
1435 
1436 	/* format a response */
1437 
1438 	iod->lsreq->rsplen = sizeof(*acc);
1439 
1440 	nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
1441 			fcnvme_lsdesc_len(
1442 				sizeof(struct fcnvme_ls_cr_assoc_acc)),
1443 			FCNVME_LS_CREATE_ASSOCIATION);
1444 	acc->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
1445 	acc->associd.desc_len =
1446 			fcnvme_lsdesc_len(
1447 				sizeof(struct fcnvme_lsdesc_assoc_id));
1448 	acc->associd.association_id =
1449 			cpu_to_be64(nvmet_fc_makeconnid(iod->assoc, 0));
1450 	acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
1451 	acc->connectid.desc_len =
1452 			fcnvme_lsdesc_len(
1453 				sizeof(struct fcnvme_lsdesc_conn_id));
1454 	acc->connectid.connection_id = acc->associd.association_id;
1455 }
1456 
1457 static void
1458 nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport *tgtport,
1459 			struct nvmet_fc_ls_iod *iod)
1460 {
1461 	struct fcnvme_ls_cr_conn_rqst *rqst =
1462 				(struct fcnvme_ls_cr_conn_rqst *)iod->rqstbuf;
1463 	struct fcnvme_ls_cr_conn_acc *acc =
1464 				(struct fcnvme_ls_cr_conn_acc *)iod->rspbuf;
1465 	struct nvmet_fc_tgt_queue *queue;
1466 	int ret = 0;
1467 
1468 	memset(acc, 0, sizeof(*acc));
1469 
1470 	if (iod->rqstdatalen < sizeof(struct fcnvme_ls_cr_conn_rqst))
1471 		ret = VERR_CR_CONN_LEN;
1472 	else if (rqst->desc_list_len !=
1473 			fcnvme_lsdesc_len(
1474 				sizeof(struct fcnvme_ls_cr_conn_rqst)))
1475 		ret = VERR_CR_CONN_RQST_LEN;
1476 	else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
1477 		ret = VERR_ASSOC_ID;
1478 	else if (rqst->associd.desc_len !=
1479 			fcnvme_lsdesc_len(
1480 				sizeof(struct fcnvme_lsdesc_assoc_id)))
1481 		ret = VERR_ASSOC_ID_LEN;
1482 	else if (rqst->connect_cmd.desc_tag !=
1483 			cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD))
1484 		ret = VERR_CR_CONN_CMD;
1485 	else if (rqst->connect_cmd.desc_len !=
1486 			fcnvme_lsdesc_len(
1487 				sizeof(struct fcnvme_lsdesc_cr_conn_cmd)))
1488 		ret = VERR_CR_CONN_CMD_LEN;
1489 	else if (!rqst->connect_cmd.ersp_ratio ||
1490 		 (be16_to_cpu(rqst->connect_cmd.ersp_ratio) >=
1491 				be16_to_cpu(rqst->connect_cmd.sqsize)))
1492 		ret = VERR_ERSP_RATIO;
1493 
1494 	else {
1495 		/* new io queue */
1496 		iod->assoc = nvmet_fc_find_target_assoc(tgtport,
1497 				be64_to_cpu(rqst->associd.association_id));
1498 		if (!iod->assoc)
1499 			ret = VERR_NO_ASSOC;
1500 		else {
1501 			queue = nvmet_fc_alloc_target_queue(iod->assoc,
1502 					be16_to_cpu(rqst->connect_cmd.qid),
1503 					be16_to_cpu(rqst->connect_cmd.sqsize));
1504 			if (!queue)
1505 				ret = VERR_QUEUE_ALLOC_FAIL;
1506 
1507 			/* release get taken in nvmet_fc_find_target_assoc */
1508 			nvmet_fc_tgt_a_put(iod->assoc);
1509 		}
1510 	}
1511 
1512 	if (ret) {
1513 		dev_err(tgtport->dev,
1514 			"Create Connection LS failed: %s\n",
1515 			validation_errors[ret]);
1516 		iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
1517 				NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
1518 				(ret == VERR_NO_ASSOC) ?
1519 					FCNVME_RJT_RC_INV_ASSOC :
1520 					FCNVME_RJT_RC_LOGIC,
1521 				FCNVME_RJT_EXP_NONE, 0);
1522 		return;
1523 	}
1524 
1525 	queue->ersp_ratio = be16_to_cpu(rqst->connect_cmd.ersp_ratio);
1526 	atomic_set(&queue->connected, 1);
1527 	queue->sqhd = 0;	/* best place to init value */
1528 
1529 	/* format a response */
1530 
1531 	iod->lsreq->rsplen = sizeof(*acc);
1532 
1533 	nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
1534 			fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)),
1535 			FCNVME_LS_CREATE_CONNECTION);
1536 	acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
1537 	acc->connectid.desc_len =
1538 			fcnvme_lsdesc_len(
1539 				sizeof(struct fcnvme_lsdesc_conn_id));
1540 	acc->connectid.connection_id =
1541 			cpu_to_be64(nvmet_fc_makeconnid(iod->assoc,
1542 				be16_to_cpu(rqst->connect_cmd.qid)));
1543 }
1544 
1545 static void
1546 nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
1547 			struct nvmet_fc_ls_iod *iod)
1548 {
1549 	struct fcnvme_ls_disconnect_rqst *rqst =
1550 			(struct fcnvme_ls_disconnect_rqst *)iod->rqstbuf;
1551 	struct fcnvme_ls_disconnect_acc *acc =
1552 			(struct fcnvme_ls_disconnect_acc *)iod->rspbuf;
1553 	struct nvmet_fc_tgt_queue *queue = NULL;
1554 	struct nvmet_fc_tgt_assoc *assoc;
1555 	int ret = 0;
1556 	bool del_assoc = false;
1557 
1558 	memset(acc, 0, sizeof(*acc));
1559 
1560 	if (iod->rqstdatalen < sizeof(struct fcnvme_ls_disconnect_rqst))
1561 		ret = VERR_DISCONN_LEN;
1562 	else if (rqst->desc_list_len !=
1563 			fcnvme_lsdesc_len(
1564 				sizeof(struct fcnvme_ls_disconnect_rqst)))
1565 		ret = VERR_DISCONN_RQST_LEN;
1566 	else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
1567 		ret = VERR_ASSOC_ID;
1568 	else if (rqst->associd.desc_len !=
1569 			fcnvme_lsdesc_len(
1570 				sizeof(struct fcnvme_lsdesc_assoc_id)))
1571 		ret = VERR_ASSOC_ID_LEN;
1572 	else if (rqst->discon_cmd.desc_tag !=
1573 			cpu_to_be32(FCNVME_LSDESC_DISCONN_CMD))
1574 		ret = VERR_DISCONN_CMD;
1575 	else if (rqst->discon_cmd.desc_len !=
1576 			fcnvme_lsdesc_len(
1577 				sizeof(struct fcnvme_lsdesc_disconn_cmd)))
1578 		ret = VERR_DISCONN_CMD_LEN;
1579 	else if ((rqst->discon_cmd.scope != FCNVME_DISCONN_ASSOCIATION) &&
1580 			(rqst->discon_cmd.scope != FCNVME_DISCONN_CONNECTION))
1581 		ret = VERR_DISCONN_SCOPE;
1582 	else {
1583 		/* match an active association */
1584 		assoc = nvmet_fc_find_target_assoc(tgtport,
1585 				be64_to_cpu(rqst->associd.association_id));
1586 		iod->assoc = assoc;
1587 		if (assoc) {
1588 			if (rqst->discon_cmd.scope ==
1589 					FCNVME_DISCONN_CONNECTION) {
1590 				queue = nvmet_fc_find_target_queue(tgtport,
1591 						be64_to_cpu(
1592 							rqst->discon_cmd.id));
1593 				if (!queue) {
1594 					nvmet_fc_tgt_a_put(assoc);
1595 					ret = VERR_NO_CONN;
1596 				}
1597 			}
1598 		} else
1599 			ret = VERR_NO_ASSOC;
1600 	}
1601 
1602 	if (ret) {
1603 		dev_err(tgtport->dev,
1604 			"Disconnect LS failed: %s\n",
1605 			validation_errors[ret]);
1606 		iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
1607 				NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
1608 				(ret == VERR_NO_ASSOC) ?
1609 					FCNVME_RJT_RC_INV_ASSOC :
1610 					(ret == VERR_NO_CONN) ?
1611 						FCNVME_RJT_RC_INV_CONN :
1612 						FCNVME_RJT_RC_LOGIC,
1613 				FCNVME_RJT_EXP_NONE, 0);
1614 		return;
1615 	}
1616 
1617 	/* format a response */
1618 
1619 	iod->lsreq->rsplen = sizeof(*acc);
1620 
1621 	nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
1622 			fcnvme_lsdesc_len(
1623 				sizeof(struct fcnvme_ls_disconnect_acc)),
1624 			FCNVME_LS_DISCONNECT);
1625 
1626 
1627 	/* are we to delete a Connection ID (queue) */
1628 	if (queue) {
1629 		int qid = queue->qid;
1630 
1631 		nvmet_fc_delete_target_queue(queue);
1632 
1633 		/* release the get taken by find_target_queue */
1634 		nvmet_fc_tgt_q_put(queue);
1635 
1636 		/* tear association down if io queue terminated */
1637 		if (!qid)
1638 			del_assoc = true;
1639 	}
1640 
1641 	/* release get taken in nvmet_fc_find_target_assoc */
1642 	nvmet_fc_tgt_a_put(iod->assoc);
1643 
1644 	if (del_assoc)
1645 		nvmet_fc_delete_target_assoc(iod->assoc);
1646 }
1647 
1648 
1649 /* *********************** NVME Ctrl Routines **************************** */
1650 
1651 
1652 static void nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req);
1653 
1654 static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops;
1655 
1656 static void
1657 nvmet_fc_xmt_ls_rsp_done(struct nvmefc_tgt_ls_req *lsreq)
1658 {
1659 	struct nvmet_fc_ls_iod *iod = lsreq->nvmet_fc_private;
1660 	struct nvmet_fc_tgtport *tgtport = iod->tgtport;
1661 
1662 	fc_dma_sync_single_for_cpu(tgtport->dev, iod->rspdma,
1663 				NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
1664 	nvmet_fc_free_ls_iod(tgtport, iod);
1665 	nvmet_fc_tgtport_put(tgtport);
1666 }
1667 
1668 static void
1669 nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
1670 				struct nvmet_fc_ls_iod *iod)
1671 {
1672 	int ret;
1673 
1674 	fc_dma_sync_single_for_device(tgtport->dev, iod->rspdma,
1675 				  NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
1676 
1677 	ret = tgtport->ops->xmt_ls_rsp(&tgtport->fc_target_port, iod->lsreq);
1678 	if (ret)
1679 		nvmet_fc_xmt_ls_rsp_done(iod->lsreq);
1680 }
1681 
1682 /*
1683  * Actual processing routine for received FC-NVME LS Requests from the LLD
1684  */
1685 static void
1686 nvmet_fc_handle_ls_rqst(struct nvmet_fc_tgtport *tgtport,
1687 			struct nvmet_fc_ls_iod *iod)
1688 {
1689 	struct fcnvme_ls_rqst_w0 *w0 =
1690 			(struct fcnvme_ls_rqst_w0 *)iod->rqstbuf;
1691 
1692 	iod->lsreq->nvmet_fc_private = iod;
1693 	iod->lsreq->rspbuf = iod->rspbuf;
1694 	iod->lsreq->rspdma = iod->rspdma;
1695 	iod->lsreq->done = nvmet_fc_xmt_ls_rsp_done;
1696 	/* Be preventative. handlers will later set to valid length */
1697 	iod->lsreq->rsplen = 0;
1698 
1699 	iod->assoc = NULL;
1700 
1701 	/*
1702 	 * handlers:
1703 	 *   parse request input, execute the request, and format the
1704 	 *   LS response
1705 	 */
1706 	switch (w0->ls_cmd) {
1707 	case FCNVME_LS_CREATE_ASSOCIATION:
1708 		/* Creates Association and initial Admin Queue/Connection */
1709 		nvmet_fc_ls_create_association(tgtport, iod);
1710 		break;
1711 	case FCNVME_LS_CREATE_CONNECTION:
1712 		/* Creates an IO Queue/Connection */
1713 		nvmet_fc_ls_create_connection(tgtport, iod);
1714 		break;
1715 	case FCNVME_LS_DISCONNECT:
1716 		/* Terminate a Queue/Connection or the Association */
1717 		nvmet_fc_ls_disconnect(tgtport, iod);
1718 		break;
1719 	default:
1720 		iod->lsreq->rsplen = nvmet_fc_format_rjt(iod->rspbuf,
1721 				NVME_FC_MAX_LS_BUFFER_SIZE, w0->ls_cmd,
1722 				FCNVME_RJT_RC_INVAL, FCNVME_RJT_EXP_NONE, 0);
1723 	}
1724 
1725 	nvmet_fc_xmt_ls_rsp(tgtport, iod);
1726 }
1727 
1728 /*
1729  * Actual processing routine for received FC-NVME LS Requests from the LLD
1730  */
1731 static void
1732 nvmet_fc_handle_ls_rqst_work(struct work_struct *work)
1733 {
1734 	struct nvmet_fc_ls_iod *iod =
1735 		container_of(work, struct nvmet_fc_ls_iod, work);
1736 	struct nvmet_fc_tgtport *tgtport = iod->tgtport;
1737 
1738 	nvmet_fc_handle_ls_rqst(tgtport, iod);
1739 }
1740 
1741 
1742 /**
1743  * nvmet_fc_rcv_ls_req - transport entry point called by an LLDD
1744  *                       upon the reception of a NVME LS request.
1745  *
1746  * The nvmet-fc layer will copy payload to an internal structure for
1747  * processing.  As such, upon completion of the routine, the LLDD may
1748  * immediately free/reuse the LS request buffer passed in the call.
1749  *
1750  * If this routine returns error, the LLDD should abort the exchange.
1751  *
1752  * @target_port: pointer to the (registered) target port the LS was
1753  *              received on.
1754  * @lsreq:      pointer to a lsreq request structure to be used to reference
1755  *              the exchange corresponding to the LS.
1756  * @lsreqbuf:   pointer to the buffer containing the LS Request
1757  * @lsreqbuf_len: length, in bytes, of the received LS request
1758  */
1759 int
1760 nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port,
1761 			struct nvmefc_tgt_ls_req *lsreq,
1762 			void *lsreqbuf, u32 lsreqbuf_len)
1763 {
1764 	struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
1765 	struct nvmet_fc_ls_iod *iod;
1766 
1767 	if (lsreqbuf_len > NVME_FC_MAX_LS_BUFFER_SIZE)
1768 		return -E2BIG;
1769 
1770 	if (!nvmet_fc_tgtport_get(tgtport))
1771 		return -ESHUTDOWN;
1772 
1773 	iod = nvmet_fc_alloc_ls_iod(tgtport);
1774 	if (!iod) {
1775 		nvmet_fc_tgtport_put(tgtport);
1776 		return -ENOENT;
1777 	}
1778 
1779 	iod->lsreq = lsreq;
1780 	iod->fcpreq = NULL;
1781 	memcpy(iod->rqstbuf, lsreqbuf, lsreqbuf_len);
1782 	iod->rqstdatalen = lsreqbuf_len;
1783 
1784 	schedule_work(&iod->work);
1785 
1786 	return 0;
1787 }
1788 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_ls_req);
1789 
1790 
1791 /*
1792  * **********************
1793  * Start of FCP handling
1794  * **********************
1795  */
1796 
1797 static int
1798 nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
1799 {
1800 	struct scatterlist *sg;
1801 	unsigned int nent;
1802 
1803 	sg = sgl_alloc(fod->req.transfer_len, GFP_KERNEL, &nent);
1804 	if (!sg)
1805 		goto out;
1806 
1807 	fod->data_sg = sg;
1808 	fod->data_sg_cnt = nent;
1809 	fod->data_sg_cnt = fc_dma_map_sg(fod->tgtport->dev, sg, nent,
1810 				((fod->io_dir == NVMET_FCP_WRITE) ?
1811 					DMA_FROM_DEVICE : DMA_TO_DEVICE));
1812 				/* note: write from initiator perspective */
1813 	fod->next_sg = fod->data_sg;
1814 
1815 	return 0;
1816 
1817 out:
1818 	return NVME_SC_INTERNAL;
1819 }
1820 
1821 static void
1822 nvmet_fc_free_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
1823 {
1824 	if (!fod->data_sg || !fod->data_sg_cnt)
1825 		return;
1826 
1827 	fc_dma_unmap_sg(fod->tgtport->dev, fod->data_sg, fod->data_sg_cnt,
1828 				((fod->io_dir == NVMET_FCP_WRITE) ?
1829 					DMA_FROM_DEVICE : DMA_TO_DEVICE));
1830 	sgl_free(fod->data_sg);
1831 	fod->data_sg = NULL;
1832 	fod->data_sg_cnt = 0;
1833 }
1834 
1835 
1836 static bool
1837 queue_90percent_full(struct nvmet_fc_tgt_queue *q, u32 sqhd)
1838 {
1839 	u32 sqtail, used;
1840 
1841 	/* egad, this is ugly. And sqtail is just a best guess */
1842 	sqtail = atomic_read(&q->sqtail) % q->sqsize;
1843 
1844 	used = (sqtail < sqhd) ? (sqtail + q->sqsize - sqhd) : (sqtail - sqhd);
1845 	return ((used * 10) >= (((u32)(q->sqsize - 1) * 9)));
1846 }
1847 
1848 /*
1849  * Prep RSP payload.
1850  * May be a NVMET_FCOP_RSP or NVMET_FCOP_READDATA_RSP op
1851  */
1852 static void
1853 nvmet_fc_prep_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
1854 				struct nvmet_fc_fcp_iod *fod)
1855 {
1856 	struct nvme_fc_ersp_iu *ersp = &fod->rspiubuf;
1857 	struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
1858 	struct nvme_completion *cqe = &ersp->cqe;
1859 	u32 *cqewd = (u32 *)cqe;
1860 	bool send_ersp = false;
1861 	u32 rsn, rspcnt, xfr_length;
1862 
1863 	if (fod->fcpreq->op == NVMET_FCOP_READDATA_RSP)
1864 		xfr_length = fod->req.transfer_len;
1865 	else
1866 		xfr_length = fod->offset;
1867 
1868 	/*
1869 	 * check to see if we can send a 0's rsp.
1870 	 *   Note: to send a 0's response, the NVME-FC host transport will
1871 	 *   recreate the CQE. The host transport knows: sq id, SQHD (last
1872 	 *   seen in an ersp), and command_id. Thus it will create a
1873 	 *   zero-filled CQE with those known fields filled in. Transport
1874 	 *   must send an ersp for any condition where the cqe won't match
1875 	 *   this.
1876 	 *
1877 	 * Here are the FC-NVME mandated cases where we must send an ersp:
1878 	 *  every N responses, where N=ersp_ratio
1879 	 *  force fabric commands to send ersp's (not in FC-NVME but good
1880 	 *    practice)
1881 	 *  normal cmds: any time status is non-zero, or status is zero
1882 	 *     but words 0 or 1 are non-zero.
1883 	 *  the SQ is 90% or more full
1884 	 *  the cmd is a fused command
1885 	 *  transferred data length not equal to cmd iu length
1886 	 */
1887 	rspcnt = atomic_inc_return(&fod->queue->zrspcnt);
1888 	if (!(rspcnt % fod->queue->ersp_ratio) ||
1889 	    sqe->opcode == nvme_fabrics_command ||
1890 	    xfr_length != fod->req.transfer_len ||
1891 	    (le16_to_cpu(cqe->status) & 0xFFFE) || cqewd[0] || cqewd[1] ||
1892 	    (sqe->flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND)) ||
1893 	    queue_90percent_full(fod->queue, le16_to_cpu(cqe->sq_head)))
1894 		send_ersp = true;
1895 
1896 	/* re-set the fields */
1897 	fod->fcpreq->rspaddr = ersp;
1898 	fod->fcpreq->rspdma = fod->rspdma;
1899 
1900 	if (!send_ersp) {
1901 		memset(ersp, 0, NVME_FC_SIZEOF_ZEROS_RSP);
1902 		fod->fcpreq->rsplen = NVME_FC_SIZEOF_ZEROS_RSP;
1903 	} else {
1904 		ersp->iu_len = cpu_to_be16(sizeof(*ersp)/sizeof(u32));
1905 		rsn = atomic_inc_return(&fod->queue->rsn);
1906 		ersp->rsn = cpu_to_be32(rsn);
1907 		ersp->xfrd_len = cpu_to_be32(xfr_length);
1908 		fod->fcpreq->rsplen = sizeof(*ersp);
1909 	}
1910 
1911 	fc_dma_sync_single_for_device(tgtport->dev, fod->rspdma,
1912 				  sizeof(fod->rspiubuf), DMA_TO_DEVICE);
1913 }
1914 
1915 static void nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq);
1916 
1917 static void
1918 nvmet_fc_abort_op(struct nvmet_fc_tgtport *tgtport,
1919 				struct nvmet_fc_fcp_iod *fod)
1920 {
1921 	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
1922 
1923 	/* data no longer needed */
1924 	nvmet_fc_free_tgt_pgs(fod);
1925 
1926 	/*
1927 	 * if an ABTS was received or we issued the fcp_abort early
1928 	 * don't call abort routine again.
1929 	 */
1930 	/* no need to take lock - lock was taken earlier to get here */
1931 	if (!fod->aborted)
1932 		tgtport->ops->fcp_abort(&tgtport->fc_target_port, fcpreq);
1933 
1934 	nvmet_fc_free_fcp_iod(fod->queue, fod);
1935 }
1936 
1937 static void
1938 nvmet_fc_xmt_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
1939 				struct nvmet_fc_fcp_iod *fod)
1940 {
1941 	int ret;
1942 
1943 	fod->fcpreq->op = NVMET_FCOP_RSP;
1944 	fod->fcpreq->timeout = 0;
1945 
1946 	nvmet_fc_prep_fcp_rsp(tgtport, fod);
1947 
1948 	ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
1949 	if (ret)
1950 		nvmet_fc_abort_op(tgtport, fod);
1951 }
1952 
1953 static void
1954 nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
1955 				struct nvmet_fc_fcp_iod *fod, u8 op)
1956 {
1957 	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
1958 	struct scatterlist *sg = fod->next_sg;
1959 	unsigned long flags;
1960 	u32 remaininglen = fod->req.transfer_len - fod->offset;
1961 	u32 tlen = 0;
1962 	int ret;
1963 
1964 	fcpreq->op = op;
1965 	fcpreq->offset = fod->offset;
1966 	fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC;
1967 
1968 	/*
1969 	 * for next sequence:
1970 	 *  break at a sg element boundary
1971 	 *  attempt to keep sequence length capped at
1972 	 *    NVMET_FC_MAX_SEQ_LENGTH but allow sequence to
1973 	 *    be longer if a single sg element is larger
1974 	 *    than that amount. This is done to avoid creating
1975 	 *    a new sg list to use for the tgtport api.
1976 	 */
1977 	fcpreq->sg = sg;
1978 	fcpreq->sg_cnt = 0;
1979 	while (tlen < remaininglen &&
1980 	       fcpreq->sg_cnt < tgtport->max_sg_cnt &&
1981 	       tlen + sg_dma_len(sg) < NVMET_FC_MAX_SEQ_LENGTH) {
1982 		fcpreq->sg_cnt++;
1983 		tlen += sg_dma_len(sg);
1984 		sg = sg_next(sg);
1985 	}
1986 	if (tlen < remaininglen && fcpreq->sg_cnt == 0) {
1987 		fcpreq->sg_cnt++;
1988 		tlen += min_t(u32, sg_dma_len(sg), remaininglen);
1989 		sg = sg_next(sg);
1990 	}
1991 	if (tlen < remaininglen)
1992 		fod->next_sg = sg;
1993 	else
1994 		fod->next_sg = NULL;
1995 
1996 	fcpreq->transfer_length = tlen;
1997 	fcpreq->transferred_length = 0;
1998 	fcpreq->fcp_error = 0;
1999 	fcpreq->rsplen = 0;
2000 
2001 	/*
2002 	 * If the last READDATA request: check if LLDD supports
2003 	 * combined xfr with response.
2004 	 */
2005 	if ((op == NVMET_FCOP_READDATA) &&
2006 	    ((fod->offset + fcpreq->transfer_length) == fod->req.transfer_len) &&
2007 	    (tgtport->ops->target_features & NVMET_FCTGTFEAT_READDATA_RSP)) {
2008 		fcpreq->op = NVMET_FCOP_READDATA_RSP;
2009 		nvmet_fc_prep_fcp_rsp(tgtport, fod);
2010 	}
2011 
2012 	ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
2013 	if (ret) {
2014 		/*
2015 		 * should be ok to set w/o lock as its in the thread of
2016 		 * execution (not an async timer routine) and doesn't
2017 		 * contend with any clearing action
2018 		 */
2019 		fod->abort = true;
2020 
2021 		if (op == NVMET_FCOP_WRITEDATA) {
2022 			spin_lock_irqsave(&fod->flock, flags);
2023 			fod->writedataactive = false;
2024 			spin_unlock_irqrestore(&fod->flock, flags);
2025 			nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
2026 		} else /* NVMET_FCOP_READDATA or NVMET_FCOP_READDATA_RSP */ {
2027 			fcpreq->fcp_error = ret;
2028 			fcpreq->transferred_length = 0;
2029 			nvmet_fc_xmt_fcp_op_done(fod->fcpreq);
2030 		}
2031 	}
2032 }
2033 
2034 static inline bool
2035 __nvmet_fc_fod_op_abort(struct nvmet_fc_fcp_iod *fod, bool abort)
2036 {
2037 	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
2038 	struct nvmet_fc_tgtport *tgtport = fod->tgtport;
2039 
2040 	/* if in the middle of an io and we need to tear down */
2041 	if (abort) {
2042 		if (fcpreq->op == NVMET_FCOP_WRITEDATA) {
2043 			nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
2044 			return true;
2045 		}
2046 
2047 		nvmet_fc_abort_op(tgtport, fod);
2048 		return true;
2049 	}
2050 
2051 	return false;
2052 }
2053 
2054 /*
2055  * actual done handler for FCP operations when completed by the lldd
2056  */
2057 static void
2058 nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod)
2059 {
2060 	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
2061 	struct nvmet_fc_tgtport *tgtport = fod->tgtport;
2062 	unsigned long flags;
2063 	bool abort;
2064 
2065 	spin_lock_irqsave(&fod->flock, flags);
2066 	abort = fod->abort;
2067 	fod->writedataactive = false;
2068 	spin_unlock_irqrestore(&fod->flock, flags);
2069 
2070 	switch (fcpreq->op) {
2071 
2072 	case NVMET_FCOP_WRITEDATA:
2073 		if (__nvmet_fc_fod_op_abort(fod, abort))
2074 			return;
2075 		if (fcpreq->fcp_error ||
2076 		    fcpreq->transferred_length != fcpreq->transfer_length) {
2077 			spin_lock(&fod->flock);
2078 			fod->abort = true;
2079 			spin_unlock(&fod->flock);
2080 
2081 			nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
2082 			return;
2083 		}
2084 
2085 		fod->offset += fcpreq->transferred_length;
2086 		if (fod->offset != fod->req.transfer_len) {
2087 			spin_lock_irqsave(&fod->flock, flags);
2088 			fod->writedataactive = true;
2089 			spin_unlock_irqrestore(&fod->flock, flags);
2090 
2091 			/* transfer the next chunk */
2092 			nvmet_fc_transfer_fcp_data(tgtport, fod,
2093 						NVMET_FCOP_WRITEDATA);
2094 			return;
2095 		}
2096 
2097 		/* data transfer complete, resume with nvmet layer */
2098 		nvmet_req_execute(&fod->req);
2099 		break;
2100 
2101 	case NVMET_FCOP_READDATA:
2102 	case NVMET_FCOP_READDATA_RSP:
2103 		if (__nvmet_fc_fod_op_abort(fod, abort))
2104 			return;
2105 		if (fcpreq->fcp_error ||
2106 		    fcpreq->transferred_length != fcpreq->transfer_length) {
2107 			nvmet_fc_abort_op(tgtport, fod);
2108 			return;
2109 		}
2110 
2111 		/* success */
2112 
2113 		if (fcpreq->op == NVMET_FCOP_READDATA_RSP) {
2114 			/* data no longer needed */
2115 			nvmet_fc_free_tgt_pgs(fod);
2116 			nvmet_fc_free_fcp_iod(fod->queue, fod);
2117 			return;
2118 		}
2119 
2120 		fod->offset += fcpreq->transferred_length;
2121 		if (fod->offset != fod->req.transfer_len) {
2122 			/* transfer the next chunk */
2123 			nvmet_fc_transfer_fcp_data(tgtport, fod,
2124 						NVMET_FCOP_READDATA);
2125 			return;
2126 		}
2127 
2128 		/* data transfer complete, send response */
2129 
2130 		/* data no longer needed */
2131 		nvmet_fc_free_tgt_pgs(fod);
2132 
2133 		nvmet_fc_xmt_fcp_rsp(tgtport, fod);
2134 
2135 		break;
2136 
2137 	case NVMET_FCOP_RSP:
2138 		if (__nvmet_fc_fod_op_abort(fod, abort))
2139 			return;
2140 		nvmet_fc_free_fcp_iod(fod->queue, fod);
2141 		break;
2142 
2143 	default:
2144 		break;
2145 	}
2146 }
2147 
2148 static void
2149 nvmet_fc_fcp_rqst_op_done_work(struct work_struct *work)
2150 {
2151 	struct nvmet_fc_fcp_iod *fod =
2152 		container_of(work, struct nvmet_fc_fcp_iod, done_work);
2153 
2154 	nvmet_fc_fod_op_done(fod);
2155 }
2156 
2157 static void
2158 nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq)
2159 {
2160 	struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
2161 	struct nvmet_fc_tgt_queue *queue = fod->queue;
2162 
2163 	if (fod->tgtport->ops->target_features & NVMET_FCTGTFEAT_OPDONE_IN_ISR)
2164 		/* context switch so completion is not in ISR context */
2165 		queue_work_on(queue->cpu, queue->work_q, &fod->done_work);
2166 	else
2167 		nvmet_fc_fod_op_done(fod);
2168 }
2169 
2170 /*
2171  * actual completion handler after execution by the nvmet layer
2172  */
2173 static void
2174 __nvmet_fc_fcp_nvme_cmd_done(struct nvmet_fc_tgtport *tgtport,
2175 			struct nvmet_fc_fcp_iod *fod, int status)
2176 {
2177 	struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
2178 	struct nvme_completion *cqe = &fod->rspiubuf.cqe;
2179 	unsigned long flags;
2180 	bool abort;
2181 
2182 	spin_lock_irqsave(&fod->flock, flags);
2183 	abort = fod->abort;
2184 	spin_unlock_irqrestore(&fod->flock, flags);
2185 
2186 	/* if we have a CQE, snoop the last sq_head value */
2187 	if (!status)
2188 		fod->queue->sqhd = cqe->sq_head;
2189 
2190 	if (abort) {
2191 		nvmet_fc_abort_op(tgtport, fod);
2192 		return;
2193 	}
2194 
2195 	/* if an error handling the cmd post initial parsing */
2196 	if (status) {
2197 		/* fudge up a failed CQE status for our transport error */
2198 		memset(cqe, 0, sizeof(*cqe));
2199 		cqe->sq_head = fod->queue->sqhd;	/* echo last cqe sqhd */
2200 		cqe->sq_id = cpu_to_le16(fod->queue->qid);
2201 		cqe->command_id = sqe->command_id;
2202 		cqe->status = cpu_to_le16(status);
2203 	} else {
2204 
2205 		/*
2206 		 * try to push the data even if the SQE status is non-zero.
2207 		 * There may be a status where data still was intended to
2208 		 * be moved
2209 		 */
2210 		if ((fod->io_dir == NVMET_FCP_READ) && (fod->data_sg_cnt)) {
2211 			/* push the data over before sending rsp */
2212 			nvmet_fc_transfer_fcp_data(tgtport, fod,
2213 						NVMET_FCOP_READDATA);
2214 			return;
2215 		}
2216 
2217 		/* writes & no data - fall thru */
2218 	}
2219 
2220 	/* data no longer needed */
2221 	nvmet_fc_free_tgt_pgs(fod);
2222 
2223 	nvmet_fc_xmt_fcp_rsp(tgtport, fod);
2224 }
2225 
2226 
2227 static void
2228 nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req)
2229 {
2230 	struct nvmet_fc_fcp_iod *fod = nvmet_req_to_fod(nvme_req);
2231 	struct nvmet_fc_tgtport *tgtport = fod->tgtport;
2232 
2233 	__nvmet_fc_fcp_nvme_cmd_done(tgtport, fod, 0);
2234 }
2235 
2236 
2237 /*
2238  * Actual processing routine for received FC-NVME I/O Requests from the LLD
2239  */
2240 static void
2241 nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
2242 			struct nvmet_fc_fcp_iod *fod)
2243 {
2244 	struct nvme_fc_cmd_iu *cmdiu = &fod->cmdiubuf;
2245 	u32 xfrlen = be32_to_cpu(cmdiu->data_len);
2246 	int ret;
2247 
2248 	/*
2249 	 * if there is no nvmet mapping to the targetport there
2250 	 * shouldn't be requests. just terminate them.
2251 	 */
2252 	if (!tgtport->pe)
2253 		goto transport_error;
2254 
2255 	/*
2256 	 * Fused commands are currently not supported in the linux
2257 	 * implementation.
2258 	 *
2259 	 * As such, the implementation of the FC transport does not
2260 	 * look at the fused commands and order delivery to the upper
2261 	 * layer until we have both based on csn.
2262 	 */
2263 
2264 	fod->fcpreq->done = nvmet_fc_xmt_fcp_op_done;
2265 
2266 	if (cmdiu->flags & FCNVME_CMD_FLAGS_WRITE) {
2267 		fod->io_dir = NVMET_FCP_WRITE;
2268 		if (!nvme_is_write(&cmdiu->sqe))
2269 			goto transport_error;
2270 	} else if (cmdiu->flags & FCNVME_CMD_FLAGS_READ) {
2271 		fod->io_dir = NVMET_FCP_READ;
2272 		if (nvme_is_write(&cmdiu->sqe))
2273 			goto transport_error;
2274 	} else {
2275 		fod->io_dir = NVMET_FCP_NODATA;
2276 		if (xfrlen)
2277 			goto transport_error;
2278 	}
2279 
2280 	fod->req.cmd = &fod->cmdiubuf.sqe;
2281 	fod->req.rsp = &fod->rspiubuf.cqe;
2282 	fod->req.port = tgtport->pe->port;
2283 
2284 	/* clear any response payload */
2285 	memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf));
2286 
2287 	fod->data_sg = NULL;
2288 	fod->data_sg_cnt = 0;
2289 
2290 	ret = nvmet_req_init(&fod->req,
2291 				&fod->queue->nvme_cq,
2292 				&fod->queue->nvme_sq,
2293 				&nvmet_fc_tgt_fcp_ops);
2294 	if (!ret) {
2295 		/* bad SQE content or invalid ctrl state */
2296 		/* nvmet layer has already called op done to send rsp. */
2297 		return;
2298 	}
2299 
2300 	fod->req.transfer_len = xfrlen;
2301 
2302 	/* keep a running counter of tail position */
2303 	atomic_inc(&fod->queue->sqtail);
2304 
2305 	if (fod->req.transfer_len) {
2306 		ret = nvmet_fc_alloc_tgt_pgs(fod);
2307 		if (ret) {
2308 			nvmet_req_complete(&fod->req, ret);
2309 			return;
2310 		}
2311 	}
2312 	fod->req.sg = fod->data_sg;
2313 	fod->req.sg_cnt = fod->data_sg_cnt;
2314 	fod->offset = 0;
2315 
2316 	if (fod->io_dir == NVMET_FCP_WRITE) {
2317 		/* pull the data over before invoking nvmet layer */
2318 		nvmet_fc_transfer_fcp_data(tgtport, fod, NVMET_FCOP_WRITEDATA);
2319 		return;
2320 	}
2321 
2322 	/*
2323 	 * Reads or no data:
2324 	 *
2325 	 * can invoke the nvmet_layer now. If read data, cmd completion will
2326 	 * push the data
2327 	 */
2328 	nvmet_req_execute(&fod->req);
2329 	return;
2330 
2331 transport_error:
2332 	nvmet_fc_abort_op(tgtport, fod);
2333 }
2334 
2335 /*
2336  * Actual processing routine for received FC-NVME LS Requests from the LLD
2337  */
2338 static void
2339 nvmet_fc_handle_fcp_rqst_work(struct work_struct *work)
2340 {
2341 	struct nvmet_fc_fcp_iod *fod =
2342 		container_of(work, struct nvmet_fc_fcp_iod, work);
2343 	struct nvmet_fc_tgtport *tgtport = fod->tgtport;
2344 
2345 	nvmet_fc_handle_fcp_rqst(tgtport, fod);
2346 }
2347 
2348 /**
2349  * nvmet_fc_rcv_fcp_req - transport entry point called by an LLDD
2350  *                       upon the reception of a NVME FCP CMD IU.
2351  *
2352  * Pass a FC-NVME FCP CMD IU received from the FC link to the nvmet-fc
2353  * layer for processing.
2354  *
2355  * The nvmet_fc layer allocates a local job structure (struct
2356  * nvmet_fc_fcp_iod) from the queue for the io and copies the
2357  * CMD IU buffer to the job structure. As such, on a successful
2358  * completion (returns 0), the LLDD may immediately free/reuse
2359  * the CMD IU buffer passed in the call.
2360  *
2361  * However, in some circumstances, due to the packetized nature of FC
2362  * and the api of the FC LLDD which may issue a hw command to send the
2363  * response, but the LLDD may not get the hw completion for that command
2364  * and upcall the nvmet_fc layer before a new command may be
2365  * asynchronously received - its possible for a command to be received
2366  * before the LLDD and nvmet_fc have recycled the job structure. It gives
2367  * the appearance of more commands received than fits in the sq.
2368  * To alleviate this scenario, a temporary queue is maintained in the
2369  * transport for pending LLDD requests waiting for a queue job structure.
2370  * In these "overrun" cases, a temporary queue element is allocated
2371  * the LLDD request and CMD iu buffer information remembered, and the
2372  * routine returns a -EOVERFLOW status. Subsequently, when a queue job
2373  * structure is freed, it is immediately reallocated for anything on the
2374  * pending request list. The LLDDs defer_rcv() callback is called,
2375  * informing the LLDD that it may reuse the CMD IU buffer, and the io
2376  * is then started normally with the transport.
2377  *
2378  * The LLDD, when receiving an -EOVERFLOW completion status, is to treat
2379  * the completion as successful but must not reuse the CMD IU buffer
2380  * until the LLDD's defer_rcv() callback has been called for the
2381  * corresponding struct nvmefc_tgt_fcp_req pointer.
2382  *
2383  * If there is any other condition in which an error occurs, the
2384  * transport will return a non-zero status indicating the error.
2385  * In all cases other than -EOVERFLOW, the transport has not accepted the
2386  * request and the LLDD should abort the exchange.
2387  *
2388  * @target_port: pointer to the (registered) target port the FCP CMD IU
2389  *              was received on.
2390  * @fcpreq:     pointer to a fcpreq request structure to be used to reference
2391  *              the exchange corresponding to the FCP Exchange.
2392  * @cmdiubuf:   pointer to the buffer containing the FCP CMD IU
2393  * @cmdiubuf_len: length, in bytes, of the received FCP CMD IU
2394  */
2395 int
2396 nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port,
2397 			struct nvmefc_tgt_fcp_req *fcpreq,
2398 			void *cmdiubuf, u32 cmdiubuf_len)
2399 {
2400 	struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
2401 	struct nvme_fc_cmd_iu *cmdiu = cmdiubuf;
2402 	struct nvmet_fc_tgt_queue *queue;
2403 	struct nvmet_fc_fcp_iod *fod;
2404 	struct nvmet_fc_defer_fcp_req *deferfcp;
2405 	unsigned long flags;
2406 
2407 	/* validate iu, so the connection id can be used to find the queue */
2408 	if ((cmdiubuf_len != sizeof(*cmdiu)) ||
2409 			(cmdiu->scsi_id != NVME_CMD_SCSI_ID) ||
2410 			(cmdiu->fc_id != NVME_CMD_FC_ID) ||
2411 			(be16_to_cpu(cmdiu->iu_len) != (sizeof(*cmdiu)/4)))
2412 		return -EIO;
2413 
2414 	queue = nvmet_fc_find_target_queue(tgtport,
2415 				be64_to_cpu(cmdiu->connection_id));
2416 	if (!queue)
2417 		return -ENOTCONN;
2418 
2419 	/*
2420 	 * note: reference taken by find_target_queue
2421 	 * After successful fod allocation, the fod will inherit the
2422 	 * ownership of that reference and will remove the reference
2423 	 * when the fod is freed.
2424 	 */
2425 
2426 	spin_lock_irqsave(&queue->qlock, flags);
2427 
2428 	fod = nvmet_fc_alloc_fcp_iod(queue);
2429 	if (fod) {
2430 		spin_unlock_irqrestore(&queue->qlock, flags);
2431 
2432 		fcpreq->nvmet_fc_private = fod;
2433 		fod->fcpreq = fcpreq;
2434 
2435 		memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len);
2436 
2437 		nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq);
2438 
2439 		return 0;
2440 	}
2441 
2442 	if (!tgtport->ops->defer_rcv) {
2443 		spin_unlock_irqrestore(&queue->qlock, flags);
2444 		/* release the queue lookup reference */
2445 		nvmet_fc_tgt_q_put(queue);
2446 		return -ENOENT;
2447 	}
2448 
2449 	deferfcp = list_first_entry_or_null(&queue->avail_defer_list,
2450 			struct nvmet_fc_defer_fcp_req, req_list);
2451 	if (deferfcp) {
2452 		/* Just re-use one that was previously allocated */
2453 		list_del(&deferfcp->req_list);
2454 	} else {
2455 		spin_unlock_irqrestore(&queue->qlock, flags);
2456 
2457 		/* Now we need to dynamically allocate one */
2458 		deferfcp = kmalloc(sizeof(*deferfcp), GFP_KERNEL);
2459 		if (!deferfcp) {
2460 			/* release the queue lookup reference */
2461 			nvmet_fc_tgt_q_put(queue);
2462 			return -ENOMEM;
2463 		}
2464 		spin_lock_irqsave(&queue->qlock, flags);
2465 	}
2466 
2467 	/* For now, use rspaddr / rsplen to save payload information */
2468 	fcpreq->rspaddr = cmdiubuf;
2469 	fcpreq->rsplen  = cmdiubuf_len;
2470 	deferfcp->fcp_req = fcpreq;
2471 
2472 	/* defer processing till a fod becomes available */
2473 	list_add_tail(&deferfcp->req_list, &queue->pending_cmd_list);
2474 
2475 	/* NOTE: the queue lookup reference is still valid */
2476 
2477 	spin_unlock_irqrestore(&queue->qlock, flags);
2478 
2479 	return -EOVERFLOW;
2480 }
2481 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_req);
2482 
2483 /**
2484  * nvmet_fc_rcv_fcp_abort - transport entry point called by an LLDD
2485  *                       upon the reception of an ABTS for a FCP command
2486  *
2487  * Notify the transport that an ABTS has been received for a FCP command
2488  * that had been given to the transport via nvmet_fc_rcv_fcp_req(). The
2489  * LLDD believes the command is still being worked on
2490  * (template_ops->fcp_req_release() has not been called).
2491  *
2492  * The transport will wait for any outstanding work (an op to the LLDD,
2493  * which the lldd should complete with error due to the ABTS; or the
2494  * completion from the nvmet layer of the nvme command), then will
2495  * stop processing and call the nvmet_fc_rcv_fcp_req() callback to
2496  * return the i/o context to the LLDD.  The LLDD may send the BA_ACC
2497  * to the ABTS either after return from this function (assuming any
2498  * outstanding op work has been terminated) or upon the callback being
2499  * called.
2500  *
2501  * @target_port: pointer to the (registered) target port the FCP CMD IU
2502  *              was received on.
2503  * @fcpreq:     pointer to the fcpreq request structure that corresponds
2504  *              to the exchange that received the ABTS.
2505  */
2506 void
2507 nvmet_fc_rcv_fcp_abort(struct nvmet_fc_target_port *target_port,
2508 			struct nvmefc_tgt_fcp_req *fcpreq)
2509 {
2510 	struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
2511 	struct nvmet_fc_tgt_queue *queue;
2512 	unsigned long flags;
2513 
2514 	if (!fod || fod->fcpreq != fcpreq)
2515 		/* job appears to have already completed, ignore abort */
2516 		return;
2517 
2518 	queue = fod->queue;
2519 
2520 	spin_lock_irqsave(&queue->qlock, flags);
2521 	if (fod->active) {
2522 		/*
2523 		 * mark as abort. The abort handler, invoked upon completion
2524 		 * of any work, will detect the aborted status and do the
2525 		 * callback.
2526 		 */
2527 		spin_lock(&fod->flock);
2528 		fod->abort = true;
2529 		fod->aborted = true;
2530 		spin_unlock(&fod->flock);
2531 	}
2532 	spin_unlock_irqrestore(&queue->qlock, flags);
2533 }
2534 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_abort);
2535 
2536 
2537 struct nvmet_fc_traddr {
2538 	u64	nn;
2539 	u64	pn;
2540 };
2541 
2542 static int
2543 __nvme_fc_parse_u64(substring_t *sstr, u64 *val)
2544 {
2545 	u64 token64;
2546 
2547 	if (match_u64(sstr, &token64))
2548 		return -EINVAL;
2549 	*val = token64;
2550 
2551 	return 0;
2552 }
2553 
2554 /*
2555  * This routine validates and extracts the WWN's from the TRADDR string.
2556  * As kernel parsers need the 0x to determine number base, universally
2557  * build string to parse with 0x prefix before parsing name strings.
2558  */
2559 static int
2560 nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen)
2561 {
2562 	char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1];
2563 	substring_t wwn = { name, &name[sizeof(name)-1] };
2564 	int nnoffset, pnoffset;
2565 
2566 	/* validate if string is one of the 2 allowed formats */
2567 	if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH &&
2568 			!strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) &&
2569 			!strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET],
2570 				"pn-0x", NVME_FC_TRADDR_OXNNLEN)) {
2571 		nnoffset = NVME_FC_TRADDR_OXNNLEN;
2572 		pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET +
2573 						NVME_FC_TRADDR_OXNNLEN;
2574 	} else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH &&
2575 			!strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) &&
2576 			!strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET],
2577 				"pn-", NVME_FC_TRADDR_NNLEN))) {
2578 		nnoffset = NVME_FC_TRADDR_NNLEN;
2579 		pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN;
2580 	} else
2581 		goto out_einval;
2582 
2583 	name[0] = '0';
2584 	name[1] = 'x';
2585 	name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0;
2586 
2587 	memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN);
2588 	if (__nvme_fc_parse_u64(&wwn, &traddr->nn))
2589 		goto out_einval;
2590 
2591 	memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN);
2592 	if (__nvme_fc_parse_u64(&wwn, &traddr->pn))
2593 		goto out_einval;
2594 
2595 	return 0;
2596 
2597 out_einval:
2598 	pr_warn("%s: bad traddr string\n", __func__);
2599 	return -EINVAL;
2600 }
2601 
2602 static int
2603 nvmet_fc_add_port(struct nvmet_port *port)
2604 {
2605 	struct nvmet_fc_tgtport *tgtport;
2606 	struct nvmet_fc_port_entry *pe;
2607 	struct nvmet_fc_traddr traddr = { 0L, 0L };
2608 	unsigned long flags;
2609 	int ret;
2610 
2611 	/* validate the address info */
2612 	if ((port->disc_addr.trtype != NVMF_TRTYPE_FC) ||
2613 	    (port->disc_addr.adrfam != NVMF_ADDR_FAMILY_FC))
2614 		return -EINVAL;
2615 
2616 	/* map the traddr address info to a target port */
2617 
2618 	ret = nvme_fc_parse_traddr(&traddr, port->disc_addr.traddr,
2619 			sizeof(port->disc_addr.traddr));
2620 	if (ret)
2621 		return ret;
2622 
2623 	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
2624 	if (!pe)
2625 		return -ENOMEM;
2626 
2627 	ret = -ENXIO;
2628 	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
2629 	list_for_each_entry(tgtport, &nvmet_fc_target_list, tgt_list) {
2630 		if ((tgtport->fc_target_port.node_name == traddr.nn) &&
2631 		    (tgtport->fc_target_port.port_name == traddr.pn)) {
2632 			/* a FC port can only be 1 nvmet port id */
2633 			if (!tgtport->pe) {
2634 				nvmet_fc_portentry_bind(tgtport, pe, port);
2635 				ret = 0;
2636 			} else
2637 				ret = -EALREADY;
2638 			break;
2639 		}
2640 	}
2641 	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
2642 
2643 	if (ret)
2644 		kfree(pe);
2645 
2646 	return ret;
2647 }
2648 
2649 static void
2650 nvmet_fc_remove_port(struct nvmet_port *port)
2651 {
2652 	struct nvmet_fc_port_entry *pe = port->priv;
2653 
2654 	nvmet_fc_portentry_unbind(pe);
2655 
2656 	kfree(pe);
2657 }
2658 
2659 static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = {
2660 	.owner			= THIS_MODULE,
2661 	.type			= NVMF_TRTYPE_FC,
2662 	.msdbd			= 1,
2663 	.add_port		= nvmet_fc_add_port,
2664 	.remove_port		= nvmet_fc_remove_port,
2665 	.queue_response		= nvmet_fc_fcp_nvme_cmd_done,
2666 	.delete_ctrl		= nvmet_fc_delete_ctrl,
2667 };
2668 
2669 static int __init nvmet_fc_init_module(void)
2670 {
2671 	return nvmet_register_transport(&nvmet_fc_tgt_fcp_ops);
2672 }
2673 
2674 static void __exit nvmet_fc_exit_module(void)
2675 {
2676 	/* sanity check - all lports should be removed */
2677 	if (!list_empty(&nvmet_fc_target_list))
2678 		pr_warn("%s: targetport list not empty\n", __func__);
2679 
2680 	nvmet_unregister_transport(&nvmet_fc_tgt_fcp_ops);
2681 
2682 	ida_destroy(&nvmet_fc_tgtport_cnt);
2683 }
2684 
2685 module_init(nvmet_fc_init_module);
2686 module_exit(nvmet_fc_exit_module);
2687 
2688 MODULE_LICENSE("GPL v2");
2689