/* drivers/nvme/target/fc.c (revision 711aab1d) */
/*
 * Copyright (c) 2016 Avago Technologies.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful.
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
 * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
 * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO
 * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
 * See the GNU General Public License for more details, a copy of which
 * can be found in the file COPYING included with this package
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blk-mq.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <uapi/scsi/fc/fc_fs.h>
#include <uapi/scsi/fc/fc_els.h>

#include "nvmet.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>


/* *************************** Data Structures/Defines ****************** */


#define NVMET_LS_CTX_COUNT		4

/* for this implementation, assume small single frame rqst/rsp */
#define NVME_FC_MAX_LS_BUFFER_SIZE		2048

struct nvmet_fc_tgtport;
struct nvmet_fc_tgt_assoc;

struct nvmet_fc_ls_iod {
	struct nvmefc_tgt_ls_req	*lsreq;
	struct nvmefc_tgt_fcp_req	*fcpreq;	/* only if RS */

	struct list_head		ls_list;	/* tgtport->ls_list */

	struct nvmet_fc_tgtport		*tgtport;
	struct nvmet_fc_tgt_assoc	*assoc;

	u8				*rqstbuf;
	u8				*rspbuf;
	u16				rqstdatalen;
	dma_addr_t			rspdma;

	struct scatterlist		sg[2];

	struct work_struct		work;
} __aligned(sizeof(unsigned long long));

#define NVMET_FC_MAX_SEQ_LENGTH		(256 * 1024)
#define NVMET_FC_MAX_XFR_SGENTS		(NVMET_FC_MAX_SEQ_LENGTH / PAGE_SIZE)

enum nvmet_fcp_datadir {
	NVMET_FCP_NODATA,
	NVMET_FCP_WRITE,
	NVMET_FCP_READ,
	NVMET_FCP_ABORTED,
};

struct nvmet_fc_fcp_iod {
	struct nvmefc_tgt_fcp_req	*fcpreq;

	struct nvme_fc_cmd_iu		cmdiubuf;
	struct nvme_fc_ersp_iu		rspiubuf;
	dma_addr_t			rspdma;
	struct scatterlist		*data_sg;
	int				data_sg_cnt;
	u32				total_length;
	u32				offset;
	enum nvmet_fcp_datadir		io_dir;
	bool				active;
	bool				abort;
	bool				aborted;
	bool				writedataactive;
	spinlock_t			flock;

	struct nvmet_req		req;
	struct work_struct		work;
	struct work_struct		done_work;

	struct nvmet_fc_tgtport		*tgtport;
	struct nvmet_fc_tgt_queue	*queue;

	struct list_head		fcp_list;	/* tgtport->fcp_list */
};

struct nvmet_fc_tgtport {

	struct nvmet_fc_target_port	fc_target_port;

	struct list_head		tgt_list; /* nvmet_fc_target_list */
	struct device			*dev;	/* dev for dma mapping */
	struct nvmet_fc_target_template	*ops;

	struct nvmet_fc_ls_iod		*iod;
	spinlock_t			lock;
	struct list_head		ls_list;
	struct list_head		ls_busylist;
	struct list_head		assoc_list;
	struct ida			assoc_cnt;
	struct nvmet_port		*port;
	struct kref			ref;
	u32				max_sg_cnt;
};

struct nvmet_fc_defer_fcp_req {
	struct list_head		req_list;
	struct nvmefc_tgt_fcp_req	*fcp_req;
};

struct nvmet_fc_tgt_queue {
	bool				ninetypercent;
	u16				qid;
	u16				sqsize;
	u16				ersp_ratio;
	__le16				sqhd;
	int				cpu;
	atomic_t			connected;
	atomic_t			sqtail;
	atomic_t			zrspcnt;
	atomic_t			rsn;
	spinlock_t			qlock;
	struct nvmet_port		*port;
	struct nvmet_cq			nvme_cq;
	struct nvmet_sq			nvme_sq;
	struct nvmet_fc_tgt_assoc	*assoc;
	struct nvmet_fc_fcp_iod		*fod;		/* array of fcp_iods */
	struct list_head		fod_list;
	struct list_head		pending_cmd_list;
	struct list_head		avail_defer_list;
	struct workqueue_struct		*work_q;
	struct kref			ref;
} __aligned(sizeof(unsigned long long));

struct nvmet_fc_tgt_assoc {
	u64				association_id;
	u32				a_id;
	struct nvmet_fc_tgtport		*tgtport;
	struct list_head		a_list;
	struct nvmet_fc_tgt_queue	*queues[NVMET_NR_QUEUES];
	struct kref			ref;
};


static inline int
nvmet_fc_iodnum(struct nvmet_fc_ls_iod *iodptr)
{
	return (iodptr - iodptr->tgtport->iod);
}

static inline int
nvmet_fc_fodnum(struct nvmet_fc_fcp_iod *fodptr)
{
	return (fodptr - fodptr->queue->fod);
}


/*
 * Association and Connection IDs:
 *
 * Association ID will have random number in upper 6 bytes and zero
 *   in lower 2 bytes
 *
 * Connection IDs will be Association ID with QID or'd in lower 2 bytes
 *
 * note: Association ID = Connection ID for queue 0
 */
#define BYTES_FOR_QID			sizeof(u16)
#define BYTES_FOR_QID_SHIFT		(BYTES_FOR_QID * 8)
#define NVMET_FC_QUEUEID_MASK		((u64)((1 << BYTES_FOR_QID_SHIFT) - 1))

static inline u64
nvmet_fc_makeconnid(struct nvmet_fc_tgt_assoc *assoc, u16 qid)
{
	return (assoc->association_id | qid);
}

static inline u64
nvmet_fc_getassociationid(u64 connectionid)
{
	return connectionid & ~NVMET_FC_QUEUEID_MASK;
}

static inline u16
nvmet_fc_getqueueid(u64 connectionid)
{
	return (u16)(connectionid & NVMET_FC_QUEUEID_MASK);
}
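
/*
 * Illustrative sketch (not part of the driver): given a hypothetical
 * association id of 0x123456789abc0000, the helpers above compose and
 * decompose connection ids as follows:
 *
 *	nvmet_fc_makeconnid(assoc, 5)                 == 0x123456789abc0005
 *	nvmet_fc_getassociationid(0x123456789abc0005) == 0x123456789abc0000
 *	nvmet_fc_getqueueid(0x123456789abc0005)       == 5
 */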

static inline struct nvmet_fc_tgtport *
targetport_to_tgtport(struct nvmet_fc_target_port *targetport)
{
	return container_of(targetport, struct nvmet_fc_tgtport,
				 fc_target_port);
}

static inline struct nvmet_fc_fcp_iod *
nvmet_req_to_fod(struct nvmet_req *nvme_req)
{
	return container_of(nvme_req, struct nvmet_fc_fcp_iod, req);
}


/* *************************** Globals **************************** */


static DEFINE_SPINLOCK(nvmet_fc_tgtlock);

static LIST_HEAD(nvmet_fc_target_list);
static DEFINE_IDA(nvmet_fc_tgtport_cnt);


static void nvmet_fc_handle_ls_rqst_work(struct work_struct *work);
static void nvmet_fc_handle_fcp_rqst_work(struct work_struct *work);
static void nvmet_fc_fcp_rqst_op_done_work(struct work_struct *work);
static void nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc);
static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc);
static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue);
static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue);
static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport);
static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);
static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
					struct nvmet_fc_fcp_iod *fod);


/* *********************** FC-NVME DMA Handling **************************** */

/*
 * The fcloop device passes in a NULL device pointer, while real LLDDs
 * pass in a valid device pointer. If NULL is passed to the dma mapping
 * routines, depending on the platform, it may or may not succeed, and
 * may crash.
 *
 * As such:
 * Wrap all the dma routines and check the dev pointer.
 *
 * For simple mappings (those returning just a dma address), we noop
 * them, returning a dma address of 0.
 *
 * On more complex mappings (dma_map_sg), a pseudo routine fills
 * in the scatter list, setting all dma addresses to 0.
 */

static inline dma_addr_t
fc_dma_map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
}

static inline int
fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dev ? dma_mapping_error(dev, dma_addr) : 0;
}

static inline void
fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
	enum dma_data_direction dir)
{
	if (dev)
		dma_unmap_single(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_device(dev, addr, size, dir);
}

/* pseudo dma_map_sg call */
static int
fc_map_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	WARN_ON(nents == 0 || sg[0].length == 0);

	for_each_sg(sg, s, nents, i) {
		s->dma_address = 0L;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		s->dma_length = s->length;
#endif
	}
	return nents;
}

static inline int
fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
}

static inline void
fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	if (dev)
		dma_unmap_sg(dev, sg, nents, dir);
}
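
/*
 * Illustrative sketch (assumption, not driver code): callers never need
 * to special-case fcloop's NULL device; the wrappers above collapse to
 * no-ops when dev == NULL:
 *
 *	dma_addr_t a = fc_dma_map_single(NULL, buf, len, DMA_TO_DEVICE);
 *	(a == 0, and fc_dma_mapping_error(NULL, a) == 0, i.e. "no error")
 */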


/* *********************** FC-NVME Port Management ************************ */


static int
nvmet_fc_alloc_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_ls_iod *iod;
	int i;

	iod = kcalloc(NVMET_LS_CTX_COUNT, sizeof(struct nvmet_fc_ls_iod),
			GFP_KERNEL);
	if (!iod)
		return -ENOMEM;

	tgtport->iod = iod;

	for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
		INIT_WORK(&iod->work, nvmet_fc_handle_ls_rqst_work);
		iod->tgtport = tgtport;
		list_add_tail(&iod->ls_list, &tgtport->ls_list);

		iod->rqstbuf = kcalloc(2, NVME_FC_MAX_LS_BUFFER_SIZE,
			GFP_KERNEL);
		if (!iod->rqstbuf)
			goto out_fail;

		iod->rspbuf = iod->rqstbuf + NVME_FC_MAX_LS_BUFFER_SIZE;

		iod->rspdma = fc_dma_map_single(tgtport->dev, iod->rspbuf,
						NVME_FC_MAX_LS_BUFFER_SIZE,
						DMA_TO_DEVICE);
		if (fc_dma_mapping_error(tgtport->dev, iod->rspdma))
			goto out_fail;
	}

	return 0;

out_fail:
	kfree(iod->rqstbuf);
	list_del(&iod->ls_list);
	for (iod--, i--; i >= 0; iod--, i--) {
		fc_dma_unmap_single(tgtport->dev, iod->rspdma,
				NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
		kfree(iod->rqstbuf);
		list_del(&iod->ls_list);
	}

	kfree(iod);

	return -EFAULT;
}

static void
nvmet_fc_free_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_ls_iod *iod = tgtport->iod;
	int i;

	for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
		fc_dma_unmap_single(tgtport->dev,
				iod->rspdma, NVME_FC_MAX_LS_BUFFER_SIZE,
				DMA_TO_DEVICE);
		kfree(iod->rqstbuf);
		list_del(&iod->ls_list);
	}
	kfree(tgtport->iod);
}

static struct nvmet_fc_ls_iod *
nvmet_fc_alloc_ls_iod(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_ls_iod *iod;
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);
	iod = list_first_entry_or_null(&tgtport->ls_list,
					struct nvmet_fc_ls_iod, ls_list);
	if (iod)
		list_move_tail(&iod->ls_list, &tgtport->ls_busylist);
	spin_unlock_irqrestore(&tgtport->lock, flags);
	return iod;
}


static void
nvmet_fc_free_ls_iod(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);
	list_move(&iod->ls_list, &tgtport->ls_list);
	spin_unlock_irqrestore(&tgtport->lock, flags);
}

static void
nvmet_fc_prep_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
				struct nvmet_fc_tgt_queue *queue)
{
	struct nvmet_fc_fcp_iod *fod = queue->fod;
	int i;

	for (i = 0; i < queue->sqsize; fod++, i++) {
		INIT_WORK(&fod->work, nvmet_fc_handle_fcp_rqst_work);
		INIT_WORK(&fod->done_work, nvmet_fc_fcp_rqst_op_done_work);
		fod->tgtport = tgtport;
		fod->queue = queue;
		fod->active = false;
		fod->abort = false;
		fod->aborted = false;
		fod->fcpreq = NULL;
		list_add_tail(&fod->fcp_list, &queue->fod_list);
		spin_lock_init(&fod->flock);

		fod->rspdma = fc_dma_map_single(tgtport->dev, &fod->rspiubuf,
					sizeof(fod->rspiubuf), DMA_TO_DEVICE);
		if (fc_dma_mapping_error(tgtport->dev, fod->rspdma)) {
			list_del(&fod->fcp_list);
			for (fod--, i--; i >= 0; fod--, i--) {
				fc_dma_unmap_single(tgtport->dev, fod->rspdma,
						sizeof(fod->rspiubuf),
						DMA_TO_DEVICE);
				fod->rspdma = 0L;
				list_del(&fod->fcp_list);
			}

			return;
		}
	}
}

static void
nvmet_fc_destroy_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
				struct nvmet_fc_tgt_queue *queue)
{
	struct nvmet_fc_fcp_iod *fod = queue->fod;
	int i;

	for (i = 0; i < queue->sqsize; fod++, i++) {
		if (fod->rspdma)
			fc_dma_unmap_single(tgtport->dev, fod->rspdma,
				sizeof(fod->rspiubuf), DMA_TO_DEVICE);
	}
}

static struct nvmet_fc_fcp_iod *
nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue)
{
	struct nvmet_fc_fcp_iod *fod;

	lockdep_assert_held(&queue->qlock);

	fod = list_first_entry_or_null(&queue->fod_list,
					struct nvmet_fc_fcp_iod, fcp_list);
	if (fod) {
		list_del(&fod->fcp_list);
		fod->active = true;
		/*
		 * no queue reference is taken, as it was taken by the
		 * queue lookup just prior to the allocation. The iod
		 * will "inherit" that reference.
		 */
	}
	return fod;
}


static void
nvmet_fc_queue_fcp_req(struct nvmet_fc_tgtport *tgtport,
		       struct nvmet_fc_tgt_queue *queue,
		       struct nvmefc_tgt_fcp_req *fcpreq)
{
	struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;

	/*
	 * put all admin cmds on hw queue id 0. All io commands go to
	 * their respective hw queue, selected on a modulo basis (a
	 * worked example follows the function).
	 */
	fcpreq->hwqid = queue->qid ?
			((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0;

	if (tgtport->ops->target_features & NVMET_FCTGTFEAT_CMD_IN_ISR)
		queue_work_on(queue->cpu, queue->work_q, &fod->work);
	else
		nvmet_fc_handle_fcp_rqst(tgtport, fod);
}
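
/*
 * Worked example of the hw queue mapping above (illustrative only):
 * with tgtport->ops->max_hw_queues == 4:
 *	qid 0 (admin) -> hwqid 0
 *	qid 1..4      -> hwqid 0..3
 *	qid 5         -> hwqid (5 - 1) % 4 == 0 (wraps around)
 */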

static void
nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
			struct nvmet_fc_fcp_iod *fod)
{
	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
	struct nvmet_fc_tgtport *tgtport = fod->tgtport;
	struct nvmet_fc_defer_fcp_req *deferfcp;
	unsigned long flags;

	fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma,
				sizeof(fod->rspiubuf), DMA_TO_DEVICE);

	fcpreq->nvmet_fc_private = NULL;

	fod->active = false;
	fod->abort = false;
	fod->aborted = false;
	fod->writedataactive = false;
	fod->fcpreq = NULL;

	tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq);

	spin_lock_irqsave(&queue->qlock, flags);
	deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
				struct nvmet_fc_defer_fcp_req, req_list);
	if (!deferfcp) {
		list_add_tail(&fod->fcp_list, &fod->queue->fod_list);
		spin_unlock_irqrestore(&queue->qlock, flags);

		/* Release reference taken at queue lookup and fod allocation */
		nvmet_fc_tgt_q_put(queue);
		return;
	}

	/* Re-use the fod for the next pending cmd that was deferred */
	list_del(&deferfcp->req_list);

	fcpreq = deferfcp->fcp_req;

	/* deferfcp can be reused for another IO at a later date */
	list_add_tail(&deferfcp->req_list, &queue->avail_defer_list);

	spin_unlock_irqrestore(&queue->qlock, flags);

	/* Save NVME CMD IO in fod */
	memcpy(&fod->cmdiubuf, fcpreq->rspaddr, fcpreq->rsplen);

	/* Setup new fcpreq to be processed */
	fcpreq->rspaddr = NULL;
	fcpreq->rsplen  = 0;
	fcpreq->nvmet_fc_private = fod;
	fod->fcpreq = fcpreq;
	fod->active = true;

	/* inform LLDD IO is now being processed */
	tgtport->ops->defer_rcv(&tgtport->fc_target_port, fcpreq);

	/* Submit deferred IO for processing */
	nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq);

	/*
	 * Leave the queue lookup get reference taken when
	 * fod was originally allocated.
	 */
}
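
/*
 * Lifecycle sketch of the deferral path above (descriptive summary,
 * not new logic):
 *
 *	fod completes
 *	  +-- pending_cmd_list empty:  fod back on fod_list,
 *	  |                            queue reference dropped
 *	  +-- deferred cmd pending:    fod rebound to the deferred
 *	                               fcpreq, LLDD notified via
 *	                               ->defer_rcv(), IO re-queued; the
 *	                               original queue reference is
 *	                               inherited by the new IO
 */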

static int
nvmet_fc_queue_to_cpu(struct nvmet_fc_tgtport *tgtport, int qid)
{
	int cpu, idx, cnt;

	if (tgtport->ops->max_hw_queues == 1)
		return WORK_CPU_UNBOUND;

	/* Simple cpu selection based on qid modulo active cpu count */
	idx = !qid ? 0 : (qid - 1) % num_active_cpus();

	/* find the n'th active cpu */
	for (cpu = 0, cnt = 0; ; ) {
		if (cpu_active(cpu)) {
			if (cnt == idx)
				break;
			cnt++;
		}
		cpu = (cpu + 1) % num_possible_cpus();
	}

	return cpu;
}
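
/*
 * Worked example for nvmet_fc_queue_to_cpu() (illustrative): with
 * active cpus {0, 2, 3} (cpu 1 offline) and qid == 2:
 *	idx = (2 - 1) % 3 == 1, so the loop returns the 2nd active
 *	cpu, i.e. cpu 2.
 */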

static struct nvmet_fc_tgt_queue *
nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
			u16 qid, u16 sqsize)
{
	struct nvmet_fc_tgt_queue *queue;
	unsigned long flags;
	int ret;

	if (qid >= NVMET_NR_QUEUES)
		return NULL;

	queue = kzalloc((sizeof(*queue) +
				(sizeof(struct nvmet_fc_fcp_iod) * sqsize)),
				GFP_KERNEL);
	if (!queue)
		return NULL;

	if (!nvmet_fc_tgt_a_get(assoc))
		goto out_free_queue;

	queue->work_q = alloc_workqueue("ntfc%d.%d.%d", 0, 0,
				assoc->tgtport->fc_target_port.port_num,
				assoc->a_id, qid);
	if (!queue->work_q)
		goto out_a_put;

	queue->fod = (struct nvmet_fc_fcp_iod *)&queue[1];
	queue->qid = qid;
	queue->sqsize = sqsize;
	queue->assoc = assoc;
	queue->port = assoc->tgtport->port;
	queue->cpu = nvmet_fc_queue_to_cpu(assoc->tgtport, qid);
	INIT_LIST_HEAD(&queue->fod_list);
	INIT_LIST_HEAD(&queue->avail_defer_list);
	INIT_LIST_HEAD(&queue->pending_cmd_list);
	atomic_set(&queue->connected, 0);
	atomic_set(&queue->sqtail, 0);
	atomic_set(&queue->rsn, 1);
	atomic_set(&queue->zrspcnt, 0);
	spin_lock_init(&queue->qlock);
	kref_init(&queue->ref);

	nvmet_fc_prep_fcp_iodlist(assoc->tgtport, queue);

	ret = nvmet_sq_init(&queue->nvme_sq);
	if (ret)
		goto out_fail_iodlist;

	WARN_ON(assoc->queues[qid]);
	spin_lock_irqsave(&assoc->tgtport->lock, flags);
	assoc->queues[qid] = queue;
	spin_unlock_irqrestore(&assoc->tgtport->lock, flags);

	return queue;

out_fail_iodlist:
	nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue);
	destroy_workqueue(queue->work_q);
out_a_put:
	nvmet_fc_tgt_a_put(assoc);
out_free_queue:
	kfree(queue);
	return NULL;
}
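
/*
 * Layout note (descriptive only): the queue and its fod array are one
 * allocation; "&queue[1]" points just past the queue struct, where the
 * sqsize nvmet_fc_fcp_iod entries live:
 *
 *	[ struct nvmet_fc_tgt_queue | fod[0] | fod[1] | ... | fod[sqsize-1] ]
 */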


static void
nvmet_fc_tgt_queue_free(struct kref *ref)
{
	struct nvmet_fc_tgt_queue *queue =
		container_of(ref, struct nvmet_fc_tgt_queue, ref);
	unsigned long flags;

	spin_lock_irqsave(&queue->assoc->tgtport->lock, flags);
	queue->assoc->queues[queue->qid] = NULL;
	spin_unlock_irqrestore(&queue->assoc->tgtport->lock, flags);

	nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue);

	nvmet_fc_tgt_a_put(queue->assoc);

	destroy_workqueue(queue->work_q);

	kfree(queue);
}

static void
nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue)
{
	kref_put(&queue->ref, nvmet_fc_tgt_queue_free);
}

static int
nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue)
{
	return kref_get_unless_zero(&queue->ref);
}


static void
nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
{
	struct nvmet_fc_tgtport *tgtport = queue->assoc->tgtport;
	struct nvmet_fc_fcp_iod *fod = queue->fod;
	struct nvmet_fc_defer_fcp_req *deferfcp, *tempptr;
	unsigned long flags;
	int i, writedataactive;
	bool disconnect;

	disconnect = atomic_xchg(&queue->connected, 0);

	spin_lock_irqsave(&queue->qlock, flags);
	/* abort outstanding io's */
	for (i = 0; i < queue->sqsize; fod++, i++) {
		if (fod->active) {
			spin_lock(&fod->flock);
			fod->abort = true;
			writedataactive = fod->writedataactive;
			spin_unlock(&fod->flock);
			/*
			 * only call lldd abort routine if waiting for
			 * writedata. other outstanding ops should finish
			 * on their own.
			 */
			if (writedataactive) {
				spin_lock(&fod->flock);
				fod->aborted = true;
				spin_unlock(&fod->flock);
				tgtport->ops->fcp_abort(
					&tgtport->fc_target_port, fod->fcpreq);
			}
		}
	}

	/* Cleanup defer'ed IOs in queue */
	list_for_each_entry_safe(deferfcp, tempptr, &queue->avail_defer_list,
				req_list) {
		list_del(&deferfcp->req_list);
		kfree(deferfcp);
	}

	for (;;) {
		deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
				struct nvmet_fc_defer_fcp_req, req_list);
		if (!deferfcp)
			break;

		list_del(&deferfcp->req_list);
		spin_unlock_irqrestore(&queue->qlock, flags);

		tgtport->ops->defer_rcv(&tgtport->fc_target_port,
				deferfcp->fcp_req);

		tgtport->ops->fcp_abort(&tgtport->fc_target_port,
				deferfcp->fcp_req);

		tgtport->ops->fcp_req_release(&tgtport->fc_target_port,
				deferfcp->fcp_req);

		kfree(deferfcp);

		spin_lock_irqsave(&queue->qlock, flags);
	}
	spin_unlock_irqrestore(&queue->qlock, flags);

	flush_workqueue(queue->work_q);

	if (disconnect)
		nvmet_sq_destroy(&queue->nvme_sq);

	nvmet_fc_tgt_q_put(queue);
}

static struct nvmet_fc_tgt_queue *
nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport,
				u64 connection_id)
{
	struct nvmet_fc_tgt_assoc *assoc;
	struct nvmet_fc_tgt_queue *queue;
	u64 association_id = nvmet_fc_getassociationid(connection_id);
	u16 qid = nvmet_fc_getqueueid(connection_id);
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);
	list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
		if (association_id == assoc->association_id) {
			queue = assoc->queues[qid];
			if (queue &&
			    (!atomic_read(&queue->connected) ||
			     !nvmet_fc_tgt_q_get(queue)))
				queue = NULL;
			spin_unlock_irqrestore(&tgtport->lock, flags);
			return queue;
		}
	}
	spin_unlock_irqrestore(&tgtport->lock, flags);
	return NULL;
}

static struct nvmet_fc_tgt_assoc *
nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_tgt_assoc *assoc, *tmpassoc;
	unsigned long flags;
	u64 ran;
	int idx;
	bool needrandom = true;

	assoc = kzalloc(sizeof(*assoc), GFP_KERNEL);
	if (!assoc)
		return NULL;

	idx = ida_simple_get(&tgtport->assoc_cnt, 0, 0, GFP_KERNEL);
	if (idx < 0)
		goto out_free_assoc;

	if (!nvmet_fc_tgtport_get(tgtport))
		goto out_ida_put;

	assoc->tgtport = tgtport;
	assoc->a_id = idx;
	INIT_LIST_HEAD(&assoc->a_list);
	kref_init(&assoc->ref);

	while (needrandom) {
		get_random_bytes(&ran, sizeof(ran) - BYTES_FOR_QID);
		ran = ran << BYTES_FOR_QID_SHIFT;

		spin_lock_irqsave(&tgtport->lock, flags);
		needrandom = false;
		list_for_each_entry(tmpassoc, &tgtport->assoc_list, a_list)
			if (ran == tmpassoc->association_id) {
				needrandom = true;
				break;
			}
		if (!needrandom) {
			assoc->association_id = ran;
			list_add_tail(&assoc->a_list, &tgtport->assoc_list);
		}
		spin_unlock_irqrestore(&tgtport->lock, flags);
	}

	return assoc;

out_ida_put:
	ida_simple_remove(&tgtport->assoc_cnt, idx);
out_free_assoc:
	kfree(assoc);
	return NULL;
}
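
/*
 * Sketch of the id generation above (summary): get_random_bytes() fills
 * only 6 of the 8 bytes, and the shift moves them into the upper 6
 * bytes, leaving the low 2 bytes zero for the qid. The loop retries
 * until the value collides with no existing association, e.g.:
 *
 *	ran = 0x00004faa12345678  ->  association_id = 0x4faa123456780000
 */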

static void
nvmet_fc_target_assoc_free(struct kref *ref)
{
	struct nvmet_fc_tgt_assoc *assoc =
		container_of(ref, struct nvmet_fc_tgt_assoc, ref);
	struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);
	list_del(&assoc->a_list);
	spin_unlock_irqrestore(&tgtport->lock, flags);
	ida_simple_remove(&tgtport->assoc_cnt, assoc->a_id);
	kfree(assoc);
	nvmet_fc_tgtport_put(tgtport);
}

static void
nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc)
{
	kref_put(&assoc->ref, nvmet_fc_target_assoc_free);
}

static int
nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc)
{
	return kref_get_unless_zero(&assoc->ref);
}

static void
nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
{
	struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
	struct nvmet_fc_tgt_queue *queue;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&tgtport->lock, flags);
	for (i = NVMET_NR_QUEUES - 1; i >= 0; i--) {
		queue = assoc->queues[i];
		if (queue) {
			if (!nvmet_fc_tgt_q_get(queue))
				continue;
			spin_unlock_irqrestore(&tgtport->lock, flags);
			nvmet_fc_delete_target_queue(queue);
			nvmet_fc_tgt_q_put(queue);
			spin_lock_irqsave(&tgtport->lock, flags);
		}
	}
	spin_unlock_irqrestore(&tgtport->lock, flags);

	nvmet_fc_tgt_a_put(assoc);
}

static struct nvmet_fc_tgt_assoc *
nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport *tgtport,
				u64 association_id)
{
	struct nvmet_fc_tgt_assoc *assoc;
	struct nvmet_fc_tgt_assoc *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);
	list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
		if (association_id == assoc->association_id) {
			ret = assoc;
			nvmet_fc_tgt_a_get(assoc);
			break;
		}
	}
	spin_unlock_irqrestore(&tgtport->lock, flags);

	return ret;
}


/**
 * nvmet_fc_register_targetport - transport entry point called by an
 *                              LLDD to register the existence of a local
 *                              NVME subsystem FC port.
 * @pinfo:     pointer to information about the port to be registered
 * @template:  LLDD entrypoints and operational parameters for the port
 * @dev:       physical hardware device node the port corresponds to. Will
 *             be used for DMA mappings
 * @portptr:   pointer to a target port pointer. Upon success, the routine
 *             will allocate a nvmet_fc_target_port structure and place its
 *             address in the target port pointer. Upon failure, the target
 *             port pointer will be set to NULL.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
			struct nvmet_fc_target_template *template,
			struct device *dev,
			struct nvmet_fc_target_port **portptr)
{
	struct nvmet_fc_tgtport *newrec;
	unsigned long flags;
	int ret, idx;

	if (!template->xmt_ls_rsp || !template->fcp_op ||
	    !template->fcp_abort ||
	    !template->fcp_req_release || !template->targetport_delete ||
	    !template->max_hw_queues || !template->max_sgl_segments ||
	    !template->max_dif_sgl_segments || !template->dma_boundary) {
		ret = -EINVAL;
		goto out_regtgt_failed;
	}

	newrec = kzalloc((sizeof(*newrec) + template->target_priv_sz),
			 GFP_KERNEL);
	if (!newrec) {
		ret = -ENOMEM;
		goto out_regtgt_failed;
	}

	idx = ida_simple_get(&nvmet_fc_tgtport_cnt, 0, 0, GFP_KERNEL);
	if (idx < 0) {
		ret = -ENOSPC;
		goto out_fail_kfree;
	}

	if (!get_device(dev) && dev) {
		ret = -ENODEV;
		goto out_ida_put;
	}

	newrec->fc_target_port.node_name = pinfo->node_name;
	newrec->fc_target_port.port_name = pinfo->port_name;
	newrec->fc_target_port.private = &newrec[1];
	newrec->fc_target_port.port_id = pinfo->port_id;
	newrec->fc_target_port.port_num = idx;
	INIT_LIST_HEAD(&newrec->tgt_list);
	newrec->dev = dev;
	newrec->ops = template;
	spin_lock_init(&newrec->lock);
	INIT_LIST_HEAD(&newrec->ls_list);
	INIT_LIST_HEAD(&newrec->ls_busylist);
	INIT_LIST_HEAD(&newrec->assoc_list);
	kref_init(&newrec->ref);
	ida_init(&newrec->assoc_cnt);
	newrec->max_sg_cnt = min_t(u32, NVMET_FC_MAX_XFR_SGENTS,
					template->max_sgl_segments);

	ret = nvmet_fc_alloc_ls_iodlist(newrec);
	if (ret) {
		ret = -ENOMEM;
		goto out_free_newrec;
	}

	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	list_add_tail(&newrec->tgt_list, &nvmet_fc_target_list);
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);

	*portptr = &newrec->fc_target_port;
	return 0;

out_free_newrec:
	put_device(dev);
out_ida_put:
	ida_simple_remove(&nvmet_fc_tgtport_cnt, idx);
out_fail_kfree:
	kfree(newrec);
out_regtgt_failed:
	*portptr = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(nvmet_fc_register_targetport);
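
/*
 * Minimal usage sketch from an LLDD's point of view (hypothetical
 * names; my_* handlers, pinfo and pdev are assumptions, and the ops
 * listed are exactly those validated above):
 *
 *	static struct nvmet_fc_target_template my_tgt_template = {
 *		.targetport_delete	= my_targetport_delete,
 *		.xmt_ls_rsp		= my_xmt_ls_rsp,
 *		.fcp_op			= my_fcp_op,
 *		.fcp_abort		= my_fcp_abort,
 *		.fcp_req_release	= my_fcp_req_release,
 *		.max_hw_queues		= 4,
 *		.max_sgl_segments	= 256,
 *		.max_dif_sgl_segments	= 256,
 *		.dma_boundary		= 0xFFFFFFFF,
 *	};
 *
 *	struct nvmet_fc_target_port *tgtport;
 *	int err = nvmet_fc_register_targetport(&pinfo, &my_tgt_template,
 *					       &pdev->dev, &tgtport);
 */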


static void
nvmet_fc_free_tgtport(struct kref *ref)
{
	struct nvmet_fc_tgtport *tgtport =
		container_of(ref, struct nvmet_fc_tgtport, ref);
	struct device *dev = tgtport->dev;
	unsigned long flags;

	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	list_del(&tgtport->tgt_list);
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);

	nvmet_fc_free_ls_iodlist(tgtport);

	/* let the LLDD know we've finished tearing it down */
	tgtport->ops->targetport_delete(&tgtport->fc_target_port);

	ida_simple_remove(&nvmet_fc_tgtport_cnt,
			tgtport->fc_target_port.port_num);

	ida_destroy(&tgtport->assoc_cnt);

	kfree(tgtport);

	put_device(dev);
}

static void
nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport)
{
	kref_put(&tgtport->ref, nvmet_fc_free_tgtport);
}

static int
nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport)
{
	return kref_get_unless_zero(&tgtport->ref);
}

static void
__nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_tgt_assoc *assoc, *next;
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);
	list_for_each_entry_safe(assoc, next,
				&tgtport->assoc_list, a_list) {
		if (!nvmet_fc_tgt_a_get(assoc))
			continue;
		spin_unlock_irqrestore(&tgtport->lock, flags);
		nvmet_fc_delete_target_assoc(assoc);
		nvmet_fc_tgt_a_put(assoc);
		spin_lock_irqsave(&tgtport->lock, flags);
	}
	spin_unlock_irqrestore(&tgtport->lock, flags);
}

/*
 * nvmet layer has called to terminate an association
 */
static void
nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
{
	struct nvmet_fc_tgtport *tgtport, *next;
	struct nvmet_fc_tgt_assoc *assoc;
	struct nvmet_fc_tgt_queue *queue;
	unsigned long flags;
	bool found_ctrl = false;

	/* this is a bit ugly, but don't want to make locks layered */
	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	list_for_each_entry_safe(tgtport, next, &nvmet_fc_target_list,
			tgt_list) {
		if (!nvmet_fc_tgtport_get(tgtport))
			continue;
		spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);

		spin_lock_irqsave(&tgtport->lock, flags);
		list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
			queue = assoc->queues[0];
			if (queue && queue->nvme_sq.ctrl == ctrl) {
				if (nvmet_fc_tgt_a_get(assoc))
					found_ctrl = true;
				break;
			}
		}
		spin_unlock_irqrestore(&tgtport->lock, flags);

		nvmet_fc_tgtport_put(tgtport);

		if (found_ctrl) {
			nvmet_fc_delete_target_assoc(assoc);
			nvmet_fc_tgt_a_put(assoc);
			return;
		}

		spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	}
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
}

/**
 * nvmet_fc_unregister_targetport - transport entry point called by an
 *                              LLDD to deregister/remove a previously
 *                              registered local NVME subsystem FC port.
 * @target_port: pointer to the (registered) target port that is to be
 *               deregistered.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port)
{
	struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);

	/* terminate any outstanding associations */
	__nvmet_fc_free_assocs(tgtport);

	nvmet_fc_tgtport_put(tgtport);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmet_fc_unregister_targetport);


/* *********************** FC-NVME LS Handling **************************** */


static void
nvmet_fc_format_rsp_hdr(void *buf, u8 ls_cmd, __be32 desc_len, u8 rqst_ls_cmd)
{
	struct fcnvme_ls_acc_hdr *acc = buf;

	acc->w0.ls_cmd = ls_cmd;
	acc->desc_list_len = desc_len;
	acc->rqst.desc_tag = cpu_to_be32(FCNVME_LSDESC_RQST);
	acc->rqst.desc_len =
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst));
	acc->rqst.w0.ls_cmd = rqst_ls_cmd;
}

static int
nvmet_fc_format_rjt(void *buf, u16 buflen, u8 ls_cmd,
			u8 reason, u8 explanation, u8 vendor)
{
	struct fcnvme_ls_rjt *rjt = buf;

	nvmet_fc_format_rsp_hdr(buf, FCNVME_LSDESC_RQST,
			fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_rjt)),
			ls_cmd);
	rjt->rjt.desc_tag = cpu_to_be32(FCNVME_LSDESC_RJT);
	rjt->rjt.desc_len = fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rjt));
	rjt->rjt.reason_code = reason;
	rjt->rjt.reason_explanation = explanation;
	rjt->rjt.vendor = vendor;

	return sizeof(struct fcnvme_ls_rjt);
}
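
/*
 * Example (illustrative, mirroring the default case of the LS handler
 * later in this file): rejecting an unsupported LS command with no
 * specific explanation fills the reply buffer with an LS_RJT and
 * returns its length for the subsequent xmt_ls_rsp call:
 *
 *	iod->lsreq->rsplen = nvmet_fc_format_rjt(iod->rspbuf,
 *			NVME_FC_MAX_LS_BUFFER_SIZE, w0->ls_cmd,
 *			FCNVME_RJT_RC_INVAL, FCNVME_RJT_EXP_NONE, 0);
 */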

/* Validation Error indexes into the string table below */
enum {
	VERR_NO_ERROR		= 0,
	VERR_CR_ASSOC_LEN	= 1,
	VERR_CR_ASSOC_RQST_LEN	= 2,
	VERR_CR_ASSOC_CMD	= 3,
	VERR_CR_ASSOC_CMD_LEN	= 4,
	VERR_ERSP_RATIO		= 5,
	VERR_ASSOC_ALLOC_FAIL	= 6,
	VERR_QUEUE_ALLOC_FAIL	= 7,
	VERR_CR_CONN_LEN	= 8,
	VERR_CR_CONN_RQST_LEN	= 9,
	VERR_ASSOC_ID		= 10,
	VERR_ASSOC_ID_LEN	= 11,
	VERR_NO_ASSOC		= 12,
	VERR_CONN_ID		= 13,
	VERR_CONN_ID_LEN	= 14,
	VERR_NO_CONN		= 15,
	VERR_CR_CONN_CMD	= 16,
	VERR_CR_CONN_CMD_LEN	= 17,
	VERR_DISCONN_LEN	= 18,
	VERR_DISCONN_RQST_LEN	= 19,
	VERR_DISCONN_CMD	= 20,
	VERR_DISCONN_CMD_LEN	= 21,
	VERR_DISCONN_SCOPE	= 22,
	VERR_RS_LEN		= 23,
	VERR_RS_RQST_LEN	= 24,
	VERR_RS_CMD		= 25,
	VERR_RS_CMD_LEN		= 26,
	VERR_RS_RCTL		= 27,
	VERR_RS_RO		= 28,
};

static char *validation_errors[] = {
	"OK",
	"Bad CR_ASSOC Length",
	"Bad CR_ASSOC Rqst Length",
	"Not CR_ASSOC Cmd",
	"Bad CR_ASSOC Cmd Length",
	"Bad Ersp Ratio",
	"Association Allocation Failed",
	"Queue Allocation Failed",
	"Bad CR_CONN Length",
	"Bad CR_CONN Rqst Length",
	"Not Association ID",
	"Bad Association ID Length",
	"No Association",
	"Not Connection ID",
	"Bad Connection ID Length",
	"No Connection",
	"Not CR_CONN Cmd",
	"Bad CR_CONN Cmd Length",
	"Bad DISCONN Length",
	"Bad DISCONN Rqst Length",
	"Not DISCONN Cmd",
	"Bad DISCONN Cmd Length",
	"Bad Disconnect Scope",
	"Bad RS Length",
	"Bad RS Rqst Length",
	"Not RS Cmd",
	"Bad RS Cmd Length",
	"Bad RS R_CTL",
	"Bad RS Relative Offset",
};

static void
nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	struct fcnvme_ls_cr_assoc_rqst *rqst =
				(struct fcnvme_ls_cr_assoc_rqst *)iod->rqstbuf;
	struct fcnvme_ls_cr_assoc_acc *acc =
				(struct fcnvme_ls_cr_assoc_acc *)iod->rspbuf;
	struct nvmet_fc_tgt_queue *queue;
	int ret = 0;

	memset(acc, 0, sizeof(*acc));

	/*
	 * FC-NVME spec changes. There are initiators sending different
	 * lengths, as the padding size for the Create Association Cmd
	 * descriptor was specified incorrectly.
	 * Accept anything of "minimum" length. Assume the format per the
	 * 1.15 spec (with HOSTID reduced to 16 bytes), and ignore how
	 * long the trailing pad is.
	 */
	if (iod->rqstdatalen < FCNVME_LSDESC_CRA_RQST_MINLEN)
		ret = VERR_CR_ASSOC_LEN;
	else if (be32_to_cpu(rqst->desc_list_len) <
			FCNVME_LSDESC_CRA_RQST_MIN_LISTLEN)
		ret = VERR_CR_ASSOC_RQST_LEN;
	else if (rqst->assoc_cmd.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD))
		ret = VERR_CR_ASSOC_CMD;
	else if (be32_to_cpu(rqst->assoc_cmd.desc_len) <
			FCNVME_LSDESC_CRA_CMD_DESC_MIN_DESCLEN)
		ret = VERR_CR_ASSOC_CMD_LEN;
	else if (!rqst->assoc_cmd.ersp_ratio ||
		 (be16_to_cpu(rqst->assoc_cmd.ersp_ratio) >=
				be16_to_cpu(rqst->assoc_cmd.sqsize)))
		ret = VERR_ERSP_RATIO;

	else {
		/* new association w/ admin queue */
		iod->assoc = nvmet_fc_alloc_target_assoc(tgtport);
		if (!iod->assoc)
			ret = VERR_ASSOC_ALLOC_FAIL;
		else {
			queue = nvmet_fc_alloc_target_queue(iod->assoc, 0,
					be16_to_cpu(rqst->assoc_cmd.sqsize));
			if (!queue)
				ret = VERR_QUEUE_ALLOC_FAIL;
		}
	}

	if (ret) {
		dev_err(tgtport->dev,
			"Create Association LS failed: %s\n",
			validation_errors[ret]);
		iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
				NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
				FCNVME_RJT_RC_LOGIC,
				FCNVME_RJT_EXP_NONE, 0);
		return;
	}

	queue->ersp_ratio = be16_to_cpu(rqst->assoc_cmd.ersp_ratio);
	atomic_set(&queue->connected, 1);
	queue->sqhd = 0;	/* best place to init value */

	/* format a response */

	iod->lsreq->rsplen = sizeof(*acc);

	nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_cr_assoc_acc)),
			FCNVME_LS_CREATE_ASSOCIATION);
	acc->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
	acc->associd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id));
	acc->associd.association_id =
			cpu_to_be64(nvmet_fc_makeconnid(iod->assoc, 0));
	acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
	acc->connectid.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_conn_id));
	acc->connectid.connection_id = acc->associd.association_id;
}

static void
nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	struct fcnvme_ls_cr_conn_rqst *rqst =
				(struct fcnvme_ls_cr_conn_rqst *)iod->rqstbuf;
	struct fcnvme_ls_cr_conn_acc *acc =
				(struct fcnvme_ls_cr_conn_acc *)iod->rspbuf;
	struct nvmet_fc_tgt_queue *queue;
	int ret = 0;

	memset(acc, 0, sizeof(*acc));

	if (iod->rqstdatalen < sizeof(struct fcnvme_ls_cr_conn_rqst))
		ret = VERR_CR_CONN_LEN;
	else if (rqst->desc_list_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_cr_conn_rqst)))
		ret = VERR_CR_CONN_RQST_LEN;
	else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
		ret = VERR_ASSOC_ID;
	else if (rqst->associd.desc_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id)))
		ret = VERR_ASSOC_ID_LEN;
	else if (rqst->connect_cmd.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD))
		ret = VERR_CR_CONN_CMD;
	else if (rqst->connect_cmd.desc_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_cr_conn_cmd)))
		ret = VERR_CR_CONN_CMD_LEN;
	else if (!rqst->connect_cmd.ersp_ratio ||
		 (be16_to_cpu(rqst->connect_cmd.ersp_ratio) >=
				be16_to_cpu(rqst->connect_cmd.sqsize)))
		ret = VERR_ERSP_RATIO;

	else {
		/* new io queue */
		iod->assoc = nvmet_fc_find_target_assoc(tgtport,
				be64_to_cpu(rqst->associd.association_id));
		if (!iod->assoc)
			ret = VERR_NO_ASSOC;
		else {
			queue = nvmet_fc_alloc_target_queue(iod->assoc,
					be16_to_cpu(rqst->connect_cmd.qid),
					be16_to_cpu(rqst->connect_cmd.sqsize));
			if (!queue)
				ret = VERR_QUEUE_ALLOC_FAIL;

			/* release get taken in nvmet_fc_find_target_assoc */
			nvmet_fc_tgt_a_put(iod->assoc);
		}
	}

	if (ret) {
		dev_err(tgtport->dev,
			"Create Connection LS failed: %s\n",
			validation_errors[ret]);
		iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
				NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
				(ret == VERR_NO_ASSOC) ?
					FCNVME_RJT_RC_INV_ASSOC :
					FCNVME_RJT_RC_LOGIC,
				FCNVME_RJT_EXP_NONE, 0);
		return;
	}

	queue->ersp_ratio = be16_to_cpu(rqst->connect_cmd.ersp_ratio);
	atomic_set(&queue->connected, 1);
	queue->sqhd = 0;	/* best place to init value */

	/* format a response */

	iod->lsreq->rsplen = sizeof(*acc);

	nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
			fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)),
			FCNVME_LS_CREATE_CONNECTION);
	acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
	acc->connectid.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_conn_id));
	acc->connectid.connection_id =
			cpu_to_be64(nvmet_fc_makeconnid(iod->assoc,
				be16_to_cpu(rqst->connect_cmd.qid)));
}

static void
nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	struct fcnvme_ls_disconnect_rqst *rqst =
			(struct fcnvme_ls_disconnect_rqst *)iod->rqstbuf;
	struct fcnvme_ls_disconnect_acc *acc =
			(struct fcnvme_ls_disconnect_acc *)iod->rspbuf;
	struct nvmet_fc_tgt_queue *queue = NULL;
	struct nvmet_fc_tgt_assoc *assoc;
	int ret = 0;
	bool del_assoc = false;

	memset(acc, 0, sizeof(*acc));

	if (iod->rqstdatalen < sizeof(struct fcnvme_ls_disconnect_rqst))
		ret = VERR_DISCONN_LEN;
	else if (rqst->desc_list_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_disconnect_rqst)))
		ret = VERR_DISCONN_RQST_LEN;
	else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
		ret = VERR_ASSOC_ID;
	else if (rqst->associd.desc_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id)))
		ret = VERR_ASSOC_ID_LEN;
	else if (rqst->discon_cmd.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_DISCONN_CMD))
		ret = VERR_DISCONN_CMD;
	else if (rqst->discon_cmd.desc_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_disconn_cmd)))
		ret = VERR_DISCONN_CMD_LEN;
	else if ((rqst->discon_cmd.scope != FCNVME_DISCONN_ASSOCIATION) &&
			(rqst->discon_cmd.scope != FCNVME_DISCONN_CONNECTION))
		ret = VERR_DISCONN_SCOPE;
	else {
		/* match an active association */
		assoc = nvmet_fc_find_target_assoc(tgtport,
				be64_to_cpu(rqst->associd.association_id));
		iod->assoc = assoc;
		if (assoc) {
			if (rqst->discon_cmd.scope ==
					FCNVME_DISCONN_CONNECTION) {
				queue = nvmet_fc_find_target_queue(tgtport,
						be64_to_cpu(
							rqst->discon_cmd.id));
				if (!queue) {
					nvmet_fc_tgt_a_put(assoc);
					ret = VERR_NO_CONN;
				}
			}
		} else
			ret = VERR_NO_ASSOC;
	}

	if (ret) {
		dev_err(tgtport->dev,
			"Disconnect LS failed: %s\n",
			validation_errors[ret]);
		iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
				NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
				(ret == VERR_NO_ASSOC) ?
					FCNVME_RJT_RC_INV_ASSOC :
					(ret == VERR_NO_CONN) ?
						FCNVME_RJT_RC_INV_CONN :
						FCNVME_RJT_RC_LOGIC,
				FCNVME_RJT_EXP_NONE, 0);
		return;
	}

	/* format a response */

	iod->lsreq->rsplen = sizeof(*acc);

	nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_disconnect_acc)),
			FCNVME_LS_DISCONNECT);


	/* are we to delete a Connection ID (queue) */
	if (queue) {
		int qid = queue->qid;

		nvmet_fc_delete_target_queue(queue);

		/* release the get taken by find_target_queue */
		nvmet_fc_tgt_q_put(queue);

		/* tear association down if the admin queue (qid 0) terminated */
		if (!qid)
			del_assoc = true;
	}

	/* release get taken in nvmet_fc_find_target_assoc */
	nvmet_fc_tgt_a_put(iod->assoc);

	if (del_assoc)
		nvmet_fc_delete_target_assoc(iod->assoc);
}


/* *********************** NVME Ctrl Routines **************************** */


static void nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req);

static struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops;

static void
nvmet_fc_xmt_ls_rsp_done(struct nvmefc_tgt_ls_req *lsreq)
{
	struct nvmet_fc_ls_iod *iod = lsreq->nvmet_fc_private;
	struct nvmet_fc_tgtport *tgtport = iod->tgtport;

	fc_dma_sync_single_for_cpu(tgtport->dev, iod->rspdma,
				NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
	nvmet_fc_free_ls_iod(tgtport, iod);
	nvmet_fc_tgtport_put(tgtport);
}

static void
nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
				struct nvmet_fc_ls_iod *iod)
{
	int ret;

	fc_dma_sync_single_for_device(tgtport->dev, iod->rspdma,
				  NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);

	ret = tgtport->ops->xmt_ls_rsp(&tgtport->fc_target_port, iod->lsreq);
	if (ret)
		nvmet_fc_xmt_ls_rsp_done(iod->lsreq);
}

/*
 * Actual processing routine for received FC-NVME LS Requests from the LLD
 */
static void
nvmet_fc_handle_ls_rqst(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	struct fcnvme_ls_rqst_w0 *w0 =
			(struct fcnvme_ls_rqst_w0 *)iod->rqstbuf;

	iod->lsreq->nvmet_fc_private = iod;
	iod->lsreq->rspbuf = iod->rspbuf;
	iod->lsreq->rspdma = iod->rspdma;
	iod->lsreq->done = nvmet_fc_xmt_ls_rsp_done;
	/* Be preventative. handlers will later set to valid length */
	iod->lsreq->rsplen = 0;

	iod->assoc = NULL;

	/*
	 * handlers:
	 *   parse request input, execute the request, and format the
	 *   LS response
	 */
	switch (w0->ls_cmd) {
	case FCNVME_LS_CREATE_ASSOCIATION:
		/* Creates Association and initial Admin Queue/Connection */
		nvmet_fc_ls_create_association(tgtport, iod);
		break;
	case FCNVME_LS_CREATE_CONNECTION:
		/* Creates an IO Queue/Connection */
		nvmet_fc_ls_create_connection(tgtport, iod);
		break;
	case FCNVME_LS_DISCONNECT:
		/* Terminate a Queue/Connection or the Association */
		nvmet_fc_ls_disconnect(tgtport, iod);
		break;
	default:
		iod->lsreq->rsplen = nvmet_fc_format_rjt(iod->rspbuf,
				NVME_FC_MAX_LS_BUFFER_SIZE, w0->ls_cmd,
				FCNVME_RJT_RC_INVAL, FCNVME_RJT_EXP_NONE, 0);
	}

	nvmet_fc_xmt_ls_rsp(tgtport, iod);
}

/*
 * Work-queue handler that dispatches a received FC-NVME LS Request to
 * the processing routine above
 */
static void
nvmet_fc_handle_ls_rqst_work(struct work_struct *work)
{
	struct nvmet_fc_ls_iod *iod =
		container_of(work, struct nvmet_fc_ls_iod, work);
	struct nvmet_fc_tgtport *tgtport = iod->tgtport;

	nvmet_fc_handle_ls_rqst(tgtport, iod);
}


/**
 * nvmet_fc_rcv_ls_req - transport entry point called by an LLDD
 *                       upon the reception of an NVME LS request.
 *
 * The nvmet-fc layer will copy payload to an internal structure for
 * processing.  As such, upon completion of the routine, the LLDD may
 * immediately free/reuse the LS request buffer passed in the call.
 *
 * If this routine returns error, the LLDD should abort the exchange.
 *
 * @target_port: pointer to the (registered) target port the LS was
 *               received on.
 * @lsreq:      pointer to a lsreq request structure to be used to reference
 *              the exchange corresponding to the LS.
 * @lsreqbuf:   pointer to the buffer containing the LS Request
 * @lsreqbuf_len: length, in bytes, of the received LS request
 */
int
nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port,
			struct nvmefc_tgt_ls_req *lsreq,
			void *lsreqbuf, u32 lsreqbuf_len)
{
	struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
	struct nvmet_fc_ls_iod *iod;

	if (lsreqbuf_len > NVME_FC_MAX_LS_BUFFER_SIZE)
		return -E2BIG;

	if (!nvmet_fc_tgtport_get(tgtport))
		return -ESHUTDOWN;

	iod = nvmet_fc_alloc_ls_iod(tgtport);
	if (!iod) {
		nvmet_fc_tgtport_put(tgtport);
		return -ENOENT;
	}

	iod->lsreq = lsreq;
	iod->fcpreq = NULL;
	memcpy(iod->rqstbuf, lsreqbuf, lsreqbuf_len);
	iod->rqstdatalen = lsreqbuf_len;

	schedule_work(&iod->work);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmet_fc_rcv_ls_req);
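
/*
 * Usage sketch (hypothetical LLDD code; my_tgtport, my_ls_req, rx_buf,
 * rx_len and my_abort_exchange are assumed LLDD-side names): the LLDD
 * hands a copy of the LS payload to nvmet-fc and may reuse its own
 * buffer as soon as the call returns; on a non-zero return it should
 * abort the exchange:
 *
 *	ret = nvmet_fc_rcv_ls_req(my_tgtport, &my_ls_req, rx_buf, rx_len);
 *	if (ret)
 *		my_abort_exchange(my_ls_req.xchg);
 */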


/*
 * **********************
 * Start of FCP handling
 * **********************
 */

static int
nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
{
	struct scatterlist *sg;
	struct page *page;
	unsigned int nent;
	u32 page_len, length;
	int i = 0;

	length = fod->total_length;
	nent = DIV_ROUND_UP(length, PAGE_SIZE);
	sg = kmalloc_array(nent, sizeof(struct scatterlist), GFP_KERNEL);
	if (!sg)
		goto out;

	sg_init_table(sg, nent);

	while (length) {
		page_len = min_t(u32, length, PAGE_SIZE);

		page = alloc_page(GFP_KERNEL);
		if (!page)
			goto out_free_pages;

		sg_set_page(&sg[i], page, page_len, 0);
		length -= page_len;
		i++;
	}

	fod->data_sg = sg;
	fod->data_sg_cnt = nent;
	fod->data_sg_cnt = fc_dma_map_sg(fod->tgtport->dev, sg, nent,
				((fod->io_dir == NVMET_FCP_WRITE) ?
					DMA_FROM_DEVICE : DMA_TO_DEVICE));
				/* note: write from initiator perspective */

	return 0;

out_free_pages:
	while (i > 0) {
		i--;
		__free_page(sg_page(&sg[i]));
	}
	kfree(sg);
	fod->data_sg = NULL;
	fod->data_sg_cnt = 0;
out:
	return NVME_SC_INTERNAL;
}

static void
nvmet_fc_free_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
{
	struct scatterlist *sg;
	int count;

	if (!fod->data_sg || !fod->data_sg_cnt)
		return;

	fc_dma_unmap_sg(fod->tgtport->dev, fod->data_sg, fod->data_sg_cnt,
				((fod->io_dir == NVMET_FCP_WRITE) ?
					DMA_FROM_DEVICE : DMA_TO_DEVICE));
	for_each_sg(fod->data_sg, sg, fod->data_sg_cnt, count)
		__free_page(sg_page(sg));
	kfree(fod->data_sg);
	fod->data_sg = NULL;
	fod->data_sg_cnt = 0;
}


static bool
queue_90percent_full(struct nvmet_fc_tgt_queue *q, u32 sqhd)
{
	u32 sqtail, used;

	/* egad, this is ugly. And sqtail is just a best guess */
	sqtail = atomic_read(&q->sqtail) % q->sqsize;

	used = (sqtail < sqhd) ? (sqtail + q->sqsize - sqhd) : (sqtail - sqhd);
	return ((used * 10) >= (((u32)(q->sqsize - 1) * 9)));
}
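
/*
 * Worked example for queue_90percent_full() (illustrative): with
 * sqsize == 32, sqhd == 2 and a guessed sqtail of 31:
 *	used = 31 - 2 = 29, and 29 * 10 = 290 >= 31 * 9 = 279,
 * so the queue counts as >= 90% full and an ersp must be sent.
 */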
1760 
1761 /*
1762  * Prep RSP payload.
1763  * May be a NVMET_FCOP_RSP or NVMET_FCOP_READDATA_RSP op
1764  */
1765 static void
1766 nvmet_fc_prep_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
1767 				struct nvmet_fc_fcp_iod *fod)
1768 {
1769 	struct nvme_fc_ersp_iu *ersp = &fod->rspiubuf;
1770 	struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
1771 	struct nvme_completion *cqe = &ersp->cqe;
1772 	u32 *cqewd = (u32 *)cqe;
1773 	bool send_ersp = false;
1774 	u32 rsn, rspcnt, xfr_length;
1775 
1776 	if (fod->fcpreq->op == NVMET_FCOP_READDATA_RSP)
1777 		xfr_length = fod->total_length;
1778 	else
1779 		xfr_length = fod->offset;
1780 
1781 	/*
1782 	 * check to see if we can send a 0's rsp.
1783 	 *   Note: to send a 0's response, the NVME-FC host transport will
1784 	 *   recreate the CQE. The host transport knows: sq id, SQHD (last
1785 	 *   seen in an ersp), and command_id. Thus it will create a
1786 	 *   zero-filled CQE with those known fields filled in. Transport
1787 	 *   must send an ersp for any condition where the cqe won't match
1788 	 *   this.
1789 	 *
1790 	 * Here are the FC-NVME mandated cases where we must send an ersp:
1791 	 *  every N responses, where N=ersp_ratio
1792 	 *  force fabric commands to send ersp's (not in FC-NVME but good
1793 	 *    practice)
1794 	 *  normal cmds: any time status is non-zero, or status is zero
1795 	 *     but words 0 or 1 are non-zero.
1796 	 *  the SQ is 90% or more full
1797 	 *  the cmd is a fused command
1798 	 *  transferred data length not equal to cmd iu length
1799 	 */
1800 	rspcnt = atomic_inc_return(&fod->queue->zrspcnt);
1801 	if (!(rspcnt % fod->queue->ersp_ratio) ||
1802 	    sqe->opcode == nvme_fabrics_command ||
1803 	    xfr_length != fod->total_length ||
1804 	    (le16_to_cpu(cqe->status) & 0xFFFE) || cqewd[0] || cqewd[1] ||
1805 	    (sqe->flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND)) ||
1806 	    queue_90percent_full(fod->queue, le16_to_cpu(cqe->sq_head)))
1807 		send_ersp = true;
1808 
1809 	/* re-set the fields */
1810 	fod->fcpreq->rspaddr = ersp;
1811 	fod->fcpreq->rspdma = fod->rspdma;
1812 
1813 	if (!send_ersp) {
1814 		memset(ersp, 0, NVME_FC_SIZEOF_ZEROS_RSP);
1815 		fod->fcpreq->rsplen = NVME_FC_SIZEOF_ZEROS_RSP;
1816 	} else {
1817 		ersp->iu_len = cpu_to_be16(sizeof(*ersp)/sizeof(u32));
1818 		rsn = atomic_inc_return(&fod->queue->rsn);
1819 		ersp->rsn = cpu_to_be32(rsn);
1820 		ersp->xfrd_len = cpu_to_be32(xfr_length);
1821 		fod->fcpreq->rsplen = sizeof(*ersp);
1822 	}
1823 
1824 	fc_dma_sync_single_for_device(tgtport->dev, fod->rspdma,
1825 				  sizeof(fod->rspiubuf), DMA_TO_DEVICE);
1826 }
1827 
1828 static void nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq);
1829 
1830 static void
1831 nvmet_fc_abort_op(struct nvmet_fc_tgtport *tgtport,
1832 				struct nvmet_fc_fcp_iod *fod)
1833 {
1834 	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
1835 
1836 	/* data no longer needed */
1837 	nvmet_fc_free_tgt_pgs(fod);
1838 
1839 	/*
1840 	 * if an ABTS was received or we issued the fcp_abort early
1841 	 * don't call abort routine again.
1842 	 */
1843 	/* no need to take lock - lock was taken earlier to get here */
1844 	if (!fod->aborted)
1845 		tgtport->ops->fcp_abort(&tgtport->fc_target_port, fcpreq);
1846 
1847 	nvmet_fc_free_fcp_iod(fod->queue, fod);
1848 }
1849 
1850 static void
1851 nvmet_fc_xmt_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
1852 				struct nvmet_fc_fcp_iod *fod)
1853 {
1854 	int ret;
1855 
1856 	fod->fcpreq->op = NVMET_FCOP_RSP;
1857 	fod->fcpreq->timeout = 0;
1858 
1859 	nvmet_fc_prep_fcp_rsp(tgtport, fod);
1860 
1861 	ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
1862 	if (ret)
1863 		nvmet_fc_abort_op(tgtport, fod);
1864 }
1865 
1866 static void
1867 nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
1868 				struct nvmet_fc_fcp_iod *fod, u8 op)
1869 {
1870 	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
1871 	unsigned long flags;
1872 	u32 tlen;
1873 	int ret;
1874 
1875 	fcpreq->op = op;
1876 	fcpreq->offset = fod->offset;
1877 	fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC;
1878 
1879 	tlen = min_t(u32, tgtport->max_sg_cnt * PAGE_SIZE,
1880 			(fod->total_length - fod->offset));
1881 	fcpreq->transfer_length = tlen;
1882 	fcpreq->transferred_length = 0;
1883 	fcpreq->fcp_error = 0;
1884 	fcpreq->rsplen = 0;
1885 
1886 	fcpreq->sg = &fod->data_sg[fod->offset / PAGE_SIZE];
1887 	fcpreq->sg_cnt = DIV_ROUND_UP(tlen, PAGE_SIZE);
1888 
1889 	/*
1890 	 * If this is the last READDATA request: check whether the LLDD
1891 	 * supports combining the data transfer with the response.
1892 	 */
1893 	if ((op == NVMET_FCOP_READDATA) &&
1894 	    ((fod->offset + fcpreq->transfer_length) == fod->total_length) &&
1895 	    (tgtport->ops->target_features & NVMET_FCTGTFEAT_READDATA_RSP)) {
1896 		fcpreq->op = NVMET_FCOP_READDATA_RSP;
1897 		nvmet_fc_prep_fcp_rsp(tgtport, fod);
1898 	}
1899 
1900 	ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
1901 	if (ret) {
1902 		/*
1903 		 * Should be ok to set without the lock as it's in the
1904 		 * thread of execution (not an async timer routine) and
1905 		 * doesn't contend with any clearing action.
1906 		 */
1907 		fod->abort = true;
1908 
1909 		if (op == NVMET_FCOP_WRITEDATA) {
1910 			spin_lock_irqsave(&fod->flock, flags);
1911 			fod->writedataactive = false;
1912 			spin_unlock_irqrestore(&fod->flock, flags);
1913 			nvmet_req_complete(&fod->req,
1914 					NVME_SC_FC_TRANSPORT_ERROR);
1915 		} else /* NVMET_FCOP_READDATA or NVMET_FCOP_READDATA_RSP */ {
1916 			fcpreq->fcp_error = ret;
1917 			fcpreq->transferred_length = 0;
1918 			nvmet_fc_xmt_fcp_op_done(fod->fcpreq);
1919 		}
1920 	}
1921 }
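
/*
 * Editorial sketch (not part of the original source): the chunking
 * arithmetic used by nvmet_fc_transfer_fcp_data() above, modeled
 * standalone. A transfer moves at most max_sg_cnt * PAGE_SIZE bytes per
 * FC sequence, and the scatterlist is indexed by whole pages at the
 * current offset. The 4K page size and 64-entry sg limit below are
 * assumptions for illustration only.
 */
#include <stdio.h>

#define EX_PAGE_SIZE	4096u
#define EX_MAX_SG_CNT	64u	/* assumed LLDD scatterlist limit */

int main(void)
{
	unsigned int total = 1024u * 1024u;	/* a 1 MiB write */
	unsigned int offset = 0;

	while (offset < total) {
		/* mirrors: tlen = min(max_sg_cnt * PAGE_SIZE, remaining) */
		unsigned int remaining = total - offset;
		unsigned int max_chunk = EX_MAX_SG_CNT * EX_PAGE_SIZE;
		unsigned int tlen = remaining < max_chunk ?
						remaining : max_chunk;

		/* sg index and count, as computed in the function above */
		printf("chunk @%7u: len %6u sg[%3u] sg_cnt %2u\n",
		       offset, tlen, offset / EX_PAGE_SIZE,
		       (tlen + EX_PAGE_SIZE - 1) / EX_PAGE_SIZE);
		offset += tlen;
	}
	return 0;
}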
1922 
1923 static inline bool
1924 __nvmet_fc_fod_op_abort(struct nvmet_fc_fcp_iod *fod, bool abort)
1925 {
1926 	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
1927 	struct nvmet_fc_tgtport *tgtport = fod->tgtport;
1928 
1929 	/* if in the middle of an io and we need to tear down */
1930 	if (abort) {
1931 		if (fcpreq->op == NVMET_FCOP_WRITEDATA) {
1932 			nvmet_req_complete(&fod->req,
1933 					NVME_SC_FC_TRANSPORT_ERROR);
1934 			return true;
1935 		}
1936 
1937 		nvmet_fc_abort_op(tgtport, fod);
1938 		return true;
1939 	}
1940 
1941 	return false;
1942 }
1943 
1944 /*
1945  * actual done handler for FCP operations when completed by the lldd
1946  */
1947 static void
1948 nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod)
1949 {
1950 	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
1951 	struct nvmet_fc_tgtport *tgtport = fod->tgtport;
1952 	unsigned long flags;
1953 	bool abort;
1954 
1955 	spin_lock_irqsave(&fod->flock, flags);
1956 	abort = fod->abort;
1957 	fod->writedataactive = false;
1958 	spin_unlock_irqrestore(&fod->flock, flags);
1959 
1960 	switch (fcpreq->op) {
1961 
1962 	case NVMET_FCOP_WRITEDATA:
1963 		if (__nvmet_fc_fod_op_abort(fod, abort))
1964 			return;
1965 		if (fcpreq->fcp_error ||
1966 		    fcpreq->transferred_length != fcpreq->transfer_length) {
1967 			spin_lock(&fod->flock);
1968 			fod->abort = true;
1969 			spin_unlock(&fod->flock);
1970 
1971 			nvmet_req_complete(&fod->req,
1972 					NVME_SC_FC_TRANSPORT_ERROR);
1973 			return;
1974 		}
1975 
1976 		fod->offset += fcpreq->transferred_length;
1977 		if (fod->offset != fod->total_length) {
1978 			spin_lock_irqsave(&fod->flock, flags);
1979 			fod->writedataactive = true;
1980 			spin_unlock_irqrestore(&fod->flock, flags);
1981 
1982 			/* transfer the next chunk */
1983 			nvmet_fc_transfer_fcp_data(tgtport, fod,
1984 						NVMET_FCOP_WRITEDATA);
1985 			return;
1986 		}
1987 
1988 		/* data transfer complete, resume with nvmet layer */
1989 
1990 		fod->req.execute(&fod->req);
1991 
1992 		break;
1993 
1994 	case NVMET_FCOP_READDATA:
1995 	case NVMET_FCOP_READDATA_RSP:
1996 		if (__nvmet_fc_fod_op_abort(fod, abort))
1997 			return;
1998 		if (fcpreq->fcp_error ||
1999 		    fcpreq->transferred_length != fcpreq->transfer_length) {
2000 			nvmet_fc_abort_op(tgtport, fod);
2001 			return;
2002 		}
2003 
2004 		/* success */
2005 
2006 		if (fcpreq->op == NVMET_FCOP_READDATA_RSP) {
2007 			/* data no longer needed */
2008 			nvmet_fc_free_tgt_pgs(fod);
2009 			nvmet_fc_free_fcp_iod(fod->queue, fod);
2010 			return;
2011 		}
2012 
2013 		fod->offset += fcpreq->transferred_length;
2014 		if (fod->offset != fod->total_length) {
2015 			/* transfer the next chunk */
2016 			nvmet_fc_transfer_fcp_data(tgtport, fod,
2017 						NVMET_FCOP_READDATA);
2018 			return;
2019 		}
2020 
2021 		/* data transfer complete, send response */
2022 
2023 		/* data no longer needed */
2024 		nvmet_fc_free_tgt_pgs(fod);
2025 
2026 		nvmet_fc_xmt_fcp_rsp(tgtport, fod);
2027 
2028 		break;
2029 
2030 	case NVMET_FCOP_RSP:
2031 		if (__nvmet_fc_fod_op_abort(fod, abort))
2032 			return;
2033 		nvmet_fc_free_fcp_iod(fod->queue, fod);
2034 		break;
2035 
2036 	default:
2037 		break;
2038 	}
2039 }
2040 
2041 static void
2042 nvmet_fc_fcp_rqst_op_done_work(struct work_struct *work)
2043 {
2044 	struct nvmet_fc_fcp_iod *fod =
2045 		container_of(work, struct nvmet_fc_fcp_iod, done_work);
2046 
2047 	nvmet_fc_fod_op_done(fod);
2048 }
2049 
2050 static void
2051 nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq)
2052 {
2053 	struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
2054 	struct nvmet_fc_tgt_queue *queue = fod->queue;
2055 
2056 	if (fod->tgtport->ops->target_features & NVMET_FCTGTFEAT_OPDONE_IN_ISR)
2057 		/* context switch so completion is not in ISR context */
2058 		queue_work_on(queue->cpu, queue->work_q, &fod->done_work);
2059 	else
2060 		nvmet_fc_fod_op_done(fod);
2061 }
2062 
2063 /*
2064  * actual completion handler after execution by the nvmet layer
2065  */
2066 static void
2067 __nvmet_fc_fcp_nvme_cmd_done(struct nvmet_fc_tgtport *tgtport,
2068 			struct nvmet_fc_fcp_iod *fod, int status)
2069 {
2070 	struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
2071 	struct nvme_completion *cqe = &fod->rspiubuf.cqe;
2072 	unsigned long flags;
2073 	bool abort;
2074 
2075 	spin_lock_irqsave(&fod->flock, flags);
2076 	abort = fod->abort;
2077 	spin_unlock_irqrestore(&fod->flock, flags);
2078 
2079 	/* if we have a CQE, snoop the last sq_head value */
2080 	if (!status)
2081 		fod->queue->sqhd = cqe->sq_head;
2082 
2083 	if (abort) {
2084 		nvmet_fc_abort_op(tgtport, fod);
2085 		return;
2086 	}
2087 
2088 	/* if an error occurred handling the cmd after initial parsing */
2089 	if (status) {
2090 		/* fudge up a failed CQE status for our transport error */
2091 		memset(cqe, 0, sizeof(*cqe));
2092 		cqe->sq_head = fod->queue->sqhd;	/* echo last cqe sqhd */
2093 		cqe->sq_id = cpu_to_le16(fod->queue->qid);
2094 		cqe->command_id = sqe->command_id;
2095 		cqe->status = cpu_to_le16(status);
2096 	} else {
2097 
2098 		/*
2099 		 * Try to push the data even if the SQE status is
2100 		 * non-zero. Some statuses still expect data to be
2101 		 * moved.
2102 		 */
2103 		if ((fod->io_dir == NVMET_FCP_READ) && (fod->data_sg_cnt)) {
2104 			/* push the data over before sending rsp */
2105 			nvmet_fc_transfer_fcp_data(tgtport, fod,
2106 						NVMET_FCOP_READDATA);
2107 			return;
2108 		}
2109 
2110 		/* writes & no data - fall thru */
2111 	}
2112 
2113 	/* data no longer needed */
2114 	nvmet_fc_free_tgt_pgs(fod);
2115 
2116 	nvmet_fc_xmt_fcp_rsp(tgtport, fod);
2117 }
2118 
2119 
2120 static void
2121 nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req)
2122 {
2123 	struct nvmet_fc_fcp_iod *fod = nvmet_req_to_fod(nvme_req);
2124 	struct nvmet_fc_tgtport *tgtport = fod->tgtport;
2125 
2126 	__nvmet_fc_fcp_nvme_cmd_done(tgtport, fod, 0);
2127 }
2128 
2129 
2130 /*
2131  * Actual processing routine for a received FC-NVME FCP command from the LLDD
2132  */
2133 static void
2134 nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
2135 			struct nvmet_fc_fcp_iod *fod)
2136 {
2137 	struct nvme_fc_cmd_iu *cmdiu = &fod->cmdiubuf;
2138 	int ret;
2139 
2140 	/*
2141 	 * Fused commands are currently not supported in the Linux
2142 	 * implementation.
2143 	 *
2144 	 * As such, the FC transport implementation makes no attempt to
2145 	 * collect a fused command pair and order its delivery to the
2146 	 * upper layer by CSN once both halves have arrived.
2147 	 */
2148 
2149 	fod->fcpreq->done = nvmet_fc_xmt_fcp_op_done;
2150 
2151 	fod->total_length = be32_to_cpu(cmdiu->data_len);
2152 	if (cmdiu->flags & FCNVME_CMD_FLAGS_WRITE) {
2153 		fod->io_dir = NVMET_FCP_WRITE;
2154 		if (!nvme_is_write(&cmdiu->sqe))
2155 			goto transport_error;
2156 	} else if (cmdiu->flags & FCNVME_CMD_FLAGS_READ) {
2157 		fod->io_dir = NVMET_FCP_READ;
2158 		if (nvme_is_write(&cmdiu->sqe))
2159 			goto transport_error;
2160 	} else {
2161 		fod->io_dir = NVMET_FCP_NODATA;
2162 		if (fod->total_length)
2163 			goto transport_error;
2164 	}
2165 
2166 	fod->req.cmd = &fod->cmdiubuf.sqe;
2167 	fod->req.rsp = &fod->rspiubuf.cqe;
2168 	fod->req.port = fod->queue->port;
2169 
2170 	/* ensure nvmet handlers will set cmd handler callback */
2171 	fod->req.execute = NULL;
2172 
2173 	/* clear any response payload */
2174 	memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf));
2175 
2176 	fod->data_sg = NULL;
2177 	fod->data_sg_cnt = 0;
2178 
2179 	ret = nvmet_req_init(&fod->req,
2180 				&fod->queue->nvme_cq,
2181 				&fod->queue->nvme_sq,
2182 				&nvmet_fc_tgt_fcp_ops);
2183 	if (!ret) {
2184 		/* bad SQE content or invalid ctrl state */
2185 		/* nvmet layer has already called op done to send rsp. */
2186 		return;
2187 	}
2188 
2189 	/* keep a running counter of tail position */
2190 	atomic_inc(&fod->queue->sqtail);
2191 
2192 	if (fod->total_length) {
2193 		ret = nvmet_fc_alloc_tgt_pgs(fod);
2194 		if (ret) {
2195 			nvmet_req_complete(&fod->req, ret);
2196 			return;
2197 		}
2198 	}
2199 	fod->req.sg = fod->data_sg;
2200 	fod->req.sg_cnt = fod->data_sg_cnt;
2201 	fod->offset = 0;
2202 
2203 	if (fod->io_dir == NVMET_FCP_WRITE) {
2204 		/* pull the data over before invoking nvmet layer */
2205 		nvmet_fc_transfer_fcp_data(tgtport, fod, NVMET_FCOP_WRITEDATA);
2206 		return;
2207 	}
2208 
2209 	/*
2210 	 * Reads or no data:
2211 	 *
2212 	 * can invoke the nvmet_layer now. If read data, cmd completion will
2213 	 * push the data
2214 	 */
2215 
2216 	fod->req.execute(&fod->req);
2217 
2218 	return;
2219 
2220 transport_error:
2221 	nvmet_fc_abort_op(tgtport, fod);
2222 }
2223 
2224 /*
2225  * Work-context wrapper for processing a received FC-NVME FCP command
2226  */
2227 static void
2228 nvmet_fc_handle_fcp_rqst_work(struct work_struct *work)
2229 {
2230 	struct nvmet_fc_fcp_iod *fod =
2231 		container_of(work, struct nvmet_fc_fcp_iod, work);
2232 	struct nvmet_fc_tgtport *tgtport = fod->tgtport;
2233 
2234 	nvmet_fc_handle_fcp_rqst(tgtport, fod);
2235 }
2236 
2237 /**
2238  * nvmet_fc_rcv_fcp_req - transport entry point called by an LLDD
2239  *                       upon the reception of an NVME FCP CMD IU.
2240  *
2241  * Pass a FC-NVME FCP CMD IU received from the FC link to the nvmet-fc
2242  * layer for processing.
2243  *
2244  * The nvmet_fc layer allocates a local job structure (struct
2245  * nvmet_fc_fcp_iod) from the queue for the io and copies the
2246  * CMD IU buffer to the job structure. As such, on a successful
2247  * completion (returns 0), the LLDD may immediately free/reuse
2248  * the CMD IU buffer passed in the call.
2249  *
2250  * However, in some circumstances, due to the packetized nature of FC
2251  * and the api of the FC LLDD - which may issue a hw command to send
2252  * the response, yet not see the hw completion for that command (and
2253  * upcall the nvmet_fc layer) before a new command is asynchronously
2254  * received - it's possible for a command to arrive before the LLDD
2255  * and nvmet_fc have recycled the job structure. This gives the
2256  * appearance of more commands received than fit in the sq.
2257  * To alleviate this scenario, a temporary queue is maintained in the
2258  * transport for pending LLDD requests waiting for a queue job structure.
2259  * In these "overrun" cases, a temporary queue element is allocated,
2260  * the LLDD request and CMD IU buffer information are remembered, and
2261  * the routine returns -EOVERFLOW. Subsequently, when a queue job
2262  * structure is freed, it is immediately reallocated for anything on the
2263  * pending request list. The LLDD's defer_rcv() callback is called,
2264  * informing the LLDD that it may reuse the CMD IU buffer, and the io
2265  * is then started normally (a usage sketch follows this function).
2266  *
2267  * The LLDD, when receiving an -EOVERFLOW completion status, is to treat
2268  * the completion as successful but must not reuse the CMD IU buffer
2269  * until the LLDD's defer_rcv() callback has been called for the
2270  * corresponding struct nvmefc_tgt_fcp_req pointer.
2271  *
2272  * If there is any other condition in which an error occurs, the
2273  * transport will return a non-zero status indicating the error.
2274  * In all cases other than -EOVERFLOW, the transport has not accepted the
2275  * request and the LLDD should abort the exchange.
2276  *
2277  * @target_port: pointer to the (registered) target port the FCP CMD IU
2278  *              was received on.
2279  * @fcpreq:     pointer to a fcpreq request structure to be used to reference
2280  *              the exchange corresponding to the FCP Exchange.
2281  * @cmdiubuf:   pointer to the buffer containing the FCP CMD IU
2282  * @cmdiubuf_len: length, in bytes, of the received FCP CMD IU
2283  */
2284 int
2285 nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port,
2286 			struct nvmefc_tgt_fcp_req *fcpreq,
2287 			void *cmdiubuf, u32 cmdiubuf_len)
2288 {
2289 	struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
2290 	struct nvme_fc_cmd_iu *cmdiu = cmdiubuf;
2291 	struct nvmet_fc_tgt_queue *queue;
2292 	struct nvmet_fc_fcp_iod *fod;
2293 	struct nvmet_fc_defer_fcp_req *deferfcp;
2294 	unsigned long flags;
2295 
2296 	/* validate iu, so the connection id can be used to find the queue */
2297 	if ((cmdiubuf_len != sizeof(*cmdiu)) ||
2298 			(cmdiu->scsi_id != NVME_CMD_SCSI_ID) ||
2299 			(cmdiu->fc_id != NVME_CMD_FC_ID) ||
2300 			(be16_to_cpu(cmdiu->iu_len) != (sizeof(*cmdiu)/4)))
2301 		return -EIO;
2302 
2303 	queue = nvmet_fc_find_target_queue(tgtport,
2304 				be64_to_cpu(cmdiu->connection_id));
2305 	if (!queue)
2306 		return -ENOTCONN;
2307 
2308 	/*
2309 	 * note: reference taken by find_target_queue
2310 	 * After successful fod allocation, the fod will inherit the
2311 	 * ownership of that reference and will remove the reference
2312 	 * when the fod is freed.
2313 	 */
2314 
2315 	spin_lock_irqsave(&queue->qlock, flags);
2316 
2317 	fod = nvmet_fc_alloc_fcp_iod(queue);
2318 	if (fod) {
2319 		spin_unlock_irqrestore(&queue->qlock, flags);
2320 
2321 		fcpreq->nvmet_fc_private = fod;
2322 		fod->fcpreq = fcpreq;
2323 
2324 		memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len);
2325 
2326 		nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq);
2327 
2328 		return 0;
2329 	}
2330 
2331 	if (!tgtport->ops->defer_rcv) {
2332 		spin_unlock_irqrestore(&queue->qlock, flags);
2333 		/* release the queue lookup reference */
2334 		nvmet_fc_tgt_q_put(queue);
2335 		return -ENOENT;
2336 	}
2337 
2338 	deferfcp = list_first_entry_or_null(&queue->avail_defer_list,
2339 			struct nvmet_fc_defer_fcp_req, req_list);
2340 	if (deferfcp) {
2341 		/* Just re-use one that was previously allocated */
2342 		list_del(&deferfcp->req_list);
2343 	} else {
2344 		spin_unlock_irqrestore(&queue->qlock, flags);
2345 
2346 		/* Now we need to dynamically allocate one */
2347 		deferfcp = kmalloc(sizeof(*deferfcp), GFP_KERNEL);
2348 		if (!deferfcp) {
2349 			/* release the queue lookup reference */
2350 			nvmet_fc_tgt_q_put(queue);
2351 			return -ENOMEM;
2352 		}
2353 		spin_lock_irqsave(&queue->qlock, flags);
2354 	}
2355 
2356 	/* For now, use rspaddr / rsplen to save payload information */
2357 	fcpreq->rspaddr = cmdiubuf;
2358 	fcpreq->rsplen  = cmdiubuf_len;
2359 	deferfcp->fcp_req = fcpreq;
2360 
2361 	/* defer processing till a fod becomes available */
2362 	list_add_tail(&deferfcp->req_list, &queue->pending_cmd_list);
2363 
2364 	/* NOTE: the queue lookup reference is still valid */
2365 
2366 	spin_unlock_irqrestore(&queue->qlock, flags);
2367 
2368 	return -EOVERFLOW;
2369 }
2370 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_req);
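
/*
 * Editorial sketch (not part of the original source): how an LLDD receive
 * path might honor the contract documented above. The example_* names are
 * hypothetical; only nvmet_fc_rcv_fcp_req() and its return-code semantics
 * come from this file.
 */
static void example_lldd_repost_buffer(void *cmdiu);		/* hypothetical */
static void example_lldd_abort_exchange(struct nvmefc_tgt_fcp_req *fcpreq);

static void example_lldd_rx_fcp_cmd(struct nvmet_fc_target_port *tport,
				    struct nvmefc_tgt_fcp_req *fcpreq,
				    void *cmdiu, u32 cmdiu_len)
{
	int ret = nvmet_fc_rcv_fcp_req(tport, fcpreq, cmdiu, cmdiu_len);

	if (!ret) {
		/* accepted: the CMD IU buffer may be reused right away */
		example_lldd_repost_buffer(cmdiu);
	} else if (ret == -EOVERFLOW) {
		/*
		 * accepted but deferred: the CMD IU buffer must not be
		 * touched until defer_rcv() fires for this fcpreq.
		 */
	} else {
		/* not accepted: the LLDD should abort the exchange */
		example_lldd_abort_exchange(fcpreq);
	}
}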
2371 
2372 /**
2373  * nvmet_fc_rcv_fcp_abort - transport entry point called by an LLDD
2374  *                       upon the reception of an ABTS for an FCP command
2375  *
2376  * Notify the transport that an ABTS has been received for an FCP command
2377  * that had been given to the transport via nvmet_fc_rcv_fcp_req(). The
2378  * LLDD believes the command is still being worked on
2379  * (template_ops->fcp_req_release() has not been called).
2380  *
2381  * The transport will wait for any outstanding work (an op to the LLDD,
2382  * which the lldd should complete with error due to the ABTS; or the
2383  * completion from the nvmet layer of the nvme command), then will
2384  * stop processing and call the template_ops->fcp_req_release()
2385  * callback to return the i/o context to the LLDD. The LLDD may send
2386  * the BA_ACC to the ABTS either after return from this function
2387  * (assuming any outstanding op work has been terminated) or upon that
2388  * callback being called (a sketch follows this function).
2389  *
2390  * @target_port: pointer to the (registered) target port the FCP CMD IU
2391  *              was received on.
2392  * @fcpreq:     pointer to the fcpreq request structure that corresponds
2393  *              to the exchange that received the ABTS.
2394  */
2395 void
2396 nvmet_fc_rcv_fcp_abort(struct nvmet_fc_target_port *target_port,
2397 			struct nvmefc_tgt_fcp_req *fcpreq)
2398 {
2399 	struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
2400 	struct nvmet_fc_tgt_queue *queue;
2401 	unsigned long flags;
2402 
2403 	if (!fod || fod->fcpreq != fcpreq)
2404 		/* job appears to have already completed, ignore abort */
2405 		return;
2406 
2407 	queue = fod->queue;
2408 
2409 	spin_lock_irqsave(&queue->qlock, flags);
2410 	if (fod->active) {
2411 		/*
2412 		 * mark as abort. The abort handler, invoked upon completion
2413 		 * of any work, will detect the aborted status and do the
2414 		 * callback.
2415 		 */
2416 		spin_lock(&fod->flock);
2417 		fod->abort = true;
2418 		fod->aborted = true;
2419 		spin_unlock(&fod->flock);
2420 	}
2421 	spin_unlock_irqrestore(&queue->qlock, flags);
2422 }
2423 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_abort);
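
/*
 * Editorial sketch (not part of the original source): an LLDD ABTS path
 * per the comment above. The example_* names are hypothetical.
 */
static void example_lldd_send_ba_acc(struct nvmefc_tgt_fcp_req *fcpreq);

static void example_lldd_rx_abts(struct nvmet_fc_target_port *tport,
				 struct nvmefc_tgt_fcp_req *fcpreq)
{
	/* flag the io; the transport tears it down asynchronously */
	nvmet_fc_rcv_fcp_abort(tport, fcpreq);

	/*
	 * The BA_ACC may be sent here, once any outstanding op work has
	 * been terminated, or from the fcp_req_release() template
	 * callback when the transport returns the io context.
	 */
	example_lldd_send_ba_acc(fcpreq);
}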
2424 
2425 
2426 struct nvmet_fc_traddr {
2427 	u64	nn;
2428 	u64	pn;
2429 };
2430 
2431 static int
2432 __nvme_fc_parse_u64(substring_t *sstr, u64 *val)
2433 {
2434 	u64 token64;
2435 
2436 	if (match_u64(sstr, &token64))
2437 		return -EINVAL;
2438 	*val = token64;
2439 
2440 	return 0;
2441 }
2442 
2443 /*
2444  * This routine validates and extracts the WWNs from the TRADDR string
2445  * (example spellings follow the function). As kernel parsers need a 0x
2446  * prefix to determine the number base, build the parse string with 0x.
2447  */
2448 static int
2449 nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen)
2450 {
2451 	char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1];
2452 	substring_t wwn = { name, &name[sizeof(name)-1] };
2453 	int nnoffset, pnoffset;
2454 
2455 	/* validate if string is one of the 2 allowed formats */
2456 	if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH &&
2457 			!strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) &&
2458 			!strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET],
2459 				"pn-0x", NVME_FC_TRADDR_OXNNLEN)) {
2460 		nnoffset = NVME_FC_TRADDR_OXNNLEN;
2461 		pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET +
2462 						NVME_FC_TRADDR_OXNNLEN;
2463 	} else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH &&
2464 			!strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) &&
2465 			!strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET],
2466 				"pn-", NVME_FC_TRADDR_NNLEN))) {
2467 		nnoffset = NVME_FC_TRADDR_NNLEN;
2468 		pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN;
2469 	} else
2470 		goto out_einval;
2471 
2472 	name[0] = '0';
2473 	name[1] = 'x';
2474 	name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0;
2475 
2476 	memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN);
2477 	if (__nvme_fc_parse_u64(&wwn, &traddr->nn))
2478 		goto out_einval;
2479 
2480 	memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN);
2481 	if (__nvme_fc_parse_u64(&wwn, &traddr->pn))
2482 		goto out_einval;
2483 
2484 	return 0;
2485 
2486 out_einval:
2487 	pr_warn("%s: bad traddr string\n", __func__);
2488 	return -EINVAL;
2489 }
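
/*
 * Editorial note (not part of the original source): by way of example,
 * the two traddr spellings accepted above look like:
 *
 *   nn-0x20000090fa942779:pn-0x10000090fa942779    (with 0x prefixes)
 *   nn-20000090fa942779:pn-10000090fa942779        (without prefixes)
 *
 * The WWNN/WWPN values are made up; each name must be 16 hex digits.
 */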
2490 
2491 static int
2492 nvmet_fc_add_port(struct nvmet_port *port)
2493 {
2494 	struct nvmet_fc_tgtport *tgtport;
2495 	struct nvmet_fc_traddr traddr = { 0L, 0L };
2496 	unsigned long flags;
2497 	int ret;
2498 
2499 	/* validate the address info */
2500 	if ((port->disc_addr.trtype != NVMF_TRTYPE_FC) ||
2501 	    (port->disc_addr.adrfam != NVMF_ADDR_FAMILY_FC))
2502 		return -EINVAL;
2503 
2504 	/* map the traddr address info to a target port */
2505 
2506 	ret = nvme_fc_parse_traddr(&traddr, port->disc_addr.traddr,
2507 			sizeof(port->disc_addr.traddr));
2508 	if (ret)
2509 		return ret;
2510 
2511 	ret = -ENXIO;
2512 	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
2513 	list_for_each_entry(tgtport, &nvmet_fc_target_list, tgt_list) {
2514 		if ((tgtport->fc_target_port.node_name == traddr.nn) &&
2515 		    (tgtport->fc_target_port.port_name == traddr.pn)) {
2516 			/* an FC port can map to only 1 nvmet port id */
2517 			if (!tgtport->port) {
2518 				tgtport->port = port;
2519 				port->priv = tgtport;
2520 				nvmet_fc_tgtport_get(tgtport);
2521 				ret = 0;
2522 			} else
2523 				ret = -EALREADY;
2524 			break;
2525 		}
2526 	}
2527 	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
2528 	return ret;
2529 }
2530 
2531 static void
2532 nvmet_fc_remove_port(struct nvmet_port *port)
2533 {
2534 	struct nvmet_fc_tgtport *tgtport = port->priv;
2535 	unsigned long flags;
2536 
2537 	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
2538 	if (tgtport->port == port) {
2539 		nvmet_fc_tgtport_put(tgtport);
2540 		tgtport->port = NULL;
2541 	}
2542 	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
2543 }
2544 
2545 static struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = {
2546 	.owner			= THIS_MODULE,
2547 	.type			= NVMF_TRTYPE_FC,
2548 	.msdbd			= 1,
2549 	.add_port		= nvmet_fc_add_port,
2550 	.remove_port		= nvmet_fc_remove_port,
2551 	.queue_response		= nvmet_fc_fcp_nvme_cmd_done,
2552 	.delete_ctrl		= nvmet_fc_delete_ctrl,
2553 };
2554 
2555 static int __init nvmet_fc_init_module(void)
2556 {
2557 	return nvmet_register_transport(&nvmet_fc_tgt_fcp_ops);
2558 }
2559 
2560 static void __exit nvmet_fc_exit_module(void)
2561 {
2562 	/* sanity check - all targetports should be removed */
2563 	if (!list_empty(&nvmet_fc_target_list))
2564 		pr_warn("%s: targetport list not empty\n", __func__);
2565 
2566 	nvmet_unregister_transport(&nvmet_fc_tgt_fcp_ops);
2567 
2568 	ida_destroy(&nvmet_fc_tgtport_cnt);
2569 }
2570 
2571 module_init(nvmet_fc_init_module);
2572 module_exit(nvmet_fc_exit_module);
2573 
2574 MODULE_LICENSE("GPL v2");
2575