xref: /openbmc/linux/drivers/nvme/target/fc.c (revision bc05aa6e)
1 /*
2  * Copyright (c) 2016 Avago Technologies.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of version 2 of the GNU General Public License as
6  * published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful.
9  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
10  * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
11  * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO
12  * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
13  * See the GNU General Public License for more details, a copy of which
14  * can be found in the file COPYING included with this package
15  *
16  */
17 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18 #include <linux/module.h>
19 #include <linux/slab.h>
20 #include <linux/blk-mq.h>
21 #include <linux/parser.h>
22 #include <linux/random.h>
23 #include <uapi/scsi/fc/fc_fs.h>
24 #include <uapi/scsi/fc/fc_els.h>
25 
26 #include "nvmet.h"
27 #include <linux/nvme-fc-driver.h>
28 #include <linux/nvme-fc.h>
29 
30 
31 /* *************************** Data Structures/Defines ****************** */
32 
33 
34 #define NVMET_LS_CTX_COUNT		4
35 
36 /* for this implementation, assume small single frame rqst/rsp */
37 #define NVME_FC_MAX_LS_BUFFER_SIZE		2048
38 
39 struct nvmet_fc_tgtport;
40 struct nvmet_fc_tgt_assoc;
41 
42 struct nvmet_fc_ls_iod {
43 	struct nvmefc_tgt_ls_req	*lsreq;
44 	struct nvmefc_tgt_fcp_req	*fcpreq;	/* only if RS */
45 
46 	struct list_head		ls_list;	/* tgtport->ls_list */
47 
48 	struct nvmet_fc_tgtport		*tgtport;
49 	struct nvmet_fc_tgt_assoc	*assoc;
50 
51 	u8				*rqstbuf;
52 	u8				*rspbuf;
53 	u16				rqstdatalen;
54 	dma_addr_t			rspdma;
55 
56 	struct scatterlist		sg[2];
57 
58 	struct work_struct		work;
59 } __aligned(sizeof(unsigned long long));
60 
61 #define NVMET_FC_MAX_SEQ_LENGTH		(256 * 1024)
62 #define NVMET_FC_MAX_XFR_SGENTS		(NVMET_FC_MAX_SEQ_LENGTH / PAGE_SIZE)
63 
64 enum nvmet_fcp_datadir {
65 	NVMET_FCP_NODATA,
66 	NVMET_FCP_WRITE,
67 	NVMET_FCP_READ,
68 	NVMET_FCP_ABORTED,
69 };
70 
71 struct nvmet_fc_fcp_iod {
72 	struct nvmefc_tgt_fcp_req	*fcpreq;
73 
74 	struct nvme_fc_cmd_iu		cmdiubuf;
75 	struct nvme_fc_ersp_iu		rspiubuf;
76 	dma_addr_t			rspdma;
77 	struct scatterlist		*data_sg;
78 	int				data_sg_cnt;
79 	u32				offset;
80 	enum nvmet_fcp_datadir		io_dir;
81 	bool				active;
82 	bool				abort;
83 	bool				aborted;
84 	bool				writedataactive;
85 	spinlock_t			flock;
86 
87 	struct nvmet_req		req;
88 	struct work_struct		work;
89 	struct work_struct		done_work;
90 
91 	struct nvmet_fc_tgtport		*tgtport;
92 	struct nvmet_fc_tgt_queue	*queue;
93 
94 	struct list_head		fcp_list;	/* tgtport->fcp_list */
95 };
96 
97 struct nvmet_fc_tgtport {
98 
99 	struct nvmet_fc_target_port	fc_target_port;
100 
101 	struct list_head		tgt_list; /* nvmet_fc_target_list */
102 	struct device			*dev;	/* dev for dma mapping */
103 	struct nvmet_fc_target_template	*ops;
104 
105 	struct nvmet_fc_ls_iod		*iod;
106 	spinlock_t			lock;
107 	struct list_head		ls_list;
108 	struct list_head		ls_busylist;
109 	struct list_head		assoc_list;
110 	struct ida			assoc_cnt;
111 	struct nvmet_port		*port;
112 	struct kref			ref;
113 	u32				max_sg_cnt;
114 };
115 
116 struct nvmet_fc_defer_fcp_req {
117 	struct list_head		req_list;
118 	struct nvmefc_tgt_fcp_req	*fcp_req;
119 };
120 
121 struct nvmet_fc_tgt_queue {
122 	bool				ninetypercent;
123 	u16				qid;
124 	u16				sqsize;
125 	u16				ersp_ratio;
126 	__le16				sqhd;
127 	int				cpu;
128 	atomic_t			connected;
129 	atomic_t			sqtail;
130 	atomic_t			zrspcnt;
131 	atomic_t			rsn;
132 	spinlock_t			qlock;
133 	struct nvmet_port		*port;
134 	struct nvmet_cq			nvme_cq;
135 	struct nvmet_sq			nvme_sq;
136 	struct nvmet_fc_tgt_assoc	*assoc;
137 	struct nvmet_fc_fcp_iod		*fod;		/* array of fcp_iods */
138 	struct list_head		fod_list;
139 	struct list_head		pending_cmd_list;
140 	struct list_head		avail_defer_list;
141 	struct workqueue_struct		*work_q;
142 	struct kref			ref;
143 } __aligned(sizeof(unsigned long long));
144 
145 struct nvmet_fc_tgt_assoc {
146 	u64				association_id;
147 	u32				a_id;
148 	struct nvmet_fc_tgtport		*tgtport;
149 	struct list_head		a_list;
150 	struct nvmet_fc_tgt_queue	*queues[NVMET_NR_QUEUES + 1];
151 	struct kref			ref;
152 	struct work_struct		del_work;
153 };
154 
155 
156 static inline int
157 nvmet_fc_iodnum(struct nvmet_fc_ls_iod *iodptr)
158 {
159 	return (iodptr - iodptr->tgtport->iod);
160 }
161 
162 static inline int
163 nvmet_fc_fodnum(struct nvmet_fc_fcp_iod *fodptr)
164 {
165 	return (fodptr - fodptr->queue->fod);
166 }
167 
168 
169 /*
170  * Association and Connection IDs:
171  *
172  * The Association ID has a random number in the upper 6 bytes and zero
173  *   in the lower 2 bytes
174  *
175  * Connection IDs are the Association ID with the QID OR'd into the lower 2 bytes
176  *
177  * note: Association ID = Connection ID for queue 0
178  */
179 #define BYTES_FOR_QID			sizeof(u16)
180 #define BYTES_FOR_QID_SHIFT		(BYTES_FOR_QID * 8)
181 #define NVMET_FC_QUEUEID_MASK		((u64)((1 << BYTES_FOR_QID_SHIFT) - 1))
182 
183 static inline u64
184 nvmet_fc_makeconnid(struct nvmet_fc_tgt_assoc *assoc, u16 qid)
185 {
186 	return (assoc->association_id | qid);
187 }
188 
189 static inline u64
190 nvmet_fc_getassociationid(u64 connectionid)
191 {
192 	return connectionid & ~NVMET_FC_QUEUEID_MASK;
193 }
194 
195 static inline u16
196 nvmet_fc_getqueueid(u64 connectionid)
197 {
198 	return (u16)(connectionid & NVMET_FC_QUEUEID_MASK);
199 }
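/*
 * Illustrative sketch (editorial addition, not part of the driver): given an
 * association_id of 0x1234567890ab0000 (random upper 6 bytes, zero lower 2
 * bytes), the helpers above compose/decompose connection IDs as follows:
 *
 *	nvmet_fc_makeconnid(assoc, 3)      -> 0x1234567890ab0003
 *	nvmet_fc_getassociationid(connid)  -> 0x1234567890ab0000
 *	nvmet_fc_getqueueid(connid)        -> 3
 *
 * For queue 0 (the admin queue), the connection ID equals the association ID.
 */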
200 
201 static inline struct nvmet_fc_tgtport *
202 targetport_to_tgtport(struct nvmet_fc_target_port *targetport)
203 {
204 	return container_of(targetport, struct nvmet_fc_tgtport,
205 				 fc_target_port);
206 }
207 
208 static inline struct nvmet_fc_fcp_iod *
209 nvmet_req_to_fod(struct nvmet_req *nvme_req)
210 {
211 	return container_of(nvme_req, struct nvmet_fc_fcp_iod, req);
212 }
213 
214 
215 /* *************************** Globals **************************** */
216 
217 
218 static DEFINE_SPINLOCK(nvmet_fc_tgtlock);
219 
220 static LIST_HEAD(nvmet_fc_target_list);
221 static DEFINE_IDA(nvmet_fc_tgtport_cnt);
222 
223 
224 static void nvmet_fc_handle_ls_rqst_work(struct work_struct *work);
225 static void nvmet_fc_handle_fcp_rqst_work(struct work_struct *work);
226 static void nvmet_fc_fcp_rqst_op_done_work(struct work_struct *work);
227 static void nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc);
228 static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc);
229 static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue);
230 static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue);
231 static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport);
232 static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);
233 static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
234 					struct nvmet_fc_fcp_iod *fod);
235 static void nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc);
236 
237 
238 /* *********************** FC-NVME DMA Handling **************************** */
239 
240 /*
241  * The fcloop device passes in a NULL device pointer. Real LLDDs will
242  * pass in a valid device pointer. If NULL is passed to the dma mapping
243  * routines, depending on the platform, it may or may not succeed, and
244  * may crash.
245  *
246  * As such:
247  * Wrap all the dma routines and check the dev pointer.
248  *
249  * If simple mappings (return just a dma address), we'll noop them,
250  * returning a dma address of 0.
251  *
252  * On more complex mappings (dma_map_sg), a pseudo routine fills
253  * in the scatter list, setting all dma addresses to 0.
254  */
255 
256 static inline dma_addr_t
257 fc_dma_map_single(struct device *dev, void *ptr, size_t size,
258 		enum dma_data_direction dir)
259 {
260 	return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
261 }
262 
263 static inline int
264 fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
265 {
266 	return dev ? dma_mapping_error(dev, dma_addr) : 0;
267 }
268 
269 static inline void
270 fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
271 	enum dma_data_direction dir)
272 {
273 	if (dev)
274 		dma_unmap_single(dev, addr, size, dir);
275 }
276 
277 static inline void
278 fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
279 		enum dma_data_direction dir)
280 {
281 	if (dev)
282 		dma_sync_single_for_cpu(dev, addr, size, dir);
283 }
284 
285 static inline void
286 fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
287 		enum dma_data_direction dir)
288 {
289 	if (dev)
290 		dma_sync_single_for_device(dev, addr, size, dir);
291 }
292 
293 /* pseudo dma_map_sg call */
294 static int
295 fc_map_sg(struct scatterlist *sg, int nents)
296 {
297 	struct scatterlist *s;
298 	int i;
299 
300 	WARN_ON(nents == 0 || sg[0].length == 0);
301 
302 	for_each_sg(sg, s, nents, i) {
303 		s->dma_address = 0L;
304 #ifdef CONFIG_NEED_SG_DMA_LENGTH
305 		s->dma_length = s->length;
306 #endif
307 	}
308 	return nents;
309 }
310 
311 static inline int
312 fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
313 		enum dma_data_direction dir)
314 {
315 	return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
316 }
317 
318 static inline void
319 fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
320 		enum dma_data_direction dir)
321 {
322 	if (dev)
323 		dma_unmap_sg(dev, sg, nents, dir);
324 }
325 
326 
327 /* *********************** FC-NVME Port Management ************************ */
328 
329 
330 static int
331 nvmet_fc_alloc_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
332 {
333 	struct nvmet_fc_ls_iod *iod;
334 	int i;
335 
336 	iod = kcalloc(NVMET_LS_CTX_COUNT, sizeof(struct nvmet_fc_ls_iod),
337 			GFP_KERNEL);
338 	if (!iod)
339 		return -ENOMEM;
340 
341 	tgtport->iod = iod;
342 
343 	for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
344 		INIT_WORK(&iod->work, nvmet_fc_handle_ls_rqst_work);
345 		iod->tgtport = tgtport;
346 		list_add_tail(&iod->ls_list, &tgtport->ls_list);
347 
348 		iod->rqstbuf = kcalloc(2, NVME_FC_MAX_LS_BUFFER_SIZE,
349 			GFP_KERNEL);
350 		if (!iod->rqstbuf)
351 			goto out_fail;
352 
353 		iod->rspbuf = iod->rqstbuf + NVME_FC_MAX_LS_BUFFER_SIZE;
354 
355 		iod->rspdma = fc_dma_map_single(tgtport->dev, iod->rspbuf,
356 						NVME_FC_MAX_LS_BUFFER_SIZE,
357 						DMA_TO_DEVICE);
358 		if (fc_dma_mapping_error(tgtport->dev, iod->rspdma))
359 			goto out_fail;
360 	}
361 
362 	return 0;
363 
364 out_fail:
365 	kfree(iod->rqstbuf);
366 	list_del(&iod->ls_list);
367 	for (iod--, i--; i >= 0; iod--, i--) {
368 		fc_dma_unmap_single(tgtport->dev, iod->rspdma,
369 				NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
370 		kfree(iod->rqstbuf);
371 		list_del(&iod->ls_list);
372 	}
373 
374 	kfree(iod);
375 
376 	return -EFAULT;
377 }
378 
379 static void
380 nvmet_fc_free_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
381 {
382 	struct nvmet_fc_ls_iod *iod = tgtport->iod;
383 	int i;
384 
385 	for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
386 		fc_dma_unmap_single(tgtport->dev,
387 				iod->rspdma, NVME_FC_MAX_LS_BUFFER_SIZE,
388 				DMA_TO_DEVICE);
389 		kfree(iod->rqstbuf);
390 		list_del(&iod->ls_list);
391 	}
392 	kfree(tgtport->iod);
393 }
394 
395 static struct nvmet_fc_ls_iod *
396 nvmet_fc_alloc_ls_iod(struct nvmet_fc_tgtport *tgtport)
397 {
398 	struct nvmet_fc_ls_iod *iod;
399 	unsigned long flags;
400 
401 	spin_lock_irqsave(&tgtport->lock, flags);
402 	iod = list_first_entry_or_null(&tgtport->ls_list,
403 					struct nvmet_fc_ls_iod, ls_list);
404 	if (iod)
405 		list_move_tail(&iod->ls_list, &tgtport->ls_busylist);
406 	spin_unlock_irqrestore(&tgtport->lock, flags);
407 	return iod;
408 }
409 
410 
411 static void
412 nvmet_fc_free_ls_iod(struct nvmet_fc_tgtport *tgtport,
413 			struct nvmet_fc_ls_iod *iod)
414 {
415 	unsigned long flags;
416 
417 	spin_lock_irqsave(&tgtport->lock, flags);
418 	list_move(&iod->ls_list, &tgtport->ls_list);
419 	spin_unlock_irqrestore(&tgtport->lock, flags);
420 }
421 
422 static void
423 nvmet_fc_prep_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
424 				struct nvmet_fc_tgt_queue *queue)
425 {
426 	struct nvmet_fc_fcp_iod *fod = queue->fod;
427 	int i;
428 
429 	for (i = 0; i < queue->sqsize; fod++, i++) {
430 		INIT_WORK(&fod->work, nvmet_fc_handle_fcp_rqst_work);
431 		INIT_WORK(&fod->done_work, nvmet_fc_fcp_rqst_op_done_work);
432 		fod->tgtport = tgtport;
433 		fod->queue = queue;
434 		fod->active = false;
435 		fod->abort = false;
436 		fod->aborted = false;
437 		fod->fcpreq = NULL;
438 		list_add_tail(&fod->fcp_list, &queue->fod_list);
439 		spin_lock_init(&fod->flock);
440 
441 		fod->rspdma = fc_dma_map_single(tgtport->dev, &fod->rspiubuf,
442 					sizeof(fod->rspiubuf), DMA_TO_DEVICE);
443 		if (fc_dma_mapping_error(tgtport->dev, fod->rspdma)) {
444 			list_del(&fod->fcp_list);
445 			for (fod--, i--; i >= 0; fod--, i--) {
446 				fc_dma_unmap_single(tgtport->dev, fod->rspdma,
447 						sizeof(fod->rspiubuf),
448 						DMA_TO_DEVICE);
449 				fod->rspdma = 0L;
450 				list_del(&fod->fcp_list);
451 			}
452 
453 			return;
454 		}
455 	}
456 }
457 
458 static void
459 nvmet_fc_destroy_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
460 				struct nvmet_fc_tgt_queue *queue)
461 {
462 	struct nvmet_fc_fcp_iod *fod = queue->fod;
463 	int i;
464 
465 	for (i = 0; i < queue->sqsize; fod++, i++) {
466 		if (fod->rspdma)
467 			fc_dma_unmap_single(tgtport->dev, fod->rspdma,
468 				sizeof(fod->rspiubuf), DMA_TO_DEVICE);
469 	}
470 }
471 
472 static struct nvmet_fc_fcp_iod *
473 nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue)
474 {
475 	struct nvmet_fc_fcp_iod *fod;
476 
477 	lockdep_assert_held(&queue->qlock);
478 
479 	fod = list_first_entry_or_null(&queue->fod_list,
480 					struct nvmet_fc_fcp_iod, fcp_list);
481 	if (fod) {
482 		list_del(&fod->fcp_list);
483 		fod->active = true;
484 		/*
485 		 * no queue reference is taken, as it was taken by the
486 		 * queue lookup just prior to the allocation. The fod
487 		 * will "inherit" that reference.
488 		 */
489 	}
490 	return fod;
491 }
492 
493 
494 static void
495 nvmet_fc_queue_fcp_req(struct nvmet_fc_tgtport *tgtport,
496 		       struct nvmet_fc_tgt_queue *queue,
497 		       struct nvmefc_tgt_fcp_req *fcpreq)
498 {
499 	struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
500 
501 	/*
502 	 * put all admin cmds on hw queue id 0. All io commands go to
503 	 * their respective hw queue, selected on a modulo basis
504 	 */
505 	fcpreq->hwqid = queue->qid ?
506 			((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0;
507 
508 	if (tgtport->ops->target_features & NVMET_FCTGTFEAT_CMD_IN_ISR)
509 		queue_work_on(queue->cpu, queue->work_q, &fod->work);
510 	else
511 		nvmet_fc_handle_fcp_rqst(tgtport, fod);
512 }
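/*
 * Illustrative sketch (editorial addition, not part of the driver): with an
 * assumed max_hw_queues of 4, the hwqid selection above maps nvme queue ids as
 *	qid 0 (admin) -> hwqid 0
 *	qid 1 -> hwqid 0, qid 2 -> hwqid 1, qid 3 -> hwqid 2,
 *	qid 4 -> hwqid 3, qid 5 -> hwqid 0, ...
 */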
513 
514 static void
515 nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
516 			struct nvmet_fc_fcp_iod *fod)
517 {
518 	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
519 	struct nvmet_fc_tgtport *tgtport = fod->tgtport;
520 	struct nvmet_fc_defer_fcp_req *deferfcp;
521 	unsigned long flags;
522 
523 	fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma,
524 				sizeof(fod->rspiubuf), DMA_TO_DEVICE);
525 
526 	fcpreq->nvmet_fc_private = NULL;
527 
528 	fod->active = false;
529 	fod->abort = false;
530 	fod->aborted = false;
531 	fod->writedataactive = false;
532 	fod->fcpreq = NULL;
533 
534 	tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq);
535 
536 	/* release the queue lookup reference on the completed IO */
537 	nvmet_fc_tgt_q_put(queue);
538 
539 	spin_lock_irqsave(&queue->qlock, flags);
540 	deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
541 				struct nvmet_fc_defer_fcp_req, req_list);
542 	if (!deferfcp) {
543 		list_add_tail(&fod->fcp_list, &fod->queue->fod_list);
544 		spin_unlock_irqrestore(&queue->qlock, flags);
545 		return;
546 	}
547 
548 	/* Re-use the fod for the next pending cmd that was deferred */
549 	list_del(&deferfcp->req_list);
550 
551 	fcpreq = deferfcp->fcp_req;
552 
553 	/* deferfcp can be reused for another IO at a later date */
554 	list_add_tail(&deferfcp->req_list, &queue->avail_defer_list);
555 
556 	spin_unlock_irqrestore(&queue->qlock, flags);
557 
558 	/* Save the NVME CMD IU in the fod */
559 	memcpy(&fod->cmdiubuf, fcpreq->rspaddr, fcpreq->rsplen);
560 
561 	/* Setup new fcpreq to be processed */
562 	fcpreq->rspaddr = NULL;
563 	fcpreq->rsplen  = 0;
564 	fcpreq->nvmet_fc_private = fod;
565 	fod->fcpreq = fcpreq;
566 	fod->active = true;
567 
568 	/* inform LLDD IO is now being processed */
569 	tgtport->ops->defer_rcv(&tgtport->fc_target_port, fcpreq);
570 
571 	/* Submit deferred IO for processing */
572 	nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq);
573 
574 	/*
575 	 * Keep the queue lookup reference that was taken when the
576 	 * fod was originally allocated.
577 	 */
578 }
579 
580 static int
581 nvmet_fc_queue_to_cpu(struct nvmet_fc_tgtport *tgtport, int qid)
582 {
583 	int cpu, idx, cnt;
584 
585 	if (tgtport->ops->max_hw_queues == 1)
586 		return WORK_CPU_UNBOUND;
587 
588 	/* Simple cpu selection based on qid modulo active cpu count */
589 	idx = !qid ? 0 : (qid - 1) % num_active_cpus();
590 
591 	/* find the n'th active cpu */
592 	for (cpu = 0, cnt = 0; ; ) {
593 		if (cpu_active(cpu)) {
594 			if (cnt == idx)
595 				break;
596 			cnt++;
597 		}
598 		cpu = (cpu + 1) % num_possible_cpus();
599 	}
600 
601 	return cpu;
602 }
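/*
 * Illustrative sketch (editorial addition, not part of the driver): assuming
 * 4 active cpus and more than one hw queue, the selection above gives
 *	qid 0 -> first active cpu, qid 1 -> first active cpu,
 *	qid 2 -> second active cpu, ... qid 5 -> first active cpu again,
 * i.e. idx = (qid - 1) % num_active_cpus(), walked over the active cpu mask.
 */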
603 
604 static struct nvmet_fc_tgt_queue *
605 nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
606 			u16 qid, u16 sqsize)
607 {
608 	struct nvmet_fc_tgt_queue *queue;
609 	unsigned long flags;
610 	int ret;
611 
612 	if (qid > NVMET_NR_QUEUES)
613 		return NULL;
614 
615 	queue = kzalloc((sizeof(*queue) +
616 				(sizeof(struct nvmet_fc_fcp_iod) * sqsize)),
617 				GFP_KERNEL);
618 	if (!queue)
619 		return NULL;
620 
621 	if (!nvmet_fc_tgt_a_get(assoc))
622 		goto out_free_queue;
623 
624 	queue->work_q = alloc_workqueue("ntfc%d.%d.%d", 0, 0,
625 				assoc->tgtport->fc_target_port.port_num,
626 				assoc->a_id, qid);
627 	if (!queue->work_q)
628 		goto out_a_put;
629 
630 	queue->fod = (struct nvmet_fc_fcp_iod *)&queue[1];
631 	queue->qid = qid;
632 	queue->sqsize = sqsize;
633 	queue->assoc = assoc;
634 	queue->port = assoc->tgtport->port;
635 	queue->cpu = nvmet_fc_queue_to_cpu(assoc->tgtport, qid);
636 	INIT_LIST_HEAD(&queue->fod_list);
637 	INIT_LIST_HEAD(&queue->avail_defer_list);
638 	INIT_LIST_HEAD(&queue->pending_cmd_list);
639 	atomic_set(&queue->connected, 0);
640 	atomic_set(&queue->sqtail, 0);
641 	atomic_set(&queue->rsn, 1);
642 	atomic_set(&queue->zrspcnt, 0);
643 	spin_lock_init(&queue->qlock);
644 	kref_init(&queue->ref);
645 
646 	nvmet_fc_prep_fcp_iodlist(assoc->tgtport, queue);
647 
648 	ret = nvmet_sq_init(&queue->nvme_sq);
649 	if (ret)
650 		goto out_fail_iodlist;
651 
652 	WARN_ON(assoc->queues[qid]);
653 	spin_lock_irqsave(&assoc->tgtport->lock, flags);
654 	assoc->queues[qid] = queue;
655 	spin_unlock_irqrestore(&assoc->tgtport->lock, flags);
656 
657 	return queue;
658 
659 out_fail_iodlist:
660 	nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue);
661 	destroy_workqueue(queue->work_q);
662 out_a_put:
663 	nvmet_fc_tgt_a_put(assoc);
664 out_free_queue:
665 	kfree(queue);
666 	return NULL;
667 }
668 
669 
670 static void
671 nvmet_fc_tgt_queue_free(struct kref *ref)
672 {
673 	struct nvmet_fc_tgt_queue *queue =
674 		container_of(ref, struct nvmet_fc_tgt_queue, ref);
675 	unsigned long flags;
676 
677 	spin_lock_irqsave(&queue->assoc->tgtport->lock, flags);
678 	queue->assoc->queues[queue->qid] = NULL;
679 	spin_unlock_irqrestore(&queue->assoc->tgtport->lock, flags);
680 
681 	nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue);
682 
683 	nvmet_fc_tgt_a_put(queue->assoc);
684 
685 	destroy_workqueue(queue->work_q);
686 
687 	kfree(queue);
688 }
689 
690 static void
691 nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue)
692 {
693 	kref_put(&queue->ref, nvmet_fc_tgt_queue_free);
694 }
695 
696 static int
697 nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue)
698 {
699 	return kref_get_unless_zero(&queue->ref);
700 }
701 
702 
703 static void
704 nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
705 {
706 	struct nvmet_fc_tgtport *tgtport = queue->assoc->tgtport;
707 	struct nvmet_fc_fcp_iod *fod = queue->fod;
708 	struct nvmet_fc_defer_fcp_req *deferfcp, *tempptr;
709 	unsigned long flags;
710 	int i, writedataactive;
711 	bool disconnect;
712 
713 	disconnect = atomic_xchg(&queue->connected, 0);
714 
715 	spin_lock_irqsave(&queue->qlock, flags);
716 	/* abort outstanding io's */
717 	for (i = 0; i < queue->sqsize; fod++, i++) {
718 		if (fod->active) {
719 			spin_lock(&fod->flock);
720 			fod->abort = true;
721 			writedataactive = fod->writedataactive;
722 			spin_unlock(&fod->flock);
723 			/*
724 			 * only call lldd abort routine if waiting for
725 			 * writedata. other outstanding ops should finish
726 			 * on their own.
727 			 */
728 			if (writedataactive) {
729 				spin_lock(&fod->flock);
730 				fod->aborted = true;
731 				spin_unlock(&fod->flock);
732 				tgtport->ops->fcp_abort(
733 					&tgtport->fc_target_port, fod->fcpreq);
734 			}
735 		}
736 	}
737 
738 	/* Cleanup deferred IOs in queue */
739 	list_for_each_entry_safe(deferfcp, tempptr, &queue->avail_defer_list,
740 				req_list) {
741 		list_del(&deferfcp->req_list);
742 		kfree(deferfcp);
743 	}
744 
745 	for (;;) {
746 		deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
747 				struct nvmet_fc_defer_fcp_req, req_list);
748 		if (!deferfcp)
749 			break;
750 
751 		list_del(&deferfcp->req_list);
752 		spin_unlock_irqrestore(&queue->qlock, flags);
753 
754 		tgtport->ops->defer_rcv(&tgtport->fc_target_port,
755 				deferfcp->fcp_req);
756 
757 		tgtport->ops->fcp_abort(&tgtport->fc_target_port,
758 				deferfcp->fcp_req);
759 
760 		tgtport->ops->fcp_req_release(&tgtport->fc_target_port,
761 				deferfcp->fcp_req);
762 
763 		/* release the queue lookup reference */
764 		nvmet_fc_tgt_q_put(queue);
765 
766 		kfree(deferfcp);
767 
768 		spin_lock_irqsave(&queue->qlock, flags);
769 	}
770 	spin_unlock_irqrestore(&queue->qlock, flags);
771 
772 	flush_workqueue(queue->work_q);
773 
774 	if (disconnect)
775 		nvmet_sq_destroy(&queue->nvme_sq);
776 
777 	nvmet_fc_tgt_q_put(queue);
778 }
779 
780 static struct nvmet_fc_tgt_queue *
781 nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport,
782 				u64 connection_id)
783 {
784 	struct nvmet_fc_tgt_assoc *assoc;
785 	struct nvmet_fc_tgt_queue *queue;
786 	u64 association_id = nvmet_fc_getassociationid(connection_id);
787 	u16 qid = nvmet_fc_getqueueid(connection_id);
788 	unsigned long flags;
789 
790 	if (qid > NVMET_NR_QUEUES)
791 		return NULL;
792 
793 	spin_lock_irqsave(&tgtport->lock, flags);
794 	list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
795 		if (association_id == assoc->association_id) {
796 			queue = assoc->queues[qid];
797 			if (queue &&
798 			    (!atomic_read(&queue->connected) ||
799 			     !nvmet_fc_tgt_q_get(queue)))
800 				queue = NULL;
801 			spin_unlock_irqrestore(&tgtport->lock, flags);
802 			return queue;
803 		}
804 	}
805 	spin_unlock_irqrestore(&tgtport->lock, flags);
806 	return NULL;
807 }
808 
809 static void
810 nvmet_fc_delete_assoc(struct work_struct *work)
811 {
812 	struct nvmet_fc_tgt_assoc *assoc =
813 		container_of(work, struct nvmet_fc_tgt_assoc, del_work);
814 
815 	nvmet_fc_delete_target_assoc(assoc);
816 	nvmet_fc_tgt_a_put(assoc);
817 }
818 
819 static struct nvmet_fc_tgt_assoc *
820 nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport)
821 {
822 	struct nvmet_fc_tgt_assoc *assoc, *tmpassoc;
823 	unsigned long flags;
824 	u64 ran;
825 	int idx;
826 	bool needrandom = true;
827 
828 	assoc = kzalloc(sizeof(*assoc), GFP_KERNEL);
829 	if (!assoc)
830 		return NULL;
831 
832 	idx = ida_simple_get(&tgtport->assoc_cnt, 0, 0, GFP_KERNEL);
833 	if (idx < 0)
834 		goto out_free_assoc;
835 
836 	if (!nvmet_fc_tgtport_get(tgtport))
837 		goto out_ida_put;
838 
839 	assoc->tgtport = tgtport;
840 	assoc->a_id = idx;
841 	INIT_LIST_HEAD(&assoc->a_list);
842 	kref_init(&assoc->ref);
843 	INIT_WORK(&assoc->del_work, nvmet_fc_delete_assoc);
844 
845 	while (needrandom) {
846 		get_random_bytes(&ran, sizeof(ran) - BYTES_FOR_QID);
847 		ran = ran << BYTES_FOR_QID_SHIFT;
848 
849 		spin_lock_irqsave(&tgtport->lock, flags);
850 		needrandom = false;
851 		list_for_each_entry(tmpassoc, &tgtport->assoc_list, a_list)
852 			if (ran == tmpassoc->association_id) {
853 				needrandom = true;
854 				break;
855 			}
856 		if (!needrandom) {
857 			assoc->association_id = ran;
858 			list_add_tail(&assoc->a_list, &tgtport->assoc_list);
859 		}
860 		spin_unlock_irqrestore(&tgtport->lock, flags);
861 	}
862 
863 	return assoc;
864 
865 out_ida_put:
866 	ida_simple_remove(&tgtport->assoc_cnt, idx);
867 out_free_assoc:
868 	kfree(assoc);
869 	return NULL;
870 }
871 
872 static void
873 nvmet_fc_target_assoc_free(struct kref *ref)
874 {
875 	struct nvmet_fc_tgt_assoc *assoc =
876 		container_of(ref, struct nvmet_fc_tgt_assoc, ref);
877 	struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
878 	unsigned long flags;
879 
880 	spin_lock_irqsave(&tgtport->lock, flags);
881 	list_del(&assoc->a_list);
882 	spin_unlock_irqrestore(&tgtport->lock, flags);
883 	ida_simple_remove(&tgtport->assoc_cnt, assoc->a_id);
884 	kfree(assoc);
885 	nvmet_fc_tgtport_put(tgtport);
886 }
887 
888 static void
889 nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc)
890 {
891 	kref_put(&assoc->ref, nvmet_fc_target_assoc_free);
892 }
893 
894 static int
895 nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc)
896 {
897 	return kref_get_unless_zero(&assoc->ref);
898 }
899 
900 static void
901 nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
902 {
903 	struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
904 	struct nvmet_fc_tgt_queue *queue;
905 	unsigned long flags;
906 	int i;
907 
908 	spin_lock_irqsave(&tgtport->lock, flags);
909 	for (i = NVMET_NR_QUEUES; i >= 0; i--) {
910 		queue = assoc->queues[i];
911 		if (queue) {
912 			if (!nvmet_fc_tgt_q_get(queue))
913 				continue;
914 			spin_unlock_irqrestore(&tgtport->lock, flags);
915 			nvmet_fc_delete_target_queue(queue);
916 			nvmet_fc_tgt_q_put(queue);
917 			spin_lock_irqsave(&tgtport->lock, flags);
918 		}
919 	}
920 	spin_unlock_irqrestore(&tgtport->lock, flags);
921 
922 	nvmet_fc_tgt_a_put(assoc);
923 }
924 
925 static struct nvmet_fc_tgt_assoc *
926 nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport *tgtport,
927 				u64 association_id)
928 {
929 	struct nvmet_fc_tgt_assoc *assoc;
930 	struct nvmet_fc_tgt_assoc *ret = NULL;
931 	unsigned long flags;
932 
933 	spin_lock_irqsave(&tgtport->lock, flags);
934 	list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
935 		if (association_id == assoc->association_id) {
936 			ret = assoc;
937 			nvmet_fc_tgt_a_get(assoc);
938 			break;
939 		}
940 	}
941 	spin_unlock_irqrestore(&tgtport->lock, flags);
942 
943 	return ret;
944 }
945 
946 
947 /**
948  * nvmet_fc_register_targetport - transport entry point called by an
949  *                              LLDD to register the existence of a local
950  *                              NVME subsystem FC port.
951  * @pinfo:     pointer to information about the port to be registered
952  * @template:  LLDD entrypoints and operational parameters for the port
953  * @dev:       physical hardware device node the port corresponds to. Will be
954  *             used for DMA mappings
955  * @portptr:   pointer to a target port pointer. Upon success, the routine
956  *             will allocate a nvmet_fc_target_port structure and place its
957  *             address in the target port pointer. Upon failure, the target
958  *             port pointer will be set to NULL.
959  *
960  * Returns:
961  * a completion status. Must be 0 upon success; a negative errno
962  * (ex: -ENXIO) upon failure.
963  */
964 int
965 nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
966 			struct nvmet_fc_target_template *template,
967 			struct device *dev,
968 			struct nvmet_fc_target_port **portptr)
969 {
970 	struct nvmet_fc_tgtport *newrec;
971 	unsigned long flags;
972 	int ret, idx;
973 
974 	if (!template->xmt_ls_rsp || !template->fcp_op ||
975 	    !template->fcp_abort ||
976 	    !template->fcp_req_release || !template->targetport_delete ||
977 	    !template->max_hw_queues || !template->max_sgl_segments ||
978 	    !template->max_dif_sgl_segments || !template->dma_boundary) {
979 		ret = -EINVAL;
980 		goto out_regtgt_failed;
981 	}
982 
983 	newrec = kzalloc((sizeof(*newrec) + template->target_priv_sz),
984 			 GFP_KERNEL);
985 	if (!newrec) {
986 		ret = -ENOMEM;
987 		goto out_regtgt_failed;
988 	}
989 
990 	idx = ida_simple_get(&nvmet_fc_tgtport_cnt, 0, 0, GFP_KERNEL);
991 	if (idx < 0) {
992 		ret = -ENOSPC;
993 		goto out_fail_kfree;
994 	}
995 
996 	if (!get_device(dev) && dev) {
997 		ret = -ENODEV;
998 		goto out_ida_put;
999 	}
1000 
1001 	newrec->fc_target_port.node_name = pinfo->node_name;
1002 	newrec->fc_target_port.port_name = pinfo->port_name;
1003 	newrec->fc_target_port.private = &newrec[1];
1004 	newrec->fc_target_port.port_id = pinfo->port_id;
1005 	newrec->fc_target_port.port_num = idx;
1006 	INIT_LIST_HEAD(&newrec->tgt_list);
1007 	newrec->dev = dev;
1008 	newrec->ops = template;
1009 	spin_lock_init(&newrec->lock);
1010 	INIT_LIST_HEAD(&newrec->ls_list);
1011 	INIT_LIST_HEAD(&newrec->ls_busylist);
1012 	INIT_LIST_HEAD(&newrec->assoc_list);
1013 	kref_init(&newrec->ref);
1014 	ida_init(&newrec->assoc_cnt);
1015 	newrec->max_sg_cnt = min_t(u32, NVMET_FC_MAX_XFR_SGENTS,
1016 					template->max_sgl_segments);
1017 
1018 	ret = nvmet_fc_alloc_ls_iodlist(newrec);
1019 	if (ret) {
1020 		ret = -ENOMEM;
1021 		goto out_free_newrec;
1022 	}
1023 
1024 	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
1025 	list_add_tail(&newrec->tgt_list, &nvmet_fc_target_list);
1026 	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1027 
1028 	*portptr = &newrec->fc_target_port;
1029 	return 0;
1030 
1031 out_free_newrec:
1032 	put_device(dev);
1033 out_ida_put:
1034 	ida_simple_remove(&nvmet_fc_tgtport_cnt, idx);
1035 out_fail_kfree:
1036 	kfree(newrec);
1037 out_regtgt_failed:
1038 	*portptr = NULL;
1039 	return ret;
1040 }
1041 EXPORT_SYMBOL_GPL(nvmet_fc_register_targetport);
1042 
1043 
1044 static void
1045 nvmet_fc_free_tgtport(struct kref *ref)
1046 {
1047 	struct nvmet_fc_tgtport *tgtport =
1048 		container_of(ref, struct nvmet_fc_tgtport, ref);
1049 	struct device *dev = tgtport->dev;
1050 	unsigned long flags;
1051 
1052 	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
1053 	list_del(&tgtport->tgt_list);
1054 	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1055 
1056 	nvmet_fc_free_ls_iodlist(tgtport);
1057 
1058 	/* let the LLDD know we've finished tearing it down */
1059 	tgtport->ops->targetport_delete(&tgtport->fc_target_port);
1060 
1061 	ida_simple_remove(&nvmet_fc_tgtport_cnt,
1062 			tgtport->fc_target_port.port_num);
1063 
1064 	ida_destroy(&tgtport->assoc_cnt);
1065 
1066 	kfree(tgtport);
1067 
1068 	put_device(dev);
1069 }
1070 
1071 static void
1072 nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport)
1073 {
1074 	kref_put(&tgtport->ref, nvmet_fc_free_tgtport);
1075 }
1076 
1077 static int
1078 nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport)
1079 {
1080 	return kref_get_unless_zero(&tgtport->ref);
1081 }
1082 
1083 static void
1084 __nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
1085 {
1086 	struct nvmet_fc_tgt_assoc *assoc, *next;
1087 	unsigned long flags;
1088 
1089 	spin_lock_irqsave(&tgtport->lock, flags);
1090 	list_for_each_entry_safe(assoc, next,
1091 				&tgtport->assoc_list, a_list) {
1092 		if (!nvmet_fc_tgt_a_get(assoc))
1093 			continue;
1094 		spin_unlock_irqrestore(&tgtport->lock, flags);
1095 		nvmet_fc_delete_target_assoc(assoc);
1096 		nvmet_fc_tgt_a_put(assoc);
1097 		spin_lock_irqsave(&tgtport->lock, flags);
1098 	}
1099 	spin_unlock_irqrestore(&tgtport->lock, flags);
1100 }
1101 
1102 /*
1103  * nvmet layer has called to terminate an association
1104  */
1105 static void
1106 nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
1107 {
1108 	struct nvmet_fc_tgtport *tgtport, *next;
1109 	struct nvmet_fc_tgt_assoc *assoc;
1110 	struct nvmet_fc_tgt_queue *queue;
1111 	unsigned long flags;
1112 	bool found_ctrl = false;
1113 
1114 	/* this is a bit ugly, but don't want to make locks layered */
1115 	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
1116 	list_for_each_entry_safe(tgtport, next, &nvmet_fc_target_list,
1117 			tgt_list) {
1118 		if (!nvmet_fc_tgtport_get(tgtport))
1119 			continue;
1120 		spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1121 
1122 		spin_lock_irqsave(&tgtport->lock, flags);
1123 		list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
1124 			queue = assoc->queues[0];
1125 			if (queue && queue->nvme_sq.ctrl == ctrl) {
1126 				if (nvmet_fc_tgt_a_get(assoc))
1127 					found_ctrl = true;
1128 				break;
1129 			}
1130 		}
1131 		spin_unlock_irqrestore(&tgtport->lock, flags);
1132 
1133 		nvmet_fc_tgtport_put(tgtport);
1134 
1135 		if (found_ctrl) {
1136 			schedule_work(&assoc->del_work);
1137 			return;
1138 		}
1139 
1140 		spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
1141 	}
1142 	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1143 }
1144 
1145 /**
1146  * nvmet_fc_unregister_targetport - transport entry point called by an
1147  *                              LLDD to deregister/remove a previously
1148  *                              registered local NVME subsystem FC port.
1149  * @target_port: pointer to the (registered) target port that is to be
1150  *           deregistered.
1151  *
1152  * Returns:
1153  * a completion status. Must be 0 upon success; a negative errno
1154  * (ex: -ENXIO) upon failure.
1155  */
1156 int
1157 nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port)
1158 {
1159 	struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
1160 
1161 	/* terminate any outstanding associations */
1162 	__nvmet_fc_free_assocs(tgtport);
1163 
1164 	nvmet_fc_tgtport_put(tgtport);
1165 
1166 	return 0;
1167 }
1168 EXPORT_SYMBOL_GPL(nvmet_fc_unregister_targetport);
1169 
1170 
1171 /* *********************** FC-NVME LS Handling **************************** */
1172 
1173 
1174 static void
1175 nvmet_fc_format_rsp_hdr(void *buf, u8 ls_cmd, __be32 desc_len, u8 rqst_ls_cmd)
1176 {
1177 	struct fcnvme_ls_acc_hdr *acc = buf;
1178 
1179 	acc->w0.ls_cmd = ls_cmd;
1180 	acc->desc_list_len = desc_len;
1181 	acc->rqst.desc_tag = cpu_to_be32(FCNVME_LSDESC_RQST);
1182 	acc->rqst.desc_len =
1183 			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst));
1184 	acc->rqst.w0.ls_cmd = rqst_ls_cmd;
1185 }
1186 
1187 static int
1188 nvmet_fc_format_rjt(void *buf, u16 buflen, u8 ls_cmd,
1189 			u8 reason, u8 explanation, u8 vendor)
1190 {
1191 	struct fcnvme_ls_rjt *rjt = buf;
1192 
1193 	nvmet_fc_format_rsp_hdr(buf, FCNVME_LSDESC_RQST,
1194 			fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_rjt)),
1195 			ls_cmd);
1196 	rjt->rjt.desc_tag = cpu_to_be32(FCNVME_LSDESC_RJT);
1197 	rjt->rjt.desc_len = fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rjt));
1198 	rjt->rjt.reason_code = reason;
1199 	rjt->rjt.reason_explanation = explanation;
1200 	rjt->rjt.vendor = vendor;
1201 
1202 	return sizeof(struct fcnvme_ls_rjt);
1203 }
1204 
1205 /* Validation Error indexes into the string table below */
1206 enum {
1207 	VERR_NO_ERROR		= 0,
1208 	VERR_CR_ASSOC_LEN	= 1,
1209 	VERR_CR_ASSOC_RQST_LEN	= 2,
1210 	VERR_CR_ASSOC_CMD	= 3,
1211 	VERR_CR_ASSOC_CMD_LEN	= 4,
1212 	VERR_ERSP_RATIO		= 5,
1213 	VERR_ASSOC_ALLOC_FAIL	= 6,
1214 	VERR_QUEUE_ALLOC_FAIL	= 7,
1215 	VERR_CR_CONN_LEN	= 8,
1216 	VERR_CR_CONN_RQST_LEN	= 9,
1217 	VERR_ASSOC_ID		= 10,
1218 	VERR_ASSOC_ID_LEN	= 11,
1219 	VERR_NO_ASSOC		= 12,
1220 	VERR_CONN_ID		= 13,
1221 	VERR_CONN_ID_LEN	= 14,
1222 	VERR_NO_CONN		= 15,
1223 	VERR_CR_CONN_CMD	= 16,
1224 	VERR_CR_CONN_CMD_LEN	= 17,
1225 	VERR_DISCONN_LEN	= 18,
1226 	VERR_DISCONN_RQST_LEN	= 19,
1227 	VERR_DISCONN_CMD	= 20,
1228 	VERR_DISCONN_CMD_LEN	= 21,
1229 	VERR_DISCONN_SCOPE	= 22,
1230 	VERR_RS_LEN		= 23,
1231 	VERR_RS_RQST_LEN	= 24,
1232 	VERR_RS_CMD		= 25,
1233 	VERR_RS_CMD_LEN		= 26,
1234 	VERR_RS_RCTL		= 27,
1235 	VERR_RS_RO		= 28,
1236 };
1237 
1238 static char *validation_errors[] = {
1239 	"OK",
1240 	"Bad CR_ASSOC Length",
1241 	"Bad CR_ASSOC Rqst Length",
1242 	"Not CR_ASSOC Cmd",
1243 	"Bad CR_ASSOC Cmd Length",
1244 	"Bad Ersp Ratio",
1245 	"Association Allocation Failed",
1246 	"Queue Allocation Failed",
1247 	"Bad CR_CONN Length",
1248 	"Bad CR_CONN Rqst Length",
1249 	"Not Association ID",
1250 	"Bad Association ID Length",
1251 	"No Association",
1252 	"Not Connection ID",
1253 	"Bad Connection ID Length",
1254 	"No Connection",
1255 	"Not CR_CONN Cmd",
1256 	"Bad CR_CONN Cmd Length",
1257 	"Bad DISCONN Length",
1258 	"Bad DISCONN Rqst Length",
1259 	"Not DISCONN Cmd",
1260 	"Bad DISCONN Cmd Length",
1261 	"Bad Disconnect Scope",
1262 	"Bad RS Length",
1263 	"Bad RS Rqst Length",
1264 	"Not RS Cmd",
1265 	"Bad RS Cmd Length",
1266 	"Bad RS R_CTL",
1267 	"Bad RS Relative Offset",
1268 };
1269 
1270 static void
1271 nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport,
1272 			struct nvmet_fc_ls_iod *iod)
1273 {
1274 	struct fcnvme_ls_cr_assoc_rqst *rqst =
1275 				(struct fcnvme_ls_cr_assoc_rqst *)iod->rqstbuf;
1276 	struct fcnvme_ls_cr_assoc_acc *acc =
1277 				(struct fcnvme_ls_cr_assoc_acc *)iod->rspbuf;
1278 	struct nvmet_fc_tgt_queue *queue;
1279 	int ret = 0;
1280 
1281 	memset(acc, 0, sizeof(*acc));
1282 
1283 	/*
1284 	 * FC-NVME spec changes. Initiators send descriptors of differing
1285 	 * lengths because the stated padding size for the Create Association
1286 	 * Cmd descriptor was incorrect.
1287 	 * Accept anything of "minimum" length. Assume the format is per the
1288 	 * 1.15 spec (with HOSTID reduced to 16 bytes), and ignore how long
1289 	 * the trailing pad is.
1290 	 */
1291 	if (iod->rqstdatalen < FCNVME_LSDESC_CRA_RQST_MINLEN)
1292 		ret = VERR_CR_ASSOC_LEN;
1293 	else if (be32_to_cpu(rqst->desc_list_len) <
1294 			FCNVME_LSDESC_CRA_RQST_MIN_LISTLEN)
1295 		ret = VERR_CR_ASSOC_RQST_LEN;
1296 	else if (rqst->assoc_cmd.desc_tag !=
1297 			cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD))
1298 		ret = VERR_CR_ASSOC_CMD;
1299 	else if (be32_to_cpu(rqst->assoc_cmd.desc_len) <
1300 			FCNVME_LSDESC_CRA_CMD_DESC_MIN_DESCLEN)
1301 		ret = VERR_CR_ASSOC_CMD_LEN;
1302 	else if (!rqst->assoc_cmd.ersp_ratio ||
1303 		 (be16_to_cpu(rqst->assoc_cmd.ersp_ratio) >=
1304 				be16_to_cpu(rqst->assoc_cmd.sqsize)))
1305 		ret = VERR_ERSP_RATIO;
1306 
1307 	else {
1308 		/* new association w/ admin queue */
1309 		iod->assoc = nvmet_fc_alloc_target_assoc(tgtport);
1310 		if (!iod->assoc)
1311 			ret = VERR_ASSOC_ALLOC_FAIL;
1312 		else {
1313 			queue = nvmet_fc_alloc_target_queue(iod->assoc, 0,
1314 					be16_to_cpu(rqst->assoc_cmd.sqsize));
1315 			if (!queue)
1316 				ret = VERR_QUEUE_ALLOC_FAIL;
1317 		}
1318 	}
1319 
1320 	if (ret) {
1321 		dev_err(tgtport->dev,
1322 			"Create Association LS failed: %s\n",
1323 			validation_errors[ret]);
1324 		iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
1325 				NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
1326 				FCNVME_RJT_RC_LOGIC,
1327 				FCNVME_RJT_EXP_NONE, 0);
1328 		return;
1329 	}
1330 
1331 	queue->ersp_ratio = be16_to_cpu(rqst->assoc_cmd.ersp_ratio);
1332 	atomic_set(&queue->connected, 1);
1333 	queue->sqhd = 0;	/* best place to init value */
1334 
1335 	/* format a response */
1336 
1337 	iod->lsreq->rsplen = sizeof(*acc);
1338 
1339 	nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
1340 			fcnvme_lsdesc_len(
1341 				sizeof(struct fcnvme_ls_cr_assoc_acc)),
1342 			FCNVME_LS_CREATE_ASSOCIATION);
1343 	acc->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
1344 	acc->associd.desc_len =
1345 			fcnvme_lsdesc_len(
1346 				sizeof(struct fcnvme_lsdesc_assoc_id));
1347 	acc->associd.association_id =
1348 			cpu_to_be64(nvmet_fc_makeconnid(iod->assoc, 0));
1349 	acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
1350 	acc->connectid.desc_len =
1351 			fcnvme_lsdesc_len(
1352 				sizeof(struct fcnvme_lsdesc_conn_id));
1353 	acc->connectid.connection_id = acc->associd.association_id;
1354 }
1355 
1356 static void
1357 nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport *tgtport,
1358 			struct nvmet_fc_ls_iod *iod)
1359 {
1360 	struct fcnvme_ls_cr_conn_rqst *rqst =
1361 				(struct fcnvme_ls_cr_conn_rqst *)iod->rqstbuf;
1362 	struct fcnvme_ls_cr_conn_acc *acc =
1363 				(struct fcnvme_ls_cr_conn_acc *)iod->rspbuf;
1364 	struct nvmet_fc_tgt_queue *queue;
1365 	int ret = 0;
1366 
1367 	memset(acc, 0, sizeof(*acc));
1368 
1369 	if (iod->rqstdatalen < sizeof(struct fcnvme_ls_cr_conn_rqst))
1370 		ret = VERR_CR_CONN_LEN;
1371 	else if (rqst->desc_list_len !=
1372 			fcnvme_lsdesc_len(
1373 				sizeof(struct fcnvme_ls_cr_conn_rqst)))
1374 		ret = VERR_CR_CONN_RQST_LEN;
1375 	else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
1376 		ret = VERR_ASSOC_ID;
1377 	else if (rqst->associd.desc_len !=
1378 			fcnvme_lsdesc_len(
1379 				sizeof(struct fcnvme_lsdesc_assoc_id)))
1380 		ret = VERR_ASSOC_ID_LEN;
1381 	else if (rqst->connect_cmd.desc_tag !=
1382 			cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD))
1383 		ret = VERR_CR_CONN_CMD;
1384 	else if (rqst->connect_cmd.desc_len !=
1385 			fcnvme_lsdesc_len(
1386 				sizeof(struct fcnvme_lsdesc_cr_conn_cmd)))
1387 		ret = VERR_CR_CONN_CMD_LEN;
1388 	else if (!rqst->connect_cmd.ersp_ratio ||
1389 		 (be16_to_cpu(rqst->connect_cmd.ersp_ratio) >=
1390 				be16_to_cpu(rqst->connect_cmd.sqsize)))
1391 		ret = VERR_ERSP_RATIO;
1392 
1393 	else {
1394 		/* new io queue */
1395 		iod->assoc = nvmet_fc_find_target_assoc(tgtport,
1396 				be64_to_cpu(rqst->associd.association_id));
1397 		if (!iod->assoc)
1398 			ret = VERR_NO_ASSOC;
1399 		else {
1400 			queue = nvmet_fc_alloc_target_queue(iod->assoc,
1401 					be16_to_cpu(rqst->connect_cmd.qid),
1402 					be16_to_cpu(rqst->connect_cmd.sqsize));
1403 			if (!queue)
1404 				ret = VERR_QUEUE_ALLOC_FAIL;
1405 
1406 			/* release get taken in nvmet_fc_find_target_assoc */
1407 			nvmet_fc_tgt_a_put(iod->assoc);
1408 		}
1409 	}
1410 
1411 	if (ret) {
1412 		dev_err(tgtport->dev,
1413 			"Create Connection LS failed: %s\n",
1414 			validation_errors[ret]);
1415 		iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
1416 				NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
1417 				(ret == VERR_NO_ASSOC) ?
1418 					FCNVME_RJT_RC_INV_ASSOC :
1419 					FCNVME_RJT_RC_LOGIC,
1420 				FCNVME_RJT_EXP_NONE, 0);
1421 		return;
1422 	}
1423 
1424 	queue->ersp_ratio = be16_to_cpu(rqst->connect_cmd.ersp_ratio);
1425 	atomic_set(&queue->connected, 1);
1426 	queue->sqhd = 0;	/* best place to init value */
1427 
1428 	/* format a response */
1429 
1430 	iod->lsreq->rsplen = sizeof(*acc);
1431 
1432 	nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
1433 			fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)),
1434 			FCNVME_LS_CREATE_CONNECTION);
1435 	acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
1436 	acc->connectid.desc_len =
1437 			fcnvme_lsdesc_len(
1438 				sizeof(struct fcnvme_lsdesc_conn_id));
1439 	acc->connectid.connection_id =
1440 			cpu_to_be64(nvmet_fc_makeconnid(iod->assoc,
1441 				be16_to_cpu(rqst->connect_cmd.qid)));
1442 }
1443 
1444 static void
1445 nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
1446 			struct nvmet_fc_ls_iod *iod)
1447 {
1448 	struct fcnvme_ls_disconnect_rqst *rqst =
1449 			(struct fcnvme_ls_disconnect_rqst *)iod->rqstbuf;
1450 	struct fcnvme_ls_disconnect_acc *acc =
1451 			(struct fcnvme_ls_disconnect_acc *)iod->rspbuf;
1452 	struct nvmet_fc_tgt_queue *queue = NULL;
1453 	struct nvmet_fc_tgt_assoc *assoc;
1454 	int ret = 0;
1455 	bool del_assoc = false;
1456 
1457 	memset(acc, 0, sizeof(*acc));
1458 
1459 	if (iod->rqstdatalen < sizeof(struct fcnvme_ls_disconnect_rqst))
1460 		ret = VERR_DISCONN_LEN;
1461 	else if (rqst->desc_list_len !=
1462 			fcnvme_lsdesc_len(
1463 				sizeof(struct fcnvme_ls_disconnect_rqst)))
1464 		ret = VERR_DISCONN_RQST_LEN;
1465 	else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
1466 		ret = VERR_ASSOC_ID;
1467 	else if (rqst->associd.desc_len !=
1468 			fcnvme_lsdesc_len(
1469 				sizeof(struct fcnvme_lsdesc_assoc_id)))
1470 		ret = VERR_ASSOC_ID_LEN;
1471 	else if (rqst->discon_cmd.desc_tag !=
1472 			cpu_to_be32(FCNVME_LSDESC_DISCONN_CMD))
1473 		ret = VERR_DISCONN_CMD;
1474 	else if (rqst->discon_cmd.desc_len !=
1475 			fcnvme_lsdesc_len(
1476 				sizeof(struct fcnvme_lsdesc_disconn_cmd)))
1477 		ret = VERR_DISCONN_CMD_LEN;
1478 	else if ((rqst->discon_cmd.scope != FCNVME_DISCONN_ASSOCIATION) &&
1479 			(rqst->discon_cmd.scope != FCNVME_DISCONN_CONNECTION))
1480 		ret = VERR_DISCONN_SCOPE;
1481 	else {
1482 		/* match an active association */
1483 		assoc = nvmet_fc_find_target_assoc(tgtport,
1484 				be64_to_cpu(rqst->associd.association_id));
1485 		iod->assoc = assoc;
1486 		if (assoc) {
1487 			if (rqst->discon_cmd.scope ==
1488 					FCNVME_DISCONN_CONNECTION) {
1489 				queue = nvmet_fc_find_target_queue(tgtport,
1490 						be64_to_cpu(
1491 							rqst->discon_cmd.id));
1492 				if (!queue) {
1493 					nvmet_fc_tgt_a_put(assoc);
1494 					ret = VERR_NO_CONN;
1495 				}
1496 			}
1497 		} else
1498 			ret = VERR_NO_ASSOC;
1499 	}
1500 
1501 	if (ret) {
1502 		dev_err(tgtport->dev,
1503 			"Disconnect LS failed: %s\n",
1504 			validation_errors[ret]);
1505 		iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
1506 				NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
1507 				(ret == VERR_NO_ASSOC) ?
1508 					FCNVME_RJT_RC_INV_ASSOC :
1509 					(ret == VERR_NO_CONN) ?
1510 						FCNVME_RJT_RC_INV_CONN :
1511 						FCNVME_RJT_RC_LOGIC,
1512 				FCNVME_RJT_EXP_NONE, 0);
1513 		return;
1514 	}
1515 
1516 	/* format a response */
1517 
1518 	iod->lsreq->rsplen = sizeof(*acc);
1519 
1520 	nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
1521 			fcnvme_lsdesc_len(
1522 				sizeof(struct fcnvme_ls_disconnect_acc)),
1523 			FCNVME_LS_DISCONNECT);
1524 
1525 
1526 	/* are we to delete a Connection ID (queue) */
1527 	if (queue) {
1528 		int qid = queue->qid;
1529 
1530 		nvmet_fc_delete_target_queue(queue);
1531 
1532 		/* release the get taken by find_target_queue */
1533 		nvmet_fc_tgt_q_put(queue);
1534 
1535 		/* tear association down if the admin queue (qid 0) terminated */
1536 		if (!qid)
1537 			del_assoc = true;
1538 	}
1539 
1540 	/* release get taken in nvmet_fc_find_target_assoc */
1541 	nvmet_fc_tgt_a_put(iod->assoc);
1542 
1543 	if (del_assoc)
1544 		nvmet_fc_delete_target_assoc(iod->assoc);
1545 }
1546 
1547 
1548 /* *********************** NVME Ctrl Routines **************************** */
1549 
1550 
1551 static void nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req);
1552 
1553 static struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops;
1554 
1555 static void
1556 nvmet_fc_xmt_ls_rsp_done(struct nvmefc_tgt_ls_req *lsreq)
1557 {
1558 	struct nvmet_fc_ls_iod *iod = lsreq->nvmet_fc_private;
1559 	struct nvmet_fc_tgtport *tgtport = iod->tgtport;
1560 
1561 	fc_dma_sync_single_for_cpu(tgtport->dev, iod->rspdma,
1562 				NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
1563 	nvmet_fc_free_ls_iod(tgtport, iod);
1564 	nvmet_fc_tgtport_put(tgtport);
1565 }
1566 
1567 static void
1568 nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
1569 				struct nvmet_fc_ls_iod *iod)
1570 {
1571 	int ret;
1572 
1573 	fc_dma_sync_single_for_device(tgtport->dev, iod->rspdma,
1574 				  NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
1575 
1576 	ret = tgtport->ops->xmt_ls_rsp(&tgtport->fc_target_port, iod->lsreq);
1577 	if (ret)
1578 		nvmet_fc_xmt_ls_rsp_done(iod->lsreq);
1579 }
1580 
1581 /*
1582  * Actual processing routine for received FC-NVME LS Requests from the LLD
1583  */
1584 static void
1585 nvmet_fc_handle_ls_rqst(struct nvmet_fc_tgtport *tgtport,
1586 			struct nvmet_fc_ls_iod *iod)
1587 {
1588 	struct fcnvme_ls_rqst_w0 *w0 =
1589 			(struct fcnvme_ls_rqst_w0 *)iod->rqstbuf;
1590 
1591 	iod->lsreq->nvmet_fc_private = iod;
1592 	iod->lsreq->rspbuf = iod->rspbuf;
1593 	iod->lsreq->rspdma = iod->rspdma;
1594 	iod->lsreq->done = nvmet_fc_xmt_ls_rsp_done;
1595 	/* Be preventive. Handlers will later set a valid length. */
1596 	iod->lsreq->rsplen = 0;
1597 
1598 	iod->assoc = NULL;
1599 
1600 	/*
1601 	 * handlers:
1602 	 *   parse request input, execute the request, and format the
1603 	 *   LS response
1604 	 */
1605 	switch (w0->ls_cmd) {
1606 	case FCNVME_LS_CREATE_ASSOCIATION:
1607 		/* Creates Association and initial Admin Queue/Connection */
1608 		nvmet_fc_ls_create_association(tgtport, iod);
1609 		break;
1610 	case FCNVME_LS_CREATE_CONNECTION:
1611 		/* Creates an IO Queue/Connection */
1612 		nvmet_fc_ls_create_connection(tgtport, iod);
1613 		break;
1614 	case FCNVME_LS_DISCONNECT:
1615 		/* Terminate a Queue/Connection or the Association */
1616 		nvmet_fc_ls_disconnect(tgtport, iod);
1617 		break;
1618 	default:
1619 		iod->lsreq->rsplen = nvmet_fc_format_rjt(iod->rspbuf,
1620 				NVME_FC_MAX_LS_BUFFER_SIZE, w0->ls_cmd,
1621 				FCNVME_RJT_RC_INVAL, FCNVME_RJT_EXP_NONE, 0);
1622 	}
1623 
1624 	nvmet_fc_xmt_ls_rsp(tgtport, iod);
1625 }
1626 
1627 /*
1628  * Work item handler that processes a received FC-NVME LS Request queued by nvmet_fc_rcv_ls_req()
1629  */
1630 static void
1631 nvmet_fc_handle_ls_rqst_work(struct work_struct *work)
1632 {
1633 	struct nvmet_fc_ls_iod *iod =
1634 		container_of(work, struct nvmet_fc_ls_iod, work);
1635 	struct nvmet_fc_tgtport *tgtport = iod->tgtport;
1636 
1637 	nvmet_fc_handle_ls_rqst(tgtport, iod);
1638 }
1639 
1640 
1641 /**
1642  * nvmet_fc_rcv_ls_req - transport entry point called by an LLDD
1643  *                       upon the reception of an NVME LS request.
1644  *
1645  * The nvmet-fc layer will copy payload to an internal structure for
1646  * processing.  As such, upon completion of the routine, the LLDD may
1647  * immediately free/reuse the LS request buffer passed in the call.
1648  *
1649  * If this routine returns error, the LLDD should abort the exchange.
1650  *
1651  * @tgtport:    pointer to the (registered) target port the LS was
1652  *              received on.
1653  * @lsreq:      pointer to a lsreq request structure to be used to reference
1654  *              the exchange corresponding to the LS.
1655  * @lsreqbuf:   pointer to the buffer containing the LS Request
1656  * @lsreqbuf_len: length, in bytes, of the received LS request
1657  */
1658 int
1659 nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port,
1660 			struct nvmefc_tgt_ls_req *lsreq,
1661 			void *lsreqbuf, u32 lsreqbuf_len)
1662 {
1663 	struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
1664 	struct nvmet_fc_ls_iod *iod;
1665 
1666 	if (lsreqbuf_len > NVME_FC_MAX_LS_BUFFER_SIZE)
1667 		return -E2BIG;
1668 
1669 	if (!nvmet_fc_tgtport_get(tgtport))
1670 		return -ESHUTDOWN;
1671 
1672 	iod = nvmet_fc_alloc_ls_iod(tgtport);
1673 	if (!iod) {
1674 		nvmet_fc_tgtport_put(tgtport);
1675 		return -ENOENT;
1676 	}
1677 
1678 	iod->lsreq = lsreq;
1679 	iod->fcpreq = NULL;
1680 	memcpy(iod->rqstbuf, lsreqbuf, lsreqbuf_len);
1681 	iod->rqstdatalen = lsreqbuf_len;
1682 
1683 	schedule_work(&iod->work);
1684 
1685 	return 0;
1686 }
1687 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_ls_req);
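/*
 * Minimal usage sketch for an LLDD receiving an LS frame (editorial addition,
 * illustrative only; the lldd_* names and exchange/buffer handling are
 * assumptions, not part of this driver). The LLDD simply hands the payload to
 * nvmet_fc_rcv_ls_req() and may reuse its buffer once the call returns, since
 * nvmet-fc copies the payload internally.
 *
 *	static void lldd_ls_frame_rcvd(struct lldd_port *lport,
 *				struct lldd_exchange *xchg,
 *				void *payload, u32 payload_len)
 *	{
 *		struct nvmefc_tgt_ls_req *lsreq = &xchg->tgt_lsreq;
 *		int ret;
 *
 *		ret = nvmet_fc_rcv_ls_req(lport->targetport, lsreq,
 *					payload, payload_len);
 *		if (ret)
 *			lldd_abort_exchange(xchg);	// per the kernel-doc above
 *	}
 */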
1688 
1689 
1690 /*
1691  * **********************
1692  * Start of FCP handling
1693  * **********************
1694  */
1695 
1696 static int
1697 nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
1698 {
1699 	struct scatterlist *sg;
1700 	unsigned int nent;
1701 
1702 	sg = sgl_alloc(fod->req.transfer_len, GFP_KERNEL, &nent);
1703 	if (!sg)
1704 		goto out;
1705 
1706 	fod->data_sg = sg;
1707 	fod->data_sg_cnt = nent;
1708 	fod->data_sg_cnt = fc_dma_map_sg(fod->tgtport->dev, sg, nent,
1709 				((fod->io_dir == NVMET_FCP_WRITE) ?
1710 					DMA_FROM_DEVICE : DMA_TO_DEVICE));
1711 				/* note: write from initiator perspective */
1712 
1713 	return 0;
1714 
1715 out:
1716 	return NVME_SC_INTERNAL;
1717 }
1718 
1719 static void
1720 nvmet_fc_free_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
1721 {
1722 	if (!fod->data_sg || !fod->data_sg_cnt)
1723 		return;
1724 
1725 	fc_dma_unmap_sg(fod->tgtport->dev, fod->data_sg, fod->data_sg_cnt,
1726 				((fod->io_dir == NVMET_FCP_WRITE) ?
1727 					DMA_FROM_DEVICE : DMA_TO_DEVICE));
1728 	sgl_free(fod->data_sg);
1729 	fod->data_sg = NULL;
1730 	fod->data_sg_cnt = 0;
1731 }
1732 
1733 
1734 static bool
1735 queue_90percent_full(struct nvmet_fc_tgt_queue *q, u32 sqhd)
1736 {
1737 	u32 sqtail, used;
1738 
1739 	/* egad, this is ugly. And sqtail is just a best guess */
1740 	sqtail = atomic_read(&q->sqtail) % q->sqsize;
1741 
1742 	used = (sqtail < sqhd) ? (sqtail + q->sqsize - sqhd) : (sqtail - sqhd);
1743 	return ((used * 10) >= (((u32)(q->sqsize - 1) * 9)));
1744 }
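/*
 * Illustrative sketch (editorial addition, not part of the driver): with an
 * assumed sqsize of 32, the threshold above is (32 - 1) * 9 = 279, so the
 * queue is considered "90% full" once used * 10 >= 279, i.e. at 28 or more
 * entries between sqhd and the estimated sqtail.
 */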
1745 
1746 /*
1747  * Prep RSP payload.
1748  * May be a NVMET_FCOP_RSP or NVMET_FCOP_READDATA_RSP op
1749  */
1750 static void
1751 nvmet_fc_prep_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
1752 				struct nvmet_fc_fcp_iod *fod)
1753 {
1754 	struct nvme_fc_ersp_iu *ersp = &fod->rspiubuf;
1755 	struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
1756 	struct nvme_completion *cqe = &ersp->cqe;
1757 	u32 *cqewd = (u32 *)cqe;
1758 	bool send_ersp = false;
1759 	u32 rsn, rspcnt, xfr_length;
1760 
1761 	if (fod->fcpreq->op == NVMET_FCOP_READDATA_RSP)
1762 		xfr_length = fod->req.transfer_len;
1763 	else
1764 		xfr_length = fod->offset;
1765 
1766 	/*
1767 	 * check to see if we can send a 0's rsp.
1768 	 *   Note: to send a 0's response, the NVME-FC host transport will
1769 	 *   recreate the CQE. The host transport knows: sq id, SQHD (last
1770 	 *   seen in an ersp), and command_id. Thus it will create a
1771 	 *   zero-filled CQE with those known fields filled in. Transport
1772 	 *   must send an ersp for any condition where the cqe won't match
1773 	 *   this.
1774 	 *
1775 	 * Here are the FC-NVME mandated cases where we must send an ersp:
1776 	 *  every N responses, where N=ersp_ratio
1777 	 *  force fabric commands to send ersp's (not in FC-NVME but good
1778 	 *    practice)
1779 	 *  normal cmds: any time status is non-zero, or status is zero
1780 	 *     but words 0 or 1 are non-zero.
1781 	 *  the SQ is 90% or more full
1782 	 *  the cmd is a fused command
1783 	 *  transferred data length not equal to cmd iu length
1784 	 */
1785 	rspcnt = atomic_inc_return(&fod->queue->zrspcnt);
1786 	if (!(rspcnt % fod->queue->ersp_ratio) ||
1787 	    sqe->opcode == nvme_fabrics_command ||
1788 	    xfr_length != fod->req.transfer_len ||
1789 	    (le16_to_cpu(cqe->status) & 0xFFFE) || cqewd[0] || cqewd[1] ||
1790 	    (sqe->flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND)) ||
1791 	    queue_90percent_full(fod->queue, le16_to_cpu(cqe->sq_head)))
1792 		send_ersp = true;
1793 
1794 	/* re-set the fields */
1795 	fod->fcpreq->rspaddr = ersp;
1796 	fod->fcpreq->rspdma = fod->rspdma;
1797 
1798 	if (!send_ersp) {
1799 		memset(ersp, 0, NVME_FC_SIZEOF_ZEROS_RSP);
1800 		fod->fcpreq->rsplen = NVME_FC_SIZEOF_ZEROS_RSP;
1801 	} else {
1802 		ersp->iu_len = cpu_to_be16(sizeof(*ersp)/sizeof(u32));
1803 		rsn = atomic_inc_return(&fod->queue->rsn);
1804 		ersp->rsn = cpu_to_be32(rsn);
1805 		ersp->xfrd_len = cpu_to_be32(xfr_length);
1806 		fod->fcpreq->rsplen = sizeof(*ersp);
1807 	}
1808 
1809 	fc_dma_sync_single_for_device(tgtport->dev, fod->rspdma,
1810 				  sizeof(fod->rspiubuf), DMA_TO_DEVICE);
1811 }
1812 
1813 static void nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq);
1814 
1815 static void
1816 nvmet_fc_abort_op(struct nvmet_fc_tgtport *tgtport,
1817 				struct nvmet_fc_fcp_iod *fod)
1818 {
1819 	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
1820 
1821 	/* data no longer needed */
1822 	nvmet_fc_free_tgt_pgs(fod);
1823 
1824 	/*
1825 	 * if an ABTS was received or we issued the fcp_abort early
1826 	 * don't call abort routine again.
1827 	 */
1828 	/* no need to take lock - lock was taken earlier to get here */
1829 	if (!fod->aborted)
1830 		tgtport->ops->fcp_abort(&tgtport->fc_target_port, fcpreq);
1831 
1832 	nvmet_fc_free_fcp_iod(fod->queue, fod);
1833 }
1834 
1835 static void
1836 nvmet_fc_xmt_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
1837 				struct nvmet_fc_fcp_iod *fod)
1838 {
1839 	int ret;
1840 
1841 	fod->fcpreq->op = NVMET_FCOP_RSP;
1842 	fod->fcpreq->timeout = 0;
1843 
1844 	nvmet_fc_prep_fcp_rsp(tgtport, fod);
1845 
1846 	ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
1847 	if (ret)
1848 		nvmet_fc_abort_op(tgtport, fod);
1849 }
1850 
1851 static void
1852 nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
1853 				struct nvmet_fc_fcp_iod *fod, u8 op)
1854 {
1855 	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
1856 	unsigned long flags;
1857 	u32 tlen;
1858 	int ret;
1859 
1860 	fcpreq->op = op;
1861 	fcpreq->offset = fod->offset;
1862 	fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC;
1863 
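	/*
	 * Cap each data transfer at what the LLDD can map in a single sg
	 * list (max_sg_cnt pages); larger payloads are moved in successive
	 * chunks tracked by fod->offset.
	 */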
1864 	tlen = min_t(u32, tgtport->max_sg_cnt * PAGE_SIZE,
1865 			(fod->req.transfer_len - fod->offset));
1866 	fcpreq->transfer_length = tlen;
1867 	fcpreq->transferred_length = 0;
1868 	fcpreq->fcp_error = 0;
1869 	fcpreq->rsplen = 0;
1870 
1871 	fcpreq->sg = &fod->data_sg[fod->offset / PAGE_SIZE];
1872 	fcpreq->sg_cnt = DIV_ROUND_UP(tlen, PAGE_SIZE);
1873 
1874 	/*
1875 	 * If the last READDATA request: check if LLDD supports
1876 	 * combined xfr with response.
1877 	 */
1878 	if ((op == NVMET_FCOP_READDATA) &&
1879 	    ((fod->offset + fcpreq->transfer_length) == fod->req.transfer_len) &&
1880 	    (tgtport->ops->target_features & NVMET_FCTGTFEAT_READDATA_RSP)) {
1881 		fcpreq->op = NVMET_FCOP_READDATA_RSP;
1882 		nvmet_fc_prep_fcp_rsp(tgtport, fod);
1883 	}
1884 
1885 	ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
1886 	if (ret) {
1887 		/*
1888 		 * should be ok to set w/o lock as it's in the thread of
1889 		 * execution (not an async timer routine) and doesn't
1890 		 * contend with any clearing action
1891 		 */
1892 		fod->abort = true;
1893 
1894 		if (op == NVMET_FCOP_WRITEDATA) {
1895 			spin_lock_irqsave(&fod->flock, flags);
1896 			fod->writedataactive = false;
1897 			spin_unlock_irqrestore(&fod->flock, flags);
1898 			nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
1899 		} else /* NVMET_FCOP_READDATA or NVMET_FCOP_READDATA_RSP */ {
1900 			fcpreq->fcp_error = ret;
1901 			fcpreq->transferred_length = 0;
1902 			nvmet_fc_xmt_fcp_op_done(fod->fcpreq);
1903 		}
1904 	}
1905 }
1906 
1907 static inline bool
1908 __nvmet_fc_fod_op_abort(struct nvmet_fc_fcp_iod *fod, bool abort)
1909 {
1910 	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
1911 	struct nvmet_fc_tgtport *tgtport = fod->tgtport;
1912 
1913 	/* if in the middle of an io and we need to tear down */
1914 	if (abort) {
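		/*
		 * WRITEDATA: complete the nvmet request with an error so the
		 * nvme cmd done path sees fod->abort and performs the abort;
		 * any other op can abort the exchange directly.
		 */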
1915 		if (fcpreq->op == NVMET_FCOP_WRITEDATA) {
1916 			nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
1917 			return true;
1918 		}
1919 
1920 		nvmet_fc_abort_op(tgtport, fod);
1921 		return true;
1922 	}
1923 
1924 	return false;
1925 }
1926 
1927 /*
1928  * actual done handler for FCP operations when completed by the lldd
1929  */
1930 static void
1931 nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod)
1932 {
1933 	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
1934 	struct nvmet_fc_tgtport *tgtport = fod->tgtport;
1935 	unsigned long flags;
1936 	bool abort;
1937 
1938 	spin_lock_irqsave(&fod->flock, flags);
1939 	abort = fod->abort;
1940 	fod->writedataactive = false;
1941 	spin_unlock_irqrestore(&fod->flock, flags);
1942 
1943 	switch (fcpreq->op) {
1944 
1945 	case NVMET_FCOP_WRITEDATA:
1946 		if (__nvmet_fc_fod_op_abort(fod, abort))
1947 			return;
1948 		if (fcpreq->fcp_error ||
1949 		    fcpreq->transferred_length != fcpreq->transfer_length) {
1950 			spin_lock(&fod->flock);
1951 			fod->abort = true;
1952 			spin_unlock(&fod->flock);
1953 
1954 			nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
1955 			return;
1956 		}
1957 
1958 		fod->offset += fcpreq->transferred_length;
1959 		if (fod->offset != fod->req.transfer_len) {
1960 			spin_lock_irqsave(&fod->flock, flags);
1961 			fod->writedataactive = true;
1962 			spin_unlock_irqrestore(&fod->flock, flags);
1963 
1964 			/* transfer the next chunk */
1965 			nvmet_fc_transfer_fcp_data(tgtport, fod,
1966 						NVMET_FCOP_WRITEDATA);
1967 			return;
1968 		}
1969 
1970 		/* data transfer complete, resume with nvmet layer */
1971 		nvmet_req_execute(&fod->req);
1972 		break;
1973 
1974 	case NVMET_FCOP_READDATA:
1975 	case NVMET_FCOP_READDATA_RSP:
1976 		if (__nvmet_fc_fod_op_abort(fod, abort))
1977 			return;
1978 		if (fcpreq->fcp_error ||
1979 		    fcpreq->transferred_length != fcpreq->transfer_length) {
1980 			nvmet_fc_abort_op(tgtport, fod);
1981 			return;
1982 		}
1983 
1984 		/* success */
1985 
1986 		if (fcpreq->op == NVMET_FCOP_READDATA_RSP) {
1987 			/* data no longer needed */
1988 			nvmet_fc_free_tgt_pgs(fod);
1989 			nvmet_fc_free_fcp_iod(fod->queue, fod);
1990 			return;
1991 		}
1992 
1993 		fod->offset += fcpreq->transferred_length;
1994 		if (fod->offset != fod->req.transfer_len) {
1995 			/* transfer the next chunk */
1996 			nvmet_fc_transfer_fcp_data(tgtport, fod,
1997 						NVMET_FCOP_READDATA);
1998 			return;
1999 		}
2000 
2001 		/* data transfer complete, send response */
2002 
2003 		/* data no longer needed */
2004 		nvmet_fc_free_tgt_pgs(fod);
2005 
2006 		nvmet_fc_xmt_fcp_rsp(tgtport, fod);
2007 
2008 		break;
2009 
2010 	case NVMET_FCOP_RSP:
2011 		if (__nvmet_fc_fod_op_abort(fod, abort))
2012 			return;
2013 		nvmet_fc_free_fcp_iod(fod->queue, fod);
2014 		break;
2015 
2016 	default:
2017 		break;
2018 	}
2019 }
2020 
2021 static void
2022 nvmet_fc_fcp_rqst_op_done_work(struct work_struct *work)
2023 {
2024 	struct nvmet_fc_fcp_iod *fod =
2025 		container_of(work, struct nvmet_fc_fcp_iod, done_work);
2026 
2027 	nvmet_fc_fod_op_done(fod);
2028 }
2029 
2030 static void
2031 nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq)
2032 {
2033 	struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
2034 	struct nvmet_fc_tgt_queue *queue = fod->queue;
2035 
2036 	if (fod->tgtport->ops->target_features & NVMET_FCTGTFEAT_OPDONE_IN_ISR)
2037 		/* context switch so completion is not in ISR context */
2038 		queue_work_on(queue->cpu, queue->work_q, &fod->done_work);
2039 	else
2040 		nvmet_fc_fod_op_done(fod);
2041 }
2042 
2043 /*
2044  * actual completion handler after execution by the nvmet layer
2045  */
2046 static void
2047 __nvmet_fc_fcp_nvme_cmd_done(struct nvmet_fc_tgtport *tgtport,
2048 			struct nvmet_fc_fcp_iod *fod, int status)
2049 {
2050 	struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
2051 	struct nvme_completion *cqe = &fod->rspiubuf.cqe;
2052 	unsigned long flags;
2053 	bool abort;
2054 
2055 	spin_lock_irqsave(&fod->flock, flags);
2056 	abort = fod->abort;
2057 	spin_unlock_irqrestore(&fod->flock, flags);
2058 
2059 	/* if we have a CQE, snoop the last sq_head value */
2060 	if (!status)
2061 		fod->queue->sqhd = cqe->sq_head;
2062 
2063 	if (abort) {
2064 		nvmet_fc_abort_op(tgtport, fod);
2065 		return;
2066 	}
2067 
2068 	/* if an error occurred handling the cmd after initial parsing */
2069 	if (status) {
2070 		/* fudge up a failed CQE status for our transport error */
2071 		memset(cqe, 0, sizeof(*cqe));
2072 		cqe->sq_head = fod->queue->sqhd;	/* echo last cqe sqhd */
2073 		cqe->sq_id = cpu_to_le16(fod->queue->qid);
2074 		cqe->command_id = sqe->command_id;
2075 		cqe->status = cpu_to_le16(status);
2076 	} else {
2077 
2078 		/*
2079 		 * try to push the data even if the SQE status is non-zero.
2080 		 * There may be a status where data was still intended to
2081 		 * be moved.
2082 		 */
2083 		if ((fod->io_dir == NVMET_FCP_READ) && (fod->data_sg_cnt)) {
2084 			/* push the data over before sending rsp */
2085 			nvmet_fc_transfer_fcp_data(tgtport, fod,
2086 						NVMET_FCOP_READDATA);
2087 			return;
2088 		}
2089 
2090 		/* writes & no data - fall thru */
2091 	}
2092 
2093 	/* data no longer needed */
2094 	nvmet_fc_free_tgt_pgs(fod);
2095 
2096 	nvmet_fc_xmt_fcp_rsp(tgtport, fod);
2097 }
2098 
2099 
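/* nvmet layer ->queue_response entry point: invoked when the nvme cmd completes */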
2100 static void
2101 nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req)
2102 {
2103 	struct nvmet_fc_fcp_iod *fod = nvmet_req_to_fod(nvme_req);
2104 	struct nvmet_fc_tgtport *tgtport = fod->tgtport;
2105 
2106 	__nvmet_fc_fcp_nvme_cmd_done(tgtport, fod, 0);
2107 }
2108 
2109 
2110 /*
2111  * Actual processing routine for received FC-NVME FCP commands from the LLDD
2112  */
2113 static void
2114 nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
2115 			struct nvmet_fc_fcp_iod *fod)
2116 {
2117 	struct nvme_fc_cmd_iu *cmdiu = &fod->cmdiubuf;
2118 	u32 xfrlen = be32_to_cpu(cmdiu->data_len);
2119 	int ret;
2120 
2121 	/*
2122 	 * Fused commands are currently not supported in the Linux
2123 	 * implementation.
2124 	 *
2125 	 * As such, the FC transport implementation does not inspect
2126 	 * fused commands, nor does it hold and order their delivery to
2127 	 * the upper layer (based on csn) until both have been received.
2128 	 */
2129 
2130 	fod->fcpreq->done = nvmet_fc_xmt_fcp_op_done;
2131 
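	/*
	 * Cross-check the CMD IU direction flags against the SQE opcode;
	 * any mismatch is treated as a transport error and aborts the io.
	 */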
2132 	if (cmdiu->flags & FCNVME_CMD_FLAGS_WRITE) {
2133 		fod->io_dir = NVMET_FCP_WRITE;
2134 		if (!nvme_is_write(&cmdiu->sqe))
2135 			goto transport_error;
2136 	} else if (cmdiu->flags & FCNVME_CMD_FLAGS_READ) {
2137 		fod->io_dir = NVMET_FCP_READ;
2138 		if (nvme_is_write(&cmdiu->sqe))
2139 			goto transport_error;
2140 	} else {
2141 		fod->io_dir = NVMET_FCP_NODATA;
2142 		if (xfrlen)
2143 			goto transport_error;
2144 	}
2145 
2146 	fod->req.cmd = &fod->cmdiubuf.sqe;
2147 	fod->req.rsp = &fod->rspiubuf.cqe;
2148 	fod->req.port = fod->queue->port;
2149 
2150 	/* clear any response payload */
2151 	memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf));
2152 
2153 	fod->data_sg = NULL;
2154 	fod->data_sg_cnt = 0;
2155 
2156 	ret = nvmet_req_init(&fod->req,
2157 				&fod->queue->nvme_cq,
2158 				&fod->queue->nvme_sq,
2159 				&nvmet_fc_tgt_fcp_ops);
2160 	if (!ret) {
2161 		/* bad SQE content or invalid ctrl state */
2162 		/* nvmet layer has already called op done to send rsp. */
2163 		return;
2164 	}
2165 
2166 	fod->req.transfer_len = xfrlen;
2167 
2168 	/* keep a running counter of tail position */
2169 	atomic_inc(&fod->queue->sqtail);
2170 
2171 	if (fod->req.transfer_len) {
2172 		ret = nvmet_fc_alloc_tgt_pgs(fod);
2173 		if (ret) {
2174 			nvmet_req_complete(&fod->req, ret);
2175 			return;
2176 		}
2177 	}
2178 	fod->req.sg = fod->data_sg;
2179 	fod->req.sg_cnt = fod->data_sg_cnt;
2180 	fod->offset = 0;
2181 
2182 	if (fod->io_dir == NVMET_FCP_WRITE) {
2183 		/* pull the data over before invoking nvmet layer */
2184 		nvmet_fc_transfer_fcp_data(tgtport, fod, NVMET_FCOP_WRITEDATA);
2185 		return;
2186 	}
2187 
2188 	/*
2189 	 * Reads or no data:
2190 	 *
2191 	 * can invoke the nvmet layer now. If there is read data, the cmd
2192 	 * completion will push the data.
2193 	 */
2194 	nvmet_req_execute(&fod->req);
2195 	return;
2196 
2197 transport_error:
2198 	nvmet_fc_abort_op(tgtport, fod);
2199 }
2200 
2201 /*
2202  * Work-context handler for processing a received FC-NVME FCP command from the LLDD
2203  */
2204 static void
2205 nvmet_fc_handle_fcp_rqst_work(struct work_struct *work)
2206 {
2207 	struct nvmet_fc_fcp_iod *fod =
2208 		container_of(work, struct nvmet_fc_fcp_iod, work);
2209 	struct nvmet_fc_tgtport *tgtport = fod->tgtport;
2210 
2211 	nvmet_fc_handle_fcp_rqst(tgtport, fod);
2212 }
2213 
2214 /**
2215  * nvmet_fc_rcv_fcp_req - transport entry point called by an LLDD
2216  *                       upon the reception of an NVME FCP CMD IU.
2217  *
2218  * Pass a FC-NVME FCP CMD IU received from the FC link to the nvmet-fc
2219  * layer for processing.
2220  *
2221  * The nvmet_fc layer allocates a local job structure (struct
2222  * nvmet_fc_fcp_iod) from the queue for the io and copies the
2223  * CMD IU buffer to the job structure. As such, on a successful
2224  * completion (returns 0), the LLDD may immediately free/reuse
2225  * the CMD IU buffer passed in the call.
2226  *
2227  * However, in some circumstances, due to the packetized nature of FC
2228  * and the API of the FC LLDD - which may issue a hw command to send
2229  * the response but not receive the hw completion for that command,
2230  * and upcall the nvmet_fc layer, before a new command is
2231  * asynchronously received - it is possible for a command to be received
2232  * before the LLDD and nvmet_fc have recycled the job structure. This
2233  * gives the appearance of more commands received than fit in the sq.
2234  * To alleviate this scenario, a temporary queue is maintained in the
2235  * transport for pending LLDD requests waiting for a queue job structure.
2236  * In these "overrun" cases, a temporary queue element is allocated,
2237  * the LLDD request and CMD IU buffer information are remembered, and
2238  * the routine returns a -EOVERFLOW status. Subsequently, when a queue
2239  * job structure is freed, it is immediately reallocated for anything on
2240  * the pending request list. The LLDD's defer_rcv() callback is called,
2241  * informing the LLDD that it may reuse the CMD IU buffer, and the io
2242  * is then started normally with the transport.
2243  *
2244  * The LLDD, when receiving an -EOVERFLOW completion status, is to treat
2245  * the completion as successful but must not reuse the CMD IU buffer
2246  * until the LLDD's defer_rcv() callback has been called for the
2247  * corresponding struct nvmefc_tgt_fcp_req pointer.
2248  *
2249  * If there is any other condition in which an error occurs, the
2250  * transport will return a non-zero status indicating the error.
2251  * In all cases other than -EOVERFLOW, the transport has not accepted the
2252  * request and the LLDD should abort the exchange.
2253  *
2254  * @target_port: pointer to the (registered) target port the FCP CMD IU
2255  *              was received on.
2256  * @fcpreq:     pointer to a fcpreq request structure to be used to reference
2257  *              the exchange corresponding to the FCP Exchange.
2258  * @cmdiubuf:   pointer to the buffer containing the FCP CMD IU
2259  * @cmdiubuf_len: length, in bytes, of the received FCP CMD IU
2260  */
2261 int
2262 nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port,
2263 			struct nvmefc_tgt_fcp_req *fcpreq,
2264 			void *cmdiubuf, u32 cmdiubuf_len)
2265 {
2266 	struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
2267 	struct nvme_fc_cmd_iu *cmdiu = cmdiubuf;
2268 	struct nvmet_fc_tgt_queue *queue;
2269 	struct nvmet_fc_fcp_iod *fod;
2270 	struct nvmet_fc_defer_fcp_req *deferfcp;
2271 	unsigned long flags;
2272 
2273 	/* validate iu, so the connection id can be used to find the queue */
2274 	if ((cmdiubuf_len != sizeof(*cmdiu)) ||
2275 			(cmdiu->scsi_id != NVME_CMD_SCSI_ID) ||
2276 			(cmdiu->fc_id != NVME_CMD_FC_ID) ||
2277 			(be16_to_cpu(cmdiu->iu_len) != (sizeof(*cmdiu)/4)))
2278 		return -EIO;
2279 
2280 	queue = nvmet_fc_find_target_queue(tgtport,
2281 				be64_to_cpu(cmdiu->connection_id));
2282 	if (!queue)
2283 		return -ENOTCONN;
2284 
2285 	/*
2286 	 * note: reference taken by find_target_queue.
2287 	 * After successful fod allocation, the fod inherits the
2288 	 * ownership of that reference and will remove the reference
2289 	 * when the fod is freed.
2290 	 */
2291 
2292 	spin_lock_irqsave(&queue->qlock, flags);
2293 
2294 	fod = nvmet_fc_alloc_fcp_iod(queue);
2295 	if (fod) {
2296 		spin_unlock_irqrestore(&queue->qlock, flags);
2297 
2298 		fcpreq->nvmet_fc_private = fod;
2299 		fod->fcpreq = fcpreq;
2300 
2301 		memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len);
2302 
2303 		nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq);
2304 
2305 		return 0;
2306 	}
2307 
2308 	if (!tgtport->ops->defer_rcv) {
2309 		spin_unlock_irqrestore(&queue->qlock, flags);
2310 		/* release the queue lookup reference */
2311 		nvmet_fc_tgt_q_put(queue);
2312 		return -ENOENT;
2313 	}
2314 
2315 	deferfcp = list_first_entry_or_null(&queue->avail_defer_list,
2316 			struct nvmet_fc_defer_fcp_req, req_list);
2317 	if (deferfcp) {
2318 		/* Just re-use one that was previously allocated */
2319 		list_del(&deferfcp->req_list);
2320 	} else {
2321 		spin_unlock_irqrestore(&queue->qlock, flags);
2322 
2323 		/* Now we need to dynamically allocate one */
2324 		deferfcp = kmalloc(sizeof(*deferfcp), GFP_KERNEL);
2325 		if (!deferfcp) {
2326 			/* release the queue lookup reference */
2327 			nvmet_fc_tgt_q_put(queue);
2328 			return -ENOMEM;
2329 		}
2330 		spin_lock_irqsave(&queue->qlock, flags);
2331 	}
2332 
2333 	/* For now, use rspaddr / rsplen to save payload information */
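	/*
	 * When a fod later frees up, the transport copies the CMD IU back
	 * out of rspaddr/rsplen into the new fod and then invokes the
	 * LLDD's defer_rcv() callback.
	 */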
2334 	fcpreq->rspaddr = cmdiubuf;
2335 	fcpreq->rsplen  = cmdiubuf_len;
2336 	deferfcp->fcp_req = fcpreq;
2337 
2338 	/* defer processing till a fod becomes available */
2339 	list_add_tail(&deferfcp->req_list, &queue->pending_cmd_list);
2340 
2341 	/* NOTE: the queue lookup reference is still valid */
2342 
2343 	spin_unlock_irqrestore(&queue->qlock, flags);
2344 
2345 	return -EOVERFLOW;
2346 }
2347 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_req);
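
/*
 * Illustrative sketch (not part of this driver) of how an LLDD might call
 * nvmet_fc_rcv_fcp_req() and honor the deferred-receive contract described
 * above. The lldd_exch context and lldd_abort_exchange() helper are
 * hypothetical names used purely for illustration:
 *
 *	ret = nvmet_fc_rcv_fcp_req(tgtport, &exch->tgt_fcp_req,
 *				   cmdiu, cmdiu_len);
 *	switch (ret) {
 *	case 0:			// CMD IU was copied; buffer may be reused now
 *		break;
 *	case -EOVERFLOW:	// accepted; hold the CMD IU buffer until defer_rcv()
 *		break;
 *	default:		// not accepted; abort the exchange
 *		lldd_abort_exchange(exch);
 *		break;
 *	}
 */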
2348 
2349 /**
2350  * nvmet_fc_rcv_fcp_abort - transport entry point called by an LLDD
2351  *                       upon the reception of an ABTS for a FCP command
2352  *
2353  * Notify the transport that an ABTS has been received for a FCP command
2354  * that had been given to the transport via nvmet_fc_rcv_fcp_req(). The
2355  * LLDD believes the command is still being worked on
2356  * (template_ops->fcp_req_release() has not been called).
2357  *
2358  * The transport will wait for any outstanding work (an op to the LLDD,
2359  * which the lldd should complete with error due to the ABTS; or the
2360  * completion from the nvmet layer of the nvme command), then will
2361  * stop processing and call the template_ops->fcp_req_release() callback to
2362  * return the i/o context to the LLDD.  The LLDD may send the BA_ACC
2363  * to the ABTS either after return from this function (assuming any
2364  * outstanding op work has been terminated) or upon the callback being
2365  * called.
2366  *
2367  * @target_port: pointer to the (registered) target port the FCP CMD IU
2368  *              was received on.
2369  * @fcpreq:     pointer to the fcpreq request structure that corresponds
2370  *              to the exchange that received the ABTS.
2371  */
2372 void
2373 nvmet_fc_rcv_fcp_abort(struct nvmet_fc_target_port *target_port,
2374 			struct nvmefc_tgt_fcp_req *fcpreq)
2375 {
2376 	struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
2377 	struct nvmet_fc_tgt_queue *queue;
2378 	unsigned long flags;
2379 
2380 	if (!fod || fod->fcpreq != fcpreq)
2381 		/* job appears to have already completed, ignore abort */
2382 		return;
2383 
2384 	queue = fod->queue;
2385 
2386 	spin_lock_irqsave(&queue->qlock, flags);
2387 	if (fod->active) {
2388 		/*
2389 		 * mark as abort. The abort handler, invoked upon completion
2390 		 * of any work, will detect the aborted status and do the
2391 		 * callback.
2392 		 */
2393 		spin_lock(&fod->flock);
2394 		fod->abort = true;
2395 		fod->aborted = true;
2396 		spin_unlock(&fod->flock);
2397 	}
2398 	spin_unlock_irqrestore(&queue->qlock, flags);
2399 }
2400 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_abort);
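
/*
 * Illustrative sketch (hypothetical helper names, not part of this driver)
 * of an LLDD notifying the transport of an ABTS and, per the rules above,
 * sending the BA_ACC once any outstanding op work has been terminated:
 *
 *	nvmet_fc_rcv_fcp_abort(tgtport, &exch->tgt_fcp_req);
 *	// ... after any outstanding op work has been terminated ...
 *	lldd_send_ba_acc(exch);
 */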
2401 
2402 
2403 struct nvmet_fc_traddr {
2404 	u64	nn;
2405 	u64	pn;
2406 };
2407 
2408 static int
2409 __nvme_fc_parse_u64(substring_t *sstr, u64 *val)
2410 {
2411 	u64 token64;
2412 
2413 	if (match_u64(sstr, &token64))
2414 		return -EINVAL;
2415 	*val = token64;
2416 
2417 	return 0;
2418 }
2419 
2420 /*
2421  * This routine validates and extracts the WWN's from the TRADDR string.
2422  * As kernel parsers need the 0x to determine number base, universally
2423  * build string to parse with 0x prefix before parsing name strings.
2424  */
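/*
 * For example (hypothetical WWN values), the two accepted forms are:
 *   "nn-0x20000090fa942779:pn-0x10000090fa942779"   (with 0x prefixes)
 *   "nn-20000090fa942779:pn-10000090fa942779"       (without 0x prefixes)
 */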
2425 static int
2426 nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen)
2427 {
2428 	char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1];
2429 	substring_t wwn = { name, &name[sizeof(name)-1] };
2430 	int nnoffset, pnoffset;
2431 
2432 	/* validate the string is in one of the 2 allowed formats */
2433 	if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH &&
2434 			!strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) &&
2435 			!strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET],
2436 				"pn-0x", NVME_FC_TRADDR_OXNNLEN)) {
2437 		nnoffset = NVME_FC_TRADDR_OXNNLEN;
2438 		pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET +
2439 						NVME_FC_TRADDR_OXNNLEN;
2440 	} else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH &&
2441 			!strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) &&
2442 			!strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET],
2443 				"pn-", NVME_FC_TRADDR_NNLEN))) {
2444 		nnoffset = NVME_FC_TRADDR_NNLEN;
2445 		pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN;
2446 	} else
2447 		goto out_einval;
2448 
2449 	name[0] = '0';
2450 	name[1] = 'x';
2451 	name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0;
2452 
2453 	memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN);
2454 	if (__nvme_fc_parse_u64(&wwn, &traddr->nn))
2455 		goto out_einval;
2456 
2457 	memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN);
2458 	if (__nvme_fc_parse_u64(&wwn, &traddr->pn))
2459 		goto out_einval;
2460 
2461 	return 0;
2462 
2463 out_einval:
2464 	pr_warn("%s: bad traddr string\n", __func__);
2465 	return -EINVAL;
2466 }
2467 
2468 static int
2469 nvmet_fc_add_port(struct nvmet_port *port)
2470 {
2471 	struct nvmet_fc_tgtport *tgtport;
2472 	struct nvmet_fc_traddr traddr = { 0L, 0L };
2473 	unsigned long flags;
2474 	int ret;
2475 
2476 	/* validate the address info */
2477 	if ((port->disc_addr.trtype != NVMF_TRTYPE_FC) ||
2478 	    (port->disc_addr.adrfam != NVMF_ADDR_FAMILY_FC))
2479 		return -EINVAL;
2480 
2481 	/* map the traddr address info to a target port */
2482 
2483 	ret = nvme_fc_parse_traddr(&traddr, port->disc_addr.traddr,
2484 			sizeof(port->disc_addr.traddr));
2485 	if (ret)
2486 		return ret;
2487 
2488 	ret = -ENXIO;
2489 	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
2490 	list_for_each_entry(tgtport, &nvmet_fc_target_list, tgt_list) {
2491 		if ((tgtport->fc_target_port.node_name == traddr.nn) &&
2492 		    (tgtport->fc_target_port.port_name == traddr.pn)) {
2493 			tgtport->port = port;
2494 			ret = 0;
2495 			break;
2496 		}
2497 	}
2498 	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
2499 	return ret;
2500 }
2501 
2502 static void
2503 nvmet_fc_remove_port(struct nvmet_port *port)
2504 {
2505 	/* nothing to do */
2506 }
2507 
2508 static struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = {
2509 	.owner			= THIS_MODULE,
2510 	.type			= NVMF_TRTYPE_FC,
2511 	.msdbd			= 1,
2512 	.add_port		= nvmet_fc_add_port,
2513 	.remove_port		= nvmet_fc_remove_port,
2514 	.queue_response		= nvmet_fc_fcp_nvme_cmd_done,
2515 	.delete_ctrl		= nvmet_fc_delete_ctrl,
2516 };
2517 
2518 static int __init nvmet_fc_init_module(void)
2519 {
2520 	return nvmet_register_transport(&nvmet_fc_tgt_fcp_ops);
2521 }
2522 
2523 static void __exit nvmet_fc_exit_module(void)
2524 {
2525 	/* sanity check - all targetports should be removed */
2526 	if (!list_empty(&nvmet_fc_target_list))
2527 		pr_warn("%s: targetport list not empty\n", __func__);
2528 
2529 	nvmet_unregister_transport(&nvmet_fc_tgt_fcp_ops);
2530 
2531 	ida_destroy(&nvmet_fc_tgtport_cnt);
2532 }
2533 
2534 module_init(nvmet_fc_init_module);
2535 module_exit(nvmet_fc_exit_module);
2536 
2537 MODULE_LICENSE("GPL v2");
2538